text
stringlengths 7
1.01M
|
|---|
/**
* Copyright © 2016-2021 The Thingsboard Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.thingsboard.server.mqtt.provision.sql;
import org.thingsboard.server.dao.service.DaoSqlTest;
import org.thingsboard.server.mqtt.provision.AbstractMqttProvisionProtoDeviceTest;
/**
 * Runs the shared MQTT protobuf device-provisioning test suite
 * ({@link AbstractMqttProvisionProtoDeviceTest}) against the SQL DAO layer,
 * selected via the {@code @DaoSqlTest} marker. All test cases live in the
 * abstract superclass; this class only binds them to the SQL configuration.
 */
@DaoSqlTest
public class MqttProvisionDeviceProtoSqlTest extends AbstractMqttProvisionProtoDeviceTest {
}
|
/**
*
* Class NotExpAS$Factory$Class.java
*
* Generated by KMFStudio at 25 November 2003 13:00:53
* Visit http://www.cs.ukc.ac.uk/kmf
*
*/
package ocl.syntax.ast.expressions;
public class NotExpAS$Factory$Class
extends ocl.syntax.SyntaxFactory$Class
implements NotExpAS$Factory
{
/** Default factory constructor */
public NotExpAS$Factory$Class() {
}
public NotExpAS$Factory$Class(ocl.syntax.repository.SyntaxRepository repository) {
this.repository = repository;
}
/** Default build method */
public Object build() {
NotExpAS obj = new NotExpAS$Class();
obj.setId(ocl.syntax.SyntaxFactory$Class.newId());
repository.addElement("syntax.ast.expressions.NotExpAS", obj);
return obj;
}
/** Specialized build method */
public Object build(Boolean isMarkedPre) {
NotExpAS obj = new NotExpAS$Class(isMarkedPre);
obj.setId(ocl.syntax.SyntaxFactory$Class.newId());
repository.addElement("syntax.ast.expressions.NotExpAS", obj);
return obj;
}
/** Override toString method */
public String toString() {
return "NotExpAS_Factory";
}
/** Accept 'ocl.syntax.ast.expressions.NotExpAS$Visitor' */
public Object accept(ocl.syntax.SyntaxVisitor v, Object data) {
return v.visit(this, data);
}
}
|
package chap12;
import stone.StoneException;
import chap11.Symbols;
/**
 * Symbol table scope that reserves slot 0 for the implicit "this" reference.
 * New names cannot be created in this scope; they are delegated to the outer
 * scope with their nesting level bumped to account for the extra frame.
 */
public class SymbolThis extends Symbols {
  public static final String NAME = "this";

  public SymbolThis(Symbols outer) {
    super(outer);
    // Reserve the first slot of this scope for "this".
    add(NAME);
  }

  /** This scope only ever holds "this"; creating other names here is a bug. */
  @Override
  public int putNew(String key) {
    throw new StoneException("fatal");
  }

  /** Delegates to the enclosing scope, adjusting the nesting depth for this extra frame. */
  @Override
  public Location put(String key) {
    Location location = outer.put(key);
    if (location.nest >= 0) {
      location.nest = location.nest + 1;
    }
    return location;
  }
}
|
/* Copyright (c) 2008, Avian Contributors
Permission to use, copy, modify, and/or distribute this software
for any purpose with or without fee is hereby granted, provided
that the above copyright notice and this permission notice appear
in all copies.
There is NO WARRANTY for this software. See license.txt for
details. */
package java.lang;
import java.util.Map;
/**
 * Per-thread variable, backed by the current thread's locals map
 * (Avian runtime's {@code Thread.locals()}).
 */
public class ThreadLocal<T> {
  // Sentinel stored in the map so a cached null value can be distinguished
  // from "no entry yet" (map.get returning null).
  private static final Object Null = new Object();

  /** Value used on first access by a thread; subclasses may override. */
  protected T initialValue() {
    return null;
  }

  /** Returns this thread's value, computing and caching the initial value on first access. */
  public T get() {
    Map<ThreadLocal, Object> locals = Thread.currentThread().locals();
    Object value = locals.get(this);
    if (value == null) {
      // First access on this thread: compute, mask null with the sentinel, cache.
      value = initialValue();
      if (value == null) {
        value = Null;
      }
      locals.put(this, value);
    }
    // Unmask the sentinel back to null before returning.
    return value == Null ? null : (T) value;
  }

  /** Sets this thread's value; null is stored as the sentinel so it stays cached. */
  public void set(T value) {
    Map<ThreadLocal, Object> locals = Thread.currentThread().locals();
    locals.put(this, value == null ? Null : value);
  }
}
|
package org.hl7.fhir.r5.context;
/*-
* #%L
* org.hl7.fhir.r5
* %%
* Copyright (C) 2014 - 2019 Health Level 7
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
import org.apache.commons.io.IOUtils;
import org.hl7.fhir.exceptions.DefinitionException;
import org.hl7.fhir.exceptions.FHIRException;
import org.hl7.fhir.exceptions.FHIRFormatError;
import org.hl7.fhir.r5.conformance.ProfileUtilities;
import org.hl7.fhir.r5.conformance.ProfileUtilities.ProfileKnowledgeProvider;
import org.hl7.fhir.r5.context.IWorkerContext.ILoggingService.LogCategory;
import org.hl7.fhir.r5.context.SimpleWorkerContext.ILoadFilter;
import org.hl7.fhir.r5.formats.IParser;
import org.hl7.fhir.r5.formats.JsonParser;
import org.hl7.fhir.r5.formats.ParserType;
import org.hl7.fhir.r5.formats.XmlParser;
import org.hl7.fhir.r5.formats.IParser.OutputStyle;
import org.hl7.fhir.r5.model.Bundle;
import org.hl7.fhir.r5.model.Bundle.BundleEntryComponent;
import org.hl7.fhir.r5.model.ElementDefinition.ElementDefinitionBindingComponent;
import org.hl7.fhir.r5.model.ImplementationGuide;
import org.hl7.fhir.r5.model.MetadataResource;
import org.hl7.fhir.r5.model.Questionnaire;
import org.hl7.fhir.r5.model.Resource;
import org.hl7.fhir.r5.model.ResourceType;
import org.hl7.fhir.r5.model.StructureDefinition;
import org.hl7.fhir.r5.model.StructureDefinition.StructureDefinitionKind;
import org.hl7.fhir.r5.model.StructureDefinition.TypeDerivationRule;
import org.hl7.fhir.r5.model.StructureMap;
import org.hl7.fhir.r5.model.StructureMap.StructureMapModelMode;
import org.hl7.fhir.r5.model.StructureMap.StructureMapStructureComponent;
import org.hl7.fhir.r5.terminologies.TerminologyClient;
import org.hl7.fhir.r5.utils.INarrativeGenerator;
import org.hl7.fhir.r5.utils.IResourceValidator;
import org.hl7.fhir.r5.utils.NarrativeGenerator;
import org.hl7.fhir.utilities.CSFileInputStream;
import org.hl7.fhir.utilities.TextFile;
import org.hl7.fhir.utilities.Utilities;
import org.hl7.fhir.utilities.cache.NpmPackage;
import org.hl7.fhir.utilities.validation.ValidationMessage;
import org.hl7.fhir.utilities.validation.ValidationMessage.IssueType;
import org.hl7.fhir.utilities.validation.ValidationMessage.Source;
import ca.uhn.fhir.fluentpath.IFluentPath;
import ca.uhn.fhir.parser.DataFormatException;
/*
* This is a stand alone implementation of worker context for use inside a tool.
* It loads from the validation package (validation-min.xml.zip), and has a
* very light client to connect to an open unauthenticated terminology service
*/
public class SimpleWorkerContext extends BaseWorkerContext implements IWorkerContext, ProfileKnowledgeProvider {

  // Resource types loaded from an NPM package when no explicit type list is given.
  // Shared by both loadFromPackage overloads so the two paths stay consistent.
  private static final String[] DEFAULT_TYPES = { "StructureDefinition", "ValueSet", "CodeSystem", "SearchParameter", "OperationDefinition", "Questionnaire", "ConceptMap", "StructureMap", "NamingSystem" };

  /** Decides whether an individual resource should be loaded into the context. */
  public interface ILoadFilter {
    boolean isOkToLoad(Resource resource);
  }

  /** Pluggable parser used to load bundles (e.g. for cross-version content). */
  public interface IContextResourceLoader {
    Bundle loadBundle(InputStream stream, boolean isJson) throws FHIRException, IOException;
    String[] getTypes();
  }

  /** Factory used by {@link #newValidator()} to construct resource validators. */
  public interface IValidatorFactory {
    IResourceValidator makeValidator(IWorkerContext ctxts) throws FHIRException;
  }

  private Questionnaire questionnaire;
  private String version;   // version of the loaded content (from package / version.info)
  private String revision;  // build revision from version.info
  private String date;      // build date from version.info
  private IValidatorFactory validatorFactory;
  private boolean ignoreProfileErrors; // when true, snapshot ERRORs are tolerated (FATALs never are)
  private boolean progress;            // when true, print package-loading progress to stdout

  public SimpleWorkerContext() throws FileNotFoundException, IOException, FHIRException {
    super();
  }

  /** Copy constructor: shallow-copies context state from {@code other}. */
  public SimpleWorkerContext(SimpleWorkerContext other) throws FileNotFoundException, IOException, FHIRException {
    super();
    copy(other);
  }

  protected void copy(SimpleWorkerContext other) {
    super.copy(other);
    questionnaire = other.questionnaire;
    binaries.putAll(other.binaries);
    version = other.version;
    revision = other.revision;
    date = other.date;
    validatorFactory = other.validatorFactory;
  }

  // -- Initializations

  /**
   * Load the working context from the validation pack.
   *
   * @param path filename of the validation pack
   * @return a freshly loaded context
   */
  public static SimpleWorkerContext fromPack(String path) throws FileNotFoundException, IOException, FHIRException {
    SimpleWorkerContext res = new SimpleWorkerContext();
    res.loadFromPack(path, null);
    return res;
  }

  /** Loads a context from an NPM package, optionally allowing duplicate resources. */
  public static SimpleWorkerContext fromPackage(NpmPackage pi, boolean allowDuplicates) throws FileNotFoundException, IOException, FHIRException {
    return fromPackage(pi, allowDuplicates, null);
  }

  /** Loads a context from an NPM package with an optional resource filter. */
  public static SimpleWorkerContext fromPackage(NpmPackage pi, boolean allowDuplicates, ILoadFilter filter) throws FileNotFoundException, IOException, FHIRException {
    SimpleWorkerContext res = new SimpleWorkerContext();
    res.setAllowLoadingDuplicates(allowDuplicates);
    res.loadFromPackage(pi, null, filter);
    return res;
  }

  /** Loads a context from an NPM package with default settings. */
  public static SimpleWorkerContext fromPackage(NpmPackage pi) throws FileNotFoundException, IOException, FHIRException {
    SimpleWorkerContext res = new SimpleWorkerContext();
    res.loadFromPackage(pi, null);
    return res;
  }

  /** Loads a context from an NPM package using a custom resource loader. */
  public static SimpleWorkerContext fromPackage(NpmPackage pi, IContextResourceLoader loader) throws FileNotFoundException, IOException, FHIRException {
    return fromPackage(pi, loader, null);
  }

  /**
   * Loads a context from an NPM package using a custom loader and filter.
   * Duplicate resources are allowed, and the version is taken from the package manifest.
   */
  public static SimpleWorkerContext fromPackage(NpmPackage pi, IContextResourceLoader loader, ILoadFilter filter) throws FileNotFoundException, IOException, FHIRException {
    SimpleWorkerContext res = new SimpleWorkerContext();
    res.setAllowLoadingDuplicates(true);
    res.version = pi.getNpm().get("version").getAsString();
    res.loadFromPackage(pi, loader, filter);
    return res;
  }

  /** Loads a context from a validation pack file, optionally allowing duplicates. */
  public static SimpleWorkerContext fromPack(String path, boolean allowDuplicates) throws FileNotFoundException, IOException, FHIRException {
    SimpleWorkerContext res = new SimpleWorkerContext();
    res.setAllowLoadingDuplicates(allowDuplicates);
    res.loadFromPack(path, null);
    return res;
  }

  /** Loads a context from a validation pack file using a custom resource loader. */
  public static SimpleWorkerContext fromPack(String path, IContextResourceLoader loader) throws FileNotFoundException, IOException, FHIRException {
    SimpleWorkerContext res = new SimpleWorkerContext();
    res.loadFromPack(path, loader);
    return res;
  }

  /** Loads a context from the bundled validation.json.zip on the classpath. */
  public static SimpleWorkerContext fromClassPath() throws IOException, FHIRException {
    SimpleWorkerContext res = new SimpleWorkerContext();
    res.loadFromStream(SimpleWorkerContext.class.getResourceAsStream("validation.json.zip"), null);
    return res;
  }

  /** Loads a context from a named zip resource on the classpath root. */
  public static SimpleWorkerContext fromClassPath(String name) throws IOException, FHIRException {
    InputStream s = SimpleWorkerContext.class.getResourceAsStream("/" + name);
    SimpleWorkerContext res = new SimpleWorkerContext();
    res.loadFromStream(s, null);
    return res;
  }

  /** Loads a context from an in-memory map of file name → file content. */
  public static SimpleWorkerContext fromDefinitions(Map<String, byte[]> source) throws IOException, FHIRException {
    SimpleWorkerContext res = new SimpleWorkerContext();
    for (Map.Entry<String, byte[]> entry : source.entrySet()) {
      res.loadDefinitionItem(entry.getKey(), new ByteArrayInputStream(entry.getValue()), null, null);
    }
    return res;
  }

  /**
   * Loads a context from an in-memory map using a custom loader; wraps per-item
   * failures in a FHIRException that names the offending file.
   */
  public static SimpleWorkerContext fromDefinitions(Map<String, byte[]> source, IContextResourceLoader loader) throws FileNotFoundException, IOException, FHIRException {
    SimpleWorkerContext res = new SimpleWorkerContext();
    for (Map.Entry<String, byte[]> entry : source.entrySet()) {
      String name = entry.getKey();
      try {
        res.loadDefinitionItem(name, new ByteArrayInputStream(entry.getValue()), loader, null);
      } catch (Exception e) {
        System.out.println("Error loading " + name + ": " + e.getMessage());
        throw new FHIRException("Error loading " + name + ": " + e.getMessage(), e);
      }
    }
    return res;
  }

  /** Dispatches a single item to the right loader based on its file extension. */
  private void loadDefinitionItem(String name, InputStream stream, IContextResourceLoader loader, ILoadFilter filter) throws IOException, FHIRException {
    if (name.endsWith(".xml"))
      loadFromFile(stream, name, loader, filter);
    else if (name.endsWith(".json"))
      loadFromFileJson(stream, name, loader, filter);
    else if (name.equals("version.info"))
      readVersionInfo(stream);
    else
      loadBytes(name, stream);
  }

  /**
   * Connects this context to a terminology server.
   *
   * @return the server's software version, as reported by its capability statement
   * @throws FHIRException if the server cannot be reached
   */
  public String connectToTSServer(TerminologyClient client, String log) {
    try {
      tlog("Connect to " + client.getAddress());
      txClient = client;
      txLog = new HTMLClientLogger(log);
      txClient.setLogger(txLog);
      return txClient.getCapabilitiesStatementQuick().getSoftware().getVersion();
    } catch (Exception e) {
      // fixed typo in the user-facing message: "tun run" -> "to run"
      throw new FHIRException("Unable to connect to terminology server. Use parameter '-tx n/a' to run without using terminology services to validate LOINC, SNOMED, ICD-X etc. Error = " + e.getMessage(), e);
    }
  }

  /** Loads resources from an XML stream with no filter. */
  public void loadFromFile(InputStream stream, String name, IContextResourceLoader loader) throws IOException, FHIRException {
    // BUG FIX: this used to call loadFromFile(stream, name, null), which resolved to
    // this same 3-arg overload (infinite recursion) and silently discarded the loader.
    // Delegate to the 4-arg overload, passing the loader through.
    loadFromFile(stream, name, loader, null);
  }

  /**
   * Loads resources from an XML stream. If the stream parses to a Bundle, each
   * entry passing the filter is cached; a single MetadataResource is cached directly.
   */
  public void loadFromFile(InputStream stream, String name, IContextResourceLoader loader, ILoadFilter filter) throws IOException, FHIRException {
    Resource f;
    try {
      if (loader != null)
        f = loader.loadBundle(stream, false);
      else {
        XmlParser xml = new XmlParser();
        f = xml.parse(stream);
      }
    } catch (DataFormatException e1) {
      throw new org.hl7.fhir.exceptions.FHIRFormatError("Error parsing " + name + ":" + e1.getMessage(), e1);
    } catch (Exception e1) {
      throw new org.hl7.fhir.exceptions.FHIRFormatError("Error parsing " + name + ":" + e1.getMessage(), e1);
    }
    if (f instanceof Bundle) {
      Bundle bnd = (Bundle) f;
      for (BundleEntryComponent e : bnd.getEntry()) {
        if (e.getFullUrl() == null) {
          logger.logDebugMessage(LogCategory.CONTEXT, "unidentified resource in " + name + " (no fullUrl)");
        }
        if (filter == null || filter.isOkToLoad(e.getResource())) {
          cacheResource(e.getResource());
        }
      }
    } else if (f instanceof MetadataResource) {
      if (filter == null || filter.isOkToLoad(f)) {
        cacheResource(f);
      }
    }
  }

  /** Loads resources from a JSON stream; mirrors the XML variant above. */
  private void loadFromFileJson(InputStream stream, String name, IContextResourceLoader loader, ILoadFilter filter) throws IOException, FHIRException {
    Bundle f = null;
    try {
      if (loader != null)
        f = loader.loadBundle(stream, true);
      else {
        JsonParser json = new JsonParser();
        Resource r = json.parse(stream);
        if (r instanceof Bundle)
          f = (Bundle) r;
        else if (filter == null || filter.isOkToLoad(r)) {
          // BUG FIX: the filter was previously applied to 'f', which is always
          // null in this branch; the parsed resource 'r' is what gets cached.
          cacheResource(r);
        }
      }
    } catch (FHIRFormatError e1) {
      throw new org.hl7.fhir.exceptions.FHIRFormatError(e1.getMessage(), e1);
    }
    if (f != null)
      for (BundleEntryComponent e : f.getEntry()) {
        if (filter == null || filter.isOkToLoad(e.getResource())) {
          cacheResource(e.getResource());
        }
      }
  }

  private void loadFromPack(String path, IContextResourceLoader loader) throws FileNotFoundException, IOException, FHIRException {
    loadFromStream(new CSFileInputStream(path), loader);
  }

  /**
   * Loads resources and binaries from an NPM package.
   *
   * @param loader optional custom loader; when null the default type list is used
   * @param filter optional per-resource filter
   */
  public void loadFromPackage(NpmPackage pi, IContextResourceLoader loader, ILoadFilter filter) throws FileNotFoundException, IOException, FHIRException {
    if (progress) {
      System.out.println("Load Package " + pi.name() + "#" + pi.version());
    }
    // BUG FIX: this dereferenced loader.getTypes() unconditionally, so the
    // fromPackage(pi, allowDuplicates) path (which passes a null loader) threw NPE.
    String[] types = loader == null ? DEFAULT_TYPES : loader.getTypes();
    for (String s : pi.listResources(types)) {
      loadDefinitionItem(s, pi.load("package", s), loader, filter);
    }
    for (String s : pi.list("other")) {
      binaries.put(s, TextFile.streamToBytes(pi.load("other", s)));
    }
    if (version == null) {
      version = pi.version();
    }
  }

  /** Loads resources of the given types (or the default set when none given) from a package. */
  public void loadFromPackage(NpmPackage pi, IContextResourceLoader loader, String... types) throws FileNotFoundException, IOException, FHIRException {
    if (progress) {
      System.out.println("Load Package " + pi.name() + "#" + pi.version());
    }
    if (types.length == 0)
      types = DEFAULT_TYPES;
    for (String s : pi.listResources(types)) {
      loadDefinitionItem(s, pi.load("package", s), loader, null);
    }
    for (String s : pi.list("other")) {
      binaries.put(s, TextFile.streamToBytes(pi.load("other", s)));
    }
    if (version == null) {
      version = pi.version();
    }
  }

  /** Loads a single definition file from disk. */
  public void loadFromFile(String file, IContextResourceLoader loader) throws IOException, FHIRException {
    loadDefinitionItem(file, new CSFileInputStream(file), loader, null);
  }

  /** Loads every entry of a zip stream as a definition item. */
  private void loadFromStream(InputStream stream, IContextResourceLoader loader) throws IOException, FHIRException {
    ZipInputStream zip = new ZipInputStream(stream);
    ZipEntry ze;
    while ((ze = zip.getNextEntry()) != null) {
      loadDefinitionItem(ze.getName(), zip, loader, null);
      zip.closeEntry();
    }
    zip.close();
  }

  /**
   * Parses version.info (simple key=value lines) into version/revision/date,
   * rejecting content whose version conflicts with what is already loaded.
   */
  private void readVersionInfo(InputStream stream) throws IOException, DefinitionException {
    byte[] bytes = IOUtils.toByteArray(stream);
    binaries.put("version.info", bytes);
    String[] vi = new String(bytes).split("\\r?\\n");
    for (String s : vi) {
      if (s.startsWith("version=")) {
        if (version == null)
          version = s.substring(8);
        else if (!version.equals(s.substring(8)))
          throw new DefinitionException("Version mismatch. The context has version " + version + " loaded, and the new content being loaded is version " + s.substring(8));
      }
      if (s.startsWith("revision="))
        revision = s.substring(9);
      if (s.startsWith("date="))
        date = s.substring(5);
    }
  }

  /** Stores a raw binary item by name. */
  private void loadBytes(String name, InputStream stream) throws IOException {
    byte[] bytes = IOUtils.toByteArray(stream);
    binaries.put(name, bytes);
  }

  @Override
  public IParser getParser(ParserType type) {
    switch (type) {
    case JSON: return newJsonParser();
    case XML: return newXmlParser();
    default:
      throw new Error("Parser Type " + type.toString() + " not supported");
    }
  }

  @Override
  public IParser getParser(String type) {
    if (type.equalsIgnoreCase("JSON"))
      return new JsonParser();
    if (type.equalsIgnoreCase("XML"))
      return new XmlParser();
    // type is already a String; the redundant .toString() was removed
    throw new Error("Parser Type " + type + " not supported");
  }

  @Override
  public IParser newJsonParser() {
    return new JsonParser();
  }

  @Override
  public IParser newXmlParser() {
    return new XmlParser();
  }

  @Override
  public INarrativeGenerator getNarrativeGenerator(String prefix, String basePath) {
    return new NarrativeGenerator(prefix, basePath, this);
  }

  /** Creates a validator via the configured factory; fails if none is configured. */
  @Override
  public IResourceValidator newValidator() throws FHIRException {
    if (validatorFactory == null)
      throw new Error("No validator configured");
    return validatorFactory.makeValidator(this);
  }

  /** Sorted names of all resource StructureDefinitions (specializations only). */
  @Override
  public List<String> getResourceNames() {
    List<String> result = new ArrayList<String>();
    for (StructureDefinition sd : listStructures()) {
      if (sd.getKind() == StructureDefinitionKind.RESOURCE && sd.getDerivation() == TypeDerivationRule.SPECIALIZATION)
        result.add(sd.getName());
    }
    Collections.sort(result);
    return result;
  }

  /** Sorted names of all non-logical StructureDefinitions (specializations only). */
  @Override
  public List<String> getTypeNames() {
    List<String> result = new ArrayList<String>();
    for (StructureDefinition sd : listStructures()) {
      if (sd.getKind() != StructureDefinitionKind.LOGICAL && sd.getDerivation() == TypeDerivationRule.SPECIALIZATION)
        result.add(sd.getName());
    }
    Collections.sort(result);
    return result;
  }

  /** Stub implementation — abbreviations are not supported in this context. */
  @Override
  public String getAbbreviation(String name) {
    return "xxx";
  }

  /** Stub implementation — always reports false. */
  @Override
  public boolean isDatatype(String typeSimple) {
    return false;
  }

  /** True if 't' names a core resource type (non-constrained RESOURCE kind). */
  @Override
  public boolean isResource(String t) {
    StructureDefinition sd;
    try {
      sd = fetchResource(StructureDefinition.class, "http://hl7.org/fhir/StructureDefinition/" + t);
    } catch (Exception e) {
      return false;
    }
    if (sd == null)
      return false;
    if (sd.getDerivation() == TypeDerivationRule.CONSTRAINT)
      return false;
    return sd.getKind() == StructureDefinitionKind.RESOURCE;
  }

  @Override
  public boolean hasLinkFor(String typeSimple) {
    return false;
  }

  @Override
  public String getLinkFor(String corePath, String typeSimple) {
    return null;
  }

  @Override
  public BindingResolution resolveBinding(StructureDefinition profile, ElementDefinitionBindingComponent binding, String path) {
    return null;
  }

  @Override
  public BindingResolution resolveBinding(StructureDefinition profile, String url, String path) {
    return null;
  }

  @Override
  public String getLinkForProfile(StructureDefinition profile, String url) {
    return null;
  }

  public Questionnaire getQuestionnaire() {
    return questionnaire;
  }

  public void setQuestionnaire(Questionnaire questionnaire) {
    this.questionnaire = questionnaire;
  }

  /**
   * Returns all known structures (deduplicated), generating snapshots on the fly.
   * Snapshot failures are reported to stdout but do not prevent the structure
   * from being included in the result.
   */
  @Override
  public List<StructureDefinition> allStructures() {
    List<StructureDefinition> result = new ArrayList<StructureDefinition>();
    Set<StructureDefinition> set = new HashSet<StructureDefinition>();
    for (StructureDefinition sd : listStructures()) {
      if (!set.contains(sd)) {
        try {
          generateSnapshot(sd);
        } catch (Exception e) {
          // best effort: report and keep going so one bad profile doesn't hide the rest
          System.out.println("Unable to generate snapshot for " + tail(sd.getUrl()) + " from " + tail(sd.getBaseDefinition()) + " because " + e.getMessage());
          e.printStackTrace();
        }
        result.add(sd);
        set.add(sd);
      }
    }
    return result;
  }

  /** Last path segment of a URL, or "noname" for an empty URL. */
  private String tail(String url) {
    if (Utilities.noString(url)) {
      return "noname";
    }
    if (url.contains("/")) {
      return url.substring(url.lastIndexOf("/") + 1);
    }
    return url;
  }

  /** Loads every file in a folder as a named binary. */
  public void loadBinariesFromFolder(String folder) throws FileNotFoundException, Exception {
    for (String n : new File(folder).list()) {
      loadBytes(n, new FileInputStream(Utilities.path(folder, n)));
    }
  }

  /** Loads every "other" entry of a package as a named binary. */
  public void loadBinariesFromFolder(NpmPackage pi) throws FileNotFoundException, Exception {
    for (String n : pi.list("other")) {
      loadBytes(n, pi.load("other", n));
    }
  }

  /** Loads every .json/.xml file in a folder as resources. */
  public void loadFromFolder(String folder) throws FileNotFoundException, Exception {
    for (String n : new File(folder).list()) {
      if (n.endsWith(".json"))
        loadFromFile(Utilities.path(folder, n), new JsonParser());
      else if (n.endsWith(".xml"))
        loadFromFile(Utilities.path(folder, n), new XmlParser());
    }
  }

  /** Parses and caches one file; parse failures are deliberately ignored (best effort). */
  private void loadFromFile(String filename, IParser p) throws FileNotFoundException, Exception {
    Resource r;
    try {
      r = p.parse(new FileInputStream(filename));
      if (r.getResourceType() == ResourceType.Bundle) {
        for (BundleEntryComponent e : ((Bundle) r).getEntry()) {
          cacheResource(e.getResource());
        }
      } else {
        cacheResource(r);
      }
    } catch (Exception e) {
      // intentionally swallowed: folder loading is best-effort
      return;
    }
  }

  @Override
  public boolean prependLinks() {
    return false;
  }

  @Override
  public boolean hasCache() {
    return false;
  }

  @Override
  public String getVersion() {
    return version;
  }

  /** Returns maps whose ONLY source structure is the given url. */
  public List<StructureMap> findTransformsforSource(String url) {
    List<StructureMap> res = new ArrayList<StructureMap>();
    for (StructureMap map : listTransforms()) {
      boolean match = false;
      boolean ok = true;
      for (StructureMapStructureComponent t : map.getStructure()) {
        if (t.getMode() == StructureMapModelMode.SOURCE) {
          match = match || t.getUrl().equals(url);
          ok = ok && t.getUrl().equals(url);
        }
      }
      if (match && ok)
        res.add(map);
    }
    return res;
  }

  public IValidatorFactory getValidatorFactory() {
    return validatorFactory;
  }

  public void setValidatorFactory(IValidatorFactory validatorFactory) {
    this.validatorFactory = validatorFactory;
  }

  /** Fetches a resource and, for StructureDefinitions, ensures a snapshot exists. */
  @Override
  public <T extends Resource> T fetchResource(Class<T> class_, String uri) {
    T r = super.fetchResource(class_, uri);
    if (r instanceof StructureDefinition) {
      StructureDefinition p = (StructureDefinition) r;
      try {
        generateSnapshot(p);
      } catch (Exception e) {
        // not sure what to do in this case?
        System.out.println("Unable to generate snapshot for " + uri + ": " + e.getMessage());
      }
    }
    return r;
  }

  @Override
  public void generateSnapshot(StructureDefinition p) throws DefinitionException, FHIRException {
    generateSnapshot(p, false);
  }

  /**
   * Generates the snapshot for a profile (unless it already has one, or it is a
   * logical model and {@code logical} is false). The base definition is resolved,
   * the differential sorted if this is a constraint, and ERROR/FATAL messages
   * from snapshot generation are escalated to exceptions (ERRORs only when
   * {@link #isIgnoreProfileErrors()} is false).
   */
  @Override
  public void generateSnapshot(StructureDefinition p, boolean logical) throws DefinitionException, FHIRException {
    if (!p.hasSnapshot() && (logical || p.getKind() != StructureDefinitionKind.LOGICAL)) {
      if (!p.hasBaseDefinition())
        throw new DefinitionException("Profile " + p.getName() + " (" + p.getUrl() + ") has no base and no snapshot");
      StructureDefinition sd = fetchResource(StructureDefinition.class, p.getBaseDefinition());
      if (sd == null && "http://hl7.org/fhir/StructureDefinition/Base".equals(p.getBaseDefinition())) {
        // "Base" is synthetic - it is not in the loaded packages
        sd = ProfileUtilities.makeBaseDefinition(p.getFhirVersion());
      }
      if (sd == null) {
        throw new DefinitionException("Profile " + p.getName() + " (" + p.getUrl() + ") base " + p.getBaseDefinition() + " could not be resolved");
      }
      List<ValidationMessage> msgs = new ArrayList<ValidationMessage>();
      List<String> errors = new ArrayList<String>();
      ProfileUtilities pu = new ProfileUtilities(this, msgs, this);
      pu.setThrowException(false);
      if (sd.getDerivation() == TypeDerivationRule.CONSTRAINT) {
        pu.sortDifferential(sd, p, p.getUrl(), errors);
      }
      pu.setDebug(false);
      for (String err : errors)
        msgs.add(new ValidationMessage(Source.ProfileValidator, IssueType.EXCEPTION, p.getUserString("path"), "Error sorting Differential: " + err, ValidationMessage.IssueSeverity.ERROR));
      pu.generateSnapshot(sd, p, p.getUrl(), Utilities.extractBaseUrl(sd.getUserString("path")), p.getName());
      for (ValidationMessage msg : msgs) {
        if ((!ignoreProfileErrors && msg.getLevel() == ValidationMessage.IssueSeverity.ERROR) || msg.getLevel() == ValidationMessage.IssueSeverity.FATAL)
          throw new DefinitionException("Profile " + p.getName() + " (" + p.getUrl() + "). Error generating snapshot: " + msg.getMessage());
      }
      if (!p.hasSnapshot())
        throw new FHIRException("Profile " + p.getName() + " (" + p.getUrl() + "). Error generating snapshot");
      pu = null;
    }
  }

  public boolean isIgnoreProfileErrors() {
    return ignoreProfileErrors;
  }

  public void setIgnoreProfileErrors(boolean ignoreProfileErrors) {
    this.ignoreProfileErrors = ignoreProfileErrors;
  }

  public String listMapUrls() {
    return Utilities.listCanonicalUrls(transforms.keys());
  }

  public boolean isProgress() {
    return progress;
  }

  public void setProgress(boolean progress) {
    this.progress = progress;
  }
}
|
package com.xr.message.service.impl;
import com.xr.base.common.enums.Cluster;
import com.xr.base.common.util.AssertUtils;
import com.xr.base.common.util.DateUtils;
import com.xr.base.common.util.Utils;
import com.xr.base.jdbc.service.impl.BaseServiceImpl;
import com.xr.message.common.enums.MessageStatus;
import com.xr.message.mapper.MessageMapper;
import com.xr.message.model.ConsumerModel;
import com.xr.message.model.MessageConsumeModel;
import com.xr.message.model.MessageContentModel;
import com.xr.message.model.MessageModel;
import com.xr.message.service.IConsumerService;
import com.xr.message.service.IMessageConsumeService;
import com.xr.message.service.IMessageContentService;
import com.xr.message.service.IMessageService;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import javax.annotation.Resource;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
* <b>author</b>: forvoyager@outlook.com
* <b>time</b>: 2020-01-15 15:27:59 <br>
* <b>description</b>: 消息记录 服务实现 <br>
*/
/**
 * Service implementation for message records: persists new messages (header +
 * content in one transaction) and hands batches of pending messages to consumers.
 */
@Service("messageService")
public class MessageServiceImpl extends BaseServiceImpl<MessageMapper, MessageModel> implements IMessageService {

  @Resource
  private IMessageContentService messageContentService;
  @Resource
  private IMessageConsumeService messageConsumeService;
  @Resource
  private IConsumerService consumerService;

  @Override
  protected String getPrimaryKeyName() {
    return "id";
  }

  /**
   * Persists a new message (header record plus separately stored content)
   * in a single transaction.
   *
   * @param topic_id id of the topic the message belongs to
   * @param tag_id   id of the tag the message belongs to
   * @param type     message type code
   * @param data     message payload
   * @return the id of the newly created message
   */
  @Transactional
  @Override
  public Long insert(Long topic_id, Long tag_id, int type, String data) throws Exception {
    // Message header record; every new message starts in the init state.
    long current = DateUtils.currentTimeInSecond();
    MessageModel messageModel = new MessageModel();
    messageModel.setTopic_id(topic_id);
    messageModel.setTag_id(tag_id);
    messageModel.setType(type);
    messageModel.setStatus(MessageStatus.init.getCode());
    messageModel.setCreate_time(current);
    messageModel.setUpdate_time(current);
    messageModel.setVersion(0L);
    messageModel = this.insert(messageModel);
    // Message content is stored in a separate table keyed by message id.
    MessageContentModel contentModel = new MessageContentModel();
    contentModel.setMessage_id(messageModel.getId());
    contentModel.setContent(data);
    messageContentService.insert(contentModel);
    return messageModel.getId();
  }

  /**
   * Pulls a batch of messages for a consumer.
   *
   * @param consumer_id consumer identity (must be registered)
   * @param offset_type 0 = from the consumer's stored offset (advances it);
   *                    1 = from the explicit offset given; 2 = not yet implemented
   * @param offset      explicit start offset, used when offset_type == 1
   * @param size        requested batch size, capped at 1000
   * @return messages available for consumption (possibly empty)
   */
  @Transactional
  @Override
  public List<MessageContentModel> pull(long consumer_id, int offset_type, long offset, int size) throws Exception {
    // Cap the number of messages fetched per pull.
    size = Math.min(size, 1000);
    ConsumerModel consumer = consumerService.selectById(consumer_id, Cluster.master);
    AssertUtils.notNull(consumer, "消费者不存在,请先注册。");
    long current = DateUtils.currentTimeInSecond();
    // Messages to be consumed; was the raw Collections.EMPTY_LIST, which
    // produced an unchecked-assignment warning.
    List<MessageContentModel> contentModels = Collections.emptyList();
    if (offset_type == 0) {
      // Lock the consumer row via an optimistic-version update (transaction not
      // yet committed) so concurrent pulls for the same consumer fail fast.
      ConsumerModel updateConsumer = new ConsumerModel();
      updateConsumer.setId(consumer.getId());
      updateConsumer.setWhere_version(consumer.getVersion());
      updateConsumer.setUpdate_time(current);
      if (1 != consumerService.update(updateConsumer)) {
        Utils.throwsBizException("拉取消息失败,稍后重试。");
      }
      // Fetch the batch starting at the consumer's stored offset.
      contentModels = messageContentService.selectAvailableMessage(
          consumer.getTopic_id(), consumer.getTag_id(), consumer.getOffset_message_id(), size);
      int rows = contentModels.size();
      if (rows > 0) {
        Long next_consume_message_id = null;
        // Record one delivery row per message pulled.
        MessageConsumeModel consumeModel = null;
        List<MessageConsumeModel> consumeModels = new ArrayList<>();
        for (MessageContentModel mcm : contentModels) {
          consumeModel = new MessageConsumeModel();
          consumeModel.setConsumer_id(consumer.getId());
          consumeModel.setMessage_id(mcm.getMessage_id());
          consumeModel.setStatus(0);
          consumeModel.setCreate_time(current);
          consumeModel.setUpdate_time(current);
          consumeModel.setVersion(0L);
          consumeModels.add(consumeModel);
          next_consume_message_id = mcm.getMessage_id();
        }
        messageConsumeService.insertBatch(consumeModels);
        // Advance the consumer's offset to just past the last delivered message.
        // The first update above bumped the version, so match against version+1.
        updateConsumer.setWhere_version(consumer.getVersion() + 1);
        updateConsumer.setOffset_message_id(next_consume_message_id + 1);
        if (1 != consumerService.update(updateConsumer)) {
          Utils.throwsBizException("拉取消息失败,稍后重试。");
        }
      }
    } else if (offset_type == 1) {
      // Explicit offset: read-only, does not move the consumer's stored offset.
      contentModels = messageContentService.selectAvailableMessage(
          consumer.getTopic_id(), consumer.getTag_id(), offset, size);
    } else if (offset_type == 2) {
      // todo
    }
    return contentModels;
  }
}
|
package dev.halilerkan.week4;
import io.swagger.v3.oas.annotations.OpenAPIDefinition;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
/**
 * Spring Boot entry point for the week-4 application. {@code @OpenAPIDefinition}
 * enables springdoc/Swagger OpenAPI metadata generation for the app.
 */
@SpringBootApplication
@OpenAPIDefinition
public class Week4Application {
/** Boots the Spring application context. */
public static void main(String[] args) {
SpringApplication.run(Week4Application.class, args);
}
}
|
/*
* Copyright (c) 2021 Airbyte, Inc., all rights reserved.
*/
package io.airbyte.workers.worker_run;
import io.airbyte.api.model.ConnectionUpdate;
import io.airbyte.commons.features.FeatureFlags;
import io.airbyte.commons.functional.CheckedSupplier;
import io.airbyte.commons.json.Jsons;
import io.airbyte.config.JobConfig.ConfigType;
import io.airbyte.config.JobOutput;
import io.airbyte.config.JobResetConnectionConfig;
import io.airbyte.config.JobSyncConfig;
import io.airbyte.config.StandardSyncOutput;
import io.airbyte.config.StandardSyncSummary.ReplicationStatus;
import io.airbyte.config.persistence.ConfigNotFoundException;
import io.airbyte.scheduler.models.Job;
import io.airbyte.validation.json.JsonValidationException;
import io.airbyte.workers.JobStatus;
import io.airbyte.workers.OutputAndStatus;
import io.airbyte.workers.WorkerConstants;
import io.airbyte.workers.temporal.TemporalClient;
import io.airbyte.workers.temporal.TemporalClient.ManualSyncSubmissionResult;
import io.airbyte.workers.temporal.TemporalJobType;
import io.airbyte.workers.temporal.TemporalResponse;
import java.io.IOException;
import java.nio.file.Path;
import java.util.Set;
import java.util.UUID;
import lombok.AllArgsConstructor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@AllArgsConstructor
public class TemporalWorkerRunFactory {
private static final Logger LOGGER = LoggerFactory.getLogger(TemporalWorkerRunFactory.class);
private final TemporalClient temporalClient;
private final Path workspaceRoot;
private final String airbyteVersionOrWarnings;
private final FeatureFlags featureFlags;
/**
 * Creates a WorkerRun for the given job, using the job's current attempt count
 * as the attempt id and deferring the actual work to {@link #createSupplier}.
 */
public WorkerRun create(final Job job) {
final int attemptId = job.getAttemptsCount();
return WorkerRun.create(workspaceRoot, job.getId(), attemptId, createSupplier(job, attemptId), airbyteVersionOrWarnings);
}
public void createNewSchedulerWorkflow(final UUID connectionId) {
temporalClient.submitConnectionUpdaterAsync(connectionId);
}
public ManualSyncSubmissionResult startNewManualSync(final UUID connectionId) {
return temporalClient.startNewManualSync(connectionId);
}
public ManualSyncSubmissionResult startNewCancelation(final UUID connectionId) {
return temporalClient.startNewCancelation(connectionId);
}
public ManualSyncSubmissionResult resetConnection(final UUID connectionId) {
return temporalClient.resetConnection(connectionId);
}
public ManualSyncSubmissionResult synchronousResetConnection(final UUID connectionId) {
return temporalClient.synchronousResetConnection(connectionId);
}
public void deleteConnection(final UUID connectionId) {
temporalClient.deleteConnection(connectionId);
}
public void migrateSyncIfNeeded(final Set<UUID> connectionIds) {
temporalClient.migrateSyncIfNeeded(connectionIds);
}
public CheckedSupplier<OutputAndStatus<JobOutput>, Exception> createSupplier(final Job job, final int attemptId) {
final TemporalJobType temporalJobType = toTemporalJobType(job.getConfigType());
final UUID connectionId = UUID.fromString(job.getScope());
return switch (job.getConfigType()) {
case SYNC -> () -> {
final TemporalResponse<StandardSyncOutput> output = temporalClient.submitSync(job.getId(),
attemptId, job.getConfig().getSync(), connectionId);
return toOutputAndStatus(output);
};
case RESET_CONNECTION -> () -> {
final JobResetConnectionConfig resetConnection = job.getConfig().getResetConnection();
final JobSyncConfig config = new JobSyncConfig()
.withNamespaceDefinition(resetConnection.getNamespaceDefinition())
.withNamespaceFormat(resetConnection.getNamespaceFormat())
.withPrefix(resetConnection.getPrefix())
.withSourceDockerImage(WorkerConstants.RESET_JOB_SOURCE_DOCKER_IMAGE_STUB)
.withDestinationDockerImage(resetConnection.getDestinationDockerImage())
.withSourceConfiguration(Jsons.emptyObject())
.withDestinationConfiguration(resetConnection.getDestinationConfiguration())
.withConfiguredAirbyteCatalog(resetConnection.getConfiguredAirbyteCatalog())
.withOperationSequence(resetConnection.getOperationSequence())
.withResourceRequirements(resetConnection.getResourceRequirements());
final TemporalResponse<StandardSyncOutput> output = temporalClient.submitSync(job.getId(), attemptId, config, connectionId);
return toOutputAndStatus(output);
};
default -> throw new IllegalArgumentException("Does not support job type: " + temporalJobType);
};
}
private static TemporalJobType toTemporalJobType(final ConfigType jobType) {
return switch (jobType) {
case GET_SPEC -> TemporalJobType.GET_SPEC;
case CHECK_CONNECTION_SOURCE, CHECK_CONNECTION_DESTINATION -> TemporalJobType.CHECK_CONNECTION;
case DISCOVER_SCHEMA -> TemporalJobType.DISCOVER_SCHEMA;
case SYNC, RESET_CONNECTION -> TemporalJobType.SYNC;
};
}
private OutputAndStatus<JobOutput> toOutputAndStatus(final TemporalResponse<StandardSyncOutput> response) {
final JobStatus status;
if (!response.isSuccess()) {
status = JobStatus.FAILED;
} else {
final ReplicationStatus replicationStatus = response.getOutput().orElseThrow().getStandardSyncSummary().getStatus();
if (replicationStatus == ReplicationStatus.FAILED || replicationStatus == ReplicationStatus.CANCELLED) {
status = JobStatus.FAILED;
} else {
status = JobStatus.SUCCEEDED;
}
}
return new OutputAndStatus<>(status, new JobOutput().withSync(response.getOutput().orElse(null)));
}
private OutputAndStatus<JobOutput> toOutputAndStatusConnector() {
// Since we are async we technically can't fail
final JobStatus status = JobStatus.SUCCEEDED;
return new OutputAndStatus<>(status, new JobOutput().withSync(null));
}
public void update(final ConnectionUpdate connectionUpdate) throws JsonValidationException, ConfigNotFoundException, IOException {
temporalClient.update(connectionUpdate);
}
}
|
/**
 * Reads two integers p and u from the console and prints the sum of every
 * integer in the closed interval [p, u] (0 when p > u).
 */
public class Ex7a {

    public static void main(String[] args) {
        System.out.println("Introduza o valor de p:");
        int p = Ler.umInt();
        System.out.println("Introduza o valor de u:");
        int u = Ler.umInt();
        System.out.println(somaIntervalo(p, u));
    }

    /**
     * Sums every integer in the closed interval [p, u].
     * Extracted from main so the computation is separated from console I/O.
     *
     * @param p lower bound (inclusive)
     * @param u upper bound (inclusive)
     * @return the sum, or 0 when the interval is empty (p > u)
     */
    private static int somaIntervalo(int p, int u) {
        int soma = 0;
        for (int i = p; i <= u; i++) {
            soma += i;
        }
        return soma;
    }
}
|
/**
* Copyright © 2017 GIP-RECIA (https://www.recia.fr/)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.esco.portlet.mediacentre.model.ressource;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonPropertyOrder;
/**
 * JSON model for a teaching domain ("domaine d'enseignement"): a display name
 * plus its identifying URI. Null properties are omitted from the output and
 * {@code nom} is serialized before {@code uri}.
 */
@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonPropertyOrder({
    "nom",
    "uri"
})
public class DomaineEnseignement extends AbstractJson {

    /** Display name of the domain. */
    @JsonProperty("nom")
    private String nom;

    /** URI identifying the domain. */
    @JsonProperty("uri")
    private String uri;

    /** Returns the display name (may be {@code null}). */
    @JsonProperty("nom")
    public String getNom() {
        return this.nom;
    }

    /** Sets the display name. */
    @JsonProperty("nom")
    public void setNom(String value) {
        nom = value;
    }

    /** Returns the identifying URI (may be {@code null}). */
    @JsonProperty("uri")
    public String getUri() {
        return this.uri;
    }

    /** Sets the identifying URI. */
    @JsonProperty("uri")
    public void setUri(String value) {
        uri = value;
    }
}
|
/*******************************************************************************
*
* Pentaho Data Integration
*
* Copyright (C) 2002-2012 by Pentaho : http://www.pentaho.com
*
*******************************************************************************
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
******************************************************************************/
package org.pentaho.di.trans;
import java.util.ArrayList;
import java.util.List;
import org.pentaho.di.core.Result;
import org.pentaho.di.core.RowSet;
import org.pentaho.di.core.exception.KettleException;
import org.pentaho.di.core.logging.LogChannelInterface;
import org.pentaho.di.trans.TransMeta.TransformationType;
import org.pentaho.di.trans.step.StepMetaDataCombi;
import org.pentaho.di.trans.step.errorhandling.StreamInterface;
/**
 * Executes a transformation in a single thread: steps are topologically
 * sorted once at construction, then driven round-robin by repeated
 * {@link #oneIteration()} calls instead of running one thread per step.
 */
public class SingleThreadedTransExecutor {

  private Trans trans;
  private List<StepMetaDataCombi> steps;
  // done[s] is true once step s reported it has no more rows to process.
  private boolean[] done;
  // Count of entries in done[] that are true.
  private int nrDone;
  // Per step: its declared info streams (e.g. the lookup stream of "Stream Lookup").
  private List<List<StreamInterface>> stepInfoStreams;
  // Per step: the row sets feeding those info streams (only the ones found).
  private List<List<RowSet>> stepInfoRowSets;
  private LogChannelInterface log;

  /**
   * Prepares the executor: disables thread priority management on all steps,
   * sorts the steps from start to finish, and caches the info row sets so
   * {@link #oneIteration()} can service info streams first.
   */
  public SingleThreadedTransExecutor(final Trans trans) {
    this.trans = trans;
    this.log = trans.getLogChannel();
    steps = trans.getSteps();
    // Always disable thread priority management, it will always slow us down...
    //
    for (StepMetaDataCombi combi : steps) {
      combi.step.setUsingThreadPriorityManagment(false);
    }
    sortSteps();
    done = new boolean[steps.size()];
    nrDone = 0;
    stepInfoStreams = new ArrayList<List<StreamInterface>>();
    stepInfoRowSets = new ArrayList<List<RowSet>>();
    for (StepMetaDataCombi combi : steps) {
      List<StreamInterface> infoStreams = combi.stepMeta.getStepMetaInterface().getStepIOMeta().getInfoStreams();
      stepInfoStreams.add( infoStreams );
      List<RowSet> infoRowSets = new ArrayList<RowSet>();
      for (StreamInterface infoStream : infoStreams) {
        RowSet infoRowSet = trans.findRowSet(infoStream.getStepname(), 0, combi.stepname, 0);
        if (infoRowSet!=null) {
          infoRowSets.add(infoRowSet);
        }
      }
      stepInfoRowSets.add(infoRowSets);
    }
  }

  /**
   * Sort the steps from start to finish...
   * Uses a bi-directional bubble sort (cocktail sort) driven by
   * TransMeta.findPrevious(); the iteration window shrinks once passes stop
   * producing changes. Details are appended to one StringBuilder and logged
   * in a single call so output from other threads does not interleave.
   */
  private void sortSteps() {
    // The bubble sort algorithm in contrast to the QuickSort or MergeSort
    // algorithms
    // does indeed cover all possibilities.
    // Sorting larger transformations with hundreds of steps might be too slow
    // though.
    // We should consider caching TransMeta.findPrevious() results in that case.
    //
    trans.getTransMeta().clearCaches();
    //
    // Cocktail sort (bi-directional bubble sort)
    //
    // Original sort was taking 3ms for 30 steps
    // cocktail sort takes about 8ms for the same 30, but it works :)
    // set these to true if you are working on this algorithm and don't like
    // flying blind.
    //
    boolean testing = true; // log sort details
    int stepsMinSize = 0;
    int stepsSize = steps.size();
    // Noticed a problem with an immediate shrinking iteration window
    // trapping rows that need to be sorted.
    // This threshold buys us some time to get the sorting close before
    // starting to decrease the window size.
    //
    // TODO: this could become much smarter by tracking row movement
    // and reacting to that each outer iteration verses
    // using a threshold.
    //
    // After this many iterations enable trimming inner iteration
    // window on no change being detected.
    //
    int windowShrinkThreshold = (int) Math.round(stepsSize * 0.75);
    // give ourselves some room to sort big lists. the window threshold should
    // stop us before reaching this anyway.
    //
    int totalIterations = stepsSize * 2;
    int actualIterations = 0;
    boolean isBefore = false;
    boolean forwardChange = false;
    boolean backwardChange = false;
    boolean lastForwardChange = true;
    boolean keepSortingForward = true;
    StepMetaDataCombi one = null;
    StepMetaDataCombi two = null;
    StringBuilder tLogString = new StringBuilder();// this helps group our
    // output so other threads
    // don't get logs in our
    // output.
    tLogString.append("-------------------------------------------------------").append("\n");
    tLogString.append("--SingleThreadedTransExecutor.sortSteps(cocktail)").append("\n");
    tLogString.append("--Trans: ").append(trans.getName()).append("\n");
    tLogString.append("-").append("\n");
    long startTime = System.currentTimeMillis();
    for (int x = 0; x < totalIterations; x++) {
      // Go forward through the list
      //
      if (keepSortingForward) {
        for (int y = stepsMinSize; y < stepsSize - 1; y++) {
          one = steps.get(y);
          two = steps.get(y + 1);
          isBefore = trans.getTransMeta().findPrevious(one.stepMeta, two.stepMeta);
          if (isBefore) {
            // two was found to be positioned BEFORE one so we need to
            // switch them...
            //
            steps.set(y, two);
            steps.set(y + 1, one);
            forwardChange = true;
          }
        }
      }
      // Go backward through the list
      //
      for (int z = stepsSize - 1; z > stepsMinSize; z--) {
        one = steps.get(z);
        two = steps.get(z - 1);
        isBefore = trans.getTransMeta().findPrevious(one.stepMeta, two.stepMeta);
        if (!isBefore) {
          // two was found NOT to be positioned BEFORE one so we need to
          // switch them...
          //
          steps.set(z, two);
          steps.set(z - 1, one);
          backwardChange = true;
        }
      }
      // Shrink stepsSize(max) if there was no forward change
      //
      if (x > windowShrinkThreshold && !forwardChange) {
        // should we keep going? check the window size
        //
        stepsSize--;
        if (stepsSize <= stepsMinSize) {
          if (testing) {
            tLogString.append(String.format("stepsMinSize:%s stepsSize:%s", stepsMinSize, stepsSize));
            tLogString.append("stepsSize is <= stepsMinSize.. exiting outer sort loop. index:" + x).append("\n");
          }
          break;
        }
      }
      // shrink stepsMinSize(min) if there was no backward change
      //
      if (x > windowShrinkThreshold && !backwardChange) {
        // should we keep going? check the window size
        //
        stepsMinSize++;
        if (stepsMinSize >= stepsSize) {
          if (testing) {
            tLogString.append(String.format("stepsMinSize:%s stepsSize:%s", stepsMinSize, stepsSize)).append("\n");
            tLogString.append("stepsMinSize is >= stepsSize.. exiting outer sort loop. index:" + x).append("\n");
          }
          break;
        }
      }
      // End of both forward and backward traversal.
      // Time to see if we should keep going.
      //
      actualIterations++;
      if (!forwardChange && !backwardChange) {
        if (testing) {
          tLogString.append(String.format("existing outer loop because no change was detected going forward or backward. index:%s min:%s max:%s", x, stepsMinSize, stepsSize)).append("\n");
        }
        break;
      }
      //
      // if we are past the first iteration and there has been no change twice,
      // quit doing it!
      //
      if (keepSortingForward && x > 0 && !lastForwardChange && !forwardChange) {
        keepSortingForward = false;
      }
      lastForwardChange = forwardChange;
      forwardChange = false;
      backwardChange = false;
    }// finished sorting
    long endTime = System.currentTimeMillis();
    long totalTime = (endTime - startTime);
    tLogString.append("-------------------------------------------------------").append("\n");
    tLogString.append("Steps sort time: " + totalTime + "ms").append("\n");
    tLogString.append("Total iterations: " + actualIterations).append("\n");
    tLogString.append("Step count: " + steps.size()).append("\n");
    tLogString.append("Steps after sort: ").append("\n");
    for (StepMetaDataCombi combi : steps) {
      tLogString.append(combi.step.getStepname()).append("\n");
    }
    tLogString.append("-------------------------------------------------------").append("\n");
    if (log.isDetailed()) {
      log.logDetailed(tLogString.toString());
    }
  }

  /**
   * Verifies that every step supports the SingleThreaded transformation type
   * and initializes all steps.
   *
   * @return true when all steps initialized successfully, false otherwise
   * @throws KettleException when a step does not support single-threaded execution
   */
  public boolean init() throws KettleException {
    // See if the steps support the SingleThreaded transformation type...
    //
    for (StepMetaDataCombi combi : steps) {
      TransformationType[] types = combi.stepMeta.getStepMetaInterface().getSupportedTransformationTypes();
      boolean ok = false;
      for (TransformationType type : types) {
        if (type == TransformationType.SingleThreaded) ok = true;
      }
      if (!ok) {
        throw new KettleException("Step '"+combi.stepname+"' of type '"+combi.stepMeta.getStepID()+"' is not yet supported in a Single Threaded transformation engine.");
      }
    }
    // Initialize all the steps...
    //
    for (StepMetaDataCombi combi : steps) {
      boolean ok = combi.step.init(combi.meta, combi.data);
      if (!ok) {
        return false;
      }
    }
    return true;
  }

  /**
   * Give all steps in the transformation the chance to process all rows on input...
   *
   * Each not-yet-done step first drains its info row sets, then processes
   * either all rows until done (steps without input row sets, e.g. inputs)
   * or as many processRow() calls as rows currently waiting on its inputs.
   *
   * @return true if more iterations can be performed. False if this is not the case.
   */
  public boolean oneIteration() throws KettleException {
    for (int s=0;s<steps.size() && !trans.isStopped();s++) {
      if (!done[s]) {
        StepMetaDataCombi combi = steps.get(s);
        // If this step is waiting for data (text, db, and so on), we simply read all the data
        // This means that it is impractical to use this transformation type to load large files.
        //
        boolean stepDone = false;
        // For every input row we call the processRow() method of the step.
        //
        List<RowSet> infoRowSets = stepInfoRowSets.get(s);
        // Loop over info-rowsets FIRST to make sure we support the "Stream Lookup" step and so on.
        //
        for (RowSet rowSet : infoRowSets) {
          boolean once = true;
          // "once" guarantees at least one processRow() call even when the
          // info row set is currently empty.
          while(once || (rowSet.size()>0 && !stepDone)) {
            once=false;
            stepDone = !combi.step.processRow(combi.meta, combi.data);
            if(combi.step.getErrors() > 0) {
              return false;
            }
          }
        }
        // Do normal processing of input rows...
        //
        List<RowSet> rowSets = combi.step.getInputRowSets();
        // If there are no input row sets, we read all rows until finish.
        // This applies to steps like "Table Input", "Text File Input" and so on.
        // If they do have an input row set, to get filenames or other parameters,
        // we need to handle this in the batchComplete() methods.
        //
        if (rowSets.size()==0) {
          while (!stepDone && !trans.isStopped()) {
            stepDone = !combi.step.processRow(combi.meta, combi.data);
            if(combi.step.getErrors() > 0) {
              return false;
            }
          }
        } else {
          // Since we can't be sure that the step actually reads from the row sets where we measure rows,
          // we simply count the total nr of rows on input. The steps will find the rows in either row set.
          //
          int nrRows=0;
          for (RowSet rowSet : rowSets) {
            nrRows+=rowSet.size();
          }
          // Now do the number of processRows() calls.
          //
          for (int i=0;i<nrRows;i++) {
            stepDone = !combi.step.processRow(combi.meta, combi.data);
            if(combi.step.getErrors() > 0) {
              return false;
            }
          }
        }
        // Signal the step that a batch of rows has passed for this iteration (sort rows and all)
        //
        combi.step.batchComplete();
        // System.out.println(combi.step.toString()+" : input="+getTotalRows(combi.step.getInputRowSets())+", output="+getTotalRows(combi.step.getOutputRowSets()));
        if (stepDone) {
          nrDone++;
        }
        done[s] = stepDone;
      }
    }
    return nrDone<steps.size() && !trans.isStopped();
  }

  /** Sums the sizes of all given row sets. */
  protected int getTotalRows(List<RowSet> rowSets) {
    int total=0;
    for (RowSet rowSet : rowSets) total+=rowSet.size();
    return total;
  }

  /** @return the error count of the underlying transformation */
  public long getErrors() {
    return trans.getErrors();
  }

  /** @return the result of the underlying transformation */
  public Result getResult() {
    return trans.getResult();
  }

  /** @return true when the underlying transformation has been stopped */
  public boolean isStopped() {
    return trans.isStopped();
  }

  /**
   * Signals output-done to every step, then disposes and marks all steps
   * stopped. Call once after the final {@link #oneIteration()}.
   */
  public void dispose() throws KettleException {
    // Call output done.
    //
    for (StepMetaDataCombi combi : trans.getSteps()) {
      combi.step.setOutputDone();
    }
    // Finalize all the steps...
    //
    for (StepMetaDataCombi combi : steps) {
      combi.step.dispose(combi.meta, combi.data);
      combi.step.markStop();
    }
  }
}
|
/*
* Copyright 2000-2014 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.ui;
import org.jetbrains.annotations.NotNull;
import java.util.EventListener;
/**
 * Listener for user interactions with a checkbox tree: double-clicks and
 * checked-state changes of individual nodes.
 *
 * @author nik
 */
public interface CheckboxTreeListener extends EventListener {
  /** Called when the user double-clicks {@code node}. */
  void mouseDoubleClicked(@NotNull CheckedTreeNode node);

  /** Called after the checked state of {@code node} has changed. */
  void nodeStateChanged(@NotNull CheckedTreeNode node);

  /** Called just before the checked state of {@code node} changes. */
  void beforeNodeStateChanged(@NotNull CheckedTreeNode node);
}
|
package com.baidu.ueditor.hunter;
import com.baidu.ueditor.PathFormat;
import com.baidu.ueditor.define.*;
import com.baidu.ueditor.upload.StorageManager;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
/**
* 图片抓取器
*
* @author hancong03@baidu.com
*
*/
/**
 * 图片抓取器 (image hunter): downloads remote images referenced in content and
 * stores them locally through {@link StorageManager}, after validating host,
 * HTTP status, content type and content length.
 *
 * @author hancong03@baidu.com
 *
 */
public class ImageHunter {

    // Target file name template (PathFormat placeholders allowed).
    private String filename = null;
    // Save path template, relative to rootPath.
    private String savePath = null;
    // Physical root directory for stored files.
    private String rootPath = null;
    // Allowed file suffixes derived from the "allowFiles" config entry.
    private List<String> allowTypes = null;
    // Maximum allowed download size in bytes.
    private long maxSize = -1;
    // Host-name blacklist; requests to these hosts are refused.
    private List<String> filters = null;

    public ImageHunter(Map<String, Object> conf) {
        this.filename = (String) conf.get("filename");
        this.savePath = (String) conf.get("savePath");
        this.rootPath = (String) conf.get("rootPath");
        this.maxSize = (Long) conf.get("maxSize");
        this.allowTypes = Arrays.asList((String[]) conf.get("allowFiles"));
        this.filters = Arrays.asList((String[]) conf.get("filter"));
    }

    /**
     * Captures every URL in {@code list}, aggregating the per-URL results
     * into a single {@link MultiState}.
     */
    public State capture(String[] list) {
        MultiState state = new MultiState(true);
        for (String source : list) {
            state.addState(captureRemoteData(source));
        }
        return state;
    }

    /**
     * Downloads a single remote image and stores it locally.
     *
     * @param urlStr the remote image URL
     * @return a success state carrying the local "url" and original "source",
     *         or a failure state with the reason
     */
    public State captureRemoteData(String urlStr) {
        HttpURLConnection connection = null;
        try {
            URL url = new URL(urlStr);
            if (!validHost(url.getHost())) {
                return new BaseState(false, AppInfo.PREVENT_HOST);
            }
            connection = (HttpURLConnection) url.openConnection();
            connection.setInstanceFollowRedirects(true);
            connection.setUseCaches(true);
            if (!validContentState(connection.getResponseCode())) {
                return new BaseState(false, AppInfo.CONNECTION_ERROR);
            }
            String suffix = MIMEType.getSuffix(connection.getContentType());
            if (!validFileType(suffix)) {
                return new BaseState(false, AppInfo.NOT_ALLOW_FILE_TYPE);
            }
            if (!validFileSize(connection.getContentLength())) {
                return new BaseState(false, AppInfo.MAX_SIZE);
            }
            String savePath = this
                    .getPath(this.savePath, this.filename, suffix);
            String physicalPath = this.rootPath + savePath;
            State state = StorageManager.saveFileByInputStream(
                    connection.getInputStream(), physicalPath);
            if (state.isSuccess()) {
                state.putInfo("url", PathFormat.format(savePath));
                state.putInfo("source", urlStr);
            }
            return state;
        } catch (Exception e) {
            // Any network/parse failure is reported uniformly as a remote failure.
            return new BaseState(false, AppInfo.REMOTE_FAIL);
        } finally {
            // Fix: release the connection on every exit path; previously it was
            // leaked on validation-failure returns and on exceptions.
            if (connection != null) {
                connection.disconnect();
            }
        }
    }

    /** Builds the save path by appending the suffix and expanding placeholders. */
    private String getPath(String savePath, String filename, String suffix) {
        return PathFormat.parse(savePath + suffix, filename);
    }

    // NOTE(review): blacklist-only check; redirects to a filtered host after the
    // initial check are not re-validated — confirm whether that is acceptable.
    private boolean validHost(String hostname) {
        return !filters.contains(hostname);
    }

    /** Accepts only HTTP 200 responses. */
    private boolean validContentState(int code) {
        return HttpURLConnection.HTTP_OK == code;
    }

    /** Accepts only suffixes listed in the "allowFiles" configuration. */
    private boolean validFileType(String type) {
        return this.allowTypes.contains(type);
    }

    // NOTE(review): a server that omits Content-Length reports -1, which passes
    // this check — confirm whether unknown-length downloads should be rejected.
    private boolean validFileSize(int size) {
        return size < this.maxSize;
    }
}
|
/**
* Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.analytics.financial.model.interestrate;
import org.apache.commons.lang.Validate;
import org.threeten.bp.ZonedDateTime;
import com.opengamma.analytics.financial.model.interestrate.definition.StandardDiscountBondModelDataBundle;
import com.opengamma.analytics.financial.model.tree.RecombiningBinomialTree;
import com.opengamma.analytics.math.function.Function1D;
import com.opengamma.analytics.math.rootfinding.BrentSingleRootFinder;
import com.opengamma.analytics.math.rootfinding.RealSingleRootFinder;
import com.opengamma.util.time.DateUtils;
import com.opengamma.util.tuple.Triple;
/**
*
*/
/**
 * Black-Derman-Toy short-rate model calibrated to yields only: builds a
 * recombining binomial tree of (short rate, one-period discount factor,
 * Arrow-Debreu price) triples out to a given date.
 */
public class BlackDermanToyYieldOnlyInterestRateModel {
  // Brent root-finder used to solve for the median rate u[i] at each time step.
  private final RealSingleRootFinder _rootFinder = new BrentSingleRootFinder();
  // Number of time steps in the tree.
  private final int _n;
  // Number of nodes at the widest level of the recombining tree.
  private final int _j;

  public BlackDermanToyYieldOnlyInterestRateModel(final int n) {
    if (n < 2) {
      throw new IllegalArgumentException("Must have more than one node");
    }
    _n = n;
    _j = RecombiningBinomialTree.NODES.evaluate(_n);
  }

  /**
   * Returns a function that, given market data (discount curve and short-rate
   * volatility), produces the calibrated tree of (rate, discount factor,
   * Arrow-Debreu price) triples out to {@code time}.
   *
   * @param time the horizon date for the tree, not null
   */
  public Function1D<StandardDiscountBondModelDataBundle, RecombiningBinomialTree<Triple<Double, Double, Double>>> getTrees(final ZonedDateTime time) {
    Validate.notNull(time, "time");
    return new Function1D<StandardDiscountBondModelDataBundle, RecombiningBinomialTree<Triple<Double, Double, Double>>>() {

      @SuppressWarnings({"unchecked", "synthetic-access" })
      @Override
      public RecombiningBinomialTree<Triple<Double, Double, Double>> evaluate(final StandardDiscountBondModelDataBundle data) {
        Validate.notNull(data, "data");
        // r: short rates, q: Arrow-Debreu prices, d: one-period discount factors.
        final double[][] r = new double[_n + 1][_j];
        final double[][] q = new double[_n + 1][_j];
        final double[][] d = new double[_n + 1][_j];
        // u[i]: median rate at step i (solved for below); p[i]: bond prices.
        final double[] u = new double[_n + 1];
        final double[] p = new double[_n + 2];
        final double t = DateUtils.getDifferenceInYears(data.getDate(), time);
        final double dt = t / _n;
        final double dtSqrt = Math.sqrt(dt);
        final double r1 = data.getShortRate(dt);
        final double sigma = data.getShortRateVolatility(dt);
        p[0] = 1.0;
        for (int i = 1; i <= _n + 1; i++) {
          p[i] = 1. / Math.pow(1 + data.getShortRate(i) * dt, dt * i);
        }
        // Root of the tree.
        q[0][0] = 1.;
        u[0] = r1;
        r[0][0] = r1;
        d[0][0] = 1. / (1 + r1 * dt);
        for (int i = 1; i <= _n; i++) {
          // Forward-induct the Arrow-Debreu prices from level i-1
          // (0.5 = risk-neutral up/down probability).
          q[i][0] = 0.5 * q[i - 1][0] * d[i - 1][0];
          q[i][i] = 0.5 * q[i - 1][i - 1] * d[i - 1][i - 1];
          for (int j = -i + 2, k = 1; j <= i - 2; j += 2, k++) {
            q[i][k] = 0.5 * (q[i - 1][k - 1] * d[i - 1][k - 1] + q[i - 1][k] * d[i - 1][k]);
          }
          // Solve for the median rate so the tree reprices the next bond.
          u[i] = _rootFinder.getRoot(getMedian(sigma, i, dt, q, p[i + 1]), 0., 1.);
          // Spread the rates around the median and compute discount factors.
          for (int j = -i, k = 0; j <= i; j += 2, k++) {
            r[i][k] = u[i] * Math.exp(sigma * j * dtSqrt);
            d[i][k] = 1. / (1 + r[i][k] * dt);
          }
        }
        final Triple<Double, Double, Double>[][] result = new Triple[_n + 1][_j];
        for (int i = 0; i <= _n; i++) {
          for (int j = 0; j < _j; j++) {
            result[i][j] = Triple.of(r[i][j], d[i][j], q[i][j]);
          }
        }
        return new RecombiningBinomialTree<>(result);
      }

    };
  }

  /**
   * Objective function in the median rate u: the discounted Arrow-Debreu
   * prices at step i minus the target bond price p. Its root calibrates u[i].
   */
  protected Function1D<Double, Double> getMedian(final double sigma, final int i, final double dt, final double[][] q, final double p) {
    return new Function1D<Double, Double>() {

      @Override
      public Double evaluate(final Double u) {
        double sum = 0.;
        final double dtSqrt = Math.sqrt(dt);
        for (int j = -i, k = 0; j <= i; j += 2, k++) {
          sum += q[i][k] / (1 + u * Math.exp(sigma * j * dtSqrt) * dt);
        }
        return sum - p;
      }

    };
  }
}
|
package net.bytebuddy.dynamic.scaffold.inline;
import net.bytebuddy.description.method.MethodDescription;
import net.bytebuddy.description.method.ParameterList;
import net.bytebuddy.description.type.TypeDescription;
import net.bytebuddy.description.type.generic.GenericTypeDescription;
import net.bytebuddy.description.type.generic.GenericTypeList;
import net.bytebuddy.implementation.Implementation;
import net.bytebuddy.implementation.bytecode.StackManipulation;
import net.bytebuddy.implementation.bytecode.StackSize;
import net.bytebuddy.test.utility.MockitoRule;
import net.bytebuddy.test.utility.ObjectPropertyAssertion;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestRule;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.mockito.Mock;
import org.objectweb.asm.MethodVisitor;
import org.objectweb.asm.Opcodes;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.mockito.Mockito.*;
/**
 * Tests {@code MethodRebaseResolver.Resolution.ForRebasedMethod}: a rebased
 * method must keep its declaring type, return type and parameters, receive the
 * transformed name, and get synthetic modifiers whose visibility depends on
 * whether the declaring type is an interface.
 */
@RunWith(Parameterized.class)
public class MethodRebaseResolverResolutionForRebasedMethodTest {

    private static final String FOO = "foo", BAR = "bar", QUX = "qux", BAZ = "baz";

    // Two scenarios: non-interface types expect synthetic+private rebased
    // methods, interface types expect synthetic+public.
    @Parameterized.Parameters
    public static Collection<Object[]> data() {
        return Arrays.asList(new Object[][] {
                {false, Opcodes.ACC_SYNTHETIC | Opcodes.ACC_PRIVATE},
                {true, Opcodes.ACC_SYNTHETIC | Opcodes.ACC_PUBLIC}
        });
    }

    // Whether the declaring type is stubbed as an interface.
    private final boolean interfaceType;

    // Modifiers expected on the rebased method in this scenario.
    private final int rebasedMethodModifiers;

    public MethodRebaseResolverResolutionForRebasedMethodTest(boolean interfaceType, int rebasedMethodModifiers) {
        this.interfaceType = interfaceType;
        this.rebasedMethodModifiers = rebasedMethodModifiers;
    }

    @Rule
    public TestRule mockitoRule = new MockitoRule(this);

    @Mock
    private MethodDescription.InDefinedShape methodDescription;

    @Mock
    private MethodRebaseResolver.MethodNameTransformer methodNameTransformer, otherMethodNameTransformer;

    @Mock
    private StackManipulation stackManipulation;

    @Mock
    private TypeDescription typeDescription, returnType, parameterType;

    @Mock
    private MethodVisitor methodVisitor;

    @Mock
    private Implementation.Context implementationContext;

    // Stubs a method named FOO on declaring type BAR with one parameter; the
    // name transformer maps it to QUX.
    @Before
    @SuppressWarnings("unchecked")
    public void setUp() throws Exception {
        when(methodDescription.getDeclaringType()).thenReturn(typeDescription);
        when(methodDescription.getReturnType()).thenReturn(returnType);
        when(methodDescription.getInternalName()).thenReturn(FOO);
        when(methodDescription.getDescriptor()).thenReturn(BAZ);
        when(typeDescription.getInternalName()).thenReturn(BAR);
        when(typeDescription.getDescriptor()).thenReturn(BAR);
        when(typeDescription.isInterface()).thenReturn(interfaceType);
        when(methodNameTransformer.transform(methodDescription)).thenReturn(QUX);
        when(otherMethodNameTransformer.transform(methodDescription)).thenReturn(FOO + BAR);
        when(parameterType.getStackSize()).thenReturn(StackSize.ZERO);
        when(methodDescription.getParameters()).thenReturn(new ParameterList.Explicit.ForTypes(methodDescription, Collections.singletonList(parameterType)));
        when(returnType.asErasure()).thenReturn(returnType);
        when(returnType.accept(any(GenericTypeDescription.Visitor.class))).thenReturn(returnType);
        when(parameterType.asErasure()).thenReturn(parameterType);
        when(parameterType.accept(any(GenericTypeDescription.Visitor.class))).thenReturn(parameterType);
    }

    // The resolution preserves declaring type, return type and parameters,
    // renames to QUX with the expected modifiers, and contributes no
    // additional stack arguments (zero-size manipulation, no visitor calls).
    @Test
    public void testPreservation() throws Exception {
        MethodRebaseResolver.Resolution resolution = MethodRebaseResolver.Resolution.ForRebasedMethod.of(methodDescription, methodNameTransformer);
        assertThat(resolution.isRebased(), is(true));
        assertThat(resolution.getResolvedMethod().getDeclaringType(), is((GenericTypeDescription) typeDescription));
        assertThat(resolution.getResolvedMethod().getInternalName(), is(QUX));
        assertThat(resolution.getResolvedMethod().getModifiers(), is(rebasedMethodModifiers));
        assertThat(resolution.getResolvedMethod().getReturnType(), is((GenericTypeDescription) returnType));
        assertThat(resolution.getResolvedMethod().getParameters(), is((ParameterList) new ParameterList.Explicit.ForTypes(resolution.getResolvedMethod(),
                Collections.singletonList(parameterType))));
        StackManipulation.Size size = resolution.getAdditionalArguments().apply(methodVisitor, implementationContext);
        assertThat(size.getSizeImpact(), is(0));
        assertThat(size.getMaximalSize(), is(0));
        verifyZeroInteractions(methodVisitor);
        verifyZeroInteractions(implementationContext);
    }

    // Standard hashCode/equals contract check with refined mock behavior.
    @Test
    @SuppressWarnings("unchecked")
    public void testObjectProperties() throws Exception {
        ObjectPropertyAssertion.of(MethodRebaseResolver.Resolution.ForRebasedMethod.class).refine(new ObjectPropertyAssertion.Refinement<MethodDescription>() {
            @Override
            public void apply(MethodDescription mock) {
                when(mock.getParameters()).thenReturn((ParameterList) new ParameterList.Empty());
                when(mock.getExceptionTypes()).thenReturn(new GenericTypeList.Empty());
                when(mock.getDeclaringType()).thenReturn(mock(TypeDescription.class));
                TypeDescription returnType = mock(TypeDescription.class);
                when(returnType.asErasure()).thenReturn(returnType);
                when(mock.getReturnType()).thenReturn(returnType);
            }
        }).refine(new ObjectPropertyAssertion.Refinement<MethodRebaseResolver.MethodNameTransformer>() {
            @Override
            public void apply(MethodRebaseResolver.MethodNameTransformer mock) {
                when(mock.transform(any(MethodDescription.class))).thenReturn(FOO + System.identityHashCode(mock));
            }
        }).apply();
    }
}
|
package com.wanjian.puppet;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
/**
 * Example local unit test, which will execute on the development machine (host).
 *
 * @see <a href="http://d.android.com/tools/testing">Testing documentation</a>
 */
public class ExampleUnitTest {

    /** Sanity check that basic integer arithmetic behaves as expected. */
    @Test
    public void addition_isCorrect() throws Exception {
        final int expected = 4;
        final int actual = 2 + 2;
        assertEquals(expected, actual);
    }
}
|
// Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University
// Copyright (c) 2011, 2012 Open Networking Foundation
// Copyright (c) 2012, 2013 Big Switch Networks, Inc.
// This library was generated by the LoxiGen Compiler.
// See the file LICENSE.txt which should have been included in the source distribution
// Automatically generated by LOXI from template of_class.java
// Do not modify
package org.projectfloodlight.openflow.protocol.ver14;
import org.projectfloodlight.openflow.protocol.*;
import org.projectfloodlight.openflow.protocol.action.*;
import org.projectfloodlight.openflow.protocol.actionid.*;
import org.projectfloodlight.openflow.protocol.bsntlv.*;
import org.projectfloodlight.openflow.protocol.errormsg.*;
import org.projectfloodlight.openflow.protocol.meterband.*;
import org.projectfloodlight.openflow.protocol.instruction.*;
import org.projectfloodlight.openflow.protocol.instructionid.*;
import org.projectfloodlight.openflow.protocol.match.*;
import org.projectfloodlight.openflow.protocol.stat.*;
import org.projectfloodlight.openflow.protocol.oxm.*;
import org.projectfloodlight.openflow.protocol.oxs.*;
import org.projectfloodlight.openflow.protocol.queueprop.*;
import org.projectfloodlight.openflow.types.*;
import org.projectfloodlight.openflow.util.*;
import org.projectfloodlight.openflow.exceptions.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Set;
import com.google.common.collect.ImmutableSet;
import io.netty.buffer.ByteBuf;
import com.google.common.hash.PrimitiveSink;
import com.google.common.hash.Funnel;
/**
 * OpenFlow 1.4 ({@code OFVersion.OF_14}) meter statistics request message
 * ({@code OFPMP_METER} multipart request). Immutable value object generated
 * from the LOXI template: use {@link #createBuilder()} / {@link Builder} to
 * derive instances, and the {@code READER}/{@code WRITER} singletons for wire
 * (de)serialization.
 */
class OFMeterStatsRequestVer14 implements OFMeterStatsRequest {
    private static final Logger logger = LoggerFactory.getLogger(OFMeterStatsRequestVer14.class);
    // version: 1.4
    final static byte WIRE_VERSION = 5;
    final static int LENGTH = 24;

        private final static long DEFAULT_XID = 0x0L;
        private final static Set<OFStatsRequestFlags> DEFAULT_FLAGS = ImmutableSet.<OFStatsRequestFlags>of();
        private final static long DEFAULT_METER_ID = 0x0L;

    // OF message fields
    private final long xid;
    private final Set<OFStatsRequestFlags> flags;
    private final long meterId;
//
    // Immutable default instance
    final static OFMeterStatsRequestVer14 DEFAULT = new OFMeterStatsRequestVer14(
        DEFAULT_XID, DEFAULT_FLAGS, DEFAULT_METER_ID
    );

    // package private constructor - used by readers, builders, and factory
    OFMeterStatsRequestVer14(long xid, Set<OFStatsRequestFlags> flags, long meterId) {
        if(flags == null) {
            throw new NullPointerException("OFMeterStatsRequestVer14: property flags cannot be null");
        }
        // xid and meterId are unsigned 32-bit on the wire; normalize masks to u32 range
        this.xid = U32.normalize(xid);
        this.flags = flags;
        this.meterId = U32.normalize(meterId);
    }

    // Accessors for OF message fields
    @Override
    public OFVersion getVersion() {
        return OFVersion.OF_14;
    }

    @Override
    public OFType getType() {
        return OFType.STATS_REQUEST;
    }

    @Override
    public long getXid() {
        return xid;
    }

    @Override
    public OFStatsType getStatsType() {
        return OFStatsType.METER;
    }

    @Override
    public Set<OFStatsRequestFlags> getFlags() {
        return flags;
    }

    @Override
    public long getMeterId() {
        return meterId;
    }

    public OFMeterStatsRequest.Builder createBuilder() {
        return new BuilderWithParent(this);
    }

    /** Builder that falls back to a parent message's values for unset properties. */
    static class BuilderWithParent implements OFMeterStatsRequest.Builder {
        final OFMeterStatsRequestVer14 parentMessage;

        // OF message fields
        private boolean xidSet;
        private long xid;
        private boolean flagsSet;
        private Set<OFStatsRequestFlags> flags;
        private boolean meterIdSet;
        private long meterId;

        BuilderWithParent(OFMeterStatsRequestVer14 parentMessage) {
            this.parentMessage = parentMessage;
        }

    @Override
    public OFVersion getVersion() {
        return OFVersion.OF_14;
    }

    @Override
    public OFType getType() {
        return OFType.STATS_REQUEST;
    }

    @Override
    public long getXid() {
        return xid;
    }

    @Override
    public OFMeterStatsRequest.Builder setXid(long xid) {
        this.xid = xid;
        this.xidSet = true;
        return this;
    }

    @Override
    public OFStatsType getStatsType() {
        return OFStatsType.METER;
    }

    @Override
    public Set<OFStatsRequestFlags> getFlags() {
        return flags;
    }

    @Override
    public OFMeterStatsRequest.Builder setFlags(Set<OFStatsRequestFlags> flags) {
        this.flags = flags;
        this.flagsSet = true;
        return this;
    }

    @Override
    public long getMeterId() {
        return meterId;
    }

    @Override
    public OFMeterStatsRequest.Builder setMeterId(long meterId) {
        this.meterId = meterId;
        this.meterIdSet = true;
        return this;
    }

        @Override
        public OFMeterStatsRequest build() {
                // unset properties inherit the parent message's value
                long xid = this.xidSet ? this.xid : parentMessage.xid;
                Set<OFStatsRequestFlags> flags = this.flagsSet ? this.flags : parentMessage.flags;
                if(flags == null)
                    throw new NullPointerException("Property flags must not be null");
                long meterId = this.meterIdSet ? this.meterId : parentMessage.meterId;
                //
                return new OFMeterStatsRequestVer14(
                    xid,
                    flags,
                    meterId
                );
        }
    }

    /** Builder that falls back to the message defaults for unset properties. */
    static class Builder implements OFMeterStatsRequest.Builder {
        // OF message fields
        private boolean xidSet;
        private long xid;
        private boolean flagsSet;
        private Set<OFStatsRequestFlags> flags;
        private boolean meterIdSet;
        private long meterId;

    @Override
    public OFVersion getVersion() {
        return OFVersion.OF_14;
    }

    @Override
    public OFType getType() {
        return OFType.STATS_REQUEST;
    }

    @Override
    public long getXid() {
        return xid;
    }

    @Override
    public OFMeterStatsRequest.Builder setXid(long xid) {
        this.xid = xid;
        this.xidSet = true;
        return this;
    }

    @Override
    public OFStatsType getStatsType() {
        return OFStatsType.METER;
    }

    @Override
    public Set<OFStatsRequestFlags> getFlags() {
        return flags;
    }

    @Override
    public OFMeterStatsRequest.Builder setFlags(Set<OFStatsRequestFlags> flags) {
        this.flags = flags;
        this.flagsSet = true;
        return this;
    }

    @Override
    public long getMeterId() {
        return meterId;
    }

    @Override
    public OFMeterStatsRequest.Builder setMeterId(long meterId) {
        this.meterId = meterId;
        this.meterIdSet = true;
        return this;
    }
//
        @Override
        public OFMeterStatsRequest build() {
            long xid = this.xidSet ? this.xid : DEFAULT_XID;
            Set<OFStatsRequestFlags> flags = this.flagsSet ? this.flags : DEFAULT_FLAGS;
            if(flags == null)
                throw new NullPointerException("Property flags must not be null");
            long meterId = this.meterIdSet ? this.meterId : DEFAULT_METER_ID;
            return new OFMeterStatsRequestVer14(
                    xid,
                    flags,
                    meterId
                );
        }
    }

    final static Reader READER = new Reader();
    static class Reader implements OFMessageReader<OFMeterStatsRequest> {
        /**
         * Decodes a meter stats request from the buffer. Returns null (with the
         * reader index rewound) if the buffer does not yet contain the full
         * message; throws {@link OFParseError} on any fixed-field mismatch.
         */
        @Override
        public OFMeterStatsRequest readFrom(ByteBuf bb) throws OFParseError {
            int start = bb.readerIndex();
            // fixed value property version == 5
            byte version = bb.readByte();
            if(version != (byte) 0x5)
                throw new OFParseError("Wrong version: Expected=OFVersion.OF_14(5), got="+version);
            // fixed value property type == 18
            byte type = bb.readByte();
            if(type != (byte) 0x12)
                throw new OFParseError("Wrong type: Expected=OFType.STATS_REQUEST(18), got="+type);
            int length = U16.f(bb.readShort());
            if(length != 24)
                throw new OFParseError("Wrong length: Expected=24(24), got="+length);
            if(bb.readableBytes() + (bb.readerIndex() - start) < length) {
                // Buffer does not have all data yet
                bb.readerIndex(start);
                return null;
            }
            if(logger.isTraceEnabled())
                logger.trace("readFrom - length={}", length);
            long xid = U32.f(bb.readInt());
            // fixed value property statsType == 9
            short statsType = bb.readShort();
            if(statsType != (short) 0x9)
                throw new OFParseError("Wrong statsType: Expected=OFStatsType.METER(9), got="+statsType);
            Set<OFStatsRequestFlags> flags = OFStatsRequestFlagsSerializerVer14.readFrom(bb);
            // pad: 4 bytes
            bb.skipBytes(4);
            long meterId = U32.f(bb.readInt());
            // pad: 4 bytes
            bb.skipBytes(4);

            OFMeterStatsRequestVer14 meterStatsRequestVer14 = new OFMeterStatsRequestVer14(
                    xid,
                      flags,
                      meterId
                    );
            if(logger.isTraceEnabled())
                logger.trace("readFrom - read={}", meterStatsRequestVer14);
            return meterStatsRequestVer14;
        }
    }

    public void putTo(PrimitiveSink sink) {
        FUNNEL.funnel(this, sink);
    }

    final static OFMeterStatsRequestVer14Funnel FUNNEL = new OFMeterStatsRequestVer14Funnel();
    static class OFMeterStatsRequestVer14Funnel implements Funnel<OFMeterStatsRequestVer14> {
        private static final long serialVersionUID = 1L;
        @Override
        public void funnel(OFMeterStatsRequestVer14 message, PrimitiveSink sink) {
            // fixed value property version = 5
            sink.putByte((byte) 0x5);
            // fixed value property type = 18
            sink.putByte((byte) 0x12);
            // fixed value property length = 24
            sink.putShort((short) 0x18);
            sink.putLong(message.xid);
            // fixed value property statsType = 9
            sink.putShort((short) 0x9);
            OFStatsRequestFlagsSerializerVer14.putTo(message.flags, sink);
            // skip pad (4 bytes)
            sink.putLong(message.meterId);
            // skip pad (4 bytes)
        }
    }

    public void writeTo(ByteBuf bb) {
        WRITER.write(bb, this);
    }

    final static Writer WRITER = new Writer();
    static class Writer implements OFMessageWriter<OFMeterStatsRequestVer14> {
        @Override
        public void write(ByteBuf bb, OFMeterStatsRequestVer14 message) {
            // fixed value property version = 5
            bb.writeByte((byte) 0x5);
            // fixed value property type = 18
            bb.writeByte((byte) 0x12);
            // fixed value property length = 24
            bb.writeShort((short) 0x18);
            bb.writeInt(U32.t(message.xid));
            // fixed value property statsType = 9
            bb.writeShort((short) 0x9);
            OFStatsRequestFlagsSerializerVer14.writeTo(bb, message.flags);
            // pad: 4 bytes
            bb.writeZero(4);
            bb.writeInt(U32.t(message.meterId));
            // pad: 4 bytes
            bb.writeZero(4);
        }
    }

    @Override
    public String toString() {
        StringBuilder b = new StringBuilder("OFMeterStatsRequestVer14(");
        b.append("xid=").append(xid);
        b.append(", ");
        b.append("flags=").append(flags);
        b.append(", ");
        b.append("meterId=").append(meterId);
        b.append(")");
        return b.toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        if (getClass() != obj.getClass())
            return false;
        OFMeterStatsRequestVer14 other = (OFMeterStatsRequestVer14) obj;

        if( xid != other.xid)
            return false;
        if (flags == null) {
            if (other.flags != null)
                return false;
        } else if (!flags.equals(other.flags))
            return false;
        if( meterId != other.meterId)
            return false;
        return true;
    }

    @Override
    public boolean equalsIgnoreXid(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        if (getClass() != obj.getClass())
            return false;
        OFMeterStatsRequestVer14 other = (OFMeterStatsRequestVer14) obj;

        // ignore XID
        if (flags == null) {
            if (other.flags != null)
                return false;
        } else if (!flags.equals(other.flags))
            return false;
        if( meterId != other.meterId)
            return false;
        return true;
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;

        // FIX: accumulate each field into result. The previous code assigned
        // "result = prime * (int)(field ...)" which discarded the prior fields'
        // contributions, so xid and flags were effectively ignored by the hash.
        result = prime * result + (int) (xid ^ (xid >>> 32));
        result = prime * result + ((flags == null) ? 0 : flags.hashCode());
        result = prime * result + (int) (meterId ^ (meterId >>> 32));
        return result;
    }

    @Override
    public int hashCodeIgnoreXid() {
        final int prime = 31;
        int result = 1;

        // ignore XID
        // FIX: accumulate into result (previously the meterId assignment
        // overwrote and discarded the flags contribution).
        result = prime * result + ((flags == null) ? 0 : flags.hashCode());
        result = prime * result + (int) (meterId ^ (meterId >>> 32));
        return result;
    }
}
|
/*
* This software is provided "AS IS" without a warranty of any kind.
* You use it on your own risk and responsibility!!!
*
* This file is shared under BSD v3 license.
* See readme.txt and BSD3 file for details.
*
*/
package kendzi.josm.kendzi3d.jogl.selection.event;
import kendzi.josm.kendzi3d.jogl.selection.editor.Editor;
/**
 * Event object fired when an {@link Editor} is selected.
 */
public class SelectEditorEvent {

    /** The editor this selection event refers to. */
    Editor editor;

    /**
     * Creates a selection event for the given editor.
     *
     * @param editor the editor that was selected
     */
    public SelectEditorEvent(Editor editor) {
        this.editor = editor;
    }

    /**
     * @return the editor carried by this event
     */
    public Editor getEditor() {
        return editor;
    }
}
|
/*
* JBoss, Home of Professional Open Source
* Copyright 2009 Red Hat Inc. and/or its affiliates and other
* contributors as indicated by the @author tags. All rights reserved.
* See the copyright.txt in the distribution for a full listing of
* individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.infinispan.loaders.jdbm;
import java.util.Comparator;
import org.infinispan.config.Dynamic;
import org.infinispan.loaders.LockSupportCacheStoreConfig;
import org.infinispan.util.Util;
/**
* Configures {@link JdbmCacheStore}.
*
* @author Elias Ross
* @author Galder Zamarreño
* @since 4.0
*/
public class JdbmCacheStoreConfig extends LockSupportCacheStoreConfig {

   /**
    * The serialVersionUID
    */
   private static final long serialVersionUID = -3686035269816837880L;

   /**
    * @configRef desc="A location on disk where the store can write internal files"
    */
   String location = "jdbm";

   /**
    * @configRef desc="Comparator class used to sort the keys by the cache loader.
    * This should only need to be set when using keys that do not have a natural ordering."
    */
   String comparatorClassName = NaturalComparator.class.getName();

   /**
    * @configRef desc="Whenever a new entry is stored, an expiry entry is created and added
    * to the a queue that is later consumed by the eviction thread. This parameter sets the size
    * of this queue."
    */
   @Dynamic
   int expiryQueueSize = 10000;

   public JdbmCacheStoreConfig() {
      setCacheLoaderClassName(JdbmCacheStore.class.getName());
   }

   /** @return the on-disk directory used by the store */
   public String getLocation() {
      return this.location;
   }

   /** Sets the on-disk directory used by the store. */
   public void setLocation(String location) {
      testImmutability("location");
      this.location = location;
   }

   /** @return the fully-qualified name of the key comparator class */
   public String getComparatorClassName() {
      return this.comparatorClassName;
   }

   /** Sets the fully-qualified name of the key comparator class. */
   public void setComparatorClassName(String comparatorClassName) {
      testImmutability("comparatorClassName");
      this.comparatorClassName = comparatorClassName;
   }

   /** @return the maximum size of the expiry-entry queue */
   public int getExpiryQueueSize() {
      return this.expiryQueueSize;
   }

   /** Sets the maximum size of the expiry-entry queue. */
   public void setExpiryQueueSize(int expiryQueueSize) {
      testImmutability("expiryQueueSize");
      this.expiryQueueSize = expiryQueueSize;
   }

   /**
    * Returns a new comparator instance based on {@link #setComparatorClassName(String)}.
    */
   public Comparator<?> createComparator() {
      return (Comparator<?>) Util.getInstance(this.comparatorClassName, getClassLoader());
   }
}
|
/*
* Copyright 1999-2011 Alibaba Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.bingo.framework.rpc.protocol.bingo;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.HashMap;
import java.util.Map;
import com.bingo.framework.common.Constants;
import com.bingo.framework.common.logger.Logger;
import com.bingo.framework.common.logger.LoggerFactory;
import com.bingo.framework.common.serialize.Cleanable;
import com.bingo.framework.common.serialize.ObjectInput;
import com.bingo.framework.common.utils.Assert;
import com.bingo.framework.common.utils.ReflectUtils;
import com.bingo.framework.common.utils.StringUtils;
import com.bingo.framework.remoting.Channel;
import com.bingo.framework.remoting.Codec;
import com.bingo.framework.remoting.Decodeable;
import com.bingo.framework.remoting.exchange.Request;
import com.bingo.framework.remoting.transport.CodecSupport;
import com.bingo.framework.rpc.RpcInvocation;
import static com.bingo.framework.rpc.protocol.bingo.CallbackServiceCodec.decodeInvocationArgument;
/**
* @author <a href="mailto:gang.lvg@alibaba-inc.com">kimi</a>
*/
/**
 * An {@link RpcInvocation} that decodes itself lazily from the transport input
 * stream: construction only captures the raw payload, and the actual
 * deserialization happens in {@link #decode()} (typically off the I/O thread).
 * NOTE(review): this deserializes data received from the network; the trust
 * model for incoming argument types should be verified upstream.
 */
public class DecodeableRpcInvocation extends RpcInvocation implements Codec, Decodeable {
    private static final Logger log = LoggerFactory.getLogger(DecodeableRpcInvocation.class);
    // Transport channel the request arrived on; provides the URL used to
    // look up the serialization implementation.
    private Channel channel;
    // Id of the serialization scheme the payload was encoded with.
    private byte serializationType;
    // Raw, not-yet-deserialized payload bytes.
    private InputStream inputStream;
    // Enclosing request; marked broken if decoding fails so the caller can
    // surface the error instead of a half-decoded invocation.
    private Request request;
    // Volatile: decode() may run on a different thread than the reader;
    // ensures the "already decoded" flag is visible across threads.
    private volatile boolean hasDecoded;
    public DecodeableRpcInvocation(Channel channel, Request request, InputStream is, byte id) {
        Assert.notNull(channel, "channel == null");
        Assert.notNull(request, "request == null");
        Assert.notNull(is, "inputStream == null");
        this.channel = channel;
        this.request = request;
        this.inputStream = is;
        this.serializationType = id;
    }
    /**
     * Decodes the captured payload into this invocation, at most once.
     * On any failure the request is marked broken and the throwable is
     * attached as the request data (the error is not rethrown).
     */
    public void decode() throws Exception {
        if (!hasDecoded && channel != null && inputStream != null) {
            try {
                decode(channel, inputStream);
            } catch (Throwable e) {
                if (log.isWarnEnabled()) {
                    log.warn("Decode rpc invocation failed: " + e.getMessage(), e);
                }
                request.setBroken(true);
                request.setData(e);
            } finally {
                // set even on failure so we never retry a broken payload
                hasDecoded = true;
            }
        }
    }
    /** Encoding is not supported by this decode-only class. */
    public void encode(Channel channel, OutputStream output, Object message) throws IOException {
        throw new UnsupportedOperationException();
    }
    /**
     * Reads the invocation off the wire. The read order is fixed by the
     * protocol: framework version, service path, service version, method
     * name, argument count/descriptor, argument values, then an attachment
     * map. Do not reorder these reads.
     *
     * @return this invocation, populated from the stream
     * @throws IOException if the stream is malformed or a class is missing
     */
    public Object decode(Channel channel, InputStream input) throws IOException {
        ObjectInput in = CodecSupport.getSerialization(channel.getUrl(), serializationType)
                .deserialize(channel.getUrl(), input);
        try {
            setAttachment(Constants.BINGO_VERSION_KEY, in.readUTF());
            setAttachment(Constants.PATH_KEY, in.readUTF());
            setAttachment(Constants.VERSION_KEY, in.readUTF());
            setMethodName(in.readUTF());
            try {
                Object[] args;
                Class<?>[] pts;
                // NOTICE modified by lishen
                // argNum >= 0 means argument types are inferred from the
                // decoded values themselves; a negative count means a type
                // descriptor string follows instead.
                int argNum = in.readInt();
                if (argNum >= 0) {
                    if (argNum == 0) {
                        pts = BingoCodec.EMPTY_CLASS_ARRAY;
                        args = BingoCodec.EMPTY_OBJECT_ARRAY;
                    } else {
                        args = new Object[argNum];
                        pts = new Class[argNum];
                        for (int i = 0; i < args.length; i++) {
                            try {
                                args[i] = in.readObject();
                                // NOTE(review): a null argument would NPE here;
                                // the catch below swallows it, leaving pts[i]
                                // null — confirm callers tolerate null slots.
                                pts[i] = args[i].getClass();
                            } catch (Exception e) {
                                if (log.isWarnEnabled()) {
                                    log.warn("Decode argument failed: " + e.getMessage(), e);
                                }
                            }
                        }
                    }
                } else {
                    // empty descriptor => no-arg method; otherwise parse the
                    // JVM-style descriptor into parameter classes.
                    String desc = in.readUTF();
                    if (desc.length() == 0) {
                        pts = BingoCodec.EMPTY_CLASS_ARRAY;
                        args = BingoCodec.EMPTY_OBJECT_ARRAY;
                    } else {
                        pts = ReflectUtils.desc2classArray(desc);
                        args = new Object[pts.length];
                        for (int i = 0; i < args.length; i++) {
                            try {
                                args[i] = in.readObject(pts[i]);
                            } catch (Exception e) {
                                // best-effort: failed slots stay null
                                if (log.isWarnEnabled()) {
                                    log.warn("Decode argument failed: " + e.getMessage(), e);
                                }
                            }
                        }
                    }
                }
                setParameterTypes(pts);
                // unchecked cast: the wire format stores attachments as a
                // String->String map
                Map<String, String> map = (Map<String, String>) in.readObject(Map.class);
                if (map != null && map.size() > 0) {
                    Map<String, String> attachment = getAttachments();
                    if (attachment == null) {
                        attachment = new HashMap<String, String>();
                    }
                    attachment.putAll(map);
                    setAttachments(attachment);
                }
                //decode argument ,may be callback
                for (int i = 0; i < args.length; i++) {
                    args[i] = decodeInvocationArgument(channel, this, pts, i, args[i]);
                }
                setArguments(args);
            } catch (ClassNotFoundException e) {
                throw new IOException(StringUtils.toString("Read invocation data failed.", e));
            }
        } finally {
            // modified by lishen
            // release any serializer-held resources regardless of outcome
            if (in instanceof Cleanable) {
                ((Cleanable) in).cleanup();
            }
        }
        return this;
    }
}
|
/* Generated by camel build tools - do NOT edit this file! */
package org.apache.camel.component.apns;
import org.apache.camel.CamelContext;
import org.apache.camel.spi.GeneratedPropertyConfigurer;
import org.apache.camel.support.component.PropertyConfigurerSupport;
/**
* Generated by camel build tools - do NOT edit this file!
*/
@SuppressWarnings("unchecked")
public class ApnsEndpointConfigurer extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer {

    /**
     * Applies a single named option to the given {@link ApnsEndpoint}.
     * Each recognised option appears twice in the switch: once in its
     * all-lowercase form (matched when {@code ignoreCase} is set) and once
     * in its canonical camel-case form.
     *
     * @return true if the option was recognised and applied, false otherwise
     */
    @Override
    public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
        ApnsEndpoint endpoint = (ApnsEndpoint) obj;
        switch (ignoreCase ? name.toLowerCase() : name) {
        case "tokens": endpoint.setTokens(property(camelContext, java.lang.String.class, value)); return true;
        case "bridgeerrorhandler":
        case "bridgeErrorHandler": endpoint.setBridgeErrorHandler(property(camelContext, boolean.class, value)); return true;
        case "sendemptymessagewhenidle":
        case "sendEmptyMessageWhenIdle": endpoint.setSendEmptyMessageWhenIdle(property(camelContext, boolean.class, value)); return true;
        case "exceptionhandler":
        case "exceptionHandler": endpoint.setExceptionHandler(property(camelContext, org.apache.camel.spi.ExceptionHandler.class, value)); return true;
        case "exchangepattern":
        case "exchangePattern": endpoint.setExchangePattern(property(camelContext, org.apache.camel.ExchangePattern.class, value)); return true;
        case "pollstrategy":
        case "pollStrategy": endpoint.setPollStrategy(property(camelContext, org.apache.camel.spi.PollingConsumerPollStrategy.class, value)); return true;
        case "lazystartproducer":
        case "lazyStartProducer": endpoint.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
        case "basicpropertybinding":
        case "basicPropertyBinding": endpoint.setBasicPropertyBinding(property(camelContext, boolean.class, value)); return true;
        case "synchronous": endpoint.setSynchronous(property(camelContext, boolean.class, value)); return true;
        case "backofferrorthreshold":
        case "backoffErrorThreshold": endpoint.setBackoffErrorThreshold(property(camelContext, int.class, value)); return true;
        case "backoffidlethreshold":
        case "backoffIdleThreshold": endpoint.setBackoffIdleThreshold(property(camelContext, int.class, value)); return true;
        case "backoffmultiplier":
        case "backoffMultiplier": endpoint.setBackoffMultiplier(property(camelContext, int.class, value)); return true;
        case "delay": endpoint.setDelay(property(camelContext, long.class, value)); return true;
        case "greedy": endpoint.setGreedy(property(camelContext, boolean.class, value)); return true;
        case "initialdelay":
        case "initialDelay": endpoint.setInitialDelay(property(camelContext, long.class, value)); return true;
        case "repeatcount":
        case "repeatCount": endpoint.setRepeatCount(property(camelContext, long.class, value)); return true;
        case "runlogginglevel":
        case "runLoggingLevel": endpoint.setRunLoggingLevel(property(camelContext, org.apache.camel.LoggingLevel.class, value)); return true;
        case "scheduledexecutorservice":
        case "scheduledExecutorService": endpoint.setScheduledExecutorService(property(camelContext, java.util.concurrent.ScheduledExecutorService.class, value)); return true;
        case "scheduler": endpoint.setScheduler(property(camelContext, java.lang.String.class, value)); return true;
        case "schedulerproperties":
        case "schedulerProperties": endpoint.setSchedulerProperties(property(camelContext, java.util.Map.class, value)); return true;
        case "startscheduler":
        case "startScheduler": endpoint.setStartScheduler(property(camelContext, boolean.class, value)); return true;
        case "timeunit":
        case "timeUnit": endpoint.setTimeUnit(property(camelContext, java.util.concurrent.TimeUnit.class, value)); return true;
        case "usefixeddelay":
        case "useFixedDelay": endpoint.setUseFixedDelay(property(camelContext, boolean.class, value)); return true;
        default: return false;
        }
    }
}
|
/*******************************************************************************
* Copyright 2012 Geoscience Australia
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package au.gov.ga.earthsci.worldwind.common.layers.geometry.types.airspace;
import gov.nasa.worldwind.geom.LatLon;
import gov.nasa.worldwind.geom.Vec4;
import gov.nasa.worldwind.render.DrawContext;
import gov.nasa.worldwind.render.airspaces.AirspaceAttributes;
import gov.nasa.worldwind.render.airspaces.Curtain;
import gov.nasa.worldwind.render.airspaces.Geometry;
import gov.nasa.worldwind.util.OGLStackHandler;
import javax.media.opengl.GL2;
/**
* An extension of the {@link Curtain} airspace that can render the generating
* shape as a line at the upper and lower elevations.
*
* @author James Navin (james.navin@ga.gov.au)
*/
public class ShapeOutlineCurtain extends Curtain implements ShapeOutlineAirspace
{
	// Keys into the Curtain Geometry object for element and vertex data
	private static final int GEOMETRY_TYPE_ELEMENT = 1;
	private static final int GEOMETRY_TYPE_VERTEX = 2;

	// Offsets into the curtain vertex array selecting which rim to outline.
	// NOTE(review): the curtain vertices appear to alternate lower/upper along
	// the path (even = lower rim, odd = upper rim) — confirm against the
	// Curtain geometry layout.
	private static final int LOWER_RIM_OFFSET = 0;
	private static final int UPPER_RIM_OFFSET = 1;

	boolean drawCurtain = true;
	boolean drawUpperShapeOutline = false;
	boolean drawLowerShapeOutline = false;

	public ShapeOutlineCurtain()
	{
		super();
	}

	public ShapeOutlineCurtain(AirspaceAttributes attributes)
	{
		super(attributes);
	}

	public ShapeOutlineCurtain(Iterable<? extends LatLon> locations)
	{
		super(locations);
	}

	/**
	 * Renders the curtain (if enabled) followed by the upper and lower
	 * generating-shape outlines (if enabled).
	 */
	@Override
	protected void doRenderGeometry(DrawContext dc, String drawStyle)
	{
		// Force the cached geometry to be regenerated each frame
		setExpiryTime(0);
		if (drawCurtain)
		{
			super.doRenderGeometry(dc, drawStyle);
		}
		if (drawUpperShapeOutline)
		{
			renderUpperShapeOutline(dc);
		}
		if (drawLowerShapeOutline)
		{
			renderLowerShapeOutline(dc);
		}
	}

	/** Draws the generating shape as a line along the curtain's upper rim. */
	private void renderUpperShapeOutline(DrawContext dc)
	{
		renderShapeOutline(dc, UPPER_RIM_OFFSET);
	}

	/** Draws the generating shape as a line along the curtain's lower rim. */
	private void renderLowerShapeOutline(DrawContext dc)
	{
		renderShapeOutline(dc, LOWER_RIM_OFFSET);
	}

	/**
	 * Shared implementation for the upper and lower outlines: builds a
	 * GL_LINES element array over every second curtain vertex, starting at
	 * {@code rimOffset}, and draws it with the current outline attributes.
	 *
	 * @param rimOffset {@link #LOWER_RIM_OFFSET} or {@link #UPPER_RIM_OFFSET}
	 */
	private void renderShapeOutline(DrawContext dc, int rimOffset)
	{
		Vec4 refCenter = computeReferenceCenter(dc);
		Geometry vertexGeometry = getCurtainGeometry(dc, refCenter).getVertexGeometry();

		int count = vertexGeometry.getCount(GEOMETRY_TYPE_VERTEX) - 2;
		int[] shapeIndices = new int[count];
		int i = 0;
		while (i < count)
		{
			// Each line segment joins a rim vertex to the next rim vertex
			// (two positions further along, since rims alternate).
			shapeIndices[i] = i + rimOffset;
			shapeIndices[i + 1] = i + rimOffset + 2;
			i += 2;
		}

		Geometry shapeOutlineElementGeometry = new Geometry();
		shapeOutlineElementGeometry.setElementData(GL2.GL_LINES, count, shapeIndices);

		drawShapeOutline(dc, refCenter, vertexGeometry, shapeOutlineElementGeometry);
	}

	/**
	 * Draws the outline geometry with GL state pushed/restored around it.
	 * The outline is drawn as lines plus points (see comment below).
	 */
	private void drawShapeOutline(DrawContext dc, Vec4 refCenter, Geometry vertexGeometry,
			Geometry shapeOutlineElementGeometry)
	{
		dc.getView().pushReferenceCenter(dc, refCenter);
		GL2 gl = dc.getGL().getGL2();
		OGLStackHandler stack = new OGLStackHandler();
		// FIX: GL_POINT_BIT was listed twice in the original mask; once suffices.
		stack.pushAttrib(gl, GL2.GL_CURRENT_BIT | GL2.GL_HINT_BIT | GL2.GL_ENABLE_BIT | GL2.GL_DEPTH_BUFFER_BIT
				| GL2.GL_POINT_BIT | GL2.GL_COLOR_BUFFER_BIT | GL2.GL_LIGHTING_BIT);
		stack.pushClientAttrib(gl, GL2.GL_CLIENT_VERTEX_ARRAY_BIT);
		try
		{
			// Points are drawn over the line to prevent gaps forming when
			// antialiasing and smoothing is applied to the line
			setupDrawParams(dc, gl);

			gl.glDepthMask(false);
			drawShapeOutlineAsLines(dc, shapeOutlineElementGeometry, vertexGeometry);
			drawShapeOutlineAsPoints(dc, shapeOutlineElementGeometry, vertexGeometry);

			gl.glDepthMask(true);
			drawShapeOutlineAsLines(dc, shapeOutlineElementGeometry, vertexGeometry);
		}
		finally
		{
			dc.getView().popReferenceCenter(dc);
			stack.pop(gl);
		}
	}

	/** Enables smoothing/blending and applies the configured outline attributes. */
	private void setupDrawParams(DrawContext dc, GL2 gl)
	{
		gl.glShadeModel(GL2.GL_SMOOTH);
		gl.glEnable(GL2.GL_LINE_SMOOTH);
		gl.glEnable(GL2.GL_POINT_SMOOTH);
		gl.glEnable(GL2.GL_BLEND);
		gl.glBlendFunc(GL2.GL_SRC_ALPHA, GL2.GL_ONE_MINUS_SRC_ALPHA);
		gl.glHint(GL2.GL_LINE_SMOOTH_HINT, GL2.GL_NICEST);
		gl.glHint(GL2.GL_POINT_SMOOTH_HINT, GL2.GL_NICEST);

		getAttributes().applyOutline(dc, false);
		// Point size matches the line width so points fill smoothing gaps
		gl.glPointSize((float) getAttributes().getOutlineWidth());
	}

	private void drawShapeOutlineAsLines(DrawContext dc, Geometry shapeOutlineElementGeometry, Geometry vertexGeometry)
	{
		getRenderer().drawGeometry(dc, shapeOutlineElementGeometry, vertexGeometry);
	}

	private void drawShapeOutlineAsPoints(DrawContext dc, Geometry shapeOutlineElementGeometry, Geometry vertexGeometry)
	{
		getRenderer().drawGeometry(dc, GL2.GL_POINTS, shapeOutlineElementGeometry.getCount(GEOMETRY_TYPE_ELEMENT),
				shapeOutlineElementGeometry.getGLType(GEOMETRY_TYPE_ELEMENT),
				shapeOutlineElementGeometry.getBuffer(GEOMETRY_TYPE_ELEMENT), vertexGeometry);
	}

	/** Fetches (or builds) the curtain geometry for the current frame. */
	private CurtainGeometry getCurtainGeometry(DrawContext dc, Vec4 refCenter)
	{
		return getCurtainGeometry(dc, locations.size(), locations.toArray(new LatLon[locations.size()]), pathType,
				splitThreshold, getAltitudes(), isTerrainConforming(), refCenter);
	}

	public void setDrawCurtain(boolean drawCurtain)
	{
		this.drawCurtain = drawCurtain;
	}

	@Override
	public void setDrawUpperShapeOutline(boolean drawUpperShapeOutline)
	{
		this.drawUpperShapeOutline = drawUpperShapeOutline;
	}

	@Override
	public void setDrawLowerShapeOutline(boolean drawLowerShapeOutline)
	{
		this.drawLowerShapeOutline = drawLowerShapeOutline;
	}
}
|
/*
* Copyright (c) WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* WSO2 Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.connexta.arbitro.attr;
import com.connexta.arbitro.cond.Evaluatable;
import java.net.URI;
/**
 * Supports the standard selector functionality in XACML 3.0, which uses XPath expressions to
 * resolve values from the Request or elsewhere. This abstract implementation holds the state
 * common to all concrete attribute selectors.
 */
public abstract class AbstractAttributeSelector implements Evaluatable {

    /**
     * the data type returned by this selector
     */
    protected URI type;

    /**
     * must resolution find something
     */
    protected boolean mustBePresent;

    /**
     * the xpath version we've been told to use
     */
    protected String xpathVersion;

    /**
     * Returns the data type of the attribute values that this selector will resolve.
     *
     * @return the data type of the values found by this selector
     */
    public URI getType() {
        return this.type;
    }

    /**
     * Indicates whether this selector is required to resolve at least one value.
     *
     * @return true if a value is required, false otherwise
     */
    public boolean isMustBePresent() {
        return this.mustBePresent;
    }

    /**
     * Returns the XPath version this selector should use, typically supplied by
     * the defaults section of the enclosing policy.
     *
     * @return the XPath version
     */
    public String getXPathVersion() {
        return this.xpathVersion;
    }
}
|
package com.epam.jdi.light.common;
import com.epam.jdi.light.elements.common.UIElement;
import com.jdiai.tools.func.JAction2;
/**
 * Created by Roman Iovlev on 26.09.2019
 * Email: roman.iovlev.jdi@gmail.com; Skype: roman.iovlev
 */
public enum SetTextTypes {
    /** Replace the field value using {@link UIElement#setText}. */
    SET_TEXT(UIElement::setText),
    /** Append to the current value using {@link UIElement#sendKeys}. */
    SEND_KEYS(UIElement::sendKeys),
    /** Clear the field first, then type the value. */
    CLEAR_SEND_KEYS((element, value) -> {
        element.clear();
        element.sendKeys(value);
    });

    /** The action this strategy performs on a target element with the given text. */
    public JAction2<UIElement, String> action;

    SetTextTypes(JAction2<UIElement, String> action) {
        this.action = action;
    }
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.sql.stored;
import java.util.HashMap;
import java.util.Map;
import org.apache.camel.Exchange;
import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.component.mock.MockEndpoint;
import org.apache.camel.test.junit5.CamelTestSupport;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.springframework.jdbc.datasource.embedded.EmbeddedDatabase;
import org.springframework.jdbc.datasource.embedded.EmbeddedDatabaseBuilder;
import org.springframework.jdbc.datasource.embedded.EmbeddedDatabaseType;
import static org.junit.jupiter.api.Assertions.assertEquals;
public class ProducerUseMessageBodyForTemplateTest extends CamelTestSupport {

    private EmbeddedDatabase db;

    /** Spins up an embedded Derby database seeded with the stored-procedure script. */
    @Override
    @BeforeEach
    public void setUp() throws Exception {
        db = new EmbeddedDatabaseBuilder()
                .setName(getClass().getSimpleName())
                .setType(EmbeddedDatabaseType.DERBY)
                .addScript("sql/storedProcedureTest.sql").build();

        super.setUp();
    }

    /** Stops the routes before shutting the embedded database down. */
    @Override
    @AfterEach
    public void tearDown() throws Exception {
        super.tearDown();
        db.shutdown();
    }

    @Test
    public void shouldUseMessageBodyAsQuery() throws InterruptedException {
        MockEndpoint mockEndpoint = getMockEndpoint("mock:query");
        mockEndpoint.expectedMessageCount(1);

        // the stored-procedure template travels in the message body; its
        // named parameters are supplied via the header map
        Map<String, Object> parameters = new HashMap<>();
        parameters.put("num1", 3);
        parameters.put("num2", 1);
        template.requestBodyAndHeader("direct:query", "SUBNUMBERS(INTEGER :#num1,INTEGER :#num2,OUT INTEGER resultofsum)",
                SqlStoredConstants.SQL_STORED_PARAMETERS, parameters);

        assertMockEndpointsSatisfied();

        // 3 - 1 = 2, returned through the OUT parameter
        Exchange exchange = mockEndpoint.getExchanges().get(0);
        assertEquals(Integer.valueOf(2), exchange.getIn().getBody(Map.class).get("resultofsum"));
    }

    @Override
    protected RouteBuilder createRouteBuilder() throws Exception {
        return new RouteBuilder() {
            @Override
            public void configure() throws Exception {
                // required for the sql component
                getContext().getComponent("sql-stored", SqlStoredComponent.class).setDataSource(db);

                from("direct:query").to("sql-stored:query?useMessageBodyForTemplate=true").to("mock:query");
            }
        };
    }
}
|
/*
* Copyright 2000-2012 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.packaging.impl.run;
import com.intellij.icons.AllIcons;
import com.intellij.ide.util.ElementsChooser;
import com.intellij.packaging.artifacts.Artifact;
import com.intellij.packaging.artifacts.ArtifactPointer;
import com.intellij.ui.JBColor;
import org.jetbrains.annotations.NotNull;
import javax.swing.*;
import java.awt.*;
import java.util.Comparator;
import java.util.List;
/**
* @author nik
*/
public class ArtifactChooser extends ElementsChooser<ArtifactPointer> {

  /** Orders artifacts alphabetically, ignoring case. */
  private static final Comparator<ArtifactPointer> ARTIFACT_COMPARATOR =
    Comparator.comparing(ArtifactPointer::getArtifactName, String.CASE_INSENSITIVE_ORDER);

  /** Rendering attributes for pointers whose artifact no longer exists: artifact icon in red. */
  private static final ElementProperties INVALID_ARTIFACT_PROPERTIES = new ElementProperties() {
    @Override
    public Icon getIcon() {
      return AllIcons.Nodes.Artifact;
    }

    @Override
    public Color getColor() {
      return JBColor.RED;
    }
  };

  public ArtifactChooser(List<ArtifactPointer> pointers) {
    super(pointers, false);
    // Mark dangling pointers (no resolvable artifact) with the "invalid" look.
    for (ArtifactPointer artifactPointer : pointers) {
      if (artifactPointer.getArtifact() == null) {
        setElementProperties(artifactPointer, INVALID_ARTIFACT_PROPERTIES);
      }
    }
    sort(ARTIFACT_COMPARATOR);
  }

  @Override
  protected String getItemText(@NotNull ArtifactPointer value) {
    return value.getArtifactName();
  }

  @Override
  protected Icon getItemIcon(@NotNull ArtifactPointer value) {
    final Artifact artifact = value.getArtifact();
    if (artifact == null) {
      return null;
    }
    return artifact.getArtifactType().getIcon();
  }
}
|
package com.avivvegh.encryption;
import android.content.Context;
import android.content.SharedPreferences;
import android.os.Build;
import android.security.KeyPairGeneratorSpec;
import android.support.annotation.RequiresApi;
import android.util.Base64;
import javax.crypto.Cipher;
import javax.crypto.CipherInputStream;
import javax.crypto.CipherOutputStream;
import javax.crypto.spec.GCMParameterSpec;
import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.SecretKeySpec;
import javax.security.auth.x500.X500Principal;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.math.BigInteger;
import java.security.Key;
import java.security.KeyPairGenerator;
import java.security.KeyStore;
import java.security.SecureRandom;
import java.util.ArrayList;
import java.util.Calendar;
@RequiresApi(api = Build.VERSION_CODES.JELLY_BEAN_MR2)
class EncryptorAboveApi18 extends BaseEncryptor implements Encryptor {

    //region Const
    /** SharedPreferences key under which the RSA-wrapped AES key is stored (Base64-encoded). */
    private static final String AES_KEY = "AXL";
    //endregion

    //region Private members
    /** AndroidKeyStore holding the RSA key pair used to wrap/unwrap the AES key. */
    private KeyStore keyStore;
    //endregion

    //region C'tor
    /**
     * Loads the AndroidKeyStore and, on first use, generates an RSA key pair
     * plus a random AES key that is persisted RSA-encrypted in SharedPreferences.
     */
    EncryptorAboveApi18(Context context, SharedPreferences sharedPreferences) {
        super(context, sharedPreferences);
        try {
            keyStore = KeyStore.getInstance(ANDROID_KEY_STORE_TYPE);
            keyStore.load(null);
            if (!keyStore.containsAlias(KEYSTORE_ALIAS)) {
                // Generate the RSA key pair, valid from now for
                // RSA_CALENDAR_AMOUNT years.
                Calendar start = Calendar.getInstance();
                Calendar end = Calendar.getInstance();
                end.add(Calendar.YEAR, RSA_CALENDAR_AMOUNT);
                KeyPairGeneratorSpec keyPairGeneratorSpec =
                        new KeyPairGeneratorSpec.Builder(context)
                                .setAlias(KEYSTORE_ALIAS)
                                .setSubject(new X500Principal(X500_PRINCIPAL_NAME))
                                .setSerialNumber(BigInteger.TEN)
                                .setStartDate(start.getTime())
                                .setEndDate(end.getTime())
                                .build();
                KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance(KEY_ALGORITHM_RSA,
                        ANDROID_KEY_STORE_TYPE);
                keyPairGenerator.initialize(keyPairGeneratorSpec);
                keyPairGenerator.generateKeyPair();
                // Generate the AES key, encrypt it with the RSA public key,
                // and save the encrypted AES key.
                generateAndStoreAESKey();
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
    //endregion

    //region Private methods
    /**
     * Creates a random AES key, wraps it with the RSA public key, and stores
     * the wrapped key (Base64) in SharedPreferences. No-op if one already exists.
     */
    private void generateAndStoreAESKey() {
        String encryptedKeyB64 = sharedPreferences.getString(AES_KEY, null);
        if (encryptedKeyB64 == null) {
            // Generate key
            byte[] key = new byte[KEY_LENGTH_IN_BYTES];
            SecureRandom secureRandom = new SecureRandom();
            secureRandom.nextBytes(key);
            // Encrypt the key with RSA
            byte[] encryptedKey = rsaEncrypt(key);
            encryptedKeyB64 = Base64.encodeToString(encryptedKey, Base64.DEFAULT);
            // commit() (synchronous) is kept deliberately: the key must be on
            // disk before the first encrypt/decrypt reads it back.
            sharedPreferences.edit().putString(AES_KEY, encryptedKeyB64).commit();
        }
    }

    /**
     * Encrypts {@code plainText} with the keystore RSA public key.
     *
     * @return the ciphertext, or an empty array on failure.
     */
    private byte[] rsaEncrypt(byte[] plainText) {
        try {
            KeyStore.PrivateKeyEntry privateKeyEntry =
                    (KeyStore.PrivateKeyEntry) keyStore.getEntry(KEYSTORE_ALIAS, null);
            Cipher cipher = Cipher.getInstance(RSA_MODE, RSA_CIPHER_PROVIDER);
            cipher.init(Cipher.ENCRYPT_MODE, privateKeyEntry.getCertificate().getPublicKey());
            ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
            CipherOutputStream cipherOutputStream =
                    new CipherOutputStream(byteArrayOutputStream, cipher);
            cipherOutputStream.write(plainText);
            cipherOutputStream.close();
            return byteArrayOutputStream.toByteArray();
        } catch (Exception e) {
            e.printStackTrace();
        }
        return new byte[0];
    }

    /**
     * Decrypts {@code encryptedText} with the keystore RSA private key.
     *
     * @return the plaintext, or an empty array on failure.
     */
    private byte[] rsaDecrypt(byte[] encryptedText) {
        try {
            KeyStore.PrivateKeyEntry privateKeyEntry =
                    (KeyStore.PrivateKeyEntry) keyStore.getEntry(KEYSTORE_ALIAS, null);
            Cipher cipher = Cipher.getInstance(RSA_MODE, RSA_CIPHER_PROVIDER);
            cipher.init(Cipher.DECRYPT_MODE, privateKeyEntry.getPrivateKey());
            CipherInputStream cipherInputStream =
                    new CipherInputStream(new ByteArrayInputStream(encryptedText), cipher);
            ArrayList<Byte> values = new ArrayList<>();
            int nextByte;
            while ((nextByte = cipherInputStream.read()) != -1) {
                values.add((byte) nextByte);
            }
            byte[] bytes = new byte[values.size()];
            for (int index = 0; index < bytes.length; index++) {
                bytes[index] = values.get(index);
            }
            return bytes;
        } catch (Exception e) {
            e.printStackTrace();
        }
        return new byte[0];
    }
    //endregion

    //region Factory methods
    /**
     * Returns the AES secret key, unwrapping it from SharedPreferences.
     * Generates and stores the key on first use.
     */
    @Override
    public Key getSecretKey() {
        String encryptedKeyB64 = sharedPreferences.getString(AES_KEY, null);
        if (encryptedKeyB64 == null) {
            // BUG FIX: the original decoded the stale null reference after
            // generating the key, making Base64.decode throw on first use.
            // Generate the key, then re-read the freshly stored value.
            generateAndStoreAESKey();
            encryptedKeyB64 = sharedPreferences.getString(AES_KEY, null);
        }
        byte[] encryptedKey = Base64.decode(encryptedKeyB64, Base64.DEFAULT);
        byte[] key = rsaDecrypt(encryptedKey);
        return new SecretKeySpec(key, KEY_ALGORITHM_AES);
    }

    /**
     * Decrypts a Base64-encoded ciphertext with the AES key.
     *
     * @return the plaintext, or an empty string on failure.
     */
    @Override
    public String decrypt(String text) {
        try {
            Cipher cipher = Cipher.getInstance(AES_MODE);
            // GCMParameterSpec available only from API 21
            if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
                cipher.init(Cipher.DECRYPT_MODE, getSecretKey(),
                        new GCMParameterSpec(GCM_TAG_LENGTH, getIV()));
            } else {
                cipher.init(Cipher.DECRYPT_MODE, getSecretKey(), new IvParameterSpec(getIV()));
            }
            return new String(cipher.doFinal(Base64.decode(text, Base64.DEFAULT)));
        } catch (Exception e) {
            e.printStackTrace();
        }
        return "";
    }

    /**
     * Encrypts {@code text} with the AES key and returns the Base64 ciphertext,
     * or an empty string on failure.
     */
    @Override
    public String encrypt(String text) {
        try {
            Cipher cipher = Cipher.getInstance(AES_MODE);
            // GCMParameterSpec available only from API 21
            if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
                cipher.init(Cipher.ENCRYPT_MODE, getSecretKey(),
                        new GCMParameterSpec(GCM_TAG_LENGTH, getIV()));
            } else {
                cipher.init(Cipher.ENCRYPT_MODE, getSecretKey(), new IvParameterSpec(getIV()));
            }
            byte[] encodeBytes = cipher.doFinal(text.getBytes());
            return Base64.encodeToString(encodeBytes, Base64.DEFAULT);
        } catch (Exception e) {
            e.printStackTrace();
        }
        return "";
    }
    //endregion
}
|
package com.aqzscn.www;
import com.aqzscn.www.global.util.JacksonUtil;
import com.aqzscn.www.global.util.StringUtil;
import com.aqzscn.www.movie.service.runner.UpdateMovieTask;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.HttpEntity;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.util.EntityUtils;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.stereotype.Component;
import org.springframework.test.context.junit4.SpringRunner;
import org.springframework.web.client.RestTemplate;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
@SpringBootTest
@RunWith(SpringRunner.class)
@Component
public class HttpTest {

    @Autowired
    private RestTemplate restTemplate;

    /**
     * Issues a plain GET with Apache HttpClient and prints the response
     * status, length, and body.
     *
     * Resources are managed with try-with-resources; the original closed the
     * client before the response and leaked both if the first close() threw.
     */
    @Test
    public void testGet() {
        HttpGet httpGet = new HttpGet("http://127.0.0.1:8000/movie/rank/");
        RequestConfig requestConfig = RequestConfig.custom().setConnectTimeout(5000)
                .setConnectionRequestTimeout(5000)
                .setSocketTimeout(5000)
                .setRedirectsEnabled(true).build();
        httpGet.setConfig(requestConfig);
        httpGet.addHeader("Content-type", "application/json;charset=utf-8");
        httpGet.setHeader("Accept", "application/json");
        // Response is closed before the client (reverse order of acquisition).
        try (CloseableHttpClient httpClient = HttpClientBuilder.create().build();
             CloseableHttpResponse response = httpClient.execute(httpGet)) {
            HttpEntity httpEntity = response.getEntity();
            System.out.println("响应状态:" + response.getStatusLine());
            if (httpEntity != null) {
                System.out.println("响应长度:"+ httpEntity.getContentLength());
                System.out.println("响应内容:"+ EntityUtils.toString(httpEntity));
                // System.out.println("encode:"+ new String(EntityUtils.toByteArray(httpEntity), StandardCharsets.UTF_8));
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Same endpoint via Spring's RestTemplate; prints the detected encoding
     * and the body on a 200 response.
     */
    @Test
    public void testRank() {
        String url = "http://127.0.0.1:8000/movie/rank/";
        try {
            ResponseEntity<String> res = restTemplate.getForEntity(url, String.class);
            if (res.getStatusCode() == HttpStatus.OK && StringUtils.isNotBlank(res.getBody())) {
                System.out.println(StringUtil.getEncoding(res.getBody()));
                System.out.println(new String(res.getBody().getBytes(StandardCharsets.UTF_8), StandardCharsets.UTF_8));
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /** Runs the movie-update task synchronously against the local service. */
    @Test
    public void MovieTest () {
        new UpdateMovieTask("http://127.0.0.1:8000/movie/").run();
        System.out.println("任务开始执行了");
    }
}
|
package io.github.imaple.mall.member.entity;
import com.baomidou.mybatisplus.annotation.TableId;
import com.baomidou.mybatisplus.annotation.TableName;
import java.io.Serializable;
import java.util.Date;
import lombok.Data;
/**
 * Integration (loyalty points) change history record.
 *
 * @author xiaowu
 * @email yixiaowu2008@gmail.com
 * @date 2019-10-08 09:47:05
 */
@Data
@TableName("ums_integration_change_history")
public class IntegrationChangeHistoryEntity implements Serializable {

    private static final long serialVersionUID = 1L;

    /**
     * Primary key id
     */
    @TableId
    private Long id;

    /**
     * Owning member's id (member_id column)
     */
    private Long memberId;

    /**
     * Record creation time (create_time column)
     */
    private Date createTime;

    /**
     * Amount the integration value changed by
     */
    private Integer changeCount;

    /**
     * Free-form remark
     */
    private String note;

    /**
     * Source [0->shopping; 1->admin modification; 2->activity]
     */
    // NOTE(review): "sourceTyoe" looks like a typo for "sourceType", but the
    // field name drives the Lombok getter/setter and the mapped DB column —
    // renaming would break callers and the schema mapping; confirm before fixing.
    private Integer sourceTyoe;
}
|
package org.blockchain_monitoring.config;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.blockchain_monitoring.api.InfluxService;
import org.blockchain_monitoring.api.InfluxServiceImpl;
import org.blockchain_monitoring.api.ResultAdapter;
import org.blockchain_monitoring.api.annotation.invoke.InvokeMonitoring;
import org.blockchain_monitoring.model.MonitoringParams;
import org.influxdb.dto.Point;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.Configuration;
import java.util.Optional;
@Configuration
@ComponentScan("org.blockchain_monitoring")
public class InfluxDBConfig {

    @Autowired
    private MonitoringParams monitoringParams;

    /** Builds the InfluxDB service facade from the configured connection settings. */
    @Bean
    public InfluxService influxDBFactory() {
        final String url = monitoringParams.getUrlInfluxDB();
        final String username = monitoringParams.getUsernameInfluxDB();
        final String password = monitoringParams.getPasswordInfluxDB();
        return new InfluxServiceImpl(url, username, password);
    }

    /** JSON mapper that omits empty values from serialized output. */
    @Bean
    public ObjectMapper jsonObjectMapper() {
        final ObjectMapper mapper = new ObjectMapper();
        mapper.setSerializationInclusion(JsonInclude.Include.NON_EMPTY);
        return mapper;
    }

    /** No-op adapter: invocation results are not converted into InfluxDB points. */
    @Bean
    public ResultAdapter resultAdapter() {
        return new ResultAdapter() {
            @Override
            public Optional<Point> apply(Object result, InvokeMonitoring invokeMonitoring) {
                return Optional.empty();
            }
        };
    }
}
|
/*
* Copyright 2012 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.drools.workbench.screens.guided.rule.client.editor;
import com.google.gwt.event.shared.GwtEvent;
/**
 * GWT event fired when an expression's type changes, carrying the old and new
 * type names.
 */
public class ExpressionTypeChangeEvent extends GwtEvent<ExpressionTypeChangeHandler> {

    private static final GwtEvent.Type<ExpressionTypeChangeHandler> TYPE = new GwtEvent.Type<ExpressionTypeChangeHandler>();

    /** Expression type before the change. */
    private final String oldType;

    /** Expression type after the change. */
    private final String newType;

    public ExpressionTypeChangeEvent( String oldType,
                                      String newType ) {
        this.oldType = oldType;
        this.newType = newType;
    }

    public String getOldType() {
        return oldType;
    }

    public String getNewType() {
        return newType;
    }

    @Override
    protected void dispatch( ExpressionTypeChangeHandler handler ) {
        // A failing handler is logged and swallowed so dispatch can continue
        // for any remaining handlers (original behavior preserved).
        try {
            handler.onExpressionTypeChanged( this );
        } catch ( Throwable e ) {
            e.printStackTrace();
        }
    }

    @Override
    public GwtEvent.Type<ExpressionTypeChangeHandler> getAssociatedType() {
        return TYPE;
    }

    public static final Type<ExpressionTypeChangeHandler> getType() {
        return TYPE;
    }
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package biz.turnonline.ecosystem.service;
import biz.turnonline.ecosystem.model.api.ImportSet;
/**
 * Import service used by a concrete agent that supports data import.
 *
 * @author <a href="mailto:pohorelec@turnonlie.biz">Jozef Pohorelec</a>
 */
public interface ImportService
{
    /**
     * Imports the given data set into the agent's data source.
     *
     * @param importSet {@link ImportSet} describing the data to import
     */
    void importData( ImportSet importSet );
}
|
package ee.itcollege.math.graphics;
import java.awt.BasicStroke;
import java.awt.Color;
import java.awt.Graphics;
import java.awt.Graphics2D;
import java.awt.Point;
import javax.swing.JPanel;
import javax.swing.border.StrokeBorder;
public class GraphicsContent extends JPanel {

    /** Coefficients of the parabola y = a * x^2 + b * x + c. */
    double a, b, c;

    public GraphicsContent(double a, double b, double c) {
        this.a = a;
        this.b = b;
        this.c = c;
    }

    /**
     * Draws the coordinate axes and the parabola, both as a thick connected
     * polyline and as individual 2x2-pixel sample points.
     */
    @Override
    protected void paintComponent(Graphics g) {
        // BUG FIX: the JComponent painting contract requires delegating to
        // super.paintComponent first; skipping it leaves stale pixels from
        // previous paints on repaint.
        super.paintComponent(g);
        // Axes through the middle of the (square) drawing area.
        g.setColor(Color.gray);
        g.drawLine(0, GraphicsWindow.GRAPHICS_WIDTH / 2, GraphicsWindow.GRAPHICS_WIDTH, GraphicsWindow.GRAPHICS_WIDTH / 2);
        g.drawLine(GraphicsWindow.GRAPHICS_WIDTH / 2, 0, GraphicsWindow.GRAPHICS_WIDTH / 2, GraphicsWindow.GRAPHICS_WIDTH);
        g.setColor(Color.black);
        Point lastPoint = null;
        g.setColor(Color.BLACK);
        ((Graphics2D) g).setStroke(new BasicStroke(5));
        // Connected polyline: translate math coordinates (origin at center,
        // y pointing up) to screen coordinates (origin top-left, y down).
        for (double x = -GraphicsWindow.GRAPHICS_WIDTH / 2; x < GraphicsWindow.GRAPHICS_WIDTH / 2; x++) {
            double y = a * x * x + b * x + c;
            int newX = (int) (x + GraphicsWindow.GRAPHICS_WIDTH / 2);
            int newY = (int) (-y + GraphicsWindow.GRAPHICS_WIDTH / 2);
            if (lastPoint != null) {
                g.drawLine(newX, newY, (int) lastPoint.getX(), (int) lastPoint.getY());
            }
            lastPoint = new Point(newX, newY);
        }
        // Individual sample points drawn on top of the line.
        g.setColor(Color.black);
        for (double x = -GraphicsWindow.GRAPHICS_WIDTH / 2; x < GraphicsWindow.GRAPHICS_WIDTH / 2; x++) {
            double y = a * x * x + b * x + c;
            g.fillOval((int) x + GraphicsWindow.GRAPHICS_WIDTH / 2 - 1, (int) -y + GraphicsWindow.GRAPHICS_WIDTH / 2 - 1, 2, 2);
        }
    }
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.skywalking.oap.server.core.analysis.manual.log;
import org.apache.skywalking.oap.server.core.analysis.SourceDispatcher;
import org.apache.skywalking.oap.server.core.analysis.worker.RecordStreamProcessor;
import org.apache.skywalking.oap.server.core.source.HTTPAccessLog;
/**
 * Copies {@link HTTPAccessLog} source events field-by-field into persistent
 * {@link HTTPAccessLogRecord}s and feeds them to the record stream processor.
 *
 * @author wusheng
 */
public class HTTPAccessLogDispatcher implements SourceDispatcher<HTTPAccessLog> {

    @Override
    public void dispatch(HTTPAccessLog source) {
        final HTTPAccessLogRecord logRecord = new HTTPAccessLogRecord();
        logRecord.setTimestamp(source.getTimestamp());
        logRecord.setTimeBucket(source.getTimeBucket());
        logRecord.setServiceId(source.getServiceId());
        logRecord.setServiceInstanceId(source.getServiceInstanceId());
        logRecord.setEndpointId(source.getEndpointId());
        logRecord.setTraceId(source.getTraceId());
        logRecord.setIsError(source.getIsError());
        logRecord.setStatusCode(source.getStatusCode());
        logRecord.setContentType(source.getContentType().value());
        logRecord.setContent(source.getContent());
        RecordStreamProcessor.getInstance().in(logRecord);
    }
}
|
/*
* Copyright 2015 Austin Keener, Michael Ritter, Florian Spieß, and the JDA contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.dv8tion.jda.api.events.channel;
import net.dv8tion.jda.api.JDA;
import net.dv8tion.jda.api.entities.Channel;
import javax.annotation.Nonnull;
/**
 * Indicates that a {@link Channel} was created.
 * Simply delegates to {@link GenericChannelEvent}; carries no extra state.
 */
public class ChannelCreateEvent extends GenericChannelEvent
{
    /**
     * @param api            the JDA instance that fired the event
     * @param responseNumber sequence number of the gateway response
     * @param channel        the newly created channel
     */
    public ChannelCreateEvent(@Nonnull JDA api, long responseNumber, Channel channel)
    {
        super(api, responseNumber, channel);
    }
}
|
package de.fraunhofer.isst.dataspaceconnector.exceptions.message;
/**
 * Thrown to indicate that the message could not be built.
 */
public class MessageBuilderException extends MessageException {

    // Default serial version uid
    private static final long serialVersionUID = 1L;

    /**
     * Construct a MessageBuilderException with the specified detail message and cause.
     *
     * @param msg   The detail message.
     * @param cause The cause.
     */
    public MessageBuilderException(String msg, Throwable cause) {
        super(msg, cause);
    }
}
|
/*
* Copyright (c) 2021.
* File : UnionFind.java
* Author : Ankur
* Last modified : 1/9/2021
*
* All code is for practice purpose only and strictly non-commercial.
* All rights reserved.
* Please refer to apache license terms in the project.
*/
package dsa;
/**
 * Textbook union-find (disjoint-set) implementations, from slowest to
 * fastest: quick-find, quick-union, and weighted quick-union with path
 * halving. Elements are the integers {@code 0..n-1}.
 */
public class UnionFind {

    /** Quick-find: find is O(1), union is O(n) (ids are flattened eagerly). */
    public class QuickFind {

        // id[i] is the component label of element i.
        private int[] id;

        QuickFind(int n) {
            id = new int[n];
            // Every element starts in its own component.
            for (int i = 0; i < id.length; ++i)
                id[i] = i;
        }

        /** True when p and q belong to the same component. */
        public boolean find(int p, int q) {
            return id[p] == id[q];
        }

        /** Merges p's component into q's by relabeling all of p's members. */
        public void union(int p, int q) {
            if (id[p] == id[q])
                return;
            int pid = id[p];
            int qid = id[q];
            for (int i = 0; i < id.length; ++i) {
                // Must compare against the saved pid: id[p] itself changes
                // mid-loop once p's slot is relabeled.
                if (id[i] == pid)
                    id[i] = qid;
            }
        }
    }

    /** Quick-union: components are trees; union links one root under the other. */
    public class QuickUnion {

        // id[i] is the parent of i; roots satisfy id[i] == i.
        private int[] id;

        QuickUnion(int n) {
            id = new int[n];
            for (int i = 0; i < id.length; ++i)
                id[i] = i;
        }

        /** True when p and q share the same root. */
        public boolean find(int p, int q) {
            return root(p) == root(q);
        }

        /** Follows parent links up to the root of x's tree. */
        private int root(int x) {
            while (x != id[x]) {
                x = id[x];
            }
            return x;
        }

        /** Links p's root under q's root (no balancing — trees can get tall). */
        public void union(int p, int q) {
            id[root(p)] = root(q);
        }
    }

    /**
     * Weighted quick-union with path halving: the smaller tree is always
     * linked under the larger, and root() flattens paths as it walks,
     * giving near-constant amortized operations.
     */
    public class WeightedQuickUnion {

        // id[i] is the parent of i; roots satisfy id[i] == i.
        private int[] id;
        // sz[r] is the number of nodes in the tree rooted at r.
        private int[] sz;

        WeightedQuickUnion(int n) {
            id = new int[n];
            sz = new int[n];
            for (int i = 0; i < n; ++i) {
                id[i] = i;
                sz[i] = 1;
            }
        }

        /** True when p and q share the same root. */
        public boolean find(int p, int q) {
            return root(p) == root(q);
        }

        /** Root lookup with path halving (points each visited node at its grandparent). */
        private int root(int x) {
            while (x != id[x]) {
                // Path halving keeps component membership identical while
                // flattening the tree for future lookups.
                id[x] = id[id[x]];
                x = id[x];
            }
            return x;
        }

        /** Links the root of the smaller tree under the root of the larger. */
        public void union(int p, int q) {
            int i = root(p);
            int j = root(q);
            if (i == j) return;
            if (sz[i] < sz[j]) {
                id[i] = j;
                sz[j] += sz[i];
            } else {
                id[j] = i;
                sz[i] += sz[j];
            }
        }
    }
}
|
package com.qualixium.executor;
import org.netbeans.api.project.FileOwnerQuery;
import org.netbeans.api.project.Project;
import org.netbeans.api.project.ProjectUtils;
import org.openide.filesystems.FileObject;
import org.openide.filesystems.FileUtil;
import org.openide.loaders.DataObject;
import org.openide.windows.TopComponent;
public final class NetBeansContextInfo {

    /** Static helpers only; never instantiated. */
    private NetBeansContextInfo() {
    }

    /**
     * Returns the absolute path of the file backing the focused top component,
     * or an empty string if it cannot be resolved.
     */
    public static String getFullFilePath() {
        try {
            final DataObject activeData = getActiveTopComponent().getLookup().lookup(DataObject.class);
            return FileUtil.toFile(activeData.getPrimaryFile()).getAbsolutePath();
        } catch (Exception ex) {
            // Best effort: any lookup failure degrades to an empty result.
            ex.printStackTrace();
            return "";
        }
    }

    /**
     * Returns the display name of the project owning the focused file,
     * or an empty string if it cannot be resolved.
     */
    public static String getProjectName() {
        try {
            final FileObject activeFile = getActiveTopComponent().getLookup().lookup(FileObject.class);
            return ProjectUtils.getInformation(FileOwnerQuery.getOwner(activeFile)).getDisplayName();
        } catch (Exception ex) {
            ex.printStackTrace();
            return "";
        }
    }

    /**
     * Returns the directory path of the project owning the focused file,
     * or an empty string if it cannot be resolved.
     */
    public static String getProjectDirectory() {
        try {
            final FileObject activeFile = getActiveTopComponent().getLookup().lookup(FileObject.class);
            final Project owner = ProjectUtils.getInformation(FileOwnerQuery.getOwner(activeFile)).getProject();
            final FileObject projectDir = owner.getProjectDirectory();
            return projectDir.getPath();
        } catch (Exception ex) {
            ex.printStackTrace();
            return "";
        }
    }

    /** The currently activated top component from the window registry. */
    private static TopComponent getActiveTopComponent() {
        return TopComponent.getRegistry().getActivated();
    }
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.pig.newplan.logical.rules;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.pig.impl.logicalLayer.FrontendException;
import org.apache.pig.newplan.Operator;
import org.apache.pig.newplan.OperatorPlan;
import org.apache.pig.newplan.logical.relational.LOCogroup;
import org.apache.pig.newplan.logical.relational.LOCross;
import org.apache.pig.newplan.logical.relational.LODistinct;
import org.apache.pig.newplan.logical.relational.LOFilter;
import org.apache.pig.newplan.logical.relational.LOForEach;
import org.apache.pig.newplan.logical.relational.LOGenerate;
import org.apache.pig.newplan.logical.relational.LOJoin;
import org.apache.pig.newplan.logical.relational.LOLimit;
import org.apache.pig.newplan.logical.relational.LOLoad;
import org.apache.pig.newplan.logical.relational.LOSort;
import org.apache.pig.newplan.logical.relational.LOSplit;
import org.apache.pig.newplan.logical.relational.LOSplitOutput;
import org.apache.pig.newplan.logical.relational.LOUnion;
import org.apache.pig.newplan.logical.relational.LogicalPlan;
import org.apache.pig.newplan.logical.relational.LogicalRelationalOperator;
import org.apache.pig.newplan.optimizer.Rule;
import org.apache.pig.newplan.optimizer.Transformer;
/**
 * Optimizer rule that pushes an {@code LOLimit} as far towards the load as is
 * safe, duplicates it across cross/union branches, merges adjacent limits,
 * and folds a limit into a preceding sort (limited sort).
 */
public class LimitOptimizer extends Rule {

    public LimitOptimizer(String name) {
        super(name, false);
    }

    /** The match pattern is a plan containing a single LOLimit. */
    @Override
    protected OperatorPlan buildPattern() {
        LogicalPlan plan = new LogicalPlan();
        LogicalRelationalOperator limit = new LOLimit(plan, 0);
        plan.add(limit);
        return plan;
    }

    @Override
    public Transformer getNewTransformer() {
        return new OptimizeLimitTransformer();
    }

    public class OptimizeLimitTransformer extends Transformer {

        /**
         * A matched limit is transformable only when it has a predecessor
         * that is neither an operator the limit cannot be pushed past nor a
         * foreach containing a flatten (which may change the row count).
         */
        @Override
        public boolean check(OperatorPlan matched) {
            LOLimit limit = (LOLimit) matched.getSources().get(0);
            // Match each foreach.
            List<Operator> preds = currentPlan.getPredecessors(limit);
            if (preds == null || preds.size() == 0)
                return false;
            Operator pred = preds.get(0);
            // Limit cannot be pushed up past these operators.
            if (pred instanceof LOCogroup || pred instanceof LOFilter
                    || pred instanceof LOLoad || pred instanceof LOSplit
                    || pred instanceof LODistinct || pred instanceof LOJoin) {
                return false;
            }
            // Limit cannot be pushed in front of ForEach if it has a flatten.
            if (pred instanceof LOForEach) {
                LOForEach foreach = (LOForEach) pred;
                LogicalPlan innerPlan = foreach.getInnerPlan();
                Iterator<Operator> it = innerPlan.getOperators();
                while (it.hasNext()) {
                    Operator op = it.next();
                    if (op instanceof LOGenerate) {
                        LOGenerate gen = (LOGenerate) op;
                        boolean[] flattenFlags = gen.getFlattenFlags();
                        if (flattenFlags != null) {
                            for (boolean flatten : flattenFlags) {
                                if (flatten)
                                    return false;
                            }
                        }
                    }
                }
            }
            return true;
        }

        @Override
        public OperatorPlan reportChanges() {
            // The transformation mutates currentPlan in place.
            return currentPlan;
        }

        /**
         * Applies the limit push-up: moves it above a foreach, duplicates it
         * across cross/union inputs, folds it into a sort, merges it with an
         * adjacent limit, or rewrites a sort/split/splitoutput/limit chain
         * into a limited sort.
         */
        @Override
        public void transform(OperatorPlan matched) throws FrontendException {
            LOLimit limit = (LOLimit) matched.getSources().get(0);
            // Find the next foreach operator.
            List<Operator> preds = currentPlan.getPredecessors(limit);
            Operator pred = preds.get(0);
            if (pred instanceof LOForEach) {
                // We can safely move LOLimit up.
                // Get operator before LOForEach.
                Operator prepredecessor = currentPlan.getPredecessors(pred)
                        .get(0);
                currentPlan.removeAndReconnect(limit);
                currentPlan.insertBetween(prepredecessor, limit, pred);
            } else if (limit.getLimitPlan() == null) {
                // TODO selectively enable optimizations for variable limit
                if (pred instanceof LOCross || pred instanceof LOUnion) {
                    // Limit can be duplicated, and the new instance pushed in front
                    // of an operator for the following operators
                    // (that is, if you have X->limit, you can transform that to
                    // limit->X->limit):
                    LOLimit newLimit = null;
                    List<Operator> nodesToProcess = new ArrayList<Operator>();
                    for (Operator prepredecessor : currentPlan
                            .getPredecessors(pred))
                        nodesToProcess.add(prepredecessor);
                    for (Operator prepredecessor : nodesToProcess) {
                        if (prepredecessor instanceof LOLimit) {
                            // Branch already ends in a limit: keep the
                            // smaller of the two limits.
                            LOLimit l = (LOLimit) prepredecessor;
                            l.setLimit(l.getLimit() < limit.getLimit() ? l
                                    .getLimit() : limit.getLimit());
                        } else {
                            newLimit = new LOLimit((LogicalPlan) currentPlan, limit
                                    .getLimit());
                            currentPlan.insertBetween(prepredecessor, newLimit, pred);
                        }
                    }
                } else if (pred instanceof LOSort) {
                    // Fold the limit into the sort (limited sort), keeping
                    // the smaller limit if the sort already carries one.
                    LOSort sort = (LOSort) pred;
                    if (sort.getLimit() == -1)
                        sort.setLimit(limit.getLimit());
                    else
                        sort.setLimit(sort.getLimit() < limit.getLimit() ? sort
                                .getLimit() : limit.getLimit());
                    // remove the limit
                    currentPlan.removeAndReconnect(limit);
                } else if (pred instanceof LOLimit) {
                    // Limit is merged into another LOLimit; the smaller wins.
                    LOLimit beforeLimit = (LOLimit) pred;
                    beforeLimit
                            .setLimit(beforeLimit.getLimit() < limit.getLimit() ? beforeLimit
                                    .getLimit()
                                    : limit.getLimit());
                    // remove the limit
                    currentPlan.removeAndReconnect(limit);
                } else if (pred instanceof LOSplitOutput) {
                    // Limit and OrderBy (LOSort) can be separated by split.
                    List<Operator> grandparants = currentPlan.getPredecessors(pred);
                    // After insertion of splitters, any node in the plan can
                    // have at most one predecessor.
                    if (grandparants != null && grandparants.size() != 0
                            && grandparants.get(0) instanceof LOSplit) {
                        List<Operator> greatGrandparants = currentPlan
                                .getPredecessors(grandparants.get(0));
                        if (greatGrandparants != null
                                && greatGrandparants.size() != 0
                                && greatGrandparants.get(0) instanceof LOSort) {
                            // sort -> split -> splitoutput -> limit: replace
                            // the limit with a limited copy of the sort.
                            LOSort sort = (LOSort) greatGrandparants.get(0);
                            LOSort newSort = new LOSort(sort.getPlan(), sort
                                    .getSortColPlans(), sort.getAscendingCols(),
                                    sort.getUserFunc());
                            newSort.setLimit(limit.getLimit());
                            currentPlan.replace(limit, newSort);
                        }
                    }
                }
            }
        }
    }
}
|
package io.deephaven.db.v2.locations.local;
import gnu.trove.list.TIntList;
import gnu.trove.list.array.TIntArrayList;
import io.deephaven.base.Pair;
import io.deephaven.base.verify.Assert;
import io.deephaven.base.verify.Require;
import io.deephaven.db.tables.ColumnDefinition;
import io.deephaven.db.tables.TableDefinition;
import io.deephaven.db.tables.utils.ParquetTools;
import io.deephaven.db.v2.locations.TableDataException;
import io.deephaven.db.v2.locations.impl.TableLocationKeyFinder;
import io.deephaven.db.v2.locations.parquet.local.ParquetTableLocationKey;
import io.deephaven.db.v2.parquet.ParquetInstructions;
import io.deephaven.parquet.ParquetFileReader;
import io.deephaven.parquet.tempfix.ParquetMetadataConverter;
import io.deephaven.util.type.TypeUtils;
import org.apache.commons.lang3.mutable.MutableInt;
import org.apache.parquet.format.RowGroup;
import org.apache.parquet.hadoop.metadata.ParquetMetadata;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import java.io.File;
import java.io.IOException;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.*;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.stream.Collectors;
import static java.util.stream.Collectors.toMap;
/**
* <p>
* {@link TableLocationKeyFinder Location finder} that will examine a parquet metadata file to discover locations.
*
* <p>
* Note that we expect to find the following files:
* <ul>
* <li>{@code _metadata} - A file containing Parquet metadata for all {@link RowGroup row groups} in all
* {@code .parquet} files for the entire data set, including schema information non-partitioning columns and key-value
* metadata</li>
* <li>{@code _common_metadata} <i>(optional)</i> - A file containing Parquet metadata with schema information that
* applies to the entire data set, including partitioning columns that are inferred from file paths rather than
* explicitly written in {@link org.apache.parquet.format.ColumnChunk column chunks} within {@code .parquet} files</li>
* </ul>
*/
public class ParquetMetadataFileLayout implements TableLocationKeyFinder<ParquetTableLocationKey> {

    public static final String METADATA_FILE_NAME = "_metadata";
    public static final String COMMON_METADATA_FILE_NAME = "_common_metadata";

    private final File metadataFile;
    private final File commonMetadataFile;

    // Derived once at construction: the data set's table definition, the (possibly adjusted) read
    // instructions, and one location key per distinct parquet file referenced by the metadata file.
    private final TableDefinition definition;
    private final ParquetInstructions instructions;
    private final List<ParquetTableLocationKey> keys;

    public ParquetMetadataFileLayout(@NotNull final File directory) {
        this(directory, ParquetInstructions.EMPTY);
    }

    public ParquetMetadataFileLayout(@NotNull final File directory,
            @NotNull final ParquetInstructions inputInstructions) {
        this(new File(directory, METADATA_FILE_NAME), new File(directory, COMMON_METADATA_FILE_NAME),
                inputInstructions);
    }

    public ParquetMetadataFileLayout(@NotNull final File metadataFile,
            @Nullable final File commonMetadataFile) {
        this(metadataFile, commonMetadataFile, ParquetInstructions.EMPTY);
    }

    /**
     * @param metadataFile The required {@code _metadata} file, containing row group information for the whole data set
     * @param commonMetadataFile The optional {@code _common_metadata} file, containing data-set-wide schema
     *        information (including partitioning columns); ignored if null or nonexistent
     * @param inputInstructions Instructions customizing schema conversion and column name mapping
     * @throws TableDataException If the metadata file is missing, unparseable, or inconsistent with the common
     *         metadata file
     */
    public ParquetMetadataFileLayout(@NotNull final File metadataFile,
            @Nullable final File commonMetadataFile,
            @NotNull final ParquetInstructions inputInstructions) {
        this.metadataFile = metadataFile;
        this.commonMetadataFile = commonMetadataFile;
        if (!metadataFile.exists()) {
            throw new TableDataException("Parquet metadata file " + metadataFile + " does not exist");
        }
        final ParquetFileReader metadataFileReader = ParquetTools.getParquetFileReader(metadataFile);

        final ParquetMetadataConverter converter = new ParquetMetadataConverter();
        final ParquetMetadata metadataFileMetadata = convertMetadata(metadataFile, metadataFileReader, converter);
        // Schema for the non-partitioning ("leaf") columns, as written in the parquet files themselves
        final Pair<List<ColumnDefinition>, ParquetInstructions> leafSchemaInfo = ParquetTools.convertSchema(
                metadataFileReader.getSchema(),
                metadataFileMetadata.getFileMetaData().getKeyValueMetaData(),
                inputInstructions);

        if (commonMetadataFile != null && commonMetadataFile.exists()) {
            // The common metadata file's "full" schema additionally covers partitioning columns. Merge it with the
            // leaf schema: columns unique to the full schema become partitioning columns; shared columns must match.
            final ParquetFileReader commonMetadataFileReader = ParquetTools.getParquetFileReader(commonMetadataFile);
            final Pair<List<ColumnDefinition>, ParquetInstructions> fullSchemaInfo = ParquetTools.convertSchema(
                    commonMetadataFileReader.getSchema(),
                    convertMetadata(commonMetadataFile, commonMetadataFileReader, converter).getFileMetaData()
                            .getKeyValueMetaData(),
                    leafSchemaInfo.getSecond());
            final List<ColumnDefinition> adjustedColumnDefinitions = new ArrayList<>();
            final Map<String, ColumnDefinition> leafDefinitionsMap =
                    leafSchemaInfo.getFirst().stream().collect(toMap(ColumnDefinition::getName, Function.identity()));
            for (final ColumnDefinition fullDefinition : fullSchemaInfo.getFirst()) {
                final ColumnDefinition leafDefinition = leafDefinitionsMap.get(fullDefinition.getName());
                if (leafDefinition == null) {
                    adjustedColumnDefinitions.add(adjustPartitionDefinition(fullDefinition));
                } else if (fullDefinition.equals(leafDefinition)) {
                    adjustedColumnDefinitions.add(fullDefinition); // No adjustments to apply in this case
                } else {
                    final List<String> differences = new ArrayList<>();
                    // noinspection unchecked
                    fullDefinition.describeDifferences(differences, leafDefinition, "full schema", "file schema", "");
                    throw new TableDataException(String.format("Schema mismatch between %s and %s for column %s: %s",
                            metadataFile, commonMetadataFile, fullDefinition.getName(), differences));
                }
            }
            definition = new TableDefinition(adjustedColumnDefinitions);
            instructions = fullSchemaInfo.getSecond();
        } else {
            definition = new TableDefinition(leafSchemaInfo.getFirst());
            instructions = leafSchemaInfo.getSecond();
        }

        final List<ColumnDefinition> partitioningColumns = definition.getPartitioningColumns();
        final Map<String, ColumnDefinition> partitioningColumnsMap = partitioningColumns.stream().collect(
                toMap(ColumnDefinition::getName, Function.identity(), Assert::neverInvoked, LinkedHashMap::new));
        // Group row group indices by the parquet file they belong to, preserving encounter order
        final Map<String, TIntList> fileNameToRowGroupIndices = new LinkedHashMap<>();
        final List<RowGroup> rowGroups = metadataFileReader.fileMetaData.getRow_groups();
        final int numRowGroups = rowGroups.size();
        for (int rgi = 0; rgi < numRowGroups; ++rgi) {
            fileNameToRowGroupIndices
                    .computeIfAbsent(rowGroups.get(rgi).getColumns().get(0).getFile_path(), fn -> new TIntArrayList())
                    .add(rgi);
        }
        final File directory = metadataFile.getParentFile();
        final MutableInt partitionOrder = new MutableInt(0);
        keys = fileNameToRowGroupIndices.entrySet().stream().map(entry -> {
            final String filePathString = entry.getKey();
            final int[] rowGroupIndices = entry.getValue().toArray();

            if (filePathString == null || filePathString.isEmpty()) {
                throw new TableDataException("Missing parquet file name for row groups "
                        + Arrays.toString(rowGroupIndices) + " in " + metadataFile);
            }
            // Parse partition values from the file's relative path: one path element per partitioning column,
            // either hive-style ("key=value") or positional (value only, matched by column order).
            final LinkedHashMap<String, Comparable<?>> partitions =
                    partitioningColumns.isEmpty() ? null : new LinkedHashMap<>();
            if (partitions != null) {
                final Path filePath = Paths.get(filePathString);
                final int numPartitions = filePath.getNameCount() - 1; // last element is the file name itself
                if (numPartitions != partitioningColumns.size()) {
                    throw new TableDataException("Unexpected number of path elements in " + filePathString
                            + " for partitions " + partitions.keySet());
                }
                final boolean useHiveStyle = filePath.getName(0).toString().contains("=");
                for (int pi = 0; pi < numPartitions; ++pi) {
                    final String pathElement = filePath.getName(pi).toString();
                    final ColumnDefinition columnDefinition;
                    final String partitionKey;
                    final String partitionValueRaw;
                    if (useHiveStyle) {
                        final String[] pathComponents = pathElement.split("=", 2);
                        if (pathComponents.length != 2) {
                            throw new TableDataException(
                                    "Unexpected path format found for hive-style partitioning from " + filePathString
                                            + " for " + metadataFile);
                        }
                        partitionKey = instructions.getColumnNameFromParquetColumnNameOrDefault(pathComponents[0]);
                        columnDefinition = partitioningColumnsMap.get(partitionKey);
                        partitionValueRaw = pathComponents[1];
                    } else {
                        columnDefinition = partitioningColumns.get(pi);
                        partitionKey = columnDefinition.getName();
                        partitionValueRaw = pathElement;
                    }
                    final Comparable<?> partitionValue =
                            CONVERSION_FUNCTIONS.get(columnDefinition.getDataType()).apply(partitionValueRaw);
                    if (partitions.containsKey(partitionKey)) {
                        throw new TableDataException("Unexpected duplicate partition key " + partitionKey
                                + " when parsing " + filePathString + " for " + metadataFile);
                    }
                    partitions.put(partitionKey, partitionValue);
                }
            }
            final ParquetTableLocationKey tlk = new ParquetTableLocationKey(new File(directory, filePathString),
                    partitionOrder.getAndIncrement(), partitions);
            // Share the already-parsed reader/metadata so each location need not re-read the footer
            tlk.setFileReader(metadataFileReader);
            tlk.setMetadata(metadataFileMetadata);
            tlk.setRowGroupIndices(rowGroupIndices);
            return tlk;
        }).collect(Collectors.toList());
    }

    public String toString() {
        return ParquetMetadataFileLayout.class.getSimpleName() + '[' + metadataFile + ',' + commonMetadataFile + ']';
    }

    private static ParquetMetadata convertMetadata(@NotNull final File file,
            @NotNull final ParquetFileReader fileReader,
            @NotNull final ParquetMetadataConverter converter) {
        try {
            return converter.fromParquetMetadata(fileReader.fileMetaData);
        } catch (IOException e) {
            // Preserve the underlying cause; the previous code discarded it, hiding the real failure
            throw new TableDataException("Error while converting file metadata from " + file, e);
        }
    }

    /**
     * Re-declare a column definition (known only from the common metadata file) as a partitioning column,
     * unboxing boxed primitives and falling back to String for types without well-defined parsing.
     */
    private static ColumnDefinition adjustPartitionDefinition(@NotNull final ColumnDefinition columnDefinition) {
        // Array/vector-typed columns become String partitions; array values can't be parsed from path elements
        if (columnDefinition.getComponentType() != null) {
            return ColumnDefinition.fromGenericType(columnDefinition.getName(), String.class,
                    ColumnDefinition.COLUMNTYPE_PARTITIONING, null);
        }
        final Class<?> dataType = columnDefinition.getDataType();
        if (dataType == boolean.class) {
            // Booleans are kept boxed, matching the Boolean.class conversion function below
            return ColumnDefinition.fromGenericType(columnDefinition.getName(), Boolean.class,
                    ColumnDefinition.COLUMNTYPE_PARTITIONING, null);
        }
        if (dataType.isPrimitive()) {
            return columnDefinition.withPartitioning();
        }
        final Class<?> unboxedType = TypeUtils.getUnboxedType(dataType);
        if (unboxedType != null && unboxedType.isPrimitive()) {
            return ColumnDefinition.fromGenericType(columnDefinition.getName(), unboxedType,
                    ColumnDefinition.COLUMNTYPE_PARTITIONING, null);
        }
        if (dataType == Boolean.class || dataType == String.class || dataType == BigDecimal.class
                || dataType == BigInteger.class) {
            return columnDefinition.withPartitioning();
        }
        // NB: This fallback includes any kind of timestamp; we don't have a strong grasp of required parsing support at
        // this time, and preserving the contents as a String allows the user full control.
        return ColumnDefinition.fromGenericType(columnDefinition.getName(), String.class,
                ColumnDefinition.COLUMNTYPE_PARTITIONING, null);
    }

    // Maps each partitioning-column data type (as produced by adjustPartitionDefinition) to a parser for its
    // path-element representation. Must cover every type adjustPartitionDefinition can emit.
    private static final Map<Class, Function<String, Comparable<?>>> CONVERSION_FUNCTIONS;

    static {
        final Map<Class, Function<String, Comparable<?>>> conversionFunctionsTemp = new HashMap<>();
        conversionFunctionsTemp.put(Boolean.class, Boolean::parseBoolean);
        conversionFunctionsTemp.put(char.class, str -> {
            Require.eq(str.length(), "length", 1);
            return str.charAt(0);
        });
        conversionFunctionsTemp.put(byte.class, Byte::parseByte);
        conversionFunctionsTemp.put(short.class, Short::parseShort);
        conversionFunctionsTemp.put(int.class, Integer::parseInt);
        conversionFunctionsTemp.put(long.class, Long::parseLong);
        conversionFunctionsTemp.put(float.class, Float::parseFloat);
        // Bug fix: double was missing, causing an NPE when parsing a double-typed partition value
        // (adjustPartitionDefinition unboxes Double to double.class)
        conversionFunctionsTemp.put(double.class, Double::parseDouble);
        conversionFunctionsTemp.put(BigInteger.class, BigInteger::new);
        conversionFunctionsTemp.put(BigDecimal.class, BigDecimal::new);
        conversionFunctionsTemp.put(String.class, str -> str);
        CONVERSION_FUNCTIONS = Collections.unmodifiableMap(conversionFunctionsTemp);
    }

    /** @return The table definition derived from the metadata file(s) */
    public TableDefinition getTableDefinition() {
        return definition;
    }

    /** @return The read instructions, as adjusted during schema conversion */
    public ParquetInstructions getInstructions() {
        return instructions;
    }

    @Override
    public void findKeys(@NotNull final Consumer<ParquetTableLocationKey> locationKeyObserver) {
        keys.forEach(locationKeyObserver);
    }
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.skywalking.apm.plugin.sofarpc;
import com.alipay.sofa.rpc.client.ProviderInfo;
import com.alipay.sofa.rpc.context.RpcInternalContext;
import com.alipay.sofa.rpc.core.request.SofaRequest;
import com.alipay.sofa.rpc.core.response.SofaResponse;
import com.alipay.sofa.rpc.filter.ProviderInvoker;
import org.apache.skywalking.apm.agent.core.conf.Config;
import org.apache.skywalking.apm.agent.core.context.SW3CarrierItem;
import org.apache.skywalking.apm.agent.core.context.trace.AbstractTracingSpan;
import org.apache.skywalking.apm.agent.core.context.trace.SpanLayer;
import org.apache.skywalking.apm.agent.core.context.trace.TraceSegment;
import org.apache.skywalking.apm.agent.core.context.trace.TraceSegmentRef;
import org.apache.skywalking.apm.agent.core.context.util.KeyValuePair;
import org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.EnhancedInstance;
import org.apache.skywalking.apm.agent.core.plugin.interceptor.enhance.MethodInterceptResult;
import org.apache.skywalking.apm.agent.test.helper.SegmentHelper;
import org.apache.skywalking.apm.agent.test.helper.SegmentRefHelper;
import org.apache.skywalking.apm.agent.test.helper.SpanHelper;
import org.apache.skywalking.apm.agent.test.tools.AgentServiceRule;
import org.apache.skywalking.apm.agent.test.tools.SegmentStorage;
import org.apache.skywalking.apm.agent.test.tools.SegmentStoragePoint;
import org.apache.skywalking.apm.agent.test.tools.TracingSegmentRunner;
import org.hamcrest.CoreMatchers;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import org.powermock.modules.junit4.PowerMockRunnerDelegate;
import java.util.List;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.powermock.api.mockito.PowerMockito.when;
@RunWith(PowerMockRunner.class)
@PowerMockRunnerDelegate(TracingSegmentRunner.class)
@PrepareForTest({RpcInternalContext.class, SofaRequest.class, SofaResponse.class})
public class SofaRpcProviderInterceptorTest {
    // Prefix prepended to SkyWalking carrier header names when stored as SOFA RPC request props.
    public static final String SKYWALKING_PREFIX = "skywalking.";
    // Collects trace segments produced during the test; populated via TracingSegmentRunner.
    @SegmentStoragePoint
    private SegmentStorage segmentStorage;
    @Rule
    public AgentServiceRule agentServiceRule = new AgentServiceRule();
    @Mock
    private EnhancedInstance enhancedInstance;
    // Interceptor under test (provider side).
    private SofaRpcProviderInterceptor sofaRpcProviderInterceptor;
    @Mock
    private RpcInternalContext rpcContext;
    @Mock
    private ProviderInvoker invoker;
    // Mocked with PowerMockito (not @Mock) because SofaRequest/SofaResponse are listed in @PrepareForTest.
    private SofaRequest sofaRequest = PowerMockito.mock(SofaRequest.class);
    @Mock
    private MethodInterceptResult methodInterceptResult;
    private SofaResponse sofaResponse = PowerMockito.mock(SofaResponse.class);
    private Object[] allArguments;
    private Class[] argumentTypes;
    @Before
    public void setUp() throws Exception {
        // Enable the legacy sw3 header so the interceptor reads the SW3 carrier item (see clear()).
        Config.Agent.ACTIVE_V1_HEADER = true;
        sofaRpcProviderInterceptor = new SofaRpcProviderInterceptor();
        // Static mock must be installed before stubbing RpcInternalContext.getContext() below.
        PowerMockito.mockStatic(RpcInternalContext.class);
        when(sofaRequest.getMethodName()).thenReturn("test");
        when(sofaRequest.getMethodArgSigs()).thenReturn(new String[] {"String"});
        when(sofaRequest.getMethodArgs()).thenReturn(new Object[] {"abc"});
        when(sofaRequest.getInterfaceName()).thenReturn("org.apache.skywalking.apm.test.TestSofaRpcService");
        PowerMockito.when(RpcInternalContext.getContext()).thenReturn(rpcContext);
        when(rpcContext.isConsumerSide()).thenReturn(false);
        final ProviderInfo providerInfo = new ProviderInfo();
        providerInfo.setHost("127.0.0.1");
        providerInfo.setPort(12200);
        when(rpcContext.getProviderInfo()).thenReturn(providerInfo);
        allArguments = new Object[] {sofaRequest};
        argumentTypes = new Class[] {sofaRequest.getClass()};
        Config.Agent.SERVICE_NAME = "SOFARPC-TestCases-APP";
    }
    @After
    public void clear() {
        // Restore the default so other tests are not affected by the v1-header toggle.
        Config.Agent.ACTIVE_V1_HEADER = false;
    }
    @Test
    public void testProviderWithAttachment() throws Throwable {
        when(rpcContext.isConsumerSide()).thenReturn(false);
        // Simulate an incoming sw3 tracing header attached to the request by an upstream consumer.
        when(sofaRequest.getRequestProp(SKYWALKING_PREFIX + SW3CarrierItem.HEADER_NAME)).thenReturn(
            "1.323.4433|3|1|1|#192.168.1.8 :18002|#/portal/|#/testEntrySpan|#AQA*#AQA*Et0We0tQNQA*");
        sofaRpcProviderInterceptor.beforeMethod(enhancedInstance, null, allArguments, argumentTypes, methodInterceptResult);
        sofaRpcProviderInterceptor.afterMethod(enhancedInstance, null, allArguments, argumentTypes, sofaResponse);
        assertProvider();
    }
    // Asserts a single segment with a single entry span, referencing the upstream segment from the header.
    private void assertProvider() {
        TraceSegment traceSegment = segmentStorage.getTraceSegments().get(0);
        assertThat(SegmentHelper.getSpans(traceSegment).size(), is(1));
        assertProviderSpan(SegmentHelper.getSpans(traceSegment).get(0));
        assertTraceSegmentRef(traceSegment.getRefs().get(0));
    }
    // Values here must match the fields encoded in the sw3 header string above.
    private void assertTraceSegmentRef(TraceSegmentRef actual) {
        assertThat(SegmentRefHelper.getSpanId(actual), is(3));
        assertThat(SegmentRefHelper.getEntryServiceInstanceId(actual), is(1));
        assertThat(SegmentRefHelper.getTraceSegmentId(actual).toString(), is("1.323.4433"));
    }
    private void assertProviderSpan(AbstractTracingSpan span) {
        assertCommonsAttribute(span);
        assertTrue(span.isEntry());
    }
    private void assertCommonsAttribute(AbstractTracingSpan span) {
        List<KeyValuePair> tags = SpanHelper.getTags(span);
        assertThat(tags.size(), is(0));
        assertThat(SpanHelper.getLayer(span), CoreMatchers.is(SpanLayer.RPC_FRAMEWORK));
        // 43 is the component id registered for SOFA RPC — TODO confirm against ComponentsDefine.
        assertThat(SpanHelper.getComponentId(span), is(43));
    }
}
|
/*
* Minecraft Forge
* Copyright (c) 2016-2021.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation version 2.1
* of the License.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
package net.minecraftforge.client.model;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import com.google.common.collect.Lists;
import net.minecraft.util.math.vector.TransformationMatrix;
import net.minecraft.util.math.vector.Vector3f;
import net.minecraft.util.math.vector.Vector4f;
import net.minecraft.client.renderer.model.BakedQuad;
import net.minecraft.client.renderer.vertex.DefaultVertexFormats;
import net.minecraft.client.renderer.vertex.VertexFormat;
import net.minecraft.client.renderer.vertex.VertexFormatElement;
public class QuadTransformer
{
    private static final int POSITION = findPositionOffset(DefaultVertexFormats.BLOCK);
    private static final int NORMAL = findNormalOffset(DefaultVertexFormats.BLOCK);
    private final TransformationMatrix transform;

    /**
     * @param transform The transformation to apply to the positions and normals of processed quads.
     */
    public QuadTransformer(TransformationMatrix transform)
    {
        this.transform = transform;
    }

    /**
     * Transforms the position and normal attributes of every vertex in the given BLOCK-format vertex data.
     * {@code inData} and {@code outData} may be the same array for in-place processing.
     */
    private void processVertices(int[] inData, int[] outData)
    {
        int stride = DefaultVertexFormats.BLOCK.getVertexSize();
        int count = (inData.length * 4) / stride; // ints -> bytes, divided by bytes per vertex
        for (int i=0;i<count;i++)
        {
            int offset = POSITION + i * stride;
            float x = Float.intBitsToFloat(getAtByteOffset(inData, offset ));
            float y = Float.intBitsToFloat(getAtByteOffset(inData, offset + 4));
            float z = Float.intBitsToFloat(getAtByteOffset(inData, offset + 8));

            Vector4f pos = new Vector4f(x, y, z, 1);
            transform.transformPosition(pos);
            pos.perspectiveDivide();

            putAtByteOffset(outData, offset, Float.floatToRawIntBits(pos.x()));
            putAtByteOffset(outData,offset + 4, Float.floatToRawIntBits(pos.y()));
            putAtByteOffset(outData,offset + 8, Float.floatToRawIntBits(pos.z()));
        }

        for (int i=0;i<count;i++)
        {
            int offset = NORMAL + i * stride;
            int normalIn = getAtByteOffset(inData,offset);
            if (normalIn != 0) // an all-zero normal means "no normal"; leave it untouched
            {
                // Unpack the signed-byte components ([-127,127] -> [-1,1]); shifts sign-extend each byte.
                float x = ((byte)((normalIn) >> 24)) / 127.0f;
                float y = ((byte)((normalIn << 8) >> 24)) / 127.0f;
                float z = ((byte)((normalIn << 16) >> 24)) / 127.0f;

                Vector3f normal = new Vector3f(x, y, z);
                transform.transformNormal(normal);
                normal.normalize();

                // Bug fix: re-pack the *transformed* normal, scaling each unit component back into a signed
                // byte (multiply by 127). The previous code packed the untransformed x/y/z and divided by
                // 127 instead of multiplying, truncating every component to 0 and discarding the transform.
                int normalOut = ((((byte)(normal.x() * 127.0f)) & 0xFF) << 24) |
                        ((((byte)(normal.y() * 127.0f)) & 0xFF) << 16) |
                        ((((byte)(normal.z() * 127.0f)) & 0xFF) << 8) |
                        (normalIn & 0xFF);
                putAtByteOffset(outData, offset, normalOut);
            }
        }
    }

    // Reads a (possibly unaligned) little-endian int starting at the given byte offset into the int array.
    private static int getAtByteOffset(int[] inData, int offset)
    {
        int index = offset / 4;
        int lsb = inData[index];

        int shift = (offset % 4) * 8;
        if (shift == 0)
            return inData[index];

        int msb = inData[index+1];

        return (lsb >>> shift) | (msb << (32-shift));
    }

    // Writes a (possibly unaligned) little-endian int at the given byte offset, preserving surrounding bytes.
    private static void putAtByteOffset(int[] outData, int offset, int value)
    {
        int index = offset / 4;
        int shift = (offset % 4) * 8;

        if (shift == 0)
        {
            outData[index] = value;
            return;
        }

        int lsbMask = 0xFFFFFFFF >>> (32-shift);
        int msbMask = 0xFFFFFFFF << shift;

        outData[index] = (outData[index] & lsbMask) | (value << shift);
        outData[index+1] = (outData[index+1] & msbMask) | (value >>> (32-shift));
    }

    // Locates the byte offset of the POSITION attribute in the format, validating type and size.
    private static int findPositionOffset(VertexFormat fmt)
    {
        int index;
        VertexFormatElement element = null;
        for (index = 0; index < fmt.getElements().size(); index++)
        {
            VertexFormatElement el = fmt.getElements().get(index);
            if (el.getUsage() == VertexFormatElement.Usage.POSITION)
            {
                element = el;
                break;
            }
        }
        if (index == fmt.getElements().size() || element == null)
            throw new RuntimeException("Expected vertex format to have a POSITION attribute");
        if (element.getType() != VertexFormatElement.Type.FLOAT)
            throw new RuntimeException("Expected POSITION attribute to have data type FLOAT");
        if (element.getByteSize() < 3)
            throw new RuntimeException("Expected POSITION attribute to have at least 3 dimensions");
        return fmt.getOffset(index);
    }

    // Locates the byte offset of the NORMAL attribute in the format, validating type and size.
    private static int findNormalOffset(VertexFormat fmt)
    {
        int index;
        VertexFormatElement element = null;
        for (index = 0; index < fmt.getElements().size(); index++)
        {
            VertexFormatElement el = fmt.getElements().get(index);
            if (el.getUsage() == VertexFormatElement.Usage.NORMAL)
            {
                element = el;
                break;
            }
        }
        if (index == fmt.getElements().size() || element == null)
            throw new IllegalStateException("BLOCK format does not have normals?");
        if (element.getType() != VertexFormatElement.Type.BYTE)
            throw new RuntimeException("Expected NORMAL attribute to have data type BYTE");
        if (element.getByteSize() < 3)
            throw new RuntimeException("Expected NORMAL attribute to have at least 3 dimensions");
        return fmt.getOffset(index);
    }

    /**
     * Processes a single quad, producing a new quad.
     * @param input A single quad to transform.
     * @return A new BakedQuad object with the new position.
     */
    public BakedQuad processOne(BakedQuad input)
    {
        int[] inData = input.getVertices();
        int[] outData = Arrays.copyOf(inData, inData.length);
        processVertices(inData, outData);

        return new BakedQuad(outData, input.getTintIndex(), input.getDirection(), input.getSprite(), input.isShade());
    }

    /**
     * Processes a single quad, modifying the input quad.
     * @param input A single quad to transform.
     * @return The input BakedQuad object with the new position applied.
     */
    public BakedQuad processOneInPlace(BakedQuad input)
    {
        int[] data = input.getVertices();
        processVertices(data, data);
        return input;
    }

    /**
     * Processes multiple quads, producing a new array of new quads.
     * @param inputs The list of quads to transform
     * @return A new array of new BakedQuad objects.
     */
    public List<BakedQuad> processMany(List<BakedQuad> inputs)
    {
        if(inputs.size() == 0)
            return Collections.emptyList();

        List<BakedQuad> outputs = Lists.newArrayList();
        for(BakedQuad input : inputs)
        {
            int[] inData = input.getVertices();
            int[] outData = Arrays.copyOf(inData, inData.length);
            processVertices(inData, outData);

            outputs.add(new BakedQuad(outData, input.getTintIndex(), input.getDirection(), input.getSprite(), input.isShade()));
        }
        return outputs;
    }

    /**
     * Processes multiple quads in place, modifying the input quads.
     * @param inputs The list of quads to transform
     */
    public void processManyInPlace(List<BakedQuad> inputs)
    {
        if(inputs.size() == 0)
            return;

        for(BakedQuad input : inputs)
        {
            int[] data = input.getVertices();
            processVertices(data, data);
        }
    }
}
|
package Problem_2873;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.StringTokenizer;
public class Main {
    // Grid dimensions (R rows x C columns) and the value of each cell.
    static int R, C;
    static int[][] map;
    // Prints a sequence of moves (R/L/U/D) from (0,0) across the grid
    // (apparently Baekjoon problem 2873 — TODO confirm from package name Problem_2873):
    //  - odd R: snake row by row, covering every cell;
    //  - odd C: snake column by column, covering every cell;
    //  - both even: every such path must skip one cell with (i+j) odd, so skip the
    //    minimum-valued such cell and route around it.
    public static void main(String[] args) throws IOException {
        BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
        String bf = br.readLine();
        StringTokenizer st;
        R = Integer.parseInt(bf.split(" ")[0]);
        C = Integer.parseInt(bf.split(" ")[1]);
        map = new int[R][C];
        for(int i = 0; i<R; i++) {
            st = new StringTokenizer(br.readLine());
            for(int j = 0; j<C; j++) {
                map[i][j] = Integer.parseInt(st.nextToken());
            }
        }
        StringBuilder sb = new StringBuilder();
        if(R%2 == 1) {
            // Odd row count: sweep right on even rows, left on odd rows, stepping down between rows.
            for(int i = 0; i<R;i++) {
                char c;
                if(i%2 == 0) c = 'R';
                else c = 'L';
                for(int j = 0; j<C-1;j++) {
                    sb.append(c);
                }
                if(i != R-1) sb.append("D");
            }
        }
        else if(C%2 == 1) {
            // Odd column count: same snake, but column by column (down/up sweeps, stepping right).
            for(int j = 0; j<C;j++) {
                char c;
                if(j%2 ==0) c = 'D';
                else c = 'U';
                for(int i = 0; i<R-1;i++) {
                    sb.append(c);
                }
                if(j != C-1) sb.append("R");
            }
        }
        else {
            // Both even: find the minimum-valued cell among cells with (i+j) odd; it will be skipped.
            int r = 0, c = 1;
            for(int i = 0; i <R; i++) {
                for(int j = 0; j <C; j++) {
                    if((i+j)%2 == 1) {
                        if(map[r][c] > map[i][j]) {
                            r = i;
                            c = j;
                        }
                    }
                }
            }
            // sb grows forward from the start; sb2 is built backwards from the end and reversed when appended.
            StringBuilder sb2 = new StringBuilder(); // reverse.
            int x1 = 0;
            int y1 = 0;
            int x2 = R-1;
            int y2 = C-1;
            while(x2 - x1 > 1) { // peel off two full rows at a time (translated from Korean comment)
                if(x1/2 < r/2) {
                    // Row pairs above the skipped cell: right sweep, down, left sweep, down.
                    for(int i = 0; i<C-1;i++)sb.append('R');
                    sb.append('D');
                    for(int i = 0; i<C-1;i++)sb.append('L');
                    sb.append('D');
                    x1 += 2;
                }
                if(r/2 < x2/2) {
                    // Row pairs below the skipped cell, accumulated in reverse order.
                    for(int i = 0; i<C-1;i++)sb2.append('R');
                    sb2.append('D');
                    for(int i = 0; i<C-1;i++)sb2.append('L');
                    sb2.append('D');
                    x2 -= 2;
                }
            }
            while(y2 - y1 > 1) { // within the remaining two rows, peel off two columns at a time
                if(y1/2 < c/2) {
                    sb.append('D').append('R').append('U').append('R');
                    y1 += 2;
                }
                if(c/2 < y2/2) {
                    sb2.append('D').append('R').append('U').append('R');
                    y2 -=2;
                }
            }
            // Cross the final 2x2 block, stepping around the skipped cell.
            if(c == y1) {
                sb.append('R').append('D');
            } else {
                sb.append('D').append('R');
            }
            sb.append(sb2.reverse());
        }
        System.out.println(sb);
    }
}
|
/*
* Copyright 2009 The Closure Compiler Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.javascript.jscomp;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.truth.Truth.assertThat;
import static com.google.javascript.jscomp.CompilerTestCase.lines;
import static com.google.javascript.rhino.testing.NodeSubject.assertNode;
import com.google.javascript.jscomp.NodeTraversal.Callback;
import com.google.javascript.rhino.IR;
import com.google.javascript.rhino.Node;
import com.google.javascript.rhino.Token;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
/** @author johnlenz@google.com (John Lenz) */
@RunWith(JUnit4.class)
public final class FunctionToBlockMutatorTest {
private boolean needsDefaultResult;
private boolean isCallInLoop;
@Before
public void setUp() {
needsDefaultResult = false;
isCallInLoop = false;
}
@Test
public void testMutateNoReturnWithoutResultAssignment() {
helperMutate(
"function foo(){}; foo();",
"{}",
"foo");
}
@Test
public void testMutateNoReturnWithResultAssignment() {
needsDefaultResult = true;
helperMutate(
"function foo(){}; var result = foo();",
"{result = void 0}",
"foo");
}
@Test
public void testMutateNoValueReturnWithoutResultAssignment() {
helperMutate(
"function foo(){return;}; foo();",
"{}",
"foo", null);
}
@Test
public void testMutateNoValueReturnWithResultAssignment() {
helperMutate(
"function foo(){return;}; var result = foo();",
"{result = void 0}",
"foo");
}
@Test
public void testMutateValueReturnWithoutResultAssignment() {
helperMutate(
"function foo(){return true;}; foo();",
"{true;}",
"foo", null);
}
@Test
public void testMutateValueReturnWithResultAssignment() {
needsDefaultResult = true;
helperMutate(
"function foo(){return true;}; var x=foo();",
"{x=true}",
"foo", "x");
}
@Test
public void testMutateWithMultipleReturns() {
needsDefaultResult = true;
helperMutate(
"function foo(){ if (0) {return 0} else {return 1} }; var result=foo();",
lines(
"{",
" JSCompiler_inline_label_foo_0: {",
" if (0) {",
" result = 0;",
" break JSCompiler_inline_label_foo_0",
" } else {",
" result = 1;",
" break JSCompiler_inline_label_foo_0",
" }",
" result=void 0",
" }",
"}"),
"foo");
}
@Test
public void testMutateWithParameters1() {
// Simple call with useless parameter
helperMutate(
"function foo(a){return true;}; foo(x);",
"{true}",
"foo", null);
}
@Test
public void testMutateWithParameters2() {
// Simple call with parameter
helperMutate(
"function foo(a){return x;}; foo(x);",
"{x}",
"foo", null);
}
@Test
public void testMutateWithParameters3() {
// Parameter has side-effects.
helperMutate(
"function foo(a){return a;}; function x() { foo(x++); }",
"{x++;}",
"foo", null);
}
@Test
public void testMutate8() {
// Parameter has side-effects.
helperMutate(
"function foo(a){return a+a;}; foo(x++);",
"{var a$jscomp$inline_0 = x++; a$jscomp$inline_0 + a$jscomp$inline_0;}",
"foo", null);
}
@Test
public void testMutateInitializeUninitializedVars1() {
isCallInLoop = true;
helperMutate(
"function foo(a){var b;return a;}; foo(1);",
"{var b$jscomp$inline_1 = void 0; 1;}",
"foo",
null);
}
@Test
public void testMutateInitializeUninitializedVars2() {
helperMutate(
"function foo(a) {var b; for(b in c)return a;}; foo(1);",
lines(
"{",
" JSCompiler_inline_label_foo_2:",
" {",
" var b$jscomp$inline_1;",
" for (b$jscomp$inline_1 in c) {",
" 1;",
" break JSCompiler_inline_label_foo_2;",
" }",
" }",
"}"),
"foo",
null);
}
@Test
public void testMutateInitializeUninitializedLets1() {
isCallInLoop = true;
helperMutate(
"function foo(a){let b;return a;}; foo(1);",
"{let b$jscomp$inline_1 = void 0; 1;}",
"foo",
null);
}
@Test
public void testMutateInitializeUninitializedLets2() {
helperMutate(
"function foo(a) {for(let b in c)return a;}; foo(1);",
lines(
"{",
" JSCompiler_inline_label_foo_2:",
" {",
" for (let b$jscomp$inline_1 in c) {",
" 1;",
" break JSCompiler_inline_label_foo_2;",
" }",
" }",
"}"),
"foo",
null);
}
@Test
public void testMutateCallInLoopVars1() {
String src = lines(
"function foo(a) {",
" var B = bar();",
" a;",
"};",
"foo(1);");
// baseline: outside a loop, the constant remains constant.
isCallInLoop = false;
helperMutate(
src,
"{var B$jscomp$inline_1 = bar(); 1;}",
"foo",
null);
// ... in a loop, the constant-ness is removed.
// TODO(johnlenz): update this test to look for the const annotation.
isCallInLoop = true;
helperMutate(
src,
"{var B$jscomp$inline_1 = bar(); 1;}",
"foo",
null);
}
@Test
public void testMutateFunctionDefinition() {
// Function declarations are rewritten as function expressions.
helperMutate(
"function foo(a){function g(){}}; foo(1);",
"{var g$jscomp$inline_1 = function() {};}",
"foo",
null);
}
@Test
public void testMutateFunctionDefinitionHoisting() {
helperMutate(
lines(
"function foo(a){",
" var b = g(a);",
" function g(c){ return c; }",
" var c = i();",
" function h(){}",
" function i(){}",
"}",
"foo(1);"),
lines(
"{",
" var g$jscomp$inline_1 = function(c$jscomp$inline_6) {return c$jscomp$inline_6};",
" var h$jscomp$inline_2 = function(){};",
" var i$jscomp$inline_3 = function(){};",
" var b$jscomp$inline_4 = g$jscomp$inline_1(1);",
" var c$jscomp$inline_5 = i$jscomp$inline_3();",
"}"),
"foo",
null);
}
private void validateSourceInfo(Compiler compiler, Node subtree) {
(new LineNumberCheck(compiler)).setCheckSubTree(subtree);
assertThat(compiler.getErrors()).isEmpty();
}
public void helperMutate(String code, String expectedResult, String fnName) {
helperMutate(code, expectedResult, fnName, "result");
}
public void helperMutate(String code, String expectedResult, String fnName, String resultName) {
final Compiler compiler = new Compiler();
compiler.initCompilerOptionsIfTesting();
final FunctionToBlockMutator mutator = new FunctionToBlockMutator(
compiler, compiler.getUniqueNameIdSupplier());
Node expectedRoot = parse(compiler, expectedResult);
checkState(compiler.getErrorCount() == 0);
final Node expected = expectedRoot.getFirstChild();
final Node script = parse(compiler, code);
checkState(compiler.getErrorCount() == 0);
compiler.externsRoot = new Node(Token.ROOT);
compiler.jsRoot = IR.root(script);
compiler.externAndJsRoot = IR.root(compiler.externsRoot, compiler.jsRoot);
new Normalize(compiler, false).process(compiler.externsRoot, compiler.jsRoot);
new PureFunctionIdentifier.Driver(compiler).process(compiler.externsRoot, compiler.jsRoot);
final Node fnNode = findFunction(script, fnName);
// inline tester
Method tester =
(NodeTraversal t, Node n, Node parent) -> {
Node result =
mutator.mutate(fnName, fnNode, n, resultName, needsDefaultResult, isCallInLoop);
validateSourceInfo(compiler, result);
assertNode(result).usingSerializer(compiler::toSource).isEqualTo(expected);
return true;
};
compiler.resetUniqueNameId();
TestCallback test = new TestCallback(fnName, tester);
NodeTraversal.traverse(compiler, script, test);
}
  /**
   * Callback invoked when a call to the function under test is found; returns true
   * once the test work is complete (which stops further traversal).
   */
  @FunctionalInterface
  private interface Method {
    boolean call(NodeTraversal t, Node n, Node parent);
  }
static class TestCallback implements Callback {
private final String callname;
private final Method method;
private boolean complete = false;
TestCallback(String callname, Method method) {
this.callname = callname;
this.method = method;
}
@Override
public boolean shouldTraverse(
NodeTraversal nodeTraversal, Node n, Node parent) {
return !complete;
}
@Override
public void visit(NodeTraversal t, Node n, Node parent) {
if (n.isCall()) {
Node first = n.getFirstChild();
if (first.isName() && first.getString().equals(callname)) {
complete = method.call(t, n, parent);
}
}
if (parent == null) {
assertThat(complete).isTrue();
}
}
}
private static Node findFunction(Node n, String name) {
if (n.isFunction()) {
if (n.getFirstChild().getString().equals(name)) {
return n;
}
}
for (Node c : n.children()) {
Node result = findFunction(c, name);
if (result != null) {
return result;
}
}
return null;
}
  /** Parses {@code js} as test code and asserts the parse produced no errors. */
  private static Node parse(Compiler compiler, String js) {
    Node n = compiler.parseTestCode(js);
    assertThat(compiler.getErrorCount()).isEqualTo(0);
    return n;
  }
}
|
/*
* Copyright 2014 NAVER Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.navercorp.pinpoint.common.trace;
/**
 * Immutable descriptor of one histogram bucket: a slot time value, its
 * {@link SlotType}, and a display name. Rejects null type/name at construction.
 *
 * @author emeroad
 */
public class HistogramSlot {

    private final short slotTime;
    private final SlotType slotType;
    private final String slotName;

    /**
     * @param slotTime time value associated with this slot
     * @param slotType category of the slot; must not be null
     * @param slotName display name of the slot; must not be null
     * @throws NullPointerException if {@code slotType} or {@code slotName} is null
     */
    public HistogramSlot(short slotTime, SlotType slotType, String slotName) {
        if (slotType == null) {
            throw new NullPointerException("slotType");
        }
        if (slotName == null) {
            throw new NullPointerException("slotName");
        }
        this.slotType = slotType;
        this.slotName = slotName;
        this.slotTime = slotTime;
    }

    public short getSlotTime() {
        return slotTime;
    }

    public SlotType getSlotType() {
        return slotType;
    }

    public String getSlotName() {
        return slotName;
    }

    @Override
    public String toString() {
        return "HistogramSlot{"
                + "slotTime=" + slotTime
                + ", slotType=" + slotType
                + ", slotName='" + slotName + '\''
                + '}';
    }
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.builder.endpoint.dsl;
import java.util.Map;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import javax.annotation.Generated;
import org.apache.camel.ExchangePattern;
import org.apache.camel.LoggingLevel;
import org.apache.camel.builder.EndpointConsumerBuilder;
import org.apache.camel.builder.EndpointProducerBuilder;
import org.apache.camel.builder.endpoint.AbstractEndpointBuilder;
import org.apache.camel.spi.ExceptionHandler;
import org.apache.camel.spi.PollingConsumerPollStrategy;
import org.apache.camel.spi.ScheduledPollConsumerScheduler;
/**
* The beanstalk component is used for job retrieval and post-processing of
* Beanstalk jobs.
*
* Generated by camel-package-maven-plugin - do not edit this file!
*/
@Generated("org.apache.camel.maven.packaging.EndpointDslMojo")
public interface BeanstalkEndpointBuilderFactory {
/**
* Builder for endpoint consumers for the Beanstalk component.
*/
public interface BeanstalkEndpointConsumerBuilder
extends
EndpointConsumerBuilder {
default AdvancedBeanstalkEndpointConsumerBuilder advanced() {
return (AdvancedBeanstalkEndpointConsumerBuilder) this;
}
/**
* Connection settings host:port/tube.
* The option is a <code>java.lang.String</code> type.
* @group common
*/
default BeanstalkEndpointConsumerBuilder connectionSettings(
String connectionSettings) {
setProperty("connectionSettings", connectionSettings);
return this;
}
/**
* put means to put the job into Beanstalk. Job body is specified in the
* Camel message body. Job ID will be returned in beanstalk.jobId
* message header. delete, release, touch or bury expect Job ID in the
* message header beanstalk.jobId. Result of the operation is returned
* in beanstalk.result message header kick expects the number of jobs to
* kick in the message body and returns the number of jobs actually
* kicked out in the message header beanstalk.result.
* The option is a
* <code>org.apache.camel.component.beanstalk.BeanstalkCommand</code>
* type.
* @group common
*/
default BeanstalkEndpointConsumerBuilder command(
BeanstalkCommand command) {
setProperty("command", command);
return this;
}
/**
* put means to put the job into Beanstalk. Job body is specified in the
* Camel message body. Job ID will be returned in beanstalk.jobId
* message header. delete, release, touch or bury expect Job ID in the
* message header beanstalk.jobId. Result of the operation is returned
* in beanstalk.result message header kick expects the number of jobs to
* kick in the message body and returns the number of jobs actually
* kicked out in the message header beanstalk.result.
* The option will be converted to a
* <code>org.apache.camel.component.beanstalk.BeanstalkCommand</code>
* type.
* @group common
*/
default BeanstalkEndpointConsumerBuilder command(String command) {
setProperty("command", command);
return this;
}
/**
* Job delay in seconds.
* The option is a <code>int</code> type.
* @group common
*/
default BeanstalkEndpointConsumerBuilder jobDelay(int jobDelay) {
setProperty("jobDelay", jobDelay);
return this;
}
/**
* Job delay in seconds.
* The option will be converted to a <code>int</code> type.
* @group common
*/
default BeanstalkEndpointConsumerBuilder jobDelay(String jobDelay) {
setProperty("jobDelay", jobDelay);
return this;
}
/**
* Job priority. (0 is the highest, see Beanstalk protocol).
* The option is a <code>long</code> type.
* @group common
*/
default BeanstalkEndpointConsumerBuilder jobPriority(long jobPriority) {
setProperty("jobPriority", jobPriority);
return this;
}
/**
* Job priority. (0 is the highest, see Beanstalk protocol).
* The option will be converted to a <code>long</code> type.
* @group common
*/
default BeanstalkEndpointConsumerBuilder jobPriority(String jobPriority) {
setProperty("jobPriority", jobPriority);
return this;
}
/**
* Job time to run in seconds. (when 0, the beanstalkd daemon raises it
* to 1 automatically, see Beanstalk protocol).
* The option is a <code>int</code> type.
* @group common
*/
default BeanstalkEndpointConsumerBuilder jobTimeToRun(int jobTimeToRun) {
setProperty("jobTimeToRun", jobTimeToRun);
return this;
}
/**
* Job time to run in seconds. (when 0, the beanstalkd daemon raises it
* to 1 automatically, see Beanstalk protocol).
* The option will be converted to a <code>int</code> type.
* @group common
*/
default BeanstalkEndpointConsumerBuilder jobTimeToRun(
String jobTimeToRun) {
setProperty("jobTimeToRun", jobTimeToRun);
return this;
}
/**
* Whether to wait for job to complete before ack the job from
* beanstalk.
* The option is a <code>boolean</code> type.
* @group consumer
*/
default BeanstalkEndpointConsumerBuilder awaitJob(boolean awaitJob) {
setProperty("awaitJob", awaitJob);
return this;
}
/**
* Whether to wait for job to complete before ack the job from
* beanstalk.
* The option will be converted to a <code>boolean</code> type.
* @group consumer
*/
default BeanstalkEndpointConsumerBuilder awaitJob(String awaitJob) {
setProperty("awaitJob", awaitJob);
return this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions occurred while the consumer is trying to
* pickup incoming messages, or the likes, will now be processed as a
* message and handled by the routing Error Handler. By default the
* consumer will use the org.apache.camel.spi.ExceptionHandler to deal
* with exceptions, that will be logged at WARN or ERROR level and
* ignored.
* The option is a <code>boolean</code> type.
* @group consumer
*/
default BeanstalkEndpointConsumerBuilder bridgeErrorHandler(
boolean bridgeErrorHandler) {
setProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Allows for bridging the consumer to the Camel routing Error Handler,
* which mean any exceptions occurred while the consumer is trying to
* pickup incoming messages, or the likes, will now be processed as a
* message and handled by the routing Error Handler. By default the
* consumer will use the org.apache.camel.spi.ExceptionHandler to deal
* with exceptions, that will be logged at WARN or ERROR level and
* ignored.
* The option will be converted to a <code>boolean</code> type.
* @group consumer
*/
default BeanstalkEndpointConsumerBuilder bridgeErrorHandler(
String bridgeErrorHandler) {
setProperty("bridgeErrorHandler", bridgeErrorHandler);
return this;
}
/**
* Command to use when processing failed.
* The option is a
* <code>org.apache.camel.component.beanstalk.BeanstalkCommand</code>
* type.
* @group consumer
*/
default BeanstalkEndpointConsumerBuilder onFailure(
BeanstalkCommand onFailure) {
setProperty("onFailure", onFailure);
return this;
}
/**
* Command to use when processing failed.
* The option will be converted to a
* <code>org.apache.camel.component.beanstalk.BeanstalkCommand</code>
* type.
* @group consumer
*/
default BeanstalkEndpointConsumerBuilder onFailure(String onFailure) {
setProperty("onFailure", onFailure);
return this;
}
/**
* If the polling consumer did not poll any files, you can enable this
* option to send an empty message (no body) instead.
* The option is a <code>boolean</code> type.
* @group consumer
*/
default BeanstalkEndpointConsumerBuilder sendEmptyMessageWhenIdle(
boolean sendEmptyMessageWhenIdle) {
setProperty("sendEmptyMessageWhenIdle", sendEmptyMessageWhenIdle);
return this;
}
/**
* If the polling consumer did not poll any files, you can enable this
* option to send an empty message (no body) instead.
* The option will be converted to a <code>boolean</code> type.
* @group consumer
*/
default BeanstalkEndpointConsumerBuilder sendEmptyMessageWhenIdle(
String sendEmptyMessageWhenIdle) {
setProperty("sendEmptyMessageWhenIdle", sendEmptyMessageWhenIdle);
return this;
}
/**
* Whether to use blockIO.
* The option is a <code>boolean</code> type.
* @group consumer
*/
default BeanstalkEndpointConsumerBuilder useBlockIO(boolean useBlockIO) {
setProperty("useBlockIO", useBlockIO);
return this;
}
/**
* Whether to use blockIO.
* The option will be converted to a <code>boolean</code> type.
* @group consumer
*/
default BeanstalkEndpointConsumerBuilder useBlockIO(String useBlockIO) {
setProperty("useBlockIO", useBlockIO);
return this;
}
/**
* The number of subsequent error polls (failed due some error) that
* should happen before the backoffMultipler should kick-in.
* The option is a <code>int</code> type.
* @group scheduler
*/
default BeanstalkEndpointConsumerBuilder backoffErrorThreshold(
int backoffErrorThreshold) {
setProperty("backoffErrorThreshold", backoffErrorThreshold);
return this;
}
/**
* The number of subsequent error polls (failed due some error) that
* should happen before the backoffMultipler should kick-in.
* The option will be converted to a <code>int</code> type.
* @group scheduler
*/
default BeanstalkEndpointConsumerBuilder backoffErrorThreshold(
String backoffErrorThreshold) {
setProperty("backoffErrorThreshold", backoffErrorThreshold);
return this;
}
/**
* The number of subsequent idle polls that should happen before the
* backoffMultipler should kick-in.
* The option is a <code>int</code> type.
* @group scheduler
*/
default BeanstalkEndpointConsumerBuilder backoffIdleThreshold(
int backoffIdleThreshold) {
setProperty("backoffIdleThreshold", backoffIdleThreshold);
return this;
}
/**
* The number of subsequent idle polls that should happen before the
* backoffMultipler should kick-in.
* The option will be converted to a <code>int</code> type.
* @group scheduler
*/
default BeanstalkEndpointConsumerBuilder backoffIdleThreshold(
String backoffIdleThreshold) {
setProperty("backoffIdleThreshold", backoffIdleThreshold);
return this;
}
/**
* To let the scheduled polling consumer backoff if there has been a
* number of subsequent idles/errors in a row. The multiplier is then
* the number of polls that will be skipped before the next actual
* attempt is happening again. When this option is in use then
* backoffIdleThreshold and/or backoffErrorThreshold must also be
* configured.
* The option is a <code>int</code> type.
* @group scheduler
*/
default BeanstalkEndpointConsumerBuilder backoffMultiplier(
int backoffMultiplier) {
setProperty("backoffMultiplier", backoffMultiplier);
return this;
}
/**
* To let the scheduled polling consumer backoff if there has been a
* number of subsequent idles/errors in a row. The multiplier is then
* the number of polls that will be skipped before the next actual
* attempt is happening again. When this option is in use then
* backoffIdleThreshold and/or backoffErrorThreshold must also be
* configured.
* The option will be converted to a <code>int</code> type.
* @group scheduler
*/
default BeanstalkEndpointConsumerBuilder backoffMultiplier(
String backoffMultiplier) {
setProperty("backoffMultiplier", backoffMultiplier);
return this;
}
/**
* Milliseconds before the next poll. You can also specify time values
* using units, such as 60s (60 seconds), 5m30s (5 minutes and 30
* seconds), and 1h (1 hour).
* The option is a <code>long</code> type.
* @group scheduler
*/
default BeanstalkEndpointConsumerBuilder delay(long delay) {
setProperty("delay", delay);
return this;
}
/**
* Milliseconds before the next poll. You can also specify time values
* using units, such as 60s (60 seconds), 5m30s (5 minutes and 30
* seconds), and 1h (1 hour).
* The option will be converted to a <code>long</code> type.
* @group scheduler
*/
default BeanstalkEndpointConsumerBuilder delay(String delay) {
setProperty("delay", delay);
return this;
}
/**
* If greedy is enabled, then the ScheduledPollConsumer will run
* immediately again, if the previous run polled 1 or more messages.
* The option is a <code>boolean</code> type.
* @group scheduler
*/
default BeanstalkEndpointConsumerBuilder greedy(boolean greedy) {
setProperty("greedy", greedy);
return this;
}
/**
* If greedy is enabled, then the ScheduledPollConsumer will run
* immediately again, if the previous run polled 1 or more messages.
* The option will be converted to a <code>boolean</code> type.
* @group scheduler
*/
default BeanstalkEndpointConsumerBuilder greedy(String greedy) {
setProperty("greedy", greedy);
return this;
}
/**
* Milliseconds before the first poll starts. You can also specify time
* values using units, such as 60s (60 seconds), 5m30s (5 minutes and 30
* seconds), and 1h (1 hour).
* The option is a <code>long</code> type.
* @group scheduler
*/
default BeanstalkEndpointConsumerBuilder initialDelay(long initialDelay) {
setProperty("initialDelay", initialDelay);
return this;
}
/**
* Milliseconds before the first poll starts. You can also specify time
* values using units, such as 60s (60 seconds), 5m30s (5 minutes and 30
* seconds), and 1h (1 hour).
* The option will be converted to a <code>long</code> type.
* @group scheduler
*/
default BeanstalkEndpointConsumerBuilder initialDelay(
String initialDelay) {
setProperty("initialDelay", initialDelay);
return this;
}
/**
* The consumer logs a start/complete log line when it polls. This
* option allows you to configure the logging level for that.
* The option is a <code>org.apache.camel.LoggingLevel</code> type.
* @group scheduler
*/
default BeanstalkEndpointConsumerBuilder runLoggingLevel(
LoggingLevel runLoggingLevel) {
setProperty("runLoggingLevel", runLoggingLevel);
return this;
}
/**
* The consumer logs a start/complete log line when it polls. This
* option allows you to configure the logging level for that.
* The option will be converted to a
* <code>org.apache.camel.LoggingLevel</code> type.
* @group scheduler
*/
default BeanstalkEndpointConsumerBuilder runLoggingLevel(
String runLoggingLevel) {
setProperty("runLoggingLevel", runLoggingLevel);
return this;
}
/**
* Allows for configuring a custom/shared thread pool to use for the
* consumer. By default each consumer has its own single threaded thread
* pool.
* The option is a
* <code>java.util.concurrent.ScheduledExecutorService</code> type.
* @group scheduler
*/
default BeanstalkEndpointConsumerBuilder scheduledExecutorService(
ScheduledExecutorService scheduledExecutorService) {
setProperty("scheduledExecutorService", scheduledExecutorService);
return this;
}
/**
* Allows for configuring a custom/shared thread pool to use for the
* consumer. By default each consumer has its own single threaded thread
* pool.
* The option will be converted to a
* <code>java.util.concurrent.ScheduledExecutorService</code> type.
* @group scheduler
*/
default BeanstalkEndpointConsumerBuilder scheduledExecutorService(
String scheduledExecutorService) {
setProperty("scheduledExecutorService", scheduledExecutorService);
return this;
}
/**
* To use a cron scheduler from either camel-spring or camel-quartz2
* component.
* The option is a
* <code>org.apache.camel.spi.ScheduledPollConsumerScheduler</code>
* type.
* @group scheduler
*/
default BeanstalkEndpointConsumerBuilder scheduler(
ScheduledPollConsumerScheduler scheduler) {
setProperty("scheduler", scheduler);
return this;
}
/**
* To use a cron scheduler from either camel-spring or camel-quartz2
* component.
* The option will be converted to a
* <code>org.apache.camel.spi.ScheduledPollConsumerScheduler</code>
* type.
* @group scheduler
*/
default BeanstalkEndpointConsumerBuilder scheduler(String scheduler) {
setProperty("scheduler", scheduler);
return this;
}
/**
* To configure additional properties when using a custom scheduler or
* any of the Quartz2, Spring based scheduler.
* The option is a <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
* @group scheduler
*/
default BeanstalkEndpointConsumerBuilder schedulerProperties(
Map<String, Object> schedulerProperties) {
setProperty("schedulerProperties", schedulerProperties);
return this;
}
/**
* To configure additional properties when using a custom scheduler or
* any of the Quartz2, Spring based scheduler.
* The option will be converted to a
* <code>java.util.Map<java.lang.String, java.lang.Object></code>
* type.
* @group scheduler
*/
default BeanstalkEndpointConsumerBuilder schedulerProperties(
String schedulerProperties) {
setProperty("schedulerProperties", schedulerProperties);
return this;
}
/**
* Whether the scheduler should be auto started.
* The option is a <code>boolean</code> type.
* @group scheduler
*/
default BeanstalkEndpointConsumerBuilder startScheduler(
boolean startScheduler) {
setProperty("startScheduler", startScheduler);
return this;
}
/**
* Whether the scheduler should be auto started.
* The option will be converted to a <code>boolean</code> type.
* @group scheduler
*/
default BeanstalkEndpointConsumerBuilder startScheduler(
String startScheduler) {
setProperty("startScheduler", startScheduler);
return this;
}
/**
* Time unit for initialDelay and delay options.
* The option is a <code>java.util.concurrent.TimeUnit</code> type.
* @group scheduler
*/
default BeanstalkEndpointConsumerBuilder timeUnit(TimeUnit timeUnit) {
setProperty("timeUnit", timeUnit);
return this;
}
/**
* Time unit for initialDelay and delay options.
* The option will be converted to a
* <code>java.util.concurrent.TimeUnit</code> type.
* @group scheduler
*/
default BeanstalkEndpointConsumerBuilder timeUnit(String timeUnit) {
setProperty("timeUnit", timeUnit);
return this;
}
/**
* Controls if fixed delay or fixed rate is used. See
* ScheduledExecutorService in JDK for details.
* The option is a <code>boolean</code> type.
* @group scheduler
*/
default BeanstalkEndpointConsumerBuilder useFixedDelay(
boolean useFixedDelay) {
setProperty("useFixedDelay", useFixedDelay);
return this;
}
/**
* Controls if fixed delay or fixed rate is used. See
* ScheduledExecutorService in JDK for details.
* The option will be converted to a <code>boolean</code> type.
* @group scheduler
*/
default BeanstalkEndpointConsumerBuilder useFixedDelay(
String useFixedDelay) {
setProperty("useFixedDelay", useFixedDelay);
return this;
}
}
/**
* Advanced builder for endpoint consumers for the Beanstalk component.
*/
public interface AdvancedBeanstalkEndpointConsumerBuilder
extends
EndpointConsumerBuilder {
default BeanstalkEndpointConsumerBuilder basic() {
return (BeanstalkEndpointConsumerBuilder) this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
* The option is a <code>org.apache.camel.spi.ExceptionHandler</code>
* type.
* @group consumer (advanced)
*/
default AdvancedBeanstalkEndpointConsumerBuilder exceptionHandler(
ExceptionHandler exceptionHandler) {
setProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* To let the consumer use a custom ExceptionHandler. Notice if the
* option bridgeErrorHandler is enabled then this option is not in use.
* By default the consumer will deal with exceptions, that will be
* logged at WARN or ERROR level and ignored.
* The option will be converted to a
* <code>org.apache.camel.spi.ExceptionHandler</code> type.
* @group consumer (advanced)
*/
default AdvancedBeanstalkEndpointConsumerBuilder exceptionHandler(
String exceptionHandler) {
setProperty("exceptionHandler", exceptionHandler);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
* The option is a <code>org.apache.camel.ExchangePattern</code> type.
* @group consumer (advanced)
*/
default AdvancedBeanstalkEndpointConsumerBuilder exchangePattern(
ExchangePattern exchangePattern) {
setProperty("exchangePattern", exchangePattern);
return this;
}
/**
* Sets the exchange pattern when the consumer creates an exchange.
* The option will be converted to a
* <code>org.apache.camel.ExchangePattern</code> type.
* @group consumer (advanced)
*/
default AdvancedBeanstalkEndpointConsumerBuilder exchangePattern(
String exchangePattern) {
setProperty("exchangePattern", exchangePattern);
return this;
}
/**
* A pluggable org.apache.camel.PollingConsumerPollingStrategy allowing
* you to provide your custom implementation to control error handling
* usually occurred during the poll operation before an Exchange have
* been created and being routed in Camel.
* The option is a
* <code>org.apache.camel.spi.PollingConsumerPollStrategy</code> type.
* @group consumer (advanced)
*/
default AdvancedBeanstalkEndpointConsumerBuilder pollStrategy(
PollingConsumerPollStrategy pollStrategy) {
setProperty("pollStrategy", pollStrategy);
return this;
}
/**
* A pluggable org.apache.camel.PollingConsumerPollingStrategy allowing
* you to provide your custom implementation to control error handling
* usually occurred during the poll operation before an Exchange have
* been created and being routed in Camel.
* The option will be converted to a
* <code>org.apache.camel.spi.PollingConsumerPollStrategy</code> type.
* @group consumer (advanced)
*/
default AdvancedBeanstalkEndpointConsumerBuilder pollStrategy(
String pollStrategy) {
setProperty("pollStrategy", pollStrategy);
return this;
}
/**
* Whether the endpoint should use basic property binding (Camel 2.x) or
* the newer property binding with additional capabilities.
* The option is a <code>boolean</code> type.
* @group advanced
*/
default AdvancedBeanstalkEndpointConsumerBuilder basicPropertyBinding(
boolean basicPropertyBinding) {
setProperty("basicPropertyBinding", basicPropertyBinding);
return this;
}
/**
* Whether the endpoint should use basic property binding (Camel 2.x) or
* the newer property binding with additional capabilities.
* The option will be converted to a <code>boolean</code> type.
* @group advanced
*/
default AdvancedBeanstalkEndpointConsumerBuilder basicPropertyBinding(
String basicPropertyBinding) {
setProperty("basicPropertyBinding", basicPropertyBinding);
return this;
}
/**
* Sets whether synchronous processing should be strictly used, or Camel
* is allowed to use asynchronous processing (if supported).
* The option is a <code>boolean</code> type.
* @group advanced
*/
default AdvancedBeanstalkEndpointConsumerBuilder synchronous(
boolean synchronous) {
setProperty("synchronous", synchronous);
return this;
}
/**
* Sets whether synchronous processing should be strictly used, or Camel
* is allowed to use asynchronous processing (if supported).
* The option will be converted to a <code>boolean</code> type.
* @group advanced
*/
default AdvancedBeanstalkEndpointConsumerBuilder synchronous(
String synchronous) {
setProperty("synchronous", synchronous);
return this;
}
}
/**
* Builder for endpoint producers for the Beanstalk component.
*/
public interface BeanstalkEndpointProducerBuilder
extends
EndpointProducerBuilder {
default AdvancedBeanstalkEndpointProducerBuilder advanced() {
return (AdvancedBeanstalkEndpointProducerBuilder) this;
}
/**
* Connection settings host:port/tube.
* The option is a <code>java.lang.String</code> type.
* @group common
*/
default BeanstalkEndpointProducerBuilder connectionSettings(
String connectionSettings) {
setProperty("connectionSettings", connectionSettings);
return this;
}
/**
* put means to put the job into Beanstalk. Job body is specified in the
* Camel message body. Job ID will be returned in beanstalk.jobId
* message header. delete, release, touch or bury expect Job ID in the
* message header beanstalk.jobId. Result of the operation is returned
* in beanstalk.result message header kick expects the number of jobs to
* kick in the message body and returns the number of jobs actually
* kicked out in the message header beanstalk.result.
* The option is a
* <code>org.apache.camel.component.beanstalk.BeanstalkCommand</code>
* type.
* @group common
*/
default BeanstalkEndpointProducerBuilder command(
BeanstalkCommand command) {
setProperty("command", command);
return this;
}
/**
* put means to put the job into Beanstalk. Job body is specified in the
* Camel message body. Job ID will be returned in beanstalk.jobId
* message header. delete, release, touch or bury expect Job ID in the
* message header beanstalk.jobId. Result of the operation is returned
* in beanstalk.result message header kick expects the number of jobs to
* kick in the message body and returns the number of jobs actually
* kicked out in the message header beanstalk.result.
* The option will be converted to a
* <code>org.apache.camel.component.beanstalk.BeanstalkCommand</code>
* type.
* @group common
*/
default BeanstalkEndpointProducerBuilder command(String command) {
setProperty("command", command);
return this;
}
/**
* Job delay in seconds.
* The option is a <code>int</code> type.
* @group common
*/
default BeanstalkEndpointProducerBuilder jobDelay(int jobDelay) {
setProperty("jobDelay", jobDelay);
return this;
}
/**
* Job delay in seconds.
* The option will be converted to a <code>int</code> type.
* @group common
*/
default BeanstalkEndpointProducerBuilder jobDelay(String jobDelay) {
setProperty("jobDelay", jobDelay);
return this;
}
/**
* Job priority. (0 is the highest, see Beanstalk protocol).
* The option is a <code>long</code> type.
* @group common
*/
default BeanstalkEndpointProducerBuilder jobPriority(long jobPriority) {
setProperty("jobPriority", jobPriority);
return this;
}
/**
* Job priority. (0 is the highest, see Beanstalk protocol).
* The option will be converted to a <code>long</code> type.
* @group common
*/
default BeanstalkEndpointProducerBuilder jobPriority(String jobPriority) {
setProperty("jobPriority", jobPriority);
return this;
}
/**
* Job time to run in seconds. (when 0, the beanstalkd daemon raises it
* to 1 automatically, see Beanstalk protocol).
* The option is a <code>int</code> type.
* @group common
*/
default BeanstalkEndpointProducerBuilder jobTimeToRun(int jobTimeToRun) {
setProperty("jobTimeToRun", jobTimeToRun);
return this;
}
/**
* Job time to run in seconds. (when 0, the beanstalkd daemon raises it
* to 1 automatically, see Beanstalk protocol).
* The option will be converted to a <code>int</code> type.
* @group common
*/
default BeanstalkEndpointProducerBuilder jobTimeToRun(
String jobTimeToRun) {
setProperty("jobTimeToRun", jobTimeToRun);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
* The option is a <code>boolean</code> type.
* @group producer
*/
default BeanstalkEndpointProducerBuilder lazyStartProducer(
boolean lazyStartProducer) {
setProperty("lazyStartProducer", lazyStartProducer);
return this;
}
/**
* Whether the producer should be started lazy (on the first message).
* By starting lazy you can use this to allow CamelContext and routes to
* startup in situations where a producer may otherwise fail during
* starting and cause the route to fail being started. By deferring this
* startup to be lazy then the startup failure can be handled during
* routing messages via Camel's routing error handlers. Beware that when
* the first message is processed then creating and starting the
* producer may take a little time and prolong the total processing time
* of the processing.
* The option will be converted to a <code>boolean</code> type.
* @group producer
*/
default BeanstalkEndpointProducerBuilder lazyStartProducer(
String lazyStartProducer) {
setProperty("lazyStartProducer", lazyStartProducer);
return this;
}
}
    /**
     * Advanced builder for endpoint producers for the Beanstalk component.
     * Generated endpoint-DSL code; each method stores one endpoint option and
     * returns {@code this} for fluent chaining.
     */
    public interface AdvancedBeanstalkEndpointProducerBuilder
            extends
                EndpointProducerBuilder {
        // Switch back to the basic (non-advanced) view of the same builder instance.
        default BeanstalkEndpointProducerBuilder basic() {
            return (BeanstalkEndpointProducerBuilder) this;
        }
        /**
         * Whether the endpoint should use basic property binding (Camel 2.x) or
         * the newer property binding with additional capabilities.
         * The option is a <code>boolean</code> type.
         * @group advanced
         */
        default AdvancedBeanstalkEndpointProducerBuilder basicPropertyBinding(
                boolean basicPropertyBinding) {
            setProperty("basicPropertyBinding", basicPropertyBinding);
            return this;
        }
        /**
         * Whether the endpoint should use basic property binding (Camel 2.x) or
         * the newer property binding with additional capabilities.
         * The option will be converted to a <code>boolean</code> type.
         * @group advanced
         */
        default AdvancedBeanstalkEndpointProducerBuilder basicPropertyBinding(
                String basicPropertyBinding) {
            setProperty("basicPropertyBinding", basicPropertyBinding);
            return this;
        }
        /**
         * Sets whether synchronous processing should be strictly used, or Camel
         * is allowed to use asynchronous processing (if supported).
         * The option is a <code>boolean</code> type.
         * @group advanced
         */
        default AdvancedBeanstalkEndpointProducerBuilder synchronous(
                boolean synchronous) {
            setProperty("synchronous", synchronous);
            return this;
        }
        /**
         * Sets whether synchronous processing should be strictly used, or Camel
         * is allowed to use asynchronous processing (if supported).
         * The option will be converted to a <code>boolean</code> type.
         * @group advanced
         */
        default AdvancedBeanstalkEndpointProducerBuilder synchronous(
                String synchronous) {
            setProperty("synchronous", synchronous);
            return this;
        }
    }
    /**
     * Builder for endpoint for the Beanstalk component.
     * Generated endpoint-DSL code combining the consumer and producer views;
     * each String overload is converted to the documented target type when the
     * endpoint is configured.
     */
    public interface BeanstalkEndpointBuilder
            extends
                BeanstalkEndpointConsumerBuilder, BeanstalkEndpointProducerBuilder {
        // Switch to the advanced view of the same builder instance.
        default AdvancedBeanstalkEndpointBuilder advanced() {
            return (AdvancedBeanstalkEndpointBuilder) this;
        }
        /**
         * Connection settings host:port/tube.
         * The option is a <code>java.lang.String</code> type.
         * @group common
         */
        default BeanstalkEndpointBuilder connectionSettings(
                String connectionSettings) {
            setProperty("connectionSettings", connectionSettings);
            return this;
        }
        /**
         * put means to put the job into Beanstalk. Job body is specified in the
         * Camel message body. Job ID will be returned in beanstalk.jobId
         * message header. delete, release, touch or bury expect Job ID in the
         * message header beanstalk.jobId. Result of the operation is returned
         * in beanstalk.result message header kick expects the number of jobs to
         * kick in the message body and returns the number of jobs actually
         * kicked out in the message header beanstalk.result.
         * The option is a
         * <code>org.apache.camel.component.beanstalk.BeanstalkCommand</code>
         * type.
         * @group common
         */
        default BeanstalkEndpointBuilder command(BeanstalkCommand command) {
            setProperty("command", command);
            return this;
        }
        /**
         * put means to put the job into Beanstalk. Job body is specified in the
         * Camel message body. Job ID will be returned in beanstalk.jobId
         * message header. delete, release, touch or bury expect Job ID in the
         * message header beanstalk.jobId. Result of the operation is returned
         * in beanstalk.result message header kick expects the number of jobs to
         * kick in the message body and returns the number of jobs actually
         * kicked out in the message header beanstalk.result.
         * The option will be converted to a
         * <code>org.apache.camel.component.beanstalk.BeanstalkCommand</code>
         * type.
         * @group common
         */
        default BeanstalkEndpointBuilder command(String command) {
            setProperty("command", command);
            return this;
        }
        /**
         * Job delay in seconds.
         * The option is a <code>int</code> type.
         * @group common
         */
        default BeanstalkEndpointBuilder jobDelay(int jobDelay) {
            setProperty("jobDelay", jobDelay);
            return this;
        }
        /**
         * Job delay in seconds.
         * The option will be converted to a <code>int</code> type.
         * @group common
         */
        default BeanstalkEndpointBuilder jobDelay(String jobDelay) {
            setProperty("jobDelay", jobDelay);
            return this;
        }
        /**
         * Job priority. (0 is the highest, see Beanstalk protocol).
         * The option is a <code>long</code> type.
         * @group common
         */
        default BeanstalkEndpointBuilder jobPriority(long jobPriority) {
            setProperty("jobPriority", jobPriority);
            return this;
        }
        /**
         * Job priority. (0 is the highest, see Beanstalk protocol).
         * The option will be converted to a <code>long</code> type.
         * @group common
         */
        default BeanstalkEndpointBuilder jobPriority(String jobPriority) {
            setProperty("jobPriority", jobPriority);
            return this;
        }
        /**
         * Job time to run in seconds. (when 0, the beanstalkd daemon raises it
         * to 1 automatically, see Beanstalk protocol).
         * The option is a <code>int</code> type.
         * @group common
         */
        default BeanstalkEndpointBuilder jobTimeToRun(int jobTimeToRun) {
            setProperty("jobTimeToRun", jobTimeToRun);
            return this;
        }
        /**
         * Job time to run in seconds. (when 0, the beanstalkd daemon raises it
         * to 1 automatically, see Beanstalk protocol).
         * The option will be converted to a <code>int</code> type.
         * @group common
         */
        default BeanstalkEndpointBuilder jobTimeToRun(String jobTimeToRun) {
            setProperty("jobTimeToRun", jobTimeToRun);
            return this;
        }
    }
    /**
     * Advanced builder for endpoint for the Beanstalk component.
     * Generated endpoint-DSL code combining the advanced consumer and producer
     * views of the Beanstalk endpoint.
     */
    public interface AdvancedBeanstalkEndpointBuilder
            extends
                AdvancedBeanstalkEndpointConsumerBuilder, AdvancedBeanstalkEndpointProducerBuilder {
        // Switch back to the basic (non-advanced) view of the same builder instance.
        default BeanstalkEndpointBuilder basic() {
            return (BeanstalkEndpointBuilder) this;
        }
        /**
         * Whether the endpoint should use basic property binding (Camel 2.x) or
         * the newer property binding with additional capabilities.
         * The option is a <code>boolean</code> type.
         * @group advanced
         */
        default AdvancedBeanstalkEndpointBuilder basicPropertyBinding(
                boolean basicPropertyBinding) {
            setProperty("basicPropertyBinding", basicPropertyBinding);
            return this;
        }
        /**
         * Whether the endpoint should use basic property binding (Camel 2.x) or
         * the newer property binding with additional capabilities.
         * The option will be converted to a <code>boolean</code> type.
         * @group advanced
         */
        default AdvancedBeanstalkEndpointBuilder basicPropertyBinding(
                String basicPropertyBinding) {
            setProperty("basicPropertyBinding", basicPropertyBinding);
            return this;
        }
        /**
         * Sets whether synchronous processing should be strictly used, or Camel
         * is allowed to use asynchronous processing (if supported).
         * The option is a <code>boolean</code> type.
         * @group advanced
         */
        default AdvancedBeanstalkEndpointBuilder synchronous(boolean synchronous) {
            setProperty("synchronous", synchronous);
            return this;
        }
        /**
         * Sets whether synchronous processing should be strictly used, or Camel
         * is allowed to use asynchronous processing (if supported).
         * The option will be converted to a <code>boolean</code> type.
         * @group advanced
         */
        default AdvancedBeanstalkEndpointBuilder synchronous(String synchronous) {
            setProperty("synchronous", synchronous);
            return this;
        }
    }
/**
* Proxy enum for
* <code>org.apache.camel.component.beanstalk.BeanstalkCommand</code> enum.
*/
enum BeanstalkCommand {
bury,
release,
put,
touch,
delete,
kick;
}
    /**
     * The beanstalk component is used for job retrieval and post-processing of
     * Beanstalk jobs.
     * Maven coordinates: org.apache.camel:camel-beanstalk
     */
    default BeanstalkEndpointBuilder beanstalk(String path) {
        // Local class implementing both the basic and advanced builder views;
        // the scheme ("beanstalk") and the caller-supplied path form the endpoint URI.
        class BeanstalkEndpointBuilderImpl extends AbstractEndpointBuilder implements BeanstalkEndpointBuilder, AdvancedBeanstalkEndpointBuilder {
            public BeanstalkEndpointBuilderImpl(String path) {
                super("beanstalk", path);
            }
        }
        return new BeanstalkEndpointBuilderImpl(path);
    }
}
|
package com.messenger.messages;
/**
 * Text message addressed to every participant of a chat.
 * Type code 0 denotes a message for all participants.
 */
public class ChatMessage extends TextMessage {

    /** Identifier of the chat this message belongs to. */
    private Long chatId;

    /**
     * Creates a chat-wide message.
     *
     * @param msgText the message text, stored via {@link #setText(String)}
     * @param chatId  identifier of the target chat
     */
    public ChatMessage(String msgText, Long chatId) {
        setText(msgText);
        this.chatId = chatId;
    }

    /** @return identifier of the target chat */
    public Long getChatId() {
        return chatId;
    }

    /** @param chatId identifier of the target chat */
    public void setChatId(Long chatId) {
        this.chatId = chatId;
    }
}
|
/*
* [y] hybris Platform
*
* Copyright (c) 2000-2018 SAP SE
* All rights reserved.
*
* This software is the confidential and proprietary information of SAP
* Hybris ("Confidential Information"). You shall not disclose such
* Confidential Information and shall use it only in accordance with the
* terms of the license agreement you entered into with SAP Hybris.
*/
package com.sap.hybris.c4c.customer.service.impl;
import de.hybris.platform.servicelayer.config.ConfigurationService;
import java.io.IOException;
import com.sap.hybris.c4c.customer.constants.Sapc4ccustomerb2cConstants;
import com.sap.hybris.c4c.customer.dto.C4CCustomerData;
import com.sap.hybris.c4c.customer.service.SapC4cCustomerPublicationService;
import com.sap.hybris.scpiconnector.httpconnection.CloudPlatformIntegrationConnection;
import org.apache.commons.lang.StringUtils;
/**
* Service for publishing Customer JSON to SCPI
*/
public class DefaultSapC4cCustomerPublicationService implements SapC4cCustomerPublicationService
{
	private CloudPlatformIntegrationConnection cloudPlatformIntegrationConnection;
	private ConfigurationService configurationService;

	/**
	 * Publishes Customer Data to SCPI.
	 * The customer is only sent when the configuration property
	 * {@code Sapc4ccustomerb2cConstants.C4C_CUSTOMER_CPI_REPLICATE} is set to "true".
	 *
	 * @param customerJson the customer payload to publish
	 * @throws IOException if the HTTP call to SCPI fails
	 */
	@Override
	public void publishCustomerToCloudPlatformIntegration(final C4CCustomerData customerJson) throws IOException
	{
		final String strReplicationEnabled = getConfigurationService().getConfiguration()
				.getString(Sapc4ccustomerb2cConstants.C4C_CUSTOMER_CPI_REPLICATE);
		// BUGFIX: the original parsed the flag only when it was blank
		// (StringUtils.isBlank), so replication could never be enabled.
		// Boolean.parseBoolean is null-safe and returns false for blank input,
		// so the value can be parsed directly.
		final boolean isReplicationEnabled = Boolean.parseBoolean(StringUtils.trimToEmpty(strReplicationEnabled));
		if (isReplicationEnabled)
		{
			getCloudPlatformIntegrationConnection().sendPost(
					getConfigurationService().getConfiguration()
							.getString(Sapc4ccustomerb2cConstants.C4C_CUSTOMER_SCPI_IFLOW_KEY),
					customerJson.toString());
		}
	}

	/**
	 * @return the cloudPlatformIntegrationConnection
	 */
	public CloudPlatformIntegrationConnection getCloudPlatformIntegrationConnection()
	{
		return cloudPlatformIntegrationConnection;
	}

	/**
	 * @param cloudPlatformIntegrationConnection
	 *           the cloudPlatformIntegrationConnection to set
	 */
	public void setCloudPlatformIntegrationConnection(final CloudPlatformIntegrationConnection cloudPlatformIntegrationConnection)
	{
		this.cloudPlatformIntegrationConnection = cloudPlatformIntegrationConnection;
	}

	/**
	 * @return the configurationService
	 */
	public ConfigurationService getConfigurationService()
	{
		return configurationService;
	}

	/**
	 * @param configurationService
	 *           the configurationService to set
	 */
	public void setConfigurationService(final ConfigurationService configurationService)
	{
		this.configurationService = configurationService;
	}
}
|
package org.keycloak.protocol.cas.endpoints;
import org.keycloak.events.EventBuilder;
import org.keycloak.models.*;
import org.keycloak.protocol.ProtocolMapper;
import org.keycloak.protocol.cas.mappers.CASAttributeMapper;
import org.keycloak.protocol.cas.representations.CASServiceResponse;
import org.keycloak.protocol.cas.utils.CASValidationException;
import org.keycloak.protocol.cas.utils.ContentTypeHelper;
import org.keycloak.protocol.cas.utils.ServiceResponseHelper;
import org.keycloak.services.managers.ClientSessionCode;
import org.keycloak.services.util.DefaultClientSessionContext;
import javax.ws.rs.core.*;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
/**
 * CAS /serviceValidate endpoint: validates a service ticket and renders the
 * CAS service response (success with user attributes, or failure).
 */
public class ServiceValidateEndpoint extends ValidateEndpoint {

    @Context
    private Request restRequest;

    public ServiceValidateEndpoint(RealmModel realm, EventBuilder event) {
        super(realm, event);
    }

    @Override
    protected Response successResponse() {
        final UserSessionModel userSession = clientSession.getUserSession();
        // CAS protocol has no notion of scopes, so the scope parameter is null.
        final ClientSessionContext sessionCtx =
                DefaultClientSessionContext.fromClientSessionAndScopeParameter(clientSession, null);
        final KeycloakSessionFactory factory = session.getKeycloakSessionFactory();

        // Collect user attributes from every protocol mapper that supports CAS.
        final Map<String, Object> userAttributes = new HashMap<>();
        for (final ProtocolMapperModel model : sessionCtx.getProtocolMappers()) {
            final ProtocolMapper mapper =
                    (ProtocolMapper) factory.getProviderFactory(ProtocolMapper.class, model.getProtocolMapper());
            if (mapper instanceof CASAttributeMapper) {
                ((CASAttributeMapper) mapper).setAttribute(userAttributes, model, userSession);
            }
        }

        final CASServiceResponse casResponse =
                ServiceResponseHelper.createSuccess(userSession.getUser().getUsername(), userAttributes);
        return prepare(Response.Status.OK, casResponse);
    }

    @Override
    protected Response errorResponse(CASValidationException e) {
        final CASServiceResponse casResponse =
                ServiceResponseHelper.createFailure(e.getError(), e.getErrorDescription());
        return prepare(e.getStatus(), casResponse);
    }

    /** Renders the service response in the media type negotiated from the request. */
    private Response prepare(Response.Status status, CASServiceResponse serviceResponse) {
        final MediaType mediaType = new ContentTypeHelper(request, restRequest, uriInfo).selectResponseType();
        return ServiceResponseHelper.createResponse(status, mediaType, serviceResponse);
    }
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* <p>
* A disk-based implementation of the
* {@link org.apache.tomcat.util.http.fileupload.FileItem FileItem}
* interface. This implementation retains smaller items in memory, while
* writing larger ones to disk. The threshold between these two is
* configurable, as is the location of files that are written to disk.
* </p>
* <p>
* In typical usage, an instance of
* {@link org.apache.tomcat.util.http.fileupload.disk.DiskFileItemFactory DiskFileItemFactory}
* would be created, configured, and then passed to a
* {@link org.apache.tomcat.util.http.fileupload.FileUpload FileUpload}
* implementation such as
* {@link org.apache.tomcat.util.http.fileupload.servlet.ServletFileUpload ServletFileUpload}.
* </p>
* <p>
* The following code fragment demonstrates this usage.
* </p>
* <pre>
* DiskFileItemFactory factory = new DiskFileItemFactory();
* // maximum size that will be stored in memory
* factory.setSizeThreshold(4096);
* // the location for saving data that is larger than getSizeThreshold()
* factory.setRepository(new File("/tmp"));
*
* ServletFileUpload upload = new ServletFileUpload(factory);
* </pre>
* <p>
* Please see the FileUpload
* <a href="http://commons.apache.org/fileupload/using.html" target="_top">User Guide</a>
* for further details and examples of how to use this package.
* </p>
*/
package org.apache.tomcat.util.http.fileupload.disk;
|
/*
* Copyright (c) 2018 Gomint team
*
* This code is licensed under the BSD license found in the
* LICENSE file in the root directory of this source tree.
*/
package io.gomint.world.block;
/**
* @author geNAZt
* @version 1.0
* @stability 3
*/
public interface BlockStoneButton extends BlockButton {
    // Marker interface: the stone button exposes no behavior beyond BlockButton;
    // it exists so implementations and API users can distinguish the stone variant.
}
|
package org.apache.lucene.search;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.index.TieredMergePolicy;
/**
* A policy defining which filters should be cached.
*
* Implementations of this class must be thread-safe.
*
* @see UsageTrackingFilterCachingPolicy
* @see LRUFilterCache
* @lucene.experimental
* @deprecated Use {@link QueryCachingPolicy} instead
*/
@Deprecated
public interface FilterCachingPolicy {

  /** A simple policy that caches all the provided filters on all segments. */
  public static final FilterCachingPolicy ALWAYS_CACHE = new FilterCachingPolicy() {

    @Override
    public void onUse(Filter filter) {}

    @Override
    public boolean shouldCache(Filter filter, LeafReaderContext context, DocIdSet set) throws IOException {
      // Unconditional: every filter is considered cache-worthy on every segment.
      return true;
    }

  };

  /** A simple policy that only caches on the largest segments of an index.
   * The reasoning is that these segments likely account for most of the
   * execution time of queries and are also more likely to stay around longer
   * than small segments, which makes them more interesting for caching.
   */
  public static class CacheOnLargeSegments implements FilterCachingPolicy {

    /** {@link CacheOnLargeSegments} instance that only caches on segments that
     * account for more than 3% of the total index size. This should guarantee
     * that all segments from the upper {@link TieredMergePolicy tier} will be
     * cached while ensuring that at most <tt>33</tt> segments can make it to
     * the cache (given that some implementations such as {@link LRUFilterCache}
     * perform better when the number of cached segments is low). */
    public static final CacheOnLargeSegments DEFAULT = new CacheOnLargeSegments(0.03f);

    // Minimum fraction (exclusive bounds ]0, 1[) of the index a segment must
    // represent for its filters to be cached.
    private final float minSizeRatio;

    /**
     * Create a {@link CacheOnLargeSegments} instance that only caches on a
     * given segment if its number of documents divided by the total number of
     * documents in the index is greater than or equal to
     * <code>minSizeRatio</code>.
     */
    public CacheOnLargeSegments(float minSizeRatio) {
      if (minSizeRatio <= 0 || minSizeRatio >= 1) {
        throw new IllegalArgumentException("minSizeRatio must be in ]0, 1[, got " + minSizeRatio);
      }
      this.minSizeRatio = minSizeRatio;
    }

    @Override
    public void onUse(Filter filter) {}

    @Override
    public boolean shouldCache(Filter filter, LeafReaderContext context, DocIdSet set) throws IOException {
      final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context);
      // Fraction of the whole index's documents held by this segment; cache
      // only when the segment is large enough.
      final float sizeRatio = (float) context.reader().maxDoc() / topLevelContext.reader().maxDoc();
      return sizeRatio >= minSizeRatio;
    }
  };

  /** Callback that is called every time that a cached filter is used.
   * This is typically useful if the policy wants to track usage statistics
   * in order to make decisions. */
  void onUse(Filter filter);

  /** Whether the given {@link DocIdSet} should be cached on a given segment.
   * This method will be called on each leaf context to know if the filter
   * should be cached on this particular leaf. The filter cache will first
   * attempt to load a {@link DocIdSet} from the cache. If it is not cached
   * yet and this method returns <tt>true</tt> then a cache entry will be
   * generated. Otherwise an uncached set will be returned. */
  boolean shouldCache(Filter filter, LeafReaderContext context, DocIdSet set) throws IOException;
}
|
package com.bond.oncache;
/*
* This is the source code of SpecNet project
* It is licensed under MIT License.
*
* Copyright (c) Dmitriy Bondarenko
* feel free to contact me: specnet.messenger@gmail.com
*/
import android.content.Context;
import android.content.Intent;
import android.os.Bundle;
import android.os.Handler;
import android.os.Looper;
import android.os.Message;
import android.util.Log;
import android.support.design.widget.FloatingActionButton;
import android.support.design.widget.Snackbar;
import android.support.v7.app.AppCompatActivity;
import android.support.v7.widget.Toolbar;
import android.view.View;
import android.view.Menu;
import android.view.MenuItem;
import com.bond.oncache.gui.FragmentKey;
import com.bond.oncache.gui.MainWindow;
import com.bond.oncache.gui.SpecTheme;
import com.bond.oncache.gui.UiFragment;
import com.bond.oncache.gui.UiHistoryFrag;
import com.bond.oncache.gui.UiMainFrag;
import com.bond.oncache.gui.UiSettingsFrag;
import com.bond.oncache.i.IActivityForResult;
import com.bond.oncache.i.IView;
import com.bond.oncache.objs.ByteUtils;
import com.bond.oncache.objs.StaticConsts;
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashMap;
import java.util.Map;
public class MainActivity extends AppCompatActivity implements IView {
static final String TAG = "MainActivity";
final GuiHandler guiHandler = new GuiHandler(Looper.getMainLooper());
Toolbar toolbar = null;
FloatingActionButton fab = null;
static final FragmentKey FirstFragKey = new FragmentKey(StaticConsts.FirstFragTAG);
final Map<FragmentKey, UiFragment> uiFrags = new HashMap<FragmentKey,UiFragment>();
final Deque<FragmentKey> uiFragsControl = new ArrayDeque<FragmentKey>();
UiFragment curActiveFrag = null;
MainWindow mainWindow = null;
boolean guiNotStarted = true;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
mainWindow = (MainWindow) findViewById(R.id.mainWindow);
toolbar = (Toolbar) findViewById(R.id.toolbar);
setSupportActionBar(toolbar);
fab = (FloatingActionButton) findViewById(R.id.fab);
fab.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
onFABclick(view);
}
});
restoreState(savedInstanceState);
// onNewIntent(getIntent())
}
@Override
public void setFABicon() {
if (null != curActiveFrag ) {
fab.setImageDrawable(curActiveFrag.getFABicon());
}
}
@Override
public void goBack() {
onBackPressed();
}
void onFABclick(View view) {
if (null != curActiveFrag) {
curActiveFrag.onFABclick();
}
}
@Override
public void onPresenterChange() {
if (null != curActiveFrag) {
curActiveFrag.onPresenterChange();
}
setFABicon();
}
@Override
public void showMessage(String str) {
Snackbar.make(fab, str, Snackbar.LENGTH_LONG)
.setAction("Action", null).show();
}
@Override
public boolean onCreateOptionsMenu(Menu menu) {
// Inflate the menu; this adds items to the action bar if it is present.
getMenuInflater().inflate(R.menu.menu_main, menu);
return true;
}
/**
* Gets called every time the user presses the menu button.
* Use if your menu is dynamic.
*/
@Override
public boolean onPrepareOptionsMenu(Menu menu) {
menu.clear();
if (null != curActiveFrag) {
if (!curActiveFrag.getTAG().equals(StaticConsts.FirstFragTAG)) {
menu.add(0, StaticConsts.MENU_UiMain, Menu.NONE,
R.string.strUiMainFragM);
}
if (!curActiveFrag.getTAG().equals(StaticConsts.UiSettingsTAG)) {
menu.add(0, StaticConsts.MENU_UiSettings, Menu.NONE,
R.string.strUiSettingsFrag);
}
if (!curActiveFrag.getTAG().equals(StaticConsts.UiHistoryTAG)) {
menu.add(0, StaticConsts.MENU_UiHistory, Menu.NONE,
R.string.strUiHistoryFrag);
}
curActiveFrag.prepareLocalMenu(menu);
}
// Back button:
menu.add(2, StaticConsts.MENU_Exit, Menu.NONE,
R.string.strExit);
return super.onPrepareOptionsMenu(menu);
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
super.onOptionsItemSelected(item);
boolean was_my_menu = true;
int menu_item_id = item.getItemId();
switch (menu_item_id) {
case StaticConsts.MENU_UiMain:
setCurActiveFrag(new FragmentKey(StaticConsts.FirstFragTAG));
break;
case StaticConsts.MENU_UiSettings:
setCurActiveFrag(new FragmentKey(StaticConsts.UiSettingsTAG));
break;
case StaticConsts.MENU_UiHistory:
setCurActiveFrag(new FragmentKey(StaticConsts.UiHistoryTAG));
break;
case StaticConsts.MENU_Exit:
exitSpecNetMain();
break;
default:
was_my_menu = null == curActiveFrag? false :
curActiveFrag.onSelectLocalMenu(menu_item_id);
}
if (was_my_menu) { return true; }
return super.onOptionsItemSelected(item);
}
@Override
public void onSaveInstanceState(Bundle outState) {
if (null != curActiveFrag) {
outState.putString("curActiveFrag.fragTAG",
curActiveFrag.getFragmentKey().fragTAG);
}
super.onSaveInstanceState(outState);
}
private void restoreState (Bundle savedInstanceState) {
if (null == savedInstanceState) { return ; }
String fragTAG = savedInstanceState.getString("curActiveFrag.fragTAG");
if (null != fragTAG) {
setCurActiveFrag(new FragmentKey(fragTAG));
}//if (null!=fragTAG)
}
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
if (requestCode == StaticConsts.RQS_GET_CONTENT ) {
if (null != curActiveFrag) {
if (ByteUtils.getFlag(curActiveFrag.getType(), StaticConsts.FrgActivityForResult)) {
((IActivityForResult) curActiveFrag).resultActivityForResult(this, requestCode, resultCode, data);
}
}
} else {
super.onActivityResult(requestCode, resultCode, data);
}
}
private void setCurActiveFrag(FragmentKey key) {
if (null == key) return;
UiFragment frg = uiFrags.get(key);
if (null == frg || frg.isDestroyed) {
frg = createUiFragment(key);
}
if (frg != curActiveFrag) {
if (null != curActiveFrag ) {
mainWindow.checkDelCurFrag(curActiveFrag);
uiFrags.remove(curActiveFrag.getFragmentKey());
uiFragsControl.remove(curActiveFrag.getFragmentKey());
curActiveFrag.onDestroyCommon();
}
curActiveFrag = frg;
mainWindow.setCurActiveFrag(curActiveFrag);
if (curActiveFrag.getTAG().equals(StaticConsts.FirstFragTAG)) {
clearUiFrags(FirstFragKey);
}
uiFrags.put(curActiveFrag.getFragmentKey(), curActiveFrag);
uiFragsControl.add(curActiveFrag.getFragmentKey());
}
if (null != curActiveFrag) {
curActiveFrag.onResume();
setFABicon();
}
}
UiFragment createUiFragment(FragmentKey key) {
Context context = MainActivity.this;
UiFragment frg = null;
switch (key.fragTAG) {
case StaticConsts.UiSettingsTAG:
frg = new UiSettingsFrag(context, key);
break;
case StaticConsts.UiHistoryTAG:
frg = new UiHistoryFrag(context, key);
break;
case StaticConsts.FirstFragTAG:
default:
frg = new UiMainFrag(context, key);
break;
}
return frg;
}
@Override
public void setToolbarTittle(String tittle) {
getSupportActionBar().setTitle(tittle);
}
@Override
public Handler getGuiHandler() {
return guiHandler;
}
@Override
public Context getForDialogCtx() {
return MainActivity.this;
}
@Override
public void onPause() {
if (null != curActiveFrag) {
curActiveFrag.onPause();
}
super.onPause();
}
private void onFirstStart() {
FragmentKey key=uiFragsControl.peekLast();
if (null == key) {
TestPresenter.onFirstStart();
setCurActiveFrag(FirstFragKey);
} else {
setCurActiveFrag(key);
}
guiNotStarted = false;
}
@Override
public void onStart() {
TestPresenter.setGUInterface(MainActivity.this);
SpecTheme.applyMetrics(MainActivity.this);
super.onStart();
if (guiNotStarted) {
onFirstStart();
}
}
@Override
public void onResume() {
super.onResume();
if (null != curActiveFrag) {
curActiveFrag.onResume();
}
onPresenterChange();
}
@Override
public void onStop() {
if (null != curActiveFrag) {
curActiveFrag.onStop();
}
guiNotStarted = true;
super.onStop();
}
@Override
protected void onDestroy() {
exitSpecNetMain();
super.onDestroy();
}
@Override
public void onBackPressed() {
if (null != curActiveFrag
&& curActiveFrag.getTAG().equals(StaticConsts.FirstFragTAG)) {
exitSpecNetMain();
} else {
setCurActiveFrag(FirstFragKey);
}
}
private void exitSpecNetMain() {
try {
guiNotStarted = true;
mainWindow.onDestroy();
clearUiFrags();
SpecTheme.onDestroy();
finish();
System.gc();
} catch (Exception e) {}
try {
super.onBackPressed();
System.gc();
} catch (Exception e) {}
}
private void clearUiFrags(FragmentKey exeptFragKey) {
for (FragmentKey fragKey : uiFragsControl) {
if (fragKey.equals(exeptFragKey)) {
continue;
}
UiFragment frag = uiFrags.get(fragKey);
if (null != frag) {
mainWindow.checkDelCurFrag(frag);
frag.onStop();
frag.onDestroyCommon();
}
}
uiFrags.clear();
uiFragsControl.clear();
}
private void clearUiFrags() {
for (FragmentKey fragKey : uiFragsControl) {
UiFragment frag=uiFrags.get(fragKey);
if (null!=frag) {
mainWindow.checkDelCurFrag(frag);
frag.onStop();
frag.onDestroyCommon();
}
}
uiFragsControl.clear();
uiFrags.clear();
curActiveFrag = null;
}
class GuiHandler extends Handler {
public GuiHandler(Looper looper) {
super(looper);
}
@Override
public void handleMessage(Message msg) {
try {
switch (msg.what) {
// case MsgTAGs.M_KEEP_ALIVE:
// keepAlive();
// break;
default:
super.handleMessage(msg);
}
} catch (Exception e) {
Log.e(TAG, "GuiHandler: error Message handling",e);
}
}
}
}
|
/*
* org.riverock.interfaces - Common classes and interafces shared between projects
*
* Copyright (C) 2006, Riverock Software, All Rights Reserved.
*
* Riverock - The Open-source Java Development Community
* http://www.riverock.org
*
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
package org.riverock.interfaces.portal.dao;
import java.util.List;
import org.riverock.interfaces.portal.bean.Css;
import org.riverock.interfaces.portal.spi.PortalCssSpi;
/**
* @deprecated use org.riverock.interfaces.portal.spi.PortalCssSpi
* @author Sergei Maslyukov
* Date: 18.05.2006
* Time: 13:30:08
*/
public interface PortalCssDao {
    /** Returns the CSS currently in effect for the given site. */
    public Css getCssCurrent(Long siteId);

    /** Looks up a CSS entry by its id. */
    public Css getCss(Long cssId);

    /** Lists all CSS entries registered for the given site. */
    public List<Css> getCssList(Long siteId);

    /** Persists a new CSS entry and returns its generated id. */
    public Long createCss(Css css);

    /** Updates an existing CSS entry. */
    public void updateCss(Css css);

    /** Deletes the CSS entry with the given id. */
    public void deleteCss(Long cssId);
}
|
package net.peepocloud.node.server.minecraftserverfile;
/*
* Created by Mc_Ruben on 23.11.2018
*/
import net.peepocloud.lib.config.yaml.YamlConfigurable;
import net.peepocloud.node.logging.ColoredLogger;
import net.peepocloud.node.server.ServerVersion;
import net.peepocloud.node.api.setup.type.ArraySetupAcceptable;
import net.peepocloud.node.api.setup.Setup;
import net.peepocloud.node.api.utility.FileDownloading;
import java.nio.file.Path;
import java.util.Collection;
import java.util.stream.Collectors;
public class SetupMinecraftServerStartupFile { //TODO implement languagesystem

    /**
     * Runs an interactive setup that asks the user for a server type and
     * version, then downloads the matching server jar to {@code path}.
     *
     * @param logger logger used for the setup prompts and the download progress bar
     * @param path   target location for the downloaded server file
     */
    public static void installServer(ColoredLogger logger, Path path) {
        Setup.startSetupSync(new YamlConfigurable(), logger, setup -> {
            Collection<MinecraftServerStartupFileVersion> fileVersions = MinecraftServerStartupFileVersion.getAvailableVersions();
            String versions = MinecraftServerStartupFileVersion.asString(fileVersions);
            setup.request(
                    "type",
                    "Please specify a minecraft process version [" + versions + "]",
                    "You have to specify one of the following: " + versions,
                    new ArraySetupAcceptable<>(fileVersions.toArray()),
                    fileVersions.stream().map(MinecraftServerStartupFileVersion::getName).collect(Collectors.toList())
            );
            String type = setup.getData().getString("type");
            String url = null;
            for (MinecraftServerStartupFileVersion startupFileVersion : fileVersions) {
                if (type.equalsIgnoreCase(startupFileVersion.getName())) {
                    requestVersions(setup, startupFileVersion.getVersions().values());
                    url = startupFileVersion.getVersion(setup.getData().getString("version")).getUrl();
                    break;
                }
            }
            // ROBUSTNESS: the ArraySetupAcceptable should guarantee a match above, but
            // if no version matched, fail fast with a clear message instead of passing
            // a null url into the downloader and crashing far from the cause.
            if (url == null) {
                throw new IllegalStateException("No download url found for server type \"" + type + "\"");
            }
            String finalUrl = url;
            FileDownloading.downloadFileWithProgressBar(logger, url, path, () -> System.out.println("&aSuccessfully downloaded process"),
                    () -> System.out.println("&cThere was an error while downloading server.jar from " + finalUrl));
        });
    }

    /**
     * Asks the user to pick one of the available server versions; the answer is
     * stored in the setup data under the key {@code "version"}.
     */
    private static void requestVersions(Setup setup, Collection<ServerVersion> serverVersions) {
        String s = serverVersions.stream().map(ServerVersion::getVersion).collect(Collectors.joining(", "));
        setup.request(
                "version",
                "Please specify the version [" + s + "]",
                "You have to specify one of the following versions: " + s,
                new ArraySetupAcceptable<>(serverVersions.toArray()),
                serverVersions.stream().map(ServerVersion::getVersion).collect(Collectors.toList())
        );
    }
}
|
/*
* Copyright 2020, Yahoo Inc.
* Licensed under the Apache License, Version 2.0
* See LICENSE file in project root for terms.
*/
package com.yahoo.elide.async.models;
/**
 * Contract for async model objects that can report the name of the
 * security principal that owns them.
 */
public interface PrincipalOwned {

    /**
     * @return the name of the owning principal
     */
    String getPrincipalName();
}
|
package com.planet_ink.coffee_mud.Abilities.Spells;
import com.planet_ink.coffee_mud.core.interfaces.*;
import com.planet_ink.coffee_mud.core.*;
import com.planet_ink.coffee_mud.core.collections.*;
import com.planet_ink.coffee_mud.Abilities.interfaces.*;
import com.planet_ink.coffee_mud.Areas.interfaces.*;
import com.planet_ink.coffee_mud.Behaviors.interfaces.*;
import com.planet_ink.coffee_mud.CharClasses.interfaces.*;
import com.planet_ink.coffee_mud.Commands.interfaces.*;
import com.planet_ink.coffee_mud.Common.interfaces.*;
import com.planet_ink.coffee_mud.Exits.interfaces.*;
import com.planet_ink.coffee_mud.Items.interfaces.*;
import com.planet_ink.coffee_mud.Libraries.interfaces.*;
import com.planet_ink.coffee_mud.Locales.interfaces.*;
import com.planet_ink.coffee_mud.MOBS.interfaces.*;
import com.planet_ink.coffee_mud.Races.interfaces.*;
import java.util.*;
/*
Copyright 2001-2017 Bo Zimmerman
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/**
 * A malicious alteration spell that prevents the target from breathing
 * (sets {@link PhyStats#CAN_NOT_BREATHE}) for the duration of the effect.
 */
public class Spell_Choke extends Spell
{
	@Override
	public String ID()
	{
		return "Spell_Choke";
	}

	private final static String localizedName = CMLib.lang().L("Choke");

	@Override
	public String name()
	{
		return localizedName;
	}

	private final static String localizedStaticDisplay = CMLib.lang().L("(Choke spell)");

	@Override
	public String displayText()
	{
		return localizedStaticDisplay;
	}

	@Override
	public int abstractQuality()
	{
		return Ability.QUALITY_MALICIOUS;
	}

	@Override
	protected int canAffectCode()
	{
		return CAN_MOBS;
	}

	@Override
	public int classificationCode()
	{
		return Ability.ACODE_SPELL | Ability.DOMAIN_ALTERATION;
	}

	/**
	 * While this effect is active, flags the affected being as unable to breathe.
	 */
	@Override
	public void affectPhyStats(Physical affected, PhyStats affectableStats)
	{
		super.affectPhyStats(affected,affectableStats);
		affectableStats.setSensesMask(affectableStats.sensesMask()|PhyStats.CAN_NOT_BREATHE);
	}

	@Override
	public void unInvoke()
	{
		// undo the affects of this spell
		if(!(affected instanceof MOB))
			return;
		final MOB mob=(MOB)affected;

		super.unInvoke();

		if(canBeUninvoked())
			mob.tell(L("You begin to breathe easier."));
	}

	/**
	 * Casts the spell: picks a target, runs the standard invoke/proficiency
	 * checks, and on success attaches the choking affect to the target.
	 *
	 * @return true if the spell was successfully cast
	 */
	@Override
	public boolean invoke(MOB mob, List<String> commands, Physical givenTarget, boolean auto, int asLevel)
	{
		final MOB target=this.getTarget(mob,commands,givenTarget);
		if(target==null)
			return false;

		if(!super.invoke(mob,commands,givenTarget,auto,asLevel))
			return false;

		boolean success=proficiencyCheck(mob,0,auto);
		if(success)
		{
			invoker=mob;
			final CMMsg msg=CMClass.getMsg(mob,target,this,verbalCastCode(mob,target,auto),auto?"":L("^S<S-NAME> incant(s) at <T-NAMESELF>.^?"));
			if(mob.location().okMessage(mob,msg))
			{
				mob.location().send(mob,msg);
				// msg.value()<=0 means the target did not resist/save against the spell
				if(msg.value()<=0)
				{
					success=maliciousAffect(mob,target,asLevel,5+super.getXLEVELLevel(mob),-1)!=null;
					if(target.location()==mob.location())
						// FIX: wrap the emote in L(...) so it is localizable,
						// consistent with every other player-facing message in this class.
						target.location().show(target,null,CMMsg.MSG_OK_ACTION,L("<S-NAME> start(s) choking!")+CMLib.protocol().msp("choke.wav",40));
				}
			}
		}
		else
			return maliciousFizzle(mob,target,L("<S-NAME> incant(s) at <T-NAMESELF>, but the spell fizzles."));

		// return whether it worked
		return success;
	}
}
|
package io.novaordis.playground.jee.ejb2rest.callee;
import io.novaordis.playground.jee.ejb.ejb2rest.common.Callee;
import io.novaordis.playground.jee.ejb.ejb2rest.common.Measurements;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.ejb.Stateless;
/**
* @author Ovidiu Feodorov <ovidiu@novaordis.com>
* @since 5/1/17
*/
@SuppressWarnings("unused")
@Stateless
public class CalleeImpl implements Callee {

    // Constants -------------------------------------------------------------------------------------------------------

    private static final Logger log = LoggerFactory.getLogger(CalleeImpl.class);

    // Static ----------------------------------------------------------------------------------------------------------

    /**
     * Micro-benchmark entry point: invokes the business method serially in a loop
     * against a locally-constructed (non-container-managed) instance.
     */
    public static void main(String[] args) throws Exception {
        Callee c = new CalleeImpl();
        Measurements.invokeSeriallyInALoop(c, 100000);
    }

    // Attributes ------------------------------------------------------------------------------------------------------

    // Constructors ----------------------------------------------------------------------------------------------------

    // Callee implementation -------------------------------------------------------------------------------------------

    /**
     * Returns the argument with its characters reversed.
     *
     * @param arg the string to reverse; must not be null
     * @return {@code arg} reversed
     */
    @Override
    public String businessMethodA(String arg) {
        log.info(this + " got businessMethodA(" + arg + ")");
        // StringBuilder.reverse() is O(n); the previous char-by-char String
        // concatenation allocated a new String per iteration (O(n^2)).
        return new StringBuilder(arg).reverse().toString();
    }

    // Public ----------------------------------------------------------------------------------------------------------

    @Override
    public String toString() {
        return "Callee[" + Integer.toHexString(System.identityHashCode(this)) + "]";
    }

    // Package protected -----------------------------------------------------------------------------------------------

    // Protected -------------------------------------------------------------------------------------------------------

    // Private ---------------------------------------------------------------------------------------------------------

    // Inner classes ---------------------------------------------------------------------------------------------------
}
|
/*
* MIT License
*
* Copyright (c) 2022 MASES s.r.l.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/**************************************************************************************
* <auto-generated>
* This code was generated from a template using JCOReflector
*
* Manual changes to this file may cause unexpected behavior in your application.
* Manual changes to this file will be overwritten if the code is regenerated.
* </auto-generated>
*************************************************************************************/
package system.windows.media.animation;
import org.mases.jcobridge.*;
import org.mases.jcobridge.netreflection.*;
import java.util.ArrayList;
// Import section
import system.windows.Freezable;
import system.windows.media.animation.ObjectKeyFrame;
import system.windows.media.animation.ObjectKeyFrameCollection;
import system.Array;
/**
* The base .NET class managing System.Windows.Media.Animation.ObjectKeyFrameCollection, PresentationCore, Version=5.0.15.0, Culture=neutral, PublicKeyToken=31bf3856ad364e35.
* <p>
*
* See: <a href="https://docs.microsoft.com/en-us/dotnet/api/System.Windows.Media.Animation.ObjectKeyFrameCollection" target="_top">https://docs.microsoft.com/en-us/dotnet/api/System.Windows.Media.Animation.ObjectKeyFrameCollection</a>
*/
public class ObjectKeyFrameCollection extends Freezable {
    // NOTE(review): this class is auto-generated by JCOReflector (see file header);
    // manual code changes will be lost on regeneration. Comments are for readers only.
    // Every member below marshals the call through JCOBridge to the hosting .NET runtime.
    /**
     * Fully assembly qualified name: PresentationCore, Version=5.0.15.0, Culture=neutral, PublicKeyToken=31bf3856ad364e35
     */
    public static final String assemblyFullName = "PresentationCore, Version=5.0.15.0, Culture=neutral, PublicKeyToken=31bf3856ad364e35";
    /**
     * Assembly name: PresentationCore
     */
    public static final String assemblyShortName = "PresentationCore";
    /**
     * Qualified class name: System.Windows.Media.Animation.ObjectKeyFrameCollection
     */
    public static final String className = "System.Windows.Media.Animation.ObjectKeyFrameCollection";
    // Bridge instance keyed by the assembly; shared by all wrappers of this type.
    static JCOBridge bridge = JCOBridgeInstance.getInstance(assemblyFullName);
    /**
     * The type managed from JCOBridge. See {@link JCType}
     */
    public static JCType classType = createType();
    static JCEnum enumInstance = null;
    // The wrapped .NET instance; null until assigned by a constructor or setJCOInstance.
    JCObject classInstance = null;
    // Resolves the .NET type via the bridge; logs and returns null on failure.
    static JCType createType() {
        try {
            String classToCreate = className + ", "
                    + (JCOReflector.getUseFullAssemblyName() ? assemblyFullName : assemblyShortName);
            if (JCOReflector.getDebug())
                JCOReflector.writeLog("Creating %s", classToCreate);
            JCType typeCreated = bridge.GetType(classToCreate);
            if (JCOReflector.getDebug())
                JCOReflector.writeLog("Created: %s",
                        (typeCreated != null) ? typeCreated.toString() : "Returned null value");
            return typeCreated;
        } catch (JCException e) {
            JCOReflector.writeLog(e);
            return null;
        }
    }
    // Registers an assembly reference on the .NET side, translating native bridge errors.
    void addReference(String ref) throws Throwable {
        try {
            bridge.AddReference(ref);
        } catch (JCNativeException jcne) {
            throw translateException(jcne);
        }
    }
    /**
     * Internal constructor. Use with caution
     */
    public ObjectKeyFrameCollection(java.lang.Object instance) throws Throwable {
        super(instance);
        if (instance instanceof JCObject) {
            classInstance = (JCObject) instance;
        } else
            throw new Exception("Cannot manage object, it is not a JCObject");
    }
    // --- IJCOBridgeReflected identity accessors -------------------------------------
    public String getJCOAssemblyName() {
        return assemblyFullName;
    }
    public String getJCOClassName() {
        return className;
    }
    public String getJCOObjectName() {
        return className + ", " + (JCOReflector.getUseFullAssemblyName() ? assemblyFullName : assemblyShortName);
    }
    public java.lang.Object getJCOInstance() {
        return classInstance;
    }
    public void setJCOInstance(JCObject instance) {
        classInstance = instance;
        super.setJCOInstance(classInstance);
    }
    public JCType getJCOType() {
        return classType;
    }
    /**
     * Try to cast the {@link IJCOBridgeReflected} instance into {@link ObjectKeyFrameCollection}, a cast assert is made to check if types are compatible.
     * @param from {@link IJCOBridgeReflected} instance to be casted
     * @return {@link ObjectKeyFrameCollection} instance
     * @throws java.lang.Throwable in case of error during cast operation
     */
    public static ObjectKeyFrameCollection cast(IJCOBridgeReflected from) throws Throwable {
        NetType.AssertCast(classType, from);
        return new ObjectKeyFrameCollection(from.getJCOInstance());
    }
    // Constructors section
    // Creates a new .NET ObjectKeyFrameCollection and wraps it in this instance.
    public ObjectKeyFrameCollection() throws Throwable, system.ArgumentException, system.ArgumentOutOfRangeException, system.PlatformNotSupportedException, system.InvalidOperationException, system.ArgumentNullException, system.RankException, system.IndexOutOfRangeException, system.ArrayTypeMismatchException, system.componentmodel.InvalidEnumArgumentException, system.componentmodel.Win32Exception, system.NotSupportedException {
        try {
            // add reference to assemblyName.dll file
            addReference(JCOReflector.getUseFullAssemblyName() ? assemblyFullName : assemblyShortName);
            setJCOInstance((JCObject)classType.NewObject());
        } catch (JCNativeException jcne) {
            throw translateException(jcne);
        }
    }
    // Methods section
    // True if the collection contains the given key frame (delegates to .NET Contains).
    public boolean Contains(ObjectKeyFrame keyFrame) throws Throwable, system.ArgumentNullException, system.ArgumentException, system.ObjectDisposedException, system.InvalidOperationException, system.globalization.CultureNotFoundException, system.PlatformNotSupportedException, system.ArgumentOutOfRangeException, system.NotSupportedException, system.IndexOutOfRangeException {
        if (classInstance == null)
            throw new UnsupportedOperationException("classInstance is null.");
        try {
            return (boolean)classInstance.Invoke("Contains", keyFrame == null ? null : keyFrame.getJCOInstance());
        } catch (JCNativeException jcne) {
            throw translateException(jcne);
        }
    }
    // Appends a key frame; returns the index at which it was added.
    public int Add(ObjectKeyFrame keyFrame) throws Throwable, system.ArgumentException, system.ArgumentOutOfRangeException, system.ArgumentNullException, system.InvalidOperationException, system.PlatformNotSupportedException, system.IndexOutOfRangeException, system.NotSupportedException, system.ObjectDisposedException, system.RankException, system.ArrayTypeMismatchException, system.FormatException {
        if (classInstance == null)
            throw new UnsupportedOperationException("classInstance is null.");
        try {
            return (int)classInstance.Invoke("Add", keyFrame == null ? null : keyFrame.getJCOInstance());
        } catch (JCNativeException jcne) {
            throw translateException(jcne);
        }
    }
    // Index of the given key frame, or the .NET IndexOf result (-1 when absent).
    public int IndexOf(ObjectKeyFrame keyFrame) throws Throwable, system.ArgumentNullException, system.ArgumentException, system.ObjectDisposedException, system.InvalidOperationException, system.globalization.CultureNotFoundException, system.PlatformNotSupportedException, system.ArgumentOutOfRangeException, system.NotSupportedException, system.IndexOutOfRangeException {
        if (classInstance == null)
            throw new UnsupportedOperationException("classInstance is null.");
        try {
            return (int)classInstance.Invoke("IndexOf", keyFrame == null ? null : keyFrame.getJCOInstance());
        } catch (JCNativeException jcne) {
            throw translateException(jcne);
        }
    }
    // Invokes .NET Clone() and wraps the result; suffixed name avoids clashing with
    // the differently-typed Clone exposed by the Freezable hierarchy.
    public ObjectKeyFrameCollection CloneNewObjectKeyFrameCollection() throws Throwable, system.ArgumentNullException, system.ArgumentException, system.InvalidOperationException, system.globalization.CultureNotFoundException, system.PlatformNotSupportedException, system.ObjectDisposedException, system.security.SecurityException, system.io.IOException, system.ArgumentOutOfRangeException, system.FormatException, system.IndexOutOfRangeException, system.NotSupportedException {
        if (classInstance == null)
            throw new UnsupportedOperationException("classInstance is null.");
        try {
            JCObject objClone = (JCObject)classInstance.Invoke("Clone");
            return new ObjectKeyFrameCollection(objClone);
        } catch (JCNativeException jcne) {
            throw translateException(jcne);
        }
    }
    // Removes all key frames from the collection.
    public void Clear() throws Throwable, system.ArgumentNullException, system.ArgumentException, system.ObjectDisposedException, system.InvalidOperationException, system.globalization.CultureNotFoundException, system.PlatformNotSupportedException, system.IndexOutOfRangeException, system.ArgumentOutOfRangeException, system.FormatException {
        if (classInstance == null)
            throw new UnsupportedOperationException("classInstance is null.");
        try {
            classInstance.Invoke("Clear");
        } catch (JCNativeException jcne) {
            throw translateException(jcne);
        }
    }
    // Copies the collection into the given array starting at index.
    public void CopyTo(ObjectKeyFrame[] array, int index) throws Throwable, system.ArgumentNullException, system.ArgumentException, system.ObjectDisposedException, system.InvalidOperationException, system.globalization.CultureNotFoundException, system.PlatformNotSupportedException, system.RankException, system.ArgumentOutOfRangeException, system.IndexOutOfRangeException, system.ArrayTypeMismatchException {
        if (classInstance == null)
            throw new UnsupportedOperationException("classInstance is null.");
        try {
            classInstance.Invoke("CopyTo", toObjectFromArray(array), index);
        } catch (JCNativeException jcne) {
            throw translateException(jcne);
        }
    }
    // Inserts a key frame at the given index.
    public void Insert(int index, ObjectKeyFrame keyFrame) throws Throwable, system.ArgumentException, system.ArgumentOutOfRangeException, system.ArgumentNullException, system.InvalidOperationException, system.PlatformNotSupportedException, system.IndexOutOfRangeException, system.NotSupportedException, system.ObjectDisposedException, system.RankException, system.ArrayTypeMismatchException, system.FormatException {
        if (classInstance == null)
            throw new UnsupportedOperationException("classInstance is null.");
        try {
            classInstance.Invoke("Insert", index, keyFrame == null ? null : keyFrame.getJCOInstance());
        } catch (JCNativeException jcne) {
            throw translateException(jcne);
        }
    }
    // Removes the first occurrence of the given key frame.
    public void Remove(ObjectKeyFrame keyFrame) throws Throwable, system.ArgumentNullException, system.ArgumentException, system.ObjectDisposedException, system.InvalidOperationException, system.globalization.CultureNotFoundException, system.PlatformNotSupportedException, system.IndexOutOfRangeException, system.ArgumentOutOfRangeException, system.FormatException, system.NotSupportedException, system.RankException, system.ArrayTypeMismatchException {
        if (classInstance == null)
            throw new UnsupportedOperationException("classInstance is null.");
        try {
            classInstance.Invoke("Remove", keyFrame == null ? null : keyFrame.getJCOInstance());
        } catch (JCNativeException jcne) {
            throw translateException(jcne);
        }
    }
    // Removes the key frame at the given index.
    public void RemoveAt(int index) throws Throwable, system.ArgumentNullException, system.ArgumentException, system.ObjectDisposedException, system.InvalidOperationException, system.globalization.CultureNotFoundException, system.PlatformNotSupportedException, system.IndexOutOfRangeException, system.ArgumentOutOfRangeException, system.FormatException, system.RankException, system.ArrayTypeMismatchException {
        if (classInstance == null)
            throw new UnsupportedOperationException("classInstance is null.");
        try {
            classInstance.Invoke("RemoveAt", index);
        } catch (JCNativeException jcne) {
            throw translateException(jcne);
        }
    }
    /**
     * @deprecated Not for public use because the method is implemented in .NET with an explicit interface.
     * Use the static ToIList method available in IList to obtain an object with an invocable method
     */
    @Deprecated
    public boolean Contains(NetObject value) throws Throwable {
        throw new UnsupportedOperationException("Not for public use because the method is implemented with an explicit interface. Use ToIList to obtain the full interface.");
    }
    /**
     * @deprecated Not for public use because the method is implemented in .NET with an explicit interface.
     * Use the static ToIList method available in IList to obtain an object with an invocable method
     */
    @Deprecated
    public int Add(NetObject value) throws Throwable {
        throw new UnsupportedOperationException("Not for public use because the method is implemented with an explicit interface. Use ToIList to obtain the full interface.");
    }
    /**
     * @deprecated Not for public use because the method is implemented in .NET with an explicit interface.
     * Use the static ToIList method available in IList to obtain an object with an invocable method
     */
    @Deprecated
    public int IndexOf(NetObject value) throws Throwable {
        throw new UnsupportedOperationException("Not for public use because the method is implemented with an explicit interface. Use ToIList to obtain the full interface.");
    }
    /**
     * @deprecated Not for public use because the method is implemented in .NET with an explicit interface.
     * Use the static ToIList method available in IList to obtain an object with an invocable method
     */
    @Deprecated
    public void Insert(int index, NetObject value) throws Throwable {
        throw new UnsupportedOperationException("Not for public use because the method is implemented with an explicit interface. Use ToIList to obtain the full interface.");
    }
    /**
     * @deprecated Not for public use because the method is implemented in .NET with an explicit interface.
     * Use the static ToIList method available in IList to obtain an object with an invocable method
     */
    @Deprecated
    public void Remove(NetObject value) throws Throwable {
        throw new UnsupportedOperationException("Not for public use because the method is implemented with an explicit interface. Use ToIList to obtain the full interface.");
    }
    /**
     * @deprecated Not for public use because the method is implemented in .NET with an explicit interface.
     * Use the static ToICollection method available in ICollection to obtain an object with an invocable method
     */
    @Deprecated
    public void CopyTo(Array array, int index) throws Throwable {
        throw new UnsupportedOperationException("Not for public use because the method is implemented with an explicit interface. Use ToICollection to obtain the full interface.");
    }
    // Properties section
    public boolean getIsFixedSize() throws Throwable, system.ArgumentNullException, system.ArgumentException, system.ObjectDisposedException, system.InvalidOperationException, system.globalization.CultureNotFoundException, system.PlatformNotSupportedException {
        if (classInstance == null)
            throw new UnsupportedOperationException("classInstance is null.");
        try {
            return (boolean)classInstance.Get("IsFixedSize");
        } catch (JCNativeException jcne) {
            throw translateException(jcne);
        }
    }
    public boolean getIsReadOnly() throws Throwable, system.ArgumentNullException, system.ArgumentException, system.ObjectDisposedException, system.InvalidOperationException, system.globalization.CultureNotFoundException, system.PlatformNotSupportedException {
        if (classInstance == null)
            throw new UnsupportedOperationException("classInstance is null.");
        try {
            return (boolean)classInstance.Get("IsReadOnly");
        } catch (JCNativeException jcne) {
            throw translateException(jcne);
        }
    }
    public boolean getIsSynchronized() throws Throwable, system.ArgumentNullException, system.ArgumentException, system.ObjectDisposedException, system.InvalidOperationException, system.globalization.CultureNotFoundException, system.PlatformNotSupportedException {
        if (classInstance == null)
            throw new UnsupportedOperationException("classInstance is null.");
        try {
            return (boolean)classInstance.Get("IsSynchronized");
        } catch (JCNativeException jcne) {
            throw translateException(jcne);
        }
    }
    public int getCount() throws Throwable, system.ArgumentNullException, system.ArgumentException, system.ObjectDisposedException, system.InvalidOperationException, system.globalization.CultureNotFoundException, system.PlatformNotSupportedException {
        if (classInstance == null)
            throw new UnsupportedOperationException("classInstance is null.");
        try {
            return (int)classInstance.Get("Count");
        } catch (JCNativeException jcne) {
            throw translateException(jcne);
        }
    }
    public NetObject getSyncRoot() throws Throwable, system.ArgumentNullException, system.ArgumentException, system.ObjectDisposedException, system.InvalidOperationException, system.globalization.CultureNotFoundException, system.PlatformNotSupportedException {
        if (classInstance == null)
            throw new UnsupportedOperationException("classInstance is null.");
        try {
            JCObject val = (JCObject)classInstance.Get("SyncRoot");
            return new NetObject(val);
        } catch (JCNativeException jcne) {
            throw translateException(jcne);
        }
    }
    // Static property: wraps the .NET ObjectKeyFrameCollection.Empty singleton.
    public static ObjectKeyFrameCollection getEmpty() throws Throwable, system.ArgumentException, system.ArgumentOutOfRangeException, system.RankException, system.ArrayTypeMismatchException, system.InvalidOperationException, system.ArgumentNullException, system.componentmodel.InvalidEnumArgumentException, system.componentmodel.Win32Exception, system.PlatformNotSupportedException, system.IndexOutOfRangeException {
        if (classType == null)
            throw new UnsupportedOperationException("classType is null.");
        try {
            JCObject val = (JCObject)classType.Get("Empty");
            return new ObjectKeyFrameCollection(val);
        } catch (JCNativeException jcne) {
            throw translateException(jcne);
        }
    }
    // Instance Events section
}
|
/*
* Copyright (c) 1997, 2018 Oracle and/or its affiliates. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Distribution License v. 1.0, which is available at
* http://www.eclipse.org/org/documents/edl-v10.php.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
package javax.activation;
import java.io.*;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.security.AccessController;
import java.security.PrivilegedAction;
/**
 * The CommandInfo class is used by CommandMap implementations to
 * describe the results of command requests. It provides the requestor
 * with both the verb requested, as well as an instance of the
 * bean. There is also a method that will return the name of the
 * class that implements the command but <i>it is not guaranteed to
 * return a valid value</i>. The reason for this is to allow CommandMap
 * implementations that subclass CommandInfo to provide special
 * behavior. For example a CommandMap could dynamically generate
 * JavaBeans. In this case, it might not be possible to create an
 * object with all the correct state information solely from the class
 * name.
 */
public class CommandInfo {
    private String verb;
    private String className;

    /**
     * The Constructor for CommandInfo.
     * @param verb The command verb this CommandInfo describes.
     * @param className The command's fully qualified class name.
     */
    public CommandInfo(String verb, String className) {
	this.verb = verb;
	this.className = className;
    }

    /**
     * Return the command verb.
     *
     * @return the command verb.
     */
    public String getCommandName() {
	return verb;
    }

    /**
     * Return the command's class name. <i>This method MAY return null in
     * cases where a CommandMap subclassed CommandInfo for its
     * own purposes.</i> In other words, it might not be possible to
     * create the correct state in the command by merely knowing
     * its class name. <b>DO NOT DEPEND ON THIS METHOD RETURNING
     * A VALID VALUE!</b>
     *
     * @return The class name of the command, or <i>null</i>
     */
    public String getCommandClass() {
	return className;
    }

    /**
     * Return the instantiated JavaBean component.
     * <p>
     * If the current runtime environment supports
     * {@link java.beans.Beans#instantiate Beans.instantiate},
     * use it to instantiate the JavaBeans component. Otherwise, use
     * {@link java.lang.Class#forName Class.forName}.
     * <p>
     * The component class needs to be public.
     * On Java SE 9 and newer, if the component class is in a named module,
     * it needs to be in an exported package.
     * <p>
     * If the bean implements the <code>javax.activation.CommandObject</code>
     * interface, call its <code>setCommandContext</code> method.
     * <p>
     * If the DataHandler parameter is null, then the bean is
     * instantiated with no data. NOTE: this may be useful
     * if for some reason the DataHandler that is passed in
     * throws IOExceptions when this method attempts to
     * access its InputStream. It will allow the caller to
     * retrieve a reference to the bean if it can be
     * instantiated.
     * <p>
     * If the bean does NOT implement the CommandObject interface,
     * this method will check if it implements the
     * java.io.Externalizable interface. If it does, the bean's
     * readExternal method will be called if an InputStream
     * can be acquired from the DataHandler.<p>
     *
     * @param dh	The DataHandler that describes the data to be
     *			passed to the command.
     * @param loader	The ClassLoader to be used to instantiate the bean.
     * @return The bean
     * @exception	IOException	for failures reading data
     * @exception	ClassNotFoundException	if command object class can't
     *						be found
     * @see java.beans.Beans#instantiate
     * @see javax.activation.CommandObject
     */
    public Object getCommandObject(DataHandler dh, ClassLoader loader)
			throws IOException, ClassNotFoundException {
	Object new_bean = null;

	// try to instantiate the bean
	new_bean = Beans.instantiate(loader, className);

	// if we got one and it is a CommandObject
	if (new_bean != null) {
	    if (new_bean instanceof CommandObject) {
		((CommandObject)new_bean).setCommandContext(verb, dh);
	    } else if (new_bean instanceof Externalizable) {
		if (dh != null) {
		    InputStream is = dh.getInputStream();
		    if (is != null) {
			((Externalizable)new_bean).readExternal(
					       new ObjectInputStream(is));
		    }
		}
	    }
	}

	return new_bean;
    }

    /**
     * Helper class to invoke Beans.instantiate reflectively or the equivalent
     * with core reflection when module java.desktop is not readable.
     */
    private static final class Beans {
	// Resolved once; null when java.beans is not readable from this module.
	static final Method instantiateMethod;

	static {
	    Method m;
	    try {
		Class<?> c = Class.forName("java.beans.Beans");
		m = c.getDeclaredMethod("instantiate", ClassLoader.class, String.class);
	    } catch (ClassNotFoundException e) {
		m = null;
	    } catch (NoSuchMethodException e) {
		m = null;
	    }
	    instantiateMethod = m;
	}

	/**
	 * Equivalent to invoking java.beans.Beans.instantiate(loader, cn)
	 */
	static Object instantiate(ClassLoader loader, String cn)
				throws IOException, ClassNotFoundException {

	    Exception exception;

	    if (instantiateMethod != null) {
		// invoke Beans.instantiate
		try {
		    return instantiateMethod.invoke(null, loader, cn);
		} catch (InvocationTargetException e) {
		    exception = e;
		} catch (IllegalAccessException e) {
		    exception = e;
		}
	    } else {
		SecurityManager security = System.getSecurityManager();
		if (security != null) {
		    // if it's ok with the SecurityManager, it's ok with me.
		    // strip any array prefix before checking package access
		    String cname = cn.replace('/', '.');
		    if (cname.startsWith("[")) {
			int b = cname.lastIndexOf('[') + 2;
			if (b > 1 && b < cname.length()) {
			    cname = cname.substring(b);
			}
		    }
		    int i = cname.lastIndexOf('.');
		    if (i != -1) {
			security.checkPackageAccess(cname.substring(0, i));
		    }
		}

		// Beans.instantiate specified to use SCL when loader is null
		if (loader == null) {
		    // FIX: parameterize PrivilegedAction (was a raw type) — no cast needed
		    loader = AccessController.doPrivileged(
			new PrivilegedAction<ClassLoader>() {
			    public ClassLoader run() {
				ClassLoader cl = null;
				try {
				    cl = ClassLoader.getSystemClassLoader();
				} catch (SecurityException ex) { }
				return cl;
			    }
			});
		}
		Class<?> beanClass = Class.forName(cn, true, loader);
		try {
		    // FIX: Class.newInstance() is deprecated since Java 9; the
		    // replacement is observationally identical here because the
		    // surrounding catch wraps every Exception the same way.
		    return beanClass.getDeclaredConstructor().newInstance();
		} catch (Exception ex) {
		    throw new ClassNotFoundException(beanClass + ": " + ex, ex);
		}
	    }
	    return null;
	}
    }
}
|
package com.bumptech.glide.annotation.compiler;
import static com.bumptech.glide.annotation.compiler.test.Util.appResource;
import static com.bumptech.glide.annotation.compiler.test.Util.emptyAppModule;
import static com.bumptech.glide.annotation.compiler.test.Util.glide;
import static com.bumptech.glide.annotation.compiler.test.Util.subpackage;
import static com.google.testing.compile.CompilationSubject.assertThat;
import static com.google.testing.compile.Compiler.javac;
import com.bumptech.glide.annotation.compiler.test.CompilationProvider;
import com.bumptech.glide.annotation.compiler.test.ReferencedResource;
import com.bumptech.glide.annotation.compiler.test.RegenerateResourcesRule;
import com.bumptech.glide.annotation.compiler.test.Util;
import com.google.common.truth.Truth;
import com.google.testing.compile.Compilation;
import java.io.IOException;
import javax.tools.JavaFileObject;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
/**
 * Verifies the output of the processor with a simple single extension type.
 *
 * <p>Compiles an empty AppGlideModule together with a test extension and asserts
 * that each generated source file matches its checked-in golden resource.
 */
@RunWith(JUnit4.class)
public class GlideExtensionWithTypeTest implements CompilationProvider {
  @Rule
  public final RegenerateResourcesRule regenerateResourcesRule = new RegenerateResourcesRule(this);

  private Compilation compilation;

  @Before
  public void setUp() {
    compilation =
        javac()
            .withProcessors(new GlideAnnotationProcessor())
            .compile(emptyAppModule(), forResource("ExtensionWithType.java"));
    assertThat(compilation).succeededWithoutWarnings();
  }

  @Test
  public void compilation_generatesAllExpectedFiles() {
    Truth.assertThat(compilation.generatedSourceFiles()).hasSize(7);
  }

  @Test
  public void compilation_generatesExpectedGlideOptionsClass() throws IOException {
    assertThat(compilation)
        .generatedSourceFile(subpackage("GlideOptions"))
        .hasSourceEquivalentTo(forResource("GlideOptions.java"));
  }

  @Test
  @ReferencedResource
  public void compilation_generatesExpectedGlideRequestClass() throws IOException {
    assertThat(compilation)
        .generatedSourceFile(subpackage("GlideRequest"))
        .hasSourceEquivalentTo(appResource("GlideRequest.java"));
  }

  @Test
  public void compilation_generatesExpectedGlideRequestsClass() throws IOException {
    assertThat(compilation)
        .generatedSourceFile(subpackage("GlideRequests"))
        .hasSourceEquivalentTo(forResource("GlideRequests.java"));
  }

  // FIX: renamed from compilationGeneratesExpectedGlideAppClass to match the
  // compilation_generatesExpectedX naming convention of every sibling test.
  @Test
  @ReferencedResource
  public void compilation_generatesExpectedGlideAppClass() throws IOException {
    assertThat(compilation)
        .generatedSourceFile(subpackage("GlideApp"))
        .hasSourceEquivalentTo(appResource("GlideApp.java"));
  }

  @Test
  @ReferencedResource
  public void compilation_generatesExpectedGeneratedAppGlideModuleImpl() throws IOException {
    assertThat(compilation)
        .generatedSourceFile(glide("GeneratedAppGlideModuleImpl"))
        .hasSourceEquivalentTo(appResource("GeneratedAppGlideModuleImpl.java"));
  }

  @Test
  @ReferencedResource
  public void compilation_generatesExpectedGeneratedRequestManagerFactory() throws IOException {
    assertThat(compilation)
        .generatedSourceFile(glide("GeneratedRequestManagerFactory"))
        .hasSourceEquivalentTo(appResource("GeneratedRequestManagerFactory.java"));
  }

  /** Resolves a golden resource relative to this test class's resource directory. */
  private JavaFileObject forResource(String name) {
    return Util.forResource(getClass().getSimpleName(), name);
  }

  @Override
  public Compilation getCompilation() {
    return compilation;
  }
}
|
/*
* Copyright 2010 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version
* 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.fusesource.fabric.activemq.facade;
import org.apache.activemq.broker.jmx.QueueViewMBean;
/**
 * <p>
 * Facade over ActiveMQ's {@link QueueViewMBean} that additionally exposes a
 * stable identifier for the underlying JMX resource.
 * </p>
 *
 * @author <a href="http://hiramchirino.com">Hiram Chirino</a>
 */
public interface QueueViewFacade extends QueueViewMBean {
    /**
     * @return a unique id for this resource, typically a JMX ObjectName
     * @throws Exception if the identifier cannot be resolved from the remote broker
     */
    String getId() throws Exception;
}
|
/**
* eZmax API Definition (Full)
 * This API exposes all the functionalities for the eZmax and eZsign applications.
*
* The version of the OpenAPI document: 1.1.9
* Contact: support-api@ezmax.ca
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
package org.openapitools.client.model;
import io.swagger.annotations.*;
import com.google.gson.annotations.SerializedName;
/**
 * The Signer requirement of the Ezsignformfieldgroup. **All** means anyone can fill it, **One** means a specific person must fill it.
 **/
@ApiModel(description = "The Signer requirement of the Ezsignformfieldgroup. **All** means anyone can fill it, **One** means a specific person must fill it.")
public class FieldEEzsignformfieldgroupSignerrequirement {
  /**
   * Two instances are equal whenever they share this exact class; the model
   * carries no state, so there is nothing further to compare.
   */
  @Override
  public boolean equals(Object o) {
    if (o == this) {
      return true;
    }
    return o != null && getClass() == o.getClass();
  }
  /** Constant hash, consistent with equals: all instances are interchangeable. */
  @Override
  public int hashCode() {
    return 17;
  }
  /** Renders the (empty) model in the generator's standard class-dump format. */
  @Override
  public String toString() {
    return "class FieldEEzsignformfieldgroupSignerrequirement {\n" + "}\n";
  }
}
|
/*LC983: Minimum Cost For Tickets
https://leetcode.com/problems/minimum-cost-for-tickets/
In a country popular for train travel, you have planned
some train travelling one year in advance. The days of
the year that you will travel is given as an array days.
Each day is an integer from 1 to 365.
Train tickets are sold in 3 different ways:
a 1-day pass is sold for costs[0] dollars;
a 7-day pass is sold for costs[1] dollars;
a 30-day pass is sold for costs[2] dollars.
The passes allow that many days of consecutive travel.
For example, if we get a 7-day pass on day 2, then we can travel for 7 days: day 2, 3, 4, 5, 6, 7, and 8.
Return the minimum number of dollars you need to
travel every day in the given list of days.
Example 1:
Input: days = [1,4,6,7,8,20], costs = [2,7,15]
Output: 11
Explanation:
For example, here is one way to buy passes that lets you travel your travel plan:
On day 1, you bought a 1-day pass for costs[0] = $2, which covered day 1.
On day 3, you bought a 7-day pass for costs[1] = $7, which covered days 3, 4, ..., 9.
On day 20, you bought a 1-day pass for costs[0] = $2, which covered day 20.
In total you spent $11 and covered all the days of your travel.
Example 2:
Input: days = [1,2,3,4,5,6,7,8,9,10,30,31], costs = [2,7,15]
Output: 17
Explanation:
For example, here is one way to buy passes that lets you travel your travel plan:
On day 1, you bought a 30-day pass for costs[2] = $15 which covered days 1, 2, ..., 30.
On day 31, you bought a 1-day pass for costs[0] = $2 which covered day 31.
In total you spent $17 and covered all the days of your travel.
Note:
1 <= days.length <= 365
1 <= days[i] <= 365
days is in strictly increasing order.
costs.length == 3
1 <= costs[i] <= 1000
Let minCost(i) denote the minimum cost that will be payed for all trips on days 1 to day 365.
The desired answer is then minCost(365).
Calculation minCost(i):
If no trip on day i, then minCost(i) = minCost(i-1).
minCost(i)=0 for all i ≤ 0.
Otherwise:
If a 1-day pass bought on day i: minCost(i) = minCost(i - 1) + costs[0].
If a 7-day pass ending on day i: minCost(i) = min(minCost(i − 7), minCost(i − 6), …, minCost(i − 1)) + costs[1].
But since minCost is non-decreasing (adding a day never reduces the minCost):
minCost(i) = minCost(i − 7) + costs[1]
The same reasoning applies to the 30-day pass with costs[2].*/
class Sln{
    /**
     * Minimum total cost to cover every travel day with 1-, 7- and 30-day passes.
     *
     * Bottom-up DP over the calendar: best[d] is the cheapest way to cover all
     * travel days in 1..d. Non-travel days inherit the previous day's cost;
     * travel days take the cheapest of the three pass options ending on day d.
     *
     * @param days  strictly increasing travel days, each in 1..365
     * @param costs prices of the 1-day, 7-day and 30-day passes, in that order
     * @return minimum dollars needed to travel on every listed day
     */
    public int mincostTickets(int[] days, int[] costs) {
        // Mark which calendar days require travel.
        boolean[] travel = new boolean[366];
        for (int d : days) {
            travel[d] = true;
        }
        int[] best = new int[366]; // best[0] == 0: nothing to cover yet
        for (int d = 1; d <= 365; d++) {
            if (!travel[d]) {
                best[d] = best[d - 1];
            } else {
                // Math.max clamps the lookback so early days don't index below 0.
                int viaDay = best[d - 1] + costs[0];
                int viaWeek = best[Math.max(0, d - 7)] + costs[1];
                int viaMonth = best[Math.max(0, d - 30)] + costs[2];
                best[d] = Math.min(viaDay, Math.min(viaWeek, viaMonth));
            }
        }
        return best[365];
    }
}
|
package com.pgmacdesign.pgmactips.firebaseutilities;
import com.google.gson.annotations.SerializedName;
import java.util.Map;
/**
 * Class for de/serializing push notification objects
 * Created by pmacdowell on 10/19/2016.
 *
 //Sample Below
 {
     "data":{
         "customTag1":"5817d057ccc869088c3c8c82",
         "customTag2":"testing",
         "customTag3":1482190375,
         "customTag4":"customChat",
         "customTag5":"581cbd34ccc869090c35579a"
     },
     "notification":{
         "badge":23,
         "body":"testing",
         "sound":"default",
         "title":"My Title"
     },
     "priority":"high",
     "to":"firebase_cloud_messaging_id_goes_here"
 }
 */
public class PushNotificationsPojo {
    public PushNotificationsPojo(){
        //This needs to default to high for IOS Devices
        this.priority = "high";
    }
    //Variables (field order preserved: gson emits JSON members in declaration order)
    @SerializedName("to")
    private String to;
    @SerializedName("pushNotificationTag")
    private Integer pushNotificationTag;
    @SerializedName("pushNotificationUUID")
    private String pushNotificationUUID;
    @SerializedName("data")
    private Map<String, Object> mapData;
    @SerializedName("notification")
    private CustomNotificationObject notification;
    @SerializedName("priority")
    private String priority;
    @SerializedName("expirationTime")
    private Long expirationTime;

    public Long getExpirationTime() {
        return this.expirationTime;
    }
    public void setExpirationTime(Long expirationTime) {
        this.expirationTime = expirationTime;
    }
    public CustomNotificationObject getNotification() {
        return this.notification;
    }
    public void setNotification(CustomNotificationObject notification) {
        this.notification = notification;
    }
    public String getPriority() {
        return this.priority;
    }
    public void setPriority(String priority) {
        this.priority = priority;
    }
    public String getTo() {
        return this.to;
    }
    public void setTo(String to) {
        this.to = to;
    }
    public Integer getPushNotificationTag() {
        return this.pushNotificationTag;
    }
    public void setPushNotificationTag(Integer pushNotificationTag) {
        this.pushNotificationTag = pushNotificationTag;
    }
    public String getPushNotificationUUID() {
        return this.pushNotificationUUID;
    }
    public void setPushNotificationUUID(String pushNotificationUUID) {
        this.pushNotificationUUID = pushNotificationUUID;
    }
    public Map<String, Object> getMapData() {
        return this.mapData;
    }
    public void setMapData(Map<String, Object> mapData) {
        this.mapData = mapData;
    }

    /**
     * Custom Notifications Object: the "notification" member of the push payload.
     */
    public static class CustomNotificationObject {
        @SerializedName("body")
        private String body;
        @SerializedName("title")
        private String title;
        @SerializedName("sound")
        private String sound;
        @SerializedName("badge")
        private Integer badge;
        @SerializedName("otherData")
        private Map<String, Object> otherData;

        public Map<String, Object> getOtherData() {
            return this.otherData;
        }
        public void setOtherData(Map<String, Object> otherData) {
            this.otherData = otherData;
        }
        // Lazily backfills a null badge with 0 so callers never see null.
        public Integer getBadge() {
            if(this.badge == null){
                this.badge = 0;
            }
            return this.badge;
        }
        public void setBadge(Integer badge) {
            this.badge = badge;
        }
        public String getBody() {
            return this.body;
        }
        public void setBody(String body) {
            this.body = body;
        }
        public String getTitle() {
            return this.title;
        }
        public void setTitle(String title) {
            this.title = title;
        }
        public String getSound() {
            return this.sound;
        }
        public void setSound(String sound) {
            this.sound = sound;
        }
    }
}
|
package com.bidding.application.serviceImpl;
import java.time.LocalDateTime;
import java.util.List;
import java.util.Optional;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.domain.Page;
import org.springframework.data.domain.PageRequest;
import org.springframework.data.domain.Pageable;
import org.springframework.security.core.context.SecurityContextHolder;
import org.springframework.security.core.userdetails.UserDetails;
import org.springframework.stereotype.Service;
import com.bidding.application.dto.BidRequestDto;
import com.bidding.application.entity.Auction;
import com.bidding.application.entity.Bid;
import com.bidding.application.entity.BidStatus;
import com.bidding.application.exception.BidNotFoundException;
import com.bidding.application.repository.AuctionRepository;
import com.bidding.application.repository.BidRepository;
import com.bidding.application.service.AuctionService;
import com.bidding.application.validator.AuctionValidator;
@Service
public class AuctionServiceImpl implements AuctionService {
	@Autowired
	private AuctionRepository auctionRepo;
	@Autowired
	private BidRepository bidRepo;
	@Autowired
	private AuctionValidator auctionValidator;

	/**
	 * Returns one page of auctions in the given status.
	 *
	 * @param status      auction status to filter on
	 * @param pageRequest zero-based page index
	 * @param pageSize    number of auctions per page
	 * @return the page content wrapped in an Optional (possibly an empty list)
	 */
	@Override
	public Optional<List<Auction>> fetchAuctionByStatus(String status, Integer pageRequest, Integer pageSize) {
		Pageable pageable = PageRequest.of(pageRequest, pageSize);
		Page<Auction> auctions = auctionRepo.findAuctionByAuctionStatus(status, pageable);
		return Optional.of(auctions.toList());
	}

	/**
	 * Places a bid on an auction for the currently authenticated user.
	 *
	 * <p>The bid is ACCEPTED when it meets the current price, the auction has not
	 * ended and the auction is RUNNING; otherwise it is recorded as REJECTED.
	 * Returns an empty Optional when the auction does not exist or fails
	 * validation (previously a missing auction caused a NullPointerException via
	 * {@code Optional.of(null).get()}).
	 *
	 * @param auctionCode   code of the auction being bid on
	 * @param bidRequestDto the bid details (price)
	 * @return the persisted bid, or empty if no bid was recorded
	 */
	@Override
	public Optional<Bid> placeBid(Integer auctionCode, BidRequestDto bidRequestDto) {
		Optional<Auction> maybeAuction = fetchAuctionByAuctionCode(auctionCode);
		if (!maybeAuction.isPresent()) {
			return Optional.empty();
		}
		Auction auction = maybeAuction.get();
		if (!auctionValidator.validateBid(auction)) {
			return Optional.empty();
		}
		Bid bid;
		if (auction.getCurrBidPrice() <= bidRequestDto.getBidPrice()
				&& LocalDateTime.now().isBefore(auction.getEndDate())
				&& "RUNNING".equals(auction.getAuctionStatus().toString())) {
			// Accepted: bump the asking price by the step rate and persist the auction.
			auction.setCurrBidPrice(bidRequestDto.getBidPrice() + auction.getStepRate());
			bid = new Bid(auctionCode, getUsernameFromSecurityContext(), LocalDateTime.now(),
					bidRequestDto.getBidPrice(), BidStatus.ACCEPTED);
			auctionRepo.save(auction);
		} else {
			// Too low, expired or not running: record the attempt as rejected.
			bid = new Bid(auctionCode, getUsernameFromSecurityContext(), LocalDateTime.now(),
					bidRequestDto.getBidPrice(), BidStatus.REJECTED);
		}
		bidRepo.save(bid);
		return Optional.of(bid);
	}

	/**
	 * Looks up an auction by its code.
	 *
	 * @param auctionCode the auction code
	 * @return the auction, or empty when none exists (ofNullable guards against a
	 *         null repository result, which Optional.of would turn into an NPE)
	 */
	@Override
	public Optional<Auction> fetchAuctionByAuctionCode(Integer auctionCode) {
		return Optional.ofNullable(auctionRepo.findByAuctionCode(auctionCode));
	}

	/** Resolves the username of the currently authenticated principal. */
	private String getUsernameFromSecurityContext() {
		UserDetails userDetails = (UserDetails) SecurityContextHolder.getContext().getAuthentication().getPrincipal();
		return userDetails.getUsername();
	}

	/**
	 * Fetches every bid placed by the given user.
	 *
	 * @param username the buyer's username
	 * @return the non-empty list of bids
	 * @throws BidNotFoundException when the user has no bids (also covers a null
	 *         repository result, which previously surfaced as an NPE)
	 */
	@Override
	public Optional<List<Bid>> fetchBidsByBuyer(String username) {
		List<Bid> bids = bidRepo.findByUsername(username);
		if (bids == null || bids.isEmpty()) {
			throw new BidNotFoundException();
		}
		return Optional.of(bids);
	}
}
|
package pl.pwojcik.patterns.observer;
public class SecondObserver implements Observer {
    /**
     * Logs the updated value to stdout.
     *
     * <p>Fix: the original message concatenated {@code s + "is now"} with no
     * separator, printing e.g. "in fooIs now" — a space was added before "is".
     *
     * @param s name/label of the observed value
     * @param i the new value
     */
    public void update(String s, int i) {
        System.out.println("SecondObserver: my value in " + s + " is now: " + i);
    }
}
|
/*
* Copyright 2012-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellecteu.catalyst.metadata;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertSame;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
/**
* Tests for {@link DependenciesCapability}.
*
* @author Stephane Nicoll
*/
public class DependenciesCapabilityTests {

	// JUnit4 rule style: thrown.expect(...) must be configured BEFORE the
	// statement expected to throw; statements after the throwing call never run.
	@Rule
	public final ExpectedException thrown = ExpectedException.none();

	// Builds a capability containing a single group with the given dependencies.
	private static DependenciesCapability createDependenciesCapability(String groupName,
			Dependency... dependencies) {
		DependenciesCapability capability = new DependenciesCapability();
		DependencyGroup group = createDependencyGroup(groupName, dependencies);
		capability.getContent().add(group);
		return capability;
	}

	// Builds a named group wrapping the given dependencies.
	private static DependencyGroup createDependencyGroup(String groupName,
			Dependency... dependencies) {
		DependencyGroup group = DependencyGroup.create(groupName);
		for (Dependency dependency : dependencies) {
			group.getContent().add(dependency);
		}
		return group;
	}

	// validate() indexes dependencies by id; unknown ids resolve to null.
	@Test
	public void indexedDependencies() {
		Dependency dependency = Dependency.withId("first");
		Dependency dependency2 = Dependency.withId("second");
		DependenciesCapability capability = createDependenciesCapability("foo",
				dependency, dependency2);
		capability.validate();
		assertSame(dependency, capability.get("first"));
		assertSame(dependency2, capability.get("second"));
		assertNull(capability.get("anotherId"));
	}

	// Duplicate ids are rejected at validate() time, not at add() time.
	@Test
	public void addTwoDependenciesWithSameId() {
		Dependency dependency = Dependency.withId("conflict");
		Dependency dependency2 = Dependency.withId("conflict");
		DependenciesCapability capability = createDependenciesCapability("foo",
				dependency, dependency2);
		thrown.expect(IllegalArgumentException.class);
		thrown.expectMessage("conflict");
		capability.validate();
	}

	// Aliases resolve to the same instance as the canonical id.
	@Test
	public void addDependencyWithAliases() {
		Dependency dependency = Dependency.withId("first");
		dependency.getAliases().add("alias1");
		dependency.getAliases().add("alias2");
		DependenciesCapability capability = createDependenciesCapability("foo",
				dependency);
		capability.validate();
		assertSame(dependency, capability.get("first"));
		assertSame(dependency, capability.get("alias1"));
		assertSame(dependency, capability.get("alias2"));
	}

	// An alias colliding with another dependency's id (even across groups)
	// must fail validation.
	@Test
	public void aliasClashWithAnotherDependency() {
		Dependency dependency = Dependency.withId("first");
		dependency.getAliases().add("alias1");
		dependency.getAliases().add("alias2");
		Dependency dependency2 = Dependency.withId("alias2");
		DependenciesCapability capability = new DependenciesCapability();
		capability.getContent().add(createDependencyGroup("foo", dependency));
		capability.getContent().add(createDependencyGroup("bar", dependency2));
		thrown.expect(IllegalArgumentException.class);
		thrown.expectMessage("alias2");
		capability.validate();
	}

	// merge() merges same-named groups and appends new groups; after the merge
	// all three ids resolve while only two groups exist.
	@Test
	public void mergeAddEntry() {
		DependenciesCapability capability = createDependenciesCapability("foo",
				Dependency.withId("first"), Dependency.withId("second"));
		DependenciesCapability anotherCapability = createDependenciesCapability("foo",
				Dependency.withId("bar"), Dependency.withId("biz"));
		anotherCapability.getContent()
				.add(createDependencyGroup("bar", Dependency.withId("third")));
		capability.merge(anotherCapability);
		assertEquals(2, capability.getContent().size());
		assertNotNull(capability.get("first"));
		assertNotNull(capability.get("second"));
		assertNotNull(capability.get("third"));
	}

	// A group-level version range is applied as a default; an explicit
	// per-dependency range wins over it.
	@Test
	public void addDefaultVersionRange() {
		Dependency first = Dependency.withId("first");
		Dependency second = Dependency.withId("second");
		second.setVersionRange("1.2.3.RELEASE");
		DependencyGroup group = createDependencyGroup("test", first, second);
		group.setVersionRange("1.2.0.RELEASE");
		DependenciesCapability capability = new DependenciesCapability();
		capability.getContent().add(group);
		capability.validate();
		assertEquals("1.2.0.RELEASE", capability.get("first").getVersionRange());
		assertEquals("1.2.3.RELEASE", capability.get("second").getVersionRange());
	}

	// Same default-vs-override semantics for the BOM attribute.
	@Test
	public void addDefaultBom() {
		Dependency first = Dependency.withId("first");
		Dependency second = Dependency.withId("second");
		second.setBom("da-bom");
		DependencyGroup group = createDependencyGroup("test", first, second);
		group.setBom("test-bom");
		DependenciesCapability capability = new DependenciesCapability();
		capability.getContent().add(group);
		capability.validate();
		assertEquals("test-bom", capability.get("first").getBom());
		assertEquals("da-bom", capability.get("second").getBom());
	}

	// Same default-vs-override semantics for the repository attribute.
	@Test
	public void addDefaultRepository() {
		Dependency first = Dependency.withId("first");
		Dependency second = Dependency.withId("second");
		second.setRepository("da-repo");
		DependencyGroup group = createDependencyGroup("test", first, second);
		group.setRepository("test-repo");
		DependenciesCapability capability = new DependenciesCapability();
		capability.getContent().add(group);
		capability.validate();
		assertEquals("test-repo", capability.get("first").getRepository());
		assertEquals("da-repo", capability.get("second").getRepository());
	}
}
|
package dev.tr7zw.firstperson.forge.listener;
import dev.tr7zw.firstperson.FirstPersonModelCore;
import net.minecraftforge.client.event.RenderHandEvent;
import net.minecraftforge.eventbus.api.SubscribeEvent;
public class RenderHandEventListener {
    /**
     * Suppresses the vanilla first-person hand rendering while the mod is
     * enabled and the "vanillaHands" option is off. Guard clauses keep the
     * original short-circuit order: the config is only read when the mod is
     * enabled.
     */
    @SubscribeEvent
    public void onRender(RenderHandEvent e) {
        if (!FirstPersonModelCore.enabled) {
            return;
        }
        if (FirstPersonModelCore.config.firstPerson.vanillaHands) {
            return;
        }
        e.setCanceled(true);
    }
}
|
package cn.hutool.core.date.format;
import java.text.DateFormat;
import java.text.FieldPosition;
import java.text.Format;
import java.text.ParseException;
import java.text.ParsePosition;
import java.util.Calendar;
import java.util.Date;
import java.util.Locale;
import java.util.TimeZone;
/**
 * <p>
 * FastDateFormat is a thread-safe alternative to {@link java.text.SimpleDateFormat}.
 * </p>
 *
 * <p>
 * Obtain instances through the static factory methods: <br>
 * {@link #getInstance(String, TimeZone, Locale)}<br>
 * {@link #getDateInstance(int, TimeZone, Locale)}<br>
 * {@link #getTimeInstance(int, TimeZone, Locale)}<br>
 * {@link #getDateTimeInstance(int, int, TimeZone, Locale)}
 * </p>
 *
 * <p>
 * Formatting is delegated to an immutable {@link FastDatePrinter} and parsing
 * to an immutable {@link FastDateParser}, which is what makes instances safe
 * to share across threads.
 * </p>
 *
 * Thanks to Apache Commons Lang 3.5
 * @since 2.16.2
 */
public class FastDateFormat extends Format implements DateParser, DatePrinter {
	private static final long serialVersionUID = 8097890768636183236L;

	/** FULL locale dependent date or time style. */
	public static final int FULL = DateFormat.FULL;
	/** LONG locale dependent date or time style. */
	public static final int LONG = DateFormat.LONG;
	/** MEDIUM locale dependent date or time style. */
	public static final int MEDIUM = DateFormat.MEDIUM;
	/** SHORT locale dependent date or time style. */
	public static final int SHORT = DateFormat.SHORT;

	/** Instance cache keyed on pattern, time zone and locale. */
	private static final FormatCache<FastDateFormat> CACHE = new FormatCache<FastDateFormat>(){
		@Override
		protected FastDateFormat createInstance(final String pattern, final TimeZone timeZone, final Locale locale) {
			return new FastDateFormat(pattern, timeZone, locale);
		}
	};

	/** Formatting delegate (immutable, thread-safe). */
	private final FastDatePrinter printer;
	/** Parsing delegate (immutable, thread-safe). */
	private final FastDateParser parser;

	// -----------------------------------------------------------------------
	/**
	 * Gets a FastDateFormat instance using the default pattern and locale.
	 *
	 * @return FastDateFormat
	 */
	public static FastDateFormat getInstance() {
		return CACHE.getInstance();
	}

	/**
	 * Gets a cached FastDateFormat instance using the default locale.
	 *
	 * @param pattern pattern in the same format as {@link java.text.SimpleDateFormat}
	 * @return FastDateFormat
	 * @throws IllegalArgumentException if the pattern is invalid
	 */
	public static FastDateFormat getInstance(final String pattern) {
		return CACHE.getInstance(pattern, null, null);
	}

	/**
	 * Gets a cached FastDateFormat instance.
	 *
	 * @param pattern pattern in the same format as {@link java.text.SimpleDateFormat}
	 * @param timeZone the {@link TimeZone} to use
	 * @return FastDateFormat
	 * @throws IllegalArgumentException if the pattern is invalid
	 */
	public static FastDateFormat getInstance(final String pattern, final TimeZone timeZone) {
		return CACHE.getInstance(pattern, timeZone, null);
	}

	/**
	 * Gets a cached FastDateFormat instance.
	 *
	 * @param pattern pattern in the same format as {@link java.text.SimpleDateFormat}
	 * @param locale the {@link Locale} to use
	 * @return FastDateFormat
	 * @throws IllegalArgumentException if the pattern is invalid
	 */
	public static FastDateFormat getInstance(final String pattern, final Locale locale) {
		return CACHE.getInstance(pattern, null, locale);
	}

	/**
	 * Gets a cached FastDateFormat instance.
	 *
	 * @param pattern pattern in the same format as {@link java.text.SimpleDateFormat}
	 * @param timeZone the {@link TimeZone} to use
	 * @param locale the {@link Locale} to use
	 * @return FastDateFormat
	 * @throws IllegalArgumentException if the pattern is invalid
	 */
	public static FastDateFormat getInstance(final String pattern, final TimeZone timeZone, final Locale locale) {
		return CACHE.getInstance(pattern, timeZone, locale);
	}

	// -----------------------------------------------------------------------
	/**
	 * Gets a cached date-only FastDateFormat instance.
	 *
	 * @param style date style: FULL, LONG, MEDIUM, or SHORT
	 * @return a localized FastDateFormat
	 */
	public static FastDateFormat getDateInstance(final int style) {
		return CACHE.getDateInstance(style, null, null);
	}

	/**
	 * Gets a cached date-only FastDateFormat instance.
	 *
	 * @param style date style: FULL, LONG, MEDIUM, or SHORT
	 * @param locale the {@link Locale} to use
	 * @return a localized FastDateFormat
	 */
	public static FastDateFormat getDateInstance(final int style, final Locale locale) {
		return CACHE.getDateInstance(style, null, locale);
	}

	/**
	 * Gets a cached date-only FastDateFormat instance.
	 *
	 * @param style date style: FULL, LONG, MEDIUM, or SHORT
	 * @param timeZone the {@link TimeZone} to use
	 * @return a localized FastDateFormat
	 */
	public static FastDateFormat getDateInstance(final int style, final TimeZone timeZone) {
		return CACHE.getDateInstance(style, timeZone, null);
	}

	/**
	 * Gets a cached date-only FastDateFormat instance.
	 *
	 * @param style date style: FULL, LONG, MEDIUM, or SHORT
	 * @param timeZone the {@link TimeZone} to use
	 * @param locale the {@link Locale} to use
	 * @return a localized FastDateFormat
	 */
	public static FastDateFormat getDateInstance(final int style, final TimeZone timeZone, final Locale locale) {
		return CACHE.getDateInstance(style, timeZone, locale);
	}

	// -----------------------------------------------------------------------
	/**
	 * Gets a cached time-only FastDateFormat instance.
	 *
	 * @param style time style: FULL, LONG, MEDIUM, or SHORT
	 * @return a localized FastDateFormat
	 */
	public static FastDateFormat getTimeInstance(final int style) {
		return CACHE.getTimeInstance(style, null, null);
	}

	/**
	 * Gets a cached time-only FastDateFormat instance.
	 *
	 * @param style time style: FULL, LONG, MEDIUM, or SHORT
	 * @param locale the {@link Locale} to use
	 * @return a localized FastDateFormat
	 */
	public static FastDateFormat getTimeInstance(final int style, final Locale locale) {
		return CACHE.getTimeInstance(style, null, locale);
	}

	/**
	 * Gets a cached time-only FastDateFormat instance.
	 *
	 * @param style time style: FULL, LONG, MEDIUM, or SHORT
	 * @param timeZone optional time zone, overrides time zone of formatted time
	 * @return a localized FastDateFormat
	 */
	public static FastDateFormat getTimeInstance(final int style, final TimeZone timeZone) {
		return CACHE.getTimeInstance(style, timeZone, null);
	}

	/**
	 * Gets a cached time-only FastDateFormat instance.
	 *
	 * @param style time style: FULL, LONG, MEDIUM, or SHORT
	 * @param timeZone optional time zone, overrides time zone of formatted time
	 * @param locale the {@link Locale} to use
	 * @return a localized FastDateFormat
	 */
	public static FastDateFormat getTimeInstance(final int style, final TimeZone timeZone, final Locale locale) {
		return CACHE.getTimeInstance(style, timeZone, locale);
	}

	// -----------------------------------------------------------------------
	/**
	 * Gets a cached date-and-time FastDateFormat instance.
	 *
	 * @param dateStyle date style: FULL, LONG, MEDIUM, or SHORT
	 * @param timeStyle time style: FULL, LONG, MEDIUM, or SHORT
	 * @return a localized FastDateFormat
	 */
	public static FastDateFormat getDateTimeInstance(final int dateStyle, final int timeStyle) {
		return CACHE.getDateTimeInstance(dateStyle, timeStyle, null, null);
	}

	/**
	 * Gets a cached date-and-time FastDateFormat instance.
	 *
	 * @param dateStyle date style: FULL, LONG, MEDIUM, or SHORT
	 * @param timeStyle time style: FULL, LONG, MEDIUM, or SHORT
	 * @param locale the {@link Locale} to use
	 * @return a localized FastDateFormat
	 */
	public static FastDateFormat getDateTimeInstance(final int dateStyle, final int timeStyle, final Locale locale) {
		return CACHE.getDateTimeInstance(dateStyle, timeStyle, null, locale);
	}

	/**
	 * Gets a cached date-and-time FastDateFormat instance.
	 *
	 * @param dateStyle date style: FULL, LONG, MEDIUM, or SHORT
	 * @param timeStyle time style: FULL, LONG, MEDIUM, or SHORT
	 * @param timeZone the {@link TimeZone} to use
	 * @return a localized FastDateFormat
	 */
	public static FastDateFormat getDateTimeInstance(final int dateStyle, final int timeStyle, final TimeZone timeZone) {
		return getDateTimeInstance(dateStyle, timeStyle, timeZone, null);
	}

	/**
	 * Gets a cached date-and-time FastDateFormat instance.
	 *
	 * @param dateStyle date style: FULL, LONG, MEDIUM, or SHORT
	 * @param timeStyle time style: FULL, LONG, MEDIUM, or SHORT
	 * @param timeZone the {@link TimeZone} to use
	 * @param locale the {@link Locale} to use
	 * @return a localized FastDateFormat
	 */
	public static FastDateFormat getDateTimeInstance(final int dateStyle, final int timeStyle, final TimeZone timeZone, final Locale locale) {
		return CACHE.getDateTimeInstance(dateStyle, timeStyle, timeZone, locale);
	}

	// ----------------------------------------------------------------------- Constructor start
	/**
	 * Constructor.
	 *
	 * @param pattern pattern in the same format as {@link java.text.SimpleDateFormat}
	 * @param timeZone non-null {@link TimeZone}
	 * @param locale the {@link Locale} to use
	 * @throws NullPointerException if pattern, timeZone, or locale is null.
	 */
	protected FastDateFormat(final String pattern, final TimeZone timeZone, final Locale locale) {
		this(pattern, timeZone, locale, null);
	}

	/**
	 * Constructor.
	 *
	 * @param pattern pattern in the same format as {@link java.text.SimpleDateFormat}
	 * @param timeZone non-null {@link TimeZone}
	 * @param locale the {@link Locale} to use
	 * @param centuryStart The start of the 100 year period to use as the "default century" for 2 digit year parsing. If centuryStart is null, defaults to now - 80 years
	 * @throws NullPointerException if pattern, timeZone, or locale is null.
	 */
	protected FastDateFormat(final String pattern, final TimeZone timeZone, final Locale locale, final Date centuryStart) {
		printer = new FastDatePrinter(pattern, timeZone, locale);
		parser = new FastDateParser(pattern, timeZone, locale, centuryStart);
	}
	// ----------------------------------------------------------------------- Constructor end

	// ----------------------------------------------------------------------- Format methods
	@Override
	public StringBuffer format(final Object obj, final StringBuffer toAppendTo, final FieldPosition pos) {
		return toAppendTo.append(printer.format(obj));
	}

	@Override
	public String format(final long millis) {
		return printer.format(millis);
	}

	@Override
	public String format(final Date date) {
		return printer.format(date);
	}

	@Override
	public String format(final Calendar calendar) {
		return printer.format(calendar);
	}

	@Override
	public <B extends Appendable> B format(final long millis, final B buf) {
		return printer.format(millis, buf);
	}

	@Override
	public <B extends Appendable> B format(final Date date, final B buf) {
		return printer.format(date, buf);
	}

	@Override
	public <B extends Appendable> B format(final Calendar calendar, final B buf) {
		return printer.format(calendar, buf);
	}

	// ----------------------------------------------------------------------- Parsing
	@Override
	public Date parse(final String source) throws ParseException {
		return parser.parse(source);
	}

	@Override
	public Date parse(final String source, final ParsePosition pos) {
		return parser.parse(source, pos);
	}

	@Override
	public boolean parse(final String source, final ParsePosition pos, final Calendar calendar) {
		return parser.parse(source, pos, calendar);
	}

	@Override
	public Object parseObject(final String source, final ParsePosition pos) {
		return parser.parseObject(source, pos);
	}

	// ----------------------------------------------------------------------- Accessors
	@Override
	public String getPattern() {
		return printer.getPattern();
	}

	@Override
	public TimeZone getTimeZone() {
		return printer.getTimeZone();
	}

	@Override
	public Locale getLocale() {
		return printer.getLocale();
	}

	/**
	 * Estimates the maximum length of a formatted date string.<br>
	 * The actual formatted length will be less than or equal to this value.
	 *
	 * @return the maximum formatted length
	 */
	public int getMaxLengthEstimate() {
		return printer.getMaxLengthEstimate();
	}

	// Basics
	// -----------------------------------------------------------------------
	@Override
	public boolean equals(final Object obj) {
		// Idiomatic negated instanceof (was: "instanceof ... == false"); also false for null.
		if (!(obj instanceof FastDateFormat)) {
			return false;
		}
		final FastDateFormat other = (FastDateFormat) obj;
		// no need to check parser, as it has same invariants as printer
		return printer.equals(other.printer);
	}

	@Override
	public int hashCode() {
		return printer.hashCode();
	}

	@Override
	public String toString() {
		return "FastDateFormat[" + printer.getPattern() + "," + printer.getLocale() + "," + printer.getTimeZone().getID() + "]";
	}
}
|
/* Copyright (C) 2012 Intel Corporation.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* For more about this software visit:
* http://www.01.org/GraphBuilder
*/
package com.intel.hadoop.graphbuilder.preprocess.mapreduce.keyvalue;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import com.intel.hadoop.graphbuilder.graph.Edge;
import com.intel.hadoop.graphbuilder.graph.Vertex;
/**
* Abstract union type of {@code Vertex} and {@code Edge}. Used as intermediate
* map output value to hold either a vertex or an edge.
*
*
* @param <VidType>
* @param <VertexData>
* @param <EdgeData>
*/
public abstract class VertexEdgeUnionType<VidType extends WritableComparable<VidType>, VertexData extends Writable, EdgeData extends Writable>
    implements Writable {

  /** Type-flag value: the union currently holds a vertex. */
  public static final boolean VERTEXVAL = false;

  /** Type-flag value: the union currently holds an edge. */
  public static final boolean EDGEVAL = true;

  /** @return a new, empty vertex-id instance for deserialization. */
  public abstract VidType createVid();

  /** @return a new, empty vertex-data instance for deserialization. */
  public abstract VertexData createVdata();

  /** @return a new, empty edge-data instance for deserialization. */
  public abstract EdgeData createEdata();

  /**
   * Creates an empty value holding fresh vertex and edge instances.
   */
  public VertexEdgeUnionType() {
    vertex = new Vertex<VidType, VertexData>();
    edge = new Edge<VidType, EdgeData>();
  }

  /**
   * Initializes the union with either a vertex or an edge; the unused side is
   * cleared to avoid keeping a stale reference.
   *
   * @param flag  {@link #VERTEXVAL} if {@code value} is a {@code Vertex},
   *              {@link #EDGEVAL} if it is an {@code Edge}
   * @param value the vertex or edge instance matching {@code flag}
   */
  @SuppressWarnings("unchecked")
  public void init(boolean flag, Object value) {
    this.flag = flag;
    if (flag == VERTEXVAL) {
      this.vertex = (Vertex<VidType, VertexData>) value;
      this.edge = null;
    } else {
      this.edge = (Edge<VidType, EdgeData>) value;
      this.vertex = null;
    }
  }

  /**
   * @return the type flag of the value.
   */
  public boolean flag() {
    return flag;
  }

  /**
   * @return the vertex value, used only when flag == VERTEXVAL.
   */
  public Vertex<VidType, VertexData> vertex() {
    return vertex;
  }

  /**
   * @return the edge value, used only when flag == EDGEVAL.
   */
  public Edge<VidType, EdgeData> edge() {
    return edge;
  }

  @Override
  public void readFields(DataInput arg0) throws IOException {
    flag = arg0.readBoolean();
    if (flag == VERTEXVAL) {
      // BUG FIX: init(EDGEVAL, ...) nulls the vertex; re-create it before reuse
      // so deserializing into a previously edge-valued instance does not NPE.
      if (vertex == null) {
        vertex = new Vertex<VidType, VertexData>();
      }
      VidType vid = createVid();
      vid.readFields(arg0);
      VertexData vdata = createVdata();
      vdata.readFields(arg0);
      vertex.set(vid, vdata);
    } else {
      // BUG FIX: symmetric guard for an instance previously holding a vertex.
      if (edge == null) {
        edge = new Edge<VidType, EdgeData>();
      }
      VidType source = createVid();
      source.readFields(arg0);
      VidType target = createVid();
      target.readFields(arg0);
      EdgeData edata = createEdata();
      edata.readFields(arg0);
      edge.set(source, target, edata);
    }
  }

  @Override
  public void write(DataOutput arg0) throws IOException {
    arg0.writeBoolean(flag);
    if (flag == VERTEXVAL) {
      vertex.vid().write(arg0);
      vertex.vdata().write(arg0);
    } else {
      edge.source().write(arg0);
      edge.target().write(arg0);
      edge.EdgeData().write(arg0);
    }
  }

  @Override
  public String toString() {
    if (flag == VERTEXVAL)
      return vertex.toString();
    return edge.toString();
  }

  // Type flag: VERTEXVAL or EDGEVAL; selects which of the two fields is live.
  private boolean flag;

  // Vertex value; meaningful only when flag == VERTEXVAL.
  private Vertex<VidType, VertexData> vertex;

  // Edge value; meaningful only when flag == EDGEVAL.
  private Edge<VidType, EdgeData> edge;
}
|
/*
* Copyright 2016-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.connect.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;
/**
* <p>
* Contains the filter to apply when retrieving metrics.
* </p>
*
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/Filters" target="_top">AWS API
* Documentation</a>
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class Filters implements Serializable, Cloneable, StructuredPojo {

    // The queues to use to filter the metrics (up to 100 per request); null when unset.
    private java.util.List<String> queues;

    // The channels to use to filter the metrics; null when unset.
    private java.util.List<String> channels;

    /**
     * Returns the queues used to filter the metrics. You can specify up to 100 queues per request.
     *
     * @return the queues to filter by, or null if none have been set
     */
    public java.util.List<String> getQueues() {
        return queues;
    }

    /**
     * Sets the queues used to filter the metrics. You can specify up to 100 queues per request.
     * A defensive copy of the given collection is stored.
     *
     * @param queues
     *        the queues to filter by; null clears the filter
     */
    public void setQueues(java.util.Collection<String> queues) {
        this.queues = (queues == null) ? null : new java.util.ArrayList<String>(queues);
    }

    /**
     * Appends the given queues to the filter, creating the list if it does not exist yet.
     * Use {@link #setQueues(java.util.Collection)} or {@link #withQueues(java.util.Collection)}
     * if you want to replace the existing values instead.
     *
     * @param queues
     *        queues to append to the filter
     * @return this object, so that method calls can be chained together
     */
    public Filters withQueues(String... queues) {
        if (this.queues == null) {
            this.queues = new java.util.ArrayList<String>(queues.length);
        }
        java.util.Collections.addAll(this.queues, queues);
        return this;
    }

    /**
     * Replaces the queue filter with the given collection.
     *
     * @param queues
     *        the queues to filter by; null clears the filter
     * @return this object, so that method calls can be chained together
     */
    public Filters withQueues(java.util.Collection<String> queues) {
        setQueues(queues);
        return this;
    }

    /**
     * Returns the channels used to filter the metrics.
     *
     * @return the channels to filter by, or null if none have been set
     * @see Channel
     */
    public java.util.List<String> getChannels() {
        return channels;
    }

    /**
     * Sets the channels used to filter the metrics. A defensive copy of the
     * given collection is stored.
     *
     * @param channels
     *        the channels to filter by; null clears the filter
     * @see Channel
     */
    public void setChannels(java.util.Collection<String> channels) {
        this.channels = (channels == null) ? null : new java.util.ArrayList<String>(channels);
    }

    /**
     * Appends the given channel names to the filter, creating the list if it does not exist yet.
     * Use {@link #setChannels(java.util.Collection)} or {@link #withChannels(java.util.Collection)}
     * if you want to replace the existing values instead.
     *
     * @param channels
     *        channel names to append to the filter
     * @return this object, so that method calls can be chained together
     * @see Channel
     */
    public Filters withChannels(String... channels) {
        if (this.channels == null) {
            this.channels = new java.util.ArrayList<String>(channels.length);
        }
        java.util.Collections.addAll(this.channels, channels);
        return this;
    }

    /**
     * Replaces the channel filter with the given collection.
     *
     * @param channels
     *        the channels to filter by; null clears the filter
     * @return this object, so that method calls can be chained together
     * @see Channel
     */
    public Filters withChannels(java.util.Collection<String> channels) {
        setChannels(channels);
        return this;
    }

    /**
     * Appends the string form of the given {@link Channel} values to the filter,
     * creating the list if it does not exist yet.
     *
     * @param channels
     *        channel enum values to append to the filter
     * @return this object, so that method calls can be chained together
     * @see Channel
     */
    public Filters withChannels(Channel... channels) {
        java.util.List<String> names = new java.util.ArrayList<String>(channels.length);
        for (Channel channel : channels) {
            names.add(channel.toString());
        }
        if (this.channels == null) {
            this.channels = names;
        } else {
            this.channels.addAll(names);
        }
        return this;
    }

    /**
     * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
     * redacted from this string using a placeholder value.
     *
     * @return A string representation of this object.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("{");
        if (getQueues() != null) {
            sb.append("Queues: ").append(getQueues()).append(",");
        }
        if (getChannels() != null) {
            sb.append("Channels: ").append(getChannels());
        }
        return sb.append("}").toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (!(obj instanceof Filters)) {
            return false;
        }
        Filters that = (Filters) obj;
        // Objects.equals covers both the null-mismatch and value-mismatch checks.
        return java.util.Objects.equals(this.getQueues(), that.getQueues())
                && java.util.Objects.equals(this.getChannels(), that.getChannels());
    }

    @Override
    public int hashCode() {
        // Objects.hash uses the same 31-based accumulation (seed 1, null -> 0)
        // as the previous hand-written implementation, so values are unchanged.
        return java.util.Objects.hash(getQueues(), getChannels());
    }

    @Override
    public Filters clone() {
        try {
            return (Filters) super.clone();
        } catch (CloneNotSupportedException e) {
            throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() even though we're Cloneable!", e);
        }
    }

    /**
     * Marshalls this structured pojo using the given protocol marshaller.
     */
    @com.amazonaws.annotation.SdkInternalApi
    @Override
    public void marshall(ProtocolMarshaller protocolMarshaller) {
        com.amazonaws.services.connect.model.transform.FiltersMarshaller.getInstance().marshall(this, protocolMarshaller);
    }
}
|
package com.jph.simple;
import android.net.Uri;
import android.os.Environment;
import android.view.View;
import android.widget.EditText;
import android.widget.RadioGroup;
import com.jph.takephoto.app.TakePhoto;
import com.jph.takephoto.compress.CompressConfig;
import com.jph.takephoto.model.CropOptions;
import java.io.File;
/**
* - 支持通过相机拍照获取图片
* - 支持从相册选择图片
* - 支持从文件选择图片
* - 支持多图选择
* - 支持批量图片裁切
* - 支持批量图片压缩
* - 支持对图片进行压缩
* - 支持对图片进行裁剪
* - 支持对裁剪及压缩参数自定义
* - 提供自带裁剪工具(可选)
* - 支持智能选取及裁剪异常处理
* - 支持因拍照Activity被回收后的自动恢复
* Author: crazycodeboy
* Date: 2016/9/21 0007 20:10
* Version:3.0.0
* 技术博文:http://www.cboy.me
* GitHub:https://github.com/crazycodeboy
* Eamil:crazycodeboy@gmail.com
*/
/**
 * Reads the demo UI's option widgets (crop / compress / source / crop tool)
 * and drives the corresponding {@link TakePhoto} pick/capture calls.
 */
public class CustomHelper {

    private View rootView;
    private RadioGroup rgCrop, rgCompress, rgFrom, rgCropSize, rgCropTool, rgShowProgressBar;
    private EditText etCropHeight, etCropWidth, etLimit, etSize, etPx;

    /** Static factory wrapping the given root view. */
    public static CustomHelper of(View rootView) {
        return new CustomHelper(rootView);
    }

    private CustomHelper(View rootView) {
        this.rootView = rootView;
        init();
    }

    /** Looks up all option widgets from the root view. */
    private void init() {
        rgCrop = (RadioGroup) rootView.findViewById(R.id.rgCrop);                         // whether to crop
        rgCompress = (RadioGroup) rootView.findViewById(R.id.rgCompress);                 // whether to compress
        rgCropSize = (RadioGroup) rootView.findViewById(R.id.rgCropSize);                 // crop size vs. aspect ratio
        rgFrom = (RadioGroup) rootView.findViewById(R.id.rgFrom);                         // picture source
        rgShowProgressBar = (RadioGroup) rootView.findViewById(R.id.rgShowProgressBar);   // show compression progress bar
        rgCropTool = (RadioGroup) rootView.findViewById(R.id.rgCropTool);                 // which crop tool to use
        etCropHeight = (EditText) rootView.findViewById(R.id.etCropHeight);
        etCropWidth = (EditText) rootView.findViewById(R.id.etCropWidth);
        etLimit = (EditText) rootView.findViewById(R.id.etLimit);                         // max number of pictures to pick
        etSize = (EditText) rootView.findViewById(R.id.etSize);                           // compression max size
        etPx = (EditText) rootView.findViewById(R.id.etPx);                               // compression max width/height
    }

    /**
     * Dispatches a button click to the matching TakePhoto action, applying the
     * crop/compress options currently selected in the UI.
     *
     * @param view      the clicked button (pick-by-select or pick-by-take)
     * @param takePhoto the TakePhoto instance to drive
     */
    public void onClick(View view, TakePhoto takePhoto) {
        File file = new File(Environment.getExternalStorageDirectory(), "/temp/" + System.currentTimeMillis() + ".jpg");
        if (!file.getParentFile().exists()) {
            file.getParentFile().mkdirs();
        }
        Uri imageUri = Uri.fromFile(file);
        configCompress(takePhoto); // apply compression options first
        switch (view.getId()) {
            case R.id.btnPickBySelect: // pick existing picture(s)
                int limit = Integer.parseInt(etLimit.getText().toString()); // how many pictures may be picked
                if (limit > 1) {
                    if (rgCrop.getCheckedRadioButtonId() == R.id.rbCropYes) {
                        takePhoto.onPickMultipleWithCrop(limit, getCropOptions()); // multi-pick with crop
                    } else {
                        takePhoto.onPickMultiple(limit);
                    }
                    return;
                }
                if (rgFrom.getCheckedRadioButtonId() == R.id.rbFile) { // from file manager
                    if (rgCrop.getCheckedRadioButtonId() == R.id.rbCropYes) {
                        takePhoto.onPickFromDocumentsWithCrop(imageUri, getCropOptions());
                    } else {
                        takePhoto.onPickFromDocuments();
                    }
                    return;
                } else { // from gallery
                    if (rgCrop.getCheckedRadioButtonId() == R.id.rbCropYes) {
                        takePhoto.onPickFromGalleryWithCrop(imageUri, getCropOptions());
                    } else {
                        takePhoto.onPickFromGallery();
                    }
                }
                break;
            case R.id.btnPickByTake: // take a new picture with the camera
                if (rgCrop.getCheckedRadioButtonId() == R.id.rbCropYes) {
                    takePhoto.onPickFromCaptureWithCrop(imageUri, getCropOptions());
                } else {
                    takePhoto.onPickFromCapture(imageUri);
                }
                break;
            default:
                break;
        }
    }

    /**
     * Applies the compression options selected in the UI to the given TakePhoto.
     */
    private void configCompress(TakePhoto takePhoto) {
        if (rgCompress.getCheckedRadioButtonId() != R.id.rbCompressYes) { // compression disabled
            takePhoto.onEnableCompress(null, false);
            return;
        }
        int maxSize = Integer.parseInt(etSize.getText().toString());   // max compressed file size
        int maxPixel = Integer.parseInt(etPx.getText().toString());    // max compressed width/height
        boolean showProgressBar = rgShowProgressBar.getCheckedRadioButtonId() == R.id.rbShowYes;
        // BUG FIX: the size limit was previously passed to setMaxPixel twice
        // (setMaxPixel(maxSize).setMaxPixel(maxPixel)), so the size field from the
        // UI was silently ignored. It must go through setMaxSize.
        CompressConfig config = new CompressConfig.Builder().setMaxSize(maxSize).setMaxPixel(maxPixel).create();
        takePhoto.onEnableCompress(config, showProgressBar);
    }

    /**
     * Builds the crop options from the UI.
     *
     * @return the configured CropOptions, or null when cropping is disabled
     */
    private CropOptions getCropOptions() {
        if (rgCrop.getCheckedRadioButtonId() != R.id.rbCropYes) {
            return null; // cropping disabled
        }
        int height = Integer.parseInt(etCropHeight.getText().toString());
        int width = Integer.parseInt(etCropWidth.getText().toString());
        // true = use the library's own crop tool, false = third-party crop tool
        boolean withOwnCrop = rgCropTool.getCheckedRadioButtonId() == R.id.rbCropOwn;
        CropOptions.Builder builder = new CropOptions.Builder();
        if (rgCropSize.getCheckedRadioButtonId() == R.id.rbAspect) { // aspect-ratio mode
            builder.setAspectX(width).setAspectY(height);
        } else { // fixed output size mode
            builder.setOutputX(width).setOutputY(height);
        }
        builder.setWithOwnCrop(withOwnCrop);
        return builder.create();
    }
}
|
package com.huellapositiva.domain;
/**
 * Security roles assigned to application users.
 * VOLUNTEER_NOT_CONFIRMED marks a volunteer whose account is not yet confirmed
 * (presumably pending e-mail confirmation — TODO confirm against the registration flow).
 */
public enum Roles {
VOLUNTEER, VOLUNTEER_NOT_CONFIRMED, ORGANIZATION, ADMIN
}
|
package com.atguigu.springcloud.alibaba.domain;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
/**
 * Generic response wrapper carrying a status code, a message and an optional payload.
 *
 * @param <T> type of the payload carried in {@code data}
 * @author wsk
 * @date 2020/3/25 20:37
 */
@Data
@AllArgsConstructor
@NoArgsConstructor
public class CommonResult<T> {
// business status code of the response
private Integer code;
// human-readable description of the result
private String message;
// response payload; may be null
private T data;
/**
 * Convenience constructor for responses that carry no payload
 * (delegates to the all-args constructor with {@code data = null}).
 *
 * @param code business status code
 * @param message human-readable description
 */
public CommonResult(Integer code, String message){
this(code,message,null);
}
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.api;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.ByteArrayInputStream;
import java.net.Socket;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.flink.streaming.api.function.source.FromElementsFunction;
import org.apache.flink.streaming.api.function.source.SocketTextStreamFunction;
import org.apache.flink.streaming.util.MockCollector;
import org.apache.flink.streaming.util.MockSource;
import org.junit.Test;
/**
 * Tests for the streaming source functions: element/collection sources and the
 * socket text-stream source.
 */
public class SourceTest {

    @Test
    public void fromElementsTest() {
        // Elements passed as varargs should be emitted in order.
        List<Integer> collected = MockSource.createAndExecute(new FromElementsFunction<Integer>(1, 2, 3));
        assertEquals(Arrays.asList(1, 2, 3), collected);
    }

    @Test
    public void fromCollectionTest() {
        // Elements passed as a collection should be emitted in order.
        List<Integer> collected = MockSource.createAndExecute(new FromElementsFunction<Integer>(Arrays.asList(1, 2, 3)));
        assertEquals(Arrays.asList(1, 2, 3), collected);
    }

    @Test
    public void socketTextStreamTest() throws Exception {
        // Simulate a connected socket delivering three newline-delimited tokens.
        byte[] payload = { 'a', '\n', 'b', '\n', 'c', '\n' };
        Socket socket = mock(Socket.class);
        when(socket.getInputStream()).thenReturn(new ByteArrayInputStream(payload));
        when(socket.isClosed()).thenReturn(false);
        when(socket.isConnected()).thenReturn(true);

        List<String> collected = new ArrayList<String>();
        new SocketTextStreamFunction("", 0, '\n', 0)
                .streamFromSocket(new MockCollector<String>(collected), socket);
        assertEquals(Arrays.asList("a", "b", "c"), collected);
    }
}
|
package zjc.zhixun.db.service;
import com.github.pagehelper.PageHelper;
import zjc.zhixun.db.dao.TMessages31Mapper;
import zjc.zhixun.db.dao.TUserMapper;
import zjc.zhixun.db.domain.IMUser;
import zjc.zhixun.db.domain.IMUserExample;
import org.springframework.stereotype.Service;
import org.springframework.util.StringUtils;
import javax.annotation.Resource;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
@Service
public class IMPromoterService {

    @Resource
    private TUserMapper userMapper;

    @Resource
    private TMessages31Mapper messages31Mapper;

    /**
     * Returns the distinct users with type == 0 (presumably "promoter" — TODO confirm
     * the type-code meaning against the schema) who have sent messages to the given
     * target uid.
     *
     * @param uid   target uid whose message senders are looked up
     * @param page  requested page number (currently NOT applied — see note below)
     * @param size  requested page size (currently NOT applied — see note below)
     * @param order sort order (currently unused)
     * @return matching users; empty list when none are found
     */
    public List<IMUser> querySelective(String uid, Integer page, Integer size, String order) {
        List<String> senderUids = removeDuplicate(messages31Mapper.selectFromByTarget(uid));
        List<IMUser> userList = new ArrayList<>();
        for (String from : senderUids) {
            IMUserExample example = new IMUserExample();
            IMUserExample.Criteria criteria = example.createCriteria();
            if (!StringUtils.isEmpty(from)) {
                criteria.andUidLike("%" + from + "%");
            }
            IMUser imUser = userMapper.selectOneByExample(example);
            // BUG FIX: guard against a missing user row; the previous code
            // dereferenced imUser.getType() and threw an NPE when no user matched.
            if (imUser != null && imUser.getType() == 0) {
                userList.add(imUser);
            }
        }
        // BUG FIX: the previous code called PageHelper.startPage(page, size) here,
        // AFTER all mapper queries had run. That cannot paginate the in-memory list;
        // it only arms PageHelper's ThreadLocal, silently paginating the *next*
        // unrelated mapper query. The call has been removed.
        // NOTE(review): pagination of this result is still not applied; apply it in
        // the caller (e.g. via List.subList) if page/size must be honored.
        return userList;
    }

    /**
     * Checks whether the user with the given uid has type == 3 (presumably
     * "customer service" — TODO confirm the type-code meaning).
     *
     * @param uid uid of the user to check
     * @return true when a matching user exists and has type 3; false otherwise
     */
    public Boolean judgeCustom(String uid) {
        IMUserExample example = new IMUserExample();
        IMUserExample.Criteria criteria = example.createCriteria();
        if (!StringUtils.isEmpty(uid)) {
            criteria.andUidEqualTo(uid);
        }
        IMUser imUser = userMapper.selectOneByExample(example);
        // BUG FIX: return false instead of throwing an NPE when no user matches.
        return imUser != null && imUser.getType() == 3;
    }

    /**
     * Removes duplicate entries from the given list in place.
     * Uses a LinkedHashSet so the first-seen order is preserved (the previous
     * HashSet-based implementation scrambled the order).
     *
     * @param list list to deduplicate; mutated in place, may not be null
     * @return the same list instance, deduplicated
     */
    public static List<String> removeDuplicate(List<String> list) {
        java.util.LinkedHashSet<String> seen = new java.util.LinkedHashSet<>(list);
        list.clear();
        list.addAll(seen);
        return list;
    }
}
|
package at.mic.nifi.config.service;
import at.mic.nifi.config.service.PortService;
import at.mic.nifi.swagger.ApiException;
import at.mic.nifi.swagger.client.InputPortsApi;
import at.mic.nifi.swagger.client.OutputPortsApi;
import at.mic.nifi.swagger.client.model.PortDTO;
import at.mic.nifi.swagger.client.model.PortEntity;
import at.mic.nifi.swagger.client.model.PortStatusDTO;
import at.mic.nifi.swagger.client.model.PortStatusDTO.RunStatusEnum;
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.name.Names;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
import java.io.IOException;
import java.net.URISyntaxException;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.*;
/**
 * API tests for {@link PortService}: lookup by id and state transitions for
 * input and output ports, backed by mocked NiFi REST client APIs.
 * (The previous javadoc was a copy-paste from AccessApi's token documentation.)
 */
@RunWith(MockitoJUnitRunner.class)
public class PortServiceTest {

    @Mock
    private InputPortsApi inputPortsApiMock;

    @Mock
    private OutputPortsApi outputPortsApiMock;

    /**
     * Builds a Guice injector binding the mocked port APIs plus the named
     * configuration constants (timeout=1, interval=1, forceMode=false) that
     * PortService expects. Extracted to remove the copy-pasted module from
     * every test.
     */
    private Injector newInjector() {
        return Guice.createInjector(new AbstractModule() {
            protected void configure() {
                bind(InputPortsApi.class).toInstance(inputPortsApiMock);
                bind(OutputPortsApi.class).toInstance(outputPortsApiMock);
                bind(Integer.class).annotatedWith(Names.named("timeout")).toInstance(1);
                bind(Integer.class).annotatedWith(Names.named("interval")).toInstance(1);
                bind(Boolean.class).annotatedWith(Names.named("forceMode")).toInstance(false);
            }
        });
    }

    /** Builds a port entity whose component carries the given id. */
    private PortEntity newPort(String id) {
        PortEntity port = new PortEntity();
        port.setComponent(new PortDTO());
        port.getComponent().setId(id);
        return port;
    }

    /** Builds a port entity with the given id, component state and run status. */
    private PortEntity newPort(String id, PortDTO.StateEnum state, RunStatusEnum runStatus) {
        PortEntity port = newPort(id);
        port.getComponent().setState(state);
        port.setStatus(new PortStatusDTO());
        port.getStatus().setRunStatus(runStatus);
        return port;
    }

    /** getById(..., INPUT_PORT) must resolve the port through the input-ports API. */
    @Test
    public void getByIdInputTest() throws ApiException, IOException, URISyntaxException {
        PortService portService = newInjector().getInstance(PortService.class);
        when(inputPortsApiMock.getInputPort("id")).thenReturn(newPort("id"));
        PortEntity portResult = portService.getById("id", PortDTO.TypeEnum.INPUT_PORT);
        assertEquals("id", portResult.getComponent().getId());
    }

    /** getById(..., OUTPUT_PORT) must resolve the port through the output-ports API. */
    @Test
    public void getByIdOutputTest() throws ApiException, IOException, URISyntaxException {
        PortService portService = newInjector().getInstance(PortService.class);
        when(outputPortsApiMock.getOutputPort("id")).thenReturn(newPort("id"));
        PortEntity portResult = portService.getById("id", PortDTO.TypeEnum.OUTPUT_PORT);
        assertEquals("id", portResult.getComponent().getId());
    }

    /** Requesting the state a port is already in must not issue any update call. */
    @Test
    public void setStateTest() throws ApiException, IOException, URISyntaxException {
        PortService portService = newInjector().getInstance(PortService.class);
        PortEntity port = newPort("id", PortDTO.StateEnum.STOPPED, RunStatusEnum.STOPPED);
        portService.setState(port, PortDTO.StateEnum.STOPPED);
        verify(inputPortsApiMock, never()).updateInputPort(eq("id"), any());
        verify(outputPortsApiMock, never()).updateOutputPort(eq("id"), any());
    }

    /** Stopping a running input port must update it exactly once via the input-ports API. */
    @Test
    public void setStateInputTest() throws ApiException, IOException, URISyntaxException {
        PortService portService = newInjector().getInstance(PortService.class);
        PortEntity portStopped = newPort("id", PortDTO.StateEnum.STOPPED, RunStatusEnum.STOPPED);
        portStopped.getComponent().setName("name");
        when(inputPortsApiMock.updateInputPort(eq("id"), any())).thenReturn(portStopped);
        PortEntity port = newPort("id", PortDTO.StateEnum.RUNNING, RunStatusEnum.RUNNING);
        port.setId("id");
        port.getComponent().setType(PortDTO.TypeEnum.INPUT_PORT);
        portService.setState(port, PortDTO.StateEnum.STOPPED);
        verify(inputPortsApiMock, times(1)).updateInputPort(eq("id"), any());
    }

    /** Stopping a running output port must update it exactly once via the output-ports API. */
    @Test
    public void setStateOutputTest() throws ApiException, IOException, URISyntaxException {
        PortService portService = newInjector().getInstance(PortService.class);
        PortEntity portStopped = newPort("id", PortDTO.StateEnum.STOPPED, RunStatusEnum.STOPPED);
        portStopped.getComponent().setName("name");
        when(outputPortsApiMock.updateOutputPort(eq("id"), any())).thenReturn(portStopped);
        PortEntity port = newPort("id", PortDTO.StateEnum.RUNNING, RunStatusEnum.RUNNING);
        port.setId("id");
        port.getComponent().setType(PortDTO.TypeEnum.OUTPUT_PORT);
        portService.setState(port, PortDTO.StateEnum.STOPPED);
        verify(outputPortsApiMock, times(1)).updateOutputPort(eq("id"), any());
    }
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.el;
import java.io.File;
import java.math.BigDecimal;
import java.util.Collections;
import javax.servlet.DispatcherType;
import org.junit.Assert;
import org.junit.Test;
import org.apache.catalina.Wrapper;
import org.apache.catalina.core.StandardContext;
import org.apache.catalina.startup.Tomcat;
import org.apache.catalina.startup.TomcatBaseTest;
import org.apache.jasper.servlet.JasperInitializer;
import org.apache.tomcat.util.buf.ByteChunk;
/**
* Tests EL with and without JSP attributes using a test web application. Similar
* tests may be found in {@link TestELEvaluation} and
* {@link org.apache.jasper.compiler.TestAttributeParser}.
*/
public class TestELInJsp extends TomcatBaseTest {
@Test
public void testBug36923() throws Exception {
    getTomcatInstanceTestWebapp(false, true);
    // Bug 36923: an escaped EL expression must be emitted literally, not evaluated.
    String body = getUrl("http://localhost:" + getPort() + "/test/bug36923.jsp").toString();
    assertEcho(body, "00-${hello world}");
}
@Test
public void testBug42565() throws Exception {
    getTomcatInstanceTestWebapp(false, true);
    String body = getUrl("http://localhost:" + getPort() + "/test/bug42565.jsp").toString();
    // Bug 42565: every numbered echo (00..15) is expected to evaluate to false.
    for (int i = 0; i < 16; i++) {
        assertEcho(body, String.format("%02d-false", i));
    }
}
@Test
public void testBug44994() throws Exception {
    getTomcatInstanceTestWebapp(false, true);
    String body = getUrl("http://localhost:" + getPort() + "/test/bug44994.jsp").toString();
    // Bug 44994: the page echoes one token per numbered line.
    String[] expected = { "00-none", "01-one", "02-many" };
    for (String echo : expected) {
        assertEcho(body, echo);
    }
}
// Bug 45427: quote handling in JSP attribute values. Each numbered echo
// exercises a different combination of single/double quotes and escapes;
// the expected strings below are the unescaped runtime values, which is why
// this test is deliberately left byte-for-byte as written.
@Test
public void testBug45427() throws Exception {
getTomcatInstanceTestWebapp(false, true);
ByteChunk res = getUrl("http://localhost:" + getPort() +
"/test/bug45nnn/bug45427.jsp");
String result = res.toString();
// Warning: JSP attribute escaping != Java String escaping
assertEcho(result, "00-hello world");
assertEcho(result, "01-hello 'world");
assertEcho(result, "02-hello \"world");
assertEcho(result, "03-hello \"world");
assertEcho(result, "04-hello world");
assertEcho(result, "05-hello 'world");
assertEcho(result, "06-hello 'world");
assertEcho(result, "07-hello \"world");
assertEcho(result, "08-hello world");
assertEcho(result, "09-hello 'world");
assertEcho(result, "10-hello \"world");
assertEcho(result, "11-hello \"world");
assertEcho(result, "12-hello world");
assertEcho(result, "13-hello 'world");
assertEcho(result, "14-hello 'world");
assertEcho(result, "15-hello \"world");
}
// Bug 45451: backslash/EL escaping across five page variants (a/b/c: classic JSP,
// d: JSP document/XML syntax, e: classic JSP again). The escape-heavy expected
// strings are deliberately left byte-for-byte as written.
@Test
public void testBug45451() throws Exception {
getTomcatInstanceTestWebapp(false, true);
ByteChunk res = getUrl("http://localhost:" + getPort() +
"/test/bug45nnn/bug45451a.jsp");
String result = res.toString();
// Warning: JSP attribute escaping != Java String escaping
assertEcho(result, "00-\\'hello world\\'");
assertEcho(result, "01-\\'hello world\\'");
// Variant b: escaping of ${...} and #{...} inside attribute values.
res = getUrl("http://localhost:" + getPort() + "/test/bug45nnn/bug45451b.jsp");
result = res.toString();
// Warning: JSP attribute escaping != Java String escaping
// Warning: Attributes are always unescaped before passing to the EL
// processor
assertEcho(result, "00-2");
assertEcho(result, "01-${1+1}");
assertEcho(result, "02-\\${1+1}");
assertEcho(result, "03-\\\\${1+1}");
assertEcho(result, "04-$500");
// Inside an EL literal '\' is only used to escape '\', ''' and '"'
assertEcho(result, "05-\\$");
assertEcho(result, "06-\\${");
assertEcho(result, "10-2");
assertEcho(result, "11-${1+1}");
assertEcho(result, "12-\\2");
assertEcho(result, "13-\\${1+1}");
assertEcho(result, "14-\\\\2");
assertEcho(result, "15-$500");
assertEcho(result, "16-\\$");
assertEcho(result, "17-\\${");
assertEcho(result, "20-2");
assertEcho(result, "21-#{1+1}");
assertEcho(result, "22-\\2");
assertEcho(result, "23-\\#{1+1}");
assertEcho(result, "24-\\\\2");
assertEcho(result, "25-\\#");
assertEcho(result, "26-\\#{");
// Variant c: single unescaped backslashes in attribute values.
res = getUrl("http://localhost:" + getPort() + "/test/bug45nnn/bug45451c.jsp");
result = res.toString();
// Warning: JSP attribute escaping != Java String escaping
// TODO - Currently we allow a single unescaped \ in attribute values
// Review if this should cause a warning/error
assertEcho(result, "00-${1+1}");
assertEcho(result, "01-\\${1+1}");
assertEcho(result, "02-\\\\${1+1}");
assertEcho(result, "03-\\\\\\${1+1}");
assertEcho(result, "04-\\$500");
assertEcho(result, "10-${1+1}");
assertEcho(result, "11-\\${1+1}");
assertEcho(result, "12-\\${1+1}");
assertEcho(result, "13-\\\\${1+1}");
assertEcho(result, "14-\\\\${1+1}");
assertEcho(result, "15-\\$500");
assertEcho(result, "20-#{1+1}");
assertEcho(result, "21-\\#{1+1}");
assertEcho(result, "22-\\#{1+1}");
assertEcho(result, "23-\\\\#{1+1}");
assertEcho(result, "24-\\\\#{1+1}");
// Variant d: JSP document (XML) syntax, where \\ is not an escape sequence.
res = getUrl("http://localhost:" + getPort() + "/test/bug45nnn/bug45451d.jspx");
result = res.toString();
// Warning: JSP attribute escaping != Java String escaping
// \\ Is *not* an escape sequence in XML attributes
assertEcho(result, "00-2");
assertEcho(result, "01-${1+1}");
assertEcho(result, "02-\\${1+1}");
assertEcho(result, "03-\\\\${1+1}");
assertEcho(result, "04-$500");
assertEcho(result, "10-2");
assertEcho(result, "11-${1+1}");
assertEcho(result, "12-\\${1+1}");
assertEcho(result, "13-\\\\${1+1}");
assertEcho(result, "14-\\\\\\${1+1}");
assertEcho(result, "15-$500");
assertEcho(result, "20-2");
assertEcho(result, "21-#{1+1}");
assertEcho(result, "22-\\#{1+1}");
assertEcho(result, "23-\\\\#{1+1}");
assertEcho(result, "24-\\\\\\#{1+1}");
// Variant e: classic syntax again, attributes unescaped before EL processing.
res = getUrl("http://localhost:" + getPort() + "/test/bug45nnn/bug45451e.jsp");
result = res.toString();
// Warning: JSP attribute escaping != Java String escaping
// Warning: Attributes are always unescaped before passing to the EL
// processor
assertEcho(result, "00-2");
assertEcho(result, "01-${1+1}");
assertEcho(result, "02-\\${1+1}");
assertEcho(result, "03-\\\\${1+1}");
assertEcho(result, "04-$500");
assertEcho(result, "10-2");
assertEcho(result, "11-${1+1}");
assertEcho(result, "12-\\2");
assertEcho(result, "13-\\${1+1}");
assertEcho(result, "14-\\\\2");
assertEcho(result, "15-$500");
assertEcho(result, "20-#{1+1}");
assertEcho(result, "21-\\#{1+1}");
assertEcho(result, "22-\\#{1+1}");
assertEcho(result, "23-\\\\#{1+1}");
assertEcho(result, "24-\\\\#{1+1}");
}
@Test
public void testBug45511() throws Exception {
    getTomcatInstanceTestWebapp(false, true);

    // Fetch the rendered JSP once and check the echoed values.
    final String result = getUrl("http://localhost:" + getPort()
            + "/test/bug45nnn/bug45511.jsp").toString();

    assertEcho(result, "00-true");
    assertEcho(result, "01-false");
}
@Test
public void testBug46596() throws Exception {
    getTomcatInstanceTestWebapp(false, true);

    // Single echoed marker confirms the JSP compiled and ran.
    final String result = getUrl("http://localhost:" + getPort()
            + "/test/bug46596.jsp").toString();

    assertEcho(result, "{OK}");
}
@Test
public void testBug47413() throws Exception {
    getTomcatInstanceTestWebapp(false, true);

    final String result = getUrl("http://localhost:" + getPort()
            + "/test/bug47413.jsp").toString();

    // Expected echo values, indexed by the two-digit prefix in the page.
    final String[] expected = {
            "hello world", "hello world",
            "3.22", "3.22",
            "17", "17",
            "hello world", "hello world",
            "0.0", "0.0",
            "0", "0" };
    for (int i = 0; i < expected.length; i++) {
        assertEcho(result, String.format("%02d-%s", i, expected[i]));
    }
}
@Test
public void testBug48112() throws Exception {
    getTomcatInstanceTestWebapp(false, true);

    // Single echoed marker confirms the JSP compiled and ran.
    final String result = getUrl("http://localhost:" + getPort()
            + "/test/bug48nnn/bug48112.jsp").toString();

    assertEcho(result, "{OK}");
}
@Test
public void testBug49555() throws Exception {
    getTomcatInstanceTestWebapp(false, true);

    // The page echoes the constant from a class whose name contains '$'.
    final String result = getUrl("http://localhost:" + getPort()
            + "/test/bug49nnn/bug49555.jsp").toString();

    assertEcho(result, "00-" + TesterFunctions.Inner$Class.RETVAL);
}
@Test
public void testBug51544() throws Exception {
    getTomcatInstanceTestWebapp(false, true);

    final String result = getUrl("http://localhost:" + getPort()
            + "/test/bug5nnnn/bug51544.jsp").toString();

    assertEcho(result, "Empty list: true");
}
/**
 * Runs the misc EL checks with the JSP servlet's {@code quoteAttributeEL}
 * init parameter set to {@code false} (EL in attribute values not quoted).
 */
@Test
public void testELMiscNoQuoteAttributeEL() throws Exception {
    doTestELMisc(false);
}
/**
 * Runs the misc EL checks with the JSP servlet's {@code quoteAttributeEL}
 * init parameter set to {@code true} (EL in attribute values quoted).
 */
@Test
public void testELMiscWithQuoteAttributeEL() throws Exception {
    doTestELMisc(true);
}
private void doTestELMisc(boolean quoteAttributeEL) throws Exception {
Tomcat tomcat = getTomcatInstance();
// Create the context (don't use addWebapp as we want to modify the
// JSP Servlet settings).
File appDir = new File("test/webapp");
StandardContext ctxt = (StandardContext) tomcat.addContext(
null, "/test", appDir.getAbsolutePath());
ctxt.addServletContainerInitializer(new JasperInitializer(), null);
// Configure the defaults and then tweak the JSP servlet settings
// Note: Min value for maxLoadedJsps is 2
Tomcat.initWebappDefaults(ctxt);
Wrapper w = (Wrapper) ctxt.findChild("jsp");
String jspName;
if (quoteAttributeEL) {
jspName = "/test/el-misc-with-quote-attribute-el.jsp";
w.addInitParameter("quoteAttributeEL", "true");
} else {
jspName = "/test/el-misc-no-quote-attribute-el.jsp";
w.addInitParameter("quoteAttributeEL", "false");
}
tomcat.start();
ByteChunk res = getUrl("http://localhost:" + getPort() + jspName);
String result = res.toString();
assertEcho(result, "00-\\\\\\\"${'hello world'}");
assertEcho(result, "01-\\\\\\\"\\${'hello world'}");
assertEcho(result, "02-\\\"${'hello world'}");
assertEcho(result, "03-\\\"\\hello world");
assertEcho(result, "2az-04");
assertEcho(result, "05-a2z");
assertEcho(result, "06-az2");
assertEcho(result, "2az-07");
assertEcho(result, "08-a2z");
assertEcho(result, "09-az2");
assertEcho(result, "10-${'foo'}bar");
assertEcho(result, "11-\\\"}");
assertEcho(result, "12-foo\\bar\\baz");
assertEcho(result, "13-foo\\bar\\baz");
assertEcho(result, "14-foo\\bar\\baz");
assertEcho(result, "15-foo\\bar\\baz");
assertEcho(result, "16-foo\\bar\\baz");
assertEcho(result, "17-foo\\'bar'\\"baz"");
assertEcho(result, "18-3");
assertEcho(result, "19-4");
assertEcho(result, "20-4");
assertEcho(result, "21-[{value=11}, {value=12}, {value=13}, {value=14}]");
assertEcho(result, "22-[{value=11}, {value=12}, {value=13}, {value=14}]");
}
@Test
public void testScriptingExpression() throws Exception {
    getTomcatInstanceTestWebapp(false, true);

    final String result = getUrl("http://localhost:" + getPort()
            + "/test/script-expr.jsp").toString();

    // Expected echoes, indexed by the two-digit prefix in the page.
    final String[] expected = {
            "hello world", "hello \"world", "hello \\\"world",
            "hello ${world", "hello \\${world",
            "hello world", "hello \"world", "hello \\\"world",
            "hello ${world", "hello \\${world",
            "hello <% world", "hello %> world" };
    for (int i = 0; i < expected.length; i++) {
        assertEcho(result, String.format("%02d-%s", i, expected[i]));
    }
}
@Test
public void testELMethod() throws Exception {
    getTomcatInstanceTestWebapp(false, true);

    final String result = getUrl("http://localhost:" + getPort()
            + "/test/el-method.jsp").toString();

    // The page echoes the same greeting six times (markers 00 to 05).
    for (int i = 0; i < 6; i++) {
        assertEcho(result, String.format("%02d-Hello JUnit from Tomcat", i));
    }
}
@Test
public void testBug56029() throws Exception {
    getTomcatInstanceTestWebapp(true, true);

    final String result = getUrl("http://localhost:" + getPort()
            + "/test/bug5nnnn/bug56029.jspx").toString();

    Assert.assertTrue(result.contains("[1]:[1]"));
}
@Test
public void testBug56147() throws Exception {
    getTomcatInstanceTestWebapp(true, true);

    final String result = getUrl("http://localhost:" + getPort()
            + "/test/bug5nnnn/bug56147.jsp").toString();

    assertEcho(result, "00-OK");
}
@Test
public void testBug56612() throws Exception {
    getTomcatInstanceTestWebapp(false, true);

    final String result = getUrl("http://localhost:" + getPort()
            + "/test/bug5nnnn/bug56612.jsp").toString();

    Assert.assertTrue(result.contains("00-''"));
}
/*
* java.lang should be imported by default
*/
@Test
public void testBug57141() throws Exception {
    getTomcatInstanceTestWebapp(false, true);

    final String result = getUrl("http://localhost:" + getPort()
            + "/test/bug5nnnn/bug57141.jsp").toString();

    assertEcho(result, "00-true");
    assertEcho(result, "01-false");
    assertEcho(result, "02-2147483647");
}
/*
* BZ https://bz.apache.org/bugzilla/show_bug.cgi?id=57142
* javax.servlet, javax.servlet.http and javax.servlet.jsp should be
* imported by default.
*/
@Test
public void testBug57142() throws Exception {
    getTomcatInstanceTestWebapp(false, true);

    final String result = getUrl("http://localhost:" + getPort()
            + "/test/bug5nnnn/bug57142.jsp").toString();

    // javax.servlet should be importable without an explicit page import
    assertEcho(result, "00-" + DispatcherType.ASYNC);
    // No obvious status fields for javax.servlet.http
    // Could hack something with HttpUtils...
    // No obvious status fields for javax.servlet.jsp
    // Wild card (package) import
    assertEcho(result, "01-" + BigDecimal.ROUND_UP);
    // Class import
    assertEcho(result, "02-" + Collections.EMPTY_LIST.size());
}
/*
* BZ https://bz.apache.org/bugzilla/show_bug.cgi?id=57441
* Can't validate function names defined in lambdas (or via imports)
*/
@Test
public void testBug57441() throws Exception {
    getTomcatInstanceTestWebapp(false, true);

    final String result = getUrl("http://localhost:" + getPort()
            + "/test/bug5nnnn/bug57441.jsp").toString();

    assertEcho(result, "00-11");
}
// Assertion for text contained with <p></p>, e.g. printed by tags:echo.
// The full response body is used as the failure message so mismatches are
// easy to diagnose.
private static void assertEcho(String result, String expected) {
    final String needle = "<p>" + expected + "</p>";
    Assert.assertTrue(result, result.indexOf(needle) > 0);
}
}
|
/*
* Copyright (C) 2016 - Niklas Baudy, Ruben Gees, Mario Đanić and contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.vanniktech.emoji.sample;
import android.graphics.PorterDuff;
import android.os.Bundle;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.Menu;
import android.view.MenuItem;
import android.view.ViewGroup;
import android.widget.AutoCompleteTextView;
import android.widget.ImageView;
import androidx.appcompat.app.AppCompatActivity;
import androidx.core.content.ContextCompat;
import androidx.core.provider.FontRequest;
import androidx.emoji.text.EmojiCompat;
import androidx.emoji.text.FontRequestEmojiCompatConfig;
import androidx.recyclerview.widget.LinearLayoutManager;
import androidx.recyclerview.widget.RecyclerView;
import com.vanniktech.emoji.EmojiManager;
import com.vanniktech.emoji.EmojiPopup;
import com.vanniktech.emoji.facebook.FacebookEmojiProvider;
import com.vanniktech.emoji.google.GoogleEmojiProvider;
import com.vanniktech.emoji.googlecompat.GoogleCompatEmojiProvider;
import com.vanniktech.emoji.ios.IosEmojiProvider;
import com.vanniktech.emoji.material.MaterialEmojiLayoutFactory;
import com.vanniktech.emoji.twitter.TwitterEmojiProvider;
// We don't care about duplicated code in the sample.
/**
 * Sample chat screen demonstrating the emoji popup attached to an
 * {@link AutoCompleteTextView}. The options menu allows switching between
 * the bundled emoji providers at runtime (the activity is recreated after
 * each switch so the new provider takes effect).
 */
public class MainActivityAutoCompeteTextView extends AppCompatActivity {
  static final String TAG = "MainActivity";

  // Adapter backing the message RecyclerView.
  ChatAdapter chatAdapter;
  // Popup toggled by the emoji button; dismissed on back press.
  EmojiPopup emojiPopup;

  // Message input field the popup is bound to.
  AutoCompleteTextView editText;
  // Root view used as the popup's anchor.
  ViewGroup rootView;
  ImageView emojiButton;
  // Lazily initialised; only needed for the GoogleCompat provider.
  EmojiCompat emojiCompat;

  @Override protected void onCreate(final Bundle savedInstanceState) {
    // Must be installed before super.onCreate() so the custom factory is in
    // place when the layout is inflated.
    getLayoutInflater().setFactory2(new MaterialEmojiLayoutFactory((LayoutInflater.Factory2) getDelegate()));
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_main_autocompletetextview);

    chatAdapter = new ChatAdapter();

    editText = findViewById(R.id.main_activity_chat_bottom_message_edittext);
    rootView = findViewById(R.id.main_activity_root_view);
    emojiButton = findViewById(R.id.main_activity_emoji);
    final ImageView sendButton = findViewById(R.id.main_activity_send);

    // Tint both action icons with the shared emoji icon colour.
    emojiButton.setColorFilter(ContextCompat.getColor(this, R.color.emoji_icons), PorterDuff.Mode.SRC_IN);
    sendButton.setColorFilter(ContextCompat.getColor(this, R.color.emoji_icons), PorterDuff.Mode.SRC_IN);

    emojiButton.setOnClickListener(ignore -> emojiPopup.toggle());
    // Append non-blank input to the chat list and clear the field.
    sendButton.setOnClickListener(ignore -> {
      final String text = editText.getText().toString().trim();

      if (text.length() > 0) {
        chatAdapter.add(text);

        editText.setText("");
      }
    });

    final RecyclerView recyclerView = findViewById(R.id.main_activity_recycler_view);
    recyclerView.setAdapter(chatAdapter);
    recyclerView.setLayoutManager(new LinearLayoutManager(this, RecyclerView.VERTICAL, false));
    setUpEmojiPopup();
  }

  @Override public boolean onCreateOptionsMenu(final Menu menu) {
    getMenuInflater().inflate(R.menu.menu_main, menu);
    return super.onCreateOptionsMenu(menu);
  }

  /**
   * Handles the provider-switching menu. Each provider branch destroys the
   * current EmojiManager installation, installs the new provider and
   * recreates the activity so all emoji views pick up the change.
   */
  @Override public boolean onOptionsItemSelected(final MenuItem item) {
    final int itemId = item.getItemId();
    if (itemId == R.id.menuMainShowDialog) {
      emojiPopup.dismiss();
      MainDialog.show(this);
      return true;
    } else if (itemId == R.id.menuMainVariantIos) {
      EmojiManager.destroy();
      EmojiManager.install(new IosEmojiProvider());
      recreate();
      return true;
    } else if (itemId == R.id.menuMainGoogle) {
      EmojiManager.destroy();
      EmojiManager.install(new GoogleEmojiProvider());
      recreate();
      return true;
    } else if (itemId == R.id.menuMainTwitter) {
      EmojiManager.destroy();
      EmojiManager.install(new TwitterEmojiProvider());
      recreate();
      return true;
    } else if (itemId == R.id.menuMainFacebook) {
      EmojiManager.destroy();
      EmojiManager.install(new FacebookEmojiProvider());
      recreate();
      return true;
    } else if (itemId == R.id.menuMainGoogleCompat) {
      // EmojiCompat is configured once and reused on subsequent switches;
      // the font is fetched from Google Play Services.
      if (emojiCompat == null) {
        emojiCompat = EmojiCompat.init(new FontRequestEmojiCompatConfig(this,
            new FontRequest("com.google.android.gms.fonts", "com.google.android.gms",
                "Noto Color Emoji Compat", R.array.com_google_android_gms_fonts_certs)
        ).setReplaceAll(true));
      }
      EmojiManager.destroy();
      EmojiManager.install(new GoogleCompatEmojiProvider(emojiCompat));
      recreate();
      return true;
    }
    return super.onOptionsItemSelected(item);
  }

  // Back press closes the popup first; only a second press leaves the screen.
  @Override public void onBackPressed() {
    if (emojiPopup != null && emojiPopup.isShowing()) {
      emojiPopup.dismiss();
    } else {
      super.onBackPressed();
    }
  }

  // Builds the popup bound to the root view and the input field, wiring
  // logging listeners and swapping the emoji button icon while it is open.
  private void setUpEmojiPopup() {
    emojiPopup = EmojiPopup.Builder.fromRootView(rootView)
        .setOnEmojiBackspaceClickListener(ignore -> Log.d(TAG, "Clicked on Backspace"))
        .setOnEmojiClickListener((ignore, ignore2) -> Log.d(TAG, "Clicked on emoji"))
        .setOnEmojiPopupShownListener(() -> emojiButton.setImageResource(R.drawable.ic_keyboard))
        .setOnSoftKeyboardOpenListener(ignore -> Log.d(TAG, "Opened soft keyboard"))
        .setOnEmojiPopupDismissListener(() -> emojiButton.setImageResource(R.drawable.emoji_ios_category_smileysandpeople))
        .setOnSoftKeyboardCloseListener(() -> Log.d(TAG, "Closed soft keyboard"))
        .setKeyboardAnimationStyle(R.style.emoji_fade_animation_style)
        .setPageTransformer(new PageTransformer())
        .build(editText);
  }
}
|
package fr.epicanard.globalmarketchest.gui.shops.baseinterfaces;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Consumer;
import org.bukkit.entity.Player;
import org.bukkit.event.inventory.ClickType;
import org.bukkit.event.inventory.InventoryClickEvent;
import org.bukkit.inventory.ItemStack;
import fr.epicanard.globalmarketchest.exceptions.MissingMethodException;
import fr.epicanard.globalmarketchest.gui.InterfacesLoader;
import fr.epicanard.globalmarketchest.gui.InventoryGUI;
import fr.epicanard.globalmarketchest.gui.actions.LeaveShop;
import fr.epicanard.globalmarketchest.gui.paginator.Paginator;
import fr.epicanard.globalmarketchest.gui.paginator.PaginatorConfig;
import fr.epicanard.globalmarketchest.gui.shops.toggler.Toggler;
import fr.epicanard.globalmarketchest.gui.shops.toggler.TogglerConfig;
import fr.epicanard.globalmarketchest.utils.LangUtils;
import fr.epicanard.globalmarketchest.utils.Utils;
import fr.epicanard.globalmarketchest.utils.annotations.AnnotationCaller;
import fr.epicanard.globalmarketchest.utils.reflection.VersionSupportUtils;
import lombok.Getter;
import lombok.experimental.Accessors;
/**
 * Base class for every shop GUI screen. Subclasses get a slot-indexed action
 * map, optional pagination and togglers, all driven by the interface layout
 * registered for the subclass's simple class name in {@link InterfacesLoader}.
 */
public abstract class ShopInterface {
  @Accessors(fluent=true) @Getter
  protected Boolean isTemp = false;
  // Optional paginator; only created when a paginator config exists for
  // this interface.
  protected Paginator paginator = null;
  protected Map<Integer, Toggler> togglers = new HashMap<>();
  protected InventoryGUI inv;
  // Click actions keyed by inventory slot.
  protected Map<Integer, Consumer<InventoryGUI>> actions = new HashMap<>();
  private ItemStack icon;

  /**
   * Builds the interface state (paginator, togglers, default actions) from
   * the configuration registered for this subclass's simple class name.
   *
   * @param inv Inventory GUI this interface is displayed in
   */
  public ShopInterface(InventoryGUI inv) {
    this.inv = inv;
    this.icon = Utils.getBackground();
    String className = this.getClass().getSimpleName();
    PaginatorConfig conf = InterfacesLoader.getInstance().getPaginatorConfig(className);
    if (conf != null)
      this.paginator = new Paginator(this.inv.getInv(), conf);

    List<TogglerConfig> togglersConfig = InterfacesLoader.getInstance().getTogglers(className);
    if (togglersConfig != null) {
      togglersConfig.forEach(togglerConfig -> {
        this.togglers.put(togglerConfig.getPosition(), togglerConfig.instanceToggler(inv.getInv()));
      });
    }

    // Slot 8 always leaves the shop.
    this.actions.put(8, new LeaveShop());
  }

  /**
   * Load the interface layout registered for this subclass's class name
   * into the inventory, apply togglers, reload pagination, and refresh the
   * inventory title and icon.
   */
  public void load() {
    String className = this.getClass().getSimpleName();
    // Fix: fetch the template before cloning. The previous code called
    // clone() directly on the lookup result, so a missing interface
    // definition threw a NullPointerException before the null check ran.
    ItemStack[] template = InterfacesLoader.getInstance().getInterface(className);
    if (template == null)
      return;
    ItemStack[] items = template.clone();

    // Overlay toggler items on top of the static layout.
    this.togglers.forEach((k, v) -> {
      v.getItems().forEach((pos, item) -> {
        items[pos] = item;
      });
    });

    // 54 slots = 6-row double chest inventory.
    for (int i = 0; i < 54; i++)
      this.inv.getInv().setItem(i, items[i]);

    if (this.paginator != null)
      this.paginator.reloadInterface();
    this.updateInventoryName(className);
    this.loadIcon();
  }

  /**
   * Update the inventory name with current interface name
   *
   * @param interfaceName Interface name used to look up the localized title
   */
  private void updateInventoryName(String interfaceName) {
    String title = LangUtils.getOrElse("InterfacesTitle." + interfaceName, "&2GlobalMarketChest");
    try {
      AnnotationCaller.call("updateInventoryName", VersionSupportUtils.getInstance(), title, (Player)this.inv.getPlayer());
    } catch (MissingMethodException e) {
      e.printStackTrace();
    }
  }

  /**
   * Add the icon item inside inventory (slot 4, top center)
   */
  private void loadIcon() {
    this.inv.getInv().setItem(4, this.icon);
  }

  /**
   * Set and load icon
   *
   * @param item Icon
   */
  protected void setIcon(ItemStack item) {
    this.icon = VersionSupportUtils.getInstance().setNbtTag(item);
    this.loadIcon();
  }

  /**
   * Unload interface
   */
  public void unload() {
    this.inv.getWarn().stopWarn();
  }

  /**
   * Called when interface is destroyed
   */
  public void destroy() {}

  /**
   * Called when a mouse event is done inside inventory.
   * Only left clicks are handled; the paginator gets first chance at the
   * slot, otherwise the slot's registered action (if any) runs.
   *
   * @param event Click event
   * @param inv   Inventory GUI the click happened in
   */
  public void onClick(InventoryClickEvent event, InventoryGUI inv) {
    if (event.getClick() != ClickType.LEFT)
      return;
    if (this.paginator == null || !this.paginator.onClick(event.getSlot()))
      Optional.ofNullable(this.actions.get(event.getSlot())).ifPresent(c -> c.accept(inv));
  }

  /**
   * Called when a mouse drop event is done inside inventory
   *
   * @param event Click event
   * @param inv   Inventory GUI the drop happened in
   */
  public void onDrop(InventoryClickEvent event, InventoryGUI inv) {}
}
|
/*
* Copyright 2021 ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package tech.pegasys.teku.reference.phase0.forkchoice;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static tech.pegasys.teku.infrastructure.time.TimeUtilities.secondsToMillis;
import com.google.common.collect.ImmutableMap;
import java.io.IOException;
import java.nio.ByteOrder;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
import org.apache.tuweni.ssz.SSZ;
import org.apache.tuweni.units.bigints.UInt256;
import org.assertj.core.api.Condition;
import org.opentest4j.TestAbortedException;
import tech.pegasys.teku.bls.BLSSignature;
import tech.pegasys.teku.ethtests.finder.TestDefinition;
import tech.pegasys.teku.infrastructure.async.SafeFuture;
import tech.pegasys.teku.infrastructure.async.eventthread.InlineEventThread;
import tech.pegasys.teku.infrastructure.unsigned.UInt64;
import tech.pegasys.teku.reference.TestDataUtils;
import tech.pegasys.teku.reference.TestExecutor;
import tech.pegasys.teku.spec.Spec;
import tech.pegasys.teku.spec.datastructures.attestation.ValidateableAttestation;
import tech.pegasys.teku.spec.datastructures.blocks.BeaconBlock;
import tech.pegasys.teku.spec.datastructures.blocks.SignedBeaconBlock;
import tech.pegasys.teku.spec.datastructures.blocks.SignedBlockAndState;
import tech.pegasys.teku.spec.datastructures.execution.PowBlock;
import tech.pegasys.teku.spec.datastructures.operations.Attestation;
import tech.pegasys.teku.spec.datastructures.operations.AttesterSlashing;
import tech.pegasys.teku.spec.datastructures.state.AnchorPoint;
import tech.pegasys.teku.spec.datastructures.state.Checkpoint;
import tech.pegasys.teku.spec.datastructures.state.beaconstate.BeaconState;
import tech.pegasys.teku.spec.executionlayer.ExecutionLayerChannel;
import tech.pegasys.teku.spec.executionlayer.ExecutionLayerChannelStub;
import tech.pegasys.teku.spec.logic.common.statetransition.results.BlockImportResult;
import tech.pegasys.teku.statetransition.forkchoice.ForkChoice;
import tech.pegasys.teku.statetransition.forkchoice.MergeTransitionBlockValidator;
import tech.pegasys.teku.statetransition.forkchoice.StubForkChoiceNotifier;
import tech.pegasys.teku.statetransition.validation.InternalValidationResult;
import tech.pegasys.teku.storage.client.RecentChainData;
import tech.pegasys.teku.storage.storageSystem.InMemoryStorageSystemBuilder;
import tech.pegasys.teku.storage.storageSystem.StorageSystem;
import tech.pegasys.teku.storage.store.UpdatableStore;
/**
 * Executes the consensus-spec fork choice reference tests: loads the anchor
 * state/block, replays each step from {@code steps.yaml} (ticks, blocks,
 * attestations, slashings, PoW blocks) against a real {@link ForkChoice}
 * instance and asserts the declared checks against the resulting store.
 */
public class ForkChoiceTestExecutor implements TestExecutor {
  private static final Logger LOG = LogManager.getLogger();

  public static final ImmutableMap<String, TestExecutor> FORK_CHOICE_TEST_TYPES =
      ImmutableMap.<String, TestExecutor>builder()
          .put("fork_choice/get_head", new ForkChoiceTestExecutor())
          .put("fork_choice/ex_ante", new ForkChoiceTestExecutor())
          .put(
              "fork_choice/on_block",
              new ForkChoiceTestExecutor("new_finalized_slot_is_justified_checkpoint_ancestor"))
          .put("fork_choice/on_merge_block", IGNORE_TESTS)
          .build();

  // Names of reference tests to abort rather than run.
  // Fix: typed as List<String> rather than List<?> - List.of(String...)
  // already produces a List<String>, and the tighter type documents what
  // contains() is matched against.
  private final List<String> testsToSkip;

  /**
   * @param testsToSkip test names that should be skipped (aborted) when run
   */
  public ForkChoiceTestExecutor(String... testsToSkip) {
    this.testsToSkip = List.of(testsToSkip);
  }

  @Override
  public void runTest(final TestDefinition testDefinition) throws Throwable {
    if (testsToSkip.contains(testDefinition.getTestName())) {
      throw new TestAbortedException(
          "Test " + testDefinition.getDisplayName() + " has been ignored");
    }

    // Note: The fork choice spec says there may be settings in a meta.yaml file but currently no
    // tests actually have one, so we currently don't bother trying to load it.

    final BeaconState anchorState =
        TestDataUtils.loadStateFromSsz(testDefinition, "anchor_state.ssz_snappy");
    final Spec spec = testDefinition.getSpec();
    final SignedBeaconBlock anchorBlock = loadAnchorBlock(testDefinition);

    final StorageSystem storageSystem =
        InMemoryStorageSystemBuilder.create().specProvider(spec).build();
    final RecentChainData recentChainData = storageSystem.recentChainData();
    recentChainData.initializeFromAnchorPoint(
        AnchorPoint.fromInitialBlockAndState(
            spec, new SignedBlockAndState(anchorBlock, anchorState)),
        spec.getSlotStartTime(anchorBlock.getSlot(), anchorState.getGenesisTime()));

    final MergeTransitionBlockValidator transitionBlockValidator =
        new MergeTransitionBlockValidator(spec, recentChainData, ExecutionLayerChannel.NOOP);
    final ForkChoice forkChoice =
        new ForkChoice(
            spec,
            new InlineEventThread(),
            recentChainData,
            new StubForkChoiceNotifier(),
            transitionBlockValidator,
            true,
            true);
    final ExecutionLayerChannelStub executionLayer = new ExecutionLayerChannelStub(spec, false);

    runSteps(testDefinition, spec, recentChainData, forkChoice, executionLayer);
  }

  /**
   * The anchor block is currently always a Phase 0 block because of the way the specs repo are
   * doing Altair genesis. See https://github.com/ethereum/eth2.0-specs/pull/2323
   *
   * @param testDefinition the test definition
   * @return the anchor block for the test, wrapped with an empty signature
   */
  private SignedBeaconBlock loadAnchorBlock(final TestDefinition testDefinition) {
    final BeaconBlock anchorBlock =
        TestDataUtils.loadSsz(
            testDefinition,
            "anchor_block.ssz_snappy",
            testDefinition.getSpec()::deserializeBeaconBlock);
    return SignedBeaconBlock.create(testDefinition.getSpec(), anchorBlock, BLSSignature.empty());
  }

  /**
   * Replays each step from steps.yaml in order. A step is identified by
   * which key it contains (checks / tick / block / attestation / pow_block /
   * attester_slashing); anything else is an unsupported step.
   */
  private void runSteps(
      final TestDefinition testDefinition,
      final Spec spec,
      final RecentChainData recentChainData,
      final ForkChoice forkChoice,
      final ExecutionLayerChannelStub executionLayer)
      throws IOException {
    final List<Map<String, Object>> steps = loadSteps(testDefinition);

    for (Map<String, Object> step : steps) {
      LOG.info("Executing step {}", step);
      if (step.containsKey("checks")) {
        applyChecks(recentChainData, forkChoice, step);

      } else if (step.containsKey("tick")) {
        forkChoice.onTick(secondsToMillis(getUInt64(step, "tick")));

      } else if (step.containsKey("block")) {
        applyBlock(testDefinition, spec, forkChoice, step, executionLayer);

      } else if (step.containsKey("attestation")) {
        applyAttestation(testDefinition, forkChoice, step);

      } else if (step.containsKey("pow_block")) {
        applyPowBlock(testDefinition, step, executionLayer);

      } else if (step.containsKey("attester_slashing")) {
        applyAttesterSlashing(testDefinition, forkChoice, step);

      } else {
        throw new UnsupportedOperationException("Unsupported step: " + step);
      }
    }
  }

  /** Registers a PoW block referenced by the step with the stub execution layer. */
  private void applyPowBlock(
      final TestDefinition testDefinition,
      final Map<String, Object> step,
      final ExecutionLayerChannelStub executionLayer) {
    final String filename = (String) step.get("pow_block");
    final PowBlock block =
        TestDataUtils.loadSsz(testDefinition, filename + ".ssz_snappy", this::parsePowBlock);
    executionLayer.addPowBlock(block);
  }

  /**
   * Decodes a PoW block from its SSZ encoding: block hash, parent hash,
   * total difficulty and (discarded) difficulty, each 32 bytes.
   */
  private PowBlock parsePowBlock(final Bytes data) {
    return SSZ.decode(
        data,
        reader -> {
          final Bytes32 blockHash = Bytes32.wrap(reader.readFixedBytes(Bytes32.SIZE));
          final Bytes32 parentHash = Bytes32.wrap(reader.readFixedBytes(Bytes32.SIZE));
          final UInt256 totalDifficulty =
              UInt256.valueOf(
                  reader
                      .readFixedBytes(Bytes32.SIZE)
                      // SSZ uint256 values are little-endian
                      .toUnsignedBigInteger(ByteOrder.LITTLE_ENDIAN));
          reader.readFixedBytes(Bytes32.SIZE); // Read difficulty even though we don't use it.
          // We don't get a timestamp but as long as it's in the past that's fine
          final UInt64 timestamp = UInt64.ZERO;
          return new PowBlock(blockHash, parentHash, totalDifficulty, timestamp);
        });
  }

  /** Loads the named attestation and feeds it to fork choice, expecting completion. */
  private void applyAttestation(
      final TestDefinition testDefinition,
      final ForkChoice forkChoice,
      final Map<String, Object> step) {
    final String attestationName = get(step, "attestation");
    final Attestation attestation =
        TestDataUtils.loadSsz(
            testDefinition,
            attestationName + ".ssz_snappy",
            testDefinition.getSpec().getGenesisSchemaDefinitions().getAttestationSchema());
    final Spec spec = testDefinition.getSpec();
    assertThat(forkChoice.onAttestation(ValidateableAttestation.from(spec, attestation)))
        .isCompleted();
  }

  /** Loads the named attester slashing and applies it, expecting no exception. */
  private void applyAttesterSlashing(
      final TestDefinition testDefinition,
      final ForkChoice forkChoice,
      final Map<String, Object> step) {
    final String slashingName = get(step, "attester_slashing");
    final AttesterSlashing attesterSlashing =
        TestDataUtils.loadSsz(
            testDefinition,
            slashingName + ".ssz_snappy",
            testDefinition.getSpec().getGenesisSchemaDefinitions().getAttesterSlashingSchema());
    assertDoesNotThrow(
        () ->
            forkChoice.onAttesterSlashing(attesterSlashing, InternalValidationResult.ACCEPT, true));
  }

  /**
   * Imports the named block and asserts the import result matches the step's
   * "valid" flag (a missing "valid" key means the block is expected to be
   * imported successfully).
   */
  private void applyBlock(
      final TestDefinition testDefinition,
      final Spec spec,
      final ForkChoice forkChoice,
      final Map<String, Object> step,
      final ExecutionLayerChannelStub executionLayer) {
    final String blockName = get(step, "block");
    final boolean valid = !step.containsKey("valid") || (boolean) step.get("valid");
    final SignedBeaconBlock block =
        TestDataUtils.loadSsz(
            testDefinition, blockName + ".ssz_snappy", spec::deserializeSignedBeaconBlock);
    LOG.info(
        "Importing block {} at slot {} with parent {}",
        block.getRoot(),
        block.getSlot(),
        block.getParentRoot());
    final SafeFuture<BlockImportResult> result =
        forkChoice.onBlock(block, Optional.empty(), executionLayer);
    assertThat(result).isCompleted();
    final BlockImportResult importResult = result.join();
    assertThat(importResult)
        .describedAs("Incorrect block import result for block %s", block)
        .has(new Condition<>(r -> r.isSuccessful() == valid, "isSuccessful matching " + valid));
  }

  @SuppressWarnings("unchecked")
  private List<Map<String, Object>> loadSteps(final TestDefinition testDefinition)
      throws IOException {
    return TestDataUtils.loadYaml(testDefinition, "steps.yaml", List.class);
  }

  /**
   * Runs processHead then verifies each declared check against the store /
   * recent chain data. Unknown check types fail the test loudly rather than
   * being silently ignored.
   */
  private void applyChecks(
      final RecentChainData recentChainData,
      final ForkChoice forkChoice,
      final Map<String, Object> step) {
    assertThat(forkChoice.processHead()).isCompleted();
    final UpdatableStore store = recentChainData.getStore();
    final Map<String, Object> checks = get(step, "checks");
    for (String checkType : checks.keySet()) {
      switch (checkType) {
        case "genesis_time":
          assertThat(recentChainData.getGenesisTime()).isEqualTo(getUInt64(checks, checkType));
          break;

        case "head":
          final Map<String, Object> expectedHead = get(checks, checkType);
          final UInt64 expectedSlot = UInt64.valueOf(expectedHead.get("slot").toString());
          final Bytes32 expectedRoot = Bytes32.fromHexString(expectedHead.get("root").toString());
          assertThat(recentChainData.getHeadSlot()).isEqualTo(expectedSlot);
          assertThat(recentChainData.getBestBlockRoot()).contains(expectedRoot);
          break;

        case "time":
          final UInt64 expectedTime = getUInt64(checks, checkType);
          assertThat(store.getTimeSeconds()).isEqualTo(expectedTime);
          break;

        case "justified_checkpoint_root":
          final Bytes32 expectedJustifiedRoot = getBytes32(checks, checkType);
          assertThat(store.getJustifiedCheckpoint().getRoot())
              .describedAs("justified checkpoint")
              .isEqualTo(expectedJustifiedRoot);
          break;

        case "justified_checkpoint":
          assertCheckpoint(
              "justified checkpoint", store.getJustifiedCheckpoint(), get(checks, checkType));
          break;

        case "best_justified_checkpoint":
          assertCheckpoint(
              "best justified checkpoint",
              store.getBestJustifiedCheckpoint(),
              get(checks, checkType));
          break;

        case "finalized_checkpoint_root":
          final Bytes32 expectedFinalizedRoot = getBytes32(checks, checkType);
          assertThat(store.getFinalizedCheckpoint().getRoot())
              .describedAs("finalized checkpoint")
              .isEqualTo(expectedFinalizedRoot);
          break;

        case "finalized_checkpoint":
          assertCheckpoint(
              "finalized checkpoint", store.getFinalizedCheckpoint(), get(checks, checkType));
          break;

        case "proposer_boost_root":
          final Optional<Bytes32> boostedRoot = store.getProposerBoostRoot();
          final Bytes32 expectedBoostedRoot = getBytes32(checks, checkType);
          // A zero root in the reference test means "no proposer boost".
          if (expectedBoostedRoot.isZero()) {
            assertThat(boostedRoot).describedAs("proposer_boost_root").isEmpty();
          } else {
            assertThat(boostedRoot)
                .describedAs("proposer_boost_root")
                .contains(expectedBoostedRoot);
          }
          break;

        default:
          throw new UnsupportedOperationException("Unsupported check type: " + checkType);
      }
    }
  }

  /** Asserts a checkpoint matches the expected {root, epoch} map from the test. */
  private void assertCheckpoint(
      final String checkpointType,
      final Checkpoint actual,
      final Map<String, Object> expectedCheckpoint) {
    final Bytes32 expectedRoot = getBytes32(expectedCheckpoint, "root");
    final UInt64 expectedEpoch = getUInt64(expectedCheckpoint, "epoch");
    assertThat(actual)
        .describedAs(checkpointType)
        .isEqualTo(new Checkpoint(expectedEpoch, expectedRoot));
  }

  // Convenience accessor: the YAML structure is dynamically typed so the
  // cast is unavoidable; callers know the concrete type per key.
  @SuppressWarnings({"unchecked", "TypeParameterUnusedInFormals"})
  private <T> T get(final Map<String, Object> yamlData, final String key) {
    return (T) yamlData.get(key);
  }

  private UInt64 getUInt64(final Map<String, Object> yamlData, final String key) {
    return UInt64.valueOf(get(yamlData, key).toString());
  }

  private Bytes32 getBytes32(final Map<String, Object> yamlData, final String key) {
    return Bytes32.fromHexString(get(yamlData, key));
  }
}
|
package org.hl7.fhir.r4.model.codesystems;
/*
Copyright (c) 2011+, HL7, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of HL7 nor the names of its contributors may be used to
endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
// Generated on Thu, Dec 27, 2018 10:06-0500 for FHIR v4.0.0
import org.hl7.fhir.r4.model.EnumFactory;
public class ClaimSubtypeEnumFactory implements EnumFactory<ClaimSubtype> {

  /**
   * Parses a {@link ClaimSubtype} from its FHIR code.
   *
   * @param codeString the code, e.g. {@code "ortho"}; {@code null} or empty yields {@code null}
   * @return the matching constant, or {@code null} when no code was supplied
   * @throws IllegalArgumentException when the code is not a known ClaimSubtype
   */
  public ClaimSubtype fromCode(String codeString) throws IllegalArgumentException {
    if (codeString == null || "".equals(codeString)) {
      return null;
    }
    switch (codeString) {
      case "ortho":
        return ClaimSubtype.ORTHO;
      case "emergency":
        return ClaimSubtype.EMERGENCY;
      default:
        throw new IllegalArgumentException("Unknown ClaimSubtype code '"+codeString+"'");
    }
  }

  /**
   * Renders a {@link ClaimSubtype} back to its FHIR code. An unrecognised (or
   * {@code null}) constant maps to {@code "?"} rather than throwing, matching
   * the behaviour of the generated HL7 factories.
   */
  public String toCode(ClaimSubtype code) {
    if (code == ClaimSubtype.ORTHO) {
      return "ortho";
    }
    if (code == ClaimSubtype.EMERGENCY) {
      return "emergency";
    }
    return "?";
  }

  /** Returns the code-system URI of the given constant. */
  public String toSystem(ClaimSubtype code) {
    return code.getSystem();
  }
}
|
package com.sqli.echallenge.formation.metier;
import java.util.List;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import com.sqli.echallenge.formation.dao.UtilisateurDao;
import com.sqli.echallenge.formation.model.Utilisateur;
@Service
@Transactional
public class UtilisateurMetierImpl implements UtilisateurMetier {

    /** Data-access object this business service delegates all persistence work to. */
    @Autowired
    private UtilisateurDao dao;

    /** Loads a user by technical identifier. */
    public Utilisateur getUtilisateur(Long idUtilisateur) throws Exception {
        return dao.getUtilisateur(idUtilisateur);
    }

    /** Looks a user up by credentials (authentication lookup). */
    public Utilisateur getUtilisateur(String email, String password) throws Exception {
        return dao.getUtilisateur(email, password);
    }

    /** Persists a new user. */
    public void addUtilisateur(Utilisateur utilisateur) throws Exception {
        dao.addUtilisateur(utilisateur);
    }

    /** Deletes the user with the given identifier. */
    public void removeUtilisateur(Long idUtilisateur) throws Exception {
        dao.removeUtilisateur(idUtilisateur);
    }

    /** Updates an existing user. */
    public void updateUtilisateur(Utilisateur utilisateur) throws Exception {
        dao.updateUtilisateur(utilisateur);
    }

    /** Returns every known user. */
    public List<Utilisateur> getAllUtilisateurs() throws Exception {
        return dao.getAllUtilisateurs();
    }

    /** Returns every user carrying the given profile. */
    public List<Utilisateur> getAllUtilisateursWithProfil(String profil) throws Exception {
        return dao.getAllUtilisateursWithProfil(profil);
    }

    /** DAO accessor, kept for tests and manual wiring. */
    public UtilisateurDao getDao() {
        return dao;
    }

    /** DAO mutator, kept for tests and manual wiring. */
    public void setDao(UtilisateurDao dao) {
        this.dao = dao;
    }
}
|
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE190_Integer_Overflow__long_max_preinc_45.java
Label Definition File: CWE190_Integer_Overflow.label.xml
Template File: sources-sinks-45.tmpl.java
*/
/*
* @description
* CWE: 190 Integer Overflow
* BadSource: max Set data to the max value for long
* GoodSource: A hardcoded non-zero, non-min, non-max, even number
* Sinks: increment
* GoodSink: Ensure there will not be an overflow before incrementing data
* BadSink : Increment data, which can cause an overflow
* Flow Variant: 45 Data flow: data passed as a private class member variable from one function to another in the same class
*
* */
package testcases.CWE190_Integer_Overflow.s07;
import testcasesupport.*;
import javax.servlet.http.*;
public class CWE190_Integer_Overflow__long_max_preinc_45 extends AbstractTestCase
{
    /* Flow variant 45: the value travels from source method to sink method
     * through these private class members rather than via parameters. */
    private long dataBad;
    private long dataGoodG2B;
    private long dataGoodB2G;

    /* Sink for bad(): pre-increments the member value with no overflow guard. */
    private void badSink() throws Throwable
    {
        long data = dataBad;

        /* POTENTIAL FLAW: if data == Long.MAX_VALUE, this will overflow */
        long result = (long)(++data);

        IO.writeLine("result: " + result);
    }

    /* Bad source: stores Long.MAX_VALUE into the member, then invokes the unguarded sink. */
    public void bad() throws Throwable
    {
        long data;

        /* POTENTIAL FLAW: Use the maximum size of the data type */
        data = Long.MAX_VALUE;

        dataBad = data;
        badSink();
    }

    /* Runs both good variants: good source -> bad sink, and bad source -> good sink. */
    public void good() throws Throwable
    {
        goodG2B();
        goodB2G();
    }

    /* Sink for goodG2B(): same unguarded increment as badSink(), but fed a safe value. */
    private void goodG2BSink() throws Throwable
    {
        long data = dataGoodG2B;

        /* POTENTIAL FLAW: if data == Long.MAX_VALUE, this will overflow */
        long result = (long)(++data);

        IO.writeLine("result: " + result);
    }

    /* goodG2B() - use goodsource and badsink */
    private void goodG2B() throws Throwable
    {
        long data;

        /* FIX: Use a hardcoded number that won't cause underflow, overflow, divide by zero, or loss-of-precision issues */
        data = 2;

        dataGoodG2B = data;
        goodG2BSink();
    }

    /* Sink for goodB2G(): checks the range before incrementing, so Long.MAX_VALUE is rejected. */
    private void goodB2GSink() throws Throwable
    {
        long data = dataGoodB2G;

        /* FIX: Add a check to prevent an overflow from occurring */
        if (data < Long.MAX_VALUE)
        {
            long result = (long)(++data);
            IO.writeLine("result: " + result);
        }
        else
        {
            IO.writeLine("data value is too large to increment.");
        }
    }

    /* goodB2G() - use badsource and goodsink */
    private void goodB2G() throws Throwable
    {
        long data;

        /* POTENTIAL FLAW: Use the maximum size of the data type */
        data = Long.MAX_VALUE;

        dataGoodB2G = data;
        goodB2GSink();
    }

    /* Below is the main(). It is only used when building this testcase on
     * its own for testing or for building a binary to use in testing binary
     * analysis tools. It is not used when compiling all the testcases as one
     * application, which is how source code analysis tools are tested.
     */
    public static void main(String[] args) throws ClassNotFoundException,
           InstantiationException, IllegalAccessException
    {
        mainFromParent(args);
    }
}
|
/*
Copyright 2017-2019 Advanced Products Limited,
dannyb@cloudpta.com
github.com/dannyb2018
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.cloudpta.quantpipeline.backend.data_provider.dss.processors.CPTADSSDataProviderProcessor.request_response.dss;
/**
 * DSS message requesting an end-of-day (EOD) data extraction. The message and
 * extraction type fields are pinned to the EOD constants at construction time.
 *
 * @author Danny
 */
public class CPTADSSEODMessage extends CPTADSSMessage
{
    public CPTADSSEODMessage()
    {
        // Initialise the base message first, then mark this instance as EOD.
        super();
        messageType = CPTADSSConstants.EOD_MESSAGE_TYPE;
        extractionType = CPTADSSConstants.EOD_EXTRACTION_TYPE;
    }
}
|
package com.ciel.loadstar.user.service.impl;
import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import com.baomidou.mybatisplus.extension.service.impl.ServiceImpl;
import com.ciel.loadstar.infrastructure.dto.web.ReturnModel;
import com.ciel.loadstar.infrastructure.events.account.AccountEvent;
import com.ciel.loadstar.infrastructure.events.account.AccountEventType;
import com.ciel.loadstar.infrastructure.exceptions.ObjectNotExistingException;
import com.ciel.loadstar.infrastructure.utils.ApiReturnUtil;
import com.ciel.loadstar.user.client.AuthServiceClient;
import com.ciel.loadstar.user.client.FolderServiceClient;
import com.ciel.loadstar.user.dto.input.CreateUser;
import com.ciel.loadstar.user.entity.User;
import com.ciel.loadstar.user.mq.producer.AccountEventProducer;
import com.ciel.loadstar.user.repository.ThemeRepository;
import com.ciel.loadstar.user.repository.UserRepository;
import com.ciel.loadstar.user.service.AccountService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.util.Assert;
import java.util.Date;
/**
 * Account management service. Keeps the local user table in sync with the
 * remote auth service and publishes account lifecycle events to MQ.
 *
 * @Author Ciel Qian
 * @CreateDate 2018/8/15
 * @Comment
 */
@Service
@Slf4j
public class AccountServiceImpl extends ServiceImpl<UserRepository, User> implements AccountService {

    @Autowired
    UserRepository accountRepository;

    @Autowired
    ThemeRepository themeRepository;

    @Autowired
    AuthServiceClient authServiceClient;

    @Autowired
    FolderServiceClient folderServiceClient;

    @Autowired
    ThemeServiceImpl themeService;

    @Autowired
    AccountEventProducer accountEventProducer;

    /**
     * Loads a user by primary key.
     *
     * @throws ObjectNotExistingException when no user with this id exists
     */
    @Override
    public User queryById(Long id) {
        User user = accountRepository.selectById(id);
        if (user == null) {
            throw new ObjectNotExistingException("用户不存在");
        }
        return user;
    }

    /**
     * Creates an account: registers the credentials with the remote auth
     * service, persists the local profile and its default theme, then emits a
     * CREATE account event carrying the remote account id.
     */
    @Override
    public User create(CreateUser user) {
        User existing = findByName(user.getUsername());
        Assert.isNull(existing, "用户已存在");
        // The auth service owns the canonical account id; fail fast on remote errors.
        ReturnModel<Long> remoteResult = authServiceClient.createUser(user);
        ApiReturnUtil.checkSuccess(remoteResult);
        User account = new User();
        account.setAccountId(remoteResult.getData());
        account.setNickname(user.getNickname());
        account.setUsername(user.getUsername());
        account.setLastSeen(new Date());
        accountRepository.insert(account);
        themeService.create(account);
        AccountEvent event = new AccountEvent(AccountEventType.CREATE);
        event.setId(remoteResult.getData().toString());
        accountEventProducer.send(event);
        return account;
    }

    /**
     * Deletes an account remotely first, then locally, and emits a DELETE event.
     */
    @Override
    public void delete(Long userId) {
        User user = queryById(userId);
        // Wildcard instead of the raw ReturnModel type: the payload type is
        // irrelevant here but the compiler can still type-check the call.
        ReturnModel<?> remoteResult = authServiceClient.deleteUser(user.getUsername());
        ApiReturnUtil.checkSuccess(remoteResult);
        accountRepository.deleteById(userId);
        AccountEvent event = new AccountEvent(AccountEventType.DELETE);
        event.setId(userId.toString());
        accountEventProducer.send(event);
    }

    /** Finds a user by username, or returns null when absent. */
    @Override
    public User findByName(String username) {
        QueryWrapper<User> queryWrapper = new QueryWrapper<>();
        queryWrapper.eq("username", username);
        return accountRepository.selectOne(queryWrapper);
    }

    /** Finds a user by its remote (auth-service) account id, or returns null when absent. */
    @Override
    public User findByAccountId(Long accountId) {
        QueryWrapper<User> queryWrapper = new QueryWrapper<>();
        queryWrapper.eq("account_id", accountId);
        return accountRepository.selectOne(queryWrapper);
    }
}
|
package io.quarkus.bootstrap.runner;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.ObjectInputStream;
import java.lang.reflect.InvocationTargetException;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLClassLoader;
import java.net.URLDecoder;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
/**
 * Entry point of a packaged Quarkus application. Depending on launch flags it
 * either starts dev mode, re-augments (rebuilds) the application, or loads the
 * serialized application description and invokes the generated main class.
 */
public class QuarkusEntryPoint {

    public static final String QUARKUS_APPLICATION_DAT = "quarkus/quarkus-application.dat";

    public static void main(String... args) throws Throwable {
        System.setProperty("java.util.logging.manager", org.jboss.logmanager.LogManager.class.getName());
        Timing.staticInitStarted();
        doRun(args);
    }

    /**
     * Dispatches to dev mode, re-augmentation, or the normal serialized-app run.
     *
     * <p>{@code args} is deliberately typed as {@link Object}: when it is handed to
     * {@code Method.invoke(null, args)} below it must be treated as a single
     * argument (the {@code String[]} itself) instead of being spread into the
     * reflective varargs array.
     */
    private static void doRun(Object args) throws IOException, ClassNotFoundException, IllegalAccessException,
            InvocationTargetException, NoSuchMethodException {
        // The app root is three levels above the jar containing this class.
        String path = QuarkusEntryPoint.class.getProtectionDomain().getCodeSource().getLocation().getPath();
        String decodedPath = URLDecoder.decode(path, "UTF-8");
        Path appRoot = new File(decodedPath).toPath().getParent().getParent().getParent();

        if (Boolean.parseBoolean(System.getenv("QUARKUS_LAUNCH_DEVMODE"))) {
            DevModeMediator.doDevMode(appRoot);
        } else if (Boolean.getBoolean("quarkus.launch.rebuild")) {
            doReaugment(appRoot);
        } else {
            SerializedApplication app = null;
            try (InputStream in = Files.newInputStream(appRoot.resolve(QUARKUS_APPLICATION_DAT))) {
                app = SerializedApplication.read(in, appRoot);
                Thread.currentThread().setContextClassLoader(app.getRunnerClassLoader());
                Class<?> mainClass = app.getRunnerClassLoader().loadClass(app.getMainClass());
                mainClass.getMethod("main", String[].class).invoke(null, args);
            } finally {
                if (app != null) {
                    app.getRunnerClassLoader().close();
                }
            }
        }
    }

    /** Re-runs augmentation using the deployment class path recorded at build time. */
    private static void doReaugment(Path appRoot) throws IOException, ClassNotFoundException, IllegalAccessException,
            InvocationTargetException, NoSuchMethodException {
        try (ObjectInputStream in = new ObjectInputStream(
                Files.newInputStream(appRoot.resolve("lib/deployment/deployment-class-path.dat")))) {
            // The .dat file is written by the build as a serialized List<String>;
            // the unchecked cast is unavoidable with ObjectInputStream.
            @SuppressWarnings("unchecked")
            List<String> paths = (List<String>) in.readObject();
            //yuck, should use runner class loader
            URLClassLoader loader = new URLClassLoader(paths.stream().map((s) -> {
                try {
                    return appRoot.resolve(s).toUri().toURL();
                } catch (MalformedURLException e) {
                    throw new RuntimeException(e);
                }
            }).toArray(URL[]::new));
            try {
                loader.loadClass("io.quarkus.deployment.mutability.ReaugmentTask")
                        .getDeclaredMethod("main", Path.class).invoke(null, appRoot);
            } finally {
                loader.close();
            }
        }
    }
}
|
package io.renren.modules.app.service.thirdParty.wxSdk;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.config.RegistryBuilder;
import org.apache.http.conn.ConnectTimeoutException;
import org.apache.http.conn.socket.ConnectionSocketFactory;
import org.apache.http.conn.socket.PlainConnectionSocketFactory;
import org.apache.http.conn.ssl.DefaultHostnameVerifier;
import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.impl.conn.BasicHttpClientConnectionManager;
import org.apache.http.util.EntityUtils;
import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.SSLContext;
import java.io.InputStream;
import java.net.SocketTimeoutException;
import java.net.UnknownHostException;
import java.security.KeyStore;
import java.security.SecureRandom;
/**
 * HTTP(S) transport for WeChat Pay API calls. Supports plain requests and
 * mutual-TLS requests using the merchant PKCS#12 certificate, and reports
 * timing / failure statistics per domain.
 */
public class WXPayRequest {
    private final WXPayConfig config;

    public WXPayRequest(WXPayConfig config) throws Exception{
        this.config = config;
    }

    /**
     * Performs a single POST to the given domain; no retry.
     *
     * @param domain API domain to hit
     * @param urlSuffix API path, appended to the domain
     * @param uuid request identifier (used by callers for reporting)
     * @param data XML request body
     * @param connectTimeoutMs TCP connect timeout in milliseconds
     * @param readTimeoutMs socket read timeout in milliseconds
     * @param useCert whether to present the merchant client certificate
     *                (required for refund / reverse operations)
     * @return response body decoded as UTF-8
     * @throws Exception on TLS setup or I/O failure
     */
    private String requestOnce(final String domain, String urlSuffix, String uuid, String data, int connectTimeoutMs, int readTimeoutMs, boolean useCert) throws Exception {
        BasicHttpClientConnectionManager connManager;
        if (useCert) {
            // The merchant certificate is a PKCS#12 keystore whose password is the merchant id.
            char[] password = config.getMchID().toCharArray();
            KeyStore ks = KeyStore.getInstance("PKCS12");
            // FIX: close the certificate stream after loading the keystore;
            // it was previously leaked on every certificated request.
            try (InputStream certStream = config.getCertStream()) {
                ks.load(certStream, password);
            }
            // Initialise the key manager with the merchant key material.
            KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
            kmf.init(ks, password);
            SSLContext sslContext = SSLContext.getInstance("TLS");
            sslContext.init(kmf.getKeyManagers(), null, new SecureRandom());
            // NOTE(review): only TLSv1 is enabled, preserved for behavioural
            // compatibility — confirm whether the endpoint now requires TLS 1.2+.
            SSLConnectionSocketFactory sslConnectionSocketFactory = new SSLConnectionSocketFactory(
                    sslContext,
                    new String[]{"TLSv1"},
                    null,
                    new DefaultHostnameVerifier());
            connManager = new BasicHttpClientConnectionManager(
                    RegistryBuilder.<ConnectionSocketFactory>create()
                            .register("http", PlainConnectionSocketFactory.getSocketFactory())
                            .register("https", sslConnectionSocketFactory)
                            .build(),
                    null,
                    null,
                    null
            );
        }
        else {
            connManager = new BasicHttpClientConnectionManager(
                    RegistryBuilder.<ConnectionSocketFactory>create()
                            .register("http", PlainConnectionSocketFactory.getSocketFactory())
                            .register("https", SSLConnectionSocketFactory.getSocketFactory())
                            .build(),
                    null,
                    null,
                    null
            );
        }

        HttpClient httpClient = HttpClientBuilder.create()
                .setConnectionManager(connManager)
                .build();

        String url = "https://" + domain + urlSuffix;
        HttpPost httpPost = new HttpPost(url);

        RequestConfig requestConfig = RequestConfig.custom().setSocketTimeout(readTimeoutMs).setConnectTimeout(connectTimeoutMs).build();
        httpPost.setConfig(requestConfig);

        StringEntity postEntity = new StringEntity(data, "UTF-8");
        httpPost.addHeader("Content-Type", "text/xml");
        httpPost.addHeader("User-Agent", WXPayConstants.USER_AGENT + " " + config.getMchID());
        httpPost.setEntity(postEntity);

        HttpResponse httpResponse = httpClient.execute(httpPost);
        HttpEntity httpEntity = httpResponse.getEntity();
        return EntityUtils.toString(httpEntity, "UTF-8");
    }

    /** Sends one usage/error report for this request to the WXPay report facility. */
    private void reportRequest(String uuid, long elapsedTimeMillis, IWXPayDomain.DomainInfo domainInfo,
                               int connectTimeoutMs, int readTimeoutMs,
                               boolean firstHasDnsErr, boolean firstHasConnectTimeout, boolean firstHasReadTimeout) {
        WXPayReport.getInstance(config).report(
                uuid,
                elapsedTimeMillis,
                domainInfo.domain,
                domainInfo.primaryDomain,
                connectTimeoutMs,
                readTimeoutMs,
                firstHasDnsErr,
                firstHasConnectTimeout,
                firstHasReadTimeout);
    }

    /**
     * Executes a request against the currently selected WeChat Pay domain,
     * classifying network failures (DNS, connect timeout, read timeout),
     * reporting the outcome to the domain selector and the report facility,
     * and rethrowing any failure to the caller.
     */
    private String request(String urlSuffix, String uuid, String data, int connectTimeoutMs, int readTimeoutMs, boolean useCert, boolean autoReport) throws Exception {
        Exception exception = null;
        long elapsedTimeMillis = 0;
        long startTimestampMs = WXPayUtil.getCurrentTimestampMs();
        boolean firstHasDnsErr = false;
        boolean firstHasConnectTimeout = false;
        boolean firstHasReadTimeout = false;
        IWXPayDomain.DomainInfo domainInfo = config.getWXPayDomain().getDomain(config);
        if(domainInfo == null){
            throw new Exception("WXPayConfig.getWXPayDomain().getDomain() is empty or null");
        }
        try {
            String result = requestOnce(domainInfo.domain, urlSuffix, uuid, data, connectTimeoutMs, readTimeoutMs, useCert);
            elapsedTimeMillis = WXPayUtil.getCurrentTimestampMs()-startTimestampMs;
            config.getWXPayDomain().report(domainInfo.domain, elapsedTimeMillis, null);
            reportRequest(uuid, elapsedTimeMillis, domainInfo, connectTimeoutMs, readTimeoutMs,
                    firstHasDnsErr, firstHasConnectTimeout, firstHasReadTimeout);
            return result;
        }
        catch (UnknownHostException ex) { // DNS resolution failed, or the domain does not exist
            exception = ex;
            firstHasDnsErr = true;
            elapsedTimeMillis = WXPayUtil.getCurrentTimestampMs()-startTimestampMs;
            WXPayUtil.getLogger().warn("UnknownHostException for domainInfo {}", domainInfo);
            reportRequest(uuid, elapsedTimeMillis, domainInfo, connectTimeoutMs, readTimeoutMs,
                    firstHasDnsErr, firstHasConnectTimeout, firstHasReadTimeout);
        }
        catch (ConnectTimeoutException ex) {
            exception = ex;
            firstHasConnectTimeout = true;
            elapsedTimeMillis = WXPayUtil.getCurrentTimestampMs()-startTimestampMs;
            WXPayUtil.getLogger().warn("connect timeout happened for domainInfo {}", domainInfo);
            reportRequest(uuid, elapsedTimeMillis, domainInfo, connectTimeoutMs, readTimeoutMs,
                    firstHasDnsErr, firstHasConnectTimeout, firstHasReadTimeout);
        }
        catch (SocketTimeoutException ex) {
            exception = ex;
            firstHasReadTimeout = true;
            elapsedTimeMillis = WXPayUtil.getCurrentTimestampMs()-startTimestampMs;
            WXPayUtil.getLogger().warn("timeout happened for domainInfo {}", domainInfo);
            reportRequest(uuid, elapsedTimeMillis, domainInfo, connectTimeoutMs, readTimeoutMs,
                    firstHasDnsErr, firstHasConnectTimeout, firstHasReadTimeout);
        }
        catch (Exception ex) {
            exception = ex;
            elapsedTimeMillis = WXPayUtil.getCurrentTimestampMs()-startTimestampMs;
            reportRequest(uuid, elapsedTimeMillis, domainInfo, connectTimeoutMs, readTimeoutMs,
                    firstHasDnsErr, firstHasConnectTimeout, firstHasReadTimeout);
        }
        // Feed the failure back into domain selection, then rethrow.
        config.getWXPayDomain().report(domainInfo.domain, elapsedTimeMillis, exception);
        throw exception;
    }

    /**
     * Retryable, one-way-TLS (no client certificate) request using configured timeouts.
     *
     * @param urlSuffix API path
     * @param uuid request identifier
     * @param data XML request body
     * @return response body
     */
    public String requestWithoutCert(String urlSuffix, String uuid, String data, boolean autoReport) throws Exception {
        return this.request(urlSuffix, uuid, data, config.getHttpConnectTimeoutMs(), config.getHttpReadTimeoutMs(), false, autoReport);
    }

    /**
     * Retryable, one-way-TLS request with explicit timeouts.
     *
     * @param urlSuffix API path
     * @param uuid request identifier
     * @param data XML request body
     * @param connectTimeoutMs TCP connect timeout in milliseconds
     * @param readTimeoutMs socket read timeout in milliseconds
     * @return response body
     */
    public String requestWithoutCert(String urlSuffix, String uuid, String data, int connectTimeoutMs, int readTimeoutMs, boolean autoReport) throws Exception {
        return this.request(urlSuffix, uuid, data, connectTimeoutMs, readTimeoutMs, false, autoReport);
    }

    /**
     * Retryable, mutual-TLS (client certificate) request using configured timeouts.
     *
     * @param urlSuffix API path
     * @param uuid request identifier
     * @param data XML request body
     * @return response body
     */
    public String requestWithCert(String urlSuffix, String uuid, String data, boolean autoReport) throws Exception {
        return this.request(urlSuffix, uuid, data, config.getHttpConnectTimeoutMs(), config.getHttpReadTimeoutMs(), true, autoReport);
    }

    /**
     * Retryable, mutual-TLS request with explicit timeouts.
     *
     * @param urlSuffix API path
     * @param uuid request identifier
     * @param data XML request body
     * @param connectTimeoutMs TCP connect timeout in milliseconds
     * @param readTimeoutMs socket read timeout in milliseconds
     * @return response body
     */
    public String requestWithCert(String urlSuffix, String uuid, String data, int connectTimeoutMs, int readTimeoutMs, boolean autoReport) throws Exception {
        return this.request(urlSuffix, uuid, data, connectTimeoutMs, readTimeoutMs, true, autoReport);
    }
}
|
/*
* (c) Copyright Christian P. Fries, Germany. Contact: email@christian-fries.de.
*
* Created on 26.05.2013
*/
package net.finmath.montecarlo.interestrate.models.covariance;
import java.util.Map;
import net.finmath.exception.CalculationException;
import net.finmath.stochastic.RandomVariable;
/**
* Special variant of a blended model (or displaced diffusion model)
* build on top of a standard covariance model
* using the special function corresponding to the Hull-White local volatility.
*
* The model constructed for the <i>i</i>-th factor loading is
* <i>(1+L<sub>i</sub>(t) d) F<sub>i</sub>(t)</i>
* where <i>d</i> is a constant (the period length), <i>L<sub>i</sub></i> is
* the realization of the <i>i</i>-th component of the stochastic process and
* <i>F<sub>i</sub></i> is the factor loading from the given covariance model.
*
* If this model is combined with an exponential decay volatility model
* <code>LIBORVolatilityModelTwoParameterExponentialForm</code>, then
* the resulting LIBOR Market model corresponds to a Hull-White short rate model
* (with constant short rate volatility and mean reversion).
*
* The parameter of this model is the parameter vector of the given base covariance model.
*
* @author Christian Fries
* @version 1.0
*/
public class HullWhiteLocalVolatilityModel extends AbstractLIBORCovarianceModelParametric {

	private static final long serialVersionUID = -4182083344704425769L;

	private final AbstractLIBORCovarianceModelParametric covarianceModel;
	private final double periodLength;

	/**
	 * Creates the local volatility wrapper: the factor loading of component <i>i</i>
	 * becomes <i>(1+L<sub>i</sub>(t) d) F<sub>i</sub>(t)</i>, where <i>F</i> comes from
	 * the base model and <i>d</i> is the fixed period length.
	 *
	 * @param covarianceModel The base covariance model supplying the factor loadings <i>F</i>.
	 * @param periodLength The constant <i>d</i> of the local volatility function.
	 */
	public HullWhiteLocalVolatilityModel(final AbstractLIBORCovarianceModelParametric covarianceModel, final double periodLength) {
		super(covarianceModel.getTimeDiscretization(), covarianceModel.getLiborPeriodDiscretization(), covarianceModel.getNumberOfFactors());
		this.covarianceModel = covarianceModel;
		this.periodLength = periodLength;
	}

	@Override
	public Object clone() {
		// Deep-copy the base model; periodLength is immutable.
		return new HullWhiteLocalVolatilityModel((AbstractLIBORCovarianceModelParametric) covarianceModel.clone(), periodLength);
	}

	/**
	 * Returns the base covariance model, i.e., the model providing the factor loading <i>F</i>.
	 *
	 * @return The base covariance model.
	 */
	public AbstractLIBORCovarianceModelParametric getBaseCovarianceModel() {
		return covarianceModel;
	}

	@Override
	public double[] getParameterAsDouble() {
		// This wrapper adds no parameters of its own; delegate to the base model.
		return covarianceModel.getParameterAsDouble();
	}

	@Override
	public AbstractLIBORCovarianceModelParametric getCloneWithModifiedParameters(final double[] parameters) {
		return new HullWhiteLocalVolatilityModel(covarianceModel.getCloneWithModifiedParameters(parameters), periodLength);
	}

	@Override
	public RandomVariable[] getFactorLoading(final int timeIndex, final int component, final RandomVariable[] realizationAtTimeIndex) {
		final RandomVariable[] factorLoading = covarianceModel.getFactorLoading(timeIndex, component, realizationAtTimeIndex);
		if(realizationAtTimeIndex == null || realizationAtTimeIndex[component] == null) {
			// Without a realization the local volatility factor cannot be formed;
			// fall back to the base model's loading unchanged.
			return factorLoading;
		}
		// Scale every factor by (1 + L_i(t) d).
		final RandomVariable localVolatilityFactor = realizationAtTimeIndex[component].mult(periodLength).add(1.0);
		for(int i = 0; i < factorLoading.length; i++) {
			factorLoading[i] = factorLoading[i].mult(localVolatilityFactor);
		}
		return factorLoading;
	}

	@Override
	public RandomVariable getFactorLoadingPseudoInverse(final int timeIndex, final int component, final int factor, final RandomVariable[] realizationAtTimeIndex) {
		throw new UnsupportedOperationException();
	}

	@Override
	public AbstractLIBORCovarianceModelParametric getCloneWithModifiedData(final Map<String, Object> dataModified)
			throws CalculationException {
		AbstractLIBORCovarianceModelParametric newCovarianceModel = this.covarianceModel;
		double newPeriodLength = this.periodLength;
		if(dataModified != null) {
			// Only push dataModified down into the base model when no explicit
			// replacement model is supplied ...
			if(!dataModified.containsKey("covarianceModel")) {
				newCovarianceModel = newCovarianceModel.getCloneWithModifiedData(dataModified);
			}
			// ... because an explicitly passed covarianceModel has priority.
			newCovarianceModel = (AbstractLIBORCovarianceModelParametric) dataModified.getOrDefault("covarianceModel", newCovarianceModel);
			newPeriodLength = (double) dataModified.getOrDefault("periodLength", newPeriodLength);
		}
		return new HullWhiteLocalVolatilityModel(newCovarianceModel, newPeriodLength);
	}
}
|
package com.egendata;
import android.app.Application;
import com.facebook.react.ReactApplication;
import com.facebook.react.ReactNativeHost;
import com.facebook.react.ReactPackage;
import com.facebook.react.shell.MainReactPackage;
import com.facebook.soloader.SoLoader;
import com.horcrux.svg.SvgPackage;
import com.lugg.ReactNativeConfig.ReactNativeConfigPackage;
import com.reactnativecommunity.asyncstorage.AsyncStoragePackage;
import com.swmansion.gesturehandler.react.RNGestureHandlerPackage;
import com.pedrouid.crypto.RCTCryptoPackage;
import org.reactnative.camera.RNCameraPackage;
import com.reactlibrary.JosePackage;
import java.util.Arrays;
import java.util.List;
public class MainApplication extends Application implements ReactApplication {

  /** React Native host: declares the JS entry module and the native packages in use. */
  private final ReactNativeHost mReactNativeHost = new ReactNativeHost(this) {

    @Override
    public boolean getUseDeveloperSupport() {
      // Dev support (reload menu, redbox) only for debug builds.
      return BuildConfig.DEBUG;
    }

    @Override
    protected List<ReactPackage> getPackages() {
      return Arrays.<ReactPackage>asList(
          new MainReactPackage(),
          new RCTCryptoPackage(),
          new AsyncStoragePackage(),
          new SvgPackage(),
          new RNGestureHandlerPackage(),
          new RNCameraPackage(),
          new ReactNativeConfigPackage(),
          new JosePackage());
    }

    @Override
    protected String getJSMainModuleName() {
      // Entry module of the JS bundle.
      return "index";
    }
  };

  @Override
  public ReactNativeHost getReactNativeHost() {
    return mReactNativeHost;
  }

  @Override
  public void onCreate() {
    super.onCreate();
    // Load native libraries before any React code runs; exopackage disabled.
    SoLoader.init(this, /* native exopackage */ false);
  }
}
|
package hahaha.lalala.lambda.testlambda.fun;
import java.util.HashMap;
import java.util.Map;
import java.util.function.BiFunction;
public class Test {

    @org.junit.Test
    public void test02() {
        Map<String, String> countries = new HashMap<>();
        countries.put("ch", "china");
        countries.put("ja", "japan");
        countries.put("ru", "russia");
        System.out.println(countries);
        // Lambda form: any value ending in "a" becomes "李白", others stay unchanged.
        countries.replaceAll((key, value) -> value.endsWith("a") ? "李白" : value);
        System.out.println(countries);
    }

    @org.junit.Test
    public void test01() {
        Map<String, String> countries = new HashMap<>();
        countries.put("ch", "china");
        countries.put("ja", "japan");
        countries.put("ru", "russia");
        System.out.println(countries);
        // Anonymous-class form of the same replacement, tracing each entry visited.
        countries.replaceAll(new BiFunction<String, String, String>() {
            @Override
            public String apply(String key, String value) {
                System.out.println(key + "--->" + value);
                if (value.endsWith("a")) {
                    return "李白";
                }
                return value;
            }
        });
        System.out.println(countries);
    }
}
|
/* Copyright 2010 The Tor Project
* See LICENSE for licensing information */
package org.torproject.ernie.db;
import java.io.*;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.logging.*;
import org.apache.commons.codec.DecoderException;
import org.apache.commons.codec.digest.*;
import org.apache.commons.codec.binary.*;
/**
* Sanitizes bridge descriptors, i.e., removes all possibly sensitive
* information from them, and writes them to a local directory structure.
* During the sanitizing process, all information about the bridge
 * identity or IP address is removed or replaced. The goal is to keep the
* sanitized bridge descriptors useful for statistical analysis while not
* making it easier for an adversary to enumerate bridges.
*
* There are three types of bridge descriptors: bridge network statuses
* (lists of all bridges at a given time), server descriptors (published
* by the bridge to advertise their capabilities), and extra-info
* descriptors (published by the bridge, mainly for statistical analysis).
*
* Network statuses, server descriptors, and extra-info descriptors are
* linked via descriptor digests: extra-info descriptors are referenced
* from server descriptors, and server descriptors are referenced from
* network statuses. These references need to be changed during the
* sanitizing process, because descriptor contents change and so do the
* descriptor digests.
*
* No assumptions are made about the order in which bridge descriptors are
* parsed. The approach taken here is to sanitize bridge descriptors even
* with incomplete knowledge about references and to update them as soon
* as these information get known. We are keeping a persistent data
* structure, the bridge descriptor mapping, to hold information about
* every single descriptor. The idea is that every descriptor is (a)
* referenced from a network status and consists of (b) a server
* descriptor and (c) an extra-info descriptor, both of which are
* published at the same time. Using this data structure, we can repair
* references as soon as we learn more about the descriptor and regardless
* of the order of incoming bridge descriptors.
*
* The process of sanitizing a bridge descriptor is as follows, depending
* on the type of descriptor:
*
* Network statuses are processed by sanitizing every r line separately
* and looking up whether the descriptor mapping contains a bridge with
* given identity hash and descriptor publication time. If so, the new
* server descriptor identifier can be added. If not, we're adding all
* 0's.
*
* While sanitizing a server descriptor, its identity hash and publication
* time are looked up in order to put in the extra-info descriptor
* identifier in case the corresponding extra-info descriptor was
* sanitized before. Further, its publication time is noted down, so that
* all network statuses that might be referencing this server descriptor
* can be re-written at the end of the sanitizing procedure.
*
* Extra-info descriptors are processed by looking up their identity hash
* and publication time in the descriptor mapping. If the corresponding
* server descriptor was sanitized before, the server descriptor is
* re-written to include the new extra-info descriptor digest, and the
* publication time is noted down in order to re-write the network
* statuses possibly referencing this extra-info descriptor and its
* corresponding server descriptor at the end of the sanitizing process.
*
* After sanitizing all bridge descriptors, the network statuses that
* might be referencing server descriptors which have been (re-)written
* during this execution are re-written, too. This may be necessary in
* order to update previously broken references to server descriptors.
*/
public class SanitizedBridgesWriter {
/**
* Hex representation of null reference that is written to bridge
* descriptors if we don't have the real reference, yet.
*/
private static final String NULL_REFERENCE =
"0000000000000000000000000000000000000000";
/**
* Mapping between a descriptor as referenced from a network status to
* the digests of server descriptor and extra-info descriptor.
*/
private static class DescriptorMapping {
/**
* Creates a new mapping from comma-separated values as read from the
* persistent mapping file.
*/
private DescriptorMapping(String commaSeparatedValues) {
String[] parts = commaSeparatedValues.split(",");
this.hashedBridgeIdentity = parts[0];
this.published = parts[1];
this.serverDescriptorIdentifier = parts[2];
this.extraInfoDescriptorIdentifier = parts[3];
}
/**
* Creates a new mapping for a given identity hash and descriptor
* publication time that has all 0's as descriptor digests.
*/
private DescriptorMapping(String hashedBridgeIdentity,
String published) {
this.hashedBridgeIdentity = hashedBridgeIdentity;
this.published = published;
this.serverDescriptorIdentifier = NULL_REFERENCE;
this.extraInfoDescriptorIdentifier = NULL_REFERENCE;
}
private String hashedBridgeIdentity;
private String published;
private String serverDescriptorIdentifier;
private String extraInfoDescriptorIdentifier;
/**
* Returns a string representation of this descriptor mapping that can
* be written to the persistent mapping file.
*/
public String toString() {
return this.hashedBridgeIdentity + "," + this.published + ","
+ this.serverDescriptorIdentifier + ","
+ this.extraInfoDescriptorIdentifier;
}
}
/**
* File containing the mapping between network status entries, server
* descriptors, and extra-info descriptors.
*/
private File bridgeDescriptorMappingsFile;
/**
* Mapping between status entries, server descriptors, and extra-info
* descriptors. This mapping is required to re-establish the references
* from status entries to server descriptors and from server descriptors
* to extra-info descriptors. The original references are broken when
* sanitizing, because descriptor contents change and so do the
* descriptor digests that are used for referencing. Map key contains
* hashed bridge identity and descriptor publication time, map value
* contains map key plus new server descriptor identifier and new
* extra-info descriptor identifier.
*/
private SortedMap<String, DescriptorMapping> bridgeDescriptorMappings;
/**
* Logger for this class.
*/
private Logger logger;
/**
* Publication times of server descriptors and extra-info descriptors
* parsed in the current execution. These times are used to determine
* which statuses need to be rewritten at the end of the execution.
*/
private SortedSet<String> descriptorPublicationTimes;
/**
* Output directory for writing sanitized bridge descriptors.
*/
private String sanitizedBridgesDir;
/**
* Initializes this class, including reading in the known descriptor
* mapping.
*/
public SanitizedBridgesWriter(String dir) {
/* Memorize argument values. */
this.sanitizedBridgesDir = dir;
/* Initialize logger. */
this.logger = Logger.getLogger(
SanitizedBridgesWriter.class.getName());
/* Initialize data structure. */
this.bridgeDescriptorMappings = new TreeMap<String,
DescriptorMapping>();
this.descriptorPublicationTimes = new TreeSet<String>();
/* Read known descriptor mappings from disk. */
this.bridgeDescriptorMappingsFile = new File(
"stats/bridge-descriptor-mappings");
if (this.bridgeDescriptorMappingsFile.exists()) {
try {
BufferedReader br = new BufferedReader(new FileReader(
this.bridgeDescriptorMappingsFile));
String line = null;
while ((line = br.readLine()) != null) {
if (line.split(",").length == 4) {
String[] parts = line.split(",");
DescriptorMapping dm = new DescriptorMapping(line);
dm.hashedBridgeIdentity = parts[0];
dm.published = parts[1];
dm.serverDescriptorIdentifier = parts[2];
dm.extraInfoDescriptorIdentifier = parts[3];
this.bridgeDescriptorMappings.put(line.split(",")[0] + ","
+ line.split(",")[1], dm);
} else {
this.logger.warning("Corrupt line '" + line + "' in "
+ this.bridgeDescriptorMappingsFile.getAbsolutePath()
+ ". Skipping.");
continue;
}
}
br.close();
} catch (IOException e) {
this.logger.log(Level.WARNING, "Could not read in "
+ this.bridgeDescriptorMappingsFile.getAbsolutePath()
+ ".");
return;
}
}
}
/**
* Sanitizes a network status and writes it to disk. Processes every r
* line separately and looks up whether the descriptor mapping contains
* a bridge with given identity hash and descriptor publication time. */
public void sanitizeAndStoreNetworkStatus(byte[] data,
String publicationTime) {
/* Parse the given network status line by line. */
StringBuilder scrubbed = new StringBuilder();
try {
BufferedReader br = new BufferedReader(new StringReader(new String(
data, "US-ASCII")));
String line = null;
while ((line = br.readLine()) != null) {
/* r lines contain sensitive information that needs to be removed
* or replaced. */
if (line.startsWith("r ")) {
/* Parse the relevant parts of this r line. */
String[] parts = line.split(" ");
String bridgeIdentity = parts[2];
String descPublicationTime = parts[4] + " " + parts[5];
String orPort = parts[7];
String dirPort = parts[8];
/* Look up the descriptor in the descriptor mapping, or add a
* new mapping entry if there is none. */
String hashedBridgeIdentityHex = Hex.encodeHexString(
DigestUtils.sha(Base64.decodeBase64(bridgeIdentity
+ "=="))).toLowerCase();
String mappingKey = hashedBridgeIdentityHex + ","
+ descPublicationTime;
DescriptorMapping mapping = null;
if (this.bridgeDescriptorMappings.containsKey(mappingKey)) {
mapping = this.bridgeDescriptorMappings.get(mappingKey);
} else {
mapping = new DescriptorMapping(hashedBridgeIdentityHex.
toLowerCase(), descPublicationTime);
this.bridgeDescriptorMappings.put(mappingKey, mapping);
}
/* Write scrubbed r line to buffer. */
String hashedBridgeIdentityBase64 = Base64.encodeBase64String(
DigestUtils.sha(Base64.decodeBase64(bridgeIdentity
+ "=="))).substring(0, 27);
String sdi = Base64.encodeBase64String(Hex.decodeHex(
mapping.serverDescriptorIdentifier.toCharArray())).
substring(0, 27);
scrubbed.append("r Unnamed "
+ hashedBridgeIdentityBase64 + " " + sdi + " "
+ descPublicationTime + " 127.0.0.1 " + orPort + " "
+ dirPort + "\n");
/* Nothing special about s lines; just copy them. */
} else if (line.startsWith("s ")) {
scrubbed.append(line + "\n");
/* There should be nothing else but r and s lines in the network
* status. If there is, we should probably learn before writing
* anything to the sanitized descriptors. */
} else {
this.logger.fine("Unknown line '" + line + "' in bridge "
+ "network status. Not writing to disk!");
return;
}
}
br.close();
} catch (IOException e) {
this.logger.log(Level.WARNING, "Could not parse bridge network "
+ "status.", e);
return;
} catch (DecoderException e) {
this.logger.log(Level.WARNING, "Could not parse bridge network "
+ "status.", e);
return;
}
/* Write the sanitized network status to disk. */
try {
/* Determine file name. */
String syear = publicationTime.substring(0, 4);
String smonth = publicationTime.substring(5, 7);
String sday = publicationTime.substring(8, 10);
String stime = publicationTime.substring(11, 13)
+ publicationTime.substring(14, 16)
+ publicationTime.substring(17, 19);
File statusFile = new File(this.sanitizedBridgesDir + "/" + syear
+ "/" + smonth + "/statuses/" + sday + "/" + syear + smonth
+ sday + "-" + stime + "-"
+ "4A0CCD2DDC7995083D73F5D667100C8A5831F16D");
/* Create all parent directories to write this network status. */
statusFile.getParentFile().mkdirs();
/* Write sanitized network status to disk. */
BufferedWriter bw = new BufferedWriter(new FileWriter(statusFile));
bw.write(scrubbed.toString());
bw.close();
} catch (IOException e) {
this.logger.log(Level.WARNING, "Could not write sanitized bridge "
+ "network status to disk.", e);
return;
}
}
/**
* Sanitizes a bridge server descriptor and writes it to disk. Looks up
* up bridge identity hash and publication time in the descriptor
* mapping. After sanitizing a server descriptor, its publication time
* is noted down, so that all network statuses that might be referencing
* this server descriptor can be re-written at the end of the sanitizing
* procedure.
*/
public void sanitizeAndStoreServerDescriptor(byte[] data) {
/* Parse descriptor to generate a sanitized version and to look it up
* in the descriptor mapping. */
String scrubbedDesc = null;
DescriptorMapping mapping = null;
try {
BufferedReader br = new BufferedReader(new StringReader(
new String(data, "US-ASCII")));
StringBuilder scrubbed = new StringBuilder();
String line = null, hashedBridgeIdentity = null,
published = null;
boolean skipCrypto = false;
while ((line = br.readLine()) != null) {
/* When we have parsed both published and fingerprint line, look
* up descriptor in the descriptor mapping or create a new one if
* there is none. */
if (mapping == null && published != null &&
hashedBridgeIdentity != null) {
String mappingKey = hashedBridgeIdentity + "," + published;
if (this.bridgeDescriptorMappings.containsKey(mappingKey)) {
mapping = this.bridgeDescriptorMappings.get(mappingKey);
} else {
mapping = new DescriptorMapping(hashedBridgeIdentity,
published);
this.bridgeDescriptorMappings.put(mappingKey, mapping);
}
}
/* Skip all crypto parts that might be used to derive the bridge's
* identity fingerprint. */
if (skipCrypto && !line.startsWith("-----END ")) {
continue;
/* Parse the original IP address for looking it up in the GeoIP
* database and replace it with 127.0.0.1 in the scrubbed
* version. */
} else if (line.startsWith("router ")) {
scrubbed = new StringBuilder("router Unnamed 127.0.0.1 "
+ line.split(" ")[3] + " " + line.split(" ")[4] + " "
+ line.split(" ")[5] + "\n");
/* Parse the publication time and add it to the list of descriptor
* publication times to re-write network statuses at the end of
* the sanitizing procedure. */
} else if (line.startsWith("published ")) {
published = line.substring("published ".length());
this.descriptorPublicationTimes.add(published);
scrubbed.append(line + "\n");
/* Parse the fingerprint to determine the hashed bridge
* identity. */
} else if (line.startsWith("opt fingerprint ")) {
String fingerprint = line.substring(line.startsWith("opt ") ?
"opt fingerprint".length() : "fingerprint".length()).
replaceAll(" ", "").toLowerCase();
hashedBridgeIdentity = DigestUtils.shaHex(Hex.decodeHex(
fingerprint.toCharArray())).toLowerCase();
scrubbed.append("opt fingerprint");
for (int i = 0; i < hashedBridgeIdentity.length() / 4; i++)
scrubbed.append(" " + hashedBridgeIdentity.substring(4 * i,
4 * (i + 1)).toUpperCase());
scrubbed.append("\n");
/* Replace the contact line (if present) with a generic one. */
} else if (line.startsWith("contact ")) {
scrubbed.append("contact somebody\n");
/* When we reach the signature, we're done. Write the sanitized
* descriptor to disk below. */
} else if (line.startsWith("router-signature")) {
scrubbedDesc = scrubbed.toString();
break;
/* Replace extra-info digest with the one we know from our
* descriptor mapping (which might be all 0's if we didn't parse
* the extra-info descriptor before). */
} else if (line.startsWith("opt extra-info-digest ")) {
scrubbed.append("opt extra-info-digest "
+ mapping.extraInfoDescriptorIdentifier.toUpperCase()
+ "\n");
/* Write the following lines unmodified to the sanitized
* descriptor. */
} else if (line.startsWith("reject ")
|| line.startsWith("accept ")
|| line.startsWith("platform ")
|| line.startsWith("opt protocols ")
|| line.startsWith("uptime ")
|| line.startsWith("bandwidth ")
|| line.startsWith("opt hibernating ")
|| line.equals("opt hidden-service-dir")
|| line.equals("opt caches-extra-info")
|| line.equals("opt allow-single-hop-exits")) {
scrubbed.append(line + "\n");
/* Replace node fingerprints in the family line with their hashes
* and nicknames with Unnamed. */
} else if (line.startsWith("family ")) {
StringBuilder familyLine = new StringBuilder("family");
for (String s : line.substring(7).split(" ")) {
if (s.startsWith("$")) {
familyLine.append(" $" + DigestUtils.shaHex(Hex.decodeHex(
s.substring(1).toCharArray())).toUpperCase());
} else {
familyLine.append(" Unnamed");
}
}
scrubbed.append(familyLine.toString() + "\n");
/* Skip the purpose line that the bridge authority adds to its
* cached-descriptors file. */
} else if (line.startsWith("@purpose ")) {
continue;
/* Skip all crypto parts that might leak the bridge's identity
* fingerprint. */
} else if (line.startsWith("-----BEGIN ")
|| line.equals("onion-key") || line.equals("signing-key")) {
skipCrypto = true;
/* Stop skipping lines when the crypto parts are over. */
} else if (line.startsWith("-----END ")) {
skipCrypto = false;
/* If we encounter an unrecognized line, stop parsing and print
* out a warning. We might have overlooked sensitive information
* that we need to remove or replace for the sanitized descriptor
* version. */
} else {
this.logger.fine("Unrecognized line '" + line + "'. Skipping.");
return;
}
}
br.close();
} catch (Exception e) {
this.logger.log(Level.WARNING, "Could not parse server "
+ "descriptor.", e);
return;
}
/* Determine new descriptor digest and write it to descriptor
* mapping. */
String scrubbedHash = DigestUtils.shaHex(scrubbedDesc);
mapping.serverDescriptorIdentifier = scrubbedHash;
/* Determine filename of sanitized server descriptor. */
String dyear = mapping.published.substring(0, 4);
String dmonth = mapping.published.substring(5, 7);
File newFile = new File(this.sanitizedBridgesDir + "/"
+ dyear + "/" + dmonth + "/server-descriptors/"
+ "/" + scrubbedHash.charAt(0) + "/"
+ scrubbedHash.charAt(1) + "/"
+ scrubbedHash);
/* Write sanitized server descriptor to disk, including all its parent
* directories. */
try {
newFile.getParentFile().mkdirs();
BufferedWriter bw = new BufferedWriter(new FileWriter(newFile));
bw.write(scrubbedDesc);
bw.close();
} catch (IOException e) {
this.logger.log(Level.WARNING, "Could not write sanitized server "
+ "descriptor to disk.", e);
return;
}
}
/**
* Sanitizes an extra-info descriptor and writes it to disk. Looks up
* the bridge identity hash and publication time in the descriptor
* mapping. If the corresponding server descriptor was sanitized before,
* it is re-written to include the new extra-info descriptor digest and
* the publication time is noted down, too, so that all network statuses
* possibly referencing this extra-info descriptor and its corresponding
* server descriptor can be re-written at the end of the sanitizing
* procedure.
*/
public void sanitizeAndStoreExtraInfoDescriptor(byte[] data) {
/* Parse descriptor to generate a sanitized version and to look it up
* in the descriptor mapping. */
String scrubbedDesc = null, published = null;
DescriptorMapping mapping = null;
try {
BufferedReader br = new BufferedReader(new StringReader(new String(
data, "US-ASCII")));
String line = null;
StringBuilder scrubbed = null;
String hashedBridgeIdentity = null;
while ((line = br.readLine()) != null) {
/* When we have parsed both published and fingerprint line, look
* up descriptor in the descriptor mapping or create a new one if
* there is none. */
if (mapping == null && published != null &&
hashedBridgeIdentity != null) {
String mappingKey = hashedBridgeIdentity + "," + published;
if (this.bridgeDescriptorMappings.containsKey(mappingKey)) {
mapping = this.bridgeDescriptorMappings.get(mappingKey);
} else {
mapping = new DescriptorMapping(hashedBridgeIdentity,
published);
this.bridgeDescriptorMappings.put(mappingKey, mapping);
}
}
/* Parse bridge identity from extra-info line and replace it with
* its hash in the sanitized descriptor. */
if (line.startsWith("extra-info ")) {
hashedBridgeIdentity = DigestUtils.shaHex(Hex.decodeHex(
line.split(" ")[2].toCharArray())).toLowerCase();
scrubbed = new StringBuilder("extra-info Unnamed "
+ hashedBridgeIdentity.toUpperCase() + "\n");
/* Parse the publication time and add it to the list of descriptor
* publication times to re-write network statuses at the end of
* the sanitizing procedure. */
} else if (line.startsWith("published ")) {
scrubbed.append(line + "\n");
published = line.substring("published ".length());
/* Write the following lines unmodified to the sanitized
* descriptor. */
} else if (line.startsWith("write-history ")
|| line.startsWith("read-history ")
|| line.startsWith("geoip-start-time ")
|| line.startsWith("geoip-client-origins ")
|| line.startsWith("bridge-stats-end ")
|| line.startsWith("bridge-ips ")) {
scrubbed.append(line + "\n");
/* When we reach the signature, we're done. Write the sanitized
* descriptor to disk below. */
} else if (line.startsWith("router-signature")) {
scrubbedDesc = scrubbed.toString();
break;
/* Don't include statistics that should only be contained in relay
* extra-info descriptors. */
} else if (line.startsWith("dirreq-") || line.startsWith("cell-")
|| line.startsWith("exit-")) {
continue;
/* If we encounter an unrecognized line, stop parsing and print
* out a warning. We might have overlooked sensitive information
* that we need to remove or replace for the sanitized descriptor
* version. */
} else {
this.logger.fine("Unrecognized line '" + line + "'. Skipping.");
return;
}
}
br.close();
} catch (IOException e) {
this.logger.log(Level.WARNING, "Could not parse extra-info "
+ "descriptor.", e);
return;
} catch (DecoderException e) {
this.logger.log(Level.WARNING, "Could not parse extra-info "
+ "descriptor.", e);
return;
}
/* Determine new descriptor digest and check if write it to descriptor
* mapping. */
String scrubbedDescHash = DigestUtils.shaHex(scrubbedDesc);
boolean extraInfoDescriptorIdentifierHasChanged =
!scrubbedDescHash.equals(mapping.extraInfoDescriptorIdentifier);
mapping.extraInfoDescriptorIdentifier = scrubbedDescHash;
if (extraInfoDescriptorIdentifierHasChanged &&
!mapping.serverDescriptorIdentifier.equals(NULL_REFERENCE)) {
this.rewriteServerDescriptor(mapping);
this.descriptorPublicationTimes.add(published);
}
/* Determine filename of sanitized server descriptor. */
String dyear = mapping.published.substring(0, 4);
String dmonth = mapping.published.substring(5, 7);
File newFile = new File(this.sanitizedBridgesDir + "/"
+ dyear + "/" + dmonth + "/extra-infos/"
+ scrubbedDescHash.charAt(0) + "/"
+ scrubbedDescHash.charAt(1) + "/"
+ scrubbedDescHash);
/* Write sanitized server descriptor to disk, including all its parent
* directories. */
try {
newFile.getParentFile().mkdirs();
BufferedWriter bw = new BufferedWriter(new FileWriter(newFile));
bw.write(scrubbedDesc);
bw.close();
} catch (Exception e) {
this.logger.log(Level.WARNING, "Could not write sanitized "
+ "extra-info descriptor to disk.", e);
}
}
public void storeSanitizedNetworkStatus(byte[] data, String published) {
String scrubbed = null;
try {
String ascii = new String(data, "US-ASCII");
BufferedReader br2 = new BufferedReader(new StringReader(ascii));
StringBuilder sb = new StringBuilder();
String line = null;
while ((line = br2.readLine()) != null) {
if (line.startsWith("r ")) {
String hashedBridgeIdentity = Hex.encodeHexString(
Base64.decodeBase64(line.split(" ")[2] + "==")).
toLowerCase();
String hashedBridgeIdentityBase64 = line.split(" ")[2];
String readServerDescId = Hex.encodeHexString(
Base64.decodeBase64(line.split(" ")[3] + "==")).
toLowerCase();
String descPublished = line.split(" ")[4] + " "
+ line.split(" ")[5];
String mappingKey = (hashedBridgeIdentity + ","
+ descPublished).toLowerCase();
DescriptorMapping mapping = null;
if (this.bridgeDescriptorMappings.containsKey(mappingKey)) {
mapping = this.bridgeDescriptorMappings.get(mappingKey);
} else {
mapping = new DescriptorMapping(hashedBridgeIdentity.
toLowerCase(), descPublished);
mapping.serverDescriptorIdentifier = readServerDescId;
this.bridgeDescriptorMappings.put(mappingKey, mapping);
}
String sdi = Base64.encodeBase64String(Hex.decodeHex(
mapping.serverDescriptorIdentifier.toCharArray())).
substring(0, 27);
String orPort = line.split(" ")[7];
String dirPort = line.split(" ")[8];
sb.append("r Unnamed "
+ hashedBridgeIdentityBase64 + " " + sdi + " "
+ descPublished + " 127.0.0.1 " + orPort + " "
+ dirPort + "\n");
} else {
sb.append(line + "\n");
}
}
scrubbed = sb.toString();
br2.close();
} catch (DecoderException e) {
this.logger.log(Level.WARNING, "Could not parse server descriptor "
+ "identifier. This must be a bug.", e);
return;
} catch (IOException e) {
this.logger.log(Level.WARNING, "Could not parse previously "
+ "sanitized network status.", e);
return;
}
try {
/* Determine file name. */
String syear = published.substring(0, 4);
String smonth = published.substring(5, 7);
String sday = published.substring(8, 10);
String stime = published.substring(11, 13)
+ published.substring(14, 16)
+ published.substring(17, 19);
File statusFile = new File(this.sanitizedBridgesDir + "/" + syear
+ "/" + smonth + "/statuses/" + sday + "/" + syear + smonth
+ sday + "-" + stime + "-"
+ "4A0CCD2DDC7995083D73F5D667100C8A5831F16D");
/* Create all parent directories to write this network status. */
statusFile.getParentFile().mkdirs();
/* Write sanitized network status to disk. */
BufferedWriter bw = new BufferedWriter(new FileWriter(statusFile));
bw.write(scrubbed);
bw.close();
} catch (IOException e) {
this.logger.log(Level.WARNING, "Could not write previously "
+ "sanitized network status.", e);
return;
}
}
public void storeSanitizedServerDescriptor(byte[] data) {
try {
String ascii = new String(data, "US-ASCII");
BufferedReader br2 = new BufferedReader(new StringReader(ascii));
StringBuilder sb = new StringBuilder();
String line2 = null, published = null;
String hashedBridgeIdentity = null;
DescriptorMapping mapping = null;
while ((line2 = br2.readLine()) != null) {
if (mapping == null && published != null &&
hashedBridgeIdentity != null) {
String mappingKey = (hashedBridgeIdentity + "," + published).
toLowerCase();
if (this.bridgeDescriptorMappings.containsKey(mappingKey)) {
mapping = this.bridgeDescriptorMappings.get(mappingKey);
} else {
mapping = new DescriptorMapping(hashedBridgeIdentity.
toLowerCase(), published);
this.bridgeDescriptorMappings.put(mappingKey, mapping);
}
}
if (line2.startsWith("router ")) {
sb.append("router Unnamed 127.0.0.1 " + line2.split(" ")[3]
+ " " + line2.split(" ")[4] + " " + line2.split(" ")[5]
+ "\n");
} else if (line2.startsWith("published ")) {
published = line2.substring("published ".length());
sb.append(line2 + "\n");
this.descriptorPublicationTimes.add(published);
} else if (line2.startsWith("opt fingerprint ")) {
hashedBridgeIdentity = line2.substring("opt fingerprint".
length()).replaceAll(" ", "").toLowerCase();
sb.append(line2 + "\n");
} else if (line2.startsWith("opt extra-info-digest ")) {
sb.append("opt extra-info-digest "
+ mapping.extraInfoDescriptorIdentifier.toUpperCase()
+ "\n");
} else {
sb.append(line2 + "\n");
}
}
br2.close();
String scrubbedDesc = sb.toString();
String scrubbedHash = DigestUtils.shaHex(scrubbedDesc);
mapping.serverDescriptorIdentifier = scrubbedHash;
String dyear = published.substring(0, 4);
String dmonth = published.substring(5, 7);
File newFile = new File(this.sanitizedBridgesDir + "/"
+ dyear + "/" + dmonth + "/server-descriptors/"
+ scrubbedHash.substring(0, 1) + "/"
+ scrubbedHash.substring(1, 2) + "/"
+ scrubbedHash);
this.logger.finer("Storing server descriptor "
+ newFile.getAbsolutePath());
newFile.getParentFile().mkdirs();
BufferedWriter bw = new BufferedWriter(new FileWriter(
newFile));
bw.write(scrubbedDesc);
bw.close();
} catch (IOException e) {
this.logger.log(Level.WARNING, "Could not store unsanitized server "
+ "descriptor.", e);
}
}
public void storeSanitizedExtraInfoDescriptor(byte[] data) {
try {
String ascii = new String(data, "US-ASCII");
BufferedReader br2 = new BufferedReader(new StringReader(ascii));
StringBuilder sb = new StringBuilder();
String line2 = null, published = null;
String hashedBridgeIdentity = null;
DescriptorMapping mapping = null;
while ((line2 = br2.readLine()) != null) {
if (mapping == null && published != null &&
hashedBridgeIdentity != null) {
String mappingKey = (hashedBridgeIdentity + "," + published).
toLowerCase();
if (this.bridgeDescriptorMappings.containsKey(mappingKey)) {
mapping = this.bridgeDescriptorMappings.get(mappingKey);
} else {
mapping = new DescriptorMapping(hashedBridgeIdentity.
toLowerCase(), published);
this.bridgeDescriptorMappings.put(mappingKey, mapping);
}
}
if (line2.startsWith("extra-info ")) {
hashedBridgeIdentity = line2.split(" ")[2];
sb.append("extra-info Unnamed " + hashedBridgeIdentity
+ "\n");
} else if (line2.startsWith("published ")) {
sb.append(line2 + "\n");
published = line2.substring("published ".length());
this.descriptorPublicationTimes.add(published);
} else {
sb.append(line2 + "\n");
}
}
br2.close();
String scrubbedDesc = sb.toString();
String scrubbedHash = DigestUtils.shaHex(scrubbedDesc);
mapping.extraInfoDescriptorIdentifier = scrubbedHash;
String dyear = published.substring(0, 4);
String dmonth = published.substring(5, 7);
File newFile = new File(this.sanitizedBridgesDir + "/"
+ dyear + "/" + dmonth + "/extra-infos/"
+ scrubbedHash.substring(0, 1) + "/"
+ scrubbedHash.substring(1, 2) + "/"
+ scrubbedHash);
this.logger.finer("Storing extra-info descriptor "
+ newFile.getAbsolutePath());
newFile.getParentFile().mkdirs();
BufferedWriter bw = new BufferedWriter(new FileWriter(
newFile));
bw.write(scrubbedDesc);
bw.close();
} catch (IOException e) {
this.logger.log(Level.WARNING, "Could not store sanitized "
+ "extra-info descriptor.", e);
}
}
private void rewriteNetworkStatus(File status, String published) {
try {
FileInputStream fis = new FileInputStream(status);
BufferedInputStream bis = new BufferedInputStream(fis);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
int len;
byte[] data2 = new byte[1024];
while ((len = bis.read(data2, 0, 1024)) >= 0) {
baos.write(data2, 0, len);
}
fis.close();
byte[] allData = baos.toByteArray();
this.storeSanitizedNetworkStatus(allData, published);
} catch (IOException e) {
this.logger.log(Level.WARNING, "Could not rewrite network "
+ "status.", e);
}
}
private void rewriteServerDescriptor(DescriptorMapping mapping) {
try {
String dyear = mapping.published.substring(0, 4);
String dmonth = mapping.published.substring(5, 7);
File serverDescriptorFile = new File(
this.sanitizedBridgesDir + "/"
+ dyear + "/" + dmonth + "/server-descriptors/"
+ mapping.serverDescriptorIdentifier.substring(0, 1) + "/"
+ mapping.serverDescriptorIdentifier.substring(1, 2) + "/"
+ mapping.serverDescriptorIdentifier);
FileInputStream fis = new FileInputStream(serverDescriptorFile);
BufferedInputStream bis = new BufferedInputStream(fis);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
int len;
byte[] data2 = new byte[1024];
while ((len = bis.read(data2, 0, 1024)) >= 0) {
baos.write(data2, 0, len);
}
fis.close();
byte[] allData = baos.toByteArray();
this.storeSanitizedServerDescriptor(allData);
serverDescriptorFile.delete();
this.logger.finer("Deleting server descriptor "
+ serverDescriptorFile.getAbsolutePath());
} catch (IOException e) {
this.logger.log(Level.WARNING, "Could not rewrite server "
+ "descriptor.", e);
}
}
/**
* Rewrite all network statuses that might contain references to server
* descriptors we added or updated in this execution. This applies to
* all statuses that have been published up to 24 hours after any added
* or updated server descriptor.
*/
public void finishWriting() {
/* Prepare parsing and formatting timestamps. */
SimpleDateFormat dateTimeFormat =
new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
dateTimeFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd");
dateFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
SimpleDateFormat statusFileFormat =
new SimpleDateFormat("yyyyMMdd-HHmmss");
statusFileFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
/* Iterate over publication timestamps of previously sanitized
* descriptors. For every publication timestamp, we want to re-write
* the network statuses that we published up to 24 hours after that
* descriptor. We keep the timestamp of the last re-written network
* status in order to make sure we re-writing any network status at
* most once. */
String lastDescriptorPublishedPlus24Hours = "1970-01-01 00:00:00";
for (String published : this.descriptorPublicationTimes) {
if (published.compareTo(lastDescriptorPublishedPlus24Hours) <= 0) {
continue;
}
// find statuses 24 hours after published
SortedSet<File> statusesToRewrite = new TreeSet<File>();
long publishedTime;
try {
publishedTime = dateTimeFormat.parse(published).getTime();
} catch (ParseException e) {
this.logger.log(Level.WARNING, "Could not parse publication "
+ "timestamp '" + published + "'. Skipping.", e);
continue;
}
String[] dayOne = dateFormat.format(publishedTime).split("-");
File publishedDayOne = new File(this.sanitizedBridgesDir + "/"
+ dayOne[0] + "/" + dayOne[1] + "/statuses/" + dayOne[2]);
if (publishedDayOne.exists()) {
statusesToRewrite.addAll(Arrays.asList(publishedDayOne.
listFiles()));
}
long plus24Hours = publishedTime + 24L * 60L * 60L * 1000L;
lastDescriptorPublishedPlus24Hours = dateFormat.format(plus24Hours);
String[] dayTwo = dateFormat.format(plus24Hours).split("-");
File publishedDayTwo = new File(this.sanitizedBridgesDir + "/"
+ dayTwo[0] + "/" + dayTwo[1] + "/statuses/" + dayTwo[2]);
if (publishedDayTwo.exists()) {
statusesToRewrite.addAll(Arrays.asList(publishedDayTwo.
listFiles()));
}
for (File status : statusesToRewrite) {
String statusPublished = status.getName().substring(0, 15);
long statusTime;
try {
statusTime = statusFileFormat.parse(statusPublished).getTime();
} catch (ParseException e) {
this.logger.log(Level.WARNING, "Could not parse network "
+ "status publication timestamp '" + published
+ "'. Skipping.", e);
continue;
}
if (statusTime < publishedTime || statusTime > plus24Hours) {
continue;
}
this.rewriteNetworkStatus(status,
dateTimeFormat.format(statusTime));
}
}
/* Write descriptor mappings to disk. */
try {
BufferedWriter bw = new BufferedWriter(new FileWriter(
this.bridgeDescriptorMappingsFile));
for (DescriptorMapping mapping :
this.bridgeDescriptorMappings.values()) {
bw.write(mapping.toString() + "\n");
}
bw.close();
} catch (IOException e) {
this.logger.log(Level.WARNING, "Could not write descriptor "
+ "mappings to disk.", e);
}
}
}
|
package com.bergerkiller.bukkit.common;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.*;
import com.bergerkiller.bukkit.common.utils.LogicUtil;
import com.bergerkiller.bukkit.common.utils.LogicUtil.ItemSynchronizer;
public class LogicUtilTest {
private static enum TestMode {
ORDERED_LIST, UNORDERED_SET
}
private static final List<Collection<Integer>> demo_sync_lists = Arrays.asList(
Arrays.asList(1, 2, 3, 4, 5),
Arrays.asList(1, 2, 3, 4, 5, 6),
Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8),
Arrays.asList(1, 2, 3, 5, 6, 7, 8),
Arrays.asList(1, 2, 7, 8),
Arrays.asList(1, 2, 4, 7, 8),
Arrays.asList(1, 2, 5, 7, 8),
Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8),
Collections.<Integer>emptyList()
);
private static final List<Collection<Integer>> demo_sync_sets;
static {
demo_sync_sets = new ArrayList<Collection<Integer>>(demo_sync_lists.size());
for (Collection<Integer> coll : demo_sync_lists) {
demo_sync_sets.add(new HashSet<Integer>(coll));
}
}
    /* Shared synchronizer used by all tests: items and values are both
     * plain Integers matched by equality; onAdded passes the value
     * through unchanged and onRemoved does nothing (the println calls
     * are left commented out for manual debugging). */
    private static final ItemSynchronizer<Integer, Integer> synchronizer = new ItemSynchronizer<Integer, Integer>() {
        @Override
        public boolean isItem(Integer item, Integer value) {
            return item.equals(value);
        }

        @Override
        public Integer onAdded(Integer value) {
            //System.out.println("Add: " + value);
            return value;
        }

        @Override
        public void onRemoved(Integer item) {
            //System.out.println("Removed: " + item);
        }
    };
private void testListSynchronizer(List<Integer> sync, TestMode mode, boolean validate, Collection<Integer> values) {
LogicUtil.synchronizeList(sync, values, synchronizer);
if (validate) {
assertEquals(sync, values);
}
}
private void testSetSynchronizer(Set<Integer> sync, TestMode mode, boolean validate, Collection<Integer> values) {
LogicUtil.synchronizeUnordered(sync, values, synchronizer);
if (validate) {
assertTrue(sync.containsAll(values));
assertTrue(values.containsAll(sync));
}
}
// Performs test, returns amount of microseconds per test cycle
private double runTest(TestMode mode, int count) {
long time_a, time_b;
if (mode == TestMode.ORDERED_LIST) {
List<Integer> sync = new ArrayList<Integer>();
time_a = System.nanoTime();
for (int i = 0; i < count; i++) {
for (Collection<Integer> compare : demo_sync_lists) {
testListSynchronizer(sync, mode, i==0, compare);
}
}
time_b = System.nanoTime();
} else {
Set<Integer> sync = new HashSet<Integer>();
time_a = System.nanoTime();
for (int i = 0; i < count; i++) {
for (Collection<Integer> compare : demo_sync_sets) {
testSetSynchronizer(sync, mode, i==0, compare);
}
}
time_b = System.nanoTime();
}
return (double) ((time_b - time_a)) / (double) (count*1000);
}
// Performs test with only the first synchronized list, returns amount of microseconds per test cycle
// This tests the performance when the list and sync collections are the same
private double runTestUnchanging(TestMode mode, int count) {
long time_a, time_b;
if (mode == TestMode.ORDERED_LIST) {
List<Integer> sync = new ArrayList<Integer>();
Collection<Integer> compare = demo_sync_lists.get(0);
time_a = System.nanoTime();
for (int i = 0; i < count; i++) {
testListSynchronizer(sync, mode, i==0, compare);
}
time_b = System.nanoTime();
} else {
Set<Integer> sync = new HashSet<Integer>();
Collection<Integer> compare = demo_sync_sets.get(0);
time_a = System.nanoTime();
for (int i = 0; i < count; i++) {
testSetSynchronizer(sync, mode, i==0, compare);
}
time_b = System.nanoTime();
}
return (double) ((time_b - time_a)) / (double) (count*1000);
}
@Test
public void testSynchronizedList() {
runTest(TestMode.ORDERED_LIST, 1);
}
@Test
public void testSynchronizedUnorderedSet() {
runTest(TestMode.UNORDERED_SET, 1);
}
@Test
@Ignore
public void testTimings() {
for (TestMode mode : TestMode.values()) {
String name = mode.toString();
while (name.length() < 22) {
name += " ";
}
double time_per_cycle = runTest(mode, 1000000);
double time_per_unchanged_cycle = runTestUnchanging(mode, 1000000);
String col1 = "changed=" + String.format("%.4f", time_per_cycle) + " us";
while (col1.length() < 20) {
col1 += " ";
}
String col2 = "unchanged=" + String.format("%.4f", time_per_unchanged_cycle) + " us";
while (col2.length() < 20) {
col2 += " ";
}
System.out.println("Sync " + name + " " + col1 + " | " + col2);
}
}
@Test
public void testClone() {
ArrayList<String> list = new ArrayList<String>();
list.add("hello");
list.add("world");
ArrayList<String> list_clone = LogicUtil.clone(list);
assertEquals(list, list_clone);
}
}
|
// Copyright 2017 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.devtools.build.lib.collect;
import com.google.common.base.Objects;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Interner;
import com.google.devtools.build.lib.concurrent.BlazeInterners;
import com.google.devtools.build.lib.util.Preconditions;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import javax.annotation.concurrent.Immutable;
/**
* Provides a memory-efficient map when the key sets are likely to be shared between multiple
* instances of this class.
*
* <p>This class is appropriate where it is expected that a lot of the key sets will be the same.
* These key sets are shared and an offset table of indices is computed. Each map instance thus
* contains only a reference to the shared offset table, and a plain array of instances.
*
* <p>The map is sensitive to insertion order. Two maps with different insertion orders are *not*
* considered equal, and will not share keys.
*
* <p>This class explicitly does *not* implement the Map interface, as use of that would lead to a
* lot of GC churn.
*/
@Immutable
public class ImmutableSharedKeyMap<K, V> extends CompactImmutableMap<K, V> {
  // Weak interner so structurally-equal key arrays collapse onto a single
  // shared OffsetTable instance — the memory saving this class exists for.
  private static final Interner<OffsetTable> offsetTables = BlazeInterners.newWeakInterner();
  private final OffsetTable<K> offsetTable;
  // Values stored positionally: values[i] belongs to offsetTable.keys[i].
  private final Object[] values;

  /** Interned, shareable table mapping each key to its position in the value array. */
  private static final class OffsetTable<K> {
    private final Object[] keys;
    // Keep a map around to speed up get lookups for larger maps.
    // We make this value lazy to avoid computing for values that end up being thrown away
    // during interning anyway (the majority).
    private volatile ImmutableMap<K, Integer> indexMap;
    private OffsetTable(Object[] keys) {
      this.keys = keys;
    }
    // Lazily builds the key->index map using double-checked locking on the
    // volatile 'indexMap' field. Only the interned winner pays this cost;
    // callers reach it via createOffsetTable() after interning.
    void initIndexMap() {
      if (indexMap == null) {
        synchronized (this) {
          if (indexMap == null) {
            ImmutableMap.Builder<K, Integer> builder = ImmutableMap.builder();
            for (int i = 0; i < keys.length; ++i) {
              @SuppressWarnings("unchecked")
              K key = (K) keys[i];
              builder.put(key, i);
            }
            this.indexMap = builder.build();
          }
        }
      }
    }
    private ImmutableMap<K, Integer> getIndexMap() {
      return indexMap;
    }
    // Returns the value-array offset for 'key', or -1 when the key is absent.
    int offsetForKey(K key) {
      return getIndexMap().getOrDefault(key, -1);
    }
    // Equality is element-wise AND order-sensitive over the key array; this is
    // the relation the interner uses, which is why two maps with the same keys
    // in a different insertion order do not share a table.
    @Override
    public boolean equals(Object o) {
      if (this == o) {
        return true;
      }
      if (!(o instanceof OffsetTable)) {
        return false;
      }
      OffsetTable that = (OffsetTable) o;
      return Arrays.equals(this.keys, that.keys);
    }
    @Override
    public int hashCode() {
      return Arrays.hashCode(keys);
    }
  }

  /**
   * Creates a map from parallel key/value arrays. The arrays must be the same
   * length; keys are interned into a shared offset table.
   */
  protected ImmutableSharedKeyMap(Object[] keys, Object[] values) {
    Preconditions.checkArgument(keys.length == values.length);
    this.values = values;
    this.offsetTable = createOffsetTable(keys);
  }

  /** Creates a map whose entries (and entry order) come from the given map's iteration order. */
  protected ImmutableSharedKeyMap(Map<K, V> map) {
    int count = map.size();
    Object[] keys = new Object[count];
    Object[] values = new Object[count];
    int i = 0;
    for (Map.Entry<K, V> entry : map.entrySet()) {
      keys[i] = entry.getKey();
      values[i] = entry.getValue();
      ++i;
    }
    Preconditions.checkArgument(keys.length == values.length);
    this.values = values;
    this.offsetTable = createOffsetTable(keys);
  }

  // Interns a candidate table for 'keys' and makes sure the surviving
  // (possibly pre-existing) instance has its lookup index built.
  @SuppressWarnings("unchecked")
  private static <K> OffsetTable<K> createOffsetTable(Object[] keys) {
    OffsetTable<K> offsetTable = new OffsetTable<>(keys);
    OffsetTable<K> internedTable = (OffsetTable<K>) offsetTables.intern(offsetTable);
    internedTable.initIndexMap();
    return internedTable;
  }

  /** Returns the value mapped to {@code key}, or null when the key is absent. */
  @SuppressWarnings("unchecked")
  @Override
  public V get(K key) {
    int offset = offsetTable.offsetForKey(key);
    return offset != -1 ? (V) values[offset] : null;
  }

  @Override
  public int size() {
    return values.length;
  }

  /** Returns the key at the given insertion position (no bounds checking beyond the array's). */
  @SuppressWarnings("unchecked")
  @Override
  public K keyAt(int index) {
    return (K) offsetTable.keys[index];
  }

  /** Returns the value at the given insertion position. */
  @SuppressWarnings("unchecked")
  @Override
  public V valueAt(int index) {
    return (V) values[index];
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    ImmutableSharedKeyMap<?, ?> that = (ImmutableSharedKeyMap<?, ?>) o;
    // We can use object identity for the offset table due to
    // it being interned
    return offsetTable == that.offsetTable && Arrays.equals(values, that.values);
  }

  @Override
  public int hashCode() {
    return Objects.hashCode(offsetTable, Arrays.hashCode(values));
  }

  public static <K, V> Builder<K, V> builder() {
    return new Builder<>();
  }

  /** Builder for {@link ImmutableSharedKeyMap}. */
  public static class Builder<K, V> {
    // Keys and values stored interleaved: [k0, v0, k1, v1, ...].
    private final List<Object> entries = new ArrayList<>();
    private Builder() {}
    public Builder<K, V> put(K key, V value) {
      entries.add(key);
      entries.add(value);
      return this;
    }
    public ImmutableSharedKeyMap<K, V> build() {
      int count = entries.size() / 2;
      Object[] keys = new Object[count];
      Object[] values = new Object[count];
      int entryIndex = 0;
      for (int i = 0; i < count; ++i) {
        keys[i] = entries.get(entryIndex++);
        values[i] = entries.get(entryIndex++);
      }
      return new ImmutableSharedKeyMap<>(keys, values);
    }
  }
}
|
package net.shade.mixin;
import java.util.List;
import net.shade.plugin.ChatEventHandler;
import org.spongepowered.asm.mixin.Mixin;
import org.spongepowered.asm.mixin.Shadow;
import org.spongepowered.asm.mixin.injection.At;
import org.spongepowered.asm.mixin.injection.Inject;
import org.spongepowered.asm.mixin.injection.callback.CallbackInfo;
import net.minecraft.client.MinecraftClient;
import net.minecraft.client.gui.DrawableHelper;
import net.minecraft.client.gui.hud.ChatHud;
import net.minecraft.text.OrderedText;
import net.minecraft.client.gui.hud.ChatHudLine;
import net.minecraft.text.Text;
@Mixin(ChatHud.class)
public class ChatInputMixin extends DrawableHelper
{
    // Shadowed from ChatHud. NOTE(review): not read anywhere in this mixin —
    // presumably kept for future handlers needing the visible chat lines; confirm.
    @Shadow
    private List<ChatHudLine<OrderedText>> visibleMessages;

    /**
     * Injected at the head of ChatHud#addMessage(Text, int): forwards the plain
     * string of every incoming chat message to the plugin's chat event handler.
     * NOTE(review): declared cancellable but this handler never calls ci.cancel(),
     * so the message is always added — verify whether cancellation was intended.
     */
    @Inject(at = @At("HEAD"), method = "addMessage(Lnet/minecraft/text/Text;I)V", cancellable = true)
    private void onAddMessage(Text eventText, int line, CallbackInfo ci)
    {
        ChatEventHandler.prosessChatEvent(eventText.getString());
    }
}
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.pulsar.broker.service;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static org.apache.bookkeeper.mledger.util.SafeRun.safeRun;
import static org.apache.commons.collections.CollectionUtils.isEmpty;
import static org.apache.commons.lang3.StringUtils.isNotBlank;
import static org.apache.pulsar.broker.cache.ConfigurationCacheService.POLICIES;
import static org.apache.pulsar.broker.cache.LocalZooKeeperCacheService.LOCAL_POLICIES_ROOT;
import static org.apache.pulsar.broker.web.PulsarWebResource.joinPath;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Queues;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.buffer.ByteBuf;
import io.netty.channel.AdaptiveRecvByteBufAllocator;
import io.netty.channel.Channel;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.ssl.SslContext;
import io.netty.util.concurrent.DefaultThreadFactory;
import java.io.Closeable;
import java.io.IOException;
import java.lang.reflect.Field;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.atomic.LongAdder;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Consumer;
import java.util.function.Predicate;
import javax.ws.rs.core.Response;
import lombok.AccessLevel;
import lombok.Getter;
import lombok.Setter;
import org.apache.bookkeeper.common.util.OrderedExecutor;
import org.apache.bookkeeper.common.util.OrderedScheduler;
import org.apache.bookkeeper.mledger.AsyncCallbacks.DeleteLedgerCallback;
import org.apache.bookkeeper.mledger.AsyncCallbacks.OpenLedgerCallback;
import org.apache.bookkeeper.mledger.LedgerOffloader;
import org.apache.bookkeeper.mledger.ManagedLedger;
import org.apache.bookkeeper.mledger.ManagedLedgerConfig;
import org.apache.bookkeeper.mledger.ManagedLedgerException;
import org.apache.bookkeeper.mledger.ManagedLedgerException.ManagedLedgerNotFoundException;
import org.apache.bookkeeper.mledger.ManagedLedgerFactory;
import org.apache.bookkeeper.mledger.interceptor.ManagedLedgerInterceptor;
import org.apache.bookkeeper.mledger.util.Futures;
import org.apache.bookkeeper.util.ZkUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.pulsar.broker.PulsarServerException;
import org.apache.pulsar.broker.PulsarService;
import org.apache.pulsar.broker.ServiceConfiguration;
import org.apache.pulsar.broker.admin.AdminResource;
import org.apache.pulsar.broker.authentication.AuthenticationService;
import org.apache.pulsar.broker.authorization.AuthorizationService;
import org.apache.pulsar.broker.cache.ConfigurationCacheService;
import org.apache.pulsar.broker.delayed.DelayedDeliveryTrackerFactory;
import org.apache.pulsar.broker.delayed.DelayedDeliveryTrackerLoader;
import org.apache.pulsar.broker.intercept.BrokerInterceptor;
import org.apache.pulsar.broker.intercept.ManagedLedgerInterceptorImpl;
import org.apache.pulsar.broker.loadbalance.LoadManager;
import org.apache.pulsar.broker.service.BrokerServiceException.NamingException;
import org.apache.pulsar.broker.service.BrokerServiceException.NotAllowedException;
import org.apache.pulsar.broker.service.BrokerServiceException.PersistenceException;
import org.apache.pulsar.broker.service.BrokerServiceException.ServerMetadataException;
import org.apache.pulsar.broker.service.BrokerServiceException.ServiceUnitNotReadyException;
import org.apache.pulsar.broker.service.nonpersistent.NonPersistentTopic;
import org.apache.pulsar.broker.service.persistent.DispatchRateLimiter;
import org.apache.pulsar.broker.service.persistent.PersistentDispatcherMultipleConsumers;
import org.apache.pulsar.broker.service.persistent.PersistentTopic;
import org.apache.pulsar.broker.service.persistent.SystemTopic;
import org.apache.pulsar.broker.stats.ClusterReplicationMetrics;
import org.apache.pulsar.broker.stats.prometheus.metrics.ObserverGauge;
import org.apache.pulsar.broker.stats.prometheus.metrics.Summary;
import org.apache.pulsar.broker.systopic.SystemTopicClient;
import org.apache.pulsar.broker.web.PulsarWebResource;
import org.apache.pulsar.broker.zookeeper.aspectj.ClientCnxnAspect;
import org.apache.pulsar.broker.zookeeper.aspectj.ClientCnxnAspect.EventListner;
import org.apache.pulsar.client.admin.PulsarAdmin;
import org.apache.pulsar.client.admin.PulsarAdminBuilder;
import org.apache.pulsar.client.api.ClientBuilder;
import org.apache.pulsar.client.api.PulsarClient;
import org.apache.pulsar.client.api.PulsarClientException;
import org.apache.pulsar.client.impl.ClientBuilderImpl;
import org.apache.pulsar.client.impl.PulsarClientImpl;
import org.apache.pulsar.client.impl.conf.ClientConfigurationData;
import org.apache.pulsar.common.allocator.PulsarByteBufAllocator;
import org.apache.pulsar.common.configuration.FieldContext;
import org.apache.pulsar.common.intercept.AppendIndexMetadataInterceptor;
import org.apache.pulsar.common.intercept.BrokerEntryMetadataInterceptor;
import org.apache.pulsar.common.intercept.BrokerEntryMetadataUtils;
import org.apache.pulsar.common.naming.NamespaceBundle;
import org.apache.pulsar.common.naming.NamespaceBundleFactory;
import org.apache.pulsar.common.naming.NamespaceBundles;
import org.apache.pulsar.common.naming.NamespaceName;
import org.apache.pulsar.common.naming.TopicDomain;
import org.apache.pulsar.common.naming.TopicName;
import org.apache.pulsar.common.partition.PartitionedTopicMetadata;
import org.apache.pulsar.common.policies.data.AutoSubscriptionCreationOverride;
import org.apache.pulsar.common.policies.data.AutoTopicCreationOverride;
import org.apache.pulsar.common.policies.data.ClusterData;
import org.apache.pulsar.common.policies.data.LocalPolicies;
import org.apache.pulsar.common.policies.data.OffloadPolicies;
import org.apache.pulsar.common.policies.data.PersistencePolicies;
import org.apache.pulsar.common.policies.data.PersistentOfflineTopicStats;
import org.apache.pulsar.common.policies.data.Policies;
import org.apache.pulsar.common.policies.data.PublishRate;
import org.apache.pulsar.common.policies.data.RetentionPolicies;
import org.apache.pulsar.common.policies.data.TopicPolicies;
import org.apache.pulsar.common.policies.data.TopicStats;
import org.apache.pulsar.common.policies.data.TopicType;
import org.apache.pulsar.common.protocol.schema.SchemaVersion;
import org.apache.pulsar.common.stats.Metrics;
import org.apache.pulsar.common.util.FieldParser;
import org.apache.pulsar.common.util.FutureUtil;
import org.apache.pulsar.common.util.ObjectMapperFactory;
import org.apache.pulsar.common.util.RestException;
import org.apache.pulsar.common.util.collections.ConcurrentOpenHashMap;
import org.apache.pulsar.common.util.collections.ConcurrentOpenHashSet;
import org.apache.pulsar.common.util.netty.EventLoopUtil;
import org.apache.pulsar.policies.data.loadbalancer.NamespaceBundleStats;
import org.apache.pulsar.zookeeper.ZkIsolatedBookieEnsemblePlacementPolicy;
import org.apache.pulsar.zookeeper.ZooKeeperCacheListener;
import org.apache.pulsar.zookeeper.ZooKeeperDataCache;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooDefs.Ids;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Getter(AccessLevel.PUBLIC)
@Setter(AccessLevel.PROTECTED)
public class BrokerService implements Closeable, ZooKeeperCacheListener<Policies> {
private static final Logger log = LoggerFactory.getLogger(BrokerService.class);
private final PulsarService pulsar;
private final ManagedLedgerFactory managedLedgerFactory;
private final ConcurrentOpenHashMap<String, CompletableFuture<Optional<Topic>>> topics;
private final ConcurrentOpenHashMap<String, PulsarClient> replicationClients;
private final ConcurrentOpenHashMap<String, PulsarAdmin> clusterAdmins;
// Multi-layer topics map:
// Namespace --> Bundle --> topicName --> topic
private final ConcurrentOpenHashMap<String, ConcurrentOpenHashMap<String, ConcurrentOpenHashMap<String, Topic>>>
multiLayerTopicsMap;
private int numberOfNamespaceBundles = 0;
private final EventLoopGroup acceptorGroup;
private final EventLoopGroup workerGroup;
private final OrderedExecutor topicOrderedExecutor;
// offline topic backlog cache
private final ConcurrentOpenHashMap<TopicName, PersistentOfflineTopicStats> offlineTopicStatCache;
private static final ConcurrentOpenHashMap<String, ConfigField> dynamicConfigurationMap =
prepareDynamicConfigurationMap();
private final ConcurrentOpenHashMap<String, Consumer<?>> configRegisteredListeners;
private final ConcurrentLinkedQueue<Pair<String, CompletableFuture<Optional<Topic>>>> pendingTopicLoadingQueue;
private AuthorizationService authorizationService = null;
private final ScheduledExecutorService statsUpdater;
private final ScheduledExecutorService backlogQuotaChecker;
protected final AtomicReference<Semaphore> lookupRequestSemaphore;
protected final AtomicReference<Semaphore> topicLoadRequestSemaphore;
private final ObserverGauge pendingLookupRequests;
private final ObserverGauge pendingTopicLoadRequests;
private final ScheduledExecutorService inactivityMonitor;
private final ScheduledExecutorService messageExpiryMonitor;
private final ScheduledExecutorService compactionMonitor;
private final ScheduledExecutorService messagePublishBufferMonitor;
private final ScheduledExecutorService consumedLedgersMonitor;
private final ScheduledExecutorService ledgerFullMonitor;
private ScheduledExecutorService topicPublishRateLimiterMonitor;
private ScheduledExecutorService brokerPublishRateLimiterMonitor;
private ScheduledExecutorService deduplicationSnapshotMonitor;
protected volatile PublishRateLimiter brokerPublishRateLimiter = PublishRateLimiter.DISABLED_RATE_LIMITER;
private DistributedIdGenerator producerNameGenerator;
public final static String PRODUCER_NAME_GENERATOR_PATH = "/counters/producer-name";
private final BacklogQuotaManager backlogQuotaManager;
private final int keepAliveIntervalSeconds;
private final PulsarStats pulsarStats;
private final EventListner zkStatsListener;
private final AuthenticationService authenticationService;
public static final String BROKER_SERVICE_CONFIGURATION_PATH = "/admin/configuration";
public static final String MANAGED_LEDGER_PATH_ZNODE = "/managed-ledgers";
private final ZooKeeperDataCache<Map<String, String>> dynamicConfigurationCache;
private static final LongAdder totalUnackedMessages = new LongAdder();
private final int maxUnackedMessages;
public final int maxUnackedMsgsPerDispatcher;
private static final AtomicBoolean blockedDispatcherOnHighUnackedMsgs = new AtomicBoolean(false);
private final ConcurrentOpenHashSet<PersistentDispatcherMultipleConsumers> blockedDispatchers;
private final ReadWriteLock lock = new ReentrantReadWriteLock();
private final DelayedDeliveryTrackerFactory delayedDeliveryTrackerFactory;
private final ServerBootstrap defaultServerBootstrap;
private Channel listenChannel;
private Channel listenChannelTls;
private boolean preciseTopicPublishRateLimitingEnable;
private final long maxMessagePublishBufferBytes;
private final long resumeProducerReadMessagePublishBufferBytes;
private volatile boolean reachMessagePublishBufferThreshold;
private BrokerInterceptor interceptor;
private Set<BrokerEntryMetadataInterceptor> brokerEntryMetadataInterceptors;
    /**
     * Builds the broker service for the given pulsar instance: topic maps,
     * netty event loop groups, the single-thread housekeeping executors,
     * authentication/authorization services, dynamic-configuration listeners
     * and metric gauges. No ports are bound here — that happens in start().
     *
     * @param pulsar owning service providing configuration and shared resources
     * @throws Exception if any dependent service fails to initialize
     */
    public BrokerService(PulsarService pulsar) throws Exception {
        this.pulsar = pulsar;
        // A non-positive configured size (MB) disables the publish buffer limit (-1)
        this.maxMessagePublishBufferBytes = pulsar.getConfiguration().getMaxMessagePublishBufferSizeInMB() > 0
                ? pulsar.getConfiguration().getMaxMessagePublishBufferSizeInMB() * 1024L * 1024L : -1;
        // Producer reads resume once the buffer drains to half of the maximum
        this.resumeProducerReadMessagePublishBufferBytes = this.maxMessagePublishBufferBytes / 2;
        this.preciseTopicPublishRateLimitingEnable =
                pulsar.getConfiguration().isPreciseTopicPublishRateLimiterEnable();
        this.managedLedgerFactory = pulsar.getManagedLedgerFactory();
        // Core lookup maps: topic futures, per-cluster replication clients/admins
        this.topics = new ConcurrentOpenHashMap<>();
        this.replicationClients = new ConcurrentOpenHashMap<>();
        this.clusterAdmins = new ConcurrentOpenHashMap<>();
        this.keepAliveIntervalSeconds = pulsar.getConfiguration().getKeepAliveIntervalSeconds();
        this.configRegisteredListeners = new ConcurrentOpenHashMap<>();
        this.pendingTopicLoadingQueue = Queues.newConcurrentLinkedQueue();
        this.multiLayerTopicsMap = new ConcurrentOpenHashMap<>();
        this.pulsarStats = new PulsarStats(pulsar);
        this.offlineTopicStatCache = new ConcurrentOpenHashMap<>();
        this.topicOrderedExecutor = OrderedScheduler.newSchedulerBuilder()
                .numThreads(pulsar.getConfiguration().getNumWorkerThreadsForNonPersistentTopic())
                .name("broker-topic-workers").build();
        // Netty groups: one acceptor thread, numIOThreads worker threads
        final DefaultThreadFactory acceptorThreadFactory = new DefaultThreadFactory("pulsar-acceptor");
        final DefaultThreadFactory workersThreadFactory = new DefaultThreadFactory("pulsar-io");
        final int numThreads = pulsar.getConfiguration().getNumIOThreads();
        log.info("Using {} threads for broker service IO", numThreads);
        this.acceptorGroup = EventLoopUtil.newEventLoopGroup(1, acceptorThreadFactory);
        this.workerGroup = EventLoopUtil.newEventLoopGroup(numThreads, workersThreadFactory);
        this.statsUpdater = Executors
                .newSingleThreadScheduledExecutor(new DefaultThreadFactory("pulsar-stats-updater"));
        this.authorizationService = new AuthorizationService(
                pulsar.getConfiguration(), pulsar.getConfigurationCache());
        // Listen for namespace policy changes (onUpdate callback of this class)
        if (pulsar.getConfigurationCache() != null) {
            pulsar.getConfigurationCache().policiesCache().registerListener(this);
        }
        // Dedicated single-thread schedulers for each periodic housekeeping task
        this.inactivityMonitor = Executors
                .newSingleThreadScheduledExecutor(new DefaultThreadFactory("pulsar-inactivity-monitor"));
        this.messageExpiryMonitor = Executors
                .newSingleThreadScheduledExecutor(new DefaultThreadFactory("pulsar-msg-expiry-monitor"));
        this.compactionMonitor =
                Executors.newSingleThreadScheduledExecutor(
                        new DefaultThreadFactory("pulsar-compaction-monitor"));
        this.messagePublishBufferMonitor =
                Executors.newSingleThreadScheduledExecutor(
                        new DefaultThreadFactory("pulsar-publish-buffer-monitor"));
        this.consumedLedgersMonitor = Executors
                .newSingleThreadScheduledExecutor(new DefaultThreadFactory("consumed-Ledgers-monitor"));
        this.ledgerFullMonitor =
                Executors.newSingleThreadScheduledExecutor(new DefaultThreadFactory("ledger-full-monitor"));
        this.backlogQuotaManager = new BacklogQuotaManager(pulsar);
        this.backlogQuotaChecker = Executors
                .newSingleThreadScheduledExecutor(new DefaultThreadFactory("pulsar-backlog-quota-checker"));
        this.authenticationService = new AuthenticationService(pulsar.getConfiguration());
        // ZK-backed cache of dynamic broker configuration (string key/value pairs)
        this.dynamicConfigurationCache = new ZooKeeperDataCache<Map<String, String>>(pulsar().getLocalZkCache()) {
            @Override
            public Map<String, String> deserialize(String key, byte[] content) throws Exception {
                return ObjectMapperFactory.getThreadLocal().readValue(content, HashMap.class);
            }
        };
        this.blockedDispatchers = new ConcurrentOpenHashSet<>();
        // update dynamic configuration and register-listener
        updateConfigurationAndRegisterListeners();
        // Semaphores bounding concurrent lookup / topic-load requests
        this.lookupRequestSemaphore = new AtomicReference<Semaphore>(
                new Semaphore(pulsar.getConfiguration().getMaxConcurrentLookupRequest(), false));
        this.topicLoadRequestSemaphore = new AtomicReference<Semaphore>(
                new Semaphore(pulsar.getConfiguration().getMaxConcurrentTopicLoadRequest(), false));
        // Per-broker unacked-message limiting is only enabled when both knobs are positive
        if (pulsar.getConfiguration().getMaxUnackedMessagesPerBroker() > 0
                && pulsar.getConfiguration().getMaxUnackedMessagesPerSubscriptionOnBrokerBlocked() > 0.0) {
            this.maxUnackedMessages = pulsar.getConfiguration().getMaxUnackedMessagesPerBroker();
            // Per-dispatcher cap is a percentage of the broker-wide cap
            this.maxUnackedMsgsPerDispatcher = (int) ((maxUnackedMessages
                    * pulsar.getConfiguration().getMaxUnackedMessagesPerSubscriptionOnBrokerBlocked()) / 100);
            log.info("Enabling per-broker unack-message limit {} and dispatcher-limit {} on blocked-broker",
                    maxUnackedMessages, maxUnackedMsgsPerDispatcher);
            // block misbehaving dispatcher by checking periodically
            pulsar.getExecutor().scheduleAtFixedRate(() -> checkUnAckMessageDispatching(),
                    600, 30, TimeUnit.SECONDS);
        } else {
            this.maxUnackedMessages = 0;
            this.maxUnackedMsgsPerDispatcher = 0;
            log.info(
                    "Disabling per broker unack-msg blocking due invalid"
                            + " unAckMsgSubscriptionPercentageLimitOnBrokerBlocked {} ",
                    pulsar.getConfiguration().getMaxUnackedMessagesPerSubscriptionOnBrokerBlocked());
        }
        // register listener to capture zk-latency
        zkStatsListener = (eventType, latencyMs) -> pulsarStats.recordZkLatencyTimeValue(eventType, latencyMs);
        this.delayedDeliveryTrackerFactory = DelayedDeliveryTrackerLoader
                .loadDelayedDeliveryTrackerFactory(pulsar.getConfiguration());
        // Template bootstrap cloned by start() and by protocol handlers
        this.defaultServerBootstrap = defaultServerBootstrap();
        // Gauges report in-flight requests as (max permits - available permits)
        this.pendingLookupRequests = ObserverGauge.build("pulsar_broker_lookup_pending_requests", "-")
                .supplier(() -> pulsar.getConfig().getMaxConcurrentLookupRequest()
                        - lookupRequestSemaphore.get().availablePermits())
                .register();
        this.pendingTopicLoadRequests = ObserverGauge.build(
                "pulsar_broker_topic_load_pending_requests", "-")
                .supplier(() -> pulsar.getConfig().getMaxConcurrentTopicLoadRequest()
                        - topicLoadRequestSemaphore.get().availablePermits())
                .register();
        this.brokerEntryMetadataInterceptors = BrokerEntryMetadataUtils
                .loadBrokerEntryMetadataInterceptors(pulsar.getConfiguration().getBrokerEntryMetadataInterceptors(),
                        BrokerService.class.getClassLoader());
    }
// This call is used for starting additional protocol handlers
public void startProtocolHandlers(
Map<String, Map<InetSocketAddress, ChannelInitializer<SocketChannel>>> protocolHandlers) {
protocolHandlers.forEach((protocol, initializers) -> {
initializers.forEach((address, initializer) -> {
try {
startProtocolHandler(protocol, address, initializer);
} catch (IOException e) {
log.error("{}", e.getMessage(), e.getCause());
throw new RuntimeException(e.getMessage(), e.getCause());
}
});
});
}
private void startProtocolHandler(String protocol,
SocketAddress address,
ChannelInitializer<SocketChannel> initializer) throws IOException {
ServerBootstrap bootstrap = defaultServerBootstrap.clone();
bootstrap.childHandler(initializer);
try {
bootstrap.bind(address).sync();
} catch (Exception e) {
throw new IOException("Failed to bind protocol `" + protocol + "` on " + address, e);
}
log.info("Successfully bind protocol `{}` on {}", protocol, address);
}
private ServerBootstrap defaultServerBootstrap() {
ServerBootstrap bootstrap = new ServerBootstrap();
bootstrap.childOption(ChannelOption.ALLOCATOR, PulsarByteBufAllocator.DEFAULT);
bootstrap.group(acceptorGroup, workerGroup);
bootstrap.childOption(ChannelOption.TCP_NODELAY, true);
bootstrap.childOption(ChannelOption.RCVBUF_ALLOCATOR,
new AdaptiveRecvByteBufAllocator(1024, 16 * 1024, 1 * 1024 * 1024));
bootstrap.channel(EventLoopUtil.getServerSocketChannelClass(workerGroup));
EventLoopUtil.enableTriggeredMode(bootstrap);
return bootstrap;
}
    /**
     * Starts the broker service: creates the distributed producer-name
     * generator, binds the plain and (when configured) TLS broker listeners,
     * then launches all periodic housekeeping tasks and registers the
     * ZooKeeper latency listener.
     *
     * @throws Exception if a listener cannot be bound or a startup step fails
     */
    public void start() throws Exception {
        this.producerNameGenerator = new DistributedIdGenerator(pulsar.getZkClient(), PRODUCER_NAME_GENERATOR_PATH,
                pulsar.getConfiguration().getClusterName());
        ServerBootstrap bootstrap = defaultServerBootstrap.clone();
        ServiceConfiguration serviceConfig = pulsar.getConfiguration();
        bootstrap.childHandler(new PulsarChannelInitializer(pulsar, false));
        // Plain (non-TLS) listener is optional; only bound when a port is configured
        Optional<Integer> port = serviceConfig.getBrokerServicePort();
        if (port.isPresent()) {
            // Bind and start to accept incoming connections.
            InetSocketAddress addr = new InetSocketAddress(pulsar.getBindAddress(), port.get());
            try {
                listenChannel = bootstrap.bind(addr).sync().channel();
                log.info("Started Pulsar Broker service on {}", listenChannel.localAddress());
            } catch (Exception e) {
                throw new IOException("Failed to bind Pulsar broker on " + addr, e);
            }
        }
        // TLS listener reuses the same bootstrap with a TLS-enabled channel initializer
        Optional<Integer> tlsPort = serviceConfig.getBrokerServicePortTls();
        if (tlsPort.isPresent()) {
            ServerBootstrap tlsBootstrap = bootstrap.clone();
            tlsBootstrap.childHandler(new PulsarChannelInitializer(pulsar, true));
            try {
                listenChannelTls = tlsBootstrap.bind(new InetSocketAddress(
                        pulsar.getBindAddress(), tlsPort.get())).sync()
                        .channel();
                log.info("Started Pulsar Broker TLS service on {} - TLS provider: {}", listenChannelTls.localAddress(),
                        SslContext.defaultServerProvider());
            } catch (Exception e) {
                throw new IOException(String.format("Failed to start Pulsar Broker TLS service on %s:%d",
                        pulsar.getBindAddress(), tlsPort.get()), e);
            }
        }
        // start other housekeeping functions
        this.startStatsUpdater(
                serviceConfig.getStatsUpdateInitialDelayInSecs(),
                serviceConfig.getStatsUpdateFrequencyInSecs());
        this.startInactivityMonitor();
        this.startMessageExpiryMonitor();
        this.startCompactionMonitor();
        this.startMessagePublishBufferMonitor();
        this.startConsumedLedgersMonitor();
        this.startLedgerFullMonitor();
        this.startBacklogQuotaChecker();
        this.updateBrokerPublisherThrottlingMaxRate();
        this.startCheckReplicationPolicies();
        this.startDeduplicationSnapshotMonitor();
        // register listener to capture zk-latency
        ClientCnxnAspect.addListener(zkStatsListener);
        ClientCnxnAspect.registerExecutor(pulsar.getExecutor());
    }
protected void startStatsUpdater(int statsUpdateInitailDelayInSecs, int statsUpdateFrequencyInSecs) {
statsUpdater.scheduleAtFixedRate(safeRun(this::updateRates),
statsUpdateInitailDelayInSecs, statsUpdateFrequencyInSecs, TimeUnit.SECONDS);
// Ensure the broker starts up with initial stats
updateRates();
}
protected void startDeduplicationSnapshotMonitor() {
int interval = pulsar().getConfiguration().getBrokerDeduplicationSnapshotFrequencyInSeconds();
if (interval > 0 && pulsar().getConfiguration().isBrokerDeduplicationEnabled()) {
this.deduplicationSnapshotMonitor =
Executors.newSingleThreadScheduledExecutor(new DefaultThreadFactory(
"deduplication-snapshot-monitor"));
deduplicationSnapshotMonitor.scheduleAtFixedRate(safeRun(() -> forEachTopic(
Topic::checkDeduplicationSnapshot))
, interval, interval, TimeUnit.SECONDS);
}
}
protected void startInactivityMonitor() {
if (pulsar().getConfiguration().isBrokerDeleteInactiveTopicsEnabled()) {
int interval = pulsar().getConfiguration().getBrokerDeleteInactiveTopicsFrequencySeconds();
inactivityMonitor.scheduleAtFixedRate(safeRun(() -> checkGC()), interval, interval,
TimeUnit.SECONDS);
}
// Deduplication info checker
long duplicationCheckerIntervalInSeconds = TimeUnit.MINUTES
.toSeconds(pulsar().getConfiguration().getBrokerDeduplicationProducerInactivityTimeoutMinutes()) / 3;
inactivityMonitor.scheduleAtFixedRate(safeRun(this::checkMessageDeduplicationInfo),
duplicationCheckerIntervalInSeconds,
duplicationCheckerIntervalInSeconds, TimeUnit.SECONDS);
// Inactive subscriber checker
if (pulsar().getConfiguration().getSubscriptionExpiryCheckIntervalInMinutes() > 0) {
long subscriptionExpiryCheckIntervalInSeconds =
TimeUnit.MINUTES.toSeconds(pulsar().getConfiguration()
.getSubscriptionExpiryCheckIntervalInMinutes());
inactivityMonitor.scheduleAtFixedRate(safeRun(this::checkInactiveSubscriptions),
subscriptionExpiryCheckIntervalInSeconds,
subscriptionExpiryCheckIntervalInSeconds, TimeUnit.SECONDS);
}
}
protected void startMessageExpiryMonitor() {
int interval = pulsar().getConfiguration().getMessageExpiryCheckIntervalInMinutes();
messageExpiryMonitor.scheduleAtFixedRate(safeRun(this::checkMessageExpiry), interval, interval,
TimeUnit.MINUTES);
}
protected void startCheckReplicationPolicies() {
int interval = pulsar.getConfig().getReplicationPolicyCheckDurationSeconds();
if (interval > 0) {
messageExpiryMonitor.scheduleAtFixedRate(safeRun(this::checkReplicationPolicies), interval, interval,
TimeUnit.SECONDS);
}
}
protected void startCompactionMonitor() {
int interval = pulsar().getConfiguration().getBrokerServiceCompactionMonitorIntervalInSeconds();
if (interval > 0) {
compactionMonitor.scheduleAtFixedRate(safeRun(() -> checkCompaction()),
interval, interval, TimeUnit.SECONDS);
}
}
protected void startMessagePublishBufferMonitor() {
int interval = pulsar().getConfiguration().getMessagePublishBufferCheckIntervalInMillis();
if (interval > 0 && maxMessagePublishBufferBytes > 0) {
messagePublishBufferMonitor.scheduleAtFixedRate(safeRun(this::checkMessagePublishBuffer),
interval, interval, TimeUnit.MILLISECONDS);
}
}
protected void startConsumedLedgersMonitor() {
int interval = pulsar().getConfiguration().getRetentionCheckIntervalInSeconds();
if (interval > 0) {
consumedLedgersMonitor.scheduleAtFixedRate(safeRun(this::checkConsumedLedgers),
interval, interval, TimeUnit.SECONDS);
}
}
protected void startLedgerFullMonitor() {
int interval = pulsar().getConfiguration().getManagedLedgerMaxLedgerRolloverTimeMinutes();
ledgerFullMonitor.scheduleAtFixedRate(safeRun(this::checkLedgerFull),
interval, interval, TimeUnit.MINUTES);
}
protected void startBacklogQuotaChecker() {
if (pulsar().getConfiguration().isBacklogQuotaCheckEnabled()) {
final int interval = pulsar().getConfiguration().getBacklogQuotaCheckIntervalInSeconds();
log.info("Scheduling a thread to check backlog quota after [{}] seconds in background", interval);
backlogQuotaChecker.scheduleAtFixedRate(safeRun(this::monitorBacklogQuota), interval, interval,
TimeUnit.SECONDS);
} else {
log.info("Backlog quota check monitoring is disabled");
}
}
/**
* Schedules and monitors publish-throttling for all owned topics that has publish-throttling configured. It also
* disables and shutdowns publish-rate-limiter monitor task if broker disables it.
*/
public synchronized void setupTopicPublishRateLimiterMonitor() {
// set topic PublishRateLimiterMonitor
long topicTickTimeMs = pulsar().getConfiguration().getTopicPublisherThrottlingTickTimeMillis();
if (topicTickTimeMs > 0) {
if (this.topicPublishRateLimiterMonitor == null) {
this.topicPublishRateLimiterMonitor = Executors.newSingleThreadScheduledExecutor(
new DefaultThreadFactory("pulsar-topic-publish-rate-limiter-monitor"));
if (topicTickTimeMs > 0) {
// schedule task that sums up publish-rate across all cnx on a topic
topicPublishRateLimiterMonitor.scheduleAtFixedRate(safeRun(() -> checkTopicPublishThrottlingRate()),
topicTickTimeMs, topicTickTimeMs, TimeUnit.MILLISECONDS);
// schedule task that refreshes rate-limiting bucket
topicPublishRateLimiterMonitor.scheduleAtFixedRate(safeRun(() -> refreshTopicPublishRate()), 1, 1,
TimeUnit.SECONDS);
}
}
} else {
// disable publish-throttling for all topics
if (this.topicPublishRateLimiterMonitor != null) {
try {
this.topicPublishRateLimiterMonitor.awaitTermination(30, TimeUnit.SECONDS);
} catch (InterruptedException e) {
log.warn("failed to shutdown topicPublishRateLimiterMonitor", e);
}
// make sure topics are not being throttled
refreshTopicPublishRate();
this.topicPublishRateLimiterMonitor = null;
}
}
}
/**
* Schedules and monitors publish-throttling for broker that has publish-throttling configured. It also
* disables and shutdowns publish-rate-limiter monitor for broker task if broker disables it.
*/
public synchronized void setupBrokerPublishRateLimiterMonitor() {
// set broker PublishRateLimiterMonitor
long brokerTickTimeMs = pulsar().getConfiguration().getBrokerPublisherThrottlingTickTimeMillis();
if (brokerTickTimeMs > 0) {
if (this.brokerPublishRateLimiterMonitor == null) {
this.brokerPublishRateLimiterMonitor = Executors.newSingleThreadScheduledExecutor(
new DefaultThreadFactory("pulsar-broker-publish-rate-limiter-monitor"));
if (brokerTickTimeMs > 0) {
// schedule task that sums up publish-rate across all cnx on a topic,
// and check the rate limit exceeded or not.
brokerPublishRateLimiterMonitor.scheduleAtFixedRate(
safeRun(() -> checkBrokerPublishThrottlingRate()),
brokerTickTimeMs,
brokerTickTimeMs,
TimeUnit.MILLISECONDS);
// schedule task that refreshes rate-limiting bucket
brokerPublishRateLimiterMonitor.scheduleAtFixedRate(
safeRun(() -> refreshBrokerPublishRate()),
1,
1,
TimeUnit.SECONDS);
}
}
} else {
// disable publish-throttling for broker.
if (this.brokerPublishRateLimiterMonitor != null) {
try {
this.brokerPublishRateLimiterMonitor.awaitTermination(30, TimeUnit.SECONDS);
} catch (InterruptedException e) {
log.warn("failed to shutdown brokerPublishRateLimiterMonitor", e);
}
// make sure topics are not being throttled
refreshBrokerPublishRate();
this.brokerPublishRateLimiterMonitor = null;
}
}
}
    /**
     * Shuts the broker service down. Order matters: owned bundles are unloaded first (so clients
     * can move to other brokers), then replication clients/admins are closed, then network
     * channels and event-loop groups, and finally every housekeeping executor is stopped.
     */
    @Override
    public void close() throws IOException {
        log.info("Shutting down Pulsar Broker service");
        if (pulsar.getConfigurationCache() != null) {
            pulsar.getConfigurationCache().policiesCache().unregisterListener(this);
        }
        // unloads all namespaces gracefully without disrupting mutually
        unloadNamespaceBundlesGracefully();
        // close replication clients
        replicationClients.forEach((cluster, client) -> {
            try {
                client.shutdown();
            } catch (PulsarClientException e) {
                // best-effort: keep closing the remaining clients
                log.warn("Error shutting down repl client for cluster {}", cluster, e);
            }
        });
        // close replication admins
        clusterAdmins.forEach((cluster, admin) -> {
            try {
                admin.close();
            } catch (Exception e) {
                // best-effort: keep closing the remaining admins
                log.warn("Error shutting down repl admin for cluster {}", cluster, e);
            }
        });
        // stop accepting new connections before tearing down the rest
        if (listenChannel != null) {
            listenChannel.close();
        }
        if (listenChannelTls != null) {
            listenChannelTls.close();
        }
        acceptorGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
        if (interceptor != null) {
            interceptor.close();
            interceptor = null;
        }
        // stop all periodic housekeeping executors
        statsUpdater.shutdown();
        inactivityMonitor.shutdown();
        messageExpiryMonitor.shutdown();
        compactionMonitor.shutdown();
        ledgerFullMonitor.shutdown();
        messagePublishBufferMonitor.shutdown();
        consumedLedgersMonitor.shutdown();
        backlogQuotaChecker.shutdown();
        authenticationService.close();
        pulsarStats.close();
        // detach the zk-latency listener registered in start()
        ClientCnxnAspect.removeListener(zkStatsListener);
        ClientCnxnAspect.registerExecutor(null);
        topicOrderedExecutor.shutdown();
        delayedDeliveryTrackerFactory.close();
        // rate-limiter monitors are created lazily, so they may be null here
        if (topicPublishRateLimiterMonitor != null) {
            topicPublishRateLimiterMonitor.shutdown();
        }
        if (brokerPublishRateLimiterMonitor != null) {
            brokerPublishRateLimiterMonitor.shutdown();
        }
        if (deduplicationSnapshotMonitor != null) {
            deduplicationSnapshotMonitor.shutdown();
        }
        log.info("Broker service completely shut down");
    }
    /**
     * It unloads all owned namespacebundles gracefully.
     * <ul>
     * <li>First it makes current broker unavailable and isolates from the clusters so, it will not serve any new
     * requests.</li>
     * <li>Second it starts unloading namespace bundle one by one without closing the connection in order to avoid
     * disruption for other namespacebundles which are sharing the same connection from the same client.</li>
     * </ul>
     */
    public void unloadNamespaceBundlesGracefully() {
        try {
            // make broker-node unavailable from the cluster
            if (pulsar.getLoadManager() != null && pulsar.getLoadManager().get() != null) {
                try {
                    pulsar.getLoadManager().get().disableBroker();
                } catch (PulsarServerException.NotFoundException ne) {
                    log.warn("Broker load-manager znode doesn't exist ", ne);
                    // still continue and release bundle ownership as broker's registration node doesn't exist.
                }
            }
            // unload all namespace-bundles gracefully
            long closeTopicsStartTime = System.nanoTime();
            Set<NamespaceBundle> serviceUnits = pulsar.getNamespaceService().getOwnedServiceUnits();
            serviceUnits.forEach(su -> {
                // NOTE(review): serviceUnits is already typed Set<NamespaceBundle>, so this
                // instanceof check is purely defensive.
                if (su instanceof NamespaceBundle) {
                    try {
                        // bounded wait per bundle so a stuck bundle cannot block shutdown forever
                        pulsar.getNamespaceService().unloadNamespaceBundle(su, 1, TimeUnit.MINUTES).get();
                    } catch (Exception e) {
                        log.warn("Failed to unload namespace bundle {}", su, e);
                    }
                }
            });
            double closeTopicsTimeSeconds = TimeUnit.NANOSECONDS.toMillis((System.nanoTime() - closeTopicsStartTime))
                    / 1000.0;
            log.info("Unloading {} namespace-bundles completed in {} seconds", serviceUnits.size(),
                    closeTopicsTimeSeconds);
        } catch (Exception e) {
            log.error("Failed to disable broker from loadbalancer list {}", e.getMessage(), e);
        }
    }
public CompletableFuture<Optional<Topic>> getTopicIfExists(final String topic) {
return getTopic(topic, false /* createIfMissing */);
}
public CompletableFuture<Topic> getOrCreateTopic(final String topic) {
return getTopic(topic, isAllowAutoTopicCreation(topic)).thenApply(Optional::get);
}
public CompletableFuture<Optional<Topic>> getTopic(final String topic, boolean createIfMissing) {
try {
CompletableFuture<Optional<Topic>> topicFuture = topics.get(topic);
if (topicFuture != null) {
if (topicFuture.isCompletedExceptionally()
|| (topicFuture.isDone() && !topicFuture.getNow(Optional.empty()).isPresent())) {
// Exceptional topics should be recreated.
topics.remove(topic, topicFuture);
} else {
return topicFuture;
}
}
final boolean isPersistentTopic = TopicName.get(topic).getDomain().equals(TopicDomain.persistent);
return topics.computeIfAbsent(topic, (topicName) -> {
return isPersistentTopic ? this.loadOrCreatePersistentTopic(topicName, createIfMissing)
: createNonPersistentTopic(topicName);
});
} catch (IllegalArgumentException e) {
log.warn("[{}] Illegalargument exception when loading topic", topic, e);
return failedFuture(e);
} catch (RuntimeException e) {
Throwable cause = e.getCause();
if (cause instanceof ServiceUnitNotReadyException) {
log.warn("[{}] Service unit is not ready when loading the topic", topic);
} else {
log.warn("[{}] Unexpected exception when loading topic: {}", topic, e.getMessage(), e);
}
return failedFuture(cause);
}
}
public CompletableFuture<SchemaVersion> deleteSchemaStorage(String topic) {
Optional<Topic> optTopic = getTopicReference(topic);
if (optTopic.isPresent()) {
return optTopic.get().deleteSchema();
} else {
return CompletableFuture.completedFuture(null);
}
}
    /**
     * Deletes a topic without deleting its schema.
     *
     * @param topic topic name
     * @param forceDelete when true, delete even with active producers/subscriptions
     */
    public CompletableFuture<Void> deleteTopic(String topic, boolean forceDelete) {
        return deleteTopic(topic, forceDelete, false);
    }
    /**
     * Deletes a topic that may or may not be loaded on this broker. For a loaded topic the
     * deletion goes through the Topic object (forceful or graceful); replicated topics may not
     * be deleted non-forcefully. For an unloaded persistent topic the backing managed ledger is
     * deleted directly from metadata.
     *
     * @param topic topic name
     * @param forceDelete when true, delete even with active producers/subscriptions/replication
     * @param deleteSchema when true, the topic's schema is deleted before the topic itself
     */
    public CompletableFuture<Void> deleteTopic(String topic, boolean forceDelete, boolean deleteSchema) {
        Optional<Topic> optTopic = getTopicReference(topic);
        if (optTopic.isPresent()) {
            Topic t = optTopic.get();
            if (forceDelete) {
                // Forceful path: optionally drop the schema first, then delete unconditionally.
                if (deleteSchema) {
                    return t.deleteSchema().thenCompose(schemaVersion -> {
                        log.info("Successfully delete topic {}'s schema of version {}", t.getName(), schemaVersion);
                        return t.deleteForcefully();
                    });
                } else {
                    return t.deleteForcefully();
                }
            }
            // v2 topics have a global name so check if the topic is replicated.
            if (t.isReplicated()) {
                // Delete is disallowed on global topic
                final List<String> clusters = t.getReplicators().keys();
                log.error("Delete forbidden topic {} is replicated on clusters {}", topic, clusters);
                return FutureUtil.failedFuture(
                        new IllegalStateException("Delete forbidden topic is replicated on clusters " + clusters));
            }
            // Graceful path: optionally drop the schema first, then delete.
            if (deleteSchema) {
                return t.deleteSchema().thenCompose(schemaVersion -> {
                    log.info("Successfully delete topic {}'s schema of version {}", t.getName(), schemaVersion);
                    return t.delete();
                });
            } else {
                return t.delete();
            }
        }
        // Topic is not loaded, though we still might be able to delete from metadata
        TopicName tn = TopicName.get(topic);
        if (!tn.isPersistent()) {
            // Nothing to do if it's not persistent
            return CompletableFuture.completedFuture(null);
        }
        // Delete the backing managed ledger directly; bridge the callback into a future.
        CompletableFuture<Void> future = new CompletableFuture<>();
        managedLedgerFactory.asyncDelete(tn.getPersistenceNamingEncoding(), new DeleteLedgerCallback() {
            @Override
            public void deleteLedgerComplete(Object ctx) {
                future.complete(null);
            }
            @Override
            public void deleteLedgerFailed(ManagedLedgerException exception, Object ctx) {
                future.completeExceptionally(exception);
            }
        }, null);
        return future;
    }
private CompletableFuture<Optional<Topic>> createNonPersistentTopic(String topic) {
CompletableFuture<Optional<Topic>> topicFuture = futureWithDeadline();
if (!pulsar.getConfiguration().isEnableNonPersistentTopics()) {
if (log.isDebugEnabled()) {
log.debug("Broker is unable to load non-persistent topic {}", topic);
}
topicFuture.completeExceptionally(
new NotAllowedException("Broker is not unable to load non-persistent topic"));
return topicFuture;
}
final long topicCreateTimeMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
NonPersistentTopic nonPersistentTopic = new NonPersistentTopic(topic, this);
CompletableFuture<Void> replicationFuture = nonPersistentTopic.checkReplication();
replicationFuture.thenRun(() -> {
log.info("Created topic {}", nonPersistentTopic);
long topicLoadLatencyMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime()) - topicCreateTimeMs;
pulsarStats.recordTopicLoadTimeValue(topic, topicLoadLatencyMs);
addTopicToStatsMaps(TopicName.get(topic), nonPersistentTopic);
topicFuture.complete(Optional.of(nonPersistentTopic));
});
replicationFuture.exceptionally((ex) -> {
log.warn("Replication check failed. Removing topic from topics list {}, {}", topic, ex);
nonPersistentTopic.stopReplProducers().whenComplete((v, exception) -> {
pulsar.getExecutor().execute(() -> topics.remove(topic, topicFuture));
topicFuture.completeExceptionally(ex);
});
return null;
});
return topicFuture;
}
private static <T> CompletableFuture<T> failedFuture(Throwable t) {
CompletableFuture<T> future = new CompletableFuture<>();
future.completeExceptionally(t);
return future;
}
private <T> CompletableFuture<T> futureWithDeadline(Long delay, TimeUnit unit, Exception exp) {
CompletableFuture<T> future = new CompletableFuture<T>();
executor().schedule(() -> {
if (!future.isDone()) {
future.completeExceptionally(exp);
}
}, delay, unit);
return future;
}
private <T> CompletableFuture<T> futureWithDeadline() {
return futureWithDeadline(60000L, TimeUnit.MILLISECONDS,
new TimeoutException("Future didn't finish within deadline"));
}
    /**
     * Returns (creating and caching on first use) the PulsarClient used to replicate messages to
     * the given remote cluster. The client is configured from the cluster metadata in the
     * configuration cache and shares the broker's IO worker threads.
     *
     * @param cluster remote cluster name
     * @throws RuntimeException if the cluster metadata is missing or the client cannot be built
     */
    public PulsarClient getReplicationClient(String cluster) {
        // Fast path: avoid the computeIfAbsent lambda for already-created clients.
        PulsarClient client = replicationClients.get(cluster);
        if (client != null) {
            return client;
        }
        return replicationClients.computeIfAbsent(cluster, key -> {
            try {
                // Look up the remote cluster's connection metadata.
                String path = PulsarWebResource.path("clusters", cluster);
                ClusterData data = this.pulsar.getConfigurationCache().clustersCache().get(path)
                        .orElseThrow(() -> new KeeperException.NoNodeException(path));
                ClientBuilder clientBuilder = PulsarClient.builder()
                        .enableTcpNoDelay(false)
                        .connectionsPerBroker(pulsar.getConfiguration().getReplicationConnectionsPerBroker())
                        .statsInterval(0, TimeUnit.SECONDS);
                if (pulsar.getConfiguration().isAuthenticationEnabled()) {
                    clientBuilder.authentication(pulsar.getConfiguration().getBrokerClientAuthenticationPlugin(),
                            pulsar.getConfiguration().getBrokerClientAuthenticationParameters());
                }
                if (pulsar.getConfiguration().isBrokerClientTlsEnabled()) {
                    // Prefer the TLS broker-service URL, falling back to the TLS service URL.
                    clientBuilder
                            .serviceUrl(isNotBlank(data.getBrokerServiceUrlTls()) ? data.getBrokerServiceUrlTls()
                                    : data.getServiceUrlTls())
                            .enableTls(true)
                            .allowTlsInsecureConnection(pulsar.getConfiguration().isTlsAllowInsecureConnection());
                    if (pulsar.getConfiguration().isBrokerClientTlsEnabledWithKeyStore()) {
                        clientBuilder.useKeyStoreTls(true)
                                .tlsTrustStoreType(pulsar.getConfiguration().getBrokerClientTlsTrustStoreType())
                                .tlsTrustStorePath(pulsar.getConfiguration().getBrokerClientTlsTrustStore())
                                .tlsTrustStorePassword(pulsar.getConfiguration()
                                        .getBrokerClientTlsTrustStorePassword());
                    } else {
                        clientBuilder.tlsTrustCertsFilePath(pulsar.getConfiguration()
                                .getBrokerClientTrustCertsFilePath());
                    }
                } else {
                    clientBuilder.serviceUrl(
                            isNotBlank(data.getBrokerServiceUrl()) ? data.getBrokerServiceUrl() : data.getServiceUrl());
                }
                if (data.getProxyProtocol() != null && StringUtils.isNotBlank(data.getProxyServiceUrl())) {
                    clientBuilder.proxyServiceUrl(data.getProxyServiceUrl(), data.getProxyProtocol());
                    log.info("Configuring proxy-url {} with protocol {}", data.getProxyServiceUrl(),
                            data.getProxyProtocol());
                }
                // Share all the IO threads across broker and client connections
                ClientConfigurationData conf = ((ClientBuilderImpl) clientBuilder).getClientConfigurationData();
                return new PulsarClientImpl(conf, workerGroup);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
    }
    /**
     * Returns (creating and caching on first use) the PulsarAdmin client for the given remote
     * cluster, configured from the cluster metadata in the configuration cache.
     *
     * @param cluster remote cluster name
     * @throws RuntimeException if the cluster metadata is missing or the admin cannot be built
     */
    public PulsarAdmin getClusterPulsarAdmin(String cluster) {
        // Fast path: avoid the computeIfAbsent lambda for already-created admins.
        PulsarAdmin admin = clusterAdmins.get(cluster);
        if (admin != null) {
            return admin;
        }
        return clusterAdmins.computeIfAbsent(cluster, key -> {
            try {
                // Look up the remote cluster's connection metadata.
                String path = PulsarWebResource.path("clusters", cluster);
                ClusterData data = this.pulsar.getConfigurationCache().clustersCache().get(path)
                        .orElseThrow(() -> new KeeperException.NoNodeException(path));
                ServiceConfiguration conf = pulsar.getConfig();
                // Use the TLS endpoint only when TLS is enabled AND the cluster publishes one.
                boolean isTlsUrl = conf.isBrokerClientTlsEnabled() && isNotBlank(data.getServiceUrlTls());
                String adminApiUrl = isTlsUrl ? data.getServiceUrlTls() : data.getServiceUrl();
                PulsarAdminBuilder builder = PulsarAdmin.builder().serviceHttpUrl(adminApiUrl)
                        .authentication(
                                conf.getBrokerClientAuthenticationPlugin(),
                                conf.getBrokerClientAuthenticationParameters());
                if (isTlsUrl) {
                    builder.allowTlsInsecureConnection(conf.isTlsAllowInsecureConnection());
                    if (conf.isBrokerClientTlsEnabledWithKeyStore()) {
                        builder.useKeyStoreTls(true)
                                .tlsTrustStoreType(conf.getBrokerClientTlsTrustStoreType())
                                .tlsTrustStorePath(conf.getBrokerClientTlsTrustStore())
                                .tlsTrustStorePassword(conf.getBrokerClientTlsTrustStorePassword());
                    } else {
                        builder.tlsTrustCertsFilePath(conf.getBrokerClientTrustCertsFilePath());
                    }
                }
                // most of the admin request requires to make zk-call so, keep the max read-timeout based on
                // zk-operation timeout
                builder.readTimeout(conf.getZooKeeperOperationTimeoutSeconds(), TimeUnit.SECONDS);
                PulsarAdmin adminClient = builder.build();
                log.info("created admin with url {} ", adminApiUrl);
                return adminClient;
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });
    }
/**
* It creates a topic async and returns CompletableFuture. It also throttles down configured max-concurrent topic
* loading and puts them into queue once in-process topics are created.
*
* @param topic persistent-topic name
* @return CompletableFuture<Topic>
* @throws RuntimeException
*/
protected CompletableFuture<Optional<Topic>> loadOrCreatePersistentTopic(final String topic,
boolean createIfMissing) throws RuntimeException {
final CompletableFuture<Optional<Topic>> topicFuture = futureWithDeadline(
pulsar.getConfiguration().getTopicLoadTimeoutSeconds(),
TimeUnit.SECONDS, new TimeoutException("Failed to load topic within timeout"));
if (!pulsar.getConfiguration().isEnablePersistentTopics()) {
if (log.isDebugEnabled()) {
log.debug("Broker is unable to load persistent topic {}", topic);
}
topicFuture.completeExceptionally(new NotAllowedException(
"Broker is not unable to load persistent topic"));
return topicFuture;
}
checkTopicNsOwnershipAsync(topic).whenComplete((ignored, throwable) -> {
if (throwable != null) {
topicFuture.completeExceptionally(throwable);
return;
}
final Semaphore topicLoadSemaphore = topicLoadRequestSemaphore.get();
if (topicLoadSemaphore.tryAcquire()) {
createPersistentTopic(topic, createIfMissing, topicFuture);
topicFuture.handle((persistentTopic, ex) -> {
// release permit and process pending topic
topicLoadSemaphore.release();
createPendingLoadTopic();
return null;
});
} else {
pendingTopicLoadingQueue.add(new ImmutablePair<>(topic, topicFuture));
if (log.isDebugEnabled()) {
log.debug("topic-loading for {} added into pending queue", topic);
}
}
});
return topicFuture;
}
    /**
     * Opens (or creates) the managed ledger backing a persistent topic and completes
     * {@code topicFuture} with the resulting Topic. Fails the future when the owning namespace
     * bundle is being unloaded, the per-namespace topic limit is reached, the ledger cannot be
     * opened, or replication/deduplication checks fail.
     */
    private void createPersistentTopic(final String topic, boolean createIfMissing,
            CompletableFuture<Optional<Topic>> topicFuture) {
        final long topicCreateTimeMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
        TopicName topicName = TopicName.get(topic);
        if (!pulsar.getNamespaceService().isServiceUnitActive(topicName)) {
            // namespace is being unloaded
            String msg = String.format("Namespace is being unloaded, cannot add topic %s", topic);
            log.warn(msg);
            // remove from the cache map on a separate thread to avoid deadlocking callers
            pulsar.getExecutor().execute(() -> topics.remove(topic, topicFuture));
            topicFuture.completeExceptionally(new ServiceUnitNotReadyException(msg));
            return;
        }
        if (!checkMaxTopicsPerNamespace(topicName, 1, topicFuture)) {
            return;
        }
        getManagedLedgerConfig(topicName).thenAccept(managedLedgerConfig -> {
            if (isBrokerEntryMetadataEnabled()) {
                // init managedLedger interceptor
                // NOTE(review): this removes/adds elements of the shared
                // brokerEntryMetadataInterceptors collection while iterating it — safe only if
                // the collection is concurrent-modification tolerant; TODO confirm its type.
                for (BrokerEntryMetadataInterceptor interceptor : brokerEntryMetadataInterceptors) {
                    if (interceptor instanceof AppendIndexMetadataInterceptor) {
                        // add individual AppendOffsetMetadataInterceptor for each topic
                        brokerEntryMetadataInterceptors.remove(interceptor);
                        brokerEntryMetadataInterceptors.add(new AppendIndexMetadataInterceptor());
                    }
                }
                ManagedLedgerInterceptor mlInterceptor =
                        new ManagedLedgerInterceptorImpl(brokerEntryMetadataInterceptors);
                managedLedgerConfig.setManagedLedgerInterceptor(mlInterceptor);
            }
            managedLedgerConfig.setCreateIfMissing(createIfMissing);
            // Once we have the configuration, we can proceed with the async open operation
            managedLedgerFactory.asyncOpen(topicName.getPersistenceNamingEncoding(), managedLedgerConfig,
                    new OpenLedgerCallback() {
                        @Override
                        public void openLedgerComplete(ManagedLedger ledger, Object ctx) {
                            try {
                                // System topics get a specialized Topic implementation.
                                PersistentTopic persistentTopic = isSystemTopic(topic)
                                        ? new SystemTopic(topic, ledger, BrokerService.this)
                                        : new PersistentTopic(topic, ledger, BrokerService.this);
                                CompletableFuture<Void> replicationFuture = persistentTopic.checkReplication();
                                replicationFuture.thenCompose(v -> {
                                    // Also check dedup status
                                    return persistentTopic.checkDeduplicationStatus();
                                }).thenRun(() -> {
                                    log.info("Created topic {} - dedup is {}", topic,
                                            persistentTopic.isDeduplicationEnabled() ? "enabled" : "disabled");
                                    long topicLoadLatencyMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime())
                                            - topicCreateTimeMs;
                                    pulsarStats.recordTopicLoadTimeValue(topic, topicLoadLatencyMs);
                                    addTopicToStatsMaps(topicName, persistentTopic);
                                    topicFuture.complete(Optional.of(persistentTopic));
                                }).exceptionally((ex) -> {
                                    log.warn(
                                            "Replication or dedup check failed."
                                                    + " Removing topic from topics list {}, {}",
                                            topic, ex);
                                    persistentTopic.stopReplProducers().whenComplete((v, exception) -> {
                                        topics.remove(topic, topicFuture);
                                        topicFuture.completeExceptionally(ex);
                                    });
                                    return null;
                                });
                            } catch (NamingException e) {
                                log.warn("Failed to create topic {}-{}", topic, e.getMessage());
                                pulsar.getExecutor().execute(() -> topics.remove(topic, topicFuture));
                                topicFuture.completeExceptionally(e);
                            }
                        }
                        @Override
                        public void openLedgerFailed(ManagedLedgerException exception, Object ctx) {
                            if (!createIfMissing && exception instanceof ManagedLedgerNotFoundException) {
                                // We were just trying to load a topic and the topic doesn't exist
                                topicFuture.complete(Optional.empty());
                            } else {
                                log.warn("Failed to create topic {}", topic, exception);
                                pulsar.getExecutor().execute(() -> topics.remove(topic, topicFuture));
                                topicFuture.completeExceptionally(new PersistenceException(exception));
                            }
                        }
                    }, () -> isTopicNsOwnedByBroker(topicName), null);
        }).exceptionally((exception) -> {
            log.warn("[{}] Failed to get topic configuration: {}", topic, exception.getMessage(), exception);
            // remove topic from topics-map in different thread to avoid possible deadlock if
            // createPersistentTopic-thread only tries to handle this future-result
            pulsar.getExecutor().execute(() -> topics.remove(topic, topicFuture));
            topicFuture.completeExceptionally(exception);
            return null;
        });
    }
    /**
     * Builds the ManagedLedgerConfig for a topic by merging topic-level policies (when enabled),
     * namespace-level policies, and broker defaults — in that precedence order for persistence,
     * retention and offload settings. Runs on the topic's ordered executor because policy reads
     * may block on ZooKeeper.
     *
     * @param topicName topic to build the config for
     * @return future completing with the assembled config, or exceptionally on policy-read errors
     */
    public CompletableFuture<ManagedLedgerConfig> getManagedLedgerConfig(TopicName topicName) {
        CompletableFuture<ManagedLedgerConfig> future = futureWithDeadline();
        // Execute in background thread, since getting the policies might block if the z-node wasn't already cached
        pulsar.getOrderedExecutor().executeOrdered(topicName, safeRun(() -> {
            NamespaceName namespace = topicName.getNamespaceObject();
            ServiceConfiguration serviceConfig = pulsar.getConfiguration();
            // Get persistence policy for this topic
            Optional<Policies> policies = Optional.empty();
            Optional<LocalPolicies> localPolicies = Optional.empty();
            PersistencePolicies persistencePolicies = null;
            RetentionPolicies retentionPolicies = null;
            OffloadPolicies topicLevelOffloadPolicies = null;
            // Topic-level policies take highest precedence when the feature is enabled.
            if (pulsar.getConfig().isTopicLevelPoliciesEnabled()) {
                TopicName cloneTopicName = topicName;
                if (topicName.isPartitioned()) {
                    // Policies are stored against the partitioned-topic name, not the partition.
                    cloneTopicName = TopicName.get(topicName.getPartitionedTopicName());
                }
                try {
                    TopicPolicies topicPolicies = pulsar.getTopicPoliciesService().getTopicPolicies(cloneTopicName);
                    if (topicPolicies != null) {
                        persistencePolicies = topicPolicies.getPersistence();
                        retentionPolicies = topicPolicies.getRetentionPolicies();
                        topicLevelOffloadPolicies = topicPolicies.getOffloadPolicies();
                    }
                } catch (BrokerServiceException.TopicPoliciesCacheNotInitException e) {
                    // best-effort: fall through to namespace/broker-level settings
                    log.warn("Topic {} policies cache have not init.", topicName);
                }
            }
            // Namespace-level policies (global and local).
            try {
                policies = pulsar
                        .getConfigurationCache().policiesCache().get(AdminResource.path(POLICIES,
                                namespace.toString()));
                String path = joinPath(LOCAL_POLICIES_ROOT, topicName.getNamespaceObject().toString());
                localPolicies = pulsar().getLocalZkCacheService().policiesCache().get(path);
            } catch (Throwable t) {
                // Ignoring since if we don't have policies, we fallback on the default
                log.warn("Got exception when reading persistence policy for {}: {}", topicName, t.getMessage(), t);
                future.completeExceptionally(t);
                return;
            }
            // Fall back: namespace policy, then broker defaults.
            if (persistencePolicies == null) {
                persistencePolicies = policies.map(p -> p.persistence).orElseGet(
                        () -> new PersistencePolicies(serviceConfig.getManagedLedgerDefaultEnsembleSize(),
                                serviceConfig.getManagedLedgerDefaultWriteQuorum(),
                                serviceConfig.getManagedLedgerDefaultAckQuorum(),
                                serviceConfig.getManagedLedgerDefaultMarkDeleteRateLimit()));
            }
            if (retentionPolicies == null) {
                retentionPolicies = policies.map(p -> p.retention_policies).orElseGet(
                        () -> new RetentionPolicies(serviceConfig.getDefaultRetentionTimeInMinutes(),
                                serviceConfig.getDefaultRetentionSizeInMB())
                );
            }
            // Assemble the managed-ledger configuration from the resolved policies + defaults.
            ManagedLedgerConfig managedLedgerConfig = new ManagedLedgerConfig();
            managedLedgerConfig.setEnsembleSize(persistencePolicies.getBookkeeperEnsemble());
            managedLedgerConfig.setWriteQuorumSize(persistencePolicies.getBookkeeperWriteQuorum());
            managedLedgerConfig.setAckQuorumSize(persistencePolicies.getBookkeeperAckQuorum());
            // Bookie-affinity (isolation) groups come from the namespace's local policies.
            if (localPolicies.isPresent() && localPolicies.get().bookieAffinityGroup != null) {
                managedLedgerConfig
                        .setBookKeeperEnsemblePlacementPolicyClassName(ZkIsolatedBookieEnsemblePlacementPolicy.class);
                Map<String, Object> properties = Maps.newHashMap();
                properties.put(ZkIsolatedBookieEnsemblePlacementPolicy.ISOLATION_BOOKIE_GROUPS,
                        localPolicies.get().bookieAffinityGroup.bookkeeperAffinityGroupPrimary);
                properties.put(ZkIsolatedBookieEnsemblePlacementPolicy.SECONDARY_ISOLATION_BOOKIE_GROUPS,
                        localPolicies.get().bookieAffinityGroup.bookkeeperAffinityGroupSecondary);
                managedLedgerConfig.setBookKeeperEnsemblePlacementPolicyProperties(properties);
            }
            managedLedgerConfig.setThrottleMarkDelete(persistencePolicies.getManagedLedgerMaxMarkDeleteRate());
            managedLedgerConfig.setDigestType(serviceConfig.getManagedLedgerDigestType());
            managedLedgerConfig.setPassword(serviceConfig.getManagedLedgerPassword());
            managedLedgerConfig.setMaxUnackedRangesToPersist(serviceConfig.getManagedLedgerMaxUnackedRangesToPersist());
            managedLedgerConfig.setMaxUnackedRangesToPersistInZk(
                    serviceConfig.getManagedLedgerMaxUnackedRangesToPersistInZooKeeper());
            managedLedgerConfig.setMaxEntriesPerLedger(serviceConfig.getManagedLedgerMaxEntriesPerLedger());
            managedLedgerConfig.setMinimumRolloverTime(serviceConfig.getManagedLedgerMinLedgerRolloverTimeMinutes(),
                    TimeUnit.MINUTES);
            managedLedgerConfig.setMaximumRolloverTime(serviceConfig.getManagedLedgerMaxLedgerRolloverTimeMinutes(),
                    TimeUnit.MINUTES);
            managedLedgerConfig.setMaxSizePerLedgerMb(serviceConfig.getManagedLedgerMaxSizePerLedgerMbytes());
            managedLedgerConfig.setMetadataOperationsTimeoutSeconds(
                    serviceConfig.getManagedLedgerMetadataOperationsTimeoutSeconds());
            managedLedgerConfig.setReadEntryTimeoutSeconds(serviceConfig.getManagedLedgerReadEntryTimeoutSeconds());
            managedLedgerConfig.setAddEntryTimeoutSeconds(serviceConfig.getManagedLedgerAddEntryTimeoutSeconds());
            managedLedgerConfig.setMetadataEnsembleSize(serviceConfig.getManagedLedgerDefaultEnsembleSize());
            managedLedgerConfig.setUnackedRangesOpenCacheSetEnabled(
                    serviceConfig.isManagedLedgerUnackedRangesOpenCacheSetEnabled());
            managedLedgerConfig.setMetadataWriteQuorumSize(serviceConfig.getManagedLedgerDefaultWriteQuorum());
            managedLedgerConfig.setMetadataAckQuorumSize(serviceConfig.getManagedLedgerDefaultAckQuorum());
            managedLedgerConfig
                    .setMetadataMaxEntriesPerLedger(serviceConfig.getManagedLedgerCursorMaxEntriesPerLedger());
            managedLedgerConfig.setLedgerRolloverTimeout(serviceConfig.getManagedLedgerCursorRolloverTimeInSeconds());
            managedLedgerConfig.setRetentionTime(retentionPolicies.getRetentionTimeInMinutes(), TimeUnit.MINUTES);
            managedLedgerConfig.setRetentionSizeInMB(retentionPolicies.getRetentionSizeInMB());
            managedLedgerConfig.setAutoSkipNonRecoverableData(serviceConfig.isAutoSkipNonRecoverableData());
            managedLedgerConfig.setLazyCursorRecovery(serviceConfig.isLazyCursorRecovery());
            // Offload: merge topic-level, namespace-level (old-policy compatible) and broker config.
            OffloadPolicies nsLevelOffloadPolicies = policies.map(p -> p.offload_policies).orElse(null);
            OffloadPolicies offloadPolicies = OffloadPolicies.mergeConfiguration(
                    topicLevelOffloadPolicies,
                    OffloadPolicies.oldPoliciesCompatible(nsLevelOffloadPolicies, policies.orElse(null)),
                    getPulsar().getConfig().getProperties());
            if (topicLevelOffloadPolicies != null) {
                try {
                    LedgerOffloader topicLevelLedgerOffLoader = pulsar().createManagedLedgerOffloader(offloadPolicies);
                    managedLedgerConfig.setLedgerOffloader(topicLevelLedgerOffLoader);
                } catch (PulsarServerException e) {
                    future.completeExceptionally(e);
                    return;
                }
            } else {
                //If the topic level policy is null, use the namespace level
                managedLedgerConfig.setLedgerOffloader(pulsar.getManagedLedgerOffloader(namespace, offloadPolicies));
            }
            managedLedgerConfig.setDeletionAtBatchIndexLevelEnabled(
                    serviceConfig.isAcknowledgmentAtBatchIndexLevelEnabled());
            managedLedgerConfig.setNewEntriesCheckDelayInMillis(
                    serviceConfig.getManagedLedgerNewEntriesCheckDelayInMillis());
            future.complete(managedLedgerConfig);
        }, (exception) -> future.completeExceptionally(exception)));
        return future;
    }
    /**
     * Registers a loaded topic in {@code multiLayerTopicsMap} (namespace -> bundle -> topic)
     * and invalidates any cached offline stats for it. Failures to resolve the bundle are
     * logged and swallowed (registration is best-effort).
     */
    private void addTopicToStatsMaps(TopicName topicName, Topic topic) {
        try {
            NamespaceBundle namespaceBundle = pulsar.getNamespaceService().getBundle(topicName);
            if (namespaceBundle != null) {
                // structural updates to the two-level map are serialized on the map itself
                synchronized (multiLayerTopicsMap) {
                    String serviceUnit = namespaceBundle.toString();
                    multiLayerTopicsMap //
                            .computeIfAbsent(topicName.getNamespace(), k -> new ConcurrentOpenHashMap<>()) //
                            .computeIfAbsent(serviceUnit, k -> new ConcurrentOpenHashMap<>()) //
                            .put(topicName.toString(), topic);
                }
            }
            invalidateOfflineTopicStatCache(topicName);
        } catch (Exception e) {
            log.warn("Got exception when retrieving bundle name during create persistent topic", e);
        }
    }
public void refreshTopicToStatsMaps(NamespaceBundle oldBundle) {
checkNotNull(oldBundle);
try {
// retrieve all topics under existing old bundle
List<Topic> topics = getAllTopicsFromNamespaceBundle(oldBundle.getNamespaceObject().toString(),
oldBundle.toString());
if (!isEmpty(topics)) {
// add topic under new split bundles which already updated into NamespaceBundleFactory.bundleCache
topics.stream().forEach(t -> {
addTopicToStatsMaps(TopicName.get(t.getName()), t);
});
// remove old bundle from the map
synchronized (multiLayerTopicsMap) {
multiLayerTopicsMap.get(oldBundle.getNamespaceObject().toString()).remove(oldBundle.toString());
pulsarStats.invalidBundleStats(oldBundle.toString());
}
}
} catch (Exception e) {
log.warn("Got exception while refreshing topicStats map", e);
}
}
    /** Returns the cached offline stats for the given topic, or {@code null} if none are cached. */
    public PersistentOfflineTopicStats getOfflineTopicStat(TopicName topicName) {
        return offlineTopicStatCache.get(topicName);
    }
    /** Caches offline (unloaded-topic) stats for the given topic, replacing any previous entry. */
    public void cacheOfflineTopicStats(TopicName topicName, PersistentOfflineTopicStats offlineTopicStats) {
        offlineTopicStatCache.put(topicName, offlineTopicStats);
    }
public void invalidateOfflineTopicStatCache(TopicName topicName) {
PersistentOfflineTopicStats removed = offlineTopicStatCache.remove(topicName);
if (removed != null) {
log.info("Removed cached offline topic stat for {} ", topicName.getPersistenceNamingEncoding());
}
}
    /**
     * Get a reference to a topic that is currently loaded in the broker.
     *
     * This method will not make the broker attempt to load the topic if it's not already.
     *
     * @return the loaded topic, or empty when it is not loaded, its load is still in progress,
     *         or its load failed
     */
    public Optional<Topic> getTopicReference(String topic) {
        CompletableFuture<Optional<Topic>> future = topics.get(topic);
        if (future != null && future.isDone() && !future.isCompletedExceptionally()) {
            return future.join();
        } else {
            return Optional.empty();
        }
    }
    /** Rolls up per-topic stats into broker-level stats and rotates the latency histogram window. */
    public void updateRates() {
        // serialized on pulsarStats so a stats snapshot is never taken mid-update
        synchronized (pulsarStats) {
            pulsarStats.updateStats(multiLayerTopicsMap);
            Summary.rotateLatencyCollection();
        }
    }
    /** Streams serialized dimension metrics to the given consumer. */
    public void getDimensionMetrics(Consumer<ByteBuf> consumer) {
        pulsarStats.getDimensionMetrics(consumer);
    }
    /** Returns the per-topic metrics collected by {@code pulsarStats}. */
    public List<Metrics> getTopicMetrics() {
        return pulsarStats.getTopicMetrics();
    }
    /** Returns the per-bundle stats collected by {@code pulsarStats}, keyed by bundle name. */
    public Map<String, NamespaceBundleStats> getBundleStats() {
        return pulsarStats.getBundleStats();
    }
    /** Returns the semaphore that throttles concurrent lookup requests (swapped atomically on config change). */
    public Semaphore getLookupRequestSemaphore() {
        return lookupRequestSemaphore.get();
    }
    /** Runs the garbage-collection check on every loaded topic. */
    public void checkGC() {
        forEachTopic(Topic::checkGC);
    }
    /** Runs the message-expiry (TTL) check on every loaded topic. */
    public void checkMessageExpiry() {
        forEachTopic(Topic::checkMessageExpiry);
    }
    /** Re-evaluates replication configuration on every loaded topic. */
    public void checkReplicationPolicies() {
        forEachTopic(Topic::checkReplication);
    }
public void checkCompaction() {
forEachTopic((t) -> {
if (t instanceof PersistentTopic) {
((PersistentTopic) t).checkCompaction();
}
});
}
private void checkConsumedLedgers() {
forEachTopic((t) -> {
if (t instanceof PersistentTopic) {
Optional.ofNullable(((PersistentTopic) t).getManagedLedger()).ifPresent(
managedLedger -> {
managedLedger.trimConsumedLedgersInBackground(Futures.NULL_PROMISE);
}
);
}
});
}
private void checkLedgerFull() {
forEachTopic((t) -> {
if (t instanceof PersistentTopic) {
Optional.ofNullable(((PersistentTopic) t).getManagedLedger()).ifPresent(
managedLedger -> {
managedLedger.rollCurrentLedgerIfFull();
}
);
}
});
}
    /** Runs the message-deduplication maintenance check on every loaded topic. */
    public void checkMessageDeduplicationInfo() {
        forEachTopic(Topic::checkMessageDeduplicationInfo);
    }
    /** Runs the inactive-subscription check on every loaded topic. */
    public void checkInactiveSubscriptions() {
        forEachTopic(Topic::checkInactiveSubscriptions);
    }
    /** Evaluates the per-topic publish throttling rate on every loaded topic. */
    public void checkTopicPublishThrottlingRate() {
        forEachTopic(Topic::checkTopicPublishThrottlingRate);
    }
    /** Resets per-topic publish counters and re-enables reads on topics that were throttled. */
    private void refreshTopicPublishRate() {
        forEachTopic(Topic::resetTopicPublishCountAndEnableReadIfRequired);
    }
public void checkBrokerPublishThrottlingRate() {
brokerPublishRateLimiter.checkPublishRate();
if (brokerPublishRateLimiter.isPublishRateExceeded()) {
forEachTopic(topic -> ((AbstractTopic) topic).disableProducerRead());
}
}
private void refreshBrokerPublishRate() {
boolean doneReset = brokerPublishRateLimiter.resetPublishCount();
forEachTopic(topic -> topic.resetBrokerPublishCountAndEnableReadIfRequired(doneReset));
}
/**
* Iterates over all loaded topics in the broker.
*/
public void forEachTopic(Consumer<Topic> consumer) {
topics.forEach((n, t) -> {
Optional<Topic> topic = extractTopic(t);
topic.ifPresent(consumer::accept);
});
}
    /** Returns the broker's backlog quota manager. */
    public BacklogQuotaManager getBacklogQuotaManager() {
        return this.backlogQuotaManager;
    }
public synchronized void monitorBacklogQuota() {
forEachTopic(topic -> {
if (topic instanceof PersistentTopic) {
PersistentTopic persistentTopic = (PersistentTopic) topic;
if (persistentTopic.isBacklogExceeded()) {
getBacklogQuotaManager().handleExceededBacklogQuota(persistentTopic);
} else {
if (log.isDebugEnabled()) {
log.debug("quota not exceeded for [{}]", topic.getName());
}
}
}
});
}
public boolean isTopicNsOwnedByBroker(TopicName topicName) {
try {
return pulsar.getNamespaceService().isServiceUnitOwned(topicName);
} catch (Exception e) {
log.warn("Failed to check the ownership of the topic: {}, {}", topicName, e.getMessage());
}
return false;
}
    /**
     * Asynchronously verifies that this broker owns the namespace bundle serving {@code topic}.
     * Completes normally when owned; completes exceptionally with {@code ServerMetadataException}
     * when the lookup fails, or {@code ServiceUnitNotReadyException} when another broker owns it.
     */
    public CompletableFuture<Void> checkTopicNsOwnershipAsync(final String topic) {
        TopicName topicName = TopicName.get(topic);
        CompletableFuture<Void> checkFuture = new CompletableFuture<>();
        pulsar.getNamespaceService().checkTopicOwnership(topicName).whenComplete((ownedByThisInstance, throwable) -> {
            if (throwable != null) {
                log.debug("Failed to check the ownership of the topic: {}", topicName, throwable);
                checkFuture.completeExceptionally(new ServerMetadataException(throwable));
            } else if (!ownedByThisInstance) {
                String msg = String.format("Namespace bundle for topic (%s) not served by this instance. "
                        + "Please redo the lookup. Request is denied: namespace=%s", topic, topicName.getNamespace());
                log.warn(msg);
                checkFuture.completeExceptionally(new ServiceUnitNotReadyException(msg));
            } else {
                checkFuture.complete(null);
            }
        });
        return checkFuture;
    }
    /**
     * Blocking variant of {@link #checkTopicNsOwnershipAsync(String)}: rethrows any ownership
     * failure as a {@code BrokerServiceException}, unwrapping the async {@code CompletionException}.
     *
     * @throws BrokerServiceException when this broker does not own the topic's bundle or the check fails
     */
    public void checkTopicNsOwnership(final String topic) throws BrokerServiceException {
        try {
            checkTopicNsOwnershipAsync(topic).join();
        } catch (CompletionException ex) {
            // preserve the original BrokerServiceException subtype when possible
            if (ex.getCause() instanceof BrokerServiceException) {
                throw (BrokerServiceException) ex.getCause();
            }
            throw new BrokerServiceException(ex.getCause());
        } catch (Exception ex) {
            throw new BrokerServiceException(ex);
        }
    }
    /**
     * Unloads all topics of the bundle, capping the wait at the given timeout. On timeout the
     * returned future completes with 0 (not an error) while the unload continues in the background.
     */
    public CompletableFuture<Integer> unloadServiceUnit(NamespaceBundle serviceUnit,
            boolean closeWithoutWaitingClientDisconnect, long timeout, TimeUnit unit) {
        CompletableFuture<Integer> future = unloadServiceUnit(serviceUnit, closeWithoutWaitingClientDisconnect);
        ScheduledFuture<?> taskTimeout = executor().schedule(() -> {
            if (!future.isDone()) {
                log.warn("Unloading of {} has timed out", serviceUnit);
                // Complete the future with no error
                future.complete(0);
            }
        }, timeout, unit);
        // cancel the watchdog as soon as the unload finishes
        future.whenComplete((r, ex) -> taskTimeout.cancel(true));
        return future;
    }
    /**
     * Unload all the topic served by the broker service under the given service unit.
     *
     * @param serviceUnit bundle whose topics should be closed and unloaded
     * @param closeWithoutWaitingClientDisconnect don't wait for clients to disconnect
     *                                           and forcefully close managed-ledger
     * @return future completed with the number of topics that were scheduled for closing
     */
    private CompletableFuture<Integer> unloadServiceUnit(NamespaceBundle serviceUnit,
            boolean closeWithoutWaitingClientDisconnect) {
        List<CompletableFuture<Void>> closeFutures = Lists.newArrayList();
        topics.forEach((name, topicFuture) -> {
            TopicName topicName = TopicName.get(name);
            if (serviceUnit.includes(topicName)) {
                // Topic needs to be unloaded
                log.info("[{}] Unloading topic", topicName);
                closeFutures.add(topicFuture
                        .thenCompose(t -> t.isPresent() ? t.get().close(closeWithoutWaitingClientDisconnect)
                                : CompletableFuture.completedFuture(null)));
            }
        });
        return FutureUtil.waitForAll(closeFutures).thenApply(v -> closeFutures.size());
    }
public void cleanUnloadedTopicFromCache(NamespaceBundle serviceUnit) {
topics.forEach((name, topicFuture) -> {
TopicName topicName = TopicName.get(name);
if (serviceUnit.includes(topicName)) {
pulsar.getBrokerService().removeTopicFromCache(topicName.toString());
}
});
}
    /** Returns the broker's authorization service. */
    public AuthorizationService getAuthorizationService() {
        return authorizationService;
    }
public void removeTopicFromCache(String topic) {
TopicName topicName = null;
NamespaceBundle namespaceBundle = null;
try {
topicName = TopicName.get(topic);
namespaceBundle = pulsar.getNamespaceService().getBundle(topicName);
checkArgument(namespaceBundle instanceof NamespaceBundle);
String bundleName = namespaceBundle.toString();
String namespaceName = topicName.getNamespaceObject().toString();
synchronized (multiLayerTopicsMap) {
ConcurrentOpenHashMap<String, ConcurrentOpenHashMap<String, Topic>> namespaceMap = multiLayerTopicsMap
.get(namespaceName);
ConcurrentOpenHashMap<String, Topic> bundleMap = namespaceMap.get(bundleName);
bundleMap.remove(topic);
if (bundleMap.isEmpty()) {
namespaceMap.remove(bundleName);
}
if (namespaceMap.isEmpty()) {
multiLayerTopicsMap.remove(namespaceName);
final ClusterReplicationMetrics clusterReplicationMetrics = pulsarStats
.getClusterReplicationMetrics();
replicationClients.forEach((cluster, client) -> {
clusterReplicationMetrics.remove(clusterReplicationMetrics.getKeyName(namespaceName, cluster));
});
}
}
} catch (Exception e) {
log.warn("Got exception when retrieving bundle name {} for topic {} during removeTopicFromCache", topicName,
namespaceBundle, e);
}
topics.remove(topic);
}
public int getNumberOfNamespaceBundles() {
this.numberOfNamespaceBundles = 0;
this.multiLayerTopicsMap.forEach((namespaceName, bundles) -> {
this.numberOfNamespaceBundles += bundles.size();
});
return this.numberOfNamespaceBundles;
}
    /** Returns the live topic cache: topic name -> future of the (possibly still loading) topic. */
    public ConcurrentOpenHashMap<String, CompletableFuture<Optional<Topic>>> getTopics() {
        return topics;
    }
    /**
     * Policies-cache callback: applies updated namespace policies to every already-loaded topic of
     * that namespace, then unloads the namespace locally if this cluster was removed from its
     * replication-cluster list.
     */
    @Override
    public void onUpdate(String path, Policies data, Stat stat) {
        final NamespaceName namespace = NamespaceName.get(NamespaceBundleFactory.getNamespaceFromPoliciesPath(path));
        log.info("{} updating with {}", path, data);
        topics.forEach((name, topicFuture) -> {
            if (namespace.includes(TopicName.get(name))) {
                // If the topic is already created, immediately apply the updated policies, otherwise once the topic is
                // created it'll apply the policies update
                topicFuture.thenAccept(topic -> {
                    if (log.isDebugEnabled()) {
                        log.debug("Notifying topic that policies have changed: {}", name);
                    }
                    topic.ifPresent(t -> t.onPoliciesUpdate(data));
                });
            }
        });
        // sometimes, some brokers don't receive policies-update watch and miss to remove replication-cluster and still
        // own the bundle. That can cause data-loss of replicated messages, so proactively unload such bundles here.
        unloadDeletedReplNamespace(data, namespace);
    }
    /**
     * Unloads the namespace bundles if local cluster is not part of replication-cluster list into the namespace.
     * So, broker that owns the bundle and doesn't receive the zk-watch will unload the namespace.
     * @param data updated namespace policies
     * @param namespace namespace the policies belong to; only global namespaces are considered
     */
    private void unloadDeletedReplNamespace(Policies data, NamespaceName namespace) {
        if (!namespace.isGlobal()) {
            return;
        }
        final String localCluster = this.pulsar.getConfiguration().getClusterName();
        if (!data.replication_clusters.contains(localCluster)) {
            try {
                NamespaceBundles bundles = pulsar().getNamespaceService().getNamespaceBundleFactory()
                        .getBundles(namespace);
                bundles.getBundles().forEach(bundle -> {
                    pulsar.getNamespaceService().isNamespaceBundleOwned(bundle).thenAccept(isExist -> {
                        if (isExist) {
                            // unload on the executor: the admin client call below is blocking
                            this.pulsar().getExecutor().submit(() -> {
                                try {
                                    pulsar().getAdminClient().namespaces().unloadNamespaceBundle(namespace.toString(),
                                            bundle.getBundleRange());
                                } catch (Exception e) {
                                    log.error("Failed to unload namespace-bundle {}-{} that not owned by {}, {}",
                                            namespace.toString(), bundle.toString(), localCluster, e.getMessage());
                                }
                            });
                        }
                    });
                });
            } catch (Exception e) {
                log.error("Failed to unload locally not owned bundles {}", e.getMessage(), e);
            }
        }
    }
    /** Returns the owning {@code PulsarService} instance. */
    public PulsarService pulsar() {
        return pulsar;
    }
    /** Returns the broker's shared worker-group executor. */
    public ScheduledExecutorService executor() {
        return workerGroup;
    }
    /** Returns the map of per-cluster Pulsar clients used for geo-replication. */
    public ConcurrentOpenHashMap<String, PulsarClient> getReplicationClients() {
        return replicationClients;
    }
    /** Whether client authentication is enabled in the broker configuration. */
    public boolean isAuthenticationEnabled() {
        return pulsar.getConfiguration().isAuthenticationEnabled();
    }
    /** Whether authorization checks are enabled in the broker configuration. */
    public boolean isAuthorizationEnabled() {
        return pulsar.getConfiguration().isAuthorizationEnabled();
    }
    /** Returns the connection keep-alive interval, in seconds. */
    public int getKeepAliveIntervalSeconds() {
        return keepAliveIntervalSeconds;
    }
    /** Generates a broker-unique producer name. */
    public String generateUniqueProducerName() {
        return producerNameGenerator.getNextId();
    }
public Map<String, TopicStats> getTopicStats() {
HashMap<String, TopicStats> stats = new HashMap<>();
forEachTopic(topic -> stats.put(topic.getName(), topic.getStats(false)));
return stats;
}
    /** Returns the broker's authentication service. */
    public AuthenticationService getAuthenticationService() {
        return authenticationService;
    }
public List<Topic> getAllTopicsFromNamespaceBundle(String namespace, String bundle) {
ConcurrentOpenHashMap<String, ConcurrentOpenHashMap<String, Topic>> map1 = multiLayerTopicsMap.get(namespace);
if (map1 == null) {
return Collections.emptyList();
}
ConcurrentOpenHashMap<String, Topic> map2 = map1.get(bundle);
if (map2 == null) {
return Collections.emptyList();
}
return map2.values();
}
    /** Returns the ZooKeeper-backed cache of dynamic broker configuration. */
    public ZooKeeperDataCache<Map<String, String>> getDynamicConfigurationCache() {
        return dynamicConfigurationCache;
    }
/**
* Update dynamic-ServiceConfiguration with value present into zk-configuration-map and register listeners on
* dynamic-ServiceConfiguration field to take appropriate action on change of zk-configuration-map.
*/
    private void updateConfigurationAndRegisterListeners() {
        // (1) Dynamic-config value validation: add validator if updated value required strict check before considering
        // validate configured load-manager classname present into classpath
        addDynamicConfigValidator("loadManagerClassName", (className) -> {
            try {
                Class.forName(className);
            } catch (ClassNotFoundException | NoClassDefFoundError e) {
                log.warn("Configured load-manager class {} not found {}", className, e.getMessage());
                return false;
            }
            return true;
        });
        // (2) update ServiceConfiguration value by reading zk-configuration-map
        updateDynamicServiceConfiguration();
        // (3) Listener Registration
        // add listener on "maxConcurrentLookupRequest" value change
        registerConfigurationListener("maxConcurrentLookupRequest",
                (maxConcurrentLookupRequest) -> lookupRequestSemaphore.set(
                        new Semaphore((int) maxConcurrentLookupRequest, false)));
        // add listener on "maxConcurrentTopicLoadRequest" value change
        registerConfigurationListener("maxConcurrentTopicLoadRequest",
                (maxConcurrentTopicLoadRequest) -> topicLoadRequestSemaphore.set(
                        new Semaphore((int) maxConcurrentTopicLoadRequest, false)));
        // swap in a freshly-created load manager when its implementation class changes
        registerConfigurationListener("loadManagerClassName", className -> {
            try {
                final LoadManager newLoadManager = LoadManager.create(pulsar);
                log.info("Created load manager: {}", className);
                pulsar.getLoadManager().get().stop();
                newLoadManager.start();
                pulsar.getLoadManager().set(newLoadManager);
            } catch (Exception ex) {
                log.warn("Failed to change load manager", ex);
            }
        });
        // add listener to update message-dispatch-rate in msg for topic
        registerConfigurationListener("dispatchThrottlingRatePerTopicInMsg", (dispatchRatePerTopicInMsg) -> {
            updateTopicMessageDispatchRate();
        });
        // add listener to update message-dispatch-rate in byte for topic
        registerConfigurationListener("dispatchThrottlingRatePerTopicInByte", (dispatchRatePerTopicInByte) -> {
            updateTopicMessageDispatchRate();
        });
        // add listener to update managed-ledger config to skipNonRecoverableLedgers
        registerConfigurationListener("autoSkipNonRecoverableData", (skipNonRecoverableLedger) -> {
            updateManagedLedgerConfig();
        });
        // add listener to update message-dispatch-rate in msg for subscription
        registerConfigurationListener("dispatchThrottlingRatePerSubscriptionInMsg", (dispatchRatePerTopicInMsg) -> {
            updateSubscriptionMessageDispatchRate();
        });
        // add listener to update message-dispatch-rate in byte for subscription
        registerConfigurationListener("dispatchThrottlingRatePerSubscriptionInByte", (dispatchRatePerTopicInByte) -> {
            updateSubscriptionMessageDispatchRate();
        });
        // add listener to update message-dispatch-rate in msg for replicator
        registerConfigurationListener("dispatchThrottlingRatePerReplicatorInMsg",
                (dispatchRatePerTopicInMsg) -> {
                    updateReplicatorMessageDispatchRate();
                });
        // add listener to update message-dispatch-rate in byte for replicator
        registerConfigurationListener("dispatchThrottlingRatePerReplicatorInByte",
                (dispatchRatePerTopicInByte) -> {
                    updateReplicatorMessageDispatchRate();
                });
        // add listener to notify broker publish-rate monitoring
        registerConfigurationListener("brokerPublisherThrottlingTickTimeMillis",
                (publisherThrottlingTickTimeMillis) -> {
                    setupBrokerPublishRateLimiterMonitor();
                });
        // add listener to notify broker publish-rate dynamic config
        registerConfigurationListener("brokerPublisherThrottlingMaxMessageRate",
                (brokerPublisherThrottlingMaxMessageRate) ->
                        updateBrokerPublisherThrottlingMaxRate());
        registerConfigurationListener("brokerPublisherThrottlingMaxByteRate",
                (brokerPublisherThrottlingMaxByteRate) ->
                        updateBrokerPublisherThrottlingMaxRate());
        // add listener to notify topic publish-rate monitoring
        // (precise topic publish rate limiting has its own mechanism and needs no monitor task)
        if (!preciseTopicPublishRateLimitingEnable) {
            registerConfigurationListener("topicPublisherThrottlingTickTimeMillis",
                    (publisherThrottlingTickTimeMillis) -> {
                        setupTopicPublishRateLimiterMonitor();
                    });
        }
        // add more listeners here
    }
    /**
     * Re-reads broker publisher throttling settings and swaps/updates the broker publish rate
     * limiter accordingly: disabled when the tick time or both rates are non-positive, otherwise
     * created (if currently disabled) or updated in place with the new {@code PublishRate}.
     */
    private void updateBrokerPublisherThrottlingMaxRate() {
        int currentMaxMessageRate = pulsar.getConfiguration().getBrokerPublisherThrottlingMaxMessageRate();
        long currentMaxByteRate = pulsar.getConfiguration().getBrokerPublisherThrottlingMaxByteRate();
        int brokerTickMs = pulsar.getConfiguration().getBrokerPublisherThrottlingTickTimeMillis();
        // not enable
        if (brokerTickMs <= 0 || (currentMaxByteRate <= 0 && currentMaxMessageRate <= 0)) {
            if (brokerPublishRateLimiter != PublishRateLimiter.DISABLED_RATE_LIMITER) {
                // re-enable reads on topics that were throttled before disabling the limiter
                refreshBrokerPublishRate();
                brokerPublishRateLimiter = PublishRateLimiter.DISABLED_RATE_LIMITER;
            }
            return;
        }
        final PublishRate publishRate = new PublishRate(currentMaxMessageRate, currentMaxByteRate);
        log.info("Update broker publish rate limiting {}", publishRate);
        // lazy init broker Publish-rateLimiting monitoring if not initialized yet
        this.setupBrokerPublishRateLimiterMonitor();
        if (brokerPublishRateLimiter == null
                || brokerPublishRateLimiter == PublishRateLimiter.DISABLED_RATE_LIMITER) {
            // create new rateLimiter if rate-limiter is disabled
            brokerPublishRateLimiter = new PublishRateLimiterImpl(publishRate);
        } else {
            brokerPublishRateLimiter.update(publishRate);
        }
    }
private void updateTopicMessageDispatchRate() {
this.pulsar().getExecutor().execute(() -> {
// update message-rate for each topic
forEachTopic(topic -> {
if (topic.getDispatchRateLimiter().isPresent()) {
topic.getDispatchRateLimiter().get().updateDispatchRate();
}
});
});
}
private void updateSubscriptionMessageDispatchRate() {
this.pulsar().getExecutor().submit(() -> {
// update message-rate for each topic subscription
forEachTopic(topic -> {
topic.getSubscriptions().forEach((subName, persistentSubscription) -> {
Dispatcher dispatcher = persistentSubscription.getDispatcher();
if (dispatcher != null) {
dispatcher.getRateLimiter().ifPresent(DispatchRateLimiter::updateDispatchRate);
}
});
});
});
}
private void updateReplicatorMessageDispatchRate() {
this.pulsar().getExecutor().submit(() -> {
// update message-rate for each topic Replicator in Geo-replication
forEachTopic(topic ->
topic.getReplicators().forEach((name, persistentReplicator) -> {
if (persistentReplicator.getRateLimiter().isPresent()) {
persistentReplicator.getRateLimiter().get().updateDispatchRate();
}
}));
});
}
    /**
     * Asynchronously propagates the current {@code autoSkipNonRecoverableData} setting into the
     * managed-ledger config of each loaded persistent topic.
     */
    private void updateManagedLedgerConfig() {
        this.pulsar().getExecutor().execute(() -> {
            // update managed-ledger config of each topic
            forEachTopic(topic -> {
                try {
                    if (topic instanceof PersistentTopic) {
                        PersistentTopic persistentTopic = (PersistentTopic) topic;
                        // update skipNonRecoverableLedger configuration
                        persistentTopic.getManagedLedger().getConfig().setAutoSkipNonRecoverableData(
                                pulsar.getConfiguration().isAutoSkipNonRecoverableData());
                    }
                } catch (Exception e) {
                    log.warn("[{}] failed to update managed-ledger config", topic.getName(), e);
                }
            });
        });
    }
/**
* Allows a listener to listen on update of {@link ServiceConfiguration} change, so listener can take appropriate
* action if any specific config-field value has been changed.
*
* On notification, listener should first check if config value has been changed and after taking appropriate
* action, listener should update config value with new value if it has been changed (so, next time listener can
* compare values on configMap change).
* @param <T>
*
* @param configKey
* : configuration field name
* @param listener
* : listener which takes appropriate action on config-value change
*/
    public <T> void registerConfigurationListener(String configKey, Consumer<T> listener) {
        // throws IllegalArgumentException when configKey is not a ServiceConfiguration field
        validateConfigKey(configKey);
        configRegisteredListeners.put(configKey, listener);
    }
private void addDynamicConfigValidator(String key, Predicate<String> validator) {
validateConfigKey(key);
if (dynamicConfigurationMap.containsKey(key)) {
dynamicConfigurationMap.get(key).validator = validator;
}
}
    /**
     * Verifies that {@code key} names a declared {@code ServiceConfiguration} field.
     *
     * @throws IllegalArgumentException if no such field exists (or reflective lookup fails)
     */
    private void validateConfigKey(String key) {
        try {
            ServiceConfiguration.class.getDeclaredField(key);
        } catch (Exception e) {
            log.error("ServiceConfiguration key {} not found {}", key, e.getMessage());
            throw new IllegalArgumentException("Invalid service config " + key, e);
        }
    }
/**
* Updates pulsar.ServiceConfiguration's dynamic field with value persistent into zk-dynamic path. It also validates
* dynamic-value before updating it and throws {@code IllegalArgumentException} if validation fails
*/
private void updateDynamicServiceConfiguration() {
Optional<Map<String, String>> configCache = Optional.empty();
try {
// create dynamic-config znode if not present
if (pulsar.getZkClient().exists(BROKER_SERVICE_CONFIGURATION_PATH, false) == null) {
try {
byte[] data = ObjectMapperFactory.getThreadLocal().writeValueAsBytes(Maps.newHashMap());
ZkUtils.createFullPathOptimistic(pulsar.getZkClient(), BROKER_SERVICE_CONFIGURATION_PATH, data,
Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
} catch (KeeperException.NodeExistsException e) {
// Ok
}
}
configCache = dynamicConfigurationCache.get(BROKER_SERVICE_CONFIGURATION_PATH);
} catch (Exception e) {
log.warn("Failed to read zookeeper path [{}]:", BROKER_SERVICE_CONFIGURATION_PATH, e);
}
configCache.ifPresent(stringStringMap -> stringStringMap.forEach((key, value) -> {
// validate field
if (dynamicConfigurationMap.containsKey(key) && dynamicConfigurationMap.get(key).validator != null) {
if (!dynamicConfigurationMap.get(key).validator.test(value)) {
log.error("Failed to validate dynamic config {} with value {}", key, value);
throw new IllegalArgumentException(
String.format("Failed to validate dynamic-config %s/%s", key, value));
}
}
// update field value
try {
Field field = ServiceConfiguration.class.getDeclaredField(key);
if (field != null && field.isAnnotationPresent(FieldContext.class)) {
field.setAccessible(true);
field.set(pulsar().getConfiguration(), FieldParser.value(value, field));
log.info("Successfully updated {}/{}", key, value);
}
} catch (Exception e) {
log.warn("Failed to update service configuration {}/{}, {}", key, value, e.getMessage());
}
}));
// register a listener: it updates field value and triggers appropriate registered field-listener only if
// field's value has been changed so, registered doesn't have to update field value in ServiceConfiguration
dynamicConfigurationCache.registerListener(new ZooKeeperCacheListener<Map<String, String>>() {
@SuppressWarnings("unchecked")
@Override
public void onUpdate(String path, Map<String, String> data, Stat stat) {
if (BROKER_SERVICE_CONFIGURATION_PATH.equalsIgnoreCase(path) && data != null) {
data.forEach((configKey, value) -> {
Field configField = dynamicConfigurationMap.get(configKey).field;
Object newValue = FieldParser.value(data.get(configKey), configField);
if (configField != null) {
Consumer listener = configRegisteredListeners.get(configKey);
try {
Object existingValue = configField.get(pulsar.getConfiguration());
configField.set(pulsar.getConfiguration(), newValue);
log.info("Successfully updated configuration {}/{}", configKey,
data.get(configKey));
if (listener != null && !existingValue.equals(newValue)) {
listener.accept(newValue);
}
} catch (Exception e) {
log.error("Failed to update config {}/{}", configKey, newValue);
}
} else {
log.error("Found non-dynamic field in dynamicConfigMap {}/{}", configKey, newValue);
}
});
}
}
});
}
    /** Returns the factory used to create delayed-delivery trackers. */
    public DelayedDeliveryTrackerFactory getDelayedDeliveryTrackerFactory() {
        return delayedDeliveryTrackerFactory;
    }
    /** Returns the names of all dynamically-updatable configuration fields. */
    public static List<String> getDynamicConfiguration() {
        return dynamicConfigurationMap.keys();
    }
public Map<String, String> getRuntimeConfiguration() {
Map<String, String> configMap = Maps.newHashMap();
ConcurrentOpenHashMap<String, Object> runtimeConfigurationMap = getRuntimeConfigurationMap();
runtimeConfigurationMap.forEach((key, value) -> {
configMap.put(key, String.valueOf(value));
});
return configMap;
}
    /** Whether the given configuration key is registered as dynamically updatable. */
    public static boolean isDynamicConfiguration(String key) {
        return dynamicConfigurationMap.containsKey(key);
    }
public static boolean validateDynamicConfiguration(String key, String value) {
if (dynamicConfigurationMap.containsKey(key) && dynamicConfigurationMap.get(key).validator != null) {
return dynamicConfigurationMap.get(key).validator.test(value);
}
return true;
}
private static ConcurrentOpenHashMap<String, ConfigField> prepareDynamicConfigurationMap() {
ConcurrentOpenHashMap<String, ConfigField> dynamicConfigurationMap = new ConcurrentOpenHashMap<>();
for (Field field : ServiceConfiguration.class.getDeclaredFields()) {
if (field != null && field.isAnnotationPresent(FieldContext.class)) {
field.setAccessible(true);
if (field.getAnnotation(FieldContext.class).dynamic()) {
dynamicConfigurationMap.put(field.getName(), new ConfigField(field));
}
}
}
return dynamicConfigurationMap;
}
private ConcurrentOpenHashMap<String, Object> getRuntimeConfigurationMap() {
ConcurrentOpenHashMap<String, Object> runtimeConfigurationMap = new ConcurrentOpenHashMap<>();
for (Field field : ServiceConfiguration.class.getDeclaredFields()) {
if (field != null && field.isAnnotationPresent(FieldContext.class)) {
field.setAccessible(true);
try {
Object configValue = field.get(pulsar.getConfiguration());
runtimeConfigurationMap.put(field.getName(), configValue == null ? "" : configValue);
} catch (Exception e) {
log.error("Failed to get value of field {}, {}", field.getName(), e.getMessage());
}
}
}
return runtimeConfigurationMap;
}
/**
* Create pending topic and on completion it picks the next one until processes all topics in
* {@link #pendingTopicLoadingQueue}.<br/>
* It also tries to acquire {@link #topicLoadRequestSemaphore} so throttle down newly incoming topics and release
* permit if it was successful to acquire it.
*/
    private void createPendingLoadTopic() {
        Pair<String, CompletableFuture<Optional<Topic>>> pendingTopic = pendingTopicLoadingQueue.poll();
        if (pendingTopic == null) {
            // nothing queued
            return;
        }
        final String topic = pendingTopic.getLeft();
        try {
            checkTopicNsOwnership(topic);
            CompletableFuture<Optional<Topic>> pendingFuture = pendingTopic.getRight();
            final Semaphore topicLoadSemaphore = topicLoadRequestSemaphore.get();
            // best-effort permit: creation proceeds even without one, but only acquired permits are released
            final boolean acquiredPermit = topicLoadSemaphore.tryAcquire();
            createPersistentTopic(topic, true, pendingFuture);
            pendingFuture.handle((persistentTopic, ex) -> {
                // release permit and process next pending topic
                if (acquiredPermit) {
                    topicLoadSemaphore.release();
                }
                createPendingLoadTopic();
                return null;
            });
        } catch (Exception e) {
            log.error("Failed to create pending topic {}", topic, e);
            pendingTopic.getRight()
                    .completeExceptionally((e instanceof RuntimeException && e.getCause() != null) ? e.getCause() : e);
            // schedule to process next pending topic
            inactivityMonitor.schedule(() -> createPendingLoadTopic(), 100, TimeUnit.MILLISECONDS);
        }
    }
    /**
     * Fetches partitioned-topic metadata, first auto-creating a default partitioned topic when the
     * topic does not yet exist, auto-creation is allowed, and the default topic type is partitioned.
     */
    public CompletableFuture<PartitionedTopicMetadata> fetchPartitionedTopicMetadataCheckAllowAutoCreationAsync(
            TopicName topicName) {
        if (pulsar.getNamespaceService() == null) {
            return FutureUtil.failedFuture(new NamingException("namespace service is not ready"));
        }
        return pulsar.getNamespaceService().checkTopicExists(topicName)
                .thenCompose(topicExists -> {
                    return fetchPartitionedTopicMetadataAsync(topicName)
                            .thenCompose(metadata -> {
                                // If topic is already exist, creating partitioned topic is not allowed.
                                if (metadata.partitions == 0
                                        && !topicExists
                                        && !topicName.isPartitioned()
                                        && pulsar.getBrokerService().isAllowAutoTopicCreation(topicName)
                                        && pulsar.getBrokerService().isDefaultTopicTypePartitioned(topicName)) {
                                    return pulsar.getBrokerService().createDefaultPartitionedTopicAsync(topicName);
                                } else {
                                    return CompletableFuture.completedFuture(metadata);
                                }
                            });
                });
    }
    /**
     * Creates the partitioned-topic metadata znode for {@code topicName} with the configured default
     * partition count, then syncs ZK so quorum members and observers see the new node before the
     * returned future completes.
     */
    @SuppressWarnings("deprecation")
    private CompletableFuture<PartitionedTopicMetadata> createDefaultPartitionedTopicAsync(TopicName topicName) {
        final int defaultNumPartitions = pulsar.getBrokerService().getDefaultNumPartitions(topicName);
        final int maxPartitions = pulsar().getConfig().getMaxNumPartitionsPerPartitionedTopic();
        checkArgument(defaultNumPartitions > 0,
                "Default number of partitions should be more than 0");
        // maxPartitions <= 0 means "no limit"
        checkArgument(maxPartitions <= 0 || defaultNumPartitions <= maxPartitions,
                "Number of partitions should be less than or equal to " + maxPartitions);
        PartitionedTopicMetadata configMetadata = new PartitionedTopicMetadata(defaultNumPartitions);
        CompletableFuture<PartitionedTopicMetadata> partitionedTopicFuture = futureWithDeadline();
        if (!checkMaxTopicsPerNamespace(topicName, defaultNumPartitions, partitionedTopicFuture)) {
            return partitionedTopicFuture;
        }
        try {
            byte[] content = ObjectMapperFactory.getThreadLocal().writeValueAsBytes(configMetadata);
            ZkUtils.asyncCreateFullPathOptimistic(pulsar.getGlobalZkCache().getZooKeeper(),
                    partitionedTopicPath(topicName), content,
                    ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT, (rc, path1, ctx, name) -> {
                        if (rc == KeeperException.Code.OK.intValue()) {
                            // Sync data to all quorums and the observers
                            pulsar.getGlobalZkCache().getZooKeeper().sync(partitionedTopicPath(topicName),
                                    (rc2, path2, ctx2) -> {
                                        if (rc2 == KeeperException.Code.OK.intValue()) {
                                            partitionedTopicFuture.complete(configMetadata);
                                        } else {
                                            partitionedTopicFuture.completeExceptionally(KeeperException.create(rc2));
                                        }
                                    }, null);
                        } else {
                            partitionedTopicFuture.completeExceptionally(KeeperException.create(rc));
                        }
                    }, null);
        } catch (Exception e) {
            log.error("Failed to create default partitioned topic.", e);
            return FutureUtil.failedFuture(e);
        }
        return partitionedTopicFuture;
    }
/**
 * Looks up the partitioned-topic metadata (number of partitions) for the given topic
 * from the global ZooKeeper cache.
 *
 * @param topicName the topic to look up
 * @return a future holding the stored metadata; when no znode exists, a default
 *         {@link PartitionedTopicMetadata} (zero partitions, i.e. non-partitioned topic)
 */
public CompletableFuture<PartitionedTopicMetadata> fetchPartitionedTopicMetadataAsync(TopicName topicName) {
    // Deserialize the znode content into the metadata object; absence of the znode
    // means the topic is not partitioned.
    return pulsar.getGlobalZkCache()
            .getDataAsync(partitionedTopicPath(topicName), (key, content) ->
                    ObjectMapperFactory.getThreadLocal().readValue(content, PartitionedTopicMetadata.class))
            .thenApply(metadata -> metadata.orElseGet(PartitionedTopicMetadata::new));
}
/**
 * Builds the ZooKeeper path where the partitioned-topic metadata of the given topic lives:
 * {@code <root>/<namespace>/<domain>/<encoded-local-name>}.
 */
private static String partitionedTopicPath(TopicName topicName) {
    return String.join("/",
            ConfigurationCacheService.PARTITIONED_TOPICS_ROOT,
            topicName.getNamespace(),
            String.valueOf(topicName.getDomain()),
            topicName.getEncodedLocalName());
}
/**
 * Returns the executor used to run per-topic tasks in order.
 */
public OrderedExecutor getTopicOrderedExecutor() {
    return topicOrderedExecutor;
}
/**
 * Returns the namespace -> bundle -> topic-name lookup map of loaded topics.
 */
public ConcurrentOpenHashMap<String, ConcurrentOpenHashMap<String, ConcurrentOpenHashMap<String, Topic>>>
getMultiLayerTopicMap() {
    return multiLayerTopicsMap;
}
/**
 * If per-broker unacked message reached to limit then it blocks dispatcher if its unacked message limit has been
 * reached to {@link #maxUnackedMsgsPerDispatcher}.
 *
 * @param dispatcher the dispatcher whose unacked-message count just changed
 * @param numberOfMessages delta added to the broker-wide unacked counter (negative values represent acks)
 */
public void addUnAckedMessages(PersistentDispatcherMultipleConsumers dispatcher, int numberOfMessages) {
    // don't block dispatchers if maxUnackedMessages = 0
    if (maxUnackedMessages > 0) {
        totalUnackedMessages.add(numberOfMessages);
        // block dispatcher: if broker is already blocked and dispatcher reaches to max dispatcher limit when broker
        // is blocked
        if (blockedDispatcherOnHighUnackedMsgs.get() && !dispatcher.isBlockedDispatcherOnUnackedMsgs()
                && dispatcher.getTotalUnackedMessages() > maxUnackedMsgsPerDispatcher) {
            // NOTE(review): the READ lock is taken here even though blockedDispatchers is mutated.
            // Presumably blockedDispatchers is a concurrent set and the read/write lock only fences
            // the block phase against the unblock phase (which takes the write lock in
            // unblockDispatchersOnUnAckMessages) - confirm before changing the lock mode.
            lock.readLock().lock();
            try {
                log.info("[{}] dispatcher reached to max unack msg limit on blocked-broker {}",
                        dispatcher.getName(), dispatcher.getTotalUnackedMessages());
                dispatcher.blockDispatcherOnUnackedMsgs();
                blockedDispatchers.add(dispatcher);
            } finally {
                lock.readLock().unlock();
            }
        }
    }
}
/**
 * Adds given dispatcher's unackMessage count to broker-unack message count and if it reaches to the
 * {@link #maxUnackedMessages} then it blocks all the dispatchers which has unack-messages higher than
 * {@link #maxUnackedMsgsPerDispatcher}. It unblocks all dispatchers once broker-unack message counts decreased to
 * ({@link #maxUnackedMessages}/2)
 *
 */
public void checkUnAckMessageDispatching() {
    // don't block dispatchers if maxUnackedMessages = 0
    if (maxUnackedMessages <= 0) {
        return;
    }
    long unAckedMessages = totalUnackedMessages.sum();
    if (unAckedMessages >= maxUnackedMessages && blockedDispatcherOnHighUnackedMsgs.compareAndSet(false, true)) {
        // block dispatcher with higher unack-msg when it reaches broker-unack msg limit
        // Fix: log the actual outstanding count and the broker-wide limit that was hit;
        // previously this printed the two configured limits (maxUnackedMessages,
        // maxUnackedMsgsPerDispatcher), which misread as the current count/limit pair.
        log.info("Starting blocking dispatchers with unacked msgs {} due to reached max broker limit {}",
                unAckedMessages, maxUnackedMessages);
        executor().execute(this::blockDispatchersWithLargeUnAckMessages);
    } else if (blockedDispatcherOnHighUnackedMsgs.get() && unAckedMessages < maxUnackedMessages / 2) {
        // unblock broker-dispatching if received enough acked messages back
        if (blockedDispatcherOnHighUnackedMsgs.compareAndSet(true, false)) {
            unblockDispatchersOnUnAckMessages(blockedDispatchers.values());
        }
    }
}
/**
 * Returns {@code true} while broker-wide dispatching is blocked due to the unacked-message limit.
 */
public boolean isBrokerDispatchingBlocked() {
    return blockedDispatcherOnHighUnackedMsgs.get();
}
/**
 * Scans every loaded topic's subscriptions and blocks each multi-consumer dispatcher whose
 * unacked-message count exceeds {@link #maxUnackedMsgsPerDispatcher}, recording it in
 * {@link #blockedDispatchers}.
 */
private void blockDispatchersWithLargeUnAckMessages() {
    // NOTE(review): read lock held while blockedDispatchers is mutated; the write lock in
    // unblockDispatchersOnUnAckMessages appears to be the exclusive counterpart - confirm.
    lock.readLock().lock();
    try {
        forEachTopic(topic -> {
            topic.getSubscriptions().forEach((subName, persistentSubscription) -> {
                // Only shared (multi-consumer) dispatchers participate in unacked-message blocking.
                if (persistentSubscription.getDispatcher() instanceof PersistentDispatcherMultipleConsumers) {
                    PersistentDispatcherMultipleConsumers dispatcher =
                            (PersistentDispatcherMultipleConsumers) persistentSubscription
                                    .getDispatcher();
                    int dispatcherUnAckMsgs = dispatcher.getTotalUnackedMessages();
                    if (dispatcherUnAckMsgs > maxUnackedMsgsPerDispatcher) {
                        log.info("[{}] Blocking dispatcher due to reached max broker limit {}",
                                dispatcher.getName(), dispatcher.getTotalUnackedMessages());
                        dispatcher.blockDispatcherOnUnackedMsgs();
                        blockedDispatchers.add(dispatcher);
                    }
                }
            });
        });
    } finally {
        lock.readLock().unlock();
    }
}
/**
 * Unblocks every dispatcher in the given list, schedules it to resume reading entries, and
 * drops it from the {@link #blockedDispatchers} set.
 *
 * @param dispatcherList dispatchers to unblock
 */
public void unblockDispatchersOnUnAckMessages(List<PersistentDispatcherMultipleConsumers> dispatcherList) {
    lock.writeLock().lock();
    try {
        for (PersistentDispatcherMultipleConsumers blockedDispatcher : dispatcherList) {
            blockedDispatcher.unBlockDispatcherOnUnackedMsgs();
            // Resume reads asynchronously so the write lock is not held during dispatch.
            executor().execute(blockedDispatcher::readMoreEntries);
            log.info("[{}] Dispatcher is unblocked", blockedDispatcher.getName());
            blockedDispatchers.remove(blockedDispatcher);
        }
    } finally {
        lock.writeLock().unlock();
    }
}
/**
 * Pairs a reflected configuration {@link Field} with an optional validator applied to a
 * candidate (string) value before a dynamic-config update is accepted.
 */
private static class ConfigField {
    // Reflected field on the service configuration object.
    final Field field;
    // Optional predicate validating a new value; null means "no validation".
    Predicate<String> validator;

    public ConfigField(Field field) {
        // Removed the redundant explicit super() call - it is implicit for Object subclasses.
        this.field = field;
    }
}
/**
 * Safely extract optional topic instance from a future, in a way to avoid unchecked exceptions
 * and race conditions.
 */
public static Optional<Topic> extractTopic(CompletableFuture<Optional<Topic>> topicFuture) {
    // Guard clauses: anything still pending or completed exceptionally yields "no topic",
    // so join() below can never block or throw.
    if (!topicFuture.isDone() || topicFuture.isCompletedExceptionally()) {
        return Optional.empty();
    }
    return topicFuture.join();
}
/**
 * Returns the local port of the plaintext listener, or empty when the broker is not listening.
 */
public Optional<Integer> getListenPort() {
    return listenChannel == null
            ? Optional.empty()
            : Optional.of(((InetSocketAddress) listenChannel.localAddress()).getPort());
}
/**
 * Returns the local port of the TLS listener, or empty when TLS is not enabled.
 */
public Optional<Integer> getListenPortTls() {
    return listenChannelTls == null
            ? Optional.empty()
            : Optional.of(((InetSocketAddress) listenChannelTls.localAddress()).getPort());
}
/**
 * Checks the aggregate publish-buffer usage across all connections and toggles producer
 * reads: disabled when usage reaches {@code maxMessagePublishBufferBytes}, re-enabled once it
 * drops below {@code resumeProducerReadMessagePublishBufferBytes}.
 */
@VisibleForTesting
void checkMessagePublishBuffer() {
    // Reuse the shared accounting helper instead of duplicating the per-connection sum here
    // (the previous implementation repeated getCurrentMessagePublishBufferSize()'s logic).
    final long currentMessagePublishBufferBytes = getCurrentMessagePublishBufferSize();
    if (currentMessagePublishBufferBytes >= maxMessagePublishBufferBytes
            && !reachMessagePublishBufferThreshold) {
        reachMessagePublishBufferThreshold = true;
        forEachTopic(topic -> ((AbstractTopic) topic).disableProducerRead());
    }
    if (currentMessagePublishBufferBytes < resumeProducerReadMessagePublishBufferBytes
            && reachMessagePublishBufferThreshold) {
        reachMessagePublishBufferThreshold = false;
        forEachTopic(topic -> ((AbstractTopic) topic).enableProducerReadForPublishBufferLimiting());
    }
}
/**
 * Applies the given consumer once to every distinct connection that currently owns a
 * producer on any loaded topic.
 */
private void foreachCnx(Consumer<TransportCnx> consumer) {
    // De-duplicate first: a connection with producers on several topics is visited once.
    final Set<TransportCnx> connections = new HashSet<>();
    topics.forEach((name, topicFuture) ->
            extractTopic(topicFuture).ifPresent(topic ->
                    topic.getProducers().values()
                            .forEach(producer -> connections.add(producer.getCnx()))));
    connections.forEach(consumer);
}
/**
 * Returns {@code true} while the aggregate publish buffer has hit its limit and producer
 * reads are disabled.
 */
public boolean isReachMessagePublishBufferThreshold() {
    return reachMessagePublishBufferThreshold;
}
/**
 * Sums the publish-buffer bytes currently held by every producer-owning connection.
 */
@VisibleForTesting
long getCurrentMessagePublishBufferSize() {
    // AtomicLong used only as a mutable accumulator for the lambda; no concurrency implied.
    AtomicLong currentMessagePublishBufferBytes = new AtomicLong();
    foreachCnx(cnx -> currentMessagePublishBufferBytes.addAndGet(cnx.getMessagePublishBufferSize()));
    return currentMessagePublishBufferBytes.get();
}
/**
 * Convenience overload of {@link #isAllowAutoTopicCreation(TopicName)} taking the topic as a string.
 */
public boolean isAllowAutoTopicCreation(final String topic) {
    return isAllowAutoTopicCreation(TopicName.get(topic));
}
/**
 * Whether auto topic creation is allowed for this topic: the namespace-level override wins
 * when present, otherwise the broker-level configuration applies.
 */
public boolean isAllowAutoTopicCreation(final TopicName topicName) {
    final AutoTopicCreationOverride override = getAutoTopicCreationOverride(topicName);
    return override != null
            ? override.allowAutoTopicCreation
            : pulsar.getConfiguration().isAllowAutoTopicCreation();
}
/**
 * Whether auto-created topics default to the partitioned type: the namespace-level override
 * wins when present, otherwise the broker-level configuration applies.
 */
public boolean isDefaultTopicTypePartitioned(final TopicName topicName) {
    final AutoTopicCreationOverride override = getAutoTopicCreationOverride(topicName);
    return override != null
            ? TopicType.PARTITIONED.toString().equals(override.topicType)
            : pulsar.getConfiguration().isDefaultTopicTypePartitioned();
}
/**
 * Default partition count for auto-created partitioned topics: the namespace-level override
 * wins when present, otherwise the broker-level configuration applies.
 */
public int getDefaultNumPartitions(final TopicName topicName) {
    final AutoTopicCreationOverride override = getAutoTopicCreationOverride(topicName);
    return override != null
            ? override.defaultNumPartitions
            : pulsar.getConfiguration().getDefaultNumPartitions();
}
/**
 * Reads the namespace-level auto-topic-creation override for the given topic's namespace.
 *
 * @return the override from namespace policies, or {@code null} when no policies exist,
 *         the field is unset, or the lookup failed (callers then fall back to broker config)
 */
private AutoTopicCreationOverride getAutoTopicCreationOverride(final TopicName topicName) {
    try {
        Optional<Policies> policies = pulsar.getConfigurationCache().policiesCache()
                .get(AdminResource.path(POLICIES, topicName.getNamespace()));
        // If namespace policies have the field set, it will override the broker-level setting
        if (policies.isPresent() && policies.get().autoTopicCreationOverride != null) {
            return policies.get().autoTopicCreationOverride;
        }
    } catch (Throwable t) {
        // Ignoring since if we don't have policies, we fallback on the default
        log.warn("Got exception when reading autoTopicCreateOverride policy for {}: {};",
                topicName, t.getMessage(), t);
        return null;
    }
    // Guard the debug log: this method runs on every topic-creation check, so avoid the
    // argument evaluation cost when debug logging is off.
    if (log.isDebugEnabled()) {
        log.debug("No autoTopicCreateOverride policy found for {}", topicName);
    }
    return null;
}
/**
 * Convenience overload of {@link #isAllowAutoSubscriptionCreation(TopicName)} taking the topic as a string.
 */
public boolean isAllowAutoSubscriptionCreation(final String topic) {
    return isAllowAutoSubscriptionCreation(TopicName.get(topic));
}
/**
 * Whether auto subscription creation is allowed for this topic: the namespace-level override
 * wins when present, otherwise the broker-level configuration applies.
 */
public boolean isAllowAutoSubscriptionCreation(final TopicName topicName) {
    final AutoSubscriptionCreationOverride override = getAutoSubscriptionCreationOverride(topicName);
    return override != null
            ? override.allowAutoSubscriptionCreation
            : pulsar.getConfiguration().isAllowAutoSubscriptionCreation();
}
/**
 * Reads the namespace-level auto-subscription-creation override for the given topic's namespace.
 *
 * @return the override from namespace policies, or {@code null} when no policies exist,
 *         the field is unset, or the lookup failed (callers then fall back to broker config)
 */
private AutoSubscriptionCreationOverride getAutoSubscriptionCreationOverride(final TopicName topicName) {
    try {
        Optional<Policies> policies = pulsar.getConfigurationCache().policiesCache()
                .get(AdminResource.path(POLICIES, topicName.getNamespace()));
        // If namespace policies have the field set, it will override the broker-level setting
        if (policies.isPresent() && policies.get().autoSubscriptionCreationOverride != null) {
            return policies.get().autoSubscriptionCreationOverride;
        }
    } catch (Throwable t) {
        // Ignoring since if we don't have policies, we fallback on the default
        log.warn("Got exception when reading autoSubscriptionCreateOverride policy for {}: {};",
                topicName, t.getMessage(), t);
        return null;
    }
    // Guard the debug log: this method runs on every subscription-creation check, so avoid
    // the argument evaluation cost when debug logging is off.
    if (log.isDebugEnabled()) {
        log.debug("No autoSubscriptionCreateOverride policy found for {}", topicName);
    }
    return null;
}
/**
 * Returns {@code true} when the given topic name denotes a Pulsar system topic.
 */
private boolean isSystemTopic(String topic) {
    return SystemTopicClient.isSystemTopic(TopicName.get(topic));
}
/**
 * Get {@link TopicPolicies} for this topic.
 *
 * @param topicName the topic to look up; for a partition, the policies of its parent
 *                  partitioned topic are used
 * @return the topic's {@link TopicPolicies} if present; {@code null} when the policies cache
 *         is not yet initialized or topic-level policies are disabled
 */
public TopicPolicies getTopicPolicies(TopicName topicName) {
    TopicName cloneTopicName = topicName;
    if (topicName.isPartitioned()) {
        // Topic-level policies are stored against the partitioned topic, not each partition.
        cloneTopicName = TopicName.get(topicName.getPartitionedTopicName());
    }
    try {
        checkTopicLevelPolicyEnable();
        return pulsar.getTopicPoliciesService().getTopicPolicies(cloneTopicName);
    } catch (BrokerServiceException.TopicPoliciesCacheNotInitException e) {
        log.warn("Topic {} policies cache have not init.", topicName.getPartitionedTopicName());
        return null;
    } catch (RestException | NullPointerException e) {
        // NOTE(review): NullPointerException is caught as a stand-in for "policies service not
        // initialized"; relying on NPE for control flow is fragile - consider an explicit check.
        log.warn("Topic level policies are not enabled. "
                + "Please refer to systemTopicEnabled and topicLevelPoliciesEnabled on broker.conf");
        return null;
    }
}
/**
 * Throws a 405 {@link RestException} when topic-level policies are disabled in the broker
 * configuration.
 *
 * @throws RestException with status {@code METHOD_NOT_ALLOWED} when disabled
 */
private void checkTopicLevelPolicyEnable() {
    if (!pulsar().getConfig().isTopicLevelPoliciesEnabled()) {
        // Fixed the garbled user-facing message ("policies is disabled, to enable ... and retry").
        throw new RestException(Response.Status.METHOD_NOT_ALLOWED,
                "Topic level policies are disabled. Please enable topic level policies and retry.");
    }
}
/**
 * Verifies that creating {@code numPartitions} additional topics in the namespace of
 * {@code topicName} would not exceed the configured {@code maxTopicsPerNamespace}
 * (a value of 0 or less disables the check).
 *
 * @return {@code true} when creation may proceed; {@code false} when the limit would be
 *         exceeded or the check itself failed - in both cases {@code topicFuture} has been
 *         completed exceptionally before returning
 */
private <T> boolean checkMaxTopicsPerNamespace(TopicName topicName, int numPartitions,
                                               CompletableFuture<T> topicFuture) {
    final int maxTopicsPerNamespace = pulsar().getConfig().getMaxTopicsPerNamespace();
    if (maxTopicsPerNamespace > 0) {
        try {
            // Count existing topics by listing the managed-ledger znodes for this namespace/domain.
            String partitionedTopicPath = PulsarWebResource.joinPath(MANAGED_LEDGER_PATH_ZNODE,
                    topicName.getNamespace(), topicName.getDomain().value());
            List<String> topics = pulsar().getGlobalZkCache().getZooKeeper()
                    .getChildren(partitionedTopicPath, false);
            if (topics.size() + numPartitions > maxTopicsPerNamespace) {
                log.error("Failed to create persistent topic {}, "
                        + "exceed maximum number of topics in namespace", topicName);
                topicFuture.completeExceptionally(new RestException(Response.Status.PRECONDITION_FAILED,
                        "Exceed maximum number of topics in namespace."));
                return false;
            }
        } catch (KeeperException.NoNodeException e) {
            // NoNode means there are no partitioned topics in this domain for this namespace
        } catch (Exception e) {
            log.error("Failed to create partitioned topic {}", topicName, e);
            topicFuture.completeExceptionally(new RestException(e));
            return false;
        }
    }
    return true;
}
/**
 * Installs the broker interceptor invoked around broker operations.
 */
public void setInterceptor(BrokerInterceptor interceptor) {
    this.interceptor = interceptor;
}
/**
 * Returns the configured broker-entry-metadata interceptors.
 */
public Set<BrokerEntryMetadataInterceptor> getBrokerEntryMetadataInterceptors() {
    return brokerEntryMetadataInterceptors;
}
/**
 * Returns {@code true} when at least one broker-entry-metadata interceptor is configured.
 */
public boolean isBrokerEntryMetadataEnabled() {
    // Idiomatic emptiness check instead of size() > 0.
    return !brokerEntryMetadataInterceptors.isEmpty();
}
}
|
// Copyright 2000-2019 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package org.jetbrains.plugins.ruby.motion.bridgesupport;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.util.Trinity;
import com.intellij.openapi.vfs.LocalFileSystem;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.reference.SoftReference;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.plugins.ruby.motion.symbols.MotionSymbolUtil;
import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
/**
 * Lazily-loaded metadata for a framework described by a bridgesupport file on disk.
 * The parsed {@link Framework} and the derived selector-name sets are cached behind
 * {@link SoftReference}s so they can be reclaimed under memory pressure and transparently
 * reloaded on next access.
 */
public class FrameworkInfo {
    private static final Logger LOG = Logger.getInstance(FrameworkInfo.class);

    // Path of the bridgesupport file this info is loaded from.
    @NotNull private final String myBridgeSupportPath;
    @NotNull private final String myName;
    @NotNull private final String myVersion;
    // Whether this framework targets OSX (passed through to the bridgesupport reader).
    private final boolean myOSX;

    // Soft caches; null or a cleared reference means "reload from disk on next access".
    private SoftReference<Framework> myFramework = null;
    private SoftReference<Set<String>> myIdSelectorNames = null;
    private SoftReference<Set<String>> mySelectorNames = null;

    public FrameworkInfo(@NotNull String name, @NotNull String version, boolean isOSX, @NotNull String bridgeSupportFilePath) {
        myBridgeSupportPath = bridgeSupportFilePath;
        myName = name;
        myOSX = isOSX;
        myVersion = version;
    }

    @NotNull
    public String getName() {
        return myName;
    }

    /**
     * Returns the parsed framework, reloading from disk if the soft cache was cleared;
     * {@code null} when the bridgesupport file is missing or unreadable.
     */
    @Nullable
    public synchronized Framework getFramework() {
        Framework result = SoftReference.dereference(myFramework);
        return result != null ? result : reloadFramework().first;
    }

    /**
     * Returns the names of selectors whose functions are id-returning (per
     * {@link Function#isId()}), reloading from disk if necessary; {@code null} on load failure.
     */
    @Nullable
    public synchronized Set<String> getIdSelectorNames() {
        Set<String> result = SoftReference.dereference(myIdSelectorNames);
        return result != null ? result : reloadFramework().second;
    }

    /**
     * Returns all selector (function) names declared by this framework's classes, reloading
     * from disk if necessary; {@code null} on load failure.
     */
    @Nullable
    public synchronized Set<String> getSelectorNames() {
        Set<String> result = SoftReference.dereference(mySelectorNames);
        return result != null ? result : reloadFramework().third;
    }

    /**
     * Parses the bridgesupport file and rebuilds all three caches in one pass.
     * Returns (framework, idSelectorNames, selectorNames); each component is {@code null}
     * when loading failed, and the corresponding cache field is reset to {@code null} too.
     */
    private Trinity<Framework, Set<String>, Set<String>> reloadFramework() {
        VirtualFile file = LocalFileSystem.getInstance().findFileByPath(myBridgeSupportPath);
        if (file == null) return Trinity.create(null, null, null);
        Set<String> idSelectorNames = null;
        Set<String> selectorNames = null;
        Framework framework = null;
        try {
            framework = BridgeSupportReader.read(myName, myVersion, file.getInputStream(), myOSX);
            idSelectorNames = new HashSet<>();
            selectorNames = new HashSet<>();
            // Derive both selector-name sets in a single walk over the framework's classes.
            for (Class clazz : framework.getClasses()) {
                for (Function function : clazz.getFunctions()) {
                    if (function.isId()) {
                        idSelectorNames.addAll(MotionSymbolUtil.getSelectorNames(function));
                    }
                    selectorNames.add(function.getName());
                }
            }
        }
        catch (IOException e) {
            LOG.error("Failed to load bridgesupport file", e);
        }
        myFramework = framework != null ? new SoftReference<>(framework) : null;
        myIdSelectorNames = idSelectorNames != null ? new SoftReference<>(idSelectorNames) : null;
        mySelectorNames = selectorNames != null ? new SoftReference<>(selectorNames) : null;
        return Trinity.create(framework, idSelectorNames, selectorNames);
    }
}
|
/*
* This file is part of SpongeAPI, licensed under the MIT License (MIT).
*
* Copyright (c) SpongePowered <https://www.spongepowered.org>
* Copyright (c) contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package org.spongepowered.api.scoreboard;
import org.spongepowered.api.Sponge;
import org.spongepowered.api.entity.living.player.Player;
import org.spongepowered.api.text.Text;
import org.spongepowered.api.text.format.TextColor;
import org.spongepowered.api.util.ResettableBuilder;
import java.util.Optional;
import java.util.Set;
/**
 * A team on a scoreboard that has a common display theme and other
 * properties.
 *
 * <p>A team is comprised of different members, represented by {@link Text} objects.
 * While any {@link Text} can be added to a team, certain {@link Text}s convey a special
 * meaning.</p>
 *
 * <p>Examples of this include players, whose names gain the prefix and suffix
 * of the team they are on.</p>
 *
 * <p>With the exception of {@link #getNameTagVisibility()} (which is handled client-side),
 * all of the team options require players to have the same team object (and by
 * extension, the same scoreboard).
 *
 * For example, consider two players who each have different scoreboards set.
 * Each scoreboard has a team registered with identical names, each containing
 * the same players. Both players would always be able to attack each other,
 * regardless of the value of {@link #allowFriendlyFire()}.
 *
 * For it to work, both players must have the same scoreboard, and be on a team
 * registered to said scoreboard.</p>
 */
public interface Team {
    /**
     * Creates a new {@link Builder} to build a {@link Team}.
     *
     * @return The new builder
     */
    static Builder builder() {
        return Sponge.getRegistry().createBuilder(Builder.class);
    }

    /**
     * Gets the name of this team.
     *
     * @return The name of this team
     */
    String getName();

    /**
     * Gets the name displayed to users for this team.
     *
     * @return The display name for this team
     */
    Text getDisplayName();

    /**
     * Sets the name displayed to users for this team.
     *
     * @param displayName The {@link Text} to use
     * @throws IllegalArgumentException If displayName is longer than 32
     *     characters (in its legacy representation)
     */
    void setDisplayName(Text displayName) throws IllegalArgumentException;

    /**
     * Gets the color of this team.
     *
     * <p>The team's color is a distinct concept from its prefix or suffix.
     * It is only used for colored sidebar display slots, and certain statistic
     * criteria.</p>
     *
     * @return The team color
     */
    TextColor getColor();

    /**
     * Sets the color of this team.
     *
     * <p>The team's color is a distinct concept from its prefix or suffix.
     * It is only used for colored sidebar display slots, and certain statistic
     * criteria.</p>
     *
     * @param color The team color
     */
    void setColor(TextColor color);

    /**
     * Gets the prefix prepended to the display name of users on this team.
     *
     * @return The prefix for this team
     */
    Text getPrefix();

    /**
     * Sets the prefix prepended to the display name of users on this team.
     *
     * @param prefix The new prefix for this team
     * @throws IllegalArgumentException If prefix is longer than 16
     *     characters
     */
    void setPrefix(Text prefix) throws IllegalArgumentException;

    /**
     * Gets the suffix appended to the display name of users on this team.
     *
     * @return The team's current suffix
     */
    Text getSuffix();

    /**
     * Sets the suffix appended to the display name of users on this team.
     *
     * @param suffix The new suffix for this team.
     * @throws IllegalArgumentException If suffix is longer than 16
     *     characters (in its legacy representation)
     */
    void setSuffix(Text suffix) throws IllegalArgumentException;

    /**
     * Gets whether friendly fire is enabled.
     *
     * <p>This option only controls players attacking other players. It has no
     * effect on other entities attacking other entities, or on players attacking
     * other entities (or vice-versa).</p>
     *
     * @return Whether friendly fire is enabled
     */
    boolean allowFriendlyFire();

    /**
     * Sets whether friendly fire is enabled.
     *
     * @param enabled Whether friendly fire is enabled
     */
    void setAllowFriendlyFire(boolean enabled);

    /**
     * Gets whether invisible team members are shown.
     *
     * @return Whether to show invisible team members
     */
    boolean canSeeFriendlyInvisibles();

    /**
     * Sets whether invisible team members are shown.
     *
     * @param enabled Whether to show invisible teammates
     */
    void setCanSeeFriendlyInvisibles(boolean enabled);

    /**
     * Gets the {@link Visibility} which controls to whom nametags
     * of players on this team are visible.
     *
     * @return The {@link Visibility} for this team's nametags
     */
    Visibility getNameTagVisibility();

    /**
     * Sets the {@link Visibility} which controls to whom nametags
     * of players on this team are visible.
     *
     * @param visibility The {@link Visibility} for this team's nametags
     */
    void setNameTagVisibility(Visibility visibility);

    /**
     * Gets the {@link Visibility} which controls to whom death messages
     * for players on this team are visible.
     *
     * @return The {@link Visibility} for this team's death messages
     */
    Visibility getDeathMessageVisibility();

    /**
     * Sets the {@link Visibility} which controls to whom death messages
     * of players on this team are visible.
     *
     * @param visibility The {@link Visibility} for this team's death messages
     */
    void setDeathMessageVisibility(Visibility visibility);

    /**
     * Gets the {@link CollisionRule} for entities on this team.
     *
     * @return The {@link CollisionRule} for entities on this team
     */
    CollisionRule getCollisionRule();

    /**
     * Sets the {@link CollisionRule} for entities on this team.
     *
     * @param rule The {@link CollisionRule} for entities on this team
     */
    void setCollisionRule(CollisionRule rule);

    /**
     * Gets the {@link Text}s representing the members of this team.
     *
     * @return the {@link Text}s for this team's members
     */
    Set<Text> getMembers();

    /**
     * Adds the specified {@link Text} to this team.
     *
     * <p>While any {@link Text} may be added, the {@link Text} to use should
     * normally be obtained by calling
     * {@link TeamMember#getTeamRepresentation()} on a {@link TeamMember}, such
     * as a {@link Player}.</p>
     *
     * @param member the {@link Text} to add
     */
    void addMember(Text member);

    /**
     * Removes the specified {@link Text} from this team.
     *
     * <p>While any {@link Text} may be removed, the {@link Text}
     * to use should normally be obtained by calling {@link TeamMember#getTeamRepresentation()}
     * on a {@link TeamMember}, such as a {@link Player}.</p>
     *
     * @param member The {@link Text} to remove
     * @return Whether the {@link Text} was on this team
     */
    boolean removeMember(Text member);

    /**
     * Returns the scoreboard this team is registered on, if available.
     *
     * <p>This will return {@link Optional#empty()} when a team has
     * been removed from its {@link Scoreboard}, or has been created
     * but not yet registered.</p>
     *
     * @return The scoreboard this team is registered on, if available.
     */
    Optional<Scoreboard> getScoreboard();

    /**
     * Unregisters this team from its {@link Scoreboard}, if present.
     *
     * <p>A team can still be fully used after being unregistered. However,
     * it will not affect the game in any way until registered to a
     * {@link Scoreboard} again, through
     * {@link Scoreboard#registerTeam(Team)}.</p>
     *
     * @return Whether this team was registered to a {@link Scoreboard}.
     */
    boolean unregister();

    /**
     * Represents a builder to create {@link Team} instances.
     */
    interface Builder extends ResettableBuilder<Team, Builder> {
        /**
         * Sets the name of the {@link Team}.
         *
         * @param name The name to set
         * @return This builder
         */
        Builder name(String name);

        /**
         * Sets the color of the {@link Team}.
         *
         * <p>The team's color is a distinct concept from its prefix or suffix.
         * It is only used for colored sidebar display slots, and certain
         * statistic criteria.</p>
         *
         * @param color The color to set
         * @return This builder
         */
        Builder color(TextColor color);

        /**
         * Sets the name displayed to users for the {@link Team}.
         *
         * <p>Display names may be truncated in order to meet an
         * implementation-defined length limit. In Vanilla, this is sixteen
         * characters.</p>
         *
         * <p>By default, this is set to {@link #name(String)}</p>
         *
         * @param displayName The {@link Text} to set
         * @return This builder
         * @throws IllegalArgumentException If the name is longer than 16
         *     characters
         */
        Builder displayName(Text displayName) throws IllegalArgumentException;

        /**
         * Sets the prefix prepended to the display name of users on the
         * {@link Team}.
         *
         * <p>Display names may be truncated in order to meet an
         * implementation-defined length limit. In Vanilla, this is sixteen
         * characters.</p>
         *
         * @param prefix The new prefix for the {@link Team}
         * @return This builder
         */
        Builder prefix(Text prefix);

        /**
         * Sets the suffix appended to the display name of users on the
         * {@link Team}.
         *
         * <p>Display names may be truncated in order to meet an
         * implementation-defined length limit. In Vanilla, this is sixteen
         * characters.</p>
         *
         * @param suffix The new suffix for the {@link Team}.
         * @return This builder
         */
        Builder suffix(Text suffix);

        /**
         * Sets whether friendly fire is enabled for the {@link Team}.
         *
         * @param enabled Whether friendly fire is enabled
         * @return This builder
         */
        Builder allowFriendlyFire(boolean enabled);

        /**
         * Sets whether invisible team members are shown for the
         * {@link Team}.
         *
         * @param enabled Whether to show invisible teammates
         * @return This builder
         */
        Builder canSeeFriendlyInvisibles(boolean enabled);

        /**
         * Sets the {@link Visibility} which controls to whom nametags
         * of players on the {@link Team} are visible.
         *
         * @param visibility The {@link Visibility} for the {@link Team}'s
         *     nametags
         * @return This builder
         */
        Builder nameTagVisibility(Visibility visibility);

        /**
         * Sets the {@link Visibility} which controls to whom death messages
         * of players on the {@link Team} are visible.
         *
         * @param visibility The {@link Visibility} for the {@link Team}'s
         *     death messages
         * @return This builder
         */
        Builder deathTextVisibility(Visibility visibility);

        /**
         * Sets the {@link CollisionRule} for this team's members.
         *
         * @param rule The {@link CollisionRule} for the {@link Team}'s members
         * @return This builder
         */
        Builder collisionRule(CollisionRule rule);

        /**
         * Sets the set of {@link Text} members on the {@link Team}.
         *
         * <p>By default, this is the empty set.</p>
         *
         * @param users The set of {@link Text} members on the {@link Team}
         * @return This builder
         */
        Builder members(Set<Text> users);

        /**
         * Builds an instance of a {@link Team}.
         *
         * @return A new instance of a {@link Team}
         * @throws IllegalStateException if the {@link Team} is not complete
         */
        Team build() throws IllegalStateException;
    }
}
|
/*
* Copyright 2016-2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.vorb.platon.web.api.json;
import de.vorb.platon.web.api.common.ByteArrayConverter;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.JsonSerializer;
import com.fasterxml.jackson.databind.SerializerProvider;
import java.io.IOException;
/**
 * Jackson serializer that writes a {@code byte[]} as a hexadecimal JSON string
 * via {@link ByteArrayConverter#bytesToHexString}.
 */
class ByteArraySerializer extends JsonSerializer<byte[]> {
    @Override
    public void serialize(byte[] bytes, JsonGenerator gen, SerializerProvider serializers) throws IOException {
        // Emit the whole array as a single hex string token.
        gen.writeString(ByteArrayConverter.bytesToHexString(bytes));
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.