language stringclasses 1
value | repo stringclasses 60
values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | netty__netty | codec-http2/src/main/java/io/netty/handler/codec/http2/HpackStaticTable.java | {
"start": 1708,
"end": 11966
} | class ____ {
static final int NOT_FOUND = -1;
// Appendix A: Static Table
// https://tools.ietf.org/html/rfc7541#appendix-A
private static final List<HpackHeaderField> STATIC_TABLE = Arrays.asList(
/* 1 */ newEmptyPseudoHeaderField(PseudoHeaderName.AUTHORITY),
/* 2 */ newPseudoHeaderMethodField(HttpMethod.GET),
/* 3 */ newPseudoHeaderMethodField(HttpMethod.POST),
/* 4 */ newPseudoHeaderField(PseudoHeaderName.PATH, "/"),
/* 5 */ newPseudoHeaderField(PseudoHeaderName.PATH, "/index.html"),
/* 6 */ newPseudoHeaderField(PseudoHeaderName.SCHEME, "http"),
/* 7 */ newPseudoHeaderField(PseudoHeaderName.SCHEME, "https"),
/* 8 */ newPseudoHeaderField(PseudoHeaderName.STATUS, HttpResponseStatus.OK.codeAsText()),
/* 9 */ newPseudoHeaderField(PseudoHeaderName.STATUS, HttpResponseStatus.NO_CONTENT.codeAsText()),
/* 10 */ newPseudoHeaderField(PseudoHeaderName.STATUS, HttpResponseStatus.PARTIAL_CONTENT.codeAsText()),
/* 11 */ newPseudoHeaderField(PseudoHeaderName.STATUS, HttpResponseStatus.NOT_MODIFIED.codeAsText()),
/* 12 */ newPseudoHeaderField(PseudoHeaderName.STATUS, HttpResponseStatus.BAD_REQUEST.codeAsText()),
/* 13 */ newPseudoHeaderField(PseudoHeaderName.STATUS, HttpResponseStatus.NOT_FOUND.codeAsText()),
/* 14 */ newPseudoHeaderField(PseudoHeaderName.STATUS, HttpResponseStatus.INTERNAL_SERVER_ERROR.codeAsText()),
/* 15 */ newEmptyHeaderField(HttpHeaderNames.ACCEPT_CHARSET),
/* 16 */ newHeaderField(HttpHeaderNames.ACCEPT_ENCODING, "gzip, deflate"),
/* 17 */ newEmptyHeaderField(HttpHeaderNames.ACCEPT_LANGUAGE),
/* 18 */ newEmptyHeaderField(HttpHeaderNames.ACCEPT_RANGES),
/* 19 */ newEmptyHeaderField(HttpHeaderNames.ACCEPT),
/* 20 */ newEmptyHeaderField(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN),
/* 21 */ newEmptyHeaderField(HttpHeaderNames.AGE),
/* 22 */ newEmptyHeaderField(HttpHeaderNames.ALLOW),
/* 23 */ newEmptyHeaderField(HttpHeaderNames.AUTHORIZATION),
/* 24 */ newEmptyHeaderField(HttpHeaderNames.CACHE_CONTROL),
/* 25 */ newEmptyHeaderField(HttpHeaderNames.CONTENT_DISPOSITION),
/* 26 */ newEmptyHeaderField(HttpHeaderNames.CONTENT_ENCODING),
/* 27 */ newEmptyHeaderField(HttpHeaderNames.CONTENT_LANGUAGE),
/* 28 */ newEmptyHeaderField(HttpHeaderNames.CONTENT_LENGTH),
/* 29 */ newEmptyHeaderField(HttpHeaderNames.CONTENT_LOCATION),
/* 30 */ newEmptyHeaderField(HttpHeaderNames.CONTENT_RANGE),
/* 31 */ newEmptyHeaderField(HttpHeaderNames.CONTENT_TYPE),
/* 32 */ newEmptyHeaderField(HttpHeaderNames.COOKIE),
/* 33 */ newEmptyHeaderField(HttpHeaderNames.DATE),
/* 34 */ newEmptyHeaderField(HttpHeaderNames.ETAG),
/* 35 */ newEmptyHeaderField(HttpHeaderNames.EXPECT),
/* 36 */ newEmptyHeaderField(HttpHeaderNames.EXPIRES),
/* 37 */ newEmptyHeaderField(HttpHeaderNames.FROM),
/* 38 */ newEmptyHeaderField(HttpHeaderNames.HOST),
/* 39 */ newEmptyHeaderField(HttpHeaderNames.IF_MATCH),
/* 40 */ newEmptyHeaderField(HttpHeaderNames.IF_MODIFIED_SINCE),
/* 41 */ newEmptyHeaderField(HttpHeaderNames.IF_NONE_MATCH),
/* 42 */ newEmptyHeaderField(HttpHeaderNames.IF_RANGE),
/* 43 */ newEmptyHeaderField(HttpHeaderNames.IF_UNMODIFIED_SINCE),
/* 44 */ newEmptyHeaderField(HttpHeaderNames.LAST_MODIFIED),
/* 45 */ newEmptyHeaderField("link"),
/* 46 */ newEmptyHeaderField(HttpHeaderNames.LOCATION),
/* 47 */ newEmptyHeaderField(HttpHeaderNames.MAX_FORWARDS),
/* 48 */ newEmptyHeaderField(HttpHeaderNames.PROXY_AUTHENTICATE),
/* 49 */ newEmptyHeaderField(HttpHeaderNames.PROXY_AUTHORIZATION),
/* 50 */ newEmptyHeaderField(HttpHeaderNames.RANGE),
/* 51 */ newEmptyHeaderField(HttpHeaderNames.REFERER),
/* 52 */ newEmptyHeaderField("refresh"),
/* 53 */ newEmptyHeaderField(HttpHeaderNames.RETRY_AFTER),
/* 54 */ newEmptyHeaderField(HttpHeaderNames.SERVER),
/* 55 */ newEmptyHeaderField(HttpHeaderNames.SET_COOKIE),
/* 56 */ newEmptyHeaderField("strict-transport-security"),
/* 57 */ newEmptyHeaderField(HttpHeaderNames.TRANSFER_ENCODING),
/* 58 */ newEmptyHeaderField(HttpHeaderNames.USER_AGENT),
/* 59 */ newEmptyHeaderField(HttpHeaderNames.VARY),
/* 60 */ newEmptyHeaderField(HttpHeaderNames.VIA),
/* 61 */ newEmptyHeaderField(HttpHeaderNames.WWW_AUTHENTICATE)
);
private static HpackHeaderField newEmptyHeaderField(AsciiString name) {
return new HpackHeaderField(name, AsciiString.EMPTY_STRING);
}
private static HpackHeaderField newEmptyHeaderField(String name) {
return new HpackHeaderField(AsciiString.cached(name), AsciiString.EMPTY_STRING);
}
private static HpackHeaderField newHeaderField(AsciiString name, String value) {
return new HpackHeaderField(name, AsciiString.cached(value));
}
private static HpackHeaderField newPseudoHeaderMethodField(HttpMethod method) {
return new HpackHeaderField(PseudoHeaderName.METHOD.value(), method.asciiName());
}
private static HpackHeaderField newPseudoHeaderField(PseudoHeaderName name, AsciiString value) {
return new HpackHeaderField(name.value(), value);
}
private static HpackHeaderField newPseudoHeaderField(PseudoHeaderName name, String value) {
return new HpackHeaderField(name.value(), AsciiString.cached(value));
}
private static HpackHeaderField newEmptyPseudoHeaderField(PseudoHeaderName name) {
return new HpackHeaderField(name.value(), AsciiString.EMPTY_STRING);
}
// The table size and bit shift are chosen so that each hash bucket contains a single header name.
private static final int HEADER_NAMES_TABLE_SIZE = 1 << 9;
private static final int HEADER_NAMES_TABLE_SHIFT = PlatformDependent.BIG_ENDIAN_NATIVE_ORDER ? 22 : 18;
// A table mapping header names to their associated indexes.
private static final HeaderNameIndex[] HEADER_NAMES = new HeaderNameIndex[HEADER_NAMES_TABLE_SIZE];
static {
// Iterate through the static table in reverse order to
// save the smallest index for a given name in the table.
for (int index = STATIC_TABLE.size(); index > 0; index--) {
HpackHeaderField entry = getEntry(index);
int bucket = headerNameBucket(entry.name);
HeaderNameIndex tableEntry = HEADER_NAMES[bucket];
if (tableEntry != null && !equalsVariableTime(tableEntry.name, entry.name)) {
// Can happen if AsciiString.hashCode changes
throw new IllegalStateException("Hash bucket collision between " +
tableEntry.name + " and " + entry.name);
}
HEADER_NAMES[bucket] = new HeaderNameIndex(entry.name, index, entry.value.length() == 0);
}
}
// The table size and bit shift are chosen so that each hash bucket contains a single header.
private static final int HEADERS_WITH_NON_EMPTY_VALUES_TABLE_SIZE = 1 << 6;
private static final int HEADERS_WITH_NON_EMPTY_VALUES_TABLE_SHIFT =
PlatformDependent.BIG_ENDIAN_NATIVE_ORDER ? 0 : 6;
// A table mapping headers with non-empty values to their associated indexes.
private static final HeaderIndex[] HEADERS_WITH_NON_EMPTY_VALUES =
new HeaderIndex[HEADERS_WITH_NON_EMPTY_VALUES_TABLE_SIZE];
static {
for (int index = STATIC_TABLE.size(); index > 0; index--) {
HpackHeaderField entry = getEntry(index);
if (entry.value.length() > 0) {
int bucket = headerBucket(entry.value);
HeaderIndex tableEntry = HEADERS_WITH_NON_EMPTY_VALUES[bucket];
if (tableEntry != null) {
// Can happen if AsciiString.hashCode changes
throw new IllegalStateException("Hash bucket collision between " +
tableEntry.value + " and " + entry.value);
}
HEADERS_WITH_NON_EMPTY_VALUES[bucket] = new HeaderIndex(entry.name, entry.value, index);
}
}
}
/**
* The number of header fields in the static table.
*/
static final int length = STATIC_TABLE.size();
/**
* Return the header field at the given index value.
*/
static HpackHeaderField getEntry(int index) {
return STATIC_TABLE.get(index - 1);
}
/**
* Returns the lowest index value for the given header field name in the static table. Returns
* -1 if the header field name is not in the static table.
*/
static int getIndex(CharSequence name) {
HeaderNameIndex entry = getEntry(name);
return entry == null ? NOT_FOUND : entry.index;
}
/**
* Returns the index value for the given header field in the static table. Returns -1 if the
* header field is not in the static table.
*/
static int getIndexInsensitive(CharSequence name, CharSequence value) {
if (value.length() == 0) {
HeaderNameIndex entry = getEntry(name);
return entry == null || !entry.emptyValue ? NOT_FOUND : entry.index;
}
int bucket = headerBucket(value);
HeaderIndex header = HEADERS_WITH_NON_EMPTY_VALUES[bucket];
if (header == null) {
return NOT_FOUND;
}
if (equalsVariableTime(header.name, name) && equalsVariableTime(header.value, value)) {
return header.index;
}
return NOT_FOUND;
}
private static HeaderNameIndex getEntry(CharSequence name) {
int bucket = headerNameBucket(name);
HeaderNameIndex entry = HEADER_NAMES[bucket];
if (entry == null) {
return null;
}
return equalsVariableTime(entry.name, name) ? entry : null;
}
private static int headerNameBucket(CharSequence name) {
return bucket(name, HEADER_NAMES_TABLE_SHIFT, HEADER_NAMES_TABLE_SIZE - 1);
}
private static int headerBucket(CharSequence value) {
return bucket(value, HEADERS_WITH_NON_EMPTY_VALUES_TABLE_SHIFT, HEADERS_WITH_NON_EMPTY_VALUES_TABLE_SIZE - 1);
}
private static int bucket(CharSequence s, int shift, int mask) {
return (AsciiString.hashCode(s) >> shift) & mask;
}
private static final | HpackStaticTable |
java | apache__camel | core/camel-xml-io/src/main/java/org/apache/camel/xml/in/BaseParser.java | {
"start": 1950,
"end": 20897
} | class ____ {
public static final String DEFAULT_NAMESPACE = "http://camel.apache.org/schema/xml-io";
public static final String SPRING_NAMESPACE = "http://camel.apache.org/schema/spring";
protected final MXParser parser;
protected String namespace;
protected final Set<String> secondaryNamespaces = new HashSet<>();
protected Resource resource;
public BaseParser(Resource resource) throws IOException, XmlPullParserException {
this(resource.getInputStream(), null);
this.resource = resource;
}
public BaseParser(Resource resource, String namespace) throws IOException, XmlPullParserException {
this(resource.getInputStream(), namespace);
this.resource = resource;
}
public BaseParser(InputStream input) throws IOException, XmlPullParserException {
this(input, null);
}
public BaseParser(Reader reader) throws IOException, XmlPullParserException {
this(reader, null);
}
public BaseParser(InputStream input, String namespace) throws IOException, XmlPullParserException {
this.parser = new MXParser();
this.parser.setFeature(XmlPullParser.FEATURE_PROCESS_NAMESPACES, true);
this.parser.setInput(input, null);
this.namespace = namespace != null && !namespace.isEmpty() ? namespace : DEFAULT_NAMESPACE;
}
public BaseParser(Reader reader, String namespace) throws IOException, XmlPullParserException {
this.parser = new MXParser();
this.parser.setFeature(XmlPullParser.FEATURE_PROCESS_NAMESPACES, true);
this.parser.setInput(reader);
this.namespace = namespace != null && !namespace.isEmpty() ? namespace : DEFAULT_NAMESPACE;
}
public void addSecondaryNamespace(String namespace) {
this.secondaryNamespaces.add(namespace);
}
protected <T> T doParse(
T definition, AttributeHandler<T> attributeHandler, ElementHandler<T> elementHandler, ValueHandler<T> valueHandler)
throws IOException, XmlPullParserException {
return doParse(definition, attributeHandler, elementHandler, valueHandler, false);
}
protected <T> T doParse(
T definition, AttributeHandler<T> attributeHandler, ElementHandler<T> elementHandler, ValueHandler<T> valueHandler,
boolean supportsExternalNamespaces)
throws IOException, XmlPullParserException {
try {
return doParseXml(definition, attributeHandler, elementHandler, valueHandler, supportsExternalNamespaces);
} catch (Exception e) {
if (e instanceof XmlPullParserLocationException) {
throw e;
}
// wrap in XmlPullParserLocationException so we have line-precise error
String msg = e.getMessage();
Throwable cause = e;
if (e instanceof XmlPullParserException) {
if (e.getCause() != null) {
cause = e.getCause();
msg = e.getCause().getMessage();
}
}
throw new XmlPullParserLocationException(msg, resource, parser.getLineNumber(), parser.getColumnNumber(), cause);
}
}
protected <T> T doParseXml(
T definition, AttributeHandler<T> attributeHandler, ElementHandler<T> elementHandler, ValueHandler<T> valueHandler,
boolean supportsExternalNamespaces)
throws IOException, XmlPullParserException {
setLocation(definition);
if (definition instanceof NamespaceAware namespaceAware) {
final Map<String, String> namespaces = new LinkedHashMap<>();
for (int i = 0; i < parser.getNamespaceCount(parser.getDepth()); i++) {
final String prefix = parser.getNamespacePrefix(i);
if (prefix != null) {
namespaces.put(prefix, parser.getNamespaceUri(i));
}
}
namespaceAware.setNamespaces(namespaces);
}
for (int i = 0; i < parser.getAttributeCount(); i++) {
String name = parser.getAttributeName(i);
String ns = parser.getAttributeNamespace(i);
String val = parser.getAttributeValue(i);
if (name.equals("uri") || name.endsWith("Uri")) {
val = URISupport.removeNoiseFromUri(val);
}
if (matchNamespace(ns, true)) {
if (attributeHandler == null || !attributeHandler.accept(definition, name, val)) {
handleUnexpectedAttribute(ns, name);
}
} else {
handleOtherAttribute(definition, name, ns, val);
}
}
while (true) {
int event = parser.next();
if (event == XmlPullParser.TEXT) {
if (!parser.isWhitespace()) {
valueHandler.accept(definition, parser.getText());
}
} else if (event == XmlPullParser.START_TAG) {
String ns = parser.getNamespace();
String name = parser.getName();
if (supportsExternalNamespaces) {
// pass element to the handler regardless of namespace
if (elementHandler == null || !elementHandler.accept(definition, name)) {
handleUnexpectedElement(ns, name);
}
} else {
// pass element to the handler only if matches the declared namespace for the parser
if (matchNamespace(ns, false)) {
if (elementHandler == null || !elementHandler.accept(definition, name)) {
handleUnexpectedElement(namespace, name);
}
} else {
handleUnexpectedElement(ns, name);
}
}
} else if (event == XmlPullParser.END_TAG) {
// we need to check first if the end tag is from
// and unexpected element which we should ignore,
// and continue parsing (special need for camel-xml-io-dsl)
String ns = parser.getNamespace();
String name = parser.getName();
boolean ignore = false;
if (supportsExternalNamespaces) {
ignore = ignoreUnexpectedElement(ns, name);
}
if (!ignore) {
return definition;
}
} else {
throw new XmlPullParserException(
"expected START_TAG or END_TAG not " + XmlPullParser.TYPES[event], parser, null);
}
}
}
protected <T> List<T> doParseValue(Supplier<T> definitionSupplier, ValueHandler<T> valueHandler)
throws IOException, XmlPullParserException {
List<T> answer = new ArrayList<>();
while (true) {
int event = parser.next();
if (event == XmlPullParser.TEXT) {
if (!parser.isWhitespace()) {
T definition = definitionSupplier.get();
setLocation(definition);
valueHandler.accept(definition, parser.getText());
answer.add(definition);
}
} else if (event == XmlPullParser.START_TAG) {
String ns = parser.getNamespace();
String name = parser.getName();
if (matchNamespace(ns, false)) {
if (!"value".equals(name)) {
handleUnexpectedElement(ns, name);
}
} else {
handleUnexpectedElement(ns, name);
}
} else if (event == XmlPullParser.END_TAG) {
String ns = parser.getNamespace();
String name = parser.getName();
if (matchNamespace(ns, false)) {
if ("value".equals(name)) {
continue;
}
}
return answer;
} else {
throw new XmlPullParserException(
"expected START_TAG or END_TAG not " + XmlPullParser.TYPES[event], parser, null);
}
}
}
private <T> void setLocation(T definition) {
if (definition instanceof LineNumberAware lineNumberAware) {
// we want to get the line number where the tag starts (in case its multi-line)
int line = parser.getStartLineNumber();
if (line == -1) {
line = parser.getLineNumber();
}
lineNumberAware.setLineNumber(line);
if (resource != null) {
lineNumberAware.setLocation(resource.getLocation());
}
}
}
protected Class<?> asClass(String val) throws XmlPullParserException {
try {
return Class.forName(val);
} catch (ClassNotFoundException e) {
throw new XmlPullParserException("Unable to load class " + val, parser, e);
}
}
protected Class<?>[] asClassArray(String val) throws XmlPullParserException {
String[] vals = val.split(" ");
Class<?>[] cls = new Class<?>[vals.length];
for (int i = 0; i < vals.length; i++) {
cls[i] = asClass(vals[i]);
}
return cls;
}
protected byte[] asByteArray(String val) {
return Base64.getDecoder().decode(val);
}
protected List<String> asStringList(String val) {
return new ArrayList<>(Arrays.asList(val.split(" ")));
}
protected Set<String> asStringSet(String val) {
return new LinkedHashSet<>(Arrays.asList(val.split(" ")));
}
protected <T> void doAdd(T element, List<T> existing, Consumer<List<T>> setter) {
if (existing == null) {
existing = new ArrayList<>();
setter.accept(existing);
}
existing.add(element);
}
protected <T> void doAddValues(List<T> elements, List<T> existing, Consumer<List<T>> setter) {
if (existing == null) {
existing = new ArrayList<>();
setter.accept(existing);
}
existing.addAll(elements);
}
protected String doParseText() throws IOException, XmlPullParserException {
String s = "";
int e = parser.next();
if (e == XmlPullParser.TEXT) {
s = parser.getText();
e = parser.next();
}
if (e != XmlPullParser.END_TAG) {
throw new XmlPullParserException("Expected text element");
}
return s;
}
protected Element doParseDOMElement(String rootElementName, String namespace, List<Element> existing)
throws XmlPullParserException, IOException {
Document doc;
if (existing != null && !existing.isEmpty()) {
doc = existing.get(0).getOwnerDocument();
} else {
// create a new one
try {
doc = createDocumentBuilderFactory().newDocumentBuilder().newDocument();
// with root element generated from @ExternalSchemaElement.documentElement
Element rootElement = doc.createElementNS(namespace, rootElementName);
doc.appendChild(rootElement);
} catch (ParserConfigurationException e) {
throw new XmlPullParserException(
"Problem handling external element '{" + namespace + "}" + parser.getName()
+ ": " + e.getMessage());
}
}
if (doc == null) {
return null;
}
Element element = doc.createElementNS(namespace, parser.getName());
doc.getDocumentElement().appendChild(element);
doParse(element, domAttributeHandler(), domElementHandler(), domValueHandler(), true);
return element;
}
protected void doAddElement(Element element, List<Element> existing, Consumer<List<Element>> setter) {
if (existing == null) {
existing = new ArrayList<>();
setter.accept(existing);
}
existing.add(element);
}
protected boolean handleUnexpectedAttribute(String namespace, String name) throws XmlPullParserException {
throw new XmlPullParserException("Unexpected attribute '{" + namespace + "}" + name + "'");
}
protected boolean handleUnexpectedElement(String namespace, String name) throws XmlPullParserException {
throw new XmlPullParserException("Unexpected element '{" + namespace + "}" + name + "'");
}
protected void handleUnexpectedText(String text) throws XmlPullParserException {
throw new XmlPullParserException("Unexpected text '" + text + "'");
}
protected boolean ignoreUnexpectedElement(String namespace, String name) throws XmlPullParserException {
// special for dataFormats (wrapper)
if ("dataFormats".equals(name)) {
return true;
}
return false;
}
protected void expectTag(String name) throws XmlPullParserException, IOException {
if (parser.nextTag() != XmlPullParser.START_TAG) {
throw new XmlPullParserException(
"Expected starting tag '{" + namespace + "}" + name + "', read ending tag '{" + parser.getNamespace() + "}"
+ parser.getName()
+ "' instead");
}
if (!Objects.equals(name, parser.getName()) || !Objects.equals(namespace, parser.getNamespace())) {
throw new XmlPullParserException(
"Expected starting tag '{" + namespace + "}" + name + "', read starting tag '{" + parser.getNamespace()
+ "}" + parser.getName()
+ "' instead");
}
}
protected boolean hasTag(String name) throws XmlPullParserException, IOException {
if (parser.nextTag() != XmlPullParser.START_TAG) {
throw new XmlPullParserException("Expected starting tag");
}
if (!Objects.equals(name, parser.getName())
|| !matchNamespace(namespace, parser.getNamespace(), secondaryNamespaces, false)) {
return false;
}
return true;
}
protected String getNextTag(String name, String name2) throws XmlPullParserException, IOException {
if (parser.nextTag() != XmlPullParser.START_TAG) {
throw new XmlPullParserException("Expected starting tag");
}
String pn = parser.getName();
boolean match = Objects.equals(name, pn) || Objects.equals(name2, pn);
if (!match || !matchNamespace(namespace, parser.getNamespace(), secondaryNamespaces, false)) {
return ""; // empty tag
}
return pn;
}
protected String getNextTag(String name, String name2, String name3) throws XmlPullParserException, IOException {
if (parser.nextTag() != XmlPullParser.START_TAG) {
throw new XmlPullParserException("Expected starting tag");
}
String pn = parser.getName();
boolean match = Objects.equals(name, pn) || Objects.equals(name2, pn) || Objects.equals(name3, pn);
if (!match || !matchNamespace(namespace, parser.getNamespace(), secondaryNamespaces, false)) {
return ""; // empty tag
}
return pn;
}
protected void handleOtherAttribute(Object definition, String name, String ns, String val) throws XmlPullParserException {
// Ignore
if ("http://www.w3.org/2001/XMLSchema-instance".equals(ns)) {
return;
}
String fqn = ns.isEmpty() ? name : "{" + ns + "}" + name;
throw new XmlPullParserException("Unsupported attribute '" + fqn + "'");
}
protected <T> AttributeHandler<T> noAttributeHandler() {
return null;
}
protected <T> ElementHandler<T> noElementHandler() {
return (def, name) -> handleUnexpectedElement(namespace, name);
}
protected <T> ValueHandler<T> noValueHandler() {
return (def, text) -> handleUnexpectedText(text);
}
protected AttributeHandler<Element> domAttributeHandler() {
return (el, name, value) -> {
// for now, handle only XMLs where schema declares attributeFormDefault="unqualified"
el.setAttributeNS(null, name, value);
return true;
};
}
protected ElementHandler<Element> domElementHandler() {
return (def, name) -> {
Element child = def.getOwnerDocument().createElementNS(parser.getNamespace(), name);
def.appendChild(child);
doParse(child, domAttributeHandler(), domElementHandler(), domValueHandler(), true);
return true;
};
}
protected ValueHandler<Element> domValueHandler() {
return (def, text) -> {
Text txt = def.getOwnerDocument().createTextNode(text);
def.appendChild(txt);
};
}
protected <T extends ExpressionDefinition> ValueHandler<T> expressionDefinitionValueHandler() {
return ExpressionDefinition::setExpression;
}
// another one...
private static DocumentBuilderFactory createDocumentBuilderFactory() {
DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
factory.setNamespaceAware(true);
factory.setValidating(false);
factory.setIgnoringElementContentWhitespace(true);
factory.setIgnoringComments(true);
try {
// Set secure processing
factory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, Boolean.TRUE);
} catch (ParserConfigurationException ignore) {
}
try {
// Disable the external-general-entities by default
factory.setFeature("http://xml.org/sax/features/external-general-entities", false);
} catch (ParserConfigurationException ignore) {
}
try {
// Disable the external-parameter-entities by default
factory.setFeature("http://xml.org/sax/features/external-parameter-entities", false);
} catch (ParserConfigurationException ignore) {
}
// setup the SecurityManager by default if it's apache xerces
try {
Class<?> smClass = ObjectHelper.loadClass("org.apache.xerces.util.SecurityManager");
if (smClass != null) {
Object sm = smClass.getDeclaredConstructor().newInstance();
// Here we just use the default setting of the SeurityManager
factory.setAttribute("http://apache.org/xml/properties/security-manager", sm);
}
} catch (Exception ignore) {
}
return factory;
}
protected | BaseParser |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/converted/converter/literal/QueryLiteralTest.java | {
"start": 6954,
"end": 7036
} | enum ____ {
ONE, TWO, THREE
}
@Converter(autoApply = true)
public static | Numbers |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/deser/DeserConcurrencyTest.java | {
"start": 833,
"end": 2779
} | class ____
extends ValueDeserializer<Bean>
{
protected volatile boolean resolved = false;
@Override
public Bean deserialize(JsonParser p, DeserializationContext ctxt)
{
if (!resolved) {
ctxt.reportInputMismatch(Bean.class,
"Deserializer not yet completely resolved");
}
p.skipChildren(); // consume the value
Bean b = new Bean();
b.value = 13;
return b;
}
@Override
public void resolve(DeserializationContext ctxt)
{
try {
Thread.sleep(100L);
} catch (Exception e) { }
resolved = true;
}
}
/*
/**********************************************************************
/* Test methods
/**********************************************************************
*/
@Test
public void testDeserializerResolution() throws Exception
{
// Let's repeat couple of times, just to be sure; thread timing is not
// exact science; plus caching plays a role too
final String JSON = "{\"value\":42}";
for (int i = 0; i < 5; ++i) {
final ObjectMapper mapper = new ObjectMapper();
Runnable r = new Runnable() {
@Override
public void run() {
try {
/*Bean b =*/ mapper.readValue(JSON, Bean.class);
} catch (Exception e) { }
}
};
Thread t = new Thread(r);
t.start();
// then let it proceed
Thread.sleep(10L);
// and try the same...
Bean b = mapper.readValue(JSON, Bean.class);
// note: funny deserializer, mangles data.. :)
assertEquals(13, b.value);
t.join();
}
}
}
| TestBeanDeserializer |
java | lettuce-io__lettuce-core | src/main/java/io/lettuce/core/output/JsonValueListOutput.java | {
"start": 494,
"end": 1356
} | class ____<K, V> extends CommandOutput<K, V, List<JsonValue>> {
private boolean initialized;
private final JsonParser parser;
public JsonValueListOutput(RedisCodec<K, V> codec, JsonParser theParser) {
super(codec, Collections.emptyList());
parser = theParser;
}
@Override
public void set(ByteBuffer bytes) {
if (!initialized) {
multi(1);
}
ByteBuffer fetched = null;
if (bytes != null) {
fetched = ByteBuffer.allocate(bytes.remaining());
fetched.put(bytes);
fetched.flip();
}
output.add(parser.loadJsonValue(fetched));
}
@Override
public void multi(int count) {
if (!initialized) {
output = OutputFactory.newList(count);
initialized = true;
}
}
}
| JsonValueListOutput |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/handlers/IDataLoader.java | {
"start": 1033,
"end": 1188
} | interface ____ {
/**
* @return size of data loaded
*/
public int load() throws IOException;
public void close() throws IOException;
}
| IDataLoader |
java | redisson__redisson | redisson/src/main/java/org/redisson/transaction/operation/map/MapAddAndGetOperation.java | {
"start": 739,
"end": 1144
} | class ____ extends MapOperation {
public MapAddAndGetOperation() {
}
public MapAddAndGetOperation(RMap<?, ?> map, Object key, Object value, String transactionId, long threadId) {
super(map, key, value, transactionId, threadId);
}
@Override
public void commit(RMap<Object, Object> map) {
map.addAndGetAsync(key, (Number) value);
}
}
| MapAddAndGetOperation |
java | spring-projects__spring-boot | module/spring-boot-restclient/src/test/java/org/springframework/boot/restclient/autoconfigure/service/HttpServiceClientAutoConfigurationTests.java | {
"start": 12306,
"end": 12619
} | class ____ {
@Bean
ClientHttpRequestFactoryBuilder<?> requestFactoryBuilder() {
return ClientHttpRequestFactoryBuilder.jdk()
.withHttpClientCustomizer((httpClient) -> httpClient.followRedirects(Redirect.NEVER));
}
}
@Configuration(proxyBeanMethods = false)
static | RequestFactoryBuilderConfiguration |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/jpa/boot/spi/EntityManagerFactoryBuilder.java | {
"start": 886,
"end": 2127
} | interface ____ {
ManagedResources getManagedResources();
MetadataImplementor metadata();
/**
* Allows passing in a Java EE ValidatorFactory (delayed from constructing the builder, AKA phase 2) to be used
* in building the EntityManagerFactory
*
* @param validatorFactory The ValidatorFactory
*
* @return {@code this}, for method chaining
*/
EntityManagerFactoryBuilder withValidatorFactory(Object validatorFactory);
/**
* Allows passing in a DataSource (delayed from constructing the builder, AKA phase 2) to be used
* in building the EntityManagerFactory
*
* @param dataSource The DataSource to use
*
* @return {@code this}, for method chaining
*/
EntityManagerFactoryBuilder withDataSource(DataSource dataSource);
/**
* Build {@link EntityManagerFactory} instance
*
* @return The built {@link EntityManagerFactory}
*/
EntityManagerFactory build();
/**
* Cancel the building processing. This is used to signal the builder to release any resources in the case of
* something having gone wrong during the bootstrap process
*/
void cancel();
/**
* Perform an explicit schema generation (rather than an "auto" one) based on the
*/
void generateSchema();
}
| EntityManagerFactoryBuilder |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/dialect/function/json/H2JsonTableFunction.java | {
"start": 22281,
"end": 23658
} | class ____ implements SelfRenderingExpression {
private final Expression arrayExpression;
private final BasicType<Integer> integerType;
public ArrayLengthExpression(Expression arrayExpression, BasicType<Integer> integerType) {
this.arrayExpression = arrayExpression;
this.integerType = integerType;
}
@Override
public void renderToSql(
SqlAppender sqlAppender,
SqlAstTranslator<?> walker,
SessionFactoryImplementor sessionFactory) {
sqlAppender.append( "coalesce(array_length(" );
arrayExpression.accept( walker );
sqlAppender.append( "),0)" );
}
@Override
public JdbcMappingContainer getExpressionType() {
return integerType;
}
}
private static String ordinalityExpression(String tableIdentifierVariable, int clauseLevel) {
if ( clauseLevel == 0 ) {
return tableIdentifierVariable + ".x";
}
return tableIdentifierVariable + "_" + clauseLevel + "_.x";
}
/**
* This type resolver essentially implements all the JSON path handling and casting via column read expressions
* instead of rendering to the {@code from} clause like other {@code json_table()} implementations.
* This is necessary because H2 does not support lateral joins.
* The rendering is tightly coupled to the {@code system_range()} joins that are rendered for nested paths
* that refer to arrays.
*/
private static | ArrayLengthExpression |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/decorator/Address.java | {
"start": 197,
"end": 508
} | class ____ {
private String addressLine;
public Address(String addressLine) {
this.addressLine = addressLine;
}
public String getAddressLine() {
return addressLine;
}
public void setAddressLine(String addressLine) {
this.addressLine = addressLine;
}
}
| Address |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/appender/nosql/NoSqlObject.java | {
"start": 1054,
"end": 2233
} | interface ____<W> {
/**
* Sets the value of a property on this object to a String or primitive.
*
* @param field The name of the property
* @param value The value of the property
*/
void set(String field, Object value);
/**
* Sets the value of a property on this object to a nested complex object.
*
* @param field The name of the property
* @param value The value of the property
*/
void set(String field, NoSqlObject<W> value);
/**
* Sets the value of a property on this object to an array of Strings or primitives.
*
* @param field The name of the property
* @param values The values for the property
*/
void set(String field, Object[] values);
/**
* Sets the value of a property on this object to an array of nested complex objects.
*
* @param field The name of the property
* @param values The values for the property
*/
void set(String field, NoSqlObject<W>[] values);
/**
* Obtains the underlying NoSQL library-specific object that this object wraps.
*
* @return the wrapped object.
*/
W unwrap();
}
| NoSqlObject |
java | apache__camel | components/camel-mina/src/test/java/org/apache/camel/component/mina/MinaComponentTest.java | {
"start": 1269,
"end": 2367
} | class ____ extends CamelTestSupport {
@Test
public void testUnknownProtocol() {
Exception e = assertThrows(ResolveEndpointFailedException.class,
() -> template.sendBody("mina:xxx://localhost:8080", "mina:xxx://localhost:8080"),
"Should have thrown a ResolveEndpointFailedException");
assertTrue(e.getCause() instanceof IllegalArgumentException, "Should be an IAE exception");
assertEquals("Unrecognised MINA protocol: xxx for uri: mina://xxx://localhost:8080", e.getCause().getMessage());
}
@Test
public void testMistypedProtocol() {
Exception e = assertThrows(ResolveEndpointFailedException.class,
() -> template.sendBody("mina:tcp//localhost:8080", "mina:tcp//localhost:8080"),
"Should have thrown a ResolveEndpointFailedException");
assertTrue(e.getCause() instanceof IllegalArgumentException, "Should be an IAE exception");
assertEquals("Unrecognised MINA protocol: null for uri: mina://tcp//localhost:8080", e.getCause().getMessage());
}
}
| MinaComponentTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/entity/Topic.java | {
"start": 678,
"end": 1167
} | class ____ {
@Id @GeneratedValue
private int id;
private String name;
@OneToMany(mappedBy="topic", cascade=CascadeType.ALL)
@Filter(name="byState", condition=":state = state")
private Set<Narrative> narratives = new HashSet<>();
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
public Set<Narrative> getNarratives() {
return narratives;
}
public void addNarrative(Narrative val) {
narratives.add(val);
val.setTopic(this);
}
}
| Topic |
java | apache__kafka | streams/src/test/java/org/apache/kafka/test/StreamsTestUtils.java | {
"start": 14085,
"end": 14716
} | class ____ {
private final TopologyMetadata topologyMetadata;
private TopologyMetadataBuilder(final TopologyMetadata topologyMetadata) {
this.topologyMetadata = topologyMetadata;
}
public static TopologyMetadataBuilder unnamedTopology() {
final TopologyMetadata topologyMetadata = mock(TopologyMetadata.class);
when(topologyMetadata.isPaused(null)).thenReturn(false);
return new TopologyMetadataBuilder(topologyMetadata);
}
public TopologyMetadata build() {
return topologyMetadata;
}
}
}
| TopologyMetadataBuilder |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/file/FileAssert_isDirectory_Test.java | {
"start": 880,
"end": 1187
} | class ____ extends FileAssertBaseTest {
@Override
protected FileAssert invoke_api_method() {
return assertions.isDirectory();
}
@Override
protected void verify_internal_effects() {
verify(files).assertIsDirectory(getInfo(assertions), getActual(assertions));
}
}
| FileAssert_isDirectory_Test |
java | quarkusio__quarkus | extensions/elytron-security-ldap/deployment/src/test/java/io/quarkus/elytron/security/ldap/CustomRoleDecoder.java | {
"start": 403,
"end": 925
} | class ____ implements RoleDecoder {
@Override
public Roles decodeRoles(AuthorizationIdentity authorizationIdentity) {
Attributes.Entry groupsEntry = authorizationIdentity.getAttributes().get("Roles");
Set<String> roles = new HashSet<>();
StreamSupport.stream(groupsEntry.spliterator(), false).forEach(groups -> {
for (String role : groups.split(",")) {
roles.add(role.trim());
}
});
return Roles.fromSet(roles);
}
}
| CustomRoleDecoder |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/connector/dsv2/DataStreamV2SourceUtils.java | {
"start": 1071,
"end": 2017
} | class ____ {
/**
* Wrap a FLIP-27 based source to a DataStream V2 supported source.
*
* @param source The FLIP-27 based source to wrap.
* @return The DataStream V2 supported source.
*/
public static <T> Source<T> wrapSource(
org.apache.flink.api.connector.source.Source<T, ?, ?> source) {
return new WrappedSource<>(source);
}
/**
* Creates a source that contains the given elements.The type of the data stream is that of the
* elements in the collection.
*
* @param data The collection of elements to create the source from.
* @param <T> The generic type of the returned data stream.
* @return The source representing the given collection
*/
public static <T> Source<T> fromData(Collection<T> data) {
Preconditions.checkNotNull(data, "Collection must not be null");
return new FromDataSource<>(data);
}
}
| DataStreamV2SourceUtils |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/criteria/selectcase/SelectCaseTest.java | {
"start": 806,
"end": 3686
} | class ____ {
@Test
public void selectCaseWithValuesShouldWork(EntityManagerFactoryScope scope) {
scope.inEntityManager( entityManager -> {
CriteriaBuilder cb = entityManager.getCriteriaBuilder();
CriteriaBuilder.Case<EnumValue> selectCase = cb.selectCase();
Predicate somePredicate = cb.equal( cb.literal( 1 ), 1 );
selectCase.when( somePredicate, EnumValue.VALUE_1 );
selectCase.otherwise( EnumValue.VALUE_2 );
CriteriaQuery<Entity> query = cb.createQuery( Entity.class );
Root<Entity> from = query.from( Entity.class );
query.select( from ).where( cb.equal( from.get( "value" ), selectCase ) );
entityManager.createQuery( query ).getResultList();
} );
}
@Test
public void selectCaseWithCastedTypeValuesShouldWork(EntityManagerFactoryScope scope) {
scope.inEntityManager( entityManager -> {
CriteriaBuilder cb = entityManager.getCriteriaBuilder();
CriteriaBuilder.Case<String> selectCase = cb.selectCase();
Predicate somePredicate = cb.equal( cb.literal( 1 ), 1 );
selectCase.when( somePredicate, EnumValue.VALUE_1.name() );
selectCase.otherwise( EnumValue.VALUE_2.name() );
CriteriaQuery<Entity> query = cb.createQuery( Entity.class );
Root<Entity> from = query.from( Entity.class );
query.select( from )
.where( cb.equal( from.get( "value" ).as( String.class ), selectCase.as( String.class ) ) );
entityManager.createQuery( query ).getResultList();
} );
}
@Test
public void simpleSelectCaseWithValuesShouldWork(EntityManagerFactoryScope scope) {
scope.inEntityManager( entityManager -> {
CriteriaBuilder cb = entityManager.getCriteriaBuilder();
CriteriaBuilder.SimpleCase<Integer, EnumValue> selectCase = cb.selectCase( cb.literal( 1 ) );
selectCase.when( 1, EnumValue.VALUE_1 );
selectCase.otherwise( EnumValue.VALUE_2 );
CriteriaQuery<Entity> query = cb.createQuery( Entity.class );
Root<Entity> from = query.from( Entity.class );
query.select( from ).where( cb.equal( from.get( "value" ), selectCase ) );
entityManager.createQuery( query ).getResultList();
} );
}
@Test
public void simpleSelectCaseWithCastedTypeValuesShouldWork(EntityManagerFactoryScope scope) {
scope.inEntityManager( entityManager -> {
CriteriaBuilder cb = entityManager.getCriteriaBuilder();
CriteriaBuilder.SimpleCase<Integer, String> selectCase = cb.selectCase( cb.literal( 1 ) );
selectCase.when( 1, EnumValue.VALUE_1.name() );
selectCase.otherwise( EnumValue.VALUE_2.name() );
CriteriaQuery<Entity> query = cb.createQuery( Entity.class );
Root<Entity> from = query.from( Entity.class );
query.select( from )
.where( cb.equal( from.get( "value" ).as( String.class ), selectCase.as( String.class ) ) );
entityManager.createQuery( query ).getResultList();
} );
}
@jakarta.persistence.Entity
@Table(name = "entity")
public static | SelectCaseTest |
java | alibaba__nacos | naming/src/main/java/com/alibaba/nacos/naming/remote/udp/UdpConnector.java | {
"start": 1595,
"end": 3919
} | class ____ {
private final ConcurrentMap<String, AckEntry> ackMap;
private final ConcurrentMap<String, PushCallBack> callbackMap;
private final DatagramSocket udpSocket;
private volatile boolean running = true;
public UdpConnector() throws SocketException {
this.ackMap = new ConcurrentHashMap<>();
this.callbackMap = new ConcurrentHashMap<>();
this.udpSocket = new DatagramSocket();
GlobalExecutor.scheduleUdpReceiver(new UdpReceiver());
}
public void shutdown() {
running = false;
}
public boolean containAck(String ackId) {
return ackMap.containsKey(ackId);
}
/**
* Sync send data once.
*
* @param ackEntry ack entry
* @throws NacosException nacos exception during sending
*/
public void sendData(AckEntry ackEntry) throws NacosException {
if (null == ackEntry) {
return;
}
try {
MetricsMonitor.incrementPush();
doSend(ackEntry.getOrigin());
} catch (IOException e) {
MetricsMonitor.incrementFailPush();
throw new NacosException(NacosException.SERVER_ERROR, "[NACOS-PUSH] push data with exception: ", e);
}
}
/**
* Send Data with {@link PushCallBack}.
*
* @param ackEntry ack entry
* @param pushCallBack push callback
*/
public void sendDataWithCallback(AckEntry ackEntry, PushCallBack pushCallBack) {
if (null == ackEntry) {
return;
}
GlobalExecutor.scheduleUdpSender(new UdpAsyncSender(ackEntry, pushCallBack), 0L, TimeUnit.MILLISECONDS);
}
private void doSend(DatagramPacket packet) throws IOException {
if (!udpSocket.isClosed()) {
udpSocket.send(packet);
}
}
private void callbackSuccess(String ackKey) {
PushCallBack pushCallBack = callbackMap.remove(ackKey);
if (null != pushCallBack) {
pushCallBack.onSuccess();
}
}
private void callbackFailed(String ackKey, Throwable exception) {
PushCallBack pushCallBack = callbackMap.remove(ackKey);
if (null != pushCallBack) {
pushCallBack.onFail(exception);
}
}
private | UdpConnector |
java | redisson__redisson | redisson/src/test/java/org/redisson/codec/protobuf/nativeData/Proto2AllTypes.java | {
"start": 155,
"end": 520
} | class ____ {
private Proto2AllTypes() {}
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistryLite registry) {
}
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistry registry) {
registerAllExtensions(
(com.google.protobuf.ExtensionRegistryLite) registry);
}
public | Proto2AllTypes |
java | spring-projects__spring-framework | spring-webflux/src/main/java/org/springframework/web/reactive/socket/adapter/AbstractListenerWebSocketSession.java | {
"start": 2229,
"end": 8342
} | class ____<T> extends AbstractWebSocketSession<T>
implements Subscriber<Void> {
/**
* The "back-pressure" buffer size to use if the underlying WebSocket API
* does not have flow control for receiving messages.
*/
private static final int RECEIVE_BUFFER_SIZE = 8192;
private final Sinks.@Nullable Empty<Void> handlerCompletionSink;
private final WebSocketReceivePublisher receivePublisher;
private volatile @Nullable WebSocketSendProcessor sendProcessor;
private final AtomicBoolean sendCalled = new AtomicBoolean();
private final Sinks.One<CloseStatus> closeStatusSink = Sinks.one();
/**
* Base constructor.
* @param delegate the native WebSocket session, channel, or connection
* @param id the session id
* @param info the handshake info
* @param bufferFactory the DataBuffer factor for the current connection
*/
public AbstractListenerWebSocketSession(
T delegate, String id, HandshakeInfo info, DataBufferFactory bufferFactory) {
this(delegate, id, info, bufferFactory, null);
}
/**
* Alternative constructor with completion sink to use to signal when the
* handling of the session is complete, with success or error.
* <p>Primarily for use with {@code WebSocketClient} to be able to
* communicate the end of handling.
*/
public AbstractListenerWebSocketSession(T delegate, String id, HandshakeInfo info,
DataBufferFactory bufferFactory, Sinks.@Nullable Empty<Void> handlerCompletionSink) {
super(delegate, id, info, bufferFactory);
this.receivePublisher = new WebSocketReceivePublisher();
this.handlerCompletionSink = handlerCompletionSink;
}
protected WebSocketSendProcessor getSendProcessor() {
WebSocketSendProcessor sendProcessor = this.sendProcessor;
Assert.state(sendProcessor != null, "No WebSocketSendProcessor available");
return sendProcessor;
}
@Override
public Flux<WebSocketMessage> receive() {
return (canSuspendReceiving() ?
Flux.from(this.receivePublisher) :
Flux.from(this.receivePublisher).onBackpressureBuffer(RECEIVE_BUFFER_SIZE));
}
@Override
public Mono<Void> send(Publisher<WebSocketMessage> messages) {
if (this.sendCalled.compareAndSet(false, true)) {
WebSocketSendProcessor sendProcessor = new WebSocketSendProcessor();
this.sendProcessor = sendProcessor;
return Mono.from(subscriber -> {
messages.subscribe(sendProcessor);
sendProcessor.subscribe(subscriber);
});
}
else {
return Mono.error(new IllegalStateException("send() has already been called"));
}
}
@Override
public Mono<CloseStatus> closeStatus() {
return this.closeStatusSink.asMono();
}
/**
* Whether the underlying WebSocket API has flow control and can suspend and
* resume the receiving of messages.
* <p><strong>Note:</strong> Sub-classes are encouraged to start out in
* suspended mode, if possible, and wait until demand is received.
*/
protected abstract boolean canSuspendReceiving();
/**
* Suspend receiving until received message(s) are processed and more demand
* is generated by the downstream Subscriber.
* <p><strong>Note:</strong> if the underlying WebSocket API does not provide
* flow control for receiving messages, this method should be a no-op
* and {@link #canSuspendReceiving()} should return {@code false}.
*/
protected abstract void suspendReceiving();
/**
* Resume receiving new message(s) after demand is generated by the
* downstream Subscriber.
* <p><strong>Note:</strong> if the underlying WebSocket API does not provide
* flow control for receiving messages, this method should be a no-op
* and {@link #canSuspendReceiving()} should return {@code false}.
*/
protected abstract void resumeReceiving();
/**
* Send the given WebSocket message.
* <p><strong>Note:</strong> Sub-classes are responsible for releasing the
* payload data buffer, once fully written, if pooled buffers apply to the
* underlying container.
*/
protected abstract boolean sendMessage(WebSocketMessage message) throws IOException;
// WebSocketHandler adapter delegate methods
/** Handle a message callback from the WebSocketHandler adapter. */
void handleMessage(Type type, WebSocketMessage message) {
this.receivePublisher.handleMessage(message);
}
/** Handle an error callback from the WebSocket engine. */
void handleError(Throwable ex) {
// Ignore result: can't overflow, ok if not first or no one listens
this.closeStatusSink.tryEmitEmpty();
this.receivePublisher.onError(ex);
WebSocketSendProcessor sendProcessor = this.sendProcessor;
if (sendProcessor != null) {
sendProcessor.cancel();
sendProcessor.onError(ex);
}
}
/** Handle a close callback from the WebSocket engine. */
void handleClose(CloseStatus closeStatus) {
// Ignore result: can't overflow, ok if not first or no one listens
this.closeStatusSink.tryEmitValue(closeStatus);
this.receivePublisher.onAllDataRead();
WebSocketSendProcessor sendProcessor = this.sendProcessor;
if (sendProcessor != null) {
sendProcessor.cancel();
sendProcessor.onComplete();
}
}
// Subscriber<Void> implementation tracking WebSocketHandler#handle completion
@Override
public void onSubscribe(Subscription subscription) {
subscription.request(Long.MAX_VALUE);
}
@Override
public void onNext(Void aVoid) {
// no op
}
@Override
public void onError(Throwable ex) {
if (this.handlerCompletionSink != null) {
// Ignore result: can't overflow, ok if not first or no one listens
this.handlerCompletionSink.tryEmitError(ex);
}
if (logger.isDebugEnabled()) {
logger.debug("WebSocket session completed with error", ex);
}
else if (logger.isInfoEnabled()) {
logger.info("WebSocket session completed with error: " + ex.getMessage());
}
close(CloseStatus.SERVER_ERROR);
}
@Override
public void onComplete() {
if (this.handlerCompletionSink != null) {
// Ignore result: can't overflow, ok if not first or no one listens
this.handlerCompletionSink.tryEmitEmpty();
}
close();
}
/**
* Read publisher for inbound WebSocket messages.
*/
private final | AbstractListenerWebSocketSession |
java | micronaut-projects__micronaut-core | http-client/src/main/java/io/micronaut/http/client/netty/DefaultNettyHttpClientRegistry.java | {
"start": 16206,
"end": 26552
} | class ____ be an instance of HttpClientConfiguration for injection point: " + configurationClass);
}
final List<String> filterAnnotations = clientKey.filterAnnotations;
final String path = clientKey.path;
if (clientBean != null && path == null && configurationClass == null && filterAnnotations.isEmpty()) {
return clientBean;
}
LoadBalancer loadBalancer = null;
final HttpClientConfiguration configuration;
if (configurationClass != null) {
configuration = (HttpClientConfiguration) this.beanContext.getBean(configurationClass);
} else if (clientId != null) {
configuration = this.beanContext.findBean(
HttpClientConfiguration.class,
Qualifiers.byName(clientId)
).orElse(defaultHttpClientConfiguration);
} else {
configuration = defaultHttpClientConfiguration;
}
if (clientId != null) {
loadBalancer = loadBalancerResolver.resolve(clientId)
.orElseThrow(() ->
new HttpClientException("Invalid service reference [" + clientId + "] specified to @Client"));
}
String contextPath = null;
if (StringUtils.isNotEmpty(path)) {
contextPath = path;
} else if (StringUtils.isNotEmpty(clientId) && clientId.startsWith("/")) {
contextPath = clientId;
} else {
if (loadBalancer != null) {
contextPath = loadBalancer.getContextPath().orElse(null);
}
}
final DefaultHttpClientBuilder builder = clientBuilder(
configuration,
clientId,
beanContext,
annotationMetadata
)
.loadBalancer(loadBalancer)
.explicitHttpVersion(clientKey.httpVersion)
.contextPath(contextPath);
final JsonFeatures jsonFeatures = clientKey.jsonFeatures;
if (jsonFeatures != null) {
List<MediaTypeCodec> codecs = new ArrayList<>(2);
MediaTypeCodecRegistry codecRegistry = builder.codecRegistry;
for (MediaTypeCodec codec : codecRegistry.getCodecs()) {
if (codec instanceof MapperMediaTypeCodec typeCodec) {
codecs.add(typeCodec.cloneWithFeatures(jsonFeatures));
} else {
codecs.add(codec);
}
}
if (!codecRegistry.findCodec(MediaType.APPLICATION_JSON_TYPE).isPresent()) {
codecs.add(createNewJsonCodec(this.beanContext, jsonFeatures));
}
builder.codecRegistry(MediaTypeCodecRegistry.of(codecs));
builder.handlerRegistry(new MessageBodyHandlerRegistry() {
final MessageBodyHandlerRegistry delegate = builder.handlerRegistry;
@SuppressWarnings("unchecked")
private <T> T customize(T handler) {
if (handler instanceof CustomizableJsonHandler cnjh) {
return (T) cnjh.customize(jsonFeatures);
}
return handler;
}
@Override
public <T> Optional<MessageBodyReader<T>> findReader(Argument<T> type, List<MediaType> mediaType) {
return delegate.findReader(type, mediaType).map(this::customize);
}
@Override
public <T> Optional<MessageBodyWriter<T>> findWriter(Argument<T> type, List<MediaType> mediaType) {
return delegate.findWriter(type, mediaType).map(this::customize);
}
});
}
return builder.build();
});
}
private DefaultHttpClientBuilder clientBuilder(
HttpClientConfiguration configuration,
String clientId,
BeanContext beanContext,
AnnotationMetadata annotationMetadata) {
String addressResolverGroupName = configuration.getAddressResolverGroupName();
DefaultHttpClientBuilder builder = DefaultHttpClient.builder();
beanContext.findBean(RequestBinderRegistry.class).ifPresent(builder::requestBinderRegistry);
return builder
.configuration(configuration)
.filterResolver(clientFilterResolver)
.clientFilterEntries(clientFilterResolver.resolveFilterEntries(new ClientFilterResolutionContext(
clientId == null ? null : Collections.singletonList(clientId),
annotationMetadata
)))
.threadFactory(threadFactory)
.nettyClientSslBuilder(nettyClientSslBuilder)
.sslFactory(sslFactory, certificateProviders)
.codecRegistry(codecRegistry)
.handlerRegistry(handlerRegistry)
.webSocketBeanRegistry(WebSocketBeanRegistry.forClient(beanContext))
.eventLoopGroup(resolveEventLoopGroup(configuration, beanContext))
.socketChannelFactory(resolveSocketChannelFactory(NettyChannelType.CLIENT_SOCKET, configuration, beanContext))
.udpChannelFactory(resolveSocketChannelFactory(NettyChannelType.DATAGRAM_SOCKET, configuration, beanContext))
.clientCustomizer(clientCustomizer)
.informationalServiceId(clientId)
.conversionService(beanContext.getBean(ConversionService.class))
.resolverGroup(addressResolverGroupName == null ? null : beanContext.getBean(AddressResolverGroup.class, Qualifiers.byName(addressResolverGroupName)))
.blockingExecutor(blockingExecutor);
}
private EventLoopGroup resolveEventLoopGroup(HttpClientConfiguration configuration, BeanContext beanContext) {
final String eventLoopGroupName = configuration.getEventLoopGroup();
EventLoopGroup eventLoopGroup;
if (EventLoopGroupConfiguration.DEFAULT.equals(eventLoopGroupName)) {
eventLoopGroup = eventLoopGroupRegistry.getDefaultEventLoopGroup();
} else {
eventLoopGroup = beanContext.findBean(EventLoopGroup.class, Qualifiers.byName(eventLoopGroupName))
.orElseThrow(() -> new HttpClientException("Specified event loop group is not defined: " + eventLoopGroupName));
}
return eventLoopGroup;
}
private DefaultHttpClient resolveDefaultHttpClient(
@Nullable InjectionPoint injectionPoint,
@Nullable LoadBalancer loadBalancer,
@Nullable HttpClientConfiguration configuration,
@NonNull BeanContext beanContext) {
if (loadBalancer != null) {
if (configuration == null) {
configuration = defaultHttpClientConfiguration;
}
DefaultHttpClient c = clientBuilder(
configuration,
null,
beanContext,
AnnotationMetadata.EMPTY_METADATA
)
.loadBalancer(loadBalancer)
.contextPath(loadBalancer.getContextPath().orElse(null))
.build();
balancedClients.add(c);
return c;
} else {
return getClient(injectionPoint != null ? injectionPoint.getAnnotationMetadata() : AnnotationMetadata.EMPTY_METADATA);
}
}
private ChannelFactory<? extends Channel> resolveSocketChannelFactory(NettyChannelType type, HttpClientConfiguration configuration, BeanContext beanContext) {
final String eventLoopGroup = configuration.getEventLoopGroup();
final EventLoopGroupConfiguration eventLoopGroupConfiguration = beanContext.findBean(EventLoopGroupConfiguration.class, Qualifiers.byName(eventLoopGroup))
.orElseGet(() -> {
if (EventLoopGroupConfiguration.DEFAULT.equals(eventLoopGroup)) {
return new DefaultEventLoopGroupConfiguration();
} else {
throw new HttpClientException("Specified event loop group is not defined: " + eventLoopGroup);
}
});
return () -> eventLoopGroupFactory.channelInstance(type, eventLoopGroupConfiguration);
}
private ClientKey getClientKey(AnnotationMetadata metadata) {
HttpVersionSelection httpVersionSelection = HttpVersionSelection.forClientAnnotation(metadata);
String clientId = metadata.stringValue(Client.class).orElse(null);
String path = metadata.stringValue(Client.class, "path").orElse(null);
List<String> filterAnnotation = metadata
.getAnnotationNamesByStereotype(FilterMatcher.class);
final Class<?> configurationClass =
metadata.classValue(Client.class, "configuration").orElse(null);
JsonFeatures jsonFeatures = jsonMapper.detectFeatures(metadata).orElse(null);
return new ClientKey(httpVersionSelection, clientId, filterAnnotation, path, configurationClass, jsonFeatures);
}
private static MediaTypeCodec createNewJsonCodec(BeanContext beanContext, JsonFeatures jsonFeatures) {
return getJsonCodec(beanContext).cloneWithFeatures(jsonFeatures);
}
private static MapperMediaTypeCodec getJsonCodec(BeanContext beanContext) {
return beanContext.getBean(MapperMediaTypeCodec.class, Qualifiers.byName(MapperMediaTypeCodec.REGULAR_JSON_MEDIA_TYPE_CODEC_NAME));
}
@Override
public Set<String> getObservedConfigurationPrefixes() {
return Set.of(DefaultHttpClientConfiguration.PREFIX, ServiceHttpClientConfiguration.PREFIX, SslConfiguration.PREFIX);
}
@Override
public void onApplicationEvent(RefreshEvent event) {
for (DefaultHttpClient client : unbalancedClients.values()) {
client.connectionManager.refresh();
}
for (DefaultHttpClient client : balancedClients) {
client.connectionManager.refresh();
}
}
/**
* Client key.
*/
@Internal
private static final | must |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/streaming/runtime/TimestampITCase.java | {
"start": 31867,
"end": 33419
} | class ____ extends AbstractStreamOperator<Integer>
implements OneInputStreamOperator<Integer, Integer> {
List<Watermark> watermarks;
public static List<Watermark>[] finalWatermarks = new List[PARALLELISM];
private final boolean timestampsEnabled;
public CustomOperator(boolean timestampsEnabled) {
this.timestampsEnabled = timestampsEnabled;
}
@Override
public void processElement(StreamRecord<Integer> element) throws Exception {
if (timestampsEnabled) {
if (element.getTimestamp() != element.getValue()) {
Assert.fail("Timestamps are not properly handled.");
}
}
output.collect(element);
}
@Override
public void processWatermark(Watermark mark) throws Exception {
super.processWatermark(mark);
for (Watermark previousMark : watermarks) {
assertTrue(previousMark.getTimestamp() < mark.getTimestamp());
}
watermarks.add(mark);
latch.trigger();
output.emitWatermark(mark);
}
@Override
public void open() throws Exception {
super.open();
watermarks = new ArrayList<>();
}
@Override
public void close() throws Exception {
super.close();
finalWatermarks[getRuntimeContext().getTaskInfo().getIndexOfThisSubtask()] = watermarks;
}
}
private static | CustomOperator |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/dialect/TestingDialects.java | {
"start": 571,
"end": 704
} | class ____ extends Dialect {
@Override
public DatabaseVersion getVersion() {
return ZERO_VERSION;
}
}
public static | MyDialect |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/refcolnames/misc/Misc0Test.java | {
"start": 816,
"end": 1181
} | class ____ {
@Test
public void test(SessionFactoryScope scope) {
scope.inTransaction(s->{
EntityA a = new EntityA();
EntityB b = new EntityB();
a.flag = 1;
a.entityB = b;
b.entityA = a;
s.persist(a);
s.persist(b);
s.createQuery("from B join entityA", EntityB.class).getSingleResult();
});
}
@Entity(name = "A")
public static | Misc0Test |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/type/descriptor/java/JavaType.java | {
"start": 10407,
"end": 11686
} | interface ____ {
TypeConfiguration getTypeConfiguration();
}
default <X> T coerce(X value, CoercionContext coercionContext) {
//noinspection unchecked
return (T) value;
}
/**
* Creates the {@link JavaType} for the given {@link ParameterizedType}
* based on this {@link JavaType} registered for the raw type.
*
* @since 6.1
*/
@Incubating
default JavaType<T> createJavaType(ParameterizedType parameterizedType, TypeConfiguration typeConfiguration) {
return this;
}
/**
* Return true if the implementation is an instance of {@link TemporalJavaType}
*
* @return true if it is an instance of {@link TemporalJavaType}; false otherwise
*/
default boolean isTemporalType() {
return false;
}
/**
* The check constraint that should be added to the column
* definition in generated DDL.
*
* @param columnName the name of the column
* @param jdbcType the {@link JdbcType} of the mapped column
* @param converter the converter, if any, or null
* @param dialect the SQL {@link Dialect}
* @return a check constraint condition or null
* @since 6.2
*/
@Incubating
default String getCheckCondition(String columnName, JdbcType jdbcType, BasicValueConverter<T, ?> converter, Dialect dialect) {
return null;
}
}
| CoercionContext |
java | mockito__mockito | mockito-core/src/test/java/org/mockito/internal/creation/bytebuddy/AbstractByteBuddyMockMakerTest.java | {
"start": 5144,
"end": 5294
} | class ____ extends SampleClass {
@Override
public String foo() {
return super.foo();
}
}
}
| CallingSuperMethodClass |
java | bumptech__glide | annotation/compiler/test/src/test/java/com/bumptech/glide/annotation/compiler/MultipleEmptyLibraryGlideModuleTest.java | {
"start": 872,
"end": 2173
} | class ____ implements CompilationProvider {
@Rule
public final RegenerateResourcesRule regenerateResourcesRule = new RegenerateResourcesRule(this);
private Compilation compilation;
@Before
public void setUp() {
compilation =
javac()
.withProcessors(new GlideAnnotationProcessor())
.compile(
forResource("EmptyLibraryModule1.java"), forResource("EmptyLibraryModule2.java"));
assertThat(compilation).succeededWithoutWarnings();
}
@Test
public void compilation_generatesAllExpectedFiles() {
Truth.assertThat(compilation.generatedSourceFiles()).hasSize(1);
}
@Test
public void compilation_generatesExpectedIndexerForModules() throws IOException {
String expectedClassName =
"GlideIndexer_GlideModule_com_bumptech_glide_test_EmptyLibraryModule1_com_bumptech_glide"
+ "_test_EmptyLibraryModule2";
assertThat(compilation)
.generatedSourceFile(annotation(expectedClassName))
.hasSourceEquivalentTo(forResource(expectedClassName + ".java"));
}
private JavaFileObject forResource(String name) {
return Util.forResource(getClass().getSimpleName(), name);
}
@Override
public Compilation getCompilation() {
return compilation;
}
}
| MultipleEmptyLibraryGlideModuleTest |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java | {
"start": 49755,
"end": 50978
} | class ____ {
/** @return true if data node is part of the excludedNodes. */
static boolean isExcluded(Set<String> excludedNodes, DatanodeInfo dn) {
return isIn(excludedNodes, dn);
}
/**
* @return true if includedNodes is empty or data node is part of the
* includedNodes.
*/
static boolean isIncluded(Set<String> includedNodes, DatanodeInfo dn) {
return (includedNodes.isEmpty() || isIn(includedNodes, dn));
}
/**
* Match is checked using host name , ip address with and without port
* number.
*
* @return true if the datanode's transfer address matches the set of nodes.
*/
private static boolean isIn(Set<String> datanodes, DatanodeInfo dn) {
return isIn(datanodes, dn.getPeerHostName(), dn.getXferPort())
|| isIn(datanodes, dn.getIpAddr(), dn.getXferPort())
|| isIn(datanodes, dn.getHostName(), dn.getXferPort());
}
/** @return true if nodes contains host or host:port */
private static boolean isIn(Set<String> nodes, String host, int port) {
if (host == null) {
return false;
}
return (nodes.contains(host) || nodes.contains(host + ":" + port));
}
}
}
| Util |
java | apache__flink | flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/nodes/exec/stream/OverWindowTestPrograms.java | {
"start": 1289,
"end": 61353
} | class ____ {
static final TableTestProgram LAG_OVER_FUNCTION =
TableTestProgram.of("over-aggregate-lag", "validates restoring a lag function")
.setupTableSource(
SourceTestStep.newBuilder("t")
.addSchema(
"ts STRING",
"b MAP<DOUBLE, DOUBLE>",
"`r_time` AS TO_TIMESTAMP(`ts`)",
"WATERMARK for `r_time` AS `r_time`")
.producedBeforeRestore(
Row.of(
"2020-04-15 08:00:05",
Collections.singletonMap(42.0, 42.0)))
.producedAfterRestore(
Row.of(
"2020-04-15 08:00:06",
Collections.singletonMap(42.1, 42.1)))
.build())
.setupTableSink(
SinkTestStep.newBuilder("sink_t")
.addSchema("ts STRING", "b MAP<DOUBLE, DOUBLE>")
.consumedBeforeRestore(Row.of("2020-04-15 08:00:05", null))
.consumedAfterRestore(
Row.of(
"2020-04-15 08:00:06",
Collections.singletonMap(42.0, 42.0)))
.build())
.runSql(
"INSERT INTO sink_t SELECT ts, LAG(b, 1) over (order by r_time) AS "
+ "bLag FROM t")
.build();
static SourceTestStep getSource(final String[] schema) {
return SourceTestStep.newBuilder("source_t")
.addSchema(schema)
.addOption("changelog-mode", "I,UB,UA")
.producedBeforeRestore(
Row.of("key1", 1L, 100L),
Row.of("key1", 2L, 200L),
Row.of("key1", 5L, 500L),
Row.of("key1", 6L, 600L),
Row.ofKind(RowKind.UPDATE_BEFORE, "key1", 2L, 200L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 3L, 200L),
Row.of("key2", 1L, 100L),
Row.of("key2", 2L, 200L))
.producedAfterRestore(
Row.of("key3", 1L, 100L),
Row.of("key1", 4L, 400L),
Row.ofKind(RowKind.UPDATE_BEFORE, "key1", 3L, 200L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 3L, 300L))
.build();
}
static final SinkTestStep getSink(String[] schema) {
return SinkTestStep.newBuilder("sink_t")
.addSchema(schema)
.consumedBeforeRestore(
Row.of("key1", 1L, 100L, 1L),
Row.of("key1", 2L, 200L, 3L),
Row.of("key1", 5L, 500L, 8L),
Row.of("key1", 6L, 600L, 14L),
Row.ofKind(RowKind.DELETE, "key1", 2L, 200L, 3L),
Row.ofKind(RowKind.UPDATE_BEFORE, "key1", 5L, 500L, 8L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 5L, 500L, 6L),
Row.ofKind(RowKind.UPDATE_BEFORE, "key1", 6L, 600L, 14L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 6L, 600L, 12L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 3L, 200L, 4L),
Row.ofKind(RowKind.UPDATE_BEFORE, "key1", 5L, 500L, 6L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 5L, 500L, 9L),
Row.ofKind(RowKind.UPDATE_BEFORE, "key1", 6L, 600L, 12L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 6L, 600L, 15L),
Row.of("key2", 1L, 100L, 1L),
Row.of("key2", 2L, 200L, 3L))
.consumedAfterRestore(
Row.of("key3", 1L, 100L, 1L),
Row.of("key1", 4L, 400L, 8L),
Row.ofKind(RowKind.UPDATE_BEFORE, "key1", 5L, 500L, 9L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 5L, 500L, 13L),
Row.ofKind(RowKind.UPDATE_BEFORE, "key1", 6L, 600L, 15L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 6L, 600L, 19L),
Row.ofKind(RowKind.DELETE, "key1", 3L, 200L, 4L),
Row.ofKind(RowKind.UPDATE_BEFORE, "key1", 4L, 400L, 8L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 4L, 400L, 5L),
Row.ofKind(RowKind.UPDATE_BEFORE, "key1", 5L, 500L, 13L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 5L, 500L, 10L),
Row.ofKind(RowKind.UPDATE_BEFORE, "key1", 6L, 600L, 19L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 6L, 600L, 16L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 3L, 300L, 4L),
Row.ofKind(RowKind.UPDATE_BEFORE, "key1", 4L, 400L, 5L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 4L, 400L, 8L),
Row.ofKind(RowKind.UPDATE_BEFORE, "key1", 5L, 500L, 10L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 5L, 500L, 13L),
Row.ofKind(RowKind.UPDATE_BEFORE, "key1", 6L, 600L, 16L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 6L, 600L, 19L))
.build();
}
static SinkTestStep getSink(String[] schema, Row[] beforeData, Row[] afterData) {
return SinkTestStep.newBuilder("sink_t")
.addSchema(schema)
.consumedBeforeRestore(beforeData)
.consumedAfterRestore(afterData)
.build();
}
static final TableTestProgram OVER_AGGREGATE_NON_TIME_RANGE_UNBOUNDED_SUM_RETRACT_MODE =
TableTestProgram.of(
"over-aggregate-non-time-range-unbounded-sum-retract-mode",
"validates restoring a non-time unbounded preceding sum function in retract mode")
.setupTableSource(
getSource(new String[] {"key STRING", "val BIGINT", "ts BIGINT"}))
.setupTableSink(
getSink(
new String[] {
"key STRING", "val BIGINT", "ts BIGINT", "sum_val BIGINT"
}))
.runSql(
"INSERT INTO sink_t SELECT key, val, ts, SUM(val) OVER ("
+ "PARTITION BY key "
+ "ORDER BY val "
+ "RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) "
+ "AS sum_val "
+ "FROM source_t")
.build();
static final TableTestProgram
OVER_AGGREGATE_NON_TIME_RANGE_UNBOUNDED_SUM_RETRACT_MODE_SORT_BY_KEY =
TableTestProgram.of(
"over-aggregate-non-time-range-unbounded-sum-retract-mode-sort-by-key",
"validates restoring a non-time unbounded preceding sum function in retract mode")
.setupTableSource(
getSource(
new String[] {"key STRING", "val BIGINT", "ts BIGINT"}))
.setupTableSink(
SinkTestStep.newBuilder("sink_t")
.addSchema(
"key STRING",
"val BIGINT",
"ts BIGINT",
"sum_val BIGINT")
.consumedBeforeRestore(
Row.of("key1", 1L, 100L, 1L),
Row.of("key1", 2L, 200L, 2L),
Row.of("key1", 5L, 500L, 5L),
Row.of("key1", 6L, 600L, 6L),
Row.ofKind(
RowKind.DELETE, "key1", 2L, 200L, 2L),
Row.ofKind(
RowKind.UPDATE_AFTER,
"key1",
3L,
200L,
3L),
Row.of("key2", 1L, 100L, 2L),
Row.of("key2", 2L, 200L, 2L))
.consumedAfterRestore(
Row.of("key3", 1L, 100L, 3L),
Row.of("key1", 4L, 400L, 4L),
Row.ofKind(
RowKind.DELETE, "key1", 3L, 200L, 3L),
Row.ofKind(
RowKind.UPDATE_AFTER,
"key1",
3L,
300L,
3L))
.build())
.runSql(
"INSERT INTO sink_t SELECT key, val, ts, SUM(val) OVER ("
+ "PARTITION BY val "
+ "ORDER BY key "
+ "RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) "
+ "AS sum_val "
+ "FROM source_t")
.build();
static final TableTestProgram OVER_AGGREGATE_NON_TIME_ROWS_UNBOUNDED_SUM_RETRACT_MODE =
TableTestProgram.of(
"over-aggregate-non-time-rows-unbounded-sum-retract-mode",
"validates restoring a non-time unbounded preceding sum function in retract mode")
.setupTableSource(
getSource(new String[] {"key STRING", "val BIGINT", "ts BIGINT"}))
.setupTableSink(
getSink(
new String[] {
"key STRING", "val BIGINT", "ts BIGINT", "sum_val BIGINT"
}))
.runSql(
"INSERT INTO sink_t SELECT key, val, ts, SUM(val) OVER ("
+ "PARTITION BY key "
+ "ORDER BY val "
+ "ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) "
+ "AS sum_val "
+ "FROM source_t")
.build();
static final TableTestProgram
OVER_AGGREGATE_NON_TIME_ROWS_UNBOUNDED_SUM_RETRACT_MODE_SORT_BY_KEY =
TableTestProgram.of(
"over-aggregate-non-time-rows-unbounded-sum-retract-mode-sort-by-key",
"validates restoring a non-time unbounded preceding sum function in retract mode")
.setupTableSource(
getSource(
new String[] {"key STRING", "val BIGINT", "ts BIGINT"}))
.setupTableSink(
SinkTestStep.newBuilder("sink_t")
.addSchema(
"key STRING",
"val BIGINT",
"ts BIGINT",
"sum_val BIGINT")
.consumedBeforeRestore(
Row.of("key1", 1L, 100L, 1L),
Row.of("key1", 2L, 200L, 2L),
Row.of("key1", 5L, 500L, 5L),
Row.of("key1", 6L, 600L, 6L),
Row.ofKind(
RowKind.DELETE, "key1", 2L, 200L, 2L),
Row.ofKind(
RowKind.UPDATE_AFTER,
"key1",
3L,
200L,
3L),
Row.of("key2", 1L, 100L, 2L),
Row.of("key2", 2L, 200L, 2L))
.consumedAfterRestore(
Row.of("key3", 1L, 100L, 3L),
Row.of("key1", 4L, 400L, 4L),
Row.ofKind(
RowKind.DELETE, "key1", 3L, 200L, 3L),
Row.ofKind(
RowKind.UPDATE_AFTER,
"key1",
3L,
300L,
3L))
.build())
.runSql(
"INSERT INTO sink_t SELECT key, val, ts, SUM(val) OVER ("
+ "PARTITION BY val "
+ "ORDER BY key "
+ "ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) "
+ "AS sum_val "
+ "FROM source_t")
.build();
static final TableTestProgram
OVER_AGGREGATE_NON_TIME_RANGE_UNBOUNDED_SUM_RETRACT_MODE_SOURCE_PRIMARY_KEY =
TableTestProgram.of(
"over-aggregate-non-time-range-unbounded-sum-retract-mode-source-primary-key",
"validates restoring a non-time unbounded preceding sum function in retract mode with source table having primary key")
.setupTableSource(
getSource(
new String[] {
"key STRING",
"val BIGINT",
"ts BIGINT",
"PRIMARY KEY(key) NOT ENFORCED"
}))
.setupTableSink(
getSink(
new String[] {
"key STRING",
"val BIGINT",
"ts BIGINT",
"sum_val BIGINT"
}))
.runSql(
"INSERT INTO sink_t SELECT key, val, ts, SUM(val) OVER ("
+ "PARTITION BY key "
+ "ORDER BY val "
+ "RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) "
+ "AS sum_val "
+ "FROM source_t")
.build();
static final TableTestProgram
OVER_AGGREGATE_NON_TIME_ROWS_UNBOUNDED_SUM_RETRACT_MODE_SOURCE_PRIMARY_KEY =
TableTestProgram.of(
"over-aggregate-non-time-rows-unbounded-sum-retract-mode-source-primary-key",
"validates restoring a non-time unbounded preceding sum function in retract mode with source table having primary key")
.setupTableSource(
getSource(
new String[] {
"key STRING",
"val BIGINT",
"ts BIGINT",
"PRIMARY KEY(key) NOT ENFORCED"
}))
.setupTableSink(
getSink(
new String[] {
"key STRING",
"val BIGINT",
"ts BIGINT",
"sum_val BIGINT"
}))
.runSql(
"INSERT INTO sink_t SELECT key, val, ts, SUM(val) OVER ("
+ "PARTITION BY key "
+ "ORDER BY val "
+ "ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) "
+ "AS sum_val "
+ "FROM source_t")
.build();
static final Row[] SINK_PRIMARY_KEY_BEFORE_DATA =
new Row[] {
Row.of("key1", 1L, 100L, 1L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 2L, 200L, 3L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 5L, 500L, 8L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 6L, 600L, 14L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 5L, 500L, 6L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 6L, 600L, 12L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 3L, 200L, 4L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 5L, 500L, 9L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 6L, 600L, 15L),
Row.of("key2", 1L, 100L, 1L),
Row.ofKind(RowKind.UPDATE_AFTER, "key2", 2L, 200L, 3L)
};
static final Row[] SINK_PRIMARY_KEY_AFTER_DATA =
new Row[] {
Row.of("key3", 1L, 100L, 1L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 4L, 400L, 8L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 5L, 500L, 13L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 6L, 600L, 19L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 4L, 400L, 5L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 5L, 500L, 10L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 6L, 600L, 16L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 3L, 300L, 4L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 4L, 400L, 8L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 5L, 500L, 13L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 6L, 600L, 19L)
};
static final TableTestProgram
OVER_AGGREGATE_NON_TIME_RANGE_UNBOUNDED_SUM_RETRACT_MODE_SINK_PRIMARY_KEY =
TableTestProgram.of(
"over-aggregate-non-time-range-unbounded-sum-retract-mode-sink-primary-key",
"validates restoring a non-time unbounded preceding sum function in retract mode with sink table having primary key")
.setupTableSource(
getSource(
new String[] {"key STRING", "val BIGINT", "ts BIGINT"}))
.setupTableSink(
getSink(
new String[] {
"key STRING",
"val BIGINT",
"ts BIGINT",
"sum_val BIGINT",
"PRIMARY KEY(key) NOT ENFORCED"
},
SINK_PRIMARY_KEY_BEFORE_DATA,
SINK_PRIMARY_KEY_AFTER_DATA))
.runSql(
"INSERT INTO sink_t SELECT key, val, ts, SUM(val) OVER ("
+ "PARTITION BY key "
+ "ORDER BY val "
+ "RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) "
+ "AS sum_val "
+ "FROM source_t")
.build();
static final TableTestProgram
OVER_AGGREGATE_NON_TIME_ROWS_UNBOUNDED_SUM_RETRACT_MODE_SINK_PRIMARY_KEY =
TableTestProgram.of(
"over-aggregate-non-time-rows-unbounded-sum-retract-mode-sink-primary-key",
"validates restoring a non-time unbounded preceding sum function in retract mode with sink table having primary key")
.setupTableSource(
getSource(
new String[] {"key STRING", "val BIGINT", "ts BIGINT"}))
.setupTableSink(
getSink(
new String[] {
"key STRING",
"val BIGINT",
"ts BIGINT",
"sum_val BIGINT",
"PRIMARY KEY(key) NOT ENFORCED"
},
SINK_PRIMARY_KEY_BEFORE_DATA,
SINK_PRIMARY_KEY_AFTER_DATA))
.runSql(
"INSERT INTO sink_t SELECT key, val, ts, SUM(val) OVER ("
+ "PARTITION BY key "
+ "ORDER BY val "
+ "ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) "
+ "AS sum_val "
+ "FROM source_t")
.build();
static final Row[] SOURCE_SINK_PRIMARY_KEY_SINK_BEFORE_DATA =
new Row[] {
Row.of("key1", 1L, 100L, 1L),
Row.ofKind(RowKind.DELETE, "key1", 1L, 100L, 1L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 2L, 200L, 2L),
Row.ofKind(RowKind.DELETE, "key1", 2L, 200L, 2L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 5L, 500L, 5L),
Row.ofKind(RowKind.DELETE, "key1", 5L, 500L, 5L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 6L, 600L, 6L),
Row.ofKind(RowKind.DELETE, "key1", 6L, 600L, 6L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 3L, 200L, 3L),
Row.of("key2", 1L, 100L, 1L),
Row.ofKind(RowKind.DELETE, "key2", 1L, 100L, 1L),
Row.ofKind(RowKind.UPDATE_AFTER, "key2", 2L, 200L, 2L)
};
static final Row[] SOURCE_SINK_PRIMARY_KEY_SINK_AFTER_DATA =
new Row[] {
Row.of("key3", 1L, 100L, 1L),
Row.ofKind(RowKind.DELETE, "key1", 3L, 200L, 3L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 4L, 400L, 4L),
Row.ofKind(RowKind.DELETE, "key1", 4L, 400L, 4L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 3L, 300L, 3L)
};
static final TableTestProgram
OVER_AGGREGATE_NON_TIME_RANGE_UNBOUNDED_SUM_RETRACT_MODE_SOURCE_SINK_PRIMARY_KEY =
TableTestProgram.of(
"over-aggregate-non-time-range-unbounded-sum-retract-mode-source-sink-primary-key",
"validates restoring a non-time unbounded preceding sum function in retract mode with source and sink tables having primary key")
.setupConfig(
// This option helps create a ChangelogNormalize node after the
// source
// which interprets duplicate input records correctly and
// produces -U and +U correctly
ExecutionConfigOptions.TABLE_EXEC_SOURCE_CDC_EVENTS_DUPLICATE,
true)
.setupTableSource(
// The following record is dropped due to
// DropUpdateBefore and ChangelogNormalize
// Row.ofKind(RowKind.UPDATE_BEFORE, "key1", 2L, 200L)
getSource(
new String[] {
"key STRING",
"val BIGINT",
"ts BIGINT",
"PRIMARY KEY(key) NOT ENFORCED"
}))
.setupTableSink(
SinkTestStep.newBuilder("sink_t")
.addSchema(
"key STRING",
"val BIGINT",
"ts BIGINT",
"sum_val BIGINT",
"PRIMARY KEY(key) NOT ENFORCED")
.consumedBeforeRestore(
SOURCE_SINK_PRIMARY_KEY_SINK_BEFORE_DATA)
.consumedAfterRestore(
SOURCE_SINK_PRIMARY_KEY_SINK_AFTER_DATA)
.build())
.runSql(
"INSERT INTO sink_t SELECT key, val, ts, SUM(val) OVER ("
+ "PARTITION BY key "
+ "ORDER BY val "
+ "RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) "
+ "AS sum_val "
+ "FROM source_t")
.build();
static final TableTestProgram
OVER_AGGREGATE_NON_TIME_ROWS_UNBOUNDED_SUM_RETRACT_MODE_SOURCE_SINK_PRIMARY_KEY =
TableTestProgram.of(
"over-aggregate-non-time-rows-unbounded-sum-retract-mode-source-sink-primary-key",
"validates restoring a non-time unbounded preceding sum function in retract mode with source and sink tables having primary key")
.setupConfig(
// This option helps create a ChangelogNormalize node after the
// source
// which interprets duplicate input records correctly and
// produces -U and +U correctly
ExecutionConfigOptions.TABLE_EXEC_SOURCE_CDC_EVENTS_DUPLICATE,
true)
.setupTableSource(
// The following record is dropped due to
// DropUpdateBefore and ChangelogNormalize
// Row.ofKind(RowKind.UPDATE_BEFORE, "key1", 2L, 200L)
getSource(
new String[] {
"key STRING",
"val BIGINT",
"ts BIGINT",
"PRIMARY KEY(key) NOT ENFORCED"
}))
.setupTableSink(
SinkTestStep.newBuilder("sink_t")
.addSchema(
"key STRING",
"val BIGINT",
"ts BIGINT",
"sum_val BIGINT",
"PRIMARY KEY(key) NOT ENFORCED")
.consumedBeforeRestore(
SOURCE_SINK_PRIMARY_KEY_SINK_BEFORE_DATA)
.consumedAfterRestore(
SOURCE_SINK_PRIMARY_KEY_SINK_AFTER_DATA)
.build())
.runSql(
"INSERT INTO sink_t SELECT key, val, ts, SUM(val) OVER ("
+ "PARTITION BY key "
+ "ORDER BY val "
+ "ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) "
+ "AS sum_val "
+ "FROM source_t")
.build();
static final Row[] PARTITION_BY_NON_PK_BEFORE_DATA =
new Row[] {
// This is a +I because the sinkMaterialize PK
// is val
Row.of("key1", 1L, 100L, 100L),
// This is a +I because the sinkMaterialize PK
// is val
Row.of("key1", 2L, 200L, 300L),
// This is a +I because the sinkMaterialize PK
// is val
Row.of("key1", 5L, 500L, 800L),
// This is a +I because the sinkMaterialize PK
// is val
Row.of("key1", 6L, 600L, 1400L),
Row.ofKind(RowKind.DELETE, "key1", 2L, 200L, 300L),
Row.ofKind(RowKind.DELETE, "key1", 5L, 500L, 800L),
Row.of("key1", 5L, 500L, 600L),
Row.ofKind(RowKind.DELETE, "key1", 6L, 600L, 1400L),
Row.of("key1", 6L, 600L, 1200L),
Row.of("key1", 3L, 200L, 300L),
Row.ofKind(RowKind.DELETE, "key1", 5L, 500L, 600L),
Row.of("key1", 5L, 500L, 800L),
Row.ofKind(RowKind.DELETE, "key1", 6L, 600L, 1200L),
Row.of("key1", 6L, 600L, 1400L),
// The following is a +U since val=1L has been
// inserted before
Row.ofKind(RowKind.UPDATE_AFTER, "key2", 1L, 100L, 100L),
// The following is +I because previously val=2L
// was deleted
Row.of("key2", 2L, 200L, 300L)
};
static final Row[] PARTITION_BY_NON_PK_AFTER_DATA =
new Row[] {
Row.ofKind(RowKind.UPDATE_AFTER, "key3", 1L, 100L, 100L),
Row.of("key1", 4L, 400L, 700L),
Row.ofKind(RowKind.DELETE, "key1", 5L, 500L, 800L),
Row.of("key1", 5L, 500L, 1200L),
Row.ofKind(RowKind.DELETE, "key1", 6L, 600L, 1400L),
Row.of("key1", 6L, 600L, 1800L),
Row.ofKind(RowKind.DELETE, "key1", 3L, 200L, 300L),
Row.ofKind(RowKind.DELETE, "key1", 4L, 400L, 700L),
Row.of("key1", 4L, 400L, 500L),
Row.ofKind(RowKind.DELETE, "key1", 5L, 500L, 1200L),
Row.of("key1", 5L, 500L, 1000L),
Row.ofKind(RowKind.DELETE, "key1", 6L, 600L, 1800L),
Row.of("key1", 6L, 600L, 1600L),
Row.of("key1", 3L, 300L, 400L),
Row.ofKind(RowKind.DELETE, "key1", 4L, 400L, 500L),
Row.of("key1", 4L, 400L, 800L),
Row.ofKind(RowKind.DELETE, "key1", 5L, 500L, 1000L),
Row.of("key1", 5L, 500L, 1300L),
Row.ofKind(RowKind.DELETE, "key1", 6L, 600L, 1600L),
Row.of("key1", 6L, 600L, 1900L)
};
static final TableTestProgram
OVER_AGGREGATE_NON_TIME_RANGE_UNBOUNDED_SUM_RETRACT_MODE_SOURCE_SINK_PRIMARY_KEY_PARTITION_BY_NON_PK =
TableTestProgram.of(
"over-aggregate-non-time-range-unbounded-sum-retract-mode-source-sink-primary-key-partition-by-non-pk",
"validates restoring a non-time unbounded preceding sum function in retract mode with source and sink table having primary key but partition by non-primary key")
.setupTableSource(
getSource(
new String[] {
"key STRING",
"val BIGINT",
"ts BIGINT",
"PRIMARY KEY(val) NOT ENFORCED"
}))
.setupTableSink(
SinkTestStep.newBuilder("sink_t")
.addSchema(
"key STRING",
"val BIGINT",
"ts BIGINT",
"sum_val BIGINT",
"PRIMARY KEY(val) NOT ENFORCED")
.consumedBeforeRestore(PARTITION_BY_NON_PK_BEFORE_DATA)
.consumedAfterRestore(PARTITION_BY_NON_PK_AFTER_DATA)
.build())
.runSql(
"INSERT INTO sink_t SELECT key, val, ts, SUM(ts) OVER ("
+ "PARTITION BY key "
+ "ORDER BY val "
+ "RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) "
+ "AS sum_ts "
+ "FROM source_t")
.build();
static final TableTestProgram
OVER_AGGREGATE_NON_TIME_ROWS_UNBOUNDED_SUM_RETRACT_MODE_SOURCE_SINK_PRIMARY_KEY_PARTITION_BY_NON_PK =
TableTestProgram.of(
"over-aggregate-non-time-rows-unbounded-sum-retract-mode-source-sink-primary-key-partition-by-non-pk",
"validates restoring a non-time unbounded preceding sum function in retract mode with source and sink table having primary key but partition by non-primary key")
.setupTableSource(
getSource(
new String[] {
"key STRING",
"val BIGINT",
"ts BIGINT",
"PRIMARY KEY(val) NOT ENFORCED"
}))
.setupTableSink(
SinkTestStep.newBuilder("sink_t")
.addSchema(
"key STRING",
"val BIGINT",
"ts BIGINT",
"sum_val BIGINT",
"PRIMARY KEY(val) NOT ENFORCED")
.consumedBeforeRestore(PARTITION_BY_NON_PK_BEFORE_DATA)
.consumedAfterRestore(PARTITION_BY_NON_PK_AFTER_DATA)
.build())
.runSql(
"INSERT INTO sink_t SELECT key, val, ts, SUM(ts) OVER ("
+ "PARTITION BY key "
+ "ORDER BY val "
+ "ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) "
+ "AS sum_ts "
+ "FROM source_t")
.build();
static final SourceTestStep APPEND_SOURCE =
SourceTestStep.newBuilder("source_t")
.addSchema("key STRING", "val BIGINT", "ts BIGINT")
.addOption("changelog-mode", "I")
.producedBeforeRestore(
Row.of("key1", 1L, 100L),
Row.of("key1", 2L, 200L),
Row.of("key1", 5L, 500L),
Row.of("key1", 6L, 600L),
Row.of("key2", 1L, 100L),
Row.of("key2", 2L, 200L))
.producedAfterRestore(Row.of("key1", 4L, 400L))
.build();
static final Row[] SUM_APPEND_MODE_BEFORE_DATA =
new Row[] {
Row.of("key1", 1L, 100L, 1L),
Row.of("key1", 2L, 200L, 3L),
Row.of("key1", 5L, 500L, 8L),
Row.of("key1", 6L, 600L, 14L),
Row.of("key2", 1L, 100L, 1L),
Row.of("key2", 2L, 200L, 3L)
};
static final Row[] SUM_APPEND_MODE_AFTER_DATA =
new Row[] {
Row.of("key1", 4L, 400L, 7L),
Row.ofKind(RowKind.UPDATE_BEFORE, "key1", 5L, 500L, 8L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 5L, 500L, 12L),
Row.ofKind(RowKind.UPDATE_BEFORE, "key1", 6L, 600L, 14L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 6L, 600L, 18L)
};
static final TableTestProgram OVER_AGGREGATE_NON_TIME_RANGE_UNBOUNDED_SUM_APPEND_MODE =
TableTestProgram.of(
"over-aggregate-non-time-range-unbounded-sum-append-mode",
"validates restoring a non-time unbounded preceding sum function in append mode")
.setupTableSource(APPEND_SOURCE)
.setupTableSink(
SinkTestStep.newBuilder("sink_t")
.addSchema(
"key STRING",
"val BIGINT",
"ts BIGINT",
"sum_val BIGINT")
.consumedBeforeRestore(SUM_APPEND_MODE_BEFORE_DATA)
.consumedAfterRestore(SUM_APPEND_MODE_AFTER_DATA)
.build())
.runSql(
"INSERT INTO sink_t SELECT key, val, ts, SUM(val) OVER ("
+ "PARTITION BY key "
+ "ORDER BY val "
+ "RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) "
+ "AS sum_val "
+ "FROM source_t")
.build();
static final TableTestProgram OVER_AGGREGATE_NON_TIME_ROWS_UNBOUNDED_SUM_APPEND_MODE =
TableTestProgram.of(
"over-aggregate-non-time-rows-unbounded-sum-append-mode",
"validates restoring a non-time unbounded preceding sum function in append mode")
.setupTableSource(APPEND_SOURCE)
.setupTableSink(
SinkTestStep.newBuilder("sink_t")
.addSchema(
"key STRING",
"val BIGINT",
"ts BIGINT",
"sum_val BIGINT")
.consumedBeforeRestore(SUM_APPEND_MODE_BEFORE_DATA)
.consumedAfterRestore(SUM_APPEND_MODE_AFTER_DATA)
.build())
.runSql(
"INSERT INTO sink_t SELECT key, val, ts, SUM(val) OVER ("
+ "PARTITION BY key "
+ "ORDER BY val "
+ "ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) "
+ "AS sum_val "
+ "FROM source_t")
.build();
static final Row[] AVG_APPEND_MODE_BEFORE_DATA =
new Row[] {
Row.of("key1", 1L, 100L, 1L),
Row.of("key1", 2L, 200L, 1L),
Row.of("key1", 5L, 500L, 2L),
Row.of("key1", 6L, 600L, 3L),
Row.of("key2", 1L, 100L, 1L),
Row.of("key2", 2L, 200L, 1L)
};
static final Row[] AVG_APPEND_MODE_AFTER_DATA =
new Row[] {
Row.of("key1", 4L, 400L, 2L),
Row.ofKind(RowKind.UPDATE_BEFORE, "key1", 5L, 500L, 2L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 5L, 500L, 3L),
Row.ofKind(RowKind.UPDATE_BEFORE, "key1", 6L, 600L, 3L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 6L, 600L, 3L)
};
static final TableTestProgram OVER_AGGREGATE_NON_TIME_RANGE_UNBOUNDED_AVG_APPEND_MODE =
TableTestProgram.of(
"over-aggregate-non-time-range-unbounded-avg-append-mode",
"validates restoring a non-time unbounded preceding avg function in append mode")
.setupTableSource(APPEND_SOURCE)
.setupTableSink(
SinkTestStep.newBuilder("sink_t")
.addSchema(
"key STRING",
"val BIGINT",
"ts BIGINT",
"avg_val BIGINT")
.consumedBeforeRestore(AVG_APPEND_MODE_BEFORE_DATA)
.consumedAfterRestore(AVG_APPEND_MODE_AFTER_DATA)
.build())
.runSql(
"INSERT INTO sink_t SELECT key, val, ts, AVG(val) OVER ("
+ "PARTITION BY key "
+ "ORDER BY val "
+ "RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) "
+ "AS avg_val "
+ "FROM source_t")
.build();
static final TableTestProgram OVER_AGGREGATE_NON_TIME_ROWS_UNBOUNDED_AVG_APPEND_MODE =
TableTestProgram.of(
"over-aggregate-non-time-rows-unbounded-avg-append-mode",
"validates restoring a non-time unbounded preceding avg function in append mode")
.setupTableSource(APPEND_SOURCE)
.setupTableSink(
SinkTestStep.newBuilder("sink_t")
.addSchema(
"key STRING",
"val BIGINT",
"ts BIGINT",
"avg_val BIGINT")
.consumedBeforeRestore(AVG_APPEND_MODE_BEFORE_DATA)
.consumedAfterRestore(AVG_APPEND_MODE_AFTER_DATA)
.build())
.runSql(
"INSERT INTO sink_t SELECT key, val, ts, AVG(val) OVER ("
+ "PARTITION BY key "
+ "ORDER BY val "
+ "ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) "
+ "AS avg_val "
+ "FROM source_t")
.build();
static final Row[] MULTIPLE_AGGS_APPEND_MODE_BEFORE_DATA =
new Row[] {
Row.of("key1", 1L, 100L, 1L, 1L),
Row.of("key1", 2L, 200L, 3L, 2L),
Row.of("key1", 5L, 500L, 8L, 3L),
Row.of("key1", 6L, 600L, 14L, 4L),
Row.of("key2", 1L, 100L, 1L, 1L),
Row.of("key2", 2L, 200L, 3L, 2L)
};
static final Row[] MULTIPLE_AGGS_APPEND_MODE_AFTER_DATA =
new Row[] {
Row.of("key1", 4L, 400L, 7L, 3L),
Row.ofKind(RowKind.UPDATE_BEFORE, "key1", 5L, 500L, 8L, 3L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 5L, 500L, 12L, 4L),
Row.ofKind(RowKind.UPDATE_BEFORE, "key1", 6L, 600L, 14L, 4L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 6L, 600L, 18L, 5L)
};
static final TableTestProgram
OVER_AGGREGATE_NON_TIME_RANGE_UNBOUNDED_MULTIPLE_AGGS_APPEND_MODE =
TableTestProgram.of(
"over-aggregate-non-time-range-unbounded-multiple-aggs-append-mode",
"validates restoring a non-time unbounded preceding sum function in append mode with multiple aggregations")
.setupTableSource(APPEND_SOURCE)
.setupTableSink(
SinkTestStep.newBuilder("sink_t")
.addSchema(
"key STRING",
"val BIGINT",
"ts BIGINT",
"sum_val BIGINT",
"cnt_key BIGINT")
.consumedBeforeRestore(
MULTIPLE_AGGS_APPEND_MODE_BEFORE_DATA)
.consumedAfterRestore(
MULTIPLE_AGGS_APPEND_MODE_AFTER_DATA)
.build())
.runSql(
"INSERT INTO sink_t SELECT key, val, ts, "
+ "SUM(val) OVER ("
+ "PARTITION BY key "
+ "ORDER BY val "
+ "RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) "
+ "AS sum_val, "
+ "COUNT(key) OVER ("
+ "PARTITION BY key "
+ "ORDER BY val "
+ "RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) "
+ "AS cnt_key "
+ "FROM source_t")
.build();
static final TableTestProgram OVER_AGGREGATE_NON_TIME_ROWS_UNBOUNDED_MULTIPLE_AGGS_APPEND_MODE =
TableTestProgram.of(
"over-aggregate-non-time-rows-unbounded-multiple-aggs-append-mode",
"validates restoring a non-time unbounded preceding sum function in append mode with multiple aggregations")
.setupTableSource(APPEND_SOURCE)
.setupTableSink(
SinkTestStep.newBuilder("sink_t")
.addSchema(
"key STRING",
"val BIGINT",
"ts BIGINT",
"sum_val BIGINT",
"cnt_key BIGINT")
.consumedBeforeRestore(MULTIPLE_AGGS_APPEND_MODE_BEFORE_DATA)
.consumedAfterRestore(MULTIPLE_AGGS_APPEND_MODE_AFTER_DATA)
.build())
.runSql(
"INSERT INTO sink_t SELECT key, val, ts, "
+ "SUM(val) OVER ("
+ "PARTITION BY key "
+ "ORDER BY val "
+ "ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) "
+ "AS sum_val, "
+ "COUNT(key) OVER ("
+ "PARTITION BY key "
+ "ORDER BY val "
+ "ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) "
+ "AS cnt_key "
+ "FROM source_t")
.build();
static final TableTestProgram OVER_AGGREGATE_NON_TIME_RANGE_UNBOUNDED_SUM_NO_PARTITION_BY =
TableTestProgram.of(
"over-aggregate-non-time-range-unbounded-sum-no-partition-by",
"validates restoring a non-time unbounded preceding sum function without partition by")
.setupTableSource(APPEND_SOURCE)
.setupTableSink(
SinkTestStep.newBuilder("sink_t")
.addSchema(
"key STRING",
"val BIGINT",
"ts BIGINT",
"sum_val BIGINT")
.consumedBeforeRestore(
Row.of("key1", 1L, 100L, 1L),
Row.of("key1", 2L, 200L, 3L),
Row.of("key1", 5L, 500L, 8L),
Row.of("key1", 6L, 600L, 14L),
Row.ofKind(RowKind.UPDATE_BEFORE, "key1", 1L, 100L, 1L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 1L, 100L, 2L),
Row.of("key2", 1L, 100L, 2L),
Row.ofKind(RowKind.UPDATE_BEFORE, "key1", 2L, 200L, 3L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 2L, 200L, 4L),
Row.ofKind(RowKind.UPDATE_BEFORE, "key1", 5L, 500L, 8L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 5L, 500L, 9L),
Row.ofKind(
RowKind.UPDATE_BEFORE, "key1", 6L, 600L, 14L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 6L, 600L, 15L),
Row.ofKind(RowKind.UPDATE_BEFORE, "key1", 2L, 200L, 4L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 2L, 200L, 6L),
Row.of("key2", 2L, 200L, 6L),
Row.ofKind(RowKind.UPDATE_BEFORE, "key1", 5L, 500L, 9L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 5L, 500L, 11L),
Row.ofKind(
RowKind.UPDATE_BEFORE, "key1", 6L, 600L, 15L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 6L, 600L, 17L))
.consumedAfterRestore(
Row.of("key1", 4L, 400L, 10L),
Row.ofKind(
RowKind.UPDATE_BEFORE, "key1", 5L, 500L, 11L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 5L, 500L, 15L),
Row.ofKind(
RowKind.UPDATE_BEFORE, "key1", 6L, 600L, 17L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 6L, 600L, 21L))
.build())
.runSql(
"INSERT INTO sink_t SELECT key, val, ts, SUM(val) OVER ("
+ "ORDER BY val "
+ "RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) "
+ "AS sum_val "
+ "FROM source_t")
.build();
static final TableTestProgram OVER_AGGREGATE_NON_TIME_ROWS_UNBOUNDED_SUM_NO_PARTITION_BY =
TableTestProgram.of(
"over-aggregate-non-time-rows-unbounded-sum-no-partition-by",
"validates restoring a non-time unbounded preceding sum function without partition by")
.setupTableSource(APPEND_SOURCE)
.setupTableSink(
SinkTestStep.newBuilder("sink_t")
.addSchema(
"key STRING",
"val BIGINT",
"ts BIGINT",
"sum_val BIGINT")
.consumedBeforeRestore(
Row.of("key1", 1L, 100L, 1L),
Row.of("key1", 2L, 200L, 3L),
Row.of("key1", 5L, 500L, 8L),
Row.of("key1", 6L, 600L, 14L),
Row.of("key2", 1L, 100L, 2L),
Row.ofKind(RowKind.UPDATE_BEFORE, "key1", 2L, 200L, 3L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 2L, 200L, 4L),
Row.ofKind(RowKind.UPDATE_BEFORE, "key1", 5L, 500L, 8L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 5L, 500L, 9L),
Row.ofKind(
RowKind.UPDATE_BEFORE, "key1", 6L, 600L, 14L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 6L, 600L, 15L),
Row.of("key2", 2L, 200L, 6L),
Row.ofKind(RowKind.UPDATE_BEFORE, "key1", 5L, 500L, 9L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 5L, 500L, 11L),
Row.ofKind(
RowKind.UPDATE_BEFORE, "key1", 6L, 600L, 15L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 6L, 600L, 17L))
.consumedAfterRestore(
Row.of("key1", 4L, 400L, 10L),
Row.ofKind(
RowKind.UPDATE_BEFORE, "key1", 5L, 500L, 11L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 5L, 500L, 15L),
Row.ofKind(
RowKind.UPDATE_BEFORE, "key1", 6L, 600L, 17L),
Row.ofKind(RowKind.UPDATE_AFTER, "key1", 6L, 600L, 21L))
.build())
.runSql(
"INSERT INTO sink_t SELECT key, val, ts, SUM(val) OVER ("
+ "ORDER BY val "
+ "ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) "
+ "AS sum_val "
+ "FROM source_t")
.build();
}
| OverWindowTestPrograms |
java | spring-projects__spring-boot | module/spring-boot-r2dbc/src/test/java/org/springframework/boot/r2dbc/autoconfigure/R2dbcAutoConfigurationTests.java | {
"start": 18541,
"end": 18775
} | class ____ {
@Bean
ConnectionFactoryOptionsBuilderCustomizer customizer() {
return (builder) -> builder.option(Option.valueOf("customized"), true);
}
}
@Configuration(proxyBeanMethods = false)
static | CustomizerConfiguration |
java | alibaba__druid | druid-admin/src/main/java/com/alibaba/druid/admin/model/dto/ConnectionResult.java | {
"start": 474,
"end": 1338
} | class ____ {
@JSONField(name = "id")
private int id;
@JSONField(name = "connectionId")
private int connectionId;
@JSONField(name = "useCount")
private int useCount;
@JSONField(name = "lastActiveTime")
private String lastActiveTime;
@JSONField(name = "connectTime")
private String connectTime;
@JSONField(name = "holdability")
private int holdability;
@JSONField(name = "transactionIsolation")
private int transactionIsolation;
@JSONField(name = "autoCommit")
private boolean autoCommit;
@JSONField(name = "readoOnly")
private boolean readoOnly;
@JSONField(name = "keepAliveCheckCount")
private int keepAliveCheckCount;
@JSONField(name = "pscache")
private List<?> pscache;
}
}
| ContentBean |
java | spring-projects__spring-framework | spring-context-support/src/main/java/org/springframework/cache/jcache/interceptor/JCacheInterceptor.java | {
"start": 1392,
"end": 1754
} | class ____
* contains the integration with Spring's underlying caching API.
* JCacheInterceptor simply calls the relevant superclass method.
*
* <p>JCacheInterceptors are thread-safe.
*
* @author Stephane Nicoll
* @author Juergen Hoeller
* @since 4.1
* @see org.springframework.cache.interceptor.CacheInterceptor
*/
@SuppressWarnings("serial")
public | which |
java | elastic__elasticsearch | x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTicketValidatorTests.java | {
"start": 1181,
"end": 7587
} | class ____ extends KerberosTestCase {
private KerberosTicketValidator kerberosTicketValidator = new KerberosTicketValidator();
public void testKerbTicketGeneratedForDifferentServerFailsValidation() throws Exception {
createPrincipalKeyTab(workDir, "differentServer");
// Client login and init token preparation
final String clientUserName = randomFrom(clientUserNames);
try (
SpnegoClient spnegoClient = new SpnegoClient(
principalName(clientUserName),
new SecureString("spnego-test-password".toCharArray()),
principalName("differentServer"),
randomFrom(KerberosTicketValidator.SUPPORTED_OIDS)
)
) {
final String base64KerbToken = spnegoClient.getBase64EncodedTokenForSpnegoHeader();
assertThat(base64KerbToken, is(notNullValue()));
final Environment env = TestEnvironment.newEnvironment(globalSettings);
final Path keytabPath = getKeytabPath(env);
final PlainActionFuture<Tuple<String, String>> future = new PlainActionFuture<>();
kerberosTicketValidator.validateTicket(Base64.getDecoder().decode(base64KerbToken), keytabPath, true, future);
final GSSException gssException = expectThrows(GSSException.class, () -> unwrapExpectedExceptionFromFutureAndThrow(future));
assertThat(gssException.getMajor(), equalTo(GSSException.FAILURE));
}
}
public void testInvalidKerbTicketFailsValidation() throws Exception {
final String base64KerbToken = Base64.getEncoder().encodeToString(randomByteArrayOfLength(5));
final Environment env = TestEnvironment.newEnvironment(globalSettings);
final Path keytabPath = getKeytabPath(env);
kerberosTicketValidator.validateTicket(
Base64.getDecoder().decode(base64KerbToken),
keytabPath,
true,
new ActionListener<Tuple<String, String>>() {
boolean exceptionHandled = false;
@Override
public void onResponse(Tuple<String, String> response) {
fail("expected exception to be thrown of type GSSException");
}
@Override
public void onFailure(Exception e) {
assertThat(exceptionHandled, is(false));
assertThat(e, instanceOf(GSSException.class));
assertThat(((GSSException) e).getMajor(), equalTo(GSSException.DEFECTIVE_TOKEN));
exceptionHandled = true;
}
}
);
}
public void testWhenKeyTabWithInvalidContentFailsValidation() throws LoginException, GSSException, IOException,
PrivilegedActionException {
// Client login and init token preparation
final String clientUserName = randomFrom(clientUserNames);
try (
SpnegoClient spnegoClient = new SpnegoClient(
principalName(clientUserName),
new SecureString("spnego-test-password".toCharArray()),
principalName(randomFrom(serviceUserNames)),
randomFrom(KerberosTicketValidator.SUPPORTED_OIDS)
);
) {
final String base64KerbToken = spnegoClient.getBase64EncodedTokenForSpnegoHeader();
assertThat(base64KerbToken, is(notNullValue()));
final Path ktabPath = KerberosRealmTestCase.writeKeyTab(workDir.resolve("invalid.keytab"), "not - a - valid - key - tab");
settings = KerberosRealmTestCase.buildKerberosRealmSettings(REALM_NAME, ktabPath.toString());
final Environment env = TestEnvironment.newEnvironment(globalSettings);
final Path keytabPath = getKeytabPath(env);
final PlainActionFuture<Tuple<String, String>> future = new PlainActionFuture<>();
kerberosTicketValidator.validateTicket(Base64.getDecoder().decode(base64KerbToken), keytabPath, true, future);
final GSSException gssException = expectThrows(GSSException.class, () -> unwrapExpectedExceptionFromFutureAndThrow(future));
assertThat(gssException.getMajor(), equalTo(GSSException.FAILURE));
}
}
public void testValidKebrerosTicket() throws PrivilegedActionException, GSSException, LoginException {
// Client login and init token preparation
final String clientUserName = randomFrom(clientUserNames);
final SecureString password = new SecureString("spnego-test-password".toCharArray());
final String servicePrincipalName = principalName(randomFrom(serviceUserNames));
try (
SpnegoClient spnegoClient = new SpnegoClient(
principalName(clientUserName),
password,
servicePrincipalName,
randomFrom(KerberosTicketValidator.SUPPORTED_OIDS)
)
) {
final String base64KerbToken = spnegoClient.getBase64EncodedTokenForSpnegoHeader();
assertThat(base64KerbToken, is(notNullValue()));
final Environment env = TestEnvironment.newEnvironment(globalSettings);
final Path keytabPath = getKeytabPath(env);
final PlainActionFuture<Tuple<String, String>> future = new PlainActionFuture<>();
kerberosTicketValidator.validateTicket(Base64.getDecoder().decode(base64KerbToken), keytabPath, true, future);
assertThat(future.actionGet(), is(notNullValue()));
assertThat(future.actionGet().v1(), equalTo(principalName(clientUserName)));
assertThat(future.actionGet().v2(), is(notNullValue()));
final String outToken = spnegoClient.handleResponse(future.actionGet().v2());
assertThat(outToken, is(nullValue()));
assertThat(spnegoClient.isEstablished(), is(true));
}
}
private void unwrapExpectedExceptionFromFutureAndThrow(PlainActionFuture<Tuple<String, String>> future) throws Throwable {
try {
future.actionGet();
} catch (Throwable t) {
Throwable throwThis = t;
while (throwThis instanceof UncategorizedExecutionException || throwThis instanceof ExecutionException) {
throwThis = throwThis.getCause();
}
throw throwThis;
}
}
}
| KerberosTicketValidatorTests |
java | quarkusio__quarkus | independent-projects/tools/devtools-common/src/main/java/io/quarkus/devtools/codestarts/extension/QuarkusExtensionCodestartCatalog.java | {
"start": 2715,
"end": 2905
} | enum ____ implements DataKey {
EXTENSION_BASE,
QUARKIVERSE,
DEVMODE_TEST,
INTEGRATION_TESTS,
UNIT_TEST,
EXTENSION_CODESTART
}
public | Code |
java | hibernate__hibernate-orm | tooling/metamodel-generator/src/main/java/org/hibernate/processor/annotation/InnerClassMetaAttribute.java | {
"start": 787,
"end": 1584
} | class ____ {@link ")
// .append( parent.getQualifiedName() )
// .append( "}\n **/\n" )
// .append( "public record Id" );
// String delimiter = "(";
// for ( MetaAttribute component : components ) {
// decl.append( delimiter ).append( parent.importType( component.getTypeDeclaration() ) )
// .append( ' ' ).append( component.getPropertyName() );
// delimiter = ", ";
// }
// return decl.append( ") {}" ).toString();
return "";
}
@Override
public String getAttributeNameDeclarationString() {
return "";
}
@Override
public String getMetaType() {
return "";
}
@Override
public String getPropertyName() {
return "";
}
@Override
public String getTypeDeclaration() {
return "";
}
@Override
public Metamodel getHostingEntity() {
return metaEntity;
}
}
| for |
java | apache__logging-log4j2 | log4j-perf-test/src/main/java/org/apache/logging/log4j/perf/jmh/NanotimeBenchmark.java | {
"start": 1326,
"end": 2021
} | class ____ {
public static void main(final String[] args) {}
@Benchmark
@BenchmarkMode(Mode.SampleTime)
@OutputTimeUnit(TimeUnit.NANOSECONDS)
public void baseline() {}
@Benchmark
@BenchmarkMode(Mode.SampleTime)
@OutputTimeUnit(TimeUnit.NANOSECONDS)
public long latency_nanotime() {
return System.nanoTime();
}
private long lastValue;
@Benchmark
@BenchmarkMode(Mode.SampleTime)
@OutputTimeUnit(TimeUnit.NANOSECONDS)
public long granularity_nanotime() {
long cur;
do {
cur = System.nanoTime();
} while (cur == lastValue);
lastValue = cur;
return cur;
}
}
| NanotimeBenchmark |
java | apache__flink | flink-models/flink-model-openai/src/main/java/org/apache/flink/model/openai/OpenAIModelProviderFactory.java | {
"start": 3608,
"end": 4081
} | class ____ implements AsyncPredictRuntimeProvider {
private final AsyncPredictFunction function;
public Provider(AsyncPredictFunction function) {
this.function = function;
}
@Override
public AsyncPredictFunction createAsyncPredictFunction(Context context) {
return function;
}
@Override
public ModelProvider copy() {
return new Provider(function);
}
}
}
| Provider |
java | apache__flink | flink-test-utils-parent/flink-test-utils-junit/src/main/java/org/apache/flink/testutils/junit/SharedObjectsExtension.java | {
"start": 3763,
"end": 5663
} | class ____ an
* instance-field annotated with {@link org.junit.Rule}.
*/
public static SharedObjectsExtension create() {
return new SharedObjectsExtension(LAST_ID.getAndIncrement());
}
private static SharedObjectsExtension get(int sharedObjectsId) {
SharedObjectsExtension sharedObjects = INSTANCES.get(sharedObjectsId);
if (sharedObjects == null) {
throw new IllegalStateException("Object was accessed after the test was completed");
}
return sharedObjects;
}
/**
* Adds a new object to this {@code SharedObjects}. Although not necessary, it is recommended to
* only access the object through the returned {@link SharedReference}.
*/
public <T> SharedReference<T> add(T object) {
SharedReference<T> tag = new SharedObjectsExtension.DefaultTag<>(id, objects.size());
objects.put(tag, object);
return tag;
}
@Override
public void beforeEach(ExtensionContext context) throws Exception {
INSTANCES.put(id, this);
}
@Override
public void afterEach(ExtensionContext context) throws Exception {
objects.clear();
INSTANCES.remove(id);
}
@SuppressWarnings("unchecked")
<T> T get(SharedReference<T> tag) {
T object = (T) objects.get(tag);
if (object == null) {
throw new IllegalStateException("Object was accessed after the test was completed");
}
return object;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
SharedObjectsExtension that = (SharedObjectsExtension) o;
return id == that.id;
}
@Override
public int hashCode() {
return Objects.hash(id);
}
private static | as |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/AbstractRequestMatcherRegistryAnyMatcherTests.java | {
"start": 3927,
"end": 4335
} | class ____ {
@Bean
SecurityFilterChain filterChain(HttpSecurity http) throws Exception {
// @formatter:off
http
.authorizeHttpRequests((requests) -> requests
.anyRequest().authenticated()
.requestMatchers(new RegexRequestMatcher(".*", null)).permitAll());
return http.build();
// @formatter:on
}
}
@Configuration
@EnableWebSecurity
static | RegexMatchersAfterAnyRequestConfig |
java | dropwizard__dropwizard | dropwizard-client/src/main/java/io/dropwizard/client/JerseyIgnoreRequestUserAgentHeaderFilter.java | {
"start": 403,
"end": 650
} | class ____ implements ClientRequestFilter {
@Override
public void filter(ClientRequestContext requestContext) throws IOException {
((ClientRequest) requestContext).ignoreUserAgent(true);
}
}
| JerseyIgnoreRequestUserAgentHeaderFilter |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/AtanEvaluator.java | {
"start": 3836,
"end": 4393
} | class ____ implements EvalOperator.ExpressionEvaluator.Factory {
private final Source source;
private final EvalOperator.ExpressionEvaluator.Factory val;
public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory val) {
this.source = source;
this.val = val;
}
@Override
public AtanEvaluator get(DriverContext context) {
return new AtanEvaluator(source, val.get(context), context);
}
@Override
public String toString() {
return "AtanEvaluator[" + "val=" + val + "]";
}
}
}
| Factory |
java | apache__flink | flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/catalog/ContextResolvedFunction.java | {
"start": 2545,
"end": 2881
} | class ____ meant for internal usages. However, it needs to be kept in sync with the public
* {@link CallExpression} which contains similar context information. The convenience methods {@link
* #fromCallExpression(CallExpression)} and {@link #toCallExpression(List, DataType)} allow a
* symmetric conversion.
*/
@Internal
public final | is |
java | elastic__elasticsearch | server/src/internalClusterTest/java/org/elasticsearch/repositories/SnapshotMetricsIT.java | {
"start": 2809,
"end": 30203
} | class ____ extends AbstractSnapshotIntegTestCase {
private static final String REQUIRE_NODE_NAME_SETTING = IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_PREFIX + "._name";
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return Stream.concat(super.nodePlugins().stream(), Stream.of(TestTelemetryPlugin.class, MockTransportService.TestPlugin.class))
.toList();
}
@Override
protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
return Settings.builder()
.put(super.nodeSettings(nodeOrdinal, otherSettings))
// Make sanity checking duration histograms possible
.put(ESTIMATED_TIME_INTERVAL_SETTING.getKey(), "0s")
.build();
}
public void testSnapshotAPMMetrics() throws Exception {
final String indexName = randomIdentifier();
final int numShards = randomIntBetween(1, 10);
final int numReplicas = randomIntBetween(0, 1);
createIndex(indexName, numShards, numReplicas);
indexRandom(true, indexName, randomIntBetween(3000, 5000));
final String repositoryName = randomIdentifier();
createRepository(
repositoryName,
"mock",
Settings.builder()
.put(randomRepositorySettings().build())
// Making chunk size small and adding throttling increases the likelihood of upload duration being non-zero
.put("chunk_size", ByteSizeValue.ofKb(1))
.put(BlobStoreRepository.MAX_SNAPSHOT_BYTES_PER_SEC.getKey(), ByteSizeValue.ofMb(1))
.put(BlobStoreRepository.MAX_RESTORE_BYTES_PER_SEC.getKey(), ByteSizeValue.ofMb(1))
);
// Block the snapshot to test "snapshot shards in progress"
blockAllDataNodes(repositoryName);
final String snapshotName = randomIdentifier();
final long beforeCreateSnapshotNanos = System.nanoTime();
final ActionFuture<CreateSnapshotResponse> snapshotFuture;
try {
snapshotFuture = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, snapshotName)
.setIndices(indexName)
.setWaitForCompletion(true)
.execute();
// We are able to wait for either the creation to complete (`wait_for_completion=false`), or the snapshot to complete
// (`wait_for_completion=true`), but not both. To know when the creation listeners complete, we must assertBusy
assertBusy(() -> {
collectMetrics();
assertThat(getTotalClusterLongCounterValue(SnapshotMetrics.SNAPSHOTS_STARTED), equalTo(1L));
assertShardsInProgressMetricIs(hasItem(greaterThan(0L)));
});
assertThat(getTotalClusterLongCounterValue(SnapshotMetrics.SNAPSHOTS_COMPLETED), equalTo(0L));
assertThat(getTotalClusterLongCounterValue(SnapshotMetrics.SNAPSHOT_SHARDS_STARTED), greaterThan(0L));
assertThat(getTotalClusterLongCounterValue(SnapshotMetrics.SNAPSHOT_SHARDS_COMPLETED), equalTo(0L));
} finally {
unblockAllDataNodes(repositoryName);
}
// wait for snapshot to finish to test the other metrics
safeGet(snapshotFuture);
final TimeValue snapshotElapsedTime = TimeValue.timeValueNanos(System.nanoTime() - beforeCreateSnapshotNanos);
collectMetrics();
// sanity check blobs, bytes and throttling metrics
assertThat(getTotalClusterLongCounterValue(SnapshotMetrics.SNAPSHOT_BLOBS_UPLOADED), greaterThan(0L));
assertThat(getTotalClusterLongCounterValue(SnapshotMetrics.SNAPSHOT_BYTES_UPLOADED), greaterThan(0L));
assertThat(getTotalClusterLongCounterValue(SnapshotMetrics.SNAPSHOTS_STARTED), equalTo(1L));
assertThat(getTotalClusterLongCounterValue(SnapshotMetrics.SNAPSHOTS_COMPLETED), equalTo(1L));
// Sanity check shard duration observations
assertDoubleHistogramMetrics(SnapshotMetrics.SNAPSHOT_SHARDS_DURATION, hasSize(numShards));
assertDoubleHistogramMetrics(SnapshotMetrics.SNAPSHOT_SHARDS_DURATION, everyItem(lessThan(snapshotElapsedTime.secondsFrac())));
// Sanity check snapshot observations
assertDoubleHistogramMetrics(SnapshotMetrics.SNAPSHOT_DURATION, hasSize(1));
assertDoubleHistogramMetrics(SnapshotMetrics.SNAPSHOT_DURATION, everyItem(lessThan(snapshotElapsedTime.secondsFrac())));
// Work out the maximum amount of concurrency per node
final ThreadPool tp = internalCluster().getDataNodeInstance(ThreadPool.class);
final int snapshotThreadPoolSize = tp.info(ThreadPool.Names.SNAPSHOT).getMax();
final int maximumPerNodeConcurrency = Math.max(snapshotThreadPoolSize, numShards);
// sanity check duration values
final long upperBoundTimeSpentOnSnapshotThingsMillis = internalCluster().numDataNodes() * maximumPerNodeConcurrency
* snapshotElapsedTime.millis();
assertThat(
getTotalClusterLongCounterValue(SnapshotMetrics.SNAPSHOT_UPLOAD_DURATION),
allOf(greaterThan(0L), lessThan(upperBoundTimeSpentOnSnapshotThingsMillis))
);
assertThat(getTotalClusterLongCounterValue(SnapshotMetrics.SNAPSHOT_SHARDS_STARTED), equalTo((long) numShards));
assertThat(getTotalClusterLongCounterValue(SnapshotMetrics.SNAPSHOT_SHARDS_COMPLETED), equalTo((long) numShards));
assertShardsInProgressMetricIs(everyItem(equalTo(0L)));
// assert appropriate attributes are present
final Map<String, Object> expectedAttrs = Map.of("repo_name", repositoryName, "repo_type", "mock");
final Map<String, Object> expectedAttrsWithShardStage = Maps.copyMapWithAddedEntry(
expectedAttrs,
"stage",
IndexShardSnapshotStatus.Stage.DONE.name()
);
final Map<String, Object> expectedAttrsWithSnapshotState = Maps.copyMapWithAddedEntry(
expectedAttrs,
"state",
SnapshotState.SUCCESS.name()
);
assertMetricsHaveAttributes(InstrumentType.LONG_COUNTER, SnapshotMetrics.SNAPSHOTS_STARTED, expectedAttrs);
assertMetricsHaveAttributes(InstrumentType.LONG_COUNTER, SnapshotMetrics.SNAPSHOTS_COMPLETED, expectedAttrsWithSnapshotState);
assertMetricsHaveAttributes(InstrumentType.DOUBLE_HISTOGRAM, SnapshotMetrics.SNAPSHOT_DURATION, expectedAttrsWithSnapshotState);
assertMetricsHaveAttributes(InstrumentType.LONG_COUNTER, SnapshotMetrics.SNAPSHOT_SHARDS_STARTED, expectedAttrs);
assertMetricsHaveAttributes(InstrumentType.LONG_GAUGE, SnapshotMetrics.SNAPSHOT_SHARDS_IN_PROGRESS, expectedAttrs);
assertMetricsHaveAttributes(InstrumentType.LONG_COUNTER, SnapshotMetrics.SNAPSHOT_SHARDS_COMPLETED, expectedAttrsWithShardStage);
assertMetricsHaveAttributes(InstrumentType.DOUBLE_HISTOGRAM, SnapshotMetrics.SNAPSHOT_SHARDS_DURATION, expectedAttrsWithShardStage);
assertMetricsHaveAttributes(InstrumentType.LONG_COUNTER, SnapshotMetrics.SNAPSHOT_UPLOAD_DURATION, expectedAttrs);
assertMetricsHaveAttributes(InstrumentType.LONG_COUNTER, SnapshotMetrics.SNAPSHOT_BYTES_UPLOADED, expectedAttrs);
assertMetricsHaveAttributes(InstrumentType.LONG_COUNTER, SnapshotMetrics.SNAPSHOT_BLOBS_UPLOADED, expectedAttrs);
}
public void testThrottlingMetrics() throws Exception {
final String indexName = randomIdentifier();
final int numShards = randomIntBetween(1, 10);
final int numReplicas = randomIntBetween(0, 1);
createIndex(indexName, numShards, numReplicas);
indexRandom(true, indexName, randomIntBetween(100, 120));
// Create a repository with restrictive throttling settings
final String repositoryName = randomIdentifier();
final Settings.Builder repositorySettings = randomRepositorySettings().put(
BlobStoreRepository.MAX_SNAPSHOT_BYTES_PER_SEC.getKey(),
ByteSizeValue.ofKb(2)
)
.put(BlobStoreRepository.MAX_RESTORE_BYTES_PER_SEC.getKey(), ByteSizeValue.ofKb(2))
// Small chunk size ensures we don't get stuck throttling for too long
.put("chunk_size", ByteSizeValue.ofBytes(100));
createRepository(repositoryName, "mock", repositorySettings, false);
final String snapshotName = randomIdentifier();
final ActionFuture<CreateSnapshotResponse> snapshotFuture;
// Kick off a snapshot
final long snapshotStartTime = System.currentTimeMillis();
snapshotFuture = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, snapshotName)
.setIndices(indexName)
.setWaitForCompletion(true)
.execute();
// Poll until we see some throttling occurring
final long snap_ts0 = System.currentTimeMillis();
assertBusy(() -> {
collectMetrics();
assertThat(getTotalClusterLongCounterValue(SnapshotMetrics.SNAPSHOT_CREATE_THROTTLE_DURATION), greaterThan(0L));
});
assertThat(getTotalClusterLongCounterValue(SnapshotMetrics.SNAPSHOT_RESTORE_THROTTLE_DURATION), equalTo(0L));
// Remove create throttling
final long snap_ts1 = System.currentTimeMillis();
createRepository(
repositoryName,
"mock",
repositorySettings.put(BlobStoreRepository.MAX_SNAPSHOT_BYTES_PER_SEC.getKey(), ByteSizeValue.ZERO),
false
);
final long snap_ts2 = System.currentTimeMillis();
// wait for the snapshot to finish
safeGet(snapshotFuture);
final long snap_ts3 = System.currentTimeMillis();
logger.info(
"saw throttling in [{}] remove throttling took [{}], snapshot took [{}]",
TimeValue.timeValueMillis(snap_ts1 - snap_ts0),
TimeValue.timeValueMillis(snap_ts2 - snap_ts1),
TimeValue.timeValueMillis(snap_ts3 - snap_ts2)
);
// Work out the maximum amount of concurrency per node
final ThreadPool tp = internalCluster().getDataNodeInstance(ThreadPool.class);
final int snapshotThreadPoolSize = tp.info(ThreadPool.Names.SNAPSHOT).getMax();
final int maximumPerNodeConcurrency = Math.max(snapshotThreadPoolSize, numShards);
// we should also have incurred some read duration due to the throttling
final long upperBoundTimeSpentOnSnapshotThingsMillis = internalCluster().numDataNodes() * maximumPerNodeConcurrency * (System
.currentTimeMillis() - snapshotStartTime);
assertThat(
getTotalClusterLongCounterValue(SnapshotMetrics.SNAPSHOT_UPLOAD_READ_DURATION),
allOf(greaterThan(0L), lessThan(upperBoundTimeSpentOnSnapshotThingsMillis))
);
// Restore the snapshot
final long restore_ts0 = System.currentTimeMillis();
ActionFuture<RestoreSnapshotResponse> restoreFuture = clusterAdmin().prepareRestoreSnapshot(
TEST_REQUEST_TIMEOUT,
repositoryName,
snapshotName
).setIndices(indexName).setWaitForCompletion(true).setRenamePattern("(.+)").setRenameReplacement("restored-$1").execute();
final long restore_ts1 = System.currentTimeMillis();
// assert we throttled on restore
assertBusy(() -> {
collectMetrics();
assertThat(getTotalClusterLongCounterValue(SnapshotMetrics.SNAPSHOT_RESTORE_THROTTLE_DURATION), greaterThan(0L));
});
final long restore_ts2 = System.currentTimeMillis();
// Remove restore throttling
createRepository(
repositoryName,
"mock",
repositorySettings.put(BlobStoreRepository.MAX_RESTORE_BYTES_PER_SEC.getKey(), ByteSizeValue.ZERO),
false
);
safeGet(restoreFuture);
final long restore_ts3 = System.currentTimeMillis();
logger.info(
"saw throttling in [{}] remove throttling took [{}], restore took [{}]",
TimeValue.timeValueMillis(restore_ts1 - restore_ts0),
TimeValue.timeValueMillis(restore_ts2 - restore_ts1),
TimeValue.timeValueMillis(restore_ts3 - restore_ts2)
);
// assert appropriate attributes are present
final Map<String, Object> expectedAttrs = Map.of("repo_name", repositoryName, "repo_type", "mock");
assertMetricsHaveAttributes(InstrumentType.LONG_COUNTER, SnapshotMetrics.SNAPSHOT_UPLOAD_READ_DURATION, expectedAttrs);
assertMetricsHaveAttributes(InstrumentType.LONG_COUNTER, SnapshotMetrics.SNAPSHOT_RESTORE_THROTTLE_DURATION, expectedAttrs);
assertMetricsHaveAttributes(InstrumentType.LONG_COUNTER, SnapshotMetrics.SNAPSHOT_CREATE_THROTTLE_DURATION, expectedAttrs);
}
public void testByStateCounts_InitAndQueuedShards() throws Exception {
final String indexName = randomIdentifier();
final int numShards = randomIntBetween(2, 10);
final int numReplicas = randomIntBetween(0, 1);
createIndex(indexName, numShards, numReplicas);
indexRandom(true, indexName, randomIntBetween(100, 300));
final String repositoryName = randomIdentifier();
createRepository(repositoryName, "mock");
// Block repo reads so we can queue snapshots
blockAllDataNodes(repositoryName);
final String snapshotName = randomIdentifier();
final ActionFuture<CreateSnapshotResponse> firstSnapshotFuture;
final ActionFuture<CreateSnapshotResponse> secondSnapshotFuture;
try {
firstSnapshotFuture = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, snapshotName)
.setIndices(indexName)
.setWaitForCompletion(true)
.execute();
waitForBlockOnAnyDataNode(repositoryName);
safeAwait(
(ActionListener<Void> l) -> flushMasterQueue(internalCluster().getCurrentMasterNodeInstance(ClusterService.class), l)
);
// Should be {numShards} in INIT state, and 1 STARTED snapshot
Map<SnapshotsInProgress.ShardState, Long> shardStates = getShardStates();
assertThat(shardStates.get(SnapshotsInProgress.ShardState.INIT), equalTo((long) numShards));
Map<SnapshotsInProgress.State, Long> snapshotStates = getSnapshotStates();
assertThat(snapshotStates.get(SnapshotsInProgress.State.STARTED), equalTo(1L));
// Queue up another snapshot
secondSnapshotFuture = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, randomIdentifier())
.setIndices(indexName)
.setWaitForCompletion(true)
.execute();
awaitNumberOfSnapshotsInProgress(2);
// Should be {numShards} in QUEUED and INIT states, and 2 STARTED snapshots
shardStates = getShardStates();
assertThat(shardStates.get(SnapshotsInProgress.ShardState.INIT), equalTo((long) numShards));
assertThat(shardStates.get(SnapshotsInProgress.ShardState.QUEUED), equalTo((long) numShards));
snapshotStates = getSnapshotStates();
assertThat(snapshotStates.get(SnapshotsInProgress.State.STARTED), equalTo(2L));
} finally {
unblockAllDataNodes(repositoryName);
}
// All statuses should return to zero when the snapshots complete
safeGet(firstSnapshotFuture);
safeGet(secondSnapshotFuture);
getShardStates().forEach((key, value) -> assertThat(value, equalTo(0L)));
getSnapshotStates().forEach((key, value) -> assertThat(value, equalTo(0L)));
// Ensure all common attributes are present
assertMetricsHaveAttributes(
InstrumentType.LONG_GAUGE,
SnapshotMetrics.SNAPSHOT_SHARDS_BY_STATE,
Map.of("repo_name", repositoryName, "repo_type", "mock")
);
assertMetricsHaveAttributes(
InstrumentType.LONG_GAUGE,
SnapshotMetrics.SNAPSHOTS_BY_STATE,
Map.of("repo_name", repositoryName, "repo_type", "mock")
);
}
public void testByStateCounts_PausedForRemovalShards() throws Exception {
final String indexName = randomIdentifier();
final int numShards = randomIntBetween(2, 10);
final int numReplicas = randomIntBetween(0, 1);
final String nodeForRemoval = internalCluster().startDataOnlyNode();
createIndex(
indexName,
Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas)
.put(REQUIRE_NODE_NAME_SETTING, nodeForRemoval)
.build()
);
indexRandom(true, indexName, randomIntBetween(100, 300));
final String repositoryName = randomIdentifier();
createRepository(repositoryName, "mock");
// block the node to be removed
blockNodeOnAnyFiles(repositoryName, nodeForRemoval);
final ClusterService clusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class);
final ActionFuture<CreateSnapshotResponse> snapshotFuture;
try {
// Kick off a snapshot
snapshotFuture = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, randomIdentifier())
.setIndices(indexName)
.setWaitForCompletion(true)
.execute();
// Wait till we're blocked
waitForBlock(nodeForRemoval, repositoryName);
// Put shutdown metadata
putShutdownForRemovalMetadata(nodeForRemoval, clusterService);
} finally {
unblockAllDataNodes(repositoryName);
}
// Wait for snapshot to be paused
safeAwait(createSnapshotPausedListener(clusterService, repositoryName, indexName, numShards));
final Map<SnapshotsInProgress.ShardState, Long> shardStates = getShardStates();
assertThat(shardStates.get(SnapshotsInProgress.ShardState.PAUSED_FOR_NODE_REMOVAL), equalTo((long) numShards));
final Map<SnapshotsInProgress.State, Long> snapshotStates = getSnapshotStates();
assertThat(snapshotStates.get(SnapshotsInProgress.State.STARTED), equalTo(1L));
// clear shutdown metadata to allow snapshot to complete
clearShutdownMetadata(clusterService);
// All statuses should return to zero when the snapshot completes
safeGet(snapshotFuture);
getShardStates().forEach((key, value) -> assertThat(value, equalTo(0L)));
getSnapshotStates().forEach((key, value) -> assertThat(value, equalTo(0L)));
// Ensure all common attributes are present
assertMetricsHaveAttributes(
InstrumentType.LONG_GAUGE,
SnapshotMetrics.SNAPSHOT_SHARDS_BY_STATE,
Map.of("repo_name", repositoryName, "repo_type", "mock")
);
assertMetricsHaveAttributes(
InstrumentType.LONG_GAUGE,
SnapshotMetrics.SNAPSHOTS_BY_STATE,
Map.of("repo_name", repositoryName, "repo_type", "mock")
);
}
public void testByStateCounts_WaitingShards() throws Exception {
final String indexName = randomIdentifier();
final String boundNode = internalCluster().startDataOnlyNode();
final String destinationNode = internalCluster().startDataOnlyNode();
// Create with single shard so we can reliably delay relocation
createIndex(
indexName,
Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(REQUIRE_NODE_NAME_SETTING, boundNode)
.build()
);
indexRandom(true, indexName, randomIntBetween(100, 300));
final String repositoryName = randomIdentifier();
createRepository(repositoryName, "mock");
final MockTransportService transportService = MockTransportService.getInstance(destinationNode);
final CyclicBarrier handoffRequestBarrier = new CyclicBarrier(2);
transportService.addRequestHandlingBehavior(
PeerRecoveryTargetService.Actions.HANDOFF_PRIMARY_CONTEXT,
(handler, request, channel, task) -> {
safeAwait(handoffRequestBarrier);
safeAwait(handoffRequestBarrier);
handler.messageReceived(request, channel, task);
}
);
// Force the index to move to another node
client().admin()
.indices()
.prepareUpdateSettings(indexName)
.setSettings(Settings.builder().put(REQUIRE_NODE_NAME_SETTING, destinationNode).build())
.get();
// Wait for hand-off request to be blocked (the shard should be relocating now)
safeAwait(handoffRequestBarrier);
// Kick off a snapshot
final ActionFuture<CreateSnapshotResponse> snapshotFuture = clusterAdmin().prepareCreateSnapshot(
TEST_REQUEST_TIMEOUT,
repositoryName,
randomIdentifier()
).setIndices(indexName).setWaitForCompletion(true).execute();
// Wait for the snapshot to start
awaitNumberOfSnapshotsInProgress(1);
// Wait till we see a shard in WAITING state
createSnapshotInStateListener(clusterService(), repositoryName, indexName, 1, SnapshotsInProgress.ShardState.WAITING);
// Metrics should have 1 WAITING shard and 1 STARTED snapshot
final Map<SnapshotsInProgress.ShardState, Long> shardStates = getShardStates();
assertThat(shardStates.get(SnapshotsInProgress.ShardState.WAITING), equalTo(1L));
final Map<SnapshotsInProgress.State, Long> snapshotStates = getSnapshotStates();
assertThat(snapshotStates.get(SnapshotsInProgress.State.STARTED), equalTo(1L));
// allow the relocation to complete
safeAwait(handoffRequestBarrier);
// All statuses should return to zero when the snapshot completes
safeGet(snapshotFuture);
getShardStates().forEach((key, value) -> assertThat(value, equalTo(0L)));
getSnapshotStates().forEach((key, value) -> assertThat(value, equalTo(0L)));
// Ensure all common attributes are present
assertMetricsHaveAttributes(
InstrumentType.LONG_GAUGE,
SnapshotMetrics.SNAPSHOT_SHARDS_BY_STATE,
Map.of("repo_name", repositoryName, "repo_type", "mock")
);
assertMetricsHaveAttributes(
InstrumentType.LONG_GAUGE,
SnapshotMetrics.SNAPSHOTS_BY_STATE,
Map.of("repo_name", repositoryName, "repo_type", "mock")
);
}
private Map<SnapshotsInProgress.ShardState, Long> getShardStates() {
collectMetrics();
return allTestTelemetryPlugins().flatMap(testTelemetryPlugin -> {
final List<Measurement> longGaugeMeasurement = testTelemetryPlugin.getLongGaugeMeasurement(
SnapshotMetrics.SNAPSHOT_SHARDS_BY_STATE
);
final Map<SnapshotsInProgress.ShardState, Long> shardStates = new HashMap<>();
// last one in wins
for (Measurement measurement : longGaugeMeasurement) {
shardStates.put(
SnapshotsInProgress.ShardState.valueOf(measurement.attributes().get("state").toString()),
measurement.getLong()
);
}
return shardStates.entrySet().stream();
}).collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue, Long::sum));
}
private Map<SnapshotsInProgress.State, Long> getSnapshotStates() {
collectMetrics();
return allTestTelemetryPlugins().flatMap(testTelemetryPlugin -> {
final List<Measurement> longGaugeMeasurement = testTelemetryPlugin.getLongGaugeMeasurement(SnapshotMetrics.SNAPSHOTS_BY_STATE);
final Map<SnapshotsInProgress.State, Long> shardStates = new HashMap<>();
// last one in wins
for (Measurement measurement : longGaugeMeasurement) {
shardStates.put(SnapshotsInProgress.State.valueOf(measurement.attributes().get("state").toString()), measurement.getLong());
}
return shardStates.entrySet().stream();
}).collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue, Long::sum));
}
private static void assertMetricsHaveAttributes(
InstrumentType instrumentType,
String metricName,
Map<String, Object> expectedAttributes
) {
final List<Measurement> clusterMeasurements = getClusterMeasurements(instrumentType, metricName);
assertThat(clusterMeasurements, not(empty()));
clusterMeasurements.forEach(recordingMetric -> {
for (Map.Entry<String, Object> entry : expectedAttributes.entrySet()) {
assertThat(recordingMetric.attributes(), hasEntry(entry.getKey(), entry.getValue()));
}
});
}
private static List<Measurement> getClusterMeasurements(InstrumentType instrumentType, String metricName) {
return allTestTelemetryPlugins().flatMap(
testTelemetryPlugin -> ((RecordingMeterRegistry) testTelemetryPlugin.getTelemetryProvider(Settings.EMPTY).getMeterRegistry())
.getRecorder()
.getMeasurements(instrumentType, metricName)
.stream()
).toList();
}
private static void assertDoubleHistogramMetrics(String metricName, Matcher<? super List<Double>> matcher) {
final List<Double> values = allTestTelemetryPlugins().flatMap(testTelemetryPlugin -> {
final List<Measurement> doubleHistogramMeasurement = testTelemetryPlugin.getDoubleHistogramMeasurement(metricName);
return doubleHistogramMeasurement.stream().map(Measurement::getDouble);
}).toList();
assertThat(values, matcher);
}
private static void assertShardsInProgressMetricIs(Matcher<? super List<Long>> matcher) {
final List<Long> values = allTestTelemetryPlugins().map(testTelemetryPlugin -> {
final List<Measurement> longGaugeMeasurement = testTelemetryPlugin.getLongGaugeMeasurement(
SnapshotMetrics.SNAPSHOT_SHARDS_IN_PROGRESS
);
return longGaugeMeasurement.getLast().getLong();
}).toList();
assertThat(values, matcher);
}
private static void collectMetrics() {
allTestTelemetryPlugins().forEach(TestTelemetryPlugin::collect);
}
private long getTotalClusterLongCounterValue(String metricName) {
return allTestTelemetryPlugins().flatMap(testTelemetryPlugin -> testTelemetryPlugin.getLongCounterMeasurement(metricName).stream())
.mapToLong(Measurement::getLong)
.sum();
}
private static Stream<TestTelemetryPlugin> allTestTelemetryPlugins() {
return StreamSupport.stream(internalCluster().getDataOrMasterNodeInstances(PluginsService.class).spliterator(), false)
.flatMap(pluginsService -> pluginsService.filterPlugins(TestTelemetryPlugin.class));
}
}
| SnapshotMetricsIT |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureEncoder.java | {
"start": 1884,
"end": 6643
} | class ____ {
private final ErasureCoderOptions coderOptions;
public RawErasureEncoder(ErasureCoderOptions coderOptions) {
this.coderOptions = coderOptions;
}
/**
* Encode with inputs and generates outputs.
*
* Note, for both inputs and outputs, no mixing of on-heap buffers and direct
* buffers are allowed.
*
* If the coder option ALLOW_CHANGE_INPUTS is set true (false by default), the
* content of input buffers may change after the call, subject to concrete
* implementation. Anyway the positions of input buffers will move forward.
*
* @param inputs input buffers to read data from. The buffers' remaining will
* be 0 after encoding
* @param outputs output buffers to put the encoded data into, ready to read
* after the call
* @throws IOException if the encoder is closed.
*/
public void encode(ByteBuffer[] inputs, ByteBuffer[] outputs)
throws IOException {
ByteBufferEncodingState bbeState = new ByteBufferEncodingState(
this, inputs, outputs);
boolean usingDirectBuffer = bbeState.usingDirectBuffer;
int dataLen = bbeState.encodeLength;
if (dataLen == 0) {
return;
}
int[] inputPositions = new int[inputs.length];
for (int i = 0; i < inputPositions.length; i++) {
if (inputs[i] != null) {
inputPositions[i] = inputs[i].position();
}
}
if (usingDirectBuffer) {
doEncode(bbeState);
} else {
ByteArrayEncodingState baeState = bbeState.convertToByteArrayState();
doEncode(baeState);
}
for (int i = 0; i < inputs.length; i++) {
if (inputs[i] != null) {
// dataLen bytes consumed
inputs[i].position(inputPositions[i] + dataLen);
}
}
}
/**
* Perform the real encoding work using direct ByteBuffer.
* @param encodingState the encoding state.
* @throws IOException raised on errors performing I/O.
*/
protected abstract void doEncode(ByteBufferEncodingState encodingState)
throws IOException;
/**
* Encode with inputs and generates outputs. More see above.
*
* @param inputs input buffers to read data from
* @param outputs output buffers to put the encoded data into, read to read
* after the call
* @throws IOException raised on errors performing I/O.
*/
public void encode(byte[][] inputs, byte[][] outputs) throws IOException {
ByteArrayEncodingState baeState = new ByteArrayEncodingState(
this, inputs, outputs);
int dataLen = baeState.encodeLength;
if (dataLen == 0) {
return;
}
doEncode(baeState);
}
/**
* Perform the real encoding work using bytes array, supporting offsets
* and lengths.
* @param encodingState the encoding state
* @throws IOException raised on errors performing I/O.
*/
protected abstract void doEncode(ByteArrayEncodingState encodingState)
throws IOException;
/**
* Encode with inputs and generates outputs. More see above.
*
* @param inputs input buffers to read data from
* @param outputs output buffers to put the encoded data into, read to read
* after the call
* @throws IOException if the encoder is closed.
*/
public void encode(ECChunk[] inputs, ECChunk[] outputs) throws IOException {
ByteBuffer[] newInputs = ECChunk.toBuffers(inputs);
ByteBuffer[] newOutputs = ECChunk.toBuffers(outputs);
encode(newInputs, newOutputs);
}
public int getNumDataUnits() {
return coderOptions.getNumDataUnits();
}
public int getNumParityUnits() {
return coderOptions.getNumParityUnits();
}
public int getNumAllUnits() {
return coderOptions.getNumAllUnits();
}
/**
* Tell if direct buffer is preferred or not. It's for callers to
* decide how to allocate coding chunk buffers, using DirectByteBuffer or
* bytes array. It will return false by default.
* @return true if native buffer is preferred for performance consideration,
* otherwise false.
*/
public boolean preferDirectBuffer() {
return false;
}
/**
* Allow change into input buffers or not while perform encoding/decoding.
* @return true if it's allowed to change inputs, false otherwise
*/
public boolean allowChangeInputs() {
return coderOptions.allowChangeInputs();
}
/**
* Allow to dump verbose info during encoding/decoding.
* @return true if it's allowed to do verbose dump, false otherwise.
*/
public boolean allowVerboseDump() {
return coderOptions.allowVerboseDump();
}
/**
* Should be called when release this coder. Good chance to release encoding
* or decoding buffers
*/
public void release() {
// Nothing to do here.
}
}
| RawErasureEncoder |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/GetClassOnClassTest.java | {
"start": 922,
"end": 1465
} | class ____ {
private final CompilationTestHelper compilationHelper =
CompilationTestHelper.newInstance(GetClassOnClass.class, getClass());
@Test
public void positiveCase() {
compilationHelper
.addSourceLines(
"GetClassOnClassPositiveCases.java",
"""
package com.google.errorprone.bugpatterns.testdata;
/**
* @author chy@google.com (Christine Yang)
* @author kmuhlrad@google.com (Katy Muhlrad)
*/
public | GetClassOnClassTest |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/rest/RestServerEndpointITCase.java | {
"start": 46270,
"end": 46560
} | enum ____ implements TestVersionSelectionHeadersBase {
INSTANCE;
@Override
public Collection<RuntimeRestAPIVersion> getSupportedAPIVersions() {
return Collections.singleton(RuntimeRestAPIVersion.V0);
}
}
private | TestVersionSelectionHeaders1 |
java | google__error-prone | core/src/main/java/com/google/errorprone/bugpatterns/threadsafety/GuardedByExpression.java | {
"start": 1656,
"end": 1933
} | class ____ extends GuardedByExpression {
public static ClassLiteral create(Symbol owner) {
return new AutoValue_GuardedByExpression_ClassLiteral(Kind.CLASS_LITERAL, owner, owner.type);
}
}
/**
* The base expression for a static member select on a | ClassLiteral |
java | spring-projects__spring-framework | spring-webflux/src/main/java/org/springframework/web/reactive/function/server/RequestPredicates.java | {
"start": 15093,
"end": 19052
} | interface ____ {
/**
* Receive notification of an HTTP method predicate.
* @param methods the HTTP methods that make up the predicate
* @see RequestPredicates#method(HttpMethod)
*/
void method(Set<HttpMethod> methods);
/**
* Receive notification of a path predicate.
* @param pattern the path pattern that makes up the predicate
* @see RequestPredicates#path(String)
* @see org.springframework.web.util.pattern.PathPattern
*/
void path(String pattern);
/**
* Receive notification of a path extension predicate.
* @param extension the path extension that makes up the predicate
* @see RequestPredicates#pathExtension(String)
* @deprecated without replacement to discourage use of path extensions for request
* mapping and for content negotiation (with similar deprecations and removals already
* applied to annotated controllers). For further context, please read issue
* <a href="https://github.com/spring-projects/spring-framework/issues/24179">#24179</a>
*/
@Deprecated(since = "7.0", forRemoval = true)
void pathExtension(String extension);
/**
* Receive notification of an HTTP header predicate.
* @param name the name of the HTTP header to check
* @param value the desired value of the HTTP header
* @see RequestPredicates#headers(Predicate)
* @see RequestPredicates#contentType(MediaType...)
* @see RequestPredicates#accept(MediaType...)
*/
void header(String name, String value);
/**
* Receive notification of a query parameter predicate.
* @param name the name of the query parameter
* @param value the desired value of the parameter
* @see RequestPredicates#queryParam(String, String)
*/
void queryParam(String name, String value);
/**
* Receive notification of an API version predicate. The version could
* be fixed ("1.2") or baseline ("1.2+").
* @param version the configured version
* @since 7.0
*/
void version(String version);
/**
* Receive first notification of a logical AND predicate.
* The first subsequent notification will contain the left-hand side of the AND-predicate;
* followed by {@link #and()}, followed by the right-hand side, followed by {@link #endAnd()}.
* @see RequestPredicate#and(RequestPredicate)
*/
void startAnd();
/**
* Receive "middle" notification of a logical AND predicate.
* The following notification contains the right-hand side, followed by {@link #endAnd()}.
* @see RequestPredicate#and(RequestPredicate)
*/
void and();
/**
* Receive last notification of a logical AND predicate.
* @see RequestPredicate#and(RequestPredicate)
*/
void endAnd();
/**
* Receive first notification of a logical OR predicate.
* The first subsequent notification will contain the left-hand side of the OR-predicate;
* the second notification contains the right-hand side, followed by {@link #endOr()}.
* @see RequestPredicate#or(RequestPredicate)
*/
void startOr();
/**
* Receive "middle" notification of a logical OR predicate.
* The following notification contains the right-hand side, followed by {@link #endOr()}.
* @see RequestPredicate#or(RequestPredicate)
*/
void or();
/**
* Receive last notification of a logical OR predicate.
* @see RequestPredicate#or(RequestPredicate)
*/
void endOr();
/**
* Receive first notification of a negated predicate.
* The first subsequent notification will contain the negated predicated, followed
* by {@link #endNegate()}.
* @see RequestPredicate#negate()
*/
void startNegate();
/**
* Receive last notification of a negated predicate.
* @see RequestPredicate#negate()
*/
void endNegate();
/**
* Receive first notification of an unknown predicate.
*/
void unknown(RequestPredicate predicate);
}
/**
* Extension of {@code RequestPredicate} that can modify the {@code ServerRequest}.
*/
abstract static | Visitor |
java | spring-projects__spring-framework | spring-core/src/test/java/org/springframework/core/annotation/MergedAnnotationsTests.java | {
"start": 108488,
"end": 108676
} | interface ____ {
@AliasFor("locations")
String[] value() default {};
@AliasFor("value")
String[] locations() default {};
}
@Retention(RetentionPolicy.RUNTIME)
@ | TestPropertySource |
java | google__gson | gson/src/main/java/com/google/gson/internal/bind/JsonAdapterAnnotationTypeAdapterFactory.java | {
"start": 5896,
"end": 8427
} | class ____ field
TypeAdapterFactory skipPast;
if (isClassAnnotation) {
skipPast = TREE_TYPE_CLASS_DUMMY_FACTORY;
} else {
skipPast = TREE_TYPE_FIELD_DUMMY_FACTORY;
}
@SuppressWarnings({"unchecked", "rawtypes"})
TypeAdapter<?> tempAdapter =
new TreeTypeAdapter(serializer, deserializer, gson, type, skipPast, nullSafe);
typeAdapter = tempAdapter;
// TreeTypeAdapter handles nullSafe; don't additionally call `nullSafe()`
nullSafe = false;
} else {
throw new IllegalArgumentException(
"Invalid attempt to bind an instance of "
+ instance.getClass().getName()
+ " as a @JsonAdapter for "
+ type.toString()
+ ". @JsonAdapter value must be a TypeAdapter, TypeAdapterFactory,"
+ " JsonSerializer or JsonDeserializer.");
}
if (typeAdapter != null && nullSafe) {
typeAdapter = typeAdapter.nullSafe();
}
return typeAdapter;
}
/**
* Returns whether {@code factory} is a type adapter factory created for {@code @JsonAdapter}
* placed on {@code type}.
*/
public boolean isClassJsonAdapterFactory(TypeToken<?> type, TypeAdapterFactory factory) {
Objects.requireNonNull(type);
Objects.requireNonNull(factory);
if (factory == TREE_TYPE_CLASS_DUMMY_FACTORY) {
return true;
}
// Using raw type to match behavior of `create(Gson, TypeToken<T>)` above
Class<?> rawType = type.getRawType();
TypeAdapterFactory existingFactory = adapterFactoryMap.get(rawType);
if (existingFactory != null) {
// Checks for reference equality, like it is done by `Gson.getDelegateAdapter`
return existingFactory == factory;
}
// If no factory has been created for the type yet check manually for a @JsonAdapter annotation
// which specifies a TypeAdapterFactory
// Otherwise behavior would not be consistent, depending on whether or not adapter had been
// requested before call to `isClassJsonAdapterFactory` was made
JsonAdapter annotation = getAnnotation(rawType);
if (annotation == null) {
return false;
}
Class<?> adapterClass = annotation.value();
if (!TypeAdapterFactory.class.isAssignableFrom(adapterClass)) {
return false;
}
Object adapter = createAdapter(constructorConstructor, adapterClass);
TypeAdapterFactory newFactory = (TypeAdapterFactory) adapter;
return putFactoryAndGetCurrent(rawType, newFactory) == factory;
}
}
| or |
java | elastic__elasticsearch | modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java | {
"start": 18787,
"end": 19738
} | class ____.elasticsearch.action.search.SearchRequest] is not compatible with version")
);
assertThat(primary.getMessage(), containsString("'search.check_ccs_compatibility' setting is enabled."));
assertThat(
underlying.getMessage(),
matchesRegex(
"\\[fail_before_current_version] was released first in version .+,"
+ " failed compatibility check trying to send it to node with version .+"
)
);
}
public static void assertHitCount(SearchTemplateRequestBuilder requestBuilder, long expectedHitCount) {
assertResponse(requestBuilder, response -> ElasticsearchAssertions.assertHitCount(response.getResponse(), expectedHitCount));
}
private void putJsonStoredScript(String id, String jsonContent) {
assertAcked(safeExecute(TransportPutStoredScriptAction.TYPE, newPutStoredScriptTestRequest(id, jsonContent)));
}
}
| org |
java | spring-projects__spring-security | web/src/test/java/org/springframework/security/web/csrf/CsrfTokenRequestAttributeHandlerTests.java | {
"start": 1297,
"end": 5340
} | class ____ {
private MockHttpServletRequest request;
private MockHttpServletResponse response;
private CsrfToken token;
private CsrfTokenRequestAttributeHandler handler;
@BeforeEach
public void setup() {
this.request = new MockHttpServletRequest();
this.response = new MockHttpServletResponse();
this.token = new DefaultCsrfToken("headerName", "paramName", "csrfTokenValue");
this.handler = new CsrfTokenRequestAttributeHandler();
}
@Test
public void handleWhenRequestIsNullThenThrowsIllegalArgumentException() {
assertThatIllegalArgumentException()
.isThrownBy(() -> this.handler.handle(null, this.response, () -> this.token))
.withMessage("request cannot be null");
}
@Test
public void handleWhenResponseIsNullThenThrowsIllegalArgumentException() {
// @formatter:off
assertThatIllegalArgumentException()
.isThrownBy(() -> this.handler.handle(this.request, null, () -> this.token))
.withMessage("response cannot be null");
// @formatter:on
}
@Test
public void handleWhenCsrfTokenSupplierIsNullThenThrowsIllegalArgumentException() {
assertThatIllegalArgumentException().isThrownBy(() -> this.handler.handle(this.request, this.response, null))
.withMessage("deferredCsrfToken cannot be null");
}
@Test
public void handleWhenCsrfTokenIsNullThenThrowsIllegalArgumentException() {
// @formatter:off
this.handler.setCsrfRequestAttributeName(null);
assertThatIllegalStateException()
.isThrownBy(() -> this.handler.handle(this.request, this.response, () -> null))
.withMessage("csrfTokenSupplier returned null delegate");
// @formatter:on
}
@Test
public void handleWhenCsrfRequestAttributeSetThenUsed() {
this.handler.setCsrfRequestAttributeName("_csrf");
this.handler.handle(this.request, this.response, () -> this.token);
assertThatCsrfToken(this.request.getAttribute(CsrfToken.class.getName())).isEqualTo(this.token);
assertThatCsrfToken(this.request.getAttribute("_csrf")).isEqualTo(this.token);
}
@Test
public void handleWhenValidParametersThenRequestAttributesSet() {
this.handler.handle(this.request, this.response, () -> this.token);
assertThatCsrfToken(this.request.getAttribute(CsrfToken.class.getName())).isEqualTo(this.token);
assertThatCsrfToken(this.request.getAttribute("_csrf")).isEqualTo(this.token);
}
@Test
public void resolveCsrfTokenValueWhenRequestIsNullThenThrowsIllegalArgumentException() {
assertThatIllegalArgumentException().isThrownBy(() -> this.handler.resolveCsrfTokenValue(null, this.token))
.withMessage("request cannot be null");
}
@Test
public void resolveCsrfTokenValueWhenCsrfTokenIsNullThenThrowsIllegalArgumentException() {
assertThatIllegalArgumentException().isThrownBy(() -> this.handler.resolveCsrfTokenValue(this.request, null))
.withMessage("csrfToken cannot be null");
}
@Test
public void resolveCsrfTokenValueWhenTokenNotSetThenReturnsNull() {
String tokenValue = this.handler.resolveCsrfTokenValue(this.request, this.token);
assertThat(tokenValue).isNull();
}
@Test
public void resolveCsrfTokenValueWhenParameterSetThenReturnsTokenValue() {
this.request.setParameter(this.token.getParameterName(), this.token.getToken());
String tokenValue = this.handler.resolveCsrfTokenValue(this.request, this.token);
assertThat(tokenValue).isEqualTo(this.token.getToken());
}
@Test
public void resolveCsrfTokenValueWhenHeaderSetThenReturnsTokenValue() {
this.request.addHeader(this.token.getHeaderName(), this.token.getToken());
String tokenValue = this.handler.resolveCsrfTokenValue(this.request, this.token);
assertThat(tokenValue).isEqualTo(this.token.getToken());
}
@Test
public void resolveCsrfTokenValueWhenHeaderAndParameterSetThenHeaderIsPreferred() {
this.request.addHeader(this.token.getHeaderName(), "header");
this.request.setParameter(this.token.getParameterName(), "parameter");
String tokenValue = this.handler.resolveCsrfTokenValue(this.request, this.token);
assertThat(tokenValue).isEqualTo("header");
}
}
| CsrfTokenRequestAttributeHandlerTests |
java | apache__kafka | metadata/src/main/java/org/apache/kafka/metadata/BrokerRegistration.java | {
"start": 1759,
"end": 14010
} | class ____ {
private int id;
private long epoch;
private Uuid incarnationId;
private Map<String, Endpoint> listeners;
private Map<String, VersionRange> supportedFeatures;
private Optional<String> rack;
private boolean fenced;
private boolean inControlledShutdown;
private boolean isMigratingZkBroker;
private List<Uuid> directories;
public Builder() {
this.id = 0;
this.epoch = -1;
this.incarnationId = null;
this.listeners = new HashMap<>();
this.supportedFeatures = new HashMap<>();
this.rack = Optional.empty();
this.fenced = false;
this.inControlledShutdown = false;
this.isMigratingZkBroker = false;
this.directories = List.of();
}
public Builder setId(int id) {
this.id = id;
return this;
}
public Builder setEpoch(long epoch) {
this.epoch = epoch;
return this;
}
public Builder setIncarnationId(Uuid incarnationId) {
this.incarnationId = incarnationId;
return this;
}
public Builder setListeners(List<Endpoint> listeners) {
Map<String, Endpoint> listenersMap = new HashMap<>();
for (Endpoint endpoint : listeners) {
listenersMap.put(endpoint.listener(), endpoint);
}
this.listeners = listenersMap;
return this;
}
public Builder setListeners(Map<String, Endpoint> listeners) {
this.listeners = listeners;
return this;
}
public Builder setSupportedFeatures(Map<String, VersionRange> supportedFeatures) {
this.supportedFeatures = supportedFeatures;
return this;
}
public Builder setRack(Optional<String> rack) {
Objects.requireNonNull(rack);
this.rack = rack;
return this;
}
public Builder setFenced(boolean fenced) {
this.fenced = fenced;
return this;
}
public Builder setInControlledShutdown(boolean inControlledShutdown) {
this.inControlledShutdown = inControlledShutdown;
return this;
}
public Builder setIsMigratingZkBroker(boolean isMigratingZkBroker) {
this.isMigratingZkBroker = isMigratingZkBroker;
return this;
}
public Builder setDirectories(List<Uuid> directories) {
this.directories = directories;
return this;
}
public BrokerRegistration build() {
return new BrokerRegistration(
id,
epoch,
incarnationId,
listeners,
supportedFeatures,
rack,
fenced,
inControlledShutdown,
isMigratingZkBroker,
directories);
}
}
private final int id;
private final long epoch;
private final Uuid incarnationId;
private final Map<String, Endpoint> listeners;
private final Map<String, VersionRange> supportedFeatures;
private final Optional<String> rack;
private final boolean fenced;
private final boolean inControlledShutdown;
private final boolean isMigratingZkBroker;
private final List<Uuid> directories;
private BrokerRegistration(
int id,
long epoch,
Uuid incarnationId,
Map<String, Endpoint> listeners,
Map<String, VersionRange> supportedFeatures,
Optional<String> rack,
boolean fenced,
boolean inControlledShutdown,
boolean isMigratingZkBroker,
List<Uuid> directories
) {
this.id = id;
this.epoch = epoch;
this.incarnationId = incarnationId;
Map<String, Endpoint> newListeners = new HashMap<>(listeners.size());
for (Entry<String, Endpoint> entry : listeners.entrySet()) {
if (entry.getValue().listener().isEmpty()) {
throw new IllegalArgumentException("Broker listeners must be named.");
}
newListeners.put(entry.getKey(), entry.getValue());
}
this.listeners = Collections.unmodifiableMap(newListeners);
Objects.requireNonNull(supportedFeatures);
this.supportedFeatures = new HashMap<>(supportedFeatures);
this.rack = rack;
this.fenced = fenced;
this.inControlledShutdown = inControlledShutdown;
this.isMigratingZkBroker = isMigratingZkBroker;
directories = new ArrayList<>(directories);
directories.sort(Uuid::compareTo);
this.directories = Collections.unmodifiableList(directories);
}
public static BrokerRegistration fromRecord(RegisterBrokerRecord record) {
Map<String, Endpoint> listeners = new HashMap<>();
for (BrokerEndpoint endpoint : record.endPoints()) {
listeners.put(endpoint.name(), new Endpoint(endpoint.name(),
SecurityProtocol.forId(endpoint.securityProtocol()),
endpoint.host(),
endpoint.port()));
}
Map<String, VersionRange> supportedFeatures = new HashMap<>();
for (BrokerFeature feature : record.features()) {
supportedFeatures.put(feature.name(), VersionRange.of(
feature.minSupportedVersion(), feature.maxSupportedVersion()));
}
return new BrokerRegistration(record.brokerId(),
record.brokerEpoch(),
record.incarnationId(),
listeners,
supportedFeatures,
Optional.ofNullable(record.rack()),
record.fenced(),
record.inControlledShutdown(),
record.isMigratingZkBroker(),
record.logDirs());
}
public int id() {
return id;
}
public long epoch() {
return epoch;
}
public Uuid incarnationId() {
return incarnationId;
}
public Map<String, Endpoint> listeners() {
return listeners;
}
public Optional<Node> node(String listenerName) {
Endpoint endpoint = listeners().get(listenerName);
if (endpoint == null) {
return Optional.empty();
}
return Optional.of(new Node(id, endpoint.host(), endpoint.port(), rack.orElse(null), fenced));
}
public List<Node> nodes() {
return listeners.keySet().stream().flatMap(l -> node(l).stream()).toList();
}
public Map<String, VersionRange> supportedFeatures() {
return supportedFeatures;
}
public Optional<String> rack() {
return rack;
}
public boolean fenced() {
return fenced;
}
public boolean inControlledShutdown() {
return inControlledShutdown;
}
public List<Uuid> directories() {
return directories;
}
public boolean hasOnlineDir(Uuid dir) {
return DirectoryId.isOnline(dir, directories);
}
public List<Uuid> directoryIntersection(List<Uuid> otherDirectories) {
List<Uuid> results = new ArrayList<>();
for (Uuid directory : directories) {
if (otherDirectories.contains(directory)) {
results.add(directory);
}
}
return results;
}
public List<Uuid> directoryDifference(List<Uuid> otherDirectories) {
List<Uuid> results = new ArrayList<>();
for (Uuid directory : directories) {
if (!otherDirectories.contains(directory)) {
results.add(directory);
}
}
return results;
}
public ApiMessageAndVersion toRecord(ImageWriterOptions options) {
RegisterBrokerRecord registrationRecord = new RegisterBrokerRecord().
setBrokerId(id).
setRack(rack.orElse(null)).
setBrokerEpoch(epoch).
setIncarnationId(incarnationId).
setFenced(fenced).
setInControlledShutdown(inControlledShutdown);
if (isMigratingZkBroker) {
if (options.metadataVersion().isMigrationSupported()) {
registrationRecord.setIsMigratingZkBroker(isMigratingZkBroker);
} else {
options.handleLoss("the isMigratingZkBroker state of one or more brokers");
}
}
if (directories.isEmpty() || options.metadataVersion().isDirectoryAssignmentSupported()) {
registrationRecord.setLogDirs(directories);
} else {
options.handleLoss("the online log directories of one or more brokers");
}
for (Entry<String, Endpoint> entry : listeners.entrySet()) {
Endpoint endpoint = entry.getValue();
registrationRecord.endPoints().add(new BrokerEndpoint().
setName(entry.getKey()).
setHost(endpoint.host()).
setPort(endpoint.port()).
setSecurityProtocol(endpoint.securityProtocol().id));
}
for (Entry<String, VersionRange> entry : supportedFeatures.entrySet()) {
registrationRecord.features().add(new BrokerFeature().
setName(entry.getKey()).
setMinSupportedVersion(entry.getValue().min()).
setMaxSupportedVersion(entry.getValue().max()));
}
return new ApiMessageAndVersion(registrationRecord,
options.metadataVersion().registerBrokerRecordVersion());
}
@Override
public int hashCode() {
return Objects.hash(id, epoch, incarnationId, listeners, supportedFeatures,
rack, fenced, inControlledShutdown, isMigratingZkBroker, directories);
}
@Override
public boolean equals(Object o) {
if (!(o instanceof BrokerRegistration other)) return false;
return other.id == id &&
other.epoch == epoch &&
other.incarnationId.equals(incarnationId) &&
other.listeners.equals(listeners) &&
other.supportedFeatures.equals(supportedFeatures) &&
other.rack.equals(rack) &&
other.fenced == fenced &&
other.inControlledShutdown == inControlledShutdown &&
other.isMigratingZkBroker == isMigratingZkBroker &&
other.directories.equals(directories);
}
@Override
public String toString() {
return "BrokerRegistration(id=" + id +
", epoch=" + epoch +
", incarnationId=" + incarnationId +
", listeners=[" +
listeners.keySet().stream().sorted().
map(n -> listeners.get(n).toString()).
collect(Collectors.joining(", ")) +
"], supportedFeatures={" +
supportedFeatures.keySet().stream().sorted().
map(k -> k + ": " + supportedFeatures.get(k)).
collect(Collectors.joining(", ")) +
"}" +
", rack=" + rack +
", fenced=" + fenced +
", inControlledShutdown=" + inControlledShutdown +
", isMigratingZkBroker=" + isMigratingZkBroker +
", directories=" + directories +
")";
}
public BrokerRegistration cloneWith(
Optional<Boolean> fencingChange,
Optional<Boolean> inControlledShutdownChange,
Optional<List<Uuid>> directoriesChange
) {
boolean newFenced = fencingChange.orElse(fenced);
boolean newInControlledShutdownChange = inControlledShutdownChange.orElse(inControlledShutdown);
List<Uuid> newDirectories = directoriesChange.orElse(directories);
if (newFenced == fenced && newInControlledShutdownChange == inControlledShutdown && newDirectories.equals(directories))
return this;
return new BrokerRegistration(
id,
epoch,
incarnationId,
listeners,
supportedFeatures,
rack,
newFenced,
newInControlledShutdownChange,
isMigratingZkBroker,
newDirectories
);
}
}
| Builder |
java | quarkusio__quarkus | independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/buildextension/observers/ObservertransformerTest.java | {
"start": 1185,
"end": 2817
} | class ____ {
@RegisterExtension
public ArcTestContainer container = ArcTestContainer.builder().beanClasses(MyObserver.class, AlphaQualifier.class)
.observerTransformers(new ObserverTransformer() {
@Override
public void transform(TransformationContext context) {
context.transform()
.reception(Reception.IF_EXISTS)
.priority(1)
.done();
}
@Override
public boolean appliesTo(Type observedType, Set<AnnotationInstance> qualifiers) {
// observed type is String and qualifiers: @AlphaQualifier
return observedType.name().toString().equals(String.class.getName()) && qualifiers.size() == 1
&& qualifiers.iterator().next().name().toString().equals(AlphaQualifier.class.getName());
}
}).build();
@Test
public void testTransformedObserver() {
MyObserver.EVENTS.clear();
@SuppressWarnings("serial")
Event<String> event = Arc.container().beanManager().getEvent().select(String.class,
new AlphaQualifier.Literal());
event.fire("foo");
// Reception was transformed to IF_EXISTS so test1() is not invoked
assertEquals(List.of("foo_MyObserver2"), MyObserver.EVENTS);
MyObserver.EVENTS.clear();
event.fire("foo");
assertEquals(List.of("foo_MyObserver1", "foo_MyObserver2"), MyObserver.EVENTS);
}
@Singleton
static | ObservertransformerTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/time/JodaToSelfTest.java | {
"start": 1199,
"end": 1648
} | class ____ {
// BUG: Diagnostic contains: private static final Duration DUR = Duration.ZERO;
private static final Duration DUR = Duration.ZERO.toDuration();
}
""")
.doTest();
}
@Test
public void durationWithSecondsNamedVariable() {
helper
.addSourceLines(
"TestClass.java",
"""
import org.joda.time.Duration;
public | TestClass |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/scheduler/benchmark/failover/RegionToRestartInStreamingJobBenchmark.java | {
"start": 1607,
"end": 2264
} | class ____ extends FailoverBenchmarkBase {
@Override
public void setup(JobConfiguration jobConfiguration) throws Exception {
super.setup(jobConfiguration);
TestingLogicalSlotBuilder slotBuilder = new TestingLogicalSlotBuilder();
deployAllTasks(executionGraph, slotBuilder);
switchAllVerticesToRunning(executionGraph);
}
public Set<ExecutionVertexID> calculateRegionToRestart() {
return strategy.getTasksNeedingRestart(
executionGraph.getJobVertex(source.getID()).getTaskVertices()[0].getID(),
new Exception("For test."));
}
}
| RegionToRestartInStreamingJobBenchmark |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementDispatcher.java | {
"start": 2016,
"end": 5878
} | class ____ implements
ConstraintPlacementAlgorithmOutputCollector {
private static final Logger LOG =
LoggerFactory.getLogger(PlacementDispatcher.class);
private ConstraintPlacementAlgorithm algorithm;
private ExecutorService algorithmThreadPool;
private Map<ApplicationId, List<PlacedSchedulingRequest>>
placedRequests = new ConcurrentHashMap<>();
private Map<ApplicationId, List<SchedulingRequestWithPlacementAttempt>>
rejectedRequests = new ConcurrentHashMap<>();
public void init(RMContext rmContext,
ConstraintPlacementAlgorithm placementAlgorithm, int poolSize) {
LOG.info("Initializing Constraint Placement Planner:");
this.algorithm = placementAlgorithm;
this.algorithm.init(rmContext);
this.algorithmThreadPool = Executors.newFixedThreadPool(poolSize);
}
void dispatch(final BatchedRequests batchedRequests) {
final ConstraintPlacementAlgorithmOutputCollector collector = this;
Runnable placingTask = () -> {
LOG.debug("Got [{}] requests to place from application [{}].. " +
"Attempt count [{}]",
batchedRequests.getSchedulingRequests().size(),
batchedRequests.getApplicationId(),
batchedRequests.getPlacementAttempt());
algorithm.place(batchedRequests, collector);
};
this.algorithmThreadPool.submit(placingTask);
}
public List<PlacedSchedulingRequest> pullPlacedRequests(
ApplicationId applicationId) {
List<PlacedSchedulingRequest> placedReqs =
this.placedRequests.get(applicationId);
if (placedReqs != null && !placedReqs.isEmpty()) {
List<PlacedSchedulingRequest> retList = new ArrayList<>();
synchronized (placedReqs) {
if (placedReqs.size() > 0) {
retList.addAll(placedReqs);
placedReqs.clear();
}
}
return retList;
}
return Collections.emptyList();
}
public List<SchedulingRequestWithPlacementAttempt> pullRejectedRequests(
ApplicationId applicationId) {
List<SchedulingRequestWithPlacementAttempt> rejectedReqs =
this.rejectedRequests.get(applicationId);
if (rejectedReqs != null && !rejectedReqs.isEmpty()) {
List<SchedulingRequestWithPlacementAttempt> retList = new ArrayList<>();
synchronized (rejectedReqs) {
if (rejectedReqs.size() > 0) {
retList.addAll(rejectedReqs);
rejectedReqs.clear();
}
}
return retList;
}
return Collections.emptyList();
}
void clearApplicationState(ApplicationId applicationId) {
placedRequests.remove(applicationId);
rejectedRequests.remove(applicationId);
}
@Override
public void collect(ConstraintPlacementAlgorithmOutput placement) {
if (!placement.getPlacedRequests().isEmpty()) {
List<PlacedSchedulingRequest> processed =
placedRequests.computeIfAbsent(
placement.getApplicationId(), k -> new ArrayList<>());
synchronized (processed) {
LOG.debug(
"Planning Algorithm has placed for application [{}]" +
" the following [{}]", placement.getApplicationId(),
placement.getPlacedRequests());
for (PlacedSchedulingRequest esr :
placement.getPlacedRequests()) {
processed.add(esr);
}
}
}
if (!placement.getRejectedRequests().isEmpty()) {
List<SchedulingRequestWithPlacementAttempt> rejected =
rejectedRequests.computeIfAbsent(
placement.getApplicationId(), k -> new ArrayList());
LOG.warn(
"Planning Algorithm has rejected for application [{}]" +
" the following [{}]", placement.getApplicationId(),
placement.getRejectedRequests());
synchronized (rejected) {
rejected.addAll(placement.getRejectedRequests());
}
}
}
}
| PlacementDispatcher |
java | spring-projects__spring-security | oauth2/oauth2-resource-server/src/main/java/org/springframework/security/oauth2/server/resource/authentication/JwtIssuerAuthenticationManagerResolver.java | {
"start": 2065,
"end": 2345
} | class ____ be able to determine whether the `iss` claim is trusted. Recall
* that anyone can stand up an authorization server and issue valid tokens to a resource
* server. The simplest way to achieve this is to supply a set of trusted issuers in the
* constructor.
*
* This | must |
java | google__auto | factory/src/test/resources/expected/FactoryExtendingAbstractClassFactory.java | {
"start": 841,
"end": 1189
} | class ____
extends FactoryExtendingAbstractClass.AbstractFactory {
@Inject
FactoryExtendingAbstractClassFactory() {}
FactoryExtendingAbstractClass create() {
return new FactoryExtendingAbstractClass();
}
@Override
public FactoryExtendingAbstractClass newInstance() {
return create();
}
}
| FactoryExtendingAbstractClassFactory |
java | apache__kafka | connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorConfig.java | {
"start": 33164,
"end": 36560
} | class ____ not provided as part of the connector properties and not provided as part of the worker properties,
the converter to use is unknown hence no default version can be determined (null)
Note: Connect when using service loading has an issue outlined in KAFKA-18119. The issue means that the above
logic does not hold currently for clusters using service loading when converters are defined in the connector.
However, the logic to determine the default should ideally follow the one outlined above, and the code here
should still show the correct default version regardless of the bug.
*/
final String connectorConverter = connectorProps.get(connectorConverterConfig);
// since header converter defines a default in the worker config we need to handle it separately
final String workerConverter = workerConverterConfig.equals(WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG) ?
workerConfig.getClass(workerConverterConfig).getName() : workerConfig.originalsStrings().get(workerConverterConfig);
final String connectorClass = connectorProps.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG);
final String connectorVersion = connectorProps.get(ConnectorConfig.CONNECTOR_VERSION);
String type = null;
if (connectorClass == null || (connectorConverter == null && workerConverter == null)) {
return new ConverterDefaults(null, null);
}
// update the default of connector converter based on if the worker converter is provided
type = workerConverter;
String version = null;
if (connectorConverter != null) {
version = fetchPluginVersion(plugins, connectorClass, connectorVersion, connectorConverter, converterType);
} else {
version = workerConfig.originalsStrings().get(workerConverterVersionConfig);
if (version == null) {
version = plugins.latestVersion(workerConverter, converterType);
}
}
return new ConverterDefaults(type, version);
}
private static <T> String fetchPluginVersion(Plugins plugins, String connectorClass, String connectorVersion, String pluginName, PluginType pluginType) {
if (pluginName == null || connectorClass == null) {
return null;
}
try {
VersionRange range = PluginUtils.connectorVersionRequirement(connectorVersion);
return plugins.pluginVersion(pluginName, plugins.connectorLoader(connectorClass, range), pluginType);
} catch (InvalidVersionSpecificationException | VersionedPluginLoadingException e) {
// these errors should be captured in other places, so we can ignore them here
log.warn("Failed to determine default plugin version for {}", connectorClass, e);
}
return null;
}
/**
* An abstraction over "enrichable plugins" ({@link Transformation}s and {@link Predicate}s) used for computing the
* contribution to a Connectors ConfigDef.
*
* This is not entirely elegant because
* although they basically use the same "alias prefix" configuration idiom there are some differences.
* The abstract method pattern is used to cope with this.
* @param <T> The type of plugin (either {@code Transformation} or {@code Predicate}).
*/
abstract static | is |
java | quarkusio__quarkus | integration-tests/oidc-client-wiremock/src/main/java/io/quarkus/it/keycloak/OidcClientRequestCustomJwtBearerForceNewTokenFilter.java | {
"start": 329,
"end": 897
} | class ____ extends AbstractOidcClientRequestFilter {
@Override
protected Map<String, String> additionalParameters() {
return Map.of(OidcConstants.CLIENT_ASSERTION, "123456");
}
@Override
protected boolean isForceNewTokens() {
// Easiest way to force requesting new tokens, instead of
// manipulating the token expiration time
return true;
}
@Override
protected Optional<String> clientId() {
return Optional.of("jwtbearer-forcenewtoken");
}
}
| OidcClientRequestCustomJwtBearerForceNewTokenFilter |
java | quarkusio__quarkus | extensions/arc/deployment/src/test/java/io/quarkus/arc/test/exclude/ExcludeTypesTest.java | {
"start": 2186,
"end": 2324
} | class ____ implements Pong {
@Magic
public String ping() {
return "charlie";
}
}
public | Charlie |
java | bumptech__glide | integration/cronet/src/main/java/com/bumptech/glide/integration/cronet/CronetEngineSingleton.java | {
"start": 413,
"end": 1325
} | class ____ {
// non instantiable
private CronetEngineSingleton() {}
private static volatile CronetEngine cronetEngineSingleton;
public static CronetEngine getSingleton(Context context) {
// Lazily create the engine.
if (cronetEngineSingleton == null) {
synchronized (CronetEngineSingleton.class) {
// have to re-check since this might have changed before synchronization, but we don't
// want to synchronize just to check for null.
if (cronetEngineSingleton == null) {
cronetEngineSingleton = createEngine(context);
}
}
}
return cronetEngineSingleton;
}
private static CronetEngine createEngine(Context context) {
return new CronetEngine.Builder(context)
.enableHttpCache(CronetEngine.Builder.HTTP_CACHE_DISABLED, 0)
.enableHttp2(true)
.enableQuic(false)
.build();
}
}
| CronetEngineSingleton |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java | {
"start": 31744,
"end": 48931
} | class ____<K extends Object, V extends Object>
implements MapOutputCollector<K, V>, IndexedSortable {
private int partitions;
private JobConf job;
private TaskReporter reporter;
private Class<K> keyClass;
private Class<V> valClass;
private RawComparator<K> comparator;
private SerializationFactory serializationFactory;
private Serializer<K> keySerializer;
private Serializer<V> valSerializer;
private CombinerRunner<K,V> combinerRunner;
private CombineOutputCollector<K, V> combineCollector;
// Compression for map-outputs
private CompressionCodec codec;
// k/v accounting
private IntBuffer kvmeta; // metadata overlay on backing store
int kvstart; // marks origin of spill metadata
int kvend; // marks end of spill metadata
int kvindex; // marks end of fully serialized records
int equator; // marks origin of meta/serialization
int bufstart; // marks beginning of spill
int bufend; // marks beginning of collectable
int bufmark; // marks end of record
int bufindex; // marks end of collected
int bufvoid; // marks the point where we should stop
// reading at the end of the buffer
byte[] kvbuffer; // main output buffer
private final byte[] b0 = new byte[0];
private static final int VALSTART = 0; // val offset in acct
private static final int KEYSTART = 1; // key offset in acct
private static final int PARTITION = 2; // partition offset in acct
private static final int VALLEN = 3; // length of value
private static final int NMETA = 4; // num meta ints
private static final int METASIZE = NMETA * 4; // size in bytes
// spill accounting
private int maxRec;
private int softLimit;
boolean spillInProgress;
int bufferRemaining;
volatile Throwable sortSpillException = null;
int numSpills = 0;
private int minSpillsForCombine;
private IndexedSorter sorter;
final ReentrantLock spillLock = new ReentrantLock();
final Condition spillDone = spillLock.newCondition();
final Condition spillReady = spillLock.newCondition();
final BlockingBuffer bb = new BlockingBuffer();
volatile boolean spillThreadRunning = false;
final SpillThread spillThread = new SpillThread();
private FileSystem rfs;
// Counters
private Counters.Counter mapOutputByteCounter;
private Counters.Counter mapOutputRecordCounter;
private Counters.Counter fileOutputByteCounter;
final ArrayList<SpillRecord> indexCacheList =
new ArrayList<SpillRecord>();
private int totalIndexCacheMemory;
private int indexCacheMemoryLimit;
private int spillFilesCountLimit;
private static final int INDEX_CACHE_MEMORY_LIMIT_DEFAULT = 1024 * 1024;
private static final int SPILL_FILES_COUNT_LIMIT_DEFAULT = -1;
private static final int SPILL_FILES_COUNT_UNBOUNDED_LIMIT_VALUE = -1;
private MapTask mapTask;
private MapOutputFile mapOutputFile;
private Progress sortPhase;
private Counters.Counter spilledRecordsCounter;
public MapOutputBuffer() {
}
@SuppressWarnings("unchecked")
public void init(MapOutputCollector.Context context
) throws IOException, ClassNotFoundException {
job = context.getJobConf();
reporter = context.getReporter();
mapTask = context.getMapTask();
mapOutputFile = mapTask.getMapOutputFile();
sortPhase = mapTask.getSortPhase();
spilledRecordsCounter = reporter.getCounter(TaskCounter.SPILLED_RECORDS);
partitions = job.getNumReduceTasks();
rfs = ((LocalFileSystem)FileSystem.getLocal(job)).getRaw();
//sanity checks
final float spillper =
job.getFloat(JobContext.MAP_SORT_SPILL_PERCENT, (float)0.8);
final int sortmb = job.getInt(MRJobConfig.IO_SORT_MB,
MRJobConfig.DEFAULT_IO_SORT_MB);
indexCacheMemoryLimit = job.getInt(JobContext.INDEX_CACHE_MEMORY_LIMIT,
INDEX_CACHE_MEMORY_LIMIT_DEFAULT);
spillFilesCountLimit = job.getInt(JobContext.SPILL_FILES_COUNT_LIMIT,
SPILL_FILES_COUNT_LIMIT_DEFAULT);
if (spillper > (float)1.0 || spillper <= (float)0.0) {
throw new IOException("Invalid \"" + JobContext.MAP_SORT_SPILL_PERCENT +
"\": " + spillper);
}
if(spillFilesCountLimit != SPILL_FILES_COUNT_UNBOUNDED_LIMIT_VALUE
&& spillFilesCountLimit < 0) {
throw new IOException("Invalid value for \"" + JobContext.SPILL_FILES_COUNT_LIMIT + "\", " +
"current value: " + spillFilesCountLimit);
}
if ((sortmb & 0x7FF) != sortmb) {
throw new IOException(
"Invalid \"" + JobContext.IO_SORT_MB + "\": " + sortmb);
}
sorter = ReflectionUtils.newInstance(job.getClass(
MRJobConfig.MAP_SORT_CLASS, QuickSort.class,
IndexedSorter.class), job);
// buffers and accounting
int maxMemUsage = sortmb << 20;
maxMemUsage -= maxMemUsage % METASIZE;
kvbuffer = new byte[maxMemUsage];
bufvoid = kvbuffer.length;
kvmeta = ByteBuffer.wrap(kvbuffer)
.order(ByteOrder.nativeOrder())
.asIntBuffer();
setEquator(0);
bufstart = bufend = bufindex = equator;
kvstart = kvend = kvindex;
maxRec = kvmeta.capacity() / NMETA;
softLimit = (int)(kvbuffer.length * spillper);
bufferRemaining = softLimit;
LOG.info(JobContext.IO_SORT_MB + ": " + sortmb);
LOG.info("soft limit at " + softLimit);
LOG.info("bufstart = " + bufstart + "; bufvoid = " + bufvoid);
LOG.info("kvstart = " + kvstart + "; length = " + maxRec);
// k/v serialization
comparator = job.getOutputKeyComparator();
keyClass = (Class<K>)job.getMapOutputKeyClass();
valClass = (Class<V>)job.getMapOutputValueClass();
serializationFactory = new SerializationFactory(job);
keySerializer = serializationFactory.getSerializer(keyClass);
keySerializer.open(bb);
valSerializer = serializationFactory.getSerializer(valClass);
valSerializer.open(bb);
// output counters
mapOutputByteCounter = reporter.getCounter(TaskCounter.MAP_OUTPUT_BYTES);
mapOutputRecordCounter =
reporter.getCounter(TaskCounter.MAP_OUTPUT_RECORDS);
fileOutputByteCounter = reporter
.getCounter(TaskCounter.MAP_OUTPUT_MATERIALIZED_BYTES);
// compression
if (job.getCompressMapOutput()) {
Class<? extends CompressionCodec> codecClass =
job.getMapOutputCompressorClass(DefaultCodec.class);
codec = ReflectionUtils.newInstance(codecClass, job);
} else {
codec = null;
}
// combiner
final Counters.Counter combineInputCounter =
reporter.getCounter(TaskCounter.COMBINE_INPUT_RECORDS);
combinerRunner = CombinerRunner.create(job, getTaskID(),
combineInputCounter,
reporter, null);
if (combinerRunner != null) {
final Counters.Counter combineOutputCounter =
reporter.getCounter(TaskCounter.COMBINE_OUTPUT_RECORDS);
combineCollector= new CombineOutputCollector<K,V>(combineOutputCounter, reporter, job);
} else {
combineCollector = null;
}
spillInProgress = false;
minSpillsForCombine = job.getInt(JobContext.MAP_COMBINE_MIN_SPILLS, 3);
spillThread.setDaemon(true);
spillThread.setName("SpillThread");
spillLock.lock();
try {
spillThread.start();
while (!spillThreadRunning) {
spillDone.await();
}
} catch (InterruptedException e) {
throw new IOException("Spill thread failed to initialize", e);
} finally {
spillLock.unlock();
}
if (sortSpillException != null) {
throw new IOException("Spill thread failed to initialize",
sortSpillException);
}
}
/**
* Serialize the key, value to intermediate storage.
* When this method returns, kvindex must refer to sufficient unused
* storage to store one METADATA.
*/
public synchronized void collect(K key, V value, final int partition
) throws IOException {
reporter.progress();
if (key.getClass() != keyClass) {
throw new IOException("Type mismatch in key from map: expected "
+ keyClass.getName() + ", received "
+ key.getClass().getName());
}
if (value.getClass() != valClass) {
throw new IOException("Type mismatch in value from map: expected "
+ valClass.getName() + ", received "
+ value.getClass().getName());
}
if (partition < 0 || partition >= partitions) {
throw new IOException("Illegal partition for " + key + " (" +
partition + ")");
}
checkSpillException();
bufferRemaining -= METASIZE;
if (bufferRemaining <= 0) {
// start spill if the thread is not running and the soft limit has been
// reached
spillLock.lock();
try {
do {
if (!spillInProgress) {
final int kvbidx = 4 * kvindex;
final int kvbend = 4 * kvend;
// serialized, unspilled bytes always lie between kvindex and
// bufindex, crossing the equator. Note that any void space
// created by a reset must be included in "used" bytes
final int bUsed = distanceTo(kvbidx, bufindex);
final boolean bufsoftlimit = bUsed >= softLimit;
if ((kvbend + METASIZE) % kvbuffer.length !=
equator - (equator % METASIZE)) {
// spill finished, reclaim space
resetSpill();
bufferRemaining = Math.min(
distanceTo(bufindex, kvbidx) - 2 * METASIZE,
softLimit - bUsed) - METASIZE;
continue;
} else if (bufsoftlimit && kvindex != kvend) {
// spill records, if any collected; check latter, as it may
// be possible for metadata alignment to hit spill pcnt
startSpill();
final int avgRec = (int)
(mapOutputByteCounter.getCounter() /
mapOutputRecordCounter.getCounter());
// leave at least half the split buffer for serialization data
// ensure that kvindex >= bufindex
final int distkvi = distanceTo(bufindex, kvbidx);
final int newPos = (bufindex +
Math.max(2 * METASIZE - 1,
Math.min(distkvi / 2,
distkvi / (METASIZE + avgRec) * METASIZE)))
% kvbuffer.length;
setEquator(newPos);
bufmark = bufindex = newPos;
final int serBound = 4 * kvend;
// bytes remaining before the lock must be held and limits
// checked is the minimum of three arcs: the metadata space, the
// serialization space, and the soft limit
bufferRemaining = Math.min(
// metadata max
distanceTo(bufend, newPos),
Math.min(
// serialization max
distanceTo(newPos, serBound),
// soft limit
softLimit)) - 2 * METASIZE;
}
}
} while (false);
} finally {
spillLock.unlock();
}
}
try {
// serialize key bytes into buffer
int keystart = bufindex;
keySerializer.serialize(key);
if (bufindex < keystart) {
// wrapped the key; must make contiguous
bb.shiftBufferedKey();
keystart = 0;
}
// serialize value bytes into buffer
final int valstart = bufindex;
valSerializer.serialize(value);
// It's possible for records to have zero length, i.e. the serializer
// will perform no writes. To ensure that the boundary conditions are
// checked and that the kvindex invariant is maintained, perform a
// zero-length write into the buffer. The logic monitoring this could be
// moved into collect, but this is cleaner and inexpensive. For now, it
// is acceptable.
bb.write(b0, 0, 0);
// the record must be marked after the preceding write, as the metadata
// for this record are not yet written
int valend = bb.markRecord();
mapOutputRecordCounter.increment(1);
mapOutputByteCounter.increment(
distanceTo(keystart, valend, bufvoid));
// write accounting info
kvmeta.put(kvindex + PARTITION, partition);
kvmeta.put(kvindex + KEYSTART, keystart);
kvmeta.put(kvindex + VALSTART, valstart);
kvmeta.put(kvindex + VALLEN, distanceTo(valstart, valend));
// advance kvindex
kvindex = (kvindex - NMETA + kvmeta.capacity()) % kvmeta.capacity();
} catch (MapBufferTooSmallException e) {
LOG.info("Record too large for in-memory buffer: " + e.getMessage());
spillSingleRecord(key, value, partition);
mapOutputRecordCounter.increment(1);
return;
}
}
private TaskAttemptID getTaskID() {
return mapTask.getTaskID();
}
/**
* Set the point from which meta and serialization data expand. The meta
* indices are aligned with the buffer, so metadata never spans the ends of
* the circular buffer.
*/
private void setEquator(int pos) {
equator = pos;
// set index prior to first entry, aligned at meta boundary
final int aligned = pos - (pos % METASIZE);
// Cast one of the operands to long to avoid integer overflow
kvindex = (int)
(((long)aligned - METASIZE + kvbuffer.length) % kvbuffer.length) / 4;
LOG.info("(EQUATOR) " + pos + " kvi " + kvindex +
"(" + (kvindex * 4) + ")");
}
/**
* The spill is complete, so set the buffer and meta indices to be equal to
* the new equator to free space for continuing collection. Note that when
* kvindex == kvend == kvstart, the buffer is empty.
*/
private void resetSpill() {
final int e = equator;
bufstart = bufend = e;
final int aligned = e - (e % METASIZE);
// set start/end to point to first meta record
// Cast one of the operands to long to avoid integer overflow
kvstart = kvend = (int)
(((long)aligned - METASIZE + kvbuffer.length) % kvbuffer.length) / 4;
LOG.info("(RESET) equator " + e + " kv " + kvstart + "(" +
(kvstart * 4) + ")" + " kvi " + kvindex + "(" + (kvindex * 4) + ")");
}
/**
* Compute the distance in bytes between two indices in the serialization
* buffer.
* @see #distanceTo(int,int,int)
*/
final int distanceTo(final int i, final int j) {
return distanceTo(i, j, kvbuffer.length);
}
/**
* Compute the distance between two indices in the circular buffer given the
* max distance.
*/
int distanceTo(final int i, final int j, final int mod) {
return i <= j
? j - i
: mod - i + j;
}
/**
* For the given meta position, return the offset into the int-sized
* kvmeta buffer.
*/
int offsetFor(int metapos) {
return metapos * NMETA;
}
/**
* Compare logical range, st i, j MOD offset capacity.
* Compare by partition, then by key.
* @see IndexedSortable#compare
*/
@Override
public int compare(final int mi, final int mj) {
final int kvi = offsetFor(mi % maxRec);
final int kvj = offsetFor(mj % maxRec);
final int kvip = kvmeta.get(kvi + PARTITION);
final int kvjp = kvmeta.get(kvj + PARTITION);
// sort by partition
if (kvip != kvjp) {
return kvip - kvjp;
}
// sort by key
return comparator.compare(kvbuffer,
kvmeta.get(kvi + KEYSTART),
kvmeta.get(kvi + VALSTART) - kvmeta.get(kvi + KEYSTART),
kvbuffer,
kvmeta.get(kvj + KEYSTART),
kvmeta.get(kvj + VALSTART) - kvmeta.get(kvj + KEYSTART));
}
final byte META_BUFFER_TMP[] = new byte[METASIZE];
/**
* Swap metadata for items i, j
* @see IndexedSortable#swap
*/
@Override
public void swap(final int mi, final int mj) {
int iOff = (mi % maxRec) * METASIZE;
int jOff = (mj % maxRec) * METASIZE;
System.arraycopy(kvbuffer, iOff, META_BUFFER_TMP, 0, METASIZE);
System.arraycopy(kvbuffer, jOff, kvbuffer, iOff, METASIZE);
System.arraycopy(META_BUFFER_TMP, 0, kvbuffer, jOff, METASIZE);
}
/**
* Inner | MapOutputBuffer |
java | square__retrofit | retrofit-adapters/guava/src/main/java/retrofit2/adapter/guava/GuavaCallAdapterFactory.java | {
"start": 1981,
"end": 3337
} | class ____ extends CallAdapter.Factory {
public static GuavaCallAdapterFactory create() {
return new GuavaCallAdapterFactory();
}
private GuavaCallAdapterFactory() {}
@Override
public @Nullable CallAdapter<?, ?> get(
Type returnType, Annotation[] annotations, Retrofit retrofit) {
if (getRawType(returnType) != ListenableFuture.class) {
return null;
}
if (!(returnType instanceof ParameterizedType)) {
throw new IllegalStateException(
"ListenableFuture return type must be parameterized"
+ " as ListenableFuture<Foo> or ListenableFuture<? extends Foo>");
}
Type innerType = getParameterUpperBound(0, (ParameterizedType) returnType);
if (getRawType(innerType) != Response.class) {
// Generic type is not Response<T>. Use it for body-only adapter.
return new BodyCallAdapter<>(innerType);
}
// Generic type is Response<T>. Extract T and create the Response version of the adapter.
if (!(innerType instanceof ParameterizedType)) {
throw new IllegalStateException(
"Response must be parameterized" + " as Response<Foo> or Response<? extends Foo>");
}
Type responseType = getParameterUpperBound(0, (ParameterizedType) innerType);
return new ResponseCallAdapter<>(responseType);
}
private static final | GuavaCallAdapterFactory |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/javadoc/MalformedInlineTagTest.java | {
"start": 1655,
"end": 1841
} | class ____ {}
""")
.doTest(TEXT_MATCH);
}
@Test
public void positive_withinTag() {
helper
.addInputLines(
"Test.java",
"""
| Test |
java | elastic__elasticsearch | x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/TermsGroupConfig.java | {
"start": 1389,
"end": 6235
} | class ____ implements Writeable, ToXContentObject {
public static final String NAME = "terms";
private static final String FIELDS = "fields";
private static final List<String> FLOAT_TYPES = Arrays.asList("half_float", "float", "double", "scaled_float");
private static final List<String> NATURAL_TYPES = Arrays.asList("byte", "short", "integer", "long");
private static final ConstructingObjectParser<TermsGroupConfig, Void> PARSER;
static {
PARSER = new ConstructingObjectParser<>(NAME, args -> {
@SuppressWarnings("unchecked")
List<String> fields = (List<String>) args[0];
return new TermsGroupConfig(fields != null ? fields.toArray(new String[fields.size()]) : null);
});
PARSER.declareStringArray(constructorArg(), new ParseField(FIELDS));
}
private final String[] fields;
public TermsGroupConfig(final String... fields) {
if (fields == null || fields.length == 0) {
throw new IllegalArgumentException("Fields must have at least one value");
}
this.fields = fields;
}
public TermsGroupConfig(StreamInput in) throws IOException {
fields = in.readStringArray();
}
/**
* @return the names of the fields. Never {@code null}.
*/
public String[] getFields() {
return fields;
}
public void validateMappings(
Map<String, Map<String, FieldCapabilities>> fieldCapsResponse,
ActionRequestValidationException validationException
) {
Arrays.stream(fields).forEach(field -> {
Map<String, FieldCapabilities> fieldCaps = fieldCapsResponse.get(field);
if (fieldCaps != null && fieldCaps.isEmpty() == false) {
fieldCaps.forEach((key, value) -> {
if (key.equals(KeywordFieldMapper.CONTENT_TYPE) || key.equals(TextFieldMapper.CONTENT_TYPE)) {
if (value.isAggregatable() == false) {
validationException.addValidationError(
"The field [" + field + "] must be aggregatable across all indices, but is not."
);
}
} else if (FLOAT_TYPES.contains(key)) {
if (value.isAggregatable() == false) {
validationException.addValidationError(
"The field [" + field + "] must be aggregatable across all indices, but is not."
);
}
} else if (NATURAL_TYPES.contains(key)) {
if (value.isAggregatable() == false) {
validationException.addValidationError(
"The field [" + field + "] must be aggregatable across all indices, but is not."
);
}
} else {
validationException.addValidationError(
"The field referenced by a terms group must be a [numeric] or "
+ "[keyword/text] type, but found "
+ fieldCaps.keySet().toString()
+ " for field ["
+ field
+ "]"
);
}
});
} else {
validationException.addValidationError(
"Could not find a [numeric] or [keyword/text] field with name ["
+ field
+ "] in any of the indices matching the index pattern."
);
}
});
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
{
builder.field(FIELDS, fields);
}
return builder.endObject();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeStringArray(fields);
}
@Override
public boolean equals(final Object other) {
if (this == other) {
return true;
}
if (other == null || getClass() != other.getClass()) {
return false;
}
final TermsGroupConfig that = (TermsGroupConfig) other;
return Arrays.equals(fields, that.fields);
}
@Override
public int hashCode() {
return Arrays.hashCode(fields);
}
@Override
public String toString() {
return Strings.toString(this, true, true);
}
public static TermsGroupConfig fromXContent(final XContentParser parser) throws IOException {
return PARSER.parse(parser, null);
}
}
| TermsGroupConfig |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/mapping/embeddable/TargetEmbeddableOnInterfaceTest.java | {
"start": 881,
"end": 1574
} | class ____ {
@Test
public void testLifecycle(SessionFactoryScope factoryScope) {
factoryScope.inTransaction( (session) -> {
City city = new City();
city.setName("Cluj");
city.setCoordinates( new GPS(46.77120, 23.62360 ) );
session.persist( city );
} );
factoryScope.inTransaction( (session) -> {
City city = session.find(City.class, 1L);
assert city.getCoordinates() instanceof GPS;
assertThat( city.getCoordinates().x() ).isCloseTo( 46.77120, offset( 0.00001 ) );
assertThat( city.getCoordinates().y() ).isCloseTo( 23.62360, offset( 0.00001 ) );
} );
}
//tag::embeddable-Target-example2[]
@TargetEmbeddable(GPS.class)
public | TargetEmbeddableOnInterfaceTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/threadsafety/SynchronizeOnNonFinalFieldTest.java | {
"start": 2781,
"end": 3267
} | class ____ {
final Object lock = new Object();
void m() {
synchronized (lock) {
}
}
}
""")
.doTest();
}
@Test
public void negative_lazyInit() {
compilationHelper
.addSourceLines(
"threadsafety/Test.java",
"""
package threadsafety.Test;
import com.google.errorprone.annotations.concurrent.LazyInit;
| Test |
java | quarkusio__quarkus | extensions/resteasy-classic/resteasy-client/deployment/src/test/java/io/quarkus/restclient/registerclientheaders/MyHeadersFactory.java | {
"start": 362,
"end": 761
} | class ____ implements ClientHeadersFactory {
@Inject
BeanManager beanManager;
@Override
public MultivaluedMap<String, String> update(MultivaluedMap<String, String> incomingHeaders,
MultivaluedMap<String, String> clientOutgoingHeaders) {
assertNotNull(beanManager);
incomingHeaders.add("foo", "bar");
return incomingHeaders;
}
}
| MyHeadersFactory |
java | elastic__elasticsearch | server/src/test/java/org/elasticsearch/common/util/concurrent/RunOnceTests.java | {
"start": 672,
"end": 2948
} | class ____ extends ESTestCase {
public void testRunOnce() {
final AtomicInteger counter = new AtomicInteger(0);
final RunOnce runOnce = new RunOnce(counter::incrementAndGet);
assertFalse(runOnce.hasRun());
runOnce.run();
assertTrue(runOnce.hasRun());
assertEquals(1, counter.get());
runOnce.run();
assertTrue(runOnce.hasRun());
assertEquals(1, counter.get());
}
public void testRunOnceConcurrently() throws InterruptedException {
final AtomicInteger counter = new AtomicInteger(0);
final RunOnce runOnce = new RunOnce(counter::incrementAndGet);
startInParallel(between(3, 10), i -> runOnce.run());
assertTrue(runOnce.hasRun());
assertEquals(1, counter.get());
}
public void testRunOnceWithAbstractRunnable() {
final AtomicInteger onRun = new AtomicInteger(0);
final AtomicInteger onFailure = new AtomicInteger(0);
final AtomicInteger onAfter = new AtomicInteger(0);
final RunOnce runOnce = new RunOnce(new AbstractRunnable() {
@Override
protected void doRun() throws Exception {
onRun.incrementAndGet();
throw new RuntimeException("failure");
}
@Override
public void onFailure(Exception e) {
onFailure.incrementAndGet();
}
@Override
public void onAfter() {
onAfter.incrementAndGet();
}
});
final int iterations = randomIntBetween(1, 10);
for (int i = 0; i < iterations; i++) {
runOnce.run();
assertEquals(1, onRun.get());
assertEquals(1, onFailure.get());
assertEquals(1, onAfter.get());
assertTrue(runOnce.hasRun());
}
}
public void testReleasesDelegate() {
final var reachabilityChecker = new ReachabilityChecker();
final var runOnce = new RunOnce(reachabilityChecker.register(this::noop));
reachabilityChecker.checkReachable();
runOnce.run();
reachabilityChecker.ensureUnreachable();
assertEquals("RunOnce[null]", runOnce.toString());
}
private void noop() {}
}
| RunOnceTests |
java | reactor__reactor-core | reactor-core/src/main/java/reactor/core/publisher/FluxName.java | {
"start": 1298,
"end": 3797
} | class ____<T> extends InternalFluxOperator<T, T> {
final @Nullable String name;
final @Nullable List<Tuple2<String, String>> tagsWithDuplicates;
static <T> Flux<T> createOrAppend(Flux<T> source, String name) {
Objects.requireNonNull(name, "name");
if (source instanceof FluxName) {
FluxName<T> s = (FluxName<T>) source;
return new FluxName<>(s.source, name, s.tagsWithDuplicates);
}
if (source instanceof FluxNameFuseable) {
FluxNameFuseable<T> s = (FluxNameFuseable<T>) source;
return new FluxNameFuseable<>(s.source, name, s.tagsWithDuplicates);
}
if (source instanceof Fuseable) {
return new FluxNameFuseable<>(source, name, null);
}
return new FluxName<>(source, name, null);
}
static <T> Flux<T> createOrAppend(Flux<T> source, String tagName, String tagValue) {
Objects.requireNonNull(tagName, "tagName");
Objects.requireNonNull(tagValue, "tagValue");
Tuple2<String, String> newTag = Tuples.of(tagName, tagValue);
if (source instanceof FluxName) {
FluxName<T> s = (FluxName<T>) source;
List<Tuple2<String, String>> tags;
if(s.tagsWithDuplicates != null) {
tags = new LinkedList<>(s.tagsWithDuplicates);
tags.add(newTag);
}
else {
tags = Collections.singletonList(newTag);
}
return new FluxName<>(s.source, s.name, tags);
}
if (source instanceof FluxNameFuseable) {
FluxNameFuseable<T> s = (FluxNameFuseable<T>) source;
List<Tuple2<String, String>> tags;
if (s.tagsWithDuplicates != null) {
tags = new LinkedList<>(s.tagsWithDuplicates);
tags.add(newTag);
}
else {
tags = Collections.singletonList(newTag);
}
return new FluxNameFuseable<>(s.source, s.name, tags);
}
if (source instanceof Fuseable) {
return new FluxNameFuseable<>(source, null, Collections.singletonList(newTag));
}
return new FluxName<>(source, null, Collections.singletonList(newTag));
}
FluxName(Flux<? extends T> source,
@Nullable String name,
@Nullable List<Tuple2<String, String>> tags) {
super(source);
this.name = name;
this.tagsWithDuplicates = tags;
}
@Override
public CoreSubscriber<? super T> subscribeOrReturn(CoreSubscriber<? super T> actual) {
return actual;
}
@Override
public @Nullable Object scanUnsafe(Attr key) {
if (key == Attr.NAME) {
return name;
}
if (key == Attr.TAGS && tagsWithDuplicates != null) {
return tagsWithDuplicates.stream();
}
if (key == RUN_STYLE) {
return SYNC;
}
return super.scanUnsafe(key);
}
}
| FluxName |
java | apache__camel | core/camel-support/src/main/java/org/apache/camel/support/processor/DelegateProcessor.java | {
"start": 1551,
"end": 3304
} | class ____ extends ServiceSupport
implements org.apache.camel.DelegateProcessor, Processor, Navigate<Processor> {
protected Processor processor;
public DelegateProcessor() {
}
public DelegateProcessor(Processor processor) {
if (processor == this) {
throw new IllegalArgumentException("Recursive DelegateProcessor!");
}
this.processor = processor;
}
@Override
public void process(Exchange exchange) throws Exception {
processNext(exchange);
}
protected void processNext(Exchange exchange) throws Exception {
if (processor != null) {
processor.process(exchange);
}
}
@Override
public String toString() {
return "Delegate[" + processor + "]";
}
@Override
public Processor getProcessor() {
return processor;
}
public void setProcessor(Processor processor) {
this.processor = processor;
}
@Override
protected void doBuild() throws Exception {
ServiceHelper.buildService(processor);
}
@Override
protected void doInit() throws Exception {
ServiceHelper.initService(processor);
}
@Override
protected void doStart() throws Exception {
ServiceHelper.startService(processor);
}
@Override
protected void doStop() throws Exception {
ServiceHelper.stopService(processor);
}
@Override
public boolean hasNext() {
return processor != null;
}
@Override
public List<Processor> next() {
if (!hasNext()) {
return null;
}
List<Processor> answer = new ArrayList<>(1);
answer.add(processor);
return answer;
}
}
| DelegateProcessor |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java | {
"start": 7943,
"end": 8044
} | class ____ the process of moving the block replica to the
* given target.
*/
private | encapsulates |
java | spring-projects__spring-boot | configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/fieldvalues/javac/JavaCompilerFieldValuesParser.java | {
"start": 1421,
"end": 2049
} | class ____ implements FieldValuesParser {
private final Trees trees;
public JavaCompilerFieldValuesParser(ProcessingEnvironment env) throws Exception {
this.trees = Trees.instance(env);
}
@Override
public Map<String, Object> getFieldValues(TypeElement element) throws Exception {
Tree tree = this.trees.getTree(element);
if (tree != null) {
FieldCollector fieldCollector = new FieldCollector();
tree.accept(fieldCollector);
return fieldCollector.getFieldValues();
}
return Collections.emptyMap();
}
/**
* {@link TreeVisitor} to collect fields.
*/
private static final | JavaCompilerFieldValuesParser |
java | spring-projects__spring-framework | spring-context/src/test/java/org/springframework/context/annotation/spr10546/ParentWithParentConfig.java | {
"start": 800,
"end": 855
} | class ____ extends ParentConfig {
}
| ParentWithParentConfig |
java | netty__netty | transport/src/test/java/io/netty/channel/DefaultChannelPipelineTest.java | {
"start": 62957,
"end": 71297
} | class ____ extends ChannelInboundHandlerAdapter {
private static final int MASK_CHANNEL_REGISTER = 1;
private static final int MASK_CHANNEL_UNREGISTER = 1 << 1;
private static final int MASK_CHANNEL_ACTIVE = 1 << 2;
private static final int MASK_CHANNEL_INACTIVE = 1 << 3;
private static final int MASK_CHANNEL_READ = 1 << 4;
private static final int MASK_CHANNEL_READ_COMPLETE = 1 << 5;
private static final int MASK_USER_EVENT_TRIGGERED = 1 << 6;
private static final int MASK_CHANNEL_WRITABILITY_CHANGED = 1 << 7;
private static final int MASK_EXCEPTION_CAUGHT = 1 << 8;
private static final int MASK_ADDED = 1 << 9;
private static final int MASK_REMOVED = 1 << 10;
private int executionMask;
@Override
public void handlerAdded(ChannelHandlerContext ctx) {
executionMask |= MASK_ADDED;
}
@Override
public void handlerRemoved(ChannelHandlerContext ctx) {
executionMask |= MASK_REMOVED;
}
@Override
public void channelRegistered(ChannelHandlerContext ctx) {
executionMask |= MASK_CHANNEL_REGISTER;
}
@Override
public void channelUnregistered(ChannelHandlerContext ctx) {
executionMask |= MASK_CHANNEL_UNREGISTER;
}
@Override
public void channelActive(ChannelHandlerContext ctx) {
executionMask |= MASK_CHANNEL_ACTIVE;
}
@Override
public void channelInactive(ChannelHandlerContext ctx) {
executionMask |= MASK_CHANNEL_INACTIVE;
}
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) {
executionMask |= MASK_CHANNEL_READ;
}
@Override
public void channelReadComplete(ChannelHandlerContext ctx) {
executionMask |= MASK_CHANNEL_READ_COMPLETE;
}
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) {
executionMask |= MASK_USER_EVENT_TRIGGERED;
}
@Override
public void channelWritabilityChanged(ChannelHandlerContext ctx) {
executionMask |= MASK_CHANNEL_WRITABILITY_CHANGED;
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
executionMask |= MASK_EXCEPTION_CAUGHT;
}
void assertCalled() {
assertCalled("handlerAdded", MASK_ADDED);
assertCalled("handlerRemoved", MASK_REMOVED);
assertCalled("channelRegistered", MASK_CHANNEL_REGISTER);
assertCalled("channelUnregistered", MASK_CHANNEL_UNREGISTER);
assertCalled("channelActive", MASK_CHANNEL_ACTIVE);
assertCalled("channelInactive", MASK_CHANNEL_INACTIVE);
assertCalled("channelRead", MASK_CHANNEL_READ);
assertCalled("channelReadComplete", MASK_CHANNEL_READ_COMPLETE);
assertCalled("userEventTriggered", MASK_USER_EVENT_TRIGGERED);
assertCalled("channelWritabilityChanged", MASK_CHANNEL_WRITABILITY_CHANGED);
assertCalled("exceptionCaught", MASK_EXCEPTION_CAUGHT);
}
private void assertCalled(String methodName, int mask) {
assertTrue((executionMask & mask) != 0, methodName + " was not called");
}
}
OutboundCalledHandler outboundCalledHandler = new OutboundCalledHandler();
SkipHandler skipHandler = new SkipHandler();
InboundCalledHandler inboundCalledHandler = new InboundCalledHandler();
pipeline.addLast(outboundCalledHandler, skipHandler, inboundCalledHandler);
pipeline.fireChannelRegistered();
pipeline.fireChannelUnregistered();
pipeline.fireChannelActive();
pipeline.fireChannelInactive();
pipeline.fireChannelRead("");
pipeline.fireChannelReadComplete();
pipeline.fireChannelWritabilityChanged();
pipeline.fireUserEventTriggered("");
pipeline.fireExceptionCaught(new Exception());
pipeline.deregister().syncUninterruptibly();
pipeline.bind(new SocketAddress() {
}).syncUninterruptibly();
pipeline.connect(new SocketAddress() {
}).syncUninterruptibly();
pipeline.disconnect().syncUninterruptibly();
pipeline.close().syncUninterruptibly();
pipeline.write("");
pipeline.flush();
pipeline.read();
pipeline.remove(outboundCalledHandler);
pipeline.remove(inboundCalledHandler);
pipeline.remove(skipHandler);
assertFalse(channel.finish());
outboundCalledHandler.assertCalled();
inboundCalledHandler.assertCalled();
skipHandler.assertSkipped();
}
@Test
public void testWriteThrowsReleaseMessage() {
testWriteThrowsReleaseMessage0(false);
}
@Test
public void testWriteAndFlushThrowsReleaseMessage() {
testWriteThrowsReleaseMessage0(true);
}
private void testWriteThrowsReleaseMessage0(boolean flush) {
ReferenceCounted referenceCounted = new AbstractReferenceCounted() {
@Override
protected void deallocate() {
// NOOP
}
@Override
public ReferenceCounted touch(Object hint) {
return this;
}
};
assertEquals(1, referenceCounted.refCnt());
Channel channel = new LocalChannel();
Channel channel2 = new LocalChannel();
group.register(channel).syncUninterruptibly();
group.register(channel2).syncUninterruptibly();
try {
if (flush) {
channel.writeAndFlush(referenceCounted, channel2.newPromise());
} else {
channel.write(referenceCounted, channel2.newPromise());
}
fail();
} catch (IllegalArgumentException expected) {
// expected
}
assertEquals(0, referenceCounted.refCnt());
channel.close().syncUninterruptibly();
channel2.close().syncUninterruptibly();
}
@Test
@Timeout(value = 5000, unit = TimeUnit.MILLISECONDS)
public void testHandlerAddedFailedButHandlerStillRemoved() throws InterruptedException {
testHandlerAddedFailedButHandlerStillRemoved0(false);
}
@Test
@Timeout(value = 5000, unit = TimeUnit.MILLISECONDS)
public void testHandlerAddedFailedButHandlerStillRemovedWithLaterRegister() throws InterruptedException {
testHandlerAddedFailedButHandlerStillRemoved0(true);
}
private static void testHandlerAddedFailedButHandlerStillRemoved0(boolean lateRegister)
throws InterruptedException {
EventExecutorGroup executorGroup = new DefaultEventExecutorGroup(16);
final int numHandlers = 32;
try {
Channel channel = new LocalChannel();
channel.config().setOption(ChannelOption.SINGLE_EVENTEXECUTOR_PER_GROUP, false);
if (!lateRegister) {
group.register(channel).sync();
}
channel.pipeline().addFirst(newHandler());
List<CountDownLatch> latchList = new ArrayList<CountDownLatch>(numHandlers);
for (int i = 0; i < numHandlers; i++) {
CountDownLatch latch = new CountDownLatch(1);
channel.pipeline().addFirst(executorGroup, "h" + i, new BadChannelHandler(latch));
latchList.add(latch);
}
if (lateRegister) {
group.register(channel).sync();
}
for (int i = 0; i < numHandlers; i++) {
// Wait until the latch was countDown which means handlerRemoved(...) was called.
latchList.get(i).await();
assertNull(channel.pipeline().get("h" + i));
}
} finally {
executorGroup.shutdownGracefully();
}
}
private static final | InboundCalledHandler |
java | elastic__elasticsearch | x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/LockingAtomicCounterTests.java | {
"start": 555,
"end": 2192
} | class ____ extends ESTestCase {
private LockingAtomicCounter lockingAtomicCounter;
@Before
public void setup() {
lockingAtomicCounter = new LockingAtomicCounter();
}
public void testRunnableWillRunIfCountMatches() throws Exception {
final AtomicBoolean done = new AtomicBoolean();
final long invalidationCount = lockingAtomicCounter.get();
assertTrue(lockingAtomicCounter.compareAndRun(invalidationCount, () -> done.set(true)));
assertTrue(done.get());
}
public void testIncrementAndRun() {
final int loop = randomIntBetween(1, 5);
IntStream.range(0, loop).forEach((ignored) -> {
try {
lockingAtomicCounter.increment();
} catch (Exception e) {
throw new RuntimeException(e);
}
});
assertThat((long) loop, equalTo(lockingAtomicCounter.get()));
}
public void testRunnableWillNotRunIfCounterHasChanged() throws Exception {
final AtomicBoolean done = new AtomicBoolean();
final long invalidationCount = lockingAtomicCounter.get();
final CountDownLatch countDownLatch = new CountDownLatch(1);
new Thread(() -> {
try {
lockingAtomicCounter.increment();
} catch (Exception e) {
throw new RuntimeException(e);
}
countDownLatch.countDown();
}).start();
countDownLatch.await();
assertFalse(lockingAtomicCounter.compareAndRun(invalidationCount, () -> done.set(true)));
assertFalse(done.get());
}
}
| LockingAtomicCounterTests |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/selection/methodgenerics/nestedgenerics/ReturnTypeHasNestedTypeVarMapper.java | {
"start": 959,
"end": 1227
} | class ____ {
private List<Set<String>> prop;
public List<Set<String>> getProp() {
return prop;
}
public Target setProp(List<Set<String>> prop) {
this.prop = prop;
return this;
}
}
}
| Target |
java | apache__kafka | clients/src/main/java/org/apache/kafka/common/acl/AclBinding.java | {
"start": 1018,
"end": 1167
} | class ____ {
private final ResourcePattern pattern;
private final AccessControlEntry entry;
/**
* Create an instance of this | AclBinding |
java | quarkusio__quarkus | integration-tests/quartz-deferred-datasource/src/main/java/io/quarkus/it/quartz/DisabledScheduledMethods.java | {
"start": 151,
"end": 744
} | class ____ {
volatile static String valueSetByCronScheduledMethod = "";
volatile static String valueSetByEveryScheduledMethod = "";
// This should never be called as the job is disabled
@Scheduled(cron = "${disabled}", identity = "disabled-cron-counter")
void setValueByCron() {
valueSetByCronScheduledMethod = "cron";
}
// This should never be called as the job is turned off
@Scheduled(every = "${off}", identity = "disabled-every-counter")
void setValueByEvery() {
valueSetByEveryScheduledMethod = "every";
}
}
| DisabledScheduledMethods |
java | quarkusio__quarkus | independent-projects/tools/devtools-common/src/main/java/io/quarkus/devtools/commands/handlers/UpdateProjectCommandHandler.java | {
"start": 2364,
"end": 21363
} | class ____ implements QuarkusCommandHandler {
public static final String ITEM_FORMAT = " %-7s %s";
@Override
public QuarkusCommandOutcome execute(QuarkusCommandInvocation invocation) throws QuarkusCommandException {
final JavaVersion projectJavaVersion = invocation.getQuarkusProject().getJavaVersion();
if (projectJavaVersion.isEmpty()) {
String instruction = invocation.getQuarkusProject().getBuildTool().isAnyGradle() ? "java>targetCompatibility"
: "maven.compiler.release property";
String error = String.format("Project Java version not detected, set %s in your build file to fix the error.",
instruction);
invocation.log().error(error);
return QuarkusCommandOutcome.failure(error);
} else {
invocation.log().info("Detected project Java version: %s", projectJavaVersion);
}
final ApplicationModel appModel = invocation.getValue(UpdateProject.APP_MODEL);
final ExtensionCatalog targetCatalog = invocation.getValue(UpdateProject.TARGET_CATALOG);
final ProjectState currentState = resolveProjectState(appModel,
invocation.getQuarkusProject().getExtensionsCatalog());
final ArtifactCoords currentQuarkusPlatformBom = getProjectQuarkusPlatformBOM(currentState);
var failure = ensureQuarkusBomVersionIsNotNull(currentQuarkusPlatformBom, invocation.log());
if (failure != null) {
return failure;
}
final ProjectState recommendedState = resolveRecommendedState(currentState, targetCatalog,
invocation.log());
final ArtifactCoords recommendedQuarkusPlatformBom = getProjectQuarkusPlatformBOM(recommendedState);
failure = ensureQuarkusBomVersionIsNotNull(recommendedQuarkusPlatformBom, invocation.log());
if (failure != null) {
return failure;
}
final ProjectPlatformUpdateInfo platformUpdateInfo = resolvePlatformUpdateInfo(currentState,
recommendedState);
final ProjectExtensionsUpdateInfo extensionsUpdateInfo = ProjectUpdateInfos.resolveExtensionsUpdateInfo(
currentState,
recommendedState);
boolean shouldUpdate = logUpdates(invocation.getQuarkusProject(), currentState, recommendedState, platformUpdateInfo,
extensionsUpdateInfo,
invocation.log());
Boolean rewrite = invocation.getValue(UpdateProject.REWRITE, null);
boolean rewriteDryRun = invocation.getValue(UpdateProject.REWRITE_DRY_RUN, false);
if (shouldUpdate) {
final QuarkusProject quarkusProject = invocation.getQuarkusProject();
final BuildTool buildTool = quarkusProject.getExtensionManager().getBuildTool();
// TODO targetCatalog shouldn't be used here, since it might not be the recommended one according to the calculated recommended state
String kotlinVersion = getMetadata(targetCatalog, "project", "properties", "kotlin-version");
final Optional<Integer> updateJavaVersion = resolveUpdateJavaVersion(extensionsUpdateInfo, projectJavaVersion);
QuarkusUpdates.ProjectUpdateRequest request = new QuarkusUpdates.ProjectUpdateRequest(
buildTool,
currentQuarkusPlatformBom.getVersion(),
recommendedQuarkusPlatformBom.getVersion(),
kotlinVersion,
updateJavaVersion,
extensionsUpdateInfo);
Path recipe = null;
try {
final Path projectDir = invocation.getQuarkusProject().getProjectDirPath();
final Path buildDir = projectDir
.resolve(invocation.getQuarkusProject().getBuildTool().getBuildDirectory());
final Path rewriteDir = buildDir.resolve("rewrite");
recipe = rewriteDir.resolve("rewrite.yaml");
Files.deleteIfExists(recipe);
Files.createDirectories(recipe.getParent());
final String quarkusUpdateRecipes = invocation.getValue(
UpdateProject.REWRITE_QUARKUS_UPDATE_RECIPES,
QuarkusUpdatesRepository.DEFAULT_UPDATE_RECIPES_VERSION);
final String additionalUpdateRecipes = invocation.getValue(
UpdateProject.REWRITE_ADDITIONAL_UPDATE_RECIPES,
null);
final FetchResult fetchResult = QuarkusUpdates.createRecipe(invocation.log(), recipe,
QuarkusProjectHelper.artifactResolver(), buildTool, quarkusUpdateRecipes,
additionalUpdateRecipes, request);
quarkusProject.log().info("");
quarkusProject.log()
.info("We have generated a recipe file to update your project (version updates + specific recipes):");
quarkusProject.log().info(MessageFormatter.underline(projectDir.relativize(recipe).toString()));
quarkusProject.log().info("");
if (rewriteDryRun) {
rewrite = true;
}
if (rewrite == null) {
CompletableFuture<String> userInputFuture = CompletableFuture
.supplyAsync(() -> askUserConfirmationForUpdate(invocation.log()));
try {
final String userInput = userInputFuture.get(2, TimeUnit.MINUTES).toLowerCase().trim();
if (userInput.equalsIgnoreCase("y")) {
rewrite = true;
} else if (userInput.equalsIgnoreCase("d")) {
rewriteDryRun = true;
rewrite = true;
} else {
quarkusProject.log().info("");
quarkusProject.log().info("Project update has been skipped.");
rewrite = false;
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(e);
} catch (ExecutionException | TimeoutException e) {
quarkusProject.log().info("");
quarkusProject.log().info("Project update has been skipped after timeout.");
rewrite = false;
}
}
if (rewrite) {
String rewritePluginVersion = invocation.getValue(UpdateProject.REWRITE_PLUGIN_VERSION,
fetchResult.getRewritePluginVersion());
quarkusProject.log().info("");
Path logFile = recipe.getParent().resolve("rewrite.log");
QuarkusUpdateCommand.handle(
invocation.log(),
buildTool,
quarkusProject.getProjectDirPath(),
rewritePluginVersion,
fetchResult.getRecipesGAV(),
recipe,
logFile,
rewriteDryRun);
final Path patchFile = rewriteDir.resolve("rewrite.patch");
if (rewriteDryRun && Files.isRegularFile(patchFile)) {
quarkusProject.log().info("Patch file available:");
quarkusProject.log().info(MessageFormatter.underline(projectDir.relativize(patchFile).toString()));
quarkusProject.log().info("");
}
} else {
printSeparator(quarkusProject.log());
}
} catch (IOException e) {
throw new QuarkusCommandException("Error while generating the project update script", e);
}
}
return QuarkusCommandOutcome.success();
}
private static String askUserConfirmationForUpdate(MessageWriter log) {
System.out.print(System.lineSeparator() +
MessageFormatter.bold(
"Do you want to apply the generated update recipes with OpenRewrite?")
+ " ([" + MessageFormatter.green("y") + "]es, [" + MessageFormatter.red("n") + "]o, ["
+ MessageFormatter.blue("d") + "]ry-run + [Enter])"
+ System.lineSeparator() + System.lineSeparator());
try (Scanner scanner = new Scanner(new FilterInputStream(System.in) {
@Override
public void close() throws IOException {
//don't close System.in!
}
})) {
return scanner.nextLine();
} catch (Exception e) {
log.debug("Unable to get user confirmation", e);
return "";
}
}
private static Optional<Integer> resolveUpdateJavaVersion(ProjectExtensionsUpdateInfo extensionsUpdateInfo,
JavaVersion projectJavaVersion) {
final OptionalInt minJavaVersion = extensionsUpdateInfo.getMinJavaVersion();
final Optional<Integer> updateJavaVersion;
if (minJavaVersion.isPresent()
&& projectJavaVersion.isPresent()
&& minJavaVersion.getAsInt() > projectJavaVersion.getAsInt()) {
updateJavaVersion = Optional.of(minJavaVersion.getAsInt());
} else {
updateJavaVersion = Optional.empty();
}
return updateJavaVersion;
}
private static ArtifactCoords getProjectQuarkusPlatformBOM(ProjectState currentState) {
for (ArtifactCoords c : currentState.getPlatformBoms()) {
if (c.getArtifactId().equals(ToolsConstants.DEFAULT_PLATFORM_BOM_ARTIFACT_ID)
|| c.getArtifactId().equals(ToolsConstants.UNIVERSE_PLATFORM_BOM_ARTIFACT_ID)) {
return c;
}
}
return null;
}
private static QuarkusCommandOutcome<Void> ensureQuarkusBomVersionIsNotNull(ArtifactCoords bomCoords, MessageWriter log) {
if (bomCoords == null) {
String error = "The project state is missing the Quarkus platform BOM";
log.error(error);
return QuarkusCommandOutcome.failure(error);
}
return null;
}
private static boolean logUpdates(QuarkusProject project, ProjectState currentState, ProjectState recommendedState,
ProjectPlatformUpdateInfo platformUpdateInfo,
ProjectExtensionsUpdateInfo extensionsUpdateInfo,
MessageWriter log) {
printSeparator(log);
if (currentState.getPlatformBoms().isEmpty()) {
log.info(MessageFormatter.red("The project does not import any Quarkus platform BOM"));
printSeparator(log);
return false;
}
if (currentState.getExtensions().isEmpty()) {
log.info("No Quarkus extensions were found among the project dependencies");
printSeparator(log);
return false;
}
if (currentState == recommendedState) {
log.info(MessageFormatter.green("The project is up-to-date)"));
printSeparator(log);
return false;
}
if (platformUpdateInfo.isPlatformUpdatesAvailable()) {
log.info(MessageFormatter.bold("Suggested Quarkus platform BOM updates:"));
if (!platformUpdateInfo.getImportVersionUpdates().isEmpty()) {
for (PlatformInfo importInfo : platformUpdateInfo.getImportVersionUpdates()) {
log.info(String.format(UpdateProjectCommandHandler.ITEM_FORMAT, "~",
importInfo.getImported().getKey().toGacString() + ":pom:["
+ MessageFormatter.red(importInfo.getImported().getVersion()) + " -> "
+ MessageFormatter.green(importInfo.getRecommendedVersion())
+ "]"));
}
}
if (platformUpdateInfo.isImportsToBeRemoved()) {
for (PlatformInfo importInfo : platformUpdateInfo.getPlatformImports().values()) {
if (importInfo.getRecommended() == null) {
log.info(String.format(UpdateProjectCommandHandler.ITEM_FORMAT, "-",
"[" + MessageFormatter.red(importInfo.getImported().toCompactCoords()) + "]"));
}
}
}
if (!platformUpdateInfo.getNewImports().isEmpty()) {
for (PlatformInfo importInfo : platformUpdateInfo.getNewImports()) {
log.info(String.format(UpdateProjectCommandHandler.ITEM_FORMAT, "+",
"[" + MessageFormatter.green(importInfo.getRecommended().toCompactCoords()) + "]"));
}
}
log.info("");
} else if (!extensionsUpdateInfo.shouldUpdateExtensions()) {
log.info(MessageFormatter.green("The project is up-to-date " + MessageIcons.UP_TO_DATE_ICON.iconOrMessage()));
printSeparator(log);
return false;
} else {
log.info(MessageFormatter
.green("Quarkus platform BOMs are up-to-date " + MessageIcons.UP_TO_DATE_ICON.iconOrMessage()));
log.info("");
}
if (extensionsUpdateInfo.getMinJavaVersion().isPresent() && project.getJavaVersion().isPresent()) {
final Integer extensionsMinJavaVersion = extensionsUpdateInfo.getMinJavaVersion().getAsInt();
if (extensionsMinJavaVersion > project.getJavaVersion().getAsInt()) {
log.warn("We detected that some of the updated extensions require an update of the Java version to: %s",
extensionsMinJavaVersion);
}
}
for (PlatformInfo platform : platformUpdateInfo.getPlatformImports().values()) {
final String provider = platform.getRecommendedProviderKey();
final List<ExtensionUpdateInfo> extensions = extensionsUpdateInfo.extensionsByProvider().getOrDefault(provider,
Collections.emptyList()).stream().filter(ExtensionUpdateInfo::isUpdateRecommended).toList();
if (extensions.isEmpty()) {
continue;
}
log.info(MessageFormatter.bold(
"Suggested extensions updates for '%s':".formatted(platform.getRecommendedProviderKey())));
for (ExtensionUpdateInfo e : extensions) {
final ExtensionUpdateInfo.VersionUpdateType versionUpdateType = e.getVersionUpdateType();
if (e.hasKeyChanged()) {
log.info(String.format(UpdateProjectCommandHandler.ITEM_FORMAT, "~", updateInfo(e)));
} else {
switch (versionUpdateType) {
case PLATFORM_MANAGED:
// The extension update is done when updating the platform
log.info(String.format(UpdateProjectCommandHandler.ITEM_FORMAT, UP_TO_DATE_ICON.iconOrMessage(),
e.getCurrentDep().getArtifact().getKey().toGacString()
+ " (synced with BOM)"));
break;
case RECOMMEND_PLATFORM_MANAGED:
log.info(String.format(UpdateProjectCommandHandler.ITEM_FORMAT, "-",
e.getCurrentDep().getArtifact().getKey().toGacString()) + ":["
+ MessageFormatter.red(e.getCurrentDep().getVersion()) + " -> "
+ MessageFormatter.green("managed") + "]");
break;
case ADD_VERSION:
log.info(String.format(UpdateProjectCommandHandler.ITEM_FORMAT, versionUpdateType.equals(
ExtensionUpdateInfo.VersionUpdateType.ADD_VERSION) ? "+" : "-",
e.getCurrentDep().getArtifact().getKey().toGacString()) + ":["
+ MessageFormatter.red("managed") + " -> " + MessageFormatter.green(
e.getRecommendedDependency().getVersion())
+ "]");
break;
case UPDATE_VERSION:
log.info(String.format(UpdateProjectCommandHandler.ITEM_FORMAT, "~",
updateInfo(e)));
break;
}
}
}
log.info("");
}
final List<ExtensionUpdateInfo> simpleVersionUpdates = extensionsUpdateInfo.getSimpleVersionUpdates().stream()
.filter(u -> !u.getCurrentDep().isPlatformExtension()).toList();
if (!simpleVersionUpdates.isEmpty()) {
log.info(MessageFormatter.bold("Suggested extension updates from other origins:"));
for (ExtensionUpdateInfo u : simpleVersionUpdates) {
if (u.getVersionUpdateType() == ExtensionUpdateInfo.VersionUpdateType.PLATFORM_MANAGED) {
log.info(String.format(UpdateProjectCommandHandler.ITEM_FORMAT, "-",
u.getCurrentDep().getArtifact().getKey().toGacString()) + ":["
+ MessageFormatter.red(u.getCurrentDep().getVersion()) + "]");
} else {
log.info(String.format(UpdateProjectCommandHandler.ITEM_FORMAT, "~",
updateInfo(u)));
}
}
}
printSeparator(log);
return true;
}
private static String updateInfo(ExtensionUpdateInfo u) {
return updateInfo(u.getCurrentDep().getArtifact(), u.getRecommendedDependency().getVersion());
}
static void printSeparator(MessageWriter log) {
log.info("");
log.info("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~");
log.info("");
}
static String updateInfo(ArtifactCoords current, String newVersion) {
return current.getKey().toGacString() + ":[" + MessageFormatter.red(
current.getVersion()) + " -> "
+ MessageFormatter.green(newVersion) + "]";
}
@SuppressWarnings({ "rawtypes", "unchecked" })
private <T> T getMetadata(ExtensionCatalog catalog, String... path) {
Object currentValue = catalog.getMetadata();
for (String pathElement : path) {
if (!(currentValue instanceof Map)) {
return null;
}
currentValue = ((Map) currentValue).get(pathElement);
}
return (T) currentValue;
}
}
| UpdateProjectCommandHandler |
java | apache__camel | components/camel-bindy/src/main/java/org/apache/camel/dataformat/bindy/format/factories/LocalTimeFormatFactory.java | {
"start": 1202,
"end": 1618
} | class ____ extends AbstractFormatFactory {
{
supportedClasses.add(LocalTime.class);
}
@Override
public Format<?> build(FormattingOptions formattingOptions) {
return new LocalTimePatternFormat(
formattingOptions.getPattern(),
formattingOptions.getTimezone(),
formattingOptions.getLocale());
}
private static | LocalTimeFormatFactory |
java | apache__spark | mllib/src/test/java/org/apache/spark/ml/linalg/JavaSQLDataTypesSuite.java | {
"start": 981,
"end": 1180
} | class ____ {
@Test
public void testSQLDataTypes() {
Assertions.assertEquals(new VectorUDT(), VectorType());
Assertions.assertEquals(new MatrixUDT(), MatrixType());
}
}
| JavaSQLDataTypesSuite |
java | apache__kafka | raft/src/main/java/org/apache/kafka/raft/MetadataLogConfig.java | {
"start": 1450,
"end": 10576
} | class ____ {
public static final String METADATA_LOG_DIR_CONFIG = "metadata.log.dir";
public static final String METADATA_LOG_DIR_DOC = "This configuration determines where we put the metadata log. " +
"If it is not set, the metadata log is placed in the first log directory from log.dirs.";
public static final String METADATA_SNAPSHOT_MAX_INTERVAL_MS_CONFIG = "metadata.log.max.snapshot.interval.ms";
public static final long METADATA_SNAPSHOT_MAX_INTERVAL_MS_DEFAULT = TimeUnit.HOURS.toMillis(1);
public static final String METADATA_SNAPSHOT_MAX_NEW_RECORD_BYTES_CONFIG = "metadata.log.max.record.bytes.between.snapshots";
public static final int METADATA_SNAPSHOT_MAX_NEW_RECORD_BYTES = 20 * 1024 * 1024;
public static final String METADATA_SNAPSHOT_MAX_NEW_RECORD_BYTES_DOC = "This is the maximum number of bytes in the log between the latest " +
"snapshot and the high-watermark needed before generating a new snapshot. The default value is " +
METADATA_SNAPSHOT_MAX_NEW_RECORD_BYTES + ". To generate snapshots based on the time elapsed, see the <code>" +
METADATA_SNAPSHOT_MAX_INTERVAL_MS_CONFIG + "</code> configuration. The Kafka node will generate a snapshot when " +
"either the maximum time interval is reached or the maximum bytes limit is reached.";
public static final String METADATA_SNAPSHOT_MAX_INTERVAL_MS_DOC = "This is the maximum number of milliseconds to wait to generate a snapshot " +
"if there are committed records in the log that are not included in the latest snapshot. A value of zero disables " +
"time based snapshot generation. The default value is " + METADATA_SNAPSHOT_MAX_INTERVAL_MS_DEFAULT + ". To generate " +
"snapshots based on the number of metadata bytes, see the <code>" + METADATA_SNAPSHOT_MAX_NEW_RECORD_BYTES_CONFIG + "</code> " +
"configuration. The Kafka node will generate a snapshot when either the maximum time interval is reached or the " +
"maximum bytes limit is reached.";
public static final String METADATA_LOG_SEGMENT_BYTES_CONFIG = "metadata.log.segment.bytes";
public static final String METADATA_LOG_SEGMENT_BYTES_DOC = "The maximum size of a single metadata log file.";
public static final int METADATA_LOG_SEGMENT_BYTES_DEFAULT = 1024 * 1024 * 1024;
public static final String INTERNAL_METADATA_LOG_SEGMENT_BYTES_CONFIG = "internal.metadata.log.segment.bytes";
public static final String INTERNAL_METADATA_LOG_SEGMENT_BYTES_DOC = "The maximum size of a single metadata log file, only for testing.";
public static final String METADATA_LOG_SEGMENT_MILLIS_CONFIG = "metadata.log.segment.ms";
public static final String METADATA_LOG_SEGMENT_MILLIS_DOC = "The maximum time before a new metadata log file is rolled out (in milliseconds).";
public static final long METADATA_LOG_SEGMENT_MILLIS_DEFAULT = 24 * 7 * 60 * 60 * 1000L;
public static final String METADATA_MAX_RETENTION_BYTES_CONFIG = "metadata.max.retention.bytes";
public static final int METADATA_MAX_RETENTION_BYTES_DEFAULT = 100 * 1024 * 1024;
public static final String METADATA_MAX_RETENTION_BYTES_DOC = "The maximum combined size of the metadata log and snapshots before deleting old " +
"snapshots and log files. Since at least one snapshot must exist before any logs can be deleted, this is a soft limit.";
public static final String METADATA_MAX_RETENTION_MILLIS_CONFIG = "metadata.max.retention.ms";
public static final String METADATA_MAX_RETENTION_MILLIS_DOC = "The number of milliseconds to keep a metadata log file or snapshot before " +
"deleting it. Since at least one snapshot must exist before any logs can be deleted, this is a soft limit.";
public static final long METADATA_MAX_RETENTION_MILLIS_DEFAULT = 24 * 7 * 60 * 60 * 1000L;
public static final String METADATA_MAX_IDLE_INTERVAL_MS_CONFIG = "metadata.max.idle.interval.ms";
public static final int METADATA_MAX_IDLE_INTERVAL_MS_DEFAULT = 500;
public static final String METADATA_MAX_IDLE_INTERVAL_MS_DOC = "This configuration controls how often the active " +
"controller should write no-op records to the metadata partition. If the value is 0, no-op records " +
"are not appended to the metadata partition. The default value is " + METADATA_MAX_IDLE_INTERVAL_MS_DEFAULT;
public static final String INTERNAL_METADATA_MAX_BATCH_SIZE_IN_BYTES_CONFIG = "internal.metadata.max.batch.size.in.bytes";
public static final String INTERNAL_METADATA_MAX_BATCH_SIZE_IN_BYTES_DOC = "The largest record batch size allowed in the metadata log, only for testing.";
public static final String INTERNAL_METADATA_MAX_FETCH_SIZE_IN_BYTES_CONFIG = "internal.metadata.max.fetch.size.in.bytes";
public static final String INTERNAL_METADATA_MAX_FETCH_SIZE_IN_BYTES_DOC = "The maximum number of bytes to read when fetching from the metadata log, only for testing.";
public static final String INTERNAL_METADATA_DELETE_DELAY_MILLIS_CONFIG = "internal.metadata.delete.delay.millis";
public static final String INTERNAL_METADATA_DELETE_DELAY_MILLIS_DOC = "The amount of time to wait before deleting a file from the filesystem, only for testing.";
public static final ConfigDef CONFIG_DEF = new ConfigDef()
.define(METADATA_SNAPSHOT_MAX_NEW_RECORD_BYTES_CONFIG, LONG, METADATA_SNAPSHOT_MAX_NEW_RECORD_BYTES, atLeast(1), HIGH, METADATA_SNAPSHOT_MAX_NEW_RECORD_BYTES_DOC)
.define(METADATA_SNAPSHOT_MAX_INTERVAL_MS_CONFIG, LONG, METADATA_SNAPSHOT_MAX_INTERVAL_MS_DEFAULT, atLeast(0), HIGH, METADATA_SNAPSHOT_MAX_INTERVAL_MS_DOC)
.define(METADATA_LOG_DIR_CONFIG, STRING, null, null, HIGH, METADATA_LOG_DIR_DOC)
.define(METADATA_LOG_SEGMENT_BYTES_CONFIG, INT, METADATA_LOG_SEGMENT_BYTES_DEFAULT, atLeast(8 * 1024 * 1024), HIGH, METADATA_LOG_SEGMENT_BYTES_DOC)
.define(METADATA_LOG_SEGMENT_MILLIS_CONFIG, LONG, METADATA_LOG_SEGMENT_MILLIS_DEFAULT, null, HIGH, METADATA_LOG_SEGMENT_MILLIS_DOC)
.define(METADATA_MAX_RETENTION_BYTES_CONFIG, LONG, METADATA_MAX_RETENTION_BYTES_DEFAULT, null, HIGH, METADATA_MAX_RETENTION_BYTES_DOC)
.define(METADATA_MAX_RETENTION_MILLIS_CONFIG, LONG, METADATA_MAX_RETENTION_MILLIS_DEFAULT, null, HIGH, METADATA_MAX_RETENTION_MILLIS_DOC)
.define(METADATA_MAX_IDLE_INTERVAL_MS_CONFIG, INT, METADATA_MAX_IDLE_INTERVAL_MS_DEFAULT, atLeast(0), LOW, METADATA_MAX_IDLE_INTERVAL_MS_DOC)
.defineInternal(INTERNAL_METADATA_LOG_SEGMENT_BYTES_CONFIG, INT, null, null, LOW, INTERNAL_METADATA_LOG_SEGMENT_BYTES_DOC)
.defineInternal(INTERNAL_METADATA_MAX_BATCH_SIZE_IN_BYTES_CONFIG, INT, KafkaRaftClient.MAX_BATCH_SIZE_BYTES, null, LOW, INTERNAL_METADATA_MAX_BATCH_SIZE_IN_BYTES_DOC)
.defineInternal(INTERNAL_METADATA_MAX_FETCH_SIZE_IN_BYTES_CONFIG, INT, KafkaRaftClient.MAX_FETCH_SIZE_BYTES, null, LOW, INTERNAL_METADATA_MAX_FETCH_SIZE_IN_BYTES_DOC)
.defineInternal(INTERNAL_METADATA_DELETE_DELAY_MILLIS_CONFIG, LONG, ServerLogConfigs.LOG_DELETE_DELAY_MS_DEFAULT, null, LOW, INTERNAL_METADATA_DELETE_DELAY_MILLIS_DOC);
private final int logSegmentBytes;
private final Integer internalSegmentBytes;
private final long logSegmentMillis;
private final long retentionMaxBytes;
private final long retentionMillis;
private final int internalMaxBatchSizeInBytes;
private final int internalMaxFetchSizeInBytes;
private final long internalDeleteDelayMillis;
public MetadataLogConfig(AbstractConfig config) {
this.logSegmentBytes = config.getInt(METADATA_LOG_SEGMENT_BYTES_CONFIG);
this.internalSegmentBytes = config.getInt(INTERNAL_METADATA_LOG_SEGMENT_BYTES_CONFIG);
this.logSegmentMillis = config.getLong(METADATA_LOG_SEGMENT_MILLIS_CONFIG);
this.retentionMaxBytes = config.getLong(METADATA_MAX_RETENTION_BYTES_CONFIG);
this.retentionMillis = config.getLong(METADATA_MAX_RETENTION_MILLIS_CONFIG);
this.internalMaxBatchSizeInBytes = config.getInt(INTERNAL_METADATA_MAX_BATCH_SIZE_IN_BYTES_CONFIG);
this.internalMaxFetchSizeInBytes = config.getInt(INTERNAL_METADATA_MAX_FETCH_SIZE_IN_BYTES_CONFIG);
this.internalDeleteDelayMillis = config.getLong(INTERNAL_METADATA_DELETE_DELAY_MILLIS_CONFIG);
}
public int logSegmentBytes() {
return logSegmentBytes;
}
public Integer internalSegmentBytes() {
return internalSegmentBytes;
}
public long logSegmentMillis() {
return logSegmentMillis;
}
public long retentionMaxBytes() {
return retentionMaxBytes;
}
public long retentionMillis() {
return retentionMillis;
}
public int internalMaxBatchSizeInBytes() {
return internalMaxBatchSizeInBytes;
}
public int internalMaxFetchSizeInBytes() {
return internalMaxFetchSizeInBytes;
}
public long internalDeleteDelayMillis() {
return internalDeleteDelayMillis;
}
}
| MetadataLogConfig |
java | apache__camel | components/camel-spring-parent/camel-spring/src/main/java/org/apache/camel/component/event/EventEndpoint.java | {
"start": 1864,
"end": 5285
} | class ____ extends DefaultEndpoint implements ApplicationContextAware {
private LoadBalancer loadBalancer;
private ApplicationContext applicationContext;
@UriPath(description = "Name of endpoint")
private String name;
public EventEndpoint(String endpointUri, EventComponent component, String name) {
super(endpointUri, component);
this.applicationContext = component.getApplicationContext();
this.name = name;
}
@Override
public boolean isRemote() {
return false;
}
@Override
public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
this.applicationContext = applicationContext;
}
public ApplicationContext getApplicationContext() {
return applicationContext;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
@Override
public Producer createProducer() throws Exception {
ObjectHelper.notNull(applicationContext, "applicationContext");
return new DefaultProducer(this) {
public void process(Exchange exchange) throws Exception {
ApplicationEvent event = toApplicationEvent(exchange);
applicationContext.publishEvent(event);
}
};
}
@Override
public EventConsumer createConsumer(Processor processor) throws Exception {
ObjectHelper.notNull(applicationContext, "applicationContext");
EventConsumer answer = new EventConsumer(this, processor);
configureConsumer(answer);
return answer;
}
public void onApplicationEvent(ApplicationEvent event) {
Exchange exchange = createExchange();
exchange.getIn().setBody(event);
try {
getLoadBalancer().process(exchange);
} catch (Exception e) {
throw wrapRuntimeCamelException(e);
}
}
public LoadBalancer getLoadBalancer() {
if (loadBalancer == null) {
loadBalancer = createLoadBalancer();
}
return loadBalancer;
}
public void setLoadBalancer(LoadBalancer loadBalancer) {
this.loadBalancer = loadBalancer;
}
@Override
public EventComponent getComponent() {
return (EventComponent) super.getComponent();
}
// Implementation methods
// -------------------------------------------------------------------------
public void consumerStarted(EventConsumer consumer) {
lock.lock();
try {
getComponent().consumerStarted(this);
getLoadBalancer().addProcessor(consumer.getAsyncProcessor());
} finally {
lock.unlock();
}
}
public void consumerStopped(EventConsumer consumer) {
lock.lock();
try {
getComponent().consumerStopped(this);
getLoadBalancer().removeProcessor(consumer.getAsyncProcessor());
} finally {
lock.unlock();
}
}
protected LoadBalancer createLoadBalancer() {
return new TopicLoadBalancer();
}
protected ApplicationEvent toApplicationEvent(Exchange exchange) {
ApplicationEvent event = exchange.getIn().getBody(ApplicationEvent.class);
if (event != null) {
return event;
}
return new CamelEvent(this, exchange);
}
}
| EventEndpoint |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/records/tofix/RecordUpdate3079FailingTest.java | {
"start": 563,
"end": 1281
} | class ____ {
public IdNameRecord value;
protected IdNameWrapper() { }
public IdNameWrapper(IdNameRecord v) { value = v; }
}
private final ObjectMapper MAPPER = newJsonMapper();
// [databind#3079]: Should be able to Record value directly
@JacksonTestFailureExpected
@Test
public void testDirectRecordUpdate() throws Exception
{
IdNameRecord orig = new IdNameRecord(123, "Bob");
IdNameRecord result = MAPPER.updateValue(orig,
Collections.singletonMap("id", 137));
assertNotNull(result);
assertEquals(137, result.id());
assertEquals("Bob", result.name());
assertNotSame(orig, result);
}
}
| IdNameWrapper |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/tool/schema/internal/DefaultSchemaFilterProvider.java | {
"start": 391,
"end": 1035
} | class ____ implements SchemaFilterProvider {
public static final DefaultSchemaFilterProvider INSTANCE = new DefaultSchemaFilterProvider();
@Override
public SchemaFilter getCreateFilter() {
return DefaultSchemaFilter.INSTANCE;
}
@Override
public SchemaFilter getDropFilter() {
return DefaultSchemaFilter.INSTANCE;
}
@Override
public SchemaFilter getMigrateFilter() {
return DefaultSchemaFilter.INSTANCE;
}
@Override
public SchemaFilter getValidateFilter() {
return DefaultSchemaFilter.INSTANCE;
}
@Override
public SchemaFilter getTruncatorFilter() {
return DefaultSchemaFilter.INSTANCE;
}
}
| DefaultSchemaFilterProvider |
java | apache__dubbo | dubbo-registry/dubbo-registry-api/src/main/java/org/apache/dubbo/registry/integration/RegistryProtocol.java | {
"start": 8392,
"end": 29374
} | class ____ implements Protocol, ScopeModelAware {
public static final String[] DEFAULT_REGISTER_PROVIDER_KEYS = {
APPLICATION_KEY,
CODEC_KEY,
EXCHANGER_KEY,
SERIALIZATION_KEY,
PREFER_SERIALIZATION_KEY,
CLUSTER_KEY,
CONNECTIONS_KEY,
DEPRECATED_KEY,
GROUP_KEY,
LOADBALANCE_KEY,
MOCK_KEY,
PATH_KEY,
TIMEOUT_KEY,
TOKEN_KEY,
VERSION_KEY,
WARMUP_KEY,
WEIGHT_KEY,
DUBBO_VERSION_KEY,
RELEASE_KEY,
SIDE_KEY,
IPV6_KEY,
PACKABLE_METHOD_FACTORY_KEY,
AUTH_KEY,
AUTHENTICATOR_KEY,
USERNAME_KEY,
PASSWORD_KEY
};
public static final String[] DEFAULT_REGISTER_CONSUMER_KEYS = {
APPLICATION_KEY, VERSION_KEY, GROUP_KEY, DUBBO_VERSION_KEY, RELEASE_KEY
};
private static final ErrorTypeAwareLogger logger = LoggerFactory.getErrorTypeAwareLogger(RegistryProtocol.class);
private final Map<String, ServiceConfigurationListener> serviceConfigurationListeners = new ConcurrentHashMap<>();
// To solve the problem of RMI repeated exposure port conflicts, the services that have been exposed are no longer
// exposed.
// provider url <--> registry url <--> exporter
private final ConcurrentHashMap<String, ConcurrentHashMap<String, ExporterChangeableWrapper<?>>> bounds =
new ConcurrentHashMap<>();
protected Protocol protocol;
protected ProxyFactory proxyFactory;
private ConcurrentMap<URL, ReExportTask> reExportFailedTasks = new ConcurrentHashMap<>();
private HashedWheelTimer retryTimer = new HashedWheelTimer(
new NamedThreadFactory("DubboReexportTimer", true),
DEFAULT_REGISTRY_RETRY_PERIOD,
TimeUnit.MILLISECONDS,
128);
private FrameworkModel frameworkModel;
private ExporterFactory exporterFactory;
public RegistryProtocol() {}
@Override
public void setFrameworkModel(FrameworkModel frameworkModel) {
this.frameworkModel = frameworkModel;
this.exporterFactory = frameworkModel.getBeanFactory().getBean(ExporterFactory.class);
}
public void setProtocol(Protocol protocol) {
this.protocol = protocol;
}
public void setProxyFactory(ProxyFactory proxyFactory) {
this.proxyFactory = proxyFactory;
}
@Override
public int getDefaultPort() {
return 9090;
}
public Map<URL, Set<NotifyListener>> getOverrideListeners() {
Map<URL, Set<NotifyListener>> map = new HashMap<>();
List<ApplicationModel> applicationModels = frameworkModel.getApplicationModels();
if (applicationModels.size() == 1) {
return applicationModels
.get(0)
.getBeanFactory()
.getBean(ProviderConfigurationListener.class)
.getOverrideListeners();
} else {
for (ApplicationModel applicationModel : applicationModels) {
map.putAll(applicationModel
.getBeanFactory()
.getBean(ProviderConfigurationListener.class)
.getOverrideListeners());
}
}
return map;
}
private static void register(Registry registry, URL registeredProviderUrl) {
ApplicationDeployer deployer =
registeredProviderUrl.getOrDefaultApplicationModel().getDeployer();
try {
deployer.increaseServiceRefreshCount();
String registryName = Optional.ofNullable(registry.getUrl())
.map(u -> u.getParameter(
RegistryConstants.REGISTRY_CLUSTER_KEY,
UrlUtils.isServiceDiscoveryURL(u) ? u.getParameter(REGISTRY_KEY) : u.getProtocol()))
.filter(StringUtils::isNotEmpty)
.orElse("unknown");
MetricsEventBus.post(
RegistryEvent.toRsEvent(
registeredProviderUrl.getApplicationModel(),
registeredProviderUrl.getServiceKey(),
1,
Collections.singletonList(registryName)),
() -> {
registry.register(registeredProviderUrl);
return null;
});
} finally {
deployer.decreaseServiceRefreshCount();
}
}
private void registerStatedUrl(URL registryUrl, URL registeredProviderUrl, boolean registered) {
ProviderModel model = (ProviderModel) registeredProviderUrl.getServiceModel();
model.addStatedUrl(new ProviderModel.RegisterStatedURL(registeredProviderUrl, registryUrl, registered));
}
@Override
public <T> Exporter<T> export(final Invoker<T> originInvoker) throws RpcException {
URL registryUrl = getRegistryUrl(originInvoker);
// url to export locally
URL providerUrl = getProviderUrl(originInvoker);
// Subscribe the override data
// FIXME When the provider subscribes, it will affect the scene : a certain JVM exposes the service and call
// the same service. Because the subscribed is cached key with the name of the service, it causes the
// subscription information to cover.
final URL overrideSubscribeUrl = getSubscribedOverrideUrl(providerUrl);
final OverrideListener overrideSubscribeListener = new OverrideListener(overrideSubscribeUrl, originInvoker);
ConcurrentHashMap<URL, Set<NotifyListener>> overrideListeners =
getProviderConfigurationListener(overrideSubscribeUrl).getOverrideListeners();
ConcurrentHashMapUtils.computeIfAbsent(overrideListeners, overrideSubscribeUrl, k -> new ConcurrentHashSet<>())
.add(overrideSubscribeListener);
providerUrl = overrideUrlWithConfig(providerUrl, overrideSubscribeListener);
// export invoker
final ExporterChangeableWrapper<T> exporter = doLocalExport(originInvoker, providerUrl);
// url to registry
final Registry registry = getRegistry(registryUrl);
final URL registeredProviderUrl = customizeURL(providerUrl, registryUrl);
// decide if we need to delay publish (provider itself and registry should both need to register)
boolean register = providerUrl.getParameter(REGISTER_KEY, true) && registryUrl.getParameter(REGISTER_KEY, true);
if (register) {
register(registry, registeredProviderUrl);
}
// register stated url on provider model
registerStatedUrl(registryUrl, registeredProviderUrl, register);
exporter.setRegisterUrl(registeredProviderUrl);
exporter.setSubscribeUrl(overrideSubscribeUrl);
exporter.setNotifyListener(overrideSubscribeListener);
exporter.setRegistered(register);
ApplicationModel applicationModel = getApplicationModel(providerUrl.getScopeModel());
if (applicationModel
.modelEnvironment()
.getConfiguration()
.convert(Boolean.class, ENABLE_26X_CONFIGURATION_LISTEN, true)) {
if (!registry.isServiceDiscovery()) {
// Deprecated! Subscribe to override rules in 2.6.x or before.
registry.subscribe(overrideSubscribeUrl, overrideSubscribeListener);
}
}
notifyExport(exporter);
// Ensure that a new exporter instance is returned every time export
return new DestroyableExporter<>(exporter);
}
private <T> void notifyExport(ExporterChangeableWrapper<T> exporter) {
ScopeModel scopeModel = exporter.getRegisterUrl().getScopeModel();
List<RegistryProtocolListener> listeners = ScopeModelUtil.getExtensionLoader(
RegistryProtocolListener.class, scopeModel)
.getActivateExtension(exporter.getOriginInvoker().getUrl(), REGISTRY_PROTOCOL_LISTENER_KEY);
if (CollectionUtils.isNotEmpty(listeners)) {
for (RegistryProtocolListener listener : listeners) {
listener.onExport(this, exporter);
}
}
}
private URL overrideUrlWithConfig(URL providerUrl, OverrideListener listener) {
ProviderConfigurationListener providerConfigurationListener = getProviderConfigurationListener(providerUrl);
providerUrl = providerConfigurationListener.overrideUrl(providerUrl);
ServiceConfigurationListener serviceConfigurationListener =
new ServiceConfigurationListener(providerUrl.getOrDefaultModuleModel(), providerUrl, listener);
serviceConfigurationListeners.put(providerUrl.getServiceKey(), serviceConfigurationListener);
return serviceConfigurationListener.overrideUrl(providerUrl);
}
@SuppressWarnings("unchecked")
private <T> ExporterChangeableWrapper<T> doLocalExport(final Invoker<T> originInvoker, URL providerUrl) {
String providerUrlKey = getProviderUrlKey(originInvoker);
String registryUrlKey = getRegistryUrlKey(originInvoker);
Invoker<?> invokerDelegate = new InvokerDelegate<>(originInvoker, providerUrl);
ReferenceCountExporter<?> exporter =
exporterFactory.createExporter(providerUrlKey, () -> protocol.export(invokerDelegate));
return (ExporterChangeableWrapper<T>) ConcurrentHashMapUtils.computeIfAbsent(
ConcurrentHashMapUtils.computeIfAbsent(bounds, providerUrlKey, k -> new ConcurrentHashMap<>()),
registryUrlKey,
s -> new ExporterChangeableWrapper<>((ReferenceCountExporter<T>) exporter, originInvoker));
}
public <T> void reExport(Exporter<T> exporter, URL newInvokerUrl) {
if (exporter instanceof ExporterChangeableWrapper) {
ExporterChangeableWrapper<T> exporterWrapper = (ExporterChangeableWrapper<T>) exporter;
Invoker<T> originInvoker = exporterWrapper.getOriginInvoker();
reExport(originInvoker, newInvokerUrl);
}
}
/**
* Reexport the invoker of the modified url
*
* @param originInvoker
* @param newInvokerUrl
* @param <T>
*/
@SuppressWarnings("unchecked")
public <T> void reExport(final Invoker<T> originInvoker, URL newInvokerUrl) {
String providerUrlKey = getProviderUrlKey(originInvoker);
String registryUrlKey = getRegistryUrlKey(originInvoker);
Map<String, ExporterChangeableWrapper<?>> registryMap = bounds.get(providerUrlKey);
if (registryMap == null) {
logger.warn(
INTERNAL_ERROR,
"error state, exporterMap can not be null",
"",
"error state, exporterMap can not be null",
new IllegalStateException("error state, exporterMap can not be null"));
return;
}
ExporterChangeableWrapper<T> exporter = (ExporterChangeableWrapper<T>) registryMap.get(registryUrlKey);
if (exporter == null) {
logger.warn(
INTERNAL_ERROR,
"error state, exporterMap can not be null",
"",
"error state, exporterMap can not be null",
new IllegalStateException("error state, exporterMap can not be null"));
return;
}
URL registeredUrl = exporter.getRegisterUrl();
URL registryUrl = getRegistryUrl(originInvoker);
URL newProviderUrl = customizeURL(newInvokerUrl, registryUrl);
// update local exporter
Invoker<T> invokerDelegate = new InvokerDelegate<>(originInvoker, newInvokerUrl);
exporter.setExporter(protocol.export(invokerDelegate));
// update registry
if (!newProviderUrl.equals(registeredUrl)) {
try {
doReExport(originInvoker, exporter, registryUrl, registeredUrl, newProviderUrl);
} catch (Exception e) {
ReExportTask oldTask = reExportFailedTasks.get(registeredUrl);
if (oldTask != null) {
return;
}
ReExportTask task = new ReExportTask(
() -> doReExport(originInvoker, exporter, registryUrl, registeredUrl, newProviderUrl),
registeredUrl,
null);
oldTask = reExportFailedTasks.putIfAbsent(registeredUrl, task);
if (oldTask == null) {
// never has a retry task. then start a new task for retry.
retryTimer.newTimeout(
task,
registryUrl.getParameter(REGISTRY_RETRY_PERIOD_KEY, DEFAULT_REGISTRY_RETRY_PERIOD),
TimeUnit.MILLISECONDS);
}
}
}
}
private <T> void doReExport(
final Invoker<T> originInvoker,
ExporterChangeableWrapper<T> exporter,
URL registryUrl,
URL oldProviderUrl,
URL newProviderUrl) {
if (exporter.isRegistered()) {
Registry registry;
try {
registry = getRegistry(getRegistryUrl(originInvoker));
} catch (Exception e) {
throw new SkipFailbackWrapperException(e);
}
logger.info("Try to unregister old url: " + oldProviderUrl);
registry.reExportUnregister(oldProviderUrl);
logger.info("Try to register new url: " + newProviderUrl);
registry.reExportRegister(newProviderUrl);
}
try {
ProviderModel.RegisterStatedURL statedUrl = getStatedUrl(registryUrl, newProviderUrl);
statedUrl.setProviderUrl(newProviderUrl);
exporter.setRegisterUrl(newProviderUrl);
} catch (Exception e) {
throw new SkipFailbackWrapperException(e);
}
}
private ProviderModel.RegisterStatedURL getStatedUrl(URL registryUrl, URL providerUrl) {
ProviderModel providerModel =
frameworkModel.getServiceRepository().lookupExportedService(providerUrl.getServiceKey());
List<ProviderModel.RegisterStatedURL> statedUrls = providerModel.getStatedUrl();
return statedUrls.stream()
.filter(u -> u.getRegistryUrl().equals(registryUrl)
&& u.getProviderUrl().getProtocol().equals(providerUrl.getProtocol()))
.findFirst()
.orElseThrow(() -> new IllegalStateException("There should have at least one registered url."));
}
/**
* Get an instance of registry based on the address of invoker
*
* @param registryUrl
* @return
*/
protected Registry getRegistry(final URL registryUrl) {
RegistryFactory registryFactory = ScopeModelUtil.getExtensionLoader(
RegistryFactory.class, registryUrl.getScopeModel())
.getAdaptiveExtension();
return registryFactory.getRegistry(registryUrl);
}
protected URL getRegistryUrl(Invoker<?> originInvoker) {
return originInvoker.getUrl();
}
protected URL getRegistryUrl(URL url) {
if (SERVICE_REGISTRY_PROTOCOL.equals(url.getProtocol())) {
return url;
}
return url.addParameter(REGISTRY_KEY, url.getProtocol()).setProtocol(SERVICE_REGISTRY_PROTOCOL);
}
/**
* Return the url that is registered to the registry and filter the url parameter once
*
* @param providerUrl provider service url
* @param registryUrl registry center url
* @return url to registry.
*/
private URL customizeURL(final URL providerUrl, final URL registryUrl) {
URL newProviderURL = providerUrl.putAttribute(SIMPLIFIED_KEY, registryUrl.getParameter(SIMPLIFIED_KEY, false));
newProviderURL = newProviderURL.putAttribute(EXTRA_KEYS_KEY, registryUrl.getParameter(EXTRA_KEYS_KEY, ""));
ApplicationModel applicationModel = providerUrl.getOrDefaultApplicationModel();
ExtensionLoader<ServiceURLCustomizer> loader = applicationModel.getExtensionLoader(ServiceURLCustomizer.class);
for (ServiceURLCustomizer customizer : loader.getSupportedExtensionInstances()) {
newProviderURL = customizer.customize(newProviderURL, applicationModel);
}
return newProviderURL;
}
private URL getSubscribedOverrideUrl(URL registeredProviderUrl) {
return registeredProviderUrl
.setProtocol(PROVIDER_PROTOCOL)
.addParameters(CATEGORY_KEY, CONFIGURATORS_CATEGORY, CHECK_KEY, String.valueOf(false));
}
/**
* Get the address of the providerUrl through the url of the invoker
*
* @param originInvoker
* @return
*/
private URL getProviderUrl(final Invoker<?> originInvoker) {
Object providerURL = originInvoker.getUrl().getAttribute(EXPORT_KEY);
if (!(providerURL instanceof URL)) {
throw new IllegalArgumentException("The registry export url is null! registry: "
+ originInvoker.getUrl().getAddress());
}
return (URL) providerURL;
}
/**
* Get the key cached in bounds by invoker
*
* @param originInvoker
* @return
*/
private String getProviderUrlKey(final Invoker<?> originInvoker) {
URL providerUrl = getProviderUrl(originInvoker);
return providerUrl.removeParameters(DYNAMIC_KEY, ENABLED_KEY).toFullString();
}
private String getRegistryUrlKey(final Invoker<?> originInvoker) {
URL registryUrl = getRegistryUrl(originInvoker);
return registryUrl.removeParameters(DYNAMIC_KEY, ENABLED_KEY).toFullString();
}
@Override
@SuppressWarnings("unchecked")
public <T> Invoker<T> refer(Class<T> type, URL url) throws RpcException {
url = getRegistryUrl(url);
Registry registry = getRegistry(url);
if (RegistryService.class.equals(type)) {
return proxyFactory.getInvoker((T) registry, type, url);
}
// group="a,b" or group="*"
Map<String, String> qs = (Map<String, String>) url.getAttribute(REFER_KEY);
String group = qs.get(GROUP_KEY);
if (StringUtils.isNotEmpty(group)) {
if ((COMMA_SPLIT_PATTERN.split(group)).length > 1 || "*".equals(group)) {
return doRefer(
Cluster.getCluster(url.getScopeModel(), MERGEABLE_CLUSTER_NAME), registry, type, url, qs);
}
}
Cluster cluster = Cluster.getCluster(url.getScopeModel(), qs.get(CLUSTER_KEY));
return doRefer(cluster, registry, type, url, qs);
}
protected <T> Invoker<T> doRefer(
Cluster cluster, Registry registry, Class<T> type, URL url, Map<String, String> parameters) {
Map<String, Object> consumerAttribute = new HashMap<>(url.getAttributes());
consumerAttribute.remove(REFER_KEY);
String p = isEmpty(parameters.get(PROTOCOL_KEY)) ? CONSUMER : parameters.get(PROTOCOL_KEY);
URL consumerUrl = new ServiceConfigURL(
p,
null,
null,
parameters.get(REGISTER_IP_KEY),
0,
getPath(parameters, type),
parameters,
consumerAttribute);
url = url.putAttribute(CONSUMER_URL_KEY, consumerUrl);
ClusterInvoker<T> migrationInvoker = getMigrationInvoker(this, cluster, registry, type, url, consumerUrl);
return interceptInvoker(migrationInvoker, url, consumerUrl);
}
private String getPath(Map<String, String> parameters, Class<?> type) {
return !ProtocolUtils.isGeneric(parameters.get(GENERIC_KEY)) ? type.getName() : parameters.get(INTERFACE_KEY);
}
protected <T> ClusterInvoker<T> getMigrationInvoker(
RegistryProtocol registryProtocol,
Cluster cluster,
Registry registry,
Class<T> type,
URL url,
URL consumerUrl) {
return new ServiceDiscoveryMigrationInvoker<>(registryProtocol, cluster, registry, type, url, consumerUrl);
}
/**
* This method tries to load all RegistryProtocolListener definitions, which are used to control the behaviour of invoker by interacting with defined, then uses those listeners to
* change the status and behaviour of the MigrationInvoker.
* <p>
* Currently available Listener is MigrationRuleListener, one used to control the Migration behaviour with dynamically changing rules.
*
* @param invoker MigrationInvoker that determines which type of invoker list to use
* @param url The original url generated during refer, more like a registry:// style url
* @param consumerUrl Consumer url representing current | RegistryProtocol |
java | quarkusio__quarkus | extensions/mongodb-client/runtime/src/main/java/io/quarkus/mongodb/reactive/ReactiveMongoCollection.java | {
"start": 7230,
"end": 7774
} | class ____ cast any distinct items into.
* @param <D> the target type of the iterable.
* @param options the stream options
* @return a {@link Multi} emitting the sequence of distinct values
*/
<D> Multi<D> distinct(String fieldName, Bson filter, Class<D> clazz, DistinctOptions options);
/**
* Gets the distinct values of the specified field name.
*
* @param clientSession the client session with which to associate this operation
* @param fieldName the field name
* @param clazz the default | to |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java | {
"start": 4238,
"end": 65185
} | class ____ extends AbstractService
implements EventHandler<JobHistoryEvent> {
private final AppContext context;
private final int startCount;
private int eventCounter;
// Those file systems may differ from the job configuration
// See org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils
// #ensurePathInDefaultFileSystem
private FileSystem stagingDirFS; // log Dir FileSystem
private FileSystem doneDirFS; // done Dir FileSystem
private Path stagingDirPath = null;
private Path doneDirPrefixPath = null; // folder for completed jobs
private int maxUnflushedCompletionEvents;
private int postJobCompletionMultiplier;
private long flushTimeout;
private int minQueueSizeForBatchingFlushes; // TODO: Rename
private int numUnflushedCompletionEvents = 0;
private boolean isTimerActive;
private EventWriter.WriteMode jhistMode =
EventWriter.WriteMode.JSON;
protected BlockingQueue<JobHistoryEvent> eventQueue =
new LinkedBlockingQueue<JobHistoryEvent>();
protected boolean handleTimelineEvent = false;
protected AsyncDispatcher atsEventDispatcher = null;
protected Thread eventHandlingThread;
private volatile boolean stopped;
private final Object lock = new Object();
private static final Logger LOG = LoggerFactory.getLogger(
JobHistoryEventHandler.class);
protected static final Map<JobId, MetaInfo> fileMap =
Collections.<JobId,MetaInfo>synchronizedMap(new HashMap<JobId,MetaInfo>());
// should job completion be force when the AM shuts down?
protected volatile boolean forceJobCompletion = false;
@VisibleForTesting
protected TimelineClient timelineClient;
@VisibleForTesting
protected TimelineV2Client timelineV2Client;
private static String MAPREDUCE_JOB_ENTITY_TYPE = "MAPREDUCE_JOB";
private static String MAPREDUCE_TASK_ENTITY_TYPE = "MAPREDUCE_TASK";
private static final String MAPREDUCE_TASK_ATTEMPT_ENTITY_TYPE =
"MAPREDUCE_TASK_ATTEMPT";
public JobHistoryEventHandler(AppContext context, int startCount) {
super("JobHistoryEventHandler");
this.context = context;
this.startCount = startCount;
}
/* (non-Javadoc)
 * @see org.apache.hadoop.yarn.service.AbstractService#init(org.
 * apache.hadoop.conf.Configuration)
 * Initializes the FileSystem and Path objects for the log and done directories.
 * Creates these directories if they do not already exist.
 * Also reads the flush/batching tunables, decides whether timeline (ATS v1
 * or v2) publishing is enabled, and picks the jhist serialization format.
 */
@Override
protected void serviceInit(Configuration conf) throws Exception {
String jobId =
TypeConverter.fromYarn(context.getApplicationID()).toString();
// Resolve all three history locations up front so a misconfiguration fails
// fast, before any filesystem work is attempted.
String stagingDirStr = null;
String doneDirStr = null;
String userDoneDirStr = null;
try {
stagingDirStr = JobHistoryUtils.getConfiguredHistoryStagingDirPrefix(conf,
jobId);
doneDirStr =
JobHistoryUtils.getConfiguredHistoryIntermediateDoneDirPrefix(conf);
userDoneDirStr =
JobHistoryUtils.getHistoryIntermediateDoneDirForUser(conf);
} catch (IOException e) {
LOG.error("Failed while getting the configured log directories", e);
throw new YarnRuntimeException(e);
}
//Check for the existence of the history staging dir. Maybe create it.
try {
stagingDirPath =
FileContext.getFileContext(conf).makeQualified(new Path(stagingDirStr));
stagingDirFS = FileSystem.get(stagingDirPath.toUri(), conf);
mkdir(stagingDirFS, stagingDirPath, new FsPermission(
JobHistoryUtils.HISTORY_STAGING_DIR_PERMISSIONS));
} catch (IOException e) {
LOG.error("Failed while checking for/creating history staging path: ["
+ stagingDirPath + "]", e);
throw new YarnRuntimeException(e);
}
//Check for the existence of intermediate done dir.
Path doneDirPath = null;
try {
doneDirPath = FileContext.getFileContext(conf).makeQualified(new Path(doneDirStr));
doneDirFS = FileSystem.get(doneDirPath.toUri(), conf);
// This directory will be in a common location, or this may be a cluster
// meant for a single user. Creating based on the conf. Should ideally be
// created by the JobHistoryServer or as part of deployment.
if (!doneDirFS.exists(doneDirPath)) {
if (JobHistoryUtils.shouldCreateNonUserDirectory(conf)) {
LOG.info("Creating intermediate history logDir: ["
+ doneDirPath
+ "] + based on conf. Should ideally be created by the JobHistoryServer: "
+ MRJobConfig.MR_AM_CREATE_JH_INTERMEDIATE_BASE_DIR);
mkdir(
doneDirFS,
doneDirPath,
new FsPermission(
JobHistoryUtils.HISTORY_INTERMEDIATE_DONE_DIR_PERMISSIONS
.toShort()));
// TODO Temporary toShort till new FsPermission(FsPermissions)
// respects
// sticky
} else {
// Refusing to create the shared dir: fail startup with guidance so the
// operator either pre-creates it or opts in via configuration.
String message = "Not creating intermediate history logDir: ["
+ doneDirPath
+ "] based on conf: "
+ MRJobConfig.MR_AM_CREATE_JH_INTERMEDIATE_BASE_DIR
+ ". Either set to true or pre-create this directory with" +
" appropriate permissions";
LOG.error(message);
throw new YarnRuntimeException(message);
}
}
} catch (IOException e) {
LOG.error("Failed checking for the existence of history intermediate " +
"done directory: [" + doneDirPath + "]");
throw new YarnRuntimeException(e);
}
//Check/create user directory under intermediate done dir.
try {
doneDirPrefixPath =
FileContext.getFileContext(conf).makeQualified(new Path(userDoneDirStr));
mkdir(doneDirFS, doneDirPrefixPath, JobHistoryUtils.
getConfiguredHistoryIntermediateUserDoneDirPermissions(conf));
} catch (IOException e) {
LOG.error("Error creating user intermediate history done directory: [ "
+ doneDirPrefixPath + "]", e);
throw new YarnRuntimeException(e);
}
// Maximum number of unflushed completion-events that can stay in the queue
// before flush kicks in.
maxUnflushedCompletionEvents =
conf.getInt(MRJobConfig.MR_AM_HISTORY_MAX_UNFLUSHED_COMPLETE_EVENTS,
MRJobConfig.DEFAULT_MR_AM_HISTORY_MAX_UNFLUSHED_COMPLETE_EVENTS);
// We want to cut down flushes after job completes so as to write quicker,
// so we increase maxUnflushedEvents post Job completion by using the
// following multiplier.
postJobCompletionMultiplier =
conf.getInt(
MRJobConfig.MR_AM_HISTORY_JOB_COMPLETE_UNFLUSHED_MULTIPLIER,
MRJobConfig.DEFAULT_MR_AM_HISTORY_JOB_COMPLETE_UNFLUSHED_MULTIPLIER);
// Max time until which flush doesn't take place.
flushTimeout =
conf.getLong(MRJobConfig.MR_AM_HISTORY_COMPLETE_EVENT_FLUSH_TIMEOUT_MS,
MRJobConfig.DEFAULT_MR_AM_HISTORY_COMPLETE_EVENT_FLUSH_TIMEOUT_MS);
// Below this queue size, events are flushed one at a time rather than
// batched.
minQueueSizeForBatchingFlushes =
conf.getInt(
MRJobConfig.MR_AM_HISTORY_USE_BATCHED_FLUSH_QUEUE_SIZE_THRESHOLD,
MRJobConfig.DEFAULT_MR_AM_HISTORY_USE_BATCHED_FLUSH_QUEUE_SIZE_THRESHOLD);
// TODO replace MR specific configurations on timeline service with getting
// configuration from RM through registerApplicationMaster() in
// ApplicationMasterProtocol with return value for timeline service
// configuration status: off, on_with_v1 or on_with_v2.
if (conf.getBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA,
MRJobConfig.DEFAULT_MAPREDUCE_JOB_EMIT_TIMELINE_DATA)) {
LOG.info("Emitting job history data to the timeline service is enabled");
if (YarnConfiguration.timelineServiceEnabled(conf)) {
// Exactly one of timelineV2Client / timelineClient is initialized,
// depending on the configured timeline service version; the chosen
// client is fetched from the running app context, not created here.
boolean timelineServiceV2Enabled =
YarnConfiguration.timelineServiceV2Enabled(conf);
if(timelineServiceV2Enabled) {
timelineV2Client =
((MRAppMaster.RunningAppContext)context).getTimelineV2Client();
timelineV2Client.init(conf);
} else {
timelineClient =
((MRAppMaster.RunningAppContext) context).getTimelineClient();
timelineClient.init(conf);
}
handleTimelineEvent = true;
LOG.info("Timeline service is enabled; version: " +
YarnConfiguration.getTimelineServiceVersion(conf));
} else {
LOG.info("Timeline service is not enabled");
}
} else {
LOG.info("Emitting job history data to the timeline server is not " +
"enabled");
}
// Flag for setting
String jhistFormat = conf.get(JHAdminConfig.MR_HS_JHIST_FORMAT,
JHAdminConfig.DEFAULT_MR_HS_JHIST_FORMAT);
if (jhistFormat.equals("json")) {
jhistMode = EventWriter.WriteMode.JSON;
} else if (jhistFormat.equals("binary")) {
jhistMode = EventWriter.WriteMode.BINARY;
} else {
// Unknown format: warn and keep jhistMode at its previously-set default.
LOG.warn("Unrecognized value '" + jhistFormat + "' for property " +
JHAdminConfig.MR_HS_JHIST_FORMAT + ". Valid values are " +
"'json' or 'binary'. Falling back to default value '" +
JHAdminConfig.DEFAULT_MR_HS_JHIST_FORMAT + "'.");
}
// initiate the atsEventDispatcher for timeline event
// if timeline service is enabled.
if (handleTimelineEvent) {
atsEventDispatcher = createDispatcher();
EventHandler<JobHistoryEvent> timelineEventHandler =
new ForwardingEventHandler();
atsEventDispatcher.register(EventType.class, timelineEventHandler);
atsEventDispatcher.setDrainEventsOnStop();
atsEventDispatcher.init(conf);
}
super.serviceInit(conf);
}
/**
 * Factory for the ATS event dispatcher. Protected so subclasses can
 * substitute a different dispatcher implementation.
 */
protected AsyncDispatcher createDispatcher() {
AsyncDispatcher dispatcher = new AsyncDispatcher("Job ATS Event Dispatcher");
return dispatcher;
}
/**
 * Creates {@code path} on {@code fs} with permissions {@code fsp} if it does
 * not already exist. If the filesystem applied different permissions on
 * creation (e.g. due to umask), they are set explicitly afterwards. A
 * concurrent creation (FileAlreadyExistsException) is treated as success.
 */
private void mkdir(FileSystem fs, Path path, FsPermission fsp)
throws IOException {
// Guard clause: nothing to do when the directory is already present.
if (fs.exists(path)) {
return;
}
try {
fs.mkdirs(path, fsp);
FileStatus created = fs.getFileStatus(path);
LOG.info("Perms after creating " + created.getPermission().toShort()
+ ", Expected: " + fsp.toShort());
if (created.getPermission().toShort() != fsp.toShort()) {
LOG.info("Explicitly setting permissions to : " + fsp.toShort()
+ ", " + fsp);
fs.setPermission(path, fsp);
}
} catch (FileAlreadyExistsException e) {
// Someone else created it between the exists() check and mkdirs().
LOG.info("Directory: [" + path + "] already exists.");
}
}
/**
 * Starts the active timeline client (v1 or v2, whichever serviceInit set up),
 * the event-handling thread that drains eventQueue, and — when timeline
 * publishing is enabled — the ATS dispatcher.
 */
@Override
protected void serviceStart() throws Exception {
if (timelineClient != null) {
timelineClient.start();
} else if (timelineV2Client != null) {
timelineV2Client.start();
}
eventHandlingThread = new SubjectInheritingThread(new Runnable() {
@Override
public void run() {
JobHistoryEvent event = null;
while (!stopped && !Thread.currentThread().isInterrupted()) {
// Log the size of the history-event-queue every so often.
if (eventCounter != 0 && eventCounter % 1000 == 0) {
eventCounter = 0;
LOG.info("Size of the JobHistory event queue is "
+ eventQueue.size());
} else {
eventCounter++;
}
try {
event = eventQueue.take();
} catch (InterruptedException e) {
LOG.info("EventQueue take interrupted. Returning");
return;
}
// If an event has been removed from the queue. Handle it.
// The rest of the queue is handled via stop()
// Clear the interrupt status if it's set before calling handleEvent
// and set it if it was set before calling handleEvent.
// Interrupts received from other threads during handleEvent cannot be
// dealth with - Shell.runCommand() ignores them.
synchronized (lock) {
boolean isInterrupted = Thread.interrupted();
handleEvent(event);
if (isInterrupted) {
LOG.debug("Event handling interrupted");
Thread.currentThread().interrupt();
}
}
}
}
}, "eventHandlingThread");
eventHandlingThread.start();
if (handleTimelineEvent) {
atsEventDispatcher.start();
}
super.serviceStart();
}
/**
 * Orderly shutdown: stop the event-handling thread, cancel flush timers,
 * drain any events still queued, force-close jobs whose writers are still
 * active (writing a JobUnsuccessfulCompletionEvent for each), close all file
 * handles, and finally stop the ATS dispatcher and timeline client.
 */
@Override
protected void serviceStop() throws Exception {
LOG.info("Stopping JobHistoryEventHandler. "
+ "Size of the outstanding queue size is " + eventQueue.size());
stopped = true;
//do not interrupt while event handling is in progress
synchronized(lock) {
if (eventHandlingThread != null) {
LOG.debug("Interrupting Event Handling thread");
eventHandlingThread.interrupt();
} else {
LOG.debug("Null event handling thread");
}
}
try {
if (eventHandlingThread != null) {
LOG.debug("Waiting for Event Handling thread to complete");
eventHandlingThread.join();
}
} catch (InterruptedException ie) {
LOG.info("Interrupted Exception while stopping", ie);
}
// Cancel all timers - so that they aren't invoked during or after
// the metaInfo object is wrapped up.
for (MetaInfo mi : fileMap.values()) {
try {
if (LOG.isDebugEnabled()) {
LOG.debug("Shutting down timer for " + mi);
}
mi.shutDownTimer();
} catch (IOException e) {
LOG.info("Exception while canceling delayed flush timer. "
+ "Likely caused by a failed flush " + e.getMessage());
}
}
//write all the events remaining in queue
Iterator<JobHistoryEvent> it = eventQueue.iterator();
while(it.hasNext()) {
JobHistoryEvent ev = it.next();
LOG.info("In stop, writing event " + ev.getType());
handleEvent(ev);
}
// Process JobUnsuccessfulCompletionEvent for jobIds which still haven't
// closed their event writers
if(forceJobCompletion) {
for (Map.Entry<JobId,MetaInfo> jobIt : fileMap.entrySet()) {
JobId toClose = jobIt.getKey();
MetaInfo mi = jobIt.getValue();
if(mi != null && mi.isWriterActive()) {
LOG.warn("Found jobId " + toClose
+ " to have not been closed. Will close");
//Create a JobFinishEvent so that it is written to the job history
final Job job = context.getJob(toClose);
// "Successful" counts are derived by subtracting failed/killed from
// completed, since no JOB_FINISHED event was produced for this job.
int successfulMaps = job.getCompletedMaps() - job.getFailedMaps()
- job.getKilledMaps();
int successfulReduces = job.getCompletedReduces()
- job.getFailedReduces() - job.getKilledReduces();
JobUnsuccessfulCompletionEvent jucEvent =
new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(toClose),
System.currentTimeMillis(),
successfulMaps,
successfulReduces,
job.getFailedMaps(), job.getFailedReduces(),
job.getKilledMaps(), job.getKilledReduces(),
createJobStateForJobUnsuccessfulCompletionEvent(
mi.getForcedJobStateOnShutDown()),
job.getDiagnostics());
JobHistoryEvent jfEvent = new JobHistoryEvent(toClose, jucEvent);
//Bypass the queue mechanism which might wait. Call the method directly
handleEvent(jfEvent);
}
}
}
//close all file handles
for (MetaInfo mi : fileMap.values()) {
try {
mi.closeWriter();
} catch (IOException e) {
LOG.info("Exception while closing file " + e.getMessage());
}
}
if (handleTimelineEvent && atsEventDispatcher != null) {
atsEventDispatcher.stop();
}
if (timelineClient != null) {
timelineClient.stop();
} else if (timelineV2Client != null) {
timelineV2Client.stop();
}
LOG.info("Stopped JobHistoryEventHandler. super.stop()");
super.serviceStop();
}
/**
 * Opens (overwriting if present) the history file in the staging filesystem
 * and wraps it in an EventWriter using the configured jhist write mode.
 * Protected so tests/subclasses can substitute a different writer.
 */
protected EventWriter createEventWriter(Path historyFilePath)
throws IOException {
// 'true' => overwrite any pre-existing file at this path.
FSDataOutputStream stream = stagingDirFS.create(historyFilePath, true);
return new EventWriter(stream, jhistMode);
}
/**
 * Create an event writer for the Job represented by the jobID.
 * Writes out the job configuration to the log directory.
 * This should be the first call to history for a job
 *
 * Registers a MetaInfo for the job in fileMap, seeding the job summary and
 * job-index info with the AM's start/submit times.
 *
 * @param jobId the jobId.
 * @param amStartedEvent the AM_STARTED event carrying start/submit times and
 *                       the forced job state to use on shutdown
 * @throws IOException if the staging dir is unset, the current user cannot be
 *                     resolved, or the history/conf files cannot be written
 */
protected void setupEventWriter(JobId jobId, AMStartedEvent amStartedEvent)
throws IOException {
if (stagingDirPath == null) {
LOG.error("Log Directory is null, returning");
throw new IOException("Missing Log Directory for History");
}
MetaInfo oldFi = fileMap.get(jobId);
Configuration conf = getConfig();
// TODO Ideally this should be written out to the job dir
// (.staging/jobid/files - RecoveryService will need to be patched)
Path historyFile = JobHistoryUtils.getStagingJobHistoryFile(
stagingDirPath, jobId, startCount);
String user = UserGroupInformation.getCurrentUser().getShortUserName();
if (user == null) {
throw new IOException(
"User is null while setting up jobhistory eventwriter");
}
String jobName = context.getJob(jobId).getName();
// Reuse an existing writer if this job already has one (e.g. repeat call).
EventWriter writer = (oldFi == null) ? null : oldFi.writer;
Path logDirConfPath =
JobHistoryUtils.getStagingConfFile(stagingDirPath, jobId, startCount);
if (writer == null) {
try {
writer = createEventWriter(historyFile);
LOG.info("Event Writer setup for JobId: " + jobId + ", File: "
+ historyFile);
} catch (IOException ioe) {
LOG.info("Could not create log file: [" + historyFile + "] + for job "
+ "[" + jobName + "]");
throw ioe;
}
//Write out conf only if the writer isn't already setup.
if (conf != null) {
// TODO Ideally this should be written out to the job dir
// (.staging/jobid/files - RecoveryService will need to be patched)
if (logDirConfPath != null) {
// Redact sensitive properties before persisting the configuration.
Configuration redactedConf = new Configuration(conf);
MRJobConfUtil.redact(redactedConf);
try (FSDataOutputStream jobFileOut = stagingDirFS
.create(logDirConfPath, true)) {
redactedConf.writeXml(jobFileOut);
} catch (IOException e) {
LOG.info("Failed to write the job configuration file", e);
throw e;
}
}
}
}
String queueName = JobConf.DEFAULT_QUEUE_NAME;
if (conf != null) {
queueName = conf.get(MRJobConfig.QUEUE_NAME, JobConf.DEFAULT_QUEUE_NAME);
}
MetaInfo fi = new MetaInfo(historyFile, logDirConfPath, writer,
user, jobName, jobId, amStartedEvent.getForcedJobStateOnShutDown(),
queueName);
fi.getJobSummary().setJobId(jobId);
fi.getJobSummary().setJobLaunchTime(amStartedEvent.getStartTime());
fi.getJobSummary().setJobSubmitTime(amStartedEvent.getSubmitTime());
fi.getJobIndexInfo().setJobStartTime(amStartedEvent.getStartTime());
fi.getJobIndexInfo().setSubmitTime(amStartedEvent.getSubmitTime());
fileMap.put(jobId, fi);
}
/** Close the event writer for this id, if one is registered.
 * The failure is logged and then propagated to the caller.
 * @throws IOException if closing the underlying writer fails */
public void closeWriter(JobId id) throws IOException {
// The map lookup cannot fail with IOException, so keep it outside the try.
final MetaInfo metaInfo = fileMap.get(id);
if (metaInfo == null) {
return;
}
try {
metaInfo.closeWriter();
} catch (IOException e) {
LOG.error("Error closing writer for JobID: " + id);
throw e;
}
}
/**
 * Enqueues a history event for the event-handling thread and, when timeline
 * publishing is enabled, forwards it to the ATS dispatcher as well.
 * On a job-completion event the unflushed-events threshold is raised so that
 * post-completion writes are flushed less often (see serviceInit).
 */
@Override
public void handle(JobHistoryEvent event) {
try {
if (isJobCompletionEvent(event.getHistoryEvent())) {
// When the job is complete, flush slower but write faster.
// NOTE(review): this read-multiply-write of maxUnflushedCompletionEvents
// is not synchronized — presumably handle() is only invoked from a
// single dispatcher thread; confirm.
maxUnflushedCompletionEvents =
maxUnflushedCompletionEvents * postJobCompletionMultiplier;
}
eventQueue.put(event);
// Process it for ATS (if enabled)
if (handleTimelineEvent) {
atsEventDispatcher.getEventHandler().handle(event);
}
} catch (InterruptedException e) {
throw new YarnRuntimeException(e);
}
}
/**
 * Returns true when the event marks terminal job completion
 * (JOB_FINISHED, JOB_FAILED or JOB_KILLED).
 *
 * Rewritten from {@code EnumSet.of(...).contains(...)}: the old form
 * allocated a fresh EnumSet on every call on the enqueue path, and the
 * "if (cond) return true; return false;" wrapper was redundant. Direct
 * equality checks are allocation-free and equivalent.
 */
private boolean isJobCompletionEvent(HistoryEvent historyEvent) {
EventType type = historyEvent.getEventType();
return type == EventType.JOB_FINISHED
|| type == EventType.JOB_FAILED
|| type == EventType.JOB_KILLED;
}
/**
 * Synchronously processes one history event under {@code lock}: sets up the
 * writer on AM_STARTED, writes the event to the jhist file, updates the job
 * summary, maintains job-index info, and on terminal events closes the
 * writer and moves the done files. Any IOException is rethrown as
 * YarnRuntimeException.
 */
@Private
public void handleEvent(JobHistoryEvent event) {
synchronized (lock) {
// If this is JobSubmitted Event, setup the writer
if (event.getHistoryEvent().getEventType() == EventType.AM_STARTED) {
try {
AMStartedEvent amStartedEvent =
(AMStartedEvent) event.getHistoryEvent();
setupEventWriter(event.getJobID(), amStartedEvent);
} catch (IOException ioe) {
LOG.error("Error JobHistoryEventHandler in handleEvent: " + event,
ioe);
throw new YarnRuntimeException(ioe);
}
}
// For all events
// (1) Write it out
// (2) Process it for JobSummary
// (3) Process it for ATS (if enabled)
// NOTE(review): mi is used without a null check below — this relies on
// AM_STARTED always arriving first for the job; confirm that ordering.
MetaInfo mi = fileMap.get(event.getJobID());
try {
HistoryEvent historyEvent = event.getHistoryEvent();
// NormalizedResourceEvent only feeds the summary; it is not persisted.
if (! (historyEvent instanceof NormalizedResourceEvent)) {
mi.writeEvent(historyEvent);
}
processEventForJobSummary(event.getHistoryEvent(), mi.getJobSummary(),
event.getJobID());
if (LOG.isDebugEnabled()) {
LOG.debug("In HistoryEventHandler "
+ event.getHistoryEvent().getEventType());
}
} catch (IOException e) {
LOG.error("Error writing History Event: " + event.getHistoryEvent(),
e);
throw new YarnRuntimeException(e);
}
if (event.getHistoryEvent().getEventType() == EventType.JOB_SUBMITTED) {
JobSubmittedEvent jobSubmittedEvent =
(JobSubmittedEvent) event.getHistoryEvent();
mi.getJobIndexInfo().setSubmitTime(jobSubmittedEvent.getSubmitTime());
mi.getJobIndexInfo().setQueueName(jobSubmittedEvent.getJobQueueName());
}
//initialize the launchTime in the JobIndexInfo of MetaInfo
if(event.getHistoryEvent().getEventType() == EventType.JOB_INITED ){
JobInitedEvent jie = (JobInitedEvent) event.getHistoryEvent();
mi.getJobIndexInfo().setJobStartTime(jie.getLaunchTime());
}
if (event.getHistoryEvent().getEventType() == EventType.JOB_QUEUE_CHANGED) {
JobQueueChangeEvent jQueueEvent =
(JobQueueChangeEvent) event.getHistoryEvent();
mi.getJobIndexInfo().setQueueName(jQueueEvent.getJobQueueName());
}
// If this is JobFinishedEvent, close the writer and setup the job-index
if (event.getHistoryEvent().getEventType() == EventType.JOB_FINISHED) {
try {
JobFinishedEvent jFinishedEvent =
(JobFinishedEvent) event.getHistoryEvent();
mi.getJobIndexInfo().setFinishTime(jFinishedEvent.getFinishTime());
mi.getJobIndexInfo().setNumMaps(jFinishedEvent.getSucceededMaps());
mi.getJobIndexInfo().setNumReduces(
jFinishedEvent.getSucceededReduces());
mi.getJobIndexInfo().setJobStatus(JobState.SUCCEEDED.toString());
closeEventWriter(event.getJobID());
processDoneFiles(event.getJobID());
} catch (IOException e) {
throw new YarnRuntimeException(e);
}
}
// In case of JOB_ERROR, only process all the Done files(e.g. job
// summary, job history file etc.) if it is last AM retry.
if (event.getHistoryEvent().getEventType() == EventType.JOB_ERROR) {
try {
JobUnsuccessfulCompletionEvent jucEvent =
(JobUnsuccessfulCompletionEvent) event.getHistoryEvent();
mi.getJobIndexInfo().setFinishTime(jucEvent.getFinishTime());
mi.getJobIndexInfo().setNumMaps(jucEvent.getSucceededMaps());
mi.getJobIndexInfo().setNumReduces(jucEvent.getSucceededReduces());
mi.getJobIndexInfo().setJobStatus(jucEvent.getStatus());
closeEventWriter(event.getJobID());
if(context.isLastAMRetry())
processDoneFiles(event.getJobID());
} catch (IOException e) {
throw new YarnRuntimeException(e);
}
}
if (event.getHistoryEvent().getEventType() == EventType.JOB_FAILED
|| event.getHistoryEvent().getEventType() == EventType.JOB_KILLED) {
try {
JobUnsuccessfulCompletionEvent jucEvent =
(JobUnsuccessfulCompletionEvent) event
.getHistoryEvent();
mi.getJobIndexInfo().setFinishTime(jucEvent.getFinishTime());
mi.getJobIndexInfo().setNumMaps(jucEvent.getSucceededMaps());
mi.getJobIndexInfo().setNumReduces(jucEvent.getSucceededReduces());
mi.getJobIndexInfo().setJobStatus(jucEvent.getStatus());
closeEventWriter(event.getJobID());
processDoneFiles(event.getJobID());
} catch (IOException e) {
throw new YarnRuntimeException(e);
}
}
}
}
/**
 * Routes a history event to whichever timeline client is active: the v2
 * client takes precedence over the v1 client. No-op (apart from the debug
 * log) when timeline publishing is disabled or neither client is set.
 */
private void handleTimelineEvent(JobHistoryEvent event) {
HistoryEvent historyEvent = event.getHistoryEvent();
if (handleTimelineEvent) {
JobId jobId = event.getJobID();
long timestamp = event.getTimestamp();
if (timelineV2Client != null) {
processEventForNewTimelineService(historyEvent, jobId, timestamp);
} else if (timelineClient != null) {
processEventForTimelineServer(historyEvent, jobId, timestamp);
}
}
if (LOG.isDebugEnabled()) {
LOG.debug("In HistoryEventHandler, handle timelineEvent:"
+ event.getHistoryEvent().getEventType());
}
}
/**
 * Folds a single history event into the per-job summary: submit/launch/finish
 * times, queue/user/name, per-task-type resources, first attempt launch
 * times, and final map/reduce counts and status. Events not listed in the
 * switch are ignored.
 */
public void processEventForJobSummary(HistoryEvent event, JobSummary summary,
JobId jobId) {
// context.getJob could be used for some of this info as well.
switch (event.getEventType()) {
case JOB_SUBMITTED:
JobSubmittedEvent jse = (JobSubmittedEvent) event;
summary.setUser(jse.getUserName());
summary.setQueue(jse.getJobQueueName());
summary.setJobSubmitTime(jse.getSubmitTime());
summary.setJobName(jse.getJobName());
break;
case NORMALIZED_RESOURCE:
NormalizedResourceEvent normalizedResourceEvent =
(NormalizedResourceEvent) event;
if (normalizedResourceEvent.getTaskType() == TaskType.MAP) {
summary.setResourcesPerMap((int) normalizedResourceEvent.getMemory());
} else if (normalizedResourceEvent.getTaskType() == TaskType.REDUCE) {
summary.setResourcesPerReduce((int) normalizedResourceEvent.getMemory());
}
break;
case JOB_INITED:
JobInitedEvent jie = (JobInitedEvent) event;
summary.setJobLaunchTime(jie.getLaunchTime());
break;
case MAP_ATTEMPT_STARTED:
// Record only the first map attempt's start time (0 == unset).
TaskAttemptStartedEvent mtase = (TaskAttemptStartedEvent) event;
if (summary.getFirstMapTaskLaunchTime() == 0)
summary.setFirstMapTaskLaunchTime(mtase.getStartTime());
break;
case REDUCE_ATTEMPT_STARTED:
// Likewise, only the first reduce attempt's start time.
TaskAttemptStartedEvent rtase = (TaskAttemptStartedEvent) event;
if (summary.getFirstReduceTaskLaunchTime() == 0)
summary.setFirstReduceTaskLaunchTime(rtase.getStartTime());
break;
case JOB_FINISHED:
JobFinishedEvent jfe = (JobFinishedEvent) event;
summary.setJobFinishTime(jfe.getFinishTime());
summary.setNumSucceededMaps(jfe.getSucceededMaps());
summary.setNumFailedMaps(jfe.getFailedMaps());
summary.setNumSucceededReduces(jfe.getSucceededReduces());
summary.setNumFailedReduces(jfe.getFailedReduces());
summary.setNumKilledMaps(jfe.getKilledMaps());
summary.setNumKilledReduces(jfe.getKilledReduces());
if (summary.getJobStatus() == null)
summary
.setJobStatus(org.apache.hadoop.mapreduce.JobStatus.State.SUCCEEDED
.toString());
// TODO JOB_FINISHED does not have state. Effectively job history does not
// have state about the finished job.
setSummarySlotSeconds(summary, jfe.getTotalCounters());
break;
case JOB_FAILED:
case JOB_KILLED:
// The unsuccessful-completion event lacks succeeded counts, so derive
// them from the job's completed minus failed/killed counts.
Job job = context.getJob(jobId);
JobUnsuccessfulCompletionEvent juce = (JobUnsuccessfulCompletionEvent) event;
int successfulMaps = job.getCompletedMaps() - job.getFailedMaps()
- job.getKilledMaps();
int successfulReduces = job.getCompletedReduces()
- job.getFailedReduces() - job.getKilledReduces();
summary.setJobStatus(juce.getStatus());
summary.setNumSucceededMaps(successfulMaps);
summary.setNumSucceededReduces(successfulReduces);
summary.setNumFailedMaps(job.getFailedMaps());
summary.setNumFailedReduces(job.getFailedReduces());
summary.setJobFinishTime(juce.getFinishTime());
summary.setNumKilledMaps(juce.getKilledMaps());
summary.setNumKilledReduces(juce.getKilledReduces());
setSummarySlotSeconds(summary, context.getJob(jobId).getAllCounters());
break;
default:
break;
}
}
/**
 * Translates a history event into a Timeline Service v1 entity + event and
 * publishes it via {@code timelineClient.putEntities}. Job-level events map
 * to a MAPREDUCE_JOB entity keyed by the job id; task/attempt events map to
 * a MAPREDUCE_TASK entity keyed by the task id and related back to the job.
 * Publish errors are logged, never propagated.
 */
private void processEventForTimelineServer(HistoryEvent event, JobId jobId,
long timestamp) {
TimelineEvent tEvent = new TimelineEvent();
tEvent.setEventType(StringUtils.toUpperCase(event.getEventType().name()));
tEvent.setTimestamp(timestamp);
TimelineEntity tEntity = new TimelineEntity();
switch (event.getEventType()) {
case JOB_SUBMITTED:
JobSubmittedEvent jse =
(JobSubmittedEvent) event;
tEvent.addEventInfo("SUBMIT_TIME", jse.getSubmitTime());
tEvent.addEventInfo("QUEUE_NAME", jse.getJobQueueName());
tEvent.addEventInfo("JOB_NAME", jse.getJobName());
tEvent.addEventInfo("USER_NAME", jse.getUserName());
tEvent.addEventInfo("JOB_CONF_PATH", jse.getJobConfPath());
tEvent.addEventInfo("ACLS", jse.getJobAcls());
tEvent.addEventInfo("JOB_QUEUE_NAME", jse.getJobQueueName());
tEvent.addEventInfo("WORKFLOW_ID", jse.getWorkflowId());
tEvent.addEventInfo("WORKFLOW_NAME", jse.getWorkflowName());
// NOTE(review): key "WORKFLOW_NAME_NAME" looks like a typo for
// "WORKFLOW_NODE_NAME" (value is getWorkflowNodeName()); it is kept
// as-is because downstream consumers may already read this key.
tEvent.addEventInfo("WORKFLOW_NAME_NAME", jse.getWorkflowNodeName());
tEvent.addEventInfo("WORKFLOW_ADJACENCIES",
jse.getWorkflowAdjacencies());
tEvent.addEventInfo("WORKFLOW_TAGS", jse.getWorkflowTags());
tEntity.addEvent(tEvent);
tEntity.setEntityId(jobId.toString());
tEntity.setEntityType(MAPREDUCE_JOB_ENTITY_TYPE);
break;
case JOB_STATUS_CHANGED:
JobStatusChangedEvent jsce = (JobStatusChangedEvent) event;
tEvent.addEventInfo("STATUS", jsce.getStatus());
tEntity.addEvent(tEvent);
tEntity.setEntityId(jobId.toString());
tEntity.setEntityType(MAPREDUCE_JOB_ENTITY_TYPE);
break;
case JOB_INFO_CHANGED:
JobInfoChangeEvent jice = (JobInfoChangeEvent) event;
tEvent.addEventInfo("SUBMIT_TIME", jice.getSubmitTime());
tEvent.addEventInfo("LAUNCH_TIME", jice.getLaunchTime());
tEntity.addEvent(tEvent);
tEntity.setEntityId(jobId.toString());
tEntity.setEntityType(MAPREDUCE_JOB_ENTITY_TYPE);
break;
case JOB_INITED:
JobInitedEvent jie = (JobInitedEvent) event;
tEvent.addEventInfo("START_TIME", jie.getLaunchTime());
tEvent.addEventInfo("STATUS", jie.getStatus());
tEvent.addEventInfo("TOTAL_MAPS", jie.getTotalMaps());
tEvent.addEventInfo("TOTAL_REDUCES", jie.getTotalReduces());
tEvent.addEventInfo("UBERIZED", jie.getUberized());
// JOB_INITED is the only job event that also sets the entity start time.
tEntity.setStartTime(jie.getLaunchTime());
tEntity.addEvent(tEvent);
tEntity.setEntityId(jobId.toString());
tEntity.setEntityType(MAPREDUCE_JOB_ENTITY_TYPE);
break;
case JOB_PRIORITY_CHANGED:
JobPriorityChangeEvent jpce = (JobPriorityChangeEvent) event;
tEvent.addEventInfo("PRIORITY", jpce.getPriority().toString());
tEntity.addEvent(tEvent);
tEntity.setEntityId(jobId.toString());
tEntity.setEntityType(MAPREDUCE_JOB_ENTITY_TYPE);
break;
case JOB_QUEUE_CHANGED:
JobQueueChangeEvent jqe = (JobQueueChangeEvent) event;
tEvent.addEventInfo("QUEUE_NAMES", jqe.getJobQueueName());
tEntity.addEvent(tEvent);
tEntity.setEntityId(jobId.toString());
tEntity.setEntityType(MAPREDUCE_JOB_ENTITY_TYPE);
break;
case JOB_FAILED:
case JOB_KILLED:
case JOB_ERROR:
JobUnsuccessfulCompletionEvent juce =
(JobUnsuccessfulCompletionEvent) event;
tEvent.addEventInfo("FINISH_TIME", juce.getFinishTime());
// NUM_MAPS/NUM_REDUCES are totals: succeeded + failed + killed.
tEvent.addEventInfo("NUM_MAPS",
juce.getSucceededMaps() +
juce.getFailedMaps() +
juce.getKilledMaps());
tEvent.addEventInfo("NUM_REDUCES",
juce.getSucceededReduces() +
juce.getFailedReduces() +
juce.getKilledReduces());
tEvent.addEventInfo("JOB_STATUS", juce.getStatus());
tEvent.addEventInfo("DIAGNOSTICS", juce.getDiagnostics());
tEvent.addEventInfo("SUCCESSFUL_MAPS", juce.getSucceededMaps());
tEvent.addEventInfo("SUCCESSFUL_REDUCES", juce.getSucceededReduces());
tEvent.addEventInfo("FAILED_MAPS", juce.getFailedMaps());
tEvent.addEventInfo("FAILED_REDUCES", juce.getFailedReduces());
tEvent.addEventInfo("KILLED_MAPS", juce.getKilledMaps());
tEvent.addEventInfo("KILLED_REDUCES", juce.getKilledReduces());
tEntity.addEvent(tEvent);
tEntity.setEntityId(jobId.toString());
tEntity.setEntityType(MAPREDUCE_JOB_ENTITY_TYPE);
break;
case JOB_FINISHED:
JobFinishedEvent jfe = (JobFinishedEvent) event;
tEvent.addEventInfo("FINISH_TIME", jfe.getFinishTime());
tEvent.addEventInfo("NUM_MAPS",
jfe.getSucceededMaps() +
jfe.getFailedMaps() +
jfe.getKilledMaps());
tEvent.addEventInfo("NUM_REDUCES",
jfe.getSucceededReduces() +
jfe.getFailedReduces() +
jfe.getKilledReduces());
tEvent.addEventInfo("FAILED_MAPS", jfe.getFailedMaps());
tEvent.addEventInfo("FAILED_REDUCES", jfe.getFailedReduces());
tEvent.addEventInfo("SUCCESSFUL_MAPS", jfe.getSucceededMaps());
tEvent.addEventInfo("SUCCESSFUL_REDUCES", jfe.getSucceededReduces());
tEvent.addEventInfo("KILLED_MAPS", jfe.getKilledMaps());
tEvent.addEventInfo("KILLED_REDUCES", jfe.getKilledReduces());
tEvent.addEventInfo("MAP_COUNTERS_GROUPS",
JobHistoryEventUtils.countersToJSON(jfe.getMapCounters()));
tEvent.addEventInfo("REDUCE_COUNTERS_GROUPS",
JobHistoryEventUtils.countersToJSON(jfe.getReduceCounters()));
tEvent.addEventInfo("TOTAL_COUNTERS_GROUPS",
JobHistoryEventUtils.countersToJSON(jfe.getTotalCounters()));
tEvent.addEventInfo("JOB_STATUS", JobState.SUCCEEDED.toString());
tEntity.addEvent(tEvent);
tEntity.setEntityId(jobId.toString());
tEntity.setEntityType(MAPREDUCE_JOB_ENTITY_TYPE);
break;
case TASK_STARTED:
TaskStartedEvent tse = (TaskStartedEvent) event;
tEvent.addEventInfo("TASK_TYPE", tse.getTaskType().toString());
tEvent.addEventInfo("START_TIME", tse.getStartTime());
tEvent.addEventInfo("SPLIT_LOCATIONS", tse.getSplitLocations());
tEntity.addEvent(tEvent);
tEntity.setEntityId(tse.getTaskId().toString());
tEntity.setEntityType(MAPREDUCE_TASK_ENTITY_TYPE);
tEntity.addRelatedEntity(MAPREDUCE_JOB_ENTITY_TYPE, jobId.toString());
break;
case TASK_FAILED:
TaskFailedEvent tfe = (TaskFailedEvent) event;
tEvent.addEventInfo("TASK_TYPE", tfe.getTaskType().toString());
tEvent.addEventInfo("STATUS", TaskStatus.State.FAILED.toString());
tEvent.addEventInfo("FINISH_TIME", tfe.getFinishTime());
tEvent.addEventInfo("ERROR", tfe.getError());
tEvent.addEventInfo("FAILED_ATTEMPT_ID",
tfe.getFailedAttemptID() == null ?
"" : tfe.getFailedAttemptID().toString());
tEvent.addEventInfo("COUNTERS_GROUPS",
JobHistoryEventUtils.countersToJSON(tfe.getCounters()));
tEntity.addEvent(tEvent);
tEntity.setEntityId(tfe.getTaskId().toString());
tEntity.setEntityType(MAPREDUCE_TASK_ENTITY_TYPE);
tEntity.addRelatedEntity(MAPREDUCE_JOB_ENTITY_TYPE, jobId.toString());
break;
case TASK_UPDATED:
TaskUpdatedEvent tue = (TaskUpdatedEvent) event;
tEvent.addEventInfo("FINISH_TIME", tue.getFinishTime());
tEntity.addEvent(tEvent);
tEntity.setEntityId(tue.getTaskId().toString());
tEntity.setEntityType(MAPREDUCE_TASK_ENTITY_TYPE);
tEntity.addRelatedEntity(MAPREDUCE_JOB_ENTITY_TYPE, jobId.toString());
break;
case TASK_FINISHED:
TaskFinishedEvent tfe2 = (TaskFinishedEvent) event;
tEvent.addEventInfo("TASK_TYPE", tfe2.getTaskType().toString());
tEvent.addEventInfo("COUNTERS_GROUPS",
JobHistoryEventUtils.countersToJSON(tfe2.getCounters()));
tEvent.addEventInfo("FINISH_TIME", tfe2.getFinishTime());
tEvent.addEventInfo("STATUS", TaskStatus.State.SUCCEEDED.toString());
tEvent.addEventInfo("SUCCESSFUL_TASK_ATTEMPT_ID",
tfe2.getSuccessfulTaskAttemptId() == null ?
"" : tfe2.getSuccessfulTaskAttemptId().toString());
tEntity.addEvent(tEvent);
tEntity.setEntityId(tfe2.getTaskId().toString());
tEntity.setEntityType(MAPREDUCE_TASK_ENTITY_TYPE);
tEntity.addRelatedEntity(MAPREDUCE_JOB_ENTITY_TYPE, jobId.toString());
break;
case MAP_ATTEMPT_STARTED:
case CLEANUP_ATTEMPT_STARTED:
case REDUCE_ATTEMPT_STARTED:
case SETUP_ATTEMPT_STARTED:
TaskAttemptStartedEvent tase = (TaskAttemptStartedEvent) event;
tEvent.addEventInfo("TASK_TYPE", tase.getTaskType().toString());
tEvent.addEventInfo("TASK_ATTEMPT_ID",
tase.getTaskAttemptId().toString());
tEvent.addEventInfo("START_TIME", tase.getStartTime());
tEvent.addEventInfo("HTTP_PORT", tase.getHttpPort());
tEvent.addEventInfo("TRACKER_NAME", tase.getTrackerName());
tEvent.addEventInfo("SHUFFLE_PORT", tase.getShufflePort());
tEvent.addEventInfo("CONTAINER_ID", tase.getContainerId() == null ?
"" : tase.getContainerId().toString());
tEntity.addEvent(tEvent);
tEntity.setEntityId(tase.getTaskId().toString());
tEntity.setEntityType(MAPREDUCE_TASK_ENTITY_TYPE);
tEntity.addRelatedEntity(MAPREDUCE_JOB_ENTITY_TYPE, jobId.toString());
break;
case MAP_ATTEMPT_FAILED:
case CLEANUP_ATTEMPT_FAILED:
case REDUCE_ATTEMPT_FAILED:
case SETUP_ATTEMPT_FAILED:
case MAP_ATTEMPT_KILLED:
case CLEANUP_ATTEMPT_KILLED:
case REDUCE_ATTEMPT_KILLED:
case SETUP_ATTEMPT_KILLED:
TaskAttemptUnsuccessfulCompletionEvent tauce =
(TaskAttemptUnsuccessfulCompletionEvent) event;
tEvent.addEventInfo("TASK_TYPE", tauce.getTaskType().toString());
tEvent.addEventInfo("TASK_ATTEMPT_ID",
tauce.getTaskAttemptId() == null ?
"" : tauce.getTaskAttemptId().toString());
tEvent.addEventInfo("FINISH_TIME", tauce.getFinishTime());
tEvent.addEventInfo("ERROR", tauce.getError());
tEvent.addEventInfo("STATUS", tauce.getTaskStatus());
tEvent.addEventInfo("HOSTNAME", tauce.getHostname());
tEvent.addEventInfo("PORT", tauce.getPort());
tEvent.addEventInfo("RACK_NAME", tauce.getRackName());
// NOTE(review): SHUFFLE/SORT/MAP finish times are all populated from
// getFinishTime() here — the unsuccessful-completion event carries no
// per-phase times, so the overall finish time stands in for each.
tEvent.addEventInfo("SHUFFLE_FINISH_TIME", tauce.getFinishTime());
tEvent.addEventInfo("SORT_FINISH_TIME", tauce.getFinishTime());
tEvent.addEventInfo("MAP_FINISH_TIME", tauce.getFinishTime());
tEvent.addEventInfo("COUNTERS_GROUPS",
JobHistoryEventUtils.countersToJSON(tauce.getCounters()));
tEntity.addEvent(tEvent);
tEntity.setEntityId(tauce.getTaskId().toString());
tEntity.setEntityType(MAPREDUCE_TASK_ENTITY_TYPE);
tEntity.addRelatedEntity(MAPREDUCE_JOB_ENTITY_TYPE, jobId.toString());
break;
case MAP_ATTEMPT_FINISHED:
MapAttemptFinishedEvent mafe = (MapAttemptFinishedEvent) event;
tEvent.addEventInfo("TASK_TYPE", mafe.getTaskType().toString());
tEvent.addEventInfo("FINISH_TIME", mafe.getFinishTime());
tEvent.addEventInfo("STATUS", mafe.getTaskStatus());
tEvent.addEventInfo("STATE", mafe.getState());
tEvent.addEventInfo("MAP_FINISH_TIME", mafe.getMapFinishTime());
tEvent.addEventInfo("COUNTERS_GROUPS",
JobHistoryEventUtils.countersToJSON(mafe.getCounters()));
tEvent.addEventInfo("HOSTNAME", mafe.getHostname());
tEvent.addEventInfo("PORT", mafe.getPort());
tEvent.addEventInfo("RACK_NAME", mafe.getRackName());
tEvent.addEventInfo("ATTEMPT_ID", mafe.getAttemptId() == null ?
"" : mafe.getAttemptId().toString());
tEntity.addEvent(tEvent);
tEntity.setEntityId(mafe.getTaskId().toString());
tEntity.setEntityType(MAPREDUCE_TASK_ENTITY_TYPE);
tEntity.addRelatedEntity(MAPREDUCE_JOB_ENTITY_TYPE, jobId.toString());
break;
case REDUCE_ATTEMPT_FINISHED:
ReduceAttemptFinishedEvent rafe = (ReduceAttemptFinishedEvent) event;
tEvent.addEventInfo("TASK_TYPE", rafe.getTaskType().toString());
tEvent.addEventInfo("ATTEMPT_ID", rafe.getAttemptId() == null ?
"" : rafe.getAttemptId().toString());
tEvent.addEventInfo("FINISH_TIME", rafe.getFinishTime());
tEvent.addEventInfo("STATUS", rafe.getTaskStatus());
tEvent.addEventInfo("STATE", rafe.getState());
tEvent.addEventInfo("SHUFFLE_FINISH_TIME", rafe.getShuffleFinishTime());
tEvent.addEventInfo("SORT_FINISH_TIME", rafe.getSortFinishTime());
tEvent.addEventInfo("COUNTERS_GROUPS",
JobHistoryEventUtils.countersToJSON(rafe.getCounters()));
tEvent.addEventInfo("HOSTNAME", rafe.getHostname());
tEvent.addEventInfo("PORT", rafe.getPort());
tEvent.addEventInfo("RACK_NAME", rafe.getRackName());
tEntity.addEvent(tEvent);
tEntity.setEntityId(rafe.getTaskId().toString());
tEntity.setEntityType(MAPREDUCE_TASK_ENTITY_TYPE);
tEntity.addRelatedEntity(MAPREDUCE_JOB_ENTITY_TYPE, jobId.toString());
break;
case SETUP_ATTEMPT_FINISHED:
case CLEANUP_ATTEMPT_FINISHED:
TaskAttemptFinishedEvent tafe = (TaskAttemptFinishedEvent) event;
tEvent.addEventInfo("TASK_TYPE", tafe.getTaskType().toString());
tEvent.addEventInfo("ATTEMPT_ID", tafe.getAttemptId() == null ?
"" : tafe.getAttemptId().toString());
tEvent.addEventInfo("FINISH_TIME", tafe.getFinishTime());
tEvent.addEventInfo("STATUS", tafe.getTaskStatus());
tEvent.addEventInfo("STATE", tafe.getState());
tEvent.addEventInfo("COUNTERS_GROUPS",
JobHistoryEventUtils.countersToJSON(tafe.getCounters()));
tEvent.addEventInfo("HOSTNAME", tafe.getHostname());
tEntity.addEvent(tEvent);
tEntity.setEntityId(tafe.getTaskId().toString());
tEntity.setEntityType(MAPREDUCE_TASK_ENTITY_TYPE);
tEntity.addRelatedEntity(MAPREDUCE_JOB_ENTITY_TYPE, jobId.toString());
break;
case AM_STARTED:
AMStartedEvent ase = (AMStartedEvent) event;
tEvent.addEventInfo("APPLICATION_ATTEMPT_ID",
ase.getAppAttemptId() == null ?
"" : ase.getAppAttemptId().toString());
tEvent.addEventInfo("CONTAINER_ID", ase.getContainerId() == null ?
"" : ase.getContainerId().toString());
tEvent.addEventInfo("NODE_MANAGER_HOST", ase.getNodeManagerHost());
tEvent.addEventInfo("NODE_MANAGER_PORT", ase.getNodeManagerPort());
tEvent.addEventInfo("NODE_MANAGER_HTTP_PORT",
ase.getNodeManagerHttpPort());
tEvent.addEventInfo("START_TIME", ase.getStartTime());
tEvent.addEventInfo("SUBMIT_TIME", ase.getSubmitTime());
tEntity.addEvent(tEvent);
tEntity.setEntityId(jobId.toString());
tEntity.setEntityType(MAPREDUCE_JOB_ENTITY_TYPE);
break;
default:
break;
}
// Publish; failures are logged and swallowed so history handling is not
// disrupted by timeline-server problems.
try {
TimelinePutResponse response = timelineClient.putEntities(tEntity);
List<TimelinePutResponse.TimelinePutError> errors = response.getErrors();
if (errors.size() == 0) {
if (LOG.isDebugEnabled()) {
LOG.debug("Timeline entities are successfully put in event " + event
.getEventType());
}
} else {
for (TimelinePutResponse.TimelinePutError error : errors) {
LOG.error(
"Error when publishing entity [" + error.getEntityType() + ","
+ error.getEntityId() + "], server side error code: "
+ error.getErrorCode());
}
}
} catch (YarnException | IOException | ProcessingException ex) {
LOG.error("Error putting entity {} to Timeline Server",
tEntity.getEntityId(), ex);
}
}
// create JobEntity from HistoryEvent with adding other info, like:
// jobId, timestamp and entityType.
private org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity
createJobEntity(HistoryEvent event, long timestamp, JobId jobId,
String entityType, boolean setCreatedTime) {
org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity entity =
createBaseEntity(event, timestamp, entityType, setCreatedTime);
entity.setId(jobId.toString());
return entity;
}
private org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity
createJobEntity(JobId jobId) {
org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity entity =
new org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity();
entity.setId(jobId.toString());
entity.setType(MAPREDUCE_JOB_ENTITY_TYPE);
return entity;
}
// create ApplicationEntity with job finished Metrics from HistoryEvent
private org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity
createAppEntityWithJobMetrics(HistoryEvent event, JobId jobId) {
ApplicationEntity entity = new ApplicationEntity();
entity.setId(jobId.getAppId().toString());
entity.setMetrics(event.getTimelineMetrics());
return entity;
}
// create BaseEntity from HistoryEvent with adding other info, like:
// timestamp and entityType.
private org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity
createBaseEntity(HistoryEvent event, long timestamp, String entityType,
boolean setCreatedTime) {
org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent tEvent =
event.toTimelineEvent();
tEvent.setTimestamp(timestamp);
org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity entity =
new org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity();
entity.addEvent(tEvent);
entity.setType(entityType);
if (setCreatedTime) {
entity.setCreatedTime(timestamp);
}
Set<TimelineMetric> timelineMetrics = event.getTimelineMetrics();
if (timelineMetrics != null) {
entity.setMetrics(timelineMetrics);
}
return entity;
}
// create TaskEntity from HistoryEvent with adding other info, like:
// taskId, jobId, timestamp, entityType and relatedJobEntity.
private org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity
createTaskEntity(HistoryEvent event, long timestamp, String taskId,
String entityType, String relatedJobEntity, JobId jobId,
boolean setCreatedTime, long taskIdPrefix) {
org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity entity =
createBaseEntity(event, timestamp, entityType, setCreatedTime);
entity.setId(taskId);
if (event.getEventType() == EventType.TASK_STARTED) {
entity.addInfo("TASK_TYPE",
((TaskStartedEvent)event).getTaskType().toString());
}
entity.addIsRelatedToEntity(relatedJobEntity, jobId.toString());
entity.setIdPrefix(taskIdPrefix);
return entity;
}
// create TaskAttemptEntity from HistoryEvent with adding other info, like:
// timestamp, taskAttemptId, entityType, relatedTaskEntity and taskId.
private org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity
createTaskAttemptEntity(HistoryEvent event, long timestamp,
String taskAttemptId, String entityType, String relatedTaskEntity,
String taskId, boolean setCreatedTime, long taskAttemptIdPrefix) {
org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity entity =
createBaseEntity(event, timestamp, entityType, setCreatedTime);
entity.setId(taskAttemptId);
entity.addIsRelatedToEntity(relatedTaskEntity, taskId);
entity.setIdPrefix(taskAttemptIdPrefix);
return entity;
}
private void publishConfigsOnJobSubmittedEvent(JobSubmittedEvent event,
JobId jobId) {
if (event.getJobConf() == null) {
return;
}
// Publish job configurations both as job and app entity.
// Configs are split into multiple entities if they exceed 100kb in size.
org.apache.hadoop.yarn.api.records.timelineservice.
TimelineEntity jobEntityForConfigs = createJobEntity(jobId);
ApplicationEntity appEntityForConfigs = new ApplicationEntity();
String appId = jobId.getAppId().toString();
appEntityForConfigs.setId(appId);
try {
int configSize = 0;
for (Map.Entry<String, String> entry : event.getJobConf()) {
int size = entry.getKey().length() + entry.getValue().length();
configSize += size;
if (configSize > JobHistoryEventUtils.ATS_CONFIG_PUBLISH_SIZE_BYTES) {
if (jobEntityForConfigs.getConfigs().size() > 0) {
timelineV2Client.putEntities(jobEntityForConfigs);
timelineV2Client.putEntities(appEntityForConfigs);
jobEntityForConfigs = createJobEntity(jobId);
appEntityForConfigs = new ApplicationEntity();
appEntityForConfigs.setId(appId);
}
configSize = size;
}
jobEntityForConfigs.addConfig(entry.getKey(), entry.getValue());
appEntityForConfigs.addConfig(entry.getKey(), entry.getValue());
}
if (configSize > 0) {
timelineV2Client.putEntities(jobEntityForConfigs);
timelineV2Client.putEntities(appEntityForConfigs);
}
} catch (IOException | YarnException e) {
LOG.error("Exception while publishing configs on JOB_SUBMITTED Event " +
" for the job : " + jobId, e);
}
}
private void processEventForNewTimelineService(HistoryEvent event,
JobId jobId, long timestamp) {
org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity tEntity =
null;
String taskId = null;
String taskAttemptId = null;
boolean setCreatedTime = false;
long taskIdPrefix = 0;
long taskAttemptIdPrefix = 0;
switch (event.getEventType()) {
// Handle job events
case JOB_SUBMITTED:
setCreatedTime = true;
break;
case JOB_STATUS_CHANGED:
case JOB_INFO_CHANGED:
case JOB_INITED:
case JOB_PRIORITY_CHANGED:
case JOB_QUEUE_CHANGED:
case JOB_FAILED:
case JOB_KILLED:
case JOB_ERROR:
case JOB_FINISHED:
case AM_STARTED:
case NORMALIZED_RESOURCE:
break;
// Handle task events
case TASK_STARTED:
setCreatedTime = true;
taskId = ((TaskStartedEvent)event).getTaskId().toString();
taskIdPrefix = TimelineServiceHelper.
invertLong(((TaskStartedEvent)event).getStartTime());
break;
case TASK_FAILED:
taskId = ((TaskFailedEvent)event).getTaskId().toString();
taskIdPrefix = TimelineServiceHelper.
invertLong(((TaskFailedEvent)event).getStartTime());
break;
case TASK_UPDATED:
taskId = ((TaskUpdatedEvent)event).getTaskId().toString();
break;
case TASK_FINISHED:
taskId = ((TaskFinishedEvent)event).getTaskId().toString();
taskIdPrefix = TimelineServiceHelper.
invertLong(((TaskFinishedEvent)event).getStartTime());
break;
case MAP_ATTEMPT_STARTED:
case REDUCE_ATTEMPT_STARTED:
setCreatedTime = true;
taskId = ((TaskAttemptStartedEvent)event).getTaskId().toString();
taskAttemptId = ((TaskAttemptStartedEvent)event).
getTaskAttemptId().toString();
taskAttemptIdPrefix = TimelineServiceHelper.
invertLong(((TaskAttemptStartedEvent)event).getStartTime());
break;
case CLEANUP_ATTEMPT_STARTED:
case SETUP_ATTEMPT_STARTED:
taskId = ((TaskAttemptStartedEvent)event).getTaskId().toString();
taskAttemptId = ((TaskAttemptStartedEvent)event).
getTaskAttemptId().toString();
break;
case MAP_ATTEMPT_FAILED:
case CLEANUP_ATTEMPT_FAILED:
case REDUCE_ATTEMPT_FAILED:
case SETUP_ATTEMPT_FAILED:
case MAP_ATTEMPT_KILLED:
case CLEANUP_ATTEMPT_KILLED:
case REDUCE_ATTEMPT_KILLED:
case SETUP_ATTEMPT_KILLED:
taskId = ((TaskAttemptUnsuccessfulCompletionEvent)event).
getTaskId().toString();
taskAttemptId = ((TaskAttemptUnsuccessfulCompletionEvent)event).
getTaskAttemptId().toString();
taskAttemptIdPrefix = TimelineServiceHelper.invertLong(
((TaskAttemptUnsuccessfulCompletionEvent)event).getStartTime());
break;
case MAP_ATTEMPT_FINISHED:
taskId = ((MapAttemptFinishedEvent)event).getTaskId().toString();
taskAttemptId = ((MapAttemptFinishedEvent)event).
getAttemptId().toString();
taskAttemptIdPrefix = TimelineServiceHelper.
invertLong(((MapAttemptFinishedEvent)event).getStartTime());
break;
case REDUCE_ATTEMPT_FINISHED:
taskId = ((ReduceAttemptFinishedEvent)event).getTaskId().toString();
taskAttemptId = ((ReduceAttemptFinishedEvent)event).
getAttemptId().toString();
taskAttemptIdPrefix = TimelineServiceHelper.
invertLong(((ReduceAttemptFinishedEvent)event).getStartTime());
break;
case SETUP_ATTEMPT_FINISHED:
case CLEANUP_ATTEMPT_FINISHED:
taskId = ((TaskAttemptFinishedEvent)event).getTaskId().toString();
taskAttemptId = ((TaskAttemptFinishedEvent)event).
getAttemptId().toString();
break;
default:
LOG.warn("EventType: " + event.getEventType() + " cannot be recognized" +
" and handled by timeline service.");
return;
}
org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity
appEntityWithJobMetrics = null;
if (taskId == null) {
// JobEntity
tEntity = createJobEntity(event, timestamp, jobId,
MAPREDUCE_JOB_ENTITY_TYPE, setCreatedTime);
if (event.getEventType() == EventType.JOB_FINISHED
&& event.getTimelineMetrics() != null) {
appEntityWithJobMetrics = createAppEntityWithJobMetrics(event, jobId);
}
} else {
if (taskAttemptId == null) {
// TaskEntity
tEntity = createTaskEntity(event, timestamp, taskId,
MAPREDUCE_TASK_ENTITY_TYPE, MAPREDUCE_JOB_ENTITY_TYPE,
jobId, setCreatedTime, taskIdPrefix);
} else {
// TaskAttemptEntity
tEntity = createTaskAttemptEntity(event, timestamp, taskAttemptId,
MAPREDUCE_TASK_ATTEMPT_ENTITY_TYPE, MAPREDUCE_TASK_ENTITY_TYPE,
taskId, setCreatedTime, taskAttemptIdPrefix);
}
}
try {
if (appEntityWithJobMetrics == null) {
timelineV2Client.putEntitiesAsync(tEntity);
} else {
timelineV2Client.putEntities(tEntity, appEntityWithJobMetrics);
}
} catch (IOException | YarnException e) {
LOG.error("Failed to process Event " + event.getEventType()
+ " for the job : " + jobId, e);
return;
}
if (event.getEventType() == EventType.JOB_SUBMITTED) {
// Publish configs after main job submitted event has been posted.
publishConfigsOnJobSubmittedEvent((JobSubmittedEvent)event, jobId);
}
}
private void setSummarySlotSeconds(JobSummary summary, Counters allCounters) {
Counter slotMillisMapCounter = allCounters
.findCounter(JobCounter.SLOTS_MILLIS_MAPS);
if (slotMillisMapCounter != null) {
summary.setMapSlotSeconds(slotMillisMapCounter.getValue() / 1000);
}
Counter slotMillisReduceCounter = allCounters
.findCounter(JobCounter.SLOTS_MILLIS_REDUCES);
if (slotMillisReduceCounter != null) {
summary.setReduceSlotSeconds(slotMillisReduceCounter.getValue() / 1000);
}
}
protected void closeEventWriter(JobId jobId) throws IOException {
final MetaInfo mi = fileMap.get(jobId);
if (mi == null) {
throw new IOException("No MetaInfo found for JobId: [" + jobId + "]");
}
if (!mi.isWriterActive()) {
throw new IOException(
"Inactive Writer: Likely received multiple JobFinished / " +
"JobUnsuccessful events for JobId: ["
+ jobId + "]");
}
// Close the Writer
try {
mi.closeWriter();
} catch (IOException e) {
LOG.error("Error closing writer for JobID: " + jobId);
throw e;
}
}
protected void processDoneFiles(JobId jobId) throws IOException {
final MetaInfo mi = fileMap.get(jobId);
if (mi == null) {
throw new IOException("No MetaInfo found for JobId: [" + jobId + "]");
}
if (mi.getHistoryFile() == null) {
LOG.warn("No file for job-history with " + jobId + " found in cache!");
}
if (mi.getConfFile() == null) {
LOG.warn("No file for jobconf with " + jobId + " found in cache!");
}
// Writing out the summary file.
// TODO JH enhancement - reuse this file to store additional indexing info
// like ACLs, etc. JHServer can use HDFS append to build an index file
// with more info than is available via the filename.
Path qualifiedSummaryDoneFile = null;
FSDataOutputStream summaryFileOut = null;
try {
String doneSummaryFileName = getTempFileName(JobHistoryUtils
.getIntermediateSummaryFileName(jobId));
qualifiedSummaryDoneFile = doneDirFS.makeQualified(new Path(
doneDirPrefixPath, doneSummaryFileName));
summaryFileOut = doneDirFS.create(qualifiedSummaryDoneFile, true);
summaryFileOut.writeUTF(mi.getJobSummary().getJobSummaryString());
summaryFileOut.close();
doneDirFS.setPermission(qualifiedSummaryDoneFile, new FsPermission(
JobHistoryUtils.getConfiguredHistoryIntermediateUserDoneDirPermissions(getConfig())));
} catch (IOException e) {
LOG.info("Unable to write out JobSummaryInfo to ["
+ qualifiedSummaryDoneFile + "]", e);
throw e;
}
try {
// Move historyFile to Done Folder.
Path qualifiedDoneFile = null;
if (mi.getHistoryFile() != null) {
Path historyFile = mi.getHistoryFile();
Path qualifiedLogFile = stagingDirFS.makeQualified(historyFile);
int jobNameLimit =
getConfig().getInt(JHAdminConfig.MR_HS_JOBNAME_LIMIT,
JHAdminConfig.DEFAULT_MR_HS_JOBNAME_LIMIT);
String doneJobHistoryFileName =
getTempFileName(FileNameIndexUtils.getDoneFileName(mi
.getJobIndexInfo(), jobNameLimit));
qualifiedDoneFile =
doneDirFS.makeQualified(new Path(doneDirPrefixPath,
doneJobHistoryFileName));
if(moveToDoneNow(qualifiedLogFile, qualifiedDoneFile)) {
String historyUrl = MRWebAppUtil.getApplicationWebURLOnJHSWithScheme(
getConfig(), context.getApplicationID());
context.setHistoryUrl(historyUrl);
LOG.info("Set historyUrl to " + historyUrl);
}
}
// Move confFile to Done Folder
Path qualifiedConfDoneFile = null;
if (mi.getConfFile() != null) {
Path confFile = mi.getConfFile();
Path qualifiedConfFile = stagingDirFS.makeQualified(confFile);
String doneConfFileName =
getTempFileName(JobHistoryUtils
.getIntermediateConfFileName(jobId));
qualifiedConfDoneFile =
doneDirFS.makeQualified(new Path(doneDirPrefixPath,
doneConfFileName));
moveToDoneNow(qualifiedConfFile, qualifiedConfDoneFile);
}
moveTmpToDone(qualifiedSummaryDoneFile);
moveTmpToDone(qualifiedConfDoneFile);
moveTmpToDone(qualifiedDoneFile);
} catch (IOException e) {
LOG.error("Error closing writer for JobID: " + jobId);
throw e;
}
}
private | JobHistoryEventHandler |
java | apache__camel | components/camel-google/camel-google-calendar/src/generated/java/org/apache/camel/component/google/calendar/CalendarCalendarListEndpointConfiguration.java | {
"start": 2290,
"end": 8602
} | class ____ extends GoogleCalendarConfiguration {
@UriParam
@ApiParam(optional = false, apiMethods = {@ApiMethod(methodName = "delete", description="Calendar identifier. To retrieve calendar IDs call the calendarList.list method. If you want to access the primary calendar of the currently logged in user, use the primary keyword."), @ApiMethod(methodName = "get", description="Calendar identifier. To retrieve calendar IDs call the calendarList.list method. If you want to access the primary calendar of the currently logged in user, use the primary keyword."), @ApiMethod(methodName = "patch", description="Calendar identifier. To retrieve calendar IDs call the calendarList.list method. If you want to access the primary calendar of the currently logged in user, use the primary keyword."), @ApiMethod(methodName = "update", description="Calendar identifier. To retrieve calendar IDs call the calendarList.list method. If you want to access the primary calendar of the currently logged in user, use the primary keyword.")})
private String calendarId;
@UriParam
@ApiParam(optional = true, apiMethods = {@ApiMethod(methodName = "insert", description="Whether to use the foregroundColor and backgroundColor fields to write the calendar colors (RGB)"), @ApiMethod(methodName = "patch", description="Whether to use the foregroundColor and backgroundColor fields to write the calendar colors (RGB)"), @ApiMethod(methodName = "update", description="Whether to use the foregroundColor and backgroundColor fields to write the calendar colors (RGB)")})
private java.lang.Boolean colorRgbFormat;
@UriParam
@ApiParam(optional = false, apiMethods = {@ApiMethod(methodName = "insert", description="The com.google.api.services.calendar.model.CalendarListEntry"), @ApiMethod(methodName = "patch", description="The com.google.api.services.calendar.model.CalendarListEntry"), @ApiMethod(methodName = "update", description="The com.google.api.services.calendar.model.CalendarListEntry")})
private com.google.api.services.calendar.model.CalendarListEntry content;
@UriParam
@ApiParam(optional = false, apiMethods = {@ApiMethod(methodName = "watch", description="The com.google.api.services.calendar.model.Channel")})
private com.google.api.services.calendar.model.Channel contentChannel;
@UriParam
@ApiParam(optional = true, apiMethods = {@ApiMethod(methodName = "list", description="Maximum number of entries returned on one result page"), @ApiMethod(methodName = "watch", description="Maximum number of entries returned on one result page")})
private java.lang.Integer maxResults;
@UriParam
@ApiParam(optional = true, apiMethods = {@ApiMethod(methodName = "list", description="The minimum access role for the user in the returned entries"), @ApiMethod(methodName = "watch", description="The minimum access role for the user in the returned entries")})
private java.lang.String minAccessRole;
@UriParam
@ApiParam(optional = true, apiMethods = {@ApiMethod(methodName = "list", description="Token specifying which result page to return"), @ApiMethod(methodName = "watch", description="Token specifying which result page to return")})
private java.lang.String pageToken;
@UriParam
@ApiParam(optional = true, apiMethods = {@ApiMethod(methodName = "list", description="Whether to include deleted calendar list entries in the result"), @ApiMethod(methodName = "watch", description="Whether to include deleted calendar list entries in the result")})
private java.lang.Boolean showDeleted;
@UriParam
@ApiParam(optional = true, apiMethods = {@ApiMethod(methodName = "list", description="Whether to show hidden entries"), @ApiMethod(methodName = "watch", description="Whether to show hidden entries")})
private java.lang.Boolean showHidden;
@UriParam
@ApiParam(optional = true, apiMethods = {@ApiMethod(methodName = "list", description="Token obtained from the nextSyncToken field returned on the last page of results from the previous list request"), @ApiMethod(methodName = "watch", description="Token obtained from the nextSyncToken field returned on the last page of results from the previous list request")})
private java.lang.String syncToken;
public String getCalendarId() {
return calendarId;
}
public void setCalendarId(String calendarId) {
this.calendarId = calendarId;
}
public java.lang.Boolean getColorRgbFormat() {
return colorRgbFormat;
}
public void setColorRgbFormat(java.lang.Boolean colorRgbFormat) {
this.colorRgbFormat = colorRgbFormat;
}
public com.google.api.services.calendar.model.CalendarListEntry getContent() {
return content;
}
public void setContent(com.google.api.services.calendar.model.CalendarListEntry content) {
this.content = content;
}
public com.google.api.services.calendar.model.Channel getContentChannel() {
return contentChannel;
}
public void setContentChannel(com.google.api.services.calendar.model.Channel contentChannel) {
this.contentChannel = contentChannel;
}
public java.lang.Integer getMaxResults() {
return maxResults;
}
public void setMaxResults(java.lang.Integer maxResults) {
this.maxResults = maxResults;
}
public java.lang.String getMinAccessRole() {
return minAccessRole;
}
public void setMinAccessRole(java.lang.String minAccessRole) {
this.minAccessRole = minAccessRole;
}
public java.lang.String getPageToken() {
return pageToken;
}
public void setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
}
public java.lang.Boolean getShowDeleted() {
return showDeleted;
}
public void setShowDeleted(java.lang.Boolean showDeleted) {
this.showDeleted = showDeleted;
}
public java.lang.Boolean getShowHidden() {
return showHidden;
}
public void setShowHidden(java.lang.Boolean showHidden) {
this.showHidden = showHidden;
}
public java.lang.String getSyncToken() {
return syncToken;
}
public void setSyncToken(java.lang.String syncToken) {
this.syncToken = syncToken;
}
}
| CalendarCalendarListEndpointConfiguration |
java | quarkusio__quarkus | extensions/hibernate-orm/runtime/src/main/java/io/quarkus/hibernate/orm/runtime/service/QuarkusConnectionProviderInitiator.java | {
"start": 637,
"end": 2400
} | class ____ implements StandardServiceInitiator<ConnectionProvider> {
public static final QuarkusConnectionProviderInitiator INSTANCE = new QuarkusConnectionProviderInitiator();
@Override
public Class<ConnectionProvider> getServiceInitiated() {
return ConnectionProvider.class;
}
@Override
public ConnectionProvider initiateService(Map configurationValues, ServiceRegistryImplementor registry) {
//First, check that this setup won't need to deal with multi-tenancy at the connection pool level:
final MultiTenancyStrategy strategy = MultiTenancyStrategy.determineMultiTenancyStrategy(configurationValues);
if (strategy == MultiTenancyStrategy.DATABASE || strategy == MultiTenancyStrategy.SCHEMA) {
// nothing to do, but given the separate hierarchies have to handle this here.
return null;
}
//Next, we'll want to try the Quarkus optimised pool:
Object o = configurationValues.get(AvailableSettings.DATASOURCE);
if (o != null) {
final AgroalDataSource ds;
try {
ds = (AgroalDataSource) o;
} catch (ClassCastException cce) {
throw new HibernateException(
"A Datasource was configured as Connection Pool, but it's not the Agroal connection pool. In Quarkus, you need to use Agroal.");
}
return new QuarkusConnectionProvider(ds);
}
//When not using the Quarkus specific Datasource, delegate to traditional bootstrap so to not break
//applications using persistence.xml :
return ConnectionProviderInitiator.INSTANCE.initiateService(configurationValues, registry);
}
}
| QuarkusConnectionProviderInitiator |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/internal/int2darrays/Int2DArrays_assertEmpty_Test.java | {
"start": 1006,
"end": 1250
} | class ____ extends Int2DArraysBaseTest {
@Test
void should_delegate_to_Arrays2D() {
// WHEN
int2DArrays.assertEmpty(info, actual);
// THEN
verify(arrays2d).assertEmpty(info, failures, actual);
}
}
| Int2DArrays_assertEmpty_Test |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/inheritance/joined/FinancialAsset.java | {
"start": 245,
"end": 419
} | class ____ extends Asset {
private double price;
public double getPrice() {
return price;
}
public void setPrice(double price) {
this.price = price;
}
}
| FinancialAsset |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/params/ParameterizedClassIntegrationTests.java | {
"start": 61532,
"end": 62083
} | class ____ {
@Parameter
@ConvertWith(AtomicIntegerConverter.class)
AtomicInteger counter;
@BeforeParameterizedClassInvocation
static void before(AtomicInteger counter) {
assertEquals(2, counter.incrementAndGet());
}
@AfterParameterizedClassInvocation
static void after(AtomicInteger counter) {
assertEquals(4, counter.get());
}
@Test
void test1() {
this.counter.incrementAndGet();
}
@Test
void test2() {
this.counter.incrementAndGet();
}
}
static | LifecycleMethodArgumentInjectionWithFieldInjectionTestCase |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/io/compression/CompressorUtils.java | {
"start": 906,
"end": 2071
} | class ____ {
/**
* We put two integers before each compressed block, the first integer represents the compressed
* length of the block, and the second one represents the original length of the block.
*/
public static final int HEADER_LENGTH = 8;
public static void writeIntLE(int i, byte[] buf, int offset) {
buf[offset++] = (byte) i;
buf[offset++] = (byte) (i >>> 8);
buf[offset++] = (byte) (i >>> 16);
buf[offset] = (byte) (i >>> 24);
}
public static int readIntLE(byte[] buf, int i) {
return (buf[i] & 0xFF)
| ((buf[i + 1] & 0xFF) << 8)
| ((buf[i + 2] & 0xFF) << 16)
| ((buf[i + 3] & 0xFF) << 24);
}
public static void validateLength(int compressedLen, int originalLen)
throws BufferDecompressionException {
if (originalLen < 0
|| compressedLen < 0
|| (originalLen == 0 && compressedLen != 0)
|| (originalLen != 0 && compressedLen == 0)) {
throw new BufferDecompressionException("Input is corrupted, invalid length.");
}
}
}
| CompressorUtils |
java | quarkusio__quarkus | extensions/qute/runtime/src/main/java/io/quarkus/qute/runtime/extensions/TimeTemplateExtensions.java | {
"start": 3558,
"end": 4871
} | class ____ {
private final String pattern;
private final Locale locale;
private final ZoneId timeZone;
private final int hashCode;
public Key(String pattern, Locale locale, ZoneId timeZone) {
this.pattern = pattern;
this.locale = locale;
this.timeZone = timeZone;
final int prime = 31;
int result = 1;
result = prime * result + ((locale == null) ? 0 : locale.hashCode());
result = prime * result + ((pattern == null) ? 0 : pattern.hashCode());
result = prime * result + ((timeZone == null) ? 0 : timeZone.hashCode());
this.hashCode = result;
}
@Override
public int hashCode() {
return hashCode;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Key other = (Key) obj;
return Objects.equals(locale, other.locale) && Objects.equals(pattern, other.pattern)
&& Objects.equals(timeZone, other.timeZone);
}
}
}
| Key |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.