language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
netty__netty
|
buffer/src/main/java/io/netty/buffer/PooledHeapByteBuf.java
|
{
"start": 752,
"end": 7459
}
|
class ____ extends PooledByteBuf<byte[]> {
private static final Recycler<PooledHeapByteBuf> RECYCLER =
new Recycler<PooledHeapByteBuf>() {
@Override
protected PooledHeapByteBuf newObject(Handle<PooledHeapByteBuf> handle) {
return new PooledHeapByteBuf(handle, 0);
}
};
static PooledHeapByteBuf newInstance(int maxCapacity) {
PooledHeapByteBuf buf = RECYCLER.get();
buf.reuse(maxCapacity);
return buf;
}
PooledHeapByteBuf(Handle<? extends PooledHeapByteBuf> recyclerHandle, int maxCapacity) {
super(recyclerHandle, maxCapacity);
}
@Override
public final boolean isDirect() {
return false;
}
@Override
protected byte _getByte(int index) {
return HeapByteBufUtil.getByte(memory, idx(index));
}
@Override
protected short _getShort(int index) {
return HeapByteBufUtil.getShort(memory, idx(index));
}
@Override
protected short _getShortLE(int index) {
return HeapByteBufUtil.getShortLE(memory, idx(index));
}
@Override
protected int _getUnsignedMedium(int index) {
return HeapByteBufUtil.getUnsignedMedium(memory, idx(index));
}
@Override
protected int _getUnsignedMediumLE(int index) {
return HeapByteBufUtil.getUnsignedMediumLE(memory, idx(index));
}
@Override
protected int _getInt(int index) {
return HeapByteBufUtil.getInt(memory, idx(index));
}
@Override
protected int _getIntLE(int index) {
return HeapByteBufUtil.getIntLE(memory, idx(index));
}
@Override
protected long _getLong(int index) {
return HeapByteBufUtil.getLong(memory, idx(index));
}
@Override
protected long _getLongLE(int index) {
return HeapByteBufUtil.getLongLE(memory, idx(index));
}
@Override
public final ByteBuf getBytes(int index, ByteBuf dst, int dstIndex, int length) {
checkDstIndex(index, length, dstIndex, dst.capacity());
if (dst.hasMemoryAddress() && PlatformDependent.hasUnsafe()) {
PlatformDependent.copyMemory(memory, idx(index), dst.memoryAddress() + dstIndex, length);
} else if (dst.hasArray()) {
getBytes(index, dst.array(), dst.arrayOffset() + dstIndex, length);
} else {
dst.setBytes(dstIndex, memory, idx(index), length);
}
return this;
}
@Override
public final ByteBuf getBytes(int index, byte[] dst, int dstIndex, int length) {
checkDstIndex(index, length, dstIndex, dst.length);
System.arraycopy(memory, idx(index), dst, dstIndex, length);
return this;
}
@Override
public final ByteBuf getBytes(int index, ByteBuffer dst) {
int length = dst.remaining();
checkIndex(index, length);
dst.put(memory, idx(index), length);
return this;
}
@Override
public final ByteBuf getBytes(int index, OutputStream out, int length) throws IOException {
checkIndex(index, length);
out.write(memory, idx(index), length);
return this;
}
@Override
protected void _setByte(int index, int value) {
HeapByteBufUtil.setByte(memory, idx(index), value);
}
@Override
protected void _setShort(int index, int value) {
HeapByteBufUtil.setShort(memory, idx(index), value);
}
@Override
protected void _setShortLE(int index, int value) {
HeapByteBufUtil.setShortLE(memory, idx(index), value);
}
@Override
protected void _setMedium(int index, int value) {
HeapByteBufUtil.setMedium(memory, idx(index), value);
}
@Override
protected void _setMediumLE(int index, int value) {
HeapByteBufUtil.setMediumLE(memory, idx(index), value);
}
@Override
protected void _setInt(int index, int value) {
HeapByteBufUtil.setInt(memory, idx(index), value);
}
@Override
protected void _setIntLE(int index, int value) {
HeapByteBufUtil.setIntLE(memory, idx(index), value);
}
@Override
protected void _setLong(int index, long value) {
HeapByteBufUtil.setLong(memory, idx(index), value);
}
@Override
protected void _setLongLE(int index, long value) {
HeapByteBufUtil.setLongLE(memory, idx(index), value);
}
@Override
public final ByteBuf setBytes(int index, ByteBuf src, int srcIndex, int length) {
checkSrcIndex(index, length, srcIndex, src.capacity());
if (src.hasMemoryAddress() && PlatformDependent.hasUnsafe()) {
PlatformDependent.copyMemory(src.memoryAddress() + srcIndex, memory, idx(index), length);
} else if (src.hasArray()) {
setBytes(index, src.array(), src.arrayOffset() + srcIndex, length);
} else {
src.getBytes(srcIndex, memory, idx(index), length);
}
return this;
}
@Override
public final ByteBuf setBytes(int index, byte[] src, int srcIndex, int length) {
checkSrcIndex(index, length, srcIndex, src.length);
System.arraycopy(src, srcIndex, memory, idx(index), length);
return this;
}
@Override
public final ByteBuf setBytes(int index, ByteBuffer src) {
int length = src.remaining();
checkIndex(index, length);
src.get(memory, idx(index), length);
return this;
}
@Override
public final int setBytes(int index, InputStream in, int length) throws IOException {
checkIndex(index, length);
return in.read(memory, idx(index), length);
}
@Override
public final ByteBuf copy(int index, int length) {
checkIndex(index, length);
ByteBuf copy = alloc().heapBuffer(length, maxCapacity());
return copy.writeBytes(memory, idx(index), length);
}
@Override
final ByteBuffer duplicateInternalNioBuffer(int index, int length) {
checkIndex(index, length);
return ByteBuffer.wrap(memory, idx(index), length).slice();
}
@Override
public final boolean hasArray() {
return true;
}
@Override
public final byte[] array() {
ensureAccessible();
return memory;
}
@Override
public final int arrayOffset() {
return offset;
}
@Override
public final boolean hasMemoryAddress() {
return false;
}
@Override
public final long memoryAddress() {
throw new UnsupportedOperationException();
}
@Override
protected final ByteBuffer newInternalNioBuffer(byte[] memory) {
return ByteBuffer.wrap(memory);
}
}
|
PooledHeapByteBuf
|
java
|
quarkusio__quarkus
|
extensions/security-jpa-reactive/deployment/src/test/java/io/quarkus/security/jpa/reactive/BcryptPasswordMapperTest.java
|
{
"start": 150,
"end": 646
}
|
class ____ extends JpaSecurityRealmTest {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(testClasses)
.addClass(BCryptUserEntity.class)
.addAsResource("bcrypt-password-mapper/import.sql", "import.sql")
.addAsResource("bcrypt-password-mapper/application.properties", "application.properties"));
}
|
BcryptPasswordMapperTest
|
java
|
apache__flink
|
flink-tests/src/test/java/org/apache/flink/test/scheduling/SpeculativeExecutionITCase.java
|
{
"start": 31591,
"end": 32347
}
|
class ____ extends RichSinkFunction<Long> {
private final Map<Long, Long> numberCountResult = new HashMap<>();
@Override
public void invoke(Long value, Context context) throws Exception {
if (slowTaskCounter.getAndDecrement() > 0) {
Thread.sleep(5000);
}
numberCountResult.merge(value, 1L, Long::sum);
}
@Override
public void finish() {
if (getRuntimeContext().getTaskInfo().getAttemptNumber() == 0) {
numberCountResults.put(
getRuntimeContext().getTaskInfo().getIndexOfThisSubtask(),
numberCountResult);
}
}
}
private static
|
NonSpeculativeSinkFunction
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/math/FloorDoubleEvaluator.java
|
{
"start": 1086,
"end": 3867
}
|
class ____ implements EvalOperator.ExpressionEvaluator {
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(FloorDoubleEvaluator.class);
private final Source source;
private final EvalOperator.ExpressionEvaluator val;
private final DriverContext driverContext;
private Warnings warnings;
public FloorDoubleEvaluator(Source source, EvalOperator.ExpressionEvaluator val,
DriverContext driverContext) {
this.source = source;
this.val = val;
this.driverContext = driverContext;
}
@Override
public Block eval(Page page) {
try (DoubleBlock valBlock = (DoubleBlock) val.eval(page)) {
DoubleVector valVector = valBlock.asVector();
if (valVector == null) {
return eval(page.getPositionCount(), valBlock);
}
return eval(page.getPositionCount(), valVector).asBlock();
}
}
@Override
public long baseRamBytesUsed() {
long baseRamBytesUsed = BASE_RAM_BYTES_USED;
baseRamBytesUsed += val.baseRamBytesUsed();
return baseRamBytesUsed;
}
public DoubleBlock eval(int positionCount, DoubleBlock valBlock) {
try(DoubleBlock.Builder result = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) {
position: for (int p = 0; p < positionCount; p++) {
switch (valBlock.getValueCount(p)) {
case 0:
result.appendNull();
continue position;
case 1:
break;
default:
warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value"));
result.appendNull();
continue position;
}
double val = valBlock.getDouble(valBlock.getFirstValueIndex(p));
result.appendDouble(Floor.process(val));
}
return result.build();
}
}
public DoubleVector eval(int positionCount, DoubleVector valVector) {
try(DoubleVector.FixedBuilder result = driverContext.blockFactory().newDoubleVectorFixedBuilder(positionCount)) {
position: for (int p = 0; p < positionCount; p++) {
double val = valVector.getDouble(p);
result.appendDouble(p, Floor.process(val));
}
return result.build();
}
}
@Override
public String toString() {
return "FloorDoubleEvaluator[" + "val=" + val + "]";
}
@Override
public void close() {
Releasables.closeExpectNoException(val);
}
private Warnings warnings() {
if (warnings == null) {
this.warnings = Warnings.createWarnings(
driverContext.warningsMode(),
source.source().getLineNumber(),
source.source().getColumnNumber(),
source.text()
);
}
return warnings;
}
static
|
FloorDoubleEvaluator
|
java
|
elastic__elasticsearch
|
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/MaxmindIpDataLookups.java
|
{
"start": 37471,
"end": 37998
}
|
interface ____<RESPONSE extends AbstractResponse> {
RESPONSE build(RESPONSE resp, String address, Network network, List<String> locales);
}
/**
* The {@link MaxmindIpDataLookups.AbstractBase} is an abstract base implementation of {@link IpDataLookup} that
* provides common functionality for getting a specific kind of {@link AbstractResponse} from a {@link IpDatabase}.
*
* @param <RESPONSE> the intermediate type of {@link AbstractResponse}
*/
private abstract static
|
ResponseBuilder
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamMetadata.java
|
{
"start": 1722,
"end": 10719
}
|
class ____ implements Metadata.ProjectCustom {
public static final String TYPE = "data_stream";
public static final DataStreamMetadata EMPTY = new DataStreamMetadata(ImmutableOpenMap.of(), ImmutableOpenMap.of());
private static final ParseField DATA_STREAM = new ParseField("data_stream");
private static final ParseField DATA_STREAM_ALIASES = new ParseField("data_stream_aliases");
@SuppressWarnings("unchecked")
private static final ConstructingObjectParser<DataStreamMetadata, Void> PARSER = new ConstructingObjectParser<>(TYPE, false, args -> {
ImmutableOpenMap<String, DataStream> dataStreams = (ImmutableOpenMap<String, DataStream>) args[0];
ImmutableOpenMap<String, DataStreamAlias> dataStreamAliases = (ImmutableOpenMap<String, DataStreamAlias>) args[1];
if (dataStreamAliases == null) {
dataStreamAliases = ImmutableOpenMap.of();
}
return new DataStreamMetadata(dataStreams, dataStreamAliases);
});
static {
PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> {
ImmutableOpenMap.Builder<String, DataStream> dataStreams = ImmutableOpenMap.builder();
while (p.nextToken() != XContentParser.Token.END_OBJECT) {
String name = p.currentName();
dataStreams.put(name, DataStream.fromXContent(p));
}
return dataStreams.build();
}, DATA_STREAM);
PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> {
ImmutableOpenMap.Builder<String, DataStreamAlias> dataStreams = ImmutableOpenMap.builder();
while (p.nextToken() != XContentParser.Token.END_OBJECT) {
DataStreamAlias alias = DataStreamAlias.fromXContent(p);
dataStreams.put(alias.getName(), alias);
}
return dataStreams.build();
}, DATA_STREAM_ALIASES);
}
private final ImmutableOpenMap<String, DataStream> dataStreams;
private final ImmutableOpenMap<String, DataStreamAlias> dataStreamAliases;
public DataStreamMetadata(
ImmutableOpenMap<String, DataStream> dataStreams,
ImmutableOpenMap<String, DataStreamAlias> dataStreamAliases
) {
this.dataStreams = dataStreams;
this.dataStreamAliases = dataStreamAliases;
}
public DataStreamMetadata(StreamInput in) throws IOException {
this(
in.readImmutableOpenMap(StreamInput::readString, DataStream::read),
in.readImmutableOpenMap(StreamInput::readString, DataStreamAlias::new)
);
}
public DataStreamMetadata withAddedDatastream(DataStream datastream) {
final String name = datastream.getName();
final DataStream existing = dataStreams.get(name);
if (datastream.equals(existing)) {
return this;
}
return new DataStreamMetadata(ImmutableOpenMap.builder(dataStreams).fPut(name, datastream).build(), dataStreamAliases);
}
public DataStreamMetadata withAlias(String aliasName, String dataStream, Boolean isWriteDataStream, String filter) {
if (dataStreams.containsKey(dataStream) == false) {
throw new IllegalArgumentException("alias [" + aliasName + "] refers to a non existing data stream [" + dataStream + "]");
}
Map<String, Object> filterAsMap;
if (filter != null) {
filterAsMap = XContentHelper.convertToMap(XContentFactory.xContent(filter), filter, true);
} else {
filterAsMap = null;
}
DataStreamAlias alias = dataStreamAliases.get(aliasName);
if (alias == null) {
String writeDataStream = isWriteDataStream != null && isWriteDataStream ? dataStream : null;
alias = new DataStreamAlias(
aliasName,
List.of(dataStream),
writeDataStream,
filterAsMap == null ? null : Map.of(dataStream, filterAsMap)
);
} else {
DataStreamAlias copy = alias.update(dataStream, isWriteDataStream, filterAsMap);
if (copy == alias) {
return this;
}
alias = copy;
}
return new DataStreamMetadata(dataStreams, ImmutableOpenMap.builder(dataStreamAliases).fPut(aliasName, alias).build());
}
public DataStreamMetadata withRemovedDataStream(String name) {
ImmutableOpenMap.Builder<String, DataStream> existingDataStreams = ImmutableOpenMap.builder(dataStreams);
ImmutableOpenMap.Builder<String, DataStreamAlias> existingDataStreamAliases = ImmutableOpenMap.builder(dataStreamAliases);
existingDataStreams.remove(name);
Set<String> aliasesToDelete = new HashSet<>();
List<DataStreamAlias> aliasesToUpdate = new ArrayList<>();
for (var alias : dataStreamAliases.values()) {
DataStreamAlias copy = alias.removeDataStream(name);
if (copy != null) {
if (copy == alias) {
continue;
}
aliasesToUpdate.add(copy);
} else {
aliasesToDelete.add(alias.getName());
}
}
for (DataStreamAlias alias : aliasesToUpdate) {
existingDataStreamAliases.put(alias.getName(), alias);
}
for (String aliasToDelete : aliasesToDelete) {
existingDataStreamAliases.remove(aliasToDelete);
}
return new DataStreamMetadata(existingDataStreams.build(), existingDataStreamAliases.build());
}
public DataStreamMetadata withRemovedAlias(String aliasName, String dataStreamName, boolean mustExist) {
ImmutableOpenMap.Builder<String, DataStreamAlias> dataStreamAliases = ImmutableOpenMap.builder(this.dataStreamAliases);
DataStreamAlias existing = dataStreamAliases.get(aliasName);
if (mustExist && existing == null) {
throw new ResourceNotFoundException("alias [" + aliasName + "] doesn't exist");
} else if (existing == null) {
return this;
}
DataStreamAlias copy = existing.removeDataStream(dataStreamName);
if (copy == existing) {
return this;
}
if (copy != null) {
dataStreamAliases.put(aliasName, copy);
} else {
dataStreamAliases.remove(aliasName);
}
return new DataStreamMetadata(dataStreams, dataStreamAliases.build());
}
public Map<String, DataStream> dataStreams() {
return this.dataStreams;
}
public Map<String, DataStreamAlias> getDataStreamAliases() {
return dataStreamAliases;
}
@Override
public Diff<Metadata.ProjectCustom> diff(Metadata.ProjectCustom before) {
return new DataStreamMetadata.DataStreamMetadataDiff((DataStreamMetadata) before, this);
}
public static NamedDiff<Metadata.ProjectCustom> readDiffFrom(StreamInput in) throws IOException {
return new DataStreamMetadata.DataStreamMetadataDiff(in);
}
@Override
public EnumSet<Metadata.XContentContext> context() {
return Metadata.ALL_CONTEXTS;
}
@Override
public boolean isRestorable() {
// this metadata is written to the snapshot, however it uses custom logic for restoring
return false;
}
@Override
public String getWriteableName() {
return TYPE;
}
@Override
public TransportVersion getMinimalSupportedVersion() {
return TransportVersion.zero();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeMap(this.dataStreams, StreamOutput::writeWriteable);
out.writeMap(this.dataStreamAliases, StreamOutput::writeWriteable);
}
public static DataStreamMetadata fromXContent(XContentParser parser) throws IOException {
return PARSER.parse(parser, null);
}
@Override
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params ignored) {
return Iterators.concat(
ChunkedToXContentHelper.xContentObjectFields(DATA_STREAM.getPreferredName(), dataStreams),
ChunkedToXContentHelper.startObject(DATA_STREAM_ALIASES.getPreferredName()),
dataStreamAliases.values().iterator(),
ChunkedToXContentHelper.endObject()
);
}
@Override
public int hashCode() {
return Objects.hash(this.dataStreams, dataStreamAliases);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (obj.getClass() != getClass()) {
return false;
}
DataStreamMetadata other = (DataStreamMetadata) obj;
return Objects.equals(this.dataStreams, other.dataStreams) && Objects.equals(this.dataStreamAliases, other.dataStreamAliases);
}
@Override
public String toString() {
return Strings.toString(this);
}
static
|
DataStreamMetadata
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/TimelineWriter.java
|
{
"start": 1340,
"end": 2094
}
|
interface ____ {
/**
* Stores entity information to the timeline store. Any errors occurring for
* individual put request objects will be reported in the response.
*
* @param data
* a {@link TimelineEntities} object.
* @return a {@link TimelinePutResponse} object.
* @throws IOException
*/
TimelinePutResponse put(TimelineEntities data) throws IOException;
/**
* Store domain information to the timeline store. If A domain of the
* same ID already exists in the timeline store, it will be COMPLETELY updated
* with the given domain.
*
* @param domain
* a {@link TimelineDomain} object
* @throws IOException
*/
void put(TimelineDomain domain) throws IOException;
}
|
TimelineWriter
|
java
|
apache__camel
|
components/camel-cxf/camel-cxf-common/src/main/java/org/apache/camel/component/cxf/converter/CxfConverter.java
|
{
"start": 1697,
"end": 4787
}
|
class ____ {
private CxfConverter() {
// Helper class
}
@Converter
public static MessageContentsList toMessageContentsList(final Object[] array) {
if (array != null) {
return new MessageContentsList(array);
} else {
return new MessageContentsList();
}
}
@Converter
public static QName toQName(String qname) {
return QName.valueOf(qname);
}
@Converter
public static Object[] toArray(Object object) {
if (object instanceof Collection) {
return ((Collection<?>) object).toArray();
} else {
Object[] answer;
if (object == null) {
answer = new Object[0];
} else {
answer = new Object[1];
answer[0] = object;
}
return answer;
}
}
@Converter
public static String soapMessageToString(final SOAPMessage soapMessage, Exchange exchange)
throws SOAPException, IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
soapMessage.writeTo(baos);
return baos.toString(ExchangeHelper.getCharsetName(exchange));
}
@Converter
public static InputStream soapMessageToInputStream(final SOAPMessage soapMessage, Exchange exchange)
throws SOAPException, IOException {
CachedOutputStream cos = new CachedOutputStream(exchange);
soapMessage.writeTo(cos);
return cos.getInputStream();
}
@Converter
public static DataFormat toDataFormat(final String name) {
return DataFormat.valueOf(name.toUpperCase());
}
@Converter
public static String toString(MessageContentsList value, Exchange exchange) {
if (value != null && value.isEmpty()) {
return null;
}
Object answer = convertTo(String.class, exchange, value, exchange.getContext().getTypeConverterRegistry());
if (answer != null && answer != MISS_VALUE) {
return answer.toString();
}
return null;
}
/**
* Use a fallback type converter so we can convert the embedded list element if the value is MessageContentsList.
* The algorithm of this converter finds the first non-null list element from the list and applies conversion to the
* list element if can determine this MessageContentsList is used in CXF context(first element is the return value
* while others are Holders).
*
* @param type the desired type to be converted to
* @param exchange optional exchange which can be null
* @param value the object to be converted
* @param registry type converter registry
* @return the converted value of the desired type or null if no suitable converter found
*/
@SuppressWarnings("unchecked")
@Converter(fallback = true)
public static <T> T convertTo(
Class<T> type, Exchange exchange, Object value,
TypeConverterRegistry registry) {
// CXF-WS MessageContentsList
|
CxfConverter
|
java
|
elastic__elasticsearch
|
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/TransportClearSecurityCacheAction.java
|
{
"start": 1301,
"end": 3394
}
|
class ____ extends TransportNodesAction<
ClearSecurityCacheRequest,
ClearSecurityCacheResponse,
ClearSecurityCacheRequest.Node,
ClearSecurityCacheResponse.Node,
Void> {
private final CacheInvalidatorRegistry cacheInvalidatorRegistry;
@Inject
public TransportClearSecurityCacheAction(
ThreadPool threadPool,
ClusterService clusterService,
TransportService transportService,
ActionFilters actionFilters,
CacheInvalidatorRegistry cacheInvalidatorRegistry
) {
super(
ClearSecurityCacheAction.NAME,
clusterService,
transportService,
actionFilters,
ClearSecurityCacheRequest.Node::new,
threadPool.executor(ThreadPool.Names.MANAGEMENT)
);
this.cacheInvalidatorRegistry = cacheInvalidatorRegistry;
}
@Override
protected ClearSecurityCacheResponse newResponse(
ClearSecurityCacheRequest request,
List<ClearSecurityCacheResponse.Node> nodes,
List<FailedNodeException> failures
) {
return new ClearSecurityCacheResponse(clusterService.getClusterName(), nodes, failures);
}
@Override
protected ClearSecurityCacheRequest.Node newNodeRequest(ClearSecurityCacheRequest request) {
return new ClearSecurityCacheRequest.Node(request);
}
@Override
protected ClearSecurityCacheResponse.Node newNodeResponse(StreamInput in, DiscoveryNode node) throws IOException {
return new ClearSecurityCacheResponse.Node(in);
}
@Override
protected ClearSecurityCacheResponse.Node nodeOperation(ClearSecurityCacheRequest.Node request, Task task) {
if (request.getKeys() == null || request.getKeys().length == 0) {
cacheInvalidatorRegistry.invalidateCache(request.getCacheName());
} else {
cacheInvalidatorRegistry.invalidateByKey(request.getCacheName(), List.of(request.getKeys()));
}
return new ClearSecurityCacheResponse.Node(clusterService.localNode());
}
}
|
TransportClearSecurityCacheAction
|
java
|
quarkusio__quarkus
|
extensions/arc/deployment/src/test/java/io/quarkus/arc/test/lookup/ListInjectionTest.java
|
{
"start": 6355,
"end": 6544
}
|
class ____ implements Converter {
@Override
public String convert(String val) {
return val.toUpperCase();
}
}
@Dependent
static
|
ConverterBravo
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/userguide/util/Book.java
|
{
"start": 307,
"end": 682
}
|
class ____ extends CopyrightableContent {
private Long id;
private String name;
public Book() {
super();
}
public Book(Author a) {
super(a);
}
@Id
@GeneratedValue
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
|
Book
|
java
|
spring-projects__spring-security
|
config/src/main/java/org/springframework/security/config/annotation/authentication/configurers/userdetails/DaoAuthenticationConfigurer.java
|
{
"start": 1219,
"end": 1572
}
|
class ____<B extends ProviderManagerBuilder<B>, U extends UserDetailsService>
extends AbstractDaoAuthenticationConfigurer<B, DaoAuthenticationConfigurer<B, U>, U> {
/**
* Creates a new instance
* @param userDetailsService
*/
public DaoAuthenticationConfigurer(U userDetailsService) {
super(userDetailsService);
}
}
|
DaoAuthenticationConfigurer
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/api/atomic/boolean_/AtomicBooleanAssert_isTrue_Test.java
|
{
"start": 1019,
"end": 1761
}
|
class ____ {
@Test
void should_pass_when_actual_value_is_true() {
AtomicBoolean actual = new AtomicBoolean(true);
assertThat(actual).isTrue();
}
@Test
void should_fail_when_actual_value_is_false() {
AtomicBoolean actual = new AtomicBoolean(false);
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> assertThat(actual).isTrue())
.withMessage(shouldHaveValue(actual, true).create());
}
@Test
void should_fail_when_actual_is_null() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> {
AtomicBoolean actual = null;
assertThat(actual).isTrue();
}).withMessage(actualIsNull());
}
}
|
AtomicBooleanAssert_isTrue_Test
|
java
|
spring-projects__spring-boot
|
smoke-test/spring-boot-smoke-test-actuator-noweb/src/main/java/smoketest/actuator/noweb/HelloWorldService.java
|
{
"start": 729,
"end": 999
}
|
class ____ {
private final ServiceProperties configuration;
public HelloWorldService(ServiceProperties configuration) {
this.configuration = configuration;
}
public String getHelloMessage() {
return "Hello " + this.configuration.getName();
}
}
|
HelloWorldService
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/bugs/_1719/Target.java
|
{
"start": 247,
"end": 911
}
|
class ____ {
private Set<TargetElement> targetElements = new HashSet<>();
public Set<TargetElement> getTargetElements() {
return targetElements;
}
public void setTargetElements(Set<TargetElement> targetElements) {
this.targetElements = targetElements;
}
public TargetElement addTargetElement(TargetElement element) {
element.updateTarget( this );
getTargetElements().add( element );
return element;
}
public TargetElement removeTargetElement(TargetElement element) {
element.updateTarget( null );
getTargetElements().remove( element );
return element;
}
}
|
Target
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/metamodel/model/domain/MapPersistentAttribute.java
|
{
"start": 325,
"end": 574
}
|
interface ____<D,K,V> extends MapAttribute<D, K, V>, PluralPersistentAttribute<D,Map<K,V>,V> {
PathSource<K> getKeyPathSource();
@Override
SimpleDomainType<K> getKeyType();
@Override
SimpleDomainType<K> getKeyGraphType();
}
|
MapPersistentAttribute
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/AutoValueBuilderDefaultsInConstructorTest.java
|
{
"start": 3721,
"end": 4051
}
|
class ____ {
Builder() {
doSomethingOdd();
}
void doSomethingOdd() {}
abstract Builder setFoo(int foo);
abstract Test build();
}
}
""")
.expectUnchanged()
.doTest();
}
}
|
Builder
|
java
|
alibaba__druid
|
core/src/main/java/com/alibaba/druid/sql/ast/expr/SQLFloatExpr.java
|
{
"start": 911,
"end": 2631
}
|
class ____ extends SQLNumericLiteralExpr implements SQLValuableExpr, Comparable<SQLFloatExpr> {
private float value;
public SQLFloatExpr() {
super(new SQLDataTypeImpl(SQLDataType.Constants.FLOAT));
}
public SQLFloatExpr(String value) {
this();
this.value = Float.parseFloat(value);
}
public SQLFloatExpr(float value) {
this();
this.value = value;
}
public SQLFloatExpr clone() {
return new SQLFloatExpr(value);
}
@Override
public List<SQLObject> getChildren() {
return Collections.emptyList();
}
@Override
public Float getNumber() {
return value;
}
public Float getValue() {
return value;
}
public void setValue(float value) {
this.value = value;
}
protected void accept0(SQLASTVisitor visitor) {
visitor.visit(this);
visitor.endVisit(this);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
SQLFloatExpr that = (SQLFloatExpr) o;
return Double.compare(that.value, value) == 0;
}
@Override
public int hashCode() {
long temp = Double.doubleToLongBits(value);
return (int) (temp ^ (temp >>> 32));
}
@Override
public void setNumber(Number number) {
if (number == null) {
this.setValue(Float.NaN);
return;
}
this.setValue(number.floatValue());
}
@Override
public int compareTo(SQLFloatExpr o) {
return Float.compare(value, o.value);
}
}
|
SQLFloatExpr
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/scheduler/adaptive/Created.java
|
{
"start": 988,
"end": 1513
}
|
class ____ extends StateWithoutExecutionGraph {
private final Context context;
Created(Context context, Logger logger) {
super(context, logger);
this.context = context;
}
@Override
public JobStatus getJobStatus() {
return JobStatus.INITIALIZING;
}
/** Starts the scheduling by going into the {@link WaitingForResources} state. */
void startScheduling() {
context.goToWaitingForResources(null);
}
/** Context of the {@link Created} state. */
|
Created
|
java
|
square__retrofit
|
retrofit/java-test/src/test/java/retrofit2/RequestFactoryTest.java
|
{
"start": 62790,
"end": 63384
}
|
class ____ {
@Multipart //
@POST("/foo/bar/") //
Call<ResponseBody> method(@Part List<RequestBody> part) {
return null;
}
}
try {
buildRequest(Example.class, new Object[] {null});
fail();
} catch (IllegalArgumentException e) {
assertThat(e)
.hasMessageThat()
.isEqualTo(
"@Part annotation must supply a name or use MultipartBody.Part parameter type. (parameter 'part')\n"
+ " for method Example.method");
}
}
@Test
public void multipartArrayRequiresName() {
|
Example
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/impl/ScheduledPollConsumerBackoffTest.java
|
{
"start": 1167,
"end": 4790
}
|
/**
 * Tests the backoff behaviour of the scheduled poll consumer: once the
 * configured idle (or error) threshold is reached, the consumer skips a
 * number of subsequent scheduled runs (controlled by the backoff multiplier)
 * before polling again. Commits and rollbacks are observed through a custom
 * {@code PollingConsumerPollStrategy} wired in by the factory methods below.
 */
class ____ extends ContextTestSupport {
// Shared counters updated by the poll strategies: one per successful commit,
// one per rollback. Static because the strategy instances are anonymous
// classes created in the static factory methods.
private static int commits;
private static int errors;
@Test
public void testBackoffIdle() {
final Endpoint endpoint = getMockEndpoint("mock:foo");
// consumer configured with backoff multiplier 4 and idle threshold 2
final MockScheduledPollConsumer consumer = createMockScheduledPollConsumer(endpoint);
consumer.run();
consumer.run();
assertEquals(2, commits);
// now it should backoff 4 times
consumer.run();
consumer.run();
consumer.run();
consumer.run();
assertEquals(3, commits);
// and now we poll again
consumer.run();
consumer.run();
assertEquals(4, commits);
// now it should backoff 4 times
consumer.run();
consumer.run();
consumer.run();
consumer.run();
assertEquals(6, commits);
consumer.run();
assertEquals(6, commits);
consumer.stop();
}
@Test
public void testBackoffError() {
final Endpoint endpoint = getMockEndpoint("mock:foo");
final Exception expectedException = new Exception("Hello, I should be thrown on shutdown only!");
// consumer configured with backoff multiplier 4 and error threshold 3;
// each failed poll triggers a rollback which increments the error counter
final MockScheduledPollConsumer consumer = createMockScheduledPollConsumer(endpoint, expectedException);
consumer.run();
consumer.run();
consumer.run();
assertEquals(3, errors);
// now it should backoff 4 times
consumer.run();
consumer.run();
consumer.run();
consumer.run();
assertEquals(4, errors);
// and now we poll again
consumer.run();
consumer.run();
consumer.run();
assertEquals(6, errors);
// now it should backoff 4 times
consumer.run();
consumer.run();
consumer.run();
consumer.run();
assertEquals(8, errors);
consumer.stop();
}
/**
 * Creates a started mock consumer whose polls fail with the given exception,
 * using backoff multiplier 4 and error threshold 3. Rollbacks increment the
 * shared {@code errors} counter.
 */
private static MockScheduledPollConsumer createMockScheduledPollConsumer(Endpoint endpoint, Exception expectedException) {
MockScheduledPollConsumer consumer = new MockScheduledPollConsumer(endpoint, expectedException);
consumer.setBackoffMultiplier(4);
consumer.setBackoffErrorThreshold(3);
consumer.setPollStrategy(new PollingConsumerPollStrategy() {
public boolean begin(Consumer consumer, Endpoint endpoint) {
return true;
}
public void commit(Consumer consumer, Endpoint endpoint, int polledMessages) {
commits++;
}
public boolean rollback(Consumer consumer, Endpoint endpoint, int retryCounter, Exception e) {
errors++;
return false;
}
});
consumer.start();
return consumer;
}
/**
 * Creates a started mock consumer whose polls succeed (idle), using backoff
 * multiplier 4 and idle threshold 2. Commits increment the shared
 * {@code commits} counter; rollbacks are not counted here.
 */
private static MockScheduledPollConsumer createMockScheduledPollConsumer(Endpoint endpoint) {
MockScheduledPollConsumer consumer = new MockScheduledPollConsumer(endpoint, null);
consumer.setBackoffMultiplier(4);
consumer.setBackoffIdleThreshold(2);
consumer.setPollStrategy(new PollingConsumerPollStrategy() {
public boolean begin(Consumer consumer, Endpoint endpoint) {
return true;
}
public void commit(Consumer consumer, Endpoint endpoint, int polledMessages) {
commits++;
}
public boolean rollback(Consumer consumer, Endpoint endpoint, int retryCounter, Exception e) {
return false;
}
});
consumer.start();
return consumer;
}
}
|
ScheduledPollConsumerBackoffTest
|
java
|
processing__processing4
|
app/src/processing/app/syntax/im/CompositionTextManager.java
|
{
"start": 1086,
"end": 1380
}
|
class ____ {
private JEditTextArea textArea;
private String prevComposeString;
private int prevCommittedCount;
private boolean isInputProcess;
private int initialCaretPosition;
public static final int COMPOSING_UNDERBAR_HEIGHT = 5;
/**
* Create text manager
|
CompositionTextManager
|
java
|
spring-projects__spring-security
|
oauth2/oauth2-authorization-server/src/main/java/org/springframework/security/oauth2/server/authorization/oidc/http/converter/OidcProviderConfigurationHttpMessageConverter.java
|
{
"start": 2143,
"end": 5765
}
|
class ____
extends AbstractHttpMessageConverter<OidcProviderConfiguration> {
private static final ParameterizedTypeReference<Map<String, Object>> STRING_OBJECT_MAP = new ParameterizedTypeReference<>() {
};
private final GenericHttpMessageConverter<Object> jsonMessageConverter = HttpMessageConverters
.getJsonMessageConverter();
private Converter<Map<String, Object>, OidcProviderConfiguration> providerConfigurationConverter = new OidcProviderConfigurationConverter();
private Converter<OidcProviderConfiguration, Map<String, Object>> providerConfigurationParametersConverter = OidcProviderConfiguration::getClaims;
public OidcProviderConfigurationHttpMessageConverter() {
super(MediaType.APPLICATION_JSON, new MediaType("application", "*+json"));
}
@Override
protected boolean supports(Class<?> clazz) {
return OidcProviderConfiguration.class.isAssignableFrom(clazz);
}
@Override
@SuppressWarnings("unchecked")
protected OidcProviderConfiguration readInternal(Class<? extends OidcProviderConfiguration> clazz,
HttpInputMessage inputMessage) throws HttpMessageNotReadableException {
try {
Map<String, Object> providerConfigurationParameters = (Map<String, Object>) this.jsonMessageConverter
.read(STRING_OBJECT_MAP.getType(), null, inputMessage);
return this.providerConfigurationConverter.convert(providerConfigurationParameters);
}
catch (Exception ex) {
throw new HttpMessageNotReadableException(
"An error occurred reading the OpenID Provider Configuration: " + ex.getMessage(), ex,
inputMessage);
}
}
@Override
protected void writeInternal(OidcProviderConfiguration providerConfiguration, HttpOutputMessage outputMessage)
throws HttpMessageNotWritableException {
try {
Map<String, Object> providerConfigurationResponseParameters = this.providerConfigurationParametersConverter
.convert(providerConfiguration);
this.jsonMessageConverter.write(providerConfigurationResponseParameters, STRING_OBJECT_MAP.getType(),
MediaType.APPLICATION_JSON, outputMessage);
}
catch (Exception ex) {
throw new HttpMessageNotWritableException(
"An error occurred writing the OpenID Provider Configuration: " + ex.getMessage(), ex);
}
}
/**
* Sets the {@link Converter} used for converting the OpenID Provider Configuration
* parameters to an {@link OidcProviderConfiguration}.
* @param providerConfigurationConverter the {@link Converter} used for converting to
* an {@link OidcProviderConfiguration}
*/
public final void setProviderConfigurationConverter(
Converter<Map<String, Object>, OidcProviderConfiguration> providerConfigurationConverter) {
Assert.notNull(providerConfigurationConverter, "providerConfigurationConverter cannot be null");
this.providerConfigurationConverter = providerConfigurationConverter;
}
/**
* Sets the {@link Converter} used for converting the
* {@link OidcProviderConfiguration} to a {@code Map} representation of the OpenID
* Provider Configuration.
* @param providerConfigurationParametersConverter the {@link Converter} used for
* converting to a {@code Map} representation of the OpenID Provider Configuration
*/
public final void setProviderConfigurationParametersConverter(
Converter<OidcProviderConfiguration, Map<String, Object>> providerConfigurationParametersConverter) {
Assert.notNull(providerConfigurationParametersConverter,
"providerConfigurationParametersConverter cannot be null");
this.providerConfigurationParametersConverter = providerConfigurationParametersConverter;
}
private static final
|
OidcProviderConfigurationHttpMessageConverter
|
java
|
apache__camel
|
core/camel-util/src/main/java/org/apache/camel/util/function/VoidFunction.java
|
{
"start": 1043,
"end": 1227
}
|
/**
 * Represents an operation that accepts a single input argument and returns no
 * result, but unlike {@link java.util.function.Consumer} is allowed to throw
 * a checked exception.
 *
 * @param <I> the type of the input to the function
 */
@FunctionalInterface
interface ____<I> {
    /**
     * Applies this function to the given argument.
     *
     * @param in the function argument
     * @throws Exception if applying the function fails
     */
    void apply(I in) throws Exception;
}
|
VoidFunction
|
java
|
apache__flink
|
flink-streaming-java/src/test/java/org/apache/flink/streaming/runtime/tasks/TwoInputStreamTaskTest.java
|
{
"start": 4727,
"end": 25937
}
|
class ____ {
/**
* This test verifies that open() and close() are correctly called. This test also verifies that
* timestamps of emitted elements are correct. {@link CoStreamMap} assigns the input timestamp
* to emitted elements.
*/
@Test
void testOpenCloseAndTimestamps() throws Exception {
// Harness with two inputs (String, Integer) and a String output, chained as
// a single operator.
final TwoInputStreamTaskTestHarness<String, Integer, String> testHarness =
new TwoInputStreamTaskTestHarness<>(
TwoInputStreamTask::new,
BasicTypeInfo.STRING_TYPE_INFO,
BasicTypeInfo.INT_TYPE_INFO,
BasicTypeInfo.STRING_TYPE_INFO);
testHarness.setupOutputForSingletonOperatorChain();
StreamConfig streamConfig = testHarness.getStreamConfig();
// TestOpenCloseMapFunction records lifecycle calls in a static flag that is
// asserted after task completion.
CoStreamMap<String, Integer, String> coMapOperator =
new CoStreamMap<>(new TestOpenCloseMapFunction());
streamConfig.setStreamOperator(coMapOperator);
streamConfig.setOperatorID(new OperatorID());
long initialTime = 0L;
ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
testHarness.invoke();
testHarness.waitForTaskRunning();
// input timestamps must be carried over to the mapped output records
testHarness.processElement(new StreamRecord<>("Hello", initialTime + 1), 0, 0);
expectedOutput.add(new StreamRecord<>("Hello", initialTime + 1));
// wait until the input is processed to ensure ordering of the output
testHarness.waitForInputProcessing();
testHarness.processElement(new StreamRecord<>(1337, initialTime + 2), 1, 0);
expectedOutput.add(new StreamRecord<>("1337", initialTime + 2));
testHarness.waitForInputProcessing();
// ending the input drives the task to completion, which triggers close()
testHarness.endInput();
testHarness.waitForTaskCompletion();
assertThat(TestOpenCloseMapFunction.closeCalled)
.as("RichFunction methods were not called.")
.isTrue();
TestHarnessUtil.assertOutputEquals(
"Output was not correct.", expectedOutput, testHarness.getOutput());
}
/**
* This test verifies that watermarks and watermark statuses are correctly forwarded. This also
* checks whether watermarks are forwarded only when we have received watermarks from all
* inputs. The forwarded watermark must be the minimum of the watermarks of all active inputs.
*/
@Test
void testWatermarkAndWatermarkStatusForwarding() throws Exception {
final TwoInputStreamTaskTestHarness<String, Integer, String> testHarness =
new TwoInputStreamTaskTestHarness<>(
TwoInputStreamTask::new,
2,
2,
new int[] {1, 2},
BasicTypeInfo.STRING_TYPE_INFO,
BasicTypeInfo.INT_TYPE_INFO,
BasicTypeInfo.STRING_TYPE_INFO);
testHarness.setupOutputForSingletonOperatorChain();
StreamConfig streamConfig = testHarness.getStreamConfig();
CoStreamMap<String, Integer, String> coMapOperator = new CoStreamMap<>(new IdentityMap());
streamConfig.setStreamOperator(coMapOperator);
streamConfig.setOperatorID(new OperatorID());
ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
long initialTime = 0L;
testHarness.invoke();
testHarness.waitForTaskRunning();
testHarness.processElement(new Watermark(initialTime), 0, 0);
testHarness.processElement(new Watermark(initialTime), 0, 1);
testHarness.processElement(new Watermark(initialTime), 1, 0);
// now the output should still be empty
testHarness.waitForInputProcessing();
TestHarnessUtil.assertOutputEquals(
"Output was not correct.", expectedOutput, testHarness.getOutput());
testHarness.processElement(new Watermark(initialTime), 1, 1);
// now the watermark should have propagated, Map simply forward Watermarks
testHarness.waitForInputProcessing();
expectedOutput.add(new Watermark(initialTime));
TestHarnessUtil.assertOutputEquals(
"Output was not correct.", expectedOutput, testHarness.getOutput());
// contrary to checkpoint barriers these elements are not blocked by watermarks
testHarness.processElement(new StreamRecord<>("Hello", initialTime), 0, 0);
testHarness.processElement(new StreamRecord<>(42, initialTime), 1, 1);
expectedOutput.add(new StreamRecord<>("Hello", initialTime));
expectedOutput.add(new StreamRecord<>("42", initialTime));
testHarness.waitForInputProcessing();
TestHarnessUtil.assertOutputEquals(
"Output was not correct.", expectedOutput, testHarness.getOutput());
testHarness.processElement(new Watermark(initialTime + 4), 0, 0);
testHarness.processElement(new Watermark(initialTime + 3), 0, 1);
testHarness.processElement(new Watermark(initialTime + 3), 1, 0);
testHarness.processElement(new Watermark(initialTime + 2), 1, 1);
// check whether we get the minimum of all the watermarks, this must also only occur in
// the output after the two StreamRecords
expectedOutput.add(new Watermark(initialTime + 2));
testHarness.waitForInputProcessing();
TestHarnessUtil.assertOutputEquals(
"Output was not correct.", expectedOutput, testHarness.getOutput());
// advance watermark from one of the inputs, now we should get a new one since the
// minimum increases
testHarness.processElement(new Watermark(initialTime + 4), 1, 1);
testHarness.waitForInputProcessing();
expectedOutput.add(new Watermark(initialTime + 3));
TestHarnessUtil.assertOutputEquals(
"Output was not correct.", expectedOutput, testHarness.getOutput());
// advance the other two inputs, now we should get a new one since the
// minimum increases again
testHarness.processElement(new Watermark(initialTime + 4), 0, 1);
testHarness.processElement(new Watermark(initialTime + 4), 1, 0);
testHarness.waitForInputProcessing();
expectedOutput.add(new Watermark(initialTime + 4));
TestHarnessUtil.assertOutputEquals(
"Output was not correct.", expectedOutput, testHarness.getOutput());
// test whether idle input channels are acknowledged correctly when forwarding watermarks
testHarness.processElement(WatermarkStatus.IDLE, 0, 1);
testHarness.processElement(WatermarkStatus.IDLE, 1, 0);
testHarness.processElement(new Watermark(initialTime + 6), 0, 0);
testHarness.processElement(
new Watermark(initialTime + 5), 1, 1); // this watermark should be advanced first
testHarness.processElement(WatermarkStatus.IDLE, 1, 1); // once this is acknowledged,
testHarness.waitForInputProcessing();
expectedOutput.add(new Watermark(initialTime + 5));
expectedOutput.add(new Watermark(initialTime + 6));
TestHarnessUtil.assertOutputEquals(
"Output was not correct.", expectedOutput, testHarness.getOutput());
// make all input channels idle and check that the operator's idle status is forwarded
testHarness.processElement(WatermarkStatus.IDLE, 0, 0);
testHarness.waitForInputProcessing();
expectedOutput.add(WatermarkStatus.IDLE);
TestHarnessUtil.assertOutputEquals(
"Output was not correct.", expectedOutput, testHarness.getOutput());
// make some input channels active again and check that the operator's active status is
// forwarded only once
testHarness.processElement(WatermarkStatus.ACTIVE, 1, 0);
testHarness.processElement(WatermarkStatus.ACTIVE, 0, 1);
testHarness.waitForInputProcessing();
expectedOutput.add(WatermarkStatus.ACTIVE);
TestHarnessUtil.assertOutputEquals(
"Output was not correct.", expectedOutput, testHarness.getOutput());
testHarness.endInput();
testHarness.waitForTaskCompletion();
List<String> resultElements =
TestHarnessUtil.getRawElementsFromOutput(testHarness.getOutput());
assertThat(resultElements).hasSize(2);
}
/** This test verifies that checkpoint barriers are correctly forwarded. */
@Test
void testCheckpointBarriers() throws Exception {
final TwoInputStreamTaskTestHarness<String, Integer, String> testHarness =
new TwoInputStreamTaskTestHarness<>(
TwoInputStreamTask::new,
2,
2,
new int[] {1, 2},
BasicTypeInfo.STRING_TYPE_INFO,
BasicTypeInfo.INT_TYPE_INFO,
BasicTypeInfo.STRING_TYPE_INFO);
testHarness.setupOutputForSingletonOperatorChain();
StreamConfig streamConfig = testHarness.getStreamConfig();
CoStreamMap<String, Integer, String> coMapOperator = new CoStreamMap<>(new IdentityMap());
streamConfig.setStreamOperator(coMapOperator);
streamConfig.setOperatorID(new OperatorID());
ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
long initialTime = 0L;
testHarness.invoke();
testHarness.waitForTaskRunning();
testHarness.processEvent(
new CheckpointBarrier(0, 0, CheckpointOptions.forCheckpointWithDefaultLocation()),
0,
0);
// This one should go through
testHarness.processElement(new StreamRecord<>("Ciao-0-0", initialTime), 0, 1);
expectedOutput.add(new StreamRecord<>("Ciao-0-0", initialTime));
testHarness.waitForInputProcessing();
// These elements should be forwarded, since we did not yet receive a checkpoint barrier
// on that input, only add to same input, otherwise we would not know the ordering
// of the output since the Task might read the inputs in any order
testHarness.processElement(new StreamRecord<>(11, initialTime), 1, 1);
testHarness.processElement(new StreamRecord<>(111, initialTime), 1, 1);
expectedOutput.add(new StreamRecord<>("11", initialTime));
expectedOutput.add(new StreamRecord<>("111", initialTime));
testHarness.waitForInputProcessing();
// Wait to allow input to end up in the output.
// TODO Use count down latches instead as a cleaner solution
for (int i = 0; i < 20; ++i) {
if (testHarness.getOutput().size() >= expectedOutput.size()) {
break;
} else {
Thread.sleep(100);
}
}
// we should not yet see the barrier, only the two elements from non-blocked input
TestHarnessUtil.assertOutputEquals(
"Output was not correct.", expectedOutput, testHarness.getOutput());
testHarness.processEvent(
new CheckpointBarrier(0, 0, CheckpointOptions.forCheckpointWithDefaultLocation()),
0,
1);
testHarness.processEvent(
new CheckpointBarrier(0, 0, CheckpointOptions.forCheckpointWithDefaultLocation()),
1,
0);
testHarness.processEvent(
new CheckpointBarrier(0, 0, CheckpointOptions.forCheckpointWithDefaultLocation()),
1,
1);
testHarness.waitForInputProcessing();
testHarness.endInput();
testHarness.waitForTaskCompletion();
// now we should see the barrier
expectedOutput.add(
new CheckpointBarrier(0, 0, CheckpointOptions.forCheckpointWithDefaultLocation()));
TestHarnessUtil.assertOutputEquals(
"Output was not correct.", expectedOutput, testHarness.getOutput());
List<String> resultElements =
TestHarnessUtil.getRawElementsFromOutput(testHarness.getOutput());
assertThat(resultElements).hasSize(3);
}
/**
* This test verifies that checkpoint barriers and barrier buffers work correctly with
* concurrent checkpoint barriers where one checkpoint is "overtaking" another checkpoint, i.e.
* some inputs receive barriers from an earlier checkpoint, thereby blocking, then all inputs
* receive barriers from a later checkpoint.
*/
@Test
void testOvertakingCheckpointBarriers() throws Exception {
try (StreamTaskMailboxTestHarness<String> testHarness =
new StreamTaskMailboxTestHarnessBuilder<>(
MultipleInputStreamTask::new, BasicTypeInfo.STRING_TYPE_INFO)
.addJobConfig(
CheckpointingOptions.CHECKPOINTING_INTERVAL, Duration.ofSeconds(1))
.addInput(BasicTypeInfo.STRING_TYPE_INFO, 2)
.addInput(BasicTypeInfo.INT_TYPE_INFO, 2)
.setupOutputForSingletonOperatorChain(
new MultipleInputStreamTaskTest
.MapToStringMultipleInputOperatorFactory(2))
.build()) {
ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
long initialTime = 0L;
testHarness.processEvent(
new CheckpointBarrier(
0, 0, CheckpointOptions.forCheckpointWithDefaultLocation()),
0,
0);
// These elements should be forwarded, since we did not yet receive a checkpoint barrier
// on that input, only add to same input, otherwise we would not know the ordering
// of the output since the Task might read the inputs in any order
testHarness.processElement(new StreamRecord<>(42, initialTime), 1, 1);
testHarness.processElement(new StreamRecord<>(1337, initialTime), 1, 1);
expectedOutput.add(new StreamRecord<>("42", initialTime));
expectedOutput.add(new StreamRecord<>("1337", initialTime));
testHarness.processAll();
// we should not yet see the barrier, only the two elements from non-blocked input
TestHarnessUtil.assertOutputEquals(
"Output was not correct.", expectedOutput, testHarness.getOutput());
// Now give a later barrier to all inputs, this should unblock the first channel
testHarness.processEvent(
new CheckpointBarrier(
1, 1, CheckpointOptions.forCheckpointWithDefaultLocation()),
0,
1);
testHarness.processEvent(
new CheckpointBarrier(
1, 1, CheckpointOptions.forCheckpointWithDefaultLocation()),
0,
0);
testHarness.processEvent(
new CheckpointBarrier(
1, 1, CheckpointOptions.forCheckpointWithDefaultLocation()),
1,
0);
testHarness.processEvent(
new CheckpointBarrier(
1, 1, CheckpointOptions.forCheckpointWithDefaultLocation()),
1,
1);
expectedOutput.add(new CancelCheckpointMarker(0));
expectedOutput.add(
new CheckpointBarrier(
1, 1, CheckpointOptions.forCheckpointWithDefaultLocation()));
testHarness.processAll();
TestHarnessUtil.assertOutputEquals(
"Output was not correct.", expectedOutput, testHarness.getOutput());
// Then give the earlier barrier, these should be ignored
testHarness.processEvent(
new CheckpointBarrier(
0, 0, CheckpointOptions.forCheckpointWithDefaultLocation()),
0,
1);
testHarness.processEvent(
new CheckpointBarrier(
0, 0, CheckpointOptions.forCheckpointWithDefaultLocation()),
1,
0);
testHarness.processEvent(
new CheckpointBarrier(
0, 0, CheckpointOptions.forCheckpointWithDefaultLocation()),
1,
1);
testHarness.waitForTaskCompletion();
TestHarnessUtil.assertOutputEquals(
"Output was not correct.", expectedOutput, testHarness.getOutput());
}
}
@Test
void testOperatorMetricReuse() throws Exception {
final TwoInputStreamTaskTestHarness<String, String, String> testHarness =
new TwoInputStreamTaskTestHarness<>(
TwoInputStreamTask::new,
BasicTypeInfo.STRING_TYPE_INFO,
BasicTypeInfo.STRING_TYPE_INFO,
BasicTypeInfo.STRING_TYPE_INFO);
testHarness
.setupOperatorChain(new OperatorID(), new DuplicatingOperator())
.chain(
new OperatorID(),
new OneInputStreamTaskTest.DuplicatingOperator(),
BasicTypeInfo.STRING_TYPE_INFO.createSerializer(new SerializerConfigImpl()))
.chain(
new OperatorID(),
new OneInputStreamTaskTest.DuplicatingOperator(),
BasicTypeInfo.STRING_TYPE_INFO.createSerializer(new SerializerConfigImpl()))
.finish();
final TaskMetricGroup taskMetricGroup =
TaskManagerMetricGroup.createTaskManagerMetricGroup(
NoOpMetricRegistry.INSTANCE, "host", ResourceID.generate())
.addJob(new JobID(), "jobname")
.addTask(createExecutionAttemptId(), "task");
final StreamMockEnvironment env =
new StreamMockEnvironment(
testHarness.jobConfig,
testHarness.taskConfig,
testHarness.memorySize,
new MockInputSplitProvider(),
testHarness.bufferSize,
new TestTaskStateManager()) {
@Override
public TaskMetricGroup getMetricGroup() {
return taskMetricGroup;
}
};
final Counter numRecordsInCounter =
taskMetricGroup.getIOMetricGroup().getNumRecordsInCounter();
final Counter numRecordsOutCounter =
taskMetricGroup.getIOMetricGroup().getNumRecordsOutCounter();
testHarness.invoke(env);
testHarness.waitForTaskRunning();
final int numRecords1 = 5;
final int numRecords2 = 3;
for (int x = 0; x < numRecords1; x++) {
testHarness.processElement(new StreamRecord<>("hello"), 0, 0);
}
for (int x = 0; x < numRecords2; x++) {
testHarness.processElement(new StreamRecord<>("hello"), 1, 0);
}
testHarness.waitForInputProcessing();
assertThat(numRecordsInCounter.getCount()).isEqualTo(numRecords1 + numRecords2);
assertThat(numRecordsOutCounter.getCount())
.isEqualTo((numRecords1 + numRecords2) * 2 * 2 * 2);
testHarness.endInput();
testHarness.waitForTaskCompletion();
}
/**
 * Verifies that a task restored with {@code TaskStateSnapshot.FINISHED_ON_RESTORE}
 * skips executing its operator: only the MAX_WATERMARK and the EndOfData event
 * are emitted. The {@code TestFinishedOnRestoreStreamOperator} presumably fails
 * if any real processing is attempted (inferred from its name — confirm against
 * its implementation).
 */
@Test
void testSkipExecutionsIfFinishedOnRestore() throws Exception {
OperatorID nonSourceOperatorId = new OperatorID();
try (StreamTaskMailboxTestHarness<String> testHarness =
new StreamTaskMailboxTestHarnessBuilder<>(
TwoInputStreamTask::new, BasicTypeInfo.STRING_TYPE_INFO)
.setCollectNetworkEvents()
.addJobConfig(
CheckpointingOptions.CHECKPOINTING_INTERVAL, Duration.ofSeconds(1))
.addInput(BasicTypeInfo.INT_TYPE_INFO)
.addInput(BasicTypeInfo.INT_TYPE_INFO)
.setTaskStateSnapshot(1, TaskStateSnapshot.FINISHED_ON_RESTORE)
.setupOperatorChain(
nonSourceOperatorId, new TestFinishedOnRestoreStreamOperator())
.finishForSingletonOperatorChain(StringSerializer.INSTANCE)
.build()) {
// max watermarks on both inputs let the finished-on-restore task complete
testHarness.processElement(Watermark.MAX_WATERMARK, 0);
testHarness.processElement(Watermark.MAX_WATERMARK, 1);
testHarness.waitForTaskCompletion();
assertThat(testHarness.getOutput())
.containsExactly(Watermark.MAX_WATERMARK, new EndOfData(StopMode.DRAIN));
}
}
static
|
TwoInputStreamTaskTest
|
java
|
quarkusio__quarkus
|
extensions/grpc/deployment/src/main/java/io/quarkus/grpc/deployment/GrpcClientProcessor.java
|
{
"start": 16162,
"end": 27890
}
|
interface ____ points
// Note that we cannot use injection points metadata because the build can fail with unsatisfied dependency before
Set<DotName> serviceInterfaces = new HashSet<>();
for (ClassInfo serviceInterface : index.getIndex().getKnownDirectImplementors(GrpcDotNames.MUTINY_SERVICE)) {
serviceInterfaces.add(serviceInterface.name());
}
for (AnnotationInstance injectAnnotation : index.getIndex().getAnnotations(DotNames.INJECT)) {
if (injectAnnotation.target().kind() == Kind.FIELD) {
FieldInfo field = injectAnnotation.target().asField();
if (serviceInterfaces.contains(field.type().name()) && field.annotations().size() == 1) {
// e.g. @Inject Greeter
throw new IllegalStateException("A gRPC service injection is missing the @GrpcClient qualifier: "
+ field.declaringClass().name() + "#" + field.name());
}
} else if (injectAnnotation.target().kind() == Kind.METHOD) {
// CDI initializer
MethodInfo method = injectAnnotation.target().asMethod();
short position = 0;
for (Type param : method.parameterTypes()) {
position++;
if (serviceInterfaces.contains(param.name())) {
// e.g. @Inject void setGreeter(Greeter greeter)
Set<AnnotationInstance> annotations = new HashSet<>();
for (AnnotationInstance annotation : method.annotations()) {
if (annotation.target().kind() == Kind.METHOD_PARAMETER
&& annotation.target().asMethodParameter().position() == position) {
annotations.add(annotation);
}
}
if (annotations.size() > 1) {
throw new IllegalStateException("A gRPC service injection is missing the @GrpcClient qualifier: "
+ method.declaringClass().name() + "#" + method.name() + "()");
}
}
}
}
}
}
@BuildStep
InjectionPointTransformerBuildItem transformInjectionPoints() {
return new InjectionPointTransformerBuildItem(new InjectionPointsTransformer() {
@Override
public void transform(TransformationContext ctx) {
// If annotated with @GrpcClient and no explicit value is used, i.e. @GrpcClient(),
// then we need to determine the service name from the annotated element and transform the injection point
AnnotationInstance clientAnnotation = Annotations.find(ctx.getQualifiers(), GrpcDotNames.GRPC_CLIENT);
if (clientAnnotation != null && clientAnnotation.value() == null) {
String clientName = null;
AnnotationTarget annotationTarget = ctx.getAnnotationTarget();
if (ctx.getAnnotationTarget().kind() == Kind.FIELD) {
clientName = clientAnnotation.target().asField().name();
} else if (annotationTarget.kind() == Kind.METHOD_PARAMETER) {
MethodParameterInfo param = clientAnnotation.target().asMethodParameter();
// We don't need to check if parameter names are recorded - that's validated elsewhere
clientName = param.method().parameterName(param.position());
}
if (clientName != null) {
ctx.transform().remove(GrpcDotNames::isGrpcClient)
.add(AnnotationInstance.builder(GrpcDotNames.GRPC_CLIENT).value(clientName).build())
.done();
}
}
}
@Override
public boolean appliesTo(Type requiredType) {
return true;
}
});
}
@SuppressWarnings("deprecation")
@Record(ExecutionTime.RUNTIME_INIT)
@BuildStep
SyntheticBeanBuildItem clientInterceptorStorage(GrpcClientRecorder recorder, RecorderContext recorderContext,
BeanArchiveIndexBuildItem beanArchiveIndex) {
IndexView index = beanArchiveIndex.getIndex();
GrpcInterceptors interceptors = GrpcInterceptors.gatherInterceptors(index,
GrpcDotNames.CLIENT_INTERCEPTOR);
// Let's gather all the non-abstract, non-global interceptors, from these we'll filter out ones used per-service ones
// The rest, if anything stays, should be logged as problematic
Set<String> superfluousInterceptors = new HashSet<>(interceptors.nonGlobalInterceptors);
// Remove the metrics interceptors
for (String MICROMETER_INTERCEPTOR : MICROMETER_INTERCEPTORS) {
superfluousInterceptors.remove(MICROMETER_INTERCEPTOR);
}
List<AnnotationInstance> found = new ArrayList<>(index.getAnnotations(GrpcDotNames.REGISTER_CLIENT_INTERCEPTOR));
for (AnnotationInstance annotation : index.getAnnotations(GrpcDotNames.REGISTER_CLIENT_INTERCEPTOR_LIST)) {
for (AnnotationInstance nested : annotation.value().asNestedArray()) {
found.add(AnnotationInstance.create(nested.name(), annotation.target(), nested.values()));
}
}
for (AnnotationInstance annotation : found) {
String interceptorClassName = annotation.value().asString();
superfluousInterceptors.remove(interceptorClassName);
}
Set<Class<?>> perClientInterceptors = new HashSet<>();
for (String perClientInterceptor : interceptors.nonGlobalInterceptors) {
perClientInterceptors.add(recorderContext.classProxy(perClientInterceptor));
}
Set<Class<?>> globalInterceptors = new HashSet<>();
for (String globalInterceptor : interceptors.globalInterceptors) {
globalInterceptors.add(recorderContext.classProxy(globalInterceptor));
}
// it's okay if this one is not used:
superfluousInterceptors.remove(StorkMeasuringGrpcInterceptor.class.getName());
superfluousInterceptors.remove(VertxStorkMeasuringGrpcInterceptor.class.getName());
if (!superfluousInterceptors.isEmpty()) {
LOGGER.warnf("At least one unused gRPC client interceptor found: %s. If there are meant to be used globally, " +
"annotate them with @GlobalInterceptor.", String.join(", ", superfluousInterceptors));
}
return SyntheticBeanBuildItem.configure(ClientInterceptorStorage.class)
.unremovable()
.runtimeValue(recorder.initClientInterceptorStorage(perClientInterceptors, globalInterceptors))
.setRuntimeInit()
.done();
}
@BuildStep
UnremovableBeanBuildItem unremovableClientInterceptors() {
return UnremovableBeanBuildItem.beanTypes(GrpcDotNames.CLIENT_INTERCEPTOR);
}
@BuildStep
UnremovableBeanBuildItem unremovableChannelBuilderCustomizers() {
return UnremovableBeanBuildItem.beanTypes(GrpcDotNames.CHANNEL_BUILDER_CUSTOMIZER);
}
@BuildStep
UnremovableBeanBuildItem unremovableServerBuilderCustomizers() {
return UnremovableBeanBuildItem.beanTypes(GrpcDotNames.SERVER_BUILDER_CUSTOMIZER);
}
/**
 * Collects the interceptor class names declared through
 * {@code @RegisterClientInterceptor} qualifiers on the given injection point.
 *
 * @param injectionPoint the gRPC client injection point to inspect
 * @return the interceptor class names; an empty set when the injection point
 *         carries at most one qualifier (i.e. no extra interceptor qualifiers
 *         can be present)
 */
Set<String> getRegisteredInterceptors(InjectionPointInfo injectionPoint) {
Set<AnnotationInstance> qualifiers = injectionPoint.getRequiredQualifiers();
// a single qualifier can only be the client qualifier itself — nothing to collect
if (qualifiers.size() <= 1) {
return Collections.emptySet();
}
Set<String> interceptors = new HashSet<>();
for (AnnotationInstance qualifier : qualifiers) {
if (qualifier.name().equals(GrpcDotNames.REGISTER_CLIENT_INTERCEPTOR)) {
interceptors.add(qualifier.value().asClass().name().toString());
}
}
return interceptors;
}
private DeploymentException invalidInjectionPoint(InjectionPointInfo injectionPoint) {
return new DeploymentException(
injectionPoint.getRequiredType() + " cannot be injected into " + injectionPoint.getTargetInfo()
+ " - only Mutiny service interfaces, blocking stubs, reactive stubs based on Mutiny and io.grpc.Channel can be injected via @GrpcClient");
}
private void generateChannelProducer(BlockCreator bc, String clientName, ClientInfo client) {
bc.return_(bc.invokeStatic(CREATE_CHANNEL_METHOD, Const.of(clientName),
bc.setOf(client.interceptors.stream().toList(), Const::of)));
}
private static Set<DotName> getRawTypeClosure(ClassInfo classInfo, IndexView index) {
Set<DotName> types = new HashSet<>();
types.add(classInfo.name());
// Interfaces
for (DotName name : classInfo.interfaceNames()) {
ClassInfo interfaceClassInfo = index.getClassByName(name);
if (interfaceClassInfo != null) {
types.addAll(getRawTypeClosure(interfaceClassInfo, index));
} else {
// Interface not found in the index
types.add(name);
}
}
// Superclass
DotName superName = classInfo.superName();
if (superName != null && !DotNames.OBJECT.equals(superName)) {
ClassInfo superClassInfo = index.getClassByName(superName);
if (superClassInfo != null) {
types.addAll(getRawTypeClosure(superClassInfo, index));
} else {
// Superclass not found in the index
types.add(superName);
}
}
return types;
}
private void generateClientProducer(BlockCreator bc, String clientName, ClientInfo clientInfo) {
Const name = Const.of(clientName);
// First obtain the channel instance for the given service name
Expr channel = bc.invokeStatic(RETRIEVE_CHANNEL_METHOD, name,
bc.setOf(clientInfo.interceptors.stream().toList(), Const::of));
Expr client;
if (clientInfo.type == ClientType.MUTINY_CLIENT) {
// Instantiate the client, e.g. new HealthClient(serviceName,channel,GrpcClientConfigProvider.getStubConfigurator())
client = bc.new_(
ConstructorDesc.of(classDescOf(clientInfo.implName), String.class, Channel.class, BiFunction.class),
name, channel, bc.invokeStatic(GrpcDotNames.GET_STUB_CONFIGURATOR));
} else {
// Create the stub, e.g. newBlockingStub(channel)
MethodDesc factoryMethod = ClassMethodDesc.of(convertToServiceName(clientInfo.className),
clientInfo.type.getFactoryMethodName(),
MethodTypeDesc.of(classDescOf(clientInfo.className), Reflection2Gizmo.classDescOf(Channel.class)));
client = bc.invokeStatic(factoryMethod, channel);
// If needed, modify the call options, e.g. stub = stub.withCompression("gzip")
client = bc.invokeStatic(CONFIGURE_STUB, name, client);
if (clientInfo.type.isBlocking()) {
client = bc.invokeStatic(ADD_BLOCKING_CLIENT_INTERCEPTOR, client);
}
}
bc.return_(client);
}
/**
 * Resolves the gRPC service class for the given stub type: for a nested stub
 * class the service is its enclosing (outer) type, otherwise the stub type
 * itself.
 */
private static ClassDesc convertToServiceName(DotName stubName) {
    DotName serviceName = stubName.isInner() ? stubName.prefix() : stubName;
    return classDescOf(serviceName);
}
}
|
injection
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/jobmaster/slotpool/RequirementListener.java
|
{
"start": 1253,
"end": 3358
}
|
/**
 * Test helper that tracks declared resource requirements. When
 * {@code slotRequestMaxInterval} is positive, increases are not applied
 * immediately but deferred through a task scheduled on the main-thread
 * executor after the interval elapses; decreases always apply immediately.
 */
class ____ {
private final ComponentMainThreadExecutor componentMainThreadExecutor;
private final Duration slotRequestMaxInterval;
// Latest scheduled deferred-increase task; null until the first delayed
// increase is scheduled.
private ScheduledFuture<?> slotRequestFuture;
private ResourceCounter requirements = ResourceCounter.empty();
RequirementListener(
ComponentMainThreadExecutor componentMainThreadExecutor,
@Nonnull Duration slotRequestMaxInterval) {
this.componentMainThreadExecutor = componentMainThreadExecutor;
this.slotRequestMaxInterval = slotRequestMaxInterval;
}
// Applies the increase immediately when no max interval is configured;
// otherwise (re-)schedules a deferred update.
// NOTE(review): when a pending (not-yet-fired) future exists it is cancelled
// and only the LATEST requirements argument is rescheduled — the increments
// captured by the cancelled task appear to be dropped. Confirm this batching
// semantics is intended.
void increaseRequirements(ResourceCounter requirements) {
if (slotRequestMaxInterval.toMillis() <= 0L) {
this.requirements = this.requirements.add(requirements);
return;
}
if (!slotSlotRequestFutureAssignable()) {
slotRequestFuture.cancel(true);
}
slotRequestFuture =
componentMainThreadExecutor.schedule(
() -> this.checkSlotRequestMaxInterval(requirements),
slotRequestMaxInterval.toMillis(),
TimeUnit.MILLISECONDS);
}
// Decreases are applied immediately regardless of the max interval.
void decreaseRequirements(ResourceCounter requirements) {
this.requirements = this.requirements.subtract(requirements);
}
ResourceCounter getRequirements() {
return requirements;
}
// Blocks until the currently scheduled deferred update (if any) completes.
// NOTE(review): get() on a future that was cancelled by a later
// increaseRequirements() call throws an unchecked CancellationException,
// which propagates unwrapped — verify callers expect this.
void tryWaitSlotRequestIsDone() {
if (Objects.nonNull(slotRequestFuture)) {
try {
slotRequestFuture.get();
} catch (InterruptedException | ExecutionException e) {
throw new RuntimeException(e);
}
}
}
// True when no deferred update is pending (never scheduled, finished, or cancelled).
private boolean slotSlotRequestFutureAssignable() {
return slotRequestFuture == null
|| slotRequestFuture.isDone()
|| slotRequestFuture.isCancelled();
}
// Scheduled callback: applies the deferred increase; no-op when batching is disabled.
private void checkSlotRequestMaxInterval(ResourceCounter requirements) {
if (slotRequestMaxInterval.toMillis() <= 0L) {
return;
}
this.requirements = this.requirements.add(requirements);
}
}
|
RequirementListener
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/annotations/indexcoll/Version.java
|
{
"start": 445,
"end": 1132
}
|
class ____ {
private Integer id;
private String codeName;
private String number;
private Software software;
@Id
@GeneratedValue
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
@Column(name="`code_name`")
public String getCodeName() {
return codeName;
}
public void setCodeName(String codeName) {
this.codeName = codeName;
}
@Column(name="version_nbr")
public String getNumber() {
return number;
}
public void setNumber(String number) {
this.number = number;
}
@ManyToOne
public Software getSoftware() {
return software;
}
public void setSoftware(Software software) {
this.software = software;
}
}
|
Version
|
java
|
apache__camel
|
components/camel-cron/src/main/java/org/apache/camel/component/cron/api/CamelCronConfiguration.java
|
{
"start": 1052,
"end": 1916
}
|
class ____ {
@UriPath
@Metadata(required = true)
private String name;
@UriParam
@Metadata(required = true)
private String schedule;
public String getName() {
return name;
}
/**
* The name of the cron trigger
*/
public void setName(String name) {
this.name = name;
}
public String getSchedule() {
return schedule;
}
/**
* A cron expression that will be used to generate events
*/
public void setSchedule(String schedule) {
this.schedule = schedule;
}
@Override
public String toString() {
return new StringJoiner(", ", CamelCronConfiguration.class.getSimpleName() + "[", "]")
.add("name='" + name + "'")
.add("schedule='" + schedule + "'")
.toString();
}
}
|
CamelCronConfiguration
|
java
|
resilience4j__resilience4j
|
resilience4j-rxjava2/src/main/java/io/github/resilience4j/bulkhead/operator/MaybeBulkhead.java
|
{
"start": 937,
"end": 1587
}
|
class ____<T> extends Maybe<T> {
private final Maybe<T> upstream;
private final Bulkhead bulkhead;
MaybeBulkhead(Maybe<T> upstream, Bulkhead bulkhead) {
this.upstream = upstream;
this.bulkhead = bulkhead;
}
@Override
protected void subscribeActual(MaybeObserver<? super T> downstream) {
if (bulkhead.tryAcquirePermission()) {
upstream.subscribe(new BulkheadMaybeObserver(downstream));
} else {
downstream.onSubscribe(EmptyDisposable.INSTANCE);
downstream.onError(BulkheadFullException.createBulkheadFullException(bulkhead));
}
}
|
MaybeBulkhead
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/FieldCanBeFinalTest.java
|
{
"start": 1258,
"end": 1522
}
|
interface ____ {
int x = 42;
static int y = 42;
}
""")
.doTest();
}
@Test
public void simple() {
compilationHelper
.addSourceLines(
"Test.java",
"""
|
Anno
|
java
|
google__dagger
|
hilt-android/main/java/dagger/hilt/android/EarlyEntryPoints.java
|
{
"start": 1225,
"end": 1421
}
|
interface ____ a component manager holder. Note that this
* performs an unsafe cast and so callers should be sure that the given component/component
* manager matches the early entry point
|
given
|
java
|
apache__commons-lang
|
src/main/java/org/apache/commons/lang3/ClassUtils.java
|
{
"start": 33582,
"end": 33631
}
|
class ____.
* </p>
* <p>
* If the
|
name
|
java
|
apache__camel
|
components/camel-bindy/src/main/java/org/apache/camel/dataformat/bindy/annotation/BindyConverter.java
|
{
"start": 1087,
"end": 1227
}
|
class ____ be used to convert the String field to the attribute's data
* type.
*/
@Documented
@Retention(RetentionPolicy.RUNTIME)
public @
|
will
|
java
|
google__error-prone
|
core/src/main/java/com/google/errorprone/bugpatterns/UnnecessaryBoxedVariable.java
|
{
"start": 3323,
"end": 13412
}
|
class ____ extends BugChecker implements CompilationUnitTreeMatcher {
private static final Matcher<ExpressionTree> VALUE_OF_MATCHER =
staticMethod().onClass(UnnecessaryBoxedVariable::isBoxableType).named("valueOf");
private final WellKnownKeep wellKnownKeep;
@Inject
UnnecessaryBoxedVariable(WellKnownKeep wellKnownKeep) {
this.wellKnownKeep = wellKnownKeep;
}
@Override
public Description matchCompilationUnit(CompilationUnitTree tree, VisitorState state) {
FindBoxedUsagesScanner usages = new FindBoxedUsagesScanner(state);
usages.scan(tree, null);
new SuppressibleTreePathScanner<Void, Void>(state) {
@Override
public Void visitVariable(VariableTree tree, Void unused) {
VisitorState innerState = state.withPath(getCurrentPath());
unboxed(tree, innerState)
.flatMap(u -> handleVariable(u, usages, tree, innerState))
.ifPresent(state::reportMatch);
return super.visitVariable(tree, null);
}
}.scan(tree, null);
return NO_MATCH;
}
private Optional<Description> handleVariable(
Type unboxed, FindBoxedUsagesScanner usages, VariableTree tree, VisitorState state) {
VarSymbol varSymbol = getSymbol(tree);
switch (varSymbol.getKind()) {
case PARAMETER:
if (!canChangeMethodSignature(state, (MethodSymbol) varSymbol.getEnclosingElement())
|| state.getPath().getParentPath().getLeaf() instanceof LambdaExpressionTree) {
return Optional.empty();
}
// Fall through.
case LOCAL_VARIABLE:
if (!variableMatches(tree, state)) {
return Optional.empty();
}
break;
case FIELD:
if (wellKnownKeep.shouldKeep(tree)
|| !ASTHelpers.canBeRemoved(varSymbol)
|| !ASTHelpers.isStatic(varSymbol)
|| !ASTHelpers.isConsideredFinal(varSymbol)
|| usages.boxedUsageFound.contains(varSymbol)) {
return Optional.empty();
}
var initializer = tree.getInitializer();
if (initializer == null || !ASTHelpers.getType(initializer).isPrimitive()) {
return Optional.empty();
}
break;
case BINDING_VARIABLE: // Revisit if https://openjdk.org/jeps/488 happens.
default:
return Optional.empty();
}
return fixVariable(unboxed, usages, tree, state);
}
private Optional<Description> fixVariable(
Type unboxed, FindBoxedUsagesScanner usages, VariableTree tree, VisitorState state) {
VarSymbol varSymbol = getSymbol(tree);
if (usages.boxedUsageFound.contains(varSymbol)) {
return Optional.empty();
}
if (!usages.dereferenced.contains(varSymbol) && varSymbol.getKind() == ElementKind.PARAMETER) {
// If it isn't used and it is a parameter, don't fix it, because this could introduce a new
// NPE.
return Optional.empty();
}
SuggestedFix.Builder fixBuilder = SuggestedFix.builder();
fixBuilder.replace(tree.getType(), unboxed.tsym.getSimpleName().toString());
fixMethodInvocations(usages.fixableSimpleMethodInvocations.get(varSymbol), fixBuilder, state);
fixNullCheckInvocations(usages.fixableNullCheckInvocations.get(varSymbol), fixBuilder, state);
fixCastingInvocations(usages.fixableCastMethodInvocations.get(varSymbol), fixBuilder, state);
// Remove @Nullable annotation, if present.
AnnotationTree nullableAnnotation =
ASTHelpers.getAnnotationWithSimpleName(tree.getModifiers().getAnnotations(), "Nullable");
if (nullableAnnotation == null) {
return Optional.of(describeMatch(tree, fixBuilder.build()));
}
fixBuilder.replace(nullableAnnotation, "");
var message =
switch (varSymbol.getKind()) {
case FIELD ->
"This field is assigned a primitive value, and not used in a boxed context. Prefer"
+ " using the primitive type directly.";
default ->
"All usages of this @Nullable variable would result in a NullPointerException when"
+ " it actually is null. Use the primitive type if this variable should never"
+ " be null, or else fix the code to avoid unboxing or invoking its instance"
+ " methods.";
};
return Optional.of(
buildDescription(tree).setMessage(message).addFix(fixBuilder.build()).build());
}
private static Optional<Type> unboxed(Tree tree, VisitorState state) {
Type type = ASTHelpers.getType(tree);
if (type == null || !type.isReference()) {
return Optional.empty();
}
Type unboxed = state.getTypes().unboxedType(type);
if (unboxed == null
|| unboxed.getTag() == TypeTag.NONE
// Don't match java.lang.Void.
|| unboxed.getTag() == TypeTag.VOID) {
return Optional.empty();
}
return Optional.of(unboxed);
}
private static void fixNullCheckInvocations(
List<TreePath> nullCheckInvocations, SuggestedFix.Builder fixBuilder, VisitorState state) {
for (TreePath pathForTree : nullCheckInvocations) {
checkArgument(pathForTree.getLeaf() instanceof MethodInvocationTree);
MethodInvocationTree methodInvocation = (MethodInvocationTree) pathForTree.getLeaf();
TargetType targetType = TargetType.targetType(state.withPath(pathForTree));
if (targetType == null) {
// If the check is the only thing in a statement, remove the statement.
StatementTree statementTree =
ASTHelpers.findEnclosingNode(pathForTree, StatementTree.class);
if (statementTree != null) {
fixBuilder.delete(statementTree);
}
} else {
// If it's an expression, we can replace simply with the first argument.
fixBuilder.replace(
methodInvocation, state.getSourceForNode(methodInvocation.getArguments().getFirst()));
}
}
}
private static void fixMethodInvocations(
List<MethodInvocationTree> simpleMethodInvocations,
SuggestedFix.Builder fixBuilder,
VisitorState state) {
for (MethodInvocationTree methodInvocation : simpleMethodInvocations) {
ExpressionTree receiver = ASTHelpers.getReceiver(methodInvocation);
Type receiverType = ASTHelpers.getType(receiver);
MemberSelectTree methodSelect = (MemberSelectTree) methodInvocation.getMethodSelect();
fixBuilder.replace(
methodInvocation,
String.format(
"%s.%s(%s)",
receiverType.tsym.getSimpleName(),
methodSelect.getIdentifier(),
state.getSourceForNode(receiver)));
}
}
private static void fixCastingInvocations(
List<TreePath> castMethodInvocations, SuggestedFix.Builder fixBuilder, VisitorState state) {
for (TreePath castPath : castMethodInvocations) {
MethodInvocationTree castInvocation = (MethodInvocationTree) castPath.getLeaf();
ExpressionTree receiver = ASTHelpers.getReceiver(castInvocation);
Type expressionType = ASTHelpers.getType(castInvocation);
if (castPath.getParentPath() != null
&& castPath.getParentPath().getLeaf() instanceof ExpressionStatementTree) {
// If we were to replace X.intValue(); with (int) x;, the code wouldn't compile because
// that's not a statement. Instead, just delete.
fixBuilder.delete(castPath.getParentPath().getLeaf());
} else {
Type unboxedReceiverType = state.getTypes().unboxedType(ASTHelpers.getType(receiver));
if (unboxedReceiverType.getTag() == expressionType.getTag()) {
// someInteger.intValue() can just become someInt.
fixBuilder.replace(castInvocation, state.getSourceForNode(receiver));
} else {
// someInteger.otherPrimitiveValue() can become (otherPrimitive) someInt.
fixBuilder.replace(
castInvocation,
String.format(
"(%s) %s",
expressionType.tsym.getSimpleName(), state.getSourceForNode(receiver)));
}
}
}
}
/**
* Check to see if the variable should be considered for replacement, i.e.
*
* <ul>
* <li>A variable without an initializer
* <li>Enhanced for loop variables can be replaced if they are loops over primitive arrays
* <li>A variable initialized with a primitive value (which is then auto-boxed)
* <li>A variable initialized with an invocation of {@code Boxed.valueOf}, since that can be
* replaced with {@code Boxed.parseBoxed}.
* </ul>
*/
private static boolean variableMatches(VariableTree tree, VisitorState state) {
ExpressionTree expression = tree.getInitializer();
if (expression == null) {
Tree leaf = state.getPath().getParentPath().getLeaf();
if (!(leaf instanceof EnhancedForLoopTree node)) {
return true;
}
Type expressionType = ASTHelpers.getType(node.getExpression());
if (expressionType == null) {
return false;
}
Type elemtype = state.getTypes().elemtype(expressionType);
// Be conservative - if elemtype is null, treat it as if it is a loop over a wrapped type.
return elemtype != null && elemtype.isPrimitive();
}
Type initializerType = ASTHelpers.getType(expression);
if (initializerType == null) {
return false;
}
if (initializerType.isPrimitive()) {
return true;
}
// Don't count X.valueOf(...) as a boxed usage, since it can be replaced with X.parseX.
return VALUE_OF_MATCHER.matches(expression, state);
}
private static boolean isBoxableType(Type type, VisitorState state) {
Type unboxedType = state.getTypes().unboxedType(type);
return unboxedType != null && unboxedType.getTag() != TypeTag.NONE;
}
private static boolean canChangeMethodSignature(VisitorState state, MethodSymbol methodSymbol) {
return !ASTHelpers.methodCanBeOverridden(methodSymbol)
&& ASTHelpers.findSuperMethods(methodSymbol, state.getTypes()).isEmpty()
&& !ASTHelpers.isRecord(methodSymbol);
}
private static
|
UnnecessaryBoxedVariable
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionParser.java
|
{
"start": 1990,
"end": 2271
}
|
class ____ extends
* {@link DecayFunctionBuilder}, setup a PARSER field with this class, and
* register them in {@link SearchModule#registerScoreFunctions} or {@link SearchPlugin#getScoreFunctions}
* See {@link GaussDecayFunctionBuilder#PARSER} for an example.
*/
public final
|
that
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/AllocationRoutedStepTests.java
|
{
"start": 1942,
"end": 26880
}
|
class ____ extends AbstractStepTestCase<AllocationRoutedStep> {
@Override
public AllocationRoutedStep createRandomInstance() {
StepKey stepKey = randomStepKey();
StepKey nextStepKey = randomStepKey();
return new AllocationRoutedStep(stepKey, nextStepKey);
}
@Override
public AllocationRoutedStep mutateInstance(AllocationRoutedStep instance) {
StepKey key = instance.getKey();
StepKey nextKey = instance.getNextStepKey();
switch (between(0, 1)) {
case 0 -> key = new StepKey(key.phase(), key.action(), key.name() + randomAlphaOfLength(5));
case 1 -> nextKey = new StepKey(nextKey.phase(), nextKey.action(), nextKey.name() + randomAlphaOfLength(5));
default -> throw new AssertionError("Illegal randomisation branch");
}
return new AllocationRoutedStep(key, nextKey);
}
@Override
public AllocationRoutedStep copyInstance(AllocationRoutedStep instance) {
return new AllocationRoutedStep(instance.getKey(), instance.getNextStepKey());
}
public void testConditionMet() {
Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20));
Map<String, String> includes = AllocateActionTests.randomAllocationRoutingMap(1, 5);
Map<String, String> excludes = AllocateActionTests.randomAllocationRoutingMap(1, 5);
Map<String, String> requires = AllocateActionTests.randomAllocationRoutingMap(1, 5);
Settings.Builder existingSettings = Settings.builder()
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID());
Settings.Builder node1Settings = Settings.builder();
Settings.Builder node2Settings = Settings.builder();
includes.forEach((k, v) -> {
existingSettings.put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + k, v);
node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v);
});
excludes.forEach((k, v) -> { existingSettings.put(IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + k, v); });
requires.forEach((k, v) -> {
existingSettings.put(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v);
node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v);
});
IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index)
.addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", true, ShardRoutingState.STARTED));
logger.info(
"running test with routing configurations:\n\t includes: [{}]\n\t excludes: [{}]\n\t requires: [{}]",
includes,
excludes,
requires
);
AllocationRoutedStep step = createRandomInstance();
assertAllocateStatus(
index,
1,
0,
step,
existingSettings,
node1Settings,
node2Settings,
indexRoutingTable,
new ClusterStateWaitStep.Result(true, null)
);
}
public void testRequireConditionMetOnlyOneCopyAllocated() {
Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20));
Map<String, String> requires = Map.of(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "foo", "bar");
Settings.Builder existingSettings = Settings.builder()
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID());
Settings.Builder node1Settings = Settings.builder();
requires.forEach((k, v) -> {
existingSettings.put(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v);
node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v);
});
boolean primaryOnNode1 = randomBoolean();
IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index)
.addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", primaryOnNode1, ShardRoutingState.STARTED))
.addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node2", primaryOnNode1 == false, ShardRoutingState.STARTED));
AllocationRoutedStep step = new AllocationRoutedStep(randomStepKey(), randomStepKey());
assertAllocateStatus(
index,
1,
0,
step,
existingSettings,
node1Settings,
Settings.builder(),
indexRoutingTable,
new ClusterStateWaitStep.Result(false, allShardsActiveAllocationInfo(0, 1))
);
}
public void testClusterExcludeFiltersConditionMetOnlyOneCopyAllocated() {
Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20));
Settings.Builder existingSettings = Settings.builder()
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID());
boolean primaryOnNode1 = randomBoolean();
IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index)
.addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", primaryOnNode1, ShardRoutingState.STARTED))
.addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node2", primaryOnNode1 == false, ShardRoutingState.STARTED));
AllocationRoutedStep step = new AllocationRoutedStep(randomStepKey(), randomStepKey());
IndexMetadata indexMetadata = IndexMetadata.builder(index.getName())
.settings(existingSettings)
.numberOfShards(1)
.numberOfReplicas(1)
.build();
Map<String, IndexMetadata> indices = Map.of(index.getName(), indexMetadata);
Settings clusterSettings = Settings.builder().put("cluster.routing.allocation.exclude._id", "node1").build();
Settings.Builder nodeSettingsBuilder = Settings.builder();
final var project = ProjectMetadata.builder(randomProjectIdOrDefault()).indices(indices).build();
ProjectState state = ClusterState.builder(ClusterName.DEFAULT)
.metadata(Metadata.builder().put(project).transientSettings(clusterSettings))
.nodes(
DiscoveryNodes.builder()
.add(
DiscoveryNodeUtils.builder("node1")
.applySettings(nodeSettingsBuilder.build())
.address(new TransportAddress(TransportAddress.META_ADDRESS, 9200))
.build()
)
.add(
DiscoveryNodeUtils.builder("node2")
.applySettings(nodeSettingsBuilder.build())
.address(new TransportAddress(TransportAddress.META_ADDRESS, 9201))
.build()
)
)
.putRoutingTable(project.id(), RoutingTable.builder().add(indexRoutingTable).build())
.build()
.projectState(project.id());
Result actualResult = step.isConditionMet(index, state);
Result expectedResult = new ClusterStateWaitStep.Result(false, allShardsActiveAllocationInfo(1, 1));
assertEquals(expectedResult.complete(), actualResult.complete());
assertEquals(expectedResult.informationContext(), actualResult.informationContext());
}
public void testExcludeConditionMetOnlyOneCopyAllocated() {
Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20));
Map<String, String> excludes = Map.of(IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "foo", "bar");
Settings.Builder existingSettings = Settings.builder()
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID());
Settings.Builder node1Settings = Settings.builder();
excludes.forEach((k, v) -> {
existingSettings.put(IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + k, v);
node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v);
});
boolean primaryOnNode1 = randomBoolean();
IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index)
.addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", primaryOnNode1, ShardRoutingState.STARTED))
.addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node2", primaryOnNode1 == false, ShardRoutingState.STARTED));
AllocationRoutedStep step = new AllocationRoutedStep(randomStepKey(), randomStepKey());
assertAllocateStatus(
index,
1,
0,
step,
existingSettings,
node1Settings,
Settings.builder(),
indexRoutingTable,
new ClusterStateWaitStep.Result(false, allShardsActiveAllocationInfo(0, 1))
);
}
public void testIncludeConditionMetOnlyOneCopyAllocated() {
Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20));
Map<String, String> includes = Map.of(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "foo", "bar");
Settings.Builder existingSettings = Settings.builder()
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID());
Settings.Builder node1Settings = Settings.builder();
includes.forEach((k, v) -> {
existingSettings.put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + k, v);
node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v);
});
boolean primaryOnNode1 = randomBoolean();
IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index)
.addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", primaryOnNode1, ShardRoutingState.STARTED))
.addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node2", primaryOnNode1 == false, ShardRoutingState.STARTED));
AllocationRoutedStep step = new AllocationRoutedStep(randomStepKey(), randomStepKey());
assertAllocateStatus(
index,
1,
0,
step,
existingSettings,
node1Settings,
Settings.builder(),
indexRoutingTable,
new ClusterStateWaitStep.Result(false, allShardsActiveAllocationInfo(0, 1))
);
}
public void testConditionNotMetDueToRelocation() {
Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20));
Map<String, String> requires = AllocateActionTests.randomAllocationRoutingMap(1, 5);
Settings.Builder existingSettings = Settings.builder()
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_PREFIX + "._id", "node1")
.put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID());
Settings.Builder node1Settings = Settings.builder();
Settings.Builder node2Settings = Settings.builder();
requires.forEach((k, v) -> {
existingSettings.put(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v);
node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v);
});
boolean primaryOnNode1 = randomBoolean();
ShardRouting shardOnNode1 = TestShardRouting.newShardRouting(
new ShardId(index, 0),
"node1",
primaryOnNode1,
ShardRoutingState.STARTED
);
shardOnNode1 = shardOnNode1.relocate("node3", 230);
IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index)
.addShard(shardOnNode1)
.addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node2", primaryOnNode1 == false, ShardRoutingState.STARTED));
AllocationRoutedStep step = new AllocationRoutedStep(randomStepKey(), randomStepKey());
assertAllocateStatus(
index,
1,
0,
step,
existingSettings,
node1Settings,
node2Settings,
indexRoutingTable,
new ClusterStateWaitStep.Result(false, allShardsActiveAllocationInfo(0, 2))
);
}
public void testExecuteAllocateNotComplete() throws Exception {
Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20));
Map<String, String> includes = AllocateActionTests.randomAllocationRoutingMap(1, 5);
Map<String, String> excludes = randomValueOtherThanMany(
map -> map.keySet().stream().anyMatch(includes::containsKey),
() -> AllocateActionTests.randomAllocationRoutingMap(1, 5)
);
Map<String, String> requires = randomValueOtherThanMany(
map -> map.keySet().stream().anyMatch(includes::containsKey) || map.keySet().stream().anyMatch(excludes::containsKey),
() -> AllocateActionTests.randomAllocationRoutingMap(1, 5)
);
Settings.Builder existingSettings = Settings.builder()
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID());
Settings.Builder node1Settings = Settings.builder();
Settings.Builder node2Settings = Settings.builder();
includes.forEach((k, v) -> {
existingSettings.put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + k, v);
node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v);
});
excludes.forEach((k, v) -> { existingSettings.put(IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + k, v); });
requires.forEach((k, v) -> {
existingSettings.put(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v);
node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v);
});
IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index)
.addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", true, ShardRoutingState.STARTED))
.addShard(TestShardRouting.newShardRouting(new ShardId(index, 1), "node2", true, ShardRoutingState.STARTED));
logger.info(
"running test with routing configurations:\n\t includes: [{}]\n\t excludes: [{}]\n\t requires: [{}]",
includes,
excludes,
requires
);
AllocationRoutedStep step = createRandomInstance();
assertAllocateStatus(
index,
2,
0,
step,
existingSettings,
node1Settings,
node2Settings,
indexRoutingTable,
new ClusterStateWaitStep.Result(false, allShardsActiveAllocationInfo(0, 1))
);
}
public void testExecuteAllocateNotCompleteOnlyOneCopyAllocated() throws Exception {
Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20));
Map<String, String> includes = AllocateActionTests.randomAllocationRoutingMap(1, 5);
Map<String, String> excludes = randomValueOtherThanMany(
map -> map.keySet().stream().anyMatch(includes::containsKey),
() -> AllocateActionTests.randomAllocationRoutingMap(1, 5)
);
Map<String, String> requires = randomValueOtherThanMany(
map -> map.keySet().stream().anyMatch(includes::containsKey) || map.keySet().stream().anyMatch(excludes::containsKey),
() -> AllocateActionTests.randomAllocationRoutingMap(1, 5)
);
Settings.Builder existingSettings = Settings.builder()
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID());
Settings.Builder node1Settings = Settings.builder();
Settings.Builder node2Settings = Settings.builder();
includes.forEach((k, v) -> {
existingSettings.put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + k, v);
node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v);
});
excludes.forEach((k, v) -> { existingSettings.put(IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + k, v); });
requires.forEach((k, v) -> {
existingSettings.put(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v);
node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v);
});
boolean primaryOnNode1 = randomBoolean();
IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index)
.addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", primaryOnNode1, ShardRoutingState.STARTED))
.addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node2", primaryOnNode1 == false, ShardRoutingState.STARTED));
AllocationRoutedStep step = new AllocationRoutedStep(randomStepKey(), randomStepKey());
logger.info(
"running test with routing configurations:\n\t includes: [{}]\n\t excludes: [{}]\n\t requires: [{}]",
includes,
excludes,
requires
);
assertAllocateStatus(
index,
2,
0,
step,
existingSettings,
node1Settings,
node2Settings,
indexRoutingTable,
new ClusterStateWaitStep.Result(false, allShardsActiveAllocationInfo(0, 1))
);
}
public void testExecuteAllocateUnassigned() throws Exception {
Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20));
Map<String, String> includes = AllocateActionTests.randomAllocationRoutingMap(1, 5);
Map<String, String> excludes = randomValueOtherThanMany(
map -> map.keySet().stream().anyMatch(includes::containsKey),
() -> AllocateActionTests.randomAllocationRoutingMap(1, 5)
);
Map<String, String> requires = randomValueOtherThanMany(
map -> map.keySet().stream().anyMatch(includes::containsKey) || map.keySet().stream().anyMatch(excludes::containsKey),
() -> AllocateActionTests.randomAllocationRoutingMap(1, 5)
);
Settings.Builder existingSettings = Settings.builder()
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID());
Settings.Builder node1Settings = Settings.builder();
Settings.Builder node2Settings = Settings.builder();
includes.forEach((k, v) -> {
existingSettings.put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + k, v);
node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v);
});
excludes.forEach((k, v) -> { existingSettings.put(IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + k, v); });
requires.forEach((k, v) -> {
existingSettings.put(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + k, v);
node1Settings.put(Node.NODE_ATTRIBUTES.getKey() + k, v);
});
IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index)
.addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", true, ShardRoutingState.STARTED))
.addShard(
shardRoutingBuilder(new ShardId(index, 1), null, true, ShardRoutingState.UNASSIGNED).withUnassignedInfo(
buildUnassignedInfo("the shard is intentionally unassigned")
).build()
);
logger.info(
"running test with routing configurations:\n\t includes: [{}]\n\t excludes: [{}]\n\t requires: [{}]",
includes,
excludes,
requires
);
AllocationRoutedStep step = createRandomInstance();
assertAllocateStatus(
index,
2,
0,
step,
existingSettings,
node1Settings,
node2Settings,
indexRoutingTable,
new ClusterStateWaitStep.Result(false, waitingForActiveShardsAllocationInfo(0))
);
}
/**
* this tests the scenario where
*
* PUT index
* {
* "settings": {
* "number_of_replicas": 0,
* "number_of_shards": 1
* }
* }
*
* PUT index/_settings
* {
* "number_of_replicas": 1,
* "index.routing.allocation.include._name": "{node-name}"
* }
*/
public void testExecuteReplicasNotAllocatedOnSingleNode() {
Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20));
Settings.Builder existingSettings = Settings.builder()
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID());
Settings.Builder node1Settings = Settings.builder();
Settings.Builder node2Settings = Settings.builder();
IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(index)
.addShard(TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", true, ShardRoutingState.STARTED))
.addShard(
shardRoutingBuilder(new ShardId(index, 0), null, false, ShardRoutingState.UNASSIGNED).withUnassignedInfo(
new UnassignedInfo(Reason.REPLICA_ADDED, "no attempt")
).build()
);
AllocationRoutedStep step = createRandomInstance();
assertAllocateStatus(
index,
1,
1,
step,
existingSettings,
node1Settings,
node2Settings,
indexRoutingTable,
new ClusterStateWaitStep.Result(false, waitingForActiveShardsAllocationInfo(1))
);
}
public void testExecuteIndexMissing() throws Exception {
Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20));
ProjectState state = projectStateWithEmptyProject();
AllocationRoutedStep step = createRandomInstance();
Result actualResult = step.isConditionMet(index, state);
assertFalse(actualResult.complete());
assertNull(actualResult.informationContext());
}
private void assertAllocateStatus(
Index index,
int shards,
int replicas,
AllocationRoutedStep step,
Settings.Builder existingSettings,
Settings.Builder node1Settings,
Settings.Builder node2Settings,
IndexRoutingTable.Builder indexRoutingTable,
ClusterStateWaitStep.Result expectedResult
) {
IndexMetadata indexMetadata = IndexMetadata.builder(index.getName())
.settings(existingSettings)
.numberOfShards(shards)
.numberOfReplicas(replicas)
.build();
Map<String, IndexMetadata> indices = Map.of(index.getName(), indexMetadata);
final var project = ProjectMetadata.builder(randomProjectIdOrDefault()).indices(indices).build();
ProjectState state = ClusterState.builder(ClusterName.DEFAULT)
.putProjectMetadata(project)
.nodes(
DiscoveryNodes.builder()
.add(
DiscoveryNodeUtils.builder("node1")
.applySettings(node1Settings.build())
.address(new TransportAddress(TransportAddress.META_ADDRESS, 9200))
.build()
)
.add(
DiscoveryNodeUtils.builder("node2")
.applySettings(node2Settings.build())
.address(new TransportAddress(TransportAddress.META_ADDRESS, 9201))
.build()
)
)
.putRoutingTable(project.id(), RoutingTable.builder().add(indexRoutingTable).build())
.build()
.projectState(project.id());
Result actualResult = step.isConditionMet(index, state);
assertEquals(expectedResult.complete(), actualResult.complete());
assertEquals(expectedResult.informationContext(), actualResult.informationContext());
}
}
|
AllocationRoutedStepTests
|
java
|
apache__camel
|
core/camel-main/src/test/java/org/apache/camel/main/MainIoCTest.java
|
{
"start": 1783,
"end": 3472
}
|
class ____ we get IoC
main.configure().addRoutesBuilder(MyRouteBuilder.class);
// manually bind
main.bind("myBar", new MyBar());
// should be null before init
assertNull(main.getCamelContext());
// for testing that we can init camel and it has loaded configuration and routes and whatnot
main.init();
// and now its created
assertNotNull(main.getCamelContext());
// should be 1 route model
assertEquals(1, ((ModelCamelContext) main.getCamelContext()).getRouteDefinitions().size());
// and the configuration should have registered beans
assertNotNull(main.getCamelContext().getRegistry().lookupByName("MyCoolBean"));
assertEquals("Tiger", main.getCamelContext().getRegistry().lookupByName("coolStuff"));
// start it
main.start();
CamelContext camelContext = main.getCamelContext();
assertNotNull(camelContext);
MockEndpoint endpoint = camelContext.getEndpoint("mock:results", MockEndpoint.class);
endpoint.expectedBodiesReceived("World");
main.getCamelTemplate().sendBody("direct:start", "<message>1</message>");
endpoint.assertIsSatisfied();
// should also auto-configure direct/seda components from application.properties
SedaComponent seda = camelContext.getComponent("seda", SedaComponent.class);
assertEquals(500, seda.getQueueSize());
assertEquals(2, seda.getConcurrentConsumers());
DirectComponent direct = camelContext.getComponent("direct", DirectComponent.class);
assertEquals(1234, direct.getTimeout());
// should have called the configure
|
so
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java
|
{
"start": 4230,
"end": 7607
}
|
class ____ {
private static final Logger LOG =
LoggerFactory.getLogger(TestContainerLaunchRPC.class);
private static final RecordFactory recordFactory = RecordFactoryProvider
.getRecordFactory(null);
@Test
void testHadoopProtoRPCTimeout() throws Exception {
testRPCTimeout(HadoopYarnProtoRPC.class.getName());
}
private void testRPCTimeout(String rpcClass) throws Exception {
Configuration conf = new Configuration();
// set timeout low for the test
conf.setInt("yarn.rpc.nm-command-timeout", 3000);
conf.set(YarnConfiguration.IPC_RPC_IMPL, rpcClass);
YarnRPC rpc = YarnRPC.create(conf);
String bindAddr = "localhost:0";
InetSocketAddress addr = NetUtils.createSocketAddr(bindAddr);
Server server = rpc.getServer(ContainerManagementProtocol.class,
new DummyContainerManager(), addr, conf, null, 1);
server.start();
try {
ContainerManagementProtocol proxy = (ContainerManagementProtocol) rpc.getProxy(
ContainerManagementProtocol.class,
server.getListenerAddress(), conf);
ContainerLaunchContext containerLaunchContext = recordFactory
.newRecordInstance(ContainerLaunchContext.class);
ApplicationId applicationId = ApplicationId.newInstance(0, 0);
ApplicationAttemptId applicationAttemptId =
ApplicationAttemptId.newInstance(applicationId, 0);
ContainerId containerId =
ContainerId.newContainerId(applicationAttemptId, 100);
NodeId nodeId = NodeId.newInstance("localhost", 1234);
Resource resource = Resource.newInstance(1234, 2);
ContainerTokenIdentifier containerTokenIdentifier =
new ContainerTokenIdentifier(containerId, "localhost", "user",
resource, System.currentTimeMillis() + 10000, 42, 42,
Priority.newInstance(0), 0);
Token containerToken =
newContainerToken(nodeId, "password".getBytes(),
containerTokenIdentifier);
StartContainerRequest scRequest =
StartContainerRequest.newInstance(containerLaunchContext,
containerToken);
List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
list.add(scRequest);
StartContainersRequest allRequests =
StartContainersRequest.newInstance(list);
try {
proxy.startContainers(allRequests);
} catch (Exception e) {
LOG.info(StringUtils.stringifyException(e));
assertEquals(SocketTimeoutException.class.getName(), e.getClass().getName(),
"Error, exception is not: " + SocketTimeoutException.class.getName());
return;
}
} finally {
server.stop();
}
fail("timeout exception should have occurred!");
}
public static Token newContainerToken(NodeId nodeId, byte[] password,
ContainerTokenIdentifier tokenIdentifier) {
// RPC layer client expects ip:port as service for tokens
InetSocketAddress addr =
NetUtils.createSocketAddrForHost(nodeId.getHost(), nodeId.getPort());
// NOTE: use SecurityUtil.setTokenService if this becomes a "real" token
Token containerToken =
Token.newInstance(tokenIdentifier.getBytes(),
ContainerTokenIdentifier.KIND.toString(), password, SecurityUtil
.buildTokenService(addr).toString());
return containerToken;
}
public
|
TestContainerLaunchRPC
|
java
|
elastic__elasticsearch
|
x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/SourceValueFetcherSortedUnsignedLongIndexFieldData.java
|
{
"start": 1700,
"end": 3274
}
|
class ____ extends SourceValueFetcherIndexFieldData.Builder<SortedNumericLongValues> {
public Builder(
String fieldName,
ValuesSourceType valuesSourceType,
ValueFetcher valueFetcher,
SourceProvider sourceProvider,
ToScriptFieldFactory<SortedNumericLongValues> toScriptFieldFactory
) {
super(fieldName, valuesSourceType, valueFetcher, sourceProvider, toScriptFieldFactory);
}
@Override
public SourceValueFetcherSortedUnsignedLongIndexFieldData build(IndexFieldDataCache cache, CircuitBreakerService breakerService) {
return new SourceValueFetcherSortedUnsignedLongIndexFieldData(
fieldName,
valuesSourceType,
valueFetcher,
sourceProvider,
toScriptFieldFactory
);
}
}
protected SourceValueFetcherSortedUnsignedLongIndexFieldData(
String fieldName,
ValuesSourceType valuesSourceType,
ValueFetcher valueFetcher,
SourceProvider sourceProvider,
ToScriptFieldFactory<SortedNumericLongValues> toScriptFieldFactory
) {
super(fieldName, valuesSourceType, valueFetcher, sourceProvider, toScriptFieldFactory);
}
@Override
public SourceValueFetcherLeafFieldData<SortedNumericLongValues> loadDirect(LeafReaderContext context) {
return new SourceValueFetcherSortedUnsignedLongLeafFieldData(toScriptFieldFactory, context, valueFetcher, sourceProvider);
}
private static
|
Builder
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/state/InternalPriorityQueueTestBase.java
|
{
"start": 13583,
"end": 15150
}
|
class ____ extends AbstractHeapPriorityQueueElement
implements Keyed<Long>, PriorityComparable<TestElement> {
private final long key;
private final long priority;
public TestElement(long key, long priority) {
this.key = key;
this.priority = priority;
}
@Override
public int comparePriorityTo(@Nonnull TestElement other) {
return Long.compare(priority, other.priority);
}
public Long getKey() {
return key;
}
public long getPriority() {
return priority;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
TestElement that = (TestElement) o;
return key == that.key && priority == that.priority;
}
@Override
public int hashCode() {
return Objects.hash(getKey(), getPriority());
}
public TestElement deepCopy() {
return new TestElement(key, priority);
}
@Override
public String toString() {
return "TestElement{" + "key=" + key + ", priority=" + priority + '}';
}
}
/**
* Serializer for {@link TestElement}. The serialization format produced by this serializer
* allows lexicographic ordering by {@link TestElement#getPriority}.
*/
protected static
|
TestElement
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/querydsl/query/LeafQueryTests.java
|
{
"start": 833,
"end": 2333
}
|
class ____ extends Query {
private DummyLeafQuery(Source source) {
super(source);
}
@Override
protected QueryBuilder asBuilder() {
return null;
}
@Override
protected String innerToString() {
return "";
}
@Override
public boolean containsPlan() {
return false;
}
}
public void testEqualsAndHashCode() {
DummyLeafQuery query = new DummyLeafQuery(SourceTests.randomSource());
checkEqualsAndHashCode(query, LeafQueryTests::copy, LeafQueryTests::mutate);
}
private static DummyLeafQuery copy(DummyLeafQuery query) {
return new DummyLeafQuery(query.source());
}
private static DummyLeafQuery mutate(DummyLeafQuery query) {
return new DummyLeafQuery(SourceTests.mutate(query.source()));
}
public void testNot() {
var q = new LeafQueryTests.DummyLeafQuery(new Source(Location.EMPTY, "test"));
assertThat(q.negate(new Source(Location.EMPTY, "not")), equalTo(new NotQuery(new Source(Location.EMPTY, "not"), q)));
}
public void testNotNot() {
var q = new LeafQueryTests.DummyLeafQuery(new Source(Location.EMPTY, "test"));
assertThat(q.negate(Source.EMPTY).negate(Source.EMPTY), equalTo(q));
}
public void testToString() {
assertEquals("DummyLeafQuery@1:2[]", new DummyLeafQuery(new Source(1, 1, StringUtils.EMPTY)).toString());
}
}
|
DummyLeafQuery
|
java
|
apache__maven
|
its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng4291MojoRequiresOnlineModeTest.java
|
{
"start": 1040,
"end": 3760
}
|
class ____ extends AbstractMavenIntegrationTestCase {
/**
* Test that the mojo annotation @requiresOnline is recognized. For a direct mojo invocation, this means to fail
* when Maven is in offline mode but the mojo requires online model.
*
* @throws Exception in case of failure
*/
@Test
public void testitDirectInvocation() throws Exception {
File testDir = extractResources("/mng-4291");
// First, build the test plugin
Verifier verifier = newVerifier(new File(testDir, "maven-it-plugin-online").getAbsolutePath());
verifier.setAutoclean(false);
verifier.deleteDirectory("target");
verifier.addCliArgument("install");
verifier.execute();
verifier.verifyErrorFreeLog();
// Then, run the test project that uses the plugin
verifier = newVerifier(testDir.getAbsolutePath());
verifier.setAutoclean(false);
verifier.deleteDirectory("target");
verifier.setLogFileName("log-direct.txt");
verifier.addCliArgument("--offline");
try {
verifier.addCliArgument("org.apache.maven.its.plugins:maven-it-plugin-online:2.1-SNAPSHOT:touch");
verifier.execute();
verifier.verifyErrorFreeLog();
fail("Request to execute online mojo during offline mode did not fail the build.");
} catch (VerificationException e) {
// expected
}
}
/**
* Test that the mojo annotation @requiresOnline is recognized. For a mojo invocation bound to a lifecycle phase,
* this means to skip the mojo when Maven is in offline mode but the mojo requires online mode.
*
* @throws Exception in case of failure
*/
@Test
public void testitLifecycleInvocation() throws Exception {
File testDir = extractResources("/mng-4291");
// First, build the test plugin
Verifier verifier = newVerifier(new File(testDir, "maven-it-plugin-online").getAbsolutePath());
verifier.setAutoclean(false);
verifier.deleteDirectory("target");
verifier.addCliArgument("install");
verifier.execute();
verifier.verifyErrorFreeLog();
// Then, run the test project that uses the plugin
verifier = newVerifier(testDir.getAbsolutePath());
verifier.setAutoclean(false);
verifier.deleteDirectory("target");
verifier.setLogFileName("log-lifecycle.txt");
verifier.addCliArgument("--offline");
verifier.addCliArgument("validate");
verifier.execute();
verifier.verifyErrorFreeLog();
verifier.verifyFileNotPresent("target/touch.txt");
}
}
|
MavenITmng4291MojoRequiresOnlineModeTest
|
java
|
spring-projects__spring-security
|
config/src/main/java/org/springframework/security/config/annotation/web/configurers/HeadersConfigurer.java
|
{
"start": 20324,
"end": 22082
}
|
class ____ {
private XXssProtectionHeaderWriter writer;
private XXssConfig() {
enable();
}
/**
* Sets the value of the X-XSS-PROTECTION header. OWASP recommends using
* {@link XXssProtectionHeaderWriter.HeaderValue#DISABLED}.
*
* If {@link XXssProtectionHeaderWriter.HeaderValue#DISABLED}, will specify that
* X-XSS-Protection is disabled. For example:
*
* <pre>
* X-XSS-Protection: 0
* </pre>
*
* If {@link XXssProtectionHeaderWriter.HeaderValue#ENABLED}, will contain a value
* of 1, but will not specify the mode as blocked. In this instance, any content
* will be attempted to be fixed. For example:
*
* <pre>
* X-XSS-Protection: 1
* </pre>
*
* If {@link XXssProtectionHeaderWriter.HeaderValue#ENABLED_MODE_BLOCK}, will
* contain a value of 1 and will specify mode as blocked. The content will be
* replaced with "#". For example:
*
* <pre>
* X-XSS-Protection: 1; mode=block
* </pre>
* @param headerValue the new header value
* @since 5.8
*/
public XXssConfig headerValue(XXssProtectionHeaderWriter.HeaderValue headerValue) {
this.writer.setHeaderValue(headerValue);
return this;
}
/**
* Disables X-XSS-Protection header (does not include it)
* @return the {@link HeadersConfigurer} for additional configuration
*/
public HeadersConfigurer<H> disable() {
this.writer = null;
return HeadersConfigurer.this;
}
/**
* Ensures the X-XSS-Protection header is enabled if it is not already.
* @return the {@link XXssConfig} for additional customization
*/
private XXssConfig enable() {
if (this.writer == null) {
this.writer = new XXssProtectionHeaderWriter();
}
return this;
}
}
public final
|
XXssConfig
|
java
|
redisson__redisson
|
redisson/src/main/java/org/redisson/cache/StdCachedValue.java
|
{
"start": 727,
"end": 2587
}
|
class ____<K, V> implements CachedValue<K, V> {
private final K key;
private final V value;
private final long ttl;
private final long maxIdleTime;
private long creationTime;
private long lastAccess;
private final WrappedLock lock = new WrappedLock();
public StdCachedValue(K key, V value, long ttl, long maxIdleTime) {
this.value = value;
this.ttl = ttl;
this.key = key;
this.maxIdleTime = maxIdleTime;
if (ttl != 0 || maxIdleTime != 0) {
creationTime = System.currentTimeMillis();
lastAccess = creationTime;
}
}
@Override
public boolean isExpired() {
if (maxIdleTime == 0 && ttl == 0) {
return false;
}
long currentTime = System.currentTimeMillis();
if (ttl != 0 && creationTime + ttl < currentTime) {
return true;
}
if (maxIdleTime != 0 && lastAccess + maxIdleTime < currentTime) {
return true;
}
return false;
}
@Override
public long getExpireTime() {
if (maxIdleTime == 0 && ttl == 0) {
return 0;
}
long expireTime = Long.MAX_VALUE;
if (maxIdleTime != 0) {
expireTime = Math.min(expireTime, lastAccess + maxIdleTime);
}
if (ttl != 0) {
expireTime = Math.min(expireTime, creationTime + ttl);
}
return expireTime;
}
@Override
public K getKey() {
return key;
}
@Override
public V getValue() {
lastAccess = System.currentTimeMillis();
return value;
}
@Override
public String toString() {
return "CachedValue [key=" + key + ", value=" + value + "]";
}
@Override
public WrappedLock getLock() {
return lock;
}
}
|
StdCachedValue
|
java
|
spring-projects__spring-security
|
oauth2/oauth2-core/src/main/java/org/springframework/security/oauth2/core/oidc/user/OidcUserAuthority.java
|
{
"start": 1329,
"end": 5618
}
|
class ____ extends OAuth2UserAuthority {
@Serial
private static final long serialVersionUID = -4675866280835753141L;
private final OidcIdToken idToken;
private final OidcUserInfo userInfo;
/**
* Constructs a {@code OidcUserAuthority} using the provided parameters.
* @param idToken the {@link OidcIdToken ID Token} containing claims about the user
*/
public OidcUserAuthority(OidcIdToken idToken) {
this(idToken, null);
}
/**
* Constructs a {@code OidcUserAuthority} using the provided parameters and defaults
* {@link #getAuthority()} to {@code OIDC_USER}.
* @param idToken the {@link OidcIdToken ID Token} containing claims about the user
* @param userInfo the {@link OidcUserInfo UserInfo} containing claims about the user,
* may be {@code null}
*/
public OidcUserAuthority(OidcIdToken idToken, OidcUserInfo userInfo) {
this("OIDC_USER", idToken, userInfo);
}
/**
* Constructs a {@code OidcUserAuthority} using the provided parameters and defaults
* {@link #getAuthority()} to {@code OIDC_USER}.
* @param idToken the {@link OidcIdToken ID Token} containing claims about the user
* @param userInfo the {@link OidcUserInfo UserInfo} containing claims about the user,
* may be {@code null}
* @param userNameAttributeName the attribute name used to access the user's name from
* the attributes
* @since 6.4
*/
public OidcUserAuthority(OidcIdToken idToken, OidcUserInfo userInfo, @Nullable String userNameAttributeName) {
this("OIDC_USER", idToken, userInfo, userNameAttributeName);
}
/**
* Constructs a {@code OidcUserAuthority} using the provided parameters.
* @param authority the authority granted to the user
* @param idToken the {@link OidcIdToken ID Token} containing claims about the user
* @param userInfo the {@link OidcUserInfo UserInfo} containing claims about the user,
* may be {@code null}
*/
public OidcUserAuthority(String authority, OidcIdToken idToken, OidcUserInfo userInfo) {
this(authority, idToken, userInfo, IdTokenClaimNames.SUB);
}
/**
* Constructs a {@code OidcUserAuthority} using the provided parameters.
* @param authority the authority granted to the user
* @param idToken the {@link OidcIdToken ID Token} containing claims about the user
* @param userInfo the {@link OidcUserInfo UserInfo} containing claims about the user,
* may be {@code null}
* @param userNameAttributeName the attribute name used to access the user's name from
* the attributes
* @since 6.4
*/
public OidcUserAuthority(String authority, OidcIdToken idToken, OidcUserInfo userInfo,
@Nullable String userNameAttributeName) {
super(authority, collectClaims(idToken, userInfo), userNameAttributeName);
this.idToken = idToken;
this.userInfo = userInfo;
}
/**
* Returns the {@link OidcIdToken ID Token} containing claims about the user.
* @return the {@link OidcIdToken} containing claims about the user.
*/
public OidcIdToken getIdToken() {
return this.idToken;
}
/**
* Returns the {@link OidcUserInfo UserInfo} containing claims about the user, may be
* {@code null}.
* @return the {@link OidcUserInfo} containing claims about the user, or {@code null}
*/
public OidcUserInfo getUserInfo() {
return this.userInfo;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || this.getClass() != obj.getClass()) {
return false;
}
if (!super.equals(obj)) {
return false;
}
OidcUserAuthority that = (OidcUserAuthority) obj;
if (!this.getIdToken().equals(that.getIdToken())) {
return false;
}
return (this.getUserInfo() != null) ? this.getUserInfo().equals(that.getUserInfo())
: that.getUserInfo() == null;
}
@Override
public int hashCode() {
int result = super.hashCode();
result = 31 * result + this.getIdToken().hashCode();
result = 31 * result + ((this.getUserInfo() != null) ? this.getUserInfo().hashCode() : 0);
return result;
}
static Map<String, Object> collectClaims(OidcIdToken idToken, OidcUserInfo userInfo) {
Assert.notNull(idToken, "idToken cannot be null");
Map<String, Object> claims = new HashMap<>();
if (userInfo != null) {
claims.putAll(userInfo.getClaims());
}
claims.putAll(idToken.getClaims());
return claims;
}
}
|
OidcUserAuthority
|
java
|
apache__kafka
|
clients/src/test/java/org/apache/kafka/common/security/authenticator/TestDigestLoginModule.java
|
{
"start": 1586,
"end": 3398
}
|
class ____ implements AuthenticateCallbackHandler {
@Override
public void configure(Map<String, ?> configs, String saslMechanism, List<AppConfigurationEntry> jaasConfigEntries) {
}
@Override
public void handle(Callback[] callbacks) {
String username = null;
for (Callback callback : callbacks) {
if (callback instanceof NameCallback) {
NameCallback nameCallback = (NameCallback) callback;
if (TestJaasConfig.USERNAME.equals(nameCallback.getDefaultName())) {
nameCallback.setName(nameCallback.getDefaultName());
username = TestJaasConfig.USERNAME;
}
} else if (callback instanceof PasswordCallback) {
PasswordCallback passwordCallback = (PasswordCallback) callback;
if (TestJaasConfig.USERNAME.equals(username))
passwordCallback.setPassword(TestJaasConfig.PASSWORD.toCharArray());
} else if (callback instanceof RealmCallback) {
RealmCallback realmCallback = (RealmCallback) callback;
realmCallback.setText(realmCallback.getDefaultText());
} else if (callback instanceof AuthorizeCallback) {
AuthorizeCallback authCallback = (AuthorizeCallback) callback;
if (TestJaasConfig.USERNAME.equals(authCallback.getAuthenticationID())) {
authCallback.setAuthorized(true);
authCallback.setAuthorizedID(authCallback.getAuthenticationID());
}
}
}
}
@Override
public void close() {
}
}
}
|
DigestServerCallbackHandler
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/context/annotation/ComponentScanAnnotationIntegrationTests.java
|
{
"start": 21633,
"end": 21908
}
|
class ____ {}
@Configuration
@ComponentScan(basePackages = "example.scannable",
scopedProxy = ScopedProxyMode.INTERFACES,
useDefaultFilters = false,
includeFilters = @Filter(type=FilterType.REGEX, pattern = "((?:[a-z.]+))ScopedProxyTestBean"))
|
ComponentScanWithScopedProxy
|
java
|
spring-projects__spring-boot
|
module/spring-boot-jdbc/src/main/java/org/springframework/boot/jdbc/autoconfigure/DataSourceProperties.java
|
{
"start": 1682,
"end": 9731
}
|
class ____ implements BeanClassLoaderAware, InitializingBean {
@SuppressWarnings("NullAway.Init")
private ClassLoader classLoader;
/**
* Whether to generate a random datasource name.
*/
private boolean generateUniqueName = true;
/**
* Datasource name to use if "generate-unique-name" is false. Defaults to "testdb"
* when using an embedded database, otherwise null.
*/
private @Nullable String name;
/**
* Fully qualified name of the DataSource implementation to use. By default, a
* connection pool implementation is auto-detected from the classpath.
*/
private @Nullable Class<? extends DataSource> type;
/**
* Fully qualified name of the JDBC driver. Auto-detected based on the URL by default.
*/
private @Nullable String driverClassName;
/**
* JDBC URL of the database.
*/
private @Nullable String url;
/**
* Login username of the database.
*/
private @Nullable String username;
/**
* Login password of the database.
*/
private @Nullable String password;
/**
* JNDI location of the datasource. Class, url, username and password are ignored when
* set.
*/
private @Nullable String jndiName;
/**
* Connection details for an embedded database. Defaults to the most suitable embedded
* database that is available on the classpath.
*/
@SuppressWarnings("NullAway.Init")
private EmbeddedDatabaseConnection embeddedDatabaseConnection;
private Xa xa = new Xa();
private @Nullable String uniqueName;
@Override
public void setBeanClassLoader(ClassLoader classLoader) {
this.classLoader = classLoader;
}
@Override
public void afterPropertiesSet() throws Exception {
if (this.embeddedDatabaseConnection == null) {
this.embeddedDatabaseConnection = EmbeddedDatabaseConnection.get(this.classLoader);
}
}
/**
* Initialize a {@link DataSourceBuilder} with the state of this instance.
* @return a {@link DataSourceBuilder} initialized with the customizations defined on
* this instance
*/
public DataSourceBuilder<?> initializeDataSourceBuilder() {
return DataSourceBuilder.create(getClassLoader())
.type(getType())
.driverClassName(determineDriverClassName())
.url(determineUrl())
.username(determineUsername())
.password(determinePassword());
}
public boolean isGenerateUniqueName() {
return this.generateUniqueName;
}
public void setGenerateUniqueName(boolean generateUniqueName) {
this.generateUniqueName = generateUniqueName;
}
public @Nullable String getName() {
return this.name;
}
public void setName(@Nullable String name) {
this.name = name;
}
public @Nullable Class<? extends DataSource> getType() {
return this.type;
}
public void setType(@Nullable Class<? extends DataSource> type) {
this.type = type;
}
/**
* Return the configured driver or {@code null} if none was configured.
* @return the configured driver
* @see #determineDriverClassName()
*/
public @Nullable String getDriverClassName() {
return this.driverClassName;
}
public void setDriverClassName(@Nullable String driverClassName) {
this.driverClassName = driverClassName;
}
/**
* Determine the driver to use based on this configuration and the environment.
* @return the driver to use
*/
public String determineDriverClassName() {
String driverClassName = findDriverClassName();
if (!StringUtils.hasText(driverClassName)) {
throw new DataSourceBeanCreationException("Failed to determine a suitable driver class", this,
this.embeddedDatabaseConnection);
}
return driverClassName;
}
@Nullable String findDriverClassName() {
if (StringUtils.hasText(this.driverClassName)) {
Assert.state(driverClassIsLoadable(this.driverClassName),
() -> "Cannot load driver class: " + this.driverClassName);
return this.driverClassName;
}
String driverClassName = null;
if (StringUtils.hasText(this.url)) {
driverClassName = DatabaseDriver.fromJdbcUrl(this.url).getDriverClassName();
}
if (!StringUtils.hasText(driverClassName)) {
driverClassName = this.embeddedDatabaseConnection.getDriverClassName();
}
return driverClassName;
}
private boolean driverClassIsLoadable(String driverClassName) {
try {
ClassUtils.forName(driverClassName, null);
return true;
}
catch (UnsupportedClassVersionError ex) {
// Driver library has been compiled with a later JDK, propagate error
throw ex;
}
catch (Throwable ex) {
return false;
}
}
/**
* Return the configured url or {@code null} if none was configured.
* @return the configured url
* @see #determineUrl()
*/
public @Nullable String getUrl() {
return this.url;
}
public void setUrl(@Nullable String url) {
this.url = url;
}
/**
* Determine the url to use based on this configuration and the environment.
* @return the url to use
*/
public String determineUrl() {
if (StringUtils.hasText(this.url)) {
return this.url;
}
String databaseName = determineDatabaseName();
String url = (databaseName != null) ? this.embeddedDatabaseConnection.getUrl(databaseName) : null;
if (!StringUtils.hasText(url)) {
throw new DataSourceBeanCreationException("Failed to determine suitable jdbc url", this,
this.embeddedDatabaseConnection);
}
return url;
}
/**
* Determine the name to used based on this configuration.
* @return the database name to use or {@code null}
*/
public @Nullable String determineDatabaseName() {
if (this.generateUniqueName) {
if (this.uniqueName == null) {
this.uniqueName = UUID.randomUUID().toString();
}
return this.uniqueName;
}
if (StringUtils.hasLength(this.name)) {
return this.name;
}
if (this.embeddedDatabaseConnection != EmbeddedDatabaseConnection.NONE) {
return "testdb";
}
return null;
}
/**
* Return the configured username or {@code null} if none was configured.
* @return the configured username
* @see #determineUsername()
*/
public @Nullable String getUsername() {
return this.username;
}
public void setUsername(@Nullable String username) {
this.username = username;
}
/**
* Determine the username to use based on this configuration and the environment.
* @return the username to use
*/
public @Nullable String determineUsername() {
if (StringUtils.hasText(this.username)) {
return this.username;
}
if (EmbeddedDatabaseConnection.isEmbedded(findDriverClassName(), determineUrl())) {
return "sa";
}
return null;
}
/**
* Return the configured password or {@code null} if none was configured.
* @return the configured password
* @see #determinePassword()
*/
public @Nullable String getPassword() {
return this.password;
}
public void setPassword(@Nullable String password) {
this.password = password;
}
/**
* Determine the password to use based on this configuration and the environment.
* @return the password to use
*/
public @Nullable String determinePassword() {
if (StringUtils.hasText(this.password)) {
return this.password;
}
if (EmbeddedDatabaseConnection.isEmbedded(findDriverClassName(), determineUrl())) {
return "";
}
return null;
}
public @Nullable String getJndiName() {
return this.jndiName;
}
/**
* Allows the DataSource to be managed by the container and obtained through JNDI. The
* {@code URL}, {@code driverClassName}, {@code username} and {@code password} fields
* will be ignored when using JNDI lookups.
* @param jndiName the JNDI name
*/
public void setJndiName(@Nullable String jndiName) {
this.jndiName = jndiName;
}
public EmbeddedDatabaseConnection getEmbeddedDatabaseConnection() {
return this.embeddedDatabaseConnection;
}
public void setEmbeddedDatabaseConnection(EmbeddedDatabaseConnection embeddedDatabaseConnection) {
this.embeddedDatabaseConnection = embeddedDatabaseConnection;
}
public ClassLoader getClassLoader() {
return this.classLoader;
}
public Xa getXa() {
return this.xa;
}
public void setXa(Xa xa) {
this.xa = xa;
}
/**
* XA Specific datasource settings.
*/
public static
|
DataSourceProperties
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/ref/RefTest16.java
|
{
"start": 136,
"end": 985
}
|
class ____ extends TestCase {
public void test_0() throws Exception {
Person pA = new Person("a");
Person pB = new Person("b");
Family fA = new Family();
fA.setMembers(new Person[] { pA, pB });
fA.setMaster(pA);
Person pC = new Person("c");
Person pD = new Person("d");
Family fB = new Family();
fB.setMembers(new Person[] { pC, pD });
fB.setMaster(pC);
Family[] familyArray = new Family[] { fA, fB };
String text = JSON.toJSONString(familyArray);
System.out.println(text);
Family[] result = JSON.parseObject(text, Family[].class);
Assert.assertSame(result[0].getMaster(), result[0].getMembers()[0]);
Assert.assertSame(result[1].getMaster(), result[1].getMembers()[0]);
}
public static
|
RefTest16
|
java
|
apache__maven
|
compat/maven-compat/src/test/java/org/apache/maven/repository/legacy/StringWagon.java
|
{
"start": 1594,
"end": 3080
}
|
class ____ extends StreamWagon {
private Map<String, String> expectedContent = new HashMap<>();
public void addExpectedContent(String resourceName, String expectedContent) {
this.expectedContent.put(resourceName, expectedContent);
}
public String[] getSupportedProtocols() {
return new String[] {"string"};
}
@Override
public void closeConnection() throws ConnectionException {}
@Override
public void fillInputData(InputData inputData)
throws TransferFailedException, ResourceDoesNotExistException, AuthorizationException {
Resource resource = inputData.getResource();
String content = expectedContent.get(resource.getName());
if (content != null) {
resource.setContentLength(content.length());
resource.setLastModified(System.currentTimeMillis());
inputData.setInputStream(new ByteArrayInputStream(content.getBytes(StandardCharsets.UTF_8)));
} else {
throw new ResourceDoesNotExistException("No content provided for " + resource.getName());
}
}
@Override
public void fillOutputData(OutputData outputData) throws TransferFailedException {
outputData.setOutputStream(new ByteArrayOutputStream());
}
@Override
protected void openConnectionInternal() throws ConnectionException, AuthenticationException {}
public void clearExpectedContent() {
expectedContent.clear();
}
}
|
StringWagon
|
java
|
quarkusio__quarkus
|
integration-tests/main/src/test/java/io/quarkus/it/main/QuarkusTestNestedPerClassLifecycleTestCase.java
|
{
"start": 1310,
"end": 1456
}
|
class ____ {
@Test
public void verifyCounter() {
assertEquals(2, counter.incrementAndGet());
}
}
}
|
NestedTest
|
java
|
spring-projects__spring-security
|
oauth2/oauth2-authorization-server/src/main/java/org/springframework/security/oauth2/server/authorization/oidc/http/converter/OidcClientRegistrationHttpMessageConverter.java
|
{
"start": 2383,
"end": 5875
}
|
class ____ extends AbstractHttpMessageConverter<OidcClientRegistration> {
private static final ParameterizedTypeReference<Map<String, Object>> STRING_OBJECT_MAP = new ParameterizedTypeReference<>() {
};
private final GenericHttpMessageConverter<Object> jsonMessageConverter = HttpMessageConverters
.getJsonMessageConverter();
private Converter<Map<String, Object>, OidcClientRegistration> clientRegistrationConverter = new MapOidcClientRegistrationConverter();
private Converter<OidcClientRegistration, Map<String, Object>> clientRegistrationParametersConverter = new OidcClientRegistrationMapConverter();
public OidcClientRegistrationHttpMessageConverter() {
super(MediaType.APPLICATION_JSON, new MediaType("application", "*+json"));
}
@Override
protected boolean supports(Class<?> clazz) {
return OidcClientRegistration.class.isAssignableFrom(clazz);
}
@Override
@SuppressWarnings("unchecked")
protected OidcClientRegistration readInternal(Class<? extends OidcClientRegistration> clazz,
HttpInputMessage inputMessage) throws HttpMessageNotReadableException {
try {
Map<String, Object> clientRegistrationParameters = (Map<String, Object>) this.jsonMessageConverter
.read(STRING_OBJECT_MAP.getType(), null, inputMessage);
return this.clientRegistrationConverter.convert(clientRegistrationParameters);
}
catch (Exception ex) {
throw new HttpMessageNotReadableException(
"An error occurred reading the OpenID Client Registration: " + ex.getMessage(), ex, inputMessage);
}
}
@Override
protected void writeInternal(OidcClientRegistration clientRegistration, HttpOutputMessage outputMessage)
throws HttpMessageNotWritableException {
try {
Map<String, Object> clientRegistrationParameters = this.clientRegistrationParametersConverter
.convert(clientRegistration);
this.jsonMessageConverter.write(clientRegistrationParameters, STRING_OBJECT_MAP.getType(),
MediaType.APPLICATION_JSON, outputMessage);
}
catch (Exception ex) {
throw new HttpMessageNotWritableException(
"An error occurred writing the OpenID Client Registration: " + ex.getMessage(), ex);
}
}
/**
* Sets the {@link Converter} used for converting the OpenID Client Registration
* parameters to an {@link OidcClientRegistration}.
* @param clientRegistrationConverter the {@link Converter} used for converting to an
* {@link OidcClientRegistration}
*/
public final void setClientRegistrationConverter(
Converter<Map<String, Object>, OidcClientRegistration> clientRegistrationConverter) {
Assert.notNull(clientRegistrationConverter, "clientRegistrationConverter cannot be null");
this.clientRegistrationConverter = clientRegistrationConverter;
}
/**
* Sets the {@link Converter} used for converting the {@link OidcClientRegistration}
* to a {@code Map} representation of the OpenID Client Registration parameters.
* @param clientRegistrationParametersConverter the {@link Converter} used for
* converting to a {@code Map} representation of the OpenID Client Registration
* parameters
*/
public final void setClientRegistrationParametersConverter(
Converter<OidcClientRegistration, Map<String, Object>> clientRegistrationParametersConverter) {
Assert.notNull(clientRegistrationParametersConverter, "clientRegistrationParametersConverter cannot be null");
this.clientRegistrationParametersConverter = clientRegistrationParametersConverter;
}
private static final
|
OidcClientRegistrationHttpMessageConverter
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/common/io/stream/AbstractWriteableEnumTestCase.java
|
{
"start": 1075,
"end": 1234
}
|
enum ____ a stream often uses the ordinal value.
*/
public abstract void testValidOrdinals();
/**
* Test that the conversion from a string to
|
to
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/dialect/lock/PessimisticForceIncrementLockingStrategy.java
|
{
"start": 669,
"end": 2030
}
|
class ____ implements LockingStrategy {
private final EntityPersister lockable;
private final LockMode lockMode;
/**
* Construct locking strategy.
*
* @param lockable The metadata for the entity to be locked.
* @param lockMode Indicates the type of lock to be acquired.
*/
public PessimisticForceIncrementLockingStrategy(EntityPersister lockable, LockMode lockMode) {
this.lockable = lockable;
this.lockMode = lockMode;
// ForceIncrement can be used for PESSIMISTIC_READ, PESSIMISTIC_WRITE or PESSIMISTIC_FORCE_INCREMENT
if ( lockMode.lessThan( LockMode.PESSIMISTIC_READ ) ) {
throw new HibernateException( "[" + lockMode + "] not valid for [" + lockable.getEntityName() + "]" );
}
}
@Override
public void lock(Object id, Object version, Object object, int timeout, SharedSessionContractImplementor session) {
if ( !lockable.isVersioned() ) {
throw new HibernateException( "[" + lockMode + "] not supported for non-versioned entities [" + lockable.getEntityName() + "]" );
}
final var entry = session.getPersistenceContextInternal().getEntry( object );
OptimisticLockHelper.forceVersionIncrement( object, entry, session );
}
/**
* Retrieve the specific lock mode defined.
*
* @return The specific lock mode.
*/
protected LockMode getLockMode() {
return lockMode;
}
}
|
PessimisticForceIncrementLockingStrategy
|
java
|
netty__netty
|
codec-http2/src/test/java/io/netty/handler/codec/http2/Http2DefaultFramesTest.java
|
{
"start": 853,
"end": 1790
}
|
class ____ {
@SuppressWarnings("SimplifiableJUnitAssertion")
@Test
public void testEqualOperation() {
// in this case, 'goAwayFrame' and 'unknownFrame' will also have an EMPTY_BUFFER data
// so we want to check that 'dflt' will not consider them equal.
DefaultHttp2GoAwayFrame goAwayFrame = new DefaultHttp2GoAwayFrame(1);
DefaultHttp2UnknownFrame unknownFrame = new DefaultHttp2UnknownFrame((byte) 1, new Http2Flags((short) 1));
DefaultByteBufHolder dflt = new DefaultByteBufHolder(Unpooled.EMPTY_BUFFER);
try {
// not using 'assertNotEquals' to be explicit about which object we are calling .equals() on
assertFalse(dflt.equals(goAwayFrame));
assertFalse(dflt.equals(unknownFrame));
} finally {
goAwayFrame.release();
unknownFrame.release();
dflt.release();
}
}
}
|
Http2DefaultFramesTest
|
java
|
quarkusio__quarkus
|
independent-projects/resteasy-reactive/server/runtime/src/main/java/org/jboss/resteasy/reactive/server/spi/ResteasyReactiveContainerRequestFilter.java
|
{
"start": 197,
"end": 534
}
|
interface ____ extends ContainerRequestFilter {
@Override
default void filter(ContainerRequestContext requestContext) throws IOException {
filter((ResteasyReactiveContainerRequestContext) requestContext);
}
void filter(ResteasyReactiveContainerRequestContext requestContext);
}
|
ResteasyReactiveContainerRequestFilter
|
java
|
spring-projects__spring-framework
|
spring-r2dbc/src/main/java/org/springframework/r2dbc/core/NamedParameterExpander.java
|
{
"start": 1478,
"end": 3949
}
|
class ____ {
/**
* Default maximum number of entries for the SQL cache: 256.
*/
public static final int DEFAULT_CACHE_LIMIT = 256;
/** Cache of original SQL String to ParsedSql representation. */
private final ConcurrentLruCache<String, ParsedSql> parsedSqlCache =
new ConcurrentLruCache<>(DEFAULT_CACHE_LIMIT, NamedParameterUtils::parseSqlStatement);
/**
* Obtain a parsed representation of the given SQL statement.
* <p>The default implementation uses an LRU cache with an upper limit of 256 entries.
* @param sql the original SQL statement
* @return a representation of the parsed SQL statement
*/
private ParsedSql getParsedSql(String sql) {
return this.parsedSqlCache.get(sql);
}
/**
* Parse the SQL statement and locate any placeholders or named parameters.
* Named parameters are substituted for a native placeholder, and any
* select list is expanded to the required number of placeholders. Select
* lists may contain an array of objects, and in that case the placeholders
* will be grouped and enclosed with parentheses. This allows for the use of
* "expression lists" in the SQL statement like:
* <pre class="code">
* select id, name, state from table where (name, age) in (('John', 35), ('Ann', 50))
* </pre>
* <p>The parameter values passed in are used to determine the number of
* placeholders to be used for a select list. Select lists should be limited
* to 100 or fewer elements. A larger number of elements is not guaranteed to be
* supported by the database and is strictly vendor-dependent.
* @param sql the original SQL statement
* @param bindMarkersFactory the bind marker factory
* @param paramSource the source for named parameters
* @return the expanded sql that accepts bind parameters and allows for execution
* without further translation wrapped as {@link PreparedOperation}.
*/
public PreparedOperation<String> expand(
String sql, BindMarkersFactory bindMarkersFactory, BindParameterSource paramSource) {
ParsedSql parsedSql = getParsedSql(sql);
return NamedParameterUtils.substituteNamedParameters(parsedSql, bindMarkersFactory, paramSource);
}
/**
* Parse the SQL statement and locate any placeholders or named parameters.
* Named parameters are returned as result of this method invocation.
* @return the parameter names
*/
public List<String> getParameterNames(String sql) {
return getParsedSql(sql).getParameterNames();
}
}
|
NamedParameterExpander
|
java
|
spring-projects__spring-framework
|
spring-web/src/main/java/org/springframework/http/server/reactive/ServerHttpRequest.java
|
{
"start": 1309,
"end": 3412
}
|
interface ____ extends HttpRequest, ReactiveHttpInputMessage {
/**
* Return an id that represents the underlying connection, if available,
* or the request for the purpose of correlating log messages.
* @since 5.1
* @see org.springframework.web.server.ServerWebExchange#getLogPrefix()
*/
String getId();
/**
* Returns a structured representation of the full request path up to but
* not including the {@link #getQueryParams() query}.
* <p>The returned path is subdivided into a
* {@link RequestPath#contextPath()} portion and the remaining
* {@link RequestPath#pathWithinApplication() pathWithinApplication} portion.
* The latter can be passed into methods of
* {@link org.springframework.web.util.pattern.PathPattern} for path
* matching purposes.
*/
RequestPath getPath();
/**
* Return a read-only map with parsed and decoded query parameter values.
*/
MultiValueMap<String, String> getQueryParams();
/**
* Return a read-only map of cookies sent by the client.
*/
MultiValueMap<String, HttpCookie> getCookies();
/**
* Return the local address the request was accepted on, if available.
* @since 5.2.3
*/
default @Nullable InetSocketAddress getLocalAddress() {
return null;
}
/**
* Return the remote address where this request is connected to, if available.
*/
default @Nullable InetSocketAddress getRemoteAddress() {
return null;
}
/**
* Return the SSL session information if the request has been transmitted
* over a secure protocol including SSL certificates, if available.
* @return the session information, or {@code null} if none available
* @since 5.0.2
*/
default @Nullable SslInfo getSslInfo() {
return null;
}
/**
* Return a builder to mutate properties of this request by wrapping it
* with {@link ServerHttpRequestDecorator} and returning either mutated
* values or delegating back to this instance.
*/
default ServerHttpRequest.Builder mutate() {
return new DefaultServerHttpRequestBuilder(this);
}
/**
* Builder for mutating an existing {@link ServerHttpRequest}.
*/
|
ServerHttpRequest
|
java
|
assertj__assertj-core
|
assertj-core/src/test/java/org/assertj/core/api/InstanceOfAssertFactoriesTest.java
|
{
"start": 74089,
"end": 74769
}
|
class ____ {
private final Object actual = "string";
@Test
void createAssert() {
// WHEN
AbstractCharSequenceAssert<?, ? extends CharSequence> result = CHAR_SEQUENCE.createAssert(actual);
// THEN
result.startsWith("str");
}
@Test
void createAssert_with_ValueProvider() {
// GIVEN
ValueProvider<?> valueProvider = mockThatDelegatesTo(type -> actual);
// WHEN
AbstractCharSequenceAssert<?, ? extends CharSequence> result = CHAR_SEQUENCE.createAssert(valueProvider);
// THEN
result.startsWith("str");
verify(valueProvider).apply(CharSequence.class);
}
}
@Nested
|
CharSequence_Factory
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/LastLongByTimestampAggregatorFunction.java
|
{
"start": 1010,
"end": 10081
}
|
class ____ implements AggregatorFunction {
private static final List<IntermediateStateDesc> INTERMEDIATE_STATE_DESC = List.of(
new IntermediateStateDesc("timestamps", ElementType.LONG),
new IntermediateStateDesc("values", ElementType.LONG),
new IntermediateStateDesc("seen", ElementType.BOOLEAN) );
private final DriverContext driverContext;
private final LongLongState state;
private final List<Integer> channels;
public LastLongByTimestampAggregatorFunction(DriverContext driverContext, List<Integer> channels,
LongLongState state) {
this.driverContext = driverContext;
this.channels = channels;
this.state = state;
}
public static LastLongByTimestampAggregatorFunction create(DriverContext driverContext,
List<Integer> channels) {
return new LastLongByTimestampAggregatorFunction(driverContext, channels, LastLongByTimestampAggregator.initSingle(driverContext));
}
public static List<IntermediateStateDesc> intermediateStateDesc() {
return INTERMEDIATE_STATE_DESC;
}
@Override
public int intermediateBlockCount() {
return INTERMEDIATE_STATE_DESC.size();
}
@Override
public void addRawInput(Page page, BooleanVector mask) {
if (mask.allFalse()) {
// Entire page masked away
} else if (mask.allTrue()) {
addRawInputNotMasked(page);
} else {
addRawInputMasked(page, mask);
}
}
private void addRawInputMasked(Page page, BooleanVector mask) {
LongBlock valueBlock = page.getBlock(channels.get(0));
LongBlock timestampBlock = page.getBlock(channels.get(1));
LongVector valueVector = valueBlock.asVector();
if (valueVector == null) {
addRawBlock(valueBlock, timestampBlock, mask);
return;
}
LongVector timestampVector = timestampBlock.asVector();
if (timestampVector == null) {
addRawBlock(valueBlock, timestampBlock, mask);
return;
}
addRawVector(valueVector, timestampVector, mask);
}
private void addRawInputNotMasked(Page page) {
LongBlock valueBlock = page.getBlock(channels.get(0));
LongBlock timestampBlock = page.getBlock(channels.get(1));
LongVector valueVector = valueBlock.asVector();
if (valueVector == null) {
addRawBlock(valueBlock, timestampBlock);
return;
}
LongVector timestampVector = timestampBlock.asVector();
if (timestampVector == null) {
addRawBlock(valueBlock, timestampBlock);
return;
}
addRawVector(valueVector, timestampVector);
}
private void addRawVector(LongVector valueVector, LongVector timestampVector) {
// Find the first value up front in the Vector path which is more complex but should be faster
int valuesPosition = 0;
while (state.seen() == false && valuesPosition < valueVector.getPositionCount()) {
long valueValue = valueVector.getLong(valuesPosition);
long timestampValue = timestampVector.getLong(valuesPosition);
LastLongByTimestampAggregator.first(state, valueValue, timestampValue);
valuesPosition++;
state.seen(true);
break;
}
while (valuesPosition < valueVector.getPositionCount()) {
long valueValue = valueVector.getLong(valuesPosition);
long timestampValue = timestampVector.getLong(valuesPosition);
LastLongByTimestampAggregator.combine(state, valueValue, timestampValue);
valuesPosition++;
}
}
private void addRawVector(LongVector valueVector, LongVector timestampVector,
BooleanVector mask) {
// Find the first value up front in the Vector path which is more complex but should be faster
int valuesPosition = 0;
while (state.seen() == false && valuesPosition < valueVector.getPositionCount()) {
if (mask.getBoolean(valuesPosition) == false) {
valuesPosition++;
continue;
}
long valueValue = valueVector.getLong(valuesPosition);
long timestampValue = timestampVector.getLong(valuesPosition);
LastLongByTimestampAggregator.first(state, valueValue, timestampValue);
valuesPosition++;
state.seen(true);
break;
}
while (valuesPosition < valueVector.getPositionCount()) {
if (mask.getBoolean(valuesPosition) == false) {
valuesPosition++;
continue;
}
long valueValue = valueVector.getLong(valuesPosition);
long timestampValue = timestampVector.getLong(valuesPosition);
LastLongByTimestampAggregator.combine(state, valueValue, timestampValue);
valuesPosition++;
}
}
private void addRawBlock(LongBlock valueBlock, LongBlock timestampBlock) {
for (int p = 0; p < valueBlock.getPositionCount(); p++) {
int valueValueCount = valueBlock.getValueCount(p);
if (valueValueCount == 0) {
continue;
}
int timestampValueCount = timestampBlock.getValueCount(p);
if (timestampValueCount == 0) {
continue;
}
int valueStart = valueBlock.getFirstValueIndex(p);
int valueEnd = valueStart + valueValueCount;
for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) {
long valueValue = valueBlock.getLong(valueOffset);
int timestampStart = timestampBlock.getFirstValueIndex(p);
int timestampEnd = timestampStart + timestampValueCount;
for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) {
long timestampValue = timestampBlock.getLong(timestampOffset);
// Check seen in every iteration to save on complexity in the Block path
if (state.seen()) {
LastLongByTimestampAggregator.combine(state, valueValue, timestampValue);
} else {
state.seen(true);
LastLongByTimestampAggregator.first(state, valueValue, timestampValue);
}
}
}
}
}
private void addRawBlock(LongBlock valueBlock, LongBlock timestampBlock, BooleanVector mask) {
for (int p = 0; p < valueBlock.getPositionCount(); p++) {
if (mask.getBoolean(p) == false) {
continue;
}
int valueValueCount = valueBlock.getValueCount(p);
if (valueValueCount == 0) {
continue;
}
int timestampValueCount = timestampBlock.getValueCount(p);
if (timestampValueCount == 0) {
continue;
}
int valueStart = valueBlock.getFirstValueIndex(p);
int valueEnd = valueStart + valueValueCount;
for (int valueOffset = valueStart; valueOffset < valueEnd; valueOffset++) {
long valueValue = valueBlock.getLong(valueOffset);
int timestampStart = timestampBlock.getFirstValueIndex(p);
int timestampEnd = timestampStart + timestampValueCount;
for (int timestampOffset = timestampStart; timestampOffset < timestampEnd; timestampOffset++) {
long timestampValue = timestampBlock.getLong(timestampOffset);
// Check seen in every iteration to save on complexity in the Block path
if (state.seen()) {
LastLongByTimestampAggregator.combine(state, valueValue, timestampValue);
} else {
state.seen(true);
LastLongByTimestampAggregator.first(state, valueValue, timestampValue);
}
}
}
}
}
@Override
public void addIntermediateInput(Page page) {
assert channels.size() == intermediateBlockCount();
assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size();
Block timestampsUncast = page.getBlock(channels.get(0));
if (timestampsUncast.areAllValuesNull()) {
return;
}
LongVector timestamps = ((LongBlock) timestampsUncast).asVector();
assert timestamps.getPositionCount() == 1;
Block valuesUncast = page.getBlock(channels.get(1));
if (valuesUncast.areAllValuesNull()) {
return;
}
LongVector values = ((LongBlock) valuesUncast).asVector();
assert values.getPositionCount() == 1;
Block seenUncast = page.getBlock(channels.get(2));
if (seenUncast.areAllValuesNull()) {
return;
}
BooleanVector seen = ((BooleanBlock) seenUncast).asVector();
assert seen.getPositionCount() == 1;
LastLongByTimestampAggregator.combineIntermediate(state, timestamps.getLong(0), values.getLong(0), seen.getBoolean(0));
}
@Override
public void evaluateIntermediate(Block[] blocks, int offset, DriverContext driverContext) {
state.toIntermediate(blocks, offset, driverContext);
}
@Override
public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) {
if (state.seen() == false) {
blocks[offset] = driverContext.blockFactory().newConstantNullBlock(1);
return;
}
blocks[offset] = LastLongByTimestampAggregator.evaluateFinal(state, driverContext);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(getClass().getSimpleName()).append("[");
sb.append("channels=").append(channels);
sb.append("]");
return sb.toString();
}
@Override
public void close() {
state.close();
}
}
|
LastLongByTimestampAggregatorFunction
|
java
|
assertj__assertj-core
|
assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/api/recursive/assertion/RecursiveAssertionAssert_withIntrospectionStrategy_Test.java
|
{
"start": 4044,
"end": 5092
}
|
class ____".formatted(fieldName,
objectClass.getCanonicalName()),
e);
}
}
@Override
public String getDescription() {
return "introspection ignoring Date fields";
}
}
Object objectGraphWithNullDateValue() {
Author pramodSadalage = new Author("Pramod Sadalage", "p.sadalage@recursive.test");
Author martinFowler = new Author("Martin Fowler", "m.fowler@recursive.test");
Author kentBeck = new Author("Kent Beck", "k.beck@recursive.test");
Book noSqlDistilled = new Book("NoSql Distilled", array(pramodSadalage, martinFowler));
pramodSadalage.books.add(noSqlDistilled);
martinFowler.books.add(noSqlDistilled);
Book refactoring = new Book("Refactoring", array(martinFowler, kentBeck));
martinFowler.books.add(refactoring);
kentBeck.books.add(refactoring);
return pramodSadalage;
}
|
earlier
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/ComponentCreatorTest.java
|
{
"start": 15522,
"end": 16206
}
|
class ____ {}",
"}");
CompilerTests.daggerCompiler(componentFile)
.withProcessingOptions(compilerOptions)
.compile(
subject -> {
subject.hasErrorCount(1);
subject.hasErrorContaining(messages.mustBeAbstract()).onSource(componentFile);
});
}
@Test
public void testCreatorOneConstructorWithArgsFails() {
Source componentFile =
preprocessedJavaSource(
"test.SimpleComponent",
"package test;",
"",
"import dagger.Component;",
"import javax.inject.Provider;",
"",
"@Component",
"abstract
|
Builder
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/records/impl/pb/ApplicationAttemptStartDataPBImpl.java
|
{
"start": 1672,
"end": 6497
}
|
class ____ extends
ApplicationAttemptStartData {
ApplicationAttemptStartDataProto proto = ApplicationAttemptStartDataProto
.getDefaultInstance();
ApplicationAttemptStartDataProto.Builder builder = null;
boolean viaProto = false;
public ApplicationAttemptStartDataPBImpl() {
builder = ApplicationAttemptStartDataProto.newBuilder();
}
public ApplicationAttemptStartDataPBImpl(
ApplicationAttemptStartDataProto proto) {
this.proto = proto;
viaProto = true;
}
private ApplicationAttemptId applicationAttemptId;
private ContainerId masterContainerId;
@Override
public ApplicationAttemptId getApplicationAttemptId() {
if (this.applicationAttemptId != null) {
return this.applicationAttemptId;
}
ApplicationAttemptStartDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasApplicationAttemptId()) {
return null;
}
this.applicationAttemptId =
convertFromProtoFormat(p.getApplicationAttemptId());
return this.applicationAttemptId;
}
@Override
public void
setApplicationAttemptId(ApplicationAttemptId applicationAttemptId) {
maybeInitBuilder();
if (applicationAttemptId == null) {
builder.clearApplicationAttemptId();
}
this.applicationAttemptId = applicationAttemptId;
}
@Override
public String getHost() {
ApplicationAttemptStartDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasHost()) {
return null;
}
return p.getHost();
}
@Override
public void setHost(String host) {
maybeInitBuilder();
if (host == null) {
builder.clearHost();
return;
}
builder.setHost(host);
}
@Override
public int getRPCPort() {
ApplicationAttemptStartDataProtoOrBuilder p = viaProto ? proto : builder;
return p.getRpcPort();
}
@Override
public void setRPCPort(int rpcPort) {
maybeInitBuilder();
builder.setRpcPort(rpcPort);
}
@Override
public ContainerId getMasterContainerId() {
if (this.masterContainerId != null) {
return this.masterContainerId;
}
ApplicationAttemptStartDataProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasApplicationAttemptId()) {
return null;
}
this.masterContainerId = convertFromProtoFormat(p.getMasterContainerId());
return this.masterContainerId;
}
@Override
public void setMasterContainerId(ContainerId masterContainerId) {
maybeInitBuilder();
if (masterContainerId == null) {
builder.clearMasterContainerId();
}
this.masterContainerId = masterContainerId;
}
public ApplicationAttemptStartDataProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
@Override
public int hashCode() {
return getProto().hashCode();
}
@Override
public boolean equals(Object other) {
if (other == null)
return false;
if (other.getClass().isAssignableFrom(this.getClass())) {
return this.getProto().equals(this.getClass().cast(other).getProto());
}
return false;
}
@Override
public String toString() {
return TextFormat.shortDebugString(getProto());
}
private void mergeLocalToBuilder() {
if (this.applicationAttemptId != null
&& !((ApplicationAttemptIdPBImpl) this.applicationAttemptId).getProto()
.equals(builder.getApplicationAttemptId())) {
builder
.setApplicationAttemptId(convertToProtoFormat(this.applicationAttemptId));
}
if (this.masterContainerId != null
&& !((ContainerIdPBImpl) this.masterContainerId).getProto().equals(
builder.getMasterContainerId())) {
builder
.setMasterContainerId(convertToProtoFormat(this.masterContainerId));
}
}
private void mergeLocalToProto() {
if (viaProto) {
maybeInitBuilder();
}
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = ApplicationAttemptStartDataProto.newBuilder(proto);
}
viaProto = false;
}
private ApplicationAttemptIdPBImpl convertFromProtoFormat(
ApplicationAttemptIdProto applicationAttemptId) {
return new ApplicationAttemptIdPBImpl(applicationAttemptId);
}
private ApplicationAttemptIdProto convertToProtoFormat(
ApplicationAttemptId applicationAttemptId) {
return ((ApplicationAttemptIdPBImpl) applicationAttemptId).getProto();
}
private ContainerIdPBImpl
convertFromProtoFormat(ContainerIdProto containerId) {
return new ContainerIdPBImpl(containerId);
}
private ContainerIdProto convertToProtoFormat(ContainerId masterContainerId) {
return ((ContainerIdPBImpl) masterContainerId).getProto();
}
}
|
ApplicationAttemptStartDataPBImpl
|
java
|
alibaba__druid
|
core/src/main/java/com/alibaba/druid/proxy/jdbc/ResultSetProxyImpl.java
|
{
"start": 997,
"end": 53502
}
|
class ____ extends WrapperProxyImpl implements ResultSetProxy {
private final ResultSet resultSet;
private final StatementProxy statement;
private final String sql;
protected int cursorIndex;
protected int fetchRowCount;
protected long constructNano;
protected final JdbcSqlStat sqlStat;
private int closeCount;
private long readStringLength;
private long readBytesLength;
private int openInputStreamCount;
private int openReaderCount;
private Map<Integer, Integer> logicColumnMap;
private Map<Integer, Integer> physicalColumnMap;
private List<Integer> hiddenColumns;
private FilterChainImpl filterChain;
public ResultSetProxyImpl(StatementProxy statement, ResultSet resultSet, long id, String sql) {
super(resultSet, id);
this.statement = statement;
this.resultSet = resultSet;
this.sql = sql;
sqlStat = this.statement.getSqlStat();
}
public long getConstructNano() {
return constructNano;
}
public void setConstructNano(long constructNano) {
this.constructNano = constructNano;
}
public void setConstructNano() {
if (this.constructNano <= 0) {
this.constructNano = System.nanoTime();
}
}
public int getCursorIndex() {
return cursorIndex;
}
public int getFetchRowCount() {
return fetchRowCount;
}
public String getSql() {
return sql;
}
public JdbcSqlStat getSqlStat() {
return sqlStat;
}
public ResultSet getResultSetRaw() {
return resultSet;
}
public StatementProxy getStatementProxy() {
return this.statement;
}
public FilterChainImpl createChain() {
FilterChainImpl chain = this.filterChain;
if (chain == null) {
chain = new FilterChainImpl(this.statement.getConnectionProxy().getDirectDataSource());
} else {
this.filterChain = null;
}
return chain;
}
public void recycleFilterChain(FilterChainImpl chain) {
chain.reset();
this.filterChain = chain;
}
@Override
public boolean absolute(int row) throws SQLException {
FilterChainImpl chain = createChain();
boolean value = chain.resultSet_absolute(this, row);
recycleFilterChain(chain);
return value;
}
@Override
public void afterLast() throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_afterLast(this);
recycleFilterChain(chain);
}
@Override
public void beforeFirst() throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_beforeFirst(this);
recycleFilterChain(chain);
}
@Override
public void cancelRowUpdates() throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_cancelRowUpdates(this);
recycleFilterChain(chain);
}
@Override
public void clearWarnings() throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_clearWarnings(this);
recycleFilterChain(chain);
}
@Override
public void close() throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_close(this);
closeCount++;
recycleFilterChain(chain);
}
@Override
public void deleteRow() throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_deleteRow(this);
recycleFilterChain(chain);
}
@Override
public int findColumn(String columnLabel) throws SQLException {
FilterChainImpl chain = createChain();
int value = chain.resultSet_findColumn(this, columnLabel);
recycleFilterChain(chain);
return value;
}
@Override
public boolean first() throws SQLException {
FilterChainImpl chain = createChain();
boolean value = chain.resultSet_first(this);
recycleFilterChain(chain);
return value;
}
@Override
public Array getArray(int columnIndex) throws SQLException {
FilterChainImpl chain = createChain();
Array value = chain.resultSet_getArray(this, columnIndex);
recycleFilterChain(chain);
return value;
}
@Override
public Array getArray(String columnLabel) throws SQLException {
FilterChainImpl chain = createChain();
Array value = chain.resultSet_getArray(this, columnLabel);
recycleFilterChain(chain);
return value;
}
@Override
public InputStream getAsciiStream(int columnIndex) throws SQLException {
FilterChainImpl chain = createChain();
InputStream value = chain.resultSet_getAsciiStream(this, columnIndex);
recycleFilterChain(chain);
return value;
}
@Override
public InputStream getAsciiStream(String columnLabel) throws SQLException {
FilterChainImpl chain = createChain();
InputStream value = chain.resultSet_getAsciiStream(this, columnLabel);
recycleFilterChain(chain);
return value;
}
@Override
public BigDecimal getBigDecimal(int columnIndex) throws SQLException {
FilterChainImpl chain = createChain();
BigDecimal value = chain.resultSet_getBigDecimal(this, columnIndex);
recycleFilterChain(chain);
return value;
}
@Override
public BigDecimal getBigDecimal(String columnLabel) throws SQLException {
FilterChainImpl chain = createChain();
BigDecimal value = chain.resultSet_getBigDecimal(this, columnLabel);
recycleFilterChain(chain);
return value;
}
@Override
public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException {
FilterChainImpl chain = createChain();
BigDecimal value = chain.resultSet_getBigDecimal(this, columnIndex, scale);
recycleFilterChain(chain);
return value;
}
@Override
public BigDecimal getBigDecimal(String columnLabel, int scale) throws SQLException {
FilterChainImpl chain = createChain();
BigDecimal value = chain.resultSet_getBigDecimal(this, columnLabel, scale);
recycleFilterChain(chain);
return value;
}
@Override
public InputStream getBinaryStream(int columnIndex) throws SQLException {
FilterChainImpl chain = createChain();
InputStream value = chain.resultSet_getBinaryStream(this, columnIndex);
recycleFilterChain(chain);
return value;
}
@Override
public InputStream getBinaryStream(String columnLabel) throws SQLException {
FilterChainImpl chain = createChain();
InputStream value = chain.resultSet_getBinaryStream(this, columnLabel);
recycleFilterChain(chain);
return value;
}
@Override
public Blob getBlob(int columnIndex) throws SQLException {
FilterChainImpl chain = createChain();
Blob value = chain.resultSet_getBlob(this, columnIndex);
recycleFilterChain(chain);
return value;
}
@Override
public Blob getBlob(String columnLabel) throws SQLException {
FilterChainImpl chain = createChain();
Blob value = chain.resultSet_getBlob(this, columnLabel);
recycleFilterChain(chain);
return value;
}
@Override
public boolean getBoolean(int columnIndex) throws SQLException {
FilterChainImpl chain = createChain();
boolean value = chain.resultSet_getBoolean(this, columnIndex);
recycleFilterChain(chain);
return value;
}
@Override
public boolean getBoolean(String columnLabel) throws SQLException {
FilterChainImpl chain = createChain();
boolean value = chain.resultSet_getBoolean(this, columnLabel);
recycleFilterChain(chain);
return value;
}
@Override
public byte getByte(int columnIndex) throws SQLException {
FilterChainImpl chain = createChain();
byte value = chain.resultSet_getByte(this, columnIndex);
recycleFilterChain(chain);
return value;
}
@Override
public byte getByte(String columnLabel) throws SQLException {
FilterChainImpl chain = createChain();
byte value = chain.resultSet_getByte(this, columnLabel);
recycleFilterChain(chain);
return value;
}
@Override
public byte[] getBytes(int columnIndex) throws SQLException {
FilterChainImpl chain = createChain();
byte[] value = chain.resultSet_getBytes(this, columnIndex);
recycleFilterChain(chain);
return value;
}
@Override
public byte[] getBytes(String columnLabel) throws SQLException {
FilterChainImpl chain = createChain();
byte[] value = chain.resultSet_getBytes(this, columnLabel);
recycleFilterChain(chain);
return value;
}
@Override
public Reader getCharacterStream(int columnIndex) throws SQLException {
FilterChainImpl chain = createChain();
Reader value = chain.resultSet_getCharacterStream(this, columnIndex);
recycleFilterChain(chain);
return value;
}
@Override
public Reader getCharacterStream(String columnLabel) throws SQLException {
FilterChainImpl chain = createChain();
Reader value = chain.resultSet_getCharacterStream(this, columnLabel);
recycleFilterChain(chain);
return value;
}
@Override
public Clob getClob(int columnIndex) throws SQLException {
FilterChainImpl chain = createChain();
Clob value = chain.resultSet_getClob(this, columnIndex);
recycleFilterChain(chain);
return value;
}
@Override
public Clob getClob(String columnLabel) throws SQLException {
FilterChainImpl chain = createChain();
Clob value = chain.resultSet_getClob(this, columnLabel);
recycleFilterChain(chain);
return value;
}
@Override
public int getConcurrency() throws SQLException {
FilterChainImpl chain = createChain();
int value = chain.resultSet_getConcurrency(this);
recycleFilterChain(chain);
return value;
}
@Override
public String getCursorName() throws SQLException {
FilterChainImpl chain = createChain();
String value = chain.resultSet_getCursorName(this);
recycleFilterChain(chain);
return value;
}
@Override
public Date getDate(int columnIndex) throws SQLException {
FilterChainImpl chain = createChain();
Date value = chain.resultSet_getDate(this, columnIndex);
recycleFilterChain(chain);
return value;
}
@Override
public Date getDate(String columnLabel) throws SQLException {
FilterChainImpl chain = createChain();
Date value = chain.resultSet_getDate(this, columnLabel);
recycleFilterChain(chain);
return value;
}
@Override
public Date getDate(int columnIndex, Calendar cal) throws SQLException {
FilterChainImpl chain = createChain();
Date value = chain.resultSet_getDate(this, columnIndex, cal);
recycleFilterChain(chain);
return value;
}
@Override
public Date getDate(String columnLabel, Calendar cal) throws SQLException {
FilterChainImpl chain = createChain();
Date value = chain.resultSet_getDate(this, columnLabel, cal);
recycleFilterChain(chain);
return value;
}
@Override
public double getDouble(int columnIndex) throws SQLException {
FilterChainImpl chain = createChain();
double value = chain.resultSet_getDouble(this, columnIndex);
recycleFilterChain(chain);
return value;
}
@Override
public double getDouble(String columnLabel) throws SQLException {
FilterChainImpl chain = createChain();
double value = chain.resultSet_getDouble(this, columnLabel);
recycleFilterChain(chain);
return value;
}
@Override
public int getFetchDirection() throws SQLException {
FilterChainImpl chain = createChain();
int value = chain.resultSet_getFetchDirection(this);
recycleFilterChain(chain);
return value;
}
@Override
public int getFetchSize() throws SQLException {
FilterChainImpl chain = createChain();
int value = chain.resultSet_getFetchSize(this);
recycleFilterChain(chain);
return value;
}
@Override
public float getFloat(int columnIndex) throws SQLException {
FilterChainImpl chain = createChain();
float value = chain.resultSet_getFloat(this, columnIndex);
recycleFilterChain(chain);
return value;
}
@Override
public float getFloat(String columnLabel) throws SQLException {
FilterChainImpl chain = createChain();
float value = chain.resultSet_getFloat(this, columnLabel);
recycleFilterChain(chain);
return value;
}
@Override
public int getHoldability() throws SQLException {
FilterChainImpl chain = createChain();
int value = chain.resultSet_getHoldability(this);
recycleFilterChain(chain);
return value;
}
@Override
public int getInt(int columnIndex) throws SQLException {
FilterChainImpl chain = createChain();
int value = chain.resultSet_getInt(this, columnIndex);
recycleFilterChain(chain);
return value;
}
@Override
public int getInt(String columnLabel) throws SQLException {
FilterChainImpl chain = createChain();
int value = chain.resultSet_getInt(this, columnLabel);
recycleFilterChain(chain);
return value;
}
@Override
public long getLong(int columnIndex) throws SQLException {
FilterChainImpl chain = createChain();
long value = chain.resultSet_getLong(this, columnIndex);
recycleFilterChain(chain);
return value;
}
@Override
public long getLong(String columnLabel) throws SQLException {
FilterChainImpl chain = createChain();
long value = chain.resultSet_getLong(this, columnLabel);
recycleFilterChain(chain);
return value;
}
@Override
public ResultSetMetaData getMetaData() throws SQLException {
FilterChainImpl chain = createChain();
ResultSetMetaData value = chain.resultSet_getMetaData(this);
recycleFilterChain(chain);
return value;
}
@Override
public Reader getNCharacterStream(int columnIndex) throws SQLException {
FilterChainImpl chain = createChain();
Reader value = chain.resultSet_getNCharacterStream(this, columnIndex);
recycleFilterChain(chain);
return value;
}
@Override
public Reader getNCharacterStream(String columnLabel) throws SQLException {
FilterChainImpl chain = createChain();
Reader value = chain.resultSet_getNCharacterStream(this, columnLabel);
recycleFilterChain(chain);
return value;
}
@Override
public NClob getNClob(int columnIndex) throws SQLException {
FilterChainImpl chain = createChain();
NClob value = chain.resultSet_getNClob(this, columnIndex);
recycleFilterChain(chain);
return value;
}
@Override
public NClob getNClob(String columnLabel) throws SQLException {
FilterChainImpl chain = createChain();
NClob value = chain.resultSet_getNClob(this, columnLabel);
recycleFilterChain(chain);
return value;
}
@Override
public String getNString(int columnIndex) throws SQLException {
FilterChainImpl chain = createChain();
String value = chain.resultSet_getNString(this, columnIndex);
recycleFilterChain(chain);
return value;
}
@Override
public String getNString(String columnLabel) throws SQLException {
FilterChainImpl chain = createChain();
String value = chain.resultSet_getNString(this, columnLabel);
recycleFilterChain(chain);
return value;
}
@Override
public Object getObject(int columnIndex) throws SQLException {
FilterChainImpl chain = createChain();
Object value = chain.resultSet_getObject(this, columnIndex);
recycleFilterChain(chain);
return value;
}
@Override
public Object getObject(String columnLabel) throws SQLException {
FilterChainImpl chain = createChain();
Object value = chain.resultSet_getObject(this, columnLabel);
recycleFilterChain(chain);
return value;
}
@Override
public Object getObject(int columnIndex, Map<String, Class<?>> map) throws SQLException {
FilterChainImpl chain = createChain();
Object value = chain.resultSet_getObject(this, columnIndex, map);
recycleFilterChain(chain);
return value;
}
@Override
public Object getObject(String columnLabel, Map<String, Class<?>> map) throws SQLException {
FilterChainImpl chain = createChain();
Object value = chain.resultSet_getObject(this, columnLabel, map);
recycleFilterChain(chain);
return value;
}
@Override
public Ref getRef(int columnIndex) throws SQLException {
FilterChainImpl chain = createChain();
Ref value = chain.resultSet_getRef(this, columnIndex);
recycleFilterChain(chain);
return value;
}
@Override
public Ref getRef(String columnLabel) throws SQLException {
FilterChainImpl chain = createChain();
Ref value = chain.resultSet_getRef(this, columnLabel);
recycleFilterChain(chain);
return value;
}
@Override
public int getRow() throws SQLException {
FilterChainImpl chain = createChain();
int value = chain.resultSet_getRow(this);
recycleFilterChain(chain);
return value;
}
@Override
public RowId getRowId(int columnIndex) throws SQLException {
FilterChainImpl chain = createChain();
RowId value = chain.resultSet_getRowId(this, columnIndex);
recycleFilterChain(chain);
return value;
}
@Override
public RowId getRowId(String columnLabel) throws SQLException {
FilterChainImpl chain = createChain();
RowId value = chain.resultSet_getRowId(this, columnLabel);
recycleFilterChain(chain);
return value;
}
@Override
public SQLXML getSQLXML(int columnIndex) throws SQLException {
FilterChainImpl chain = createChain();
SQLXML value = chain.resultSet_getSQLXML(this, columnIndex);
recycleFilterChain(chain);
return value;
}
@Override
public SQLXML getSQLXML(String columnLabel) throws SQLException {
FilterChainImpl chain = createChain();
SQLXML value = chain.resultSet_getSQLXML(this, columnLabel);
recycleFilterChain(chain);
return value;
}
@Override
public short getShort(int columnIndex) throws SQLException {
FilterChainImpl chain = createChain();
short value = chain.resultSet_getShort(this, columnIndex);
recycleFilterChain(chain);
return value;
}
@Override
public short getShort(String columnLabel) throws SQLException {
FilterChainImpl chain = createChain();
short value = chain.resultSet_getShort(this, columnLabel);
recycleFilterChain(chain);
return value;
}
@Override
public Statement getStatement() throws SQLException {
FilterChainImpl chain = createChain();
Statement stmt = chain.resultSet_getStatement(this);
recycleFilterChain(chain);
return stmt;
}
@Override
public String getString(int columnIndex) throws SQLException {
FilterChainImpl chain = createChain();
String value = chain.resultSet_getString(this, columnIndex);
recycleFilterChain(chain);
return value;
}
@Override
public String getString(String columnLabel) throws SQLException {
FilterChainImpl chain = createChain();
String value = chain.resultSet_getString(this, columnLabel);
recycleFilterChain(chain);
return value;
}
@Override
public Time getTime(int columnIndex) throws SQLException {
FilterChainImpl chain = createChain();
Time value = chain.resultSet_getTime(this, columnIndex);
recycleFilterChain(chain);
return value;
}
@Override
public Time getTime(String columnLabel) throws SQLException {
FilterChainImpl chain = createChain();
Time value = chain.resultSet_getTime(this, columnLabel);
recycleFilterChain(chain);
return value;
}
@Override
public Time getTime(int columnIndex, Calendar cal) throws SQLException {
FilterChainImpl chain = createChain();
Time value = chain.resultSet_getTime(this, columnIndex, cal);
recycleFilterChain(chain);
return value;
}
@Override
public Time getTime(String columnLabel, Calendar cal) throws SQLException {
FilterChainImpl chain = createChain();
Time value = chain.resultSet_getTime(this, columnLabel, cal);
recycleFilterChain(chain);
return value;
}
@Override
public Timestamp getTimestamp(int columnIndex) throws SQLException {
FilterChainImpl chain = createChain();
Timestamp value = chain.resultSet_getTimestamp(this, columnIndex);
recycleFilterChain(chain);
return value;
}
@Override
public Timestamp getTimestamp(String columnLabel) throws SQLException {
FilterChainImpl chain = createChain();
Timestamp value = chain.resultSet_getTimestamp(this, columnLabel);
recycleFilterChain(chain);
return value;
}
@Override
public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException {
FilterChainImpl chain = createChain();
Timestamp value = chain.resultSet_getTimestamp(this, columnIndex, cal);
recycleFilterChain(chain);
return value;
}
@Override
public Timestamp getTimestamp(String columnLabel, Calendar cal) throws SQLException {
FilterChainImpl chain = createChain();
Timestamp value = chain.resultSet_getTimestamp(this, columnLabel, cal);
recycleFilterChain(chain);
return value;
}
@Override
public int getType() throws SQLException {
FilterChainImpl chain = createChain();
int value = chain.resultSet_getType(this);
recycleFilterChain(chain);
return value;
}
@Override
public URL getURL(int columnIndex) throws SQLException {
FilterChainImpl chain = createChain();
URL value = chain.resultSet_getURL(this, columnIndex);
recycleFilterChain(chain);
return value;
}
@Override
public URL getURL(String columnLabel) throws SQLException {
FilterChainImpl chain = createChain();
URL value = chain.resultSet_getURL(this, columnLabel);
recycleFilterChain(chain);
return value;
}
@Override
public InputStream getUnicodeStream(int columnIndex) throws SQLException {
FilterChainImpl chain = createChain();
InputStream value = chain.resultSet_getUnicodeStream(this, columnIndex);
recycleFilterChain(chain);
return value;
}
@Override
public InputStream getUnicodeStream(String columnLabel) throws SQLException {
FilterChainImpl chain = createChain();
InputStream value = chain.resultSet_getUnicodeStream(this, columnLabel);
recycleFilterChain(chain);
return value;
}
@Override
public SQLWarning getWarnings() throws SQLException {
FilterChainImpl chain = createChain();
SQLWarning value = chain.resultSet_getWarnings(this);
recycleFilterChain(chain);
return value;
}
@Override
public void insertRow() throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_insertRow(this);
recycleFilterChain(chain);
}
@Override
public boolean isAfterLast() throws SQLException {
FilterChainImpl chain = createChain();
boolean value = chain.resultSet_isAfterLast(this);
recycleFilterChain(chain);
return value;
}
@Override
public boolean isBeforeFirst() throws SQLException {
FilterChainImpl chain = createChain();
boolean value = chain.resultSet_isBeforeFirst(this);
recycleFilterChain(chain);
return value;
}
@Override
public boolean isClosed() throws SQLException {
FilterChainImpl chain = createChain();
boolean value = chain.resultSet_isClosed(this);
recycleFilterChain(chain);
return value;
}
@Override
public boolean isFirst() throws SQLException {
FilterChainImpl chain = createChain();
boolean value = chain.resultSet_isFirst(this);
recycleFilterChain(chain);
return value;
}
@Override
public boolean isLast() throws SQLException {
FilterChainImpl chain = createChain();
boolean value = chain.resultSet_isLast(this);
recycleFilterChain(chain);
return value;
}
@Override
public boolean last() throws SQLException {
FilterChainImpl chain = createChain();
boolean value = chain.resultSet_last(this);
recycleFilterChain(chain);
return value;
}
@Override
public void moveToCurrentRow() throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_moveToCurrentRow(this);
recycleFilterChain(chain);
}
@Override
public void moveToInsertRow() throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_moveToInsertRow(this);
recycleFilterChain(chain);
}
@Override
public boolean next() throws SQLException {
FilterChainImpl chain = createChain();
boolean moreRows = chain.resultSet_next(this);
if (moreRows) {
cursorIndex++;
if (cursorIndex > fetchRowCount) {
fetchRowCount = cursorIndex;
}
}
recycleFilterChain(chain);
return moreRows;
}
@Override
public boolean previous() throws SQLException {
FilterChainImpl chain = createChain();
boolean moreRows = chain.resultSet_previous(this);
if (moreRows) {
cursorIndex--;
}
recycleFilterChain(chain);
return moreRows;
}
@Override
public void refreshRow() throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_refreshRow(this);
recycleFilterChain(chain);
}
@Override
public boolean relative(int rows) throws SQLException {
FilterChainImpl chain = createChain();
boolean value = chain.resultSet_relative(this, rows);
recycleFilterChain(chain);
return value;
}
@Override
public boolean rowDeleted() throws SQLException {
FilterChainImpl chain = createChain();
boolean value = chain.resultSet_rowDeleted(this);
recycleFilterChain(chain);
return value;
}
@Override
public boolean rowInserted() throws SQLException {
FilterChainImpl chain = createChain();
boolean value = chain.resultSet_rowInserted(this);
recycleFilterChain(chain);
return value;
}
@Override
public boolean rowUpdated() throws SQLException {
FilterChainImpl chain = createChain();
boolean value = chain.resultSet_rowUpdated(this);
recycleFilterChain(chain);
return value;
}
@Override
public void setFetchDirection(int direction) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_setFetchDirection(this, direction);
recycleFilterChain(chain);
}
@Override
public void setFetchSize(int rows) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_setFetchSize(this, rows);
recycleFilterChain(chain);
}
@Override
public void updateArray(int columnIndex, Array x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateArray(this, columnIndex, x);
recycleFilterChain(chain);
}
@Override
public void updateArray(String columnLabel, Array x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateArray(this, columnLabel, x);
recycleFilterChain(chain);
}
@Override
public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateAsciiStream(this, columnIndex, x);
recycleFilterChain(chain);
}
@Override
public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateAsciiStream(this, columnLabel, x);
recycleFilterChain(chain);
}
@Override
public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateAsciiStream(this, columnIndex, x, length);
recycleFilterChain(chain);
}
@Override
public void updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateAsciiStream(this, columnLabel, x, length);
recycleFilterChain(chain);
}
@Override
public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateAsciiStream(this, columnIndex, x, length);
recycleFilterChain(chain);
}
@Override
public void updateAsciiStream(String columnLabel, InputStream x, long length) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateAsciiStream(this, columnLabel, x, length);
recycleFilterChain(chain);
}
@Override
public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateBigDecimal(this, columnIndex, x);
recycleFilterChain(chain);
}
@Override
public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateBigDecimal(this, columnLabel, x);
recycleFilterChain(chain);
}
@Override
public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateBinaryStream(this, columnIndex, x);
recycleFilterChain(chain);
}
@Override
public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateBinaryStream(this, columnLabel, x);
recycleFilterChain(chain);
}
@Override
public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateBinaryStream(this, columnIndex, x, length);
recycleFilterChain(chain);
}
@Override
public void updateBinaryStream(String columnLabel, InputStream x, int length) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateBinaryStream(this, columnLabel, x, length);
recycleFilterChain(chain);
}
@Override
public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateBinaryStream(this, columnIndex, x, length);
recycleFilterChain(chain);
}
@Override
public void updateBinaryStream(String columnLabel, InputStream x, long length) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateBinaryStream(this, columnLabel, x, length);
recycleFilterChain(chain);
}
@Override
public void updateBlob(int columnIndex, Blob x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateBlob(this, columnIndex, x);
recycleFilterChain(chain);
}
@Override
public void updateBlob(String columnLabel, Blob x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateBlob(this, columnLabel, x);
recycleFilterChain(chain);
}
@Override
public void updateBlob(int columnIndex, InputStream x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateBlob(this, columnIndex, x);
recycleFilterChain(chain);
}
@Override
public void updateBlob(String columnLabel, InputStream x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateBlob(this, columnLabel, x);
recycleFilterChain(chain);
}
@Override
public void updateBlob(int columnIndex, InputStream x, long length) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateBlob(this, columnIndex, x, length);
recycleFilterChain(chain);
}
@Override
public void updateBlob(String columnLabel, InputStream x, long length) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateBlob(this, columnLabel, x, length);
recycleFilterChain(chain);
}
@Override
public void updateBoolean(int columnIndex, boolean x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateBoolean(this, columnIndex, x);
recycleFilterChain(chain);
}
@Override
public void updateBoolean(String columnLabel, boolean x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateBoolean(this, columnLabel, x);
recycleFilterChain(chain);
}
@Override
public void updateByte(int columnIndex, byte x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateByte(this, columnIndex, x);
recycleFilterChain(chain);
}
@Override
public void updateByte(String columnLabel, byte x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateByte(this, columnLabel, x);
recycleFilterChain(chain);
}
@Override
public void updateBytes(int columnIndex, byte[] x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateBytes(this, columnIndex, x);
recycleFilterChain(chain);
}
@Override
public void updateBytes(String columnLabel, byte[] x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateBytes(this, columnLabel, x);
recycleFilterChain(chain);
}
@Override
public void updateCharacterStream(int columnIndex, Reader x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateCharacterStream(this, columnIndex, x);
recycleFilterChain(chain);
}
@Override
public void updateCharacterStream(String columnLabel, Reader x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateCharacterStream(this, columnLabel, x);
recycleFilterChain(chain);
}
@Override
public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateCharacterStream(this, columnIndex, x, length);
recycleFilterChain(chain);
}
@Override
public void updateCharacterStream(String columnLabel, Reader x, int length) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateCharacterStream(this, columnLabel, x, length);
recycleFilterChain(chain);
}
@Override
public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateCharacterStream(this, columnIndex, x, length);
recycleFilterChain(chain);
}
@Override
public void updateCharacterStream(String columnLabel, Reader x, long length) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateCharacterStream(this, columnLabel, x, length);
recycleFilterChain(chain);
}
@Override
public void updateClob(int columnIndex, Clob x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateClob(this, columnIndex, x);
recycleFilterChain(chain);
}
@Override
public void updateClob(String columnLabel, Clob x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateClob(this, columnLabel, x);
recycleFilterChain(chain);
}
@Override
public void updateClob(int columnIndex, Reader x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateClob(this, columnIndex, x);
recycleFilterChain(chain);
}
@Override
public void updateClob(String columnLabel, Reader x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateClob(this, columnLabel, x);
recycleFilterChain(chain);
}
@Override
public void updateClob(int columnIndex, Reader x, long length) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateClob(this, columnIndex, x, length);
recycleFilterChain(chain);
}
@Override
public void updateClob(String columnLabel, Reader x, long length) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateClob(this, columnLabel, x, length);
recycleFilterChain(chain);
}
@Override
public void updateDate(int columnIndex, Date x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateDate(this, columnIndex, x);
recycleFilterChain(chain);
}
@Override
public void updateDate(String columnLabel, Date x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateDate(this, columnLabel, x);
recycleFilterChain(chain);
}
@Override
public void updateDouble(int columnIndex, double x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateDouble(this, columnIndex, x);
recycleFilterChain(chain);
}
@Override
public void updateDouble(String columnLabel, double x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateDouble(this, columnLabel, x);
recycleFilterChain(chain);
}
@Override
public void updateFloat(int columnIndex, float x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateFloat(this, columnIndex, x);
recycleFilterChain(chain);
}
@Override
public void updateFloat(String columnLabel, float x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateFloat(this, columnLabel, x);
recycleFilterChain(chain);
}
@Override
public void updateInt(int columnIndex, int x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateInt(this, columnIndex, x);
recycleFilterChain(chain);
}
@Override
public void updateInt(String columnLabel, int x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateInt(this, columnLabel, x);
recycleFilterChain(chain);
}
@Override
public void updateLong(int columnIndex, long x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateLong(this, columnIndex, x);
recycleFilterChain(chain);
}
@Override
public void updateLong(String columnLabel, long x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateLong(this, columnLabel, x);
recycleFilterChain(chain);
}
@Override
public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateNCharacterStream(this, columnIndex, x);
recycleFilterChain(chain);
}
@Override
public void updateNCharacterStream(String columnLabel, Reader x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateNCharacterStream(this, columnLabel, x);
recycleFilterChain(chain);
}
@Override
public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateNCharacterStream(this, columnIndex, x, length);
recycleFilterChain(chain);
}
@Override
public void updateNCharacterStream(String columnLabel, Reader x, long length) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateNCharacterStream(this, columnLabel, x, length);
recycleFilterChain(chain);
}
@Override
public void updateNClob(int columnIndex, NClob x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateNClob(this, columnIndex, x);
recycleFilterChain(chain);
}
@Override
public void updateNClob(String columnLabel, NClob x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateNClob(this, columnLabel, x);
recycleFilterChain(chain);
}
@Override
public void updateNClob(int columnIndex, Reader x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateNClob(this, columnIndex, x);
recycleFilterChain(chain);
}
@Override
public void updateNClob(String columnLabel, Reader x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateNClob(this, columnLabel, x);
recycleFilterChain(chain);
}
@Override
public void updateNClob(int columnIndex, Reader x, long length) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateNClob(this, columnIndex, x, length);
recycleFilterChain(chain);
}
@Override
public void updateNClob(String columnLabel, Reader x, long length) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateNClob(this, columnLabel, x, length);
recycleFilterChain(chain);
}
@Override
public void updateNString(int columnIndex, String x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateNString(this, columnIndex, x);
recycleFilterChain(chain);
}
@Override
public void updateNString(String columnLabel, String x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateNString(this, columnLabel, x);
recycleFilterChain(chain);
}
@Override
public void updateNull(int columnIndex) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateNull(this, columnIndex);
recycleFilterChain(chain);
}
@Override
public void updateNull(String columnLabel) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateNull(this, columnLabel);
recycleFilterChain(chain);
}
@Override
public void updateObject(int columnIndex, Object x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateObject(this, columnIndex, x);
recycleFilterChain(chain);
}
@Override
public void updateObject(String columnLabel, Object x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateObject(this, columnLabel, x);
recycleFilterChain(chain);
}
@Override
public void updateObject(int columnIndex, Object x, int scaleOrLength) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateObject(this, columnIndex, x, scaleOrLength);
recycleFilterChain(chain);
}
@Override
public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateObject(this, columnLabel, x, scaleOrLength);
recycleFilterChain(chain);
}
@Override
public void updateRef(int columnIndex, Ref x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateRef(this, columnIndex, x);
recycleFilterChain(chain);
}
@Override
public void updateRef(String columnLabel, Ref x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateRef(this, columnLabel, x);
recycleFilterChain(chain);
}
@Override
public void updateRow() throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateRow(this);
recycleFilterChain(chain);
}
@Override
public void updateRowId(int columnIndex, RowId x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateRowId(this, columnIndex, x);
recycleFilterChain(chain);
}
@Override
public void updateRowId(String columnLabel, RowId x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateRowId(this, columnLabel, x);
recycleFilterChain(chain);
}
@Override
public void updateSQLXML(int columnIndex, SQLXML x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateSQLXML(this, columnIndex, x);
recycleFilterChain(chain);
}
@Override
public void updateSQLXML(String columnLabel, SQLXML x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateSQLXML(this, columnLabel, x);
recycleFilterChain(chain);
}
@Override
public void updateShort(int columnIndex, short x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateShort(this, columnIndex, x);
recycleFilterChain(chain);
}
@Override
public void updateShort(String columnLabel, short x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateShort(this, columnLabel, x);
recycleFilterChain(chain);
}
@Override
public void updateString(int columnIndex, String x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateString(this, columnIndex, x);
recycleFilterChain(chain);
}
@Override
public void updateString(String columnLabel, String x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateString(this, columnLabel, x);
recycleFilterChain(chain);
}
@Override
public void updateTime(int columnIndex, Time x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateTime(this, columnIndex, x);
recycleFilterChain(chain);
}
@Override
public void updateTime(String columnLabel, Time x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateTime(this, columnLabel, x);
recycleFilterChain(chain);
}
@Override
public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateTimestamp(this, columnIndex, x);
recycleFilterChain(chain);
}
@Override
public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException {
FilterChainImpl chain = createChain();
chain.resultSet_updateTimestamp(this, columnLabel, x);
recycleFilterChain(chain);
}
@Override
public boolean wasNull() throws SQLException {
FilterChainImpl chain = createChain();
boolean result = chain.resultSet_wasNull(this);
recycleFilterChain(chain);
return result;
}
@Override
public <T> T getObject(int columnIndex, Class<T> type) throws SQLException {
FilterChainImpl chain = createChain();
T value = chain.resultSet_getObject(this, columnIndex, type);
recycleFilterChain(chain);
return value;
}
@Override
public <T> T getObject(String columnLabel, Class<T> type) throws SQLException {
FilterChainImpl chain = createChain();
T value = chain.resultSet_getObject(this, columnLabel, type);
recycleFilterChain(chain);
return value;
}
public int getCloseCount() {
return closeCount;
}
@Override
public void addReadStringLength(int length) {
this.readStringLength += length;
}
@Override
public long getReadStringLength() {
return readStringLength;
}
@Override
public void addReadBytesLength(int length) {
this.readBytesLength += length;
}
@Override
public long getReadBytesLength() {
return readBytesLength;
}
@Override
public void incrementOpenInputStreamCount() {
openInputStreamCount++;
}
@Override
public int getOpenInputStreamCount() {
return openInputStreamCount;
}
@Override
public void incrementOpenReaderCount() {
openReaderCount++;
}
@Override
public int getOpenReaderCount() {
return openReaderCount;
}
@SuppressWarnings("unchecked")
@Override
public <T> T unwrap(Class<T> iface) throws SQLException {
if (iface == ResultSetProxy.class || iface == ResultSetProxyImpl.class) {
return (T) this;
}
return super.unwrap(iface);
}
@Override
public boolean isWrapperFor(Class<?> iface) throws SQLException {
if (iface == ResultSetProxy.class || iface == ResultSetProxyImpl.class) {
return true;
}
return super.isWrapperFor(iface);
}
@Override
public int getPhysicalColumn(int logicColumn) {
if (logicColumnMap == null) {
return logicColumn;
}
return logicColumnMap.get(logicColumn);
}
@Override
public int getLogicColumn(int physicalColumn) {
if (physicalColumnMap == null) {
return physicalColumn;
}
return physicalColumnMap.get(physicalColumn);
}
@Override
public int getHiddenColumnCount() {
if (hiddenColumns == null) {
return 0;
}
return hiddenColumns.size();
}
@Override
public List<Integer> getHiddenColumns() {
return this.hiddenColumns;
}
@Override
public void setLogicColumnMap(Map<Integer, Integer> logicColumnMap) {
this.logicColumnMap = logicColumnMap;
}
@Override
public void setPhysicalColumnMap(Map<Integer, Integer> physicalColumnMap) {
this.physicalColumnMap = physicalColumnMap;
}
@Override
public void setHiddenColumns(List<Integer> hiddenColumns) {
this.hiddenColumns = hiddenColumns;
}
}
|
ResultSetProxyImpl
|
java
|
resilience4j__resilience4j
|
resilience4j-commons-configuration/src/test/java/io/github/resilience4j/commons/configuration/timelimiter/configure/CommonsConfigurationTimeLimiterRegistryTest.java
|
{
"start": 1319,
"end": 2643
}
|
class ____ {
@Test
public void testRateLimiterRegistryFromPropertiesFile() throws ConfigurationException {
Configuration config = CommonsConfigurationUtil.getConfiguration(PropertiesConfiguration.class, TestConstants.RESILIENCE_CONFIG_PROPERTIES_FILE_NAME);
TimeLimiterRegistry registry = CommonsConfigurationTimeLimiterRegistry.of(config, new CompositeCustomizer<>(List.of()));
Assertions.assertThat(registry.timeLimiter(TestConstants.BACKEND_A).getName()).isEqualTo(TestConstants.BACKEND_A);
Assertions.assertThat(registry.timeLimiter(TestConstants.BACKEND_B).getName()).isEqualTo(TestConstants.BACKEND_B);
}
@Test
public void testRateLimiterRegistryFromYamlFile() throws ConfigurationException {
Configuration config = CommonsConfigurationUtil.getConfiguration(YAMLConfiguration.class, TestConstants.RESILIENCE_CONFIG_YAML_FILE_NAME);
TimeLimiterRegistry registry = CommonsConfigurationTimeLimiterRegistry.of(config, new CompositeCustomizer<>(List.of()));
Assertions.assertThat(registry.timeLimiter(TestConstants.BACKEND_A).getName()).isEqualTo(TestConstants.BACKEND_A);
Assertions.assertThat(registry.timeLimiter(TestConstants.BACKEND_B).getName()).isEqualTo(TestConstants.BACKEND_B);
}
}
|
CommonsConfigurationTimeLimiterRegistryTest
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
|
{
"start": 18518,
"end": 20286
}
|
class ____ {
private final Path wrapperScriptPath;
/**
* Return the path for the wrapper script.
*
* @return the path for the wrapper script
*/
public Path getWrapperScriptPath() {
return wrapperScriptPath;
}
/**
* Write out the wrapper script for the container launch script. This method
* will create the script at the configured wrapper script path.
*
* @param launchDst the script to launch
* @param pidFile the file that will hold the PID
* @throws IOException if the wrapper script cannot be created
* @see #getWrapperScriptPath
*/
public void writeLocalWrapperScript(Path launchDst, Path pidFile)
throws IOException {
try (DataOutputStream out =
lfs.create(wrapperScriptPath, EnumSet.of(CREATE, OVERWRITE));
PrintStream pout =
new PrintStream(out, false, "UTF-8")) {
writeLocalWrapperScript(launchDst, pidFile, pout);
}
}
/**
* Write out the wrapper script for the container launch script.
*
* @param launchDst the script to launch
* @param pidFile the file that will hold the PID
* @param pout the stream to use to write out the wrapper script
*/
protected abstract void writeLocalWrapperScript(Path launchDst,
Path pidFile, PrintStream pout);
/**
* Create an instance for the given container working directory.
*
* @param containerWorkDir the working directory for the container
*/
protected LocalWrapperScriptBuilder(Path containerWorkDir) {
this.wrapperScriptPath = new Path(containerWorkDir,
Shell.appendScriptExtension("default_container_executor"));
}
}
/**
* This
|
LocalWrapperScriptBuilder
|
java
|
junit-team__junit5
|
junit-platform-engine/src/main/java/org/junit/platform/engine/discovery/DiscoverySelectors.java
|
{
"start": 25006,
"end": 27342
}
|
class ____ for the types of parameters accepted
* by the method.
*
* <p>Array parameter types may be specified using either the JVM's internal
* String representation (e.g., {@code [[I} for {@code int[][]},
* {@code [Ljava.lang.String;} for {@code java.lang.String[]}, etc.) or
* <em>source code syntax</em> (e.g., {@code int[][]}, {@code java.lang.String[]},
* etc.).
*
* <table class="plain">
* <caption>Examples</caption>
* <tr><th>Method</th><th>Fully Qualified Method Name</th></tr>
* <tr><td>{@code java.lang.String.chars()}</td><td>{@code java.lang.String#chars}</td></tr>
* <tr><td>{@code java.lang.String.chars()}</td><td>{@code java.lang.String#chars()}</td></tr>
* <tr><td>{@code java.lang.String.equalsIgnoreCase(String)}</td><td>{@code java.lang.String#equalsIgnoreCase(java.lang.String)}</td></tr>
* <tr><td>{@code java.lang.String.substring(int, int)}</td><td>{@code java.lang.String#substring(int, int)}</td></tr>
* <tr><td>{@code example.Calc.avg(int[])}</td><td>{@code example.Calc#avg([I)}</td></tr>
* <tr><td>{@code example.Calc.avg(int[])}</td><td>{@code example.Calc#avg(int[])}</td></tr>
* <tr><td>{@code example.Matrix.multiply(double[][])}</td><td>{@code example.Matrix#multiply([[D)}</td></tr>
* <tr><td>{@code example.Matrix.multiply(double[][])}</td><td>{@code example.Matrix#multiply(double[][])}</td></tr>
* <tr><td>{@code example.Service.process(String[])}</td><td>{@code example.Service#process([Ljava.lang.String;)}</td></tr>
* <tr><td>{@code example.Service.process(String[])}</td><td>{@code example.Service#process(java.lang.String[])}</td></tr>
* <tr><td>{@code example.Service.process(String[][])}</td><td>{@code example.Service#process([[Ljava.lang.String;)}</td></tr>
* <tr><td>{@code example.Service.process(String[][])}</td><td>{@code example.Service#process(java.lang.String[][])}</td></tr>
* </table>
*
* @param fullyQualifiedMethodName the fully qualified name of the method to
* select; never {@code null} or blank
* @see MethodSelector
*/
public static MethodSelector selectMethod(String fullyQualifiedMethodName) throws PreconditionViolationException {
return selectMethod((ClassLoader) null, fullyQualifiedMethodName);
}
/**
* Create a {@code MethodSelector} for the supplied <em>fully qualified
* method name</em> and
|
names
|
java
|
apache__kafka
|
connect/runtime/src/main/java/org/apache/kafka/connect/util/SafeObjectInputStream.java
|
{
"start": 993,
"end": 2439
}
|
class ____ extends ObjectInputStream {
protected static final Set<String> DEFAULT_NO_DESERIALIZE_CLASS_NAMES = Set.of(
"org.apache.commons.collections.functors.InvokerTransformer",
"org.apache.commons.collections.functors.InstantiateTransformer",
"org.apache.commons.collections4.functors.InvokerTransformer",
"org.apache.commons.collections4.functors.InstantiateTransformer",
"org.codehaus.groovy.runtime.ConvertedClosure",
"org.codehaus.groovy.runtime.MethodClosure",
"org.springframework.beans.factory.ObjectFactory",
"com.sun.org.apache.xalan.internal.xsltc.trax.TemplatesImpl",
"org.apache.xalan.xsltc.trax.TemplatesImpl"
);
public SafeObjectInputStream(InputStream in) throws IOException {
super(in);
}
@Override
protected Class<?> resolveClass(ObjectStreamClass desc) throws IOException, ClassNotFoundException {
String name = desc.getName();
if (isBlocked(name)) {
throw new SecurityException("Illegal type to deserialize: prevented for security reasons");
}
return super.resolveClass(desc);
}
private boolean isBlocked(String name) {
for (String list : DEFAULT_NO_DESERIALIZE_CLASS_NAMES) {
if (name.endsWith(list)) {
return true;
}
}
return false;
}
}
|
SafeObjectInputStream
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/persister/entity/mutation/DeleteCoordinatorStandard.java
|
{
"start": 966,
"end": 6695
}
|
class ____ extends AbstractDeleteCoordinator {
public DeleteCoordinatorStandard(EntityPersister entityPersister, SessionFactoryImplementor factory) {
super( entityPersister, factory );
}
@Override
protected MutationOperationGroup generateOperationGroup(
Object rowId,
Object[] loadedState,
boolean applyVersion,
SharedSessionContractImplementor session) {
final var deleteGroupBuilder = new MutationGroupBuilder( MutationType.DELETE, entityPersister() );
entityPersister().forEachMutableTableReverse( (tableMapping) -> {
final var tableDeleteBuilder = tableMapping.isCascadeDeleteEnabled()
? new TableDeleteBuilderSkipped( tableMapping )
: new TableDeleteBuilderStandard( entityPersister(), tableMapping, factory() );
deleteGroupBuilder.addTableDetailsBuilder( tableDeleteBuilder );
} );
applyTableDeleteDetails( deleteGroupBuilder, rowId, loadedState, applyVersion, session );
return createOperationGroup( null, deleteGroupBuilder.buildMutationGroup() );
}
private void applyTableDeleteDetails(
MutationGroupBuilder deleteGroupBuilder,
Object rowId,
Object[] loadedState,
boolean applyVersion,
SharedSessionContractImplementor session) {
// first, the table key column(s)
deleteGroupBuilder.forEachTableMutationBuilder( (builder) -> {
final var tableMapping = (EntityTableMapping) builder.getMutatingTable().getTableMapping();
final var tableDeleteBuilder = (TableDeleteBuilder) builder;
applyKeyRestriction( rowId, entityPersister(), tableDeleteBuilder, tableMapping );
} );
if ( applyVersion ) {
// apply any optimistic locking
applyOptimisticLocking( deleteGroupBuilder, loadedState, session );
final var persister = entityPersister();
if ( persister.hasPartitionedSelectionMapping() ) {
final var attributeMappings = persister.getAttributeMappings();
for ( int m = 0; m < attributeMappings.size(); m++ ) {
final var attributeMapping = attributeMappings.get( m );
final int jdbcTypeCount = attributeMapping.getJdbcTypeCount();
for ( int i = 0; i < jdbcTypeCount; i++ ) {
final var selectableMapping = attributeMapping.getSelectable( i );
if ( selectableMapping.isPartitioned() ) {
final String tableNameForMutation =
persister.physicalTableNameForMutation( selectableMapping );
final RestrictedTableMutationBuilder<?, ?> rootTableMutationBuilder =
deleteGroupBuilder.findTableDetailsBuilder( tableNameForMutation );
rootTableMutationBuilder.addKeyRestrictionLeniently( selectableMapping );
}
}
}
}
}
}
protected void applyOptimisticLocking(
MutationGroupBuilder mutationGroupBuilder,
Object[] loadedState,
SharedSessionContractImplementor session) {
final var optimisticLockStyle = entityPersister().optimisticLockStyle();
if ( optimisticLockStyle.isVersion() && entityPersister().getVersionMapping() != null ) {
applyVersionBasedOptLocking( mutationGroupBuilder );
}
else if ( loadedState != null && entityPersister().optimisticLockStyle().isAllOrDirty() ) {
applyNonVersionOptLocking(
optimisticLockStyle,
mutationGroupBuilder,
loadedState,
session
);
}
}
protected void applyVersionBasedOptLocking(MutationGroupBuilder mutationGroupBuilder) {
assert entityPersister().optimisticLockStyle() == OptimisticLockStyle.VERSION;
assert entityPersister().getVersionMapping() != null;
final String tableNameForMutation =
entityPersister().physicalTableNameForMutation( entityPersister().getVersionMapping() );
final RestrictedTableMutationBuilder<?,?> rootTableMutationBuilder =
mutationGroupBuilder.findTableDetailsBuilder( tableNameForMutation );
rootTableMutationBuilder.addOptimisticLockRestriction( entityPersister().getVersionMapping() );
}
protected void applyNonVersionOptLocking(
OptimisticLockStyle lockStyle,
MutationGroupBuilder mutationGroupBuilder,
Object[] loadedState,
SharedSessionContractImplementor session) {
final var persister = entityPersister();
assert loadedState != null;
assert lockStyle.isAllOrDirty();
assert persister.optimisticLockStyle().isAllOrDirty();
assert session != null;
final boolean[] versionability = persister.getPropertyVersionability();
for ( int attributeIndex = 0; attributeIndex < versionability.length; attributeIndex++ ) {
// only makes sense to lock on singular attributes which are not excluded from optimistic locking
if ( versionability[attributeIndex] ) {
final var attribute = persister.getAttributeMapping( attributeIndex );
if ( !attribute.isPluralAttributeMapping() ) {
breakDownJdbcValues( mutationGroupBuilder, session, attribute, loadedState[attributeIndex] );
}
}
}
}
private void breakDownJdbcValues(
MutationGroupBuilder mutationGroupBuilder,
SharedSessionContractImplementor session,
AttributeMapping attribute,
Object loadedValue) {
final RestrictedTableMutationBuilder<?, ?> tableMutationBuilder =
mutationGroupBuilder.findTableDetailsBuilder( attribute.getContainingTableExpression() );
if ( tableMutationBuilder != null ) {
final var optimisticLockBindings = tableMutationBuilder.getOptimisticLockBindings();
if ( optimisticLockBindings != null ) {
attribute.breakDownJdbcValues(
loadedValue,
(valueIndex, value, jdbcValueMapping) -> {
if ( !tableMutationBuilder.getKeyRestrictionBindings()
.containsColumn(
jdbcValueMapping.getSelectableName(),
jdbcValueMapping.getJdbcMapping()
) ) {
optimisticLockBindings.consume( valueIndex, value, jdbcValueMapping );
}
}
,
session
);
}
}
}
}
|
DeleteCoordinatorStandard
|
java
|
quarkusio__quarkus
|
extensions/security-webauthn/runtime/src/main/java/io/quarkus/security/webauthn/WebAuthnRunTimeConfig.java
|
{
"start": 897,
"end": 2423
}
|
enum ____ {
ES256(-7),
ES384(-35),
ES512(-36),
PS256(-37),
PS384(-38),
PS512(-39),
ES256K(-47),
RS256(-257),
RS384(-258),
RS512(-259),
RS1(-65535),
EdDSA(-8);
private final int coseId;
COSEAlgorithm(int coseId) {
this.coseId = coseId;
}
public static COSEAlgorithm valueOf(int coseId) {
switch (coseId) {
case -7:
return ES256;
case -35:
return ES384;
case -36:
return ES512;
case -37:
return PS256;
case -38:
return PS384;
case -39:
return PS512;
case -47:
return ES256K;
case -257:
return RS256;
case -258:
return RS384;
case -259:
return RS512;
case -65535:
return RS1;
case -8:
return EdDSA;
default:
throw new IllegalArgumentException("Unknown cose-id: " + coseId);
}
}
public int coseId() {
return coseId;
}
}
/**
* AttestationConveyancePreference
* https://www.w3.org/TR/webauthn/#attestation-convey
*/
public
|
COSEAlgorithm
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/state/FunctionSnapshotContext.java
|
{
"start": 1138,
"end": 1233
}
|
interface ____ provides meta information about the
* checkpoint.
*/
@PublicEvolving
public
|
mainly
|
java
|
apache__hadoop
|
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/resource/JerseyResource.java
|
{
"start": 1499,
"end": 2272
}
|
class ____ {
static final Logger LOG = LoggerFactory.getLogger(JerseyResource.class);
public static final String PATH = "path";
public static final String OP = "op";
@GET
@Path("{" + PATH + ":.*}")
@Produces({MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8})
public Response get(
@PathParam(PATH) @DefaultValue("UNKNOWN_" + PATH) final String path,
@QueryParam(OP) @DefaultValue("UNKNOWN_" + OP) final String op
) throws IOException {
LOG.info("get: " + PATH + "=" + path + ", " + OP + "=" + op);
final Map<String, Object> m = new TreeMap<String, Object>();
m.put(PATH, path);
m.put(OP, op);
final String js = JSON.toString(m);
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
}
|
JerseyResource
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/BadImportTest.java
|
{
"start": 21702,
"end": 21884
}
|
interface ____ {}
}
""")
.addSourceLines(
"Test.java",
"""
@org.immutables.value.Value.Immutable
|
Immutable
|
java
|
apache__camel
|
components/camel-soap/src/main/java/org/apache/camel/dataformat/soap/Soap12DataFormatAdapter.java
|
{
"start": 8289,
"end": 10686
}
|
class ____
* determined by using the elementNameStrategy. The qName of the fault detail should match the WebFault annotation
* of the Exception class. If no fault detail is set a {@link jakarta.xml.ws.soap.SOAPFaultException} is created.
*
* @param fault Soap fault
* @return created Exception
*/
private Exception createExceptionFromFault(String soapAction, Fault fault) {
StringBuilder sb = new StringBuilder();
for (Reasontext text : fault.getReason().getText()) {
sb.append(text.getValue());
}
String message = sb.toString();
Detail faultDetail = fault.getDetail();
if (faultDetail == null || faultDetail.getAny().isEmpty()) {
try {
return new SOAPFaultException(SOAPFactory.newInstance().createFault(message, fault.getCode().getValue()));
} catch (SOAPException e) {
throw new RuntimeCamelException(e);
}
}
Object detailObj = faultDetail.getAny().get(0);
if (!(detailObj instanceof JAXBElement)) {
try {
return new SOAPFaultException(SOAPFactory.newInstance().createFault(message, fault.getCode().getValue()));
} catch (SOAPException e) {
throw new RuntimeCamelException(e);
}
}
JAXBElement<?> detailEl = (JAXBElement<?>) detailObj;
Class<? extends Exception> exceptionClass
= getDataFormat().getElementNameStrategy().findExceptionForSoapActionAndFaultName(soapAction,
detailEl.getName());
Constructor<? extends Exception> messageConstructor;
Constructor<? extends Exception> constructor;
try {
Object detail = JAXBIntrospector.getValue(detailEl);
try {
constructor = exceptionClass.getConstructor(String.class, detail.getClass());
return constructor.newInstance(message, detail);
} catch (NoSuchMethodException e) {
messageConstructor = exceptionClass.getConstructor(String.class);
return messageConstructor.newInstance(message);
}
} catch (Exception e) {
throw new RuntimeCamelException(e);
}
}
@Override
public String getSoapPackageName() {
return SOAP_PACKAGE_NAME;
}
}
|
is
|
java
|
apache__maven
|
compat/maven-compat/src/main/java/org/apache/maven/artifact/repository/DefaultArtifactRepositoryFactory.java
|
{
"start": 1459,
"end": 4834
}
|
class ____ implements ArtifactRepositoryFactory {
@Inject
private org.apache.maven.repository.legacy.repository.ArtifactRepositoryFactory factory;
@Inject
private LegacySupport legacySupport;
@Inject
private PlexusContainer container;
@Override
public ArtifactRepositoryLayout getLayout(String layoutId) throws UnknownRepositoryLayoutException {
return factory.getLayout(layoutId);
}
@Override
public ArtifactRepository createDeploymentArtifactRepository(
String id, String url, String layoutId, boolean uniqueVersion) throws UnknownRepositoryLayoutException {
return injectSession(factory.createDeploymentArtifactRepository(id, url, layoutId, uniqueVersion), false);
}
@Override
public ArtifactRepository createDeploymentArtifactRepository(
String id, String url, ArtifactRepositoryLayout repositoryLayout, boolean uniqueVersion) {
return injectSession(
factory.createDeploymentArtifactRepository(id, url, repositoryLayout, uniqueVersion), false);
}
@Override
public ArtifactRepository createArtifactRepository(
String id,
String url,
String layoutId,
ArtifactRepositoryPolicy snapshots,
ArtifactRepositoryPolicy releases)
throws UnknownRepositoryLayoutException {
return injectSession(factory.createArtifactRepository(id, url, layoutId, snapshots, releases), true);
}
@Override
public ArtifactRepository createArtifactRepository(
String id,
String url,
ArtifactRepositoryLayout repositoryLayout,
ArtifactRepositoryPolicy snapshots,
ArtifactRepositoryPolicy releases) {
return injectSession(factory.createArtifactRepository(id, url, repositoryLayout, snapshots, releases), true);
}
@Override
public void setGlobalUpdatePolicy(String updatePolicy) {
factory.setGlobalUpdatePolicy(updatePolicy);
}
@Override
public void setGlobalChecksumPolicy(String checksumPolicy) {
factory.setGlobalChecksumPolicy(checksumPolicy);
}
private ArtifactRepository injectSession(ArtifactRepository repository, boolean mirrors) {
RepositorySystemSession session = legacySupport.getRepositorySession();
if (session != null && repository != null && !isLocalRepository(repository)) {
List<ArtifactRepository> repositories = Arrays.asList(repository);
RepositorySystem repositorySystem;
try {
repositorySystem = container.lookup(RepositorySystem.class);
} catch (ComponentLookupException e) {
throw new IllegalStateException("Unable to lookup " + RepositorySystem.class.getName());
}
if (mirrors) {
repositorySystem.injectMirror(session, repositories);
}
repositorySystem.injectProxy(session, repositories);
repositorySystem.injectAuthentication(session, repositories);
}
return repository;
}
private boolean isLocalRepository(ArtifactRepository repository) {
// unfortunately, the API doesn't allow to tell a remote repo and the local repo apart...
return "local".equals(repository.getId());
}
}
|
DefaultArtifactRepositoryFactory
|
java
|
quarkusio__quarkus
|
tcks/microprofile-opentelemetry/src/test/java/io/quarkus/tck/opentelemetry/ExecutorProvider.java
|
{
"start": 158,
"end": 472
}
|
class ____ implements Executor {
private final ExecutorService executorService;
public ExecutorProvider() {
this.executorService = Arc.container().getExecutorService();
}
@Override
public void execute(Runnable command) {
executorService.execute(command);
}
}
|
ExecutorProvider
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/issue_3200/Issue3283.java
|
{
"start": 215,
"end": 895
}
|
class ____ extends TestCase {
public void test_for_issue() throws Exception {
VO v = new VO();
v.v0 = 1001L;
v.v1 = 101;
String str = JSON.toJSONString(v, SerializerFeature.WriteNonStringValueAsString);
JSONObject object = JSON.parseObject(str);
assertEquals("1001", object.get("v0"));
assertEquals("101", object.get("v1"));
}
public void test_for_issue_1() throws Exception {
VO v = new VO();
v.v0 = 19007199254740991L;
String str = JSON.toJSONString(v, SerializerFeature.BrowserCompatible);
assertEquals("{\"v0\":\"19007199254740991\"}", str);
}
public static
|
Issue3283
|
java
|
apache__rocketmq
|
proxy/src/test/java/org/apache/rocketmq/proxy/grpc/v2/common/GrpcConverterTest.java
|
{
"start": 1038,
"end": 1667
}
|
class ____ {
@Test
public void testBuildMessageQueue() {
String topic = "topic";
String brokerName = "brokerName";
int queueId = 1;
MessageExt messageExt = new MessageExt();
messageExt.setQueueId(queueId);
messageExt.setTopic(topic);
MessageQueue messageQueue = GrpcConverter.getInstance().buildMessageQueue(messageExt, brokerName);
assertThat(messageQueue.getTopic().getName()).isEqualTo(topic);
assertThat(messageQueue.getBroker().getName()).isEqualTo(brokerName);
assertThat(messageQueue.getId()).isEqualTo(queueId);
}
}
|
GrpcConverterTest
|
java
|
spring-projects__spring-security
|
buildSrc/src/test/resources/samples/jacoco/java/src/main/java/sample/TheClass.java
|
{
"start": 24,
"end": 140
}
|
class ____ {
public boolean doStuff(boolean b) {
if(b) {
return true;
} else {
return false;
}
}
}
|
TheClass
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/context/annotation/LazyAutowiredAnnotationBeanPostProcessorTests.java
|
{
"start": 9304,
"end": 9703
}
|
class ____ implements TestBeanHolder {
private TestBean testBean;
@Autowired @Lazy
public void setTestBean(TestBean testBean) {
if (this.testBean != null) {
throw new IllegalStateException("Already called");
}
this.testBean = testBean;
}
@Override
public TestBean getTestBean() {
return this.testBean;
}
}
public static
|
MethodResourceInjectionBeanWithMethodLevelLazy
|
java
|
elastic__elasticsearch
|
x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PublishableHttpResource.java
|
{
"start": 1755,
"end": 23662
}
|
class ____ extends HttpResource {
/**
* A value that will never match anything in the JSON response body, thus limiting it to "{}".
*/
public static final String FILTER_PATH_NONE = "$NONE";
/**
* A value that will match any top-level key and an inner "version" field, like '{"any-key":{"version":123}}'.
*/
public static final String FILTER_PATH_RESOURCE_VERSION = "*.version";
/**
* Use this to avoid getting any JSON response from a request.
*/
public static final Map<String, String> NO_BODY_PARAMETERS = Collections.singletonMap("filter_path", FILTER_PATH_NONE);
/**
* Use this to retrieve the version of template and pipeline resources in their JSON response from a request.
*/
public static final Map<String, String> RESOURCE_VERSION_PARAMETERS = Collections.singletonMap(
"filter_path",
FILTER_PATH_RESOURCE_VERSION
);
/**
* The default set of acceptable exists response codes for GET requests.
*/
public static final Set<Integer> GET_EXISTS = Collections.singleton(RestStatus.OK.getStatus());
/**
* The default set of <em>acceptable</em> response codes for GET requests to represent that it does NOT exist.
*/
public static final Set<Integer> GET_DOES_NOT_EXIST = Collections.singleton(RestStatus.NOT_FOUND.getStatus());
/**
* The default parameters to use for any request.
*/
protected final Map<String, String> defaultParameters;
/**
* Create a new {@link PublishableHttpResource} that {@linkplain #isDirty() is dirty}.
*
* @param resourceOwnerName The user-recognizable name.
* @param masterTimeout Master timeout to use with any request.
* @param baseParameters The base parameters to specify for the request.
*/
protected PublishableHttpResource(
final String resourceOwnerName,
@Nullable final TimeValue masterTimeout,
final Map<String, String> baseParameters
) {
this(resourceOwnerName, masterTimeout, baseParameters, true);
}
/**
* Create a new {@link PublishableHttpResource}.
*
* @param resourceOwnerName The user-recognizable name.
* @param masterTimeout timeout to use with any request.
* @param baseParameters The base parameters to specify for the request.
* @param dirty Whether the resource is dirty or not
*/
protected PublishableHttpResource(
final String resourceOwnerName,
@Nullable final TimeValue masterTimeout,
final Map<String, String> baseParameters,
final boolean dirty
) {
super(resourceOwnerName, dirty);
if (masterTimeout != null && TimeValue.MINUS_ONE.equals(masterTimeout) == false) {
final Map<String, String> parameters = Maps.newMapWithExpectedSize(baseParameters.size() + 1);
parameters.putAll(baseParameters);
parameters.put(REST_MASTER_TIMEOUT_PARAM, masterTimeout.toString());
this.defaultParameters = Collections.unmodifiableMap(parameters);
} else {
this.defaultParameters = baseParameters;
}
}
/**
* Get the default parameters to use with every request.
*
* @return Never {@code null}.
*/
public Map<String, String> getDefaultParameters() {
return defaultParameters;
}
/**
* Perform whatever is necessary to check and publish this {@link PublishableHttpResource}.
*
* @param client The REST client to make the request(s).
* @param listener Returns {@code true} if the resource is available for use. {@code false} to stop.
*/
@Override
protected final void doCheckAndPublish(final RestClient client, final ActionListener<ResourcePublishResult> listener) {
doCheck(client, listener.delegateFailureAndWrap((l, exists) -> {
if (exists) {
// it already exists, so we can skip publishing it
l.onResponse(ResourcePublishResult.ready());
} else {
doPublish(client, l);
}
}));
}
/**
* Determine if the current resource exists.
*
* @param client The REST client to make the request(s).
* @param listener Returns {@code true} if the resource already available to use. {@code false} otherwise.
*/
protected abstract void doCheck(RestClient client, ActionListener<Boolean> listener);
/**
* Determine if the current {@code resourceName} exists at the {@code resourceBasePath} endpoint with a version greater than or equal
* to the expected version.
* <p>
* This provides the base-level check for any resource that does not need to care about its response beyond existence (and likely does
* not need to inspect its contents).
* <p>
* This expects responses in the form of:
* <pre><code>
* {
* "resourceName": {
* "version": 6000002
* }
* }
* </code></pre>
*
* @param client The REST client to make the request(s).
* @param listener Returns {@code true} if the resource was successfully published. {@code false} otherwise.
* @param logger The logger to use for status messages.
* @param resourceBasePath The base path/endpoint to check for the resource (e.g., "/_template").
* @param resourceName The name of the resource (e.g., "template123").
* @param resourceType The type of resource (e.g., "monitoring template").
* @param resourceOwnerName The user-recognizeable resource owner.
* @param resourceOwnerType The type of resource owner being dealt with (e.g., "monitoring cluster").
* @param xContent The XContent used to parse the response.
* @param minimumVersion The minimum version allowed without being replaced (expected to be the last updated version).
*/
protected void versionCheckForResource(
final RestClient client,
final ActionListener<Boolean> listener,
final Logger logger,
final String resourceBasePath,
final String resourceName,
final String resourceType,
final String resourceOwnerName,
final String resourceOwnerType,
final XContent xContent,
final int minimumVersion
) {
final CheckedFunction<Response, Boolean, IOException> responseChecker = (response) -> shouldReplaceResource(
response,
xContent,
resourceName,
minimumVersion
);
checkForResource(
client,
listener,
logger,
resourceBasePath,
resourceName,
resourceType,
resourceOwnerName,
resourceOwnerType,
GET_EXISTS,
GET_DOES_NOT_EXIST,
responseChecker,
this::alwaysReplaceResource
);
}
/**
* Determine if the current {@code resourceName} exists at the {@code resourceBasePath} endpoint.
* <p>
* This provides the base-level check for any resource that cares about existence and also its contents.
*
* @param client The REST client to make the request(s).
* @param listener Returns {@code true} if the resource was successfully published. {@code false} otherwise.
* @param logger The logger to use for status messages.
* @param resourceBasePath The base path/endpoint to check for the resource (e.g., "/_template"), if any.
* @param resourceName The name of the resource (e.g., "template123").
* @param resourceType The type of resource (e.g., "monitoring template").
* @param resourceOwnerName The user-recognizeable resource owner.
* @param resourceOwnerType The type of resource owner being dealt with (e.g., "monitoring cluster").
* @param exists Response codes that represent {@code EXISTS}.
* @param doesNotExist Response codes that represent {@code DOES_NOT_EXIST}.
* @param responseChecker Returns {@code true} if the resource should be replaced.
* @param doesNotExistResponseChecker Returns {@code true} if the resource should be replaced.
*/
protected void checkForResource(
final RestClient client,
final ActionListener<Boolean> listener,
final Logger logger,
final String resourceBasePath,
final String resourceName,
final String resourceType,
final String resourceOwnerName,
final String resourceOwnerType,
final Set<Integer> exists,
final Set<Integer> doesNotExist,
final CheckedFunction<Response, Boolean, IOException> responseChecker,
final CheckedFunction<Response, Boolean, IOException> doesNotExistResponseChecker
) {
logger.trace("checking if {} [{}] exists on the [{}] {}", resourceType, resourceName, resourceOwnerName, resourceOwnerType);
final Request request = new Request("GET", resourceBasePath + "/" + resourceName);
addDefaultParameters(request);
// avoid exists and DNE parameters from being an exception by default
final Set<Integer> expectedResponseCodes = Sets.union(exists, doesNotExist);
request.addParameter(IGNORE_RESPONSE_CODES_PARAM, expectedResponseCodes.stream().map(Object::toString).collect(joining(",")));
client.performRequestAsync(request, new ResponseListener() {
@Override
public void onSuccess(final Response response) {
try {
final int statusCode = response.getStatusLine().getStatusCode();
// checking the content is the job of whoever called this function by checking the tuple's response
if (exists.contains(statusCode)) {
logger.debug("{} [{}] found on the [{}] {}", resourceType, resourceName, resourceOwnerName, resourceOwnerType);
// if we should replace it -- true -- then the resource "does not exist" as far as the caller is concerned
listener.onResponse(false == responseChecker.apply(response));
} else if (doesNotExist.contains(statusCode)) {
logger.debug(
"{} [{}] does not exist on the [{}] {}",
resourceType,
resourceName,
resourceOwnerName,
resourceOwnerType
);
// if we should replace it -- true -- then the resource "does not exist" as far as the caller is concerned
listener.onResponse(false == doesNotExistResponseChecker.apply(response));
} else {
onFailure(new ResponseException(response));
}
} catch (Exception e) {
logger.error(() -> format("failed to parse [%s/%s] on the [%s]", resourceBasePath, resourceName, resourceOwnerName), e);
onFailure(e);
}
}
@Override
public void onFailure(final Exception exception) {
if (exception instanceof ResponseException) {
final Response response = ((ResponseException) exception).getResponse();
final int statusCode = response.getStatusLine().getStatusCode();
logger.error(
() -> format(
"failed to verify %s [%s] on the [%s] %s with status code [%s]",
resourceType,
resourceName,
resourceOwnerName,
resourceOwnerType,
statusCode
),
exception
);
} else {
logger.error(
() -> format(
"failed to verify %s [%s] on the [%s] %s",
resourceType,
resourceName,
resourceOwnerName,
resourceOwnerType
),
exception
);
}
listener.onFailure(exception);
}
});
}
/**
* Publish the current resource.
* <p>
* This is only invoked if {@linkplain #doCheck(RestClient, ActionListener) the check} fails.
*
* @param client The REST client to make the request(s).
* @param listener Returns {@code true} if the resource is available to use. Otherwise {@code false}.
*/
protected abstract void doPublish(RestClient client, ActionListener<ResourcePublishResult> listener);
/**
* Upload the {@code resourceName} to the {@code resourceBasePath} endpoint.
*
* @param client The REST client to make the request(s).
* @param listener Returns {@code true} if the resource was successfully published. {@code false} otherwise.
* @param logger The logger to use for status messages.
* @param resourceBasePath The base path/endpoint to check for the resource (e.g., "/_template").
* @param resourceName The name of the resource (e.g., "template123").
* @param parameters Map of query string parameters, if any.
* @param body The {@link HttpEntity} that makes up the body of the request.
* @param resourceType The type of resource (e.g., "monitoring template").
* @param resourceOwnerName The user-recognizeable resource owner.
* @param resourceOwnerType The type of resource owner being dealt with (e.g., "monitoring cluster").
*/
protected void putResource(
final RestClient client,
final ActionListener<ResourcePublishResult> listener,
final Logger logger,
final String resourceBasePath,
final String resourceName,
final Map<String, String> parameters,
final java.util.function.Supplier<HttpEntity> body,
final String resourceType,
final String resourceOwnerName,
final String resourceOwnerType
) {
logger.trace("uploading {} [{}] to the [{}] {}", resourceType, resourceName, resourceOwnerName, resourceOwnerType);
final Request request = new Request("PUT", resourceBasePath + "/" + resourceName);
addDefaultParameters(request);
addParameters(request, parameters);
request.setEntity(body.get());
client.performRequestAsync(request, new ResponseListener() {
@Override
public void onSuccess(final Response response) {
final int statusCode = response.getStatusLine().getStatusCode();
// 200 or 201
if (statusCode == RestStatus.OK.getStatus() || statusCode == RestStatus.CREATED.getStatus()) {
logger.debug("{} [{}] uploaded to the [{}] {}", resourceType, resourceName, resourceOwnerName, resourceOwnerType);
listener.onResponse(ResourcePublishResult.ready());
} else {
onFailure(new RuntimeException("[" + resourceBasePath + "/" + resourceName + "] responded with [" + statusCode + "]"));
}
}
@Override
public void onFailure(final Exception exception) {
logger.error(
() -> format(
"failed to upload %s [%s] on the [%s] %s",
resourceType,
resourceName,
resourceOwnerName,
resourceOwnerType
),
exception
);
listener.onFailure(exception);
}
});
}
/**
* Delete the {@code resourceName} using the {@code resourceBasePath} endpoint.
* <p>
* Note to callers: this will add an "ignore" parameter to the request so that 404 is not an exception and therefore considered
* successful if it's not found. You can override this behavior by specifying any valid value for "ignore", at which point 404
* responses will result in {@code false} and logged failure.
*
* @param client The REST client to make the request(s).
* @param listener Returns {@code true} if it successfully deleted the item; <em>never</em> {@code false}.
* @param logger The logger to use for status messages.
* @param resourceBasePath The base path/endpoint to check for the resource (e.g., "/_template").
* @param resourceName The name of the resource (e.g., "template123").
* @param resourceType The type of resource (e.g., "monitoring template").
* @param resourceOwnerName The user-recognizeable resource owner.
* @param resourceOwnerType The type of resource owner being dealt with (e.g., "monitoring cluster").
*/
protected void deleteResource(
final RestClient client,
final ActionListener<Boolean> listener,
final Logger logger,
final String resourceBasePath,
final String resourceName,
final String resourceType,
final String resourceOwnerName,
final String resourceOwnerType
) {
logger.trace("deleting {} [{}] from the [{}] {}", resourceType, resourceName, resourceOwnerName, resourceOwnerType);
final Request request = new Request("DELETE", resourceBasePath + "/" + resourceName);
addDefaultParameters(request);
if (false == defaultParameters.containsKey(IGNORE_RESPONSE_CODES_PARAM)) {
// avoid 404 being an exception by default
request.addParameter(IGNORE_RESPONSE_CODES_PARAM, Integer.toString(NOT_FOUND.getStatus()));
}
client.performRequestAsync(request, new ResponseListener() {
@Override
public void onSuccess(Response response) {
final int statusCode = response.getStatusLine().getStatusCode();
// 200 or 404 (not found is just as good as deleting it!)
if (statusCode == RestStatus.OK.getStatus() || statusCode == RestStatus.NOT_FOUND.getStatus()) {
logger.debug("{} [{}] deleted from the [{}] {}", resourceType, resourceName, resourceOwnerName, resourceOwnerType);
listener.onResponse(true);
} else {
onFailure(new RuntimeException("[" + resourceBasePath + "/" + resourceName + "] responded with [" + statusCode + "]"));
}
}
@Override
public void onFailure(Exception exception) {
logger.error(
() -> format(
"failed to delete %s [%s] on the [%s] %s",
resourceType,
resourceName,
resourceOwnerName,
resourceOwnerType
),
exception
);
listener.onFailure(exception);
}
});
}
/**
* Determine if the current resource should replaced the checked one based on its version (or lack thereof).
* <p>
* This expects a response like (where {@code resourceName} is replaced with its value):
* <pre><code>
* {
* "resourceName": {
* "version": 6000002
* }
* }
* </code></pre>
*
* @param response The filtered response from the _template/{name} or _ingest/pipeline/{name} resource APIs
* @param xContent The XContent parser to use
* @param resourceName The name of the looked up resource, which is expected to be the top-level key
* @param minimumVersion The minimum version allowed without being replaced (expected to be the last updated version).
* @return {@code true} represents that it should be replaced. {@code false} that it should be left alone.
* @throws IOException if any issue occurs while parsing the {@code xContent} {@code response}.
* @throws RuntimeException if the response format is changed.
*/
protected boolean shouldReplaceResource(
final Response response,
final XContent xContent,
final String resourceName,
final int minimumVersion
) throws IOException {
// no named content used; so EMPTY is fine
final Map<String, Object> resources = XContentHelper.convertToMap(xContent, response.getEntity().getContent(), false);
// if it's empty, then there's no version in the response thanks to filter_path
if (resources.isEmpty() == false) {
@SuppressWarnings("unchecked")
final Map<String, Object> resource = (Map<String, Object>) resources.get(resourceName);
final Object version = resource != null ? resource.get("version") : null;
// the version in the template is expected to include the alpha/beta/rc codes as well
if (version instanceof Number) {
return ((Number) version).intValue() < minimumVersion;
}
}
return true;
}
/**
* A useful placeholder for {@link CheckedFunction}s that want to always return {@code true}.
*
* @param response Unused.
* @return Always {@code true}.
*/
protected boolean alwaysReplaceResource(final Response response) {
return true;
}
private void addDefaultParameters(final Request request) {
PublishableHttpResource.addParameters(request, defaultParameters);
}
private static void addParameters(final Request request, final Map<String, String> parameters) {
for (final Map.Entry<String, String> param : parameters.entrySet()) {
request.addParameter(param.getKey(), param.getValue());
}
}
}
|
PublishableHttpResource
|
java
|
reactor__reactor-core
|
reactor-core/src/main/java/reactor/core/publisher/MonoAny.java
|
{
"start": 1820,
"end": 3098
}
|
class ____<T> extends
Operators.BaseFluxToMonoOperator<T, Boolean> {
final Predicate<? super T> predicate;
boolean done;
AnySubscriber(CoreSubscriber<? super Boolean> actual, Predicate<? super T> predicate) {
super(actual);
this.predicate = predicate;
}
@Override
public @Nullable Object scanUnsafe(Attr key) {
if (key == Attr.TERMINATED) return done;
return super.scanUnsafe(key);
}
@Override
public void onNext(T t) {
if (done) {
Operators.onDiscard(t, this.actual.currentContext());
return;
}
boolean b;
try {
b = predicate.test(t);
} catch (Throwable e) {
done = true;
actual.onError(Operators.onOperatorError(s, e, t, actual.currentContext()));
return;
}
if (b) {
done = true;
s.cancel();
this.actual.onNext(true);
this.actual.onComplete();
}
}
@Override
public void onError(Throwable t) {
if (done) {
Operators.onErrorDropped(t, actual.currentContext());
return;
}
done = true;
actual.onError(t);
}
@Override
public void onComplete() {
if (done) {
return;
}
done = true;
completePossiblyEmpty();
}
@Override
Boolean accumulatedValue() {
return false;
}
}
}
|
AnySubscriber
|
java
|
netty__netty
|
handler/src/test/java/io/netty/handler/ssl/OpenSslKeyMaterialManagerTest.java
|
{
"start": 1151,
"end": 3150
}
|
class ____ {
@Test
public void testChooseClientAliasReturnsNull() throws SSLException {
OpenSsl.ensureAvailability();
X509ExtendedKeyManager keyManager = new X509ExtendedKeyManager() {
@Override
public String[] getClientAliases(String s, Principal[] principals) {
return EmptyArrays.EMPTY_STRINGS;
}
@Override
public String chooseClientAlias(String[] strings, Principal[] principals, Socket socket) {
return null;
}
@Override
public String[] getServerAliases(String s, Principal[] principals) {
return EmptyArrays.EMPTY_STRINGS;
}
@Override
public String chooseServerAlias(String s, Principal[] principals, Socket socket) {
return null;
}
@Override
public X509Certificate[] getCertificateChain(String s) {
return EmptyArrays.EMPTY_X509_CERTIFICATES;
}
@Override
public PrivateKey getPrivateKey(String s) {
return null;
}
};
OpenSslKeyMaterialManager manager = new OpenSslKeyMaterialManager(
new OpenSslKeyMaterialProvider(keyManager, null) {
@Override
OpenSslKeyMaterial chooseKeyMaterial(ByteBufAllocator allocator, String alias) throws Exception {
fail("Should not be called when alias is null");
return null;
}
}, false);
SslContext context = SslContextBuilder.forClient().sslProvider(SslProvider.OPENSSL).build();
OpenSslEngine engine =
(OpenSslEngine) context.newEngine(UnpooledByteBufAllocator.DEFAULT);
manager.setKeyMaterialClientSide(engine, EmptyArrays.EMPTY_STRINGS, null);
ReferenceCountUtil.release(engine);
ReferenceCountUtil.release(context);
}
}
|
OpenSslKeyMaterialManagerTest
|
java
|
spring-projects__spring-security
|
config/src/test/java/org/springframework/security/config/annotation/web/builders/NamespaceHttpTests.java
|
{
"start": 22316,
"end": 22914
}
|
class ____ {
@Bean
SecurityFilterChain filterChain(HttpSecurity http) throws Exception {
// @formatter:off
http
.authorizeHttpRequests((requests) -> requests
.anyRequest().authenticated())
.securityContext((context) -> context
.securityContextRepository(new NullSecurityContextRepository()))
.formLogin(withDefaults());
// @formatter:on
return http.build();
}
@Bean
UserDetailsService userDetailsService() {
return new InMemoryUserDetailsManager(PasswordEncodedUser.user());
}
}
@Configuration
@EnableWebSecurity
static
|
SecurityContextRepoConfig
|
java
|
quarkusio__quarkus
|
integration-tests/hibernate-validator/src/test/java/io/quarkus/hibernate/validator/runtime/ArcProxyBeanMetaDataClassNormalizerTest.java
|
{
"start": 1683,
"end": 1922
}
|
class ____ extends Original implements Subclass {
}
/**
* Simulates an object injected through {@link io.quarkus.test.InjectMock}
* or {@code io.quarkus.test.junit.mockito.InjectSpy}.
*/
private static
|
FirstSubclass
|
java
|
apache__camel
|
components/camel-jolt/src/test/java/org/apache/camel/component/jolt/JoltRefTest.java
|
{
"start": 1133,
"end": 2230
}
|
class ____ extends CamelTestSupport {
private static final String TEMP = """
{
"a": "aa",
"b": "bb"
}""";
@Test
public void testRef() {
Exchange exchange = template.request("direct:a", exchange1 -> {
Map<String, String> body = new HashMap<>();
body.put("Hello", "World");
exchange1.getIn().setBody(body);
});
assertEquals(3, exchange.getMessage().getBody(Map.class).size());
assertEquals("aa", exchange.getMessage().getBody(Map.class).get("a"));
assertEquals("bb", exchange.getMessage().getBody(Map.class).get("b"));
assertEquals("World", exchange.getMessage().getBody(Map.class).get("Hello"));
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
context.getRegistry().bind("mytemp", TEMP);
from("direct:a").to(
"jolt:ref:mytemp?transformDsl=Defaultr");
}
};
}
}
|
JoltRefTest
|
java
|
apache__flink
|
flink-table/flink-table-common/src/main/java/org/apache/flink/table/functions/ProcessTableFunction.java
|
{
"start": 1633,
"end": 4311
}
|
class ____ a user-defined process table function (PTF).
*
* <p>PTFs are the most powerful function kind for Flink SQL and Table API. They enable implementing
* user-defined operators that can be as feature-rich as built-in operations. PTFs can take
* (partitioned) tables to produce a new table. They have access to Flink's managed state,
* event-time and timer services, and underlying table changelogs.
*
* <p>A process table function (PTF) maps zero, one, or multiple tables to zero, one, or multiple
* rows (or structured types). Scalar arguments are also supported. If the output record consists of
* only one field, the wrapper can be omitted, and a scalar value can be emitted that will be
* implicitly wrapped into a row by the runtime.
*
* <h1>Table Semantics and Virtual Processors</h1>
*
* <p>PTFs can produce a new table by consuming tables as arguments. For scalability, input tables
* are distributed across so-called "virtual processors". A virtual processor, as defined by the SQL
* standard, executes a PTF instance and has access only to a portion of the entire table. The
* argument declaration decides about the size of the portion and co-location of data. Conceptually,
* tables can be processed either "per row" (i.e. with row semantics) or "per set" (i.e. with set
* semantics).
*
* <h2>Table Argument with Row Semantics</h2>
*
* <p>A PTF that takes a table with row semantics assumes that there is no correlation between rows
* and each row can be processed independently. The framework is free in how to distribute rows
* across virtual processors and each virtual processor has access only to the currently processed
* row.
*
* <h2>Table Argument with Set Semantics</h2>
*
* <p>A PTF that takes a table with set semantics assumes that there is a correlation between rows.
* When calling the function, the PARTITION BY clause defines the columns for correlation. The
* framework ensures that all rows belonging to same set are co-located. A PTF instance is able to
* access all rows belonging to the same set. In other words: The virtual processor is scoped by a
* key context.
*
* <p>It is also possible not to provide a key ({@link ArgumentTrait#OPTIONAL_PARTITION_BY}), in
* which case only one virtual processor handles the entire table, thereby losing scalability
* benefits.
*
* <h1>Implementation</h1>
*
* <p>The behavior of a {@link ProcessTableFunction} can be defined by implementing a custom
* evaluation method. The evaluation method must be declared publicly, not static, and named <code>
* eval</code>. Overloading is not supported.
*
* <p>For storing a user-defined function in a catalog, the
|
for
|
java
|
apache__kafka
|
streams/src/main/java/org/apache/kafka/streams/StreamsConfig.java
|
{
"start": 26974,
"end": 28433
}
|
class ____ implements the <code>org.apache.kafka.streams.errors.ProductionExceptionHandler</code> interface.";
/**
* {@code default.dsl.store}
* @deprecated Since 3.7. Use {@link #DSL_STORE_SUPPLIERS_CLASS_CONFIG} instead.
*/
@Deprecated
@SuppressWarnings("WeakerAccess")
public static final String DEFAULT_DSL_STORE_CONFIG = "default.dsl.store";
@Deprecated
public static final String DEFAULT_DSL_STORE_DOC = "The default state store type used by DSL operators.";
@Deprecated
public static final String ROCKS_DB = "rocksDB";
@Deprecated
public static final String IN_MEMORY = "in_memory";
@Deprecated
public static final String DEFAULT_DSL_STORE = ROCKS_DB;
/** {@code dsl.store.suppliers.class } */
public static final String DSL_STORE_SUPPLIERS_CLASS_CONFIG = "dsl.store.suppliers.class";
static final String DSL_STORE_SUPPLIERS_CLASS_DOC = "Defines which store implementations to plug in to DSL operators. Must implement the <code>org.apache.kafka.streams.state.DslStoreSuppliers</code> interface.";
static final Class<?> DSL_STORE_SUPPLIERS_CLASS_DEFAULT = BuiltInDslStoreSuppliers.RocksDBDslStoreSuppliers.class;
/** {@code default key.serde} */
@SuppressWarnings("WeakerAccess")
public static final String DEFAULT_KEY_SERDE_CLASS_CONFIG = "default.key.serde";
private static final String DEFAULT_KEY_SERDE_CLASS_DOC = "Default serializer / deserializer
|
that
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/filter/wall/mysql/MySqlWallTest_union.java
|
{
"start": 790,
"end": 2748
}
|
class ____ extends TestCase {
public void testUnion() throws Exception {
WallConfig config = new WallConfig();
config.setSelectUnionCheck(true);
assertFalse(WallUtils.isValidateMySql("select f1, f2 from t where id=1 union select 1, 2", config)); // not end of comment
assertFalse(WallUtils.isValidateMySql("select f1, f2 from t where id=1 union select 1, 2 --", config));
assertTrue(WallUtils.isValidateMySql("select f1, f2 from t union select 1, 2", config)); // no where
assertFalse(WallUtils.isValidateMySql("select f1, f2 from t where id=1 union select null, '1', 2 --", config));
assertTrue(WallUtils.isValidateMySql("select f1, f2 from t where id=1 union select c1, c2", config)); //union select item is not const
assertTrue(WallUtils.isValidateMySql("SELECT typeid, typename FROM (SELECT typeid, typename FROM materialtype UNION ALL SELECT ? AS typeid, ? AS typename) a ORDER BY typeid",
config)); // union select item has alias
assertFalse(WallUtils.isValidateMySql("select f1, f2 from (select 1 as f1, 2 as f2) t union select 'u1', 'u2' --", config)); // from is subQuery
assertTrue(WallUtils.isValidateMySql("select f1, f2 from t where id=1 union select 'u1' as u1, 'u2' as u2", config)); // union select item has alias
}
public void testUnion2() throws Exception {
// assertFalse(
// WallUtils.isValidateMySql("SELECT name, surname FROM users WHERE name='' UNION SELECT @@version, 'string1'")
// );
assertFalse(
WallUtils.isValidateMySql("SELECT name, surname FROM users WHERE name='' UNION SELECT /*! @@version,*/ 'string1'")
);
assertFalse(
WallUtils.isValidateMySql("SELECT name, surname FROM users WHERE name=' ' UNION SELECT /*! (select table_name FROM information_schema.tables limit 1,1),*/ 'string1'")
);
}
}
|
MySqlWallTest_union
|
java
|
assertj__assertj-core
|
assertj-core/src/main/java/org/assertj/core/api/PathAssert.java
|
{
"start": 693,
"end": 728
}
|
class ____ {@link Path}s
*/
public
|
for
|
java
|
apache__camel
|
components/camel-dapr/src/main/java/org/apache/camel/component/dapr/operations/DaprOperationHandler.java
|
{
"start": 898,
"end": 1299
}
|
interface ____ {
/**
* Execute the configured operation on this exchange.
*
* @param exchange the current exchange
*/
DaprOperationResponse handle(Exchange exchange);
/**
* Validates configuration based on operation on this exchange.
*
* @param exchange the current exchange
*/
void validateConfiguration(Exchange exchange);
}
|
DaprOperationHandler
|
java
|
quarkusio__quarkus
|
devtools/gradle/gradle-extension-plugin/src/test/java/io/quarkus/extension/gradle/tasks/ValidateExtensionTaskTest.java
|
{
"start": 505,
"end": 4032
}
|
class ____ {
@TempDir
File testProjectDir;
@Test
public void shouldValidateExtensionDependencies() throws IOException {
TestUtils.createExtensionProject(testProjectDir, false, List.of("io.quarkus:quarkus-core"),
List.of("io.quarkus:quarkus-core-deployment"));
BuildResult validationResult = GradleRunner.create()
.withPluginClasspath()
.withProjectDir(testProjectDir)
.withArguments(QuarkusExtensionPlugin.VALIDATE_EXTENSION_TASK_NAME)
.build();
assertThat(validationResult.task(":runtime:" + QuarkusExtensionPlugin.VALIDATE_EXTENSION_TASK_NAME).getOutcome())
.isEqualTo(TaskOutcome.SUCCESS);
}
@Test
public void shouldDetectMissionExtensionDependency() throws IOException {
TestUtils.createExtensionProject(testProjectDir, false, List.of("io.quarkus:quarkus-jdbc-h2"), List.of());
BuildResult validationResult = GradleRunner.create()
.withPluginClasspath()
.withProjectDir(testProjectDir)
.withArguments(QuarkusExtensionPlugin.VALIDATE_EXTENSION_TASK_NAME)
.buildAndFail();
assertThat(validationResult.task(":runtime:" + QuarkusExtensionPlugin.VALIDATE_EXTENSION_TASK_NAME).getOutcome())
.isEqualTo(TaskOutcome.FAILED);
assertThat(validationResult.getOutput()).contains("Quarkus Extension Dependency Verification Error");
assertThat(validationResult.getOutput())
.contains("The following deployment artifact(s) were found to be missing in the deployment module:");
assertThat(validationResult.getOutput()).contains("- io.quarkus:quarkus-jdbc-h2-deployment");
}
@Test
public void shouldDetectInvalidRuntimeDependency() throws IOException {
TestUtils.createExtensionProject(testProjectDir, false,
List.of("io.quarkus:quarkus-core", "io.quarkus:quarkus-core-deployment"), List.of());
BuildResult validationResult = GradleRunner.create()
.withPluginClasspath()
.withProjectDir(testProjectDir)
.withArguments(QuarkusExtensionPlugin.VALIDATE_EXTENSION_TASK_NAME)
.buildAndFail();
assertThat(validationResult.task(":runtime:" + QuarkusExtensionPlugin.VALIDATE_EXTENSION_TASK_NAME).getOutcome())
.isEqualTo(TaskOutcome.FAILED);
assertThat(validationResult.getOutput()).contains("Quarkus Extension Dependency Verification Error");
assertThat(validationResult.getOutput())
.contains("The following deployment artifact(s) appear on the runtime classpath:");
assertThat(validationResult.getOutput()).contains("- io.quarkus:quarkus-core-deployment");
}
@Test
public void shouldSkipValidationWhenDisabled() throws IOException {
TestUtils.createExtensionProject(testProjectDir, true,
List.of("io.quarkus:quarkus-core", "io.quarkus:quarkus-core-deployment"), List.of());
BuildResult validationResult = GradleRunner.create()
.withPluginClasspath()
.withProjectDir(testProjectDir)
.withArguments(QuarkusExtensionPlugin.VALIDATE_EXTENSION_TASK_NAME)
.build();
assertThat(validationResult.task(":runtime:" + QuarkusExtensionPlugin.VALIDATE_EXTENSION_TASK_NAME).getOutcome())
.isEqualTo(TaskOutcome.SKIPPED);
}
}
|
ValidateExtensionTaskTest
|
java
|
apache__logging-log4j2
|
log4j-core/src/main/java/org/apache/logging/log4j/core/config/builder/impl/DefaultScriptComponentBuilder.java
|
{
"start": 1025,
"end": 1615
}
|
class ____ extends DefaultComponentAndConfigurationBuilder<ScriptComponentBuilder>
implements ScriptComponentBuilder {
public DefaultScriptComponentBuilder(
final DefaultConfigurationBuilder<? extends Configuration> builder,
final String name,
final String language,
final String text) {
super(builder, name, "Script");
if (language != null) {
addAttribute("language", language);
}
if (text != null) {
addAttribute("text", text);
}
}
}
|
DefaultScriptComponentBuilder
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
|
{
"start": 47423,
"end": 99270
}
|
interface ____ reporting client progress
* @param buffersize underlying buffer size
* @param checksumOpt checksum options
*
* @return output stream
*
* @see ClientProtocol#create for detailed description of exceptions thrown
*/
/**
 * Convenience overload: create with no favored-nodes placement hint.
 * Delegates to the favored-nodes variant with {@code null} favored nodes.
 */
public DFSOutputStream create(String src, FsPermission permission,
EnumSet<CreateFlag> flag, boolean createParent, short replication,
long blockSize, Progressable progress, int buffersize,
ChecksumOpt checksumOpt) throws IOException {
return create(src, permission, flag, createParent, replication, blockSize,
progress, buffersize, checksumOpt, null);
}
/**
 * Applies the client's configured umask to a permission, substituting the
 * default file permission when none was supplied.
 * @param permission requested permission, or null for the file default
 * @return the umask-masked permission
 */
private FsPermission applyUMask(FsPermission permission) {
final FsPermission effective =
(permission != null) ? permission : FsPermission.getFileDefault();
return FsCreateModes.applyUMask(effective, dfsClientConf.getUMask());
}
/**
 * Applies the client's configured umask to a directory permission,
 * substituting the default directory permission when none was supplied.
 * @param permission requested permission, or null for the directory default
 * @return the umask-masked permission
 */
private FsPermission applyUMaskDir(FsPermission permission) {
final FsPermission effective =
(permission != null) ? permission : FsPermission.getDirDefault();
return FsCreateModes.applyUMask(effective, dfsClientConf.getUMask());
}
/**
 * Same as {@link #create(String, FsPermission, EnumSet, boolean, short, long,
 * Progressable, int, ChecksumOpt)} with the addition of favoredNodes that is
 * a hint to where the namenode should place the file blocks.
 * The favored nodes hint is not persisted in HDFS. Hence it may be honored
 * at the creation time only. HDFS could move the blocks during balancing or
 * replication, to move the blocks from favored nodes. A value of null means
 * no favored nodes for this create.
 * @return output stream for the newly created file
 */
public DFSOutputStream create(String src, FsPermission permission,
EnumSet<CreateFlag> flag, boolean createParent, short replication,
long blockSize, Progressable progress, int buffersize,
ChecksumOpt checksumOpt, InetSocketAddress[] favoredNodes)
throws IOException {
// null ecPolicyName: inherit any erasure coding policy from the parent dir.
return create(src, permission, flag, createParent, replication, blockSize,
progress, buffersize, checksumOpt, favoredNodes, null);
}
/**
 * Same as {@link #create(String, FsPermission, EnumSet, boolean, short, long,
 * Progressable, int, ChecksumOpt, InetSocketAddress[])} with the addition of
 * ecPolicyName that is used to specify a specific erasure coding policy
 * instead of inheriting any policy from this new file's parent directory.
 * This policy will be persisted in HDFS. A value of null means inheriting
 * the parent directory's policy.
 * @return output stream for the newly created file
 */
public DFSOutputStream create(String src, FsPermission permission,
EnumSet<CreateFlag> flag, boolean createParent, short replication,
long blockSize, Progressable progress, int buffersize,
ChecksumOpt checksumOpt, InetSocketAddress[] favoredNodes,
String ecPolicyName) throws IOException {
// null storagePolicy: inherit the storage policy from the parent directory.
return create(src, permission, flag, createParent, replication, blockSize,
progress, buffersize, checksumOpt, favoredNodes, ecPolicyName, null);
}
/**
 * Same as {@link #create(String, FsPermission, EnumSet, boolean, short, long,
 * Progressable, int, ChecksumOpt, InetSocketAddress[], String)}
 * with the storagePolicy that is used to specify a specific storage policy
 * instead of inheriting any policy from this new file's parent directory.
 * This policy will be persisted in HDFS. A value of null means inheriting
 * the parent directory's policy.
 * @return output stream for the newly created file
 */
public DFSOutputStream create(String src, FsPermission permission,
EnumSet<CreateFlag> flag, boolean createParent, short replication,
long blockSize, Progressable progress, int buffersize,
ChecksumOpt checksumOpt, InetSocketAddress[] favoredNodes,
String ecPolicyName, String storagePolicy)
throws IOException {
checkOpen();
// Apply the client's umask; a null permission falls back to the FS default.
final FsPermission masked = applyUMask(permission);
LOG.debug("{}: masked={}", src, masked);
final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this,
src, masked, flag, createParent, replication, blockSize, progress,
dfsClientConf.createChecksum(checksumOpt),
getFavoredNodesStr(favoredNodes), ecPolicyName, storagePolicy);
// Register the lease so this client is tracked as holding the file open.
beginFileLease(result.getUniqKey(), result);
return result;
}
/**
 * Renders favored-node addresses as "host:port" strings.
 * @param favoredNodes addresses to convert; may be null
 * @return one "host:port" string per input entry, or null for null input
 */
private String[] getFavoredNodesStr(InetSocketAddress[] favoredNodes) {
if (favoredNodes == null) {
return null;
}
final String[] rendered = new String[favoredNodes.length];
int slot = 0;
for (InetSocketAddress node : favoredNodes) {
rendered[slot++] = node.getHostName() + ":" + node.getPort();
}
return rendered;
}
/**
 * Append to an existing file if {@link CreateFlag#APPEND} is present.
 * @return an append stream, or null when the flags do not request an append,
 *         or when the file is absent but CREATE is set (caller will create it)
 * @throws FileNotFoundException when appending to a missing file without CREATE
 */
private DFSOutputStream primitiveAppend(String src, EnumSet<CreateFlag> flag,
Progressable progress) throws IOException {
if (flag.contains(CreateFlag.APPEND)) {
HdfsFileStatus stat = getFileInfo(src);
if (stat == null) { // No file to append to
// New file needs to be created if create option is present
if (!flag.contains(CreateFlag.CREATE)) {
throw new FileNotFoundException(
"failed to append to non-existent file " + src + " on client "
+ clientName);
}
return null;
}
return callAppend(src, flag, progress, null);
}
return null;
}
/**
 * Same as {{@link #create(String, FsPermission, EnumSet, short, long,
 * Progressable, int, ChecksumOpt)} except that the permission
 * is absolute (ie has already been masked with umask).
 */
public DFSOutputStream primitiveCreate(String src, FsPermission absPermission,
EnumSet<CreateFlag> flag, boolean createParent, short replication,
long blockSize, Progressable progress, int buffersize,
ChecksumOpt checksumOpt) throws IOException {
checkOpen();
CreateFlag.validate(flag);
// APPEND takes precedence: reuse the existing file when requested.
DFSOutputStream result = primitiveAppend(src, flag, progress);
if (result == null) {
// Not appending (or the file does not exist yet): create a new stream.
DataChecksum checksum = dfsClientConf.createChecksum(checksumOpt);
result = DFSOutputStream.newStreamForCreate(this, src, absPermission,
flag, createParent, replication, blockSize, progress, checksum,
null, null, null);
}
beginFileLease(result.getUniqKey(), result);
return result;
}
/**
 * Creates a symbolic link.
 *
 * @param target path the link should point at
 * @param link path of the link to create
 * @param createParent whether missing parent directories should be created
 * @see ClientProtocol#createSymlink(String, String,FsPermission, boolean)
 */
public void createSymlink(String target, String link, boolean createParent)
throws IOException {
checkOpen();
try (TraceScope ignored = newPathTraceScope("createSymlink", target)) {
// NOTE(review): uses the umask-applied *file* default (applyUMask, not
// applyUMaskDir) as the permission sent to the namenode — confirm intended.
final FsPermission dirPerm = applyUMask(null);
namenode.createSymlink(target, link, dirPerm, createParent);
} catch (RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
FileAlreadyExistsException.class,
FileNotFoundException.class,
ParentNotDirectoryException.class,
NSQuotaExceededException.class,
DSQuotaExceededException.class,
QuotaByStorageTypeExceededException.class,
UnresolvedPathException.class,
SnapshotAccessControlException.class);
}
}
/**
 * Resolve the *first* symlink, if any, in the path.
 *
 * @param path path whose first symlink component should be resolved
 * @return the link target as reported by the namenode
 * @see ClientProtocol#getLinkTarget(String)
 */
public String getLinkTarget(String path) throws IOException {
checkOpen();
try (TraceScope scope = newPathTraceScope("getLinkTarget", path)) {
return namenode.getLinkTarget(path);
} catch (RemoteException remote) {
throw remote.unwrapRemoteException(
AccessControlException.class, FileNotFoundException.class);
}
}
/**
 * Invoke namenode append RPC.
 * It retries in case of some {@link RetriableException}:
 * every 500ms for up to ~5 seconds before giving up.
 */
private LastBlockWithStatus callAppend(String src,
EnumSetWritable<CreateFlag> flag) throws IOException {
final long startTime = Time.monotonicNow();
for(;;) {
try {
return namenode.append(src, clientName, flag);
} catch(RemoteException re) {
// Rethrow once the 5s retry budget is exhausted, or when the failure
// is anything other than a server-side RetriableException.
if (Time.monotonicNow() - startTime > 5000
|| !RetriableException.class.getName().equals(
re.getClassName())) {
throw re;
}
try { // sleep and retry
Thread.sleep(500);
} catch (InterruptedException e) {
// Surface interruption as an InterruptedIOException to the caller.
throw DFSUtilClient.toInterruptedIOException("callAppend", e);
}
}
}
}
/**
 * Method to get stream returned by append call.
 * Issues the append RPC and wraps the returned last block into a writable
 * {@link DFSOutputStream}.
 */
private DFSOutputStream callAppend(String src, EnumSet<CreateFlag> flag,
Progressable progress, String[] favoredNodes) throws IOException {
CreateFlag.validateForAppend(flag);
try {
final LastBlockWithStatus blkWithStatus = callAppend(src,
new EnumSetWritable<>(flag, CreateFlag.class));
HdfsFileStatus status = blkWithStatus.getFileStatus();
if (status == null) {
// Older namenodes do not include the file status in the append
// response; fetch it with a separate RPC for compatibility.
LOG.debug("NameNode is on an older version, request file " +
"info with additional RPC call for file: {}", src);
status = getFileInfo(src);
}
return DFSOutputStream.newStreamForAppend(this, src, flag, progress,
blkWithStatus.getLastBlock(), status,
dfsClientConf.createChecksum(null), favoredNodes);
} catch(RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
FileNotFoundException.class,
SafeModeException.class,
DSQuotaExceededException.class,
QuotaByStorageTypeExceededException.class,
UnsupportedOperationException.class,
UnresolvedPathException.class,
SnapshotAccessControlException.class);
}
}
/**
* Append to an existing HDFS file.
*
* @param src file name
* @param buffersize buffer size
* @param flag indicates whether to append data to a new block instead of
* the last block
* @param progress for reporting write-progress; null is acceptable.
* @param statistics file system statistics; null is acceptable.
* @return an output stream for writing into the file
*
* @see ClientProtocol#append(String, String, EnumSetWritable)
*/
public HdfsDataOutputStream append(final String src, final int buffersize,
EnumSet<CreateFlag> flag, final Progressable progress,
final FileSystem.Statistics statistics) throws IOException {
final DFSOutputStream out = append(src, buffersize, flag, null, progress);
return createWrappedOutputStream(out, statistics, out.getInitialLen());
}
/**
* Append to an existing HDFS file.
*
* @param src file name
* @param buffersize buffer size
* @param flag indicates whether to append data to a new block instead of the
* last block
* @param progress for reporting write-progress; null is acceptable.
* @param statistics file system statistics; null is acceptable.
* @param favoredNodes FavoredNodes for new blocks
* @return an output stream for writing into the file
* @see ClientProtocol#append(String, String, EnumSetWritable)
*/
public HdfsDataOutputStream append(final String src, final int buffersize,
EnumSet<CreateFlag> flag, final Progressable progress,
final FileSystem.Statistics statistics,
final InetSocketAddress[] favoredNodes) throws IOException {
final DFSOutputStream out = append(src, buffersize, flag,
getFavoredNodesStr(favoredNodes), progress);
return createWrappedOutputStream(out, statistics, out.getInitialLen());
}
/**
 * Shared implementation behind the public append overloads: opens the append
 * stream and registers the file lease with this client.
 * @param favoredNodes "host:port" placement hints, or null for none
 */
private DFSOutputStream append(String src, int buffersize,
EnumSet<CreateFlag> flag, String[] favoredNodes, Progressable progress)
throws IOException {
checkOpen();
final DFSOutputStream result = callAppend(src, flag, progress,
favoredNodes);
beginFileLease(result.getUniqKey(), result);
return result;
}
/**
* Set replication for an existing file.
* @param src file name
* @param replication replication to set the file to
*
* @see ClientProtocol#setReplication(String, short)
*/
public boolean setReplication(String src, short replication)
throws IOException {
checkOpen();
try (TraceScope ignored = newPathTraceScope("setReplication", src)) {
return namenode.setReplication(src, replication);
} catch (RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
FileNotFoundException.class,
SafeModeException.class,
DSQuotaExceededException.class,
QuotaByStorageTypeExceededException.class,
UnresolvedPathException.class,
SnapshotAccessControlException.class);
}
}
/**
* Set storage policy for an existing file/directory
* @param src file/directory name
* @param policyName name of the storage policy
*/
public void setStoragePolicy(String src, String policyName)
throws IOException {
checkOpen();
try (TraceScope ignored = newPathTraceScope("setStoragePolicy", src)) {
namenode.setStoragePolicy(src, policyName);
} catch (RemoteException e) {
throw e.unwrapRemoteException(AccessControlException.class,
FileNotFoundException.class,
SafeModeException.class,
NSQuotaExceededException.class,
UnresolvedPathException.class,
SnapshotAccessControlException.class);
}
}
/**
* Unset storage policy set for a given file/directory.
* @param src file/directory name
*/
public void unsetStoragePolicy(String src) throws IOException {
checkOpen();
try (TraceScope ignored = newPathTraceScope("unsetStoragePolicy", src)) {
namenode.unsetStoragePolicy(src);
} catch (RemoteException e) {
throw e.unwrapRemoteException(AccessControlException.class,
FileNotFoundException.class,
SafeModeException.class,
NSQuotaExceededException.class,
UnresolvedPathException.class,
SnapshotAccessControlException.class);
}
}
/**
* @param path file/directory name
* @return Get the storage policy for specified path
*/
public BlockStoragePolicy getStoragePolicy(String path) throws IOException {
checkOpen();
try (TraceScope ignored = newPathTraceScope("getStoragePolicy", path)) {
return namenode.getStoragePolicy(path);
} catch (RemoteException e) {
throw e.unwrapRemoteException(AccessControlException.class,
FileNotFoundException.class,
SafeModeException.class,
UnresolvedPathException.class);
}
}
/**
* @return All the existing storage policies
*/
public BlockStoragePolicy[] getStoragePolicies() throws IOException {
checkOpen();
try (TraceScope ignored = tracer.newScope("getStoragePolicies")) {
return namenode.getStoragePolicies();
}
}
/**
* Rename file or directory.
* @see ClientProtocol#rename(String, String)
* @deprecated Use {@link #rename(String, String, Options.Rename...)} instead.
*/
@Deprecated
public boolean rename(String src, String dst) throws IOException {
checkOpen();
try (TraceScope ignored = newSrcDstTraceScope("rename", src, dst)) {
return namenode.rename(src, dst);
} catch (RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
NSQuotaExceededException.class,
DSQuotaExceededException.class,
QuotaByStorageTypeExceededException.class,
UnresolvedPathException.class,
SnapshotAccessControlException.class,
ParentNotDirectoryException.class);
}
}
/**
* Move blocks from src to trg and delete src
* See {@link ClientProtocol#concat}.
*/
public void concat(String trg, String [] srcs) throws IOException {
checkOpen();
try (TraceScope ignored = tracer.newScope("concat")) {
namenode.concat(trg, srcs);
} catch (RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
UnresolvedPathException.class,
SnapshotAccessControlException.class);
}
}
/**
* Rename file or directory.
* @see ClientProtocol#rename2(String, String, Options.Rename...)
*/
public void rename(String src, String dst, Options.Rename... options)
throws IOException {
checkOpen();
try (TraceScope ignored = newSrcDstTraceScope("rename2", src, dst)) {
namenode.rename2(src, dst, options);
} catch (RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
DSQuotaExceededException.class,
QuotaByStorageTypeExceededException.class,
FileAlreadyExistsException.class,
FileNotFoundException.class,
ParentNotDirectoryException.class,
SafeModeException.class,
NSQuotaExceededException.class,
UnresolvedPathException.class,
SnapshotAccessControlException.class);
}
}
/**
 * Truncate a file to an indicated size.
 * See {@link ClientProtocol#truncate}.
 * @param newLength desired file length; must be non-negative
 * @return the result of the namenode truncate call
 * @throws HadoopIllegalArgumentException when newLength is negative
 */
public boolean truncate(String src, long newLength) throws IOException {
checkOpen();
if (newLength < 0) {
throw new HadoopIllegalArgumentException(
"Cannot truncate to a negative file size: " + newLength + ".");
}
try (TraceScope ignored = newPathTraceScope("truncate", src)) {
return namenode.truncate(src, newLength, clientName);
} catch (RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
UnresolvedPathException.class);
}
}
/**
* Delete file or directory.
* See {@link ClientProtocol#delete(String, boolean)}.
*/
@Deprecated
public boolean delete(String src) throws IOException {
checkOpen();
return delete(src, true);
}
/**
* delete file or directory.
* delete contents of the directory if non empty and recursive
* set to true
*
* @see ClientProtocol#delete(String, boolean)
*/
public boolean delete(String src, boolean recursive) throws IOException {
checkOpen();
try (TraceScope ignored = newPathTraceScope("delete", src)) {
return namenode.delete(src, recursive);
} catch (RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
FileNotFoundException.class,
SafeModeException.class,
UnresolvedPathException.class,
SnapshotAccessControlException.class,
PathIsNotEmptyDirectoryException.class);
}
}
/**
 * Checks whether a path exists, implemented via {@link #getFileInfo(String)}.
 * @param src path to probe
 * @return true when the path names an existing file or directory
 */
public boolean exists(String src) throws IOException {
checkOpen();
final HdfsFileStatus status = getFileInfo(src);
return status != null;
}
/**
* Get a partial listing of the indicated directory
* No block locations need to be fetched
*/
public DirectoryListing listPaths(String src, byte[] startAfter)
throws IOException {
return listPaths(src, startAfter, false);
}
/**
* Get a partial listing of the indicated directory
*
* Recommend to use HdfsFileStatus.EMPTY_NAME as startAfter
* if the application wants to fetch a listing starting from
* the first entry in the directory
*
* @see ClientProtocol#getListing(String, byte[], boolean)
*/
public DirectoryListing listPaths(String src, byte[] startAfter,
boolean needLocation) throws IOException {
checkOpen();
try (TraceScope ignored = newPathTraceScope("listPaths", src)) {
return namenode.getListing(src, startAfter, needLocation);
} catch (RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
FileNotFoundException.class,
UnresolvedPathException.class);
}
}
/**
* Get a batched listing for the indicated directories
*
* @see ClientProtocol#getBatchedListing(String[], byte[], boolean)
*/
public BatchedDirectoryListing batchedListPaths(
String[] srcs, byte[] startAfter, boolean needLocation)
throws IOException {
checkOpen();
try {
return namenode.getBatchedListing(srcs, startAfter, needLocation);
} catch(RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
FileNotFoundException.class,
UnresolvedPathException.class);
}
}
/**
* Get the file info for a specific file or directory.
* @param src The string representation of the path to the file
* @return object containing information regarding the file
* or null if file not found
*
* @see ClientProtocol#getFileInfo(String) for description of exceptions
*/
public HdfsFileStatus getFileInfo(String src) throws IOException {
checkOpen();
try (TraceScope ignored = newPathTraceScope("getFileInfo", src)) {
return namenode.getFileInfo(src);
} catch (RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
FileNotFoundException.class,
UnresolvedPathException.class);
}
}
/**
* Get the file info for a specific file or directory.
* @param src The string representation of the path to the file
* @param needBlockToken Include block tokens in {@link LocatedBlocks}.
* When block tokens are included, this call is a superset of
* {@link #getBlockLocations(String, long)}.
* @return object containing information regarding the file
* or null if file not found
*
* @see DFSClient#open(HdfsPathHandle, int, boolean)
* @see ClientProtocol#getFileInfo(String) for description of
* exceptions
*/
public HdfsLocatedFileStatus getLocatedFileInfo(String src,
boolean needBlockToken) throws IOException {
checkOpen();
try (TraceScope ignored = newPathTraceScope("getLocatedFileInfo", src)) {
return namenode.getLocatedFileInfo(src, needBlockToken);
} catch (RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
FileNotFoundException.class,
UnresolvedPathException.class);
}
}
/**
* Close status of a file
* @return true if file is already closed
*/
public boolean isFileClosed(String src) throws IOException{
checkOpen();
try (TraceScope ignored = newPathTraceScope("isFileClosed", src)) {
return namenode.isFileClosed(src);
} catch (RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
FileNotFoundException.class,
UnresolvedPathException.class);
}
}
/**
* Get the file info for a specific file or directory. If src
* refers to a symlink then the FileStatus of the link is returned.
* @param src path to a file or directory.
*
* For description of exceptions thrown
* @see ClientProtocol#getFileLinkInfo(String)
*/
public HdfsFileStatus getFileLinkInfo(String src) throws IOException {
checkOpen();
try (TraceScope ignored = newPathTraceScope("getFileLinkInfo", src)) {
return namenode.getFileLinkInfo(src);
} catch (RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
UnresolvedPathException.class);
}
}
/** Drops the cached data-transfer encryption key; the next use refetches it. */
@InterfaceAudience.Private
public void clearDataEncryptionKey() {
LOG.debug("Clearing encryption key");
synchronized (this) {
encryptionKey = null;
}
}
/**
 * @return true if data sent between this client and DNs should be encrypted,
 * false otherwise.
 * @throws IOException in the event of error communicating with the NN
 */
boolean shouldEncryptData() throws IOException {
final FsServerDefaults defaults = getServerDefaults();
if (defaults == null) {
return false;
}
return defaults.getEncryptDataTransfer();
}
/**
 * Lazily fetches (and caches) a data-transfer encryption key from the NN.
 * @return a valid encryption key, or null when transfer encryption is off
 */
@Override
public DataEncryptionKey newDataEncryptionKey() throws IOException {
if (shouldEncryptData()) {
synchronized (this) {
// Refresh only when no key is cached or the cached one has expired.
if (encryptionKey == null ||
encryptionKey.expiryDate < Time.now()) {
LOG.debug("Getting new encryption token from NN")
|
|
|
/** @return the currently cached encryption key (test hook); may be null. */
@VisibleForTesting
public DataEncryptionKey getEncryptionKey() {
return encryptionKey;
}
/**
 * Computes the checksum of the file range [0, length].
 * Chooses a striped or replicated checksum computer depending on whether
 * the file carries an erasure coding policy.
 */
private FileChecksum getFileChecksumInternal(
String src, long length, ChecksumCombineMode combineMode)
throws IOException {
checkOpen();
Preconditions.checkArgument(length >= 0);
LocatedBlocks blockLocations = null;
FileChecksumHelper.FileChecksumComputer maker = null;
ErasureCodingPolicy ecPolicy = null;
if (length > 0) {
// Only fetch block locations for a non-empty range; for length == 0
// the computers below receive null locations.
blockLocations = getBlockLocations(src, length);
ecPolicy = blockLocations.getErasureCodingPolicy();
}
// EC files need the striped computer; otherwise use the replicated one.
maker = ecPolicy != null ?
new FileChecksumHelper.StripedFileNonStripedChecksumComputer(src,
length, blockLocations, namenode, this, ecPolicy, combineMode) :
new FileChecksumHelper.ReplicatedFileChecksumComputer(src, length,
blockLocations, namenode, this, combineMode);
maker.compute();
return maker.getFileChecksum();
}
/**
* Get the checksum of the whole file or a range of the file. Note that the
* range always starts from the beginning of the file. The file can be
* in replicated form, or striped mode. Depending on the
* dfs.checksum.combine.mode, checksums may or may not be comparable between
* different block layout forms.
*
* @param src The file path
* @param length the length of the range, i.e., the range is [0, length]
* @return The checksum
* @see DistributedFileSystem#getFileChecksum(Path)
*/
public FileChecksum getFileChecksumWithCombineMode(String src, long length)
throws IOException {
ChecksumCombineMode combineMode = getConf().getChecksumCombineMode();
return getFileChecksumInternal(src, length, combineMode);
}
/**
* Get the checksum of the whole file or a range of the file. Note that the
* range always starts from the beginning of the file. The file can be
* in replicated form, or striped mode. It can be used to checksum and compare
* two replicated files, or two striped files, but not applicable for two
* files of different block layout forms.
*
* @param src The file path
* @param length the length of the range, i.e., the range is [0, length]
* @return The checksum
* @see DistributedFileSystem#getFileChecksum(Path)
*/
public MD5MD5CRC32FileChecksum getFileChecksum(String src, long length)
throws IOException {
return (MD5MD5CRC32FileChecksum) getFileChecksumInternal(
src, length, ChecksumCombineMode.MD5MD5CRC);
}
/**
 * Fetches block locations for the range [0, length] of a file.
 * @throws FileNotFoundException when the path does not exist
 * @throws IOException when the file is still under construction
 */
protected LocatedBlocks getBlockLocations(String src,
long length) throws IOException {
//get block locations for the file range
LocatedBlocks blockLocations = callGetBlockLocations(namenode,
src, 0, length);
if (null == blockLocations) {
throw new FileNotFoundException("File does not exist: " + src);
}
// Checksums are undefined while the last block may still change.
if (blockLocations.isUnderConstruction()) {
throw new IOException("Fail to get checksum, since file " + src
+ " is under construction.");
}
return blockLocations;
}
protected IOStreamPair connectToDN(DatanodeInfo dn, int timeout,
Token<BlockTokenIdentifier> blockToken)
throws IOException {
return DFSUtilClient.connectToDN(dn, timeout, conf, saslClient,
socketFactory, getConf().isConnectToDnViaHostname(), this, blockToken);
}
/**
 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
 * for the first byte of that replica. This is used for compatibility
 * with older HDFS versions which did not include the checksum type in
 * OpBlockChecksumResponseProto.
 *
 * @param lb the located block
 * @param dn the connected datanode
 * @return the inferred checksum type
 * @throws IOException if an error occurs
 */
protected Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn)
throws IOException {
IOStreamPair pair = connectToDN(dn, dfsClientConf.getSocketTimeout(),
lb.getBlockToken());
try {
// Request a 1-byte read at offset 0 purely to elicit the checksum
// metadata carried in the read response.
new Sender((DataOutputStream) pair.out).readBlock(lb.getBlock(),
lb.getBlockToken(), clientName,
0, 1, true, CachingStrategy.newDefaultStrategy());
final BlockOpResponseProto reply =
BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(pair.in));
String logInfo = "trying to read " + lb.getBlock() + " from datanode " +
dn;
DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);
return PBHelperClient.convert(
reply.getReadOpChecksumInfo().getChecksum().getType());
} finally {
// Always close both directions of the datanode connection.
IOUtilsClient.cleanupWithLogger(LOG, pair.in, pair.out);
}
}
/**
* Set permissions to a file or directory.
* @param src path name.
* @param permission permission to set to
*
* @see ClientProtocol#setPermission(String, FsPermission)
*/
public void setPermission(String src, FsPermission permission)
throws IOException {
checkOpen();
try (TraceScope ignored = newPathTraceScope("setPermission", src)) {
namenode.setPermission(src, permission);
} catch (RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
FileNotFoundException.class,
SafeModeException.class,
UnresolvedPathException.class,
SnapshotAccessControlException.class);
}
}
/**
* Set file or directory owner.
* @param src path name.
* @param username user id.
* @param groupname user group.
*
* @see ClientProtocol#setOwner(String, String, String)
*/
public void setOwner(String src, String username, String groupname)
throws IOException {
checkOpen();
try (TraceScope ignored = newPathTraceScope("setOwner", src)) {
namenode.setOwner(src, username, groupname);
} catch (RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
FileNotFoundException.class,
SafeModeException.class,
UnresolvedPathException.class,
SnapshotAccessControlException.class);
}
}
/**
 * Fetches a single value from the namenode stats array.
 * @param stateIndex index into the array returned by
 *                   {@link ClientProtocol#getStats()}
 * @return the value at stateIndex, or -1 when the returned array is shorter
 * @throws IOException if the client is closed or the RPC fails
 */
private long getStateByIndex(int stateIndex) throws IOException {
checkOpen();
try (TraceScope ignored = tracer.newScope("getStats")) {
// Reuse the shared bounds-checked accessor instead of duplicating
// its length check inline.
return getStateAtIndex(namenode.getStats(), stateIndex);
}
}
/**
 * Fetches aggregate filesystem status from the namenode stats array.
 * @see ClientProtocol#getStats()
 */
public FsStatus getDiskStatus() throws IOException {
try (TraceScope ignored = tracer.newScope("getStats")) {
long[] states = namenode.getStats();
// The first three stats slots feed the FsStatus constructor
// (capacity, used, remaining) — see FsStatus for argument order.
return new FsStatus(getStateAtIndex(states, 0),
getStateAtIndex(states, 1), getStateAtIndex(states, 2));
} catch (RemoteException re) {
throw re.unwrapRemoteException();
}
}
/**
 * Safely reads one slot of a namenode stats array.
 * @param states stats values returned by the namenode
 * @param index slot to read
 * @return the requested value, or -1 when index is beyond the array length
 */
public static long getStateAtIndex(long[] states, int index) {
if (index >= states.length) {
return -1;
}
return states[index];
}
/**
* Returns count of blocks with no good replicas left. Normally should be
* zero.
* @throws IOException
*/
public long getMissingBlocksCount() throws IOException {
return getStateByIndex(ClientProtocol.
GET_STATS_MISSING_BLOCKS_IDX);
}
/**
* Returns count of blocks with replication factor 1 and have
* lost the only replica.
* @throws IOException
*/
public long getMissingReplOneBlocksCount() throws IOException {
return getStateByIndex(ClientProtocol.
GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX);
}
/**
* Returns count of blocks pending on deletion.
* @throws IOException
*/
public long getPendingDeletionBlocksCount() throws IOException {
return getStateByIndex(ClientProtocol.
GET_STATS_PENDING_DELETION_BLOCKS_IDX);
}
/**
* Returns aggregated count of blocks with less redundancy.
* @throws IOException
*/
public long getLowRedundancyBlocksCount() throws IOException {
return getStateByIndex(ClientProtocol.GET_STATS_LOW_REDUNDANCY_IDX);
}
/**
* Returns count of blocks with at least one replica marked corrupt.
* @throws IOException
*/
public long getCorruptBlocksCount() throws IOException {
return getStateByIndex(ClientProtocol.
GET_STATS_CORRUPT_BLOCKS_IDX);
}
/**
* Returns number of bytes that reside in Blocks with future generation
* stamps.
* @return Bytes in Blocks with future generation stamps.
* @throws IOException
*/
public long getBytesInFutureBlocks() throws IOException {
return getStateByIndex(ClientProtocol.
GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX);
}
/**
* @return a list in which each entry describes a corrupt file/block
* @throws IOException
*/
public CorruptFileBlocks listCorruptFileBlocks(String path, String cookie)
throws IOException {
checkOpen();
try (TraceScope ignored
= newPathTraceScope("listCorruptFileBlocks", path)) {
return namenode.listCorruptFileBlocks(path, cookie);
}
}
public DatanodeInfo[] datanodeReport(DatanodeReportType type)
throws IOException {
checkOpen();
try (TraceScope ignored = tracer.newScope("datanodeReport")) {
return namenode.getDatanodeReport(type);
}
}
public DatanodeStorageReport[] getDatanodeStorageReport(
DatanodeReportType type) throws IOException {
checkOpen();
try (TraceScope ignored = tracer.newScope("datanodeStorageReport")) {
return namenode.getDatanodeStorageReport(type);
}
}
/**
* Enter, leave or get safe mode.
*
* @see ClientProtocol#setSafeMode(HdfsConstants.SafeModeAction,boolean)
*/
public boolean setSafeMode(SafeModeAction action) throws IOException {
checkOpen();
return setSafeMode(action, false);
}
/**
 * Enter, leave or get safe mode.
 *
 * @param action
 *          One of SafeModeAction.GET, SafeModeAction.ENTER and
 *          SafeModeAction.LEAVE
 * @param isChecked
 *          If true, then check only active namenode's safemode status, else
 *          check first namenode's status.
 * @see ClientProtocol#setSafeMode(HdfsConstants.SafeModeAction, boolean)
 */
public boolean setSafeMode(SafeModeAction action, boolean isChecked)
throws IOException{
// NOTE(review): unlike the single-argument overload, this variant does not
// call checkOpen() before the RPC — confirm this is intentional.
try (TraceScope ignored = tracer.newScope("setSafeMode")) {
return namenode.setSafeMode(action, isChecked);
}
}
/**
* Create one snapshot.
*
* @param snapshotRoot The directory where the snapshot is to be taken
* @param snapshotName Name of the snapshot
* @return the snapshot path.
* @see ClientProtocol#createSnapshot(String, String)
*/
public String createSnapshot(String snapshotRoot, String snapshotName)
throws IOException {
checkOpen();
try (TraceScope ignored = tracer.newScope("createSnapshot")) {
return namenode.createSnapshot(snapshotRoot, snapshotName);
} catch (RemoteException re) {
throw re.unwrapRemoteException();
}
}
/**
* Delete a snapshot of a snapshottable directory.
*
* @param snapshotRoot The snapshottable directory that the
* to-be-deleted snapshot belongs to
* @param snapshotName The name of the to-be-deleted snapshot
* @throws IOException
* @see ClientProtocol#deleteSnapshot(String, String)
*/
public void deleteSnapshot(String snapshotRoot, String snapshotName)
throws IOException {
checkOpen();
try (TraceScope ignored = tracer.newScope("deleteSnapshot")) {
namenode.deleteSnapshot(snapshotRoot, snapshotName);
} catch (RemoteException re) {
throw re.unwrapRemoteException();
}
}
/**
* Rename a snapshot.
* @param snapshotDir The directory path where the snapshot was taken
* @param snapshotOldName Old name of the snapshot
* @param snapshotNewName New name of the snapshot
* @throws IOException
* @see ClientProtocol#renameSnapshot(String, String, String)
*/
public void renameSnapshot(String snapshotDir, String snapshotOldName,
String snapshotNewName) throws IOException {
checkOpen();
try (TraceScope ignored = tracer.newScope("renameSnapshot")) {
namenode.renameSnapshot(snapshotDir, snapshotOldName, snapshotNewName);
} catch (RemoteException re) {
throw re.unwrapRemoteException();
}
}
/**
* Get all the current snapshottable directories.
* @return All the current snapshottable directories
* @throws IOException
* @see ClientProtocol#getSnapshottableDirListing()
*/
public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
throws IOException {
checkOpen();
try (TraceScope ignored = tracer.newScope("getSnapshottableDirListing")) {
return namenode.getSnapshottableDirListing();
} catch (RemoteException re) {
throw re.unwrapRemoteException();
}
}
/**
* Get listing of all the snapshots for a snapshottable directory.
*
* @return Information about all the snapshots for a snapshottable directory
* @throws IOException If an I/O error occurred
* @see ClientProtocol#getSnapshotListing(String)
*/
public SnapshotStatus[] getSnapshotListing(String snapshotRoot)
throws IOException {
checkOpen();
try (TraceScope ignored = tracer.newScope("getSnapshotListing")) {
return namenode.getSnapshotListing(snapshotRoot);
} catch (RemoteException re) {
throw re.unwrapRemoteException();
}
}
/**
* Allow snapshot on a directory.
*
* @see ClientProtocol#allowSnapshot(String snapshotRoot)
*/
public void allowSnapshot(String snapshotRoot) throws IOException {
checkOpen();
try (TraceScope ignored = tracer.newScope("allowSnapshot")) {
namenode.allowSnapshot(snapshotRoot);
} catch (RemoteException re) {
throw re.unwrapRemoteException();
}
}
/**
* Disallow snapshot on a directory.
*
* @see ClientProtocol#disallowSnapshot(String snapshotRoot)
*/
public void disallowSnapshot(String snapshotRoot) throws IOException {
checkOpen();
try (TraceScope ignored = tracer.newScope("disallowSnapshot")) {
namenode.disallowSnapshot(snapshotRoot);
} catch (RemoteException re) {
throw re.unwrapRemoteException();
}
}
/**
* Get the difference between two snapshots, or between a snapshot and the
* current tree of a directory.
* @see ClientProtocol#getSnapshotDiffReport
*/
public SnapshotDiffReport getSnapshotDiffReport(String snapshotDir,
String fromSnapshot, String toSnapshot) throws IOException {
checkOpen();
try (TraceScope ignored = tracer.newScope("getSnapshotDiffReport")) {
Preconditions.checkArgument(fromSnapshot != null,
"null fromSnapshot");
Preconditions.checkArgument(toSnapshot != null,
"null toSnapshot");
return namenode
.getSnapshotDiffReport(snapshotDir, fromSnapshot, toSnapshot);
} catch (RemoteException re) {
throw re.unwrapRemoteException();
}
}
/**
* Get the difference between two snapshots of a directory iteratively.
* @see ClientProtocol#getSnapshotDiffReportListing
*/
public SnapshotDiffReportListing getSnapshotDiffReportListing(
String snapshotDir, String fromSnapshot, String toSnapshot,
byte[] startPath, int index) throws IOException {
checkOpen();
try (TraceScope ignored = tracer.newScope("getSnapshotDiffReport")) {
return namenode
.getSnapshotDiffReportListing(snapshotDir, fromSnapshot, toSnapshot,
startPath, index);
} catch (RemoteException re) {
throw re.unwrapRemoteException();
}
}
public long addCacheDirective(
CacheDirectiveInfo info, EnumSet<CacheFlag> flags) throws IOException {
checkOpen();
try (TraceScope ignored = tracer.newScope("addCacheDirective")) {
return namenode.addCacheDirective(info, flags);
} catch (RemoteException re) {
throw re.unwrapRemoteException();
}
}
public void modifyCacheDirective(
CacheDirectiveInfo info, EnumSet<CacheFlag> flags) throws IOException {
checkOpen();
try (TraceScope ignored = tracer.newScope("modifyCacheDirective")) {
namenode.modifyCacheDirective(info, flags);
} catch (RemoteException re) {
throw re.unwrapRemoteException();
}
}
public void removeCacheDirective(long id)
throws IOException {
checkOpen();
try (TraceScope ignored = tracer.newScope("removeCacheDirective")) {
namenode.removeCacheDirective(id);
} catch (RemoteException re) {
throw re.unwrapRemoteException();
}
}
public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
CacheDirectiveInfo filter) throws IOException {
checkOpen();
return new CacheDirectiveIterator(namenode, filter, tracer);
}
public void addCachePool(CachePoolInfo info) throws IOException {
checkOpen();
try (TraceScope ignored = tracer.newScope("addCachePool")) {
namenode.addCachePool(info);
} catch (RemoteException re) {
throw re.unwrapRemoteException();
}
}
public void modifyCachePool(CachePoolInfo info) throws IOException {
checkOpen();
try (TraceScope ignored = tracer.newScope("modifyCachePool")) {
namenode.modifyCachePool(info);
} catch (RemoteException re) {
throw re.unwrapRemoteException();
}
}
public void removeCachePool(String poolName) throws IOException {
checkOpen();
try (TraceScope ignored = tracer.newScope("removeCachePool")) {
namenode.removeCachePool(poolName);
} catch (RemoteException re) {
throw re.unwrapRemoteException();
}
}
public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
checkOpen();
return new CachePoolIterator(namenode, tracer);
}
/**
* Save namespace image.
*
* @see ClientProtocol#saveNamespace(long, long)
*/
boolean saveNamespace(long timeWindow, long txGap) throws IOException {
checkOpen();
try (TraceScope ignored = tracer.newScope("saveNamespace")) {
return namenode.saveNamespace(timeWindow, txGap);
} catch (RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class);
}
}
/**
* Rolls the edit log on the active NameNode.
* @return the txid of the new log segment
*
* @see ClientProtocol#rollEdits()
*/
long rollEdits() throws IOException {
checkOpen();
try (TraceScope ignored = tracer.newScope("rollEdits")) {
return namenode.rollEdits();
} catch (RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class);
}
}
@VisibleForTesting
ExtendedBlock getPreviousBlock(String key) {
return filesBeingWritten.get(key).getBlock();
}
/**
* enable/disable restore failed storage.
*
* @see ClientProtocol#restoreFailedStorage(String arg)
*/
boolean restoreFailedStorage(String arg) throws IOException{
checkOpen();
try (TraceScope ignored = tracer.newScope("restoreFailedStorage")) {
return namenode.restoreFailedStorage(arg);
}
}
/**
* Refresh the hosts and exclude files. (Rereads them.)
* See {@link ClientProtocol#refreshNodes()}
* for more details.
*
* @see ClientProtocol#refreshNodes()
*/
public void refreshNodes() throws IOException {
checkOpen();
try (TraceScope ignored = tracer.newScope("refreshNodes")) {
namenode.refreshNodes();
}
}
/**
* Dumps DFS data structures into specified file.
*
* @see ClientProtocol#metaSave(String)
*/
public void metaSave(String pathname) throws IOException {
checkOpen();
try (TraceScope ignored = tracer.newScope("metaSave")) {
namenode.metaSave(pathname);
}
}
/**
* Requests the namenode to tell all datanodes to use a new, non-persistent
* bandwidth value for dfs.datanode.balance.bandwidthPerSec.
* See {@link ClientProtocol#setBalancerBandwidth(long)}
* for more details.
*
* @see ClientProtocol#setBalancerBandwidth(long)
*/
public void setBalancerBandwidth(long bandwidth) throws IOException {
checkOpen();
try (TraceScope ignored = tracer.newScope("setBalancerBandwidth")) {
namenode.setBalancerBandwidth(bandwidth);
}
}
/**
* @see ClientProtocol#finalizeUpgrade()
*/
public void finalizeUpgrade() throws IOException {
checkOpen();
try (TraceScope ignored = tracer.newScope("finalizeUpgrade")) {
namenode.finalizeUpgrade();
}
}
/**
* @see ClientProtocol#upgradeStatus()
*/
public boolean upgradeStatus() throws IOException {
checkOpen();
try (TraceScope ignored = tracer.newScope("isUpgradeFinalized")) {
return namenode.upgradeStatus();
}
}
RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action)
throws IOException {
checkOpen();
try (TraceScope ignored = tracer.newScope("rollingUpgrade")) {
return namenode.rollingUpgrade(action);
}
}
@Deprecated
public boolean mkdirs(String src) throws IOException {
return mkdirs(src, null, true);
}
/**
* Create a directory (or hierarchy of directories) with the given
* name and permission.
*
* @param src The path of the directory being created
* @param permission The permission of the directory being created.
* If permission == null, use {@link FsPermission#getDirDefault()}.
* @param createParent create missing parent directory if true
*
* @return True if the operation success.
*
* @see ClientProtocol#mkdirs(String, FsPermission, boolean)
*/
public boolean mkdirs(String src, FsPermission permission,
boolean createParent) throws IOException {
final FsPermission masked = applyUMaskDir(permission);
return primitiveMkdir(src, masked, createParent);
}
/**
* Same {{@link #mkdirs(String, FsPermission, boolean)} except
* that the permissions has already been masked against umask.
*/
public boolean primitiveMkdir(String src, FsPermission absPermission)
throws IOException {
return primitiveMkdir(src, absPermission, true);
}
/**
* Same {{@link #mkdirs(String, FsPermission, boolean)} except
* that the permissions has already been masked against umask.
*/
public boolean primitiveMkdir(String src, FsPermission absPermission,
boolean createParent) throws IOException {
checkOpen();
if (absPermission == null) {
absPermission = applyUMaskDir(null);
}
LOG.debug("{}: masked={}", src, absPermission);
try (TraceScope ignored = tracer.newScope("mkdir")) {
return namenode.mkdirs(src, absPermission, createParent);
} catch (RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
InvalidPathException.class,
FileAlreadyExistsException.class,
FileNotFoundException.class,
ParentNotDirectoryException.class,
SafeModeException.class,
NSQuotaExceededException.class,
DSQuotaExceededException.class,
QuotaByStorageTypeExceededException.class,
UnresolvedPathException.class,
SnapshotAccessControlException.class);
}
}
/**
* Get {@link ContentSummary} rooted at the specified directory.
* @param src The string representation of the path
*
* @see ClientProtocol#getContentSummary(String)
*/
ContentSummary getContentSummary(String src) throws IOException {
checkOpen();
try (TraceScope ignored = newPathTraceScope("getContentSummary", src)) {
return namenode.getContentSummary(src);
} catch (RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
FileNotFoundException.class,
UnresolvedPathException.class);
}
}
/**
* Get {@link QuotaUsage} rooted at the specified directory.
* @param src The string representation of the path
*
* @see ClientProtocol#getQuotaUsage(String)
*/
QuotaUsage getQuotaUsage(String src) throws IOException {
checkOpen();
try (TraceScope ignored = newPathTraceScope("getQuotaUsage", src)) {
return namenode.getQuotaUsage(src);
} catch(RemoteException re) {
IOException ioe = re.unwrapRemoteException(AccessControlException.class,
FileNotFoundException.class,
UnresolvedPathException.class,
RpcNoSuchMethodException.class);
if (ioe instanceof RpcNoSuchMethodException) {
LOG.debug("The version of namenode doesn't support getQuotaUsage API." +
" Fall back to use getContentSummary API.");
return getContentSummary(src);
} else {
throw ioe;
}
}
}
/**
* Sets or resets quotas for a directory.
* @see ClientProtocol#setQuota(String, long, long, StorageType)
*/
void setQuota(String src, long namespaceQuota, long storagespaceQuota)
throws IOException {
checkOpen();
// sanity check
if ((namespaceQuota <= 0 &&
namespaceQuota != HdfsConstants.QUOTA_DONT_SET &&
namespaceQuota != HdfsConstants.QUOTA_RESET) ||
(storagespaceQuota < 0 &&
storagespaceQuota != HdfsConstants.QUOTA_DONT_SET &&
storagespaceQuota != HdfsConstants.QUOTA_RESET)) {
throw new IllegalArgumentException("Invalid values for quota : " +
namespaceQuota + " and " +
storagespaceQuota);
}
try (TraceScope ignored = newPathTraceScope("setQuota", src)) {
// Pass null as storage type for traditional namespace/storagespace quota.
namenode.setQuota(src, namespaceQuota, storagespaceQuota, null);
} catch (RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
FileNotFoundException.class,
NSQuotaExceededException.class,
DSQuotaExceededException.class,
QuotaByStorageTypeExceededException.class,
UnresolvedPathException.class,
SnapshotAccessControlException.class);
}
}
/**
* Sets or resets quotas by storage type for a directory.
* @see ClientProtocol#setQuota(String, long, long, StorageType)
*/
void setQuotaByStorageType(String src, StorageType type, long quota)
throws IOException {
checkOpen();
if (quota <= 0 && quota != HdfsConstants.QUOTA_DONT_SET &&
quota != HdfsConstants.QUOTA_RESET) {
throw new IllegalArgumentException("Invalid values for quota :" +
quota);
}
if (type == null) {
throw new IllegalArgumentException("Invalid storage type(null)");
}
if (!type.supportTypeQuota()) {
throw new IllegalArgumentException(
"Don't support Quota for storage type : " + type.toString());
}
try (TraceScope ignored = newPathTraceScope("setQuotaByStorageType", src)) {
namenode.setQuota(src, HdfsConstants.QUOTA_DONT_SET, quota, type);
} catch (RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
FileNotFoundException.class,
QuotaByStorageTypeExceededException.class,
UnresolvedPathException.class,
SnapshotAccessControlException.class);
}
}
/**
* set the modification and access time of a file.
*
* @see ClientProtocol#setTimes(String, long, long)
*/
public void setTimes(String src, long mtime, long atime) throws IOException {
checkOpen();
try (TraceScope ignored = newPathTraceScope("setTimes", src)) {
namenode.setTimes(src, mtime, atime);
} catch (RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
FileNotFoundException.class,
UnresolvedPathException.class,
SnapshotAccessControlException.class);
}
}
/**
* @deprecated use {@link HdfsDataInputStream} instead.
*/
@Deprecated
public static
|
for
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/jpa/criteria/valuehandlingmode/inline/EntityGraphTest.java
|
{
"start": 2510,
"end": 2730
}
|
class ____ {
@Id
@GeneratedValue
public Integer id;
@ManyToOne(fetch = FetchType.LAZY)
public Bar bar;
@ManyToOne(fetch = FetchType.LAZY)
public Baz baz;
}
@Entity
@Table(name = "bar")
public static
|
Foo
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/formatstring/AnnotateFormatMethodTest.java
|
{
"start": 1185,
"end": 1658
}
|
class ____ {
// BUG: Diagnostic contains: FormatMethod
String formatMe(String formatString, Object... args) {
return String.format(formatString, args);
}
}
""")
.doTest();
}
@Test
public void passedThroughToLambda() {
compilationHelper
.addSourceLines(
"AnnotateFormatMethodPositiveCases.java",
"""
|
AnnotateFormatMethodPositiveCases
|
java
|
spring-projects__spring-framework
|
spring-core/src/main/java/org/springframework/util/JdkIdGenerator.java
|
{
"start": 822,
"end": 942
}
|
class ____ implements IdGenerator {
@Override
public UUID generateId() {
return UUID.randomUUID();
}
}
|
JdkIdGenerator
|
java
|
micronaut-projects__micronaut-core
|
http-client-tck/src/main/java/io/micronaut/http/client/tck/tests/DecompressionConfigTest.java
|
{
"start": 1354,
"end": 4004
}
|
class ____ {
static final String SPEC_NAME = "DecompressionConfigTest";
private static final byte[] UNCOMPRESSED = "Hello, gzip!".getBytes(StandardCharsets.UTF_8);
private static final byte[] GZIPPED = gzip(UNCOMPRESSED);
@Test
void gzipPreservedWhenDecompressionDisabled() throws Exception {
try (ServerUnderTest server = ServerUnderTestProviderUtils.getServerUnderTestProvider()
.getServer(SPEC_NAME, Map.of("micronaut.http.client.decompression-enabled", "false"));
RawHttpClient client = server.getApplicationContext().createBean(RawHttpClient.class);
ByteBodyHttpResponse<?> response = Mono.from(
client.exchange(HttpRequest.GET(server.getURL().get() + "/decompression/gzip"), null, null))
.cast(ByteBodyHttpResponse.class)
.block()) {
// Body should still be compressed
byte[] body = response.byteBody().buffer().get().toByteArray();
Assertions.assertArrayEquals(GZIPPED, body);
// Header should be preserved
Assertions.assertEquals("gzip", response.getHeaders().get(HttpHeaders.CONTENT_ENCODING));
}
}
@Test
void gzipIsDecompressedByDefault() throws Exception {
try (ServerUnderTest server = ServerUnderTestProviderUtils.getServerUnderTestProvider().getServer(SPEC_NAME);
RawHttpClient client = server.getApplicationContext().createBean(RawHttpClient.class);
ByteBodyHttpResponse<?> response = Mono.from(
client.exchange(HttpRequest.GET(server.getURL().get() + "/decompression/gzip"), null, null))
.cast(ByteBodyHttpResponse.class)
.block()) {
// Body should be decompressed to the original content
byte[] body = response.byteBody().buffer().get().toByteArray();
Assertions.assertArrayEquals(UNCOMPRESSED, body);
// Content-Encoding header should be removed by the decompressor
Assertions.assertFalse(response.getHeaders().contains(HttpHeaders.CONTENT_ENCODING));
}
}
private static byte[] gzip(byte[] data) {
try {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
try (GZIPOutputStream gzip = new GZIPOutputStream(baos)) {
gzip.write(data);
}
return baos.toByteArray();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
@Controller("/decompression")
@Requires(property = "spec.name", value = SPEC_NAME)
static
|
DecompressionConfigTest
|
java
|
alibaba__druid
|
core/src/main/java/com/alibaba/druid/sql/dialect/synapse/parser/SynapseCreateTableParser.java
|
{
"start": 441,
"end": 3123
}
|
class ____ extends SQLCreateTableParser {
public SynapseCreateTableParser(String sql) {
super(sql);
this.dbType = DbType.synapse;
}
public SynapseCreateTableParser(SQLExprParser exprParser) {
super(exprParser);
this.dbType = DbType.synapse;
}
@Override
protected SynapseCreateTableStatement newCreateStatement() {
return new SynapseCreateTableStatement();
}
@Override
protected void parseCreateTableRest(SQLCreateTableStatement stmt) {
if (stmt instanceof SynapseCreateTableStatement) {
SynapseCreateTableStatement synapseStmt = (SynapseCreateTableStatement) stmt;
if (lexer.token() == Token.WITH) {
lexer.nextToken();
accept(Token.LPAREN);
parseSynapseWithOptions(synapseStmt);
accept(Token.RPAREN);
}
}
super.parseCreateTableRest(stmt);
}
private void parseSynapseWithOptions(SynapseCreateTableStatement stmt) {
for (;;) {
if (lexer.identifierEquals("DISTRIBUTION")) {
lexer.nextToken();
accept(Token.EQ);
if (lexer.identifierEquals("HASH")) {
lexer.nextToken();
accept(Token.LPAREN);
SQLExpr distributionColumn = this.exprParser.expr();
stmt.setDistribution(distributionColumn);
stmt.setDistributionHash(true);
accept(Token.RPAREN);
} else if (lexer.identifierEquals("ROUND_ROBIN")) {
lexer.nextToken();
stmt.setDistributionHash(false);
} else if (lexer.identifierEquals("REPLICATE")) {
lexer.nextToken();
stmt.setDistributionHash(false);
}
} else if (lexer.identifierEquals("CLUSTERED")) {
lexer.nextToken();
accept(Token.INDEX);
accept(Token.LPAREN);
for (;;) {
SQLExpr column = this.exprParser.expr();
stmt.getClusteredIndexColumns().add(column);
if (lexer.token() == Token.COMMA) {
lexer.nextToken();
continue;
}
break;
}
accept(Token.RPAREN);
} else {
break;
}
if (lexer.token() == Token.COMMA) {
lexer.nextToken();
continue;
}
break;
}
}
}
|
SynapseCreateTableParser
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.