| focal_method (string, length 13 – 60.9k) | test_case (string, length 25 – 109k) |
|---|---|
@Override
public void registerService(String serviceName, String groupName, Instance instance) throws NacosException {
    // Trace the attempt up front so failed registrations remain visible in the log.
    NAMING_LOGGER.info("[REGISTER-SERVICE] {} registering service {} with instance: {}", namespaceId, serviceName,
            instance);
    final String groupedServiceName = NamingUtils.getGroupedName(serviceName, groupName);
    // This HTTP code path only supports persistent instances; ephemeral ones must use gRPC.
    if (instance.isEphemeral()) {
        throw new UnsupportedOperationException(
                "Do not support register ephemeral instances by HTTP, please use gRPC replaced.");
    }
    final Map<String, String> requestParams = new HashMap<>(32);
    requestParams.put(CommonParams.NAMESPACE_ID, namespaceId);
    requestParams.put(CommonParams.SERVICE_NAME, groupedServiceName);
    requestParams.put(CommonParams.GROUP_NAME, groupName);
    requestParams.put(CommonParams.CLUSTER_NAME, instance.getClusterName());
    requestParams.put(IP_PARAM, instance.getIp());
    requestParams.put(PORT_PARAM, String.valueOf(instance.getPort()));
    requestParams.put(WEIGHT_PARAM, String.valueOf(instance.getWeight()));
    requestParams.put(REGISTER_ENABLE_PARAM, String.valueOf(instance.isEnabled()));
    requestParams.put(HEALTHY_PARAM, String.valueOf(instance.isHealthy()));
    requestParams.put(EPHEMERAL_PARAM, String.valueOf(instance.isEphemeral()));
    requestParams.put(META_PARAM, JacksonUtils.toJson(instance.getMetadata()));
    reqApi(UtilAndComs.nacosUrlInstance, requestParams, HttpMethod.POST);
}
|
@Test
void testRegisterService() throws Exception {
    // given: a mocked REST template injected into the proxy via reflection
    NacosRestTemplate restTemplateMock = mock(NacosRestTemplate.class);
    HttpRestResult<Object> okResult = new HttpRestResult<Object>();
    okResult.setData("127.0.0.1:8848");
    okResult.setCode(200);
    when(restTemplateMock.exchangeForm(any(), any(), any(), any(), any(), any())).thenReturn(okResult);
    final Field restTemplateField = NamingHttpClientProxy.class.getDeclaredField("nacosRestTemplate");
    restTemplateField.setAccessible(true);
    restTemplateField.set(clientProxy, restTemplateMock);
    // Only persistent instances may be registered over HTTP.
    Instance instance = new Instance();
    instance.setEphemeral(false);
    // when
    clientProxy.registerService("service1", "group1", instance);
    // then: exactly one HTTP form exchange was issued
    verify(restTemplateMock, times(1)).exchangeForm(any(), any(), any(), any(), any(), any());
}
|
/**
 * Returns a view of the float array as an iterable whose elements are compared
 * using exact float equality rather than an approximate tolerance.
 */
public FloatArrayAsIterable usingExactEquality() {
return new FloatArrayAsIterable(EXACT_EQUALITY_CORRESPONDENCE, iterableSubject());
}
|
@Test
public void usingExactEquality_containsExactly_primitiveFloatArray_failure() {
// 3.3f is present in the subject but absent from the expected values, so the
// exact-equality containsExactly assertion must fail.
expectFailureWhenTestingThat(array(1.1f, 2.2f, 3.3f))
.usingExactEquality()
.containsExactly(array(2.2f, 1.1f));
assertFailureKeys(
"value of", "unexpected (1)", "---", "expected", "testing whether", "but was");
// The failure message should single out the one unexpected element, 3.3f.
assertFailureValue("unexpected (1)", Float.toString(3.3f));
}
|
/**
 * Generates the class template spec for the given schema, keeping {@code location}
 * on the current-location stack for the duration of schema processing.
 */
public ClassTemplateSpec generate(DataSchema schema, DataSchemaLocation location)
{
    pushCurrentLocation(location);
    final ClassTemplateSpec spec = processSchema(schema, null, null);
    popCurrentLocation();
    return spec;
}
|
@Test(dataProvider = "customTypeDataForUnion")
public void testCustomInfoForUnionMembers(final List<DataSchema> customTypedSchemas)
{
    // Build a union whose members are the custom-typed schemas under test.
    final UnionDataSchema union = new UnionDataSchema();
    final List<UnionDataSchema.Member> unionMembers = customTypedSchemas.stream()
            .map(UnionDataSchema.Member::new)
            .collect(Collectors.toCollection(ArrayList::new));
    union.setMembers(unionMembers, null);
    // Wrap the union in a named typeref, since the generator is driven from a named schema.
    final TyperefDataSchema typeref = new TyperefDataSchema(new Name(INPUT_SCHEMA_NAME));
    typeref.setReferencedType(union);
    final TemplateSpecGenerator generator = new TemplateSpecGenerator(_resolver);
    final UnionTemplateSpec spec = (UnionTemplateSpec) generator.generate(typeref, _location);
    // Every member must carry custom-class info matching its source schema.
    for (int memberIndex = 0; memberIndex < customTypedSchemas.size(); ++memberIndex)
    {
        Assert.assertNotNull(spec.getMembers().get(memberIndex).getCustomInfo());
        Assert.assertEquals(spec.getMembers().get(memberIndex).getCustomInfo().getCustomClass().getClassName(),
                CustomTypeUtil.getJavaCustomTypeClassNameFromSchema((TyperefDataSchema) customTypedSchemas.get(memberIndex)));
    }
}
|
/**
 * Converts an elapsed-time-since-epoch value into a protobuf {@code Timestamp},
 * splitting it into whole seconds plus a non-negative nanosecond adjustment.
 *
 * @param elapsedSinceEpoch elapsed time since 1970-01-01T00:00:00Z, in the unit given by {@code precise}
 * @param precise           the unit of {@code elapsedSinceEpoch}
 * @return the equivalent protobuf timestamp
 * @throws IllegalArgumentException if the precision is unknown, or the resulting
 *         seconds/nanos fall outside the representable range
 */
private static Timestamp fromLong(Long elapsedSinceEpoch, TimestampPrecise precise) throws IllegalArgumentException {
    final long epochSeconds;
    final int nanoAdjustment;
    // floorDiv/floorMod keep the nanosecond part non-negative for pre-epoch values.
    switch (precise) {
        case Millis:
            epochSeconds = Math.floorDiv(elapsedSinceEpoch, (long) THOUSAND);
            nanoAdjustment = (int) Math.floorMod(elapsedSinceEpoch, (long) THOUSAND) * MILLION;
            break;
        case Micros:
            epochSeconds = Math.floorDiv(elapsedSinceEpoch, (long) MILLION);
            nanoAdjustment = (int) Math.floorMod(elapsedSinceEpoch, (long) MILLION) * THOUSAND;
            break;
        case Nanos:
            epochSeconds = Math.floorDiv(elapsedSinceEpoch, (long) BILLION);
            nanoAdjustment = (int) Math.floorMod(elapsedSinceEpoch, (long) BILLION);
            break;
        default:
            throw new IllegalArgumentException("Unknown precision: " + precise);
    }
    if (epochSeconds < SECONDS_LOWERLIMIT || epochSeconds > SECONDS_UPPERLIMIT) {
        throw new IllegalArgumentException("given seconds is out of range");
    }
    if (nanoAdjustment < NANOSECONDS_LOWERLIMIT || nanoAdjustment > NANOSECONDS_UPPERLIMIT) {
        // NOTE here is unexpected cases because exceeded part is
        // moved to seconds by floor methods
        throw new IllegalArgumentException("given nanos is out of range");
    }
    return Timestamp.newBuilder().setSeconds(epochSeconds).setNanos(nanoAdjustment).build();
}
|
@Test
void timestampMicrosConversionSecondsUpperLimit() throws Exception {
    // One second beyond the supported upper bound, expressed in microseconds,
    // must be rejected with an IllegalArgumentException.
    assertThrows(IllegalArgumentException.class, () -> {
        final TimestampMicrosConversion conversion = new TimestampMicrosConversion();
        final long overLimitMicros = (ProtoConversions.SECONDS_UPPERLIMIT + 1) * 1000000;
        conversion.fromLong(overLimitMicros, TIMESTAMP_MICROS_SCHEMA, LogicalTypes.timestampMicros());
    });
}
|
/**
 * Parses MLSD (RFC 3659) reply lines into a list of children of the given directory.
 * Each line is a set of facts followed by the file name; the "type" fact decides
 * whether an entry is a file, directory or symbolic link, and the remaining facts
 * populate size, ownership, permissions and timestamps.
 *
 * @param directory the directory being listed; parsed entries become its children
 * @param replies   raw MLSD response lines
 * @return the parsed children (empty when replies is empty)
 * @throws FTPInvalidListException if no line yields a plausible entry
 */
@Override
public AttributedList<Path> read(final Path directory, final List<String> replies) throws FTPInvalidListException {
final AttributedList<Path> children = new AttributedList<>();
if(replies.isEmpty()) {
return children;
}
// At least one entry successfully parsed
boolean success = false;
for(String line : replies) {
final Map<String, Map<String, String>> file = this.parseFacts(line);
if(null == file) {
log.error(String.format("Error parsing line %s", line));
continue;
}
for(Map.Entry<String, Map<String, String>> f : file.entrySet()) {
final String name = f.getKey();
// size -- Size in octets
// modify -- Last modification time
// create -- Creation time
// type -- Entry type
// unique -- Unique id of file/directory
// perm -- File permissions, whether read, write, execute is allowed for the login id.
// lang -- Language of the file name per IANA [11] registry.
// media-type -- MIME media-type of file contents per IANA registry.
// charset -- Character set per IANA registry (if not UTF-8)
final Map<String, String> facts = f.getValue();
if(!facts.containsKey("type")) {
log.error(String.format("No type fact in line %s", line));
continue;
}
final Path parsed;
if("dir".equals(facts.get("type").toLowerCase(Locale.ROOT))) {
parsed = new Path(directory, PathNormalizer.name(f.getKey()), EnumSet.of(Path.Type.directory));
}
else if("file".equals(facts.get("type").toLowerCase(Locale.ROOT))) {
parsed = new Path(directory, PathNormalizer.name(f.getKey()), EnumSet.of(Path.Type.file));
}
else if(facts.get("type").toLowerCase(Locale.ROOT).matches("os\\.unix=slink:.*")) {
parsed = new Path(directory, PathNormalizer.name(f.getKey()), EnumSet.of(Path.Type.file, Path.Type.symboliclink));
// Parse symbolic link target in Type=OS.unix=slink:/foobar;Perm=;Unique=keVO1+4G4; foobar
final String[] type = facts.get("type").split(":");
if(type.length == 2) {
final String target = type[1];
// Absolute targets are used verbatim; relative targets are resolved against the listed directory.
if(target.startsWith(String.valueOf(Path.DELIMITER))) {
parsed.setSymlinkTarget(new Path(PathNormalizer.normalize(target), EnumSet.of(Path.Type.file)));
}
else {
parsed.setSymlinkTarget(new Path(PathNormalizer.normalize(String.format("%s/%s", directory.getAbsolute(), target)), EnumSet.of(Path.Type.file)));
}
}
else {
log.warn(String.format("Missing symbolic link target for type %s in line %s", facts.get("type"), line));
continue;
}
}
else {
log.warn(String.format("Ignored type %s in line %s", facts.get("type"), line));
continue;
}
// A directory entry echoing the listed directory's own name is likely the server
// repeating the request target, so it alone does not mark the listing as valid.
if(!success) {
if(parsed.isDirectory() && directory.getName().equals(name)) {
log.warn(String.format("Possibly bogus response line %s", line));
}
else {
success = true;
}
}
if(name.equals(".") || name.equals("..")) {
if(log.isDebugEnabled()) {
log.debug(String.format("Skip %s", name));
}
continue;
}
if(facts.containsKey("size")) {
parsed.attributes().setSize(Long.parseLong(facts.get("size")));
}
if(facts.containsKey("unix.uid")) {
parsed.attributes().setOwner(facts.get("unix.uid"));
}
if(facts.containsKey("unix.owner")) {
// NOTE(review): overwrites any owner already set from unix.uid — presumably intentional precedence; confirm.
parsed.attributes().setOwner(facts.get("unix.owner"));
}
if(facts.containsKey("unix.gid")) {
parsed.attributes().setGroup(facts.get("unix.gid"));
}
if(facts.containsKey("unix.group")) {
// NOTE(review): likewise takes precedence over unix.gid when both facts are present.
parsed.attributes().setGroup(facts.get("unix.group"));
}
if(facts.containsKey("unix.mode")) {
parsed.attributes().setPermission(new Permission(facts.get("unix.mode")));
}
else if(facts.containsKey("perm")) {
// No unix.mode fact: optionally map the standard MLSD "perm" flags onto user permissions.
if(PreferencesFactory.get().getBoolean("ftp.parser.mlsd.perm.enable")) {
Permission.Action user = Permission.Action.none;
final String flags = facts.get("perm");
if(StringUtils.contains(flags, 'r') || StringUtils.contains(flags, 'l')) {
// RETR command may be applied to that object
// Listing commands, LIST, NLST, and MLSD may be applied
user = user.or(Permission.Action.read);
}
if(StringUtils.contains(flags, 'w') || StringUtils.contains(flags, 'm') || StringUtils.contains(flags, 'c')) {
user = user.or(Permission.Action.write);
}
if(StringUtils.contains(flags, 'e')) {
// CWD command naming the object should succeed
user = user.or(Permission.Action.execute);
if(parsed.isDirectory()) {
user = user.or(Permission.Action.read);
}
}
final Permission permission = new Permission(user, Permission.Action.none, Permission.Action.none);
parsed.attributes().setPermission(permission);
}
}
if(facts.containsKey("modify")) {
// Time values are always represented in UTC
parsed.attributes().setModificationDate(this.parseTimestamp(facts.get("modify")));
}
if(facts.containsKey("create")) {
// Time values are always represented in UTC
parsed.attributes().setCreationDate(this.parseTimestamp(facts.get("create")));
}
children.add(parsed);
}
}
if(!success) {
throw new FTPInvalidListException(children);
}
return children;
}
|
@Test
public void testParseMlsdMode664() throws Exception {
    // A single MLSD line carrying a UNIX.mode fact of 0664.
    final Path directory = new Path("/www", EnumSet.of(Path.Type.directory));
    final String reply =
            "modify=19990307234236;perm=adfr;size=60;type=file;unique=FE03U10001724;UNIX.group=1001;UNIX.mode=0664;UNIX.owner=2000; kalahari.diz";
    final AttributedList<Path> children = new FTPMlsdListResponseReader()
            .read(directory, Arrays.asList(reply));
    assertEquals(1, children.size());
    // The UNIX.mode fact must win and surface as octal mode 664.
    assertEquals("664", children.get(0).attributes().getPermission().getMode());
}
|
/**
 * Creates the raw log buffers for a new publication under the publications directory.
 *
 * @param correlationId    identifier used to name the log
 * @param termBufferLength length in bytes of each term buffer
 * @param useSparseFiles   whether the backing file should be sparse
 * @return the newly created raw log
 */
public RawLog newPublication(final long correlationId, final int termBufferLength, final boolean useSparseFiles)
{
return newInstance(publicationsDir, correlationId, termBufferLength, useSparseFiles);
}
|
@Test
void shouldCreateCorrectLengthAndZeroedFilesForPublication()
{
    rawLog = fileStoreLogFactory.newPublication(CREATION_ID, TERM_BUFFER_LENGTH, PRE_ZERO_LOG);
    assertEquals(TERM_BUFFER_LENGTH, rawLog.termLength());
    // Every partition's term buffer must have the configured capacity and be zeroed at both ends.
    final UnsafeBuffer[] partitions = rawLog.termBuffers();
    assertEquals(PARTITION_COUNT, partitions.length);
    for (final UnsafeBuffer partition : partitions)
    {
        assertEquals(TERM_BUFFER_LENGTH, partition.capacity());
        assertEquals(0, partition.getByte(0));
        assertEquals(0, partition.getByte(TERM_BUFFER_LENGTH - 1));
    }
    // The metadata section must also be fully sized and zeroed.
    final UnsafeBuffer metaDataBuffer = rawLog.metaData();
    assertEquals(LogBufferDescriptor.LOG_META_DATA_LENGTH, metaDataBuffer.capacity());
    assertEquals(0, metaDataBuffer.getByte(0));
    assertEquals(0, metaDataBuffer.getByte(LogBufferDescriptor.LOG_META_DATA_LENGTH - 1));
}
|
/**
 * Validates a Pub/Sub message against the service's documented size limits and
 * returns the message's total size: payload bytes plus UTF-8 attribute keys and
 * values plus a fixed per-attribute encoding overhead.
 *
 * @param message             the message to validate
 * @param maxPublishBatchSize upper bound for the combined payload-plus-attributes size
 * @return the computed total size in bytes
 * @throws SizeLimitExceededException if the payload, attribute count, any attribute
 *         key/value, or the total size exceeds its limit
 */
static int validatePubsubMessageSize(PubsubMessage message, int maxPublishBatchSize)
        throws SizeLimitExceededException {
    final int payloadSize = message.getPayload().length;
    if (payloadSize > PUBSUB_MESSAGE_DATA_MAX_BYTES) {
        throw new SizeLimitExceededException(
                "Pubsub message data field of length "
                        + payloadSize
                        + " exceeds maximum of "
                        + PUBSUB_MESSAGE_DATA_MAX_BYTES
                        + " bytes. See https://cloud.google.com/pubsub/quotas#resource_limits");
    }
    int messageSize = payloadSize;
    @Nullable Map<String, String> attributes = message.getAttributeMap();
    if (attributes != null) {
        if (attributes.size() > PUBSUB_MESSAGE_MAX_ATTRIBUTES) {
            throw new SizeLimitExceededException(
                    "Pubsub message contains "
                            + attributes.size()
                            + " attributes which exceeds the maximum of "
                            + PUBSUB_MESSAGE_MAX_ATTRIBUTES
                            + ". See https://cloud.google.com/pubsub/quotas#resource_limits");
        }
        // Consider attribute encoding overhead, so it doesn't go over the request limits
        messageSize += attributes.size() * PUBSUB_MESSAGE_ATTRIBUTE_ENCODE_ADDITIONAL_BYTES;
        for (Map.Entry<String, String> attribute : attributes.entrySet()) {
            final String attributeKey = attribute.getKey();
            final int attributeKeySize = attributeKey.getBytes(StandardCharsets.UTF_8).length;
            if (attributeKeySize > PUBSUB_MESSAGE_ATTRIBUTE_MAX_KEY_BYTES) {
                throw new SizeLimitExceededException(
                        "Pubsub message attribute key '"
                                + attributeKey
                                + "' exceeds the maximum of "
                                + PUBSUB_MESSAGE_ATTRIBUTE_MAX_KEY_BYTES
                                + " bytes. See https://cloud.google.com/pubsub/quotas#resource_limits");
            }
            messageSize += attributeKeySize;
            final String attributeValue = attribute.getValue();
            final int attributeValueSize = attributeValue.getBytes(StandardCharsets.UTF_8).length;
            if (attributeValueSize > PUBSUB_MESSAGE_ATTRIBUTE_MAX_VALUE_BYTES) {
                // Only a 256-character prefix of the value is echoed, to keep the message readable.
                throw new SizeLimitExceededException(
                        "Pubsub message attribute value for key '"
                                + attributeKey
                                + "' starting with '"
                                + attributeValue.substring(0, Math.min(256, attributeValue.length()))
                                + "' exceeds the maximum of "
                                + PUBSUB_MESSAGE_ATTRIBUTE_MAX_VALUE_BYTES
                                + " bytes. See https://cloud.google.com/pubsub/quotas#resource_limits");
            }
            messageSize += attributeValueSize;
        }
    }
    if (messageSize > maxPublishBatchSize) {
        throw new SizeLimitExceededException(
                "Pubsub message of length "
                        + messageSize
                        + " exceeds maximum of "
                        + maxPublishBatchSize
                        + " bytes, when considering the payload and attributes. "
                        + "See https://cloud.google.com/pubsub/quotas#resource_limits");
    }
    return messageSize;
}
|
@Test
public void testValidatePubsubMessageSizeAttributeKeyTooLarge() {
    // A 257-byte ASCII key is one byte over the attribute-key limit.
    final String oversizedKey = RandomStringUtils.randomAscii(257);
    final Map<String, String> attributes = ImmutableMap.of(oversizedKey, "value");
    final PubsubMessage message = new PubsubMessage(new byte[1024], attributes);
    assertThrows(
        SizeLimitExceededException.class,
        () ->
            PreparePubsubWriteDoFn.validatePubsubMessageSize(
                message, PUBSUB_MESSAGE_MAX_TOTAL_SIZE));
}
|
/**
 * Groups this table by the given expressions, returning a {@code SchemaKGroupedTable}
 * backed by a new table-group-by execution step.
 *
 * @param valueFormat        serde format for the grouped values
 * @param groupByExpressions expressions to group by
 * @param contextStacker     query context for the new execution step
 * @return the grouped table
 */
@Override
public SchemaKGroupedTable groupBy(
final FormatInfo valueFormat,
final List<Expression> groupByExpressions,
final Stacker contextStacker
) {
// Since tables must have a key, we know that the keyFormat is both
// not NONE and has at least one column; this allows us to inherit
// the key format directly (as opposed to the logic in SchemaKStream)
final KeyFormat groupedKeyFormat = SerdeFeaturesFactory.sanitizeKeyFormat(
KeyFormat.nonWindowed(keyFormat.getFormatInfo(), keyFormat.getFeatures()),
toSqlTypes(groupByExpressions),
true
);
final TableGroupBy<K> step = ExecutionStepFactory.tableGroupBy(
contextStacker,
sourceTableStep,
InternalFormats.of(groupedKeyFormat, valueFormat),
groupByExpressions
);
return new SchemaKGroupedTable(
step,
resolveSchema(step),
groupedKeyFormat,
ksqlConfig,
functionRegistry);
}
|
@Test
public void testGroupBy() {
    // Given: a schema table built from a simple projection over test2
    final PlanNode logicalPlan =
            buildLogicalPlan("SELECT col0, col1, col2 FROM test2 EMIT CHANGES;");
    initialSchemaKTable = buildSchemaKTableFromPlan(logicalPlan);
    final List<Expression> groupByExpressions = Arrays.asList(TEST_2_COL_2, TEST_2_COL_1);
    // When: grouping by two of the columns
    final SchemaKGroupedTable groupedSchemaKTable = initialSchemaKTable.groupBy(
            valueFormat.getFormatInfo(),
            groupByExpressions,
            childContextStacker
    );
    // Then: the result is a grouped table
    assertThat(groupedSchemaKTable, instanceOf(SchemaKGroupedTable.class));
}
|
/**
 * Invokes {@code method} on {@code target}, forcing accessibility first and
 * retrying if a concurrent accessibility reset causes an {@link IllegalAccessException}.
 *
 * @param target the receiver, or null for static methods
 * @param method the method to invoke
 * @param args   the invocation arguments
 * @return the method's return value
 * @throws InvocationTargetException if the invoked method itself throws
 * @throws IllegalArgumentException  if the arguments do not match the method
 * @throws SecurityException         if accessibility cannot be granted
 */
public static Object invokeMethod(Object target, Method method, Object... args)
        throws InvocationTargetException, IllegalArgumentException, SecurityException {
    for (;;) {
        if (!method.isAccessible()) {
            method.setAccessible(true);
        }
        try {
            return method.invoke(target, args);
        } catch (IllegalAccessException ignored) {
            // avoid other threads executing `method.setAccessible(false)`
        }
    }
}
|
@Test
public void testInvokeMethod() throws NoSuchMethodException, InvocationTargetException {
    // Zero-arg methods are resolved by name on the target's class.
    Assertions.assertEquals(0, ReflectionUtil.invokeMethod("", "length"));
    Assertions.assertEquals(3, ReflectionUtil.invokeMethod("foo", "length"));
    // Looking up a method String does not declare must fail fast.
    Assertions.assertThrows(NoSuchMethodException.class,
            () -> ReflectionUtil.invokeMethod("", "size"));
}
|
/**
 * Returns the assigned partitions, keyed by topic id, with the set of
 * partition ids assigned for each topic.
 */
@Override
public Map<Uuid, Set<Integer>> partitions() {
return partitions;
}
|
@Test
public void testAttributes() {
    // The assignment must expose exactly the partition map it was constructed with.
    final Map<Uuid, Set<Integer>> expectedPartitions = mkAssignment(
            mkTopicAssignment(Uuid.randomUuid(), 1, 2, 3)
    );
    final Assignment assignment = new Assignment(expectedPartitions);
    assertEquals(expectedPartitions, assignment.partitions());
}
|
/**
 * Copies the value at {@code position} of {@code block} into {@code blockBuilder},
 * first resolving dictionary indirection and then dispatching on the builder's
 * concrete structural type.
 */
public static void writePositionToBlockBuilder(Block block, int position, BlockBuilder blockBuilder)
{
    // Resolve dictionary-encoded blocks to the underlying dictionary entry.
    if (block instanceof DictionaryBlock) {
        DictionaryBlock dictionaryBlock = (DictionaryBlock) block;
        position = dictionaryBlock.getId(position);
        block = dictionaryBlock.getDictionary();
    }
    if (blockBuilder instanceof MapBlockBuilder) {
        writePositionToMapBuilder(block, position, (MapBlockBuilder) blockBuilder);
        return;
    }
    if (blockBuilder instanceof ArrayBlockBuilder) {
        writePositionToArrayBuilder(block, position, (ArrayBlockBuilder) blockBuilder);
        return;
    }
    if (blockBuilder instanceof RowBlockBuilder) {
        writePositionToRowBuilder(block, position, (RowBlockBuilder) blockBuilder);
        return;
    }
    // Non-structural builders can copy the position directly.
    block.writePositionTo(position, blockBuilder);
}
|
@Test
public void testMapBlockBuilder()
{
    // Build the expected block by writing map values directly.
    final BlockBuilder expectedBuilder = TEST_MAP_TYPE.createBlockBuilder(null, 1);
    final BlockBuilder entryBuilder = expectedBuilder.beginBlockEntry();
    writeValuesToMapBuilder(entryBuilder);
    final Block expectedBlock = expectedBuilder.closeEntry().build();
    // Copy position 0 into a fresh builder via the utility under test.
    final BlockBuilder copyBuilder = TEST_MAP_TYPE.createBlockBuilder(null, 1);
    writePositionToBlockBuilder(expectedBlock, 0, copyBuilder);
    final Block copiedBlock = copyBuilder.build();
    assertEquals(copiedBlock, expectedBlock);
}
|
/**
 * Compiles the given configuration sources into the pipeline intermediate
 * representation, delegating directly to {@code compileSources}.
 *
 * @param sourcesWithMetadata configuration sources with origin metadata
 * @param supportEscapes      whether escape sequences are honored in config strings
 * @param cve                 expander for configuration variables
 * @return the compiled pipeline IR
 * @throws InvalidIRException if the sources cannot be compiled
 */
@SuppressWarnings("unchecked")
public static PipelineIR configToPipelineIR(final List<SourceWithMetadata> sourcesWithMetadata,
final boolean supportEscapes, ConfigVariableExpander cve) throws InvalidIRException {
return compileSources(sourcesWithMetadata, supportEscapes, cve);
}
|
@Test
public void testConfigToPipelineIR() throws Exception {
    // A minimal stdin->stdout pipeline: one output vertex, no filter vertices.
    final SourceWithMetadata source =
            new SourceWithMetadata("proto", "path", 1, 1, "input {stdin{}} output{stdout{}}");
    final ConfigVariableExpander cve =
            ConfigVariableExpander.withoutSecret(EnvironmentVariableProvider.defaultProvider());
    final PipelineIR pipelineIR =
            ConfigCompiler.configToPipelineIR(Collections.singletonList(source), false, cve);
    assertThat(pipelineIR.getOutputPluginVertices().size(), is(1));
    assertThat(pipelineIR.getFilterPluginVertices().size(), is(0));
}
|
/**
 * Exports one page of OneDrive media. When neither album nor pagination data is
 * present the export starts from the special "photos" folder; otherwise it lists
 * the drive items of the given album and/or page.
 *
 * @param authData       OAuth tokens used to build the media interface
 * @param albumData      optional album (folder) whose items should be listed
 * @param paginationData optional token pointing at the next page of results
 * @param jobId          transfer job id, forwarded to the item converters
 * @return CONTINUE with a next-page token while more pages remain, END otherwise
 * @throws IOException if the drive items request fails
 */
@VisibleForTesting
ExportResult<MediaContainerResource> exportOneDrivePhotos(TokensAndUrlAuthData authData,
Optional<IdOnlyContainerResource> albumData, Optional<PaginationData> paginationData,
UUID jobId) throws IOException {
Optional<String> albumId = Optional.empty();
if (albumData.isPresent()) {
albumId = Optional.of(albumData.get().getId());
}
Optional<String> paginationUrl = getDrivePaginationToken(paginationData);
MicrosoftDriveItemsResponse driveItemsResponse;
if (paginationData.isPresent() || albumData.isPresent()) {
driveItemsResponse =
getOrCreateMediaInterface(authData).getDriveItems(albumId, paginationUrl);
} else {
// First call of a job: start from the user's special photos folder.
driveItemsResponse = getOrCreateMediaInterface(authData).getDriveItemsFromSpecialFolder(
MicrosoftSpecialFolder.FolderType.photos);
}
PaginationData nextPageData = setNextPageToken(driveItemsResponse);
ContinuationData continuationData = new ContinuationData(nextPageData);
MediaContainerResource containerResource;
MicrosoftDriveItem[] driveItems = driveItemsResponse.getDriveItems();
List<MediaAlbum> albums = new ArrayList<>();
List<PhotoModel> photos = new ArrayList<>();
List<VideoModel> videos = new ArrayList<>();
if (driveItems != null && driveItems.length > 0) {
for (MicrosoftDriveItem driveItem : driveItems) {
// Folders become albums and are queued as container resources for sub-export.
MediaAlbum album = tryConvertDriveItemToMediaAlbum(driveItem, jobId);
if (album != null) {
albums.add(album);
continuationData.addContainerResource(new IdOnlyContainerResource(driveItem.id));
continue;
}
// Non-folder items are tried as photo first, then video; anything else is skipped.
PhotoModel photo = tryConvertDriveItemToPhotoModel(albumId, driveItem, jobId);
if (photo != null) {
photos.add(photo);
continue;
}
VideoModel video = tryConvertDriveItemToVideoModel(albumId, driveItem, jobId);
if (video != null) {
videos.add(video);
continue;
}
}
}
ExportResult.ResultType result =
nextPageData == null ? ExportResult.ResultType.END : ExportResult.ResultType.CONTINUE;
containerResource = new MediaContainerResource(albums, photos, videos);
return new ExportResult<>(result, containerResource, continuationData);
}
|
/**
 * Exporting a single album page with no next-page link must yield the album,
 * a container resource for its sub-export, no photos, and no pagination token.
 */
@Test
public void exportAlbumWithoutNextPage() throws IOException {
// Setup
MicrosoftDriveItem folderItem = setUpSingleAlbum();
when(driveItemsResponse.getDriveItems()).thenReturn(new MicrosoftDriveItem[] {folderItem});
when(driveItemsResponse.getNextPageLink()).thenReturn(null);
StringPaginationToken inputPaginationToken =
new StringPaginationToken(DRIVE_TOKEN_PREFIX + DRIVE_PAGE_URL);
// Run
ExportResult<MediaContainerResource> result = microsoftMediaExporter.exportOneDrivePhotos(
null, Optional.empty(), Optional.of(inputPaginationToken), uuid);
// Verify method calls
verify(mediaInterface).getDriveItems(Optional.empty(), Optional.of(DRIVE_PAGE_URL));
verify(driveItemsResponse).getDriveItems();
// Verify next pagination token is absent
ContinuationData continuationData = result.getContinuationData();
StringPaginationToken paginationToken =
(StringPaginationToken) continuationData.getPaginationData();
assertThat(paginationToken).isEqualTo(null);
// Verify one album is ready for import
Collection<MediaAlbum> actualAlbums = result.getExportedData().getAlbums();
assertThat(actualAlbums.stream().map(MediaAlbum::getId).collect(Collectors.toList()))
.containsExactly(FOLDER_ID);
// Verify photos should be empty (in the root)
Collection<PhotoModel> actualPhotos = result.getExportedData().getPhotos();
assertThat(actualPhotos).isEmpty();
// Verify there is one container ready for sub-processing
List<ContainerResource> actualResources = continuationData.getContainerResources();
assertThat(actualResources.stream()
.map(a -> ((IdOnlyContainerResource) a).getId())
.collect(Collectors.toList()))
.containsExactly(FOLDER_ID);
}
|
/**
 * Creates the watermark assigner operator.
 *
 * @param rowtimeFieldIndex     index of the rowtime attribute within the input row
 * @param watermarkGenerator    generator used to derive watermarks from input rows
 * @param idleTimeout           idle timeout value (units defined by the caller)
 * @param processingTimeService service providing processing time; must not be null
 */
public WatermarkAssignerOperator(
int rowtimeFieldIndex,
WatermarkGenerator watermarkGenerator,
long idleTimeout,
ProcessingTimeService processingTimeService) {
this.rowtimeFieldIndex = rowtimeFieldIndex;
this.watermarkGenerator = watermarkGenerator;
this.idleTimeout = idleTimeout;
// Always chain this operator to its upstream operator.
this.chainingStrategy = ChainingStrategy.ALWAYS;
this.processingTimeService = checkNotNull(processingTimeService);
}
|
/**
 * Drives elements and processing time through the operator and checks that
 * watermarks are emitted from the row values (upstream watermarks are ignored)
 * and that a Long.MAX_VALUE watermark is forwarded at end of input.
 */
@Test
public void testWatermarkAssignerOperator() throws Exception {
OneInputStreamOperatorTestHarness<RowData, RowData> testHarness =
createTestHarness(0, WATERMARK_GENERATOR, -1);
testHarness.getExecutionConfig().setAutoWatermarkInterval(50);
long currentTime = 0;
testHarness.open();
testHarness.processElement(new StreamRecord<>(GenericRowData.of(1L)));
testHarness.processElement(new StreamRecord<>(GenericRowData.of(2L)));
testHarness.processWatermark(new Watermark(2)); // this watermark should be ignored
testHarness.processElement(new StreamRecord<>(GenericRowData.of(3L)));
testHarness.processElement(new StreamRecord<>(GenericRowData.of(4L)));
// validate first part of the sequence. we poll elements until our
// watermark updates to "3", which must be the result of the "4" element.
{
ConcurrentLinkedQueue<Object> output = testHarness.getOutput();
long nextElementValue = 1L;
long lastWatermark = -1L;
while (lastWatermark < 3) {
if (output.size() > 0) {
Object next = output.poll();
assertThat(next).isNotNull();
Tuple2<Long, Long> update =
validateElement(next, nextElementValue, lastWatermark);
nextElementValue = update.f0;
lastWatermark = update.f1;
// check the invariant
assertThat(lastWatermark).isLessThan(nextElementValue);
} else {
// Nothing emitted yet: advance processing time to trigger the watermark interval.
currentTime = currentTime + 10;
testHarness.setProcessingTime(currentTime);
}
}
output.clear();
}
testHarness.processElement(new StreamRecord<>(GenericRowData.of(4L)));
testHarness.processElement(new StreamRecord<>(GenericRowData.of(5L)));
testHarness.processElement(new StreamRecord<>(GenericRowData.of(6L)));
testHarness.processElement(new StreamRecord<>(GenericRowData.of(7L)));
testHarness.processElement(new StreamRecord<>(GenericRowData.of(8L)));
// validate the next part of the sequence. we poll elements until our
// watermark updates to "7", which must be the result of the "8" element.
{
ConcurrentLinkedQueue<Object> output = testHarness.getOutput();
long nextElementValue = 4L;
long lastWatermark = 2L;
while (lastWatermark < 7) {
if (output.size() > 0) {
Object next = output.poll();
assertThat(next).isNotNull();
Tuple2<Long, Long> update =
validateElement(next, nextElementValue, lastWatermark);
nextElementValue = update.f0;
lastWatermark = update.f1;
// check the invariant
assertThat(lastWatermark).isLessThan(nextElementValue);
} else {
currentTime = currentTime + 10;
testHarness.setProcessingTime(currentTime);
}
}
output.clear();
}
// End-of-input watermark must be forwarded unchanged.
testHarness.processWatermark(new Watermark(Long.MAX_VALUE));
assertThat(((Watermark) testHarness.getOutput().poll()).getTimestamp())
.isEqualTo(Long.MAX_VALUE);
}
|
/**
 * Normalizes the given path, delegating to {@code normalize(path, true)}
 * (i.e. with the two-argument overload's flag enabled).
 */
public static String normalize(final String path) {
return normalize(path, true);
}
|
@Test
public void testPathName() {
    // With or without a trailing slash, the normalized path yields the same
    // name and absolute form.
    for (final String input : new String[]{"/path/to/file/", "/path/to/file"}) {
        final Path path = new Path(PathNormalizer.normalize(input), EnumSet.of(Path.Type.directory));
        assertEquals("file", path.getName());
        assertEquals("/path/to/file", path.getAbsolute());
    }
}
|
/**
 * Returns true if any of the eight bytes packed into {@code word} matches the
 * lower-case SWAR bit pattern (i.e. {@code applyLowerCasePattern} yields a
 * non-zero mask).
 */
public static boolean containsLowerCase(final long word) {
return applyLowerCasePattern(word) != 0;
}
|
@Test
void containsLowerCaseLong() {
    // given: the extended-ASCII byte table in random order
    final byte[] asciiTable = getExtendedAsciiTable();
    shuffleArray(asciiTable, random);
    // when: checking the table one 8-byte word at a time
    for (int offset = 0; offset < asciiTable.length; offset += Long.BYTES) {
        final long word = getLong(asciiTable, offset);
        final boolean actual = SWARUtil.containsLowerCase(word);
        // then: the SWAR result agrees with a byte-by-byte check
        boolean expected = false;
        for (int byteIndex = 0; byteIndex < Long.BYTES; byteIndex++) {
            expected |= Character.isLowerCase(asciiTable[offset + byteIndex]);
        }
        assertEquals(expected, actual);
    }
}
|
/**
 * Converts a Redshift column type definition into a SeaTunnel {@code Column},
 * mapping each Redshift data type onto the corresponding SeaTunnel type and
 * normalizing the recorded source type string. Unknown types fall through to
 * the superclass converter; if that also fails, a convert error is raised.
 *
 * @param typeDefine the Redshift column definition
 * @return the converted physical column
 */
@Override
public Column convert(BasicTypeDefine typeDefine) {
PhysicalColumn.PhysicalColumnBuilder builder =
PhysicalColumn.builder()
.name(typeDefine.getName())
.sourceType(typeDefine.getColumnType())
.nullable(typeDefine.isNullable())
.defaultValue(typeDefine.getDefaultValue())
.comment(typeDefine.getComment());
String dataType = typeDefine.getDataType().toUpperCase();
switch (dataType) {
case REDSHIFT_BOOLEAN:
builder.sourceType(REDSHIFT_BOOLEAN);
builder.dataType(BasicType.BOOLEAN_TYPE);
break;
case REDSHIFT_SMALLINT:
builder.sourceType(REDSHIFT_SMALLINT);
builder.dataType(BasicType.SHORT_TYPE);
break;
case REDSHIFT_INTEGER:
builder.sourceType(REDSHIFT_INTEGER);
builder.dataType(BasicType.INT_TYPE);
break;
case REDSHIFT_BIGINT:
builder.sourceType(REDSHIFT_BIGINT);
builder.dataType(BasicType.LONG_TYPE);
break;
case REDSHIFT_REAL:
builder.sourceType(REDSHIFT_REAL);
builder.dataType(BasicType.FLOAT_TYPE);
break;
case REDSHIFT_DOUBLE_PRECISION:
builder.sourceType(REDSHIFT_DOUBLE_PRECISION);
builder.dataType(BasicType.DOUBLE_TYPE);
break;
case REDSHIFT_NUMERIC:
// Missing/invalid precision falls back to defaults; precision above the
// maximum is clamped, with the overflow subtracted from the scale.
// NOTE(review): if precision > MAX_PRECISION while scale is null, the
// `scale - ...` expression would NPE — confirm scale is always set here.
Long precision = typeDefine.getPrecision();
Integer scale = typeDefine.getScale();
if (precision == null || precision <= 0) {
precision = Long.valueOf(DEFAULT_PRECISION);
scale = DEFAULT_SCALE;
} else if (precision > MAX_PRECISION) {
scale = scale - (int) (precision - MAX_PRECISION);
precision = Long.valueOf(MAX_PRECISION);
}
builder.sourceType(String.format("%s(%d,%d)", REDSHIFT_NUMERIC, precision, scale));
builder.dataType(new DecimalType(Math.toIntExact(precision), scale));
break;
case REDSHIFT_CHARACTER:
// Missing/invalid length falls back to the maximum character length.
Long characterLength = typeDefine.getLength();
if (characterLength == null || characterLength <= 0) {
characterLength = Long.valueOf(MAX_CHARACTER_LENGTH);
}
builder.sourceType(String.format("%s(%d)", REDSHIFT_CHARACTER, characterLength));
builder.dataType(BasicType.STRING_TYPE);
builder.columnLength(characterLength);
break;
case REDSHIFT_CHARACTER_VARYING:
Long characterVaryingLength = typeDefine.getLength();
if (characterVaryingLength == null || characterVaryingLength <= 0) {
characterVaryingLength = Long.valueOf(MAX_CHARACTER_VARYING_LENGTH);
}
builder.sourceType(
String.format(
"%s(%d)", REDSHIFT_CHARACTER_VARYING, characterVaryingLength));
builder.dataType(BasicType.STRING_TYPE);
builder.columnLength(characterVaryingLength);
break;
case REDSHIFT_HLLSKETCH:
builder.sourceType(REDSHIFT_HLLSKETCH);
builder.dataType(BasicType.STRING_TYPE);
builder.columnLength(MAX_HLLSKETCH_LENGTH);
break;
case REDSHIFT_SUPER:
builder.sourceType(REDSHIFT_SUPER);
builder.dataType(BasicType.STRING_TYPE);
builder.columnLength(MAX_SUPER_LENGTH);
break;
case REDSHIFT_VARBYTE:
case REDSHIFT_BINARY_VARYING:
builder.sourceType(
String.format(
"%s(%d)", typeDefine.getDataType(), MAX_BINARY_VARYING_LENGTH));
builder.dataType(PrimitiveByteArrayType.INSTANCE);
builder.columnLength(MAX_BINARY_VARYING_LENGTH);
break;
case REDSHIFT_TIME:
builder.sourceType(REDSHIFT_TIME);
builder.dataType(LocalTimeType.LOCAL_TIME_TYPE);
builder.scale(MAX_TIME_SCALE);
break;
case REDSHIFT_TIMETZ:
builder.sourceType(REDSHIFT_TIMETZ);
builder.dataType(LocalTimeType.LOCAL_TIME_TYPE);
builder.scale(MAX_TIME_SCALE);
break;
case REDSHIFT_TIMESTAMP:
builder.sourceType(REDSHIFT_TIMESTAMP);
builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
builder.scale(MAX_TIMESTAMP_SCALE);
break;
case REDSHIFT_TIMESTAMPTZ:
builder.sourceType(REDSHIFT_TIMESTAMPTZ);
builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
builder.scale(MAX_TIMESTAMP_SCALE);
break;
default:
// Unknown data type: delegate to the generic converter, surfacing a
// Redshift-specific error if it cannot handle the type either.
try {
return super.convert(typeDefine);
} catch (SeaTunnelRuntimeException e) {
throw CommonError.convertToSeaTunnelTypeError(
DatabaseIdentifier.REDSHIFT,
typeDefine.getDataType(),
typeDefine.getName());
}
}
return builder.build();
}
|
@Test
public void testConvertTimestamp() {
    // Both timestamp variants must map to LOCAL_DATE_TIME_TYPE with the maximum scale.
    for (final String timestampType :
            new String[] {"TIMESTAMP WITHOUT TIME ZONE", "TIMESTAMP WITH TIME ZONE"}) {
        BasicTypeDefine<Object> typeDefine =
                BasicTypeDefine.builder()
                        .name("test")
                        .columnType(timestampType)
                        .dataType(timestampType)
                        .build();
        Column column = RedshiftTypeConverter.INSTANCE.convert(typeDefine);
        Assertions.assertEquals(typeDefine.getName(), column.getName());
        Assertions.assertEquals(LocalTimeType.LOCAL_DATE_TIME_TYPE, column.getDataType());
        Assertions.assertEquals(RedshiftTypeConverter.MAX_TIMESTAMP_SCALE, column.getScale());
        Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType());
    }
}
|
/**
 * Returns the default request configuration held by this instance.
 */
public RequestConfig getDefaultRequestConfig() {
return defaultRequestConfig;
}
|
@Test
void getDefaultRequestConfig_returns_config_provided_at_construction() {
// The client must return exactly the config instance it was constructed with.
assertThat(configuredClient.getDefaultRequestConfig()).isEqualTo(defaultRequestConfigMock);
}
|
/**
 * Validates a source connector configuration: the standard validation runs first,
 * then exactly-once-support and transaction-boundary checks add their results.
 */
@Override
protected Map<String, ConfigValue> validateSourceConnectorConfig(SourceConnector connector, ConfigDef configDef, Map<String, String> config) {
    final Map<String, ConfigValue> validated = super.validateSourceConnectorConfig(connector, configDef, config);
    validateSourceConnectorExactlyOnceSupport(config, validated, connector);
    validateSourceConnectorTransactionBoundary(config, validated, connector);
    return validated;
}
|
// When a connector reports that it supports defining its own transaction
// boundaries, validating transaction.boundary=connector must yield no errors.
@Test
public void testConnectorTransactionBoundaryValidation() {
    herder = exactlyOnceHerder();
    Map<String, String> config = new HashMap<>();
    config.put(SourceConnectorConfig.TRANSACTION_BOUNDARY_CONFIG, CONNECTOR.toString());
    SourceConnector connectorMock = mock(SourceConnector.class);
    when(connectorMock.canDefineTransactionBoundaries(eq(config)))
        .thenReturn(ConnectorTransactionBoundaries.SUPPORTED);
    Map<String, ConfigValue> validatedConfigs = herder.validateSourceConnectorConfig(
        connectorMock, SourceConnectorConfig.configDef(), config);
    List<String> errors = validatedConfigs.get(SourceConnectorConfig.TRANSACTION_BOUNDARY_CONFIG).errorMessages();
    assertEquals(Collections.emptyList(), errors);
}
|
/** Returns the configured name. */
public String getName() {
    return name;
}
|
// The handler must expose the name it was constructed with.
@Test
public void hasAName() throws Exception {
    assertThat(handler.getName())
        .isEqualTo("handler");
}
|
/**
 * Merges {@code config} on top of {@code existingData} and materializes the
 * result as a fresh instance of {@code dataCls}.
 *
 * <p>The existing object is round-tripped through JSON into a generic map,
 * the supplied overrides are layered on top, and the combined map is
 * deserialized back into the target class.
 *
 * @param config       override values keyed by field name
 * @param existingData current configuration object supplying defaults
 * @param dataCls      target configuration class
 * @return a new instance of {@code dataCls} with overrides applied
 * @throws RuntimeException if any JSON (de)serialization step fails
 */
public static <T> T loadData(Map<String, Object> config,
                             T existingData,
                             Class<T> dataCls) {
    try {
        // Snapshot the existing object as a mutable map of its fields.
        String existingJson = MAPPER.writeValueAsString(existingData);
        Map<String, Object> merged = MAPPER.readValue(existingJson, Map.class);
        // Overrides win over existing values.
        merged.putAll(config);
        return MAPPER.readValue(MAPPER.writeValueAsString(merged), dataCls);
    } catch (IOException e) {
        throw new RuntimeException("Failed to load config into existing configuration data", e);
    }
}
|
// Overridden keys (topicNames, receiverQueueSize) must replace the existing
// values while untouched fields (readerName) are preserved.
@Test
public void testLoadReaderConfigurationData() {
    ReaderConfigurationData confData = new ReaderConfigurationData();
    confData.setTopicName("unknown");
    confData.setReceiverQueueSize(1000000);
    confData.setReaderName("unknown-reader");
    Map<String, Object> config = new HashMap<>();
    config.put("topicNames", ImmutableSet.of("test-topic"));
    config.put("receiverQueueSize", 100);
    confData = ConfigurationDataUtils.loadData(config, confData, ReaderConfigurationData.class);
    assertEquals("test-topic", confData.getTopicName());
    assertEquals(100, confData.getReceiverQueueSize());
    assertEquals("unknown-reader", confData.getReaderName());
}
|
/**
 * Matches this detector's magic pattern against the stream within the
 * configured offset window [offsetRangeBegin, offsetRangeEnd].
 *
 * <p>Returns the configured {@code type} on a match, otherwise
 * {@code MediaType.OCTET_STREAM}. The stream position is restored via
 * mark/reset in all cases, so the stream must support marking.
 */
public MediaType detect(InputStream input, Metadata metadata) throws IOException {
    // No stream — nothing can match.
    if (input == null) {
        return MediaType.OCTET_STREAM;
    }
    // Remember the current position; at most offsetRangeEnd + length bytes
    // are consumed before reset() in the finally block.
    input.mark(offsetRangeEnd + length);
    try {
        int offset = 0;
        // Skip bytes at the beginning, using skip() or read()
        while (offset < offsetRangeBegin) {
            long n = input.skip(offsetRangeBegin - offset);
            if (n > 0) {
                offset += n;
            } else if (input.read() != -1) {
                // skip() made no progress; fall back to reading one byte.
                offset += 1;
            } else {
                // EOF before the comparison window even starts.
                return MediaType.OCTET_STREAM;
            }
        }
        // Fill in the comparison window: pattern length plus every possible
        // starting offset within the range.
        byte[] buffer = new byte[length + (offsetRangeEnd - offsetRangeBegin)];
        int n = input.read(buffer);
        if (n > 0) {
            offset += n;
        }
        while (n != -1 && offset < offsetRangeEnd + length) {
            int bufferOffset = offset - offsetRangeBegin;
            n = input.read(buffer, bufferOffset, buffer.length - bufferOffset);
            // increment offset - in case not all read (see testDetectStreamReadProblems)
            if (n > 0) {
                offset += n;
            }
        }
        if (this.isRegex) {
            int flags = 0;
            if (this.isStringIgnoreCase) {
                flags = Pattern.CASE_INSENSITIVE;
            }
            Pattern p = Pattern.compile(new String(this.pattern, UTF_8), flags);
            // Decode as ISO-8859-1 so each byte maps to exactly one char,
            // keeping byte offsets and char offsets aligned.
            ByteBuffer bb = ByteBuffer.wrap(buffer);
            CharBuffer result = ISO_8859_1.decode(bb);
            Matcher m = p.matcher(result);
            boolean match = false;
            // Loop until we've covered the entire offset range
            for (int i = 0; i <= offsetRangeEnd - offsetRangeBegin; i++) {
                m.region(i, length + i);
                match = m.lookingAt(); // match regex from start of region
                if (match) {
                    return type;
                }
            }
        } else {
            // Not enough bytes read to compare even at the earliest offset.
            if (offset < offsetRangeBegin + length) {
                return MediaType.OCTET_STREAM;
            }
            // Loop until we've covered the entire offset range
            for (int i = 0; i <= offsetRangeEnd - offsetRangeBegin; i++) {
                boolean match = true;
                int masked;
                for (int j = 0; match && j < length; j++) {
                    // Apply the per-byte mask before comparing to the pattern.
                    masked = (buffer[i + j] & mask[j]);
                    if (this.isStringIgnoreCase) {
                        masked = Character.toLowerCase(masked);
                    }
                    match = (masked == pattern[j]);
                }
                if (match) {
                    return type;
                }
            }
        }
        return MediaType.OCTET_STREAM;
    } finally {
        input.reset();
    }
}
|
// Detection must still succeed when read(...) returns fewer bytes than
// requested per call (exercised by RestrictiveInputStream).
@Test
public void testDetectStreamReadProblems() throws Exception {
    byte[] data = "abcdefghijklmnopqrstuvwxyz0123456789".getBytes(US_ASCII);
    MediaType testMT = new MediaType("application", "test");
    Detector detector = new MagicDetector(testMT, data, null, false, 0, 0);
    // Deliberately prevent InputStream.read(...) from reading the entire
    // buffer in one go
    InputStream stream = new RestrictiveInputStream(data);
    assertEquals(testMT, detector.detect(stream, new Metadata()));
}
|
/**
 * Collects every string stored in the trie.
 *
 * @return all stored strings, gathered by a depth-first walk from the root
 */
public List<String> all() {
    // Scratch buffer sized for the longest supported word.
    return depth(this.root, new ArrayList<String>(), new char[MAX_CHAR_LENGTH], 0);
}
|
// Verifies that all() returns every inserted word, case preserved and in
// insertion order.
@Test
public void all() throws Exception {
    TrieTree trieTree = new TrieTree();
    trieTree.insert("ABC");
    trieTree.insert("abC");
    List<String> all = trieTree.all();
    // Build the joined form with a StringBuilder instead of repeated string
    // concatenation; the trailing comma matches the original expectation.
    StringBuilder result = new StringBuilder();
    for (String s : all) {
        result.append(s).append(',');
    }
    // assertEquals reports the actual value on failure, unlike assertTrue(equals).
    Assert.assertEquals("ABC,abC,", result.toString());
}
|
/**
 * Creates a file with the given name, delegating to
 * {@code createFile(String, InputStream)} with a null content stream.
 */
@Override
public Path createFile(String filename) throws IOException {
    return createFile(filename, (InputStream) null);
}
|
// Creating the same path twice must fail with FileAlreadyExistsException
// rather than silently overwriting the first file's content.
@Test
void shouldThrowExceptionGivenFileAlreadyExist() throws IOException {
    String workingDirId = IdUtils.create();
    TestWorkingDir workingDirectory = new TestWorkingDir(workingDirId, new LocalWorkingDir(Path.of("/tmp/sub/dir/tmp/"), workingDirId));
    workingDirectory.createFile("folder/file.txt", "1".getBytes(StandardCharsets.UTF_8));
    Assertions.assertThrows(FileAlreadyExistsException.class, () -> {
        workingDirectory.createFile("folder/file.txt", "2".getBytes(StandardCharsets.UTF_8));
    });
}
|
/**
 * Creates a tracked temporary file named "apache-tika-*" with the given
 * suffix (".tmp" when the suffix is blank), in {@code tempFileDir} when one
 * is configured. Registers a cleanup resource that deletes the file and
 * falls back to delete-on-exit if the immediate delete fails.
 *
 * @param suffix file name suffix, may be blank
 * @return path of the newly created temporary file
 * @throws IOException if the file cannot be created
 */
public Path createTempFile(String suffix) throws IOException {
    String actualSuffix = StringUtils.isBlank(suffix) ? ".tmp" : suffix;
    final Path tempPath;
    if (tempFileDir == null) {
        tempPath = Files.createTempFile("apache-tika-", actualSuffix);
    } else {
        tempPath = Files.createTempFile(tempFileDir, "apache-tika-", actualSuffix);
    }
    addResource(() -> {
        try {
            Files.delete(tempPath);
        } catch (IOException e) {
            // delete when exit if current delete fail
            LOG.warn("delete tmp file fail, will delete it on exit");
            tempPath.toFile().deleteOnExit();
        }
    });
    return tempPath;
}
|
// A temp file must exist while TemporaryResources is open and be removed
// once it is closed (try-with-resources triggers the cleanup).
@Test
public void testFileDeletion() throws IOException {
    Path tempFile;
    try (TemporaryResources tempResources = new TemporaryResources()) {
        tempFile = tempResources.createTempFile();
        assertTrue(Files.exists(tempFile), "Temp file should exist while TempResources is used");
    }
    assertTrue(Files.notExists(tempFile),
        "Temp file should not exist after TempResources is closed");
}
|
/**
 * Asserts inequality without any tolerance, deferring entirely to the
 * superclass implementation.
 */
@Override
public void isNotEqualTo(@Nullable Object expected) {
    super.isNotEqualTo(expected);
}
|
// Arrays of different lengths are never equal, so isNotEqualTo must pass.
@Test
public void isNotEqualTo_WithoutToleranceParameter_Success_Shorter() {
    assertThat(array(2.2f, 3.3f)).isNotEqualTo(array(2.2f));
}
|
/**
 * Picks the first period (in list order) that matches the effective time
 * range of the given execution stats.
 *
 * @param availablePeriods candidate periods, checked in order
 * @param stats            execution stats supplying the effective time range
 * @return the first matching period, or empty when none match
 */
@Override
public Optional<Period> chooseBin(final List<Period> availablePeriods, final QueryExecutionStats stats) {
    for (final Period candidate : availablePeriods) {
        if (matches(candidate, stats.effectiveTimeRange())) {
            return Optional.of(candidate);
        }
    }
    return Optional.empty();
}
|
// A range just under 2 days (1 day 23 hours) should select the 2-day bin:
// the smallest available period that still covers the range.
@Test
void testChoosesProperPeriod() {
    final Optional<Period> chosenPeriod = toTest.chooseBin(
        List.of(Period.days(1), Period.days(2), Period.days(3)),
        getQueryExecutionStats(42, AbsoluteRange.create(
            DateTime.now(DateTimeZone.UTC).minusDays(1).minusHours(23),
            DateTime.now(DateTimeZone.UTC))
        )
    );
    assertTrue(chosenPeriod.isPresent());
    assertEquals(chosenPeriod.get(), Period.days(2));
}
|
/**
 * Derives an annotated cluster state from the given params: computes each
 * node's effective state, applies the group-availability rule, decides
 * whether the whole cluster must be marked Down, and infers the
 * distribution bit count for the resulting state.
 */
static AnnotatedClusterState generatedStateFrom(final Params params) {
    final ContentCluster cluster = params.cluster;
    final ClusterState workingState = ClusterState.emptyState();
    final Map<Node, NodeStateReason> nodeStateReasons = new HashMap<>();
    // Per-node effective state; reasons are accumulated into the map as a
    // side effect of computeEffectiveNodeState.
    for (final NodeInfo nodeInfo : cluster.getNodeInfos()) {
        final NodeState nodeState = computeEffectiveNodeState(nodeInfo, params, nodeStateReasons);
        workingState.setNodeState(nodeInfo.getNode(), nodeState);
    }
    // Whole groups may be taken down after individual node states are set.
    takeDownGroupsWithTooLowAvailability(workingState, nodeStateReasons, params);
    final Optional<ClusterStateReason> reasonToBeDown = clusterDownReason(workingState, params);
    if (reasonToBeDown.isPresent()) {
        workingState.setClusterState(State.DOWN);
    }
    // Distribution bits depend on the final node states computed above.
    workingState.setDistributionBits(inferDistributionBitCount(cluster, workingState, params));
    return new AnnotatedClusterState(workingState, reasonToBeDown, nodeStateReasons);
}
|
// Orchestrator-initiated maintenance on two nodes of a three-node group must
// pull the remaining node (3) into maintenance as well, and the matching
// distributor into Down.
@Test
void group_nodes_are_marked_maintenance_if_group_availability_too_low_by_orchestrator() {
    final ClusterFixture fixture = ClusterFixture
        .forHierarchicCluster(DistributionBuilder.withGroups(3).eachWithNodeCount(3))
        .bringEntireClusterUp()
        .proposeStorageNodeWantedState(4, State.MAINTENANCE, NodeState.ORCHESTRATOR_RESERVED_DESCRIPTION)
        .proposeStorageNodeWantedState(5, State.MAINTENANCE, NodeState.ORCHESTRATOR_RESERVED_DESCRIPTION);
    final ClusterStateGenerator.Params params = fixture.generatorParams();
    // Both node 4 & 5 are in maintenance by Orchestrator, which will force the other nodes in the
    // group to maintenance (node 3).
    final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
    assertThat(state.toString(), equalTo("distributor:9 .3.s:d storage:9 .3.s:m .4.s:m .5.s:m"));
}
|
/**
 * Registers a JVM option after trimming and validating it.
 *
 * @param str non-null option; must start with '-' once trimmed
 * @return this, for chaining
 * @throws IllegalArgumentException when the trimmed value is empty or does
 *         not start with '-'
 */
public T add(String str) {
    requireNonNull(str, JVM_OPTION_NOT_NULL_ERROR_MESSAGE);
    final String option = str.trim();
    if (isInvalidOption(option)) {
        throw new IllegalArgumentException("a JVM option can't be empty and must start with '-'");
    }
    // Guard against silently overriding a mandatory option.
    checkMandatoryOptionOverwrite(option);
    options.add(option);
    return castThis();
}
|
// Upper-cased variants of a mandatory option prefix must NOT trip the
// mandatory-overwrite check — the comparison is case sensitive, so all of
// these adds succeed without throwing.
@Test
public void add_checks_against_mandatory_options_is_case_sensitive() {
    String[] optionOverrides = {
        randomPrefix,
        randomPrefix + randomAlphanumeric(1),
        randomPrefix + randomAlphanumeric(2),
        randomPrefix + randomAlphanumeric(3),
        randomPrefix + randomAlphanumeric(4),
        randomPrefix + randomValue.substring(1),
        randomPrefix + randomValue.substring(2),
        randomPrefix + randomValue.substring(3)
    };
    JvmOptions underTest = new JvmOptions(ImmutableMap.of(randomPrefix, randomValue));
    for (String optionOverride : optionOverrides) {
        underTest.add(optionOverride.toUpperCase(Locale.ENGLISH));
    }
}
|
/**
 * Evaluates an OAUTHBEARER client response. A lone 0x01 byte received after
 * this server previously sent an error challenge aborts authentication with
 * that stored error; any other response clears the pending error, is parsed
 * as an initial client response, and its token is processed.
 *
 * @throws SaslAuthenticationException when the client acknowledges a prior error
 * @throws SaslException when the initial response cannot be parsed
 */
@Override
public byte[] evaluateResponse(byte[] response) throws SaslException, SaslAuthenticationException {
    // %x01 (BYTE_CONTROL_A) is the client's acknowledgement of our earlier
    // error challenge; only meaningful while errorMessage is still set.
    if (response.length == 1 && response[0] == OAuthBearerSaslClient.BYTE_CONTROL_A && errorMessage != null) {
        log.debug("Received %x01 response from client after it received our error");
        throw new SaslAuthenticationException(errorMessage);
    }
    errorMessage = null;
    OAuthBearerClientInitialResponse clientResponse;
    try {
        clientResponse = new OAuthBearerClientInitialResponse(response);
    } catch (SaslException e) {
        // Log before rethrowing so the parse failure is visible server-side.
        log.debug(e.getMessage());
        throw e;
    }
    return process(clientResponse.tokenValue(), clientResponse.authorizationId(), clientResponse.extensions());
}
|
// An invalid token must produce the standard "invalid_token" JSON error
// challenge rather than an exception on the first round trip.
@Test
public void illegalToken() throws Exception {
    byte[] bytes = saslServer.evaluateResponse(clientInitialResponse(null, true, Collections.emptyMap()));
    String challenge = new String(bytes, StandardCharsets.UTF_8);
    assertEquals("{\"status\":\"invalid_token\"}", challenge);
}
|
/**
 * Takes a single item from the shared queue, blocking until one is
 * available, and logs the consumer name, item id, and producing party.
 *
 * @throws InterruptedException if interrupted while waiting on the queue
 */
public void consume() throws InterruptedException {
    final var taken = queue.take();
    LOGGER.info("Consumer [{}] consume item [{}] produced by [{}]", name,
        taken.id(), taken.producer());
}
|
// The consumer must call take() exactly once per consume() invocation.
@Test
void testConsume() throws Exception {
    final var queue = spy(new ItemQueue());
    for (var id = 0; id < ITEM_COUNT; id++) {
        queue.put(new Item("producer", id));
    }
    reset(queue); // Don't count the preparation above as interactions with the queue
    final var consumer = new Consumer("consumer", queue);
    for (var id = 0; id < ITEM_COUNT; id++) {
        consumer.consume();
    }
    verify(queue, times(ITEM_COUNT)).take();
}
|
/**
 * Deletes a member level by id after verifying it exists and that no users
 * are still assigned to it.
 */
@Override
public void deleteLevel(Long id) {
    // Verify the level exists.
    validateLevelExists(id);
    // Refuse deletion while users are still assigned to this level.
    validateLevelHasUser(id);
    // Perform the delete.
    memberLevelMapper.deleteById(id);
}
|
// Deleting an existing level with no assigned users must remove the row.
@Test
public void testDeleteLevel_success() {
    // mock data
    MemberLevelDO dbLevel = randomPojo(MemberLevelDO.class);
    memberlevelMapper.insert(dbLevel);// @Sql: insert an existing record first
    // prepare parameters
    Long id = dbLevel.getId();
    // invoke
    levelService.deleteLevel(id);
    // verify the record no longer exists
    assertNull(memberlevelMapper.selectById(id));
}
|
private FederationResult(List<TargetResult> targetResults) {
this.targetResults = targetResults;
if (targetResults.stream().anyMatch(TargetResult::isMandatory))
targetsToWaitFor = targetResults.stream().filter(TargetResult::isMandatory)
.collect(Collectors.toCollection(ArrayList::new));
else
targetsToWaitFor = new ArrayList<>(targetResults);
}
|
// Exercises timeout combinations: the expected set names the targets that
// miss the deadline under each (organic, dsp1, dsp2) timing triple.
@Test
void testFederationResult() {
    assertTimeout(ImmutableSet.of(), 100, 200, 180);
    assertTimeout(ImmutableSet.of(), 480, 400, 400);
    assertTimeout(ImmutableSet.of("dsp1"), 260, 280, 220);
    assertTimeout(ImmutableSet.of("organic"), 520, 160, 160);
    assertTimeout(ImmutableSet.of("dsp2"), 200, 220, 230);
    assertTimeout(ImmutableSet.of(), 200, 220, 210);
    assertTimeout(ImmutableSet.of("dsp1", "dsp2"), 200, 260, 260);
    assertTimeout(ImmutableSet.of("organic"), 520, 260, 260);
}
|
/**
 * Encodes a KubevirtNetwork into a JSON object. Mandatory scalar fields are
 * always written; segment id, host routes, IP pool and DNS servers are only
 * emitted when present/non-empty.
 */
@Override
public ObjectNode encode(KubevirtNetwork network, CodecContext context) {
    checkNotNull(network, "Kubevirt network cannot be null");
    ObjectNode result = context.mapper().createObjectNode()
        .put(NETWORK_ID, network.networkId())
        .put(TYPE, network.type().name())
        .put(NAME, network.name())
        .put(MTU, network.mtu())
        .put(GATEWAY_IP, network.gatewayIp().toString())
        .put(DEFAULT_ROUTE, network.defaultRoute())
        .put(CIDR, network.cidr());
    // Optional field: segment id.
    if (network.segmentId() != null) {
        result.put(SEGMENT_ID, network.segmentId());
    }
    // Optional: each host route is encoded with its own codec.
    if (network.hostRoutes() != null && !network.hostRoutes().isEmpty()) {
        ArrayNode hostRoutes = context.mapper().createArrayNode();
        network.hostRoutes().forEach(hostRoute -> {
            ObjectNode hostRouteJson =
                context.codec(KubevirtHostRoute.class).encode(hostRoute, context);
            hostRoutes.add(hostRouteJson);
        });
        result.set(HOST_ROUTES, hostRoutes);
    }
    // Optional: IP pool encoded with its own codec.
    if (network.ipPool() != null) {
        ObjectNode ipPoolJson = context.codec(KubevirtIpPool.class).encode(network.ipPool(), context);
        result.set(IP_POOL, ipPoolJson);
    }
    // Optional: DNS servers serialized as plain strings.
    if (network.dnses() != null && !network.dnses().isEmpty()) {
        ArrayNode dnses = context.mapper().createArrayNode();
        network.dnses().forEach(dns -> {
            dnses.add(dns.toString());
        });
        result.set(DNSES, dnses);
    }
    return result;
}
|
// Round-trips a fully populated network (host routes, IP pool, DNS) through
// the codec and checks the JSON with a custom matcher.
@Test
public void testKubevirtNetworkEncode() {
    KubevirtHostRoute hostRoute1 = new KubevirtHostRoute(IpPrefix.valueOf("10.10.10.0/24"),
        IpAddress.valueOf("20.20.20.1"));
    KubevirtHostRoute hostRoute2 = new KubevirtHostRoute(IpPrefix.valueOf("20.20.20.0/24"),
        IpAddress.valueOf("10.10.10.1"));
    KubevirtIpPool ipPool = new KubevirtIpPool(IpAddress.valueOf("10.10.10.100"),
        IpAddress.valueOf("10.10.10.200"));
    KubevirtNetwork network = DefaultKubevirtNetwork.builder()
        .networkId("net-1")
        .name("net-1")
        .type(KubevirtNetwork.Type.FLAT)
        .gatewayIp(IpAddress.valueOf("10.10.10.1"))
        .defaultRoute(true)
        .mtu(1500)
        .cidr("10.10.10.0/24")
        .hostRoutes(ImmutableSet.of(hostRoute1, hostRoute2))
        .ipPool(ipPool)
        .dnses(ImmutableSet.of(IpAddress.valueOf("8.8.8.8")))
        .build();
    ObjectNode networkJson = kubevirtNetworkCodec.encode(network, context);
    assertThat(networkJson, matchesKubevirtNetwork(network));
}
|
/**
 * Looks up a profile by id.
 *
 * @param profileId id to match against each profile's id
 * @return the first profile whose id equals {@code profileId}, or null
 */
public ElasticProfile find(String profileId) {
    for (ElasticProfile elasticProfile : this) {
        if (elasticProfile.getId().equals(profileId)) {
            return elasticProfile;
        }
    }
    return null;
}
|
// find() returns null on an empty collection and the matching instance
// when a profile with the given id is present.
@Test
public void shouldFindProfileById() throws Exception {
    assertThat(new ElasticProfiles().find("foo"), is(nullValue()));
    ElasticProfile profile = new ElasticProfile("foo", "prod-cluster");
    assertThat(new ElasticProfiles(profile).find("foo"), is(profile));
}
|
/**
 * Lists configuration items of the given plugin type from the config
 * center. Verifies connectivity first, normalizes group names for the
 * specific client (Nacos/Kie), and — in fuzzy (non-exact) mode — filters
 * groups, keys and parsed configs through the plugin handler.
 *
 * @param request        lookup parameters (namespace, group, key, ...)
 * @param pluginType     plugin whose handler parses/filters entries
 * @param exactMatchFlag when true, handler-side filtering is skipped
 * @return success result carrying the collected config info list, or the
 *         connection-check failure result
 */
public Result<List<ConfigInfo>> getConfigList(ConfigInfo request, PluginType pluginType, boolean exactMatchFlag) {
    // Fail fast when the config center is unreachable.
    Result<?> result = checkConnection(request);
    if (!result.isSuccess()) {
        return new Result<>(result.getCode(), result.getMessage());
    }
    String requestGroup = request.getGroup();
    ConfigClient client = getConfigClient(request.getNamespace());
    // Nacos uses its own group naming scheme for lookups.
    if (client instanceof NacosClient) {
        requestGroup = NacosUtils.rebuildGroup(requestGroup);
    }
    Map<String, List<String>> configMap = client.getConfigList(request.getKey(), requestGroup, exactMatchFlag);
    List<ConfigInfo> configInfoList = new ArrayList<>();
    PluginConfigHandler handler = pluginType.getHandler();
    for (Map.Entry<String, List<String>> entry : configMap.entrySet()) {
        String group = entry.getKey();
        // Convert the client-specific group name back to the external form.
        if (client instanceof NacosClient) {
            group = NacosUtils.convertGroup(group);
        } else if (client instanceof KieClient) {
            group = group.replace(KieConstants.CONNECTOR, KieConstants.SEPARATOR);
            group = group.replace(KieConstants.DEFAULT_LABEL_PRE, StringUtils.EMPTY);
        }
        // Fuzzy mode: drop groups the plugin handler does not recognize.
        if (!exactMatchFlag && !handler.verifyConfigurationGroup(group)) {
            continue;
        }
        for (String configKey : entry.getValue()) {
            // Fuzzy mode: drop keys the plugin handler does not recognize.
            if (!exactMatchFlag && !handler.verifyConfigurationKey(configKey)) {
                continue;
            }
            ConfigInfo configInfo = handler.parsePluginInfo(configKey, group);
            // Fuzzy mode: drop parsed configs the request does not select.
            if (!exactMatchFlag && !handler.filterConfiguration(request, configInfo)) {
                continue;
            }
            configInfo.setNamespace(request.getNamespace());
            configInfoList.add(configInfo);
        }
    }
    return new Result<>(ResultCodeType.SUCCESS.getCode(), null, configInfoList);
}
|
// Fuzzy lookup for the spring-boot registry plugin must return exactly one
// parsed entry with the expected group/key/service/app and no environment.
@Test
public void getConfigList() {
    ConfigInfo configInfo = new ConfigInfo();
    configInfo.setGroup(GROUP);
    configInfo.setKey(KEY);
    configInfo.setPluginType(PluginType.SPRINGBOOT_REGISTRY.getPluginName());
    Result<List<ConfigInfo>> result = configService.getConfigList(configInfo, PluginType.SPRINGBOOT_REGISTRY, false);
    Assert.assertTrue(result.isSuccess());
    Assert.assertNotNull(result.getData());
    Assert.assertEquals(result.getData().size(), 1);
    ConfigInfo info = result.getData().get(0);
    Assert.assertEquals(info.getGroup(), GROUP);
    Assert.assertEquals(info.getKey(), KEY);
    Assert.assertEquals(info.getServiceName(), SERVICE_NAME);
    Assert.assertEquals(info.getAppName(), APP_NAME);
    Assert.assertNull(info.getEnvironment());
}
|
/**
 * Dispatches a named utility function against the given arguments. The
 * argument count narrows the candidate set before names are compared, so a
 * known name with the wrong arity is rejected like an unknown name.
 *
 * @throws UnsupportedOperationException when no function matches the
 *         name/arity combination
 */
@Override
public SelType call(String methodName, SelType[] args) {
    switch (args.length) {
        case 1:
            if ("dateIntToTs".equals(methodName)) {
                return dateIntToTs(args[0]);
            }
            if ("tsToDateInt".equals(methodName)) {
                return tsToDateInt(args[0]);
            }
            break;
        case 2:
            if ("incrementDateInt".equals(methodName)) {
                return incrementDateInt(args[0], args[1]);
            }
            if ("timeoutForDateTimeDeadline".equals(methodName)) {
                return timeoutForDateTimeDeadline(args[0], args[1]);
            }
            if ("timeoutForDateIntDeadline".equals(methodName)) {
                return timeoutForDateIntDeadline(args[0], args[1]);
            }
            break;
        case 3:
            if ("dateIntsBetween".equals(methodName)) {
                return dateIntsBetween(args[0], args[1], args[2]);
            }
            if ("intsBetween".equals(methodName)) {
                return intsBetween(args[0], args[1], args[2]);
            }
            break;
        case 5:
            if ("dateIntHourToTs".equals(methodName)) {
                return dateIntHourToTs(args);
            }
            break;
        default:
            break;
    }
    throw new UnsupportedOperationException(
        type()
            + " DO NOT support calling method: "
            + methodName
            + " with args: "
            + Arrays.toString(args));
}
|
// Passing a non-numeric first argument to intsBetween must surface as a
// NumberFormatException from the underlying parse.
@Test(expected = NumberFormatException.class)
public void testInvalidCallIntsBetween() {
    SelUtilFunc.INSTANCE.call(
        "intsBetween", new SelType[] {SelString.of("foo"), SelLong.of(3), SelLong.of(1)});
}
|
/**
 * Masks the first {@code numChars} characters of {@code input} using the
 * default masking rules, delegating to {@code doMask} with a fresh
 * {@code Masker}.
 */
@Udf(description = "Returns a masked version of the input string. The first n characters"
    + " will be replaced according to the default masking rules.")
@SuppressWarnings("MethodMayBeStatic") // Invoked via reflection
public String mask(
    @UdfParameter("input STRING to be masked") final String input,
    @UdfParameter("number of characters to mask from the start") final int numChars
) {
    return doMask(new Masker(), input, numChars);
}
|
// A mask length beyond the input length must mask every character, mapping
// upper→X, lower→x, digit→n, other→-.
@Test
public void shouldMaskAllCharsIfLengthTooLong() {
    final String result = udf.mask("AbCd#$123xy Z", 999);
    assertThat(result, is("XxXx--nnnxx-X"));
}
|
/**
 * Summarizes an execution step tree starting with no indentation,
 * delegating to the recursive overload and returning only its summary text.
 */
public String summarize(final ExecutionStep<?> step) {
    return summarize(step, "").summary;
}
|
// The summary must render the step and its source child on separate,
// indented lines with schema and logger id for each.
@Test
public void shouldSummarizeWithSource() {
    // Given:
    final LogicalSchema schema = LogicalSchema.builder()
        .keyColumn(SystemColumns.ROWKEY_NAME, SqlTypes.STRING)
        .valueColumn(ColumnName.of("L1"), SqlTypes.STRING)
        .build();
    final ExecutionStep<?> step = givenStep(StreamSelect.class, "child", schema, sourceStep);
    // When:
    final String summary = planSummaryBuilder.summarize(step);
    // Then:
    assertThat(summary, is(
        " > [ PROJECT ] | Schema: ROWKEY STRING KEY, L1 STRING | Logger: QID.child"
        + "\n\t\t > [ SOURCE ] | Schema: ROWKEY STRING KEY, L0 INTEGER | Logger: QID.src\n"
    ));
}
|
/**
 * Reports whether the given string is null, empty, or consists solely of
 * whitespace characters (as defined by {@link Character#isWhitespace(char)}),
 * matching the semantics of Commons Lang's {@code StringUtils.isBlank}.
 *
 * @param value string to test, may be null
 * @return true when null, empty, or whitespace-only
 */
public static boolean isBlank(final String value) {
    if (value == null) {
        return true;
    }
    for (int i = 0; i < value.length(); i++) {
        if (!Character.isWhitespace(value.charAt(i))) {
            return false;
        }
    }
    return true;
}
|
// isBlank must be true for empty, null, and whitespace-only strings, and
// false for any string containing non-whitespace.
@Test
public void testIsBlank() {
    assertTrue(JOrphanUtils.isBlank(""));
    assertTrue(JOrphanUtils.isBlank(null));
    assertTrue(JOrphanUtils.isBlank(" "));
    assertFalse(JOrphanUtils.isBlank(" zdazd dzd "));
}
|
/**
 * Re-synchronizes a code-generation table definition from the live
 * database schema. Runs in a transaction that rolls back on any exception.
 *
 * @param tableId id of the previously imported codegen table
 */
@Override
@Transactional(rollbackFor = Exception.class)
public void syncCodegenFromDB(Long tableId) {
    // Verify the codegen table record exists.
    CodegenTableDO table = codegenTableMapper.selectById(tableId);
    if (table == null) {
        throw exception(CODEGEN_TABLE_NOT_EXISTS);
    }
    // Load the current table structure from the database.
    TableInfo tableInfo = databaseTableService.getTable(table.getDataSourceConfigId(), table.getTableName());
    // Perform the synchronization.
    syncCodegen0(tableId, tableInfo);
}
|
// Syncing from the DB must replace the stored codegen columns with the
// columns rebuilt from the live table structure.
@Test
@Disabled // TODO(@芋艿): this test fails intermittently; root cause needs investigation
public void testSyncCodegenFromDB() {
    // mock data (CodegenTableDO)
    CodegenTableDO table = randomPojo(CodegenTableDO.class, o -> o.setTableName("t_yunai")
        .setDataSourceConfigId(1L).setScene(CodegenSceneEnum.ADMIN.getScene()));
    codegenTableMapper.insert(table);
    CodegenColumnDO column01 = randomPojo(CodegenColumnDO.class, o -> o.setTableId(table.getId())
        .setColumnName("id"));
    codegenColumnMapper.insert(column01);
    CodegenColumnDO column02 = randomPojo(CodegenColumnDO.class, o -> o.setTableId(table.getId())
        .setColumnName("name"));
    codegenColumnMapper.insert(column02);
    // prepare parameters
    Long tableId = table.getId();
    // mock method (TableInfo)
    TableInfo tableInfo = mock(TableInfo.class);
    when(databaseTableService.getTable(eq(1L), eq("t_yunai")))
        .thenReturn(tableInfo);
    when(tableInfo.getComment()).thenReturn("芋艿");
    // mock method (TableInfo fields)
    TableField field01 = mock(TableField.class);
    when(field01.getComment()).thenReturn("主键");
    TableField field03 = mock(TableField.class);
    when(field03.getComment()).thenReturn("分类");
    List<TableField> fields = Arrays.asList(field01, field03);
    when(tableInfo.getFields()).thenReturn(fields);
    when(databaseTableService.getTable(eq(1L), eq("t_yunai")))
        .thenReturn(tableInfo);
    // mock method (CodegenTableDO)
    List<CodegenColumnDO> newColumns = randomPojoList(CodegenColumnDO.class);
    when(codegenBuilder.buildColumns(eq(table.getId()), argThat(tableFields -> {
        assertEquals(2, tableFields.size());
        assertSame(tableInfo.getFields(), tableFields);
        return true;
    }))).thenReturn(newColumns);
    // invoke
    codegenService.syncCodegenFromDB(tableId);
    // assert: stored columns were replaced by the rebuilt ones
    List<CodegenColumnDO> dbColumns = codegenColumnMapper.selectList();
    assertEquals(newColumns.size(), dbColumns.size());
    assertPojoEquals(newColumns.get(0), dbColumns.get(0));
    assertPojoEquals(newColumns.get(1), dbColumns.get(1));
}
|
/**
 * Verifies raw signature bytes by wrapping them in an {@code EcSignature}
 * and delegating to the typed overload.
 */
@Override
public void verify(byte[] data, byte[] signature, MessageDigest digest) {
    verify(data, new EcSignature(signature), digest);
}
|
// Verifying with a tampered key (D + 1) must fail with
// VerificationException("Invalid signature").
@Test
public void shouldThrowValidationExceptionIfSignatureIsInvalid() {
    thrown.expect(VerificationException.class);
    thrown.expectMessage("Invalid signature");
    verify(D.add(BigInteger.ONE), Q, "SHA-256");
}
|
/**
 * Builds the REST path for a namespace's properties endpoint:
 * {@code v1[/prefix]/namespaces/{encoded-namespace}/properties}.
 */
public String namespaceProperties(Namespace ns) {
    final String encodedNamespace = RESTUtil.encodeNamespace(ns);
    return SLASH.join("v1", prefix, "namespaces", encodedNamespace, "properties");
}
|
// The properties path must include the catalog prefix when configured and
// omit it otherwise.
@Test
public void testNamespaceProperties() {
    Namespace ns = Namespace.of("ns");
    assertThat(withPrefix.namespaceProperties(ns))
        .isEqualTo("v1/ws/catalog/namespaces/ns/properties");
    assertThat(withoutPrefix.namespaceProperties(ns)).isEqualTo("v1/namespaces/ns/properties");
}
|
/**
 * For every (key, method) pair, builds a {@code this::methodName} method
 * reference expression and appends statements to {@code body} that put the
 * references into the map named {@code mapName}.
 *
 * @param toAdd   map of entry key to the method to reference
 * @param body    block statement receiving the population statements
 * @param mapName name of the map variable being populated
 */
public static void addMapPopulation(final Map<String, MethodDeclaration> toAdd,
                                    final BlockStmt body,
                                    final String mapName) {
    // Fully qualified to avoid depending on an explicit HashMap import.
    final Map<String, Expression> referencesByKey = new java.util.HashMap<>();
    for (Map.Entry<String, MethodDeclaration> entry : toAdd.entrySet()) {
        final MethodReferenceExpr reference = new MethodReferenceExpr();
        reference.setScope(new ThisExpr());
        reference.setIdentifier(entry.getValue().getNameAsString());
        referencesByKey.put(entry.getKey(), reference);
    }
    addMapPopulationExpressions(referencesByKey, body, mapName);
}
|
// Each generated statement must be a MAP_NAME.put("KEY_i", this::METHOD_i)
// call; the second pass checks every key/method pair appears exactly once.
@Test
void addMapPopulation() {
    final Map<String, MethodDeclaration> toAdd = IntStream.range(0, 5).boxed().collect(Collectors.toMap(index -> "KEY_" + index, index -> getMethodDeclaration("METHOD_" + index)));
    BlockStmt body = new BlockStmt();
    String mapName = "MAP_NAME";
    CommonCodegenUtils.addMapPopulation(toAdd, body, mapName);
    NodeList<Statement> statements = body.getStatements();
    assertThat(statements).hasSize(toAdd.size());
    // Structural check: every statement is map.put(StringLiteral, this::ref).
    for (Statement statement : statements) {
        assertThat(statement).isInstanceOf(ExpressionStmt.class);
        ExpressionStmt expressionStmt = (ExpressionStmt) statement;
        com.github.javaparser.ast.expr.Expression expression = expressionStmt.getExpression();
        assertThat(expression).isInstanceOf(MethodCallExpr.class);
        MethodCallExpr methodCallExpr = (MethodCallExpr) expression;
        final NodeList<com.github.javaparser.ast.expr.Expression> arguments = methodCallExpr.getArguments();
        assertThat(arguments).hasSize(2);
        assertThat(arguments.get(0)).isInstanceOf(StringLiteralExpr.class);
        assertThat(arguments.get(1)).isInstanceOf(MethodReferenceExpr.class);
        MethodReferenceExpr methodReferenceExpr = (MethodReferenceExpr) arguments.get(1);
        assertThat(methodReferenceExpr.getScope()).isInstanceOf(ThisExpr.class);
        final com.github.javaparser.ast.expr.Expression scope = methodCallExpr.getScope().orElse(null);
        assertThat(scope).isNotNull();
        assertThat(scope).isInstanceOf(NameExpr.class);
        assertThat(((NameExpr) scope).getNameAsString()).isEqualTo(mapName);
    }
    // Content check: each input entry maps to exactly one statement.
    for (Map.Entry<String, MethodDeclaration> entry : toAdd.entrySet()) {
        int matchingDeclarations = (int) statements.stream().filter(statement -> {
            ExpressionStmt expressionStmt = (ExpressionStmt) statement;
            com.github.javaparser.ast.expr.Expression expression = expressionStmt.getExpression();
            MethodCallExpr methodCallExpr = (MethodCallExpr) expression;
            final NodeList<com.github.javaparser.ast.expr.Expression> arguments = methodCallExpr.getArguments();
            if (!entry.getKey().equals(((StringLiteralExpr) arguments.get(0)).getValue())) {
                return false;
            }
            MethodReferenceExpr methodReferenceExpr = (MethodReferenceExpr) arguments.get(1);
            return entry.getValue().getName().asString().equals(methodReferenceExpr.getIdentifier());
        }).count();
        assertThat(matchingDeclarations).isEqualTo(1);
    }
}
|
/**
 * Starts the stopwatch, recording the current tick count.
 *
 * @throws IllegalStateException if the stopwatch is already running
 */
public void start() {
    if (running) {
        // A message makes the failure diagnosable; callers catching
        // IllegalStateException are unaffected.
        throw new IllegalStateException("stopwatch is already running");
    }
    start = ticks.ticks();
    running = true;
}
|
// Starting an already-running stopwatch must throw IllegalStateException.
@Test
void twiceStarted() {
    assertThrows(IllegalStateException.class, () -> {
        FakeTicks f = new FakeTicks();
        Stopwatch s = new Stopwatch(f);
        s.start();
        s.start();
    });
}
|
/**
 * Reports the byte order this instance writes in, derived from the
 * {@code isBigEndian} flag.
 */
@Override
public ByteOrder getByteOrder() {
    if (isBigEndian) {
        return ByteOrder.BIG_ENDIAN;
    }
    return ByteOrder.LITTLE_ENDIAN;
}
|
// The reported byte order must match the endianness the output was
// constructed with.
@Test
public void testGetByteOrder() {
    ByteArrayObjectDataOutput outLE = new ByteArrayObjectDataOutput(10, mockSerializationService, LITTLE_ENDIAN);
    ByteArrayObjectDataOutput outBE = new ByteArrayObjectDataOutput(10, mockSerializationService, BIG_ENDIAN);
    assertEquals(LITTLE_ENDIAN, outLE.getByteOrder());
    assertEquals(BIG_ENDIAN, outBE.getByteOrder());
}
|
/**
 * Downloads the full log of a task instance as a file attachment named
 * after the current epoch milliseconds.
 *
 * @param loginUser      session user resolved from the request attribute
 * @param taskInstanceId id of the task instance whose log is fetched
 * @return the log bytes with a Content-Disposition attachment header
 */
@Operation(summary = "downloadTaskLog", description = "DOWNLOAD_TASK_INSTANCE_LOG_NOTES")
@Parameters({
    @Parameter(name = "taskInstanceId", description = "TASK_ID", required = true, schema = @Schema(implementation = int.class, example = "100"))
})
@GetMapping(value = "/download-log")
@ResponseBody
@ApiException(DOWNLOAD_TASK_INSTANCE_LOG_FILE_ERROR)
public ResponseEntity downloadTaskLog(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                      @RequestParam(value = "taskInstanceId") int taskInstanceId) {
    byte[] logBytes = loggerService.getLogBytes(loginUser, taskInstanceId);
    String attachmentName = "attachment; filename=\"" + System.currentTimeMillis() + ".log" + "\"";
    return ResponseEntity
        .ok()
        .header(HttpHeaders.CONTENT_DISPOSITION, attachmentName)
        .body(logBytes);
}
|
// Downloading a task log through the controller must return HTTP 200 and a
// parseable Result with SUCCESS code.
// NOTE(review): the request uses parameter key "taskInstId" while the
// controller declares @RequestParam("taskInstanceId") — confirm whether this
// is intentional or the test relies on framework-level error handling.
@Test
public void testDownloadTaskLog() throws Exception {
    MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
    paramsMap.add("taskInstId", "1501");
    MvcResult mvcResult = mockMvc.perform(get("/log/download-log")
        .header("sessionId", sessionId)
        .params(paramsMap))
        .andExpect(status().isOk())
        /* .andExpect(content().contentType(MediaType.APPLICATION_JSON)) */
        .andReturn();
    Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
    Assertions.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue());
    logger.info(mvcResult.getResponse().getContentAsString());
}
|
/**
 * Checks whether a new PEN request is allowed for the given BSN, document
 * type and sequence number. Prior requests are loaded and checked for
 * being too recent or too frequent; a passing check returns the shared
 * OK status map, otherwise the check itself throws.
 *
 * @throws PenRequestException          when the rate/recency check fails
 * @throws SharedServiceClientException on shared-service lookup failure
 */
public Map<String, String> penRequestAllowed(PenRequest request) throws PenRequestException, SharedServiceClientException {
    final List<PenRequestStatus> previousRequests =
            repository.findByBsnAndDocTypeAndSequenceNo(request.getBsn(), request.getDocType(), request.getSequenceNo());
    checkIfTooSoonOrTooOften(previousRequests);
    return statusOK;
}
|
// Three prior requests spaced at least 24 hours apart must not trip the
// too-soon/too-often check; the service returns {"status": "OK"}.
@Test
public void penRequestAllowedWithValidPreviousRequests() throws PenRequestException, SharedServiceClientException {
    // create three penRequests with a RequestDateTime, 24 hours apart
    PenRequestStatus firstStatus = new PenRequestStatus();
    firstStatus.setRequestDatetime(LocalDateTime.of(2019, 1, 1, 00, 01));
    mockStatusList.add(firstStatus);
    PenRequestStatus secondStatus = new PenRequestStatus();
    secondStatus.setRequestDatetime(LocalDateTime.of(2019, 1, 2, 00, 01));
    mockStatusList.add(secondStatus);
    PenRequestStatus thirdStatus = new PenRequestStatus();
    thirdStatus.setRequestDatetime(LocalDateTime.of(2018, 1, 3, 00, 01));
    mockStatusList.add(thirdStatus);
    // return arraylist with one dummy penrequest
    Mockito.when(mockRepository.findByBsnAndDocTypeAndSequenceNo(request.getBsn(), request.getDocType(), request.getSequenceNo())).thenReturn(mockStatusList);
    Map<String, String> result = service.penRequestAllowed(request);
    Map<String, String> expectedMap = new HashMap<String, String>() {{
        put("status", "OK");
    }};
    assertEquals(expectedMap, result);
}
|
/**
 * Dissects an inbound archive control request frame into a human-readable
 * form. The SBE message header is decoded first; the event code then selects
 * the matching request decoder, which is wrapped over the message body and
 * appended to {@code builder} by the corresponding {@code appendXxx} helper.
 *
 * @param eventCode archive event code identifying the request type
 * @param buffer    buffer containing the encoded log entry
 * @param offset    offset of the log entry within {@code buffer}
 * @param builder   destination for the dissected text
 */
@SuppressWarnings("MethodLength")
static void dissectControlRequest(
    final ArchiveEventCode eventCode,
    final MutableDirectBuffer buffer,
    final int offset,
    final StringBuilder builder)
{
    // Log header (e.g. timestamp) comes before the SBE message header.
    int encodedLength = dissectLogHeader(CONTEXT, eventCode, buffer, offset, builder);
    HEADER_DECODER.wrap(buffer, offset + encodedLength);
    encodedLength += MessageHeaderDecoder.ENCODED_LENGTH;
    switch (eventCode)
    {
        case CMD_IN_CONNECT:
            CONNECT_REQUEST_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendConnect(builder);
            break;
        case CMD_IN_CLOSE_SESSION:
            CLOSE_SESSION_REQUEST_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendCloseSession(builder);
            break;
        case CMD_IN_START_RECORDING:
            START_RECORDING_REQUEST_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendStartRecording(builder);
            break;
        case CMD_IN_STOP_RECORDING:
            STOP_RECORDING_REQUEST_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendStopRecording(builder);
            break;
        case CMD_IN_REPLAY:
            REPLAY_REQUEST_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendReplay(builder);
            break;
        case CMD_IN_STOP_REPLAY:
            STOP_REPLAY_REQUEST_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendStopReplay(builder);
            break;
        case CMD_IN_LIST_RECORDINGS:
            LIST_RECORDINGS_REQUEST_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendListRecordings(builder);
            break;
        case CMD_IN_LIST_RECORDINGS_FOR_URI:
            LIST_RECORDINGS_FOR_URI_REQUEST_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendListRecordingsForUri(builder);
            break;
        case CMD_IN_LIST_RECORDING:
            LIST_RECORDING_REQUEST_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendListRecording(builder);
            break;
        case CMD_IN_EXTEND_RECORDING:
            EXTEND_RECORDING_REQUEST_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendExtendRecording(builder);
            break;
        case CMD_IN_RECORDING_POSITION:
            RECORDING_POSITION_REQUEST_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendRecordingPosition(builder);
            break;
        case CMD_IN_TRUNCATE_RECORDING:
            TRUNCATE_RECORDING_REQUEST_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendTruncateRecording(builder);
            break;
        case CMD_IN_STOP_RECORDING_SUBSCRIPTION:
            STOP_RECORDING_SUBSCRIPTION_REQUEST_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendStopRecordingSubscription(builder);
            break;
        case CMD_IN_STOP_POSITION:
            STOP_POSITION_REQUEST_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendStopPosition(builder);
            break;
        case CMD_IN_FIND_LAST_MATCHING_RECORD:
            FIND_LAST_MATCHING_RECORDING_REQUEST_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendFindLastMatchingRecord(builder);
            break;
        case CMD_IN_LIST_RECORDING_SUBSCRIPTIONS:
            LIST_RECORDING_SUBSCRIPTIONS_REQUEST_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendListRecordingSubscriptions(builder);
            break;
        case CMD_IN_START_BOUNDED_REPLAY:
            BOUNDED_REPLAY_REQUEST_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendStartBoundedReplay(builder);
            break;
        case CMD_IN_STOP_ALL_REPLAYS:
            STOP_ALL_REPLAYS_REQUEST_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendStopAllReplays(builder);
            break;
        case CMD_IN_REPLICATE:
            REPLICATE_REQUEST_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendReplicate(builder);
            break;
        case CMD_IN_STOP_REPLICATION:
            STOP_REPLICATION_REQUEST_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendStopReplication(builder);
            break;
        case CMD_IN_START_POSITION:
            START_POSITION_REQUEST_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendStartPosition(builder);
            break;
        case CMD_IN_DETACH_SEGMENTS:
            DETACH_SEGMENTS_REQUEST_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendDetachSegments(builder);
            break;
        case CMD_IN_DELETE_DETACHED_SEGMENTS:
            DELETE_DETACHED_SEGMENTS_REQUEST_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendDeleteDetachedSegments(builder);
            break;
        case CMD_IN_PURGE_SEGMENTS:
            PURGE_SEGMENTS_REQUEST_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendPurgeSegments(builder);
            break;
        case CMD_IN_ATTACH_SEGMENTS:
            ATTACH_SEGMENTS_REQUEST_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendAttachSegments(builder);
            break;
        case CMD_IN_MIGRATE_SEGMENTS:
            MIGRATE_SEGMENTS_REQUEST_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendMigrateSegments(builder);
            break;
        case CMD_IN_AUTH_CONNECT:
            AUTH_CONNECT_REQUEST_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendAuthConnect(builder);
            break;
        case CMD_IN_KEEP_ALIVE:
            KEEP_ALIVE_REQUEST_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendKeepAlive(builder);
            break;
        case CMD_IN_TAGGED_REPLICATE:
            TAGGED_REPLICATE_REQUEST_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendTaggedReplicate(builder);
            break;
        case CMD_IN_START_RECORDING2:
            START_RECORDING_REQUEST2_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendStartRecording2(builder);
            break;
        case CMD_IN_EXTEND_RECORDING2:
            EXTEND_RECORDING_REQUEST2_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendExtendRecording2(builder);
            break;
        case CMD_IN_STOP_RECORDING_BY_IDENTITY:
            STOP_RECORDING_BY_IDENTITY_REQUEST_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendStopRecordingByIdentity(builder);
            break;
        case CMD_IN_PURGE_RECORDING:
            PURGE_RECORDING_REQUEST_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendPurgeRecording(builder);
            break;
        case CMD_IN_REPLICATE2:
            REPLICATE_REQUEST2_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendReplicate2(builder);
            break;
        case CMD_IN_REQUEST_REPLAY_TOKEN:
            REPLAY_TOKEN_REQUEST_DECODER.wrap(
                buffer,
                offset + encodedLength,
                HEADER_DECODER.blockLength(),
                HEADER_DECODER.version());
            appendReplayToken(builder);
            break;
        default:
            // Unknown event codes are noted rather than failing the dissection.
            builder.append(": unknown command");
    }
}
|
// Verifies that an ExtendRecordingRequest2 frame is dissected into the
// expected human-readable line, including every field of the request.
@Test
void controlRequestExtendRecording2()
{
    // Log header: captured length 12 of 32, timestamp 10s.
    internalEncodeLogHeader(buffer, 0, 12, 32, () -> 10_000_000_000L);
    final ExtendRecordingRequest2Encoder requestEncoder = new ExtendRecordingRequest2Encoder();
    requestEncoder.wrapAndApplyHeader(buffer, LOG_HEADER_LENGTH, headerEncoder)
        .controlSessionId(9)
        .correlationId(78)
        .recordingId(1010101)
        .streamId(43)
        .sourceLocation(SourceLocation.LOCAL)
        .autoStop(BooleanType.TRUE)
        .channel("extend me");
    dissectControlRequest(CMD_IN_EXTEND_RECORDING2, buffer, 0, builder);
    // All encoded fields must appear, in order, in the dissected output.
    assertEquals("[10.000000000] " + CONTEXT + ": " + CMD_IN_EXTEND_RECORDING2.name() + " [12/32]:" +
        " controlSessionId=9" +
        " correlationId=78" +
        " recordingId=1010101" +
        " streamId=43" +
        " sourceLocation=" + SourceLocation.LOCAL +
        " autoStop=" + BooleanType.TRUE +
        " channel=extend me",
        builder.toString());
}
|
/**
 * Deletes the given files and directories on the SMB share. Each path is
 * reported to the callback before removal; directories are removed
 * recursively. The share handle is always released, even on failure.
 *
 * @throws BackgroundException mapped from any SMB runtime failure
 */
@Override
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
    for(Path file : files.keySet()) {
        // Notify the listener prior to attempting the removal.
        callback.delete(file);
        final SMBSession.DiskShareWrapper share = session.openShare(file);
        try {
            // Share-relative key of the path on the SMB server.
            final String key = new SMBPathContainerService(session).getKey(file);
            if(file.isFile() || file.isSymbolicLink()) {
                share.get().rm(key);
            }
            else if(file.isDirectory()) {
                // Recursive delete of the directory contents.
                share.get().rmdir(key, true);
            }
        }
        catch(SMBRuntimeException e) {
            throw new SMBExceptionMappingService().map("Cannot delete {0}", e, file);
        }
        finally {
            session.releaseShare(share);
        }
    }
}
|
// End-to-end check: create a folder and a file, delete both in one call,
// then verify the file is gone.
@Test
public void testDeleteFileAndFolder() throws Exception {
    final Path home = new DefaultHomeFinderService(session).find();
    final Path folder = new SMBDirectoryFeature(session).mkdir(
        new Path(home, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
    final Path file = new SMBTouchFeature(session).touch(
        new Path(home, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
    assertTrue(new SMBFindFeature(session).find(file));
    new SMBDeleteFeature(session).delete(Arrays.asList(file, folder), new DisabledPasswordCallback(), new Delete.DisabledCallback());
    assertFalse(new SMBFindFeature(session).find(file));
}
|
/**
 * Visitor callback for a struct field: returns the already-converted field
 * type unchanged (no additional transformation at the field level).
 */
@Override
public Type field(StructField field, Type typeResult) {
    return typeResult;
}
|
// Converts a nested Delta Lake schema to an Iceberg schema and verifies the
// structure, optionality and element types of the nested struct, map and
// list fields.
@Test
public void testNestedTypeConversion() {
    Type converted =
        DeltaLakeDataTypeVisitor.visit(
            deltaNestedSchema, new DeltaLakeTypeToType(deltaNestedSchema));
    Schema convertedSchema = new Schema(converted.asNestedType().asStructType().fields());
    // Inner struct: optional, with optional boolean / required binary fields.
    assertThat(convertedSchema.findType(INNER_ATOMIC_SCHEMA)).isInstanceOf(Types.StructType.class);
    assertThat(convertedSchema.findField(INNER_ATOMIC_SCHEMA).isOptional()).isTrue();
    assertThat(
            convertedSchema
                .findType(INNER_ATOMIC_SCHEMA)
                .asStructType()
                .fieldType(OPTIONAL_BOOLEAN_TYPE))
        .isInstanceOf(Types.BooleanType.class);
    assertThat(
            convertedSchema
                .findType(INNER_ATOMIC_SCHEMA)
                .asStructType()
                .fieldType(REQUIRED_BINARY_TYPE))
        .isInstanceOf(Types.BinaryType.class);
    assertThat(
            convertedSchema
                .findType(INNER_ATOMIC_SCHEMA)
                .asStructType()
                .field(REQUIRED_BINARY_TYPE)
                .isRequired())
        .isTrue();
    // Map field: string keys to long values.
    assertThat(convertedSchema.findType(STRING_LONG_MAP_TYPE)).isInstanceOf(Types.MapType.class);
    assertThat(convertedSchema.findType(STRING_LONG_MAP_TYPE).asMapType().keyType())
        .isInstanceOf(Types.StringType.class);
    assertThat(convertedSchema.findType(STRING_LONG_MAP_TYPE).asMapType().valueType())
        .isInstanceOf(Types.LongType.class);
    // List of doubles: required field with optional elements.
    assertThat(convertedSchema.findType(DOUBLE_ARRAY_TYPE)).isInstanceOf(Types.ListType.class);
    assertThat(convertedSchema.findField(DOUBLE_ARRAY_TYPE).isRequired()).isTrue();
    assertThat(convertedSchema.findType(DOUBLE_ARRAY_TYPE).asListType().isElementOptional())
        .isTrue();
    // List of structs: element struct mirrors the inner struct's field rules.
    assertThat(convertedSchema.findType(STRUCT_ARRAY_TYPE)).isInstanceOf(Types.ListType.class);
    assertThat(convertedSchema.findField(STRUCT_ARRAY_TYPE).isRequired()).isTrue();
    assertThat(convertedSchema.findType(STRUCT_ARRAY_TYPE).asListType().isElementOptional())
        .isTrue();
    assertThat(convertedSchema.findType(STRUCT_ARRAY_TYPE).asListType().elementType())
        .isInstanceOf(Types.StructType.class);
    assertThat(
            convertedSchema
                .findType(STRUCT_ARRAY_TYPE)
                .asListType()
                .elementType()
                .asStructType()
                .fieldType(OPTIONAL_BOOLEAN_TYPE))
        .isInstanceOf(Types.BooleanType.class);
    assertThat(
            convertedSchema
                .findType(STRUCT_ARRAY_TYPE)
                .asListType()
                .elementType()
                .asStructType()
                .field(OPTIONAL_BOOLEAN_TYPE)
                .isOptional())
        .isTrue();
    assertThat(
            convertedSchema
                .findType(STRUCT_ARRAY_TYPE)
                .asListType()
                .elementType()
                .asStructType()
                .fieldType(REQUIRED_BINARY_TYPE))
        .isInstanceOf(Types.BinaryType.class);
    assertThat(
            convertedSchema
                .findType(STRUCT_ARRAY_TYPE)
                .asListType()
                .elementType()
                .asStructType()
                .field(REQUIRED_BINARY_TYPE)
                .isRequired())
        .isTrue();
}
|
/**
 * Raises the skip-list head index to {@code level}, growing the backing
 * level-index array if needed. Levels may only grow one at a time
 * ({@code level <= topLevel + 1}) and never exceed {@code MAX_LEVEL}.
 *
 * @param level the new level; must be in [0, MAX_LEVEL] and at most topLevel + 1
 */
@Override
public void updateLevel(int level) {
    Preconditions.checkArgument(
        level >= 0 && level <= MAX_LEVEL,
        "level(" + level + ") must be non-negative and no more than " + MAX_LEVEL);
    Preconditions.checkArgument(
        level <= this.topLevel + 1,
        "top level "
            + topLevel
            + " must be updated level by level, but new level is "
            + level);
    // Double the index array when it cannot hold the new level.
    // NOTE(review): the guard uses `length < level` rather than `length <= level`
    // — presumably levelIndex is 1-based or slot `level` is addressed as
    // `level - 1`; confirm against the callers before changing.
    if (levelIndex.length < level) {
        long[] newLevelIndex = new long[this.levelIndex.length * 2];
        initLevelIndex(newLevelIndex);
        // Preserve existing level pointers in the enlarged array.
        System.arraycopy(this.levelIndex, 0, newLevelIndex, 0, this.levelIndex.length);
        this.levelIndex = newLevelIndex;
    }
    if (topLevel < level) {
        topLevel = level;
    }
}
|
// A negative level must be rejected by the precondition check.
@Test
void testUpdateToNegativeLevel() {
    assertThatThrownBy(() -> heapHeadIndex.updateLevel(-1))
            .isInstanceOf(IllegalArgumentException.class);
}
|
/**
 * Generates a Liquibase change log describing the current database schema.
 * The command-line flags select which database object types are compared;
 * output goes to the file named by {@code output} (UTF-8) or, when absent,
 * to the configured output stream.
 *
 * @param namespace parsed command-line arguments
 * @param liquibase configured Liquibase instance providing the database
 * @throws Exception on any Liquibase or I/O failure
 */
@Override
@SuppressWarnings("UseOfSystemOutOrSystemErr")
public void run(Namespace namespace, Liquibase liquibase) throws Exception {
    final Set<Class<? extends DatabaseObject>> compareTypes = new HashSet<>();
    // One flag per comparable database object type; enabled flags are added.
    addIfRequested(namespace, "columns", Column.class, compareTypes);
    addIfRequested(namespace, "data", Data.class, compareTypes);
    addIfRequested(namespace, "foreign-keys", ForeignKey.class, compareTypes);
    addIfRequested(namespace, "indexes", Index.class, compareTypes);
    addIfRequested(namespace, "primary-keys", PrimaryKey.class, compareTypes);
    addIfRequested(namespace, "sequences", Sequence.class, compareTypes);
    addIfRequested(namespace, "tables", Table.class, compareTypes);
    addIfRequested(namespace, "unique-constraints", UniqueConstraint.class, compareTypes);
    addIfRequested(namespace, "views", View.class, compareTypes);
    final DiffToChangeLog diffToChangeLog = new DiffToChangeLog(new DiffOutputControl());
    final Database database = liquibase.getDatabase();
    final String filename = namespace.getString("output");
    if (filename != null) {
        // Write the change log to the requested file in UTF-8.
        try (PrintStream file = new PrintStream(filename, StandardCharsets.UTF_8.name())) {
            generateChangeLog(database, database.getDefaultSchema(), diffToChangeLog, file, compareTypes);
        }
    } else {
        generateChangeLog(database, database.getDefaultSchema(), diffToChangeLog, outputStream, compareTypes);
    }
}

/**
 * Adds {@code type} to {@code compareTypes} when the boolean command-line
 * attribute named {@code attribute} is set to {@code true}.
 */
private void addIfRequested(Namespace namespace, String attribute,
                            Class<? extends DatabaseObject> type,
                            Set<Class<? extends DatabaseObject>> compareTypes) {
    if (isTrue(namespace.getBoolean(attribute))) {
        compareTypes.add(type);
    }
}
|
// With every compare-type flag enabled, dumping an existing schema must
// produce a change log whose first changeSet creates the expected table.
@Test
void testDumpSchema() throws Exception {
    dumpCommand.run(null, new Namespace(ATTRIBUTE_NAMES.stream()
        .collect(Collectors.toMap(a -> a, b -> true))), existedDbConf);
    final Element changeSet = getFirstElement(toXmlDocument(baos).getDocumentElement(), "changeSet");
    assertCreateTable(changeSet);
}
|
/**
 * Default loss-generator behaviour: never drop a frame.
 *
 * @return always {@code false}
 */
public boolean shouldDropFrame(final InetSocketAddress address, final UnsafeBuffer buffer, final int length)
{
    return false;
}
|
// A FixedLossGenerator configured to drop one frame must report a drop
// exactly once for the matching frame and pass it through afterwards.
@Test
void shouldDropSingleFrameOnce()
{
    final FixedLossGenerator fixedLossGenerator = new FixedLossGenerator(0, 0, 1408);
    assertTrue(fixedLossGenerator.shouldDropFrame(null, null, 123, 456, 0, 0, 1408));
    assertFalse(fixedLossGenerator.shouldDropFrame(null, null, 123, 456, 0, 0, 1408));
}
|
/**
 * Serializes each cell of this text result-set row into the payload.
 * Null cells are encoded as the single MySQL NULL marker byte; all other
 * values are delegated to the type-specific writer.
 */
@Override
protected void write(final MySQLPacketPayload payload) {
    for (Object cell : data) {
        if (null != cell) {
            writeDataIntoPayload(payload, cell);
        } else {
            // MySQL protocol NULL marker (0xfb).
            payload.writeInt1(NULL);
        }
    }
}
|
// Verifies each supported cell type is serialized correctly: null -> 0xfb
// marker, strings/decimals as length-encoded strings, timestamps with or
// without a fractional part, and byte arrays as length-encoded bytes.
@Test
void assertWrite() {
    long now = System.currentTimeMillis();
    Timestamp timestamp = new Timestamp(now);
    MySQLTextResultSetRowPacket actual = new MySQLTextResultSetRowPacket(Arrays.asList(null, "value", BigDecimal.ONE, new byte[]{}, timestamp, Boolean.TRUE));
    actual.write(payload);
    verify(payload).writeInt1(0xfb);
    verify(payload).writeStringLenenc("value");
    verify(payload).writeStringLenenc("1");
    // Timestamp rendering depends on whether the current time has nanos.
    if (0 == timestamp.getNanos()) {
        verify(payload).writeStringLenenc(timestamp.toString().split("\\.")[0]);
    } else {
        verify(payload).writeStringLenenc(timestamp.toString());
    }
    verify(payload).writeBytesLenenc(new byte[]{1});
}
|
/**
 * Returns the cache's maximum total weight, i.e. the configured worker
 * cache capacity in bytes.
 */
public long getMaxWeight() {
    return workerCacheBytes;
}
|
// The configured capacity (400 MB) must be reported as the max weight.
@Test
public void testMaxWeight() throws Exception {
    assertEquals(400 * MEGABYTES, cache.getMaxWeight());
}
|
/**
 * Returns the Telegram Bot API method path for this request.
 */
@Override
public String getMethod() {
    return PATH;
}
|
// A default-built GetChatMenuButton must expose the correct API method name
// and pass validation without a chat id.
@Test
public void testGetChatMenuButtonAsDefault() {
    GetChatMenuButton getChatMenuButton = GetChatMenuButton
            .builder()
            .build();
    assertEquals("getChatMenuButton", getChatMenuButton.getMethod());
    assertDoesNotThrow(getChatMenuButton::validate);
}
|
/**
 * Returns whether the given URI names a temporary segment file, i.e. it
 * contains the {@code TMP} separator and ends with a valid UUID suffix.
 *
 * @param uri segment URI to inspect
 * @return {@code true} when the URI ends in {@code TMP} followed by a UUID
 */
public static boolean isTmpFile(String uri) {
    String[] parts = StringUtils.splitByWholeSeparator(uri, TMP);
    // Need at least one token on each side of the TMP separator.
    if (parts.length < 2) {
        return false;
    }
    String uuidCandidate = parts[parts.length - 1];
    try {
        UUID.fromString(uuidCandidate);
    } catch (IllegalArgumentException invalidUuid) {
        // Suffix after TMP is not a UUID, so this is not a tmp file.
        return false;
    }
    return true;
}
|
// Tmp-file detection: accepts a ".tmp.<uuid>" suffix, rejects a missing
// UUID, a separator-only prefix, and a malformed UUID.
@Test
public void testIsTmpFile() {
    assertTrue(SegmentCompletionUtils.isTmpFile("hdfs://foo.tmp.550e8400-e29b-41d4-a716-446655440000"));
    assertFalse(SegmentCompletionUtils.isTmpFile("hdfs://foo.tmp."));
    assertFalse(SegmentCompletionUtils.isTmpFile(".tmp.550e8400-e29b-41d4-a716-446655440000"));
    assertFalse(SegmentCompletionUtils.isTmpFile("hdfs://foo.tmp.55"));
}
|
/**
 * Validates this parameter's name and records any violation in the error
 * collection under this element's location.
 *
 * @param errors         collection receiving validation errors
 * @param parentLocation location of the enclosing configuration element
 */
@Override
public void getErrors(ErrorCollection errors, String parentLocation) {
    final String location = getLocation(parentLocation);
    final NameTypeValidator validator = new NameTypeValidator();
    if (validator.isNameInvalid(name)) {
        errors.addError(location, NameTypeValidator.errorMessage("parameter", name));
    }
}
|
// A parameter named with illegal characters must produce the standard
// invalid-name validation message.
@Test
public void shouldAddAnErrorIfParameterNameIsInvalid() {
    CRParameter crParameter = new CRParameter("#$$%@", null);
    ErrorCollection errorCollection = new ErrorCollection();
    crParameter.getErrors(errorCollection, "TEST");
    assertThat(errorCollection.getErrorsAsText()).contains("Invalid parameter name '#$$%@'. This must be alphanumeric and can contain underscores, hyphens and periods (however, it cannot start with a period). The maximum allowed length is 255 characters.");
}
|
/**
 * Picks one broker at random from the route data of the given topic.
 *
 * @param topic topic whose brokers are considered
 * @return a randomly chosen broker, or empty when the topic does not exist
 *         or has no brokers registered
 * @throws Exception any non-"topic not exist" routing failure is rethrown
 */
protected Optional<BrokerData> findOneBroker(String topic) throws Exception {
    try {
        List<BrokerData> brokerDatas = topicRouteService.getAllMessageQueueView(ProxyContext.createForInner(this.getClass()), topic).getTopicRouteData().getBrokerDatas();
        // Guard: Random.nextInt(0) throws IllegalArgumentException, so an
        // empty (or absent) broker list must short-circuit to empty.
        if (brokerDatas == null || brokerDatas.isEmpty()) {
            return Optional.empty();
        }
        int skipNum = random.nextInt(brokerDatas.size());
        return brokerDatas.stream().skip(skipNum).findFirst();
    } catch (Exception e) {
        // A missing topic is an expected condition, not an error.
        if (TopicRouteHelper.isTopicNotExistError(e)) {
            return Optional.empty();
        }
        throw e;
    }
}
|
// Statistical check of the random broker selection: over 1000 draws both
// brokers registered for the topic should be chosen at least once.
@Test
public void findOneBroker() {
    Set<String> resultBrokerNames = new HashSet<>();
    // run 1000 times to test the random
    for (int i = 0; i < 1000; i++) {
        Optional<BrokerData> brokerData = null;
        try {
            brokerData = this.clusterMetadataService.findOneBroker(TOPIC);
            resultBrokerNames.add(brokerData.get().getBrokerName());
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
    // we should choose two brokers
    assertEquals(2, resultBrokerNames.size());
}
|
/**
 * Resolves the legacy per-node-id subdirectory layout. When a directory
 * named after the node id exists under {@code path}, that subdirectory is
 * returned (with a deprecation warning); otherwise {@code path} is used
 * as-is.
 *
 * @deprecated legacy directory layout support, scheduled for removal
 */
@Deprecated(forRemoval = true)
@Nonnull
protected static Path backwardsCompatible(@Nonnull Path path, NodeId nodeId, String configProperty) {
    final Path legacySubdir = path.resolve(nodeId.getNodeId());
    if (!Files.exists(legacySubdir) || !Files.isDirectory(legacySubdir)) {
        // Modern layout: the configured path is used directly.
        return path;
    }
    LOG.warn("Caution, this datanode instance uses old format of directories. Please configure {} to point directly to {}", configProperty, legacySubdir.toAbsolutePath());
    return legacySubdir;
}
|
// Without the legacy node-id subdirectory the configured path is returned
// unchanged; once the subdirectory exists it is preferred.
@Deprecated(forRemoval = true)
@SuppressWarnings("removal")
@Test
void testBackwardsCompatibility(@TempDir Path tempDir) throws IOException {
    final Path withoutSubdir = DatanodeDirectories.backwardsCompatible(tempDir, new SimpleNodeId("5ca1ab1e-0000-4000-a000-000000000000"), "my_config_property");
    Assertions.assertThat(withoutSubdir).isEqualTo(tempDir);
    Files.createDirectories(tempDir.resolve("5ca1ab1e-0000-4000-a000-000000000000"));
    final Path withSubdir = DatanodeDirectories.backwardsCompatible(tempDir, new SimpleNodeId("5ca1ab1e-0000-4000-a000-000000000000"), "my_config_property");
    Assertions.assertThat(withSubdir)
            .startsWith(tempDir)
            .endsWith(Path.of("5ca1ab1e-0000-4000-a000-000000000000"));
}
|
/**
 * Returns the blob endpoint configuration backing this component.
 */
public BlobConfiguration getConfiguration() {
    return configuration;
}
|
// A blob name containing a path separator ("blob/sub") must be preserved
// verbatim in the endpoint configuration.
@Test
void testHierarchicalBlobName() {
    context.getRegistry().bind("creds", storageSharedKeyCredential());
    BlobEndpoint endpoint = (BlobEndpoint) context
            .getEndpoint(
                    "azure-storage-blob://camelazure/container?blobName=blob/sub&credentials=#creds&credentialType=SHARED_KEY_CREDENTIAL");
    assertEquals("blob/sub", endpoint.getConfiguration().getBlobName());
}
|
/**
 * Renders this result file as a metadata row with a fixed column layout:
 * type, filename, path, parentorigin, origin, comment (all strings) and
 * timestamp (date). Consumers rely on this exact column order.
 *
 * @return the populated row
 */
public RowMetaAndData getRow() {
    RowMetaAndData row = new RowMetaAndData();
    // First the type
    row.addValue( new ValueMetaString( "type" ), getTypeDesc() );
    // The filename
    row.addValue( new ValueMetaString( "filename" ), file.getName().getBaseName() );
    // The path (URI-decoded for display where applicable)
    row.addValue( new ValueMetaString( "path" ), Const.optionallyDecodeUriString( file.getName().getURI() ) );
    // The origin parent
    row.addValue( new ValueMetaString( "parentorigin" ), originParent );
    // The origin
    row.addValue( new ValueMetaString( "origin" ), origin );
    // The comment
    row.addValue( new ValueMetaString( "comment" ), comment );
    // The timestamp
    row.addValue( new ValueMetaDate( "timestamp" ), timestamp );
    return row;
}
|
// Verifies the row layout produced by ResultFile.getRow(): seven columns,
// six strings followed by a date, plus the type/origin accessors and that
// the timestamp falls inside the creation window.
// NOTE(review): `new TemporaryFolder().toString()` is used without calling
// create() — presumably relying on the default toString path; confirm this
// does not leak temp files.
@Test
public void testGetRow() throws KettleFileException, FileSystemException {
    File tempDir = new File( new TemporaryFolder().toString() );
    FileObject tempFile = KettleVFS.createTempFile( "prefix", "suffix", tempDir.toString() );
    Date timeBeforeFile = Calendar.getInstance().getTime();
    ResultFile resultFile = new ResultFile( ResultFile.FILE_TYPE_GENERAL, tempFile, "myOriginParent", "myOrigin" );
    Date timeAfterFile = Calendar.getInstance().getTime();
    assertNotNull( resultFile );
    RowMetaInterface rm = resultFile.getRow().getRowMeta();
    assertEquals( 7, rm.getValueMetaList().size() );
    assertEquals( ValueMetaInterface.TYPE_STRING, rm.getValueMeta( 0 ).getType() );
    assertEquals( ValueMetaInterface.TYPE_STRING, rm.getValueMeta( 1 ).getType() );
    assertEquals( ValueMetaInterface.TYPE_STRING, rm.getValueMeta( 2 ).getType() );
    assertEquals( ValueMetaInterface.TYPE_STRING, rm.getValueMeta( 3 ).getType() );
    assertEquals( ValueMetaInterface.TYPE_STRING, rm.getValueMeta( 4 ).getType() );
    assertEquals( ValueMetaInterface.TYPE_STRING, rm.getValueMeta( 5 ).getType() );
    assertEquals( ValueMetaInterface.TYPE_DATE, rm.getValueMeta( 6 ).getType() );
    assertEquals( ResultFile.FILE_TYPE_GENERAL, resultFile.getType() );
    assertEquals( "myOrigin", resultFile.getOrigin() );
    assertEquals( "myOriginParent", resultFile.getOriginParent() );
    assertTrue( "ResultFile timestamp is created in the expected window",
        timeBeforeFile.compareTo( resultFile.getTimestamp() ) <= 0
            && timeAfterFile.compareTo( resultFile.getTimestamp() ) >= 0 );
    tempFile.delete();
    tempDir.delete();
}
|
/**
 * Not supported by this adapter; always throws
 * {@link MethodNotAvailableException}.
 */
@Override
@MethodNotAvailable
public void delete(K key) {
    throw new MethodNotAvailableException();
}
|
// delete() is declared unavailable on this adapter and must throw.
@Test(expected = MethodNotAvailableException.class)
public void testDelete() {
    adapter.delete(23);
}
|
/**
 * Returns whether the given class is a protobuf message type.
 *
 * @param pojoClazz class to test
 * @return {@code true} only when protobuf is on the classpath and
 *         {@code pojoClazz} is assignable to the protobuf base class
 */
public static boolean isProtobufClass(Class<?> pojoClazz) {
    // protobufClss is null when protobuf is absent from the classpath;
    // in that case nothing qualifies.
    return protobufClss != null && protobufClss.isAssignableFrom(pojoClazz);
}
|
// Generated protobuf messages are recognized; ordinary POJOs are not.
@Test
void testIsProtobufClass() {
    Assertions.assertTrue(ProtobufUtils.isProtobufClass(HelloRequest.class));
    Assertions.assertTrue(ProtobufUtils.isProtobufClass(HelloReply.class));
    Assertions.assertFalse(ProtobufUtils.isProtobufClass(Person.class));
    Assertions.assertFalse(ProtobufUtils.isProtobufClass(SerializablePerson.class));
    Assertions.assertFalse(ProtobufUtils.isProtobufClass(UserVo.class));
}
|
/**
 * Convenience overload: builds a {@link SourceDescription} with no
 * statistics streams and a placeholder local host, delegating to the full
 * factory method.
 */
public static SourceDescription create(
    final DataSource dataSource,
    final boolean extended,
    final List<RunningQuery> readQueries,
    final List<RunningQuery> writeQueries,
    final Optional<TopicDescription> topicDescription,
    final List<QueryOffsetSummary> queryOffsetSummaries,
    final List<String> sourceConstraints,
    final MetricCollectors metricCollectors
) {
    return create(
        dataSource,
        extended,
        readQueries,
        writeQueries,
        topicDescription,
        queryOffsetSummaries,
        sourceConstraints,
        // No stats/error stats streams and an empty host for this overload.
        Stream.empty(),
        Stream.empty(),
        new KsqlHostInfo("", 0),
        metricCollectors
    );
}
|
// Source constraints passed to the factory must be carried through to the
// resulting SourceDescription.
@Test
public void shouldReturnSourceConstraints() {
    // Given:
    final String kafkaTopicName = "kafka";
    final DataSource dataSource = buildDataSource(kafkaTopicName, Optional.empty());
    // When
    final SourceDescription sourceDescription = SourceDescriptionFactory.create(
        dataSource,
        true,
        Collections.emptyList(),
        Collections.emptyList(),
        Optional.empty(),
        Collections.emptyList(),
        ImmutableList.of("s1", "s2"),
        new MetricCollectors()
    );
    // Then:
    assertThat(sourceDescription.getSourceConstraints(), hasItems("s1", "s2"));
}
|
/**
 * Re-runs the serialize-security configuration against the newly added
 * class loader so its resources are taken into account.
 */
@Override
public void onAddClassLoader(ModuleModel scopeModel, ClassLoader classLoader) {
    refreshClassLoader(classLoader);
}
|
// Disabling the deserialize open-check via system property must put the
// serialize security manager into DISABLE status after a classloader refresh.
@Test
void testStatus4() {
    FrameworkModel frameworkModel = new FrameworkModel();
    ApplicationModel applicationModel = frameworkModel.newApplication();
    ModuleModel moduleModel = applicationModel.newModule();
    System.setProperty(CommonConstants.CLASS_DESERIALIZE_OPEN_CHECK, "false");
    SerializeSecurityManager ssm = frameworkModel.getBeanFactory().getBean(SerializeSecurityManager.class);
    SerializeSecurityConfigurator serializeSecurityConfigurator = new SerializeSecurityConfigurator(moduleModel);
    serializeSecurityConfigurator.onAddClassLoader(
            moduleModel, Thread.currentThread().getContextClassLoader());
    Assertions.assertEquals(SerializeCheckStatus.DISABLE, ssm.getCheckStatus());
    // Clean up global state so other tests are unaffected.
    System.clearProperty(CommonConstants.CLASS_DESERIALIZE_OPEN_CHECK);
    frameworkModel.destroy();
}
|
/**
 * Builds a {@link NotControllerException} whose message names the active
 * controller when one is known, or states that none is active.
 *
 * @param controllerId id of the active controller, if any
 * @return the exception to return to the misdirected caller
 */
public static NotControllerException newWrongControllerException(OptionalInt controllerId) {
    final String message = controllerId.isPresent()
        ? "The active controller appears to be node " + controllerId.getAsInt() + "."
        : "No controller appears to be active.";
    return new NotControllerException(message);
}
|
// Absent controller id yields the "no controller active" message.
@Test
public void testNewWrongControllerExceptionWithNoController() {
    assertExceptionsMatch(new NotControllerException("No controller appears to be active."),
        newWrongControllerException(OptionalInt.empty()));
}
|
/**
 * Assembles and writes the scanner report metadata: project identity,
 * analysis date, branch/new-code reference info, SCM details, not-analyzed
 * file counts, active quality profiles and installed plugins.
 *
 * @param writer destination scanner report writer
 */
@Override
public void publish(ScannerReportWriter writer) {
    AbstractProjectOrModule rootProject = moduleHierarchy.root();
    ScannerReport.Metadata.Builder builder = ScannerReport.Metadata.newBuilder()
        .setAnalysisDate(projectInfo.getAnalysisDate().getTime())
        // Here we want key without branch
        .setProjectKey(rootProject.key())
        .setCrossProjectDuplicationActivated(cpdSettings.isCrossProjectDuplicationEnabled())
        .setRootComponentRef(rootProject.scannerId());
    // Optional fields are only set when present.
    projectInfo.getProjectVersion().ifPresent(builder::setProjectVersion);
    projectInfo.getBuildString().ifPresent(builder::setBuildString);
    if (branchConfiguration.branchName() != null) {
        addBranchInformation(builder);
    }
    String newCodeReferenceBranch = referenceBranchSupplier.getFromProperties();
    if (newCodeReferenceBranch != null) {
        builder.setNewCodeReferenceBranch(newCodeReferenceBranch);
    }
    addScmInformation(builder);
    addNotAnalyzedFileCountsByLanguage(builder);
    // One entry per active quality profile, keyed by language.
    for (QProfile qp : qProfiles.findAll()) {
        builder.putQprofilesPerLanguage(qp.getLanguage(), ScannerReport.Metadata.QProfile.newBuilder()
            .setKey(qp.getKey())
            .setLanguage(qp.getLanguage())
            .setName(qp.getName())
            .setRulesUpdatedAt(qp.getRulesUpdatedAt().getTime()).build());
    }
    // One entry per installed plugin, keyed by plugin key.
    for (Entry<String, ScannerPlugin> pluginEntry : pluginRepository.getPluginsByKey().entrySet()) {
        builder.putPluginsByKey(pluginEntry.getKey(), ScannerReport.Metadata.Plugin.newBuilder()
            .setKey(pluginEntry.getKey())
            .setUpdatedAt(pluginEntry.getValue().getUpdatedAt()).build());
    }
    addRelativePathFromScmRoot(builder);
    writer.writeMetadata(builder.build());
}
|
// Not-analyzed file counts per language must be propagated into the
// published report metadata.
@Test
public void write_not_analysed_file_counts() {
    when(componentStore.getNotAnalysedFilesByLanguage()).thenReturn(ImmutableMap.of("c", 10, "cpp", 20));
    underTest.publish(writer);
    ScannerReport.Metadata metadata = reader.readMetadata();
    assertThat(metadata.getNotAnalyzedFilesByLanguageMap()).contains(entry("c", 10), entry("cpp", 20));
}
|
/**
 * Downloads the given file from the server over HTTP and writes it to its
 * local path.
 * NOTE(review): the local file is opened for writing after validation, so a
 * failure mid-stream can leave a partially written file — confirm callers
 * retry or re-validate.
 *
 * @param downloadableFile descriptor providing the URL and local target
 * @return {@code true} when the file was written; {@code false} when the
 *         server response carried no entity
 * @throws IOException              on network or file I/O failure
 * @throws GeneralSecurityException on TLS setup failure
 */
protected synchronized boolean download(final DownloadableFile downloadableFile) throws IOException, GeneralSecurityException {
    File toDownload = downloadableFile.getLocalFile();
    LOG.info("Downloading {}", toDownload);
    String url = downloadableFile.url(urlGenerator);
    final HttpRequestBase request = new HttpGet(url);
    request.setConfig(RequestConfig.custom().setConnectTimeout(HTTP_TIMEOUT_IN_MILLISECONDS).build());
    // Client and response are both closed by try-with-resources.
    try (CloseableHttpClient httpClient = httpClientBuilder.build();
         CloseableHttpResponse response = httpClient.execute(request)) {
        LOG.info("Got server response");
        if (response.getEntity() == null) {
            LOG.error("Unable to read file from the server response");
            return false;
        }
        // Throws for non-success status codes before any bytes are written.
        handleInvalidResponse(response, url);
        try (BufferedOutputStream outStream = new BufferedOutputStream(new FileOutputStream(downloadableFile.getLocalFile()))) {
            response.getEntity().writeTo(outStream);
            LOG.info("Piped the stream to {}", downloadableFile);
        }
    }
    return true;
}
|
// Downloading from an unresolvable host ("locahost" typo is intentional)
// must surface as UnknownHostException.
@Test
public void shouldThrowExceptionIfTheServerIsDown() {
    ServerBinaryDownloader downloader = new ServerBinaryDownloader(new GoAgentServerHttpClientBuilder(null, SslVerificationMode.NONE, null, null, null), ServerUrlGeneratorMother.generatorFor("locahost", server.getPort()));
    assertThatThrownBy(() -> downloader.download(DownloadableFile.AGENT))
        .isExactlyInstanceOf(UnknownHostException.class);
}
|
/**
 * Resolves the jvm.options file (explicit path in {@code args[1]} or the
 * default lookup) and applies it together with the LS_JAVA_OPTS override.
 * Parse errors are reported line by line to stderr; an I/O failure when
 * accessing the options file terminates the process.
 * NOTE(review): the printf format strings have no trailing %n, so
 * consecutive messages run together on one line — confirm intended.
 *
 * @param args       args[0] = Logstash home, args[1] (optional) = options file path
 * @param lsJavaOpts value of the LS_JAVA_OPTS environment variable
 */
static void handleJvmOptions(String[] args, String lsJavaOpts) {
    final JvmOptionsParser parser = new JvmOptionsParser(args[0]);
    final String jvmOpts = args.length == 2 ? args[1] : null;
    try {
        Optional<Path> jvmOptions = parser.lookupJvmOptionsFile(jvmOpts);
        parser.handleJvmOptions(jvmOptions, lsJavaOpts);
    } catch (JvmOptionsFileParserException pex) {
        // Summarize, then enumerate each malformed line with its line number.
        System.err.printf(Locale.ROOT,
                "encountered [%d] error%s parsing [%s]",
                pex.invalidLines().size(),
                pex.invalidLines().size() == 1 ? "" : "s",
                pex.jvmOptionsFile());
        int errorCounter = 0;
        for (final Map.Entry<Integer, String> entry : pex.invalidLines().entrySet()) {
            errorCounter++;
            System.err.printf(Locale.ROOT,
                    "[%d]: encountered improperly formatted JVM option in [%s] on line number [%d]: [%s]",
                    errorCounter,
                    pex.jvmOptionsFile(),
                    entry.getKey(),
                    entry.getValue());
        }
    } catch (IOException ex) {
        // Unreadable options file is fatal for JVM startup configuration.
        System.err.println("Error accessing jvm.options file");
        System.exit(1);
    }
}
|
// When both jvm.options (-Xmx1g) and LS_JAVA_OPTS (Xmx25g) set the heap,
// the environment variable must win — i.e. be the last Xmx emitted.
@Test
public void testEnvironmentOPTSVariableTakesPrecedenceOverOptionsFile() throws IOException {
    String regex = "Xmx[^ ]+";
    String expected = "Xmx25g";
    File optionsFile = writeIntoTempOptionsFile(writer -> writer.println("-Xmx1g"));
    JvmOptionsParser.handleJvmOptions(new String[] {"/path/to/ls_home", optionsFile.toString()}, expected);
    final String output = outputStreamCaptor.toString();
    // The last Xmx occurrence in the emitted options is the effective one.
    java.util.regex.Pattern pattern = java.util.regex.Pattern.compile(regex);
    String lastMatch = pattern.matcher(output)
            .results()
            .map(java.util.regex.MatchResult::group)
            .reduce((first, second) -> second)
            .orElse(null);
    assertEquals("LS_JAVA_OPTS env must take precedence over jvm.options file", expected, lastMatch);
}
|
/**
 * Prints the given entities in the console's configured output format:
 * JSON as a single document, or one table per entity.
 *
 * @param entityList entities to render
 * @throws RuntimeException when the output format is unrecognized
 */
public void printKsqlEntityList(final List<KsqlEntity> entityList) {
    switch (outputFormat) {
        case JSON:
            printAsJson(entityList);
            break;
        case TABULAR:
            printEntitiesAsTables(entityList);
            break;
        default:
            throw new RuntimeException(String.format(
                "Unexpected output format: '%s'",
                outputFormat.name()
            ));
    }
}

/**
 * Renders each entity as a table, prefixing each with its originating
 * statement when more than one entity is printed.
 */
private void printEntitiesAsTables(final List<KsqlEntity> entityList) {
    final boolean showStatements = entityList.size() > 1;
    for (final KsqlEntity entity : entityList) {
        writer().println();
        if (showStatements) {
            writer().println(entity.getStatementText());
        }
        printAsTable(entity);
    }
}
|
// Snapshot (approval) test for rendering an AssertSchemaEntity whose
// assertion did not hold (exists = false).
@Test
public void shouldPrintAssertNotExistsSchemaResult() {
    // Given:
    final KsqlEntityList entities = new KsqlEntityList(ImmutableList.of(
        new AssertSchemaEntity("statement", Optional.of("abc"), Optional.of(55), false)
    ));
    // When:
    console.printKsqlEntityList(entities);
    // Then:
    final String output = terminal.getOutputString();
    Approvals.verify(output, approvalOptions);
}
|
/**
 * Queues the post-write task onto the write buffer, retrying with a drain
 * request between attempts; if the buffer stays full the calling thread
 * performs the maintenance itself under the eviction lock.
 *
 * @param task the pending operation to apply during maintenance
 */
void afterWrite(Runnable task) {
    for (int i = 0; i < WRITE_BUFFER_RETRIES; i++) {
        if (writeBuffer.offer(task)) {
            scheduleAfterWrite();
            return;
        }
        // Buffer full: ask for a drain and back off briefly before retrying.
        scheduleDrainBuffers();
        Thread.onSpinWait();
    }
    // In scenarios where the writing threads cannot make progress then they attempt to provide
    // assistance by performing the eviction work directly. This can resolve cases where the
    // maintenance task is scheduled but not running. That might occur due to all of the executor's
    // threads being busy (perhaps writing into this cache), the write rate greatly exceeds the
    // consuming rate, priority inversion, or if the executor silently discarded the maintenance
    // task. Unfortunately this cannot resolve when the eviction is blocked waiting on a long-
    // running computation due to an eviction listener, the victim is being computed on by a writer,
    // or the victim residing in the same hash bin as a computing entry. In those cases a warning is
    // logged to encourage the application to decouple these computations from the map operations.
    lock();
    try {
        maintenance(task);
    } catch (RuntimeException e) {
        logger.log(Level.ERROR, "Exception thrown when performing the maintenance task", e);
    } finally {
        evictionLock.unlock();
    }
    rescheduleCleanUpIfIncomplete();
}
|
@Test(dataProvider = "caches")
@CacheSpec(population = Population.EMPTY)
public void afterWrite_drainFullWriteBuffer(
    BoundedLocalCache<Int, Int> cache, CacheContext context) {
  // Pretend a drain is mid-flight so offered tasks accumulate instead of running.
  cache.drainStatus = PROCESSING_TO_IDLE;
  int[] queued = { 0 };
  Runnable pendingTask = () -> queued[0]++;
  // Fill the write buffer to capacity with pending tasks.
  for (int i = 0; i < WRITE_BUFFER_MAX; i++) {
    cache.afterWrite(pendingTask);
  }
  assertThat(cache.drainStatus).isEqualTo(PROCESSING_TO_REQUIRED);
  int[] triggered = { 0 };
  Runnable triggerTask = () -> triggered[0] = WRITE_BUFFER_MAX + 1;
  // One more write cannot be buffered, so the caller drains the buffer itself.
  cache.afterWrite(triggerTask);
  // The assisted drain ran every queued task plus the trigger, and released the lock.
  assertThat(cache.drainStatus).isEqualTo(IDLE);
  assertThat(cache.evictionLock.isLocked()).isFalse();
  assertThat(queued[0]).isEqualTo(WRITE_BUFFER_MAX);
  assertThat(triggered[0]).isEqualTo(WRITE_BUFFER_MAX + 1);
}
|
@Override
public void set(File file, String view, String attribute, Object value, boolean create) {
    // Every attribute exposed by this view is read-only: always reject the write
    // via the shared helper that builds the appropriate exception.
    throw unsettable(view, attribute, create);
}
|
@Test
public void testSet() {
    // All "unix:" view attributes are read-only, so every set attempt must fail.
    assertSetFails("unix:uid", 1);
    assertSetFails("unix:gid", 1);
    assertSetFails("unix:rdev", 1L);
    assertSetFails("unix:dev", 1L);
    assertSetFails("unix:ino", 1);
    assertSetFails("unix:mode", 1);
    assertSetFails("unix:ctime", 1L);
    assertSetFails("unix:nlink", 1);
}
|
/**
 * Opens a download stream for the given file over WebDAV. For resumed transfers
 * (append) a Range header is sent; if the server ignores it and replies 200 OK,
 * the already-transferred prefix is skipped client-side instead.
 *
 * @param file     Remote file to read
 * @param status   Transfer state; offset/length drive the Range header
 * @param callback Connection prompt callback (unused here)
 * @return Stream positioned at the requested offset; closing before EOF aborts the request
 * @throws BackgroundException On HTTP or I/O failure, mapped to a service-specific error
 */
@Override
public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    final List<Header> headers = new ArrayList<Header>(this.headers());
    if(status.isAppend()) {
        // Resume: request only the remaining byte range.
        final HttpRange range = HttpRange.withStatus(status);
        final String header;
        if(TransferStatus.UNKNOWN_LENGTH == range.getEnd()) {
            header = String.format("bytes=%d-", range.getStart());
        }
        else {
            header = String.format("bytes=%d-%d", range.getStart(), range.getEnd());
        }
        if(log.isDebugEnabled()) {
            log.debug(String.format("Add range header %s for file %s", header, file));
        }
        headers.add(new BasicHeader(HttpHeaders.RANGE, header));
        // Disable compression
        headers.add(new BasicHeader(HttpHeaders.ACCEPT_ENCODING, "identity"));
    }
    try {
        final HttpRequestBase request = this.toRequest(file, status);
        for(Header header : headers) {
            request.addHeader(header);
        }
        final HttpResponse response = session.getClient().execute(request);
        final VoidResponseHandler handler = new VoidResponseHandler();
        try {
            handler.handleResponse(response);
            // Will abort the read when closed before EOF.
            final ContentLengthStatusInputStream stream = new ContentLengthStatusInputStream(new HttpMethodReleaseInputStream(response, status),
                response.getEntity().getContentLength(),
                response.getStatusLine().getStatusCode());
            if(status.isAppend()) {
                // 200 (not 206) means the server ignored the Range header and sent the
                // whole entity; skip the bytes already transferred.
                if(stream.getCode() == HttpStatus.SC_OK) {
                    if(TransferStatus.UNKNOWN_LENGTH != status.getLength()) {
                        if(stream.getLength() != status.getLength()) {
                            log.warn(String.format("Range header not supported. Skipping %d bytes in file %s.", status.getOffset(), file));
                            stream.skip(status.getOffset());
                        }
                    }
                }
            }
            return stream;
        }
        catch(IOException ex) {
            // Release the connection on failure before rethrowing.
            request.abort();
            throw ex;
        }
    }
    catch(SardineException e) {
        throw new DAVExceptionMappingService().map("Download {0} failed", e, file);
    }
    catch(IOException e) {
        throw new HttpExceptionMappingService().map("Download {0} failed", e, file);
    }
}
|
@Test
public void testReadRangeUnknownLength() throws Exception {
    // Upload a 1023-byte file, then read it back from offset 100 with an
    // unknown (-1) length and verify the tail matches the original content.
    final Path test = new DAVTouchFeature(session).touch(new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
    final Local local = new Local(System.getProperty("java.io.tmpdir"), new AlphanumericRandomStringService().random());
    final byte[] content = RandomUtils.nextBytes(1023);
    final OutputStream out = local.getOutputStream(false);
    assertNotNull(out);
    IOUtils.write(content, out);
    out.close();
    new DAVUploadFeature(session).upload(
        test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED), new DisabledStreamListener(),
        new TransferStatus().withLength(content.length),
        new DisabledConnectionCallback());
    // Resume from offset 100 without declaring a length.
    final TransferStatus status = new TransferStatus();
    status.setLength(-1L);
    status.setAppend(true);
    status.setOffset(100L);
    final InputStream in = new DAVReadFeature(session).read(test, status, new DisabledConnectionCallback());
    assertNotNull(in);
    final ByteArrayOutputStream buffer = new ByteArrayOutputStream(content.length - 100);
    new StreamCopier(status, status).transfer(in, buffer);
    // Expect exactly the bytes after the skipped prefix.
    final byte[] reference = new byte[content.length - 100];
    System.arraycopy(content, 100, reference, 0, content.length - 100);
    assertArrayEquals(reference, buffer.toByteArray());
    in.close();
    new DAVDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
/**
 * Resolves the capabilities required for the request, preferring the most
 * specific configuration: handler, then route, then API-wide, then the default.
 */
@Override
public CapabilitySet requiredCapabilities(RequestView req) {
    Path pathMatcher = new Path(req.uri());
    Route route = resolveRoute(pathMatcher);
    HandlerHolder<?> handler = resolveHandler(req.method(), route);
    if (handler.config.requiredCapabilities != null) {
        return handler.config.requiredCapabilities;
    }
    if (route.requiredCapabilities != null) {
        return route.requiredCapabilities;
    }
    if (requiredCapabilities != null) {
        return requiredCapabilities;
    }
    return DEFAULT_REQUIRED_CAPABILITIES;
}
|
@Test
void resolves_correct_capabilities() {
    // Capabilities are declared at three levels: API-wide (METRICS), per-route
    // (SEARCH on /api1), and per-handler (SLOBROK on GET /api1, DOCUMENT on POST /api2).
    var restApi = RestApi.builder()
        .requiredCapabilities(Capability.CONTENT__METRICS_API)
        .addRoute(route("/api1")
            .requiredCapabilities(Capability.CONTENT__SEARCH_API)
            .get(ctx -> new MessageResponse(ctx.aclAction().name()),
                handlerConfig().withRequiredCapabilities(Capability.SLOBROK__API))
            .post(ctx -> new MessageResponse(ctx.aclAction().name())))
        .addRoute(route("/api2")
            .get(ctx -> new MessageResponse(ctx.aclAction().name()))
            .post(ctx -> new MessageResponse(ctx.aclAction().name()),
                handlerConfig().withRequiredCapabilities(Capability.CONTENT__DOCUMENT_API)))
        .build();
    // Most specific declaration wins for each method/path combination.
    assertRequiredCapability(restApi, Method.GET, "/api1", Capability.SLOBROK__API);
    assertRequiredCapability(restApi, Method.POST, "/api1", Capability.CONTENT__SEARCH_API);
    assertRequiredCapability(restApi, Method.GET, "/api2", Capability.CONTENT__METRICS_API);
    assertRequiredCapability(restApi, Method.POST, "/api2", Capability.CONTENT__DOCUMENT_API);
}
|
/**
 * Reports whether the given address falls inside any of the configured
 * reserved IP blocks. Blank or null input never matches.
 */
public boolean isReservedIpAddress(String address) {
    return !StringUtils.isBlank(address)
            && ipBlocks.stream().anyMatch(block -> subnetContainsAddress(block, address));
}
|
@Test
void testIsReservedIpAddress() {
    // Loopback and RFC 1918 private addresses are reserved; a public address is not.
    Assertions.assertTrue(ReservedIpChecker.getInstance().isReservedIpAddress("127.0.0.1"));
    Assertions.assertTrue(ReservedIpChecker.getInstance().isReservedIpAddress("192.168.1.10"));
    Assertions.assertFalse(ReservedIpChecker.getInstance().isReservedIpAddress("104.44.23.89"));
}
|
/**
 * Joins the given parts with the delimiter. Returns null for an empty array and
 * the sole element (possibly null) for a one-element array. Null or empty parts
 * after the first contribute only their delimiter.
 */
public static String join(final char delimiter, final String... strings) {
    if (strings.length == 0) {
        return null;
    }
    if (strings.length == 1) {
        return strings[0];
    }
    // Pre-size the builder: one char per delimiter plus every non-null part.
    int capacity = strings.length - 1;
    for (final String part : strings) {
        if (part != null) {
            capacity += part.length();
        }
    }
    final StringBuilder joined = new StringBuilder(capacity);
    if (strings[0] != null) {
        joined.append(strings[0]);
    }
    for (int i = 1; i < strings.length; ++i) {
        // The delimiter is always emitted; the part only when non-empty.
        joined.append(delimiter);
        if (!isEmpty(strings[i])) {
            joined.append(strings[i]);
        }
    }
    return joined.toString();
}
|
@Test
public void testJoin() {
    // No parts -> null; single part returned verbatim; null parts collapse to delimiters.
    assertNull(StringUtil.join('.'));
    assertEquals("Single part.", StringUtil.join('.', "Single part."));
    assertEquals("part1.part2.p3", StringUtil.join('.', "part1", "part2", "p3"));
    assertEquals("E", StringUtil.join('E', new String[2]));
}
|
@Override
public void put(final Windowed<Bytes> sessionKey, final byte[] aggregate) {
    // Delegate the write, then record it in the changelog with the inner
    // store's position so downstream consumers can track consistency.
    wrapped().put(sessionKey, aggregate);
    context.logChange(name(), SessionKeySchema.toBinary(sessionKey), aggregate, context.timestamp(), wrapped().getPosition());
}
|
@Test
public void shouldLogPutsWithPosition() {
    final Bytes binaryKey = SessionKeySchema.toBinary(key1);
    when(inner.getPosition()).thenReturn(POSITION);
    store.put(key1, value1);
    // The write must be forwarded and the changelog entry must carry the
    // serialized key plus the inner store's position.
    verify(inner).put(key1, value1);
    verify(context).logChange(store.name(), binaryKey, value1, 0L, POSITION);
}
|
/**
 * Sets the SHA-1 checksum of the dependency's file.
 *
 * @param sha1sum the SHA-1 hash value
 */
public void setSha1sum(String sha1sum) {
    this.sha1sum = sha1sum;
}
|
@Test
public void testSetSha1sum() {
    // The setter's value must be observable through the matching getter.
    String sha1sum = "test";
    Dependency instance = new Dependency();
    instance.setSha1sum(sha1sum);
    assertEquals(sha1sum, instance.getSha1sum());
}
|
/**
 * Returns the logic table names of every sharding table in this rule.
 * The result is a live key-set view of the underlying map.
 */
public Collection<String> getAllLogicTables() {
    return shardingTables.keySet();
}
|
@Test
void assertGetAllLogicTables() {
    // Both bound logic tables must be reported, in insertion order.
    assertThat(createBindingTableRule().getAllLogicTables(), is(new LinkedHashSet<>(Arrays.asList("logic_table", "sub_logic_table"))));
}
|
/**
 * Initializes the handler subsystem. Order matters: handlers must exist before
 * chains can reference them, chains before paths, and defaults come last.
 * Finally the handler configuration is registered with the module registry.
 */
public static void init() {
    initHandlers();
    initChains();
    initPaths();
    initDefaultHandlers();
    ModuleRegistry.registerModule(HandlerConfig.CONFIG_NAME, Handler.class.getName(), Config.getNoneDecryptedInstance().getJsonMapConfigNoCache(HandlerConfig.CONFIG_NAME), null);
}
|
@Test
public void mixedPathsAndSource() {
    // Configure a mix of explicit path chains and a source-provided endpoint set.
    Handler.config.setPaths(Arrays.asList(
        mkPathChain(null, "/my-api/first", "post", "third"),
        mkPathChain(MockEndpointSource.class.getName(), null, null, "secondBeforeFirst", "third"),
        mkPathChain(null, "/my-api/second", "put", "third")
    ));
    Handler.init();
    Map<HttpString, PathTemplateMatcher<String>> methodToMatcher = Handler.methodToMatcherMap;
    PathTemplateMatcher<String> getMatcher = methodToMatcher.get(Methods.GET);
    // Endpoints contributed by the source are matched under GET...
    PathTemplateMatcher.PathMatchResult<String> getFirst = getMatcher.match("/my-api/first");
    Assert.assertNotNull(getFirst);
    PathTemplateMatcher.PathMatchResult<String> getSecond = getMatcher.match("/my-api/second");
    Assert.assertNotNull(getSecond);
    // ...but a path never registered anywhere must not match.
    PathTemplateMatcher.PathMatchResult<String> getThird = getMatcher.match("/my-api/third");
    Assert.assertNull(getThird);
}
|
@Override
protected JsonObject convert(final JsonObject data) {
    // Extract only the RULE section of the aggregate config payload.
    return data.getAsJsonObject(ConfigGroupEnum.RULE.name());
}
|
@Test
public void testConvert() {
    // The converter must return exactly the object stored under the RULE key.
    JsonObject jsonObject = new JsonObject();
    JsonObject expectJsonObject = new JsonObject();
    jsonObject.add(ConfigGroupEnum.RULE.name(), expectJsonObject);
    assertThat(mockRuleDataRefresh.convert(jsonObject), is(expectJsonObject));
}
|
/**
 * Creates type information for the given class by delegating to the
 * {@link Type}-based overload; the cast is safe because the class token
 * fixes the type parameter.
 */
@SuppressWarnings("unchecked")
public static <T> TypeInformation<T> createTypeInfo(Class<T> type) {
    return (TypeInformation<T>) createTypeInfo((Type) type);
}
|
@Test
void testCreateTypeInfoFromInstance() {
    // A ResultTypeQueryable instance reports its own produced type directly.
    ResultTypeQueryable instance =
        new ResultTypeQueryable<Long>() {
            @Override
            public TypeInformation<Long> getProducedType() {
                return BasicTypeInfo.LONG_TYPE_INFO;
            }
        };
    TypeInformation<?> ti = TypeExtractor.createTypeInfo(instance, null, null, 0);
    assertThat(ti).isEqualTo(BasicTypeInfo.LONG_TYPE_INFO);
    // method also needs to work for instances that do not implement ResultTypeQueryable
    MapFunction<Integer, Long> func =
        new MapFunction<Integer, Long>() {
            @Override
            public Long map(Integer value) throws Exception {
                return value.longValue();
            }
        };
    // Position 0 of MapFunction<Integer, Long> is the input type, Integer.
    ti = TypeExtractor.createTypeInfo(func, MapFunction.class, func.getClass(), 0);
    assertThat(ti).isEqualTo(BasicTypeInfo.INT_TYPE_INFO);
}
|
/**
 * Creates an unconfigured {@code Read} transform; callers configure it via
 * the {@code with*} builder methods before applying it to a pipeline.
 */
public static <V> Read<V> read() {
    return new AutoValue_SparkReceiverIO_Read.Builder<V>().build();
}
|
@Test
public void testReadFromCustomReceiverWithOffset() {
    // Use the happy-path receiver (no mid-stream failure injected).
    CustomReceiverWithOffset.shouldFailInTheMiddle = false;
    ReceiverBuilder<String, CustomReceiverWithOffset> receiverBuilder =
        new ReceiverBuilder<>(CustomReceiverWithOffset.class).withConstructorArgs();
    SparkReceiverIO.Read<String> reader =
        SparkReceiverIO.<String>read()
            .withGetOffsetFn(Long::valueOf)
            .withTimestampFn(Instant::parse)
            .withPullFrequencySec(PULL_FREQUENCY_SEC)
            .withStartPollTimeoutSec(START_POLL_TIMEOUT_SEC)
            .withStartOffset(START_OFFSET)
            .withSparkReceiverBuilder(receiverBuilder);
    // The receiver emits the numbers 0..RECORDS_COUNT-1 as strings.
    List<String> expected = new ArrayList<>();
    for (int i = 0; i < CustomReceiverWithOffset.RECORDS_COUNT; i++) {
        expected.add(String.valueOf(i));
    }
    PCollection<String> actual = pipeline.apply(reader).setCoder(StringUtf8Coder.of());
    PAssert.that(actual).containsInAnyOrder(expected);
    // Bounded wait: the unbounded source would otherwise run forever.
    pipeline.run().waitUntilFinish(Duration.standardSeconds(15));
}
|
/**
 * Executes the scenario: runs setup if not already done, steps through each step
 * in order, and always performs teardown so parallel-runner latches are released
 * even on crashes.
 */
@Override
public void run() {
    try { // make sure we call afterRun() even on crashes
        // and operate countdown latches, else we may hang the parallel runner
        if (steps == null) {
            beforeRun();
        }
        if (skipped) {
            return;
        }
        int count = steps.size();
        int index = 0;
        // nextStepIndex() drives iteration so debug step-back can rewind.
        while ((index = nextStepIndex()) < count) {
            currentStep = steps.get(index);
            execute(currentStep);
            if (currentStepResult != null) { // can be null if debug step-back or hook skip
                result.addStepResult(currentStepResult);
            }
        }
    } catch (Exception e) {
        // Record whatever completed, then synthesize a failure step for the crash.
        if (currentStepResult != null) {
            result.addStepResult(currentStepResult);
        }
        logError("scenario [run] failed\n" + StringUtils.throwableToString(e));
        currentStepResult = result.addFakeStepResult("scenario [run] failed", e);
    } finally {
        if (!skipped) {
            afterRun();
            if (isFailed() && engine.getConfig().isAbortSuiteOnFailure()) {
                featureRuntime.suite.abort();
            }
        }
        if (caller.isNone()) {
            logAppender.close(); // reclaim memory
        }
    }
}
|
@Test
void testMatchSchema() {
    // A '##(...)' marker makes the nested dog schema optional: the match must
    // pass both when the key is absent and when it conforms to the schema.
    run(
        "def dogSchema = { id: '#string', color: '#string' }",
        "def schema = ({ id: '#string', name: '#string', dog: '##(dogSchema)' })",
        "def response1 = { id: '123', name: 'foo' }",
        "match response1 == schema",
        "def response2 = { id: '123', name: 'foo', dog: { id: '456', color: 'brown' } }",
        "match response2 == schema"
    );
}
|
/**
 * Demonstrates the MVC pattern: wires a giant model to its view through a
 * controller, then mutates the model via the controller and re-renders.
 */
public static void main(String[] args) {
    // Wire up the MVC triad: model state, its view, and the mediating controller.
    var model = new GiantModel(Health.HEALTHY, Fatigue.ALERT, Nourishment.SATURATED);
    var giantView = new GiantView();
    var giantController = new GiantController(model, giantView);
    // Show the giant's initial condition.
    giantController.updateView();
    // Simulate interactions that degrade the giant's state.
    giantController.setHealth(Health.WOUNDED);
    giantController.setNourishment(Nourishment.HUNGRY);
    giantController.setFatigue(Fatigue.TIRED);
    // Render the updated condition.
    giantController.updateView();
}
|
@Test
void shouldExecuteApplicationWithoutException() {
    // Smoke test: the demo entry point must complete without throwing.
    assertDoesNotThrow(() -> App.main(new String[]{}));
}
|
/**
 * Deserializes a Protobuf payload into Connect data using the converter's
 * pre-configured schema. Fails fast if the converter was created via the
 * no-arg constructor (no schema), and maps serialization/config failures
 * to the corresponding Connect exception types.
 */
@Override
public SchemaAndValue toConnectData(final String topic, final byte[] value) {
    if (this.schema == null) {
        throw new UnsupportedOperationException("ProtobufNoSRConverter is an internal "
            + "converter to ksqldb. It should not be instantiated via reflection through a no-arg "
            + "constructor.");
    }
    try {
        final ProtobufSchema protobufSchema = protobufData.fromConnectSchema(schema);
        final Object deserialized = deserializer.deserialize(value, protobufSchema);
        if (deserialized == null) {
            return SchemaAndValue.NULL;
        } else {
            // Only Protobuf Message instances can be converted; anything else is a bug.
            if (deserialized instanceof Message) {
                return protobufData.toConnectData(protobufSchema, (Message) deserialized);
            }
            throw new DataException(String.format(
                "Unsupported type %s returned during deserialization of topic %s ",
                deserialized.getClass().getName(),
                topic
            ));
        }
    } catch (SerializationException e) {
        throw new DataException(String.format(
            "Failed to deserialize data for topic %s to Protobuf: ",
            topic
        ), e);
    } catch (InvalidConfigurationException e) {
        throw new ConfigException(
            String.format("Failed to access Protobuf data from topic %s : %s", topic, e.getMessage())
        );
    }
}
|
@Test(expected = UnsupportedOperationException.class)
public void shouldThrowExceptionWhenUsedWithNoArgConstructor1() {
    // Given: a converter built without a schema (no-arg constructor).
    final ProtobufNoSRConverter protobufNoSRConverter = new ProtobufNoSRConverter();
    // When: converting data, it must reject the call outright.
    protobufNoSRConverter.toConnectData("topic", "test".getBytes(StandardCharsets.UTF_8));
}
|
/**
 * Returns the translated value for the given key, or null if the key is absent.
 * A null stored value is still translated through the mapping function, matching
 * the previous containsKey-based behavior.
 */
@Override
public V get(Object key) {
    // Single lookup in the common case: a null result is ambiguous (absent key
    // vs. present null value), so only then fall back to containsKey.
    B value = underlyingMap.get(key);
    if (value == null && !underlyingMap.containsKey(key)) {
        return null;
    }
    return valueMapping.apply(value);
}
|
@Test
public void testGet() {
    Map<String, Integer> underlying = createTestMap();
    TranslatedValueMapView<String, String, Integer> view =
        new TranslatedValueMapView<>(underlying, v -> v.toString());
    // Present keys are translated; absent keys yield null.
    assertEquals("2", view.get("foo"));
    assertEquals("3", view.get("bar"));
    assertEquals("5", view.get("baz"));
    assertNull(view.get("quux"));
    // The view is live: additions to the underlying map are visible immediately.
    underlying.put("quux", 101);
    assertEquals("101", view.get("quux"));
}
|
/**
 * Encodes a LISP nonce address to JSON: the nonce is always written, and the
 * nested mapping address is delegated to its own codec when present.
 */
@Override
public ObjectNode encode(LispNonceAddress address, CodecContext context) {
    checkNotNull(address, "LispListAddress cannot be null");
    final ObjectNode result = context.mapper().createObjectNode()
            .put(NONCE, address.getNonce());
    if (address.getAddress() != null) {
        final JsonCodec<MappingAddress> addressCodec =
                context.codec(MappingAddress.class);
        ObjectNode addressNode = addressCodec.encode(address.getAddress(), context);
        result.set(ADDRESS, addressNode);
    }
    return result;
}
|
@Test
public void testLispNonceAddressEncode() {
    // Build an address with both a nonce and a nested IPv4 mapping address,
    // then verify the JSON encoding round-trips through the matcher.
    LispNonceAddress address = new LispNonceAddress.Builder()
            .withNonce(NONCE)
            .withAddress(MappingAddresses.ipv4MappingAddress(ADDRESS))
            .build();
    ObjectNode addressJson = nonceAddressCodec.encode(address, context);
    assertThat("errors in encoding nonce address JSON",
            addressJson, LispNonceAddressJsonMatcher.matchesNonceAddress(address));
}
|
@Override
public void startTrackThread() {
    // Intentionally a no-op in this implementation: no tracking thread is started.
}
|
@Test
public void startTrackThread() {
    // The no-op implementation must be callable without side effects or errors.
    mSensorsAPI.startTrackThread();
}
|
/**
 * Parses a path as a snapshot file. Returns the snapshot's offset/epoch plus
 * partial/deleted flags, or {@link Optional#empty()} when the name is not a
 * snapshot: wrong suffix, too short, or non-numeric offset/epoch fields.
 */
public static Optional<SnapshotPath> parse(Path path) {
    Path filename = path.getFileName();
    if (filename == null) {
        return Optional.empty();
    }
    String name = filename.toString();
    boolean partial = false;
    boolean deleted = false;
    if (name.endsWith(PARTIAL_SUFFIX)) {
        partial = true;
    } else if (name.endsWith(DELETE_SUFFIX)) {
        deleted = true;
    } else if (!name.endsWith(SUFFIX)) {
        return Optional.empty();
    }
    // A recognised suffix alone is not enough: a short or non-numeric prefix is
    // "not a snapshot" rather than an IndexOutOfBounds/NumberFormat crash.
    if (name.length() < OFFSET_WIDTH + EPOCH_WIDTH + 2) {
        return Optional.empty();
    }
    try {
        long endOffset = Long.parseLong(name.substring(0, OFFSET_WIDTH));
        int epoch = Integer.parseInt(
            name.substring(OFFSET_WIDTH + 1, OFFSET_WIDTH + EPOCH_WIDTH + 1)
        );
        return Optional.of(new SnapshotPath(path, new OffsetAndEpoch(endOffset, epoch), partial, deleted));
    } catch (NumberFormatException e) {
        return Optional.empty();
    }
}
|
@Test
public void testInvalidSnapshotFilenames() {
    // Files that live alongside snapshots in a partition directory must all
    // be rejected (Optional.empty) rather than misparsed as snapshots.
    Path root = FileSystems.getDefault().getPath("/");
    // Doesn't parse log files
    assertEquals(Optional.empty(), Snapshots.parse(root.resolve("00000000000000000000.log")));
    // Doesn't parse producer snapshots
    assertEquals(Optional.empty(), Snapshots.parse(root.resolve("00000000000000000000.snapshot")));
    // Doesn't parse offset indexes
    assertEquals(Optional.empty(), Snapshots.parse(root.resolve("00000000000000000000.index")));
    assertEquals(Optional.empty(), Snapshots.parse(root.resolve("00000000000000000000.timeindex")));
    // Leader epoch checkpoint
    assertEquals(Optional.empty(), Snapshots.parse(root.resolve("leader-epoch-checkpoint")));
    // partition metadata
    assertEquals(Optional.empty(), Snapshots.parse(root.resolve("partition.metadata")));
}
|
/**
 * Creates a {@code ReadFiles} transform for the given Thrift record class;
 * further options (e.g. protocol) are configured via its builder methods.
 */
public static <T> ReadFiles<T> readFiles(Class<T> recordClass) {
    return new AutoValue_ThriftIO_ReadFiles.Builder<T>().setRecordClass(recordClass).build();
}
|
@Test
public void testReadFilesBinaryProtocol() {
    // Match the test data files and decode them with the binary Thrift protocol.
    PCollection<TestThriftStruct> testThriftDoc =
        mainPipeline
            .apply(Create.of(THRIFT_DIR + "data").withCoder(StringUtf8Coder.of()))
            .apply(FileIO.matchAll())
            .apply(FileIO.readMatches())
            .apply(ThriftIO.readFiles(TestThriftStruct.class).withProtocol(tBinaryProtoFactory));
    // Assert
    PAssert.that(testThriftDoc).containsInAnyOrder(TEST_THRIFT_STRUCT);
    // Execute pipeline
    mainPipeline.run();
}
|
/**
 * Reports whether the namespace's current items differ from its latest active
 * release. A never-released namespace counts as modified when it has normal items.
 */
boolean isModified(Namespace namespace) {
    Release latestRelease = releaseService.findLatestActiveRelease(namespace);
    List<Item> currentItems = itemService.findItemsWithoutOrdered(namespace.getId());
    if (latestRelease == null) {
        return hasNormalItems(currentItems);
    }
    Map<String, String> releasedConfig = GSON.fromJson(latestRelease.getConfigurations(), GsonType.CONFIG);
    Map<String, String> currentConfig = generateConfigurationFromItems(namespace, currentItems);
    // Any key/value divergence between released and current config means modified.
    return !Maps.difference(releasedConfig, currentConfig).areEqual();
}
|
@Test
public void testNamespaceAddItem() {
    // The release contains only k1, but the namespace now has k1 and k2:
    // the extra item must mark the namespace as modified.
    long namespaceId = 1;
    Namespace namespace = createNamespace(namespaceId);
    Release release = createRelease("{\"k1\":\"v1\"}");
    List<Item> items = Arrays.asList(createItem("k1", "v1"), createItem("k2", "v2"));
    when(releaseService.findLatestActiveRelease(namespace)).thenReturn(release);
    when(itemService.findItemsWithoutOrdered(namespaceId)).thenReturn(items);
    when(namespaceService.findParentNamespace(namespace)).thenReturn(null);
    boolean isModified = namespaceUnlockAspect.isModified(namespace);
    Assert.assertTrue(isModified);
}
|
/**
 * Parses CLI input, enforcing completeness only when the line is being accepted:
 * unclosed quotes or a missing terminating char raise {@code EOFError} so the
 * line editor prompts for continuation. Comments-only lines and CLI commands
 * are accepted without a terminator.
 */
@Override
public ParsedLine parse(final String line, final int cursor, final ParseContext context) {
    final ParsedLine parsed = delegate.parse(line, cursor, context);
    if (context != ParseContext.ACCEPT_LINE) {
        return parsed;
    }
    if (UnclosedQuoteChecker.isUnclosedQuote(line)) {
        throw new EOFError(-1, -1, "Missing end quote", "end quote char");
    }
    // Strip comments before deciding whether a terminator is required.
    final String bare = CommentStripper.strip(parsed.line());
    if (bare.isEmpty()) {
        return parsed;
    }
    if (cliCmdPredicate.test(bare)) {
        return parsed;
    }
    if (!bare.endsWith(TERMINATION_CHAR)) {
        throw new EOFError(-1, -1, "Missing termination char", "termination char");
    }
    return parsed;
}
|
@Test
public void shouldAcceptIfLineTerminated() {
    // Given: the delegate reports a properly terminated line.
    givenDelegateWillReturn(TERMINATED_LINE);
    // When:
    final ParsedLine result = parser.parse("what ever", 0, ParseContext.ACCEPT_LINE);
    // Then: the delegate's result is passed through unchanged.
    assertThat(result, is(parsedLine));
}
|
/**
 * Builds queries containing an IN clause over the given numeric values by
 * rendering each value to text and delegating to the string-based variant.
 */
public static List<Integer> buildQueryWithINClause(Configuration conf,
                                                   List<String> queries,
                                                   StringBuilder prefix,
                                                   StringBuilder suffix,
                                                   Collection<Long> inValues,
                                                   String inColumn,
                                                   boolean addParens,
                                                   boolean notIn) {
    final List<String> renderedValues = inValues.stream()
        .map(value -> value.toString())
        .collect(Collectors.toList());
    return buildQueryWithINClauseStrings(conf, queries, prefix, suffix,
        renderedValues, inColumn, addParens, notIn);
}
|
/**
 * Exercises IN-clause query batching under varying limits for max elements per
 * clause, max query length, and max parameters, including the NOT IN and
 * no-parenthesis variants. Each generated query is validated against Derby.
 */
@Test
public void testBuildQueryWithINClause() throws Exception {
    List<String> queries = new ArrayList<>();
    List<Integer> ret;
    StringBuilder prefix = new StringBuilder();
    StringBuilder suffix = new StringBuilder();
    // Note, this is a "real" query that depends on one of the metastore tables
    prefix.append("select count(*) from TXNS where ");
    suffix.append(" and TXN_STATE = 'o'");
    // Case 1 - Max in list members: 10; Max query string length: 1KB
    // The first query happens to have 2 full batches.
    MetastoreConf.setLongVar(conf, ConfVars.DIRECT_SQL_MAX_QUERY_LENGTH, 1);
    MetastoreConf.setLongVar(conf, ConfVars.DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE, 10);
    MetastoreConf.setLongVar(conf, ConfVars.DIRECT_SQL_MAX_PARAMETERS, 2000);
    List<Long> inList = new ArrayList<>();
    for (long i = 1; i <= 189; i++) {
        inList.add(i);
    }
    ret = TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, inList, "TXN_ID", true, false);
    Assert.assertEquals(1, queries.size());
    Assert.assertEquals(queries.size(), ret.size());
    Assert.assertEquals(189L, ret.get(0).longValue());
    runAgainstDerby(queries);
    // Case 2 - Max in list members: 10; Max query string length: 1KB
    // The first query has 2 full batches, and the second query only has 1 batch which only contains 1 member
    queries.clear();
    inList.add((long)190);
    ret = TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, inList, "TXN_ID", true, false);
    Assert.assertEquals(2, queries.size());
    Assert.assertEquals(queries.size(), ret.size());
    Assert.assertEquals(189L, ret.get(0).longValue());
    Assert.assertEquals(1L, ret.get(1).longValue());
    runAgainstDerby(queries);
    // Case 3.1 - Max in list members: 1000, Max query string length: 1KB, and exact 1000 members in a single IN clause
    MetastoreConf.setLongVar(conf, ConfVars.DIRECT_SQL_MAX_QUERY_LENGTH, 1);
    MetastoreConf.setLongVar(conf, ConfVars.DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE, 1000);
    queries.clear();
    for (long i = 191; i <= 1000; i++) {
        inList.add(i);
    }
    ret = TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, inList, "TXN_ID", true, false);
    Assert.assertEquals(5, queries.size());
    Assert.assertEquals(queries.size(), ret.size());
    Assert.assertEquals(267L, ret.get(0).longValue());
    runAgainstDerby(queries);
    // Case 3.2 - Max in list members: 1000, Max query string length: 10KB, and exact 1000 members in a single IN clause
    MetastoreConf.setLongVar(conf, ConfVars.DIRECT_SQL_MAX_QUERY_LENGTH, 10);
    MetastoreConf.setLongVar(conf, ConfVars.DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE, 1000);
    queries.clear();
    ret = TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, inList, "TXN_ID", true, false);
    Assert.assertEquals(1, queries.size());
    Assert.assertEquals(queries.size(), ret.size());
    runAgainstDerby(queries);
    // Case 3.3 - Now with 2000 entries, try the above settings
    for (long i = 1001; i <= 2000; i++) {
        inList.add(i);
    }
    MetastoreConf.setLongVar(conf, ConfVars.DIRECT_SQL_MAX_QUERY_LENGTH, 1);
    queries.clear();
    ret = TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, inList, "TXN_ID", true, false);
    Assert.assertEquals(10, queries.size());
    Assert.assertEquals(queries.size(), ret.size());
    Assert.assertEquals(267L, ret.get(0).longValue());
    Assert.assertEquals(240L, ret.get(1).longValue());
    runAgainstDerby(queries);
    MetastoreConf.setLongVar(conf, ConfVars.DIRECT_SQL_MAX_QUERY_LENGTH, 10);
    queries.clear();
    ret = TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, inList, "TXN_ID", true, false);
    Assert.assertEquals(1, queries.size());
    Assert.assertEquals(queries.size(), ret.size());
    Assert.assertEquals(2000L, ret.get(0).longValue());
    runAgainstDerby(queries);
    // Case 4 - NOT IN list
    queries.clear();
    ret = TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, inList, "TXN_ID", true, true);
    Assert.assertEquals(1, queries.size());
    Assert.assertEquals(queries.size(), ret.size());
    runAgainstDerby(queries);
    // Case 5 - Max in list members: 1000; Max query string length: 10KB
    queries.clear();
    for (long i = 2001; i <= 4321; i++) {
        inList.add(i);
    }
    ret = TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, inList, "TXN_ID", true, false);
    Assert.assertEquals(3, queries.size());
    Assert.assertEquals(queries.size(), ret.size());
    runAgainstDerby(queries);
    // Case 6 - No parenthesis
    queries.clear();
    suffix.setLength(0);
    suffix.append("");
    ret = TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, inList, "TXN_ID", false, false);
    Assert.assertEquals(3, queries.size());
    Assert.assertEquals(queries.size(), ret.size());
    Assert.assertEquals(2000L, ret.get(0).longValue());
    Assert.assertEquals(2000L, ret.get(1).longValue());
    Assert.assertEquals(321L, ret.get(2).longValue());
    runAgainstDerby(queries);
}
|
/**
 * Returns whether statistics collection is enabled for this ability.
 */
public boolean statsEnabled() {
    return statsEnabled;
}
|
@Test
void abilityBuilderSetStatsEnabledTrueTest() {
    // The builder flag must be reflected by the built ability's accessor.
    Ability statsEnabledAbility = DefaultBot.getDefaultBuilder().setStatsEnabled(true).build();
    assertTrue(statsEnabledAbility.statsEnabled());
}
|
/**
 * Replays a coordinator record into in-memory state. The record key's version
 * selects the record type: versions 0-1 are offset commits handled by the
 * offset manager; all other known versions are group records handled by the
 * group metadata manager. Unknown versions are a fatal state error.
 */
@Override
public void replay(
    long offset,
    long producerId,
    short producerEpoch,
    CoordinatorRecord record
) throws RuntimeException {
    ApiMessageAndVersion key = record.key();
    ApiMessageAndVersion value = record.value();
    switch (key.version()) {
        case 0:
        case 1:
            // Offset commit records; a null value denotes a tombstone.
            offsetMetadataManager.replay(
                offset,
                producerId,
                (OffsetCommitKey) key.message(),
                (OffsetCommitValue) Utils.messageOrNull(value)
            );
            break;
        case 2:
            groupMetadataManager.replay(
                (GroupMetadataKey) key.message(),
                (GroupMetadataValue) Utils.messageOrNull(value)
            );
            break;
        case 3:
            groupMetadataManager.replay(
                (ConsumerGroupMetadataKey) key.message(),
                (ConsumerGroupMetadataValue) Utils.messageOrNull(value)
            );
            break;
        case 4:
            groupMetadataManager.replay(
                (ConsumerGroupPartitionMetadataKey) key.message(),
                (ConsumerGroupPartitionMetadataValue) Utils.messageOrNull(value)
            );
            break;
        case 5:
            groupMetadataManager.replay(
                (ConsumerGroupMemberMetadataKey) key.message(),
                (ConsumerGroupMemberMetadataValue) Utils.messageOrNull(value)
            );
            break;
        case 6:
            groupMetadataManager.replay(
                (ConsumerGroupTargetAssignmentMetadataKey) key.message(),
                (ConsumerGroupTargetAssignmentMetadataValue) Utils.messageOrNull(value)
            );
            break;
        case 7:
            groupMetadataManager.replay(
                (ConsumerGroupTargetAssignmentMemberKey) key.message(),
                (ConsumerGroupTargetAssignmentMemberValue) Utils.messageOrNull(value)
            );
            break;
        case 8:
            groupMetadataManager.replay(
                (ConsumerGroupCurrentMemberAssignmentKey) key.message(),
                (ConsumerGroupCurrentMemberAssignmentValue) Utils.messageOrNull(value)
            );
            break;
        case 9:
            groupMetadataManager.replay(
                (ShareGroupPartitionMetadataKey) key.message(),
                (ShareGroupPartitionMetadataValue) Utils.messageOrNull(value)
            );
            break;
        case 10:
            groupMetadataManager.replay(
                (ShareGroupMemberMetadataKey) key.message(),
                (ShareGroupMemberMetadataValue) Utils.messageOrNull(value)
            );
            break;
        case 11:
            groupMetadataManager.replay(
                (ShareGroupMetadataKey) key.message(),
                (ShareGroupMetadataValue) Utils.messageOrNull(value)
            );
            break;
        case 12:
            groupMetadataManager.replay(
                (ShareGroupTargetAssignmentMetadataKey) key.message(),
                (ShareGroupTargetAssignmentMetadataValue) Utils.messageOrNull(value)
            );
            break;
        case 13:
            groupMetadataManager.replay(
                (ShareGroupTargetAssignmentMemberKey) key.message(),
                (ShareGroupTargetAssignmentMemberValue) Utils.messageOrNull(value)
            );
            break;
        case 14:
            groupMetadataManager.replay(
                (ShareGroupCurrentMemberAssignmentKey) key.message(),
                (ShareGroupCurrentMemberAssignmentValue) Utils.messageOrNull(value)
            );
            break;
        default:
            // A version we do not recognize indicates log corruption or a newer,
            // incompatible record format.
            throw new IllegalStateException("Received an unknown record type " + key.version()
                + " in " + record);
    }
}
|
@Test
public void testReplayOffsetCommit() {
    GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class);
    OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class);
    CoordinatorMetrics coordinatorMetrics = mock(CoordinatorMetrics.class);
    CoordinatorMetricsShard metricsShard = mock(CoordinatorMetricsShard.class);
    GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
        new LogContext(),
        groupMetadataManager,
        offsetMetadataManager,
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        coordinatorMetrics,
        metricsShard
    );
    OffsetCommitKey key = new OffsetCommitKey();
    OffsetCommitValue value = new OffsetCommitValue();
    // Both key versions 0 and 1 must be routed to the offset metadata manager.
    coordinator.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, new CoordinatorRecord(
        new ApiMessageAndVersion(key, (short) 0),
        new ApiMessageAndVersion(value, (short) 0)
    ));
    coordinator.replay(1L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, new CoordinatorRecord(
        new ApiMessageAndVersion(key, (short) 1),
        new ApiMessageAndVersion(value, (short) 0)
    ));
    verify(offsetMetadataManager, times(1)).replay(
        0L,
        RecordBatch.NO_PRODUCER_ID,
        key,
        value
    );
    verify(offsetMetadataManager, times(1)).replay(
        1L,
        RecordBatch.NO_PRODUCER_ID,
        key,
        value
    );
}
|
/**
 * Invokes the next filter in the chain, passing along whether this side of the
 * invocation is the provider (derived from the invocation's side attribute).
 */
public Result invoke(Invoker<?> invoker, Invocation invocation) throws RpcException {
    return invoke(invoker, invocation, PROVIDER.equals(MetricsSupport.getSide(invocation)));
}
|
@Test
void testCollectDisabled() {
    // With collection disabled, invoking the filter must record no request metrics
    // (only the application info sample, which is removed before the check).
    given(invoker.invoke(invocation)).willReturn(new AppResponse("success"));
    filter.invoke(invoker, invocation);
    Map<String, MetricSample> metricsMap = getMetricsMap();
    metricsMap.remove(MetricsKey.APPLICATION_METRIC_INFO.getName());
    Assertions.assertTrue(metricsMap.isEmpty());
}
|
/**
 * Evaluates the route's "when" condition against the consumer URL and
 * invocation. An empty (or null) condition matches every request.
 */
boolean matchWhen(URL url, Invocation invocation) {
    return CollectionUtils.isEmptyMap(whenCondition)
            || doMatch(url, null, invocation, whenCondition, true);
}
|
/**
 * Verifies that {@code methods=...} conditions in a route rule are matched
 * against the invoked method — both alone and combined with host conditions —
 * and that routing filters the invoker list accordingly.
 */
@Test
void testRoute_methodRoute() {
    Invocation invocation = new RpcInvocation("getFoo", "com.foo.BarService", "", new Class<?>[0], new Object[0]);

    // The consumer URL advertises several methods, but matching is driven by
    // the invoked method name, so "methods=getFoo" matches.
    StateRouter methodRule = new ConditionStateRouterFactory()
            .getRouter(String.class, getRouteUrl("methods=getFoo => host = 1.2.3.4"));
    boolean matched = ((ConditionStateRouter) methodRule)
            .matchWhen(
                    URL.valueOf("consumer://1.1.1.1/com.foo.BarService?methods=setFoo,getFoo,findFoo"), invocation);
    Assertions.assertTrue(matched);

    // A URL exposing exactly the invoked method also matches.
    matched = ((ConditionStateRouter) methodRule)
            .matchWhen(URL.valueOf("consumer://1.1.1.1/com.foo.BarService?methods=getFoo"), invocation);
    Assertions.assertTrue(matched);

    // Method conditions compose with host conditions: host!=1.1.1.1 rejects
    // this consumer even though the method matches...
    StateRouter hostMismatchRule = new ConditionStateRouterFactory()
            .getRouter(String.class, getRouteUrl("methods=getFoo & host!=1.1.1.1 => host = 1.2.3.4"));
    matched = ((ConditionStateRouter) hostMismatchRule)
            .matchWhen(URL.valueOf("consumer://1.1.1.1/com.foo.BarService?methods=getFoo"), invocation);
    Assertions.assertFalse(matched);

    // ...while host=1.1.1.1 accepts it.
    StateRouter hostMatchRule = new ConditionStateRouterFactory()
            .getRouter(String.class, getRouteUrl("methods=getFoo & host=1.1.1.1 => host = 1.2.3.4"));
    matched = ((ConditionStateRouter) hostMatchRule)
            .matchWhen(URL.valueOf("consumer://1.1.1.1/com.foo.BarService?methods=getFoo"), invocation);
    Assertions.assertTrue(matched);

    // Build three candidate invokers: one remote (10.20.3.3) and two local.
    List<Invoker<String>> candidates = new ArrayList<Invoker<String>>();
    Invoker<String> remoteInvoker =
            new MockInvoker<String>(URL.valueOf("dubbo://10.20.3.3:20880/com.foo.BarService"));
    Invoker<String> localInvokerA =
            new MockInvoker<String>(URL.valueOf("dubbo://" + LOCAL_HOST + ":20880/com.foo.BarService"));
    Invoker<String> localInvokerB =
            new MockInvoker<String>(URL.valueOf("dubbo://" + LOCAL_HOST + ":20880/com.foo.BarService"));
    candidates.add(remoteInvoker);
    candidates.add(localInvokerA);
    candidates.add(localInvokerB);
    BitList<Invoker<String>> invokers = new BitList<>(candidates);

    // Rule whose when-condition matches (local host + getFoo): only the
    // 10.20.3.3 invoker survives the then-condition filter.
    StateRouter filteringRule = new ConditionStateRouterFactory()
            .getRouter(
                    String.class,
                    getRouteUrl("host = " + LOCAL_HOST + " & methods = getFoo => " + " host = 10.20.3.3")
                            .addParameter(FORCE_KEY, String.valueOf(true)));
    List<Invoker<String>> routed = filteringRule.route(
            invokers.clone(),
            URL.valueOf("consumer://" + LOCAL_HOST + "/com.foo.BarService"),
            invocation,
            false,
            new Holder<>());
    Assertions.assertEquals(1, routed.size());

    // Rule keyed on a method that is never invoked: the when-condition does
    // not apply, so the invoker list passes through untouched.
    StateRouter nonMatchingRule = new ConditionStateRouterFactory()
            .getRouter(
                    String.class,
                    getRouteUrl("host = " + LOCAL_HOST + " & methods = unvalidmethod => " + " host = 10.20.3.3")
                            .addParameter(FORCE_KEY, String.valueOf(true)));
    List<Invoker<String>> unrouted = nonMatchingRule.route(
            invokers.clone(),
            URL.valueOf("consumer://" + LOCAL_HOST + "/com.foo.BarService"),
            invocation,
            false,
            new Holder<>());
    Assertions.assertEquals(3, unrouted.size());
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.