| focal_method | test_case |
|---|---|
@Override
public Flux<ReactiveRedisConnection.BooleanResponse<RenameCommand>> renameNX(Publisher<RenameCommand> commands) {
return execute(commands, command -> {
Assert.notNull(command.getKey(), "Key must not be null!");
Assert.notNull(command.getNewName(), "New name must not be null!");
byte[] keyBuf = toByteArray(command.getKey());
byte[] newKeyBuf = toByteArray(command.getNewName());
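// If both keys hash to the same cluster slot, the plain RENAMENX command works directly.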
if (executorService.getConnectionManager().calcSlot(keyBuf) == executorService.getConnectionManager().calcSlot(newKeyBuf)) {
return super.renameNX(Mono.just(command));
}
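// Cross-slot rename: emulate RENAMENX by checking that the target key does not exist, DUMPing
// the source, RESTOREing it under the new key with the original TTL, then deleting the source.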
return exists(command.getNewName())
.zipWith(read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.DUMP, keyBuf))
.filter(newKeyExistsAndDump -> !newKeyExistsAndDump.getT1() && Objects.nonNull(newKeyExistsAndDump.getT2()))
.map(Tuple2::getT2)
.zipWhen(value ->
pTtl(command.getKey())
.filter(Objects::nonNull)
.map(ttl -> Math.max(0, ttl))
.switchIfEmpty(Mono.just(0L))
)
.flatMap(valueAndTtl -> write(newKeyBuf, StringCodec.INSTANCE, RedisCommands.RESTORE, newKeyBuf, valueAndTtl.getT2(), valueAndTtl.getT1())
.then(Mono.just(true)))
.switchIfEmpty(Mono.just(false))
.doOnSuccess(didRename -> {
if (didRename) {
del(command.getKey());
}
})
.map(didRename -> new BooleanResponse<>(command, didRename));
});
}
|
@Test
public void testRenameNX() {
connection.stringCommands().set(originalKey, value).block();
if (hasTtl) {
connection.keyCommands().expire(originalKey, Duration.ofSeconds(1000)).block();
}
Integer originalSlot = getSlotForKey(originalKey);
newKey = getNewKeyForSlot(new String(originalKey.array()), getTargetSlot(originalSlot));
Boolean result = connection.keyCommands().renameNX(originalKey, newKey).block();
assertThat(result).isTrue();
assertThat(connection.stringCommands().get(newKey).block()).isEqualTo(value);
if (hasTtl) {
assertThat(connection.keyCommands().ttl(newKey).block()).isGreaterThan(0);
} else {
assertThat(connection.keyCommands().ttl(newKey).block()).isEqualTo(-1);
}
connection.stringCommands().set(originalKey, value).block();
result = connection.keyCommands().renameNX(originalKey, newKey).block();
assertThat(result).isFalse();
}
|
public static Read read() {
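// Defaults: no queue or exchange declaration, no read-time limit, unbounded record count, correlation IDs disabled.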
return new AutoValue_RabbitMqIO_Read.Builder()
.setQueueDeclare(false)
.setExchangeDeclare(false)
.setMaxReadTime(null)
.setMaxNumRecords(Long.MAX_VALUE)
.setUseCorrelationId(false)
.build();
}
|
@Test(expected = Pipeline.PipelineExecutionException.class)
public void testQueueDeclareWithoutQueueNameFails() throws Exception {
RabbitMqIO.Read read = RabbitMqIO.read().withQueueDeclare(true);
doExchangeTest(new ExchangeTestPlan(read, 1));
}
|
@Override
public boolean commitRequested() {
return task.commitRequested();
}
|
@Test
public void shouldDelegateCommitRequested() {
final ReadOnlyTask readOnlyTask = new ReadOnlyTask(task);
readOnlyTask.commitRequested();
verify(task).commitRequested();
}
|
public static DataflowRunner fromOptions(PipelineOptions options) {
DataflowPipelineOptions dataflowOptions =
PipelineOptionsValidator.validate(DataflowPipelineOptions.class, options);
ArrayList<String> missing = new ArrayList<>();
if (dataflowOptions.getAppName() == null) {
missing.add("appName");
}
if (Strings.isNullOrEmpty(dataflowOptions.getRegion())
&& isServiceEndpoint(dataflowOptions.getDataflowEndpoint())) {
missing.add("region");
}
if (missing.size() > 0) {
throw new IllegalArgumentException(
"Missing required pipeline options: " + Joiner.on(',').join(missing));
}
validateWorkerSettings(
PipelineOptionsValidator.validate(DataflowPipelineWorkerPoolOptions.class, options));
PathValidator validator = dataflowOptions.getPathValidator();
String gcpTempLocation;
try {
gcpTempLocation = dataflowOptions.getGcpTempLocation();
} catch (Exception e) {
throw new IllegalArgumentException(
"DataflowRunner requires gcpTempLocation, "
+ "but failed to retrieve a value from PipelineOptions",
e);
}
validator.validateOutputFilePrefixSupported(gcpTempLocation);
String stagingLocation;
try {
stagingLocation = dataflowOptions.getStagingLocation();
} catch (Exception e) {
throw new IllegalArgumentException(
"DataflowRunner requires stagingLocation, "
+ "but failed to retrieve a value from PipelineOptions",
e);
}
validator.validateOutputFilePrefixSupported(stagingLocation);
if (!isNullOrEmpty(dataflowOptions.getSaveProfilesToGcs())) {
validator.validateOutputFilePrefixSupported(dataflowOptions.getSaveProfilesToGcs());
}
if (dataflowOptions.getFilesToStage() != null) {
// The user specifically requested these files, so fail now if they do not exist.
// (automatically detected classpath elements are permitted to not exist, so later
// staging will not fail on nonexistent files)
dataflowOptions.getFilesToStage().stream()
.forEach(
stagedFileSpec -> {
File localFile;
if (stagedFileSpec.contains("=")) {
String[] components = stagedFileSpec.split("=", 2);
localFile = new File(components[1]);
} else {
localFile = new File(stagedFileSpec);
}
if (!localFile.exists()) {
// should be FileNotFoundException, but for build-time backwards compatibility
// cannot add checked exception
throw new RuntimeException(
String.format("Non-existent files specified in filesToStage: %s", localFile));
}
});
} else {
dataflowOptions.setFilesToStage(
detectClassPathResourcesToStage(DataflowRunner.class.getClassLoader(), options));
if (dataflowOptions.getFilesToStage().isEmpty()) {
throw new IllegalArgumentException("No files to stage has been found.");
} else {
LOG.info(
"PipelineOptions.filesToStage was not specified. "
+ "Defaulting to files from the classpath: will stage {} files. "
+ "Enable logging at DEBUG level to see which files will be staged.",
dataflowOptions.getFilesToStage().size());
LOG.debug("Classpath elements: {}", dataflowOptions.getFilesToStage());
}
}
// Verify jobName according to service requirements, converting to lowercase if necessary.
String jobName = dataflowOptions.getJobName().toLowerCase();
checkArgument(
jobName.matches("[a-z]([-a-z0-9]*[a-z0-9])?"),
"JobName invalid; the name must consist of only the characters "
+ "[-a-z0-9], starting with a letter and ending with a letter "
+ "or number");
if (!jobName.equals(dataflowOptions.getJobName())) {
LOG.info(
"PipelineOptions.jobName did not match the service requirements. "
+ "Using {} instead of {}.",
jobName,
dataflowOptions.getJobName());
}
dataflowOptions.setJobName(jobName);
// Verify project
String project = dataflowOptions.getProject();
if (project.matches("[0-9]*")) {
throw new IllegalArgumentException(
"Project ID '"
+ project
+ "' invalid. Please make sure you specified the Project ID, not project number.");
} else if (!project.matches(PROJECT_ID_REGEXP)) {
throw new IllegalArgumentException(
"Project ID '"
+ project
+ "' invalid. Please make sure you specified the Project ID, not project"
+ " description.");
}
DataflowPipelineDebugOptions debugOptions =
dataflowOptions.as(DataflowPipelineDebugOptions.class);
// Verify the number of worker threads is a valid value
if (debugOptions.getNumberOfWorkerHarnessThreads() < 0) {
throw new IllegalArgumentException(
"Number of worker harness threads '"
+ debugOptions.getNumberOfWorkerHarnessThreads()
+ "' invalid. Please make sure the value is non-negative.");
}
// Verify that if recordJfrOnGcThrashing is set, the pipeline is running on at least Java 9
if (dataflowOptions.getRecordJfrOnGcThrashing()
&& Environments.getJavaVersion() == Environments.JavaVersion.java8) {
throw new IllegalArgumentException(
"recordJfrOnGcThrashing is only supported on java 9 and up.");
}
if (dataflowOptions.isStreaming() && dataflowOptions.getGcsUploadBufferSizeBytes() == null) {
dataflowOptions.setGcsUploadBufferSizeBytes(GCS_UPLOAD_BUFFER_SIZE_BYTES_DEFAULT);
}
// Adding the Java version to the SDK name for users' and support convenience.
String agentJavaVer = "(JRE 8 environment)";
if (Environments.getJavaVersion() != Environments.JavaVersion.java8) {
agentJavaVer =
String.format("(JRE %s environment)", Environments.getJavaVersion().specification());
}
DataflowRunnerInfo dataflowRunnerInfo = DataflowRunnerInfo.getDataflowRunnerInfo();
String userAgentName = dataflowRunnerInfo.getName();
Preconditions.checkArgument(
!userAgentName.equals(""), "Dataflow runner's `name` property cannot be empty.");
String userAgentVersion = dataflowRunnerInfo.getVersion();
Preconditions.checkArgument(
!userAgentVersion.equals(""), "Dataflow runner's `version` property cannot be empty.");
String userAgent =
String.format("%s/%s%s", userAgentName, userAgentVersion, agentJavaVer).replace(" ", "_");
dataflowOptions.setUserAgent(userAgent);
return new DataflowRunner(dataflowOptions);
}
|
@Test
public void testGcsStagingLocationInitialization() throws Exception {
// Set temp location (required), and check that staging location is set.
DataflowPipelineOptions options = PipelineOptionsFactory.as(DataflowPipelineOptions.class);
options.setTempLocation(VALID_TEMP_BUCKET);
options.setProject(PROJECT_ID);
options.setRegion(REGION_ID);
options.setGcpCredential(new TestCredential());
options.setGcsUtil(mockGcsUtil);
options.setRunner(DataflowRunner.class);
DataflowRunner.fromOptions(options);
assertNotNull(options.getStagingLocation());
}
|
@Override
public String getBookmark() {
final String path = this.getAbbreviatedPath();
String bookmark = PreferencesFactory.get().getProperty(String.format("local.bookmark.%s", path));
if(StringUtils.isBlank(bookmark)) {
try {
bookmark = resolver.create(this);
}
catch(AccessDeniedException e) {
log.warn(String.format("Failure resolving bookmark for %s. %s", this, e));
}
}
return bookmark;
}
|
@Test
public void testBookmark() throws Exception {
FinderLocal l = new FinderLocal(System.getProperty("user.dir"), UUID.randomUUID().toString(), new AliasFilesystemBookmarkResolver());
assertNull(l.getBookmark());
new DefaultLocalTouchFeature().touch(l);
assertNotNull(l.getBookmark());
assertEquals(l.getBookmark(), l.getBookmark());
l.delete();
}
|
@SuppressWarnings("checkstyle:MissingSwitchDefault")
@Override
protected void doCommit(TableMetadata base, TableMetadata metadata) {
int version = currentVersion() + 1;
CommitStatus commitStatus = CommitStatus.FAILURE;
/* This method adds no fs scheme, and it persists in HTS that way. */
final String newMetadataLocation = rootMetadataFileLocation(metadata, version);
HouseTable houseTable = HouseTable.builder().build();
try {
// Now that we have metadataLocation we stamp it in metadata property.
Map<String, String> properties = new HashMap<>(metadata.properties());
failIfRetryUpdate(properties);
String currentTsString = String.valueOf(Instant.now(Clock.systemUTC()).toEpochMilli());
properties.put(getCanonicalFieldName("lastModifiedTime"), currentTsString);
if (base == null) {
properties.put(getCanonicalFieldName("creationTime"), currentTsString);
}
properties.put(
getCanonicalFieldName("tableVersion"),
properties.getOrDefault(
getCanonicalFieldName("tableLocation"), CatalogConstants.INITIAL_VERSION));
properties.put(getCanonicalFieldName("tableLocation"), newMetadataLocation);
String serializedSnapshotsToPut = properties.remove(CatalogConstants.SNAPSHOTS_JSON_KEY);
String serializedSnapshotRefs = properties.remove(CatalogConstants.SNAPSHOTS_REFS_KEY);
boolean isStageCreate =
Boolean.parseBoolean(properties.remove(CatalogConstants.IS_STAGE_CREATE_KEY));
logPropertiesMap(properties);
TableMetadata updatedMetadata = metadata.replaceProperties(properties);
if (serializedSnapshotsToPut != null) {
List<Snapshot> snapshotsToPut =
SnapshotsUtil.parseSnapshots(fileIO, serializedSnapshotsToPut);
Pair<List<Snapshot>, List<Snapshot>> snapshotsDiff =
SnapshotsUtil.symmetricDifferenceSplit(snapshotsToPut, updatedMetadata.snapshots());
List<Snapshot> appendedSnapshots = snapshotsDiff.getFirst();
List<Snapshot> deletedSnapshots = snapshotsDiff.getSecond();
snapshotInspector.validateSnapshotsUpdate(
updatedMetadata, appendedSnapshots, deletedSnapshots);
Map<String, SnapshotRef> snapshotRefs =
serializedSnapshotRefs == null
? new HashMap<>()
: SnapshotsUtil.parseSnapshotRefs(serializedSnapshotRefs);
updatedMetadata =
maybeAppendSnapshots(updatedMetadata, appendedSnapshots, snapshotRefs, true);
updatedMetadata = maybeDeleteSnapshots(updatedMetadata, deletedSnapshots);
}
final TableMetadata updatedMtDataRef = updatedMetadata;
metricsReporter.executeWithStats(
() ->
TableMetadataParser.write(updatedMtDataRef, io().newOutputFile(newMetadataLocation)),
InternalCatalogMetricsConstant.METADATA_UPDATE_LATENCY);
houseTable = houseTableMapper.toHouseTable(updatedMetadata);
if (!isStageCreate) {
houseTableRepository.save(houseTable);
} else {
/**
* Refresh current metadata for staged tables from newly created metadata file and disable
* "forced refresh" in {@link OpenHouseInternalTableOperations#commit(TableMetadata,
* TableMetadata)}
*/
refreshFromMetadataLocation(newMetadataLocation);
}
commitStatus = CommitStatus.SUCCESS;
} catch (InvalidIcebergSnapshotException e) {
throw new BadRequestException(e, e.getMessage());
} catch (CommitFailedException e) {
throw e;
} catch (HouseTableCallerException
| HouseTableNotFoundException
| HouseTableConcurrentUpdateException e) {
throw new CommitFailedException(e);
} catch (Throwable persistFailure) {
// Try to reconnect and determine the commit status for unknown exception
log.error(
"Encountered an unexpected error while updating metadata.json for table: " + tableIdentifier,
persistFailure);
commitStatus = checkCommitStatus(newMetadataLocation, metadata);
switch (commitStatus) {
case SUCCESS:
log.debug("Calling doCommit succeeded");
break;
case FAILURE:
// Logging the error and throwing the exception must co-exist, because the exception
// handler in org.apache.iceberg.BaseMetastoreCatalog.BaseMetastoreCatalogTableBuilder.create
// swallows the nested exception information.
log.error("Exception details:", persistFailure);
throw new CommitFailedException(
persistFailure,
String.format(
"Persisting metadata file %s at version %s for table %s failed while persisting to house table",
newMetadataLocation, version, GSON.toJson(houseTable)));
case UNKNOWN:
throw new CommitStateUnknownException(persistFailure);
}
} finally {
switch (commitStatus) {
case FAILURE:
metricsReporter.count(InternalCatalogMetricsConstant.COMMIT_FAILED_CTR);
break;
case UNKNOWN:
metricsReporter.count(InternalCatalogMetricsConstant.COMMIT_STATE_UNKNOWN);
break;
default:
break; /*should never happen, kept to silence SpotBugs*/
}
}
}
|
@Test
void testDoCommitDeleteSnapshots() throws IOException {
List<Snapshot> testSnapshots = IcebergTestUtil.getSnapshots();
// add all snapshots to the base metadata
TableMetadata base = BASE_TABLE_METADATA;
for (Snapshot snapshot : testSnapshots) {
base =
TableMetadata.buildFrom(base)
.setBranchSnapshot(snapshot, SnapshotRef.MAIN_BRANCH)
.build();
}
Map<String, String> properties = new HashMap<>(base.properties());
try (MockedStatic<TableMetadataParser> ignoreWriteMock =
Mockito.mockStatic(TableMetadataParser.class)) {
// add only the last 2 snapshots to the new metadata
properties.put(
CatalogConstants.SNAPSHOTS_JSON_KEY,
SnapshotsUtil.serializedSnapshots(testSnapshots.subList(2, 4)));
properties.put(
CatalogConstants.SNAPSHOTS_REFS_KEY,
SnapshotsUtil.serializeMap(
IcebergTestUtil.obtainSnapshotRefsFromSnapshot(
testSnapshots.get(testSnapshots.size() - 1))));
properties.put(getCanonicalFieldName("tableLocation"), TEST_LOCATION);
TableMetadata metadata = base.replaceProperties(properties);
openHouseInternalTableOperations.doCommit(base, metadata);
Mockito.verify(mockHouseTableMapper).toHouseTable(tblMetadataCaptor.capture());
Map<String, String> updatedProperties = tblMetadataCaptor.getValue().properties();
Assertions.assertEquals(
4,
updatedProperties.size()); /*location, lastModifiedTime, version and deleted_snapshots*/
Assertions.assertEquals(
TEST_LOCATION, updatedProperties.get(getCanonicalFieldName("tableVersion")));
// verify 2 snapshots are deleted
Assertions.assertEquals(
testSnapshots.subList(0, 2).stream()
.map(s -> Long.toString(s.snapshotId()))
.collect(Collectors.joining(",")),
updatedProperties.get(getCanonicalFieldName("deleted_snapshots")));
Assertions.assertTrue(updatedProperties.containsKey(getCanonicalFieldName("tableLocation")));
Mockito.verify(mockHouseTableRepository, Mockito.times(1)).save(Mockito.eq(mockHouseTable));
}
}
|
@Nullable
public static String getXID() {
return (String) CONTEXT_HOLDER.get(KEY_XID);
}
|
@Test
public void testGetXID() {
RootContext.bind(DEFAULT_XID);
assertThat(RootContext.getXID()).isEqualTo(DEFAULT_XID);
assertThat(RootContext.unbind()).isEqualTo(DEFAULT_XID);
assertThat(RootContext.getXID()).isNull();
}
|
@Override
public void write(T record) {
recordConsumer.startMessage();
try {
messageWriter.writeTopLevelMessage(record);
} catch (RuntimeException e) {
Message m = (record instanceof Message.Builder) ? ((Message.Builder) record).build() : (Message) record;
LOG.error("Cannot write message {}: {}", e.getMessage(), m);
throw e;
}
recordConsumer.endMessage();
}
|
@Test
public void testMapRecursion() {
RecordConsumer readConsumerMock = Mockito.mock(RecordConsumer.class);
Configuration conf = new Configuration();
ProtoSchemaConverter.setMaxRecursion(conf, 1);
ProtoWriteSupport<Value> spyWriter = createReadConsumerInstance(Value.class, readConsumerMock, conf);
// Need to build it backwards due to clunky Struct.Builder interface.
Value.Builder msg = Value.newBuilder().setStringValue("last");
for (int i = 10; i > -1; --i) {
Value.Builder next = Value.newBuilder();
next.getStructValueBuilder().putFields("" + i, msg.build());
msg = next;
}
Value built = msg.build();
spyWriter.write(built);
InOrder inOrder = Mockito.inOrder(readConsumerMock);
inOrder.verify(readConsumerMock).startMessage();
inOrder.verify(readConsumerMock).startField("struct_value", 4);
inOrder.verify(readConsumerMock).startGroup();
inOrder.verify(readConsumerMock).startField("fields", 0);
inOrder.verify(readConsumerMock).startGroup();
inOrder.verify(readConsumerMock).startField("key", 0);
inOrder.verify(readConsumerMock).addBinary(Binary.fromConstantByteArray("0".getBytes()));
inOrder.verify(readConsumerMock).endField("key", 0);
inOrder.verify(readConsumerMock).startField("value", 1);
inOrder.verify(readConsumerMock).startGroup();
inOrder.verify(readConsumerMock).startField("struct_value", 4);
inOrder.verify(readConsumerMock).startGroup();
inOrder.verify(readConsumerMock).startField("fields", 0);
inOrder.verify(readConsumerMock).startGroup();
inOrder.verify(readConsumerMock).startField("key", 0);
inOrder.verify(readConsumerMock).addBinary(Binary.fromConstantByteArray("1".getBytes()));
inOrder.verify(readConsumerMock).endField("key", 0);
inOrder.verify(readConsumerMock).startField("value", 1);
inOrder.verify(readConsumerMock)
.addBinary(Binary.fromConstantByteArray(built.getStructValue()
.getFieldsOrThrow("0")
.getStructValue()
.getFieldsOrThrow("1")
.toByteArray()));
inOrder.verify(readConsumerMock).endField("value", 1);
inOrder.verify(readConsumerMock).endGroup();
inOrder.verify(readConsumerMock).endField("fields", 0);
inOrder.verify(readConsumerMock).endGroup();
inOrder.verify(readConsumerMock).endField("struct_value", 4);
inOrder.verify(readConsumerMock).endGroup();
inOrder.verify(readConsumerMock).endField("value", 1);
inOrder.verify(readConsumerMock).endGroup();
inOrder.verify(readConsumerMock).endField("fields", 0);
inOrder.verify(readConsumerMock).endGroup();
inOrder.verify(readConsumerMock).endField("struct_value", 4);
inOrder.verify(readConsumerMock).endMessage();
Mockito.verifyNoMoreInteractions(readConsumerMock);
}
|
protected GelfMessage toGELFMessage(final Message message) {
final DateTime timestamp;
final Object fieldTimeStamp = message.getField(Message.FIELD_TIMESTAMP);
if (fieldTimeStamp instanceof DateTime) {
timestamp = (DateTime) fieldTimeStamp;
} else {
timestamp = Tools.nowUTC();
}
final GelfMessageLevel messageLevel = extractLevel(message.getField(Message.FIELD_LEVEL));
final String fullMessage = (String) message.getField(Message.FIELD_FULL_MESSAGE);
final String forwarder = GelfOutput.class.getCanonicalName();
final GelfMessageBuilder builder = new GelfMessageBuilder(message.getMessage(), message.getSource())
.timestamp(timestamp.getMillis() / 1000.0d)
.additionalField("_forwarder", forwarder)
.additionalFields(message.getFields());
if (messageLevel != null) {
builder.level(messageLevel);
}
if (fullMessage != null) {
builder.fullMessage(fullMessage);
}
return builder.build();
}
|
@Test
public void testToGELFMessageWithValidNumericLevel() throws Exception {
final GelfTransport transport = mock(GelfTransport.class);
final GelfOutput gelfOutput = new GelfOutput(transport);
final DateTime now = DateTime.now(DateTimeZone.UTC);
final Message message = messageFactory.createMessage("Test", "Source", now);
message.addField("level", 6);
final GelfMessage gelfMessage = gelfOutput.toGELFMessage(message);
assertEquals(GelfMessageLevel.INFO, gelfMessage.getLevel());
}
|
@GetMapping()
@Secured(resource = AuthConstants.CONSOLE_RESOURCE_NAME_PREFIX
+ "namespaces", action = ActionTypes.READ, signType = SignType.CONSOLE)
public Result<Namespace> getNamespace(@RequestParam("namespaceId") String namespaceId) throws NacosException {
return Result.success(namespaceOperationService.getNamespace(namespaceId));
}
|
@Test
void testGetNamespace() throws NacosException {
Namespace namespaceAllInfo = new Namespace(TEST_NAMESPACE_ID, TEST_NAMESPACE_NAME, TEST_NAMESPACE_DESC, 200, 1,
NamespaceTypeEnum.GLOBAL.getType());
when(namespaceOperationService.getNamespace(TEST_NAMESPACE_ID)).thenReturn(namespaceAllInfo);
Result<Namespace> result = namespaceControllerV2.getNamespace(TEST_NAMESPACE_ID);
verify(namespaceOperationService).getNamespace(TEST_NAMESPACE_ID);
assertEquals(ErrorCode.SUCCESS.getCode(), result.getCode());
Namespace namespace = result.getData();
assertEquals(namespaceAllInfo.getNamespace(), namespace.getNamespace());
assertEquals(namespaceAllInfo.getNamespaceShowName(), namespace.getNamespaceShowName());
assertEquals(namespaceAllInfo.getNamespaceDesc(), namespace.getNamespaceDesc());
assertEquals(namespaceAllInfo.getQuota(), namespace.getQuota());
assertEquals(namespaceAllInfo.getConfigCount(), namespace.getConfigCount());
}
|
public static InlineExpressionParser newInstance(final String inlineExpression) {
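// Choose the SPI implementation: null or un-prefixed expressions use the default parser type;
// an expression starting with the type-name begin symbol selects the named parser and strips the prefix.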
Properties props = new Properties();
if (null == inlineExpression) {
return TypedSPILoader.getService(InlineExpressionParser.class, DEFAULT_TYPE_NAME, props);
}
if (!inlineExpression.startsWith(TYPE_NAME_BEGIN_SYMBOL)) {
props.setProperty(InlineExpressionParser.INLINE_EXPRESSION_KEY, inlineExpression);
return TypedSPILoader.getService(InlineExpressionParser.class, DEFAULT_TYPE_NAME, props);
}
int typeBeginIndex = inlineExpression.indexOf(TYPE_NAME_BEGIN_SYMBOL);
int typeEndIndex = inlineExpression.indexOf(TYPE_NAME_END_SYMBOL);
props.setProperty(InlineExpressionParser.INLINE_EXPRESSION_KEY, getExprWithoutTypeName(inlineExpression, typeBeginIndex, typeEndIndex));
return TypedSPILoader.getService(InlineExpressionParser.class, getTypeName(inlineExpression, typeBeginIndex, typeEndIndex), props);
}
|
@Test
void assertUndefinedInstance() {
assertThrows(ServiceProviderNotFoundException.class,
() -> InlineExpressionParserFactory.newInstance("<UNDEFINED>t_order_0, t_order_1").getType());
}
|
public int doWork()
{
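// One conductor duty cycle: timers, client commands (unless an async command is in flight),
// the command queue, stream position tracking, name resolution, and end-of-life resource cleanup.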
final long nowNs = nanoClock.nanoTime();
trackTime(nowNs);
int workCount = 0;
workCount += processTimers(nowNs);
if (!asyncClientCommandInFlight)
{
workCount += clientCommandAdapter.receive();
}
workCount += drainCommandQueue();
workCount += trackStreamPositions(workCount, nowNs);
workCount += nameResolver.doWork(cachedEpochClock.time());
workCount += freeEndOfLifeResources(ctx.resourceFreeLimit());
return workCount;
}
|
@Test
void shouldErrorWhenConflictingReliableSubscriptionAdded()
{
driverProxy.addSubscription(CHANNEL_4000 + "|reliable=false", STREAM_ID_1);
driverConductor.doWork();
final long id2 = driverProxy.addSubscription(CHANNEL_4000 + "|reliable=true", STREAM_ID_1);
driverConductor.doWork();
verify(mockClientProxy).onError(eq(id2), any(ErrorCode.class), anyString());
}
|
@Override
public Locator locator() {
return inner.locator();
}
|
@Test
public void shouldReturnInnerLocator() {
// Given:
final Locator expected = mock(Locator.class);
when(inner.locator()).thenReturn(expected);
givenNoopTransforms();
// When:
final Locator locator = materialization.locator();
// Then:
assertThat(locator, is(sameInstance(expected)));
}
|
public String destinationURL(File rootPath, File file) {
return destinationURL(rootPath, file, getSrc(), getDest());
}
|
@Test
public void shouldProvideAppendFilePathToDest() {
ArtifactPlan artifactPlan = new ArtifactPlan(ArtifactPlanType.file, "test/**/*/a.log", "logs");
assertThat(artifactPlan.destinationURL(new File("pipelines/pipelineA"),
new File("pipelines/pipelineA/test/a/b/a.log"))).isEqualTo("logs/a/b");
}
|
String name(String name) {
return sanitize(name, NAME_RESERVED);
}
|
@Test
public void truncatesNamesExceedingMaxLength() throws Exception {
String longName = "01234567890123456789012345678901234567890123456789012345678901234567890123456789";
assertThat(sanitize.name(longName)).isEqualTo(longName.substring(0, (Sanitize.DEFAULT_MAX_LENGTH)));
}
|
@PostMapping()
@Secured(action = ActionTypes.WRITE, signType = SignType.CONFIG)
public Result<Boolean> publishConfig(ConfigForm configForm, HttpServletRequest request) throws NacosException {
// check required field
configForm.validate();
String encryptedDataKeyFinal = configForm.getEncryptedDataKey();
if (StringUtils.isBlank(encryptedDataKeyFinal)) {
// content is not yet encrypted: encrypt it and capture the generated data key
Pair<String, String> pair = EncryptionHandler.encryptHandler(configForm.getDataId(), configForm.getContent());
configForm.setContent(pair.getSecond());
encryptedDataKeyFinal = pair.getFirst();
}
//fix issue #9783
configForm.setNamespaceId(NamespaceUtil.processNamespaceParameter(configForm.getNamespaceId()));
// check param
ParamUtils.checkTenantV2(configForm.getNamespaceId());
ParamUtils.checkParam(configForm.getDataId(), configForm.getGroup(), "datumId", configForm.getContent());
ParamUtils.checkParamV2(configForm.getTag());
if (StringUtils.isBlank(configForm.getSrcUser())) {
configForm.setSrcUser(RequestUtil.getSrcUserName(request));
}
if (!ConfigType.isValidType(configForm.getType())) {
configForm.setType(ConfigType.getDefaultType().getType());
}
ConfigRequestInfo configRequestInfo = new ConfigRequestInfo();
configRequestInfo.setSrcIp(RequestUtil.getRemoteIp(request));
configRequestInfo.setRequestIpApp(RequestUtil.getAppName(request));
configRequestInfo.setBetaIps(request.getHeader("betaIps"));
configRequestInfo.setCasMd5(request.getHeader("casMd5"));
return Result.success(configOperationService.publishConfig(configForm, configRequestInfo, encryptedDataKeyFinal));
}
|
@Test
void testPublishConfigWithEncryptedDataKey() throws Exception {
ConfigForm configForm = new ConfigForm();
configForm.setDataId(TEST_DATA_ID);
configForm.setGroup(TEST_GROUP);
configForm.setNamespaceId(TEST_NAMESPACE_ID);
configForm.setContent(TEST_CONTENT);
configForm.setEncryptedDataKey(TEST_ENCRYPTED_DATA_KEY);
MockHttpServletRequest request = new MockHttpServletRequest();
when(configOperationService.publishConfig(any(ConfigForm.class), any(ConfigRequestInfo.class),
eq(TEST_ENCRYPTED_DATA_KEY))).thenReturn(true);
Result<Boolean> booleanResult = configControllerV2.publishConfig(configForm, request);
verify(configOperationService).publishConfig(any(ConfigForm.class), any(ConfigRequestInfo.class), anyString());
assertEquals(ErrorCode.SUCCESS.getCode(), booleanResult.getCode());
assertTrue(booleanResult.getData());
}
|
public long getMinOffset(final String addr, final MessageQueue messageQueue, final long timeoutMillis)
throws RemotingException, MQBrokerException, InterruptedException {
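// Build a GET_MIN_OFFSET request for the queue and invoke the broker synchronously,
// optionally through the VIP channel; any non-SUCCESS response surfaces as MQBrokerException.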
GetMinOffsetRequestHeader requestHeader = new GetMinOffsetRequestHeader();
requestHeader.setTopic(messageQueue.getTopic());
requestHeader.setQueueId(messageQueue.getQueueId());
requestHeader.setBrokerName(messageQueue.getBrokerName());
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_MIN_OFFSET, requestHeader);
RemotingCommand response = this.remotingClient.invokeSync(MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), addr),
request, timeoutMillis);
assert response != null;
switch (response.getCode()) {
case ResponseCode.SUCCESS: {
GetMinOffsetResponseHeader responseHeader =
(GetMinOffsetResponseHeader) response.decodeCommandCustomHeader(GetMinOffsetResponseHeader.class);
return responseHeader.getOffset();
}
default:
break;
}
throw new MQBrokerException(response.getCode(), response.getRemark(), addr);
}
|
@Test
public void testGetMinOffset() throws Exception {
doAnswer((Answer<RemotingCommand>) mock -> {
RemotingCommand request = mock.getArgument(1);
final RemotingCommand response = RemotingCommand.createResponseCommand(GetMinOffsetResponseHeader.class);
final GetMinOffsetResponseHeader responseHeader = (GetMinOffsetResponseHeader) response.readCustomHeader();
responseHeader.setOffset(100L);
response.makeCustomHeaderToNet();
response.setCode(ResponseCode.SUCCESS);
response.setOpaque(request.getOpaque());
return response;
}).when(remotingClient).invokeSync(anyString(), any(RemotingCommand.class), anyLong());
long offset = mqClientAPI.getMinOffset(brokerAddr, new MessageQueue(topic, brokerName, 0), 10000);
assertThat(offset).isEqualTo(100L);
}
|
@Override
public void apply(Project project)
{
checkGradleVersion(project);
project.getPlugins().apply(JavaPlugin.class);
// this HashMap will have a PegasusOptions per sourceSet
project.getExtensions().getExtraProperties().set("pegasus", new HashMap<>());
// this map exposes the PegasusOptions.GenerationMode values as a project property
project.getExtensions().getExtraProperties().set("PegasusGenerationMode",
Arrays.stream(PegasusOptions.GenerationMode.values())
.collect(Collectors.toMap(PegasusOptions.GenerationMode::name, Function.identity())));
synchronized (STATIC_PROJECT_EVALUATED_LOCK)
{
// Check if this is the first time the block will run. Pegasus plugin can run multiple times in a build if
// multiple sub-projects applied the plugin.
if (!project.getRootProject().hasProperty(RUN_ONCE)
|| !Boolean.parseBoolean(String.valueOf(project.getRootProject().property(RUN_ONCE))))
{
project.getGradle().projectsEvaluated(gradle ->
gradle.getRootProject().subprojects(subproject ->
UNUSED_CONFIGURATIONS.forEach(configurationName -> {
Configuration conf = subproject.getConfigurations().findByName(configurationName);
if (conf != null && !conf.getDependencies().isEmpty()) {
subproject.getLogger().warn("*** Project {} declares dependency to unused configuration \"{}\". "
+ "This configuration is deprecated and you can safely remove the dependency. ***",
subproject.getPath(), configurationName);
}
})
)
);
// Re-initialize the static variables as they might have stale values from previous run. With Gradle 3.0 and
// gradle daemon enabled, the plugin class might not be loaded for every run.
DATA_TEMPLATE_FILE_SUFFIXES.clear();
DATA_TEMPLATE_FILE_SUFFIXES.add(DATA_TEMPLATE_FILE_SUFFIX);
DATA_TEMPLATE_FILE_SUFFIXES.add(PDL_FILE_SUFFIX);
_restModelCompatMessage = new StringBuffer();
_needCheckinFiles.clear();
_needBuildFolders.clear();
_possibleMissingFilesInEarlierCommit.clear();
project.getGradle().buildFinished(result ->
{
StringBuilder endOfBuildMessage = new StringBuilder();
if (_restModelCompatMessage.length() > 0)
{
endOfBuildMessage.append(_restModelCompatMessage);
}
if (!_needCheckinFiles.isEmpty())
{
endOfBuildMessage.append(createModifiedFilesMessage(_needCheckinFiles, _needBuildFolders));
}
if (!_possibleMissingFilesInEarlierCommit.isEmpty())
{
endOfBuildMessage.append(createPossibleMissingFilesMessage(_possibleMissingFilesInEarlierCommit));
}
if (endOfBuildMessage.length() > 0)
{
result.getGradle().getRootProject().getLogger().quiet(endOfBuildMessage.toString());
}
});
// Set an extra property on the root project to indicate the initialization is complete for the current build.
project.getRootProject().getExtensions().getExtraProperties().set(RUN_ONCE, true);
}
}
ConfigurationContainer configurations = project.getConfigurations();
// configuration providing the classes required for the pegasus plugin to invoke generator main methods
configurations.maybeCreate(PEGASUS_PLUGIN_CONFIGURATION);
// configuration for compiling generated data templates
Configuration dataTemplateCompile = configurations.maybeCreate("dataTemplateCompile");
dataTemplateCompile.setVisible(false);
// configuration for running rest client generator
Configuration restClientCompile = configurations.maybeCreate("restClientCompile");
restClientCompile.setVisible(false);
// configuration for running data template generator
// DEPRECATED! This configuration is no longer used. Please stop using it.
Configuration dataTemplateGenerator = configurations.maybeCreate("dataTemplateGenerator");
dataTemplateGenerator.setVisible(false);
// configuration for running rest client generator
// DEPRECATED! This configuration is no longer used. Please stop using it.
Configuration restTools = configurations.maybeCreate("restTools");
restTools.setVisible(false);
// configuration for running Avro schema generator
// DEPRECATED! To skip avro schema generation, use PegasusOptions.generationModes
Configuration avroSchemaGenerator = configurations.maybeCreate("avroSchemaGenerator");
avroSchemaGenerator.setVisible(false);
// configuration for depending on data schemas and potentially generated data templates
// and for publishing jars containing data schemas to the project artifacts for including in the ivy.xml
Configuration dataModel = configurations.maybeCreate("dataModel");
Configuration testDataModel = configurations.maybeCreate("testDataModel");
testDataModel.extendsFrom(dataModel);
// configuration for depending on data schemas and potentially generated data templates
// and for publishing jars containing data schemas to the project artifacts for including in the ivy.xml
Configuration avroSchema = configurations.maybeCreate("avroSchema");
Configuration testAvroSchema = configurations.maybeCreate("testAvroSchema");
testAvroSchema.extendsFrom(avroSchema);
// configuration for depending on rest idl and potentially generated client builders
// and for publishing jars containing rest idl to the project artifacts for including in the ivy.xml
Configuration restModel = configurations.maybeCreate("restModel");
Configuration testRestModel = configurations.maybeCreate("testRestModel");
testRestModel.extendsFrom(restModel);
// configuration for publishing jars containing data schemas and generated data templates
// to the project artifacts for including in the ivy.xml
//
// published data template jars depend on the configurations used to compile the classes
// in the jar; this includes the data models/templates used by the data template generator
// and the classes used to compile the generated classes.
Configuration dataTemplate = configurations.maybeCreate("dataTemplate");
dataTemplate.extendsFrom(dataTemplateCompile, dataModel);
Configuration testDataTemplate = configurations.maybeCreate("testDataTemplate");
testDataTemplate.extendsFrom(dataTemplate, testDataModel);
// configuration for processing and validating schema annotation during build time.
//
// The configuration contains dependencies to schema annotation handlers which would process schema annotations
// and validate.
Configuration schemaAnnotationHandler = configurations.maybeCreate(SCHEMA_ANNOTATION_HANDLER_CONFIGURATION);
// configuration for publishing jars containing rest idl and generated client builders
// to the project artifacts for including in the ivy.xml
//
// published client builder jars depend on the configurations used to compile the classes
// in the jar; this includes the data models/templates (potentially generated by this
// project) used by the data template generator and the classes used to compile
// the generated classes.
Configuration restClient = configurations.maybeCreate("restClient");
restClient.extendsFrom(restClientCompile, dataTemplate);
Configuration testRestClient = configurations.maybeCreate("testRestClient");
testRestClient.extendsFrom(restClient, testDataTemplate);
Properties properties = new Properties();
InputStream inputStream = getClass().getResourceAsStream("/pegasus-version.properties");
if (inputStream != null && !"true".equals(System.getenv("PEGASUS_INTEGRATION_TESTING")))
{
try
{
properties.load(inputStream);
}
catch (IOException e)
{
throw new GradleException("Unable to read pegasus-version.properties file.", e);
}
String version = properties.getProperty("pegasus.version");
project.getDependencies().add(PEGASUS_PLUGIN_CONFIGURATION, "com.linkedin.pegasus:data:" + version);
project.getDependencies().add(PEGASUS_PLUGIN_CONFIGURATION, "com.linkedin.pegasus:data-avro-generator:" + version);
project.getDependencies().add(PEGASUS_PLUGIN_CONFIGURATION, "com.linkedin.pegasus:generator:" + version);
project.getDependencies().add(PEGASUS_PLUGIN_CONFIGURATION, "com.linkedin.pegasus:restli-tools:" + version);
}
else
{
project.getLogger().lifecycle("Unable to add pegasus dependencies to {}. Please be sure that "
+ "'com.linkedin.pegasus:data', 'com.linkedin.pegasus:data-avro-generator', 'com.linkedin.pegasus:generator', 'com.linkedin.pegasus:restli-tools'"
+ " are available on the configuration pegasusPlugin",
project.getPath());
}
project.getDependencies().add(PEGASUS_PLUGIN_CONFIGURATION, "org.slf4j:slf4j-simple:1.7.2");
project.getDependencies().add(PEGASUS_PLUGIN_CONFIGURATION, project.files(System.getProperty("java.home") + "/../lib/tools.jar"));
// this call has to be here because:
// 1) artifacts cannot be published once the projects have been evaluated, so we need to first
// create the tasks and artifact handler, then progressively append sources
// 2) in order to append sources progressively, the source and documentation tasks and artifacts must be
// configured/created before configuring and creating the code generation tasks.
configureGeneratedSourcesAndJavadoc(project);
ChangedFileReportTask changedFileReportTask = project.getTasks()
.create("changedFilesReport", ChangedFileReportTask.class);
project.getTasks().getByName("check").dependsOn(changedFileReportTask);
SourceSetContainer sourceSets = project.getConvention()
.getPlugin(JavaPluginConvention.class).getSourceSets();
sourceSets.all(sourceSet ->
{
if (sourceSet.getName().toLowerCase(Locale.US).contains("generated"))
{
return;
}
checkAvroSchemaExist(project, sourceSet);
// the idl Generator input options will be inside the PegasusOptions class. Users of the
// plugin can set the inputOptions in their build.gradle
@SuppressWarnings("unchecked")
Map<String, PegasusOptions> pegasusOptions = (Map<String, PegasusOptions>) project
.getExtensions().getExtraProperties().get("pegasus");
pegasusOptions.put(sourceSet.getName(), new PegasusOptions());
// rest model generation could fail on incompatibility
// if it can fail, fail it early
configureRestModelGeneration(project, sourceSet);
// Do compatibility check for schemas under "pegasus" directory if the configuration property is provided.
if (isPropertyTrue(project, ENABLE_PEGASUS_SCHEMA_COMPATIBILITY_CHECK))
{
configurePegasusSchemaSnapshotGeneration(project, sourceSet, false);
}
configurePegasusSchemaSnapshotGeneration(project, sourceSet, true);
configureConversionUtilities(project, sourceSet);
GenerateDataTemplateTask generateDataTemplateTask = configureDataTemplateGeneration(project, sourceSet);
configureAvroSchemaGeneration(project, sourceSet);
configureRestClientGeneration(project, sourceSet);
if (!isPropertyTrue(project, DISABLE_SCHEMA_ANNOTATION_VALIDATION))
{
configureSchemaAnnotationValidation(project, sourceSet, generateDataTemplateTask);
}
Task cleanGeneratedDirTask = project.task(sourceSet.getTaskName("clean", "GeneratedDir"));
cleanGeneratedDirTask.doLast(new CacheableAction<>(task ->
{
deleteGeneratedDir(project, sourceSet, REST_GEN_TYPE);
deleteGeneratedDir(project, sourceSet, AVRO_SCHEMA_GEN_TYPE);
deleteGeneratedDir(project, sourceSet, DATA_TEMPLATE_GEN_TYPE);
}));
// make clean depends on deleting the generated directories
project.getTasks().getByName("clean").dependsOn(cleanGeneratedDirTask);
// Set data schema directories as resource roots
configureDataSchemaResourcesRoot(project, sourceSet);
});
project.getExtensions().getExtraProperties().set(GENERATOR_CLASSLOADER_NAME, getClass().getClassLoader());
}
|
@Test
public void test()
{
Project project = ProjectBuilder.builder().build();
project.getPlugins().apply(PegasusPlugin.class);
assertTrue(project.getPlugins().hasPlugin(JavaPlugin.class));
// if any configuration is resolved in configuration phase, user script that tries to exclude certain dependencies will fail
for (Configuration configuration : project.getConfigurations())
{
assertSame(configuration.getState(), Configuration.State.UNRESOLVED);
}
assertNotNull(project.getConfigurations().findByName("dataTemplate"));
assertNotNull(project.getConfigurations().findByName("restClient"));
assertTrue(project.getExtensions().getExtraProperties().get("PegasusGenerationMode") instanceof Map);
@SuppressWarnings("unchecked")
Map<String, PegasusOptions> pegasusOptions = (Map<String, PegasusOptions>) project
.getExtensions().getExtraProperties().get("pegasus");
assertFalse(pegasusOptions.get("main").hasGenerationMode(PegasusOptions.GenerationMode.AVRO));
assertTrue(pegasusOptions.get("main").hasGenerationMode(PegasusOptions.GenerationMode.PEGASUS));
}
|
@SuppressWarnings({"unchecked", "rawtypes"})
public static <T> List<T> sort(List<T> list) {
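// Sorts in place using the elements' natural ordering (elements must implement Comparable);
// returns the same list instance, leaving a null or empty input untouched.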
if (isNotEmpty(list)) {
Collections.sort((List) list);
}
return list;
}
|
@Test
void testSort() {
List<Integer> list = new ArrayList<Integer>();
list.add(100);
list.add(10);
list.add(20);
List<Integer> expected = new ArrayList<Integer>();
expected.add(10);
expected.add(20);
expected.add(100);
assertEquals(expected, CollectionUtils.sort(list));
}
|
public static DataSource createDataSource(final File yamlFile) throws SQLException, IOException {
YamlJDBCConfiguration rootConfig = YamlEngine.unmarshal(yamlFile, YamlJDBCConfiguration.class);
return createDataSource(new YamlDataSourceConfigurationSwapper().swapToDataSources(rootConfig.getDataSources()), rootConfig);
}
|
@Test
void assertCreateDataSourceWithFileForExternalSingleDataSource() throws Exception {
assertDataSource(YamlShardingSphereDataSourceFactory.createDataSource(new MockedDataSource(), new File(getYamlFileUrl().toURI())));
}
|
public OpenAPI read(Class<?> cls) {
return read(cls, resolveApplicationPath(), null, false, null, null, new LinkedHashSet<String>(), new ArrayList<Parameter>(), new HashSet<Class<?>>());
}
|
@Test(description = "SecurityScheme with REf")
public void testSecuritySchemeWithRef() {
Components components = new Components();
components.addSecuritySchemes("Security", new SecurityScheme().description("Security Example").
name("Security").type(SecurityScheme.Type.OAUTH2).$ref("myOauth2Security").in(SecurityScheme.In.HEADER));
OpenAPI oas = new OpenAPI()
.info(new Info().description("info"))
.components(components);
Reader reader = new Reader(oas);
OpenAPI openAPI = reader.read(RefSecurityResource.class);
String yaml = "openapi: 3.0.1\n" +
"info:\n" +
" description: info\n" +
"paths:\n" +
" /:\n" +
" get:\n" +
" description: description\n" +
" operationId: Operation Id\n" +
" responses:\n" +
" default:\n" +
" description: default response\n" +
" content:\n" +
" '*/*': {}\n" +
" security:\n" +
" - security_key:\n" +
" - write:pets\n" +
" - read:pets\n" +
"components:\n" +
" securitySchemes:\n" +
" Security:\n" +
" type: oauth2\n" +
" description: Security Example\n" +
" myOauth2Security:\n" +
" type: oauth2\n" +
" description: myOauthSecurity Description\n" +
" $ref: '#/components/securitySchemes/Security'\n" +
" in: header\n" +
" flows:\n" +
" implicit:\n" +
" authorizationUrl: http://x.com\n" +
" scopes:\n" +
" write:pets: modify pets in your account\n";
SerializationMatchers.assertEqualsToYaml(openAPI, yaml);
}
|
@Override
@MethodNotAvailable
public void close() {
throw new MethodNotAvailableException();
}
|
@Test(expected = MethodNotAvailableException.class)
public void testClose() {
adapter.close();
}
|
@Override
public Path mkdir(final Path folder, final TransferStatus status) throws BackgroundException {
try {
session.getClient().createDirectory(new DAVPathEncoder().encode(folder));
}
catch(SardineException e) {
throw new DAVExceptionMappingService().map("Cannot create folder {0}", e, folder);
}
catch(IOException e) {
throw new HttpExceptionMappingService().map(e, folder);
}
return folder;
}
|
@Test
public void testMakeDirectory() throws Exception {
final Path test = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory));
new DAVDirectoryFeature(session).mkdir(test, new TransferStatus());
assertTrue(session.getFeature(Find.class).find(test));
assertThrows(ConflictException.class, () -> new DAVDirectoryFeature(session).mkdir(test, new TransferStatus()));
new DAVDeleteFeature(session).delete(Collections.<Path>singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
assertFalse(session.getFeature(Find.class).find(test));
}
|
public List<String> splitSql(String text) {
List<String> queries = new ArrayList<>();
StringBuilder query = new StringBuilder();
char character;
boolean multiLineComment = false;
boolean singleLineComment = false;
boolean singleQuoteString = false;
boolean doubleQuoteString = false;
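// Single pass over the text, tracking comment and quoted-string state so that semicolons
// inside comments or string literals never terminate a statement.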
for (int index = 0; index < text.length(); index++) {
character = text.charAt(index);
// end of single line comment
if (singleLineComment && (character == '\n')) {
singleLineComment = false;
query.append(character);
if (index == (text.length() - 1) && !query.toString().trim().isEmpty()) {
// add query when it is the end of sql.
queries.add(query.toString());
}
continue;
}
// end of multiple line comment
if (multiLineComment && (index - 1) >= 0 && text.charAt(index - 1) == '/'
&& (index - 2) >= 0 && text.charAt(index - 2) == '*') {
multiLineComment = false;
}
if (character == '\'' && !(singleLineComment || multiLineComment)) {
if (singleQuoteString) {
singleQuoteString = false;
} else if (!doubleQuoteString) {
singleQuoteString = true;
}
}
if (character == '"' && !(singleLineComment || multiLineComment)) {
if (doubleQuoteString && index > 0) {
doubleQuoteString = false;
} else if (!singleQuoteString) {
doubleQuoteString = true;
}
}
if (!singleQuoteString && !doubleQuoteString && !multiLineComment && !singleLineComment
&& text.length() > (index + 1)) {
if (isSingleLineComment(text.charAt(index), text.charAt(index + 1))) {
singleLineComment = true;
} else if (text.charAt(index) == '/' && text.length() > (index + 2)
&& text.charAt(index + 1) == '*' && text.charAt(index + 2) != '+') {
multiLineComment = true;
}
}
if (character == ';' && !singleQuoteString && !doubleQuoteString && !multiLineComment && !singleLineComment) {
// reached a statement-terminating semicolon
if (!query.toString().trim().isEmpty()) {
queries.add(query.toString());
query = new StringBuilder();
}
} else if (index == (text.length() - 1)) {
// reached the last character of the text
if ((!singleLineComment && !multiLineComment)) {
query.append(character);
}
if (!query.toString().trim().isEmpty()) {
queries.add(query.toString());
query = new StringBuilder();
}
} else if (!singleLineComment && !multiLineComment) {
// normal case, not in single line comment and not in multiple line comment
query.append(character);
} else if (character == '\n') {
query.append(character);
}
}
List<String> refinedQueries = new ArrayList<>();
for (int i = 0; i < queries.size(); ++i) {
String emptyLine = "";
if (i > 0) {
emptyLine = createEmptyLine(refinedQueries.get(i-1));
}
if (isSingleLineComment(queries.get(i)) || isMultipleLineComment(queries.get(i))) {
// refine the last refinedQuery
if (refinedQueries.size() > 0) {
String lastRefinedQuery = refinedQueries.get(refinedQueries.size() - 1);
refinedQueries.set(refinedQueries.size() - 1,
lastRefinedQuery + createEmptyLine(queries.get(i)));
}
} else {
String refinedQuery = emptyLine + queries.get(i);
refinedQueries.add(refinedQuery);
}
}
return refinedQueries;
}
|
@Test
void testCustomSplitter_1() {
SqlSplitter sqlSplitter = new SqlSplitter("//");
List<String> sqls = sqlSplitter.splitSql("show tables;\n//comment_1");
assertEquals(1, sqls.size());
assertEquals("show tables", sqls.get(0));
sqls = sqlSplitter.splitSql("show tables;\n//comment_1");
assertEquals(1, sqls.size());
assertEquals("show tables", sqls.get(0));
sqls = sqlSplitter.splitSql("show tables;\n//comment_1;");
assertEquals(1, sqls.size());
assertEquals("show tables", sqls.get(0));
sqls = sqlSplitter.splitSql("show tables;\n//comment_1;\n");
assertEquals(1, sqls.size());
assertEquals("show tables", sqls.get(0));
sqls = sqlSplitter.splitSql("//comment_1;\nshow tables");
assertEquals(1, sqls.size());
assertEquals("\nshow tables", sqls.get(0));
sqls = sqlSplitter.splitSql("//comment_1;\nshow tables;\n//comment_2");
assertEquals(1, sqls.size());
assertEquals("\nshow tables", sqls.get(0));
sqls = sqlSplitter.splitSql("show tables;\ndescribe table_1");
assertEquals(2, sqls.size());
assertEquals("show tables", sqls.get(0));
assertEquals("\ndescribe table_1", sqls.get(1));
sqls = sqlSplitter.splitSql("show tables;\n//comment_1;\ndescribe table_1");
assertEquals(2, sqls.size());
assertEquals("show tables", sqls.get(0));
assertEquals("\n\ndescribe table_1", sqls.get(1));
sqls = sqlSplitter.splitSql("select a\nfrom table_1;\ndescribe table_1;//comment_1");
assertEquals(2, sqls.size());
assertEquals("select a\nfrom table_1", sqls.get(0));
assertEquals("\n\ndescribe table_1", sqls.get(1));
sqls = sqlSplitter.splitSql("//comment_1;\n//comment_2\n");
assertEquals(0, sqls.size());
sqls = sqlSplitter.splitSql("select a // comment\n from table_1");
assertEquals(1, sqls.size());
assertEquals("select a \n from table_1", sqls.get(0));
}
|
public MessageListener messageListener(MessageListener messageListener, boolean addConsumerSpan) {
if (messageListener instanceof TracingMessageListener) return messageListener;
return new TracingMessageListener(messageListener, this, addConsumerSpan);
}
|
@Test void messageListener_traces_addsConsumerSpan() {
jmsTracing.messageListener(mock(MessageListener.class), true)
.onMessage(message);
assertThat(
asList(testSpanHandler.takeRemoteSpan(Kind.CONSUMER), testSpanHandler.takeLocalSpan()))
.extracting(brave.handler.MutableSpan::name)
.containsExactly("receive", "on-message");
}
|
@Override
public void filter(ContainerRequestContext requestContext) throws IOException {
ThreadContext.unbindSubject();
final boolean secure = requestContext.getSecurityContext().isSecure();
final MultivaluedMap<String, String> headers = requestContext.getHeaders();
final Map<String, Cookie> cookies = requestContext.getCookies();
final Request grizzlyRequest = grizzlyRequestProvider.get();
final String host = RestTools.getRemoteAddrFromRequest(grizzlyRequest, trustedProxies);
final String authHeader = headers.getFirst(HttpHeaders.AUTHORIZATION);
final Set<Class<?>> matchedResources = requestContext.getUriInfo().getMatchedResources().stream()
.map(Object::getClass).collect(Collectors.toSet());
final SecurityContext securityContext;
if (authHeader != null && authHeader.startsWith("Basic")) {
final String base64UserPass = authHeader.substring(authHeader.indexOf(' ') + 1);
final String userPass = decodeBase64(base64UserPass);
final String[] split = userPass.split(":", 2);
if (split.length != 2) {
throw new BadRequestException("Invalid credentials in Authorization header");
}
securityContext = createSecurityContext(split[0],
split[1],
secure,
SecurityContext.BASIC_AUTH,
host,
grizzlyRequest.getRemoteAddr(),
headers,
cookies,
matchedResources);
} else {
securityContext = createSecurityContext(null, null, secure, null, host,
grizzlyRequest.getRemoteAddr(),
headers,
cookies,
matchedResources);
}
requestContext.setSecurityContext(securityContext);
}
|
@Test(expected = BadRequestException.class)
public void filterWithMalformedBasicAuthShouldThrowBadRequestException() throws Exception {
final MultivaluedHashMap<String, String> headers = new MultivaluedHashMap<>();
headers.putSingle(HttpHeaders.AUTHORIZATION, "Basic ****");
when(requestContext.getHeaders()).thenReturn(headers);
filter.filter(requestContext);
}
|
@Override
public Collection<Event> filter(Collection<Event> events, final FilterMatchListener filterMatchListener) {
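// Set a random UUID on the target field of each event (always when overwrite is enabled,
// otherwise only when the field is absent) and notify the listener of every match.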
for (Event e : events) {
if (overwrite || e.getField(target) == null) {
e.setField(target, UUID.randomUUID().toString());
}
filterMatchListener.filterMatched(e);
}
return events;
}
|
@Test
public void testUuidWithOverwrite() {
String targetField = "target_field";
String originalValue = "originalValue";
Map<String, Object> rawConfig = new HashMap<>();
rawConfig.put(Uuid.TARGET_CONFIG.name(), targetField);
rawConfig.put(Uuid.OVERWRITE_CONFIG.name(), true);
Configuration config = new ConfigurationImpl(rawConfig);
Uuid uuid = new Uuid(ID, config, new ContextImpl(null, null));
PluginUtil.validateConfig(uuid, config);
org.logstash.Event e = new org.logstash.Event();
e.setField(targetField, originalValue);
Collection<Event> filteredEvents = uuid.filter(Collections.singletonList(e), NO_OP_MATCH_LISTENER);
Assert.assertEquals(1, filteredEvents.size());
Event finalEvent = filteredEvents.stream().findFirst().orElse(null);
Assert.assertNotNull(finalEvent);
Assert.assertNotEquals(originalValue, finalEvent.getField(targetField));
Assert.assertTrue(((String)finalEvent.getField(targetField)).matches("\\b[0-9a-f]{8}\\b-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-\\b[0-9a-f]{12}\\b"));
}
|
public static void checkHttpTimeoutProperty() throws NumberFormatException {
checkNumericSystemProperty(HTTP_TIMEOUT, Range.atLeast(0));
}
|
@Test
public void testCheckHttpTimeoutProperty_stringValue() {
System.setProperty(JibSystemProperties.HTTP_TIMEOUT, "random string");
try {
JibSystemProperties.checkHttpTimeoutProperty();
Assert.fail();
} catch (NumberFormatException ex) {
Assert.assertEquals("jib.httpTimeout must be an integer: random string", ex.getMessage());
}
}
|
@PostMapping("/authorize")
@Operation(summary = "申请授权", description = "适合 code 授权码模式,或者 implicit 简化模式;在 sso.vue 单点登录界面被【提交】调用")
@Parameters({
@Parameter(name = "response_type", required = true, description = "响应类型", example = "code"),
@Parameter(name = "client_id", required = true, description = "客户端编号", example = "tudou"),
@Parameter(name = "scope", description = "授权范围", example = "userinfo.read"), // 使用 Map<String, Boolean> 格式,Spring MVC 暂时不支持这么接收参数
@Parameter(name = "redirect_uri", required = true, description = "重定向 URI", example = "https://www.iocoder.cn"),
@Parameter(name = "auto_approve", required = true, description = "用户是否接受", example = "true"),
@Parameter(name = "state", example = "1")
})
public CommonResult<String> approveOrDeny(@RequestParam("response_type") String responseType,
@RequestParam("client_id") String clientId,
@RequestParam(value = "scope", required = false) String scope,
@RequestParam("redirect_uri") String redirectUri,
@RequestParam(value = "auto_approve") Boolean autoApprove,
@RequestParam(value = "state", required = false) String state) {
@SuppressWarnings("unchecked")
Map<String, Boolean> scopes = JsonUtils.parseObject(scope, Map.class);
scopes = ObjectUtil.defaultIfNull(scopes, Collections.emptyMap());
// 0. Verify that the user is logged in; enforced via Spring Security
// 1.1 Validate that responseType is either code or token
OAuth2GrantTypeEnum grantTypeEnum = getGrantTypeEnum(responseType);
// 1.2 Validate that the redirectUri domain is legal and that the scopes are within the client's authorized range
OAuth2ClientDO client = oauth2ClientService.validOAuthClientFromCache(clientId, null,
grantTypeEnum.getGrantType(), scopes.keySet(), redirectUri);
// 2.1 Scenario one: the client requests automatic approval
if (Boolean.TRUE.equals(autoApprove)) {
// if auto-approval cannot be granted, return an empty url so the front end does not redirect
if (!oauth2ApproveService.checkForPreApproval(getLoginUserId(), getUserType(), clientId, scopes.keySet())) {
return success(null);
}
} else { // 2.2 Scenario two: the user made an explicit approve/deny decision
// if the decision does not pass, redirect to an error link
if (!oauth2ApproveService.updateAfterApproval(getLoginUserId(), getUserType(), clientId, scopes)) {
return success(OAuth2Utils.buildUnsuccessfulRedirect(redirectUri, responseType, state,
"access_denied", "User denied access"));
}
}
// 3.1 For the code (authorization code) flow, issue an authorization code and redirect
List<String> approveScopes = convertList(scopes.entrySet(), Map.Entry::getKey, Map.Entry::getValue);
if (grantTypeEnum == OAuth2GrantTypeEnum.AUTHORIZATION_CODE) {
return success(getAuthorizationCodeRedirect(getLoginUserId(), client, approveScopes, redirectUri, state));
}
// 3.2 For token, i.e. the implicit flow, issue an access token and redirect
return success(getImplicitGrantRedirect(getLoginUserId(), client, approveScopes, redirectUri, state));
}
|
@Test
public void testApproveOrDeny_grantTypeError() {
        // invoke and assert
assertServiceException(() -> oauth2OpenController.approveOrDeny(randomString(), null,
null, null, null, null),
new ErrorCode(400, "response_type 参数值只允许 code 和 token"));
}
|
public ServiceInfo processServiceInfo(String json) {
ServiceInfo serviceInfo = JacksonUtils.toObj(json, ServiceInfo.class);
serviceInfo.setJsonFromServer(json);
return processServiceInfo(serviceInfo);
}
|
@Test
void testProcessNullServiceInfo() {
assertNull(holder.processServiceInfo(new ServiceInfo()));
}
|
public static String getRuleName(final String ruleRow) {
String testVal = ruleRow.toLowerCase();
final int left = testVal.indexOf( DefaultRuleSheetListener.RULE_TABLE_TAG );
return ruleRow.substring( left + DefaultRuleSheetListener.RULE_TABLE_TAG.length() ).trim();
}
|
@Test
public void testRuleName() {
final String row = " RuleTable This is my rule name";
final String result = getRuleName(row);
assertThat(result).isEqualTo("This is my rule name");
}
|
synchronized ActivateWorkResult activateWorkForKey(ExecutableWork executableWork) {
ShardedKey shardedKey = executableWork.work().getShardedKey();
Deque<ExecutableWork> workQueue = activeWork.getOrDefault(shardedKey, new ArrayDeque<>());
// This key does not have any work queued up on it. Create one, insert Work, and mark the work
// to be executed.
if (!activeWork.containsKey(shardedKey) || workQueue.isEmpty()) {
workQueue.addLast(executableWork);
activeWork.put(shardedKey, workQueue);
incrementActiveWorkBudget(executableWork.work());
return ActivateWorkResult.EXECUTE;
}
// Check to see if we have this work token queued.
Iterator<ExecutableWork> workIterator = workQueue.iterator();
while (workIterator.hasNext()) {
ExecutableWork queuedWork = workIterator.next();
if (queuedWork.id().equals(executableWork.id())) {
return ActivateWorkResult.DUPLICATE;
}
if (queuedWork.id().cacheToken() == executableWork.id().cacheToken()) {
if (executableWork.id().workToken() > queuedWork.id().workToken()) {
// Check to see if the queuedWork is active. We only want to remove it if it is NOT
// currently active.
if (!queuedWork.equals(workQueue.peek())) {
workIterator.remove();
decrementActiveWorkBudget(queuedWork.work());
}
// Continue here to possibly remove more non-active stale work that is queued.
} else {
return ActivateWorkResult.STALE;
}
}
}
// Queue the work for later processing.
workQueue.addLast(executableWork);
incrementActiveWorkBudget(executableWork.work());
return ActivateWorkResult.QUEUED;
}
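
// Illustrative summary (not part of the source): activateWorkForKey maps each incoming
// ExecutableWork to one of four outcomes:
//   EXECUTE   - no work is queued for the sharded key; the work should run immediately.
//   DUPLICATE - an identical WorkId is already queued for the key.
//   STALE     - same cache token as queued work, but not a newer work token.
//   QUEUED    - otherwise; appended behind the work that is currently active.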
|
@Test
public void
testActivateWorkForKey_matchingCacheTokens_newWorkTokenGreater_queuedWorkNotActive_QUEUED() {
long matchingCacheToken = 1L;
long newWorkToken = 10L;
long queuedWorkToken = newWorkToken / 2;
ShardedKey shardedKey = shardedKey("someKey", 1L);
ExecutableWork differentWorkTokenWork = createWork(createWorkItem(100L, 100L, shardedKey));
ExecutableWork queuedWork =
createWork(createWorkItem(queuedWorkToken, matchingCacheToken, shardedKey));
ExecutableWork newWork =
createWork(createWorkItem(newWorkToken, matchingCacheToken, shardedKey));
activeWorkState.activateWorkForKey(differentWorkTokenWork);
activeWorkState.activateWorkForKey(queuedWork);
ActivateWorkResult activateWorkResult = activeWorkState.activateWorkForKey(newWork);
assertEquals(ActivateWorkResult.QUEUED, activateWorkResult);
assertTrue(readOnlyActiveWork.get(shardedKey).contains(newWork));
assertFalse(readOnlyActiveWork.get(shardedKey).contains(queuedWork));
assertEquals(differentWorkTokenWork, readOnlyActiveWork.get(shardedKey).peek());
}
|
private OperationNode createParDoOperation(
Network<Node, Edge> network,
ParallelInstructionNode node,
PipelineOptions options,
DataflowExecutionContext<?> executionContext,
DataflowOperationContext operationContext)
throws Exception {
ParallelInstruction instruction = node.getParallelInstruction();
ParDoInstruction parDo = instruction.getParDo();
TupleTag<?> mainOutputTag = tupleTag(parDo.getMultiOutputInfos().get(0));
ImmutableMap.Builder<TupleTag<?>, Integer> outputTagsToReceiverIndicesBuilder =
ImmutableMap.builder();
int successorOffset = 0;
for (Node successor : network.successors(node)) {
for (Edge edge : network.edgesConnecting(node, successor)) {
outputTagsToReceiverIndicesBuilder.put(
tupleTag(((MultiOutputInfoEdge) edge).getMultiOutputInfo()), successorOffset);
}
successorOffset += 1;
}
ParDoFn fn =
parDoFnFactory.create(
options,
CloudObject.fromSpec(parDo.getUserFn()),
parDo.getSideInputs(),
mainOutputTag,
outputTagsToReceiverIndicesBuilder.build(),
executionContext,
operationContext);
OutputReceiver[] receivers = getOutputReceivers(network, node);
return OperationNode.create(new ParDoOperation(fn, receivers, operationContext));
}
|
@Test
public void testCreateParDoOperation() throws Exception {
int producerIndex = 1;
int producerOutputNum = 2;
BatchModeExecutionContext context =
BatchModeExecutionContext.forTesting(options, counterSet, "testStage");
ParallelInstructionNode instructionNode =
ParallelInstructionNode.create(
createParDoInstruction(producerIndex, producerOutputNum, "DoFn"),
ExecutionLocation.UNKNOWN);
Node outputReceiverNode =
IntrinsicMapTaskExecutorFactory.createOutputReceiversTransform(STAGE, counterSet)
.apply(
InstructionOutputNode.create(
instructionNode.getParallelInstruction().getOutputs().get(0), PCOLLECTION_ID));
when(network.successors(instructionNode)).thenReturn(ImmutableSet.of(outputReceiverNode));
when(network.outDegree(instructionNode)).thenReturn(1);
when(network.edgesConnecting(instructionNode, outputReceiverNode))
.thenReturn(
ImmutableSet.<Edge>of(
MultiOutputInfoEdge.create(
instructionNode
.getParallelInstruction()
.getParDo()
.getMultiOutputInfos()
.get(0))));
Node operationNode =
mapTaskExecutorFactory
.createOperationTransformForParallelInstructionNodes(
STAGE, network, options, readerRegistry, sinkRegistry, context)
.apply(instructionNode);
assertThat(operationNode, instanceOf(OperationNode.class));
assertThat(((OperationNode) operationNode).getOperation(), instanceOf(ParDoOperation.class));
ParDoOperation parDoOperation = (ParDoOperation) ((OperationNode) operationNode).getOperation();
assertEquals(1, parDoOperation.receivers.length);
assertEquals(0, parDoOperation.receivers[0].getReceiverCount());
assertEquals(Operation.InitializationState.UNSTARTED, parDoOperation.initializationState);
}
|
@PostMapping
public AccountLogsResult getAccountLogs(@RequestBody @Valid AccountLogsRequest request,
                                            @RequestHeader(MijnDigidSession.MIJN_DIGID_SESSION_HEADER) String mijnDigiDsessionId) {
MijnDigidSession mijnDigiDSession = retrieveMijnDigiDSession(mijnDigiDsessionId);
return accountService.getAccountLogs(mijnDigiDSession.getAccountId(), mijnDigiDSession.getDeviceName(), mijnDigiDSession.getAppCode(), request);
}
|
@Test
public void testValidRequest() {
AccountLogsRequest request = new AccountLogsRequest();
request.setPageId(1);
AccountLogsResult result = new AccountLogsResult();
result.setTotalItems(10);
result.setTotalPages(1);
List<AccountLog> results = new ArrayList<>();
result.setResults(results);
result.setStatus(Status.OK);
result.setError("error");
when(accountService.getAccountLogs(anyLong(), anyString(), anyString(), any())).thenReturn(result);
AccountLogsResult accountLogs = accountLogsController.getAccountLogs(request, mijnDigiDSession.getId());
assertEquals(Status.OK, accountLogs.getStatus());
assertEquals("error", accountLogs.getError());
assertEquals(10, accountLogs.getTotalItems());
assertEquals(1, accountLogs.getTotalPages());
assertEquals(results, accountLogs.getResults());
}
|
@Override
public ContinuousEnumerationResult planSplits(IcebergEnumeratorPosition lastPosition) {
table.refresh();
if (lastPosition != null) {
return discoverIncrementalSplits(lastPosition);
} else {
return discoverInitialSplits();
}
}
|
@Test
public void testIncrementalFromEarliestSnapshotWithNonEmptyTable() throws Exception {
appendTwoSnapshots();
ScanContext scanContext =
ScanContext.builder()
.startingStrategy(StreamingStartingStrategy.INCREMENTAL_FROM_EARLIEST_SNAPSHOT)
.build();
ContinuousSplitPlannerImpl splitPlanner =
new ContinuousSplitPlannerImpl(TABLE_RESOURCE.tableLoader().clone(), scanContext, null);
ContinuousEnumerationResult initialResult = splitPlanner.planSplits(null);
assertThat(initialResult.fromPosition()).isNull();
// For inclusive behavior, the initial result should point to snapshot1's parent,
// which leads to null snapshotId and snapshotTimestampMs.
assertThat(initialResult.toPosition().snapshotId()).isNull();
assertThat(initialResult.toPosition().snapshotTimestampMs()).isNull();
assertThat(initialResult.splits()).isEmpty();
ContinuousEnumerationResult secondResult = splitPlanner.planSplits(initialResult.toPosition());
assertThat(secondResult.fromPosition().snapshotId()).isNull();
assertThat(secondResult.fromPosition().snapshotTimestampMs()).isNull();
assertThat(secondResult.toPosition().snapshotId().longValue())
.isEqualTo(snapshot2.snapshotId());
assertThat(secondResult.toPosition().snapshotTimestampMs().longValue())
.isEqualTo(snapshot2.timestampMillis());
IcebergSourceSplit split = Iterables.getOnlyElement(secondResult.splits());
assertThat(split.task().files()).hasSize(2);
Set<String> discoveredFiles =
split.task().files().stream()
.map(fileScanTask -> fileScanTask.file().path().toString())
.collect(Collectors.toSet());
// should discover files appended in both snapshot1 and snapshot2
Set<String> expectedFiles =
ImmutableSet.of(dataFile1.path().toString(), dataFile2.path().toString());
assertThat(discoveredFiles).containsExactlyInAnyOrderElementsOf(expectedFiles);
IcebergEnumeratorPosition lastPosition = secondResult.toPosition();
for (int i = 0; i < 3; ++i) {
lastPosition = verifyOneCycle(splitPlanner, lastPosition).lastPosition;
}
}
|
void buildPhysicalTopology() throws Exception {
if (CollectionUtils.isNotEmpty(plan.getFragments()) &&
plan.getTopFragment().getSink() instanceof OlapTableSink) {
ConnectContext context = queryCoordinator.getConnectContext();
context.getSessionVariable().setPreferComputeNode(false);
context.getSessionVariable().setUseComputeNodes(0);
OlapTableSink dataSink = getMVOlapTableSink();
            // NOTE: use a fake transaction id; the real one is generated when the epoch starts
long fakeTransactionId = 1;
long dbId = getView().getDbId();
long timeout = context.getSessionVariable().getQueryTimeoutS();
dataSink.init(context.getExecutionId(), fakeTransactionId, dbId, timeout);
dataSink.complete();
}
queryCoordinator.prepareExec();
List<ExecutionFragment> execFragments = queryCoordinator.getFragmentsInPreorder();
TDescriptorTable descTable = queryCoordinator.getDescriptorTable();
int tabletSinkDop = 1;
// Group all fragment instances by BE id, and package them into a task
BiMap<Long, TNetworkAddress> taskId2Addr = HashBiMap.create();
BiMap<TNetworkAddress, Long> addr2TaskId = taskId2Addr.inverse();
Map<Long, MVMaintenanceTask> tasksByBe = new HashMap<>();
long taskIdGen = 0;
int backendIdGen = 0;
TFragmentInstanceFactory execPlanFragmentParamsFactory = queryCoordinator.createTFragmentInstanceFactory();
for (ExecutionFragment execFragment : execFragments) {
List<TExecPlanFragmentParams> tParams = execPlanFragmentParamsFactory.create(
execFragment, execFragment.getInstances(), descTable, tabletSinkDop, tabletSinkDop);
for (int i = 0; i < execFragment.getInstances().size(); i++) {
FragmentInstance instance = execFragment.getInstances().get(i);
// Get brpc address instead of the default address
TNetworkAddress beRpcAddr = queryCoordinator.getBrpcAddress(instance.getWorkerId());
Long taskId = addr2TaskId.get(beRpcAddr);
MVMaintenanceTask task;
if (taskId == null) {
taskId = taskIdGen++;
task = MVMaintenanceTask.build(this, taskId, beRpcAddr, new ArrayList<>());
tasksByBe.put(taskId, task);
addr2TaskId.put(beRpcAddr, taskId);
} else {
task = tasksByBe.get(taskId);
}
// TODO(murphy) is this necessary
int backendId = backendIdGen++;
instance.setIndexInJob(backendId);
task.addFragmentInstance(tParams.get(i));
}
}
this.taskId2Addr = taskId2Addr;
this.taskMap = tasksByBe;
}
|
@Test
public void buildPhysicalTopology() throws Exception {
String sql = "select count(distinct v5) from t1 join t2";
Pair<String, ExecPlan> pair = UtFrameUtils.getPlanAndFragment(connectContext, sql);
assertEquals("AGGREGATE ([GLOBAL] aggregate [{7: count=count(7: count)}] group by [[]] having [null]\n" +
" EXCHANGE GATHER\n" +
" AGGREGATE ([DISTINCT_LOCAL] aggregate [{7: count=count(2: v5)}] group by [[]] having [null]\n" +
" AGGREGATE ([DISTINCT_GLOBAL] aggregate [{}] group by [[2: v5]] having [null]\n" +
" EXCHANGE SHUFFLE[2]\n" +
" AGGREGATE ([LOCAL] aggregate [{}] group by [[2: v5]] having [null]\n" +
" CROSS JOIN (join-predicate [null] post-join-predicate [null])\n" +
" SCAN (columns[2: v5] predicate[null])\n" +
" EXCHANGE BROADCAST\n" +
" SCAN (columns[4: v7] predicate[null])",
pair.first);
String currentDb = connectContext.getDatabase();
long dbId = GlobalStateMgr.getCurrentState().getDb(currentDb).getId();
MaterializedView view = new MaterializedView();
view.setDbId(dbId);
view.setId(1024);
view.setName("view1");
view.setMaintenancePlan(pair.second);
MVMaintenanceJob job = new MVMaintenanceJob(view);
job.buildContext();
job.buildPhysicalTopology();
Map<Long, MVMaintenanceTask> taskMap = job.getTasks();
System.err.println(taskMap);
assertEquals(1, taskMap.size());
MVMaintenanceTask task = taskMap.values().stream().findFirst().get();
System.err.println(task);
assertEquals(0, task.getTaskId());
List<TExecPlanFragmentParams> instances = task.getFragmentInstances();
assertEquals(4, instances.size());
TExecPlanFragmentParams firstInstance = instances.get(2);
System.err.println(firstInstance);
List<TPlanNode> planNodes = firstInstance.getFragment().getPlan().getNodes();
assertEquals(5, planNodes.size());
assertEquals(TPlanNodeType.AGGREGATION_NODE, planNodes.get(0).getNode_type());
assertEquals(TPlanNodeType.PROJECT_NODE, planNodes.get(1).getNode_type());
assertEquals(TPlanNodeType.NESTLOOP_JOIN_NODE, planNodes.get(2).getNode_type());
assertEquals(TPlanNodeType.OLAP_SCAN_NODE, planNodes.get(3).getNode_type());
assertEquals(TPlanNodeType.EXCHANGE_NODE, planNodes.get(4).getNode_type());
}
|
@Override
public DescribeShareGroupsResult describeShareGroups(final Collection<String> groupIds,
final DescribeShareGroupsOptions options) {
SimpleAdminApiFuture<CoordinatorKey, ShareGroupDescription> future =
DescribeShareGroupsHandler.newFuture(groupIds);
DescribeShareGroupsHandler handler = new DescribeShareGroupsHandler(options.includeAuthorizedOperations(), logContext);
invokeDriver(handler, future, options.timeoutMs);
return new DescribeShareGroupsResult(future.all().entrySet().stream()
.collect(Collectors.toMap(entry -> entry.getKey().idValue, Map.Entry::getValue)));
}
|
@Test
public void testDescribeShareGroupsWithAuthorizedOperationsOmitted() throws Exception {
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
env.kafkaClient().prepareResponse(
prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
ShareGroupDescribeResponseData data = new ShareGroupDescribeResponseData();
data.groups().add(new ShareGroupDescribeResponseData.DescribedGroup()
.setGroupId(GROUP_ID)
.setAuthorizedOperations(MetadataResponse.AUTHORIZED_OPERATIONS_OMITTED));
env.kafkaClient().prepareResponse(new ShareGroupDescribeResponse(data));
final DescribeShareGroupsResult result = env.adminClient().describeShareGroups(singletonList(GROUP_ID));
final ShareGroupDescription groupDescription = result.describedGroups().get(GROUP_ID).get();
assertNull(groupDescription.authorizedOperations());
}
}
|
public static ObjectNode convertFromGHResponse(GHResponse ghResponse, TranslationMap translationMap, Locale locale,
DistanceConfig distanceConfig) {
ObjectNode json = JsonNodeFactory.instance.objectNode();
if (ghResponse.hasErrors())
throw new IllegalStateException(
"If the response has errors, you should use the method NavigateResponseConverter#convertFromGHResponseError");
PointList waypoints = ghResponse.getBest().getWaypoints();
final ArrayNode routesJson = json.putArray("routes");
List<ResponsePath> paths = ghResponse.getAll();
for (int i = 0; i < paths.size(); i++) {
ResponsePath path = paths.get(i);
ObjectNode pathJson = routesJson.addObject();
putRouteInformation(pathJson, path, i, translationMap, locale, distanceConfig);
}
final ArrayNode waypointsJson = json.putArray("waypoints");
for (int i = 0; i < waypoints.size(); i++) {
ObjectNode waypointJson = waypointsJson.addObject();
// TODO get names
waypointJson.put("name", "");
putLocation(waypoints.getLat(i), waypoints.getLon(i), waypointJson);
}
json.put("code", "Ok");
// TODO: Maybe we need a different format... uuid: "cji4ja4f8004o6xrsta8w4p4h"
json.put("uuid", UUID.randomUUID().toString().replaceAll("-", ""));
return json;
}
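
// A rough sketch (an assumption, not verbatim output) of the Mapbox-style JSON shape
// that convertFromGHResponse produces:
//   {
//     "routes":    [ { ...route information per ResponsePath... } ],
//     "waypoints": [ { "name": "", "location": [lon, lat] }, ... ],
//     "code":      "Ok",
//     "uuid":      "<random hex with dashes stripped>"
//   }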
|
@Test
public void testMultipleWaypoints() {
GHRequest request = new GHRequest();
request.addPoint(new GHPoint(42.504606, 1.522438));
request.addPoint(new GHPoint(42.504776, 1.527209));
request.addPoint(new GHPoint(42.505144, 1.526113));
request.addPoint(new GHPoint(42.50529, 1.527218));
request.setProfile(profile);
GHResponse rsp = hopper.route(request);
ObjectNode json = NavigateResponseConverter.convertFromGHResponse(rsp, trMap, Locale.ENGLISH, distanceConfig);
// Check that all waypoints are there and in the right order
JsonNode waypointsJson = json.get("waypoints");
assertEquals(4, waypointsJson.size());
JsonNode waypointLoc = waypointsJson.get(0).get("location");
assertEquals(1.522438, waypointLoc.get(0).asDouble(), .00001);
waypointLoc = waypointsJson.get(1).get("location");
assertEquals(1.527209, waypointLoc.get(0).asDouble(), .00001);
waypointLoc = waypointsJson.get(2).get("location");
assertEquals(1.526113, waypointLoc.get(0).asDouble(), .00001);
waypointLoc = waypointsJson.get(3).get("location");
assertEquals(1.527218, waypointLoc.get(0).asDouble(), .00001);
// Check that there are 3 legs
JsonNode route = json.get("routes").get(0);
JsonNode legs = route.get("legs");
assertEquals(3, legs.size());
double duration = 0;
double distance = 0;
for (int i = 0; i < 3; i++) {
JsonNode leg = legs.get(i);
duration += leg.get("duration").asDouble();
distance += leg.get("distance").asDouble();
JsonNode steps = leg.get("steps");
JsonNode step = steps.get(0);
JsonNode maneuver = step.get("maneuver");
assertEquals("depart", maneuver.get("type").asText());
maneuver = steps.get(steps.size() - 1).get("maneuver");
assertEquals("arrive", maneuver.get("type").asText());
}
// Check if the duration and distance of the legs sum up to the overall route
// distance and duration
assertEquals(route.get("duration").asDouble(), duration, 1);
assertEquals(route.get("distance").asDouble(), distance, 1);
}
|
@Override
public Map<String, Object> assembleFrom(OAuth2AccessTokenEntity accessToken, UserInfo userInfo, Set<String> authScopes) {
Map<String, Object> result = newLinkedHashMap();
OAuth2Authentication authentication = accessToken.getAuthenticationHolder().getAuthentication();
result.put(ACTIVE, true);
if (accessToken.getPermissions() != null && !accessToken.getPermissions().isEmpty()) {
Set<Object> permissions = Sets.newHashSet();
for (Permission perm : accessToken.getPermissions()) {
Map<String, Object> o = newLinkedHashMap();
o.put("resource_set_id", perm.getResourceSet().getId().toString());
Set<String> scopes = Sets.newHashSet(perm.getScopes());
o.put("scopes", scopes);
permissions.add(o);
}
result.put("permissions", permissions);
} else {
Set<String> scopes = Sets.intersection(authScopes, accessToken.getScope());
result.put(SCOPE, Joiner.on(SCOPE_SEPARATOR).join(scopes));
}
if (accessToken.getExpiration() != null) {
try {
result.put(EXPIRES_AT, dateFormat.valueToString(accessToken.getExpiration()));
result.put(EXP, accessToken.getExpiration().getTime() / 1000L);
} catch (ParseException e) {
logger.error("Parse exception in token introspection", e);
}
}
if (userInfo != null) {
// if we have a UserInfo, use that for the subject
result.put(SUB, userInfo.getSub());
} else {
// otherwise, use the authentication's username
result.put(SUB, authentication.getName());
}
if(authentication.getUserAuthentication() != null) {
result.put(USER_ID, authentication.getUserAuthentication().getName());
}
result.put(CLIENT_ID, authentication.getOAuth2Request().getClientId());
result.put(TOKEN_TYPE, accessToken.getTokenType());
return result;
}
|
@Test
public void shouldAssembleExpectedResultForAccessTokenWithoutUserInfo() throws ParseException {
// given
OAuth2AccessTokenEntity accessToken = accessToken(new Date(123 * 1000L), scopes("foo", "bar"), null, "Bearer",
oauth2AuthenticationWithUser(oauth2Request("clientId"), "name"));
Set<String> authScopes = scopes("foo", "bar", "baz");
// when
Map<String, Object> result = assembler.assembleFrom(accessToken, null, authScopes);
// then
Map<String, Object> expected = new ImmutableMap.Builder<String, Object>()
.put("sub", "name")
.put("exp", 123L)
.put("expires_at", dateFormat.valueToString(new Date(123 * 1000L)))
.put("scope", "bar foo")
.put("active", Boolean.TRUE)
.put("user_id", "name")
.put("client_id", "clientId")
.put("token_type", "Bearer")
.build();
assertThat(result, is(equalTo(expected)));
}
|
public void updateConsumeOffset(MessageQueue mq, long offset) {
this.brokerController.getConsumerOffsetManager().commitOffset(
RemotingHelper.parseSocketAddressAddr(this.storeHost), TransactionalMessageUtil.buildConsumerGroup(), mq.getTopic(),
mq.getQueueId(), offset);
}
|
@Test
public void updateConsumeOffset() {
MessageQueue mq = new MessageQueue(TransactionalMessageUtil.buildOpTopic(), this.brokerController.getBrokerConfig().getBrokerName(),
0);
transactionBridge.updateConsumeOffset(mq, 0);
}
|
public ValidationResult validate(final Map<String, InternalTopicConfig> topicConfigs) {
log.info("Starting to validate internal topics {}.", topicConfigs.keySet());
final long now = time.milliseconds();
final long deadline = now + retryTimeoutMs;
final ValidationResult validationResult = new ValidationResult();
final Set<String> topicDescriptionsStillToValidate = new HashSet<>(topicConfigs.keySet());
final Set<String> topicConfigsStillToValidate = new HashSet<>(topicConfigs.keySet());
while (!topicDescriptionsStillToValidate.isEmpty() || !topicConfigsStillToValidate.isEmpty()) {
Map<String, KafkaFuture<TopicDescription>> descriptionsForTopic = Collections.emptyMap();
if (!topicDescriptionsStillToValidate.isEmpty()) {
final DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(topicDescriptionsStillToValidate);
descriptionsForTopic = describeTopicsResult.topicNameValues();
}
Map<String, KafkaFuture<Config>> configsForTopic = Collections.emptyMap();
if (!topicConfigsStillToValidate.isEmpty()) {
final DescribeConfigsResult describeConfigsResult = adminClient.describeConfigs(
topicConfigsStillToValidate.stream()
.map(topic -> new ConfigResource(Type.TOPIC, topic))
.collect(Collectors.toSet())
);
configsForTopic = describeConfigsResult.values().entrySet().stream()
.collect(Collectors.toMap(entry -> entry.getKey().name(), Map.Entry::getValue));
}
while (!descriptionsForTopic.isEmpty() || !configsForTopic.isEmpty()) {
if (!descriptionsForTopic.isEmpty()) {
doValidateTopic(
validationResult,
descriptionsForTopic,
topicConfigs,
topicDescriptionsStillToValidate,
(streamsSide, brokerSide) -> validatePartitionCount(validationResult, streamsSide, brokerSide)
);
}
if (!configsForTopic.isEmpty()) {
doValidateTopic(
validationResult,
configsForTopic,
topicConfigs,
topicConfigsStillToValidate,
(streamsSide, brokerSide) -> validateCleanupPolicy(validationResult, streamsSide, brokerSide)
);
}
maybeThrowTimeoutException(
Arrays.asList(topicDescriptionsStillToValidate, topicConfigsStillToValidate),
deadline,
String.format("Could not validate internal topics within %d milliseconds. " +
"This can happen if the Kafka cluster is temporarily not available.", retryTimeoutMs)
);
if (!descriptionsForTopic.isEmpty() || !configsForTopic.isEmpty()) {
Utils.sleep(100);
}
}
maybeSleep(
Arrays.asList(topicDescriptionsStillToValidate, topicConfigsStillToValidate),
deadline,
"validated"
);
}
log.info("Completed validation of internal topics {}.", topicConfigs.keySet());
return validationResult;
}
|
@Test
public void shouldThrowTimeoutExceptionWhenFuturesNeverCompleteDuringValidation() {
final AdminClient admin = mock(AdminClient.class);
final MockTime time = new MockTime(
(Integer) config.get(StreamsConfig.consumerPrefix(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG)) / 3
);
final InternalTopicManager topicManager = new InternalTopicManager(
time,
admin,
new StreamsConfig(config)
);
final KafkaFutureImpl<TopicDescription> topicDescriptionFutureThatNeverCompletes = new KafkaFutureImpl<>();
when(admin.describeTopics(Collections.singleton(topic1)))
.thenAnswer(answer -> new MockDescribeTopicsResult(mkMap(mkEntry(topic1, topicDescriptionFutureThatNeverCompletes))));
final KafkaFutureImpl<Config> topicConfigSuccessfulFuture = new KafkaFutureImpl<>();
topicConfigSuccessfulFuture.complete(
new Config(repartitionTopicConfig().entrySet().stream()
.map(entry -> new ConfigEntry(entry.getKey(), entry.getValue())).collect(Collectors.toSet()))
);
final ConfigResource topicResource = new ConfigResource(Type.TOPIC, topic1);
when(admin.describeConfigs(Collections.singleton(topicResource)))
.thenAnswer(answer -> new MockDescribeConfigsResult(mkMap(mkEntry(topicResource, topicConfigSuccessfulFuture))));
final InternalTopicConfig internalTopicConfig = setupRepartitionTopicConfig(topic1, 1);
assertThrows(
TimeoutException.class,
() -> topicManager.validate(Collections.singletonMap(topic1, internalTopicConfig))
);
}
|
public static StatementExecutorResponse execute(
final ConfiguredStatement<DropConnector> statement,
final SessionProperties sessionProperties,
final KsqlExecutionContext executionContext,
final ServiceContext serviceContext
) {
final String connectorName = statement.getStatement().getConnectorName();
final boolean ifExists = statement.getStatement().getIfExists();
final ConnectResponse<String> response =
serviceContext.getConnectClient().delete(connectorName);
if (response.error().isPresent()) {
if (ifExists && response.httpCode() == HttpStatus.SC_NOT_FOUND) {
return StatementExecutorResponse.handled(Optional.of(
new WarningEntity(statement.getMaskedStatementText(),
"Connector '" + connectorName + "' does not exist.")));
} else {
final String errorMsg = "Failed to drop connector: " + response.error().get();
throw new KsqlRestException(EndpointResponse.create()
.status(response.httpCode())
.entity(new KsqlErrorMessage(Errors.toErrorCode(response.httpCode()), errorMsg))
.build()
);
}
}
return StatementExecutorResponse.handled(Optional.of(
new DropConnectorEntity(statement.getMaskedStatementText(), connectorName)));
}
|
@Test
public void shouldPassInCorrectArgsToConnectClient() {
// Given:
when(connectClient.delete(anyString()))
.thenReturn(ConnectResponse.success("foo", HttpStatus.SC_OK));
// When:
        DropConnectorExecutor.execute(DROP_CONNECTOR_CONFIGURED, mock(SessionProperties.class), null, serviceContext);
// Then:
verify(connectClient).delete("foo");
}
|
@Override
public Collection<String> generateKeys(final AlgorithmSQLContext context, final int keyGenerateCount) {
Collection<String> result = new LinkedList<>();
ThreadLocalRandom threadLocalRandom = ThreadLocalRandom.current();
for (int index = 0; index < keyGenerateCount; index++) {
result.add(generateKey(threadLocalRandom));
}
return result;
}
|
@Test
void assertGenerateKeys() {
assertThat(algorithm.generateKeys(mock(AlgorithmSQLContext.class), 1).size(), is(1));
assertThat(algorithm.generateKeys(mock(AlgorithmSQLContext.class), 1).iterator().next().length(), is(32));
}
|
@VisibleForTesting
static List<String> tokenizeArguments(@Nullable final String args) {
if (args == null) {
return Collections.emptyList();
}
final Matcher matcher = ARGUMENTS_TOKENIZE_PATTERN.matcher(args);
final List<String> tokens = new ArrayList<>();
while (matcher.find()) {
            tokens.add(matcher.group().trim().replace("\"", "").replace("'", ""));
}
return tokens;
}
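
// A minimal usage sketch, assuming ARGUMENTS_TOKENIZE_PATTERN matches whitespace-separated
// tokens and treats quoted segments as single tokens (the method strips the quotes):
//   tokenizeArguments(null)                -> []
//   tokenizeArguments("--foo bar")         -> ["--foo", "bar"]
//   tokenizeArguments("--foo \"bar baz\"") -> ["--foo", "bar baz"]   (assumed quoting behavior)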
|
@Test
void testTokenizeNonQuoted() {
final List<String> arguments = JarHandlerUtils.tokenizeArguments("--foo bar");
assertThat(arguments.get(0)).isEqualTo("--foo");
assertThat(arguments.get(1)).isEqualTo("bar");
}
|
@Override
public Serializable read(final MySQLBinlogColumnDef columnDef, final MySQLPacketPayload payload) {
int time = payload.getByteBuf().readUnsignedMedium();
if (0x800000 == time) {
return MySQLTimeValueUtils.ZERO_OF_TIME;
}
MySQLFractionalSeconds fractionalSeconds = new MySQLFractionalSeconds(columnDef.getColumnMeta(), payload);
        int hour = (time >> 12) % (1 << 10);    // bits 12-21 (10 bits): hour
        int minute = (time >> 6) % (1 << 6);    // bits 6-11 (6 bits): minute
        int second = time % (1 << 6);           // bits 0-5 (6 bits): second
return LocalTime.of(hour, minute, second).withNano(fractionalSeconds.getNanos());
}
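
// Worked example (mirroring the fraction test below): the 3-byte value
// 0x800000 | (0x10 << 12) | (0x08 << 6) | 0x04 carries hour 0x10 = 16 in bits 12-21,
// minute 0x08 = 8 in bits 6-11, and second 0x04 = 4 in bits 0-5; since it is not exactly
// 0x800000 it decodes to LocalTime 16:08:04 plus the fractional-seconds nanos.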
|
@Test
void assertReadWithFraction1() {
columnDef.setColumnMeta(1);
when(payload.getByteBuf()).thenReturn(byteBuf);
when(payload.readInt1()).thenReturn(90);
when(byteBuf.readUnsignedMedium()).thenReturn(0x800000 | (0x10 << 12) | (0x08 << 6) | 0x04);
assertThat(new MySQLTime2BinlogProtocolValue().read(columnDef, payload), is(LocalTime.of(16, 8, 4).withNano(900000000)));
}
|
@Override
public TableDataConsistencyCheckResult swapToObject(final YamlTableDataConsistencyCheckResult yamlConfig) {
if (null == yamlConfig) {
return null;
}
if (!Strings.isNullOrEmpty(yamlConfig.getIgnoredType())) {
return new TableDataConsistencyCheckResult(TableDataConsistencyCheckIgnoredType.valueOf(yamlConfig.getIgnoredType()));
}
return new TableDataConsistencyCheckResult(yamlConfig.isMatched());
}
|
@Test
void assertSwapToObjectWithYamlTableDataConsistencyCheckResultIgnoredType() {
YamlTableDataConsistencyCheckResult yamlConfig = new YamlTableDataConsistencyCheckResult();
yamlConfig.setIgnoredType("NO_UNIQUE_KEY");
TableDataConsistencyCheckResult actual = yamlTableDataConsistencyCheckResultSwapper.swapToObject(yamlConfig);
assertThat(actual.getIgnoredType(), is(TableDataConsistencyCheckIgnoredType.NO_UNIQUE_KEY));
assertFalse(actual.isMatched());
}
|
@Override
public boolean betterThan(Num criterionValue1, Num criterionValue2) {
return lessIsBetter ? criterionValue1.isLessThan(criterionValue2)
: criterionValue1.isGreaterThan(criterionValue2);
}
|
@Test
public void betterThanWithLessIsBetter() {
AnalysisCriterion criterion = getCriterion();
assertTrue(criterion.betterThan(numOf(3), numOf(6)));
assertFalse(criterion.betterThan(numOf(7), numOf(4)));
}
|
public ValidationResult validate(final Map<String, InternalTopicConfig> topicConfigs) {
log.info("Starting to validate internal topics {}.", topicConfigs.keySet());
final long now = time.milliseconds();
final long deadline = now + retryTimeoutMs;
final ValidationResult validationResult = new ValidationResult();
final Set<String> topicDescriptionsStillToValidate = new HashSet<>(topicConfigs.keySet());
final Set<String> topicConfigsStillToValidate = new HashSet<>(topicConfigs.keySet());
while (!topicDescriptionsStillToValidate.isEmpty() || !topicConfigsStillToValidate.isEmpty()) {
Map<String, KafkaFuture<TopicDescription>> descriptionsForTopic = Collections.emptyMap();
if (!topicDescriptionsStillToValidate.isEmpty()) {
final DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(topicDescriptionsStillToValidate);
descriptionsForTopic = describeTopicsResult.topicNameValues();
}
Map<String, KafkaFuture<Config>> configsForTopic = Collections.emptyMap();
if (!topicConfigsStillToValidate.isEmpty()) {
final DescribeConfigsResult describeConfigsResult = adminClient.describeConfigs(
topicConfigsStillToValidate.stream()
.map(topic -> new ConfigResource(Type.TOPIC, topic))
.collect(Collectors.toSet())
);
configsForTopic = describeConfigsResult.values().entrySet().stream()
.collect(Collectors.toMap(entry -> entry.getKey().name(), Map.Entry::getValue));
}
while (!descriptionsForTopic.isEmpty() || !configsForTopic.isEmpty()) {
if (!descriptionsForTopic.isEmpty()) {
doValidateTopic(
validationResult,
descriptionsForTopic,
topicConfigs,
topicDescriptionsStillToValidate,
(streamsSide, brokerSide) -> validatePartitionCount(validationResult, streamsSide, brokerSide)
);
}
if (!configsForTopic.isEmpty()) {
doValidateTopic(
validationResult,
configsForTopic,
topicConfigs,
topicConfigsStillToValidate,
(streamsSide, brokerSide) -> validateCleanupPolicy(validationResult, streamsSide, brokerSide)
);
}
maybeThrowTimeoutException(
Arrays.asList(topicDescriptionsStillToValidate, topicConfigsStillToValidate),
deadline,
String.format("Could not validate internal topics within %d milliseconds. " +
"This can happen if the Kafka cluster is temporarily not available.", retryTimeoutMs)
);
if (!descriptionsForTopic.isEmpty() || !configsForTopic.isEmpty()) {
Utils.sleep(100);
}
}
maybeSleep(
Arrays.asList(topicDescriptionsStillToValidate, topicConfigsStillToValidate),
deadline,
"validated"
);
}
log.info("Completed validation of internal topics {}.", topicConfigs.keySet());
return validationResult;
}
|
@Test
public void shouldNotThrowExceptionIfTopicExistsWithDifferentReplication() {
setupTopicInMockAdminClient(topic1, repartitionTopicConfig());
// attempt to create it again with replication 1
final InternalTopicManager internalTopicManager2 = new InternalTopicManager(
time,
mockAdminClient,
new StreamsConfig(config)
);
final InternalTopicConfig internalTopicConfig = setupRepartitionTopicConfig(topic1, 1);
final ValidationResult validationResult =
internalTopicManager2.validate(Collections.singletonMap(topic1, internalTopicConfig));
assertThat(validationResult.missingTopics(), empty());
assertThat(validationResult.misconfigurationsForTopics(), anEmptyMap());
}
|
@Override
public Object unbox() {
switch (type) {
case STRING_ARRAY:
String[] arrays = new String[val.length];
for (int i = 0; i < arrays.length; ++i) {
arrays[i] = ((SelString) val[i]).getInternalVal();
}
return arrays;
case LONG_ARRAY:
long[] arrayl = new long[val.length];
for (int i = 0; i < arrayl.length; ++i) {
arrayl[i] = ((SelLong) val[i]).longVal();
}
return arrayl;
case DOUBLE_ARRAY:
double[] arrayd = new double[val.length];
for (int i = 0; i < arrayd.length; ++i) {
arrayd[i] = ((SelDouble) val[i]).doubleVal();
}
return arrayd;
case BOOLEAN_ARRAY:
boolean[] arrayb = new boolean[val.length];
for (int i = 0; i < arrayb.length; ++i) {
arrayb[i] = ((SelBoolean) val[i]).booleanVal();
}
return arrayb;
}
throw new UnsupportedOperationException("Not support to unbox an array with type " + type);
}
|
@Test
public void unbox() {
assertArrayEquals(new String[] {"foo", "bar"}, (String[]) one.unbox());
assertArrayEquals(new long[] {1, 2, 3}, (long[]) another.unbox());
}
|
public void runExtractor(Message msg) {
try(final Timer.Context ignored = completeTimer.time()) {
final String field;
try (final Timer.Context ignored2 = conditionTimer.time()) {
// We can only work on Strings.
if (!(msg.getField(sourceField) instanceof String)) {
conditionMissesCounter.inc();
return;
}
field = (String) msg.getField(sourceField);
// Decide if to extract at all.
if (conditionType.equals(ConditionType.STRING)) {
if (field.contains(conditionValue)) {
conditionHitsCounter.inc();
} else {
conditionMissesCounter.inc();
return;
}
} else if (conditionType.equals(ConditionType.REGEX)) {
if (regexConditionPattern.matcher(field).find()) {
conditionHitsCounter.inc();
} else {
conditionMissesCounter.inc();
return;
}
}
}
try (final Timer.Context ignored2 = executionTimer.time()) {
Result[] results;
try {
results = run(field);
} catch (ExtractorException e) {
final String error = "Could not apply extractor <" + getTitle() + " (" + getId() + ")>";
msg.addProcessingError(new Message.ProcessingError(
ProcessingFailureCause.ExtractorException, error, ExceptionUtils.getRootCauseMessage(e)));
return;
}
if (results == null || results.length == 0 || Arrays.stream(results).anyMatch(result -> result.getValue() == null)) {
return;
} else if (results.length == 1 && results[0].target == null) {
                // results[0].target is null if this extractor cannot produce multiple fields; use targetField in that case
msg.addField(targetField, results[0].getValue());
} else {
for (final Result result : results) {
msg.addField(result.getTarget(), result.getValue());
}
}
// Remove original from message?
if (cursorStrategy.equals(CursorStrategy.CUT) && !targetField.equals(sourceField) && !Message.RESERVED_FIELDS.contains(sourceField) && results[0].beginIndex != -1) {
final StringBuilder sb = new StringBuilder(field);
final List<Result> reverseList = Arrays.stream(results)
.sorted(Comparator.<Result>comparingInt(result -> result.endIndex).reversed())
.collect(Collectors.toList());
// remove all from reverse so that the indices still match
for (final Result result : reverseList) {
sb.delete(result.getBeginIndex(), result.getEndIndex());
}
final String builtString = sb.toString();
final String finalResult = builtString.trim().isEmpty() ? "fullyCutByExtractor" : builtString;
msg.removeField(sourceField);
// TODO don't add an empty field back, or rather don't add fullyCutByExtractor
msg.addField(sourceField, finalResult);
}
runConverters(msg);
}
}
}
|
@Test
public void testWithNullResult() throws Exception {
final TestExtractor extractor = new TestExtractor.Builder()
.callback(new Callable<Result[]>() {
@Override
public Result[] call() throws Exception {
return null;
}
})
.build();
final Message msg = createMessage("the hello");
extractor.runExtractor(msg);
assertThat(msg.hasField("target")).isFalse();
}
|
void runOnce() {
if (transactionManager != null) {
try {
transactionManager.maybeResolveSequences();
RuntimeException lastError = transactionManager.lastError();
// do not continue sending if the transaction manager is in a failed state
if (transactionManager.hasFatalError()) {
if (lastError != null)
maybeAbortBatches(lastError);
client.poll(retryBackoffMs, time.milliseconds());
return;
}
if (transactionManager.hasAbortableError() && shouldHandleAuthorizationError(lastError)) {
return;
}
// Check whether we need a new producerId. If so, we will enqueue an InitProducerId
// request which will be sent below
transactionManager.bumpIdempotentEpochAndResetIdIfNeeded();
if (maybeSendAndPollTransactionalRequest()) {
return;
}
} catch (AuthenticationException e) {
// This is already logged as error, but propagated here to perform any clean ups.
log.trace("Authentication exception while processing transactional request", e);
transactionManager.authenticationFailed(e);
}
}
long currentTimeMs = time.milliseconds();
long pollTimeout = sendProducerData(currentTimeMs);
client.poll(pollTimeout, currentTimeMs);
}
|
@Test
public void testInitProducerIdWithMaxInFlightOne() {
final long producerId = 123456L;
createMockClientWithMaxFlightOneMetadataPending();
// Initialize transaction manager. InitProducerId will be queued up until metadata response
// is processed and FindCoordinator can be sent to `leastLoadedNode`.
TransactionManager transactionManager = new TransactionManager(new LogContext(), "testInitProducerIdWithPendingMetadataRequest",
60000, 100L, new ApiVersions());
setupWithTransactionState(transactionManager, false, null, false);
ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(producerId, (short) 0);
transactionManager.initializeTransactions();
sender.runOnce();
// Process metadata response, prepare FindCoordinator and InitProducerId responses.
// Verify producerId after the sender is run to process responses.
MetadataResponse metadataUpdate = RequestTestUtils.metadataUpdateWith(1, Collections.emptyMap());
client.respond(metadataUpdate);
prepareFindCoordinatorResponse(Errors.NONE, "testInitProducerIdWithPendingMetadataRequest");
prepareInitProducerResponse(Errors.NONE, producerIdAndEpoch.producerId, producerIdAndEpoch.epoch);
waitForProducerId(transactionManager, producerIdAndEpoch);
}
|
@Override
public String named() {
return PluginEnum.GENERAL_CONTEXT.getName();
}
|
@Test
    public void testNamed() {
assertEquals(this.generalContextPlugin.named(), PluginEnum.GENERAL_CONTEXT.getName());
}
|
public List<String> parse(final CharSequence line) {
return this.lineParser.parse( line.toString() );
}
|
@Test
public void testLineParse() {
final CsvLineParser parser = new CsvLineParser();
final String s = "a,\"b\",c";
final List<String> list = parser.parse(s);
assertThat(list).hasSize(3).containsExactly("a", "b", "c");
}
|
@Udf
public int field(
@UdfParameter final String str,
@UdfParameter final String... args
) {
if (str == null || args == null) {
return 0;
}
for (int i = 0; i < args.length; i++) {
if (str.equals(args[i])) {
return i + 1;
}
}
return 0;
}
|
@Test
public void shouldFindSecondArgument() {
// When:
final int pos = field.field("world", "hello", "world");
// Then:
assertThat(pos, equalTo(2));
}
|
@Override
public Column convert(BasicTypeDefine typeDefine) {
PhysicalColumn.PhysicalColumnBuilder builder =
PhysicalColumn.builder()
.name(typeDefine.getName())
.sourceType(typeDefine.getColumnType())
.nullable(typeDefine.isNullable())
.defaultValue(typeDefine.getDefaultValue())
.comment(typeDefine.getComment());
String pgDataType = typeDefine.getDataType().toLowerCase();
switch (pgDataType) {
case PG_BOOLEAN:
builder.dataType(BasicType.BOOLEAN_TYPE);
break;
case PG_BOOLEAN_ARRAY:
builder.dataType(ArrayType.BOOLEAN_ARRAY_TYPE);
break;
case PG_SMALLSERIAL:
case PG_SMALLINT:
builder.dataType(BasicType.SHORT_TYPE);
break;
case PG_SMALLINT_ARRAY:
builder.dataType(ArrayType.SHORT_ARRAY_TYPE);
break;
case PG_INTEGER:
case PG_SERIAL:
builder.dataType(BasicType.INT_TYPE);
break;
case PG_INTEGER_ARRAY:
builder.dataType(ArrayType.INT_ARRAY_TYPE);
break;
case PG_BIGINT:
case PG_BIGSERIAL:
builder.dataType(BasicType.LONG_TYPE);
break;
case PG_BIGINT_ARRAY:
builder.dataType(ArrayType.LONG_ARRAY_TYPE);
break;
case PG_REAL:
builder.dataType(BasicType.FLOAT_TYPE);
break;
case PG_REAL_ARRAY:
builder.dataType(ArrayType.FLOAT_ARRAY_TYPE);
break;
case PG_DOUBLE_PRECISION:
builder.dataType(BasicType.DOUBLE_TYPE);
break;
case PG_DOUBLE_PRECISION_ARRAY:
builder.dataType(ArrayType.DOUBLE_ARRAY_TYPE);
break;
case PG_NUMERIC:
DecimalType decimalType;
if (typeDefine.getPrecision() != null && typeDefine.getPrecision() > 0) {
decimalType =
new DecimalType(
typeDefine.getPrecision().intValue(), typeDefine.getScale());
} else {
decimalType = new DecimalType(DEFAULT_PRECISION, DEFAULT_SCALE);
}
builder.dataType(decimalType);
break;
case PG_MONEY:
                // money ranges from -92233720368547758.08 to +92233720368547758.07; with the sign
                // that is 20 digits, so we use precision 30 to store it safely
DecimalType moneyDecimalType;
moneyDecimalType = new DecimalType(30, 2);
builder.dataType(moneyDecimalType);
builder.columnLength(30L);
builder.scale(2);
break;
case PG_CHAR:
case PG_CHARACTER:
builder.dataType(BasicType.STRING_TYPE);
if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) {
builder.columnLength(TypeDefineUtils.charTo4ByteLength(1L));
builder.sourceType(pgDataType);
} else {
builder.columnLength(TypeDefineUtils.charTo4ByteLength(typeDefine.getLength()));
builder.sourceType(String.format("%s(%s)", pgDataType, typeDefine.getLength()));
}
break;
case PG_VARCHAR:
case PG_CHARACTER_VARYING:
builder.dataType(BasicType.STRING_TYPE);
if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) {
builder.sourceType(pgDataType);
} else {
builder.sourceType(String.format("%s(%s)", pgDataType, typeDefine.getLength()));
builder.columnLength(TypeDefineUtils.charTo4ByteLength(typeDefine.getLength()));
}
break;
case PG_TEXT:
builder.dataType(BasicType.STRING_TYPE);
break;
case PG_UUID:
builder.dataType(BasicType.STRING_TYPE);
builder.sourceType(pgDataType);
builder.columnLength(128L);
break;
case PG_JSON:
case PG_JSONB:
case PG_XML:
case PG_GEOMETRY:
case PG_GEOGRAPHY:
builder.dataType(BasicType.STRING_TYPE);
break;
case PG_CHAR_ARRAY:
case PG_VARCHAR_ARRAY:
case PG_TEXT_ARRAY:
builder.dataType(ArrayType.STRING_ARRAY_TYPE);
break;
case PG_BYTEA:
builder.dataType(PrimitiveByteArrayType.INSTANCE);
break;
case PG_DATE:
builder.dataType(LocalTimeType.LOCAL_DATE_TYPE);
break;
case PG_TIME:
case PG_TIME_TZ:
builder.dataType(LocalTimeType.LOCAL_TIME_TYPE);
if (typeDefine.getScale() != null && typeDefine.getScale() > MAX_TIME_SCALE) {
builder.scale(MAX_TIME_SCALE);
log.warn(
"The scale of time type is larger than {}, it will be truncated to {}",
MAX_TIME_SCALE,
MAX_TIME_SCALE);
} else {
builder.scale(typeDefine.getScale());
}
break;
case PG_TIMESTAMP:
case PG_TIMESTAMP_TZ:
builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
if (typeDefine.getScale() != null && typeDefine.getScale() > MAX_TIMESTAMP_SCALE) {
builder.scale(MAX_TIMESTAMP_SCALE);
log.warn(
"The scale of timestamp type is larger than {}, it will be truncated to {}",
MAX_TIMESTAMP_SCALE,
MAX_TIMESTAMP_SCALE);
} else {
builder.scale(typeDefine.getScale());
}
break;
default:
throw CommonError.convertToSeaTunnelTypeError(
identifier(), typeDefine.getDataType(), typeDefine.getName());
}
return builder.build();
}
|
@Test
public void testConvertOtherString() {
BasicTypeDefine<Object> typeDefine =
BasicTypeDefine.builder().name("test").columnType("text").dataType("text").build();
Column column = PostgresTypeConverter.INSTANCE.convert(typeDefine);
Assertions.assertEquals(typeDefine.getName(), column.getName());
Assertions.assertEquals(BasicType.STRING_TYPE, column.getDataType());
Assertions.assertEquals(null, column.getColumnLength());
Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType());
typeDefine =
BasicTypeDefine.builder().name("test").columnType("json").dataType("json").build();
column = PostgresTypeConverter.INSTANCE.convert(typeDefine);
Assertions.assertEquals(typeDefine.getName(), column.getName());
Assertions.assertEquals(BasicType.STRING_TYPE, column.getDataType());
Assertions.assertEquals(null, column.getColumnLength());
Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType());
typeDefine =
BasicTypeDefine.builder()
.name("test")
.columnType("jsonb")
.dataType("jsonb")
.build();
column = PostgresTypeConverter.INSTANCE.convert(typeDefine);
Assertions.assertEquals(typeDefine.getName(), column.getName());
Assertions.assertEquals(BasicType.STRING_TYPE, column.getDataType());
Assertions.assertEquals(null, column.getColumnLength());
Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType());
typeDefine =
BasicTypeDefine.builder().name("test").columnType("xml").dataType("xml").build();
column = PostgresTypeConverter.INSTANCE.convert(typeDefine);
Assertions.assertEquals(typeDefine.getName(), column.getName());
Assertions.assertEquals(BasicType.STRING_TYPE, column.getDataType());
Assertions.assertEquals(null, column.getColumnLength());
Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType());
}
|
public static boolean isFastStatsSame(Partition oldPart, Partition newPart) {
        // stats must be recalculated if the new and old partitions report different fast stats
if ((oldPart != null) && oldPart.isSetParameters() && newPart != null && newPart.isSetParameters()) {
for (String stat : StatsSetupConst.FAST_STATS) {
if (oldPart.getParameters().containsKey(stat) && newPart.getParameters().containsKey(stat)) {
Long oldStat = Long.parseLong(oldPart.getParameters().get(stat));
String newStat = newPart.getParameters().get(stat);
if (newStat == null || !oldStat.equals(Long.parseLong(newStat))) {
return false;
}
} else {
return false;
}
}
return true;
}
return false;
}
|
@Test
public void isFastStatsSameNullStatsInNew() {
Partition oldPartition = new Partition();
Partition newPartition = new Partition();
Map<String, String> oldParams = new HashMap<>();
Map<String, String> newParams = new HashMap<>();
long testVal = 1;
for (String key : FAST_STATS) {
oldParams.put(key, String.valueOf(testVal));
newParams.put(key, null);
}
oldPartition.setParameters(oldParams);
newPartition.setParameters(newParams);
assertFalse(MetaStoreServerUtils.isFastStatsSame(oldPartition, newPartition));
}
|
@Override
public SelJodaDateTimeProperty assignOps(SelOp op, SelType rhs) {
if (op == SelOp.ASSIGN) {
SelTypeUtil.checkTypeMatch(this.type(), rhs.type());
this.val = ((SelJodaDateTimeProperty) rhs).val;
return this;
}
throw new UnsupportedOperationException(type() + " DO NOT support assignment operation " + op);
}
|
@Test
public void testAssignOps() {
assertEquals("DATETIME_PROPERTY: Property[dayOfWeek]", one.type() + ": " + one);
one.assignOps(SelOp.ASSIGN, another);
assertEquals("DATETIME_PROPERTY: Property[dayOfMonth]", one.type() + ": " + one);
}
|
public String getToken() throws IOException {
LOGGER.debug("[Agent Registration] Using URL {} to get a token.", tokenURL);
HttpRequestBase getTokenRequest = (HttpRequestBase) RequestBuilder.get(tokenURL)
.addParameter("uuid", agentRegistry.uuid())
.build();
try (CloseableHttpResponse response = httpClient.execute(getTokenRequest)) {
if (response.getStatusLine().getStatusCode() == SC_OK) {
LOGGER.info("The server has generated token for the agent.");
return responseBody(response);
} else {
LOGGER.error("[Agent Registration] Got status {} from GoCD", response.getStatusLine());
String error = Optional.ofNullable(ContentType.get(response.getEntity()))
.filter(ct -> ContentType.TEXT_HTML.getMimeType().equals(ct.getMimeType()))
.map(ignore -> "<non-machine HTML response>")
.orElseGet(() -> responseBody(response));
throw new RuntimeException(String.format("Agent registration could not acquire token due to %s: %s", response.getStatusLine(), error));
}
} finally {
getTokenRequest.releaseConnection();
}
}
|
@Test
void shouldErrorOutIfServerRejectTheRequest() throws Exception {
final CloseableHttpResponse httpResponse = mock(CloseableHttpResponse.class);
when(agentRegistry.uuid()).thenReturn("agent-uuid");
when(httpClient.execute(any(HttpRequestBase.class))).thenReturn(httpResponse);
when(httpResponse.getEntity()).thenReturn(new StringEntity("A token has already been issued for this agent."));
when(httpResponse.getStatusLine()).thenReturn(new BasicStatusLine(new ProtocolVersion("https", 1, 2), SC_UNPROCESSABLE_ENTITY, null));
assertThatCode(() -> tokenRequester.getToken())
.isInstanceOf(RuntimeException.class)
.hasMessage("Agent registration could not acquire token due to https/1.2 422 : A token has already been issued for this agent.");
}
|
@Override
public void onLeaderInformationChange(String componentId, LeaderInformation leaderInformation) {
synchronized (lock) {
notifyLeaderInformationChangeInternal(
componentId,
leaderInformation,
confirmedLeaderInformation.forComponentIdOrEmpty(componentId));
}
}
|
@Test
void testGrantDoesNotBlockNotifyAllKnownLeaderInformation() throws Exception {
testLeaderEventDoesNotBlockLeaderInformationChangeEventHandling(
(listener, componentId, storedLeaderInformation) -> {
listener.onLeaderInformationChange(storedLeaderInformation);
});
}
|
@Override public Repository getRepository() {
try {
// NOTE: this class formerly used a ranking system to prioritize the providers registered and would check them in order
// of priority for the first non-null repository. In practice, we only ever registered one at a time, spoon or PUC.
// As such, the priority ranking is gone and will need to be reintroduced if desired later.
Collection<KettleRepositoryProvider> repositoryProviders = PluginServiceLoader.loadServices( KettleRepositoryProvider.class );
return repositoryProviders.stream().map( KettleRepositoryProvider::getRepository ).filter( Objects::nonNull ).findFirst().orElse( null );
} catch ( KettlePluginException e ) {
logger.error( "Error getting repository", e );
}
return null;
}
|
@Test
public void testGetRepositoryNone() {
assertNull( kettleRepositoryLocator.getRepository() );
}
|
public String extractDeclaredEncoding(byte[] bytes) {
int index = ArrayUtils.indexOf(bytes, (byte) '>');
if (index == -1) {
return null;
}
String pi = new String(ArrayUtils.subarray(bytes, 0, index + 1)).replace('\'', '"');
index = StringUtils.indexOf(pi, "encoding=\"");
if (index == -1) {
return null;
}
String encoding = pi.substring(index + 10);
encoding = encoding.substring(0, encoding.indexOf('"'));
return encoding;
}
|
@Test
void testExtractDeclaredEncoding() {
Assertions.assertNull(encodingDetector.extractDeclaredEncoding("<?xml ?>".getBytes()));
Assertions.assertNull(encodingDetector.extractDeclaredEncoding("<feed></feed>".getBytes()));
Assertions.assertEquals("UTF-8", encodingDetector.extractDeclaredEncoding("<?xml encoding=\"UTF-8\" ?>".getBytes()));
Assertions.assertEquals("UTF-8", encodingDetector.extractDeclaredEncoding("<?xml encoding='UTF-8' ?>".getBytes()));
Assertions.assertEquals("UTF-8", encodingDetector.extractDeclaredEncoding("<?xml encoding='UTF-8'?>".getBytes()));
}
|
public static synchronized String getMetric(MetricVisitor visitor, MetricsAction.RequestParams requestParams) {
if (!hasInit) {
return "";
}
// update the metrics first
updateMetrics();
// jvm
JvmStatCollector jvmStatCollector = new JvmStatCollector();
JvmStats jvmStats = jvmStatCollector.stats();
visitor.visitJvm(jvmStats);
// starrocks metrics
for (Metric metric : STARROCKS_METRIC_REGISTER.getMetrics()) {
visitor.visit(metric);
}
// database metrics
collectDatabaseMetrics(visitor);
// table metrics
if (requestParams.isCollectTableMetrics()) {
collectTableMetrics(visitor, requestParams.isMinifyTableMetrics());
}
// materialized view metrics
if (requestParams.isCollectMVMetrics()) {
MaterializedViewMetricsRegistry.collectMaterializedViewMetrics(visitor, requestParams.isMinifyMVMetrics());
}
// histogram
SortedMap<String, Histogram> histograms = METRIC_REGISTER.getHistograms();
for (Map.Entry<String, Histogram> entry : histograms.entrySet()) {
visitor.visitHistogram(entry.getKey(), entry.getValue());
}
ResourceGroupMetricMgr.visitQueryLatency();
// collect routine load process metrics
if (Config.enable_routine_load_lag_metrics) {
collectRoutineLoadProcessMetrics(visitor);
}
if (Config.memory_tracker_enable) {
collectMemoryUsageMetrics(visitor);
}
// collect http metrics
HttpMetricRegistry.getInstance().visit(visitor);
// collect starmgr related metrics as well
StarMgrServer.getCurrentState().visitMetrics(visitor);
// node info
visitor.getNodeInfo();
return visitor.build();
}
|
@Test
public void testGetMetric() throws Exception {
starRocksAssert.useDatabase("test_metric")
.withTable("create table t1 (c1 int, c2 string)" +
" distributed by hash(c1) " +
" properties('replication_num'='1') ");
Table t1 = starRocksAssert.getTable("test_metric", "t1");
// update metric
TableMetricsEntity entity = TableMetricsRegistry.getInstance().getMetricsEntity(t1.getId());
entity.counterScanBytesTotal.increase(1024L);
// verify metric
JsonMetricVisitor visitor = new JsonMetricVisitor("m");
MetricsAction.RequestParams params = new MetricsAction.RequestParams(true, true, true, true);
MetricRepo.getMetric(visitor, params);
String json = visitor.build();
Assert.assertTrue(StringUtils.isNotEmpty(json));
Assert.assertTrue(json.contains("test_metric"));
}
|
@Override
public void filter(ContainerRequestContext requestContext, ContainerResponseContext responseContext) throws IOException {
responseContext.getHeaders().add(X_FRAME_OPTIONS, (httpAllowEmbedding ? EmbeddingOptions.SAMEORIGIN : EmbeddingOptions.DENY).toString());
}
|
@Test
void allowsEmbeddingForSameOriginIfConfigurationSettingIsTrue() throws IOException {
final EmbeddingControlFilter filter = new EmbeddingControlFilter(true);
final ContainerResponseContext responseContext = new ContainerResponse(requestContext, Response.ok().build());
filter.filter(requestContext, responseContext);
assertThat(responseContext.getHeaders())
.containsEntry("X-Frame-Options", Collections.singletonList("SAMEORIGIN"));
}
|
@Override
public V put(final K key, final V value) {
final Entry<K, V>[] table = this.table;
final int hash = key.hashCode();
final int index = HashUtil.indexFor(hash, table.length, mask);
for (Entry<K, V> e = table[index]; e != null; e = e.hashNext) {
final K entryKey;
if ((entryKey = e.key) == key || entryKey.equals(key)) {
moveToTop(e);
return e.setValue(value);
}
}
final Entry<K, V> e = new Entry<>(key, value);
e.hashNext = table[index];
table[index] = e;
final Entry<K, V> top = this.top;
e.next = top;
if (top != null) {
top.previous = e;
} else {
back = e;
}
this.top = e;
_size += 1;
if (removeEldestEntry(back)) {
remove(eldestKey());
} else if (_size > capacity) {
rehash(HashUtil.nextCapacity(capacity));
}
return null;
}
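
// Note (not from the source): put() links every new entry in at `top`, and a key hit in
// the hash chain calls moveToTop(), so iteration order is most-recently-used first. That
// is why the keySet test below sees keys in reverse insertion order (9999 down to 0).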
|
@Test
public void keySet() {
final LinkedHashMap<Integer, String> tested = new LinkedHashMap<>();
for (int i = 0; i < 10000; ++i) {
tested.put(i, Integer.toString(i));
}
int i = 10000;
for (Integer key : tested.keySet()) {
Assert.assertEquals(--i, key.intValue());
}
}
|
public static void debug(Logger logger, String msg) {
if (logger == null) {
return;
}
if (logger.isDebugEnabled()) {
logger.debug(msg);
}
}
|
@Test
void testDebug() {
Logger logger = Mockito.mock(Logger.class);
when(logger.isDebugEnabled()).thenReturn(true);
LogHelper.debug(logger, "debug");
verify(logger).debug("debug");
Throwable t = new RuntimeException();
LogHelper.debug(logger, t);
verify(logger).debug(t);
LogHelper.debug(logger, "debug", t);
verify(logger).debug("debug", t);
}
|
public static Map<String, String> revertSubscribe(Map<String, String> subscribe) {
Map<String, String> newSubscribe = new HashMap<>();
for (Map.Entry<String, String> entry : subscribe.entrySet()) {
String serviceName = entry.getKey();
String serviceQuery = entry.getValue();
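            // Keys shaped like "group/interface:version" are unpacked into group and version query parameters.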
if (StringUtils.isContains(serviceName, ':') && StringUtils.isContains(serviceName, '/')) {
Map<String, String> params = StringUtils.parseQueryString(serviceQuery);
String name = serviceName;
int i = name.indexOf('/');
if (i >= 0) {
params.put(GROUP_KEY, name.substring(0, i));
name = name.substring(i + 1);
}
i = name.lastIndexOf(':');
if (i >= 0) {
params.put(VERSION_KEY, name.substring(i + 1));
name = name.substring(0, i);
}
newSubscribe.put(name, StringUtils.toQueryString(params));
} else {
newSubscribe.put(serviceName, serviceQuery);
}
}
return newSubscribe;
}
|
@Test
void testRevertSubscribe() {
String key = "perf/dubbo.test.api.HelloService:1.0.0";
Map<String, String> subscribe = new HashMap<String, String>();
subscribe.put(key, null);
Map<String, String> newSubscribe = UrlUtils.revertSubscribe(subscribe);
Map<String, String> expectSubscribe = new HashMap<String, String>();
expectSubscribe.put("dubbo.test.api.HelloService", "group=perf&version=1.0.0");
assertEquals(expectSubscribe, newSubscribe);
}
|
@Override
public <VOut> KStream<K, VOut> processValues(
final FixedKeyProcessorSupplier<? super K, ? super V, VOut> processorSupplier,
final String... stateStoreNames
) {
return processValues(
processorSupplier,
Named.as(builder.newProcessorName(PROCESSVALUES_NAME)),
stateStoreNames
);
}
|
@Test
public void shouldNotAllowNullProcessValuesSupplierOnProcess() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.processValues((FixedKeyProcessorSupplier<? super String, ? super String, Void>) null));
assertThat(exception.getMessage(), equalTo("processorSupplier can't be null"));
}
|
@VisibleForTesting
void validateTemplateParams(MailTemplateDO template, Map<String, Object> templateParams) {
template.getParams().forEach(key -> {
Object value = templateParams.get(key);
if (value == null) {
throw exception(MAIL_SEND_TEMPLATE_PARAM_MISS, key);
}
});
}
|
@Test
public void testValidateTemplateParams_paramMiss() {
        // Prepare the parameters
MailTemplateDO template = randomPojo(MailTemplateDO.class,
o -> o.setParams(Lists.newArrayList("code")));
Map<String, Object> templateParams = new HashMap<>();
        // Mock the methods
        // Call and assert the exception
assertServiceException(() -> mailSendService.validateTemplateParams(template, templateParams),
MAIL_SEND_TEMPLATE_PARAM_MISS, "code");
}
|
@Override
public DateTime touch(AccessToken accessToken) throws ValidationException {
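        // Delegate to the per-token loading cache; a failed load is logged at debug level and mapped to null.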
try {
return lastAccessCache.get(accessToken.getId());
} catch (ExecutionException e) {
LOG.debug("Ignoring error: " + e.getMessage());
return null;
}
}
|
@Test
@MongoDBFixtures("accessTokensSingleToken.json")
public void testTouch() throws Exception {
accessTokenService.setLastAccessCache(1, TimeUnit.NANOSECONDS);
final AccessToken token = accessTokenService.load("foobar");
final DateTime firstAccess = accessTokenService.touch(token);
        Thread.sleep(1, 0);
final DateTime secondAccess = accessTokenService.touch(token);
assertThat(secondAccess).isGreaterThan(firstAccess);
}
|
@Override
public JavaKeyStore create(SecureConfig config) {
if (exists(config)) {
throw new SecretStoreException.AlreadyExistsException(String.format("Logstash keystore at %s already exists.",
new String(config.getPlainText(PATH_KEY))));
}
try {
init(config);
lock.lock();
LOGGER.debug("Creating new keystore at {}.", keyStorePath.toAbsolutePath());
String keyStorePermissions = filePermissions;
//create the keystore on disk with a default entry to identify this as a logstash keystore
            //cannot set POSIX attributes on create here since not all platforms (e.g. Windows) are POSIX; *nix gets the umask default, and POSIX permissions are re-applied below
Files.createFile(keyStorePath);
try {
keyStore = KeyStore.Builder.newInstance(KEYSTORE_TYPE, null, protectionParameter).getKeyStore();
SecretKeyFactory factory = SecretKeyFactory.getInstance("PBE");
byte[] base64 = SecretStoreUtil.base64Encode(LOGSTASH_MARKER.getKey().getBytes(StandardCharsets.UTF_8));
SecretKey secretKey = factory.generateSecret(new PBEKeySpec(SecretStoreUtil.asciiBytesToChar(base64)));
keyStore.setEntry(LOGSTASH_MARKER.toExternalForm(), new KeyStore.SecretKeyEntry(secretKey), protectionParameter);
saveKeyStore();
PosixFileAttributeView attrs = Files.getFileAttributeView(keyStorePath, PosixFileAttributeView.class);
if (attrs != null) {
//the directory umask applies when creating the file, so re-apply permissions here
attrs.setPermissions(PosixFilePermissions.fromString(keyStorePermissions));
}
LOGGER.info("Created Logstash keystore at {}", keyStorePath.toAbsolutePath());
return this;
} catch (Exception e) {
throw new SecretStoreException.CreateException("Failed to create Logstash keystore.", e);
}
} catch (SecretStoreException sse) {
throw sse;
} catch (NoSuchFileException | AccessDeniedException fe) {
throw new SecretStoreException.CreateException("Error while trying to create the Logstash keystore. Please ensure that path to " + keyStorePath.toAbsolutePath() +
" exists and is writable", fe);
} catch (Exception e) { //should never happen
throw new SecretStoreException.UnknownException("Error while trying to create the Logstash keystore. ", e);
} finally {
releaseLock(lock);
config.clearValues();
}
}
|
@Test
public void testEmptyNotAllowedOnCreate() throws IOException {
Path altPath = folder.newFolder().toPath().resolve("alt.logstash.keystore");
SecureConfig altConfig = new SecureConfig();
altConfig.add("keystore.file", altPath.toString().toCharArray());
altConfig.add(SecretStoreFactory.KEYSTORE_ACCESS_KEY, "".toCharArray());
assertThrows(SecretStoreException.CreateException.class, () -> {
new JavaKeyStore().create(altConfig);
});
}
|
@VisibleForTesting
long getNumBytesToRead(long fileLength, long position, long bufLength) {
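        // Return a full buffer's worth when it fits before EOF, otherwise only the remaining bytes.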
if (position + bufLength < fileLength) {
return bufLength;
} else {
return fileLength - position;
}
}
|
@Test(timeout = 40000)
public void testGetNumBytesToRead() {
long pos = 100;
long buffLength = 1024;
long fileLength = 2058;
RetriableFileCopyCommand retriableFileCopyCommand =
new RetriableFileCopyCommand("Testing NumBytesToRead ",
FileAction.OVERWRITE);
long numBytes = retriableFileCopyCommand
.getNumBytesToRead(fileLength, pos, buffLength);
Assert.assertEquals(1024, numBytes);
pos += numBytes;
numBytes = retriableFileCopyCommand
.getNumBytesToRead(fileLength, pos, buffLength);
Assert.assertEquals(934, numBytes);
pos += numBytes;
numBytes = retriableFileCopyCommand
.getNumBytesToRead(fileLength, pos, buffLength);
Assert.assertEquals(0, numBytes);
}
|
@Deprecated
public String idFromFilename(@NonNull String filename) {
return filename;
}
|
@SuppressWarnings("deprecation")
@Test
public void caseInsensitivePassesThroughOldLegacy() {
IdStrategy idStrategy = new IdStrategy.CaseInsensitive();
assertThat(idStrategy.idFromFilename("make\u1000000"), is("make\u1000000"));
assertThat(idStrategy.idFromFilename("\u306f\u56fd\u5185\u3067\u6700\u5927"), is("\u306f\u56fd\u5185\u3067\u6700\u5927"));
assertThat(idStrategy.idFromFilename("~fred"), is("~fred"));
assertThat(idStrategy.idFromFilename("~1fred"), is("~1fred"));
}
|
public static boolean validMagicNumbers( final BufferedInputStream bin ) throws IOException
{
final List<String> validMagicBytesCollection = JiveGlobals.getListProperty( "plugins.upload.magic-number.values.expected-value", Arrays.asList( "504B0304", "504B0506", "504B0708" ) );
for ( final String entry : validMagicBytesCollection )
{
final byte[] validMagicBytes = StringUtils.decodeHex( entry );
bin.mark( validMagicBytes.length );
try
{
final byte[] magicBytes = new byte[validMagicBytes.length];
int remaining = validMagicBytes.length;
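                // read() may return fewer bytes than requested, so loop until the signature buffer is full or EOF.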
while (remaining > 0) {
final int location = validMagicBytes.length - remaining;
final int count = bin.read(magicBytes, location, remaining);
if (count == -1) {
break;
}
remaining -= count;
}
if ( remaining <= 0 && Arrays.equals( validMagicBytes, magicBytes ) )
{
return true;
}
}
finally
{
bin.reset();
}
}
return false;
}
|
@Test
public void testTxtMagicBytes() throws Exception
{
// Setup test fixture.
try (final InputStream inputStream = getClass().getClassLoader().getResourceAsStream( "fullchain.pem" )) {
assert inputStream != null;
try (final BufferedInputStream in = new BufferedInputStream( inputStream )) {
// Execute system under test
final boolean result = PluginManager.validMagicNumbers(in);
// Verify results.
assertFalse(result);
}
}
}
|
public static int getLength(byte[] raw) {
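        // Skip over the tag, then decode and return the ASN.1 length octets that follow.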
try (final Asn1InputStream is = new Asn1InputStream(raw)) {
is.readTag();
return is.readLength();
}
}
|
@Test
public void getLengthSingleByte() {
final byte[] data = new byte[0x7f];
final byte[] obj = Arrays.concatenate(new byte[] { 0x10, (byte) 0x7f }, data);
assertEquals(0x7f, Asn1Utils.getLength(obj));
}
|
@Override
public Result responseMessageForCheckout(String responseBody) {
return jsonResultMessageHandler.toResult(responseBody);
}
|
@Test
public void shouldBuildResultFromCheckoutResponse() throws Exception {
        String responseBody = "{\"status\":\"failure\",\"messages\":[\"message-one\",\"message-two\"]}";
Result result = messageHandler.responseMessageForCheckout(responseBody);
assertFailureResult(result, List.of("message-one", "message-two"));
}
|
T getFunction(final List<SqlArgument> arguments) {
// first try to get the candidates without any implicit casting
Optional<T> candidate = findMatchingCandidate(arguments, false);
if (candidate.isPresent()) {
return candidate.get();
} else if (!supportsImplicitCasts) {
throw createNoMatchingFunctionException(arguments);
}
// if none were found (candidate isn't present) try again with implicit casting
candidate = findMatchingCandidate(arguments, true);
if (candidate.isPresent()) {
return candidate.get();
}
throw createNoMatchingFunctionException(arguments);
}
|
@Test
public void shouldChooseLaterVariadicWhenTwoObjVariadicsMatch() {
// Given:
givenFunctions(
function(OTHER, 1, GenericType.of("A"), OBJ_VARARGS, STRING, DOUBLE),
function(EXPECTED, 2, GenericType.of("B"), INT, OBJ_VARARGS, DOUBLE)
);
// When:
final KsqlScalarFunction fun = udfIndex.getFunction(ImmutableList.of(
SqlArgument.of(SqlTypes.BIGINT),
SqlArgument.of(SqlTypes.INTEGER),
SqlArgument.of(SqlTypes.STRING),
SqlArgument.of(SqlTypes.DOUBLE))
);
// Then:
assertThat(fun.name(), equalTo(EXPECTED));
}
|
public <E extends SamplingEntry> Iterable<E> getRandomSamples(int sampleCount) {
if (sampleCount < 0) {
throw new IllegalArgumentException("Sample count cannot be a negative value.");
}
if (sampleCount == 0 || size() == 0) {
return Collections.emptyList();
}
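        // Samples are drawn lazily as the returned iterable is consumed (see LazySamplingEntryIterableIterator).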
return new LazySamplingEntryIterableIterator<>(sampleCount);
}
|
@Test
public void test_getRandomSamples_whenSampleCountIsGreaterThenCapacity() {
final int entryCount = 10;
final int sampleCount = 100;
map = new SampleableConcurrentHashMap<>(entryCount);
// put single entry
map.put(1, 1);
Iterable<SampleableConcurrentHashMap.SamplingEntry<Integer, Integer>> samples = map.getRandomSamples(sampleCount);
Iterator<SampleableConcurrentHashMap.SamplingEntry<Integer, Integer>> iterator = samples.iterator();
assertTrue(iterator.hasNext());
assertNotNull(iterator.next());
assertFalse(iterator.hasNext());
}
|
boolean valid(int nodeId, MetadataImage image) {
TopicImage topicImage = image.topics().getTopic(topicIdPartition.topicId());
if (topicImage == null) {
return false; // The topic has been deleted.
}
PartitionRegistration partition = topicImage.partitions().get(topicIdPartition.partitionId());
if (partition == null) {
return false; // The partition no longer exists.
}
// Check if this broker is still a replica.
return Replicas.contains(partition.replicas, nodeId);
}
|
@Test
public void testAssignmentReplicaNotOnBrokerIsNotValid() {
assertFalse(new Assignment(
new TopicIdPartition(Uuid.fromString("rTudty6ITOCcO_ldVyzZYg"), 0),
Uuid.fromString("rzRT8XZaSbKsP6j238zogg"),
0,
NoOpRunnable.INSTANCE).valid(3, TEST_IMAGE));
}
|
public <T> List<T> fromList(final String json, final Class<T> clazz) {
return GSON.fromJson(json, TypeToken.getParameterized(List.class, clazz).getType());
}
|
@Test
public void testFromList() {
List<String> testList = ImmutableList.of("123", "test", "测试");
String testJson = "[\"123\",\"test\",\"测试\"]";
assertEquals(testList, GsonUtils.getInstance().fromList(testJson, String.class));
}
|
public static Ip4Prefix valueOf(int address, int prefixLength) {
return new Ip4Prefix(Ip4Address.valueOf(address), prefixLength);
}
|
@Test(expected = IllegalArgumentException.class)
public void testInvalidValueOfStringTooLongPrefixLengthIPv4() {
        Ip4Prefix ipPrefix = Ip4Prefix.valueOf("1.2.3.4/33");
}
|
public static Result label(long durationInMillis) {
double nbSeconds = durationInMillis / 1000.0;
double nbMinutes = nbSeconds / 60;
double nbHours = nbMinutes / 60;
double nbDays = nbHours / 24;
double nbYears = nbDays / 365;
return getMessage(nbSeconds, nbMinutes, nbHours, nbDays, nbYears);
}
|
@Test
public void year_ago() {
DurationLabel.Result result = DurationLabel.label(now() - ago(14 * MONTH));
assertThat(result.key()).isEqualTo("duration.year");
assertThat(result.value()).isNull();
}
|
@Override
public void showPreviewForKey(
Keyboard.Key key, Drawable icon, View parentView, PreviewPopupTheme previewPopupTheme) {
KeyPreview popup = getPopupForKey(key, parentView, previewPopupTheme);
Point previewPosition =
mPositionCalculator.calculatePositionForPreview(
key, previewPopupTheme, getLocationInWindow(parentView));
popup.showPreviewForKey(key, icon, previewPosition);
}
|
@Test
public void testReuseForTheSameKey() {
KeyPreviewsManager underTest =
new KeyPreviewsManager(getApplicationContext(), mPositionCalculator, 3);
underTest.showPreviewForKey(mTestKeys[0], "y", mKeyboardView, mTheme);
final PopupWindow firstPopupWindow = getLatestCreatedPopupWindow();
Assert.assertNotNull(firstPopupWindow);
underTest.showPreviewForKey(mTestKeys[0], "y", mKeyboardView, mTheme);
Assert.assertSame(firstPopupWindow, getLatestCreatedPopupWindow());
}
|
@Override
public CompletableFuture<Void> closeAsync() {
synchronized (lock) {
if (isShutdown) {
return terminationFuture;
} else {
isShutdown = true;
final Collection<CompletableFuture<Void>> terminationFutures = new ArrayList<>(3);
final Time gracePeriod = Time.seconds(1L);
if (metricQueryServiceRpcService != null) {
final CompletableFuture<Void> metricQueryServiceRpcServiceTerminationFuture =
metricQueryServiceRpcService.closeAsync();
terminationFutures.add(metricQueryServiceRpcServiceTerminationFuture);
}
Throwable throwable = null;
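                // Close each reporter synchronously, keeping the first failure and suppressing the rest.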
for (ReporterAndSettings reporterAndSettings : reporters) {
try {
reporterAndSettings.getReporter().close();
} catch (Throwable t) {
throwable = ExceptionUtils.firstOrSuppressed(t, throwable);
}
}
reporters.clear();
if (throwable != null) {
terminationFutures.add(
FutureUtils.completedExceptionally(
new FlinkException(
"Could not shut down the metric reporters properly.",
throwable)));
}
final CompletableFuture<Void> reporterExecutorShutdownFuture =
ExecutorUtils.nonBlockingShutdown(
gracePeriod.toMilliseconds(),
TimeUnit.MILLISECONDS,
reporterScheduledExecutor);
terminationFutures.add(reporterExecutorShutdownFuture);
final CompletableFuture<Void> viewUpdaterExecutorShutdownFuture =
ExecutorUtils.nonBlockingShutdown(
gracePeriod.toMilliseconds(),
TimeUnit.MILLISECONDS,
viewUpdaterScheduledExecutor);
terminationFutures.add(viewUpdaterExecutorShutdownFuture);
FutureUtils.completeAll(terminationFutures)
.whenComplete(
(Void ignored, Throwable error) -> {
if (error != null) {
terminationFuture.completeExceptionally(error);
} else {
terminationFuture.complete(null);
}
});
return terminationFuture;
}
}
}
|
@Test
void testReporterScheduling() throws Exception {
MetricConfig config = new MetricConfig();
config.setProperty("arg1", "hello");
config.setProperty(MetricOptions.REPORTER_INTERVAL.key(), "50 MILLISECONDS");
final ReportCountingReporter reporter = new ReportCountingReporter();
MetricRegistryImpl registry =
new MetricRegistryImpl(
MetricRegistryTestUtils.defaultMetricRegistryConfiguration(),
Collections.singletonList(
ReporterSetup.forReporter("test", config, reporter)));
long start = System.currentTimeMillis();
// only start counting from now on
reporter.resetCount();
for (int x = 0; x < 10; x++) {
Thread.sleep(100);
int reportCount = reporter.getReportCount();
long curT = System.currentTimeMillis();
            /**
             * Within a given time-frame T only T/50 reports may be triggered due to the interval
             * between reports. This value however does not take the first triggered report into
             * account (=> +1). Furthermore we have to account for the mis-alignment between reports
             * being triggered and our time measurement (=> +1); for T=200 a total of 4-6 reports
             * may have been triggered depending on whether the end of the interval for the first
             * report ends before or after T=50.
             */
long maxAllowedReports = (curT - start) / 50 + 2;
assertThat(maxAllowedReports)
.as("Too many reports were triggered.")
.isGreaterThanOrEqualTo(reportCount);
}
assertThat(reporter.getReportCount()).as("No report was triggered.").isGreaterThan(0);
registry.closeAsync().get();
}
|
@Override
public ExecuteResult execute(final ServiceContext serviceContext, final ConfiguredKsqlPlan plan,
final boolean restoreInProgress) {
try {
final ExecuteResult result = EngineExecutor
.create(primaryContext, serviceContext, plan.getConfig())
.execute(plan.getPlan(), restoreInProgress);
return result;
} catch (final KsqlStatementException e) {
throw e;
} catch (final KsqlException e) {
// add the statement text to the KsqlException
throw new KsqlStatementException(
e.getMessage(),
e.getMessage(),
plan.getPlan().getStatementText(),
e.getCause()
);
}
}
|
@Test
public void shouldNotShowHintWhenFailingToDropNonExistingStream() {
// Given:
setupKsqlEngineWithSharedRuntimeEnabled();
KsqlEngineTestUtil.execute(
serviceContext,
ksqlEngine,
"create table \"bar\" as select * from test2;",
ksqlConfig,
Collections.emptyMap()
);
// When:
final KsqlStatementException e = assertThrows(
KsqlStatementException.class,
() -> KsqlEngineTestUtil.execute(
serviceContext,
ksqlEngine,
"drop stream bar;",
ksqlConfig,
Collections.emptyMap()
)
);
// Then:
assertThat(e, rawMessage(is(
"Stream BAR does not exist.")));
assertThat(e, statementText(is("drop stream bar;")));
}
|
public final HttpClient doOnError(BiConsumer<? super HttpClientRequest, ? super Throwable> doOnRequestError,
BiConsumer<? super HttpClientResponse, ? super Throwable> doOnResponseError) {
Objects.requireNonNull(doOnRequestError, "doOnRequestError");
Objects.requireNonNull(doOnResponseError, "doOnResponseError");
HttpClient dup = duplicate();
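        // Compose with any callbacks registered earlier so previous consumers still fire first.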
@SuppressWarnings("unchecked")
BiConsumer<HttpClientRequest, Throwable> currentRequestError =
(BiConsumer<HttpClientRequest, Throwable>) configuration().doOnRequestError;
dup.configuration().doOnRequestError =
currentRequestError == null ? doOnRequestError : currentRequestError.andThen(doOnRequestError);
@SuppressWarnings("unchecked")
BiConsumer<HttpClientResponse, Throwable> currentResponseError =
(BiConsumer<HttpClientResponse, Throwable>) configuration().doOnResponseError;
dup.configuration().doOnResponseError =
currentResponseError == null ? doOnResponseError : currentResponseError.andThen(doOnResponseError);
return dup;
}
|
@Test
void doOnError() {
disposableServer =
createServer()
.handle((req, resp) -> {
if (req.requestHeaders().contains("during")) {
return resp.sendString(Flux.just("test").hide())
.then(Mono.error(new RuntimeException("test")));
}
throw new RuntimeException("test");
})
.bindNow();
HttpClient client = createHttpClientForContextWithPort();
doOnError(client.headers(h -> h.add("before", "test")));
doOnError(client.headersWhen(h -> Mono.just(h.add("before", "test"))));
}
|
public String asString(Map<String, ValueReference> parameters) {
switch (valueType()) {
case STRING:
return String.class.cast(value());
case PARAMETER:
return asType(parameters, String.class);
default:
throw new IllegalStateException("Expected value reference of type STRING but got " + valueType());
}
}
|
@Test
public void asString() {
assertThat(ValueReference.of("Test").asString(Collections.emptyMap())).isEqualTo("Test");
assertThatThrownBy(() -> ValueReference.of(false).asString(Collections.emptyMap()))
.isInstanceOf(IllegalStateException.class)
.hasMessage("Expected value reference of type STRING but got BOOLEAN");
}
|
public List<Supplier<PageProjectionWithOutputs>> compileProjections(
SqlFunctionProperties sqlFunctionProperties,
Map<SqlFunctionId, SqlInvokedFunction> sessionFunctions,
List<? extends RowExpression> projections,
boolean isOptimizeCommonSubExpression,
Optional<String> classNameSuffix)
{
if (isOptimizeCommonSubExpression) {
ImmutableList.Builder<Supplier<PageProjectionWithOutputs>> pageProjections = ImmutableList.builder();
ImmutableMap.Builder<RowExpression, Integer> expressionsWithPositionBuilder = ImmutableMap.builder();
Set<RowExpression> expressionCandidates = new HashSet<>();
for (int i = 0; i < projections.size(); i++) {
RowExpression projection = projections.get(i);
// Duplicate expressions are not expected here in general due to duplicate assignments pruning in query optimization, hence we skip CSE for them to allow for a
// simpler implementation (and duplicate projections in expressionsWithPositionBuilder will throw exception when calling expressionsWithPositionBuilder.build())
if (projection instanceof ConstantExpression || projection instanceof InputReferenceExpression || expressionCandidates.contains(projection)) {
pageProjections.add(toPageProjectionWithOutputs(compileProjection(sqlFunctionProperties, sessionFunctions, projection, classNameSuffix), new int[] {i}));
}
else {
expressionsWithPositionBuilder.put(projection, i);
expressionCandidates.add(projection);
}
}
Map<RowExpression, Integer> expressionsWithPosition = expressionsWithPositionBuilder.build();
Map<List<RowExpression>, Boolean> projectionsPartitionedByCSE = getExpressionsPartitionedByCSE(expressionsWithPosition.keySet(), MAX_PROJECTION_GROUP_SIZE);
for (Map.Entry<List<RowExpression>, Boolean> entry : projectionsPartitionedByCSE.entrySet()) {
if (entry.getValue()) {
pageProjections.add(toPageProjectionWithOutputs(
compileProjectionCached(sqlFunctionProperties, sessionFunctions, entry.getKey(), true, classNameSuffix),
toIntArray(entry.getKey().stream().map(expressionsWithPosition::get).collect(toImmutableList()))));
}
else {
verify(entry.getKey().size() == 1, "Expect non-cse expression list to only have one element");
RowExpression projection = entry.getKey().get(0);
pageProjections.add(toPageProjectionWithOutputs(
compileProjection(sqlFunctionProperties, sessionFunctions, projection, classNameSuffix),
new int[] {expressionsWithPosition.get(projection)}));
}
}
return pageProjections.build();
}
return IntStream.range(0, projections.size())
.mapToObj(outputChannel -> toPageProjectionWithOutputs(
compileProjection(sqlFunctionProperties, sessionFunctions, projections.get(outputChannel), classNameSuffix),
new int[] {outputChannel}))
.collect(toImmutableList());
}
|
@Test
public void testCommonSubExpressionInProjection()
{
PageFunctionCompiler functionCompiler = new PageFunctionCompiler(createTestMetadataManager(), 0);
List<Supplier<PageProjectionWithOutputs>> pageProjectionsCSE = functionCompiler.compileProjections(SESSION.getSqlFunctionProperties(), ImmutableList.of(ADD_X_Y, ADD_X_Y_Z), true, Optional.empty());
assertEquals(pageProjectionsCSE.size(), 1);
List<Supplier<PageProjectionWithOutputs>> pageProjectionsNoCSE = functionCompiler.compileProjections(SESSION.getSqlFunctionProperties(), ImmutableList.of(ADD_X_Y, ADD_X_Y_Z), false, Optional.empty());
assertEquals(pageProjectionsNoCSE.size(), 2);
Page input = createLongBlockPage(3, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9);
List<Block> cseResult = project(pageProjectionsCSE.get(0).get().getPageProjection(), input, SelectedPositions.positionsRange(0, input.getPositionCount()));
assertEquals(cseResult.size(), 2);
List<Block> noCseResult1 = project(pageProjectionsNoCSE.get(0).get().getPageProjection(), input, SelectedPositions.positionsRange(0, input.getPositionCount()));
assertEquals(noCseResult1.size(), 1);
List<Block> noCseResult2 = project(pageProjectionsNoCSE.get(1).get().getPageProjection(), input, SelectedPositions.positionsRange(0, input.getPositionCount()));
assertEquals(noCseResult2.size(), 1);
checkBlockEqual(cseResult.get(0), noCseResult1.get(0));
checkBlockEqual(cseResult.get(1), noCseResult2.get(0));
}
|
static int run(File buildResult, Path root) throws IOException {
// parse included dependencies from build output
final Map<String, Set<Dependency>> modulesWithBundledDependencies =
combineAndFilterFlinkDependencies(
ShadeParser.parseShadeOutput(buildResult.toPath()),
DependencyParser.parseDependencyCopyOutput(buildResult.toPath()));
final Set<String> deployedModules = DeployParser.parseDeployOutput(buildResult);
LOG.info(
"Extracted "
+ deployedModules.size()
+ " modules that were deployed and "
+ modulesWithBundledDependencies.keySet().size()
+ " modules which bundle dependencies with a total of "
                        + modulesWithBundledDependencies.values().stream().mapToInt(Set::size).sum()
+ " dependencies");
// find modules producing a shaded-jar
List<Path> noticeFiles = findNoticeFiles(root);
LOG.info("Found {} NOTICE files to check", noticeFiles.size());
final Map<String, Optional<NoticeContents>> moduleToNotice =
noticeFiles.stream()
.collect(
Collectors.toMap(
NoticeFileChecker::getModuleFromNoticeFile,
noticeFile -> {
try {
return NoticeParser.parseNoticeFile(noticeFile);
} catch (IOException e) {
// some machine issue
throw new RuntimeException(e);
}
}));
return run(modulesWithBundledDependencies, deployedModules, moduleToNotice);
}
|
@Test
void testRunRejectsIncorrectNotice() throws IOException {
final String moduleName = "test";
final Dependency bundledDependency = Dependency.create("a", "b", "c", null);
final Map<String, Set<Dependency>> bundleDependencies = new HashMap<>();
bundleDependencies.put(moduleName, Collections.singleton(bundledDependency));
final Set<String> deployedModules = Collections.singleton(moduleName);
final Optional<NoticeContents> emptyNotice =
Optional.of(new NoticeContents(moduleName, Collections.emptyList()));
assertThat(
NoticeFileChecker.run(
bundleDependencies,
deployedModules,
Collections.singletonMap(moduleName, emptyNotice)))
.isEqualTo(1);
}
|
@Override
public InetAddress address(String inetHost, ResolvedAddressTypes resolvedAddressTypes) {
return firstAddress(addresses(inetHost, resolvedAddressTypes));
}
|
@Test
public void shouldPickIpv6WhenBothAreDefinedButIpv6IsPreferred() {
HostsFileEntriesProvider.Parser parser = givenHostsParserWith(
LOCALHOST_V4_ADDRESSES,
LOCALHOST_V6_ADDRESSES
);
DefaultHostsFileEntriesResolver resolver = new DefaultHostsFileEntriesResolver(parser, ENTRIES_TTL);
InetAddress address = resolver.address("localhost", ResolvedAddressTypes.IPV6_PREFERRED);
assertThat("Should pick an IPv6 address", address, instanceOf(Inet6Address.class));
}
|
public static Sensor getInvocationSensor(
final Metrics metrics,
final String sensorName,
final String groupName,
final String functionDescription
) {
final Sensor sensor = metrics.sensor(sensorName);
if (sensor.hasMetrics()) {
return sensor;
}
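        // First use only: the avg/max/count/rate metrics below are registered once; later callers hit the early return above.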
final BiFunction<String, String, MetricName> metricNamer = (suffix, descPattern) -> {
final String description = String.format(descPattern, functionDescription);
return metrics.metricName(sensorName + "-" + suffix, groupName, description);
};
sensor.add(
metricNamer.apply("avg", AVG_DESC),
new Avg()
);
sensor.add(
metricNamer.apply("max", MAX_DESC),
new Max()
);
sensor.add(
metricNamer.apply("count", COUNT_DESC),
new WindowedCount()
);
sensor.add(
metricNamer.apply("rate", RATE_DESC),
new Rate(TimeUnit.SECONDS, new WindowedCount())
);
return sensor;
}
|
@Test
public void shouldGetSensorWithCorrectName() {
// When:
FunctionMetrics
.getInvocationSensor(metrics, SENSOR_NAME, GROUP_NAME, FUNC_NAME);
// Then:
verify(metrics).sensor(SENSOR_NAME);
}
|
@SuppressWarnings("unused") // Required for automatic type inference
public static <K> Builder0<K> forClass(final Class<K> type) {
return new Builder0<>();
}
|
@Test(expected = IllegalArgumentException.class)
public void shouldThrowOnDuplicateKey0() {
HandlerMaps.forClass(BaseType.class)
.put(LeafTypeA.class, handler0_1)
.put(LeafTypeA.class, handler0_2);
}
|
@Override
public int compareTo(Block leftBlock, int leftPosition, Block rightBlock, int rightPosition)
{
long leftLow = leftBlock.getLong(leftPosition, 0);
long leftHigh = leftBlock.getLong(leftPosition, SIZE_OF_LONG);
long rightLow = rightBlock.getLong(rightPosition, 0);
long rightHigh = rightBlock.getLong(rightPosition, SIZE_OF_LONG);
return compare(leftLow, leftHigh, rightLow, rightHigh);
}
|
@Test
public void testCompareTo()
{
testCompare("0", "-1234567891.1234567890", 1);
testCompare("1234567890.1234567890", "1234567890.1234567890", 0);
testCompare("1234567890.1234567890", "1234567890.1234567891", -1);
testCompare("1234567890.1234567890", "1234567890.1234567889", 1);
testCompare("1234567890.1234567890", "1234567891.1234567890", -1);
testCompare("1234567890.1234567890", "1234567889.1234567890", 1);
testCompare("0", "1234567891.1234567890", -1);
testCompare("1234567890.1234567890", "0", 1);
testCompare("0", "0", 0);
testCompare("-1234567890.1234567890", "-1234567890.1234567890", 0);
testCompare("-1234567890.1234567890", "-1234567890.1234567891", 1);
testCompare("-1234567890.1234567890", "-1234567890.1234567889", -1);
testCompare("-1234567890.1234567890", "-1234567891.1234567890", 1);
testCompare("-1234567890.1234567890", "-1234567889.1234567890", -1);
testCompare("0", "-1234567891.1234567890", 1);
testCompare("-1234567890.1234567890", "0", -1);
testCompare("-1234567890.1234567890", "1234567890.1234567890", -1);
testCompare("1234567890.1234567890", "-1234567890.1234567890", 1);
}
|
@Override
public List<SocialUserDO> getSocialUserList(Long userId, Integer userType) {
        // Get the bindings
List<SocialUserBindDO> socialUserBinds = socialUserBindMapper.selectListByUserIdAndUserType(userId, userType);
if (CollUtil.isEmpty(socialUserBinds)) {
return Collections.emptyList();
}
        // Get the social users
return socialUserMapper.selectBatchIds(convertSet(socialUserBinds, SocialUserBindDO::getSocialUserId));
}
|
@Test
public void testGetSocialUserList() {
Long userId = 1L;
Integer userType = UserTypeEnum.ADMIN.getValue();
        // mock the social users
        SocialUserDO socialUser = randomPojo(SocialUserDO.class).setType(SocialTypeEnum.GITEE.getType());
        socialUserMapper.insert(socialUser); // can be found
        socialUserMapper.insert(randomPojo(SocialUserDO.class)); // cannot be found
        // mock the bindings
        socialUserBindMapper.insert(randomPojo(SocialUserBindDO.class) // can be queried
                .setUserId(userId).setUserType(userType).setSocialType(SocialTypeEnum.GITEE.getType())
                .setSocialUserId(socialUser.getId()));
        socialUserBindMapper.insert(randomPojo(SocialUserBindDO.class) // cannot be queried
                .setUserId(2L).setUserType(userType).setSocialType(SocialTypeEnum.DINGTALK.getType()));
        // Call the method under test
        List<SocialUserDO> result = socialUserService.getSocialUserList(userId, userType);
        // Assert
assertEquals(1, result.size());
assertPojoEquals(socialUser, result.get(0));
}
|
@Override
public OpenstackNode node(String hostname) {
return osNodeStore.node(hostname);
}
|
@Test
public void testGetNodeByHostname() {
assertTrue(ERR_NOT_FOUND, Objects.equals(
target.node(COMPUTE_2_HOSTNAME), COMPUTE_2));
assertTrue(ERR_NOT_FOUND, Objects.equals(
target.node(COMPUTE_3_HOSTNAME), COMPUTE_3));
assertTrue(ERR_NOT_FOUND, Objects.equals(
target.node(GATEWAY_1_HOSTNAME), GATEWAY_1));
}
|
@VisibleForTesting
boolean hasNoActiveWindows() {
return activeWindows.getActiveAndNewWindows().isEmpty();
}
|
@Test
public void testMergingWithReusedWindow() throws Exception {
Duration allowedLateness = Duration.millis(50);
ReduceFnTester<Integer, Iterable<Integer>, IntervalWindow> tester =
ReduceFnTester.nonCombining(
Sessions.withGapDuration(Duration.millis(10)),
mockTriggerStateMachine,
AccumulationMode.DISCARDING_FIRED_PANES,
allowedLateness,
ClosingBehavior.FIRE_IF_NON_EMPTY);
IntervalWindow mergedWindow = new IntervalWindow(new Instant(1), new Instant(11));
    // One element in one session window.
    tester.injectElements(TimestampedValue.of(1, new Instant(1))); // in [1, 11), gc at 21.
    // Close the trigger, but the garbage collection timer is still pending.
when(mockTriggerStateMachine.shouldFire(anyTriggerContext())).thenReturn(true);
triggerShouldFinish(mockTriggerStateMachine);
tester.advanceInputWatermark(new Instant(15));
tester.fireTimer(mergedWindow, mergedWindow.maxTimestamp(), TimeDomain.EVENT_TIME);
// Another element in the same session window.
// Should be discarded with 'window closed'.
tester.injectElements(TimestampedValue.of(1, new Instant(1))); // in [1, 11), gc at 21.
// And nothing should be left in the active window state.
assertTrue(tester.hasNoActiveWindows());
// Now the garbage collection timer will fire, finding the trigger already closed.
tester.advanceInputWatermark(new Instant(100));
List<WindowedValue<Iterable<Integer>>> output = tester.extractOutput();
assertThat(output.size(), equalTo(1));
assertThat(
output.get(0),
isSingleWindowedValue(
containsInAnyOrder(1),
equalTo(new Instant(1)), // timestamp
equalTo((BoundedWindow) mergedWindow)));
assertThat(
output.get(0).getPane(), equalTo(PaneInfo.createPane(true, true, Timing.ON_TIME, 0, 0)));
}
|
public KafkaMetadataState computeNextMetadataState(KafkaStatus kafkaStatus) {
KafkaMetadataState currentState = metadataState;
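        // Dispatch to the handler for the current state; each one derives the next ZooKeeper/KRaft migration step from the Kafka status.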
metadataState = switch (currentState) {
case KRaft -> onKRaft(kafkaStatus);
case ZooKeeper -> onZooKeeper(kafkaStatus);
case KRaftMigration -> onKRaftMigration(kafkaStatus);
case KRaftDualWriting -> onKRaftDualWriting(kafkaStatus);
case KRaftPostMigration -> onKRaftPostMigration(kafkaStatus);
case PreKRaft -> onPreKRaft(kafkaStatus);
};
if (metadataState != currentState) {
LOGGER.infoCr(reconciliation, "Transitioning metadata state from [{}] to [{}] with strimzi.io/kraft annotation [{}]", currentState, metadataState, kraftAnno);
} else {
LOGGER.debugCr(reconciliation, "Metadata state [{}] with strimzi.io/kraft annotation [{}]", metadataState, kraftAnno);
}
return metadataState;
}
|
@Test
public void testFromKRaftDualWritingToZookeeper() {
Kafka kafka = new KafkaBuilder(KAFKA)
.editMetadata()
.addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, "disabled")
.endMetadata()
.withNewStatus()
.withKafkaMetadataState(KRaftDualWriting)
.endStatus()
.build();
KafkaMetadataStateManager kafkaMetadataStateManager = new KafkaMetadataStateManager(Reconciliation.DUMMY_RECONCILIATION, kafka);
assertEquals(kafkaMetadataStateManager.computeNextMetadataState(kafka.getStatus()), ZooKeeper);
}
|
@Override
public void customize(ServiceInstance serviceInstance, ApplicationModel applicationModel) {
Map<String, String> metadata = serviceInstance.getMetadata();
String propertyName = resolveMetadataPropertyName(serviceInstance);
String propertyValue = resolveMetadataPropertyValue(applicationModel);
if (!isBlank(propertyName) && !isBlank(propertyValue)) {
metadata.put(propertyName, propertyValue);
}
}
|
@Test
void test() {
DubboBootstrap providerBootstrap = DubboBootstrap.newInstance();
ServiceConfig<DemoService> serviceConfig = new ServiceConfig<>();
serviceConfig.setInterface(DemoService.class);
serviceConfig.setRef(new DemoServiceImpl());
serviceConfig.setDelay(1000);
ApplicationConfig applicationConfig = new ApplicationConfig("MetadataServiceURLParamsMetadataCustomizerTest");
applicationConfig.setMetadataType(DEFAULT_METADATA_STORAGE_TYPE);
providerBootstrap
.application(applicationConfig)
.registry(new RegistryConfig("N/A"))
.protocol(new ProtocolConfig("dubbo", 2002))
.service(serviceConfig);
// will start exporter.export()
providerBootstrap.start();
ApplicationModel applicationModel = providerBootstrap.getApplicationModel();
MetadataServiceURLParamsMetadataCustomizer customizer = new MetadataServiceURLParamsMetadataCustomizer();
customizer.customize(instance, applicationModel);
String val = instance.getMetadata().get(METADATA_SERVICE_URL_PARAMS_PROPERTY_NAME);
Assertions.assertNotNull(val);
Map<String, String> map = JsonUtils.toJavaObject(val, Map.class);
Assertions.assertEquals(map.get(PORT_KEY), String.valueOf(metadataServiceURL.getPort()));
Assertions.assertEquals(map.get(PROTOCOL_KEY), metadataServiceURL.getProtocol());
Assertions.assertEquals(map.get(VERSION_KEY), metadataServiceURL.getVersion());
Assertions.assertFalse(map.containsKey(TIMESTAMP_KEY));
Assertions.assertFalse(map.containsKey(GROUP_KEY));
Assertions.assertFalse(map.containsKey(APPLICATION_KEY));
}
|
@Override
public TbPair<Boolean, JsonNode> upgrade(int fromVersion, JsonNode oldConfiguration) throws TbNodeException {
if (fromVersion == 0) {
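            // Version 0 configs are migrated by flipping the persisted search direction (TO <-> FROM).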
var newConfigObjectNode = (ObjectNode) oldConfiguration;
var directionPropertyName = "direction";
if (!newConfigObjectNode.has(directionPropertyName)) {
throw new TbNodeException("property to update: '" + directionPropertyName + "' doesn't exists in configuration!");
}
String direction = newConfigObjectNode.get(directionPropertyName).asText();
if (EntitySearchDirection.TO.name().equals(direction)) {
newConfigObjectNode.put(directionPropertyName, EntitySearchDirection.FROM.name());
return new TbPair<>(true, newConfigObjectNode);
}
if (EntitySearchDirection.FROM.name().equals(direction)) {
newConfigObjectNode.put(directionPropertyName, EntitySearchDirection.TO.name());
return new TbPair<>(true, newConfigObjectNode);
}
throw new TbNodeException("property to update: '" + directionPropertyName + "' has invalid value!");
}
return new TbPair<>(false, oldConfiguration);
}
|
@Test
void givenOldConfig_whenUpgrade_thenShouldReturnTrueResultWithNewConfig() throws Exception {
// GIVEN
var config = new TbCheckRelationNodeConfiguration().defaultConfiguration();
config.setEntityType(ORIGINATOR_ID.getEntityType().name());
config.setEntityId(ORIGINATOR_ID.getId().toString());
String oldConfig = "{\"checkForSingleEntity\":true,\"direction\":\"TO\",\"entityType\":\"" + config.getEntityType() + "\",\"entityId\":\"" + config.getEntityId() + "\",\"relationType\":\"Contains\"}";
JsonNode configJson = JacksonUtil.toJsonNode(oldConfig);
// WHEN
TbPair<Boolean, JsonNode> upgrade = node.upgrade(0, configJson);
// THEN
assertTrue(upgrade.getFirst());
assertEquals(config, JacksonUtil.treeToValue(upgrade.getSecond(), config.getClass()));
}
|
public static MqttMessage newMessage(MqttFixedHeader mqttFixedHeader, Object variableHeader, Object payload) {
switch (mqttFixedHeader.messageType()) {
            case CONNECT:
return new MqttConnectMessage(
mqttFixedHeader,
(MqttConnectVariableHeader) variableHeader,
(MqttConnectPayload) payload);
case CONNACK:
return new MqttConnAckMessage(mqttFixedHeader, (MqttConnAckVariableHeader) variableHeader);
case SUBSCRIBE:
return new MqttSubscribeMessage(
mqttFixedHeader,
(MqttMessageIdVariableHeader) variableHeader,
(MqttSubscribePayload) payload);
case SUBACK:
return new MqttSubAckMessage(
mqttFixedHeader,
(MqttMessageIdVariableHeader) variableHeader,
(MqttSubAckPayload) payload);
case UNSUBACK:
return new MqttUnsubAckMessage(
mqttFixedHeader,
(MqttMessageIdVariableHeader) variableHeader,
(MqttUnsubAckPayload) payload);
case UNSUBSCRIBE:
return new MqttUnsubscribeMessage(
mqttFixedHeader,
(MqttMessageIdVariableHeader) variableHeader,
(MqttUnsubscribePayload) payload);
case PUBLISH:
return new MqttPublishMessage(
mqttFixedHeader,
(MqttPublishVariableHeader) variableHeader,
(ByteBuf) payload);
case PUBACK:
//Having MqttPubReplyMessageVariableHeader or MqttMessageIdVariableHeader
return new MqttPubAckMessage(mqttFixedHeader, (MqttMessageIdVariableHeader) variableHeader);
case PUBREC:
case PUBREL:
case PUBCOMP:
//Having MqttPubReplyMessageVariableHeader or MqttMessageIdVariableHeader
return new MqttMessage(mqttFixedHeader, variableHeader);
case PINGREQ:
case PINGRESP:
return new MqttMessage(mqttFixedHeader);
case DISCONNECT:
case AUTH:
//Having MqttReasonCodeAndPropertiesVariableHeader
return new MqttMessage(mqttFixedHeader,
variableHeader);
default:
throw new IllegalArgumentException("unknown message type: " + mqttFixedHeader.messageType());
}
}
|
@Test
public void createSubscribeV5() {
MqttFixedHeader fixedHeader = new MqttFixedHeader(MqttMessageType.SUBSCRIBE, false, AT_LEAST_ONCE, false, 0);
MqttProperties properties = new MqttProperties();
properties.add(new MqttProperties.UserProperty("correlationId", "111222"));
MqttMessageIdAndPropertiesVariableHeader variableHeader =
new MqttMessageIdAndPropertiesVariableHeader(SAMPLE_MESSAGE_ID, properties);
List<MqttTopicSubscription> subscriptions = new ArrayList<MqttTopicSubscription>();
subscriptions.add(new MqttTopicSubscription(SAMPLE_TOPIC, MqttQoS.AT_MOST_ONCE));
MqttSubscribePayload payload = new MqttSubscribePayload(subscriptions);
MqttMessage subscribe = MqttMessageFactory.newMessage(fixedHeader, variableHeader, payload);
assertEquals(MqttMessageType.SUBSCRIBE, subscribe.fixedHeader().messageType());
MqttMessageIdAndPropertiesVariableHeader actualVariableHeader =
(MqttMessageIdAndPropertiesVariableHeader) subscribe.variableHeader();
assertEquals(SAMPLE_MESSAGE_ID, actualVariableHeader.messageId());
validateProperties(properties, actualVariableHeader.properties());
MqttSubscribePayload actualPayload = (MqttSubscribePayload) subscribe.payload();
validateSubscribePayload(payload, actualPayload);
}
|
@Activate
public void activate() {
eventHandler = newSingleThreadExecutor(groupedThreads("onos/security/store", "event-handler", log));
states = storageService.<ApplicationId, SecurityInfo>consistentMapBuilder()
.withName("smonos-sdata")
.withSerializer(STATE_SERIALIZER)
.build();
states.addListener(statesListener, eventHandler);
violations = storageService.<ApplicationId, Set<Permission>>eventuallyConsistentMapBuilder()
.withName("smonos-rperms")
.withSerializer(VIOLATION_SERIALIZER)
.withTimestampProvider((k, v) -> clockService.getTimestamp())
.build();
localBundleAppDirectory = new ConcurrentHashMap<>();
localAppBundleDirectory = new ConcurrentHashMap<>();
log.info("Started");
}
|
@Test
public void testActivate() {
eventHandler = newSingleThreadExecutor(groupedThreads("onos/security/store", "event-handler", log));
assertNotNull(eventHandler);
}
|