| focal_method | test_case |
|---|---|
@Override
public KTable<Windowed<K>, V> aggregate(final Initializer<V> initializer,
final Merger<? super K, V> sessionMerger) {
return aggregate(initializer, sessionMerger, Materialized.with(null, null));
}
|
@Test
public void shouldNotHaveNullNamedOnAggregate() {
assertThrows(NullPointerException.class, () -> windowedCogroupedStream.aggregate(MockInitializer.STRING_INIT,
sessionMerger, null, Materialized.as("test")));
}
|
public static Set<ConfigKey<?>> getAllConfigsProduced(Class<? extends ConfigProducer> producerClass, String configId) {
// TypeToken is @Beta in guava, so consider implementing a simple recursive method instead.
TypeToken<? extends ConfigProducer>.TypeSet interfaces = TypeToken.of(producerClass).getTypes().interfaces();
return interfaces.rawTypes().stream()
.filter(ReflectionUtil::isConcreteProducer)
.map(i -> createConfigKeyFromInstance(i.getEnclosingClass(), configId))
.collect(Collectors.toCollection(() -> new LinkedHashSet<>()));
}
|
@Test
void getAllConfigsProduced_includes_configs_produced_by_super_class() {
Set<ConfigKey<?>> configs = getAllConfigsProduced(ConcreteProducer.class, "foo");
assertEquals(1, configs.size());
assertTrue(configs.contains(new ConfigKey<>(SimpletypesConfig.CONFIG_DEF_NAME, "foo", SimpletypesConfig.CONFIG_DEF_NAMESPACE)));
}
|
public static int checkPositive(int i, String name) {
if (i <= INT_ZERO) {
throw new IllegalArgumentException(name + " : " + i + " (expected: > 0)");
}
return i;
}
|
@Test
public void testCheckPositiveLongString() {
Exception actualEx = null;
try {
ObjectUtil.checkPositive(POS_ONE_LONG, NUM_POS_NAME);
} catch (Exception e) {
actualEx = e;
}
assertNull(actualEx, TEST_RESULT_NULLEX_NOK);
actualEx = null;
try {
ObjectUtil.checkPositive(ZERO_LONG, NUM_ZERO_NAME);
} catch (Exception e) {
actualEx = e;
}
assertNotNull(actualEx, TEST_RESULT_NULLEX_OK);
assertTrue(actualEx instanceof IllegalArgumentException, TEST_RESULT_EXTYPE_NOK);
actualEx = null;
try {
ObjectUtil.checkPositive(NEG_ONE_LONG, NUM_NEG_NAME);
} catch (Exception e) {
actualEx = e;
}
assertNotNull(actualEx, TEST_RESULT_NULLEX_OK);
assertTrue(actualEx instanceof IllegalArgumentException, TEST_RESULT_EXTYPE_NOK);
}
|
public String[] decodeStringArray(final byte[] parameterBytes, final boolean isBinary) {
ShardingSpherePreconditions.checkState(!isBinary, () -> new UnsupportedSQLOperationException("binary mode"));
String parameterValue = new String(parameterBytes, StandardCharsets.UTF_8);
Collection<String> parameterElements = decodeText(parameterValue);
return parameterElements.toArray(EMPTY_STRING_ARRAY);
}
|
@Test
void assertParseStringArrayNormalTextMode() {
String[] actual = DECODER.decodeStringArray("{\"a\",\"b\"}".getBytes(), false);
assertThat(actual.length, is(2));
assertThat(actual[0], is("a"));
assertThat(actual[1], is("b"));
}
|
@Override
public void addTaskExecutionLogs(List<TaskExecLog> logs) {
if (logs == null || logs.isEmpty()) {
return;
}
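        // de-duplicate identical log entries before writing the batch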
Set<TaskExecLog> taskLogs = new HashSet<>(logs);
try {
int cnt =
withRetryableStatement(
CREATE_TASK_EXECUTION_LOGS_STATEMENT,
statement -> {
for (TaskExecLog taskLog : taskLogs) {
statement.setString(1, taskLog.getTaskId());
statement.setLong(2, taskLog.getCreatedTime());
statement.setString(3, taskLog.getLog());
statement.addBatch();
}
int[] res = statement.executeBatch();
return res.length;
});
LOG.debug(
"Created {}/{} task execution logs for a task with id {}",
cnt,
logs.size(),
logs.get(0).getTaskId());
} catch (Exception e) {
LOG.warn(
"Fail creating {} task execution logs for a task with id {} due to {}",
logs.size(),
logs.get(0).getTaskId(),
e.getMessage());
// ignore error as the execution log is not in the critical path.
}
}
|
@Test
public void addTaskExecutionLogsTest() {
List<TaskExecLog> logs = new ArrayList<>();
logs.add(createLog(TEST_TASK_ID_1, "log1"));
logs.add(createLog(TEST_TASK_ID_1, "log2"));
logs.add(createLog(TEST_TASK_ID_1, "log3"));
dao.addTaskExecutionLogs(logs);
List<TaskExecLog> indexedLogs =
tryFindResults(() -> dao.getTaskExecutionLogs(TEST_TASK_ID_1), 3);
assertEquals(3, indexedLogs.size());
assertTrue("Not all logs was indexed", indexedLogs.containsAll(logs));
}
|
public static FileMetadata fromJson(String json) {
return JsonUtil.parse(json, FileMetadataParser::fromJson);
}
|
@Test
public void testFieldNumberOutOfRange() {
assertThatThrownBy(
() ->
FileMetadataParser.fromJson(
"{\n"
+ " \"blobs\" : [ {\n"
+ " \"type\" : \"type-a\",\n"
+ " \"fields\" : [ "
+ (Integer.MAX_VALUE + 1L)
+ " ],\n"
+ " \"offset\" : 4,\n"
+ " \"length\" : 16\n"
+ " } ]\n"
+ "}"))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Cannot parse integer from non-int value in fields: 2147483648");
}
|
public void notify(PluginJarChangeListener listener, Collection<BundleOrPluginFileDetails> knownPluginFiles, Collection<BundleOrPluginFileDetails> currentPluginFiles) {
    List<BundleOrPluginFileDetails> oldPlugins = new ArrayList<>(knownPluginFiles);
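    // anything in the known set that is missing from the current set is reported as removed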
subtract(oldPlugins, currentPluginFiles).forEach(listener::pluginJarRemoved);
currentPluginFiles.forEach(newPlugin -> {
int index = oldPlugins.indexOf(newPlugin);
if (index < 0) {
listener.pluginJarAdded(newPlugin);
} else if (newPlugin.doesTimeStampDiffer(oldPlugins.get(index))) {
listener.pluginJarUpdated(newPlugin);
}
});
}
|
@Test
void shouldNotifyRemovedBeforeAddWhenPluginJarIsRenamed() {
final PluginJarChangeListener listener = mock(PluginJarChangeListener.class);
File pluginJarOne = mock(File.class);
File pluginJarTwo = mock(File.class);
File pluginWorkDir = mock(File.class);
BundleOrPluginFileDetails pluginOne = new BundleOrPluginFileDetails(pluginJarOne, false, pluginWorkDir);
BundleOrPluginFileDetails pluginTwo = new BundleOrPluginFileDetails(pluginJarTwo, false, pluginWorkDir);
List<BundleOrPluginFileDetails> knownPlugins = List.of(pluginOne);
List<BundleOrPluginFileDetails> newPlugins = List.of(pluginTwo);
when(pluginJarOne.getName()).thenReturn("plugin-1.0.0.jar");
when(pluginJarTwo.getName()).thenReturn("plugin-2.0.0.jar");
pluginChangeNotifier.notify(listener, knownPlugins, newPlugins);
InOrder inOrder = inOrder(listener);
inOrder.verify(listener).pluginJarRemoved(pluginOne);
inOrder.verify(listener).pluginJarAdded(pluginTwo);
verify(listener, never()).pluginJarUpdated(any());
}
|
public List<T> getUnmodifiableData() {
return Collections.unmodifiableList(scesimData);
}
|
@Test
public void getUnmodifiableData() {
assertThat(model.getUnmodifiableData()).isNotNull().hasSize(SCENARIO_DATA);
}
|
public List<Entry> getEntries() {
return new ArrayList<>(actions.values());
}
|
@Test
public void actions_with_multiple_clusters_of_same_type() {
ConfigChangeActions actions = new ConfigChangeActionsBuilder().
restart(CHANGE_MSG, CLUSTER, CLUSTER_TYPE, SERVICE_TYPE, SERVICE_NAME).
restart(CHANGE_MSG, CLUSTER_2, CLUSTER_TYPE, SERVICE_TYPE, SERVICE_NAME).build();
List<RestartActions.Entry> entries = actions.getRestartActions().getEntries();
assertThat(entries.size(), is(2));
assertThat(toString(entries.get(0)), equalTo("search.bar.searchnode:[baz][change]"));
assertThat(toString(entries.get(1)), equalTo("search.foo.searchnode:[baz][change]"));
}
|
public static void checkMrzCheckDigit(String mrz) {
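        // the final character of the MRZ is the check digit computed over all preceding characters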
final char checkDigit = calculateCheckDigit(mrz.substring(0, mrz.length() - 1));
if (checkDigit != mrz.charAt(mrz.length() - 1)) {
throw new VerificationException(String.format(
"The check digit of MRZ is not correct [%c != %c]",
checkDigit, mrz.charAt(mrz.length() - 1)));
}
}
|
@Test
public void checkMrzCheckDigit() {
MrzUtils.checkMrzCheckDigit("PPPPPPPPPPPPPPPPPPPPPPPPPPPPPP");
MrzUtils.checkMrzCheckDigit("PPPPPPPPP");
assertThrows(VerificationException.class, () -> {
MrzUtils.checkMrzCheckDigit("PPPPPPPPPPPPPPPPPPPPPPPPPPPPPP");
});
}
|
public static void main(String[] args) {
ParameterObject params = ParameterObject.newBuilder()
.withType("sneakers")
.sortBy("brand")
.build();
LOGGER.info(params.toString());
LOGGER.info(new SearchService().search(params));
}
|
@Test
void shouldExecuteApplicationWithoutException() {
assertDoesNotThrow(() -> App.main(new String[]{}));
}
|
public static void upgradeConfigurationAndVersion(RuleNode node, RuleNodeClassInfo nodeInfo) {
JsonNode oldConfiguration = node.getConfiguration();
int configurationVersion = node.getConfigurationVersion();
int currentVersion = nodeInfo.getCurrentVersion();
var configClass = nodeInfo.getAnnotation().configClazz();
if (oldConfiguration == null || !oldConfiguration.isObject()) {
log.warn("Failed to upgrade rule node with id: {} type: {} fromVersion: {} toVersion: {}. " +
"Current configuration is null or not a json object. " +
"Going to set default configuration ... ",
node.getId(), node.getType(), configurationVersion, currentVersion);
node.setConfiguration(getDefaultConfig(configClass));
} else {
var tbVersionedNode = getTbVersionedNode(nodeInfo);
try {
JsonNode queueName = oldConfiguration.get(QUEUE_NAME);
TbPair<Boolean, JsonNode> upgradeResult = tbVersionedNode.upgrade(configurationVersion, oldConfiguration);
if (upgradeResult.getFirst()) {
node.setConfiguration(upgradeResult.getSecond());
if (nodeInfo.getAnnotation().hasQueueName() && queueName != null && queueName.isTextual()) {
node.setQueueName(queueName.asText());
}
}
} catch (Exception e) {
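            // upgrade failed: keep the old configuration if it still deserializes into the
            // current config class, otherwise fall back to the default configuration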
try {
JacksonUtil.treeToValue(oldConfiguration, configClass);
} catch (Exception ex) {
log.warn("Failed to upgrade rule node with id: {} type: {} fromVersion: {} toVersion: {}. " +
"Going to set default configuration ... ",
node.getId(), node.getType(), configurationVersion, currentVersion, e);
node.setConfiguration(getDefaultConfig(configClass));
}
}
}
node.setConfigurationVersion(currentVersion);
}
|
@Test
public void testUpgradeRuleNodeConfigurationWithNullNodeConfig() throws Exception {
// GIVEN
var node = new RuleNode();
node.setConfiguration(NullNode.instance);
var nodeInfo = mock(RuleNodeClassInfo.class);
var nodeConfigClazz = TbGetAttributesNodeConfiguration.class;
var annotation = mock(org.thingsboard.rule.engine.api.RuleNode.class);
var defaultConfig = JacksonUtil.valueToTree(nodeConfigClazz.getDeclaredConstructor().newInstance().defaultConfiguration());
when(nodeInfo.getClazz()).thenReturn((Class) TbGetAttributesNode.class);
when(nodeInfo.getCurrentVersion()).thenReturn(1);
when(nodeInfo.getAnnotation()).thenReturn(annotation);
when(annotation.configClazz()).thenReturn((Class) nodeConfigClazz);
// WHEN
TbNodeUpgradeUtils.upgradeConfigurationAndVersion(node, nodeInfo);
// THEN
Assertions.assertThat(node.getConfiguration()).isEqualTo(defaultConfig);
Assertions.assertThat(node.getConfigurationVersion()).isEqualTo(1);
}
|
@Override
public InterpreterResult interpret(String st, InterpreterContext context) {
String[] lines = splitAndRemoveEmpty(st, "\n");
return interpret(lines, context);
}
|
@Test
void catTest() throws IOException {
FileSystemTestUtils.createByteFile(fs, "/testFile", WritePType.MUST_CACHE, 10, 10);
InterpreterResult output = alluxioInterpreter.interpret("cat /testFile", null);
byte[] expected = BufferUtils.getIncreasingByteArray(10);
assertEquals(Code.SUCCESS, output.code());
assertArrayEquals(expected,
output.message().get(0).getData().substring(0,
output.message().get(0).getData().length() - 1).getBytes());
}
|
public static String stripTrailingSlash(String str) {
if (isNullOrEmpty(str)) {
return str;
}
if (str.charAt(str.length() - 1) == '/') {
return str.substring(0, str.length() - 1);
}
return str;
}
|
@Test
void testStripTrailingSlash() {
assertNull(StringUtil.stripTrailingSlash(null));
assertEquals("", StringUtil.stripTrailingSlash(""));
assertEquals("a", StringUtil.stripTrailingSlash("a"));
assertEquals("a", StringUtil.stripTrailingSlash("a/"));
assertEquals("a/a", StringUtil.stripTrailingSlash("a/a"));
assertEquals("a/", StringUtil.stripTrailingSlash("a//"));
}
|
@SuppressWarnings("deprecation")
@Override
public Handle newHandle() {
return new HandleImpl(minIndex, maxIndex, initialIndex, minCapacity, maxCapacity);
}
|
@Test
public void doesNotExceedMaximum() {
AdaptiveRecvByteBufAllocator recvByteBufAllocator = new AdaptiveRecvByteBufAllocator(64, 9000, 9000);
RecvByteBufAllocator.ExtendedHandle handle =
(RecvByteBufAllocator.ExtendedHandle) recvByteBufAllocator.newHandle();
handle.reset(config);
allocReadExpected(handle, alloc, 8192);
}
|
@Udf
public String lpad(
@UdfParameter(description = "String to be padded") final String input,
@UdfParameter(description = "Target length") final Integer targetLen,
@UdfParameter(description = "Padding string") final String padding) {
if (input == null) {
return null;
}
if (padding == null || padding.isEmpty() || targetLen == null || targetLen < 0) {
return null;
}
final StringBuilder sb = new StringBuilder(targetLen + padding.length());
final int padUpTo = Math.max(targetLen - input.length(), 0);
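    // append whole copies of the padding, trim to exactly padUpTo chars, then append the input;
    // the final setLength truncates inputs that are longer than targetLen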
for (int i = 0; i < padUpTo; i += padding.length()) {
sb.append(padding);
}
sb.setLength(padUpTo);
sb.append(input);
sb.setLength(targetLen);
return sb.toString();
}
|
@Test
public void shouldReturnNullForNullInputString() {
final String result = udf.lpad(null, 4, "foo");
assertThat(result, is(nullValue()));
}
|
@Override
@Nullable
public final Data readData() throws IOException {
byte[] bytes = readByteArray();
return bytes == null ? null : new HeapData(bytes);
}
|
@Test
public void testReadData() throws Exception {
byte[] bytesBE = {0, 0, 0, 0, 0, 0, 0, 8, -1, -1, -1, -1, 0, 0, 0, 0, 0, 1, -1, -1, -1, -1};
byte[] bytesLE = {0, 0, 0, 0, 8, 0, 0, 0, -1, -1, -1, -1, 0, 0, 0, 0, 0, 1, -1, -1, -1, -1};
in.init((byteOrder == BIG_ENDIAN ? bytesBE : bytesLE), 0);
in.position(bytesLE.length - 4);
Data nullData = readData(in);
in.position(0);
Data theZeroLengthArray = readData(in);
in.position(4);
Data data = readData(in);
assertNull(nullData);
assertEquals(0, theZeroLengthArray.getType());
assertArrayEquals(new byte[0], theZeroLengthArray.toByteArray());
assertArrayEquals(new byte[]{-1, -1, -1, -1, 0, 0, 0, 0}, data.toByteArray());
}
|
public TreeSelection[] getTreeObjects( final Tree tree, Tree selectionTree, Tree coreObjectsTree ) {
List<TreeSelection> objects = new ArrayList<TreeSelection>();
if ( selectionTree != null && !selectionTree.isDisposed() && tree.equals( selectionTree ) ) {
TreeItem[] selection = selectionTree.getSelection();
for ( int s = 0; s < selection.length; s++ ) {
TreeItem treeItem = selection[s];
String[] path = ConstUI.getTreeStrings( treeItem );
TreeSelection object = null;
switch ( path.length ) {
case 0:
break;
case 1: // ------complete-----
if ( path[0].equals( Spoon.STRING_TRANSFORMATIONS ) ) { // the top level Transformations entry
object = new TreeSelection( path[0], TransMeta.class );
}
if ( path[0].equals( Spoon.STRING_JOBS ) ) { // the top level Jobs entry
object = new TreeSelection( path[0], JobMeta.class );
}
break;
case 2: // ------complete-----
          if ( path[0].equals( Spoon.STRING_BUILDING_BLOCKS ) ) { // the top level Building Blocks entry
if ( path[1].equals( Spoon.STRING_TRANS_BASE ) ) {
object = new TreeSelection( path[1], PluginInterface.class );
}
}
if ( path[0].equals( Spoon.STRING_TRANSFORMATIONS ) ) { // Transformation title
object = new TreeSelection( path[1], spoon.delegates.trans.getTransformation( path[1] ) );
}
if ( path[0].equals( Spoon.STRING_JOBS ) ) { // Jobs title
object = new TreeSelection( path[1], spoon.delegates.jobs.getJob( path[1] ) );
}
break;
case 3: // ------complete-----
if ( path[0].equals( Spoon.STRING_TRANSFORMATIONS ) ) { // Transformations title
TransMeta transMeta = spoon.delegates.trans.getTransformation( path[1] );
if ( path[2].equals( Spoon.STRING_CONNECTIONS ) ) {
object = new TreeSelection( path[2], DatabaseMeta.class, transMeta );
}
if ( path[2].equals( Spoon.STRING_STEPS ) ) {
object = new TreeSelection( path[2], StepMeta.class, transMeta );
}
if ( path[2].equals( Spoon.STRING_HOPS ) ) {
object = new TreeSelection( path[2], TransHopMeta.class, transMeta );
}
if ( path[2].equals( Spoon.STRING_PARTITIONS ) ) {
object = new TreeSelection( path[2], PartitionSchema.class, transMeta );
}
if ( path[2].equals( Spoon.STRING_SLAVES ) ) {
object = new TreeSelection( path[2], SlaveServer.class, transMeta );
}
if ( path[2].equals( Spoon.STRING_CLUSTERS ) ) {
object = new TreeSelection( path[2], ClusterSchema.class, transMeta );
}
executeExtensionPoint( new SpoonTreeDelegateExtension( transMeta, path, 3, objects ) );
}
if ( path[0].equals( Spoon.STRING_JOBS ) ) { // Jobs title
JobMeta jobMeta = spoon.delegates.jobs.getJob( path[1] );
if ( path[2].equals( Spoon.STRING_CONNECTIONS ) ) {
object = new TreeSelection( path[2], DatabaseMeta.class, jobMeta );
}
if ( path[2].equals( Spoon.STRING_JOB_ENTRIES ) ) {
object = new TreeSelection( path[2], JobEntryCopy.class, jobMeta );
}
if ( path[2].equals( Spoon.STRING_SLAVES ) ) {
object = new TreeSelection( path[2], SlaveServer.class, jobMeta );
}
executeExtensionPoint( new SpoonTreeDelegateExtension( jobMeta, path, 3, objects ) );
}
break;
case 4: // ------complete-----
if ( path[0].equals( Spoon.STRING_TRANSFORMATIONS ) ) { // The name of a transformation
final TransMeta transMeta = spoon.delegates.trans.getTransformation( path[1] );
if ( transMeta != null ) {
if ( path[2].equals( Spoon.STRING_CONNECTIONS ) ) {
String dbName = path[3];
DatabaseMeta databaseMeta = transMeta.findDatabase( dbName );
if ( databaseMeta != null ) {
dbName = databaseMeta.getName();
}
object = new TreeSelection( dbName, databaseMeta, transMeta );
}
if ( path[2].equals( Spoon.STRING_STEPS ) ) {
object = new TreeSelection( path[3], transMeta.findStep( path[3] ), transMeta );
}
if ( path[2].equals( Spoon.STRING_HOPS ) ) {
object = new TreeSelection( path[3], transMeta.findTransHop( path[3] ), transMeta );
}
if ( path[2].equals( Spoon.STRING_PARTITIONS ) ) {
object = new TreeSelection( path[3], transMeta.findPartitionSchema( path[3] ), transMeta );
}
if ( path[2].equals( Spoon.STRING_SLAVES ) ) {
object = new TreeSelection( path[3], transMeta.findSlaveServer( path[3] ), transMeta );
}
if ( path[2].equals( Spoon.STRING_CLUSTERS ) ) {
object = new TreeSelection( path[3], transMeta.findClusterSchema( path[3] ), transMeta );
}
executeExtensionPoint( new SpoonTreeDelegateExtension( transMeta, path, 4, objects ) );
}
}
if ( path[0].equals( Spoon.STRING_JOBS ) ) { // The name of a job
JobMeta jobMeta = spoon.delegates.jobs.getJob( path[1] );
if ( jobMeta != null && path[2].equals( Spoon.STRING_CONNECTIONS ) ) {
String dbName = path[3];
DatabaseMeta databaseMeta = jobMeta.findDatabase( dbName );
if ( databaseMeta != null ) {
dbName = databaseMeta.getName();
}
object = new TreeSelection( dbName, databaseMeta, jobMeta );
}
if ( jobMeta != null && path[2].equals( Spoon.STRING_JOB_ENTRIES ) ) {
object = new TreeSelection( path[3], jobMeta.findJobEntry( path[3] ), jobMeta );
}
if ( jobMeta != null && path[2].equals( Spoon.STRING_SLAVES ) ) {
object = new TreeSelection( path[3], jobMeta.findSlaveServer( path[3] ), jobMeta );
}
executeExtensionPoint( new SpoonTreeDelegateExtension( jobMeta, path, 4, objects ) );
}
break;
case 5:
if ( path[0].equals( Spoon.STRING_TRANSFORMATIONS ) ) { // The name of a transformation
TransMeta transMeta = spoon.delegates.trans.getTransformation( path[1] );
if ( transMeta != null && path[2].equals( Spoon.STRING_CLUSTERS ) ) {
ClusterSchema clusterSchema = transMeta.findClusterSchema( path[3] );
object =
new TreeSelection( path[4], clusterSchema.findSlaveServer( path[4] ), clusterSchema, transMeta );
}
}
break;
default:
break;
}
if ( object != null ) {
objects.add( object );
}
}
}
if ( tree != null && coreObjectsTree != null && tree.equals( coreObjectsTree ) ) {
TreeItem[] selection = coreObjectsTree.getSelection();
for ( int s = 0; s < selection.length; s++ ) {
TreeItem treeItem = selection[s];
String[] path = ConstUI.getTreeStrings( treeItem );
TreeSelection object = null;
switch ( path.length ) {
case 0:
break;
case 2: // Job entries
if ( spoon.showJob ) {
PluginRegistry registry = PluginRegistry.getInstance();
Class<? extends PluginTypeInterface> pluginType = JobEntryPluginType.class;
PluginInterface plugin = registry.findPluginWithName( pluginType, path[1] );
// Retry for Start
//
if ( plugin == null ) {
if ( path[1].equalsIgnoreCase( JobMeta.STRING_SPECIAL_START ) ) {
plugin = registry.findPluginWithId( pluginType, JobMeta.STRING_SPECIAL );
}
}
// Retry for Dummy
//
if ( plugin == null ) {
if ( path[1].equalsIgnoreCase( JobMeta.STRING_SPECIAL_DUMMY ) ) {
plugin = registry.findPluginWithId( pluginType, JobMeta.STRING_SPECIAL );
}
}
if ( plugin != null ) {
object = new TreeSelection( path[1], plugin );
}
}
if ( spoon.showTrans ) {
String stepId = (String) treeItem.getData( "StepId" );
if ( stepId != null ) {
object = new TreeSelection( path[1], PluginRegistry.getInstance().findPluginWithId( StepPluginType.class, stepId ) );
} else {
object = new TreeSelection( path[1], PluginRegistry.getInstance().findPluginWithName( StepPluginType.class, path[1] ) );
}
}
break;
default:
break;
}
if ( object != null ) {
objects.add( object );
}
}
}
return objects.toArray( new TreeSelection[objects.size()] );
}
|
@Test
public void getTreeObjects_getStepByName() {
SpoonTreeDelegate std = spy( new SpoonTreeDelegate( spoon ) );
Tree selection = mock( Tree.class );
Tree core = mock( Tree.class );
TreeItem item = mock( TreeItem.class );
PluginInterface step = mock( PluginInterface.class );
PluginRegistry registry = mock( PluginRegistry.class );
TreeItem[] items = new TreeItem[] { item };
when( ConstUI.getTreeStrings( item ) ).thenReturn( new String[] { "Output", "Delete" } );
when( PluginRegistry.getInstance() ).thenReturn( registry );
doReturn( items ).when( core ).getSelection();
doReturn( null ).when( item ).getData( anyString() );
doReturn( step ).when( registry ).findPluginWithName( StepPluginType.class, "Delete" );
spoon.showJob = false;
spoon.showTrans = true;
TreeSelection[] ts = std.getTreeObjects( core, selection, core );
assertEquals( 1, ts.length );
assertEquals( step, ts[ 0 ].getSelection() );
}
|
@Override
public boolean supportsMultipleTransactions() {
return false;
}
|
@Test
void assertSupportsMultipleTransactions() {
assertFalse(metaData.supportsMultipleTransactions());
}
|
@Around(SYNC_UPDATE_CONFIG_ALL)
public Object aroundSyncUpdateConfigAll(ProceedingJoinPoint pjp, HttpServletRequest request,
HttpServletResponse response, String dataId, String group, String content, String appName, String srcUser,
String tenant, String tag) throws Throwable {
if (!PropertyUtil.isManageCapacity()) {
return pjp.proceed();
}
LOGGER.info("[capacityManagement] aroundSyncUpdateConfigAll");
String betaIps = request.getHeader("betaIps");
if (StringUtils.isBlank(betaIps)) {
if (StringUtils.isBlank(tag)) {
// do capacity management limitation check for writing or updating config_info table.
if (configInfoPersistService.findConfigInfo(dataId, group, tenant) == null) {
// Write operation.
return do4Insert(pjp, request, response, group, tenant, content);
}
// Update operation.
return do4Update(pjp, request, response, dataId, group, tenant, content);
}
}
return pjp.proceed();
}
|
@Test
void testAroundSyncUpdateConfigAllForInsertAspect3Tenant() throws Throwable {
//test with insert
//condition:
// 1. has tenant: true
// 2. capacity limit check: true
// 3. over cluster quota: false
// 4. tenant capacity: not null
// 5. over tenant max size: true/false (if tenant max size is 0, will use default max size)
when(PropertyUtil.isManageCapacity()).thenReturn(true);
when(PropertyUtil.isCapacityLimitCheck()).thenReturn(true);
when(configInfoPersistService.findConfigInfo(any(), any(), any())).thenReturn(null);
when(capacityService.insertAndUpdateClusterUsage(any(), anyBoolean())).thenReturn(true);
when(capacityService.updateTenantUsage(eq(CounterMode.INCREMENT), eq(mockTenant))).thenReturn(true);
TenantCapacity localTenantCapacity = new TenantCapacity();
localTenantCapacity.setTenant(mockTenant);
localTenantCapacity.setMaxSize(0);
localTenantCapacity.setMaxAggrCount(0);
when(capacityService.getTenantCapacity(eq(mockTenant))).thenReturn(localTenantCapacity);
MockHttpServletRequest mockHttpServletRequest = new MockHttpServletRequest();
MockHttpServletResponse mockHttpServletResponse = new MockHttpServletResponse();
String localMockResult = (String) capacityManagementAspect.aroundSyncUpdateConfigAll(proceedingJoinPoint, mockHttpServletRequest,
mockHttpServletResponse, mockDataId, mockGroup, mockContent, null, null, mockTenant, null);
assertEquals(localMockResult, mockProceedingJoinPointResult);
Mockito.verify(capacityService, Mockito.times(0)).initTenantCapacity(eq(mockTenant));
Mockito.verify(capacityService, Mockito.times(1)).updateTenantUsage(eq(CounterMode.INCREMENT), eq(mockTenant));
Mockito.verify(proceedingJoinPoint, Mockito.times(1)).proceed();
// 5. over tenant max size: true
localTenantCapacity.setMaxSize(1);
localTenantCapacity.setMaxAggrCount(1);
localMockResult = (String) capacityManagementAspect.aroundSyncUpdateConfigAll(proceedingJoinPoint, mockHttpServletRequest,
mockHttpServletResponse, mockDataId, mockGroup, mockContent, null, null, mockTenant, null);
assertEquals(localMockResult, String.valueOf(OVER_MAX_SIZE.status));
        // 5. over tenant max size: false (a 10 * 1024 max size is large enough, so the join point proceeds)
localTenantCapacity.setMaxSize(10 * 1024);
localTenantCapacity.setMaxAggrCount(1024);
localMockResult = (String) capacityManagementAspect.aroundSyncUpdateConfigAll(proceedingJoinPoint, mockHttpServletRequest,
mockHttpServletResponse, mockDataId, mockGroup, mockContent, null, null, mockTenant, null);
assertEquals(localMockResult, mockProceedingJoinPointResult);
}
|
@Override
public boolean isIn(String ipAddress) {
if (ipAddress == null || addressList == null) {
return false;
}
return addressList.includes(ipAddress);
}
|
@Test
public void testSubnetsAndIPs() throws IOException {
String[] ips = {"10.119.103.112", "10.221.102.0/23"};
createFileWithEntries ("ips.txt", ips);
IPList ipList = new FileBasedIPList("ips.txt");
assertTrue ("10.119.103.112 is not in the list",
ipList.isIn("10.119.103.112"));
assertFalse ("10.119.103.113 is in the list",
ipList.isIn("10.119.103.113"));
assertTrue ("10.221.102.0 is not in the list",
ipList.isIn("10.221.102.0"));
assertTrue ("10.221.102.1 is not in the list",
ipList.isIn("10.221.102.1"));
assertTrue ("10.221.103.1 is not in the list",
ipList.isIn("10.221.103.1"));
assertTrue ("10.221.103.255 is not in the list",
ipList.isIn("10.221.103.255"));
assertFalse("10.221.104.0 is in the list",
ipList.isIn("10.221.104.0"));
assertFalse("10.221.104.1 is in the list",
ipList.isIn("10.221.104.1"));
}
|
public DiscardObject markAsDiscardedOnShutdown(JobStatus jobStatus) {
return shouldBeDiscardedOnShutdown(jobStatus) ? markAsDiscarded() : NOOP_DISCARD_OBJECT;
}
|
@Test
void testCompletedCheckpointStatsCallbacks() throws Exception {
Map<JobVertexID, TaskStateStats> taskStats = new HashMap<>();
JobVertexID jobVertexId = new JobVertexID();
taskStats.put(jobVertexId, new TaskStateStats(jobVertexId, 1));
CompletedCheckpointStats checkpointStats =
new CompletedCheckpointStats(
1,
0,
CheckpointProperties.forCheckpoint(
CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION),
1,
taskStats,
1,
1,
1,
1,
true,
mock(SubtaskStateStats.class),
null);
CompletedCheckpoint completed =
new CompletedCheckpoint(
new JobID(),
0,
0,
1,
Collections.emptyMap(),
Collections.emptyList(),
CheckpointProperties.forCheckpoint(
CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION),
new TestCompletedCheckpointStorageLocation(),
checkpointStats);
completed.markAsDiscardedOnShutdown(JobStatus.FINISHED).discard();
assertThat(checkpointStats.isDiscarded()).isTrue();
}
|
@Override
    @CacheEvict(cacheNames = RedisKeyConstants.SMS_TEMPLATE,
            allEntries = true) // allEntries clears the whole cache, since the code field may change and targeted eviction is impractical
    public void updateSmsTemplate(SmsTemplateSaveReqVO updateReqVO) {
        // validate that the template exists
        validateSmsTemplateExists(updateReqVO.getId());
        // validate the SMS channel
        SmsChannelDO channelDO = validateSmsChannel(updateReqVO.getChannelId());
        // validate that the template code is not duplicated
        validateSmsTemplateCodeDuplicate(updateReqVO.getId(), updateReqVO.getCode());
        // validate the template against the provider API
        validateApiTemplate(updateReqVO.getChannelId(), updateReqVO.getApiTemplateId());
        // perform the update
SmsTemplateDO updateObj = BeanUtils.toBean(updateReqVO, SmsTemplateDO.class);
updateObj.setParams(parseTemplateContentParams(updateObj.getContent()));
updateObj.setChannelCode(channelDO.getCode());
smsTemplateMapper.updateById(updateObj);
}
|
@Test
public void testUpdateSmsTemplate_notExists() {
    // prepare the request
    SmsTemplateSaveReqVO reqVO = randomPojo(SmsTemplateSaveReqVO.class);
    // invoke and assert the expected exception
assertServiceException(() -> smsTemplateService.updateSmsTemplate(reqVO), SMS_TEMPLATE_NOT_EXISTS);
}
|
@Override
public void finished(boolean allStepsExecuted) {
if (postProjectAnalysisTasks.length == 0) {
return;
}
ProjectAnalysisImpl projectAnalysis = createProjectAnalysis(allStepsExecuted ? SUCCESS : FAILED);
for (PostProjectAnalysisTask postProjectAnalysisTask : postProjectAnalysisTasks) {
executeTask(projectAnalysis, postProjectAnalysisTask);
}
}
|
@Test
public void branch_comes_from_AnalysisMetadataHolder_when_set() {
analysisMetadataHolder.setBranch(new Branch() {
@Override
public BranchType getType() {
return BranchType.BRANCH;
}
@Override
public boolean isMain() {
return false;
}
@Override
public String getReferenceBranchUuid() {
throw new UnsupportedOperationException();
}
@Override
public String getName() {
return "feature/foo";
}
@Override
public boolean supportsCrossProjectCpd() {
throw new UnsupportedOperationException();
}
@Override
public String getPullRequestKey() {
throw new UnsupportedOperationException();
}
@Override
public String getTargetBranchName() {
throw new UnsupportedOperationException();
}
@Override
public String generateKey(String projectKey, @Nullable String fileOrDirPath) {
throw new UnsupportedOperationException();
}
});
underTest.finished(true);
verify(postProjectAnalysisTask).finished(taskContextCaptor.capture());
org.sonar.api.ce.posttask.Branch branch = taskContextCaptor.getValue().getProjectAnalysis().getBranch().get();
assertThat(branch.isMain()).isFalse();
assertThat(branch.getName()).hasValue("feature/foo");
assertThat(branch.getType()).isEqualTo(BranchImpl.Type.BRANCH);
}
|
@Override
public KeyValueIterator<K, V> reverseRange(final K from,
final K to) {
final byte[] serFrom = from == null ? null : serdes.rawKey(from);
final byte[] serTo = to == null ? null : serdes.rawKey(to);
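        // null bounds are not supported here: passing null ends in a NullPointerException
        // (see the paired test)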
return new MeteredKeyValueIterator(
wrapped().reverseRange(Bytes.wrap(serFrom), Bytes.wrap(serTo)),
rangeSensor
);
}
|
@Test
public void shouldThrowNullPointerOnReverseRangeIfToIsNull() {
setUpWithoutContext();
assertThrows(NullPointerException.class, () -> metered.reverseRange("from", null));
}
|
public <T> Map<String, Object> schemas(Class<? extends T> cls) {
return this.schemas(cls, false);
}
|
@SuppressWarnings({"unchecked", "deprecation"})
@Test
void echoTask() throws URISyntaxException {
Helpers.runApplicationContext((applicationContext) -> {
JsonSchemaGenerator jsonSchemaGenerator = applicationContext.getBean(JsonSchemaGenerator.class);
Map<String, Object> returnSchema = jsonSchemaGenerator.schemas(Echo.class);
var definitions = (Map<String, Map<String, Object>>) returnSchema.get("definitions");
var returnTask = definitions.get(Echo.class.getName());
var deprecated = (String) returnTask.get("$deprecated");
assertThat(deprecated, is("true"));
});
}
|
@Override
public HealthCheckResponse call() {
final HealthCheckResponseBuilder healthResponseBuilder = HealthCheckResponse.named("JobRunr");
if (!jobRunrBuildTimeConfiguration.backgroundJobServer().enabled()) {
healthResponseBuilder
.up()
.withData("backgroundJobServer", "disabled");
} else {
final BackgroundJobServer backgroundJobServer = backgroundJobServerInstance.get();
if (backgroundJobServer.isRunning()) {
healthResponseBuilder
.up()
.withData("backgroundJobServer", "enabled")
.withData("backgroundJobServerStatus", "running");
} else {
healthResponseBuilder
.down()
.withData("backgroundJobServer", "enabled")
.withData("backgroundJobServerStatus", "stopped");
}
}
return healthResponseBuilder.build();
}
|
@Test
void givenEnabledBackgroundJobServerAndBackgroundJobServerStopped_ThenHealthIsDown() {
when(backgroundJobServerConfiguration.enabled()).thenReturn(true);
when(backgroundJobServer.isRunning()).thenReturn(false);
assertThat(jobRunrHealthCheck.call().getStatus()).isEqualTo(HealthCheckResponse.Status.DOWN);
}
|
public abstract VoiceInstructionValue getConfigForDistance(
double distance,
String turnDescription,
String thenVoiceInstruction);
|
@Test
public void conditionalDistanceVICShouldReturnFirstFittingMetricValues() {
ConditionalDistanceVoiceInstructionConfig config = new ConditionalDistanceVoiceInstructionConfig(IN_LOWER_DISTANCE_PLURAL.metric, trMap, locale, new int[]{400, 200}, new int[]{400, 200});
compareVoiceInstructionValues(
400,
"In 400 meters turn then",
config.getConfigForDistance(10010, "turn", " then")
);
compareVoiceInstructionValues(
400,
"In 400 meters turn then",
config.getConfigForDistance(450, "turn", " then")
);
compareVoiceInstructionValues(
400,
"In 400 meters turn then",
config.getConfigForDistance(400, "turn", " then")
);
compareVoiceInstructionValues(
200,
"In 200 meters turn then",
config.getConfigForDistance(399, "turn", " then")
);
compareVoiceInstructionValues(
200,
"In 200 meters turn then",
config.getConfigForDistance(200, "turn", " then")
);
assertNull(config.getConfigForDistance(190, "turn", " then"));
}
|
@PostMapping("/order")
public ResponseEntity<String> processOrder(@RequestBody(required = false) String request) {
LOGGER.info("Received order request: {}", request);
var result = orderService.processOrder();
LOGGER.info("Order processed result: {}", result);
return ResponseEntity.ok(result);
}
|
@Test
void processOrderShouldReturnSuccessStatus() {
// Arrange
when(orderService.processOrder()).thenReturn("Order processed successfully");
// Act
ResponseEntity<String> response = orderController.processOrder("test order");
// Assert
assertEquals("Order processed successfully", response.getBody());
}
|
public static <K, E, V> Collector<E, ImmutableSetMultimap.Builder<K, V>, ImmutableSetMultimap<K, V>> unorderedFlattenIndex(
Function<? super E, K> keyFunction, Function<? super E, Stream<V>> valueFunction) {
verifyKeyAndValueFunctions(keyFunction, valueFunction);
BiConsumer<ImmutableSetMultimap.Builder<K, V>, E> accumulator = (map, element) -> {
K key = requireNonNull(keyFunction.apply(element), KEY_FUNCTION_CANT_RETURN_NULL_MESSAGE);
Stream<V> valueStream = requireNonNull(valueFunction.apply(element), VALUE_FUNCTION_CANT_RETURN_NULL_MESSAGE);
valueStream.forEach(value -> map.put(key, value));
};
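    // the merger runs only for parallel streams: materialize m2 and fold its entries into m1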
BinaryOperator<ImmutableSetMultimap.Builder<K, V>> merger = (m1, m2) -> {
for (Map.Entry<K, V> entry : m2.build().entries()) {
m1.put(entry.getKey(), entry.getValue());
}
return m1;
};
return Collector.of(
ImmutableSetMultimap::builder,
accumulator,
merger,
ImmutableSetMultimap.Builder::build);
}
|
@Test
public void unorderedFlattenIndex_with_valueFunction_fails_if_key_function_returns_null() {
assertThatThrownBy(() -> SINGLE_ELEMENT2_LIST.stream().collect(unorderedFlattenIndex(s -> null, MyObj2::getTexts)))
.isInstanceOf(NullPointerException.class)
.hasMessage("Key function can't return null");
}
|
public static String getContainerExecutorExecutablePath(Configuration conf) {
String yarnHomeEnvVar =
System.getenv(ApplicationConstants.Environment.HADOOP_YARN_HOME.key());
File hadoopBin = new File(yarnHomeEnvVar, "bin");
String defaultPath =
new File(hadoopBin, "container-executor").getAbsolutePath();
return null == conf
? defaultPath
: conf.get(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH,
defaultPath);
}
|
@Test
public void testExecutorPath() {
String containerExePath = PrivilegedOperationExecutor
.getContainerExecutorExecutablePath(nullConf);
//In case HADOOP_YARN_HOME isn't set, CWD is used. If conf is null or
//NM_LINUX_CONTAINER_EXECUTOR_PATH is not set, then a defaultPath is
//constructed.
String yarnHomeEnvVar = System.getenv("HADOOP_YARN_HOME");
String yarnHome = yarnHomeEnvVar != null ? yarnHomeEnvVar
: new File("").getAbsolutePath();
String expectedPath = yarnHome + "/bin/container-executor";
Assert.assertEquals(expectedPath, containerExePath);
containerExePath = PrivilegedOperationExecutor
.getContainerExecutorExecutablePath(emptyConf);
Assert.assertEquals(expectedPath, containerExePath);
//if NM_LINUX_CONTAINER_EXECUTOR_PATH is set, this must be returned
expectedPath = customExecutorPath;
containerExePath = PrivilegedOperationExecutor
.getContainerExecutorExecutablePath(confWithExecutorPath);
Assert.assertEquals(expectedPath, containerExePath);
}
|
@Operation(summary = "queryAuthorizedUser", description = "QUERY_AUTHORIZED_USER_NOTES")
@Parameters({
@Parameter(name = "projectCode", description = "PROJECT_CODE", schema = @Schema(implementation = long.class, example = "100", required = true))
})
@GetMapping(value = "/authed-user")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_AUTHORIZED_USER)
public UserListResponse queryAuthorizedUser(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("projectCode") Long projectCode) {
Result result = projectService.queryAuthorizedUser(loginUser, projectCode);
return new UserListResponse(result);
}
|
@Test
public void testQueryAuthorizedUser() {
Result result = new Result();
this.putMsg(result, Status.SUCCESS);
Mockito.when(this.projectService.queryAuthorizedUser(this.user, 3682329499136L)).thenReturn(result);
Result response = this.projectV2Controller.queryAuthorizedUser(this.user, 3682329499136L);
Assertions.assertEquals(Status.SUCCESS.getCode(), response.getCode().intValue());
}
|
@Override public void pluginRemoved( Object serviceObject ) {
SpoonPluginInterface spoonPluginInterface = plugins.get( serviceObject );
if ( spoonPluginInterface == null ) {
return;
}
SpoonPluginCategories categories = spoonPluginInterface.getClass().getAnnotation( SpoonPluginCategories.class );
if ( categories != null ) {
for ( String cat : categories.value() ) {
List<SpoonPluginInterface> categoryList = pluginCategoryMap.get( cat );
categoryList.remove( spoonPluginInterface );
}
}
if ( spoonPluginInterface.getPerspective() != null ) {
getSpoonPerspectiveManager().removePerspective( spoonPluginInterface.getPerspective() );
}
plugins.remove( serviceObject );
}
|
@Test
public void testPluginRemoved() throws Exception {
spoonPluginManager.pluginAdded( plugin1 );
spoonPluginManager.pluginRemoved( plugin1 );
verify( spoonPerspectiveManager ).removePerspective( spoonPerspective );
}
|
public JmxCollector register() {
return register(PrometheusRegistry.defaultRegistry);
}
|
@Test
public void testNameAndLabelsFromPattern() throws Exception {
new JmxCollector(
"\n---\nrules:\n- pattern: `^hadoop<(service)=(DataNode), name=DataNodeActivity-ams-hdd001-50010><>(replaceBlockOpMinTime):`\n name: hadoop_$3\n labels:\n `$1`: `$2`"
.replace('`', '"'))
.register(prometheusRegistry);
assertEquals(
200,
getSampleValue(
"hadoop_replaceBlockOpMinTime",
new String[] {"service"},
new String[] {"DataNode"}),
.001);
}
|
@Override
public CloseableIterator<ScannerReport.Duplication> readComponentDuplications(int componentRef) {
ensureInitialized();
return delegate.readComponentDuplications(componentRef);
}
|
@Test
public void readComponentDuplications_returns_empty_list_if_file_does_not_exist() {
assertThat(underTest.readComponentDuplications(COMPONENT_REF)).isExhausted();
}
|
<T extends PipelineOptions> T as(Class<T> iface) {
checkNotNull(iface);
checkArgument(iface.isInterface(), "Not an interface: %s", iface);
T existingOption = computedProperties.interfaceToProxyCache.getInstance(iface);
if (existingOption == null) {
synchronized (this) {
// double check
existingOption = computedProperties.interfaceToProxyCache.getInstance(iface);
if (existingOption == null) {
Registration<T> registration =
PipelineOptionsFactory.CACHE
.get()
.validateWellFormed(iface, computedProperties.knownInterfaces);
List<PropertyDescriptor> propertyDescriptors = registration.getPropertyDescriptors();
Class<T> proxyClass = registration.getProxyClass();
existingOption =
InstanceBuilder.ofType(proxyClass)
.fromClass(proxyClass)
.withArg(InvocationHandler.class, this)
.build();
computedProperties =
computedProperties.updated(iface, existingOption, propertyDescriptors);
}
}
}
return existingOption;
}
|
@Test
public void testEqualsAndHashCode() throws Exception {
ProxyInvocationHandler handler = new ProxyInvocationHandler(Maps.newHashMap());
Simple proxy = handler.as(Simple.class);
JLSDefaults sameAsProxy = proxy.as(JLSDefaults.class);
ProxyInvocationHandler handler2 = new ProxyInvocationHandler(Maps.newHashMap());
Simple proxy2 = handler2.as(Simple.class);
JLSDefaults sameAsProxy2 = proxy2.as(JLSDefaults.class);
new EqualsTester()
.addEqualityGroup(handler, proxy, sameAsProxy)
.addEqualityGroup(handler2, proxy2, sameAsProxy2)
.testEquals();
}
|
public Resource getIncrementAllocation() {
Long memory = null;
Integer vCores = null;
Map<String, Long> others = new HashMap<>();
ResourceInformation[] resourceTypes = ResourceUtils.getResourceTypesArray();
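    // look up a per-resource-type increment property for every registered resource type,
    // expected in "value [unit]" format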
for (int i=0; i < resourceTypes.length; ++i) {
String name = resourceTypes[i].getName();
String propertyKey = getAllocationIncrementPropKey(name);
String propValue = get(propertyKey);
if (propValue != null) {
Matcher matcher = RESOURCE_REQUEST_VALUE_PATTERN.matcher(propValue);
if (matcher.matches()) {
long value = Long.parseLong(matcher.group(1));
String unit = matcher.group(2);
long valueInDefaultUnits = getValueInDefaultUnits(value, unit, name);
others.put(name, valueInDefaultUnits);
} else {
throw new IllegalArgumentException("Property " + propertyKey +
" is not in \"value [unit]\" format: " + propValue);
}
}
}
if (others.containsKey(ResourceInformation.MEMORY_MB.getName())) {
memory = others.get(ResourceInformation.MEMORY_MB.getName());
if (get(RM_SCHEDULER_INCREMENT_ALLOCATION_MB) != null) {
String overridingKey = getAllocationIncrementPropKey(
ResourceInformation.MEMORY_MB.getName());
LOG.warn("Configuration " + overridingKey + "=" + get(overridingKey) +
" is overriding the " + RM_SCHEDULER_INCREMENT_ALLOCATION_MB +
"=" + get(RM_SCHEDULER_INCREMENT_ALLOCATION_MB) + " property");
}
others.remove(ResourceInformation.MEMORY_MB.getName());
} else {
memory = getLong(
RM_SCHEDULER_INCREMENT_ALLOCATION_MB,
DEFAULT_RM_SCHEDULER_INCREMENT_ALLOCATION_MB);
}
if (others.containsKey(ResourceInformation.VCORES.getName())) {
vCores = others.get(ResourceInformation.VCORES.getName()).intValue();
if (get(RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES) != null) {
String overridingKey = getAllocationIncrementPropKey(
ResourceInformation.VCORES.getName());
LOG.warn("Configuration " + overridingKey + "=" + get(overridingKey) +
" is overriding the " + RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES +
"=" + get(RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES) + " property");
}
others.remove(ResourceInformation.VCORES.getName());
} else {
vCores = getInt(
RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES,
DEFAULT_RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES);
}
return Resource.newInstance(memory, vCores, others);
}
|
@Test
public void testAllocationIncrementMemoryNonDefaultUnit() throws Exception {
Configuration conf = new Configuration();
conf.set(YarnConfiguration.RESOURCE_TYPES + "." +
ResourceInformation.MEMORY_MB.getName() +
FairSchedulerConfiguration.INCREMENT_ALLOCATION, "1 Gi");
FairSchedulerConfiguration fsc = new FairSchedulerConfiguration(conf);
Resource minimum = Resources.createResource(0L, 0);
Resource maximum =
Resources.createResource(Long.MAX_VALUE, Integer.MAX_VALUE);
Resource increment = fsc.getIncrementAllocation();
DominantResourceCalculator resourceCalculator =
new DominantResourceCalculator();
assertEquals(1024L, resourceCalculator.normalize(
Resources.createResource(1023L), minimum, maximum, increment)
.getMemorySize());
assertEquals(1024L, resourceCalculator.normalize(
Resources.createResource(1024L), minimum, maximum, increment)
.getMemorySize());
assertEquals(2048L, resourceCalculator.normalize(
Resources.createResource(1025L), minimum, maximum, increment)
.getMemorySize());
}
|
public static boolean isComplete(Object obj) throws IllegalArgumentException {
requireNonNull(obj);
Field[] fields = obj.getClass().getDeclaredFields();
StringBuilder error = new StringBuilder();
for (Field field : fields) {
if (field.isAnnotationPresent(FieldContext.class)) {
field.setAccessible(true);
Object value;
try {
value = field.get(obj);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
if (log.isDebugEnabled()) {
log.debug("Validating configuration field '{}' = '{}'", field.getName(), value);
}
boolean isRequired = field.getAnnotation(FieldContext.class).required();
long minValue = field.getAnnotation(FieldContext.class).minValue();
long maxValue = field.getAnnotation(FieldContext.class).maxValue();
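                // collect every violation into a single message: required fields must be
                // non-empty and numeric fields must lie within [minValue, maxValue]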
if (isRequired && isEmpty(value)) {
error.append(String.format("Required %s is null,", field.getName()));
}
if (value != null && Number.class.isAssignableFrom(value.getClass())) {
long fieldVal = ((Number) value).longValue();
boolean valid = fieldVal >= minValue && fieldVal <= maxValue;
if (!valid) {
error.append(String.format("%s value %d doesn't fit in given range (%d, %d),", field.getName(),
fieldVal, minValue, maxValue));
}
}
}
}
if (error.length() > 0) {
throw new IllegalArgumentException(error.substring(0, error.length() - 1));
}
return true;
}
|
@Test
public void testIncomplete() throws IllegalAccessException {
assertThrows(IllegalArgumentException.class, () -> isComplete(new TestInCompleteObjectRequired()));
assertThrows(IllegalArgumentException.class, () -> isComplete(new TestInCompleteObjectMin()));
assertThrows(IllegalArgumentException.class, () -> isComplete(new TestInCompleteObjectMax()));
assertThrows(IllegalArgumentException.class, () -> isComplete(new TestInCompleteObjectMix()));
}
|
public static ConditionStatus create(EvaluationStatus status, String value) {
requireNonNull(status, "status can not be null");
checkArgument(status != EvaluationStatus.NO_VALUE, "EvaluationStatus 'NO_VALUE' can not be used with this method, use constant ConditionStatus.NO_VALUE_STATUS instead.");
requireNonNull(value, "value can not be null");
return new ConditionStatus(status, value);
}
|
@Test
@UseDataProvider("allStatusesButNO_VALUE")
public void create_throws_NPE_if_value_is_null_and_status_argument_is_not_NO_VALUE(ConditionStatus.EvaluationStatus status) {
assertThatThrownBy(() -> ConditionStatus.create(status, null))
.isInstanceOf(NullPointerException.class)
.hasMessage("value can not be null");
}
|
public static TimeLock ofTimestamp(Instant time) {
long secs = time.getEpochSecond();
if (secs < THRESHOLD)
throw new IllegalArgumentException("timestamp too low: " + secs);
return new TimeLock(secs);
}
|
@Test
public void timestampSubtype() {
LockTime timestamp = LockTime.ofTimestamp(Instant.now());
assertTrue(timestamp instanceof TimeLock);
assertTrue(((TimeLock) timestamp).timestamp().isAfter(Instant.EPOCH));
}
|
public RuntimeOptionsBuilder parse(Map<String, String> properties) {
return parse(properties::get);
}
|
@Test
void should_parse_glue_list() {
properties.put(Constants.GLUE_PROPERTY_NAME, "com.example.app.steps, com.example.other.steps");
RuntimeOptions options = cucumberPropertiesParser.parse(properties).build();
assertThat(options.getGlue(), contains(
URI.create("classpath:/com/example/app/steps"),
URI.create("classpath:/com/example/other/steps")));
}
|
public static int readVInt(ByteData arr, long position) {
byte b = arr.get(position++);
if(b == (byte) 0x80)
throw new RuntimeException("Attempting to read null value as int");
int value = b & 0x7F;
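        // the high bit of each byte is a continuation flag; 7 payload bits are
        // accumulated per byte, most significant group first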
while ((b & 0x80) != 0) {
b = arr.get(position++);
value <<= 7;
value |= (b & 0x7F);
}
return value;
}
|
@Test
public void testReadVIntHollowBlobInput() throws IOException {
HollowBlobInput hbi = HollowBlobInput.serial(BYTES_VALUE_129);
Assert.assertEquals(129l, VarInt.readVInt(hbi));
}
|
public static Statement sanitize(
final Statement node,
final MetaStore metaStore) {
return sanitize(node, metaStore, true);
}
|
@Test
public void shouldThrowOnUnknownSource() {
// Given:
final Statement stmt = givenQuery("SELECT * FROM Unknown;");
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> AstSanitizer.sanitize(stmt, META_STORE)
);
// Then:
assertThat(e.getMessage(), containsString(
"UNKNOWN does not exist"));
}
|
@Override
public Decision onResultPartitionClosed(HsSpillingInfoProvider spillingInfoProvider) {
Decision.Builder builder = Decision.builder();
for (int subpartitionId = 0;
subpartitionId < spillingInfoProvider.getNumSubpartitions();
subpartitionId++) {
builder.addBufferToSpill(
subpartitionId,
// get all not start spilling buffers.
spillingInfoProvider.getBuffersInOrder(
subpartitionId,
SpillStatus.NOT_SPILL,
ConsumeStatusWithId.ALL_ANY))
.addBufferToRelease(
subpartitionId,
// get all not released buffers.
spillingInfoProvider.getBuffersInOrder(
subpartitionId, SpillStatus.ALL, ConsumeStatusWithId.ALL_ANY));
}
return builder.build();
}
|
@Test
void testOnResultPartitionClosed() {
final int subpartition1 = 0;
final int subpartition2 = 1;
List<BufferIndexAndChannel> subpartitionBuffer1 =
createBufferIndexAndChannelsList(subpartition1, 0, 1, 2, 3);
List<BufferIndexAndChannel> subpartitionBuffer2 =
createBufferIndexAndChannelsList(subpartition2, 0, 1, 2);
TestingSpillingInfoProvider spillInfoProvider =
TestingSpillingInfoProvider.builder()
.setGetNumSubpartitionsSupplier(() -> 2)
.addSubpartitionBuffers(subpartition1, subpartitionBuffer1)
.addSubpartitionBuffers(subpartition2, subpartitionBuffer2)
.addSpillBuffers(subpartition1, Arrays.asList(2, 3))
.addConsumedBuffers(subpartition1, Collections.singletonList(0))
.addSpillBuffers(subpartition2, Collections.singletonList(2))
.build();
Decision decision = spillStrategy.onResultPartitionClosed(spillInfoProvider);
Map<Integer, List<BufferIndexAndChannel>> expectedToSpillBuffers = new HashMap<>();
expectedToSpillBuffers.put(subpartition1, subpartitionBuffer1.subList(0, 2));
expectedToSpillBuffers.put(subpartition2, subpartitionBuffer2.subList(0, 2));
assertThat(decision.getBufferToSpill()).isEqualTo(expectedToSpillBuffers);
Map<Integer, List<BufferIndexAndChannel>> expectedToReleaseBuffers = new HashMap<>();
expectedToReleaseBuffers.put(subpartition1, subpartitionBuffer1.subList(0, 4));
expectedToReleaseBuffers.put(subpartition2, subpartitionBuffer2.subList(0, 3));
assertThat(decision.getBufferToRelease()).isEqualTo(expectedToReleaseBuffers);
}
|
@Override
public Optional<HealthStatus> health() {
return clusterHealth().map(response -> healthStatusFrom(response.getStatus()));
}
|
@Test
void returnsEmptyOptionalForHealthWhenElasticsearchExceptionThrown() {
when(client.execute(any())).thenThrow(new ElasticsearchException("Exception"));
final Optional<HealthStatus> healthStatus = clusterAdapter.health();
assertThat(healthStatus).isEmpty();
}
|
public static AvroGenericCoder of(Schema schema) {
return AvroGenericCoder.of(schema);
}
|
@Test
public void testNullableField() {
assertDeterministic(AvroCoder.of(NullableField.class));
}
|
@SuppressWarnings("unchecked")
public static <K1,V1,K2,V2> Mapper<K1,V1,K2,V2>.Context
cloneMapContext(MapContext<K1,V1,K2,V2> context,
Configuration conf,
RecordReader<K1,V1> reader,
RecordWriter<K2,V2> writer
) throws IOException, InterruptedException {
try {
// get the outer object pointer
Object outer = OUTER_MAP_FIELD.get(context);
// if it is a wrapped 21 context, unwrap it
if ("org.apache.hadoop.mapreduce.lib.map.WrappedMapper$Context".equals
(context.getClass().getName())) {
context = (MapContext<K1,V1,K2,V2>) WRAPPED_CONTEXT_FIELD.get(context);
}
// if the reader or writer aren't given, use the same ones
if (reader == null) {
reader = (RecordReader<K1,V1>) READER_FIELD.get(context);
}
if (writer == null) {
writer = (RecordWriter<K2,V2>) WRITER_FIELD.get(context);
}
if (useV21) {
Object basis =
MAP_CONTEXT_IMPL_CONSTRUCTOR.newInstance(conf,
context.getTaskAttemptID(),
reader, writer,
context.getOutputCommitter(),
REPORTER_FIELD.get(context),
context.getInputSplit());
return (Mapper.Context)
MAP_CONTEXT_CONSTRUCTOR.newInstance(outer, basis);
} else {
return (Mapper.Context)
MAP_CONTEXT_CONSTRUCTOR.newInstance(outer,
conf, context.getTaskAttemptID(),
reader, writer,
context.getOutputCommitter(),
REPORTER_FIELD.get(context),
context.getInputSplit());
}
} catch (IllegalAccessException e) {
throw new IllegalArgumentException("Can't access field", e);
} catch (InstantiationException e) {
throw new IllegalArgumentException("Can't create object", e);
} catch (InvocationTargetException e) {
throw new IllegalArgumentException("Can't invoke constructor", e);
}
}
|
@Test
public void testCloneMapContext() throws Exception {
TaskID taskId = new TaskID(jobId, TaskType.MAP, 0);
TaskAttemptID taskAttemptid = new TaskAttemptID(taskId, 0);
MapContext<IntWritable, IntWritable, IntWritable, IntWritable> mapContext =
new MapContextImpl<IntWritable, IntWritable, IntWritable, IntWritable>(
conf, taskAttemptid, null, null, null, null, null);
Mapper<IntWritable, IntWritable, IntWritable, IntWritable>.Context mapperContext =
new WrappedMapper<IntWritable, IntWritable, IntWritable, IntWritable>().getMapContext(
mapContext);
ContextFactory.cloneMapContext(mapperContext, conf, null, null);
}
|
public static byte[] toArray(ByteBuffer bytebuffer) {
if (bytebuffer.hasArray()) {
return Arrays.copyOfRange(bytebuffer.array(), bytebuffer.position(), bytebuffer.limit());
        } else {
            // copy the remaining bytes (position..limit) without disturbing the buffer's
            // position, matching the hasArray() branch above
            int oldPosition = bytebuffer.position();
            byte[] buffers = new byte[bytebuffer.remaining()];
            bytebuffer.get(buffers);
            bytebuffer.position(oldPosition);
            return buffers;
        }
}
|
@Test
public void toArrayTest() {
final ArrayList<String> list = CollUtil.newArrayList("A", "B", "C", "D");
final String[] array = ArrayUtil.toArray(list, String.class);
assertEquals("A", array[0]);
assertEquals("B", array[1]);
assertEquals("C", array[2]);
assertEquals("D", array[3]);
}
|
public static JsonNode getHostLifePacket() {
var jsonMapper = Jackson.mapper();
ObjectNode jsonObject = jsonMapper.createObjectNode();
jsonObject.put("timestamp", Instant.now().getEpochSecond());
jsonObject.put("application", "host_life");
ObjectNode metrics = jsonMapper.createObjectNode();
metrics.put("alive", 1);
jsonObject.set("metrics", metrics);
ObjectNode dimensions = jsonMapper.createObjectNode();
dimensions.put("vespaVersion", Vtag.currentVersion.toFullString());
jsonObject.set("dimensions", dimensions);
return jsonObject;
}
|
@Test
void host_is_alive() {
JsonNode packet = HostLifeGatherer.getHostLifePacket();
JsonNode metrics = packet.get("metrics");
assertEquals("host_life", packet.get("application").textValue());
assertEquals(1, metrics.get("alive").intValue());
assertTrue(packet.get("dimensions").hasNonNull("vespaVersion"));
}
|
protected void setCharsetWithContentType(Exchange camelExchange) {
// setup the charset from content-type header
String contentTypeHeader = ExchangeHelper.getContentType(camelExchange);
if (contentTypeHeader != null) {
String charset = HttpHeaderHelper.findCharset(contentTypeHeader);
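            // mapCharset normalizes the charset name, falling back to UTF-8 when the
            // content type carries no charset (see the paired test)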
String normalizedEncoding = HttpHeaderHelper.mapCharset(charset, StandardCharsets.UTF_8.name());
if (normalizedEncoding != null) {
camelExchange.setProperty(ExchangePropertyKey.CHARSET_NAME, normalizedEncoding);
}
}
}
|
@Test
public void testSetCharsetWithContentType() {
DefaultCxfRsBinding cxfRsBinding = new DefaultCxfRsBinding();
Exchange exchange = new DefaultExchange(context);
exchange.getIn().setHeader(Exchange.CONTENT_TYPE, "text/xml;charset=ISO-8859-1");
cxfRsBinding.setCharsetWithContentType(exchange);
String charset = ExchangeHelper.getCharsetName(exchange);
assertEquals("ISO-8859-1", charset, "Get a wrong charset");
exchange.getIn().setHeader(Exchange.CONTENT_TYPE, "text/xml");
cxfRsBinding.setCharsetWithContentType(exchange);
charset = ExchangeHelper.getCharsetName(exchange);
assertEquals("UTF-8", charset, "Get a worng charset name");
}
|
@Override
public Map<String, Metric> getMetrics() {
final Map<String, Metric> gauges = new HashMap<>();
gauges.put("loaded", (Gauge<Long>) mxBean::getTotalLoadedClassCount);
gauges.put("unloaded", (Gauge<Long>) mxBean::getUnloadedClassCount);
return gauges;
}
|
@Test
public void unLoadedGauge() {
final Gauge gauge = (Gauge) gauges.getMetrics().get("unloaded");
assertThat(gauge.getValue()).isEqualTo(1L);
}
|
@Override
public void cancel() {
synchronized (monitor) {
if (!periodicExecutionCancellation.isPresent()) {
throw new IllegalStateException("setPeriodicExecutionCancellationCallback has not been called before cancel");
}
cancelled = true;
if (running) return;
}
runlet.close();
periodicExecutionCancellation.get().run();
}
|
@Test
public void testCancelWhileIdle() {
// Cancel while runlet is not running and verify closure and executor cancellation
cancellable.cancel();
assertFalse(executor.isExecutionRunning());
assertTrue(runlet.isClosed());
// Ensure a spurious run is ignored.
executor.runAsync();
executor.runToCompletion(3);
assertEquals(2, runlet.getRunsStarted());
}
|
@Override
public ConfigFileList getConfigFiles(String pluginId, final String destinationFolder, final Collection<CRConfigurationProperty> configurations) {
String resolvedExtensionVersion = pluginManager.resolveExtensionVersion(pluginId, CONFIG_REPO_EXTENSION, goSupportedVersions);
if (resolvedExtensionVersion.equals("1.0") || resolvedExtensionVersion.equals("2.0")) {
return ConfigFileList.withError("Unsupported Operation", "This plugin version does not support list config files");
}
return pluginRequestHelper.submitRequest(pluginId, REQUEST_CONFIG_FILES, new DefaultPluginInteractionCallback<>() {
@Override
public String requestBody(String resolvedExtensionVersion) {
return messageHandlerMap.get(resolvedExtensionVersion).requestMessageConfigFiles(destinationFolder, configurations);
}
@Override
public ConfigFileList onSuccess(String responseBody, Map<String, String> responseHeaders, String resolvedExtensionVersion) {
return messageHandlerMap.get(resolvedExtensionVersion).responseMessageForConfigFiles(responseBody);
}
});
}
|
@Test
    public void shouldGracefullyHandleV1AndV2Incompatibility() {
ConfigFileList expected = ConfigFileList.withError("Unsupported Operation", "This plugin version does not support list config files");
when(pluginManager.resolveExtensionVersion(PLUGIN_ID, CONFIG_REPO_EXTENSION, new ArrayList<>(List.of("1.0", "2.0", "3.0")))).thenReturn("1.0");
ConfigFileList responseV1 = extension.getConfigFiles(PLUGIN_ID, "dir", null);
assertTrue(responseV1.hasErrors(), "should have errors");
assertThat(responseV1.getErrors().getErrorsAsText(), is(expected.getErrors().getErrorsAsText()));
when(pluginManager.resolveExtensionVersion(PLUGIN_ID, CONFIG_REPO_EXTENSION, new ArrayList<>(List.of("1.0", "2.0", "3.0")))).thenReturn("2.0");
ConfigFileList responseV2 = extension.getConfigFiles(PLUGIN_ID, "dir", null);
assertTrue(responseV2.hasErrors(), "should have errors");
assertThat(responseV2.getErrors().getErrorsAsText(), is(expected.getErrors().getErrorsAsText()));
}
|
public String get(String key) {
return properties.getProperty(key);
}
|
@Test
public void testGet_whenKeyExisting() {
Properties props = new Properties();
props.setProperty("key1", "value1");
props.setProperty("key2", "value2");
HazelcastProperties properties = new HazelcastProperties(props);
assertEquals("value1", properties.get("key1"));
}
|
public static void validateCardSecurityVsCardAccess(SecurityInfos cardSecurity, int caKeyReference, int paceVersion,
int taVersion) {
Assert.notNull(cardSecurity, "cardSecurity may not be null");
if (caKeyReference != cardSecurity.getCaKeyId() || paceVersion != cardSecurity.getPaceVersion()
|| taVersion != cardSecurity.getTaVersion()) {
logger.error("the card info and the card security do not match.");
throw new ClientException("The card info and the card security do not match.");
}
}
|
@Test
public void validateCardSecurityVsCardAccessCaFail() {
ClientException thrown = assertThrows(ClientException.class, () -> CardValidations.validateCardSecurityVsCardAccess(efCardSecurity, 2, 1, 1));
assertEquals("The card info and the card security do not match.", thrown.getMessage());
}
|
@Override
public List<?> deserialize(final String topic, final byte[] bytes) {
if (bytes == null) {
return null;
}
try {
final String recordCsvString = new String(bytes, StandardCharsets.UTF_8);
final List<CSVRecord> csvRecords = CSVParser.parse(recordCsvString, csvFormat)
.getRecords();
if (csvRecords.isEmpty()) {
throw new SerializationException("No fields in record");
}
final CSVRecord csvRecord = csvRecords.get(0);
if (csvRecord == null || csvRecord.size() == 0) {
throw new SerializationException("No fields in record.");
}
SerdeUtils.throwOnColumnCountMismatch(parsers.size(), csvRecord.size(), false, topic);
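      // Parse each column with its positional parser; null or empty fields deserialize to null.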
final List<Object> values = new ArrayList<>(parsers.size());
final Iterator<Parser> pIt = parsers.iterator();
for (int i = 0; i < csvRecord.size(); i++) {
final String value = csvRecord.get(i);
final Parser parser = pIt.next();
final Object parsed = value == null || value.isEmpty()
? null
: parser.parse(value);
values.add(parsed);
}
return values;
} catch (final Exception e) {
throw new SerializationException("Error deserializing delimited", e);
}
}
|
@Test
public void shouldThrowOnNegativeTime() {
// Given:
KsqlDelimitedDeserializer deserializer = createDeserializer(persistenceSchema(
column(
"ids",
SqlTypes.TIME
)
));
final byte[] bytes = "-5".getBytes(StandardCharsets.UTF_8);
// When:
final Exception e = assertThrows(
SerializationException.class,
() -> deserializer.deserialize("", bytes)
);
// Then:
assertThat(e.getCause().getMessage(),
containsString("Time values must use number of milliseconds greater than 0 and less than 86400000."));
}
|
public static TrackPair of(Track track1, Track track2) {
return new TrackPair(track1, track2);
}
|
@Test
public void testOf_nullInput_1st() {
assertThrows(
NullPointerException.class,
() -> TrackPair.of(null, A_TRACK)
);
}
|
@Override
public void execute(SensorContext context) {
Set<String> reportPaths = loadReportPaths();
Map<String, SarifImportResults> filePathToImportResults = new HashMap<>();
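    // Process each report independently: a missing file aborts the analysis, while any other
    // failure is logged and skipped so the remaining reports are still imported.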
for (String reportPath : reportPaths) {
try {
SarifImportResults sarifImportResults = processReport(context, reportPath);
filePathToImportResults.put(reportPath, sarifImportResults);
} catch (NoSuchFileException e) {
throw MessageException.of(format("SARIF report file not found: %s", e.getFile()));
} catch (Exception exception) {
LOG.warn("Failed to process SARIF report from file '{}', error: '{}'", reportPath, exception.getMessage());
}
}
filePathToImportResults.forEach(SarifIssuesImportSensor::displayResults);
}
|
@Test
public void execute_whenMultipleFilesAreSpecified_shouldImportResults() throws NoSuchFileException {
sensorSettings.setProperty("sonar.sarifReportPaths", SARIF_REPORT_PATHS_PARAM);
ReportAndResults reportAndResults1 = mockSuccessfulReportAndResults(FILE_1);
ReportAndResults reportAndResults2 = mockSuccessfulReportAndResults(FILE_2);
SarifIssuesImportSensor sensor = new SarifIssuesImportSensor(sarifSerializer, sarifImporter, sensorSettings.asConfig());
sensor.execute(sensorContext);
verify(sarifImporter).importSarif(reportAndResults1.getSarifReport());
verify(sarifImporter).importSarif(reportAndResults2.getSarifReport());
assertSummaryIsCorrectlyDisplayedForSuccessfulFile(FILE_1, reportAndResults1.getSarifImportResults());
assertSummaryIsCorrectlyDisplayedForSuccessfulFile(FILE_2, reportAndResults2.getSarifImportResults());
}
|
@Override
public void validate(Context context) {
context.model().getContainerClusters().forEach((id, cluster) -> {
Http http = cluster.getHttp();
if (http != null) {
if (http.getAccessControl().isPresent()) {
verifyAccessControlFilterPresent(context, http);
}
}
});
}
|
@Test
void validator_accepts_non_empty_access_control_filter_chain() throws IOException, SAXException {
DeployState deployState = createDeployState();
VespaModel model = new VespaModel(
MapConfigModelRegistry.createFromList(new ModelBuilderAddingAccessControlFilter()),
deployState);
ValidationTester.validate(new AccessControlFilterValidator(), model, deployState);
}
|
public static TraceTransferBean encoderFromContextBean(TraceContext ctx) {
if (ctx == null) {
return null;
}
        // build the transfer entity content bean for this message trace
TraceTransferBean transferBean = new TraceTransferBean();
StringBuilder sb = new StringBuilder(256);
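        // Each trace type serializes its fields joined by CONTENT_SPLITOR; every record is
        // terminated by FIELD_SPLITOR.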
switch (ctx.getTraceType()) {
case Pub: {
TraceBean bean = ctx.getTraceBeans().get(0);
//append the content of context and traceBean to transferBean's TransData
sb.append(ctx.getTraceType()).append(TraceConstants.CONTENT_SPLITOR)//
.append(ctx.getTimeStamp()).append(TraceConstants.CONTENT_SPLITOR)//
.append(ctx.getRegionId()).append(TraceConstants.CONTENT_SPLITOR)//
.append(ctx.getGroupName()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getTopic()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getMsgId()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getTags()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getKeys()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getStoreHost()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getBodyLength()).append(TraceConstants.CONTENT_SPLITOR)//
.append(ctx.getCostTime()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getMsgType().ordinal()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getOffsetMsgId()).append(TraceConstants.CONTENT_SPLITOR)//
.append(ctx.isSuccess()).append(TraceConstants.FIELD_SPLITOR);//
}
break;
case SubBefore: {
for (TraceBean bean : ctx.getTraceBeans()) {
sb.append(ctx.getTraceType()).append(TraceConstants.CONTENT_SPLITOR)//
.append(ctx.getTimeStamp()).append(TraceConstants.CONTENT_SPLITOR)//
.append(ctx.getRegionId()).append(TraceConstants.CONTENT_SPLITOR)//
.append(ctx.getGroupName()).append(TraceConstants.CONTENT_SPLITOR)//
.append(ctx.getRequestId()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getMsgId()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getRetryTimes()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getKeys()).append(TraceConstants.FIELD_SPLITOR);//
}
}
break;
case SubAfter: {
for (TraceBean bean : ctx.getTraceBeans()) {
sb.append(ctx.getTraceType()).append(TraceConstants.CONTENT_SPLITOR)//
.append(ctx.getRequestId()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getMsgId()).append(TraceConstants.CONTENT_SPLITOR)//
.append(ctx.getCostTime()).append(TraceConstants.CONTENT_SPLITOR)//
.append(ctx.isSuccess()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getKeys()).append(TraceConstants.CONTENT_SPLITOR)//
.append(ctx.getContextCode()).append(TraceConstants.CONTENT_SPLITOR);
if (!ctx.getAccessChannel().equals(AccessChannel.CLOUD)) {
sb.append(ctx.getTimeStamp()).append(TraceConstants.CONTENT_SPLITOR);
sb.append(ctx.getGroupName());
}
sb.append(TraceConstants.FIELD_SPLITOR);
}
}
break;
case EndTransaction: {
TraceBean bean = ctx.getTraceBeans().get(0);
sb.append(ctx.getTraceType()).append(TraceConstants.CONTENT_SPLITOR)//
.append(ctx.getTimeStamp()).append(TraceConstants.CONTENT_SPLITOR)//
.append(ctx.getRegionId()).append(TraceConstants.CONTENT_SPLITOR)//
.append(ctx.getGroupName()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getTopic()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getMsgId()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getTags()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getKeys()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getStoreHost()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getMsgType().ordinal()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getTransactionId()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.getTransactionState().name()).append(TraceConstants.CONTENT_SPLITOR)//
.append(bean.isFromTransactionCheck()).append(TraceConstants.FIELD_SPLITOR);
}
break;
default:
}
transferBean.setTransData(sb.toString());
for (TraceBean bean : ctx.getTraceBeans()) {
transferBean.getTransKey().add(bean.getMsgId());
if (bean.getKeys() != null && bean.getKeys().length() > 0) {
String[] keys = bean.getKeys().split(MessageConst.KEY_SEPARATOR);
transferBean.getTransKey().addAll(Arrays.asList(keys));
}
}
return transferBean;
}
|
@Test
public void testSubBeforeTraceDataFormatTest() {
TraceContext subBeforeContext = new TraceContext();
subBeforeContext.setTraceType(TraceType.SubBefore);
subBeforeContext.setTimeStamp(time);
subBeforeContext.setRegionId("Default-region");
subBeforeContext.setGroupName("GroupName-test");
subBeforeContext.setRequestId("3455848576927");
TraceBean bean = new TraceBean();
bean.setMsgId("AC1415116D1418B4AAC217FE1B4E0000");
bean.setRetryTimes(0);
bean.setKeys("keys");
subBeforeContext.setTraceBeans(new ArrayList<>(1));
subBeforeContext.getTraceBeans().add(bean);
TraceTransferBean traceTransferBean = TraceDataEncoder.encoderFromContextBean(subBeforeContext);
String transData = traceTransferBean.getTransData();
Assert.assertNotNull(transData);
String[] items = transData.split(String.valueOf(TraceConstants.CONTENT_SPLITOR));
Assert.assertEquals(8, items.length);
}
|
public AbstractRequestBuilder<K, V, R> addReqParam(String key, Object value)
{
ArgumentUtil.notNull(value, "value");
return addParam(key, value);
}
|
@Test
public void testAddReqParamWithNullValue()
{
final AbstractRequestBuilder<?, ?, ?> builder = new DummyAbstractRequestBuilder();
try
{
builder.addReqParam("a", null);
Assert.fail("addReqParam should not allow null values");
}
catch (NullPointerException e)
        {
            // expected
        }
}
|
@Override
public PageData<WidgetTypeInfo> findSystemWidgetTypes(WidgetTypeFilter widgetTypeFilter, PageLink pageLink) {
boolean deprecatedFilterEnabled = !DeprecatedFilter.ALL.equals(widgetTypeFilter.getDeprecatedFilter());
boolean deprecatedFilterBool = DeprecatedFilter.DEPRECATED.equals(widgetTypeFilter.getDeprecatedFilter());
boolean widgetTypesEmpty = widgetTypeFilter.getWidgetTypes() == null || widgetTypeFilter.getWidgetTypes().isEmpty();
return DaoUtil.toPageData(
widgetTypeInfoRepository
.findSystemWidgetTypes(
NULL_UUID,
pageLink.getTextSearch(),
widgetTypeFilter.isFullSearch(),
deprecatedFilterEnabled,
deprecatedFilterBool,
widgetTypesEmpty,
widgetTypeFilter.getWidgetTypes() == null ? Collections.emptyList() : widgetTypeFilter.getWidgetTypes(),
widgetTypeFilter.isScadaFirst(),
DaoUtil.toPageable(pageLink, WidgetTypeInfoEntity.SEARCH_COLUMNS_MAP)));
}
|
@Test
public void testTagsSearchInFindBySystemWidgetTypes() {
for (var entry : SHOULD_FIND_SEARCH_TO_TAGS_MAP.entrySet()) {
String searchText = entry.getKey();
String[] tags = entry.getValue();
WidgetTypeDetails savedWidgetType = createAndSaveWidgetType(TenantId.SYS_TENANT_ID, WIDGET_TYPE_COUNT + 1, tags);
PageData<WidgetTypeInfo> widgetTypes = widgetTypeDao.findSystemWidgetTypes(
WidgetTypeFilter.builder()
.tenantId(TenantId.SYS_TENANT_ID)
.fullSearch(true)
.deprecatedFilter(DeprecatedFilter.ALL)
.widgetTypes(null).build(),
new PageLink(10, 0, searchText)
);
assertThat(widgetTypes.getData()).hasSize(1);
assertThat(widgetTypes.getData().get(0).getId()).isEqualTo(savedWidgetType.getId());
widgetTypeDao.removeById(TenantId.SYS_TENANT_ID, savedWidgetType.getUuidId());
}
for (var entry : SHOULDNT_FIND_SEARCH_TO_TAGS_MAP.entrySet()) {
String searchText = entry.getKey();
String[] tags = entry.getValue();
WidgetTypeDetails savedWidgetType = createAndSaveWidgetType(TenantId.SYS_TENANT_ID, WIDGET_TYPE_COUNT + 1, tags);
PageData<WidgetTypeInfo> widgetTypes = widgetTypeDao.findSystemWidgetTypes(
WidgetTypeFilter.builder()
.tenantId(TenantId.SYS_TENANT_ID)
.fullSearch(true)
.deprecatedFilter(DeprecatedFilter.ALL)
.widgetTypes(null).build(),
new PageLink(10, 0, searchText)
);
assertThat(widgetTypes.getData()).hasSize(0);
widgetTypeDao.removeById(TenantId.SYS_TENANT_ID, savedWidgetType.getUuidId());
}
}
|
@Override
public void close() {
super.close();
        // close the other classloaders in the list
for (FlinkUserCodeClassLoader classLoader : originClassLoaders) {
try {
classLoader.close();
} catch (IOException e) {
LOG.error("Failed to close the origin classloader.", e);
}
}
originClassLoaders.clear();
}
|
@Test
public void testClassLoadingByAddURL() throws Exception {
Configuration configuration = new Configuration();
final ClientWrapperClassLoader classLoader =
new ClientWrapperClassLoader(
ClientClassloaderUtil.buildUserClassLoader(
Collections.emptyList(),
getClass().getClassLoader(),
configuration),
configuration);
// test class loader before add jar url to ClassLoader
assertClassNotFoundException(GENERATED_LOWER_UDF_CLASS, classLoader);
// add jar url to ClassLoader
classLoader.addURL(userJar.toURI().toURL());
assertEquals(1, classLoader.getURLs().length);
final Class<?> clazz1 = Class.forName(GENERATED_LOWER_UDF_CLASS, false, classLoader);
final Class<?> clazz2 = Class.forName(GENERATED_LOWER_UDF_CLASS, false, classLoader);
assertEquals(clazz1, clazz2);
classLoader.close();
}
|
public static Builder newBuilder() {
return new Builder();
}
|
@Test void noRulesOk() {
HttpRuleSampler.newBuilder().build();
}
|
public static <T> T createRMProxy(final Configuration configuration,
final Class<T> protocol) throws IOException {
long rmConnectWait =
configuration.getLong(
YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS,
YarnConfiguration.DEFAULT_RESOURCEMANAGER_CONNECT_MAX_WAIT_MS);
long rmRetryInterval =
configuration.getLong(
YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS,
YarnConfiguration
.DEFAULT_RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS);
long nmRmConnectWait =
configuration.getLong(
YarnConfiguration.NM_RESOURCEMANAGER_CONNECT_MAX_WAIT_MS,
rmConnectWait);
long nmRmRetryInterval =
configuration.getLong(
YarnConfiguration.NM_RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS,
rmRetryInterval);
ServerRMProxy<T> serverRMProxy = new ServerRMProxy<>();
return createRMProxy(configuration, protocol, serverRMProxy,
nmRmConnectWait, nmRmRetryInterval);
}
|
@Test
public void testDistributedProtocol() {
YarnConfiguration conf = new YarnConfiguration();
try {
ServerRMProxy.createRMProxy(conf, DistributedSchedulingAMProtocol.class);
} catch (Exception e) {
Assert.fail("DistributedSchedulingAMProtocol fail in non HA");
}
// HA is enabled
conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
conf.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2");
conf.set(HAUtil.addSuffix(YarnConfiguration.RM_HOSTNAME, "rm1"), "0.0.0.0");
conf.set(HAUtil.addSuffix(YarnConfiguration.RM_HOSTNAME, "rm2"), "0.0.0.0");
try {
ServerRMProxy.createRMProxy(conf, DistributedSchedulingAMProtocol.class);
} catch (Exception e) {
Assert.fail("DistributedSchedulingAMProtocol fail in HA");
}
}
|
public static ClusterAllocationDiskSettings create(boolean enabled, String low, String high, String floodStage) {
if (!enabled) {
return ClusterAllocationDiskSettings.create(enabled, null);
}
return ClusterAllocationDiskSettings.create(enabled, createWatermarkSettings(low, high, floodStage));
}
|
@Test
public void createAbsoluteValueWatermarkSettingsWithoutFloodStage() throws Exception {
ClusterAllocationDiskSettings clusterAllocationDiskSettings = ClusterAllocationDiskSettingsFactory.create(true, "10Gb", "5Gb", "");
assertThat(clusterAllocationDiskSettings).isInstanceOf(ClusterAllocationDiskSettings.class);
assertThat(clusterAllocationDiskSettings.ThresholdEnabled()).isTrue();
assertThat(clusterAllocationDiskSettings.watermarkSettings()).isInstanceOf(AbsoluteValueWatermarkSettings.class);
AbsoluteValueWatermarkSettings settings = (AbsoluteValueWatermarkSettings) clusterAllocationDiskSettings.watermarkSettings();
assertThat(settings.type()).isEqualTo(WatermarkSettings.SettingsType.ABSOLUTE);
assertThat(settings.low()).isInstanceOf(ByteSize.class);
assertThat(settings.low().getBytes()).isEqualTo(10737418240L);
assertThat(settings.high()).isInstanceOf(ByteSize.class);
assertThat(settings.high().getBytes()).isEqualTo(5368709120L);
assertThat(settings.floodStage()).isNull();
}
|
@SuppressWarnings("checkstyle:MissingSwitchDefault")
@Override
protected void doCommit(TableMetadata base, TableMetadata metadata) {
int version = currentVersion() + 1;
CommitStatus commitStatus = CommitStatus.FAILURE;
/* This method adds no fs scheme, and it persists in HTS that way. */
final String newMetadataLocation = rootMetadataFileLocation(metadata, version);
HouseTable houseTable = HouseTable.builder().build();
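    // Commit flow: stamp bookkeeping properties, apply any snapshot delta, write the new
    // metadata file, then persist the pointer in the house table; failures are classified
    // into FAILURE/UNKNOWN below.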
try {
      // Now that we have the metadata location, stamp it into the metadata properties.
Map<String, String> properties = new HashMap<>(metadata.properties());
failIfRetryUpdate(properties);
String currentTsString = String.valueOf(Instant.now(Clock.systemUTC()).toEpochMilli());
properties.put(getCanonicalFieldName("lastModifiedTime"), currentTsString);
if (base == null) {
properties.put(getCanonicalFieldName("creationTime"), currentTsString);
}
properties.put(
getCanonicalFieldName("tableVersion"),
properties.getOrDefault(
getCanonicalFieldName("tableLocation"), CatalogConstants.INITIAL_VERSION));
properties.put(getCanonicalFieldName("tableLocation"), newMetadataLocation);
String serializedSnapshotsToPut = properties.remove(CatalogConstants.SNAPSHOTS_JSON_KEY);
String serializedSnapshotRefs = properties.remove(CatalogConstants.SNAPSHOTS_REFS_KEY);
boolean isStageCreate =
Boolean.parseBoolean(properties.remove(CatalogConstants.IS_STAGE_CREATE_KEY));
logPropertiesMap(properties);
TableMetadata updatedMetadata = metadata.replaceProperties(properties);
if (serializedSnapshotsToPut != null) {
List<Snapshot> snapshotsToPut =
SnapshotsUtil.parseSnapshots(fileIO, serializedSnapshotsToPut);
Pair<List<Snapshot>, List<Snapshot>> snapshotsDiff =
SnapshotsUtil.symmetricDifferenceSplit(snapshotsToPut, updatedMetadata.snapshots());
List<Snapshot> appendedSnapshots = snapshotsDiff.getFirst();
List<Snapshot> deletedSnapshots = snapshotsDiff.getSecond();
snapshotInspector.validateSnapshotsUpdate(
updatedMetadata, appendedSnapshots, deletedSnapshots);
Map<String, SnapshotRef> snapshotRefs =
serializedSnapshotRefs == null
? new HashMap<>()
: SnapshotsUtil.parseSnapshotRefs(serializedSnapshotRefs);
updatedMetadata =
maybeAppendSnapshots(updatedMetadata, appendedSnapshots, snapshotRefs, true);
updatedMetadata = maybeDeleteSnapshots(updatedMetadata, deletedSnapshots);
}
final TableMetadata updatedMtDataRef = updatedMetadata;
metricsReporter.executeWithStats(
() ->
TableMetadataParser.write(updatedMtDataRef, io().newOutputFile(newMetadataLocation)),
InternalCatalogMetricsConstant.METADATA_UPDATE_LATENCY);
houseTable = houseTableMapper.toHouseTable(updatedMetadata);
if (!isStageCreate) {
houseTableRepository.save(houseTable);
} else {
        /**
         * Refresh the current metadata for staged tables from the newly created metadata file and
         * disable "forced refresh" in {@link OpenHouseInternalTableOperations#commit(TableMetadata,
         * TableMetadata)}
         */
refreshFromMetadataLocation(newMetadataLocation);
}
commitStatus = CommitStatus.SUCCESS;
} catch (InvalidIcebergSnapshotException e) {
throw new BadRequestException(e, e.getMessage());
} catch (CommitFailedException e) {
throw e;
} catch (HouseTableCallerException
| HouseTableNotFoundException
| HouseTableConcurrentUpdateException e) {
throw new CommitFailedException(e);
} catch (Throwable persistFailure) {
      // Try to reconnect and determine the commit status for an unknown exception
log.error(
"Encounter unexpected error while updating metadata.json for table:" + tableIdentifier,
persistFailure);
commitStatus = checkCommitStatus(newMetadataLocation, metadata);
switch (commitStatus) {
case SUCCESS:
log.debug("Calling doCommit succeeded");
break;
case FAILURE:
          // Logging the error and throwing the exception must co-exist, given that the exception
          // handler in org.apache.iceberg.BaseMetastoreCatalog.BaseMetastoreCatalogTableBuilder.create
          // swallows the nested exception information.
log.error("Exception details:", persistFailure);
throw new CommitFailedException(
persistFailure,
String.format(
"Persisting metadata file %s at version %s for table %s failed while persisting to house table",
newMetadataLocation, version, GSON.toJson(houseTable)));
case UNKNOWN:
throw new CommitStateUnknownException(persistFailure);
}
} finally {
switch (commitStatus) {
case FAILURE:
metricsReporter.count(InternalCatalogMetricsConstant.COMMIT_FAILED_CTR);
break;
case UNKNOWN:
metricsReporter.count(InternalCatalogMetricsConstant.COMMIT_STATE_UNKNOWN);
break;
default:
break; /*should never happen, kept to silence SpotBugs*/
}
}
}
|
@Test
void testDoCommitAppendSnapshotsInitialVersion() throws IOException {
List<Snapshot> testSnapshots = IcebergTestUtil.getSnapshots();
Map<String, String> properties = new HashMap<>(BASE_TABLE_METADATA.properties());
try (MockedStatic<TableMetadataParser> ignoreWriteMock =
Mockito.mockStatic(TableMetadataParser.class)) {
properties.put(
CatalogConstants.SNAPSHOTS_JSON_KEY, SnapshotsUtil.serializedSnapshots(testSnapshots));
properties.put(
CatalogConstants.SNAPSHOTS_REFS_KEY,
SnapshotsUtil.serializeMap(
IcebergTestUtil.obtainSnapshotRefsFromSnapshot(
testSnapshots.get(testSnapshots.size() - 1))));
TableMetadata metadata = BASE_TABLE_METADATA.replaceProperties(properties);
openHouseInternalTableOperations.doCommit(BASE_TABLE_METADATA, metadata);
Mockito.verify(mockHouseTableMapper).toHouseTable(tblMetadataCaptor.capture());
Map<String, String> updatedProperties = tblMetadataCaptor.getValue().properties();
Assertions.assertEquals(
4,
updatedProperties.size()); /*location, lastModifiedTime, version and appended_snapshots*/
Assertions.assertEquals(
"INITIAL_VERSION", updatedProperties.get(getCanonicalFieldName("tableVersion")));
Assertions.assertEquals(
testSnapshots.stream()
.map(s -> Long.toString(s.snapshotId()))
.collect(Collectors.joining(",")),
updatedProperties.get(getCanonicalFieldName("appended_snapshots")));
Assertions.assertTrue(updatedProperties.containsKey(getCanonicalFieldName("tableLocation")));
Mockito.verify(mockHouseTableRepository, Mockito.times(1)).save(Mockito.eq(mockHouseTable));
}
}
|
@GetMapping("/find/username")
@PreAuthorize("isAnonymous()")
public ResponseEntity<?> findUsername(@Validated PhoneVerificationDto.VerifyCodeReq request) {
return ResponseEntity.ok(SuccessResponse.from("user", authCheckUseCase.findUsername(request)));
}
|
@Test
@DisplayName("일반 회원의 휴대폰 번호로 아이디를 찾을 때 200 응답을 반환한다.")
void findUsername() throws Exception {
// given
given(authCheckUseCase.findUsername(new PhoneVerificationDto.VerifyCodeReq(inputPhone, code))).willReturn(
new AuthFindDto.FindUsernameRes(expectedUsername));
// when
ResultActions resultActions = findUsernameRequest(inputPhone, code);
// then
resultActions
.andExpect(status().isOk())
.andExpect(jsonPath("$.data.user.username").value(expectedUsername));
}
|
public String[] getFunctions() {
return getFunctions(
catalogManager.getCurrentCatalog(), catalogManager.getCurrentDatabase());
}
|
@Test
void testGetBuiltInFunctions() {
Set<String> actual = new HashSet<>();
Collections.addAll(actual, functionCatalog.getFunctions());
Set<String> expected = new ModuleManager().listFunctions();
assertThat(actual.containsAll(expected)).isTrue();
}
|
@Override
public BufferedReader getReader() {
try {
return source.getReader();
} catch (IOException e) {
throw new IllegalStateException("Failed to read", e);
}
}
|
@Test
public void getReader() throws IOException {
BufferedReader reader = new BufferedReader(new StringReader("foo"));
when(source.getReader()).thenReturn(reader);
assertThat(underTest.getReader()).isEqualTo(reader);
}
|
@Internal
public void replaceAll(List<PTransformOverride> overrides) {
for (PTransformOverride override : overrides) {
replace(override);
}
checkNoMoreMatches(overrides);
}
|
@Test
public void testReplaceAll() {
pipeline.enableAbandonedNodeEnforcement(false);
pipeline.apply("unbounded", GenerateSequence.from(0));
pipeline.apply("bounded", GenerateSequence.from(0).to(100));
pipeline.replaceAll(
ImmutableList.of(
PTransformOverride.of(
application -> application.getTransform() instanceof GenerateSequence,
new GenerateSequenceToCreateOverride()),
PTransformOverride.of(
application -> application.getTransform() instanceof Create.Values,
new CreateValuesToEmptyFlattenOverride())));
pipeline.traverseTopologically(
new PipelineVisitor.Defaults() {
@Override
public CompositeBehavior enterCompositeTransform(Node node) {
String fullName = node.getFullName();
if (fullName.equals("unbounded") || fullName.equals("bounded")) {
assertThat(node.getTransform(), Matchers.instanceOf(EmptyFlatten.class));
}
return CompositeBehavior.ENTER_TRANSFORM;
}
});
}
|
@Override
public OAuth2CodeDO consumeAuthorizationCode(String code) {
OAuth2CodeDO codeDO = oauth2CodeMapper.selectByCode(code);
if (codeDO == null) {
throw exception(OAUTH2_CODE_NOT_EXISTS);
}
if (DateUtils.isExpired(codeDO.getExpiresTime())) {
throw exception(OAUTH2_CODE_EXPIRE);
}
oauth2CodeMapper.deleteById(codeDO.getId());
return codeDO;
}
|
@Test
public void testConsumeAuthorizationCode_expired() {
        // prepare parameters
String code = "test_code";
        // mock data
OAuth2CodeDO codeDO = randomPojo(OAuth2CodeDO.class).setCode(code)
.setExpiresTime(LocalDateTime.now().minusDays(1));
oauth2CodeMapper.insert(codeDO);
        // invoke and assert
assertServiceException(() -> oauth2CodeService.consumeAuthorizationCode(code),
OAUTH2_CODE_EXPIRE);
}
|
public static SourceConfig validateUpdate(SourceConfig existingConfig, SourceConfig newConfig) {
SourceConfig mergedConfig = clone(existingConfig);
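        // Start from a deep copy of the existing config and overlay only the fields the caller
        // supplied; identity fields (tenant, namespace, name) must match.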
if (!existingConfig.getTenant().equals(newConfig.getTenant())) {
throw new IllegalArgumentException("Tenants differ");
}
if (!existingConfig.getNamespace().equals(newConfig.getNamespace())) {
throw new IllegalArgumentException("Namespaces differ");
}
if (!existingConfig.getName().equals(newConfig.getName())) {
throw new IllegalArgumentException("Function Names differ");
}
if (!StringUtils.isEmpty(newConfig.getClassName())) {
mergedConfig.setClassName(newConfig.getClassName());
}
if (!StringUtils.isEmpty(newConfig.getTopicName())) {
mergedConfig.setTopicName(newConfig.getTopicName());
}
if (!StringUtils.isEmpty(newConfig.getSerdeClassName())) {
mergedConfig.setSerdeClassName(newConfig.getSerdeClassName());
}
if (!StringUtils.isEmpty(newConfig.getSchemaType())) {
mergedConfig.setSchemaType(newConfig.getSchemaType());
}
if (newConfig.getConfigs() != null) {
mergedConfig.setConfigs(newConfig.getConfigs());
}
if (newConfig.getSecrets() != null) {
mergedConfig.setSecrets(newConfig.getSecrets());
}
if (!StringUtils.isEmpty(newConfig.getLogTopic())) {
mergedConfig.setLogTopic(newConfig.getLogTopic());
}
if (newConfig.getProcessingGuarantees() != null && !newConfig.getProcessingGuarantees()
.equals(existingConfig.getProcessingGuarantees())) {
throw new IllegalArgumentException("Processing Guarantees cannot be altered");
}
if (newConfig.getParallelism() != null) {
mergedConfig.setParallelism(newConfig.getParallelism());
}
if (newConfig.getResources() != null) {
mergedConfig
.setResources(ResourceConfigUtils.merge(existingConfig.getResources(), newConfig.getResources()));
}
if (!StringUtils.isEmpty(newConfig.getArchive())) {
mergedConfig.setArchive(newConfig.getArchive());
}
if (!StringUtils.isEmpty(newConfig.getRuntimeFlags())) {
mergedConfig.setRuntimeFlags(newConfig.getRuntimeFlags());
}
if (!StringUtils.isEmpty(newConfig.getCustomRuntimeOptions())) {
mergedConfig.setCustomRuntimeOptions(newConfig.getCustomRuntimeOptions());
}
if (isBatchSource(existingConfig) != isBatchSource(newConfig)) {
throw new IllegalArgumentException("Sources cannot be update between regular sources and batchsource");
}
if (newConfig.getBatchSourceConfig() != null) {
validateBatchSourceConfigUpdate(existingConfig.getBatchSourceConfig(), newConfig.getBatchSourceConfig());
mergedConfig.setBatchSourceConfig(newConfig.getBatchSourceConfig());
}
if (newConfig.getProducerConfig() != null) {
mergedConfig.setProducerConfig(newConfig.getProducerConfig());
}
return mergedConfig;
}
|
@Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "Namespaces differ")
public void testMergeDifferentNamespace() {
SourceConfig sourceConfig = createSourceConfig();
SourceConfig newSourceConfig = createUpdatedSourceConfig("namespace", "Different");
SourceConfigUtils.validateUpdate(sourceConfig, newSourceConfig);
}
|
@Override
public void createNode(K8sNode node) {
checkNotNull(node, ERR_NULL_NODE);
K8sNode intNode;
K8sNode extNode;
K8sNode localNode;
K8sNode tunNode;
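        // Assign a freshly generated datapath ID to any bridge the node does not yet declare,
        // rejecting duplicate bridge IDs at every step.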
if (node.intgBridge() == null) {
String deviceIdStr = genDpid(deviceIdCounter.incrementAndGet());
checkNotNull(deviceIdStr, ERR_NULL_DEVICE_ID);
intNode = node.updateIntgBridge(DeviceId.deviceId(deviceIdStr));
checkArgument(!hasIntgBridge(intNode.intgBridge(), intNode.hostname()),
NOT_DUPLICATED_MSG, intNode.intgBridge());
} else {
intNode = node;
checkArgument(!hasIntgBridge(intNode.intgBridge(), intNode.hostname()),
NOT_DUPLICATED_MSG, intNode.intgBridge());
}
if (intNode.extBridge() == null) {
String deviceIdStr = genDpid(deviceIdCounter.incrementAndGet());
checkNotNull(deviceIdStr, ERR_NULL_DEVICE_ID);
extNode = intNode.updateExtBridge(DeviceId.deviceId(deviceIdStr));
checkArgument(!hasExtBridge(extNode.extBridge(), extNode.hostname()),
NOT_DUPLICATED_MSG, extNode.extBridge());
} else {
extNode = intNode;
checkArgument(!hasExtBridge(extNode.extBridge(), extNode.hostname()),
NOT_DUPLICATED_MSG, extNode.extBridge());
}
if (node.localBridge() == null) {
String deviceIdStr = genDpid(deviceIdCounter.incrementAndGet());
checkNotNull(deviceIdStr, ERR_NULL_DEVICE_ID);
localNode = extNode.updateLocalBridge(DeviceId.deviceId(deviceIdStr));
checkArgument(!hasLocalBridge(localNode.localBridge(), localNode.hostname()),
NOT_DUPLICATED_MSG, localNode.localBridge());
} else {
localNode = extNode;
checkArgument(!hasLocalBridge(localNode.localBridge(), localNode.hostname()),
NOT_DUPLICATED_MSG, localNode.localBridge());
}
if (node.mode() == NORMAL) {
if (node.tunBridge() == null) {
String deviceIdStr = genDpid(deviceIdCounter.incrementAndGet());
checkNotNull(deviceIdStr, ERR_NULL_DEVICE_ID);
tunNode = localNode.updateTunBridge(DeviceId.deviceId(deviceIdStr));
checkArgument(!hasTunBridge(tunNode.tunBridge(), tunNode.hostname()),
NOT_DUPLICATED_MSG, tunNode.tunBridge());
} else {
tunNode = localNode;
checkArgument(!hasTunBridge(tunNode.tunBridge(), tunNode.hostname()),
NOT_DUPLICATED_MSG, tunNode.tunBridge());
}
nodeStore.createNode(tunNode);
} else {
nodeStore.createNode(localNode);
}
log.info(String.format(MSG_NODE, extNode.hostname(), MSG_CREATED));
}
|
@Test(expected = IllegalArgumentException.class)
public void testCreateDuplicateNode() {
target.createNode(MINION_1);
target.createNode(MINION_1);
}
|
@Override
public Optional<ReadError> read(DbFileSources.Line.Builder lineBuilder) {
if (readError == null) {
try {
processSymbols(lineBuilder);
} catch (RangeOffsetConverter.RangeOffsetConverterException e) {
readError = new ReadError(Data.SYMBOLS, lineBuilder.getLine());
LOG.warn(format("Inconsistency detected in Symbols data. Symbols will be ignored for file '%s'", file.getKey()), e);
}
}
return Optional.ofNullable(readError);
}
|
@Test
public void read_symbols_with_two_references_on_the_same_line() {
SymbolsLineReader symbolsLineReader = newReader(newSymbol(
newSingleLineTextRangeWithExpectedLabel(LINE_1, OFFSET_2, OFFSET_3, RANGE_LABEL_1),
newSingleLineTextRangeWithExpectedLabel(LINE_2, OFFSET_0, OFFSET_1, RANGE_LABEL_2),
newSingleLineTextRangeWithExpectedLabel(LINE_2, OFFSET_2, OFFSET_3, RANGE_LABEL_3)));
assertThat(symbolsLineReader.read(line1)).isEmpty();
assertThat(symbolsLineReader.read(line2)).isEmpty();
assertThat(line1.getSymbols()).isEqualTo(RANGE_LABEL_1 + ",1");
assertThat(line2.getSymbols()).isEqualTo(RANGE_LABEL_2 + ",1;" + RANGE_LABEL_3 + ",1");
}
|
public void convertQueueHierarchy(FSQueue queue) {
List<FSQueue> children = queue.getChildQueues();
final String queueName = queue.getName();
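    // Emit the capacity-scheduler equivalents of this fair-scheduler queue's settings,
    // then recurse into the children depth-first.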
emitChildQueues(queueName, children);
emitMaxAMShare(queueName, queue);
emitMaxParallelApps(queueName, queue);
emitMaxAllocations(queueName, queue);
emitPreemptionDisabled(queueName, queue);
emitChildCapacity(queue);
emitMaximumCapacity(queueName, queue);
emitSizeBasedWeight(queueName);
emitOrderingPolicy(queueName, queue);
checkMaxChildCapacitySetting(queue);
emitDefaultUserLimitFactor(queueName, children);
for (FSQueue childQueue : children) {
convertQueueHierarchy(childQueue);
}
}
|
@Test
public void testQueueOrderingPolicy() throws Exception {
converter = builder.build();
String absolutePath =
new File("src/test/resources/fair-scheduler-orderingpolicy.xml")
.getAbsolutePath();
yarnConfig.set(FairSchedulerConfiguration.ALLOCATION_FILE,
FILE_PREFIX + absolutePath);
fs.close();
fs = createFairScheduler();
rootQueue = fs.getQueueManager().getRootQueue();
converter.convertQueueHierarchy(rootQueue);
// root
assertEquals("root ordering policy", "fifo",
csConfig.getAppOrderingPolicy(ROOT).getConfigName());
assertEquals("root.default ordering policy", "fair",
csConfig.getAppOrderingPolicy(DEFAULT).getConfigName());
assertEquals("root.admins ordering policy", "fifo",
csConfig.getAppOrderingPolicy(ADMINS).getConfigName());
assertEquals("root.users ordering policy", "fifo",
csConfig.getAppOrderingPolicy(USERS).getConfigName());
// root.users
assertEquals("root.users.joe ordering policy", "fair",
csConfig.getAppOrderingPolicy(USERS_JOE).getConfigName());
assertEquals("root.users.john ordering policy", "fifo",
csConfig.getAppOrderingPolicy(USERS_JOHN).getConfigName());
// root.admins
assertEquals("root.admins.alice ordering policy", "fifo",
csConfig.getAppOrderingPolicy(ADMINS_ALICE).getConfigName());
assertEquals("root.admins.bob ordering policy", "fair",
csConfig.getAppOrderingPolicy(ADMINS_BOB).getConfigName());
}
|
static boolean isLocalhost(String hostname, String ipAddress) {
try {
return isLocalhostWithLoopbackIpAddress(hostname, ipAddress) || isLocalhostWithNonLoopbackIpAddress(ipAddress);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
|
@Test
public void shouldDetermineIfAddressIsLocal() throws UnknownHostException {
InetAddress local;
try {
local = InetAddress.getLocalHost();
}
catch (UnknownHostException e) {
local = InetAddress.getByName("localhost");
}
assertThat("Localhost (" + local.getHostName() + ") should be a local address.", SystemUtil.isLocalhost(local.getHostAddress()), is(true));
}
|
@Override
public Long dbSize(RedisClusterNode node) {
return execute(node, RedisCommands.DBSIZE);
}
|
@Test
public void testDbSize() {
RedisClusterNode master = getFirstMaster();
Long size = connection.dbSize(master);
assertThat(size).isZero();
}
|
public static Comparator<StructLike> forType(Types.StructType struct) {
return new StructLikeComparator(struct);
}
|
@Test
public void testList() {
assertComparesCorrectly(
Comparators.forType(Types.ListType.ofRequired(18, Types.IntegerType.get())),
ImmutableList.of(1, 1, 1),
ImmutableList.of(1, 1, 2));
assertComparesCorrectly(
Comparators.forType(Types.ListType.ofRequired(18, Types.IntegerType.get())),
ImmutableList.of(1, 1),
ImmutableList.of(1, 1, 1));
assertComparesCorrectly(
Comparators.forType(Types.ListType.ofOptional(18, Types.IntegerType.get())),
Collections.singletonList(null),
Collections.singletonList(1));
}
|
public boolean isProactiveSupportEnabled() {
if (properties == null) {
return false;
}
return getMetricsEnabled();
}
|
@Test
public void isProactiveSupportEnabledHTTPSOnly() {
// Given
Properties serverProperties = new Properties();
serverProperties.setProperty(
BaseSupportConfig.CONFLUENT_SUPPORT_METRICS_ENDPOINT_SECURE_ENABLE_CONFIG, "true");
BaseSupportConfig supportConfig = new TestSupportConfig(serverProperties);
// When/Then
assertTrue(supportConfig.isProactiveSupportEnabled());
}
|
@Override
public int read(ByteBuffer dst) throws IOException {
if (state == State.CLOSING) return -1;
else if (!ready()) return 0;
        // if we have unread decrypted data in appReadBuffer, read that into the dst buffer.
int read = 0;
if (appReadBuffer.position() > 0) {
read = readFromAppBuffer(dst);
}
boolean readFromNetwork = false;
boolean isClosed = false;
// Each loop reads at most once from the socket.
while (dst.remaining() > 0) {
int netread = 0;
netReadBuffer = Utils.ensureCapacity(netReadBuffer, netReadBufferSize());
if (netReadBuffer.remaining() > 0) {
netread = readFromSocketChannel();
if (netread > 0)
readFromNetwork = true;
}
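            // Unwrap every complete TLS record currently buffered; flip() switches netReadBuffer
            // to read mode for unwrap and compact() returns it to write mode afterwards.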
while (netReadBuffer.position() > 0) {
netReadBuffer.flip();
SSLEngineResult unwrapResult;
try {
unwrapResult = sslEngine.unwrap(netReadBuffer, appReadBuffer);
if (state == State.POST_HANDSHAKE && appReadBuffer.position() != 0) {
// For TLSv1.3, we have finished processing post-handshake messages since we are now processing data
state = State.READY;
}
} catch (SSLException e) {
// For TLSv1.3, handle SSL exceptions while processing post-handshake messages as authentication exceptions
if (state == State.POST_HANDSHAKE) {
state = State.HANDSHAKE_FAILED;
throw new SslAuthenticationException("Failed to process post-handshake messages", e);
} else
throw e;
}
netReadBuffer.compact();
// reject renegotiation if TLS < 1.3, key updates for TLS 1.3 are allowed
if (unwrapResult.getHandshakeStatus() != HandshakeStatus.NOT_HANDSHAKING &&
unwrapResult.getHandshakeStatus() != HandshakeStatus.FINISHED &&
unwrapResult.getStatus() == Status.OK &&
!sslEngine.getSession().getProtocol().equals(TLS13)) {
log.error("Renegotiation requested, but it is not supported, channelId {}, " +
"appReadBuffer pos {}, netReadBuffer pos {}, netWriteBuffer pos {} handshakeStatus {}", channelId,
appReadBuffer.position(), netReadBuffer.position(), netWriteBuffer.position(), unwrapResult.getHandshakeStatus());
throw renegotiationException();
}
if (unwrapResult.getStatus() == Status.OK) {
read += readFromAppBuffer(dst);
} else if (unwrapResult.getStatus() == Status.BUFFER_OVERFLOW) {
int currentApplicationBufferSize = applicationBufferSize();
appReadBuffer = Utils.ensureCapacity(appReadBuffer, currentApplicationBufferSize);
if (appReadBuffer.position() >= currentApplicationBufferSize) {
throw new IllegalStateException("Buffer overflow when available data size (" + appReadBuffer.position() +
") >= application buffer size (" + currentApplicationBufferSize + ")");
}
                    // appReadBuffer will be extended up to currentApplicationBufferSize.
                    // We need to read the existing content into dst before we can unwrap again. If there is
                    // no space in dst we can break here.
if (dst.hasRemaining())
read += readFromAppBuffer(dst);
else
break;
} else if (unwrapResult.getStatus() == Status.BUFFER_UNDERFLOW) {
int currentNetReadBufferSize = netReadBufferSize();
netReadBuffer = Utils.ensureCapacity(netReadBuffer, currentNetReadBufferSize);
if (netReadBuffer.position() >= currentNetReadBufferSize) {
throw new IllegalStateException("Buffer underflow when available data size (" + netReadBuffer.position() +
") > packet buffer size (" + currentNetReadBufferSize + ")");
}
break;
} else if (unwrapResult.getStatus() == Status.CLOSED) {
// If data has been read and unwrapped, return the data. Close will be handled on the next poll.
if (appReadBuffer.position() == 0 && read == 0)
throw new EOFException();
else {
isClosed = true;
break;
}
}
}
if (read == 0 && netread < 0)
throw new EOFException("EOF during read");
if (netread <= 0 || isClosed)
break;
}
updateBytesBuffered(readFromNetwork || read > 0);
// If data has been read and unwrapped, return the data even if end-of-stream, channel will be closed
// on a subsequent poll.
return read;
}
|
@Test
public void testScatteringRead() throws IOException {
SSLEngine sslEngine = mock(SSLEngine.class);
SelectionKey selectionKey = mock(SelectionKey.class);
SslTransportLayer sslTransportLayer = spy(new SslTransportLayer(
"test-channel",
selectionKey,
sslEngine,
mock(ChannelMetadataRegistry.class)
));
ByteBuffer mockSocket = ByteBuffer.wrap("Hello, World!".getBytes(StandardCharsets.UTF_8));
when(sslTransportLayer.read(any(ByteBuffer.class))).then(invocation -> {
ByteBuffer buf = invocation.getArgument(0);
int read = buf.remaining();
for (int i = 0; i < read; i++) {
buf.put(mockSocket.get());
}
return read;
});
ByteBuffer[] dsts = {
ByteBuffer.allocate(2),
ByteBuffer.allocate(3),
ByteBuffer.allocate(4)
};
assertEquals(7, sslTransportLayer.read(dsts, 1, 2));
assertArrayEquals("Hel".getBytes(StandardCharsets.UTF_8), dsts[1].array());
assertArrayEquals("lo, ".getBytes(StandardCharsets.UTF_8), dsts[2].array());
}
|
@Override
public Ingress ingress(String uid) {
checkArgument(!Strings.isNullOrEmpty(uid), ERR_NULL_INGRESS_UID);
return k8sIngressStore.ingress(uid);
}
|
@Test
public void testGetIngressByUid() {
createBasicIngresses();
assertNotNull("Ingress did not match", target.ingress(INGRESS_UID));
assertNull("Ingress did not match", target.ingress(UNKNOWN_UID));
}
|
@Override
public TypeDescriptor<Collection<T>> getEncodedTypeDescriptor() {
return new TypeDescriptor<Collection<T>>() {}.where(
new TypeParameter<T>() {}, getElemCoder().getEncodedTypeDescriptor());
}
|
@Test
public void testEncodedTypeDescriptor() throws Exception {
TypeDescriptor<Collection<Integer>> expectedTypeDescriptor =
new TypeDescriptor<Collection<Integer>>() {};
assertThat(TEST_CODER.getEncodedTypeDescriptor(), equalTo(expectedTypeDescriptor));
}
|
@Field
public void setExtractFontNames(boolean extractFontNames) {
defaultConfig.setExtractFontNames(extractFontNames);
}
|
@Test
public void testFontNameExtraction() throws Exception {
PDFParserConfig config = new PDFParserConfig();
config.setExtractFontNames(true);
ParseContext pc = new ParseContext();
pc.set(PDFParserConfig.class, config);
XMLResult r = getXML("testPDFVarious.pdf", pc);
assertContains("ABCDEE+Calibri", r.metadata.get(Font.FONT_NAME));
}
|
@Override
public ValidationException getOrThrowException() throws ValidationException {
// We skip schema field validation errors because they are CDAP oriented and don't affect
// anything in our case
List<ValidationFailure> schemaValidationFailures = new ArrayList<>();
for (ValidationFailure failure : failuresCollection) {
List<ValidationFailure.Cause> causes = failure.getCauses();
if (causes != null) {
for (ValidationFailure.Cause cause : causes) {
String inputField = cause.getAttribute(CauseAttributes.INPUT_SCHEMA_FIELD);
if (BatchContextImpl.DEFAULT_SCHEMA_FIELD_NAME.equals(inputField)) {
schemaValidationFailures.add(failure);
}
}
}
}
failuresCollection.removeAll(schemaValidationFailures);
if (failuresCollection.isEmpty()) {
return new ValidationException(this.failuresCollection);
}
throw new ValidationException(this.failuresCollection);
}
|
@Test
public void getOrThrowException() {
    // arrange
FailureCollectorWrapper failureCollectorWrapper = new FailureCollectorWrapper();
String errorMessage = "An error has occurred";
String expectedMessage = "Errors were encountered during validation. An error has occurred";
FailureCollectorWrapper emptyFailureCollectorWrapper = new FailureCollectorWrapper();
RuntimeException error = new RuntimeException(errorMessage);
failureCollectorWrapper.addFailure(error.getMessage(), null);
    // act && assert
ValidationException e =
assertThrows(
ValidationException.class, () -> failureCollectorWrapper.getOrThrowException());
assertEquals(expectedMessage, e.getMessage());
// A case when return ValidationException with empty collector
ArrayList<ValidationFailure> exceptionCollector =
emptyFailureCollectorWrapper.getValidationFailures();
assertEquals(0, exceptionCollector.size());
}
|
@Override
public AwsProxyResponse handle(Throwable ex) {
log.error("Called exception handler for:", ex);
        // adding a print stack trace in case we have no appender or we are running inside SAM local,
        // where we need the output to go to stderr.
ex.printStackTrace();
if (ex instanceof InvalidRequestEventException || ex instanceof InternalServerErrorException) {
return new AwsProxyResponse(500, HEADERS, getErrorJson(INTERNAL_SERVER_ERROR));
} else {
return new AwsProxyResponse(502, HEADERS, getErrorJson(GATEWAY_TIMEOUT_ERROR));
}
}
|
@Test
void typedHandle_InvalidRequestEventException_500State() {
AwsProxyResponse resp = exceptionHandler.handle(new InvalidRequestEventException(INVALID_REQUEST_MESSAGE, null));
assertNotNull(resp);
assertEquals(500, resp.getStatusCode());
}
|
@Override
public SessionStore<K, V> build() {
return new MeteredSessionStore<>(
maybeWrapCaching(maybeWrapLogging(storeSupplier.get())),
storeSupplier.metricsScope(),
keySerde,
valueSerde,
time);
}
|
@Test
public void shouldHaveChangeLoggingStoreWhenLoggingEnabled() {
setUp();
final SessionStore<String, String> store = builder
.withLoggingEnabled(Collections.emptyMap())
.build();
final StateStore wrapped = ((WrappedStateStore) store).wrapped();
assertThat(store, instanceOf(MeteredSessionStore.class));
assertThat(wrapped, instanceOf(ChangeLoggingSessionBytesStore.class));
assertThat(((WrappedStateStore) wrapped).wrapped(), CoreMatchers.equalTo(inner));
}
|
@Override
public boolean isGenerateSQLToken(final SQLStatementContext sqlStatementContext) {
return sqlStatementContext instanceof InsertStatementContext && (((InsertStatementContext) sqlStatementContext).getSqlStatement()).getOnDuplicateKeyColumns().isPresent();
}
|
@Test
void assertIsNotGenerateSQLTokenWithNotInsertStatement() {
assertFalse(generator.isGenerateSQLToken(mock(SelectStatementContext.class)));
}
|
public void executeTaskLater(final Runnable r, final long timeDelay) {
if (!isStopped()) {
this.scheduledExecutorService.schedule(r, timeDelay, TimeUnit.MILLISECONDS);
} else {
logger.warn("PullMessageServiceScheduledThread has shutdown");
}
}
|
@Test
public void testExecuteTaskLater() {
Runnable runnable = mock(Runnable.class);
pullMessageService.executeTaskLater(runnable, defaultTimeout);
pullMessageService.makeStop();
pullMessageService.executeTaskLater(runnable, defaultTimeout);
verify(executorService, times(1))
.schedule(any(Runnable.class),
eq(defaultTimeout),
eq(TimeUnit.MILLISECONDS));
}
|
@Override
public boolean isSubTypeOf(Class<?> ancestor) {
checkNotNull(ancestor);
return id.isSubTypeOf(ancestor);
}
|
@Test
public void testSubTypeOf() {
DiscreteResource discrete = Resources.discrete(D1, P1, VLAN1).resource();
assertThat(discrete.isSubTypeOf(DeviceId.class), is(true));
assertThat(discrete.isSubTypeOf(PortNumber.class), is(true));
assertThat(discrete.isSubTypeOf(VlanId.class), is(true));
assertThat(discrete.isSubTypeOf(Bandwidth.class), is(false));
}
|
public static String[] intersection(String[] arr1, String[] arr2) {
if (arr1 == null || arr2 == null) {
return null;
}
if (arr1.length == 0 || arr2.length == 0) {
return new String[0];
}
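        // retainAll keeps arr1's order (and duplicates) while filtering to the elements that
        // also appear in arr2.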
List<String> list = new ArrayList<>(Arrays.asList(arr1));
list.retainAll(Arrays.asList(arr2));
return list.toArray(new String[0]);
}
|
@Test
void testArrayIntersection() {
assertArrayEquals(arr("test"), StringUtil.intersection(arr("x", "test", "y", "z"), arr("a", "b", "test")));
assertArrayEquals(arr(""), StringUtil.intersection(arr("", "z"), arr("a", "")));
assertArrayEquals(arr(), StringUtil.intersection(arr("", "z"), arr("a")));
}
|
@Override
public ConnectClusterDetails clusterDetails() {
return clusterDetails;
}
|
@Test
public void kafkaClusterId() {
assertEquals(KAFKA_CLUSTER_ID, connectClusterState.clusterDetails().kafkaClusterId());
}
|
public StepInstance getStepInstance(
String workflowId,
long workflowInstanceId,
long workflowRunId,
String stepId,
String stepAttempt) {
return getStepInstanceFieldByIds(
StepInstanceField.ALL,
workflowId,
workflowInstanceId,
workflowRunId,
stepId,
stepAttempt,
this::maestroStepFromResult);
}
|
@Test
public void testGetStepInstanceWithInvalidAttempt() {
AssertHelper.assertThrows(
"cannot cast an invalid attempt id",
NumberFormatException.class,
"For input string: \"first\"",
() -> stepDao.getStepInstance(TEST_WORKFLOW_ID, 1, 1, "job1", "first"));
}
|
@Udf
public <T> Boolean contains(
@UdfParameter final String jsonArray,
@UdfParameter final T val
) {
try (JsonParser parser = PARSER_FACTORY.createParser(jsonArray)) {
if (parser.nextToken() != START_ARRAY) {
return false;
}
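      // Stream the array token by token; skipChildren() hops over nested objects/arrays so
      // only top-level elements are compared against val.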
while (parser.nextToken() != null) {
final JsonToken token = parser.currentToken();
if (token == null) {
return val == null;
} else if (token == END_ARRAY) {
return false;
}
parser.skipChildren();
if (TOKEN_COMPAT.getOrDefault(token, foo -> false).test(val)) {
if (token == VALUE_NULL
|| (val != null && Objects.equals(parser.readValueAs(val.getClass()), val))) {
return true;
}
}
}
return false;
} catch (final IOException e) {
return false;
}
}
|
@Test
public void shouldReturnFalseOnEmptyArray() {
assertEquals(false, jsonUdf.contains("[]", true));
assertEquals(false, jsonUdf.contains("[]", false));
assertEquals(false, jsonUdf.contains("[]", null));
assertEquals(false, jsonUdf.contains("[]", 1.0));
assertEquals(false, jsonUdf.contains("[]", 100));
assertEquals(false, jsonUdf.contains("[]", "abc"));
assertEquals(false, jsonUdf.contains("[]", ""));
}
|
@Override
public Collection<SQLToken> generateSQLTokens(final AlterTableStatementContext sqlStatementContext) {
String tableName = sqlStatementContext.getSqlStatement().getTable().getTableName().getIdentifier().getValue();
EncryptTable encryptTable = encryptRule.getEncryptTable(tableName);
Collection<SQLToken> result = new LinkedList<>(getAddColumnTokens(encryptTable, sqlStatementContext.getSqlStatement().getAddColumnDefinitions()));
result.addAll(getModifyColumnTokens(encryptTable, sqlStatementContext.getSqlStatement().getModifyColumnDefinitions()));
result.addAll(getChangeColumnTokens(encryptTable, sqlStatementContext.getSqlStatement().getChangeColumnDefinitions()));
List<SQLToken> dropColumnTokens = getDropColumnTokens(encryptTable, sqlStatementContext.getSqlStatement().getDropColumnDefinitions());
        String databaseType = sqlStatementContext.getDatabaseType().getType();
        if ("SQLServer".equals(databaseType)) {
            result.addAll(mergeDropColumnStatement(dropColumnTokens, "", ""));
        } else if ("Oracle".equals(databaseType)) {
result.addAll(mergeDropColumnStatement(dropColumnTokens, "(", ")"));
} else {
result.addAll(dropColumnTokens);
}
return result;
}
|
@Test
void assertChangeEncryptColumnGenerateSQLTokens() {
assertThrows(UnsupportedOperationException.class, () -> generator.generateSQLTokens(mockChangeColumnStatementContext()));
}
|
public static boolean shouldLoadInIsolation(String name) {
return !(EXCLUDE.matcher(name).matches() && !INCLUDE.matcher(name).matches());
}
|
@Test
public void testAllowedRuntimeClasses() {
List<String> jsonConverterClasses = Arrays.asList(
"org.apache.kafka.connect.connector.policy.",
"org.apache.kafka.connect.connector.policy.AbstractConnectorClientConfigOverridePolicy",
"org.apache.kafka.connect.connector.policy.AllConnectorClientConfigOverridePolicy",
"org.apache.kafka.connect.connector.policy.NoneConnectorClientConfigOverridePolicy",
"org.apache.kafka.connect.connector.policy.PrincipalConnectorClientConfigOverridePolicy",
"org.apache.kafka.connect.converters.",
"org.apache.kafka.connect.converters.ByteArrayConverter",
"org.apache.kafka.connect.converters.DoubleConverter",
"org.apache.kafka.connect.converters.FloatConverter",
"org.apache.kafka.connect.converters.IntegerConverter",
"org.apache.kafka.connect.converters.LongConverter",
"org.apache.kafka.connect.converters.NumberConverter",
"org.apache.kafka.connect.converters.NumberConverterConfig",
"org.apache.kafka.connect.converters.ShortConverter",
//"org.apache.kafka.connect.storage.", not isolated by default
"org.apache.kafka.connect.storage.StringConverter",
"org.apache.kafka.connect.storage.SimpleHeaderConverter"
);
for (String clazz : jsonConverterClasses) {
assertTrue(PluginUtils.shouldLoadInIsolation(clazz),
clazz + " from 'runtime' is not loaded in isolation but should be");
}
}
|
public static ShardingRouteEngine newInstance(final ShardingRule shardingRule, final ShardingSphereDatabase database, final QueryContext queryContext,
final ShardingConditions shardingConditions, final ConfigurationProperties props, final ConnectionContext connectionContext) {
SQLStatementContext sqlStatementContext = queryContext.getSqlStatementContext();
SQLStatement sqlStatement = sqlStatementContext.getSqlStatement();
if (sqlStatement instanceof TCLStatement) {
return new ShardingDatabaseBroadcastRoutingEngine();
}
if (sqlStatement instanceof DDLStatement) {
if (sqlStatementContext instanceof CursorAvailable) {
return getCursorRouteEngine(shardingRule, database, sqlStatementContext, queryContext.getHintValueContext(), shardingConditions, props);
}
return getDDLRoutingEngine(shardingRule, database, sqlStatementContext);
}
if (sqlStatement instanceof DALStatement) {
return getDALRoutingEngine(shardingRule, database, sqlStatementContext, connectionContext);
}
if (sqlStatement instanceof DCLStatement) {
return getDCLRoutingEngine(shardingRule, database, sqlStatementContext);
}
return getDQLRoutingEngine(shardingRule, database, sqlStatementContext, queryContext.getHintValueContext(), shardingConditions, props, connectionContext);
}
|
@Test
void assertNewInstanceForTCL() {
TCLStatement tclStatement = mock(TCLStatement.class);
when(sqlStatementContext.getSqlStatement()).thenReturn(tclStatement);
QueryContext queryContext = new QueryContext(sqlStatementContext, "", Collections.emptyList(), new HintValueContext(), mockConnectionContext(), mock(ShardingSphereMetaData.class));
ShardingRouteEngine actual =
ShardingRouteEngineFactory.newInstance(shardingRule, database, queryContext, shardingConditions, props, new ConnectionContext(Collections::emptySet));
assertThat(actual, instanceOf(ShardingDatabaseBroadcastRoutingEngine.class));
}
|
public UUID getPhoneNumberIdentifier(final String phoneNumber) {
final GetItemResponse response = GET_PNI_TIMER.record(() -> dynamoDbClient.getItem(GetItemRequest.builder()
.tableName(tableName)
.key(Map.of(KEY_E164, AttributeValues.fromString(phoneNumber)))
.projectionExpression(ATTR_PHONE_NUMBER_IDENTIFIER)
.build()));
final UUID phoneNumberIdentifier;
if (response.hasItem()) {
phoneNumberIdentifier = AttributeValues.getUUID(response.item(), ATTR_PHONE_NUMBER_IDENTIFIER, null);
} else {
phoneNumberIdentifier = generatePhoneNumberIdentifierIfNotExists(phoneNumber);
}
if (phoneNumberIdentifier == null) {
throw new RuntimeException("Could not retrieve phone number identifier from stored item");
}
return phoneNumberIdentifier;
}
|
@Test
void getPhoneNumberIdentifier() {
final String number = "+18005551234";
final String differentNumber = "+18005556789";
final UUID firstPni = phoneNumberIdentifiers.getPhoneNumberIdentifier(number);
final UUID secondPni = phoneNumberIdentifiers.getPhoneNumberIdentifier(number);
assertEquals(firstPni, secondPni);
assertNotEquals(firstPni, phoneNumberIdentifiers.getPhoneNumberIdentifier(differentNumber));
}
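A hedged, in-memory sketch of the read-then-create-if-absent pattern above; ConcurrentHashMap.computeIfAbsent stands in for the DynamoDB read plus the conditional write performed by generatePhoneNumberIdentifierIfNotExists.

import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

final class PniStoreSketch {

    // In-memory stand-in for the DynamoDB table keyed by E.164 phone number.
    private final ConcurrentMap<String, UUID> pniByE164 = new ConcurrentHashMap<>();

    UUID getPhoneNumberIdentifier(String phoneNumber) {
        // First caller for a number mints the identifier; later callers read it
        // back, mirroring the getItem / create-if-absent split in the method above.
        return pniByE164.computeIfAbsent(phoneNumber, ignored -> UUID.randomUUID());
    }

    public static void main(String[] args) {
        PniStoreSketch store = new PniStoreSketch();
        UUID first = store.getPhoneNumberIdentifier("+18005551234");
        UUID second = store.getPhoneNumberIdentifier("+18005551234");
        System.out.println(first.equals(second)); // true: the mapping is stable per number
    }
}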
|
public FEELFnResult<List<BigDecimal>> invoke(@ParameterName( "list" ) List list, @ParameterName( "match" ) Object match) {
if ( list == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null"));
}
final List<BigDecimal> result = new ArrayList<>();
for( int i = 0; i < list.size(); i++ ) {
Object o = list.get( i );
if ( o == null && match == null) {
result.add( BigDecimal.valueOf( i+1L ) );
} else if ( o != null && match != null ) {
if ( equalsAsBigDecimals(o, match) || o.equals(match) ) {
result.add( BigDecimal.valueOf( i+1L ) );
}
}
}
return FEELFnResult.ofResult( result );
}
|
@Test
void invokeListNull() {
FunctionTestUtil.assertResultError(indexOfFunction.invoke((List) null, null), InvalidParametersEvent.class);
FunctionTestUtil.assertResultError(indexOfFunction.invoke(null, new Object()), InvalidParametersEvent.class);
}
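A hedged sketch of the 1-based scan without the FEEL wrapper types; equalsAsBigDecimals is approximated here with compareTo, so numerically equal values of different scale (1 vs 1.0) still match even though BigDecimal.equals would distinguish them.

import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

final class IndexOfSketch {

    static List<BigDecimal> indexOf(List<?> list, Object match) {
        List<BigDecimal> result = new ArrayList<>();
        for (int i = 0; i < list.size(); i++) {
            Object o = list.get(i);
            boolean numericMatch = o instanceof BigDecimal && match instanceof BigDecimal
                    && ((BigDecimal) o).compareTo((BigDecimal) match) == 0;
            boolean plainMatch = o != null && o.equals(match);
            if ((o == null && match == null) || numericMatch || plainMatch) {
                result.add(BigDecimal.valueOf(i + 1L)); // FEEL list positions are 1-based
            }
        }
        return result;
    }

    public static void main(String[] args) {
        System.out.println(indexOf(Arrays.asList("a", "b", "a"), "a"));                     // [1, 3]
        System.out.println(indexOf(Arrays.asList(new BigDecimal("1.0")), BigDecimal.ONE)); // [1]
    }
}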
|
@Override
public Optional<WindowExpression> getWindowExpression() {
final Optional<WindowExpression> windowExpression = original.getWindowExpression();
final Optional<RefinementInfo> refinementInfo = original.getRefinementInfo();
// we only need to rewrite if we have a window expression and if we use emit final
if (!windowExpression.isPresent()
|| !refinementInfo.isPresent()
|| refinementInfo.get().getOutputRefinement() == OutputRefinement.CHANGES) {
return windowExpression;
}
final Optional<WindowTimeClause> gracePeriod;
if (!windowExpression.get().getKsqlWindowExpression().getGracePeriod().isPresent()) {
gracePeriod = Optional.of(zeroGracePeriod);
} else {
gracePeriod = windowExpression.get().getKsqlWindowExpression().getGracePeriod();
}
final WindowExpression window = windowExpression.get();
final KsqlWindowExpression ksqlWindowNew;
final KsqlWindowExpression ksqlWindowOld = window.getKsqlWindowExpression();
final Optional<NodeLocation> location = ksqlWindowOld.getLocation();
final Optional<WindowTimeClause> retention = ksqlWindowOld.getRetention();
if (ksqlWindowOld instanceof HoppingWindowExpression) {
ksqlWindowNew = new HoppingWindowExpression(
location,
((HoppingWindowExpression) ksqlWindowOld).getSize(),
((HoppingWindowExpression) ksqlWindowOld).getAdvanceBy(),
retention,
gracePeriod,
Optional.of(OutputRefinement.FINAL)
);
} else if (ksqlWindowOld instanceof TumblingWindowExpression) {
ksqlWindowNew = new TumblingWindowExpression(
location,
((TumblingWindowExpression) ksqlWindowOld).getSize(),
retention,
gracePeriod,
Optional.of(OutputRefinement.FINAL)
);
} else if (ksqlWindowOld instanceof SessionWindowExpression) {
ksqlWindowNew = new SessionWindowExpression(
location,
((SessionWindowExpression) ksqlWindowOld).getGap(),
retention,
gracePeriod,
Optional.of(OutputRefinement.FINAL)
);
} else {
throw new KsqlException("WINDOW type must be HOPPING, TUMBLING, or SESSION");
}
return Optional.of(new WindowExpression(
window.getWindowName(),
ksqlWindowNew
));
}
|
@Test
public void shouldCreateNewSessionWindowWithZeroGracePeriodDefault() {
// Given:
when(windowExpression.getKsqlWindowExpression()).thenReturn(sessionWindow);
when(sessionWindow.getGracePeriod()).thenReturn(gracePeriodOptional);
when(gracePeriodOptional.isPresent()).thenReturn(false);
when(sessionWindow.getLocation()).thenReturn(location);
when(sessionWindow.getRetention()).thenReturn(retention);
when(sessionWindow.getGap()).thenReturn(gap);
when(windowExpression.getWindowName()).thenReturn(windowName);
// When:
Optional<WindowExpression> result = rewrittenAnalysis.getWindowExpression();
// Then:
assertThat(result.get().getKsqlWindowExpression().getGracePeriod(), not(Optional.empty()));
}
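A hedged sketch of just the grace-period defaulting step above, with java.time.Duration standing in for ksqlDB's WindowTimeClause: an explicit GRACE clause is kept, otherwise a zero grace period is substituted so EMIT FINAL emits exactly once when the window closes.

import java.time.Duration;
import java.util.Optional;

final class GracePeriodDefaultSketch {

    static Optional<Duration> effectiveGracePeriod(Optional<Duration> declared, Duration zeroGracePeriod) {
        // Mirrors the if/else in getWindowExpression(): default only when absent.
        return declared.isPresent() ? declared : Optional.of(zeroGracePeriod);
    }

    public static void main(String[] args) {
        System.out.println(effectiveGracePeriod(Optional.empty(), Duration.ZERO));                    // Optional[PT0S]
        System.out.println(effectiveGracePeriod(Optional.of(Duration.ofMinutes(5)), Duration.ZERO));  // Optional[PT5M]
    }
}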