focal_method
stringlengths 13
60.9k
| test_case
stringlengths 25
109k
|
|---|---|
/**
 * Validates that the supplied boolean expression holds.
 *
 * @param expression the precondition expected to be {@code true}
 * @throws IllegalArgumentException (with no detail message) if the
 *         expression evaluates to {@code false}
 */
public static void checkArgument(final boolean expression) {
    if (expression) {
        return;
    }
    throw new IllegalArgumentException();
}
|
// Verifies that checkArgument(true, ...) never throws, even when the message
// arguments are null, malformed, or would themselves fail to format — the
// message machinery must not run when the precondition already holds.
@Test
public void testCheckArgumentWithSuccess() throws Exception {
    // success
    Preconditions.checkArgument(true);
    // null supplier
    Preconditions.checkArgument(true, null);
    // null message
    Preconditions.checkArgument(true, (String) null);
    Preconditions.checkArgument(true, NON_NULL_STRING);
    // null in string format
    Preconditions.checkArgument(true, EXPECTED_ERROR_MSG_ARGS, null, null);
    // illegalformat
    Preconditions.checkArgument(true, EXPECTED_ERROR_MSG_ARGS, 1, 2);
    // ill-formated string supplier
    Preconditions.checkArgument(true, ()-> String.format("%d",
        NON_INT_STRING));
    // null pattern to string formatter
    Preconditions.checkArgument(true, NULL_FORMATTER, null, 1);
    // null arguments to string formatter
    Preconditions.checkArgument(true, EXPECTED_ERROR_MSG_ARGS,
        null, null);
    // illegal format exception
    Preconditions.checkArgument(true, "message %d %d",
        NON_INT_STRING, 1);
    // insufficient arguments
    Preconditions.checkArgument(true, EXPECTED_ERROR_MSG_ARGS,
        NON_INT_STRING);
    // null format in string supplier
    Preconditions.checkArgument(true,
        () -> String.format(NULL_FORMATTER, NON_INT_STRING));
}
|
/**
 * Computes the migration status by comparing the last migration number
 * recorded in the history against the highest migration number defined by
 * the configured steps. Delegates to the two-argument overload.
 *
 * @return the resulting {@code Status}
 */
public Status getStatus() {
    return getStatus(migrationHistory.getLastMigrationNumber(), migrationSteps.getMaxMigrationNumber());
}
|
// DB is behind the configured migrations (123 < 150) => upgrade required.
@Test
public void getStatus_returns_REQUIRES_UPGRADE_when_max_migration_number_in_table_is_less_than_max_migration_number_in_configuration() {
    mockMaxMigrationNumberInDb(123L);
    mockMaxMigrationNumberInConfig(150L);
    assertThat(underTest.getStatus()).isEqualTo(REQUIRES_UPGRADE);
}
|
/**
 * Processes one NodeManager heartbeat and produces the response the NM acts
 * on. The method is a strictly ordered pipeline (see the numbered comments
 * below); reordering the steps would change RM/NM protocol behavior, e.g.
 * duplicate-heartbeat detection must happen before a new response id is
 * handed out.
 *
 * @param request the heartbeat sent by a NodeManager
 * @return the response (NORMAL, RESYNC or SHUTDOWN action) for the NM
 * @throws YarnException on YARN-level failures
 * @throws IOException on transport-level failures
 */
@SuppressWarnings("unchecked")
@Override
public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
    throws YarnException, IOException {
    NodeStatus remoteNodeStatus = request.getNodeStatus();
    /**
     * Here is the node heartbeat sequence...
     * 1. Check if it's a valid (i.e. not excluded) node
     * 2. Check if it's a registered node
     * 3. Check if it's a 'fresh' heartbeat i.e. not duplicate heartbeat
     * 4. Send healthStatus to RMNode
     * 5. Update node's labels if distributed Node Labels configuration is enabled
     */
    NodeId nodeId = remoteNodeStatus.getNodeId();
    // 1. Check if it's a valid (i.e. not excluded) node, if not, see if it is
    // in decommissioning.
    if (!this.nodesListManager.isValidNode(nodeId.getHost())
        && !isNodeInDecommissioning(nodeId)) {
        String message =
            "Disallowed NodeManager nodeId: " + nodeId + " hostname: "
                + nodeId.getHost();
        LOG.info(message);
        return YarnServerBuilderUtils.newNodeHeartbeatResponse(
            NodeAction.SHUTDOWN, message);
    }
    // 2. Check if it's a registered node
    RMNode rmNode = this.rmContext.getRMNodes().get(nodeId);
    if (rmNode == null) {
        /* node does not exist */
        String message = "Node not found resyncing " + remoteNodeStatus.getNodeId();
        LOG.info(message);
        return YarnServerBuilderUtils.newNodeHeartbeatResponse(NodeAction.RESYNC,
            message);
    }
    // Send ping
    this.nmLivelinessMonitor.receivedPing(nodeId);
    this.decommissioningWatcher.update(rmNode, remoteNodeStatus);
    // 3. Check if it's a 'fresh' heartbeat i.e. not duplicate heartbeat
    NodeHeartbeatResponse lastNodeHeartbeatResponse = rmNode.getLastNodeHeartBeatResponse();
    if (getNextResponseId(
        remoteNodeStatus.getResponseId()) == lastNodeHeartbeatResponse
        .getResponseId()) {
        // NM re-sent the previous heartbeat (likely a lost response); replay
        // the last response instead of processing the status twice.
        LOG.info("Received duplicate heartbeat from node "
            + rmNode.getNodeAddress()+ " responseId=" + remoteNodeStatus.getResponseId());
        return lastNodeHeartbeatResponse;
    } else if (remoteNodeStatus.getResponseId() != lastNodeHeartbeatResponse
        .getResponseId()) {
        // Response ids diverged by more than one step: NM and RM are out of
        // sync, force the NM to resync/reboot.
        String message =
            "Too far behind rm response id:"
                + lastNodeHeartbeatResponse.getResponseId() + " nm response id:"
                + remoteNodeStatus.getResponseId();
        LOG.info(message);
        // TODO: Just sending reboot is not enough. Think more.
        this.rmContext.getDispatcher().getEventHandler().handle(
            new RMNodeEvent(nodeId, RMNodeEventType.REBOOTING));
        return YarnServerBuilderUtils.newNodeHeartbeatResponse(NodeAction.RESYNC,
            message);
    }
    // Evaluate whether a DECOMMISSIONING node is ready to be DECOMMISSIONED.
    if (rmNode.getState() == NodeState.DECOMMISSIONING &&
        decommissioningWatcher.checkReadyToBeDecommissioned(
            rmNode.getNodeID())) {
        String message = "DECOMMISSIONING " + nodeId +
            " is ready to be decommissioned";
        LOG.info(message);
        this.rmContext.getDispatcher().getEventHandler().handle(
            new RMNodeEvent(nodeId, RMNodeEventType.DECOMMISSION));
        this.nmLivelinessMonitor.unregister(nodeId);
        return YarnServerBuilderUtils.newNodeHeartbeatResponse(
            NodeAction.SHUTDOWN, message);
    }
    if (timelineServiceV2Enabled) {
        // Check & update collectors info from request.
        updateAppCollectorsMap(request);
    }
    // Heartbeat response
    long newInterval = nextHeartBeatInterval;
    if (heartBeatIntervalScalingEnable) {
        // Let the node scale its heartbeat interval between the configured
        // min/max depending on cluster load.
        newInterval = rmNode.calculateHeartBeatInterval(
            nextHeartBeatInterval, heartBeatIntervalMin,
            heartBeatIntervalMax, heartBeatIntervalSpeedupFactor,
            heartBeatIntervalSlowdownFactor);
    }
    NodeHeartbeatResponse nodeHeartBeatResponse =
        YarnServerBuilderUtils.newNodeHeartbeatResponse(
            getNextResponseId(lastNodeHeartbeatResponse.getResponseId()),
            NodeAction.NORMAL, null, null, null, null, newInterval);
    rmNode.setAndUpdateNodeHeartbeatResponse(nodeHeartBeatResponse);
    populateKeys(request, nodeHeartBeatResponse);
    populateTokenSequenceNo(request, nodeHeartBeatResponse);
    if (timelineServiceV2Enabled) {
        // Return collectors' map that NM needs to know
        setAppCollectorsMapToResponse(rmNode.getRunningApps(),
            nodeHeartBeatResponse);
    }
    // 4. Send status to RMNode, saving the latest response.
    RMNodeStatusEvent nodeStatusEvent =
        new RMNodeStatusEvent(nodeId, remoteNodeStatus);
    if (request.getLogAggregationReportsForApps() != null
        && !request.getLogAggregationReportsForApps().isEmpty()) {
        nodeStatusEvent.setLogAggregationReportsForApps(request
            .getLogAggregationReportsForApps());
    }
    this.rmContext.getDispatcher().getEventHandler().handle(nodeStatusEvent);
    // 5. Update node's labels to RM's NodeLabelManager.
    if (isDistributedNodeLabelsConf && request.getNodeLabels() != null) {
        try {
            updateNodeLabelsFromNMReport(
                NodeLabelsUtils.convertToStringSet(request.getNodeLabels()),
                nodeId);
            nodeHeartBeatResponse.setAreNodeLabelsAcceptedByRM(true);
        } catch (IOException ex) {
            //ensure the error message is captured and sent across in response
            nodeHeartBeatResponse.setDiagnosticsMessage(ex.getMessage());
            nodeHeartBeatResponse.setAreNodeLabelsAcceptedByRM(false);
        }
    }
    // 6. check if node's capacity is load from dynamic-resources.xml
    // if so, send updated resource back to NM.
    String nid = nodeId.toString();
    Resource capability = loadNodeResourceFromDRConfiguration(nid);
    // sync back with new resource if not null.
    if (capability != null) {
        nodeHeartBeatResponse.setResource(capability);
    }
    // Check if we got an event (AdminService) that updated the resources
    if (rmNode.isUpdatedCapability()) {
        nodeHeartBeatResponse.setResource(rmNode.getTotalCapability());
        rmNode.resetUpdatedCapability();
    }
    // 7. Send Container Queuing Limits back to the Node. This will be used by
    // the node to truncate the number of Containers queued for execution.
    if (this.rmContext.getNodeManagerQueueLimitCalculator() != null) {
        nodeHeartBeatResponse.setContainerQueuingLimit(
            this.rmContext.getNodeManagerQueueLimitCalculator()
                .createContainerQueuingLimit());
    }
    // 8. Get node's attributes and update node-to-attributes mapping
    // in RMNodeAttributeManager.
    if (request.getNodeAttributes() != null) {
        try {
            // update node attributes if necessary then update heartbeat response
            updateNodeAttributesIfNecessary(nodeId, request.getNodeAttributes());
            nodeHeartBeatResponse.setAreNodeAttributesAcceptedByRM(true);
        } catch (IOException ex) {
            //ensure the error message is captured and sent across in response
            String errorMsg =
                nodeHeartBeatResponse.getDiagnosticsMessage() == null ?
                    ex.getMessage() :
                    nodeHeartBeatResponse.getDiagnosticsMessage() + "\n" + ex
                        .getMessage();
            nodeHeartBeatResponse.setDiagnosticsMessage(errorMsg);
            nodeHeartBeatResponse.setAreNodeAttributesAcceptedByRM(false);
        }
    }
    return nodeHeartBeatResponse;
}
|
// Reports two unknown COMPLETE containers from the NM and then heartbeats
// until the RM tells the NM to remove exactly those two containers. The
// unbounded loop is intentional — the @Test timeout bounds it.
@Test(timeout = 60000)
public void testNodeHeartBeatResponseForUnknownContainerCleanUp()
    throws Exception {
    Configuration conf = new Configuration();
    rm = new MockRM(conf);
    rm.init(conf);
    rm.start();
    MockNM nm1 = rm.registerNode("host1:1234", 5120);
    rm.drainEvents();
    // send 1st heartbeat
    nm1.nodeHeartbeat(true);
    // Create 2 unknown containers tracked by NM
    ApplicationId applicationId = BuilderUtils.newApplicationId(1, 1);
    ApplicationAttemptId applicationAttemptId = BuilderUtils
        .newApplicationAttemptId(applicationId, 1);
    ContainerId cid1 = BuilderUtils.newContainerId(applicationAttemptId, 2);
    ContainerId cid2 = BuilderUtils.newContainerId(applicationAttemptId, 3);
    ArrayList<ContainerStatus> containerStats =
        new ArrayList<ContainerStatus>();
    containerStats.add(
        ContainerStatus.newInstance(cid1, ContainerState.COMPLETE, "", -1));
    containerStats.add(
        ContainerStatus.newInstance(cid2, ContainerState.COMPLETE, "", -1));
    Map<ApplicationId, List<ContainerStatus>> conts =
        new HashMap<ApplicationId, List<ContainerStatus>>();
    conts.put(applicationAttemptId.getApplicationId(), containerStats);
    // add RMApp into context.
    RMApp app1 = mock(RMApp.class);
    when(app1.getApplicationId()).thenReturn(applicationId);
    rm.getRMContext().getRMApps().put(applicationId, app1);
    // Send unknown container status in heartbeat
    nm1.nodeHeartbeat(conts, true);
    rm.drainEvents();
    int containersToBeRemovedFromNM = 0;
    while (true) {
        NodeHeartbeatResponse nodeHeartbeat = nm1.nodeHeartbeat(true);
        rm.drainEvents();
        containersToBeRemovedFromNM +=
            nodeHeartbeat.getContainersToBeRemovedFromNM().size();
        // asserting for 2 since two unknown containers status has been sent
        if (containersToBeRemovedFromNM == 2) {
            break;
        }
    }
}
|
/**
 * Computes the elapsed time between two timestamps by delegating to the
 * three-argument overload with {@code createdTime} handling enabled.
 *
 * @param started start timestamp
 * @param finished finish timestamp
 * @return the elapsed time as computed by {@code Times.elapsed(started, finished, true)}
 */
public static long elapsed(long started, long finished) {
    return Times.elapsed(started, finished, true);
}
|
// A negative finish time must yield the sentinel -1 rather than a bogus
// negative duration.
@Test
void testNegativeFinishTimes() {
    long elapsed = Times.elapsed(5, -10, false);
    assertEquals(-1, elapsed, "Elapsed time is not -1");
}
|
/**
 * Resolves the effective client address for a request. When the request's
 * immediate peer lies in one of the trusted subnets, the value of the
 * "X-Forwarded-For" header (if present) is trusted and returned; otherwise
 * the direct remote address of the connection is used.
 *
 * @param request the incoming request
 * @param trustedSubnets subnets whose proxies are allowed to set X-Forwarded-For
 * @return the forwarded address when trusted, else the direct remote address
 */
public static String getRemoteAddrFromRequest(Request request, Set<IpSubnet> trustedSubnets) {
    final String directAddr = request.getRemoteAddr();
    final String forwardedFor = request.getHeader("X-Forwarded-For");
    if (forwardedFor == null) {
        // No proxy header present; the peer address is the client address.
        return directAddr;
    }
    for (final IpSubnet subnet : trustedSubnets) {
        try {
            if (subnet.contains(directAddr)) {
                // Request came from trusted source, trust X-Forwarded-For and return it
                return forwardedFor;
            }
        } catch (UnknownHostException e) {
            // ignore silently, probably not worth logging
        }
    }
    // Request did not come from a trusted source
    return directAddr;
}
|
// Without an X-Forwarded-For header the direct peer address must be returned
// unchanged, regardless of the (empty) trusted-subnet set.
@Test
public void getRemoteAddrFromRequestReturnsClientAddressWithNoXForwardedForHeader() throws Exception {
    final Request request = mock(Request.class);
    when(request.getRemoteAddr()).thenReturn("192.168.0.1");
    when(request.getHeader("X-Forwarded-For")).thenReturn(null);
    final String s = RestTools.getRemoteAddrFromRequest(request, Collections.emptySet());
    assertThat(s).isEqualTo("192.168.0.1");
}
|
/**
 * Builds the psql control-file contents used to bulk-load data into a
 * Postgres / Greenplum table via {@code \COPY}.
 *
 * <p>The generated script optionally prefixes a {@code TRUNCATE} statement
 * (when the configured load action is "truncate"), then emits a
 * {@code \COPY table (cols...) FROM 'file' WITH CSV} command with
 * single-row error handling ({@code LOG ERRORS INTO ... SEGMENT REJECT
 * LIMIT ...}).
 *
 * @param meta step metadata (database, table, fields, load action, data file)
 * @param rm row metadata of the incoming stream (currently unused)
 * @param r current row (currently unused)
 * @return the control file contents as a single string
 * @throws KettleException if no stream fields are defined
 */
public String getControlFileContents( GPBulkLoaderMeta meta, RowMetaInterface rm, Object[] r ) throws KettleException {
    DatabaseMeta dm = meta.getDatabaseMeta();
    // The data file name must be quoted for psql.
    String inputName = "'" + environmentSubstitute( meta.getDataFile() ) + "'";
    String loadAction = meta.getLoadAction();
    // Local, single-threaded buffer: StringBuilder avoids StringBuffer's
    // unnecessary synchronization.
    StringBuilder contents = new StringBuilder( 500 );
    String tableName =
        dm.getQuotedSchemaTableCombination(
            environmentSubstitute( meta.getSchemaName() ), environmentSubstitute( meta.getTableName() ) );
    // Create a Postgres / Greenplum COPY string for use with a psql client
    if ( loadAction.equalsIgnoreCase( "truncate" ) ) {
        contents.append( loadAction + " " );
        contents.append( tableName + ";" );
        contents.append( Const.CR );
    }
    contents.append( "\\COPY " );
    // Table name
    contents.append( tableName );
    // Names of columns
    contents.append( " ( " );
    String[] streamFields = meta.getFieldStream();
    String[] tableFields = meta.getFieldTable();
    if ( streamFields == null || streamFields.length == 0 ) {
        throw new KettleException( "No fields defined to load to database" );
    }
    for ( int i = 0; i < streamFields.length; i++ ) {
        if ( i != 0 ) {
            contents.append( ", " );
        }
        contents.append( dm.quoteField( tableFields[i] ) );
    }
    contents.append( " ) " );
    // The "FROM" filename
    contents.append( " FROM " );
    contents.append( inputName );
    // The "FORMAT" clause
    contents.append( " WITH CSV " );
    // The single row error handling
    contents.append( "LOG ERRORS INTO " );
    contents.append( tableName + "_errors " );
    contents.append( " SEGMENT REJECT LIMIT " );
    contents.append( meta.getMaxErrors() );
    return contents.toString();
}
|
// The generated control file must quote the data-file name with single
// quotes; locate the name in the output and inspect the adjacent chars.
@Test
public void testInputFileSurroundedBySingleQuotes() throws Exception {
    String datafile = "test-data-file";
    loader =
        new GPBulkLoader( mockHelper.stepMeta, mockHelper.stepDataInterface, 0, mockHelper.transMeta, mockHelper.trans );
    DatabaseMeta dbMetaMock = mock( DatabaseMeta.class );
    doReturn( "" ).when( dbMetaMock ).getQuotedSchemaTableCombination( anyString(), anyString() );
    doReturn( "" ).when( dbMetaMock ).quoteField( anyString() );
    GPBulkLoaderMeta meta = new GPBulkLoaderMeta();
    meta.setLoadAction( "" );
    meta.setFieldStream( new String[] { "" } );
    meta.setFieldTable( new String[] { "" } );
    meta.setDatabaseMeta( dbMetaMock );
    meta.setDataFile( datafile );
    String actual = loader.getControlFileContents( meta, null, null );
    int first = actual.indexOf( datafile );
    if ( first > 0 ) {
        // Check the characters immediately before and after the file name.
        if ( actual.charAt( first - 1 ) != '\'' || actual.charAt( first + datafile.length() ) != '\'' ) {
            Assert.fail( "Datafile name is not surrounded by single quotes. Actual control file: " + actual );
        }
    } else {
        Assert.fail( "Datafile name not found in control file. Actual control file: " + actual );
    }
}
|
/**
 * Stores the entry with a time-to-live by blocking on the asynchronous
 * variant {@code putAsync(key, value, ttl)}.
 *
 * @param key entry key
 * @param value entry value
 * @param ttl time-to-live for the entry
 * @return the previous value associated with the key, as reported by the async call
 */
@Override
public V put(K key, V value, Duration ttl) {
    return get(putAsync(key, value, ttl));
}
|
// Glob-pattern lookups on values() must include entries stored both with and
// without a TTL.
@Test
public void testValuesByPatternTTL() {
    RMapCacheNative<String, String> map = redisson.getMapCacheNative("simple", StringCodec.INSTANCE);
    map.put("10", "100");
    map.put("20", "200", Duration.ofMinutes(1));
    map.put("30", "300");
    // "?0" matches keys 10, 20 and 30.
    assertThat(map.values("?0")).containsExactlyInAnyOrder("100", "200", "300");
    assertThat(map.values("1")).isEmpty();
    assertThat(map.values("10")).containsExactlyInAnyOrder("100");
    map.destroy();
}
|
/**
 * Resolves the serialized name for a constructor parameter. Parameters of
 * classes annotated with {@link JsonSnakeCase} are delegated to the
 * snake_case strategy; all others defer to the superclass strategy. A null
 * parameter simply yields the supplied default name.
 */
@Override
public String nameForConstructorParameter(MapperConfig<?> config,
                                          AnnotatedParameter ctorParam,
                                          String defaultName) {
    if (ctorParam == null) {
        return defaultName;
    }
    final boolean useSnakeCase =
        ctorParam.getDeclaringClass().isAnnotationPresent(JsonSnakeCase.class);
    return useSnakeCase
        ? snakeCase.nameForConstructorParameter(config, ctorParam, defaultName)
        : super.nameForConstructorParameter(config, ctorParam, defaultName);
}
|
// A null constructor parameter must fall back to the supplied default name.
@Test
void nameForConstructorParameterWorksWithNullField() {
    final MapperConfig<?> mapperConfig = mock(MapperConfig.class);
    final String name = strategy.nameForConstructorParameter(mapperConfig, null, "defaultName");
    assertThat(name).isEqualTo("defaultName");
}
|
/**
 * Reads the value at the given column using the type-specific JDBC getter
 * that matches the requested class, falling back to {@code getObject} for
 * any type not explicitly handled. The listed types are mutually exclusive,
 * so the order of the checks does not affect the result.
 *
 * @param columnIndex 1-based JDBC column index
 * @param type requested Java type (primitive classes select primitive getters)
 * @return the column value as produced by the corresponding getter
 * @throws SQLException if the underlying result set access fails
 */
@Override
public Object getValue(final int columnIndex, final Class<?> type) throws SQLException {
    if (boolean.class == type) {
        return resultSet.getBoolean(columnIndex);
    }
    if (byte.class == type) {
        return resultSet.getByte(columnIndex);
    }
    if (short.class == type) {
        return resultSet.getShort(columnIndex);
    }
    if (int.class == type) {
        return resultSet.getInt(columnIndex);
    }
    if (long.class == type) {
        return resultSet.getLong(columnIndex);
    }
    if (float.class == type) {
        return resultSet.getFloat(columnIndex);
    }
    if (double.class == type) {
        return resultSet.getDouble(columnIndex);
    }
    if (String.class == type) {
        return resultSet.getString(columnIndex);
    }
    if (BigDecimal.class == type) {
        return resultSet.getBigDecimal(columnIndex);
    }
    if (byte[].class == type) {
        return resultSet.getBytes(columnIndex);
    }
    if (Date.class == type) {
        return resultSet.getDate(columnIndex);
    }
    if (Time.class == type) {
        return resultSet.getTime(columnIndex);
    }
    if (Timestamp.class == type) {
        return resultSet.getTimestamp(columnIndex);
    }
    if (Blob.class == type) {
        return resultSet.getBlob(columnIndex);
    }
    if (Clob.class == type) {
        return resultSet.getClob(columnIndex);
    }
    if (Array.class == type) {
        return resultSet.getArray(columnIndex);
    }
    // Unhandled type: let the driver pick the representation.
    return resultSet.getObject(columnIndex);
}
|
// boolean.class must route to ResultSet.getBoolean.
@Test
void assertGetValueByBoolean() throws SQLException {
    ResultSet resultSet = mock(ResultSet.class);
    when(resultSet.getBoolean(1)).thenReturn(true);
    assertTrue((boolean) new JDBCStreamQueryResult(resultSet).getValue(1, boolean.class));
}
|
/**
 * Dumps any pre-existing error-log observations from the given buffer to a
 * timestamped file next to the mark file. No file is created when the
 * buffer contains no observations. Any exception is rethrown unchecked via
 * {@code LangUtil.rethrowUnchecked}.
 *
 * @param markFile mark file whose parent directory receives the error log
 * @param errorBuffer buffer holding the distinct error log
 * @param logger optional stream for a warning about the saved file (may be null)
 * @param errorFilePrefix prefix for the generated error-log file name
 */
public static void saveExistingErrors(
    final File markFile,
    final AtomicBuffer errorBuffer,
    final PrintStream logger,
    final String errorFilePrefix)
{
    try
    {
        // Render the error log into memory first so we only create a file
        // when there is something to save.
        final ByteArrayOutputStream baos = new ByteArrayOutputStream();
        final int observations = printErrorLog(errorBuffer, new PrintStream(baos, false, "US-ASCII"));
        if (observations > 0)
        {
            final SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd-HH-mm-ss-SSSZ");
            final File errorLogFile = new File(
                markFile.getParentFile(), errorFilePrefix + '-' + dateFormat.format(new Date()) + "-error.log");
            if (null != logger)
            {
                logger.println("WARNING: existing errors saved to: " + errorLogFile);
            }
            try (FileOutputStream out = new FileOutputStream(errorLogFile))
            {
                baos.writeTo(out);
            }
        }
    }
    catch (final Exception ex)
    {
        LangUtil.rethrowUnchecked(ex);
    }
}
|
// An empty error buffer must produce no output at all — in particular the
// warning logger must never be touched.
@Test
void saveExistingErrorsIsANoOpIfErrorBufferIsEmpty()
{
    final File markFile = tempDir.resolve("mark.dat").toFile();
    final UnsafeBuffer errorBuffer = new UnsafeBuffer(new byte[0]);
    final PrintStream logger = mock(PrintStream.class);
    final String errorFilePrefix = "test-error-";
    CommonContext.saveExistingErrors(markFile, errorBuffer, logger, errorFilePrefix);
    verifyNoInteractions(logger);
}
|
/**
 * Resolves the inter-node listener URL, using the class logger for any
 * diagnostics. Delegates to the overload that takes an explicit logger.
 *
 * @param portResolver maps a listener URL to its actual (possibly auto-assigned) port
 * @return the resolved inter-node listener URL
 */
public URL getInterNodeListener(
    final Function<URL, Integer> portResolver
) {
    return getInterNodeListener(portResolver, LOGGER);
}
|
// A listener configured with port 0 (auto-assignment) must be resolved via
// the port resolver, and the first listener in the list wins.
@Test
public void shouldResolveInterNodeListenerToFirstListenerWithAutoPortAssignment() {
    // Given:
    final URL autoPort = url("https://example.com:0");
    when(portResolver.apply(any())).thenReturn(2222);
    final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.<String, Object>builder()
        .put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")
        .put(LISTENERS_CONFIG, autoPort.toString() + ",http://localhost:2589")
        .build()
    );
    // When:
    final URL actual = config.getInterNodeListener(portResolver, logger);
    // Then:
    final URL expected = url("https://example.com:2222");
    assertThat(actual, is(expected));
    verifyLogsInterNodeListener(expected, QUOTED_FIRST_LISTENER_CONFIG);
    verifyNoMoreInteractions(logger);
}
|
/**
 * Appends a state-change record for the given key group to the active
 * change set, then triggers a pre-emptive flush if the accumulated size
 * warrants it. Appends after {@code close()} are silently dropped (with a
 * warning).
 *
 * @param keyGroup key group the change belongs to
 * @param value serialized state change payload
 * @throws IOException declared by the interface; see preEmptiveFlushIfNeeded
 */
@Override
public void append(int keyGroup, byte[] value) throws IOException {
    LOG.trace("append to {}: keyGroup={} {} bytes", logId, keyGroup, value.length);
    if (closed) {
        // Writer already closed: drop the change rather than fail the caller.
        LOG.warn("{} is closed.", logId);
        return;
    }
    activeChangeSet.add(StateChange.ofDataChange(keyGroup, value));
    preEmptiveFlushIfNeeded(value);
}
|
// Appending below the flush threshold must buffer only — nothing may reach
// the uploader.
@Test
void testAppend() throws Exception {
    withWriter(
        (writer, uploader) -> {
            writer.append(KEY_GROUP, getBytes());
            writer.append(KEY_GROUP, getBytes());
            writer.append(KEY_GROUP, getBytes());
            assertNoUpload(uploader, "append shouldn't persist");
        });
}
|
/**
 * Extracts candidate mod/package keywords from a crash report's stack
 * trace. For each stack-trace line it collects package-name segments
 * (excluding the last two segments — presumably class and method — and any
 * blacklisted segments), plus module tokens of the form {@code xf:<name>}
 * from the line's bracketed module list.
 *
 * @param crashReport full crash report text
 * @return the set of extracted keywords (empty if no stack trace is found)
 */
public static Set<String> findKeywordsFromCrashReport(String crashReport) {
    Matcher matcher = CRASH_REPORT_STACK_TRACE_PATTERN.matcher(crashReport);
    Set<String> result = new HashSet<>();
    if (matcher.find()) {
        for (String line : matcher.group("stacktrace").split("\\n")) {
            Matcher lineMatcher = STACK_TRACE_LINE_PATTERN.matcher(line);
            if (lineMatcher.find()) {
                String[] method = lineMatcher.group("method").split("\\.");
                // Skip the trailing class and method segments.
                for (int i = 0; i < method.length - 2; i++) {
                    if (PACKAGE_KEYWORD_BLACK_LIST.contains(method[i])) {
                        continue;
                    }
                    result.add(method[i]);
                }
                Matcher moduleMatcher = STACK_TRACE_LINE_MODULE_PATTERN.matcher(line);
                if (moduleMatcher.find()) {
                    for (String module : moduleMatcher.group("tokens").split(",")) {
                        String[] split = module.split(":");
                        // Only "xf:<name>" tokens are treated as module names.
                        if (split.length >= 2 && "xf".equals(split[0])) {
                            if (PACKAGE_KEYWORD_BLACK_LIST.contains(split[1])) {
                                continue;
                            }
                            result.add(split[1]);
                        }
                    }
                }
            }
        }
    }
    return result;
}
|
// The thaumcraft crash report must yield exactly the "thaumcraft" keyword.
@Test
public void thaumcraft() throws IOException {
    assertEquals(
        Collections.singleton("thaumcraft"),
        CrashReportAnalyzer.findKeywordsFromCrashReport(loadLog("/crash-report/mod/thaumcraft.txt")));
}
|
/**
 * Lists the directory using the default path delimiter, delegating to the
 * three-argument overload.
 *
 * @param directory directory to list
 * @param listener progress callback
 * @return the directory's children
 * @throws BackgroundException on listing failure
 */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    return this.list(directory, listener, String.valueOf(Path.DELIMITER));
}
|
// Object keys containing a control character (U+001F) must still appear in
// the bucket listing.
@Test
public void testListInvisibleCharacter() throws Exception {
    final Path container = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final Path placeholder = new S3TouchFeature(session, new S3AccessControlListFeature(session)).touch(
        new Path(container, String.format("test-\u001F-%s", new AlphanumericRandomStringService().random()), EnumSet.of(Path.Type.file)), new TransferStatus());
    assertTrue(new S3ObjectListService(session, new S3AccessControlListFeature(session)).list(container, new DisabledListProgressListener()).contains(placeholder));
    new S3DefaultDeleteFeature(session).delete(Collections.singletonList(placeholder), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
/**
 * Reads the OpenAPI definition for a resource class with default settings:
 * the resolved application path, no parent metadata, and empty scanned-
 * resource/parameter/class accumulators.
 *
 * @param cls resource class to scan
 * @return the resulting OpenAPI model
 */
public OpenAPI read(Class<?> cls) {
    return read(cls, resolveApplicationPath(), null, false, null, null, new LinkedHashSet<String>(), new ArrayList<Parameter>(), new HashSet<Class<?>>());
}
|
// With defaultResponseCode("200") configured, an undeclared response must be
// rendered under the "200" status rather than "default".
@Test(description = "Responses Default Status")
public void testResponseDefaultStatus() {
    SwaggerConfiguration config = new SwaggerConfiguration().defaultResponseCode("200");
    Reader reader = new Reader(config);
    OpenAPI openAPI = reader.read(DefaultResponseResource.class);
    String yaml = "openapi: 3.0.1\n" +
        "paths:\n" +
        "  /:\n" +
        "    get:\n" +
        "      operationId: test\n" +
        "      responses:\n" +
        "        \"200\":\n" +
        "          description: default response\n" +
        "          content:\n" +
        "            '*/*':\n" +
        "              schema:\n" +
        "                type: string\n";
    SerializationMatchers.assertEqualsToYaml(openAPI, yaml);
}
|
/**
 * Returns the queue at the given index.
 *
 * @param index position in the internal queues array
 * @return the queue stored at that index
 */
public final QueuedPipe<E> queue(int index) {
    return queues[index];
}
|
// queue(0) must return the very instance registered as the default queue.
@Test
public void getQueueAtIndex() {
    assertSame(defaultQ, conveyor.queue(0));
}
|
/**
 * Instantiates the named class through its public no-argument constructor.
 *
 * @param className fully qualified name of the class to instantiate
 * @return a new instance of the class
 * @throws JMeterException if the class cannot be found, accessed or constructed
 */
public static Object construct(String className) throws JMeterException {
    try {
        return ClassUtils.getClass(className).getDeclaredConstructor().newInstance();
    } catch (IllegalArgumentException | ReflectiveOperationException | SecurityException e) {
        throw new JMeterException(e);
    }
}
|
// Constructing java.lang.String via its no-arg constructor yields "".
@Test
public void testConstructString() throws JMeterException {
    String dummy = (String) ClassTools.construct("java.lang.String");
    assertNotNull(dummy);
    assertEquals("", dummy);
}
|
/**
 * Returns the data table type backing this definition.
 *
 * @return the wrapped {@code DataTableType}
 */
@Override
public DataTableType dataTableType() {
    return dataTableType;
}
|
// A definition created with the "[empty]" replacement pattern must convert
// empty cells accordingly when the table is transformed.
@Test
void can_define_data_table_converter_with_empty_pattern() throws NoSuchMethodException {
    Method method = JavaDataTableTypeDefinitionTest.class.getMethod("convert_data_table_to_string",
        DataTable.class);
    JavaDataTableTypeDefinition definition = new JavaDataTableTypeDefinition(method, lookup,
        new String[] { "[empty]" });
    assertThat(definition.dataTableType().transform(emptyTable.cells()),
        is("convert_data_table_to_string=[[a, ], [, d]]"));
}
|
/**
 * Delegates the rollback to the wrapped migration-aware service, and — for
 * primary-replica migration events — records completion in the owner
 * migration counter even if the delegate throws.
 *
 * @param event the partition migration being rolled back
 */
@Override
public void rollbackMigration(PartitionMigrationEvent event) {
    try {
        migrationAwareService.rollbackMigration(event);
    } finally {
        if (isPrimaryReplicaMigrationEvent(event)) {
            int completed = ownerMigrationsCompleted.incrementAndGet();
            // Completions can never outnumber starts.
            assert completed <= ownerMigrationsStarted.get();
        }
    }
}
|
// Rolling back a migration must change the migration stamp only for
// primary-replica events, and the resulting stamp must always validate.
@Test
public void rollbackMigration() {
    // when: before - rollback migration methods have been executed
    try {
        countingMigrationAwareService.rollbackMigration(event);
    } catch (RuntimeException e) {
        // we do not care whether the wrapped service throws an exception
    }
    int currentMigrationStamp = countingMigrationAwareService.getMigrationStamp();
    // then: if event involves primary replica, stamp should change.
    if (isPrimaryReplicaMigrationEvent(event)) {
        assertNotEquals(initialMigrationStamp, currentMigrationStamp);
    } else {
        assertEquals(initialMigrationStamp, currentMigrationStamp);
    }
    assertTrue(countingMigrationAwareService.validateMigrationStamp(currentMigrationStamp));
}
|
/**
 * Converts this table to a stream with no explicit name, delegating to the
 * named variant with an empty {@code NamedInternal}.
 *
 * @return the table's changelog as a {@code KStream}
 */
@Override
public KStream<K, V> toStream() {
    return toStream(NamedInternal.empty());
}
|
// A null key-value mapper passed to toStream must be rejected eagerly.
@Test
public void shouldNotAllowNullSelectorOnToStream() {
    assertThrows(NullPointerException.class, () -> table.toStream((KeyValueMapper) null));
}
|
/**
 * Parses a BigQuery table specification string into a {@link TableReference}.
 * Accepted formats are {@code [project]:[dataset].[table]},
 * {@code [project].[dataset].[table]} and {@code [dataset].[table]}; the
 * project component is optional and may be set to null on the result.
 *
 * @param tableSpec the table spec string
 * @return the parsed table reference
 * @throws IllegalArgumentException if the spec matches none of the formats
 */
@SuppressWarnings({
    "nullness" // TODO(https://github.com/apache/beam/issues/20497)
})
public static TableReference parseTableSpec(String tableSpec) {
    Matcher match = BigQueryIO.TABLE_SPEC.matcher(tableSpec);
    if (!match.matches()) {
        throw new IllegalArgumentException(
            String.format(
                "Table specification [%s] is not in one of the expected formats ("
                    + " [project_id]:[dataset_id].[table_id],"
                    + " [project_id].[dataset_id].[table_id],"
                    + " [dataset_id].[table_id])",
                tableSpec));
    }
    TableReference ref = new TableReference();
    ref.setProjectId(match.group("PROJECT"));
    return ref.setDatasetId(match.group("DATASET")).setTableId(match.group("TABLE"));
}
|
// Legacy SQL form "project:dataset.table" must split into its components.
@Test
public void testTablesspecParsingLegacySql() {
    TableReference ref = BigQueryHelpers.parseTableSpec("my-project:data_set.table_name");
    assertEquals("my-project", ref.getProjectId());
    assertEquals("data_set", ref.getDatasetId());
    assertEquals("table_name", ref.getTableId());
}
|
/**
 * Drains up to {@code maxPollRecords} records from the fetch buffer into a
 * {@code ShareFetch}. Completed fetches are initialized lazily; a fetch
 * whose initialization fails is dropped from the buffer only when nothing
 * has been collected yet, so already-collected records are still returned.
 * A {@code KafkaException} is swallowed when the fetch is non-empty and
 * surfaces on the next call instead.
 *
 * @param fetchBuffer buffer of completed fetches from the network layer
 * @return the collected (possibly empty) fetch
 */
public ShareFetch<K, V> collect(final ShareFetchBuffer fetchBuffer) {
    ShareFetch<K, V> fetch = ShareFetch.empty();
    int recordsRemaining = fetchConfig.maxPollRecords;
    try {
        while (recordsRemaining > 0) {
            final ShareCompletedFetch nextInLineFetch = fetchBuffer.nextInLineFetch();
            if (nextInLineFetch == null || nextInLineFetch.isConsumed()) {
                // Promote the next completed fetch to in-line status.
                final ShareCompletedFetch completedFetch = fetchBuffer.peek();
                if (completedFetch == null) {
                    break;
                }
                if (!completedFetch.isInitialized()) {
                    try {
                        fetchBuffer.setNextInLineFetch(initialize(completedFetch));
                    } catch (Exception e) {
                        // Remove the failing fetch only if we have collected
                        // nothing; otherwise keep it so the error is seen on
                        // the next poll after records are delivered.
                        if (fetch.isEmpty()) {
                            fetchBuffer.poll();
                        }
                        throw e;
                    }
                } else {
                    fetchBuffer.setNextInLineFetch(completedFetch);
                }
                fetchBuffer.poll();
            } else {
                final TopicIdPartition tp = nextInLineFetch.partition;
                ShareInFlightBatch<K, V> batch = nextInLineFetch.fetchRecords(
                    deserializers,
                    recordsRemaining,
                    fetchConfig.checkCrcs);
                if (batch.isEmpty()) {
                    // Nothing left in this fetch; mark it consumed.
                    nextInLineFetch.drain();
                }
                recordsRemaining -= batch.numRecords();
                fetch.add(tp, batch);
                if (batch.getException() != null) {
                    throw batch.getException();
                } else if (batch.hasCachedException()) {
                    break;
                }
            }
        }
    } catch (KafkaException e) {
        // Deliver what we have; rethrow only when there is nothing to return.
        if (fetch.isEmpty()) {
            throw e;
        }
    }
    return fetch;
}
|
// A completed fetch carrying UNKNOWN_SERVER_ERROR must yield an empty fetch
// rather than records.
@Test
public void testFetchWithUnknownServerError() {
    buildDependencies();
    subscribeAndAssign(topicAPartition0);
    ShareCompletedFetch completedFetch = completedFetchBuilder
        .error(Errors.UNKNOWN_SERVER_ERROR)
        .build();
    fetchBuffer.add(completedFetch);
    ShareFetch<String, String> fetch = fetchCollector.collect(fetchBuffer);
    assertTrue(fetch.isEmpty());
}
|
/**
 * Returns the HTTP method used by this endpoint: always GET.
 *
 * @return {@code HttpMethodWrapper.GET}
 */
@Override
public HttpMethodWrapper getHttpMethod() {
    return HttpMethodWrapper.GET;
}
|
// The endpoint must report GET as its HTTP method.
@Test
void testMethod() {
    assertThat(instance.getHttpMethod()).isEqualTo(HttpMethodWrapper.GET);
}
|
/**
 * Sets the header only when both the name and value are valid and the
 * header is not already present.
 *
 * @param headerName header name to set
 * @param value header value to set
 * @return {@code true} if the header was set; {@code false} if it was
 *         already present or the name/value failed validation
 */
public boolean setIfAbsentAndValid(String headerName, String value) {
    Objects.requireNonNull(value, "value");
    Objects.requireNonNull(headerName, "headerName");
    if (isValid(headerName) && isValid(value)) {
        String normalName = HeaderName.normalize(headerName);
        // NOTE(review): the original name and its normalized form are both
        // passed down — presumably the delegate uses normalName for the
        // absence check and headerName for storage; confirm against
        // setIfAbsentNormal.
        return setIfAbsentNormal(headerName, normalName, value);
    }
    return false;
}
|
// Only the first setIfAbsentAndValid call may take effect; the second call
// with a different value must be ignored because the (case-insensitively
// normalized) header already exists.
@Test
void setIfAbsentAndValid() {
    Headers headers = new Headers();
    headers.add("Via", "duct");
    headers.add("Cookie", "this=that");
    headers.add("Cookie", "frizzle=frazzle");
    headers.setIfAbsentAndValid("X-Netflix-Awesome", "true");
    headers.setIfAbsentAndValid("X-Netflix-Awesome", "True");
    Truth.assertThat(headers.getAll("X-netflix-Awesome")).containsExactly("true");
    Truth.assertThat(headers.size()).isEqualTo(4);
}
|
/**
 * Builds a {@link RowData} holding the partition values referenced by
 * {@code dynamicFilterPartitionKeys}, converting each string value from
 * {@code partitionSpec} into Flink's internal representation for the
 * corresponding field type.
 *
 * <p>Returns {@code null} when a non-nullable field resolves to a null
 * value (e.g. the default partition), signalling that this partition
 * cannot satisfy the dynamic filter.
 *
 * @param rowType row type describing the expected field types
 * @param partitionSpec mapping from partition key to its string value
 * @return the converted row, or {@code null} if a non-nullable field is null
 * @throws UnsupportedOperationException for field types not supported by
 *         dynamic filtering
 */
@VisibleForTesting
RowData createRowData(RowType rowType, Map<String, String> partitionSpec) {
    GenericRowData rowData = new GenericRowData(rowType.getFieldCount());
    for (int i = 0; i < rowType.getFieldCount(); ++i) {
        String value = partitionSpec.get(dynamicFilterPartitionKeys.get(i));
        LogicalType fieldType = rowType.getTypeAt(i);
        Object convertedValue =
                HivePartitionUtils.restorePartitionValueFromType(
                        hiveShim, value, fieldType, defaultPartitionName);
        // Note: also modify supported types if the switch is modified.
        switch (fieldType.getTypeRoot()) {
            case TINYINT:
            case SMALLINT:
            case INTEGER:
            case BIGINT:
                // Already in internal form; no further processing needed.
                break;
            case CHAR:
            case VARCHAR:
                convertedValue = StringData.fromString((String) convertedValue);
                break;
            case DATE:
                convertedValue =
                        LocalDateConverter.INSTANCE.toInternal((LocalDate) convertedValue);
                break;
            case TIMESTAMP_WITHOUT_TIME_ZONE:
                convertedValue =
                        new LocalDateTimeConverter(9)
                                .toInternal((LocalDateTime) convertedValue);
                break;
            default:
                throw new UnsupportedOperationException(
                        "Unsupported type for dynamic filtering:" + fieldType);
        }
        if (!fieldType.isNullable() && convertedValue == null) {
            // A non-nullable field with a null partition value can never match.
            return null;
        }
        rowData.setField(i, convertedValue);
    }
    return rowData;
}
|
// Verifies createRowData round-trips every supported partition type (numeric, string, date,
// timestamp) plus the Hive default-partition value, using tuples of
// (field type, expected internal value, partition string).
@Test
void testCreateRowSupportedTypes() {
    List<Tuple3<LogicalType, Object, String>> testTypeValues = new ArrayList<>();
    testTypeValues.add(new Tuple3<>(new IntType(), 42, "42"));
    testTypeValues.add(new Tuple3<>(new BigIntType(), 9876543210L, "9876543210"));
    testTypeValues.add(new Tuple3<>(new SmallIntType(), (short) 41, "41"));
    testTypeValues.add(new Tuple3<>(new TinyIntType(), (byte) 40, "40"));
    testTypeValues.add(new Tuple3<>(new VarCharType(), StringData.fromString("1234"), "1234"));
    testTypeValues.add(new Tuple3<>(new CharType(), StringData.fromString("7"), "7"));
    testTypeValues.add(
            new Tuple3<>(
                    new DateType(),
                    LocalDateConverter.INSTANCE.toInternal(LocalDate.of(2022, 2, 22)),
                    "2022-2-22"));
    testTypeValues.add(
            new Tuple3<>(
                    new TimestampType(9),
                    new LocalDateTimeConverter(9)
                            .toInternal(LocalDateTime.of(2022, 2, 22, 22, 2, 20, 20222022)),
                    "2022-2-22 22:02:20.020222022"));
    // Default partition values
    testTypeValues.add(
            new Tuple3<>(
                    new VarCharType(),
                    StringData.fromString(HiveConf.ConfVars.DEFAULTPARTITIONNAME.defaultStrVal),
                    HiveConf.ConfVars.DEFAULTPARTITIONNAME.defaultStrVal));
    testTypeValues.add(
            new Tuple3<>(
                    new IntType(), null, HiveConf.ConfVars.DEFAULTPARTITIONNAME.defaultStrVal));
    RowType rowType =
            RowType.of(testTypeValues.stream().map(t -> t.f0).toArray(LogicalType[]::new));
    // Partition keys are just the field indices as strings.
    List<String> keys = new ArrayList<>();
    Map<String, String> spec = new HashMap<>();
    for (int i = 0; i < testTypeValues.size(); i++) {
        keys.add(String.valueOf(i));
        spec.put(String.valueOf(i), testTypeValues.get(i).f2);
    }
    HiveSourceDynamicFileEnumerator enumerator =
            createTestEnumerator(keys, Collections.emptyList());
    RowData result = enumerator.createRowData(rowType, spec);
    // Compare each field of the produced row against the expected internal value.
    for (int i = 0; i < testTypeValues.size(); i++) {
        LogicalType type = testTypeValues.get(i).f0;
        Object expected = testTypeValues.get(i).f1;
        FieldGetter getter = RowData.createFieldGetter(type, i);
        assertThat(getter.getFieldOrNull(result))
                .withFailMessage(
                        () ->
                                "Mismatching row type "
                                        + type
                                        + ", expected:"
                                        + expected
                                        + ", actual:"
                                        + getter.getFieldOrNull(result))
                .isEqualTo(testTypeValues.get(i).f1);
    }
}
|
/**
 * Imports the given photos container into Backblaze B2.
 *
 * <p>Album names are cached first (keyed by album id) so later photo imports can resolve them;
 * each photo is then uploaded via {@code importSinglePhoto}. IO failures are swallowed by the
 * idempotent executor so that a single bad item does not abort the whole transfer job.
 *
 * @return {@code ImportResult.OK} carrying the total number of bytes imported
 */
@Override
public ImportResult importItem(
    UUID jobId,
    IdempotentImportExecutor idempotentExecutor,
    TokenSecretAuthData authData,
    PhotosContainerResource data)
    throws Exception {
  if (data == null) {
    // Nothing to do
    return ImportResult.OK;
  }
  BackblazeDataTransferClient b2Client = b2ClientFactory.getOrCreateB2Client(jobId, authData);
  if (data.getAlbums() != null && data.getAlbums().size() > 0) {
    for (PhotoAlbum album : data.getAlbums()) {
      // Cache album names under the album id so importSinglePhoto can look them up.
      idempotentExecutor.executeAndSwallowIOExceptions(
          album.getId(),
          String.format("Caching album name for album '%s'", album.getId()),
          () -> album.getName());
    }
  }
  // LongAdder because the lambda below cannot mutate a plain local long.
  final LongAdder totalImportedFilesSizes = new LongAdder();
  if (data.getPhotos() != null && data.getPhotos().size() > 0) {
    for (PhotoModel photo : data.getPhotos()) {
      idempotentExecutor.importAndSwallowIOExceptions(
          photo,
          p -> {
            ItemImportResult<String> fileImportResult =
                importSinglePhoto(idempotentExecutor, b2Client, jobId, p);
            if (fileImportResult.hasBytes()) {
              totalImportedFilesSizes.add(fileImportResult.getBytes());
            }
            return fileImportResult;
          });
    }
  }
  return ImportResult.OK.copyWithBytes(totalImportedFilesSizes.longValue());
}
|
// A null container resource must be a no-op that still reports success.
@Test
public void testNullData() throws Exception {
  BackblazePhotosImporter sut =
      new BackblazePhotosImporter(monitor, dataStore, streamProvider, clientFactory);
  ImportResult result = sut.importItem(UUID.randomUUID(), executor, authData, null);
  assertEquals(ImportResult.OK, result);
}
|
/**
 * Computes the standard error of the wrapped criterion over the given position:
 * standard deviation divided by the square root of the number of positions.
 */
@Override
public Num calculate(BarSeries series, Position position) {
    Num numberOfPositions = numberOfPositionsCriterion.calculate(series, position);
    Num standardDeviation = standardDeviationCriterion.calculate(series, position);
    // Standard error = stddev / sqrt(n).
    return standardDeviation.dividedBy(numberOfPositions.sqrt());
}
|
// Two closed positions over the mock series; the expected value is the standard error of
// their profit/loss: stddev(PnL) / sqrt(2).
@Test
public void calculateStandardErrorPnL() {
    MockBarSeries series = new MockBarSeries(numFunction, 100, 105, 110, 100, 95, 105);
    TradingRecord tradingRecord = new BaseTradingRecord(Trade.buyAt(0, series, series.one()),
            Trade.sellAt(2, series, series.one()), Trade.buyAt(3, series, series.one()),
            Trade.sellAt(5, series, series.one()));
    AnalysisCriterion criterion = getCriterion(new ProfitLossCriterion());
    assertNumEquals(1.7677669529663687, criterion.calculate(series, tradingRecord));
}
|
/**
 * Creates an unconfigured {@code Read} transform with default settings (no configuration,
 * empty table id, full-table {@code Scan}); callers configure it via {@code with*} methods.
 */
public static Read read() {
  return new Read(null, "", new Scan());
}
|
// Reads a restricted key range [startRow, stopRow) from the middle of the table and checks
// the expected row count (441 of the 1001 rows fall in that range).
@Test
public void testReadingKeyRangeMiddleSDF() throws Exception {
  final String table = tmpTable.getName();
  final int numRows = 1001;
  final byte[] startRow = "2".getBytes(StandardCharsets.UTF_8);
  final byte[] stopRow = "9".getBytes(StandardCharsets.UTF_8);
  createAndWriteData(table, numRows);
  // Test restricted range: [startKey, endKey).
  // This one tests the second signature of .withKeyRange
  runReadTestLength(
      HBaseIO.read().withConfiguration(conf).withTableId(table).withKeyRange(startRow, stopRow),
      true,
      441);
}
|
/**
 * Returns an {@code AvroCoder} using Avro's specific-record encoding for the raw class of the
 * given type descriptor.
 *
 * @param type descriptor of the type to encode
 * @return a specific-record coder for {@code T}
 */
// getRawType() returns Class<? super T>; the cast to Class<T> is safe for the intended usage
// and the warning is now suppressed at the narrowest scope instead of leaking to callers.
@SuppressWarnings("unchecked")
public static <T> AvroCoder<T> specific(TypeDescriptor<T> type) {
  return specific((Class<T>) type.getRawType());
}
|
// A coder must survive Java serialization so it can be shipped to workers.
@Test
public void testAvroSpecificCoderIsSerializable() throws Exception {
  AvroCoder<TestAvro> coder = AvroCoder.specific(TestAvro.class);
  // Check that the coder is serializable using the regular JSON approach.
  SerializableUtils.ensureSerializable(coder);
}
|
/**
 * Returns the parsed Seata environment name, or {@code null} when none was configured.
 */
public String getSeataEnv() {
    return seataEnv;
}
|
// The parser fixture is expected to have been configured with environment "test".
@Test
public void testGetSeataEnv() {
    Assertions.assertEquals("test", parameterParser.getSeataEnv());
}
|
/**
 * Registers a listener for job-storage changes and ensures the timer that publishes updates
 * to registered listeners is running.
 */
@Override
public void addJobStorageOnChangeListener(StorageProviderChangeListener listener) {
    onChangeListeners.add(listener);
    // Idempotent start of the polling timer that notifies all registered listeners.
    startTimerToSendUpdates();
}
|
// Two listeners watching the same job must share one database fetch per update cycle.
@Test
void ifMultipleJobChangeListenersForSameJobStillOneDatabaseCall() {
    final Job job = anEnqueuedJob().build();
    storageProvider.save(job);
    final JobChangeListenerForTest changeListener1 = new JobChangeListenerForTest(new JobId(job.getId()));
    final JobChangeListenerForTest changeListener2 = new JobChangeListenerForTest(new JobId(job.getId()));
    storageProvider.addJobStorageOnChangeListener(changeListener1);
    storageProvider.addJobStorageOnChangeListener(changeListener2);
    // Wait until both listeners have been notified asynchronously.
    await().untilAsserted(() -> assertThat(changeListener1.job).isNotNull());
    await().untilAsserted(() -> assertThat(changeListener2.job).isNotNull());
    // Despite two listeners, the job must have been loaded from storage only once.
    Mockito.verify(storageProvider, times(1)).getJobById(job.getId());
}
|
/**
 * Converts this if-statement into a graph: an {@code IfVertex} whose true/false edges lead to
 * the roots of the graphs produced by the true and false branches.
 *
 * @param cve expander used to substitute config variables in the boolean expression
 * @return the combined graph, or an empty graph when both branches are empty
 * @throws InvalidIRException if either branch fails to convert
 */
@Override
public Graph toGraph(ConfigVariableExpander cve) throws InvalidIRException {
    Graph trueGraph = getTrueStatement().toGraph(cve);
    Graph falseGraph = getFalseStatement().toGraph(cve);
    // If there is nothing in the true or false sections of this if statement,
    // we can omit the if statement altogether!
    if (trueGraph.isEmpty() && falseGraph.isEmpty()) {
        return new Graph();
    }
    // Combining copies the vertices; track old->new mapping to re-locate the branch roots.
    Graph.GraphCombinationResult combination = Graph.combine(trueGraph, falseGraph);
    Graph newGraph = combination.graph;
    Collection<Vertex> trueRoots = trueGraph.roots().map(combination.oldToNewVertices::get).collect(Collectors.toList());
    Collection<Vertex> falseRoots = falseGraph.roots().map(combination.oldToNewVertices::get).collect(Collectors.toList());
    IfVertex ifVertex = new IfVertex(this.getSourceWithMetadata(),
                                     (BooleanExpression) ExpressionSubstitution.substituteBoolExpression(cve, this.booleanExpression));
    newGraph.addVertex(ifVertex);
    // Wire the if-vertex to each branch root with a true/false-labeled edge.
    for (Vertex v : trueRoots) {
        newGraph.chainVerticesUnsafe(BooleanEdge.trueFactory, ifVertex, v);
    }
    for (Vertex v : falseRoots) {
        newGraph.chainVerticesUnsafe(BooleanEdge.falseFactory, ifVertex, v);
    }
    return newGraph;
}
|
// An if-statement with a plugin in the true branch and a no-op false branch should produce
// a graph with just the if-vertex chained (via a true edge) to the plugin vertex.
@Test
public void testIfWithOneTrueStatement() throws InvalidIRException {
    ConfigVariableExpander cve = ConfigVariableExpander.withoutSecret(EnvironmentVariableProvider.defaultProvider());
    PluginDefinition pluginDef = testPluginDefinition();
    Statement trueStatement = new PluginStatement(randMeta(), pluginDef);
    Statement falseStatement = new NoopStatement(randMeta());
    BooleanExpression ifExpression = createTestExpression();
    IfStatement ifStatement = new IfStatement(
        randMeta(),
        ifExpression,
        trueStatement,
        falseStatement
    );
    Graph ifStatementGraph = ifStatement.toGraph(cve);
    assertFalse(ifStatementGraph.isEmpty());
    // Build the expected graph by hand and compare structurally (source metadata ignored).
    Graph expected = new Graph();
    IfVertex expectedIf = DSL.gIf(randMeta(), ifExpression);
    expected.addVertex(expectedIf);
    PluginVertex expectedT = DSL.gPlugin(randMeta(), pluginDef);
    expected.chainVertices(true, expectedIf, expectedT);
    assertSyntaxEquals(expected, ifStatementGraph);
}
|
/**
 * Restores producer state to the desired version using the given blob retriever.
 * Delegates directly to the superclass — presumably overridden to widen visibility or to
 * serve as a hook point; confirm against the class hierarchy.
 */
@Override
public HollowProducer.ReadState restore(long versionDesired, HollowConsumer.BlobRetriever blobRetriever) {
    return super.restore(versionDesired, blobRetriever);
}
|
// Publishes a cycle, restores to that version, and checks that the restore succeeded and did
// not count as an additional cycle.
@Test
public void testPublishAndRestore() {
    HollowProducer producer = createProducer(tmpFolder, schema);
    long version = testPublishV1(producer, 2, 10);
    producer.restore(version, blobRetriever);
    Assert.assertNotNull(lastRestoreStatus);
    assertEquals(Status.SUCCESS, lastRestoreStatus.getStatus());
    assertEquals("Version should be the same", version, lastRestoreStatus.getDesiredVersion());
    // Bug fix: JUnit's assertEquals takes (expected, actual); the original call passed the
    // actual value first, which produces a misleading failure message.
    assertEquals(1, producer.getCycleCountWithPrimaryStatus());
}
|
/**
 * Returns a future for the value stored under the given tag and state family, decoded with
 * {@code coder} once the underlying state fetch completes.
 */
public <T> Future<T> valueFuture(ByteString encodedTag, String stateFamily, Coder<T> coder) {
  return stateFuture(StateTag.of(StateTag.Kind.VALUE, encodedTag, stateFamily), coder);
}
|
// Requesting a value future must be lazy (no Windmill call until get()), issue exactly one
// correctly-formed KeyedGetDataRequest, and decode the returned tag value.
@Test
public void testReadValue() throws Exception {
  Future<Integer> future = underTest.valueFuture(STATE_KEY_1, STATE_FAMILY, INT_CODER);
  // No fetch may happen before the future is resolved.
  Mockito.verifyNoMoreInteractions(mockWindmill);
  Windmill.KeyedGetDataRequest.Builder expectedRequest =
      Windmill.KeyedGetDataRequest.newBuilder()
          .setKey(DATA_KEY)
          .setShardingKey(SHARDING_KEY)
          .setWorkToken(WORK_TOKEN)
          .setMaxBytes(WindmillStateReader.MAX_KEY_BYTES)
          .addValuesToFetch(
              Windmill.TagValue.newBuilder()
                  .setTag(STATE_KEY_1)
                  .setStateFamily(STATE_FAMILY)
                  .build());
  Windmill.KeyedGetDataResponse.Builder response =
      Windmill.KeyedGetDataResponse.newBuilder()
          .setKey(DATA_KEY)
          .addValues(
              Windmill.TagValue.newBuilder()
                  .setTag(STATE_KEY_1)
                  .setStateFamily(STATE_FAMILY)
                  .setValue(intValue(8)));
  Mockito.when(mockWindmill.getStateData(COMPUTATION, expectedRequest.build()))
      .thenReturn(response.build());
  // Resolving the future triggers exactly one fetch and decodes the stored int.
  Integer result = future.get();
  Mockito.verify(mockWindmill).getStateData(COMPUTATION, expectedRequest.build());
  Mockito.verifyNoMoreInteractions(mockWindmill);
  assertThat(result, Matchers.equalTo(8));
  assertNoReader(future);
}
|
/**
 * Builds the API super path from the annotation's {@code path()} attribute; falls back to an
 * empty string when the annotation is absent or its path is blank.
 */
@Override
protected String buildApiSuperPath(final Class<?> clazz,
                                   final ShenyuSofaClient beanShenyuClient) {
    // Guard clause: no annotation or blank path means no super path.
    if (Objects.isNull(beanShenyuClient) || StringUtils.isBlank(beanShenyuClient.path())) {
        return "";
    }
    return beanShenyuClient.path();
}
|
// A null annotation must yield an empty super path without ever consulting path().
@Test
public void testBuildApiSuperPathWhenBeanShenyuClientIsNull() {
    Class<?> clazz = Class.class;
    String realSuperPath = sofaServiceEventListener.buildApiSuperPath(clazz, null);
    verify(shenyuSofaClient, times(0)).path();
    assertEquals("", realSuperPath);
}
|
/**
 * Merges field types from several sources into the effective per-field set.
 *
 * <p>Precedence (lowest to highest): older index &lt; newer index &lt; profile &lt; custom
 * mappings. A custom mapping that shadows a profile entry gets origin
 * {@code OVERRIDDEN_PROFILE}; otherwise it gets {@code OVERRIDDEN_INDEX}.
 * All parameters may be {@code null}, in which case that source is skipped.
 */
public Collection<IndexSetFieldType> merge(final Collection<FieldTypeDTO> fromNewerIndex,
                                           final Collection<FieldTypeDTO> fromOlderIndex,
                                           final CustomFieldMappings customFieldMappings,
                                           final IndexFieldTypeProfile profile) {
    Map<String, IndexSetFieldType> result = new HashMap<>();
    // Newer index entries win over older ones, so insert them first with put()...
    if (fromNewerIndex != null) {
        fromNewerIndex.forEach(dto -> result.put(
                        dto.fieldName(),
                        toIndexSetFieldType(dto, INDEX)
                )
        );
    }
    // ...and let older entries fill only the gaps via putIfAbsent().
    if (fromOlderIndex != null) {
        fromOlderIndex.forEach(dto -> result.putIfAbsent(
                        dto.fieldName(),
                        toIndexSetFieldType(dto, INDEX)
                )
        );
    }
    // Profile mappings override anything coming from the indices.
    if (profile != null) {
        profile.customFieldMappings().forEach(profileMapping ->
                result.put(
                        profileMapping.fieldName(),
                        toIndexSetFieldType(profileMapping.toFieldTypeDTO(), PROFILE)
                )
        );
    }
    // Custom mappings override everything; record whether they shadowed a profile entry.
    if (customFieldMappings != null) {
        customFieldMappings.forEach(customFieldMapping -> {
                    final IndexSetFieldType indexSetFieldTypeFromPrevSources = result.get(customFieldMapping.fieldName());
                    result.put(
                            customFieldMapping.fieldName(),
                            toIndexSetFieldType(
                                    customFieldMapping.toFieldTypeDTO(),
                                    indexSetFieldTypeFromPrevSources != null && indexSetFieldTypeFromPrevSources.origin() == PROFILE
                                            ? OVERRIDDEN_PROFILE : OVERRIDDEN_INDEX
                            )
                    );
                }
        );
    }
    return result.values();
}
|
// When the same field appears in both indices, the profile, and a custom mapping, the custom
// mapping wins and is marked as having overridden the profile.
@Test
void customMappingsOverrideEverything() {
    final Collection<IndexSetFieldType> merged = toTest.merge(
            List.of(
                    FieldTypeDTO.create("changed_field", "long")
            ),
            List.of(
                    FieldTypeDTO.create("changed_field", "date")
            ),
            new CustomFieldMappings(
                    List.of(new CustomFieldMapping("changed_field", "ip"))
            ),
            new IndexFieldTypeProfile("id", "name", "descr", new CustomFieldMappings(
                    List.of(new CustomFieldMapping("changed_field", "double"))
            ))
    );
    assertThat(merged)
            .isNotNull()
            .hasSize(1)
            .contains(new IndexSetFieldType("changed_field", "ip", FieldTypeOrigin.OVERRIDDEN_PROFILE, false));
}
|
// Private constructor: static utility class, never instantiated.
private QueryParamsDataMap()
{
}
|
// Parses a deeply nested indexed query string (ids[i].keys[j].partN=...) and verifies the
// resulting DataMap/DataList structure reproduces every leaf value.
@Test
public void testNestedCases() throws Exception
{
  String testQS = "ids[0].keys[0].part1=part001&ids[0].keys[0].part2=part002&" +
      "ids[0].keys[1].part1=part011&ids[0].keys[1].part2=part012&" +
      "ids[1].keys[0].part1=part101&ids[1].keys[0].part2=part102&" +
      "ids[1].keys[1].part1=part111&ids[1].keys[1].part2=part112&";
  DataMap queryParamsDataMap = queryParamsDataMap(testQS);
  // "ids" must be a list of maps, each holding a "keys" list of part maps.
  Object ids = queryParamsDataMap.get("ids");
  Assert.assertTrue(ids instanceof DataList);
  DataList idsList = (DataList)ids;
  Object id = idsList.get(0);
  Assert.assertTrue(id instanceof DataMap);
  DataMap idDataMap = (DataMap)id;
  Object keys = idDataMap.get("keys");
  Assert.assertTrue(keys instanceof DataList);
  DataList keysList = (DataList)keys;
  // ids[0].keys[0]
  Object parts = keysList.get(0);
  Assert.assertTrue(parts instanceof DataMap);
  DataMap partsDataMap = (DataMap)parts;
  Assert.assertEquals("part001", partsDataMap.get("part1"));
  Assert.assertEquals("part002", partsDataMap.get("part2"));
  // ids[0].keys[1]
  parts = keysList.get(1);
  Assert.assertTrue(parts instanceof DataMap);
  partsDataMap = (DataMap)parts;
  Assert.assertEquals("part011", partsDataMap.get("part1"));
  Assert.assertEquals("part012", partsDataMap.get("part2"));
  // ids[1]
  id = idsList.get(1);
  Assert.assertTrue(id instanceof DataMap);
  idDataMap = (DataMap)id;
  keys = idDataMap.get("keys");
  Assert.assertTrue(keys instanceof DataList);
  keysList = (DataList)keys;
  // ids[1].keys[0]
  parts = keysList.get(0);
  Assert.assertTrue(parts instanceof DataMap);
  partsDataMap = (DataMap)parts;
  Assert.assertEquals("part101", partsDataMap.get("part1"));
  Assert.assertEquals("part102", partsDataMap.get("part2"));
  // ids[1].keys[1]
  parts = keysList.get(1);
  Assert.assertTrue(parts instanceof DataMap);
  partsDataMap = (DataMap)parts;
  Assert.assertEquals("part111", partsDataMap.get("part1"));
  Assert.assertEquals("part112", partsDataMap.get("part2"));
}
|
/**
 * Injects topic properties into the statement using a fresh default
 * {@code TopicProperties.Builder}; see the two-argument overload for the actual logic.
 */
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
    final ConfiguredStatement<T> statement
) {
  return inject(statement, new TopicProperties.Builder());
}
|
// CREATE STREAM ... AS SELECT must get cleanup.policy=delete injected into its properties.
@Test
public void shouldHaveCleanupPolicyDeleteCsas() {
  // Given:
  givenStatement("CREATE STREAM x AS SELECT * FROM SOURCE;");
  // When:
  final CreateAsSelect csas = ((CreateAsSelect) injector.inject(statement, builder).getStatement());
  // Then:
  final CreateSourceAsProperties props = csas.getProperties();
  assertThat(props.getCleanupPolicy(), is(Optional.of(TopicConfig.CLEANUP_POLICY_DELETE)));
}
|
/**
 * Parses the given string as an int.
 *
 * <ul>
 *   <li>blank input yields 0;</li>
 *   <li>a "0x"-prefixed string is parsed as hexadecimal;</li>
 *   <li>scientific notation (containing 'E'/'e') is rejected, because such very large or very
 *       small values lose precision when converted to int and are meaningless;</li>
 *   <li>anything else is tried as a plain int, falling back to the generic number parser.</li>
 * </ul>
 *
 * @param number the string to parse; may be blank
 * @return the parsed int, or 0 for blank input
 * @throws NumberFormatException when the string uses scientific notation or cannot be parsed
 */
public static int parseInt(String number) throws NumberFormatException {
    if (StrUtil.isBlank(number)) {
        return 0;
    }
    if (StrUtil.startWithIgnoreCase(number, "0x")) {
        // "0x"-prefixed strings (e.g. 0x04) are hexadecimal.
        return Integer.parseInt(number.substring(2), 16);
    }
    if (StrUtil.containsIgnoreCase(number, "E")) {
        // Scientific notation is deliberately unsupported: converting such values to int
        // loses precision, so the result would be meaningless.
        throw new NumberFormatException(StrUtil.format("Unsupported int format: [{}]", number));
    }
    try {
        return Integer.parseInt(number);
    } catch (NumberFormatException e) {
        // Fall back to the generic parser; exact fallback semantics depend on
        // parseNumber — confirm against its documentation.
        return parseNumber(number).intValue();
    }
}
|
@Test
public void parseNumberTest4(){
    assertThrows(NumberFormatException.class, () -> {
        // issue#I5M55F
        // Scientific notation is deliberately unsupported: such very large or very small
        // values lose precision when converted to int, so parsing must fail.
        final String numberStr = "429900013E20220812163344551";
        NumberUtil.parseInt(numberStr);
    });
}
|
/**
 * Maps the single output of the original transform onto the replacement's output so the
 * pipeline surgery can rewire downstream consumers.
 */
@Override
public Map<PCollection<?>, ReplacementOutput> mapOutputs(
    Map<TupleTag<?>, PCollection<?>> outputs, PCollection<T> newOutput) {
  return ReplacementOutputs.singleton(outputs, newOutput);
}
|
// mapOutputs must pair the original flatten output with the deduplicating replacement's output.
@Test
public void outputMapping() {
  // The list intentionally contains duplicate inputs to exercise the deduplicating flatten.
  final PCollectionList<String> inputList =
      PCollectionList.of(first).and(second).and(first).and(first);
  PCollection<String> original = inputList.apply(Flatten.pCollections());
  PCollection<String> replacement = inputList.apply(new FlattenWithoutDuplicateInputs<>());
  assertThat(
      factory.mapOutputs(PValues.expandOutput(original), replacement),
      Matchers.hasEntry(
          replacement,
          ReplacementOutput.of(
              TaggedPValue.ofExpandedValue(original),
              TaggedPValue.ofExpandedValue(replacement))));
}
|
/**
 * Converts the persisted issue locations of a component into API {@link Flow} objects.
 * Returns an empty list when no locations were stored.
 */
public List<Flow> convertFlows(String componentName, @Nullable DbIssues.Locations issueLocations) {
  return issueLocations == null
    ? Collections.emptyList()
    : issueLocations.getFlowList().stream()
      .map(flow -> toFlow(componentName, flow))
      .collect(Collectors.toCollection(LinkedList::new));
}
|
// An empty (but non-null) Locations message must convert to an empty flow list.
@Test
public void convertFlows_withEmptyDbLocations_returnsEmptyList() {
  DbIssues.Locations issueLocations = DbIssues.Locations.newBuilder().build();
  assertThat(flowGenerator.convertFlows(COMPONENT_NAME, issueLocations)).isEmpty();
}
|
/**
 * Resolves the node at which this relative path starts, following the full recorded begin
 * path from the given common ancestor.
 */
public NodeModel pathBegin(NodeModel commonAncestor) {
    return relativeNode(commonAncestor, beginPath, beginPath.length);
}
|
// For a zero-length relative path (begin == end), pathBegin must return its argument as-is.
@Test
public void zeroLevelBegin(){
    final NodeModel parent = root();
    final NodeRelativePath nodeRelativePath = new NodeRelativePath(parent, parent);
    final NodeModel startingPoint = new NodeModel("startingPoint", map);
    assertThat(nodeRelativePath.pathBegin(startingPoint), equalTo(startingPoint));
}
|
/**
 * Decodes a variable-length integer from the stream, rejecting any decoded value that does
 * not fit in 32 unsigned bits.
 *
 * @throws IOException on stream failure or when the decoded value is out of range
 */
public static int decodeInt(InputStream stream) throws IOException {
  final long value = decodeLong(stream);
  // Accept only the unsigned 32-bit range [0, 2^32).
  final boolean fitsUnsignedInt = value >= 0 && value < (1L << 32);
  if (!fitsUnsignedInt) {
    throw new IOException("varint overflow " + value);
  }
  return (int) value;
}
|
@Test
public void decodeThrowsExceptionForIntUnderflow() throws IOException {
  // -1 encodes as the maximal unsigned varint, which is out of the 32-bit unsigned range.
  byte[] encoded = encodeLong(-1);
  thrown.expect(IOException.class);
  decodeInt(encoded);
}
|
/**
 * Requests missing file keys from the server and, for each user/file pair, re-encrypts the
 * file key with that user's public key, looping until the server reports no more missing keys.
 *
 * @param client   The session; must be an {@code SDSSession}
 * @param callback Prompts for the key-pair passphrase when decryption is required
 * @param file     Limits processing to a single node (set after upload); {@code null} when
 *                 invoked from the scheduler to process all missing keys
 * @return The key-set requests that were submitted to the server
 * @throws BackgroundException on API or crypto failures
 */
@Override
public List<UserFileKeySetRequest> operate(final Session<?> client, final PasswordCallback callback, final Path file) throws BackgroundException {
    final SDSSession session = (SDSSession) client;
    final SDSNodeIdProvider nodeid = (SDSNodeIdProvider) session._getFeature(VersionIdProvider.class);
    try {
        final UserAccountWrapper account = session.userAccount();
        if(!account.isEncryptionEnabled()) {
            // Without a key pair there is nothing to re-encrypt.
            log.warn(String.format("No key pair found in user account %s", account));
            return Collections.emptyList();
        }
        final List<UserFileKeySetRequest> processed = new ArrayList<>();
        // Null when operating from scheduler. File reference is set for post upload.
        final Long fileId = file != null ? Long.parseLong(nodeid.getVersionId(file)) : null;
        UserFileKeySetBatchRequest request;
        do {
            if(log.isDebugEnabled()) {
                log.debug(String.format("Request a list of missing file keys for file %s", file));
            }
            request = new UserFileKeySetBatchRequest();
            final MissingKeysResponse missingKeys = new NodesApi(session.getClient()).requestMissingFileKeys(
                null, null, null, fileId, null, null, null);
            // Index the response by id so each user/file item can be resolved below.
            final Map<Long, List<UserUserPublicKey>> userPublicKeys = missingKeys.getUsers().stream().collect(groupingBy(UserUserPublicKey::getId));
            final Map<Long, List<FileFileKeys>> files = missingKeys.getFiles().stream().collect(groupingBy(FileFileKeys::getId));
            for(UserIdFileIdItem item : missingKeys.getItems()) {
                // Cache passphrases per key pair so the user is prompted at most once each.
                final HashMap<UserKeyPairContainer, Credentials> passphrases = new HashMap<>();
                for(FileFileKeys fileKey : files.get(item.getFileId())) {
                    final EncryptedFileKey encryptedFileKey = TripleCryptConverter.toCryptoEncryptedFileKey(fileKey.getFileKeyContainer());
                    final UserKeyPairContainer keyPairForDecryption = session.getKeyPairForFileKey(encryptedFileKey.getVersion());
                    if(!passphrases.containsKey(keyPairForDecryption)) {
                        passphrases.put(keyPairForDecryption,
                            new TripleCryptKeyPair().unlock(callback, session.getHost(), TripleCryptConverter.toCryptoUserKeyPair(keyPairForDecryption)));
                    }
                    final Credentials passphrase = passphrases.get(keyPairForDecryption);
                    for(UserUserPublicKey userPublicKey : userPublicKeys.get(item.getUserId())) {
                        // Decrypt with our private key, re-encrypt with the target user's public key.
                        final EncryptedFileKey fk = this.encryptFileKey(
                            TripleCryptConverter.toCryptoUserPrivateKey(keyPairForDecryption.getPrivateKeyContainer()),
                            passphrase, userPublicKey, fileKey);
                        final UserFileKeySetRequest keySetRequest = new UserFileKeySetRequest()
                            .fileId(item.getFileId())
                            .userId(item.getUserId())
                            .fileKey(TripleCryptConverter.toSwaggerFileKey(fk));
                        if(log.isDebugEnabled()) {
                            log.debug(String.format("Missing file key processed for file %d and user %d", item.getFileId(), item.getUserId()));
                        }
                        request.addItemsItem(keySetRequest);
                    }
                }
            }
            if(!request.getItems().isEmpty()) {
                if(log.isDebugEnabled()) {
                    log.debug(String.format("Set file keys with %s", request));
                }
                new NodesApi(session.getClient()).setUserFileKeys(request, StringUtils.EMPTY);
                processed.addAll(request.getItems());
            }
        }
        // Keep paging until a round produces no more keys to set.
        while(!request.getItems().isEmpty());
        this.deleteDeprecatedKeyPair(session);
        return processed;
    }
    catch(ApiException e) {
        throw new SDSExceptionMappingService(nodeid).map(e);
    }
    catch(CryptoException e) {
        throw new TripleCryptExceptionMappingService().map(e);
    }
}
|
// End-to-end: upload an encrypted file into a shared encrypted room, then run the scheduler
// feature and verify it produced a key-set request for that file's id.
@Test
public void testMissingKeys() throws Exception {
    final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
    final Path room = new SDSDirectoryFeature(session, nodeid).mkdir(new Path(
        new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
    // Encrypt the room and add a second user so a missing file key is created for them.
    final EncryptRoomRequest encrypt = new EncryptRoomRequest().isEncrypted(true);
    final Node node = new NodesApi(session.getClient()).encryptRoom(encrypt, Long.parseLong(new SDSNodeIdProvider(session).getVersionId(room)), StringUtils.EMPTY, null);
    new NodesApi(session.getClient()).updateRoomUsers(new RoomUsersAddBatchRequest().
        addItemsItem(new RoomUsersAddBatchRequestItem().id(757L).permissions(new NodePermissions().read(true))), node.getId(), StringUtils.EMPTY);
    room.attributes().withCustom(KEY_ENCRYPTED, String.valueOf(true));
    final byte[] content = RandomUtils.nextBytes(32769);
    final TransferStatus status = new TransferStatus();
    status.setLength(content.length);
    final Path test = new Path(room, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
    final SDSEncryptionBulkFeature bulk = new SDSEncryptionBulkFeature(session, nodeid);
    bulk.pre(Transfer.Type.upload, Collections.singletonMap(new TransferItem(test), status), new DisabledConnectionCallback());
    final TripleCryptWriteFeature writer = new TripleCryptWriteFeature(session, nodeid, new SDSDirectS3MultipartWriteFeature(session, nodeid));
    final StatusOutputStream<Node> out = writer.write(test, status, new DisabledConnectionCallback());
    new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out);
    assertTrue(new DefaultFindFeature(session).find(test));
    assertEquals(content.length, new SDSAttributesFinderFeature(session, nodeid).find(test).getSize());
    // Run the missing-file-keys scheduler restricted to the uploaded file.
    final SDSMissingFileKeysSchedulerFeature background = new SDSMissingFileKeysSchedulerFeature();
    final List<UserFileKeySetRequest> processed = background.operate(session, new DisabledPasswordCallback() {
        @Override
        public Credentials prompt(final Host bookmark, final String title, final String reason, final LoginOptions options) {
            return new VaultCredentials("eth[oh8uv4Eesij");
        }
    }, test);
    assertTrue(processed.stream().filter(userFileKeySetRequest -> userFileKeySetRequest.getFileId().equals(Long.parseLong(test.attributes().getVersionId()))).findAny().isPresent());
    new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
/**
 * Downloads the page for the given request, applying the task's site settings and an optional
 * proxy. Success/failure callbacks are invoked, the HTTP entity is always consumed to return
 * the connection to the pool, and the proxy (if any) is returned to its provider.
 *
 * @return the downloaded page; on IO failure, a page marked as failed
 * @throws NullPointerException when the task or its site is null
 */
@Override
public Page download(Request request, Task task) {
    if (task == null || task.getSite() == null) {
        throw new NullPointerException("task or site can not be null");
    }
    CloseableHttpResponse httpResponse = null;
    CloseableHttpClient httpClient = getHttpClient(task.getSite());
    Proxy proxy = proxyProvider != null ? proxyProvider.getProxy(request, task) : null;
    HttpClientRequestContext requestContext = httpUriRequestConverter.convert(request, task.getSite(), proxy);
    // Start with a failed page so the finally block can report something on error paths.
    Page page = Page.fail(request);
    try {
        httpResponse = httpClient.execute(requestContext.getHttpUriRequest(), requestContext.getHttpClientContext());
        // Request-level charset wins over the site default.
        page = handleResponse(request, request.getCharset() != null ? request.getCharset() : task.getSite().getCharset(), httpResponse, task);
        onSuccess(page, task);
        return page;
    } catch (IOException e) {
        onError(page, task, e);
        return page;
    } finally {
        if (httpResponse != null) {
            //ensure the connection is released back to pool
            EntityUtils.consumeQuietly(httpResponse.getEntity());
        }
        if (proxyProvider != null && proxy != null) {
            proxyProvider.returnProxy(proxy, page, task);
        }
    }
}
|
// The charset advertised in the Content-Type response header must be picked up by the page.
@Test
public void test_download_set_charset() throws Exception {
    HttpServer server = httpServer(13423);
    server.response(header("Content-Type","text/html; charset=utf-8")).response("hello world!");
    Runner.running(server, new Runnable() {
        @Override
        public void run() throws Exception {
            final HttpClientDownloader httpClientDownloader = new HttpClientDownloader();
            Request request = new Request();
            request.setUrl("http://127.0.0.1:13423/");
            Page page = httpClientDownloader.download(request, Site.me().toTask());
            assertThat(page.getCharset()).isEqualTo("utf-8");
        }
    });
}
|
/**
 * Persists the finished state instance (serialized output params and exception) and reports
 * the branch result to the transaction coordinator, unless branch reporting on success has
 * been disabled for performance.
 */
@Override
public void recordStateFinished(StateInstance stateInstance, ProcessContext context) {
    if (stateInstance != null) {
        // Serialize output params and exception before persisting.
        stateInstance.setSerializedOutputParams(paramsSerializer.serialize(stateInstance.getOutputParams()));
        stateInstance.setSerializedException(exceptionSerializer.serialize(stateInstance.getException()));
        executeUpdate(stateLogStoreSqls.getRecordStateFinishedSql(dbType), STATE_INSTANCE_TO_STATEMENT_FOR_UPDATE,
            stateInstance);
        //A switch to skip branch report on branch success, in order to optimize performance
        StateMachineConfig stateMachineConfig = (StateMachineConfig) context.getVariable(
            DomainConstants.VAR_NAME_STATEMACHINE_CONFIG);
        // Skip reporting only when: DB config + rm-report-success disabled + status is SU.
        if (!(stateMachineConfig instanceof DbStateMachineConfig
            && !((DbStateMachineConfig)stateMachineConfig).isRmReportSuccessEnable()
            && ExecutionStatus.SU.equals(stateInstance.getStatus()))) {
            branchReport(stateInstance, context);
        }
    }
}
|
// Without injected serializers/datasource the call must fail fast with an NPE.
@Test
public void testRecordStateFinished() {
    DbAndReportTcStateLogStore dbAndReportTcStateLogStore = new DbAndReportTcStateLogStore();
    StateInstanceImpl stateMachineInstance = new StateInstanceImpl();
    ProcessContextImpl context = new ProcessContextImpl();
    context.setVariable(DomainConstants.VAR_NAME_STATEMACHINE_CONFIG, new DbStateMachineConfig());
    Assertions.assertThrows(NullPointerException.class,
        () -> dbAndReportTcStateLogStore.recordStateFinished(stateMachineInstance, context));
}
|
/**
 * Converts a list of Hive field schemas into a Beam schema, one Beam field per Hive field.
 */
static Schema toBeamSchema(List<FieldSchema> fields) {
  return fields.stream().map(SchemaUtils::toBeamField).collect(toSchema());
}
|
// Parameterized Hive types (char(n), varchar(n), decimal(p,s)) must map to the corresponding
// unparameterized Beam field types, all nullable.
@Test
public void testParameterizedTypesToBeamTypes() {
  List<FieldSchema> listOfFieldSchema = new ArrayList<>();
  listOfFieldSchema.add(new FieldSchema("parameterizedChar", "char(10)", null));
  listOfFieldSchema.add(new FieldSchema("parameterizedVarchar", "varchar(100)", null));
  listOfFieldSchema.add(new FieldSchema("parameterizedDecimal", "decimal(30,16)", null));
  Schema expectedSchema =
      Schema.builder()
          .addNullableField("parameterizedChar", Schema.FieldType.STRING)
          .addNullableField("parameterizedVarchar", Schema.FieldType.STRING)
          .addNullableField("parameterizedDecimal", Schema.FieldType.DECIMAL)
          .build();
  Schema actualSchema = SchemaUtils.toBeamSchema(listOfFieldSchema);
  Assert.assertEquals(expectedSchema, actualSchema);
}
|
/**
 * Returns whether the current thread participates in a global transaction, i.e. whether an
 * XID is bound to the thread context.
 */
public static boolean inGlobalTransaction() {
    return CONTEXT_HOLDER.get(KEY_XID) != null;
}
|
// inGlobalTransaction must track bind/unbind of the XID on the current thread.
@Test
public void testInGlobalTransaction() {
    assertThat(RootContext.inGlobalTransaction()).isFalse();
    RootContext.bind(DEFAULT_XID);
    assertThat(RootContext.inGlobalTransaction()).isTrue();
    RootContext.unbind();
    assertThat(RootContext.inGlobalTransaction()).isFalse();
    assertThat(RootContext.getXID()).isNull();
}
|
/**
 * A database is complete only when it has at least one rule configured and at least one
 * storage unit registered.
 */
public boolean isComplete() {
    // De Morgan form of: rules non-empty AND storage units non-empty.
    return !(ruleMetaData.getRules().isEmpty() || resourceMetaData.getStorageUnits().isEmpty());
}
|
// A database with rules but no storage units must be reported as incomplete.
@Test
void assertIsNotCompleteWithoutDataSource() {
    ResourceMetaData resourceMetaData = new ResourceMetaData(Collections.emptyMap());
    RuleMetaData ruleMetaData = new RuleMetaData(Collections.singleton(mock(ShardingSphereRule.class)));
    assertFalse(new ShardingSphereDatabase("foo_db", mock(DatabaseType.class), resourceMetaData, ruleMetaData, Collections.emptyMap()).isComplete());
}
|
/**
 * Records the outcome of a mail send attempt on the given log entry: success (with message id)
 * when no exception occurred, otherwise failure with the root-cause message.
 */
@Override
public void updateMailSendResult(Long logId, String messageId, Exception exception) {
    // Common fields for both outcomes.
    MailLogDO update = new MailLogDO().setId(logId).setSendTime(LocalDateTime.now());
    if (exception == null) {
        // 1. Success: record the provider's message id.
        update.setSendStatus(MailSendStatusEnum.SUCCESS.getStatus()).setSendMessageId(messageId);
    } else {
        // 2. Failure: record the root-cause message of the exception.
        update.setSendStatus(MailSendStatusEnum.FAILURE.getStatus()).setSendException(getRootCauseMessage(exception));
    }
    mailLogMapper.updateById(update);
}
|
@Test
public void testUpdateMailSendResult_success() {
    // mock data: an INIT log entry with no result fields set yet
    MailLogDO log = randomPojo(MailLogDO.class, o -> {
        o.setSendStatus(MailSendStatusEnum.INIT.getStatus());
        o.setSendTime(null).setSendMessageId(null).setSendException(null)
                .setTemplateParams(randomTemplateParams());
    });
    mailLogMapper.insert(log);
    // prepare arguments
    Long logId = log.getId();
    String messageId = randomString();
    // invoke: null exception means the send succeeded
    mailLogService.updateMailSendResult(logId, messageId, null);
    // assert: status flipped to SUCCESS, send time and message id recorded, no exception
    MailLogDO dbLog = mailLogMapper.selectById(logId);
    assertEquals(MailSendStatusEnum.SUCCESS.getStatus(), dbLog.getSendStatus());
    assertNotNull(dbLog.getSendTime());
    assertEquals(messageId, dbLog.getSendMessageId());
    assertNull(dbLog.getSendException());
}
|
/**
 * Returns the post with the given id as a reactive Mono; emits empty when the id is unknown.
 */
@GetMapping(value = "/{id}")
public Mono<Post> get(@PathVariable(value = "id") Long id) {
    return this.posts.findById(id);
}
|
// Fetching posts 1 and 2 must return the seeded titles as JSON.
@Test
public void getPostById() throws Exception {
    this.rest
        .get()
        .uri("/posts/1")
        .accept(APPLICATION_JSON)
        .exchange()
        .expectBody()
        .jsonPath("$.title")
        .isEqualTo("post one");
    this.rest
        .get()
        .uri("/posts/2")
        .accept(APPLICATION_JSON)
        .exchange()
        .expectBody()
        .jsonPath("$.title")
        .isEqualTo("post two");
}
|
/**
 * Activates the app for the given authenticator within the supplied session and returns the
 * activation response; concrete flows define the actual activation behavior.
 */
public abstract AppResponse activateApp(AppAuthenticator appAuthenticator, AppSession appSession);
|
// Calls the real activateApp implementation (via thenCallRealMethod) and verifies activation
// side effects: status, timestamps, cleared activation fields and the response payload.
@Test
void activateAppActualMethodByActivationCodeTest() {
    AppAuthenticator appAuthenticator = new AppAuthenticator();
    appAuthenticator.setActivationCode("");
    appAuthenticator.setGeldigheidstermijn("");
    when(flow.activateApp(any(), any(), anyString())).thenCallRealMethod();
    when(flow.activateApp(any(), any(), anyString(), anyBoolean())).thenCallRealMethod();
    AppResponse result = flow.activateApp(appAuthenticator, mockedAppSession, LETTER);
    assertEquals("active", appAuthenticator.getStatus());
    assertNotNull(appAuthenticator.getActivatedAt());
    // Activation must clear the one-time activation code and validity term.
    assertNull(appAuthenticator.getActivationCode());
    assertNull(appAuthenticator.getGeldigheidstermijn());
    assertTrue(result instanceof ActivateAppResponse);
    assertEquals(20, ((ActivateAppResponse) result).getAuthenticationLevel());
}
|
/**
 * Parses an already-decoded URL string into a {@code URL}, splitting off the query part
 * (after the first '?') into a parameter map when present.
 */
public static URL parseDecodedStr(String decodedURLStr) {
    int queryStart = decodedURLStr.indexOf('?');
    if (queryStart < 0) {
        // No query string: the whole input is the URL body.
        return parseURLBody(decodedURLStr, decodedURLStr, null);
    }
    Map<String, String> parameters = parseDecodedParams(decodedURLStr, queryStart + 1);
    String decodedBody = decodedURLStr.substring(0, queryStart);
    return parseURLBody(decodedURLStr, decodedBody, parameters);
}
|
// parseDecodedStr must agree with URL.valueOf on every valid case and reject invalid ones.
@Test
void testDecoded() {
    testCases.forEach(testCase -> {
        assertThat(URLStrParser.parseDecodedStr(testCase), equalTo(URL.valueOf(testCase)));
    });
    errorDecodedCases.forEach(errorCase -> {
        Assertions.assertThrows(RuntimeException.class, () -> URLStrParser.parseDecodedStr(errorCase));
    });
}
|
/**
 * Extracts request parameters: from the query string for GET requests, from the entity body
 * for entity-enclosing requests, otherwise an empty (insertion-ordered) map.
 */
public static Map<String, String> parseParams(HttpRequest request) {
    if (request instanceof HttpGet) {
        return parseParamsForGet(request);
    }
    if (request instanceof HttpEntityEnclosingRequestBase) {
        return parseParamsForRequestWithEntity((HttpEntityEnclosingRequestBase) request);
    }
    // Unsupported request type: no parameters.
    return new LinkedHashMap<>();
}
|
// Query-string parameters of a GET request must be parsed into the map.
@Test
public void parseParams_shouldParseParamsFromGetRequests() throws Exception {
    HttpGet httpGet = new HttpGet("http://example.com/path?foo=bar");
    Map<String, String> parsed = ParamsParser.parseParams(httpGet);
    assertThat(parsed.size()).isEqualTo(1);
    assertThat(parsed.get("foo")).isEqualTo("bar");
}
|
/**
 * Commits the current proxy transaction.
 * <p>
 * Runs every registered {@code TransactionHook#beforeCommit} first, then — if the
 * session is in a transaction — delegates to either the local or the distributed
 * transaction manager, and finally (even if the commit throws) fires the
 * afterCommit hooks, releases savepoints on every cached backend connection and
 * resets the session's transaction state.
 *
 * @throws SQLException if the underlying commit fails
 */
@Override
public void commit() throws SQLException {
    for (TransactionHook each : transactionHooks) {
        each.beforeCommit(connection.getCachedConnections().values(), getTransactionContext(), ProxyContext.getInstance().getContextManager().getComputeNodeInstanceContext().getLockContext());
    }
    if (connection.getConnectionSession().getTransactionStatus().isInTransaction()) {
        try {
            // LOCAL transactions (or a missing distributed manager) commit locally;
            // otherwise the distributed manager commits or rolls back depending on
            // whether an exception occurred during the transaction.
            if (TransactionType.LOCAL == TransactionUtils.getTransactionType(getTransactionContext()) || null == distributionTransactionManager) {
                localTransactionManager.commit();
            } else {
                distributionTransactionManager.commit(getTransactionContext().isExceptionOccur());
            }
        } finally {
            // Cleanup must run regardless of the commit outcome.
            for (TransactionHook each : transactionHooks) {
                each.afterCommit(connection.getCachedConnections().values(),
                        getTransactionContext(), ProxyContext.getInstance().getContextManager().getComputeNodeInstanceContext().getLockContext());
            }
            for (Connection each : connection.getCachedConnections().values()) {
                ConnectionSavepointManager.getInstance().transactionFinished(each);
            }
            connection.getConnectionSession().getTransactionStatus().setInTransaction(false);
            connection.getConnectionSession().getConnectionContext().close();
        }
    }
}
|
@Test
void assertCommitForDistributedTransaction() throws SQLException {
    // XA transactions must commit through the distributed transaction manager
    // and leave the session marked as no longer in a transaction.
    ContextManager contextManager = mockContextManager(TransactionType.XA);
    when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager);
    newBackendTransactionManager(TransactionType.XA, true);
    backendTransactionManager.commit();
    verify(transactionStatus).setInTransaction(false);
    verify(distributionTransactionManager).commit(false);
}
|
/**
 * Recomputes the top-k bundle load data from the given stats snapshot and
 * publishes it into {@code loadData}'s top-bundles list.
 * <p>
 * System-namespace bundles are always excluded; bundles carrying policies are
 * excluded unless loadBalancerSheddingBundlesWithPoliciesEnabled is set.
 *
 * @param bundleStats per-bundle statistics to rank
 * @param topk maximum number of entries to publish
 */
public void update(Map<String, NamespaceBundleStats> bundleStats, int topk) {
    arr.clear();
    try {
        var isLoadBalancerSheddingBundlesWithPoliciesEnabled =
                pulsar.getConfiguration().isLoadBalancerSheddingBundlesWithPoliciesEnabled();
        for (var etr : bundleStats.entrySet()) {
            String bundle = etr.getKey();
            // TODO: do not filter system topic while shedding
            if (NamespaceService.isSystemServiceNamespace(NamespaceBundle.getBundleNamespace(bundle))) {
                continue;
            }
            if (!isLoadBalancerSheddingBundlesWithPoliciesEnabled && hasPolicies(bundle)) {
                continue;
            }
            arr.add(etr);
        }
        var topKBundlesLoadData = loadData.getTopBundlesLoadData();
        topKBundlesLoadData.clear();
        if (arr.isEmpty()) {
            return;
        }
        topk = Math.min(topk, arr.size());
        partitionSort(arr, topk);
        // NOTE(review): assumes partitionSort places the k top-ranked entries in
        // the first k slots; iterating backwards appends them in reverse of that
        // order — confirm against partitionSort's ordering contract.
        for (int i = topk - 1; i >= 0; i--) {
            var etr = arr.get(i);
            topKBundlesLoadData.add(
                    new TopBundlesLoadData.BundleLoadData(etr.getKey(), (NamespaceBundleStats) etr.getValue()));
        }
    } finally {
        // arr is a reused scratch list; always leave it empty.
        arr.clear();
    }
}
|
@Test
public void testAntiAffinityGroupPolicy() throws MetadataStoreException {
    // With an anti-affinity group configured, the policy-carrying bundle2 must
    // be filtered out even though it has the higher msgRateIn, leaving only
    // bundle1 in the top-k result.
    setAntiAffinityGroup();
    Map<String, NamespaceBundleStats> bundleStats = new HashMap<>();
    var topKBundles = new TopKBundles(pulsar);
    NamespaceBundleStats stats1 = new NamespaceBundleStats();
    stats1.msgRateIn = 500;
    bundleStats.put(bundle1, stats1);
    NamespaceBundleStats stats2 = new NamespaceBundleStats();
    stats2.msgRateIn = 10000;
    bundleStats.put(bundle2, stats2);
    topKBundles.update(bundleStats, 2);
    assertEquals(topKBundles.getLoadData().getTopBundlesLoadData().size(), 1);
    var top0 = topKBundles.getLoadData().getTopBundlesLoadData().get(0);
    assertEquals(top0.bundleName(), bundle1);
}
|
/**
 * Appends a date and/or time suffix to a filename, keeping its extension last.
 * <p>
 * When {@code specifyFormat} is set (and a format is given) a single explicit
 * pattern is used; otherwise the date and time patterns are appended
 * independently, each prefixed with an underscore. Environment variables in
 * the filename are resolved first. Returns {@code null} for an empty filename.
 */
protected String addDatetimeToFilename( String filename, boolean addDate, String datePattern, boolean addTime,
  String timePattern, boolean specifyFormat, String datetimeFormat ) {
  if ( Utils.isEmpty( filename ) ) {
    return null;
  }
  // Resolve environment variables so the suffix is attached to the real name.
  final String resolvedName = environmentSubstitute( filename );
  String base = FilenameUtils.removeExtension( resolvedName );
  String extension = FilenameUtils.getExtension( resolvedName );
  if ( !StringUtil.isEmpty( extension ) ) {
    // Re-add the dot stripped by getExtension.
    extension = '.' + extension;
  }
  final SimpleDateFormat formatter = new SimpleDateFormat();
  final Date now = new Date();
  if ( specifyFormat && !Utils.isEmpty( datetimeFormat ) ) {
    // A single explicit format wins over the separate date/time flags.
    formatter.applyPattern( datetimeFormat );
    base += formatter.format( now );
  } else {
    if ( addDate && null != datePattern ) {
      formatter.applyPattern( datePattern );
      base += '_' + formatter.format( now );
    }
    if ( addTime && null != timePattern ) {
      formatter.applyPattern( timePattern );
      base += '_' + formatter.format( now );
    }
  }
  return base + extension;
}
|
/**
 * Verifies addDatetimeToFilename for a dot-free path: every combination of the
 * date / time / explicit-format flags, including the null-pattern cases where
 * the filename must come back unchanged.
 */
@Test
public void testAddDatetimeToFilename_zipWithoutDotsInFolderWithoutDots() {
  JobEntryBase jobEntryBase = new JobEntryBase();
  String fullFilename;
  String filename = "/folder_without_dots/zip_without_dots_in_folder_without_dots";
  // add nothing
  fullFilename = jobEntryBase.addDatetimeToFilename( filename, false, null, false, null, false, null );
  assertNotNull( fullFilename );
  assertTrue( Pattern.matches( filename, fullFilename ) );
  // add date
  fullFilename = jobEntryBase.addDatetimeToFilename( filename, true, "yyyyMMdd", false, null, false, null );
  assertNotNull( fullFilename );
  assertTrue( Pattern.matches( filename + DATE_PATTERN, fullFilename ) );
  fullFilename = jobEntryBase.addDatetimeToFilename( filename, true, null, false, null, false, null );
  assertNotNull( fullFilename );
  assertEquals( filename, fullFilename );
  // add time
  fullFilename = jobEntryBase.addDatetimeToFilename( filename, false, null, true, "HHmmssSSS", false, null );
  assertNotNull( fullFilename );
  assertTrue( Pattern.matches( filename + TIME_PATTERN, fullFilename ) );
  fullFilename = jobEntryBase.addDatetimeToFilename( filename, false, null, true, null, false, null );
  assertNotNull( fullFilename );
  assertEquals( filename, fullFilename );
  // add date and time
  fullFilename = jobEntryBase.addDatetimeToFilename( filename, true, "yyyyMMdd", true, "HHmmssSSS", false, null );
  assertNotNull( fullFilename );
  assertTrue( Pattern.matches( filename + DATE_PATTERN + TIME_PATTERN, fullFilename ) );
  fullFilename = jobEntryBase.addDatetimeToFilename( filename, true, null, true, "HHmmssSSS", false, null );
  assertNotNull( fullFilename );
  assertTrue( Pattern.matches( filename + TIME_PATTERN, fullFilename ) );
  fullFilename = jobEntryBase.addDatetimeToFilename( filename, true, "yyyyMMdd", true, null, false, null );
  assertNotNull( fullFilename );
  assertTrue( Pattern.matches( filename + DATE_PATTERN, fullFilename ) );
  fullFilename = jobEntryBase.addDatetimeToFilename( filename, true, null, true, null, false, null );
  assertNotNull( fullFilename );
  assertEquals( filename, fullFilename );
  // add datetime
  fullFilename =
    jobEntryBase.addDatetimeToFilename( filename, false, null, false, null, true, "(yyyyMMdd_HHmmssSSS)" );
  assertNotNull( fullFilename );
  assertTrue( Pattern.matches( filename + DATE_TIME_PATTERN, fullFilename ) );
  fullFilename =
    jobEntryBase.addDatetimeToFilename( filename, false, null, false, null, true, null );
  assertNotNull( fullFilename );
  assertEquals( filename, fullFilename );
}
|
/**
 * Routes one input record into the windowing machinery.
 * <p>
 * Lazily initializes on the first call. Under ATMOST_ONCE the record is acked
 * immediately. With event time enabled, records not deemed late by the
 * watermark generator enter the window manager keyed by their extracted
 * timestamp; late records go to the configured late-data topic (or are just
 * logged). With processing time, records are stamped with the wall clock.
 * Always returns {@code null}: output is produced by window evaluation later.
 */
@Override
public X process(T input, Context context) throws Exception {
    if (!this.initialized) {
        initialize(context);
    }
    // record must be PulsarFunctionRecord.
    Record<T> record = (Record<T>) context.getCurrentRecord();
    // windows function processing semantics requires separate processing
    if (windowConfig.getProcessingGuarantees() == WindowConfig.ProcessingGuarantees.ATMOST_ONCE) {
        record.ack();
    }
    if (isEventTime()) {
        long ts = this.timestampExtractor.extractTimestamp(record.getValue());
        // Only tuples not considered late by the watermark generator enter the window.
        if (this.waterMarkEventGenerator.track(record.getTopicName().get(), ts)) {
            this.windowManager.add(record, ts, record);
        } else {
            // Late tuple: forward to the late-data topic if configured, else log and drop.
            if (this.windowConfig.getLateDataTopic() != null) {
                context.newOutputMessage(this.windowConfig.getLateDataTopic(), null).value(input).sendAsync();
            } else {
                log.info(String.format(
                        "Received a late tuple %s with ts %d. This will not be " + "processed"
                                + ".", input, ts));
            }
        }
    } else {
        this.windowManager.add(record, System.currentTimeMillis(), record);
    }
    return null;
}
|
@Test(expectedExceptions = RuntimeException.class)
public void testExecuteWithWrongWrongTimestampExtractorType() throws Exception {
    // A timestamp extractor of the wrong type must fail fast with a
    // RuntimeException when the function initializes on its first process().
    WindowConfig windowConfig = new WindowConfig();
    windowConfig.setTimestampExtractorClassName(TestWrongTimestampExtractor.class.getName());
    doReturn(Optional.of(new Gson().fromJson(new Gson().toJson(windowConfig), Map.class)))
            .when(context).getUserConfigValue(WindowConfig.WINDOW_CONFIG_KEY);
    testWindowedPulsarFunction.process(10L, context);
}
|
/**
 * Imports definitions from the given WSDL import's location, remembering the
 * declared namespace (as a "ns:" prefix) for names created during the import.
 */
@Override
public void importFrom(Import theImport, String sourceSystemId) {
    // An absent namespace yields an empty prefix rather than "null:".
    if (theImport.getNamespace() == null) {
        this.namespace = "";
    } else {
        this.namespace = theImport.getNamespace() + ":";
    }
    this.importFrom(theImport.getLocation());
}
|
@Test
public void testImportBasicElement() throws Exception {
    // Smoke test: a WSDL using basic element types must import without errors.
    URL url = ReflectUtil.getResource("org/flowable/engine/impl/webservice/basic-elements-in-types.wsdl");
    assertThat(url).isNotNull();
    importer.importFrom(url.toString());
}
|
/**
 * Buffers the source bytes into the upload buffer, updating the running MD5
 * digest, and flushes a part whenever the upload buffer fills.
 *
 * @param sourceBuffer bytes to upload; fully consumed on return
 * @return total number of bytes consumed from {@code sourceBuffer}
 * @throws ClosedChannelException if this channel has been closed
 * @throws IOException if flushing a part fails
 */
@Override
public int write(ByteBuffer sourceBuffer) throws IOException {
    if (!isOpen()) {
        throw new ClosedChannelException();
    }
    int totalBytesWritten = 0;
    while (sourceBuffer.hasRemaining()) {
        int position = sourceBuffer.position();
        // Copy as much as fits into the upload buffer this iteration.
        int bytesWritten = Math.min(sourceBuffer.remaining(), uploadBuffer.remaining());
        totalBytesWritten += bytesWritten;
        if (sourceBuffer.hasArray()) {
            // If the underlying array is accessible, direct access is the most efficient approach.
            int start = sourceBuffer.arrayOffset() + position;
            uploadBuffer.put(sourceBuffer.array(), start, bytesWritten);
            md5.update(sourceBuffer.array(), start, bytesWritten);
        } else {
            // Otherwise, use a readonly copy with an appropriate mark to read the current range of the
            // buffer twice.
            ByteBuffer copyBuffer = sourceBuffer.asReadOnlyBuffer();
            copyBuffer.mark().limit(position + bytesWritten);
            uploadBuffer.put(copyBuffer);
            copyBuffer.reset();
            md5.update(copyBuffer);
        }
        sourceBuffer.position(position + bytesWritten); // move position forward by the bytes written
        // Flush when the upload buffer is full; if the source still has bytes
        // left, the upload buffer was the limiting factor above, i.e. full.
        if (!uploadBuffer.hasRemaining() || sourceBuffer.hasRemaining()) {
            flush();
        }
    }
    return totalBytesWritten;
}
|
@Test
public void write() throws IOException {
    // Writing must succeed for plain S3 plus each single SSE option (algorithm,
    // customer key, KMS key id), while specifying multiple SSE options at once
    // must be rejected.
    writeFromConfig(s3Config("s3"), false);
    writeFromConfig(s3Config("s3"), true);
    writeFromConfig(s3ConfigWithSSEAlgorithm("s3"), false);
    writeFromConfig(s3ConfigWithSSECustomerKey("s3"), false);
    writeFromConfig(s3ConfigWithSSEKMSKeyId("s3"), false);
    assertThrows(
        IllegalArgumentException.class,
        () -> writeFromConfig(s3ConfigWithMultipleSSEOptions("s3"), false));
}
|
/**
 * Converts an arbitrary object to a {@code Calendar}.
 * <p>
 * A {@code java.util.Date} is wrapped directly; a non-null, non-Date value is
 * parsed via {@code tryToParseDate}; {@code null} input or an unparseable
 * value falls back to {@code defaultValue}.
 *
 * @param date value to convert (Date, parseable object, or null)
 * @param defaultValue returned when conversion is not possible
 * @return the converted calendar, or {@code defaultValue}
 */
public static Calendar getCalendar(Object date, Calendar defaultValue) {
    if (date instanceof java.util.Date) {
        Calendar wrapped = new GregorianCalendar();
        wrapped.setTime((java.util.Date) date);
        return wrapped;
    }
    if (date == null) {
        return defaultValue;
    }
    Optional<Date> parsed = tryToParseDate(date);
    if (!parsed.isPresent()) {
        return defaultValue;
    }
    Calendar result = new GregorianCalendar();
    result.setTime(parsed.get());
    return result;
}
|
@Test
public void testGetCalendarObjectCalendarWithValidStringAndNullDefault() {
    // A parseable date string must round-trip to the matching midnight
    // Calendar for every standard date style, even with a null default.
    Calendar cal = new GregorianCalendar();
    cal.set(Calendar.HOUR_OF_DAY, 0);
    cal.set(Calendar.MINUTE, 0);
    cal.set(Calendar.SECOND, 0);
    cal.set(Calendar.MILLISECOND, 0);
    Date time = cal.getTime();
    for (int formatId : new int[]{DateFormat.SHORT, DateFormat.MEDIUM,
            DateFormat.LONG, DateFormat.FULL}) {
        DateFormat formatter = DateFormat.getDateInstance(formatId);
        assertEquals(cal,
                Converter.getCalendar(formatter.format(time), null));
    }
}
|
/**
 * Validates a comma-separated list of beta IP addresses.
 *
 * @param betaIps comma-separated IPs; must be non-blank and every entry a valid IP
 * @throws NacosException with CLIENT_INVALID_PARAM if the list is blank or any
 *         entry is not a valid IP address
 */
public static void checkBetaIps(String betaIps) throws NacosException {
    if (StringUtils.isBlank(betaIps)) {
        throw new NacosException(NacosException.CLIENT_INVALID_PARAM, BETAIPS_INVALID_MSG);
    }
    // Every comma-separated entry must be a syntactically valid IP address.
    for (String ip : betaIps.split(",")) {
        if (!InternetAddressUtil.isIP(ip)) {
            throw new NacosException(NacosException.CLIENT_INVALID_PARAM, BETAIPS_INVALID_MSG);
        }
    }
}
|
@Test
void testCheckBetaIpsFail2() throws NacosException {
    // A token that is not a valid IP address must be rejected.
    Throwable exception = assertThrows(NacosException.class, () -> {
        ParamUtils.checkBetaIps("aaa");
    });
    assertTrue(exception.getMessage().contains("betaIps invalid"));
}
|
/**
 * Folds one element into the per-key reducing state.
 * <p>
 * The first element seen for a key registers an event-time timer at
 * Long.MAX_VALUE so the final reduced value is emitted once at the end of
 * input; subsequent elements are combined into the stored value with the
 * user's reduce function.
 */
@Override
public void processElement(StreamRecord<IN> element) throws Exception {
    IN value = element.getValue();
    IN currentValue = values.value();
    if (currentValue == null) {
        // register a timer for emitting the result at the end when this is the
        // first input for this key
        timerService.registerEventTimeTimer(VoidNamespace.INSTANCE, Long.MAX_VALUE);
    } else {
        // otherwise, reduce things
        value = userFunction.reduce(currentValue, value);
    }
    values.update(value);
}
|
@Test
void resultsOnMaxWatermark() throws Exception {
    // The per-key reductions ("hello" x2, "ciao" x3) must only be emitted when
    // the Long.MAX_VALUE watermark arrives, timestamped with that watermark.
    KeyedOneInputStreamOperatorTestHarness<String, String, String> testHarness =
            createTestHarness();
    testHarness.processElement(new StreamRecord<>("hello"));
    testHarness.processElement(new StreamRecord<>("hello"));
    testHarness.processElement(new StreamRecord<>("ciao"));
    testHarness.processElement(new StreamRecord<>("ciao"));
    testHarness.processElement(new StreamRecord<>("ciao"));
    testHarness.processWatermark(Long.MAX_VALUE);
    ArrayDeque<Object> expectedOutput = new ArrayDeque<>();
    expectedOutput.add(new StreamRecord<>("hellohello", Long.MAX_VALUE));
    expectedOutput.add(new StreamRecord<>("ciaociaociao", Long.MAX_VALUE));
    expectedOutput.add(new Watermark(Long.MAX_VALUE));
    assertThat(testHarness.getOutput()).contains(expectedOutput.toArray());
}
|
/**
 * Builds the shared-data lock name for a resource, namespaced per resource
 * kind so different kinds with the same name cannot collide.
 */
String getLockName(String namespace, String name) {
    StringBuilder lockName = new StringBuilder("lock::");
    lockName.append(namespace).append("::").append(kind()).append("::").append(name);
    return lockName.toString();
}
|
@Test
/*
 * Verifies that the lock is released by a call to `releaseLockAndTimer`.
 * The call is made through a chain of futures ending with `eventually` after a failed execution via an unhandled exception in the `Callable`.
 */
void testWithLockCallableUnhandledExceptionReleasesLock(VertxTestContext context) {
    var resourceOperator = new DefaultWatchableStatusedResourceOperator<>(vertx, null, "TestResource");
    @SuppressWarnings({ "unchecked", "rawtypes" })
    var target = new DefaultOperator(vertx, "Test", resourceOperator, new MicrometerMetricsProvider(BackendRegistries.getDefaultNow()), null);
    Reconciliation reconciliation = new Reconciliation("test", "TestResource", "my-namespace", "my-resource");
    String lockName = target.getLockName(reconciliation);
    Checkpoint callableFailed = context.checkpoint();
    Checkpoint lockObtained = context.checkpoint();
    @SuppressWarnings("unchecked")
    Future<String> result = target.withLockTest(reconciliation,
            () -> {
                throw new UnsupportedOperationException(EXPECTED_MESSAGE);
            });
    Promise<Void> failHandlerCalled = Promise.promise();
    // The callable's exception must surface as the future's failure.
    result.onComplete(context.failing(e -> context.verify(() -> {
        assertThat(e.getMessage(), is(EXPECTED_MESSAGE));
        failHandlerCalled.complete();
        callableFailed.flag();
    })));
    // If the lock was released, re-acquiring it within the timeout succeeds.
    failHandlerCalled.future()
            .compose(nothing -> vertx.sharedData().getLockWithTimeout(lockName, 10000L))
            .onComplete(context.succeeding(lock -> context.verify(() -> {
                assertThat(lock, instanceOf(Lock.class));
                lock.release();
                lockObtained.flag();
            })));
}
|
/**
 * Validates a decoding result by performing a second, independent decode.
 * <p>
 * The decoder's outputs are treated as known-good inputs while one of the
 * real inputs is erased instead; that unit is re-decoded into a scratch
 * buffer and compared against the original. A mismatch means the decode
 * under validation was bad. Buffer positions of {@code inputs} and
 * {@code outputs} are restored before returning.
 *
 * @param inputs        the inputs used by the decode under validation
 * @param erasedIndexes the indexes that were erased in that decode
 * @param outputs       the recovered units produced by that decode
 * @throws IOException if the validation decode itself fails
 * @throws InvalidDecodingException if the re-decoded unit does not match
 */
public void validate(ByteBuffer[] inputs, int[] erasedIndexes,
    ByteBuffer[] outputs) throws IOException {
    markBuffers(outputs);
    try {
        ByteBuffer validInput = CoderUtil.findFirstValidInput(inputs);
        boolean isDirect = validInput.isDirect();
        int capacity = validInput.capacity();
        int remaining = validInput.remaining();
        // Init buffer
        if (buffer == null || buffer.isDirect() != isDirect
            || buffer.capacity() < remaining) {
            buffer = allocateBuffer(isDirect, capacity);
        }
        buffer.clear().limit(remaining);
        // Create newInputs and newErasedIndex for validation
        ByteBuffer[] newInputs = new ByteBuffer[inputs.length];
        int count = 0;
        for (int i = 0; i < erasedIndexes.length; i++) {
            newInputs[erasedIndexes[i]] = outputs[i];
            count++;
        }
        newErasedIndex = -1;
        boolean selected = false;
        int numValidIndexes = CoderUtil.getValidIndexes(inputs).length;
        for (int i = 0; i < newInputs.length; i++) {
            if (count == numValidIndexes) {
                break;
            } else if (!selected && inputs[i] != null) {
                // Erase the first still-valid real input; it becomes the unit
                // that gets re-decoded and compared.
                newErasedIndex = i;
                newInputs[i] = null;
                selected = true;
            } else if (newInputs[i] == null) {
                newInputs[i] = inputs[i];
                if (inputs[i] != null) {
                    count++;
                }
            }
        }
        // Keep it for testing
        newValidIndexes = CoderUtil.getValidIndexes(newInputs);
        decoder.decode(newInputs, new int[]{newErasedIndex},
            new ByteBuffer[]{buffer});
        if (!buffer.equals(inputs[newErasedIndex])) {
            throw new InvalidDecodingException("Failed to validate decoding");
        }
    } finally {
        // Restore caller-visible buffer state regardless of the outcome.
        toLimits(inputs);
        resetBuffers(outputs);
    }
}
|
/**
 * After a correct encode/decode round trip, deliberately corrupting one
 * recovered chunk must make the validator throw InvalidDecodingException.
 */
@Test
public void testValidateWithBadDecoding() throws IOException {
    prepare(null, numDataUnits, numParityUnits, erasedDataIndexes,
        erasedParityIndexes);
    this.usingDirectBuffer = true;
    prepareCoders(true);
    prepareValidator(true);
    prepareBufferAllocator(false);
    // encode
    ECChunk[] dataChunks = prepareDataChunksForEncoding();
    ECChunk[] parityChunks = prepareParityChunksForEncoding();
    ECChunk[] clonedDataChunks = cloneChunksWithData(dataChunks);
    try {
        encoder.encode(dataChunks, parityChunks);
    } catch (Exception e) {
        Assert.fail("Should not get Exception: " + e.getMessage());
    }
    // decode
    backupAndEraseChunks(clonedDataChunks, parityChunks);
    ECChunk[] inputChunks =
        prepareInputChunksForDecoding(clonedDataChunks, parityChunks);
    markChunks(inputChunks);
    ensureOnlyLeastRequiredChunks(inputChunks);
    ECChunk[] recoveredChunks = prepareOutputChunksForDecoding();
    int[] erasedIndexes = getErasedIndexesForDecoding();
    try {
        decoder.decode(inputChunks, erasedIndexes, recoveredChunks);
    } catch (Exception e) {
        Assert.fail("Should not get Exception: " + e.getMessage());
    }
    // validate
    restoreChunksFromMark(inputChunks);
    polluteSomeChunk(recoveredChunks);
    try {
        validator.validate(inputChunks, erasedIndexes, recoveredChunks);
        Assert.fail("Validation should fail due to bad decoding");
    } catch (InvalidDecodingException e) {
        String expected = "Failed to validate decoding";
        GenericTestUtils.assertExceptionContains(expected, e);
    }
}
|
/**
 * Reads the caller application name from the request's app-name header prop,
 * mapping a missing value to the empty string rather than {@code null}.
 */
public static String getApplicationName(SofaRequest request) {
    final Object appName = request.getRequestProp(RemotingConstants.HEAD_APP_NAME);
    return appName == null ? "" : (String) appName;
}
|
@Test
public void testGetApplicationName() {
    // An absent header resolves to the empty string.
    SofaRequest request = new SofaRequest();
    String applicationName = SofaRpcUtils.getApplicationName(request);
    assertEquals("", applicationName);
    // NOTE(review): assumes RemotingConstants.HEAD_APP_NAME equals "app" —
    // otherwise the prop set below would not be visible to getApplicationName.
    request.addRequestProp("app", "test-app");
    applicationName = SofaRpcUtils.getApplicationName(request);
    assertEquals("test-app", applicationName);
}
|
/**
 * Deserializes JSON bytes from the given topic into the target type.
 * <p>
 * Null payloads deserialize to {@code null}. Schema-Registry-framed JSON is
 * read via JsonSerdeUtils, plain JSON via the shared MAPPER; the parsed tree
 * is then coerced against the expected schema before being cast to the
 * target type.
 *
 * @throws SerializationException if parsing or coercion fails; the JSON parse
 *     location is cleared first so payload data does not leak into logs
 */
@Override
public T deserialize(final String topic, final byte[] bytes) {
    try {
        if (bytes == null) {
            return null;
        }
        // don't use the JsonSchemaConverter to read this data because
        // we require that the MAPPER enables USE_BIG_DECIMAL_FOR_FLOATS,
        // which is not currently available in the standard converters
        final JsonNode value = isJsonSchema
            ? JsonSerdeUtils.readJsonSR(bytes, MAPPER, JsonNode.class)
            : MAPPER.readTree(bytes);
        final Object coerced = enforceFieldType(
            "$",
            new JsonValueContext(value, schema)
        );
        if (LOG.isTraceEnabled()) {
            LOG.trace("Deserialized {}. topic:{}, row:{}", target, topic, coerced);
        }
        return SerdeUtils.castToTargetType(coerced, targetType);
    } catch (final Exception e) {
        // Clear location in order to avoid logging data, for security reasons
        if (e instanceof JsonParseException) {
            ((JsonParseException) e).clearLocation();
        }
        throw new SerializationException(
            "Failed to deserialize " + target + " from topic: " + topic + ". " + e.getMessage(), e);
    }
}
|
@Test
public void shouldDeserializedJsonNumberAsInt() {
    // Integers, decimals and numeric strings must all coerce to the int 41
    // when the target schema is OPTIONAL_INT32.
    // Given:
    final KsqlJsonDeserializer<Integer> deserializer =
        givenDeserializerForSchema(Schema.OPTIONAL_INT32_SCHEMA, Integer.class);
    final List<String> validCoercions = ImmutableList.of(
        "41",
        "41.456",
        "\"41\""
    );
    validCoercions.forEach(value -> {
        final byte[] bytes = addMagic(value.getBytes(StandardCharsets.UTF_8));
        // When:
        final Object result = deserializer.deserialize(SOME_TOPIC, bytes);
        // Then:
        assertThat(result, is(41));
    });
}
|
/**
 * Converts raw Kafka message bytes into a Connect {@code SchemaAndValue}.
 * <p>
 * Null payloads (tombstones) map to {@code SchemaAndValue.NULL}. With schemas
 * enabled the JSON must be an envelope object containing exactly the
 * "schema" and "payload" fields; with schemas disabled the parsed value is
 * wrapped in a synthetic envelope with a null schema.
 *
 * @throws DataException on a deserialization error or an invalid envelope
 */
@Override
public SchemaAndValue toConnectData(String topic, byte[] value) {
    JsonNode jsonValue;
    // This handles a tombstone message
    if (value == null) {
        return SchemaAndValue.NULL;
    }
    try {
        jsonValue = deserializer.deserialize(topic, value);
    } catch (SerializationException e) {
        throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e);
    }
    if (config.schemasEnabled() && (!jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME) || !jsonValue.has(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME)))
        throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." +
                " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration.");
    // The deserialized data should either be an envelope object containing the schema and the payload or the schema
    // was stripped during serialization and we need to fill in an all-encompassing schema.
    if (!config.schemasEnabled()) {
        ObjectNode envelope = JSON_NODE_FACTORY.objectNode();
        envelope.set(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME, null);
        envelope.set(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME, jsonValue);
        jsonValue = envelope;
    }
    Schema schema = asConnectSchema(jsonValue.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
    return new SchemaAndValue(
        schema,
        convertToConnect(schema, jsonValue.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME), config)
    );
}
|
@Test
public void structToConnect() {
    // A schema+payload envelope must convert to the equivalent Connect Struct.
    byte[] structJson = "{ \"schema\": { \"type\": \"struct\", \"fields\": [{ \"field\": \"field1\", \"type\": \"boolean\" }, { \"field\": \"field2\", \"type\": \"string\" }] }, \"payload\": { \"field1\": true, \"field2\": \"string\" } }".getBytes();
    Schema expectedSchema = SchemaBuilder.struct().field("field1", Schema.BOOLEAN_SCHEMA).field("field2", Schema.STRING_SCHEMA).build();
    Struct expected = new Struct(expectedSchema).put("field1", true).put("field2", "string");
    SchemaAndValue converted = converter.toConnectData(TOPIC, structJson);
    assertEquals(new SchemaAndValue(expectedSchema, expected), converted);
}
|
/**
 * Writes metrics content to InfluxDB v2 synchronously.
 * <p>
 * Signs in first (cookie-based auth); returns {@code true} only for a 2xx
 * response. Failures — failed sign-in, non-success status, I/O errors — are
 * logged and reported as {@code false}. Note: a 1xx response also returns
 * {@code false} but without logging a warning.
 *
 * @param content line-protocol payload to write
 * @return whether the write was acknowledged with a success status
 */
@Override
public boolean writeMetricsSync(String content) {
    if (!trySignIn()) {
        Logger.warn("try login fails, so do not continue write content!");
        return false;
    }
    final HttpRequest req = new HttpRequest.Builder()
        .url(writeReqUrl)
        .header("Cookie", cookie)
        .post(content)
        .build();
    try {
        final HttpResponse response = httpClient.execute(req);
        final HttpRespStatus status = response.getStatus();
        if (status.statusClass() == SUCCESS) {
            if (Logger.isDebugEnable()) {
                Logger.debug("InfluxDbV2Client.writeMetricsSync(): respStatus=" + status.simpleString()
                    + ", reqBody=" + content);
            }
            return true;
        }
        if (status.statusClass() != INFORMATIONAL && status.statusClass() != SUCCESS) {
            Logger.warn("InfluxDbV2Client.writeMetricsSync(): respStatus=" + status.simpleString()
                + ", reqBody=" + content);
        }
    } catch (IOException e) {
        Logger.warn("InfluxDbV2Client.writeMetricsSync() catch IOException!", e);
    } catch (Throwable t) {
        Logger.error("InfluxDbV2Client.writeMetricsSync() catch Exception!", t);
    }
    return false;
}
|
@Test
public void testWrite() {
    // Writing two line-protocol points synchronously must succeed.
    Assert.assertTrue(influxDbClient.writeMetricsSync(
        "cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000\n" +
        "cpu_load_short,host=server02,region=us-west value=0.96 1434055562000000000"));
}
|
/**
 * Synchronously fetches the value by blocking on the async variant.
 */
@Override
public V get() {
    return get(getAsync());
}
|
@Test
public void testFailoverTimeout() {
    // With the Redis container stopped, retries limited and retry interval 0,
    // both bucket reads must fail fast (well under 100 ms) instead of hanging.
    GenericContainer<?> redis = createRedis();
    redis.start();
    Config config = createConfig(redis);
    config.useSingleServer()
            .setRetryAttempts(3)
            .setRetryInterval(0);
    RedissonClient rc = Redisson.create(config);
    List<String> args = new ArrayList<>();
    for (int i = 0; i < 100; i++) {
        args.add("" + i);
    }
    redis.stop();
    Assertions.assertTimeout(Duration.ofMillis(100), () -> {
        Assertions.assertThrows(Exception.class, () -> {
            rc.getBuckets().get(args.toArray(new String[0]));
        });
    });
    Assertions.assertTimeout(Duration.ofMillis(100), () -> {
        Assertions.assertThrows(Exception.class, () -> {
            rc.getBucket("test").get();
        });
    });
    rc.shutdown();
}
|
/**
 * Left-pads {@code input} with {@code padding} to exactly {@code targetLen}
 * characters; input longer than the target is truncated on the right.
 * Returns {@code null} for null input, null/empty padding, or a null/negative
 * target length.
 */
@Udf
public String lpad(
    @UdfParameter(description = "String to be padded") final String input,
    @UdfParameter(description = "Target length") final Integer targetLen,
    @UdfParameter(description = "Padding string") final String padding) {
    if (input == null || padding == null || padding.isEmpty()
        || targetLen == null || targetLen < 0) {
        return null;
    }
    // Number of padding characters needed before the input.
    final int padChars = Math.max(targetLen - input.length(), 0);
    final StringBuilder result = new StringBuilder(targetLen + padding.length());
    while (result.length() < padChars) {
        result.append(padding);
    }
    result.setLength(padChars);  // trim a partially applied padding repeat
    result.append(input);
    result.setLength(targetLen); // truncate input that exceeds the target
    return result.toString();
}
|
@Test
public void shouldTruncateInputIfTargetLengthTooSmallBytes() {
    // A target shorter than the input keeps only the first targetLen bytes.
    final ByteBuffer result = udf.lpad(BYTES_123, 2, BYTES_45);
    assertThat(result, is(ByteBuffer.wrap(new byte[]{1,2})));
}
|
/**
 * Builds the fully merged parameter map for a step by delegating to the
 * params manager, resolving the runtime for the step's type.
 */
public Map<String, Parameter> getAllParams(
    Step stepDefinition, WorkflowSummary workflowSummary, StepRuntimeSummary runtimeSummary) {
    return paramsManager.generateMergedStepParams(
        workflowSummary, stepDefinition, getStepRuntime(stepDefinition.getType()), runtimeSummary);
}
|
@Test(expected = MaestroValidationException.class)
public void testMergeNestedParamMismatchTypes() {
    // Merging a string-valued default param over a step-level MAP param with
    // the same name is a type mismatch and must fail validation.
    when(this.defaultParamManager.getDefaultStepParams())
        .thenReturn(
            Collections.singletonMap(
                "nested-mismatch", ParamDefinition.buildParamDefinition("foo", "bar")));
    TypedStep testStep = new TypedStep();
    testStep.setParams(
        singletonMap(
            "nested-mismatch",
            MapParamDefinition.builder()
                .name("nested-mismatch")
                .value(
                    Collections.singletonMap(
                        "default-new",
                        ParamDefinition.buildParamDefinition("default-new", "from-default")))
                .build()));
    testStep.setType(StepType.NOOP);
    testStep.setId("step1");
    runtimeManager.getAllParams(testStep, workflowSummary, runtimeSummary);
}
|
/**
 * Identifier for this tracker in logs and metadata listener registration,
 * parameterized by the broker id.
 */
@Override
public String name() {
    StringBuilder sb = new StringBuilder("BrokerRegistrationTracker(id=");
    sb.append(id).append(')');
    return sb.toString();
}
|
@Test
public void testTrackerName() {
    // The tracker name must embed the broker id.
    BrokerRegistrationTrackerTestContext ctx = new BrokerRegistrationTrackerTestContext();
    assertEquals("BrokerRegistrationTracker(id=1)", ctx.tracker.name());
}
|
/**
 * Whether the context's configured source level supports switch expressions
 * (introduced as a standard feature in Java 14).
 */
public static boolean supportsSwitchExpressions(Context context) {
    return sourceIsAtLeast(context, 14);
}
|
@Test
public void supportsSwitchExpressions_notSupported() {
    // Source level 13 predates switch expressions (Java 14).
    Context context = contextWithSourceVersion("13");
    assertThat(SourceVersion.supportsSwitchExpressions(context)).isFalse();
}
|
/**
 * Hex-encodes the bytes using ":" as the separator between octets.
 */
public static String toHexString(final byte[] bytes) {
    return toHexString(bytes, ":");
}
|
@Test
public void testToStringBytes() {
    // Default encoding uses ":" separators; a null separator yields none.
    byte[] dpid = {0, 0, 0, 0, 0, 0, 0, -1 };
    String valid = "00:00:00:00:00:00:00:ff";
    String testString = HexString.toHexString(dpid);
    assertEquals(valid, testString);
    String validNoSep = "00000000000000ff";
    assertEquals(validNoSep, HexString.toHexString(dpid, null));
}
|
/**
 * Creates a string-based key scoped to the given application.
 */
public static Key of(String key, ApplicationId appId) {
    return new StringKey(key, appId);
}
|
@Test
public void stringAndLongKeyCompare() {
    // Ordering must be total and consistent across mixed string and long keys.
    Key stringKey0 = Key.of("0" + KEY_1, NetTestTools.APP_ID);
    Key longKey1 = Key.of(LONG_KEY_1, NetTestTools.APP_ID);
    Key stringKey2 = Key.of(KEY_2, NetTestTools.APP_ID);
    assertThat(stringKey0, lessThan(longKey1));
    assertThat(stringKey0, lessThan(stringKey2));
    assertThat(longKey1, greaterThan(stringKey0));
    assertThat(longKey1, lessThan(stringKey2));
    assertThat(stringKey2, greaterThan(stringKey0));
    assertThat(stringKey2, greaterThan(longKey1));
}
|
/**
 * Returns a list iterator positioned just before {@code initialIndex}, backed
 * by the per-index {@code iterator(int)} factory.
 * <p>
 * The forward iterator is created lazily and is invalidated by
 * {@code previous()}, which creates a fresh iterator per call.
 *
 * @throws IndexOutOfBoundsException if {@code initialIndex} is out of range
 */
@Override @Nonnull public ListIterator<T> listIterator(final int initialIndex) {
    final Iterator<T> initialIterator;
    try {
        initialIterator = iterator(initialIndex);
    } catch (NoSuchElementException ex) {
        throw new IndexOutOfBoundsException();
    }
    return new AbstractListIterator<T>() {
        // Index of the element most recently returned by next()
        // (initialIndex - 1 before any call).
        private int index = initialIndex - 1;
        @Nullable private Iterator<T> forwardIterator = initialIterator;
        @Nonnull
        private Iterator<T> getForwardIterator() {
            // Recreated lazily after previous() cleared it.
            if (forwardIterator == null) {
                try {
                    forwardIterator = iterator(index+1);
                } catch (IndexOutOfBoundsException ex) {
                    throw new NoSuchElementException();
                }
            }
            return forwardIterator;
        }
        @Override public boolean hasNext() {
            return getForwardIterator().hasNext();
        }
        @Override public boolean hasPrevious() {
            return index >= 0;
        }
        @Override public T next() {
            T ret = getForwardIterator().next();
            index++;
            return ret;
        }
        @Override public int nextIndex() {
            return index+1;
        }
        @Override public T previous() {
            // Invalidate the forward iterator; it will be rebuilt on demand.
            forwardIterator = null;
            try {
                return iterator(index--).next();
            } catch (IndexOutOfBoundsException ex) {
                throw new NoSuchElementException();
            }
        }
        @Override public int previousIndex() {
            return index;
        }
    };
}
|
@Test(expected = IndexOutOfBoundsException.class)
public void testNegativeIndex() {
    // A negative starting index must be rejected.
    list.listIterator(-1);
}
|
/**
 * Value-based equality over all index metadata fields.
 * <p>
 * All field comparisons are null-safe. The {@code indextype} field is compared
 * by its {@code value()}; previously this dereferenced {@code indextype}
 * directly and threw a {@code NullPointerException} when either side's
 * {@code indextype} was {@code null}, unlike every other field which used a
 * null-safe comparison.
 */
@Override
public boolean equals(Object o) {
    if (this == o) {
        return true;
    }
    if (!(o instanceof IndexMeta)) {
        return false;
    }
    IndexMeta indexMeta = (IndexMeta) o;
    if (!ArrayUtils.isEquals(indexMeta.values, this.values)) {
        return false;
    }
    if (!Objects.equals(indexMeta.nonUnique, this.nonUnique)) {
        return false;
    }
    if (!Objects.equals(indexMeta.indexQualifier, this.indexQualifier)) {
        return false;
    }
    if (!Objects.equals(indexMeta.indexName, this.indexName)) {
        return false;
    }
    if (!Objects.equals(indexMeta.type, this.type)) {
        return false;
    }
    // Null-safe comparison of indextype by its value(): two nulls are equal,
    // one null is unequal, otherwise compare the underlying values.
    if (indexMeta.indextype != this.indextype) {
        if (indexMeta.indextype == null || this.indextype == null) {
            return false;
        }
        if (!Objects.equals(indexMeta.indextype.value(), this.indextype.value())) {
            return false;
        }
    }
    if (!Objects.equals(indexMeta.ascOrDesc, this.ascOrDesc)) {
        return false;
    }
    if (!Objects.equals(indexMeta.ordinalPosition, this.ordinalPosition)) {
        return false;
    }
    return true;
}
|
@Test
public void testEqualsNullObject() {
    // equals(null) must return false, never throw.
    IndexMeta indexMeta = new IndexMeta();
    assertFalse(indexMeta.equals(null), "An object should not be equal to null");
}
|
/**
 * Finds every mapped target step that is not present in the referenced
 * transformation, returning them as an unmodifiable set.
 *
 * @param targetMap mapping of target step attributes to their source fields
 * @param injectedTransMeta the transformation being injected into
 * @return unmodifiable set of targets whose step name is not used
 */
public static Set<TargetStepAttribute> getUnavailableTargetSteps( Map<TargetStepAttribute, SourceStepField> targetMap,
    TransMeta injectedTransMeta ) {
  // Step names actually present in the referenced transformation.
  Set<String> usedStepNames = getUsedStepsForReferencendTransformation( injectedTransMeta );
  Set<TargetStepAttribute> missingTargets = new HashSet<>();
  for ( TargetStepAttribute target : targetMap.keySet() ) {
    // NOTE(review): default-locale toUpperCase(); assumed consistent with the
    // casing applied by getUsedStepsForReferencendTransformation — confirm.
    if ( !usedStepNames.contains( target.getStepname().toUpperCase() ) ) {
      missingTargets.add( target );
    }
  }
  return Collections.unmodifiableSet( missingTargets );
}
|
@Test
public void getUnavailableTargetSteps() {
    // A target whose step name is absent from the referenced transformation
    // (here: no used steps at all) must be reported as unavailable.
    TargetStepAttribute unavailableTargetStep = new TargetStepAttribute( UNAVAILABLE_STEP, TEST_ATTR_VALUE, false );
    SourceStepField sourceStep = new SourceStepField( TEST_SOURCE_STEP_NAME, TEST_FIELD );
    Map<TargetStepAttribute, SourceStepField> targetMap = Collections.singletonMap( unavailableTargetStep, sourceStep );
    TransMeta injectedTransMeta = mock( TransMeta.class );
    doReturn( Collections.emptyList() ).when( injectedTransMeta ).getUsedSteps();
    Set<TargetStepAttribute> actualSet = MetaInject.getUnavailableTargetSteps( targetMap, injectedTransMeta );
    assertTrue( actualSet.contains( unavailableTargetStep ) );
}
|
/**
 * Marks a container as killable for preemption, under the scheduler write lock.
 * <p>
 * With lazy preemption disabled the container is completed immediately with a
 * PREEMPTED status. Otherwise it is only flagged killable on its node and —
 * when its application attempt is still known — registered with the
 * PreemptionManager so actual preemption can happen later.
 */
public void markContainerForKillable(
    RMContainer killableContainer) {
    writeLock.lock();
    try {
        LOG.debug("{}: container {}",
            SchedulerEventType.MARK_CONTAINER_FOR_KILLABLE, killableContainer);
        if (!isLazyPreemptionEnabled) {
            super.completedContainer(killableContainer, SchedulerUtils
                .createPreemptedContainerStatus(killableContainer.getContainerId(),
                    SchedulerUtils.PREEMPTED_CONTAINER), RMContainerEventType.KILL);
        } else {
            FiCaSchedulerNode node = getSchedulerNode(
                killableContainer.getAllocatedNode());
            FiCaSchedulerApp application = getCurrentAttemptForContainer(
                killableContainer.getContainerId());
            node.markContainerToKillable(killableContainer.getContainerId());
            // notify PreemptionManager
            // Get the application for the finished container
            if (null != application) {
                String leafQueuePath = application.getCSLeafQueue().getQueuePath();
                getPreemptionManager().addKillableContainer(
                    new KillableContainer(killableContainer, node.getPartition(),
                        leafQueuePath));
            }
        }
    } finally {
        writeLock.unlock();
    }
}
|
// Verifies app-level preemption accounting across attempts: resources
// preempted, number of AM and non-AM containers preempted, and that the
// counters reset appropriately when a new attempt is created.
@Test(timeout = 120000)
public void testPreemptionInfo() throws Exception {
Configuration conf = new Configuration();
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 3);
conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
ResourceScheduler.class);
int CONTAINER_MEMORY = 1024; // start RM
MockRM rm1 = new MockRM(conf);
rm1.start();
// get scheduler
CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
// start NM
MockNM nm1 =
new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
nm1.registerNode();
// create app and launch the AM
RMApp app0 = MockRMAppSubmitter.submitWithMemory(CONTAINER_MEMORY, rm1);
MockAM am0 = MockRM.launchAM(app0, rm1, nm1);
am0.registerAppAttempt();
// get scheduler app
FiCaSchedulerApp schedulerAppAttempt =
cs.getSchedulerApplications().get(app0.getApplicationId())
.getCurrentAppAttempt();
// allocate some containers and launch them
List<Container> allocatedContainers =
am0.allocateAndWaitForContainers(3, CONTAINER_MEMORY, nm1);
// kill the 3 containers
for (Container c : allocatedContainers) {
cs.markContainerForKillable(schedulerAppAttempt.getRMContainer(c.getId()));
}
// check values: 3 non-AM containers preempted, AM still alive.
waitForAppPreemptionInfo(app0,
Resource.newInstance(CONTAINER_MEMORY * 3, 3), 0, 3,
Resource.newInstance(CONTAINER_MEMORY * 3, 3), false, 3);
// kill app0-attempt0 AM container
cs.markContainerForKillable(schedulerAppAttempt.getRMContainer(app0
.getCurrentAppAttempt().getMasterContainer().getId()));
// wait for app0 failed
waitForNewAttemptCreated(app0, am0.getApplicationAttemptId());
// check values: AM preemption recorded, per-attempt numbers reset to 0.
waitForAppPreemptionInfo(app0,
Resource.newInstance(CONTAINER_MEMORY * 4, 4), 1, 3,
Resource.newInstance(0, 0), false, 0);
// launch app0-attempt1
MockAM am1 = MockRM.launchAM(app0, rm1, nm1);
am1.registerAppAttempt();
schedulerAppAttempt =
cs.getSchedulerApplications().get(app0.getApplicationId())
.getCurrentAppAttempt();
// allocate some containers and launch them
allocatedContainers =
am1.allocateAndWaitForContainers(3, CONTAINER_MEMORY, nm1);
for (Container c : allocatedContainers) {
cs.markContainerForKillable(schedulerAppAttempt.getRMContainer(c.getId()));
}
// check values: cumulative totals now cover both attempts.
waitForAppPreemptionInfo(app0,
Resource.newInstance(CONTAINER_MEMORY * 7, 7), 1, 6,
Resource.newInstance(CONTAINER_MEMORY * 3, 3), false, 3);
rm1.stop();
}
|
/**
 * Executes the given statement, first running injectors and rolling back any
 * side effects they created if execution fails.
 *
 * @throws KsqlServerException if the command runner has reported a warning,
 *         in which case no new statements are accepted
 */
public StatementExecutorResponse execute(
    final ConfiguredStatement<? extends Statement> statement,
    final KsqlExecutionContext executionContext,
    final KsqlSecurityContext securityContext
) {
  // Refuse new statements while the command runner is in a degraded state.
  final String commandRunnerWarningString = commandRunnerWarning.get();
  if (!commandRunnerWarningString.isEmpty()) {
    throw new KsqlServerException("Failed to handle Ksql Statement."
        + System.lineSeparator()
        + commandRunnerWarningString);
  }

  // Run injectors, tracking their side effects so they can be reverted.
  final InjectorWithSideEffects injector = InjectorWithSideEffects.wrap(
      injectorFactory.apply(executionContext, securityContext.getServiceContext()));
  final ConfiguredStatementWithSideEffects<?> injectedWithSideEffects =
      injector.injectWithSideEffects(statement);
  try {
    return executeInjected(
        injectedWithSideEffects.getStatement(),
        statement,
        executionContext,
        securityContext);
  } catch (Exception e) {
    // Undo whatever the injectors created before propagating the failure.
    injector.revertSideEffects(injectedWithSideEffects);
    throw e;
  }
}
|
@Test
public void shouldThrowFailureIfCannotInferSchema() {
  // Given: a statement whose schema injection fails.
  final PreparedStatement<Statement> prepared =
      PreparedStatement.of("", new ListProperties(Optional.empty()));
  final ConfiguredStatement<Statement> stmt =
      ConfiguredStatement.of(prepared, SessionConfig.of(KSQL_CONFIG, ImmutableMap.of()));
  when(schemaInjector.inject(any())).thenThrow(new KsqlException("Could not infer!"));

  // When:
  final KsqlException e = assertThrows(
      KsqlException.class,
      () -> distributor.execute(stmt, executionContext, securityContext));

  // Then: the injector's failure surfaces to the caller.
  assertThat(e.getMessage(), containsString("Could not infer!"));
}
|
public LoggerContext configure() {
  // Start from a clean root context so repeated calls do not stack appenders.
  LoggerContext context = helper.getRootContext();
  context.reset();
  helper.enableJulChangePropagation(context);

  configureConsole(context);
  configureWithLogbackWritingToFile(context);

  // Root level comes from the APP process settings; Hazelcast is pinned to WARN.
  helper.apply(
      LogLevelConfig.newBuilder(helper.getRootLoggerName())
          .rootLevelFor(ProcessId.APP)
          .immutableLevel("com.hazelcast", Level.toLevel("WARN"))
          .build(),
      appSettings.getProps());
  return context;
}
|
@Test
public void fail_with_IAE_if_app_property_unsupported_level() {
  // ERROR is not an accepted root level for the app process.
  settings.getProps().set("sonar.log.level.app", "ERROR");

  assertThatThrownBy(underTest::configure)
      .isInstanceOf(IllegalArgumentException.class)
      .hasMessage("log level ERROR in property sonar.log.level.app is not a supported value (allowed levels are [TRACE, DEBUG, INFO])");
}
|
/**
 * Closes the underlying database if one was opened; a no-op when the
 * database field was never assigned.
 */
@Override
public void close() {
if (database != null) {
database.close();
}
}
|
// Ad-hoc benchmark (kept @Ignore): writes a few journal batches, checks the
// resulting BDB database names, then times 1,000,000 getDatabaseNames() calls.
@Ignore
@Test
public void testRefreshTime() throws Exception {
BDBEnvironment environment = initBDBEnv();
BDBJEJournal journal = new BDBJEJournal(environment);
journal.open();
// Write entries 1..9: begin a batch on odd ids, commit and roll on even ids,
// which yields databases named after the odd first-entry ids below.
for (int i = 1; i < 10; ++ i) {
if (i % 2 == 1) {
journal.batchWriteBegin();
}
journal.batchWriteAppend(i, makeBuffer(i));
if (i % 2 == 0) {
journal.batchWriteCommit();
journal.rollJournal(i + 1);
}
}
journal.batchWriteCommit();
journal.close();
Assert.assertEquals(Arrays.asList(1L, 3L, 5L, 7L, 9L), environment.getDatabaseNames());
long cnt = 1000000;
long start = System.currentTimeMillis();
for (int i = 0; i < cnt; ++ i) {
environment.getDatabaseNames();
}
long interval = System.currentTimeMillis() - start;
// 2022-07-12 17:20:49 .. - call environment.getDatabaseNames() 1000000 times cost 6797 ms
// 2022-07-12 18:56:41 .. - call environment.getDatabaseNames() 1000000 times cost 6892 ms
// seems like an in-memory operation, only cost 6µs
LOG.info("call environment.getDatabaseNames() {} times cost {} ms", cnt, interval);
}
|
/**
 * Builds the Infura preferred-client header for the given client version.
 *
 * @param clientVersion client version string; null or empty yields no header
 * @param required whether the preference is mandatory; when false the value
 *        is suffixed with "; required=false"
 * @return a single-entry header map, or an empty map when no version is given
 */
static Map<String, String> buildClientVersionHeader(String clientVersion, boolean required) {
  if (clientVersion == null || clientVersion.isEmpty()) {
    return Collections.emptyMap();
  }
  // Optional preference is signalled with a "; required=false" suffix.
  String headerValue = required ? clientVersion : clientVersion + "; required=false";
  return Collections.singletonMap(INFURA_ETHEREUM_PREFERRED_CLIENT, headerValue);
}
|
@Test
public void testBuildHeader() {
  // Null or empty client versions produce no header at all.
  assertTrue(buildClientVersionHeader("", false).isEmpty());
  assertTrue(buildClientVersionHeader(null, false).isEmpty());
  // JUnit convention: expected value first, actual second.
  assertEquals(
      Collections.singletonMap("Infura-Ethereum-Preferred-Client", "geth 1.4.19"),
      buildClientVersionHeader("geth 1.4.19", true));
  assertEquals(
      Collections.singletonMap(
          "Infura-Ethereum-Preferred-Client", "geth 1.4.19; required=false"),
      buildClientVersionHeader("geth 1.4.19", false));
}
|
public static String jsToString( Object value, String classType ) {
  // Plain JS values (e.g. String) convert directly to text.
  if ( !classType.equalsIgnoreCase( JS_NATIVE_JAVA_OBJ )
      && !classType.equalsIgnoreCase( JS_UNDEFINED ) ) {
    return Context.toString( value );
  }
  // Native Java objects may wrap a Kettle Value; try unwrapping first.
  try {
    return ( (Value) Context.jsToJava( value, Value.class ) ).toString();
  } catch ( Exception ev ) {
    // Not a Value - plain string conversion works in most cases.
    return Context.toString( value );
  }
}
|
@Test
public void jsToString_String() throws Exception {
  // A plain String should round-trip unchanged.
  final String input = "qwerty";
  assertEquals( input, JavaScriptUtils.jsToString( input, String.class.getName() ) );
}
|
public Schema mergeTables(
    Map<FeatureOption, MergingStrategy> mergingStrategies,
    Schema sourceSchema,
    List<SqlNode> derivedColumns,
    List<SqlWatermark> derivedWatermarkSpecs,
    SqlTableConstraint derivedPrimaryKey) {
  // Start from the source schema and layer the derived parts in order:
  // columns first, then watermarks, then the primary key.
  final SchemaBuilder builder =
      new SchemaBuilder(
          mergingStrategies,
          sourceSchema,
          (FlinkTypeFactory) validator.getTypeFactory(),
          dataTypeFactory,
          validator,
          escapeExpression);
  builder.appendDerivedColumns(mergingStrategies, derivedColumns);
  builder.appendDerivedWatermarks(mergingStrategies, derivedWatermarkSpecs);
  builder.appendDerivedPrimaryKey(derivedPrimaryKey);
  return builder.build();
}
|
@Test
void mergeWithIncludeFailsOnDuplicateColumn() {
  // The base table already defines a column named 'one'.
  final Schema baseSchema = Schema.newBuilder().column("one", DataTypes.INT()).build();
  final List<SqlNode> derivedColumns =
      Arrays.asList(
          regularColumn("one", DataTypes.INT()),
          regularColumn("four", DataTypes.STRING()));

  assertThatThrownBy(
          () ->
              util.mergeTables(
                  getDefaultMergingStrategies(),
                  baseSchema,
                  derivedColumns,
                  Collections.emptyList(),
                  null))
      .isInstanceOf(ValidationException.class)
      .hasMessage("A column named 'one' already exists in the base table.");
}
|
public Type parse(final String schema) {
  try {
    return getType(parseTypeContext(schema));
  } catch (final ParsingException e) {
    // Wrap parser errors so callers get the offending schema text back.
    throw new KsqlStatementException(
        "Failed to parse schema",
        "Failed to parse: " + schema,
        schema,
        KsqlStatementException.Problem.STATEMENT,
        e
    );
  }
}
|
@Test
public void shouldThrowOnNonIntegerPrecision() {
  // Given: a DECIMAL with a non-integer precision argument.
  final String schemaString = "DECIMAL(.1, 1)";

  // When:
  final KsqlException e = assertThrows(
      KsqlException.class,
      () -> parser.parse(schemaString));

  // Then:
  assertThat(e.getMessage(),
      containsString("Value must be integer for command: DECIMAL(PRECISION)"));
}
|
/**
 * @return the home directory held by this installation
 */
public File getHomeDirectory() {
return homeDirectory;
}
|
@Test
public void getHomeDirectory_is_elasticsearch_subdirectory_of_sq_home_directory() throws IOException {
  final File homeDir = temp.newFolder();

  // Minimal set of path properties required to construct an EsInstallation.
  final Props props = new Props(new Properties());
  props.set(PATH_DATA.getKey(), temp.newFolder().getAbsolutePath());
  props.set(PATH_HOME.getKey(), homeDir.getAbsolutePath());
  props.set(PATH_TEMP.getKey(), temp.newFolder().getAbsolutePath());
  props.set(PATH_LOGS.getKey(), temp.newFolder().getAbsolutePath());

  final EsInstallation installation = new EsInstallation(props);
  assertThat(installation.getHomeDirectory()).isEqualTo(new File(homeDir, "elasticsearch"));
}
|
public static String prettyFormatXml(CharSequence xml) {
  final String raw = xml.toString();
  final StringWriter writer = new StringWriter();
  try {
    // Indent with 2 spaces and drop the XML declaration.
    Transformer transformer = transformerFactory.newTransformer();
    transformer.setOutputProperty(OutputKeys.OMIT_XML_DECLARATION, "yes");
    transformer.setOutputProperty(OutputKeys.INDENT, "yes");
    transformer.setOutputProperty("{http://xml.apache.org/xslt}indent-amount", "2");
    transformer.transform(
        new StreamSource(new StringReader(raw)), new StreamResult(writer));
  }
  catch (TransformerException | IllegalArgumentException e) {
    // On any transformation problem, log and return the input unchanged.
    LOGGER.log(Level.SEVERE, "Transformer error", e);
    return raw;
  }
  return writer.toString();
}
|
@Test
public void prettyFormatIncompleteXmlTest() {
  // Malformed (unclosed) XML cannot be transformed, so the input comes back as-is.
  final String incompleteXml =
      "<foo attr1='value1' attr2='value2'><inner-element attr1='value1'>Test</inner-element>";
  assertEquals(incompleteXml, XmlUtil.prettyFormatXml(incompleteXml));
}
|
public static List<Event> computeEventDiff(final Params params) {
  final List<Event> diffEvents = new ArrayList<>();
  // Node-level and whole-cluster diffs are computed against baseline params;
  // derived bucket-space diffs use the params as given.
  emitPerNodeDiffEvents(createBaselineParams(params), diffEvents);
  emitWholeClusterDiffEvent(createBaselineParams(params), diffEvents);
  emitDerivedBucketSpaceStatesDiffEvents(params, diffEvents);
  return diffEvents;
}
|
@Test
void too_low_distributor_node_ratio_cluster_down_reason_emits_corresponding_event() {
  // Cluster goes down with the too-low-distributor-ratio reason attached.
  final EventFixture fixture = EventFixture.createForNodes(3)
      .clusterStateBefore("distributor:3 storage:3")
      .clusterStateAfter("cluster:d distributor:3 storage:3")
      .clusterReasonAfter(ClusterStateReason.TOO_LOW_AVAILABLE_DISTRIBUTOR_NODE_RATIO);

  final List<Event> events = fixture.computeEventDiff();

  // Exactly one event, carrying the matching description.
  assertThat(events.size(), equalTo(1));
  assertThat(events, hasItem(
      clusterEventWithDescription("Too low ratio of available distributor nodes. Setting cluster state down")));
}
|
/**
 * Looks up the registered instances for the given service via the Nacos client.
 *
 * @param serviceId service name to query
 * @return instances currently known for the service
 */
@Override
public List<NacosServiceInstance> getInstanceList(String serviceId) {
return client.getInstances(serviceId);
}
|
@Test
public void getInstanceList() {
  final List<NacosServiceInstance> actualInstances = nacosRegister.getInstanceList(serviceName);
  // JUnit convention: expected value first, actual second.
  Assert.assertEquals(this.instanceList.size(), actualInstances.size());
}
|
// Returns the proxy's cached auto-commit flag rather than querying the
// wrapped physical connection.
@Override
public boolean getAutoCommit() throws SQLException {
return currentAutoCommitStatus;
}
|
@Test
public void testCreateStatement() throws Throwable {
  // Wrap a mocked physical connection in the XA connection proxy.
  Connection connection = Mockito.mock(Connection.class);
  Mockito.when(connection.getAutoCommit()).thenReturn(true);
  XAConnection xaConnection = Mockito.mock(XAConnection.class);
  BaseDataSourceResource<ConnectionProxyXA> resource = Mockito.mock(BaseDataSourceResource.class);

  String xid = "xxx";
  ConnectionProxyXA proxy = new ConnectionProxyXA(connection, xaConnection, resource, xid);

  // Statements created through the proxy must themselves be XA statement proxies.
  Statement statement = proxy.createStatement();
  Assertions.assertTrue(statement instanceof StatementProxyXA);
}
|
/**
 * Encodes the given elements into a single ANSI-escaped string by delegating
 * to {@code buildEnabled}.
 *
 * @param elements elements to encode
 * @return the encoded string
 */
public static String encode(Object... elements) {
  final StringBuilder builder = new StringBuilder();
  buildEnabled(builder, elements);
  return builder.toString();
}
|
@Test
@Disabled
public void colorfulEncodeTest(){
  // Visual check: print the sample text in each palette color with its
  // complementary color as background, two samples per console line.
  final String text = "Hutool▀████▀";
  final AnsiColors ansiColors = new AnsiColors(AnsiColors.BitDepth.EIGHT);
  final Color[] palette = {
      Color.BLACK, Color.BLUE,
      Color.CYAN, Color.DARK_GRAY,
      Color.GRAY, Color.GREEN,
      Color.LIGHT_GRAY, Color.MAGENTA,
      Color.ORANGE, Color.PINK,
      Color.RED, Color.WHITE,
      Color.YELLOW
  };
  for (int i = 0; i < palette.length; i++) {
    final Color fore = palette[i];
    final Color back = new Color(255 - fore.getRed(), 255 - fore.getGreen(), 255 - fore.getBlue());
    final AnsiElement foreElement = ansiColors.findClosest(fore).toAnsiElement(ForeOrBack.FORE);
    final AnsiElement backElement = ansiColors.findClosest(back).toAnsiElement(ForeOrBack.BACK);
    final String encoded = AnsiEncoder.encode(foreElement, backElement, text);
    Console.print(i % 2 == 1 ? encoded + "\n" : encoded);
  }
}
|
/**
 * Creates a failure batch of indexing failures, recording
 * {@code IndexingFailure.class} as the batch's failure type.
 */
public static FailureBatch indexingFailureBatch(List<IndexingFailure> indexingFailures) {
return new FailureBatch(indexingFailures, IndexingFailure.class);
}
|
@Test
public void indexingFailureBatch_creationFailsUponMixedTypesOfFailures() {
  // Mixing a different failure subtype into the batch must be rejected.
  final List<IndexingFailure> mixedFailures =
      List.of(createIndexingFailure(), createIndexingFailure(), new CustomIndexingFailure());
  assertThatCode(() -> FailureBatch.indexingFailureBatch(mixedFailures))
      .isExactlyInstanceOf(IllegalArgumentException.class);
}
|
@Override
public void deleteDiscountActivity(Long id) {
  // Ensure the activity exists (throws if it does not).
  DiscountActivityDO activity = validateDiscountActivityExists(id);
  // Only closed activities may be deleted.
  if (CommonStatusEnum.isEnable(activity.getStatus())) {
    throw exception(DISCOUNT_ACTIVITY_DELETE_FAIL_STATUS_NOT_CLOSED);
  }
  // Perform the deletion.
  discountActivityMapper.deleteById(id);
}
|
@Test
public void testDeleteDiscountActivity_notExists() {
  // A random id with no matching activity.
  final Long id = randomLongId();
  // Deleting a missing activity must raise DISCOUNT_ACTIVITY_NOT_EXISTS.
  assertServiceException(
      () -> discountActivityService.deleteDiscountActivity(id), DISCOUNT_ACTIVITY_NOT_EXISTS);
}
|
/**
 * Visits the report tree with a visitor built from the configured formulas,
 * computing their measures for each component.
 */
public void execute() {
new PathAwareCrawler<>(
FormulaExecutorComponentVisitor.newBuilder(metricRepository, measureRepository).buildFor(formulas))
.visit(treeRootHolder.getReportTreeRoot());
}
|
@Test
public void compute_duplicated_blocks_one_for_original_and_ignores_InProjectDuplicate() {
  // One duplication registered: a block of FILE_1 duplicated into FILE_2.
  duplicationRepository.addDuplication(FILE_1_REF, new TextBlock(1, 1), FILE_2_REF, new TextBlock(2, 2));

  underTest.execute();

  // FILE_1's duplicated-block count covers only the original block.
  assertRawMeasureValue(FILE_1_REF, DUPLICATED_BLOCKS_KEY, 1);
}
|
/**
 * Returns the number of pending tasks. Guarded by {@code lock} so the size
 * is consistent with concurrent add/remove operations.
 */
@Override
public int size() {
lock.lock();
try {
return tasks.size();
} finally {
lock.unlock();
}
}
|
@Test
void testSize() {
  // Empty engine, then one task added, then removed again.
  assertEquals(0, nacosDelayTaskExecuteEngine.size());

  nacosDelayTaskExecuteEngine.addTask("test", abstractTask);
  assertEquals(1, nacosDelayTaskExecuteEngine.size());

  nacosDelayTaskExecuteEngine.removeTask("test");
  assertEquals(0, nacosDelayTaskExecuteEngine.size());
}
|
@Override
public String probeContentType(Path path) throws IOException {
  // Cheap pass first: detect from the file name alone.
  String byName = tika.detect(path.toString());
  if (!MimeTypes.OCTET_STREAM.equals(byName)) {
    return byName;
  }
  // Fall back to content-based detection.
  String byContent = tika.detect(path);
  if (!MimeTypes.OCTET_STREAM.equals(byContent)) {
    return byContent;
  }
  // Specification says to return null if the type could not be
  // conclusively determined.
  return null;
}
|
@Test
public final void testDirectAccess() throws Exception {
  final String contentType =
      new TikaFileTypeDetector().probeContentType(testDirectory.resolve(TEST_HTML));
  // The HTML fixture must be recognized as text/html.
  assertNotNull(contentType);
  assertEquals("text/html", contentType);
}
|
/**
 * Converts {@code read} bytes over {@code durationInNS} nanoseconds to a
 * bytes-per-second rate and records it on the metrics object.
 */
public static void addTransferRateMetric(final DataNodeMetrics metrics, final long read,
final long durationInNS) {
metrics.addReadTransferRate(getTransferRateInBytesPerSecond(read, durationInNS));
}
|
@Test
public void testAddTransferRateMetricZeroTransferBytes() {
  final DataNodeMetrics mockMetrics = mock(DataNodeMetrics.class);
  // Negative byte count with zero duration must record a 0 rate.
  DFSUtil.addTransferRateMetric(mockMetrics, -1L, 0);
  verify(mockMetrics).addReadTransferRate(0L);
}
|
@Udf
public String trim(
    @UdfParameter(
        description = "The string to trim") final String input) {
  // Null propagates; otherwise remove leading/trailing whitespace.
  return input == null ? null : input.trim();
}
|
@Test
public void shouldReturnEmptyForEmptyInput() {
  // Trimming an empty string yields an empty string.
  assertThat(udf.trim(""), is(""));
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.