| focal_method (string, 13–60.9k chars) | test_case (string, 25–109k chars) |
|---|---|
/**
 * Sends an XML document to the given slave-server service via HTTP POST.
 *
 * @param xml     XML payload; encoded with {@code Const.XML_ENCODING} before sending
 * @param service service path on the slave server to post to
 * @return the response body returned by {@code executeAuth}
 * @throws Exception if building or executing the request fails
 */
public String sendXML( String xml, String service ) throws Exception {
  HttpPost method = buildSendXMLMethod( xml.getBytes( Const.XML_ENCODING ), service );
  try {
    return executeAuth( method );
  } finally {
    // Release current connection to the connection pool once you are done
    method.releaseConnection();
    // NOTE: placed in the finally block, so this detailed message is logged even
    // when executeAuth throws.
    if ( log.isDetailed() ) {
      log.logDetailed( BaseMessages.getString( PKG, "SlaveServer.DETAILED_SentXmlToService", service,
        environmentSubstitute( hostname ) ) );
    }
  }
}
|
// Verifies that sendXML with a stubbed HttpPost (fake URI, no real connection
// details) blows up with a NullPointerException instead of silently succeeding.
@Test( expected = NullPointerException.class )
public void testSendXML() throws Exception {
  slaveServer.setHostname( "hostNameStub" );
  slaveServer.setUsername( "userNAmeStub" );
  HttpPost httpPostMock = mock( HttpPost.class );
  URI uriMock = new URI( "fake" );
  doReturn( uriMock ).when( httpPostMock ).getURI();
  // Stub the request builder so sendXML executes our mock instead of a real POST.
  doReturn( httpPostMock ).when( slaveServer ).buildSendXMLMethod( any( byte[].class ), anyString() );
  slaveServer.sendXML( "", "" );
  // Unreachable when the expected NPE is thrown; guards against silent success.
  fail( "Incorrect connection details had been used, but no exception was thrown" );
}
|
/**
 * Registers the given SNMP device with this controller, keyed by its device id.
 * A previously registered device with the same id is replaced.
 *
 * @param device device to register
 */
@Override
public void addDevice(SnmpDevice device) {
    log.info("Adding device {}", device.deviceId());
    snmpDeviceMap.put(device.deviceId(), device);
}
|
// After registering a device, looking it up by its id must return the same instance.
@Test
public void addDevice() {
    snmpController.addDevice(device);
    assertEquals("Controller should contain device", device, snmpController.getDevice(device.deviceId()));
}
|
/**
 * Reports whether this ring buffer currently holds no elements.
 *
 * @return {@code true} when {@link #size()} is zero
 */
@Override
// not used in the codebase, here just for future API usage
public boolean isEmpty() {
    final int elementCount = size();
    return elementCount == 0;
}
|
// A freshly created buffer reports empty; adding a single element flips it.
@Test
public void testIsEmpty() {
    final ArrayRingbuffer<String> buffer = new ArrayRingbuffer<>(5);
    assertTrue(buffer.isEmpty());
    buffer.add("");
    assertFalse(buffer.isEmpty());
}
|
/**
 * Drains the given stream and stores its bytes under {@code key} via the
 * byte-array overload.
 *
 * @param key  cache key to store the data under
 * @param data stream to read fully; must not be null
 * @throws IllegalStateException wrapping any IOException raised while reading
 */
@Override
public void write(String key, InputStream data) {
    checkNotNull(data);
    try {
        final byte[] contents = data.readAllBytes();
        write(key, contents);
    } catch (IOException e) {
        throw new IllegalStateException("Failed to read sensor write cache data", e);
    }
}
|
// The cache rejects a second write to an existing key with an IAE that names the key.
@Test
public void write_throws_IAE_if_writing_same_key_twice() {
    byte[] b1 = new byte[] {1};
    byte[] b2 = new byte[] {2};
    writeCache.write("key", b1);
    assertThatThrownBy(() -> writeCache.write("key", b2))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Cache already contains key 'key'");
}
|
/**
 * Looks up the binary-protocol value codec registered for the given column type.
 *
 * @param binaryColumnType MySQL binary column type to resolve
 * @return the registered codec
 * @throws IllegalArgumentException if no codec is registered for the type
 */
public static MySQLBinaryProtocolValue getBinaryProtocolValue(final BinaryColumnType binaryColumnType) {
    Preconditions.checkArgument(BINARY_PROTOCOL_VALUES.containsKey(binaryColumnType), "Cannot find MySQL type '%s' in column type when process binary protocol value", binaryColumnType);
    return BINARY_PROTOCOL_VALUES.get(binaryColumnType);
}
|
// TINY_BLOB must resolve to the byte-lenenc codec.
@Test
void assertGetBinaryProtocolValueWithMySQLTypeTinyBlob() {
    assertThat(MySQLBinaryProtocolValueFactory.getBinaryProtocolValue(MySQLBinaryColumnType.TINY_BLOB), instanceOf(MySQLByteLenencBinaryProtocolValue.class));
}
|
/**
 * Encodes a bean's readable properties into a name-to-value map for query
 * expansion. Property names may be overridden with a {@code @Param} annotation
 * on the getter; null values and self-references are skipped.
 *
 * @param object bean to encode; null yields an empty map
 * @return map of property (or alias) name to value
 * @throws EncodeException if reflective property access fails
 */
@Override
public Map<String, Object> encode(Object object) throws EncodeException {
    if (object == null) {
        return Collections.emptyMap();
    }
    try {
        ObjectParamMetadata metadata = getMetadata(object.getClass());
        Map<String, Object> result = new HashMap<>();
        for (PropertyDescriptor property : metadata.objectProperties) {
            Method getter = property.getReadMethod();
            Object value = getter.invoke(object);
            // Skip nulls, and skip self-references to avoid trivial cycles.
            if (value == null || value == object) {
                continue;
            }
            Param alias = getter.getAnnotation(Param.class);
            String name = alias != null ? alias.value() : property.getName();
            result.put(name, value);
        }
        return result;
    } catch (IllegalAccessException | IntrospectionException | InvocationTargetException e) {
        throw new EncodeException("Failure encoding object into query map", e);
    }
}
|
// Properties inherited from a superclass (page/size) must be encoded alongside
// the subclass's own property (query).
@Test
void defaultEncoder_haveSuperClass() {
    Map<String, Object> expected = new HashMap<>();
    expected.put("page", 1);
    expected.put("size", 10);
    expected.put("query", "queryString");
    SubClass subClass = new SubClass();
    subClass.setPage(1);
    subClass.setSize(10);
    subClass.setQuery("queryString");
    Map<String, Object> encodedMap = encoder.encode(subClass);
    assertThat(encodedMap).as("Unexpected encoded query map").isEqualTo(expected);
}
|
/**
 * Requests partition-count increases for the given topics.
 * One future per topic is created up front; a single broker call covers all topics.
 *
 * @param newPartitions topic name to requested new partition layout
 * @param options       call options (timeout etc.)
 * @return result holding a per-topic future
 */
@Override
public CreatePartitionsResult createPartitions(final Map<String, NewPartitions> newPartitions,
                                               final CreatePartitionsOptions options) {
    final Map<String, KafkaFutureImpl<Void>> futures = new HashMap<>(newPartitions.size());
    final CreatePartitionsTopicCollection topics = new CreatePartitionsTopicCollection(newPartitions.size());
    for (Map.Entry<String, NewPartitions> entry : newPartitions.entrySet()) {
        final String topic = entry.getKey();
        final NewPartitions newPartition = entry.getValue();
        List<List<Integer>> newAssignments = newPartition.assignments();
        // Null assignments means the broker chooses replica placement.
        List<CreatePartitionsAssignment> assignments = newAssignments == null ? null :
            newAssignments.stream()
                .map(brokerIds -> new CreatePartitionsAssignment().setBrokerIds(brokerIds))
                .collect(Collectors.toList());
        topics.add(new CreatePartitionsTopic()
            .setName(topic)
            .setCount(newPartition.totalCount())
            .setAssignments(assignments));
        futures.put(topic, new KafkaFutureImpl<>());
    }
    if (!topics.isEmpty()) {
        final long now = time.milliseconds();
        final long deadline = calcDeadlineMs(now, options.timeoutMs());
        final Call call = getCreatePartitionsCall(options, futures, topics,
            Collections.emptyMap(), now, deadline);
        runnable.call(call, now);
    }
    // Defensive copy so internal mutation cannot leak to the caller.
    return new CreatePartitionsResult(new HashMap<>(futures));
}
|
// One request covers two topics; the per-topic futures must reflect per-topic
// outcomes: success for my_topic, INVALID_TOPIC (with detail message) for other_topic.
@Test
public void testCreatePartitions() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        // Test a call where one filter has an error.
        env.kafkaClient().prepareResponse(
            expectCreatePartitionsRequestWithTopics("my_topic", "other_topic"),
            prepareCreatePartitionsResponse(1000,
                createPartitionsTopicResult("my_topic", Errors.NONE),
                createPartitionsTopicResult("other_topic", Errors.INVALID_TOPIC_EXCEPTION,
                    "some detailed reason")));
        Map<String, NewPartitions> counts = new HashMap<>();
        counts.put("my_topic", NewPartitions.increaseTo(3));
        counts.put("other_topic", NewPartitions.increaseTo(3, asList(singletonList(2), singletonList(3))));
        CreatePartitionsResult results = env.adminClient().createPartitions(counts);
        Map<String, KafkaFuture<Void>> values = results.values();
        KafkaFuture<Void> myTopicResult = values.get("my_topic");
        myTopicResult.get();
        KafkaFuture<Void> otherTopicResult = values.get("other_topic");
        // The failing topic's future completes exceptionally; unwrap and check the cause.
        assertEquals("some detailed reason",
            assertInstanceOf(InvalidTopicException.class,
                assertThrows(ExecutionException.class, otherTopicResult::get).getCause()).getMessage());
    }
}
|
/**
 * Resolves parameter references inside the given config entity. If the entity
 * introduces its own parameter scope, that scope is layered over this resolver
 * before leaves and child nodes are resolved.
 *
 * @param resolvable entity whose string leaves, non-string leaves and child
 *                   nodes should have parameters substituted
 */
public <T> void resolve(T resolvable) {
    ParamResolver resolver = this;
    // instanceof is the idiomatic (and null-safe) form of the previous
    // reflective ParamScope.class.isAssignableFrom(resolvable.getClass()) check.
    if (resolvable instanceof ParamScope) {
        ParamScope newScope = (ParamScope) resolvable;
        resolver = newScope.applyOver(resolver);
    }
    resolveStringLeaves(resolvable, resolver);
    resolveNonStringLeaves(resolvable, resolver);
    resolveNodes(resolvable, resolver);
}
|
// A pipeline-level param ("foo" -> "pipeline") must shadow the global "foo",
// while params undefined at pipeline level ("bar") fall back to globals.
@Test
public void shouldLexicallyScopeTheParameters() {
    PipelineConfig withParams = PipelineConfigMother.createPipelineConfig("cruise", "dev", "ant");
    withParams.addParam(param("foo", "pipeline"));
    PipelineConfig withoutParams = PipelineConfigMother.createPipelineConfig("mingle", "dev", "ant");
    CruiseConfig cruiseConfig = new BasicCruiseConfig();
    cruiseConfig.addPipeline("group", withParams);
    cruiseConfig.addPipeline("group", withoutParams);
    cruiseConfig.server().setArtifactsDir("/#{foo}/#{bar}");
    HgMaterialConfig materialConfig = MaterialConfigsMother.hgMaterialConfig();
    materialConfig.setConfigAttributes(Map.of(ScmMaterialConfig.FOLDER, "work/#{foo}/#{bar}/baz"));
    withParams.addMaterialConfig(materialConfig);
    withParams.setLabelTemplate("2.0.#{foo}-#{bar}");
    withoutParams.setLabelTemplate("2.0.#{foo}-#{bar}");
    new ParamResolver(new ParamSubstitutionHandlerFactory(params(param("foo", "global"), param("bar", "global-only"))), fieldCache).resolve(cruiseConfig);
    // Pipeline with its own "foo" uses the local value; "bar" resolves globally.
    assertThat(withParams.materialConfigs().get(1).getFolder(), is("work/pipeline/global-only/baz"));
    assertThat(withParams.getLabelTemplate(), is("2.0.pipeline-global-only"));
    // Pipeline without params resolves both from the global scope.
    assertThat(withoutParams.getLabelTemplate(), is("2.0.global-global-only"));
}
|
/**
 * Cleans the given throwable's stack trace in place via StackTraceCleaner.
 * The identity set is presumably used to avoid revisiting throwables in a
 * cause/suppressed cycle — confirm in StackTraceCleaner#clean.
 */
static void cleanStackTrace(Throwable throwable) {
    new StackTraceCleaner(throwable).clean(Sets.<Throwable>newIdentityHashSet());
}
|
// Frames at and below the JUnit runner (and Truth's own frames) must be stripped,
// leaving only the user's test frame.
@Test
public void allFramesBelowJUnitRunnerCleaned() {
    Throwable throwable =
        createThrowableWithStackTrace(
            "com.google.common.truth.StringSubject",
            "com.google.example.SomeTest",
            SomeRunner.class.getName(),
            "com.google.example.SomeClass");
    StackTraceCleaner.cleanStackTrace(throwable);
    assertThat(throwable.getStackTrace())
        .isEqualTo(
            new StackTraceElement[] {
                createStackTraceElement("com.google.example.SomeTest"),
            });
}
|
/**
 * Returns the table's Avro schema, resolved from commit metadata when
 * available; falls back to the schema inferred from the DDL when resolution
 * fails (e.g. the table exists but has no written data yet).
 */
@VisibleForTesting
public Schema getTableAvroSchema() {
    try {
        return new TableSchemaResolver(metaClient).getTableAvroSchema();
    } catch (Throwable e) {
        // table exists but has no written data
        LOG.warn("Get table avro schema error, use schema from the DDL instead", e);
        return inferSchemaFromDdl();
    }
}
|
// With no written data (empty streaming source), the schema must come from the
// DDL plus the standard Hoodie metadata fields, in order.
@Test
void testGetTableAvroSchema() {
    HoodieTableSource tableSource = getEmptyStreamingSource();
    assertNull(tableSource.getMetaClient(), "Streaming source with empty table path is allowed");
    final String schemaFields = tableSource.getTableAvroSchema().getFields().stream()
        .map(Schema.Field::name)
        .collect(Collectors.joining(","));
    final String expected = "_hoodie_commit_time,"
        + "_hoodie_commit_seqno,"
        + "_hoodie_record_key,"
        + "_hoodie_partition_path,"
        + "_hoodie_file_name,"
        + "uuid,name,age,ts,partition";
    assertThat(schemaFields, is(expected));
}
|
/**
 * Reads a single byte from the underlying input.
 *
 * @return the byte read (0-255), or -1 at end of input
 * @throws IOException                   if the underlying read fails
 * @throws UnsupportedOperationException if the input is neither a
 *         RandomAccessFile nor a DataInputStream
 */
public int read() throws IOException {
    // Guard-clause form; the two supported types are unrelated, so check order
    // does not matter.
    if (input instanceof DataInputStream) {
        return ((DataInputStream) input).read();
    }
    if (input instanceof RandomAccessFile) {
        return ((RandomAccessFile) input).read();
    }
    throw new UnsupportedOperationException("Unknown Hollow Blob Input type");
}
|
// Both backing modes (on-heap DataInputStream, shared-memory RandomAccessFile-style)
// must yield the same byte sequence from the mock blob.
@Test
public void testRead() throws IOException {
    HollowBlobInput inStream = HollowBlobInput.modeBasedSelector(MemoryMode.ON_HEAP, mockBlob);
    assertEquals(0, inStream.read()); // first byte is 0
    assertEquals(1, inStream.read()); // second byte is 1
    HollowBlobInput inBuffer = HollowBlobInput.modeBasedSelector(MemoryMode.SHARED_MEMORY_LAZY, mockBlob);
    assertEquals(0, inBuffer.read()); // first byte is 0
    assertEquals(1, inBuffer.read()); // second byte is 1
}
|
/**
 * Builds the Log4J properties used by the Elasticsearch process.
 *
 * @param props  SonarQube process properties (log level, cluster node name, ...)
 * @param logDir directory the log files are written to
 * @return fully assembled Log4J properties
 */
public Properties createProperties(Props props, File logDir) {
    Log4JPropertiesBuilder log4JPropertiesBuilder = new Log4JPropertiesBuilder(props);
    RootLoggerConfig config = newRootLoggerConfigBuilder()
        .setNodeNameField(getNodeNameWhenCluster(props))
        .setProcessId(ProcessId.ELASTICSEARCH)
        .build();
    String logPattern = log4JPropertiesBuilder.buildLogPattern(config);
    // Internal log4j noise is capped at ERROR; the root level comes from the
    // ES-specific LogLevelConfig below.
    return log4JPropertiesBuilder.internalLogLevel(Level.ERROR)
        .rootLoggerConfig(config)
        .logPattern(logPattern)
        .enableAllLogsToConsole(isAllLogsToConsoleEnabled(props))
        .jsonOutput(isJsonOutput(props))
        .logDir(logDir)
        .logLevelConfig(
            LogLevelConfig.newBuilder(log4JPropertiesBuilder.getRootLoggerName())
                .rootLevelFor(ProcessId.ELASTICSEARCH)
                .build())
        .build();
}
|
// The ES-specific property "sonar.log.level.es" must drive the root logger level.
@Test
public void createProperties_sets_root_logger_to_process_property_if_set() throws IOException {
    File logDir = temporaryFolder.newFolder();
    Properties properties = underTest.createProperties(newProps("sonar.log.level.es", "DEBUG"), logDir);
    assertThat(properties.getProperty("rootLogger.level")).isEqualTo("DEBUG");
}
|
/**
 * Writes the smali textual representation of an encoded value, dispatching on
 * the value's type tag. Integral types carry a type-suffix character
 * ('t' byte, 's' short, 'L' long, none for int).
 *
 * @param encodedValue value to serialize
 * @throws IOException if writing to the underlying writer fails
 * @throws IllegalArgumentException for an unrecognized value type
 */
public void writeEncodedValue(EncodedValue encodedValue) throws IOException {
    switch (encodedValue.getValueType()) {
        case ValueType.BOOLEAN:
            writeBooleanEncodedValue((BooleanEncodedValue) encodedValue);
            break;
        case ValueType.BYTE:
            writeIntegralValue(((ByteEncodedValue) encodedValue).getValue(), 't');
            break;
        case ValueType.CHAR:
            writeCharEncodedValue((CharEncodedValue) encodedValue);
            break;
        case ValueType.SHORT:
            writeIntegralValue(((ShortEncodedValue) encodedValue).getValue(), 's');
            break;
        case ValueType.INT:
            // int is the default integral form: no suffix character
            writeIntegralValue(((IntEncodedValue) encodedValue).getValue(), null);
            break;
        case ValueType.LONG:
            writeIntegralValue(((LongEncodedValue)encodedValue).getValue(), 'L');
            break;
        case ValueType.FLOAT:
            writeFloatEncodedValue((FloatEncodedValue) encodedValue);
            break;
        case ValueType.DOUBLE:
            writeDoubleEncodedValue((DoubleEncodedValue) encodedValue);
            break;
        case ValueType.ANNOTATION:
            writeAnnotation((AnnotationEncodedValue)encodedValue);
            break;
        case ValueType.ARRAY:
            writeArray((ArrayEncodedValue)encodedValue);
            break;
        case ValueType.STRING:
            writeQuotedString(((StringEncodedValue)encodedValue).getValue());
            break;
        case ValueType.FIELD:
            writeFieldDescriptor(((FieldEncodedValue)encodedValue).getValue());
            break;
        case ValueType.ENUM:
            writeEnum((EnumEncodedValue) encodedValue);
            break;
        case ValueType.METHOD:
            writeMethodDescriptor(((MethodEncodedValue)encodedValue).getValue());
            break;
        case ValueType.TYPE:
            writeType(((TypeEncodedValue)encodedValue).getValue());
            break;
        case ValueType.METHOD_TYPE:
            writeMethodProtoDescriptor(((MethodTypeEncodedValue)encodedValue).getValue());
            break;
        case ValueType.METHOD_HANDLE:
            writeMethodHandle(((MethodHandleEncodedValue)encodedValue).getValue());
            break;
        case ValueType.NULL:
            writer.write("null");
            break;
        default:
            throw new IllegalArgumentException("Unknown encoded value type");
    }
}
|
// Enum values whose class/field/type names contain spaces must be emitted with
// backtick quoting in the .enum directive.
@Test
public void testWriteEncodedValue_enum_withSpaces() throws IOException {
    BaksmaliWriter writer = new BaksmaliWriter(output);
    writer.writeEncodedValue(new ImmutableEnumEncodedValue(getFieldReferenceWithSpaces()));
    Assert.assertEquals(
        ".enum Ldefining/class/`with spaces`;->`fieldName with spaces`:Lfield/`type with spaces`;",
        output.toString());
}
|
/**
 * Returns the validation failures collected so far.
 * NOTE(review): this exposes the internal list directly, so callers can mutate
 * it — confirm that is intentional.
 */
@Override
public ArrayList<ValidationFailure> getValidationFailures() {
    return failuresCollection;
}
|
// A collector with one recorded failure returns it (message preserved);
// a fresh collector returns an empty list.
@Test
public void getValidationFailures() {
    /** arrange */
    FailureCollectorWrapper failureCollectorWrapper = new FailureCollectorWrapper();
    String errorMessage = "An error has occurred";
    FailureCollectorWrapper emptyFailureCollectorWrapper = new FailureCollectorWrapper();
    RuntimeException error = new RuntimeException(errorMessage);
    failureCollectorWrapper.addFailure(error.getMessage(), null);
    /** act */
    ArrayList<ValidationFailure> exceptionCollector =
        failureCollectorWrapper.getValidationFailures();
    ArrayList<ValidationFailure> emptyExceptionCollector =
        emptyFailureCollectorWrapper.getValidationFailures();
    /** assert */
    assertEquals(1, exceptionCollector.size());
    assertEquals(errorMessage, exceptionCollector.get(0).getMessage());
    assertEquals(0, emptyExceptionCollector.size());
}
|
/**
 * Sends the given tuples to Elasticsearch as a single _bulk request.
 * Any per-item bulk error, or an IO failure, is surfaced as a FailedException
 * so Trident replays the batch.
 *
 * @param tuples tuples to index in this batch
 */
public void updateState(List<TridentTuple> tuples) {
    try {
        String bulkRequest = buildRequest(tuples);
        final Request request = new Request("post", "_bulk");
        request.setEntity(new StringEntity(bulkRequest));
        Response response = client.performRequest(request);
        BulkIndexResponse bulkResponse = objectMapper.readValue(response.getEntity().getContent(), BulkIndexResponse.class);
        if (bulkResponse.hasErrors()) {
            // Partial failure: log the first error and force a batch replay.
            LOG.warn("failed processing bulk index requests: " + bulkResponse.getFirstError() + ": " + bulkResponse.getFirstResult());
            throw new FailedException();
        }
    } catch (IOException e) {
        LOG.warn("failed processing bulk index requests: " + e.toString());
        throw new FailedException(e);
    }
}
|
// Indexing into a missing index; presumably expected to succeed (auto-create)
// or at least not throw — TODO confirm intended assertion, the test only checks
// that updateState completes without exception.
@Test
public void indexMissing() throws Exception {
    List<TridentTuple> tuples = tuples("missing", type, documentId, source);
    state.updateState(tuples);
}
|
/**
 * Validates a readwrite-splitting rule configuration: data source references
 * first, then the configured load balancer.
 */
@Override
public void check(final String databaseName, final ReadwriteSplittingRuleConfiguration ruleConfig, final Map<String, DataSource> dataSourceMap, final Collection<ShardingSphereRule> builtRules) {
    checkDataSources(databaseName, ruleConfig.getDataSourceGroups(), dataSourceMap, builtRules);
    checkLoadBalancer(databaseName, ruleConfig);
}
|
// Duplicate read data sources ("read_ds_0" twice) within a group must be rejected.
@SuppressWarnings({"rawtypes", "unchecked"})
@Test
void assertCheckWhenConfigInvalidReadDataSource() {
    ReadwriteSplittingRuleConfiguration config = mock(ReadwriteSplittingRuleConfiguration.class);
    List<ReadwriteSplittingDataSourceGroupRuleConfiguration> configs = Arrays.asList(createDataSourceGroupRuleConfiguration(
        "write_ds_0", Arrays.asList("read_ds_0", "read_ds_0")), createDataSourceGroupRuleConfiguration("write_ds_1", Arrays.asList("read_ds_0", "read_ds_0")));
    when(config.getDataSourceGroups()).thenReturn(configs);
    // Resolve the checker for this config type through the ordered SPI loader.
    RuleConfigurationChecker checker = OrderedSPILoader.getServicesByClass(RuleConfigurationChecker.class, Collections.singleton(config.getClass())).get(config.getClass());
    assertThrows(DuplicateReadwriteSplittingActualDataSourceException.class, () -> checker.check("test", config, mockDataSources(), Collections.emptyList()));
}
|
/**
 * Creates a batch ExecutionEnvironment from the pipeline options, defaulting
 * the files-to-stage list to empty when unset.
 */
public static ExecutionEnvironment createBatchExecutionEnvironment(FlinkPipelineOptions options) {
    return createBatchExecutionEnvironment(
        options,
        MoreObjects.firstNonNull(options.getFilesToStage(), Collections.emptyList()),
        options.getFlinkConfDir());
}
|
// IPv4 master addresses must be parsed with and without an explicit port;
// when the port is omitted, the Flink REST default port applies.
@Test
public void shouldSupportIPv4Batch() {
    FlinkPipelineOptions options = getDefaultPipelineOptions();
    options.setRunner(FlinkRunner.class);
    options.setFlinkMaster("192.168.1.1:1234");
    ExecutionEnvironment bev = FlinkExecutionEnvironments.createBatchExecutionEnvironment(options);
    checkHostAndPort(bev, "192.168.1.1", 1234);
    options.setFlinkMaster("192.168.1.1");
    bev = FlinkExecutionEnvironments.createBatchExecutionEnvironment(options);
    checkHostAndPort(bev, "192.168.1.1", RestOptions.PORT.defaultValue());
}
|
/**
 * Looks up the localized .aocx IP file for the given IP id among the
 * container's localized resources.
 *
 * @param id                 IP id to match against localized resource paths
 * @param dstDir             unused here; kept for interface compatibility
 * @param localizedResources localized resource paths provided by the NM
 * @return the matching file path, or null when the id is empty, no resources
 *         were localized, or no path matches
 */
@Override
public String retrieveIPfilePath(String id, String dstDir,
    Map<Path, List<String>> localizedResources) {
  // Assume .aocx IP file is distributed by DS to local dir
  LOG.info("Got environment: " + id +
      ", search IP file in localized resources");
  if (null == id || id.isEmpty()) {
    LOG.warn("IP_ID environment is empty, skip downloading");
    return null;
  }
  if (localizedResources == null) {
    LOG.warn("Localized resource is null!");
    return null;
  }
  for (Path candidate : localizedResources.keySet()) {
    if (matchesIpid(candidate, id)) {
      String ipFilePath = candidate.toString();
      LOG.info("Found: {}", ipFilePath);
      return ipFilePath;
    }
  }
  LOG.warn("Requested IP file not found");
  return null;
}
|
// With a null localized-resources map, lookup must return null rather than throw.
@Test
public void testLocalizedIpfileNotFoundWithNoLocalResources() {
    String path = plugin.retrieveIPfilePath("fpga", "workDir", null);
    assertNull("Retrieved IP file path", path);
}
|
/**
 * Computes forward groups by union-merging each vertex with its forward
 * producers, relying on topological order so every producer's group already
 * exists when its consumer is visited.
 *
 * @param topologicallySortedVertices job vertices in topological order
 * @param forwardProducersRetriever   returns the forward-edge producers of a vertex
 * @return mapping from vertex id to its forward group; singleton groups are omitted
 */
public static Map<JobVertexID, ForwardGroup> computeForwardGroups(
        final Iterable<JobVertex> topologicallySortedVertices,
        final Function<JobVertex, Set<JobVertex>> forwardProducersRetriever) {
    final Map<JobVertex, Set<JobVertex>> vertexToGroup = new IdentityHashMap<>();
    // iterate all the vertices which are topologically sorted
    for (JobVertex vertex : topologicallySortedVertices) {
        Set<JobVertex> currentGroup = new HashSet<>();
        currentGroup.add(vertex);
        vertexToGroup.put(vertex, currentGroup);
        for (JobVertex producerVertex : forwardProducersRetriever.apply(vertex)) {
            final Set<JobVertex> producerGroup = vertexToGroup.get(producerVertex);
            // Topological order guarantees producers were already grouped;
            // a missing group indicates a bug in group construction.
            if (producerGroup == null) {
                throw new IllegalStateException(
                        "Producer task "
                                + producerVertex.getID()
                                + " forward group is null"
                                + " while calculating forward group for the consumer task "
                                + vertex.getID()
                                + ". This should be a forward group building bug.");
            }
            if (currentGroup != producerGroup) {
                currentGroup =
                        VertexGroupComputeUtil.mergeVertexGroups(
                                currentGroup, producerGroup, vertexToGroup);
            }
        }
    }
    // Only groups with more than one vertex are materialized as ForwardGroups.
    final Map<JobVertexID, ForwardGroup> ret = new HashMap<>();
    for (Set<JobVertex> vertexGroup :
            VertexGroupComputeUtil.uniqueVertexGroups(vertexToGroup)) {
        if (vertexGroup.size() > 1) {
            ForwardGroup forwardGroup = new ForwardGroup(vertexGroup);
            for (JobVertexID jobVertexId : forwardGroup.getJobVertexIds()) {
                ret.put(jobVertexId, forwardGroup);
            }
        }
    }
    return ret;
}
|
// v2 has two forward outputs (to v3 and v4); all three consumers/producers of
// those forward edges must land in one group of size 3.
@Test
void testOneInputSplitsIntoTwo() throws Exception {
    JobVertex v1 = new JobVertex("v1");
    JobVertex v2 = new JobVertex("v2");
    JobVertex v3 = new JobVertex("v3");
    JobVertex v4 = new JobVertex("v4");
    v2.connectNewDataSetAsInput(
            v1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);
    v3.connectNewDataSetAsInput(
            v2, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
    v4.connectNewDataSetAsInput(
            v2, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
    // Mark both of v2's outgoing edges as forward edges.
    v2.getProducedDataSets().get(0).getConsumers().get(0).setForward(true);
    v2.getProducedDataSets().get(1).getConsumers().get(0).setForward(true);
    Set<ForwardGroup> groups = computeForwardGroups(v1, v2, v3, v4);
    checkGroupSize(groups, 1, 3);
}
|
/**
 * Three-argument replace: delegates to the four-argument overload with no
 * regex flags.
 */
public FEELFnResult<Object> invoke(@ParameterName("input") String input, @ParameterName("pattern") String pattern,
        @ParameterName( "replacement" ) String replacement ) {
    return invoke(input, pattern, replacement, null);
}
|
// With the "m" (multiline) flag, ^ matches at line starts, so "^b" hits "bar".
@Test
void invokeWithFlagMultiline() {
    FunctionTestUtil.assertResult(replaceFunction.invoke("foo\nbar", "^b", "ttt", "m"), "foo\ntttar");
}
|
/**
 * Returns a read-only view of the exceptions recorded so far.
 */
public List<Exception> errors() {
    return Collections.unmodifiableList(errors);
}
|
// With zero delay and an always-failing operation, the thrown exception must
// appear in the retry's recorded error list.
@Test
void errors() {
    final var e = new BusinessException("unhandled");
    final var retry = new RetryExponentialBackoff<String>(
        () -> {
            throw e;
        },
        2,
        0
    );
    try {
        retry.perform();
    } catch (BusinessException ex) {
        //ignore
    }
    assertThat(retry.errors(), hasItem(e));
}
|
/**
 * Returns whether the schema contains a column with the given name in the
 * KEY namespace.
 */
public boolean isKeyColumn(final ColumnName columnName) {
    return findColumnMatching(withNamespace(Namespace.KEY).and(withName(columnName)))
        .isPresent();
}
|
// A name absent from the schema must be neither a pseudo column nor a key column.
@Test
public void shouldNotMatchRandomColumnNameAsBeingMetaOrKeyColumns() {
    assertThat(SystemColumns.isPseudoColumn(ColumnName.of("well_this_ain't_in_the_schema")), is(false));
    assertThat(SOME_SCHEMA.isKeyColumn(ColumnName.of("well_this_ain't_in_the_schema")), is(false));
}
|
/**
 * Formats a response line for the command channel: the return-message marker,
 * then (for non-errors only) the success marker, then the serialized command
 * part, terminated by the end-of-output marker.
 *
 * @param rObject result to serialize
 * @return the wire-format response line
 */
// Fix: standard modifier order is "public static final" (JLS / Checkstyle
// ModifierOrder), not "public final static". The duplicated getCommandPart()
// append is also folded into a single statement.
public static final String getOutputCommand(ReturnObject rObject) {
    StringBuilder builder = new StringBuilder();
    // TODO Should be configurable
    // TODO ADD RETURN MESSAGE TO OTHER OUTPUT COMMAND
    builder.append(RETURN_MESSAGE);
    if (!rObject.isError()) {
        builder.append(SUCCESS);
    }
    builder.append(rObject.getCommandPart());
    builder.append(END_OUTPUT);
    return builder.toString();
}
|
// Exhaustively checks the wire encoding of every ReturnObject kind:
// error, primitives, null, references, collections, iterator, and decimal.
@Test
public void testGetOutputCommand() {
    ReturnObject rObject1 = ReturnObject.getErrorReturnObject();
    ReturnObject rObject2 = ReturnObject.getPrimitiveReturnObject(2);
    ReturnObject rObject3 = ReturnObject.getPrimitiveReturnObject(2.2);
    ReturnObject rObject4 = ReturnObject.getPrimitiveReturnObject(2.2f);
    ReturnObject rObject5 = ReturnObject.getPrimitiveReturnObject('c');
    ReturnObject rObject6 = ReturnObject.getPrimitiveReturnObject("Hello\nWorld");
    ReturnObject rObject7 = ReturnObject.getPrimitiveReturnObject(5L);
    ReturnObject rObject8 = ReturnObject.getPrimitiveReturnObject(true);
    ReturnObject rObject9 = ReturnObject.getPrimitiveReturnObject(false);
    ReturnObject rObject10 = ReturnObject.getNullReturnObject();
    ReturnObject rObject11 = ReturnObject.getReferenceReturnObject("o123");
    ReturnObject rObject12 = ReturnObject.getListReturnObject("o123", 2);
    ReturnObject rObject13 = ReturnObject.getMapReturnObject("o124", 3);
    ReturnObject rObject14 = ReturnObject.getSetReturnObject("o125", 3);
    ReturnObject rObject15 = ReturnObject.getArrayReturnObject("o126", 3);
    ReturnObject rObject16 = ReturnObject.getIteratorReturnObject("o127");
    ReturnObject rObject17 = ReturnObject.getDecimalReturnObject(new BigDecimal("-14.532"));
    // "!x" = error; "!y<tag>" = success with a type tag (i int, d double/float,
    // s string/char, L long, b boolean, n null, r reference, l list, a map,
    // h set, t array, g iterator, D decimal); newline in the payload is escaped.
    assertEquals("!x\n", Protocol.getOutputCommand(rObject1));
    assertEquals("!yi2\n", Protocol.getOutputCommand(rObject2));
    assertEquals("!yd2.2\n", Protocol.getOutputCommand(rObject3));
    assertEquals("!yd2.2\n", Protocol.getOutputCommand(rObject4));
    assertEquals("!ysc\n", Protocol.getOutputCommand(rObject5));
    assertEquals("!ysHello\\nWorld\n", Protocol.getOutputCommand(rObject6));
    assertEquals("!yL5\n", Protocol.getOutputCommand(rObject7));
    assertEquals("!ybtrue\n", Protocol.getOutputCommand(rObject8));
    assertEquals("!ybfalse\n", Protocol.getOutputCommand(rObject9));
    assertEquals("!yn\n", Protocol.getOutputCommand(rObject10));
    assertEquals("!yro123\n", Protocol.getOutputCommand(rObject11));
    assertEquals("!ylo123\n", Protocol.getOutputCommand(rObject12));
    assertEquals("!yao124\n", Protocol.getOutputCommand(rObject13));
    assertEquals("!yho125\n", Protocol.getOutputCommand(rObject14));
    assertEquals("!yto126\n", Protocol.getOutputCommand(rObject15));
    assertEquals("!ygo127\n", Protocol.getOutputCommand(rObject16));
    assertEquals("!yD-14.532\n", Protocol.getOutputCommand(rObject17));
}
|
/**
 * Schedules {@code handler} on the underlying timer and wraps the result.
 *
 * @param interval timer interval passed to the underlying timer
 * @param handler  callback to invoke; a null handler yields a null Timer
 * @param args     extra arguments forwarded to the handler
 * @return the wrapped timer, or {@code null} when {@code handler} is null
 */
public Timer add(long interval, TimerHandler handler, Object... args)
{
    return handler == null ? null : new Timer(timer.add(interval, handler, args));
}
|
// Cancelling a timer succeeds once; a second cancel must report false.
@Test
public void testCancelTwice()
{
    Timer timer = timers.add(10, handler);
    assertThat(timer, notNullValue());
    boolean rc = timer.cancel();
    assertThat(rc, is(true));
    rc = timer.cancel();
    assertThat(rc, is(false));
}
|
/**
 * Asks the payload validator whether the given response text is evidence that
 * the payload was executed.
 *
 * @param input response text to validate (UTF-8 encoded before validation)
 */
public final boolean checkIfExecuted(String input) {
    final ByteString inputBytes = ByteString.copyFromUtf8(input);
    return this.validator.isExecuted(Optional.of(inputBytes));
}
|
// The zero-argument overload must still invoke the configured validator.
@Test
public void checkIfExecuted_withNoParameter_executesValidator() {
    TestValidatorIsCalledValidator testValidator = new TestValidatorIsCalledValidator();
    Payload payload = new Payload("my-payload", testValidator, PAYLOAD_ATTRIBUTES, CONFIG);
    payload.checkIfExecuted();
    assertTrue(testValidator.wasCalled);
}
|
/**
 * Encodes an object's fields into a name-to-value map for query expansion.
 * Field metadata is parsed once per class and cached; fields whose value is
 * absent are skipped.
 *
 * @param object object to encode; null yields an empty map
 */
@Override
public Map<String, Object> encode(Object object) throws EncodeException {
    if (object == null) {
        return Collections.emptyMap();
    }
    ObjectParamMetadata metadata =
        classToMetadata.computeIfAbsent(object.getClass(), ObjectParamMetadata::parseObjectType);
    return metadata.objectFields.stream()
        .map(field -> this.FieldValuePair(object, field))
        .filter(fieldObjectPair -> fieldObjectPair.right.isPresent())
        .collect(Collectors.toMap(this::fieldName,
            fieldObjectPair -> fieldObjectPair.right.get()));
}
|
// A plain two-field object must encode into exactly its field name/value pairs.
@Test
void defaultEncoder_normalClassWithValues() {
    final Map<String, Object> expected = new HashMap<>();
    expected.put("foo", "fooz");
    expected.put("bar", "barz");
    final NormalObject normalObject = new NormalObject("fooz", "barz");
    final Map<String, Object> encodedMap = encoder.encode(normalObject);
    assertThat(encodedMap).as("Unexpected encoded query map").isEqualTo(expected);
}
|
/**
 * Records the outcome of a mail send attempt on its log row: on success the
 * message id and SUCCESS status, on failure the FAILURE status and the root
 * cause message of the exception. Send time is set in both cases.
 *
 * @param logId     id of the mail log row to update
 * @param messageId provider message id (success case)
 * @param exception failure cause, or null when the send succeeded
 */
@Override
public void updateMailSendResult(Long logId, String messageId, Exception exception) {
    // 1. success
    if (exception == null) {
        mailLogMapper.updateById(new MailLogDO().setId(logId).setSendTime(LocalDateTime.now())
            .setSendStatus(MailSendStatusEnum.SUCCESS.getStatus()).setSendMessageId(messageId));
        return;
    }
    // 2. failure
    mailLogMapper.updateById(new MailLogDO().setId(logId).setSendTime(LocalDateTime.now())
        .setSendStatus(MailSendStatusEnum.FAILURE.getStatus()).setSendException(getRootCauseMessage(exception)));
}
|
// Failure path: status becomes FAILURE, send time is set, message id stays null,
// and the stored exception is the root-cause summary string.
@Test
public void testUpdateMailSendResult_exception() {
    // mock data
    MailLogDO log = randomPojo(MailLogDO.class, o -> {
        o.setSendStatus(MailSendStatusEnum.INIT.getStatus());
        o.setSendTime(null).setSendMessageId(null).setSendException(null)
            .setTemplateParams(randomTemplateParams());
    });
    mailLogMapper.insert(log);
    // prepare parameters
    Long logId = log.getId();
    Exception exception = new NullPointerException("测试异常");
    // invoke
    mailLogService.updateMailSendResult(logId, null, exception);
    // assert
    MailLogDO dbLog = mailLogMapper.selectById(logId);
    assertEquals(MailSendStatusEnum.FAILURE.getStatus(), dbLog.getSendStatus());
    assertNotNull(dbLog.getSendTime());
    assertNull(dbLog.getSendMessageId());
    assertEquals("NullPointerException: 测试异常", dbLog.getSendException());
}
|
/**
 * Sets the INI configuration backing this environment. Takes effect on the
 * next {@code init()}.
 */
public void setIni(Ini ini) {
    this.ini = ini;
}
|
// An Ini injected via setIni (rather than loaded from a file) must drive the
// environment after init(): the configured user can authenticate.
@Test
public void testSetIni() {
    Ini ini = new Ini();
    ini.addSection("users").put("foo", "bar");
    env = new IniEnvironment();
    env.setIni(ini);
    env.init();
    authenticate();
}
|
/**
 * Decrypts a base64-encoded AES-GCM ciphertext whose first bytes are the IV,
 * using the secret loaded from the key file.
 *
 * @param encryptedText base64 text; layout is IV || ciphertext+tag
 * @return the decrypted UTF-8 plaintext
 * @throws IllegalStateException wrapping any checked crypto failure
 */
@Override
public String decrypt(String encryptedText) {
    try {
        javax.crypto.Cipher cipher = javax.crypto.Cipher.getInstance(CRYPTO_ALGO);
        ByteBuffer byteBuffer = ByteBuffer.wrap(Base64.decodeBase64(StringUtils.trim(encryptedText)));
        // The IV is prepended to the ciphertext; split the buffer accordingly.
        byte[] iv = new byte[GCM_IV_LENGTH_IN_BYTES];
        byteBuffer.get(iv);
        byte[] cipherText = new byte[byteBuffer.remaining()];
        byteBuffer.get(cipherText);
        cipher.init(javax.crypto.Cipher.DECRYPT_MODE, loadSecretFile(), new GCMParameterSpec(GCM_TAG_LENGTH_IN_BITS, iv));
        byte[] cipherData = cipher.doFinal(cipherText);
        return new String(cipherData, StandardCharsets.UTF_8);
    } catch (RuntimeException e) {
        // Let unchecked failures propagate untouched.
        throw e;
    } catch (Exception e) {
        throw new IllegalStateException(e);
    }
}
|
// Decrypting with a malformed secret key file must surface InvalidKeyException
// as the cause of the wrapping IllegalStateException.
@Test
public void decrypt_bad_key() throws Exception {
    URL resource = getClass().getResource("/org/sonar/api/config/internal/AesCipherTest/bad_secret_key.txt");
    AesGCMCipher cipher = new AesGCMCipher(new File(resource.toURI()).getCanonicalPath());
    assertThatThrownBy(() -> cipher.decrypt("9mx5Zq4JVyjeChTcVjEide4kWCwusFl7P2dSVXtg9IY="))
        .hasCauseInstanceOf(InvalidKeyException.class);
}
|
/**
 * Moves the given local file to the macOS Trash via NSFileManager.
 *
 * @param file file to trash
 * @throws LocalAccessDeniedException if the system call fails; the exception
 *         carries the NSError description when one is available
 */
@Override
public void trash(final Local file) throws LocalAccessDeniedException {
    if(log.isDebugEnabled()) {
        log.debug(String.format("Move %s to Trash", file));
    }
    final ObjCObjectByReference error = new ObjCObjectByReference();
    if(!NSFileManager.defaultManager().trashItemAtURL_resultingItemURL_error(
        NSURL.fileURLWithPath(file.getAbsolute()), null, error)) {
        final NSError f = error.getValueAs(NSError.class);
        // Failure with no NSError detail: report the path only.
        if(null == f) {
            throw new LocalAccessDeniedException(file.getAbsolute());
        }
        throw new LocalAccessDeniedException(String.format("%s", f.localizedDescription()));
    }
}
|
// Trashing a path that does not exist must raise LocalAccessDeniedException.
@Test(expected = LocalAccessDeniedException.class)
public void testTrashNotfound() throws Exception {
    Local l = new Local(System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString());
    assertFalse(l.exists());
    new FileManagerTrashFeature().trash(l);
}
|
/**
 * Fetches consume stats for a consumer group from the given broker.
 * Convenience overload that applies no topic filter (null topic).
 */
public ConsumeStats getConsumeStats(final String addr, final String consumerGroup, final long timeoutMillis)
    throws InterruptedException, RemotingTimeoutException, RemotingSendRequestException, RemotingConnectException,
    MQBrokerException {
    return getConsumeStats(addr, consumerGroup, null, timeoutMillis);
}
|
// The stats decoded from the broker response body must round-trip (TPS preserved).
@Test
public void assertGetConsumeStats() throws RemotingException, InterruptedException, MQBrokerException {
    mockInvokeSync();
    ConsumeStats responseBody = new ConsumeStats();
    responseBody.setConsumeTps(1000);
    setResponseBody(responseBody);
    ConsumeStats actual = mqClientAPI.getConsumeStats(defaultBrokerAddr, "", defaultTimeout);
    assertNotNull(actual);
    assertEquals(1000, actual.getConsumeTps(), 0.0);
}
|
/**
 * Asynchronously puts the entry, delegating to the wrapped map. The returned
 * stage completes with the previous value for the key (per the wrapped map's
 * putAsync contract — see its documentation).
 */
@Override
public CompletionStage<V> putAsync(K key, V value) {
    return map.putAsync(key, value);
}
|
// putAsync must resolve to the previous value and leave the new value in the map.
@Test
public void testPutAsync() throws Exception {
    map.put(42, "oldValue");
    Future<String> future = adapter.putAsync(42, "newValue").toCompletableFuture();
    String oldValue = future.get();
    assertEquals("oldValue", oldValue);
    assertEquals("newValue", map.get(42));
}
|
/**
 * Serializes this object into the given buffer using the shared WRITER.
 */
@Override
public void writeTo(ByteBuf byteBuf) throws LispWriterException {
    WRITER.writeTo(byteBuf, this);
}
|
// Round-trip: a referral written to a buffer and read back must equal the original.
@Test
public void testSerialization() throws LispReaderException, LispWriterException,
        LispParseError, DeserializationException {
    ByteBuf byteBuf = Unpooled.buffer();
    ReferralWriter writer = new ReferralWriter();
    writer.writeTo(byteBuf, referral1);
    ReferralReader reader = new ReferralReader();
    LispReferral deserialized = reader.readFrom(byteBuf);
    new EqualsTester()
        .addEqualityGroup(referral1, deserialized).testEquals();
}
|
/**
 * Converts the given annotations into a DataMap. The boolean argument selects
 * the top-level conversion mode of annotationsToData — see that method for its
 * exact meaning.
 */
public static DataMap getAnnotationsMap(Annotation[] as)
{
    return annotationsToData(as, true);
}
|
// An annotation whose scalar members all take default values maps to an entry
// for the annotation itself with an empty member map.
@Test(description = "Non-empty annotation, scalar members, default values: data map with annotation + no members")
public void succeedsOnSupportedScalarMembersWithDefaultValues()
{
    @SupportedScalarMembers
    class LocalClass {
    }
    final Annotation[] annotations = LocalClass.class.getAnnotations();
    final DataMap actual = ResourceModelAnnotation.getAnnotationsMap(annotations);
    Assert.assertNotNull(actual);
    Assert.assertTrue(actual.get(SUPPORTED_SCALAR_MEMBERS) instanceof DataMap);
    final DataMap dataMap = ((DataMap) actual.get(SUPPORTED_SCALAR_MEMBERS));
    Assert.assertEquals(dataMap.size(), 0);
}
|
/**
 * Best-effort handling of a checksum failure: moves the corrupted data file and
 * its checksum file into a "bad_files" directory under the highest writable
 * parent on the same device. Always returns false (the failure is not retried);
 * any error during the move is logged and swallowed.
 *
 * @param p      path of the corrupted file
 * @param in     open stream over the data file; closed before the rename
 * @param inPos  position of the failure in the data file (unused here)
 * @param sums   open stream over the checksum file; closed before the rename
 * @param sumsPos position of the failure in the checksum file (unused here)
 * @return false, always
 */
@Override
public boolean reportChecksumFailure(Path p, FSDataInputStream in,
                                     long inPos,
                                     FSDataInputStream sums, long sumsPos) {
    try {
        // canonicalize f
        File f = ((RawLocalFileSystem)fs).pathToFile(p).getCanonicalFile();
        // find highest writable parent dir of f on the same device
        String device = new DF(f, getConf()).getMount();
        File parent = f.getParentFile();
        File dir = null;
        while (parent != null && FileUtil.canWrite(parent) &&
               parent.toString().startsWith(device)) {
            dir = parent;
            parent = parent.getParentFile();
        }
        if (dir==null) {
            throw new IOException(
                "not able to find the highest writable parent dir");
        }
        // move the file there
        File badDir = new File(dir, "bad_files");
        if (!badDir.mkdirs()) {
            // mkdirs returns false when the dir already exists; only fail if it
            // genuinely is not a directory.
            if (!badDir.isDirectory()) {
                throw new IOException("Mkdirs failed to create " + badDir.toString());
            }
        }
        // Random suffix avoids clobbering an earlier bad copy of the same file.
        String suffix = "." + rand.nextInt();
        File badFile = new File(badDir, f.getName()+suffix);
        LOG.warn("Moving bad file " + f + " to " + badFile);
        in.close(); // close it first
        boolean b = f.renameTo(badFile); // rename it
        if (!b) {
            LOG.warn("Ignoring failure of renameTo");
        }
        // move checksum file too
        File checkFile = ((RawLocalFileSystem)fs).pathToFile(getChecksumFile(p));
        // close the stream before rename to release the file handle
        sums.close();
        b = checkFile.renameTo(new File(badDir, checkFile.getName()+suffix));
        if (!b) {
            LOG.warn("Ignoring failure of renameTo");
        }
    } catch (IOException e) {
        LOG.warn("Error moving bad file " + p, e);
    }
    return false;
}
|
@Test
// End-to-end check of reportChecksumFailure(): both the data file and its
// checksum file must be moved into a "bad_files" directory under the highest
// writable ancestor (forced to dir1 by making 'base' unwritable).
public void testReportChecksumFailure() throws IOException {
  base.mkdirs();
  assertTrue(base.exists() && base.isDirectory());
  final File dir1 = new File(base, "dir1");
  final File dir2 = new File(dir1, "dir2");
  dir2.mkdirs();
  assertTrue(dir2.exists() && FileUtil.canWrite(dir2));
  final String dataFileName = "corruptedData";
  final Path dataPath = new Path(new File(dir2, dataFileName).toURI());
  final Path checksumPath = fileSys.getChecksumFile(dataPath);
  final FSDataOutputStream fsdos = fileSys.create(dataPath);
  try {
    fsdos.writeUTF("foo");
  } finally {
    fsdos.close();
  }
  assertTrue(fileSys.pathToFile(dataPath).exists());
  final long dataFileLength = fileSys.getFileStatus(dataPath).getLen();
  assertTrue(dataFileLength > 0);
  // check that the checksum file is created and not empty:
  assertTrue(fileSys.pathToFile(checksumPath).exists());
  final long checksumFileLength = fileSys.getFileStatus(checksumPath).getLen();
  assertTrue(checksumFileLength > 0);
  // this is a hack to force the #reportChecksumFailure() method to stop
  // climbing up at the 'base' directory and use 'dir1/bad_files' as the
  // corrupted files storage:
  FileUtil.setWritable(base, false);
  FSDataInputStream dataFsdis = fileSys.open(dataPath);
  FSDataInputStream checksumFsdis = fileSys.open(checksumPath);
  boolean retryIsNecessary = fileSys.reportChecksumFailure(dataPath, dataFsdis, 0, checksumFsdis, 0);
  assertTrue(!retryIsNecessary);
  // the data file should be moved:
  assertTrue(!fileSys.pathToFile(dataPath).exists());
  // the checksum file should be moved:
  assertTrue(!fileSys.pathToFile(checksumPath).exists());
  // check that the files exist in the new location where they were moved:
  File[] dir1files = dir1.listFiles(new FileFilter() {
    @Override
    public boolean accept(File pathname) {
      // Only the newly created quarantine directory, not the original dir2.
      return pathname != null && !pathname.getName().equals("dir2");
    }
  });
  assertTrue(dir1files != null);
  assertTrue(dir1files.length == 1);
  File badFilesDir = dir1files[0];
  File[] badFiles = badFilesDir.listFiles();
  assertTrue(badFiles != null);
  assertTrue(badFiles.length == 2);
  boolean dataFileFound = false;
  boolean checksumFileFound = false;
  // Quarantined files carry a random suffix, so match by name prefix and
  // compare lengths to confirm contents were preserved.
  for (File badFile: badFiles) {
    if (badFile.getName().startsWith(dataFileName)) {
      assertTrue(dataFileLength == badFile.length());
      dataFileFound = true;
    } else if (badFile.getName().contains(dataFileName + ".crc")) {
      assertTrue(checksumFileLength == badFile.length());
      checksumFileFound = true;
    }
  }
  assertTrue(dataFileFound);
  assertTrue(checksumFileFound);
}
|
/**
 * Returns a new ACL built from {@code existingAcl} with every entry matched
 * by {@code inAclSpec} removed. Mask entries are tracked separately so that
 * masks can be recalculated for any scope whose entries changed.
 *
 * @param existingAcl the current, complete ACL
 * @param inAclSpec entries to remove (validated and indexed for lookup)
 * @return the filtered, validated ACL
 * @throws AclException if the resulting ACL is invalid
 */
public static List<AclEntry> filterAclEntriesByAclSpec(
    List<AclEntry> existingAcl, List<AclEntry> inAclSpec) throws AclException {
  ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec);
  ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES);
  // Last unfiltered MASK entry per scope, kept out of aclBuilder for now.
  EnumMap<AclEntryScope, AclEntry> providedMask =
      Maps.newEnumMap(AclEntryScope.class);
  // Scopes whose MASK entry was explicitly removed by the spec.
  EnumSet<AclEntryScope> maskDirty = EnumSet.noneOf(AclEntryScope.class);
  // Scopes that lost at least one entry and may need a recomputed mask.
  EnumSet<AclEntryScope> scopeDirty = EnumSet.noneOf(AclEntryScope.class);
  for (AclEntry existingEntry: existingAcl) {
    if (aclSpec.containsKey(existingEntry)) {
      // Entry is filtered out; remember which scopes/masks were touched.
      scopeDirty.add(existingEntry.getScope());
      if (existingEntry.getType() == MASK) {
        maskDirty.add(existingEntry.getScope());
      }
    } else {
      if (existingEntry.getType() == MASK) {
        providedMask.put(existingEntry.getScope(), existingEntry);
      } else {
        aclBuilder.add(existingEntry);
      }
    }
  }
  copyDefaultsIfNeeded(aclBuilder);
  // Re-inserts or recomputes MASK entries based on the dirty bookkeeping.
  calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty);
  return buildAndValidateAcl(aclBuilder);
}
|
@Test
// Removing the READ_WRITE user "diana" must trigger recomputation of the
// access-scope mask down to READ (the union of the remaining group/named entries).
public void testFilterAclEntriesByAclSpecAccessMaskCalculated()
    throws AclException {
  List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS, USER, ALL))
    .add(aclEntry(ACCESS, USER, "bruce", READ))
    .add(aclEntry(ACCESS, USER, "diana", READ_WRITE))
    .add(aclEntry(ACCESS, GROUP, READ))
    .add(aclEntry(ACCESS, MASK, READ_WRITE))
    .add(aclEntry(ACCESS, OTHER, READ))
    .build();
  List<AclEntry> aclSpec = Lists.newArrayList(
    aclEntry(ACCESS, USER, "diana"));
  List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS, USER, ALL))
    .add(aclEntry(ACCESS, USER, "bruce", READ))
    .add(aclEntry(ACCESS, GROUP, READ))
    .add(aclEntry(ACCESS, MASK, READ))
    .add(aclEntry(ACCESS, OTHER, READ))
    .build();
  assertEquals(expected, filterAclEntriesByAclSpec(existing, aclSpec));
}
|
/**
 * Encodes the given numbers into a hashid string.
 * Returns the empty string for an empty input or any negative number
 * (historic, silent-rejection API behavior); values above the supported
 * maximum are rejected loudly instead.
 */
public String encode(long... numbers) {
  // Nothing to encode.
  if (numbers.length == 0) {
    return "";
  }
  // Validate the entire input up front before delegating to the core encoder.
  for (int i = 0; i < numbers.length; i++) {
    final long candidate = numbers[i];
    if (candidate < 0) {
      return "";
    }
    if (candidate > MAX_NUMBER) {
      throw new IllegalArgumentException("number can not be greater than " + MAX_NUMBER + "L");
    }
  }
  return this._encode(numbers);
}
|
@Test
// Regression test for issue 31: encoding a very large batch of maximal
// values must still produce a non-empty hashid (no overflow/failure).
public void test_issue31() {
  final long[] numbers = new long[500000];
  long current = Hashids.MAX_NUMBER;
  // Fill with descending values starting at the supported maximum.
  for (int i = 0; i < numbers.length; i++) {
    numbers[i] = current--;
  }
  final Hashids a = new Hashids("this is my salt");
  Assert.assertNotEquals("", a.encode(numbers));
}
|
// Static factory for a fresh, empty builder instance.
public static RuleDescriptionSectionDtoBuilder builder() {
  return new RuleDescriptionSectionDtoBuilder();
}
|
@Test
// setDefault() and key() are mutually exclusive on the builder; calling
// setDefault after key must fail with a descriptive IllegalArgumentException.
void setDefault_whenKeyAlreadySet_shouldThrow() {
  RuleDescriptionSectionDto.RuleDescriptionSectionDtoBuilder builderWithKey = RuleDescriptionSectionDto.builder()
    .key("tagada");
  assertThatExceptionOfType(IllegalArgumentException.class)
    .isThrownBy(builderWithKey::setDefault)
    .withMessage("Only one of setDefault and key methods can be called");
}
|
@Description("bitwise NOT in 2's complement arithmetic")
@ScalarFunction
@SqlType(StandardTypes.BIGINT)
// SQL scalar: flips every bit of the argument.
public static long bitwiseNot(@SqlType(StandardTypes.BIGINT) long num)
{
    // In two's complement, ~x is identical to -x - 1 for every long,
    // including Long.MIN_VALUE (the subtraction wraps as required).
    return -num - 1;
}
|
@Test
// Checks bitwise_not against Java's ~ operator across zero, small values,
// and both long extremes.
public void testBitwiseNot()
{
    assertFunction("bitwise_not(0)", BIGINT, ~0L);
    assertFunction("bitwise_not(-1)", BIGINT, ~-1L);
    assertFunction("bitwise_not(8)", BIGINT, ~8L);
    assertFunction("bitwise_not(-8)", BIGINT, ~-8L);
    assertFunction("bitwise_not(" + Long.MAX_VALUE + ")", BIGINT, ~Long.MAX_VALUE);
    assertFunction("bitwise_not(-" + Long.MAX_VALUE + "-1)", BIGINT, ~Long.MIN_VALUE); // bitwise_not(MIN_VALUE)
}
|
// Whether round-robin selection is enabled for this configuration.
public boolean isRoundRobinSelection() {
    return roundRobinSelection;
}
|
@Test
// Default is false; the builder setter must flip the flag on the built object.
void roundRobinSelection() {
    assertThat(builder.build().isRoundRobinSelection()).isFalse();
    builder.roundRobinSelection(true);
    assertThat(builder.build().isRoundRobinSelection()).isTrue();
}
|
/**
 * Reads a file from the agent's workspace.
 *
 * @param agent the agent whose workspace is queried
 * @param request expects a "filename" entry naming the document to read
 * @return SUCCESS with the file content as the result, or FAILURE when the
 *         document is not present in the workspace
 */
public ActionResult apply(Agent agent, Map<String, String> request) {
    // Hoist the repeated map lookup so the same key is read exactly once.
    final String filename = request.get("filename");
    log.debug("Reading file {} for agent {}", filename, agent.getId());
    Optional<Document> document = workspace.getDocument(agent.getId(), filename);
    if (document.isPresent()) {
        byte[] artifactContent = workspace.getDocumentContent(agent.getId(), filename);
        // NOTE(review): new String(byte[]) decodes with the platform default
        // charset; if workspace content is UTF-8, StandardCharsets.UTF_8
        // should be passed explicitly — confirm against writers of this data.
        return ActionResult.builder()
                .status(ActionResult.Status.SUCCESS)
                .result(new String(artifactContent))
                .summary(String.format("Successfully read file %s from the agent workspace.", document.get().getName()))
                .build();
    } else {
        return ActionResult.builder()
                .status(ActionResult.Status.FAILURE)
                .summary(String.format("I could not find the file %s in the workspace.", filename))
                .error("File not found.")
                .build();
    }
}
|
@Test
// Happy path: when the document exists, apply() returns SUCCESS with the
// file content and a human-readable summary, and queries the workspace once
// for metadata and once for content.
void testApplyWithExistingFile() {
    String agentId = "agent1";
    String filename = "test.txt";
    String fileContent = "This is a test file.";
    Map<String, String> request = new HashMap<>();
    request.put("filename", filename);
    when(agent.getId()).thenReturn(agentId);
    when(workspace.getDocument(agentId, filename)).thenReturn(Optional.of(Document.builder().name(filename).metadata(Map.of("content-type","text/plain")).build()));
    when(workspace.getDocumentContent(agentId, filename)).thenReturn(fileContent.getBytes());
    ActionResult result = readFileAction.apply(agent, request);
    assertEquals(ActionResult.Status.SUCCESS, result.getStatus());
    assertEquals(fileContent, result.getResult());
    assertEquals("Successfully read file test.txt from the agent workspace.", result.getSummary());
    assertNull(result.getError());
    verify(workspace).getDocument(agentId, filename);
    verify(workspace).getDocumentContent(agentId, filename);
}
|
/**
 * Prints one progress line for a consumer: the basic throughput columns
 * followed by the extended (rebalance-aware) columns, then a newline.
 *
 * @param id consumer id shown in the row
 * @param bytesRead / messagesRead cumulative totals; last* are the totals
 *        at the previous report, used by the helpers to compute deltas
 * @param startMs / endMs interval boundaries for rate calculation
 * @param joinTimeMsInSingleRound group-join time spent in this interval
 */
protected static void printConsumerProgress(int id,
                                            long bytesRead,
                                            long lastBytesRead,
                                            long messagesRead,
                                            long lastMessagesRead,
                                            long startMs,
                                            long endMs,
                                            SimpleDateFormat dateFormat,
                                            long joinTimeMsInSingleRound) {
    printBasicProgress(id, bytesRead, lastBytesRead, messagesRead, lastMessagesRead, startMs, endMs, dateFormat);
    printExtendedProgress(bytesRead, lastBytesRead, messagesRead, lastMessagesRead, startMs, endMs, joinTimeMsInSingleRound);
    System.out.println();
}
|
@Test
// The non-detailed progress output must have as many body columns as header
// columns (2 extra columns checked by the shared helper).
public void testNonDetailedHeaderMatchBody() {
    testHeaderMatchContent(false, 2,
        () -> ConsumerPerformance.printConsumerProgress(1, 1024 * 1024, 0, 1, 0, 0, 1, dateFormat, 1L));
}
|
/**
 * Pushes the given schema to the BigQuery external table, skipping the call
 * entirely when neither the schema nor the partition-filter setting changed.
 *
 * @param tableName table to update in the configured project/dataset
 * @param schema the desired (non-partition) field schema
 * @param partitionFields names of partition columns to re-append at the end
 */
public void updateTableSchema(String tableName, Schema schema, List<String> partitionFields) {
  Table existingTable = bigquery.getTable(TableId.of(projectId, datasetName, tableName));
  ExternalTableDefinition definition = existingTable.getDefinition();
  Schema remoteTableSchema = definition.getSchema();
  List<Field> finalTableFields = new ArrayList<>(schema.getFields());
  // Add the partition fields into the schema to avoid conflicts while updating. And ensure the partition fields are at the end to
  // avoid unnecessary updates.
  List<Field> bqPartitionFields = remoteTableSchema.getFields().stream()
      .filter(field -> partitionFields.contains(field.getName()))
      .collect(Collectors.toList());
  finalTableFields.addAll(bqPartitionFields);
  Schema finalSchema = Schema.of(finalTableFields);
  boolean sameSchema = definition.getSchema() != null && definition.getSchema().equals(finalSchema);
  // NOTE(review): when partitionFields is non-empty this dereferences
  // getHivePartitioningOptions() without a null check — confirm partitioned
  // tables here always carry hive partitioning options.
  boolean samePartitionFilter = partitionFields.isEmpty()
      || (requirePartitionFilter == (definition.getHivePartitioningOptions().getRequirePartitionFilter() != null && definition.getHivePartitioningOptions().getRequirePartitionFilter()));
  if (sameSchema && samePartitionFilter) {
    LOG.info("No table update is needed.");
    return; // No need to update schema.
  }
  if (!StringUtils.isNullOrEmpty(bigLakeConnectionId)) {
    // BigLake tables are updated through the table object itself.
    Table updatedTable =
        existingTable.toBuilder().setDefinition(StandardTableDefinition.of(finalSchema)).build();
    updatedTable.update();
  } else {
    ExternalTableDefinition.Builder builder = definition.toBuilder();
    builder.setSchema(finalSchema);
    // Disable autodetect so the explicit schema above is authoritative.
    builder.setAutodetect(false);
    if (definition.getHivePartitioningOptions() != null) {
      builder.setHivePartitioningOptions(definition.getHivePartitioningOptions().toBuilder().setRequirePartitionFilter(requirePartitionFilter).build());
    }
    Table updatedTable = existingTable.toBuilder()
        .setDefinition(builder.build())
        .build();
    bigquery.update(updatedTable);
  }
}
|
@Test
// When the remote schema already equals local fields + trailing partition
// field and the partition-filter flag matches, no update call must be made.
void skipUpdatingSchema_partitioned() throws Exception {
  BigQuerySyncConfig config = new BigQuerySyncConfig(properties);
  client = new HoodieBigQuerySyncClient(config, mockBigQuery);
  Table mockTable = mock(Table.class);
  ExternalTableDefinition mockTableDefinition = mock(ExternalTableDefinition.class);
  // The table schema has no change: it contains a "field" and a "partition_field".
  Schema schema = Schema.of(Field.of("field", StandardSQLTypeName.STRING));
  List<String> partitionFields = new ArrayList<String>();
  partitionFields.add("partition_field");
  List<Field> bqFields = new ArrayList<Field>();
  // The "partition_field" always follows "field".
  bqFields.add(Field.of("field", StandardSQLTypeName.STRING));
  bqFields.add(Field.of("partition_field", StandardSQLTypeName.STRING));
  Schema bqSchema = Schema.of(bqFields);
  HivePartitioningOptions hivePartitioningOptions = HivePartitioningOptions.newBuilder().setRequirePartitionFilter(true).build();
  when(mockBigQuery.getTable(any())).thenReturn(mockTable);
  when(mockTable.getDefinition()).thenReturn(mockTableDefinition);
  when(mockTableDefinition.getSchema()).thenReturn(bqSchema);
  when(mockTableDefinition.getHivePartitioningOptions()).thenReturn(hivePartitioningOptions);
  client.updateTableSchema(TEST_TABLE, schema, partitionFields);
  // Expect no update.
  verify(mockBigQuery, never()).update(mockTable);
}
|
/**
 * Decides whether the small-changeset exemption applies: the condition must
 * have failed, be one of the exempt metrics, the exemption must be enabled
 * (default on), and the project change must qualify as small.
 */
public boolean appliesTo(Component project, @Nullable MetricEvaluationResult metricEvaluationResult) {
  // Guard clauses preserve the original short-circuit evaluation order.
  if (metricEvaluationResult == null) {
    return false;
  }
  if (metricEvaluationResult.evaluationResult.level() == Measure.Level.OK) {
    return false;
  }
  if (!METRICS_TO_IGNORE_ON_SMALL_CHANGESETS.contains(metricEvaluationResult.condition.getMetric().getKey())) {
    return false;
  }
  if (!config.getConfiguration().getBoolean(CoreProperties.QUALITY_GATE_IGNORE_SMALL_CHANGES).orElse(true)) {
    return false;
  }
  return isSmallChangeset(project);
}
|
@Test
// A failing new-coverage condition on a project with fewer than 20 new lines
// must be ignored when the small-changeset setting is enabled.
public void ignore_errors_about_new_coverage_for_small_changesets() {
  mapSettings.setProperty(CoreProperties.QUALITY_GATE_IGNORE_SMALL_CHANGES, true);
  QualityGateMeasuresStep.MetricEvaluationResult metricEvaluationResult = generateEvaluationResult(NEW_COVERAGE_KEY, ERROR);
  Component project = generateNewRootProject();
  // 19 new lines: just under the small-changeset threshold.
  measureRepository.addRawMeasure(PROJECT_REF, CoreMetrics.NEW_LINES_KEY, newMeasureBuilder().create(19));
  boolean result = underTest.appliesTo(project, metricEvaluationResult);
  assertThat(result).isTrue();
}
|
@Override
// Determines existence of a file by searching its parent directory listing.
public boolean find(final Path file, final ListProgressListener listener) throws BackgroundException {
    // The root always exists by definition.
    if(file.isRoot()) {
        return true;
    }
    try {
        // A non-null search result means the file was found in the listing.
        return null != this.search(file, listener);
    }
    catch(NotfoundException e) {
        // Missing parent directory implies the file itself cannot exist.
        if(log.isDebugEnabled()) {
            log.debug(String.format("Parent directory for file %s not found", file));
        }
        return false;
    }
}
|
@Test
// Case sensitivity behavior of find(): a case-insensitive protocol matches
// "/a/b" against listed "/a/B" (same type), but a type mismatch or a
// case-sensitive protocol must not match.
public void testCaseInsensitive() throws Exception {
    // Insensitive protocol, same type (file): match.
    assertTrue(new DefaultFindFeature(new NullSession(new Host(new TestProtocol())) {
        @Override
        public Protocol.Case getCaseSensitivity() {
            return Protocol.Case.insensitive;
        }
        @Override
        public AttributedList<Path> list(final Path file, final ListProgressListener listener) {
            return new AttributedList<>(Collections.singletonList(new Path("/a/B", EnumSet.of(Path.Type.file))));
        }
    }).find(new Path("/a/b", EnumSet.of(Path.Type.file))));
    // Insensitive protocol but listed entry is a directory, sought a file: no match.
    assertFalse(new DefaultFindFeature(new NullSession(new Host(new TestProtocol())) {
        @Override
        public Protocol.Case getCaseSensitivity() {
            return Protocol.Case.insensitive;
        }
        @Override
        public AttributedList<Path> list(final Path file, final ListProgressListener listener) {
            return new AttributedList<>(Collections.singletonList(new Path("/a/B", EnumSet.of(Path.Type.directory))));
        }
    }).find(new Path("/a/b", EnumSet.of(Path.Type.file))));
    // Sensitive protocol: "/a/b" vs "/a/B" differs in case, no match.
    assertFalse(new DefaultFindFeature(new NullSession(new Host(new TestProtocol())) {
        @Override
        public Protocol.Case getCaseSensitivity() {
            return Protocol.Case.sensitive;
        }
        @Override
        public AttributedList<Path> list(final Path file, final ListProgressListener listener) {
            return new AttributedList<>(Collections.singletonList(new Path("/a/B", EnumSet.of(Path.Type.file))));
        }
    }).find(new Path("/a/b", EnumSet.of(Path.Type.file))));
}
|
/**
 * Splits plain text into lines of at most {@code maxTokensPerLine} tokens,
 * delegating to the internal splitter with the plaintext split options.
 */
public static List<String> splitPlainTextLines(String text, int maxTokensPerLine) {
    return internalSplitLines(text, maxTokensPerLine, true, s_plaintextSplitOptions);
}
|
@Test
// A long sentence with a 2-token budget must be chunked into 2-word lines,
// with the trailing word on its own line.
public void canSplitPlainTextLinesLongStringWithSmallTokenCount() {
    String input = "This is a very very very very very very very very very very very long string.";
    List<String> expected = Arrays.asList(
        "This is a",
        "very very",
        "very very",
        "very very",
        "very very",
        "very very",
        "very long",
        "string.");
    List<String> result = TextChunker.splitPlainTextLines(input, 2);
    Assertions.assertEquals(expected, result);
}
|
@Override
// Scores a single position: 1 when it closed with a loss, 0 otherwise.
public Num calculate(BarSeries series, Position position) {
    if (position.hasLoss()) {
        return series.one();
    }
    return series.zero();
}
|
@Test
// An empty trading record contains no losing positions, so the loss
// criterion must evaluate to zero.
public void calculateWithNoPositions() {
    MockBarSeries series = new MockBarSeries(numFunction, 100, 105, 110, 100, 95, 105);
    assertNumEquals(0, getCriterion().calculate(series, new BaseTradingRecord()));
}
|
@Override
// Renders the values as "(v0, v1, ...)"; an empty value list yields "()".
public final String toString() {
    final StringBuilder buffer = new StringBuilder("(");
    for (int index = 0; index < values.size(); index++) {
        if (index > 0) {
            buffer.append(", ");
        }
        buffer.append(getValue(index));
    }
    return buffer.append(')').toString();
}
|
@Test
// toString() must render each expression segment kind correctly: "?" for a
// question-mark parameter, "$n" for a dollar parameter, quoted literals,
// complex-segment text, and the raw text of a nested type cast.
void assertToString() {
    List<ExpressionSegment> expressionSegments = new ArrayList<>(4);
    ParameterMarkerExpressionSegment parameterMarkerExpressionSegment = new ParameterMarkerExpressionSegment(1, 1, 1);
    ParameterMarkerExpressionSegment positionalParameterMarkerExpressionSegment = new ParameterMarkerExpressionSegment(1, 1, 0, ParameterMarkerType.DOLLAR);
    LiteralExpressionSegment literalExpressionSegment = new LiteralExpressionSegment(2, 2, "literals");
    // Anonymous complex segment whose text is echoed verbatim.
    ComplexExpressionSegment complexExpressionSegment = new ComplexExpressionSegment() {
        @Override
        public String getText() {
            return "complexExpressionSegment";
        }
        @Override
        public int getStartIndex() {
            return 3;
        }
        @Override
        public int getStopIndex() {
            return 3;
        }
    };
    expressionSegments.add(parameterMarkerExpressionSegment);
    expressionSegments.add(positionalParameterMarkerExpressionSegment);
    expressionSegments.add(literalExpressionSegment);
    expressionSegments.add(complexExpressionSegment);
    // Nested cast: ($2::varchar)::jsonb.
    expressionSegments.add(new TypeCastExpression(0, 0, "$2::varchar::jsonb", new TypeCastExpression(0, 0, "$2::varchar",
            new ParameterMarkerExpressionSegment(0, 0, 1, ParameterMarkerType.DOLLAR), "varchar"), "jsonb"));
    InsertValue insertValue = new InsertValue(expressionSegments);
    String actualToString = insertValue.toString();
    String expectedToString = "(?, $1, 'literals', complexExpressionSegment, $2::varchar::jsonb)";
    assertThat(actualToString, is(expectedToString));
}
|
// Returns the configuration value prior to this change event.
public String getOldValue() {
    return oldValue;
}
|
@Test
// Simple getter/setter round-trip for the event's old value.
void getOldValue() {
    ConfigurationChangeEvent event = new ConfigurationChangeEvent();
    event.setOldValue("oldValue");
    Assertions.assertEquals("oldValue", event.getOldValue());
}
|
// Decodes an HPACK header block into the given headers object, enforcing
// the max header list size and optional validation. Validation errors are
// deferred to finish() so the dynamic table is updated consistently first.
void decode(int streamId, ByteBuf in, Http2Headers headers, boolean validateHeaders) throws Http2Exception {
    Http2HeadersSink sink = new Http2HeadersSink(
            streamId, headers, maxHeaderListSize, validateHeaders);
    // Check for dynamic table size updates, which must occur at the beginning:
    // https://www.rfc-editor.org/rfc/rfc7541.html#section-4.2
    decodeDynamicTableSizeUpdates(in);
    decode(in, sink);
    // Now that we've read all of our headers we can perform the validation steps. We must
    // delay throwing until this point to prevent dynamic table corruption.
    sink.finish();
}
|
@Test
// Decoding a header block that carries the response pseudo-header ":status"
// as part of a request must fail with a stream-level error.
public void responsePseudoHeaderInRequest() throws Exception {
    final ByteBuf in = Unpooled.buffer(200);
    try {
        final HpackEncoder hpackEncoder = new HpackEncoder(true);
        final Http2Headers toEncode = new DefaultHttp2Headers();
        toEncode.add(":method", "GET");
        toEncode.add(":status", "200");
        hpackEncoder.encodeHeaders(1, in, toEncode, NEVER_SENSITIVE);
        final Http2Headers decoded = new DefaultHttp2Headers();
        // Lambda replaces the anonymous Executable; behavior is identical.
        assertThrows(Http2Exception.StreamException.class,
                () -> hpackDecoder.decode(1, in, decoded, true));
    } finally {
        // Release the buffer regardless of the assertion outcome.
        in.release();
    }
}
|
@Override
// Builds a Beam SQL table from a table definition by parsing TBLPROPERTIES
// JSON into a configuration Row (missing/null fields accepted) and handing
// it to the SchemaIO provider. Configuration/schema problems surface as
// InvalidTableException; a JSON re-parse failure indicates an internal bug.
public BeamSqlTable buildBeamSqlTable(Table tableDefinition) {
  ObjectNode tableProperties = tableDefinition.getProperties();
  try {
    RowJson.RowJsonDeserializer deserializer =
        RowJson.RowJsonDeserializer.forSchema(getSchemaIOProvider().configurationSchema())
            .withNullBehavior(RowJson.RowJsonDeserializer.NullBehavior.ACCEPT_MISSING_OR_NULL);
    Row configurationRow =
        newObjectMapperWith(deserializer).readValue(tableProperties.toString(), Row.class);
    SchemaIO schemaIO =
        getSchemaIOProvider()
            .from(tableDefinition.getLocation(), configurationRow, tableDefinition.getSchema());
    return new SchemaIOTableWrapper(schemaIO);
  } catch (InvalidConfigurationException | InvalidSchemaException e) {
    throw new InvalidTableException(e.getMessage());
  } catch (JsonProcessingException e) {
    // The properties were already parsed once, so failing here is a bug.
    throw new AssertionError("Failed to re-parse TBLPROPERTIES JSON " + tableProperties);
  }
}
|
@Test
// Building a table from the test definition and reading it must yield
// exactly the expected rows (order-independent).
public void testBuildIOReader() {
  TestSchemaIOTableProviderWrapper provider = new TestSchemaIOTableProviderWrapper();
  BeamSqlTable beamSqlTable = provider.buildBeamSqlTable(testTable);
  PCollection<Row> result = beamSqlTable.buildIOReader(pipeline.begin());
  PAssert.that(result).containsInAnyOrder(rows);
  pipeline.run();
}
|
// A resource is encodable iff a codec is registered for the runtime class
// of its value; an absent value is never encodable.
boolean isEncodable(DiscreteResource resource) {
    return resource.valueAs(Object.class)
            .filter(value -> codecs.containsKey(value.getClass()))
            .isPresent();
}
|
@Test
// A VLAN discrete resource must be reported encodable (its codec is registered).
public void isVlanEncodable() {
    DiscreteResource resource = Resources.discrete(DID, PN, VLAN).resource();
    assertThat(sut.isEncodable(resource), is(true));
}
|
@Override
// Returns the Java version detected from the jar, captured at construction.
public Integer getJavaVersion() {
  return jarJavaVersion;
}
|
@Test
// The processor must echo back the Java version it was constructed with.
public void testGetJavaVersion() {
  SpringBootExplodedProcessor springBootExplodedProcessor =
      new SpringBootExplodedProcessor(Paths.get("ignore"), Paths.get("ignore"), 8);
  assertThat(springBootExplodedProcessor.getJavaVersion()).isEqualTo(8);
}
|
@Override
// Builds the fetch requests for the current assignment and submits them,
// wiring the shared success/failure handlers into the poll result.
public PollResult poll(long currentTimeMs) {
    return pollInternal(
            prepareFetchRequests(),
            this::handleFetchSuccess,
            this::handleFetchFailure
    );
}
|
@Test
// When a partition with buffered records is unassigned, its leftover records
// must be dropped and a fetch for the newly assigned partition must proceed
// normally, honoring max.poll.records = 2.
public void testFetchAfterPartitionWithFetchedRecordsIsUnassigned() {
    buildFetcher(2);
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 1);
    // Returns 3 records while `max.poll.records` is configured to 2
    client.prepareResponse(matchesOffset(tidp0, 1), fullFetchResponse(tidp0, records, Errors.NONE, 100L, 0));
    assertEquals(1, sendFetches());
    networkClientDelegate.poll(time.timer(0));
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsByPartition = fetchRecords();
    List<ConsumerRecord<byte[], byte[]>> recordsToTest = recordsByPartition.get(tp0);
    // Only 2 of the 3 fetched records are returned; position advances past them.
    assertEquals(2, recordsToTest.size());
    assertEquals(3L, subscriptions.position(tp0).offset);
    assertEquals(1, recordsToTest.get(0).offset());
    assertEquals(2, recordsToTest.get(1).offset());
    // Reassign to tp1; the third buffered tp0 record must not leak through.
    assignFromUser(singleton(tp1));
    client.prepareResponse(matchesOffset(tidp1, 4), fullFetchResponse(tidp1, nextRecords, Errors.NONE, 100L, 0));
    subscriptions.seek(tp1, 4);
    assertEquals(1, sendFetches());
    networkClientDelegate.poll(time.timer(0));
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchRecords();
    assertNull(fetchedRecords.get(tp0));
    recordsToTest = fetchedRecords.get(tp1);
    assertEquals(2, recordsToTest.size());
    assertEquals(6L, subscriptions.position(tp1).offset);
    assertEquals(4, recordsToTest.get(0).offset());
    assertEquals(5, recordsToTest.get(1).offset());
}
|
/**
 * Expands every (possibly wildcarded) topic-name pattern into its concrete
 * topic names, each mapped to the pattern's partition spec.
 */
public Map<String, PartitionsSpec> materialize() {
    final HashMap<String, PartitionsSpec> expanded = new HashMap<>();
    map.forEach((topicPattern, partitions) -> {
        for (String topicName : StringExpander.expand(topicPattern)) {
            expanded.put(topicName, partitions);
        }
    });
    return expanded;
}
|
@Test
// The pattern "topicA[0-2]" must expand to three concrete topics sharing
// PARTSA, while the literal "topicB" keeps PARTSB — four entries total.
public void testMaterialize() {
    Map<String, PartitionsSpec> parts = FOO.materialize();
    assertTrue(parts.containsKey("topicA0"));
    assertTrue(parts.containsKey("topicA1"));
    assertTrue(parts.containsKey("topicA2"));
    assertTrue(parts.containsKey("topicB"));
    assertEquals(4, parts.keySet().size());
    assertEquals(PARTSA, parts.get("topicA0"));
    assertEquals(PARTSA, parts.get("topicA1"));
    assertEquals(PARTSA, parts.get("topicA2"));
    assertEquals(PARTSB, parts.get("topicB"));
}
|
@SuppressWarnings("unchecked")
@Override
// Registers schemas for CREATE ... AS SELECT and CREATE SOURCE statements
// before execution, wrapping generic KsqlExceptions into statement-scoped
// errors; the schema-id config is stripped from the returned statement.
public <T extends Statement> ConfiguredStatement<T> inject(
    final ConfiguredStatement<T> statement
) {
  try {
    if (statement.getStatement() instanceof CreateAsSelect) {
      registerForCreateAs((ConfiguredStatement<? extends CreateAsSelect>) statement);
    } else if (statement.getStatement() instanceof CreateSource) {
      registerForCreateSource((ConfiguredStatement<? extends CreateSource>) statement);
    }
  } catch (final KsqlStatementException e) {
    // Already carries statement context; rethrow untouched.
    throw e;
  } catch (final KsqlException e) {
    // Attach the (masked) statement text so the error is actionable.
    throw new KsqlStatementException(
        ErrorMessageUtil.buildErrorMessage(e),
        statement.getMaskedStatementText(),
        e.getCause());
  }
  // Remove schema id from SessionConfig
  return stripSchemaIdConfig(statement);
}
|
@Test
// A Schema Registry client failure during registration must surface as a
// KsqlStatementException whose message and cause identify the topic error.
public void shouldPropagateErrorOnSRClientError() throws Exception {
  // Given:
  givenStatement("CREATE STREAM sink WITH(value_format='AVRO') AS SELECT * FROM SOURCE;");
  when(schemaRegistryClient.register(anyString(), any(ParsedSchema.class)))
      .thenThrow(new IOException("FUBAR"));
  // When:
  final Exception e = assertThrows(
      KsqlStatementException.class,
      () -> injector.inject(statement)
  );
  // Then:
  assertThat(e.getMessage(), containsString(
      "Could not register schema for topic"));
  assertThat(e.getCause(), (hasProperty("message",
      is("Could not register schema for topic: FUBAR"))));
}
|
@Override
// Intentionally a no-op: the aggregated value is maintained incrementally
// on accept(), so there is nothing to compute at calculation time.
public void calculate() {
}
|
@Test
// Feeding a large then a small sample for the same entity must leave the
// function's value at the larger of the two (max semantics).
public void testCalculate() {
    function.accept(MeterEntity.newService("service-test", Layer.GENERAL), LARGE_VALUE);
    function.accept(MeterEntity.newService("service-test", Layer.GENERAL), SMALL_VALUE);
    function.calculate();
    assertThat(function.getValue()).isEqualTo(LARGE_VALUE);
}
|
// Convenience entry point: runs the recursive edge-based Tarjan SCC search
// on the graph with the given edge-transition filter.
public static ConnectedComponents findComponentsRecursive(Graph graph, EdgeTransitionFilter edgeTransitionFilter, boolean excludeSingleEdgeComponents) {
    return new EdgeBasedTarjanSCC(graph, edgeTransitionFilter, excludeSingleEdgeComponents).findComponentsRecursive();
}
|
@Test
// A turn restriction can split one strongly connected component into many:
// without it the 0-1-2-3 circle is one component; with the 1->2->(edge 2)
// transition forbidden, every edge key becomes its own component.
public void withTurnRestriction() {
    // here 0-1-2-3 would be a circle and thus belong to same connected component. but if there is a
    // turn restriction for going 0->2->3 this splits the graph into multiple components
    // 0->1
    // |  |
    // 3<-2->4
    g.edge(0, 1).setDistance(1).set(speedEnc, 10, 0); // edge-keys 0,1
    g.edge(1, 2).setDistance(1).set(speedEnc, 10, 0); // edge-keys 2,3
    g.edge(2, 3).setDistance(1).set(speedEnc, 10, 0); // edge-keys 4,5
    g.edge(3, 0).setDistance(1).set(speedEnc, 10, 0); // edge-keys 6,7
    g.edge(2, 4).setDistance(1).set(speedEnc, 10, 0); // edge-keys 8,9
    // first lets check what happens without turn costs
    ConnectedComponents result = EdgeBasedTarjanSCC.findComponentsRecursive(g, fwdAccessFilter, false);
    assertEquals(7, result.getTotalComponents());
    assertEquals(1, result.getComponents().size());
    assertEquals(IntArrayList.from(6, 4, 2, 0), result.getBiggestComponent());
    assertEquals(6, result.getSingleEdgeComponents().cardinality());
    for (IntCursor c : IntArrayList.from(1, 3, 5, 7, 8, 9)) {
        assertTrue(result.getSingleEdgeComponents().get(c.value));
    }
    // now lets try with a restricted turn
    result = EdgeBasedTarjanSCC.findComponentsRecursive(g,
            (prev, edge) -> fwdAccessFilter.accept(prev, edge) && !(prev == 1 && edge.getBaseNode() == 2 && edge.getEdge() == 2), false);
    // none of the edges are strongly connected anymore!
    assertEquals(10, result.getTotalComponents());
    assertEquals(0, result.getComponents().size());
    assertEquals(IntArrayList.from(), result.getBiggestComponent());
    assertEquals(10, result.getSingleEdgeComponents().cardinality());
    for (IntCursor c : IntArrayList.from(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)) {
        assertTrue(result.getSingleEdgeComponents().get(c.value));
    }
}
|
/**
 * Reads the content of the named file under the given path.
 * Returns {@code null} when the file does not exist.
 */
public static String readFile(String path, String fileName) {
    final File file = openFile(path, fileName);
    // Existence check first; callers receive null rather than an exception.
    return file.exists() ? readFile(file) : null;
}
|
@Test
// Reading an existing file through the InputStream overload must return content.
void testReadFileWithInputStream() throws FileNotFoundException {
    assertNotNull(DiskUtils.readFile(new FileInputStream(testFile)));
}
|
@Override
// Intercepts SAML validation callbacks (identified by the RelayState) and
// responds with a self-submitting redirection page; all other requests pass
// through. The SAMLResponse is HTML-escaped before being embedded, and a CSP
// nonce is added to the response headers to restrict inline script execution.
public void doFilter(HttpRequest request, HttpResponse response, FilterChain chain) throws IOException {
  String relayState = request.getParameter(RELAY_STATE_PARAMETER);
  if (isSamlValidation(relayState)) {
    URI redirectionEndpointUrl = URI.create(server.getContextPath() + "/")
      .resolve(SAML_VALIDATION_CONTROLLER_CONTEXT + "/")
      .resolve(SAML_VALIDATION_KEY);
    // Escape to prevent the SAML payload from injecting markup into the page.
    String samlResponse = StringEscapeUtils.escapeHtml3(request.getParameter(SAML_RESPONSE_PARAMETER));
    String csrfToken = getCsrfTokenFromRelayState(relayState);
    String nonce = SamlValidationCspHeaders.addCspHeadersWithNonceToResponse(response);
    // Fill the static HTML template's placeholders with the computed values.
    String template = StringUtils.replaceEachRepeatedly(redirectionPageTemplate,
      new String[]{"%NONCE%", "%WEB_CONTEXT%", "%VALIDATION_URL%", "%SAML_RESPONSE%", "%CSRF_TOKEN%"},
      new String[]{nonce, server.getContextPath(), redirectionEndpointUrl.toString(), samlResponse, csrfToken});
    response.setContentType("text/html");
    response.getWriter().print(template);
    return;
  }
  chain.doFilter(request, response);
}
|
@Test
// A malicious SAMLResponse containing script markup must be escaped out of
// the produced HTML, CSP headers must be set, and the form must target the
// validation endpoint.
public void do_filter_validation_wrong_SAML_response() throws IOException {
  HttpRequest servletRequest = mock(HttpRequest.class);
  HttpResponse servletResponse = mock(HttpResponse.class);
  FilterChain filterChain = mock(FilterChain.class);
  String maliciousSaml = "test\"</input><script>/*hack website*/</script><input value=\"";
  when(servletRequest.getParameter(matches("SAMLResponse"))).thenReturn(maliciousSaml);
  when(servletRequest.getParameter(matches("RelayState"))).thenReturn("validation-query/CSRF_TOKEN");
  PrintWriter pw = mock(PrintWriter.class);
  when(servletResponse.getWriter()).thenReturn(pw);
  underTest.doFilter(servletRequest, servletResponse, filterChain);
  ArgumentCaptor<String> htmlProduced = ArgumentCaptor.forClass(String.class);
  verify(pw).print(htmlProduced.capture());
  CSP_HEADERS.forEach(h -> verify(servletResponse).setHeader(eq(h), anyString()));
  // The raw script tag must never appear in the rendered page.
  assertThat(htmlProduced.getValue()).doesNotContain("<script>/*hack website*/</script>");
  assertThat(htmlProduced.getValue()).contains("action=\"contextPath/saml/validation\"");
}
|
// Converts a Beam Row into a Pub/Sub Lite message. The row is first cast to
// the handler's schema; optional fields (key, event timestamp, attributes)
// are copied only when present and non-null. The payload is taken either as
// raw bytes or, when a serializer is configured, as a nested row to serialize.
PubSubMessage rowToMessage(Row row) {
  row = castRow(row, row.getSchema(), schema);
  PubSubMessage.Builder builder = PubSubMessage.newBuilder();
  if (schema.hasField(MESSAGE_KEY_FIELD)) {
    byte[] bytes = row.getBytes(MESSAGE_KEY_FIELD);
    if (bytes != null) {
      builder.setKey(ByteString.copyFrom(bytes));
    }
  }
  if (schema.hasField(EVENT_TIMESTAMP_FIELD)) {
    ReadableDateTime time = row.getDateTime(EVENT_TIMESTAMP_FIELD);
    if (time != null) {
      // Millisecond precision; finer precision in the source is not preserved.
      builder.setEventTime(Timestamps.fromMillis(time.getMillis()));
    }
  }
  if (schema.hasField(ATTRIBUTES_FIELD)) {
    Collection<Row> attributes = row.getArray(ATTRIBUTES_FIELD);
    if (attributes != null) {
      // Each attribute entry is a (key, values[]) pair; keys and values are required.
      attributes.forEach(
          entry -> {
            AttributeValues.Builder valuesBuilder = AttributeValues.newBuilder();
            Collection<byte[]> values =
                checkArgumentNotNull(entry.getArray(ATTRIBUTES_VALUES_FIELD));
            values.forEach(bytes -> valuesBuilder.addValues(ByteString.copyFrom(bytes)));
            builder.putAttributes(
                checkArgumentNotNull(entry.getString(ATTRIBUTES_KEY_FIELD)),
                valuesBuilder.build());
          });
    }
  }
  if (payloadSerializer == null) {
    // Raw-bytes payload mode.
    byte[] payload = row.getBytes(PAYLOAD_FIELD);
    if (payload != null) {
      builder.setData(ByteString.copyFrom(payload));
    }
  } else {
    // Structured payload mode: serialize the nested row.
    Row payload = row.getRow(PAYLOAD_FIELD);
    if (payload != null) {
      builder.setData(ByteString.copyFrom(payloadSerializer.serialize(payload)));
    }
  }
  return builder.build();
}
|
@Test
// Failure modes of rowToMessage: an uncastable row and a serializer error
// must both surface as IllegalArgumentException.
public void rowToMessageFailures() {
  Schema payloadSchema = Schema.builder().addStringField("def").build();
  Schema schema = Schema.builder().addRowField(RowHandler.PAYLOAD_FIELD, payloadSchema).build();
  RowHandler rowHandler = new RowHandler(schema, serializer);
  // badRow cannot be cast to schema
  Schema badRowSchema = Schema.builder().addStringField("xxx").build();
  Row badRow =
      Row.withSchema(badRowSchema).attachValues(Row.withSchema(badRowSchema).attachValues("abc"));
  assertThrows(IllegalArgumentException.class, () -> rowHandler.rowToMessage(badRow));
  // goodRow casts fine, but the stubbed serializer throws.
  Row goodRow =
      Row.withSchema(schema).addValue(Row.withSchema(payloadSchema).attachValues("abc")).build();
  doThrow(new IllegalArgumentException("")).when(serializer).serialize(any());
  assertThrows(IllegalArgumentException.class, () -> rowHandler.rowToMessage(goodRow));
}
|
/**
 * Returns a defensive copy of the recorded values; callers may mutate the returned
 * array without affecting this snapshot.
 */
@Override
public long[] getValues() {
    return values.clone();
}
|
/** The snapshot exposes exactly the five seeded values (order-insensitive check). */
@Test
public void hasValues() {
    assertThat(snapshot.getValues())
        .containsOnly(1, 2, 3, 4, 5);
}
|
/**
 * Injects the value held by {@code inspector} through {@code inserter}.
 *
 * <p>Invalid inspectors are silently ignored: nothing is inserted and no error is raised.
 */
public void inject(Inspector inspector, Inserter inserter) {
    if (!inspector.valid()) {
        return; // nothing to inject
    }
    injectValue(inserter, inspector, null);
}
|
/**
 * Injecting from an invalid inspector (here: a field of a nix value) must be a no-op:
 * the target array's entry count stays unchanged after the last inject call.
 */
@Test
public void invalidInjectionIsIgnored() {
    inject(f1.arrayValue.get(), new SlimeInserter(f2.slime1));
    assertEquals(3, f2.slime1.get().entries());
    inject(f1.longValue.get(), new ArrayInserter(f2.slime1.get()));
    assertEquals(4, f2.slime1.get().entries());
    inject(f1.doubleValue.get(), new ArrayInserter(f2.slime1.get()));
    assertEquals(5, f2.slime1.get().entries());
    // field("bogus") on a nix value yields an invalid inspector -> injection ignored.
    inject(f1.nixValue.get().field("bogus"), new ArrayInserter(f2.slime1.get()));
    assertEquals(5, f2.slime1.get().entries());
}
|
/**
 * Checks whether the current connection principal owns the given transaction.
 *
 * <p>Must be invoked from the channel's event loop (asserted). If the principal is not the
 * owner, superuser status is consulted as a fallback — but only when both authentication and
 * authorization are enabled; otherwise non-owners are rejected outright.
 *
 * @param txnID the transaction to check
 * @return future resolving to {@code true} when access should be granted
 */
private CompletableFuture<Boolean> verifyTxnOwnership(TxnID txnID) {
    assert ctx.executor().inEventLoop();
    return service.pulsar().getTransactionMetadataStoreService()
            .verifyTxnOwnership(txnID, getPrincipal())
            .thenComposeAsync(isOwner -> {
                if (isOwner) {
                    return CompletableFuture.completedFuture(true);
                }
                if (service.isAuthenticationEnabled() && service.isAuthorizationEnabled()) {
                    // Non-owner may still proceed if it is a superuser.
                    return isSuperUser();
                } else {
                    return CompletableFuture.completedFuture(false);
                }
            }, ctx.executor()); // continue on the event loop to keep state access single-threaded
}
|
/**
 * When the metadata store fails to add an acked partition, the broker must answer the
 * ADD_SUBSCRIPTION_TO_TXN command with a response that echoes the request/txn ids and
 * carries the server error message.
 */
@Test(timeOut = 30000)
public void sendAddSubscriptionToTxnResponseFailed() throws Exception {
    final TransactionMetadataStoreService txnStore = mock(TransactionMetadataStoreService.class);
    when(txnStore.getTxnMeta(any())).thenReturn(CompletableFuture.completedFuture(mock(TxnMeta.class)));
    when(txnStore.verifyTxnOwnership(any(), any())).thenReturn(CompletableFuture.completedFuture(true));
    // Force the failure under test: adding the partition rejects with a server error.
    when(txnStore.addAckedPartitionToTxn(any(TxnID.class), any()))
            .thenReturn(CompletableFuture.failedFuture(new RuntimeException("server error")));
    when(pulsar.getTransactionMetadataStoreService()).thenReturn(txnStore);
    svcConfig.setTransactionCoordinatorEnabled(true);
    resetChannel();
    setChannelConnected();
    final Subscription sub = new Subscription();
    sub.setTopic("topic1");
    sub.setSubscription("sub1");
    ByteBuf clientCommand = Commands.newAddSubscriptionToTxn(89L, 1L, 12L,
            List.of(sub));
    channel.writeInbound(clientCommand);
    CommandAddSubscriptionToTxnResponse response = (CommandAddSubscriptionToTxnResponse) getResponse();
    assertEquals(response.getRequestId(), 89L);
    assertEquals(response.getTxnidLeastBits(), 1L);
    assertEquals(response.getTxnidMostBits(), 12L);
    assertEquals(response.getError().getValue(), 0);
    assertEquals(response.getMessage(), "server error");
    channel.finish();
}
|
/**
 * Guards against empty notification lists: when {@code urls} is null or empty, returns a
 * single-element list holding {@code url} with its protocol switched to the empty protocol,
 * which signals "no providers" to subscribers. Otherwise returns {@code urls} unchanged.
 *
 * @param url  the subscribed URL used to build the empty-protocol placeholder
 * @param urls the candidate URLs, possibly null or empty
 * @return a non-empty list of URLs
 */
protected static List<URL> filterEmpty(URL url, List<URL> urls) {
    if (!CollectionUtils.isEmpty(urls)) {
        return urls;
    }
    List<URL> placeholder = new ArrayList<>(1);
    placeholder.add(url.setProtocol(EMPTY_PROTOCOL));
    return placeholder;
}
|
/**
 * filterEmpty contract: null url with empty/null list throws NPE; empty or null lists are
 * replaced by a single empty-protocol URL; non-empty lists pass through unchanged.
 */
@Test
void filterEmptyTest() {
    // check parameters: a null url must fail fast when a placeholder would be built
    try {
        AbstractRegistry.filterEmpty(null, null);
        Assertions.fail();
    } catch (Exception e) {
        Assertions.assertTrue(e instanceof NullPointerException);
    }
    // check parameters: same with an empty (non-null) list
    List<URL> urls = new ArrayList<>();
    try {
        AbstractRegistry.filterEmpty(null, urls);
        Assertions.fail();
    } catch (Exception e) {
        Assertions.assertTrue(e instanceof NullPointerException);
    }
    // check if the output is generated by a fixed way
    urls.add(testUrl.setProtocol(EMPTY_PROTOCOL));
    Assertions.assertEquals(AbstractRegistry.filterEmpty(testUrl, null), urls);
    List<URL> testUrls = new ArrayList<>();
    Assertions.assertEquals(AbstractRegistry.filterEmpty(testUrl, testUrls), urls);
    // check if the output equals the input urls
    testUrls.add(testUrl);
    Assertions.assertEquals(AbstractRegistry.filterEmpty(testUrl, testUrls), testUrls);
}
|
/**
 * Translates a single pushed-down predicate conjunct into an Elasticsearch query builder
 * by dispatching through the expression visitor.
 *
 * @param conjunct the predicate expression to translate
 * @return the equivalent ES query DSL builder
 */
public QueryBuilders.QueryBuilder convert(Expr conjunct) {
    return visit(conjunct);
}
|
/**
 * AND/OR/NOT compound predicates must map to ES bool queries with must/should/must_not
 * clauses respectively, wrapping the translated child predicates.
 */
@Test
public void testTranslateCompoundPredicate() {
    SlotRef col1SlotRef = mockSlotRef("col1", Type.INT);
    IntLiteral intLiteral1 = new IntLiteral(100);
    SlotRef col2SlotRef = mockSlotRef("col2", Type.INT);
    IntLiteral intLiteral2 = new IntLiteral(200);
    BinaryPredicate bp1 = new BinaryPredicate(BinaryType.EQ, col1SlotRef, intLiteral1);
    BinaryPredicate bp2 = new BinaryPredicate(BinaryType.GT, col2SlotRef, intLiteral2);
    // AND -> bool.must
    CompoundPredicate andPredicate =
            new CompoundPredicate(CompoundPredicate.Operator.AND, bp1, bp2);
    Assert.assertEquals("{\"bool\":{\"must\":[{\"term\":{\"col1\":100}},{\"range\":{\"col2\":{\"gt\":200}}}]}}",
            queryConverter.convert(andPredicate).toString());
    // OR -> bool.should
    CompoundPredicate orPredicate =
            new CompoundPredicate(CompoundPredicate.Operator.OR, bp1, bp2);
    Assert.assertEquals("{\"bool\":{\"should\":[{\"term\":{\"col1\":100}},{\"range\":{\"col2\":{\"gt\":200}}}]}}",
            queryConverter.convert(orPredicate).toString());
    // NOT (unary) -> bool.must_not
    CompoundPredicate notPredicate = new CompoundPredicate(CompoundPredicate.Operator.NOT, bp2, null);
    Assert.assertEquals("{\"bool\":{\"must_not\":{\"range\":{\"col2\":{\"gt\":200}}}}}",
            queryConverter.convert(notPredicate).toString());
}
|
/**
 * Returns the queue listener of the wrapped consumer, or {@code null} when no consumer
 * has been attached yet.
 */
public MessageQueueListener getMessageQueueListener() {
    return defaultMQPushConsumer == null ? null : defaultMQPushConsumer.getMessageQueueListener();
}
|
/** With no listener configured on the consumer, the accessor must return null. */
@Test
public void testGetMessageQueueListener() {
    assertNull(defaultMQPushConsumerImpl.getMessageQueueListener());
}
|
/**
 * Blocking poll-last-and-offer-first: removes the tail of this queue (waiting up to
 * {@code timeout}) and inserts it at the head of the queue named {@code queueName}.
 * Delegates to the async variant and waits interruptibly for its result.
 *
 * @param queueName name of the destination queue
 * @param timeout maximum time to wait for an element
 * @param unit time unit of {@code timeout}
 * @return the result of the async counterpart (the transferred element)
 * @throws InterruptedException if the calling thread is interrupted while waiting
 */
@Override
public V pollLastAndOfferFirstTo(String queueName, long timeout, TimeUnit unit) throws InterruptedException {
    return commandExecutor.getInterrupted(pollLastAndOfferFirstToAsync(queueName, timeout, unit));
}
|
/**
 * pollLastAndOfferFirstTo must block until an element arrives in the source queue, then
 * transfer it to the head of the destination queue.
 *
 * <p>Fixes over the previous version: the scheduler thread is shut down after the test
 * (it used to leak), and an InterruptedException in the scheduled task restores the
 * interrupt flag instead of being swallowed by printStackTrace.
 */
@Test
public void testPollLastAndOfferFirstTo() throws InterruptedException {
    final RBoundedBlockingQueue<Integer> queue1 = redisson.getBoundedBlockingQueue("{queue}1");
    queue1.trySetCapacity(10);
    // Deliver an element to queue1 only after the poll below has started blocking.
    java.util.concurrent.ScheduledExecutorService scheduler =
            Executors.newSingleThreadScheduledExecutor();
    scheduler.schedule(() -> {
        try {
            queue1.put(3);
        } catch (InterruptedException e) {
            // Re-interrupt instead of swallowing; the main assertion will then time out and fail.
            Thread.currentThread().interrupt();
        }
    }, 10, TimeUnit.SECONDS);
    try {
        RBoundedBlockingQueue<Integer> queue2 = redisson.getBoundedBlockingQueue("{queue}2");
        queue2.trySetCapacity(10);
        queue2.put(4);
        queue2.put(5);
        queue2.put(6);
        // Blocks until the scheduled put delivers 3, then moves it to the head of queue2.
        Integer value = queue1.pollLastAndOfferFirstTo(queue2.getName(), 10, TimeUnit.SECONDS);
        assertThat(value).isEqualTo(3);
        assertThat(queue2).containsExactly(3, 4, 5, 6);
    } finally {
        scheduler.shutdownNow(); // do not leak the scheduler thread between tests
    }
}
|
/**
 * Queries the topics associated with a consumer from the broker at {@code address}.
 *
 * <p>The returned future completes with the decoded {@code TopicList} on a SUCCESS response,
 * or exceptionally with an {@link MQClientException} for any other response code. Transport
 * failures from the underlying invoke are now propagated as-is — previously a failed invoke
 * left the returned future forever incomplete because {@code thenAccept} never runs on an
 * exceptionally completed stage.
 *
 * @param address broker address to query
 * @param requestHeader request header identifying the consumer
 * @param timeoutMillis RPC timeout in milliseconds
 * @return future resolving to the topic list
 */
@Override
public CompletableFuture<TopicList> queryTopicsByConsumer(String address,
    QueryTopicsByConsumerRequestHeader requestHeader, long timeoutMillis) {
    CompletableFuture<TopicList> future = new CompletableFuture<>();
    RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.QUERY_TOPICS_BY_CONSUMER, requestHeader);
    remotingClient.invoke(address, request, timeoutMillis).whenComplete((response, throwable) -> {
        if (throwable != null) {
            // Bug fix: propagate transport/timeout failures so callers never hang.
            future.completeExceptionally(throwable);
            return;
        }
        if (response.getCode() == ResponseCode.SUCCESS) {
            TopicList topicList = TopicList.decode(response.getBody(), TopicList.class);
            future.complete(topicList);
        } else {
            log.warn("queryTopicsByConsumer getResponseCommand failed, {} {}", response.getCode(), response.getRemark());
            future.completeExceptionally(new MQClientException(response.getCode(), response.getRemark()));
        }
    });
    return future;
}
|
/**
 * A SUCCESS remoting response carrying an (empty) encoded TopicList must decode into a
 * non-null, empty topic list on the returned future.
 */
@Test
public void assertQueryTopicsByConsumerWithSuccess() throws Exception {
    TopicList responseBody = new TopicList();
    setResponseSuccess(RemotingSerializable.encode(responseBody));
    QueryTopicsByConsumerRequestHeader requestHeader = mock(QueryTopicsByConsumerRequestHeader.class);
    CompletableFuture<TopicList> actual = mqClientAdminImpl.queryTopicsByConsumer(defaultBrokerAddr, requestHeader, defaultTimeout);
    TopicList result = actual.get();
    assertNotNull(result);
    assertEquals(0, result.getTopicList().size());
}
|
/**
 * Returns a description of this topology's processor graph, delegating to the internal
 * builder. Synchronized to serialize access with other synchronized topology methods
 * that mutate the shared builder.
 */
public synchronized TopologyDescription describe() {
    return internalTopologyBuilder.describe();
}
|
/**
 * A sliding-windowed cogrouped aggregation with a named in-memory store must describe to the
 * expected source -> aggregate -> merge topology, and the built topology must report no
 * persistent local store (the store is in-memory).
 */
@Test
public void slidingWindowedCogroupedNamedMaterializedCountShouldPreserveTopologyStructure() {
    final StreamsBuilder builder = new StreamsBuilder();
    builder.stream("input-topic")
        .groupByKey()
        .cogroup((key, value, aggregate) -> value)
        .windowedBy(SlidingWindows.ofTimeDifferenceWithNoGrace(ofMillis(1)))
        .aggregate(() -> "", Materialized.<Object, Object, WindowStore<Bytes, byte[]>>as("aggregate-store")
            .withStoreType(Materialized.StoreType.IN_MEMORY));
    final Topology topology = builder.build();
    final TopologyDescription describe = topology.describe();
    assertEquals(
        "Topologies:\n" +
            "   Sub-topology: 0\n" +
            "    Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
            "      --> COGROUPKSTREAM-AGGREGATE-0000000001\n" +
            "    Processor: COGROUPKSTREAM-AGGREGATE-0000000001 (stores: [aggregate-store])\n" +
            "      --> COGROUPKSTREAM-MERGE-0000000002\n" +
            "      <-- KSTREAM-SOURCE-0000000000\n" +
            "    Processor: COGROUPKSTREAM-MERGE-0000000002 (stores: [])\n" +
            "      --> none\n" +
            "      <-- COGROUPKSTREAM-AGGREGATE-0000000001\n\n",
        describe.toString()
    );
    topology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
    // In-memory store => no persistent local store in the built topology.
    assertThat(topology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(false));
}
|
/**
 * Builds a {@link HashMap} from an iterable of map entries, delegating to
 * {@code IterUtil.toMap} (see that method for null and duplicate-key handling).
 *
 * @param <K> key type
 * @param <V> value type
 * @param entryIter the entries to collect
 * @return a new mutable map containing the given entries
 */
public static <K, V> HashMap<K, V> toMap(Iterable<Entry<K, V>> entryIter) {
    return IterUtil.toMap(entryIter);
}
|
/** toMap with a key-mapper: each value maps to key "key"+value, value preserved. */
@Test
public void toMapTest() {
    final Collection<String> keys = CollUtil.newArrayList("a", "b", "c", "d");
    final Map<String, String> map = CollUtil.toMap(keys, new HashMap<>(), (value) -> "key" + value);
    assertEquals("a", map.get("keya"));
    assertEquals("b", map.get("keyb"));
    assertEquals("c", map.get("keyc"));
    assertEquals("d", map.get("keyd"));
}
|
/**
 * Creates a new JTidy parser configured for UTF-8 in/out, quiet operation and
 * suppressed warnings. A fresh instance is returned on every call.
 *
 * @return a newly configured {@link Tidy} parser
 */
public static Tidy getParser() {
    log.debug("Start : getParser1");
    final String utf8 = StandardCharsets.UTF_8.name();
    final Tidy parser = new Tidy();
    parser.setInputEncoding(utf8);
    parser.setOutputEncoding(utf8);
    parser.setQuiet(true);
    parser.setShowWarnings(false);
    if (log.isDebugEnabled()) {
        log.debug("getParser1 : tidy parser created - {}", parser);
    }
    log.debug("End : getParser1");
    return parser;
}
|
/** Smoke test: parser construction and configuration must not throw. */
@Test
public void testGetParser() throws Exception {
    HtmlParsingUtils.getParser();
}
|
/**
 * Submits a Spark ETL job for a load job: uploads the job config, launches spark-submit via
 * {@code SparkLauncher}, waits for the app to reach a stable state, and records the resulting
 * application id in {@code attachment}.
 *
 * <p>Side effects: deletes any previous ETL output path, writes the job config JSON to
 * HDFS/broker storage, starts an external process, and may kill the YARN application on
 * cancellation.
 *
 * @param loadJobId              id of the load job (used in log paths and error messages)
 * @param loadLabel              label of the load job (used in app name and log file name)
 * @param etlJobConfig           ETL job configuration; serialized to JSON and shipped to Spark
 * @param resource               Spark resource holding master/deploy-mode/conf and archives
 * @param brokerDesc             broker description used for remote file access
 * @param handle                 handle that tracks the launched process and its state
 * @param attachment             receives the app id and handle on success
 * @param sparkLoadSubmitTimeout max time to wait for submission, in the unit expected by the
 *                               log monitor (passed through as millis — confirm with caller)
 * @throws LoadException if config upload, launch, monitoring, or the app itself fails
 */
public void submitEtlJob(long loadJobId, String loadLabel, EtlJobConfig etlJobConfig, SparkResource resource,
                         BrokerDesc brokerDesc, SparkLoadAppHandle handle, SparkPendingTaskAttachment attachment,
                         Long sparkLoadSubmitTimeout)
        throws LoadException {
    // delete outputPath from any previous attempt so results are not mixed
    deleteEtlOutputPath(etlJobConfig.outputPath, brokerDesc);
    // init local dir
    if (!FeConstants.runningUnitTest) {
        initLocalDir();
    }
    // prepare dpp archive
    SparkRepository.SparkArchive archive = resource.prepareArchive();
    SparkRepository.SparkLibrary dppLibrary = archive.getDppLibrary();
    SparkRepository.SparkLibrary spark2xLibrary = archive.getSpark2xLibrary();
    // spark home
    String sparkHome = Config.spark_home_default_dir;
    // etl config path
    String configsHdfsDir = etlJobConfig.outputPath + "/" + JOB_CONFIG_DIR + "/";
    // etl config json path
    String jobConfigHdfsPath = configsHdfsDir + CONFIG_FILE_NAME;
    // spark submit app resource path
    String appResourceHdfsPath = dppLibrary.remotePath;
    // spark yarn archive path
    String jobArchiveHdfsPath = spark2xLibrary.remotePath;
    // spark yarn stage dir
    String jobStageHdfsPath = resource.getWorkingDir();
    // spark launcher log path
    String logFilePath = Config.spark_launcher_log_dir + "/" + String.format(LAUNCHER_LOG, loadJobId, loadLabel);
    // update archive and stage configs here; user-provided values take precedence
    Map<String, String> sparkConfigs = resource.getSparkConfigs();
    if (Strings.isNullOrEmpty(sparkConfigs.get("spark.yarn.archive"))) {
        sparkConfigs.put("spark.yarn.archive", jobArchiveHdfsPath);
    }
    if (Strings.isNullOrEmpty(sparkConfigs.get("spark.yarn.stage.dir"))) {
        sparkConfigs.put("spark.yarn.stage.dir", jobStageHdfsPath);
    }
    // ship the job config JSON so the remote SparkEtlJob can read it
    try {
        byte[] configData = etlJobConfig.configToJson().getBytes(StandardCharsets.UTF_8);
        if (brokerDesc.hasBroker()) {
            BrokerUtil.writeFile(configData, jobConfigHdfsPath, brokerDesc);
        } else {
            HdfsUtil.writeFile(configData, jobConfigHdfsPath, brokerDesc);
        }
    } catch (UserException e) {
        throw new LoadException(e.getMessage());
    }
    SparkLauncher launcher = new SparkLauncher();
    // master      |  deployMode
    // ------------|-------------
    // yarn        |  cluster
    // spark://xx  |  client
    launcher.setMaster(resource.getMaster())
            .setDeployMode(resource.getDeployMode().name().toLowerCase())
            .setAppResource(appResourceHdfsPath)
            .setMainClass(SparkEtlJob.class.getCanonicalName())
            .setAppName(String.format(ETL_JOB_NAME, loadLabel))
            .setSparkHome(sparkHome)
            .addAppArgs(jobConfigHdfsPath)
            .redirectError();
    // spark configs
    for (Map.Entry<String, String> entry : resource.getSparkConfigs().entrySet()) {
        launcher.setConf(entry.getKey(), entry.getValue());
    }
    // start app and monitor its launcher output until the app id/state are known
    State state = null;
    String appId = null;
    String logPath = null;
    String errMsg = "start spark app failed. error: ";
    try {
        Process process = launcher.launch();
        handle.setProcess(process);
        if (!FeConstants.runningUnitTest) {
            SparkLauncherMonitor.LogMonitor logMonitor = SparkLauncherMonitor.createLogMonitor(handle);
            logMonitor.setSubmitTimeoutMs(sparkLoadSubmitTimeout);
            logMonitor.setRedirectLogPath(logFilePath);
            logMonitor.start();
            try {
                logMonitor.join();
            } catch (InterruptedException e) {
                logMonitor.interrupt();
                throw new LoadException(errMsg + e.getMessage());
            }
        }
        appId = handle.getAppId();
        state = handle.getState();
        logPath = handle.getLogPath();
    } catch (IOException e) {
        LOG.warn(errMsg, e);
        throw new LoadException(errMsg + e.getMessage());
    }
    // a cancelled/failed app is surfaced as a LoadException; if the app was killed,
    // best-effort kill the YARN application as well
    if (fromSparkState(state) == TEtlState.CANCELLED) {
        if (state == State.KILLED) {
            try {
                killYarnApplication(appId, loadJobId, resource);
            } catch (UserException e) {
                LOG.warn(errMsg, e);
            }
        }
        throw new LoadException(
                errMsg + "spark app state: " + state.toString() + ", loadJobId:" + loadJobId + ", logPath:" +
                        logPath);
    }
    if (appId == null) {
        throw new LoadException(errMsg + "Waiting too much time to get appId from handle. spark app state: "
                + state.toString() + ", loadJobId:" + loadJobId);
    }
    // success
    attachment.setAppId(appId);
    attachment.setHandle(handle);
}
|
/**
 * When the launched Spark app ends in FAILED state, submitEtlJob must raise a LoadException
 * (the launcher/process interactions are mocked so no real Spark is started).
 */
@Test(expected = LoadException.class)
public void testSubmitEtlJobFailed(@Mocked BrokerUtil brokerUtil, @Mocked SparkLauncher launcher,
                                   @Injectable Process process,
                                   @Mocked SparkLoadAppHandle handle) throws IOException, LoadException {
    new Expectations() {
        {
            launcher.launch();
            result = process;
            handle.getAppId();
            result = appId;
            // Force the failure path under test.
            handle.getState();
            result = SparkLoadAppHandle.State.FAILED;
        }
    };
    EtlJobConfig etlJobConfig = new EtlJobConfig(Maps.newHashMap(), etlOutputPath, label, null);
    SparkResource resource = new SparkResource(resourceName);
    new Expectations(resource) {
        {
            resource.prepareArchive();
            result = archive;
        }
    };
    Map<String, String> sparkConfigs = resource.getSparkConfigs();
    sparkConfigs.put("spark.master", "yarn");
    sparkConfigs.put("spark.submit.deployMode", "cluster");
    sparkConfigs.put("spark.hadoop.yarn.resourcemanager.address", "127.0.0.1:9999");
    BrokerDesc brokerDesc = new BrokerDesc(broker, Maps.newHashMap());
    SparkPendingTaskAttachment attachment = new SparkPendingTaskAttachment(pendingTaskId);
    SparkEtlJobHandler handler = new SparkEtlJobHandler();
    long sparkLoadSubmitTimeout = Config.spark_load_submit_timeout_second;
    handler.submitEtlJob(loadJobId, label, etlJobConfig, resource, brokerDesc, handle, attachment, sparkLoadSubmitTimeout);
}
|
/** Delegates the trace-level check to the wrapped SLF4J logger. */
@Override
public boolean isTraceEnabled() {
    return logger.isTraceEnabled();
}
|
/** The dashboard logger must forward isTraceEnabled to the underlying SLF4J logger. */
@Test
void testIsTraceEnabled() {
    jobRunrDashboardLogger.isTraceEnabled();
    verify(slfLogger).isTraceEnabled();
}
|
/**
 * Converts a year/month pair into a total month count ({@code year * 12 + months}).
 *
 * @param year number of years
 * @param months additional months
 * @return total months
 * @throws IllegalArgumentException if the result overflows an {@code int}; the
 *         underlying {@link ArithmeticException} is preserved as the cause
 */
public static int toMonths(int year, int months)
{
    try {
        int yearAsMonths = multiplyExact(year, 12);
        return addExact(yearAsMonths, months);
    }
    catch (ArithmeticException overflow) {
        throw new IllegalArgumentException(overflow);
    }
}
|
/** toMonths must reject a year count whose month conversion overflows an int. */
@Test(expectedExceptions = IllegalArgumentException.class)
public void testOverflow()
{
    // Smallest year count for which years * 12 exceeds Integer.MAX_VALUE.
    // (Renamed from the misleading "days": the first argument of toMonths is a year count.)
    int years = (Integer.MAX_VALUE / 12) + 1;
    toMonths(years, 0);
}
|
/**
 * Creates a ClickHouse write transform with the library defaults for insert block size,
 * backoff and retries. Insert deduplication and synchronous distributed inserts are
 * enabled by default; both can be overridden on the returned transform.
 *
 * @param jdbcUrl ClickHouse JDBC connection string
 * @param table destination table name
 * @param <T> element type being written
 * @return a configured {@code Write} transform
 */
public static <T> Write<T> write(String jdbcUrl, String table) {
  return new AutoValue_ClickHouseIO_Write.Builder<T>()
      .jdbcUrl(jdbcUrl)
      .table(table)
      .properties(new Properties())
      .maxInsertBlockSize(DEFAULT_MAX_INSERT_BLOCK_SIZE)
      .initialBackoff(DEFAULT_INITIAL_BACKOFF)
      .maxRetries(DEFAULT_MAX_RETRIES)
      .maxCumulativeBackoff(DEFAULT_MAX_CUMULATIVE_BACKOFF)
      .build()
      .withInsertDeduplicate(true)
      .withInsertDistributedSync(true);
}
|
/**
 * Round-trips one row covering every supported primitive ClickHouse column type
 * (dates, floats, signed/unsigned ints, strings, enums, fixed strings, bool,
 * low-cardinality string) through the write transform and verifies each column's
 * stored representation via a SELECT.
 */
@Test
public void testPrimitiveTypes() throws Exception {
  Schema schema =
      Schema.of(
          Schema.Field.of("f0", FieldType.DATETIME),
          Schema.Field.of("f1", FieldType.DATETIME),
          Schema.Field.of("f2", FieldType.FLOAT),
          Schema.Field.of("f3", FieldType.DOUBLE),
          Schema.Field.of("f4", FieldType.BYTE),
          Schema.Field.of("f5", FieldType.INT16),
          Schema.Field.of("f6", FieldType.INT32),
          Schema.Field.of("f7", FieldType.INT64),
          Schema.Field.of("f8", FieldType.STRING),
          Schema.Field.of("f9", FieldType.INT16),
          Schema.Field.of("f10", FieldType.INT32),
          Schema.Field.of("f11", FieldType.INT64),
          Schema.Field.of("f12", FieldType.INT64),
          Schema.Field.of("f13", FieldType.STRING),
          Schema.Field.of("f14", FieldType.STRING),
          Schema.Field.of("f15", FieldType.STRING),
          Schema.Field.of("f16", FieldType.BYTES),
          Schema.Field.of("f17", FieldType.logicalType(FixedBytes.of(3))),
          Schema.Field.of("f18", FieldType.BOOLEAN),
          Schema.Field.of("f19", FieldType.STRING));
  // One value per schema field, in declaration order.
  Row row1 =
      Row.withSchema(schema)
          .addValue(new DateTime(2030, 10, 1, 0, 0, 0, DateTimeZone.UTC))
          .addValue(new DateTime(2030, 10, 9, 8, 7, 6, DateTimeZone.UTC))
          .addValue(2.2f)
          .addValue(3.3)
          .addValue((byte) 4)
          .addValue((short) 5)
          .addValue(6)
          .addValue(7L)
          .addValue("eight")
          .addValue((short) 9)
          .addValue(10)
          .addValue(11L)
          .addValue(12L)
          .addValue("abc")
          .addValue("cde")
          .addValue("qwe")
          .addValue(new byte[] {'a', 's', 'd'})
          .addValue(new byte[] {'z', 'x', 'c'})
          .addValue(true)
          .addValue("lowcardenality")
          .build();
  executeSql(
      "CREATE TABLE test_primitive_types ("
          + "f0  Date,"
          + "f1  DateTime,"
          + "f2  Float32,"
          + "f3  Float64,"
          + "f4  Int8,"
          + "f5  Int16,"
          + "f6  Int32,"
          + "f7  Int64,"
          + "f8  String,"
          + "f9  UInt8,"
          + "f10 UInt16,"
          + "f11 UInt32,"
          + "f12 UInt64,"
          + "f13 Enum8('abc' = 1, 'cde' = 2),"
          + "f14 Enum16('abc' = -1, 'cde' = -2),"
          + "f15 FixedString(3),"
          + "f16 FixedString(3),"
          + "f17 FixedString(3),"
          + "f18 Bool,"
          + "f19 LowCardinality(String)"
          + ") ENGINE=Log");
  pipeline.apply(Create.of(row1).withRowSchema(schema)).apply(write("test_primitive_types"));
  pipeline.run().waitUntilFinish();
  // Verify the stored representation column by column.
  try (ResultSet rs = executeQuery("SELECT * FROM test_primitive_types")) {
    rs.next();
    assertEquals("2030-10-01", rs.getString("f0"));
    assertEquals("2030-10-09 08:07:06", rs.getString("f1"));
    assertEquals("2.2", rs.getString("f2"));
    assertEquals("3.3", rs.getString("f3"));
    assertEquals("4", rs.getString("f4"));
    assertEquals("5", rs.getString("f5"));
    assertEquals("6", rs.getString("f6"));
    assertEquals("7", rs.getString("f7"));
    assertEquals("eight", rs.getString("f8"));
    assertEquals("9", rs.getString("f9"));
    assertEquals("10", rs.getString("f10"));
    assertEquals("11", rs.getString("f11"));
    assertEquals("12", rs.getString("f12"));
    assertEquals("abc", rs.getString("f13"));
    assertEquals("cde", rs.getString("f14"));
    assertArrayEquals(new byte[] {'q', 'w', 'e'}, rs.getBytes("f15"));
    assertArrayEquals(new byte[] {'a', 's', 'd'}, rs.getBytes("f16"));
    assertArrayEquals(new byte[] {'z', 'x', 'c'}, rs.getBytes("f17"));
    assertEquals("true", rs.getString("f18"));
    assertEquals("lowcardenality", rs.getString("f19"));
  }
}
|
/**
 * Resolves the application's main class using a three-step strategy:
 * <ol>
 *   <li>an explicitly configured {@code mainClass} (must be a valid Java class name);</li>
 *   <li>the main class declared by the project's JAR plugin, if valid;</li>
 *   <li>scanning all class files for {@code main} methods.</li>
 * </ol>
 *
 * @param configuredMainClass main class from plugin configuration, or {@code null}
 * @param projectProperties project accessors and logging
 * @return the fully qualified main class name
 * @throws MainClassInferenceException if the configured class is invalid, no main class is
 *     found, or multiple candidates are found
 * @throws IOException if scanning the class files fails
 */
public static String resolveMainClass(
    @Nullable String configuredMainClass, ProjectProperties projectProperties)
    throws MainClassInferenceException, IOException {
  // Step 1: explicit configuration wins, but an invalid class name is a hard error.
  if (configuredMainClass != null) {
    if (isValidJavaClass(configuredMainClass)) {
      return configuredMainClass;
    }
    throw new MainClassInferenceException(
        HelpfulSuggestions.forMainClassNotFound(
            "'mainClass' configured in "
                + projectProperties.getPluginName()
                + " is not a valid Java class: "
                + configuredMainClass,
            projectProperties.getPluginName()));
  }
  projectProperties.log(
      LogEvent.info(
          "Searching for main class... Add a 'mainClass' configuration to '"
              + projectProperties.getPluginName()
              + "' to improve build speed."));
  // Step 2: fall back to the JAR plugin's declared main class; warn (don't fail) if invalid.
  String mainClassFromJarPlugin = projectProperties.getMainClassFromJarPlugin();
  if (mainClassFromJarPlugin != null && isValidJavaClass(mainClassFromJarPlugin)) {
    return mainClassFromJarPlugin;
  }
  if (mainClassFromJarPlugin != null) {
    projectProperties.log(
        LogEvent.warn(
            "'mainClass' configured in "
                + projectProperties.getJarPluginName()
                + " is not a valid Java class: "
                + mainClassFromJarPlugin));
  }
  projectProperties.log(
      LogEvent.info(
          "Could not find a valid main class from "
              + projectProperties.getJarPluginName()
              + "; looking into all class files to infer main class."));
  // Step 3: infer by scanning every class file; exactly one candidate must be found.
  MainClassFinder.Result mainClassFinderResult =
      MainClassFinder.find(projectProperties.getClassFiles(), projectProperties::log);
  switch (mainClassFinderResult.getType()) {
    case MAIN_CLASS_FOUND:
      return mainClassFinderResult.getFoundMainClass();
    case MAIN_CLASS_NOT_FOUND:
      throw new MainClassInferenceException(
          HelpfulSuggestions.forMainClassNotFound(
              "Main class was not found", projectProperties.getPluginName()));
    case MULTIPLE_MAIN_CLASSES:
      throw new MainClassInferenceException(
          HelpfulSuggestions.forMainClassNotFound(
              "Multiple valid main classes were found: "
                  + String.join(", ", mainClassFinderResult.getFoundMainClasses()),
              projectProperties.getPluginName()));
    default:
      throw new IllegalStateException("Cannot reach here");
  }
}
|
/**
 * With an invalid jar-plugin main class (an unresolved property placeholder) and multiple
 * inferred candidates, resolution must fail and log the search/warning messages.
 */
@Test
public void testResolveMainClass_multipleInferredWithInvalidMainClassFromJarPlugin()
    throws URISyntaxException, IOException {
  Mockito.when(mockProjectProperties.getMainClassFromJarPlugin()).thenReturn("${start-class}");
  Mockito.when(mockProjectProperties.getClassFiles())
      .thenReturn(
          new DirectoryWalker(
                  Paths.get(Resources.getResource("core/class-finder-tests/multiple").toURI()))
              .walk());
  try {
    MainClassResolver.resolveMainClass(null, mockProjectProperties);
    Assert.fail();
  } catch (MainClassInferenceException ex) {
    MatcherAssert.assertThat(
        ex.getMessage(),
        CoreMatchers.containsString(
            "Multiple valid main classes were found: HelloWorld, multi.layered.HelloMoon"));
    // The full resolution path must have been logged: search notice, scan notice, and the
    // warning about the invalid jar-plugin value.
    String info1 =
        "Searching for main class... Add a 'mainClass' configuration to 'jib-plugin' to "
            + "improve build speed.";
    String info2 =
        "Could not find a valid main class from jar-plugin; looking into all class files to "
            + "infer main class.";
    String warn =
        "'mainClass' configured in jar-plugin is not a valid Java class: ${start-class}";
    Mockito.verify(mockProjectProperties).log(LogEvent.info(info1));
    Mockito.verify(mockProjectProperties).log(LogEvent.info(info2));
    Mockito.verify(mockProjectProperties).log(LogEvent.warn(warn));
  }
}
|
/**
 * Creates a runner factory for a simple value-to-value mapping function, wrapping the
 * supplied factory in a compressed value-only mapper.
 *
 * @param fnFactory produces the mapping function for a given PTransform
 * @param <InputT> input value type
 * @param <OutputT> output value type
 * @return a runner factory applying the mapping per element
 */
public static <InputT, OutputT> PTransformRunnerFactory<?> forValueMapFnFactory(
    ValueMapFnFactory<InputT, OutputT> fnFactory) {
  return new Factory<>(new CompressedValueOnlyMapperFactory<>(fnFactory));
}
|
/**
 * A value-only map runner must register no bundle/teardown hooks, wire input to output
 * consumers, and apply the function (here: uppercasing) to each element's value.
 */
@Test
public void testValueOnlyMapping() throws Exception {
  PTransformRunnerFactoryTestContext context =
      PTransformRunnerFactoryTestContext.builder(EXPECTED_ID, EXPECTED_PTRANSFORM)
          .processBundleInstructionId("57")
          .pCollections(Collections.singletonMap("inputPC", INPUT_PCOLLECTION))
          .coders(Collections.singletonMap("coder-id", valueCoder))
          .build();
  List<WindowedValue<?>> outputConsumer = new ArrayList<>();
  context.addPCollectionConsumer("outputPC", outputConsumer::add);
  ValueMapFnFactory<String, String> factory = (ptId, pt) -> String::toUpperCase;
  MapFnRunners.forValueMapFnFactory(factory).createRunnerForPTransform(context);
  // A pure value mapper needs no lifecycle hooks.
  assertThat(context.getStartBundleFunctions(), empty());
  assertThat(context.getFinishBundleFunctions(), empty());
  assertThat(context.getTearDownFunctions(), empty());
  assertThat(
      context.getPCollectionConsumers().keySet(), containsInAnyOrder("inputPC", "outputPC"));
  context.getPCollectionConsumer("inputPC").accept(valueInGlobalWindow("abc"));
  assertThat(outputConsumer, contains(valueInGlobalWindow("ABC")));
}
|
/**
 * Decides whether a message should be stored for offline delivery, per XEP-0160 with the
 * XEP-0334 {@code <no-store/>} hint and the OF-2083 duplicate-storage guard applied first.
 *
 * @param message the message to examine
 * @return {@code true} if the message should be stored offline
 */
static boolean shouldStoreMessage(final Message message) {
    // XEP-0334: Implement the <no-store/> hint to override offline storage
    if (message.getChildElement("no-store", "urn:xmpp:hints") != null) {
        return false;
    }
    // OF-2083: Prevent storing offline message that is already stored
    if (message.getChildElement("offline", "http://jabber.org/protocol/offline") != null) {
        return false;
    }
    switch (message.getType()) {
        case chat:
            // XEP-0160: Messages with a 'type' attribute whose value is "chat" SHOULD be stored offline, with the exception of messages that contain only Chat State Notifications (XEP-0085) [7] content
            // Iterate through the child elements to see if we can find anything that's not a chat state notification or
            // real time text notification
            Iterator<?> it = message.getElement().elementIterator();
            while (it.hasNext()) {
                Object item = it.next();
                if (item instanceof Element) {
                    Element el = (Element) item;
                    // Elements without a namespace (e.g. <thread/>) do not count as content.
                    if (Namespace.NO_NAMESPACE.equals(el.getNamespace())) {
                        continue;
                    }
                    if (!el.getNamespaceURI().equals("http://jabber.org/protocol/chatstates")
                            && !(el.getQName().equals(QName.get("rtt", "urn:xmpp:rtt:0")))
                    ) {
                        return true;
                    }
                }
            }
            // Only chat-state/RTT extensions found: store only if there is an actual body.
            return message.getBody() != null && !message.getBody().isEmpty();
        case groupchat:
        case headline:
            // XEP-0160: "groupchat" message types SHOULD NOT be stored offline
            // XEP-0160: "headline" message types SHOULD NOT be stored offline
            return false;
        case error:
            // XEP-0160: "error" message types SHOULD NOT be stored offline,
            // although a server MAY store advanced message processing errors offline
            if (message.getChildElement("amp", "http://jabber.org/protocol/amp") == null) {
                return false;
            }
            break; // AMP error: intentional fall-through to the default "store" result below
        default:
            // XEP-0160: Messages with a 'type' attribute whose value is "normal" (or messages with no 'type' attribute) SHOULD be stored offline.
            break;
    }
    return true;
}
|
/**
 * A bodiless chat message containing only a thread id and a chat-state extension must not
 * be stored offline (XEP-0085 content only; the unnamespaced thread element is ignored).
 */
@Test
public void shouldNotStoreEmptyChatMessagesWithOnlyChatStatesAndThread() {
    Message message = new Message();
    message.setType(Message.Type.chat);
    message.setThread("1234");
    PacketExtension chatState = new PacketExtension("composing", "http://jabber.org/protocol/chatstates");
    message.addExtension(chatState);
    assertFalse(OfflineMessageStore.shouldStoreMessage(message));
}
|
/**
 * Sets the input path from a string form, delegating to {@code setFilePath(Path)}.
 *
 * @param filePath the path string; must not be null. An empty string is accepted and mapped
 *     to an empty {@code Path} (see the workaround note below).
 * @throws IllegalArgumentException if {@code filePath} is null
 * @throws RuntimeException if the string cannot be converted into a valid {@code Path}/URI;
 *     the original conversion failure is preserved as the cause
 */
public void setFilePath(String filePath) {
    if (filePath == null) {
        throw new IllegalArgumentException("File path cannot be null.");
    }

    // TODO The job-submission web interface passes empty args (and thus empty
    // paths) to compute the preview graph. The following is a workaround for
    // this situation and we should fix this.

    // comment (Stephan Ewen) this should be no longer relevant with the current Java/Scala
    // APIs.
    if (filePath.isEmpty()) {
        setFilePath(new Path());
        return;
    }

    try {
        this.setFilePath(new Path(filePath));
    } catch (RuntimeException rex) {
        // Bug fix: keep the original exception as the cause instead of discarding it.
        throw new RuntimeException(
                "Could not create a valid URI from the given file path name: " + rex.getMessage(),
                rex);
    }
}
|
/** A null path string must be rejected with an IllegalArgumentException. */
@Test
void testSetPathNullString() {
    assertThatThrownBy(() -> new DummyFileInputFormat().setFilePath((String) null))
            .isInstanceOf(IllegalArgumentException.class);
}
|
/**
 * Allocates an IP address from the IPAM pool of the given network.
 *
 * @param netId network identifier
 * @return 200 OK with the allocated IPAM entry encoded as JSON; 404 if the network is
 *     unknown or no IP could be allocated
 */
@GET
@Path("{netId}")
@Produces(MediaType.APPLICATION_JSON)
public Response allocateIp(@PathParam("netId") String netId) {
    // Use parameterized logging instead of string concatenation (avoids building the
    // message when TRACE is disabled).
    log.trace("Received IP allocation request of network {}", netId);

    K8sNetwork network =
            nullIsNotFound(networkService.network(netId), NETWORK_ID_NOT_FOUND);

    IpAddress ip =
            nullIsNotFound(ipamService.allocateIp(network.networkId()), IP_NOT_ALLOCATED);

    ObjectNode root = mapper().createObjectNode();
    String ipamId = network.networkId() + "-" + ip.toString();
    K8sIpam ipam = new DefaultK8sIpam(ipamId, ip, network.networkId());
    root.set(IPAM, codec(K8sIpam.class).encode(ipam, this));
    return ok(root).build();
}
|
/** GET on an existing network must allocate an IP and respond 200 OK. */
@Test
public void testAllocateIp() {
    expect(mockNetworkService.network(anyObject())).andReturn(k8sNetwork);
    expect(mockIpamService.allocateIp(anyObject()))
            .andReturn(IpAddress.valueOf("10.10.10.2"));
    replay(mockNetworkService);
    replay(mockIpamService);

    final WebTarget wt = target();
    Response response = wt.path(IPAM + "/sona-network").request().get();
    final int status = response.getStatus();
    assertEquals(200, status);

    verify(mockNetworkService);
    verify(mockIpamService);
}
|
/**
 * Migrates the given scenario-simulation XML to the current schema version if needed.
 *
 * <p>The switch below deliberately falls through: starting at the file's version, every
 * subsequent migration step is chained so a 1.0 file runs all steps up to 1.8. Files at
 * {@code CURRENT_VERSION} pass through with only the start/end steps applied; any other
 * unknown version is rejected.
 *
 * @param rawXml the raw XML document
 * @return the migrated XML as a string
 * @throws IllegalArgumentException if the file's version is neither a known migratable
 *     version nor the current one
 * @throws Exception if parsing or a migration step fails
 */
public String migrateIfNecessary(String rawXml) throws Exception {
    String fileVersion = extractVersion(rawXml);
    ThrowingConsumer<Document> migrator = getMigrationStrategy().start();
    boolean supported;
    switch (fileVersion) {
        case "1.0":
            migrator = migrator.andThen(getMigrationStrategy().from1_0to1_1());
            // intentional fall-through: chain every later migration step
        case "1.1":
            migrator = migrator.andThen(getMigrationStrategy().from1_1to1_2());
            // fall through
        case "1.2":
            migrator = migrator.andThen(getMigrationStrategy().from1_2to1_3());
            // fall through
        case "1.3":
            migrator = migrator.andThen(getMigrationStrategy().from1_3to1_4());
            // fall through
        case "1.4":
            migrator = migrator.andThen(getMigrationStrategy().from1_4to1_5());
            // fall through
        case "1.5":
            migrator = migrator.andThen(getMigrationStrategy().from1_5to1_6());
            // fall through
        case "1.6":
            migrator = migrator.andThen(getMigrationStrategy().from1_6to1_7());
            // fall through
        case "1.7":
            migrator = migrator.andThen(getMigrationStrategy().from1_7to1_8());
            supported = true;
            break;
        default:
            // Not a migratable version: only the current version itself is accepted.
            supported = CURRENT_VERSION.equals(fileVersion);
            break;
    }
    if (!supported) {
        throw new IllegalArgumentException(new StringBuilder().append("Version ").append(fileVersion)
                                                   .append(" of the file is not supported. Current version is ")
                                                   .append(CURRENT_VERSION).toString());
    }
    migrator = migrator.andThen(getMigrationStrategy().end());
    Document document = DOMParserUtil.getDocument(rawXml);
    migrator.accept(document);
    return DOMParserUtil.getString(document);
}
|
/**
 * Unknown future versions must be rejected with a descriptive message; a file already at
 * the current version must pass through with its version preserved.
 */
@Test
public void migrateIfNecessary() throws Exception {
    assertThatThrownBy(() -> instance.migrateIfNecessary("<ScenarioSimulationModel version=\"9999999999.99999999999\" />"))
            .isInstanceOf(IllegalArgumentException.class)
            .hasMessage("Version 9999999999.99999999999 of the file is not supported. Current version is " + ScenarioSimulationXMLPersistence.getCurrentVersion());

    String noMigrationNeeded = "<ScenarioSimulationModel version=\"" + currentVersion + "\" />";
    String afterMigration = instance.migrateIfNecessary(noMigrationNeeded);
    Document document = DOMParserUtil.getDocument(afterMigration);
    commonCheckVersion(document, ScenarioSimulationXMLPersistence.getCurrentVersion());
}
|
/**
 * Filters the ION items read from {@code from} line by line, writing the kept items to a new
 * temp file and uploading it to internal storage.
 *
 * <p>For every item the filter expression is evaluated; a null result or evaluation error is
 * resolved according to {@code errorOrNullBehavior} (FAIL raises, INCLUDE/EXCLUDE force the
 * action). A non-matching item reverses the configured action, so INCLUDE keeps matches and
 * drops non-matches, and vice versa.
 *
 * @param runContext the task run context
 * @return output holding the stored file URI plus processed/dropped counters
 * @throws Exception on I/O failure or (with FAIL behavior) expression errors
 */
@Override
public Output run(RunContext runContext) throws Exception {
    URI from = new URI(runContext.render(this.from));

    final PebbleExpressionPredicate predicate = getExpressionPredication(runContext);

    final Path path = runContext.workingDir().createTempFile(".ion");

    long processedItemsTotal = 0L;
    long droppedItemsTotal = 0L;
    try (final BufferedWriter writer = Files.newBufferedWriter(path);
         final BufferedReader reader = newBufferedReader(runContext, from)) {
        String item;
        while ((item = reader.readLine()) != null) {
            IllegalVariableEvaluationException exception = null;
            Boolean match = null;
            try {
                match = predicate.apply(item);
            } catch (IllegalVariableEvaluationException e) {
                // Defer handling: the error is resolved via errorOrNullBehavior below.
                exception = e;
            }
            FilterType action = this.filterType;

            if (match == null) {
                switch (errorOrNullBehavior) {
                    case FAIL -> {
                        if (exception != null) {
                            throw exception;
                        } else {
                            throw new IllegalVariableEvaluationException(String.format(
                                "Expression `%s` return `null` on item `%s`",
                                filterCondition,
                                item
                            ));
                        }
                    }
                    case INCLUDE -> action = FilterType.INCLUDE;
                    case EXCLUDE -> action = FilterType.EXCLUDE;
                }
                // Treat the item as a match so the forced action is applied as-is.
                match = true;
            }

            if (!match) {
                // Non-matching items get the opposite of the configured action.
                action = action.reverse();
            }

            switch (action) {
                case INCLUDE -> {
                    writer.write(item);
                    writer.newLine();
                }
                case EXCLUDE -> droppedItemsTotal++;
            }
            processedItemsTotal++;
        }
    }

    URI uri = runContext.storage().putFile(path.toFile());
    return Output.builder()
        .uri(uri)
        .processedItemsTotal(processedItemsTotal)
        .droppedItemsTotal(droppedItemsTotal)
        .build();
}
|
/**
 * EXCLUDE with "value % 2 == 0" must drop the even-valued items and keep the odd ones,
 * with the processed/dropped counters reflecting all input items.
 */
@Test
void shouldFilterGivenValidBooleanExpressionForExclude() throws Exception {
    // Given
    RunContext runContext = runContextFactory.of();

    FilterItems task = FilterItems
        .builder()
        .from(generateKeyValueFile(TEST_VALID_ITEMS, runContext).toString())
        .filterCondition(" {{ value % 2 == 0 }} ")
        .filterType(FilterItems.FilterType.EXCLUDE)
        .build();

    // When
    FilterItems.Output output = task.run(runContext);

    // Then
    Assertions.assertNotNull(output);
    Assertions.assertNotNull(output.getUri());
    Assertions.assertEquals(2, output.getDroppedItemsTotal());
    Assertions.assertEquals(4, output.getProcessedItemsTotal());
    assertFile(runContext, output, List.of(new KeyValue("k1", 1), new KeyValue("k3", 3)), KeyValue.class);
}
|
/**
 * Reconstructs the string form of this URI from its components (scheme, authority,
 * path, query), caching the result in {@code mUriString} so later calls are free.
 *
 * <p>NOTE(review): not synchronized — concurrent first calls may each build the
 * string; the results are identical so the race appears benign, but confirm this
 * is intended.</p>
 */
@Override
public String toString() {
    // Fast path: return the previously rendered string if available.
    if (mUriString != null) {
        return mUriString;
    }
    StringBuilder sb = new StringBuilder();
    if (mUri.getScheme() != null) {
        sb.append(mUri.getScheme());
        sb.append("://");
    }
    if (hasAuthority()) {
        // A scheme-less URI with an authority still needs the leading "//".
        if (mUri.getScheme() == null) {
            sb.append("//");
        }
        sb.append(mUri.getAuthority().toString());
    }
    if (mUri.getPath() != null) {
        String path = mUri.getPath();
        // Windows drive paths like "/C:/x" drop the artificial leading slash when
        // there is no scheme and no (meaningful) authority.
        if (path.indexOf('/') == 0 && hasWindowsDrive(path, true) // has windows drive
            && mUri.getScheme() == null // but no scheme
            && (mUri.getAuthority() == null
                || mUri.getAuthority() instanceof NoAuthority)) { // or authority
            path = path.substring(1); // remove slash before drive
        }
        sb.append(path);
    }
    if (mUri.getQuery() != null) {
        sb.append("?");
        sb.append(mUri.getQuery());
    }
    // Cache the rendering before returning it.
    mUriString = sb.toString();
    return mUriString;
}
|
/**
 * Round-trip check: parsing a URI string and printing it back must be the identity
 * for a representative set of forms (relative, absolute, with spaces, with scheme
 * and authority, with fragments).
 */
@Test
public void toStringTests() {
    String[] uris =
        new String[] {"/", "/a", "/a/ b", "alluxio://a/b/c d.txt",
            "alluxio://localhost:8080/a/b.txt", "foo", "foo/bar", "/foo/bar#boo", "foo/bar#boo",
            "file:///foo/bar"};
    for (String uri : uris) {
        AlluxioURI turi = new AlluxioURI(uri);
        assertEquals(uri, turi.toString());
    }
    assertEquals(".", new AlluxioURI(".").toString());
    assertEquals("file:///a", new AlluxioURI("file:///a").toString());
    // Building from components must print the same as parsing the full string.
    assertEquals("file:///a", new AlluxioURI("file", null, "/a").toString());
}
|
/**
 * Computes CRC-16/ARC over the given range: polynomial 0x8005, initial value
 * 0x0000, final XOR 0x0000. The two boolean flags presumably select input/output
 * bit reflection (standard for the ARC variant) — confirm against the CRC helper.
 *
 * @param data   source buffer, must not be null
 * @param offset index of the first byte to include
 * @param length number of bytes to process
 * @return the 16-bit CRC as an int
 */
public static int ARC(@NonNull final byte[] data, final int offset, final int length) {
    return CRC(0x8005, 0x0000, data, offset, length, true, true, 0x0000);
}
|
/**
 * CRC-16/ARC over the standard check input "123456789" must yield 0xBB3D,
 * the published check value for this CRC variant.
 */
@Test
public void ARC_123456789() {
    final byte[] data = "123456789".getBytes();
    assertEquals(0xBB3D, CRC16.ARC(data, 0, 9));
}
|
/**
 * Returns the distinct union of the given collections, preserving first-seen
 * iteration order (backed by a {@link LinkedHashSet}). Null or empty inputs are
 * simply skipped.
 *
 * @param coll1      first collection, may be null or empty
 * @param coll2      second collection, may be null or empty
 * @param otherColls further collections, may be null or empty
 * @param <T>        element type
 * @return a mutable, ordered set of every distinct element
 */
@SafeVarargs
public static <T> Set<T> unionDistinct(Collection<T> coll1, Collection<T> coll2, Collection<T>... otherColls) {
    final Set<T> union = isEmpty(coll1) ? new LinkedHashSet<>() : new LinkedHashSet<>(coll1);
    if (isNotEmpty(coll2)) {
        union.addAll(coll2);
    }
    if (ArrayUtil.isNotEmpty(otherColls)) {
        for (final Collection<T> other : otherColls) {
            if (isNotEmpty(other)) {
                union.addAll(other);
            }
        }
    }
    return union;
}
|
/**
 * Union with null trailing arguments must still return a non-null (possibly
 * empty) set rather than throwing.
 */
@SuppressWarnings("ConstantValue")
@Test
public void unionDistinctNullTest() {
    final List<String> list1 = new ArrayList<>();
    final List<String> list2 = null;
    final List<String> list3 = null;
    final Set<String> set = CollUtil.unionDistinct(list1, list2, list3);
    assertNotNull(set);
}
|
/**
 * Validates that {@code value} conforms to {@code schema}, delegating with a null
 * field name (no extra error context).
 *
 * @throws DataException if the value does not match the schema
 */
public static void validateValue(Schema schema, Object value) {
    validateValue(null, schema, value);
}
|
/**
 * A map whose keys only partially conform to the schema's key type (one Integer
 * key, one String key) must be rejected against an int-to-string map schema.
 */
@Test
public void testValidateValueMismatchMapSomeKeys() {
    Map<Object, String> data = new HashMap<>();
    data.put(1, "abc");
    // Key of the wrong type: the schema expects Integer keys.
    data.put("wrong", "it's as easy as one two three");
    assertThrows(DataException.class,
        () -> ConnectSchema.validateValue(MAP_INT_STRING_SCHEMA, data));
}
|
/**
 * Strict greater-than comparison of two points (used by the FEEL "after" built-in,
 * per the surrounding tests): returns {@code true} when {@code point1} compares
 * strictly greater than {@code point2}. Null parameters and incomparable values
 * yield an error result instead of throwing.
 */
public FEELFnResult<Boolean> invoke(@ParameterName( "point1" ) Comparable point1, @ParameterName( "point2" ) Comparable point2) {
    if (point1 == null) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point1", "cannot be null"));
    }
    if (point2 == null) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point2", "cannot be null"));
    }
    try {
        final int comparison = point1.compareTo(point2);
        return FEELFnResult.ofResult(comparison > 0);
    } catch (Exception e) {
        // The two points do not share a comparable type.
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point1", "cannot be compared to point2"));
    }
}
|
/**
 * after(point, range) semantics: a point is "after" a range only when it lies
 * beyond the range's end, honoring the end boundary's open/closed flag.
 */
@Test
void invokeParamSingleAndRange() {
    // "a" lies inside [a..f] — not after.
    FunctionTestUtil.assertResult( afterFunction.invoke( "a",
        new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED )),
        Boolean.FALSE );
    // "f" equals a closed end boundary — still not after.
    FunctionTestUtil.assertResult( afterFunction.invoke( "f",
        new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED )),
        Boolean.FALSE );
    // With an open end boundary "f" itself is outside the range — after.
    FunctionTestUtil.assertResult( afterFunction.invoke( "f",
        new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.OPEN )),
        Boolean.TRUE );
    // "g" lies strictly beyond the range end.
    FunctionTestUtil.assertResult( afterFunction.invoke( "g",
        new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED )),
        Boolean.TRUE );
}
|
/**
 * Returns the candidates that match at least one positive pattern and none of
 * the negative patterns.
 *
 * <p>A null or empty positive array falls back to {@code MATCH_ALL_PATTERN}
 * (presumably match-everything — confirm its definition); a null negative array
 * falls back to {@code EMPTY_PATTERN}, i.e. nothing is excluded.</p>
 *
 * @param candidates       values to filter; null or empty yields an empty list
 * @param positivePatterns patterns a candidate must match at least one of
 * @param negativePatterns patterns a candidate must match none of
 * @return the retained candidates, in input order
 */
public static List<String> filterMatches(@Nullable List<String> candidates,
                                         @Nullable Pattern[] positivePatterns,
                                         @Nullable Pattern[] negativePatterns) {
    if (candidates == null || candidates.isEmpty()) {
        return Collections.emptyList();
    }
    final Pattern[] include = (positivePatterns == null || positivePatterns.length == 0)
        ? MATCH_ALL_PATTERN : positivePatterns;
    final Pattern[] exclude = (negativePatterns == null) ? EMPTY_PATTERN : negativePatterns;
    final List<String> retained = new ArrayList<>();
    for (final String candidate : candidates) {
        final boolean wanted = Arrays.stream(include).anyMatch(p -> p.matcher(candidate).matches());
        final boolean rejected = Arrays.stream(exclude).anyMatch(p -> p.matcher(candidate).matches());
        if (wanted && !rejected) {
            retained.add(candidate);
        }
    }
    return retained;
}
|
/**
 * Combined positive/negative filtering: only "a" both matches the positive
 * pattern {@code ^a} and avoids the negative pattern {@code y$} ("any" matches
 * {@code ^a} but is excluded because it ends in "y").
 *
 * <p>Fix: removed the unused local {@code patterns} ("^a", "!y$"), which was
 * never passed to the method under test and whose "!"-prefixed content did not
 * correspond to anything the API consumes.</p>
 */
@Test
public void filterMatchesMultiple() {
    List<String> candidates = ImmutableList.of("a", "b", "any", "boom", "hello");
    List<String> expected = ImmutableList.of("a");
    assertThat(filterMatches(candidates, new Pattern[]{Pattern.compile("^a")}, new Pattern[]{Pattern.compile("y$")}),
        is(expected));
}
|
/**
 * Searches the given directory via the Drive search list service.
 *
 * <p>NOTE(review): the filter is forwarded as {@code regex.toString()} — this
 * relies on the filter's string form being a usable Drive search query; an
 * arbitrary regex would not be interpreted as such. Verify against callers.</p>
 *
 * @return the matching files, or an empty list when the directory does not exist
 */
@Override
public AttributedList<Path> search(final Path workdir, final Filter<Path> regex, final ListProgressListener listener) throws BackgroundException {
    try {
        return new DriveSearchListService(session, fileid, regex.toString()).list(workdir, listener);
    }
    catch(NotfoundException e) {
        // A missing directory is treated as "no results" rather than an error.
        return AttributedList.emptyList();
    }
}
|
/**
 * Integration test: a file created inside a freshly made folder must be found
 * when searching from the drive root by its random name (i.e. the search
 * descends into subfolders). Created resources are deleted afterwards.
 */
@Test
public void testSearchFolderRecursively() throws Exception {
    final String name = new AlphanumericRandomStringService().random();
    final DriveFileIdProvider fileid = new DriveFileIdProvider(session);
    final Path workdir = new DriveDirectoryFeature(session, fileid).mkdir(new Path(DriveHomeFinderService.MYDRIVE_FOLDER, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
    final Path file = new DriveTouchFeature(session, fileid).touch(new Path(workdir, name, EnumSet.of(Path.Type.file)), new TransferStatus());
    final DriveSearchFeature feature = new DriveSearchFeature(session, fileid);
    // Search starts at the drive root, not at workdir, so a hit proves recursion.
    assertTrue(feature.search(DriveHomeFinderService.MYDRIVE_FOLDER, new SearchFilter(name), new DisabledListProgressListener()).contains(file));
    new DriveDeleteFeature(session, fileid).delete(Arrays.asList(file, workdir), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
/**
 * Best-effort close of the instance's sockets; a null instance is silently
 * ignored. The cast assumes the instance is the concrete
 * {@code HazelcastInstanceImpl} (as the original code did).
 */
public static void tryCloseConnections(HazelcastInstance hazelcastInstance) {
    if (hazelcastInstance != null) {
        closeSockets((HazelcastInstanceImpl) hazelcastInstance);
    }
}
|
/**
 * Passing a null instance must be a silent no-op (no NullPointerException).
 */
@Test
public void testTryCloseConnections_shouldDoNothingWithNullInstance() {
    tryCloseConnections(null);
}
|
/**
 * Applies the configuration and caches the configured local-FS bytes-per-checksum,
 * rejecting non-positive values. A null configuration only invokes the superclass
 * behavior, matching the original semantics.
 *
 * @throws IllegalStateException if the configured bytes per checksum is not positive
 */
@Override
public void setConf(Configuration conf) {
    super.setConf(conf);
    if (conf == null) {
        return;
    }
    bytesPerChecksum = conf.getInt(LocalFileSystemConfigKeys.LOCAL_FS_BYTES_PER_CHECKSUM_KEY,
        LocalFileSystemConfigKeys.LOCAL_FS_BYTES_PER_CHECKSUM_DEFAULT);
    Preconditions.checkState(bytesPerChecksum > 0,
        "bytes per checksum should be positive but was %s",
        bytesPerChecksum);
}
|
/**
 * Zero and negative bytes-per-checksum values must be rejected with an
 * IllegalStateException, while a positive value (512) is accepted.
 */
@Test
public void testSetConf() {
    Configuration conf = new Configuration();
    conf.setInt(LocalFileSystemConfigKeys.LOCAL_FS_BYTES_PER_CHECKSUM_KEY, 0);
    try {
        localFs.setConf(conf);
        fail("Should have failed because zero bytes per checksum is invalid");
    } catch (IllegalStateException ignored) {
        // expected: zero is not a valid chunk size
    }
    conf.setInt(LocalFileSystemConfigKeys.LOCAL_FS_BYTES_PER_CHECKSUM_KEY, -1);
    try {
        localFs.setConf(conf);
        fail("Should have failed because negative bytes per checksum is invalid");
    } catch (IllegalStateException ignored) {
        // expected: negative is not a valid chunk size
    }
    // A positive value must be accepted without throwing.
    conf.setInt(LocalFileSystemConfigKeys.LOCAL_FS_BYTES_PER_CHECKSUM_KEY, 512);
    localFs.setConf(conf);
}
|
/**
 * Returns true when the endpoint carries an IP address whose address family
 * is IPv6.
 */
public static boolean isIpV6Endpoint(NetworkEndpoint networkEndpoint) {
    return hasIpAddress(networkEndpoint)
        && networkEndpoint.getIpAddress().getAddressFamily().equals(AddressFamily.IPV6);
}
|
/**
 * An IP endpoint whose address family is IPV6 must be classified as IPv6.
 *
 * <p>Fix: renamed from {@code ..._returnsFalse} — the assertion expects
 * {@code true}, so the previous name contradicted the test body.</p>
 */
@Test
public void isIpV6Endpoint_withIpV6Endpoint_returnsTrue() {
    NetworkEndpoint ipV6Endpoint =
        NetworkEndpoint.newBuilder()
            .setType(NetworkEndpoint.Type.IP)
            .setIpAddress(
                IpAddress.newBuilder().setAddress("3ffe::1").setAddressFamily(AddressFamily.IPV6))
            .build();
    assertThat(NetworkEndpointUtils.isIpV6Endpoint(ipV6Endpoint)).isTrue();
}
|
/**
 * Selects the artifact fetcher for a URI by scheme: {@code local} maps to the
 * local fetcher, raw http (per {@code isRawHttp}) or {@code https} to the HTTP
 * fetcher, and any other scheme (e.g. hdfs, s3a) to the filesystem fetcher.
 */
@VisibleForTesting
ArtifactFetcher getFetcher(URI uri) {
    final String scheme = uri.getScheme();
    if ("local".equals(scheme)) {
        return localFetcher;
    }
    if (isRawHttp(scheme) || "https".equals(scheme)) {
        return httpFetcher;
    }
    // Default: treat the URI as a (distributed) filesystem path.
    return fsFetcher;
}
|
/**
 * Scheme-to-fetcher routing: local maps to LocalArtifactFetcher; http (with raw
 * HTTP enabled) and https map to HttpArtifactFetcher; hdfs and s3a fall through
 * to FsArtifactFetcher.
 */
@Test
void testGetFetcher() throws Exception {
    // Raw http must be explicitly enabled for http:// URIs to use the HTTP fetcher.
    configuration.set(ArtifactFetchOptions.RAW_HTTP_ENABLED, true);
    ArtifactFetchManager fetchManager = new ArtifactFetchManager(configuration);
    ArtifactFetcher fetcher = fetchManager.getFetcher(new URI("local:///a.jar"));
    assertThat(fetcher).isInstanceOf(LocalArtifactFetcher.class);
    fetcher = fetchManager.getFetcher(new URI("http://0.0.0.0:123/a.jar"));
    assertThat(fetcher).isInstanceOf(HttpArtifactFetcher.class);
    fetcher = fetchManager.getFetcher(new URI("https://0.0.0.0:123/a.jar"));
    assertThat(fetcher).isInstanceOf(HttpArtifactFetcher.class);
    fetcher = fetchManager.getFetcher(new URI("hdfs:///tmp/a.jar"));
    assertThat(fetcher).isInstanceOf(FsArtifactFetcher.class);
    fetcher = fetchManager.getFetcher(new URI("s3a:///tmp/a.jar"));
    assertThat(fetcher).isInstanceOf(FsArtifactFetcher.class);
}
|
/**
 * Resolves which secrets-extension version the given plugin supports and returns
 * the matching versioned extension implementation from the registry map.
 * May return null if the resolved version has no registered implementation.
 */
protected VersionedSecretsExtension getVersionedSecretsExtension(String pluginId) {
    final String resolvedExtensionVersion = pluginManager.resolveExtensionVersion(pluginId, SECRETS_EXTENSION, goSupportedVersions());
    return secretsExtensionMap.get(resolvedExtensionVersion);
}
|
/**
 * For every supported secrets-extension version there must be a registered
 * versioned extension whose VERSION field matches the resolved version.
 */
@Test
void shouldHaveVersionedSecretsExtensionForAllSupportedVersions() {
    for (String supportedVersion : SUPPORTED_VERSIONS) {
        final String message = String.format("Must define versioned extension class for %s extension with version %s", SECRETS_EXTENSION, supportedVersion);
        // Force version resolution to return the version under test.
        when(pluginManager.resolveExtensionVersion(PLUGIN_ID, SECRETS_EXTENSION, SUPPORTED_VERSIONS)).thenReturn(supportedVersion);
        final VersionedSecretsExtension extension = this.extension.getVersionedSecretsExtension(PLUGIN_ID);
        assertThat(extension).as(message).isNotNull();
        assertThat((String) ReflectionUtil.getField(extension, "VERSION")).isEqualTo(supportedVersion);
    }
}
|
/**
 * Cache-aside lookup of a seckill activity: first tries Redis under the key
 * {@code seckill:<id>}; on a miss, loads from the database, populates the cache
 * via {@code putSeckill} and returns the loaded entity.
 *
 * @param seckillId activity id
 * @return the activity, never null
 * @throws RuntimeException if the activity exists in neither cache nor database
 */
public Seckill getSeckill(long seckillId) {
    final String key = "seckill:" + seckillId;
    final Seckill cached = (Seckill) redisTemplate.opsForValue().get(key);
    if (cached != null) {
        return cached;
    }
    final Seckill fromDb = seckillMapper.selectById(seckillId);
    if (fromDb == null) {
        throw new RuntimeException("秒杀活动不存在!");
    }
    // Warm the cache so subsequent lookups hit Redis.
    putSeckill(fromDb);
    return fromDb;
}
|
/**
 * When Redis returns null, the activity must be loaded from the database and a
 * non-null result returned.
 *
 * <p>NOTE(review): the name says "Cache" but the stubbed cache value is null, so
 * this actually exercises the database-fallback path — consider renaming.</p>
 */
@Test
void getSeckillSuccessCache() {
    long seckillId = 1001L;
    String key = "seckill:" + seckillId;
    ValueOperations valueOperations = mock(ValueOperations.class);
    when(redisTemplate.opsForValue()).thenReturn(valueOperations);
    // Simulate a cache miss so the service falls back to the mapper.
    when(valueOperations.get(key)).thenReturn(null);
    when(seckillMapper.selectById(seckillId)).thenReturn(new Seckill());
    assertNotNull(redisService.getSeckill(seckillId));
}
|
/**
 * Hash consistent with equals, derived from the timestamp value, its precision
 * and the session time zone key.
 */
@Override
public int hashCode()
{
    return Objects.hash(value, precision, sessionTimeZoneKey);
}
|
/**
 * equals/hashCode contract for millisecond timestamps: equal value+precision
 * implies equal objects and equal hashes; differing value or differing
 * precision (millis vs micros) implies inequality.
 */
@Test
public void testEqualsHashcodeMillis()
{
    SqlTimestamp t1Millis = new SqlTimestamp(0, MILLISECONDS);
    SqlTimestamp t2Millis = new SqlTimestamp(0, MILLISECONDS);
    assertEquals(t1Millis, t2Millis);
    assertEquals(t1Millis.hashCode(), t2Millis.hashCode());
    // Same precision, different value.
    SqlTimestamp t3Millis = new SqlTimestamp(1, MILLISECONDS);
    assertNotEquals(t1Millis, t3Millis);
    // Same value, different precision.
    SqlTimestamp t1Micros = new SqlTimestamp(0, MICROSECONDS);
    assertNotEquals(t1Millis, t1Micros);
}
|
/**
 * Exposes the locally cached entries by delegating to the local cache view.
 */
@Override
public Set<Entry<K, V>> cachedEntrySet() {
    return localCacheView.cachedEntrySet();
}
|
/**
 * After the map's TTL elapses, both the local cache view and the remote lookup
 * must be empty. Requires keyspace notifications (NOTIFY_KEYSPACE_EVENTS "EKx")
 * so the local cache learns about the expiration.
 */
@Test
public void testExpiration() {
    testWithParams(redisson -> {
        RLocalCachedMap<String, String> m = redisson.getLocalCachedMap(LocalCachedMapOptions.name("test"));
        m.put("12", "32");
        assertThat(m.cachedEntrySet()).hasSize(1);
        m.expire(Duration.ofSeconds(1));
        // Sleep past the 1s TTL so the entry has definitely expired.
        try {
            Thread.sleep(1500);
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }
        assertThat(m.cachedEntrySet()).hasSize(0);
        assertThat(m.get("12")).isNull();
    }, NOTIFY_KEYSPACE_EVENTS, "EKx");
}
|
/**
 * Copies a source object to a destination via S3 multipart upload-part-copy:
 * initiates a multipart upload carrying over metadata, storage class and the
 * configured encryption settings, copies the object in chunks of
 * {@code MAX_COPY_OBJECT_SIZE_BYTES}, then completes the upload.
 *
 * <p>Zero-byte objects are handled specially with a single part copy that has
 * no {@code copySourceRange} (an empty range cannot be expressed).</p>
 *
 * @param sourcePath       bucket/key to copy from
 * @param destinationPath  bucket/key to copy to
 * @param sourceObjectHead head response of the source, supplying metadata and size
 * @return the complete-multipart-upload response from S3
 * @throws SdkServiceException if any of the S3 calls fail
 */
@VisibleForTesting
CompleteMultipartUploadResponse multipartCopy(
    S3ResourceId sourcePath, S3ResourceId destinationPath, HeadObjectResponse sourceObjectHead)
    throws SdkServiceException {
    // Start the multipart upload, propagating metadata and encryption config.
    CreateMultipartUploadRequest initiateUploadRequest =
        CreateMultipartUploadRequest.builder()
            .bucket(destinationPath.getBucket())
            .key(destinationPath.getKey())
            .storageClass(config.getS3StorageClass())
            .metadata(sourceObjectHead.metadata())
            .serverSideEncryption(config.getSSEAlgorithm())
            .ssekmsKeyId(config.getSSEKMSKeyId())
            .sseCustomerKey(config.getSSECustomerKey().getKey())
            .sseCustomerAlgorithm(config.getSSECustomerKey().getAlgorithm())
            .sseCustomerKeyMD5(config.getSSECustomerKey().getMD5())
            .build();
    CreateMultipartUploadResponse createMultipartUploadResponse =
        s3Client.get().createMultipartUpload(initiateUploadRequest);
    final String uploadId = createMultipartUploadResponse.uploadId();
    List<CompletedPart> completedParts = new ArrayList<>();
    final long objectSize = sourceObjectHead.contentLength();
    CopyPartResult copyPartResult;
    CompletedPart completedPart;
    // extra validation in case a caller calls directly S3FileSystem.multipartCopy
    // without using S3FileSystem.copy in the future
    if (objectSize == 0) {
        // Empty object: single part, no copySourceRange (an empty range is invalid).
        final UploadPartCopyRequest uploadPartCopyRequest =
            UploadPartCopyRequest.builder()
                .destinationBucket(destinationPath.getBucket())
                .destinationKey(destinationPath.getKey())
                .sourceBucket(sourcePath.getBucket())
                .sourceKey(sourcePath.getKey())
                .uploadId(uploadId)
                .partNumber(1)
                .sseCustomerKey(config.getSSECustomerKey().getKey())
                .sseCustomerAlgorithm(config.getSSECustomerKey().getAlgorithm())
                .sseCustomerKeyMD5(config.getSSECustomerKey().getMD5())
                .copySourceSSECustomerKey(config.getSSECustomerKey().getKey())
                .copySourceSSECustomerAlgorithm(config.getSSECustomerKey().getAlgorithm())
                .copySourceSSECustomerKeyMD5(config.getSSECustomerKey().getMD5())
                .build();
        copyPartResult = s3Client.get().uploadPartCopy(uploadPartCopyRequest).copyPartResult();
        completedPart = CompletedPart.builder().partNumber(1).eTag(copyPartResult.eTag()).build();
        completedParts.add(completedPart);
    } else {
        long bytePosition = 0;
        // Amazon parts are 1-indexed, not zero-indexed.
        for (int partNumber = 1; bytePosition < objectSize; partNumber++) {
            final UploadPartCopyRequest uploadPartCopyRequest =
                UploadPartCopyRequest.builder()
                    .destinationBucket(destinationPath.getBucket())
                    .destinationKey(destinationPath.getKey())
                    .sourceBucket(sourcePath.getBucket())
                    .sourceKey(sourcePath.getKey())
                    .uploadId(uploadId)
                    .partNumber(partNumber)
                    // Inclusive byte range; the last part is clamped to the object end.
                    .copySourceRange(
                        String.format(
                            "bytes=%s-%s",
                            bytePosition,
                            Math.min(objectSize - 1, bytePosition + MAX_COPY_OBJECT_SIZE_BYTES - 1)))
                    .sseCustomerKey(config.getSSECustomerKey().getKey())
                    .sseCustomerAlgorithm(config.getSSECustomerKey().getAlgorithm())
                    .sseCustomerKeyMD5(config.getSSECustomerKey().getMD5())
                    .copySourceSSECustomerKey(config.getSSECustomerKey().getKey())
                    .copySourceSSECustomerAlgorithm(config.getSSECustomerKey().getAlgorithm())
                    .copySourceSSECustomerKeyMD5(config.getSSECustomerKey().getMD5())
                    .build();
            copyPartResult = s3Client.get().uploadPartCopy(uploadPartCopyRequest).copyPartResult();
            completedPart =
                CompletedPart.builder().partNumber(partNumber).eTag(copyPartResult.eTag()).build();
            completedParts.add(completedPart);
            bytePosition += MAX_COPY_OBJECT_SIZE_BYTES;
        }
    }
    // Assemble the part list and finish the upload.
    CompletedMultipartUpload completedMultipartUpload =
        CompletedMultipartUpload.builder().parts(completedParts).build();
    CompleteMultipartUploadRequest completeUploadRequest =
        CompleteMultipartUploadRequest.builder()
            .bucket(destinationPath.getBucket())
            .key(destinationPath.getKey())
            .uploadId(uploadId)
            .multipartUpload(completedMultipartUpload)
            .build();
    return s3Client.get().completeMultipartUpload(completeUploadRequest);
}
|
/**
 * Runs the multipart-copy scenario against both a plain S3 config and one with
 * an SSE customer key, for the "s3" and "other" schemes.
 */
@Test
public void testMultipartCopy() throws IOException {
    testMultipartCopy(s3Config("s3"));
    testMultipartCopy(s3Config("other"));
    testMultipartCopy(s3ConfigWithSSECustomerKey("s3"));
    testMultipartCopy(s3ConfigWithSSECustomerKey("other"));
}
|
/**
 * Cooperative source step: drains pending events into the outbox, then polls the
 * JMS consumer without blocking. In exactly-once mode, messages whose IDs were
 * restored from a snapshot are treated as redeliveries and dropped (subject to a
 * TTL on the restored-ID set).
 *
 * @return always {@code false} — this streaming source never completes on its own
 */
@Override
public boolean complete() {
    // Don't consume while a snapshot is being taken.
    if (snapshotInProgress) {
        return false;
    }
    // Only poll for a new message once the previous traverser is fully emitted.
    while (emitFromTraverser(pendingTraverser)) {
        try {
            Message t = consumer.receiveNoWait();
            if (t == null) {
                // Nothing available right now: let the event-time mapper emit idle output.
                pendingTraverser = eventTimeMapper.flatMapIdle();
                break;
            }
            if (guarantee == EXACTLY_ONCE) {
                // We don't know whether the messages with the restored IDs were acknowledged in the previous
                // execution or not. They are acknowledged in phase-2 of the snapshot which might not be executed.
                // If we receive a message with a restored ID, we ignore it. But if we don't receive some ID,
                // we can never safely throw it out.
                // In order to avoid storing the restored IDs forever, we set a timeout after which we clear the
                // collection. We start the timeout after receiving the first message, at which time we know the
                // broker is working. We assume it will redeliver the messages promptly; if it doesn't, we assume
                // they were acknowledged in the previous execution or delivered to another processor in this
                // execution.
                // Long.MAX_VALUE is presumably the "timer not started" sentinel — set on first message.
                if (restoredIdsExpiration == Long.MAX_VALUE) {
                    restoredIdsExpiration = System.nanoTime() + RESTORED_IDS_TTL;
                } else if (!restoredIds.isEmpty() && restoredIdsExpiration <= System.nanoTime()) {
                    restoredIds = emptySet();
                }
                Object msgId = messageIdFn.apply(t);
                if (msgId == null) {
                    throw new JetException("Received a message without an ID. All messages must have an ID, " +
                        "you can specify an extracting function using "
                        + JmsSourceBuilder.class.getSimpleName() + ".messageIdFn()");
                }
                seenIds.add(msgId);
                // A restored ID means this message was already seen before the restart.
                if (restoredIds.remove(msgId)) {
                    getLogger().fine("Redelivered message dropped: %s", t);
                    continue;
                }
            }
            // A null projection means "skip this message"; emit idle output instead.
            T projectedItem = projectionFn.apply(t);
            pendingTraverser = projectedItem != null
                ? eventTimeMapper.flatMapEvent(projectedItem, 0, handleJmsTimestamp(t))
                : eventTimeMapper.flatMapIdle();
        } catch (JMSException e) {
            throw sneakyThrow(e);
        }
    }
    return false;
}
|
/**
 * Two messages sent to a queue must eventually both be emitted by the processor.
 * Because the source polls with receiveNoWait(), delivery may take multiple
 * complete() calls, hence the assertTrueEventually loop.
 */
@Test
public void when_queue() throws Exception {
    String queueName = randomString();
    logger.info("using queue: " + queueName);
    String message1 = sendMessage(queueName, true);
    String message2 = sendMessage(queueName, true);
    initializeProcessor(queueName, true, null);
    Queue<Object> queue = outbox.queue(0);
    // Even though both messages are in queue, the processor might not see them
    // because it uses `consumer.receiveNoWait()`, so if they are not available immediately,
    // it doesn't block and items should be available later.
    // See https://github.com/hazelcast/hazelcast-jet/issues/1010
    List<Object> actualOutput = new ArrayList<>();
    assertTrueEventually(() -> {
        // Reset the outbox each attempt so complete() can emit again.
        outbox.reset();
        processor.complete();
        Object item = queue.poll();
        if (item != null) {
            actualOutput.add(item);
        }
        // Messages must arrive in send order.
        assertEquals(asList(message1, message2), actualOutput);
    });
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.