| focal_method (string, length 13–60.9k) | test_case (string, length 25–109k) |
|---|---|
/**
 * Creates an {@link AvroSchema} from the given schema definition.
 *
 * <p>When the definition carries both a custom reader and writer, they are used
 * for serde while the schema info is still derived from the definition.
 * Otherwise the schema is generated from the POJO, using the explicitly
 * configured class loader when present, else the POJO's own class loader.
 *
 * @param schemaDefinition definition describing the schema to build
 * @param <T> the POJO type handled by the schema
 * @return the constructed Avro schema
 */
public static <T> AvroSchema<T> of(SchemaDefinition<T> schemaDefinition) {
    final boolean hasCustomSerde = schemaDefinition.getSchemaReaderOpt().isPresent()
            && schemaDefinition.getSchemaWriterOpt().isPresent();
    if (hasCustomSerde) {
        return new AvroSchema<>(
                schemaDefinition.getSchemaReaderOpt().get(),
                schemaDefinition.getSchemaWriterOpt().get(),
                parseSchemaInfo(schemaDefinition, SchemaType.AVRO));
    }
    // Resolve the class loader: an explicitly configured one wins; otherwise
    // fall back to the POJO's loader (or null if neither is available).
    ClassLoader pojoClassLoader = schemaDefinition.getClassLoader();
    if (pojoClassLoader == null && schemaDefinition.getPojo() != null) {
        pojoClassLoader = schemaDefinition.getPojo().getClassLoader();
    }
    return new AvroSchema<>(parseSchemaInfo(schemaDefinition, SchemaType.AVRO), pojoClassLoader);
}
|
/**
 * Verifies that a user-supplied reader/writer pair is honoured by
 * {@code AvroSchema.of} and that a value round-trips through that custom serde.
 */
@Test
public void testAvroSchemaUserDefinedReadAndWriter() {
    // Custom JSON-based serde so AvroSchema.of takes the user-defined branch.
    SchemaReader<Foo> reader = new JacksonJsonReader<>(new ObjectMapper(), Foo.class);
    SchemaWriter<Foo> writer = new JacksonJsonWriter<>(new ObjectMapper());
    // NOTE(review): the POJO is Bar.class while the reader/writer handle Foo —
    // presumably intentional to show schema info is independent of the custom
    // serde; confirm against SchemaDefinition semantics.
    SchemaDefinition<Foo> schemaDefinition = SchemaDefinition.<Foo>builder()
        .withPojo(Bar.class)
        .withSchemaReader(reader)
        .withSchemaWriter(writer)
        .build();
    AvroSchema<Foo> schema = AvroSchema.of(schemaDefinition);
    Foo foo = new Foo();
    foo.setColor(SchemaTestUtils.Color.RED);
    String field1 = "test";
    foo.setField1(field1);
    schema.encode(foo);
    // Round-trip through encode/decode and verify the field values survive.
    foo = schema.decode(schema.encode(foo));
    assertEquals(foo.getColor(), SchemaTestUtils.Color.RED);
    assertEquals(field1, foo.getField1());
}
|
/**
 * Resolves the SQL mapping fields for the key or value side of a Java mapping.
 * The target class is taken from already-extracted field metadata when
 * available, otherwise loaded from the mapping options.
 *
 * @param isKey whether the key (true) or value (false) side is resolved
 * @param userFields fields explicitly declared by the user; may be empty
 * @param options mapping options used to locate the class when metadata is absent
 * @param serializationService unused here; part of the resolver contract
 * @return the resolved and validated mapping fields
 */
@Override
public Stream<MappingField> resolveAndValidateFields(
    boolean isKey,
    List<MappingField> userFields,
    Map<String, String> options,
    InternalSerializationService serializationService
) {
    Map<QueryPath, MappingField> fieldsByPath = extractFields(userFields, isKey);
    Class<?> typeClass = getMetadata(fieldsByPath)
        .<Class<?>>map(KvMetadataJavaResolver::loadClass)
        .orElseGet(() -> loadClass(options, isKey));
    QueryDataType type = QueryDataTypeUtils.resolveTypeForClass(typeClass);
    // Primitive-like (non-OBJECT family) and custom types map to a single
    // top-level field; plain OBJECT types are introspected field-by-field.
    if (type.getTypeFamily() != QueryDataTypeFamily.OBJECT || type.isCustomType()) {
        return userFields.isEmpty()
            ? resolvePrimitiveField(isKey, type)
            : resolveAndValidatePrimitiveField(isKey, fieldsByPath, type);
    } else {
        return userFields.isEmpty()
            ? resolveObjectFields(isKey, typeClass)
            : resolveAndValidateObjectFields(isKey, fieldsByPath, typeClass);
    }
}
|
/**
 * Two user-declared fields mapping to the same external name (e.g.
 * {@code __key.field}) must be rejected. Parameterized over both sides: the
 * key ({@code __key}) and the value ({@code this}).
 */
@Test
@Parameters({
    "true, __key",
    "false, this"
})
public void when_userDeclaresObjectDuplicateExternalName_then_throws(boolean key, String prefix) {
    // Configure the java format and target class for whichever side is tested.
    Map<String, String> options = Map.of(
        (key ? OPTION_KEY_FORMAT : OPTION_VALUE_FORMAT), JAVA_FORMAT,
        (key ? OPTION_KEY_CLASS : OPTION_VALUE_CLASS), Type.class.getName()
    );
    // Both fields point at "<prefix>.field" — the resolver must reject the duplicate.
    assertThatThrownBy(() -> INSTANCE.resolveAndValidateFields(
        key,
        asList(
            field("field1", QueryDataType.INT, prefix + ".field"),
            field("field2", QueryDataType.VARCHAR, prefix + ".field")
        ),
        options,
        null
    )).isInstanceOf(QueryException.class)
        .hasMessageMatching("Duplicate external name: (__key|this).field");
}
|
/**
 * Loads the application settings from the properties file in {@code homeDir}.
 *
 * <p>Properties are resolved in two passes because some overridable keys (e.g.
 * {@code ldap.*.url}, {@code ldap.*.baseDn}) are only discoverable after the
 * static properties have been read: the first pass loads the static properties,
 * whose values then yield the dynamic keys consumed by the second pass. Keys
 * present either in {@link ProcessProperties.Property} or in the file itself
 * may be overridden from the environment.
 *
 * @return the fully resolved application settings
 */
@Override
public AppSettings load() {
    Properties p = loadPropertiesFile(homeDir);
    Set<String> keysOverridableFromEnv = stream(ProcessProperties.Property.values()).map(ProcessProperties.Property::getKey)
        .collect(Collectors.toSet());
    keysOverridableFromEnv.addAll(p.stringPropertyNames());
    // 1st pass to load static properties
    Props staticProps = reloadProperties(keysOverridableFromEnv, p);
    keysOverridableFromEnv.addAll(getDynamicPropertiesKeys(staticProps));
    // 2nd pass to load dynamic properties like `ldap.*.url` or `ldap.*.baseDn` which keys depend on values of static
    // properties loaded in 1st step
    Props props = reloadProperties(keysOverridableFromEnv, p);
    // Fill in defaults and notify any registered consumers before exposing.
    new ProcessProperties(serviceLoaderWrapper).completeDefaults(props);
    stream(consumers).forEach(c -> c.accept(props));
    return new AppSettingsImpl(props);
}
|
/**
 * A missing properties file must not make loading fail: the loader silently
 * falls back to defaults/environment.
 */
@Test
public void file_is_not_loaded_if_it_does_not_exist() throws Exception {
    // Fresh empty home directory: no properties file present.
    File homeDir = temp.newFolder();
    AppSettingsLoaderImpl underTest = new AppSettingsLoaderImpl(system, new String[0], homeDir, serviceLoaderWrapper);
    AppSettings settings = underTest.load();
    // no failure, file is ignored
    assertThat(settings.getProps()).isNotNull();
}
|
/**
 * Recursively collects the dotted field names of a Salesforce result object.
 * Compound {@code SObject}s contribute their children with an extended prefix;
 * leaf nodes are added directly via {@code addField}.
 *
 * @param prefix dotted path accumulated so far (empty for the root)
 * @param fieldNames target set receiving the discovered field names
 * @param field the XML node to process
 */
void addFields( String prefix, Set<String> fieldNames, XmlObject field ) {
  // Salesforce's SOAP API always returns the Id, even when the SOQL query did
  // not request it; its value is null in that case, so skip such Id nodes.
  if ( isNullIdField( field ) ) {
    return;
  }
  final String qualifiedName = prefix + field.getName().getLocalPart();
  if ( !( field instanceof SObject ) ) {
    // Leaf node: record it along with its (string) value.
    addField( qualifiedName, fieldNames, (String) field.getValue() );
    return;
  }
  // Compound node: recurse into each child, extending the dotted prefix.
  SObject compound = (SObject) field;
  for ( XmlObject child : SalesforceConnection.getChildren( compound ) ) {
    addFields( qualifiedName + ".", fieldNames, child );
  }
}
|
/**
 * An Id field carrying a null value (always sent by the SOAP API even when not
 * requested) must not appear in the collected field names.
 */
@Test
public void testAddFields_nullIdNotAdded() throws Exception {
    final Set<String> fields = new LinkedHashSet<>();
    XmlObject testObject = createObject( "Id", null, ObjectType.XMLOBJECT );
    dialog.addFields( "", fields, testObject );
    // The set must remain empty: the null Id was filtered out.
    assertArrayEquals( "Null Id field not added", new String[]{}, fields.toArray() );
}
|
/**
 * Emits a packet on behalf of a virtual network: the virtual outbound packet
 * is translated (devirtualized) into its physical counterparts, each of which
 * is handed to the underlying packet service.
 *
 * @param networkId identity of the virtual network
 * @param packet the virtual outbound packet to emit
 */
@Override
public void emit(NetworkId networkId, OutboundPacket packet) {
    devirtualize(networkId, packet).forEach(packetService::emit);
}
|
/**
 * Emitting a virtual packet bound for VPORT_NUM1 of VDID must result in exactly
 * one physical packet, sent through DID1 with an OUTPUT instruction to PORT_NUM1.
 */
@Test
public void devirtualizePacket() {
    // Virtual outbound packet directed at virtual port VPORT_NUM1.
    TrafficTreatment tr = DefaultTrafficTreatment.builder()
        .setOutput(VPORT_NUM1).build();
    ByteBuffer data = ByteBuffer.wrap("abc".getBytes());
    OutboundPacket vOutPacket = new DefaultOutboundPacket(VDID, tr, data);
    virtualProvider.emit(VNET_ID, vOutPacket);
    assertEquals("The count should be 1", 1,
        testPacketService.getRequestedPacketCount());
    OutboundPacket pOutPacket = testPacketService.getRequestedPacket(0);
    assertEquals("The packet should be requested on DEV1", DID1,
        pOutPacket.sendThrough());
    // Extract the first OUTPUT instruction from the physical treatment.
    PortNumber outPort = pOutPacket.treatment()
        .allInstructions()
        .stream()
        .filter(i -> i.type() == Instruction.Type.OUTPUT)
        .map(i -> (Instructions.OutputInstruction) i)
        .map(i -> i.port())
        .findFirst().get();
    assertEquals("The packet should be out at PORT1 of DEV1", PORT_NUM1,
        outPort);
}
|
/**
 * Applies the SetSchemaMetadata transform: rebuilds the record's schema with
 * the configured name and/or version overrides while preserving every other
 * schema attribute, then re-emits the record with the updated schema.
 *
 * @param record the record whose operating schema metadata is updated
 * @return the record with the rewritten schema, or the original record for
 *     tombstone-like input (null value and null schema)
 */
@Override
public R apply(R record) {
    final Object value = operatingValue(record);
    final Schema schema = operatingSchema(record);
    // Tombstone-like records (no value and no schema) pass through untouched.
    if (value == null && schema == null) {
        return record;
    }
    // A non-null value without a schema cannot have its metadata updated.
    requireSchema(schema, "updating schema metadata");
    final boolean isArray = schema.type() == Schema.Type.ARRAY;
    final boolean isMap = schema.type() == Schema.Type.MAP;
    // Copy the schema, substituting the overridden name/version when configured.
    // Key/value sub-schemas only exist for maps (key+value) and arrays (value).
    final Schema updatedSchema = new ConnectSchema(
        schema.type(),
        schema.isOptional(),
        schema.defaultValue(),
        schemaName != null ? schemaName : schema.name(),
        schemaVersion != null ? schemaVersion : schema.version(),
        schema.doc(),
        schema.parameters(),
        schema.fields(),
        isMap ? schema.keySchema() : null,
        isMap || isArray ? schema.valueSchema() : null
    );
    log.trace("Applying SetSchemaMetadata SMT. Original schema: {}, updated schema: {}",
        schema, updatedSchema);
    return newRecord(record, updatedSchema);
}
|
/**
 * A record with a non-null value (42) but no value schema must be rejected by
 * the transform with a {@code DataException}.
 */
@Test
public void valueSchemaRequired() {
    final SinkRecord record = new SinkRecord("", 0, null, null, null, 42, 0);
    assertThrows(DataException.class, () -> xform.apply(record));
}
|
/**
 * Wraps rendered HTML in a {@code <div class="markdown-body">} container so
 * markdown-specific CSS rules apply to it.
 *
 * @param html the rendered HTML fragment; inserted verbatim between the tags
 * @return the fragment surrounded by the markdown-body div, with a newline
 *     after the opening tag and before the closing tag
 */
public static String wrapWithMarkdownClassDiv(String html) {
    return "<div class=\"markdown-body\">\n" + html + "\n</div>";
}
|
/**
 * Markdown strong emphasis (**...**) must render to a <strong> element inside
 * the markdown-body wrapper div.
 */
@Test
void testStrongEmphasis() {
    InterpreterResult result = md.interpret("This is **strong emphasis** text", null);
    assertEquals(
        wrapWithMarkdownClassDiv("<p>This is <strong>strong emphasis</strong> text</p>\n"),
        result.message().get(0).getData());
}
|
/**
 * Resolves a child node by broker id. The name must be the decimal id of a
 * broker registered in the image; anything else (non-numeric name or unknown
 * id) yields {@code null}.
 *
 * @param name decimal broker id
 * @return a leaf node holding the registration's string form, or null
 */
@Override
public MetadataNode child(String name) {
    final Integer brokerId;
    try {
        brokerId = Integer.valueOf(name);
    } catch (NumberFormatException e) {
        // Non-numeric names cannot address a broker.
        return null;
    }
    BrokerRegistration registration = image.brokers().get(brokerId);
    return registration == null ? null : new MetadataLeafNode(registration.toString());
}
|
/**
 * Looking up child "1" must return a leaf node whose stringified form is the
 * full BrokerRegistration toString of broker 1.
 */
@Test
public void testNode1Child() {
    MetadataNode child = NODE.child("1");
    assertNotNull(child);
    // Exact string match against the expected registration representation.
    assertEquals("BrokerRegistration(id=1, epoch=1001, " +
        "incarnationId=MJkaH0j0RwuC3W2GHQHtWA, " +
        "listeners=[], " +
        "supportedFeatures={metadata.version: 1-4}, " +
        "rack=Optional.empty, " +
        "fenced=false, " +
        "inControlledShutdown=false, " +
        "isMigratingZkBroker=false, " +
        "directories=[JsnDDNVyTL289kYk6sPzig, anCdBWcFTlu8gE1wP6bh3g])",
        child.stringify());
}
|
/**
 * Runs every known {@link Rule} pattern against the given log and collects a
 * {@link Result} for each rule whose pattern matches.
 *
 * <p>NOTE(review): the method name is misspelled ("anaylze" instead of
 * "analyze") but is part of the public API and referenced by callers, so it is
 * intentionally kept as-is.
 *
 * @param log the raw crash-report/log text to scan
 * @return the set of matching results; empty when no rule matched
 */
public static Set<Result> anaylze(String log) {
    Set<Result> matched = new HashSet<>();
    for (Rule rule : Rule.values()) {
        Matcher matcher = rule.pattern.matcher(log);
        if (!matcher.find()) {
            continue;
        }
        matched.add(new Result(rule, log, matcher));
    }
    return matched;
}
|
/**
 * The shaders-mod crash log must be matched by the SHADERS_MOD rule.
 */
@Test
public void shadersMod() throws IOException {
    // NOTE(review): the result is never asserted on; presumably
    // findResultByRule fails the test internally when no SHADERS_MOD result is
    // present — confirm, otherwise an explicit assertNotNull(result) is needed.
    CrashReportAnalyzer.Result result = findResultByRule(
        CrashReportAnalyzer.anaylze(loadLog("/logs/shaders_mod.txt")),
        CrashReportAnalyzer.Rule.SHADERS_MOD);
}
|
/**
 * Looks up the cached {@link ServiceInfo} for the given key.
 *
 * @param key service key to look up
 * @return the cached service info, or a fresh empty one carrying the key as
 *     its name when nothing is cached (the placeholder is NOT stored)
 */
public ServiceInfo getService(String key) {
    ServiceInfo cached = serviceMap.get(key);
    if (cached != null) {
        return cached;
    }
    ServiceInfo placeholder = new ServiceInfo();
    placeholder.setName(key);
    return placeholder;
}
|
/**
 * When the failover switch flips from enabled to disabled, the refresh task
 * must replace the failover service map, so a later lookup no longer returns
 * the previously injected ServiceInfo instance.
 */
@Test
void testRefreshFromEnabledToDisabled() throws InterruptedException, NoSuchFieldException, IllegalAccessException {
    // make sure the first no delay refresh thread finished.
    TimeUnit.MILLISECONDS.sleep(500);
    // Next poll of the failover data source reports the switch as disabled.
    FailoverSwitch mockFailoverSwitch = new FailoverSwitch(false);
    when(failoverDataSource.getSwitch()).thenReturn(mockFailoverSwitch);
    // Force the reactor into the "enabled" state via reflection so the refresh
    // observes an enabled -> disabled transition.
    Field failoverSwitchEnableField = FailoverReactor.class.getDeclaredField("failoverSwitchEnable");
    failoverSwitchEnableField.setAccessible(true);
    failoverSwitchEnableField.set(failoverReactor, true);
    // Seed the service map with a known instance we expect to be replaced.
    Map<String, ServiceInfo> map = new HashMap<>();
    ServiceInfo serviceInfo = new ServiceInfo("a@@b");
    serviceInfo.addHost(new Instance());
    map.put("a@@b", serviceInfo);
    when(holder.getServiceInfoMap()).thenReturn(map);
    Field serviceMapField = FailoverReactor.class.getDeclaredField("serviceMap");
    serviceMapField.setAccessible(true);
    serviceMapField.set(failoverReactor, map);
    // waiting refresh thread work
    TimeUnit.MILLISECONDS.sleep(5500);
    ServiceInfo actual = failoverReactor.getService("a@@b");
    // The refresh must have swapped the entry: not the same object we seeded.
    assertNotEquals(serviceInfo, actual);
}
|
/**
 * Parses an IPFIX message just deeply enough to split it into its sets
 * (template, options-template and data sets) without decoding the records.
 *
 * @param packet buffer containing exactly one complete IPFIX message
 * @return the message description holding the header and the shallow-parsed sets
 * @throws IllegalArgumentException if the buffer does not hold the full message
 * @throws IpfixException if a set carries an invalid set id (0 or 1)
 */
public MessageDescription shallowParseMessage(ByteBuf packet) {
    final ByteBuf buffer = packet.readSlice(MessageHeader.LENGTH);
    LOG.debug("Shallow parse header\n{}", ByteBufUtil.prettyHexDump(buffer));
    final MessageHeader header = parseMessageHeader(buffer);
    final MessageDescription messageDescription = new MessageDescription(header);
    // sanity check: we need the complete packet in the buffer
    // (header.length() covers the whole message incl. the header just consumed)
    if (header.length() != packet.readableBytes() + MessageHeader.LENGTH) {
        throw new IllegalArgumentException("Buffer does not contain the complete IPFIX message");
    }
    // loop over all the contained sets in the message
    while (packet.isReadable()) {
        final int setId = packet.readUnsignedShort();
        final int setLength = packet.readUnsignedShort();
        // the buffer limited to the declared length of the set.
        // (setLength includes the 4-byte set header already consumed above)
        final ByteBuf setContent = packet.readSlice(setLength - 4);
        switch (setId) {
            case 0:
            case 1:
                // ids 0/1 are not valid IPFIX set ids
                throw new IpfixException("Invalid set id in IPFIX message: " + setId);
            case 2:
                // template set
                final ShallowTemplateSet templateSet = shallowParseTemplateSet(setContent);
                messageDescription.addTemplateSet(templateSet);
                break;
            case 3:
                // options-template set
                final ShallowOptionsTemplateSet optionsTemplateSet = shallowParseOptionsTemplateSet(setContent);
                messageDescription.addOptionsTemplateSet(optionsTemplateSet);
                break;
            default:
                // any other id is treated as a data set referencing a template id
                final ShallowDataSet dataSet = shallowParseDataSet(setId, setLength, setContent, header.exportTime());
                messageDescription.addDataSet(dataSet);
                break;
        }
    }
    return messageDescription;
}
|
/**
 * Shallow-parsing the Ixia multilist capture must succeed and surface template
 * id 256 among the referenced template ids.
 */
@Test
public void shallowParsePacketMultilist() throws IOException {
    ByteBuf packet = Utils.readPacket("ixia-multilist.ipfix");
    // IANA elements plus the vendor-specific Ixia element definitions.
    InformationElementDefinitions definitions = new InformationElementDefinitions(
        Resources.getResource("ipfix-iana-elements.json"),
        Resources.getResource("ixia-ied.json")
    );
    final IpfixParser.MessageDescription description = new IpfixParser(definitions).shallowParseMessage(packet);
    assertThat(description).isNotNull();
    // this also refers to template id 257, but because we don't have the data-template set for 256 we don't know that
    // there is a multilist element which then refers to 257 (we'd have to parse the data set first to know that)
    assertThat(description.referencedTemplateIds()).contains(256);
}
|
/**
 * Executes an INSERT VALUES statement: resolves the target data source,
 * validates the inserted columns, builds the serialized producer record and
 * sends it to the source's Kafka topic. All failures are rethrown as
 * {@link KsqlException} with an insert-failed message; authorization failures
 * get a more descriptive root cause first.
 *
 * @param statement the configured INSERT VALUES statement
 * @param sessionProperties session-scoped properties (part of required API)
 * @param executionContext engine context supplying the metastore
 * @param serviceContext per-request service clients (Kafka, SR, ...)
 */
@SuppressWarnings("unused") // Part of required API.
public void execute(
    final ConfiguredStatement<InsertValues> statement,
    final SessionProperties sessionProperties,
    final KsqlExecutionContext executionContext,
    final ServiceContext serviceContext
) {
    final InsertValues insertValues = statement.getStatement();
    final MetaStore metaStore = executionContext.getMetaStore();
    final KsqlConfig config = statement.getSessionConfig().getConfig(true);
    final DataSource dataSource = getDataSource(config, metaStore, insertValues);
    validateInsert(insertValues.getColumns(), dataSource);
    // Serialize key/value per the source's formats before producing.
    final ProducerRecord<byte[], byte[]> record =
        buildRecord(statement, metaStore, dataSource, serviceContext);
    try {
        producer.sendRecord(record, serviceContext, config.getProducerClientConfigProps());
    } catch (final TopicAuthorizationException e) {
        // TopicAuthorizationException does not give much detailed information about why it failed,
        // except which topics are denied. Here we just add the ACL to make the error message
        // consistent with other authorization error messages.
        final Exception rootCause = new KsqlTopicAuthorizationException(
            AclOperation.WRITE,
            e.unauthorizedTopics()
        );
        throw new KsqlException(createInsertFailedExceptionMessage(insertValues), rootCause);
    } catch (final ClusterAuthorizationException e) {
        // ClusterAuthorizationException is thrown when using idempotent producers
        // and either a topic write permission or a cluster-level idempotent write
        // permission (only applicable for broker versions no later than 2.8) is
        // missing. In this case, we include additional context to help the user
        // distinguish this type of failure from other permissions exceptions
        // such as the ones thrown above when TopicAuthorizationException is caught.
        throw new KsqlException(
            createInsertFailedExceptionMessage(insertValues),
            createClusterAuthorizationExceptionRootCause(dataSource)
        );
    } catch (final KafkaException e) {
        if (e.getCause() != null && e.getCause() instanceof ClusterAuthorizationException) {
            // The error message thrown when an idempotent producer is missing permissions
            // is (nondeterministically) inconsistent: it is either a raw ClusterAuthorizationException,
            // as checked for above, or a ClusterAuthorizationException wrapped inside a KafkaException.
            // ksqlDB handles these two the same way, accordingly.
            // See https://issues.apache.org/jira/browse/KAFKA-14138 for more.
            throw new KsqlException(
                createInsertFailedExceptionMessage(insertValues),
                createClusterAuthorizationExceptionRootCause(dataSource)
            );
        } else {
            throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
        }
    } catch (final Exception e) {
        // Catch-all: preserve the original cause under the standard message.
        throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
    }
}
|
/**
 * A 401 from Schema Registry while fetching the key schema must surface as a
 * KsqlException whose message names the denied SR subject.
 */
@Test
public void shouldThrowWhenNotAuthorizedToReadKeySchemaFromSR() throws Exception {
    // Given
    // Avro key+value source so the executor must consult Schema Registry.
    givenDataSourceWithSchema(
        TOPIC_NAME,
        SCHEMA,
        SerdeFeatures.of(SerdeFeature.UNWRAP_SINGLES),
        SerdeFeatures.of(),
        FormatInfo.of(FormatFactory.AVRO.name()),
        FormatInfo.of(FormatFactory.AVRO.name()),
        false,
        false);
    final ConfiguredStatement<InsertValues> statement = givenInsertValues(
        allColumnNames(SCHEMA),
        ImmutableList.of(
            new StringLiteral("key"),
            new StringLiteral("str"),
            new LongLiteral(2L)
        )
    );
    // SR denies reading the key subject ("<topic>-key") with HTTP 401.
    when(srClient.getLatestSchemaMetadata(TOPIC_NAME + "-key")).thenThrow(
        new RestClientException("User is denied operation Read on topic-key", 401, 1)
    );
    // When:
    final Exception e = assertThrows(
        KsqlException.class,
        () -> executor.execute(statement, mock(SessionProperties.class), engine, serviceContext)
    );
    // Then:
    assertThat(e.getMessage(), containsString(
        "Authorization denied to Read on Schema Registry subject: ["
        + KsqlConstants.getSRSubject(TOPIC_NAME, true)));
}
|
/**
 * Runs the CA reconciliation pipeline sequentially: reconcile the CAs, verify
 * the cluster CA is fully trusted and used, reconcile the Cluster Operator
 * secret, roll pods for a new CA key when needed, and finally remove obsolete
 * cluster CA certificates. Each step only runs if the previous succeeded.
 *
 * @param clock clock used for certificate validity decisions
 * @return future completing with the reconciled cluster and clients CAs
 */
public Future<CaReconciliationResult> reconcile(Clock clock) {
    return reconcileCas(clock)
        .compose(i -> verifyClusterCaFullyTrustedAndUsed())
        .compose(i -> reconcileClusterOperatorSecret(clock))
        .compose(i -> rollingUpdateForNewCaKey())
        .compose(i -> maybeRemoveOldClusterCaCertificates())
        .map(i -> new CaReconciliationResult(clusterCa, clientsCa));
}
|
/**
 * With {@code generateSecretOwnerReference=false} on the clients CA, the
 * clients CA secrets must carry no owner reference while the cluster CA
 * secrets still reference the Kafka resource as owner.
 */
@Test
public void testClientsCASecretsWithoutOwnerReference(Vertx vertx, VertxTestContext context) {
    // Kafka CR with owner references disabled for the clients CA only.
    CertificateAuthority caConfig = new CertificateAuthority();
    caConfig.setGenerateSecretOwnerReference(false);
    Kafka kafka = new KafkaBuilder(KAFKA)
        .editSpec()
        .withClientsCa(caConfig)
        .endSpec()
        .build();
    // The owner reference the cluster CA secrets are expected to carry.
    OwnerReference ownerReference = new OwnerReferenceBuilder()
        .withKind(kafka.getKind())
        .withApiVersion(kafka.getApiVersion())
        .withName(kafka.getMetadata().getName())
        .withBlockOwnerDeletion(false)
        .withController(false)
        .build();
    // Mock secret/pod operators; capture every reconciled CA secret.
    ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);
    SecretOperator secretOps = supplier.secretOperations;
    PodOperator podOps = supplier.podOperations;
    ArgumentCaptor<Secret> clusterCaCert = ArgumentCaptor.forClass(Secret.class);
    ArgumentCaptor<Secret> clusterCaKey = ArgumentCaptor.forClass(Secret.class);
    ArgumentCaptor<Secret> clientsCaCert = ArgumentCaptor.forClass(Secret.class);
    ArgumentCaptor<Secret> clientsCaKey = ArgumentCaptor.forClass(Secret.class);
    when(secretOps.reconcile(any(), eq(NAMESPACE), eq(AbstractModel.clusterCaCertSecretName(NAME)), clusterCaCert.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0))));
    when(secretOps.reconcile(any(), eq(NAMESPACE), eq(AbstractModel.clusterCaKeySecretName(NAME)), clusterCaKey.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0))));
    when(secretOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.clientsCaCertificateSecretName(NAME)), clientsCaCert.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0))));
    when(secretOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.clientsCaKeySecretName(NAME)), clientsCaKey.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0))));
    when(secretOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.clusterOperatorCertsSecretName(NAME)), any())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0))));
    when(secretOps.listAsync(eq(NAMESPACE), any(Labels.class))).thenReturn(Future.succeededFuture(List.of()));
    when(podOps.listAsync(eq(NAMESPACE), any(Labels.class))).thenReturn(Future.succeededFuture(List.of()));
    Reconciliation reconciliation = new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, NAME);
    Checkpoint async = context.checkpoint();
    // Run the CA reconciliation and assert on the captured secrets.
    new CaReconciler(reconciliation, kafka, new ClusterOperatorConfig.ClusterOperatorConfigBuilder(ResourceUtils.dummyClusterOperatorConfig(), KafkaVersionTestUtils.getKafkaVersionLookup()).with(ClusterOperatorConfig.OPERATION_TIMEOUT_MS.key(), "1").build(),
        supplier, vertx, CERT_MANAGER, PASSWORD_GENERATOR)
        .reconcile(Clock.systemUTC())
        .onComplete(context.succeeding(c -> context.verify(() -> {
            assertThat(clusterCaCert.getAllValues(), hasSize(1));
            assertThat(clusterCaKey.getAllValues(), hasSize(1));
            assertThat(clientsCaCert.getAllValues(), hasSize(1));
            assertThat(clientsCaKey.getAllValues(), hasSize(1));
            Secret clusterCaCertSecret = clusterCaCert.getValue();
            Secret clusterCaKeySecret = clusterCaKey.getValue();
            Secret clientsCaCertSecret = clientsCaCert.getValue();
            Secret clientsCaKeySecret = clientsCaKey.getValue();
            // Cluster CA secrets are owned by the Kafka CR; clients CA secrets are not.
            assertThat(clusterCaCertSecret.getMetadata().getOwnerReferences(), hasSize(1));
            assertThat(clusterCaKeySecret.getMetadata().getOwnerReferences(), hasSize(1));
            assertThat(clientsCaCertSecret.getMetadata().getOwnerReferences(), hasSize(0));
            assertThat(clientsCaKeySecret.getMetadata().getOwnerReferences(), hasSize(0));
            assertThat(clusterCaCertSecret.getMetadata().getOwnerReferences().get(0), is(ownerReference));
            assertThat(clusterCaKeySecret.getMetadata().getOwnerReferences().get(0), is(ownerReference));
            async.flag();
        })));
}
|
/**
 * REST endpoint returning the cluster information as JSON or XML (UTF-8).
 * Delegates to {@code getClusterInfo()}.
 *
 * @return the current cluster info
 */
@GET
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
@Override
public ClusterInfo get() {
    return getClusterInfo();
}
|
/**
 * GET /ws/v1/cluster must answer with JSON (UTF-8) containing valid cluster info.
 */
@Test
public void testClusterDefault() throws JSONException, Exception {
    WebResource r = resource();
    // NOTE(review): the original comment claimed this tests a trailing "/",
    // but no trailing slash is appended to the path here — verify intent.
    ClientResponse response = r.path("ws").path("v1").path("cluster")
        .get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
        response.getType().toString());
    JSONObject json = response.getEntity(JSONObject.class);
    verifyClusterInfo(json);
}
|
/**
 * Returns the latest base files within the given commit range, preferring the
 * primary view and falling back to the secondary view on failure (the
 * failover policy lives in {@code execute}).
 *
 * @param commitsToReturn commits delimiting the range
 * @return stream of the latest base files in the range
 */
@Override
public Stream<HoodieBaseFile> getLatestBaseFilesInRange(List<String> commitsToReturn) {
    return execute(commitsToReturn, preferredView::getLatestBaseFilesInRange, (commits) -> getSecondaryView().getLatestBaseFilesInRange(commits));
}
|
/**
 * Exercises the primary/secondary failover of getLatestBaseFilesInRange:
 * primary success (secondary untouched), primary failure (falls back to
 * secondary), secondary-only after failover, and secondary failure (rethrows).
 */
@Test
public void testGetLatestBaseFilesInRange() {
    Stream<HoodieBaseFile> actual;
    // NOTE(review): assertEquals on Streams relies on the same stream instance
    // being returned, not on element-wise equality.
    Stream<HoodieBaseFile> expected = testBaseFileStream;
    List<String> commitsToReturn = Collections.singletonList("/table2");
    // 1) Primary succeeds: secondary supplier must never be consulted.
    when(primary.getLatestBaseFilesInRange(commitsToReturn)).thenReturn(testBaseFileStream);
    actual = fsView.getLatestBaseFilesInRange(commitsToReturn);
    assertEquals(expected, actual);
    verify(secondaryViewSupplier, never()).get();
    resetMocks();
    // 2) Primary throws: the view fails over to the secondary.
    when(secondaryViewSupplier.get()).thenReturn(secondary);
    when(primary.getLatestBaseFilesInRange(commitsToReturn)).thenThrow(new RuntimeException());
    when(secondary.getLatestBaseFilesInRange(commitsToReturn)).thenReturn(testBaseFileStream);
    actual = fsView.getLatestBaseFilesInRange(commitsToReturn);
    assertEquals(expected, actual);
    resetMocks();
    // 3) After failover, subsequent calls go straight to the secondary.
    when(secondary.getLatestBaseFilesInRange(commitsToReturn)).thenReturn(testBaseFileStream);
    actual = fsView.getLatestBaseFilesInRange(commitsToReturn);
    assertEquals(expected, actual);
    resetMocks();
    // 4) Secondary failure propagates to the caller.
    when(secondary.getLatestBaseFilesInRange(commitsToReturn)).thenThrow(new RuntimeException());
    assertThrows(RuntimeException.class, () -> {
        fsView.getLatestBaseFilesInRange(commitsToReturn);
    });
}
|
/**
 * Creates the annotator implementation matching the requested annotation style.
 * JACKSON and JACKSON2 deliberately share the same Jackson2 annotator
 * (intentional fallthrough).
 *
 * @param style the requested annotation style
 * @return a new annotator configured with this factory's generation config
 * @throws IllegalArgumentException for styles without a mapping
 */
public Annotator getAnnotator(AnnotationStyle style) {
    switch (style) {
        case JACKSON:
        case JACKSON2:
            // JACKSON is an alias for JACKSON2 — fallthrough is intentional.
            return new Jackson2Annotator(generationConfig);
        case JSONB1:
            return new Jsonb1Annotator(generationConfig);
        case JSONB2:
            return new Jsonb2Annotator(generationConfig);
        case GSON:
            return new GsonAnnotator(generationConfig);
        case MOSHI1:
            return new Moshi1Annotator(generationConfig);
        case NONE:
            return new NoopAnnotator();
        default:
            throw new IllegalArgumentException("Unrecognised annotation style: " + style);
    }
}
|
/**
 * Each annotation style must map to its corresponding annotator class; JACKSON
 * and JACKSON2 both map to Jackson2Annotator.
 */
@Test
public void canCreateCorrectAnnotatorFromAnnotationStyle() {
    assertThat(factory.getAnnotator(JACKSON), is(instanceOf(Jackson2Annotator.class)));
    assertThat(factory.getAnnotator(JACKSON2), is(instanceOf(Jackson2Annotator.class)));
    assertThat(factory.getAnnotator(GSON), is(instanceOf(GsonAnnotator.class)));
    assertThat(factory.getAnnotator(MOSHI1), is(instanceOf(Moshi1Annotator.class)));
    // NOTE(review): JSONB1/JSONB2 styles are not covered here — consider adding.
    assertThat(factory.getAnnotator(NONE), is(instanceOf(NoopAnnotator.class)));
}
|
/**
 * Writes an HTTP/2 DATA payload, fragmenting it into frames of at most
 * {@code maxFrameSize} bytes and distributing the requested padding across the
 * trailing frames. All per-frame writes are aggregated into a single promise.
 *
 * <p>Reference counting is delicate here: a shared frame header buffer is
 * retained/sliced across the fast-path loop, and the {@code data} buffer is
 * nulled out once its last slice has been handed to {@code ctx.write} so the
 * failure path does not double-release it.
 *
 * @param ctx channel context used for the writes
 * @param streamId stream carrying the DATA frames
 * @param data payload buffer (ownership transferred; released on failure)
 * @param padding total padding bytes to distribute over the frames
 * @param endStream whether the last frame carries END_STREAM
 * @param promise completed once all frame writes complete
 * @return the aggregated promise
 */
@Override
public ChannelFuture writeData(ChannelHandlerContext ctx, int streamId, ByteBuf data,
    int padding, boolean endStream, ChannelPromise promise) {
    final SimpleChannelPromiseAggregator promiseAggregator =
        new SimpleChannelPromiseAggregator(promise, ctx.channel(), ctx.executor());
    ByteBuf frameHeader = null;
    try {
        verifyStreamId(streamId, STREAM_ID);
        verifyPadding(padding);
        int remainingData = data.readableBytes();
        Http2Flags flags = new Http2Flags();
        flags.endOfStream(false);
        flags.paddingPresent(false);
        // Fast path to write frames of payload size maxFrameSize first.
        if (remainingData > maxFrameSize) {
            // One shared header is built once and retained-sliced per frame.
            frameHeader = ctx.alloc().buffer(FRAME_HEADER_LENGTH);
            writeFrameHeaderInternal(frameHeader, maxFrameSize, DATA, flags, streamId);
            do {
                // Write the header.
                ctx.write(frameHeader.retainedSlice(), promiseAggregator.newPromise());
                // Write the payload.
                ctx.write(data.readRetainedSlice(maxFrameSize), promiseAggregator.newPromise());
                remainingData -= maxFrameSize;
                // Stop iterating if remainingData == maxFrameSize so we can take care of reference counts below.
            } while (remainingData > maxFrameSize);
        }
        if (padding == 0) {
            // Write the header.
            if (frameHeader != null) {
                frameHeader.release();
                frameHeader = null;
            }
            ByteBuf frameHeader2 = ctx.alloc().buffer(FRAME_HEADER_LENGTH);
            flags.endOfStream(endStream);
            writeFrameHeaderInternal(frameHeader2, remainingData, DATA, flags, streamId);
            ctx.write(frameHeader2, promiseAggregator.newPromise());
            // Write the payload.
            ByteBuf lastFrame = data.readSlice(remainingData);
            // Null out data so the catch block does not release it again.
            data = null;
            ctx.write(lastFrame, promiseAggregator.newPromise());
        } else {
            if (remainingData != maxFrameSize) {
                if (frameHeader != null) {
                    frameHeader.release();
                    frameHeader = null;
                }
            } else {
                remainingData -= maxFrameSize;
                // Write the header.
                ByteBuf lastFrame;
                if (frameHeader == null) {
                    lastFrame = ctx.alloc().buffer(FRAME_HEADER_LENGTH);
                    writeFrameHeaderInternal(lastFrame, maxFrameSize, DATA, flags, streamId);
                } else {
                    // Reuse the shared header's remaining reference via a slice.
                    lastFrame = frameHeader.slice();
                    frameHeader = null;
                }
                ctx.write(lastFrame, promiseAggregator.newPromise());
                // Write the payload.
                lastFrame = data.readableBytes() != maxFrameSize ? data.readSlice(maxFrameSize) : data;
                data = null;
                ctx.write(lastFrame, promiseAggregator.newPromise());
            }
            // Emit the remaining data plus padding, padding-bearing frames last.
            do {
                int frameDataBytes = min(remainingData, maxFrameSize);
                int framePaddingBytes = min(padding, max(0, maxFrameSize - 1 - frameDataBytes));
                // Decrement the remaining counters.
                padding -= framePaddingBytes;
                remainingData -= frameDataBytes;
                // Write the header.
                ByteBuf frameHeader2 = ctx.alloc().buffer(DATA_FRAME_HEADER_LENGTH);
                flags.endOfStream(endStream && remainingData == 0 && padding == 0);
                flags.paddingPresent(framePaddingBytes > 0);
                writeFrameHeaderInternal(frameHeader2, framePaddingBytes + frameDataBytes, DATA, flags, streamId);
                writePaddingLength(frameHeader2, framePaddingBytes);
                ctx.write(frameHeader2, promiseAggregator.newPromise());
                // Write the payload.
                if (data != null) { // Make sure Data is not null
                    if (remainingData == 0) {
                        ByteBuf lastFrame = data.readSlice(frameDataBytes);
                        data = null;
                        ctx.write(lastFrame, promiseAggregator.newPromise());
                    } else {
                        ctx.write(data.readRetainedSlice(frameDataBytes), promiseAggregator.newPromise());
                    }
                }
                // Write the frame padding.
                if (paddingBytes(framePaddingBytes) > 0) {
                    ctx.write(ZERO_BUFFER.slice(0, paddingBytes(framePaddingBytes)),
                        promiseAggregator.newPromise());
                }
            } while (remainingData != 0 || padding != 0);
        }
    } catch (Throwable cause) {
        if (frameHeader != null) {
            frameHeader.release();
        }
        // Use a try/finally here in case the data has been released before calling this method. This is not
        // necessary above because we internally allocate frameHeader.
        try {
            if (data != null) {
                data.release();
            }
        } finally {
            promiseAggregator.setFailure(cause);
            promiseAggregator.doneAllocatingPromises();
        }
        return promiseAggregator;
    }
    return promiseAggregator.doneAllocatingPromises();
}
|
/**
 * Writing an empty payload with padding must release the payload buffer and
 * produce a single DATA frame consisting only of the padding-length byte and
 * the padding itself, with END_STREAM and PADDED flags set.
 */
@Test
public void writeEmptyDataWithPadding() {
    int streamId = 1;
    ByteBuf payloadByteBuf = Unpooled.buffer();
    frameWriter.writeData(ctx, streamId, payloadByteBuf, 2, true, promise);
    // The writer takes ownership: the (empty) payload buffer must be released.
    assertEquals(0, payloadByteBuf.refCnt());
    byte[] expectedFrameBytes = {
        (byte) 0x00, (byte) 0x00, (byte) 0x02, // payload length
        (byte) 0x00, // payload type
        (byte) 0x09, // flags (END_STREAM | PADDED)
        (byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x01, // stream id
        (byte) 0x01, (byte) 0x00, // padding
    };
    expectedOutbound = Unpooled.copiedBuffer(expectedFrameBytes);
    assertEquals(expectedOutbound, outbound);
}
|
/**
 * Entry point for the bare {@code namespace} command: delegates to the parent
 * handling, then prints the sub-command usage by re-invoking the app with
 * {@code --help}.
 *
 * @return 0 — the command itself always succeeds
 */
@SneakyThrows
@Override
public Integer call() throws Exception {
    super.call();
    // Show the available "namespace" sub-commands to the user.
    PicocliRunner.call(App.class, "namespace", "--help");
    return 0;
}
|
/**
 * Invoking the namespace command without arguments must exit 0 and print its
 * usage text.
 *
 * <p>Fix: the original redirected {@code System.out} and never restored it,
 * leaking the redirection into every test that runs later in the same JVM.
 * The original stream is now restored in a finally block.
 */
@Test
void runWithNoParam() {
    // Capture stdout so the usage output can be asserted on.
    PrintStream originalOut = System.out;
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    System.setOut(new PrintStream(out));
    try (ApplicationContext ctx = ApplicationContext.builder().deduceEnvironment(false).start()) {
        String[] args = {};
        Integer call = PicocliRunner.call(NamespaceCommand.class, ctx, args);
        assertThat(call, is(0));
        assertThat(out.toString(), containsString("Usage: kestra namespace"));
    } finally {
        // Restore stdout so later tests in this JVM are unaffected.
        System.setOut(originalOut);
    }
}
|
/**
 * Asserts that the table under test contains a cell with the given row key,
 * column key and value, by delegating to the {@code Cell}-based overload.
 *
 * @param rowKey the expected row key (may be null)
 * @param colKey the expected column key (may be null)
 * @param value the expected cell value (may be null)
 */
public void containsCell(
    @Nullable Object rowKey, @Nullable Object colKey, @Nullable Object value) {
    containsCell(
        Tables.<@Nullable Object, @Nullable Object, @Nullable Object>immutableCell(
            rowKey, colKey, value));
}
|
/**
 * Both containsCell overloads — (row, col, value) and Cell — must succeed for
 * a cell present in the table.
 */
@Test
public void containsCell() {
    ImmutableTable<String, String, String> table = ImmutableTable.of("row", "col", "val");
    assertThat(table).containsCell("row", "col", "val");
    assertThat(table).containsCell(cell("row", "col", "val"));
}
|
/**
 * Derives the remote service name and remote IP/port for the span from the
 * MySQL connection's JDBC URL and properties. Any failure is swallowed because
 * the remote address is optional span data.
 *
 * @param connection the traced MySQL connection
 * @param span the span to decorate with remote endpoint data
 */
static void parseServerIpAndPort(MysqlConnection connection, Span span) {
    try {
        // Strip the "jdbc:" prefix so the remainder parses as a standard URI.
        URI url = URI.create(connection.getURL().substring(5));
        String remoteServiceName = connection.getProperties().getProperty("zipkinServiceName");
        if (remoteServiceName == null || remoteServiceName.isEmpty()) {
            // No explicit override: derive "mysql-<db>" or fall back to "mysql".
            String databaseName = getDatabaseName(connection);
            remoteServiceName = (databaseName != null && !databaseName.isEmpty())
                ? "mysql-" + databaseName
                : "mysql";
        }
        span.remoteServiceName(remoteServiceName);
        String host = getHost(connection);
        if (host != null) {
            // Default to MySQL's standard port when the URL carries none.
            int port = url.getPort() == -1 ? 3306 : url.getPort();
            span.remoteIpAndPort(host, port);
        }
    } catch (Exception ignored) {
        // remote address is optional — deliberately swallow parse failures
    }
}
|
/**
 * An empty zipkinServiceName property must be treated like an absent one:
 * the service name falls back to plain "mysql".
 */
@Test void parseServerIpAndPort_emptyZipkinServiceNameIgnored() throws SQLException {
    setupAndReturnPropertiesForHost("1.2.3.4").setProperty("zipkinServiceName", "");
    TracingStatementInterceptor.parseServerIpAndPort(connection, span);
    verify(span).remoteServiceName("mysql");
    // Port 5555 comes from the mocked connection URL, not the 3306 default.
    verify(span).remoteIpAndPort("1.2.3.4", 5555);
}
|
/**
 * Sorts the file names in descending order according to the configured
 * {@code parsers}: the first parser whose extracted keys differ decides the
 * order; files whose parsed keys are all equal (or unparseable) fall back to
 * reverse lexical comparison of the raw names.
 *
 * <p>Bug fix: the previous implementation summed the comparison results of all
 * parsers ({@code result += c2.compareTo(c1)}), which is not a valid comparator
 * — it is non-transitive and can overflow int — and could make
 * {@code Arrays.sort} throw "Comparison method violates its general contract!".
 * The parsers are now consulted lexicographically instead.
 *
 * @param filenames array sorted in place, highest/most-recent keys first
 */
@SuppressWarnings("unchecked")
void sort(String[] filenames) {
    Arrays.sort(filenames, new Comparator<String>() {
        @Override
        public int compare(String f1, String f2) {
            for (FilenameParser p : parsers) {
                Comparable c2 = p.parseFilename(f2);
                Comparable c1 = p.parseFilename(f1);
                if (c2 != null && c1 != null) {
                    int result = c2.compareTo(c1);
                    if (result != 0) {
                        // First differing key decides (descending order).
                        return result;
                    }
                }
            }
            // fallback to raw filename comparison
            return f2.compareTo(f1);
        }
    });
}
|
/**
 * Files must sort descending first by the dates extracted from the pattern's
 * %d tokens and then by the numeric %i index — so "11.log" precedes "2.log"
 * (numeric, not lexical, ordering of the index).
 */
@Test
public void sortsDescendingByDateAndIntegerWithMultipleDatesInPattern() {
    final String[] FILENAMES = new String[] {
        "/var/logs/my-app/2018-10/2018-10-31/9.log",
        "/var/logs/my-app/2019-01/2019-01-01/1.log",
        "/var/logs/my-app/1999-03/1999-03-17/3.log",
        "/var/logs/my-app/2019-01/2019-01-01/11.log",
        "/var/logs/my-app/2019-01/2019-01-01/2.log",
        "/var/logs/my-app/2016-12/2016-12-25/10.log",
    };
    // Newest date first; within the same date, highest index first.
    final String[] EXPECTED_RESULT = new String[] {
        "/var/logs/my-app/2019-01/2019-01-01/11.log",
        "/var/logs/my-app/2019-01/2019-01-01/2.log",
        "/var/logs/my-app/2019-01/2019-01-01/1.log",
        "/var/logs/my-app/2018-10/2018-10-31/9.log",
        "/var/logs/my-app/2016-12/2016-12-25/10.log",
        "/var/logs/my-app/1999-03/1999-03-17/3.log",
    };
    assertThat(sort("/var/logs/my-app/%d{yyyy-MM,aux}/%d{yyyy-MM-dd}/%i.log", FILENAMES), contains(EXPECTED_RESULT));
}
|
/**
 * Handles a streaming query request: verifies the server is configured,
 * records request activity, parses the statement, waits for the command queue
 * to catch up to the request's sequence number, then dispatches the statement.
 *
 * @param securityContext caller's security context
 * @param request the KSQL request carrying the query text
 * @param connectionClosedFuture completed when the client connection closes
 * @param isInternalRequest whether the request originated from another node
 * @param metricsCallbackHolder sink for per-request metrics callbacks
 * @param context vert.x context for the request
 * @return the endpoint response streaming the query results
 */
public EndpointResponse streamQuery(
    final KsqlSecurityContext securityContext,
    final KsqlRequest request,
    final CompletableFuture<Void> connectionClosedFuture,
    final Optional<Boolean> isInternalRequest,
    final MetricsCallbackHolder metricsCallbackHolder,
    final Context context
) {
    throwIfNotConfigured();
    activenessRegistrar.updateLastRequestTime();
    final PreparedStatement<?> statement = parseStatement(request);
    // Ensure previously issued commands are applied before running the query.
    CommandStoreUtil.httpWaitForCommandSequenceNumber(
        commandQueue, request, commandQueueCatchupTimeout);
    return handleStatement(securityContext, request, statement, connectionClosedFuture,
        isInternalRequest, metricsCallbackHolder, context);
}
|
/**
 * Serving a pull query must not lazily create any external service clients
 * (admin, connect, schema registry, topic) on the service context.
 */
@Test
public void shouldNotCreateExternalClientsForPullQuery() {
    // Given:
    when(mockKsqlEngine.getKsqlConfig()).thenReturn(new KsqlConfig(ImmutableMap.of(
        StreamsConfig.APPLICATION_SERVER_CONFIG, "something:1"
    )));
    // When:
    testResource.streamQuery(
        securityContext,
        new KsqlRequest(PULL_QUERY_STRING, Collections.emptyMap(), Collections.emptyMap(), null),
        new CompletableFuture<>(),
        Optional.empty(),
        new MetricsCallbackHolder(),
        context
    );
    // Then:
    verify(serviceContext, never()).getAdminClient();
    verify(serviceContext, never()).getConnectClient();
    verify(serviceContext, never()).getSchemaRegistryClient();
    verify(serviceContext, never()).getTopicClient();
}
|
/**
 * Windowed left join taking a key-unaware {@link ValueJoiner}; adapts it to the
 * key-aware form and delegates to the corresponding overload.
 */
@Override
public <VO, VR> KStream<K, VR> leftJoin(final KStream<K, VO> otherStream,
                                        final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
                                        final JoinWindows windows) {
    return leftJoin(otherStream, toValueJoinerWithKey(joiner), windows);
}
|
// A null joiner on a stream-table left join (with a Joined config) must be rejected
// with an NPE whose message names the offending parameter.
@Test
public void shouldNotAllowNullValueJoinerOnTableLeftJoinWithJoined() {
    final NullPointerException exception = assertThrows(
        NullPointerException.class,
        () -> testStream.leftJoin(testTable, (ValueJoiner<? super String, ? super String, ?>) null, Joined.as("name")));
    assertThat(exception.getMessage(), equalTo("joiner can't be null"));
}
|
/**
 * Returns the largest non-null element of {@code input}, or {@code null} when the
 * list is null or contains no non-null elements. Null elements are skipped.
 */
@Udf
public <T extends Comparable<? super T>> T arrayMax(@UdfParameter(
    description = "Array of values from which to find the maximum") final List<T> input) {
  if (input == null) {
    return null;
  }
  T max = null;
  for (final T value : input) {
    if (value == null) {
      // Nulls never participate in the comparison.
      continue;
    }
    if (max == null || value.compareTo(max) > 0) {
      max = value;
    }
  }
  return max;
}
|
// Maximum of a BIGINT (Long) array, including a negative element.
@Test
public void shouldFindBigIntMax() {
    final List<Long> input = Arrays.asList(1L, 3L, -2L);
    assertThat(udf.arrayMax(input), is(Long.valueOf(3)));
}
|
/**
 * This retriever sends no request body, so the content is always {@code null}.
 */
@Nullable
@Override
public BlobHttpContent getContent() {
    return null;
}
|
// The retriever advertises no request content.
@Test
public void testGetContent() {
    Assert.assertNull(testAuthenticationMethodRetriever.getContent());
}
|
/**
 * Serializes the supplied multi-value map to JSON after collapsing each entry to
 * its single-value form (i.e. the first value of each key).
 *
 * @param supplier supplies the multi-value map to convert
 * @return the JSON representation of the collapsed map
 */
public static <K, V> String toMap(final Supplier<MultiValueMap<K, V>> supplier) {
    final MultiValueMap<K, V> multiValueMap = supplier.get();
    return GsonUtils.getInstance().toJson(multiValueMap.toSingleValueMap());
}
|
// Duplicate keys collapse to their first value: "a" keeps "1", "2" is dropped.
@Test
public void testToMap() {
    final MultiValueMap<String, String> args = new LinkedMultiValueMap<>();
    args.add("a", "1");
    args.add("a", "2");
    args.add("b", "3");
    String actual = HttpParamConverter.toMap(() -> args);
    assertEquals("{\"a\":\"1\",\"b\":\"3\"}", actual);
}
|
/**
 * Builds an environment variable whose value is taken from a Kubernetes Secret key
 * (a {@code valueFrom.secretKeyRef} reference rather than a literal value).
 *
 * @param name   name of the environment variable
 * @param secret name of the Secret holding the value
 * @param key    key within the Secret
 * @return the constructed {@code EnvVar}
 */
public static EnvVar createEnvVarFromSecret(String name, String secret, String key) {
    return new EnvVarBuilder()
            .withName(name)
            .withNewValueFrom()
                .withNewSecretKeyRef()
                    .withName(secret)
                    .withKey(key)
                .endSecretKeyRef()
            .endValueFrom()
            .build();
}
|
// The built EnvVar carries the variable name plus a secretKeyRef to the given secret/key.
@Test
public void testCreateEnvVarFromSecret() {
    EnvVar var = ContainerUtils.createEnvVarFromSecret("VAR_1", "my-secret", "my-key");
    assertThat(var.getName(), is("VAR_1"));
    assertThat(var.getValueFrom().getSecretKeyRef().getName(), is("my-secret"));
    assertThat(var.getValueFrom().getSecretKeyRef().getKey(), is("my-key"));
}
|
/**
 * Sets the counter to {@code newValue} via the COUNTER updater.
 * NOTE(review): COUNTER is presumably an AtomicLongFieldUpdater (or similar) over
 * this instance's value field — confirm against the field declaration.
 */
@Override
public void set(long newValue) {
    COUNTER.set(this, newValue);
}
|
// A set value must be observable via get().
@Test
public void set() {
    counter.set(100_000);
    assertEquals(100_000, counter.get());
}
|
/**
 * Formats an expression with default options: the {@code s -> false} predicate
 * means no identifier is treated as requiring special handling by FormatOptions.
 */
public static String formatExpression(final Expression expression) {
  return formatExpression(expression, FormatOptions.of(s -> false));
}
|
// IN predicates render as "(value IN (list...))".
@Test
public void shouldFormatInPredicate() {
  final InPredicate predicate = new InPredicate(
      new StringLiteral("foo"),
      new InListExpression(ImmutableList.of(new StringLiteral("a"))));
  assertThat(ExpressionFormatter.formatExpression(predicate), equalTo("('foo' IN ('a'))"));
}
|
/**
 * Fluent setter for the panel identifier; returns this panel for chaining.
 */
public PropertyPanel id(String id) {
    this.id = id;
    return this;
}
|
// The fluent id(..) setter must be reflected by the id() accessor.
@Test
public void setId() {
    basic();
    pp.id(SOME_IDENTIFICATION);
    assertEquals("wrong id", SOME_IDENTIFICATION, pp.id());
}
|
/**
 * Persists a release message to the database for the given channel.
 * Only {@code Topics.APOLLO_RELEASE_TOPIC} is supported; other channels are logged
 * and ignored. The saved message id is offered to the {@code toClean} queue so the
 * cleanup routine can later purge it; the save is wrapped in a Tracer transaction
 * and any failure is rethrown after marking the transaction failed.
 */
@Override
@Transactional
public void sendMessage(String message, String channel) {
  logger.info("Sending message {} to channel {}", message, channel);
  if (!Objects.equals(channel, Topics.APOLLO_RELEASE_TOPIC)) {
    // Best-effort guard: unsupported channels are a no-op, not an error.
    logger.warn("Channel {} not supported by DatabaseMessageSender!", channel);
    return;
  }
  Tracer.logEvent("Apollo.AdminService.ReleaseMessage", message);
  Transaction transaction = Tracer.newTransaction("Apollo.AdminService", "sendMessage");
  try {
    ReleaseMessage newMessage = releaseMessageRepository.save(new ReleaseMessage(message));
    // offer() is non-blocking; a full queue only delays cleanup, so just warn.
    if(!toClean.offer(newMessage.getId())){
      logger.warn("Queue is full, Failed to add message {} to clean queue", newMessage.getId());
    }
    transaction.setStatus(Transaction.SUCCESS);
  } catch (Throwable ex) {
    logger.error("Sending message to database failed", ex);
    transaction.setStatus(ex);
    throw ex;
  } finally {
    transaction.complete();
  }
}
|
// An unsupported topic must be ignored: nothing is persisted.
@Test
public void testSendUnsupportedMessage() throws Exception {
  String someMessage = "some-message";
  String someUnsupportedTopic = "some-invalid-topic";
  messageSender.sendMessage(someMessage, someUnsupportedTopic);
  verify(releaseMessageRepository, never()).save(any(ReleaseMessage.class));
}
|
/**
 * Around-advice that pushes the method's {@code @DataPermission} annotation (when
 * present) onto the context holder for the duration of the invocation, and pops it
 * afterwards. Push and pop are balanced: the pop only happens when a push occurred.
 */
@Override
public Object invoke(MethodInvocation methodInvocation) throws Throwable {
    // Push the annotation onto the context stack
    DataPermission dataPermission = this.findAnnotation(methodInvocation);
    if (dataPermission != null) {
        DataPermissionContextHolder.add(dataPermission);
    }
    try {
        // Proceed with the actual invocation
        return methodInvocation.proceed();
    } finally {
        // Pop the annotation off the context stack
        if (dataPermission != null) {
            DataPermissionContextHolder.remove();
        }
    }
}
|
@Test // The @DataPermission annotation is present on the Method itself
public void testInvoke_method() throws Throwable {
    // Prepare the mocked invocation
    mockMethodInvocation(TestMethod.class);
    // Invoke
    Object result = interceptor.invoke(methodInvocation);
    // Assert: proceed() result is passed through and the annotation was cached (disabled variant)
    assertEquals("method", result);
    assertEquals(1, interceptor.getDataPermissionCache().size());
    assertFalse(CollUtil.getFirst(interceptor.getDataPermissionCache().values()).enable());
}
|
/**
 * Kahn's-algorithm topological sort of the model nodes: starting from the roots,
 * repeatedly emits a node and releases every node whose last dependency it was.
 *
 * @return the nodes in dependency order
 * @throws IllegalArgumentException if a cycle prevents all nodes from being sorted
 */
public List<ModelNode> topologicalSort() {
    DependencyMap dependencies = new DependencyMap(modelNodes);
    Queue<ModelNode> ready = new LinkedList<>(roots);
    List<ModelNode> ordered = new ArrayList<>();
    while (!ready.isEmpty()) {
        ModelNode current = ready.remove();
        ordered.add(current);
        for (ModelNode candidate : modelNodes) {
            if (dependencies.dependsOn(candidate, current)) {
                dependencies.removeDependency(candidate, current);
                // A node becomes ready once its final dependency is removed.
                if (!dependencies.hasDependencies(candidate)) {
                    ready.add(candidate);
                }
            }
        }
    }
    // Any node still holding dependencies was never released — the graph is cyclic.
    for (ModelNode node : modelNodes) {
        if (dependencies.hasDependencies(node)) {
            throw new IllegalArgumentException("Unable to sort graph because it contains cycles");
        }
    }
    return ordered;
}
|
// Sorting a graph with builders B-C and B-A yields A before C; C's optional
// collection dependency stays empty while its single dependency resolves to A.
@Test
void require_that_collections_can_be_empty() {
    ModelGraph graph = new ModelGraphBuilder().addBuilder(new GraphMock.BC()).addBuilder(new GraphMock.BA()).build();
    List<ModelNode> nodes = graph.topologicalSort();
    MockRoot root = new MockRoot();
    GraphMock.A a = (GraphMock.A) nodes.get(0).createModel(ConfigModelContext.create(root.getDeployState(), null, null, root, "first"));
    GraphMock.C c = (GraphMock.C) nodes.get(1).createModel(ConfigModelContext.create(root.getDeployState(), null, null, root, "second"));
    assertEquals(a, c.a);
    assertTrue(c.b.isEmpty());
}
|
/**
 * Returns this tab's numeric identifier; expected to be unique across tab types
 * (see checkIdDuplication).
 */
public abstract int getTabId();
|
// Every Tab.Type must expose a distinct tab id; a Set add failure flags a duplicate.
@Test
public void checkIdDuplication() {
    final Set<Integer> usedIds = new HashSet<>();
    for (final Tab.Type type : Tab.Type.values()) {
        final boolean added = usedIds.add(type.getTabId());
        assertTrue("Id was already used: " + type.getTabId(), added);
    }
}
|
/**
 * Filters the candidate instances for {@code targetName}. Without request data the
 * call is delegated unchanged; instances that should not be handled pass through
 * untouched. Otherwise routing is done either by the request itself or by the
 * configured rules, depending on {@code routerConfig.isUseRequestRouter()}.
 */
@Override
public List<Object> handle(String targetName, List<Object> instances, RequestData requestData) {
    if (requestData == null) {
        return super.handle(targetName, instances, null);
    }
    if (!shouldHandle(instances)) {
        return instances;
    }
    final List<Object> filtered;
    if (routerConfig.isUseRequestRouter()) {
        filtered = getTargetInstancesByRequest(targetName, instances, requestData.getTag());
    } else {
        filtered = getTargetInstancesByRules(targetName, instances, requestData.getPath(), requestData.getTag());
    }
    return super.handle(targetName, filtered, requestData);
}
|
// With a flow-match rule loaded, a header that matches no rule tag routes to the
// fallback instance set (only instance1 here); the rule cache is reset afterwards.
@Test
public void testGetMismatchInstances() {
    RuleInitializationUtils.initFlowMatchRule();
    List<Object> instances = new ArrayList<>();
    ServiceInstance instance1 = TestDefaultServiceInstance.getTestDefaultServiceInstance("1.0.0");
    instances.add(instance1);
    ServiceInstance instance2 = TestDefaultServiceInstance.getTestDefaultServiceInstance("1.0.1");
    instances.add(instance2);
    Map<String, List<String>> header = new HashMap<>();
    header.put("bar", Collections.singletonList("bar2"));
    List<Object> targetInstances = flowRouteHandler.handle("foo", instances, new RequestData(header, null, null));
    Assert.assertEquals(1, targetInstances.size());
    Assert.assertEquals(instance1, targetInstances.get(0));
    ConfigCache.getLabel(RouterConstant.SPRING_CACHE_NAME).resetRouteRule(Collections.emptyMap());
}
|
/**
 * Static factory for a fresh configuration {@link Builder}.
 */
public static Builder custom() {
    return new Builder();
}
|
// A negative max wait duration must be rejected at build time.
@Test(expected = IllegalArgumentException.class)
public void testBuildWithIllegalMaxWaitDuration() {
    BulkheadConfig.custom()
        .maxWaitDuration(Duration.ofSeconds(-1))
        .build();
}
|
/**
 * Records a flush duration sample into the flush-time sensor.
 */
public void recordFlush(long duration) {
  flushTimeSensor.record(duration);
}
|
// A recorded flush must show up in the flush-time-total metric.
@Test
public void shouldRecordFlushTime() {
  // When:
  producerMetrics.recordFlush(METRIC_VALUE);
  // Then:
  assertMetricValue(FLUSH_TIME_TOTAL);
}
|
/**
 * Two keys are equal when their algorithms match and either their plain key bytes
 * (when this key holds a plain key) or their encrypted key bytes are equal.
 */
@Override
public boolean equals(Object obj) {
    // Standard fast path: an object always equals itself.
    if (this == obj) {
        return true;
    }
    // instanceof is false for null, so this also rejects null.
    if (!(obj instanceof NormalKey)) {
        return false;
    }
    NormalKey rhs = (NormalKey) obj;
    if (algorithm != rhs.algorithm) {
        return false;
    }
    // Arrays.equals treats one-sided null as unequal, both-null as equal.
    // NOTE(review): if this.plainKey != null while rhs.plainKey == null, the result can be
    // asymmetric with rhs.equals(this) — confirm both keys are always populated the same way.
    if (plainKey != null) {
        return Arrays.equals(plainKey, rhs.plainKey);
    } else {
        return Arrays.equals(encryptedKey, rhs.encryptedKey);
    }
}
|
// Covers: inequality with foreign types, distinct random keys, differing algorithms,
// round-trip through spec (equal + same hash), and equality on encrypted-only keys.
@Test
public void testEquals() {
    NormalKey key1 = NormalKey.createRandom();
    NormalKey key2 = NormalKey.createRandom();
    assertNotEquals(key1, new String());
    assertNotEquals(key1, key2);
    NormalKey keyNoAlgo = new NormalKey(EncryptionAlgorithmPB.NO_ENCRYPTION, new byte[16], null);
    assertNotEquals(key1, keyNoAlgo);
    NormalKey key3 = (NormalKey) EncryptionKey.createFromSpec(key1.toSpec());
    assertEquals(key1, key3);
    assertEquals(key1.hashCode(), key3.hashCode());
    assertArrayEquals(key1.getPlainKey(), key3.getPlainKey());
    NormalKey keyEn1 = new NormalKey(EncryptionAlgorithmPB.AES_128, null, new byte[16]);
    NormalKey keyEn2 = new NormalKey(EncryptionAlgorithmPB.AES_128, null, new byte[16]);
    assertEquals(keyEn1, keyEn2);
    assertEquals(keyEn1.hashCode(), keyEn2.hashCode());
}
|
/**
 * Upper-cases {@code value} using English casing rules.
 *
 * @param value the string to convert; despite the {@code @Nullable} marker, null is rejected
 * @return the upper-cased string
 * @throws IllegalArgumentException if {@code value} is null (behavior pinned by testToUpperCase)
 */
public static String toUpperCase(@Nullable String value) {
    if (value != null) {
        return value.toUpperCase(Locale.ENGLISH);
    }
    throw new IllegalArgumentException("String value cannot be null");
}
|
// Null is rejected; empty/blank strings pass through; normal text is upper-cased.
@Test
public void testToUpperCase() {
    //noinspection DataFlowIssue
    assertThatThrownBy(() -> toUpperCase(null)).isInstanceOf(IllegalArgumentException.class);
    assertThat(toUpperCase("")).isEqualTo("");
    assertThat(toUpperCase(" ")).isEqualTo(" ");
    assertThat(toUpperCase("Hello")).isEqualTo("HELLO");
}
|
/**
 * Compresses {@code uncompressedString} with the named compressor (falling back to
 * {@code DEFAULT_COMPRESSOR_NAME} when {@code compressorName} is null) and returns
 * the result base64-encoded. The input bytes are taken using DEFAULT_ENCODING.
 *
 * @throws IOException if compression fails
 */
public String compress(String compressorName, String uncompressedString) throws IOException {
  Checks.notNull(uncompressedString, "uncompressedString cannot be null");
  Compressor compressor =
      getCompressor(compressorName == null ? DEFAULT_COMPRESSOR_NAME : compressorName);
  return base64Encode(compressor.compress(uncompressedString.getBytes(DEFAULT_ENCODING)));
}
|
// Explicit "gzip" and the null (default) compressor produce the same base64 output.
@Test
public void testCompress() throws IOException {
  assertEquals("H4sIAAAAAAAA/0tMBAMAdCCLWwcAAAA=", stringCodec.compress("gzip", "aaaaaaa"));
  assertEquals("H4sIAAAAAAAA/0tMBAMAdCCLWwcAAAA=", stringCodec.compress(null, "aaaaaaa"));
}
|
/**
 * Pages the current user's notification messages by delegating to the mapper.
 * NOTE(review): the doubled "MyMy" in the name looks like a typo, but it is part of
 * the public interface and is kept for compatibility.
 */
@Override
public PageResult<NotifyMessageDO> getMyMyNotifyMessagePage(NotifyMessageMyPageReqVO pageReqVO, Long userId, Integer userType) {
    return notifyMessageMapper.selectPage(pageReqVO, userId, userType);
}
|
@Test
public void testGetMyNotifyMessagePage() {
    // mock data
    NotifyMessageDO dbNotifyMessage = randomPojo(NotifyMessageDO.class, o -> { // the record the query should return
        o.setUserId(1L);
        o.setUserType(UserTypeEnum.ADMIN.getValue());
        o.setReadStatus(true);
        o.setCreateTime(buildTime(2022, 1, 2));
        o.setTemplateParams(randomTemplateParams());
    });
    notifyMessageMapper.insert(dbNotifyMessage);
    // userId does not match
    notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setUserId(2L)));
    // userType does not match
    notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setUserType(UserTypeEnum.MEMBER.getValue())));
    // readStatus does not match
    notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setReadStatus(false)));
    // createTime does not match
    notifyMessageMapper.insert(cloneIgnoreId(dbNotifyMessage, o -> o.setCreateTime(buildTime(2022, 2, 1))));
    // prepare parameters
    Long userId = 1L;
    Integer userType = UserTypeEnum.ADMIN.getValue();
    NotifyMessageMyPageReqVO reqVO = new NotifyMessageMyPageReqVO();
    reqVO.setReadStatus(true);
    reqVO.setCreateTime(buildBetweenTime(2022, 1, 1, 2022, 1, 10));
    // invoke
    PageResult<NotifyMessageDO> pageResult = notifyMessageService.getMyMyNotifyMessagePage(reqVO, userId, userType);
    // assert: only the matching record is returned
    assertEquals(1, pageResult.getTotal());
    assertEquals(1, pageResult.getList().size());
    assertPojoEquals(dbNotifyMessage, pageResult.getList().get(0));
}
|
/**
 * Returns the stored encrypted form of the password.
 */
public String getEncryptedPassword() {
    return encryptedPassword;
}
|
// The constructor keeps the plain password in its field but exposes the cipher's
// encrypted form via getEncryptedPassword().
@Test
public void shouldEncryptMailHostPassword() throws CryptoException {
    GoCipher mockGoCipher = mock(GoCipher.class);
    when(mockGoCipher.encrypt("password")).thenReturn("encrypted");
    MailHost mailHost = new MailHost("hostname", 42, "username", "password", null, true, true, "from", "mail@admin.com", mockGoCipher);
    assertThat((String) ReflectionUtil.getField(mailHost, "password")).isEqualTo("password");
    assertThat(mailHost.getEncryptedPassword()).isEqualTo("encrypted");
}
|
/**
 * Renders a task manager location as its endpoint string.
 * '(unassigned)' being the default value is added to support backward-compatibility
 * for the deprecated fields.
 */
@VisibleForTesting
static String toString(@Nullable TaskManagerLocation location) {
    if (location == null) {
        return "(unassigned)";
    }
    return location.getEndpoint();
}
|
// A non-null location renders as "<fqdn-hostname>:<data-port>".
@Test
void testArchivedTaskManagerLocationHandling() {
    final TaskManagerLocation taskManagerLocation = new LocalTaskManagerLocation();
    assertThat(JobExceptionsHandler.toString(taskManagerLocation))
            .isEqualTo(
                    String.format(
                            "%s:%s",
                            taskManagerLocation.getFQDNHostname(),
                            taskManagerLocation.dataPort()));
}
|
/**
 * Parses eligible chat lines for boss kill counts, personal bests, duel arena
 * results, Hallowed Sepulchre times, hunter rumours, collection-log pets and
 * Guardians of the Rift completions, persisting extracted values via the
 * set/unset helpers.
 */
@Subscribe
public void onChatMessage(ChatMessage chatMessage)
{
	// Only these chat types can carry the tracked messages.
	if (chatMessage.getType() != ChatMessageType.TRADE
		&& chatMessage.getType() != ChatMessageType.GAMEMESSAGE
		&& chatMessage.getType() != ChatMessageType.SPAM
		&& chatMessage.getType() != ChatMessageType.FRIENDSCHATNOTIFICATION)
	{
		return;
	}
	String message = chatMessage.getMessage();
	Matcher matcher = KILLCOUNT_PATTERN.matcher(message);
	if (matcher.find())
	{
		final String boss = matcher.group("boss");
		final int kc = Integer.parseInt(matcher.group("kc"));
		final String pre = matcher.group("pre");
		final String post = matcher.group("post");
		// Neither prefix nor suffix captured: treat as a reset and clear the stored kc.
		if (Strings.isNullOrEmpty(pre) && Strings.isNullOrEmpty(post))
		{
			unsetKc(boss);
			return;
		}
		String renamedBoss = KILLCOUNT_RENAMES
			.getOrDefault(boss, boss)
			// The config service doesn't support keys with colons in them
			.replace(":", "");
		// NOTE(review): reference (!=) comparison between Strings — this relies on
		// getOrDefault/replace returning the very same instance when nothing changed;
		// !boss.equals(renamedBoss) would state the intent explicitly. Confirm before changing.
		if (boss != renamedBoss)
		{
			// Unset old TOB kc
			unsetKc(boss);
			unsetPb(boss);
			unsetKc(boss.replace(":", "."));
			unsetPb(boss.replace(":", "."));
			// Unset old story mode
			unsetKc("Theatre of Blood Story Mode");
			unsetPb("Theatre of Blood Story Mode");
		}
		setKc(renamedBoss, kc);
		// We either already have the pb, or need to remember the boss for the upcoming pb
		if (lastPb > -1)
		{
			log.debug("Got out-of-order personal best for {}: {}", renamedBoss, lastPb);
			if (renamedBoss.contains("Theatre of Blood"))
			{
				// TOB team size isn't sent in the kill message, but can be computed from varbits
				int tobTeamSize = tobTeamSize();
				lastTeamSize = tobTeamSize == 1 ? "Solo" : (tobTeamSize + " players");
			}
			else if (renamedBoss.contains("Tombs of Amascut"))
			{
				// TOA team size isn't sent in the kill message, but can be computed from varbits
				int toaTeamSize = toaTeamSize();
				lastTeamSize = toaTeamSize == 1 ? "Solo" : (toaTeamSize + " players");
			}
			final double pb = getPb(renamedBoss);
			// If a raid with a team size, only update the pb if it is lower than the existing pb
			// so that the pb is the overall lowest of any team size
			if (lastTeamSize == null || pb == 0 || lastPb < pb)
			{
				log.debug("Setting overall pb (old: {})", pb);
				setPb(renamedBoss, lastPb);
			}
			if (lastTeamSize != null)
			{
				log.debug("Setting team size pb: {}", lastTeamSize);
				setPb(renamedBoss + " " + lastTeamSize, lastPb);
			}
			// Consume the pending pb state.
			lastPb = -1;
			lastTeamSize = null;
		}
		else
		{
			// Remember the boss so a pb arriving later this tick can be attributed to it.
			lastBossKill = renamedBoss;
			lastBossTime = client.getTickCount();
		}
		return;
	}
	matcher = DUEL_ARENA_WINS_PATTERN.matcher(message);
	if (matcher.find())
	{
		final int oldWins = getKc("Duel Arena Wins");
		final int wins = matcher.group(2).equals("one") ? 1 :
			Integer.parseInt(matcher.group(2).replace(",", ""));
		final String result = matcher.group(1);
		int winningStreak = getKc("Duel Arena Win Streak");
		int losingStreak = getKc("Duel Arena Lose Streak");
		if (result.equals("won") && wins > oldWins)
		{
			losingStreak = 0;
			winningStreak += 1;
		}
		else if (result.equals("were defeated"))
		{
			losingStreak += 1;
			winningStreak = 0;
		}
		else
		{
			// Unknown verb: streaks are left as-is, but the win count below is still stored.
			log.warn("unrecognized duel streak chat message: {}", message);
		}
		setKc("Duel Arena Wins", wins);
		setKc("Duel Arena Win Streak", winningStreak);
		setKc("Duel Arena Lose Streak", losingStreak);
	}
	matcher = DUEL_ARENA_LOSSES_PATTERN.matcher(message);
	if (matcher.find())
	{
		int losses = matcher.group(1).equals("one") ? 1 :
			Integer.parseInt(matcher.group(1).replace(",", ""));
		setKc("Duel Arena Losses", losses);
	}
	// The various kill-duration / new-pb message shapes all funnel into matchPb.
	matcher = KILL_DURATION_PATTERN.matcher(message);
	if (matcher.find())
	{
		matchPb(matcher);
	}
	matcher = NEW_PB_PATTERN.matcher(message);
	if (matcher.find())
	{
		matchPb(matcher);
	}
	matcher = RAIDS_PB_PATTERN.matcher(message);
	if (matcher.find())
	{
		matchPb(matcher);
	}
	matcher = RAIDS_DURATION_PATTERN.matcher(message);
	if (matcher.find())
	{
		matchPb(matcher);
	}
	matcher = HS_PB_PATTERN.matcher(message);
	if (matcher.find())
	{
		int floor = Integer.parseInt(matcher.group("floor"));
		String floortime = matcher.group("floortime");
		String floorpb = matcher.group("floorpb");
		String otime = matcher.group("otime");
		String opb = matcher.group("opb");
		// Prefer the explicit pb capture; fall back to the run time.
		String pb = MoreObjects.firstNonNull(floorpb, floortime);
		setPb("Hallowed Sepulchre Floor " + floor, timeStringToSeconds(pb));
		if (otime != null)
		{
			pb = MoreObjects.firstNonNull(opb, otime);
			setPb("Hallowed Sepulchre", timeStringToSeconds(pb));
		}
	}
	matcher = HS_KC_FLOOR_PATTERN.matcher(message);
	if (matcher.find())
	{
		int floor = Integer.parseInt(matcher.group(1));
		int kc = Integer.parseInt(matcher.group(2).replaceAll(",", ""));
		setKc("Hallowed Sepulchre Floor " + floor, kc);
	}
	matcher = HS_KC_GHC_PATTERN.matcher(message);
	if (matcher.find())
	{
		int kc = Integer.parseInt(matcher.group(1).replaceAll(",", ""));
		setKc("Hallowed Sepulchre", kc);
	}
	matcher = HUNTER_RUMOUR_KC_PATTERN.matcher(message);
	if (matcher.find())
	{
		int kc = Integer.parseInt(matcher.group(1).replaceAll(",", ""));
		setKc("Hunter Rumours", kc);
	}
	// A remembered boss kill only pairs with a pb from the same tick.
	if (lastBossKill != null && lastBossTime != client.getTickCount())
	{
		lastBossKill = null;
		lastBossTime = -1;
	}
	matcher = COLLECTION_LOG_ITEM_PATTERN.matcher(message);
	if (matcher.find())
	{
		String item = matcher.group(1);
		int petId = findPet(item);
		if (petId != -1)
		{
			final List<Integer> petList = new ArrayList<>(getPetList());
			if (!petList.contains(petId))
			{
				log.debug("New pet added: {}/{}", item, petId);
				petList.add(petId);
				setPetList(petList);
			}
		}
	}
	matcher = GUARDIANS_OF_THE_RIFT_PATTERN.matcher(message);
	if (matcher.find())
	{
		int kc = Integer.parseInt(matcher.group(1));
		setKc("Guardians of the Rift", kc);
	}
}
|
// Challenge-duration messages where the run is slower than the pb must store the pb
// value (not the run time), for both mm:ss and precise mm:ss.xx formats.
@Test
public void testJadChallengeNoPb()
{
	ChatMessage chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Your completion count for TzHaar-Ket-Rak's First Challenge is: <col=ff0000>3</col>.", null, 0);
	chatCommandsPlugin.onChatMessage(chatMessage);
	chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Challenge duration: <col=ff0000>1:10</col>. Personal best: <col=ff0000>0:59</col>", null, 0);
	chatCommandsPlugin.onChatMessage(chatMessage);
	verify(configManager).setRSProfileConfiguration("killcount", "TzHaar-Ket-Rak's First Challenge".toLowerCase(), 3);
	verify(configManager).setRSProfileConfiguration("personalbest", "TzHaar-Ket-Rak's First Challenge".toLowerCase(), 59.0);
	// Precise times
	chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Challenge duration: <col=ff0000>1:10.00</col>. Personal best: <col=ff0000>0:59.20</col>", null, 0);
	chatCommandsPlugin.onChatMessage(chatMessage);
	verify(configManager).setRSProfileConfiguration("personalbest", "TzHaar-Ket-Rak's First Challenge".toLowerCase(), 59.2);
}
|
/**
 * Resets the shared-memory section of the given process under {@code directory}.
 * NOTE(review): the {@code true} constructor flag presumably triggers clearing the
 * process's section on open — confirm against the DefaultProcessCommands constructor.
 */
public static void reset(File directory, int processNumber) {
  try (DefaultProcessCommands processCommands = new DefaultProcessCommands(directory, processNumber, true)) {
    // nothing else to do than open file and reset the space of specified process
  }
}
|
// Process numbers above MAX_PROCESSES must be rejected with an IAE.
@Test
public void reset_fails_if_processNumber_is_higher_than_MAX_PROCESSES() throws Exception {
  int processNumber = MAX_PROCESSES + 1;
  expectProcessNumberNoValidIAE(() -> DefaultProcessCommands.reset(temp.newFolder(), processNumber), processNumber);
}
|
/**
 * Tags each record with its storage location by mapping every RDD partition through
 * {@code locationTagFunction}; partitioning is preserved (second argument true).
 */
@Override
public <R> HoodieData<HoodieRecord<R>> tagLocation(
    HoodieData<HoodieRecord<R>> records, HoodieEngineContext context,
    HoodieTable hoodieTable) {
  return HoodieJavaRDD.of(HoodieJavaRDD.getJavaRDD(records)
      .mapPartitionsWithIndex(locationTagFunction(hoodieTable.getMetaClient()), true));
}
|
// After a committed upsert the HBase index tags all records with known locations and
// correct file ids; after rolling that commit back, tagging yields no locations.
@Test
public void testSimpleTagLocationAndUpdateWithRollback() throws Exception {
  // Load to memory
  HoodieWriteConfig config = getConfigBuilder(100, false, false)
      .withRollbackUsingMarkers(false).build();
  SparkHoodieHBaseIndex index = new SparkHoodieHBaseIndex(config);
  try (SparkRDDWriteClient writeClient = getHoodieWriteClient(config)) {
    final String newCommitTime = writeClient.startCommit();
    final int numRecords = 10;
    List<HoodieRecord> records = dataGen.generateInserts(newCommitTime, numRecords);
    JavaRDD<HoodieRecord> writeRecords = jsc().parallelize(records, 1);
    metaClient = HoodieTableMetaClient.reload(metaClient);
    // Upsert the generated records (numRecords of them)
    JavaRDD<WriteStatus> writeStatues = writeClient.upsert(writeRecords, newCommitTime);
    assertNoWriteErrors(writeStatues.collect());
    // commit this upsert
    writeClient.commit(newCommitTime, writeStatues);
    HoodieTable hoodieTable = HoodieSparkTable.create(config, context, metaClient);
    // Now tagLocation for these records, hbaseIndex should tag them
    List<HoodieRecord> records2 = tagLocation(index, writeRecords, hoodieTable).collect();
    assertEquals(numRecords, records2.stream().filter(HoodieRecord::isCurrentLocationKnown).count());
    // check tagged records are tagged with correct fileIds
    List<String> fileIds = writeStatues.map(WriteStatus::getFileId).collect();
    assertEquals(0, records2.stream().filter(record -> record.getCurrentLocation().getFileId() == null).count());
    List<String> taggedFileIds = records2.stream().map(record -> record.getCurrentLocation().getFileId()).distinct().collect(Collectors.toList());
    // both lists should match
    assertTrue(taggedFileIds.containsAll(fileIds) && fileIds.containsAll(taggedFileIds));
    // Rollback the last commit
    writeClient.rollback(newCommitTime);
    hoodieTable = HoodieSparkTable.create(config, context, metaClient);
    // Now tagLocation for these records, hbaseIndex should not tag them since it was a rolled
    // back commit
    List<HoodieRecord> records3 = tagLocation(index, writeRecords, hoodieTable).collect();
    assertEquals(0, records3.stream().filter(HoodieRecord::isCurrentLocationKnown).count());
    assertEquals(0, records3.stream().filter(record -> record.getCurrentLocation() != null).count());
  }
}
|
/**
 * Builds the ack bitset for this batched position: all {@code batchSize} bits set
 * except the one at {@code batchIndex}. Single-message batches need no bitset.
 * The temporary BitSetRecyclable is converted to a long[] before being recycled,
 * so the recycled object is never referenced afterwards.
 */
public void setAckSetByIndex(){
    if (batchSize == 1){
        return;
    }
    BitSetRecyclable bitSetRecyclable = BitSetRecyclable.create();
    bitSetRecyclable.set(0, batchSize, true);
    bitSetRecyclable.clear(batchIndex);
    long[] ackSet = bitSetRecyclable.toLongArray();
    bitSetRecyclable.recycle();
    setAckSet(ackSet);
}
|
// For each (batchSize, batchIndex) pair, the generated ack set must have every bit
// set except the bit at batchIndex.
@Test(dataProvider = "batchSizeAndBatchIndexArgsArray")
public void testSetAckSetByIndex(int batchSize, int batchIndex){
    TxnBatchedPositionImpl txnBatchedPosition = new TxnBatchedPositionImpl(1,1, batchSize, batchIndex);
    txnBatchedPosition.setAckSetByIndex();
    long[] ls = txnBatchedPosition.getAckSet();
    BitSetRecyclable bitSetRecyclable = BitSetRecyclable.valueOf(ls);
    for (int i = 0; i < batchSize; i++){
        if (i == batchIndex) {
            Assert.assertFalse(bitSetRecyclable.get(i));
        } else {
            Assert.assertTrue(bitSetRecyclable.get(i));
        }
    }
    bitSetRecyclable.recycle();
}
|
/**
 * Bridges a Guava ListenableFuture into a ParSeq Task. Cancellation propagates
 * Task -> future (via the overridden cancel), and completion/failure/cancellation
 * propagate future -> Task through a listener that settles the backing promise.
 */
public static <T> Task<T> fromListenableFuture(ListenableFuture<T> future) {
  /**
   * BaseTask's promise will be listening to this
   * also see {@link BaseTask#contextRun(Context, Task, Collection)}
   */
  final SettablePromise<T> promise = Promises.settable();
  // Setup cancellation propagation from Task -> ListenableFuture.
  final Task<T> task =
      new BaseTask<T>("fromListenableFuture: " + Task._taskDescriptor.getDescription(future.getClass().getName())) {
        @Override
        public boolean cancel(Exception rootReason) {
          // <BaseTask>.cancel()'s result indicates whether cancel() successfully trigger state transition to "CANCELLED"
          // And we should only cancel GRPC future when the transition was conducted.
          boolean shouldCancelTask = super.cancel(rootReason);
          if (shouldCancelTask && !future.isCancelled()) {
            boolean futureCancelResult = future.cancel(true);
            if (!futureCancelResult) {
              LOGGER.warn("Unexpected: GRPC future was not cancelled but new attempt to cancel also failed.");
            }
          }
          return shouldCancelTask;
        }
        @Override
        protected Promise<? extends T> run(Context context) throws Throwable {
          // The task never computes anything itself; it completes when the promise settles.
          return promise;
        }
      };
  // Setup forward event propagation ListenableFuture -> Task.
  Runnable callbackRunnable = () -> {
    // Guard against double-settling: the promise may already be done (e.g. task cancelled).
    if (promise.isDone()) {
      boolean isPromiseFailed = promise.isFailed();
      LOGGER.warn("ListenableFuture callback triggered but ParSeq already done. "
          + "Future is done: {}, "
          + "Future is cancelled: {}"
          + "Promise is failed:{}"
          + (isPromiseFailed? " Promise hold error: {}" : "Promise hold data:{}"),
          future.isDone(),
          future.isCancelled(),
          isPromiseFailed,
          isPromiseFailed ? promise.getError(): promise.get()
      );
      return;
    }
    try {
      // future.get() won't block here: the listener only runs once the future is done.
      final T value = future.get();
      promise.done(value);
    } catch (CancellationException ex) {
      task.cancel(ex);
    } catch (ExecutionException ex) {
      // Unwrap so the task observes the original failure cause.
      promise.fail(ex.getCause());
    } catch (Exception | Error ex) {
      promise.fail(ex);
    }
  };
  future.addListener(callbackRunnable, MoreExecutors.directExecutor());
  return task;
}
|
// Exercises all four propagation paths of the bridge: Task-cancel -> future,
// future success -> task value, future failure -> task error, and
// future cancellation -> task failed with a CancellationException cause.
@Test
public void testFromListenableFuture() throws Exception {
  ListenableFutureUtil.SettableFuture<String> listenableFuture = new ListenableFutureUtil.SettableFuture<>();
  Task<String> task = ListenableFutureUtil.fromListenableFuture(listenableFuture);
  // Test cancel propagation from Task to ListenableFuture
  task.cancel(new RuntimeException());
  runUntilComplete(task);
  Assert.assertTrue(listenableFuture.isCancelled());
  listenableFuture = new ListenableFutureUtil.SettableFuture<>();
  task = ListenableFutureUtil.fromListenableFuture(listenableFuture);
  // Test successful completion of ListenableFuture.
  listenableFuture.set("COMPLETED");
  runUntilComplete(task);
  Assert.assertTrue(task.isDone());
  Assert.assertFalse(task.isFailed());
  Assert.assertEquals(task.get(), "COMPLETED");
  listenableFuture = new ListenableFutureUtil.SettableFuture<>();
  task = ListenableFutureUtil.fromListenableFuture(listenableFuture);
  // Test exceptional completion of ListenableFuture.
  listenableFuture.setException(new RuntimeException("Test"));
  runUntilComplete(task);
  Assert.assertTrue(task.isDone());
  Assert.assertTrue(task.isFailed());
  Assert.assertEquals(task.getError().getClass(), RuntimeException.class);
  Assert.assertEquals(task.getError().getMessage(), "Test");
  listenableFuture = new ListenableFutureUtil.SettableFuture<>();
  task = ListenableFutureUtil.fromListenableFuture(listenableFuture);
  // Test cancellation of ListenableFuture.
  listenableFuture.cancel(true);
  runUntilComplete(task);
  Assert.assertTrue(task.isDone());
  Assert.assertTrue(task.isFailed());
  Assert.assertEquals(task.getError().getCause().getClass(), CancellationException.class);
}
|
/**
 * Resolves a hostname to its IP address string.
 *
 * @param hostname the host to resolve; must be non-null (NPE) and non-empty (IAE)
 * @return the textual IP address from InetAddress resolution
 * @throws UnknownHostException if the name cannot be resolved
 */
public static String resolveIpAddress(String hostname) throws UnknownHostException {
  Preconditions.checkNotNull(hostname, "hostname");
  Preconditions.checkArgument(!hostname.isEmpty(),
      "Cannot resolve IP address for empty hostname");
  return InetAddress.getByName(hostname).getHostAddress();
}
|
// An empty hostname must fail precondition checking with an IAE.
@Test(expected = IllegalArgumentException.class)
public void resolveEmptyIpAddress() throws UnknownHostException {
  NetworkAddressUtils.resolveIpAddress("");
}
|
/**
 * Convenience wrapper: evaluates the arithmetic expression with a fresh Calculator.
 */
public static double conversion(String expression) {
    return (new Calculator()).calculate(expression);
}
|
// Division chains with parentheses must evaluate exactly: ((1/1)/(1/1)-1)*100 == 0.
@Test
public void conversationTest5(){
    // https://github.com/dromara/hutool/issues/1984
    final double conversion = Calculator.conversion("((1/1) / (1/1) -1) * 100");
    assertEquals(0, conversion, 0);
}
|
/**
 * FEEL all(list): conjunction over the Boolean elements of {@code list}.
 * A null list yields true (vacuous truth); null elements are skipped; any non-null
 * element that is not a Boolean produces an invalid-parameters error.
 */
public FEELFnResult<Boolean> invoke(@ParameterName("list") List list) {
    if (list == null) {
        return FEELFnResult.ofResult(true);
    }
    boolean accumulator = true;
    for (final Object item : list) {
        if (item == null) {
            // Null entries do not participate in the conjunction.
            continue;
        }
        if (!(item instanceof Boolean)) {
            return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "an element in the list is not" +
                    " a Boolean"));
        }
        accumulator &= (Boolean) item;
    }
    return FEELFnResult.ofResult(accumulator);
}
|
// all() over an empty list is vacuously true.
@Test
void invokeArrayParamEmptyArray() {
    FunctionTestUtil.assertResult(nnAllFunction.invoke(new Object[]{}), true);
}
|
/**
 * Returns the per-instance permissions a user always holds on their own account:
 * edit, password change, and token list/create/remove, each scoped to {@code username}.
 * ImmutableSet preserves insertion order, so the returned iteration order matches
 * the declaration order below.
 */
public Set<String> userSelfEditPermissions(String username) {
    final String[] selfPermissions = {
            RestPermissions.USERS_EDIT,
            RestPermissions.USERS_PASSWORDCHANGE,
            RestPermissions.USERS_TOKENLIST,
            RestPermissions.USERS_TOKENCREATE,
            RestPermissions.USERS_TOKENREMOVE,
    };
    final ImmutableSet.Builder<String> permissions = ImmutableSet.builder();
    for (String permission : selfPermissions) {
        permissions.add(perInstance(permission, username));
    }
    return permissions.build();
}
|
// Exactly the five self-service permissions, scoped to the username, in order.
@Test
public void testUserSelfEditPermissions() throws Exception {
    assertThat(permissions.userSelfEditPermissions("john"))
            .containsExactly("users:edit:john", "users:passwordchange:john", "users:tokenlist:john",
                    "users:tokencreate:john", "users:tokenremove:john");
}
|
/**
 * Scans a term buffer forward from {@code termOffset}, accumulating whole aligned
 * frames, and returns the offset just past the last complete frame. Frame lengths
 * are read with volatile semantics so concurrently-appended frames are observed
 * safely. Scanning stops at an unwritten frame (length <= 0), at a padding frame
 * (consumed only when it is the very first frame scanned), or when the next frame
 * would cross {@code limitOffset}.
 */
public static int scan(final UnsafeBuffer termBuffer, final int termOffset, final int limitOffset)
{
    int offset = termOffset;
    while (offset < limitOffset)
    {
        // Volatile read: the frame may be written concurrently by the publisher.
        final int frameLength = frameLengthVolatile(termBuffer, offset);
        if (frameLength <= 0)
        {
            break;
        }
        final int alignedFrameLength = align(frameLength, FRAME_ALIGNMENT);
        if (isPaddingFrame(termBuffer, offset))
        {
            // Padding is only consumed when it is the first frame; otherwise the
            // block ends before it.
            if (termOffset == offset)
            {
                offset += alignedFrameLength;
            }
            break;
        }
        // Never return a range containing a partially-included frame.
        if (offset + alignedFrameLength > limitOffset)
        {
            break;
        }
        offset += alignedFrameLength;
    }
    return offset;
}
|
// A frame whose aligned length exceeds the limit must not be consumed: the scan
// returns the starting offset unchanged.
@Test
void shouldFailToReadFirstMessageBecauseOfLimit()
{
    final int offset = 0;
    final int messageLength = 50;
    final int alignedMessageLength = BitUtil.align(messageLength, FRAME_ALIGNMENT);
    final int limit = alignedMessageLength - 1;
    when(termBuffer.getIntVolatile(lengthOffset(offset))).thenReturn(messageLength);
    when(termBuffer.getShort(typeOffset(offset))).thenReturn((short)HDR_TYPE_DATA);
    final int newOffset = TermBlockScanner.scan(termBuffer, offset, limit);
    assertEquals(offset, newOffset);
}
|
/**
 * Computes the relative path from {@code canonicalBaseFile} to
 * {@code canonicalFileToRelativize}. When the two paths share no common root
 * (e.g. different Windows drives) the target's canonical path is returned
 * unchanged. Identical paths yield ".".
 */
static String getRelativeFileInternal(File canonicalBaseFile, File canonicalFileToRelativize) {
    List<String> basePath = getPathComponents(canonicalBaseFile);
    List<String> targetPath = getPathComponents(canonicalFileToRelativize);
    // Different roots: no relative path exists, fall back to the absolute form.
    if (!basePath.get(0).equals(targetPath.get(0))) {
        return canonicalFileToRelativize.getPath();
    }
    // Length of the shared prefix (root at index 0 is already known to match).
    int common = 1;
    while (common < basePath.size() && common < targetPath.size()
            && basePath.get(common).equals(targetPath.get(common))) {
        common++;
    }
    StringBuilder sb = new StringBuilder();
    // One ".." per remaining base component.
    for (int i = common; i < basePath.size(); i++) {
        if (sb.length() > 0) {
            sb.append(File.separatorChar);
        }
        sb.append("..");
    }
    // Then descend into the target's remaining components.
    for (int i = common; i < targetPath.size(); i++) {
        if (sb.length() > 0) {
            sb.append(File.separatorChar);
        }
        sb.append(targetPath.get(i));
    }
    return sb.length() == 0 ? "." : sb.toString();
}
|
@Test
public void pathUtilTest16() {
    // Sibling directories under the same root: expect two ".." hops out of
    // the base followed by the target's components.
    File[] roots = File.listRoots();
    File basePath = new File(roots[0] + "some2" + File.separatorChar + "dir3");
    File relativePath = new File(roots[0] + "some" + File.separatorChar + "dir" + File.separatorChar + "dir2");
    String path = PathUtil.getRelativeFileInternal(basePath, relativePath);
    Assert.assertEquals(path, ".." + File.separatorChar + ".." + File.separatorChar + "some" + File.separatorChar + "dir" + File.separatorChar + "dir2");
}
|
/**
 * Resolves an environment value for the interpreter launch: interpreter
 * properties take precedence, falling back to the process environment.
 * Logs a warning (but still returns the blank value) when neither source
 * provides a non-blank value.
 */
private String getEnv(String envName, InterpreterLaunchContext context) {
    String value = context.getProperties().getProperty(envName);
    if (StringUtils.isBlank(value)) {
        value = System.getenv(envName);
    }
    if (StringUtils.isBlank(value)) {
        LOGGER.warn("environment variable: {} is empty", envName);
    }
    return value;
}
|
@Test
void testYarnClientMode_1() throws IOException {
    // Launch a Spark interpreter configured for yarn-client and verify the
    // process wiring plus the ZEPPELIN_SPARK_CONF assembled from the properties.
    SparkInterpreterLauncher launcher = new SparkInterpreterLauncher(zConf, null);
    Properties properties = new Properties();
    properties.setProperty("SPARK_HOME", sparkHome);
    properties.setProperty("property_1", "value_1");
    properties.setProperty("spark.master", "yarn-client");
    properties.setProperty("spark.files", "file_1");
    properties.setProperty("spark.jars", "jar_1");
    InterpreterOption option = new InterpreterOption();
    InterpreterLaunchContext context = new InterpreterLaunchContext(properties, option, null, "user1", "intpGroupId", "groupId", "spark", "spark", 0, "host");
    InterpreterClient client = launcher.launch(context);
    assertTrue( client instanceof ExecRemoteInterpreterProcess);
    // try-with-resources ensures the spawned interpreter process is closed.
    try (ExecRemoteInterpreterProcess interpreterProcess = (ExecRemoteInterpreterProcess) client) {
        assertEquals("spark", interpreterProcess.getInterpreterSettingName());
        assertTrue(interpreterProcess.getInterpreterDir().endsWith("/interpreter/spark"));
        assertTrue(interpreterProcess.getLocalRepoDir().endsWith("/local-repo/groupId"));
        assertEquals(zConf.getInterpreterRemoteRunnerPath(), interpreterProcess.getInterpreterRunner());
        assertTrue(interpreterProcess.getEnv().size() >= 2);
        assertEquals(sparkHome, interpreterProcess.getEnv().get("SPARK_HOME"));
        String sparkJars = "jar_1";
        String sparkrZip = sparkHome + "/R/lib/sparkr.zip#sparkr";
        String sparkFiles = "file_1";
        String expected = "--conf|spark.yarn.dist.archives=" + sparkrZip +
                "|--conf|spark.files=" + sparkFiles + "|--conf|spark.jars=" + sparkJars +
                "|--conf|spark.yarn.isPython=true|--conf|spark.app.name=intpGroupId|--conf|spark.master=yarn-client";
        // Order-insensitive comparison: the launcher does not guarantee conf ordering.
        assertTrue(CollectionUtils.isEqualCollection(Arrays.asList(expected.split("\\|")),
                Arrays.asList(interpreterProcess.getEnv().get("ZEPPELIN_SPARK_CONF").split("\\|"))));
    }
}
|
/**
 * Creates the concrete MySQL command packet for the given command type,
 * decoding it from the payload where the command carries a body.
 * Unknown command types yield a {@link MySQLUnsupportedCommandPacket}.
 */
public static MySQLCommandPacket newInstance(final MySQLCommandPacketType commandPacketType, final MySQLPacketPayload payload,
                                             final ConnectionSession connectionSession) {
    switch (commandPacketType) {
        case COM_QUIT:
            return new MySQLComQuitPacket();
        case COM_INIT_DB:
            return new MySQLComInitDbPacket(payload);
        case COM_FIELD_LIST:
            return new MySQLComFieldListPacket(payload);
        case COM_QUERY:
            return new MySQLComQueryPacket(payload);
        case COM_STMT_PREPARE:
            return new MySQLComStmtPreparePacket(payload);
        case COM_STMT_EXECUTE:
            // Peek the statement id (little-endian int at the current reader index,
            // without consuming it) to look up the prepared statement, whose
            // parameter count drives how the execute packet is decoded.
            MySQLServerPreparedStatement serverPreparedStatement =
                    connectionSession.getServerPreparedStatementRegistry().getPreparedStatement(payload.getByteBuf().getIntLE(payload.getByteBuf().readerIndex()));
            return new MySQLComStmtExecutePacket(payload, serverPreparedStatement.getSqlStatementContext().getSqlStatement().getParameterCount());
        case COM_STMT_SEND_LONG_DATA:
            return new MySQLComStmtSendLongDataPacket(payload);
        case COM_STMT_RESET:
            return new MySQLComStmtResetPacket(payload);
        case COM_STMT_CLOSE:
            return new MySQLComStmtClosePacket(payload);
        case COM_SET_OPTION:
            return new MySQLComSetOptionPacket(payload);
        case COM_PING:
            return new MySQLComPingPacket();
        case COM_RESET_CONNECTION:
            return new MySQLComResetConnectionPacket();
        default:
            return new MySQLUnsupportedCommandPacket(commandPacketType);
    }
}
|
@Test
void assertNewInstanceWithComInitDbPacket() {
    // COM_INIT_DB must map to a MySQLComInitDbPacket decoded from the payload.
    assertThat(MySQLCommandPacketFactory.newInstance(MySQLCommandPacketType.COM_INIT_DB, payload, connectionSession), instanceOf(MySQLComInitDbPacket.class));
}
|
/**
 * Tallies error occurrences for this response: the top-level error code
 * plus one error per updatable-feature result.
 */
@Override
public Map<Errors, Integer> errorCounts() {
    final Map<Errors, Integer> counts = new HashMap<>();
    updateErrorCounts(counts, Errors.forCode(data.errorCode()));
    data.results().forEach(result -> updateErrorCounts(counts, Errors.forCode(result.errorCode())));
    return counts;
}
|
@Test
public void testErrorCounts() {
    // Three per-feature errors plus a top-level INVALID_REQUEST must be
    // aggregated into per-error totals by errorCounts().
    UpdateFeaturesResponseData.UpdatableFeatureResultCollection results =
        new UpdateFeaturesResponseData.UpdatableFeatureResultCollection();
    results.add(new UpdateFeaturesResponseData.UpdatableFeatureResult()
        .setFeature("foo")
        .setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code())
    );
    results.add(new UpdateFeaturesResponseData.UpdatableFeatureResult()
        .setFeature("bar")
        .setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code())
    );
    results.add(new UpdateFeaturesResponseData.UpdatableFeatureResult()
        .setFeature("baz")
        .setErrorCode(Errors.FEATURE_UPDATE_FAILED.code())
    );
    UpdateFeaturesResponse response = new UpdateFeaturesResponse(new UpdateFeaturesResponseData()
        .setErrorCode(Errors.INVALID_REQUEST.code())
        .setResults(results)
    );
    Map<Errors, Integer> errorCounts = response.errorCounts();
    assertEquals(3, errorCounts.size());
    assertEquals(1, errorCounts.get(Errors.INVALID_REQUEST).intValue());
    assertEquals(2, errorCounts.get(Errors.UNKNOWN_SERVER_ERROR).intValue());
    assertEquals(1, errorCounts.get(Errors.FEATURE_UPDATE_FAILED).intValue());
}
|
/**
 * Returns the cached {@link Cluster}, failing fast with an
 * {@link IllegalStateException} rather than handing callers a null.
 */
public Cluster cluster() {
    if (clusterInstance != null) {
        return clusterInstance;
    }
    throw new IllegalStateException("Cached Cluster instance should not be null, but was.");
}
|
@Test
public void testMissingLeaderEndpoint() {
    // Although the broker attempts to ensure leader information is available, the
    // client metadata cache may retain partition metadata across multiple responses.
    // For example, separate responses may contain conflicting leader epochs for
    // separate partitions and the client will always retain the highest.
    TopicPartition topicPartition = new TopicPartition("topic", 0);
    // Leader is node 5, which is deliberately absent from nodesById below.
    MetadataResponse.PartitionMetadata partitionMetadata = new MetadataResponse.PartitionMetadata(
            Errors.NONE,
            topicPartition,
            Optional.of(5),
            Optional.of(10),
            Arrays.asList(5, 6, 7),
            Arrays.asList(5, 6, 7),
            Collections.emptyList());
    Map<Integer, Node> nodesById = new HashMap<>();
    nodesById.put(6, new Node(6, "localhost", 2077));
    nodesById.put(7, new Node(7, "localhost", 2078));
    nodesById.put(8, new Node(8, "localhost", 2079));
    MetadataSnapshot cache = new MetadataSnapshot("clusterId",
            nodesById,
            Collections.singleton(partitionMetadata),
            Collections.emptySet(),
            Collections.emptySet(),
            Collections.emptySet(),
            null,
            Collections.emptyMap());
    // With the leader endpoint missing, leaderFor must be null while the
    // replica list still contains a placeholder ("empty") node for id 5.
    Cluster cluster = cache.cluster();
    assertNull(cluster.leaderFor(topicPartition));
    PartitionInfo partitionInfo = cluster.partition(topicPartition);
    Map<Integer, Node> replicas = Arrays.stream(partitionInfo.replicas())
            .collect(Collectors.toMap(Node::id, Function.identity()));
    assertNull(partitionInfo.leader());
    assertEquals(3, replicas.size());
    assertTrue(replicas.get(5).isEmpty());
    assertEquals(nodesById.get(6), replicas.get(6));
    assertEquals(nodesById.get(7), replicas.get(7));
}
|
/**
 * Deletes the given files/directories on the iRODS server, force-deleting
 * directories recursively. Children of an already-deleted directory are
 * skipped to avoid redundant (and failing) server calls.
 *
 * @throws NotfoundException if a path does not exist on the server
 * @throws BackgroundException wrapping any Jargon client failure
 */
@Override
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
    final List<Path> deleted = new ArrayList<Path>();
    for(Path file : files.keySet()) {
        // Skip entries whose ancestor directory was already force-deleted.
        boolean skip = false;
        for(Path d : deleted) {
            if(file.isChild(d)) {
                skip = true;
                break;
            }
        }
        if(skip) {
            continue;
        }
        deleted.add(file);
        // Notify the callback before the actual deletion attempt.
        callback.delete(file);
        try {
            final IRODSFile f = session.getClient().getIRODSFileFactory().instanceIRODSFile(file.getAbsolute());
            if(!f.exists()) {
                throw new NotfoundException(String.format("%s doesn't exist", file.getAbsolute()));
            }
            if(f.isFile()) {
                session.getClient().fileDeleteForce(f);
            }
            else if(f.isDirectory()) {
                session.getClient().directoryDeleteForce(f);
            }
        }
        catch(JargonException e) {
            throw new IRODSExceptionMappingService().map("Cannot delete {0}", e, file);
        }
    }
}
|
@Test
public void testDeleteDirectory() throws Exception {
    // Integration test: create a folder with one file on a live iRODS server,
    // delete only the folder, and verify both folder and file are gone.
    final ProtocolFactory factory = new ProtocolFactory(new HashSet<>(Collections.singleton(new IRODSProtocol())));
    final Profile profile = new ProfilePlistReader(factory).read(
        this.getClass().getResourceAsStream("/iRODS (iPlant Collaborative).cyberduckprofile"));
    final Host host = new Host(profile, profile.getDefaultHostname(), new Credentials(
        PROPERTIES.get("irods.key"), PROPERTIES.get("irods.secret")
    ));
    final IRODSSession session = new IRODSSession(host);
    session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback());
    session.login(new DisabledLoginCallback(), new DisabledCancelCallback());
    final Path folder = new Path(new IRODSHomeFinderService(session).find(), UUID.randomUUID().toString(), EnumSet.of(Path.Type.directory));
    new IRODSDirectoryFeature(session).mkdir(folder, new TransferStatus());
    final Path file = new Path(folder, "f", EnumSet.of(Path.Type.file));
    new IRODSTouchFeature(session).touch(file, new TransferStatus());
    assertTrue(new IRODSFindFeature(session).find(folder));
    assertTrue(new IRODSFindFeature(session).find(file));
    // Deleting the directory is recursive (force delete), so the contained
    // file must disappear as well.
    new IRODSDeleteFeature(session).delete(Collections.singletonList(folder), new DisabledLoginCallback(), new Delete.DisabledCallback());
    assertFalse(new IRODSFindFeature(session).find(folder));
    assertFalse(new IRODSFindFeature(session).find(file));
    session.close();
}
|
/**
 * Returns the cached {@link ExtensionLoader} for the given SPI interface,
 * creating and caching one on first access.
 *
 * @param clazz the extension point; must be an interface annotated with {@code @SPI}
 * @param cl    the class loader used to load extension implementations
 * @throws NullPointerException     if {@code clazz} is null
 * @throws IllegalArgumentException if {@code clazz} is not an interface or lacks {@code @SPI}
 */
public static <T> ExtensionLoader<T> getExtensionLoader(final Class<T> clazz, final ClassLoader cl) {
    // Validate the extension point contract before touching the cache.
    Objects.requireNonNull(clazz, "extension clazz is null");
    if (!clazz.isInterface()) {
        throw new IllegalArgumentException("extension clazz (" + clazz + ") is not interface!");
    }
    if (!clazz.isAnnotationPresent(SPI.class)) {
        throw new IllegalArgumentException("extension clazz (" + clazz + ") without @" + SPI.class + " Annotation");
    }
    // computeIfAbsent removes the original check-then-put race, which could
    // construct an ExtensionLoader that was immediately discarded, and avoids
    // the redundant second map lookup.
    return (ExtensionLoader<T>) LOADERS.computeIfAbsent(clazz, key -> new ExtensionLoader<>(key, cl));
}
|
@Test
public void testGetExtensionLoaderIsNull() {
    // A null extension class must be rejected with an NPE carrying the
    // documented message.
    try {
        ExtensionLoader.getExtensionLoader(null);
        fail();
    } catch (NullPointerException expected) {
        assertThat(expected.getMessage(), containsString("extension clazz is null"));
    }
}
|
/**
 * Reads exactly {@code length} bytes from the stream into {@code bytes}
 * starting at {@code offset}.
 *
 * @throws EOFException if the stream ends before {@code length} bytes are read
 * @throws IOException  on any underlying read failure
 */
public static void readFully(InputStream stream, byte[] bytes, int offset, int length)
    throws IOException {
  // readRemaining loops over partial reads; we only verify completeness here.
  final int read = readRemaining(stream, bytes, offset, length);
  if (read == length) {
    return;
  }
  throw new EOFException(
      "Reached the end of stream with " + (length - read) + " bytes left to read");
}
|
@Test
public void testReadFullyStartAndLength() throws IOException {
    // Fill buffer[2..7) from the stream; bytes outside that window stay untouched
    // and the stream advances by exactly the 5 bytes read.
    byte[] buffer = new byte[10];
    MockInputStream stream = new MockInputStream();
    IOUtil.readFully(stream, buffer, 2, 5);
    assertThat(Arrays.copyOfRange(buffer, 2, 7))
        .as("Byte array contents should match")
        .isEqualTo(Arrays.copyOfRange(MockInputStream.TEST_ARRAY, 0, 5));
    assertThat(stream.getPos()).as("Stream position should reflect bytes read").isEqualTo(5);
}
|
/**
 * ZooKeeper watcher callback: when the watched node is deleted, rebuild
 * the broker selection state by refreshing from the external view.
 */
@Override
public void handleDataDeleted(String dataPath) {
    refresh();
}
|
@Test
public void testHandleDataDeleted() {
    // A delete event must trigger a refresh, i.e. a second read of the
    // table-to-brokers map (the first happened during setup).
    _dynamicBrokerSelectorUnderTest.handleDataDeleted("dataPath");
    verify(_mockExternalViewReader, times(2)).getTableToBrokersMap();
}
|
/**
 * Combines {@code existing} with the given update functions, deduplicating
 * and dropping NOOP entries. Returns {@code existing} when nothing useful
 * remains, the single function when only one remains, and a composite
 * otherwise.
 *
 * @throws NullPointerException if the varargs array or any element is null
 */
static InjectorFunction injectorFunction(InjectorFunction existing, InjectorFunction... update) {
  if (update == null) {
    throw new NullPointerException("injectorFunctions == null");
  }
  // LinkedHashSet preserves first-seen order while removing duplicates.
  LinkedHashSet<InjectorFunction> candidates =
      new LinkedHashSet<InjectorFunction>(Arrays.asList(update));
  if (candidates.contains(null)) {
    throw new NullPointerException("injectorFunction == null");
  }
  // NOOP entries contribute nothing, so drop them before deciding.
  candidates.remove(InjectorFunction.NOOP);
  switch (candidates.size()) {
    case 0:
      return existing;
    case 1:
      return candidates.iterator().next();
    default:
      return new CompositeInjectorFunction(candidates.toArray(new InjectorFunction[0]));
  }
}
|
// With no update functions supplied, the existing function must be
// returned unchanged (same instance).
@Test void injectorFunction_emptyIgnored() {
  InjectorFunction existing = mock(InjectorFunction.class);
  assertThat(injectorFunction(existing))
    .isSameAs(existing);
}
|
/**
 * Queries an SMS template from the Aliyun SMS API by its template code.
 * Returns {@code null} when the API reports a non-success code.
 */
@Override
public SmsTemplateRespDTO getSmsTemplate(String apiTemplateId) throws Throwable {
    // 1. Execute the QuerySmsTemplate request.
    // Reference: https://api.aliyun.com/document/Dysmsapi/2017-05-25/QuerySmsTemplate
    TreeMap<String, Object> queryParam = new TreeMap<>();
    queryParam.put("TemplateCode", apiTemplateId);
    JSONObject response = request("QuerySmsTemplate", queryParam);
    // 2.1 Request failed: log the raw response and return null.
    String code = response.getStr("Code");
    if (ObjectUtil.notEqual(code, RESPONSE_CODE_SUCCESS)) {
        log.error("[getSmsTemplate][模版编号({}) 响应不正确({})]", apiTemplateId, response);
        return null;
    }
    // 2.2 Request succeeded: map the response fields onto the DTO.
    return new SmsTemplateRespDTO()
            .setId(response.getStr("TemplateCode"))
            .setContent(response.getStr("TemplateContent"))
            .setAuditStatus(convertSmsTemplateAuditStatus(response.getInt("TemplateStatus")))
            .setAuditReason(response.getStr("Reason"));
}
|
@Test
public void testGetSmsTemplate() throws Throwable {
    try (MockedStatic<HttpUtils> httpUtilsMockedStatic = mockStatic(HttpUtils.class)) {
        // Prepare arguments
        String apiTemplateId = randomString();
        // Mock the HTTP call with a canned successful QuerySmsTemplate response
        httpUtilsMockedStatic.when(() -> HttpUtils.post(anyString(), anyMap(), anyString()))
                .thenReturn("{\"TemplateCode\":\"SMS_207945135\",\"RequestId\":\"6F4CC077-29C8-5BA5-AB62-5FF95068A5AC\",\"Message\":\"OK\",\"TemplateContent\":\"您的验证码${code},该验证码5分钟内有效,请勿泄漏于他人!\",\"TemplateName\":\"公告通知\",\"TemplateType\":0,\"Code\":\"OK\",\"CreateDate\":\"2020-12-23 17:34:42\",\"Reason\":\"无审批备注\",\"TemplateStatus\":1}");
        // Invoke
        SmsTemplateRespDTO result = smsClient.getSmsTemplate(apiTemplateId);
        // Assert the DTO mirrors the mocked response fields
        assertEquals("SMS_207945135", result.getId());
        assertEquals("您的验证码${code},该验证码5分钟内有效,请勿泄漏于他人!", result.getContent());
        assertEquals(SmsTemplateAuditStatusEnum.SUCCESS.getStatus(), result.getAuditStatus());
        assertEquals("无审批备注", result.getAuditReason());
    }
}
|
/**
 * Fetches all healthy Nacos instances registered under the given service key
 * and converts each to its upstream JSON representation.
 *
 * @throws ShenyuException wrapping any Nacos client failure
 */
@Override
public List<String> getRegisterData(final String key) {
    try {
        // Only healthy (true) instances from the configured group are returned.
        List<Instance> instances = namingService.selectInstances(key, groupName, true);
        List<String> registerData = new ArrayList<>();
        for (Instance instance : instances) {
            String data = buildUpstreamJsonFromInstance(instance);
            registerData.add(data);
        }
        return registerData;
    } catch (NacosException e) {
        LOGGER.error("Error getting Nacos service instances: {}", e.getMessage(), e);
        throw new ShenyuException(e);
    }
}
|
@Test
void testGetRegisterData() throws NacosException {
    // Two mocked Nacos instances must each be converted to the expected
    // upstream JSON, preserving order.
    final String key = "test";
    Instance instance1 = new Instance();
    instance1.setIp("192.168.1.1");
    instance1.setPort(8080);
    Instance instance2 = new Instance();
    instance2.setIp("192.168.1.2");
    instance2.setPort(8081);
    final List<Instance> mockInstances = Arrays.asList(instance1, instance2);
    when(namingService.selectInstances(key, "SHENYU_GROUP", true)).thenReturn(mockInstances);
    final List<String> result = nacosDiscoveryServiceUnderTest.getRegisterData(key);
    verify(namingService).selectInstances(key, "SHENYU_GROUP", true);
    // Verify that the data format is consistent
    assertEquals(mockInstances.size(), result.size());
    for (int i = 0; i < mockInstances.size(); i++) {
        Instance instance = mockInstances.get(i);
        String expectedJson = buildInstanceInfoJson(instance);
        assertEquals(expectedJson, result.get(i));
    }
}
|
/**
 * Pass-through merge policy: the incoming value wins whenever it exists;
 * otherwise the existing value is kept.
 */
@Override
public Object merge(T mergingValue, T existingValue) {
    final T winner = (mergingValue != null) ? mergingValue : existingValue;
    return winner.getRawValue();
}
|
@Test
@SuppressWarnings("ConstantConditions")
public void merge_mergingNull() {
    // A null merging value must fall back to the existing entry's raw value.
    MapMergeTypes existing = mergingValueWithGivenValue(EXISTING);
    MapMergeTypes merging = null;
    assertEquals(EXISTING, mergePolicy.merge(merging, existing));
}
|
/**
 * Reads the next 2 bytes from the buffer as an unsigned little-endian
 * 16-bit integer, i.e. a value in [0, 65535].
 * Side effect: the buffer's byte order is switched to little-endian.
 *
 * @throws BufferUnderflowException if fewer than 2 bytes remain
 */
public static int readUint16(ByteBuffer buf) throws BufferUnderflowException {
    buf.order(ByteOrder.LITTLE_ENDIAN);
    return buf.getShort() & 0xFFFF;
}
|
// A negative offset into the byte[] overload must surface as an
// ArrayIndexOutOfBoundsException.
@Test(expected = ArrayIndexOutOfBoundsException.class)
public void testReadUint16ThrowsException3() {
    ByteUtils.readUint16(new byte[]{1, 2, 3}, -1);
}
|
/**
 * Returns the maximum of each row of the matrix: element {@code i} of the
 * result is the largest value in {@code matrix[i]}.
 */
public static int[] rowMax(int[][] matrix) {
    final int rows = matrix.length;
    int[] maxima = new int[rows];
    for (int row = 0; row < rows; row++) {
        maxima[row] = max(matrix[row]);
    }
    return maxima;
}
|
@Test
public void testRowMax() {
    System.out.println("rowMax");
    // Exercises the double[][] overload: each row's maximum, within 1e-7.
    double[][] A = {
        {0.7220180, 0.07121225, 0.6881997},
        {-0.2648886, -0.89044952, 0.3700456},
        {-0.6391588, 0.44947578, 0.6240573}
    };
    double[] r = {0.7220180, 0.3700456, 0.6240573};
    double[] result = MathEx.rowMax(A);
    for (int i = 0; i < r.length; i++) {
        assertEquals(result[i], r[i], 1E-7);
    }
}
|
/**
 * Reports overall partition-service safety: the partition table must be SAFE
 * first; only then is replica sync checked (triggering sync for out-of-sync
 * replicas as a side effect).
 */
@SuppressWarnings("checkstyle:npathcomplexity")
public PartitionServiceState getPartitionServiceState() {
    final PartitionServiceState tableState = getPartitionTableState();
    if (tableState != SAFE) {
        return tableState;
    }
    return checkAndTriggerReplicaSync() ? SAFE : REPLICA_NOT_SYNC;
}
|
@Test
public void shouldBeSafe_whenInitializedOnMaster() {
    // On a single master with partitions freshly arranged, the replica state
    // checker must report SAFE.
    TestHazelcastInstanceFactory factory = createHazelcastInstanceFactory();
    HazelcastInstance hz = factory.newHazelcastInstance();
    InternalPartitionServiceImpl partitionService = getNode(hz).partitionService;
    partitionService.firstArrangement();
    PartitionReplicaStateChecker replicaStateChecker = partitionService.getPartitionReplicaStateChecker();
    PartitionServiceState state = replicaStateChecker.getPartitionServiceState();
    assertEquals(PartitionServiceState.SAFE, state);
}
|
/**
 * Fetches a Hive table via the metastore RPC, timing the call under the
 * "HMS.getTable" tracer scope.
 */
public Table getTable(String dbName, String tableName) {
    // try-with-resources stops the trace timer even if the RPC throws.
    try (Timer ignored = Tracers.watchScope(EXTERNAL, "HMS.getTable")) {
        return callRPC("getTable", String.format("Failed to get table [%s.%s]", dbName, tableName),
                dbName, tableName);
    }
}
|
@Test
public void testClientPool(@Mocked HiveMetaStoreClient metaStoreClient) throws Exception {
    // Stub every getTable RPC to succeed without a real metastore.
    new Expectations() {
        {
            metaStoreClient.getTable(anyString, anyString);
            result = new Table();
            minTimes = 0;
        }
    };
    // Count how many underlying HMS clients are actually created.
    final int[] clientNum = {0};
    new MockUp<RetryingMetaStoreClient>() {
        @Mock
        public IMetaStoreClient getProxy(Configuration hiveConf, HiveMetaHookLoader hookLoader,
                                         ConcurrentHashMap<String, Long> metaCallTimeMap, String mscClassName,
                                         boolean allowEmbedded) throws MetaException {
            clientNum[0]++;
            return metaStoreClient;
        }
    };
    HiveConf hiveConf = new HiveConf();
    hiveConf.set(MetastoreConf.ConfVars.THRIFT_URIS.getHiveName(), "thrift://127.0.0.1:9030");
    HiveMetaClient client = new HiveMetaClient(hiveConf);
    // NOTE: this is HiveMetaClient.MAX_HMS_CONNECTION_POOL_SIZE
    int poolSize = 32;
    // call client method concurrently,
    // and make sure the number of hive clients will not exceed poolSize
    for (int i = 0; i < 10; i++) {
        ExecutorService es = Executors.newCachedThreadPool();
        for (int j = 0; j < poolSize; j++) {
            es.execute(() -> {
                try {
                    client.getTable("db", "tbl");
                } catch (Exception e) {
                    e.printStackTrace();
                    Assert.fail(e.getMessage());
                }
            });
        }
        es.shutdown();
        es.awaitTermination(1, TimeUnit.HOURS);
    }
    System.out.println("called times is " + clientNum[0]);
    // Pooled clients are reused across rounds, so total creations stay
    // within [1, poolSize] despite 10 * poolSize calls.
    Assert.assertTrue(
            clientNum[0] >= 1 && clientNum[0] <= poolSize);
}
|
/**
 * Returns the Kafka Streams config props with the metrics resource label for
 * the application id added, as an unmodifiable map.
 */
public Map<String, Object> getKsqlStreamConfigProps(final String applicationId) {
    final Map<String, Object> map = new HashMap<>(getKsqlStreamConfigProps());
    map.put(
        MetricCollectors.RESOURCE_LABEL_PREFIX
            + StreamsConfig.APPLICATION_ID_CONFIG,
        applicationId
    );
    // Streams client metrics aren't used in Confluent deployment
    possiblyConfigureConfluentTelemetry(map);
    return Collections.unmodifiableMap(map);
}
|
@Test
public void shouldSetLogAndContinueExceptionHandlerWhenFailOnProductionErrorFalse() {
    // With fail-on-production-error disabled, the streams config must pick
    // the log-and-continue handler instead of failing the task.
    final KsqlConfig ksqlConfig =
        new KsqlConfig(Collections.singletonMap(KsqlConfig.FAIL_ON_PRODUCTION_ERROR_CONFIG, false));
    final Object result = ksqlConfig.getKsqlStreamConfigProps()
        .get(StreamsConfig.DEFAULT_PRODUCTION_EXCEPTION_HANDLER_CLASS_CONFIG);
    assertThat(result, equalTo(LogAndContinueProductionExceptionHandler.class));
}
|
/**
 * Number of pending events, delegated to the underlying event handler.
 */
@Override
public int size() {
    return eventHandler.size();
}
|
@Test
public void testSize() throws Exception {
    try (KafkaEventQueue queue = new KafkaEventQueue(Time.SYSTEM, logContext, "testEmpty")) {
        assertTrue(queue.isEmpty());
        // Events block on this future so the queue size is observable
        // before they complete.
        CompletableFuture<Void> future = new CompletableFuture<>();
        queue.append(() -> future.get());
        assertFalse(queue.isEmpty());
        assertEquals(1, queue.size());
        queue.append(() -> future.get());
        assertEquals(2, queue.size());
        future.complete(null);
        TestUtils.waitForCondition(() -> queue.isEmpty(), "Failed to see the queue become empty.");
        // Deferred events also count towards emptiness until cancelled.
        queue.scheduleDeferred("later",
            __ -> OptionalLong.of(Time.SYSTEM.nanoseconds() + HOURS.toNanos(1)),
            () -> {
            });
        assertFalse(queue.isEmpty());
        queue.scheduleDeferred("soon",
            __ -> OptionalLong.of(Time.SYSTEM.nanoseconds() + TimeUnit.MILLISECONDS.toNanos(1)),
            () -> {
            });
        assertFalse(queue.isEmpty());
        queue.cancelDeferred("later");
        queue.cancelDeferred("soon");
        TestUtils.waitForCondition(() -> queue.isEmpty(), "Failed to see the queue become empty.");
        assertTrue(queue.isEmpty());
    }
}
|
/**
 * Partitions the given issues by taint status: key {@code true} maps to the
 * taint issues, key {@code false} to the standard issues.
 */
public Map<Boolean, List<IssueDto>> mapIssuesByTaintStatus(List<IssueDto> issues) {
    Map<Boolean, List<IssueDto>> partitioned = new HashMap<>();
    partitioned.put(Boolean.TRUE, getTaintIssuesOnly(issues));
    partitioned.put(Boolean.FALSE, getStandardIssuesOnly(issues));
    return partitioned;
}
|
@Test
public void test_mapIssuesByTaintStatus() {
    // The fixture contains 6 taint and 3 standard issues; the map must
    // partition them under true/false keys in their original order.
    Map<Boolean, List<IssueDto>> issuesByTaintStatus = underTest.mapIssuesByTaintStatus(getIssues());
    assertThat(issuesByTaintStatus.keySet()).hasSize(2);
    assertThat(issuesByTaintStatus.get(true)).hasSize(6);
    assertThat(issuesByTaintStatus.get(false)).hasSize(3);
    assertThat(issuesByTaintStatus.get(true).get(0).getKey()).isEqualTo("taintIssue1");
    assertThat(issuesByTaintStatus.get(true).get(1).getKey()).isEqualTo("taintIssue2");
    assertThat(issuesByTaintStatus.get(true).get(2).getKey()).isEqualTo("taintIssue3");
    assertThat(issuesByTaintStatus.get(true).get(3).getKey()).isEqualTo("taintIssue4");
    assertThat(issuesByTaintStatus.get(true).get(4).getKey()).isEqualTo("taintIssue5");
    assertThat(issuesByTaintStatus.get(true).get(5).getKey()).isEqualTo("taintIssue6");
    assertThat(issuesByTaintStatus.get(false).get(0).getKey()).isEqualTo("standardIssue1");
    assertThat(issuesByTaintStatus.get(false).get(1).getKey()).isEqualTo("standardIssue2");
    assertThat(issuesByTaintStatus.get(false).get(2).getKey()).isEqualTo("standardIssue3");
}
|
/**
 * Resolves the job master RPC addresses, in priority order:
 * explicit job-master RPC addresses, then regular master RPC addresses with
 * the job RPC port substituted, then the embedded-journal addresses with the
 * job RPC port substituted.
 */
public static List<InetSocketAddress> getJobMasterRpcAddresses(AlluxioConfiguration conf) {
    // First check whether job rpc addresses are explicitly configured.
    if (conf.isSet(PropertyKey.JOB_MASTER_RPC_ADDRESSES)) {
        return parseInetSocketAddresses(
            conf.getList(PropertyKey.JOB_MASTER_RPC_ADDRESSES));
    }
    int jobRpcPort =
        NetworkAddressUtils.getPort(NetworkAddressUtils.ServiceType.JOB_MASTER_RPC, conf);
    // Fall back on explicitly configured regular master rpc addresses.
    if (conf.isSet(PropertyKey.MASTER_RPC_ADDRESSES)) {
        List<InetSocketAddress> addrs =
            parseInetSocketAddresses(conf.getList(PropertyKey.MASTER_RPC_ADDRESSES));
        return overridePort(addrs, jobRpcPort);
    }
    // Fall back on server-side journal configuration.
    return overridePort(getEmbeddedJournalAddresses(conf, ServiceType.JOB_MASTER_RAFT), jobRpcPort);
}
|
@Test
public void getJobMasterRpcAddressesDefault() {
    // With no explicit configuration, the local host with the default job
    // RPC port (20001) must be returned.
    AlluxioConfiguration conf = createConf(Collections.emptyMap());
    String host = NetworkAddressUtils.getLocalHostName(5 * Constants.SECOND_MS);
    assertEquals(Arrays.asList(InetSocketAddress.createUnresolved(host, 20001)),
        ConfigurationUtils.getJobMasterRpcAddresses(conf));
}
|
// Integer overload: widens both coordinates to Double (preserving nulls)
// and delegates to the Double overload.
@Udf(description = "Returns the inverse (arc) tangent of y / x")
public Double atan2(
    @UdfParameter(
        value = "y",
        description = "The ordinate (y) coordinate."
    ) final Integer y,
    @UdfParameter(
        value = "x",
        description = "The abscissa (x) coordinate."
    ) final Integer x
) {
  final Double yAsDouble = (y == null) ? null : y.doubleValue();
  final Double xAsDouble = (x == null) ? null : x.doubleValue();
  return atan2(yAsDouble, xAsDouble);
}
|
@Test
public void shouldHandleNegativeYNegativeX() {
    // Third-quadrant inputs across the Double, int, and long overloads.
    assertThat(udf.atan2(-1.1, -0.24), closeTo(-1.7856117271965553, 0.000000000000001));
    assertThat(udf.atan2(-6.0, -7.1), closeTo(-2.4399674339361113, 0.000000000000001));
    assertThat(udf.atan2(-2, -3), closeTo(-2.5535900500422257, 0.000000000000001));
    assertThat(udf.atan2(-2L, -2L), closeTo(-2.356194490192345, 0.000000000000001));
}
|
/**
 * Builds the Pubsub read transform from the configuration.
 * Validates that exactly one of subscription/topic is set, and that schema
 * and format are either both present or (for the Pubsub schema service)
 * both absent; then selects the payload schema and byte-to-Row mapper for
 * the RAW, JSON, or AVRO format.
 *
 * @throws IllegalArgumentException on invalid source/schema/format combinations
 */
@Override
public SchemaTransform from(PubsubReadSchemaTransformConfiguration configuration) {
    if (configuration.getSubscription() == null && configuration.getTopic() == null) {
        throw new IllegalArgumentException(
                "To read from Pubsub, a subscription name or a topic name must be provided");
    }
    if (configuration.getSubscription() != null && configuration.getTopic() != null) {
        throw new IllegalArgumentException(
                "To read from Pubsub, a subscription name or a topic name must be provided. Not both.");
    }
    // RAW needs no schema; for every other format, schema and format must be
    // provided together (or both omitted to use the Pubsub schema service).
    if (!"RAW".equals(configuration.getFormat())) {
        if ((Strings.isNullOrEmpty(configuration.getSchema())
                && !Strings.isNullOrEmpty(configuration.getFormat()))
                || (!Strings.isNullOrEmpty(configuration.getSchema())
                && Strings.isNullOrEmpty(configuration.getFormat()))) {
            throw new IllegalArgumentException(
                    "A schema was provided without a data format (or viceversa). Please provide "
                            + "both of these parameters to read from Pubsub, or if you would like to use the Pubsub schema service,"
                            + " please leave both of these blank.");
        }
    }
    Schema payloadSchema;
    SerializableFunction<byte[], Row> payloadMapper;
    // Format comparison is case-insensitive via upper-casing.
    String format =
            configuration.getFormat() == null ? null : configuration.getFormat().toUpperCase();
    if ("RAW".equals(format)) {
        // RAW: a single BYTES field carrying the message payload verbatim.
        payloadSchema = Schema.of(Schema.Field.of("payload", Schema.FieldType.BYTES));
        payloadMapper = input -> Row.withSchema(payloadSchema).addValue(input).build();
    } else if ("JSON".equals(format)) {
        payloadSchema = JsonUtils.beamSchemaFromJsonSchema(configuration.getSchema());
        payloadMapper = JsonUtils.getJsonBytesToRowFunction(payloadSchema);
    } else if ("AVRO".equals(format)) {
        payloadSchema =
                AvroUtils.toBeamSchema(
                        new org.apache.avro.Schema.Parser().parse(configuration.getSchema()));
        payloadMapper = AvroUtils.getAvroBytesToRowFunction(payloadSchema);
    } else {
        throw new IllegalArgumentException(
                String.format(
                        "Format %s not supported. Only supported formats are %s",
                        configuration.getFormat(), VALID_FORMATS_STR));
    }
    PubsubReadSchemaTransform transform =
            new PubsubReadSchemaTransform(configuration, payloadSchema, payloadMapper);
    // Optional test hooks: custom client factory and clock.
    if (configuration.getClientFactory() != null) {
        transform.setClientFactory(configuration.getClientFactory());
    }
    if (configuration.getClock() != null) {
        transform.setClock(configuration.getClock());
    }
    return transform;
}
|
@Test
public void testNoSchema() {
    // AVRO format without a schema must be rejected when the transform is
    // applied to the pipeline.
    PCollectionRowTuple begin = PCollectionRowTuple.empty(p);
    assertThrows(
        IllegalStateException.class,
        () ->
            begin.apply(
                new PubsubReadSchemaTransformProvider()
                    .from(
                        PubsubReadSchemaTransformConfiguration.builder()
                            .setSubscription(SUBSCRIPTION)
                            .setFormat("AVRO")
                            .build())));
    p.run().waitUntilFinish();
}
|
/**
 * Produces the current date/time: epoch milliseconds when no format is set,
 * otherwise formatted with the (possibly aliased) pattern via a cached
 * formatter. Optionally stores the result in a JMeter variable.
 */
@Override
public String execute(SampleResult previousResult, Sampler currentSampler) throws InvalidVariableException {
    String datetime;
    if (format.isEmpty()) {// Default to milliseconds
        datetime = Long.toString(System.currentTimeMillis());
    } else {
        // Resolve any aliases
        String fmt = aliases.get(format);
        if (fmt == null) {
            fmt = format;// Not found
        }
        datetime = DATE_TIME_FORMATTER_CACHE.get(fmt).get();
    }
    if (!variable.isEmpty()) {
        JMeterVariables vars = getVariables();
        if (vars != null){// vars will be null on TestPlan
            vars.put(variable, datetime);
        }
    }
    return datetime;
}
|
@Test
void testDefaultNone() throws Exception {
    // No format configured: the result must be epoch milliseconds bracketed
    // by timestamps taken before and after the call.
    long before = System.currentTimeMillis();
    value = variable.execute(result, null);
    long now = Long.parseLong(value);
    long after = System.currentTimeMillis();
    assertBetween(before, after, now);
}
|
/**
 * Writes {@code value} as unsigned lowercase hexadecimal with no padding
 * (at most 16 digits), rendering the digits right-to-left into the shared
 * char buffer and emitting them in a single write.
 */
public void writeUnsignedLongAsHex(long value) throws IOException {
    int pos = 24;
    do {
        // Character.forDigit yields '0'-'9' then lowercase 'a'-'f' for radix 16.
        buffer[--pos] = Character.forDigit((int) (value & 0xF), 16);
        value >>>= 4;
    } while (value != 0);
    write(buffer, pos, 24 - pos);
}
|
@Test
public void testWriteUnsignedLongAsHex() throws IOException {
    // Boundary values: negatives render as their unsigned 64-bit hex form,
    // and no leading zeros are emitted.
    Assert.assertEquals("ffffffffffffffff", performWriteUnsignedLongAsHex(-1));
    Assert.assertEquals("7fffffff", performWriteUnsignedLongAsHex(Integer.MAX_VALUE));
    Assert.assertEquals("ffffffff80000000", performWriteUnsignedLongAsHex(Integer.MIN_VALUE));
    Assert.assertEquals("0", performWriteUnsignedLongAsHex(0));
    Assert.assertEquals("1", performWriteUnsignedLongAsHex(1));
    Assert.assertEquals("80000000", performWriteUnsignedLongAsHex(((long) Integer.MAX_VALUE) + 1));
    Assert.assertEquals("ffffffff7fffffff", performWriteUnsignedLongAsHex(((long) Integer.MIN_VALUE) - 1));
    Assert.assertEquals("7fffffffffffffff", performWriteUnsignedLongAsHex(Long.MAX_VALUE));
    Assert.assertEquals("8000000000000000", performWriteUnsignedLongAsHex(Long.MIN_VALUE));
}
|
/**
 * Verification mode asserting zero invocations; shorthand for {@code times(0)}.
 */
public static VerificationMode never() {
    return times(0);
}
|
@Test
public void should_monitor_server_behavior_without_port() throws Exception {
    // A server created without an explicit port must still route events to
    // the monitor: one arrival, one leave, and no exceptions.
    final MocoMonitor monitor = mock(MocoMonitor.class);
    final HttpServer server = httpServer(monitor);
    server.get(by(uri("/foo"))).response("bar");
    running(server, () -> assertThat(helper.get(remoteUrl(server.port(), "/foo")), is("bar")));
    verify(monitor).onMessageArrived(any(HttpRequest.class));
    verify(monitor).onMessageLeave(any(HttpResponse.class));
    verify(monitor, Mockito.never()).onException(any(Exception.class));
}
|
/**
 * Looks up the subscription group config from the local cache.
 * Returns {@code null} when the lookup fails or when the cache holds the
 * negative-result sentinel for this group.
 */
@Override
public SubscriptionGroupConfig getSubscriptionGroupConfig(ProxyContext ctx, String group) {
    SubscriptionGroupConfig cached;
    try {
        cached = this.subscriptionGroupConfigCache.get(group);
    } catch (Exception ignored) {
        // Cache lookup failures are treated the same as "no such group".
        return null;
    }
    // EMPTY_SUBSCRIPTION_GROUP_CONFIG is the cached negative result.
    return (cached == EMPTY_SUBSCRIPTION_GROUP_CONFIG) ? null : cached;
}
|
@Test
public void testGetSubscriptionGroupConfig() {
    // A known group must resolve to a config and populate exactly one
    // cache entry.
    ProxyContext ctx = ProxyContext.create();
    assertNotNull(this.clusterMetadataService.getSubscriptionGroupConfig(ctx, GROUP));
    assertEquals(1, this.clusterMetadataService.subscriptionGroupConfigCache.asMap().size());
}
|
/**
 * Releases all buffered inbound messages; returns whether any message
 * was released.
 */
public boolean releaseInbound() {
    return releaseAll(inboundMessages);
}
|
@Test
public void testReleaseInbound() {
    ByteBuf in = Unpooled.buffer();
    ByteBuf out = Unpooled.buffer();
    try {
        EmbeddedChannel channel = new EmbeddedChannel();
        assertTrue(channel.writeInbound(in));
        assertEquals(1, in.refCnt());
        assertTrue(channel.writeOutbound(out));
        assertEquals(1, out.refCnt());
        // releaseInbound must release only the inbound buffer, leaving the
        // outbound one untouched and still readable.
        assertTrue(channel.releaseInbound());
        assertEquals(0, in.refCnt());
        assertEquals(1, out.refCnt());
        assertTrue(channel.finish());
        assertNull(channel.readInbound());
        ByteBuf buffer = channel.readOutbound();
        assertSame(out, buffer);
        buffer.release();
        assertNull(channel.readOutbound());
    } finally {
        release(in, out);
    }
}
|
/**
 * Converts each payload in {@code data} into a {@link ServiceBusMessage},
 * applying the same application properties and correlation id to all of them.
 */
public static Iterable<ServiceBusMessage> createServiceBusMessages(
        final Iterable<?> data, final Map<String, Object> applicationProperties, final String correlationId) {
    return StreamSupport.stream(data.spliterator(), false)
            .map(obj -> createServiceBusMessage(obj, applicationProperties, correlationId))
            .collect(Collectors.toList());
}
|
@Test
void testCreateServiceBusMessages() {
    // String payloads must round-trip through the message body as text.
    final List<String> inputMessages = new LinkedList<>();
    inputMessages.add("test data");
    inputMessages.add(String.valueOf(12345));
    final Iterable<ServiceBusMessage> busMessages = ServiceBusUtils.createServiceBusMessages(inputMessages, null, null);
    assertTrue(StreamSupport.stream(busMessages.spliterator(), false)
            .anyMatch(record -> record.getBody().toString().equals("test data")));
    assertTrue(StreamSupport.stream(busMessages.spliterator(), false)
            .anyMatch(record -> record.getBody().toString().equals("12345")));
    //Test bytes
    final List<byte[]> inputMessages2 = new LinkedList<>();
    byte[] byteBody1 = "test data".getBytes(StandardCharsets.UTF_8);
    byte[] byteBody2 = "test data2".getBytes(StandardCharsets.UTF_8);
    inputMessages2.add(byteBody1);
    inputMessages2.add(byteBody2);
    final Iterable<ServiceBusMessage> busMessages2 = ServiceBusUtils.createServiceBusMessages(inputMessages2, null, null);
    assertTrue(StreamSupport.stream(busMessages2.spliterator(), false)
            .anyMatch(message -> Arrays.equals(message.getBody().toBytes(), byteBody1)));
    assertTrue(StreamSupport.stream(busMessages2.spliterator(), false)
            .anyMatch(message -> Arrays.equals(message.getBody().toBytes(), byteBody2)));
}
|
/**
 * Loads configuration data from {@code path}, considering regular files only
 * (directories and other special entries are filtered out).
 *
 * @param path the location to read configuration from
 * @return the resolved configuration data
 */
@Override
public ConfigData get(String path) {
    // Delegate to the filtered overload, admitting only regular files.
    return get(path, candidate -> Files.isRegularFile(candidate));
}
|
@Test
public void testEmptyPath() {
    // An empty path must produce an empty data map and no TTL.
    final ConfigData result = provider.get("");
    assertTrue(result.data().isEmpty());
    assertNull(result.ttl());
}
|
/**
 * Creates the {@link MaterialAgent} that prepares the given material revision on
 * the agent side.
 *
 * <p>Dependency and package materials need no agent-side working copy, so they map to
 * {@link MaterialAgent#NO_OP}. Pluggable SCM and plain SCM materials get dedicated
 * agents that operate inside {@code workingDirectory}.
 *
 * @param revision the material revision to prepare
 * @return the agent responsible for this material type
 * @throws RuntimeException if the material type is not recognized
 */
public MaterialAgent createAgent(MaterialRevision revision) {
    Material material = revision.getMaterial();
    // Dependency and package materials require no checkout work on the agent.
    if (material instanceof DependencyMaterial || material instanceof PackageMaterial) {
        return MaterialAgent.NO_OP;
    }
    if (material instanceof PluggableSCMMaterial) {
        return new PluggableSCMMaterialAgent(scmExtension, revision, workingDirectory, consumer);
    }
    if (material instanceof ScmMaterial) {
        String destFolderPath = ((ScmMaterial) material).workingdir(workingDirectory).getAbsolutePath();
        return new AbstractMaterialAgent(revision, consumer, workingDirectory,
                new AgentSubprocessExecutionContext(agentIdentifier, destFolderPath));
    }
    // Fixed message: this factory produces MaterialAgents (the previous text said
    // "MaterialChecker", a copy/paste slip from a sibling factory).
    throw new RuntimeException("Could not find MaterialAgent for material = " + material);
}
|
@Test
public void shouldGetPackageMaterialAgent() {
    final File workingDir = new File("/tmp/workingDirectory");
    final MaterialAgentFactory factory = new MaterialAgentFactory(null, workingDir, null, scmExtension);
    final MaterialRevision packageRevision = new MaterialRevision(new PackageMaterial(), new Modifications());
    // Package materials require no agent-side checkout, so the factory returns the no-op agent.
    final MaterialAgent agent = factory.createAgent(packageRevision);
    assertThat(agent, is(NO_OP));
}
|
/**
 * Resolves the property named {@code key} (after translating it to the underlying
 * property name) and binds it to {@code targetType}.
 *
 * @param key the externally visible property key
 * @param targetType the type to bind the value to
 * @return the bound value, or empty if the property is absent or unbindable
 */
@Override
public <T> Optional<T> getProperty(String key, Class<T> targetType) {
    var bindResult = binder.bind(targetPropertyName(key), Bindable.of(targetType));
    if (bindResult.isBound()) {
        return Optional.of(bindResult.get());
    }
    return Optional.empty();
}
|
@Test
void resolvesCustomConfigClassProperties() {
    // Two flat environment properties should bind into one CustomPropertiesClass value.
    env.setProperty("prop.0.custProps.f1", "f1val");
    env.setProperty("prop.0.custProps.f2", "1234");
    var underTest = new PropertyResolverImpl(env);
    var resolved = underTest.getProperty("prop.0.custProps", CustomPropertiesClass.class);
    assertThat(resolved).hasValue(new CustomPropertiesClass("f1val", 1234));
}
|
/**
 * Notifies every consumer-region-group execution view tracking {@code region} that
 * the region has finished. A region with no registered views is a no-op.
 */
void regionFinished(SchedulingPipelinedRegion region) {
    executionViewByRegion
            .getOrDefault(region, Collections.emptySet())
            .forEach(view -> view.regionFinished(region));
}
|
@Test
void testRegionFinishedMultipleTimes() throws Exception {
    // Reporting the same region as finished twice must be idempotent.
    for (int attempt = 0; attempt < 2; attempt++) {
        consumerRegionGroupExecutionViewMaintainer.regionFinished(consumerRegion);
    }
    assertThat(consumerRegionGroupExecutionView.isFinished()).isTrue();
}
|
/**
 * FEEL string() conversion: formats any value as its FEEL string representation.
 *
 * @param val the value to convert; a null input yields a null result (not an error)
 * @return the formatted string wrapped in a successful result
 */
public FEELFnResult<String> invoke(@ParameterName("from") Object val) {
    // null is a legal input in FEEL and maps to a null string result.
    return val == null
            ? FEELFnResult.ofResult(null)
            : FEELFnResult.ofResult(TypeUtil.formatValue(val, false));
}
|
@Test
void invokeRangeClosedOpen() {
    // A closed-open range must render with '[' and ')' delimiters.
    final RangeImpl closedOpen = new RangeImpl(Range.RangeBoundary.CLOSED, 12, 15, Range.RangeBoundary.OPEN);
    FunctionTestUtil.assertResult(stringFunction.invoke(closedOpen), "[ 12 .. 15 )");
}
|
/**
 * Computes the Hamming distance between two strings: the number of code-point
 * positions at which they differ. Both inputs must have the same length in code
 * points, otherwise an INVALID_FUNCTION_ARGUMENT error is raised.
 */
@Description("computes Hamming distance between two strings")
@ScalarFunction
@LiteralParameters({"x", "y"})
@SqlType(StandardTypes.BIGINT)
public static long hammingDistance(@SqlType("varchar(x)") Slice left, @SqlType("varchar(y)") Slice right)
{
    int distance = 0;
    int leftPosition = 0;
    int rightPosition = 0;
    // Walk both inputs one code point at a time; positions are byte offsets into the slices.
    while (leftPosition < left.length() && rightPosition < right.length()) {
        int codePointLeft = tryGetCodePointAt(left, leftPosition);
        int codePointRight = tryGetCodePointAt(right, rightPosition);
        // if both code points are invalid, we do not care if they are equal
        // the following code treats them as equal if they happen to be of the same length
        if (codePointLeft != codePointRight) {
            distance++;
        }
        // A non-negative value is a valid code point (advance by its UTF-8 length);
        // a negative value encodes, as its negation, the byte count of an invalid sequence.
        leftPosition += codePointLeft >= 0 ? lengthOfCodePoint(codePointLeft) : -codePointLeft;
        rightPosition += codePointRight >= 0 ? lengthOfCodePoint(codePointRight) : -codePointRight;
    }
    // Both inputs must be exhausted at the same time, i.e. contain the same number
    // of code points; otherwise the lengths differ and the call is invalid.
    checkCondition(
            leftPosition == left.length() && rightPosition == right.length(),
            INVALID_FUNCTION_ARGUMENT,
            "The input strings to hamming_distance function must have the same length");
    return distance;
}
|
@Test
public void testHammingDistance()
{
    // Same-length ASCII inputs, including the empty string and identical strings.
    assertFunction("HAMMING_DISTANCE('', '')", BIGINT, 0L);
    assertFunction("HAMMING_DISTANCE('hello', 'hello')", BIGINT, 0L);
    assertFunction("HAMMING_DISTANCE('hello', 'jello')", BIGINT, 1L);
    assertFunction("HAMMING_DISTANCE('like', 'hate')", BIGINT, 3L);
    assertFunction("HAMMING_DISTANCE('hello', 'world')", BIGINT, 4L);
    assertFunction("HAMMING_DISTANCE('\u0000', '\u0001')", BIGINT, 1L);
    // NULL propagation: any NULL argument yields NULL.
    assertFunction("HAMMING_DISTANCE(NULL, NULL)", BIGINT, null);
    assertFunction("HAMMING_DISTANCE('hello', NULL)", BIGINT, null);
    assertFunction("HAMMING_DISTANCE(NULL, 'world')", BIGINT, null);
    // Test for unicode
    assertFunction("HAMMING_DISTANCE('hello na\u00EFve world', 'hello naive world')", BIGINT, 1L);
    assertFunction("HAMMING_DISTANCE('\u4FE1\u5FF5,\u7231,\u5E0C\u671B', '\u4FE1\u4EF0,\u7231,\u5E0C\u671B')", BIGINT, 1L);
    assertFunction("HAMMING_DISTANCE('\u4F11\u5FF5,\u7231,\u5E0C\u671B', '\u4FE1\u5FF5,\u7231,\u5E0C\u671B')", BIGINT, 1L);
    // Test for invalid arguments: lengths are compared in code points, so a
    // multi-byte character still counts as one position.
    assertInvalidFunction("HAMMING_DISTANCE('hello', '')", "The input strings to hamming_distance function must have the same length");
    assertInvalidFunction("HAMMING_DISTANCE('', 'hello')", "The input strings to hamming_distance function must have the same length");
    assertInvalidFunction("HAMMING_DISTANCE('hello', 'o')", "The input strings to hamming_distance function must have the same length");
    assertInvalidFunction("HAMMING_DISTANCE('h', 'hello')", "The input strings to hamming_distance function must have the same length");
    assertInvalidFunction("HAMMING_DISTANCE('hello na\u00EFve world', 'hello na:ive world')", "The input strings to hamming_distance function must have the same length");
    assertInvalidFunction("HAMMING_DISTANCE('\u4FE1\u5FF5,\u7231,\u5E0C\u671B', '\u4FE1\u5FF5\u5E0C\u671B')", "The input strings to hamming_distance function must have the same length");
}
|
/**
 * Resets and restarts the seckill activity state machine for the requested seckill id.
 *
 * @param request carries the seckill id whose activity is being (re)started
 * @throws RuntimeException if the RESET transition is rejected, i.e. the current
 *         activity has not finished yet; the message is user-facing Chinese text
 *         and is intentionally left unchanged
 */
@Override
public void handle(SeckillWebMockRequestDTO request) {
    // Initialize the state machine for this seckill id.
    stateMachineService.initStateMachine(request.getSeckillId());
    // Initialize the stock quantity.
    // Use the state machine to gate the activity state: RESET must succeed first.
    if (!stateMachineService.feedMachine(Events.ACTIVITY_RESET, request.getSeckillId())) {
        // Message: "The activity has not finished yet; please retry after it ends."
        throw new RuntimeException("活动尚未结束,请等待活动结束后再次操作");
    }
    // NOTE(review): unlike RESET, the START transition's result is ignored — confirm intentional.
    stateMachineService.feedMachine(Events.ACTIVITY_START, request.getSeckillId());
}
|
@Test
public void shouldHandleRequestSuccessfully() {
    // Given: a request whose RESET and START transitions both succeed.
    final SeckillWebMockRequestDTO request = new SeckillWebMockRequestDTO();
    request.setSeckillId(123L);
    when(stateMachineService.feedMachine(Events.ACTIVITY_RESET, request.getSeckillId())).thenReturn(true);
    when(stateMachineService.feedMachine(Events.ACTIVITY_START, request.getSeckillId())).thenReturn(true);
    // When: the handler processes the request.
    stateMachinePreRequestHandler.handle(request);
    // Then: the machine is initialized and fed RESET then START, each exactly once.
    verify(stateMachineService, times(1)).initStateMachine(request.getSeckillId());
    verify(stateMachineService, times(1)).feedMachine(Events.ACTIVITY_RESET, request.getSeckillId());
    verify(stateMachineService, times(1)).feedMachine(Events.ACTIVITY_START, request.getSeckillId());
}
|
/**
 * Proxies a blueprint-graph request straight through to the Kestra API.
 *
 * @param id the blueprint id to fetch the graph for
 * @param httpRequest the inbound request, forwarded to preserve headers/query
 * @return the upstream graph payload as a generic map
 * @throws URISyntaxException if the upstream URI cannot be built
 */
@ExecuteOn(TaskExecutors.IO)
@Get(value = "{id}/graph")
@Operation(tags = {"Blueprints"}, summary = "Get a blueprint graph")
public Map<String, Object> blueprintGraph(
    @Parameter(description = "The blueprint id") String id,
    HttpRequest<?> httpRequest
) throws URISyntaxException {
    final String upstreamPath = "/v1/blueprints/" + id + "/graph";
    return fastForwardToKestraApi(httpRequest, upstreamPath, Argument.mapOf(String.class, Object.class));
}
|
@SuppressWarnings("unchecked")
@Test
void blueprintGraph(WireMockRuntimeInfo wmRuntimeInfo) {
    // Stub the upstream Kestra API with a canned graph fixture.
    stubFor(get(urlMatching("/v1/blueprints/id_1/graph.*"))
        .willReturn(aResponse()
            .withHeader("Content-Type", "application/json")
            .withBodyFile("blueprint-graph.json"))
    );
    Map<String, Object> graph = client.toBlocking().retrieve(
        HttpRequest.GET("/api/v1/blueprints/community/id_1/graph"),
        Argument.mapOf(String.class, Object.class)
    );
    // Counts below mirror the contents of the blueprint-graph.json fixture.
    List<Map<String, Object>> nodes = (List<Map<String, Object>>) graph.get("nodes");
    List<Map<String, Object>> edges = (List<Map<String, Object>>) graph.get("edges");
    List<Map<String, Object>> clusters = (List<Map<String, Object>>) graph.get("clusters");
    assertThat(nodes.size(), is(12));
    assertThat(nodes.stream().filter(abstractGraph -> abstractGraph.get("uid").equals("3mTDtNoUxYIFaQtgjEg28_root")).count(), is(1L));
    assertThat(edges.size(), is(16));
    assertThat(clusters.size(), is(1));
    // Confirm the controller actually forwarded the call upstream.
    WireMock wireMock = wmRuntimeInfo.getWireMock();
    wireMock.verifyThat(getRequestedFor(urlEqualTo("/v1/blueprints/id_1/graph")));
}
|
/**
 * Generates the next UUID by pairing the instance's fixed most-significant bits
 * with a strictly increasing counter in the least-significant bits.
 *
 * @return a new, unique UUID
 * @throws CucumberException once the counter reaches its maximum capacity
 */
@Override
public UUID generateId() {
    long counterValue = counter.incrementAndGet();
    if (counterValue == MAX_COUNTER_VALUE) {
        // Fixed: the original message lacked a space before "implementation",
        // producing e.g. "...UuidGeneratorimplementation."
        throw new CucumberException(
            "Out of " + IncrementingUuidGenerator.class.getSimpleName() +
                    " capacity. Please generate using a new instance or use another " +
                    UuidGenerator.class.getSimpleName() + " implementation.");
    }
    long leastSigBits = counterValue | 0x8000000000000000L; // set variant
    return new UUID(msb, leastSigBits);
}
|
@Test
void generates_different_non_null_uuids() {
    // Given
    final UuidGenerator generator = new IncrementingUuidGenerator();
    // When: request ten ids in a row.
    final List<UUID> uuids = IntStream.range(0, 10)
            .mapToObj(ignored -> generator.generateId())
            .collect(Collectors.toList());
    // Then: all ids are non-null and mutually distinct.
    checkUuidProperties(uuids);
}
|
/**
 * Writes one protobuf record as a complete message to the record consumer.
 * On failure the offending message is materialized and logged before the
 * exception is rethrown.
 *
 * @param record the protobuf message (or builder) to serialize
 */
@Override
public void write(T record) {
    recordConsumer.startMessage();
    try {
        messageWriter.writeTopLevelMessage(record);
    } catch (RuntimeException e) {
        // Builders are built first so the logged payload shows what failed to serialize.
        Message failed = (record instanceof Message.Builder)
                ? ((Message.Builder) record).build()
                : (Message) record;
        LOG.error("Cannot write message {}: {}", e.getMessage(), failed);
        throw e;
    }
    recordConsumer.endMessage();
}
|
@Test
public void testMapIntMessage() throws Exception {
    // Serialize a proto map<int32,int32> with two entries and verify the exact
    // sequence of RecordConsumer calls (maps are written as repeated key/value groups).
    RecordConsumer readConsumerMock = Mockito.mock(RecordConsumer.class);
    ProtoWriteSupport<TestProtobuf.MapIntMessage> instance =
        createReadConsumerInstance(TestProtobuf.MapIntMessage.class, readConsumerMock);
    TestProtobuf.MapIntMessage.Builder msg = TestProtobuf.MapIntMessage.newBuilder();
    msg.putMapInt(123, 1);
    msg.putMapInt(234, 2);
    instance.write(msg.build());
    InOrder inOrder = Mockito.inOrder(readConsumerMock);
    inOrder.verify(readConsumerMock).startMessage();
    inOrder.verify(readConsumerMock).startField("mapInt", 0);
    // First map entry: key=123, value=1.
    inOrder.verify(readConsumerMock).startGroup();
    inOrder.verify(readConsumerMock).startField("key", 0);
    inOrder.verify(readConsumerMock).addInteger(123);
    inOrder.verify(readConsumerMock).endField("key", 0);
    inOrder.verify(readConsumerMock).startField("value", 1);
    inOrder.verify(readConsumerMock).addInteger(1);
    inOrder.verify(readConsumerMock).endField("value", 1);
    inOrder.verify(readConsumerMock).endGroup();
    // Second map entry: key=234, value=2.
    inOrder.verify(readConsumerMock).startGroup();
    inOrder.verify(readConsumerMock).startField("key", 0);
    inOrder.verify(readConsumerMock).addInteger(234);
    inOrder.verify(readConsumerMock).endField("key", 0);
    inOrder.verify(readConsumerMock).startField("value", 1);
    inOrder.verify(readConsumerMock).addInteger(2);
    inOrder.verify(readConsumerMock).endField("value", 1);
    inOrder.verify(readConsumerMock).endGroup();
    inOrder.verify(readConsumerMock).endField("mapInt", 0);
    inOrder.verify(readConsumerMock).endMessage();
    // No stray calls beyond the verified sequence.
    Mockito.verifyNoMoreInteractions(readConsumerMock);
}
|
/**
 * Reports whether the managed Elasticsearch node is operational. A positive answer
 * is latched: once operational, this method returns true without re-checking.
 *
 * @return true once the node has been observed operational; false otherwise
 */
@Override
public boolean isOperational() {
    // Sticky positive cache: never re-check after the first success.
    if (nodeOperational) {
        return true;
    }
    boolean flag = false;
    try {
        flag = checkOperational();
    } catch (InterruptedException e) {
        LOG.trace("Interrupted while checking ES node is operational", e);
        // Restore the interrupt flag for callers further up the stack.
        Thread.currentThread().interrupt();
    } finally {
        // On first success, stop the connector and latch the operational state.
        // Runs even if checkOperational throws an unchecked exception
        // (flag remains false in that case, so nothing is latched).
        if (flag) {
            esConnector.stop();
            nodeOperational = true;
        }
    }
    return nodeOperational;
}
|
@Test
public void isOperational_should_return_false_if_ElasticsearchException_with_connection_timeout_thrown() {
    // A connection timeout surfacing as an ElasticsearchException must not count as operational.
    final EsConnector connector = mock(EsConnector.class);
    final ConnectException timeout = new ConnectException("Timeout connecting to [/127.0.0.1:9001]");
    when(connector.getClusterHealthStatus())
        .thenThrow(new ElasticsearchException(new ExecutionException(timeout)));
    final EsManagedProcess underTest =
        new EsManagedProcess(mock(Process.class), ProcessId.ELASTICSEARCH, connector, WAIT_FOR_UP_TIMEOUT_LONG);
    assertThat(underTest.isOperational()).isFalse();
}
|
/**
 * Blocks until the result is available by delegating to the timed variant with a
 * practically infinite timeout (Long.MAX_VALUE seconds).
 *
 * @return the computed result
 * @throws InterruptedException if the waiting thread is interrupted
 * @throws ExecutionException if the computation failed, or — in the practically
 *         impossible case — the timeout elapsed
 */
@Override
public V get()
    throws InterruptedException, ExecutionException {
    try {
        return get(Long.MAX_VALUE, TimeUnit.SECONDS);
    } catch (TimeoutException practicallyImpossible) {
        // Surface the (unreachable in practice) timeout as an execution failure.
        throw new ExecutionException(practicallyImpossible);
    }
}
|
@Test
public void completeDelegate_getWithTimeout_outerAsked() throws Exception {
    // Completing the delegate should let the outer future's timed get() return its result.
    delegateFuture.run();
    final Object observed = outerFuture.get(10, TimeUnit.MILLISECONDS);
    assertEquals(DELEGATE_RESULT, observed);
}
|
/**
 * Reads a MySQL length-encoded integer from the payload.
 * Encoding: a first byte below 0xfb is the value itself; 0xfb marks NULL
 * (returned here as 0); 0xfc prefixes a 2-byte value; 0xfd a 3-byte value;
 * anything larger an 8-byte little-endian value.
 *
 * @return the decoded integer value
 */
public long readIntLenenc() {
    int firstByte = readInt1();
    switch (firstByte) {
        case 0xfb:
            return 0L;
        case 0xfc:
            return readInt2();
        case 0xfd:
            return readInt3();
        default:
            // Values below 0xfb are self-encoding; 0xfe/0xff carry 8 LE bytes.
            return firstByte < 0xfb ? firstByte : byteBuf.readLongLE();
    }
}
|
@Test
void assertReadIntLenencWithFourBytes() {
    // A 0xff prefix means the value is carried in the following 8 little-endian bytes.
    when(byteBuf.readUnsignedByte()).thenReturn((short) 0xff);
    when(byteBuf.readLongLE()).thenReturn(Long.MAX_VALUE);
    final MySQLPacketPayload payload = new MySQLPacketPayload(byteBuf, StandardCharsets.UTF_8);
    assertThat(payload.readIntLenenc(), is(Long.MAX_VALUE));
}
|
/**
 * Computes the absolute expiration time (ms since epoch) for a downloaded tile.
 * Precedence: configured override duration, then the HTTP Cache-Control max-age,
 * then the HTTP Expires header, then the default maximum cached-file age. The
 * configured extension is added in every case except the override.
 *
 * @param pHttpExpiresHeader raw HTTP Expires header value (may be null)
 * @param pHttpCacheControlHeader raw HTTP Cache-Control header value (may be null)
 * @param pNow the current time in ms since epoch
 * @return the expiration time in ms since epoch
 */
public long computeExpirationTime(final String pHttpExpiresHeader, final String pHttpCacheControlHeader, final long pNow) {
    final Long overrideDuration = Configuration.getInstance().getExpirationOverrideDuration();
    if (overrideDuration != null) {
        // The override wins outright and ignores the extension.
        return pNow + overrideDuration;
    }
    final long extraTime = Configuration.getInstance().getExpirationExtendedDuration();
    final Long maxAgeSeconds = getHttpCacheControlDuration(pHttpCacheControlHeader);
    if (maxAgeSeconds != null) {
        // Cache-Control carries seconds; convert to milliseconds.
        return pNow + maxAgeSeconds * 1000 + extraTime;
    }
    final Long expiresAtMillis = getHttpExpiresTime(pHttpExpiresHeader);
    if (expiresAtMillis != null) {
        return expiresAtMillis + extraTime;
    }
    return pNow + OpenStreetMapTileProviderConstants.DEFAULT_MAXIMUM_CACHED_FILE_AGE + extraTime;
}
|
@Test
public void testComputeExpirationTime() {
    final Random random = new Random();
    final int oneWeekMillis = 7 * 24 * 3600 * 1000; // 7 days in milliseconds
    // Exercise both the "no Expires header" and "Expires header present" paths.
    testComputeExpirationTimeHelper(null, random.nextInt(oneWeekMillis));
    testComputeExpirationTimeHelper((long) random.nextInt(oneWeekMillis), random.nextInt(oneWeekMillis));
}
|
/**
 * Generates the list of input splits for the job's input files.
 *
 * <p>Splittable files are cut into pieces of {@code computeSplitSize(blockSize,
 * minSize, maxSize)} bytes (with a SPLIT_SLOP tolerance on the final piece) and
 * each split is annotated with the hosts of the block it starts in. Non-splittable
 * files become a single split; zero-length files become an empty-host split.
 *
 * @param job the job context carrying configuration and input paths
 * @return the generated splits
 * @throws IOException if listing files or resolving block locations fails
 */
public List<InputSplit> getSplits(JobContext job) throws IOException {
    StopWatch sw = new StopWatch().start();
    long minSize = Math.max(getFormatMinSplitSize(), getMinSplitSize(job));
    long maxSize = getMaxSplitSize(job);
    // generate splits
    List<InputSplit> splits = new ArrayList<InputSplit>();
    List<FileStatus> files = listStatus(job);
    // Directories may appear in the listing when recursion is off; optionally skip them.
    boolean ignoreDirs = !getInputDirRecursive(job)
        && job.getConfiguration().getBoolean(INPUT_DIR_NONRECURSIVE_IGNORE_SUBDIRS, false);
    for (FileStatus file: files) {
        if (ignoreDirs && file.isDirectory()) {
            continue;
        }
        Path path = file.getPath();
        long length = file.getLen();
        if (length != 0) {
            BlockLocation[] blkLocations;
            // LocatedFileStatus already carries block locations; otherwise ask the filesystem.
            if (file instanceof LocatedFileStatus) {
                blkLocations = ((LocatedFileStatus) file).getBlockLocations();
            } else {
                FileSystem fs = path.getFileSystem(job.getConfiguration());
                blkLocations = fs.getFileBlockLocations(file, 0, length);
            }
            if (isSplitable(job, path)) {
                long blockSize = file.getBlockSize();
                long splitSize = computeSplitSize(blockSize, minSize, maxSize);
                long bytesRemaining = length;
                // Keep cutting full-size splits while the remainder exceeds the slop factor.
                while (((double) bytesRemaining)/splitSize > SPLIT_SLOP) {
                    int blkIndex = getBlockIndex(blkLocations, length-bytesRemaining);
                    splits.add(makeSplit(path, length-bytesRemaining, splitSize,
                        blkLocations[blkIndex].getHosts(),
                        blkLocations[blkIndex].getCachedHosts()));
                    bytesRemaining -= splitSize;
                }
                // The remainder (at most SPLIT_SLOP * splitSize bytes) becomes the last split.
                if (bytesRemaining != 0) {
                    int blkIndex = getBlockIndex(blkLocations, length-bytesRemaining);
                    splits.add(makeSplit(path, length-bytesRemaining, bytesRemaining,
                        blkLocations[blkIndex].getHosts(),
                        blkLocations[blkIndex].getCachedHosts()));
                }
            } else { // not splitable
                if (LOG.isDebugEnabled()) {
                    // Log only if the file is big enough to be splitted
                    if (length > Math.min(file.getBlockSize(), minSize)) {
                        LOG.debug("File is not splittable so no parallelization "
                            + "is possible: " + file.getPath());
                    }
                }
                splits.add(makeSplit(path, 0, length, blkLocations[0].getHosts(),
                    blkLocations[0].getCachedHosts()));
            }
        } else {
            //Create empty hosts array for zero length files
            splits.add(makeSplit(path, 0, length, new String[0]));
        }
    }
    // Save the number of input files for metrics/loadgen
    job.getConfiguration().setLong(NUM_INPUT_FILES, files.size());
    sw.stop();
    if (LOG.isDebugEnabled()) {
        LOG.debug("Total # of splits generated by getSplits: " + splits.size()
            + ", TimeTaken: " + sw.now(TimeUnit.MILLISECONDS));
    }
    return splits;
}
|
@Test
public void testNumInputFilesWithoutRecursively() throws Exception {
    final Configuration conf = getConfiguration();
    conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS, numThreads);
    final Job job = Job.getInstance(conf);
    final FileInputFormat<?, ?> inputFormat = new TextInputFormat();
    final List<InputSplit> splits = inputFormat.getSplits(job);
    // Non-recursive listing of the fixture tree should yield exactly these two splits.
    Assert.assertEquals("Input splits are not correct", 2, splits.size());
    verifySplits(Lists.newArrayList("test:/a1/a2", "test:/a1/file1"), splits);
}
|
/**
 * Renders one logging event as an HTML table row, alternating odd/even row
 * classes per event and appending a rendered throwable when present.
 *
 * @param event the event to render
 * @return the HTML fragment for this event
 */
public String doLayout(ILoggingEvent event) {
    StringBuilder buf = new StringBuilder();
    startNewTableIfLimitReached(buf);
    // Alternate row shading: odd counter values produce "odd" rows, even values "even".
    final boolean oddRow = ((counter++) & 1) != 0;
    final String level = event.getLevel().toString().toLowerCase(Locale.US);
    buf.append(LINE_SEPARATOR)
       .append("<tr class=\"")
       .append(level)
       .append(oddRow ? " odd\">" : " even\">")
       .append(LINE_SEPARATOR);
    // Let each converter in the chain contribute its cell(s) to the row.
    for (Converter<ILoggingEvent> converter = head; converter != null; converter = converter.getNext()) {
        appendEventToBuffer(buf, converter, event);
    }
    buf.append("</tr>").append(LINE_SEPARATOR);
    if (event.getThrowableProxy() != null) {
        throwableRenderer.render(buf, event);
    }
    return buf.toString();
}
|
@Test
@Ignore
public void rawLimit() throws Exception {
    // Render enough rows to force multiple table restarts (3x the row limit),
    // then validate that the complete document is still well-formed XHTML.
    StringBuilder sb = new StringBuilder();
    String header = layout.getFileHeader();
    // The file header must declare the XHTML 1.0 Strict doctype we validate against.
    assertTrue(header
        .startsWith("<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">"));
    sb.append(header);
    sb.append(layout.getPresentationHeader());
    for (int i = 0; i < CoreConstants.TABLE_ROW_LIMIT * 3; i++) {
        sb.append(layout.doLayout(new LoggingEvent(this.getClass().getName(),
            root, Level.DEBUG, "test message" + i, null, null)));
    }
    sb.append(layout.getPresentationFooter());
    sb.append(layout.getFileFooter());
    // check that the output adheres to xhtml-strict.dtd
    parseOutput(sb.toString());
}
|
/**
 * Creates the directory at {@code path}, optionally creating missing parents,
 * and applies the configured file security settings on success.
 *
 * @param path the directory path (any scheme prefix is stripped first)
 * @param options creation options (parent creation and security settings)
 * @return true if the directory was created; false otherwise
 * @throws IOException if an I/O error occurs
 */
@Override
public boolean mkdirs(String path, MkdirsOptions options) throws IOException {
    final File dir = new File(stripPath(path));
    // mkdirs() also creates missing ancestors; mkdir() requires the parent to exist.
    final boolean created = options.getCreateParent() ? dir.mkdirs() : dir.mkdir();
    if (created) {
        setFileSecurity(options, dir);
    }
    return created;
}
|
@Test
public void mkdirs() throws IOException {
    // Build a two-level path so a parent directory must be created implicitly.
    final String parentDir = PathUtils.concatPath(mLocalUfsRoot, getUniqueFileName());
    final String targetDir = PathUtils.concatPath(parentDir, getUniqueFileName());
    mLocalUfs.mkdirs(targetDir);
    // Verify through the UFS API and directly against the local filesystem.
    assertTrue(mLocalUfs.isDirectory(targetDir));
    assertTrue(new File(targetDir).exists());
}
|
/**
 * Converts a timestamp in the Elasticsearch date format to an ISO 8601 string.
 * Input that does not match the expected format is returned unchanged (best effort).
 *
 * @param time the timestamp string in the ES date format
 * @return the ISO 8601 representation, or the input itself if unparseable
 */
public static String elasticSearchTimeFormatToISO8601(String time) {
    final DateTime parsed;
    try {
        parsed = DateTime.parse(time, ES_DATE_FORMAT_FORMATTER);
    } catch (IllegalArgumentException unparseable) {
        // Deliberate best-effort behavior: pass malformed timestamps through untouched.
        return time;
    }
    return getISO8601String(parsed);
}
|
@Test
public void testElasticSearchTimeFormatToISO8601() {
    // An ES-format timestamp should convert to its UTC ISO 8601 equivalent.
    final String converted = Tools.elasticSearchTimeFormatToISO8601("2014-07-31 14:21:02.000");
    assertTrue(converted.equals("2014-07-31T14:21:02.000Z"));
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.