focal_method
stringlengths 13
60.9k
| test_case
stringlengths 25
109k
|
|---|---|
/**
 * Returns the protocol this load balancer uses; enclave mode is rejected.
 *
 * @param enclave must be false; enclave is unsupported by this implementation
 * @throws IllegalArgumentException if {@code enclave} is true
 */
@Override
public Protocol protocol(boolean enclave) {
    if (enclave) {
        throw new IllegalArgumentException("enclave is not supported with " + getClass());
    }
    return Protocol.dualstack;
}
|
// Verifies the non-enclave path reports the dualstack protocol.
// (The enclave==true path throws; not exercised here.)
@Test
public void test_protocol() {
assertEquals(LoadBalancerService.Protocol.dualstack, loadBalancerService.protocol(false));
}
|
/**
 * Validates that every parsed {@link PayloadDefinition} carries all required fields.
 *
 * <p>Payloads that use the callback server must embed the $TSUNAMI_PAYLOAD_TOKEN_URL
 * placeholder in their payload string; payloads that do not must declare a validation
 * type (and a validation regex when that type is VALIDATION_REGEX).
 *
 * @param payloads parsed payload definitions to check
 * @return an immutable copy of {@code payloads}
 * @throws IllegalArgumentException if any payload is missing a required field
 */
ImmutableList<PayloadDefinition> validatePayloads(List<PayloadDefinition> payloads) {
    for (PayloadDefinition p : payloads) {
        checkArgument(p.hasName(), "Parsed payload does not have a name.");
        checkArgument(
            p.getInterpretationEnvironment()
                != PayloadGeneratorConfig.InterpretationEnvironment
                    .INTERPRETATION_ENVIRONMENT_UNSPECIFIED,
            "Parsed payload does not have an interpretation_environment.");
        checkArgument(
            p.getExecutionEnvironment()
                != PayloadGeneratorConfig.ExecutionEnvironment.EXECUTION_ENVIRONMENT_UNSPECIFIED,
            // Fixed typo in the error message: "exeuction" -> "execution".
            "Parsed payload does not have an execution_environment.");
        checkArgument(
            !p.getVulnerabilityTypeList().isEmpty(),
            "Parsed payload has no entries for vulnerability_type.");
        checkArgument(p.hasPayloadString(), "Parsed payload does not have a payload_string.");
        if (p.getUsesCallbackServer().getValue()) {
            // Callback-server payloads must reference the token URL placeholder.
            checkArgument(
                p.getPayloadString().getValue().contains("$TSUNAMI_PAYLOAD_TOKEN_URL"),
                "Parsed payload uses callback server but $TSUNAMI_PAYLOAD_TOKEN_URL not found in"
                    + " payload_string.");
        } else {
            // Non-callback payloads need an explicit validation strategy.
            checkArgument(
                p.getValidationType() != PayloadValidationType.VALIDATION_TYPE_UNSPECIFIED,
                "Parsed payload has no validation_type and does not use the callback server.");
            if (p.getValidationType() == PayloadValidationType.VALIDATION_REGEX) {
                checkArgument(
                    p.hasValidationRegex(),
                    "Parsed payload has no validation_regex but uses PayloadValidationType.REGEX");
            }
        }
    }
    return ImmutableList.copyOf(payloads);
}
|
// A REGEX-validated payload without a validation_regex must be rejected with an
// IllegalArgumentException mentioning the missing field.
@Test
public void validatePayloads_withRegexValidationWithoutValidationRegex_throwsException()
throws IOException {
PayloadDefinition p = goodNoCallbackDefinition.clearValidationRegex().build();
Throwable thrown =
assertThrows(
IllegalArgumentException.class, () -> module.validatePayloads(ImmutableList.of(p)));
assertThat(thrown).hasMessageThat().contains("validation_regex");
}
|
/** Returns a fresh, unique job id via an atomic post-increment (thread-safe). */
public long getNewJobId() {
    final long id = mNextJobId.getAndIncrement();
    return id;
}
|
// Two consecutive ids from the same generator must differ.
@Test
public void newIdTest() {
JobIdGenerator generator = new JobIdGenerator();
Assert.assertNotEquals(generator.getNewJobId(), generator.getNewJobId());
}
|
/**
 * Issues CONFIG RESETSTAT against the given cluster node and blocks until it completes.
 */
@Override
public void resetConfigStats(RedisClusterNode node) {
    RFuture<Void> future = executorService.writeAsync(
            getEntry(node), StringCodec.INSTANCE, RedisCommands.CONFIG_RESETSTAT);
    syncFuture(future);
}
|
// Smoke test: resetting config stats on the first master must not throw.
@Test
public void testResetConfigStats() {
RedisClusterNode master = getFirstMaster();
connection.resetConfigStats(master);
}
|
/**
 * Looks up this node's rack id from its Kubernetes node labels and writes it to the
 * rack-id file.
 *
 * @return false when the configured topology label is absent on the node, otherwise
 *         the result of writing the rack id file
 */
public boolean writeRack() {
    final Map<String, String> nodeLabels =
            client.nodes().withName(config.getNodeName()).get().getMetadata().getLabels();
    LOGGER.info("NodeLabels = {}", nodeLabels);
    final String topologyKey = config.getRackTopologyKey();
    final String rackId = nodeLabels.get(topologyKey);
    LOGGER.info("Rack: {} = {}", topologyKey, rackId);
    if (rackId == null) {
        LOGGER.error("Node {} doesn't have the label {} for getting the rackid",
                config.getNodeName(), topologyKey);
        return false;
    }
    return write(FILE_RACK_ID, rackId);
}
|
// When the node lacks the configured zone topology label, writeRack must return false.
@Test
public void testWriteRackFailWithMissingKubernetesZoneLabel() {
// the cluster node will not have the requested label
Map<String, String> labels = new HashMap<>(LABELS);
labels.remove("failure-domain.beta.kubernetes.io/zone");
InitWriterConfig config = InitWriterConfig.fromMap(ENV_VARS);
KubernetesClient client = mockKubernetesClient(config.getNodeName(), labels, List.of());
InitWriter writer = new InitWriter(client, config);
assertThat(writer.writeRack(), is(false));
}
|
/**
 * Loads a native library: first by absolute path (to avoid accidentally picking up a
 * same-named library from java.library.path), then falling back to loadLibrary by name.
 *
 * @param library the library name to load
 * @return true if either load attempt succeeded, false otherwise
 */
public static boolean load(final String library) {
    synchronized(lock) {
        final String path = Native.getPath(library);
        try {
            // Load using absolute path. Otherwise we may load
            // a library in java.library.path that was not intended
            // because of a naming conflict.
            System.load(path);
            return true;
        }
        catch(UnsatisfiedLinkError e) {
            log.warn(String.format("Failed to load %s:%s", path, e.getMessage()), e);
            try {
                System.loadLibrary(library);
                return true;
            }
            catch(UnsatisfiedLinkError f) {
                // Bug fix: log the fallback failure (f), not the already-logged first error (e).
                log.warn(String.format("Failed to load %s:%s", library, f.getMessage()), f);
                return false;
            }
        }
    }
}
|
// A nonexistent library must fail both load paths and return false (no throw).
@Test
public void testLoad() {
assertFalse(Native.load("notfound"));
}
|
/**
 * Loads the migrations config and delegates to the two-arg command body.
 * Returns 1 when the config file is missing or fails to load, otherwise the
 * delegate's exit status. Note: only MigrationConfig.load is inside the try;
 * exceptions from the delegate intentionally propagate.
 */
@Override
protected int command() {
if (!validateConfigFilePresent()) {
return 1;
}
final MigrationConfig config;
try {
config = MigrationConfig.load(getConfigFile());
} catch (KsqlException | MigrationException e) {
LOGGER.error(e.getMessage());
return 1;
}
return command(config, MigrationsUtil::getKsqlClient);
}
|
// Clean succeeds when the migrations stream is absent: the table is still dropped,
// the stream drop is skipped, and the CTAS query writing the table is terminated.
@Test
public void shouldCleanMigrationsTableEvenIfStreamDoesntExist() throws Exception {
// Given:
givenMigrationsStreamDoesNotExist();
// When:
final int status = command.command(config, cfg -> client);
// Then:
assertThat(status, is(0));
verify(client).executeStatement("DROP TABLE " + MIGRATIONS_TABLE + " DELETE TOPIC;");
verify(client, never()).executeStatement("DROP STREAM " + MIGRATIONS_STREAM + " DELETE TOPIC;");
// a single query writing to the table will still be dropped, even if the stream
// doesn't exist. we could change this in the future but it's an unlikely (and
// unexpected) edge case that doesn't seem too important.
verify(client).executeStatement("TERMINATE " + CTAS_QUERY_ID + ";");
}
|
/** Returns an iterable view of the float array using exact (bitwise) equality. */
public FloatArrayAsIterable usingExactEquality() {
return new FloatArrayAsIterable(EXACT_EQUALITY_CORRESPONDENCE, iterableSubject());
}
|
// Integer/Long expected values work with exact equality for magnitudes up to 2^24,
// where float can still represent every integer exactly.
@Test
public void usingExactEquality_contains_otherTypes() {
// Expected value is Integer - supported up to +/- 2^24
assertThat(array(1.0f, 2.0f, 3.0f)).usingExactEquality().contains(2);
assertThat(array(1.0f, 1 << 24, 3.0f)).usingExactEquality().contains(1 << 24);
// Expected value is Long - supported up to +/- 2^24
assertThat(array(1.0f, 2.0f, 3.0f)).usingExactEquality().contains(2L);
assertThat(array(1.0f, 1 << 24, 3.0f)).usingExactEquality().contains(1L << 24);
}
|
/**
 * Fetches the backlog for the event, truncated to the configured backlog size.
 * A non-positive configured size, or a null backlog from the callback service,
 * returns the backlog unchanged.
 */
@VisibleForTesting
List<MessageSummary> getMessageBacklog(EventNotificationContext ctx, TeamsEventNotificationConfig config) {
    final List<MessageSummary> backlog = notificationCallbackService.getBacklogForEvent(ctx);
    if (backlog == null || config.backlogSize() <= 0) {
        return backlog;
    }
    return backlog.stream().limit(config.backlogSize()).collect(Collectors.toList());
}
|
// With a null context the callback service yields null, which is passed through.
@Test
public void testBacklogMessageLimitWhenEventNotificationContextIsNull() {
TeamsEventNotificationConfig TeamsConfig = TeamsEventNotificationConfig.builder()
.backlogSize(0)
.build();
//global setting is at N and the eventNotificationContext is null then the message summaries is null
List<MessageSummary> messageSummaries = teamsEventNotification.getMessageBacklog(null, TeamsConfig);
assertThat(messageSummaries).isNull();
}
|
/**
 * Evaluates this criterion for a single position by combining the profit/loss ratio
 * with the winning-position and total-position counts.
 */
@Override
public Num calculate(BarSeries series, Position position) {
    // Sub-criterion evaluations are independent of each other.
    final Num numberOfPositions = numberOfPositionsCriterion.calculate(series, position);
    final Num numberOfWinningPositions = numberOfWinningPositionsCriterion.calculate(series, position);
    final Num profitLossRatio = profitLossRatioCriterion.calculate(series, position);
    return calculate(series, profitLossRatio, numberOfWinningPositions, numberOfPositions);
}
|
// Two winning trades only: the criterion evaluates to 1.0.
// NOTE(review): the local is named "avgLoss" but holds getCriterion() — presumably a
// copy/paste leftover from a sibling test; behavior is unaffected.
@Test
public void calculateOnlyWithProfitPositions() {
MockBarSeries series = new MockBarSeries(numFunction, 100, 110, 120, 130, 150, 160);
TradingRecord tradingRecord = new BaseTradingRecord(Trade.buyAt(0, series), Trade.sellAt(2, series),
Trade.buyAt(3, series), Trade.sellAt(5, series));
AnalysisCriterion avgLoss = getCriterion();
assertNumEquals(1.0, avgLoss.calculate(series, tradingRecord));
}
|
/** Returns an iterable view of the float array using exact (bitwise) equality. */
public FloatArrayAsIterable usingExactEquality() {
return new FloatArrayAsIterable(EXACT_EQUALITY_CORRESPONDENCE, iterableSubject());
}
|
// containsAtLeast with a primitive float[] succeeds when elements appear in order.
@Test
public void usingExactEquality_containsAtLeast_primitiveFloatArray_inOrder_success() {
assertThat(array(1.0f, 2.0f, 3.0f))
.usingExactEquality()
.containsAtLeast(array(1.0f, 2.0f))
.inOrder();
}
|
/**
 * Marks this key as read-only. Subsequent mutations are expected to be rejected
 * by the mutating methods checking this flag (see the append test below).
 */
public void makeReadOnly()
{
_isReadOnly = true;
}
|
// After makeReadOnly, append must throw UnsupportedOperationException.
@Test(expectedExceptions = UnsupportedOperationException.class)
public void testMakeReadOnly()
{
CompoundKey compoundKey = new CompoundKey();
compoundKey.append("foo", "foo-value");
compoundKey.append("bar", 1);
compoundKey.append("baz", 7L);
compoundKey.makeReadOnly();
compoundKey.append("abc", "def");
}
|
/** Returns the shared Gson instance (lazily initialized via the holder idiom). */
public static Gson instance() {
return SingletonHolder.INSTANCE;
}
|
// A secret-param placeholder value must serialize as its resolved secret, not the
// raw {{SECRET:...}} template.
@Test
void configurationPropertyWithSecretParamsShouldSerializeResolvedValues() {
ConfigurationProperty configurationProperty = new ConfigurationProperty(new ConfigurationKey("db_password"),
new ConfigurationValue("{{SECRET:[test_id][password]}}"));
configurationProperty.getSecretParams().get(0).setValue("secret");
String json = Serialization.instance().toJson(configurationProperty);
assertThat(json).isEqualTo("{\"key\":\"db_password\",\"value\":\"secret\"}");
}
|
/**
 * Returns the sharding items of the registered job, or an empty collection when the
 * job id is not registered.
 */
public static Collection<Integer> getShardingItems(final String jobId) {
    if (!JOBS.containsKey(jobId)) {
        return Collections.emptyList();
    }
    return JOBS.get(jobId).getJobRunnerManager().getShardingItems();
}
|
// A registered job returns the sharding items reported by its runner manager.
@Test
void assertGetExistedShardingItems() {
when(job.getJobRunnerManager().getShardingItems()).thenReturn(Arrays.asList(1, 2, 3));
assertThat(PipelineJobRegistry.getShardingItems("foo_job"), is(Arrays.asList(1, 2, 3)));
}
|
/**
 * Selects a decryption result handler: interactive biometric when the storage supports
 * biometry (with a manual-retry variant to work around the OnePlus biometric bug),
 * otherwise a non-interactive handler.
 */
public static DecryptionResultHandler getHandler(@NonNull ReactApplicationContext reactContext,
                                                 @NonNull final CipherStorage storage,
                                                 @NonNull final BiometricPrompt.PromptInfo promptInfo) {
    if (!storage.isBiometrySupported()) {
        return new DecryptionResultHandlerNonInteractive();
    }
    if (hasOnePlusBiometricBug()) {
        return new DecryptionResultHandlerInteractiveBiometricManualRetry(reactContext, storage, promptInfo);
    }
    return new DecryptionResultHandlerInteractiveBiometric(reactContext, storage, promptInfo);
}
|
// Without biometry support the non-interactive handler must be chosen.
@Test
@Config(sdk = Build.VERSION_CODES.M)
public void testBiometryNotSupported() {
// GIVEN
final ReactApplicationContext mockContext = mock(ReactApplicationContext.class);
final CipherStorage storage = mock(CipherStorage.class);
when(storage.isBiometrySupported()).thenReturn(false);
final BiometricPrompt.PromptInfo promptInfo = mock(BiometricPrompt.PromptInfo.class);
// WHEN
DecryptionResultHandler handler = DecryptionResultHandlerProvider.getHandler(mockContext, storage, promptInfo);
//THEN
assertThat(handler, instanceOf(DecryptionResultHandlerNonInteractive.class));
}
|
/** Entry point for the fluent router-function builder. */
public static Builder route() {
return new RouterFunctionBuilder();
}
|
// A filter applied to a router function must transform the handled response
// (String "42" mapped to Integer 42) while routing still succeeds.
@Test
void filter() {
Mono<String> stringMono = Mono.just("42");
HandlerFunction<EntityResponse<Mono<String>>> handlerFunction =
request -> EntityResponse.fromPublisher(stringMono, String.class).build();
RouterFunction<EntityResponse<Mono<String>>> routerFunction =
request -> Mono.just(handlerFunction);
// Filter parses the String entity into an Integer entity.
HandlerFilterFunction<EntityResponse<Mono<String>>, EntityResponse<Mono<Integer>>> filterFunction =
(request, next) -> next.handle(request).flatMap(
response -> {
Mono<Integer> intMono = response.entity()
.map(Integer::parseInt);
return EntityResponse.fromPublisher(intMono, Integer.class).build();
});
RouterFunction<EntityResponse<Mono<Integer>>> result = routerFunction.filter(filterFunction);
assertThat(result).isNotNull();
MockServerHttpRequest mockRequest = MockServerHttpRequest.get("https://example.com").build();
ServerRequest request = new DefaultServerRequest(MockServerWebExchange.from(mockRequest), Collections.emptyList());
Mono<EntityResponse<Mono<Integer>>> responseMono =
result.route(request).flatMap(hf -> hf.handle(request));
// Inner StepVerifier checks the entity value; outer checks routing completion.
StepVerifier.create(responseMono)
.consumeNextWith(
serverResponse ->
StepVerifier.create(serverResponse.entity())
.expectNext(42)
.expectComplete()
.verify()
)
.expectComplete()
.verify();
}
|
/**
 * Runs every known Rule's pattern against the crash log and collects a Result for
 * each rule whose pattern matches. (The method name "anaylze" is a historical typo;
 * it cannot be renamed here without breaking existing callers.)
 */
public static Set<Result> anaylze(String log) {
    Set<Result> matched = new HashSet<>();
    for (Rule candidate : Rule.values()) {
        Matcher m = candidate.pattern.matcher(log);
        if (m.find()) {
            matched.add(new Result(candidate, log, m));
        }
    }
    return matched;
}
|
// A Forge loader-exception crash report must match LOADING_CRASHED_FORGE and expose
// the offending mod's name and id through the matcher's named groups.
@Test
public void loaderExceptionModCrash() throws IOException {
CrashReportAnalyzer.Result result = findResultByRule(
CrashReportAnalyzer.anaylze(loadLog("/crash-report/loader_exception_mod_crash.txt")),
CrashReportAnalyzer.Rule.LOADING_CRASHED_FORGE);
assertEquals("Better PvP", result.getMatcher().group("name"));
assertEquals("xaerobetterpvp", result.getMatcher().group("id"));
}
|
/** Exposes the wrapped producer's metrics (straight delegation). */
Map<MetricName, ? extends Metric> metrics() {
return producer.metrics();
}
|
// metrics() must return the exact map instance the underlying producer returns.
@SuppressWarnings({"rawtypes", "unchecked"})
@Test
public void shouldForwardCallToMetrics() {
final Map metrics = new HashMap<>();
when(mockedProducer.metrics()).thenReturn(metrics);
assertSame(metrics, streamsProducerWithMock.metrics());
}
|
/** Serializes the update to compact (non-pretty) JSON. */
public static String toJson(MetadataUpdate metadataUpdate) {
return toJson(metadataUpdate, false);
}
|
// SetCurrentViewVersion serializes with its action tag and the view-version-id field.
@Test
public void testSetCurrentViewVersionToJson() {
String action = MetadataUpdateParser.SET_CURRENT_VIEW_VERSION;
String expected = String.format("{\"action\":\"%s\",\"view-version-id\":23}", action);
MetadataUpdate update = new MetadataUpdate.SetCurrentViewVersion(23);
assertThat(MetadataUpdateParser.toJson(update)).isEqualTo(expected);
}
|
/**
 * Merges adjacent and overlapping disk ranges into larger reads.
 *
 * <p>Ranges are sorted by start offset; consecutive ranges are merged while the
 * combined span stays within {@code maxReadSize} and the gap between them is at most
 * {@code maxMergeDistance}.
 *
 * @param diskRanges ranges to merge; may be in any order
 * @param maxMergeDistance maximum gap between two ranges that may still be merged
 * @param maxReadSize maximum length of a merged range
 * @return the merged ranges, sorted by offset; empty when the input is empty
 */
public static List<DiskRange> mergeAdjacentDiskRanges(Collection<DiskRange> diskRanges, DataSize maxMergeDistance, DataSize maxReadSize)
{
    // Robustness fix: the original unconditionally read ranges.get(0) and threw
    // IndexOutOfBoundsException for an empty input.
    if (diskRanges.isEmpty()) {
        return ImmutableList.of();
    }
    // sort ranges by start offset
    List<DiskRange> ranges = new ArrayList<>(diskRanges);
    ranges.sort(Comparator.comparingLong(DiskRange::getOffset));
    // merge overlapping ranges
    long maxReadSizeBytes = maxReadSize.toBytes();
    long maxMergeDistanceBytes = maxMergeDistance.toBytes();
    ImmutableList.Builder<DiskRange> result = ImmutableList.builder();
    DiskRange last = ranges.get(0);
    for (int i = 1; i < ranges.size(); i++) {
        DiskRange current = ranges.get(i);
        DiskRange merged = last.span(current);
        // Merge only when the combined read is small enough and the gap is close enough.
        if (merged.getLength() <= maxReadSizeBytes && last.getEnd() + maxMergeDistanceBytes >= current.getOffset()) {
            last = merged;
        }
        else {
            result.add(last);
            last = current;
        }
    }
    result.add(last);
    return result.build();
}
|
// A single range passes through unchanged regardless of merge parameters.
@Test
public void testMergeSingle()
{
List<DiskRange> diskRanges = mergeAdjacentDiskRanges(
ImmutableList.of(new DiskRange(100, 100)),
new DataSize(0, BYTE),
new DataSize(0, BYTE));
assertEquals(diskRanges, ImmutableList.of(new DiskRange(100, 100)));
}
|
/**
 * Creates a ScalablePushRegistry for the source, or empty when no application server
 * is configured in the streams properties.
 *
 * @return the registry, or {@code Optional.empty()} when APPLICATION_SERVER_CONFIG is unset
 * @throws IllegalArgumentException when APPLICATION_SERVER_CONFIG is not a String or
 *         is not a well-formed URL
 */
public static Optional<ScalablePushRegistry> create(
final LogicalSchema logicalSchema,
final Supplier<List<PersistentQueryMetadata>> allPersistentQueries,
final boolean isTable,
final Map<String, Object> streamsProperties,
final Map<String, Object> consumerProperties,
final String sourceApplicationId,
final KsqlTopic ksqlTopic,
final ServiceContext serviceContext,
final KsqlConfig ksqlConfig
) {
// No application server configured: scalable push queries are simply disabled.
final Object appServer = streamsProperties.get(StreamsConfig.APPLICATION_SERVER_CONFIG);
if (appServer == null) {
return Optional.empty();
}
if (!(appServer instanceof String)) {
throw new IllegalArgumentException(StreamsConfig.APPLICATION_SERVER_CONFIG + " not String");
}
// Validate that the configured address parses as a URL before wiring anything up.
final URL localhost;
try {
localhost = new URL((String) appServer);
} catch (final MalformedURLException e) {
throw new IllegalArgumentException(StreamsConfig.APPLICATION_SERVER_CONFIG + " malformed: "
+ "'" + appServer + "'");
}
final PushLocator pushLocator = new AllHostsLocator(allPersistentQueries, localhost);
// Dedicated single-thread executor for the registry plus a scheduled pool sized by
// the configured max catchup consumers.
return Optional.of(new ScalablePushRegistry(
pushLocator, logicalSchema, isTable,
consumerProperties, ksqlTopic, serviceContext, ksqlConfig, sourceApplicationId,
KafkaConsumerFactory::create, LatestConsumer::new, CatchupConsumer::new,
Executors.newSingleThreadExecutor(),
Executors.newScheduledThreadPool(
ksqlConfig.getInt(KsqlConfig.KSQL_QUERY_PUSH_V2_MAX_CATCHUP_CONSUMERS))));
}
|
// A non-URL application server value ("abc") must be rejected with a message
// containing "malformed".
@Test
public void shouldCreate_badUrlApplicationServer() {
// When
final Exception e = assertThrows(
IllegalArgumentException.class,
() -> ScalablePushRegistry.create(SCHEMA, Collections::emptyList, false,
ImmutableMap.of(StreamsConfig.APPLICATION_SERVER_CONFIG, "abc"),
ImmutableMap.of(), SOURCE_APP_ID, ksqlTopic, serviceContext, ksqlConfig)
);
// Then
assertThat(e.getMessage(), containsString("malformed"));
}
|
/**
 * Reads one byte as an unsigned int (0-255), advancing the position;
 * returns -1 at end of data. (The declared EOFException is part of the
 * inherited signature; this implementation never throws it.)
 */
@Override
public int read() throws EOFException {
    if (pos >= size) {
        return -1;
    }
    return data[pos++] & 0xff;
}
|
// Reads track position correctly; a negative byte maps to its unsigned value (0xFF)
// and reading past the end yields -1.
@Test
public void testReadPosition() throws Exception {
int read = in.read(1);
int readUnsigned = in.read(INIT_DATA.length - 1);
int readEnd = in.read(INIT_DATA.length);
assertEquals(1, read);
// Map the expected negative value to unsigned byte range
assertEquals(0xFF, readUnsigned);
assertEquals(-1, readEnd);
}
|
/** Returns the encapsulated Ethernet type. */
public short encapEthType() {
return encapEthType;
}
|
// Construction stores the given eth type and the accessor returns it unchanged.
@Test
public void testConstruction() {
final NiciraEncapEthType encapEthType = new NiciraEncapEthType(ethType1);
assertThat(encapEthType, is(notNullValue()));
assertThat(encapEthType.encapEthType(), is(ethType1));
}
|
/**
 * Looks up a {@link Predicate} bean by name in the Camel registry.
 *
 * @throws IllegalStateException when no bean with that name is registered
 */
@Converter
static Predicate obtainPredicateFromBeanName(
        final String predicateBeanName,
        final CamelContext camelContext) {
    final Predicate predicate =
            CamelContextHelper.lookup(camelContext, predicateBeanName, Predicate.class);
    if (predicate == null) {
        throw new IllegalStateException(ERROR_NO_PREDICATE_BEAN_FOUND);
    }
    return predicate;
}
|
// A registered Predicate bean is resolved by its name and returned as-is.
@Test
void obtainPredicateFromBeanName() {
context.getRegistry().bind(predicateBeanName, Predicate.class, predicateInstance);
Predicate actualPredicate = DynamicRouterControlService.obtainPredicateFromBeanName(predicateBeanName, context);
assertEquals(predicateInstance, actualPredicate);
}
|
/** Delegates the hash to the backing map (kept consistent with equals). */
@Override
public int hashCode() {
return map.hashCode();
}
|
// hashCode on an empty counter must be safe to call (no assertion on the value).
@Test
public void testHashCode_doesNotThrowExceptionWhenEmpty() {
counter.hashCode();
}
|
/**
 * Redeems a receipt credential presentation for the account's backup voucher.
 *
 * <p>Verifies the presentation, rejects expired receipts and unrecognized receipt
 * levels, records the receipt serial (rejecting replays), and merges the resulting
 * voucher into the account.
 *
 * @throws io.grpc.StatusRuntimeException INVALID_ARGUMENT on verification failure,
 *         expiry, unknown level, or an already-redeemed serial
 */
public CompletableFuture<Void> redeemReceipt(
    final Account account,
    final ReceiptCredentialPresentation receiptCredentialPresentation) {
  try {
    serverZkReceiptOperations.verifyReceiptCredentialPresentation(receiptCredentialPresentation);
  } catch (VerificationFailedException e) {
    throw Status.INVALID_ARGUMENT
        .withDescription("receipt credential presentation verification failed")
        .asRuntimeException();
  }
  final ReceiptSerial receiptSerial = receiptCredentialPresentation.getReceiptSerial();
  final Instant receiptExpiration = Instant.ofEpochSecond(receiptCredentialPresentation.getReceiptExpirationTime());
  if (clock.instant().isAfter(receiptExpiration)) {
    throw Status.INVALID_ARGUMENT.withDescription("receipt is already expired").asRuntimeException();
  }
  final long receiptLevel = receiptCredentialPresentation.getReceiptLevel();
  if (BackupLevelUtil.fromReceiptLevel(receiptLevel) != BackupLevel.MEDIA) {
    throw Status.INVALID_ARGUMENT
        .withDescription("server does not recognize the requested receipt level")
        .asRuntimeException();
  }
  return redeemedReceiptsManager
      .put(receiptSerial, receiptExpiration.getEpochSecond(), receiptLevel, account.getUuid())
      .thenCompose(receiptAllowed -> {
        if (!receiptAllowed) {
          throw Status.INVALID_ARGUMENT
              .withDescription("receipt serial is already redeemed")
              .asRuntimeException();
        }
        return accountsManager.updateAsync(account, a -> {
          final Account.BackupVoucher newPayment = new Account.BackupVoucher(receiptLevel, receiptExpiration);
          final Account.BackupVoucher existingPayment = a.getBackupVoucher();
          // Bug fix: mutate the account instance supplied to the updater ("a"), not the
          // captured outer "account" — updateAsync may retry with a fresh copy, and the
          // original already read the voucher from "a" but wrote it to "account".
          a.setBackupVoucher(merge(existingPayment, newPayment));
        });
      })
      .thenRun(Util.NOOP);
}
|
// Redeeming a voucher that expires earlier than the account's existing voucher must
// keep the existing (later-expiring) voucher after the merge.
@Test
void mergeRedemptions() throws InvalidInputException, VerificationFailedException {
final Instant newExpirationTime = Instant.EPOCH.plus(Duration.ofDays(1));
final Instant existingExpirationTime = Instant.EPOCH.plus(Duration.ofDays(1)).plus(Duration.ofSeconds(1));
final BackupAuthManager authManager = create(BackupLevel.MESSAGES, false);
final Account account = mock(Account.class);
when(account.getUuid()).thenReturn(aci);
// The account has an existing voucher with a later expiration date
when(account.getBackupVoucher()).thenReturn(new Account.BackupVoucher(201, existingExpirationTime));
clock.pin(Instant.EPOCH.plus(Duration.ofDays(1)));
when(accountsManager.updateAsync(any(), any())).thenReturn(CompletableFuture.completedFuture(account));
when(redeemedReceiptsManager.put(any(), eq(newExpirationTime.getEpochSecond()), eq(201L), eq(aci)))
.thenReturn(CompletableFuture.completedFuture(true));
authManager.redeemReceipt(account, receiptPresentation(201, newExpirationTime)).join();
// Capture the updater passed to updateAsync and apply it manually to observe the merge.
final ArgumentCaptor<Consumer<Account>> updaterCaptor = ArgumentCaptor.captor();
verify(accountsManager, times(1)).updateAsync(any(), updaterCaptor.capture());
updaterCaptor.getValue().accept(account);
// Should select the voucher with the later expiration time
verify(account).setBackupVoucher(eq(new Account.BackupVoucher(201, existingExpirationTime)));
}
|
/**
 * Returns the wrapper of the current plugin when the given class was loaded by that
 * plugin's class loader; otherwise null.
 */
@Override
public PluginWrapper whichPlugin(Class<?> clazz) {
    ClassLoader classLoader = clazz.getClassLoader();
    PluginWrapper plugin = getPlugin(currentPluginId);
    // Robustness fix: getPlugin may return null for an unknown id; the original then
    // threw NullPointerException on plugin.getPluginClassLoader() instead of returning null.
    if (plugin != null && plugin.getPluginClassLoader() == classLoader) {
        return plugin;
    }
    return null;
}
|
// Classes from another plugin resolve to null; classes from the wrapped plugin
// resolve to that plugin's wrapper.
@Test
public void whichPlugin() {
pluginManager.loadPlugins();
pluginManager.startPlugins();
assertEquals(null, wrappedPluginManager.whichPlugin(pluginManager.getExtensionClasses(OTHER_PLUGIN_ID).get(0)));
assertEquals(THIS_PLUGIN_ID, wrappedPluginManager.whichPlugin(pluginManager.getExtensionClasses(THIS_PLUGIN_ID).get(0)).getPluginId());
}
|
/**
 * Injects dependencies into the extension instance via its public setters.
 *
 * <p>Setters are skipped when annotated with {@link DisableInject}, declared on
 * ScopeModelAware, listed in ignoredInjectMethodsDesc for aware-interface instances,
 * or taking a primitive parameter. Per-setter injection failures are logged and do
 * not abort injection of the remaining setters.
 *
 * @param instance the extension instance to populate
 * @return the same instance, possibly with setters invoked
 */
private T injectExtension(T instance) {
// No injector configured: nothing to do.
if (injector == null) {
return instance;
}
try {
for (Method method : instance.getClass().getMethods()) {
if (!isSetter(method)) {
continue;
}
/**
 * Check {@link DisableInject} to see if we need auto-injection for this property
 */
if (method.isAnnotationPresent(DisableInject.class)) {
continue;
}
// When spiXXX implements ScopeModelAware, ExtensionAccessorAware,
// the setXXX of ScopeModelAware and ExtensionAccessorAware does not need to be injected
if (method.getDeclaringClass() == ScopeModelAware.class) {
continue;
}
if (instance instanceof ScopeModelAware || instance instanceof ExtensionAccessorAware) {
if (ignoredInjectMethodsDesc.contains(ReflectUtils.getDesc(method))) {
continue;
}
}
// Primitive-typed properties are never injected.
Class<?> pt = method.getParameterTypes()[0];
if (ReflectUtils.isPrimitives(pt)) {
continue;
}
try {
// Resolve the dependency by setter property name and parameter type;
// a null result simply leaves the property untouched.
String property = getSetterProperty(method);
Object object = injector.getInstance(pt, property);
if (object != null) {
method.invoke(instance, object);
}
} catch (Exception e) {
logger.error(
COMMON_ERROR_LOAD_EXTENSION,
"",
"",
"Failed to inject via method " + method.getName() + " of interface " + type.getName() + ": "
+ e.getMessage(),
e);
}
}
} catch (Exception e) {
logger.error(COMMON_ERROR_LOAD_EXTENSION, "", "", e.getMessage(), e);
}
return instance;
}
|
// Injection populates resolvable setters (simpleExt, the registered bean) and leaves
// unresolvable ones (simpleExt1, genericType) null.
@Test
void testInjectExtension() {
// register bean for test ScopeBeanExtensionInjector
DemoImpl demoBean = new DemoImpl();
ApplicationModel.defaultModel().getBeanFactory().registerBean(demoBean);
// test default
InjectExt injectExt = getExtensionLoader(InjectExt.class).getExtension("injection");
InjectExtImpl injectExtImpl = (InjectExtImpl) injectExt;
Assertions.assertNotNull(injectExtImpl.getSimpleExt());
Assertions.assertNull(injectExtImpl.getSimpleExt1());
Assertions.assertNull(injectExtImpl.getGenericType());
Assertions.assertSame(demoBean, injectExtImpl.getDemo());
}
|
/** Returns the plugin location, serialized under the "location" JSON property. */
@JsonProperty("location")
public String location() {
return location;
}
|
// A null version renders as the literal string "null"; the system loader maps to the
// "classpath" location while a plugin loader keeps its own location.
@Test
public void testPluginDescWithNullVersion() {
String nullVersion = "null";
PluginDesc<SourceConnector> connectorDesc = new PluginDesc<>(
SourceConnector.class,
null,
PluginType.SOURCE,
pluginLoader
);
assertPluginDesc(
connectorDesc,
SourceConnector.class,
nullVersion,
PluginType.SOURCE,
pluginLoader.location()
);
String location = "classpath";
PluginDesc<Converter> converterDesc = new PluginDesc<>(
Converter.class,
null,
PluginType.CONVERTER,
systemLoader
);
assertPluginDesc(converterDesc, Converter.class, nullVersion, PluginType.CONVERTER, location);
}
|
/** Returns the absolute filesystem path of the backing file. */
public String getPath() {
return file.getAbsolutePath();
}
|
// The reported path is the backup root joined with the replay file name.
@Test
public void shouldGetFilePath() {
// When
final String path = replayFile.getPath();
// Then
assertThat(path, is(String.format(
"%s/%s", backupLocation.getRoot().getAbsolutePath(), REPLAY_FILE_NAME)));
}
|
/**
 * Reads a 24-bit signed medium at the index, sign-extending bit 23 into the top byte
 * of the returned int.
 */
@Override
public int getMedium(int index) {
    final int unsigned = getUnsignedMedium(index);
    return (unsigned & 0x800000) == 0 ? unsigned : unsigned | 0xff000000;
}
|
// A negative index must be rejected with IndexOutOfBoundsException.
@Test
public void getMediumBoundaryCheck1() {
assertThrows(IndexOutOfBoundsException.class, new Executable() {
@Override
public void execute() {
buffer.getMedium(-1);
}
});
}
|
/** Clears both in-memory UTM property stores (current and latest). */
public static void clearMemoryUtm() {
sUtmProperties.clear();
sLatestUtmProperties.clear();
}
|
// Smoke test: clearing the in-memory UTM stores must not throw.
@Test
public void clearMemoryUtm() {
ChannelUtils.clearMemoryUtm();
}
|
/**
 * Adds the element using its own hashCode. A null element throws
 * NullPointerException via element.hashCode() (intentional — see the null test).
 */
@Override
public boolean add(E element) {
return add(element, element.hashCode());
}
|
// Adding null (even with an explicit hash) must throw NullPointerException.
@Test(expected = NullPointerException.class)
public void testAddWithHashNull() {
final OAHashSet<Integer> set = new OAHashSet<>(8);
set.add(null, 1);
}
|
/**
 * Compares a latest image against a baseline using the configured engines and returns
 * the comparison result map.
 *
 * <p>Each configured engine (resemble, ssim) is run in order; the lowest mismatch
 * percentage seen so far is kept, and the loop stops early once the mismatch drops
 * below the configured stop threshold. Unknown engines are logged and skipped.
 *
 * @throws MismatchException when the baseline is missing, the dimensions differ, or
 *         the final mismatch check fails (per checkMismatch)
 */
public static Map<String, Object> compare(byte[] baselineImg, byte[] latestImg, Map<String, Object> options,
Map<String, Object> defaultOptions) throws MismatchException {
boolean allowScaling = toBool(defaultOptions.get("allowScaling"));
ImageComparison imageComparison = new ImageComparison(baselineImg, latestImg, options, allowScaling);
imageComparison.configure(defaultOptions);
if (imageComparison.baselineMissing) {
imageComparison.result.put("isBaselineMissing", true);
throw new MismatchException("baseline image was empty or not found", imageComparison.result);
}
if (imageComparison.scaleMismatch) {
imageComparison.result.put("isScaleMismatch", true);
throw new MismatchException("latest image dimensions != baseline image dimensions", imageComparison.result);
}
// Start at 100% and take the minimum mismatch across engines.
double mismatchPercentage = 100.0;
for (String engine : imageComparison.engines) {
double currentMismatchPercentage;
switch (engine) {
case RESEMBLE:
currentMismatchPercentage = imageComparison.execResemble();
break;
case SSIM:
currentMismatchPercentage = imageComparison.execSSIM();
break;
default:
logger.error("skipping unsupported image comparison engine: {}", engine);
continue;
}
if (currentMismatchPercentage <= mismatchPercentage) {
mismatchPercentage = currentMismatchPercentage;
}
// Early exit once a result is good enough.
if (mismatchPercentage < imageComparison.stopWhenMismatchIsLessThan) {
break;
}
}
return imageComparison.checkMismatch(mismatchPercentage);
}
|
// Identical images compare cleanly and both baseline/latest are reported as
// base64 data URLs.
@Test
void testDataUrl() {
Map<String, Object> result = ImageComparison.compare(R_1x1_IMG, R_1x1_IMG, opts(), opts());
String dataUrl = "data:image/png;base64," + R_1x1_BASE64;
assertEquals(dataUrl, result.get("baseline"));
assertEquals(dataUrl, result.get("latest"));
}
|
/** Creates a PipelineOptions proxy implementing the given interface with defaults. */
public static <T extends PipelineOptions> T as(Class<T> klass) {
return new Builder().as(klass);
}
|
// An options interface with a method that is not a bean property must be rejected
// with a descriptive IllegalArgumentException.
@Test
public void testHavingExtraneousMethodThrows() throws Exception {
expectedException.expect(IllegalArgumentException.class);
expectedException.expectMessage(
"Methods [extraneousMethod(int, String)] on "
+ "[org.apache.beam.sdk.options.PipelineOptionsFactoryTest$ExtraneousMethod] "
+ "do not conform to being bean properties.");
PipelineOptionsFactory.as(ExtraneousMethod.class);
}
|
/**
 * Extracts service definitions from the SoapUI project: records all named interface
 * elements, then collects definitions from SOAP mock services and REST mock services.
 *
 * @return the services found in the project (possibly empty)
 * @throws MockRepositoryImportException when a mock service definition cannot be imported
 */
@Override
public List<Service> getServiceDefinitions() throws MockRepositoryImportException {
List<Service> result = new ArrayList<>();
List<Element> interfaceNodes = getConfigDirectChildren(projectElement, "interface");
for (Element interfaceNode : interfaceNodes) {
// Filter complete interface definition with name as attribute.
if (interfaceNode.getAttribute(NAME_ATTRIBUTE) != null) {
log.info("Found a service interface named: {}", interfaceNode.getAttribute(NAME_ATTRIBUTE));
interfaces.put(interfaceNode.getAttribute(NAME_ATTRIBUTE), interfaceNode);
// NOTE(review): serviceInterface ends up holding the LAST named interface seen —
// confirm downstream code expects that.
serviceInterface = interfaceNode;
}
}
// Try loading definitions from Soap mock services.
List<Element> mockServices = getConfigDirectChildren(projectElement, MOCK_SERVICE_TAG);
if (!mockServices.isEmpty()) {
result.addAll(getSoapServicesDefinitions(mockServices));
}
// Then try loading from Rest mock services.
List<Element> restMockServices = getConfigDirectChildren(projectElement, REST_MOCK_SERVICE_TAG);
if (!restMockServices.isEmpty()) {
result.addAll(getRestServicesDefinitions(restMockServices));
}
return result;
}
|
// A project without a version property must fail the import with a message
// mentioning "Version property".
@Test
void testSimpleProjectNoVersionImport() {
SoapUIProjectImporter importer = null;
try {
importer = new SoapUIProjectImporter(
"target/test-classes/io/github/microcks/util/soapui/RefTest-no-version-soapui-project.xml");
} catch (Exception e) {
fail("Exception should not be thrown");
}
// Check that basic service properties are there.
boolean failure = false;
List<Service> services = null;
try {
services = importer.getServiceDefinitions();
} catch (MockRepositoryImportException e) {
failure = true;
assertNotEquals(-1, e.getMessage().indexOf("Version property"));
}
assertTrue(failure);
}
|
/**
 * Adds the given delta to this value, marks the value as set, and returns this
 * instance for call chaining.
 */
public DoubleValue increment(double increment) {
    this.set = true;
    this.value += increment;
    return this;
}
|
// Increments accumulate: 35 + 10 = 45 (DoubleValue arguments contribute their value).
@Test
public void multiple_calls_to_increment_DoubleVariationValue_increments_by_the_value_of_the_arg() {
DoubleValue target = new DoubleValue()
.increment(new DoubleValue().increment(35))
.increment(new DoubleValue().increment(10));
verifySetVariationValue(target, 45);
}
|
/**
 * Replays past blocks starting at {@code startBlock}, then continues with
 * {@code onCompleteFlowable}; subscribed on this instance's scheduler.
 */
public Flowable<EthBlock> replayPastBlocksFlowable(
DefaultBlockParameter startBlock,
boolean fullTransactionObjects,
Flowable<EthBlock> onCompleteFlowable) {
// We use a scheduler to ensure this Flowable runs asynchronously for users to be
// consistent with the other Flowables
return replayPastBlocksFlowableSync(startBlock, fullTransactionObjects, onCompleteFlowable)
.subscribeOn(scheduler);
}
|
// Replaying from block 0 must emit blocks 0..4 exactly once, in order, despite the
// stubbed node reporting a moving "latest" block and duplicate responses.
@Test
public void testReplayPastBlocksFlowable() throws Exception {
List<EthBlock> expected =
Arrays.asList(
createBlock(0),
createBlock(1),
createBlock(2),
createBlock(3),
createBlock(4));
// Stubbed send() responses: the first entry of each group is the "latest block"
// probe; duplicates simulate re-fetching while the chain head advances.
List<EthBlock> ethBlocks =
Arrays.asList(
expected.get(2), // greatest block
expected.get(0),
expected.get(1),
expected.get(2),
expected.get(4), // greatest block
expected.get(3),
expected.get(4),
expected.get(4)); // greatest block
OngoingStubbing<EthBlock> stubbing =
when(web3jService.send(any(Request.class), eq(EthBlock.class)));
for (EthBlock ethBlock : ethBlocks) {
stubbing = stubbing.thenReturn(ethBlock);
}
// Stub the filter lifecycle RPCs used once replay catches up to the chain head.
EthFilter ethFilter =
objectMapper.readValue(
"{\n"
+ "  \"id\":1,\n"
+ "  \"jsonrpc\": \"2.0\",\n"
+ "  \"result\": \"0x1\"\n"
+ "}",
EthFilter.class);
EthLog ethLog =
objectMapper.readValue(
"{\"jsonrpc\":\"2.0\",\"id\":1,\"result\":["
+ "\"0x31c2342b1e0b8ffda1507fbffddf213c4b3c1e819ff6a84b943faabb0ebf2403\""
+ "]}",
EthLog.class);
EthUninstallFilter ethUninstallFilter =
objectMapper.readValue(
"{\"jsonrpc\":\"2.0\",\"id\":1,\"result\":true}", EthUninstallFilter.class);
when(web3jService.send(any(Request.class), eq(EthFilter.class))).thenReturn(ethFilter);
when(web3jService.send(any(Request.class), eq(EthLog.class))).thenReturn(ethLog);
when(web3jService.send(any(Request.class), eq(EthUninstallFilter.class)))
.thenReturn(ethUninstallFilter);
Flowable<EthBlock> flowable =
web3j.replayPastBlocksFlowable(
new DefaultBlockParameterNumber(BigInteger.ZERO), false);
// Latches synchronize the async subscription with the assertions below.
CountDownLatch transactionLatch = new CountDownLatch(expected.size());
CountDownLatch completedLatch = new CountDownLatch(1);
List<EthBlock> results = new ArrayList<>(expected.size());
Disposable subscription =
flowable.subscribe(
result -> {
results.add(result);
transactionLatch.countDown();
},
throwable -> fail(throwable.getMessage()),
() -> completedLatch.countDown());
transactionLatch.await(1250, TimeUnit.MILLISECONDS);
assertEquals(results, (expected));
subscription.dispose();
completedLatch.await(1, TimeUnit.SECONDS);
assertTrue(subscription.isDisposed());
}
|
/** Returns the current level of the root logger. */
public LoggerLevel getRootLoggerLevel() {
return Loggers.get(Logger.ROOT_LOGGER_NAME).getLevel();
}
|
// Setting the root level to TRACE via the tester must be reflected by the accessor.
@Test
public void getRootLoggerLevel() {
logTester.setLevel(TRACE);
assertThat(underTest.getRootLoggerLevel()).isEqualTo(TRACE);
}
|
/**
 * Picks the most recently submitted job run across all candidate jobs, skipping any
 * job whose runs show an attempt id at or beyond its configured maxAttempts.
 *
 * @param candidateJobs map of original job id to the set of (context, submissionTime)
 *        pairs for that job's runs (original submission plus retries)
 * @return the latest eligible (context, submissionTime) pair, or null when every
 *         candidate has exhausted its retries
 */
@VisibleForTesting
static Pair<TableRebalanceContext, Long> getLatestJob(
Map<String, Set<Pair<TableRebalanceContext, Long>>> candidateJobs) {
Pair<TableRebalanceContext, Long> candidateJobRun = null;
for (Map.Entry<String, Set<Pair<TableRebalanceContext, Long>>> entry : candidateJobs.entrySet()) {
// The job configs from all retry jobs are same, as the same set of job configs is used to do retry.
// The job metadata kept in ZK is cleaned by submission time order gradually, so we can't compare Set.size()
// against maxAttempts, but check retryNum of each run to see if retries have exceeded limit.
Set<Pair<TableRebalanceContext, Long>> jobRuns = entry.getValue();
int maxAttempts = jobRuns.iterator().next().getLeft().getConfig().getMaxAttempts();
Pair<TableRebalanceContext, Long> latestJobRun = null;
for (Pair<TableRebalanceContext, Long> jobRun : jobRuns) {
// Any run at/over the attempt limit disqualifies the whole job from retry.
if (jobRun.getLeft().getAttemptId() >= maxAttempts) {
latestJobRun = null;
break;
}
if (latestJobRun == null || latestJobRun.getRight() < jobRun.getRight()) {
latestJobRun = jobRun;
}
}
if (latestJobRun == null) {
LOGGER.info("Rebalance job: {} had exceeded maxAttempts: {}. Skip retry", entry.getKey(), maxAttempts);
continue;
}
// Keep the globally latest eligible run across all jobs.
if (candidateJobRun == null || candidateJobRun.getRight() < latestJobRun.getRight()) {
candidateJobRun = latestJobRun;
}
}
return candidateJobRun;
}
|
@Test
public void testGetLatestJob() {
Map<String, Set<Pair<TableRebalanceContext, Long>>> jobs = new HashMap<>();
// The most recent job run is job1_3, and within 3 maxAttempts.
jobs.put("job1",
ImmutableSet.of(Pair.of(createDummyJobCtx("job1", 1), 10L), Pair.of(createDummyJobCtx("job1", 2), 20L),
Pair.of(createDummyJobCtx("job1", 3), 1020L)));
jobs.put("job2", ImmutableSet.of(Pair.of(createDummyJobCtx("job2", 1), 1000L)));
Pair<TableRebalanceContext, Long> jobTime = RebalanceChecker.getLatestJob(jobs);
assertNotNull(jobTime);
assertEquals(jobTime.getLeft().getJobId(), "job1_3");
// The most recent job run is job1_4, but reached 3 maxAttempts.
// job1 becomes ineligible, so job2 (the only other candidate) is picked.
jobs.put("job1",
ImmutableSet.of(Pair.of(createDummyJobCtx("job1", 1), 10L), Pair.of(createDummyJobCtx("job1", 2), 20L),
Pair.of(createDummyJobCtx("job1", 3), 1020L), Pair.of(createDummyJobCtx("job1", 4), 2020L)));
jobTime = RebalanceChecker.getLatestJob(jobs);
assertNotNull(jobTime);
assertEquals(jobTime.getLeft().getJobId(), "job2");
// Add job3 that's started more recently.
jobs.put("job3", ImmutableSet.of(Pair.of(createDummyJobCtx("job3", 1), 3000L)));
jobTime = RebalanceChecker.getLatestJob(jobs);
assertNotNull(jobTime);
assertEquals(jobTime.getLeft().getJobId(), "job3");
// Remove job2 and job3, and we'd have no job to retry then.
jobs.remove("job2");
jobs.remove("job3");
jobTime = RebalanceChecker.getLatestJob(jobs);
assertNull(jobTime);
}
|
/**
 * Creates an access token for the given authentication's principal name.
 *
 * @param authentication authentication whose {@code getName()} is used as the token subject
 * @return the generated token string
 * @deprecated delegate kept for backward compatibility; use the String overload directly
 */
@Deprecated
public String createToken(Authentication authentication) {
return createToken(authentication.getName());
}
|
@Test
void testCreateTokenWhenDisableAuthAndSecretKeyIsBlank() {
// With auth disabled and an empty secret key, token creation short-circuits
// to the sentinel value "AUTH_DISABLED" instead of signing a JWT.
when(authConfigs.isAuthEnabled()).thenReturn(false);
MockEnvironment mockEnvironment = new MockEnvironment();
mockEnvironment.setProperty(AuthConstants.TOKEN_SECRET_KEY, "");
mockEnvironment.setProperty(AuthConstants.TOKEN_EXPIRE_SECONDS, AuthConstants.DEFAULT_TOKEN_EXPIRE_SECONDS.toString());
EnvUtil.setEnvironment(mockEnvironment);
jwtTokenManager = new JwtTokenManager(authConfigs);
assertEquals("AUTH_DISABLED", jwtTokenManager.createToken("nacos"));
}
|
/**
 * Looks up the annotation value for each of the given keys, in order.
 *
 * @param a         annotated element to query
 * @param annotKeys annotation keys to resolve
 * @return the values in key order, or {@code null} as soon as any key has no value
 */
List<String> getAnnotValues(Annotated a, String... annotKeys) {
    List<String> values = new ArrayList<>(annotKeys.length);
    for (String key : annotKeys) {
        String value = a.annotations().value(key);
        if (value == null) {
            // Preserve the all-or-nothing contract: a single missing key yields null.
            return null;
        }
        values.add(value);
    }
    return values;
}
|
@Test
public void annotValues() {
title("annotValues()");
// Values come back in the same order as the requested keys.
verifyValues(t2.getAnnotValues(THING, K1), V1);
verifyValues(t2.getAnnotValues(THING, K3, K1), V3, V1);
verifyValues(t2.getAnnotValues(THING, K1, K2, K3), V1, V2, V3);
// K4 has no value, so the whole lookup returns null (verified with no expected values).
verifyValues(t2.getAnnotValues(THING, K1, K4));
}
|
/**
 * Parses game chat messages for boss kill counts, personal bests, duel arena
 * results, Hallowed Sepulchre times, Hunter Rumours, collection-log pets and
 * Guardians of the Rift completions, persisting the extracted values through
 * the set/unset helpers. Only TRADE, GAMEMESSAGE, SPAM and
 * FRIENDSCHATNOTIFICATION messages are inspected.
 */
@Subscribe
public void onChatMessage(ChatMessage chatMessage)
{
if (chatMessage.getType() != ChatMessageType.TRADE
&& chatMessage.getType() != ChatMessageType.GAMEMESSAGE
&& chatMessage.getType() != ChatMessageType.SPAM
&& chatMessage.getType() != ChatMessageType.FRIENDSCHATNOTIFICATION)
{
return;
}
String message = chatMessage.getMessage();
// Boss kill-count messages.
Matcher matcher = KILLCOUNT_PATTERN.matcher(message);
if (matcher.find())
{
final String boss = matcher.group("boss");
final int kc = Integer.parseInt(matcher.group("kc"));
final String pre = matcher.group("pre");
final String post = matcher.group("post");
if (Strings.isNullOrEmpty(pre) && Strings.isNullOrEmpty(post))
{
unsetKc(boss);
return;
}
String renamedBoss = KILLCOUNT_RENAMES
.getOrDefault(boss, boss)
// The config service doesn't support keys with colons in them
.replace(":", "");
// NOTE(review): reference comparison — appears to rely on getOrDefault and
// String.replace returning the same instance when nothing changed; confirm intended.
if (boss != renamedBoss)
{
// Unset old TOB kc
unsetKc(boss);
unsetPb(boss);
unsetKc(boss.replace(":", "."));
unsetPb(boss.replace(":", "."));
// Unset old story mode
unsetKc("Theatre of Blood Story Mode");
unsetPb("Theatre of Blood Story Mode");
}
setKc(renamedBoss, kc);
// We either already have the pb, or need to remember the boss for the upcoming pb
if (lastPb > -1)
{
log.debug("Got out-of-order personal best for {}: {}", renamedBoss, lastPb);
if (renamedBoss.contains("Theatre of Blood"))
{
// TOB team size isn't sent in the kill message, but can be computed from varbits
int tobTeamSize = tobTeamSize();
lastTeamSize = tobTeamSize == 1 ? "Solo" : (tobTeamSize + " players");
}
else if (renamedBoss.contains("Tombs of Amascut"))
{
// TOA team size isn't sent in the kill message, but can be computed from varbits
int toaTeamSize = toaTeamSize();
lastTeamSize = toaTeamSize == 1 ? "Solo" : (toaTeamSize + " players");
}
final double pb = getPb(renamedBoss);
// If a raid with a team size, only update the pb if it is lower than the existing pb
// so that the pb is the overall lowest of any team size
if (lastTeamSize == null || pb == 0 || lastPb < pb)
{
log.debug("Setting overall pb (old: {})", pb);
setPb(renamedBoss, lastPb);
}
if (lastTeamSize != null)
{
log.debug("Setting team size pb: {}", lastTeamSize);
setPb(renamedBoss + " " + lastTeamSize, lastPb);
}
lastPb = -1;
lastTeamSize = null;
}
else
{
lastBossKill = renamedBoss;
lastBossTime = client.getTickCount();
}
return;
}
// Duel arena win/loss messages and streak bookkeeping.
matcher = DUEL_ARENA_WINS_PATTERN.matcher(message);
if (matcher.find())
{
final int oldWins = getKc("Duel Arena Wins");
final int wins = matcher.group(2).equals("one") ? 1 :
Integer.parseInt(matcher.group(2).replace(",", ""));
final String result = matcher.group(1);
int winningStreak = getKc("Duel Arena Win Streak");
int losingStreak = getKc("Duel Arena Lose Streak");
if (result.equals("won") && wins > oldWins)
{
losingStreak = 0;
winningStreak += 1;
}
else if (result.equals("were defeated"))
{
losingStreak += 1;
winningStreak = 0;
}
else
{
log.warn("unrecognized duel streak chat message: {}", message);
}
setKc("Duel Arena Wins", wins);
setKc("Duel Arena Win Streak", winningStreak);
setKc("Duel Arena Lose Streak", losingStreak);
}
matcher = DUEL_ARENA_LOSSES_PATTERN.matcher(message);
if (matcher.find())
{
int losses = matcher.group(1).equals("one") ? 1 :
Integer.parseInt(matcher.group(1).replace(",", ""));
setKc("Duel Arena Losses", losses);
}
// Personal-best duration messages come in several formats; all feed matchPb.
matcher = KILL_DURATION_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = NEW_PB_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = RAIDS_PB_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = RAIDS_DURATION_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
// Hallowed Sepulchre per-floor and overall times / kill counts.
matcher = HS_PB_PATTERN.matcher(message);
if (matcher.find())
{
int floor = Integer.parseInt(matcher.group("floor"));
String floortime = matcher.group("floortime");
String floorpb = matcher.group("floorpb");
String otime = matcher.group("otime");
String opb = matcher.group("opb");
String pb = MoreObjects.firstNonNull(floorpb, floortime);
setPb("Hallowed Sepulchre Floor " + floor, timeStringToSeconds(pb));
if (otime != null)
{
pb = MoreObjects.firstNonNull(opb, otime);
setPb("Hallowed Sepulchre", timeStringToSeconds(pb));
}
}
matcher = HS_KC_FLOOR_PATTERN.matcher(message);
if (matcher.find())
{
int floor = Integer.parseInt(matcher.group(1));
int kc = Integer.parseInt(matcher.group(2).replaceAll(",", ""));
setKc("Hallowed Sepulchre Floor " + floor, kc);
}
matcher = HS_KC_GHC_PATTERN.matcher(message);
if (matcher.find())
{
int kc = Integer.parseInt(matcher.group(1).replaceAll(",", ""));
setKc("Hallowed Sepulchre", kc);
}
matcher = HUNTER_RUMOUR_KC_PATTERN.matcher(message);
if (matcher.find())
{
int kc = Integer.parseInt(matcher.group(1).replaceAll(",", ""));
setKc("Hunter Rumours", kc);
}
// Forget the remembered boss once a tick has passed without its pb arriving.
if (lastBossKill != null && lastBossTime != client.getTickCount())
{
lastBossKill = null;
lastBossTime = -1;
}
// Collection log entries may unlock a pet; track newly obtained pets.
matcher = COLLECTION_LOG_ITEM_PATTERN.matcher(message);
if (matcher.find())
{
String item = matcher.group(1);
int petId = findPet(item);
if (petId != -1)
{
final List<Integer> petList = new ArrayList<>(getPetList());
if (!petList.contains(petId))
{
log.debug("New pet added: {}/{}", item, petId);
petList.add(petId);
setPetList(petList);
}
}
}
matcher = GUARDIANS_OF_THE_RIFT_PATTERN.matcher(message);
if (matcher.find())
{
int kc = Integer.parseInt(matcher.group(1));
setKc("Guardians of the Rift", kc);
}
}
|
@Test
public void testGauntletPersonalBest()
{
// The pb arrives before the kill-count message; the plugin must pair them up.
ChatMessage chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Challenge duration: <col=ff0000>10:24</col>. Personal best: 7:59.", null, 0);
chatCommandsPlugin.onChatMessage(chatMessage);
chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Your Gauntlet completion count is: <col=ff0000>124</col>.", null, 0);
chatCommandsPlugin.onChatMessage(chatMessage);
verify(configManager).setRSProfileConfiguration("killcount", "gauntlet", 124);
verify(configManager).setRSProfileConfiguration("personalbest", "gauntlet", 7 * 60 + 59.0);
// Precise times
chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Challenge duration: <col=ff0000>10:24.20</col>. Personal best: 7:52.40.", null, 0);
chatCommandsPlugin.onChatMessage(chatMessage);
chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Your Gauntlet completion count is: <col=ff0000>124</col>.", null, 0);
chatCommandsPlugin.onChatMessage(chatMessage);
verify(configManager).setRSProfileConfiguration("personalbest", "gauntlet", 7 * 60 + 52.4);
}
|
/**
 * Maps a binlog event type code to its human-readable name.
 *
 * @param type numeric event type code
 * @return the event's printable name, or {@code "Unknown type:" + type} for
 *         an unrecognized code
 */
public static String getTypeName(final int type) {
switch (type) {
case START_EVENT_V3:
return "Start_v3";
case STOP_EVENT:
return "Stop";
case QUERY_EVENT:
return "Query";
case ROTATE_EVENT:
return "Rotate";
case INTVAR_EVENT:
return "Intvar";
case LOAD_EVENT:
return "Load";
case NEW_LOAD_EVENT:
return "New_load";
case SLAVE_EVENT:
return "Slave";
case CREATE_FILE_EVENT:
return "Create_file";
case APPEND_BLOCK_EVENT:
return "Append_block";
case DELETE_FILE_EVENT:
return "Delete_file";
case EXEC_LOAD_EVENT:
return "Exec_load";
case RAND_EVENT:
return "RAND";
case XID_EVENT:
return "Xid";
case USER_VAR_EVENT:
return "User var";
case FORMAT_DESCRIPTION_EVENT:
return "Format_desc";
case TABLE_MAP_EVENT:
return "Table_map";
case PRE_GA_WRITE_ROWS_EVENT:
return "Write_rows_event_old";
case PRE_GA_UPDATE_ROWS_EVENT:
return "Update_rows_event_old";
case PRE_GA_DELETE_ROWS_EVENT:
return "Delete_rows_event_old";
case WRITE_ROWS_EVENT_V1:
return "Write_rows_v1";
case UPDATE_ROWS_EVENT_V1:
return "Update_rows_v1";
case DELETE_ROWS_EVENT_V1:
return "Delete_rows_v1";
case BEGIN_LOAD_QUERY_EVENT:
return "Begin_load_query";
case EXECUTE_LOAD_QUERY_EVENT:
return "Execute_load_query";
case INCIDENT_EVENT:
return "Incident";
// Both heartbeat variants intentionally share one name (fall-through).
case HEARTBEAT_LOG_EVENT:
case HEARTBEAT_LOG_EVENT_V2:
return "Heartbeat";
case IGNORABLE_LOG_EVENT:
return "Ignorable";
case ROWS_QUERY_LOG_EVENT:
return "Rows_query";
case WRITE_ROWS_EVENT:
return "Write_rows";
case UPDATE_ROWS_EVENT:
return "Update_rows";
case DELETE_ROWS_EVENT:
return "Delete_rows";
case GTID_LOG_EVENT:
return "Gtid";
case ANONYMOUS_GTID_LOG_EVENT:
return "Anonymous_Gtid";
case PREVIOUS_GTIDS_LOG_EVENT:
return "Previous_gtids";
case PARTIAL_UPDATE_ROWS_EVENT:
return "Update_rows_partial";
case TRANSACTION_CONTEXT_EVENT :
return "Transaction_context";
case VIEW_CHANGE_EVENT :
return "view_change";
case XA_PREPARE_LOG_EVENT :
return "Xa_prepare";
case TRANSACTION_PAYLOAD_EVENT :
return "transaction_payload";
default:
return "Unknown type:" + type;
}
}
|
@Test
public void getTypeNameInputPositiveOutputNotNull18() {
    // Type code 29 (ROWS_QUERY_LOG_EVENT) must map to the printable name "Rows_query".
    Assert.assertEquals("Rows_query", LogEvent.getTypeName(29));
}
|
/**
 * Pages through metadata entries optionally filtered by path.
 *
 * @param path        optional path filter
 * @param currentPage 1-based page index (required)
 * @param pageSize    page size (required)
 * @return a successful admin result wrapping the requested page
 */
@GetMapping("/queryList")
@RequiresPermissions("system:meta:list")
public ShenyuAdminResult queryList(final String path,
    @RequestParam @NotNull(message = "currentPage not null") final Integer currentPage,
    @RequestParam @NotNull(message = "pageSize not null") final Integer pageSize) {
    final PageParameter pageParameter = new PageParameter(currentPage, pageSize);
    final MetaDataQuery query = new MetaDataQuery(path, pageParameter);
    final CommonPager<MetaDataVO> pager = metaDataService.listByPage(query);
    return ShenyuAdminResult.success(ShenyuResultMessage.QUERY_SUCCESS, pager);
}
|
@Test
public void testQueryList() throws Exception {
// Stub the service to return a single-item page, then verify the endpoint
// echoes the success message and the item's appName in the JSON payload.
final PageParameter pageParameter = new PageParameter();
List<MetaDataVO> metaDataVOS = new ArrayList<>();
metaDataVOS.add(metaDataVO);
final CommonPager<MetaDataVO> commonPager = new CommonPager<>();
commonPager.setPage(pageParameter);
commonPager.setDataList(metaDataVOS);
final MetaDataQuery metaDataQuery = new MetaDataQuery("path", pageParameter);
given(this.metaDataService.listByPage(metaDataQuery)).willReturn(commonPager);
this.mockMvc.perform(MockMvcRequestBuilders.get("/meta-data/queryList")
.param("path", "path")
.param("currentPage", String.valueOf(pageParameter.getCurrentPage()))
.param("pageSize", String.valueOf(pageParameter.getPageSize())))
.andExpect(status().isOk())
.andExpect(jsonPath("$.message", is(ShenyuResultMessage.QUERY_SUCCESS)))
.andExpect(jsonPath("$.data.dataList[0].appName", is(metaDataVO.getAppName())))
.andReturn();
}
|
/**
 * Resolves a comma-separated list of file names into fully-qualified
 * filesystem names, one per entry.
 *
 * @param files comma-separated file names; may be {@code null}
 * @param conf  Hadoop configuration; may be {@code null}
 * @param user  user to resolve the files as
 * @return resolved names in input order, or {@code null} when files or conf is null
 */
public static String[] hadoopFsListAsArray(String files, Configuration conf,
    String user)
    throws URISyntaxException, FileNotFoundException, IOException,
    InterruptedException {
    if (files == null || conf == null) {
        return null;
    }
    final String[] names = files.split(",");
    final String[] resolved = new String[names.length];
    for (int i = 0; i < names.length; i++) {
        resolved[i] = hadoopFsFilename(names[i], conf, user);
    }
    return resolved;
}
|
@Test
public void testHadoopFsListAsArray() {
try {
String tmpFileName1 = "/tmp/testHadoopFsListAsArray1";
String tmpFileName2 = "/tmp/testHadoopFsListAsArray2";
File tmpFile1 = new File(tmpFileName1);
File tmpFile2 = new File(tmpFileName2);
tmpFile1.createNewFile();
tmpFile2.createNewFile();
// Null files or null conf short-circuits to null.
Assert.assertTrue(TempletonUtils.hadoopFsListAsArray(null, null, null) == null);
Assert.assertTrue(TempletonUtils.hadoopFsListAsArray(tmpFileName1 + "," + tmpFileName2,
null, null) == null);
// With a real Configuration both entries resolve to local file: URIs.
String[] tmp2
= TempletonUtils.hadoopFsListAsArray(tmpFileName1 + "," + tmpFileName2,
new Configuration(), null);
Assert.assertEquals("file:" + tmpFileName1, tmp2[0]);
Assert.assertEquals("file:" + tmpFileName2, tmp2[1]);
tmpFile1.delete();
tmpFile2.delete();
} catch (FileNotFoundException e) {
// NOTE(review): 'tmpFile' is not declared in this method — presumably a
// field of the test class outside this view; confirm it exists.
Assert.fail("Couldn't find name for " + tmpFile.toURI().toString());
} catch (Exception e) {
// Something else is wrong
e.printStackTrace();
}
try {
TempletonUtils.hadoopFsListAsArray("/scoobydoo/teddybear,joe",
new Configuration(),
null);
Assert.fail("Should not have found /scoobydoo/teddybear");
} catch (FileNotFoundException e) {
// Should go here.
} catch (Exception e) {
// Something else is wrong.
e.printStackTrace();
}
}
|
/**
 * Reads the RM principal from configuration and expands it (e.g. _HOST
 * substitution) via the String overload.
 *
 * @param conf configuration to read {@code yarn.resourcemanager.principal} from
 * @return the prepared principal, or {@code null} when none is configured
 * @throws IOException if principal preparation fails
 */
public static String getRmPrincipal(Configuration conf) throws IOException {
    final String configured = conf.get(YarnConfiguration.RM_PRINCIPAL);
    if (configured == null) {
        return null;
    }
    return getRmPrincipal(configured, conf);
}
|
@Test
public void testGetRMPrincipalStandAlone_String() throws IOException {
Configuration conf = new Configuration();
conf.set(YarnConfiguration.RM_ADDRESS, "myhost");
conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, false);
// _HOST placeholder is replaced by the configured RM address.
String result = YarnClientUtils.getRmPrincipal("test/_HOST@REALM", conf);
assertEquals("The hostname translation did not produce the expected "
+ "results: " + result, "test/myhost@REALM", result);
// A concrete hostname is passed through untouched.
result = YarnClientUtils.getRmPrincipal("test/yourhost@REALM", conf);
assertEquals("The hostname translation did not produce the expected "
+ "results: " + result, "test/yourhost@REALM", result);
// A null principal is rejected.
try {
result = YarnClientUtils.getRmPrincipal(null, conf);
fail("The hostname translation succeeded even though the RM principal "
+ "was null: " + result);
} catch (IllegalArgumentException ex) {
// Expected
}
}
|
/**
 * Starts the nmap process on a dedicated cached thread pool.
 *
 * @return the started {@link Process}
 */
@VisibleForTesting
Process execute() throws IOException, InterruptedException, ExecutionException {
// Nmap is a long running process and the collectStream method is a blocking method.
// By default CompletableFuture uses ForkJoinPool, which is for suitable short
// non-blocking operations.
// NOTE(review): a new cached pool is created on every call and never shut down —
// looks like a thread leak if called repeatedly; confirm the executor's lifecycle.
Executor executor = Executors.newCachedThreadPool();
return execute(executor);
}
|
@Test
public void execute_always_startsProcessAndReturnsProcessInstance()
throws IOException, InterruptedException, ExecutionException {
// Runs a trivial shell command and checks the returned Process exits cleanly.
// NOTE(review): relies on /bin/sh being available (POSIX hosts only).
CommandExecutor executor = new CommandExecutor("/bin/sh", "-c", "echo 1");
Process process = executor.execute();
process.waitFor();
assertThat(process.exitValue()).isEqualTo(0);
}
|
/**
 * Parses the --target command line option into a list of target infos.
 *
 * <p>When the option is absent, all roles (master, job master, workers and
 * job workers) are targeted by default.
 *
 * @param cmd  parsed command line
 * @param conf Alluxio configuration used to resolve the targets
 * @return resolved target infos
 * @throws IOException if the option value is blank or targets cannot be resolved
 */
public static List<TargetInfo> parseOptTarget(CommandLine cmd, AlluxioConfiguration conf)
    throws IOException {
  String[] targets;
  if (!cmd.hasOption(TARGET_OPTION_NAME)) {
    // By default we set on all targets (master/workers/job_master/job_workers)
    targets = new String[]{ROLE_MASTER, ROLE_JOB_MASTER, ROLE_WORKERS, ROLE_JOB_WORKERS};
  } else {
    String argTarget = cmd.getOptionValue(TARGET_OPTION_NAME);
    if (StringUtils.isBlank(argTarget)) {
      throw new IOException("Option " + TARGET_OPTION_NAME + " can not be blank.");
    }
    targets = argTarget.contains(TARGET_SEPARATOR)
        ? argTarget.split(TARGET_SEPARATOR)
        : new String[]{argTarget};
  }
  return getTargetInfos(targets, conf);
}
|
@Test
public void unrecognizedPort() throws Exception {
// host:port targets whose port matches no known service should be rejected.
String allTargets = "localhost:12345";
CommandLine mockCommandLine = mock(CommandLine.class);
String[] mockArgs = new String[]{"--target", allTargets};
when(mockCommandLine.getArgs()).thenReturn(mockArgs);
when(mockCommandLine.hasOption(LogLevel.TARGET_OPTION_NAME)).thenReturn(true);
when(mockCommandLine.getOptionValue(LogLevel.TARGET_OPTION_NAME)).thenReturn(mockArgs[1]);
assertThrows("Unrecognized port in localhost:12345", IllegalArgumentException.class, () ->
LogLevel.parseOptTarget(mockCommandLine, mConf));
}
|
/**
 * Records a call outcome into both the running total aggregation and the
 * current window bucket, then returns a snapshot of the totals.
 * Synchronized so concurrent recorders update the aggregations atomically.
 */
@Override
public synchronized Snapshot record(long duration, TimeUnit durationUnit, Outcome outcome) {
totalAggregation.record(duration, durationUnit, outcome);
moveWindowByOne().record(duration, durationUnit, outcome);
return new SnapshotImpl(totalAggregation);
}
|
@Test
public void testRecordSuccess() {
// A single 100ms success should be the only call reflected in the snapshot.
Metrics metrics = new FixedSizeSlidingWindowMetrics(5);
Snapshot snapshot = metrics.record(100, TimeUnit.MILLISECONDS, Metrics.Outcome.SUCCESS);
assertThat(snapshot.getTotalNumberOfCalls()).isEqualTo(1);
assertThat(snapshot.getNumberOfSuccessfulCalls()).isEqualTo(1);
assertThat(snapshot.getNumberOfFailedCalls()).isZero();
assertThat(snapshot.getTotalNumberOfSlowCalls()).isZero();
assertThat(snapshot.getNumberOfSlowSuccessfulCalls()).isZero();
assertThat(snapshot.getNumberOfSlowFailedCalls()).isZero();
assertThat(snapshot.getTotalDuration().toMillis()).isEqualTo(100);
assertThat(snapshot.getAverageDuration().toMillis()).isEqualTo(100);
assertThat(snapshot.getFailureRate()).isZero();
}
|
/**
 * Fault-injection hook invoked before sending an ack upstream.
 * Intentionally a no-op in the default injector; tests override it to add delay.
 *
 * @param upstreamAddr address of the upstream node the ack is sent to
 * @throws IOException never thrown by this default implementation
 */
public void delaySendingAckToUpstream(final String upstreamAddr)
throws IOException {
}
|
@Test(timeout = 60000)
public void testDelaySendingAckToUpstream() throws Exception {
// Override the no-op fault injector so the pipeline experiences (and logs)
// an artificial ack delay exactly once, then verify via the shared helper.
final MetricsDataNodeFaultInjector mdnFaultInjector =
new MetricsDataNodeFaultInjector() {
@Override
public void delaySendingAckToUpstream(final String upstreamAddr)
throws IOException {
delayOnce();
}
@Override
public void logDelaySendingAckToUpstream(final String upstreamAddr,
final long delayMs) throws IOException {
logDelay(delayMs);
}
};
verifyFaultInjectionDelayPipeline(mdnFaultInjector);
}
|
/**
 * Validates and persists a new OpenStack port.
 * Validation order matters: a null port, then a missing port id, then a
 * missing network id each raise their own precondition failure.
 */
@Override
public void createPort(Port osPort) {
checkNotNull(osPort, ERR_NULL_PORT);
checkArgument(!Strings.isNullOrEmpty(osPort.getId()), ERR_NULL_PORT_ID);
checkArgument(!Strings.isNullOrEmpty(osPort.getNetworkId()), ERR_NULL_PORT_NET_ID);
osNetworkStore.createPort(osPort);
log.info(String.format(MSG_PORT, osPort.getId(), MSG_CREATED));
}
|
@Test(expected = IllegalArgumentException.class)
public void testCreatePortWithNullNetworkId() {
// Port has an id but no network id, so createPort must fail its precondition.
final Port testPort = NeutronPort.builder().build();
testPort.setId(PORT_ID);
target.createPort(testPort);
}
|
/**
 * Converts a hostname to its ASCII (punycode) form when IDN conversion is
 * enabled in the preferences; otherwise returns the stripped input unchanged.
 * Falls back to the stripped input when conversion fails or yields an empty result.
 *
 * @param hostname hostname to convert; surrounding whitespace is stripped
 * @return ASCII representation, or the stripped input
 */
public String convert(final String hostname) {
    final String stripped = StringUtils.strip(hostname);
    if(!PreferencesFactory.get().getBoolean("connection.hostname.idn")) {
        return stripped;
    }
    if(StringUtils.isNotEmpty(hostname)) {
        try {
            // IDN.toASCII implements the IDNToASCII operation from the IDNA RFC on
            // complete domain names such as "www.example.com". The operation can fail
            // (e.g. unassigned code points with default options), in which case the
            // input cannot be used as an Internationalized Domain Name.
            final String ascii = IDN.toASCII(stripped);
            if(log.isDebugEnabled()) {
                if(!StringUtils.equals(stripped, ascii)) {
                    log.debug(String.format("IDN hostname for %s is %s", hostname, ascii));
                }
            }
            if(StringUtils.isNotEmpty(ascii)) {
                return ascii;
            }
        }
        catch(IllegalArgumentException e) {
            log.warn(String.format("Failed to convert hostname %s to IDNA", hostname), e);
        }
    }
    return stripped;
}
|
@Test
public void testHostnameStartsWithDot() {
// A leading-dot hostname must survive punycode conversion unchanged.
assertEquals(".blob.core.windows.net", new PunycodeConverter().convert(".blob.core.windows.net"));
}
|
/**
 * Merges the JSON configurations of the given releases into one map.
 *
 * <p>Releases are applied from the last list element to the first, so entries
 * from earlier releases in the list override those from later ones.
 *
 * @param releases releases ordered from highest to lowest precedence
 * @return merged key/value configuration, insertion-ordered
 */
Map<String, String> mergeReleaseConfigurations(List<Release> releases) {
    Map<String, String> merged = Maps.newLinkedHashMap();
    // Walk backwards so that each subsequent putAll overrides lower-precedence entries.
    for (int i = releases.size() - 1; i >= 0; i--) {
        merged.putAll(gson.fromJson(releases.get(i).getConfigurations(), configurationTypeReference));
    }
    return merged;
}
|
@Test(expected = JsonSyntaxException.class)
public void testTransformConfigurationToMapFailed() throws Exception {
// Non-JSON configuration text must surface gson's JsonSyntaxException.
String someInvalidConfiguration = "xxx";
Release someRelease = new Release();
someRelease.setConfigurations(someInvalidConfiguration);
configController.mergeReleaseConfigurations(Lists.newArrayList(someRelease));
}
|
/**
 * Creates a new file, attaching a freshly generated file key first when the
 * target's container is Triple-Crypt encrypted.
 */
@Override
public Path touch(final Path file, final TransferStatus status) throws BackgroundException {
if(new SDSTripleCryptEncryptorFeature(session, nodeid).isEncrypted(containerService.getContainer(file))) {
status.setFilekey(SDSTripleCryptEncryptorFeature.generateFileKey());
}
return super.touch(file, status);
}
|
@Test(expected = BackgroundException.class)
public void testTouchFileRoot() throws Exception {
// Creating a file directly at the root has no valid parent node; the server
// rejects it, and the error detail is asserted before rethrowing.
try {
final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
new SDSTouchFeature(session, nodeid).touch(new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
}
catch(InteroperabilityException e) {
assertEquals("Error -80001. Parent ID must be positive. Please contact your web hosting service provider for assistance.", e.getDetail());
throw e;
}
}
|
/**
 * Intentionally a no-op: this implementation does not attach a view id tag.
 * Callers relying on the tag will observe it as unset.
 */
@Override
public void setViewID(View view, String viewID) {
}
|
@Test
public void testSetViewID1() {
// Since setViewID is a no-op, the decor view's tag must remain null.
Object view = new AlertDialog.Builder(mApplication).create();
mSensorsAPI.setViewID(view, "R.id.login");
Object tag = ((AlertDialog) view).getWindow().getDecorView().getTag(R.id.sensors_analytics_tag_view_id);
Assert.assertNull(tag);
}
|
/**
 * Decodes an archive control request event from the log buffer and appends a
 * human-readable rendering to the builder.
 *
 * <p>The common log header and SBE message header are consumed first; the
 * remaining bytes are then wrapped by the request decoder matching the event
 * code and rendered by the corresponding append helper. Unrecognized codes
 * append ": unknown command".
 *
 * @param eventCode archive event code identifying the request type
 * @param buffer    buffer holding the encoded event
 * @param offset    start offset of the event within the buffer
 * @param builder   destination for the textual rendering
 */
@SuppressWarnings("MethodLength")
static void dissectControlRequest(
final ArchiveEventCode eventCode,
final MutableDirectBuffer buffer,
final int offset,
final StringBuilder builder)
{
// Consume the log header, then the SBE message header, before the body.
int encodedLength = dissectLogHeader(CONTEXT, eventCode, buffer, offset, builder);
HEADER_DECODER.wrap(buffer, offset + encodedLength);
encodedLength += MessageHeaderDecoder.ENCODED_LENGTH;
switch (eventCode)
{
case CMD_IN_CONNECT:
CONNECT_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendConnect(builder);
break;
case CMD_IN_CLOSE_SESSION:
CLOSE_SESSION_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendCloseSession(builder);
break;
case CMD_IN_START_RECORDING:
START_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStartRecording(builder);
break;
case CMD_IN_STOP_RECORDING:
STOP_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopRecording(builder);
break;
case CMD_IN_REPLAY:
REPLAY_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendReplay(builder);
break;
case CMD_IN_STOP_REPLAY:
STOP_REPLAY_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopReplay(builder);
break;
case CMD_IN_LIST_RECORDINGS:
LIST_RECORDINGS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendListRecordings(builder);
break;
case CMD_IN_LIST_RECORDINGS_FOR_URI:
LIST_RECORDINGS_FOR_URI_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendListRecordingsForUri(builder);
break;
case CMD_IN_LIST_RECORDING:
LIST_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendListRecording(builder);
break;
case CMD_IN_EXTEND_RECORDING:
EXTEND_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendExtendRecording(builder);
break;
case CMD_IN_RECORDING_POSITION:
RECORDING_POSITION_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendRecordingPosition(builder);
break;
case CMD_IN_TRUNCATE_RECORDING:
TRUNCATE_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendTruncateRecording(builder);
break;
case CMD_IN_STOP_RECORDING_SUBSCRIPTION:
STOP_RECORDING_SUBSCRIPTION_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopRecordingSubscription(builder);
break;
case CMD_IN_STOP_POSITION:
STOP_POSITION_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopPosition(builder);
break;
case CMD_IN_FIND_LAST_MATCHING_RECORD:
FIND_LAST_MATCHING_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendFindLastMatchingRecord(builder);
break;
case CMD_IN_LIST_RECORDING_SUBSCRIPTIONS:
LIST_RECORDING_SUBSCRIPTIONS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendListRecordingSubscriptions(builder);
break;
case CMD_IN_START_BOUNDED_REPLAY:
BOUNDED_REPLAY_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStartBoundedReplay(builder);
break;
case CMD_IN_STOP_ALL_REPLAYS:
STOP_ALL_REPLAYS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopAllReplays(builder);
break;
case CMD_IN_REPLICATE:
REPLICATE_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendReplicate(builder);
break;
case CMD_IN_STOP_REPLICATION:
STOP_REPLICATION_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopReplication(builder);
break;
case CMD_IN_START_POSITION:
START_POSITION_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStartPosition(builder);
break;
case CMD_IN_DETACH_SEGMENTS:
DETACH_SEGMENTS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendDetachSegments(builder);
break;
case CMD_IN_DELETE_DETACHED_SEGMENTS:
DELETE_DETACHED_SEGMENTS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendDeleteDetachedSegments(builder);
break;
case CMD_IN_PURGE_SEGMENTS:
PURGE_SEGMENTS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendPurgeSegments(builder);
break;
case CMD_IN_ATTACH_SEGMENTS:
ATTACH_SEGMENTS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendAttachSegments(builder);
break;
case CMD_IN_MIGRATE_SEGMENTS:
MIGRATE_SEGMENTS_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendMigrateSegments(builder);
break;
case CMD_IN_AUTH_CONNECT:
AUTH_CONNECT_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendAuthConnect(builder);
break;
case CMD_IN_KEEP_ALIVE:
KEEP_ALIVE_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendKeepAlive(builder);
break;
case CMD_IN_TAGGED_REPLICATE:
TAGGED_REPLICATE_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendTaggedReplicate(builder);
break;
case CMD_IN_START_RECORDING2:
START_RECORDING_REQUEST2_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStartRecording2(builder);
break;
case CMD_IN_EXTEND_RECORDING2:
EXTEND_RECORDING_REQUEST2_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendExtendRecording2(builder);
break;
case CMD_IN_STOP_RECORDING_BY_IDENTITY:
STOP_RECORDING_BY_IDENTITY_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendStopRecordingByIdentity(builder);
break;
case CMD_IN_PURGE_RECORDING:
PURGE_RECORDING_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendPurgeRecording(builder);
break;
case CMD_IN_REPLICATE2:
REPLICATE_REQUEST2_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendReplicate2(builder);
break;
case CMD_IN_REQUEST_REPLAY_TOKEN:
REPLAY_TOKEN_REQUEST_DECODER.wrap(
buffer,
offset + encodedLength,
HEADER_DECODER.blockLength(),
HEADER_DECODER.version());
appendReplayToken(builder);
break;
default:
builder.append(": unknown command");
}
}
|
@Test
void controlRequestStopRecordingByIdentity()
{
// Encode a StopRecordingByIdentity request and verify the dissected text
// includes the timestamp, context, event code and all decoded fields.
internalEncodeLogHeader(buffer, 0, 12, 32, () -> 10_000_000_000L);
final StopRecordingByIdentityRequestEncoder requestEncoder = new StopRecordingByIdentityRequestEncoder();
requestEncoder.wrapAndApplyHeader(buffer, LOG_HEADER_LENGTH, headerEncoder)
.controlSessionId(22)
.correlationId(33)
.recordingId(777);
dissectControlRequest(CMD_IN_STOP_RECORDING_BY_IDENTITY, buffer, 0, builder);
assertEquals("[10.000000000] " + CONTEXT + ": " + CMD_IN_STOP_RECORDING_BY_IDENTITY.name() + " [12/32]:" +
" controlSessionId=22" +
" correlationId=33" +
" recordingId=777",
builder.toString());
}
|
/**
 * Builds a {@link SofaRequest} for the given interface, method and arguments.
 *
 * @param clazz    service interface whose name becomes the request's interface name
 * @param method   method name to invoke
 * @param argTypes argument types, converted to their string signatures
 * @param args     argument values; {@code null} is normalized to an empty array
 * @return the populated request
 * @deprecated kept for backward compatibility
 */
@Deprecated
public static SofaRequest buildSofaRequest(Class<?> clazz, String method, Class[] argTypes, Object[] args) {
    final Object[] methodArgs = (args == null) ? CodecUtils.EMPTY_OBJECT_ARRAY : args;
    final SofaRequest request = new SofaRequest();
    request.setInterfaceName(clazz.getName());
    request.setMethodName(method);
    request.setMethodArgs(methodArgs);
    request.setMethodArgSigs(ClassTypeUtils.getTypeStrs(argTypes, true));
    return request;
}
|
/**
 * buildSofaRequest should copy the interface and method names, keep argument values as given
 * (including a lone {@code null} element), and compute signature strings from the argument types.
 */
@Test
public void buildSofaRequest() throws Exception {
    // No-arg case: empty type and value arrays yield empty args and signatures.
    SofaRequest request = MessageBuilder.buildSofaRequest(Number.class, "intValue",
        new Class[0], CodecUtils.EMPTY_OBJECT_ARRAY);
    Assert.assertEquals(request.getInterfaceName(), Number.class.getName());
    Assert.assertEquals(request.getMethodName(), "intValue");
    Assert.assertArrayEquals(CodecUtils.EMPTY_OBJECT_ARRAY, request.getMethodArgs());
    Assert.assertArrayEquals(StringUtils.EMPTY_STRING_ARRAY, request.getMethodArgSigs());
    // One-arg case: a null value is preserved and the type is rendered as its class name.
    request = MessageBuilder.buildSofaRequest(Comparable.class, "compareTo",
        new Class[] { Object.class }, new Object[] { null });
    Assert.assertEquals(request.getInterfaceName(), Comparable.class.getName());
    Assert.assertEquals(request.getMethodName(), "compareTo");
    Assert.assertArrayEquals(request.getMethodArgs(), new Object[] { null });
    Assert.assertArrayEquals(request.getMethodArgSigs(), new String[] { "java.lang.Object" });
}
|
/**
 * Converts an ODPS (MaxCompute) {@link TypeInfo} into the engine's {@link Type}.
 *
 * <p>Parameterized scalar types (DECIMAL/CHAR/VARCHAR) carry their precision/scale/length over;
 * complex types (MAP/ARRAY/STRUCT) are converted recursively. Any ODPS type without an explicit
 * mapping falls back to {@code Type.VARCHAR}.
 */
public static Type convertType(TypeInfo typeInfo) {
    switch (typeInfo.getOdpsType()) {
        case BIGINT:
            return Type.BIGINT;
        case INT:
            return Type.INT;
        case SMALLINT:
            return Type.SMALLINT;
        case TINYINT:
            return Type.TINYINT;
        case FLOAT:
            return Type.FLOAT;
        case DECIMAL:
            DecimalTypeInfo decimalTypeInfo = (DecimalTypeInfo) typeInfo;
            return ScalarType.createUnifiedDecimalType(decimalTypeInfo.getPrecision(), decimalTypeInfo.getScale());
        case DOUBLE:
            return Type.DOUBLE;
        case CHAR:
            CharTypeInfo charTypeInfo = (CharTypeInfo) typeInfo;
            return ScalarType.createCharType(charTypeInfo.getLength());
        case VARCHAR:
            VarcharTypeInfo varcharTypeInfo = (VarcharTypeInfo) typeInfo;
            return ScalarType.createVarcharType(varcharTypeInfo.getLength());
        // STRING and JSON are both surfaced as the catalog default string type.
        case STRING:
        case JSON:
            return ScalarType.createDefaultCatalogString();
        case BINARY:
            return Type.VARBINARY;
        case BOOLEAN:
            return Type.BOOLEAN;
        case DATE:
            return Type.DATE;
        // TIMESTAMP and DATETIME both map to DATETIME.
        case TIMESTAMP:
        case DATETIME:
            return Type.DATETIME;
        case MAP:
            MapTypeInfo mapTypeInfo = (MapTypeInfo) typeInfo;
            return new MapType(convertType(mapTypeInfo.getKeyTypeInfo()),
                    convertType(mapTypeInfo.getValueTypeInfo()));
        case ARRAY:
            ArrayTypeInfo arrayTypeInfo = (ArrayTypeInfo) typeInfo;
            return new ArrayType(convertType(arrayTypeInfo.getElementTypeInfo()));
        case STRUCT:
            StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo;
            List<Type> fieldTypeList =
                    structTypeInfo.getFieldTypeInfos().stream().map(EntityConvertUtils::convertType)
                            .collect(Collectors.toList());
            return new StructType(fieldTypeList);
        // Unknown/unmapped ODPS types degrade to VARCHAR rather than failing.
        default:
            return Type.VARCHAR;
    }
}
|
/**
 * MAP types convert recursively: a STRING key becomes the catalog default string type and an INT
 * value stays INT.
 */
@Test
public void testConvertTypeCaseMap() {
    TypeInfo keyTypeInfo = TypeInfoFactory.STRING;
    TypeInfo valueTypeInfo = TypeInfoFactory.INT;
    MapTypeInfo mapTypeInfo = TypeInfoFactory.getMapTypeInfo(keyTypeInfo, valueTypeInfo);
    Type result = EntityConvertUtils.convertType(mapTypeInfo);
    Type expectedType = new MapType(ScalarType.createDefaultCatalogString(), Type.INT);
    assertEquals(expectedType, result);
}
|
/** Creates a new {@link Builder} for defining an integer column. */
public static Builder newIntegerColumnDefBuilder() {
    return new Builder();
}
|
/** A freshly built integer column definition is nullable unless configured otherwise. */
@Test
public void integerColumDef_is_nullable_by_default() {
    assertThat(newIntegerColumnDefBuilder().setColumnName("a").build().isNullable()).isTrue();
}
|
/**
 * Returns whether a consumer's host path matches a target path.
 *
 * <p>A host path ending in {@code *} is a Vert.x wildcard route and matches any target sharing
 * its prefix; otherwise the (normalized) paths must be equal. Both paths are suffixed with
 * {@code /} and normalized before comparison so trailing-slash differences do not matter.
 *
 * @param hostPath   path registered on the consumer / producer endpoint
 * @param targetPath path being matched against
 * @return {@code true} when the paths match, {@code false} otherwise (including when either
 *     path is empty, which should not happen for URIs supplied by the component)
 */
public static boolean webSocketHostPathMatches(String hostPath, String targetPath) {
    if (ObjectHelper.isEmpty(hostPath) || ObjectHelper.isEmpty(targetPath)) {
        // This scenario should not really be possible as the input args come from the vertx-websocket consumer / producer URI
        return false;
    }
    boolean exactPathMatch = true;
    // Paths ending with '*' are Vert.x wildcard routes so match on the path prefix
    if (hostPath.endsWith("*")) {
        exactPathMatch = false;
        hostPath = hostPath.substring(0, hostPath.lastIndexOf('*'));
    }
    String normalizedHostPath = HttpUtils.normalizePath(hostPath + "/");
    String normalizedTargetPath = HttpUtils.normalizePath(targetPath + "/");
    // Note: the previous segment-count pre-check was redundant — equals() already returns
    // false for paths with differing segment counts — so the string comparison suffices.
    if (exactPathMatch) {
        return normalizedHostPath.equals(normalizedTargetPath);
    }
    return normalizedTargetPath.startsWith(normalizedHostPath);
}
|
/** Identical host and target paths must match exactly. */
@Test
void webSocketHostExactPathMatches() {
    final String path = "/foo/bar/cheese/wine";
    assertTrue(VertxWebsocketHelper.webSocketHostPathMatches(path, path));
}
|
/**
 * Looks up {@code query} in {@code mapping}, where map keys may contain {@code *} wildcards
 * that match any run of characters.
 *
 * <p>An exact key match wins; otherwise the first wildcard key (in map iteration order) whose
 * pattern matches the full query is used.
 *
 * @param mapping map whose keys may contain {@code *} wildcards
 * @param query   literal string to look up
 * @return the mapped value, or {@code null} when no key matches
 */
static <T> T getWildcardMappedObject(final Map<String, T> mapping, final String query) {
    // Fast path: exact key match.
    final T exact = mapping.get(query);
    if (exact != null) {
        return exact;
    }
    // Slow path: interpret each key as a glob. Iterate entries (not keySet + get) to avoid a
    // second lookup per key.
    for (Map.Entry<String, T> entry : mapping.entrySet()) {
        final String key = entry.getKey();
        // Turn the key into a regex: quote each literal part, join parts with '.*'.
        String regex = Arrays.stream(key.split("\\*"))
                .map(Pattern::quote)
                .collect(Collectors.joining(".*"));
        if (key.endsWith("*")) { // split() drops trailing wildcards; restore them.
            regex += ".*";
        }
        if (query.matches(regex)) {
            return entry.getValue();
        }
    }
    return null;
}
|
/**
 * A wildcard-free key must match exactly: "myplugin/baz/foo" must not match the longer query
 * "myplugin/baz/foobar".
 */
@Test
public void testSubdirExactConcat() throws Exception
{
    // Setup test fixture.
    final Map<String, Object> haystack = Map.of("myplugin/baz/foo", new Object());
    // Execute system under test.
    final Object result = PluginServlet.getWildcardMappedObject(haystack, "myplugin/baz/foobar");
    // Verify results.
    assertNull(result);
}
|
/** Creates a builder for the AutoValue-generated {@code SplunkEventWriter}. */
static Builder newBuilder() {
    return new AutoValue_SplunkEventWriter.Builder();
}
|
/**
 * A single-batch write against a server returning 404 must surface exactly one
 * {@link SplunkWriteError} carrying the status code/message and the serialized event payload,
 * and must issue exactly one POST to the HEC endpoint.
 */
@Test
@Category(NeedsRunner.class)
public void failedSplunkWriteSingleBatchTest() {
    // Create server expectation for FAILURE.
    mockServerListening(404);
    int testPort = mockServerRule.getPort();
    // One fully populated event keyed by an arbitrary partition key.
    List<KV<Integer, SplunkEvent>> testEvents =
        ImmutableList.of(
            KV.of(
                123,
                SplunkEvent.newBuilder()
                    .withEvent("test-event-1")
                    .withHost("test-host-1")
                    .withIndex("test-index-1")
                    .withSource("test-source-1")
                    .withSourceType("test-source-type-1")
                    .withTime(12345L)
                    .create()));
    PCollection<SplunkWriteError> actual =
        pipeline
            .apply("Create Input data", Create.of(testEvents))
            .apply(
                "SplunkEventWriter",
                ParDo.of(
                    SplunkEventWriter.newBuilder()
                        .withUrl(Joiner.on(':').join("http://localhost", testPort))
                        .withInputBatchCount(
                            StaticValueProvider.of(
                                testEvents.size())) // all requests in a single batch.
                        .withToken("test-token")
                        .build()));
    // Expect a single 404 Not found SplunkWriteError
    PAssert.that(actual)
        .containsInAnyOrder(
            SplunkWriteError.newBuilder()
                .withStatusCode(404)
                .withStatusMessage("Not Found")
                .withPayload(
                    "{\"time\":12345,\"host\":\"test-host-1\","
                        + "\"source\":\"test-source-1\",\"sourcetype\":\"test-source-type-1\","
                        + "\"index\":\"test-index-1\",\"event\":\"test-event-1\"}")
                .create());
    pipeline.run();
    // Server received exactly one POST request.
    mockServerClient.verify(HttpRequest.request(EXPECTED_PATH), VerificationTimes.once());
}
|
/** Evaluates this metrics predicate against the given content file's column metrics. */
public boolean eval(ContentFile<?> file) {
    // TODO: detect the case where a column is missing from the file using file's max field id.
    return new MetricsEvalVisitor().eval(file);
}
|
/**
 * StrictMetricsEvaluator with lessThan: matches only when every row's value is strictly below
 * the bound (i.e. the bound exceeds the column's max).
 */
@Test
public void testIntegerLt() {
    // Bound equals column min: no row can be strictly less.
    boolean shouldRead =
        new StrictMetricsEvaluator(SCHEMA, lessThan("id", INT_MIN_VALUE)).eval(FILE);
    assertThat(shouldRead).as("Should not match: always false").isFalse();
    shouldRead = new StrictMetricsEvaluator(SCHEMA, lessThan("id", INT_MIN_VALUE + 1)).eval(FILE);
    assertThat(shouldRead).as("Should not match: 32 and greater not in range").isFalse();
    shouldRead = new StrictMetricsEvaluator(SCHEMA, lessThan("id", INT_MAX_VALUE)).eval(FILE);
    assertThat(shouldRead).as("Should not match: 79 not in range").isFalse();
    // Bound just above the max: every value is in range.
    shouldRead = new StrictMetricsEvaluator(SCHEMA, lessThan("id", INT_MAX_VALUE + 1)).eval(FILE);
    assertThat(shouldRead).as("Should match: all values in range").isTrue();
}
|
/**
 * Returns an {@link AvroGenericCoder} for the given Avro schema.
 *
 * <p>NOTE(review): this delegates to {@code AvroGenericCoder.of}; presumably this overload lives
 * in a different (enclosing) coder class — if it were declared on AvroGenericCoder itself it
 * would recurse infinitely. TODO confirm against the enclosing class.
 */
public static AvroGenericCoder of(Schema schema) {
    return AvroGenericCoder.of(schema);
}
|
/**
 * AvroCoder must flag a LinkedHashMap-typed field as non-deterministic, reporting the field and
 * the reason.
 */
@Test
public void testDeterminismUnorderedMap() {
    // LinkedHashMap is not deterministically ordered, so we should fail.
    assertNonDeterministic(
        AvroCoder.of(LinkedHashMapField.class),
        reasonField(
            LinkedHashMapField.class,
            "nonDeterministicMap",
            "java.util.LinkedHashMap<java.lang.String, java.lang.String> "
                + "may not be deterministically ordered"));
}
|
/** Not supported by this adapter; always throws {@link MethodNotAvailableException}. */
@Override
@MethodNotAvailable
public void loadAll(boolean replaceExistingValues) {
    throw new MethodNotAvailableException();
}
|
/** The keyed loadAll overload is unavailable on this adapter and must throw. */
@Test(expected = MethodNotAvailableException.class)
public void testLoadAllWithKeys() {
    adapter.loadAll(Collections.emptySet(), true);
}
|
/**
 * Validates that every id in the collection refers to an existing, enabled department.
 * An empty or null collection is considered valid.
 *
 * @param ids department ids to validate
 */
@Override
public void validateDeptList(Collection<Long> ids) {
    if (CollUtil.isEmpty(ids)) {
        return;
    }
    // Load all referenced departments in one batch.
    Map<Long, DeptDO> deptMap = getDeptMap(ids);
    // Each id must resolve to an existing, enabled department.
    for (Long id : ids) {
        DeptDO dept = deptMap.get(id);
        if (dept == null) {
            throw exception(DEPT_NOT_FOUND);
        }
        if (!CommonStatusEnum.ENABLE.getStatus().equals(dept.getStatus())) {
            throw exception(DEPT_NOT_ENABLE, dept.getName());
        }
    }
}
|
/** Happy path: the id refers to an existing, enabled department, so validation passes silently. */
@Test
public void testValidateDeptList_success() {
    // mock data: insert an enabled department
    DeptDO deptDO = randomPojo(DeptDO.class).setStatus(CommonStatusEnum.ENABLE.getStatus());
    deptMapper.insert(deptDO);
    // prepare arguments
    List<Long> ids = singletonList(deptDO.getId());
    // invoke; success means no exception is thrown, so no assertion is needed
    deptService.validateDeptList(ids);
}
|
/** Renders matchAny, negation and value via the standard toStringHelper format. */
@Override
public String toString() {
    return toStringHelper(this)
            .add("matchAny", matchAny)
            .add("negation", negation)
            .add("value", value)
            .toString();
}
|
/**
 * toString must be consistent: equal matches render identically and different matches render
 * differently.
 */
@Test
public void testToString() {
    Match<String> m1 = Match.any();
    Match<String> m2 = Match.any();
    Match<String> m3 = Match.ifValue("foo");
    Match<String> m4 = Match.ifValue("foo");
    Match<String> m5 = Match.ifNotValue("foo");
    String note = "Results of toString() should be consistent -- ";
    assertTrue(note, m1.toString().equals(m2.toString()));
    assertTrue(note, m3.toString().equals(m4.toString()));
    // ifValue vs ifNotValue differ in the negation flag, so their strings must differ.
    assertFalse(note, m4.toString().equals(m5.toString()));
}
|
/** Creates a {@link KsqlTarget} for the given server with no additional request properties. */
public KsqlTarget target(final URI server) {
    return target(server, Collections.emptyMap());
}
|
/**
 * postQueryRequestStreamed must POST the KsqlRequest (including the supplied row limit) to
 * /query with a JSON Accept header and stream back all response rows.
 */
@Test
public void shouldPostQueryRequestStreamedWithLimit() throws Exception {
    // Given:
    int numRows = 10;
    List<StreamedRow> expectedResponse = setQueryStreamResponse(numRows, true);
    String sql = "whateva";
    // When:
    KsqlTarget target = ksqlClient.target(serverUri);
    RestResponse<StreamPublisher<StreamedRow>> response = target
        .postQueryRequestStreamed(sql, Collections.emptyMap(), Optional.of(321L));
    // Then:
    assertThat(server.getHttpMethod(), is(HttpMethod.POST));
    assertThat(server.getPath(), is("/query"));
    assertThat(server.getHeaders().get("Accept"), is("application/json"));
    assertThat(getKsqlRequest(), is(new KsqlRequest(sql, properties, Collections.emptyMap(), 321L)));
    // numRows + 1 accounts for the trailing non-row message in the stream.
    List<StreamedRow> rows = getElementsFromPublisher(numRows + 1, response.getResponse());
    assertThat(rows, is(expectedResponse));
}
|
/** Returns this transform function's registered name. */
@Override
public String getName() {
    return FUNCTION_NAME;
}
|
/**
 * round_decimal over a nullable int column: null rows are collected in the null bitmap and
 * non-null int values round to themselves (scale 0).
 */
@Test
public void testRoundDecimalNullColumn() {
    // Fix: the format string previously had a single %s, silently dropping the scale argument
    // (String.format ignores extra arguments), so the scale 0 was never part of the expression.
    ExpressionContext expression =
        RequestContextUtils.getExpression(String.format("round_decimal(%s, %s)", INT_SV_NULL_COLUMN, 0));
    TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
    Assert.assertTrue(transformFunction instanceof RoundDecimalTransformFunction);
    Assert.assertEquals(transformFunction.getName(), TransformFunctionType.ROUND_DECIMAL.getName());
    double[] expectedValues = new double[NUM_ROWS];
    RoaringBitmap roaringBitmap = new RoaringBitmap();
    for (int i = 0; i < NUM_ROWS; i++) {
        if (isNullRow(i)) {
            roaringBitmap.add(i);
        } else {
            // Rounding an integer to 0 decimal places leaves it unchanged.
            expectedValues[i] = _intSVValues[i];
        }
    }
    testTransformFunctionWithNull(transformFunction, expectedValues, roaringBitmap);
}
|
/**
 * Issues one backup auth credential per day in the inclusive, day-aligned window
 * [redemptionStart, redemptionEnd] for the account's previously committed blinded backup-id.
 *
 * <p>If the account holds an expired backup voucher it is cleared first via an async account
 * update, then this method retries on the updated account.
 *
 * @throws io.grpc.StatusRuntimeException PERMISSION_DENIED when backups are not configured for
 *     the account; INVALID_ARGUMENT when the window is inverted, in the past, longer than
 *     {@code MAX_REDEMPTION_DURATION}, or not day-aligned; NOT_FOUND when no blinded backup-id
 *     was committed; INTERNAL when the stored credential request cannot be deserialized
 */
public CompletableFuture<List<Credential>> getBackupAuthCredentials(
    final Account account,
    final Instant redemptionStart,
    final Instant redemptionEnd) {
    // If the account has an expired payment, clear it before continuing
    if (hasExpiredVoucher(account)) {
        return accountsManager.updateAsync(account, a -> {
            // Re-check in case we raced with an update
            if (hasExpiredVoucher(a)) {
                a.setBackupVoucher(null);
            }
        }).thenCompose(updated -> getBackupAuthCredentials(updated, redemptionStart, redemptionEnd));
    }
    // If this account isn't allowed some level of backup access via configuration, don't continue
    final BackupLevel configuredBackupLevel = configuredBackupLevel(account).orElseThrow(() ->
        Status.PERMISSION_DENIED.withDescription("Backups not allowed on account").asRuntimeException());
    final Instant startOfDay = clock.instant().truncatedTo(ChronoUnit.DAYS);
    // Reject inverted, stale, over-long, or non-day-aligned windows.
    if (redemptionStart.isAfter(redemptionEnd) ||
        redemptionStart.isBefore(startOfDay) ||
        redemptionEnd.isAfter(startOfDay.plus(MAX_REDEMPTION_DURATION)) ||
        !redemptionStart.equals(redemptionStart.truncatedTo(ChronoUnit.DAYS)) ||
        !redemptionEnd.equals(redemptionEnd.truncatedTo(ChronoUnit.DAYS))) {
        throw Status.INVALID_ARGUMENT.withDescription("invalid redemption window").asRuntimeException();
    }
    // fetch the blinded backup-id the account should have previously committed to
    final byte[] committedBytes = account.getBackupCredentialRequest();
    if (committedBytes == null) {
        throw Status.NOT_FOUND.withDescription("No blinded backup-id has been added to the account").asRuntimeException();
    }
    try {
        // create a credential for every day in the requested period
        final BackupAuthCredentialRequest credentialReq = new BackupAuthCredentialRequest(committedBytes);
        return CompletableFuture.completedFuture(Stream
            .iterate(redemptionStart, curr -> curr.plus(Duration.ofDays(1)))
            .takeWhile(redemptionTime -> !redemptionTime.isAfter(redemptionEnd))
            .map(redemptionTime -> {
                // Check if the account has a voucher that's good for a certain receiptLevel at redemption time, otherwise
                // use the default receipt level
                final BackupLevel backupLevel = storedBackupLevel(account, redemptionTime).orElse(configuredBackupLevel);
                return new Credential(
                    credentialReq.issueCredential(redemptionTime, backupLevel, serverSecretParams),
                    redemptionTime);
            })
            .toList());
    } catch (InvalidInputException e) {
        throw Status.INTERNAL
            .withDescription("Could not deserialize stored request credential")
            .withCause(e)
            .asRuntimeException();
    }
}
|
/**
 * With a backup voucher expiring on day 4, credentials for days 0 through 4 carry MEDIA level
 * and credentials for later days fall back to the account's configured MESSAGES level.
 */
@Test
void expiringBackupPayment() throws VerificationFailedException {
    clock.pin(Instant.ofEpochSecond(1));
    final Instant day0 = Instant.EPOCH;
    final Instant day4 = Instant.EPOCH.plus(Duration.ofDays(4));
    final Instant dayMax = day0.plus(BackupAuthManager.MAX_REDEMPTION_DURATION);
    final BackupAuthManager authManager = create(BackupLevel.MESSAGES, false);
    // Account with a committed blinded backup-id and a voucher (receipt level 201) valid to day 4.
    final Account account = mock(Account.class);
    when(account.getUuid()).thenReturn(aci);
    when(account.getBackupCredentialRequest()).thenReturn(backupAuthTestUtil.getRequest(backupKey, aci).serialize());
    when(account.getBackupVoucher()).thenReturn(new Account.BackupVoucher(201, day4));
    final List<BackupAuthManager.Credential> creds = authManager.getBackupAuthCredentials(account, day0, dayMax).join();
    Instant redemptionTime = day0;
    final BackupAuthCredentialRequestContext requestContext = BackupAuthCredentialRequestContext.create(backupKey, aci);
    for (int i = 0; i < creds.size(); i++) {
        // Before the expiration, credentials should have a media receipt, otherwise messages only
        final BackupLevel level = i < 5 ? BackupLevel.MEDIA : BackupLevel.MESSAGES;
        final BackupAuthManager.Credential cred = creds.get(i);
        assertThat(requestContext
            .receiveResponse(cred.credential(), redemptionTime, backupAuthTestUtil.params.getPublicParams())
            .getBackupLevel())
            .isEqualTo(level);
        assertThat(cred.redemptionTime().getEpochSecond()).isEqualTo(redemptionTime.getEpochSecond());
        redemptionTime = redemptionTime.plus(Duration.ofDays(1));
    }
}
|
/**
 * Persists a new GoView project built from the request and returns its generated id.
 * New projects always start in DISABLE status.
 *
 * @param createReqVO creation request
 * @return the generated project id
 */
@Override
public Long createProject(GoViewProjectCreateReqVO createReqVO) {
    // Convert the request into a DO; newly created projects are disabled by default.
    GoViewProjectDO project = GoViewProjectConvert.INSTANCE.convert(createReqVO);
    project.setStatus(CommonStatusEnum.DISABLE.getStatus());
    goViewProjectMapper.insert(project);
    // The mapper back-fills the generated primary key on insert.
    return project.getId();
}
|
/** Creating a project returns a generated id and persists a record matching the request. */
@Test
public void testCreateProject_success() {
    // prepare arguments
    GoViewProjectCreateReqVO reqVO = randomPojo(GoViewProjectCreateReqVO.class);
    // invoke
    Long goViewProjectId = goViewProjectService.createProject(reqVO);
    // assert an id was generated
    assertNotNull(goViewProjectId);
    // verify the persisted record's fields match the request
    GoViewProjectDO goViewProject = goViewProjectMapper.selectById(goViewProjectId);
    assertPojoEquals(reqVO, goViewProject);
}
|
/** Refreshes proxy-user configuration from a freshly loaded server-side {@link Configuration}. */
public static void refreshSuperUserGroupsConfiguration() {
    //load server side configuration;
    refreshSuperUserGroupsConfiguration(new Configuration());
}
|
/** A proxy-user entry that lists allowed users but no hosts must deny impersonation from any address. */
@Test
public void testNoHostsForUsers() throws Exception {
    Configuration conf = new Configuration(false);
    // Configure only the ".users" key under prefix "y"; no ".hosts" key is set.
    conf.set("y." + REAL_USER_NAME + ".users",
        StringUtils.join(",", Arrays.asList(AUTHORIZED_PROXY_USER_NAME)));
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf, "y");
    UserGroupInformation realUserUgi = UserGroupInformation
        .createRemoteUser(REAL_USER_NAME);
    UserGroupInformation proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
        AUTHORIZED_PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
    // IP doesn't matter
    assertNotAuthorized(proxyUserUgi, "1.2.3.4");
}
|
/**
 * Delegates to {@code howToHandleFailure(Throwable)} and, when span-based event reporting is
 * enabled, asynchronously reports the job failure with its labels on the main-thread executor
 * once labeling completes.
 */
@Override
public FailureResult howToHandleFailure(
        Throwable failure, CompletableFuture<Map<String, String>> failureLabels) {
    FailureResult failureResult = howToHandleFailure(failure);
    if (reportEventsAsSpans) {
        // TODO: replace with reporting as event once events are supported.
        // Add reporting as callback for when the failure labeling is completed.
        failureLabels.thenAcceptAsync(
            (labels) -> jobFailureMetricReporter.reportJobFailure(failureResult, labels),
            componentMainThreadExecutor);
    }
    return failureResult;
}
|
/**
 * When the restart strategy allows restarting, howToHandleFailure returns a restartable result
 * with the strategy's backoff, and the failure span is reported only after the labels future
 * completes on the triggered main-thread executor.
 */
@Test
void testHowToHandleFailureAllowedByStrategy() throws Exception {
    final Configuration configuration = new Configuration();
    configuration.set(TraceOptions.REPORT_EVENTS_AS_SPANS, Boolean.TRUE);
    final List<Span> spanCollector = new ArrayList<>(1);
    final UnregisteredMetricGroups.UnregisteredJobManagerJobMetricGroup testMetricGroup =
        createTestMetricGroup(spanCollector);
    // Strategy permits restart with a 1234ms backoff.
    final TestRestartBackoffTimeStrategy restartBackoffTimeStrategy =
        new TestRestartBackoffTimeStrategy(true, 1234);
    final AdaptiveScheduler scheduler =
        new AdaptiveSchedulerBuilder(
                createJobGraph(),
                mainThreadExecutor,
                EXECUTOR_RESOURCE.getExecutor())
            .setRestartBackoffTimeStrategy(restartBackoffTimeStrategy)
            .setJobMasterConfiguration(configuration)
            .setJobManagerJobMetricGroup(testMetricGroup)
            .build();
    final FailureResult failureResult =
        scheduler.howToHandleFailure(new Exception("test"), createFailureLabelsFuture());
    assertThat(failureResult.canRestart()).isTrue();
    assertThat(failureResult.getBackoffTime().toMillis())
        .isEqualTo(restartBackoffTimeStrategy.getBackoffTime());
    // No span yet: reporting runs asynchronously on the main thread executor.
    assertThat(spanCollector).isEmpty();
    mainThreadExecutor.trigger();
    checkMetrics(spanCollector, true);
}
|
/**
 * Builds a row comparator over all order-by expressions (delegates with the full column range
 * [0, size)).
 */
public static Comparator<Object[]> getComparator(List<OrderByExpressionContext> orderByExpressions,
    ColumnContext[] orderByColumnContexts, boolean nullHandlingEnabled) {
  return getComparator(orderByExpressions, orderByColumnContexts, nullHandlingEnabled, 0, orderByExpressions.size());
}
|
/** Two rows that are both null in the first sort column must be ordered by the second column. */
@Test
public void testTwoNullsCompareNextColumn() {
    List<OrderByExpressionContext> orderBys = Arrays.asList(new OrderByExpressionContext(COLUMN1, ASC, NULLS_LAST),
        new OrderByExpressionContext(COLUMN2, ASC, NULLS_LAST));
    _rows = Arrays.asList(new Object[]{null, 2}, new Object[]{null, 3}, new Object[]{1, 1});
    _rows.sort(OrderByComparatorFactory.getComparator(orderBys, ENABLE_NULL_HANDLING));
    // Non-null COLUMN1 row sorts first (value 1), then nulls ordered by COLUMN2 (2, 3).
    assertEquals(extractColumn(_rows, COLUMN2_INDEX), Arrays.asList(1, 2, 3));
}
|
/**
 * Drops the table at the identifier's default warehouse location.
 *
 * @param identifier table to drop
 * @param purge whether to also force-delete data/metadata files referenced by the last metadata
 * @return {@code false} when the location holds no Iceberg metadata (not an Iceberg table);
 *     otherwise the result of recursively deleting the table directory
 * @throws NoSuchTableException if the identifier is not valid for this catalog
 * @throws RuntimeIOException if the filesystem delete fails
 */
@Override
public boolean dropTable(TableIdentifier identifier, boolean purge) {
    if (!isValidIdentifier(identifier)) {
        throw new NoSuchTableException("Invalid identifier: %s", identifier);
    }
    Path tablePath = new Path(defaultWarehouseLocation(identifier));
    TableOperations ops = newTableOps(identifier);
    TableMetadata lastMetadata = ops.current();
    try {
        if (lastMetadata == null) {
            LOG.debug("Not an iceberg table: {}", identifier);
            return false;
        } else {
            if (purge) {
                // Since the data files and the metadata files may store in different locations,
                // so it has to call dropTableData to force delete the data file.
                CatalogUtil.dropTableData(ops.io(), lastMetadata);
            }
            return fs.delete(tablePath, true /* recursive */);
        }
    } catch (IOException e) {
        throw new RuntimeIOException(e, "Failed to delete file: %s", tablePath);
    }
}
|
/** Dropping an existing table must remove its directory from the warehouse filesystem. */
@Test
public void testDropTable() throws Exception {
    HadoopCatalog catalog = hadoopCatalog();
    TableIdentifier testTable = TableIdentifier.of("db", "ns1", "ns2", "tbl");
    catalog.createTable(testTable, SCHEMA, PartitionSpec.unpartitioned());
    String metaLocation = catalog.defaultWarehouseLocation(testTable);
    FileSystem fs = Util.getFs(new Path(metaLocation), catalog.getConf());
    // Directory exists after create and is gone after drop.
    assertThat(fs.isDirectory(new Path(metaLocation))).isTrue();
    catalog.dropTable(testTable);
    assertThat(fs.isDirectory(new Path(metaLocation))).isFalse();
}
|
/**
 * Creates an {@link AlterReplicaTask} for a ROLLUP job on a local (disk-type) tablet.
 * Delegates to the full constructor with job type ROLLUP and tablet type TABLET_TYPE_DISK.
 */
public static AlterReplicaTask rollupLocalTablet(long backendId, long dbId, long tableId, long partitionId,
                                                 long rollupIndexId, long rollupTabletId, long baseTabletId,
                                                 long newReplicaId, int newSchemaHash, int baseSchemaHash, long version,
                                                 long jobId, RollupJobV2Params rollupJobV2Params,
                                                 List<Column> baseSchemaColumns) {
    return new AlterReplicaTask(backendId, dbId, tableId, partitionId, rollupIndexId, rollupTabletId,
            baseTabletId, newReplicaId, newSchemaHash, baseSchemaHash, version, jobId, AlterJobV2.JobType.ROLLUP,
            TTabletType.TABLET_TYPE_DISK, 0, null,
            baseSchemaColumns, rollupJobV2Params);
}
|
/**
 * rollupLocalTablet must propagate every id into the task's getters and into the thrift request
 * (where "new" refers to the rollup index/tablet/schema and "base" to the originals).
 */
@Test
public void testRollupLocalTablet() {
    AlterReplicaTask task = AlterReplicaTask.rollupLocalTablet(1, 2, 3, 4, 5, 6,
        7, 8, 9, 10, 11, 12, null, null);
    Assert.assertEquals(1, task.getBackendId());
    Assert.assertEquals(2, task.getDbId());
    Assert.assertEquals(3, task.getTableId());
    Assert.assertEquals(4, task.getPartitionId());
    Assert.assertEquals(5, task.getIndexId());
    Assert.assertEquals(6, task.getTabletId());
    Assert.assertEquals(7, task.getBaseTabletId());
    Assert.assertEquals(8, task.getNewReplicaId());
    Assert.assertEquals(9, task.getNewSchemaHash());
    Assert.assertEquals(10, task.getBaseSchemaHash());
    Assert.assertEquals(11, task.getVersion());
    Assert.assertEquals(12, task.getJobId());
    Assert.assertEquals(AlterJobV2.JobType.ROLLUP, task.getJobType());
    // Thrift conversion maps rollup tablet/schema to the "new_" fields.
    TAlterTabletReqV2 request = task.toThrift();
    Assert.assertEquals(7, request.base_tablet_id);
    Assert.assertEquals(6, request.new_tablet_id);
    Assert.assertEquals(10, request.base_schema_hash);
    Assert.assertEquals(9, request.new_schema_hash);
    Assert.assertEquals(11, request.alter_version);
    Assert.assertEquals(TTabletType.TABLET_TYPE_DISK, request.tablet_type);
}
|
/** Asks the LRA coordinator (via the saga service's client) to compensate this saga's LRA. */
@Override
public CompletableFuture<Void> compensate(Exchange exchange) {
    return sagaService.getClient().compensate(this.lraURL, exchange);
}
|
@DisplayName("Tests whether compensate is called on LRAClient")
@Test
void testCompensate() throws Exception {
    // The coordinator must delegate to the client and return its future unchanged.
    CompletableFuture<Void> expected = CompletableFuture.completedFuture(null);
    Mockito.when(client.compensate(url, exchange)).thenReturn(expected);
    CompletableFuture<Void> actual = coordinator.compensate(exchange);
    Assertions.assertSame(expected, actual);
    Mockito.verify(sagaService).getClient();
    Mockito.verify(client).compensate(url, exchange);
}
|
/**
 * Routes incoming events: client release events go to the disconnect handler, all other client
 * operation events go to the operation handler; anything else is ignored.
 */
@Override
public void onEvent(Event event) {
    // Order matters: a release event must not fall through to the generic operation handler.
    if (event instanceof ClientOperationEvent.ClientReleaseEvent) {
        handleClientDisconnect((ClientOperationEvent.ClientReleaseEvent) event);
        return;
    }
    if (event instanceof ClientOperationEvent) {
        handleClientOperation((ClientOperationEvent) event);
    }
}
|
/**
 * Release events must be routed to the disconnect path (which reads the client), while other
 * operation events go to the operation path (which reads the service and client id).
 */
@Test
void testOnEvent() {
    Mockito.when(clientReleaseEvent.getClient()).thenReturn(client);
    clientServiceIndexesManager.onEvent(clientReleaseEvent);
    Mockito.verify(clientReleaseEvent).getClient();
    clientServiceIndexesManager.onEvent(clientOperationEvent);
    Mockito.verify(clientOperationEvent).getService();
    Mockito.verify(clientOperationEvent).getClientId();
}
|
/** Creates a metric set exposing the given time limiter's metrics. */
public static TimeLimiterMetrics ofTimeLimiter(TimeLimiter timeLimiter) {
    return new TimeLimiterMetrics(List.of(timeLimiter));
}
|
/**
 * Two onSuccess calls must drive the successful counter to 2 while failed and timeout counters
 * stay at 0 (three metrics registered in total).
 */
@Test
public void shouldRecordSuccesses() {
    TimeLimiter timeLimiter = TimeLimiter.of(TimeLimiterConfig.ofDefaults());
    metricRegistry.registerAll(TimeLimiterMetrics.ofTimeLimiter(timeLimiter));
    timeLimiter.onSuccess();
    timeLimiter.onSuccess();
    assertThat(metricRegistry).hasMetricsSize(3);
    assertThat(metricRegistry).counter(DEFAULT_PREFIX + SUCCESSFUL)
        .hasValue(2L);
    assertThat(metricRegistry).counter(DEFAULT_PREFIX + FAILED)
        .hasValue(0L);
    assertThat(metricRegistry).counter(DEFAULT_PREFIX + TIMEOUT)
        .hasValue(0L);
}
|
/**
 * Builds the full generated-sources map for a mining model: segmentation sources merged with
 * the sources of every nested model, finalized by the common mining-model source generation.
 *
 * @param compilationDTO compilation context for the mining model
 * @param nestedModels   nested models; every entry must be a {@link KiePMMLModelWithSources}
 * @return class-name to source-code map
 * @throws KiePMMLException if a nested model does not carry sources
 */
public static Map<String, String> getKiePMMLMiningModelSourcesMap(final MiningModelCompilationDTO compilationDTO,
                                                                  final List<KiePMMLModel> nestedModels) {
    logger.trace("getKiePMMLMiningModelSourcesMap {} {} {}", compilationDTO.getFields(),
                 compilationDTO.getModel(), compilationDTO.getPackageName());
    // Start from the segmentation sources (this call presumably also fills nestedModels —
    // the caller passes the same list; TODO(review) confirm).
    final Map<String, String> sourcesMap = getSegmentationSourcesMap(compilationDTO,
                                                                     nestedModels);
    for (KiePMMLModel nestedModel : nestedModels) {
        if (!(nestedModel instanceof KiePMMLModelWithSources)) {
            throw new KiePMMLException("Expecting only KiePMMLModelWithSources at this phase; retrieved " + nestedModel.getClass());
        }
        sourcesMap.putAll(((KiePMMLModelWithSources) nestedModel).getSourcesMap());
    }
    return getKiePMMLMiningModelSourcesMapCommon(compilationDTO,
                                                 sourcesMap);
}
|
/**
 * Building the sources map must return a non-null map and collect one nested model per
 * segmentation segment.
 */
@Test
void getKiePMMLMiningModelSourcesMap() {
    final List<KiePMMLModel> nestedModels = new ArrayList<>();
    final CommonCompilationDTO<MiningModel> source =
        CommonCompilationDTO.fromGeneratedPackageNameAndFields(PACKAGE_NAME,
                                                               pmml,
                                                               MINING_MODEL,
                                                               new PMMLCompilationContextMock(),
                                                               "FILE_NAME");
    final MiningModelCompilationDTO compilationDTO =
        MiningModelCompilationDTO.fromCompilationDTO(source);
    final Map<String, String> retrieved =
        KiePMMLMiningModelFactory.getKiePMMLMiningModelSourcesMap(compilationDTO, nestedModels);
    assertThat(retrieved).isNotNull();
    // The passed-in list is populated as a side effect, one model per segment.
    int expectedNestedModels = MINING_MODEL.getSegmentation().getSegments().size();
    assertThat(nestedModels).hasSize(expectedNestedModels);
}
|
/** Returns the default QOS options (all builder defaults, no overrides). */
public static RpcQosOptions defaultOptions() {
    return newBuilder().build();
}
|
/** Default RpcQosOptions must round-trip Java serialization. */
@Test
public void ensureSerializable() {
    SerializableUtils.ensureSerializable(RpcQosOptions.defaultOptions());
}
|
/**
 * Determines whether a row group can be dropped based purely on column dictionaries.
 *
 * @param pred         filter predicate to evaluate (must not be null)
 * @param columns      column chunk metadata of the row group (must not be null)
 * @param dictionaries source of dictionary pages for the columns
 * @return {@code true} when the dictionaries prove no row can match the predicate
 * @throws NullPointerException if {@code pred} or {@code columns} is null
 */
public static boolean canDrop(
    FilterPredicate pred, List<ColumnChunkMetaData> columns, DictionaryPageReadStore dictionaries) {
  // Fix: corrected "cannnot" -> "cannot" typo in both null-check messages.
  Objects.requireNonNull(pred, "pred cannot be null");
  Objects.requireNonNull(columns, "columns cannot be null");
  return pred.accept(new DictionaryFilter(columns, dictionaries));
}
|
/**
 * A column with fallback (dictionary + plain) encodings can never justify dropping a block, and
 * the dictionary store must not even be consulted.
 */
@Test
public void testColumnWithDictionaryAndPlainEncodings() throws Exception {
    IntColumn plain = intColumn("fallback_binary_field");
    DictionaryPageReadStore dictionaryStore = mock(DictionaryPageReadStore.class);
    // Bounds chosen outside [0, nElements) so only the encoding (not the data) could drop.
    assertFalse("Should never drop block using plain encoding", canDrop(eq(plain, -10), ccmd, dictionaryStore));
    assertFalse("Should never drop block using plain encoding", canDrop(lt(plain, -10), ccmd, dictionaryStore));
    assertFalse("Should never drop block using plain encoding", canDrop(ltEq(plain, -10), ccmd, dictionaryStore));
    assertFalse(
        "Should never drop block using plain encoding",
        canDrop(gt(plain, nElements + 10), ccmd, dictionaryStore));
    assertFalse(
        "Should never drop block using plain encoding",
        canDrop(gtEq(plain, nElements + 10), ccmd, dictionaryStore));
    assertFalse(
        "Should never drop block using plain encoding",
        canDrop(notEq(plain, nElements + 10), ccmd, dictionaryStore));
    verifyZeroInteractions(dictionaryStore);
}
|
/**
 * Reads a 2-byte big-endian signed short from the payload. The declared parameter value length
 * is ignored because int2 is always two bytes.
 */
@Override
public Object read(final PostgreSQLPacketPayload payload, final int parameterValueLength) {
    return payload.getByteBuf().readShort();
}
|
/** Reads three consecutive big-endian int2 values: Short.MIN_VALUE, -1 and Short.MAX_VALUE. */
@Test
void assertRead() {
    // 0x8000 = MIN_VALUE, 0xFFFF = -1, 0x7FFF = MAX_VALUE.
    byte[] data = {(byte) 0x80, (byte) 0x00, (byte) 0xFF, (byte) 0xFF, (byte) 0x7F, (byte) 0xFF};
    PostgreSQLInt2BinaryProtocolValue actual = new PostgreSQLInt2BinaryProtocolValue();
    PostgreSQLPacketPayload payload = new PostgreSQLPacketPayload(Unpooled.wrappedBuffer(data), StandardCharsets.UTF_8);
    assertThat(actual.read(payload, 2), is(Short.MIN_VALUE));
    assertThat(actual.read(payload, 2), is((short) -1));
    assertThat(actual.read(payload, 2), is(Short.MAX_VALUE));
}
|
/**
 * Computes a 31-based hash over topic, partition, headers, key, value and timestamp,
 * treating null fields as 0. Consistent with equals.
 */
@Override
public int hashCode() {
    final Object[] fields = {topic, partition, headers, key, value, timestamp};
    int result = 0;
    for (Object field : fields) {
        // Starting from 0, the first iteration yields exactly the field's hash (31 * 0 + h).
        result = 31 * result + (field != null ? field.hashCode() : 0);
    }
    return result;
}
|
/**
 * equals/hashCode contract: reflexive, equal for identical field values (with equal hashes),
 * unequal when any of topic/partition/key/value differs, and tolerant of all-null optional
 * fields.
 */
@Test
public void testEqualsAndHashCode() {
    ProducerRecord<String, Integer> producerRecord = new ProducerRecord<>("test", 1, "key", 1);
    assertEquals(producerRecord, producerRecord);
    assertEquals(producerRecord.hashCode(), producerRecord.hashCode());
    ProducerRecord<String, Integer> equalRecord = new ProducerRecord<>("test", 1, "key", 1);
    assertEquals(producerRecord, equalRecord);
    assertEquals(producerRecord.hashCode(), equalRecord.hashCode());
    // Each single-field mismatch must break equality.
    ProducerRecord<String, Integer> topicMisMatch = new ProducerRecord<>("test-1", 1, "key", 1);
    assertNotEquals(producerRecord, topicMisMatch);
    ProducerRecord<String, Integer> partitionMismatch = new ProducerRecord<>("test", 2, "key", 1);
    assertNotEquals(producerRecord, partitionMismatch);
    ProducerRecord<String, Integer> keyMisMatch = new ProducerRecord<>("test", 1, "key-1", 1);
    assertNotEquals(producerRecord, keyMisMatch);
    ProducerRecord<String, Integer> valueMisMatch = new ProducerRecord<>("test", 1, "key", 2);
    assertNotEquals(producerRecord, valueMisMatch);
    // All-null optional fields must not break equals/hashCode.
    ProducerRecord<String, Integer> nullFieldsRecord = new ProducerRecord<>("topic", null, null, null, null, null);
    assertEquals(nullFieldsRecord, nullFieldsRecord);
    assertEquals(nullFieldsRecord.hashCode(), nullFieldsRecord.hashCode());
}
|
/**
 * Registers an application master through the per-application interceptor pipeline, recording
 * request-count and latency metrics on success and a failure counter on any throwable.
 */
@Override
public RegisterApplicationMasterResponse registerApplicationMaster(
    RegisterApplicationMasterRequest request) throws YarnException,
    IOException {
    this.metrics.incrRequestCount();
    long startTime = clock.getTime();
    try {
        // Resolve (and authorize) the interceptor chain for the calling application attempt.
        RequestInterceptorChainWrapper pipeline =
            authorizeAndGetInterceptorChain();
        LOG.info("RegisteringAM Host: {}, Port: {}, Tracking Url: {} for application {}. ",
            request.getHost(), request.getRpcPort(), request.getTrackingUrl(),
            pipeline.getApplicationAttemptId());
        RegisterApplicationMasterResponse response =
            pipeline.getRootInterceptor().registerApplicationMaster(request);
        long endTime = clock.getTime();
        this.metrics.succeededRegisterAMRequests(endTime - startTime);
        LOG.info("RegisterAM processing finished in {} ms for application {}.",
            endTime - startTime, pipeline.getApplicationAttemptId());
        return response;
    } catch (Throwable t) {
        // Count the failure, then rethrow unchanged.
        this.metrics.incrFailedRegisterAMRequests();
        throw t;
    }
}
|
/** Registering one AM through the pipeline must echo the app id back as the queue name. */
@Test
public void testRegisterOneApplicationMaster() throws Exception {
    // The testAppId identifier is used as host name and the mock resource
    // manager return it as the queue name. Assert that we received the queue
    // name
    int testAppId = 1;
    RegisterApplicationMasterResponse response1 = registerApplicationMaster(testAppId);
    Assert.assertNotNull(response1);
    Assert.assertEquals(Integer.toString(testAppId), response1.getQueue());
}
|
/**
 * Decides whether the given HTTP status should be treated as a reportable failure.
 *
 * <p>Precedence: an explicitly configured status list wins outright; otherwise configured
 * series are consulted (with INTERNAL_SERVER_ERROR optionally suppressed); otherwise a built-in
 * default status set is used. A null status is never a failure.
 */
static boolean apply(@Nullable HttpStatus httpStatus) {
    if (Objects.isNull(httpStatus)) {
        return false;
    }
    RpcEnhancementReporterProperties reportProperties;
    try {
        reportProperties = ApplicationContextAwareUtils.getApplicationContext()
                .getBean(RpcEnhancementReporterProperties.class);
    }
    catch (BeansException e) {
        // Fall back to default properties when the bean cannot be resolved.
        LOG.error("get RpcEnhancementReporterProperties bean err", e);
        reportProperties = new RpcEnhancementReporterProperties();
    }
    // statuses > series
    List<HttpStatus> status = reportProperties.getStatuses();
    if (status.isEmpty()) {
        List<HttpStatus.Series> series = reportProperties.getSeries();
        // Check INTERNAL_SERVER_ERROR (500) status.
        if (reportProperties.isIgnoreInternalServerError() && Objects.equals(httpStatus, INTERNAL_SERVER_ERROR)) {
            return false;
        }
        if (series.isEmpty()) {
            // No user configuration at all: use the built-in default status set.
            return HTTP_STATUSES.contains(httpStatus);
        }
        return series.contains(httpStatus.series());
    }
    // Use the user-specified fuse status code.
    return status.contains(httpStatus);
}
|
/**
 * With only the CLIENT_ERROR series configured: 4xx statuses (403) are reported; 2xx (200) and
 * 5xx (502) are not; 500 is suppressed by the ignore-internal-server-error check.
 */
@Test
public void testApplyWithSeries() {
    RpcEnhancementReporterProperties properties = new RpcEnhancementReporterProperties();
    // Mock Condition
    properties.getStatuses().clear();
    properties.getSeries().clear();
    properties.getSeries().add(HttpStatus.Series.CLIENT_ERROR);
    ApplicationContext applicationContext = mock(ApplicationContext.class);
    doReturn(properties)
        .when(applicationContext).getBean(RpcEnhancementReporterProperties.class);
    mockedApplicationContextAwareUtils.when(ApplicationContextAwareUtils::getApplicationContext)
        .thenReturn(applicationContext);
    // Assert
    assertThat(PolarisEnhancedPluginUtils.apply(HttpStatus.OK)).isEqualTo(false);
    assertThat(PolarisEnhancedPluginUtils.apply(HttpStatus.INTERNAL_SERVER_ERROR)).isEqualTo(false);
    assertThat(PolarisEnhancedPluginUtils.apply(HttpStatus.BAD_GATEWAY)).isEqualTo(false);
    assertThat(PolarisEnhancedPluginUtils.apply(HttpStatus.FORBIDDEN)).isEqualTo(true);
}
|
@Override
@CheckForNull
public EmailMessage format(Notification notif) {
    // Only ChangesOnMyIssuesNotification is handled by this template.
    if (!(notif instanceof ChangesOnMyIssuesNotification)) {
        return null;
    }
    ChangesOnMyIssuesNotification notification = (ChangesOnMyIssuesNotification) notif;
    if (!(notification.getChange() instanceof AnalysisChange)) {
        // Changes spanning several projects/analyses get the multi-project layout.
        return formatMultiProject(notification);
    }
    // Single-analysis change: all issues share one project, so any key will do.
    checkState(!notification.getChangedIssues().isEmpty(), "changedIssues can't be empty");
    return formatAnalysisNotification(notification.getChangedIssues().keySet().iterator().next(), notification);
}
|
@Test
public void user_input_content_should_be_html_escape() {
  // Project and rule names containing markup must only ever appear in the
  // generated email in their HTML-escaped form.
  Project project = new Project.Builder("uuid").setProjectName("</projectName>").setKey("project_key").build();
  String ruleName = "</RandomRule>";
  String serverBaseUrl = randomAlphabetic(15);
  Rule rule = newRule(ruleName, randomRuleTypeHotspotExcluded());
  int issueCount = 2 + new Random().nextInt(5);
  List<ChangedIssue> changedIssues = IntStream.range(0, issueCount)
      .mapToObj(i -> newChangedIssue("issue_" + i, randomValidStatus(), project, rule))
      .collect(toList());
  UserChange userChange = newUserChange();
  when(emailSettings.getServerBaseURL()).thenReturn(serverBaseUrl);
  EmailMessage emailMessage = underTest.format(new ChangesOnMyIssuesNotification(userChange, ImmutableSet.copyOf(changedIssues)));
  // Raw names never appear; only their escaped variants do.
  assertThat(emailMessage.getMessage())
      .doesNotContain(project.getProjectName())
      .contains(escapeHtml4(project.getProjectName()))
      .doesNotContain(ruleName)
      .contains(escapeHtml4(ruleName));
  String expectedHref = serverBaseUrl + "/project/issues?id=" + project.getKey()
      + "&issues=" + changedIssues.stream().map(ChangedIssue::getKey).collect(joining("%2C"));
  String expectedLinkText = "See all " + changedIssues.size() + " issues";
  HtmlFragmentAssert.assertThat(emailMessage.getMessage())
      .hasParagraph().hasParagraph() // skip header
      .hasParagraph(project.getProjectName())
      .hasList("Rule " + ruleName + " - " + expectedLinkText)
      .withLink(expectedLinkText, expectedHref)
      .hasParagraph().hasParagraph() // skip footer
      .noMoreBlock();
}
|
@Override
public boolean equals(Object obj)
{
  // Fast path: the very same instance is trivially equal.
  if (obj == this)
  {
    return true;
  }
  // Compare runtime classes (rather than using instanceof) so that equals
  // stays symmetric across subclasses of Request.
  final boolean comparable = obj != null && getClass() == obj.getClass();
  return comparable && areNewFieldsEqual((Request<?>) obj);
}
|
@Test(dataProvider = "toRequestFieldsData")
public void testRequestPagingFieldsEqual(List<PathSpec> pathSpecs1, List<PathSpec> pathSpecs2, Map<String,String> param1, Map<String,String> param2, boolean expect)
{
  // Build two otherwise-identical requests that differ only in the supplied
  // params and paging fields, then check whether they compare equal.
  GetRequestBuilder<Long, TestRecord> first = generateDummyRequestBuilder();
  GetRequestBuilder<Long, TestRecord> second = generateDummyRequestBuilder();
  param1.forEach(first::setParam);
  param2.forEach(second::setParam);
  first.addPagingFields(pathSpecs1.toArray(new PathSpec[0]));
  second.addPagingFields(pathSpecs2.toArray(new PathSpec[0]));
  assertEquals(first.build().equals(second.build()), expect);
}
|
/**
 * Parses the command line, builds a data producer, and runs
 * {@code numThreads} generator tasks to completion.
 *
 * @param systemExit exit hook, injectable for testing
 * @param args raw command-line arguments
 * @throws Throwable the unwrapped cause of the first failed generator task
 */
static void run(
    final SystemExit systemExit,
    final String... args
) throws Throwable {
  final Arguments arguments = new Arguments.Builder()
      .parseArgs(args)
      .build();
  // Help flag: print usage and return without producing anything.
  if (arguments.help) {
    usage();
    return;
  }
  final Properties props = getProperties(arguments);
  final DataGenProducer dataProducer = ProducerFactory
      .getProducer(arguments.keyFormat, arguments.valueFormat, arguments.valueDelimiter, props);
  // msgRate == -1 means unthrottled; otherwise all tasks share one limiter.
  final Optional<RateLimiter> rateLimiter = arguments.msgRate != -1
      ? Optional.of(RateLimiter.create(arguments.msgRate)) : Optional.empty();
  // Daemon threads so stuck producer tasks cannot keep the JVM alive.
  final Executor executor = Executors.newFixedThreadPool(
      arguments.numThreads,
      r -> {
        final Thread thread = new Thread(r);
        thread.setDaemon(true);
        return thread;
      }
  );
  final CompletionService<Void> service = new ExecutorCompletionService<>(executor);
  for (int i = 0; i < arguments.numThreads; i++) {
    service.submit(getProducerTask(arguments, dataProducer, props, rateLimiter));
  }
  // Wait for every task; surface the first task failure as its own cause.
  for (int i = 0; i < arguments.numThreads; i++) {
    try {
      service.take().get();
    } catch (final InterruptedException e) {
      // Restore the interrupt status so callers can still observe it
      // (the original code swallowed it, losing the interruption).
      Thread.currentThread().interrupt();
      System.err.println("Interrupted waiting for threads to exit.");
      systemExit.exit(1);
    } catch (final ExecutionException e) {
      // Unwrap so callers see the real failure, not the executor wrapper.
      throw e.getCause();
    }
  }
}
|
@Test
public void shouldThrowIfSchemaFileDoesNotExist() throws Throwable {
  // When: running with a schema path that does not exist on disk.
  final String missingPath = "you/won't/find/me/right?";
  final Exception e = assertThrows(
      IllegalArgumentException.class,
      () -> DataGen.run(
          mockSystem,
          "schema=" + missingPath,
          "format=avro",
          "topic=foo",
          "key=id"
      )
  );
  // Then: the error identifies the missing file.
  assertThat(e.getMessage(), containsString("File not found: " + missingPath));
}
|
@Override
public Database getDb(String dbName) {
    try {
        // Delegate straight to the Hive metastore operations layer.
        return hmsOps.getDb(dbName);
    } catch (Exception e) {
        // Any lookup failure is logged and surfaced to callers as "not found".
        LOG.error("Failed to get hive database [{}.{}]", catalogName, dbName, e);
        return null;
    }
}
|
@Test
public void testGetDb() {
    // Resolving an existing database yields its full name.
    Database db = hiveMetadata.getDb("db1");
    Assert.assertEquals("db1", db.getFullName());
}
|
/**
 * Returns the localized description for the given trim-type code.
 * Out-of-range codes fall back to the first ("none") description.
 *
 * @param i the trim-type code
 * @return the matching description, never null
 */
public static final String getTrimTypeDesc( int i ) {
  boolean inRange = i >= 0 && i < trimTypeDesc.length;
  return trimTypeDesc[ inRange ? i : 0 ];
}
|
@Test
public void testGetTrimTypeDesc() {
  // Each trim-type code maps to its localized description.
  String noneDesc = BaseMessages.getString( PKG, "ValueMeta.TrimType.None" );
  assertEquals( ValueMetaBase.getTrimTypeDesc( ValueMetaInterface.TRIM_TYPE_NONE ), noneDesc );
  assertEquals( ValueMetaBase.getTrimTypeDesc( ValueMetaInterface.TRIM_TYPE_LEFT ),
    BaseMessages.getString( PKG, "ValueMeta.TrimType.Left" ) );
  assertEquals( ValueMetaBase.getTrimTypeDesc( ValueMetaInterface.TRIM_TYPE_RIGHT ),
    BaseMessages.getString( PKG, "ValueMeta.TrimType.Right" ) );
  assertEquals( ValueMetaBase.getTrimTypeDesc( ValueMetaInterface.TRIM_TYPE_BOTH ),
    BaseMessages.getString( PKG, "ValueMeta.TrimType.Both" ) );
  // Out-of-range codes (negative or too large) fall back to "none".
  assertEquals( ValueMetaBase.getTrimTypeDesc( -1 ), noneDesc );
  assertEquals( ValueMetaBase.getTrimTypeDesc( 10000 ), noneDesc );
}
|
/**
 * Splits {@code input} into entries on {@code entryDelimiter}, then splits
 * each entry into a key and a value on {@code kvDelimiter}.
 *
 * <p>Returns null when any argument is null, when either delimiter is empty,
 * or when both delimiters are identical (the split would be ambiguous).
 * Entries that do not contain {@code kvDelimiter} are dropped; when the same
 * key occurs more than once, the last value wins.
 */
@Udf
public Map<String, String> splitToMap(
    @UdfParameter(
        description = "The input string to split into map entries") final String input,
    @UdfParameter(
        description = "The delimiter that separates entries from each other") final String entryDelimiter,
    @UdfParameter(
        description = "The delimiter that separates a key from its value") final String kvDelimiter) {
  // Null arguments cannot be split.
  if (input == null || entryDelimiter == null || kvDelimiter == null) {
    return null;
  }
  // Empty or identical delimiters make the split ambiguous.
  if (entryDelimiter.isEmpty() || kvDelimiter.isEmpty() || entryDelimiter.equals(kvDelimiter)) {
    return null;
  }
  final Iterable<String> entries = Splitter.on(entryDelimiter).omitEmptyStrings().split(input);
  return StreamSupport.stream(entries.spliterator(), false)
      // Entries without the key/value delimiter are silently dropped.
      .filter(e -> e.contains(kvDelimiter))
      .map(kv -> Splitter.on(kvDelimiter).split(kv).iterator())
      .collect(Collectors.toMap(
          // toMap evaluates the key extractor before the value extractor,
          // so the first token is the key and the second is the value.
          Iterator::next,
          Iterator::next,
          // Duplicate keys: keep the last value seen.
          (v1, v2) -> v2));
}
|
@Test
public void shouldSplitStringByGivenDelimiterChars() {
  // "foo=apple;bar=cherry" -> {foo=apple, bar=cherry}
  final Map<String, String> result = udf.splitToMap("foo=apple;bar=cherry", ";", "=");
  assertThat(result.size(), equalTo(2));
  assertThat(result, hasEntry("foo", "apple"));
  assertThat(result, hasEntry("bar", "cherry"));
}
|
/**
 * Deserializes a single delimited (CSV) record into a list of column values.
 *
 * @param topic the topic the bytes were consumed from (used in error messages)
 * @param bytes the serialized record, or null for a null record
 * @return the parsed column values (nulls for empty fields), or null when
 *     {@code bytes} is null
 * @throws SerializationException if the record is empty, the column count
 *     does not match the configured parsers, or any field fails to parse
 */
@Override
public List<?> deserialize(final String topic, final byte[] bytes) {
  // Kafka convention: a null payload represents a null record (e.g. tombstone).
  if (bytes == null) {
    return null;
  }
  try {
    final String recordCsvString = new String(bytes, StandardCharsets.UTF_8);
    final List<CSVRecord> csvRecords = CSVParser.parse(recordCsvString, csvFormat)
        .getRecords();
    if (csvRecords.isEmpty()) {
      throw new SerializationException("No fields in record");
    }
    // Only the first CSV record is used; any trailing lines are ignored.
    final CSVRecord csvRecord = csvRecords.get(0);
    if (csvRecord == null || csvRecord.size() == 0) {
      throw new SerializationException("No fields in record.");
    }
    // Fail fast when the column count does not match the schema's parsers.
    SerdeUtils.throwOnColumnCountMismatch(parsers.size(), csvRecord.size(), false, topic);
    final List<Object> values = new ArrayList<>(parsers.size());
    final Iterator<Parser> pIt = parsers.iterator();
    // Columns and parsers are walked in lockstep: parser i handles column i.
    for (int i = 0; i < csvRecord.size(); i++) {
      final String value = csvRecord.get(i);
      final Parser parser = pIt.next();
      // Empty or missing fields deserialize to null rather than "".
      final Object parsed = value == null || value.isEmpty()
          ? null
          : parser.parse(value);
      values.add(parsed);
    }
    return values;
  } catch (final Exception e) {
    // All failures (including the SerializationExceptions thrown above) are
    // re-wrapped so callers always receive a SerializationException whose
    // cause is the original error.
    throw new SerializationException("Error deserializing delimited", e);
  }
}
|
@Test
public void shouldThrowIfRowHasTooMayColumns() {
  // Given: a row carrying 10 columns where the schema expects only 9.
  final byte[] bytes = "1511897796092,1,item_1,10.0,10.10,100,100,100,100,extra\r\n"
      .getBytes(StandardCharsets.UTF_8);
  // When:
  final SerializationException e = assertThrows(
      SerializationException.class,
      () -> deserializer.deserialize("t", bytes)
  );
  // Then: the cause pinpoints the column-count mismatch.
  assertThat(e.getCause().getMessage(),
      is("Column count mismatch on deserialization. topic: t, expected: 9, got: 10"));
}
|
/**
 * {@inheritDoc}
 *
 * <p>This manager tracks no shutdown-aware plugins, so the returned
 * collection is always empty (and immutable).
 */
@Override
public Collection<ShutdownAwarePlugin> getShutdownAwarePluginList() {
    return Collections.emptyList();
}
|
@Test
public void testGetShutdownAwarePluginList() {
    // The manager must expose an empty shutdown-aware plugin collection.
    Assert.assertEquals(Collections.emptyList(), manager.getShutdownAwarePluginList());
}
|
/**
 * Verifies the given JWT's signature and validity.
 *
 * @param jwt the compact JWS string to validate
 * @throws ResponseStatusException UNAUTHORIZED when the token is expired or
 *     otherwise invalid; INTERNAL_SERVER_ERROR for unexpected failures
 */
public void verifyAndValidate(final String jwt) {
    try {
        Jws<Claims> claimsJws = Jwts.parser()
                .verifyWith(tokenConfigurationParameter.getPublicKey())
                .build()
                .parseSignedClaims(jwt);
        // parseSignedClaims already verifies the signature AND the exp claim,
        // throwing ExpiredJwtException for expired tokens. The previous manual
        // expiration check was dead code and, had it ever fired, would have
        // thrown a plain JwtException that the catch chain misreported as
        // "Invalid JWT token" instead of "Token has expired".
        // Log the claims for debugging purposes
        Claims claims = claimsJws.getPayload();
        log.info("Token claims: {}", claims);
        log.info("Token is valid");
    } catch (ExpiredJwtException e) {
        log.error("Token has expired", e);
        throw new ResponseStatusException(HttpStatus.UNAUTHORIZED, "Token has expired", e);
    } catch (JwtException e) {
        log.error("Invalid JWT token", e);
        throw new ResponseStatusException(HttpStatus.UNAUTHORIZED, "Invalid JWT token", e);
    } catch (Exception e) {
        // Anything unexpected (e.g. key material problems) is a server error.
        log.error("Error validating token", e);
        throw new ResponseStatusException(HttpStatus.INTERNAL_SERVER_ERROR, "Error validating token", e);
    }
}
|
@Test
void givenExpiredToken_whenVerifyAndValidate_thenThrowJwtException() {
    // Given: a token issued one day ago that expired twelve hours ago.
    long now = System.currentTimeMillis();
    String expiredToken = Jwts.builder()
            .claim("user_id", "12345")
            .issuedAt(new Date(now - 86400000L))
            .expiration(new Date(now - 43200000L))
            .signWith(keyPair.getPrivate())
            .compact();
    // When & Then: validation fails with UNAUTHORIZED, caused by a JwtException.
    assertThatThrownBy(() -> tokenService.verifyAndValidate(expiredToken))
            .isInstanceOf(ResponseStatusException.class)
            .hasMessageContaining("Token has expired")
            .hasCauseInstanceOf(JwtException.class);
}
|
/**
 * Parses the given pom file into a {@link Model}.
 *
 * @param file the pom.xml file to read
 * @return the parsed model
 * @throws PomParseException if the file cannot be read or parsed
 */
public Model parse(File file) throws PomParseException {
    try (FileInputStream fis = new FileInputStream(file)) {
        return parse(fis);
    } catch (IOException ex) {
        // NOTE(review): this branch implies PomParseException extends
        // IOException — confirm; re-throw unchanged to keep its message.
        if (ex instanceof PomParseException) {
            throw (PomParseException) ex;
        }
        LOGGER.debug("", ex);
        throw new PomParseException(String.format("Unable to parse pom '%s'", file), ex);
    }
}
|
@Test
public void testParse_InputStreamWithDocType() throws Exception {
    // A pom containing a DOCTYPE declaration must still parse, and the
    // parent version must be extracted correctly.
    PomParser parser = new PomParser();
    InputStream inputStream = BaseTest.getResourceAsStream(this, "pom/mailapi-1.4.3_doctype.pom");
    Model result = parser.parse(inputStream);
    assertEquals("Invalid version extracted", "1.4.3", result.getParentVersion());
}
|
/**
 * Returns the rewrite rule that pulls eligible expressions out of lambdas
 * inside {@code ProjectNode}s.
 */
public Rule<ProjectNode> projectNodeRule()
{
    return new PullUpExpressionInLambdaProjectNodeRule();
}
|
@Test
public void testIfExpressionOnCondition()
{
    // The lambda body "if(col3 > 2, ...)" references only col3, which does not
    // depend on the lambda argument x, so the comparison should be pulled up
    // into its own projection ("greater_than") below the transform.
    tester().assertThat(new PullUpExpressionInLambdaRules(getFunctionManager()).projectNodeRule())
            .setSystemProperty(PULL_EXPRESSION_FROM_LAMBDA_ENABLED, "true")
            .on(p ->
            {
                p.variable("col1", new ArrayType(BOOLEAN));
                p.variable("col2", new ArrayType(BIGINT));
                p.variable("col3");
                return p.project(
                        Assignments.builder().put(p.variable("expr", VARCHAR), p.rowExpression(
                                "transform(col1, x -> if(col3 > 2, col2[2], 0))")).build(),
                        p.values(p.variable("col1", new ArrayType(BOOLEAN)), p.variable("col2", new ArrayType(BIGINT)), p.variable("col3")));
            }).matches(
                    // Expected plan: the invariant condition is computed once in a
                    // lower projection and referenced from inside the lambda.
                    project(
                            ImmutableMap.of("expr", expression("transform(col1, x -> if(greater_than, col2[2], 0))")),
                            project(ImmutableMap.of("greater_than", expression("col3>2")),
                                    values("col1", "col2", "col3"))));
}
|
/**
 * Returns the LU decomposition of this matrix.
 * Delegates to {@link #lu(boolean)} with {@code false} — presumably the
 * non-overwriting variant; confirm against lu(boolean)'s contract.
 */
public LU lu() {
    return lu(false);
}
|
@Test
public void testLU() {
    System.out.println("LU");
    // NOTE(review): several double literals carry a stray 'f' suffix (e.g.
    // 0.7000f), so they are float values widened to double — slightly off
    // from the exact decimal. Left as-is because the expected solutions were
    // presumably computed against these exact values; verify before "fixing".
    double[][] A = {
        {0.9000, 0.4000, 0.7000f},
        {0.4000, 0.5000, 0.3000f},
        {0.7000, 0.3000, 0.8000f}
    };
    double[] b = {0.5, 0.5, 0.5f};
    double[] x = {-0.2027027, 0.8783784, 0.4729730f};
    Matrix a = Matrix.of(A);
    Matrix.LU lu = a.lu();
    // Single right-hand side: solve A x = b and compare element-wise.
    double[] x2 = lu.solve(b);
    assertEquals(x.length, x2.length);
    for (int i = 0; i < x.length; i++) {
        assertEquals(x[i], x2[i], 1E-7);
    }
    double[][] B = {
        {0.5, 0.2f},
        {0.5, 0.8f},
        {0.5, 0.3f}
    };
    double[][] X = {
        {-0.2027027, -1.2837838f},
        { 0.8783784, 2.2297297f},
        { 0.4729730, 0.6621622f}
    };
    // Multiple right-hand sides: solve(X2) overwrites X2 with the solution.
    Matrix X2 = Matrix.of(B);
    lu.solve(X2);
    assertEquals(X.length, X2.nrow());
    assertEquals(X[0].length, X2.ncol());
    for (int i = 0; i < X.length; i++) {
        for (int j = 0; j < X[i].length; j++) {
            assertEquals(X[i][j], X2.get(i, j), 1E-7);
        }
    }
}
|
/**
 * Creates a link key from the given source and destination connect points.
 *
 * @param src source connect point; must not be null
 * @param dst destination connect point; must not be null
 */
private LinkKey(ConnectPoint src, ConnectPoint dst) {
    this.src = checkNotNull(src);
    this.dst = checkNotNull(dst);
}
|
@Test
public void testCompareNotEquals() {
    // Keys sharing a source but differing in destination must not be equal,
    // and a key never equals an object of an unrelated type.
    LinkKey sameSrc = LinkKey.linkKey(SRC1, DST1);
    LinkKey otherDst = LinkKey.linkKey(SRC1, DST2);
    assertThat(sameSrc, is(not(equalTo(otherDst))));
    assertThat(sameSrc, is(not(equalTo(new Object()))));
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.