focal_method / test_case
@Override public String getConfig(final String dataId) { try { return configService.getConfig(dataId, NacosPathConstants.GROUP, NacosPathConstants.DEFAULT_TIME_OUT); } catch (NacosException e) { LOG.error("Get data from nacos error.", e); throw new ShenyuException(e.getMessage()); } }
@Test public void testOnMetaDataChanged() throws NacosException { when(configService.getConfig(anyString(), anyString(), anyLong())).thenReturn(null); MetaData metaData = MetaData.builder().id(MOCK_ID).path(MOCK_PATH).appName(MOCK_APP_NAME).build(); nacosDataChangedListener.onMetaDataChanged(ImmutableList.of(metaData), DataEventTypeEnum.DELETE); nacosDataChangedListener.onMetaDataChanged(ImmutableList.of(metaData), DataEventTypeEnum.REFRESH); nacosDataChangedListener.onMetaDataChanged(ImmutableList.of(metaData), DataEventTypeEnum.MYSELF); nacosDataChangedListener.onMetaDataChanged(ImmutableList.of(metaData), DataEventTypeEnum.CREATE); verify(configService, times(7)).publishConfig(any(String.class), any(String.class), any(String.class), any(String.class)); }
@Override public int read() throws IOException { fill(); if (buffered > position) { return 0xff & buffer[position++]; } else { return -1; } }
@Test public void testNullStream() throws IOException { InputStream lookahead = new LookaheadInputStream(null, 100); assertEquals(-1, lookahead.read()); }
@ThriftField(1) public boolean isAll() { return all; }
@Test public void testFromValueSetAll() { PrestoThriftValueSet thriftValueSet = fromValueSet(ValueSet.all(HYPER_LOG_LOG)); assertNotNull(thriftValueSet.getAllOrNoneValueSet()); assertTrue(thriftValueSet.getAllOrNoneValueSet().isAll()); }
public Parser getParser() { return parser; }
@Test public void testDynamicServiceLoaderFromConfig() throws Exception { URL url = getResourceAsUrl("TIKA-1700-dynamic.xml"); TikaConfig config = new TikaConfig(url); DummyParser parser = (DummyParser) config.getParser(); ServiceLoader loader = parser.getLoader(); boolean dynamicValue = loader.isDynamic(); assertTrue(dynamicValue, "Dynamic Service Loading Should be true"); }
public static InfluxDBSinkConfig load(String yamlFile) throws IOException { ObjectMapper mapper = new ObjectMapper(new YAMLFactory()); return mapper.readValue(new File(yamlFile), InfluxDBSinkConfig.class); }
@Test public final void loadFromYamlFileTest() throws IOException { File yamlFile = getFile("sinkConfig-v1.yaml"); String path = yamlFile.getAbsolutePath(); InfluxDBSinkConfig config = InfluxDBSinkConfig.load(path); assertNotNull(config); assertEquals("http://localhost:8086", config.getInfluxdbUrl()); assertEquals("test_db", config.getDatabase()); assertEquals("ONE", config.getConsistencyLevel()); assertEquals("NONE", config.getLogLevel()); assertEquals("autogen", config.getRetentionPolicy()); assertEquals(Boolean.parseBoolean("false"), config.isGzipEnable()); assertEquals(Long.parseLong("1000"), config.getBatchTimeMs()); assertEquals(Integer.parseInt("100"), config.getBatchSize()); }
public ResignedState( Time time, int localId, int epoch, Set<Integer> voters, long electionTimeoutMs, List<ReplicaKey> preferredSuccessors, Endpoints endpoints, LogContext logContext ) { this.localId = localId; this.epoch = epoch; this.voters = voters; this.unackedVoters = new HashSet<>(voters); this.unackedVoters.remove(localId); this.electionTimeoutMs = electionTimeoutMs; this.electionTimer = time.timer(electionTimeoutMs); this.preferredSuccessors = preferredSuccessors; this.endpoints = endpoints; this.log = logContext.logger(ResignedState.class); }
@Test public void testResignedState() { int remoteId = 1; Set<Integer> voters = Utils.mkSet(localId, remoteId); ResignedState state = newResignedState(voters); assertEquals(ElectionState.withElectedLeader(epoch, localId, voters), state.election()); assertEquals(epoch, state.epoch()); assertEquals(Collections.singleton(remoteId), state.unackedVoters()); state.acknowledgeResignation(remoteId); assertEquals(Collections.emptySet(), state.unackedVoters()); assertEquals(electionTimeoutMs, state.remainingElectionTimeMs(time.milliseconds())); assertFalse(state.hasElectionTimeoutExpired(time.milliseconds())); time.sleep(electionTimeoutMs / 2); assertEquals(electionTimeoutMs / 2, state.remainingElectionTimeMs(time.milliseconds())); assertFalse(state.hasElectionTimeoutExpired(time.milliseconds())); time.sleep(electionTimeoutMs / 2); assertEquals(0, state.remainingElectionTimeMs(time.milliseconds())); assertTrue(state.hasElectionTimeoutExpired(time.milliseconds())); }
@Override public Object[] toArray() { throw new UnsupportedOperationException(); }
@Test(expected = UnsupportedOperationException.class) public void testToArray() { queue.toArray(); }
public String stringify(boolean value) { throw new UnsupportedOperationException( "stringify(boolean) was called on a non-boolean stringifier: " + toString()); }
@Test
public void testUUIDStringifier() {
    PrimitiveStringifier stringifier = PrimitiveStringifier.UUID_STRINGIFIER;
    assertEquals("00112233-4455-6677-8899-aabbccddeeff", stringifier.stringify(toBinary(
            0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff)));
    assertEquals("00000000-0000-0000-0000-000000000000", stringifier.stringify(toBinary(
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)));
    assertEquals("ffffffff-ffff-ffff-ffff-ffffffffffff", stringifier.stringify(toBinary(
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff)));
    assertEquals("0eb1497c-19b6-42bc-b028-b4b612bed141", stringifier.stringify(toBinary(
            0x0e, 0xb1, 0x49, 0x7c, 0x19, 0xb6, 0x42, 0xbc, 0xb0, 0x28, 0xb4, 0xb6, 0x12, 0xbe, 0xd1, 0x41)));
    // Check that the stringifier does not care about the length; it always takes the first 16 bytes
    assertEquals("87a09cca-3b1e-4a0a-9c77-591924c3b57b", stringifier.stringify(toBinary(
            0x87, 0xa0, 0x9c, 0xca, 0x3b, 0x1e, 0x4a, 0x0a, 0x9c, 0x77, 0x59, 0x19, 0x24, 0xc3, 0xb5, 0x7b, 0x00, 0x00, 0x00)));
    // As there is no validation implemented, if the 16 bytes are not available, the array will be over-indexed
    TestUtils.assertThrows("Expected exception for over-indexing", ArrayIndexOutOfBoundsException.class,
            () -> stringifier.stringify(toBinary(
                    0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee)));
    checkThrowingUnsupportedException(stringifier, Binary.class);
}
@VisibleForTesting
static Instant getCreationTime(String configuredCreationTime, ProjectProperties projectProperties)
        throws DateTimeParseException, InvalidCreationTimeException {
    try {
        switch (configuredCreationTime) {
            case "EPOCH":
                return Instant.EPOCH;
            case "USE_CURRENT_TIMESTAMP":
                projectProperties.log(
                        LogEvent.debug(
                                "Setting image creation time to current time; your image may not be reproducible."));
                return Instant.now();
            default:
                DateTimeFormatter formatter =
                        new DateTimeFormatterBuilder()
                                .append(DateTimeFormatter.ISO_DATE_TIME) // parses isoStrict
                                // add ability to parse with no ":" in tz
                                .optionalStart()
                                .appendOffset("+HHmm", "+0000")
                                .optionalEnd()
                                .toFormatter();
                return formatter.parse(configuredCreationTime, Instant::from);
        }
    } catch (DateTimeParseException ex) {
        throw new InvalidCreationTimeException(configuredCreationTime, configuredCreationTime, ex);
    }
}
@Test public void testGetCreationTime_epoch() throws InvalidCreationTimeException { Instant time = PluginConfigurationProcessor.getCreationTime("EPOCH", projectProperties); assertThat(time).isEqualTo(Instant.EPOCH); }
@Override public String getSessionId() { return sessionID; }
@Test public void testEditConfigRequestWithOnlyNewConfiguration() { log.info("Starting edit-config async"); assertNotNull("Incorrect sessionId", session1.getSessionId()); try { assertTrue("NETCONF edit-config command failed", session1.editConfig(EDIT_CONFIG_REQUEST)); } catch (NetconfException e) { e.printStackTrace(); fail("NETCONF edit-config test failed: " + e.getMessage()); } log.info("Finishing edit-config async"); }
public void close(final boolean closeQueries) { primaryContext.getQueryRegistry().close(closeQueries); try { cleanupService.stopAsync().awaitTerminated( this.primaryContext.getKsqlConfig() .getLong(KsqlConfig.KSQL_QUERY_CLEANUP_SHUTDOWN_TIMEOUT_MS), TimeUnit.MILLISECONDS); } catch (final TimeoutException e) { log.warn("Timed out while closing cleanup service. " + "External resources for the following applications may be orphaned: {}", cleanupService.pendingApplicationIds() ); } engineMetrics.close(); aggregateMetricsCollector.shutdown(); }
@Test
public void shouldCleanUpInternalTopicsOnEngineCloseForTransientQueries() {
    // Given:
    setupKsqlEngineWithSharedRuntimeDisabled();
    final QueryMetadata query = KsqlEngineTestUtil.executeQuery(
            serviceContext,
            ksqlEngine,
            "select * from test1 EMIT CHANGES;",
            ksqlConfig,
            Collections.emptyMap()
    );
    query.start();

    // When:
    ksqlEngine.close();
    isKsqlEngineClosed = true;

    // Then:
    verify(topicClient, times(2)).deleteInternalTopics(query.getQueryApplicationId());
}
@Override public Set<IpSubnet> convertFrom(String value) { final Set<IpSubnet> converted = new LinkedHashSet<>(); if (value != null) { Iterable<String> subnets = Splitter.on(',').trimResults().split(value); for (String subnet : subnets) { try { converted.add(new IpSubnet(subnet)); } catch (UnknownHostException e) { throw new ParameterException("Invalid subnet: " + subnet); } } } return converted; }
@Test public void convertFromThrowsParameterExceptionWithInvalidSubnet() { expectedException.expect(ParameterException.class); expectedException.expectMessage("Invalid subnet: HODOR"); converter.convertFrom("127.0.0.1/32, ::1/128, HODOR"); }
<T extends PipelineOptions> T as(Class<T> iface) {
    checkNotNull(iface);
    checkArgument(iface.isInterface(), "Not an interface: %s", iface);

    T existingOption = computedProperties.interfaceToProxyCache.getInstance(iface);
    if (existingOption == null) {
        synchronized (this) {
            // double check
            existingOption = computedProperties.interfaceToProxyCache.getInstance(iface);
            if (existingOption == null) {
                Registration<T> registration =
                        PipelineOptionsFactory.CACHE
                                .get()
                                .validateWellFormed(iface, computedProperties.knownInterfaces);
                List<PropertyDescriptor> propertyDescriptors = registration.getPropertyDescriptors();
                Class<T> proxyClass = registration.getProxyClass();
                existingOption =
                        InstanceBuilder.ofType(proxyClass)
                                .fromClass(proxyClass)
                                .withArg(InvocationHandler.class, this)
                                .build();
                computedProperties =
                        computedProperties.updated(iface, existingOption, propertyDescriptors);
            }
        }
    }
    return existingOption;
}
@Test public void testDisplayDataDeserializationWithRegistration() throws Exception { PipelineOptionsFactory.register(HasClassOptions.class); HasClassOptions options = PipelineOptionsFactory.as(HasClassOptions.class); options.setClassOption(ProxyInvocationHandlerTest.class); PipelineOptions deserializedOptions = serializeDeserialize(PipelineOptions.class, options); DisplayData displayData = DisplayData.from(deserializedOptions); assertThat(displayData, hasDisplayItem("classOption", ProxyInvocationHandlerTest.class)); }
@Override public void writeTo(ByteBuf byteBuf) throws LispWriterException { WRITER.writeTo(byteBuf, this); }
@Test public void testSerialization() throws LispReaderException, LispWriterException, LispParseError { ByteBuf byteBuf = Unpooled.buffer(); LocatorWriter writer = new LocatorWriter(); writer.writeTo(byteBuf, record1); LocatorReader reader = new LocatorReader(); LispLocator deserialized = reader.readFrom(byteBuf); new EqualsTester() .addEqualityGroup(record1, deserialized).testEquals(); }
@Override public KTable<Windowed<K>, V> aggregate(final Initializer<V> initializer) { return aggregate(initializer, Materialized.with(null, null)); }
@Test public void shouldNotHaveNullMaterializedOnAggregate() { assertThrows(NullPointerException.class, () -> windowedCogroupedStream.aggregate(MockInitializer.STRING_INIT, Named.as("Test"), null)); }
@Override public String getManagedUsersSqlFilter(boolean filterByManaged) { return findManagedInstanceService() .map(managedInstanceService -> managedInstanceService.getManagedUsersSqlFilter(filterByManaged)) .orElseThrow(() -> NOT_MANAGED_INSTANCE_EXCEPTION); }
@Test public void getManagedUsersSqlFilter_whenNoDelegates_throws() { Set<ManagedInstanceService> managedInstanceServices = emptySet(); DelegatingManagedServices delegatingManagedServices = new DelegatingManagedServices(managedInstanceServices); assertThatIllegalStateException() .isThrownBy(() -> delegatingManagedServices.getManagedUsersSqlFilter(true)) .withMessage("This instance is not managed."); }
@Override public Set<Long> calculateUsers(DelegateExecution execution, String param) { return StrUtils.splitToLongSet(param); }
@Test
public void testCalculateUsers() {
    // prepare parameters
    String param = "1,2";
    // invoke
    Set<Long> results = strategy.calculateUsers(null, param);
    // assert
    assertEquals(asSet(1L, 2L), results);
}
static int inferParallelism(
        ReadableConfig readableConfig, long limitCount, Supplier<Integer> splitCountProvider) {
    int parallelism =
            readableConfig.get(ExecutionConfigOptions.TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM);
    if (readableConfig.get(FlinkConfigOptions.TABLE_EXEC_ICEBERG_INFER_SOURCE_PARALLELISM)) {
        int maxInferParallelism =
                readableConfig.get(FlinkConfigOptions.TABLE_EXEC_ICEBERG_INFER_SOURCE_PARALLELISM_MAX);
        Preconditions.checkState(
                maxInferParallelism >= 1,
                FlinkConfigOptions.TABLE_EXEC_ICEBERG_INFER_SOURCE_PARALLELISM_MAX.key()
                        + " cannot be less than 1");
        parallelism = Math.min(splitCountProvider.get(), maxInferParallelism);
    }

    if (limitCount > 0) {
        int limit = limitCount >= Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) limitCount;
        parallelism = Math.min(parallelism, limit);
    }

    // parallelism must be positive.
    parallelism = Math.max(1, parallelism);
    return parallelism;
}
@Test
public void testInferedParallelism() throws IOException {
    Configuration configuration = new Configuration();
    // Empty table, infer parallelism should be at least 1
    int parallelism = SourceUtil.inferParallelism(configuration, -1L, () -> 0);
    assertThat(parallelism).isEqualTo(1);

    // 2 splits (max infer is the default value 100, max > splits num), the parallelism is splits num: 2
    parallelism = SourceUtil.inferParallelism(configuration, -1L, () -> 2);
    assertThat(parallelism).isEqualTo(2);

    // 2 splits and limit is 1, max infer parallelism is default 100,
    // which is greater than splits num and limit, the parallelism is the limit value: 1
    parallelism = SourceUtil.inferParallelism(configuration, 1, () -> 2);
    assertThat(parallelism).isEqualTo(1);

    // 2 splits and max infer parallelism is 1 (max < splits num), the parallelism is 1
    configuration.setInteger(FlinkConfigOptions.TABLE_EXEC_ICEBERG_INFER_SOURCE_PARALLELISM_MAX, 1);
    parallelism = SourceUtil.inferParallelism(configuration, -1L, () -> 2);
    assertThat(parallelism).isEqualTo(1);

    // 2 splits, max infer parallelism is 1, limit is 3, the parallelism is max infer parallelism: 1
    parallelism = SourceUtil.inferParallelism(configuration, 3, () -> 2);
    assertThat(parallelism).isEqualTo(1);

    // 2 splits, infer parallelism is disabled, the parallelism is flink default parallelism 1
    configuration.setBoolean(FlinkConfigOptions.TABLE_EXEC_ICEBERG_INFER_SOURCE_PARALLELISM, false);
    parallelism = SourceUtil.inferParallelism(configuration, 3, () -> 2);
    assertThat(parallelism).isEqualTo(1);
}
public static boolean versionSupportsMultiKeyPullQuery(final String ksqlServerVersion) { final KsqlVersion version; try { version = new KsqlVersion(ksqlServerVersion); } catch (IllegalArgumentException e) { LOGGER.warn("Could not parse ksqlDB server version to verify whether multi-key pull queries " + "are supported. Falling back to single-key pull queries only."); return false; } return version.isAtLeast(new KsqlVersion("6.1.")); }
@Test public void shouldReturnMultiKeyPullQueriesUnsupported() { assertThat(versionSupportsMultiKeyPullQuery("v6.0.3"), is(false)); assertThat(versionSupportsMultiKeyPullQuery("v6.0.0"), is(false)); assertThat(versionSupportsMultiKeyPullQuery("v5.5.5"), is(false)); assertThat(versionSupportsMultiKeyPullQuery("v4.0.1"), is(false)); assertThat(versionSupportsMultiKeyPullQuery("v0.13.5"), is(false)); assertThat(versionSupportsMultiKeyPullQuery("v0.8.0"), is(false)); assertThat(versionSupportsMultiKeyPullQuery("v0.6.0"), is(false)); assertThat(versionSupportsMultiKeyPullQuery("v0.6.0-rc123"), is(false)); assertThat(versionSupportsMultiKeyPullQuery("v0.6.0-ksqldb"), is(false)); assertThat(versionSupportsMultiKeyPullQuery("6.0.3"), is(false)); assertThat(versionSupportsMultiKeyPullQuery("6.0.0"), is(false)); assertThat(versionSupportsMultiKeyPullQuery("5.5.5"), is(false)); assertThat(versionSupportsMultiKeyPullQuery("4.0.1"), is(false)); assertThat(versionSupportsMultiKeyPullQuery("0.13.5"), is(false)); assertThat(versionSupportsMultiKeyPullQuery("0.8.0"), is(false)); assertThat(versionSupportsMultiKeyPullQuery("0.6.0"), is(false)); assertThat(versionSupportsMultiKeyPullQuery("0.6.0-rc123"), is(false)); assertThat(versionSupportsMultiKeyPullQuery("0.6.0-ksqldb"), is(false)); }
@Override
public synchronized T getValue(int index) {
    BarSeries series = getBarSeries();
    if (series == null) {
        // Series is null; the indicator doesn't need cache.
        // (e.g. simple computation of the value)
        // --> Calculating the value
        T result = calculate(index);
        if (log.isTraceEnabled()) {
            log.trace("{}({}): {}", this, index, result);
        }
        return result;
    }

    // Series is not null
    final int removedBarsCount = series.getRemovedBarsCount();
    final int maximumResultCount = series.getMaximumBarCount();

    T result;
    if (index < removedBarsCount) {
        // Result already removed from cache
        if (log.isTraceEnabled()) {
            log.trace("{}: result from bar {} already removed from cache, use {}-th instead",
                    getClass().getSimpleName(), index, removedBarsCount);
        }
        increaseLengthTo(removedBarsCount, maximumResultCount);
        highestResultIndex = removedBarsCount;
        result = results.get(0);
        if (result == null) {
            // It should be "result = calculate(removedBarsCount);".
            // We use "result = calculate(0);" as a workaround
            // to fix issue #120 (https://github.com/mdeverdelhan/ta4j/issues/120).
            result = calculate(0);
            results.set(0, result);
        }
    } else {
        if (index == series.getEndIndex()) {
            // Don't cache result if last bar
            result = calculate(index);
        } else {
            increaseLengthTo(index, maximumResultCount);
            if (index > highestResultIndex) {
                // Result not calculated yet
                highestResultIndex = index;
                result = calculate(index);
                results.set(results.size() - 1, result);
            } else {
                // Result covered by current cache
                int resultInnerIndex = results.size() - 1 - (highestResultIndex - index);
                result = results.get(resultInnerIndex);
                if (result == null) {
                    result = calculate(index);
                    results.set(resultInnerIndex, result);
                }
            }
        }
    }
    if (log.isTraceEnabled()) {
        log.trace("{}({}): {}", this, index, result);
    }
    return result;
}
@Test
public void leaveLastBarUncached() {
    BarSeries barSeries = new MockBarSeries(numFunction);
    SMAIndicator smaIndicator = new SMAIndicator(new ClosePriceIndicator(barSeries), 5);
    assertNumEquals(4998.0, smaIndicator.getValue(barSeries.getEndIndex()));
    barSeries.getLastBar().addTrade(numOf(10), numOf(5));
    // (4996 + 4997 + 4998 + 4999 + 5) / 5
    assertNumEquals(3999, smaIndicator.getValue(barSeries.getEndIndex()));
}
public static String quoteStringLiteralForJson(String string) { return '"' + new String(JsonStringEncoder.getInstance().quoteAsUTF8(string)) + '"'; }
@Test public void testQuoteJson() { assertEquals("\"foo\"", quoteStringLiteralForJson("foo")); assertEquals("\"Presto's\"", quoteStringLiteralForJson("Presto's")); assertEquals("\"xx\\\"xx\"", quoteStringLiteralForJson("xx\"xx")); }
public static Comparator<byte[]> arrayUnsignedComparator() { return ARRAY_UNSIGNED_COMPARATOR; }
@Test @Parameters(method = "arrayUnsignedComparatorVectors") public void testArrayUnsignedComparator(String stringA, String stringB, int expectedResult) { Comparator<byte[]> comparator = ByteUtils.arrayUnsignedComparator(); byte[] a = ByteUtils.parseHex(stringA); byte[] b = ByteUtils.parseHex(stringB); int actual = comparator.compare(a, b); assertEquals("", expectedResult, Integer.signum(actual)); }
Double getSum() {
    if (count == 0) {
        return null;
    }
    // Better error bounds to add both terms as the final sum
    double tmp = sum + sumCompensation;
    if (Double.isNaN(tmp) && Double.isInfinite(simpleSum)) {
        // If the compensated sum is spuriously NaN from
        // accumulating one or more same-signed infinite values,
        // return the correctly-signed infinity stored in simpleSum.
        return simpleSum;
    } else {
        return tmp;
    }
}
@Test public void testEmptySum() { DoubleStat sum = new DoubleStat(); assertNull(sum.getSum()); }
public static String getArrayType(TypeRef<?> type) { return getArrayType(getRawType(type)); }
@Test public void getArrayTypeTest() { assertEquals("int[][][][][]", TypeUtils.getArrayType(int[][][][][].class)); assertEquals("java.lang.Object[][][][][]", TypeUtils.getArrayType(Object[][][][][].class)); assertEquals("int[][][][][]", TypeUtils.getArrayType(TypeRef.of(int[][][][][].class))); assertEquals( "java.lang.Object[][][][][]", TypeUtils.getArrayType(TypeRef.of(Object[][][][][].class))); }
public boolean checkAccess(UserGroupInformation callerUGI,
        ApplicationAccessType applicationAccessType, String applicationOwner,
        ApplicationId applicationId) {
    LOG.debug("Verifying access-type {} for {} on application {} owned by {}",
            applicationAccessType, callerUGI, applicationId, applicationOwner);

    String user = callerUGI.getShortUserName();
    if (!areACLsEnabled()) {
        return true;
    }
    AccessControlList applicationACL = DEFAULT_YARN_APP_ACL;
    Map<ApplicationAccessType, AccessControlList> acls = this.applicationACLS.get(applicationId);
    if (acls == null) {
        LOG.debug("ACL not found for application {} owned by {}." + " Using default [{}]",
                applicationId, applicationOwner, YarnConfiguration.DEFAULT_YARN_APP_ACL);
    } else {
        AccessControlList applicationACLInMap = acls.get(applicationAccessType);
        if (applicationACLInMap != null) {
            applicationACL = applicationACLInMap;
        } else {
            LOG.debug("ACL not found for access-type {} for application {}"
                    + " owned by {}. Using default [{}]", applicationAccessType,
                    applicationId, applicationOwner, YarnConfiguration.DEFAULT_YARN_APP_ACL);
        }
    }

    // Allow application-owner for any type of access on the application
    if (this.adminAclsManager.isAdmin(callerUGI)
            || user.equals(applicationOwner)
            || applicationACL.isUserAllowed(callerUGI)) {
        return true;
    }
    return false;
}
@Test
void testCheckAccessWithNullACLS() {
    Configuration conf = new Configuration();
    conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
    conf.set(YarnConfiguration.YARN_ADMIN_ACL, ADMIN_USER);
    ApplicationACLsManager aclManager = new ApplicationACLsManager(conf);
    UserGroupInformation appOwner = UserGroupInformation.createRemoteUser(APP_OWNER);
    ApplicationId appId = ApplicationId.newInstance(1, 1);

    // Application ACL is not added
    // Application Owner should have all access even if Application ACL is not added
    assertTrue(aclManager.checkAccess(appOwner, ApplicationAccessType.MODIFY_APP, APP_OWNER, appId));
    assertTrue(aclManager.checkAccess(appOwner, ApplicationAccessType.VIEW_APP, APP_OWNER, appId));

    // Admin should have all access
    UserGroupInformation adminUser = UserGroupInformation.createRemoteUser(ADMIN_USER);
    assertTrue(aclManager.checkAccess(adminUser, ApplicationAccessType.VIEW_APP, APP_OWNER, appId));
    assertTrue(aclManager.checkAccess(adminUser, ApplicationAccessType.MODIFY_APP, APP_OWNER, appId));

    // A regular user should Not have access
    UserGroupInformation testUser1 = UserGroupInformation.createRemoteUser(TESTUSER1);
    assertFalse(aclManager.checkAccess(testUser1, ApplicationAccessType.VIEW_APP, APP_OWNER, appId));
    assertFalse(aclManager.checkAccess(testUser1, ApplicationAccessType.MODIFY_APP, APP_OWNER, appId));
}
public static PersistenceSchema from( final List<? extends SimpleColumn> columns, final SerdeFeatures features ) { return new PersistenceSchema(columns, features); }
@Test(expected = IllegalArgumentException.class) public void shouldThrowOnWrapIfMultipleFields() { PersistenceSchema.from(MULTI_COLUMN, SerdeFeatures.of(SerdeFeature.WRAP_SINGLES)); }
@Override public void releaseDataView() { memoryDataManagerOperation.onConsumerReleased(subpartitionId, consumerId); }
@Test void testRelease() { CompletableFuture<HsConsumerId> consumerReleasedFuture = new CompletableFuture<>(); TestingMemoryDataManagerOperation memoryDataManagerOperation = TestingMemoryDataManagerOperation.builder() .setOnConsumerReleasedBiConsumer( (subpartitionId, consumerId) -> { consumerReleasedFuture.complete(consumerId); }) .build(); HsConsumerId consumerId = HsConsumerId.newId(null); HsSubpartitionConsumerMemoryDataManager subpartitionConsumerMemoryDataManager = createSubpartitionConsumerMemoryDataManager(consumerId, memoryDataManagerOperation); subpartitionConsumerMemoryDataManager.releaseDataView(); assertThat(consumerReleasedFuture).isCompletedWithValue(consumerId); }
@Override public Mono<Subscription> subscribe(Subscription.Subscriber subscriber, Subscription.InterestReason reason) { return unsubscribe(subscriber, reason) .then(Mono.defer(() -> { var subscription = new Subscription(); subscription.setMetadata(new Metadata()); subscription.getMetadata().setGenerateName("subscription-"); subscription.setSpec(new Subscription.Spec()); subscription.getSpec().setUnsubscribeToken(Subscription.generateUnsubscribeToken()); subscription.getSpec().setSubscriber(subscriber); Subscription.InterestReason.ensureSubjectHasValue(reason); subscription.getSpec().setReason(reason); return client.create(subscription); })); }
@Test public void testSubscribe() { var spyNotificationCenter = spy(notificationCenter); Subscription subscription = createSubscriptions().get(0); var subscriber = subscription.getSpec().getSubscriber(); var reason = subscription.getSpec().getReason(); doReturn(Mono.empty()) .when(spyNotificationCenter).unsubscribe(eq(subscriber), eq(reason)); when(client.create(any(Subscription.class))).thenReturn(Mono.empty()); spyNotificationCenter.subscribe(subscriber, reason).block(); verify(client).create(any(Subscription.class)); }
private static <R extends JarRequestBody, M extends MessageParameters> List<String> getProgramArgs(HandlerRequest<R> request, Logger log) throws RestHandlerException { JarRequestBody requestBody = request.getRequestBody(); @SuppressWarnings("deprecation") List<String> programArgs = tokenizeArguments( fromRequestBodyOrQueryParameter( emptyToNull(requestBody.getProgramArguments()), () -> getQueryParameter(request, ProgramArgsQueryParameter.class), null, log)); List<String> programArgsList = fromRequestBodyOrQueryParameter( requestBody.getProgramArgumentsList(), () -> request.getQueryParameter(ProgramArgQueryParameter.class), null, log); if (!programArgsList.isEmpty()) { if (!programArgs.isEmpty()) { throw new RestHandlerException( "Confusing request: programArgs and programArgsList are specified, please, use only programArgsList", HttpResponseStatus.BAD_REQUEST); } return programArgsList; } else { return programArgs; } }
@Test void testFromRequestRequestBody() throws Exception { final JarPlanRequestBody requestBody = getDummyJarPlanRequestBody("entry-class", 37, null); final HandlerRequest<JarPlanRequestBody> request = getDummyRequest(requestBody); final JarHandlerUtils.JarHandlerContext jarHandlerContext = JarHandlerUtils.JarHandlerContext.fromRequest(request, tempDir, LOG); assertThat(jarHandlerContext.getEntryClass()).isEqualTo(requestBody.getEntryClassName()); assertThat(jarHandlerContext.getProgramArgs()) .containsExactlyElementsOf(requestBody.getProgramArgumentsList()); assertThat(jarHandlerContext.getParallelism()).isEqualTo(requestBody.getParallelism()); assertThat(jarHandlerContext.getJobId()).isEqualTo(requestBody.getJobId()); }
public static boolean isCarDrivingLicence(CharSequence value) { return isMatchRegex(CAR_DRIVING_LICENCE, value); }
@Test public void isCarDrivingLicenceTest() { assertTrue(Validator.isCarDrivingLicence("430101758218")); }
public <ConfigType extends ConfigInstance> ConfigType toInstance(Class<ConfigType> clazz, String configId) { return ConfigInstanceUtil.getNewInstance(clazz, configId, this); }
@Test public void test_empty_payload() { Slime slime = new Slime(); slime.setObject(); IntConfig config = new ConfigPayload(slime).toInstance(IntConfig.class, ""); assertThat(config.intVal(), is(1)); }
@Override public Collection<LocalDataQueryResultRow> getRows(final ExportStorageNodesStatement sqlStatement, final ContextManager contextManager) { checkSQLStatement(contextManager.getMetaDataContexts().getMetaData(), sqlStatement); String exportedData = generateExportData(contextManager.getMetaDataContexts().getMetaData(), sqlStatement); if (sqlStatement.getFilePath().isPresent()) { String filePath = sqlStatement.getFilePath().get(); ExportUtils.exportToFile(filePath, exportedData); return Collections.singleton(new LocalDataQueryResultRow(contextManager.getComputeNodeInstanceContext().getInstance().getMetaData().getId(), LocalDateTime.now(), String.format("Successfully exported to:'%s'", filePath))); } return Collections.singleton( new LocalDataQueryResultRow(contextManager.getComputeNodeInstanceContext().getInstance().getMetaData().getId(), LocalDateTime.now(), exportedData)); }
@Test void assertExecuteWithWrongDatabaseName() { ContextManager contextManager = mockEmptyContextManager(); when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager); when(ProxyContext.getInstance().getAllDatabaseNames()).thenReturn(Collections.singleton("empty_metadata")); ExportStorageNodesStatement sqlStatement = new ExportStorageNodesStatement("foo", null); assertThrows(IllegalArgumentException.class, () -> new ExportStorageNodesExecutor().getRows(sqlStatement, contextManager)); }
public int encodedLength() { if (presence() == CONSTANT) { return 0; } if (varLen) { return Token.VARIABLE_LENGTH; } return primitiveType.size() * length; }
@Test void shouldReturnCorrectSizeForPrimitiveTypes() throws Exception { final String testXmlString = "<types>" + " <type name=\"testTypeChar\" primitiveType=\"char\"/>" + " <type name=\"testTypeInt8\" primitiveType=\"int8\"/>" + " <type name=\"testTypeInt16\" primitiveType=\"int16\"/>" + " <type name=\"testTypeInt32\" primitiveType=\"int32\"/>" + " <type name=\"testTypeInt64\" primitiveType=\"int64\"/>" + " <type name=\"testTypeUInt8\" primitiveType=\"uint8\"/>" + " <type name=\"testTypeUInt16\" primitiveType=\"uint16\"/>" + " <type name=\"testTypeUInt32\" primitiveType=\"uint32\"/>" + " <type name=\"testTypeUInt64\" primitiveType=\"uint64\"/>" + " <type name=\"testTypeFloat\" primitiveType=\"float\"/>" + " <type name=\"testTypeDouble\" primitiveType=\"double\"/>" + "</types>"; final Map<String, Type> map = parseTestXmlWithMap("/types/type", testXmlString); assertThat(map.get("testTypeChar").encodedLength(), is(1)); assertThat(map.get("testTypeInt8").encodedLength(), is(1)); assertThat(map.get("testTypeInt32").encodedLength(), is(4)); assertThat(map.get("testTypeInt64").encodedLength(), is(8)); assertThat(map.get("testTypeUInt8").encodedLength(), is(1)); assertThat(map.get("testTypeUInt16").encodedLength(), is(2)); assertThat(map.get("testTypeUInt32").encodedLength(), is(4)); assertThat(map.get("testTypeUInt64").encodedLength(), is(8)); assertThat(map.get("testTypeFloat").encodedLength(), is(4)); assertThat(map.get("testTypeDouble").encodedLength(), is(8)); }
@Override public Set<String> getValues(Extension object) { if (getObjectType().isInstance(object)) { return getNonNullValues(getObjectType().cast(object)); } throw new IllegalArgumentException("Object type does not match"); }
@Test void getValues() { var attribute = new FunctionalMultiValueIndexAttribute<>(FakeExtension.class, FakeExtension::getCategories); var fake = new FakeExtension(); fake.setCategories(Set.of("test", "halo")); assertThat(attribute.getValues(fake)).isEqualTo(fake.getCategories()); var unstructured = Unstructured.OBJECT_MAPPER.convertValue(fake, Unstructured.class); assertThatThrownBy(() -> attribute.getValues(unstructured)) .isInstanceOf(IllegalArgumentException.class) .hasMessageContaining("Object type does not match"); var demoExt = new DemoExtension(); assertThatThrownBy(() -> attribute.getValues(demoExt)) .isInstanceOf(IllegalArgumentException.class) .hasMessageContaining("Object type does not match"); }
public static List<BlockParserFactory> calculateBlockParserFactories(
        List<BlockParserFactory> customBlockParserFactories,
        Set<Class<? extends Block>> enabledBlockTypes) {
    List<BlockParserFactory> list = new ArrayList<>();
    // By having the custom factories come first, extensions are able to change behavior of core syntax.
    list.addAll(customBlockParserFactories);
    for (Class<? extends Block> blockType : enabledBlockTypes) {
        list.add(NODES_TO_CORE_FACTORIES.get(blockType));
    }
    return list;
}
@Test public void calculateBlockParserFactories_givenAListOfAllowedNodes_includesAssociatedFactories() { List<BlockParserFactory> customParserFactories = List.of(); Set<Class<? extends Block>> nodes = new HashSet<>(); nodes.add(IndentedCodeBlock.class); List<BlockParserFactory> blockParserFactories = DocumentParser.calculateBlockParserFactories(customParserFactories, nodes); assertThat(blockParserFactories.size(), is(1)); assertTrue(hasInstance(blockParserFactories, IndentedCodeBlockParser.Factory.class)); }
public static KTableHolder<GenericKey> build( final KGroupedStreamHolder groupedStream, final StreamAggregate aggregate, final RuntimeBuildContext buildContext, final MaterializedFactory materializedFactory) { return build( groupedStream, aggregate, buildContext, materializedFactory, new AggregateParamsFactory() ); }
@Test
public void shouldBuildMaterializationCorrectlyForUnwindowedAggregate() {
    // Given:
    givenUnwindowedAggregate();

    // When:
    final KTableHolder<GenericKey> result = aggregate.build(planBuilder, planInfo);

    // Then:
    assertCorrectMaterializationBuilder(result, false);
}
public static NacosRestTemplate getNacosRestTemplate(Logger logger) { return getNacosRestTemplate(new DefaultHttpClientFactory(logger)); }
@Test void testGetNacosRestTemplateWithDefault() { assertTrue(restMap.isEmpty()); NacosRestTemplate actual = HttpClientBeanHolder.getNacosRestTemplate((Logger) null); assertEquals(1, restMap.size()); NacosRestTemplate duplicateGet = HttpClientBeanHolder.getNacosRestTemplate((Logger) null); assertEquals(1, restMap.size()); assertEquals(actual, duplicateGet); }
public static boolean isNotEmpty(@Nullable String string) { return string != null && !string.isEmpty(); }
@Test public void testEmptyString() { assertThat(StringUtils.isNotEmpty("")).isFalse(); }
public static int getTag(byte[] raw) { try (final Asn1InputStream is = new Asn1InputStream(raw)) { return is.readTag(); } }
@Test public void getTagDoubleByte() { assertEquals(0x7f01, Asn1Utils.getTag(new byte[] { 0x7f, 0x01, 0})); }
@Override public String execute(CommandContext commandContext, String[] args) { if (args.length > 0) { return "Unsupported parameter " + Arrays.toString(args) + " for pwd."; } String service = commandContext.getRemote().attr(ChangeTelnet.SERVICE_KEY).get(); StringBuilder buf = new StringBuilder(); if (StringUtils.isEmpty(service)) { buf.append('/'); } else { buf.append(service); } return buf.toString(); }
@Test void testSlash() throws RemotingException { defaultAttributeMap.attr(ChangeTelnet.SERVICE_KEY).set(null); String result = pwdTelnet.execute(mockCommandContext, new String[0]); assertEquals("/", result); }
public static String fix(final String raw) { if ( raw == null || "".equals( raw.trim() )) { return raw; } MacroProcessor macroProcessor = new MacroProcessor(); macroProcessor.setMacros( macros ); return macroProcessor.parse( raw ); }
@Test public void testRetract() { final String raw = "System.out.println(\"some text\");retract(object);"; assertEqualsIgnoreWhitespace( "System.out.println(\"some text\");drools.retract(object);", KnowledgeHelperFixerTest.fixer.fix( raw ) ); }
@Nullable
String getCollectionName(BsonDocument command, String commandName) {
    if (COMMANDS_WITH_COLLECTION_NAME.contains(commandName)) {
        String collectionName = getNonEmptyBsonString(command.get(commandName));
        if (collectionName != null) {
            return collectionName;
        }
    }
    // Some other commands, like getMore, have a field like {"collection": collectionName}.
    return getNonEmptyBsonString(command.get("collection"));
}
@Test void getCollectionName_notAllowListedCommand() { assertThat( listener.getCollectionName(new BsonDocument("cmd", new BsonString(" bar ")), "cmd")).isNull(); }
public static LoggingContext forConnector(String connectorName) { Objects.requireNonNull(connectorName); LoggingContext context = new LoggingContext(); MDC.put(CONNECTOR_CONTEXT, prefixFor(connectorName, Scope.WORKER, null)); return context; }
@Test public void shouldNotAllowNullConnectorNameForConnectorContext() { assertThrows(NullPointerException.class, () -> LoggingContext.forConnector(null)); }
public static void mergeParams( Map<String, ParamDefinition> params, Map<String, ParamDefinition> paramsToMerge, MergeContext context) { if (paramsToMerge == null) { return; } Stream.concat(params.keySet().stream(), paramsToMerge.keySet().stream()) .forEach( name -> { ParamDefinition paramToMerge = paramsToMerge.get(name); if (paramToMerge == null) { return; } if (paramToMerge.getType() == ParamType.MAP && paramToMerge.isLiteral()) { Map<String, ParamDefinition> baseMap = mapValueOrEmpty(params, name); Map<String, ParamDefinition> toMergeMap = mapValueOrEmpty(paramsToMerge, name); mergeParams( baseMap, toMergeMap, MergeContext.copyWithParentMode( context, params.getOrDefault(name, paramToMerge).getMode())); params.put( name, buildMergedParamDefinition( name, paramToMerge, params.get(name), context, baseMap)); } else if (paramToMerge.getType() == ParamType.STRING_MAP && paramToMerge.isLiteral()) { Map<String, String> baseMap = stringMapValueOrEmpty(params, name); Map<String, String> toMergeMap = stringMapValueOrEmpty(paramsToMerge, name); baseMap.putAll(toMergeMap); params.put( name, buildMergedParamDefinition( name, paramToMerge, params.get(name), context, baseMap)); } else { params.put( name, buildMergedParamDefinition( name, paramToMerge, params.get(name), context, paramToMerge.getValue())); } }); }
@Test
public void testKeepMergeKeepOrder() {
    Map<String, ParamDefinition> allParams = new LinkedHashMap<>();
    Map<String, ParamDefinition> paramsToMerge = new LinkedHashMap<>();
    String[] keyOrder = new String[40];

    // add params with some order, and few overlapping
    for (int i = 0; i < 20; i++) {
        String key = "prev_param_" + i;
        allParams.put(key, buildParam(key, key).toDefinition());
        keyOrder[i] = key;
        if (i <= 5) {
            paramsToMerge.put(key, buildParam(key, key + "_updated").toDefinition());
        }
        String newKey = "new_param_" + i;
        keyOrder[20 + i] = newKey;
        paramsToMerge.put(newKey, buildParam(newKey, newKey).toDefinition());
    }

    ParamsMergeHelper.mergeParams(allParams, paramsToMerge, definitionContext);
    assertArrayEquals(keyOrder, allParams.keySet().toArray());
}
public Set<Argument> getArguments(ServerWebExchange request, String namespace, String service) { RateLimitProto.RateLimit rateLimitRule = serviceRuleManager.getServiceRateLimitRule(namespace, service); if (rateLimitRule == null) { return Collections.emptySet(); } List<RateLimitProto.Rule> rules = rateLimitRule.getRulesList(); if (CollectionUtils.isEmpty(rules)) { return Collections.emptySet(); } return rules.stream() .flatMap(rule -> rule.getArgumentsList().stream()) .map(matchArgument -> { String matchKey = matchArgument.getKey(); Argument argument = null; switch (matchArgument.getType()) { case CUSTOM: argument = StringUtils.isBlank(matchKey) ? null : Argument.buildCustom(matchKey, Optional.ofNullable(getCustomResolvedLabels(request).get(matchKey)).orElse(StringUtils.EMPTY)); break; case METHOD: argument = Argument.buildMethod(request.getRequest().getMethodValue()); break; case HEADER: argument = StringUtils.isBlank(matchKey) ? null : Argument.buildHeader(matchKey, Optional.ofNullable(request.getRequest().getHeaders().getFirst(matchKey)).orElse(StringUtils.EMPTY)); break; case QUERY: argument = StringUtils.isBlank(matchKey) ? null : Argument.buildQuery(matchKey, Optional.ofNullable(request.getRequest().getQueryParams().getFirst(matchKey)).orElse(StringUtils.EMPTY)); break; case CALLER_SERVICE: String sourceServiceNamespace = MetadataContextHolder.getDisposableMetadata(DEFAULT_METADATA_SOURCE_SERVICE_NAMESPACE, true).orElse(StringUtils.EMPTY); String sourceServiceName = MetadataContextHolder.getDisposableMetadata(DEFAULT_METADATA_SOURCE_SERVICE_NAME, true).orElse(StringUtils.EMPTY); if (!StringUtils.isEmpty(sourceServiceNamespace) && !StringUtils.isEmpty(sourceServiceName)) { argument = Argument.buildCallerService(sourceServiceNamespace, sourceServiceName); } break; case CALLER_IP: InetSocketAddress remoteAddress = request.getRequest().getRemoteAddress(); argument = Argument.buildCallerIP(remoteAddress != null ? remoteAddress.getAddress().getHostAddress() : StringUtils.EMPTY); break; default: break; } return argument; }).filter(Objects::nonNull).collect(Collectors.toSet()); }
@Test
public void testGetRuleArguments() {
    // Mock request
    MetadataContext.LOCAL_SERVICE = "Test";
    // Mock request
    MockServerHttpRequest request = MockServerHttpRequest.get("http://127.0.0.1:8080/test")
            .remoteAddress(new InetSocketAddress("127.0.0.1", 8080))
            .header("xxx", "xxx")
            .queryParam("yyy", "yyy")
            .build();
    ServerWebExchange exchange = MockServerWebExchange.from(request);
    MetadataContext metadataContext = new MetadataContext();
    metadataContext.setUpstreamDisposableMetadata(new HashMap<String, String>() {{
        put(DEFAULT_METADATA_SOURCE_SERVICE_NAMESPACE, MetadataContext.LOCAL_NAMESPACE);
        put(DEFAULT_METADATA_SOURCE_SERVICE_NAME, MetadataContext.LOCAL_SERVICE);
    }});
    MetadataContextHolder.set(metadataContext);

    Set<Argument> arguments = rateLimitRuleArgumentReactiveResolver1.getArguments(exchange,
            MetadataContext.LOCAL_NAMESPACE, MetadataContext.LOCAL_SERVICE);
    Set<Argument> exceptRes = new HashSet<>();
    exceptRes.add(Argument.buildMethod("GET"));
    exceptRes.add(Argument.buildHeader("xxx", "xxx"));
    exceptRes.add(Argument.buildQuery("yyy", "yyy"));
    exceptRes.add(Argument.buildCallerIP("127.0.0.1"));
    exceptRes.add(Argument.buildCustom("xxx", "xxx"));
    exceptRes.add(Argument.buildCallerService(MetadataContext.LOCAL_NAMESPACE, MetadataContext.LOCAL_SERVICE));
    assertThat(arguments).isEqualTo(exceptRes);

    rateLimitRuleArgumentReactiveResolver2.getArguments(exchange, MetadataContext.LOCAL_NAMESPACE, MetadataContext.LOCAL_SERVICE);
    rateLimitRuleArgumentReactiveResolver3.getArguments(exchange, MetadataContext.LOCAL_NAMESPACE, MetadataContext.LOCAL_SERVICE);
    rateLimitRuleArgumentReactiveResolver4.getArguments(exchange, MetadataContext.LOCAL_NAMESPACE, MetadataContext.LOCAL_SERVICE);
}
@Override public Checksum compute(final InputStream in, final TransferStatus status) throws BackgroundException { return new Checksum(HashAlgorithm.md5, this.digest("MD5", this.normalize(in, status), status)); }
@Test public void testCompute() throws Exception { assertEquals("a43c1b0aa53a0c908810c06ab1ff3967", new MD5FastChecksumCompute().compute(IOUtils.toInputStream("input", Charset.defaultCharset()), new TransferStatus()).hash); }
public static void main(String[] args) {
    // create party and members
    Party party = new PartyImpl();
    var hobbit = new Hobbit();
    var wizard = new Wizard();
    var rogue = new Rogue();
    var hunter = new Hunter();

    // add party members
    party.addMember(hobbit);
    party.addMember(wizard);
    party.addMember(rogue);
    party.addMember(hunter);

    // perform actions -> the other party members
    // are notified by the party
    hobbit.act(Action.ENEMY);
    wizard.act(Action.TALE);
    rogue.act(Action.GOLD);
    hunter.act(Action.HUNT);
}
@Test void shouldExecuteApplicationWithoutException() { assertDoesNotThrow(() -> App.main(new String[]{})); }
@Override
public Map<String, Object> assembleFrom(OAuth2AccessTokenEntity accessToken, UserInfo userInfo, Set<String> authScopes) {
    Map<String, Object> result = newLinkedHashMap();
    OAuth2Authentication authentication = accessToken.getAuthenticationHolder().getAuthentication();

    result.put(ACTIVE, true);

    if (accessToken.getPermissions() != null && !accessToken.getPermissions().isEmpty()) {
        Set<Object> permissions = Sets.newHashSet();
        for (Permission perm : accessToken.getPermissions()) {
            Map<String, Object> o = newLinkedHashMap();
            o.put("resource_set_id", perm.getResourceSet().getId().toString());
            Set<String> scopes = Sets.newHashSet(perm.getScopes());
            o.put("scopes", scopes);
            permissions.add(o);
        }
        result.put("permissions", permissions);
    } else {
        Set<String> scopes = Sets.intersection(authScopes, accessToken.getScope());
        result.put(SCOPE, Joiner.on(SCOPE_SEPARATOR).join(scopes));
    }

    if (accessToken.getExpiration() != null) {
        try {
            result.put(EXPIRES_AT, dateFormat.valueToString(accessToken.getExpiration()));
            result.put(EXP, accessToken.getExpiration().getTime() / 1000L);
        } catch (ParseException e) {
            logger.error("Parse exception in token introspection", e);
        }
    }

    if (userInfo != null) {
        // if we have a UserInfo, use that for the subject
        result.put(SUB, userInfo.getSub());
    } else {
        // otherwise, use the authentication's username
        result.put(SUB, authentication.getName());
    }

    if (authentication.getUserAuthentication() != null) {
        result.put(USER_ID, authentication.getUserAuthentication().getName());
    }

    result.put(CLIENT_ID, authentication.getOAuth2Request().getClientId());
    result.put(TOKEN_TYPE, accessToken.getTokenType());

    return result;
}
@Test
public void shouldAssembleExpectedResultForAccessToken() throws ParseException {
    // given
    OAuth2AccessTokenEntity accessToken = accessToken(new Date(123 * 1000L), scopes("foo", "bar"), null, "Bearer",
            oauth2AuthenticationWithUser(oauth2Request("clientId"), "name"));
    UserInfo userInfo = userInfo("sub");
    Set<String> authScopes = scopes("foo", "bar", "baz");

    // when
    Map<String, Object> result = assembler.assembleFrom(accessToken, userInfo, authScopes);

    // then
    Map<String, Object> expected = new ImmutableMap.Builder<String, Object>()
            .put("sub", "sub")
            .put("exp", 123L)
            .put("expires_at", dateFormat.valueToString(new Date(123 * 1000L)))
            .put("scope", "bar foo")
            .put("active", Boolean.TRUE)
            .put("user_id", "name")
            .put("client_id", "clientId")
            .put("token_type", "Bearer")
            .build();
    assertThat(result, is(equalTo(expected)));
}
@Override public boolean equals(Object other) { if (!(other instanceof Match)) { return false; } Match<T> that = (Match<T>) other; return this.matchAny == that.matchAny && Objects.equals(this.value, that.value) && this.negation == that.negation; }
@Test public void testEquals() { Match<String> m1 = Match.any(); Match<String> m2 = Match.any(); Match<String> m3 = Match.ifNull(); Match<String> m4 = Match.ifValue("bar"); assertEquals(m1, m2); assertFalse(Objects.equal(m1, m3)); assertFalse(Objects.equal(m3, m4)); Object o = new Object(); assertFalse(Objects.equal(m1, o)); }
public Optional<Projection> createProjection(final ProjectionSegment projectionSegment) { if (projectionSegment instanceof ShorthandProjectionSegment) { return Optional.of(createProjection((ShorthandProjectionSegment) projectionSegment)); } if (projectionSegment instanceof ColumnProjectionSegment) { return Optional.of(createProjection((ColumnProjectionSegment) projectionSegment)); } if (projectionSegment instanceof ExpressionProjectionSegment) { return Optional.of(createProjection((ExpressionProjectionSegment) projectionSegment)); } if (projectionSegment instanceof AggregationDistinctProjectionSegment) { return Optional.of(createProjection((AggregationDistinctProjectionSegment) projectionSegment)); } if (projectionSegment instanceof AggregationProjectionSegment) { return Optional.of(createProjection((AggregationProjectionSegment) projectionSegment)); } if (projectionSegment instanceof SubqueryProjectionSegment) { return Optional.of(createProjection((SubqueryProjectionSegment) projectionSegment)); } if (projectionSegment instanceof ParameterMarkerExpressionSegment) { return Optional.of(createProjection((ParameterMarkerExpressionSegment) projectionSegment)); } return Optional.empty(); }
@Test void assertCreateProjectionWhenProjectionSegmentInstanceOfAggregationProjectionSegment() { AggregationProjectionSegment aggregationProjectionSegment = new AggregationProjectionSegment(0, 10, AggregationType.COUNT, "COUNT(1)"); Optional<Projection> actual = new ProjectionEngine(databaseType).createProjection(aggregationProjectionSegment); assertTrue(actual.isPresent()); assertThat(actual.get(), instanceOf(AggregationProjection.class)); }
@Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } DescriptorProperties that = (DescriptorProperties) o; return Objects.equals(properties, that.properties); }
@Test void testEquals() { DescriptorProperties properties1 = new DescriptorProperties(); properties1.putString("hello1", "12"); properties1.putString("hello2", "13"); properties1.putString("hello3", "14"); DescriptorProperties properties2 = new DescriptorProperties(); properties2.putString("hello1", "12"); properties2.putString("hello2", "13"); properties2.putString("hello3", "14"); DescriptorProperties properties3 = new DescriptorProperties(); properties3.putString("hello1", "12"); properties3.putString("hello3", "14"); properties3.putString("hello2", "13"); assertThat(properties2).isEqualTo(properties1); assertThat(properties3).isEqualTo(properties1); }
@Override public double entropy() { return entropy; }
@Test public void testEntropy() { System.out.println("entropy"); WeibullDistribution instance = new WeibullDistribution(1.5, 1.0); instance.rand(); assertEquals(0.78694011, instance.entropy(), 1E-7); }
@Override public String toString() { return "[" + Arrays.stream(MetadataVersion.VERSIONS).map(MetadataVersion::version).collect( Collectors.joining(", ")) + "]"; }
@Test public void testMetadataVersionValidator() { String str = new MetadataVersionValidator().toString(); String[] apiVersions = str.substring(1).split(","); assertEquals(MetadataVersion.VERSIONS.length, apiVersions.length); }
public JmxCollector register() { return register(PrometheusRegistry.defaultRegistry); }
@Test public void testLabelsAreSet() throws Exception { new JmxCollector( "\n---\nrules:\n- pattern: `^hadoop<service=DataNode, name=DataNodeActivity-ams-hdd001-50010><>replaceBlockOpMinTime:`\n name: foo\n labels:\n l: v" .replace('`', '"')) .register(prometheusRegistry); assertEquals(200, getSampleValue("foo", new String[] {"l"}, new String[] {"v"}), .001); }
public static DatabaseBackendHandler newInstance(final QueryContext queryContext, final ConnectionSession connectionSession, final boolean preferPreparedStatement) { SQLStatementContext sqlStatementContext = queryContext.getSqlStatementContext(); SQLStatement sqlStatement = sqlStatementContext.getSqlStatement(); if (sqlStatement instanceof DoStatement) { return new UnicastDatabaseBackendHandler(queryContext, connectionSession); } if (sqlStatement instanceof SetStatement && null == connectionSession.getUsedDatabaseName()) { return () -> new UpdateResponseHeader(sqlStatement); } if (sqlStatement instanceof DALStatement && !isDatabaseRequiredDALStatement(sqlStatement) || sqlStatement instanceof SelectStatement && !((SelectStatement) sqlStatement).getFrom().isPresent()) { return new UnicastDatabaseBackendHandler(queryContext, connectionSession); } return DatabaseConnectorFactory.getInstance().newInstance(queryContext, connectionSession.getDatabaseConnectionManager(), preferPreparedStatement); }
@Test void assertNewInstanceReturnedUnicastDatabaseBackendHandlerWithQueryWithoutFrom() { String sql = "SELECT 1"; SQLStatementContext sqlStatementContext = mock(SQLStatementContext.class); when(sqlStatementContext.getSqlStatement()).thenReturn(mock(SelectStatement.class)); DatabaseBackendHandler actual = DatabaseBackendHandlerFactory.newInstance( new QueryContext(sqlStatementContext, sql, Collections.emptyList(), new HintValueContext(), mockConnectionContext(), mock(ShardingSphereMetaData.class)), mock(ConnectionSession.class), false); assertThat(actual, instanceOf(UnicastDatabaseBackendHandler.class)); }
@Override public boolean othersUpdatesAreVisible(final int type) { return false; }
@Test void assertOthersUpdatesAreVisible() { assertFalse(metaData.othersUpdatesAreVisible(0)); }
public T setEnvVariable(String key, String value) { envVariables.put( requireNonNull(key, "key can't be null"), requireNonNull(value, "value can't be null")); return castThis(); }
@Test public void setEnvVariable_fails_with_NPE_if_value_is_null() throws IOException { File workDir = temp.newFolder(); AbstractCommand underTest = new AbstractCommand(ProcessId.ELASTICSEARCH, workDir, System2.INSTANCE) { }; assertThatThrownBy(() -> underTest.setEnvVariable(randomAlphanumeric(30), null)) .isInstanceOf(NullPointerException.class) .hasMessage("value can't be null"); }
@Override public SchemaAndValue toConnectData(String topic, byte[] value) { try { return new SchemaAndValue(schema, deserializer.deserialize(topic, value)); } catch (SerializationException e) { throw new DataException("Failed to deserialize " + typeName + ": ", e); } }
@Test public void testDeserializingDataWithTooManyBytes() { assertThrows(DataException.class, () -> converter.toConnectData(TOPIC, new byte[10])); }
public void dump(DumpRequest dumpRequest) { if (dumpRequest.isBeta()) { dumpBeta(dumpRequest.getDataId(), dumpRequest.getGroup(), dumpRequest.getTenant(), dumpRequest.getLastModifiedTs(), dumpRequest.getSourceIp()); } else if (dumpRequest.isBatch()) { dumpBatch(dumpRequest.getDataId(), dumpRequest.getGroup(), dumpRequest.getTenant(), dumpRequest.getLastModifiedTs(), dumpRequest.getSourceIp()); } else if (StringUtils.isNotBlank(dumpRequest.getTag())) { dumpTag(dumpRequest.getDataId(), dumpRequest.getGroup(), dumpRequest.getTenant(), dumpRequest.getTag(), dumpRequest.getLastModifiedTs(), dumpRequest.getSourceIp()); } else { dumpFormal(dumpRequest.getDataId(), dumpRequest.getGroup(), dumpRequest.getTenant(), dumpRequest.getLastModifiedTs(), dumpRequest.getSourceIp()); } }
@Test
void dumpRequest() throws Throwable {
    String dataId = "12345667dataId";
    String group = "234445group";
    DumpRequest dumpRequest = DumpRequest.create(dataId, group, "testtenant", System.currentTimeMillis(), "127.0.0.1");
    // TaskManager dumpTaskMgr;
    ReflectionTestUtils.setField(dumpService, "dumpTaskMgr", dumpTaskMgr);
    Mockito.doNothing().when(dumpTaskMgr).addTask(any(), any());

    dumpService.dump(dumpRequest);
    Mockito.verify(dumpTaskMgr, times(1))
            .addTask(eq(GroupKey.getKeyTenant(dataId, group, dumpRequest.getTenant())), any(DumpTask.class));

    dumpRequest.setBeta(true);
    dumpService.dump(dumpRequest);
    Mockito.verify(dumpTaskMgr, times(1))
            .addTask(eq(GroupKey.getKeyTenant(dataId, group, dumpRequest.getTenant()) + "+beta"), any(DumpTask.class));

    dumpRequest.setBeta(false);
    dumpRequest.setBatch(true);
    dumpService.dump(dumpRequest);
    Mockito.verify(dumpTaskMgr, times(1))
            .addTask(eq(GroupKey.getKeyTenant(dataId, group, dumpRequest.getTenant()) + "+batch"), any(DumpTask.class));

    dumpRequest.setBatch(false);
    dumpRequest.setTag("testTag111");
    dumpService.dump(dumpRequest);
    Mockito.verify(dumpTaskMgr, times(1))
            .addTask(eq(GroupKey.getKeyTenant(dataId, group, dumpRequest.getTenant()) + "+tag+" + dumpRequest.getTag()), any(DumpTask.class));
}
public Map<String, Object> toObjectMap(final String json) { return GSON_MAP.fromJson(json, new TypeToken<LinkedHashMap<String, Object>>() { }.getType()); }
@Test public void testToObjectMap() { Map<String, Object> map = ImmutableMap.of("id", 123L, "name", "test", "double", 1.0D, "boolean", true, "data", generateTestObject()); String json = "{\"name\":\"test\",\"id\":123,\"double\":1.0,\"boolean\":true,\"data\":" + EXPECTED_JSON + "}"; Map<String, Object> parseMap = GsonUtils.getInstance().toObjectMap(json); map.forEach((key, value) -> { assertTrue(parseMap.containsKey(key)); Object jsonValue = parseMap.get(key); if (jsonValue instanceof JsonElement) { assertEquals(value, GsonUtils.getInstance().fromJson((JsonElement) jsonValue, TestObject.class)); } else { assertEquals(value, parseMap.get(key)); } }); assertNull(GsonUtils.getInstance().toObjectMap(null)); }
@Nullable public static TraceContextOrSamplingFlags parseB3SingleFormat(CharSequence b3) { return parseB3SingleFormat(b3, 0, b3.length()); }
@Test void parseB3SingleFormat_spanIdsUnsampled() { assertThat(parseB3SingleFormat(traceId + "-" + spanId + "-0").context()) .isEqualToComparingFieldByField(TraceContext.newBuilder() .traceId(Long.parseUnsignedLong(traceId, 16)) .spanId(Long.parseUnsignedLong(spanId, 16)) .sampled(false).build() ); }
public static CustomWeighting.Parameters createWeightingParameters(CustomModel customModel, EncodedValueLookup lookup) { String key = customModel.toString(); Class<?> clazz = customModel.isInternal() ? INTERNAL_CACHE.get(key) : null; if (CACHE_SIZE > 0 && clazz == null) clazz = CACHE.get(key); if (clazz == null) { clazz = createClazz(customModel, lookup); if (customModel.isInternal()) { INTERNAL_CACHE.put(key, clazz); if (INTERNAL_CACHE.size() > 100) { CACHE.putAll(INTERNAL_CACHE); INTERNAL_CACHE.clear(); LoggerFactory.getLogger(CustomModelParser.class).warn("Internal cache must stay small but was " + INTERNAL_CACHE.size() + ". Cleared it. Misuse of CustomModel::internal?"); } } else if (CACHE_SIZE > 0) { CACHE.put(key, clazz); } } try { // The class does not need to be thread-safe as we create an instance per request CustomWeightingHelper prio = (CustomWeightingHelper) clazz.getDeclaredConstructor().newInstance(); prio.init(customModel, lookup, CustomModel.getAreasAsMap(customModel.getAreas())); return new CustomWeighting.Parameters( prio::getSpeed, prio::calcMaxSpeed, prio::getPriority, prio::calcMaxPriority, customModel.getDistanceInfluence() == null ? 0 : customModel.getDistanceInfluence(), customModel.getHeadingPenalty() == null ? Parameters.Routing.DEFAULT_HEADING_PENALTY : customModel.getHeadingPenalty()); } catch (ReflectiveOperationException ex) { throw new IllegalArgumentException("Cannot compile expression " + ex.getMessage(), ex); } }
@Test public void parseValueWithError() { CustomModel customModel1 = new CustomModel(); customModel1.addToSpeed(If("true", LIMIT, "unknown")); IllegalArgumentException ret = assertThrows(IllegalArgumentException.class, () -> CustomModelParser.createWeightingParameters(customModel1, encodingManager)); assertEquals("Cannot compile expression: 'unknown' not available", ret.getMessage()); CustomModel customModel3 = new CustomModel(); customModel3.addToSpeed(If("true", LIMIT, avgSpeedEnc.getName())); customModel3.addToSpeed(If("road_class == PRIMARY", MULTIPLY, "0.5")); customModel3.addToSpeed(Else(MULTIPLY, "road_class")); ret = assertThrows(IllegalArgumentException.class, () -> CustomModelParser.createWeightingParameters(customModel3, encodingManager)); assertTrue(ret.getMessage().contains("Binary numeric promotion not possible on types \"double\" and \"com.graphhopper.routing.ev.RoadClass\""), ret.getMessage()); }
@Override public DescriptiveUrlBag toUrl(final Path file) { final DescriptiveUrlBag list = new DescriptiveUrlBag(); if(file.attributes().getLink() != DescriptiveUrl.EMPTY) { list.add(file.attributes().getLink()); } list.add(new DescriptiveUrl(URI.create(String.format("%s%s", new HostUrlProvider().withUsername(false).get(host), URIEncoder.encode(file.getAbsolute()))), DescriptiveUrl.Type.provider, MessageFormat.format(LocaleFactory.localizedString("{0} URL"), host.getProtocol().getScheme().toString().toUpperCase(Locale.ROOT)))); list.addAll(new HostWebUrlProvider(host).toUrl(file)); return list; }
@Test public void testDefaultPathRoot() { Host host = new Host(new TestProtocol(), "localhost"); host.setDefaultPath("/"); Path path = new Path("/file", EnumSet.of(Path.Type.directory)); assertEquals("http://localhost/file", new DefaultUrlProvider(host).toUrl(path).find(DescriptiveUrl.Type.provider).getUrl()); host.setWebURL("http://127.0.0.1/~dkocher"); assertEquals("http://127.0.0.1/~dkocher/file", new DefaultUrlProvider(host).toUrl(path).find(DescriptiveUrl.Type.http).getUrl()); }
public static ThreadFactory createThreadFactory(final String pattern, final boolean daemon) { return new ThreadFactory() { private final AtomicLong threadEpoch = new AtomicLong(0); @Override public Thread newThread(Runnable r) { String threadName; if (pattern.contains("%d")) { threadName = String.format(pattern, threadEpoch.addAndGet(1)); } else { threadName = pattern; } Thread thread = new Thread(r, threadName); thread.setDaemon(daemon); return thread; } }; }
@Test public void testThreadNameWithoutNumberDaemon() { Thread daemonThread = ThreadUtils.createThreadFactory(THREAD_NAME, true).newThread(EMPTY_RUNNABLE); try { assertEquals(THREAD_NAME, daemonThread.getName()); assertTrue(daemonThread.isDaemon()); } finally { try { daemonThread.join(); } catch (InterruptedException e) { // can be ignored } } }
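A self-contained sketch of the factory contract the pair above relies on: a "%d" in the pattern yields numbered threads, otherwise the pattern is used verbatim, and the daemon flag is applied either way.
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicLong;

public class NamedThreadFactoryDemo {
    static ThreadFactory factory(String pattern, boolean daemon) {
        AtomicLong epoch = new AtomicLong();
        return r -> {
            // number the thread only when the pattern asks for it
            String name = pattern.contains("%d")
                    ? String.format(pattern, epoch.incrementAndGet())
                    : pattern;
            Thread t = new Thread(r, name);
            t.setDaemon(daemon);
            return t;
        };
    }

    public static void main(String[] args) {
        ThreadFactory f = factory("worker-%d", true);
        System.out.println(f.newThread(() -> { }).getName()); // worker-1
        System.out.println(f.newThread(() -> { }).getName()); // worker-2
        System.out.println(factory("singleton", false).newThread(() -> { }).getName()); // singleton
    }
}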
public ManagedProcess launch(AbstractCommand command) { EsInstallation esInstallation = command.getEsInstallation(); if (esInstallation != null) { cleanupOutdatedEsData(esInstallation); writeConfFiles(esInstallation); } Process process; if (command instanceof JavaCommand<?> javaCommand) { process = launchJava(javaCommand); } else { throw new IllegalStateException("Unexpected type of command: " + command.getClass()); } ProcessId processId = command.getProcessId(); try { if (processId == ProcessId.ELASTICSEARCH) { checkArgument(esInstallation != null, "Incorrect configuration EsInstallation is null"); EsConnectorImpl esConnector = new EsConnectorImpl(singleton(HostAndPort.fromParts(esInstallation.getHost(), esInstallation.getHttpPort())), esInstallation.getBootstrapPassword(), esInstallation.getHttpKeyStoreLocation(), esInstallation.getHttpKeyStorePassword().orElse(null)); return new EsManagedProcess(process, processId, esConnector); } else { ProcessCommands commands = allProcessesCommands.createAfterClean(processId.getIpcIndex()); return new ProcessCommandsManagedProcess(process, processId, commands); } } catch (Exception e) { // just in case if (process != null) { process.destroyForcibly(); } throw new IllegalStateException(format("Fail to launch monitor of process [%s]", processId.getHumanReadableName()), e); } }
@Test public void do_not_fail_if_outdated_es_directory_does_not_exist() throws Exception { File tempDir = temp.newFolder(); File homeDir = temp.newFolder(); File dataDir = temp.newFolder(); File logDir = temp.newFolder(); ProcessLauncher underTest = new ProcessLauncherImpl(tempDir, commands, TestProcessBuilder::new); JavaCommand command = createEsCommand(tempDir, homeDir, dataDir, logDir); File outdatedEsDir = new File(dataDir, "es"); assertThat(outdatedEsDir).doesNotExist(); underTest.launch(command); assertThat(outdatedEsDir).doesNotExist(); }
@Override public MapSettings setProperty(String key, String value) { return (MapSettings) super.setProperty(key, value); }
@Test public void should_load_values_of_deprecated_key() { Settings settings = new MapSettings(definitions); settings.setProperty("oldKey", "a,b"); assertThat(settings.getStringArray("newKey")).containsOnly("a", "b"); assertThat(settings.getStringArray("oldKey")).containsOnly("a", "b"); }
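A minimal sketch (assumed structure, not the SonarQube implementation) of the deprecated-key aliasing the test checks: reads and writes through either the old or the new key resolve to the same stored value.
import java.util.HashMap;
import java.util.Map;

public class AliasedSettingsSketch {
    private final Map<String, String> values = new HashMap<>();
    private final Map<String, String> deprecatedToNew = Map.of("oldKey", "newKey");

    void setProperty(String key, String value) {
        // always store under the canonical (new) key
        values.put(deprecatedToNew.getOrDefault(key, key), value);
    }

    String[] getStringArray(String key) {
        String v = values.get(deprecatedToNew.getOrDefault(key, key));
        return v == null ? new String[0] : v.split(",");
    }

    public static void main(String[] args) {
        AliasedSettingsSketch s = new AliasedSettingsSketch();
        s.setProperty("oldKey", "a,b");
        System.out.println(String.join("|", s.getStringArray("newKey"))); // a|b
        System.out.println(String.join("|", s.getStringArray("oldKey"))); // a|b
    }
}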
public long getUnknown() { return unknown; }
@Test public void testGetUnknown() { assertEquals(TestParameters.VP_RES_TBL_UNKNOWN, chmLzxcResetTable.getUnknown()); }
CompletableFuture<Map<TopicIdPartition, PartitionData>> processFetchResponse( ShareFetchPartitionData shareFetchPartitionData, List<Tuple2<TopicIdPartition, FetchPartitionData>> responseData ) { Map<TopicIdPartition, CompletableFuture<PartitionData>> futures = new HashMap<>(); responseData.forEach(data -> { TopicIdPartition topicIdPartition = data._1; FetchPartitionData fetchPartitionData = data._2; SharePartition sharePartition = partitionCacheMap.get(sharePartitionKey(shareFetchPartitionData.groupId, topicIdPartition)); futures.put(topicIdPartition, sharePartition.acquire(shareFetchPartitionData.memberId, fetchPartitionData) .handle((acquiredRecords, throwable) -> { log.trace("Acquired records for topicIdPartition: {} with share fetch data: {}, records: {}", topicIdPartition, shareFetchPartitionData, acquiredRecords); PartitionData partitionData = new PartitionData() .setPartitionIndex(topicIdPartition.partition()); if (throwable != null) { partitionData.setErrorCode(Errors.forException(throwable).code()); return partitionData; } if (fetchPartitionData.error.code() == Errors.OFFSET_OUT_OF_RANGE.code()) { // In case we get OFFSET_OUT_OF_RANGE error, that's because the LSO is later than the fetch offset. // So, we would update the start and end offset of the share partition and still return an empty // response and let the client retry the fetch. This way we do not lose out on the data that // would be returned for other share partitions in the fetch request. sharePartition.updateCacheAndOffsets(offsetForEarliestTimestamp(topicIdPartition)); partitionData .setPartitionIndex(topicIdPartition.partition()) .setRecords(null) .setErrorCode(Errors.NONE.code()) .setAcquiredRecords(Collections.emptyList()) .setAcknowledgeErrorCode(Errors.NONE.code()); return partitionData; } // Maybe, in the future, check if no records are acquired, and we want to retry // replica manager fetch. Depends on the share partition manager implementation, // if we want parallel requests for the same share partition or not. partitionData .setPartitionIndex(topicIdPartition.partition()) .setRecords(fetchPartitionData.records) .setErrorCode(fetchPartitionData.error.code()) .setAcquiredRecords(acquiredRecords) .setAcknowledgeErrorCode(Errors.NONE.code()); return partitionData; })); }); return CompletableFuture.allOf(futures.values().toArray(new CompletableFuture[0])).thenApply(v -> { Map<TopicIdPartition, PartitionData> processedResult = new HashMap<>(); futures.forEach((topicIdPartition, future) -> processedResult.put(topicIdPartition, future.join())); return processedResult; }); }
@Test public void testProcessFetchResponse() { String groupId = "grp"; String memberId = Uuid.randomUuid().toString(); Uuid topicId = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(topicId, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(topicId, new TopicPartition("foo", 1)); Map<TopicIdPartition, Integer> partitionMaxBytes = new HashMap<>(); partitionMaxBytes.put(tp0, PARTITION_MAX_BYTES); partitionMaxBytes.put(tp1, PARTITION_MAX_BYTES); Map<SharePartitionManager.SharePartitionKey, SharePartition> partitionCacheMap = new ConcurrentHashMap<>(); partitionCacheMap.computeIfAbsent(new SharePartitionManager.SharePartitionKey(groupId, tp0), k -> new SharePartition(groupId, tp0, MAX_IN_FLIGHT_MESSAGES, MAX_DELIVERY_COUNT, RECORD_LOCK_DURATION_MS, mockTimer, new MockTime(), NoOpShareStatePersister.getInstance())); partitionCacheMap.computeIfAbsent(new SharePartitionManager.SharePartitionKey(groupId, tp1), k -> new SharePartition(groupId, tp1, MAX_IN_FLIGHT_MESSAGES, MAX_DELIVERY_COUNT, RECORD_LOCK_DURATION_MS, mockTimer, new MockTime(), NoOpShareStatePersister.getInstance())); SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() .withPartitionCacheMap(partitionCacheMap).build(); CompletableFuture<Map<TopicIdPartition, PartitionData>> future = new CompletableFuture<>(); SharePartitionManager.ShareFetchPartitionData shareFetchPartitionData = new SharePartitionManager.ShareFetchPartitionData( new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, 0, 1, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty()), groupId, memberId, future, partitionMaxBytes); MemoryRecords records = MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("0".getBytes(), "v".getBytes()), new SimpleRecord("1".getBytes(), "v".getBytes()), new SimpleRecord("2".getBytes(), "v".getBytes()), new SimpleRecord(null, "value".getBytes())); MemoryRecords records1 = MemoryRecords.withRecords(100L, Compression.NONE, new SimpleRecord("0".getBytes(), "v".getBytes()), new SimpleRecord("1".getBytes(), "v".getBytes()), new SimpleRecord("2".getBytes(), "v".getBytes()), new SimpleRecord(null, "value".getBytes())); List<Tuple2<TopicIdPartition, FetchPartitionData>> responseData = new ArrayList<>(); responseData.add(new Tuple2<>(tp0, new FetchPartitionData(Errors.NONE, 0L, 0L, records, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false))); responseData.add(new Tuple2<>(tp1, new FetchPartitionData(Errors.NONE, 0L, 100L, records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false))); CompletableFuture<Map<TopicIdPartition, ShareFetchResponseData.PartitionData>> result = sharePartitionManager.processFetchResponse(shareFetchPartitionData, responseData); assertTrue(result.isDone()); Map<TopicIdPartition, ShareFetchResponseData.PartitionData> resultData = result.join(); assertEquals(2, resultData.size()); assertTrue(resultData.containsKey(tp0)); assertTrue(resultData.containsKey(tp1)); assertEquals(0, resultData.get(tp0).partitionIndex()); assertEquals(1, resultData.get(tp1).partitionIndex()); assertEquals(Errors.NONE.code(), resultData.get(tp0).errorCode()); assertEquals(Errors.NONE.code(), resultData.get(tp1).errorCode()); assertEquals(Collections.singletonList(new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1)), resultData.get(tp0).acquiredRecords()); assertEquals(Collections.singletonList(new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(100).setLastOffset(103).setDeliveryCount((short) 1)), resultData.get(tp1).acquiredRecords()); }
public static synchronized @Nonnull Map<String, Object> loadYamlFile(File file) throws Exception { try (FileInputStream inputStream = new FileInputStream((file))) { Map<String, Object> yamlResult = (Map<String, Object>) loader.loadFromInputStream(inputStream); return yamlResult == null ? new HashMap<>() : yamlResult; } catch (FileNotFoundException e) { LOG.error("Failed to find YAML file", e); throw e; } catch (IOException | YamlEngineException e) { if (e instanceof MarkedYamlEngineException) { YamlEngineException exception = wrapExceptionToHiddenSensitiveData((MarkedYamlEngineException) e); LOG.error("Failed to parse YAML configuration", exception); throw exception; } else { throw e; } } }
@Test void testLoadYamlFile_InvalidYAMLSyntaxException() { File confFile = new File(tmpDir, "invalid.yaml"); try (final PrintWriter pw = new PrintWriter(confFile)) { pw.println("key: value: secret"); } catch (FileNotFoundException e) { throw new RuntimeException(e); } assertThatThrownBy(() -> YamlParserUtils.loadYamlFile(confFile)) .isInstanceOf(YamlEngineException.class) .satisfies( e -> assertThat(ExceptionUtils.stringifyException(e)) .doesNotContain("secret")); }
public List<RawErasureCoderFactory> getCoders(String codecName) { return coderMap.get(codecName); }
@Test public void testGetCoders() { List<RawErasureCoderFactory> coders = CodecRegistry.getInstance(). getCoders(ErasureCodeConstants.RS_CODEC_NAME); assertEquals(2, coders.size()); assertTrue(coders.get(0) instanceof NativeRSRawErasureCoderFactory); assertTrue(coders.get(1) instanceof RSRawErasureCoderFactory); coders = CodecRegistry.getInstance(). getCoders(ErasureCodeConstants.RS_LEGACY_CODEC_NAME); assertEquals(1, coders.size()); assertTrue(coders.get(0) instanceof RSLegacyRawErasureCoderFactory); coders = CodecRegistry.getInstance(). getCoders(ErasureCodeConstants.XOR_CODEC_NAME); assertEquals(2, coders.size()); assertTrue(coders.get(0) instanceof NativeXORRawErasureCoderFactory); assertTrue(coders.get(1) instanceof XORRawErasureCoderFactory); }
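A sketch of the registry lookup shape implied by the test: coders are held per codec name in preference order (native first), so callers can fall back down the list. The class names are used illustratively as strings, not loaded factories.
import java.util.List;
import java.util.Map;

public class CodecRegistrySketch {
    private final Map<String, List<String>> coderMap = Map.of(
            "rs", List.of("NativeRSRawErasureCoderFactory", "RSRawErasureCoderFactory"),
            "xor", List.of("NativeXORRawErasureCoderFactory", "XORRawErasureCoderFactory"));

    List<String> getCoders(String codecName) {
        return coderMap.get(codecName); // may be null for unknown codecs
    }

    public static void main(String[] args) {
        System.out.println(new CodecRegistrySketch().getCoders("rs"));
    }
}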
@Override public String getName() { return ANALYZER_NAME; }
@Test public void testNuspecAnalysis() throws Exception { File file = BaseTest.getResourceAsFile(this, "nuspec/test.nuspec"); Dependency result = new Dependency(file); instance.analyze(result, null); assertEquals(NuspecAnalyzer.DEPENDENCY_ECOSYSTEM, result.getEcosystem()); //checking the owner field assertTrue(result.getEvidence(EvidenceType.VENDOR).toString().toLowerCase().contains("bobsmack")); //checking the author field assertTrue(result.getEvidence(EvidenceType.VENDOR).toString().toLowerCase().contains("brianfox")); //checking the id field assertTrue(result.getEvidence(EvidenceType.PRODUCT).toString().contains("TestDepCheck")); //checking the title field assertTrue(result.getEvidence(EvidenceType.PRODUCT).toString().contains("Test Package")); assertTrue(result.getEvidence(EvidenceType.VERSION).toString().contains("1.0.0")); assertEquals("1.0.0", result.getVersion()); assertEquals("TestDepCheck", result.getName()); assertEquals("TestDepCheck:1.0.0", result.getDisplayFileName()); }
@Override public synchronized RoleRecvStatus deliverRoleReply(RoleReplyInfo rri) throws SwitchStateException { long xid = rri.getXid(); RoleState receivedRole = rri.getRole(); RoleState expectedRole = pendingReplies.getIfPresent(xid); if (expectedRole == null) { RoleState currentRole = (sw != null) ? sw.getRole() : null; if (currentRole != null) { if (currentRole == rri.getRole()) { // Don't disconnect if the role reply we received is // for the same role we are already in. // FIXME: but we do from the caller anyways. log.debug("Received unexpected RoleReply from " + "Switch: {}. " + "Role in reply is same as current role of this " + "controller for this sw. Ignoring ...", sw.getStringId()); return RoleRecvStatus.OTHER_EXPECTATION; } else { String msg = String.format("Switch: [%s], " + "received unexpected RoleReply[%s]. " + "No roles are pending, and this controller's " + "current role:[%s] does not match reply. " + "Disconnecting switch ... ", sw.getStringId(), rri, currentRole); throw new SwitchStateException(msg); } } log.debug("Received unexpected RoleReply {} from " + "Switch: {}. " + "This controller has no current role for this sw. " + "Ignoring ...", rri, sw == null ? "(null)" : sw.getStringId()); return RoleRecvStatus.OTHER_EXPECTATION; } // XXX Should check generation id meaningfully and other cases of expectations //if (pendingXid != xid) { // log.info("Received older role reply from " + // "switch {} ({}). Ignoring. " + // "Waiting for {}, xid={}", // new Object[] {sw.getStringId(), rri, // pendingRole, pendingXid }); // return RoleRecvStatus.OLD_REPLY; //} sw.returnRoleReply(expectedRole, receivedRole); if (expectedRole == receivedRole) { log.debug("Received role reply message from {} that matched " + "expected role-reply {} with expectations {}", sw.getStringId(), receivedRole, expectation); // Done with this RoleReply; Invalidate pendingReplies.invalidate(xid); if (expectation == RoleRecvStatus.MATCHED_CURRENT_ROLE || expectation == RoleRecvStatus.MATCHED_SET_ROLE) { return expectation; } else { return RoleRecvStatus.OTHER_EXPECTATION; } } pendingReplies.invalidate(xid); // if xids match but role's don't, perhaps its a query (OF1.3) if (expectation == RoleRecvStatus.REPLY_QUERY) { return expectation; } return RoleRecvStatus.OTHER_EXPECTATION; }
@Test public void deliverRoleReply() { RoleRecvStatus status; RoleReplyInfo asserted = new RoleReplyInfo(MASTER, GID, XID); RoleReplyInfo unasserted = new RoleReplyInfo(SLAVE, GID, XID); try { //call without sendRoleReq() for requestPending = false //first, sw.role == null status = manager.deliverRoleReply(asserted); assertEquals("expectation wrong", OTHER_EXPECTATION, status); sw.setRole(MASTER); assertEquals("expectation wrong", OTHER_EXPECTATION, status); sw.setRole(SLAVE); //match to pendingRole = MASTER, requestPending = true manager.sendRoleRequest(MASTER, MATCHED_CURRENT_ROLE); status = manager.deliverRoleReply(asserted); assertEquals("expectation wrong", MATCHED_CURRENT_ROLE, status); //requestPending never gets reset -- this might be a bug. status = manager.deliverRoleReply(unasserted); assertEquals("expectation wrong", OTHER_EXPECTATION, status); assertEquals("pending role mismatch", MASTER, ((TestSwitchDriver) sw).failed); } catch (IOException | SwitchStateException e) { assertEquals("unexpected error thrown", SwitchStateException.class, e.getClass()); } }
@Override public BasicTypeDefine reconvert(Column column) { BasicTypeDefine.BasicTypeDefineBuilder builder = BasicTypeDefine.builder() .name(column.getName()) .precision(column.getColumnLength()) .length(column.getColumnLength()) .nullable(column.isNullable()) .comment(column.getComment()) .scale(column.getScale()) .defaultValue(column.getDefaultValue()); switch (column.getDataType().getSqlType()) { case NULL: builder.columnType(IRIS_NULL); builder.dataType(IRIS_NULL); break; case STRING: if (column.getColumnLength() == null || column.getColumnLength() <= 0) { builder.columnType(String.format("%s(%s)", IRIS_VARCHAR, MAX_VARCHAR_LENGTH)); builder.dataType(IRIS_VARCHAR); } else if (column.getColumnLength() < MAX_VARCHAR_LENGTH) { builder.columnType( String.format("%s(%s)", IRIS_VARCHAR, column.getColumnLength())); builder.dataType(IRIS_VARCHAR); } else { builder.columnType(IRIS_LONG_VARCHAR); builder.dataType(IRIS_LONG_VARCHAR); } break; case BOOLEAN: builder.columnType(IRIS_BIT); builder.dataType(IRIS_BIT); break; case TINYINT: builder.columnType(IRIS_TINYINT); builder.dataType(IRIS_TINYINT); break; case SMALLINT: builder.columnType(IRIS_SMALLINT); builder.dataType(IRIS_SMALLINT); break; case INT: builder.columnType(IRIS_INTEGER); builder.dataType(IRIS_INTEGER); break; case BIGINT: builder.columnType(IRIS_BIGINT); builder.dataType(IRIS_BIGINT); break; case FLOAT: builder.columnType(IRIS_FLOAT); builder.dataType(IRIS_FLOAT); break; case DOUBLE: builder.columnType(IRIS_DOUBLE); builder.dataType(IRIS_DOUBLE); break; case DECIMAL: DecimalType decimalType = (DecimalType) column.getDataType(); long precision = decimalType.getPrecision(); int scale = decimalType.getScale(); if (scale < 0) { scale = 0; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which is scale less than 0, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), precision, scale); } else if (scale > MAX_SCALE) { scale = MAX_SCALE; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which exceeds the maximum scale of {}, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), MAX_SCALE, precision, scale); } if (precision < scale) { precision = scale; } if (precision <= 0) { precision = DEFAULT_PRECISION; scale = DEFAULT_SCALE; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which is precision less than 0, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), precision, scale); } else if (precision > MAX_PRECISION) { scale = MAX_SCALE; precision = MAX_PRECISION; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which exceeds the maximum precision of {}, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), MAX_PRECISION, precision, scale); } builder.columnType(String.format("%s(%s,%s)", IRIS_DECIMAL, precision, scale)); builder.dataType(IRIS_DECIMAL); builder.precision(precision); builder.scale(scale); break; case BYTES: if (column.getColumnLength() == null || column.getColumnLength() <= 0) { builder.columnType(IRIS_LONG_BINARY); builder.dataType(IRIS_LONG_BINARY); } else if (column.getColumnLength() < MAX_BINARY_LENGTH) { builder.dataType(IRIS_BINARY); builder.columnType( String.format("%s(%s)", IRIS_BINARY, column.getColumnLength())); } else { builder.columnType(IRIS_LONG_BINARY); builder.dataType(IRIS_LONG_BINARY); } break; case DATE: builder.columnType(IRIS_DATE); builder.dataType(IRIS_DATE); break; case TIME: builder.dataType(IRIS_TIME); if (Objects.nonNull(column.getScale()) && column.getScale() > 0) { Integer timeScale = column.getScale(); if (timeScale > MAX_TIME_SCALE) { timeScale = MAX_TIME_SCALE; log.warn( "The time column {} type time({}) is out of range, " + "which exceeds the maximum scale of {}, " + "it will be converted to time({})", column.getName(), column.getScale(), MAX_TIME_SCALE, timeScale); } builder.columnType(String.format("%s(%s)", IRIS_TIME, timeScale)); builder.scale(timeScale); } else { builder.columnType(IRIS_TIME); } break; case TIMESTAMP: builder.columnType(IRIS_TIMESTAMP2); builder.dataType(IRIS_TIMESTAMP2); break; default: throw CommonError.convertToConnectorTypeError( DatabaseIdentifier.IRIS, column.getDataType().getSqlType().name(), column.getName()); } return builder.build(); }
@Test public void testReconvertUnsupported() { Column column = PhysicalColumn.of( "test", new MapType<>(BasicType.STRING_TYPE, BasicType.STRING_TYPE), (Long) null, true, null, null); try { IrisTypeConverter.INSTANCE.reconvert(column); Assertions.fail(); } catch (SeaTunnelRuntimeException e) { // ignore } catch (Throwable e) { Assertions.fail(); } }
@ScalarOperator(MODULUS) @SqlType(StandardTypes.SMALLINT) public static long modulus(@SqlType(StandardTypes.SMALLINT) long left, @SqlType(StandardTypes.SMALLINT) long right) { try { return left % right; } catch (ArithmeticException e) { throw new PrestoException(DIVISION_BY_ZERO, e); } }
@Test public void testModulus() { assertFunction("SMALLINT'37' % SMALLINT'37'", SMALLINT, (short) 0); assertFunction("SMALLINT'37' % SMALLINT'17'", SMALLINT, (short) (37 % 17)); assertFunction("SMALLINT'17' % SMALLINT'37'", SMALLINT, (short) (17 % 37)); assertFunction("SMALLINT'17' % SMALLINT'17'", SMALLINT, (short) 0); assertInvalidFunction("SMALLINT'17' % SMALLINT'0'", DIVISION_BY_ZERO); }
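A plain-JDK illustration of why the operator above wraps '%' in a try/catch: integer remainder by zero throws ArithmeticException, which the engine then rethrows as its DIVISION_BY_ZERO error.
public class ModulusDemo {
    public static void main(String[] args) {
        System.out.println(37 % 17); // 3
        try {
            System.out.println(17 % 0); // throws at runtime
        } catch (ArithmeticException e) {
            System.out.println("caught: " + e.getMessage()); // caught: / by zero
        }
    }
}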
public static boolean isClassNameSerializable(UserDefinedFunction function) { final Class<?> functionClass = function.getClass(); if (!InstantiationUtil.hasPublicNullaryConstructor(functionClass)) { // function must be parameterized return false; } Class<?> currentClass = functionClass; while (!currentClass.equals(UserDefinedFunction.class)) { for (Field field : currentClass.getDeclaredFields()) { if (!Modifier.isTransient(field.getModifiers()) && !Modifier.isStatic(field.getModifiers())) { // function seems to be stateful return false; } } currentClass = currentClass.getSuperclass(); } return true; }
@Test void testSerialization() { assertThat(isClassNameSerializable(new ValidTableFunction())).isTrue(); assertThat(isClassNameSerializable(new ValidScalarFunction())).isTrue(); assertThat(isClassNameSerializable(new ParameterizedTableFunction(12))).isFalse(); assertThat(isClassNameSerializable(new StatefulScalarFunction())).isFalse(); }
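A plain-reflection sketch of the statefulness check above (not Flink's exact code): a class counts as class-name serializable only if it has a public no-arg constructor and no non-transient, non-static instance fields anywhere up the hierarchy.
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;

public class StatelessCheckSketch {
    static boolean looksStateless(Class<?> clazz, Class<?> stopAt) {
        try {
            clazz.getConstructor(); // public nullary constructor required
        } catch (NoSuchMethodException e) {
            return false;
        }
        for (Class<?> c = clazz; c != null && !c.equals(stopAt); c = c.getSuperclass()) {
            for (Field f : c.getDeclaredFields()) {
                int m = f.getModifiers();
                if (!Modifier.isTransient(m) && !Modifier.isStatic(m)) {
                    return false; // instance state found: not safely re-instantiable by name
                }
            }
        }
        return true;
    }

    static class Stateless { public Stateless() { } }
    static class Stateful { public Stateful() { } private int counter; }

    public static void main(String[] args) {
        System.out.println(looksStateless(Stateless.class, Object.class)); // true
        System.out.println(looksStateless(Stateful.class, Object.class));  // false
    }
}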
@Override public List<TenantDO> getTenantListByPackageId(Long packageId) { return tenantMapper.selectListByPackageId(packageId); }
@Test public void testGetTenantListByPackageId() { // mock data TenantDO dbTenant1 = randomPojo(TenantDO.class, o -> o.setPackageId(1L)); tenantMapper.insert(dbTenant1);// @Sql: insert an existing record first TenantDO dbTenant2 = randomPojo(TenantDO.class, o -> o.setPackageId(2L)); tenantMapper.insert(dbTenant2);// @Sql: insert an existing record first // invoke List<TenantDO> result = tenantService.getTenantListByPackageId(1L); assertEquals(1, result.size()); assertPojoEquals(dbTenant1, result.get(0)); }
public String getPath() { return path; }
@Test public void getPath() { assertThat(sourceFile.getPath()).isEqualTo(DUMMY_PATH); }
void start() throws TransientKinesisException { ImmutableMap.Builder<String, ShardRecordsIterator> shardsMap = ImmutableMap.builder(); for (ShardCheckpoint checkpoint : initialCheckpoint) { shardsMap.put(checkpoint.getShardId(), createShardIterator(kinesis, checkpoint)); } shardIteratorsMap.set(shardsMap.build()); if (!shardIteratorsMap.get().isEmpty()) { recordsQueue = new ArrayBlockingQueue<>(queueCapacityPerShard * shardIteratorsMap.get().size()); String streamName = initialCheckpoint.getStreamName(); startReadingShards(shardIteratorsMap.get().values(), streamName); } else { // There are no shards to handle when restoring from an empty checkpoint. Empty checkpoints // are generated when the last shard handled by this pool was closed recordsQueue = new ArrayBlockingQueue<>(1); } }
@Test public void shouldStartReadingSuccessiveShardsAfterReceivingShardClosedException() throws Exception { when(firstIterator.readNextBatch()).thenThrow(KinesisShardClosedException.class); when(firstIterator.findSuccessiveShardRecordIterators()) .thenReturn(ImmutableList.of(thirdIterator, fourthIterator)); shardReadersPool.start(); verify(thirdIterator, timeout(TIMEOUT_IN_MILLIS).atLeast(2)).readNextBatch(); verify(fourthIterator, timeout(TIMEOUT_IN_MILLIS).atLeast(2)).readNextBatch(); }
public void writeXml(OutputStream out) throws IOException { writeXml(new OutputStreamWriter(out, StandardCharsets.UTF_8)); }
@Test public void testCDATA() throws IOException { String xml = "<configuration>" + "<property>" + "<name>cdata</name>" + "<value><![CDATA[>cdata]]></value>" + "</property>\n" + "<property>" + "<name>cdata-multiple</name>" + "<value><![CDATA[>cdata1]]> and <![CDATA[>cdata2]]></value>" + "</property>\n" + "<property>" + "<name>cdata-multiline</name>" + "<value><![CDATA[>cdata\nmultiline<>]]></value>" + "</property>\n" + "<property>" + "<name>cdata-whitespace</name>" + "<value> prefix <![CDATA[>cdata]]>\nsuffix </value>" + "</property>\n" + "</configuration>"; Configuration conf = checkCDATA(xml.getBytes()); ByteArrayOutputStream os = new ByteArrayOutputStream(); conf.writeXml(os); checkCDATA(os.toByteArray()); }
@Override public ParDoFn create( PipelineOptions options, CloudObject cloudUserFn, @Nullable List<SideInputInfo> sideInputInfos, TupleTag<?> mainOutputTag, Map<TupleTag<?>, Integer> outputTupleTagsToReceiverIndices, DataflowExecutionContext<?> executionContext, DataflowOperationContext operationContext) throws Exception { DoFnInstanceManager instanceManager = fnCache.get( operationContext.nameContext().systemName(), () -> DoFnInstanceManagers.cloningPool(doFnExtractor.getDoFnInfo(cloudUserFn), options)); DoFnInfo<?, ?> doFnInfo = instanceManager.peek(); DataflowExecutionContext.DataflowStepContext stepContext = executionContext.getStepContext(operationContext); Iterable<PCollectionView<?>> sideInputViews = doFnInfo.getSideInputViews(); SideInputReader sideInputReader = executionContext.getSideInputReader(sideInputInfos, sideInputViews, operationContext); if (doFnInfo.getDoFn() instanceof BatchStatefulParDoOverrides.BatchStatefulDoFn) { // HACK: BatchStatefulDoFn is a class from DataflowRunner's overrides // that just instructs the worker to execute it differently. This will // be replaced by metadata in the Runner API payload BatchStatefulParDoOverrides.BatchStatefulDoFn fn = (BatchStatefulParDoOverrides.BatchStatefulDoFn) doFnInfo.getDoFn(); DoFn underlyingFn = fn.getUnderlyingDoFn(); return new BatchModeUngroupingParDoFn( (BatchModeExecutionContext.StepContext) stepContext, new SimpleParDoFn( options, DoFnInstanceManagers.singleInstance(doFnInfo.withFn(underlyingFn)), sideInputReader, doFnInfo.getMainOutput(), outputTupleTagsToReceiverIndices, stepContext, operationContext, doFnInfo.getDoFnSchemaInformation(), doFnInfo.getSideInputMapping(), runnerFactory)); } else if (doFnInfo.getDoFn() instanceof StreamingPCollectionViewWriterFn) { // HACK: StreamingPCollectionViewWriterFn is a class from // DataflowPipelineTranslator. Using the class as an indicator is a migration path // to simply having an indicator string. checkArgument( stepContext instanceof StreamingModeExecutionContext.StreamingModeStepContext, "stepContext must be a StreamingModeStepContext to use StreamingPCollectionViewWriterFn"); DataflowRunner.StreamingPCollectionViewWriterFn<Object> writerFn = (StreamingPCollectionViewWriterFn<Object>) doFnInfo.getDoFn(); return new StreamingPCollectionViewWriterParDoFn( (StreamingModeExecutionContext.StreamingModeStepContext) stepContext, writerFn.getView().getTagInternal(), writerFn.getDataCoder(), (Coder<BoundedWindow>) doFnInfo.getWindowingStrategy().getWindowFn().windowCoder()); } else { return new SimpleParDoFn( options, instanceManager, sideInputReader, doFnInfo.getMainOutput(), outputTupleTagsToReceiverIndices, stepContext, operationContext, doFnInfo.getDoFnSchemaInformation(), doFnInfo.getSideInputMapping(), runnerFactory); } }
@Test public void testCleanupWorks() throws Exception { PipelineOptions options = PipelineOptionsFactory.create(); CounterSet counters = new CounterSet(); DoFn<?, ?> initialFn = new TestStatefulDoFn(); CloudObject cloudObject = getCloudObject(initialFn, WindowingStrategy.of(FixedWindows.of(Duration.millis(10)))); StateInternals stateInternals = InMemoryStateInternals.forKey("dummy"); // The overarching step context that only ParDoFn gets DataflowStepContext stepContext = mock(DataflowStepContext.class); // The user step context that the DoFnRunner gets a handle on DataflowStepContext userStepContext = mock(DataflowStepContext.class); when(stepContext.namespacedToUser()).thenReturn(userStepContext); when(stepContext.stateInternals()).thenReturn(stateInternals); when(userStepContext.stateInternals()).thenReturn((StateInternals) stateInternals); DataflowExecutionContext<DataflowStepContext> executionContext = mock(DataflowExecutionContext.class); TestOperationContext operationContext = TestOperationContext.create(counters); when(executionContext.getStepContext(operationContext)).thenReturn(stepContext); when(executionContext.getSideInputReader(any(), any(), any())) .thenReturn(NullSideInputReader.empty()); ParDoFn parDoFn = factory.create( options, cloudObject, Collections.emptyList(), MAIN_OUTPUT, ImmutableMap.of(MAIN_OUTPUT, 0), executionContext, operationContext); Receiver rcvr = new OutputReceiver(); parDoFn.startBundle(rcvr); IntervalWindow firstWindow = new IntervalWindow(new Instant(0), new Instant(9)); IntervalWindow secondWindow = new IntervalWindow(new Instant(10), new Instant(19)); Coder<IntervalWindow> windowCoder = IntervalWindow.getCoder(); StateNamespace firstWindowNamespace = StateNamespaces.window(windowCoder, firstWindow); StateNamespace secondWindowNamespace = StateNamespaces.window(windowCoder, secondWindow); StateTag<ValueState<String>> tag = StateTags.tagForSpec(TestStatefulDoFn.STATE_ID, StateSpecs.value(StringUtf8Coder.of())); // Set up non-empty state. We don't mock + verify calls to clear() but instead // check that state is actually empty. We mustn't care how it is accomplished. stateInternals.state(firstWindowNamespace, tag).write("first"); stateInternals.state(secondWindowNamespace, tag).write("second"); when(userStepContext.getNextFiredTimer(windowCoder)).thenReturn(null); when(stepContext.getNextFiredTimer(windowCoder)) .thenReturn( TimerData.of( SimpleParDoFn.CLEANUP_TIMER_ID, firstWindowNamespace, firstWindow.maxTimestamp().plus(Duration.millis(1L)), firstWindow.maxTimestamp().plus(Duration.millis(1L)), TimeDomain.EVENT_TIME)) .thenReturn(null); // This should fire the timer to clean up the first window parDoFn.processTimers(); assertThat(stateInternals.state(firstWindowNamespace, tag).read(), nullValue()); assertThat(stateInternals.state(secondWindowNamespace, tag).read(), equalTo("second")); when(stepContext.getNextFiredTimer((Coder) windowCoder)) .thenReturn( TimerData.of( SimpleParDoFn.CLEANUP_TIMER_ID, secondWindowNamespace, secondWindow.maxTimestamp().plus(Duration.millis(1L)), secondWindow.maxTimestamp().plus(Duration.millis(1L)), TimeDomain.EVENT_TIME)) .thenReturn(null); // And this should clean up the second window parDoFn.processTimers(); assertThat(stateInternals.state(firstWindowNamespace, tag).read(), nullValue()); assertThat(stateInternals.state(secondWindowNamespace, tag).read(), nullValue()); }
@Override public boolean dropTable(TableIdentifier identifier, boolean purge) { if (!isValidIdentifier(identifier)) { return false; } String database = identifier.namespace().level(0); TableOperations ops = newTableOps(identifier); TableMetadata lastMetadata = null; if (purge) { try { lastMetadata = ops.current(); } catch (NotFoundException e) { LOG.warn( "Failed to load table metadata for table: {}, continuing drop without purge", identifier, e); } } try { clients.run( client -> { client.dropTable( database, identifier.name(), false /* do not delete data */, false /* throw NoSuchObjectException if the table doesn't exist */); return null; }); if (purge && lastMetadata != null) { CatalogUtil.dropTableData(ops.io(), lastMetadata); } LOG.info("Dropped table: {}", identifier); return true; } catch (NoSuchTableException | NoSuchObjectException e) { LOG.info("Skipping drop, table does not exist: {}", identifier, e); return false; } catch (TException e) { throw new RuntimeException("Failed to drop " + identifier, e); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new RuntimeException("Interrupted in call to dropTable", e); } }
@Test public void testCreateTableCustomSortOrder() throws Exception { Schema schema = getTestSchema(); PartitionSpec spec = PartitionSpec.builderFor(schema).bucket("data", 16).build(); SortOrder order = SortOrder.builderFor(schema).asc("id", NULLS_FIRST).build(); TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl"); try { Table table = catalog .buildTable(tableIdent, schema) .withPartitionSpec(spec) .withSortOrder(order) .create(); SortOrder sortOrder = table.sortOrder(); assertThat(sortOrder.orderId()).as("Order ID must match").isEqualTo(1); assertThat(sortOrder.fields()).as("Order must have 1 field").hasSize(1); assertThat(sortOrder.fields().get(0).direction()).as("Direction must match ").isEqualTo(ASC); assertThat(sortOrder.fields().get(0).nullOrder()) .as("Null order must match ") .isEqualTo(NULLS_FIRST); Transform<?, ?> transform = Transforms.identity(Types.IntegerType.get()); assertThat(sortOrder.fields().get(0).transform()) .as("Transform must match") .isEqualTo(transform); assertThat(hmsTableParameters()) .containsEntry(DEFAULT_SORT_ORDER, SortOrderParser.toJson(table.sortOrder())); } finally { catalog.dropTable(tableIdent); } }
public synchronized Schema create(URI id, String refFragmentPathDelimiters) { URI normalizedId = id.normalize(); if (!schemas.containsKey(normalizedId)) { URI baseId = removeFragment(id).normalize(); if (!schemas.containsKey(baseId)) { logger.debug("Reading schema: " + baseId); final JsonNode baseContent = contentResolver.resolve(baseId); schemas.put(baseId, new Schema(baseId, baseContent, null)); } final Schema baseSchema = schemas.get(baseId); if (normalizedId.toString().contains("#")) { JsonNode childContent = fragmentResolver.resolve(baseSchema.getContent(), '#' + id.getFragment(), refFragmentPathDelimiters); schemas.put(normalizedId, new Schema(normalizedId, childContent, baseSchema)); } } return schemas.get(normalizedId); }
@Test public void createWithEmbeddedSelfRef() throws URISyntaxException { URI schemaUri = getClass().getResource("/schema/embeddedRef.json").toURI(); SchemaStore schemaStore = new SchemaStore(); Schema topSchema = schemaStore.create(schemaUri, "#/."); Schema embeddedSchema = schemaStore.create(topSchema, "#/definitions/embedded", "#/."); Schema selfRefSchema = schemaStore.create(embeddedSchema, "#", "#/."); assertThat(topSchema, is(sameInstance(selfRefSchema))); }
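A JDK-only sketch (assumed structure) of the caching idea behind the schema store above: ids are normalized before lookup, so logically identical URIs resolve to the same cached instance, which is what makes the self-reference in the test come back as the same object.
import java.net.URI;
import java.util.HashMap;
import java.util.Map;

public class UriCacheSketch {
    private final Map<URI, Object> cache = new HashMap<>();

    Object getOrCreate(URI id) {
        // normalize() collapses "a/../" segments so equivalent ids share one entry
        return cache.computeIfAbsent(id.normalize(), key -> new Object());
    }

    public static void main(String[] args) {
        UriCacheSketch store = new UriCacheSketch();
        Object a = store.getOrCreate(URI.create("http://example.com/a/../schema.json"));
        Object b = store.getOrCreate(URI.create("http://example.com/schema.json"));
        System.out.println(a == b); // true: normalization collapses the ids
    }
}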
@Override public void apply(IntentOperationContext<FlowRuleIntent> context) { Optional<IntentData> toUninstall = context.toUninstall(); Optional<IntentData> toInstall = context.toInstall(); if (toInstall.isPresent() && toUninstall.isPresent()) { Intent intentToInstall = toInstall.get().intent(); if (requireNonDisruptive(intentToInstall) && INSTALLED.equals(toUninstall.get().state())) { reallocate(context); return; } } if (!toInstall.isPresent() && !toUninstall.isPresent()) { // Nothing to do. intentInstallCoordinator.intentInstallSuccess(context); return; } List<FlowRuleIntent> uninstallIntents = context.intentsToUninstall(); List<FlowRuleIntent> installIntents = context.intentsToInstall(); List<FlowRule> flowRulesToUninstall; List<FlowRule> flowRulesToInstall; if (toUninstall.isPresent()) { // Remove tracked resource from both Intent and installable Intents. trackIntentResources(toUninstall.get(), uninstallIntents, REMOVE); // Retrieves all flow rules from all flow rule Intents. flowRulesToUninstall = uninstallIntents.stream() .map(FlowRuleIntent::flowRules) .flatMap(Collection::stream) .filter(flowRule -> flowRuleService.getFlowEntry(flowRule) != null) .collect(Collectors.toList()); } else { // No flow rules to be uninstalled. flowRulesToUninstall = Collections.emptyList(); } if (toInstall.isPresent()) { // Track resource from both Intent and installable Intents. trackIntentResources(toInstall.get(), installIntents, ADD); // Retrieves all flow rules from all flow rule Intents. flowRulesToInstall = installIntents.stream() .map(FlowRuleIntent::flowRules) .flatMap(Collection::stream) .collect(Collectors.toList()); } else { // No flow rules to be installed. flowRulesToInstall = Collections.emptyList(); } List<FlowRule> flowRuleToModify; List<FlowRule> dontTouch; // If both uninstall/install lists contained equal (= match conditions are equal) FlowRules, // omit them from the remove list, since they will/should be overwritten by install flowRuleToModify = flowRulesToInstall.stream() .filter(flowRule -> flowRulesToUninstall.stream().anyMatch(flowRule::equals)) .collect(Collectors.toList()); // If both contained exactMatch-ing FlowRules, remove from both lists, // since it will result in a no-op. dontTouch = flowRulesToInstall.stream() .filter(flowRule -> flowRulesToUninstall.stream().anyMatch(flowRule::exactMatch)) .collect(Collectors.toList()); flowRulesToUninstall.removeAll(flowRuleToModify); flowRulesToUninstall.removeAll(dontTouch); flowRulesToInstall.removeAll(flowRuleToModify); flowRulesToInstall.removeAll(dontTouch); flowRuleToModify.removeAll(dontTouch); if (flowRulesToInstall.isEmpty() && flowRulesToUninstall.isEmpty() && flowRuleToModify.isEmpty()) { // There are no flow rules to install/uninstall intentInstallCoordinator.intentInstallSuccess(context); return; } FlowRuleOperations.Builder builder = FlowRuleOperations.builder(); // Add flows flowRulesToInstall.forEach(builder::add); // Modify flows flowRuleToModify.forEach(builder::modify); // Remove flows flowRulesToUninstall.forEach(builder::remove); FlowRuleOperationsContext flowRuleOperationsContext = new FlowRuleOperationsContext() { @Override public void onSuccess(FlowRuleOperations ops) { intentInstallCoordinator.intentInstallSuccess(context); } @Override public void onError(FlowRuleOperations ops) { intentInstallCoordinator.intentInstallFailed(context); } }; FlowRuleOperations operations = builder.build(flowRuleOperationsContext); log.debug("applying intent {} -> {} with {} rules: {}", toUninstall.map(x -> x.key().toString()).orElse("<empty>"), toInstall.map(x -> x.key().toString()).orElse("<empty>"), operations.stages().stream().mapToLong(Set::size).sum(), operations.stages()); flowRuleService.apply(operations); }
@Test public void testNoAnyIntentToApply() { IntentData toInstall = null; IntentData toUninstall = null; IntentOperationContext<FlowRuleIntent> operationContext; IntentInstallationContext context = new IntentInstallationContext(toUninstall, toInstall); operationContext = new IntentOperationContext<>(ImmutableList.of(), ImmutableList.of(), context); installer.apply(operationContext); IntentOperationContext successContext = intentInstallCoordinator.successContext; assertEquals(successContext, operationContext); assertEquals(0, flowRuleService.flowRulesRemove.size()); assertEquals(0, flowRuleService.flowRulesAdd.size()); assertEquals(0, flowRuleService.flowRulesModify.size()); }
@Override public Collection<V> values() { return mInternalMap.values(); }
@Test public void values() { String x1 = new String("x"); String x2 = new String("x"); assertNull(mMap.put(x1, "z")); assertNull(mMap.put(x2, "z")); Collection<String> v = mMap.values(); assertEquals(2, v.size()); v.forEach(val -> assertEquals("z", val)); assertEquals("z", mMap.remove(x1)); assertEquals(1, v.size()); }
public Set<ContentPack> findAllById(ModelId id) { final DBCursor<ContentPack> result = dbCollection.find(DBQuery.is(Identified.FIELD_META_ID, id)); return ImmutableSet.copyOf((Iterable<ContentPack>) result); }
@Test @MongoDBFixtures("ContentPackPersistenceServiceTest.json") public void findAllByIdWithInvalidId() { final Set<ContentPack> contentPacks = contentPackPersistenceService.findAllById(ModelId.of("does-not-exist")); assertThat(contentPacks).isEmpty(); }
public void getFields( RowMetaInterface inputRowMeta, String name, RowMetaInterface[] info, StepMeta nextStep, VariableSpace space, Repository repository, IMetaStore metaStore ) throws KettleStepException { String realresultfieldname = space.environmentSubstitute( resultfieldname ); if ( !Utils.isEmpty( realresultfieldname ) ) { ValueMetaInterface v = new ValueMetaBoolean( realresultfieldname ); v.setOrigin( name ); inputRowMeta.addValueMeta( v ); } String realcardtype = space.environmentSubstitute( cardtype ); if ( !Utils.isEmpty( realcardtype ) ) { ValueMetaInterface v = new ValueMetaString( realcardtype ); v.setOrigin( name ); inputRowMeta.addValueMeta( v ); } String realnotvalidmsg = space.environmentSubstitute( notvalidmsg ); if ( !Utils.isEmpty( realnotvalidmsg ) ) { ValueMetaInterface v = new ValueMetaString( realnotvalidmsg ); v.setOrigin( name ); inputRowMeta.addValueMeta( v ); } }
@Test public void testGetFields() throws KettleStepException { CreditCardValidatorMeta meta = new CreditCardValidatorMeta(); meta.setDefault(); meta.setResultFieldName( "The Result Field" ); meta.setCardType( "The Card Type Field" ); meta.setNotValidMsg( "Is Card Valid" ); RowMeta rowMeta = new RowMeta(); meta.getFields( rowMeta, "this step", null, null, new Variables(), null, null ); assertEquals( 3, rowMeta.size() ); assertEquals( "The Result Field", rowMeta.getValueMeta( 0 ).getName() ); assertEquals( ValueMetaInterface.TYPE_BOOLEAN, rowMeta.getValueMeta( 0 ).getType() ); assertEquals( "this step", rowMeta.getValueMeta( 0 ).getOrigin() ); assertEquals( "The Card Type Field", rowMeta.getValueMeta( 1 ).getName() ); assertEquals( ValueMetaInterface.TYPE_STRING, rowMeta.getValueMeta( 1 ).getType() ); assertEquals( "this step", rowMeta.getValueMeta( 1 ).getOrigin() ); assertEquals( "Is Card Valid", rowMeta.getValueMeta( 2 ).getName() ); assertEquals( ValueMetaInterface.TYPE_STRING, rowMeta.getValueMeta( 2 ).getType() ); assertEquals( "this step", rowMeta.getValueMeta( 2 ).getOrigin() ); }
public static String trim(String str) { return str == null ? null : str.trim(); }
@Test public void testTrim() { Assert.assertNull(StringUtil.trim(null)); Assert.assertEquals("", StringUtil.trim("")); Assert.assertEquals("foo", StringUtil.trim("foo ")); }
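A tiny demo of the null-safe contract the test pins down: null in, null out; otherwise identical to String.trim().
public class TrimDemo {
    static String trim(String s) {
        return s == null ? null : s.trim();
    }

    public static void main(String[] args) {
        System.out.println(trim(null));                 // null, no NullPointerException
        System.out.println("[" + trim("  foo ") + "]"); // [foo]
    }
}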
public static Deserializer<LacpBaseTlv> deserializer() { return (data, offset, length) -> { checkInput(data, offset, length, LENGTH - HEADER_LENGTH); LacpBaseTlv lacpBaseTlv = new LacpBaseTlv(); ByteBuffer bb = ByteBuffer.wrap(data, offset, length); lacpBaseTlv.setSystemPriority(bb.getShort()); byte[] mac = new byte[6]; bb.get(mac); lacpBaseTlv.setSystemMac(MacAddress.valueOf(mac)); lacpBaseTlv.setKey(bb.getShort()); lacpBaseTlv.setPortPriority(bb.getShort()); lacpBaseTlv.setPort(bb.getShort()); lacpBaseTlv.setState(bb.get()); return lacpBaseTlv; }; }
@Test public void deserializer() throws Exception { LacpBaseTlv actorInfo = LacpBaseTlv.deserializer().deserialize(data, 0, data.length); assertEquals(SYS_PRIORITY, actorInfo.getSystemPriority()); assertEquals(SYS_MAC, actorInfo.getSystemMac()); assertEquals(KEY, actorInfo.getKey()); assertEquals(PORT_PRIORITY, actorInfo.getPortPriority()); assertEquals(PORT, actorInfo.getPort()); assertEquals(STATE, actorInfo.getState().toByte()); }
static int determineCoordinatorReservoirSize(int numPartitions) { int reservoirSize = numPartitions * COORDINATOR_TARGET_PARTITIONS_MULTIPLIER; if (reservoirSize < COORDINATOR_MIN_RESERVOIR_SIZE) { // adjust it up and still make reservoirSize divisible by numPartitions int remainder = COORDINATOR_MIN_RESERVOIR_SIZE % numPartitions; reservoirSize = COORDINATOR_MIN_RESERVOIR_SIZE + (numPartitions - remainder); } else if (reservoirSize > COORDINATOR_MAX_RESERVOIR_SIZE) { // adjust it down and still make reservoirSize divisible by numPartitions int remainder = COORDINATOR_MAX_RESERVOIR_SIZE % numPartitions; reservoirSize = COORDINATOR_MAX_RESERVOIR_SIZE - remainder; } return reservoirSize; }
@Test public void testCoordinatorReservoirSize() { // adjusted to over min threshold of 10_000 and is divisible by number of partitions (3) assertThat(SketchUtil.determineCoordinatorReservoirSize(3)).isEqualTo(10_002); // within range: numPartitions (123) times the target multiplier (100) assertThat(SketchUtil.determineCoordinatorReservoirSize(123)).isEqualTo(12_300); // adjusted to below max threshold of 1_000_000 and is divisible by number of partitions (10_123) assertThat(SketchUtil.determineCoordinatorReservoirSize(10_123)) .isEqualTo(1_000_000 - (1_000_000 % 10_123)); }
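A worked, self-contained version of the clamping arithmetic above. The constant values (multiplier 100, min 10_000, max 1_000_000) are inferred from what the test asserts, not taken from the source directly.
public class ReservoirSizeSketch {
    static final int MULTIPLIER = 100;
    static final int MIN = 10_000;
    static final int MAX = 1_000_000;

    static int reservoirSize(int numPartitions) {
        int size = numPartitions * MULTIPLIER;
        if (size < MIN) {
            // round up past MIN to the next multiple of numPartitions
            size = MIN + (numPartitions - MIN % numPartitions);
        } else if (size > MAX) {
            // round down to the largest multiple of numPartitions under MAX
            size = MAX - MAX % numPartitions;
        }
        return size;
    }

    public static void main(String[] args) {
        System.out.println(reservoirSize(3));      // 10002  (10_000 % 3 = 1, so 10_000 + 2)
        System.out.println(reservoirSize(123));    // 12300  (123 * 100, already in range)
        System.out.println(reservoirSize(10_123)); // 992054 (1_000_000 - 1_000_000 % 10_123)
    }
}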
@Override protected void doStop() throws Exception { // First stop the polling process if (future != null) { future.cancel(true); } super.doStop(); }
@Test public void doStop() { // intentionally empty placeholder }
public boolean processRow( StepMetaInterface smi, StepDataInterface sdi ) throws KettleException { Object[] r = getRow(); // get row, set busy! // no more input to be expected... if ( r == null ) { setOutputDone(); return false; } putRow( getInputRowMeta(), r ); // copy row to possible alternate rowset(s). if ( checkFeedback( getLinesRead() ) ) { if ( log.isBasic() ) { logBasic( BaseMessages.getString( PKG, "DummyTrans.Log.LineNumber" ) + getLinesRead() ); } } return true; }
@Test public void testDummyTransWritesOutputWithInputRow() throws KettleException { DummyTrans dummy = new DummyTrans( stepMockHelper.stepMeta, stepMockHelper.stepDataInterface, 0, stepMockHelper.transMeta, stepMockHelper.trans ); dummy.init( stepMockHelper.initStepMetaInterface, stepMockHelper.initStepDataInterface ); Object[] row = new Object[] { "abcd" }; RowSet rowSet = stepMockHelper.getMockInputRowSet( row ); RowMetaInterface inputRowMeta = mock( RowMetaInterface.class ); when( inputRowMeta.clone() ).thenReturn( inputRowMeta ); when( rowSet.getRowMeta() ).thenReturn( inputRowMeta ); dummy.addRowSetToInputRowSets( rowSet ); RowSet outputRowSet = mock( RowSet.class ); dummy.addRowSetToOutputRowSets( outputRowSet ); when( outputRowSet.putRow( inputRowMeta, row ) ).thenReturn( true ); dummy.processRow( stepMockHelper.processRowsStepMetaInterface, stepMockHelper.processRowsStepDataInterface ); verify( outputRowSet, times( 1 ) ).putRow( inputRowMeta, row ); }
@Override public Set<KubevirtNetwork> networks() { return ImmutableSet.copyOf(networkStore.networks()); }
@Test public void testGetNetworks() { createBasicNetworks(); assertEquals("Number of network did not match", 1, target.networks().size()); }
public synchronized int sendFetches() { final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests(); sendFetchesInternal( fetchRequests, (fetchTarget, data, clientResponse) -> { synchronized (Fetcher.this) { handleFetchSuccess(fetchTarget, data, clientResponse); } }, (fetchTarget, data, error) -> { synchronized (Fetcher.this) { handleFetchFailure(fetchTarget, data, error); } }); return fetchRequests.size(); }
@Test public void testFetchForgetTopicIdWhenReplaced() { buildFetcher(); TopicIdPartition fooWithOldTopicId = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); TopicIdPartition fooWithNewTopicId = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); // Assign foo with old topic id. subscriptions.assignFromUser(singleton(fooWithOldTopicId.topicPartition())); client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, singleton(fooWithOldTopicId), tp -> validLeaderEpoch)); subscriptions.seek(fooWithOldTopicId.topicPartition(), 0); // Fetch should use latest version. assertEquals(1, sendFetches()); client.prepareResponse( fetchRequestMatcher(ApiKeys.FETCH.latestVersion(), singletonMap(fooWithOldTopicId, new PartitionData( fooWithOldTopicId.topicId(), 0, FetchRequest.INVALID_LOG_START_OFFSET, fetchSize, Optional.of(validLeaderEpoch)) ), emptyList() ), fullFetchResponse(1, fooWithOldTopicId, records, Errors.NONE, 100L, 0) ); consumerClient.poll(time.timer(0)); assertTrue(fetcher.hasCompletedFetches()); fetchRecords(); // Replace foo with old topic id with foo with new topic id. subscriptions.assignFromUser(singleton(fooWithNewTopicId.topicPartition())); client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, singleton(fooWithNewTopicId), tp -> validLeaderEpoch)); subscriptions.seek(fooWithNewTopicId.topicPartition(), 0); // Fetch should use latest version. assertEquals(1, sendFetches()); assertFalse(fetcher.hasCompletedFetches()); // foo with old topic id should be removed from the session. client.prepareResponse( fetchRequestMatcher(ApiKeys.FETCH.latestVersion(), singletonMap(fooWithNewTopicId, new PartitionData( fooWithNewTopicId.topicId(), 0, FetchRequest.INVALID_LOG_START_OFFSET, fetchSize, Optional.of(validLeaderEpoch)) ), singletonList(fooWithOldTopicId) ), fullFetchResponse(1, fooWithNewTopicId, records, Errors.NONE, 100L, 0) ); consumerClient.poll(time.timer(0)); assertTrue(fetcher.hasCompletedFetches()); fetchRecords(); }
@Override public boolean isVersionCompatible(String hiveVersion, String dbVersion) { hiveVersion = getEquivalentVersion(hiveVersion); dbVersion = getEquivalentVersion(dbVersion); if (hiveVersion.equals(dbVersion)) { return true; } String[] hiveVerParts = hiveVersion.split("\\."); String[] dbVerParts = dbVersion.split("\\."); if (hiveVerParts.length != 3 || dbVerParts.length != 3) { // these are non standard version numbers. can't perform the // comparison on these, so assume that they are incompatible return false; } hiveVerParts = hiveVersion.split("\\.|-"); dbVerParts = dbVersion.split("\\.|-"); for (int i = 0; i < Math.min(hiveVerParts.length, dbVerParts.length); i++) { int compare = compareVersion(dbVerParts[i], hiveVerParts[i]); if (compare != 0) { return compare > 0; } } return hiveVerParts.length > dbVerParts.length; }
@Test public void testIsVersionCompatible() throws Exception { // first argument is hiveVersion, it is compatible if 2nd argument - dbVersion is // greater than or equal to it // check the compatible case IMetaStoreSchemaInfo metastoreSchemaInfo = MetaStoreSchemaInfoFactory.get(MetastoreConf.newMetastoreConf()); Assert.assertTrue(metastoreSchemaInfo.isVersionCompatible("0.0.1", "0.0.1")); Assert.assertTrue(metastoreSchemaInfo.isVersionCompatible("0.0.1", "0.0.2")); Assert.assertTrue(metastoreSchemaInfo.isVersionCompatible("1.0.2", "2.0.1")); Assert.assertTrue(metastoreSchemaInfo.isVersionCompatible("0.0.9", "9.0.0")); Assert.assertTrue(metastoreSchemaInfo.isVersionCompatible("4.0.0-alpha-1", "4.0.0-alpha-2")); Assert.assertTrue(metastoreSchemaInfo.isVersionCompatible("4.0.0-alpha-1", "4.0.0-alpha")); Assert.assertTrue(metastoreSchemaInfo.isVersionCompatible("4.0.0-alpha-1", "4.0.0")); Assert.assertTrue(metastoreSchemaInfo.isVersionCompatible("4.0.0-alpha-1", "4.0.1")); Assert.assertTrue(metastoreSchemaInfo.isVersionCompatible("4.0.0-alpha-1", "4.0.0-beta")); // check equivalent versions, should be compatible Assert.assertTrue(metastoreSchemaInfo.isVersionCompatible("0.13.0", "0.13.1")); Assert.assertTrue(metastoreSchemaInfo.isVersionCompatible("0.13.1", "0.13.0")); // check incompatible versions Assert.assertFalse(metastoreSchemaInfo.isVersionCompatible("0.1.1", "0.1.0")); Assert.assertFalse(metastoreSchemaInfo.isVersionCompatible("4.0.1", "0.1.0")); Assert.assertFalse(metastoreSchemaInfo.isVersionCompatible("4.0.1", "4.0.0-alpha-1")); Assert.assertFalse(metastoreSchemaInfo.isVersionCompatible("4.0.0", "4.0.0-alpha-1")); Assert.assertFalse(metastoreSchemaInfo.isVersionCompatible("4.0.0-alpha-2", "4.0.0-alpha-1")); Assert.assertFalse(metastoreSchemaInfo.isVersionCompatible("4.0.0-alpha", "4.0.0-alpha-1")); Assert.assertFalse(metastoreSchemaInfo.isVersionCompatible("4.0.0-beta", "4.0.0-alpha-1")); }
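A stand-alone sketch (not Hive's exact compareVersion) of the comparison order the test pins down: split on '.' and '-', compare parts left to right (db must be >= hive), and treat a longer hive version (e.g. a trailing "-alpha-1") as older than its shorter prefix.
public class VersionCompatSketch {
    static boolean isCompatible(String hive, String db) {
        if (hive.equals(db)) return true;
        String[] h = hive.split("\\.|-");
        String[] d = db.split("\\.|-");
        for (int i = 0; i < Math.min(h.length, d.length); i++) {
            int cmp = compare(d[i], h[i]);
            if (cmp != 0) return cmp > 0; // db part must be >= hive part
        }
        // "4.0.0-alpha-1" (longer) is older than its prefix "4.0.0"
        return h.length > d.length;
    }

    static int compare(String a, String b) {
        try {
            return Integer.compare(Integer.parseInt(a), Integer.parseInt(b));
        } catch (NumberFormatException e) {
            return a.compareTo(b); // fall back to lexicographic for "alpha"/"beta"
        }
    }

    public static void main(String[] args) {
        System.out.println(isCompatible("4.0.0-alpha-1", "4.0.0")); // true
        System.out.println(isCompatible("4.0.0", "4.0.0-alpha-1")); // false
        System.out.println(isCompatible("4.0.0-alpha-1", "4.0.0-beta")); // true
    }
}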
@VisibleForTesting static void setProxyProperties(Proxy proxy) { String protocol = proxy.getProtocol(); setPropertySafe(protocol + ".proxyHost", proxy.getHost()); setPropertySafe(protocol + ".proxyPort", String.valueOf(proxy.getPort())); setPropertySafe(protocol + ".proxyUser", proxy.getUsername()); setPropertySafe(protocol + ".proxyPassword", proxy.getPassword()); setPropertySafe("http.nonProxyHosts", proxy.getNonProxyHosts()); }
@Test public void testSetProxyProperties() { Proxy httpProxy = new Proxy(); httpProxy.setProtocol("http"); httpProxy.setHost("host"); httpProxy.setPort(1080); httpProxy.setUsername("user"); httpProxy.setPassword("pass"); httpProxy.setNonProxyHosts("non proxy hosts"); MavenSettingsProxyProvider.setProxyProperties(httpProxy); Assert.assertEquals("host", System.getProperty("http.proxyHost")); Assert.assertEquals("1080", System.getProperty("http.proxyPort")); Assert.assertEquals("user", System.getProperty("http.proxyUser")); Assert.assertEquals("pass", System.getProperty("http.proxyPassword")); Assert.assertEquals("non proxy hosts", System.getProperty("http.nonProxyHosts")); Proxy httpsProxy = new Proxy(); httpsProxy.setProtocol("https"); httpsProxy.setHost("https host"); httpsProxy.setPort(1443); httpsProxy.setUsername("https user"); httpsProxy.setPassword("https pass"); MavenSettingsProxyProvider.setProxyProperties(httpsProxy); Assert.assertEquals("https host", System.getProperty("https.proxyHost")); Assert.assertEquals("1443", System.getProperty("https.proxyPort")); Assert.assertEquals("https user", System.getProperty("https.proxyUser")); Assert.assertEquals("https pass", System.getProperty("https.proxyPassword")); }
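A direct JDK illustration of the mechanism above: java.net proxy selection is driven by well-known system properties keyed by protocol prefix, so setting them per protocol is all the provider needs to do.
public class ProxyPropsDemo {
    static void setProxy(String protocol, String host, int port) {
        System.setProperty(protocol + ".proxyHost", host);
        System.setProperty(protocol + ".proxyPort", String.valueOf(port));
    }

    public static void main(String[] args) {
        setProxy("http", "proxy.example.com", 1080); // hypothetical host for illustration
        System.out.println(System.getProperty("http.proxyHost")); // proxy.example.com
        System.out.println(System.getProperty("http.proxyPort")); // 1080
    }
}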
@Override public ApiResult<TopicPartition, ListOffsetsResultInfo> handleResponse( Node broker, Set<TopicPartition> keys, AbstractResponse abstractResponse ) { ListOffsetsResponse response = (ListOffsetsResponse) abstractResponse; Map<TopicPartition, ListOffsetsResultInfo> completed = new HashMap<>(); Map<TopicPartition, Throwable> failed = new HashMap<>(); List<TopicPartition> unmapped = new ArrayList<>(); Set<TopicPartition> retriable = new HashSet<>(); for (ListOffsetsTopicResponse topic : response.topics()) { for (ListOffsetsPartitionResponse partition : topic.partitions()) { TopicPartition topicPartition = new TopicPartition(topic.name(), partition.partitionIndex()); Errors error = Errors.forCode(partition.errorCode()); if (!offsetTimestampsByPartition.containsKey(topicPartition)) { log.warn("ListOffsets response includes unknown topic partition {}", topicPartition); } else if (error == Errors.NONE) { Optional<Integer> leaderEpoch = (partition.leaderEpoch() == ListOffsetsResponse.UNKNOWN_EPOCH) ? Optional.empty() : Optional.of(partition.leaderEpoch()); completed.put( topicPartition, new ListOffsetsResultInfo(partition.offset(), partition.timestamp(), leaderEpoch)); } else { handlePartitionError(topicPartition, error, failed, unmapped, retriable); } } } // Sanity-check if the current leader for these partitions returned results for all of them for (TopicPartition topicPartition : keys) { if (unmapped.isEmpty() && !completed.containsKey(topicPartition) && !failed.containsKey(topicPartition) && !retriable.contains(topicPartition) ) { ApiException sanityCheckException = new ApiException( "The response from broker " + broker.id() + " did not contain a result for topic partition " + topicPartition); log.error( "ListOffsets request for topic partition {} failed sanity check", topicPartition, sanityCheckException); failed.put(topicPartition, sanityCheckException); } } return new ApiResult<>(completed, failed, unmapped); }
@Test public void testHandleLookupRetriablePartitionInvalidMetadataResponse() {
    TopicPartition errorPartition = t0p0;
    Errors error = Errors.NOT_LEADER_OR_FOLLOWER;
    Map<TopicPartition, Short> errorsByPartition = new HashMap<>();
    errorsByPartition.put(errorPartition, error.code());
    ApiResult<TopicPartition, ListOffsetsResultInfo> result = handleResponse(createResponse(errorsByPartition));
    // Invalid-metadata errors should be retried from the lookup stage, because the
    // partition-to-leader mappings need to be recalculated.
    List<TopicPartition> unmapped = new ArrayList<>();
    unmapped.add(errorPartition);
    Set<TopicPartition> completed = new HashSet<>(offsetTimestampsByPartition.keySet());
    completed.removeAll(unmapped);
    assertResult(result, completed, emptyMap(), unmapped, emptySet());
}
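handlePartitionError is invoked by handleResponse above but not shown; a sketch of the classification this test depends on, assuming the usual Kafka error taxonomy (InvalidMetadataException and RetriableException are from org.apache.kafka.common.errors; the exact routing is an assumption):

// Hypothetical sketch: route each partition error into the bucket handleResponse expects.
private void handlePartitionError(
        TopicPartition topicPartition,
        Errors error,
        Map<TopicPartition, Throwable> failed,
        List<TopicPartition> unmapped,
        Set<TopicPartition> retriable
) {
    if (error.exception() instanceof InvalidMetadataException) {
        // e.g. NOT_LEADER_OR_FOLLOWER: the cached leader is stale, so the key goes
        // back to the lookup stage to rediscover the partition leader.
        unmapped.add(topicPartition);
    } else if (error.exception() instanceof RetriableException) {
        retriable.add(topicPartition);
    } else {
        failed.put(topicPartition, error.exception());
    }
}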
@Override public void write(final Path file, final Distribution distribution, final LoginCallback prompt) throws BackgroundException { final Path container = session.getFeature(PathContainerService.class).getContainer(file); try { if(null == distribution.getId()) { // No existing configuration if(log.isDebugEnabled()) { log.debug(String.format("No existing distribution found for method %s", distribution.getMethod())); } if(distribution.getMethod().equals(Distribution.STREAMING)) { distribution.setId(this.createStreamingDistribution(container, distribution).getId()); } else if(distribution.getMethod().equals(Distribution.DOWNLOAD)) { distribution.setId(this.createDownloadDistribution(container, distribution).getId()); } else if(distribution.getMethod().equals(Distribution.CUSTOM) || distribution.getMethod().equals(Distribution.WEBSITE_CDN)) { distribution.setId(this.createCustomDistribution(container, distribution).getId()); } } else { if(distribution.getMethod().equals(Distribution.DOWNLOAD)) { distribution.setEtag(this.updateDownloadDistribution(container, distribution).getETag()); } else if(distribution.getMethod().equals(Distribution.STREAMING)) { distribution.setEtag(this.updateStreamingDistribution(container, distribution).getETag()); } else if(distribution.getMethod().equals(Distribution.CUSTOM) || distribution.getMethod().equals(Distribution.WEBSITE_CDN)) { distribution.setEtag(this.updateCustomDistribution(container, distribution).getETag()); } } } catch(AmazonClientException e) { throw new AmazonServiceExceptionMappingService().map("Cannot write CDN configuration", e); } }
@Test public void testWriteNewDownload() throws Exception { final AtomicBoolean set = new AtomicBoolean(); final CloudFrontDistributionConfiguration configuration = new CloudFrontDistributionConfiguration(session, new S3LocationFeature(session), new DisabledX509TrustManager(), new DefaultX509KeyManager()) { @Override protected UpdateDistributionResult updateDownloadDistribution(final Path container, final Distribution distribution) { fail(); return null; } @Override protected com.amazonaws.services.cloudfront.model.Distribution createDownloadDistribution(final Path container, final Distribution distribution) { set.set(true); return new com.amazonaws.services.cloudfront.model.Distribution().withId(""); } }; final Path container = new Path(UUID.randomUUID().toString(), EnumSet.of(Path.Type.directory, Path.Type.volume)); final Distribution distribution = new Distribution(Distribution.DOWNLOAD, true); configuration.write(container, distribution, new DisabledLoginCallback()); assertTrue(set.get()); }
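A hedged usage sketch of the create-vs-update dispatch in write(); configuration is a CloudFrontDistributionConfiguration as built in the test above, and the bucket name is arbitrary:

// A Distribution with a null id takes the create path; write() fills in the id.
final Path container = new Path("my-bucket", EnumSet.of(Path.Type.directory, Path.Type.volume));
final Distribution distribution = new Distribution(Distribution.DOWNLOAD, true);
configuration.write(container, distribution, new DisabledLoginCallback());
// A second write() with the now-populated id takes the update path and refreshes the ETag.
configuration.write(container, distribution, new DisabledLoginCallback());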
void prioritizeCopiesAndShiftUps(List<MigrationInfo> migrations) {
    for (int i = 0; i < migrations.size(); i++) {
        prioritize(migrations, i);
    }
    if (logger.isFinestEnabled()) {
        StringBuilder s = new StringBuilder("Migration order after prioritization: [");
        int ix = 0;
        for (MigrationInfo migration : migrations) {
            s.append("\n\t").append(ix++).append("- ").append(migration).append(",");
        }
        // Only strip the trailing comma when something was appended; otherwise the
        // opening bracket would be deleted for an empty migration list.
        if (!migrations.isEmpty()) {
            s.deleteCharAt(s.length() - 1);
        }
        s.append("]");
        logger.finest(s.toString());
    }
}
@Test public void testSingleMigrationPrioritization() throws UnknownHostException { List<MigrationInfo> migrations = new ArrayList<>(); final MigrationInfo migration1 = new MigrationInfo(0, null, new PartitionReplica(new Address("localhost", 5701), uuids[0]), -1, -1, -1, 0); migrations.add(migration1); migrationPlanner.prioritizeCopiesAndShiftUps(migrations); assertEquals(singletonList(migration1), migrations); }
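prioritize(migrations, i) is not shown; a simplified, hypothetical sketch of its intent — migrations that create a replica (COPY / SHIFT UP, assumed here to be recognizable by having no current source replica index) are moved ahead of plain MOVE migrations, since they restore redundancy. The stopping condition is an assumption, and the real planner also respects per-partition ordering constraints:

// Hypothetical, simplified sketch: stable-insert a redundancy-restoring migration
// before any earlier migrations that merely relocate data.
void prioritize(List<MigrationInfo> migrations, int i) {
    MigrationInfo migration = migrations.get(i);
    if (migration.getSourceCurrentReplicaIndex() != -1) {
        return; // a MOVE: keeps its position
    }
    int insertionIndex = i;
    while (insertionIndex > 0
            && migrations.get(insertionIndex - 1).getSourceCurrentReplicaIndex() != -1) {
        insertionIndex--;
    }
    if (insertionIndex != i) {
        migrations.add(insertionIndex, migrations.remove(i));
    }
}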
public FloatArrayAsIterable usingTolerance(double tolerance) { return new FloatArrayAsIterable(tolerance(tolerance), iterableSubject()); }
@Test public void usingTolerance_containsExactly_primitiveFloatArray_failure() { expectFailureWhenTestingThat(array(1.1f, TOLERABLE_2POINT2, 3.3f)) .usingTolerance(DEFAULT_TOLERANCE) .containsExactly(array(2.2f, 1.1f)); assertFailureKeys( "value of", "unexpected (1)", "---", "expected", "testing whether", "but was"); assertFailureValue("unexpected (1)", Float.toString(3.3f)); }
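For contrast, a passing variant of the same assertion chain, assuming the standard Truth assertThat entry point and the constants defined by this test class; by default containsExactly ignores order:

// Passes: each expected value has a within-tolerance counterpart in the actual array.
assertThat(array(1.1f, TOLERABLE_2POINT2, 3.3f))
    .usingTolerance(DEFAULT_TOLERANCE)
    .containsExactly(array(2.2f, 1.1f, 3.3f));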
public static List<Path> pluginUrls(Path topPath) throws IOException { boolean containsClassFiles = false; Set<Path> archives = new TreeSet<>(); LinkedList<DirectoryEntry> dfs = new LinkedList<>(); Set<Path> visited = new HashSet<>(); if (isArchive(topPath)) { return Collections.singletonList(topPath); } DirectoryStream<Path> topListing = Files.newDirectoryStream( topPath, PLUGIN_PATH_FILTER ); dfs.push(new DirectoryEntry(topListing)); visited.add(topPath); try { while (!dfs.isEmpty()) { Iterator<Path> neighbors = dfs.peek().iterator; if (!neighbors.hasNext()) { dfs.pop().stream.close(); continue; } Path adjacent = neighbors.next(); if (Files.isSymbolicLink(adjacent)) { try { Path symlink = Files.readSymbolicLink(adjacent); // if symlink is absolute resolve() returns the absolute symlink itself Path parent = adjacent.getParent(); if (parent == null) { continue; } Path absolute = parent.resolve(symlink).toRealPath(); if (Files.exists(absolute)) { adjacent = absolute; } else { continue; } } catch (IOException e) { // See https://issues.apache.org/jira/browse/KAFKA-6288 for a reported // failure. Such a failure at this stage is not easily reproducible and // therefore an exception is caught and ignored after issuing a // warning. This allows class scanning to continue for non-broken plugins. log.warn( "Resolving symbolic link '{}' failed. Ignoring this path.", adjacent, e ); continue; } } if (!visited.contains(adjacent)) { visited.add(adjacent); if (isArchive(adjacent)) { archives.add(adjacent); } else if (isClassFile(adjacent)) { containsClassFiles = true; } else { DirectoryStream<Path> listing = Files.newDirectoryStream( adjacent, PLUGIN_PATH_FILTER ); dfs.push(new DirectoryEntry(listing)); } } } } finally { while (!dfs.isEmpty()) { dfs.pop().stream.close(); } } if (containsClassFiles) { if (archives.isEmpty()) { return Collections.singletonList(topPath); } log.warn("Plugin path contains both java archives and class files. Returning only the" + " archives"); } return Arrays.asList(archives.toArray(new Path[0])); }
@Test public void testPluginUrlsWithRelativeSymlinkForwards() throws Exception { // Since this test case defines a relative symlink within an already included path, the main // assertion of this test is absence of exceptions and correct resolution of paths. createBasicDirectoryLayout(); Files.createDirectories(pluginPath.resolve("connectorB/deps/more")); Files.createSymbolicLink( pluginPath.resolve("connectorB/deps/symlink"), Paths.get("more") ); List<Path> expectedUrls = createBasicExpectedUrls(); expectedUrls.add( Files.createFile(pluginPath.resolve("connectorB/deps/more/converter.jar")) ); assertUrls(expectedUrls, PluginUtils.pluginUrls(pluginPath)); }
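isArchive and isClassFile are used by pluginUrls but not defined above; minimal sketches consistent with that use — the exact extension list is an assumption (java.util.Locale is needed for the case conversion):

// Hypothetical sketches: classify plugin-path entries by file extension.
private static boolean isArchive(Path path) {
    String name = path.toString().toLowerCase(Locale.ROOT);
    return name.endsWith(".jar") || name.endsWith(".zip");
}

private static boolean isClassFile(Path path) {
    return path.toString().toLowerCase(Locale.ROOT).endsWith(".class");
}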