focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Converts the given string to an instance of {@code targetClass} using the
 * converter registered for that class.
 *
 * @param property    source string to convert
 * @param targetClass desired target type; used as the registry lookup key
 * @param <T>         target type
 * @return the converted value
 * @throws MissingFormatArgumentException if no converter is registered for
 *         {@code targetClass}. NOTE(review): IllegalArgumentException would be
 *         the conventional type here, but callers may already catch this one,
 *         so it is kept for compatibility.
 */
@SuppressWarnings("unchecked") // registry is presumably keyed consistently by target class — TODO confirm
public <T> T convert(String property, Class<T> targetClass) {
    final AbstractPropertyConverter<?> converter = converterRegistry.get(targetClass);
    if (converter == null) {
        throw new MissingFormatArgumentException(
                "converter not found, can't convert from String to " + targetClass.getCanonicalName());
    }
    return (T) converter.convert(property);
}
// Verifies Long conversion for a typical value and for both extremes of the
// long range (Long.MAX_VALUE / Long.MIN_VALUE round-trip through String).
@Test
void testConvertLong() {
    assertEquals(100L, (long) compositeConverter.convert("100", Long.class));
    assertEquals(Long.MAX_VALUE, (long) compositeConverter.convert(String.valueOf(Long.MAX_VALUE), Long.class));
    assertEquals(Long.MIN_VALUE, (long) compositeConverter.convert(String.valueOf(Long.MIN_VALUE), Long.class));
}
/**
 * Folds the min/max bounds of every statement group into the given accumulator.
 *
 * @param minMax     accumulator updated in place and returned for chaining
 * @param statements statements to analyse, partitioned into groups first
 * @param lookup     encoded-value lookup used while evaluating each group
 * @return the same {@code minMax} instance, updated
 */
static MinMax findMinMax(MinMax minMax, List<Statement> statements, EncodedValueLookup lookup) {
    // Evaluate each group independently; each call mutates the shared accumulator.
    for (List<Statement> group : CustomModelParser.splitIntoGroup(statements)) {
        findMinMaxForGroup(minMax, group, lookup);
    }
    return minMax;
}
// Exercises findMinMax over MULTIPLY statements: a factor > 1 raises the max,
// a factor < 1 lowers it, and an else-branch producing a negative value must
// be rejected with an IllegalArgumentException.
@Test
public void testFindMaxPriority() {
    List<Statement> statements = new ArrayList<>();
    statements.add(If("true", MULTIPLY, "2"));
    assertEquals(2, findMinMax(new MinMax(0, 1), statements, lookup).max);
    List<Statement> statements2 = new ArrayList<>();
    statements2.add(If("true", MULTIPLY, "0.5"));
    assertEquals(0.5, findMinMax(new MinMax(0, 1), statements2, lookup).max);
    List<Statement> statements3 = new ArrayList<>();
    statements3.add(If("road_class == MOTORWAY", MULTIPLY, "0.5"));
    statements3.add(Else(MULTIPLY, "-0.5"));
    IllegalArgumentException m = assertThrows(IllegalArgumentException.class,
            () -> findMinMax(new MinMax(1, 1), statements3, lookup));
    assertTrue(m.getMessage().startsWith("statement resulted in negative value"));
}
/**
 * Returns the superclass distance between the given throwable's class and the
 * exception type this rule was registered for, delegating to the recursive
 * {@code getDepth(Class, int)} overload starting at depth 0.
 */
public int getDepth(Throwable ex) {
    return getDepth(ex.getClass(), 0);
}
// A rule registered for IOException must not match an unrelated runtime
// exception: getDepth reports -1 when no match is found in the hierarchy.
@Test
public void notFound() {
    RollbackRule rr = new RollbackRule(java.io.IOException.class.getName());
    assertThat(rr.getDepth(new MyRuntimeException(""))).isEqualTo(-1);
}
/**
 * Intercepts {@code $echo} invocations carrying exactly one argument and
 * replies with that argument directly; every other call is passed through to
 * the next invoker in the chain unchanged.
 */
@Override
public Result invoke(Invoker<?> invoker, Invocation inv) throws RpcException {
    final Object[] args = inv.getArguments();
    final boolean isEcho = inv.getMethodName().equals($ECHO) && args != null && args.length == 1;
    if (!isEcho) {
        return invoker.invoke(inv);
    }
    // Echo: short-circuit the chain and send the single argument back as the result.
    return AsyncRpcResult.newDefaultAsyncResult(args[0], inv);
}
// The echo filter must intercept a "$echo" invocation and return the single
// argument ("hello", supplied by the mock invocation) without touching the invoker.
@SuppressWarnings("unchecked")
@Test
void testEcho() {
    Invocation invocation = createMockRpcInvocation();
    Invoker<DemoService> invoker = createMockInvoker(invocation);
    given(invocation.getMethodName()).willReturn("$echo");
    Result filterResult = echoFilter.invoke(invoker, invocation);
    assertEquals("hello", filterResult.getValue());
}
/**
 * Locks the given message queues in batch, one request per broker, and
 * completes with the set of queues that were successfully locked.
 *
 * <p>Failures of individual broker calls are logged but do not fail the
 * returned future — it completes (best effort) with whatever succeeded.
 *
 * @param ctx           proxy call context
 * @param mqSet         queues to lock
 * @param consumerGroup consumer group requesting the locks
 * @param clientId      client requesting the locks
 * @param timeoutMillis per-broker request timeout
 * @return future completing with the successfully locked queues
 */
public CompletableFuture<Set<MessageQueue>> lockBatchMQ(ProxyContext ctx, Set<MessageQueue> mqSet,
        String consumerGroup, String clientId, long timeoutMillis) {
    CompletableFuture<Set<MessageQueue>> future = new CompletableFuture<>();
    try {
        // Thread-safe accumulator: the per-broker lock calls complete concurrently.
        Set<MessageQueue> successSet = new CopyOnWriteArraySet<>();
        Set<AddressableMessageQueue> addressableMessageQueueSet = buildAddressableSet(ctx, mqSet);
        // Group queues by broker name so each broker receives a single batch request.
        Map<String, List<AddressableMessageQueue>> messageQueueSetMap =
                buildAddressableMapByBrokerName(addressableMessageQueueSet);
        List<CompletableFuture<Void>> futureList = new ArrayList<>();
        messageQueueSetMap.forEach((k, v) -> {
            LockBatchRequestBody requestBody = new LockBatchRequestBody();
            requestBody.setConsumerGroup(consumerGroup);
            requestBody.setClientId(clientId);
            requestBody.setMqSet(v.stream().map(AddressableMessageQueue::getMessageQueue).collect(Collectors.toSet()));
            // v.get(0) only supplies the broker address; all entries in v share the same broker.
            CompletableFuture<Void> future0 = serviceManager.getMessageService()
                .lockBatchMQ(ctx, v.get(0), requestBody, timeoutMillis)
                .thenAccept(successSet::addAll);
            futureList.add(FutureUtils.addExecutor(future0, this.executor));
        });
        // Complete with the accumulated successes even if some broker calls failed.
        CompletableFuture.allOf(futureList.toArray(new CompletableFuture[0])).whenComplete((v, t) -> {
            if (t != null) {
                log.error("LockBatchMQ failed, group={}", consumerGroup, t);
            }
            future.complete(successSet);
        });
    } catch (Throwable t) {
        // Synchronous setup failures (e.g. route resolution) fail the returned future.
        log.error("LockBatchMQ exception, group={}", consumerGroup, t);
        future.completeExceptionally(t);
    }
    return FutureUtils.addExecutor(future, this.executor);
}
// Two queues on two different brokers: each broker mock reports its own queue
// as locked, so the aggregated result must equal the full input set.
@Test
public void testLockBatch() throws Throwable {
    Set<MessageQueue> mqSet = new HashSet<>();
    MessageQueue mq1 = new MessageQueue(TOPIC, "broker1", 0);
    AddressableMessageQueue addressableMessageQueue1 = new AddressableMessageQueue(mq1, "127.0.0.1");
    MessageQueue mq2 = new MessageQueue(TOPIC, "broker2", 0);
    AddressableMessageQueue addressableMessageQueue2 = new AddressableMessageQueue(mq2, "127.0.0.1");
    mqSet.add(mq1);
    mqSet.add(mq2);
    when(this.topicRouteService.buildAddressableMessageQueue(any(), any()))
        .thenAnswer(i -> new AddressableMessageQueue((MessageQueue) i.getArguments()[1], "127.0.0.1"));
    when(this.messageService.lockBatchMQ(any(), eq(addressableMessageQueue1), any(), anyLong()))
        .thenReturn(CompletableFuture.completedFuture(Sets.newHashSet(mq1)));
    when(this.messageService.lockBatchMQ(any(), eq(addressableMessageQueue2), any(), anyLong()))
        .thenReturn(CompletableFuture.completedFuture(Sets.newHashSet(mq2)));
    Set<MessageQueue> result = this.consumerProcessor.lockBatchMQ(ProxyContext.create(), mqSet, CONSUMER_GROUP, CLIENT_ID, 1000)
        .get();
    assertThat(result).isEqualTo(mqSet);
}
/**
 * Returns the address derived from the key pair's public key, delegating to
 * the public-key overload.
 */
public static String getAddress(ECKeyPair ecKeyPair) {
    return getAddress(ecKeyPair.getPublicKey());
}
// A well-known sample public key must map to its known (un-prefixed) address.
@Test
public void testGetAddressBigInteger() {
    assertEquals(Keys.getAddress(SampleKeys.PUBLIC_KEY), (SampleKeys.ADDRESS_NO_PREFIX));
}
/**
 * Collector that accumulates into the map produced by {@code mapSupplier} and
 * rejects duplicate keys via {@code throwingMerger()} instead of silently
 * merging values (unlike the two-arg {@code Collectors.toMap}).
 *
 * @param keyMapper   extracts the map key from each element
 * @param valueMapper extracts the map value from each element
 * @param mapSupplier supplies the concrete (mutable) map implementation
 */
public static <T, K, U, M extends Map<K,U>> Collector<T, ?, M> toCustomMap(
        Function<? super T, ? extends K> keyMapper,
        Function<? super T, ? extends U> valueMapper,
        Supplier<M> mapSupplier) {
    return Collectors.toMap(keyMapper, valueMapper, throwingMerger(), mapSupplier);
}
// Collecting two identical keys must raise DuplicateKeyException rather than
// silently overwriting one value with the other.
@Test
public void custom_map_collector_throws_exception_upon_duplicate_keys() {
    List<String> duplicates = List.of("same", "same");
    try {
        duplicates.stream().collect(toCustomMap(Function.identity(), Function.identity(), HashMap::new));
        fail();
    } catch (DuplicateKeyException e) {
        // expected: duplicate keys are rejected by the throwing merger
    }
}
// Constant-folds multiply(SMALLINT, SMALLINT).
// NOTE(review): Math.multiplyExact only guards against int overflow; two
// smallint operands can never overflow int, so the subsequent (short) cast can
// silently truncate products outside the smallint range — confirm intended.
@ConstantFunction(name = "multiply", argTypes = {SMALLINT, SMALLINT}, returnType = SMALLINT)
public static ConstantOperator multiplySmallInt(ConstantOperator first, ConstantOperator second) {
    return ConstantOperator.createSmallInt((short) Math.multiplyExact(first.getSmallint(), second.getSmallint()));
}
// 10 * 10 must constant-fold to the smallint value 100.
@Test
public void multiplySmallInt() {
    assertEquals(100, ScalarOperatorFunctions.multiplySmallInt(O_SI_10, O_SI_10).getSmallint());
}
/**
 * Renders a one-line progress bar: a carriage return (to rewind to the start
 * of the current console line) followed by {@code len} repetitions of
 * {@code showChar}, so successive calls overwrite the same line.
 */
public static void printProgress(char showChar, int len) {
    print("{}{}", CharUtil.CR, StrUtil.repeat(showChar, len));
}
// Manual/visual check of the console progress bar; @Disabled because it writes
// to the console and sleeps for ~20s.
// NOTE(review): this calls a three-argument printProgress(char, int, double)
// overload (with a rate), not the two-argument one above — verify the overload exists.
@Test
@Disabled
public void printProgressTest() {
    for(int i = 0; i < 100; i++) {
        Console.printProgress('#', 100, i / 100D);
        ThreadUtil.sleep(200);
    }
}
/**
 * Tells whether the given tile index lies inside this area: it must be on the
 * same zoom level and fall within both the horizontal and vertical spans.
 */
@Override
public boolean contains(long pTileIndex) {
    // Short-circuit in the same order as before: zoom first, then X, then Y.
    return MapTileIndex.getZoom(pTileIndex) == mZoom
            && contains(MapTileIndex.getX(pTileIndex), mLeft, mWidth)
            && contains(MapTileIndex.getY(pTileIndex), mTop, mHeight);
}
// For every zoom level, a whole-world area must contain all four corner tiles
// (0,0), (0,max), (max,max) and (max,0), where max is the last valid tile index.
@Test
public void testCorners() {
    final MapTileArea area = new MapTileArea();
    for (int zoom = 0; zoom <= TileSystem.getMaximumZoomLevel(); zoom++) {
        final int mapTileUpperBound = getMapTileUpperBound(zoom);
        final int max = mapTileUpperBound - 1;
        setNewWorld(area, zoom);
        Assert.assertTrue(area.contains(MapTileIndex.getTileIndex(zoom, 0, 0)));
        Assert.assertTrue(area.contains(MapTileIndex.getTileIndex(zoom, 0, max)));
        Assert.assertTrue(area.contains(MapTileIndex.getTileIndex(zoom, max, max)));
        Assert.assertTrue(area.contains(MapTileIndex.getTileIndex(zoom, max, 0)));
    }
}
</style_update>
/**
 * Opens a recoverable output stream for the blob addressed by {@code path}.
 *
 * @throws IOException if the stream cannot be opened
 */
@Override
public RecoverableFsDataOutputStream open(Path path) throws IOException {
    LOGGER.trace("Opening output stream for path {}", path);
    Preconditions.checkNotNull(path);
    // Parse the gs:// URI into a blob identifier; malformed URIs (e.g. an empty
    // bucket name) are rejected here with IllegalArgumentException.
    final GSBlobIdentifier blobIdentifier = BlobUtils.parseUri(path.toUri());
    return new GSRecoverableFsDataOutputStream(storage, options, blobIdentifier);
}
// A gs:// URI with an empty bucket name must be rejected while parsing the path.
@Test(expected = IllegalArgumentException.class)
public void testOpenWithEmptyBucketName() throws IOException {
    Path path = new Path("gs:///bar");
    writer.open(path);
}
/**
 * Maps a Cyberduck {@code Acl} onto a JetS3t {@code AccessControlList}.
 *
 * <p>Canned ACLs translate 1:1 to the JetS3t REST constants. Otherwise each
 * valid user/role pair is mapped by user kind (owner, e-mail, group,
 * canonical). Returns {@code null} for an empty ACL or when no owner could be
 * determined, meaning "nothing to set".
 */
protected AccessControlList toAcl(final Acl acl) {
    // Nothing to convert: callers treat null as "do not set an ACL".
    if(Acl.EMPTY.equals(acl)) {
        return null;
    }
    // Canned ACLs first — they bypass the per-grantee mapping below.
    if(Acl.CANNED_PRIVATE.equals(acl)) {
        return AccessControlList.REST_CANNED_PRIVATE;
    }
    if(Acl.CANNED_BUCKET_OWNER_FULLCONTROL.equals(acl)) {
        return AccessControlList.REST_CANNED_BUCKET_OWNER_FULLCONTROL;
    }
    if(Acl.CANNED_BUCKET_OWNER_READ.equals(acl)) {
        return AccessControlList.REST_CANNED_BUCKET_OWNER_READ;
    }
    if(Acl.CANNED_AUTHENTICATED_READ.equals(acl)) {
        return AccessControlList.REST_CANNED_AUTHENTICATED_READ;
    }
    if(Acl.CANNED_PUBLIC_READ.equals(acl)) {
        return AccessControlList.REST_CANNED_PUBLIC_READ;
    }
    if(Acl.CANNED_PUBLIC_READ_WRITE.equals(acl)) {
        return AccessControlList.REST_CANNED_PUBLIC_READ_WRITE;
    }
    final AccessControlList list = new AccessControlList();
    for(Acl.UserAndRole userAndRole : acl.asList()) {
        // Skip entries with missing user or role information.
        if(!userAndRole.isValid()) {
            continue;
        }
        if(userAndRole.getUser() instanceof Acl.Owner) {
            list.setOwner(new StorageOwner(userAndRole.getUser().getIdentifier(), userAndRole.getUser().getDisplayName()));
        }
        else if(userAndRole.getUser() instanceof Acl.EmailUser) {
            list.grantPermission(new EmailAddressGrantee(userAndRole.getUser().getIdentifier()),
                Permission.parsePermission(userAndRole.getRole().getName()));
        }
        else if(userAndRole.getUser() instanceof Acl.GroupUser) {
            // Handle special cases
            if(userAndRole.getUser().getIdentifier().equals(Acl.GroupUser.EVERYONE)) {
                list.grantPermission(GroupGrantee.ALL_USERS,
                    Permission.parsePermission(userAndRole.getRole().getName()));
            }
            else if(userAndRole.getUser().getIdentifier().equals(Acl.GroupUser.AUTHENTICATED)) {
                list.grantPermission(GroupGrantee.AUTHENTICATED_USERS,
                    Permission.parsePermission(userAndRole.getRole().getName()));
            }
            else {
                // Generic mappings
                list.grantPermission(new GroupGrantee(userAndRole.getUser().getIdentifier()),
                    Permission.parsePermission(userAndRole.getRole().getName()));
            }
        }
        else if(userAndRole.getUser() instanceof Acl.CanonicalUser) {
            list.grantPermission(new CanonicalGrantee(userAndRole.getUser().getIdentifier()),
                Permission.parsePermission(userAndRole.getRole().getName()));
        }
        else {
            log.warn(String.format("Unsupported user %s", userAndRole.getUser()));
        }
    }
    // An ACL without an owner is unusable; treat it as "nothing to set".
    if(null == list.getOwner()) {
        log.warn(String.format("Missing owner in %s", acl));
        return null;
    }
    return list;
}
// An ACL whose owner has empty identifier/display name must map back to Acl.EMPTY.
@Test
public void testInvalidOwnerFromServer() {
    final S3AccessControlListFeature f = new S3AccessControlListFeature(session);
    final AccessControlList list = new AccessControlList();
    list.setOwner(new StorageOwner("", ""));
    assertEquals(Acl.EMPTY, f.toAcl(list));
}
/**
 * Splits one shard of object-type data elements into {@code numSplits} shards.
 *
 * <p>Ordinal i of the source maps to shard {@code i & (numSplits-1)} at
 * ordinal {@code i >> log2(numSplits)}, which is why the split factor must be
 * a power of two.
 *
 * @param from      source data elements (never mutated)
 * @param numSplits number of target shards; must be a positive power of two
 * @return the target shards, populated with the redistributed records
 * @throws IllegalStateException if {@code numSplits} is not a positive power
 *         of two, or if the source unexpectedly carries encoded additions
 */
HollowObjectTypeDataElements[] split(HollowObjectTypeDataElements from, int numSplits) {
    final int toMask = numSplits - 1;
    // log2(numSplits): how many low bits select the shard.
    final int toOrdinalShift = 31 - Integer.numberOfLeadingZeros(numSplits);
    final long[][] currentWriteVarLengthDataPointers;
    // Power-of-two check: a value with more than one set bit fails (x & (x-1)) == 0.
    if (numSplits<=0 || !((numSplits&(numSplits-1))==0)) {
        throw new IllegalStateException("Must split by power of 2");
    }
    HollowObjectTypeDataElements[] to = new HollowObjectTypeDataElements[numSplits];
    for(int i=0;i<to.length;i++) {
        to[i] = new HollowObjectTypeDataElements(from.schema, from.memoryMode, from.memoryRecycler);
        // -1 marks "no records yet"; populateStats raises it per shard.
        to[i].maxOrdinal = -1;
    }
    // Per-shard, per-field write cursors into the variable-length sections.
    currentWriteVarLengthDataPointers = new long[numSplits][from.schema.numFields()];
    populateStats(to, from, toMask, toOrdinalShift);
    if (from.encodedRemovals != null) {
        GapEncodedVariableLengthIntegerReader[] splitRemovals = from.encodedRemovals.split(numSplits);
        for(int i=0;i<to.length;i++) {
            to[i].encodedRemovals = splitRemovals[i];
        }
    }
    if (from.encodedAdditions != null) {
        throw new IllegalStateException("Encountered encodedAdditions in data elements splitter- this is not expected " +
            "since encodedAdditions only exist on delta data elements and they dont carry over to target data elements, " +
            "delta data elements are never split/joined");
    }
    for(int i=0;i<to.length;i++) {
        // Allocate fixed-length storage sized by the stats computed above.
        to[i].fixedLengthData = FixedLengthDataFactory.get((long)to[i].bitsPerRecord * (to[i].maxOrdinal + 1),
            to[i].memoryMode, to[i].memoryRecycler);
        for(int fieldIdx=0;fieldIdx<from.schema.numFields();fieldIdx++) {
            if(from.varLengthData[fieldIdx] != null) {
                to[i].varLengthData[fieldIdx] = VariableLengthDataFactory.get(from.memoryMode, from.memoryRecycler);
            }
        }
    }
    // Redistribute every source record to its target shard/ordinal.
    for(int i=0;i<=from.maxOrdinal;i++) {
        int toIndex = i & toMask;
        int toOrdinal = i >> toOrdinalShift;
        copyRecord(to[toIndex], toOrdinal, from, i, currentWriteVarLengthDataPointers[toIndex]);
    }
    return to;
}
// Splitting into 1 shard is an identity operation; splitting 5 records into 8
// shards leaves some shards with one record (maxOrdinal 0) and some empty
// (maxOrdinal -1); non-power-of-two and zero split factors must be rejected.
@Test
public void testSplit() throws IOException {
    HollowObjectTypeDataElementsSplitter splitter = new HollowObjectTypeDataElementsSplitter();
    HollowObjectTypeReadState typeReadState = populateTypeStateWith(5);
    assertEquals(1, typeReadState.numShards());
    assertDataUnchanged(typeReadState, 5);
    HollowObjectTypeDataElements[] result1 = splitter.split(typeReadState.currentDataElements()[0], 1);
    typeReadState = new HollowObjectTypeReadState(typeReadState.getSchema(), result1[0]);
    assertDataUnchanged(typeReadState, 5);
    HollowObjectTypeDataElements[] result8 = splitter.split(typeReadState.currentDataElements()[0], 8);
    assertEquals(0, result8[0].maxOrdinal); // for index that landed one record after split
    assertEquals(-1, result8[7].maxOrdinal); // for index that landed no records after split
    try {
        splitter.split(typeReadState.currentDataElements()[0], 3); // numSplits=3
        Assert.fail();
    } catch (IllegalStateException e) {
        // expected, numSplits should be a power of 2
    }
    try {
        splitter.split(typeReadState.currentDataElements()[0], 0); // numSplits=0
        Assert.fail();
    } catch (IllegalStateException e) {
        // expected, numSplits should be a power of 2
    }
}
/** Returns the value mapper built for this selection. */
public SelectValueMapper<K> getMapper() {
    return mapper;
}
// The mapper exposed by the selection must carry the select expressions in
// declaration order.
@Test
public void shouldBuildMapperWithCorrectExpressions() {
    // When:
    final SelectValueMapper<String> mapper = selection.getMapper();
    // Then:
    final List<SelectInfo> selectInfos = mapper.getSelects();
    assertThat(
        selectInfos.get(0).getEvaluator().getExpression(),
        equalTo(EXPRESSION1));
    assertThat(
        selectInfos.get(1).getEvaluator().getExpression(),
        equalTo(EXPRESSION2));
}
/**
 * Translates Fair Scheduler site-level configuration from {@code conf} into
 * the equivalent Capacity Scheduler settings on {@code yarnSiteConfig}.
 *
 * <p>Covers scheduler class, continuous/async scheduling, preemption (and the
 * NO_POLICY mode that clears the monitor-policy list), auto-created queue
 * deletion, assign-multiple, locality thresholds, size-based weight and DRF.
 * Also records {@code preemptionEnabled}/{@code sizeBasedWeight} on this
 * converter instance as side effects.
 *
 * <p>NOTE(review): the monitor-policy list is written several times
 * (preemption, queue deletion, auto refresh); addMonitorPolicy presumably
 * appends to the current value — confirm, since ordering of these writes matters.
 */
@SuppressWarnings({"deprecation", "checkstyle:linelength"})
public void convertSiteProperties(Configuration conf, Configuration yarnSiteConfig, boolean drfUsed,
    boolean enableAsyncScheduler, boolean userPercentage,
    FSConfigToCSConfigConverterParams.PreemptionMode preemptionMode) {
  yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class.getCanonicalName());
  if (conf.getBoolean(
      FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_ENABLED,
      FairSchedulerConfiguration.DEFAULT_CONTINUOUS_SCHEDULING_ENABLED)) {
    // FS continuous scheduling maps to CS asynchronous scheduling with the same interval.
    yarnSiteConfig.setBoolean(
        CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, true);
    int interval = conf.getInt(
        FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_SLEEP_MS,
        FairSchedulerConfiguration.DEFAULT_CONTINUOUS_SCHEDULING_SLEEP_MS);
    yarnSiteConfig.setInt(PREFIX + "schedule-asynchronously.scheduling-interval-ms", interval);
  }
  // This should be always true to trigger cs auto refresh queue.
  yarnSiteConfig.setBoolean(
      YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true);
  if (conf.getBoolean(FairSchedulerConfiguration.PREEMPTION,
      FairSchedulerConfiguration.DEFAULT_PREEMPTION)) {
    preemptionEnabled = true;
    String policies = addMonitorPolicy(ProportionalCapacityPreemptionPolicy.class.getCanonicalName(),
        yarnSiteConfig);
    yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES, policies);
    int waitTimeBeforeKill = conf.getInt(
        FairSchedulerConfiguration.WAIT_TIME_BEFORE_KILL,
        FairSchedulerConfiguration.DEFAULT_WAIT_TIME_BEFORE_KILL);
    yarnSiteConfig.setInt(
        CapacitySchedulerConfiguration.PREEMPTION_WAIT_TIME_BEFORE_KILL, waitTimeBeforeKill);
    long waitBeforeNextStarvationCheck = conf.getLong(
        FairSchedulerConfiguration.WAIT_TIME_BEFORE_NEXT_STARVATION_CHECK_MS,
        FairSchedulerConfiguration.DEFAULT_WAIT_TIME_BEFORE_NEXT_STARVATION_CHECK_MS);
    yarnSiteConfig.setLong(
        CapacitySchedulerConfiguration.PREEMPTION_MONITORING_INTERVAL, waitBeforeNextStarvationCheck);
  } else {
    // Preemption disabled: only clear the policy list when explicitly requested.
    if (preemptionMode == FSConfigToCSConfigConverterParams.PreemptionMode.NO_POLICY) {
      yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES, "");
    }
  }
  // For auto created queue's auto deletion.
  if (!userPercentage) {
    String policies = addMonitorPolicy(AutoCreatedQueueDeletionPolicy.class.getCanonicalName(),
        yarnSiteConfig);
    yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES, policies);
    // Set the expired for deletion interval to 10s, consistent with fs.
    yarnSiteConfig.setInt(CapacitySchedulerConfiguration.AUTO_CREATE_CHILD_QUEUE_EXPIRED_TIME, 10);
  }
  if (conf.getBoolean(FairSchedulerConfiguration.ASSIGN_MULTIPLE,
      FairSchedulerConfiguration.DEFAULT_ASSIGN_MULTIPLE)) {
    yarnSiteConfig.setBoolean(
        CapacitySchedulerConfiguration.ASSIGN_MULTIPLE_ENABLED, true);
  } else {
    yarnSiteConfig.setBoolean(
        CapacitySchedulerConfiguration.ASSIGN_MULTIPLE_ENABLED, false);
  }
  // Make auto cs conf refresh enabled.
  yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
      addMonitorPolicy(QueueConfigurationAutoRefreshPolicy.class.getCanonicalName(), yarnSiteConfig));
  int maxAssign = conf.getInt(FairSchedulerConfiguration.MAX_ASSIGN,
      FairSchedulerConfiguration.DEFAULT_MAX_ASSIGN);
  // Only propagate values that differ from the FS defaults.
  if (maxAssign != FairSchedulerConfiguration.DEFAULT_MAX_ASSIGN) {
    yarnSiteConfig.setInt(
        CapacitySchedulerConfiguration.MAX_ASSIGN_PER_HEARTBEAT, maxAssign);
  }
  float localityThresholdNode = conf.getFloat(
      FairSchedulerConfiguration.LOCALITY_THRESHOLD_NODE,
      FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_NODE);
  if (localityThresholdNode != FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_NODE) {
    yarnSiteConfig.setFloat(CapacitySchedulerConfiguration.NODE_LOCALITY_DELAY, localityThresholdNode);
  }
  float localityThresholdRack = conf.getFloat(
      FairSchedulerConfiguration.LOCALITY_THRESHOLD_RACK,
      FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_RACK);
  if (localityThresholdRack != FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_RACK) {
    yarnSiteConfig.setFloat(
        CapacitySchedulerConfiguration.RACK_LOCALITY_ADDITIONAL_DELAY, localityThresholdRack);
  }
  if (conf.getBoolean(FairSchedulerConfiguration.SIZE_BASED_WEIGHT,
      FairSchedulerConfiguration.DEFAULT_SIZE_BASED_WEIGHT)) {
    sizeBasedWeight = true;
  }
  if (drfUsed) {
    yarnSiteConfig.set(
        CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS,
        DominantResourceCalculator.class.getCanonicalName());
  }
  if (enableAsyncScheduler) {
    yarnSiteConfig.setBoolean(CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, true);
  }
}
// With preemption disabled, the converted monitor-policy list must not contain
// ProportionalCapacityPreemptionPolicy — both with a null preemption mode
// (defaults to no-policy) and with an explicit NO_POLICY.
@Test
public void testSiteDisabledPreemptionWithNoPolicyConversion() {
    // Default mode is nopolicy
    yarnConfig.setBoolean(FairSchedulerConfiguration.PREEMPTION, false);
    converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false,
        false, false, null);
    assertFalse("Should not contain ProportionalCapacityPreemptionPolicy.",
        yarnConvertedConfig.
        get(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES).
        contains(ProportionalCapacityPreemptionPolicy.
        class.getCanonicalName()));
    yarnConfig.setBoolean(FairSchedulerConfiguration.PREEMPTION, false);
    converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false,
        false, false, FSConfigToCSConfigConverterParams.PreemptionMode.NO_POLICY);
    assertFalse("Should not contain ProportionalCapacityPreemptionPolicy.",
        yarnConvertedConfig.
        get(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES).
        contains(ProportionalCapacityPreemptionPolicy.
        class.getCanonicalName()));
}
/**
 * Reads a PostgreSQL int4[] parameter value of exactly
 * {@code parameterValueLength} bytes and decodes it into an int array.
 */
@Override
public Object read(final PostgreSQLPacketPayload payload, final int parameterValueLength) {
    // Consume exactly the declared number of bytes of the textual array literal.
    final byte[] value = new byte[parameterValueLength];
    payload.getByteBuf().readBytes(value);
    // Second decoder argument is true when the literal does NOT start with '{';
    // presumably it selects an alternate text encoding — TODO confirm decoder contract.
    final boolean notBraceWrapped = '{' != value[0];
    return ARRAY_PARAMETER_DECODER.decodeInt4Array(value, notBraceWrapped);
}
// Writes a length-prefixed textual int4[] literal into a buffer, reads it back
// through the binary-protocol value parser, and checks both the decoded array
// and that the reader consumed exactly the declared number of bytes.
@Test
void assertRead() {
    String parameterValue = "{\"11\",\"12\"}";
    // 4 bytes for the int32 length prefix plus the literal itself.
    int expectedLength = 4 + parameterValue.length();
    ByteBuf byteBuf = ByteBufTestUtils.createByteBuf(expectedLength);
    byteBuf.writeInt(parameterValue.length());
    byteBuf.writeCharSequence(parameterValue, StandardCharsets.ISO_8859_1);
    byteBuf.readInt();
    PostgreSQLPacketPayload payload = new PostgreSQLPacketPayload(byteBuf, StandardCharsets.UTF_8);
    Object actual = newInstance().read(payload, parameterValue.length());
    assertThat(actual, is(new int[]{11, 12}));
    assertThat(byteBuf.readerIndex(), is(expectedLength));
}
/**
 * Assembles the full Elasticsearch settings map by composing the filesystem,
 * network, cluster, security and miscellaneous sections, then logs the
 * HTTP/TCP endpoints the node will listen on.
 *
 * @return the populated settings map
 */
public Map<String, String> build() {
    Map<String, String> builder = new HashMap<>();
    configureFileSystem(builder);
    configureNetwork(builder);
    configureCluster(builder);
    configureSecurity(builder);
    configureOthers(builder);
    LOGGER.info("Elasticsearch listening on [HTTP: {}:{}, TCP: {}:{}]",
        builder.get(ES_HTTP_HOST_KEY), builder.get(ES_HTTP_PORT_KEY),
        builder.get(ES_TRANSPORT_HOST_KEY), builder.get(ES_TRANSPORT_PORT_KEY));
    return builder;
}
// A -Dnode.store.allow_mmap=false passed through the search JVM additional
// options must surface as the node.store.allow_mmap=false ES setting,
// regardless of cluster mode.
@Test
@UseDataProvider("clusterEnabledOrNot")
public void disable_mmap_if_configured_in_search_additional_props(boolean clusterEnabled) throws Exception {
    Props props = minProps(clusterEnabled);
    props.set("sonar.search.javaAdditionalOpts", "-Dnode.store.allow_mmap=false");
    Map<String, String> settings = new EsSettings(props, new EsInstallation(props), system).build();
    assertThat(settings).containsEntry("node.store.allow_mmap", "false");
}
/**
 * Runs the scan synchronously by blocking on {@code runAsync} until the
 * asynchronous workflow completes.
 *
 * @throws ExecutionException   if the asynchronous scan completed exceptionally
 * @throws InterruptedException if the calling thread is interrupted while waiting
 */
public ScanResults run(ScanTarget scanTarget) throws ExecutionException, InterruptedException {
    return runAsync(scanTarget).get();
}
// With no PortScanner plugin bound, the workflow must complete with a FAILED
// status, an explanatory message, and no findings — rather than throwing.
@Test
public void run_whenNoPortScannerInstalled_returnsFailedScanResult()
    throws ExecutionException, InterruptedException {
    Injector injector = Guice.createInjector(
        new FakeUtcClockModule(),
        new FakePluginExecutionModule(),
        new FakeServiceFingerprinterBootstrapModule(),
        new FakeVulnDetectorBootstrapModule());
    scanningWorkflow = injector.getInstance(DefaultScanningWorkflow.class);
    ScanResults scanResults = scanningWorkflow.run(buildScanTarget());
    assertThat(scanResults.getScanStatus()).isEqualTo(ScanStatus.FAILED);
    assertThat(scanResults.getStatusMessage())
        .contains("At least one PortScanner plugin is required");
    assertThat(scanResults.getScanFindingsList()).isEmpty();
}
/**
 * Pages through a tenant's widget types, applying the filter's deprecation,
 * type and text-search criteria via the repository query.
 */
@Override
public PageData<WidgetTypeInfo> findTenantWidgetTypesByTenantId(WidgetTypeFilter widgetTypeFilter, PageLink pageLink) {
    // ALL disables deprecation filtering entirely; otherwise the second flag
    // selects deprecated (true) vs. non-deprecated (false) entries.
    boolean deprecatedFilterEnabled = !DeprecatedFilter.ALL.equals(widgetTypeFilter.getDeprecatedFilter());
    boolean deprecatedFilterBool = DeprecatedFilter.DEPRECATED.equals(widgetTypeFilter.getDeprecatedFilter());
    // Empty/null widget-type list means "match all types"; the query needs the
    // emptiness flag separately because an empty IN-list is invalid SQL.
    boolean widgetTypesEmpty = widgetTypeFilter.getWidgetTypes() == null || widgetTypeFilter.getWidgetTypes().isEmpty();
    return DaoUtil.toPageData(
        widgetTypeInfoRepository
            .findTenantWidgetTypesByTenantId(
                widgetTypeFilter.getTenantId().getId(),
                pageLink.getTextSearch(),
                widgetTypeFilter.isFullSearch(),
                deprecatedFilterEnabled,
                deprecatedFilterBool,
                widgetTypesEmpty,
                widgetTypeFilter.getWidgetTypes() == null ? Collections.emptyList() : widgetTypeFilter.getWidgetTypes(),
                widgetTypeFilter.isScadaFirst(),
                DaoUtil.toPageable(pageLink, WidgetTypeInfoEntity.SEARCH_COLUMNS_MAP)));
}
// Saves a batch of widget types for one tenant and pages them back with an
// unfiltered query; checks the page size and the first three returned entries.
// NOTE(review): the expected indexes 3..5 imply a non-trivial default ordering
// from createAndSaveWidgetType/createdTime — verify against the fixture.
@Test
public void testFindTenantWidgetTypesByTenantId() {
    UUID tenantId = Uuids.timeBased();
    for (int i = 0; i < WIDGET_TYPE_COUNT; i++) {
        var widgetType = createAndSaveWidgetType(new TenantId(tenantId), i);
        widgetTypeList.add(widgetType);
    }
    PageData<WidgetTypeInfo> widgetTypes = widgetTypeDao.findTenantWidgetTypesByTenantId(
        WidgetTypeFilter.builder()
            .tenantId(new TenantId(tenantId))
            .fullSearch(true)
            .deprecatedFilter(DeprecatedFilter.ALL)
            .widgetTypes(null).build(),
        new PageLink(10, 0, "", new SortOrder("createdTime")));
    assertEquals(WIDGET_TYPE_COUNT, widgetTypes.getData().size());
    assertEquals(new WidgetTypeInfo(widgetTypeList.get(3)), widgetTypes.getData().get(0));
    assertEquals(new WidgetTypeInfo(widgetTypeList.get(4)), widgetTypes.getData().get(1));
    assertEquals(new WidgetTypeInfo(widgetTypeList.get(5)), widgetTypes.getData().get(2));
}
/**
 * Static factory: starts a join specification keyed by the given {@code By}
 * clause; further inputs are attached via the returned {@code Impl}.
 */
public static Impl join(By clause) {
    return new Impl(new JoinArguments(clause));
}
// CoGroups two POJO collections on (user, country) and checks both the joined
// schema (key row plus one iterable of rows per input) and the grouped rows
// for the "us" and "il" keys.
@Test
@Category(NeedsRunner.class)
public void testPojo() {
    List<CgPojo> pc1Rows = Lists.newArrayList(
        new CgPojo("user1", 1, "us"),
        new CgPojo("user1", 2, "us"),
        new CgPojo("user1", 3, "il"),
        new CgPojo("user1", 4, "il"));
    List<CgPojo> pc2Rows = Lists.newArrayList(
        new CgPojo("user1", 3, "us"),
        new CgPojo("user1", 4, "us"),
        new CgPojo("user1", 5, "il"),
        new CgPojo("user1", 6, "il"));
    PCollection<CgPojo> pc1 = pipeline.apply("Create1", Create.of(pc1Rows));
    PCollection<CgPojo> pc2 = pipeline.apply("Create2", Create.of(pc2Rows));
    PCollection<Row> joined = PCollectionTuple.of("pc1", pc1)
        .and("pc2", pc2)
        .apply(
            CoGroup.join("pc1", By.fieldNames("user", "country"))
                .join("pc2", By.fieldNames("user", "country")));
    // Output schema: the join key row plus one iterable field per joined input.
    Schema expectedSchema = Schema.builder()
        .addRowField("key", SIMPLE_CG_KEY_SCHEMA)
        .addIterableField("pc1", FieldType.row(CG_SCHEMA_1))
        .addIterableField("pc2", FieldType.row(CG_SCHEMA_1))
        .build();
    List<Row> expected = Lists.newArrayList(
        Row.withSchema(expectedSchema)
            .addValue(Row.withSchema(SIMPLE_CG_KEY_SCHEMA).addValues("user1", "us").build())
            .addIterable(
                Lists.newArrayList(
                    Row.withSchema(CG_SCHEMA_1).addValues("user1", 1, "us").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user1", 2, "us").build()))
            .addIterable(
                Lists.newArrayList(
                    Row.withSchema(CG_SCHEMA_1).addValues("user1", 3, "us").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user1", 4, "us").build()))
            .build(),
        Row.withSchema(expectedSchema)
            .addValue(Row.withSchema(SIMPLE_CG_KEY_SCHEMA).addValues("user1", "il").build())
            .addIterable(
                Lists.newArrayList(
                    Row.withSchema(CG_SCHEMA_1).addValues("user1", 3, "il").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user1", 4, "il").build()))
            .addIterable(
                Lists.newArrayList(
                    Row.withSchema(CG_SCHEMA_1).addValues("user1", 5, "il").build(),
                    Row.withSchema(CG_SCHEMA_1).addValues("user1", 6, "il").build()))
            .build());
    assertEquals(expectedSchema, joined.getSchema());
    PAssert.that(joined).satisfies(actual -> containsJoinedFields(expected, actual));
    pipeline.run();
}
/**
 * Flattens the headers into an alternating key/value list
 * (key0, value0, key1, value1, ...), presized for one key and one value per entry.
 */
public List<String> toList() {
    final List<String> result = new ArrayList<>(header.size() * 2);
    for (Iterator<Map.Entry<String, String>> it = iterator(); it.hasNext(); ) {
        final Map.Entry<String, String> entry = it.next();
        result.add(entry.getKey());
        result.add(entry.getValue());
    }
    return result;
}
// The default header set must appear in the flattened list with each value
// immediately following its key (index difference of exactly 1).
@Test
void testToList() {
    Header header = Header.newInstance();
    List<String> list = header.toList();
    assertTrue(list.contains(HttpHeaderConsts.CONTENT_TYPE));
    assertTrue(list.contains(MediaType.APPLICATION_JSON));
    assertEquals(1, list.indexOf(MediaType.APPLICATION_JSON) - list.indexOf(HttpHeaderConsts.CONTENT_TYPE));
    assertTrue(list.contains(HttpHeaderConsts.ACCEPT_CHARSET));
    assertTrue(list.contains("UTF-8"));
    assertEquals(1, list.indexOf("UTF-8") - list.indexOf(HttpHeaderConsts.ACCEPT_CHARSET));
}
/**
 * Asserts {@code value >= minimumValue}, delegating to the message-taking
 * overload with a standard diagnostic message.
 */
public static void isGreaterThanOrEqualTo(int value, int minimumValue) {
    final String message =
        String.format("value [%s] is less than minimum value [%s]", value, minimumValue);
    isGreaterThanOrEqualTo(value, minimumValue, message);
}
// 0 is below the minimum of 1, so the precondition must throw.
@Test
public void testIsGreaterThanOrEqualTo3() {
    assertThrows(
        IllegalArgumentException.class, () -> Precondition.isGreaterThanOrEqualTo(0, 1));
}
/**
 * Installs the plugin file-write restriction (limited to the home and temp
 * directories). May be invoked at most once.
 *
 * @throws IllegalStateException on a second invocation
 */
public void apply() {
    if (applied) {
        throw new IllegalStateException("can't apply twice");
    }
    applied = true;
    PluginFileWriteRule writeRule = new PluginFileWriteRule(
        props.nonNullValueAsFile(ProcessProperties.Property.PATH_HOME.getKey()).toPath(),
        props.nonNullValueAsFile(ProcessProperties.Property.PATH_TEMP.getKey()).toPath());
    pluginSecurityManager.restrictPlugins(writeRule);
}
// apply() is one-shot: a second call on the same manager must fail fast.
@Test
public void fail_if_runs_twice() {
    Properties properties = new Properties();
    properties.setProperty(PATH_HOME.getKey(), "home");
    properties.setProperty(PATH_TEMP.getKey(), "temp");
    Props props = new Props(properties);
    WebSecurityManager securityManager = new WebSecurityManager(pluginSecurityManager, props);
    securityManager.apply();
    assertThrows(IllegalStateException.class, securityManager::apply);
}
/**
 * Convenience overload that retries the callable against the system clock;
 * delegates to the {@code Time}-parameterized variant (useful for tests).
 */
public static <T> T retryUntilTimeout(Callable<T> callable, Supplier<String> description,
        Duration timeoutDuration, long retryBackoffMs) throws Exception {
    return retryUntilTimeout(callable, description, timeoutDuration, retryBackoffMs, Time.SYSTEM);
}
// A callable that always times out must exhaust its retries and surface a
// ConnectException whose message carries the last failure reason.
@Test
public void testExhaustingRetries() throws Exception {
    Mockito.when(mockCallable.call()).thenThrow(new TimeoutException("timeout exception"));
    ConnectException e = assertThrows(ConnectException.class,
        () -> RetryUtil.retryUntilTimeout(mockCallable, testMsg, Duration.ofMillis(50), 10, mockTime));
    Mockito.verify(mockCallable, Mockito.atLeastOnce()).call();
    assertTrue(e.getMessage().contains("Reason: timeout exception"));
}
/**
 * Creates a ZooKeeper-backed data source reading rules from the given node path.
 *
 * @param serverAddr ZooKeeper connect string; must be non-blank
 * @param path       node path to watch; must be non-blank
 * @param parser     converts the raw node content into the rule type
 * @throws IllegalArgumentException if {@code serverAddr} or {@code path} is blank
 */
public ZookeeperDataSource(final String serverAddr, final String path, Converter<String, T> parser) {
    super(parser);
    if (StringUtil.isBlank(serverAddr) || StringUtil.isBlank(path)) {
        throw new IllegalArgumentException(String.format("Bad argument: serverAddr=[%s], path=[%s]", serverAddr, path));
    }
    this.path = path;
    // null auth info: connect without ACL credentials.
    init(serverAddr, null);
}
// End-to-end: boots an in-process ZooKeeper, registers a ZK-backed flow rule
// source, then publishes rule updates twice and verifies each is picked up.
@Test
public void testZooKeeperDataSource() throws Exception {
    TestingServer server = new TestingServer(21812);
    server.start();
    final String remoteAddress = server.getConnectString();
    final String path = "/sentinel-zk-ds-demo/flow-HK";
    ReadableDataSource<String, List<FlowRule>> flowRuleDataSource = new ZookeeperDataSource<List<FlowRule>>(remoteAddress, path,
        new Converter<String, List<FlowRule>>() {
            @Override
            public List<FlowRule> convert(String source) {
                return JSON.parseObject(source, new TypeReference<List<FlowRule>>() {
                });
            }
        });
    FlowRuleManager.register2Property(flowRuleDataSource.getProperty());
    CuratorFramework zkClient = CuratorFrameworkFactory.newClient(remoteAddress,
        new ExponentialBackoffRetry(3, 1000));
    zkClient.start();
    // Ensure the watched node exists before the first publish.
    Stat stat = zkClient.checkExists().forPath(path);
    if (stat == null) {
        zkClient.create().creatingParentContainersIfNeeded().withMode(CreateMode.PERSISTENT).forPath(path, null);
    }
    final String resourceName = "HK";
    publishThenTestFor(zkClient, path, resourceName, 10);
    publishThenTestFor(zkClient, path, resourceName, 15);
    zkClient.close();
    server.stop();
}
/** Returns this sensor's constant rule key. */
@Override
public String getRuleKey() {
    return RULE_KEY;
}
// The sensor must expose exactly its declared RULE_KEY constant.
@Test
public void getRuleKey_returnsTheKey() {
    assertThat(new MultilineHotspotSensor().getRuleKey()).isEqualTo(MultilineHotspotSensor.RULE_KEY);
}
/**
 * Human-readable summary of this config, mirroring the field order:
 * url, id (quoted), resourceType.
 */
@Override
public String toString() {
    return String.format("ResourceConfig{url=%s, id='%s', resourceType=%s}", url, id, resourceType);
}
// Attaching a File pointing at a nonexistent path must fail with a JetException
// naming the offending path.
@Test
public void when_attachNonexistentFileWithFile_then_throwsException() {
    // Given
    String path = Paths.get("/i/do/not/exist").toString();
    File file = new File(path);
    // Then
    expectedException.expect(JetException.class);
    expectedException.expectMessage("Not an existing, readable file: " + path);
    // When
    config.attachFile(file);
}
/**
 * Builds a {@code CreateStreamCommand} for the given output node, carrying the
 * sink name, schema, timestamp column, topic/format info, window info and the
 * node's or-replace flag (the final {@code Optional.of(false)} is a fixed
 * constructor argument — presumably "is source"/"is table"; confirm against
 * the CreateStreamCommand signature).
 */
public CreateStreamCommand createStreamCommand(final KsqlStructuredDataOutputNode outputNode) {
    return new CreateStreamCommand(
        outputNode.getSinkName().get(),
        outputNode.getSchema(),
        outputNode.getTimestampColumn(),
        outputNode.getKsqlTopic().getKafkaTopicName(),
        Formats.from(outputNode.getKsqlTopic()),
        outputNode.getKsqlTopic().getKeyFormat().getWindowInfo(),
        Optional.of(outputNode.getOrReplace()),
        Optional.of(false)
    );
}
// Declaring a value column named ROWTIME must be rejected: it is a reserved
// pseudo-column name.
@Test
public void shouldThrowOnRowTimeValueColumn() {
    // Given:
    final CreateStream statement = new CreateStream(
        SOME_NAME,
        TableElements.of(tableElement(ROWTIME_NAME.text(), new Type(BIGINT))),
        false,
        true,
        withProperties,
        false
    );
    // When:
    final Exception e = assertThrows(
        KsqlException.class,
        () -> createSourceFactory.createStreamCommand(statement, ksqlConfig)
    );
    // Then:
    assertThat(e.getMessage(), containsString(
        "'ROWTIME' is a reserved column name."));
}
/**
 * Executes one remote-storage read for the configured fetch info, records
 * per-topic and aggregate byte/failure metrics, charges the read against the
 * fetch quota, and hands the outcome (data or error) to the callback.
 *
 * <p>Never throws: every outcome is wrapped in a {@code RemoteLogReadResult}.
 */
@Override
public Void call() {
    RemoteLogReadResult result;
    try {
        LOGGER.debug("Reading records from remote storage for topic partition {}", fetchInfo.topicPartition);
        // Time the remote read itself; bytes read feed the per-topic and aggregate rates.
        FetchDataInfo fetchDataInfo = remoteReadTimer.time(() -> rlm.read(fetchInfo));
        brokerTopicStats.topicStats(fetchInfo.topicPartition.topic()).remoteFetchBytesRate().mark(fetchDataInfo.records.sizeInBytes());
        brokerTopicStats.allTopicsStats().remoteFetchBytesRate().mark(fetchDataInfo.records.sizeInBytes());
        result = new RemoteLogReadResult(Optional.of(fetchDataInfo), Optional.empty());
    } catch (OffsetOutOfRangeException e) {
        // Out-of-range is an expected client condition: reported to the caller
        // but intentionally NOT counted as a failed remote fetch.
        result = new RemoteLogReadResult(Optional.empty(), Optional.of(e));
    } catch (Exception e) {
        brokerTopicStats.topicStats(fetchInfo.topicPartition.topic()).failedRemoteFetchRequestRate().mark();
        brokerTopicStats.allTopicsStats().failedRemoteFetchRequestRate().mark();
        LOGGER.error("Error occurred while reading the remote data for {}", fetchInfo.topicPartition, e);
        result = new RemoteLogReadResult(Optional.empty(), Optional.of(e));
    }
    LOGGER.debug("Finished reading records from remote storage for topic partition {}", fetchInfo.topicPartition);
    // Error paths record 0 bytes against the quota.
    quotaManager.record(result.fetchDataInfo.map(fetchDataInfo -> fetchDataInfo.records.sizeInBytes()).orElse(0));
    callback.accept(result);
    return null;
}
@Test public void testRemoteLogReaderWithoutError() throws RemoteStorageException, IOException { FetchDataInfo fetchDataInfo = new FetchDataInfo(logOffsetMetadata, records); when(records.sizeInBytes()).thenReturn(100); when(mockRLM.read(any(RemoteStorageFetchInfo.class))).thenReturn(fetchDataInfo); Consumer<RemoteLogReadResult> callback = mock(Consumer.class); RemoteStorageFetchInfo remoteStorageFetchInfo = new RemoteStorageFetchInfo(0, false, new TopicPartition(TOPIC, 0), null, null, false); RemoteLogReader remoteLogReader = new RemoteLogReader(remoteStorageFetchInfo, mockRLM, callback, brokerTopicStats, mockQuotaManager, timer); remoteLogReader.call(); // verify the callback did get invoked with the expected remoteLogReadResult ArgumentCaptor<RemoteLogReadResult> remoteLogReadResultArg = ArgumentCaptor.forClass(RemoteLogReadResult.class); verify(callback, times(1)).accept(remoteLogReadResultArg.capture()); RemoteLogReadResult actualRemoteLogReadResult = remoteLogReadResultArg.getValue(); assertFalse(actualRemoteLogReadResult.error.isPresent()); assertTrue(actualRemoteLogReadResult.fetchDataInfo.isPresent()); assertEquals(fetchDataInfo, actualRemoteLogReadResult.fetchDataInfo.get()); // verify the record method on quota manager was called with the expected value ArgumentCaptor<Double> recordedArg = ArgumentCaptor.forClass(Double.class); verify(mockQuotaManager, times(1)).record(recordedArg.capture()); assertEquals(100, recordedArg.getValue()); // Verify metrics for remote reads are updated correctly assertEquals(1, brokerTopicStats.topicStats(TOPIC).remoteFetchRequestRate().count()); assertEquals(100, brokerTopicStats.topicStats(TOPIC).remoteFetchBytesRate().count()); assertEquals(0, brokerTopicStats.topicStats(TOPIC).failedRemoteFetchRequestRate().count()); // Verify aggregate metrics assertEquals(1, brokerTopicStats.allTopicsStats().remoteFetchRequestRate().count()); assertEquals(100, brokerTopicStats.allTopicsStats().remoteFetchBytesRate().count()); 
assertEquals(0, brokerTopicStats.allTopicsStats().failedRemoteFetchRequestRate().count()); }
@SuppressWarnings("MethodMayBeStatic") // Non-static to support DI. public long parse(final String text) { final String date; final String time; final String timezone; if (text.contains("T")) { date = text.substring(0, text.indexOf('T')); final String withTimezone = text.substring(text.indexOf('T') + 1); timezone = getTimezone(withTimezone); time = completeTime(withTimezone.substring(0, withTimezone.length() - timezone.length()) .replaceAll("Z$","")); } else { date = completeDate(text); time = completeTime(""); timezone = ""; } try { final ZoneId zoneId = parseTimezone(timezone); return PARSER.parse(date + "T" + time, zoneId); } catch (final RuntimeException e) { throw new KsqlException("Failed to parse timestamp '" + text + "': " + e.getMessage() + HELP_MESSAGE, e ); } }
@Test public void shouldParseDateTimeWithPositiveTimezones() { assertThat(parser.parse("2017-11-13T23:59:58.999+0100"), is(1510613998999L)); }
public synchronized NumaResourceAllocation allocateNumaNodes( Container container) throws ResourceHandlerException { NumaResourceAllocation allocation = allocate(container.getContainerId(), container.getResource()); if (allocation != null) { try { // Update state store. context.getNMStateStore().storeAssignedResources(container, NUMA_RESOURCE_TYPE, Arrays.asList(allocation)); } catch (IOException e) { releaseNumaResource(container.getContainerId()); throw new ResourceHandlerException(e); } } return allocation; }
@Test public void testAllocateNumaNode() throws Exception { NumaResourceAllocation nodeInfo = numaResourceAllocator .allocateNumaNodes(getContainer( ContainerId.fromString("container_1481156246874_0001_01_000001"), Resource.newInstance(2048, 2))); Assert.assertEquals("0", String.join(",", nodeInfo.getMemNodes())); Assert.assertEquals("0", String.join(",", nodeInfo.getCpuNodes())); }
@Override public void execute(String commandName, BufferedReader reader, BufferedWriter writer) throws Py4JException, IOException { char subCommand = safeReadLine(reader).charAt(0); String returnCommand = null; if (subCommand == LIST_SLICE_SUB_COMMAND_NAME) { returnCommand = slice_list(reader); } else if (subCommand == LIST_CONCAT_SUB_COMMAND_NAME) { returnCommand = concat_list(reader); } else if (subCommand == LIST_MULT_SUB_COMMAND_NAME) { returnCommand = mult_list(reader); } else if (subCommand == LIST_IMULT_SUB_COMMAND_NAME) { returnCommand = imult_list(reader); } else if (subCommand == LIST_COUNT_SUB_COMMAND_NAME) { returnCommand = count_list(reader); } else { returnCommand = call_collections_method(reader, subCommand); } logger.finest("Returning command: " + returnCommand); writer.write(returnCommand); writer.flush(); }
@Test public void testMinException() { String inputCommand = ListCommand.LIST_MIN_SUB_COMMAND_NAME + "\n" + target2 + "\ne\n"; try { command.execute("l", new BufferedReader(new StringReader(inputCommand)), writer); assertEquals("!x\n", sWriter.toString()); } catch (Exception e) { e.printStackTrace(); fail(); } }
public static void addMetricsContextProperties(Map<String, Object> prop, WorkerConfig config, String clusterId) { //add all properties predefined with "metrics.context." prop.putAll(config.originalsWithPrefix(CommonClientConfigs.METRICS_CONTEXT_PREFIX, false)); //add connect properties prop.put(CommonClientConfigs.METRICS_CONTEXT_PREFIX + WorkerConfig.CONNECT_KAFKA_CLUSTER_ID, clusterId); Object groupId = config.originals().get(DistributedConfig.GROUP_ID_CONFIG); if (groupId != null) { prop.put(CommonClientConfigs.METRICS_CONTEXT_PREFIX + WorkerConfig.CONNECT_GROUP_ID, groupId); } }
@Test public void testAddMetricsContextPropertiesDistributed() { Map<String, String> props = new HashMap<>(); props.put(DistributedConfig.GROUP_ID_CONFIG, "connect-cluster"); props.put(DistributedConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); props.put(DistributedConfig.CONFIG_TOPIC_CONFIG, "connect-configs"); props.put(DistributedConfig.OFFSET_STORAGE_TOPIC_CONFIG, "connect-offsets"); props.put(DistributedConfig.STATUS_STORAGE_TOPIC_CONFIG, "connect-status"); props.put(DistributedConfig.KEY_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter"); props.put(DistributedConfig.VALUE_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter"); DistributedConfig config = new DistributedConfig(props); Map<String, Object> prop = new HashMap<>(); ConnectUtils.addMetricsContextProperties(prop, config, "cluster-1"); assertEquals("connect-cluster", prop.get(CommonClientConfigs.METRICS_CONTEXT_PREFIX + WorkerConfig.CONNECT_GROUP_ID)); assertEquals("cluster-1", prop.get(CommonClientConfigs.METRICS_CONTEXT_PREFIX + WorkerConfig.CONNECT_KAFKA_CLUSTER_ID)); }
@Override public void transform(Message message, DataType fromType, DataType toType) { final Optional<ValueRange> valueRange = getValueRangeBody(message); String range = message.getHeader(GoogleSheetsConstants.PROPERTY_PREFIX + "range", "A:A").toString(); String majorDimension = message .getHeader(GoogleSheetsConstants.PROPERTY_PREFIX + "majorDimension", RangeCoordinate.DIMENSION_ROWS).toString(); String spreadsheetId = message.getHeader(GoogleSheetsConstants.PROPERTY_PREFIX + "spreadsheetId", "").toString(); String[] columnNames = message.getHeader(GoogleSheetsConstants.PROPERTY_PREFIX + "columnNames", "A").toString().split(","); boolean splitResults = Boolean .parseBoolean(message.getHeader(GoogleSheetsConstants.PROPERTY_PREFIX + "splitResults", "false").toString()); if (valueRange.isPresent()) { message.setBody( transformFromValueRangeModel(message, valueRange.get(), spreadsheetId, range, majorDimension, columnNames)); } else if (splitResults) { message.setBody(transformFromSplitValuesModel(message, spreadsheetId, range, majorDimension, columnNames)); } else { String valueInputOption = message.getHeader(GoogleSheetsConstants.PROPERTY_PREFIX + "valueInputOption", "USER_ENTERED").toString(); message.setBody( transformToValueRangeModel(message, spreadsheetId, range, majorDimension, valueInputOption, columnNames)); } }
@Test public void testTransformToValueRangeMultipleRows() throws Exception { Exchange inbound = new DefaultExchange(camelContext); inbound.getMessage().setHeader(GoogleSheetsConstants.PROPERTY_PREFIX + "range", "A1:B2"); List<String> model = Arrays.asList("{" + "\"spreadsheetId\": \"" + spreadsheetId + "\"," + "\"A\": \"a1\"," + "\"B\": \"b1\"" + "}", "{" + "\"spreadsheetId\": \"" + spreadsheetId + "\"," + "\"A\": \"a2\"," + "\"B\": \"b2\"" + "}"); inbound.getMessage().setBody(model); transformer.transform(inbound.getMessage(), DataType.ANY, DataType.ANY); Assertions.assertEquals("A1:B2", inbound.getMessage().getHeader(GoogleSheetsStreamConstants.RANGE)); Assertions.assertEquals(RangeCoordinate.DIMENSION_ROWS, inbound.getMessage().getHeader(GoogleSheetsStreamConstants.MAJOR_DIMENSION)); Assertions.assertEquals("USER_ENTERED", inbound.getMessage().getHeader(GoogleSheetsConstants.PROPERTY_PREFIX + "valueInputOption")); ValueRange valueRange = (ValueRange) inbound.getMessage().getHeader(GoogleSheetsConstants.PROPERTY_PREFIX + "values"); Assertions.assertEquals(2L, valueRange.getValues().size()); Assertions.assertEquals(2L, valueRange.getValues().get(0).size()); Assertions.assertEquals("a1", valueRange.getValues().get(0).get(0)); Assertions.assertEquals("b1", valueRange.getValues().get(0).get(1)); Assertions.assertEquals(2L, valueRange.getValues().get(1).size()); Assertions.assertEquals("a2", valueRange.getValues().get(1).get(0)); Assertions.assertEquals("b2", valueRange.getValues().get(1).get(1)); }
public RingbufferStoreConfig getRingbufferStoreConfig() { return ringbufferStoreConfig; }
@Test public void getRingbufferStoreConfig() { final RingbufferConfig config = new RingbufferConfig(NAME); final RingbufferStoreConfig ringbufferConfig = config.getRingbufferStoreConfig(); assertNotNull(ringbufferConfig); assertFalse(ringbufferConfig.isEnabled()); }
public int termLength() { return termLength; }
@Test void throwsIllegalStateExceptionIfLogFileSizeIsLessThanLogMetaDataLength(@TempDir final Path dir) throws IOException { final Path logFile = dir.resolve("test.log"); assertNotNull(Files.createFile(logFile)); final int fileLength = LOG_META_DATA_LENGTH - 5; final byte[] contents = new byte[fileLength]; final UnsafeBuffer buffer = new UnsafeBuffer(contents); termLength(buffer, TERM_MIN_LENGTH); pageSize(buffer, PAGE_MIN_SIZE); assertNotNull(Files.write(logFile, contents)); assertEquals(contents.length, Files.size(logFile)); final IllegalStateException exception = assertThrowsExactly( IllegalStateException.class, () -> new LogBuffers(logFile.toAbsolutePath().toString())); assertEquals("Log file length less than min length of " + LOG_META_DATA_LENGTH + ": length=" + fileLength, exception.getMessage()); }
public double calculateDensity(Graph graph, boolean isGraphDirected) { double result; double edgesCount = graph.getEdgeCount(); double nodesCount = graph.getNodeCount(); double multiplier = 1; if (!isGraphDirected) { multiplier = 2; } result = (multiplier * edgesCount) / (nodesCount * nodesCount - nodesCount); return result; }
@Test public void testTwoCompleteGraphsDensity() { GraphModel graphModel = GraphGenerator.generateCompleteUndirectedGraph(4); UndirectedGraph undirectedGraph = graphModel.getUndirectedGraph(); Node[] nodes = new Node[4]; for (int i = 0; i < 4; i++) { Node currentNode = graphModel.factory().newNode(((Integer) (i + 4)).toString()); nodes[i] = currentNode; undirectedGraph.addNode(currentNode); } for (int i = 0; i < 3; i++) { for (int j = i + 1; j < 4; j++) { Edge currentEdge = graphModel.factory().newEdge(nodes[i], nodes[j], false); undirectedGraph.addEdge(currentEdge); } } Graph graph = graphModel.getGraph(); GraphDensity d = new GraphDensity(); double density = d.calculateDensity(graph, false); double expectedAvDegree = 0.4286; double diff = Math.abs(density - expectedAvDegree); assertTrue(diff < 0.01); }
public boolean isAttached(Appender<E> appender) { if (appender == null) { return false; } for (Appender<E> a : appenderList) { if (a == appender) return true; } return false; }
@Test public void testIsAttached() throws Exception { NOPAppender<TestEvent> ta = new NOPAppender<TestEvent>(); ta.start(); aai.addAppender(ta); NOPAppender<TestEvent> tab = new NOPAppender<TestEvent>(); tab.setName("test"); tab.start(); aai.addAppender(tab); Assertions.assertTrue(aai.isAttached(ta), "Appender is not attached"); Assertions.assertTrue(aai.isAttached(tab), "Appender is not attached"); }
@Override public ProjectRepositories load(String projectKey, @Nullable String branchBase) { GetRequest request = new GetRequest(getUrl(projectKey, branchBase)); try (WsResponse response = wsClient.call(request)) { try (InputStream is = response.contentStream()) { return processStream(is); } catch (IOException e) { throw new IllegalStateException("Couldn't load project repository for " + projectKey, e); } } catch (RuntimeException e) { if (shouldThrow(e)) { throw e; } LOG.debug("Project repository not available - continuing without it"); return new SingleProjectRepository(); } }
@Test public void continueOnHttp404Exception() { when(wsClient.call(any())).thenThrow(new HttpException("/batch/project.protobuf?key=foo%3F", HttpURLConnection.HTTP_NOT_FOUND, "")); ProjectRepositories proj = loader.load(PROJECT_KEY, null); assertThat(proj.exists()).isFalse(); }
@Override public Changeset getChangesetForLine(int lineNumber) { if (!hasChangesetForLine(lineNumber)) { throw new IllegalArgumentException("There's no changeset on line " + lineNumber); } return lineChangesets[lineNumber - 1]; }
@Test public void get_changeset_for_given_line() { ScmInfo scmInfo = createScmInfoWithTwoChangestOnFourLines(); assertThat(scmInfo.getChangesetForLine(1)).isEqualTo(CHANGESET_1); assertThat(scmInfo.getChangesetForLine(2)).isEqualTo(CHANGESET_2); assertThat(scmInfo.getChangesetForLine(3)).isEqualTo(CHANGESET_1); assertThat(scmInfo.getChangesetForLine(4)).isEqualTo(CHANGESET_1); }
@Benchmark @Threads(1) public void testCounterCellReset(CounterCellState counterState) throws Exception { counterState.counterCell.inc(); counterState.counterCell.reset(); counterState.counterCell.inc(); }
@Test public void testCounterCellReset() throws Exception { CounterCellState state = new CounterCellState(); new MetricsBenchmark().testCounterCellReset(state); state.check(); }
@Override public KTable<K, V> toTable() { return toTable(NamedInternal.empty(), Materialized.with(keySerde, valueSerde)); }
@Test public void shouldNotAllowNullMaterializedOnToTableWithNamed() { final NullPointerException exception = assertThrows( NullPointerException.class, () -> testStream.toTable(Named.as("name"), null)); assertThat(exception.getMessage(), equalTo("materialized can't be null")); }
@VisibleForTesting List<MappingRule> getMappingRules(MappingRulesDescription rules) { List<MappingRule> mappingRules = new ArrayList<>(); for (Rule rule : rules.getRules()) { checkMandatoryParameters(rule); MappingRuleMatcher matcher = createMatcher(rule); MappingRuleAction action = createAction(rule); setFallbackToAction(rule, action); MappingRule mappingRule = new MappingRule(matcher, action); mappingRules.add(mappingRule); } return mappingRules; }
@Test public void testFallbackResultUnset() { rule.setFallbackResult(null); List<MappingRule> rules = ruleCreator.getMappingRules(description); MappingRule mpr = rules.get(0); assertEquals("Fallback result", MappingRuleResultType.SKIP, mpr.getFallback().getResult()); }
protected abstract void doBuildListing(Path pathToListFile, DistCpContext distCpContext) throws IOException;
@Test public void testFailOnCloseError() throws IOException { File inFile = File.createTempFile("TestCopyListingIn", null); inFile.deleteOnExit(); File outFile = File.createTempFile("TestCopyListingOut", null); outFile.deleteOnExit(); List<Path> srcs = new ArrayList<Path>(); srcs.add(new Path(inFile.toURI())); Exception expectedEx = new IOException("boom"); SequenceFile.Writer writer = mock(SequenceFile.Writer.class); doThrow(expectedEx).when(writer).close(); SimpleCopyListing listing = new SimpleCopyListing(getConf(), CREDENTIALS); final DistCpOptions options = new DistCpOptions.Builder(srcs, new Path(outFile.toURI())).build(); Exception actualEx = null; try { listing.doBuildListing(writer, new DistCpContext(options)); } catch (Exception e) { actualEx = e; } Assert.assertNotNull("close writer didn't fail", actualEx); Assert.assertEquals(expectedEx, actualEx); }
@Description("Gamma cdf given the shape and scale parameter and value") @ScalarFunction @SqlType(StandardTypes.DOUBLE) public static double gammaCdf( @SqlType(StandardTypes.DOUBLE) double shape, @SqlType(StandardTypes.DOUBLE) double scale, @SqlType(StandardTypes.DOUBLE) double value) { checkCondition(value >= 0, INVALID_FUNCTION_ARGUMENT, "gammaCdf Function: value must be greater than, or equal to, 0"); checkCondition(shape > 0, INVALID_FUNCTION_ARGUMENT, "gammaCdf Function: shape must be greater than 0"); checkCondition(scale > 0, INVALID_FUNCTION_ARGUMENT, "gammaCdf Function: scale must be greater than 0"); GammaDistribution distribution = new GammaDistribution(null, shape, scale, GammaDistribution.DEFAULT_INVERSE_ABSOLUTE_ACCURACY); return distribution.cumulativeProbability(value); }
@Test public void testGammaCdf() { assertFunction("round(gamma_cdf(3.0, 4.0, 0.0), 10)", DOUBLE, 0.0); assertFunction("round(gamma_cdf(3.0, 4.0, 1.0), 3)", DOUBLE, 0.002); assertFunction("round(gamma_cdf(3.0, 4.0, 5.0), 3)", DOUBLE, 0.132); assertFunction("round(gamma_cdf(3.0, 4.0, 10.0), 3)", DOUBLE, 0.456); // Gamma with shape 10k/2 and scale 2 is chisquare with df=10k, which is approximatly Normal with mu=10k // Hence, we expect that the CDF of 10k will be close to 0.5, as we indeed get: assertFunction("round(gamma_cdf(10000.0/2, 2.0, 10000.0), 3)", DOUBLE, 0.502); assertInvalidFunction("gamma_cdf(0, 3, 0.5)", "gammaCdf Function: shape must be greater than 0"); assertInvalidFunction("gamma_cdf(3, 0, 0.5)", "gammaCdf Function: scale must be greater than 0"); assertInvalidFunction("gamma_cdf(3, 5, -0.1)", "gammaCdf Function: value must be greater than, or equal to, 0"); }
public void visit(Entry entry) { final AFreeplaneAction action = new EntryAccessor().getAction(entry); if (action != null) { final EntryAccessor entryAccessor = new EntryAccessor(); String accelerator = entryAccessor.getAccelerator(entry); if(accelerator != null) { map.setDefaultAccelerator(action, accelerator); } else map.setUserDefinedAccelerator(action); entries.registerEntry(action, entry); } }
@Test public void registersEntryWithAction() { Entry actionEntry = new Entry(); final AFreeplaneAction action = mock(AFreeplaneAction.class); new EntryAccessor().setAction(actionEntry, action); IAcceleratorMap map = mock(IAcceleratorMap.class); EntriesForAction entries = mock(EntriesForAction.class); final AcceleratorBuilder acceleratorBuilder = new AcceleratorBuilder(map, entries); acceleratorBuilder.visit(actionEntry); Mockito.verify(entries).registerEntry(action, actionEntry); }
public static Message toProto(final Map<?, ?> inputData, final Message defaultInstance) { ObjectHelper.notNull(inputData, "inputData"); ObjectHelper.notNull(defaultInstance, "defaultInstance"); final Descriptor descriptor = defaultInstance.getDescriptorForType(); final Builder target = defaultInstance.newBuilderForType(); return convertMapToMessage(descriptor, target, inputData); }
@Test public void testIfThrowsErrorInCaseRepeatedFieldIsNotList() { final Map<String, Object> input = new HashMap<>(); input.put("name", "Martin"); input.put("id", 1234); input.put("nicknames", "wrong nickname"); final AddressBookProtos.Person defaultInstance = AddressBookProtos.Person.getDefaultInstance(); assertThrows(IllegalArgumentException.class, () -> ProtobufConverter.toProto(input, defaultInstance)); }
static Entry<String, String> splitTrimmedConfigStringComponent(String input) { int i; for (i = 0; i < input.length(); i++) { if (input.charAt(i) == '=') { break; } } if (i == input.length()) { throw new FormatterException("No equals sign found in SCRAM component: " + input); } String value = input.substring(i + 1); if (value.length() >= 2) { if (value.startsWith("\"") && value.endsWith("\"")) { value = value.substring(1, value.length() - 1); } } return new AbstractMap.SimpleImmutableEntry<>(input.substring(0, i), value); }
@Test public void testSplitTrimmedConfigStringComponentOnNameEqualsQuotedFoo() { assertEquals(new AbstractMap.SimpleImmutableEntry<>("name", "foo"), ScramParser.splitTrimmedConfigStringComponent("name=\"foo\"")); }
static final String addFunctionParameter(ParameterDescriptor descriptor, RuleBuilderStep step) { final String parameterName = descriptor.name(); // parameter name needed by function final Map<String, Object> parameters = step.parameters(); if (Objects.isNull(parameters)) { return null; } final Object value = parameters.get(parameterName); // parameter value set by rule definition String syntax = " " + parameterName + " : "; if (value == null) { return null; } else if (value instanceof String valueString) { if (StringUtils.isEmpty(valueString)) { return null; } else if (valueString.startsWith("$")) { // value set as variable syntax += valueString.substring(1); } else { syntax += "\"" + StringEscapeUtils.escapeJava(valueString) + "\""; // value set as string } } else { syntax += value; } return syntax; }
@Test public void addFunctionParameterSyntaxOk_WhenStringParameterValueIsSet() { String parameterName = "foo"; var parameterValue = "bar"; RuleBuilderStep step = mock(RuleBuilderStep.class); Map<String, Object> params = Map.of(parameterName, parameterValue); when(step.parameters()).thenReturn(params); ParameterDescriptor descriptor = mock(ParameterDescriptor.class); when(descriptor.name()).thenReturn(parameterName); assertThat(ParserUtil.addFunctionParameter(descriptor, step)) .isEqualTo(" foo : \"bar\""); }
@VisibleForTesting public static JobGraph createJobGraph(StreamGraph streamGraph) { return new StreamingJobGraphGenerator( Thread.currentThread().getContextClassLoader(), streamGraph, null, Runnable::run) .createJobGraph(); }
@Test void testSetNonDefaultSlotSharingInHybridMode() { Configuration configuration = new Configuration(); // set all edge to HYBRID_FULL result partition type. configuration.set( ExecutionOptions.BATCH_SHUFFLE_MODE, BatchShuffleMode.ALL_EXCHANGES_HYBRID_FULL); final StreamGraph streamGraph = createStreamGraphForSlotSharingTest(configuration); // specify slot sharing group for map1 streamGraph.getStreamNodes().stream() .filter(n -> "map1".equals(n.getOperatorName())) .findFirst() .get() .setSlotSharingGroup("testSlotSharingGroup"); assertThatThrownBy(() -> StreamingJobGraphGenerator.createJobGraph(streamGraph)) .isInstanceOf(IllegalStateException.class) .hasMessage( "hybrid shuffle mode currently does not support setting non-default slot sharing group."); // set all edge to HYBRID_SELECTIVE result partition type. configuration.set( ExecutionOptions.BATCH_SHUFFLE_MODE, BatchShuffleMode.ALL_EXCHANGES_HYBRID_SELECTIVE); final StreamGraph streamGraph2 = createStreamGraphForSlotSharingTest(configuration); // specify slot sharing group for map1 streamGraph2.getStreamNodes().stream() .filter(n -> "map1".equals(n.getOperatorName())) .findFirst() .get() .setSlotSharingGroup("testSlotSharingGroup"); assertThatThrownBy(() -> StreamingJobGraphGenerator.createJobGraph(streamGraph2)) .isInstanceOf(IllegalStateException.class) .hasMessage( "hybrid shuffle mode currently does not support setting non-default slot sharing group."); }
@Override public void close() { _zkClient.close(); }
@Test public void testCloseZkClient() { _dynamicBrokerSelectorUnderTest.close(); Mockito.verify(_mockZkClient, times(1)).close(); }
@Override public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException { long endTimeNanos = System.nanoTime() + unit.toNanos(timeout); ArrayList<ManagedChannel> channels = new ArrayList<>(); synchronized (this) { if (!shutdownStarted) { return false; } if (isTerminated()) { return true; } channels.addAll(usedChannels); channels.addAll(channelCache); } // Block outside the synchronized section. for (ManagedChannel channel : channels) { long awaitTimeNanos = endTimeNanos - System.nanoTime(); if (awaitTimeNanos <= 0) { break; } channel.awaitTermination(awaitTimeNanos, TimeUnit.NANOSECONDS); } return isTerminated(); }
@Test public void testAwaitTermination() throws Exception { ManagedChannel mockChannel = mock(ManagedChannel.class); when(channelSupplier.get()).thenReturn(mockChannel); IsolationChannel isolationChannel = IsolationChannel.create(channelSupplier); assertFalse(isolationChannel.awaitTermination(1, TimeUnit.MILLISECONDS)); when(mockChannel.shutdown()).thenReturn(mockChannel); when(mockChannel.isTerminated()).thenReturn(false, false, false, true, true); when(mockChannel.awaitTermination(longThat(l -> l < 2_000_000), eq(TimeUnit.NANOSECONDS))) .thenReturn(false, true); isolationChannel.shutdown(); assertFalse(isolationChannel.awaitTermination(1, TimeUnit.MILLISECONDS)); assertTrue(isolationChannel.awaitTermination(1, TimeUnit.MILLISECONDS)); assertTrue(isolationChannel.isTerminated()); verify(channelSupplier, times(1)).get(); verify(mockChannel, times(1)).shutdown(); verify(mockChannel, times(5)).isTerminated(); verify(mockChannel, times(2)) .awaitTermination(longThat(l -> l < 2_000_000), eq(TimeUnit.NANOSECONDS)); }
@VisibleForTesting protected void handleException( String message, Exception exception, StringBuilder text, StringBuilder details ) { if ( exception instanceof KettleException ) { // Normal error KettleException ke = (KettleException) exception; Throwable cause = ke.getCause(); if ( cause != null && cause.getMessage() != null ) { text.append( cause.getMessage() ); } else { text.append( ke.getMessage() ); } } else if ( exception instanceof InvocationTargetException ) { // Error from somewhere else, what is the cause? Throwable cause = exception.getCause(); if ( cause instanceof KettleException ) { KettleException ke = (KettleException) cause; text.append( ke.getMessage() ); } else { text.append( Const.NVL( cause.getMessage(), cause.toString() ) ); while ( text.length() == 0 && cause != null ) { cause = cause.getCause(); if ( cause != null ) { text.append( Const.NVL( cause.getMessage(), cause.toString() ) ); } } } } else { // Error from somewhere else... if ( exception.getMessage() == null ) { text.append( message ); } else { text.append( exception.getMessage() ); } } StringWriter sw = new StringWriter(); PrintWriter pw = new PrintWriter( sw ); exception.printStackTrace( pw ); details.append( sw.getBuffer() ); }
@Test public void setErrorTextWithCauseExceptionWithoutCauseMessage() { //cause without message ClientProtocolException cpe = new ClientProtocolException( ); Exception e = new KettleException( "kettleMessage", cpe ); StringBuilder text = new StringBuilder(); StringBuilder details = new StringBuilder(); ErrorDialog dialog = mock( ErrorDialog.class ); doCallRealMethod().when( dialog ).handleException( anyString(), any( Exception.class ), any( StringBuilder.class ), any( StringBuilder.class ) ); dialog.handleException( "argMessage", e, text, details ); assertEquals( text.toString(), e.getMessage().toString() ); }
public boolean evaluate( RowMetaInterface rowMeta, Object[] r ) { // Start of evaluate boolean retval = false; // If we have 0 items in the list, evaluate the current condition // Otherwise, evaluate all sub-conditions // try { if ( isAtomic() ) { if ( function == FUNC_TRUE ) { return !negate; } // Get fieldnrs left value // // Check out the fieldnrs if we don't have them... if ( leftValuename != null && leftValuename.length() > 0 ) { leftFieldnr = rowMeta.indexOfValue( leftValuename ); } // Get fieldnrs right value // if ( rightValuename != null && rightValuename.length() > 0 ) { rightFieldnr = rowMeta.indexOfValue( rightValuename ); } // Get fieldnrs left field ValueMetaInterface fieldMeta = null; Object field = null; if ( leftFieldnr >= 0 ) { fieldMeta = rowMeta.getValueMeta( leftFieldnr ); field = r[ leftFieldnr ]; } else { return false; // no fields to evaluate } // Get fieldnrs right exact ValueMetaInterface fieldMeta2 = rightExact != null ? rightExact.getValueMeta() : null; Object field2 = rightExact != null ? 
rightExact.getValueData() : null; if ( field2 == null && rightFieldnr >= 0 ) { fieldMeta2 = rowMeta.getValueMeta( rightFieldnr ); field2 = r[ rightFieldnr ]; } // Evaluate switch ( function ) { case FUNC_EQUAL: retval = ( fieldMeta.compare( field, fieldMeta2, field2 ) == 0 ); break; case FUNC_NOT_EQUAL: retval = ( fieldMeta.compare( field, fieldMeta2, field2 ) != 0 ); break; case FUNC_SMALLER: // Added this if/else to accommodate for CUST-270 if ( "Y".equalsIgnoreCase( System.getProperty( Const.KETTLE_FILTER_TREAT_NULLS_AS_NOT_ZERO, "N" ) ) && fieldMeta.isNull( field ) ) { retval = false; } else { retval = ( fieldMeta.compare( field, fieldMeta2, field2 ) < 0 ); } break; case FUNC_SMALLER_EQUAL: // Added this if/else to accommodate for CUST-270 if ( "Y".equalsIgnoreCase( System.getProperty( Const.KETTLE_FILTER_TREAT_NULLS_AS_NOT_ZERO, "N" ) ) && fieldMeta.isNull( field ) ) { retval = false; } else { retval = ( fieldMeta.compare( field, fieldMeta2, field2 ) <= 0 ); } break; case FUNC_LARGER: retval = ( fieldMeta.compare( field, fieldMeta2, field2 ) > 0 ); break; case FUNC_LARGER_EQUAL: retval = ( fieldMeta.compare( field, fieldMeta2, field2 ) >= 0 ); break; case FUNC_REGEXP: if ( fieldMeta.isNull( field ) || field2 == null ) { retval = false; } else { retval = Pattern .matches( fieldMeta2.getCompatibleString( field2 ), fieldMeta.getCompatibleString( field ) ); } break; case FUNC_NULL: retval = ( fieldMeta.isNull( field ) ); break; case FUNC_NOT_NULL: retval = ( !fieldMeta.isNull( field ) ); break; case FUNC_IN_LIST: // performance reason: create the array first or again when it is against a field and not a constant // if ( inList == null || rightFieldnr >= 0 ) { inList = Const.splitString( fieldMeta2.getString( field2 ), ';', true ); for ( int i = 0; i < inList.length; i++ ) { inList[i] = inList[i] == null ? 
null : inList[i].replace( "\\", "" ); } Arrays.sort( inList ); } String searchString = fieldMeta.getCompatibleString( field ); int inIndex = -1; if ( searchString != null ) { inIndex = Arrays.binarySearch( inList, searchString ); } retval = inIndex >= 0; break; case FUNC_CONTAINS: String fm2CompatibleContains = fieldMeta2.getCompatibleString( field2 ); retval = Optional.ofNullable( fieldMeta.getCompatibleString( field ) ) .filter( s -> s.contains( fm2CompatibleContains ) ).isPresent(); break; case FUNC_STARTS_WITH: String fm2CompatibleStarts = fieldMeta2.getCompatibleString( field2 ); retval = Optional.ofNullable( fieldMeta.getCompatibleString( field ) ) .filter( s -> s.startsWith( fm2CompatibleStarts ) ).isPresent(); break; case FUNC_ENDS_WITH: String string = fieldMeta.getCompatibleString( field ); if ( !Utils.isEmpty( string ) ) { if ( rightString == null && field2 != null ) { rightString = fieldMeta2.getCompatibleString( field2 ); } if ( rightString != null ) { retval = string.endsWith( fieldMeta2.getCompatibleString( field2 ) ); } else { retval = false; } } else { retval = false; } break; case FUNC_LIKE: // Converts to a regular expression // TODO: optimize the patterns and String replacements // if ( fieldMeta.isNull( field ) || field2 == null ) { retval = false; } else { String regex = fieldMeta2.getCompatibleString( field2 ); regex = regex.replace( "%", ".*" ); regex = regex.replace( "?", "." ); retval = Pattern.matches( regex, fieldMeta.getCompatibleString( field ) ); } break; default: break; } // Only NOT makes sense, the rest doesn't, so ignore!!!! // Optionally negate // if ( isNegated() ) { retval = !retval; } } else { // Composite : get first Condition cb0 = list.get( 0 ); retval = cb0.evaluate( rowMeta, r ); // Loop over the conditions listed below. 
// for ( int i = 1; i < list.size(); i++ ) { // Composite : #i // Get right hand condition Condition cb = list.get( i ); // Evaluate the right hand side of the condition cb.evaluate() within // the switch statement // because the condition may be short-circuited due to the left hand // side (retval) switch ( cb.getOperator() ) { case Condition.OPERATOR_OR: retval = retval || cb.evaluate( rowMeta, r ); break; case Condition.OPERATOR_AND: retval = retval && cb.evaluate( rowMeta, r ); break; case Condition.OPERATOR_OR_NOT: retval = retval || ( !cb.evaluate( rowMeta, r ) ); break; case Condition.OPERATOR_AND_NOT: retval = retval && ( !cb.evaluate( rowMeta, r ) ); break; case Condition.OPERATOR_XOR: retval = retval ^ cb.evaluate( rowMeta, r ); break; default: break; } } // Composite: optionally negate if ( isNegated() ) { retval = !retval; } } } catch ( Exception e ) { throw new RuntimeException( "Unexpected error evaluation condition [" + toString() + "]", e ); } return retval; }
@Test
public void testNullLargerOrEqualsThanZero() {
  // A null left operand must never satisfy ">= 0".
  final String leftField = "left";
  final String rightField = "right";

  final RowMetaInterface meta = new RowMeta();
  meta.addValueMeta( new ValueMetaInteger( leftField ) );
  meta.addValueMeta( new ValueMetaInteger( rightField ) );

  final Condition condition = new Condition( leftField, Condition.FUNC_LARGER_EQUAL, rightField, null );

  // row = { null, 0L }
  final Object[] row = new Object[] { null, 0L };
  assertFalse( condition.evaluate( meta, row ) );
}
/**
 * Ensures this message carries the expected RPC message type.
 *
 * @param expected the type this message is required to have
 * @throws IllegalArgumentException if the actual type differs from {@code expected}
 */
protected void validateMessageType(Type expected) {
  if (messageType != expected) {
    throw new IllegalArgumentException(
        "Message type is expected to be " + expected + " but got " + messageType);
  }
}
@Test
public void testValidateMessage() {
  // A message created as RPC_CALL must pass validation for RPC_CALL.
  final RpcMessage message = getRpcMessage(0, RpcMessage.Type.RPC_CALL);
  message.validateMessageType(RpcMessage.Type.RPC_CALL);
}
/**
 * Parses a signed decimal {@code int} from an ASCII {@link CharSequence}.
 *
 * @param cs     sequence containing the digits, optionally preceded by '-'
 * @param index  first character of the number within {@code cs}
 * @param length number of characters that make up the number
 * @return the parsed value
 * @throws AsciiNumberFormatException if the span is empty or malformed, or the value overflows an int
 */
public static int parseIntAscii(final CharSequence cs, final int index, final int length)
{
    if (length <= 0)
    {
        throw new AsciiNumberFormatException("empty string: index=" + index + " length=" + length);
    }

    final boolean negative = MINUS_SIGN == cs.charAt(index);
    int i = index;
    if (negative)
    {
        i++;
        // a lone '-' is not a number
        if (1 == length)
        {
            throwParseIntError(cs, index, length);
        }
    }

    final int end = index + length;
    if (end - i < INT_MAX_DIGITS)
    {
        // Fewer digits than Integer.MAX_VALUE has: overflow is impossible, parse directly in an int.
        final int tally = parsePositiveIntAscii(cs, index, length, i, end);
        return negative ? -tally : tally;
    }
    else
    {
        // Max-width number: accumulate in a long and range-check, remembering that
        // Integer.MIN_VALUE has one more unit of magnitude than Integer.MAX_VALUE.
        final long tally = parsePositiveIntAsciiOverflowCheck(cs, index, length, i, end);
        if (tally > INTEGER_ABSOLUTE_MIN_VALUE || INTEGER_ABSOLUTE_MIN_VALUE == tally && !negative)
        {
            throwParseIntOverflowError(cs, index, length);
        }
        return (int)(negative ? -tally : tally);
    }
}
/**
 * Round-trips random ints through their ASCII representation embedded after a
 * non-numeric prefix, checking that parseIntAscii recovers the original value.
 */
@Test
void parseIntAsciiRoundTrip()
{
    final String prefix = "testInt";
    final StringBuilder buffer = new StringBuilder(24);
    buffer.append(prefix);

    for (int i = 0; i < ITERATIONS; i++)
    {
        final int value = ThreadLocalRandom.current().nextInt();
        buffer.append(value);

        final int parsedValue = parseIntAscii(buffer, prefix.length(), buffer.length() - prefix.length());
        // Fix: JUnit's assertEquals takes (expected, actual) — the original call
        // had them swapped, which produces misleading failure messages.
        assertEquals(value, parsedValue);

        // Truncate back to the prefix instead of deleting up to the hard-coded capacity.
        buffer.setLength(prefix.length());
    }
}
/**
 * Registers any named Hadoop cluster referenced by the given URL so that it
 * gets embedded into the transformation's metastore.
 *
 * @param urlString the URL to inspect; may be null (ignored)
 */
public void registerUrl( String urlString ) {
  if ( urlString == null || addedAllClusters == true ) {
    return; //We got no url or already added all clusters so nothing to do.
  }
  if ( urlString.startsWith( VARIABLE_START ) ) {
    // Whole URL starts with a variable: we cannot tell which cluster it will
    // resolve to, so conservatively embed every known cluster.
    // NOTE(review): execution still falls through to the pattern match below —
    // presumably intentional (the variable may resolve to an hc:// URL); confirm.
    addAllClusters();
  }
  Pattern r = Pattern.compile( URL_PATTERN );
  Matcher m = r.matcher( urlString );
  if ( m.find() ) {
    String protocol = m.group( PARSE_URL_SCHEME );
    String clusterName = m.group( PARSE_URL_AUTHORITY );
    if ( "hc".equals( protocol ) ) {
      if ( clusterName.startsWith( VARIABLE_START ) ) {
        // Cluster name itself is a variable — embed everything to be safe.
        addAllClusters();
      }
      addClusterToMeta( clusterName );
    }
  }
}
// A URL that is entirely a variable cannot be resolved to a single cluster,
// so every known named cluster must be embedded.
@Test
public void testRegisterUrlFullVariable() throws Exception {
  // Two named clusters are known to the service.
  when( mockNamedClusterService.listNames( mockMeta.getMetaStore() ) )
      .thenReturn( Arrays.asList( CLUSTER1_NAME, CLUSTER2_NAME ) );

  namedClusterEmbedManager.registerUrl( "${variable)" );

  // Both clusters were saved into the embedded metastore.
  verify( mockMetaStoreFactory ).saveElement( mockNamedCluster1 );
  verify( mockMetaStoreFactory ).saveElement( mockNamedCluster2 );
}
/**
 * (Re)fills the in-memory cache with the merged disk range that contains
 * {@code offset}, growing the backing array when needed.
 *
 * @param offset file offset that must be covered by the cache after this call
 * @throws IOException if the underlying data source fails
 */
@VisibleForTesting
void readCacheAt(long offset) throws IOException {
    DiskRange newCacheRange = regionFinder.getRangeFor(offset);
    cachePosition = newCacheRange.getOffset();
    cacheLength = newCacheRange.getLength();
    if (cache.length < cacheLength) {
        // Only grow, never shrink, the buffer.
        // NOTE(review): setBytes is only invoked when a new array is allocated,
        // so the reported memory reflects allocations, not the current cacheLength.
        cache = new byte[cacheLength];
        systemMemoryContext.setBytes(cacheLength);
    }
    dataSource.readFully(newCacheRange.getOffset(), cache, 0, cacheLength);
}
// Verifies that reads through the tiny-stripe range finder fetch merged ranges
// (small adjacent stripes are read as one) and that memory accounting grows
// only when the cache buffer has to be reallocated.
@Test
public void testTinyStripesReadCacheAt()
        throws IOException
{
    DataSize maxMergeDistance = new DataSize(1, Unit.MEGABYTE);
    DataSize tinyStripeThreshold = new DataSize(8, Unit.MEGABYTE);

    OrcAggregatedMemoryContext systemMemoryContext = new TestingHiveOrcAggregatedMemoryContext();

    // Case 1: two tiny stripes merge into one range; the third is read alone.
    TestingOrcDataSource testingOrcDataSource = new TestingOrcDataSource(NoopOrcDataSource.INSTANCE);
    CachingOrcDataSource cachingOrcDataSource = new CachingOrcDataSource(
            testingOrcDataSource,
            createTinyStripesRangeFinder(
                    ImmutableList.of(
                            new StripeInformation(123, 3, 10, 10, 10, OptionalLong.empty(), ImmutableList.of()),
                            new StripeInformation(123, 33, 10, 10, 10, OptionalLong.empty(), ImmutableList.of()),
                            new StripeInformation(123, 63, 1048576 * 8 - 20, 10, 10, OptionalLong.empty(), ImmutableList.of())),
                    maxMergeDistance, tinyStripeThreshold),
            systemMemoryContext.newOrcLocalMemoryContext(CachingOrcDataSource.class.getSimpleName()));
    cachingOrcDataSource.readCacheAt(3);
    assertEquals(testingOrcDataSource.getLastReadRanges(), ImmutableList.of(new DiskRange(3, 60)));
    cachingOrcDataSource.readCacheAt(63);
    // The allocated cache size is the length of the merged disk range 1048576 * 8.
    assertEquals(systemMemoryContext.getBytes(), 8 * 1048576);
    assertEquals(testingOrcDataSource.getLastReadRanges(), ImmutableList.of(new DiskRange(63, 8 * 1048576)));

    // Case 2: same layout, reading at the last byte of a stripe.
    testingOrcDataSource = new TestingOrcDataSource(NoopOrcDataSource.INSTANCE);
    cachingOrcDataSource = new CachingOrcDataSource(
            testingOrcDataSource,
            createTinyStripesRangeFinder(
                    ImmutableList.of(
                            new StripeInformation(123, 3, 10, 10, 10, OptionalLong.empty(), ImmutableList.of()),
                            new StripeInformation(123, 33, 10, 10, 10, OptionalLong.empty(), ImmutableList.of()),
                            new StripeInformation(123, 63, 1048576 * 8 - 20, 10, 10, OptionalLong.empty(), ImmutableList.of())),
                    maxMergeDistance, tinyStripeThreshold),
            systemMemoryContext.newOrcLocalMemoryContext(CachingOrcDataSource.class.getSimpleName()));
    cachingOrcDataSource.readCacheAt(62); // read at the end of a stripe
    assertEquals(testingOrcDataSource.getLastReadRanges(), ImmutableList.of(new DiskRange(3, 60)));
    cachingOrcDataSource.readCacheAt(63);
    // The newly allocated cache size is the length of the merged disk range 1048576 * 8 so the total size is 8 * 1048576 * 2.
    assertEquals(systemMemoryContext.getBytes(), 8 * 1048576 * 2);
    assertEquals(testingOrcDataSource.getLastReadRanges(), ImmutableList.of(new DiskRange(63, 8 * 1048576)));

    // Case 3: stripes whose merged range is bounded by the tiny-stripe threshold.
    testingOrcDataSource = new TestingOrcDataSource(NoopOrcDataSource.INSTANCE);
    cachingOrcDataSource = new CachingOrcDataSource(
            testingOrcDataSource,
            createTinyStripesRangeFinder(
                    ImmutableList.of(
                            new StripeInformation(123, 3, 1, 1, 1, OptionalLong.empty(), ImmutableList.of()),
                            new StripeInformation(123, 4, 1048576, 1048576, 1048576 * 3, OptionalLong.empty(), ImmutableList.of()),
                            new StripeInformation(123, 4 + 1048576 * 5, 1048576, 1048576, 1048576, OptionalLong.empty(), ImmutableList.of())),
                    maxMergeDistance, tinyStripeThreshold),
            systemMemoryContext.newOrcLocalMemoryContext(CachingOrcDataSource.class.getSimpleName()));
    cachingOrcDataSource.readCacheAt(3);
    assertEquals(testingOrcDataSource.getLastReadRanges(), ImmutableList.of(new DiskRange(3, 1 + 1048576 * 5)));
    cachingOrcDataSource.readCacheAt(4 + 1048576 * 5);
    // The newly allocated cache size is the length of the first merged disk range 1048576 * 5 + 1.
    assertEquals(systemMemoryContext.getBytes(), 8 * 1048576 * 2 + 1048576 * 5 + 1);
    assertEquals(testingOrcDataSource.getLastReadRanges(), ImmutableList.of(new DiskRange(4 + 1048576 * 5, 3 * 1048576)));
}
/**
 * Adds the connection and authentication options for a single MirrorMaker 2
 * cluster to the shared connector configuration, prefixing every key with
 * {@code configPrefix} (e.g. "source.cluster.").
 *
 * Secrets (keystore / SASL passwords) are referenced through the Kafka file
 * config provider so they never appear in the configuration in clear text.
 *
 * @param config       map the generated options are added to
 * @param cluster      cluster definition from the KafkaMirrorMaker2 resource
 * @param configPrefix prefix prepended to every generated key
 */
static void addClusterToMirrorMaker2ConnectorConfig(Map<String, Object> config, KafkaMirrorMaker2ClusterSpec cluster, String configPrefix) {
    config.put(configPrefix + "alias", cluster.getAlias());
    config.put(configPrefix + AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());

    // Adds truststore options when TLS is configured and returns the base
    // security protocol; SASL variants below may override it.
    String securityProtocol = addTLSConfigToMirrorMaker2ConnectorConfig(config, cluster, configPrefix);

    if (cluster.getAuthentication() != null) {
        if (cluster.getAuthentication() instanceof KafkaClientAuthenticationTls) {
            config.put(configPrefix + SslConfigs.SSL_KEYSTORE_TYPE_CONFIG, "PKCS12");
            config.put(configPrefix + SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, STORE_LOCATION_ROOT + cluster.getAlias() + KEYSTORE_SUFFIX);
            config.put(configPrefix + SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, "${file:" + CONNECTORS_CONFIG_FILE + ":" + SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG + "}");
        } else if (cluster.getAuthentication() instanceof KafkaClientAuthenticationPlain plainAuthentication) {
            securityProtocol = cluster.getTls() != null ? "SASL_SSL" : "SASL_PLAINTEXT";
            config.put(configPrefix + SaslConfigs.SASL_MECHANISM, "PLAIN");
            config.put(configPrefix + SaslConfigs.SASL_JAAS_CONFIG, AuthenticationUtils.jaasConfig("org.apache.kafka.common.security.plain.PlainLoginModule", Map.of("username", plainAuthentication.getUsername(), "password", "${file:" + CONNECTORS_CONFIG_FILE + ":" + cluster.getAlias() + ".sasl.password}")));
        } else if (cluster.getAuthentication() instanceof KafkaClientAuthenticationScram scramAuthentication) {
            securityProtocol = cluster.getTls() != null ? "SASL_SSL" : "SASL_PLAINTEXT";
            // SHA-256 vs SHA-512 is decided by the concrete authentication subclass.
            config.put(configPrefix + SaslConfigs.SASL_MECHANISM, scramAuthentication instanceof KafkaClientAuthenticationScramSha256 ? "SCRAM-SHA-256" : "SCRAM-SHA-512");
            config.put(configPrefix + SaslConfigs.SASL_JAAS_CONFIG, AuthenticationUtils.jaasConfig("org.apache.kafka.common.security.scram.ScramLoginModule", Map.of("username", scramAuthentication.getUsername(), "password", "${file:" + CONNECTORS_CONFIG_FILE + ":" + cluster.getAlias() + ".sasl.password}")));
        } else if (cluster.getAuthentication() instanceof KafkaClientAuthenticationOAuth oauthAuthentication) {
            securityProtocol = cluster.getTls() != null ? "SASL_SSL" : "SASL_PLAINTEXT";
            config.put(configPrefix + SaslConfigs.SASL_MECHANISM, "OAUTHBEARER");
            config.put(configPrefix + SaslConfigs.SASL_JAAS_CONFIG, oauthJaasConfig(cluster, oauthAuthentication));
            config.put(configPrefix + SaslConfigs.SASL_LOGIN_CALLBACK_HANDLER_CLASS, "io.strimzi.kafka.oauth.client.JaasClientOauthLoginCallbackHandler");
        }
    }

    // Security protocol
    config.put(configPrefix + AdminClientConfig.SECURITY_PROTOCOL_CONFIG, securityProtocol);

    // User-supplied per-cluster config and free-form additional properties are applied last.
    config.putAll(cluster.getConfig().entrySet().stream()
            .collect(Collectors.toMap(entry -> configPrefix + entry.getKey(), Map.Entry::getValue)));
    config.putAll(cluster.getAdditionalProperties());
}
// SCRAM-SHA-512 auth together with TLS must yield SASL_SSL, the SCRAM JAAS
// login module, and file-provider indirection for truststore and SASL passwords.
@Test
public void testAddClusterToMirrorMaker2ConnectorConfigWithScramAndTlsEncryption() {
    Map<String, Object> config = new HashMap<>();
    KafkaMirrorMaker2ClusterSpec cluster = new KafkaMirrorMaker2ClusterSpecBuilder()
            .withAlias("sourceClusterAlias")
            .withBootstrapServers("sourceClusterAlias.sourceNamespace.svc:9092")
            .withNewKafkaClientAuthenticationScramSha512()
                .withUsername("shaza")
                .withNewPasswordSecret()
                    .withPassword("pa55word")
                .endPasswordSecret()
            .endKafkaClientAuthenticationScramSha512()
            .withNewTls()
                .withTrustedCertificates(new CertSecretSourceBuilder().withSecretName("my-tls").withCertificate("ca.crt").build())
            .endTls()
            .build();

    KafkaMirrorMaker2Connectors.addClusterToMirrorMaker2ConnectorConfig(config, cluster, PREFIX);

    // The JAAS config is parsed and checked separately because its option order is not stable.
    String jaasConfig = (String) config.remove("prefix.sasl.jaas.config");
    AppConfigurationEntry configEntry = AuthenticationUtilsTest.parseJaasConfig(jaasConfig);
    // NOTE(review): assertThat's (actual, matcher) arguments look swapped here —
    // it still passes because equality is symmetric, but failure output is misleading.
    assertThat("org.apache.kafka.common.security.scram.ScramLoginModule", is(configEntry.getLoginModuleName()));
    assertThat(configEntry.getOptions(), is(Map.of("username", "shaza", "password", "${file:/tmp/strimzi-mirrormaker2-connector.properties:sourceClusterAlias.sasl.password}")));

    // Remaining options are compared as sorted maps for a deterministic diff.
    assertThat(new TreeMap<>(config), is(new TreeMap<>(Map.of("prefix.alias", "sourceClusterAlias",
            "prefix.security.protocol", "SASL_SSL",
            "prefix.ssl.truststore.location", "/tmp/kafka/clusters/sourceClusterAlias.truststore.p12",
            "prefix.ssl.truststore.password", "${file:/tmp/strimzi-mirrormaker2-connector.properties:ssl.truststore.password}",
            "prefix.ssl.truststore.type", "PKCS12",
            "prefix.sasl.mechanism", "SCRAM-SHA-512",
            "prefix.bootstrap.servers", "sourceClusterAlias.sourceNamespace.svc:9092"))));
}
/**
 * @return the fixed data type handled by this subscriber: {@link DataType#META_DATA}
 */
@Override
public DataType getType() {
    return DataType.META_DATA;
}
@Test
public void testGetType() {
    // The metadata subscriber must report the META_DATA type.
    final DataType actual = metadataExecutorSubscriber.getType();
    Assertions.assertEquals(DataType.META_DATA, actual);
}
/**
 * Extracts the file extension (the text after the last dot) from a filename.
 *
 * @param originalFilename filename such as "application+default+application.yml"
 * @return the extension, e.g. "yml"
 * @throws BadRequestException if the filename has no dot, or the extension is
 *         empty (trailing dot) — the original implementation silently returned
 *         "" for a trailing dot, which is never a valid format
 */
public static String getFormat(final String originalFilename) {
    // stdlib lastIndexOf replaces the Guava Splitter; semantics are identical
    // for all valid inputs (last segment after the final '.').
    final int lastDot = originalFilename.lastIndexOf('.');
    if (lastDot < 0 || lastDot == originalFilename.length() - 1) {
        throw new BadRequestException("The file format is invalid.");
    }
    return originalFilename.substring(lastDot + 1);
}
@Test
public void getFormat() {
    // The format is whatever follows the last dot of the filename.
    assertEquals("properties", ConfigFileUtils.getFormat("application+default+application.properties"));
    assertEquals("yml", ConfigFileUtils.getFormat("application+default+application.yml"));
}
/**
 * Caches the registering application's name and injects router metadata into
 * the Eureka instance info before registration proceeds.
 *
 * @param context interception context; argument 0 is expected to be an InstanceInfo
 * @return the (unmodified) context
 */
@Override
public ExecuteContext before(ExecuteContext context) {
    final Object firstArgument = context.getArguments()[0];
    if (firstArgument instanceof InstanceInfo) {
        final InstanceInfo info = (InstanceInfo) firstArgument;
        AppCache.INSTANCE.setAppName(info.getAppName());
        SpringRouterUtils.putMetaData(info.getMetadata(), routerConfig);
    }
    return context;
}
// before() must cache the (Eureka-upper-cased) app name and inject router
// metadata (version plus configured key/value parameters) into the instance info.
@Test
public void testBefore() {
    interceptor.before(context);
    // ek will capitalize the service name, so the expected value here is also capitalized
    Assert.assertEquals("FOO", AppCache.INSTANCE.getAppName());
    InstanceInfo instanceInfo = (InstanceInfo) context.getArguments()[0];
    Map<String, String> metadata = instanceInfo.getMetadata();
    Assert.assertEquals(routerConfig.getRouterVersion(), metadata.get("version"));
    Assert.assertEquals("bar1", metadata.get("bar"));
    Assert.assertEquals("foo2", metadata.get("foo"));
}
/**
 * Reads a single byte, failing instead of returning the usual -1 sentinel at
 * end of stream.
 *
 * @return the next byte of input
 * @throws EOFException if the underlying stream is exhausted
 */
@Override
public final byte readByte() throws EOFException {
    final int next = read();
    if (next >= 0) {
        return (byte) next;
    }
    throw new EOFException();
}
// Reading at a position one past the end of the backing data must signal EOF.
@Test(expected = EOFException.class)
public void testReadBytePosition_EOF() throws Exception {
    in.readByte(INIT_DATA.length + 1);
}
/**
 * Returns the maximum number of seconds each entry may stay in the map.
 *
 * @return the configured time-to-live in seconds
 */
public int getTimeToLiveSeconds() {
    return timeToLiveSeconds;
}
@Test
public void testGetTimeToLiveSeconds() {
    // A freshly constructed MapConfig must report the default TTL.
    final MapConfig config = new MapConfig();
    assertEquals(MapConfig.DEFAULT_TTL_SECONDS, config.getTimeToLiveSeconds());
}
/**
 * Adapts a {@link Stream} into a traverser over its elements.
 * The stream is closed as soon as the traverser is exhausted.
 *
 * @param stream the source stream (consumed and closed by the traverser)
 * @return a traverser yielding the stream's elements
 */
@Nonnull
public static <T> Traverser<T> traverseStream(@Nonnull Stream<T> stream) {
    final Traverser<T> traverser = traverseSpliterator(stream.spliterator());
    return traverser.onFirstNull(stream::close);
}
// The traverser must yield every element of the source stream, in order.
@Test
public void when_traverseStream_then_seeAllItems() {
    validateTraversal(traverseStream(of(1, 2)));
}
/**
 * Loads a post by its primary key.
 *
 * @param id post id
 * @return the matching record, or null when none exists
 */
@Override
public PostDO getPost(Long id) {
    return postMapper.selectById(id);
}
@Test public void testGetPost() { // mock 数据 PostDO dbPostDO = randomPostDO(); postMapper.insert(dbPostDO); // 准备参数 Long id = dbPostDO.getId(); // 调用 PostDO post = postService.getPost(id); // 断言 assertNotNull(post); assertPojoEquals(dbPostDO, post); }
/**
 * Scans the given directory (depth 1, non-recursive) for "veslot*" entries and
 * maps each one to a {@link Device}, numbering them with a shared counter.
 *
 * @param path directory to scan
 * @return the discovered devices
 * @throws IOException if the directory cannot be walked
 */
public Set<Device> getDevicesFromPath(String path) throws IOException {
    final MutableInt deviceCounter = new MutableInt(0);
    try (Stream<Path> entries = Files.walk(Paths.get(path), 1)) {
        return entries
                .filter(entry -> entry.toFile().getName().startsWith("veslot"))
                .map(entry -> toDevice(entry, deviceCounter))
                .collect(Collectors.toSet());
    }
}
// Three veslot files must all be discovered as devices, with the major/minor
// numbers parsed from the (mocked) stat output; hex majors 8, 9, a map to 8, 9, 10.
@Test
public void testDetectMultipleOnlineDevices() throws IOException {
    createVeSlotFile(0);
    createVeSlotFile(1);
    createVeSlotFile(2);
    createOsStateFile(0);
    // major:minor:type triples returned for the three slots, in order.
    when(mockCommandExecutor.getOutput()).thenReturn(
        "8:1:character special file",
        "9:1:character special file",
        "a:1:character special file");
    when(udevUtil.getSysPath(anyInt(), anyChar())).thenReturn(testFolder);

    Set<Device> devices = discoverer.getDevicesFromPath(testFolder);
    assertEquals("Number of devices", 3, devices.size());

    List<Device> devicesList = Lists.newArrayList(devices);
    devicesList.sort(DEVICE_COMPARATOR);
    Device device0 = devicesList.get(0);
    assertEquals("Device ID", 0, device0.getId());
    assertEquals("Major number", 8, device0.getMajorNumber());
    assertEquals("Minor number", 1, device0.getMinorNumber());
    assertEquals("Status", "ONLINE", device0.getStatus());
    assertTrue("Device is not healthy", device0.isHealthy());
    Device device1 = devicesList.get(1);
    assertEquals("Device ID", 1, device1.getId());
    assertEquals("Major number", 9, device1.getMajorNumber());
    assertEquals("Minor number", 1, device1.getMinorNumber());
    assertEquals("Status", "ONLINE", device1.getStatus());
    assertTrue("Device is not healthy", device1.isHealthy());
    Device device2 = devicesList.get(2);
    assertEquals("Device ID", 2, device2.getId());
    assertEquals("Major number", 10, device2.getMajorNumber());
    assertEquals("Minor number", 1, device2.getMinorNumber());
    assertEquals("Status", "ONLINE", device2.getStatus());
    assertTrue("Device is not healthy", device2.isHealthy());
}
/**
 * Atomically changes an account's e164 number and phone-number identifier (PNI)
 * with a single DynamoDB transaction: the uniqueness-constraint rows are moved
 * from the old number/PNI to the new ones, any displaced account is recorded in
 * the deleted-accounts table, and the main account row is updated under an
 * optimistic version lock.
 *
 * @param account the account to update (mutated in memory; rolled back on failure)
 * @param number the new e164 number
 * @param phoneNumberIdentifier the new PNI
 * @param maybeDisplacedAccountIdentifier account that previously owned the number, if any
 * @param additionalWriteItems extra transaction items supplied by the caller
 * @throws ContestedOptimisticLockException if the version condition fails (concurrent update)
 */
public void changeNumber(final Account account,
    final String number,
    final UUID phoneNumberIdentifier,
    final Optional<UUID> maybeDisplacedAccountIdentifier,
    final Collection<TransactWriteItem> additionalWriteItems) {

  CHANGE_NUMBER_TIMER.record(() -> {
    final String originalNumber = account.getNumber();
    final UUID originalPni = account.getPhoneNumberIdentifier();

    boolean succeeded = false;

    // Mutate the in-memory account up front; the finally block rolls this
    // back if the transaction does not commit.
    account.setNumber(number, phoneNumberIdentifier);

    int accountUpdateIndex = -1;
    try {
      final List<TransactWriteItem> writeItems = new ArrayList<>();
      final AttributeValue uuidAttr = AttributeValues.fromUUID(account.getUuid());
      final AttributeValue numberAttr = AttributeValues.fromString(number);
      final AttributeValue pniAttr = AttributeValues.fromUUID(phoneNumberIdentifier);

      // Move the uniqueness-constraint rows from the old number/PNI to the new ones.
      writeItems.add(buildDelete(phoneNumberConstraintTableName, ATTR_ACCOUNT_E164, originalNumber));
      writeItems.add(buildConstraintTablePut(phoneNumberConstraintTableName, uuidAttr, ATTR_ACCOUNT_E164, numberAttr));
      writeItems.add(buildDelete(phoneNumberIdentifierConstraintTableName, ATTR_PNI_UUID, originalPni));
      writeItems.add(buildConstraintTablePut(phoneNumberIdentifierConstraintTableName, uuidAttr, ATTR_PNI_UUID, pniAttr));
      writeItems.add(buildRemoveDeletedAccount(number));
      maybeDisplacedAccountIdentifier.ifPresent(displacedAccountIdentifier ->
          writeItems.add(buildPutDeletedAccount(displacedAccountIdentifier, originalNumber)));

      // The `catch (TransactionCanceledException) block needs to check whether the cancellation reason is the account
      // update write item
      accountUpdateIndex = writeItems.size();

      // Optimistic-locking update of the main account row (guarded by #version).
      writeItems.add(
          TransactWriteItem.builder()
              .update(Update.builder()
                  .tableName(accountsTableName)
                  .key(Map.of(KEY_ACCOUNT_UUID, uuidAttr))
                  .updateExpression(
                      "SET #data = :data, #number = :number, #pni = :pni, #cds = :cds ADD #version :version_increment")
                  .conditionExpression(
                      "attribute_exists(#number) AND #version = :version")
                  .expressionAttributeNames(Map.of(
                      "#number", ATTR_ACCOUNT_E164,
                      "#data", ATTR_ACCOUNT_DATA,
                      "#cds", ATTR_CANONICALLY_DISCOVERABLE,
                      "#pni", ATTR_PNI_UUID,
                      "#version", ATTR_VERSION))
                  .expressionAttributeValues(Map.of(
                      ":number", numberAttr,
                      ":data", accountDataAttributeValue(account),
                      ":cds", AttributeValues.fromBool(account.isDiscoverableByPhoneNumber()),
                      ":pni", pniAttr,
                      ":version", AttributeValues.fromInt(account.getVersion()),
                      ":version_increment", AttributeValues.fromInt(1)))
                  .build())
              .build());

      writeItems.addAll(additionalWriteItems);

      final TransactWriteItemsRequest request = TransactWriteItemsRequest.builder()
          .transactItems(writeItems)
          .build();

      db().transactWriteItems(request);

      account.setVersion(account.getVersion() + 1);
      succeeded = true;
    } catch (final TransactionCanceledException e) {
      if (e.hasCancellationReasons()) {
        if (CONDITIONAL_CHECK_FAILED.equals(e.cancellationReasons().get(accountUpdateIndex).code())) {
          // the #version = :version condition failed, which indicates a concurrent update
          throw new ContestedOptimisticLockException();
        }
      } else {
        log.warn("Unexpected cancellation reasons: {}", e.cancellationReasons());
      }
      throw e;
    } finally {
      if (!succeeded) {
        // Roll back the in-memory mutation so the caller's view matches the store.
        account.setNumber(originalNumber, originalPni);
      }
    }
  });
}
// Changing to a number/PNI already owned by another account must cancel the
// transaction and leave both accounts' constraint rows untouched.
@Test
public void testChangeNumberConflict() {
    final String originalNumber = "+14151112222";
    final String targetNumber = "+14151113333";

    final UUID originalPni = UUID.randomUUID();
    final UUID targetPni = UUID.randomUUID();

    // The target number and PNI already belong to an existing account.
    final Device existingDevice = generateDevice(DEVICE_ID_1);
    final Account existingAccount = generateAccount(targetNumber, UUID.randomUUID(), targetPni, List.of(existingDevice));

    final Device device = generateDevice(DEVICE_ID_1);
    final Account account = generateAccount(originalNumber, UUID.randomUUID(), originalPni, List.of(device));

    createAccount(account);
    createAccount(existingAccount);

    assertThrows(TransactionCanceledException.class,
        () -> accounts.changeNumber(account, targetNumber, targetPni, Optional.of(existingAccount.getUuid()), Collections.emptyList()));

    // Nothing moved: each account still owns its original number and PNI.
    assertPhoneNumberConstraintExists(originalNumber, account.getUuid());
    assertPhoneNumberIdentifierConstraintExists(originalPni, account.getUuid());

    assertPhoneNumberConstraintExists(targetNumber, existingAccount.getUuid());
    assertPhoneNumberIdentifierConstraintExists(targetPni, existingAccount.getUuid());
}
/**
 * Creates a retrying dynamic proxy for {@code implementation} with a
 * non-failover (single target) proxy provider.
 *
 * @param iface interface the proxy implements
 * @param implementation the concrete target to delegate to
 * @param retryPolicy policy deciding whether a failed call is retried
 * @return the proxy object implementing {@code iface}
 */
public static <T> Object create(Class<T> iface, T implementation, RetryPolicy retryPolicy) {
    final DefaultFailoverProxyProvider<T> proxyProvider =
        new DefaultFailoverProxyProvider<T>(iface, implementation);
    return RetryProxy.create(iface, proxyProvider, retryPolicy);
}
// TRY_ONCE_THEN_FAIL: the first failure must propagate to the caller and the
// policy must be consulted exactly once, producing a FAIL decision.
@Test
public void testTryOnceThenFail() throws Exception {
    RetryPolicy policy = mock(TryOnceThenFail.class);
    RetryPolicy realPolicy = TRY_ONCE_THEN_FAIL;
    setupMockPolicy(policy, realPolicy);

    UnreliableInterface unreliable = (UnreliableInterface)
        RetryProxy.create(UnreliableInterface.class, unreliableImpl, policy);
    unreliable.alwaysSucceeds();
    try {
        unreliable.failsOnceThenSucceeds();
        fail("Should fail");
    } catch (UnreliableException e) {
        // expected
        verify(policy, times(1)).shouldRetry(any(Exception.class), anyInt(),
            anyInt(), anyBoolean());
        assertEquals(RetryDecision.FAIL, caughtRetryAction.action);
        assertEquals("try once and fail.", caughtRetryAction.reason);
    } catch (Exception e) {
        fail("Other exception other than UnreliableException should also get " +
            "failed.");
    }
}
@POST @Path(RMWSConsts.SCHEDULER_CONF_VALIDATE) @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) @Consumes({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) public synchronized Response validateAndGetSchedulerConfiguration( SchedConfUpdateInfo mutationInfo, @Context HttpServletRequest hsr) throws AuthorizationException { // Only admin user is allowed to read scheduler conf, // in order to avoid leaking sensitive info, such as ACLs UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true); initForWritableEndpoints(callerUGI, true); ResourceScheduler scheduler = rm.getResourceScheduler(); if (isConfigurationMutable(scheduler)) { try { MutableConfigurationProvider mutableConfigurationProvider = ((MutableConfScheduler) scheduler).getMutableConfProvider(); Configuration schedulerConf = mutableConfigurationProvider .getConfiguration(); Configuration newSchedulerConf = mutableConfigurationProvider .applyChanges(schedulerConf, mutationInfo); Configuration yarnConf = ((CapacityScheduler) scheduler).getConf(); Configuration newConfig = new Configuration(yarnConf); Iterator<Map.Entry<String, String>> iter = newSchedulerConf.iterator(); Entry<String, String> e = null; while (iter.hasNext()) { e = iter.next(); newConfig.set(e.getKey(), e.getValue()); } CapacitySchedulerConfigValidator.validateCSConfiguration(yarnConf, newConfig, rm.getRMContext()); return Response.status(Status.OK) .entity(new ConfInfo(newSchedulerConf)) .build(); } catch (Exception e) { String errorMsg = "CapacityScheduler configuration validation failed:" + e.toString(); LOG.warn(errorMsg); return Response.status(Status.BAD_REQUEST) .entity(errorMsg) .build(); } } else { String errorMsg = String.format("Configuration change validation only supported by %s.", MutableConfScheduler.class.getSimpleName()); LOG.warn(errorMsg); return Response.status(Status.BAD_REQUEST) .entity(errorMsg) .build(); } }
// Removing a queue that still exists in the running scheduler is an invalid
// change: the endpoint must answer 400 and surface the validation exception.
@Test
public void testValidateAndGetSchedulerConfigurationInvalidConfig()
    throws IOException {
  Configuration config = CapacitySchedulerConfigGeneratorForTest
      .createBasicCSConfiguration();
  ResourceScheduler scheduler = prepareCSForValidation(config);

  // Request removal of root.test1 while it is still present.
  SchedConfUpdateInfo mutationInfo = new SchedConfUpdateInfo();
  ArrayList<String> queuesToRemove = new ArrayList();
  queuesToRemove.add("root.test1");
  mutationInfo.setRemoveQueueInfo(queuesToRemove);

  RMWebServices webService = prepareWebServiceForValidation(scheduler);
  HttpServletRequest mockHsr = prepareServletRequestForValidation();

  Response response = webService
      .validateAndGetSchedulerConfiguration(mutationInfo, mockHsr);
  Assert.assertEquals(Status.BAD_REQUEST
      .getStatusCode(), response.getStatus());
  // The error entity embeds the validator's exception text.
  Assert.assertTrue(response.getEntity().toString()
      .contains("IOException"));
}
/**
 * Runs the version-change detection pipeline: list the pods, derive the
 * from/to Kafka versions from them, then compute the required version change.
 *
 * @return future completing with the detected version change
 */
public Future<KafkaVersionChange> reconcile() {
    return getPods()
            .compose(this::detectToAndFromVersions)
            .compose(ignored -> prepareVersionChange());
}
// Downgrade scenario: the spec pins the previous Kafka version while pods run
// a mix of the default and previous versions. The computed change must go
// default -> previous, with the previous version's metadata version.
@Test
public void testDowngradeWithAllVersionAndMixedPods(VertxTestContext context) {
    VersionChangeCreator vcc = mockVersionChangeCreator(
            mockKafka(VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).version(),
                    VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).metadataVersion(),
                    VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).metadataVersion()),
            mockRos(mockMixedPods(VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).version(),
                    VERSIONS.defaultVersion().version()))
    );

    Checkpoint async = context.checkpoint();
    vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> {
        assertThat(c.from(), is(VERSIONS.defaultVersion()));
        assertThat(c.to(), is(VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION)));
        assertThat(c.metadataVersion(), is(VERSIONS.version(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION).metadataVersion()));
        async.flag();
    })));
}
/**
 * Reads a classpath resource, resolved relative to the given class, as a
 * UTF-8 string.
 *
 * @param relativeTo class whose classloader/package anchors the lookup
 * @param resourcePath resource path passed to {@link Class#getResourceAsStream}
 * @return the full resource content decoded as UTF-8
 * @throws IllegalStateException if the resource is missing or cannot be read
 */
public static String readClasspathResource(Class<?> relativeTo, String resourcePath) {
    // try-with-resources: the original implementation never closed the stream
    // on the success path (IOUtils.toString does not close its argument).
    try (InputStream resourceStream = relativeTo.getResourceAsStream(resourcePath)) {
        if (resourceStream == null) {
            throw new IllegalStateException(getErrorMessage(relativeTo, resourcePath));
        }
        return IOUtils.toString(resourceStream, StandardCharsets.UTF_8);
    } catch (IOException e) {
        throw new IllegalStateException(getErrorMessage(relativeTo, resourcePath), e);
    }
}
@Test
public void whenReadValidClasspathResource_thenReadIt() {
    // The fixture resource contains exactly "OK" followed by a newline.
    final String content =
        ResourceUtils.readClasspathResource(ResourceUtilsTest.class, "classpath_resource.txt");
    assertThat(content).isEqualTo("OK\n");
}
/**
 * Returns the smallest timestamp strictly greater than the argument,
 * saturating at {@code Timestamp.MAX_VALUE}.
 *
 * @param timestamp the timestamp to advance by one nanosecond
 * @return the successor timestamp, or MAX_VALUE itself when already there
 */
public static Timestamp next(Timestamp timestamp) {
    if (timestamp.equals(Timestamp.MAX_VALUE)) {
        return timestamp; // already at the ceiling; cannot advance
    }
    final long seconds = timestamp.getSeconds();
    final int bumpedNanos = timestamp.getNanos() + 1;
    // Carry into the seconds field when the nano component rolls over.
    return bumpedNanos >= NANOS_PER_SECOND
        ? Timestamp.ofTimeSecondsAndNanos(seconds + 1, 0)
        : Timestamp.ofTimeSecondsAndNanos(seconds, bumpedNanos);
}
@Test
public void testNextIncrementsNanosWhenPossible() {
    // No rollover: the nano component simply increases by one.
    final Timestamp input = Timestamp.ofTimeSecondsAndNanos(10L, 999999998);
    final Timestamp expected = Timestamp.ofTimeSecondsAndNanos(10L, 999999999);
    assertEquals(expected, TimestampUtils.next(input));
}
/**
 * A value is valid when its string payload is one of the configured choices.
 * NOTE(review): assumes the value is a StringParameterValue; any other type
 * raises ClassCastException, same as the original implementation.
 */
@Override
public boolean isValid(ParameterValue value) {
    final StringParameterValue stringValue = (StringParameterValue) value;
    return choices.contains(stringValue.getValue());
}
@Test
@Issue("JENKINS-62889")
public void checkValue_Invalid() {
    // A value outside the configured choices must be rejected.
    final ChoiceParameterDefinition parameterDefinition =
        new ChoiceParameterDefinition("name", new String[]{"single"}, "description");
    final StringParameterValue notInChoices = new StringParameterValue("choice", "invalid");
    assertFalse(parameterDefinition.isValid(notInChoices));
}
/**
 * @return the number of blocks that could not be cached
 */
public long getNumBlocksFailedToCache() {
    return numBlocksFailedToCache.longValue();
}
/**
 * Caches files until the configured max-locked-memory is exhausted, verifies
 * that the DataNode reports the failure (log message and metric), then
 * uncaches and checks that usage drops back accordingly.
 */
@Test(timeout=600000)
public void testFilesExceedMaxLockedMemory() throws Exception {
  LOG.info("beginning testFilesExceedMaxLockedMemory");

  // Create some test files that will exceed total cache capacity
  final int numFiles = 5;
  final long fileSize = CACHE_CAPACITY / (numFiles-1);

  final Path[] testFiles = new Path[numFiles];
  final HdfsBlockLocation[][] fileLocs = new HdfsBlockLocation[numFiles][];
  final long[] fileSizes = new long[numFiles];
  for (int i=0; i<numFiles; i++) {
    testFiles[i] = new Path("/testFilesExceedMaxLockedMemory-" + i);
    DFSTestUtil.createFile(fs, testFiles[i], fileSize, (short)1, 0xDFAl);
    fileLocs[i] = (HdfsBlockLocation[])fs.getFileBlockLocations(
        testFiles[i], 0, fileSize);
    // Get the file size (sum of blocks)
    long[] sizes = getBlockSizes(fileLocs[i]);
    for (int j=0; j<sizes.length; j++) {
      fileSizes[i] += sizes[j];
    }
  }

  // Cache the first n-1 files
  long total = 0;
  DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
  for (int i=0; i<numFiles-1; i++) {
    setHeartbeatResponse(cacheBlocks(fileLocs[i]));
    total = DFSTestUtil.verifyExpectedCacheUsage(
        rounder.roundUp(total + fileSizes[i]), 4 * (i + 1), fsd);
  }

  // nth file should hit a capacity exception
  final LogVerificationAppender appender = new LogVerificationAppender();
  final Logger logger = Logger.getRootLogger();
  logger.addAppender(appender);
  setHeartbeatResponse(cacheBlocks(fileLocs[numFiles-1]));

  // Wait (up to 30s, polling every 500ms) for the reservation failure to be logged.
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      // check the log reported by FsDataSetCache
      // in the case that cache capacity is exceeded.
      int lines = appender.countLinesWithMessage(
          "could not reserve more bytes in the cache: ");
      return lines > 0;
    }
  }, 500, 30000);

  // Also check the metrics for the failure
  assertTrue("Expected more than 0 failed cache attempts",
      fsd.getNumBlocksFailedToCache() > 0);

  // Uncache the n-1 files
  int curCachedBlocks = 16;
  for (int i=0; i<numFiles-1; i++) {
    setHeartbeatResponse(uncacheBlocks(fileLocs[i]));
    long uncachedBytes = rounder.roundUp(fileSizes[i]);
    total -= uncachedBytes;
    curCachedBlocks -= uncachedBytes / BLOCK_SIZE;
    DFSTestUtil.verifyExpectedCacheUsage(total, curCachedBlocks, fsd);
  }
  LOG.info("finishing testFilesExceedMaxLockedMemory");
}
/**
 * Snapshots the current write progress so it can be resumed later.
 * Any open write channel is closed first so all written data is durable
 * before the recoverable is created.
 *
 * @return a resume recoverable describing the persisted state
 * @throws IOException if closing the channel fails
 */
@Override
public RecoverableWriter.ResumeRecoverable persist() throws IOException {
    LOGGER.trace("Persisting write channel for blob {}", finalBlobIdentifier);
    closeWriteChannelIfExists();
    return createResumeRecoverable();
}
// persist() must snapshot the component blobs and current position without
// marking the stream as closed. (Only meaningful while the stream is open.)
@Test
public void shouldPersist() throws IOException {
    if (!closed) {
        GSResumeRecoverable recoverable = (GSResumeRecoverable) fsDataOutputStream.persist();
        // The recoverable targets the same final blob...
        assertEquals(blobIdentifier, recoverable.finalBlobIdentifier);
        // ...and lists exactly the component objects written so far.
        if (empty) {
            assertEquals(0, recoverable.componentObjectIds.size());
        } else {
            assertArrayEquals(
                    componentObjectIds.toArray(),
                    recoverable.componentObjectIds.toArray());
        }
        assertEquals(position, recoverable.position);
        assertFalse(recoverable.closed);
    }
}
/**
 * Queries the connection details of producers in the given group publishing
 * to the given topic. Delegates to the internal admin implementation.
 *
 * @param producerGroup producer group name
 * @param topic topic name
 * @return the connection information of matching producers
 */
@Override
public ProducerConnection examineProducerConnectionInfo(String producerGroup, final String topic)
    throws RemotingException, MQClientException, InterruptedException, MQBrokerException {
    return defaultMQAdminExtImpl.examineProducerConnectionInfo(producerGroup, topic);
}
@Test
public void testExamineProducerConnectionInfo()
    throws InterruptedException, RemotingException, MQClientException, MQBrokerException {
    // Exactly one producer connection is expected for the test group/topic.
    final ProducerConnection connection =
        defaultMQAdminExt.examineProducerConnectionInfo("default-producer-group", "unit-test");
    assertThat(connection.getConnectionSet().size()).isEqualTo(1);
}
/**
 * Configures a multicast socket from the Hazelcast multicast join config:
 * binds to the multicast port, applies TTL and loopback mode, optionally
 * selects the outgoing interface, sizes the socket buffers, and joins the
 * configured multicast group.
 *
 * @param multicastSocket the socket to configure
 * @param bindAddress     local bind address of this member
 * @param hzProperties    properties that may override group/interface choice
 * @param multicastConfig multicast join configuration
 * @param logger          target for configuration warnings
 * @throws IOException if binding or joining the multicast group fails
 */
protected static void configureMulticastSocket(MulticastSocket multicastSocket, Address bindAddress,
                                               HazelcastProperties hzProperties,
                                               MulticastConfig multicastConfig, ILogger logger)
        throws SocketException, IOException, UnknownHostException {
    multicastSocket.setReuseAddress(true);
    // bind to receive interface
    multicastSocket.bind(new InetSocketAddress(multicastConfig.getMulticastPort()));
    multicastSocket.setTimeToLive(multicastConfig.getMulticastTimeToLive());
    try {
        boolean loopbackBind = bindAddress.getInetAddress().isLoopbackAddress();
        Boolean loopbackModeEnabled = multicastConfig.getLoopbackModeEnabled();
        if (loopbackModeEnabled != null) {
            // setting loopbackmode is just a hint - and the argument means "disable"!
            // to check the real value we call getLoopbackMode() (and again - return value means "disabled")
            multicastSocket.setLoopbackMode(!loopbackModeEnabled);
        }
        // If LoopBack mode is not enabled (i.e. getLoopbackMode return true) and bind address is a loopback one,
        // then print a warning
        if (loopbackBind && multicastSocket.getLoopbackMode()) {
            logger.warning("Hazelcast is bound to " + bindAddress.getHost() + " and loop-back mode is "
                    + "disabled. This could cause multicast auto-discovery issues "
                    + "and render it unable to work. Check your network connectivity, try to enable the "
                    + "loopback mode and/or force -Djava.net.preferIPv4Stack=true on your JVM.");
        }
        // warning: before modifying lines below, take a look at these links:
        // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4417033
        // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6402758
        // https://github.com/hazelcast/hazelcast/pull/19251#issuecomment-891375270
        boolean callSetInterface = OS.isMac() || !loopbackBind;
        String propSetInterface = hzProperties.getString(ClusterProperty.MULTICAST_SOCKET_SET_INTERFACE);
        if (propSetInterface != null) {
            // explicit property always wins over the OS/bind-address heuristic
            callSetInterface = Boolean.parseBoolean(propSetInterface);
        }
        if (callSetInterface) {
            multicastSocket.setInterface(bindAddress.getInetAddress());
        }
    } catch (Exception e) {
        // best-effort: interface/loopback tuning failures are logged, not fatal
        logger.warning(e);
    }
    multicastSocket.setReceiveBufferSize(SOCKET_BUFFER_SIZE);
    multicastSocket.setSendBufferSize(SOCKET_BUFFER_SIZE);
    // the system property overrides the configured multicast group, if set
    String multicastGroup = hzProperties.getString(ClusterProperty.MULTICAST_GROUP);
    if (multicastGroup == null) {
        multicastGroup = multicastConfig.getMulticastGroup();
    }
    multicastConfig.setMulticastGroup(multicastGroup);
    multicastSocket.joinGroup(InetAddress.getByName(multicastGroup));
    multicastSocket.setSoTimeout(SOCKET_TIMEOUT);
}
/**
 * With a non-loopback bind address and loopback mode explicitly disabled,
 * configureMulticastSocket must disable loopback on the socket
 * (setLoopbackMode(true) means "disable") and pick the bind address as the
 * outgoing multicast interface by default.
 */
@Test
public void testSetInterfaceDefaultWhenNonLoopbackAddrAndNoLoopbackMode() throws Exception {
    Config config = createConfig(null);
    MulticastConfig multicastConfig = config.getNetworkConfig().getJoin().getMulticastConfig();
    multicastConfig.setLoopbackModeEnabled(false);
    MulticastSocket multicastSocket = mock(MulticastSocket.class);
    // non-loopback address -> the setInterface heuristic should fire
    Address address = new Address("10.0.0.2", 5701);
    HazelcastProperties hzProperties = new HazelcastProperties(config);
    MulticastService.configureMulticastSocket(multicastSocket, address, hzProperties , multicastConfig, mock(ILogger.class));
    verify(multicastSocket).setInterface(address.getInetAddress());
    verify(multicastSocket).setLoopbackMode(true);
}
/**
 * Reflectively invokes a method by name on the given target.
 *
 * @param target     object to invoke on; {@code null} yields empty
 * @param methodName method to look up; {@code null} yields empty
 * @param paramsType formal parameter types used for method resolution
 * @param params     actual arguments
 * @return the invocation result, or empty when the target/name is null or
 *         no matching method exists
 */
public static Optional<Object> invokeMethod(Object target, String methodName, Class<?>[] paramsType,
        Object[] params) {
    if (target == null || methodName == null) {
        return Optional.empty();
    }
    return findMethod(target.getClass(), methodName, paramsType)
            .flatMap(method -> invokeMethod(target, method, params));
}
@Test public void testInvokeMethod1() { int params = 88; final Optional<Object> staticMethod = ReflectUtils .invokeMethod(TestReflect.class, "staticMethod", new Class[]{int.class}, new Object[]{params}); Assert.assertTrue(staticMethod.isPresent() && staticMethod.get() instanceof String); Assert.assertEquals(TestReflect.staticMethod(params), staticMethod.get()); // can not find method final Optional<Object> test = ReflectUtils.invokeMethod(TestReflect.class, "test", new Class[]{int.class}, new Object[]{params}); Assert.assertFalse(test.isPresent()); }
/**
 * Handles all HttpFS GET requests. Dispatches on the {@code op} query
 * parameter to the matching read operation and executes it on behalf of
 * the authenticated user.
 *
 * @param path the file-system path the operation applies to.
 * @param uriInfo URI info of the request (used to build redirect URLs).
 * @param op the HttpFS operation of the request.
 * @param params additional operation parameters.
 * @param request the HTTP servlet request (for audit/MDC data).
 * @return the operation response (JSON, or octet-stream for OPEN).
 * @throws IOException on IO errors or an unknown GET operation.
 * @throws FileSystemAccessException on FileSystemAccess related errors.
 */
@GET
@Path("{path:.*}")
@Produces({MediaType.APPLICATION_OCTET_STREAM + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8})
public Response get(@PathParam("path") String path,
    @Context UriInfo uriInfo,
    @QueryParam(OperationParam.NAME) OperationParam op,
    @Context Parameters params,
    @Context HttpServletRequest request)
    throws IOException, FileSystemAccessException {
  // Restrict access to only GETFILESTATUS and LISTSTATUS in write-only mode
  if ((op.value() != HttpFSFileSystem.Operation.GETFILESTATUS) &&
      (op.value() != HttpFSFileSystem.Operation.LISTSTATUS) &&
      accessMode == AccessMode.WRITEONLY) {
    return Response.status(Response.Status.FORBIDDEN).build();
  }
  UserGroupInformation user = HttpUserGroupInformation.get();
  Response response;
  path = makeAbsolute(path);
  // record operation + caller address for audit logging
  MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
  MDC.put("hostname", request.getRemoteAddr());
  switch (op.value()) {
  case OPEN: {
    // with noredirect the client gets the data URL instead of the bytes
    Boolean noRedirect = params.get(
        NoRedirectParam.NAME, NoRedirectParam.class);
    if (noRedirect) {
      URI redirectURL = createOpenRedirectionURL(uriInfo);
      final String js = JsonUtil.toJsonString("Location", redirectURL);
      response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    } else {
      //Invoking the command directly using an unmanaged FileSystem that is
      // released by the FileSystemReleaseFilter
      final FSOperations.FSOpen command = new FSOperations.FSOpen(path);
      final FileSystem fs = createFileSystem(user);
      InputStream is = null;
      UserGroupInformation ugi = UserGroupInformation
          .createProxyUser(user.getShortUserName(),
              UserGroupInformation.getLoginUser());
      try {
        is = ugi.doAs(new PrivilegedExceptionAction<InputStream>() {
          @Override
          public InputStream run() throws Exception {
            return command.execute(fs);
          }
        });
      } catch (InterruptedException ie) {
        // restore the interrupt flag so callers can observe it
        LOG.warn("Open interrupted.", ie);
        Thread.currentThread().interrupt();
      }
      Long offset = params.get(OffsetParam.NAME, OffsetParam.class);
      Long len = params.get(LenParam.NAME, LenParam.class);
      AUDIT_LOG.info("[{}] offset [{}] len [{}]",
          new Object[] { path, offset, len });
      InputStreamEntity entity = new InputStreamEntity(is, offset, len);
      response = Response.ok(entity).type(MediaType.APPLICATION_OCTET_STREAM)
          .build();
    }
    break;
  }
  case GETFILESTATUS: {
    FSOperations.FSFileStatus command = new FSOperations.FSFileStatus(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case LISTSTATUS: {
    String filter = params.get(FilterParam.NAME, FilterParam.class);
    FSOperations.FSListStatus command =
        new FSOperations.FSListStatus(path, filter);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}] filter [{}]", path, (filter != null) ? filter : "-");
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETHOMEDIRECTORY: {
    // only valid on the root path
    enforceRootPath(op.value(), path);
    FSOperations.FSHomeDir command = new FSOperations.FSHomeDir();
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("Home Directory for [{}]", user);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case INSTRUMENTATION: {
    // admin-only diagnostic snapshot
    enforceRootPath(op.value(), path);
    Groups groups = HttpFSServerWebApp.get().get(Groups.class);
    Set<String> userGroups = groups.getGroupsSet(user.getShortUserName());
    if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) {
      throw new AccessControlException(
          "User not in HttpFSServer admin group");
    }
    Instrumentation instrumentation =
        HttpFSServerWebApp.get().get(Instrumentation.class);
    Map snapshot = instrumentation.getSnapshot();
    response = Response.ok(snapshot).build();
    break;
  }
  case GETCONTENTSUMMARY: {
    FSOperations.FSContentSummary command =
        new FSOperations.FSContentSummary(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("Content summary for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETQUOTAUSAGE: {
    FSOperations.FSQuotaUsage command =
        new FSOperations.FSQuotaUsage(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("Quota Usage for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETFILECHECKSUM: {
    FSOperations.FSFileChecksum command =
        new FSOperations.FSFileChecksum(path);
    Boolean noRedirect = params.get(
        NoRedirectParam.NAME, NoRedirectParam.class);
    AUDIT_LOG.info("[{}]", path);
    if (noRedirect) {
      URI redirectURL = createOpenRedirectionURL(uriInfo);
      final String js = JsonUtil.toJsonString("Location", redirectURL);
      response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    } else {
      Map json = fsExecute(user, command);
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    }
    break;
  }
  case GETFILEBLOCKLOCATIONS: {
    long offset = 0;
    long len = Long.MAX_VALUE;
    Long offsetParam = params.get(OffsetParam.NAME, OffsetParam.class);
    Long lenParam = params.get(LenParam.NAME, LenParam.class);
    AUDIT_LOG.info("[{}] offset [{}] len [{}]", path, offsetParam, lenParam);
    // negative/absent parameters fall back to the whole file
    if (offsetParam != null && offsetParam > 0) {
      offset = offsetParam;
    }
    if (lenParam != null && lenParam > 0) {
      len = lenParam;
    }
    FSOperations.FSFileBlockLocations command =
        new FSOperations.FSFileBlockLocations(path, offset, len);
    @SuppressWarnings("rawtypes")
    Map locations = fsExecute(user, command);
    final String json = JsonUtil.toJsonString("BlockLocations", locations);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETACLSTATUS: {
    FSOperations.FSAclStatus command = new FSOperations.FSAclStatus(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("ACL status for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETXATTRS: {
    List<String> xattrNames =
        params.getValues(XAttrNameParam.NAME, XAttrNameParam.class);
    XAttrCodec encoding =
        params.get(XAttrEncodingParam.NAME, XAttrEncodingParam.class);
    FSOperations.FSGetXAttrs command =
        new FSOperations.FSGetXAttrs(path, xattrNames, encoding);
    @SuppressWarnings("rawtypes")
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("XAttrs for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case LISTXATTRS: {
    FSOperations.FSListXAttrs command = new FSOperations.FSListXAttrs(path);
    @SuppressWarnings("rawtypes")
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("XAttr names for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case LISTSTATUS_BATCH: {
    // startAfter is an opaque continuation token from the previous batch
    String startAfter = params.get(
        HttpFSParametersProvider.StartAfterParam.NAME,
        HttpFSParametersProvider.StartAfterParam.class);
    byte[] token = HttpFSUtils.EMPTY_BYTES;
    if (startAfter != null) {
      token = startAfter.getBytes(StandardCharsets.UTF_8);
    }
    FSOperations.FSListStatusBatch command = new FSOperations
        .FSListStatusBatch(path, token);
    @SuppressWarnings("rawtypes")
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}] token [{}]", path, token);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETTRASHROOT: {
    FSOperations.FSTrashRoot command = new FSOperations.FSTrashRoot(path);
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETALLSTORAGEPOLICY: {
    FSOperations.FSGetAllStoragePolicies command =
        new FSOperations.FSGetAllStoragePolicies();
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSTORAGEPOLICY: {
    FSOperations.FSGetStoragePolicy command =
        new FSOperations.FSGetStoragePolicy(path);
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTDIFF: {
    String oldSnapshotName = params.get(OldSnapshotNameParam.NAME,
        OldSnapshotNameParam.class);
    String snapshotName = params.get(SnapshotNameParam.NAME,
        SnapshotNameParam.class);
    FSOperations.FSGetSnapshotDiff command =
        new FSOperations.FSGetSnapshotDiff(path, oldSnapshotName,
            snapshotName);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTDIFFLISTING: {
    String oldSnapshotName = params.get(OldSnapshotNameParam.NAME,
        OldSnapshotNameParam.class);
    String snapshotName = params.get(SnapshotNameParam.NAME,
        SnapshotNameParam.class);
    String snapshotDiffStartPath = params
        .get(HttpFSParametersProvider.SnapshotDiffStartPathParam.NAME,
            HttpFSParametersProvider.SnapshotDiffStartPathParam.class);
    Integer snapshotDiffIndex =
        params.get(HttpFSParametersProvider.SnapshotDiffIndexParam.NAME,
            HttpFSParametersProvider.SnapshotDiffIndexParam.class);
    FSOperations.FSGetSnapshotDiffListing command =
        new FSOperations.FSGetSnapshotDiffListing(path, oldSnapshotName,
            snapshotName, snapshotDiffStartPath, snapshotDiffIndex);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTTABLEDIRECTORYLIST: {
    FSOperations.FSGetSnapshottableDirListing command =
        new FSOperations.FSGetSnapshottableDirListing();
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTLIST: {
    FSOperations.FSGetSnapshotListing command =
        new FSOperations.FSGetSnapshotListing(path);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSERVERDEFAULTS: {
    FSOperations.FSGetServerDefaults command =
        new FSOperations.FSGetServerDefaults();
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case CHECKACCESS: {
    String mode = params.get(FsActionParam.NAME, FsActionParam.class);
    FsActionParam fsparam = new FsActionParam(mode);
    FSOperations.FSAccess command = new FSOperations.FSAccess(path,
        FsAction.getFsAction(fsparam.value()));
    fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    response = Response.ok().build();
    break;
  }
  case GETECPOLICY: {
    FSOperations.FSGetErasureCodingPolicy command =
        new FSOperations.FSGetErasureCodingPolicy(path);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETECPOLICIES: {
    FSOperations.FSGetErasureCodingPolicies command =
        new FSOperations.FSGetErasureCodingPolicies();
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETECCODECS: {
    FSOperations.FSGetErasureCodingCodecs command =
        new FSOperations.FSGetErasureCodingCodecs();
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GET_BLOCK_LOCATIONS: {
    // legacy variant of GETFILEBLOCKLOCATIONS returning LocatedBlocks
    long offset = 0;
    long len = Long.MAX_VALUE;
    Long offsetParam = params.get(OffsetParam.NAME, OffsetParam.class);
    Long lenParam = params.get(LenParam.NAME, LenParam.class);
    AUDIT_LOG.info("[{}] offset [{}] len [{}]", path, offsetParam, lenParam);
    if (offsetParam != null && offsetParam > 0) {
      offset = offsetParam;
    }
    if (lenParam != null && lenParam > 0) {
      len = lenParam;
    }
    FSOperations.FSFileBlockLocationsLegacy command =
        new FSOperations.FSFileBlockLocationsLegacy(path, offset, len);
    @SuppressWarnings("rawtypes")
    Map locations = fsExecute(user, command);
    final String json = JsonUtil.toJsonString("LocatedBlocks", locations);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETFILELINKSTATUS: {
    FSOperations.FSFileLinkStatus command =
        new FSOperations.FSFileLinkStatus(path);
    @SuppressWarnings("rawtypes")
    Map js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSTATUS: {
    FSOperations.FSStatus command = new FSOperations.FSStatus(path);
    @SuppressWarnings("rawtypes")
    Map js = fsExecute(user, command);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETTRASHROOTS: {
    Boolean allUsers = params.get(AllUsersParam.NAME, AllUsersParam.class);
    FSOperations.FSGetTrashRoots command =
        new FSOperations.FSGetTrashRoots(allUsers);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("allUsers [{}]", allUsers);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  default: {
    throw new IOException(
        MessageFormat.format("Invalid HTTP GET operation [{0}]",
            op.value()));
  }
  }
  return response;
}
/**
 * Validates that file permissions supplied (or omitted) on create are
 * reported back unchanged by GETFILESTATUS.
 */
@Test
@TestDir
@TestJetty
@TestHdfs
public void testPerms() throws Exception {
  createHttpFSServer(false, false);

  FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
  fs.mkdirs(new Path("/perm"));

  // default permission applies when none is specified
  createWithHttp("/perm/none", null);
  String statusJson = getStatus("/perm/none", "GETFILESTATUS");
  Assert.assertTrue("755".equals(getPerms(statusJson)));

  // explicit octal permissions must round-trip unchanged
  for (String perm : new String[] { "777", "654", "321" }) {
    createWithHttp("/perm/p-" + perm, perm);
    statusJson = getStatus("/perm/p-" + perm, "GETFILESTATUS");
    Assert.assertTrue(perm.equals(getPerms(statusJson)));
  }
}
/**
 * Returns a function that maps each item to itself (the identity mapping).
 *
 * @param <T> type of the item
 */
@Nonnull
public static <T> FunctionEx<T, T> wholeItem() {
    final FunctionEx<T, T> identity = FunctionEx.identity();
    return identity;
}
/**
 * wholeItem() must behave as the identity: applying it returns the very
 * same instance that was passed in.
 */
@Test
public void when_wholeItem() {
    Object item = new Object();
    FunctionEx<Object, Object> identity = wholeItem();
    assertSame(item, identity.apply(item));
}
/**
 * Creates a {@code StartTime} that starts at the given absolute instant.
 *
 * @param absoluteStart the absolute start instant; must not be null
 * @return a StartTime with the ABSOLUTE option and no relative time
 * @throws NullPointerException if {@code absoluteStart} is null
 */
public static StartTime absolute(Instant absoluteStart) {
    // fail fast instead of storing a null instant that would only surface
    // later when absoluteTime() is used
    if (absoluteStart == null) {
        throw new NullPointerException("absoluteStart cannot be null");
    }
    // note: the redundant 'final' modifier was dropped — static methods
    // cannot be overridden, so it had no effect
    return new StartTime(StartTimeOption.ABSOLUTE, null, absoluteStart);
}
/**
 * An absolute StartTime reports the ABSOLUTE option, has no relative
 * time, and preserves the exact instant it was created with.
 */
@Test
public void testStartAbsolute() {
    final ZoneOffset minusSeven = ZoneOffset.ofHours(-7);
    final Instant start =
            OffsetDateTime.of(2017, 3, 20, 11, 43, 11, 0, minusSeven).toInstant();

    StartTime st = StartTime.absolute(start);

    assertEquals(StartTimeOption.ABSOLUTE, st.option());
    assertNull(st.relativeTime());
    String rendered = st.absoluteTime()
            .atOffset(minusSeven)
            .format(DateTimeFormatter.ISO_OFFSET_DATE_TIME);
    assertEquals("2017-03-20T11:43:11-07:00", rendered);
}
/**
 * Installs all Roboto font weights: the basic family plus the light and
 * semi-bold variants. Delegates to one installer per weight.
 */
public static void install() {
    installBasic();
    installLight();
    installSemiBold();
}
/**
 * After install(), every Roboto family must resolve at size 13: the
 * regular family in all four standard styles, and the light/semibold
 * families in plain and italic.
 */
@Test
void testFont() {
    FlatRobotoFont.install();

    // regular family: all four standard AWT styles
    int[] allStyles = { Font.PLAIN, Font.ITALIC, Font.BOLD, Font.BOLD | Font.ITALIC };
    for (int style : allStyles)
        testFont( FlatRobotoFont.FAMILY, style, 13 );

    // light family: plain and italic
    for (int style : new int[] { Font.PLAIN, Font.ITALIC })
        testFont( FlatRobotoFont.FAMILY_LIGHT, style, 13 );

    // semibold family: plain and italic
    for (int style : new int[] { Font.PLAIN, Font.ITALIC })
        testFont( FlatRobotoFont.FAMILY_SEMIBOLD, style, 13 );
}
/**
 * Renders the given expression as a string. Public entry point that
 * delegates to the internal formatter.
 */
public String process(final Expression expression) {
    final String rendered = formatExpression(expression);
    return rendered;
}
@Test public void shouldEscapeQuotesInStringLiteral() { // Given: final Expression expression = new StringLiteral("\"foo\""); // When: final String javaExpression = sqlToJavaVisitor.process(expression); // Then: assertThat(javaExpression, equalTo("\"\\\"foo\\\"\"")); }
/**
 * Wraps each group of input gates into a checkpointed input gate.
 *
 * <p>Each entry of {@code inputGates} is first unioned into a single
 * {@link InputGate}, then wrapped with the shared barrier handler. When
 * the graph contains loops, upstream recovery tracking is disabled.
 *
 * @param mailboxExecutor executor used by the checkpointed gates
 * @param inputGates one list of gates per logical input
 * @param taskIOMetricGroup metric group to register checkpoint metrics on
 * @param barrierHandler shared checkpoint barrier handler
 * @param config stream config (loop detection)
 * @return one checkpointed gate per logical input
 */
public static CheckpointedInputGate[] createCheckpointedMultipleInputGate(
        MailboxExecutor mailboxExecutor,
        List<IndexedInputGate>[] inputGates,
        TaskIOMetricGroup taskIOMetricGroup,
        CheckpointBarrierHandler barrierHandler,
        StreamConfig config) {
    registerCheckpointMetrics(taskIOMetricGroup, barrierHandler);

    final CheckpointedInputGate[] checkpointedGates =
            new CheckpointedInputGate[inputGates.length];
    for (int i = 0; i < inputGates.length; i++) {
        final InputGate unionedGate = InputGateUtil.createInputGate(inputGates[i]);
        final UpstreamRecoveryTracker tracker =
                config.isGraphContainingLoops()
                        ? UpstreamRecoveryTracker.NO_OP
                        : UpstreamRecoveryTracker.forInputGate(unionedGate);
        checkpointedGates[i] =
                new CheckpointedInputGate(unionedGate, barrierHandler, mailboxExecutor, tracker);
    }
    return checkpointedGates;
}
/**
 * End-to-end check that createCheckpointedMultipleInputGate wires multiple
 * (deliberately out-of-order) input gates to one shared barrier handler:
 * after a barrier arrives on every channel of every gate, the handler's
 * all-barriers-received future for that checkpoint must be complete.
 */
@Test
void testCreateCheckpointedMultipleInputGate() throws Exception {
    try (CloseableRegistry registry = new CloseableRegistry()) {
        MockEnvironment environment = new MockEnvironmentBuilder().build();
        MockStreamTask streamTask = new MockStreamTaskBuilder(environment).build();
        StreamConfig streamConfig = new StreamConfig(environment.getJobConfiguration());
        streamConfig.setCheckpointMode(CheckpointingMode.EXACTLY_ONCE);
        streamConfig.setUnalignedCheckpointsEnabled(true);

        // First input gate has index larger than the second
        List<IndexedInputGate>[] inputGates =
                new List[] {
                    Collections.singletonList(getGate(1, 4)),
                    Collections.singletonList(getGate(0, 2)),
                };

        CheckpointBarrierHandler barrierHandler =
                InputProcessorUtil.createCheckpointBarrierHandler(
                        streamTask,
                        streamConfig,
                        new TestSubtaskCheckpointCoordinator(new MockChannelStateWriter()),
                        streamTask.getName(),
                        inputGates,
                        Collections.emptyList(),
                        new SyncMailboxExecutor(),
                        new TestProcessingTimeService());

        CheckpointedInputGate[] checkpointedMultipleInputGate =
                InputProcessorUtil.createCheckpointedMultipleInputGate(
                        new SyncMailboxExecutor(),
                        inputGates,
                        environment.getMetricGroup().getIOMetricGroup(),
                        barrierHandler,
                        streamConfig);

        // register gates so they are closed even if an assertion fails
        for (CheckpointedInputGate checkpointedInputGate : checkpointedMultipleInputGate) {
            registry.registerCloseable(checkpointedInputGate);
        }

        List<IndexedInputGate> allInputGates =
                Arrays.stream(inputGates)
                        .flatMap(gates -> gates.stream())
                        .collect(Collectors.toList());
        // deliver checkpoint barrier 1 on every channel of every gate
        for (IndexedInputGate inputGate : allInputGates) {
            for (int channelId = 0;
                    channelId < inputGate.getNumberOfInputChannels();
                    channelId++) {
                barrierHandler.processBarrier(
                        new CheckpointBarrier(
                                1,
                                42,
                                CheckpointOptions.unaligned(
                                        CheckpointType.CHECKPOINT,
                                        CheckpointStorageLocationReference.getDefault())),
                        new InputChannelInfo(inputGate.getGateIndex(), channelId),
                        false);
            }
        }
        assertThat(barrierHandler.getAllBarriersReceivedFuture(1)).isDone();
    }
}
/**
 * Returns the serial ports provided by this driver.
 *
 * <p>NOTE(review): this hands out the internal list directly, so callers
 * could mutate it — consider returning an unmodifiable view; verify no
 * caller relies on mutating it first.
 */
@Override
public List<UsbSerialPort> getPorts() {
    return mPorts;
}
/**
 * A composite USB device (mass storage + CDC control/data + HID + vendor
 * interfaces) must be handled correctly: the CDC ACM driver has to pick
 * the control/data interface pair — not the other interfaces — and the
 * default prober must select the CDC ACM driver for the device.
 */
@Test
public void compositeDevice() throws Exception {
    UsbDeviceConnection usbDeviceConnection = mock(UsbDeviceConnection.class);
    UsbDevice usbDevice = mock(UsbDevice.class);
    UsbInterface massStorageInterface = mock(UsbInterface.class);
    UsbInterface controlInterface = mock(UsbInterface.class);
    UsbInterface dataInterface = mock(UsbInterface.class);
    UsbInterface hidInterface = mock(UsbInterface.class);
    UsbInterface vendorInterface = mock(UsbInterface.class);
    UsbEndpoint controlEndpoint = mock(UsbEndpoint.class);
    UsbEndpoint readEndpoint = mock(UsbEndpoint.class);
    UsbEndpoint writeEndpoint = mock(UsbEndpoint.class);
    /*
     * BBC micro:bit
     * UsbInterface[mId=0,mAlternateSetting=0,mName=USB_MSC,mClass=8,mSubclass=6,mProtocol=80,mEndpoints=[
     *   UsbEndpoint[mAddress=130,mAttributes=2,mMaxPacketSize=64,mInterval=0]
     *   UsbEndpoint[mAddress=2,mAttributes=2,mMaxPacketSize=64,mInterval=0]]
     * UsbInterface[mId=1,mAlternateSetting=0,mName=mbed Serial Port,mClass=2,mSubclass=2,mProtocol=1,mEndpoints=[
     *   UsbEndpoint[mAddress=131,mAttributes=3,mMaxPacketSize=16,mInterval=32]]
     * UsbInterface[mId=2,mAlternateSetting=0,mName=mbed Serial Port,mClass=10,mSubclass=0,mProtocol=0,mEndpoints=[
     *   UsbEndpoint[mAddress=4,mAttributes=2,mMaxPacketSize=64,mInterval=0]
     *   UsbEndpoint[mAddress=132,mAttributes=2,mMaxPacketSize=64,mInterval=0]]
     * UsbInterface[mId=3,mAlternateSetting=0,mName=CMSIS-DAP,mClass=3,mSubclass=0,mProtocol=0,mEndpoints=[
     *   UsbEndpoint[mAddress=129,mAttributes=3,mMaxPacketSize=64,mInterval=1]
     *   UsbEndpoint[mAddress=1,mAttributes=3,mMaxPacketSize=64,mInterval=1]]
     * UsbInterface[mId=4,mAlternateSetting=0,mName=WebUSB: CMSIS-DAP,mClass=255,mSubclass=3,mProtocol=0,mEndpoints=[]
     */
    // raw configuration descriptors of the device above
    when(usbDeviceConnection.getRawDescriptors()).thenReturn(HexDump.hexStringToByteArray(
            "12 01 10 02 EF 02 01 40 28 0D 04 02 00 10 01 02 03 01\n" +
            "09 02 8B 00 05 01 00 80 FA\n" +
            "09 04 00 00 02 08 06 50 08\n" +
            "07 05 82 02 40 00 00\n" +
            "07 05 02 02 40 00 00\n" +
            "08 0B 01 02 02 02 01 04\n" +
            "09 04 01 00 01 02 02 01 04\n" +
            "05 24 00 10 01\n" +
            "05 24 01 03 02\n" +
            "04 24 02 06\n" +
            "05 24 06 01 02\n" +
            "07 05 83 03 10 00 20\n" +
            "09 04 02 00 02 0A 00 00 05\n" +
            "07 05 04 02 40 00 00\n" +
            "07 05 84 02 40 00 00\n" +
            "09 04 03 00 02 03 00 00 06\n" +
            "09 21 00 01 00 01 22 21 00\n" +
            "07 05 81 03 40 00 01\n" +
            "07 05 01 03 40 00 01\n" +
            "09 04 04 00 00 FF 03 00 07"));
    when(usbDeviceConnection.claimInterface(controlInterface,true)).thenReturn(true);
    when(usbDeviceConnection.claimInterface(dataInterface,true)).thenReturn(true);
    when(usbDevice.getInterfaceCount()).thenReturn(5);
    when(usbDevice.getInterface(0)).thenReturn(massStorageInterface);
    when(usbDevice.getInterface(1)).thenReturn(controlInterface);
    when(usbDevice.getInterface(2)).thenReturn(dataInterface);
    when(usbDevice.getInterface(3)).thenReturn(hidInterface);
    when(usbDevice.getInterface(4)).thenReturn(vendorInterface);
    when(massStorageInterface.getId()).thenReturn(0);
    when(massStorageInterface.getInterfaceClass()).thenReturn(UsbConstants.USB_CLASS_MASS_STORAGE);
    when(controlInterface.getId()).thenReturn(1);
    when(controlInterface.getInterfaceClass()).thenReturn(UsbConstants.USB_CLASS_COMM);
    when(controlInterface.getInterfaceSubclass()).thenReturn(USB_SUBCLASS_ACM);
    when(dataInterface.getId()).thenReturn(2);
    when(dataInterface.getInterfaceClass()).thenReturn(UsbConstants.USB_CLASS_CDC_DATA);
    when(hidInterface.getId()).thenReturn(3);
    when(hidInterface.getInterfaceClass()).thenReturn(UsbConstants.USB_CLASS_HID);
    when(vendorInterface.getId()).thenReturn(4);
    when(vendorInterface.getInterfaceClass()).thenReturn(UsbConstants.USB_CLASS_VENDOR_SPEC);
    when(controlInterface.getEndpointCount()).thenReturn(1);
    when(controlInterface.getEndpoint(0)).thenReturn(controlEndpoint);
    when(dataInterface.getEndpointCount()).thenReturn(2);
    when(dataInterface.getEndpoint(0)).thenReturn(writeEndpoint);
    when(dataInterface.getEndpoint(1)).thenReturn(readEndpoint);
    when(controlEndpoint.getDirection()).thenReturn(UsbConstants.USB_DIR_IN);
    when(controlEndpoint.getType()).thenReturn(UsbConstants.USB_ENDPOINT_XFER_INT);
    when(readEndpoint.getDirection()).thenReturn(UsbConstants.USB_DIR_IN);
    when(readEndpoint.getType()).thenReturn(UsbConstants.USB_ENDPOINT_XFER_BULK);
    when(writeEndpoint.getDirection()).thenReturn(UsbConstants.USB_DIR_OUT);
    when(writeEndpoint.getType()).thenReturn(UsbConstants.USB_ENDPOINT_XFER_BULK);

    // opening the port must bind the CDC data interface's bulk endpoints
    CdcAcmSerialDriver driver = new CdcAcmSerialDriver(usbDevice);
    CdcAcmSerialDriver.CdcAcmSerialPort port = (CdcAcmSerialDriver.CdcAcmSerialPort) driver.getPorts().get(0);
    port.mConnection = usbDeviceConnection;
    port.openInt();
    assertEquals(readEndpoint, port.mReadEndpoint);
    assertEquals(writeEndpoint, port.mWriteEndpoint);

    // the default prober must pick the CDC ACM driver for this device
    ProbeTable probeTable = UsbSerialProber.getDefaultProbeTable();
    Class<? extends UsbSerialDriver> probeDriver = probeTable.findDriver(usbDevice);
    assertEquals(driver.getClass(), probeDriver);
}
/**
 * Sets the transport of the registry.
 *
 * @param transport the transport name to use
 * @return this builder, for chaining
 * @deprecated kept for backward compatibility; this writes the
 *             {@code transporter} field — presumably superseded by a
 *             non-deprecated transporter setter on this builder
 *             (TODO confirm the intended replacement)
 */
@Deprecated
public RegistryBuilder transport(String transport) {
    this.transporter = transport;
    return getThis();
}
/**
 * The deprecated transport(...) setter must still populate the transport
 * value on the built registry config.
 */
@Test
void transport() {
    // transport() returns the builder, so building can be chained directly
    Assertions.assertEquals(
            "transport",
            new RegistryBuilder().transport("transport").build().getTransport());
}
/**
 * Autoscales one cluster of an application, under the application lock.
 *
 * <p>Computes a new autoscaling target (unless a scaling is already in
 * progress), persists any cluster changes, and — if the target differs
 * from the current resources — triggers an immediate redeployment outside
 * the lock.
 *
 * @return false if the application lock could not be acquired (retry
 *         later), true otherwise (including "nothing to do" cases)
 */
private boolean autoscale(ApplicationId applicationId, ClusterSpec.Id clusterId) {
    boolean redeploy = false;
    // feature flags are scoped per application instance
    boolean enabled = enabledFlag.with(Dimension.INSTANCE_ID, applicationId.serializedForm()).value();
    boolean logDetails = enableDetailedLoggingFlag.with(Dimension.INSTANCE_ID, applicationId.serializedForm()).value();
    try (var lock = nodeRepository().applications().lock(applicationId)) {
        Optional<Application> application = nodeRepository().applications().get(applicationId);
        if (application.isEmpty()) return true;
        if (application.get().cluster(clusterId).isEmpty()) return true;
        Cluster cluster = application.get().cluster(clusterId).get();
        Cluster unchangedCluster = cluster;

        NodeList clusterNodes = nodeRepository().nodes().list(Node.State.active).owner(applicationId).cluster(clusterId);
        if (clusterNodes.isEmpty()) return true; // Cluster was removed since we started
        cluster = updateCompletion(cluster, clusterNodes);

        // resources currently allocated, excluding nodes on their way out
        var current = new AllocatableResources(clusterNodes.not().retired(), nodeRepository()).advertisedResources();

        // Autoscale unless an autoscaling is already in progress
        Autoscaling autoscaling = null;
        if (cluster.target().resources().isEmpty() && !cluster.scalingInProgress()) {
            autoscaling = autoscaler.autoscale(application.get(), cluster, clusterNodes, enabled, logDetails);
            if (autoscaling.isPresent() || cluster.target().isEmpty()) // Ignore empty from recently started servers
                cluster = cluster.withTarget(autoscaling);
        }

        // Always store any updates
        if (cluster != unchangedCluster)
            applications().put(application.get().with(cluster), lock);

        // Attempt to perform the autoscaling immediately, and log it regardless
        if (autoscaling != null && autoscaling.resources().isPresent() && !current.equals(autoscaling.resources().get())) {
            redeploy = true;
            logAutoscaling(current, autoscaling.resources().get(), applicationId, clusterNodes.not().retired());
            if (logDetails) {
                log.info("autoscaling data for " + applicationId.toFullString() + ": " +
                         "\n\tmetrics().cpuCostPerQuery(): " + autoscaling.metrics().cpuCostPerQuery() +
                         "\n\tmetrics().queryRate(): " + autoscaling.metrics().queryRate() +
                         "\n\tmetrics().growthRateHeadroom(): " + autoscaling.metrics().growthRateHeadroom() +
                         "\n\tpeak(): " + autoscaling.peak().toString() +
                         "\n\tideal(): " + autoscaling.ideal().toString());
            }
        }
    }
    catch (ApplicationLockException e) {
        // lock contention: signal the caller to retry this cluster later
        return false;
    }
    catch (IllegalArgumentException e) {
        throw new IllegalArgumentException("Illegal arguments for " + applicationId + " cluster " + clusterId, e);
    }
    // redeploy outside the application lock to avoid holding it during activation
    if (redeploy) {
        try (MaintenanceDeployment deployment = new MaintenanceDeployment(applicationId, deployer, metric, nodeRepository())) {
            if (deployment.isValid())
                deployment.activate();
        }
    }
    return true;
}
/**
 * Exercises the autoscaling measurement window: a short metrics window
 * must not trigger scaling, while a sufficiently long window must.
 * The exact window thresholds are asserted via the autoscale(...) helper
 * of this test class — see its definition for the boolean's meaning
 * (expected-to-scale flag, presumably; confirm against the helper).
 */
@Test
public void test_autoscaling_window() {
    ApplicationId app1 = AutoscalingMaintainerTester.makeApplicationId("app1");
    ClusterSpec cluster1 = AutoscalingMaintainerTester.containerClusterSpec();
    NodeResources lowResources = new NodeResources(4, 4, 10, 0.1);
    NodeResources highResources = new NodeResources(8, 8, 20, 0.1);
    Capacity app1Capacity = Capacity.from(new ClusterResources(2, 1, lowResources),
                                          new ClusterResources(4, 2, highResources));
    var tester = new AutoscalingMaintainerTester(new MockDeployer.ApplicationContext(app1, cluster1, app1Capacity));
    ManualClock clock = tester.clock();

    tester.deploy(app1, cluster1, app1Capacity);
    // too little data: no scaling expected
    autoscale(false, Duration.ofMinutes( 1), Duration.ofMinutes( 5), clock, app1, cluster1, tester);
    // long enough measurement window: scaling expected
    autoscale( true, Duration.ofMinutes(19), Duration.ofMinutes(10), clock, app1, cluster1, tester);
}
/**
 * Joins records of this stream with another stream's records within the
 * given join windows. Adapts the key-less {@link ValueJoiner} to a
 * key-aware joiner and delegates to the key-aware overload.
 *
 * @param otherStream the stream to join with
 * @param joiner      combines the two values into the join result
 * @param windows     the window specification for the join
 * @return a stream of joined results
 */
@Override
public <VO, VR> KStream<K, VR> join(final KStream<K, VO> otherStream,
                                    final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
                                    final JoinWindows windows) {
    return join(otherStream, toValueJoinerWithKey(joiner), windows);
}
/**
 * Joining with a null global table must be rejected eagerly with an NPE
 * carrying a descriptive message.
 */
@Test
public void shouldNotAllowNullTableOnJoinWithGlobalTableWithNamed() {
    final NullPointerException npe = assertThrows(
        NullPointerException.class,
        () -> testStream.join(
            null,
            MockMapper.selectValueMapper(),
            MockValueJoiner.TOSTRING_JOINER,
            Named.as("name")));

    assertThat(npe.getMessage(), equalTo("globalTable can't be null"));
}
/**
 * Fills {@code buf} completely from {@code f}, staging reads through the
 * caller-provided {@code temp} array (needed because direct buffers have
 * no backing array to read into).
 *
 * @param f    source stream
 * @param buf  destination buffer; filled until no space remains
 * @param temp scratch array bounding the size of each read
 * @throws EOFException if the stream ends before the buffer is full
 * @throws IOException  if reading fails
 */
static void readFullyDirectBuffer(InputStream f, ByteBuffer buf, byte[] temp) throws IOException {
  while (true) {
    int chunk = Math.min(buf.remaining(), temp.length);
    if (chunk == 0) {
      // buffer is full (or temp is zero-length) — nothing more to do
      return;
    }
    int bytesRead = f.read(temp, 0, chunk);
    if (bytesRead < 0) {
      throw new EOFException(
          "Reached the end of stream with " + buf.remaining() + " bytes left to read");
    }
    buf.put(temp, 0, bytesRead);
  }
}
/**
 * readFullyDirectBuffer must fill a direct buffer even when the
 * underlying stream returns data in small chunks (2, then 3, then 3
 * bytes per the MockInputStream arguments), and a subsequent call on a
 * full buffer must be a no-op.
 */
@Test
public void testDirectReadFullySmallReads() throws Exception {
    final ByteBuffer readBuffer = ByteBuffer.allocateDirect(10);

    MockInputStream stream = new MockInputStream(2, 3, 3);
    DelegatingSeekableInputStream.readFullyDirectBuffer(stream, readBuffer, TEMP.get());
    Assert.assertEquals(10, readBuffer.position());
    Assert.assertEquals(10, readBuffer.limit());

    // buffer already full: a second call must leave it untouched
    DelegatingSeekableInputStream.readFullyDirectBuffer(stream, readBuffer, TEMP.get());
    Assert.assertEquals(10, readBuffer.position());
    Assert.assertEquals(10, readBuffer.limit());

    readBuffer.flip();
    Assert.assertEquals("Buffer contents should match",
        ByteBuffer.wrap(TEST_ARRAY), readBuffer);
}
public static byte[] nextBytes(final byte[] bytes) { Requires.requireNonNull(bytes, "bytes"); final int len = bytes.length; if (len == 0) { // fast path return new byte[] { 0 }; } final byte[] nextBytes = new byte[len + 1]; System.arraycopy(bytes, 0, nextBytes, 0, len); nextBytes[len] = 0; return nextBytes; }
// nextBytes must append one trailing zero byte: the unsigned lexicographic successor.
@Test
public void testNextBytes() {
    byte[] fromEmpty = BytesUtil.nextBytes(new byte[] {});
    Assert.assertArrayEquals(new byte[] { 0 }, fromEmpty);
    byte[] fromPair = BytesUtil.nextBytes(new byte[] { 1, 2 });
    Assert.assertArrayEquals(new byte[] { 1, 2, 0 }, fromPair);
}
/**
 * Stores {@code obj} as the wrapped instance and records its index in the
 * registered-type table (from {@link #getTypes()}) as a compact type tag.
 *
 * @param obj the writable to wrap; its concrete class must be registered
 * @throws RuntimeException if the object's class is not in the registered table
 */
public void set(Writable obj) {
    instance = obj;
    final Class<? extends Writable> instanceClazz = instance.getClass();
    final Class<? extends Writable>[] registered = getTypes();
    for (int i = 0; i < registered.length; i++) {
        if (registered[i].equals(instanceClazz)) {
            type = (byte) i;  // the tag is the position within the registered table
            return;
        }
    }
    throw new RuntimeException("The type of instance is: "
                               + instance.getClass() + ", which is NOT registered.");
}
@Test public void testSet() throws Exception { Foo foo = new Foo(); FooGenericWritable generic = new FooGenericWritable(); //exception should not occur generic.set(foo); try { //exception should occur, since IntWritable is not registered generic = new FooGenericWritable(); generic.set(new IntWritable(1)); fail("Generic writable should have thrown an exception for a Writable not registered"); }catch (RuntimeException e) { //ignore } }
/**
 * Translates {@code sql} into the dialect of {@code storageType}.
 * Returns the original SQL untouched when no translation is needed (the storage
 * type is unknown or already matches the parsed dialect) or, if configured, when
 * translation fails.
 *
 * @param sql the SQL to translate
 * @param parameters bound statement parameters (passed through unchanged)
 * @param queryContext context carrying the parsed statement and its dialect
 * @param storageType target storage dialect; may be null
 * @param database the logical database
 * @param globalRuleMetaData global rule metadata
 * @return the (possibly translated) SQL and parameters
 * @throws SQLTranslationException if translation fails and fallback is disabled
 */
public SQLTranslatorContext translate(final String sql, final List<Object> parameters, final QueryContext queryContext,
                                      final DatabaseType storageType, final ShardingSphereDatabase database, final RuleMetaData globalRuleMetaData) {
    DatabaseType sqlParserType = queryContext.getSqlStatementContext().getDatabaseType();
    // Null-check first so we never rely on equals(null) returning false.
    if (null == storageType || sqlParserType.equals(storageType)) {
        return new SQLTranslatorContext(sql, parameters);
    }
    try {
        return translator.translate(sql, parameters, queryContext, storageType, database, globalRuleMetaData);
    } catch (final SQLTranslationException ex) {
        if (useOriginalSQLWhenTranslatingFailed) {
            // Configured to degrade gracefully: fall back to the untranslated SQL.
            return new SQLTranslatorContext(sql, parameters);
        }
        throw ex;
    }
}
// With useOriginalSQLWhenTranslatingFailed=true, a failing translation (PostgreSQL
// statement against a MySQL storage type via the FIXTURE translator — presumably the
// fixture raises on this input; confirm fixture) must fall back to the original SQL
// instead of propagating the translation error.
@Test void assertUseOriginalSQLWhenTranslatingFailed() {
    String expected = "ERROR: select 1";
    DatabaseType sqlParserType = TypedSPILoader.getService(DatabaseType.class, "PostgreSQL");
    QueryContext queryContext = mock(QueryContext.class, RETURNS_DEEP_STUBS);
    when(queryContext.getSqlStatementContext().getDatabaseType()).thenReturn(sqlParserType);
    DatabaseType storageType = TypedSPILoader.getService(DatabaseType.class, "MySQL");
    SQLTranslatorContext actual = new SQLTranslatorRule(new SQLTranslatorRuleConfiguration("FIXTURE", new Properties(), true)).translate(
        expected, Collections.emptyList(), queryContext, storageType, mock(ShardingSphereDatabase.class), mock(RuleMetaData.class));
    // Fallback must hand back the untranslated SQL verbatim.
    assertThat(actual.getSql(), is(expected));
}
@Override public void onError(Throwable e) { // individual callbacks are notified first for (Callback<RestResponse> callback : _callbacks.values()) { callback.onError(e); } // aggregated callback is guaranteed to be called after all individual callbacks _aggregatedCallback.onError(e); }
// A single onError on the multiplexed callback must deliver the same Throwable
// instance to every individual callback AND to the aggregated callback.
@Test
public void testError() throws Exception {
    FutureCallback<RestResponse> callback1 = new FutureCallback<>();
    FutureCallback<RestResponse> callback2 = new FutureCallback<>();
    ImmutableMap<Integer, Callback<RestResponse>> individualCallbacks = ImmutableMap.<Integer, Callback<RestResponse>>of(ID1, callback1, ID2, callback2);
    FutureCallback<MultiplexedResponse> aggregatedCallback = new FutureCallback<>();
    MultiplexedCallback multiplexedCallback = new MultiplexedCallback(individualCallbacks, aggregatedCallback);
    RestLiDecodingException exception = new RestLiDecodingException(null, null);
    multiplexedCallback.onError(exception);
    // Same instance (not just equal) must reach all three callbacks.
    Assert.assertSame(getError(callback1), exception);
    Assert.assertSame(getError(callback2), exception);
    Assert.assertSame(getError(aggregatedCallback), exception);
}
/**
 * Creates one data source per entry of {@code propsMap}, preserving entry order.
 * The data sources created so far ({@code result.values()}) are passed to each
 * subsequent creation, matching the original incremental behavior.
 *
 * @param propsMap pool properties keyed by data source name
 * @param cacheEnabled whether creation may use the cache
 * @return name-to-data-source map in the input's iteration order
 */
public static Map<String, DataSource> create(final Map<String, DataSourcePoolProperties> propsMap, final boolean cacheEnabled) {
    Map<String, DataSource> result = new LinkedHashMap<>(propsMap.size(), 1F);
    propsMap.forEach((name, props) -> result.put(name, create(name, props, cacheEnabled, result.values())));
    return result;
}
// A single-entry properties map must yield a single correctly-configured data source
// under the same key.
@Test
void assertCreateMap() {
    Map<String, DataSourcePoolProperties> propsMap = Collections.singletonMap(
            "foo_ds", new DataSourcePoolProperties(MockedDataSource.class.getName(), createProperties()));
    Map<String, DataSource> actual = DataSourcePoolCreator.create(propsMap, true);
    assertThat(actual.size(), is(1));
    assertDataSource((MockedDataSource) actual.get("foo_ds"));
}
public int getTailMatchLength(ElementPath p) { if (p == null) { return 0; } int lSize = this.partList.size(); int rSize = p.partList.size(); // no match possible for empty sets if ((lSize == 0) || (rSize == 0)) { return 0; } int minLen = (lSize <= rSize) ? lSize : rSize; int match = 0; // loop from the end to the front for (int i = 1; i <= minLen; i++) { String l = this.partList.get(lSize - i); String r = p.partList.get(rSize - i); if (equalityCheck(l, r)) { match++; } else { break; } } return match; }
// Table of (element path, selector pattern, expected tail-match length) cases,
// including case-insensitive matches (/A vs */a) and multi-component suffixes.
@Test
public void testTailMatch() {
    expectTailMatch("/a/b", "*", 0);
    expectTailMatch("/a", "*/a", 1);
    expectTailMatch("/A", "*/a", 1);
    expectTailMatch("/a", "*/A", 1);
    expectTailMatch("/a/b", "*/b", 1);
    expectTailMatch("/a/B", "*/b", 1);
    expectTailMatch("/a/b/c", "*/b/c", 2);
}

// Builds the path and selector and asserts the tail-match length.
private void expectTailMatch(String path, String selectorPattern, int expected) {
    ElementPath p = new ElementPath(path);
    ElementSelector ruleElementSelector = new ElementSelector(selectorPattern);
    assertEquals(expected, ruleElementSelector.getTailMatchLength(p));
}
// OSGi deactivation hook: clears all cached SNMP sessions and all discovered
// devices in place (the maps themselves survive), then logs shutdown.
@Deactivate
public void deactivate() {
    sessionMap.clear();
    snmpDeviceMap.clear();
    log.info("Stopped");
}
// After deactivate(), both the device registry and the session cache must be empty.
@Test
public void testDeactivate() {
    snmpController.deactivate();
    assertEquals("Device map should be clear", 0, snmpController.getDevices().size());
    assertEquals("Session map should be clear", 0, snmpController.sessionMap.size());
}
/**
 * Looks up a rule by its exact key, lazily initializing the registry first.
 *
 * @param key the rule key; validated by verifyKeyArgument
 * @return the matching rule, or empty when none is registered under the key
 */
@Override
public Optional<Rule> findByKey(RuleKey key) {
    verifyKeyArgument(key);
    ensureInitialized();
    Rule found = rulesByKey.get(key);
    return Optional.ofNullable(found);
}
// A key known only as a deprecated alias of a rule that does not exist must not
// resolve — presumably findByKey is exact-key lookup and does not follow
// deprecated-key mappings; confirm against the constant's definition.
@Test
public void findByKey_returns_empty_if_argument_is_deprecated_key_in_DB_of_rule_in_DB() {
    Optional<Rule> rule = underTest.findByKey(DEPRECATED_KEY_OF_NON_EXITING_RULE);
    assertThat(rule).isEmpty();
}
/**
 * Static factory: returns a {@code ListCoder} that encodes each list element
 * with {@code elemCoder}.
 *
 * @param elemCoder coder used for individual list elements
 * @return a new {@code ListCoder} wrapping {@code elemCoder}
 */
public static <T> ListCoder<T> of(Coder<T> elemCoder) {
    return new ListCoder<>(elemCoder);
}
// VarIntCoder cannot encode null elements, so round-tripping a list containing a
// null must fail with a CoderException whose message names the null Integer.
@Test
public void testListWithNullsAndVarIntCoderThrowsException() throws Exception {
    // ExpectedException rule: expectations must be declared before the failing call.
    thrown.expect(CoderException.class);
    thrown.expectMessage("cannot encode a null Integer");
    List<Integer> list = Arrays.asList(1, 2, 3, null, 4);
    Coder<List<Integer>> coder = ListCoder.of(VarIntCoder.of());
    CoderProperties.coderDecodeEncodeEqual(coder, list);
}
// Applies the base program options first, then overlays the Python-specific
// configuration — presumably merge() lets pythonConfiguration override keys the
// base pass already set; confirm merge() semantics.
@Override
public void applyToConfiguration(Configuration configuration) {
    super.applyToConfiguration(configuration);
    merge(configuration, pythonConfiguration);
}
// Parses every long-form Python CLI option (--python, --pyModule, --pyFiles,
// --pyRequirements, --pyExecutable, --pyArchives, --pyPythonPath) and verifies each
// lands in the matching PythonOptions configuration key, while the entry-point
// options plus the user args survive as the program arguments.
@Test
void testCreateProgramOptionsWithLongOptions() throws CliArgsException {
    String[] args = {
        "--python",
        "xxx.py",
        "--pyModule",
        "xxx",
        "--pyFiles",
        "/absolute/a.py,relative/b.py,relative/c.py",
        "--pyRequirements",
        "d.txt#e_dir",
        "--pyExecutable",
        "/usr/bin/python",
        "--pyArchives",
        "g.zip,h.zip#data,h.zip#data2",
        "--pyPythonPath",
        "bin/python/lib/:bin/python/lib64",
        "userarg1",
        "userarg2"
    };
    CommandLine line = CliFrontendParser.parse(options, args, false);
    PythonProgramOptions programOptions = (PythonProgramOptions) ProgramOptions.create(line);
    Configuration config = new Configuration();
    programOptions.applyToConfiguration(config);
    // Each CLI option maps 1:1 onto its PythonOptions key, values passed through verbatim.
    assertThat(config.get(PythonOptions.PYTHON_FILES))
        .isEqualTo("/absolute/a.py,relative/b.py,relative/c.py");
    assertThat(config.get(PYTHON_REQUIREMENTS)).isEqualTo("d.txt#e_dir");
    assertThat(config.get(PythonOptions.PYTHON_ARCHIVES))
        .isEqualTo("g.zip,h.zip#data,h.zip#data2");
    assertThat(config.get(PYTHON_EXECUTABLE)).isEqualTo("/usr/bin/python");
    assertThat(config.get(PythonOptions.PYTHON_PATH))
        .isEqualTo("bin/python/lib/:bin/python/lib64");
    // Entry-point selection options and user args remain as program arguments.
    assertThat(programOptions.getProgramArgs())
        .containsExactly("--python", "xxx.py", "--pyModule", "xxx", "userarg1", "userarg2");
}