focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Applies one piece of distro-synced client data.
 * ADD/CHANGE deserialize and apply a full client snapshot; DELETE disconnects
 * the client identified by the distro resource key. Returns whether the
 * operation type was handled.
 */
@Override
public boolean processData(DistroData distroData) {
    final DataOperation operation = distroData.getType();
    if (operation == DataOperation.ADD || operation == DataOperation.CHANGE) {
        // Full sync payload: deserialize the snapshot and apply it.
        ClientSyncData syncData = ApplicationUtils.getBean(Serializer.class)
                .deserialize(distroData.getContent(), ClientSyncData.class);
        handlerClientSyncData(syncData);
        return true;
    }
    if (operation == DataOperation.DELETE) {
        String deleteClientId = distroData.getDistroKey().getResourceKey();
        Loggers.DISTRO.info("[Client-Delete] Received distro client sync data {}", deleteClientId);
        clientManager.clientDisconnected(deleteClientId);
        return true;
    }
    // Unknown operation types are not handled.
    return false;
}
/**
 * Verifies processData transitions correctly between single-instance and
 * batch-instance sync payloads (single -> batch -> batch -> single), checking
 * the client revision, published-service count and instance details each step.
 */
@Test
void testProcessDataForBatch() {
    // swap tmp: replace the application-context Serializer with a mock
    Serializer mock = Mockito.mock(Serializer.class);
    when(applicationContext.getBean(Serializer.class)).thenReturn(mock);
    // single instance => batch instances => batch instances => single instance
    // single
    ClientSyncData syncData = createSingleForBatchTest(1);
    DistroData data = new DistroData();
    data.setContent(serializer.serialize(syncData));
    data.setType(DataOperation.ADD);
    when(mock.deserialize(any(), eq(ClientSyncData.class))).thenReturn(syncData);
    distroClientDataProcessor.processData(data);
    assertEquals(1L, client.getRevision());
    assertEquals(1, client.getAllPublishedService().size());
    Service service = Service.newService("batchData", "batchData", "batchData");
    Service singleton = ServiceManager.getInstance().getSingleton(service);
    // NOTE(review): the already-resolved singleton is passed back into getSingleton() — confirm intended
    InstancePublishInfo info = client.getInstancePublishInfo(ServiceManager.getInstance().getSingleton(singleton));
    assertEquals("127.0.0.1", info.getIp());
    assertEquals(8080, info.getPort());
    // batch: two instances replace the single one
    data = new DistroData();
    syncData = createBatchForBatchTest(2);
    data.setContent(serializer.serialize(syncData));
    data.setType(DataOperation.CHANGE);
    when(mock.deserialize(any(), eq(ClientSyncData.class))).thenReturn(syncData);
    distroClientDataProcessor.processData(data);
    assertEquals(2L, client.getRevision());
    assertEquals(1, client.getAllPublishedService().size());
    info = client.getInstancePublishInfo(ServiceManager.getInstance().getSingleton(singleton));
    assertTrue(info instanceof BatchInstancePublishInfo);
    BatchInstancePublishInfo batchInfo = (BatchInstancePublishInfo) info;
    assertEquals(2, batchInfo.getInstancePublishInfos().size());
    for (InstancePublishInfo instancePublishInfo : batchInfo.getInstancePublishInfos()) {
        assertEquals("127.0.0.1", instancePublishInfo.getIp());
        assertTrue(instancePublishInfo.getPort() == 8080 || instancePublishInfo.getPort() == 8081);
    }
    // batch again: a second batch payload keeps the batch shape
    data = new DistroData();
    syncData = createBatchForBatchTest(3);
    data.setContent(serializer.serialize(syncData));
    data.setType(DataOperation.CHANGE);
    when(mock.deserialize(any(), eq(ClientSyncData.class))).thenReturn(syncData);
    distroClientDataProcessor.processData(data);
    assertEquals(3L, client.getRevision());
    assertEquals(1, client.getAllPublishedService().size());
    info = client.getInstancePublishInfo(ServiceManager.getInstance().getSingleton(singleton));
    assertTrue(info instanceof BatchInstancePublishInfo);
    batchInfo = (BatchInstancePublishInfo) info;
    assertEquals(2, batchInfo.getInstancePublishInfos().size());
    for (InstancePublishInfo instancePublishInfo : batchInfo.getInstancePublishInfos()) {
        assertEquals("127.0.0.1", instancePublishInfo.getIp());
        assertTrue(instancePublishInfo.getPort() == 8080 || instancePublishInfo.getPort() == 8081);
    }
    // single again: sync back to a plain (non-batch) instance
    syncData = createSingleForBatchTest(4);
    data = new DistroData();
    data.setContent(serializer.serialize(syncData));
    data.setType(DataOperation.ADD);
    when(mock.deserialize(any(), eq(ClientSyncData.class))).thenReturn(syncData);
    distroClientDataProcessor.processData(data);
    assertEquals(4L, client.getRevision());
    assertEquals(1, client.getAllPublishedService().size());
    info = client.getInstancePublishInfo(ServiceManager.getInstance().getSingleton(singleton));
    assertEquals("127.0.0.1", info.getIp());
    assertEquals(8080, info.getPort());
}
/**
 * Issues DELETE /posts/{id}. Emits a bodiless entity on 204 NO_CONTENT;
 * any other status is converted into an error signal.
 */
Mono<ResponseEntity<Void>> delete(UUID id) {
    return client.delete()
            .uri(uriBuilder -> uriBuilder.path("/posts/{id}").build(id))
            .exchangeToMono(response ->
                    response.statusCode().equals(HttpStatus.NO_CONTENT)
                            ? response.toBodilessEntity()
                            : response.createError());
}
/**
 * Stubs a WireMock DELETE /posts/{id} returning 204 and verifies the client
 * maps it to a bodiless 204 entity and actually issued the request.
 */
@SneakyThrows
@Test
public void testDeletePostById() {
    var id = UUID.randomUUID();
    stubFor(delete("/posts/" + id)
            .willReturn(
                    aResponse()
                            .withStatus(204)
            )
    );
    postClient.delete(id)
            .as(StepVerifier::create)
            // the 204 branch yields a bodiless entity rather than an error
            .consumeNextWith(
                    entity -> assertThat(entity.getStatusCode().value()).isEqualTo(204)
            )
            .verifyComplete();
    verify(deleteRequestedFor(urlEqualTo("/posts/" + id)));
}
/**
 * Looks up the multimap stored under the given key definition.
 *
 * @param keyDefinition key definition to resolve
 * @return the multimap registered in the underlying tree for that definition
 */
public MultiMap<Value, T, List<T>> get(final KeyDefinition keyDefinition) {
    final MultiMap<Value, T, List<T>> result = tree.get(keyDefinition);
    return result;
}
/**
 * Retracts one person (toni) via its UUID key and verifies it disappears from
 * the UUID, name and age indexes while the other two entries remain.
 */
@Test
void testRetract() throws Exception {
    toni.uuidKey.retract();
    // only eder and michael remain in the UUID index
    assertThat(map.get(UUIDKey.UNIQUE_UUID).keySet()).containsExactlyInAnyOrder(eder.uuidKey.getSingleValue(),
            michael.uuidKey.getSingleValue());
    MultiMap<Value, Person, List<Person>> nameMap = map.get(KeyDefinition.newKeyDefinition()
            .withId("name")
            .build());
    assertThat(nameMap.keySet()).extracting(x -> x.getComparable()).containsExactly("Eder", "Michael");
    MultiMap<Value, Person, List<Person>> ageMap = map.get(KeyDefinition.newKeyDefinition()
            .withId("age")
            .build());
    assertThat(ageMap.keySet()).extracting(x -> x.getComparable()).containsExactly(20, 30);
}
static int readDirectBuffer(InputStream f, ByteBuffer buf, byte[] temp) throws IOException { // copy all the bytes that return immediately, stopping at the first // read that doesn't return a full buffer. int nextReadLength = Math.min(buf.remaining(), temp.length); int totalBytesRead = 0; int bytesRead; while ((bytesRead = f.read(temp, 0, nextReadLength)) == temp.length) { buf.put(temp); totalBytesRead += bytesRead; nextReadLength = Math.min(buf.remaining(), temp.length); } if (bytesRead < 0) { // return -1 if nothing was read return totalBytesRead == 0 ? -1 : totalBytesRead; } else { // copy the last partial buffer buf.put(temp, 0, bytesRead); totalBytesRead += bytesRead; return totalBytesRead; } }
/**
 * Reads a 10-byte mock stream into a 20-byte direct buffer and verifies the
 * byte count, buffer position, EOF signalling (-1) and the copied contents.
 */
@Test
public void testDirectRead() throws Exception {
    ByteBuffer readBuffer = ByteBuffer.allocateDirect(20);
    MockInputStream stream = new MockInputStream();
    int len = DelegatingSeekableInputStream.readDirectBuffer(stream, readBuffer, TEMP.get());
    Assert.assertEquals(10, len);
    Assert.assertEquals(10, readBuffer.position());
    Assert.assertEquals(20, readBuffer.limit());
    // second call hits EOF immediately and must report -1
    len = DelegatingSeekableInputStream.readDirectBuffer(stream, readBuffer, TEMP.get());
    Assert.assertEquals(-1, len);
    readBuffer.flip();
    Assert.assertEquals("Buffer contents should match", ByteBuffer.wrap(TEST_ARRAY), readBuffer);
}
/**
 * Windowed left join overload taking a key-less {@link ValueJoiner}; adapts it
 * to the key-aware form and delegates to the primary leftJoin implementation.
 */
@Override
public <VO, VR> KStream<K, VR> leftJoin(final KStream<K, VO> otherStream,
                                        final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
                                        final JoinWindows windows) {
    final ValueJoinerWithKey<? super K, ? super V, ? super VO, ? extends VR> keyAwareJoiner =
            toValueJoinerWithKey(joiner);
    return leftJoin(otherStream, keyAwareJoiner, windows);
}
/**
 * Passing a null table to leftJoin with a Joined config must throw an NPE
 * with the expected message.
 */
@Test
public void shouldNotAllowNullTableOnTableLeftJoinWithJoined() {
    final NullPointerException exception = assertThrows(
            NullPointerException.class,
            () -> testStream.leftJoin(null, MockValueJoiner.TOSTRING_JOINER, Joined.as("name")));
    assertThat(exception.getMessage(), equalTo("table can't be null"));
}
/**
 * Merges sharded query results for a SELECT. A single result without an
 * aggregation rewrite streams straight through; otherwise column label
 * indexes are attached to the statement context, the merged result is built
 * and then decorated (e.g. for pagination).
 */
@Override
public MergedResult merge(final List<QueryResult> queryResults, final SQLStatementContext sqlStatementContext,
                          final ShardingSphereDatabase database, final ConnectionContext connectionContext) throws SQLException {
    final boolean singlePassThrough = 1 == queryResults.size() && !isNeedAggregateRewrite(sqlStatementContext);
    if (singlePassThrough) {
        return new IteratorStreamMergedResult(queryResults);
    }
    final Map<String, Integer> labelIndexes = getColumnLabelIndexMap(queryResults.get(0));
    final SelectStatementContext selectContext = (SelectStatementContext) sqlStatementContext;
    selectContext.setIndexes(labelIndexes);
    final MergedResult built = build(queryResults, selectContext, labelIndexes, database);
    return decorate(queryResults, selectContext, built);
}
/**
 * Builds an Oracle SELECT with a COUNT(*) aggregation over a rownum-limited
 * subquery and asserts the merger wraps a group-by memory merged result in a
 * row-number decorator.
 */
@Test
void assertBuildGroupByMemoryMergedResultWithAggregationOnlyWithOracleLimit() throws SQLException {
    final ShardingDQLResultMerger resultMerger =
            new ShardingDQLResultMerger(TypedSPILoader.getService(DatabaseType.class, "Oracle"));
    final ShardingSphereDatabase database = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS);
    // WHERE row_id >= 1 — the Oracle rownum pagination predicate
    WhereSegment whereSegment = mock(WhereSegment.class);
    BinaryOperationExpression binaryOperationExpression = mock(BinaryOperationExpression.class);
    when(binaryOperationExpression.getLeft()).thenReturn(new ColumnSegment(0, 0, new IdentifierValue("row_id")));
    when(binaryOperationExpression.getRight()).thenReturn(new LiteralExpressionSegment(0, 0, 1L));
    when(binaryOperationExpression.getOperator()).thenReturn(">=");
    when(whereSegment.getExpr()).thenReturn(binaryOperationExpression);
    // FROM (subquery with a TOP projection aliased row_id)
    SubqueryTableSegment subqueryTableSegment = mock(SubqueryTableSegment.class);
    SubquerySegment subquerySegment = mock(SubquerySegment.class);
    SelectStatement subSelectStatement = mock(MySQLSelectStatement.class);
    ProjectionsSegment subProjectionsSegment = mock(ProjectionsSegment.class);
    TopProjectionSegment topProjectionSegment = mock(TopProjectionSegment.class);
    when(topProjectionSegment.getAlias()).thenReturn("row_id");
    when(subProjectionsSegment.getProjections()).thenReturn(Collections.singletonList(topProjectionSegment));
    when(subSelectStatement.getProjections()).thenReturn(subProjectionsSegment);
    when(subquerySegment.getSelect()).thenReturn(subSelectStatement);
    when(subqueryTableSegment.getSubquery()).thenReturn(subquerySegment);
    // outer projection: COUNT(*) only
    ProjectionsSegment projectionsSegment = new ProjectionsSegment(0, 0);
    projectionsSegment.getProjections().add(new AggregationProjectionSegment(0, 0, AggregationType.COUNT, "COUNT(*)"));
    OracleSelectStatement selectStatement = (OracleSelectStatement) buildSelectStatement(new OracleSelectStatement());
    selectStatement.setProjections(projectionsSegment);
    selectStatement.setFrom(subqueryTableSegment);
    selectStatement.setWhere(whereSegment);
    SelectStatementContext selectStatementContext = new SelectStatementContext(createShardingSphereMetaData(database),
            null, selectStatement, DefaultDatabase.LOGIC_NAME, Collections.emptyList());
    MergedResult actual =
            resultMerger.merge(createQueryResults(), selectStatementContext, createDatabase(), mock(ConnectionContext.class));
    assertThat(actual, instanceOf(RowNumberDecoratorMergedResult.class));
    assertThat(((RowNumberDecoratorMergedResult) actual).getMergedResult(), instanceOf(GroupByMemoryMergedResult.class));
}
/**
 * Resolves attributes for a Swift path. Containers use container info;
 * objects use object metadata. Missing directory placeholders fall back to a
 * listing probe (common prefix), and missing files fall back to a pending
 * large-object upload check. MIME-type/path-type mismatches are reported as
 * not-found.
 */
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
    if(file.isRoot()) {
        return PathAttributes.EMPTY;
    }
    final Region region = regionService.lookup(file);
    try {
        if(containerService.isContainer(file)) {
            // container-level attributes come from the container info, not object metadata
            final ContainerInfo info = session.getClient().getContainerInfo(region,
                    containerService.getContainer(file).getName());
            final PathAttributes attributes = new PathAttributes();
            attributes.setSize(info.getTotalSize());
            attributes.setRegion(info.getRegion().getRegionId());
            return attributes;
        }
        final ObjectMetadata metadata;
        try {
            try {
                metadata = session.getClient().getObjectMetaData(region,
                        containerService.getContainer(file).getName(), containerService.getKey(file));
            }
            catch(GenericException e) {
                throw new SwiftExceptionMappingService().map("Failure to read attributes of {0}", e, file);
            }
        }
        catch(NotfoundException e) {
            if(file.isDirectory()) {
                // Directory placeholder file may be missing. Still return empty attributes when we find children
                try {
                    new SwiftObjectListService(session).list(file, new CancellingListProgressListener());
                }
                catch(ListCanceledException l) {
                    // Found common prefix
                    return PathAttributes.EMPTY;
                }
                catch(NotfoundException n) {
                    // no children either: propagate the original not-found
                    throw e;
                }
                // Common prefix only
                return PathAttributes.EMPTY;
            }
            // Try to find pending large file upload
            final Write.Append append = new SwiftLargeObjectUploadFeature(session, regionService,
                    new SwiftWriteFeature(session, regionService)).append(file, new TransferStatus());
            if(append.append) {
                return new PathAttributes().withSize(append.offset);
            }
            throw e;
        }
        if(file.isDirectory()) {
            // a path typed as directory must carry the directory placeholder MIME type
            if(!StringUtils.equals(SwiftDirectoryFeature.DIRECTORY_MIME_TYPE, metadata.getMimeType())) {
                throw new NotfoundException(String.format("File %s has set MIME type %s but expected %s",
                        file.getAbsolute(), metadata.getMimeType(), SwiftDirectoryFeature.DIRECTORY_MIME_TYPE));
            }
        }
        if(file.isFile()) {
            // conversely a file must not carry the directory placeholder MIME type
            if(StringUtils.equals(SwiftDirectoryFeature.DIRECTORY_MIME_TYPE, metadata.getMimeType())) {
                throw new NotfoundException(String.format("File %s has set MIME type %s",
                        file.getAbsolute(), metadata.getMimeType()));
            }
        }
        return this.toAttributes(metadata);
    }
    catch(GenericException e) {
        throw new SwiftExceptionMappingService().map("Failure to read attributes of {0}", e, file);
    }
    catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map("Failure to read attributes of {0}", e, file);
    }
}
/**
 * Touches an empty object, verifies its attributes (size, checksum, ETag,
 * modification date vs. listing), then asserts that querying the same key as
 * a directory type is reported not-found. Cleans up the object afterwards.
 */
@Test
public void testFind() throws Exception {
    final Path container = new Path("test.cyberduck.ch", EnumSet.of(Path.Type.directory, Path.Type.volume));
    container.attributes().setRegion("IAD");
    final String name = new AlphanumericRandomStringService().random();
    final Path test = new SwiftTouchFeature(session, new SwiftRegionService(session)).touch(
            new Path(container, String.format("%s-", name), EnumSet.of(Path.Type.file)), new TransferStatus());
    final SwiftAttributesFinderFeature f = new SwiftAttributesFinderFeature(session);
    final PathAttributes attributes = f.find(test);
    assertEquals(0L, attributes.getSize());
    assertEquals(EnumSet.of(Path.Type.file), test.getType());
    assertNull(attributes.getETag());
    // MD5 of the empty payload
    assertEquals("d41d8cd98f00b204e9800998ecf8427e", attributes.getChecksum().hash);
    assertEquals(attributes.getModificationDate(), new SwiftObjectListService(session).list(container, new DisabledListProgressListener())
            .find(new DefaultPathPredicate(test)).attributes().getModificationDate());
    assertNotEquals(-1L, attributes.getModificationDate());
    // Test wrong type
    try {
        f.find(new Path(container, name, EnumSet.of(Path.Type.directory)));
        fail();
    }
    catch(NotfoundException e) {
        // Expected
    }
    try {
        f.find(new Path(container, String.format("%s-", name), EnumSet.of(Path.Type.directory)));
        fail();
    }
    catch(NotfoundException e) {
        // Expected
    }
    new SwiftDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Checks whether the subject may read every entity in the given collection:
 * either the collection is marked all-allowed or the subject holds the
 * wildcard form of its read permission. Unknown collections yield false.
 */
public boolean hasReadPermissionForWholeCollection(final Subject subject, final String collection) {
    final Optional<String> readPermission = readPermissionForCollection(collection);
    if (!readPermission.isPresent()) {
        return false;
    }
    final String rp = readPermission.get();
    return rp.equals(DbEntity.ALL_ALLOWED) || subject.isPermitted(rp + ":*");
}
/**
 * When the catalog maps the collection to a read permission and the subject
 * holds its wildcard form, whole-collection read access is granted.
 */
@Test
void hasReadPermissionForWholeCollectionReturnsTrueWhenSubjectHasPermission() {
    doReturn(Optional.of(
            new DbEntityCatalogEntry("streams", "title", StreamImpl.class, "streams:read"))
    ).when(catalog)
            .getByCollectionName("streams");
    // wildcard permission covers every entity in the collection
    doReturn(true).when(subject).isPermitted("streams:read:*");
    final boolean hasReadPermissions = toTest.hasReadPermissionForWholeCollection(subject, "streams");
    assertTrue(hasReadPermissions);
}
@VisibleForTesting static void validateFips(final KsqlConfig config, final KsqlRestConfig restConfig) { if (config.getBoolean(ConfluentConfigs.ENABLE_FIPS_CONFIG)) { final FipsValidator fipsValidator = ConfluentConfigs.buildFipsValidator(); // validate cipher suites and TLS version validateCipherSuites(fipsValidator, restConfig); // validate broker validateBroker(fipsValidator, config); // validate ssl endpoint algorithm validateSslEndpointAlgo(fipsValidator, restConfig); // validate schema registry url validateSrUrl(fipsValidator, restConfig); // validate all listeners validateListeners(fipsValidator, restConfig); log.info("FIPS mode enabled for ksqlDB!"); } }
/**
 * A proxy-protocol listener using plain http under FIPS mode must be rejected
 * with a SecurityException naming the invalid protocol.
 */
@Test
public void shouldFailOnInvalidProxyListenerProtocols() {
    // Given:
    final KsqlConfig config = configWith(ImmutableMap.of(
            ConfluentConfigs.ENABLE_FIPS_CONFIG, true,
            CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_SSL.name
    ));
    final KsqlRestConfig restConfig = new KsqlRestConfig(ImmutableMap.<String, Object>builder()
            .put(KsqlRestConfig.SSL_CIPHER_SUITES_CONFIG, Collections.singletonList("TLS_RSA_WITH_AES_256_CCM"))
            .put(KsqlConfig.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, "https")
            .put(KsqlRestConfig.LISTENERS_CONFIG, Collections.singletonList("https://bar:8080"))
            // the offending listener: http is not FIPS-compliant
            .put(KsqlRestConfig.PROXY_PROTOCOL_LISTENERS_CONFIG, Collections.singletonList("http://baz:8080"))
            .build()
    );
    // When:
    final Exception e = assertThrows(
            SecurityException.class,
            () -> KsqlServerMain.validateFips(config, restConfig)
    );
    // Then:
    assertThat(e.getMessage(), containsString(
            "FIPS 140-2 Configuration Error, invalid rest protocol: http"
                    + "\nInvalid rest protocol for listeners."
                    + "\nMake sure that all listeners, listeners.proxy.protocol, ksql.advertised.listener, and ksql.internal.listener follow FIPS 140-2."));
}
/**
 * Resolves the table's partition columns from its partition column names.
 *
 * @return a fresh mutable list of partition columns; empty when the table is
 *     not partitioned
 */
@Override
public List<Column> getPartitionColumns() {
    if (partColumnNames.isEmpty()) {
        return new ArrayList<>();
    }
    return partColumnNames.stream()
            .map(this::getColumn)
            .collect(Collectors.toList());
}
/**
 * Builds a three-column Paimon row type with two partition keys (b, c) and
 * verifies getPartitionColumns returns exactly the columns matching those keys.
 */
@Test
public void testPartitionKeys(@Mocked FileStoreTable paimonNativeTable) {
    RowType rowType =
            RowType.builder().field("a", DataTypes.INT()).field("b", DataTypes.INT()).field("c", DataTypes.INT())
                    .build();
    List<DataField> fields = rowType.getFields();
    List<Column> fullSchema = new ArrayList<>(fields.size());
    ArrayList<String> partitions = Lists.newArrayList("b", "c");
    ArrayList<Column> partitionSchema = new ArrayList<>();
    // convert every Paimon field; collect the expected partition columns on the side
    for (DataField field : fields) {
        String fieldName = field.name();
        DataType type = field.type();
        Type fieldType = ColumnTypeConverter.fromPaimonType(type);
        Column column = new Column(fieldName, fieldType, true);
        fullSchema.add(column);
        if (partitions.contains(fieldName)) {
            partitionSchema.add(column);
        }
    }
    new Expectations() {
        {
            paimonNativeTable.rowType();
            result = rowType;
            paimonNativeTable.partitionKeys();
            result = partitions;
        }
    };
    PaimonTable paimonTable =
            new PaimonTable("testCatalog", "testDB", "testTable", fullSchema, paimonNativeTable, 100L);
    List<Column> partitionColumns = paimonTable.getPartitionColumns();
    Assertions.assertThat(partitionColumns).hasSameElementsAs(partitionSchema);
}
/**
 * Validates an ARP mode string.
 *
 * @param arpMode candidate mode, possibly null
 * @return true only for the supported proxy or broadcast modes
 */
public static boolean checkArpMode(String arpMode) {
    if (isNullOrEmpty(arpMode)) {
        return false;
    }
    // constants are non-null, so reversing the equals direction is safe
    return PROXY_MODE.equals(arpMode) || BROADCAST_MODE.equals(arpMode);
}
/** Null is rejected; the two supported modes are accepted. */
@Test
public void testCheckArpMode() {
    assertFalse(checkArpMode(null));
    assertTrue(checkArpMode("proxy"));
    assertTrue(checkArpMode("broadcast"));
}
/**
 * Renders the DTO as
 * {@code RuleDescriptionSectionDto[uuid='…', key='…', content='…', context='…']}.
 */
@Override
public String toString() {
    return RuleDescriptionSectionDto.class.getSimpleName()
            + "[uuid='" + uuid + "'"
            + ", key='" + key + "'"
            + ", content='" + content + "'"
            + ", context='" + context + "'"
            + "]";
}
/**
 * Pins the exact toString rendering of the fixture section, including the
 * nested context DTO rendering.
 */
@Test
void testToString() {
    // NOTE(review): arguments look swapped for AssertJ (actual should go in assertThat) — behavior is symmetric but readability suffers; confirm intent
    assertThat("RuleDescriptionSectionDto[uuid='uuid', key='key', content='desc', "
            + "context='RuleDescriptionSectionContextDto[key='key', displayName='displayName']']")
            .isEqualTo(SECTION.toString());
}
/**
 * Supplies per-entity indexing metadata: entities without an index descriptor
 * get the no-indexing sentinel, others a search-field metadata wrapper.
 */
@Override
public IndexedFieldProvider<Class<?>> getIndexedFieldProvider() {
    return entityType -> {
        final IndexDescriptor descriptor = getIndexDescriptor(entityType);
        return descriptor == null
                ? CLASS_NO_INDEXING
                : new SearchFieldIndexingMetadata(descriptor);
    };
}
/** The stored "description" field is projectable but not sortable. */
@Test
public void testRecognizeStoredField() {
    assertThat(propertyHelper.getIndexedFieldProvider().get(TestEntity.class).isProjectable(new String[]{"description"})).isTrue();
    assertThat(propertyHelper.getIndexedFieldProvider().get(TestEntity.class).isSortable(new String[]{"description"})).isFalse();
}
/**
 * Builds a PARAMETER-typed value reference wrapping the given raw value.
 *
 * @param value the parameter name/value to reference
 * @return a new {@link ValueReference} of type PARAMETER
 */
public static ValueReference createParameter(String value) {
    return ValueReference.builder()
            .value(value)
            .valueType(ValueType.PARAMETER)
            .build();
}
/** A parameter reference serializes with @type "parameter" and the raw value. */
@Test
public void serializeParameter() throws IOException {
    assertJsonEqualsNonStrict(objectMapper.writeValueAsString(ValueReference.createParameter("Test")),
            "{\"@type\":\"parameter\",\"@value\":\"Test\"}");
}
/**
 * Maps an InterSystems IRIS column definition to a SeaTunnel physical column.
 * The IRIS data type (upper-cased) selects the SeaTunnel data type; character
 * and binary types additionally get an explicit column length (defaulting to
 * 1 when none is declared, per IRIS semantics — TODO confirm the default).
 * Unknown types raise a conversion error.
 */
@Override
public Column convert(BasicTypeDefine typeDefine) {
    Long typeDefineLength = typeDefine.getLength();
    PhysicalColumn.PhysicalColumnBuilder builder =
            PhysicalColumn.builder()
                    .name(typeDefine.getName())
                    .sourceType(typeDefine.getColumnType())
                    .columnLength(typeDefineLength)
                    .scale(typeDefine.getScale())
                    .nullable(typeDefine.isNullable())
                    .defaultValue(typeDefine.getDefaultValue())
                    .comment(typeDefine.getComment());
    String irisDataType = typeDefine.getDataType().toUpperCase();
    // fixed-size char/binary types fall back to length 1 when undeclared
    long charOrBinaryLength =
            Objects.nonNull(typeDefineLength) && typeDefineLength > 0 ? typeDefineLength : 1;
    switch (irisDataType) {
        case IRIS_NULL:
            builder.dataType(BasicType.VOID_TYPE);
            break;
        case IRIS_BIT:
            builder.dataType(BasicType.BOOLEAN_TYPE);
            break;
        // exact numeric types -> decimal, honoring declared precision/scale
        case IRIS_NUMERIC:
        case IRIS_MONEY:
        case IRIS_SMALLMONEY:
        case IRIS_NUMBER:
        case IRIS_DEC:
        case IRIS_DECIMAL:
            DecimalType decimalType;
            if (typeDefine.getPrecision() != null && typeDefine.getPrecision() > 0) {
                decimalType =
                        new DecimalType(
                                typeDefine.getPrecision().intValue(), typeDefine.getScale());
            } else {
                decimalType = new DecimalType(DEFAULT_PRECISION, DEFAULT_SCALE);
            }
            builder.dataType(decimalType);
            builder.columnLength(Long.valueOf(decimalType.getPrecision()));
            builder.scale(decimalType.getScale());
            break;
        case IRIS_INT:
        case IRIS_INTEGER:
        case IRIS_MEDIUMINT:
            builder.dataType(BasicType.INT_TYPE);
            break;
        case IRIS_ROWVERSION:
        case IRIS_BIGINT:
        case IRIS_SERIAL:
            builder.dataType(BasicType.LONG_TYPE);
            break;
        case IRIS_TINYINT:
            builder.dataType(BasicType.BYTE_TYPE);
            break;
        case IRIS_SMALLINT:
            builder.dataType(BasicType.SHORT_TYPE);
            break;
        case IRIS_FLOAT:
            builder.dataType(BasicType.FLOAT_TYPE);
            break;
        case IRIS_DOUBLE:
        case IRIS_REAL:
        case IRIS_DOUBLE_PRECISION:
            builder.dataType(BasicType.DOUBLE_TYPE);
            break;
        // bounded character types keep (or default) their declared length
        case IRIS_CHAR:
        case IRIS_CHAR_VARYING:
        case IRIS_CHARACTER_VARYING:
        case IRIS_NATIONAL_CHAR:
        case IRIS_NATIONAL_CHAR_VARYING:
        case IRIS_NATIONAL_CHARACTER:
        case IRIS_NATIONAL_CHARACTER_VARYING:
        case IRIS_NATIONAL_VARCHAR:
        case IRIS_NCHAR:
        case IRIS_SYSNAME:
        case IRIS_VARCHAR2:
        case IRIS_VARCHAR:
        case IRIS_NVARCHAR:
        case IRIS_UNIQUEIDENTIFIER:
        case IRIS_GUID:
        case IRIS_CHARACTER:
            builder.dataType(BasicType.STRING_TYPE);
            builder.columnLength(charOrBinaryLength);
            break;
        // unbounded text types are capped at Integer.MAX_VALUE
        case IRIS_NTEXT:
        case IRIS_CLOB:
        case IRIS_LONG_VARCHAR:
        case IRIS_LONG:
        case IRIS_LONGTEXT:
        case IRIS_MEDIUMTEXT:
        case IRIS_TEXT:
        case IRIS_LONGVARCHAR:
            builder.dataType(BasicType.STRING_TYPE);
            builder.columnLength(Long.valueOf(Integer.MAX_VALUE));
            break;
        case IRIS_DATE:
            builder.dataType(LocalTimeType.LOCAL_DATE_TYPE);
            break;
        case IRIS_TIME:
            builder.dataType(LocalTimeType.LOCAL_TIME_TYPE);
            break;
        case IRIS_DATETIME:
        case IRIS_DATETIME2:
        case IRIS_SMALLDATETIME:
        case IRIS_TIMESTAMP:
        case IRIS_TIMESTAMP2:
        case IRIS_POSIXTIME:
            builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
            break;
        // bounded binary types keep (or default) their declared length
        case IRIS_BINARY:
        case IRIS_BINARY_VARYING:
        case IRIS_RAW:
        case IRIS_VARBINARY:
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            builder.columnLength(charOrBinaryLength);
            break;
        // unbounded binary types are capped at Integer.MAX_VALUE
        case IRIS_LONGVARBINARY:
        case IRIS_BLOB:
        case IRIS_IMAGE:
        case IRIS_LONG_BINARY:
        case IRIS_LONG_RAW:
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            builder.columnLength(Long.valueOf(Integer.MAX_VALUE));
            break;
        default:
            throw CommonError.convertToSeaTunnelTypeError(
                    DatabaseIdentifier.IRIS, irisDataType, typeDefine.getName());
    }
    return builder.build();
}
/**
 * datetime, timestamp and timestamp(6) all convert to LOCAL_DATE_TIME_TYPE,
 * preserving the declared scale and source column type.
 */
@Test
public void testConvertTimestamp() {
    BasicTypeDefine<Object> typeDefine =
            BasicTypeDefine.builder()
                    .name("test")
                    .columnType("datetime")
                    .dataType("datetime")
                    .build();
    Column column = IrisTypeConverter.INSTANCE.convert(typeDefine);
    Assertions.assertEquals(typeDefine.getName(), column.getName());
    Assertions.assertEquals(LocalTimeType.LOCAL_DATE_TIME_TYPE, column.getDataType());
    Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType());
    typeDefine =
            BasicTypeDefine.builder()
                    .name("test")
                    .columnType("timestamp")
                    .dataType("timestamp")
                    .build();
    column = IrisTypeConverter.INSTANCE.convert(typeDefine);
    Assertions.assertEquals(typeDefine.getName(), column.getName());
    Assertions.assertEquals(LocalTimeType.LOCAL_DATE_TIME_TYPE, column.getDataType());
    Assertions.assertEquals(typeDefine.getScale(), column.getScale());
    Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType());
    // explicit fractional-second scale must carry through
    typeDefine =
            BasicTypeDefine.builder()
                    .name("test")
                    .columnType("timestamp(6)")
                    .dataType("timestamp")
                    .scale(6)
                    .build();
    column = IrisTypeConverter.INSTANCE.convert(typeDefine);
    Assertions.assertEquals(typeDefine.getName(), column.getName());
    Assertions.assertEquals(LocalTimeType.LOCAL_DATE_TIME_TYPE, column.getDataType());
    Assertions.assertEquals(typeDefine.getScale(), column.getScale());
    Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType());
}
/** Returns the schema derived from the conversion context. */
public Schema getSchema() {
    final Schema schema = context.getSchema();
    return schema;
}
/** A proto map-of-primitives descriptor produces the expected Beam schema. */
@Test
public void testMapSchema() {
    ProtoDynamicMessageSchema schemaProvider = schemaFromDescriptor(MapPrimitive.getDescriptor());
    Schema schema = schemaProvider.getSchema();
    assertEquals(MAP_PRIMITIVE_SCHEMA, schema);
}
/**
 * Asserts the tracker finished its range: empty ranges pass trivially;
 * otherwise some offset must have been claimed and the last attempted offset
 * must reach the final position of the range.
 */
@Override
public void checkDone() throws IllegalStateException {
    final long rangeEnd = range.getTo();
    if (range.getFrom() == rangeEnd) {
        // empty range: nothing to claim, nothing to verify
        return;
    }
    checkState(
        lastAttemptedOffset != null,
        "Last attempted offset should not be null. No work was claimed in non-empty range %s.",
        range);
    checkState(
        lastAttemptedOffset >= rangeEnd - 1,
        "Last attempted offset was %s in range %s, claiming work in [%s, %s) was not attempted",
        lastAttemptedOffset,
        range,
        lastAttemptedOffset + 1,
        rangeEnd);
}
/** checkDone on a non-empty range with no claimed work must fail with the null-offset message. */
@Test
public void testDoneBeforeClaim() throws Exception {
    expected.expectMessage(
        "Last attempted offset should not be null. No work was claimed in non-empty range [100, 200)");
    OffsetRangeTracker tracker = new OffsetRangeTracker(new OffsetRange(100, 200));
    tracker.checkDone();
}
/**
 * Evaluates the bound expression against one row.
 *
 * @param data the row to test
 * @return whether the row satisfies the expression
 */
public boolean eval(StructLike data) {
    final EvalVisitor visitor = new EvalVisitor();
    return visitor.eval(data);
}
/**
 * Covers IN-predicate behavior: literal list construction (size/duplicates/
 * empty), evaluation over ints, type widening (long set vs int column,
 * int set vs double column), nested struct fields, and CharSequence vs Utf8
 * comparison semantics.
 */
@Test
public void testIn() {
    // literal list construction — duplicates are kept, empty lists allowed
    assertThat(in("s", 7, 8, 9).literals()).hasSize(3);
    assertThat(in("s", 7, 8.1, Long.MAX_VALUE).literals()).hasSize(3);
    assertThat(in("s", "abc", "abd", "abc").literals()).hasSize(3);
    assertThat(in("s").literals()).isEmpty();
    assertThat(in("s", 5).literals()).hasSize(1);
    assertThat(in("s", 5, 5).literals()).hasSize(2);
    assertThat(in("s", Arrays.asList(5, 5)).literals()).hasSize(2);
    assertThat(in("s", Collections.emptyList()).literals()).isEmpty();
    Evaluator evaluator = new Evaluator(STRUCT, in("x", 7, 8, Long.MAX_VALUE));
    assertThat(evaluator.eval(TestHelpers.Row.of(7, 8, null))).as("7 in [7, 8] => true").isTrue();
    assertThat(evaluator.eval(TestHelpers.Row.of(9, 8, null)))
        .as("9 in [7, 8] => false")
        .isFalse();
    // long literals against an int column: only in-range values can match
    Evaluator intSetEvaluator =
        new Evaluator(STRUCT, in("x", Long.MAX_VALUE, Integer.MAX_VALUE, Long.MIN_VALUE));
    assertThat(intSetEvaluator.eval(TestHelpers.Row.of(Integer.MAX_VALUE, 7.0, null)))
        .as("Integer.MAX_VALUE in [Integer.MAX_VALUE] => true")
        .isTrue();
    assertThat(intSetEvaluator.eval(TestHelpers.Row.of(6, 6.8, null)))
        .as("6 in [Integer.MAX_VALUE] => false")
        .isFalse();
    // int/double literals against a double column
    Evaluator integerEvaluator = new Evaluator(STRUCT, in("y", 7, 8, 9.1));
    assertThat(integerEvaluator.eval(TestHelpers.Row.of(0, 7.0, null)))
        .as("7.0 in [7, 8, 9.1] => true")
        .isTrue();
    assertThat(integerEvaluator.eval(TestHelpers.Row.of(7, 9.1, null)))
        .as("9.1 in [7, 8, 9.1] => true")
        .isTrue();
    assertThat(integerEvaluator.eval(TestHelpers.Row.of(6, 6.8, null)))
        .as("6.8 in [7, 8, 9.1] => false")
        .isFalse();
    // deeply nested struct field reference
    Evaluator structEvaluator = new Evaluator(STRUCT, in("s1.s2.s3.s4.i", 7, 8, 9));
    assertThat(
            structEvaluator.eval(
                TestHelpers.Row.of(
                    7,
                    8,
                    null,
                    TestHelpers.Row.of(
                        TestHelpers.Row.of(TestHelpers.Row.of(TestHelpers.Row.of(7)))))))
        .as("7 in [7, 8, 9] => true")
        .isTrue();
    assertThat(
            structEvaluator.eval(
                TestHelpers.Row.of(
                    6,
                    8,
                    null,
                    TestHelpers.Row.of(
                        TestHelpers.Row.of(TestHelpers.Row.of(TestHelpers.Row.of(6)))))))
        .as("6 in [7, 8, 9] => false")
        .isFalse();
    // Utf8 values must compare equal to String literals
    StructType charSeqStruct = StructType.of(required(34, "s", Types.StringType.get()));
    Evaluator charSeqEvaluator = new Evaluator(charSeqStruct, in("s", "abc", "abd", "abc"));
    assertThat(charSeqEvaluator.eval(TestHelpers.Row.of(new Utf8("abc"))))
        .as("utf8(abc) in [string(abc), string(abd)] => true")
        .isTrue();
    assertThat(charSeqEvaluator.eval(TestHelpers.Row.of(new Utf8("abcd"))))
        .as("utf8(abcd) in [string(abc), string(abd)] => false")
        .isFalse();
}
@Override public List<OptExpression> transform(OptExpression input, OptimizerContext context) { LogicalOlapScanOperator logicalOlapScanOperator = (LogicalOlapScanOperator) input.getOp(); LogicalOlapScanOperator prunedOlapScanOperator = null; if (logicalOlapScanOperator.getSelectedPartitionId() == null) { prunedOlapScanOperator = OptOlapPartitionPruner.prunePartitions(logicalOlapScanOperator); } else { // do merge pruned partitions with new pruned partitions prunedOlapScanOperator = OptOlapPartitionPruner.mergePartitionPrune(logicalOlapScanOperator); } Utils.setOpAppliedRule(prunedOlapScanOperator, Operator.OP_PARTITION_PRUNE_BIT); return Lists.newArrayList(OptExpression.create(prunedOlapScanOperator, input.getInputs())); }
/**
 * Builds a date-range-partitioned OLAP table (5 partitions over quarterly
 * date boundaries) with a dealDate BETWEEN-style predicate and verifies the
 * prune rule selects exactly the 3 overlapping partitions.
 */
@Test
public void transform1(@Mocked OlapTable olapTable, @Mocked RangePartitionInfo partitionInfo) {
    FeConstants.runningUnitTest = true;
    Partition part1 = new Partition(1, "p1", null, null);
    Partition part2 = new Partition(2, "p2", null, null);
    Partition part3 = new Partition(3, "p3", null, null);
    Partition part4 = new Partition(4, "p4", null, null);
    Partition part5 = new Partition(5, "p5", null, null);
    List<Column> columns = Lists.newArrayList(
            new Column("dealDate", Type.DATE, false)
    );
    List<ColumnId> columnNames = Lists.newArrayList(ColumnId.create(columns.get(0).getName()));
    // quarterly partition key ranges: [2019-11-01 .. 2021-02-01)
    Map<Long, Range<PartitionKey>> keyRange = Maps.newHashMap();
    PartitionKey p1 = new PartitionKey();
    p1.pushColumn(new DateLiteral(2019, 11, 1), PrimitiveType.DATE);
    PartitionKey p2 = new PartitionKey();
    p2.pushColumn(new DateLiteral(2020, 2, 1), PrimitiveType.DATE);
    PartitionKey p3 = new PartitionKey();
    p3.pushColumn(new DateLiteral(2020, 5, 1), PrimitiveType.DATE);
    PartitionKey p4 = new PartitionKey();
    p4.pushColumn(new DateLiteral(2020, 8, 1), PrimitiveType.DATE);
    PartitionKey p5 = new PartitionKey();
    p5.pushColumn(new DateLiteral(2020, 11, 1), PrimitiveType.DATE);
    PartitionKey p6 = new PartitionKey();
    p6.pushColumn(new DateLiteral(2021, 2, 1), PrimitiveType.DATE);
    keyRange.put(1L, Range.closed(p1, p2));
    keyRange.put(2L, Range.closed(p2, p3));
    keyRange.put(3L, Range.closed(p3, p4));
    keyRange.put(4L, Range.closed(p4, p5));
    keyRange.put(5L, Range.closed(p5, p6));
    ColumnRefFactory columnRefFactory = new ColumnRefFactory();
    ColumnRefOperator column1 = columnRefFactory.create("dealDate", ScalarType.DATE, false);
    Map<ColumnRefOperator, Column> scanColumnMap = Maps.newHashMap();
    scanColumnMap.put(column1, new Column("dealDate", Type.DATE, false));
    Map<Column, ColumnRefOperator> scanMetaColMap = Maps.newHashMap();
    scanMetaColMap.put(new Column("dealDate", Type.DATE, false), column1);
    // predicate: dealDate >= 2020-06-01 AND dealDate <= 2020-12-01 (overlaps p3, p4, p5)
    BinaryPredicateOperator binaryPredicateOperator1 =
            new BinaryPredicateOperator(BinaryType.GE, column1,
                    ConstantOperator.createDate(LocalDateTime.of(2020, 6, 1, 0, 0, 0)));
    BinaryPredicateOperator binaryPredicateOperator2 =
            new BinaryPredicateOperator(BinaryType.LE, column1,
                    ConstantOperator.createDate(LocalDateTime.of(2020, 12, 1, 0, 0, 0)));
    ScalarOperator predicate = Utils.compoundAnd(binaryPredicateOperator1, binaryPredicateOperator2);
    LogicalOlapScanOperator operator =
            new LogicalOlapScanOperator(olapTable, scanColumnMap, scanMetaColMap, null, -1, predicate);
    operator.setPredicate(null);
    new Expectations() {
        {
            olapTable.getPartitionInfo();
            result = partitionInfo;
            partitionInfo.isRangePartition();
            result = true;
            partitionInfo.getIdToRange(false);
            result = keyRange;
            partitionInfo.getPartitionColumns((Map<ColumnId, Column>) any);
            result = columns;
            olapTable.getPartitions();
            result = Lists.newArrayList(part1, part2, part3, part4, part5);
            minTimes = 0;
            olapTable.getPartition(1);
            result = part1;
            minTimes = 0;
            olapTable.getPartition(2);
            result = part2;
            minTimes = 0;
            olapTable.getPartition(3);
            result = part3;
            minTimes = 0;
            olapTable.getPartition(4);
            result = part4;
            minTimes = 0;
            olapTable.getPartition(5);
            result = part5;
            minTimes = 0;
        }
    };
    PartitionPruneRule rule = new PartitionPruneRule();
    assertNull(operator.getSelectedPartitionId());
    OptExpression optExpression =
            rule.transform(new OptExpression(operator), new OptimizerContext(new Memo(), columnRefFactory)).get(0);
    // partitions p3, p4, p5 overlap the predicate range
    assertEquals(3, ((LogicalOlapScanOperator) optExpression.getOp()).getSelectedPartitionId().size());
}
/**
 * Adapts any {@link StateRestoreCallback} flavor to the record-batching interface.
 *
 * @param restoreCallback the callback to adapt; must not be null
 * @return the callback itself if it already batches records, otherwise a lambda
 *         bridging to the batching or single-record API
 */
public static RecordBatchingStateRestoreCallback adapt(final StateRestoreCallback restoreCallback) {
    Objects.requireNonNull(restoreCallback, "stateRestoreCallback must not be null");
    // Already record-batching: no adaptation needed.
    if (restoreCallback instanceof RecordBatchingStateRestoreCallback) {
        return (RecordBatchingStateRestoreCallback) restoreCallback;
    }
    // Key/value batching: collapse records to KeyValue pairs and delegate once per batch.
    if (restoreCallback instanceof BatchingStateRestoreCallback) {
        return records -> {
            final List<KeyValue<byte[], byte[]>> keyValues = new ArrayList<>();
            for (final ConsumerRecord<byte[], byte[]> record : records) {
                keyValues.add(new KeyValue<>(record.key(), record.value()));
            }
            ((BatchingStateRestoreCallback) restoreCallback).restoreAll(keyValues);
        };
    }
    // Plain callback: replay each record individually.
    return records -> {
        for (final ConsumerRecord<byte[], byte[]> record : records) {
            restoreCallback.restore(record.key(), record.value());
        }
    };
}
// The adapter produced for a plain StateRestoreCallback must reject the
// single-record restore(byte[], byte[]) entry point.
@Test
public void shouldThrowOnRestore() {
    assertThrows(UnsupportedOperationException.class,
        () -> adapt(mock(StateRestoreCallback.class)).restore(null, null));
}
/**
 * Builds the grouping-key struct type shared by all given partition specs,
 * projecting only the field ids active in every spec for the schema.
 */
public static StructType groupingKeyType(Schema schema, Collection<PartitionSpec> specs) {
    return buildPartitionProjectionType("grouping key", specs, commonActiveFieldIds(schema, specs));
}
// A partition field renamed in a V1 table ("p1" -> "p2") must surface under its
// latest name in the grouping key type; "category" is not common to both specs.
@Test
public void testGroupingKeyTypeWithRenamesInV1Table() {
    PartitionSpec initialSpec = PartitionSpec.builderFor(SCHEMA).identity("data", "p1").build();
    TestTables.TestTable table =
        TestTables.create(tableDir, "test", SCHEMA, initialSpec, V1_FORMAT_VERSION);
    table.updateSpec().addField("category").commit();
    table.updateSpec().renameField("p1", "p2").commit();
    StructType expectedType = StructType.of(NestedField.optional(1000, "p2", Types.StringType.get()));
    StructType actualType = Partitioning.groupingKeyType(table.schema(), table.specs().values());
    assertThat(actualType).isEqualTo(expectedType);
}
/**
 * Wraps every configured measure computer, validates their input/output metrics,
 * then stores them — ordered via sortComputers — in the holder.
 */
@Override
public void execute(Context context) {
    List<MeasureComputerWrapper> wrappers =
        Arrays.stream(measureComputers).map(ToMeasureWrapper.INSTANCE).toList();
    validateMetrics(wrappers);
    measureComputersHolder.setMeasureComputers(sortComputers(wrappers));
}
// A computer that declares the core metric "ncloc" as output must be rejected
// during the validation phase of the step.
@Test
public void fail_with_ISE_when_output_metric_is_a_core_metric() {
    assertThatThrownBy(() -> {
        MeasureComputer[] computers =
            new MeasureComputer[] {newMeasureComputer(array(NEW_METRIC_4), array(NCLOC_KEY))};
        ComputationStep underTest =
            new LoadMeasureComputersStep(holder, array(new TestMetrics()), computers);
        underTest.execute(new TestComputationStepContext());
    })
        .isInstanceOf(IllegalStateException.class)
        .hasMessage("Metric 'ncloc' cannot be used as an output metric because it's a core metric");
}
/**
 * Expected Shortfall of a single position over its log returns at the configured
 * confidence level.
 *
 * @return series.zero() for a null or not-yet-closed position
 */
@Override
public Num calculate(BarSeries series, Position position) {
    if (position == null || position.getEntry() == null || position.getExit() == null) {
        return series.zero();
    }
    Returns returns = new Returns(series, position, Returns.ReturnType.LOG);
    return calculateES(returns, confidence);
}
// With a single losing return in the tail, Expected Shortfall equals that
// return: buy at 104, sell at 90 -> log(90/104).
@Test
public void calculateWithASimplePosition() {
    // if only one position in tail, VaR = ES
    series = new MockBarSeries(numFunction, 100d, 104d, 90d, 100d, 95d, 105d);
    TradingRecord tradingRecord =
        new BaseTradingRecord(Trade.buyAt(0, series), Trade.sellAt(2, series));
    AnalysisCriterion esCriterion = getCriterion();
    assertNumEquals(numOf(Math.log(90d / 104)), esCriterion.calculate(series, tradingRecord));
}
/**
 * Renders this rule as {@code MappingRule{matcher=<matcher>, action=<action>}}.
 */
@Override
public String toString() {
    // Same output as string concatenation, built incrementally.
    StringBuilder text = new StringBuilder("MappingRule{");
    text.append("matcher=").append(matcher);
    text.append(", action=").append(action);
    text.append('}');
    return text.toString();
}
// toString() must embed the matcher's and action's own string forms verbatim.
@Test
public void testToStrings() {
    MappingRuleAction action = new MappingRuleActions.PlaceToQueueAction("queue", true);
    MappingRuleMatcher matcher = MappingRuleMatchers.createUserMatcher("bob");
    MappingRule rule = new MappingRule(matcher, action);
    assertEquals("MappingRule{matcher=" + matcher.toString() +
        ", action=" + action.toString() + "}", rule.toString());
}
/**
 * Tells whether the given endpoint URL is a NATS endpoint this task can consume.
 *
 * @param endpointUrl candidate endpoint; may be null
 * @return true only for non-null URLs matching ENDPOINT_PATTERN_STRING
 */
public static boolean acceptEndpoint(String endpointUrl) {
    // Guard clause instead of a compound boolean expression.
    if (endpointUrl == null) {
        return false;
    }
    return endpointUrl.matches(ENDPOINT_PATTERN_STRING);
}
// Both host:port and bare-host NATS URLs with a topic path must be accepted.
@Test
public void testAcceptEndpoint() {
    AsyncTestSpecification specification = new AsyncTestSpecification();
    NATSMessageConsumptionTask task = new NATSMessageConsumptionTask(specification);
    assertTrue(NATSMessageConsumptionTask.acceptEndpoint("nats://localhost:4222/testTopic"));
    assertTrue(NATSMessageConsumptionTask.acceptEndpoint("nats://mynats.acme.com/testTopic"));
}
// Snapshots the currently registered listeners into a ProducerListeners wrapper;
// listeners added after this call (e.g. mid-cycle) are not part of the snapshot.
ProducerListeners listeners() {
    return new ProducerListeners(eventListeners.toArray(new HollowProducerEventListener[0]));
}
// A listener registered from within a cycle callback must not receive events for
// the in-flight cycle (listeners() snapshots), but must for the next cycle.
@Test
public void testAddDuringCycle() {
    ProducerListenerSupport ls = new ProducerListenerSupport();

    // Counts cycle start/complete events it observes.
    class SecondCycleListener implements CycleListener {
        int cycleStart;
        int cycleComplete;

        @Override
        public void onCycleSkip(CycleSkipReason reason) {
        }

        @Override
        public void onNewDeltaChain(long version) {
        }

        @Override
        public void onCycleStart(long version) {
            cycleStart++;
        }

        @Override
        public void onCycleComplete(Status status, HollowProducer.ReadState rs, long version, Duration elapsed) {
            cycleComplete++;
        }
    }

    // Registers a second listener while handling onCycleStart.
    class FirstCycleListener extends SecondCycleListener {
        private SecondCycleListener scl = new SecondCycleListener();

        @Override
        public void onCycleStart(long version) {
            super.onCycleStart(version);
            ls.addListener(scl);
        }
    }
    FirstCycleListener fcl = new FirstCycleListener();
    ls.addListener(fcl);

    // First cycle: only fcl is in the snapshot; scl is added mid-cycle.
    ProducerListenerSupport.ProducerListeners s = ls.listeners();
    s.fireCycleStart(1);
    s.fireCycleComplete(new Status.StageWithStateBuilder());
    Assert.assertEquals(1, fcl.cycleStart);
    Assert.assertEquals(1, fcl.cycleComplete);
    // The mid-cycle addition saw no events in this cycle.
    Assert.assertEquals(0, fcl.scl.cycleStart);
    Assert.assertEquals(0, fcl.scl.cycleComplete);

    // Second cycle: a fresh snapshot now includes both listeners.
    s = ls.listeners();
    s.fireCycleStart(1);
    s.fireCycleComplete(new Status.StageWithStateBuilder());
    Assert.assertEquals(2, fcl.cycleStart);
    Assert.assertEquals(2, fcl.cycleComplete);
    Assert.assertEquals(1, fcl.scl.cycleStart);
    Assert.assertEquals(1, fcl.scl.cycleComplete);
}
/**
 * Returns the sub-list of [start, end) with a step of 1; delegates to the
 * step-aware overload.
 */
public static <T> List<T> sub(List<T> list, int start, int end) {
    return sub(list, start, end, 1);
}
// The returned sub-list is independent of the source: mutating it must leave
// the original list untouched.
@Test
public void subTest() {
    final List<Integer> of = ListUtil.of(1, 2, 3, 4);
    final List<Integer> sub = ListUtil.sub(of, 2, 4);
    sub.remove(0);

    // 对子列表操作不影响原列表 (operations on the sub-list do not affect the original)
    assertEquals(4, of.size());
    assertEquals(1, sub.size());
}
/**
 * Fans out removeFromClusterNodeLabels to every active sub-cluster concurrently
 * and aggregates per-sub-cluster status lines into the response entity.
 *
 * @param oldNodeLabels labels to remove; must be non-null and non-empty
 * @param hsr           originating request, cloned before fan-out
 * @throws IllegalArgumentException when oldNodeLabels is null/empty
 * @throws Exception wrapped IO errors on sub-cluster lookup or remote failures
 */
@Override
public Response removeFromClusterNodeLabels(Set<String> oldNodeLabels,
    HttpServletRequest hsr) throws Exception {
    if (CollectionUtils.isEmpty(oldNodeLabels)) {
        routerMetrics.incrRemoveFromClusterNodeLabelsFailedRetrieved();
        RouterAuditLogger.logFailure(getUser().getShortUserName(), REMOVE_FROM_CLUSTERNODELABELS,
            UNKNOWN, TARGET_WEB_SERVICE, "Parameter error, the oldNodeLabels is null or empty.");
        throw new IllegalArgumentException("Parameter error, the oldNodeLabels is null or empty.");
    }

    try {
        long startTime = clock.getTime();
        Collection<SubClusterInfo> subClustersActives = federationFacade.getActiveSubClusters();
        // Clone the request so each concurrent invocation gets its own copy.
        final HttpServletRequest hsrCopy = clone(hsr);
        Class[] argsClasses = new Class[]{Set.class, HttpServletRequest.class};
        Object[] args = new Object[]{oldNodeLabels, hsrCopy};
        ClientMethod remoteMethod = new ClientMethod("removeFromClusterNodeLabels", argsClasses, args);
        Map<SubClusterInfo, Response> responseInfoMap =
            invokeConcurrent(subClustersActives, remoteMethod, Response.class);
        StringBuilder buffer = new StringBuilder();
        // SubCluster-0:SUCCESS,SubCluster-1:SUCCESS
        responseInfoMap.forEach((subClusterInfo, response) ->
            buildAppendMsg(subClusterInfo, buffer, response));
        long stopTime = clock.getTime();
        RouterAuditLogger.logSuccess(getUser().getShortUserName(),
            REMOVE_FROM_CLUSTERNODELABELS, TARGET_WEB_SERVICE);
        routerMetrics.succeededRemoveFromClusterNodeLabelsRetrieved(stopTime - startTime);
        return Response.status(Status.OK).entity(buffer.toString()).build();
    } catch (NotFoundException e) {
        // Active sub-cluster lookup failed.
        routerMetrics.incrRemoveFromClusterNodeLabelsFailedRetrieved();
        RouterAuditLogger.logFailure(getUser().getShortUserName(),
            REMOVE_FROM_CLUSTERNODELABELS, UNKNOWN, TARGET_WEB_SERVICE, e.getLocalizedMessage());
        RouterServerUtil.logAndThrowIOException("get all active sub cluster(s) error.", e);
    } catch (YarnException e) {
        routerMetrics.incrRemoveFromClusterNodeLabelsFailedRetrieved();
        RouterAuditLogger.logFailure(getUser().getShortUserName(),
            REMOVE_FROM_CLUSTERNODELABELS, UNKNOWN, TARGET_WEB_SERVICE, e.getLocalizedMessage());
        RouterServerUtil.logAndThrowIOException("removeFromClusterNodeLabels with yarn error.", e);
    }

    // logAndThrowIOException always throws, so reaching here means no handler fired.
    routerMetrics.incrRemoveFromClusterNodeLabelsFailedRetrieved();
    throw new RuntimeException("removeFromClusterNodeLabels Failed.");
}
// Removing a label with a null request must still succeed and report the
// per-sub-cluster status in the response body.
@Test
public void testRemoveFromClusterNodeLabels2() throws Exception {
    Set<String> oldNodeLabels = Sets.newHashSet();
    oldNodeLabels.add("A0");
    Response response = interceptor.removeFromClusterNodeLabels(oldNodeLabels, null);
    Assert.assertNotNull(response);
    Object entityObj = response.getEntity();
    Assert.assertNotNull(entityObj);
    String expectedValue = "SubCluster-0:SUCCESS,";
    String entity = String.valueOf(entityObj);
    Assert.assertTrue(entity.contains(expectedValue));
}
/**
 * Converts an Instant to a LocalDateTime in the system default time zone.
 */
public static LocalDateTime of(Instant instant) {
    return of(instant, ZoneId.systemDefault());
}
// Converting a parsed local DateTime must round-trip to the same ISO string;
// converting the same epoch millis as UTC must differ (no zone suffix implies local time).
@Test
public void ofTest() {
    final String dateStr = "2020-01-23T12:23:56";
    final DateTime dt = DateUtil.parse(dateStr);
    LocalDateTime of = LocalDateTimeUtil.of(dt);
    assertNotNull(of);
    assertEquals(dateStr, of.toString());

    // 不加Z是标准当地时间,与UTC时间不同 (without a trailing Z this is local time, distinct from UTC)
    of = LocalDateTimeUtil.ofUTC(dt.getTime());
    assertNotEquals(dateStr, of.toString());
}
/**
 * Collects every leaf group reachable from root via the visitNode traversal.
 */
public static List<Group> enumerateFrom(Group root) {
    List<Group> leaves = new ArrayList<>();
    visitNode(root, leaves);
    return leaves;
}
// Only the deepest group ("toad") is a leaf in root -> child -> grandchild; the
// intermediate groups must not appear in the enumeration.
@Test
void singleLeafIsEnumeratedInNestedCase() throws Exception {
    Group g = new Group(0, "donkeykong", dummyDistribution());
    Group child = new Group(1, "mario", dummyDistribution());
    child.addSubGroup(new Group(2, "toad"));
    g.addSubGroup(child);
    List<Group> leaves = LeafGroups.enumerateFrom(g);
    assertThat(leaves.size(), is(1));
    assertThat(leaves.get(0).getName(), is("toad"));
}
// Single-column metadata: only column index 1 is valid, and its values are
// always reported as signed.
@Override
public boolean isSigned(final int column) {
    Preconditions.checkArgument(1 == column);
    return true;
}
// Column 1 of the metadata must report as signed.
@Test
void assertIsSigned() throws SQLException {
    assertTrue(actualMetaData.isSigned(1));
}
/**
 * Resolves the window size and inner deserializer from either the constructor or
 * configuration — exactly one source must supply each.
 *
 * @throws IllegalArgumentException if window size or inner deserializer is set in
 *         both places, or in neither
 * @throws ConfigException if the configured windowed.inner.class.serde class
 *         cannot be found
 */
@SuppressWarnings("unchecked")
@Override
public void configure(final Map<String, ?> configs, final boolean isKey) {
    //check to see if the window size config is set and the window size is already set from the constructor
    // window.size.ms may arrive as a String or a Long depending on how the config was supplied.
    final Long configWindowSize;
    if (configs.get(StreamsConfig.WINDOW_SIZE_MS_CONFIG) instanceof String) {
        configWindowSize = Long.parseLong((String) configs.get(StreamsConfig.WINDOW_SIZE_MS_CONFIG));
    } else {
        configWindowSize = (Long) configs.get(StreamsConfig.WINDOW_SIZE_MS_CONFIG);
    }
    // Exactly one of constructor / config must provide the window size.
    if (windowSize != null && configWindowSize != null) {
        throw new IllegalArgumentException("Window size should not be set in both the time windowed deserializer constructor and the window.size.ms config");
    } else if (windowSize == null && configWindowSize == null) {
        throw new IllegalArgumentException("Window size needs to be set either through the time windowed deserializer " +
            "constructor or the window.size.ms config but not both");
    } else {
        windowSize = windowSize == null ? configWindowSize : windowSize;
    }

    // Instantiate the configured inner serde, if any.
    final String windowedInnerClassSerdeConfig = (String) configs.get(StreamsConfig.WINDOWED_INNER_CLASS_SERDE);
    Serde<T> windowInnerClassSerde = null;
    if (windowedInnerClassSerdeConfig != null) {
        try {
            windowInnerClassSerde = Utils.newInstance(windowedInnerClassSerdeConfig, Serde.class);
        } catch (final ClassNotFoundException e) {
            throw new ConfigException(StreamsConfig.WINDOWED_INNER_CLASS_SERDE, windowedInnerClassSerdeConfig,
                "Serde class " + windowedInnerClassSerdeConfig + " could not be found.");
        }
    }

    // If both constructor and config supplied an inner deserializer they must agree
    // (compared by class name); if only the config supplied one, adopt it.
    if (inner != null && windowedInnerClassSerdeConfig != null) {
        if (!inner.getClass().getName().equals(windowInnerClassSerde.deserializer().getClass().getName())) {
            throw new IllegalArgumentException("Inner class deserializer set using constructor " +
                "(" + inner.getClass().getName() + ")" +
                " is different from the one set in windowed.inner.class.serde config " +
                "(" + windowInnerClassSerde.deserializer().getClass().getName() + ").");
        }
    } else if (inner == null && windowedInnerClassSerdeConfig == null) {
        throw new IllegalArgumentException("Inner class deserializer should be set either via constructor " +
            "or via the windowed.inner.class.serde config");
    } else if (inner == null)
        inner = windowInnerClassSerde.deserializer();
}
// A windowed.inner.class.serde value naming a non-existent class must surface
// as a ConfigException at configure time.
@Test
public void shouldThrowConfigExceptionWhenInvalidWindowedInnerClassDeserialiserSupplied() {
    props.put(StreamsConfig.WINDOWED_INNER_CLASS_SERDE, "some.non.existent.class");
    assertThrows(ConfigException.class, () -> timeWindowedDeserializer.configure(props, false));
}
/**
 * Gets or registers the total-comment counter for the given meter name, tagged
 * with the TOTAL_COMMENT_SCENE scene.
 */
public static Counter totalCommentCounter(MeterRegistry registry, String name) {
    return counter(registry, name, Tag.of(SCENE, TOTAL_COMMENT_SCENE));
}
// The counter must accumulate increments and carry both the scene tag and the
// common metrics tag.
@Test
void totalCommentCounter() {
    MeterRegistry meterRegistry = new SimpleMeterRegistry();
    MeterUtils.totalCommentCounter(meterRegistry, "content.halo.run.posts.fake-post")
        .increment(3);
    RequiredSearch requiredSearch = meterRegistry.get("content.halo.run.posts.fake-post");
    assertThat(requiredSearch.counter().count()).isEqualTo(3);
    Meter.Id id = requiredSearch.counter().getId();
    assertThat(id.getTag(MeterUtils.SCENE)).isEqualTo(MeterUtils.TOTAL_COMMENT_SCENE);
    assertThat(id.getTag(MeterUtils.METRICS_COMMON_TAG.getKey()))
        .isEqualTo(MeterUtils.METRICS_COMMON_TAG.getValue());
}
/**
 * Maps each consumer to the hash ranges it owns: consecutive ring points define
 * half-open boundaries, read under the ring's read lock for a consistent view.
 */
@Override
public Map<Consumer, List<Range>> getConsumerKeyHashRanges() {
    Map<Consumer, List<Range>> result = new LinkedHashMap<>();
    rwLock.readLock().lock();
    try {
        // Each ring entry closes the range that began after the previous entry.
        int start = 0;
        for (Map.Entry<Integer, List<Consumer>> entry: hashRing.entrySet()) {
            for (Consumer consumer: entry.getValue()) {
                result.computeIfAbsent(consumer, key -> new ArrayList<>())
                        .add(Range.of(start, entry.getKey()));
            }
            start = entry.getKey() + 1;
        }
    } finally {
        rwLock.readLock().unlock();
    }
    return result;
}
// Pins the exact hash-range assignment produced by the consistent-hash ring for
// three consumers at 3 points each; any hashing change will break these constants.
@Test
public void testGetConsumerKeyHashRanges() throws BrokerServiceException.ConsumerAssignException {
    ConsistentHashingStickyKeyConsumerSelector selector = new ConsistentHashingStickyKeyConsumerSelector(3);
    List<String> consumerName = Arrays.asList("consumer1", "consumer2", "consumer3");
    List<Consumer> consumers = new ArrayList<>();
    for (String s : consumerName) {
        Consumer consumer = mock(Consumer.class);
        when(consumer.consumerName()).thenReturn(s);
        selector.addConsumer(consumer);
        consumers.add(consumer);
    }

    // Expected ranges derived from the fixed hash function over the consumer names.
    Map<Consumer, List<Range>> expectedResult = new HashMap<>();
    expectedResult.put(consumers.get(0), Arrays.asList(
            Range.of(119056335, 242013991),
            Range.of(722195657, 1656011842),
            Range.of(1707482098, 1914695766)));
    expectedResult.put(consumers.get(1), Arrays.asList(
            Range.of(0, 90164503),
            Range.of(90164504, 119056334),
            Range.of(382436668, 722195656)));
    expectedResult.put(consumers.get(2), Arrays.asList(
            Range.of(242013992, 242377547),
            Range.of(242377548, 382436667),
            Range.of(1656011843, 1707482097)));
    // Remove each matched entry so the final size check proves full coverage.
    for (Map.Entry<Consumer, List<Range>> entry : selector.getConsumerKeyHashRanges().entrySet()) {
        System.out.println(entry.getValue());
        Assert.assertEquals(entry.getValue(), expectedResult.get(entry.getKey()));
        expectedResult.remove(entry.getKey());
    }
    Assert.assertEquals(expectedResult.size(), 0);
}
public Status upload(String localFilePath, String remoteFilePath) { // Preconditions.checkArgument(remoteFilePath.startsWith(location), remoteFilePath); // get md5usm of local file File file = new File(localFilePath); String md5sum; try { md5sum = DigestUtils.md5Hex(new FileInputStream(file)); } catch (FileNotFoundException e) { return new Status(ErrCode.NOT_FOUND, "file " + localFilePath + " does not exist"); } catch (IOException e) { return new Status(ErrCode.COMMON_ERROR, "failed to get md5sum of file: " + localFilePath); } Preconditions.checkState(!Strings.isNullOrEmpty(md5sum)); String tmpRemotePath = assembleFileNameWithSuffix(remoteFilePath, SUFFIX_TMP_FILE); String finalRemotePath = assembleFileNameWithSuffix(remoteFilePath, md5sum); LOG.debug("get md5sum of file: {}. tmp remote path: {}. final remote path: {}", localFilePath, tmpRemotePath, finalRemotePath); // this may be a retry, so we should first delete remote file Status st = storage.delete(tmpRemotePath); if (!st.ok()) { return st; } st = storage.delete(finalRemotePath); if (!st.ok()) { return st; } // upload tmp file st = storage.upload(localFilePath, tmpRemotePath); if (!st.ok()) { return st; } // rename tmp file with checksum named file st = storage.rename(tmpRemotePath, finalRemotePath); if (!st.ok()) { return st; } LOG.info("finished to upload local file {} to remote file: {}", localFilePath, finalRemotePath); return st; }
// With all storage operations mocked to succeed, uploading a small temp file
// must return OK; the temp file is removed even on assertion failure.
@Test
public void testUpload() {
    new Expectations() {
        {
            storage.upload(anyString, anyString);
            minTimes = 0;
            result = Status.OK;

            storage.rename(anyString, anyString);
            minTimes = 0;
            result = Status.OK;

            storage.delete(anyString);
            minTimes = 0;
            result = Status.OK;
        }
    };

    repo = new Repository(10000, "repo", false, location, storage);
    // Write a one-byte local file to upload.
    String localFilePath = "./tmp_" + System.currentTimeMillis();
    try (PrintWriter out = new PrintWriter(localFilePath)) {
        out.print("a");
    } catch (FileNotFoundException e) {
        e.printStackTrace();
        Assert.fail();
    }
    try {
        String remoteFilePath = location + "/remote_file";
        Status st = repo.upload(localFilePath, remoteFilePath);
        Assert.assertTrue(st.ok());
    } finally {
        File file = new File(localFilePath);
        file.delete();
    }
}
// Raw result payload carried by this JSON-RPC response.
public T getResult() {
    return result;
}
// A web3_sha3 JSON-RPC response must deserialize its hex hash into getResult().
@Test
public void testWeb3Sha3() throws IOException {
    buildResponse(
            "{\n"
                    + "  \"id\":64,\n"
                    + "  \"jsonrpc\": \"2.0\",\n"
                    + "  \"result\": "
                    + "\"0x47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad\"\n"
                    + "}");

    Web3Sha3 web3Sha3 = deserialiseResponse(Web3Sha3.class);
    assertEquals(
            web3Sha3.getResult(),
            ("0x47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad"));
}
/**
 * Renders checkin-message text, replacing every regex match with a tracking-tool
 * link and HTML-escaping everything else.
 *
 * Returns "" for blank input; with no regex/link configured, returns the escaped
 * text; on an invalid regex, logs a warning and returns the text unmodified.
 */
@Override
public String render(String text) {
    if (StringUtils.isBlank(text)) {
        return "";
    }
    if (regex.isEmpty() || link.isEmpty()) {
        // No tracking tool configured: escape only.
        Comment comment = new Comment();
        comment.escapeAndAdd(text);
        return comment.render();
    }
    try {
        Matcher matcher = Pattern.compile(regex).matcher(text);
        // Walk the matches, escaping the gaps and linking the matches.
        int start = 0;
        Comment comment = new Comment();
        while (hasMatch(matcher)) {
            comment.escapeAndAdd(text.substring(start, matcher.start()));
            comment.add(dynamicLink(matcher));
            start = matcher.end();
        }
        // Trailing text after the last match.
        comment.escapeAndAdd(text.substring(start));
        return comment.render();
    } catch (PatternSyntaxException e) {
        LOGGER.warn("Illegal regular expression: {} - {}", regex, e.getMessage());
    }
    // NOTE: invalid regex falls through to the raw, unescaped text.
    return text;
}
// The configured link template is substituted verbatim (even if not a valid URL),
// with ${ID} replaced by the regex match.
@Test
public void shouldUseLinkFromConfigurationRegardlessOfItsValidity() throws Exception {
    String link = "aaa${ID}";
    String regex = "\\d+";
    trackingTool = new DefaultCommentRenderer(link, regex);
    String result = trackingTool.render("111: checkin message");
    assertThat(result, is("<a href=\"aaa111\" target=\"story_tracker\">111</a>: checkin message"));
}
// Mixes the URN into the superclass hash so keys differing only by URN
// hash differently.
@Override
public int hashCode() {
    int result = super.hashCode();
    result = 31 * result + urn.hashCode();
    return result;
}
// Keys with the same scheduler but different task URNs must not collide.
@Test
public void hashcode_fail_withDiffURN() {
    ScheduledTaskHandler handlerA = ScheduledTaskHandlerImpl.of(1, "MyScheduler", "MyTask");
    String myTaskURN = handlerA.toUrn();
    ScheduledTaskHandler handlerB = ScheduledTaskHandlerImpl.of(1, "MyScheduler", "MyTask2");
    String myTask2URN = handlerB.toUrn();
    ScheduledExecutorWaitNotifyKey keyA = new ScheduledExecutorWaitNotifyKey("myScheduler", myTaskURN);
    ScheduledExecutorWaitNotifyKey keyB = new ScheduledExecutorWaitNotifyKey("myScheduler", myTask2URN);
    assertNotEquals(keyA.hashCode(), keyB.hashCode());
}
@Override public void run() { Date now = new Date(); LOG.info("Application cleaner run at time {}", now); FederationStateStoreFacade facade = getGPGContext().getStateStoreFacade(); try { // Get the candidate list from StateStore before calling router Set<ApplicationId> allStateStoreApps = new HashSet<>(); List<ApplicationHomeSubCluster> response = facade.getApplicationsHomeSubCluster(); for (ApplicationHomeSubCluster app : response) { allStateStoreApps.add(app.getApplicationId()); } LOG.info("{} app entries in FederationStateStore", allStateStoreApps.size()); // Get the candidate list from Registry before calling router List<String> allRegistryApps = getRegistryClient().getAllApplications(); LOG.info("{} app entries in FederationRegistry", allStateStoreApps.size()); // Get the list of known apps from Router Set<ApplicationId> routerApps = getRouterKnownApplications(); LOG.info("{} known applications from Router", routerApps.size()); // Clean up StateStore entries Set<ApplicationId> toDelete = Sets.difference(allStateStoreApps, routerApps); LOG.info("Deleting {} applications from statestore", toDelete.size()); LOG.debug("Apps to delete: {}.", toDelete.stream().map(Object::toString).collect(Collectors.joining(","))); for (ApplicationId appId : toDelete) { try { LOG.debug("Deleting {} from statestore ", appId); facade.deleteApplicationHomeSubCluster(appId); } catch (Exception e) { LOG.error("deleteApplicationHomeSubCluster failed at application {}.", appId, e); } } // Clean up Registry entries for (String app : allRegistryApps) { ApplicationId appId = ApplicationId.fromString(app); if (!routerApps.contains(appId)) { LOG.debug("removing finished application entry for {}", app); getRegistryClient().removeAppFromRegistry(appId, true); } } } catch (Throwable e) { LOG.error("Application cleaner started at time {} fails. ", now, e); } }
// Apps known to the Router must survive cleanup in both StateStore and Registry;
// everything else must be purged.
@Test
public void testFederationStateStoreAppsCleanUp() throws YarnException {
    // Set first app to be still known by Router
    ApplicationId appId = appIds.get(0);
    routerAppIds.add(appId);

    // Another random app not in stateStore known by Router
    appId = ApplicationId.newInstance(100, 200);
    routerAppIds.add(appId);

    appCleaner.run();

    // Only one app should be left
    Assert.assertEquals(1,
        stateStore
            .getApplicationsHomeSubCluster(
                GetApplicationsHomeSubClusterRequest.newInstance())
            .getAppsHomeSubClusters().size());

    // The known app should not be cleaned in registry
    Assert.assertEquals(1, registryClient.getAllApplications().size());
}
/**
 * Writes the custom error page for the given status code, using the standard
 * HTTP reason phrase. The supplied message and showStacks parameters are not
 * used when rendering the page.
 */
@Override
protected void writeErrorPage(HttpServletRequest request, Writer writer, int code,
    String message, boolean showStacks) throws IOException {
    String defaultErrorMessage = HttpStatus.getMessage(code);
    String errorPage = replaceHtml(code, defaultErrorMessage);
    writer.write(errorPage);
}
// A 404 with a null message must still render the standard code and reason
// phrase ("Not Found") into the page.
@Test
public void shouldWriteErrorPageFor404WithMessage() throws Exception {
    errorHandler.writeErrorPage(request, writer, 404, null, false);
    verify(writer).write(captor.capture());
    String fileContents = captor.getValue();
    assertThat(fileContents, containsString("<h1>404</h1>"));
    assertThat(fileContents, containsString("<h2>Not Found</h2>"));
}
/**
 * Validates an encrypt rule configuration: the declared encryptor algorithms
 * first, then every table/column reference against those encryptors.
 */
@Override
public void check(final String databaseName, final EncryptRuleConfiguration ruleConfig,
    final Map<String, DataSource> dataSourceMap, final Collection<ShardingSphereRule> builtRules) {
    checkEncryptors(ruleConfig.getEncryptors());
    checkTables(databaseName, ruleConfig.getTables(), ruleConfig.getEncryptors());
}
// A column referencing an unregistered assisted-query encryptor must fail the
// checker with UnregisteredAlgorithmException.
@SuppressWarnings({"rawtypes", "unchecked"})
@Test
void assertCheckWhenConfigInvalidAssistColumn() {
    EncryptRuleConfiguration config = createInvalidAssistColumnConfiguration();
    RuleConfigurationChecker checker = OrderedSPILoader.getServicesByClass(
        RuleConfigurationChecker.class, Collections.singleton(config.getClass())).get(config.getClass());
    assertThrows(UnregisteredAlgorithmException.class,
        () -> checker.check("test", config, Collections.emptyMap(), Collections.emptyList()));
}
/**
 * Registers the Polaris data-changed initializer bean unless the application
 * already provides one.
 */
@Bean
@ConditionalOnMissingBean(PolarisDataChangedInit.class)
public DataChangedInit polarisDataChangedInit(final PolarisProperties polarisProperties,
    final ConfigFileService configFileService) {
    return new PolarisDataChangedInit(polarisProperties, configFileService);
}
// The factory method must produce a non-null initializer from mocked inputs.
@Test
public void testPolarisDataInit() {
    PolarisSyncConfiguration polarisListener = new PolarisSyncConfiguration();
    PolarisProperties polarisProperties = mock(PolarisProperties.class);
    ConfigFileService polarisConfigFileService = mock(ConfigFileService.class);
    assertNotNull(polarisListener.polarisDataChangedInit(polarisProperties, polarisConfigFileService));
}
/**
 * Returns a new Properties holding every entry of the input whose key is not in
 * keysToRemove. The input is never modified.
 *
 * @param properties source properties; must not be null
 * @throws IllegalArgumentException if properties is null
 */
public Properties apply(final Properties properties) {
    if (properties == null) {
        throw new IllegalArgumentException("properties must not be null");
    }
    // An empty input simply yields an empty result via the loop below.
    final Properties filtered = new Properties();
    for (final Map.Entry<Object, Object> entry : properties.entrySet()) {
        if (!keysToRemove.contains(entry.getKey())) {
            filtered.put(entry.getKey(), entry.getValue());
        }
    }
    return filtered;
}
// A Filter with no removal keys must pass every property through unchanged.
@Test
public void filtersNothingByDefault() {
    // Given
    Properties anyProperties = System.getProperties();
    Filter f = new Filter();

    // When
    Properties filtered = f.apply(anyProperties);

    // Then
    assertEquals(anyProperties, filtered);
    assertEquals(anyProperties.size(), filtered.size());
}
/**
 * Builds a generic key/value record for an INSERT VALUES statement.
 *
 * Column handling: an empty column list falls back to the schema's implicit
 * columns; every named column must exist (pseudo columns included) and must not
 * be a disallowed insert target. For tables, all primary-key columns require a
 * value. ROWTIME defaults to the injected clock when not supplied.
 *
 * @throws KsqlException on column-count mismatch, unknown column, disallowed
 *         column, or missing table primary-key value
 */
public KsqlGenericRecord build(
    final List<ColumnName> columnNames,
    final List<Expression> expressions,
    final LogicalSchema schema,
    final DataSourceType dataSourceType
) {
    final List<ColumnName> columns = columnNames.isEmpty()
        ? implicitColumns(schema)
        : columnNames;

    if (columns.size() != expressions.size()) {
        throw new KsqlException(
            "Expected a value for each column."
                + " Expected Columns: " + columnNames
                + ". Got " + expressions);
    }

    // Validate every target column against the schema extended with pseudo columns.
    final LogicalSchema schemaWithPseudoColumns = withPseudoColumns(schema);
    for (ColumnName col : columns) {
        if (!schemaWithPseudoColumns.findColumn(col).isPresent()) {
            throw new KsqlException("Column name " + col + " does not exist.");
        }
        if (SystemColumns.isDisallowedForInsertValues(col)) {
            throw new KsqlException("Inserting into column " + col + " is not allowed.");
        }
    }

    // Evaluate each expression into its column's value.
    final Map<ColumnName, Object> values = resolveValues(
        columns, expressions, schemaWithPseudoColumns, functionRegistry, config
    );

    // Tables require a value for every primary-key column.
    if (dataSourceType == DataSourceType.KTABLE) {
        final String noValue = schemaWithPseudoColumns.key().stream()
            .map(Column::name)
            .filter(colName -> !values.containsKey(colName))
            .map(ColumnName::text)
            .collect(Collectors.joining(", "));
        if (!noValue.isEmpty()) {
            throw new KsqlException("Value for primary key column(s) "
                + noValue + " is required for tables");
        }
    }

    // ROWTIME defaults to "now" from the injected clock when not supplied explicitly.
    final long ts = (long) values.getOrDefault(SystemColumns.ROWTIME_NAME, clock.getAsLong());

    final GenericKey key = buildKey(schema, values);
    final GenericRow value = buildValue(schema, values);

    return KsqlGenericRecord.of(key, value, ts);
}
// Explicit NULL literals must be accepted for both key and value columns,
// producing a record with null key/value fields and the clock's timestamp (0 here).
@Test
public void shouldAcceptNullsForAnyColumn() {
    // Given:
    final LogicalSchema schema = LogicalSchema.builder()
        .keyColumn(KEY, SqlTypes.STRING)
        .valueColumn(COL0, SqlTypes.BIGINT)
        .build();
    final List<ColumnName> names = ImmutableList.of(KEY, COL0);

    // When:
    final KsqlGenericRecord record = recordFactory.build(
        names,
        ImmutableList.of(new NullLiteral(), new NullLiteral()),
        schema,
        DataSourceType.KSTREAM
    );

    // Then:
    assertThat(record, is(KsqlGenericRecord.of(
        GenericKey.genericKey((Object) null),
        GenericRow.genericRow((Object) null),
        0
    )));
}
/**
 * Serializes the object and flushes immediately, then invokes preventMemoryLeak()
 * — which, per the configured reset frequency, resets the underlying stream so
 * its back-reference table cannot grow without bound.
 */
@Override
public void write(Object object) throws IOException {
    objectOutputStream.writeObject(object);
    objectOutputStream.flush();
    preventMemoryLeak();
}
// With a reset frequency of 2, the stream must be reset after every second write,
// in order: write, write, reset, write, write, reset.
@Test
public void resetsObjectOutputStreamAccordingToGivenResetFrequency() throws IOException {
    // given
    ObjectWriter objectWriter = new AutoFlushingObjectWriter(objectOutputStream, 2);
    String object = "foo";

    // when
    objectWriter.write(object);
    objectWriter.write(object);
    objectWriter.write(object);
    objectWriter.write(object);

    // then
    InOrder inOrder = inOrder(objectOutputStream);
    inOrder.verify(objectOutputStream).writeObjectOverride(object);
    inOrder.verify(objectOutputStream).writeObjectOverride(object);
    inOrder.verify(objectOutputStream).reset();
    inOrder.verify(objectOutputStream).writeObjectOverride(object);
    inOrder.verify(objectOutputStream).writeObjectOverride(object);
    inOrder.verify(objectOutputStream).reset();
}
/**
 * Fetches the full topic configuration table from a broker via a synchronous
 * GET_ALL_TOPIC_CONFIG request (through the VIP channel when enabled).
 *
 * @throws MQBrokerException for any non-SUCCESS response code
 */
public TopicConfigSerializeWrapper getAllTopicConfig(final String addr,
    long timeoutMillis) throws RemotingConnectException, RemotingSendRequestException,
    RemotingTimeoutException, InterruptedException, MQBrokerException {
    RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_ALL_TOPIC_CONFIG, null);

    RemotingCommand response = this.remotingClient.invokeSync(
        MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), addr), request, timeoutMillis);
    assert response != null;
    switch (response.getCode()) {
        case ResponseCode.SUCCESS: {
            return TopicConfigSerializeWrapper.decode(response.getBody(), TopicConfigSerializeWrapper.class);
        }
        default:
            break;
    }

    throw new MQBrokerException(response.getCode(), response.getRemark(), addr);
}
// A SUCCESS response body must decode into a wrapper carrying the topic table
// and a default data version.
@Test
public void assertGetAllTopicConfig() throws RemotingException, InterruptedException, MQBrokerException {
    mockInvokeSync();
    TopicConfigSerializeWrapper responseBody = new TopicConfigSerializeWrapper();
    responseBody.getTopicConfigTable().put("key", new TopicConfig());
    setResponseBody(responseBody);
    TopicConfigSerializeWrapper actual = mqClientAPI.getAllTopicConfig(defaultBrokerAddr, defaultTimeout);
    assertNotNull(actual);
    assertEquals(1, actual.getTopicConfigTable().size());
    assertNotNull(actual.getDataVersion());
    assertEquals(0, actual.getDataVersion().getStateVersion());
}
// Fires the starting gun: a shared-mode release that unblocks every waiting thread.
public void ready() {
    sync.releaseShared(UNUSED);
}
// Threads block on the gun until ready() is called, then all proceed to completion.
@Test
public void testStartingGunNoInterruptions() throws InterruptedException {
    StartingGun sg = new StartingGun();
    Thread[] threads = startWaitingThreads(sg);
    Thread.sleep(PAUSE);
    allThreadsAlive(threads);
    sg.ready();
    allThreadsDead(threads);
}
/**
 * Adds or refreshes a topic in the fetch set with expiry nowMs + metadataIdleMs.
 * A first-time topic is queued as new and triggers a metadata update request;
 * re-adding an existing topic only extends its expiry.
 */
public synchronized void add(String topic, long nowMs) {
    Objects.requireNonNull(topic, "topic cannot be null");
    if (topics.put(topic, nowMs + metadataIdleMs) == null) {
        newTopics.add(topic);
        requestUpdateForNewTopics();
    }
}
// Only a genuinely new topic may override the update backoff: re-adding a known
// topic leaves the normal expiry schedule in place.
@Test
public void testTimeToNextUpdateOverwriteBackoff() {
    long now = 10000;

    // New topic added to fetch set and update requested. It should allow immediate update.
    metadata.updateWithCurrentRequestVersion(responseWithCurrentTopics(), false, now);
    metadata.add("new-topic", now);
    assertEquals(0, metadata.timeToNextUpdate(now));

    // Even though add is called, immediate update isn't necessary if the new topic set isn't
    // containing a new topic,
    metadata.updateWithCurrentRequestVersion(responseWithCurrentTopics(), false, now);
    metadata.add("new-topic", now);
    assertEquals(metadataExpireMs, metadata.timeToNextUpdate(now));

    // If the new set of topics containing a new topic then it should allow immediate update.
    metadata.add("another-new-topic", now);
    assertEquals(0, metadata.timeToNextUpdate(now));
}
/**
 * Checks that the given string is a valid private key: after stripping an
 * optional 0x prefix, its hex length must match PRIVATE_KEY_LENGTH_IN_HEX.
 */
public static boolean isValidPrivateKey(String privateKey) {
    final String hexDigits = Numeric.cleanHexPrefix(privateKey);
    return hexDigits.length() == PRIVATE_KEY_LENGTH_IN_HEX;
}
// Valid with or without the 0x prefix; invalid when empty, one char too long,
// or one char too short.
@Test
public void testIsValidPrivateKey() {
    assertTrue(isValidPrivateKey(SampleKeys.PRIVATE_KEY_STRING));
    assertTrue(isValidPrivateKey(Numeric.prependHexPrefix(SampleKeys.PRIVATE_KEY_STRING)));

    assertFalse(isValidPrivateKey(""));
    assertFalse(isValidPrivateKey(SampleKeys.PRIVATE_KEY_STRING + "a"));
    assertFalse(isValidPrivateKey(SampleKeys.PRIVATE_KEY_STRING.substring(1)));
}
/**
 * Builds a user-facing error message from an HTTP failure: the parsed JSON error
 * when the body is JSON; otherwise "HTTP code N", with up to MAX_ERROR_MSG_LEN
 * characters of the body appended unless the body is HTML.
 */
public static String createErrorMessage(HttpException exception) {
    final String jsonError = tryParseAsJsonError(exception.content());
    if (jsonError != null) {
        return jsonError;
    }
    final String prefix = "HTTP code " + exception.code();
    // HTML bodies are not worth echoing back to the user.
    return isHtml(exception.content())
        ? prefix
        : prefix + ": " + StringUtils.left(exception.content(), MAX_ERROR_MSG_LEN);
}
// Overlong plain-text bodies must be truncated: "HTTP code 400: " (15 chars)
// plus the 128-character MAX_ERROR_MSG_LEN excerpt.
@Test
public void createErrorMessage_whenLongContent_shouldCreateErrorMsg() {
    String content = StringUtils.repeat("mystring", 1000);
    assertThat(DefaultScannerWsClient.createErrorMessage(new HttpException("url", 400, content))).hasSize(15 + 128);
}
/**
 * Classifies a send request by its message properties.
 *
 * Precedence: TRANSACTION (prepared flag true) > FIFO (sharding key present)
 * > DELAY (any delay/timer property present) > NORMAL.
 */
public static TopicMessageType getMessageType(SendMessageRequestHeader requestHeader) {
    Map<String, String> properties = MessageDecoder.string2messageProperties(requestHeader.getProperties());
    if (Boolean.parseBoolean(properties.get(MessageConst.PROPERTY_TRANSACTION_PREPARED))) {
        return TopicMessageType.TRANSACTION;
    }
    if (properties.containsKey(MessageConst.PROPERTY_SHARDING_KEY)) {
        return TopicMessageType.FIFO;
    }
    if (properties.get("__STARTDELIVERTIME") != null
            || properties.get(MessageConst.PROPERTY_DELAY_TIME_LEVEL) != null
            || properties.get(MessageConst.PROPERTY_TIMER_DELIVER_MS) != null
            || properties.get(MessageConst.PROPERTY_TIMER_DELAY_SEC) != null
            || properties.get(MessageConst.PROPERTY_TIMER_DELAY_MS) != null) {
        return TopicMessageType.DELAY;
    }
    return TopicMessageType.NORMAL;
}
// A header without any properties must fall through to the NORMAL classification.
@Test
public void testGetMessageTypeWithEmptyProperties() {
    TopicMessageType result = BrokerMetricsManager.getMessageType(new SendMessageRequestHeader());
    assertThat(TopicMessageType.NORMAL).isEqualTo(result);
}
/**
 * Parses a raw User-Agent header string into a structured {@link UserAgent}.
 * Thin facade over {@code UserAgentParser}.
 */
public static UserAgent parse(String userAgentString) {
    final UserAgent parsed = UserAgentParser.parse(userAgentString);
    return parsed;
}
// Parses a real DingTalk (iPhone) UA string and checks browser, versions, engine,
// OS, platform, and the mobile flag are all extracted correctly.
@Test
public void parseDingTalkTest() {
    final String uaString = "Mozilla/5.0 (iPhone; CPU iPhone OS 14_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/18A373 AliApp(DingTalk/5.1.33) com.laiwang.DingTalk/13976299 Channel/201200 language/zh-Hans-CN WK";
    final UserAgent ua = UserAgentUtil.parse(uaString);
    assertEquals("DingTalk", ua.getBrowser().toString());
    assertEquals("5.1.33", ua.getVersion());
    assertEquals("Webkit", ua.getEngine().toString());
    assertEquals("605.1.15", ua.getEngineVersion());
    assertEquals("iPhone", ua.getOs().toString());
    assertEquals("14_0", ua.getOsVersion());
    assertEquals("iPhone", ua.getPlatform().toString());
    assertTrue(ua.isMobile());
}
// Utility class: private constructor prevents instantiation.
private Top() {
    // do not instantiate
}
// End-to-end check of the Top transforms: custom-comparator top-1, natural-order
// largest/smallest on a flat collection, and per-key largest/smallest on a keyed table.
@Test
@Category(NeedsRunner.class)
@SuppressWarnings("unchecked")
public void testTop() {
    PCollection<String> input = p.apply(Create.of(Arrays.asList(COLLECTION)).withCoder(StringUtf8Coder.of()));
    PCollection<List<String>> top1 = input.apply(Top.of(1, new OrderByLength()));
    PCollection<List<String>> top2 = input.apply(Top.largest(2));
    PCollection<List<String>> top3 = input.apply(Top.smallest(3));
    PCollection<KV<String, Integer>> inputTable = createInputTable(p);
    PCollection<KV<String, List<Integer>>> largestPerKey = inputTable.apply(Top.largestPerKey(2));
    PCollection<KV<String, List<Integer>>> smallestPerKey = inputTable.apply(Top.smallestPerKey(2));
    PAssert.thatSingletonIterable(top1).containsInAnyOrder(Arrays.asList("bb"));
    PAssert.thatSingletonIterable(top2).containsInAnyOrder("z", "c");
    PAssert.thatSingletonIterable(top3).containsInAnyOrder("a", "bb", "c");
    PAssert.that(largestPerKey)
        .containsInAnyOrder(KV.of("a", Arrays.asList(3, 2)), KV.of("b", Arrays.asList(100, 10)));
    PAssert.that(smallestPerKey)
        .containsInAnyOrder(KV.of("a", Arrays.asList(1, 2)), KV.of("b", Arrays.asList(1, 10)));
    p.run();
}
/**
 * Returns the buffered sample and pre-fetches the next one, maintaining a
 * one-sample read-ahead.
 */
public Sample readSample() {
    final Sample current = lastSampleRead;
    lastSampleRead = nextSample();
    return current;
}
// Reads every row from the CSV fixture and compares each sample (via toString)
// against one built directly from the metadata; the reader is auto-closed.
@Test
public void testReadSample() {
    try (CsvSampleReader reader = new CsvSampleReader(tempCsv, metadata)) {
        for (long i = 0; i < NR_ROWS; i++) {
            Sample expected = new SampleBuilder(metadata).add(i)
                    .add("a" + i).build();
            assertThat(reader.readSample().toString(), CoreMatchers.is(expected.toString()));
        }
    }
}
/**
 * Checks that the subject contains exactly the provided elements.
 *
 * <p>A {@code null} varargs array is treated as a single {@code null} element. The
 * boolean passed through flags the "exactly one Iterable argument" case so the
 * downstream check can warn callers who probably meant containsExactlyElementsIn.
 */
@CanIgnoreReturnValue
public final Ordered containsExactly(@Nullable Object @Nullable ... varargs) {
    List<@Nullable Object> expected = (varargs == null) ? newArrayList((@Nullable Object) null) : asList(varargs);
    return containsExactlyElementsIn(
        expected, varargs != null && varargs.length == 1 && varargs[0] instanceof Iterable);
}
// The failure message must name both the missing expected element (4) and the
// unexpected actual element (3).
@Test
public void iterableContainsExactlyWithMissingAndExtraElements() {
    expectFailureWhenTestingThat(asList(1, 2, 3)).containsExactly(1, 2, 4);
    assertFailureValue("missing (1)", "4");
    assertFailureValue("unexpected (1)", "3");
}
/**
 * Extracts the package identifier from an Android resource id.
 * The package id occupies the top byte (0xPPTTEEEE); an unsigned right shift
 * pulls it out without sign extension.
 */
public static int getPackageIdentifier(int resId) {
    final int packageShift = 24;
    return resId >>> packageShift;
}
// The top byte of a resource id is the package identifier (0x01 = framework,
// 0x7F = application).
@Test
public void testGetPackageIdentifier() {
    assertThat(ResourceIds.getPackageIdentifier(0x01000000)).isEqualTo(0x01);
    assertThat(ResourceIds.getPackageIdentifier(0x7F000000)).isEqualTo(0x7F);
}
/**
 * Executes a punctuation callback on the given processor node with the supplied
 * timestamp, routing any failure through the configured processing-exception handler.
 *
 * <p>Must not be called while another node is current. Errors are handled as follows:
 * TimeoutException is rethrown under ALOS but converted to TaskCorruptedException
 * under EOS; FailedProcessingException is unwrapped into a StreamsException;
 * TaskCorrupted/TaskMigrated pass through; any other RuntimeException is offered to
 * the ProcessingExceptionHandler, which either fails the task or drops the record.
 */
@SuppressWarnings("unchecked")
@Override
public void punctuate(final ProcessorNode<?, ?, ?, ?> node,
                      final long timestamp,
                      final PunctuationType type,
                      final Punctuator punctuator) {
    if (processorContext.currentNode() != null) {
        throw new IllegalStateException(String.format("%sCurrent node is not null", logPrefix));
    }

    // when punctuating, we need to preserve the timestamp (this can be either system time or event time)
    // while other record context are set as dummy: null topic, -1 partition, -1 offset and empty header
    final ProcessorRecordContext recordContext = new ProcessorRecordContext(
        timestamp,
        -1L,
        -1,
        null,
        new RecordHeaders()
    );
    updateProcessorContext(node, time.milliseconds(), recordContext);

    if (log.isTraceEnabled()) {
        log.trace("Punctuating processor {} with timestamp {} and punctuation type {}", node.name(), timestamp, type);
    }

    try {
        // measure only when the latency sensor is active
        maybeMeasureLatency(() -> punctuator.punctuate(timestamp), time, punctuateLatencySensor);
    } catch (final TimeoutException timeoutException) {
        if (!eosEnabled) {
            throw timeoutException;
        } else {
            // under EOS a timeout means the task state may be inconsistent; trigger recovery
            record = null;
            throw new TaskCorruptedException(Collections.singleton(id));
        }
    } catch (final FailedProcessingException e) {
        // the user callback already classified this failure; surface the original cause
        throw createStreamsException(node.name(), e.getCause());
    } catch (final TaskCorruptedException | TaskMigratedException e) {
        throw e;
    } catch (final RuntimeException processingException) {
        // build the context handed to the user-supplied processing-exception handler
        final ErrorHandlerContext errorHandlerContext = new DefaultErrorHandlerContext(
            null,
            recordContext.topic(),
            recordContext.partition(),
            recordContext.offset(),
            recordContext.headers(),
            node.name(),
            id()
        );

        final ProcessingExceptionHandler.ProcessingHandlerResponse response;
        try {
            response = Objects.requireNonNull(
                processingExceptionHandler.handle(errorHandlerContext, null, processingException),
                "Invalid ProcessingExceptionHandler response."
            );
        } catch (final RuntimeException fatalUserException) {
            // the handler itself failed; this is fatal and wraps the handler's exception
            log.error(
                "Processing error callback failed after processing error for record: {}",
                errorHandlerContext,
                processingException
            );
            throw new FailedProcessingException("Fatal user code error in processing error callback", fatalUserException);
        }

        if (response == ProcessingExceptionHandler.ProcessingHandlerResponse.FAIL) {
            log.error("Processing exception handler is set to fail upon"
                + " a processing error. If you would rather have the streaming pipeline"
                + " continue after a processing error, please set the "
                + PROCESSING_EXCEPTION_HANDLER_CLASS_CONFIG + " appropriately.");
            throw createStreamsException(node.name(), processingException);
        } else {
            // CONTINUE: count the dropped punctuation and move on
            droppedRecordsSensor.record();
        }
    } finally {
        // always clear the current node so subsequent operations start clean
        processorContext.setCurrentNode(null);
    }
}
// With a FAIL processing-exception handler, a punctuator that throws must surface a
// StreamsException whose cause is the original KafkaException.
@Test
public void punctuateShouldThrowStreamsExceptionWhenProcessingExceptionHandlerRepliesWithFail() {
    when(stateManager.taskId()).thenReturn(taskId);
    when(stateManager.taskType()).thenReturn(TaskType.ACTIVE);
    task = createStatelessTask(createConfig(
        AT_LEAST_ONCE,
        "100",
        LogAndFailExceptionHandler.class.getName(),
        LogAndFailProcessingExceptionHandler.class.getName()
    ));
    final StreamsException streamsException = assertThrows(
        StreamsException.class,
        () -> task.punctuate(processorStreamTime, 1, PunctuationType.STREAM_TIME, timestamp -> {
            throw new KafkaException("KABOOM!");
        })
    );
    assertInstanceOf(KafkaException.class, streamsException.getCause());
    assertEquals("KABOOM!", streamsException.getCause().getMessage());
}
/**
 * Produces the anonymized text for the given parse tree by delegating to the
 * recursive builder.
 */
public String anonymize(final ParseTree tree) {
    final String anonymized = build(tree);
    return anonymized;
}
// Golden-file (Approvals) test: the anonymized form of a CREATE SOURCE TABLE
// statement is compared against the stored approved output.
@Test
public void shouldAnonymizeCreateSourceTableCorrectly() {
    final String output = anon.anonymize(
        "CREATE SOURCE TABLE my_table (profileId VARCHAR, latitude DOUBLE, longitude DOUBLE)\n"
            + "WITH (kafka_topic='locations', value_format='json');");
    Approvals.verify(output);
}
/**
 * Exposes the backing map's entries. Note this is the live entry set of the
 * underlying map, not a defensive copy.
 */
public Set<Map.Entry<Identifier, UntypedMetric>> entrySet() {
    final Set<Map.Entry<Identifier, UntypedMetric>> entries = values.entrySet();
    return entries;
}
// Entries keyed by name alone and by name+dimension must coexist: four "nalle_i"
// samples plus four dimensioned "nalle" samples yield eight distinct entries.
@Test
final void testEntrySet() {
    assertEquals(0, bucket.entrySet().size());
    for (int i = 0; i < 4; ++i) {
        bucket.put(new Sample(new Measurement(i), new Identifier("nalle_" + i, null), AssumedType.GAUGE));
    }
    assertEquals(4, bucket.entrySet().size());
    for (int i = 0; i < 4; ++i) {
        bucket.put(new Sample(new Measurement(i), new Identifier("nalle",
                new Point(new ImmutableMap.Builder<String, Integer>().put("dim", Integer.valueOf(i)).build())), AssumedType.GAUGE));
    }
    assertEquals(8, bucket.entrySet().size());
    int nalle = 0, nalle0 = 0, nalle1 = 0, nalle2 = 0, nalle3 = 0;
    for (Entry<Identifier, UntypedMetric> x : bucket.entrySet()) {
        String metricName = x.getKey().getName();
        switch (metricName) {
            case "nalle" -> ++nalle;
            case "nalle_0" -> ++nalle0;
            case "nalle_1" -> ++nalle1;
            case "nalle_2" -> ++nalle2;
            case "nalle_3" -> ++nalle3;
            default -> throw new IllegalStateException();
        }
    }
    // the four dimensioned samples share the name "nalle"; the rest appear once each
    assertEquals(4, nalle);
    assertEquals(1, nalle0);
    assertEquals(1, nalle1);
    assertEquals(1, nalle2);
    assertEquals(1, nalle3);
}
/**
 * Parses a whitespace-separated routing table from the given reader.
 *
 * <p>Each non-blank line is a key followed by one or more datacenter names. The key
 * is either an IPv4 CIDR, an IPv6 CIDR, or a geo key of the form
 * CONTINENT-COUNTRY[-SUBDIVISION]-PROTOCOL. Duplicate keys are silently deduped by
 * the backing maps (last occurrence wins).
 *
 * @throws IOException if reading fails
 * @throws IllegalStateException on a row with fewer than two fields or a malformed geo key
 * @throws IllegalArgumentException when a CIDR key parses to the wrong IP version
 */
public static CallRoutingTable fromTsv(final Reader inputReader) throws IOException {
    try (final BufferedReader reader = new BufferedReader(inputReader)) {
        // use maps to silently dedupe CidrBlocks
        Map<CidrBlock.IpV4CidrBlock, List<String>> ipv4Map = new HashMap<>();
        Map<CidrBlock.IpV6CidrBlock, List<String>> ipv6Map = new HashMap<>();
        Map<CallRoutingTable.GeoKey, List<String>> ipGeoTable = new HashMap<>();
        String line;
        while((line = reader.readLine()) != null) {
            if(line.isBlank()) {
                continue;
            }
            // split on whitespace, dropping empty fragments from repeated separators
            List<String> splits = Arrays.stream(line.split(WHITESPACE_REGEX)).filter(s -> !s.isBlank()).toList();
            if (splits.size() < 2) {
                throw new IllegalStateException("Invalid row, expected some key and list of values");
            }
            // everything after the key is the ordered datacenter list
            List<String> datacenters = splits.subList(1, splits.size());
            switch (guessLineType(splits)) {
                case v4 -> {
                    CidrBlock cidrBlock = CidrBlock.parseCidrBlock(splits.getFirst());
                    if(!(cidrBlock instanceof CidrBlock.IpV4CidrBlock)) {
                        throw new IllegalArgumentException("Expected an ipv4 cidr block");
                    }
                    ipv4Map.put((CidrBlock.IpV4CidrBlock) cidrBlock, datacenters);
                }
                case v6 -> {
                    CidrBlock cidrBlock = CidrBlock.parseCidrBlock(splits.getFirst());
                    if(!(cidrBlock instanceof CidrBlock.IpV6CidrBlock)) {
                        throw new IllegalArgumentException("Expected an ipv6 cidr block");
                    }
                    ipv6Map.put((CidrBlock.IpV6CidrBlock) cidrBlock, datacenters);
                }
                case Geo -> {
                    // key layout: continent-country[-subdivision]-protocol
                    String[] geo = splits.getFirst().split("-");
                    if(geo.length < 3) {
                        throw new IllegalStateException("Geo row key invalid, expected atleast continent, country, and protocol");
                    }
                    String continent = geo[0];
                    String country = geo[1];
                    // a 4-part key carries an optional subdivision in the third slot
                    Optional<String> subdivision = geo.length > 3 ? Optional.of(geo[2]) : Optional.empty();
                    // protocol is always the last segment (e.g. "v4"/"v6")
                    CallRoutingTable.Protocol protocol = CallRoutingTable.Protocol.valueOf(geo[geo.length - 1].toLowerCase());

                    CallRoutingTable.GeoKey tableKey = new CallRoutingTable.GeoKey(
                        continent,
                        country,
                        subdivision,
                        protocol
                    );

                    ipGeoTable.put(tableKey, datacenters);
                }
            }
        }
        return new CallRoutingTable(
            ipv4Map,
            ipv6Map,
            ipGeoTable
        );
    }
}
// Parses a mixed table (IPv4 rows, IPv6 rows, geo rows with and without a
// subdivision) and compares against the fully materialized expected table.
@Test
public void testParserSuccess() throws IOException {
    var input = """
        192.1.12.0/24 datacenter-1 datacenter-2 datacenter-3
        193.123.123.0/24 datacenter-1 datacenter-2
        1.123.123.0/24 datacenter-1
        2001:db8:b0aa::/48 datacenter-1
        2001:db8:b0ab::/48 datacenter-3 datacenter-1 datacenter-2
        2001:db8:b0ac::/48 datacenter-2 datacenter-1
        SA-SR-v4 datacenter-3
        SA-UY-v4 datacenter-3 datacenter-1 datacenter-2
        NA-US-VA-v6 datacenter-2 datacenter-1
        """;
    var actual = CallRoutingTableParser.fromTsv(new StringReader(input));
    var expected = new CallRoutingTable(
        Map.of(
            (CidrBlock.IpV4CidrBlock) CidrBlock.parseCidrBlock("192.1.12.0/24"), List.of("datacenter-1", "datacenter-2", "datacenter-3"),
            (CidrBlock.IpV4CidrBlock) CidrBlock.parseCidrBlock("193.123.123.0/24"), List.of("datacenter-1", "datacenter-2"),
            (CidrBlock.IpV4CidrBlock) CidrBlock.parseCidrBlock("1.123.123.0/24"), List.of("datacenter-1")
        ),
        Map.of(
            (CidrBlock.IpV6CidrBlock) CidrBlock.parseCidrBlock("2001:db8:b0aa::/48"), List.of("datacenter-1"),
            (CidrBlock.IpV6CidrBlock) CidrBlock.parseCidrBlock("2001:db8:b0ab::/48"), List.of("datacenter-3", "datacenter-1", "datacenter-2"),
            (CidrBlock.IpV6CidrBlock) CidrBlock.parseCidrBlock("2001:db8:b0ac::/48"), List.of("datacenter-2", "datacenter-1")
        ),
        Map.of(
            new CallRoutingTable.GeoKey("SA", "SR", Optional.empty(), CallRoutingTable.Protocol.v4), List.of("datacenter-3"),
            new CallRoutingTable.GeoKey("SA", "UY", Optional.empty(), CallRoutingTable.Protocol.v4), List.of("datacenter-3", "datacenter-1", "datacenter-2"),
            new CallRoutingTable.GeoKey("NA", "US", Optional.of("VA"), CallRoutingTable.Protocol.v6), List.of("datacenter-2", "datacenter-1")
        )
    );
    assertThat(actual).isEqualTo(expected);
}
/**
 * Declares the HTTP-client configuration bean; Spring binds all
 * "shenyu.httpclient.*" properties onto the returned instance.
 */
@Bean
@ConfigurationProperties(prefix = "shenyu.httpclient")
public HttpClientProperties httpClientProperties() {
    final HttpClientProperties properties = new HttpClientProperties();
    return properties;
}
// Binds a full set of "shenyu.httpclient.*" properties (timeouts, pool, proxy, ssl)
// and verifies each getter reflects the configured or defaulted value.
@Test
public void testHttpClientProperties() {
    applicationContextRunner
        .withPropertyValues(
            "debug=true",
            "shenyu.httpclient.connectTimeout=3",
            "shenyu.httpclient.responseTimeout=0",
            "shenyu.httpclient.pool.PoolType=0",
            "shenyu.httpclient.pool.name=proxy",
            "shenyu.httpclient.pool.maxConnections=1",
            "shenyu.httpclient.pool.acquireTimeout=45000",
            "shenyu.httpclient.proxy.host=http://localhost",
            "shenyu.httpclient.proxy.port=18848",
            "shenyu.httpclient.proxy.username=itmiwang",
            "shenyu.httpclient.proxy.password=itmiwang",
            "shenyu.httpclient.proxy.nonProxyHostsPattern=itmiwang",
            "shenyu.httpclient.ssl.X509Certificate[]=[]",
            "shenyu.httpclient.ssl.handshakeTimeout=10000",
            "shenyu.httpclient.ssl.closeNotifyFlushTimeout=3000",
            "shenyu.httpclient.ssl.closeNotifyReadTimeout=0"
        )
        .run(context -> {
            HttpClientProperties properties = context.getBean("httpClientProperties", HttpClientProperties.class);
            assertNotNull(properties);
            assertThat(properties.getConnectTimeout(), is(3));
            assertThat(properties.getResponseTimeout(), is(Duration.ZERO));
            assertThat(properties.getPool().getType(), is(HttpClientProperties.Pool.PoolType.ELASTIC));
            assertThat(properties.getPool().getName(), is("proxy"));
            assertThat(properties.getPool().getMaxConnections(), is(1));
            assertThat(properties.getPool().getAcquireTimeout(), is(ConnectionProvider.DEFAULT_POOL_ACQUIRE_TIMEOUT));
            assertThat(properties.getProxy().getHost(), is("http://localhost"));
            assertThat(properties.getProxy().getPort(), is(18848));
            assertThat(properties.getProxy().getUsername(), is("itmiwang"));
            assertThat(properties.getProxy().getPassword(), is("itmiwang"));
            assertThat(properties.getProxy().getNonProxyHostsPattern(), is("itmiwang"));
            assertThat(properties.getSsl().getHandshakeTimeout(), is(Duration.ofMillis(10000)));
            assertNotNull(properties.getSsl().getTrustedX509Certificates());
            assertThat(properties.getSsl().getCloseNotifyFlushTimeout(), is(Duration.ofMillis(3000)));
            assertThat(properties.getSsl().getCloseNotifyReadTimeout(),
                is(Duration.ZERO));
        });
}
/**
 * Maps a protobuf namenode role onto the internal enum.
 * Returns null for any unrecognized value (a null argument still throws NPE,
 * matching the original switch semantics).
 */
public static NamenodeRole convert(NamenodeRoleProto role) {
    NamenodeRole result = null;
    switch (role) {
        case BACKUP:
            result = NamenodeRole.BACKUP;
            break;
        case CHECKPOINT:
            result = NamenodeRole.CHECKPOINT;
            break;
        case NAMENODE:
            result = NamenodeRole.NAMENODE;
            break;
    }
    return result;
}
// Round-trips every namenode role in both directions: enum -> proto and proto -> enum.
@Test
public void testConvertNamenodeRole() {
    assertEquals(NamenodeRoleProto.BACKUP, PBHelper.convert(NamenodeRole.BACKUP));
    assertEquals(NamenodeRoleProto.CHECKPOINT, PBHelper.convert(NamenodeRole.CHECKPOINT));
    assertEquals(NamenodeRoleProto.NAMENODE, PBHelper.convert(NamenodeRole.NAMENODE));
    assertEquals(NamenodeRole.BACKUP, PBHelper.convert(NamenodeRoleProto.BACKUP));
    assertEquals(NamenodeRole.CHECKPOINT, PBHelper.convert(NamenodeRoleProto.CHECKPOINT));
    assertEquals(NamenodeRole.NAMENODE, PBHelper.convert(NamenodeRoleProto.NAMENODE));
}
/**
 * Renders this material's case-insensitive name as a plain string for display.
 */
@Override
public String getDisplayName() {
    final CaseInsensitiveString name = getName();
    return CaseInsensitiveString.str(name);
}
// Without an explicit material name, the display name falls back to the upstream
// pipeline's name.
@Test
void shouldReturnUpstreamPipelineNameAsDisplayNameIfMaterialNameIsNotDefined() throws Exception {
    DependencyMaterial material = new DependencyMaterial(new CaseInsensitiveString("upstream"), new CaseInsensitiveString("first"));
    assertThat(material.getDisplayName()).isEqualTo("upstream");
}
/**
 * Replaces the label set of the instance identified by {@code instanceId}, both on
 * the local instance (when the id matches) and on every matching cluster instance.
 */
public void updateLabel(final String instanceId, final Collection<String> labels) {
    if (instance.getMetaData().getId().equals(instanceId)) {
        final Collection<String> localLabels = instance.getLabels();
        localLabels.clear();
        localLabels.addAll(labels);
    }
    allClusterInstances.stream()
        .filter(each -> each.getMetaData().getId().equals(instanceId))
        .forEach(each -> {
            each.getLabels().clear();
            each.getLabels().addAll(labels);
        });
}
// Updating labels by the local instance's own id must replace its label collection.
@Test
void assertUpdateLabel() {
    InstanceMetaData instanceMetaData = mock(InstanceMetaData.class);
    when(instanceMetaData.getId()).thenReturn("foo_instance_id");
    ComputeNodeInstanceContext context = new ComputeNodeInstanceContext(
            new ComputeNodeInstance(instanceMetaData), mock(WorkerIdGenerator.class), modeConfig, lockContext, eventBusContext);
    Collection<String> expected = Arrays.asList("label_1", "label_2");
    context.updateLabel("foo_instance_id", expected);
    Collection<String> actual = context.getInstance().getLabels();
    assertThat(actual, is(expected));
}
public static String getBaseUrl() { try { var requestAttrs = (ServletRequestAttributes) RequestContextHolder.currentRequestAttributes(); return getBaseUrl(requestAttrs.getRequest()); } catch (IllegalStateException e) { // method is called outside of web request context return ""; } }
// A comma-separated X-Forwarded-Host must use only the first host; the forwarded
// proto and prefix override the raw request's scheme and context path.
@Test
public void testWithXForwardedHostCommaSeparated() throws Exception {
    // basic request
    doReturn("http").when(request).getScheme();
    doReturn("localhost").when(request).getServerName();
    doReturn(8080).when(request).getServerPort();
    doReturn("/").when(request).getContextPath();
    // XForwarded content
    doReturn("https").when(request).getHeader("X-Forwarded-Proto");
    var items = new ArrayList<String>();
    items.add("open-vsx.org, foo.com, bar.com");
    doReturn(Collections.enumeration(items)).when(request).getHeaders("X-Forwarded-Host");
    doReturn("/openvsx").when(request).getHeader("X-Forwarded-Prefix");
    assertThat(UrlUtil.getBaseUrl(request)).isEqualTo("https://open-vsx.org/openvsx/");
}
@VisibleForTesting RoleDO validateRoleForUpdate(Long id) { RoleDO role = roleMapper.selectById(id); if (role == null) { throw exception(ROLE_NOT_EXISTS); } // 内置角色,不允许删除 if (RoleTypeEnum.SYSTEM.getType().equals(role.getType())) { throw exception(ROLE_CAN_NOT_UPDATE_SYSTEM_TYPE_ROLE); } return role; }
// A freshly inserted (non-system) role passes validation without throwing.
@Test
public void testValidateUpdateRole_success() {
    RoleDO roleDO = randomPojo(RoleDO.class);
    roleMapper.insert(roleDO);
    // prepare the id of the inserted role
    Long id = roleDO.getId();
    // call; no exception expected
    roleService.validateRoleForUpdate(id);
}
/**
 * Resolves AWS credentials with the following precedence: explicit access/secret
 * keys from config, then the EC2 IAM role, then the ECS task environment.
 *
 * @throws NoCredentialsException when no source yields credentials
 */
AwsCredentials credentials() {
    final String accessKey = awsConfig.getAccessKey();
    if (!StringUtil.isNullOrEmptyAfterTrim(accessKey)) {
        return AwsCredentials.builder()
            .setAccessKey(accessKey)
            .setSecretKey(awsConfig.getSecretKey())
            .build();
    }
    if (!StringUtil.isNullOrEmptyAfterTrim(ec2IamRole)) {
        return fetchCredentialsFromEc2();
    }
    if (environment.isRunningOnEcs()) {
        return fetchCredentialsFromEcs();
    }
    throw new NoCredentialsException();
}
// When running on ECS and the metadata API fails, the provider must surface an
// InvalidConfigurationException (declared via the expected attribute).
@Test(expected = InvalidConfigurationException.class)
public void credentialsEcsException() {
    // given
    AwsConfig awsConfig = AwsConfig.builder().build();
    given(awsMetadataApi.credentialsEcs()).willThrow(new RuntimeException("Error fetching credentials"));
    given(environment.isRunningOnEcs()).willReturn(true);
    AwsCredentialsProvider credentialsProvider = new AwsCredentialsProvider(awsConfig, awsMetadataApi, environment);
    // when
    credentialsProvider.credentials();
    // then
    // throws exception
}
/**
 * Loads the extension implementation of {@code service} using the given class
 * loader; the {@code true} flag requests the default/activated extension.
 *
 * @throws EnhancedServiceNotFoundException when no implementation can be found
 */
public static <S> S load(Class<S> service, ClassLoader loader) throws EnhancedServiceNotFoundException {
    return InnerEnhancedServiceLoader.getServiceLoader(service).load(loader, true);
}
// Loading a service interface with no discoverable implementation must raise
// EnhancedServiceNotFoundException.
@Test
public void classCastExceptionTest() {
    Assertions.assertThrows(EnhancedServiceNotFoundException.class, () -> {
        Hello1 load = EnhancedServiceLoader.load(Hello1.class);
    });
}
/**
 * Resolves the given schema references, starting from an empty resolution state,
 * and emits only the final name-to-schema map.
 */
Mono<ImmutableMap<String, String>> resolve(List<SchemaReference> refs) {
    final Resolving initialState = new Resolving(ImmutableMap.of(), ImmutableSet.of());
    return resolveReferences(refs, initialState).map(Resolving::resolved);
}
// Both a null reference list and an empty one must resolve to an empty map.
@Test
void returnsEmptyMapOnEmptyInputs() {
    StepVerifier.create(schemaReferencesResolver.resolve(null))
        .assertNext(map -> Assertions.assertThat(map).isEmpty())
        .verifyComplete();
    StepVerifier.create(schemaReferencesResolver.resolve(List.of()))
        .assertNext(map -> Assertions.assertThat(map).isEmpty())
        .verifyComplete();
}
/**
 * Builds a closed range over the two endpoints, tolerating mixed numeric types.
 *
 * <p>If the endpoints are mutually incomparable (ClassCastException), both are
 * re-parsed into a common numeric type when one can be determined; otherwise the
 * original exception is rethrown.
 */
public static Range<Comparable<?>> safeClosed(final Comparable<?> lowerEndpoint, final Comparable<?> upperEndpoint) {
    try {
        return Range.closed(lowerEndpoint, upperEndpoint);
    } catch (final ClassCastException ex) {
        // fall back: coerce both endpoints to a shared numeric type, if any
        Optional<Class<?>> clazz = getTargetNumericType(Arrays.asList(lowerEndpoint, upperEndpoint));
        if (!clazz.isPresent()) {
            throw ex;
        }
        return Range.closed(parseNumberByClazz(lowerEndpoint.toString(), clazz.get()),
                parseNumberByClazz(upperEndpoint.toString(), clazz.get()));
    }
}
// Same-type Integer endpoints take the direct Range.closed path unchanged.
@Test
void assertSafeClosedForInteger() {
    Range<Comparable<?>> range = SafeNumberOperationUtils.safeClosed(12, 500);
    assertThat(range.lowerEndpoint(), is(12));
    assertThat(range.upperEndpoint(), is(500));
}
/**
 * Sets the SQL statement timeout in milliseconds and returns {@code this} for chaining.
 *
 * @throws IllegalArgumentException if the timeout is negative (via checkNotNegative)
 */
public SqlConfig setStatementTimeoutMillis(long statementTimeoutMillis) {
    checkNotNegative(statementTimeoutMillis, "Timeout cannot be negative");
    this.statementTimeoutMillis = statementTimeoutMillis;
    return this;
}
// A negative statement timeout must be rejected with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class)
public void testQueryTimeoutNegative() {
    new SqlConfig().setStatementTimeoutMillis(-1L);
}
// Double-dispatch: delegate to the tobacco so its concrete type determines
// the smoking behavior for this smoker.
@Override
public void smoke() {
    tobacco.smoke(this);
}
// For each tobacco type, a fresh AdvancedSorceress must log exactly one message
// naming that tobacco; total log size equals the number of tobaccos.
@Test
void testSmokeEveryThing() {
    List<Tobacco> tobaccos = List.of(
        new OldTobyTobacco(),
        new RivendellTobacco(),
        new SecondBreakfastTobacco()
    );
    // Verify if the sorceress is smoking the correct tobacco ...
    tobaccos.forEach(tobacco -> {
        final var advancedSorceress = new AdvancedSorceress();
        advancedSorceress.setTobacco(tobacco);
        advancedSorceress.smoke();
        String lastMessage = appender.getLastMessage();
        assertEquals("AdvancedSorceress smoking " + tobacco.getClass().getSimpleName(), lastMessage);
    });
    // ... and nothing else is happening.
    assertEquals(tobaccos.size(), appender.getLogSize());
}
/**
 * Picks the git blame algorithm: an explicitly configured value always wins;
 * with zero processors fall back to native blame; with many files per core use
 * whole-repository files-blame; otherwise native per-file blame.
 */
@Override
public BlameAlgorithmEnum getBlameAlgorithm(int availableProcessors, int numberOfFiles) {
    final Optional<BlameAlgorithmEnum> forced = configuration.get(PROP_SONAR_SCM_USE_BLAME_ALGORITHM)
        .map(BlameAlgorithmEnum::valueOf);
    if (forced.isPresent()) {
        return forced.get();
    }
    if (availableProcessors == 0) {
        LOG.warn("Available processors are 0. Falling back to native git blame");
        return GIT_NATIVE_BLAME;
    }
    final boolean manyFilesPerCore = numberOfFiles / availableProcessors > FILES_GIT_BLAME_TRIGGER;
    return manyFilesPerCore ? GIT_FILES_BLAME : GIT_NATIVE_BLAME;
}
// An explicitly configured algorithm must override the files/processors heuristic.
@Test
public void useRepositoryBlame_whenFileBlamePropsDisableOrUnspecified_shouldEnableRepoBlame() {
    when(configuration.get(DefaultBlameStrategy.PROP_SONAR_SCM_USE_BLAME_ALGORITHM)).thenReturn(Optional.of(GIT_NATIVE_BLAME.name()));
    assertThat(underTest.getBlameAlgorithm(1, 10000)).isEqualTo(GIT_NATIVE_BLAME);
}
/**
 * Scans the term buffer between the rebuild position and the high-water mark for a
 * loss gap, activating/retrying the gap timer as needed.
 *
 * <p>Only scans when rebuild is behind the HWM. The scan limit within the current
 * term is either the HWM offset (same term) or the end of the term. A gap found at
 * a new (termId, offset) activates loss handling; timer expiry is checked on every
 * scan that found a gap.
 *
 * @return the rebuild offset and a loss-found flag packed into one long via pack()
 */
public long scan(
    final UnsafeBuffer termBuffer,
    final long rebuildPosition,
    final long hwmPosition,
    final long nowNs,
    final int termLengthMask,
    final int positionBitsToShift,
    final int initialTermId) {
    boolean lossFound = false;
    // offset of the rebuild position within the current term
    int rebuildOffset = (int)rebuildPosition & termLengthMask;

    if (rebuildPosition < hwmPosition) {
        final int rebuildTermCount = (int)(rebuildPosition >>> positionBitsToShift);
        final int hwmTermCount = (int)(hwmPosition >>> positionBitsToShift);

        final int rebuildTermId = initialTermId + rebuildTermCount;
        final int hwmTermOffset = (int)hwmPosition & termLengthMask;
        // scan to the HWM when it is in the same term, otherwise to the end of the term
        final int limitOffset = rebuildTermCount == hwmTermCount ? hwmTermOffset : termLengthMask + 1;

        rebuildOffset = scanForGap(termBuffer, rebuildTermId, rebuildOffset, limitOffset, this);
        if (rebuildOffset < limitOffset) {
            // a gap exists; activate only if it differs from the currently tracked gap
            if (scannedTermOffset != activeTermOffset || scannedTermId != activeTermId) {
                activateGap(nowNs);
                lossFound = true;
            }

            checkTimerExpiry(nowNs);
        }
    }

    return pack(rebuildOffset, lossFound);
}
// With a long retry delay: no NAK on first scan, a NAK once the initial delay
// elapses (40ms), no re-NAK before the retry delay expires (80ms), and a second
// NAK after it does (240ms).
@Test
void shouldHandleLongerRetryDelay() {
    lossDetector = getLossHandlerWithLongRetry();
    final long rebuildPosition = ACTIVE_TERM_POSITION;
    final long hwmPosition = ACTIVE_TERM_POSITION + (ALIGNED_FRAME_LENGTH * 3L);
    insertDataFrame(offsetOfMessage(0));
    insertDataFrame(offsetOfMessage(2));
    lossDetector.scan(termBuffer, rebuildPosition, hwmPosition, currentTime, MASK, POSITION_BITS_TO_SHIFT, TERM_ID);
    verifyNoInteractions(lossHandler);
    currentTime = TimeUnit.MILLISECONDS.toNanos(40);
    lossDetector.scan(termBuffer, rebuildPosition, hwmPosition, currentTime, MASK, POSITION_BITS_TO_SHIFT, TERM_ID);
    verify(lossHandler).onGapDetected(TERM_ID, offsetOfMessage(1), gapLength());
    currentTime = TimeUnit.MILLISECONDS.toNanos(80);
    lossDetector.scan(termBuffer, rebuildPosition, hwmPosition, currentTime, MASK, POSITION_BITS_TO_SHIFT, TERM_ID);
    verifyNoMoreInteractions(lossHandler);
    currentTime = TimeUnit.MILLISECONDS.toNanos(240);
    lossDetector.scan(termBuffer, rebuildPosition, hwmPosition, currentTime, MASK, POSITION_BITS_TO_SHIFT, TERM_ID);
    verify(lossHandler, times(2)).onGapDetected(TERM_ID, offsetOfMessage(1), gapLength());
}
/**
 * Applies an intent operation: untracks resources for intents being uninstalled,
 * tracks resources for intents being installed, then builds and fires flow-objective
 * contexts (REMOVE objectives as pending, ADD objectives as next-pending).
 *
 * <p>If neither an install nor an uninstall target is present, the operation is
 * reported as an immediate success.
 */
@Override
public void apply(IntentOperationContext<FlowObjectiveIntent> intentOperationContext) {
    Objects.requireNonNull(intentOperationContext);
    Optional<IntentData> toUninstall = intentOperationContext.toUninstall();
    Optional<IntentData> toInstall = intentOperationContext.toInstall();
    List<FlowObjectiveIntent> uninstallIntents = intentOperationContext.intentsToUninstall();
    List<FlowObjectiveIntent> installIntents = intentOperationContext.intentsToInstall();

    // nothing to do: report success right away
    if (!toInstall.isPresent() && !toUninstall.isPresent()) {
        intentInstallCoordinator.intentInstallSuccess(intentOperationContext);
        return;
    }

    if (toUninstall.isPresent()) {
        // stop tracking resources of the intent and each of its installables
        IntentData intentData = toUninstall.get();
        trackerService.removeTrackedResources(intentData.key(), intentData.intent().resources());
        uninstallIntents.forEach(installable ->
            trackerService.removeTrackedResources(intentData.intent().key(), installable.resources()));
    }

    if (toInstall.isPresent()) {
        // begin tracking resources of the intent and each of its installables
        IntentData intentData = toInstall.get();
        trackerService.addTrackedResources(intentData.key(), intentData.intent().resources());
        installIntents.forEach(installable ->
            trackerService.addTrackedResources(intentData.key(), installable.resources()));
    }

    FlowObjectiveIntentInstallationContext intentInstallationContext =
        new FlowObjectiveIntentInstallationContext(intentOperationContext);

    // REMOVE objectives are queued as pending (executed first)
    uninstallIntents.stream()
        .map(intent -> buildObjectiveContexts(intent, REMOVE))
        .flatMap(Collection::stream)
        .forEach(context -> {
            context.intentInstallationContext(intentInstallationContext);
            intentInstallationContext.addContext(context);
            intentInstallationContext.addPendingContext(context);
        });

    // ADD objectives are queued as next-pending (executed after removals)
    installIntents.stream()
        .map(intent -> buildObjectiveContexts(intent, ADD))
        .flatMap(Collection::stream)
        .forEach(context -> {
            context.intentInstallationContext(intentInstallationContext);
            intentInstallationContext.addContext(context);
            intentInstallationContext.addNextPendingContext(context);
        });

    intentInstallationContext.apply();
}
// Installing compiled flow-objective intents (no uninstalls) must complete and
// report the same operation context as a success.
@Test
public void testInstallIntent() {
    List<Intent> intentsToUninstall = Lists.newArrayList();
    List<Intent> intentsToInstall = createFlowObjectiveIntents();
    IntentData toUninstall = null;
    IntentData toInstall = new IntentData(createP2PIntent(), IntentState.INSTALLING, new WallClockTimestamp());
    toInstall = IntentData.compiled(toInstall, intentsToInstall);
    IntentOperationContext<FlowObjectiveIntent> operationContext;
    IntentInstallationContext context = new IntentInstallationContext(toUninstall, toInstall);
    operationContext = new IntentOperationContext(intentsToUninstall, intentsToInstall, context);
    installer.apply(operationContext);
    IntentOperationContext successContext = intentInstallCoordinator.successContext;
    assertEquals(successContext, operationContext);
}
/**
 * Groups the topologically sorted vertices into pipelined regions: vertices
 * connected by must-be-pipelined consumed results end up in the same region.
 */
public static Set<Set<LogicalVertex>> computePipelinedRegions(
        final Iterable<? extends LogicalVertex> topologicallySortedVertices) {
    final Map<LogicalVertex, Set<LogicalVertex>> vertexToRegion =
            PipelinedRegionComputeUtil.buildRawRegions(
                    topologicallySortedVertices,
                    LogicalPipelinedRegionComputeUtil::getMustBePipelinedConsumedResults);

    // Since LogicalTopology is a DAG, there is no need to do cycle detection nor to merge
    // regions on cycles.
    return uniqueVertexGroups(vertexToRegion);
}
// In a diamond v1->(v2,v3)->v4, pipelined edges v1->v3->v4 and v2->v4 pull all four
// vertices into one region despite the blocking edge v1->v2.
@Test
void testDiamondWithMixedPipelinedAndBlockingEdges() {
    JobVertex v1 = new JobVertex("v1");
    JobVertex v2 = new JobVertex("v2");
    JobVertex v3 = new JobVertex("v3");
    JobVertex v4 = new JobVertex("v4");
    v2.connectNewDataSetAsInput(
            v1, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
    v4.connectNewDataSetAsInput(
            v2, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
    v3.connectNewDataSetAsInput(
            v1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
    v4.connectNewDataSetAsInput(
            v3, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
    Set<Set<LogicalVertex>> regions = computePipelinedRegions(v1, v2, v3, v4);
    checkRegionSize(regions, 1, 4);
}
/**
 * Returns the vuln-detector plugins that match the given reconnaissance report.
 * Non-detector plugins are filtered out; each remaining plugin is expanded into
 * its (possibly empty) set of matches.
 */
public ImmutableList<PluginMatchingResult<VulnDetector>> getVulnDetectors(
    ReconnaissanceReport reconnaissanceReport) {
    return tsunamiPlugins.entrySet().stream()
        .filter(entry -> isVulnDetector(entry.getKey()))
        .flatMap(entry ->
            Streams.stream(
                matchAllVulnDetectors(entry.getKey(), entry.getValue(), reconnaissanceReport)))
        .collect(toImmutableList());
}
// A detector filtered to different software must not match a report that only
// exposes a WordPress service, so the result is empty.
@Test
public void getVulnDetectors_whenSoftwareFilterHasNoMatchingService_returnsEmpty() {
    NetworkService wordPressService =
        NetworkService.newBuilder()
            .setNetworkEndpoint(NetworkEndpointUtils.forIpAndPort("1.1.1.1", 443))
            .setTransportProtocol(TransportProtocol.TCP)
            .setServiceName("https")
            .setSoftware(Software.newBuilder().setName("WordPress"))
            .build();
    ReconnaissanceReport fakeReconnaissanceReport =
        ReconnaissanceReport.newBuilder()
            .setTargetInfo(TargetInfo.getDefaultInstance())
            .addNetworkServices(wordPressService)
            .build();
    PluginManager pluginManager =
        Guice.createInjector(
                new FakePortScannerBootstrapModule(),
                new FakeServiceFingerprinterBootstrapModule(),
                FakeSoftwareFilteringDetector.getModule())
            .getInstance(PluginManager.class);
    ImmutableList<PluginMatchingResult<VulnDetector>> vulnDetectors =
        pluginManager.getVulnDetectors(fakeReconnaissanceReport);
    assertThat(vulnDetectors).isEmpty();
}
/**
 * Returns the configured value of this property, or the default when the
 * configured value is missing, an empty collection, or outside [minValue, maxValue].
 * Out-of-range values are logged as warnings before falling back.
 */
@SuppressWarnings("unchecked")
public T getValue() {
    // convert the stored string through the registered converter for this property's type
    final T value = (T) FROM_STRING.get(getConverterClass()).apply(JiveGlobals.getProperty(key), this);
    // missing value or an empty collection falls back to the default
    if (value == null || (Collection.class.isAssignableFrom(value.getClass()) && ((Collection) value).isEmpty())) {
        return defaultValue;
    }
    if (minValue != null && ((Comparable) minValue).compareTo(value) > 0) {
        LOGGER.warn("Configured value of {} is less than the minimum value of {} for the SystemProperty {} - will use default value of {} instead",
            value, minValue, key, defaultValue);
        return defaultValue;
    }
    if (maxValue != null && ((Comparable) maxValue).compareTo(value) < 0) {
        LOGGER.warn("Configured value of {} is more than the maximum value of {} for the SystemProperty {} - will use default value of {} instead",
            value, maxValue, key, defaultValue);
        return defaultValue;
    }
    return value;
}
// A comma-separated list with surrounding whitespace must parse into trimmed entries.
@Test
public void willCreateAListOfCommaWhitespaceSeparatedString2() {
    final String key = "another whitespace csv list property";
    final SystemProperty<List<String>> property = SystemProperty.Builder.ofType(List.class)
        .setKey(key)
        .setDefaultValue(Collections.emptyList())
        .setDynamic(true)
        .buildList(String.class);
    JiveGlobals.setProperty(key, "1 , 2 , 3");
    assertThat(property.getValue(), is(Arrays.asList("1", "2", "3")));
}
/**
 * Restarts a workflow instance per the given request.
 * A NON_TERMINAL_ERROR from the recursive restart means the instance is not in a
 * terminal state; that is logged and surfaced as a bad-request error.
 */
public RunResponse restart(RunRequest runRequest) {
    RunResponse runResponse = restartRecursively(runRequest);
    if (runResponse.getStatus() != RunResponse.Status.NON_TERMINAL_ERROR) {
        return runResponse;
    }
    LOG.error(
        "workflow instance {} does not support restart action as it is in a non-terminal status [{}]",
        runRequest.getWorkflowIdentity(),
        runResponse.getTimelineEvent().getMessage());
    throw new MaestroBadRequestException(
        Collections.emptyList(),
        "workflow instance %s does not support restart action as it is in a non-terminal status [%s]",
        runRequest.getWorkflowIdentity(),
        runResponse.getTimelineEvent().getMessage());
}
// Restarting a SUCCEEDED instance from the beginning must create a new workflow run.
@Test
public void testRestart() {
    WorkflowInstance wfInstance = new WorkflowInstance();
    wfInstance.setInitiator(new ManualInitiator());
    wfInstance.setStatus(WorkflowInstance.Status.SUCCEEDED);
    wfInstance.setWorkflowInstanceId(10L);
    wfInstance.setWorkflowRunId(1L);
    wfInstance.setWorkflowId("test-workflow");
    wfInstance.setRuntimeWorkflow(Workflow.builder().build());
    RunRequest request =
        RunRequest.builder()
            .initiator(new ManualInitiator())
            .currentPolicy(RunPolicy.RESTART_FROM_BEGINNING)
            .restartConfig(
                RestartConfig.builder().addRestartNode("test-workflow", 10L, null).build())
            .build();
    when(instanceDao.getWorkflowInstance("test-workflow", 10L, Constants.LATEST_INSTANCE_RUN, true))
        .thenReturn(wfInstance);
    when(runStrategyDao.startWithRunStrategy(any(), any())).thenReturn(1);
    when(instanceDao.getLatestWorkflowInstanceStatus(any(), anyLong()))
        .thenReturn(WorkflowInstance.Status.SUCCEEDED);
    RunResponse response = actionHandler.restart(request);
    assertEquals("test-workflow", response.getWorkflowId());
    assertEquals(10L, response.getWorkflowInstanceId());
    assertEquals(RunResponse.Status.WORKFLOW_RUN_CREATED, response.getStatus());
}
/**
 * Starts a new account registration for the given flow.
 * On a NOK result from the registration service an error response is returned;
 * otherwise a new app session is initialized and its id returned to the client.
 *
 * @param flow the current flow driving this request
 * @param request the account request payload (language, NFC support, ...)
 * @return a NOK-style response on failure, or an {@code AppSessionResponse} on success
 */
@Override
public AppResponse process(Flow flow, RequestAccountRequest request) {
    digidClient.remoteLog("3");
    Map<String, Object> result = digidClient.createRegistration(request);

    if (result.get(lowerUnderscore(STATUS)).equals("NOK")) {
        // NOTE(review): the status is read via lowerUnderscore(STATUS) but the error is
        // read via the raw ERROR key — confirm the registration response actually uses
        // the raw key here (the test supplies lowerUnderscore(ERROR)).
        if (result.get(ERROR) != null) {
            return new StartAccountRequestNokResponse((String) result.get(ERROR), result);
        }
        return new NokResponse();
    }

    // Success: seed a fresh app session with the registration details.
    appSession = new AppSession();
    appSession.setState(State.INITIALIZED.name());
    appSession.setFlow(flow.getName());
    appSession.setRegistrationId(Long.valueOf((Integer) result.get(lowerUnderscore(REGISTRATION_ID))));
    appSession.setLanguage(request.getLanguage());
    appSession.setNfcSupport(request.getNfcSupport());

    if (!request.getNfcSupport()) {
        // Extra remote log entry when the device reports no NFC support.
        digidClient.remoteLog("1506", Map.of(lowerUnderscore(REGISTRATION_ID), appSession.getRegistrationId()));
    }

    digidClient.remoteLog("6", Map.of(lowerUnderscore(REGISTRATION_ID), appSession.getRegistrationId()));
    return new AppSessionResponse(appSession.getId(), Instant.now().getEpochSecond());
}
// Verifies the NOK path: when the registration service returns status NOK with an
// error, the handler logs "3", returns a NOK response carrying the error, and does
// not create an app session.
@Test
void processNOKTest() {
    RequestAccountRequest requestAccountRequest = createRequest();
    String expectedErrorMsg = "error";
    when(digidClientMock.createRegistration(requestAccountRequest)).thenReturn(Map.of(
        lowerUnderscore(STATUS), "NOK",
        lowerUnderscore(ERROR), expectedErrorMsg
    ));

    AppResponse appResponse = startAccountRequest.process(flowMock, requestAccountRequest);

    verify(digidClientMock, times(1)).remoteLog("3");
    assertTrue(appResponse instanceof NokResponse);
    assertEquals(expectedErrorMsg, ((NokResponse) appResponse).getError());
    // No session should be created on the failure path.
    assertNull(startAccountRequest.getAppSession());
}
/**
 * Dissects an archive control request message into a human-readable form.
 * The common log header is appended first, then the SBE message header is decoded
 * and the matching flyweight decoder is wrapped over the body so its fields can be
 * appended to the builder.
 *
 * @param eventCode the archive event code identifying the request type
 * @param buffer buffer containing the encoded log entry
 * @param offset offset of the log entry within the buffer
 * @param builder builder to which the dissected representation is appended
 */
@SuppressWarnings("MethodLength")
static void dissectControlRequest(
    final ArchiveEventCode eventCode,
    final MutableDirectBuffer buffer,
    final int offset,
    final StringBuilder builder)
{
    int encodedLength = dissectLogHeader(CONTEXT, eventCode, buffer, offset, builder);
    HEADER_DECODER.wrap(buffer, offset + encodedLength);
    encodedLength += MessageHeaderDecoder.ENCODED_LENGTH;

    // One case per request type: wrap the decoder, then append its fields.
    switch (eventCode)
    {
        case CMD_IN_CONNECT:
            CONNECT_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendConnect(builder);
            break;

        case CMD_IN_CLOSE_SESSION:
            CLOSE_SESSION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendCloseSession(builder);
            break;

        case CMD_IN_START_RECORDING:
            START_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStartRecording(builder);
            break;

        case CMD_IN_STOP_RECORDING:
            STOP_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopRecording(builder);
            break;

        case CMD_IN_REPLAY:
            REPLAY_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendReplay(builder);
            break;

        case CMD_IN_STOP_REPLAY:
            STOP_REPLAY_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopReplay(builder);
            break;

        case CMD_IN_LIST_RECORDINGS:
            LIST_RECORDINGS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendListRecordings(builder);
            break;

        case CMD_IN_LIST_RECORDINGS_FOR_URI:
            LIST_RECORDINGS_FOR_URI_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendListRecordingsForUri(builder);
            break;

        case CMD_IN_LIST_RECORDING:
            LIST_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendListRecording(builder);
            break;

        case CMD_IN_EXTEND_RECORDING:
            EXTEND_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendExtendRecording(builder);
            break;

        case CMD_IN_RECORDING_POSITION:
            RECORDING_POSITION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendRecordingPosition(builder);
            break;

        case CMD_IN_TRUNCATE_RECORDING:
            TRUNCATE_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendTruncateRecording(builder);
            break;

        case CMD_IN_STOP_RECORDING_SUBSCRIPTION:
            STOP_RECORDING_SUBSCRIPTION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopRecordingSubscription(builder);
            break;

        case CMD_IN_STOP_POSITION:
            STOP_POSITION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopPosition(builder);
            break;

        case CMD_IN_FIND_LAST_MATCHING_RECORD:
            FIND_LAST_MATCHING_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendFindLastMatchingRecord(builder);
            break;

        case CMD_IN_LIST_RECORDING_SUBSCRIPTIONS:
            LIST_RECORDING_SUBSCRIPTIONS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendListRecordingSubscriptions(builder);
            break;

        case CMD_IN_START_BOUNDED_REPLAY:
            BOUNDED_REPLAY_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStartBoundedReplay(builder);
            break;

        case CMD_IN_STOP_ALL_REPLAYS:
            STOP_ALL_REPLAYS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopAllReplays(builder);
            break;

        case CMD_IN_REPLICATE:
            REPLICATE_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendReplicate(builder);
            break;

        case CMD_IN_STOP_REPLICATION:
            STOP_REPLICATION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopReplication(builder);
            break;

        case CMD_IN_START_POSITION:
            START_POSITION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStartPosition(builder);
            break;

        case CMD_IN_DETACH_SEGMENTS:
            DETACH_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendDetachSegments(builder);
            break;

        case CMD_IN_DELETE_DETACHED_SEGMENTS:
            DELETE_DETACHED_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendDeleteDetachedSegments(builder);
            break;

        case CMD_IN_PURGE_SEGMENTS:
            PURGE_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendPurgeSegments(builder);
            break;

        case CMD_IN_ATTACH_SEGMENTS:
            ATTACH_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendAttachSegments(builder);
            break;

        case CMD_IN_MIGRATE_SEGMENTS:
            MIGRATE_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendMigrateSegments(builder);
            break;

        case CMD_IN_AUTH_CONNECT:
            AUTH_CONNECT_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendAuthConnect(builder);
            break;

        case CMD_IN_KEEP_ALIVE:
            KEEP_ALIVE_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendKeepAlive(builder);
            break;

        case CMD_IN_TAGGED_REPLICATE:
            TAGGED_REPLICATE_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendTaggedReplicate(builder);
            break;

        case CMD_IN_START_RECORDING2:
            START_RECORDING_REQUEST2_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStartRecording2(builder);
            break;

        case CMD_IN_EXTEND_RECORDING2:
            EXTEND_RECORDING_REQUEST2_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendExtendRecording2(builder);
            break;

        case CMD_IN_STOP_RECORDING_BY_IDENTITY:
            STOP_RECORDING_BY_IDENTITY_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopRecordingByIdentity(builder);
            break;

        case CMD_IN_PURGE_RECORDING:
            PURGE_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendPurgeRecording(builder);
            break;

        case CMD_IN_REPLICATE2:
            REPLICATE_REQUEST2_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendReplicate2(builder);
            break;

        case CMD_IN_REQUEST_REPLAY_TOKEN:
            REPLAY_TOKEN_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendReplayToken(builder);
            break;

        default:
            // Unrecognised event codes are still logged, just without field detail.
            builder.append(": unknown command");
    }
}
// Verifies the dissection of a PurgeSegments control request: the log header,
// event name, lengths, and all four request fields must appear in the output.
@Test
void controlRequestPurgeSegments() {
    internalEncodeLogHeader(buffer, 0, 1000, 1000, () -> 500_000_000L);
    final PurgeSegmentsRequestEncoder requestEncoder = new PurgeSegmentsRequestEncoder();
    requestEncoder.wrapAndApplyHeader(buffer, LOG_HEADER_LENGTH, headerEncoder)
        .controlSessionId(3)
        .correlationId(56)
        .recordingId(15)
        .newStartPosition(100);

    dissectControlRequest(CMD_IN_PURGE_SEGMENTS, buffer, 0, builder);

    assertEquals("[0.500000000] " + CONTEXT + ": " + CMD_IN_PURGE_SEGMENTS.name() + " [1000/1000]:" +
        " controlSessionId=3" +
        " correlationId=56" +
        " recordingId=15" +
        " newStartPosition=100",
        builder.toString());
}
@Override public void createTable(Table table) { validateTableType(table); // first assert the table name is unique if (tables.containsKey(table.getName())) { throw new IllegalArgumentException("Duplicate table name: " + table.getName()); } // invoke the provider's create providers.get(table.getType()).createTable(table); // store to the global metastore tables.put(table.getName(), table); }
// Verifies that a created table is registered in the store under its name.
@Test
public void testCreateTable() throws Exception {
    Table table = mockTable("person");
    store.createTable(table);

    Table actualTable = store.getTables().get("person");
    assertEquals(table, actualTable);
}
/**
 * Returns true if any of the given types, or any type nested in their type
 * parameters at any depth, is a distinct type.
 *
 * @param types the types to inspect
 * @return true if a distinct type is found anywhere in the type trees
 */
public static boolean containsDistinctType(List<Type> types) {
    // Iterative depth-first walk using the list as a work stack (no recursion).
    LinkedList<Type> pending = new LinkedList<>(types);
    while (!pending.isEmpty()) {
        Type candidate = pending.removeLast();
        if (isDistinctType(candidate)) {
            return true;
        }
        // Push nested type parameters so they are inspected as well.
        pending.addAll(candidate.getTypeParameters());
    }
    return false;
}
// Verifies distinct-type detection at the top level, one nesting level deep,
// and deeply nested inside arrays and rows.
@Test
public void testContainsDistinctType() {
    QualifiedObjectName distinctTypeName = QualifiedObjectName.valueOf("test.dt.int00");
    DistinctTypeInfo distinctTypeInfo = new DistinctTypeInfo(distinctTypeName, INTEGER.getTypeSignature(), Optional.empty(), false);
    DistinctType distinctType = new DistinctType(distinctTypeInfo, INTEGER, null);

    // check primitives
    assertFalse(containsDistinctType(ImmutableList.of(INTEGER, VARCHAR)));

    // check top level
    assertTrue(containsDistinctType(ImmutableList.of(distinctType)));

    // check first nesting level
    assertFalse(containsDistinctType(ImmutableList.of(RowType.anonymous(ImmutableList.of(INTEGER, VARCHAR)))));
    assertFalse(containsDistinctType(ImmutableList.of(new ArrayType(INTEGER))));
    assertTrue(containsDistinctType(ImmutableList.of(RowType.anonymous(ImmutableList.of(INTEGER, distinctType)))));
    assertTrue(containsDistinctType(ImmutableList.of(new ArrayType(distinctType))));

    // check deep nesting
    assertFalse(containsDistinctType(ImmutableList.of(new ArrayType(new ArrayType(INTEGER)))));
    assertFalse(containsDistinctType(ImmutableList.of(new ArrayType(RowType.anonymous(ImmutableList.of(INTEGER, VARCHAR))))));
    assertTrue(containsDistinctType(ImmutableList.of(new ArrayType(new ArrayType(distinctType)))));
    assertTrue(containsDistinctType(ImmutableList.of(new ArrayType(RowType.anonymous(ImmutableList.of(INTEGER, distinctType))))));
}
/**
 * Handles an IRC numeric error reply.
 * Only nickname-in-use errors receive special handling; every other error code is
 * deliberately ignored here.
 *
 * @param num the IRC numeric error code
 */
public void handleIrcError(int num) {
    if (num == IRCConstants.ERR_NICKNAMEINUSE) {
        handleNickInUse();
    }
}
// Verifies the nick-in-use error appends a "-" to the current nick and that no
// unrelated doNick call is made.
@Test
public void doHandleIrcErrorNickInUse() {
    when(connection.getNick()).thenReturn("nick");
    endpoint.handleIrcError(IRCConstants.ERR_NICKNAMEINUSE);
    verify(connection).doNick("nick-");
    when(connection.getNick()).thenReturn("nick---");
    // confirm doNick was not called
    verify(connection, never()).doNick("foo");
}
/**
 * Atomically claims the next producer id from this block.
 *
 * @return the claimed id, or {@link Optional#empty()} when the block is exhausted
 */
public Optional<Long> claimNextId() {
    // getAndIncrement makes the reservation atomic across concurrent claimers.
    final long candidate = producerIdCounter.getAndIncrement();
    return candidate > lastProducerId() ? Optional.empty() : Optional.of(candidate);
}
// Verifies that a single-id block hands out its one id to exactly one of two
// concurrent claimers; repeated 50 times to shake out races.
@Test
public void testClaimNextId() throws Exception {
    for (int i = 0; i < 50; i++) {
        ProducerIdsBlock block = new ProducerIdsBlock(0, 1, 1);
        CountDownLatch latch = new CountDownLatch(1);
        AtomicLong counter = new AtomicLong(0);
        CompletableFuture.runAsync(() -> {
            Optional<Long> pid = block.claimNextId();
            counter.addAndGet(pid.orElse(0L));
            latch.countDown();
        });
        Optional<Long> pid = block.claimNextId();
        counter.addAndGet(pid.orElse(0L));
        assertTrue(latch.await(1, TimeUnit.SECONDS));
        // Exactly one claimer must have received id 1; the other gets empty (0).
        assertEquals(1, counter.get());
    }
}
/**
 * Opens a write stream for a Swift large-object upload.
 * Output is buffered by a segmenting stream sized from the
 * "openstack.upload.largeobject.size.minimum" preference before reaching the
 * underlying large-upload stream.
 *
 * @param file the target file
 * @param status transfer status for the upload
 * @param callback connection callback (unused here)
 * @return a response stream whose getStatus() exposes the upload's storage object
 */
@Override
public HttpResponseOutputStream<StorageObject> write(final Path file, final TransferStatus status,
                                                     final ConnectionCallback callback) {
    final LargeUploadOutputStream proxy = new LargeUploadOutputStream(file, status);
    return new HttpResponseOutputStream<StorageObject>(new MemorySegementingOutputStream(proxy,
        new HostPreferences(session.getHost()).getInteger("openstack.upload.largeobject.size.minimum")),
        new SwiftAttributesFinderFeature(session, regionService), status) {
        @Override
        public StorageObject getStatus() {
            // Surface the response captured by the proxy once the upload completes.
            return proxy.getResponse();
        }
    };
}
// Verifies that a zero-length large-object upload succeeds, produces a findable
// zero-size object, and can be read back empty; cleans up the file afterwards.
@Test
public void testWriteZeroLength() throws Exception {
    final SwiftRegionService regionService = new SwiftRegionService(session);
    final SwiftLargeUploadWriteFeature feature = new SwiftLargeUploadWriteFeature(session, regionService,
        new SwiftSegmentService(session, ".segments-test/"));
    final Path container = new Path("test.cyberduck.ch", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final byte[] content = RandomUtils.nextBytes(0);
    final TransferStatus status = new TransferStatus();
    // Unknown length forces the streaming (chunked) upload path.
    status.setLength(-1L);
    final Path file = new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    final HttpResponseOutputStream<StorageObject> out = feature.write(file, status, new DisabledConnectionCallback());
    final ByteArrayInputStream in = new ByteArrayInputStream(content);
    assertEquals(content.length, IOUtils.copyLarge(in, out));
    in.close();
    out.close();
    assertNotNull(out.getStatus());
    assertEquals(0L, out.getStatus().getSize(), 0L);
    assertTrue(new DefaultFindFeature(session).find(file));
    final byte[] compare = new byte[content.length];
    final InputStream stream = new SwiftReadFeature(session, regionService).read(file,
        new TransferStatus().withLength(content.length), new DisabledConnectionCallback());
    IOUtils.readFully(stream, compare);
    stream.close();
    assertArrayEquals(content, compare);
    new SwiftDeleteFeature(session).delete(Collections.singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Formats an epoch-millisecond timestamp using the given date format pattern.
 *
 * @param date timestamp in milliseconds since the epoch
 * @param format the date format pattern to apply
 * @return the formatted date string
 */
public static String getString(long date, String format) {
    // Wrap the timestamp and delegate to the Date-based overload.
    return getString(new Date(date), format);
}
// Verifies the long-based overload formats an epoch-millisecond timestamp
// with the supplied pattern.
@Test
public void getString_longSignature() {
    String expectedDateAsString = "16/03/2020";
    long expectedDate = 1584371565000L;
    String format = "dd/MM/yyyy";
    assertEquals(expectedDateAsString, DateUtils.getString(expectedDate, format));
}
/**
 * Creates (or retrieves) the DEBUG-level node sensor that tracks skipped
 * idempotent updates as rate and total throughput metrics.
 *
 * @param threadId the thread id tag
 * @param taskId the task id tag
 * @param processorNodeId the processor node id tag
 * @param streamsMetrics the metrics registry to register the sensor with
 * @return the throughput sensor for skipped idempotent updates
 */
public static Sensor skippedIdempotentUpdatesSensor(final String threadId,
                                                    final String taskId,
                                                    final String processorNodeId,
                                                    final StreamsMetricsImpl streamsMetrics) {
    return throughputSensor(
        threadId,
        taskId,
        processorNodeId,
        IDEMPOTENT_UPDATE_SKIP,
        IDEMPOTENT_UPDATE_SKIP_RATE_DESCRIPTION,
        IDEMPOTENT_UPDATE_SKIP_TOTAL_DESCRIPTION,
        RecordingLevel.DEBUG,
        streamsMetrics
    );
}
// Verifies the idempotent-update-skip sensor is registered at node level with
// DEBUG recording and the expected rate/total metric descriptions.
@Test
public void shouldGetIdempotentUpdateSkipSensor() {
    final String metricNamePrefix = "idempotent-update-skip";
    final String descriptionOfCount = "The total number of skipped idempotent updates";
    final String descriptionOfRate = "The average number of skipped idempotent updates per second";
    when(streamsMetrics.nodeLevelSensor(THREAD_ID, TASK_ID, PROCESSOR_NODE_ID, metricNamePrefix, RecordingLevel.DEBUG))
        .thenReturn(expectedSensor);
    when(streamsMetrics.nodeLevelTagMap(THREAD_ID, TASK_ID, PROCESSOR_NODE_ID)).thenReturn(tagMap);

    getAndVerifySensor(
        () -> ProcessorNodeMetrics.skippedIdempotentUpdatesSensor(THREAD_ID, TASK_ID, PROCESSOR_NODE_ID, streamsMetrics),
        metricNamePrefix,
        descriptionOfRate,
        descriptionOfCount
    );
}
/**
 * Always throws: a client is not a cluster member, so there is no local member.
 *
 * @throws UnsupportedOperationException always
 */
@Override
@Nonnull
public Member getLocalMember() {
    throw new UnsupportedOperationException("Client has no local member!");
}
// Verifies that asking a client cluster for its local member is unsupported.
@Test(expected = UnsupportedOperationException.class)
public void getLocalMember() {
    client().getCluster().getLocalMember();
}
/**
 * Looks up an unversioned profile for an anonymous caller.
 * PNI lookups are rejected outright; otherwise the caller is validated either via
 * a group-send token or via the target's unidentified access key before the
 * profile is returned.
 *
 * @param request the anonymous profile request
 * @return a Mono emitting the profile response, or an error status on failure
 */
@Override
public Mono<GetUnversionedProfileResponse> getUnversionedProfile(final GetUnversionedProfileAnonymousRequest request) {
    final ServiceIdentifier targetIdentifier =
        ServiceIdentifierUtil.fromGrpcServiceIdentifier(request.getRequest().getServiceIdentifier());

    // Callers must be authenticated to request unversioned profiles by PNI
    if (targetIdentifier.identityType() == IdentityType.PNI) {
        throw Status.UNAUTHENTICATED.asRuntimeException();
    }

    // Resolve the target account according to the authentication mechanism used.
    final Mono<Account> account = switch (request.getAuthenticationCase()) {
        case GROUP_SEND_TOKEN ->
            // Token must cover the target; a missing account surfaces as NOT_FOUND.
            groupSendTokenUtil.checkGroupSendToken(request.getGroupSendToken(), List.of(targetIdentifier))
                .then(Mono.fromFuture(() -> accountsManager.getByServiceIdentifierAsync(targetIdentifier)))
                .flatMap(Mono::justOrEmpty)
                .switchIfEmpty(Mono.error(Status.NOT_FOUND.asException()));
        case UNIDENTIFIED_ACCESS_KEY ->
            getTargetAccountAndValidateUnidentifiedAccess(targetIdentifier, request.getUnidentifiedAccessKey().toByteArray());
        default -> Mono.error(Status.INVALID_ARGUMENT.asException());
    };

    return account.map(targetAccount ->
        ProfileGrpcHelper.buildUnversionedProfileResponse(targetIdentifier, null, targetAccount, profileBadgeConverter));
}
// Verifies that a group-send token issued for a different service identifier is
// rejected with UNAUTHENTICATED before any account lookup succeeds.
@Test
void getUnversionedProfileIncorrectGroupSendEndorsement() throws Exception {
    final AciServiceIdentifier targetServiceIdentifier = new AciServiceIdentifier(UUID.randomUUID());
    final AciServiceIdentifier authorizedServiceIdentifier = new AciServiceIdentifier(UUID.randomUUID());

    // Expiration must be on a day boundary; we want one in the future
    final Instant expiration = Instant.now().plus(Duration.ofDays(1)).truncatedTo(ChronoUnit.DAYS);

    // Token authorizes a different identifier than the one requested below.
    final byte[] token = AuthHelper.validGroupSendToken(SERVER_SECRET_PARAMS, List.of(authorizedServiceIdentifier), expiration);

    when(accountsManager.getByServiceIdentifierAsync(any())).thenReturn(
        CompletableFuture.completedFuture(Optional.empty()));

    final GetUnversionedProfileAnonymousRequest request = GetUnversionedProfileAnonymousRequest.newBuilder()
        .setGroupSendToken(ByteString.copyFrom(token))
        .setRequest(GetUnversionedProfileRequest.newBuilder()
            .setServiceIdentifier(
                ServiceIdentifierUtil.toGrpcServiceIdentifier(targetServiceIdentifier)))
        .build();

    assertStatusException(Status.UNAUTHENTICATED, () -> unauthenticatedServiceStub().getUnversionedProfile(request));
}
/**
 * Returns the shared SQL-to-Connect type converter instance.
 *
 * @return the singleton {@code SqlToConnectTypeConverter}
 */
public static SqlToConnectTypeConverter sqlToConnectConverter() {
    return SQL_TO_CONNECT_CONVERTER;
}
// Verifies that a struct following the generalized-union field naming convention
// converts to a Connect STRUCT schema tagged with the generalized-union parameter.
@Test
public void shouldConvertGeneralizedUnionTypeFromSqlToConnect() {
    // Given:
    SqlStruct sqlStruct = SqlStruct.builder()
        .field("connect_union_field_0", SqlPrimitiveType.of(SqlBaseType.STRING))
        .field("connect_union_field_1", SqlPrimitiveType.of(SqlBaseType.BOOLEAN))
        .field("connect_union_field_2", SqlPrimitiveType.of(SqlBaseType.INTEGER))
        .build();

    // When:
    Schema connectSchema = sqlToConnectConverter().toConnectSchema(sqlStruct);

    // Then:
    assertThat(connectSchema.type(), is(Schema.Type.STRUCT));
    assertFalse(connectSchema.schema().parameters().isEmpty());
    assertTrue(connectSchema.schema().parameters().containsKey(JsonSchemaData.GENERALIZED_TYPE_UNION));
}
/**
 * Intentionally empty: the top-level command performs no action by itself.
 */
@Override
public void run() {
    // top-level command, do nothing
}
// Verifies that submitting a job with -n assigns the given name to the job.
@Test
public void test_submit_nameUsed() {
    run("submit", "-n", "fooName", testJobJarFile.toString());

    // Submission is asynchronous; wait for the job to appear.
    assertTrueEventually(() -> assertEquals(1, hz.getJet().getJobs().size()), 5);
    Job job = hz.getJet().getJobs().get(0);
    assertEquals("fooName", job.getName());
}
/**
 * Processes a task status update (or a bare ping when taskStatus is null) from a
 * task attempt and returns AM feedback for it.
 * Unknown attempts are tolerated only if they unregistered recently; otherwise the
 * feedback is marked "task not found". Real updates are converted to the internal
 * TaskAttemptStatus representation and coalesced for downstream event handling.
 *
 * @param taskAttemptID the reporting task attempt
 * @param taskStatus the new status, or null for a liveness ping
 * @return feedback indicating whether the task is known and whether it is preempted
 */
@Override
public AMFeedback statusUpdate(TaskAttemptID taskAttemptID,
    TaskStatus taskStatus) throws IOException, InterruptedException {
  org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId yarnAttemptID =
      TypeConverter.toYarn(taskAttemptID);

  AMFeedback feedback = new AMFeedback();
  feedback.setTaskFound(true);

  AtomicReference<TaskAttemptStatus> lastStatusRef =
      attemptIdToStatus.get(yarnAttemptID);
  if (lastStatusRef == null) {
    // The task is not known, but it could be in the process of tearing
    // down gracefully or receiving a thread dump signal. Tolerate unknown
    // tasks as long as they have unregistered recently.
    if (!taskHeartbeatHandler.hasRecentlyUnregistered(yarnAttemptID)) {
      LOG.error("Status update was called with illegal TaskAttemptId: "
          + yarnAttemptID);
      feedback.setTaskFound(false);
    }
    return feedback;
  }

  // Propagating preemption to the task if TASK_PREEMPTION is enabled
  if (getConfig().getBoolean(MRJobConfig.TASK_PREEMPTION, false)
      && preemptionPolicy.isPreempted(yarnAttemptID)) {
    feedback.setPreemption(true);
    LOG.info("Setting preemption bit for task: " + yarnAttemptID
        + " of type " + yarnAttemptID.getTaskId().getTaskType());
  }

  if (taskStatus == null) {
    //We are using statusUpdate only as a simple ping
    if (LOG.isDebugEnabled()) {
      LOG.debug("Ping from " + taskAttemptID.toString());
    }
    // Consider ping from the tasks for liveliness check
    if (getConfig().getBoolean(MRJobConfig.MR_TASK_ENABLE_PING_FOR_LIVELINESS_CHECK,
        MRJobConfig.DEFAULT_MR_TASK_ENABLE_PING_FOR_LIVELINESS_CHECK)) {
      taskHeartbeatHandler.progressing(yarnAttemptID);
    }
    return feedback;
  }

  // if we are here there is an actual status update to be processed
  taskHeartbeatHandler.progressing(yarnAttemptID);

  TaskAttemptStatus taskAttemptStatus = new TaskAttemptStatus();
  taskAttemptStatus.id = yarnAttemptID;
  // Task sends the updated progress to the TT.
  taskAttemptStatus.progress = taskStatus.getProgress();
  // log the new progress
  taskAttemptLogProgressStamps.computeIfAbsent(taskAttemptID,
      k -> new TaskProgressLogPair(taskAttemptID))
      .update(taskStatus.getProgress());
  // Task sends the updated state-string to the TT.
  taskAttemptStatus.stateString = taskStatus.getStateString();
  // Task sends the updated phase to the TT.
  taskAttemptStatus.phase = TypeConverter.toYarn(taskStatus.getPhase());
  // Counters are updated by the task. Convert counters into new format as
  // that is the primary storage format inside the AM to avoid multiple
  // conversions and unnecessary heap usage.
  taskAttemptStatus.counters = new org.apache.hadoop.mapreduce.Counters(
      taskStatus.getCounters());

  // Map Finish time set by the task (map only)
  if (taskStatus.getIsMap() && taskStatus.getMapFinishTime() != 0) {
    taskAttemptStatus.mapFinishTime = taskStatus.getMapFinishTime();
  }

  // Shuffle Finish time set by the task (reduce only).
  if (!taskStatus.getIsMap() && taskStatus.getShuffleFinishTime() != 0) {
    taskAttemptStatus.shuffleFinishTime = taskStatus.getShuffleFinishTime();
  }

  // Sort finish time set by the task (reduce only).
  if (!taskStatus.getIsMap() && taskStatus.getSortFinishTime() != 0) {
    taskAttemptStatus.sortFinishTime = taskStatus.getSortFinishTime();
  }

  // Not Setting the task state. Used by speculation - will be set in TaskAttemptImpl
  //taskAttemptStatus.taskState =  TypeConverter.toYarn(taskStatus.getRunState());

  //set the fetch failures
  if (taskStatus.getFetchFailedMaps() != null
      && taskStatus.getFetchFailedMaps().size() > 0) {
    taskAttemptStatus.fetchFailedMaps =
        new ArrayList<org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId>();
    for (TaskAttemptID failedMapId : taskStatus.getFetchFailedMaps()) {
      taskAttemptStatus.fetchFailedMaps.add(
          TypeConverter.toYarn(failedMapId));
    }
  }

  // Task sends the information about the nextRecordRange to the TT

  //    TODO: The following are not needed here, but needed to be set somewhere inside AppMaster.
  //    taskStatus.getRunState(); // Set by the TT/JT. Transform into a state TODO
  //    taskStatus.getStartTime(); // Used to be set by the TaskTracker. This should be set by getTask().
  //    taskStatus.getFinishTime(); // Used to be set by TT/JT. Should be set when task finishes
  //    // This was used by TT to do counter updates only once every minute. So this
  //    // isn't ever changed by the Task itself.
  //    taskStatus.getIncludeCounters();

  coalesceStatusUpdate(yarnAttemptID, taskAttemptStatus, lastStatusRef);

  return feedback;
}
// Verifies a single status update is forwarded as a TaskAttemptStatusUpdateEvent
// preserving the fetch-failed maps and the reported phase.
@Test
public void testSingleStatusUpdate() throws IOException, InterruptedException {
    configureMocks();
    startListener(true);

    listener.statusUpdate(attemptID, firstReduceStatus);

    verify(ea).handle(eventCaptor.capture());
    TaskAttemptStatusUpdateEvent updateEvent =
        (TaskAttemptStatusUpdateEvent) eventCaptor.getValue();

    TaskAttemptStatus status = updateEvent.getTaskAttemptStatusRef().get();
    assertTrue(status.fetchFailedMaps.contains(TASKATTEMPTID1));
    assertEquals(1, status.fetchFailedMaps.size());
    assertEquals(Phase.SHUFFLE, status.phase);
}
/**
 * Returns a tracing decorator around the given {@link XAConnectionFactory}.
 *
 * @param xaConnectionFactory the factory to wrap
 * @return a {@code TracingXAConnectionFactory} delegating to the given factory
 */
public XAConnectionFactory xaConnectionFactory(XAConnectionFactory xaConnectionFactory) {
    return TracingXAConnectionFactory.create(xaConnectionFactory, this);
}
// Verifies the returned factory is the tracing decorator, not the raw input.
@Test
void xaConnectionFactory_wrapsInput() {
    assertThat(jmsTracing.xaConnectionFactory(mock(XAConnectionFactory.class)))
        .isInstanceOf(TracingXAConnectionFactory.class);
}
/**
 * Decodes a MaintenanceAssociation from its JSON representation.
 *
 * @param json the JSON object containing an "ma" node
 * @param context the codec context used for nested component/remote-MEP codecs
 * @param mdNameLen length of the parent Maintenance Domain name (used for MA id sizing)
 * @return the decoded MaintenanceAssociation, or null if json is null or not an object
 * @throws IllegalArgumentException if required attributes are missing or the name is invalid
 */
public MaintenanceAssociation decode(ObjectNode json, CodecContext context, int mdNameLen) {
    if (json == null || !json.isObject()) {
        return null;
    }

    JsonNode maNode = json.get(MA);

    String maName = nullIsIllegal(maNode.get(MA_NAME), "maName is required").asText();
    // Default the name type to CHARACTERSTRING when not explicitly specified.
    String maNameType = MaIdShort.MaIdType.CHARACTERSTRING.name();
    if (maNode.get(MA_NAME_TYPE) != null) {
        maNameType = maNode.get(MA_NAME_TYPE).asText();
    }

    try {
        MaIdShort maId = MdMaNameUtil.parseMaName(maNameType, maName);
        MaBuilder builder = DefaultMaintenanceAssociation.builder(maId, mdNameLen);

        JsonNode maNumericIdNode = maNode.get(MA_NUMERIC_ID);
        if (maNumericIdNode != null) {
            // Renamed from "mdNumericId": this value is the MA numeric id, not the MD's.
            short maNumericId = (short) maNumericIdNode.asInt();
            builder = builder.maNumericId(maNumericId);
        }

        if (maNode.get(CCM_INTERVAL) != null) {
            // Reassign the builder for consistency with the other chained builder calls.
            builder = builder.ccmInterval(CcmInterval.valueOf(maNode.get(CCM_INTERVAL).asText()));
        }

        // component-list is mandatory (may be empty).
        List<Component> componentList = (new ComponentCodec()).decode(
                (ArrayNode) nullIsIllegal(maNode.get(COMPONENT_LIST), "component-list is required"), context);
        for (Component component : componentList) {
            builder = builder.addToComponentList(component);
        }

        // rmep-list is optional.
        JsonNode rmepListJson = maNode.get(RMEP_LIST);
        if (rmepListJson != null) {
            List<MepId> remoteMeps = (new RMepCodec()).decode((ArrayNode) rmepListJson, context);
            for (MepId remoteMep : remoteMeps) {
                builder = builder.addToRemoteMepIdList(remoteMep);
            }
        }

        return builder.build();
    } catch (CfmConfigException e) {
        // Surface configuration errors as IllegalArgumentException, preserving the cause.
        throw new IllegalArgumentException(e);
    }
}
// Verifies decoding when no maNameType is given: the name defaults to
// CHARACTERSTRING and the numeric id is picked up.
@Test
public void testDecodeMa1NoTypeGiven() throws IOException {
    String mdString = "{\"ma\": { \"maName\": \"ma-1\"," +
        "\"component-list\": [], " +
        "\"rmep-list\": [], " +
        "\"maNumericId\": 1}}";

    InputStream input = new ByteArrayInputStream(
        mdString.getBytes(StandardCharsets.UTF_8));
    JsonNode cfg = mapper.readTree(input);
    MaintenanceAssociation maDecode1 = ((MaintenanceAssociationCodec) context
        .codec(MaintenanceAssociation.class))
        .decode((ObjectNode) cfg, context, 10);
    assertEquals(MAID1_CHAR, maDecode1.maId());
    assertEquals(1, maDecode1.maNumericId());
}
/**
 * Calculates the watermark throttling frame size for the given physical rel tree.
 * Uses the GCD of window sizes found by the visitor, capped by the maximum
 * interval allowed for joins; falls back to that maximum when no window
 * aggregation is present.
 *
 * @param rel the physical rel tree to inspect
 * @param evalContext context for evaluating expressions in the tree
 * @return the throttling frame size in the visitor's time unit
 */
public static long calculate(PhysicalRel rel, ExpressionEvalContext evalContext) {
    GcdCalculatorVisitor visitor = new GcdCalculatorVisitor(evalContext);
    visitor.go(rel);
    if (visitor.gcd == 0) {
        // there's no window aggr in the rel, return the value for joins, which is already capped at some reasonable value
        return visitor.maximumIntervalForJoins;
    }
    // if there's window aggr, cap it with the maximumIntervalForJoins
    return Math.min(visitor.gcd, visitor.maximumIntervalForJoins);
}
// Verifies that a plan without a sliding window falls back to the default
// stream-to-stream join throttling interval.
@Test
public void when_noSlidingWindowInTree_then_returnDefault() {
    HazelcastTable table = partitionedTable("map", asList(field(KEY, INT), field(VALUE, INT)), 1);
    List<QueryDataType> parameterTypes = asList(INT, INT);

    final String sql = "SELECT * FROM TABLE(IMPOSE_ORDER((SELECT __key, this FROM map), DESCRIPTOR(this), 1))";
    PhysicalRel optimizedPhysicalRel = optimizePhysical(sql, parameterTypes, table).getPhysical();

    assertPlan(optimizedPhysicalRel, plan(
        planRow(0, DropLateItemsPhysicalRel.class),
        planRow(1, FullScanPhysicalRel.class)
    ));

    assertThat(optimizedPhysicalRel.getInput(0)).isInstanceOf(FullScanPhysicalRel.class);
    assertThat(WatermarkThrottlingFrameSizeCalculator.calculate(optimizedPhysicalRel, MOCK_EEC))
        .isEqualTo(S2S_JOIN_MAX_THROTTLING_INTERVAL);
}
/**
 * Detects classpath resources visible to the given class loader and returns their
 * absolute file paths.
 *
 * @param classLoader the class loader whose classpath is scanned
 * @return absolute paths of the classpath entries found by the scan
 */
@Override
public List<String> detect(ClassLoader classLoader) {
    List<File> classpathContents = classGraph
        .disableNestedJarScanning()
        // NOTE(review): scan(1) presumably requests a single scanning thread —
        // confirm against the ClassGraph scan(int) documentation.
        .addClassLoader(classLoader)
        .scan(1)
        .getClasspathFiles();
    return classpathContents.stream().map(File::getAbsolutePath).collect(Collectors.toList());
}
// Verifies that a plain (non-archive, non-class) file on the classpath URL list
// is not reported as a classpath resource.
@Test
public void shouldNotDetectOrdinaryFiles() throws Exception {
    File textFile = tmpFolder.newFile("ordinaryTextFile.txt");
    ClassLoader classLoader = new URLClassLoader(new URL[] {textFile.toURI().toURL()});
    ClasspathScanningResourcesDetector detector =
        new ClasspathScanningResourcesDetector(new ClassGraph());

    List<String> result = detector.detect(classLoader);

    assertThat(result, not(hasItem(containsString(textFile.getCanonicalPath()))));
}
/**
 * Initializes the FileSystemAccess service.
 * Configures Hadoop security (Kerberos keytab login or simple auth), resolves the
 * Hadoop configuration directory, loads the service/file-system configuration, and
 * reads the name-node whitelist.
 *
 * @throws ServiceException on missing Kerberos settings (H01), keytab login
 *     failure (H02), unknown auth type (H09), missing Hadoop conf dir (H10), or
 *     failure to load the Hadoop configuration (H11)
 */
@Override
protected void init() throws ServiceException {
    LOG.info("Using FileSystemAccess JARs version [{}]", VersionInfo.getVersion());
    String security = getServiceConfig().get(AUTHENTICATION_TYPE, "simple").trim();
    if (security.equals("kerberos")) {
        String defaultName = getServer().getName();
        // Default keytab location: ~/<server-name>.keytab, overridable via config.
        String keytab = System.getProperty("user.home") + "/" + defaultName + ".keytab";
        keytab = getServiceConfig().get(KERBEROS_KEYTAB, keytab).trim();
        if (keytab.length() == 0) {
            throw new ServiceException(FileSystemAccessException.ERROR.H01, KERBEROS_KEYTAB);
        }
        String principal = defaultName + "/localhost@LOCALHOST";
        principal = getServiceConfig().get(KERBEROS_PRINCIPAL, principal).trim();
        if (principal.length() == 0) {
            throw new ServiceException(FileSystemAccessException.ERROR.H01, KERBEROS_PRINCIPAL);
        }
        Configuration conf = new Configuration();
        conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
        UserGroupInformation.setConfiguration(conf);
        try {
            UserGroupInformation.loginUserFromKeytab(principal, keytab);
        } catch (IOException ex) {
            throw new ServiceException(FileSystemAccessException.ERROR.H02, ex.getMessage(), ex);
        }
        LOG.info("Using FileSystemAccess Kerberos authentication, principal [{}] keytab [{}]", principal, keytab);
    } else if (security.equals("simple")) {
        Configuration conf = new Configuration();
        conf.set(HADOOP_SECURITY_AUTHENTICATION, "simple");
        UserGroupInformation.setConfiguration(conf);
        LOG.info("Using FileSystemAccess simple/pseudo authentication, principal [{}]", System.getProperty("user.name"));
    } else {
        throw new ServiceException(FileSystemAccessException.ERROR.H09, security);
    }

    // Resolve the Hadoop conf dir, falling back to the server config dir.
    String hadoopConfDirProp = getServiceConfig().get(HADOOP_CONF_DIR, getServer().getConfigDir());
    File hadoopConfDir = new File(hadoopConfDirProp).getAbsoluteFile();
    if (!hadoopConfDir.exists()) {
        hadoopConfDir = new File(getServer().getConfigDir()).getAbsoluteFile();
    }
    if (!hadoopConfDir.exists()) {
        throw new ServiceException(FileSystemAccessException.ERROR.H10, hadoopConfDir);
    }
    try {
        serviceHadoopConf = loadHadoopConf(hadoopConfDir);
        fileSystemConf = getNewFileSystemConfiguration();
    } catch (IOException ex) {
        throw new ServiceException(FileSystemAccessException.ERROR.H11, ex.toString(), ex);
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("FileSystemAccess FileSystem configuration:");
        for (Map.Entry entry : serviceHadoopConf) {
            LOG.debug("  {} = {}", entry.getKey(), entry.getValue());
        }
    }
    setRequiredServiceHadoopConf(serviceHadoopConf);

    nameNodeWhitelist = toLowerCase(getServiceConfig().getTrimmedStringCollection(NAME_NODE_WHITELIST));
}
// Verifies that the service loads the Hadoop configuration from the server's
// config dir and exposes custom properties (here "foo" = "FOO").
@Test
@TestDir
public void serviceHadoopConf() throws Exception {
    String dir = TestDirHelper.getTestDir().getAbsolutePath();
    String services = StringUtils.join(",",
        Arrays.asList(InstrumentationService.class.getName(),
                      SchedulerService.class.getName(),
                      FileSystemAccessService.class.getName()));

    Configuration conf = new Configuration(false);
    conf.set("server.services", services);
    Server server = new Server("server", dir, dir, dir, dir, conf);
    server.init();
    FileSystemAccessService fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class);
    Assert.assertEquals(fsAccess.serviceHadoopConf.get("foo"), "FOO");
    server.destroy();
}
/**
 * Returns the authentication data source captured for this authentication state.
 *
 * @return the {@code AuthenticationDataSource} held by this state
 */
@Override
public AuthenticationDataSource getAuthDataSource() {
    return authenticationDataSource;
}
// Verifies the HTTP-request constructor eagerly builds the auth data source but
// performs no authentication (the provider is never invoked).
@Test
public void verifyHttpAuthConstructorInitializesAuthDataSourceAndDoesNotAuthenticateData() {
    HttpServletRequest request = mock(HttpServletRequest.class);
    when(request.getRemoteAddr()).thenReturn("localhost");
    when(request.getRemotePort()).thenReturn(8080);
    CountingAuthenticationProvider provider = new CountingAuthenticationProvider();
    OneStageAuthenticationState authState = new OneStageAuthenticationState(request, provider);
    assertNotNull(authState.getAuthDataSource());
    assertEquals(provider.getAuthCallCount(), 0);
}
/**
 * Provides the ScmChangedFiles bean: loads SCM-changed files when the branch
 * configuration requires it, validating their absolute paths first.
 *
 * @param scmConfiguration SCM settings for the analysis
 * @param branchConfiguration branch/PR configuration
 * @param project the root input project (supplies the base dir)
 * @return ScmChangedFiles wrapping the changed files, or wrapping null when not loaded
 */
@Bean("ScmChangedFiles")
public ScmChangedFiles provide(ScmConfiguration scmConfiguration, BranchConfiguration branchConfiguration,
                               DefaultInputProject project) {
    Path rootBaseDir = project.getBaseDir();
    Set<ChangedFile> changedFiles = loadChangedFilesIfNeeded(scmConfiguration, branchConfiguration, rootBaseDir);
    // A null result means the provider could not (or did not need to) compute changes.
    if (changedFiles != null) {
        validatePaths(getAbsoluteFilePaths(changedFiles));
    }
    return new ScmChangedFiles(changedFiles);
}
// Verifies that when the SCM provider cannot compute changed files (returns null),
// the resulting ScmChangedFiles exposes null rather than an empty set.
@Test
public void testProviderDoesntSupport() {
    when(branchConfiguration.targetBranchName()).thenReturn("target");
    when(branchConfiguration.isPullRequest()).thenReturn(true);
    when(scmConfiguration.provider()).thenReturn(scmProvider);
    when(scmProvider.branchChangedFiles("target", rootBaseDir)).thenReturn(null);

    ScmChangedFiles scmChangedFiles = provider.provide(scmConfiguration, branchConfiguration, project);

    assertThat(scmChangedFiles.get()).isNull();
    verify(scmProvider).branchChangedFiles("target", rootBaseDir);
}