focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Renders this schema as a SQL type string, e.g. {@code "DECIMAL(10, 9)"}.
 *
 * <p>Falls back to the raw enum name when no dedicated formatter is
 * registered for {@code type}.
 */
public String toTypeString() {
    // needs a map instead of switch because for some reason switch creates an
    // internal class with no annotations that messes up EntityTest
    //
    // NOTE(review): getOrDefault with a non-null default only yields null if
    // the map explicitly maps `type` to null, so the orElseThrow is a guard
    // against a mis-populated TO_TYPE_STRING map.
    return Optional.ofNullable(TO_TYPE_STRING.getOrDefault(type, si -> si.type.name()))
        .orElseThrow(NullPointerException::new)
        .apply(this);
}
@Test
public void shouldCorrectlyFormatDecimalsWithPrecisionAndScale() {
    // DECIMAL must be rendered with both parameters from the parameter map,
    // formatted exactly as "DECIMAL(precision, scale)".
    final SchemaInfo schemaInfo = new SchemaInfo(
        SqlBaseType.DECIMAL,
        null,
        null,
        ImmutableMap.of("precision", 10, "scale", 9)
    );
    assertThat(schemaInfo.toTypeString(), equalTo("DECIMAL(10, 9)"));
}
/**
 * Shuts this manager down: stops the reconstruction initializer thread,
 * releases the blocks map, and unregisters the management MXBean.
 */
public void shutdown() {
    stopReconstructionInitializer();
    blocksMap.close();
    MBeans.unregister(mxBeanName);
    // Cleared after unregistering; presumably guards against a double
    // unregister on repeated shutdown -- TODO confirm MBeans.unregister
    // tolerates null.
    mxBeanName = null;
}
@Test (timeout = 300000) public void testPlacementPolicySatisfied() throws Exception { LOG.info("Starting testPlacementPolicySatisfied."); final String[] initialRacks = new String[]{ "/rack0", "/rack1", "/rack2", "/rack3", "/rack4", "/rack5"}; final String[] initialHosts = new String[]{ "host0", "host1", "host2", "host3", "host4", "host5"}; final int numDataBlocks = StripedFileTestUtil.getDefaultECPolicy() .getNumDataUnits(); final int numParityBlocks = StripedFileTestUtil.getDefaultECPolicy() .getNumParityUnits(); final long blockSize = 6 * 1024 * 1024; Configuration conf = new Configuration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1); MiniDFSCluster cluster = null; try { cluster = new MiniDFSCluster.Builder(conf) .racks(initialRacks) .hosts(initialHosts) .numDataNodes(initialRacks.length) .build(); cluster.waitActive(); final DistributedFileSystem dfs = cluster.getFileSystem(); final Path ecDir = new Path("/ec"); final Path testFileUnsatisfied = new Path(ecDir, "test1"); final Path testFileSatisfied = new Path(ecDir, "test2"); dfs.enableErasureCodingPolicy( StripedFileTestUtil.getDefaultECPolicy().getName()); cluster.getFileSystem().getClient().mkdirs(ecDir.toString(), null, true); cluster.getFileSystem().getClient() .setErasureCodingPolicy(ecDir.toString(), StripedFileTestUtil.getDefaultECPolicy().getName()); long fileLen = blockSize * numDataBlocks; // Create a file to be stored in 6 racks. DFSTestUtil.createFile(dfs, testFileUnsatisfied, fileLen, (short) 1, 1); // Block placement policy should be satisfied as rack count // is less than numDataBlocks + numParityBlocks. 
verifyPlacementPolicy(cluster, testFileUnsatisfied, true); LOG.info("Adding 3 new hosts in the existing racks."); cluster.startDataNodes(conf, 3, true, null, new String[]{"/rack3", "/rack4", "/rack5"}, new String[]{"host3-2", "host4-2", "host5-2"}, null); cluster.triggerHeartbeats(); LOG.info("Waiting for EC reconstruction to complete."); DFSTestUtil.waitForReplication(dfs, testFileUnsatisfied, (short)(numDataBlocks + numParityBlocks), 30 * 1000); // Block placement policy should still be satisfied // as there are only 6 racks. verifyPlacementPolicy(cluster, testFileUnsatisfied, true); LOG.info("Adding 3 new hosts in 3 new racks."); cluster.startDataNodes(conf, 3, true, null, new String[]{"/rack6", "/rack7", "/rack8"}, new String[]{"host6", "host7", "host8"}, null); cluster.triggerHeartbeats(); // Addition of new racks can make the existing EC files block // placements unsatisfied and there is NO automatic block // reconstruction for this yet. // TODO: // Verify for block placement satisfied once the automatic // block reconstruction is implemented. verifyPlacementPolicy(cluster, testFileUnsatisfied, false); // Create a new file DFSTestUtil.createFile(dfs, testFileSatisfied, fileLen, (short) 1, 1); // The new file should be rightly placed on all 9 racks // and the block placement policy should be satisfied. verifyPlacementPolicy(cluster, testFileUnsatisfied, false); } finally { if (cluster != null) { cluster.shutdown(); } } }
/**
 * Fetches a single application-attempt entity from the timeline service
 * REST endpoint, requesting only the INFO field set when the caller does
 * not specify fields.
 *
 * @param appAttemptId attempt whose entity is fetched
 * @param fields comma-separated field names; null/empty defaults to "INFO"
 * @param filters extra query parameters merged into the request
 * @return the deserialized timeline entity
 * @throws IOException on transport failure
 */
@Override
public TimelineEntity getApplicationAttemptEntity(
    ApplicationAttemptId appAttemptId, String fields,
    Map<String, String> filters) throws IOException {
  final ApplicationId appId = appAttemptId.getApplicationId();
  final String path = PATH_JOINER.join("clusters", clusterId, "apps", appId,
      "entities", YARN_APPLICATION_ATTEMPT, appAttemptId);
  final String requestedFields =
      (fields == null || fields.isEmpty()) ? "INFO" : fields;
  final MultivaluedMap<String, String> params = new MultivaluedMapImpl();
  params.add("fields", requestedFields);
  mergeFilters(params, filters);
  final ClientResponse response = doGetUri(baseUri, path, params);
  return response.getEntity(TimelineEntity.class);
}
@Test
void getApplicationAttemptEntity() throws Exception {
    // Fetch attempt 1 of a synthetic application; the mock server answers
    // with an entity whose id is "mockAppAttempt1".
    ApplicationAttemptId attemptId =
        ApplicationAttemptId.fromString("appattempt_1234_0001_000001");
    TimelineEntity entity = client.getApplicationAttemptEntity(attemptId, null, null);
    assertEquals("mockAppAttempt1", entity.getId());
}
/**
 * Serializes a CDC {@link Event} into JSON bytes.
 *
 * <p>Schema-change events update the per-table serializer cache and return
 * {@code null} (they produce no output record). Data-change events are
 * rendered through the cached per-table serializer into a reused row whose
 * fields are: 0 = "old" rows, 1 = "data" rows, 2 = operation type,
 * 3 = schema name, 4 = table name, 5 = primary-key names.
 */
@Override
public byte[] serialize(Event event) {
    if (event instanceof SchemaChangeEvent) {
        Schema schema;
        SchemaChangeEvent schemaChangeEvent = (SchemaChangeEvent) event;
        if (event instanceof CreateTableEvent) {
            // New table: take the schema directly from the create event.
            CreateTableEvent createTableEvent = (CreateTableEvent) event;
            schema = createTableEvent.getSchema();
        } else {
            // Evolve the previously cached schema with this change.
            // NOTE(review): NPEs if no CreateTableEvent was seen for this
            // table first -- presumably upstream guarantees that ordering;
            // confirm.
            schema =
                SchemaUtils.applySchemaChangeEvent(
                    jsonSerializers.get(schemaChangeEvent.tableId()).getSchema(),
                    schemaChangeEvent);
        }
        LogicalType rowType =
            DataTypeUtils.toFlinkDataType(schema.toRowDataType()).getLogicalType();
        JsonRowDataSerializationSchema jsonSerializer =
            new JsonRowDataSerializationSchema(
                createJsonRowType(fromLogicalToDataType(rowType)),
                timestampFormat,
                mapNullKeyMode,
                mapNullKeyLiteral,
                encodeDecimalAsPlainNumber);
        try {
            // The serializer must be opened before first use.
            jsonSerializer.open(context);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
        jsonSerializers.put(
            schemaChangeEvent.tableId(),
            new TableSchemaInfo(
                schemaChangeEvent.tableId(), schema, jsonSerializer, zoneId));
        return null;
    }

    // Everything that is not a schema change is treated as a data change.
    // NOTE(review): an unknown Event subtype would surface here as a
    // ClassCastException rather than a descriptive error.
    DataChangeEvent dataChangeEvent = (DataChangeEvent) event;
    reuseGenericRowData.setField(
        3, StringData.fromString(dataChangeEvent.tableId().getSchemaName()));
    reuseGenericRowData.setField(
        4, StringData.fromString(dataChangeEvent.tableId().getTableName()));
    reuseGenericRowData.setField(
        5,
        new GenericArrayData(
            jsonSerializers.get(dataChangeEvent.tableId()).getSchema().primaryKeys()
                .stream()
                .map(StringData::fromString)
                .toArray()));
    try {
        switch (dataChangeEvent.op()) {
            case INSERT:
                // INSERT: no "old" image, single "data" row.
                reuseGenericRowData.setField(0, null);
                reuseGenericRowData.setField(
                    1,
                    new GenericArrayData(
                        new RowData[] {
                            jsonSerializers
                                .get(dataChangeEvent.tableId())
                                .getRowDataFromRecordData(
                                    dataChangeEvent.after(), false)
                        }));
                reuseGenericRowData.setField(2, OP_INSERT);
                return jsonSerializers
                    .get(dataChangeEvent.tableId())
                    .getSerializationSchema()
                    .serialize(reuseGenericRowData);
            case DELETE:
                // DELETE: single "old" image, no "data" row.
                reuseGenericRowData.setField(
                    0,
                    new GenericArrayData(
                        new RowData[] {
                            jsonSerializers
                                .get(dataChangeEvent.tableId())
                                .getRowDataFromRecordData(
                                    dataChangeEvent.before(), false)
                        }));
                reuseGenericRowData.setField(1, null);
                reuseGenericRowData.setField(2, OP_DELETE);
                return jsonSerializers
                    .get(dataChangeEvent.tableId())
                    .getSerializationSchema()
                    .serialize(reuseGenericRowData);
            case UPDATE:
            case REPLACE:
                // UPDATE/REPLACE: both images present; emitted as UPDATE.
                reuseGenericRowData.setField(
                    0,
                    new GenericArrayData(
                        new RowData[] {
                            jsonSerializers
                                .get(dataChangeEvent.tableId())
                                .getRowDataFromRecordData(
                                    dataChangeEvent.before(), false)
                        }));
                reuseGenericRowData.setField(
                    1,
                    new GenericArrayData(
                        new RowData[] {
                            jsonSerializers
                                .get(dataChangeEvent.tableId())
                                .getRowDataFromRecordData(
                                    dataChangeEvent.after(), false)
                        }));
                reuseGenericRowData.setField(2, OP_UPDATE);
                return jsonSerializers
                    .get(dataChangeEvent.tableId())
                    .getSerializationSchema()
                    .serialize(reuseGenericRowData);
            default:
                throw new UnsupportedOperationException(
                    format(
                        "Unsupported operation '%s' for OperationType.",
                        dataChangeEvent.op()));
        }
    } catch (Throwable t) {
        throw new RuntimeException(format("Could not serialize event '%s'.", event), t);
    }
}
@Test
public void testSerialize() throws Exception {
    // Golden-JSON test for the Canal-JSON serializer: create table, two
    // inserts, a delete and an update, each compared as parsed JSON trees
    // (so key ordering is irrelevant).
    ObjectMapper mapper =
        JacksonMapperFactory.createObjectMapper()
            .configure(JsonGenerator.Feature.WRITE_BIGDECIMAL_AS_PLAIN, false);
    SerializationSchema<Event> serializationSchema =
        ChangeLogJsonFormatFactory.createSerializationSchema(
            new Configuration(),
            JsonSerializationType.CANAL_JSON,
            ZoneId.systemDefault());
    serializationSchema.open(new MockInitializationContext());

    // create table -- schema-change events must serialize to null
    Schema schema =
        Schema.newBuilder()
            .physicalColumn("col1", DataTypes.STRING())
            .physicalColumn("col2", DataTypes.STRING())
            .primaryKey("col1")
            .build();
    CreateTableEvent createTableEvent = new CreateTableEvent(TABLE_1, schema);
    Assertions.assertNull(serializationSchema.serialize(createTableEvent));

    // insert
    BinaryRecordDataGenerator generator =
        new BinaryRecordDataGenerator(RowType.of(DataTypes.STRING(), DataTypes.STRING()));
    DataChangeEvent insertEvent1 =
        DataChangeEvent.insertEvent(
            TABLE_1,
            generator.generate(
                new Object[] {
                    BinaryStringData.fromString("1"),
                    BinaryStringData.fromString("1")
                }));
    JsonNode expected =
        mapper.readTree(
            "{\"old\":null,\"data\":[{\"col1\":\"1\",\"col2\":\"1\"}],\"type\":\"INSERT\",\"database\":\"default_schema\",\"table\":\"table1\",\"pkNames\":[\"col1\"]}");
    JsonNode actual = mapper.readTree(serializationSchema.serialize(insertEvent1));
    Assertions.assertEquals(expected, actual);

    DataChangeEvent insertEvent2 =
        DataChangeEvent.insertEvent(
            TABLE_1,
            generator.generate(
                new Object[] {
                    BinaryStringData.fromString("2"),
                    BinaryStringData.fromString("2")
                }));
    expected =
        mapper.readTree(
            "{\"old\":null,\"data\":[{\"col1\":\"2\",\"col2\":\"2\"}],\"type\":\"INSERT\",\"database\":\"default_schema\",\"table\":\"table1\",\"pkNames\":[\"col1\"]}");
    actual = mapper.readTree(serializationSchema.serialize(insertEvent2));
    Assertions.assertEquals(expected, actual);

    // delete -- expects "old" populated and "data" null
    DataChangeEvent deleteEvent =
        DataChangeEvent.deleteEvent(
            TABLE_1,
            generator.generate(
                new Object[] {
                    BinaryStringData.fromString("2"),
                    BinaryStringData.fromString("2")
                }));
    expected =
        mapper.readTree(
            "{\"old\":[{\"col1\":\"2\",\"col2\":\"2\"}],\"data\":null,\"type\":\"DELETE\",\"database\":\"default_schema\",\"table\":\"table1\",\"pkNames\":[\"col1\"]}");
    actual = mapper.readTree(serializationSchema.serialize(deleteEvent));
    Assertions.assertEquals(expected, actual);

    // update -- both before ("old") and after ("data") images present
    DataChangeEvent updateEvent =
        DataChangeEvent.updateEvent(
            TABLE_1,
            generator.generate(
                new Object[] {
                    BinaryStringData.fromString("1"),
                    BinaryStringData.fromString("1")
                }),
            generator.generate(
                new Object[] {
                    BinaryStringData.fromString("1"),
                    BinaryStringData.fromString("x")
                }));
    expected =
        mapper.readTree(
            "{\"old\":[{\"col1\":\"1\",\"col2\":\"1\"}],\"data\":[{\"col1\":\"1\",\"col2\":\"x\"}],\"type\":\"UPDATE\",\"database\":\"default_schema\",\"table\":\"table1\",\"pkNames\":[\"col1\"]}");
    actual = mapper.readTree(serializationSchema.serialize(updateEvent));
    Assertions.assertEquals(expected, actual);
}
/**
 * Resolves the permissions for the given action, which must be a
 * {@link DestinationAction}.
 *
 * @throws IllegalArgumentException if the action is of any other type
 */
@Override
public Collection<Permission> getPermissions(Action action) {
    if (action instanceof DestinationAction) {
        return getPermissions((DestinationAction) action);
    }
    throw new IllegalArgumentException("Action argument must be a " + DestinationAction.class.getName() + " instance.");
}
@Test
public void testGetPermissionsWithTopic() {
    // A "create" action on a topic destination must resolve to exactly the
    // permission string "topic:myTopic:create".
    ActiveMQTopic topic = new ActiveMQTopic("myTopic");
    DestinationAction action = new DestinationAction(new ConnectionContext(), topic, "create");

    Collection<Permission> perms = resolver.getPermissions(action);
    assertPermString("topic:myTopic:create", perms);
}
/**
 * Subtracts {@code interval} units of {@code unit} from {@code timestamp}.
 * Follows SQL null semantics: any null argument yields a null result.
 */
@Udf(description = "Subtracts a duration from a timestamp")
public Timestamp timestampSub(
    @UdfParameter(description = "A unit of time, for example DAY or HOUR") final TimeUnit unit,
    @UdfParameter(
        description = "An integer number of intervals to subtract") final Integer interval,
    @UdfParameter(description = "A TIMESTAMP value.") final Timestamp timestamp
) {
  if (unit != null && interval != null && timestamp != null) {
    final long shiftedMillis = timestamp.getTime() - unit.toMillis(interval);
    return new Timestamp(shiftedMillis);
  }
  return null;
}
@Test
public void subtractFromTimestampNegativeResult() {
    // When: subtracting more milliseconds than the input's epoch offset
    final Timestamp result = udf.timestampSub(TimeUnit.MILLISECONDS, 300, new Timestamp(100));

    // Then: the result is legitimately negative (an instant before the epoch)
    final Timestamp expectedResult = new Timestamp(-200);
    assertThat(result, is(expectedResult));
}
/**
 * Reports whether the given resource belongs to this recoverable resource,
 * by comparing resource names. The argument is expected to be a
 * {@code SingleXAResource}; any other type fails with ClassCastException,
 * matching the original behavior.
 */
@Override
public boolean usesXAResource(final XAResource xaResource) {
    final String candidateName = ((SingleXAResource) xaResource).getResourceName();
    return resourceName.equals(candidateName);
}
@Test
void assertUseXAResource() {
    // The name given at construction ("ds1") must match the name reported
    // by the mocked SingleXAResource for usesXAResource to return true.
    AtomikosXARecoverableResource atomikosXARecoverableResource = new AtomikosXARecoverableResource("ds1", xaDataSource);
    assertTrue(atomikosXARecoverableResource.usesXAResource(singleXAResource));
}
/**
 * Expands an Eddystone-URL compressed byte array back into a URL string.
 *
 * <p>The low nibble of the first byte selects the protocol/"www." prefix;
 * each following byte is either a literal character or, when it maps to a
 * known top-level-domain code, its multi-character expansion. Decoding
 * stops at two consecutive zero bytes.
 *
 * @param compressedURL the Eddystone-URL frame payload; must be non-empty
 * @return the reconstructed URL
 */
public static String uncompress(byte[] compressedURL) {
    // FIX(review): StringBuilder instead of StringBuffer -- the buffer is
    // method-local and never shared across threads, so the synchronized
    // StringBuffer was pure overhead.
    StringBuilder url = new StringBuilder();
    switch (compressedURL[0] & 0x0f) {
        case EDDYSTONE_URL_PROTOCOL_HTTP_WWW:
            url.append(URL_PROTOCOL_HTTP_WWW_DOT);
            break;
        case EDDYSTONE_URL_PROTOCOL_HTTPS_WWW:
            url.append(URL_PROTOCOL_HTTPS_WWW_DOT);
            break;
        case EDDYSTONE_URL_PROTOCOL_HTTP:
            url.append(URL_PROTOCOL_HTTP_COLON_SLASH_SLASH);
            break;
        case EDDYSTONE_URL_PROTOCOL_HTTPS:
            url.append(URL_PROTOCOL_HTTPS_COLON_SLASH_SLASH);
            break;
        default:
            break;
    }
    byte lastByte = -1;
    for (int i = 1; i < compressedURL.length; i++) {
        byte b = compressedURL[i];
        // Two consecutive zero bytes terminate the payload.
        if (lastByte == 0 && b == 0) {
            break;
        }
        lastByte = b;
        String tld = topLevelDomainForByte(b);
        if (tld != null) {
            url.append(tld);
        } else {
            url.append((char) b);
        }
    }
    return url.toString();
}
@Test
public void testUncompressHttpsURL() {
    // Prefix byte 0x01 encodes "https://www.", the trailing 0x07 is the
    // TLD expansion code for ".com"; the bytes in between are literals.
    String testURL = "https://www.radiusnetworks.com";
    byte[] testBytes = {0x01, 'r', 'a', 'd', 'i', 'u', 's', 'n', 'e', 't', 'w', 'o', 'r', 'k', 's', 0x07};
    assertEquals(testURL, UrlBeaconUrlCompressor.uncompress(testBytes));
}
@ProcessElement public ProcessContinuation processElement( @Element PulsarSourceDescriptor pulsarSourceDescriptor, RestrictionTracker<OffsetRange, Long> tracker, WatermarkEstimator watermarkEstimator, OutputReceiver<PulsarMessage> output) throws IOException { long startTimestamp = tracker.currentRestriction().getFrom(); String topicDescriptor = pulsarSourceDescriptor.getTopic(); try (Reader<byte[]> reader = newReader(this.client, topicDescriptor)) { if (startTimestamp > 0) { reader.seek(startTimestamp); } while (true) { if (reader.hasReachedEndOfTopic()) { reader.close(); return ProcessContinuation.stop(); } Message<byte[]> message = reader.readNext(); if (message == null) { return ProcessContinuation.resume(); } Long currentTimestamp = message.getPublishTime(); // if tracker.tryclaim() return true, sdf must execute work otherwise // doFn must exit processElement() without doing any work associated // or claiming more work if (!tracker.tryClaim(currentTimestamp)) { reader.close(); return ProcessContinuation.stop(); } if (pulsarSourceDescriptor.getEndMessageId() != null) { MessageId currentMsgId = message.getMessageId(); boolean hasReachedEndMessageId = currentMsgId.compareTo(pulsarSourceDescriptor.getEndMessageId()) == 0; if (hasReachedEndMessageId) { return ProcessContinuation.stop(); } } PulsarMessage pulsarMessage = new PulsarMessage(message.getTopicName(), message.getPublishTime(), message); Instant outputTimestamp = extractOutputTimestampFn.apply(message); output.outputWithTimestamp(pulsarMessage, outputTimestamp); } } }
@Test
public void testProcessElementWhenHasReachedEndTopic() throws Exception {
    // When the fake reader reports end-of-topic immediately, the SDF must
    // return stop() without emitting any output.
    MockOutputReceiver receiver = new MockOutputReceiver();
    fakePulsarReader.setReachedEndOfTopic(true);
    OffsetRangeTracker tracker = new OffsetRangeTracker(new OffsetRange(0L, Long.MAX_VALUE));
    DoFn.ProcessContinuation result =
        dofnInstance.processElement(
            PulsarSourceDescriptor.of(TOPIC, null, null, null, SERVICE_URL, ADMIN_URL),
            tracker,
            null,
            (DoFn.OutputReceiver) receiver);
    assertEquals(DoFn.ProcessContinuation.stop(), result);
}
public void unionFields(Record other) { final int minFields = Math.min(this.numFields, other.numFields); final int maxFields = Math.max(this.numFields, other.numFields); final int[] offsets = this.offsets.length >= maxFields ? this.offsets : new int[maxFields]; final int[] lengths = this.lengths.length >= maxFields ? this.lengths : new int[maxFields]; if (!(this.isModified() || other.isModified())) { // handle the special (but common) case where both records have a valid binary // representation differently // allocate space for the switchBuffer first final int estimatedLength = this.binaryLen + other.binaryLen; this.serializer.memory = (this.switchBuffer != null && this.switchBuffer.length >= estimatedLength) ? this.switchBuffer : new byte[estimatedLength]; this.serializer.position = 0; try { // common loop for both records for (int i = 0; i < minFields; i++) { final int thisOff = this.offsets[i]; if (thisOff == NULL_INDICATOR_OFFSET) { final int otherOff = other.offsets[i]; if (otherOff == NULL_INDICATOR_OFFSET) { offsets[i] = NULL_INDICATOR_OFFSET; } else { // take field from other record offsets[i] = this.serializer.position; this.serializer.write(other.binaryData, otherOff, other.lengths[i]); lengths[i] = other.lengths[i]; } } else { // copy field from this one offsets[i] = this.serializer.position; this.serializer.write(this.binaryData, thisOff, this.lengths[i]); lengths[i] = this.lengths[i]; } } // add the trailing fields from one record if (minFields != maxFields) { final Record sourceForRemainder = this.numFields > minFields ? 
this : other; int begin = -1; int end = -1; int offsetDelta = 0; // go through the offsets, find the non-null fields to account for the remaining // data for (int k = minFields; k < maxFields; k++) { final int off = sourceForRemainder.offsets[k]; if (off == NULL_INDICATOR_OFFSET) { offsets[k] = NULL_INDICATOR_OFFSET; } else { end = sourceForRemainder.offsets[k] + sourceForRemainder.lengths[k]; if (begin == -1) { // first non null column in the remainder begin = sourceForRemainder.offsets[k]; offsetDelta = this.serializer.position - begin; } offsets[k] = sourceForRemainder.offsets[k] + offsetDelta; } } // copy the remaining fields directly as binary if (begin != -1) { this.serializer.write(sourceForRemainder.binaryData, begin, end - begin); } // the lengths can be copied directly if (lengths != sourceForRemainder.lengths) { System.arraycopy( sourceForRemainder.lengths, minFields, lengths, minFields, maxFields - minFields); } } } catch (Exception ioex) { throw new RuntimeException( "Error creating field union of record data" + ioex.getMessage() == null ? "." : ": " + ioex.getMessage(), ioex); } } else { // the general case, where at least one of the two records has a binary representation // that is not in sync. final int estimatedLength = (this.binaryLen > 0 ? this.binaryLen : this.numFields * DEFAULT_FIELD_LEN_ESTIMATE) + (other.binaryLen > 0 ? other.binaryLen : other.numFields * DEFAULT_FIELD_LEN_ESTIMATE); this.serializer.memory = (this.switchBuffer != null && this.switchBuffer.length >= estimatedLength) ? 
this.switchBuffer : new byte[estimatedLength]; this.serializer.position = 0; try { // common loop for both records for (int i = 0; i < minFields; i++) { final int thisOff = this.offsets[i]; if (thisOff == NULL_INDICATOR_OFFSET) { final int otherOff = other.offsets[i]; if (otherOff == NULL_INDICATOR_OFFSET) { offsets[i] = NULL_INDICATOR_OFFSET; } else if (otherOff == MODIFIED_INDICATOR_OFFSET) { // serialize modified field from other record offsets[i] = this.serializer.position; other.writeFields[i].write(this.serializer); lengths[i] = this.serializer.position - offsets[i]; } else { // take field from other record binary offsets[i] = this.serializer.position; this.serializer.write(other.binaryData, otherOff, other.lengths[i]); lengths[i] = other.lengths[i]; } } else if (thisOff == MODIFIED_INDICATOR_OFFSET) { // serialize modified field from this record offsets[i] = this.serializer.position; this.writeFields[i].write(this.serializer); lengths[i] = this.serializer.position - offsets[i]; } else { // copy field from this one offsets[i] = this.serializer.position; this.serializer.write(this.binaryData, thisOff, this.lengths[i]); lengths[i] = this.lengths[i]; } } // add the trailing fields from one record if (minFields != maxFields) { final Record sourceForRemainder = this.numFields > minFields ? 
this : other; // go through the offsets, find the non-null fields for (int k = minFields; k < maxFields; k++) { final int off = sourceForRemainder.offsets[k]; if (off == NULL_INDICATOR_OFFSET) { offsets[k] = NULL_INDICATOR_OFFSET; } else if (off == MODIFIED_INDICATOR_OFFSET) { // serialize modified field from the source record offsets[k] = this.serializer.position; sourceForRemainder.writeFields[k].write(this.serializer); lengths[k] = this.serializer.position - offsets[k]; } else { // copy field from the source record binary offsets[k] = this.serializer.position; final int len = sourceForRemainder.lengths[k]; this.serializer.write(sourceForRemainder.binaryData, off, len); lengths[k] = len; } } } } catch (Exception ioex) { throw new RuntimeException( "Error creating field union of record data" + ioex.getMessage() == null ? "." : ": " + ioex.getMessage(), ioex); } } serializeHeader(this.serializer, offsets, maxFields); // set the fields this.switchBuffer = this.binaryData; this.binaryData = serializer.memory; this.binaryLen = serializer.position; this.numFields = maxFields; this.offsets = offsets; this.lengths = lengths; this.firstModifiedPos = Integer.MAX_VALUE; // make sure that the object arrays reflect the size as well if (this.readFields == null || this.readFields.length < maxFields) { final Value[] na = new Value[maxFields]; System.arraycopy(this.readFields, 0, na, 0, this.readFields.length); this.readFields = na; } this.writeFields = (this.writeFields == null || this.writeFields.length < maxFields) ? new Value[maxFields] : this.writeFields; }
@Test
void testUnionFields() {
    // Pairs of field arrays covering disjoint non-null positions, all-null
    // rows of differing arity, overlapping populated rows, and fully
    // populated rows. Each consecutive pair is unioned in both directions
    // to check the operation is symmetric in coverage.
    final Value[][] values =
        new Value[][] {
            {new IntValue(56), null, new IntValue(-7628761)},
            {null, new StringValue("Hello Test!"), null},
            {null, null, null, null, null, null, null, null},
            {
                null, null, null, null, null, null, null, null, null, null, null, null, null,
                null, null, null
            },
            {
                new IntValue(56), new IntValue(56), new IntValue(56), new IntValue(56), null,
                null, null
            },
            {null, null, null, null, new IntValue(56), new IntValue(56), new IntValue(56)},
            {new IntValue(43), new IntValue(42), new IntValue(41)},
            {new IntValue(-463), new IntValue(-464), new IntValue(-465)}
        };

    for (int i = 0; i < values.length - 1; i += 2) {
        testUnionFieldsForValues(values[i], values[i + 1], this.rand);
        testUnionFieldsForValues(values[i + 1], values[i], this.rand);
    }
}
/**
 * Computes the next KRaft-migration metadata state from the current state
 * and the desired state expressed via the strimzi.io/kraft annotation,
 * stores it in {@code metadataState}, and returns it.
 *
 * @param kafkaStatus status of the Kafka custom resource; per-state handlers
 *        may add warning conditions to it
 * @return the (possibly unchanged) metadata state after the transition
 */
public KafkaMetadataState computeNextMetadataState(KafkaStatus kafkaStatus) {
    KafkaMetadataState currentState = metadataState;
    // Each handler decides the transition for its own state; the switch
    // expression is exhaustive over the KafkaMetadataState enum.
    metadataState = switch (currentState) {
        case KRaft -> onKRaft(kafkaStatus);
        case ZooKeeper -> onZooKeeper(kafkaStatus);
        case KRaftMigration -> onKRaftMigration(kafkaStatus);
        case KRaftDualWriting -> onKRaftDualWriting(kafkaStatus);
        case KRaftPostMigration -> onKRaftPostMigration(kafkaStatus);
        case PreKRaft -> onPreKRaft(kafkaStatus);
    };
    if (metadataState != currentState) {
        LOGGER.infoCr(reconciliation, "Transitioning metadata state from [{}] to [{}] with strimzi.io/kraft annotation [{}]", currentState, metadataState, kraftAnno);
    } else {
        LOGGER.debugCr(reconciliation, "Metadata state [{}] with strimzi.io/kraft annotation [{}]", metadataState, kraftAnno);
    }
    return metadataState;
}
@Test
public void testWarningInKRaftDualWriting() {
    // In the KRaftDualWriting state, setting the annotation to "enabled"
    // is invalid and must surface as a KafkaMetadataStateWarning condition.
    Kafka kafka = new KafkaBuilder(KAFKA)
            .editMetadata()
                .addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, "enabled")
            .endMetadata()
            .withNewStatus()
                .withKafkaMetadataState(KRaftDualWriting)
            .endStatus()
            .build();

    KafkaMetadataStateManager kafkaMetadataStateManager = new KafkaMetadataStateManager(Reconciliation.DUMMY_RECONCILIATION, kafka);
    kafkaMetadataStateManager.computeNextMetadataState(kafka.getStatus());
    assertTrue(kafka.getStatus().getConditions().stream().anyMatch(condition -> "KafkaMetadataStateWarning".equals(condition.getReason())));
    assertEquals(kafka.getStatus().getConditions().get(0).getMessage(),
            "The strimzi.io/kraft annotation can't be set to 'enabled' during a migration process. " +
                    "It has to be used in post migration to finalize it and move definitely to KRaft.");

    // "rollback" is likewise rejected while dual writing, with a different
    // warning message.
    kafka = new KafkaBuilder(KAFKA)
            .editMetadata()
                .addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, "rollback")
            .endMetadata()
            .withNewStatus()
                .withKafkaMetadataState(KRaftDualWriting)
            .endStatus()
            .build();

    kafkaMetadataStateManager = new KafkaMetadataStateManager(Reconciliation.DUMMY_RECONCILIATION, kafka);
    kafkaMetadataStateManager.computeNextMetadataState(kafka.getStatus());
    assertTrue(kafka.getStatus().getConditions().stream().anyMatch(condition -> "KafkaMetadataStateWarning".equals(condition.getReason())));
    assertEquals(kafka.getStatus().getConditions().get(0).getMessage(),
            "The strimzi.io/kraft annotation can't be set to 'rollback' during dual writing. " +
                    "It can be used in post migration to start rollback process.");
}
/**
 * Validates an argument condition.
 *
 * @param isValid result of the caller's validity check
 * @param message exception message used when the check fails
 * @throws IllegalArgumentException if {@code isValid} is false
 */
public static void checkArgument(boolean isValid, String message) throws IllegalArgumentException {
    if (isValid) {
        return;
    }
    throw new IllegalArgumentException(message);
}
@Test
public void testCheckArgumentWithOneParam() {
    // Exercises the varargs/format overload of checkArgument: silent when
    // valid, formatted message ("Test message 12") when invalid.
    try {
        Preconditions.checkArgument(true, "Test message %s", 12);
    } catch (IllegalArgumentException e) {
        Assert.fail("Should not throw exception when isValid is true");
    }

    try {
        Preconditions.checkArgument(false, "Test message %s", 12);
        Assert.fail("Should throw exception when isValid is false");
    } catch (IllegalArgumentException e) {
        Assert.assertEquals("Should format message", "Test message 12", e.getMessage());
    }
}
/**
 * No-op close: this implementation holds no resources that need releasing.
 * Present only to satisfy the interface contract.
 */
@Override
public void close() {
}
@Test public void shouldSucceed_removeRemoteNode() throws ExecutionException, InterruptedException { // Given: final AtomicReference<Set<KsqlNode>> nodes = new AtomicReference<>( ImmutableSet.of(ksqlNodeLocal, ksqlNodeRemote)); final PushRouting routing = new PushRouting(sqr -> nodes.get(), 50, true); // When: final PushConnectionsHandle handle = handlePushRouting(routing); context.runOnContext(v -> { localPublisher.accept(LOCAL_ROW1); localPublisher.accept(LOCAL_ROW2); remotePublisher.accept(REMOTE_ROW1); remotePublisher.accept(REMOTE_ROW2); }); Set<List<?>> rows = waitOnRows(4); final RoutingResult result = handle.get(ksqlNodeRemote).get(); nodes.set(ImmutableSet.of(ksqlNodeLocal)); while (handle.get(ksqlNodeRemote).isPresent()) { Thread.sleep(100); continue; } handle.close(); // Then: assertThat(rows.contains(LOCAL_ROW1.value().values()), is(true)); assertThat(rows.contains(LOCAL_ROW2.value().values()), is(true)); assertThat(rows.contains(REMOTE_ROW1.getRow().get().getColumns()), is(true)); assertThat(rows.contains(REMOTE_ROW2.getRow().get().getColumns()), is(true)); assertThat(result.getStatus(), is(RoutingResultStatus.REMOVED)); }
/**
 * Two bridge names are equal when they are instances of the exact same
 * class and carry equal names. The getClass comparison keeps equality
 * symmetric in the presence of subclasses.
 */
@Override
public boolean equals(Object obj) {
    if (obj == this) {
        return true;
    }
    if (!(obj instanceof BridgeName)) {
        return false;
    }
    final BridgeName other = (BridgeName) obj;
    return this.getClass() == other.getClass() && Objects.equals(this.name, other.name);
}
@Test
public void testEquals() {
    // Guava's EqualsTester verifies the full equals/hashCode contract:
    // bridgeName1 and sameAsBridgeName1 form one equality group,
    // bridgeName2 is a distinct group.
    new EqualsTester()
        .addEqualityGroup(bridgeName1, sameAsBridgeName1)
        .addEqualityGroup(bridgeName2)
        .testEquals();
}
/**
 * Returns the task-profiling parameters, falling back to
 * {@code MRJobConfig.DEFAULT_TASK_PROFILE_PARAMS} when
 * {@code JobContext.TASK_PROFILE_PARAMS} is unset in this configuration.
 */
public String getProfileParams() {
    return get(JobContext.TASK_PROFILE_PARAMS, MRJobConfig.DEFAULT_TASK_PROFILE_PARAMS);
}
@Test
public void testProfileParamsDefaults() {
    // A fresh JobConf must fall back to the default hprof agent string,
    // which starts with "-agentlib:hprof" and contains a "file=%s"
    // placeholder for the output path.
    JobConf configuration = new JobConf();
    String result = configuration.getProfileParams();
    Assert.assertNotNull(result);
    Assert.assertTrue(result.contains("file=%s"));
    Assert.assertTrue(result.startsWith("-agentlib:hprof"));
}
/**
 * Returns a traverser over the given array, yielding its items in index
 * order.
 *
 * @param array the array to traverse
 * @param <T> type of the items
 */
@Nonnull
public static <T> Traverser<T> traverseArray(@Nonnull T[] array) {
    return new ArrayTraverser<>(array);
}
@Test
public void when_traverseArray_then_seeAllItems() {
    // The traverser must yield exactly 1 then 2, in array order.
    validateTraversal(traverseArray(new Integer[] {1, 2}));
}
/**
 * Builds the Cluster Operator configuration from environment-style key/value
 * pairs, first warning about removed variables and resolving the Kafka
 * version lookup from the image map entries.
 *
 * @param map configuration key/value pairs
 * @return the parsed configuration
 */
public static ClusterOperatorConfig buildFromMap(Map<String, String> map) {
    warningsForRemovedEndVars(map);

    final KafkaVersion.Lookup versionLookup = parseKafkaVersions(
            map.get(STRIMZI_KAFKA_IMAGES),
            map.get(STRIMZI_KAFKA_CONNECT_IMAGES),
            map.get(STRIMZI_KAFKA_MIRROR_MAKER_IMAGES),
            map.get(STRIMZI_KAFKA_MIRROR_MAKER_2_IMAGES));

    return buildFromMap(map, versionLookup);
}
@Test
public void testInvalidCustomResourceSelectorLabels() {
    // A selector value that is just a comma-separated key list (no
    // key=value pairs) is not a valid label selector and must be rejected
    // with an InvalidConfigurationException naming the bad value.
    Map<String, String> envVars = new HashMap<>(ClusterOperatorConfigTest.ENV_VARS);
    envVars.put(ClusterOperatorConfig.CUSTOM_RESOURCE_SELECTOR.key(), "nsLabelKey1,nsLabelKey2");

    InvalidConfigurationException e = assertThrows(InvalidConfigurationException.class,
        () -> ClusterOperatorConfig.buildFromMap(envVars, KafkaVersionTestUtils.getKafkaVersionLookup()));
    assertThat(e.getMessage(), containsString("Failed to parse. Value nsLabelKey1,nsLabelKey2 is not valid"));
}
/**
 * GitHub-webhook sessions never carry child-project permissions, so this
 * always answers {@code false} regardless of the permission or application
 * requested.
 */
@Override
protected boolean hasChildProjectsPermission(String permission, String applicationUuid) {
    return false;
}
@Test
public void hasChildProjectsPermission() {
    // The webhook session denies child-project permissions unconditionally.
    assertThat(githubWebhookUserSession.hasChildProjectsPermission("perm", "project")).isFalse();
}
/**
 * Derives a tuple-domain {@link Domain} for a column from ORC file/stripe
 * statistics, used for predicate pushdown.
 *
 * <p>Shortcuts: zero rows yields {@code none}; missing statistics yields
 * {@code all}; a zero value count yields {@code onlyNull}. Otherwise the
 * matching statistics kind for the column's type is converted to a range
 * domain; unmatched types fall through to "all values, possibly null".
 *
 * @param type the engine type of the column
 * @param rowCount number of rows covered by the statistics
 * @param columnStatistics ORC statistics for the column, may be null
 */
@VisibleForTesting
public static Domain getDomain(Type type, long rowCount, ColumnStatistics columnStatistics)
{
    if (rowCount == 0) {
        return Domain.none(type);
    }

    if (columnStatistics == null) {
        return Domain.all(type);
    }

    if (columnStatistics.hasNumberOfValues() && columnStatistics.getNumberOfValues() == 0) {
        return Domain.onlyNull(type);
    }

    // Fewer non-null values than rows implies at least one null.
    boolean hasNullValue = columnStatistics.getNumberOfValues() != rowCount;

    if (type.getJavaType() == boolean.class && columnStatistics.getBooleanStatistics() != null) {
        BooleanStatistics booleanStatistics = columnStatistics.getBooleanStatistics();

        boolean hasTrueValues = (booleanStatistics.getTrueValueCount() != 0);
        boolean hasFalseValues = (columnStatistics.getNumberOfValues() != booleanStatistics.getTrueValueCount());
        if (hasTrueValues && hasFalseValues) {
            return Domain.all(BOOLEAN);
        }
        if (hasTrueValues) {
            return Domain.create(ValueSet.of(BOOLEAN, true), hasNullValue);
        }
        if (hasFalseValues) {
            return Domain.create(ValueSet.of(BOOLEAN, false), hasNullValue);
        }
    }
    else if (isShortDecimal(type) && columnStatistics.getDecimalStatistics() != null) {
        // Short decimals are stored as unscaled longs after rescaling.
        return createDomain(type, hasNullValue, columnStatistics.getDecimalStatistics(), value -> rescale(value, (DecimalType) type).unscaledValue().longValue());
    }
    else if (isLongDecimal(type) && columnStatistics.getDecimalStatistics() != null) {
        return createDomain(type, hasNullValue, columnStatistics.getDecimalStatistics(), value -> encodeUnscaledValue(rescale(value, (DecimalType) type).unscaledValue()));
    }
    else if (isCharType(type) && columnStatistics.getStringStatistics() != null) {
        // CHAR semantics: truncate to declared length and trim padding.
        return createDomain(type, hasNullValue, columnStatistics.getStringStatistics(), value -> truncateToLengthAndTrimSpaces(value, type));
    }
    else if (isVarcharType(type) && columnStatistics.getStringStatistics() != null) {
        return createDomain(type, hasNullValue, columnStatistics.getStringStatistics());
    }
    else if (type.getTypeSignature().getBase().equals(StandardTypes.DATE) && columnStatistics.getDateStatistics() != null) {
        return createDomain(type, hasNullValue, columnStatistics.getDateStatistics(), value -> (long) value);
    }
    else if (type.getJavaType() == long.class && columnStatistics.getIntegerStatistics() != null) {
        return createDomain(type, hasNullValue, columnStatistics.getIntegerStatistics());
    }
    else if (type.getJavaType() == double.class && columnStatistics.getDoubleStatistics() != null) {
        return createDomain(type, hasNullValue, columnStatistics.getDoubleStatistics());
    }
    else if (REAL.equals(type) && columnStatistics.getDoubleStatistics() != null) {
        // REAL is encoded as the raw int bits of the float widened to long.
        return createDomain(type, hasNullValue, columnStatistics.getDoubleStatistics(), value -> (long) floatToRawIntBits(value.floatValue()));
    }
    return Domain.create(ValueSet.all(type), hasNullValue);
}
@Test
public void testBigint() {
    // Covers the BIGINT branches of getDomain: zero rows / missing stats,
    // all-null, no-null, single value, closed and half-open ranges, each
    // with and without nulls (value count < row count).
    assertEquals(getDomain(BIGINT, 0, null), Domain.none(BIGINT));
    assertEquals(getDomain(BIGINT, 10, null), Domain.all(BIGINT));

    assertEquals(getDomain(BIGINT, 0, integerColumnStats(null, null, null)), Domain.none(BIGINT));
    assertEquals(getDomain(BIGINT, 0, integerColumnStats(0L, null, null)), Domain.none(BIGINT));
    assertEquals(getDomain(BIGINT, 0, integerColumnStats(0L, 100L, 100L)), Domain.none(BIGINT));

    assertEquals(getDomain(BIGINT, 10, integerColumnStats(0L, null, null)), onlyNull(BIGINT));
    assertEquals(getDomain(BIGINT, 10, integerColumnStats(10L, null, null)), notNull(BIGINT));

    assertEquals(getDomain(BIGINT, 10, integerColumnStats(10L, 100L, 100L)), singleValue(BIGINT, 100L));

    assertEquals(getDomain(BIGINT, 10, integerColumnStats(10L, 0L, 100L)), create(ValueSet.ofRanges(range(BIGINT, 0L, true, 100L, true)), false));
    assertEquals(getDomain(BIGINT, 10, integerColumnStats(10L, null, 100L)), create(ValueSet.ofRanges(lessThanOrEqual(BIGINT, 100L)), false));
    assertEquals(getDomain(BIGINT, 10, integerColumnStats(10L, 0L, null)), create(ValueSet.ofRanges(greaterThanOrEqual(BIGINT, 0L)), false));

    assertEquals(getDomain(BIGINT, 10, integerColumnStats(5L, 0L, 100L)), create(ValueSet.ofRanges(range(BIGINT, 0L, true, 100L, true)), true));
    assertEquals(getDomain(BIGINT, 10, integerColumnStats(5L, null, 100L)), create(ValueSet.ofRanges(lessThanOrEqual(BIGINT, 100L)), true));
    assertEquals(getDomain(BIGINT, 10, integerColumnStats(5L, 0L, null)), create(ValueSet.ofRanges(greaterThanOrEqual(BIGINT, 0L)), true));
}
/**
 * Runs one iteration of the sender loop: services transactional state (if a
 * transaction manager is configured), then drains accumulated producer data and
 * polls the network client once.
 */
void runOnce() {
    if (transactionManager != null) {
        try {
            transactionManager.maybeResolveSequences();
            RuntimeException lastError = transactionManager.lastError();
            // do not continue sending if the transaction manager is in a failed state
            if (transactionManager.hasFatalError()) {
                if (lastError != null)
                    maybeAbortBatches(lastError);
                client.poll(retryBackoffMs, time.milliseconds());
                return;
            }
            if (transactionManager.hasAbortableError() && shouldHandleAuthorizationError(lastError)) {
                return;
            }
            // Check whether we need a new producerId. If so, we will enqueue an InitProducerId
            // request which will be sent below
            transactionManager.bumpIdempotentEpochAndResetIdIfNeeded();
            if (maybeSendAndPollTransactionalRequest()) {
                // A transactional request was sent/polled; skip draining user data this turn.
                return;
            }
        } catch (AuthenticationException e) {
            // This is already logged as error, but propagated here to perform any clean ups.
            log.trace("Authentication exception while processing transactional request", e);
            transactionManager.authenticationFailed(e);
        }
    }
    long currentTimeMs = time.milliseconds();
    long pollTimeout = sendProducerData(currentTimeMs);
    client.poll(pollTimeout, currentTimeMs);
}
@Test
public void testTransactionalSplitBatchAndSend() throws Exception {
    ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0);
    TopicPartition tp = new TopicPartition("testSplitBatchAndSend", 1);
    TransactionManager txnManager = new TransactionManager(logContext, "testSplitBatchAndSend", 60000, 100, apiVersions);
    setupWithTransactionState(txnManager);
    doInitTransactions(txnManager, producerIdAndEpoch);
    txnManager.beginTransaction();
    txnManager.maybeAddPartition(tp);
    // Enqueue a successful AddPartitionsToTxn response so the partition becomes sendable.
    client.prepareResponse(buildAddPartitionsToTxnResponseData(0, Collections.singletonMap(tp, Errors.NONE)));
    sender.runOnce();
    // Delegate the split-and-resend verification to the shared helper.
    testSplitBatchAndSend(txnManager, producerIdAndEpoch, tp);
}
/**
 * Moves an application to the requested target queue, enforcing the caller's
 * MODIFY_APP access on the application and submit access on the target queue,
 * and auditing every outcome (success or failure).
 *
 * @throws YarnException if access is denied, the target queue does not exist,
 *                       the app is not in a movable state, or the move fails
 */
@SuppressWarnings("unchecked")
@Override
public MoveApplicationAcrossQueuesResponse moveApplicationAcrossQueues(
    MoveApplicationAcrossQueuesRequest request) throws YarnException {
    ApplicationId applicationId = request.getApplicationId();
    UserGroupInformation callerUGI = getCallerUgi(applicationId,
        AuditConstants.MOVE_APP_REQUEST);
    RMApp application = verifyUserAccessForRMApp(applicationId, callerUGI,
        AuditConstants.MOVE_APP_REQUEST, ApplicationAccessType.MODIFY_APP, true);
    String targetQueue = request.getTargetQueue();
    // Reject when the target queue is missing or the caller has no submit ACL on it.
    if (!accessToTargetQueueAllowed(callerUGI, application, targetQueue)) {
        RMAuditLogger.logFailure(callerUGI.getShortUserName(),
            AuditConstants.MOVE_APP_REQUEST, "Target queue doesn't exist or user"
            + " doesn't have permissions to submit to target queue: "
            + targetQueue, "ClientRMService",
            AuditConstants.UNAUTHORIZED_USER, applicationId);
        throw RPCUtil.getRemoteException(new AccessControlException("User "
            + callerUGI.getShortUserName() + " cannot submit applications to"
            + " target queue or the target queue doesn't exist: "
            + targetQueue + " while moving " + applicationId));
    }
    // Moves only allowed when app is in a state that means it is tracked by
    // the scheduler. Introducing SUBMITTED state also to this list as there
    // could be a corner scenario that app may not be in Scheduler in SUBMITTED
    // state.
    if (!ACTIVE_APP_STATES.contains(application.getState())) {
        String msg = "App in " + application.getState() + " state cannot be moved.";
        RMAuditLogger.logFailure(callerUGI.getShortUserName(),
            AuditConstants.MOVE_APP_REQUEST, "UNKNOWN", "ClientRMService", msg);
        throw new YarnException(msg);
    }
    try {
        this.rmAppManager.moveApplicationAcrossQueue(
            application.getApplicationId(), request.getTargetQueue());
    } catch (YarnException ex) {
        // Audit the failure before re-throwing so the caller still sees the cause.
        RMAuditLogger.logFailure(callerUGI.getShortUserName(),
            AuditConstants.MOVE_APP_REQUEST, "UNKNOWN", "ClientRMService",
            ex.getMessage());
        throw ex;
    }
    RMAuditLogger.logSuccess(callerUGI.getShortUserName(),
        AuditConstants.MOVE_APP_REQUEST, "ClientRMService" , applicationId);
    return recordFactory
        .newRecordInstance(MoveApplicationAcrossQueuesResponse.class);
}
// Moving to a queue the ACL manager does not know must surface as YarnException.
@Test (expected = YarnException.class)
public void testNonExistingQueue() throws Exception {
    ApplicationId applicationId = getApplicationId(1);
    UserGroupInformation aclUGI = UserGroupInformation.getCurrentUser();
    QueueACLsManager queueAclsManager = getQueueAclManager();
    ApplicationACLsManager appAclsManager = getAppAclManager();
    ClientRMService rmService = createClientRMServiceForMoveApplicationRequest(applicationId, aclUGI.getShortUserName(), appAclsManager, queueAclsManager);
    MoveApplicationAcrossQueuesRequest moveAppRequest = MoveApplicationAcrossQueuesRequest.newInstance(applicationId, "unknown_queue");
    rmService.moveApplicationAcrossQueues(moveAppRequest);
}
/**
 * Returns a {@link MetricsInfo} for the given name/description via the shared
 * Info cache (repeated calls with the same arguments return the same instance
 * until the cache evicts it — see testInfoOverflow).
 */
public static MetricsInfo info(String name, String description) {
    return Info.INSTANCE.cache.add(name, description);
}
@Test
public void testInfoOverflow() {
    MetricsInfo i0 = info("m0", "m desc");
    // Fill the name cache one past capacity; m0 stays interned until overflow.
    for (int i = 0; i < MAX_INFO_NAMES + 1; ++i) {
        info("m"+ i, "m desc");
        if (i < MAX_INFO_NAMES) {
            assertSame("m0 is still there", i0, info("m0", "m desc"));
        }
    }
    assertNotSame("m0 is gone", i0, info("m0", "m desc"));
    MetricsInfo i1 = info("m1", "m desc");
    // Same overflow behavior for the per-name description cache.
    for (int i = 0; i < MAX_INFO_DESCS; ++i) {
        info("m1", "m desc"+ i);
        if (i < MAX_INFO_DESCS - 1) {
            assertSame("i1 is still there", i1, info("m1", "m desc"));
        }
    }
    assertNotSame("i1 is gone", i1, info("m1", "m desc"));
}
/**
 * Uploads the job jar and all additional artifacts, rewriting the corresponding
 * config entries to point at the uploaded locations. No-op when local upload is
 * disabled via configuration.
 */
@Override
public void uploadAll(Configuration config) throws Exception {
    if (!config.get(KubernetesConfigOptions.LOCAL_UPLOAD_ENABLED)) {
        LOG.info(
                "Local artifact uploading is disabled. Set '{}' to enable.",
                KubernetesConfigOptions.LOCAL_UPLOAD_ENABLED.key());
        return;
    }
    // Upload the job jar and point PipelineOptions.JARS at the uploaded URI.
    final String jobUri = upload(config, getJobUri(config));
    updateConfig(config, PipelineOptions.JARS, Collections.singletonList(jobUri));
    // Upload every additional artifact and rewrite the artifact list in place.
    final List<String> additionalUris =
            config.getOptional(ArtifactFetchOptions.ARTIFACT_LIST)
                    .orElse(Collections.emptyList());
    final List<String> uploadedAdditionalUris =
            additionalUris.stream()
                    .map(
                            FunctionUtils.uncheckedFunction(
                                    artifactUri -> upload(config, artifactUri)))
                    .collect(Collectors.toList());
    updateConfig(config, ArtifactFetchOptions.ARTIFACT_LIST, uploadedAdditionalUris);
}
@Test
void testUploadAllWithOneJobJar() throws Exception {
    File jar = getFlinkKubernetesJar();
    // A "local://" URI marks the jar as a client-side artifact to be uploaded.
    String localUri = "local://" + jar.getAbsolutePath();
    config.set(PipelineOptions.JARS, Collections.singletonList(localUri));
    artifactUploader.uploadAll(config);
    assertJobJarUri(jar.getName());
}
/**
 * Indents {@code item} by one operation level: prefixes it with a newline plus the
 * indent, and re-indents every embedded newline — except newlines inside
 * single-quoted SQL string literals, which must stay verbatim.
 */
static String indent(String item) {
    // '([^']|'')*': Matches the escape sequence "'...'" where the content between "'"
    // characters can contain anything except "'" unless its doubled ('').
    //
    // Then each match is checked. If it starts with "'", it's left unchanged
    // (escaped sequence). Otherwise, it replaces newlines within the match with indent.
    Pattern pattern = Pattern.compile("('([^']|'')*')|\\n");
    Matcher matcher = pattern.matcher(item);
    StringBuffer output = new StringBuffer();
    while (matcher.find()) {
        final String group = matcher.group();
        if (group.startsWith("'")) {
            // Quoted literal: copy through untouched (quoteReplacement guards $ and \).
            matcher.appendReplacement(output, Matcher.quoteReplacement(group));
        } else {
            // Bare newline: indent the line that follows it.
            String replaced = group.replaceAll("\n", "\n" + OPERATION_INDENT);
            matcher.appendReplacement(output, Matcher.quoteReplacement(replaced));
        }
    }
    matcher.appendTail(output);
    return "\n" + OPERATION_INDENT + output;
}
// Newlines inside quoted SQL literals ('' escapes) must NOT be re-indented.
@Test
void testIndentChildWithEscapedQuotes() {
    String sourceQuery = "SELECT *, '',\n'' FROM source_t";
    String s = String.format(
            "SELECT * FROM (%s\n) WHERE a > 5",
            OperationUtils.indent(sourceQuery));
    assertThat(s)
            .isEqualTo(
                    "SELECT * FROM (\n"
                            + "    SELECT *, '',\n"
                            + "'' FROM source_t\n"
                            + ") WHERE a > 5");
}
/**
 * Convenience overload that delegates to the two-argument {@code analyze} with the
 * boolean flag disabled.
 */
public Analysis analyze(Statement statement) {
    return analyze(statement, false);
}
@Test
public void testQualifiedViewColumnResolution() {
    // it should be possible to qualify the column reference with the view name
    analyze("SELECT v1.a FROM v1");
    // ... and with schema- and catalog-qualified forms of the same view.
    analyze("SELECT s1.v1.a FROM s1.v1");
    analyze("SELECT tpch.s1.v1.a FROM tpch.s1.v1");
}
/**
 * Removes {@code listener} from the subscription set of {@code url} and clears the
 * cached notifications for that URL.
 *
 * @throws IllegalArgumentException when either argument is null
 */
@Override
public void unsubscribe(URL url, NotifyListener listener) {
    if (url == null) {
        throw new IllegalArgumentException("unsubscribe url == null");
    }
    if (listener == null) {
        throw new IllegalArgumentException("unsubscribe listener == null");
    }
    if (logger.isInfoEnabled()) {
        logger.info("Unsubscribe: " + url);
    }
    Set<NotifyListener> listeners = subscribed.get(url);
    if (listeners != null) {
        listeners.remove(listener);
    }
    // do not forget remove notified
    // NOTE(review): this removes the notified entry for the URL even when other
    // listeners are still subscribed to it — confirm that is intended.
    notified.remove(url);
}
@Test
void testUnsubscribe() {
    // check parameters
    try {
        abstractRegistry.unsubscribe(testUrl, null);
        Assertions.fail();
    } catch (Exception e) {
        Assertions.assertTrue(e instanceof IllegalArgumentException);
    }
    // check parameters
    try {
        abstractRegistry.unsubscribe(null, null);
        Assertions.fail();
    } catch (Exception e) {
        Assertions.assertTrue(e instanceof IllegalArgumentException);
    }
    Assertions.assertNull(abstractRegistry.getSubscribed().get(testUrl));
    // check if unsubscribe successfully
    abstractRegistry.subscribe(testUrl, listener);
    abstractRegistry.unsubscribe(testUrl, listener);
    // Since we have subscribed testUrl, here should return a empty set instead of null
    Assertions.assertNotNull(abstractRegistry.getSubscribed().get(testUrl));
    Assertions.assertFalse(abstractRegistry.getSubscribed().get(testUrl).contains(listener));
}
/**
 * Returns a copy of this config with {@code props} overlaid on the current
 * originals; streams-related properties are merged separately so stream config
 * resolution stays consistent.
 *
 * @param props properties that take precedence over the existing values
 * @return a new {@code KsqlConfig} reflecting the overrides
 */
public KsqlConfig cloneWithPropertyOverwrite(final Map<String, ?> props) {
    final Map<String, Object> cloneProps = new HashMap<>(originals());
    cloneProps.putAll(props);
    final Map<String, ConfigValue> streamConfigProps =
        buildStreamingConfig(getKsqlStreamConfigProps(), props);
    return new KsqlConfig(ConfigGeneration.CURRENT, cloneProps, streamConfigProps);
}
// Overridden properties must win over the values the config was created with.
@Test
public void shouldCloneWithKsqlPropertyOverwrite() {
    final KsqlConfig ksqlConfig = new KsqlConfig(Collections.singletonMap(
        KsqlConfig.KSQL_SERVICE_ID_CONFIG, "test"));
    final KsqlConfig ksqlConfigClone = ksqlConfig.cloneWithPropertyOverwrite(
        Collections.singletonMap(
            KsqlConfig.KSQL_SERVICE_ID_CONFIG, "test-2"));
    final String result = ksqlConfigClone.getString(KsqlConfig.KSQL_SERVICE_ID_CONFIG);
    assertThat(result, equalTo("test-2"));
}
/**
 * Returns {@code null} (direct connection) for hosts matching any configured
 * non-proxy-host pattern; otherwise defers to the superclass proxy decision.
 */
@Override
@Nullable
protected HttpHost determineProxy(HttpHost target, HttpContext context) throws HttpException {
    for (Pattern nonProxyHostPattern : nonProxyHostPatterns) {
        if (nonProxyHostPattern.matcher(target.getHostName()).matches()) {
            return null;
        }
    }
    return super.determineProxy(target, context);
}
// A host covered by a trailing-wildcard pattern must bypass the proxy (null route).
@Test
void testHostWithEndWildcardIsMatched() throws Exception {
    assertThat(routePlanner.determineProxy(new HttpHost("192.168.52.94"), httpContext)).isNull();
}
/**
 * Two {@code IndexSpec}s are equal iff they share the exact runtime class and the
 * same {@code name}; all other fields are ignored.
 */
@Override
public boolean equals(Object o) {
    if (this == o) {
        return true;
    }
    if (o == null || getClass() != o.getClass()) {
        return false;
    }
    return Objects.equal(name, ((IndexSpec) o).name);
}
// equals() must reject instances of unrelated classes.
@Test
void equalAnotherObject() {
    var spec3 = new IndexSpec()
        .setName("metadata.name");
    assertThat(spec3.equals(new Object())).isFalse();
}
/** Returns the precomputed metadata describing this transform's result type. */
@Override
public TransformResultMetadata getResultMetadata() {
    return _resultMetadata;
}
@Test
public void testArrayElementAtString() {
    Random rand = new Random();
    // Pick a random 0-based element index; the SQL function is 1-based (index + 1).
    int index = rand.nextInt(MAX_NUM_MULTI_VALUES);
    ExpressionContext expression = RequestContextUtils.getExpression(
        String.format("array_element_at_string(%s, %d)", STRING_MV_COLUMN, index + 1));
    TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
    assertTrue(transformFunction instanceof ScalarTransformFunctionWrapper);
    assertEquals(transformFunction.getResultMetadata().getDataType(), DataType.STRING);
    assertTrue(transformFunction.getResultMetadata().isSingleValue());
    String[] expectedValues = new String[NUM_ROWS];
    for (int i = 0; i < NUM_ROWS; i++) {
        // Out-of-range rows fall back to the null placeholder string.
        expectedValues[i] = _stringMVValues[i].length > index ? _stringMVValues[i][index] : NullValuePlaceHolder.STRING;
    }
    testTransformFunction(transformFunction, expectedValues);
}
/** Closes the underlying channel if still open; repeated calls are no-ops. */
@Override
public void close() {
    if (ch.isOpen()) {
        ch.close();
    }
}
@Test
public void testNegativeTtl() throws Exception {
    final DnsNameResolver resolver = newResolver().negativeTtl(10).build();
    try {
        // Prime the negative cache with one failed lookup.
        resolveNonExistentDomain(resolver);
        final int size = 10000;
        final List<UnknownHostException> exceptions = new ArrayList<UnknownHostException>();
        // If negative cache works, this thread should be done really quickly.
        final Thread negativeLookupThread = new Thread() {
            @Override
            public void run() {
                for (int i = 0; i < size; i++) {
                    exceptions.add(resolveNonExistentDomain(resolver));
                    if (isInterrupted()) {
                        break;
                    }
                }
            }
        };
        negativeLookupThread.start();
        negativeLookupThread.join(DEFAULT_TEST_TIMEOUT_MS);
        if (negativeLookupThread.isAlive()) {
            negativeLookupThread.interrupt();
            fail("Cached negative lookups did not finish quickly.");
        }
        // Every lookup must have failed with UnknownHostException.
        assertThat(exceptions, hasSize(size));
    } finally {
        resolver.close();
    }
}
/**
 * Fetches controller metadata (group / leader information) from the given
 * controller address via a synchronous remoting call with a 3s timeout.
 *
 * @throws MQBrokerException when the controller responds with a non-success code
 */
public GetMetaDataResponseHeader getControllerMetaData(
    final String controllerAddress) throws RemotingConnectException, RemotingSendRequestException,
    RemotingTimeoutException, InterruptedException, RemotingCommandException, MQBrokerException {
    final RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.CONTROLLER_GET_METADATA_INFO, null);
    final RemotingCommand response = this.remotingClient.invokeSync(controllerAddress, request, 3000);
    assert response != null;
    if (response.getCode() == SUCCESS) {
        return (GetMetaDataResponseHeader) response.decodeCommandCustomHeader(GetMetaDataResponseHeader.class);
    }
    // Any non-success response is surfaced with its code and remark.
    throw new MQBrokerException(response.getCode(), response.getRemark());
}
@Test
public void assertGetControllerMetaData() throws RemotingException, InterruptedException, MQBrokerException {
    // Stub a successful invokeSync carrying the prepared response header.
    mockInvokeSync();
    GetMetaDataResponseHeader responseHeader = new GetMetaDataResponseHeader();
    responseHeader.setGroup(group);
    responseHeader.setIsLeader(true);
    setResponseHeader(responseHeader);
    GetMetaDataResponseHeader actual = mqClientAPI.getControllerMetaData(defaultBrokerAddr);
    assertNotNull(actual);
    assertEquals(group, actual.getGroup());
    assertTrue(actual.isLeader());
}
/**
 * Parses the given text into a {@link DateTime} using the supplied format.
 *
 * @param dateStr    date text to parse
 * @param dateFormat format describing {@code dateStr}
 * @return the parsed {@code DateTime}
 */
public static DateTime parse(CharSequence dateStr, DateFormat dateFormat) {
    return new DateTime(dateStr, dateFormat);
}
// A date-only pattern must parse with the time component defaulting to midnight.
@Test
public void parseByDateTimeFormatterTest() {
    final DateTime parse = DateUtil.parse("2021-12-01", DatePattern.NORM_DATE_FORMATTER);
    assertEquals("2021-12-01 00:00:00", parse.toString());
}
/** Returns the raw backing byte array; may be {@code null} when no value is set. */
@Nullable
public byte[] getValue() {
    return mValue;
}
// A value above Integer.MAX_VALUE must serialize big-endian without sign issues.
@Test
public void setValue_UINT32_BE_big() {
    final MutableData data = new MutableData(new byte[4]);
    data.setValue(0xF0000001L, Data.FORMAT_UINT32_BE, 0);
    assertArrayEquals(new byte[] { (byte) 0xF0, 0x00, 0x00, 0x01 } , data.getValue());
}
/**
 * Returns the position's loss (a negative number) when the position is closed and
 * ended at a loss, otherwise zero. When {@code excludeCosts} is set the gross
 * profit is used, i.e. transaction/holding costs are left out.
 */
@Override
public Num calculate(BarSeries series, Position position) {
    if (!position.isClosed()) {
        // Open positions contribute nothing.
        return series.zero();
    }
    final Num result = excludeCosts ? position.getGrossProfit() : position.getProfit();
    return result.isNegative() ? result : series.zero();
}
@Test
public void calculateComparingIncludingVsExcludingCosts() {
    MockBarSeries series = new MockBarSeries(numFunction, 100, 95, 100, 80, 85, 70);
    // 1% linear transaction cost per trade, no holding cost.
    LinearTransactionCostModel transactionCost = new LinearTransactionCostModel(0.01);
    ZeroCostModel holdingCost = new ZeroCostModel();
    TradingRecord tradingRecord = new BaseTradingRecord(Trade.TradeType.BUY, transactionCost, holdingCost);
    // entry price = 100 (cost = 100*0.01 = 1) => netPrice = 101, grossPrice = 100
    tradingRecord.enter(0, series.getBar(0).getClosePrice(), numOf(1));
    // exit price = 95 (cost = 95*0.01 = 0.95) => netPrice = 94.05, grossPrice = 95
    tradingRecord.exit(1, series.getBar(1).getClosePrice(), tradingRecord.getCurrentPosition().getEntry().getAmount());
    // entry price = 100 (cost = 100*0.01 = 1) => netPrice = 101, grossPrice = 100
    tradingRecord.enter(2, series.getBar(2).getClosePrice(), numOf(1));
    // exit price = 70 (cost = 70*0.01 = 0.70) => netPrice = 69.3, grossPrice = 70
    tradingRecord.exit(5, series.getBar(5).getClosePrice(), tradingRecord.getCurrentPosition().getEntry().getAmount());
    // include costs, i.e. loss - costs:
    // [(94.05 - 101)] + [(69.3 - 101)] = -6.95 + (-31.7) = -38.65 loss
    // [(95 - 100)] + [(70 - 100)] = -5 + (-30) = -35 loss - 3.65 = -38.65 loss
    AnalysisCriterion lossIncludingCosts = getCriterion(false);
    assertNumEquals(-38.65, lossIncludingCosts.calculate(series, tradingRecord));
    // exclude costs, i.e. costs are not contained:
    // [(95 - 100)] + [(70 - 100)] = -5 + (-30) = -35 loss
    AnalysisCriterion lossExcludingCosts = getCriterion(true);
    assertNumEquals(-35, lossExcludingCosts.calculate(series, tradingRecord));
}
/** Factory method: returns a brand-new {@code LogMessage} on every call (no caching). */
public static LogMessage getInstance() {
    return new LogMessage();
}
// getInstance() is a factory, not a singleton: each call yields a distinct object.
@Test
public void testGetInstanceShouldReturnANewLogMessageInstance() {
    final LogMessage newInstance = LogMessage.getInstance();
    assertNotNull(newInstance);
    assertNotSame(logMessage, newInstance);
}
/**
 * Deep clone: re-allocates the output-field array and clones each
 * {@link XMLField} so the copy shares no mutable field state with the original.
 */
public Object clone() {
    XMLOutputMeta retval = (XMLOutputMeta) super.clone();
    int nrfields = outputFields.length;
    retval.allocate( nrfields );
    for ( int i = 0; i < nrfields; i++ ) {
        retval.outputFields[i] = (XMLField) outputFields[i].clone();
    }
    return retval;
}
@Test
public void testClone() throws Exception {
    XMLOutputMeta xmlOutputMeta = new XMLOutputMeta();
    Node stepnode = getTestNode();
    DatabaseMeta dbMeta = mock( DatabaseMeta.class );
    IMetaStore metaStore = mock( IMetaStore.class );
    xmlOutputMeta.loadXML( stepnode, Collections.singletonList( dbMeta ), metaStore );
    XMLOutputMeta cloned = (XMLOutputMeta) xmlOutputMeta.clone();
    // Clone must be a distinct instance with identical content.
    assertNotSame( cloned, xmlOutputMeta );
    assertXmlOutputMeta( cloned );
}
/** Asserts that the multimap under test contains {@code key}, checked via its {@code keySet()}. */
public final void containsKey(@Nullable Object key) {
    check("keySet()").that(checkNotNull(actual).keySet()).contains(key);
}
// A null key stored in the multimap must be found by containsKey(null).
@Test
public void containsKeyNull() {
    Multimap<String, String> multimap = HashMultimap.create();
    multimap.put(null, "null");
    assertThat(multimap).containsKey(null);
}
/**
 * Executes the locks command. Exactly one of {@code --list} or
 * {@code --force-release} must be supplied: lists the current migration locks or
 * force-releases them accordingly.
 */
@Override
public void run(Namespace namespace, Liquibase liquibase) throws Exception {
    final boolean list = firstNonNull(namespace.getBoolean("list"), false);
    final boolean release = firstNonNull(namespace.getBoolean("release"), false);
    if (list == release) {
        // Both flags set, or neither: the options are mutually exclusive and required.
        throw new IllegalArgumentException("Must specify either --list or --force-release");
    }
    if (list) {
        liquibase.reportLocks(printStream);
    } else {
        liquibase.forceReleaseLocks();
    }
}
@Test
void testRelease() throws Exception {
    // We can't create locks in the database, so use mocks
    final Liquibase liquibase = Mockito.mock(Liquibase.class);
    locksCommand.run(new Namespace(Map.of("list", false, "release", true)), liquibase);
    Mockito.verify(liquibase).forceReleaseLocks();
}
/**
 * Evaluates a JSON payload against a dispatching specification: the JSON Pointer
 * expression selects a node, the operator decides how that node is compared to
 * the specification cases, and the matching case value (or the default) is
 * returned.
 *
 * @throws JsonMappingException when the payload cannot be parsed as JSON
 */
public static String evaluate(String jsonText, JsonEvaluationSpecification specification)
        throws JsonMappingException {
    // Parse json text and get root node.
    JsonNode rootNode;
    try {
        ObjectMapper mapper = new ObjectMapper();
        rootNode = mapper.readTree(new StringReader(jsonText));
    } catch (Exception e) {
        log.error("Exception while parsing Json text", e);
        throw new JsonMappingException("Exception while parsing Json payload");
    }
    // Retrieve evaluated node within JSON tree.
    JsonNode evaluatedNode = rootNode.at(specification.getExp());
    String caseKey = evaluatedNode.asText();
    switch (specification.getOperator()) {
        case equals:
            // Consider simple equality.
            String value = specification.getCases().get(caseKey);
            return (value != null ? value : specification.getCases().getDefault());
        case range:
            // Consider range evaluation.
            double caseNumber = 0.000;
            try {
                caseNumber = Double.parseDouble(caseKey);
            } catch (NumberFormatException nfe) {
                log.error(caseKey + " into range expression cannot be parsed as number. Considering default case.");
                return specification.getCases().getDefault();
            }
            return foundRangeMatchingCase(caseNumber, specification.getCases());
        case regexp:
            // Consider regular expression evaluation for each case key.
            for (String choiceKey : specification.getCases().keySet()) {
                if (!"default".equals(choiceKey)) {
                    if (Pattern.matches(choiceKey, caseKey)) {
                        return specification.getCases().get(choiceKey);
                    }
                }
            }
            break;
        case size:
            // Consider size evaluation.
            if (evaluatedNode.isArray()) {
                int size = evaluatedNode.size();
                return foundRangeMatchingCase(size, specification.getCases());
            }
            break;
        case presence:
            // Consider presence evaluation of evaluatedNode directly.
            if (evaluatedNode != null && evaluatedNode.toString().length() > 0) {
                if (specification.getCases().containsKey("found")) {
                    return specification.getCases().get("found");
                }
            } else {
                if (specification.getCases().containsKey("missing")) {
                    return specification.getCases().get("missing");
                }
            }
            break;
    }
    // Fall-through for regexp/size/presence when nothing matched.
    return specification.getCases().getDefault();
}
@Test
void testEqualsOperatorDispatcher() throws Exception {
    DispatchCases cases = new DispatchCases();
    Map<String, String> dispatchCases = new HashMap<>();
    dispatchCases.put("Belgium", "OK");
    dispatchCases.put("Germany", "KO");
    dispatchCases.put("default", "Why not?");
    cases.putAll(dispatchCases);
    JsonEvaluationSpecification specifications = new JsonEvaluationSpecification();
    specifications.setExp("/country");
    specifications.setOperator(EvaluationOperator.equals);
    specifications.setCases(cases);
    // Exact-match keys resolve to their case value; anything else falls to default.
    String result = JsonExpressionEvaluator.evaluate(BELGIUM_BEER, specifications);
    assertEquals("OK", result);
    result = JsonExpressionEvaluator.evaluate(GERMAN_BEER, specifications);
    assertEquals("KO", result);
    result = JsonExpressionEvaluator.evaluate(ENGLISH_BEER, specifications);
    assertEquals("Why not?", result);
}
/**
 * Convenience overload of the five-argument {@code retryUntilTimeout} that uses
 * the system clock ({@code Time.SYSTEM}).
 */
public static <T> T retryUntilTimeout(Callable<T> callable, Supplier<String> description,
                                      Duration timeoutDuration, long retryBackoffMs) throws Exception {
    return retryUntilTimeout(callable, description, timeoutDuration, retryBackoffMs, Time.SYSTEM);
}
// A callable that succeeds immediately must be invoked exactly once.
@Test
public void testSuccess() throws Exception {
    Mockito.when(mockCallable.call()).thenReturn("success");
    assertEquals("success", RetryUtil.retryUntilTimeout(mockCallable, testMsg, Duration.ofMillis(100), 1, mockTime));
    Mockito.verify(mockCallable, Mockito.times(1)).call();
}
/**
 * Serializes this vector into a {@link TensorProto}: records the version, class
 * name and dimensions, and packs the elements as little-endian doubles.
 */
@Override
public TensorProto serialize() {
    TensorProto.Builder builder = TensorProto.newBuilder();
    builder.setVersion(CURRENT_VERSION);
    builder.setClassName(DenseVector.class.getName());
    DenseTensorProto.Builder dataBuilder = DenseTensorProto.newBuilder();
    dataBuilder.addAllDimensions(Arrays.stream(shape).boxed().collect(Collectors.toList()));
    // 8 bytes per double, little-endian, written through a DoubleBuffer view.
    ByteBuffer buffer = ByteBuffer.allocate(elements.length * 8).order(ByteOrder.LITTLE_ENDIAN);
    DoubleBuffer doubleBuffer = buffer.asDoubleBuffer();
    doubleBuffer.put(elements);
    doubleBuffer.rewind();
    dataBuilder.setValues(ByteString.copyFrom(buffer));
    builder.setSerializedData(Any.pack(dataBuilder.build()));
    return builder.build();
}
// Round trip: serialize then deserialize must reproduce an equal vector.
@Test
public void serializationTest() {
    DenseVector a = generateVectorA();
    TensorProto proto = a.serialize();
    Tensor deser = Tensor.deserialize(proto);
    assertEquals(a,deser);
}
/**
 * Inserts a clone of {@code toClone} at position {@code index} and returns the
 * inserted clone (the caller's instance is never stored directly).
 */
public FactMapping addFactMapping(int index, FactMapping toClone) {
    final FactMapping copy = toClone.cloneFactMapping();
    factMappings.add(index, copy);
    return copy;
}
// The returned clone must carry over the aliases of the source mapping.
@Test
public void addFactMapping_byIndexAndFactMapping() {
    FactMapping toClone = new FactMapping();
    toClone.setFactAlias("ALIAS");
    toClone.setExpressionAlias("EXPRESSION_ALIAS");
    final FactMapping cloned = modelDescriptor.addFactMapping(0, toClone);
    assertThat(cloned.getFactAlias()).isEqualTo(toClone.getFactAlias());
    assertThat(cloned.getExpressionAlias()).isEqualTo(toClone.getExpressionAlias());
}
/**
 * Samples saturation across all persistent queries, reports the node-level
 * maximum, and cleans up stats for Kafka Streams apps that no longer exist.
 */
@Override
public void run() {
    final Instant now = time.get();
    try {
        final Collection<PersistentQueryMetadata> queries = engine.getPersistentQueries();
        // Group queries by Kafka Streams application id, measure each group, and
        // keep the most saturated group as the node-level value (0.0 when none).
        final Optional<Double> saturation = queries.stream()
            .collect(Collectors.groupingBy(PersistentQueryMetadata::getQueryApplicationId))
            .entrySet()
            .stream()
            .map(e -> measure(now, e.getKey(), e.getValue()))
            .max(PersistentQuerySaturationMetrics::compareSaturation)
            .orElse(Optional.of(0.0));
        saturation.ifPresent(s -> report(now, s));
        final Set<String> appIds = queries.stream()
            .map(PersistentQueryMetadata::getQueryApplicationId)
            .collect(Collectors.toSet());
        // Drop per-app state for applications that disappeared since the last run.
        for (final String appId : Sets.difference(new HashSet<>(perKafkaStreamsStats.keySet()), appIds)) {
            perKafkaStreamsStats.get(appId).cleanup(reporter);
            perKafkaStreamsStats.remove(appId);
        }
    } catch (final RuntimeException e) {
        LOGGER.error("Error collecting saturation", e);
        throw e;
    }
}
@Test
public void shouldComputeSaturationForQuery() {
    // Given:
    final Instant start = Instant.now();
    when(clock.get()).thenReturn(start);
    givenMetrics(kafkaStreams1)
        .withThreadStartTime("t1", start.minus(WINDOW.multipliedBy(2)))
        .withBlockedTime("t1", Duration.ofMinutes(2))
        .withThreadStartTime("t2", start.minus(WINDOW.multipliedBy(2)))
        .withBlockedTime("t2", Duration.ofMinutes(2));
    // First run establishes the baseline sample for the window.
    collector.run();
    when(clock.get()).thenReturn(start.plus(WINDOW));
    givenMetrics(kafkaStreams1)
        .withThreadStartTime("t1", start.minus(WINDOW.multipliedBy(2)))
        .withBlockedTime("t1", Duration.ofMinutes(3))
        .withThreadStartTime("t2", start.minus(WINDOW.multipliedBy(2)))
        .withBlockedTime("t2", Duration.ofMinutes(7));
    // When:
    collector.run();
    // Then:
    final DataPoint point = verifyAndGetLatestDataPoint(
        "node-query-saturation",
        ImmutableMap.of("query-id", "hootie")
    );
    // t2 blocked 5 of 10 minutes is the saturated thread => ~0.9 after windowing.
    assertThat((Double) point.getValue(), closeTo(.9, .01));
}
/** Notifies every consumer-region-group view tracking {@code region} that it finished. */
void regionFinished(SchedulingPipelinedRegion region) {
    for (ConsumerRegionGroupExecutionView executionView :
            executionViewByRegion.getOrDefault(region, Collections.emptySet())) {
        executionView.regionFinished(region);
    }
}
// Finishing the only consumer region must mark the whole group view finished.
@Test
void testRegionFinished() throws Exception {
    consumerRegionGroupExecutionViewMaintainer.regionFinished(consumerRegion);
    assertThat(consumerRegionGroupExecutionView.isFinished()).isTrue();
}
/**
 * Builds the catalog detail view (service information plus its clusters) for the
 * given service.
 *
 * @throws NacosException NOT_FOUND when the service is not registered
 */
@Override
public Object getServiceDetail(String namespaceId, String groupName, String serviceName) throws NacosException {
    Service service = Service.newService(namespaceId, groupName, serviceName);
    if (!ServiceManager.getInstance().containSingleton(service)) {
        throw new NacosException(NacosException.NOT_FOUND,
                String.format("service %s@@%s is not found!", groupName, serviceName));
    }
    // Fall back to empty metadata when none has been stored for the service.
    Optional<ServiceMetadata> metadata = metadataManager.getServiceMetadata(service);
    ServiceMetadata detailedService = metadata.orElseGet(ServiceMetadata::new);
    ObjectNode serviceObject = JacksonUtils.createEmptyJsonNode();
    serviceObject.put(FieldsConstants.NAME, serviceName);
    serviceObject.put(FieldsConstants.GROUP_NAME, groupName);
    serviceObject.put(FieldsConstants.PROTECT_THRESHOLD, detailedService.getProtectThreshold());
    serviceObject.replace(FieldsConstants.SELECTOR, JacksonUtils.transferToJsonNode(detailedService.getSelector()));
    serviceObject.replace(FieldsConstants.METADATA, JacksonUtils.transferToJsonNode(detailedService.getExtendData()));
    ObjectNode detailView = JacksonUtils.createEmptyJsonNode();
    detailView.replace(FieldsConstants.SERVICE, serviceObject);
    List<com.alibaba.nacos.api.naming.pojo.Cluster> clusters = new ArrayList<>();
    for (String each : serviceStorage.getClusters(service)) {
        // Use stored cluster metadata when present, otherwise defaults.
        ClusterMetadata clusterMetadata = detailedService.getClusters().containsKey(each)
                ? detailedService.getClusters().get(each) : new ClusterMetadata();
        com.alibaba.nacos.api.naming.pojo.Cluster clusterView = new Cluster();
        clusterView.setName(each);
        clusterView.setHealthChecker(clusterMetadata.getHealthChecker());
        clusterView.setMetadata(clusterMetadata.getExtendData());
        clusterView.setUseIPPort4Check(clusterMetadata.isUseInstancePortForCheck());
        clusterView.setDefaultPort(DEFAULT_PORT);
        clusterView.setDefaultCheckPort(clusterMetadata.getHealthyCheckPort());
        clusterView.setServiceName(service.getGroupedServiceName());
        clusters.add(clusterView);
    }
    detailView.replace(FieldsConstants.CLUSTERS, JacksonUtils.transferToJsonNode(clusters));
    return detailView;
}
@Test
void testGetServiceDetail() throws NacosException {
    ServiceMetadata serviceMetadata = new ServiceMetadata();
    serviceMetadata.setProtectThreshold(0.75F);
    Mockito.when(metadataManager.getServiceMetadata(Mockito.any())).thenReturn(Optional.of(serviceMetadata));
    Mockito.when(serviceStorage.getClusters(Mockito.any())).thenReturn(Collections.singleton("C"));
    Object obj = catalogServiceV2Impl.getServiceDetail("A", "B", "C");
    ObjectNode objectNode = (ObjectNode) obj;
    // The detail view must echo the service identity and stored metadata.
    assertEquals("C", objectNode.get(FieldsConstants.SERVICE).get(FieldsConstants.NAME).asText());
    assertEquals("B", objectNode.get(FieldsConstants.SERVICE).get(FieldsConstants.GROUP_NAME).asText());
    assertEquals("none", objectNode.get(FieldsConstants.SERVICE).get(FieldsConstants.SELECTOR).get("type").asText());
    assertEquals(0, objectNode.get(FieldsConstants.SERVICE).get(FieldsConstants.METADATA).size());
    assertEquals(0.75, objectNode.get(FieldsConstants.SERVICE).get(FieldsConstants.PROTECT_THRESHOLD).asDouble(), 0.1);
}
/** Delegates to the single-argument overload; the resource context is not needed. */
@Override
public boolean isValid(Link link, ResourceContext context) {
    // explicitly call a method not depending on LinkResourceService
    return isValid(link);
}
// With metered links allowed, every link kind (incl. unannotated) is valid.
@Test
public void testMeteredAllowed() {
    MeteredConstraint constraint = new MeteredConstraint(true);
    assertThat(constraint.isValid(meteredLink, resourceContext), is(true));
    assertThat(constraint.isValid(nonMeteredLink, resourceContext), is(true));
    assertThat(constraint.isValid(unAnnotatedLink, resourceContext), is(true));
}
/**
 * Returns TRANSFORMED only when the encode-transformation option is explicitly
 * enabled; an absent (null) or false option means the original source is encoded.
 */
@NonNull
@Override
public EncodeStrategy getEncodeStrategy(@NonNull Options options) {
    // Boolean.TRUE.equals(...) folds the null check and unboxing into one call.
    return Boolean.TRUE.equals(options.get(ENCODE_TRANSFORMATION))
        ? EncodeStrategy.TRANSFORMED
        : EncodeStrategy.SOURCE;
}
// An explicit false must behave like the default: encode the original source.
@Test
public void testEncodeStrategy_withEncodeTransformationFalse_returnsSource() {
    options.set(ReEncodingGifResourceEncoder.ENCODE_TRANSFORMATION, false);
    assertThat(encoder.getEncodeStrategy(options)).isEqualTo(EncodeStrategy.SOURCE);
}
/**
 * Maps a JCA algorithm string to a model node: first tries the JCA-specific
 * mappers (first hit wins), then a set of well-known algorithm names, finally
 * falling back to an Unknown-tagged algorithm node.
 */
@Nonnull
@Override
public Optional<? extends INode> parse(
        @Nullable final String str, @Nonnull DetectionLocation detectionLocation) {
    if (str == null) {
        return Optional.empty();
    }
    for (IMapper mapper : jcaSpecificAlgorithmMappers) {
        Optional<? extends INode> asset = mapper.parse(str, detectionLocation);
        if (asset.isPresent()) {
            return asset;
        }
    }
    // Matching is case-insensitive and ignores surrounding whitespace.
    return switch (str.toUpperCase().trim()) {
        case "PBE", "PBES2" -> Optional.of(new PasswordBasedEncryption(detectionLocation));
        case "DH", "DIFFIEHELLMAN" -> Optional.of(new DH(detectionLocation));
        case "RSA" -> Optional.of(new RSA(detectionLocation));
        case "EC" -> Optional.of(new Algorithm(str, PublicKeyEncryption.class, detectionLocation));
        default -> {
            // Unrecognized name: record it as an Unknown algorithm.
            final Algorithm algorithm = new Algorithm(str, Unknown.class, detectionLocation);
            algorithm.put(new Unknown(detectionLocation));
            yield Optional.of(algorithm);
        }
    };
}
// A transformation string like "AES/CFB8/NoPadding" must resolve to a BlockCipher node.
@Test
void blockCipher() {
    DetectionLocation testDetectionLocation =
        new DetectionLocation("testfile", 1, 1, List.of("test"), () -> "SSL");
    JcaAlgorithmMapper jcaAlgorithmMapper = new JcaAlgorithmMapper();
    Optional<? extends INode> assetOptional =
        jcaAlgorithmMapper.parse("AES/CFB8/NoPadding", testDetectionLocation);
    assertThat(assetOptional).isPresent();
    assertThat(assetOptional.get().is(BlockCipher.class)).isTrue();
}
/**
 * Validates that the mobile number is present.
 *
 * @param mobile the mobile number to check
 * @return the same mobile number when non-empty
 */
@VisibleForTesting
public String validateMobile(String mobile) {
    if (StrUtil.isEmpty(mobile)) {
        throw exception(SMS_SEND_MOBILE_NOT_EXISTS);
    }
    return mobile;
}
// A null mobile number must raise SMS_SEND_MOBILE_NOT_EXISTS.
@Test
public void testCheckMobile_notExists() {
    // 准备参数 (prepare arguments)
    // mock 方法 (mock methods)
    // 调用,并断言异常 (invoke and assert the expected exception)
    assertServiceException(() -> smsSendService.validateMobile(null),
        SMS_SEND_MOBILE_NOT_EXISTS);
}
/**
 * Starts building a batched operation over {@code items}.
 *
 * @throws NullPointerException if {@code items} is null
 */
public static <I> Builder<I> foreach(Iterable<I> items) {
    return new Builder<>(requireNonNull(items, "items"));
}
// With suppression enabled a failing task must not stop the remaining items.
@Test
public void testFailNoStoppingSuppressed() throws Throwable {
    assertFailed(builder().suppressExceptions(), failingTask);
    failingTask.assertInvoked("Continued through operations", ITEM_COUNT);
    items.forEach(Item::assertCommittedOrFailed);
}
/**
 * Returns the first matching custom error message for the request/response pair,
 * or {@code null} when no registered matcher applies.
 */
public String getCustomError(HttpRequestWrapper req, HttpResponseWrapper res) {
    for (MatcherAndError m : matchersAndLogs) {
        if (m.getMatcher().matchResponse(req, res)) {
            return m.getCustomError().customError(req, res);
        }
    }
    return null;
}
// A 403 whose URL contains the configured fragment must yield the custom message.
@Test
public void testMatchesCodeAndUrlContains() throws IOException {
    HttpRequestWrapper request = createHttpRequest(BQ_TABLES_LIST_URL);
    HttpResponseWrapper response = createHttpResponse(403);
    CustomHttpErrors.Builder builder = new CustomHttpErrors.Builder();
    builder.addErrorForCodeAndUrlContains(403, "/tables?", "Custom Error Msg");
    CustomHttpErrors customErrors = builder.build();
    String errorMessage = customErrors.getCustomError(request, response);
    assertEquals("Custom Error Msg", errorMessage);
}
// For this executor the undo rows are the after-image recorded in the SQL undo log.
@Override protected TableRecords getUndoRows() { return sqlUndoLog.getAfterImage(); }
// getUndoRows() must return exactly the undo log's after image.
@Test public void getUndoRows() { Assertions.assertEquals(executor.getUndoRows(), executor.getSqlUndoLog().getAfterImage()); }
// Applies the requested feature-level updates, collecting a per-feature ApiError in a
// sorted map. In validate-only mode the generated metadata records are discarded;
// otherwise all records are returned as one atomic batch (bounded per user op).
ControllerResult<Map<String, ApiError>> updateFeatures( Map<String, Short> updates, Map<String, FeatureUpdate.UpgradeType> upgradeTypes, boolean validateOnly ) { TreeMap<String, ApiError> results = new TreeMap<>(); List<ApiMessageAndVersion> records = BoundedList.newArrayBacked(MAX_RECORDS_PER_USER_OP); for (Entry<String, Short> entry : updates.entrySet()) { results.put(entry.getKey(), updateFeature(entry.getKey(), entry.getValue(), upgradeTypes.getOrDefault(entry.getKey(), FeatureUpdate.UpgradeType.UPGRADE), records)); } if (validateOnly) { return ControllerResult.of(Collections.emptyList(), results); } else { return ControllerResult.atomicOf(records, results); } }
// (Disabled) Validate-only unsafe downgrade of metadata.version should succeed without emitting records.
@Disabled @Test public void testCanUseUnsafeDowngradeIfMetadataChanged() { FeatureControlManager manager = TEST_MANAGER_BUILDER1.build(); assertEquals(ControllerResult.of(Collections.emptyList(), singletonMap(MetadataVersion.FEATURE_NAME, ApiError.NONE)), manager.updateFeatures( singletonMap(MetadataVersion.FEATURE_NAME, MetadataVersion.IBP_3_3_IV0.featureLevel()), singletonMap(MetadataVersion.FEATURE_NAME, FeatureUpdate.UpgradeType.UNSAFE_DOWNGRADE), true)); }
// Appends a trimmed message to the buffer, separating entries with a single space
// and normalizing terminal punctuation: a trailing ':' is replaced by the configured
// suffix, and the suffix is appended unless the text already ends in '.', '?' or '!'.
// Blank messages are ignored. Returns this for chaining.
public StringAppender append(final String message) {
    if(StringUtils.isBlank(StringUtils.trim(message))) {
        return this;
    }
    if(buffer.length() > 0) {
        buffer.append(" ");
    }
    buffer.append(StringUtils.trim(message));
    if(buffer.charAt(buffer.length() - 1) == '.') {
        // Already terminated with a period; nothing to normalize
        return this;
    }
    if(buffer.charAt(buffer.length() - 1) == ':') {
        buffer.deleteCharAt(buffer.length() - 1);
        if(buffer.length() == 0) {
            // Message consisted solely of ':' — nothing left to punctuate.
            // Previously this fell through and charAt(-1) below threw
            // StringIndexOutOfBoundsException.
            return this;
        }
    }
    // Direct character comparison instead of recompiling a regex on every call
    final char last = buffer.charAt(buffer.length() - 1);
    if(last != '.' && last != '?' && last != '!') {
        buffer.append(suffix);
    }
    return this;
}
// Exercises suffix/punctuation normalization of StringAppender.append for various terminal characters.
@Test public void testAppend() { assertEquals("Verification Code.", new StringAppender().append("Verification Code:").toString()); assertEquals("Message.", new StringAppender().append("Message").toString()); assertEquals("Message.", new StringAppender().append("Message.").toString()); assertEquals("Message? t.", new StringAppender().append("Message?").append("t").toString()); assertEquals("Message).", new StringAppender().append("Message)").toString()); assertEquals("m.", new StringAppender().append("m").append(" ").toString()); }
// Convenience IMap sink that keys/values entries directly via Entry::getKey / Entry::getValue.
@Nonnull public static <K, V> Sink<Entry<K, V>> map(@Nonnull String mapName) { return map(mapName, Entry::getKey, Entry::getValue); }
// Ensures Sinks.map adapts the partition key function when fed from two differently-typed stages
// (a windowed stream stage and a batch stage) writing into the same IMap sink.
@Test @SuppressWarnings("unchecked") public void test_adaptingPartitionFunction() { Pipeline p = Pipeline.create(); StreamStage<KeyedWindowResult<String, Long>> input1 = p.readFrom(TestSources.items(0)) .addTimestamps(i -> i, 0) .groupingKey(item -> "key0") .window(WindowDefinition.sliding(1, 1)) .aggregate(AggregateOperations.counting()); BatchStage<Entry<String, Long>> input2 = p.readFrom(TestSources.items(entry("key1", 2L))); IMap<String, Long> sinkMap = hz().getMap(randomMapName()); p.writeTo(Sinks.map(sinkMap), input1, input2); hz().getJet().newJob(p).join(); assertEquals(2, sinkMap.size()); assertEquals((Long) 1L, sinkMap.get("key0")); assertEquals((Long) 2L, sinkMap.get("key1")); }
// Zero-argument overload: asserting containsExactly() means the map under test must be empty.
@CanIgnoreReturnValue public final Ordered containsExactly() { return containsExactlyEntriesIn(ImmutableMap.of()); }
// The failure message must report both the missing key ("feb") and the unexpected key ("march").
@Test public void containsExactlyExtraKeyAndMissingKey() { ImmutableMap<String, Integer> actual = ImmutableMap.of("jan", 1, "march", 3); expectFailureWhenTestingThat(actual).containsExactly("jan", 1, "feb", 2); assertFailureKeys( "missing keys", "for key", "expected value", "unexpected keys", "for key", "unexpected value", "---", "expected", "but was"); assertFailureValueIndexed("for key", 0, "feb"); assertFailureValue("expected value", "2"); assertFailureValueIndexed("for key", 1, "march"); assertFailureValue("unexpected value", "3"); }
// Returns, under the manager lock, one info row per dictionary (optionally filtered by
// name), with a trailing column describing per-backend cache memory usage formatted as
// "host:port : usage, host:port : usage".
public List<List<String>> getAllInfo(String dictionaryName) throws Exception {
    List<List<String>> allInfo = Lists.newArrayList();
    lock.lock();
    try {
        for (Map.Entry<Long, Dictionary> entry : dictionariesMapById.entrySet()) {
            Dictionary dictionary = entry.getValue();
            // Optional filter: only report the dictionary with the given name
            if (dictionaryName != null && !dictionary.getDictionaryName().equals(dictionaryName)) {
                continue;
            }
            List<String> info = dictionary.getInfo();
            Map<TNetworkAddress, PProcessDictionaryCacheResult> resultMap = getDictionaryStatistic(dictionary);
            // Joining parts instead of manual concatenation + substring: the old
            // substring(0, length - 2) threw StringIndexOutOfBoundsException when
            // resultMap was empty.
            List<String> usageParts = Lists.newArrayList();
            for (Map.Entry<TNetworkAddress, PProcessDictionaryCacheResult> result : resultMap.entrySet()) {
                TNetworkAddress address = result.getKey();
                String prefix = address.getHostname() + ":" + address.getPort() + " : ";
                if (result.getValue() != null) {
                    usageParts.add(prefix + result.getValue().dictionaryMemoryUsage);
                } else {
                    usageParts.add(prefix + "Can not get Memory info");
                }
            }
            info.add(String.join(", ", usageParts));
            allInfo.add(info);
        }
    } finally {
        lock.unlock();
    }
    return allInfo;
}
// Smoke test: getAllInfo must not throw when queried for a named dictionary.
@Test public void testShowDictionary() throws Exception { dictionaryMgr.getAllInfo("dict"); }
// Static factory for a unary expression: validates that unaryOp is one of the
// recognized unary operation kinds before building the AutoValue instance.
public static UUnary create(Kind unaryOp, UExpression expression) { checkArgument( UNARY_OP_CODES.containsKey(unaryOp), "%s is not a recognized unary operation", unaryOp); return new AutoValue_UUnary(unaryOp, expression); }
// "foo--" must unify and inline as a POSTFIX_DECREMENT unary expression.
@Test public void postDecrement() { assertUnifiesAndInlines("foo--", UUnary.create(Kind.POSTFIX_DECREMENT, fooIdent)); }
// True only when the endpoint carries an IP address whose address family is IPV6.
public static boolean isIpV6Endpoint(NetworkEndpoint networkEndpoint) { return hasIpAddress(networkEndpoint) && networkEndpoint.getIpAddress().getAddressFamily().equals(AddressFamily.IPV6); }
// An IPv4 endpoint must not be classified as IPv6.
@Test public void isIpV6Endpoint_withIpV4Endpoint_returnsFalse() { NetworkEndpoint ipV4Endpoint = NetworkEndpoint.newBuilder() .setType(NetworkEndpoint.Type.IP) .setIpAddress( IpAddress.newBuilder().setAddress("1.2.3.4").setAddressFamily(AddressFamily.IPV4)) .build(); assertThat(NetworkEndpointUtils.isIpV6Endpoint(ipV4Endpoint)).isFalse(); }
// Renders an object as "SimpleName@identityHash"; null renders as "null".
public static String toShortString(Object obj) {
    return obj == null
            ? "null"
            : obj.getClass().getSimpleName() + "@" + System.identityHashCode(obj);
}
// toShortString: null renders as "null"; objects as SimpleName@identityHash.
@Test void testToShortString() { assertThat(ClassUtils.toShortString(null), equalTo("null")); assertThat(ClassUtils.toShortString(new ClassUtilsTest()), startsWith("ClassUtilsTest@")); }
// Varargs convenience: converts each string path to a Path and delegates to the
// Path[] overload.
public void setFilePaths(String... filePaths) {
    final Path[] paths = new Path[filePaths.length];
    int index = 0;
    for (String filePath : filePaths) {
        paths[index++] = new Path(filePath);
    }
    setFilePaths(paths);
}
// A null element among the path strings must trigger IllegalArgumentException.
@Test void testSetPathsOnePathNull() { assertThatThrownBy( () -> new MultiDummyFileInputFormat() .setFilePaths("/an/imaginary/path", null)) .isInstanceOf(IllegalArgumentException.class); }
// Runs the non-locking findAllNonRunningInstances variant inside a JDBC transaction.
public List<ServiceInstance> findAllNonRunningInstances() { return jdbcRepository.getDslContextWrapper().transactionResult( configuration -> findAllNonRunningInstances(configuration, false) ); }
// After persisting all fixtures, only the non-running instances must be returned.
@Test protected void shouldFindAllNonRunningInstances() { // Given AbstractJdbcServiceInstanceRepositoryTest.Fixtures.all().forEach(repository::save); // When List<ServiceInstance> results = repository.findAllNonRunningInstances(); // Then assertEquals(AbstractJdbcServiceInstanceRepositoryTest.Fixtures.allNonRunning().size(), results.size()); assertThat(results, Matchers.containsInAnyOrder(AbstractJdbcServiceInstanceRepositoryTest.Fixtures.allNonRunning().toArray())); }
public ShowResultSet modifyBackendProperty(ModifyBackendClause modifyBackendClause) throws DdlException { String backendHostPort = modifyBackendClause.getBackendHostPort(); Map<String, String> properties = modifyBackendClause.getProperties(); // check backend existence Backend backend = getBackendWithHeartbeatPort(backendHostPort.split(":")[0], Integer.parseInt(backendHostPort.split(":")[1])); if (null == backend) { throw new DdlException(String.format("backend [%s] not found", backendHostPort)); } ShowResultSetMetaData.Builder builder = ShowResultSetMetaData.builder(); builder.addColumn(new Column("Message", ScalarType.createVarchar(1024))); List<List<String>> messageResult = new ArrayList<>(); // update backend based on properties for (Map.Entry<String, String> entry : properties.entrySet()) { if (entry.getKey().equals(AlterSystemStmtAnalyzer.PROP_KEY_LOCATION)) { Map<String, String> location = new HashMap<>(); // "" means clean backend location label if (entry.getValue().isEmpty()) { backend.setLocation(location); continue; } String[] locKV = entry.getValue().split(":"); location.put(locKV[0].trim(), locKV[1].trim()); backend.setLocation(location); String opMessage = String.format("%s:%d's location has been modified to %s", backend.getHost(), backend.getHeartbeatPort(), properties); messageResult.add(Collections.singletonList(opMessage)); } else { throw new UnsupportedOperationException("unsupported property: " + entry.getKey()); } } // persistence GlobalStateMgr.getCurrentState().getEditLog().logBackendStateChange(backend); // Return message return new ShowResultSet(builder.build(), messageResult); }
// Modifying a backend's location property must be reflected by getLocation().
@Test public void testModifyBackendProperty() throws DdlException { Backend be = new Backend(100, "originalHost", 1000); service.addBackend(be); Map<String, String> properties = Maps.newHashMap(); String location = "rack:rack1"; properties.put(AlterSystemStmtAnalyzer.PROP_KEY_LOCATION, location); ModifyBackendClause clause = new ModifyBackendClause("originalHost:1000", properties); service.modifyBackendProperty(clause); Backend backend = service.getBackendWithHeartbeatPort("originalHost", 1000); Assert.assertNotNull(backend); Assert.assertEquals("{rack=rack1}", backend.getLocation().toString()); }
// Translates a private member address to its public counterpart. When public
// addresses are disabled (inside the cloud) the input is returned untouched.
// On a cache miss the private->public map is refreshed via getAddresses.
// NOTE(review): privateToPublic is replaced without synchronization — confirm
// this provider is only used from a single thread.
@Override public Address translate(Address address) throws Exception { if (address == null) { return null; } // if it is inside cloud, return private address otherwise we need to translate it. if (!usePublic) { return address; } Address publicAddress = privateToPublic.get(address); if (publicAddress != null) { return publicAddress; } privateToPublic = getAddresses.call(); return privateToPublic.get(address); }
// A known private address must translate to the supplied public address (host and port).
@Test public void testTranslate() throws Exception { Address privateAddress = new Address("10.0.0.1", 5701); Address publicAddress = new Address("198.51.100.1", 5701); RemoteAddressProvider provider = new RemoteAddressProvider(() -> Collections.singletonMap(privateAddress, publicAddress), true); Address actual = provider.translate(privateAddress); assertEquals(publicAddress.getHost(), actual.getHost()); assertEquals(publicAddress.getPort(), actual.getPort()); }
// Converts a value to BigDecimal result form: null goes through the generic null
// conversion, an exact BigDecimal is used as-is, Numbers and Strings are parsed,
// and any other type is rejected. The scale adjustment is applied in one place.
public static Object convertBigDecimalValue(final Object value, final boolean needScale, final int scale) {
    if (null == value) {
        return convertNullValue(BigDecimal.class);
    }
    final BigDecimal decimal;
    if (BigDecimal.class == value.getClass()) {
        decimal = (BigDecimal) value;
    } else if (value instanceof Number || value instanceof String) {
        decimal = new BigDecimal(value.toString());
    } else {
        throw new UnsupportedDataTypeConversionException(BigDecimal.class, value);
    }
    return adjustBigDecimalResult(decimal, needScale, scale);
}
// The string "12" converts to BigDecimal 12 when no scaling is requested.
@Test void assertConvertBigDecimalValue() { BigDecimal bigDecimal = (BigDecimal) ResultSetUtils.convertBigDecimalValue("12", false, 0); assertThat(bigDecimal, is(BigDecimal.valueOf(12L))); }
// Convenience overload: scans the project's artifacts with aggregation disabled.
protected ExceptionCollection scanArtifacts(MavenProject project, Engine engine) { return scanArtifacts(project, engine, false); }
// Mocks the Maven project's artifact set (pointing at this test's own code source)
// and verifies that scanning adds dependencies to the engine without errors.
@Test public void testScanArtifacts() throws DatabaseException, InvalidSettingException { new MockUp<MavenProject>() { @Mock public Set<Artifact> getArtifacts() { Set<Artifact> artifacts = new HashSet<>(); Artifact a = new ArtifactStub(); try { File file = new File(Test.class.getProtectionDomain().getCodeSource().getLocation().toURI()); a.setFile(file); artifacts.add(a); } catch (URISyntaxException ex) { Logger.getLogger(BaseDependencyCheckMojoTest.class.getName()).log(Level.SEVERE, null, ex); } //File file = new File(this.getClass().getClassLoader().getResource("daytrader-ear-2.1.7.ear").getPath()); return artifacts; } @SuppressWarnings("SameReturnValue") @Mock public String getName() { return "test-project"; } }; if (canRun()) { boolean autoUpdate = getSettings().getBoolean(Settings.KEYS.AUTO_UPDATE); getSettings().setBoolean(Settings.KEYS.AUTO_UPDATE, false); try (Engine engine = new Engine(getSettings())) { getSettings().setBoolean(Settings.KEYS.AUTO_UPDATE, autoUpdate); assertTrue(engine.getDependencies().length == 0); BaseDependencyCheckMojoImpl instance = new BaseDependencyCheckMojoImpl(); ExceptionCollection exCol = null; try { //the mock above fails under some JDKs exCol = instance.scanArtifacts(project, engine); } catch (NullPointerException ex) { Assume.assumeNoException(ex); } assertNull(exCol); assertFalse(engine.getDependencies().length == 0); } } }
// Greater-than stream rule matcher: both the message field and the rule value are
// parsed as doubles (non-numeric values never match). The rule's inversion flag is
// applied via XOR: inverted ^ (msgVal > ruleVal).
@Override public boolean match(Message msg, StreamRule rule) { Double msgVal = getDouble(msg.getField(rule.getField())); if (msgVal == null) { return false; } Double ruleVal = getDouble(rule.getValue()); if (ruleVal == null) { return false; } return rule.getInverted() ^ (msgVal > ruleVal); }
// Equal values must not match a (non-inverted) greater-than rule.
@Test public void testMissedMatchWithEqualValues() { StreamRule rule = getSampleRule(); rule.setValue("-9001"); Message msg = getSampleMessage(); msg.addField("something", "-9001"); StreamRuleMatcher matcher = getMatcher(rule); assertFalse(matcher.match(msg, rule)); }
// REST endpoint returning column metadata for a single table; access is gated through
// the optional restApiInterceptor and an unknown table yields 404 via
// FlowableObjectNotFoundException.
@ApiOperation(value = "Get column info for a single table", tags = { "Database tables" }) @ApiResponses(value = { @ApiResponse(code = 200, message = "Indicates the table exists and the table column info is returned."), @ApiResponse(code = 404, message = "Indicates the requested table does not exist.") }) @GetMapping(value = "/management/tables/{tableName}/columns", produces = "application/json") public TableMetaData getTableMetaData(@ApiParam(name = "tableName") @PathVariable String tableName) { if (restApiInterceptor != null) { restApiInterceptor.accessTableInfo(); } TableMetaData response = managementService.getTableMetaData(tableName); if (response == null) { throw new FlowableObjectNotFoundException("Could not find a table with name '" + tableName + "'.", String.class); } return response; }
// Column names/types in the REST response must mirror managementService.getTableMetaData.
@Test public void testGetTableColumns() throws Exception { String tableName = managementService.getTableCount().keySet().iterator().next(); TableMetaData metaData = managementService.getTableMetaData(tableName); CloseableHttpResponse response = executeRequest( new HttpGet(SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_TABLE_COLUMNS, tableName)), HttpStatus.SC_OK); // Check table JsonNode responseNode = objectMapper.readTree(response.getEntity().getContent()); closeResponse(response); assertThat(responseNode).isNotNull(); assertThat(responseNode.get("tableName").textValue()).isEqualTo(tableName); ArrayNode names = (ArrayNode) responseNode.get("columnNames"); ArrayNode types = (ArrayNode) responseNode.get("columnTypes"); assertThat(names).isNotNull(); assertThat(types).isNotNull(); assertThat(names).hasSameSizeAs(metaData.getColumnNames()); assertThat(types).hasSameSizeAs(metaData.getColumnTypes()); for (int i = 0; i < names.size(); i++) { assertThat(metaData.getColumnNames().get(i)).isEqualTo(names.get(i).textValue()); assertThat(metaData.getColumnTypes().get(i)).isEqualTo(types.get(i).textValue()); } }
// Attempts to unblock a publication log stuck at blockedPosition. If the blocked
// position is exactly the start of the term following the active one, the log is
// simply rotated. Otherwise the blocked term buffer is handed to TermUnblocker;
// UNBLOCKED_TO_END additionally rotates the log. Returns true when any progress
// was made, false when no action was possible.
public static boolean unblock( final UnsafeBuffer[] termBuffers, final UnsafeBuffer logMetaDataBuffer, final long blockedPosition, final int termLength) { final int positionBitsToShift = LogBufferDescriptor.positionBitsToShift(termLength); final int blockedTermCount = (int)(blockedPosition >> positionBitsToShift); final int blockedOffset = (int)blockedPosition & (termLength - 1); final int activeTermCount = activeTermCount(logMetaDataBuffer); if (activeTermCount == (blockedTermCount - 1) && blockedOffset == 0) { final int currentTermId = termId(rawTailVolatile(logMetaDataBuffer, indexByTermCount(activeTermCount))); rotateLog(logMetaDataBuffer, activeTermCount, currentTermId); return true; } final int blockedIndex = indexByTermCount(blockedTermCount); final long rawTail = rawTailVolatile(logMetaDataBuffer, blockedIndex); final int termId = termId(rawTail); final int tailOffset = termOffset(rawTail, termLength); final UnsafeBuffer termBuffer = termBuffers[blockedIndex]; switch (TermUnblocker.unblock(logMetaDataBuffer, termBuffer, blockedOffset, tailOffset, termId)) { case NO_ACTION: break; case UNBLOCKED_TO_END: rotateLog(logMetaDataBuffer, blockedTermCount, termId); return true; case UNBLOCKED: return true; } return false; }
// Unblocking with a non-committed message while the tail is past the end of the term
// must advance the position by a full message and rotate the active term count.
@Test void shouldUnblockWhenPositionHasNonCommittedMessageAndTailPastEndOfTerm() { final int messageLength = HEADER_LENGTH * 4; final int blockedOffset = TERM_LENGTH - messageLength; final long blockedPosition = computePosition(TERM_ID_1, blockedOffset, positionBitsToShift, TERM_ID_1); final int activeIndex = indexByPosition(blockedPosition, positionBitsToShift); when(termBuffers[activeIndex].getIntVolatile(blockedOffset)).thenReturn(0); logMetaDataBuffer.putLong(TERM_TAIL_COUNTER_OFFSET, pack(TERM_ID_1, TERM_LENGTH + HEADER_LENGTH)); assertTrue(LogBufferUnblocker.unblock(termBuffers, logMetaDataBuffer, blockedPosition, TERM_LENGTH)); final long rawTail = rawTailVolatile(logMetaDataBuffer); final int termId = termId(rawTail); assertEquals( blockedPosition + messageLength, computePosition(termId, 0, positionBitsToShift, TERM_ID_1)); verify(logMetaDataBuffer).compareAndSetInt(LOG_ACTIVE_TERM_COUNT_OFFSET, 0, 1); }
// Registers a mapping under the wildcard-normalized hostname; the output must be non-null.
public DomainWildcardMappingBuilder<V> add(String hostname, V output) { map.put(normalizeHostName(hostname), checkNotNull(output, "output")); return this; }
// A null hostname pattern must be rejected with NullPointerException.
@Test public void testNullDomainNamePatternsAreForbidden() { assertThrows(NullPointerException.class, new Executable() { @Override public void execute() { new DomainWildcardMappingBuilder<String>("NotFound").add(null, "Some value"); } }); }
// Parses just enough of the XML stream to fill XmlStreamInfo: root element name and
// namespace, namespace declarations on the root, root attributes (stored both under
// "prefix:local" and "{uri}local" keys, or bare local name when unprefixed), and any
// "camel-k:" modelines found in comments before the root element. Failures are
// recorded in information.problem instead of being thrown; parsing stops at the
// root start element.
public XmlStreamInfo information() throws IOException { if (information.problem != null) { return information; } if (XMLStreamConstants.START_DOCUMENT != reader.getEventType()) { information.problem = new IllegalStateException("Expected START_DOCUMENT"); return information; } boolean skipComments = false; try { while (reader.hasNext()) { int ev = reader.next(); switch (ev) { case XMLStreamConstants.COMMENT: if (!skipComments) { // search for modelines String comment = reader.getText(); if (comment != null) { comment.lines().map(String::trim).forEach(l -> { if (l.startsWith("camel-k:")) { information.modelines.add(l); } }); } } break; case XMLStreamConstants.START_ELEMENT: if (information.rootElementName != null) { // only root element is checked. No need to parse more return information; } skipComments = true; information.rootElementName = reader.getLocalName(); information.rootElementNamespace = reader.getNamespaceURI(); for (int ns = 0; ns < reader.getNamespaceCount(); ns++) { String prefix = reader.getNamespacePrefix(ns); information.namespaceMapping.put(prefix == null ? "" : prefix, reader.getNamespaceURI(ns)); } for (int at = 0; at < reader.getAttributeCount(); at++) { QName qn = reader.getAttributeName(at); String prefix = qn.getPrefix() == null ? "" : qn.getPrefix().trim(); String nsURI = qn.getNamespaceURI() == null ? "" : qn.getNamespaceURI().trim(); String value = reader.getAttributeValue(at); String localPart = qn.getLocalPart(); if (nsURI.isEmpty() || prefix.isEmpty()) { // according to XML spec, this attribut is not namespaced, not in default namespace
// https://www.w3.org/TR/xml-names/#defaulting
// > The namespace name for an unprefixed attribute name always has no value.
information.attributes.put(localPart, value); } else { information.attributes.put("{" + nsURI + "}" + localPart, value); information.attributes.put(prefix + ":" + localPart, value); } } break; case XMLStreamConstants.END_ELEMENT: case XMLStreamConstants.END_DOCUMENT: if (information.rootElementName == null) { information.problem = new IllegalArgumentException("XML Stream is empty"); return information; } break; default: break; } } } catch (XMLStreamException e) { information.problem = e; return information; } return information; }
// Verifies attribute/namespace extraction, including prefixed and {uri}-qualified attribute keys.
@Test public void documentFullOfNamespaces() throws IOException { String xml = readAllFromFile("documentFullOfNamespaces.xml"); XmlStreamDetector detector = new XmlStreamDetector(new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8))); XmlStreamInfo info = detector.information(); assertTrue(info.isValid()); assertEquals("root", info.getRootElementName()); assertEquals("urn:camel", info.getRootElementNamespace()); assertEquals(6, info.getAttributes().size()); assertEquals("typo", info.getAttributes().get("xmlnS")); assertEquals("v1", info.getAttributes().get("a1")); assertEquals("v2", info.getAttributes().get("c:a1")); assertEquals("v2", info.getAttributes().get("{urn:camel:ns1}a1")); assertEquals("v3", info.getAttributes().get("d:a1")); assertEquals("v3", info.getAttributes().get("{urn:camel:ns2}a1")); assertEquals(3, info.getNamespaces().size()); assertEquals("urn:camel", info.getNamespaces().get("")); assertEquals("urn:camel:ns1", info.getNamespaces().get("c")); assertEquals("urn:camel:ns2", info.getNamespaces().get("d")); }
// Stores the ramp-up percentage after guarding the valid 0..100 range.
@Override public void setRampUpPercent(long rampUpPercent) {
    final boolean inRange = rampUpPercent >= 0 && rampUpPercent <= 100;
    Validate.isTrue(inRange, "rampUpPercent must be a value between 0 and 100");
    this.rampUpPercent = rampUpPercent;
}
// 101 is outside the 0..100 range and must be rejected with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class) public void testSetRampUpPercent_exceeds100() { sampler.setRampUpPercent(101); }
// Registers the "starrocks" service with starMgr (ALREADY_EXIST is tolerated), then
// bootstraps it under the write lock to obtain serviceId; if the service was already
// bootstrapped, the existing id is fetched via getServiceId(). Returns false only on
// unexpected StarClientException codes.
public boolean registerAndBootstrapService() { try { client.registerService("starrocks"); } catch (StarClientException e) { if (e.getCode() != StatusCode.ALREADY_EXIST) { LOG.error("Failed to register service from starMgr. Error: {}", e); return false; } } try (LockCloseable lock = new LockCloseable(rwLock.writeLock())) { try { serviceId = client.bootstrapService("starrocks", SERVICE_NAME); LOG.info("get serviceId: {} by bootstrapService to starMgr", serviceId); } catch (StarClientException e) { if (e.getCode() != StatusCode.ALREADY_EXIST) { LOG.error("Failed to bootstrap service from starMgr. Error: {}", e); return false; } else { getServiceId(); } } } return true; }
// ALREADY_EXIST on registerService must not prevent bootstrap from assigning serviceId.
@Test public void testRegisterServiceException() throws Exception { new Expectations() { { client.registerService(SERVICE_NAME); minTimes = 0; result = new StarClientException(StatusCode.ALREADY_EXIST, "service already exists!"); client.bootstrapService("starrocks", SERVICE_NAME); minTimes = 0; result = "3"; } }; starosAgent.registerAndBootstrapService(); Assert.assertEquals("3", Deencapsulation.getField(starosAgent, "serviceId")); }
// Normalizes a Camel message body into a StitchRequestBody. Supported body types, in
// dispatch order: StitchRequestBody, StitchMessage, Iterable (mixed elements), and
// Map (single record). Anything else is rejected with IllegalArgumentException.
@SuppressWarnings("unchecked") // visible for testing public StitchRequestBody createStitchRequestBody(final Message inMessage) { if (inMessage.getBody() instanceof StitchRequestBody) { return createStitchRequestBodyFromStitchRequestBody(inMessage.getBody(StitchRequestBody.class), inMessage); } if (inMessage.getBody() instanceof StitchMessage) { return createStitchRequestBodyFromStitchMessages(Collections.singletonList(inMessage.getBody(StitchMessage.class)), inMessage); } if (inMessage.getBody() instanceof Iterable) { return createStitchRequestBodyFromIterable(inMessage.getBody(Iterable.class), inMessage); } if (inMessage.getBody() instanceof Map) { return createStitchRecordFromMap(inMessage.getBody(Map.class), inMessage); } throw new IllegalArgumentException("Message body data `" + inMessage.getBody() + "` type is not supported"); }
// Builds a heterogeneous iterable (StitchMessage, StitchRequestBody, raw Map, Exchange,
// Message) and checks the merged JSON request body produced from it.
@Test void testIfCreateFromIterable() { final StitchConfiguration configuration = new StitchConfiguration(); configuration.setTableName("table_1"); configuration.setStitchSchema(StitchSchema.builder().addKeyword("field_1", "string").build()); configuration.setKeyNames("field_1"); final StitchMessage stitchMessage1 = StitchMessage.builder() .withData("field_1", "stitchMessage1") .withSequence(1) .build(); final StitchMessage stitchMessage2 = StitchMessage.builder() .withData("field_1", "stitchMessage2-1") .withData("field_2", "stitchMessage2-2") .withSequence(2) .build(); final StitchRequestBody stitchMessage2RequestBody = StitchRequestBody.builder() .addMessage(stitchMessage2) .withSchema(StitchSchema.builder().addKeyword("field_1", "integer").build()) .withTableName("table_1") .withKeyNames(Collections.singleton("field_1")) .build(); final Map<String, Object> stitchMessage3 = new LinkedHashMap<>(); stitchMessage3.put(StitchMessage.DATA, Collections.singletonMap("field_1", "stitchMessage3")); stitchMessage3.put(StitchMessage.SEQUENCE, 3L); final StitchMessage stitchMessage4 = StitchMessage.builder() .withData("field_1", "stitchMessage4") .withSequence(4) .build(); final Exchange stitchMessage4Exchange = new DefaultExchange(context); stitchMessage4Exchange.getMessage().setBody(stitchMessage4); final StitchMessage stitchMessage5 = StitchMessage.builder() .withData("field_1", "stitchMessage5") .withSequence(5) .build(); final Message stitchMessage5Message = new DefaultExchange(context).getMessage(); stitchMessage5Message.setBody(stitchMessage5); final List<Object> inputMessages = new LinkedList<>(); inputMessages.add(stitchMessage1); inputMessages.add(stitchMessage2RequestBody); inputMessages.add(stitchMessage3); inputMessages.add(stitchMessage4Exchange); inputMessages.add(stitchMessage5Message); final StitchProducerOperations operations = new StitchProducerOperations(new TestClient(), configuration); final Exchange exchange = new DefaultExchange(context);
exchange.getMessage().setBody(inputMessages); final String createdJson = JsonUtils.convertMapToJson(operations.createStitchRequestBody(exchange.getMessage()).toMap()); assertEquals( "{\"table_name\":\"table_1\",\"schema\":{\"field_1\":\"string\"},\"messages\":[{\"action\":\"upsert\",\"sequence\":1,\"data\":{\"field_1\":\"stitchMessage1\"}}," + "{\"action\":\"upsert\",\"sequence\":2,\"data\":{\"field_1\":\"stitchMessage2-1\",\"field_2\":\"stitchMessage2-2\"}},{\"action\":\"upsert\",\"sequence\":3,\"data\":{\"field_1\":" + "\"stitchMessage3\"}},{\"action\":\"upsert\",\"sequence\":4,\"data\":{\"field_1\":\"stitchMessage4\"}},{\"action\":\"upsert\",\"sequence\":5,\"data\":{\"field_1\":\"stitchMessage5\"}}]," + "\"key_names\":[\"field_1\"]}", createdJson); }
// Kahn's algorithm under a StampedLock read lock: seed the queue with nodes whose
// in-degree is zero, repeatedly remove one and decrement its successors' in-degrees,
// enqueueing those that reach zero. Any entries left in the in-degree map afterwards
// indicate a cycle, reported as CyclicDependencyException.
public List<T> topologicalSort() throws CyclicDependencyException { long stamp = lock.readLock(); try { ArrayList<T> result = new ArrayList<>(); Deque<T> noIncomingEdges = new ArrayDeque<>(); Map<T, Integer> temp = new HashMap<>(); for (Map.Entry<T, Set<T>> incoming : incomingEdges.entrySet()) { int size = incoming.getValue().size(); T key = incoming.getKey(); temp.put(key, size); if (size == 0) { noIncomingEdges.add(key); } } while (!noIncomingEdges.isEmpty()) { T n = noIncomingEdges.poll(); result.add(n); temp.remove(n); Set<T> elements = outgoingEdges.get(n); if (elements != null) { for (T m : elements) { Integer count = temp.get(m); temp.put(m, --count); if (count == 0) { noIncomingEdges.add(m); } } } } if (!temp.isEmpty()) { throw new CyclicDependencyException("Cycle detected"); } else { return result; } } finally { lock.unlockRead(stamp); } }
// Concurrent edge insertion from 20 threads must still yield the deterministic order A,D,C,B.
@Test public void testConcurrentAccess() throws Exception { DependencyGraph<String> graph = new DependencyGraph<>(); ExecutorService service = Executors.newCachedThreadPool(getTestThreadFactory("Worker")); try { CountDownLatch startLatch = new CountDownLatch(1); int threads = 20; ArrayList<Future<?>> futures = new ArrayList<>(); for (int i = 0; i < threads; i++) { futures.add(submitTask("A", "B", startLatch, service, graph)); futures.add(submitTask("A", "C", startLatch, service, graph)); futures.add(submitTask("A", "D", startLatch, service, graph)); futures.add(submitTask("A", "B", startLatch, service, graph)); futures.add(submitTask("D", "B", startLatch, service, graph)); futures.add(submitTask("D", "C", startLatch, service, graph)); futures.add(submitTask("C", "B", startLatch, service, graph)); } startLatch.countDown(); awaitAll(futures); } finally { service.shutdownNow(); } assertEquals(graph.topologicalSort(), Arrays.asList("A", "D", "C", "B")); }
// Reads a big-endian int from whichever concrete stdlib reader backs this input
// (RandomAccessFile or DataInputStream); any other backing type is unsupported.
public final int readInt() throws IOException {
    if (input instanceof DataInputStream) {
        return ((DataInputStream) input).readInt();
    }
    if (input instanceof RandomAccessFile) {
        return ((RandomAccessFile) input).readInt();
    }
    throw new UnsupportedOperationException("Unknown Hollow Blob Input type");
}
// Both the on-heap and shared-memory-lazy inputs must read the same first int (65537).
@Test public void testReadInt() throws IOException { HollowBlobInput inStream = HollowBlobInput.modeBasedSelector(MemoryMode.ON_HEAP, mockBlob); assertEquals(65537, inStream.readInt()); // first int HollowBlobInput inBuffer = HollowBlobInput.modeBasedSelector(MemoryMode.SHARED_MEMORY_LAZY, mockBlob); assertEquals(65537, inBuffer.readInt()); // first int }
static IDXData readData(Path path) throws IOException { InputStream inputStream = IOUtil.getInputStreamForLocation(path.toString()); if (inputStream == null) { throw new FileNotFoundException("Failed to load from path - " + path); } // DataInputStream.close implicitly closes the InputStream try (DataInputStream stream = new DataInputStream(inputStream)) { short magicNumber = stream.readShort(); if (magicNumber != 0) { throw new IllegalStateException("Invalid IDX file, magic number was not zero. Found " + magicNumber); } final byte dataTypeByte = stream.readByte(); final IDXType dataType = IDXType.convert(dataTypeByte); final byte numDimensions = stream.readByte(); if (numDimensions < 1) { throw new IllegalStateException("Invalid number of dimensions, found " + numDimensions); } final int[] shape = new int[numDimensions]; int size = 1; for (int i = 0; i < numDimensions; i++) { shape[i] = stream.readInt(); if (shape[i] < 1) { throw new IllegalStateException("Invalid shape, found " + Arrays.toString(shape)); } size *= shape[i]; } double[] data = new double[size]; try { for (int i = 0; i < size; i++) { switch (dataType) { case BYTE: data[i] = stream.readByte(); break; case UBYTE: data[i] = stream.readUnsignedByte(); break; case SHORT: data[i] = stream.readShort(); break; case INT: data[i] = stream.readInt(); break; case FLOAT: data[i] = stream.readFloat(); break; case DOUBLE: data[i] = stream.readDouble(); break; } } } catch (EOFException e) { throw new IllegalStateException("Too little data in the file, expected to find " + size + " elements"); } try { byte unexpectedByte = stream.readByte(); throw new IllegalStateException("Too much data in the file"); } catch (EOFException e) { //pass as the stream is exhausted } return new IDXData(dataType, shape, data); } }
// Files with trailing garbage or truncated payloads must raise IllegalStateException.
@Test public void testInvalidIDX() throws URISyntaxException { Path dataFile = Paths.get(IDXDataSourceTest.class.getResource("/org/tribuo/datasource/too-much-data.idx").toURI()); assertThrows(IllegalStateException.class, () -> IDXDataSource.readData(dataFile)); Path otherDataFile = Paths.get(IDXDataSourceTest.class.getResource("/org/tribuo/datasource/too-little-data.idx").toURI()); assertThrows(IllegalStateException.class, () -> IDXDataSource.readData(otherDataFile)); }
// Produces a single-row merged result holding the MySQL-flavored protocol version
// for the session's currently used database.
@Override public void execute(final ConnectionSession connectionSession) { mergedResult = new LocalDataMergedResult(Collections.singleton(new LocalDataQueryResultRow( DatabaseProtocolServerInfo.getProtocolVersion(connectionSession.getUsedDatabaseName(), TypedSPILoader.getService(DatabaseType.class, "MySQL"))))); }
// SELECT version() with a projection alias must surface the alias in the query result.
@Test void assertExecuteWithAlias() throws SQLException { SelectStatement selectStatement = mock(SelectStatement.class); when(selectStatement.getProjections()).thenReturn(createProjectionsSegmentWithAlias()); ShowVersionExecutor executor = new ShowVersionExecutor(selectStatement); executor.execute(mockConnectionSession()); assertQueryResult(executor, "foo_alias"); }
// Adds diagnostic headers to an outgoing Kafka record: producer client id and current
// thread name always; when TRACE logging is enabled, also up to ~18 caller stack
// frames (skipping getStackTrace and this method). Stack-capture failures are
// swallowed and only logged at trace level.
void addAnalyticHeaders(List<Header> headers) { headers.add(new RecordHeader("_producerId", getClientId().getBytes(StandardCharsets.UTF_8))); headers.add(new RecordHeader("_threadName", Thread.currentThread().getName().getBytes(StandardCharsets.UTF_8))); if (log.isTraceEnabled()) { try { StackTraceElement[] stackTrace = Thread.currentThread().getStackTrace(); int maxLevel = Math.min(stackTrace.length, 20); for (int i = 2; i < maxLevel; i++) { // ignore two levels: getStackTrace and addAnalyticHeaders headers.add(new RecordHeader("_stackTrace" + i, stackTrace[i].toString().getBytes(StandardCharsets.UTF_8))); } } catch (Throwable t) { log.trace("Failed to add stacktrace headers in Kafka producer {}", getClientId(), t); } } }
// Smoke test: the analytic headers list must be populated; each header is logged
// so its contents can be inspected in the build output.
@Test
void testAddAnalyticHeaders() {
    List<Header> headers = new ArrayList<>();
    producerTemplate.addAnalyticHeaders(headers);
    assertThat(headers).isNotEmpty();
    headers.forEach(r -> log.info("RecordHeader key [{}] value [{}]", r.key(), new String(r.value(), StandardCharsets.UTF_8)));
}
/**
 * POSTs a form to a config server, failing over across the server list until a
 * request succeeds, the total time budget elapses, or the retry budget runs out.
 *
 * @param path          request path appended to the server address
 * @param headers       extra HTTP headers, may be null
 * @param paramValues   form parameters
 * @param encode        not referenced in this method — presumably kept for interface
 *                      compatibility; TODO confirm against the interface contract
 * @param readTimeoutMs per-request read timeout, also used as the total retry budget
 * @return the first successful result
 * @throws ConnectException when every server fails or the budgets are exhausted
 * @throws Exception rethrown for unexpected (non connect/timeout) errors
 */
@Override
public HttpRestResult<String> httpPost(String path, Map<String, String> headers, Map<String, String> paramValues, String encode, long readTimeoutMs) throws Exception {
    final long endTime = System.currentTimeMillis() + readTimeoutMs;
    String currentServerAddr = serverListMgr.getCurrentServerAddr();
    int maxRetry = this.maxRetry;
    HttpClientConfig httpConfig = HttpClientConfig.builder()
            .setReadTimeOutMillis(Long.valueOf(readTimeoutMs).intValue())
            .setConTimeOutMillis(ConfigHttpClientManager.getInstance().getConnectTimeoutOrDefault(3000)).build();
    do {
        try {
            Header newHeaders = Header.newInstance();
            if (headers != null) {
                newHeaders.addAll(headers);
            }
            HttpRestResult<String> result = nacosRestTemplate.postForm(getUrl(currentServerAddr, path), httpConfig, newHeaders, paramValues, String.class);
            if (isFail(result)) {
                // Server answered but with a failure code; log and fall through to retry.
                LOGGER.error("[NACOS ConnectException] currentServerAddr: {}, httpCode: {}", currentServerAddr, result.getCode());
            } else {
                // Update the currently available server addr
                serverListMgr.updateCurrentServerAddr(currentServerAddr);
                return result;
            }
        } catch (ConnectException connectException) {
            // Connection-level failure: swallow and try the next server below.
            LOGGER.error("[NACOS ConnectException httpPost] currentServerAddr: {}, err : {}", currentServerAddr, connectException.getMessage());
        } catch (SocketTimeoutException socketTimeoutException) {
            LOGGER.error("[NACOS SocketTimeoutException httpPost] currentServerAddr: {}, err : {}", currentServerAddr, socketTimeoutException.getMessage());
        } catch (Exception ex) {
            // Anything else is unexpected; propagate to the caller.
            LOGGER.error("[NACOS Exception httpPost] currentServerAddr: " + currentServerAddr, ex);
            throw ex;
        }
        // Pick the next server from the iterator; once the list is exhausted,
        // spend one retry credit and refresh the server list.
        if (serverListMgr.getIterator().hasNext()) {
            currentServerAddr = serverListMgr.getIterator().next();
        } else {
            maxRetry--;
            if (maxRetry < 0) {
                throw new ConnectException(
                        "[NACOS HTTP-POST] The maximum number of tolerable server reconnection errors has been reached");
            }
            serverListMgr.refreshCurrentServerAddr();
        }
    } while (System.currentTimeMillis() <= endTime);
    LOGGER.error("no available server, currentServerAddr : {}", currentServerAddr);
    throw new ConnectException("no available server, currentServerAddr : " + currentServerAddr);
}
// The first server throws ConnectException; the agent must fail over to the
// second server from the iterator and return its successful result.
@Test
void testRetryPostWithNewServer() throws Exception {
    when(mockIterator.hasNext()).thenReturn(true);
    when(nacosRestTemplate.<String>postForm(eq(SERVER_ADDRESS_1 + "/test"), any(HttpClientConfig.class), any(Header.class), anyMap(),
            eq(String.class))).thenThrow(new ConnectException());
    when(nacosRestTemplate.<String>postForm(eq(SERVER_ADDRESS_2 + "/test"), any(HttpClientConfig.class), any(Header.class), anyMap(),
            eq(String.class))).thenReturn(mockResult);
    when(mockResult.getCode()).thenReturn(HttpURLConnection.HTTP_OK);
    HttpRestResult<String> actual = serverHttpAgent.httpPost("/test", Collections.emptyMap(), Collections.emptyMap(), "UTF-8", 1000);
    assertEquals(mockResult, actual);
}
/**
 * Static factory for a stat-trigger instruction.
 *
 * @param statTriggerMap per-field trigger thresholds; must not be null
 * @param flag           trigger flag; must not be null
 * @return the new instruction
 */
public static StatTriggerInstruction statTrigger(Map<StatTriggerField, Long> statTriggerMap, StatTriggerFlag flag) {
    // checkNotNull returns its (non-null) argument, so validation and use combine.
    final Map<StatTriggerField, Long> checkedMap = checkNotNull(statTriggerMap, "Stat trigger map cannot be null");
    final StatTriggerFlag checkedFlag = checkNotNull(flag, "Stat trigger flag cannot be null");
    return new StatTriggerInstruction(checkedMap, checkedFlag);
}
// Verifies the statTrigger factory round-trips its map and flag, and that
// different inputs produce observably different instructions.
@Test
public void testStatTriggerTrafficMethod() {
    final Instruction instruction = Instructions.statTrigger(statTriggerFieldMap1, flag1);
    final Instructions.StatTriggerInstruction statTriggerInstruction =
            checkAndConvert(instruction, Instruction.Type.STAT_TRIGGER, Instructions.StatTriggerInstruction.class);
    assertThat(statTriggerInstruction.getStatTriggerFieldMap(), is(equalTo(statTriggerFieldMap1)));
    assertThat(statTriggerInstruction.getStatTriggerFlag(), is(equalTo(flag1)));
    assertThat(statTriggerInstruction.getStatTriggerFieldMap(), is(not(equalTo(statTriggerFieldMap2))));
    assertThat(statTriggerInstruction.getStatTriggerFlag(), is(not(equalTo(flag2))));
}
/**
 * CDI shutdown hook: stops the optional background job server and dashboard
 * (when enabled in the build-time configuration) and then closes the storage
 * provider unconditionally.
 */
void shutdown(@Observes ShutdownEvent event) {
    final boolean serverEnabled = jobRunrBuildTimeConfiguration.backgroundJobServer().enabled();
    if (serverEnabled) {
        backgroundJobServerInstance.get().stop();
    }
    final boolean dashboardEnabled = jobRunrBuildTimeConfiguration.dashboard().enabled();
    if (dashboardEnabled) {
        dashboardWebServerInstance.get().stop();
    }
    // Storage is closed last, after everything that might still use it.
    storageProviderInstance.get().close();
}
// With the dashboard enabled, shutdown must stop the dashboard web server.
@Test
void jobRunrStarterStopsDashboardIfConfigured() {
    when(dashboardConfiguration.enabled()).thenReturn(true);
    jobRunrStarter.shutdown(new ShutdownEvent());
    verify(dashboardWebServer).stop();
}
/**
 * Static factory: wraps the given {@code Retry} in a transformer that can be
 * composed onto a reactive stream.
 */
public static <T> RetryTransformer<T> of(Retry retry) {
    return new RetryTransformer<>(retry);
}
// The first emission ("retry") matches the retry-on-result predicate, so the
// call is retried once and only the eventual "success" value is observed.
@Test
public void retryOnResultUsingObservable() throws InterruptedException {
    RetryConfig config = RetryConfig.<String>custom()
            .retryOnResult("retry"::equals)
            .waitDuration(Duration.ofMillis(50))
            .maxAttempts(3).build();
    Retry retry = Retry.of("testName", config);
    given(helloWorldService.returnHelloWorld())
            .willReturn("retry")
            .willReturn("success");
    Observable.fromCallable(helloWorldService::returnHelloWorld)
            .compose(RetryTransformer.of(retry))
            .test()
            .await()
            .assertValueCount(1)
            .assertValue("success")
            .assertComplete();
    then(helloWorldService).should(times(2)).returnHelloWorld();
    Retry.Metrics metrics = retry.getMetrics();
    assertThat(metrics.getNumberOfFailedCallsWithoutRetryAttempt()).isZero();
    assertThat(metrics.getNumberOfSuccessfulCallsWithRetryAttempt()).isEqualTo(1);
}
/**
 * Executes a single DDL statement against the managed Spanner database,
 * creating the instance and database first if needed.
 * <p>
 * Fix: an {@code InterruptedException} now restores the thread's interrupt flag
 * before being wrapped, so callers can still observe the interruption.
 *
 * @param statement the DDL statement to execute
 * @throws IllegalStateException if the manager is no longer usable
 * @throws SpannerResourceManagerException if the statement fails to execute
 */
public synchronized void executeDdlStatement(String statement) throws IllegalStateException {
    checkIsUsable();
    maybeCreateInstance();
    maybeCreateDatabase();
    LOG.info("Executing DDL statement '{}' on database {}.", statement, databaseId);
    try {
        databaseAdminClient
            .updateDatabaseDdl(
                instanceId, databaseId, ImmutableList.of(statement), /* operationId= */ null)
            .get();
        LOG.info("Successfully executed DDL statement '{}' on database {}.", statement, databaseId);
    } catch (InterruptedException e) {
        // Re-assert the interrupt so the calling thread does not lose it.
        Thread.currentThread().interrupt();
        throw new SpannerResourceManagerException("Failed to execute statement.", e);
    } catch (ExecutionException | SpannerException e) {
        throw new SpannerResourceManagerException("Failed to execute statement.", e);
    }
}
// If instance creation fails (InterruptedException from the admin client), the
// DDL call must surface it as a SpannerResourceManagerException.
@Test
public void testExecuteDdlStatementShouldThrowExceptionWhenSpannerCreateInstanceFails()
    throws ExecutionException, InterruptedException {
    // arrange
    when(spanner.getInstanceAdminClient().createInstance(any()).get())
        .thenThrow(InterruptedException.class);
    prepareCreateDatabaseMock();
    prepareUpdateDatabaseMock();
    String statement =
        "CREATE TABLE Singers (\n"
            + "  SingerId   INT64 NOT NULL,\n"
            + "  FirstName  STRING(1024),\n"
            + "  LastName   STRING(1024),\n"
            + ") PRIMARY KEY (SingerId)";
    // act & assert
    assertThrows(
        SpannerResourceManagerException.class, () -> testManager.executeDdlStatement(statement));
}
/**
 * Returns the compiled-in Vespa version. The Optional is always present here;
 * the Optional return type matches the caller-facing contract.
 */
static Optional<VespaVersion> getVespaVersion() {
    return Optional.of(getCompiledVespaVersion());
}
// The factory must report the default (compiled-in) Vespa version.
@Test
public void testVespaVersion() {
    assertThat(JRTConfigRequestFactory.getVespaVersion().get(), is(defaultVespaVersion));
}
/**
 * Renders the giant's state as a sentence, e.g.
 * "The giant looks healthy, alert and saturated.".
 */
@Override
public String toString() {
    // Plain concatenation; %s and string concatenation both use String.valueOf,
    // so the output is identical to the previous String.format version.
    return "The giant looks " + health + ", " + fatigue + " and " + nourishment + ".";
}
// Cycles through every Nourishment value and checks both the getter and the
// rendered toString() sentence for each.
@Test
void testSetNourishment() {
    final var model = new GiantModel(Health.HEALTHY, Fatigue.ALERT, Nourishment.SATURATED);
    assertEquals(Nourishment.SATURATED, model.getNourishment());
    var messageFormat = "The giant looks healthy, alert and %s.";
    for (final var nourishment : Nourishment.values()) {
        model.setNourishment(nourishment);
        assertEquals(nourishment, model.getNourishment());
        assertEquals(String.format(messageFormat, nourishment), model.toString());
    }
}
/**
 * Whether the request body is Base64-encoded; serialized to JSON under the
 * explicit name "isBase64Encoded" (Jackson would otherwise strip the "is" prefix).
 */
@JsonProperty("isBase64Encoded")
public boolean isBase64Encoded() {
    return isBase64Encoded;
}
// The isBase64Encoded flag must round-trip through JSON for both true and false.
@Test
void deserialize_base64Encoded_readsBoolCorrectly() throws IOException {
    AwsProxyRequest req = new AwsProxyRequestBuilder()
            .fromJsonString(getRequestJson(true, CUSTOM_HEADER_KEY_LOWER_CASE, CUSTOM_HEADER_VALUE)).build();
    assertTrue(req.isBase64Encoded());
    req = new AwsProxyRequestBuilder()
            .fromJsonString(getRequestJson(false, CUSTOM_HEADER_KEY_LOWER_CASE, CUSTOM_HEADER_VALUE)).build();
    assertFalse(req.isBase64Encoded());
}
/**
 * Static factory for a JMS {@code Write} transform with all options unset;
 * configuration is applied through the builder-style {@code withX} methods.
 */
public static <EventT> Write<EventT> write() {
    return new AutoValue_JmsIO_Write.Builder<EventT>().build();
}
// End-to-end dynamic-destination write: 50 events routed to Topic_One and 100
// to Topic_Two via the topic-name mapper; consumers then verify the counts.
@Test
public void testWriteDynamicMessage() throws Exception {
    Connection connection = connectionFactory.createConnection(USERNAME, PASSWORD);
    connection.start();
    Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
    MessageConsumer consumerOne = session.createConsumer(session.createTopic("Topic_One"));
    MessageConsumer consumerTwo = session.createConsumer(session.createTopic("Topic_Two"));
    ArrayList<TestEvent> data = new ArrayList<>();
    for (int i = 0; i < 50; i++) {
        data.add(new TestEvent("Topic_One", "Message One " + i));
    }
    for (int i = 0; i < 100; i++) {
        data.add(new TestEvent("Topic_Two", "Message Two " + i));
    }
    pipeline
        .apply(Create.of(data))
        .apply(
            JmsIO.<TestEvent>write()
                .withConnectionFactory(connectionFactory)
                .withUsername(USERNAME)
                .withPassword(PASSWORD)
                .withRetryConfiguration(retryConfiguration)
                .withTopicNameMapper(e -> e.getTopicName())
                .withValueMapper(
                    (e, s) -> {
                        try {
                            TextMessage msg = s.createTextMessage();
                            msg.setText(e.getValue());
                            return msg;
                        } catch (JMSException ex) {
                            throw new JmsIOException("Error writing TextMessage", ex);
                        }
                    }));
    pipeline.run();
    int count = 0;
    // receive(1000) returns null once the topic is drained.
    while (consumerOne.receive(1000) != null) {
        count++;
    }
    assertEquals(50, count);
    count = 0;
    while (consumerTwo.receive(1000) != null) {
        count++;
    }
    assertEquals(100, count);
}
/**
 * Exposes the underlying HTTP response entity's content stream as the body.
 */
@Override
public InputStream getBody() throws IOException {
    return response.getEntity().getContent();
}
// The adapter must return the exact stream supplied by the wrapped response.
@Test
void testGetBody() throws IOException {
    assertEquals(inputStream, clientHttpResponse.getBody());
}
/**
 * Prints the encoded message into {@code output}, delegating to the
 * offset-based overload starting at position 0.
 */
public void print(final ByteBuffer encodedMessage, final StringBuilder output) {
    // Wrap once and delegate; no intermediate local is needed.
    print(output, new UnsafeBuffer(encodedMessage), 0);
}
// Encodes a Credentials message containing a var-data field and checks the JSON
// rendering: the string field prints as text, raw var-data prints as hex.
@Test
public void exampleVarData() throws Exception {
    final ByteBuffer encodedSchemaBuffer = ByteBuffer.allocate(SCHEMA_BUFFER_CAPACITY);
    encodeSchema(encodedSchemaBuffer);
    final ByteBuffer encodedMsgBuffer = ByteBuffer.allocate(MSG_BUFFER_CAPACITY);
    final UnsafeBuffer buffer = new UnsafeBuffer(encodedMsgBuffer);
    final CredentialsEncoder encoder = new CredentialsEncoder();
    encoder.wrapAndApplyHeader(buffer, 0, new MessageHeaderEncoder());
    encoder.login("example");
    encoder.putEncryptedPassword(new byte[] {11, 0, 64, 97}, 0, 4);
    encodedMsgBuffer.position(encoder.encodedLength());
    encodedSchemaBuffer.flip();
    final Ir ir = decodeIr(encodedSchemaBuffer);
    final JsonPrinter printer = new JsonPrinter(ir);
    final String result = printer.print(encodedMsgBuffer);
    assertEquals(
        "{\n" +
        "    \"login\": \"example\",\n" +
        "    \"encryptedPassword\": \"0b004061\"\n" +
        "}",
        result);
}
/**
 * Advances the auto-commit timer to the given clock time; a no-op when
 * auto-commit is disabled (empty autoCommitState).
 */
public void updateAutoCommitTimer(final long currentTimeMs) {
    this.autoCommitState.ifPresent(t -> t.updateTimer(currentTimeMs));
}
// An auto-commit that completes with NETWORK_EXCEPTION must not enqueue any
// commit-interceptor invocation.
@Test
public void testAutocommitInterceptorsNotInvokedOnError() {
    TopicPartition t1p = new TopicPartition("topic1", 0);
    subscriptionState.assignFromUser(singleton(t1p));
    subscriptionState.seek(t1p, 100);
    CommitRequestManager commitRequestManager = create(true, 100);
    time.sleep(100);
    commitRequestManager.updateAutoCommitTimer(time.milliseconds());
    List<NetworkClientDelegate.FutureCompletionHandler> futures = assertPoll(1, commitRequestManager);
    // complete the unsent request to trigger interceptor
    futures.get(0).onComplete(buildOffsetCommitClientResponse(
        new OffsetCommitResponse(0, Collections.singletonMap(t1p, Errors.NETWORK_EXCEPTION)))
    );
    Mockito.verify(offsetCommitCallbackInvoker, never()).enqueueInterceptorInvocation(any());
}
@Override public boolean setProperties(Namespace namespace, Map<String, String> properties) throws NoSuchNamespaceException { Map<String, String> newProperties = Maps.newHashMap(); newProperties.putAll(loadNamespaceMetadata(namespace)); newProperties.putAll(properties); glue.updateDatabase( UpdateDatabaseRequest.builder() .catalogId(awsProperties.glueCatalogId()) .name( IcebergToGlueConverter.toDatabaseName( namespace, awsProperties.glueCatalogSkipNameValidation())) .databaseInput( IcebergToGlueConverter.toDatabaseInput( namespace, newProperties, awsProperties.glueCatalogSkipNameValidation())) .build()); LOG.debug("Successfully set properties {} for {}", properties.keySet(), namespace); // Always successful, otherwise exception is thrown return true; }
// With getDatabase and updateDatabase mocked, setProperties must complete
// without throwing (Glue's update call signals success by returning).
@Test
public void testSetProperties() {
    Map<String, String> parameters = Maps.newHashMap();
    parameters.put("key", "val");
    Mockito.doReturn(
            GetDatabaseResponse.builder()
                .database(Database.builder().name("db1").parameters(parameters).build())
                .build())
        .when(glue)
        .getDatabase(Mockito.any(GetDatabaseRequest.class));
    Mockito.doReturn(UpdateDatabaseResponse.builder().build())
        .when(glue)
        .updateDatabase(Mockito.any(UpdateDatabaseRequest.class));
    glueCatalog.setProperties(Namespace.of("db1"), parameters);
}
@Override public void updateDemand() { // Compute demand by iterating through apps in the queue // Limit demand to maxResources Resource tmpDemand = Resources.createResource(0); readLock.lock(); try { for (FSAppAttempt sched : runnableApps) { sched.updateDemand(); Resources.addTo(tmpDemand, sched.getDemand()); } for (FSAppAttempt sched : nonRunnableApps) { sched.updateDemand(); Resources.addTo(tmpDemand, sched.getDemand()); } } finally { readLock.unlock(); } // Cap demand to maxShare to limit allocation to maxShare demand = Resources.componentwiseMin(tmpDemand, getMaxShare()); if (LOG.isDebugEnabled()) { LOG.debug("The updated demand for " + getName() + " is " + demand + "; the max is " + getMaxShare()); LOG.debug("The updated fairshare for " + getName() + " is " + getFairShare()); } }
// Two apps each demanding maxResource would exceed the queue's configured max
// share, so the aggregated demand must be capped at exactly maxResource.
@Test
public void testUpdateDemand() {
    conf.set(FairSchedulerConfiguration.ASSIGN_MULTIPLE, "false");
    resourceManager = new MockRM(conf);
    resourceManager.start();
    scheduler = (FairScheduler) resourceManager.getResourceScheduler();
    String queueName = "root.queue1";
    FSLeafQueue schedulable = new FSLeafQueue(queueName, scheduler, null);
    schedulable.setMaxShare(new ConfigurableResource(maxResource));
    assertThat(schedulable.getMetrics().getMaxApps()).
        isEqualTo(Integer.MAX_VALUE);
    assertThat(schedulable.getMetrics().getSchedulingPolicy()).isEqualTo(
        SchedulingPolicy.DEFAULT_POLICY.getName());
    FSAppAttempt app = mock(FSAppAttempt.class);
    Mockito.when(app.getDemand()).thenReturn(maxResource);
    Mockito.when(app.getResourceUsage()).thenReturn(Resources.none());
    schedulable.addApp(app, true);
    schedulable.addApp(app, true);
    schedulable.updateDemand();
    assertTrue("Demand is greater than max allowed ",
        Resources.equals(schedulable.getDemand(), maxResource));
}
/**
 * Deserializes one delimited (CSV-style) record into a list of column values.
 * Only the first CSV record of the payload is used; empty fields become null;
 * each field is converted by the positional parser for its column.
 *
 * @param topic the topic the bytes came from (used in error reporting).
 * @param bytes the serialized record; null maps to null.
 * @return the parsed column values, or null for a null input.
 * @throws SerializationException on empty records, column-count mismatch, or
 *         any parse failure (the cause is wrapped).
 */
@Override
public List<?> deserialize(final String topic, final byte[] bytes) {
    if (bytes == null) {
        return null;
    }
    try {
        final String recordCsvString = new String(bytes, StandardCharsets.UTF_8);
        final List<CSVRecord> csvRecords = CSVParser.parse(recordCsvString, csvFormat)
            .getRecords();
        if (csvRecords.isEmpty()) {
            throw new SerializationException("No fields in record");
        }
        final CSVRecord csvRecord = csvRecords.get(0);
        if (csvRecord == null || csvRecord.size() == 0) {
            throw new SerializationException("No fields in record.");
        }
        // Record must have exactly as many columns as there are parsers.
        SerdeUtils.throwOnColumnCountMismatch(parsers.size(), csvRecord.size(), false, topic);
        final List<Object> values = new ArrayList<>(parsers.size());
        final Iterator<Parser> pIt = parsers.iterator();
        for (int i = 0; i < csvRecord.size(); i++) {
            final String value = csvRecord.get(i);
            final Parser parser = pIt.next();
            // Empty/absent fields are treated as SQL NULL rather than parsed.
            final Object parsed = value == null || value.isEmpty()
                ? null
                : parser.parse(value);
            values.add(parsed);
        }
        return values;
    } catch (final Exception e) {
        throw new SerializationException("Error deserializing delimited", e);
    }
}
// A tab-delimited record must parse into the full range of column types,
// including decimal, time, date, timestamp and base64-encoded bytes.
@Test
public void shouldDeserializeDelimitedCorrectlyWithTabDelimiter() {
    // Given:
    final byte[] bytes = "1511897796092\t1\titem_1\t10.0\t10.10\t100\t10\t100\tew==\r\n"
        .getBytes(StandardCharsets.UTF_8);
    final KsqlDelimitedDeserializer deserializer =
        new KsqlDelimitedDeserializer(ORDER_SCHEMA, CSVFormat.DEFAULT.withDelimiter('\t'));
    // When:
    final List<?> result = deserializer.deserialize("", bytes);
    // Then:
    assertThat(result, contains(1511897796092L, 1L, "item_1", 10.0, new BigDecimal("10.10"),
        new Time(100), new Date(864000000), new Timestamp(100), ByteBuffer.wrap(new byte[] {123})));
}
/**
 * Returns the step-meta classes this analyzer supports (only XML output).
 * <p>
 * Fix: replaces double-brace initialization, which creates an anonymous
 * HashSet subclass holding a hidden reference to the enclosing analyzer,
 * with a plain mutable HashSet.
 */
@Override
public Set<Class<? extends BaseStepMeta>> getSupportedSteps() {
  Set<Class<? extends BaseStepMeta>> supported = new HashSet<>();
  supported.add( XMLOutputMeta.class );
  return supported;
}
// The analyzer must advertise exactly one supported step type: XMLOutputMeta.
// Fix: assertEquals takes (expected, actual); the original had them reversed,
// which produces a misleading failure message.
@Test
public void testGetSupportedSteps() {
  XMLOutputStepAnalyzer analyzer = new XMLOutputStepAnalyzer();
  Set<Class<? extends BaseStepMeta>> types = analyzer.getSupportedSteps();
  assertNotNull( types );
  assertEquals( 1, types.size() );
  assertTrue( types.contains( XMLOutputMeta.class ) );
}
/**
 * Maps an OpenWire {@code ActiveMQMessage} onto the AMQP message sections
 * (Header, DeliveryAnnotations, MessageAnnotations, Properties,
 * ApplicationProperties, body, Footer) and encodes them into a single buffer.
 * Sections are created lazily — only when some field requires them — and are
 * written in the AMQP-mandated order at the end.
 *
 * @param message the OpenWire message; null yields null.
 * @return the encoded AMQP message with its message format.
 * @throws Exception if the body cannot be converted or properties cannot be read.
 */
@Override
public EncodedMessage transform(ActiveMQMessage message) throws Exception {
    if (message == null) {
        return null;
    }

    long messageFormat = 0;
    Header header = null;
    Properties properties = null;
    Map<Symbol, Object> daMap = null;
    Map<Symbol, Object> maMap = null;
    Map<String,Object> apMap = null;
    Map<Object, Object> footerMap = null;

    Section body = convertBody(message);

    // --- JMS header fields -> AMQP Header / Properties / MessageAnnotations ---
    if (message.isPersistent()) {
        if (header == null) {
            header = new Header();
        }
        header.setDurable(true);
    }
    byte priority = message.getPriority();
    if (priority != Message.DEFAULT_PRIORITY) {
        if (header == null) {
            header = new Header();
        }
        header.setPriority(UnsignedByte.valueOf(priority));
    }
    String type = message.getType();
    if (type != null) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setSubject(type);
    }
    MessageId messageId = message.getMessageId();
    if (messageId != null) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setMessageId(getOriginalMessageId(message));
    }
    ActiveMQDestination destination = message.getDestination();
    if (destination != null) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setTo(destination.getQualifiedName());
        if (maMap == null) {
            maMap = new HashMap<>();
        }
        // Destination type (queue/topic/temp) travels as a message annotation.
        maMap.put(JMS_DEST_TYPE_MSG_ANNOTATION, destinationType(destination));
    }
    ActiveMQDestination replyTo = message.getReplyTo();
    if (replyTo != null) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setReplyTo(replyTo.getQualifiedName());
        if (maMap == null) {
            maMap = new HashMap<>();
        }
        maMap.put(JMS_REPLY_TO_TYPE_MSG_ANNOTATION, destinationType(replyTo));
    }
    String correlationId = message.getCorrelationId();
    if (correlationId != null) {
        if (properties == null) {
            properties = new Properties();
        }
        try {
            properties.setCorrelationId(AMQPMessageIdHelper.INSTANCE.toIdObject(correlationId));
        } catch (AmqpProtocolException e) {
            // Not an AMQP-encoded id; fall back to the raw string.
            properties.setCorrelationId(correlationId);
        }
    }
    long expiration = message.getExpiration();
    if (expiration != 0) {
        long ttl = expiration - System.currentTimeMillis();
        if (ttl < 0) {
            // Already expired: clamp to the minimum positive TTL.
            ttl = 1;
        }
        if (header == null) {
            header = new Header();
        }
        header.setTtl(new UnsignedInteger((int) ttl));
        if (properties == null) {
            properties = new Properties();
        }
        properties.setAbsoluteExpiryTime(new Date(expiration));
    }
    long timeStamp = message.getTimestamp();
    if (timeStamp != 0) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setCreationTime(new Date(timeStamp));
    }

    // JMSX Message Properties
    int deliveryCount = message.getRedeliveryCounter();
    if (deliveryCount > 0) {
        if (header == null) {
            header = new Header();
        }
        header.setDeliveryCount(UnsignedInteger.valueOf(deliveryCount));
    }
    String userId = message.getUserID();
    if (userId != null) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setUserId(new Binary(userId.getBytes(StandardCharsets.UTF_8)));
    }
    String groupId = message.getGroupID();
    if (groupId != null) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setGroupId(groupId);
    }
    int groupSequence = message.getGroupSequence();
    if (groupSequence > 0) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setGroupSequence(UnsignedInteger.valueOf(groupSequence));
    }

    // --- User properties: JMS_AMQP_* keys are routed back to the AMQP section
    // they originally came from; everything else becomes application properties.
    final Map<String, Object> entries;
    try {
        entries = message.getProperties();
    } catch (IOException e) {
        throw JMSExceptionSupport.create(e);
    }

    for (Map.Entry<String, Object> entry : entries.entrySet()) {
        String key = entry.getKey();
        Object value = entry.getValue();
        if (key.startsWith(JMS_AMQP_PREFIX)) {
            if (key.startsWith(NATIVE, JMS_AMQP_PREFIX_LENGTH)) {
                // skip transformer appended properties
                continue;
            } else if (key.startsWith(ORIGINAL_ENCODING, JMS_AMQP_PREFIX_LENGTH)) {
                // skip transformer appended properties
                continue;
            } else if (key.startsWith(MESSAGE_FORMAT, JMS_AMQP_PREFIX_LENGTH)) {
                messageFormat = (long) TypeConversionSupport.convert(entry.getValue(), Long.class);
                continue;
            } else if (key.startsWith(HEADER, JMS_AMQP_PREFIX_LENGTH)) {
                if (header == null) {
                    header = new Header();
                }
                continue;
            } else if (key.startsWith(PROPERTIES, JMS_AMQP_PREFIX_LENGTH)) {
                if (properties == null) {
                    properties = new Properties();
                }
                continue;
            } else if (key.startsWith(MESSAGE_ANNOTATION_PREFIX, JMS_AMQP_PREFIX_LENGTH)) {
                if (maMap == null) {
                    maMap = new HashMap<>();
                }
                String name = key.substring(JMS_AMQP_MESSAGE_ANNOTATION_PREFIX.length());
                maMap.put(Symbol.valueOf(name), value);
                continue;
            } else if (key.startsWith(FIRST_ACQUIRER, JMS_AMQP_PREFIX_LENGTH)) {
                if (header == null) {
                    header = new Header();
                }
                header.setFirstAcquirer((boolean) TypeConversionSupport.convert(value, Boolean.class));
                continue;
            } else if (key.startsWith(CONTENT_TYPE, JMS_AMQP_PREFIX_LENGTH)) {
                if (properties == null) {
                    properties = new Properties();
                }
                properties.setContentType(Symbol.getSymbol((String) TypeConversionSupport.convert(value, String.class)));
                continue;
            } else if (key.startsWith(CONTENT_ENCODING, JMS_AMQP_PREFIX_LENGTH)) {
                if (properties == null) {
                    properties = new Properties();
                }
                properties.setContentEncoding(Symbol.getSymbol((String) TypeConversionSupport.convert(value, String.class)));
                continue;
            } else if (key.startsWith(REPLYTO_GROUP_ID, JMS_AMQP_PREFIX_LENGTH)) {
                if (properties == null) {
                    properties = new Properties();
                }
                properties.setReplyToGroupId((String) TypeConversionSupport.convert(value, String.class));
                continue;
            } else if (key.startsWith(DELIVERY_ANNOTATION_PREFIX, JMS_AMQP_PREFIX_LENGTH)) {
                if (daMap == null) {
                    daMap = new HashMap<>();
                }
                String name = key.substring(JMS_AMQP_DELIVERY_ANNOTATION_PREFIX.length());
                daMap.put(Symbol.valueOf(name), value);
                continue;
            } else if (key.startsWith(FOOTER_PREFIX, JMS_AMQP_PREFIX_LENGTH)) {
                if (footerMap == null) {
                    footerMap = new HashMap<>();
                }
                String name = key.substring(JMS_AMQP_FOOTER_PREFIX.length());
                footerMap.put(Symbol.valueOf(name), value);
                continue;
            }
        } else if (key.startsWith(AMQ_SCHEDULED_MESSAGE_PREFIX )) {
            // strip off the scheduled message properties
            continue;
        }

        // The property didn't map into any other slot so we store it in the
        // Application Properties section of the message.
        if (apMap == null) {
            apMap = new HashMap<>();
        }
        apMap.put(key, value);

        int messageType = message.getDataStructureType();
        if (messageType == CommandTypes.ACTIVEMQ_MESSAGE) {
            // Type of command to recognize advisory message
            Object data = message.getDataStructure();
            if(data != null) {
                apMap.put("ActiveMqDataStructureType", data.getClass().getSimpleName());
            }
        }
    }

    // --- Encode the sections in the AMQP-specified order. ---
    final AmqpWritableBuffer buffer = new AmqpWritableBuffer();
    encoder.setByteBuffer(buffer);
    if (header != null) {
        encoder.writeObject(header);
    }
    if (daMap != null) {
        encoder.writeObject(new DeliveryAnnotations(daMap));
    }
    if (maMap != null) {
        encoder.writeObject(new MessageAnnotations(maMap));
    }
    if (properties != null) {
        encoder.writeObject(properties);
    }
    if (apMap != null) {
        encoder.writeObject(new ApplicationProperties(apMap));
    }
    if (body != null) {
        encoder.writeObject(body);
    }
    if (footerMap != null) {
        encoder.writeObject(new Footer(footerMap));
    }

    return new EncodedMessage(messageFormat, buffer.getArray(), 0, buffer.getArrayLength());
}
// A compressed JMS StreamMessage must transform into an AMQP message whose body
// is an AmqpValue wrapping a two-element List (the two written stream entries).
@Test
public void testConvertCompressedStreamMessageToAmqpMessageWithAmqpValueBody() throws Exception {
    ActiveMQStreamMessage outbound = createStreamMessage(true);
    outbound.writeBoolean(false);
    outbound.writeString("test");
    outbound.onSend();
    outbound.storeContent();
    JMSMappingOutboundTransformer transformer = new JMSMappingOutboundTransformer();
    EncodedMessage encoded = transformer.transform(outbound);
    assertNotNull(encoded);
    Message amqp = encoded.decode();
    assertNotNull(amqp.getBody());
    assertTrue(amqp.getBody() instanceof AmqpValue);
    assertTrue(((AmqpValue) amqp.getBody()).getValue() instanceof List);
    @SuppressWarnings("unchecked")
    List<Object> amqpList = (List<Object>) ((AmqpValue) amqp.getBody()).getValue();
    assertEquals(2, amqpList.size());
}
/**
 * Null-safe comparison of an expected content type against the message's
 * content type: two nulls match, null vs non-null does not.
 */
public static boolean isContentType(String contentType, Message message) {
    final String actual = message.getContentType();
    if (contentType == null) {
        return actual == null;
    }
    return contentType.equals(actual);
}
// A null expected content type must match a message with no content type set.
@Test
public void testIsContentTypeWithNullStringValueAndNullMessageContentType() {
    Message message = Proton.message();
    assertTrue(AmqpMessageSupport.isContentType(null, message));
}
/**
 * Delegates directly to the release service; the incremental client messages
 * are not consulted in this implementation.
 */
@Override
protected Release findLatestActiveRelease(String configAppId, String configClusterName, String configNamespace,
                                          ApolloNotificationMessages clientMessages) {
    return releaseService.findLatestActiveRelease(configAppId, configClusterName, configNamespace);
}
// When the data-center cluster has no release, loading must fall back to the
// default cluster's release; both lookups are attempted exactly once.
@Test
public void testLoadConfigWithDefaultClusterWithNoDataCenterRelease() throws Exception {
    when(releaseService.findLatestActiveRelease(someConfigAppId, someDataCenter, defaultNamespaceName))
        .thenReturn(null);
    when(releaseService.findLatestActiveRelease(someConfigAppId, defaultClusterName, defaultNamespaceName))
        .thenReturn(someRelease);
    Release release = configService
        .loadConfig(someClientAppId, someClientIp, someClientLabel, someConfigAppId, defaultClusterName, defaultNamespaceName,
            someDataCenter, someNotificationMessages);
    verify(releaseService, times(1)).findLatestActiveRelease(someConfigAppId, someDataCenter, defaultNamespaceName);
    verify(releaseService, times(1))
        .findLatestActiveRelease(someConfigAppId, defaultClusterName, defaultNamespaceName);
    assertEquals(someRelease, release);
}
/**
 * Runs the query against the requested target: every partition (ALL_NODES),
 * only locally-owned partitions (LOCAL_NODE), or a caller-supplied partition
 * set (PARTITION_OWNER — a single-partition set takes the dedicated
 * single-partition path).
 */
@SuppressWarnings("unchecked")
@Override
public Result execute(Query query, Target target) {
    Query adjustedQuery = adjustQuery(query);
    switch (target.mode()) {
        case ALL_NODES:
            // Rebuild the query with the full partition-id set before dispatch.
            adjustedQuery = Query.of(adjustedQuery).partitionIdSet(getAllPartitionIds()).build();
            return runOnGivenPartitions(adjustedQuery, adjustedQuery.getPartitionIdSet(), TargetMode.ALL_NODES);
        case LOCAL_NODE:
            adjustedQuery = Query.of(adjustedQuery).partitionIdSet(getLocalPartitionIds()).build();
            return runOnGivenPartitions(adjustedQuery, adjustedQuery.getPartitionIdSet(), TargetMode.LOCAL_NODE);
        case PARTITION_OWNER:
            // solePartition is negative when the target spans multiple partitions.
            int solePartition = target.partitions().solePartition();
            adjustedQuery = Query.of(adjustedQuery).partitionIdSet(target.partitions()).build();
            if (solePartition >= 0) {
                return runOnGivenPartition(adjustedQuery, solePartition);
            } else {
                return runOnGivenPartitions(adjustedQuery, adjustedQuery.getPartitionIdSet(), TargetMode.ALL_NODES);
            }
        default:
            throw new IllegalArgumentException("Illegal target " + target);
    }
}
// An ALL_NODES value-iteration query with an equality predicate must return
// exactly the one matching value.
@Test
public void runQueryOnAllPartitions_value() {
    Predicate<Object, Object> predicate = Predicates.equal("this", value);
    Query query = Query.of().mapName(map.getName()).predicate(predicate).iterationType(VALUE).build();
    QueryResult result = queryEngine.execute(query, Target.ALL_NODES);
    assertEquals(1, result.size());
    assertEquals(value, toObject(result.iterator().next().getValue()));
}