src_fm_fc_ms_ff
stringlengths
43
86.8k
target
stringlengths
20
276k
NoviflowSpecificFeature extends AbstractFeature {

    /**
     * Tells whether the switch is a NoviFlow SM-series device: it must be a NoviFlow
     * switch and its hardware description must match
     * NOVIFLOW_VIRTUAL_SWITCH_HARDWARE_DESCRIPTION_REGEX (e.g. "SM5000-SM").
     * A missing description is treated as an empty string and therefore rejected.
     */
    static boolean isSmSeries(IOFSwitch sw) {
        if (!isNoviSwitch(sw)) {
            return false;
        }
        String hardware = Optional.ofNullable(sw.getSwitchDescription())
                .map(SwitchDescription::getHardwareDescription)
                .orElse("");
        return NOVIFLOW_VIRTUAL_SWITCH_HARDWARE_DESCRIPTION_REGEX.matcher(hardware).matches();
    }

    static final String NOVIFLOW_MANUFACTURER_SUFFIX;
}
// Verifies isSmSeries(): accepted only for NoviFlow switches with SM-series hardware
// ("SM5000-SM"); rejected for other NoviFlow hardware (WB5164, NS21100) and for
// non-NoviFlow vendors (Nicira, Centec, Sonus, generic).
@Test public void testIsSmSeries() { assertTrue(isSmSeries(makeSwitchMock("NoviFlow Inc", "NW500.0.1", "SM5000-SM"))); assertFalse(isSmSeries(makeSwitchMock("NoviFlow Inc", "NW500.0.1", "WB5164-E"))); assertFalse(isSmSeries(makeSwitchMock("E", "NW400.4.0", "WB5164"))); assertFalse(isSmSeries(makeSwitchMock("NoviFlow Inc", "NW400.4.0", "NS21100"))); assertFalse(isSmSeries(makeSwitchMock("NoviFlow Inc", "NW500.2.0_dev", "NS21100"))); assertFalse(isSmSeries(makeSwitchMock("Common Inc", "Soft123", "Hard123"))); assertFalse(isSmSeries(makeSwitchMock("Nicira, Inc.", "Soft123", "Hard123"))); assertFalse(isSmSeries(makeSwitchMock("2004-2016 Centec Networks Inc", "2.8.16.21", "48T"))); assertFalse(isSmSeries(makeSwitchMock("Sonus Networks Inc, 4 Technology Park Dr, Westford, MA 01886, USA", "8.1.0.14", "VX3048"))); }
BfdFeature extends AbstractFeature {

    /**
     * Reports {@link SwitchFeature#BFD} when the switch software description matches
     * NOVIFLOW_SOFTWARE_DESCRIPTION_REGEX; empty otherwise, including when the
     * description object or its software field is absent.
     */
    @Override
    public Optional<SwitchFeature> discover(IOFSwitch sw) {
        SwitchDescription description = sw.getSwitchDescription();
        String software = description != null ? description.getSoftwareDescription() : null;
        if (software == null) {
            return Optional.empty();
        }
        return NOVIFLOW_SOFTWARE_DESCRIPTION_REGEX.matcher(software).matches()
                ? Optional.of(SwitchFeature.BFD)
                : Optional.empty();
    }

    @Override Optional<SwitchFeature> discover(IOFSwitch sw);
}
// Software versions that do not match the NoviFlow pattern (and a null description)
// must produce an empty discovery result, i.e. no BFD support reported.
@Test public void testDiscoverOfSwitchWithoutBfdSupport() { Assert.assertFalse(bfdFeature.discover(createSwitchWithDescription(null)).isPresent()); assertWithoutBfdSupport("2.8.16.21"); assertWithoutBfdSupport("2.8.16.15"); assertWithoutBfdSupport("8.1.0.14"); }
// Flattens a list of OF port-stats replies into a single PortStatsData for the switch.
// Per-entry conversion failures surface as null entries and are filtered out; whole-batch
// conversion failures are logged and mapped to null rather than propagated (the broad
// NPE/UOE/IAE catch mirrors the other stats mappers in this codebase — presumably a
// deliberate best-effort convention; verify before tightening).
// NOTE(review): "toPostStatsData" looks like a typo for "toPortStatsData", but the name is
// the public interface — renaming would break callers.
OfPortStatsMapper { public PortStatsData toPostStatsData(List<OFPortStatsReply> data, SwitchId switchId) { try { List<PortStatsEntry> stats = data.stream() .flatMap(reply -> reply.getEntries().stream()) .map(this::toPortStatsEntry) .filter(Objects::nonNull) .collect(toList()); return new PortStatsData(switchId, stats); } catch (NullPointerException | UnsupportedOperationException | IllegalArgumentException e) { log.error(String.format("Could not convert port stats data %s on switch %s", data, switchId), e); return null; } } PortStatsData toPostStatsData(List<OFPortStatsReply> data, SwitchId switchId); PortStatsEntry toPortStatsEntry(OFPortStatsEntry entry); static final OfPortStatsMapper INSTANCE; }
// Two conversion paths are exercised: OF 1.3 carries the error counters directly on the
// port-stats entry, while OF 1.4 moves them into ethernet/optical property objects.
// Both must map to the same PortStatsData (checked by the shared assertPortStatsData).
@Test public void testToPortStatsDataV13() { OFFactoryVer13 ofFactoryVer13 = new OFFactoryVer13(); OFPortStatsEntry ofPortStatsEntry = prebuildPortStatsEntry(ofFactoryVer13.buildPortStatsEntry()) .setRxFrameErr(U64.of(rxFrameErr)) .setRxOverErr(U64.of(rxOverErr)) .setRxCrcErr(U64.of(rxCrcErr)) .setCollisions(U64.of(collisions)).build(); OFPortStatsReply ofPortStatsReply = ofFactoryVer13.buildPortStatsReply() .setXid(xId) .setEntries(Collections.singletonList(ofPortStatsEntry)) .build(); PortStatsData data = OfPortStatsMapper.INSTANCE.toPostStatsData( Collections.singletonList(ofPortStatsReply), switchId); assertPortStatsData(data); } @Test public void testToPortStatsDataV14() { OFFactoryVer14 ofFactoryVer14 = new OFFactoryVer14(); OFPortStatsProp opticalProps = ofFactoryVer14.buildPortStatsPropOptical().setRxPwr(123).build(); OFPortStatsProp ethernetProps = ofFactoryVer14.buildPortStatsPropEthernet() .setRxFrameErr(U64.of(rxFrameErr)) .setRxOverErr(U64.of(rxOverErr)) .setRxCrcErr(U64.of(rxCrcErr)) .setCollisions(U64.of(collisions)) .build(); OFPortStatsEntry ofPortStatsEntry = prebuildPortStatsEntry(ofFactoryVer14.buildPortStatsEntry()) .setProperties(Lists.newArrayList(opticalProps, ethernetProps)) .build(); OFPortStatsReply ofPortStatsReply = ofFactoryVer14.buildPortStatsReply() .setXid(xId) .setEntries(Collections.singletonList(ofPortStatsEntry)) .build(); PortStatsData data = OfPortStatsMapper.INSTANCE.toPostStatsData( Collections.singletonList(ofPortStatsReply), switchId); assertPortStatsData(data); }
// Flattens a list of OF flow-stats replies into a single FlowStatsData for the switch.
// Null per-entry conversions are filtered out; batch-level conversion errors are logged and
// mapped to null instead of propagating (same best-effort convention as the port/meter
// stats mappers in this codebase).
OfFlowStatsMapper { public FlowStatsData toFlowStatsData(List<OFFlowStatsReply> data, SwitchId switchId) { try { List<FlowStatsEntry> stats = data.stream() .flatMap(reply -> reply.getEntries().stream()) .map(this::toFlowStatsEntry) .filter(Objects::nonNull) .collect(toList()); return new FlowStatsData(switchId, stats); } catch (NullPointerException | UnsupportedOperationException | IllegalArgumentException e) { log.error(String.format("Could not convert flow stats data %s on switch %s", data, switchId), e); return null; } } FlowEntry toFlowEntry(final OFFlowStatsEntry entry); FlowEntry toFlowEntry(final OFFlowMod entry); FlowMatchField toFlowMatchField(final Match match); FlowInstructions toFlowInstructions(final List<OFInstruction> instructions); GroupEntry toFlowGroupEntry(OFGroupDescStatsEntry ofGroupDescStatsEntry); GroupBucket toGroupBucket(OFBucket ofBucket); FlowApplyActions toFlowApplyActions(List<OFAction> ofApplyActions); FlowStatsData toFlowStatsData(List<OFFlowStatsReply> data, SwitchId switchId); FlowStatsEntry toFlowStatsEntry(OFFlowStatsEntry entry); static final OfFlowStatsMapper INSTANCE; }
// A single-entry flow-stats reply must map to FlowStatsData with one entry carrying the
// table id, cookie and byte/packet counters of the source OF entry.
@Test public void testToFlowStatsData() { OFFlowStatsEntry ofEntry = buildFlowStatsEntry(); OFFlowStatsReply ofReply = factory.buildFlowStatsReply() .setXid(xId) .setEntries(Collections.singletonList(ofEntry)) .build(); FlowStatsData data = OfFlowStatsMapper.INSTANCE.toFlowStatsData(Collections.singletonList(ofReply), switchId); assertEquals(switchId, data.getSwitchId()); assertEquals(1, data.getStats().size()); FlowStatsEntry entry = data.getStats().get(0); assertEquals(tableId, entry.getTableId()); assertEquals(cookie, entry.getCookie()); assertEquals(packetCount, entry.getPacketCount()); assertEquals(byteCount, entry.getByteCount()); }
// Converts one OF flow-stats entry into the messaging-layer FlowEntry: copies counters,
// timeouts, priority, cookie and table id, renders flags by enum name, and delegates match
// and instruction conversion to toFlowMatchField/toFlowInstructions.
OfFlowStatsMapper { public FlowEntry toFlowEntry(final OFFlowStatsEntry entry) { return FlowEntry.builder() .version(entry.getVersion().toString()) .durationSeconds(entry.getDurationSec()) .durationNanoSeconds(entry.getDurationNsec()) .hardTimeout(entry.getHardTimeout()) .idleTimeout(entry.getIdleTimeout()) .priority(entry.getPriority()) .byteCount(entry.getByteCount().getValue()) .packetCount(entry.getPacketCount().getValue()) .flags(entry.getFlags().stream() .map(OFFlowModFlags::name) .toArray(String[]::new)) .cookie(entry.getCookie().getValue()) .tableId(entry.getTableId().getValue()) .match(toFlowMatchField(entry.getMatch())) .instructions(toFlowInstructions(entry.getInstructions())) .build(); } FlowEntry toFlowEntry(final OFFlowStatsEntry entry); FlowEntry toFlowEntry(final OFFlowMod entry); FlowMatchField toFlowMatchField(final Match match); FlowInstructions toFlowInstructions(final List<OFInstruction> instructions); GroupEntry toFlowGroupEntry(OFGroupDescStatsEntry ofGroupDescStatsEntry); GroupBucket toGroupBucket(OFBucket ofBucket); FlowApplyActions toFlowApplyActions(List<OFAction> ofApplyActions); FlowStatsData toFlowStatsData(List<OFFlowStatsReply> data, SwitchId switchId); FlowStatsEntry toFlowStatsEntry(OFFlowStatsEntry entry); static final OfFlowStatsMapper INSTANCE; }
// End-to-end check of toFlowEntry(): verifies scalar fields, every match field rendered as
// a string, and that the expected apply-actions (set-field, copy-field, swap-field, output,
// group) and instructions structure is reconstructed from the OF entry.
@Test public void testFlowEntry() { OFFlowStatsEntry ofEntry = buildFlowStatsEntry(); FlowEntry entry = OfFlowStatsMapper.INSTANCE.toFlowEntry(ofEntry); assertEquals(tableId, entry.getTableId()); assertEquals(cookie, entry.getCookie()); assertEquals(packetCount, entry.getPacketCount()); assertEquals(byteCount, entry.getByteCount()); assertEquals(durationSec, entry.getDurationSeconds()); assertEquals(durationNsec, entry.getDurationNanoSeconds()); assertEquals(hardTimeout, entry.getHardTimeout()); assertEquals(idleTimeout, entry.getIdleTimeout()); assertEquals(priority, entry.getPriority()); assertEquals(String.valueOf(vlanVid.getVlan()), entry.getMatch().getVlanVid()); assertEquals(ethType.toString(), entry.getMatch().getEthType()); assertEquals(ethDst.toString(), entry.getMatch().getEthDst()); assertEquals(port.toString(), entry.getMatch().getInPort()); assertEquals(ipProto.toString(), entry.getMatch().getIpProto()); assertEquals(udpSrc.toString(), entry.getMatch().getUdpSrc()); assertEquals(udpDst.toString(), entry.getMatch().getUdpDst()); FlowSetFieldAction flowSetEthSrcAction = new FlowSetFieldAction("eth_src", MAC_ADDRESS_1); FlowSetFieldAction flowSetEthDstAction = new FlowSetFieldAction("eth_dst", MAC_ADDRESS_2); FlowCopyFieldAction flowCopyFieldAction = FlowCopyFieldAction.builder() .bits(String.valueOf(bits)) .srcOffset(String.valueOf(srcOffset)) .dstOffset(String.valueOf(dstOffset)) .srcOxm(String.valueOf(oxmSrcHeader)) .dstOxm(String.valueOf(oxmDstHeader)) .build(); FlowSwapFieldAction flowSwapFieldAction = FlowSwapFieldAction.builder() .bits(String.valueOf(bits)) .srcOffset(String.valueOf(srcOffset)) .dstOffset(String.valueOf(dstOffset)) .srcOxm(String.valueOf(oxmSrcHeader)) .dstOxm(String.valueOf(oxmDstHeader)) .build(); FlowApplyActions applyActions = new FlowApplyActions(port.toString(), Lists.newArrayList(flowSetEthSrcAction, flowSetEthDstAction), ethType.toString(), null, null, null, group.toString(), flowCopyFieldAction, flowSwapFieldAction); 
FlowInstructions instructions = new FlowInstructions(applyActions, null, meterId, goToTable.getValue()); assertEquals(instructions, entry.getInstructions()); }
OfFlowStatsMapper {

    /**
     * Converts an OpenFlow group-description stats entry into a {@link GroupEntry}.
     * Each OF bucket is converted via {@link #toGroupBucket}. A null input yields null.
     */
    public GroupEntry toFlowGroupEntry(OFGroupDescStatsEntry ofGroupDescStatsEntry) {
        if (ofGroupDescStatsEntry == null) {
            return null;
        }
        List<GroupBucket> buckets = ofGroupDescStatsEntry.getBuckets().stream()
                .map(this::toGroupBucket)
                .collect(toList());
        return GroupEntry.builder()
                .groupType(ofGroupDescStatsEntry.getGroupType().toString())
                .groupId(ofGroupDescStatsEntry.getGroup().getGroupNumber())
                .buckets(buckets)
                .build();
    }

    FlowEntry toFlowEntry(final OFFlowStatsEntry entry); FlowEntry toFlowEntry(final OFFlowMod entry); FlowMatchField toFlowMatchField(final Match match); FlowInstructions toFlowInstructions(final List<OFInstruction> instructions); GroupEntry toFlowGroupEntry(OFGroupDescStatsEntry ofGroupDescStatsEntry); GroupBucket toGroupBucket(OFBucket ofBucket); FlowApplyActions toFlowApplyActions(List<OFAction> ofApplyActions); FlowStatsData toFlowStatsData(List<OFFlowStatsReply> data, SwitchId switchId); FlowStatsEntry toFlowStatsEntry(OFFlowStatsEntry entry); static final OfFlowStatsMapper INSTANCE; }
// Verifies group conversion: group id/type are copied, bucket count is preserved, and each
// bucket's apply-actions (output port, push-vlan, set vlan_vid) are rendered as strings.
@Test public void testFlowGroupEntry() { OFGroupDescStatsEntry entry = buildFlowGroupEntry(); GroupEntry result = OfFlowStatsMapper.INSTANCE.toFlowGroupEntry(entry); assertEquals(entry.getGroup().getGroupNumber(), result.getGroupId()); assertEquals(entry.getGroupType().toString(), result.getGroupType()); assertEquals(entry.getBuckets().size(), result.getBuckets().size()); GroupBucket firstBucket = result.getBuckets().get(0); assertEquals("12", firstBucket.getApplyActions().getFlowOutput()); GroupBucket secondBucket = result.getBuckets().get(1); assertEquals(EthType.VLAN_FRAME.toString(), secondBucket.getApplyActions().getPushVlan()); assertEquals("vlan_vid", secondBucket.getApplyActions().getSetFieldActions().get(0).getFieldName()); assertEquals("12", secondBucket.getApplyActions().getSetFieldActions().get(0).getFieldValue()); assertEquals("1", secondBucket.getApplyActions().getFlowOutput()); }
// MapStruct mapper: OFTableStatsEntry -> TableStatsEntry. The @Mapping annotations unwrap
// tableId/lookupCount/matchedCount value objects and rename activeCount to activeEntries.
// NOTE(review): the abstract method is declared twice with identical mappings — this looks
// like an extraction/duplication artifact; verify against the original source file.
OfTableStatsMapper { @Mapping(source = "tableId.value", target = "tableId") @Mapping(source = "activeCount", target = "activeEntries") @Mapping(source = "lookupCount.value", target = "lookupCount") @Mapping(source = "matchedCount.value", target = "matchedCount") public abstract TableStatsEntry toTableStatsEntry(OFTableStatsEntry source); @Mapping(source = "tableId.value", target = "tableId") @Mapping(source = "activeCount", target = "activeEntries") @Mapping(source = "lookupCount.value", target = "lookupCount") @Mapping(source = "matchedCount.value", target = "matchedCount") abstract TableStatsEntry toTableStatsEntry(OFTableStatsEntry source); static final OfTableStatsMapper INSTANCE; }
// Round-trip check of the table-stats MapStruct mapping: every target field must equal the
// corresponding (unwrapped) source field.
@Test public void shouldConvertSuccessfully() { OFFactoryVer13 ofFactoryVer13 = new OFFactoryVer13(); OFTableStatsEntry entry = ofFactoryVer13.buildTableStatsEntry() .setTableId(TableId.of(11)) .setActiveCount(10) .setMatchedCount(U64.of(100001L)) .setLookupCount(U64.of(100002L)) .build(); TableStatsEntry result = OfTableStatsMapper.INSTANCE.toTableStatsEntry(entry); assertEquals(result.getTableId(), entry.getTableId().getValue()); assertEquals(result.getActiveEntries(), entry.getActiveCount()); assertEquals(result.getLookupCount(), entry.getLookupCount().getValue()); assertEquals(result.getMatchedCount(), entry.getMatchedCount().getValue()); }
// Detects OpenFlow reserved ports. Reserved port numbers occupy the top of the unsigned
// 32-bit range (OFPort.MAX = 0xffffff00 up to 0xffffffff = ANY). getPortNumber() returns a
// signed int, so those values are negative: the condition reads as
// -256 (MAX) <= port <= -1 (ANY), which is true exactly for the reserved range and false
// for all ordinary (non-negative) port numbers. Do not "simplify" the comparison — it
// relies on this signed reinterpretation.
OfPortDescConverter { public boolean isReservedPort(OFPort port) { return OFPort.MAX.getPortNumber() <= port.getPortNumber() && port.getPortNumber() <= -1; } PortDescription toPortDescription(OFPortDesc ofPortDesc); PortInfoData toPortInfoData(DatapathId dpId, OFPortDesc portDesc, net.floodlightcontroller.core.PortChangeType type); boolean isReservedPort(OFPort port); boolean isPortEnabled(OFPortDesc portDesc); static final OfPortDescConverter INSTANCE; }
// All well-known reserved ports must be detected; ordinary ports (1 and MAX-1) must not.
@Test public void testReservedPortCheck() { for (OFPort port : new OFPort[]{ OFPort.LOCAL, OFPort.ALL, OFPort.CONTROLLER, OFPort.ANY, OFPort.FLOOD, OFPort.NO_MASK, OFPort.IN_PORT, OFPort.NORMAL, OFPort.TABLE}) { Assert.assertTrue(String.format("Port %s must be detected as RESERVED, but it's not", port), OfPortDescConverter.INSTANCE.isReservedPort(port)); } for (OFPort port : new OFPort[]{ OFPort.of(1), OFPort.of(OFPort.MAX.getPortNumber() - 1)}) { Assert.assertFalse(String.format("Port %s must be detected as NOT RESERVED, but it's not", port), OfPortDescConverter.INSTANCE.isReservedPort(port)); } }
OfPortDescConverter {

    /**
     * Builds a {@link PortInfoData} event from an OF port description and a Floodlight
     * port-change type, translating the change type via mapChangeType() and deriving the
     * enabled flag via isPortEnabled().
     */
    public PortInfoData toPortInfoData(DatapathId dpId, OFPortDesc portDesc, net.floodlightcontroller.core.PortChangeType type) {
        SwitchId switchId = new SwitchId(dpId.getLong());
        int portNumber = portDesc.getPortNo().getPortNumber();
        return new PortInfoData(switchId, portNumber, mapChangeType(type), isPortEnabled(portDesc));
    }

    PortDescription toPortDescription(OFPortDesc ofPortDesc); PortInfoData toPortInfoData(DatapathId dpId, OFPortDesc portDesc, net.floodlightcontroller.core.PortChangeType type); boolean isReservedPort(OFPort port); boolean isPortEnabled(OFPortDesc portDesc); static final OfPortDescConverter INSTANCE; }
// Exhaustively maps each Floodlight PortChangeType to the expected messaging-layer
// PortChangeType through toPortInfoData() and checks the encoded state enum.
@Test public void testPortChangeTypeMapping() { OFPortDesc portDesc = OFFactoryVer13.INSTANCE.buildPortDesc() .setPortNo(OFPort.of(1)) .setName("test") .build(); Map<org.openkilda.messaging.info.event.PortChangeType, net.floodlightcontroller.core.PortChangeType> expected = new HashMap<>(); expected.put(org.openkilda.messaging.info.event.PortChangeType.ADD, net.floodlightcontroller.core.PortChangeType.ADD); expected.put(org.openkilda.messaging.info.event.PortChangeType.OTHER_UPDATE, net.floodlightcontroller.core.PortChangeType.OTHER_UPDATE); expected.put(org.openkilda.messaging.info.event.PortChangeType.DELETE, net.floodlightcontroller.core.PortChangeType.DELETE); expected.put(org.openkilda.messaging.info.event.PortChangeType.UP, net.floodlightcontroller.core.PortChangeType.UP); expected.put(org.openkilda.messaging.info.event.PortChangeType.DOWN, net.floodlightcontroller.core.PortChangeType.DOWN); DatapathId dpId = DatapathId.of(1); for (Map.Entry<org.openkilda.messaging.info.event.PortChangeType, net.floodlightcontroller.core.PortChangeType> entry : expected.entrySet()) { PortInfoData encoded = OfPortDescConverter.INSTANCE.toPortInfoData(dpId, portDesc, entry.getValue()); Assert.assertSame(entry.getKey(), encoded.getState()); } }
// Flattens a list of OF meter-stats replies into a single MeterStatsData for the switch.
// Null per-entry conversions are filtered out; batch-level conversion errors are logged and
// mapped to null instead of propagating (same best-effort convention as the port/flow
// stats mappers in this codebase).
OfMeterStatsMapper { public MeterStatsData toMeterStatsData(List<OFMeterStatsReply> data, SwitchId switchId) { try { List<MeterStatsEntry> stats = data.stream() .flatMap(reply -> reply.getEntries().stream()) .map(this::toMeterStatsEntry) .filter(Objects::nonNull) .collect(toList()); return new MeterStatsData(switchId, stats); } catch (NullPointerException | UnsupportedOperationException | IllegalArgumentException e) { log.error(String.format("Could not convert meter stats data %s on switch %s", data, switchId), e); return null; } } MeterStatsData toMeterStatsData(List<OFMeterStatsReply> data, SwitchId switchId); MeterStatsEntry toMeterStatsEntry(OFMeterStats entry); static final OfMeterStatsMapper INSTANCE; }
// Verifies that a single-band OF 1.3 meter-stats reply is flattened into one
// MeterStatsEntry, and that the entry exposes the band-level byte/packet counters
// (not the meter-level packetInCount/byteInCount).
// NOTE: renamed from "testToPortStatsDataV13" — that name was a copy-paste leftover from
// the port-stats mapper test and did not describe this meter-stats scenario.
@Test public void testToMeterStatsDataV13() { OFFactoryVer13 factory = new OFFactoryVer13(); OFMeterBandStats bandStats = factory.meterBandStats(U64.of(bandPacketCount), U64.of(bandByteCount)); OFMeterStats meterStats = factory.buildMeterStats() .setMeterId(meterId) .setByteInCount(U64.of(meterByteCount)) .setPacketInCount(U64.of(meterPacketCount)) .setBandStats(Collections.singletonList(bandStats)) .build(); OFMeterStatsReply reply = factory.buildMeterStatsReply() .setEntries(Collections.singletonList(meterStats)) .build(); MeterStatsData data = OfMeterStatsMapper.INSTANCE.toMeterStatsData(Collections.singletonList(reply), switchId); assertEquals(switchId, data.getSwitchId()); assertEquals(1, data.getStats().size()); MeterStatsEntry statsEntry = data.getStats().get(0); assertEquals(bandByteCount, statsEntry.getByteInCount()); assertEquals(bandPacketCount, statsEntry.getPacketsInCount()); }
// Floodlight switch-added callback: records the event in the dashboard log, then runs
// switch discovery which emits an ADDED notification. Order matters: logging precedes the
// discovery/notification side effects.
SwitchTrackingService implements IOFSwitchListener, IService { @Override @NewCorrelationContextRequired public void switchAdded(final DatapathId switchId) { dashboardLogger.onSwitchEvent(switchId, SwitchChangeType.ADDED); switchDiscovery(switchId, SwitchChangeType.ADDED); } void dumpAllSwitches(); void completeSwitchActivation(DatapathId dpId); @Override @NewCorrelationContextRequired void switchAdded(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchRemoved(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchActivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchDeactivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchChanged(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchPortChanged(final DatapathId switchId, final OFPortDesc portDesc, final PortChangeType type); @Override void setup(FloodlightModuleContext context); }
// switchAdded(): when the switch is reachable an ADDED event with its view is emitted;
// when it cannot be fetched ("missing") the ADDED event is emitted with a null view.
@Test public void switchAdded() throws Exception { SpeakerSwitchView expectedSwitchView = makeSwitchRecord(dpId, switchFeatures, true, true); Capture<Message> producedMessage = prepareAliveSwitchEvent(expectedSwitchView); replayAll(); service.switchAdded(dpId); verifySwitchEvent(SwitchChangeType.ADDED, expectedSwitchView, producedMessage); } @Test public void switchAddedMissing() throws Exception { Capture<Message> producedMessage = prepareRemovedSwitchEvent(); replayAll(); service.switchAdded(dpId); verifySwitchEvent(SwitchChangeType.ADDED, null, producedMessage); }
// Floodlight switch-removed callback: logs the event, deactivates the switch in the
// switch manager BEFORE emitting the REMOVED discovery notification.
SwitchTrackingService implements IOFSwitchListener, IService { @Override @NewCorrelationContextRequired public void switchRemoved(final DatapathId switchId) { dashboardLogger.onSwitchEvent(switchId, SwitchChangeType.REMOVED); switchManager.deactivate(switchId); switchDiscovery(switchId, SwitchChangeType.REMOVED); } void dumpAllSwitches(); void completeSwitchActivation(DatapathId dpId); @Override @NewCorrelationContextRequired void switchAdded(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchRemoved(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchActivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchDeactivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchChanged(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchPortChanged(final DatapathId switchId, final OFPortDesc portDesc, final PortChangeType type); @Override void setup(FloodlightModuleContext context); }
// switchRemoved(): the switch manager must be deactivated and a REMOVED event (null view)
// must be produced.
@Test public void switchRemoved() { Capture<Message> producedMessage = prepareSwitchEventCommon(dpId); switchManager.deactivate(eq(dpId)); replayAll(); service.switchRemoved(dpId); verifySwitchEvent(SwitchChangeType.REMOVED, null, producedMessage); }
// Floodlight switch-deactivated callback: logs the event, deactivates the switch in the
// switch manager, then emits the DEACTIVATED discovery notification (same shape as
// switchRemoved but with a different change type).
SwitchTrackingService implements IOFSwitchListener, IService { @Override @NewCorrelationContextRequired public void switchDeactivated(final DatapathId switchId) { dashboardLogger.onSwitchEvent(switchId, SwitchChangeType.DEACTIVATED); switchManager.deactivate(switchId); switchDiscovery(switchId, SwitchChangeType.DEACTIVATED); } void dumpAllSwitches(); void completeSwitchActivation(DatapathId dpId); @Override @NewCorrelationContextRequired void switchAdded(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchRemoved(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchActivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchDeactivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchChanged(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchPortChanged(final DatapathId switchId, final OFPortDesc portDesc, final PortChangeType type); @Override void setup(FloodlightModuleContext context); }
// switchDeactivated(): the switch manager must be deactivated and a DEACTIVATED event
// (null view) must be produced.
@Test public void switchDeactivated() { Capture<Message> producedMessage = prepareSwitchEventCommon(dpId); switchManager.deactivate(eq(dpId)); replayAll(); service.switchDeactivated(dpId); verifySwitchEvent(SwitchChangeType.DEACTIVATED, null, producedMessage); }
// Applies a partial (PATCH) update to a switch inside a transaction.
// Semantics: a null field in SwitchPatch means "leave unchanged"; an empty-string pop
// means "clear pop" (set to null); location sub-fields are applied individually, each only
// when non-null. The entity is detached AFTER mutation so the returned object is a
// consistent snapshot outside the transaction — keep detach last.
// Throws SwitchNotFoundException when the switch id is unknown.
SwitchOperationsService implements ILinkOperationsServiceCarrier { public Switch patchSwitch(SwitchId switchId, SwitchPatch data) throws SwitchNotFoundException { return transactionManager.doInTransaction(() -> { Switch foundSwitch = switchRepository.findById(switchId) .orElseThrow(() -> new SwitchNotFoundException(switchId)); Optional.ofNullable(data.getPop()).ifPresent(pop -> foundSwitch.setPop(!"".equals(pop) ? pop : null)); Optional.ofNullable(data.getLocation()).ifPresent(location -> { Optional.ofNullable(location.getLatitude()).ifPresent(foundSwitch::setLatitude); Optional.ofNullable(location.getLongitude()).ifPresent(foundSwitch::setLongitude); Optional.ofNullable(location.getStreet()).ifPresent(foundSwitch::setStreet); Optional.ofNullable(location.getCity()).ifPresent(foundSwitch::setCity); Optional.ofNullable(location.getCountry()).ifPresent(foundSwitch::setCountry); }); switchRepository.detach(foundSwitch); return foundSwitch; }); } SwitchOperationsService(RepositoryFactory repositoryFactory, TransactionManager transactionManager, SwitchOperationsServiceCarrier carrier); GetSwitchResponse getSwitch(SwitchId switchId); List<GetSwitchResponse> getAllSwitches(); Switch updateSwitchUnderMaintenanceFlag(SwitchId switchId, boolean underMaintenance); boolean deleteSwitch(SwitchId switchId, boolean force); void checkSwitchIsDeactivated(SwitchId switchId); void checkSwitchHasNoFlows(SwitchId switchId); void checkSwitchHasNoFlowSegments(SwitchId switchId); void checkSwitchHasNoIsls(SwitchId switchId); SwitchPropertiesDto getSwitchProperties(SwitchId switchId); SwitchPropertiesDto updateSwitchProperties(SwitchId switchId, SwitchPropertiesDto switchPropertiesDto); PortProperties getPortProperties(SwitchId switchId, int port); Collection<SwitchConnectedDevice> getSwitchConnectedDevices( SwitchId switchId); List<IslEndpoint> getSwitchIslEndpoints(SwitchId switchId); Switch patchSwitch(SwitchId switchId, SwitchPatch data); }
// patchSwitch(): a full patch must copy pop and every location field onto the stored
// switch; an empty-string pop must clear the stored pop (set it to null).
@Test public void shouldPatchSwitch() throws SwitchNotFoundException { Switch sw = Switch.builder().switchId(TEST_SWITCH_ID).status(SwitchStatus.ACTIVE).build(); switchRepository.add(sw); SwitchPatch switchPatch = new SwitchPatch("pop", new SwitchLocation(48.860611, 2.337633, "street", "city", "country")); switchOperationsService.patchSwitch(TEST_SWITCH_ID, switchPatch); Switch updatedSwitch = switchRepository.findById(TEST_SWITCH_ID).get(); assertEquals(switchPatch.getPop(), updatedSwitch.getPop()); assertEquals(switchPatch.getLocation().getLatitude(), updatedSwitch.getLatitude()); assertEquals(switchPatch.getLocation().getLongitude(), updatedSwitch.getLongitude()); assertEquals(switchPatch.getLocation().getStreet(), updatedSwitch.getStreet()); assertEquals(switchPatch.getLocation().getCity(), updatedSwitch.getCity()); assertEquals(switchPatch.getLocation().getCountry(), updatedSwitch.getCountry()); } @Test public void shouldSetNullPopWhenPopIsEmptyString() throws SwitchNotFoundException { Switch sw = Switch.builder().switchId(TEST_SWITCH_ID).status(SwitchStatus.ACTIVE).build(); switchRepository.add(sw); SwitchPatch switchPatch = new SwitchPatch("", null); switchOperationsService.patchSwitch(TEST_SWITCH_ID, switchPatch); Switch updatedSwitch = switchRepository.findById(TEST_SWITCH_ID).get(); assertNull(updatedSwitch.getPop()); }
// Floodlight switch-changed callback: logs the event, then emits a CHANGED discovery
// notification (no switch-manager deactivation, unlike removed/deactivated).
SwitchTrackingService implements IOFSwitchListener, IService { @Override @NewCorrelationContextRequired public void switchChanged(final DatapathId switchId) { dashboardLogger.onSwitchEvent(switchId, SwitchChangeType.CHANGED); switchDiscovery(switchId, SwitchChangeType.CHANGED); } void dumpAllSwitches(); void completeSwitchActivation(DatapathId dpId); @Override @NewCorrelationContextRequired void switchAdded(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchRemoved(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchActivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchDeactivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchChanged(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchPortChanged(final DatapathId switchId, final OFPortDesc portDesc, final PortChangeType type); @Override void setup(FloodlightModuleContext context); }
// switchChanged(): reachable switch -> CHANGED event with its view; unreachable switch
// ("missing") -> CHANGED event with a null view.
@Test public void switchChanged() throws Exception { SpeakerSwitchView expectedSwitchRecord = makeSwitchRecord(dpId, switchFeatures, true, true); Capture<Message> producedMessage = prepareAliveSwitchEvent(expectedSwitchRecord); replayAll(); service.switchChanged(dpId); verifySwitchEvent(SwitchChangeType.CHANGED, expectedSwitchRecord, producedMessage); } @Test public void switchChangedMissing() throws Exception { Capture<Message> producedMessage = prepareRemovedSwitchEvent(); replayAll(); service.switchChanged(dpId); verifySwitchEvent(SwitchChangeType.CHANGED, null, producedMessage); }
// Dumps the state of all known switches under the discovery WRITE lock, so no concurrent
// discovery event can interleave with the dump. The unlock is in finally to guarantee
// release even if dumpAllSwitchesAction() throws.
SwitchTrackingService implements IOFSwitchListener, IService { public void dumpAllSwitches() { discoveryLock.writeLock().lock(); try { dumpAllSwitchesAction(); } finally { discoveryLock.writeLock().unlock(); } } void dumpAllSwitches(); void completeSwitchActivation(DatapathId dpId); @Override @NewCorrelationContextRequired void switchAdded(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchRemoved(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchActivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchDeactivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchChanged(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchPortChanged(final DatapathId switchId, final OFPortDesc portDesc, final PortChangeType type); @Override void setup(FloodlightModuleContext context); }
@Test public void networkDumpTest() throws Exception { OFSwitch iofSwitch1 = mock(OFSwitch.class); OFSwitch iofSwitch2 = mock(OFSwitch.class); final DatapathId swAid = DatapathId.of(1); final DatapathId swBid = DatapathId.of(2); Map<DatapathId, IOFSwitch> switches = ImmutableMap.of( swAid, iofSwitch1, swBid, iofSwitch2 ); Map<DatapathId, InetSocketAddress> switchAddresses = ImmutableMap.of( swAid, new InetSocketAddress(Inet4Address.getByName("127.0.1.1"), 32768), swBid, new InetSocketAddress(Inet4Address.getByName("127.0.1.2"), 32768) ); SwitchDescription ofSwitchDescription = new SwitchDescription( switchDescription.getManufacturer(), switchDescription.getHardware(), switchDescription.getSoftware(), switchDescription.getSerialNumber(), switchDescription.getDatapath()); OFFactoryVer13 ofFactory = new OFFactoryVer13(); for (DatapathId swId : switches.keySet()) { IOFSwitch sw = switches.get(swId); expect(sw.getOFFactory()).andStubReturn(ofFactory); expect(sw.isActive()).andReturn(true).anyTimes(); expect(sw.getId()).andReturn(swId).anyTimes(); expect(sw.getSwitchDescription()).andReturn(ofSwitchDescription); expect(sw.getInetAddress()).andReturn(switchAddresses.get(swId)); expect(sw.getControllerRole()).andStubReturn(OFControllerRole.ROLE_EQUAL); OFConnection connect = createMock(OFConnection.class); expect(connect.getRemoteInetAddress()).andReturn(speakerSocketAddress); expect(sw.getConnectionByCategory(eq(LogicalOFMessageCategory.MAIN))).andReturn(connect); } expect(switchManager.getAllSwitchMap(true)).andReturn(switches); expect(switchManager.getPhysicalPorts(eq(iofSwitch1))).andReturn(ImmutableList.of( makePhysicalPortMock(1, true), makePhysicalPortMock(2, true) )); expect(switchManager.getPhysicalPorts(eq(iofSwitch2))).andReturn(ImmutableList.of( makePhysicalPortMock(3, true), makePhysicalPortMock(4, true), makePhysicalPortMock(5, false) )); expect(featureDetector.detectSwitch(iofSwitch1)) .andReturn(ImmutableSet.of(SwitchFeature.METERS)); 
expect(featureDetector.detectSwitch(iofSwitch2)) .andReturn(ImmutableSet.of(SwitchFeature.METERS, SwitchFeature.BFD)); ArrayList<Message> producedMessages = new ArrayList<>(); producerService.sendMessageAndTrack(eq(KAFKA_ISL_DISCOVERY_TOPIC), anyObject(), anyObject(InfoMessage.class)); expectLastCall().andAnswer(new IAnswer<Object>() { @Override public Object answer() { Message sentMessage = (Message) getCurrentArguments()[2]; sentMessage.setTimestamp(0); producedMessages.add(sentMessage); return null; } }).anyTimes(); replayAll(); String correlationId = "unit-test-correlation-id"; try (CorrelationContextClosable dummy = CorrelationContext.create(correlationId)) { service.dumpAllSwitches(); } verify(producerService); ArrayList<Message> expectedMessages = new ArrayList<>(); expectedMessages.add(new InfoMessage( new NetworkDumpSwitchData(new SpeakerSwitchView( new SwitchId(swAid.getLong()), new InetSocketAddress(Inet4Address.getByName("127.0.1.1"), 32768), new InetSocketAddress(Inet4Address.getByName("127.0.1.254"), 6653), "OF_13", switchDescription, ImmutableSet.of(SwitchFeature.METERS), ImmutableList.of( new SpeakerSwitchPortView(1, SpeakerSwitchPortView.State.UP), new SpeakerSwitchPortView(2, SpeakerSwitchPortView.State.UP))), true), 0, correlationId)); expectedMessages.add(new InfoMessage( new NetworkDumpSwitchData(new SpeakerSwitchView( new SwitchId(swBid.getLong()), new InetSocketAddress(Inet4Address.getByName("127.0.1.2"), 32768), new InetSocketAddress(Inet4Address.getByName("127.0.1.254"), 6653), "OF_13", switchDescription, ImmutableSet.of(SwitchFeature.METERS, SwitchFeature.BFD), ImmutableList.of( new SpeakerSwitchPortView(3, SpeakerSwitchPortView.State.UP), new SpeakerSwitchPortView(4, SpeakerSwitchPortView.State.UP), new SpeakerSwitchPortView(5, SpeakerSwitchPortView.State.DOWN))), true), 0, correlationId)); assertEquals(expectedMessages, producedMessages); }
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener { @Override public Long installDropFlow(final DatapathId dpid) throws SwitchOperationException { return installDropFlowForTable(dpid, INPUT_TABLE_ID, DROP_RULE_COOKIE); } @Override Collection<Class<? extends IFloodlightService>> getModuleServices(); @Override Map<Class<? extends IFloodlightService>, IFloodlightService> getServiceImpls(); @Override Collection<Class<? extends IFloodlightService>> getModuleDependencies(); @Override void init(FloodlightModuleContext context); @Override void startUp(FloodlightModuleContext context); @Override @NewCorrelationContextRequired Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx); @Override String getName(); @Override void activate(DatapathId dpid); @Override void deactivate(DatapathId dpid); @Override boolean isCallbackOrderingPrereq(OFType type, String name); @Override boolean isCallbackOrderingPostreq(OFType type, String name); @Override ConnectModeRequest.Mode connectMode(final ConnectModeRequest.Mode mode); @Override List<Long> installDefaultRules(final DatapathId dpid); @Override long installIngressFlow(DatapathId dpid, DatapathId dstDpid, String flowId, Long cookie, int inputPort, int outputPort, int inputVlanId, int transitTunnelId, OutputVlanType outputVlanType, long meterId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installServer42IngressFlow( DatapathId dpid, DatapathId dstDpid, Long cookie, org.openkilda.model.MacAddress server42MacAddress, int server42Port, int outputPort, int customerPort, int inputVlanId, int transitTunnelId, OutputVlanType outputVlanType, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installEgressFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort, int transitTunnelId, int outputVlanId, OutputVlanType outputVlanType, FlowEncapsulationType encapsulationType, boolean multiTable); @Override 
long installTransitFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort, int transitTunnelId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installOneSwitchFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort, int inputVlanId, int outputVlanId, OutputVlanType outputVlanType, long meterId, boolean multiTable); @Override void installOuterVlanMatchSharedFlow(SwitchId switchId, String flowId, FlowSharedSegmentCookie cookie); @Override List<OFFlowMod> getExpectedDefaultFlows(DatapathId dpid, boolean multiTable, boolean switchLldp, boolean switchArp); @Override List<MeterEntry> getExpectedDefaultMeters(DatapathId dpid, boolean multiTable, boolean switchLldp, boolean switchArp); @Override List<OFFlowMod> getExpectedIslFlowsForPort(DatapathId dpid, int port); @Override List<OFFlowStatsEntry> dumpFlowTable(final DatapathId dpid); @Override List<OFMeterConfig> dumpMeters(final DatapathId dpid); @Override OFMeterConfig dumpMeterById(final DatapathId dpid, final long meterId); @Override void installMeterForFlow(DatapathId dpid, long bandwidth, final long meterId); @Override void modifyMeterForFlow(DatapathId dpid, long meterId, long bandwidth); @Override Map<DatapathId, IOFSwitch> getAllSwitchMap(boolean visible); @Override void deleteMeter(final DatapathId dpid, final long meterId); @Override List<Long> deleteAllNonDefaultRules(final DatapathId dpid); @Override List<Long> deleteRulesByCriteria(DatapathId dpid, boolean multiTable, RuleType ruleType, DeleteRulesCriteria... 
criteria); @Override List<Long> deleteDefaultRules(DatapathId dpid, List<Integer> islPorts, List<Integer> flowPorts, Set<Integer> flowLldpPorts, Set<Integer> flowArpPorts, Set<Integer> server42FlowRttPorts, boolean multiTable, boolean switchLldp, boolean switchArp, boolean server42FlowRtt); @Override Long installUnicastVerificationRuleVxlan(final DatapathId dpid); @Override Long installVerificationRule(final DatapathId dpid, final boolean isBroadcast); @Override List<OFGroupDescStatsEntry> dumpGroups(DatapathId dpid); @Override void installDropFlowCustom(final DatapathId dpid, String dstMac, String dstMask, final long cookie, final int priority); @Override Long installDropFlow(final DatapathId dpid); @Override Long installDropFlowForTable(final DatapathId dpid, final int tableId, final long cookie); @Override Long installBfdCatchFlow(DatapathId dpid); @Override Long installRoundTripLatencyFlow(DatapathId dpid); @Override long installEgressIslVxlanRule(DatapathId dpid, int port); @Override long removeEgressIslVxlanRule(DatapathId dpid, int port); @Override long installTransitIslVxlanRule(DatapathId dpid, int port); @Override long removeTransitIslVxlanRule(DatapathId dpid, int port); @Override long installEgressIslVlanRule(DatapathId dpid, int port); Long installLldpTransitFlow(DatapathId dpid); @Override Long installLldpInputPreDropFlow(DatapathId dpid); @Override Long installArpTransitFlow(DatapathId dpid); @Override Long installArpInputPreDropFlow(DatapathId dpid); @Override Long installServer42InputFlow(DatapathId dpid, int server42Port, int customerPort, org.openkilda.model.MacAddress server42macAddress); @Override Long installServer42TurningFlow(DatapathId dpid); @Override Long installServer42OutputVlanFlow( DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override Long installServer42OutputVxlanFlow( DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override long 
removeEgressIslVlanRule(DatapathId dpid, int port); @Override long installIntermediateIngressRule(DatapathId dpid, int port); @Override long removeIntermediateIngressRule(DatapathId dpid, int port); @Override long removeLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeArpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeServer42InputFlow(DatapathId dpid, int port); @Override OFFlowMod buildIntermediateIngressRule(DatapathId dpid, int port); @Override long installLldpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long installLldpIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressVxlanFlow(DatapathId dpid); @Override Long installLldpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installArpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildArpInputCustomerFlow(DatapathId dpid, int port); @Override List<OFFlowMod> buildExpectedServer42Flows( DatapathId dpid, int server42Port, int server42Vlan, org.openkilda.model.MacAddress server42MacAddress, Set<Integer> customerPorts); @Override Long installArpIngressFlow(DatapathId dpid); @Override Long installArpPostIngressFlow(DatapathId dpid); @Override Long installArpPostIngressVxlanFlow(DatapathId dpid); @Override Long installArpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installPreIngressTablePassThroughDefaultRule(DatapathId dpid); @Override Long installEgressTablePassThroughDefaultRule(DatapathId dpid); @Override List<Long> installMultitableEndpointIslRules(DatapathId dpid, int port); @Override List<Long> removeMultitableEndpointIslRules(DatapathId dpid, int port); @Override Long installDropLoopRule(DatapathId dpid); @Override IOFSwitch lookupSwitch(DatapathId dpId); @Override InetAddress getSwitchIpAddress(IOFSwitch sw); @Override List<OFPortDesc> getEnabledPhysicalPorts(DatapathId 
dpId); @Override List<OFPortDesc> getPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(IOFSwitch sw); @Override void safeModeTick(); @Override void configurePort(DatapathId dpId, int portNumber, Boolean portAdminDown); @Override List<OFPortDesc> dumpPortsDescription(DatapathId dpid); @Override SwitchManagerConfig getSwitchManagerConfig(); static final long FLOW_COOKIE_MASK; static final int VERIFICATION_RULE_PRIORITY; static final int VERIFICATION_RULE_VXLAN_PRIORITY; static final int DROP_VERIFICATION_LOOP_RULE_PRIORITY; static final int CATCH_BFD_RULE_PRIORITY; static final int ROUND_TRIP_LATENCY_RULE_PRIORITY; static final int FLOW_PRIORITY; static final int ISL_EGRESS_VXLAN_RULE_PRIORITY_MULTITABLE; static final int ISL_TRANSIT_VXLAN_RULE_PRIORITY_MULTITABLE; static final int INGRESS_CUSTOMER_PORT_RULE_PRIORITY_MULTITABLE; static final int ISL_EGRESS_VLAN_RULE_PRIORITY_MULTITABLE; static final int DEFAULT_FLOW_PRIORITY; static final int MINIMAL_POSITIVE_PRIORITY; static final int SERVER_42_INPUT_PRIORITY; static final int SERVER_42_TURNING_PRIORITY; static final int SERVER_42_OUTPUT_VLAN_PRIORITY; static final int SERVER_42_OUTPUT_VXLAN_PRIORITY; static final int LLDP_INPUT_PRE_DROP_PRIORITY; static final int LLDP_TRANSIT_ISL_PRIORITY; static final int LLDP_INPUT_CUSTOMER_PRIORITY; static final int LLDP_INGRESS_PRIORITY; static final int LLDP_POST_INGRESS_PRIORITY; static final int LLDP_POST_INGRESS_VXLAN_PRIORITY; static final int LLDP_POST_INGRESS_ONE_SWITCH_PRIORITY; static final int ARP_INPUT_PRE_DROP_PRIORITY; static final int ARP_TRANSIT_ISL_PRIORITY; static final int ARP_INPUT_CUSTOMER_PRIORITY; static final int ARP_INGRESS_PRIORITY; static final int ARP_POST_INGRESS_PRIORITY; static final int ARP_POST_INGRESS_VXLAN_PRIORITY; static final int ARP_POST_INGRESS_ONE_SWITCH_PRIORITY; static final int SERVER_42_INGRESS_DEFAULT_FLOW_PRIORITY_OFFSET; static final int SERVER_42_INGRESS_DEFAULT_FLOW_PRIORITY; static final int 
BDF_DEFAULT_PORT; static final int ROUND_TRIP_LATENCY_GROUP_ID; static final MacAddress STUB_VXLAN_ETH_DST_MAC; static final IPv4Address STUB_VXLAN_IPV4_SRC; static final IPv4Address STUB_VXLAN_IPV4_DST; static final int STUB_VXLAN_UDP_SRC; static final int ARP_VXLAN_UDP_SRC; static final int SERVER_42_FORWARD_UDP_PORT; static final int SERVER_42_REVERSE_UDP_PORT; static final int VXLAN_UDP_DST; static final int ETH_SRC_OFFSET; static final int INTERNAL_ETH_SRC_OFFSET; static final int MAC_ADDRESS_SIZE_IN_BITS; static final int TABLE_1; static final int INPUT_TABLE_ID; static final int PRE_INGRESS_TABLE_ID; static final int INGRESS_TABLE_ID; static final int POST_INGRESS_TABLE_ID; static final int EGRESS_TABLE_ID; static final int TRANSIT_TABLE_ID; static final int NOVIFLOW_TIMESTAMP_SIZE_IN_BITS; }
@Test public void installDropRule() throws Exception { Capture<OFFlowMod> capture = prepareForInstallTest(); switchManager.installDropFlow(defaultDpid); assertEquals(scheme.installDropFlowRule(), capture.getValue()); }
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener { @Override public Long installVerificationRule(final DatapathId dpid, final boolean isBroadcast) throws SwitchOperationException { String ruleName = (isBroadcast) ? "Broadcast" : "Unicast"; String flowName = ruleName + "--VerificationFlow--"; return installDefaultFlow(dpid, switchFlowFactory.getVerificationFlow(isBroadcast), flowName); } @Override Collection<Class<? extends IFloodlightService>> getModuleServices(); @Override Map<Class<? extends IFloodlightService>, IFloodlightService> getServiceImpls(); @Override Collection<Class<? extends IFloodlightService>> getModuleDependencies(); @Override void init(FloodlightModuleContext context); @Override void startUp(FloodlightModuleContext context); @Override @NewCorrelationContextRequired Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx); @Override String getName(); @Override void activate(DatapathId dpid); @Override void deactivate(DatapathId dpid); @Override boolean isCallbackOrderingPrereq(OFType type, String name); @Override boolean isCallbackOrderingPostreq(OFType type, String name); @Override ConnectModeRequest.Mode connectMode(final ConnectModeRequest.Mode mode); @Override List<Long> installDefaultRules(final DatapathId dpid); @Override long installIngressFlow(DatapathId dpid, DatapathId dstDpid, String flowId, Long cookie, int inputPort, int outputPort, int inputVlanId, int transitTunnelId, OutputVlanType outputVlanType, long meterId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installServer42IngressFlow( DatapathId dpid, DatapathId dstDpid, Long cookie, org.openkilda.model.MacAddress server42MacAddress, int server42Port, int outputPort, int customerPort, int inputVlanId, int transitTunnelId, OutputVlanType outputVlanType, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installEgressFlow(DatapathId dpid, String flowId, Long cookie, 
int inputPort, int outputPort, int transitTunnelId, int outputVlanId, OutputVlanType outputVlanType, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installTransitFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort, int transitTunnelId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installOneSwitchFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort, int inputVlanId, int outputVlanId, OutputVlanType outputVlanType, long meterId, boolean multiTable); @Override void installOuterVlanMatchSharedFlow(SwitchId switchId, String flowId, FlowSharedSegmentCookie cookie); @Override List<OFFlowMod> getExpectedDefaultFlows(DatapathId dpid, boolean multiTable, boolean switchLldp, boolean switchArp); @Override List<MeterEntry> getExpectedDefaultMeters(DatapathId dpid, boolean multiTable, boolean switchLldp, boolean switchArp); @Override List<OFFlowMod> getExpectedIslFlowsForPort(DatapathId dpid, int port); @Override List<OFFlowStatsEntry> dumpFlowTable(final DatapathId dpid); @Override List<OFMeterConfig> dumpMeters(final DatapathId dpid); @Override OFMeterConfig dumpMeterById(final DatapathId dpid, final long meterId); @Override void installMeterForFlow(DatapathId dpid, long bandwidth, final long meterId); @Override void modifyMeterForFlow(DatapathId dpid, long meterId, long bandwidth); @Override Map<DatapathId, IOFSwitch> getAllSwitchMap(boolean visible); @Override void deleteMeter(final DatapathId dpid, final long meterId); @Override List<Long> deleteAllNonDefaultRules(final DatapathId dpid); @Override List<Long> deleteRulesByCriteria(DatapathId dpid, boolean multiTable, RuleType ruleType, DeleteRulesCriteria... 
criteria); @Override List<Long> deleteDefaultRules(DatapathId dpid, List<Integer> islPorts, List<Integer> flowPorts, Set<Integer> flowLldpPorts, Set<Integer> flowArpPorts, Set<Integer> server42FlowRttPorts, boolean multiTable, boolean switchLldp, boolean switchArp, boolean server42FlowRtt); @Override Long installUnicastVerificationRuleVxlan(final DatapathId dpid); @Override Long installVerificationRule(final DatapathId dpid, final boolean isBroadcast); @Override List<OFGroupDescStatsEntry> dumpGroups(DatapathId dpid); @Override void installDropFlowCustom(final DatapathId dpid, String dstMac, String dstMask, final long cookie, final int priority); @Override Long installDropFlow(final DatapathId dpid); @Override Long installDropFlowForTable(final DatapathId dpid, final int tableId, final long cookie); @Override Long installBfdCatchFlow(DatapathId dpid); @Override Long installRoundTripLatencyFlow(DatapathId dpid); @Override long installEgressIslVxlanRule(DatapathId dpid, int port); @Override long removeEgressIslVxlanRule(DatapathId dpid, int port); @Override long installTransitIslVxlanRule(DatapathId dpid, int port); @Override long removeTransitIslVxlanRule(DatapathId dpid, int port); @Override long installEgressIslVlanRule(DatapathId dpid, int port); Long installLldpTransitFlow(DatapathId dpid); @Override Long installLldpInputPreDropFlow(DatapathId dpid); @Override Long installArpTransitFlow(DatapathId dpid); @Override Long installArpInputPreDropFlow(DatapathId dpid); @Override Long installServer42InputFlow(DatapathId dpid, int server42Port, int customerPort, org.openkilda.model.MacAddress server42macAddress); @Override Long installServer42TurningFlow(DatapathId dpid); @Override Long installServer42OutputVlanFlow( DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override Long installServer42OutputVxlanFlow( DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override long 
removeEgressIslVlanRule(DatapathId dpid, int port); @Override long installIntermediateIngressRule(DatapathId dpid, int port); @Override long removeIntermediateIngressRule(DatapathId dpid, int port); @Override long removeLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeArpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeServer42InputFlow(DatapathId dpid, int port); @Override OFFlowMod buildIntermediateIngressRule(DatapathId dpid, int port); @Override long installLldpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long installLldpIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressVxlanFlow(DatapathId dpid); @Override Long installLldpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installArpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildArpInputCustomerFlow(DatapathId dpid, int port); @Override List<OFFlowMod> buildExpectedServer42Flows( DatapathId dpid, int server42Port, int server42Vlan, org.openkilda.model.MacAddress server42MacAddress, Set<Integer> customerPorts); @Override Long installArpIngressFlow(DatapathId dpid); @Override Long installArpPostIngressFlow(DatapathId dpid); @Override Long installArpPostIngressVxlanFlow(DatapathId dpid); @Override Long installArpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installPreIngressTablePassThroughDefaultRule(DatapathId dpid); @Override Long installEgressTablePassThroughDefaultRule(DatapathId dpid); @Override List<Long> installMultitableEndpointIslRules(DatapathId dpid, int port); @Override List<Long> removeMultitableEndpointIslRules(DatapathId dpid, int port); @Override Long installDropLoopRule(DatapathId dpid); @Override IOFSwitch lookupSwitch(DatapathId dpId); @Override InetAddress getSwitchIpAddress(IOFSwitch sw); @Override List<OFPortDesc> getEnabledPhysicalPorts(DatapathId 
dpId); @Override List<OFPortDesc> getPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(IOFSwitch sw); @Override void safeModeTick(); @Override void configurePort(DatapathId dpId, int portNumber, Boolean portAdminDown); @Override List<OFPortDesc> dumpPortsDescription(DatapathId dpid); @Override SwitchManagerConfig getSwitchManagerConfig(); static final long FLOW_COOKIE_MASK; static final int VERIFICATION_RULE_PRIORITY; static final int VERIFICATION_RULE_VXLAN_PRIORITY; static final int DROP_VERIFICATION_LOOP_RULE_PRIORITY; static final int CATCH_BFD_RULE_PRIORITY; static final int ROUND_TRIP_LATENCY_RULE_PRIORITY; static final int FLOW_PRIORITY; static final int ISL_EGRESS_VXLAN_RULE_PRIORITY_MULTITABLE; static final int ISL_TRANSIT_VXLAN_RULE_PRIORITY_MULTITABLE; static final int INGRESS_CUSTOMER_PORT_RULE_PRIORITY_MULTITABLE; static final int ISL_EGRESS_VLAN_RULE_PRIORITY_MULTITABLE; static final int DEFAULT_FLOW_PRIORITY; static final int MINIMAL_POSITIVE_PRIORITY; static final int SERVER_42_INPUT_PRIORITY; static final int SERVER_42_TURNING_PRIORITY; static final int SERVER_42_OUTPUT_VLAN_PRIORITY; static final int SERVER_42_OUTPUT_VXLAN_PRIORITY; static final int LLDP_INPUT_PRE_DROP_PRIORITY; static final int LLDP_TRANSIT_ISL_PRIORITY; static final int LLDP_INPUT_CUSTOMER_PRIORITY; static final int LLDP_INGRESS_PRIORITY; static final int LLDP_POST_INGRESS_PRIORITY; static final int LLDP_POST_INGRESS_VXLAN_PRIORITY; static final int LLDP_POST_INGRESS_ONE_SWITCH_PRIORITY; static final int ARP_INPUT_PRE_DROP_PRIORITY; static final int ARP_TRANSIT_ISL_PRIORITY; static final int ARP_INPUT_CUSTOMER_PRIORITY; static final int ARP_INGRESS_PRIORITY; static final int ARP_POST_INGRESS_PRIORITY; static final int ARP_POST_INGRESS_VXLAN_PRIORITY; static final int ARP_POST_INGRESS_ONE_SWITCH_PRIORITY; static final int SERVER_42_INGRESS_DEFAULT_FLOW_PRIORITY_OFFSET; static final int SERVER_42_INGRESS_DEFAULT_FLOW_PRIORITY; static final int 
BDF_DEFAULT_PORT; static final int ROUND_TRIP_LATENCY_GROUP_ID; static final MacAddress STUB_VXLAN_ETH_DST_MAC; static final IPv4Address STUB_VXLAN_IPV4_SRC; static final IPv4Address STUB_VXLAN_IPV4_DST; static final int STUB_VXLAN_UDP_SRC; static final int ARP_VXLAN_UDP_SRC; static final int SERVER_42_FORWARD_UDP_PORT; static final int SERVER_42_REVERSE_UDP_PORT; static final int VXLAN_UDP_DST; static final int ETH_SRC_OFFSET; static final int INTERNAL_ETH_SRC_OFFSET; static final int MAC_ADDRESS_SIZE_IN_BITS; static final int TABLE_1; static final int INPUT_TABLE_ID; static final int PRE_INGRESS_TABLE_ID; static final int INGRESS_TABLE_ID; static final int POST_INGRESS_TABLE_ID; static final int EGRESS_TABLE_ID; static final int TRANSIT_TABLE_ID; static final int NOVIFLOW_TIMESTAMP_SIZE_IN_BITS; }
@Test public void installVerificationUnicastRule() throws Exception { mockGetMetersRequest(Lists.newArrayList(broadcastMeterId), true, 10L); mockBarrierRequest(); expect(iofSwitch.write(anyObject(OFMeterMod.class))).andReturn(true).times(1); Capture<OFFlowMod> capture = prepareForInstallTest(); switchManager.installVerificationRule(defaultDpid, false); assertEquals(scheme.installVerificationUnicastRule(defaultDpid), capture.getValue()); }
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener { @Override public Long installDropLoopRule(DatapathId dpid) throws SwitchOperationException { return installDefaultFlow(dpid, switchFlowFactory.getDropLoopFlowGenerator(), "--DropLoopRule--"); } @Override Collection<Class<? extends IFloodlightService>> getModuleServices(); @Override Map<Class<? extends IFloodlightService>, IFloodlightService> getServiceImpls(); @Override Collection<Class<? extends IFloodlightService>> getModuleDependencies(); @Override void init(FloodlightModuleContext context); @Override void startUp(FloodlightModuleContext context); @Override @NewCorrelationContextRequired Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx); @Override String getName(); @Override void activate(DatapathId dpid); @Override void deactivate(DatapathId dpid); @Override boolean isCallbackOrderingPrereq(OFType type, String name); @Override boolean isCallbackOrderingPostreq(OFType type, String name); @Override ConnectModeRequest.Mode connectMode(final ConnectModeRequest.Mode mode); @Override List<Long> installDefaultRules(final DatapathId dpid); @Override long installIngressFlow(DatapathId dpid, DatapathId dstDpid, String flowId, Long cookie, int inputPort, int outputPort, int inputVlanId, int transitTunnelId, OutputVlanType outputVlanType, long meterId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installServer42IngressFlow( DatapathId dpid, DatapathId dstDpid, Long cookie, org.openkilda.model.MacAddress server42MacAddress, int server42Port, int outputPort, int customerPort, int inputVlanId, int transitTunnelId, OutputVlanType outputVlanType, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installEgressFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort, int transitTunnelId, int outputVlanId, OutputVlanType outputVlanType, FlowEncapsulationType encapsulationType, 
boolean multiTable); @Override long installTransitFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort, int transitTunnelId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installOneSwitchFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort, int inputVlanId, int outputVlanId, OutputVlanType outputVlanType, long meterId, boolean multiTable); @Override void installOuterVlanMatchSharedFlow(SwitchId switchId, String flowId, FlowSharedSegmentCookie cookie); @Override List<OFFlowMod> getExpectedDefaultFlows(DatapathId dpid, boolean multiTable, boolean switchLldp, boolean switchArp); @Override List<MeterEntry> getExpectedDefaultMeters(DatapathId dpid, boolean multiTable, boolean switchLldp, boolean switchArp); @Override List<OFFlowMod> getExpectedIslFlowsForPort(DatapathId dpid, int port); @Override List<OFFlowStatsEntry> dumpFlowTable(final DatapathId dpid); @Override List<OFMeterConfig> dumpMeters(final DatapathId dpid); @Override OFMeterConfig dumpMeterById(final DatapathId dpid, final long meterId); @Override void installMeterForFlow(DatapathId dpid, long bandwidth, final long meterId); @Override void modifyMeterForFlow(DatapathId dpid, long meterId, long bandwidth); @Override Map<DatapathId, IOFSwitch> getAllSwitchMap(boolean visible); @Override void deleteMeter(final DatapathId dpid, final long meterId); @Override List<Long> deleteAllNonDefaultRules(final DatapathId dpid); @Override List<Long> deleteRulesByCriteria(DatapathId dpid, boolean multiTable, RuleType ruleType, DeleteRulesCriteria... 
criteria); @Override List<Long> deleteDefaultRules(DatapathId dpid, List<Integer> islPorts, List<Integer> flowPorts, Set<Integer> flowLldpPorts, Set<Integer> flowArpPorts, Set<Integer> server42FlowRttPorts, boolean multiTable, boolean switchLldp, boolean switchArp, boolean server42FlowRtt); @Override Long installUnicastVerificationRuleVxlan(final DatapathId dpid); @Override Long installVerificationRule(final DatapathId dpid, final boolean isBroadcast); @Override List<OFGroupDescStatsEntry> dumpGroups(DatapathId dpid); @Override void installDropFlowCustom(final DatapathId dpid, String dstMac, String dstMask, final long cookie, final int priority); @Override Long installDropFlow(final DatapathId dpid); @Override Long installDropFlowForTable(final DatapathId dpid, final int tableId, final long cookie); @Override Long installBfdCatchFlow(DatapathId dpid); @Override Long installRoundTripLatencyFlow(DatapathId dpid); @Override long installEgressIslVxlanRule(DatapathId dpid, int port); @Override long removeEgressIslVxlanRule(DatapathId dpid, int port); @Override long installTransitIslVxlanRule(DatapathId dpid, int port); @Override long removeTransitIslVxlanRule(DatapathId dpid, int port); @Override long installEgressIslVlanRule(DatapathId dpid, int port); Long installLldpTransitFlow(DatapathId dpid); @Override Long installLldpInputPreDropFlow(DatapathId dpid); @Override Long installArpTransitFlow(DatapathId dpid); @Override Long installArpInputPreDropFlow(DatapathId dpid); @Override Long installServer42InputFlow(DatapathId dpid, int server42Port, int customerPort, org.openkilda.model.MacAddress server42macAddress); @Override Long installServer42TurningFlow(DatapathId dpid); @Override Long installServer42OutputVlanFlow( DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override Long installServer42OutputVxlanFlow( DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override long 
removeEgressIslVlanRule(DatapathId dpid, int port); @Override long installIntermediateIngressRule(DatapathId dpid, int port); @Override long removeIntermediateIngressRule(DatapathId dpid, int port); @Override long removeLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeArpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeServer42InputFlow(DatapathId dpid, int port); @Override OFFlowMod buildIntermediateIngressRule(DatapathId dpid, int port); @Override long installLldpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long installLldpIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressVxlanFlow(DatapathId dpid); @Override Long installLldpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installArpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildArpInputCustomerFlow(DatapathId dpid, int port); @Override List<OFFlowMod> buildExpectedServer42Flows( DatapathId dpid, int server42Port, int server42Vlan, org.openkilda.model.MacAddress server42MacAddress, Set<Integer> customerPorts); @Override Long installArpIngressFlow(DatapathId dpid); @Override Long installArpPostIngressFlow(DatapathId dpid); @Override Long installArpPostIngressVxlanFlow(DatapathId dpid); @Override Long installArpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installPreIngressTablePassThroughDefaultRule(DatapathId dpid); @Override Long installEgressTablePassThroughDefaultRule(DatapathId dpid); @Override List<Long> installMultitableEndpointIslRules(DatapathId dpid, int port); @Override List<Long> removeMultitableEndpointIslRules(DatapathId dpid, int port); @Override Long installDropLoopRule(DatapathId dpid); @Override IOFSwitch lookupSwitch(DatapathId dpId); @Override InetAddress getSwitchIpAddress(IOFSwitch sw); @Override List<OFPortDesc> getEnabledPhysicalPorts(DatapathId 
dpId); @Override List<OFPortDesc> getPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(IOFSwitch sw); @Override void safeModeTick(); @Override void configurePort(DatapathId dpId, int portNumber, Boolean portAdminDown); @Override List<OFPortDesc> dumpPortsDescription(DatapathId dpid); @Override SwitchManagerConfig getSwitchManagerConfig(); static final long FLOW_COOKIE_MASK; static final int VERIFICATION_RULE_PRIORITY; static final int VERIFICATION_RULE_VXLAN_PRIORITY; static final int DROP_VERIFICATION_LOOP_RULE_PRIORITY; static final int CATCH_BFD_RULE_PRIORITY; static final int ROUND_TRIP_LATENCY_RULE_PRIORITY; static final int FLOW_PRIORITY; static final int ISL_EGRESS_VXLAN_RULE_PRIORITY_MULTITABLE; static final int ISL_TRANSIT_VXLAN_RULE_PRIORITY_MULTITABLE; static final int INGRESS_CUSTOMER_PORT_RULE_PRIORITY_MULTITABLE; static final int ISL_EGRESS_VLAN_RULE_PRIORITY_MULTITABLE; static final int DEFAULT_FLOW_PRIORITY; static final int MINIMAL_POSITIVE_PRIORITY; static final int SERVER_42_INPUT_PRIORITY; static final int SERVER_42_TURNING_PRIORITY; static final int SERVER_42_OUTPUT_VLAN_PRIORITY; static final int SERVER_42_OUTPUT_VXLAN_PRIORITY; static final int LLDP_INPUT_PRE_DROP_PRIORITY; static final int LLDP_TRANSIT_ISL_PRIORITY; static final int LLDP_INPUT_CUSTOMER_PRIORITY; static final int LLDP_INGRESS_PRIORITY; static final int LLDP_POST_INGRESS_PRIORITY; static final int LLDP_POST_INGRESS_VXLAN_PRIORITY; static final int LLDP_POST_INGRESS_ONE_SWITCH_PRIORITY; static final int ARP_INPUT_PRE_DROP_PRIORITY; static final int ARP_TRANSIT_ISL_PRIORITY; static final int ARP_INPUT_CUSTOMER_PRIORITY; static final int ARP_INGRESS_PRIORITY; static final int ARP_POST_INGRESS_PRIORITY; static final int ARP_POST_INGRESS_VXLAN_PRIORITY; static final int ARP_POST_INGRESS_ONE_SWITCH_PRIORITY; static final int SERVER_42_INGRESS_DEFAULT_FLOW_PRIORITY_OFFSET; static final int SERVER_42_INGRESS_DEFAULT_FLOW_PRIORITY; static final int 
BDF_DEFAULT_PORT; static final int ROUND_TRIP_LATENCY_GROUP_ID; static final MacAddress STUB_VXLAN_ETH_DST_MAC; static final IPv4Address STUB_VXLAN_IPV4_SRC; static final IPv4Address STUB_VXLAN_IPV4_DST; static final int STUB_VXLAN_UDP_SRC; static final int ARP_VXLAN_UDP_SRC; static final int SERVER_42_FORWARD_UDP_PORT; static final int SERVER_42_REVERSE_UDP_PORT; static final int VXLAN_UDP_DST; static final int ETH_SRC_OFFSET; static final int INTERNAL_ETH_SRC_OFFSET; static final int MAC_ADDRESS_SIZE_IN_BITS; static final int TABLE_1; static final int INPUT_TABLE_ID; static final int PRE_INGRESS_TABLE_ID; static final int INGRESS_TABLE_ID; static final int POST_INGRESS_TABLE_ID; static final int EGRESS_TABLE_ID; static final int TRANSIT_TABLE_ID; static final int NOVIFLOW_TIMESTAMP_SIZE_IN_BITS; }
/**
 * Verifies that {@code installDropLoopRule(dpid)} pushes exactly the flow mod
 * that the reference {@code scheme} builds for the same datapath id.
 */
@Test
public void installDropLoopRule() throws Exception {
    // Arrange: capture whatever flow mod the manager writes to the switch.
    Capture<OFFlowMod> pushedFlow = prepareForInstallTest();

    // Act.
    switchManager.installDropLoopRule(dpid);

    // Assert: pushed flow matches the expected one from the reference scheme.
    assertEquals(scheme.installDropLoopRule(dpid), pushedFlow.getValue());
}
// NOTE(review): auto-collapsed summary of SwitchManager. The only implemented member
// in this chunk is installDropFlowForTable(dpid, tableId, cookie): it delegates to
// installDefaultFlow(...) with switchFlowFactory.getDropFlowGenerator(cookie, tableId)
// and the flow name "--DropRule--", returning whatever installDefaultFlow yields
// (presumably the installed rule's cookie — confirm against installDefaultFlow).
// Everything after the method body is the flattened interface listing of the class
// (method signatures and static constants only; bodies/initializers elided by the
// collapsing tool). "BDF_DEFAULT_PORT" looks like a typo for BFD — confirm upstream.
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener { @Override public Long installDropFlowForTable(final DatapathId dpid, final int tableId, final long cookie) throws SwitchOperationException { return installDefaultFlow(dpid, switchFlowFactory.getDropFlowGenerator(cookie, tableId), "--DropRule--"); } @Override Collection<Class<? extends IFloodlightService>> getModuleServices(); @Override Map<Class<? extends IFloodlightService>, IFloodlightService> getServiceImpls(); @Override Collection<Class<? extends IFloodlightService>> getModuleDependencies(); @Override void init(FloodlightModuleContext context); @Override void startUp(FloodlightModuleContext context); @Override @NewCorrelationContextRequired Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx); @Override String getName(); @Override void activate(DatapathId dpid); @Override void deactivate(DatapathId dpid); @Override boolean isCallbackOrderingPrereq(OFType type, String name); @Override boolean isCallbackOrderingPostreq(OFType type, String name); @Override ConnectModeRequest.Mode connectMode(final ConnectModeRequest.Mode mode); @Override List<Long> installDefaultRules(final DatapathId dpid); @Override long installIngressFlow(DatapathId dpid, DatapathId dstDpid, String flowId, Long cookie, int inputPort, int outputPort, int inputVlanId, int transitTunnelId, OutputVlanType outputVlanType, long meterId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installServer42IngressFlow( DatapathId dpid, DatapathId dstDpid, Long cookie, org.openkilda.model.MacAddress server42MacAddress, int server42Port, int outputPort, int customerPort, int inputVlanId, int transitTunnelId, OutputVlanType outputVlanType, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installEgressFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort, int transitTunnelId, int outputVlanId, OutputVlanType 
outputVlanType, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installTransitFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort, int transitTunnelId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installOneSwitchFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort, int inputVlanId, int outputVlanId, OutputVlanType outputVlanType, long meterId, boolean multiTable); @Override void installOuterVlanMatchSharedFlow(SwitchId switchId, String flowId, FlowSharedSegmentCookie cookie); @Override List<OFFlowMod> getExpectedDefaultFlows(DatapathId dpid, boolean multiTable, boolean switchLldp, boolean switchArp); @Override List<MeterEntry> getExpectedDefaultMeters(DatapathId dpid, boolean multiTable, boolean switchLldp, boolean switchArp); @Override List<OFFlowMod> getExpectedIslFlowsForPort(DatapathId dpid, int port); @Override List<OFFlowStatsEntry> dumpFlowTable(final DatapathId dpid); @Override List<OFMeterConfig> dumpMeters(final DatapathId dpid); @Override OFMeterConfig dumpMeterById(final DatapathId dpid, final long meterId); @Override void installMeterForFlow(DatapathId dpid, long bandwidth, final long meterId); @Override void modifyMeterForFlow(DatapathId dpid, long meterId, long bandwidth); @Override Map<DatapathId, IOFSwitch> getAllSwitchMap(boolean visible); @Override void deleteMeter(final DatapathId dpid, final long meterId); @Override List<Long> deleteAllNonDefaultRules(final DatapathId dpid); @Override List<Long> deleteRulesByCriteria(DatapathId dpid, boolean multiTable, RuleType ruleType, DeleteRulesCriteria... 
criteria); @Override List<Long> deleteDefaultRules(DatapathId dpid, List<Integer> islPorts, List<Integer> flowPorts, Set<Integer> flowLldpPorts, Set<Integer> flowArpPorts, Set<Integer> server42FlowRttPorts, boolean multiTable, boolean switchLldp, boolean switchArp, boolean server42FlowRtt); @Override Long installUnicastVerificationRuleVxlan(final DatapathId dpid); @Override Long installVerificationRule(final DatapathId dpid, final boolean isBroadcast); @Override List<OFGroupDescStatsEntry> dumpGroups(DatapathId dpid); @Override void installDropFlowCustom(final DatapathId dpid, String dstMac, String dstMask, final long cookie, final int priority); @Override Long installDropFlow(final DatapathId dpid); @Override Long installDropFlowForTable(final DatapathId dpid, final int tableId, final long cookie); @Override Long installBfdCatchFlow(DatapathId dpid); @Override Long installRoundTripLatencyFlow(DatapathId dpid); @Override long installEgressIslVxlanRule(DatapathId dpid, int port); @Override long removeEgressIslVxlanRule(DatapathId dpid, int port); @Override long installTransitIslVxlanRule(DatapathId dpid, int port); @Override long removeTransitIslVxlanRule(DatapathId dpid, int port); @Override long installEgressIslVlanRule(DatapathId dpid, int port); Long installLldpTransitFlow(DatapathId dpid); @Override Long installLldpInputPreDropFlow(DatapathId dpid); @Override Long installArpTransitFlow(DatapathId dpid); @Override Long installArpInputPreDropFlow(DatapathId dpid); @Override Long installServer42InputFlow(DatapathId dpid, int server42Port, int customerPort, org.openkilda.model.MacAddress server42macAddress); @Override Long installServer42TurningFlow(DatapathId dpid); @Override Long installServer42OutputVlanFlow( DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override Long installServer42OutputVxlanFlow( DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override long 
removeEgressIslVlanRule(DatapathId dpid, int port); @Override long installIntermediateIngressRule(DatapathId dpid, int port); @Override long removeIntermediateIngressRule(DatapathId dpid, int port); @Override long removeLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeArpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeServer42InputFlow(DatapathId dpid, int port); @Override OFFlowMod buildIntermediateIngressRule(DatapathId dpid, int port); @Override long installLldpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long installLldpIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressVxlanFlow(DatapathId dpid); @Override Long installLldpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installArpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildArpInputCustomerFlow(DatapathId dpid, int port); @Override List<OFFlowMod> buildExpectedServer42Flows( DatapathId dpid, int server42Port, int server42Vlan, org.openkilda.model.MacAddress server42MacAddress, Set<Integer> customerPorts); @Override Long installArpIngressFlow(DatapathId dpid); @Override Long installArpPostIngressFlow(DatapathId dpid); @Override Long installArpPostIngressVxlanFlow(DatapathId dpid); @Override Long installArpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installPreIngressTablePassThroughDefaultRule(DatapathId dpid); @Override Long installEgressTablePassThroughDefaultRule(DatapathId dpid); @Override List<Long> installMultitableEndpointIslRules(DatapathId dpid, int port); @Override List<Long> removeMultitableEndpointIslRules(DatapathId dpid, int port); @Override Long installDropLoopRule(DatapathId dpid); @Override IOFSwitch lookupSwitch(DatapathId dpId); @Override InetAddress getSwitchIpAddress(IOFSwitch sw); @Override List<OFPortDesc> getEnabledPhysicalPorts(DatapathId 
dpId); @Override List<OFPortDesc> getPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(IOFSwitch sw); @Override void safeModeTick(); @Override void configurePort(DatapathId dpId, int portNumber, Boolean portAdminDown); @Override List<OFPortDesc> dumpPortsDescription(DatapathId dpid); @Override SwitchManagerConfig getSwitchManagerConfig(); static final long FLOW_COOKIE_MASK; static final int VERIFICATION_RULE_PRIORITY; static final int VERIFICATION_RULE_VXLAN_PRIORITY; static final int DROP_VERIFICATION_LOOP_RULE_PRIORITY; static final int CATCH_BFD_RULE_PRIORITY; static final int ROUND_TRIP_LATENCY_RULE_PRIORITY; static final int FLOW_PRIORITY; static final int ISL_EGRESS_VXLAN_RULE_PRIORITY_MULTITABLE; static final int ISL_TRANSIT_VXLAN_RULE_PRIORITY_MULTITABLE; static final int INGRESS_CUSTOMER_PORT_RULE_PRIORITY_MULTITABLE; static final int ISL_EGRESS_VLAN_RULE_PRIORITY_MULTITABLE; static final int DEFAULT_FLOW_PRIORITY; static final int MINIMAL_POSITIVE_PRIORITY; static final int SERVER_42_INPUT_PRIORITY; static final int SERVER_42_TURNING_PRIORITY; static final int SERVER_42_OUTPUT_VLAN_PRIORITY; static final int SERVER_42_OUTPUT_VXLAN_PRIORITY; static final int LLDP_INPUT_PRE_DROP_PRIORITY; static final int LLDP_TRANSIT_ISL_PRIORITY; static final int LLDP_INPUT_CUSTOMER_PRIORITY; static final int LLDP_INGRESS_PRIORITY; static final int LLDP_POST_INGRESS_PRIORITY; static final int LLDP_POST_INGRESS_VXLAN_PRIORITY; static final int LLDP_POST_INGRESS_ONE_SWITCH_PRIORITY; static final int ARP_INPUT_PRE_DROP_PRIORITY; static final int ARP_TRANSIT_ISL_PRIORITY; static final int ARP_INPUT_CUSTOMER_PRIORITY; static final int ARP_INGRESS_PRIORITY; static final int ARP_POST_INGRESS_PRIORITY; static final int ARP_POST_INGRESS_VXLAN_PRIORITY; static final int ARP_POST_INGRESS_ONE_SWITCH_PRIORITY; static final int SERVER_42_INGRESS_DEFAULT_FLOW_PRIORITY_OFFSET; static final int SERVER_42_INGRESS_DEFAULT_FLOW_PRIORITY; static final int 
BDF_DEFAULT_PORT; static final int ROUND_TRIP_LATENCY_GROUP_ID; static final MacAddress STUB_VXLAN_ETH_DST_MAC; static final IPv4Address STUB_VXLAN_IPV4_SRC; static final IPv4Address STUB_VXLAN_IPV4_DST; static final int STUB_VXLAN_UDP_SRC; static final int ARP_VXLAN_UDP_SRC; static final int SERVER_42_FORWARD_UDP_PORT; static final int SERVER_42_REVERSE_UDP_PORT; static final int VXLAN_UDP_DST; static final int ETH_SRC_OFFSET; static final int INTERNAL_ETH_SRC_OFFSET; static final int MAC_ADDRESS_SIZE_IN_BITS; static final int TABLE_1; static final int INPUT_TABLE_ID; static final int PRE_INGRESS_TABLE_ID; static final int INGRESS_TABLE_ID; static final int POST_INGRESS_TABLE_ID; static final int EGRESS_TABLE_ID; static final int TRANSIT_TABLE_ID; static final int NOVIFLOW_TIMESTAMP_SIZE_IN_BITS; }
/**
 * Verifies that {@code installDropFlowForTable(dpid, 1, DROP_RULE_COOKIE)} pushes
 * the same flow mod that the reference {@code scheme} builds for that table/cookie.
 */
@Test
public void installDropFlowForTable() throws Exception {
    // Arrange: capture whatever flow mod the manager writes to the switch.
    Capture<OFFlowMod> pushedFlow = prepareForInstallTest();

    // Act.
    switchManager.installDropFlowForTable(dpid, 1, DROP_RULE_COOKIE);

    // Assert: pushed flow matches the expected one from the reference scheme.
    assertEquals(scheme.installDropFlowForTable(dpid, 1, DROP_RULE_COOKIE), pushedFlow.getValue());
}
// NOTE(review): auto-collapsed summary of SwitchManager. The only implemented member
// in this chunk is installEgressIslVxlanRule(dpid, port): it resolves the switch via
// lookupSwitch(dpid), builds the rule with buildEgressIslVxlanRule(ofFactory, dpid, port)
// (note: unlike the transit variant, the builder also takes dpid), pushes it under the
// name "--Isl egress rule for VXLAN--" + dpid, and returns the flow mod's cookie value.
// Everything after the method body is the flattened interface listing of the class
// (signatures and static constants only; bodies/initializers elided by the tool).
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener { @Override public long installEgressIslVxlanRule(DatapathId dpid, int port) throws SwitchOperationException { IOFSwitch sw = lookupSwitch(dpid); OFFactory ofFactory = sw.getOFFactory(); OFFlowMod flowMod = buildEgressIslVxlanRule(ofFactory, dpid, port); String flowName = "--Isl egress rule for VXLAN--" + dpid.toString(); pushFlow(sw, flowName, flowMod); return flowMod.getCookie().getValue(); } @Override Collection<Class<? extends IFloodlightService>> getModuleServices(); @Override Map<Class<? extends IFloodlightService>, IFloodlightService> getServiceImpls(); @Override Collection<Class<? extends IFloodlightService>> getModuleDependencies(); @Override void init(FloodlightModuleContext context); @Override void startUp(FloodlightModuleContext context); @Override @NewCorrelationContextRequired Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx); @Override String getName(); @Override void activate(DatapathId dpid); @Override void deactivate(DatapathId dpid); @Override boolean isCallbackOrderingPrereq(OFType type, String name); @Override boolean isCallbackOrderingPostreq(OFType type, String name); @Override ConnectModeRequest.Mode connectMode(final ConnectModeRequest.Mode mode); @Override List<Long> installDefaultRules(final DatapathId dpid); @Override long installIngressFlow(DatapathId dpid, DatapathId dstDpid, String flowId, Long cookie, int inputPort, int outputPort, int inputVlanId, int transitTunnelId, OutputVlanType outputVlanType, long meterId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installServer42IngressFlow( DatapathId dpid, DatapathId dstDpid, Long cookie, org.openkilda.model.MacAddress server42MacAddress, int server42Port, int outputPort, int customerPort, int inputVlanId, int transitTunnelId, OutputVlanType outputVlanType, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long 
installEgressFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort, int transitTunnelId, int outputVlanId, OutputVlanType outputVlanType, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installTransitFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort, int transitTunnelId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installOneSwitchFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort, int inputVlanId, int outputVlanId, OutputVlanType outputVlanType, long meterId, boolean multiTable); @Override void installOuterVlanMatchSharedFlow(SwitchId switchId, String flowId, FlowSharedSegmentCookie cookie); @Override List<OFFlowMod> getExpectedDefaultFlows(DatapathId dpid, boolean multiTable, boolean switchLldp, boolean switchArp); @Override List<MeterEntry> getExpectedDefaultMeters(DatapathId dpid, boolean multiTable, boolean switchLldp, boolean switchArp); @Override List<OFFlowMod> getExpectedIslFlowsForPort(DatapathId dpid, int port); @Override List<OFFlowStatsEntry> dumpFlowTable(final DatapathId dpid); @Override List<OFMeterConfig> dumpMeters(final DatapathId dpid); @Override OFMeterConfig dumpMeterById(final DatapathId dpid, final long meterId); @Override void installMeterForFlow(DatapathId dpid, long bandwidth, final long meterId); @Override void modifyMeterForFlow(DatapathId dpid, long meterId, long bandwidth); @Override Map<DatapathId, IOFSwitch> getAllSwitchMap(boolean visible); @Override void deleteMeter(final DatapathId dpid, final long meterId); @Override List<Long> deleteAllNonDefaultRules(final DatapathId dpid); @Override List<Long> deleteRulesByCriteria(DatapathId dpid, boolean multiTable, RuleType ruleType, DeleteRulesCriteria... 
criteria); @Override List<Long> deleteDefaultRules(DatapathId dpid, List<Integer> islPorts, List<Integer> flowPorts, Set<Integer> flowLldpPorts, Set<Integer> flowArpPorts, Set<Integer> server42FlowRttPorts, boolean multiTable, boolean switchLldp, boolean switchArp, boolean server42FlowRtt); @Override Long installUnicastVerificationRuleVxlan(final DatapathId dpid); @Override Long installVerificationRule(final DatapathId dpid, final boolean isBroadcast); @Override List<OFGroupDescStatsEntry> dumpGroups(DatapathId dpid); @Override void installDropFlowCustom(final DatapathId dpid, String dstMac, String dstMask, final long cookie, final int priority); @Override Long installDropFlow(final DatapathId dpid); @Override Long installDropFlowForTable(final DatapathId dpid, final int tableId, final long cookie); @Override Long installBfdCatchFlow(DatapathId dpid); @Override Long installRoundTripLatencyFlow(DatapathId dpid); @Override long installEgressIslVxlanRule(DatapathId dpid, int port); @Override long removeEgressIslVxlanRule(DatapathId dpid, int port); @Override long installTransitIslVxlanRule(DatapathId dpid, int port); @Override long removeTransitIslVxlanRule(DatapathId dpid, int port); @Override long installEgressIslVlanRule(DatapathId dpid, int port); Long installLldpTransitFlow(DatapathId dpid); @Override Long installLldpInputPreDropFlow(DatapathId dpid); @Override Long installArpTransitFlow(DatapathId dpid); @Override Long installArpInputPreDropFlow(DatapathId dpid); @Override Long installServer42InputFlow(DatapathId dpid, int server42Port, int customerPort, org.openkilda.model.MacAddress server42macAddress); @Override Long installServer42TurningFlow(DatapathId dpid); @Override Long installServer42OutputVlanFlow( DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override Long installServer42OutputVxlanFlow( DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override long 
removeEgressIslVlanRule(DatapathId dpid, int port); @Override long installIntermediateIngressRule(DatapathId dpid, int port); @Override long removeIntermediateIngressRule(DatapathId dpid, int port); @Override long removeLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeArpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeServer42InputFlow(DatapathId dpid, int port); @Override OFFlowMod buildIntermediateIngressRule(DatapathId dpid, int port); @Override long installLldpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long installLldpIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressVxlanFlow(DatapathId dpid); @Override Long installLldpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installArpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildArpInputCustomerFlow(DatapathId dpid, int port); @Override List<OFFlowMod> buildExpectedServer42Flows( DatapathId dpid, int server42Port, int server42Vlan, org.openkilda.model.MacAddress server42MacAddress, Set<Integer> customerPorts); @Override Long installArpIngressFlow(DatapathId dpid); @Override Long installArpPostIngressFlow(DatapathId dpid); @Override Long installArpPostIngressVxlanFlow(DatapathId dpid); @Override Long installArpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installPreIngressTablePassThroughDefaultRule(DatapathId dpid); @Override Long installEgressTablePassThroughDefaultRule(DatapathId dpid); @Override List<Long> installMultitableEndpointIslRules(DatapathId dpid, int port); @Override List<Long> removeMultitableEndpointIslRules(DatapathId dpid, int port); @Override Long installDropLoopRule(DatapathId dpid); @Override IOFSwitch lookupSwitch(DatapathId dpId); @Override InetAddress getSwitchIpAddress(IOFSwitch sw); @Override List<OFPortDesc> getEnabledPhysicalPorts(DatapathId 
dpId); @Override List<OFPortDesc> getPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(IOFSwitch sw); @Override void safeModeTick(); @Override void configurePort(DatapathId dpId, int portNumber, Boolean portAdminDown); @Override List<OFPortDesc> dumpPortsDescription(DatapathId dpid); @Override SwitchManagerConfig getSwitchManagerConfig(); static final long FLOW_COOKIE_MASK; static final int VERIFICATION_RULE_PRIORITY; static final int VERIFICATION_RULE_VXLAN_PRIORITY; static final int DROP_VERIFICATION_LOOP_RULE_PRIORITY; static final int CATCH_BFD_RULE_PRIORITY; static final int ROUND_TRIP_LATENCY_RULE_PRIORITY; static final int FLOW_PRIORITY; static final int ISL_EGRESS_VXLAN_RULE_PRIORITY_MULTITABLE; static final int ISL_TRANSIT_VXLAN_RULE_PRIORITY_MULTITABLE; static final int INGRESS_CUSTOMER_PORT_RULE_PRIORITY_MULTITABLE; static final int ISL_EGRESS_VLAN_RULE_PRIORITY_MULTITABLE; static final int DEFAULT_FLOW_PRIORITY; static final int MINIMAL_POSITIVE_PRIORITY; static final int SERVER_42_INPUT_PRIORITY; static final int SERVER_42_TURNING_PRIORITY; static final int SERVER_42_OUTPUT_VLAN_PRIORITY; static final int SERVER_42_OUTPUT_VXLAN_PRIORITY; static final int LLDP_INPUT_PRE_DROP_PRIORITY; static final int LLDP_TRANSIT_ISL_PRIORITY; static final int LLDP_INPUT_CUSTOMER_PRIORITY; static final int LLDP_INGRESS_PRIORITY; static final int LLDP_POST_INGRESS_PRIORITY; static final int LLDP_POST_INGRESS_VXLAN_PRIORITY; static final int LLDP_POST_INGRESS_ONE_SWITCH_PRIORITY; static final int ARP_INPUT_PRE_DROP_PRIORITY; static final int ARP_TRANSIT_ISL_PRIORITY; static final int ARP_INPUT_CUSTOMER_PRIORITY; static final int ARP_INGRESS_PRIORITY; static final int ARP_POST_INGRESS_PRIORITY; static final int ARP_POST_INGRESS_VXLAN_PRIORITY; static final int ARP_POST_INGRESS_ONE_SWITCH_PRIORITY; static final int SERVER_42_INGRESS_DEFAULT_FLOW_PRIORITY_OFFSET; static final int SERVER_42_INGRESS_DEFAULT_FLOW_PRIORITY; static final int 
BDF_DEFAULT_PORT; static final int ROUND_TRIP_LATENCY_GROUP_ID; static final MacAddress STUB_VXLAN_ETH_DST_MAC; static final IPv4Address STUB_VXLAN_IPV4_SRC; static final IPv4Address STUB_VXLAN_IPV4_DST; static final int STUB_VXLAN_UDP_SRC; static final int ARP_VXLAN_UDP_SRC; static final int SERVER_42_FORWARD_UDP_PORT; static final int SERVER_42_REVERSE_UDP_PORT; static final int VXLAN_UDP_DST; static final int ETH_SRC_OFFSET; static final int INTERNAL_ETH_SRC_OFFSET; static final int MAC_ADDRESS_SIZE_IN_BITS; static final int TABLE_1; static final int INPUT_TABLE_ID; static final int PRE_INGRESS_TABLE_ID; static final int INGRESS_TABLE_ID; static final int POST_INGRESS_TABLE_ID; static final int EGRESS_TABLE_ID; static final int TRANSIT_TABLE_ID; static final int NOVIFLOW_TIMESTAMP_SIZE_IN_BITS; }
/**
 * Verifies that {@code installEgressIslVxlanRule(dpid, 1)} pushes the same flow mod
 * that the reference {@code scheme} builds for datapath {@code dpid}, port 1.
 */
@Test
public void installEgressIslVxlanRule() throws Exception {
    // Arrange: capture whatever flow mod the manager writes to the switch.
    Capture<OFFlowMod> pushedFlow = prepareForInstallTest();

    // Act.
    switchManager.installEgressIslVxlanRule(dpid, 1);

    // Assert: pushed flow matches the expected one from the reference scheme.
    assertEquals(scheme.installEgressIslVxlanRule(dpid, 1), pushedFlow.getValue());
}
// NOTE(review): auto-collapsed summary of SwitchManager. The only implemented member
// in this chunk is installTransitIslVxlanRule(dpid, port): it resolves the switch via
// lookupSwitch(dpid), builds the rule with buildTransitIslVxlanRule(ofFactory, port)
// (port only — unlike the egress builder it does not take dpid), pushes it under the
// name "--Isl transit rule for VXLAN--" + dpid, and returns the flow mod's cookie value.
// Everything after the method body is the flattened interface listing of the class
// (signatures and static constants only; bodies/initializers elided by the tool).
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener { @Override public long installTransitIslVxlanRule(DatapathId dpid, int port) throws SwitchOperationException { IOFSwitch sw = lookupSwitch(dpid); OFFactory ofFactory = sw.getOFFactory(); OFFlowMod flowMod = buildTransitIslVxlanRule(ofFactory, port); String flowName = "--Isl transit rule for VXLAN--" + dpid.toString(); pushFlow(sw, flowName, flowMod); return flowMod.getCookie().getValue(); } @Override Collection<Class<? extends IFloodlightService>> getModuleServices(); @Override Map<Class<? extends IFloodlightService>, IFloodlightService> getServiceImpls(); @Override Collection<Class<? extends IFloodlightService>> getModuleDependencies(); @Override void init(FloodlightModuleContext context); @Override void startUp(FloodlightModuleContext context); @Override @NewCorrelationContextRequired Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx); @Override String getName(); @Override void activate(DatapathId dpid); @Override void deactivate(DatapathId dpid); @Override boolean isCallbackOrderingPrereq(OFType type, String name); @Override boolean isCallbackOrderingPostreq(OFType type, String name); @Override ConnectModeRequest.Mode connectMode(final ConnectModeRequest.Mode mode); @Override List<Long> installDefaultRules(final DatapathId dpid); @Override long installIngressFlow(DatapathId dpid, DatapathId dstDpid, String flowId, Long cookie, int inputPort, int outputPort, int inputVlanId, int transitTunnelId, OutputVlanType outputVlanType, long meterId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installServer42IngressFlow( DatapathId dpid, DatapathId dstDpid, Long cookie, org.openkilda.model.MacAddress server42MacAddress, int server42Port, int outputPort, int customerPort, int inputVlanId, int transitTunnelId, OutputVlanType outputVlanType, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long 
installEgressFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort, int transitTunnelId, int outputVlanId, OutputVlanType outputVlanType, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installTransitFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort, int transitTunnelId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installOneSwitchFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort, int inputVlanId, int outputVlanId, OutputVlanType outputVlanType, long meterId, boolean multiTable); @Override void installOuterVlanMatchSharedFlow(SwitchId switchId, String flowId, FlowSharedSegmentCookie cookie); @Override List<OFFlowMod> getExpectedDefaultFlows(DatapathId dpid, boolean multiTable, boolean switchLldp, boolean switchArp); @Override List<MeterEntry> getExpectedDefaultMeters(DatapathId dpid, boolean multiTable, boolean switchLldp, boolean switchArp); @Override List<OFFlowMod> getExpectedIslFlowsForPort(DatapathId dpid, int port); @Override List<OFFlowStatsEntry> dumpFlowTable(final DatapathId dpid); @Override List<OFMeterConfig> dumpMeters(final DatapathId dpid); @Override OFMeterConfig dumpMeterById(final DatapathId dpid, final long meterId); @Override void installMeterForFlow(DatapathId dpid, long bandwidth, final long meterId); @Override void modifyMeterForFlow(DatapathId dpid, long meterId, long bandwidth); @Override Map<DatapathId, IOFSwitch> getAllSwitchMap(boolean visible); @Override void deleteMeter(final DatapathId dpid, final long meterId); @Override List<Long> deleteAllNonDefaultRules(final DatapathId dpid); @Override List<Long> deleteRulesByCriteria(DatapathId dpid, boolean multiTable, RuleType ruleType, DeleteRulesCriteria... 
criteria); @Override List<Long> deleteDefaultRules(DatapathId dpid, List<Integer> islPorts, List<Integer> flowPorts, Set<Integer> flowLldpPorts, Set<Integer> flowArpPorts, Set<Integer> server42FlowRttPorts, boolean multiTable, boolean switchLldp, boolean switchArp, boolean server42FlowRtt); @Override Long installUnicastVerificationRuleVxlan(final DatapathId dpid); @Override Long installVerificationRule(final DatapathId dpid, final boolean isBroadcast); @Override List<OFGroupDescStatsEntry> dumpGroups(DatapathId dpid); @Override void installDropFlowCustom(final DatapathId dpid, String dstMac, String dstMask, final long cookie, final int priority); @Override Long installDropFlow(final DatapathId dpid); @Override Long installDropFlowForTable(final DatapathId dpid, final int tableId, final long cookie); @Override Long installBfdCatchFlow(DatapathId dpid); @Override Long installRoundTripLatencyFlow(DatapathId dpid); @Override long installEgressIslVxlanRule(DatapathId dpid, int port); @Override long removeEgressIslVxlanRule(DatapathId dpid, int port); @Override long installTransitIslVxlanRule(DatapathId dpid, int port); @Override long removeTransitIslVxlanRule(DatapathId dpid, int port); @Override long installEgressIslVlanRule(DatapathId dpid, int port); Long installLldpTransitFlow(DatapathId dpid); @Override Long installLldpInputPreDropFlow(DatapathId dpid); @Override Long installArpTransitFlow(DatapathId dpid); @Override Long installArpInputPreDropFlow(DatapathId dpid); @Override Long installServer42InputFlow(DatapathId dpid, int server42Port, int customerPort, org.openkilda.model.MacAddress server42macAddress); @Override Long installServer42TurningFlow(DatapathId dpid); @Override Long installServer42OutputVlanFlow( DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override Long installServer42OutputVxlanFlow( DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override long 
removeEgressIslVlanRule(DatapathId dpid, int port); @Override long installIntermediateIngressRule(DatapathId dpid, int port); @Override long removeIntermediateIngressRule(DatapathId dpid, int port); @Override long removeLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeArpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeServer42InputFlow(DatapathId dpid, int port); @Override OFFlowMod buildIntermediateIngressRule(DatapathId dpid, int port); @Override long installLldpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long installLldpIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressVxlanFlow(DatapathId dpid); @Override Long installLldpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installArpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildArpInputCustomerFlow(DatapathId dpid, int port); @Override List<OFFlowMod> buildExpectedServer42Flows( DatapathId dpid, int server42Port, int server42Vlan, org.openkilda.model.MacAddress server42MacAddress, Set<Integer> customerPorts); @Override Long installArpIngressFlow(DatapathId dpid); @Override Long installArpPostIngressFlow(DatapathId dpid); @Override Long installArpPostIngressVxlanFlow(DatapathId dpid); @Override Long installArpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installPreIngressTablePassThroughDefaultRule(DatapathId dpid); @Override Long installEgressTablePassThroughDefaultRule(DatapathId dpid); @Override List<Long> installMultitableEndpointIslRules(DatapathId dpid, int port); @Override List<Long> removeMultitableEndpointIslRules(DatapathId dpid, int port); @Override Long installDropLoopRule(DatapathId dpid); @Override IOFSwitch lookupSwitch(DatapathId dpId); @Override InetAddress getSwitchIpAddress(IOFSwitch sw); @Override List<OFPortDesc> getEnabledPhysicalPorts(DatapathId 
dpId); @Override List<OFPortDesc> getPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(IOFSwitch sw); @Override void safeModeTick(); @Override void configurePort(DatapathId dpId, int portNumber, Boolean portAdminDown); @Override List<OFPortDesc> dumpPortsDescription(DatapathId dpid); @Override SwitchManagerConfig getSwitchManagerConfig(); static final long FLOW_COOKIE_MASK; static final int VERIFICATION_RULE_PRIORITY; static final int VERIFICATION_RULE_VXLAN_PRIORITY; static final int DROP_VERIFICATION_LOOP_RULE_PRIORITY; static final int CATCH_BFD_RULE_PRIORITY; static final int ROUND_TRIP_LATENCY_RULE_PRIORITY; static final int FLOW_PRIORITY; static final int ISL_EGRESS_VXLAN_RULE_PRIORITY_MULTITABLE; static final int ISL_TRANSIT_VXLAN_RULE_PRIORITY_MULTITABLE; static final int INGRESS_CUSTOMER_PORT_RULE_PRIORITY_MULTITABLE; static final int ISL_EGRESS_VLAN_RULE_PRIORITY_MULTITABLE; static final int DEFAULT_FLOW_PRIORITY; static final int MINIMAL_POSITIVE_PRIORITY; static final int SERVER_42_INPUT_PRIORITY; static final int SERVER_42_TURNING_PRIORITY; static final int SERVER_42_OUTPUT_VLAN_PRIORITY; static final int SERVER_42_OUTPUT_VXLAN_PRIORITY; static final int LLDP_INPUT_PRE_DROP_PRIORITY; static final int LLDP_TRANSIT_ISL_PRIORITY; static final int LLDP_INPUT_CUSTOMER_PRIORITY; static final int LLDP_INGRESS_PRIORITY; static final int LLDP_POST_INGRESS_PRIORITY; static final int LLDP_POST_INGRESS_VXLAN_PRIORITY; static final int LLDP_POST_INGRESS_ONE_SWITCH_PRIORITY; static final int ARP_INPUT_PRE_DROP_PRIORITY; static final int ARP_TRANSIT_ISL_PRIORITY; static final int ARP_INPUT_CUSTOMER_PRIORITY; static final int ARP_INGRESS_PRIORITY; static final int ARP_POST_INGRESS_PRIORITY; static final int ARP_POST_INGRESS_VXLAN_PRIORITY; static final int ARP_POST_INGRESS_ONE_SWITCH_PRIORITY; static final int SERVER_42_INGRESS_DEFAULT_FLOW_PRIORITY_OFFSET; static final int SERVER_42_INGRESS_DEFAULT_FLOW_PRIORITY; static final int 
BDF_DEFAULT_PORT; static final int ROUND_TRIP_LATENCY_GROUP_ID; static final MacAddress STUB_VXLAN_ETH_DST_MAC; static final IPv4Address STUB_VXLAN_IPV4_SRC; static final IPv4Address STUB_VXLAN_IPV4_DST; static final int STUB_VXLAN_UDP_SRC; static final int ARP_VXLAN_UDP_SRC; static final int SERVER_42_FORWARD_UDP_PORT; static final int SERVER_42_REVERSE_UDP_PORT; static final int VXLAN_UDP_DST; static final int ETH_SRC_OFFSET; static final int INTERNAL_ETH_SRC_OFFSET; static final int MAC_ADDRESS_SIZE_IN_BITS; static final int TABLE_1; static final int INPUT_TABLE_ID; static final int PRE_INGRESS_TABLE_ID; static final int INGRESS_TABLE_ID; static final int POST_INGRESS_TABLE_ID; static final int EGRESS_TABLE_ID; static final int TRANSIT_TABLE_ID; static final int NOVIFLOW_TIMESTAMP_SIZE_IN_BITS; }
@Test
public void installTransitIslVxlanRule() throws Exception {
    // Arrange: intercept the flow-mod that the manager will push to the mocked switch.
    Capture<OFFlowMod> pushedFlowMod = prepareForInstallTest();

    // Act: install the transit ISL VXLAN rule on port 1.
    switchManager.installTransitIslVxlanRule(dpid, 1);

    // Assert: the pushed flow-mod must equal the reference built by the scheme.
    assertEquals(scheme.installTransitIslVxlanRule(dpid, 1), pushedFlowMod.getValue());
}
// OpenKilda switch-manager Floodlight module: installs and removes default/flow
// rules, meters and groups on OpenFlow switches. Only installEgressIslVlanRule()
// carries a body in this chunk; every other member is a bare declaration whose
// body lives outside this view.
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener {
// Installs the ISL egress rule for VLAN-encapsulated traffic on the given port
// and returns the cookie of the pushed flow-mod. Throws SwitchOperationException
// (propagated from lookupSwitch/pushFlow) on switch access/push failure.
@Override public long installEgressIslVlanRule(DatapathId dpid, int port) throws SwitchOperationException { IOFSwitch sw = lookupSwitch(dpid); OFFactory ofFactory = sw.getOFFactory(); OFFlowMod flowMod = buildEgressIslVlanRule(ofFactory, port); String flowName = "--Isl egress rule for VLAN--" + dpid.toString(); pushFlow(sw, flowName, flowMod); return flowMod.getCookie().getValue(); }
// Declarations only from here on: module lifecycle, rule/meter/group install and
// removal, dump helpers, and rule-priority / table-id constants.
@Override Collection<Class<? extends IFloodlightService>> getModuleServices(); @Override Map<Class<? extends IFloodlightService>, IFloodlightService> getServiceImpls(); @Override Collection<Class<? extends IFloodlightService>> getModuleDependencies(); @Override void init(FloodlightModuleContext context); @Override void startUp(FloodlightModuleContext context); @Override @NewCorrelationContextRequired Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx); @Override String getName(); @Override void activate(DatapathId dpid); @Override void deactivate(DatapathId dpid); @Override boolean isCallbackOrderingPrereq(OFType type, String name); @Override boolean isCallbackOrderingPostreq(OFType type, String name); @Override ConnectModeRequest.Mode connectMode(final ConnectModeRequest.Mode mode); @Override List<Long> installDefaultRules(final DatapathId dpid); @Override long installIngressFlow(DatapathId dpid, DatapathId dstDpid, String flowId, Long cookie, int inputPort, int outputPort, int inputVlanId, int transitTunnelId, OutputVlanType outputVlanType, long meterId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installServer42IngressFlow( DatapathId dpid, DatapathId dstDpid, Long cookie, org.openkilda.model.MacAddress server42MacAddress, int server42Port, int outputPort, int customerPort, int inputVlanId, int transitTunnelId, OutputVlanType outputVlanType, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long 
installEgressFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort, int transitTunnelId, int outputVlanId, OutputVlanType outputVlanType, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installTransitFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort, int transitTunnelId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installOneSwitchFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort, int inputVlanId, int outputVlanId, OutputVlanType outputVlanType, long meterId, boolean multiTable); @Override void installOuterVlanMatchSharedFlow(SwitchId switchId, String flowId, FlowSharedSegmentCookie cookie); @Override List<OFFlowMod> getExpectedDefaultFlows(DatapathId dpid, boolean multiTable, boolean switchLldp, boolean switchArp); @Override List<MeterEntry> getExpectedDefaultMeters(DatapathId dpid, boolean multiTable, boolean switchLldp, boolean switchArp); @Override List<OFFlowMod> getExpectedIslFlowsForPort(DatapathId dpid, int port); @Override List<OFFlowStatsEntry> dumpFlowTable(final DatapathId dpid); @Override List<OFMeterConfig> dumpMeters(final DatapathId dpid); @Override OFMeterConfig dumpMeterById(final DatapathId dpid, final long meterId); @Override void installMeterForFlow(DatapathId dpid, long bandwidth, final long meterId); @Override void modifyMeterForFlow(DatapathId dpid, long meterId, long bandwidth); @Override Map<DatapathId, IOFSwitch> getAllSwitchMap(boolean visible); @Override void deleteMeter(final DatapathId dpid, final long meterId); @Override List<Long> deleteAllNonDefaultRules(final DatapathId dpid); @Override List<Long> deleteRulesByCriteria(DatapathId dpid, boolean multiTable, RuleType ruleType, DeleteRulesCriteria... 
criteria); @Override List<Long> deleteDefaultRules(DatapathId dpid, List<Integer> islPorts, List<Integer> flowPorts, Set<Integer> flowLldpPorts, Set<Integer> flowArpPorts, Set<Integer> server42FlowRttPorts, boolean multiTable, boolean switchLldp, boolean switchArp, boolean server42FlowRtt); @Override Long installUnicastVerificationRuleVxlan(final DatapathId dpid); @Override Long installVerificationRule(final DatapathId dpid, final boolean isBroadcast); @Override List<OFGroupDescStatsEntry> dumpGroups(DatapathId dpid); @Override void installDropFlowCustom(final DatapathId dpid, String dstMac, String dstMask, final long cookie, final int priority); @Override Long installDropFlow(final DatapathId dpid); @Override Long installDropFlowForTable(final DatapathId dpid, final int tableId, final long cookie); @Override Long installBfdCatchFlow(DatapathId dpid); @Override Long installRoundTripLatencyFlow(DatapathId dpid); @Override long installEgressIslVxlanRule(DatapathId dpid, int port); @Override long removeEgressIslVxlanRule(DatapathId dpid, int port); @Override long installTransitIslVxlanRule(DatapathId dpid, int port); @Override long removeTransitIslVxlanRule(DatapathId dpid, int port); @Override long installEgressIslVlanRule(DatapathId dpid, int port); Long installLldpTransitFlow(DatapathId dpid); @Override Long installLldpInputPreDropFlow(DatapathId dpid); @Override Long installArpTransitFlow(DatapathId dpid); @Override Long installArpInputPreDropFlow(DatapathId dpid); @Override Long installServer42InputFlow(DatapathId dpid, int server42Port, int customerPort, org.openkilda.model.MacAddress server42macAddress); @Override Long installServer42TurningFlow(DatapathId dpid); @Override Long installServer42OutputVlanFlow( DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override Long installServer42OutputVxlanFlow( DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override long 
removeEgressIslVlanRule(DatapathId dpid, int port); @Override long installIntermediateIngressRule(DatapathId dpid, int port); @Override long removeIntermediateIngressRule(DatapathId dpid, int port); @Override long removeLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeArpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeServer42InputFlow(DatapathId dpid, int port); @Override OFFlowMod buildIntermediateIngressRule(DatapathId dpid, int port); @Override long installLldpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long installLldpIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressVxlanFlow(DatapathId dpid); @Override Long installLldpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installArpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildArpInputCustomerFlow(DatapathId dpid, int port); @Override List<OFFlowMod> buildExpectedServer42Flows( DatapathId dpid, int server42Port, int server42Vlan, org.openkilda.model.MacAddress server42MacAddress, Set<Integer> customerPorts); @Override Long installArpIngressFlow(DatapathId dpid); @Override Long installArpPostIngressFlow(DatapathId dpid); @Override Long installArpPostIngressVxlanFlow(DatapathId dpid); @Override Long installArpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installPreIngressTablePassThroughDefaultRule(DatapathId dpid); @Override Long installEgressTablePassThroughDefaultRule(DatapathId dpid); @Override List<Long> installMultitableEndpointIslRules(DatapathId dpid, int port); @Override List<Long> removeMultitableEndpointIslRules(DatapathId dpid, int port); @Override Long installDropLoopRule(DatapathId dpid); @Override IOFSwitch lookupSwitch(DatapathId dpId); @Override InetAddress getSwitchIpAddress(IOFSwitch sw); @Override List<OFPortDesc> getEnabledPhysicalPorts(DatapathId 
dpId); @Override List<OFPortDesc> getPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(IOFSwitch sw); @Override void safeModeTick(); @Override void configurePort(DatapathId dpId, int portNumber, Boolean portAdminDown); @Override List<OFPortDesc> dumpPortsDescription(DatapathId dpid); @Override SwitchManagerConfig getSwitchManagerConfig(); static final long FLOW_COOKIE_MASK; static final int VERIFICATION_RULE_PRIORITY; static final int VERIFICATION_RULE_VXLAN_PRIORITY; static final int DROP_VERIFICATION_LOOP_RULE_PRIORITY; static final int CATCH_BFD_RULE_PRIORITY; static final int ROUND_TRIP_LATENCY_RULE_PRIORITY; static final int FLOW_PRIORITY; static final int ISL_EGRESS_VXLAN_RULE_PRIORITY_MULTITABLE; static final int ISL_TRANSIT_VXLAN_RULE_PRIORITY_MULTITABLE; static final int INGRESS_CUSTOMER_PORT_RULE_PRIORITY_MULTITABLE; static final int ISL_EGRESS_VLAN_RULE_PRIORITY_MULTITABLE; static final int DEFAULT_FLOW_PRIORITY; static final int MINIMAL_POSITIVE_PRIORITY; static final int SERVER_42_INPUT_PRIORITY; static final int SERVER_42_TURNING_PRIORITY; static final int SERVER_42_OUTPUT_VLAN_PRIORITY; static final int SERVER_42_OUTPUT_VXLAN_PRIORITY; static final int LLDP_INPUT_PRE_DROP_PRIORITY; static final int LLDP_TRANSIT_ISL_PRIORITY; static final int LLDP_INPUT_CUSTOMER_PRIORITY; static final int LLDP_INGRESS_PRIORITY; static final int LLDP_POST_INGRESS_PRIORITY; static final int LLDP_POST_INGRESS_VXLAN_PRIORITY; static final int LLDP_POST_INGRESS_ONE_SWITCH_PRIORITY; static final int ARP_INPUT_PRE_DROP_PRIORITY; static final int ARP_TRANSIT_ISL_PRIORITY; static final int ARP_INPUT_CUSTOMER_PRIORITY; static final int ARP_INGRESS_PRIORITY; static final int ARP_POST_INGRESS_PRIORITY; static final int ARP_POST_INGRESS_VXLAN_PRIORITY; static final int ARP_POST_INGRESS_ONE_SWITCH_PRIORITY; static final int SERVER_42_INGRESS_DEFAULT_FLOW_PRIORITY_OFFSET; static final int SERVER_42_INGRESS_DEFAULT_FLOW_PRIORITY; static final int 
BDF_DEFAULT_PORT; static final int ROUND_TRIP_LATENCY_GROUP_ID; static final MacAddress STUB_VXLAN_ETH_DST_MAC; static final IPv4Address STUB_VXLAN_IPV4_SRC; static final IPv4Address STUB_VXLAN_IPV4_DST; static final int STUB_VXLAN_UDP_SRC; static final int ARP_VXLAN_UDP_SRC; static final int SERVER_42_FORWARD_UDP_PORT; static final int SERVER_42_REVERSE_UDP_PORT; static final int VXLAN_UDP_DST; static final int ETH_SRC_OFFSET; static final int INTERNAL_ETH_SRC_OFFSET; static final int MAC_ADDRESS_SIZE_IN_BITS; static final int TABLE_1; static final int INPUT_TABLE_ID; static final int PRE_INGRESS_TABLE_ID; static final int INGRESS_TABLE_ID; static final int POST_INGRESS_TABLE_ID; static final int EGRESS_TABLE_ID; static final int TRANSIT_TABLE_ID; static final int NOVIFLOW_TIMESTAMP_SIZE_IN_BITS; }
@Test
public void installEgressIslVlanRule() throws Exception {
    // Arrange: intercept the flow-mod that the manager will push to the mocked switch.
    Capture<OFFlowMod> pushedFlowMod = prepareForInstallTest();

    // Act: install the egress ISL VLAN rule on port 1.
    switchManager.installEgressIslVlanRule(dpid, 1);

    // Assert: the pushed flow-mod must equal the reference built by the scheme.
    assertEquals(scheme.installEgressIslVlanRule(dpid, 1), pushedFlowMod.getValue());
}
// Kafka Connect JSON converter. Only toConnectData() carries a body in this
// chunk: it deserializes raw bytes into a JsonNode, enforces (or, with schemas
// disabled, synthesizes) the { "schema": ..., "payload": ... } envelope, and
// delegates to jsonToConnect().
JsonConverter implements Converter {
// Converts serialized JSON bytes from the given topic into a SchemaAndValue.
// Throws DataException when deserialization fails, or when schemas are enabled
// and the value is not a two-field { "schema", "payload" } envelope object.
@Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); }
// Declarations only below (bodies live outside this view).
@Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
// JsonConverter.toConnectData() test suite: schema-metadata translation,
// every primitive type (boolean/int8..int64/float/double/bytes/string),
// arrays, maps (string and non-string keys), structs, schemaless payloads,
// logical types (Decimal/Date/Time/Timestamp, incl. optional and default
// variants), null handling, and schema-cache behavior.
// NOTE(review): line wrapping below is preserved byte-for-byte from the
// original chunk (some wraps fall inside string literals).
@Test public void testConnectSchemaMetadataTranslation() { assertEquals(new SchemaAndValue(Schema.BOOLEAN_SCHEMA, true), converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"boolean\" }, \"payload\": true }".getBytes())); assertEquals(new SchemaAndValue(Schema.OPTIONAL_BOOLEAN_SCHEMA, null), converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"boolean\", \"optional\": true }, \"payload\": null }".getBytes())); assertEquals(new SchemaAndValue(SchemaBuilder.bool().defaultValue(true).build(), true), converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"boolean\", \"default\": true }, \"payload\": null }".getBytes())); assertEquals(new SchemaAndValue(SchemaBuilder.bool().required().name("bool").version(2).doc("the documentation").parameter("foo", "bar").build(), true), converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"boolean\", \"optional\": false, \"name\": \"bool\", \"version\": 2, \"doc\": \"the documentation\", \"parameters\": { \"foo\": \"bar\" }}, \"payload\": true }".getBytes())); } @Test public void arrayToConnect() { byte[] arrayJson = "{ \"schema\": { \"type\": \"array\", \"items\": { \"type\" : \"int32\" } }, \"payload\": [1, 2, 3] }".getBytes(); assertEquals(new SchemaAndValue(SchemaBuilder.array(Schema.INT32_SCHEMA).build(), Arrays.asList(1, 2, 3)), converter.toConnectData(TOPIC, arrayJson)); } @Test public void mapToConnectStringKeys() { byte[] mapJson = "{ \"schema\": { \"type\": \"map\", \"keys\": { \"type\" : \"string\" }, \"values\": { \"type\" : \"int32\" } }, \"payload\": { \"key1\": 12, \"key2\": 15} }".getBytes(); Map<String, Integer> expected = new HashMap<>(); expected.put("key1", 12); expected.put("key2", 15); assertEquals(new SchemaAndValue(SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.INT32_SCHEMA).build(), expected), converter.toConnectData(TOPIC, mapJson)); } @Test public void mapToConnectNonStringKeys() { byte[] mapJson = "{ \"schema\": { \"type\": \"map\", \"keys\": { \"type\" : \"int32\" }, \"values\": { 
\"type\" : \"int32\" } }, \"payload\": [ [1, 12], [2, 15] ] }".getBytes(); Map<Integer, Integer> expected = new HashMap<>(); expected.put(1, 12); expected.put(2, 15); assertEquals(new SchemaAndValue(SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.INT32_SCHEMA).build(), expected), converter.toConnectData(TOPIC, mapJson)); } @Test public void structToConnect() { byte[] structJson = "{ \"schema\": { \"type\": \"struct\", \"fields\": [{ \"field\": \"field1\", \"type\": \"boolean\" }, { \"field\": \"field2\", \"type\": \"string\" }] }, \"payload\": { \"field1\": true, \"field2\": \"string\" } }".getBytes(); Schema expectedSchema = SchemaBuilder.struct().field("field1", Schema.BOOLEAN_SCHEMA).field("field2", Schema.STRING_SCHEMA).build(); Struct expected = new Struct(expectedSchema).put("field1", true).put("field2", "string"); SchemaAndValue converted = converter.toConnectData(TOPIC, structJson); assertEquals(new SchemaAndValue(expectedSchema, expected), converted); } @Test(expected = DataException.class) public void nullToConnect() { assertEquals(SchemaAndValue.NULL, converter.toConnectData(TOPIC, null)); } @Test public void nullSchemaPrimitiveToConnect() { SchemaAndValue converted = converter.toConnectData(TOPIC, "{ \"schema\": null, \"payload\": null }".getBytes()); assertEquals(SchemaAndValue.NULL, converted); converted = converter.toConnectData(TOPIC, "{ \"schema\": null, \"payload\": true }".getBytes()); assertEquals(new SchemaAndValue(null, true), converted); converted = converter.toConnectData(TOPIC, "{ \"schema\": null, \"payload\": 12 }".getBytes()); assertEquals(new SchemaAndValue(null, 12L), converted); converted = converter.toConnectData(TOPIC, "{ \"schema\": null, \"payload\": 12.24 }".getBytes()); assertEquals(new SchemaAndValue(null, 12.24), converted); converted = converter.toConnectData(TOPIC, "{ \"schema\": null, \"payload\": \"a string\" }".getBytes()); assertEquals(new SchemaAndValue(null, "a string"), converted); converted = 
converter.toConnectData(TOPIC, "{ \"schema\": null, \"payload\": [1, \"2\", 3] }".getBytes()); assertEquals(new SchemaAndValue(null, Arrays.asList(1L, "2", 3L)), converted); converted = converter.toConnectData(TOPIC, "{ \"schema\": null, \"payload\": { \"field1\": 1, \"field2\": 2} }".getBytes()); Map<String, Long> obj = new HashMap<>(); obj.put("field1", 1L); obj.put("field2", 2L); assertEquals(new SchemaAndValue(null, obj), converted); } @Test public void decimalToConnect() { Schema schema = Decimal.schema(2); BigDecimal reference = new BigDecimal(new BigInteger("156"), 2); String msg = "{ \"schema\": { \"type\": \"bytes\", \"name\": \"org.apache.kafka.connect.data.Decimal\", \"version\": 1, \"parameters\": { \"scale\": \"2\" } }, \"payload\": \"AJw=\" }"; SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes()); BigDecimal converted = (BigDecimal) schemaAndValue.value(); assertEquals(schema, schemaAndValue.schema()); assertEquals(reference, converted); } @Test public void decimalToConnectOptional() { Schema schema = Decimal.builder(2).optional().schema(); String msg = "{ \"schema\": { \"type\": \"bytes\", \"name\": \"org.apache.kafka.connect.data.Decimal\", \"version\": 1, \"optional\": true, \"parameters\": { \"scale\": \"2\" } }, \"payload\": null }"; SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes()); assertEquals(schema, schemaAndValue.schema()); assertNull(schemaAndValue.value()); } @Test public void decimalToConnectWithDefaultValue() { BigDecimal reference = new BigDecimal(new BigInteger("156"), 2); Schema schema = Decimal.builder(2).defaultValue(reference).build(); String msg = "{ \"schema\": { \"type\": \"bytes\", \"name\": \"org.apache.kafka.connect.data.Decimal\", \"version\": 1, \"default\": \"AJw=\", \"parameters\": { \"scale\": \"2\" } }, \"payload\": null }"; SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes()); assertEquals(schema, schemaAndValue.schema()); 
assertEquals(reference, schemaAndValue.value()); } @Test public void decimalToConnectOptionalWithDefaultValue() { BigDecimal reference = new BigDecimal(new BigInteger("156"), 2); Schema schema = Decimal.builder(2).optional().defaultValue(reference).build(); String msg = "{ \"schema\": { \"type\": \"bytes\", \"name\": \"org.apache.kafka.connect.data.Decimal\", \"version\": 1, \"optional\": true, \"default\": \"AJw=\", \"parameters\": { \"scale\": \"2\" } }, \"payload\": null }"; SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes()); assertEquals(schema, schemaAndValue.schema()); assertEquals(reference, schemaAndValue.value()); } @Test public void booleanToConnect() { assertEquals(new SchemaAndValue(Schema.BOOLEAN_SCHEMA, true), converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"boolean\" }, \"payload\": true }".getBytes())); assertEquals(new SchemaAndValue(Schema.BOOLEAN_SCHEMA, false), converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"boolean\" }, \"payload\": false }".getBytes())); } @Test public void dateToConnect() { Schema schema = Date.SCHEMA; GregorianCalendar calendar = new GregorianCalendar(1970, Calendar.JANUARY, 1, 0, 0, 0); calendar.setTimeZone(TimeZone.getTimeZone("UTC")); calendar.add(Calendar.DATE, 10000); java.util.Date reference = calendar.getTime(); String msg = "{ \"schema\": { \"type\": \"int32\", \"name\": \"org.apache.kafka.connect.data.Date\", \"version\": 1 }, \"payload\": 10000 }"; SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes()); java.util.Date converted = (java.util.Date) schemaAndValue.value(); assertEquals(schema, schemaAndValue.schema()); assertEquals(reference, converted); } @Test public void dateToConnectOptional() { Schema schema = Date.builder().optional().schema(); String msg = "{ \"schema\": { \"type\": \"int32\", \"name\": \"org.apache.kafka.connect.data.Date\", \"version\": 1, \"optional\": true }, \"payload\": null }"; SchemaAndValue schemaAndValue = 
converter.toConnectData(TOPIC, msg.getBytes()); assertEquals(schema, schemaAndValue.schema()); assertNull(schemaAndValue.value()); } @Test public void dateToConnectWithDefaultValue() { java.util.Date reference = new java.util.Date(0); Schema schema = Date.builder().defaultValue(reference).schema(); String msg = "{ \"schema\": { \"type\": \"int32\", \"name\": \"org.apache.kafka.connect.data.Date\", \"version\": 1, \"default\": 0 }, \"payload\": null }"; SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes()); assertEquals(schema, schemaAndValue.schema()); assertEquals(reference, schemaAndValue.value()); } @Test public void dateToConnectOptionalWithDefaultValue() { java.util.Date reference = new java.util.Date(0); Schema schema = Date.builder().optional().defaultValue(reference).schema(); String msg = "{ \"schema\": { \"type\": \"int32\", \"name\": \"org.apache.kafka.connect.data.Date\", \"version\": 1, \"optional\": true, \"default\": 0 }, \"payload\": null }"; SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes()); assertEquals(schema, schemaAndValue.schema()); assertEquals(reference, schemaAndValue.value()); } @Test public void timeToConnect() { Schema schema = Time.SCHEMA; GregorianCalendar calendar = new GregorianCalendar(1970, Calendar.JANUARY, 1, 0, 0, 0); calendar.setTimeZone(TimeZone.getTimeZone("UTC")); calendar.add(Calendar.MILLISECOND, 14400000); java.util.Date reference = calendar.getTime(); String msg = "{ \"schema\": { \"type\": \"int32\", \"name\": \"org.apache.kafka.connect.data.Time\", \"version\": 1 }, \"payload\": 14400000 }"; SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes()); java.util.Date converted = (java.util.Date) schemaAndValue.value(); assertEquals(schema, schemaAndValue.schema()); assertEquals(reference, converted); } @Test public void timeToConnectOptional() { Schema schema = Time.builder().optional().schema(); String msg = "{ \"schema\": { \"type\": \"int32\", 
\"name\": \"org.apache.kafka.connect.data.Time\", \"version\": 1, \"optional\": true }, \"payload\": null }"; SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes()); assertEquals(schema, schemaAndValue.schema()); assertNull(schemaAndValue.value()); } @Test public void timeToConnectWithDefaultValue() { java.util.Date reference = new java.util.Date(0); Schema schema = Time.builder().defaultValue(reference).schema(); String msg = "{ \"schema\": { \"type\": \"int32\", \"name\": \"org.apache.kafka.connect.data.Time\", \"version\": 1, \"default\": 0 }, \"payload\": null }"; SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes()); assertEquals(schema, schemaAndValue.schema()); assertEquals(reference, schemaAndValue.value()); } @Test public void timeToConnectOptionalWithDefaultValue() { java.util.Date reference = new java.util.Date(0); Schema schema = Time.builder().optional().defaultValue(reference).schema(); String msg = "{ \"schema\": { \"type\": \"int32\", \"name\": \"org.apache.kafka.connect.data.Time\", \"version\": 1, \"optional\": true, \"default\": 0 }, \"payload\": null }"; SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes()); assertEquals(schema, schemaAndValue.schema()); assertEquals(reference, schemaAndValue.value()); } @Test public void timestampToConnect() { Schema schema = Timestamp.SCHEMA; GregorianCalendar calendar = new GregorianCalendar(1970, Calendar.JANUARY, 1, 0, 0, 0); calendar.setTimeZone(TimeZone.getTimeZone("UTC")); calendar.add(Calendar.MILLISECOND, 2000000000); calendar.add(Calendar.MILLISECOND, 2000000000); java.util.Date reference = calendar.getTime(); String msg = "{ \"schema\": { \"type\": \"int64\", \"name\": \"org.apache.kafka.connect.data.Timestamp\", \"version\": 1 }, \"payload\": 4000000000 }"; SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes()); java.util.Date converted = (java.util.Date) schemaAndValue.value(); assertEquals(schema, 
schemaAndValue.schema()); assertEquals(reference, converted); } @Test public void timestampToConnectOptional() { Schema schema = Timestamp.builder().optional().schema(); String msg = "{ \"schema\": { \"type\": \"int64\", \"name\": \"org.apache.kafka.connect.data.Timestamp\", \"version\": 1, \"optional\": true }, \"payload\": null }"; SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes()); assertEquals(schema, schemaAndValue.schema()); assertNull(schemaAndValue.value()); } @Test public void byteToConnect() { assertEquals(new SchemaAndValue(Schema.INT8_SCHEMA, (byte) 12), converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"int8\" }, \"payload\": 12 }".getBytes())); } @Test public void timestampToConnectWithDefaultValue() { Schema schema = Timestamp.builder().defaultValue(new java.util.Date(42)).schema(); String msg = "{ \"schema\": { \"type\": \"int64\", \"name\": \"org.apache.kafka.connect.data.Timestamp\", \"version\": 1, \"default\": 42 }, \"payload\": null }"; SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes()); assertEquals(schema, schemaAndValue.schema()); assertEquals(new java.util.Date(42), schemaAndValue.value()); } @Test public void timestampToConnectOptionalWithDefaultValue() { Schema schema = Timestamp.builder().optional().defaultValue(new java.util.Date(42)).schema(); String msg = "{ \"schema\": { \"type\": \"int64\", \"name\": \"org.apache.kafka.connect.data.Timestamp\", \"version\": 1, \"optional\": true, \"default\": 42 }, \"payload\": null }"; SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes()); assertEquals(schema, schemaAndValue.schema()); assertEquals(new java.util.Date(42), schemaAndValue.value()); } @Test public void testCacheSchemaToConnectConversion() { Cache<JsonNode, Schema> cache = Whitebox.getInternalState(converter, "toConnectSchemaCache"); assertEquals(0, cache.size()); converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"boolean\" }, 
\"payload\": true }".getBytes()); assertEquals(1, cache.size()); converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"boolean\" }, \"payload\": true }".getBytes()); assertEquals(1, cache.size()); converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"boolean\", \"optional\": true }, \"payload\": true }".getBytes()); assertEquals(2, cache.size()); converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"boolean\", \"optional\": false }, \"payload\": true }".getBytes()); assertEquals(3, cache.size()); } @Test public void shortToConnect() { assertEquals(new SchemaAndValue(Schema.INT16_SCHEMA, (short) 12), converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"int16\" }, \"payload\": 12 }".getBytes())); } @Test public void intToConnect() { assertEquals(new SchemaAndValue(Schema.INT32_SCHEMA, 12), converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"int32\" }, \"payload\": 12 }".getBytes())); } @Test public void longToConnect() { assertEquals(new SchemaAndValue(Schema.INT64_SCHEMA, 12L), converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"int64\" }, \"payload\": 12 }".getBytes())); assertEquals(new SchemaAndValue(Schema.INT64_SCHEMA, 4398046511104L), converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"int64\" }, \"payload\": 4398046511104 }".getBytes())); } @Test public void floatToConnect() { assertEquals(new SchemaAndValue(Schema.FLOAT32_SCHEMA, 12.34f), converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"float\" }, \"payload\": 12.34 }".getBytes())); } @Test public void doubleToConnect() { assertEquals(new SchemaAndValue(Schema.FLOAT64_SCHEMA, 12.34), converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"double\" }, \"payload\": 12.34 }".getBytes())); } @Test public void bytesToConnect() throws UnsupportedEncodingException { ByteBuffer reference = ByteBuffer.wrap("test-string".getBytes("UTF-8")); String msg = "{ \"schema\": { \"type\": \"bytes\" }, \"payload\": \"dGVzdC1zdHJpbmc=\" }"; SchemaAndValue 
schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes()); ByteBuffer converted = ByteBuffer.wrap((byte[]) schemaAndValue.value()); assertEquals(reference, converted); } @Test public void stringToConnect() { assertEquals(new SchemaAndValue(Schema.STRING_SCHEMA, "foo-bar-baz"), converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"string\" }, \"payload\": \"foo-bar-baz\" }".getBytes())); }
// Kafka Streams sticky task assignor: distributes tasks across clients.
// assign() runs in two ordered passes: active tasks first, then the requested
// number of standby replicas (presumably so standby placement can account for
// the active assignment — confirm in assignStandby's body, outside this view).
StickyTaskAssignor implements TaskAssignor<ID, TaskId> {
// Entry point: place active tasks, then numStandbyReplicas standby copies.
@Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); }
// Declarations only below (bodies live outside this view).
StickyTaskAssignor(final Map<ID, ClientState> clients, final Set<TaskId> taskIds); @Override void assign(final int numStandbyReplicas); }
// Test suite for StickyTaskAssignor. Covers: capacity-weighted balancing, minimal task
// movement when clients join/leave, stickiness of previously-active tasks, placement of
// brand-new tasks on new/bounced clients, standby-replica placement (replicas never
// co-located with their active task), and that no two hosts end up with identical
// assignments. Helpers createClient/createClientWithPreviousActiveTasks/createTaskAssignor
// and the p1..p4/task00..task05 fixtures are defined outside this excerpt.
@Test public void shouldRebalanceTasksToClientsBasedOnCapacity() throws Exception { createClientWithPreviousActiveTasks(p2, 1, task00, task03, task02); createClient(p3, 2); final StickyTaskAssignor<Integer> taskAssignor = createTaskAssignor(task00, task02, task03); taskAssignor.assign(0); assertThat(clients.get(p2).assignedTaskCount(), equalTo(1)); assertThat(clients.get(p3).assignedTaskCount(), equalTo(2)); } @Test public void shouldMoveMinimalNumberOfTasksWhenPreviouslyAboveCapacityAndNewClientAdded() throws Exception { final Set<TaskId> p1PrevTasks = Utils.mkSet(task00, task02); final Set<TaskId> p2PrevTasks = Utils.mkSet(task01, task03); createClientWithPreviousActiveTasks(p1, 1, task00, task02); createClientWithPreviousActiveTasks(p2, 1, task01, task03); createClientWithPreviousActiveTasks(p3, 1); final StickyTaskAssignor<Integer> taskAssignor = createTaskAssignor(task00, task02, task01, task03); taskAssignor.assign(0); final Set<TaskId> p3ActiveTasks = clients.get(p3).activeTasks(); assertThat(p3ActiveTasks.size(), equalTo(1)); if (p1PrevTasks.removeAll(p3ActiveTasks)) { assertThat(clients.get(p2).activeTasks(), equalTo(p2PrevTasks)); } else { assertThat(clients.get(p1).activeTasks(), equalTo(p1PrevTasks)); } } @Test public void shouldNotMoveAnyTasksWhenNewTasksAdded() throws Exception { createClientWithPreviousActiveTasks(p1, 1, task00, task01); createClientWithPreviousActiveTasks(p2, 1, task02, task03); final StickyTaskAssignor<Integer> taskAssignor = createTaskAssignor(task03, task01, task04, task02, task00, task05); taskAssignor.assign(0); assertThat(clients.get(p1).activeTasks(), hasItems(task00, task01)); assertThat(clients.get(p2).activeTasks(), hasItems(task02, task03)); } @Test public void shouldAssignNewTasksToNewClientWhenPreviousTasksAssignedToOldClients() throws Exception { createClientWithPreviousActiveTasks(p1, 1, task02, task01); createClientWithPreviousActiveTasks(p2, 1, task00, task03); createClient(p3, 1); final StickyTaskAssignor<Integer> 
taskAssignor = createTaskAssignor(task03, task01, task04, task02, task00, task05); taskAssignor.assign(0); assertThat(clients.get(p1).activeTasks(), hasItems(task02, task01)); assertThat(clients.get(p2).activeTasks(), hasItems(task00, task03)); assertThat(clients.get(p3).activeTasks(), hasItems(task04, task05)); } @Test public void shouldAssignTasksNotPreviouslyActiveToNewClient() throws Exception { final TaskId task10 = new TaskId(0, 10); final TaskId task11 = new TaskId(0, 11); final TaskId task12 = new TaskId(1, 2); final TaskId task13 = new TaskId(1, 3); final TaskId task20 = new TaskId(2, 0); final TaskId task21 = new TaskId(2, 1); final TaskId task22 = new TaskId(2, 2); final TaskId task23 = new TaskId(2, 3); final ClientState c1 = createClientWithPreviousActiveTasks(p1, 1, task01, task12, task13); c1.addPreviousStandbyTasks(Utils.mkSet(task00, task11, task20, task21, task23)); final ClientState c2 = createClientWithPreviousActiveTasks(p2, 1, task00, task11, task22); c2.addPreviousStandbyTasks(Utils.mkSet(task01, task10, task02, task20, task03, task12, task21, task13, task23)); final ClientState c3 = createClientWithPreviousActiveTasks(p3, 1, task20, task21, task23); c3.addPreviousStandbyTasks(Utils.mkSet(task02, task12)); final ClientState newClient = createClient(p4, 1); newClient.addPreviousStandbyTasks(Utils.mkSet(task00, task10, task01, task02, task11, task20, task03, task12, task21, task13, task22, task23)); final StickyTaskAssignor<Integer> taskAssignor = createTaskAssignor(task00, task10, task01, task02, task11, task20, task03, task12, task21, task13, task22, task23); taskAssignor.assign(0); assertThat(c1.activeTasks(), equalTo(Utils.mkSet(task01, task12, task13))); assertThat(c2.activeTasks(), equalTo(Utils.mkSet(task00, task11, task22))); assertThat(c3.activeTasks(), equalTo(Utils.mkSet(task20, task21, task23))); assertThat(newClient.activeTasks(), equalTo(Utils.mkSet(task02, task03, task10))); } @Test public void 
shouldAssignTasksNotPreviouslyActiveToMultipleNewClients() throws Exception { final TaskId task10 = new TaskId(0, 10); final TaskId task11 = new TaskId(0, 11); final TaskId task12 = new TaskId(1, 2); final TaskId task13 = new TaskId(1, 3); final TaskId task20 = new TaskId(2, 0); final TaskId task21 = new TaskId(2, 1); final TaskId task22 = new TaskId(2, 2); final TaskId task23 = new TaskId(2, 3); final ClientState c1 = createClientWithPreviousActiveTasks(p1, 1, task01, task12, task13); c1.addPreviousStandbyTasks(Utils.mkSet(task00, task11, task20, task21, task23)); final ClientState c2 = createClientWithPreviousActiveTasks(p2, 1, task00, task11, task22); c2.addPreviousStandbyTasks(Utils.mkSet(task01, task10, task02, task20, task03, task12, task21, task13, task23)); final ClientState bounce1 = createClient(p3, 1); bounce1.addPreviousStandbyTasks(Utils.mkSet(task20, task21, task23)); final ClientState bounce2 = createClient(p4, 1); bounce2.addPreviousStandbyTasks(Utils.mkSet(task02, task03, task10)); final StickyTaskAssignor<Integer> taskAssignor = createTaskAssignor(task00, task10, task01, task02, task11, task20, task03, task12, task21, task13, task22, task23); taskAssignor.assign(0); assertThat(c1.activeTasks(), equalTo(Utils.mkSet(task01, task12, task13))); assertThat(c2.activeTasks(), equalTo(Utils.mkSet(task00, task11, task22))); assertThat(bounce1.activeTasks(), equalTo(Utils.mkSet(task20, task21, task23))); assertThat(bounce2.activeTasks(), equalTo(Utils.mkSet(task02, task03, task10))); } @Test public void shouldAssignTasksToNewClient() throws Exception { createClientWithPreviousActiveTasks(p1, 1, task01, task02); createClient(p2, 1); createTaskAssignor(task01, task02).assign(0); assertThat(clients.get(p1).activeTaskCount(), equalTo(1)); } @Test public void shouldAssignTasksToNewClientWithoutFlippingAssignmentBetweenExistingClients() throws Exception { final ClientState c1 = createClientWithPreviousActiveTasks(p1, 1, task00, task01, task02); final ClientState 
c2 = createClientWithPreviousActiveTasks(p2, 1, task03, task04, task05); final ClientState newClient = createClient(p3, 1); final StickyTaskAssignor<Integer> taskAssignor = createTaskAssignor(task00, task01, task02, task03, task04, task05); taskAssignor.assign(0); assertThat(c1.activeTasks(), not(hasItem(task03))); assertThat(c1.activeTasks(), not(hasItem(task04))); assertThat(c1.activeTasks(), not(hasItem(task05))); assertThat(c1.activeTaskCount(), equalTo(2)); assertThat(c2.activeTasks(), not(hasItems(task00))); assertThat(c2.activeTasks(), not(hasItems(task01))); assertThat(c2.activeTasks(), not(hasItems(task02))); assertThat(c2.activeTaskCount(), equalTo(2)); assertThat(newClient.activeTaskCount(), equalTo(2)); } @Test public void shouldAssignTasksToNewClientWithoutFlippingAssignmentBetweenExistingAndBouncedClients() throws Exception { final TaskId task06 = new TaskId(0, 6); final ClientState c1 = createClientWithPreviousActiveTasks(p1, 1, task00, task01, task02, task06); final ClientState c2 = createClient(p2, 1); c2.addPreviousStandbyTasks(Utils.mkSet(task03, task04, task05)); final ClientState newClient = createClient(p3, 1); final StickyTaskAssignor<Integer> taskAssignor = createTaskAssignor(task00, task01, task02, task03, task04, task05, task06); taskAssignor.assign(0); assertThat(c1.activeTasks(), not(hasItem(task03))); assertThat(c1.activeTasks(), not(hasItem(task04))); assertThat(c1.activeTasks(), not(hasItem(task05))); assertThat(c1.activeTaskCount(), equalTo(3)); assertThat(c2.activeTasks(), not(hasItems(task00))); assertThat(c2.activeTasks(), not(hasItems(task01))); assertThat(c2.activeTasks(), not(hasItems(task02))); assertThat(c2.activeTaskCount(), equalTo(2)); assertThat(newClient.activeTaskCount(), equalTo(2)); } @Test public void shouldAssignOneActiveTaskToEachProcessWhenTaskCountSameAsProcessCount() throws Exception { createClient(p1, 1); createClient(p2, 1); createClient(p3, 1); final StickyTaskAssignor taskAssignor = 
createTaskAssignor(task00, task01, task02); taskAssignor.assign(0); for (final Integer processId : clients.keySet()) { assertThat(clients.get(processId).activeTaskCount(), equalTo(1)); } } @Test public void shouldNotMigrateActiveTaskToOtherProcess() throws Exception { createClientWithPreviousActiveTasks(p1, 1, task00); createClientWithPreviousActiveTasks(p2, 1, task01); final StickyTaskAssignor firstAssignor = createTaskAssignor(task00, task01, task02); firstAssignor.assign(0); assertThat(clients.get(p1).activeTasks(), hasItems(task00)); assertThat(clients.get(p2).activeTasks(), hasItems(task01)); assertThat(allActiveTasks(), equalTo(Arrays.asList(task00, task01, task02))); clients.clear(); createClientWithPreviousActiveTasks(p1, 1, task01); createClientWithPreviousActiveTasks(p2, 1, task02); final StickyTaskAssignor secondAssignor = createTaskAssignor(task00, task01, task02); secondAssignor.assign(0); assertThat(clients.get(p1).activeTasks(), hasItems(task01)); assertThat(clients.get(p2).activeTasks(), hasItems(task02)); assertThat(allActiveTasks(), equalTo(Arrays.asList(task00, task01, task02))); } @Test public void shouldMigrateActiveTasksToNewProcessWithoutChangingAllAssignments() throws Exception { createClientWithPreviousActiveTasks(p1, 1, task00, task02); createClientWithPreviousActiveTasks(p2, 1, task01); createClient(p3, 1); final StickyTaskAssignor taskAssignor = createTaskAssignor(task00, task01, task02); taskAssignor.assign(0); assertThat(clients.get(p2).activeTasks(), equalTo(Collections.singleton(task01))); assertThat(clients.get(p1).activeTasks().size(), equalTo(1)); assertThat(clients.get(p3).activeTasks().size(), equalTo(1)); assertThat(allActiveTasks(), equalTo(Arrays.asList(task00, task01, task02))); } @Test public void shouldAssignBasedOnCapacity() throws Exception { createClient(p1, 1); createClient(p2, 2); final StickyTaskAssignor taskAssignor = createTaskAssignor(task00, task01, task02); taskAssignor.assign(0); 
assertThat(clients.get(p1).activeTasks().size(), equalTo(1)); assertThat(clients.get(p2).activeTasks().size(), equalTo(2)); } @Test public void shouldKeepActiveTaskStickynessWhenMoreClientThanActiveTasks() { final int p5 = 5; createClientWithPreviousActiveTasks(p1, 1, task00); createClientWithPreviousActiveTasks(p2, 1, task02); createClientWithPreviousActiveTasks(p3, 1, task01); createClient(p4, 1); createClient(p5, 1); final StickyTaskAssignor taskAssignor = createTaskAssignor(task00, task01, task02); taskAssignor.assign(0); assertThat(clients.get(p1).activeTasks(), equalTo(Collections.singleton(task00))); assertThat(clients.get(p2).activeTasks(), equalTo(Collections.singleton(task02))); assertThat(clients.get(p3).activeTasks(), equalTo(Collections.singleton(task01))); clients.clear(); createClient(p1, 1); createClientWithPreviousActiveTasks(p2, 1, task00); createClient(p3, 1); createClientWithPreviousActiveTasks(p4, 1, task02); createClientWithPreviousActiveTasks(p5, 1, task01); final StickyTaskAssignor secondAssignor = createTaskAssignor(task00, task01, task02); secondAssignor.assign(0); assertThat(clients.get(p2).activeTasks(), equalTo(Collections.singleton(task00))); assertThat(clients.get(p4).activeTasks(), equalTo(Collections.singleton(task02))); assertThat(clients.get(p5).activeTasks(), equalTo(Collections.singleton(task01))); } @Test public void shouldAssignTasksToClientWithPreviousStandbyTasks() throws Exception { final ClientState client1 = createClient(p1, 1); client1.addPreviousStandbyTasks(Utils.mkSet(task02)); final ClientState client2 = createClient(p2, 1); client2.addPreviousStandbyTasks(Utils.mkSet(task01)); final ClientState client3 = createClient(p3, 1); client3.addPreviousStandbyTasks(Utils.mkSet(task00)); final StickyTaskAssignor taskAssignor = createTaskAssignor(task00, task01, task02); taskAssignor.assign(0); assertThat(clients.get(p1).activeTasks(), equalTo(Collections.singleton(task02))); assertThat(clients.get(p2).activeTasks(), 
equalTo(Collections.singleton(task01))); assertThat(clients.get(p3).activeTasks(), equalTo(Collections.singleton(task00))); } @Test public void shouldAssignBasedOnCapacityWhenMultipleClientHaveStandbyTasks() throws Exception { final ClientState c1 = createClientWithPreviousActiveTasks(p1, 1, task00); c1.addPreviousStandbyTasks(Utils.mkSet(task01)); final ClientState c2 = createClientWithPreviousActiveTasks(p2, 2, task02); c2.addPreviousStandbyTasks(Utils.mkSet(task01)); final StickyTaskAssignor taskAssignor = createTaskAssignor(task00, task01, task02); taskAssignor.assign(0); assertThat(clients.get(p1).activeTasks(), equalTo(Collections.singleton(task00))); assertThat(clients.get(p2).activeTasks(), equalTo(Utils.mkSet(task02, task01))); } @Test public void shouldAssignStandbyTasksToDifferentClientThanCorrespondingActiveTaskIsAssingedTo() throws Exception { createClientWithPreviousActiveTasks(p1, 1, task00); createClientWithPreviousActiveTasks(p2, 1, task01); createClientWithPreviousActiveTasks(p3, 1, task02); createClientWithPreviousActiveTasks(p4, 1, task03); final StickyTaskAssignor taskAssignor = createTaskAssignor(task00, task01, task02, task03); taskAssignor.assign(1); assertThat(clients.get(p1).standbyTasks(), not(hasItems(task00))); assertTrue(clients.get(p1).standbyTasks().size() <= 2); assertThat(clients.get(p2).standbyTasks(), not(hasItems(task01))); assertTrue(clients.get(p2).standbyTasks().size() <= 2); assertThat(clients.get(p3).standbyTasks(), not(hasItems(task02))); assertTrue(clients.get(p3).standbyTasks().size() <= 2); assertThat(clients.get(p4).standbyTasks(), not(hasItems(task03))); assertTrue(clients.get(p4).standbyTasks().size() <= 2); int nonEmptyStandbyTaskCount = 0; for (final Integer client : clients.keySet()) { nonEmptyStandbyTaskCount += clients.get(client).standbyTasks().isEmpty() ? 
0 : 1; } assertTrue(nonEmptyStandbyTaskCount >= 3); assertThat(allStandbyTasks(), equalTo(Arrays.asList(task00, task01, task02, task03))); } @Test public void shouldAssignMultipleReplicasOfStandbyTask() throws Exception { createClientWithPreviousActiveTasks(p1, 1, task00); createClientWithPreviousActiveTasks(p2, 1, task01); createClientWithPreviousActiveTasks(p3, 1, task02); final StickyTaskAssignor taskAssignor = createTaskAssignor(task00, task01, task02); taskAssignor.assign(2); assertThat(clients.get(p1).standbyTasks(), equalTo(Utils.mkSet(task01, task02))); assertThat(clients.get(p2).standbyTasks(), equalTo(Utils.mkSet(task02, task00))); assertThat(clients.get(p3).standbyTasks(), equalTo(Utils.mkSet(task00, task01))); } @Test public void shouldNotAssignStandbyTaskReplicasWhenNoClientAvailableWithoutHavingTheTaskAssigned() throws Exception { createClient(p1, 1); final StickyTaskAssignor taskAssignor = createTaskAssignor(task00); taskAssignor.assign(1); assertThat(clients.get(p1).standbyTasks().size(), equalTo(0)); } @Test public void shouldAssignActiveAndStandbyTasks() throws Exception { createClient(p1, 1); createClient(p2, 1); createClient(p3, 1); final StickyTaskAssignor<Integer> taskAssignor = createTaskAssignor(task00, task01, task02); taskAssignor.assign(1); assertThat(allActiveTasks(), equalTo(Arrays.asList(task00, task01, task02))); assertThat(allStandbyTasks(), equalTo(Arrays.asList(task00, task01, task02))); } @Test public void shouldAssignAtLeastOneTaskToEachClientIfPossible() throws Exception { createClient(p1, 3); createClient(p2, 1); createClient(p3, 1); final StickyTaskAssignor<Integer> taskAssignor = createTaskAssignor(task00, task01, task02); taskAssignor.assign(0); assertThat(clients.get(p1).assignedTaskCount(), equalTo(1)); assertThat(clients.get(p2).assignedTaskCount(), equalTo(1)); assertThat(clients.get(p3).assignedTaskCount(), equalTo(1)); } @Test public void shouldAssignEachActiveTaskToOneClientWhenMoreClientsThanTasks() throws Exception 
{ createClient(p1, 1); createClient(p2, 1); createClient(p3, 1); createClient(p4, 1); createClient(5, 1); createClient(6, 1); final StickyTaskAssignor<Integer> taskAssignor = createTaskAssignor(task00, task01, task02); taskAssignor.assign(0); assertThat(allActiveTasks(), equalTo(Arrays.asList(task00, task01, task02))); } @Test public void shouldBalanceActiveAndStandbyTasksAcrossAvailableClients() throws Exception { createClient(p1, 1); createClient(p2, 1); createClient(p3, 1); createClient(p4, 1); createClient(5, 1); createClient(6, 1); final StickyTaskAssignor<Integer> taskAssignor = createTaskAssignor(task00, task01, task02); taskAssignor.assign(1); for (final ClientState clientState : clients.values()) { assertThat(clientState.assignedTaskCount(), equalTo(1)); } } @Test public void shouldAssignMoreTasksToClientWithMoreCapacity() throws Exception { createClient(p2, 2); createClient(p1, 1); final StickyTaskAssignor<Integer> taskAssignor = createTaskAssignor(task00, task01, task02, new TaskId(1, 0), new TaskId(1, 1), new TaskId(1, 2), new TaskId(2, 0), new TaskId(2, 1), new TaskId(2, 2), new TaskId(3, 0), new TaskId(3, 1), new TaskId(3, 2)); taskAssignor.assign(0); assertThat(clients.get(p2).assignedTaskCount(), equalTo(8)); assertThat(clients.get(p1).assignedTaskCount(), equalTo(4)); } @Test public void shouldNotHaveSameAssignmentOnAnyTwoHosts() throws Exception { createClient(p1, 1); createClient(p2, 1); createClient(p3, 1); createClient(p4, 1); final StickyTaskAssignor<Integer> taskAssignor = createTaskAssignor(task00, task02, task01, task03); taskAssignor.assign(1); for (int i = p1; i <= p4; i++) { final Set<TaskId> taskIds = clients.get(i).assignedTasks(); for (int j = p1; j <= p4; j++) { if (j != i) { assertThat("clients shouldn't have same task assignment", clients.get(j).assignedTasks(), not(equalTo(taskIds))); } } } } @Test public void shouldNotHaveSameAssignmentOnAnyTwoHostsWhenThereArePreviousActiveTasks() throws Exception { 
createClientWithPreviousActiveTasks(p1, 1, task01, task02); createClientWithPreviousActiveTasks(p2, 1, task03); createClientWithPreviousActiveTasks(p3, 1, task00); createClient(p4, 1); final StickyTaskAssignor<Integer> taskAssignor = createTaskAssignor(task00, task02, task01, task03); taskAssignor.assign(1); for (int i = p1; i <= p4; i++) { final Set<TaskId> taskIds = clients.get(i).assignedTasks(); for (int j = p1; j <= p4; j++) { if (j != i) { assertThat("clients shouldn't have same task assignment", clients.get(j).assignedTasks(), not(equalTo(taskIds))); } } } } @Test public void shouldNotHaveSameAssignmentOnAnyTwoHostsWhenThereArePreviousStandbyTasks() throws Exception { final ClientState c1 = createClientWithPreviousActiveTasks(p1, 1, task01, task02); c1.addPreviousStandbyTasks(Utils.mkSet(task03, task00)); final ClientState c2 = createClientWithPreviousActiveTasks(p2, 1, task03, task00); c2.addPreviousStandbyTasks(Utils.mkSet(task01, task02)); createClient(p3, 1); createClient(p4, 1); final StickyTaskAssignor<Integer> taskAssignor = createTaskAssignor(task00, task02, task01, task03); taskAssignor.assign(1); for (int i = p1; i <= p4; i++) { final Set<TaskId> taskIds = clients.get(i).assignedTasks(); for (int j = p1; j <= p4; j++) { if (j != i) { assertThat("clients shouldn't have same task assignment", clients.get(j).assignedTasks(), not(equalTo(taskIds))); } } } } @Test public void shouldReBalanceTasksAcrossAllClientsWhenCapacityAndTaskCountTheSame() throws Exception { createClientWithPreviousActiveTasks(p3, 1, task00, task01, task02, task03); createClient(p1, 1); createClient(p2, 1); createClient(p4, 1); final StickyTaskAssignor<Integer> taskAssignor = createTaskAssignor(task00, task02, task01, task03); taskAssignor.assign(0); assertThat(clients.get(p1).assignedTaskCount(), equalTo(1)); assertThat(clients.get(p2).assignedTaskCount(), equalTo(1)); assertThat(clients.get(p3).assignedTaskCount(), equalTo(1)); assertThat(clients.get(p4).assignedTaskCount(), 
equalTo(1)); } @Test public void shouldReBalanceTasksAcrossClientsWhenCapacityLessThanTaskCount() throws Exception { createClientWithPreviousActiveTasks(p3, 1, task00, task01, task02, task03); createClient(p1, 1); createClient(p2, 1); final StickyTaskAssignor<Integer> taskAssignor = createTaskAssignor(task00, task02, task01, task03); taskAssignor.assign(0); assertThat(clients.get(p3).assignedTaskCount(), equalTo(2)); assertThat(clients.get(p1).assignedTaskCount(), equalTo(1)); assertThat(clients.get(p2).assignedTaskCount(), equalTo(1)); }
SubscriptionInfo {
    /**
     * Decodes a SubscriptionInfo from the wire format produced by {@code encode()}.
     * Supports CURRENT_VERSION and the legacy version 1 (which has no user-endpoint field).
     *
     * @param data buffer containing the encoded metadata; rewound before reading
     * @return the decoded subscription metadata
     * @throws TaskAssignmentException if the leading version int is unrecognized
     */
    public static SubscriptionInfo decode(ByteBuffer data) {
        data.rewind(); // always start from the beginning of the buffer
        int version = data.getInt();
        if (version == CURRENT_VERSION || version == 1) {
            UUID processId = new UUID(data.getLong(), data.getLong());
            // previously-active tasks: count followed by that many TaskIds
            Set<TaskId> prevTasks = new HashSet<>();
            int numPrevs = data.getInt();
            for (int i = 0; i < numPrevs; i++) {
                prevTasks.add(TaskId.readFrom(data));
            }
            // previously-standby (cached) tasks: same layout
            Set<TaskId> standbyTasks = new HashSet<>();
            int numCached = data.getInt();
            for (int i = 0; i < numCached; i++) {
                standbyTasks.add(TaskId.readFrom(data));
            }
            // user endpoint is only present in the current version; length 0 means "none"
            String userEndPoint = null;
            if (version == CURRENT_VERSION) {
                int bytesLength = data.getInt();
                if (bytesLength != 0) {
                    byte[] bytes = new byte[bytesLength];
                    data.get(bytes);
                    // StandardCharsets.UTF_8 avoids the lookup (and unchecked exceptions)
                    // of Charset.forName("UTF-8")
                    userEndPoint = new String(bytes, java.nio.charset.StandardCharsets.UTF_8);
                }
            }
            return new SubscriptionInfo(version, processId, prevTasks, standbyTasks, userEndPoint);
        } else {
            TaskAssignmentException ex = new TaskAssignmentException(
                    "unable to decode subscription data: version=" + version);
            log.error(ex.getMessage(), ex);
            throw ex;
        }
    }
    SubscriptionInfo(UUID processId, Set<TaskId> prevTasks, Set<TaskId> standbyTasks, String userEndPoint);
    private SubscriptionInfo(int version, UUID processId, Set<TaskId> prevTasks, Set<TaskId> standbyTasks, String userEndPoint);
    ByteBuffer encode();
    static SubscriptionInfo decode(ByteBuffer data);
    @Override int hashCode();
    @Override boolean equals(Object o);
    final int version;
    final UUID processId;
    final Set<TaskId> prevTasks;
    final Set<TaskId> standbyTasks;
    final String userEndPoint;
}
// Verifies SubscriptionInfo.decode can read the legacy (version 1) wire format:
// active/standby task sets and processId round-trip, and userEndPoint — absent in v1 —
// decodes as null. encodePreviousVersion is a test helper defined outside this excerpt.
@Test public void shouldBeBackwardCompatible() throws Exception { UUID processId = UUID.randomUUID(); Set<TaskId> activeTasks = new HashSet<>(Arrays.asList(new TaskId(0, 0), new TaskId(0, 1), new TaskId(1, 0))); Set<TaskId> standbyTasks = new HashSet<>(Arrays.asList(new TaskId(1, 1), new TaskId(2, 0))); final ByteBuffer v1Encoding = encodePreviousVersion(processId, activeTasks, standbyTasks); final SubscriptionInfo decode = SubscriptionInfo.decode(v1Encoding); assertEquals(activeTasks, decode.prevTasks); assertEquals(standbyTasks, decode.standbyTasks); assertEquals(processId, decode.processId); assertNull(decode.userEndPoint); }
// Focal-class fragment: JAX-RS REST resource for Kafka Connect connectors.
// getConnectorConfig() asks the Herder for the named connector's config via an async
// FutureCallback, then either completes locally or forwards the request to the leader
// (completeOrForwardRequest is defined outside this excerpt; "forward" is a query param).
ConnectorsResource { @GET @Path("/{connector}/config") public Map<String, String> getConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<Map<String, String>> cb = new FutureCallback<>(); herder.connectorConfig(connector, cb); return completeOrForwardRequest(cb, "/connectors/" + connector + "/config", "GET", null, forward); } ConnectorsResource(Herder herder); @GET @Path("/") Collection<String> listConnectors(final @QueryParam("forward") Boolean forward); @POST @Path("/") Response createConnector(final @QueryParam("forward") Boolean forward, final CreateConnectorRequest createRequest); @GET @Path("/{connector}") ConnectorInfo getConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/config") Map<String, String> getConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/status") ConnectorStateInfo getConnectorStatus(final @PathParam("connector") String connector); @PUT @Path("/{connector}/config") Response putConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final Map<String, String> connectorConfig); @POST @Path("/{connector}/restart") void restartConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @PUT @Path("/{connector}/pause") Response pauseConnector(@PathParam("connector") String connector); @PUT @Path("/{connector}/resume") Response resumeConnector(@PathParam("connector") String connector); @GET @Path("/{connector}/tasks") List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @POST @Path("/{connector}/tasks") void putTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final 
List<Map<String, String>> taskConfigs); @GET @Path("/{connector}/tasks/{task}/status") ConnectorStateInfo.TaskState getTaskStatus(final @PathParam("connector") String connector, final @PathParam("task") Integer task); @POST @Path("/{connector}/tasks/{task}/restart") void restartTask(final @PathParam("connector") String connector, final @PathParam("task") Integer task, final @QueryParam("forward") Boolean forward); @DELETE @Path("/{connector}") void destroyConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); }
// EasyMock/PowerMock tests for ConnectorsResource.getConnectorConfig: the herder callback
// is captured, then completed with either a NotFoundException (expected to propagate) or
// the connector config (expected to be returned verbatim). expectAndCallback* helpers and
// the CONNECTOR_NAME/CONNECTOR_CONFIG/FORWARD fixtures are defined outside this excerpt.
@Test(expected = NotFoundException.class) public void testGetConnectorConfigConnectorNotFound() throws Throwable { final Capture<Callback<Map<String, String>>> cb = Capture.newInstance(); herder.connectorConfig(EasyMock.eq(CONNECTOR_NAME), EasyMock.capture(cb)); expectAndCallbackException(cb, new NotFoundException("not found")); PowerMock.replayAll(); connectorsResource.getConnectorConfig(CONNECTOR_NAME, FORWARD); PowerMock.verifyAll(); } @Test public void testGetConnectorConfig() throws Throwable { final Capture<Callback<Map<String, String>>> cb = Capture.newInstance(); herder.connectorConfig(EasyMock.eq(CONNECTOR_NAME), EasyMock.capture(cb)); expectAndCallbackResult(cb, CONNECTOR_CONFIG); PowerMock.replayAll(); Map<String, String> connConfig = connectorsResource.getConnectorConfig(CONNECTOR_NAME, FORWARD); assertEquals(CONNECTOR_CONFIG, connConfig); PowerMock.verifyAll(); }
ClientState {
    /**
     * Reports whether this client is already at (or beyond) its task capacity,
     * i.e. it should not be handed further tasks.
     */
    boolean reachedCapacity() {
        final int currentlyAssigned = assignedTasks.size();
        return currentlyAssigned >= capacity;
    }
    ClientState(); ClientState(final int capacity); private ClientState(Set<TaskId> activeTasks, Set<TaskId> standbyTasks, Set<TaskId> assignedTasks, Set<TaskId> prevActiveTasks, Set<TaskId> prevAssignedTasks, int capacity); ClientState copy(); void assign(final TaskId taskId, final boolean active); Set<TaskId> activeTasks(); Set<TaskId> standbyTasks(); int assignedTaskCount(); void incrementCapacity(); int activeTaskCount(); void addPreviousActiveTasks(final Set<TaskId> prevTasks); void addPreviousStandbyTasks(final Set<TaskId> standbyTasks); @Override String toString();
}
// A fresh client (fixture `client`, built outside this excerpt) has no assigned tasks,
// so reachedCapacity() must be false.
@Test public void shouldHaveNotReachedCapacityWhenAssignedTasksLessThanCapacity() throws Exception { assertFalse(client.reachedCapacity()); }
ClientState {
    /**
     * Compares relative load (assigned tasks per unit of capacity) with another client.
     * Lower load wins; on an exact load tie, the client with the larger raw capacity
     * is considered to have more available capacity.
     *
     * @param other the client to compare against; must have positive capacity
     * @return true if this client is less loaded (or equally loaded but larger) than other
     * @throws IllegalStateException if either client's capacity is not positive
     */
    boolean hasMoreAvailableCapacityThan(final ClientState other) {
        if (this.capacity <= 0) {
            throw new IllegalStateException("Capacity of this ClientState must be greater than 0.");
        }
        if (other.capacity <= 0) {
            // message now punctuated consistently with the check above
            throw new IllegalStateException("Capacity of other ClientState must be greater than 0.");
        }
        // load = tasks per capacity unit; double division so capacities of different
        // magnitudes compare fairly
        final double otherLoad = (double) other.assignedTaskCount() / other.capacity;
        final double thisLoad = (double) assignedTaskCount() / capacity;
        if (thisLoad < otherLoad) {
            return true;
        } else if (thisLoad > otherLoad) {
            return false;
        } else {
            // tie-break: prefer the client with more absolute headroom
            return capacity > other.capacity;
        }
    }
    ClientState(); ClientState(final int capacity); private ClientState(Set<TaskId> activeTasks, Set<TaskId> standbyTasks, Set<TaskId> assignedTasks, Set<TaskId> prevActiveTasks, Set<TaskId> prevAssignedTasks, int capacity); ClientState copy(); void assign(final TaskId taskId, final boolean active); Set<TaskId> activeTasks(); Set<TaskId> standbyTasks(); int assignedTaskCount(); void incrementCapacity(); int activeTaskCount(); void addPreviousActiveTasks(final Set<TaskId> prevTasks); void addPreviousStandbyTasks(final Set<TaskId> standbyTasks); @Override String toString();
}
// hasMoreAvailableCapacityThan: equal load ties break toward the larger capacity
// (and the comparison is asymmetric), and a non-positive capacity on either side
// raises IllegalStateException. Fixture `client` is built outside this excerpt.
@Test public void shouldHaveMoreAvailableCapacityWhenCapacityHigherAndSameAssignedTaskCount() throws Exception { final ClientState c2 = new ClientState(2); assertTrue(c2.hasMoreAvailableCapacityThan(client)); assertFalse(client.hasMoreAvailableCapacityThan(c2)); } @Test(expected = IllegalStateException.class) public void shouldThrowIllegalStateExceptionIfCapacityOfThisClientStateIsZero() throws Exception { final ClientState c1 = new ClientState(0); c1.hasMoreAvailableCapacityThan(new ClientState(1)); } @Test(expected = IllegalStateException.class) public void shouldThrowIllegalStateExceptionIfCapacityOfOtherClientStateIsZero() throws Exception { final ClientState c1 = new ClientState(1); c1.hasMoreAvailableCapacityThan(new ClientState(0)); }
QuickUnion {
    /**
     * Unites id1 with every id in idList, pairwise, so afterwards they all share
     * one root. Delegates each merge to unitePair (defined outside this excerpt).
     */
    public void unite(T id1, T... idList) {
        for (int i = 0; i < idList.length; i++) {
            unitePair(id1, idList[i]);
        }
    }
    void add(T id); boolean exists(T id); T root(T id); void unite(T id1, T... idList);
}
// Exercises union-find merging: five singletons collapse step by step (5 → 4 → 3 → 2 → 1
// distinct roots) as pairs are united, and every unite leaves the merged ids sharing a
// root. The roots(...) helper (counts distinct roots) is defined outside this excerpt.
@SuppressWarnings("unchecked") @Test public void testUnite() { QuickUnion<Long> qu = new QuickUnion<>(); long[] ids = { 1L, 2L, 3L, 4L, 5L }; for (long id : ids) { qu.add(id); } assertEquals(5, roots(qu, ids).size()); qu.unite(1L, 2L); assertEquals(4, roots(qu, ids).size()); assertEquals(qu.root(1L), qu.root(2L)); qu.unite(3L, 4L); assertEquals(3, roots(qu, ids).size()); assertEquals(qu.root(1L), qu.root(2L)); assertEquals(qu.root(3L), qu.root(4L)); qu.unite(1L, 5L); assertEquals(2, roots(qu, ids).size()); assertEquals(qu.root(1L), qu.root(2L)); assertEquals(qu.root(2L), qu.root(5L)); assertEquals(qu.root(3L), qu.root(4L)); qu.unite(3L, 5L); assertEquals(1, roots(qu, ids).size()); assertEquals(qu.root(1L), qu.root(2L)); assertEquals(qu.root(2L), qu.root(3L)); assertEquals(qu.root(3L), qu.root(4L)); assertEquals(qu.root(4L), qu.root(5L)); }
// Focal-class fragment: validatePartitionExists confirms a store's changelog partition is
// visible to the consumer. Strategy: one bulk listTopics() (a TimeoutException there is
// only logged, then falls back to per-topic polling), then poll partitionsFor(topic) until
// the partition appears or partitionValidationTimeoutMs elapses; a TimeoutException during
// polling, or expiry without the partition, becomes a StreamsException.
// NOTE(review): the while loop re-calls partitionsFor with no sleep/backoff between
// attempts — it spins until endTime; confirm this busy-wait is intentional.
StoreChangelogReader implements ChangelogReader { @Override public void validatePartitionExists(final TopicPartition topicPartition, final String storeName) { final long start = time.milliseconds(); if (partitionInfo.isEmpty()) { try { partitionInfo.putAll(consumer.listTopics()); } catch (final TimeoutException e) { log.warn("{} Could not list topics so will fall back to partition by partition fetching", logPrefix); } } final long endTime = time.milliseconds() + partitionValidationTimeoutMs; while (!hasPartition(topicPartition) && time.milliseconds() < endTime) { try { final List<PartitionInfo> partitions = consumer.partitionsFor(topicPartition.topic()); if (partitions != null) { partitionInfo.put(topicPartition.topic(), partitions); } } catch (final TimeoutException e) { throw new StreamsException(String.format("Could not fetch partition info for topic: %s before expiration of the configured request timeout", topicPartition.topic())); } } if (!hasPartition(topicPartition)) { throw new StreamsException(String.format("Store %s's change log (%s) does not contain partition %s", storeName, topicPartition.topic(), topicPartition.partition())); } log.debug("{} Took {} ms to validate that partition {} exists", logPrefix, time.milliseconds() - start, topicPartition); } StoreChangelogReader(final String threadId, final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs); StoreChangelogReader(final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs); @Override void validatePartitionExists(final TopicPartition topicPartition, final String storeName); @Override void register(final StateRestorer restorer); void restore(); @Override Map<TopicPartition, Long> restoredOffsets(); }
// Tests for StoreChangelogReader.validatePartitionExists, one failure mode per anonymous
// MockConsumer subclass: listTopics() timing out surfaces as StreamsException; an unknown
// partition after the validation timeout throws StreamsException; a topic missing from the bulk
// listing is found via the per-topic partitionsFor() fallback; partitionsFor() timing out is
// fatal; a partition already registered with the consumer passes immediately; and a consumer
// whose listTopics() returns nothing still succeeds by fetching partition info on demand.
@SuppressWarnings("unchecked") @Test public void shouldThrowStreamsExceptionWhenTimeoutExceptionThrown() throws Exception { final MockConsumer<byte[], byte[]> consumer = new MockConsumer(OffsetResetStrategy.EARLIEST) { @Override public Map<String, List<PartitionInfo>> listTopics() { throw new TimeoutException("KABOOM!"); } }; final StoreChangelogReader changelogReader = new StoreChangelogReader(consumer, new MockTime(), 0); try { changelogReader.validatePartitionExists(topicPartition, "store"); fail("Should have thrown streams exception"); } catch (final StreamsException e) { } } @Test(expected = StreamsException.class) public void shouldThrowStreamsExceptionIfPartitionDoesntExistAfterMaxWait() throws Exception { changelogReader.validatePartitionExists(topicPartition, "store"); } @SuppressWarnings("unchecked") @Test public void shouldFallbackToPartitionsForIfPartitionNotInAllPartitionsList() throws Exception { final MockConsumer<byte[], byte[]> consumer = new MockConsumer(OffsetResetStrategy.EARLIEST) { @Override public List<PartitionInfo> partitionsFor(final String topic) { return Collections.singletonList(partitionInfo); } }; final StoreChangelogReader changelogReader = new StoreChangelogReader(consumer, new MockTime(), 10); changelogReader.validatePartitionExists(topicPartition, "store"); } @SuppressWarnings("unchecked") @Test public void shouldThrowStreamsExceptionIfTimeoutOccursDuringPartitionsFor() throws Exception { final MockConsumer<byte[], byte[]> consumer = new MockConsumer(OffsetResetStrategy.EARLIEST) { @Override public List<PartitionInfo> partitionsFor(final String topic) { throw new TimeoutException("KABOOM!"); } }; final StoreChangelogReader changelogReader = new StoreChangelogReader(consumer, new MockTime(), 5); try { changelogReader.validatePartitionExists(topicPartition, "store"); fail("Should have thrown streams exception"); } catch (final StreamsException e) { } } @Test public void shouldPassIfTopicPartitionExists() throws Exception { 
consumer.updatePartitions(topicPartition.topic(), Collections.singletonList(partitionInfo)); changelogReader.validatePartitionExists(topicPartition, "store"); } @SuppressWarnings("unchecked") @Test public void shouldRequestPartitionInfoIfItDoesntExist() throws Exception { final MockConsumer<byte[], byte[]> consumer = new MockConsumer(OffsetResetStrategy.EARLIEST) { @Override public Map<String, List<PartitionInfo>> listTopics() { return Collections.emptyMap(); } }; consumer.updatePartitions(topicPartition.topic(), Collections.singletonList(partitionInfo)); final StoreChangelogReader changelogReader = new StoreChangelogReader(consumer, Time.SYSTEM, 5000); changelogReader.validatePartitionExists(topicPartition, "store"); }
// Fragment of ConnectorsResource: REST handler for PUT /connectors/{connector}/config.
// Rejects a request whose body's "name" config disagrees with the connector name in the URL
// (BadRequestException); injects the URL name into the config when the body omits it. The config
// is then submitted to the herder via an async callback; completeOrForwardRequest either resolves
// it locally or forwards the request (presumably to the cluster leader — confirm against the
// herder implementation). Responds 201 Created with a Location for a newly created connector,
// 200 OK when an existing connector's config was updated; both carry the ConnectorInfo body.
ConnectorsResource { @PUT @Path("/{connector}/config") public Response putConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final Map<String, String> connectorConfig) throws Throwable { FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>(); String includedName = connectorConfig.get(ConnectorConfig.NAME_CONFIG); if (includedName != null) { if (!includedName.equals(connector)) throw new BadRequestException("Connector name configuration (" + includedName + ") doesn't match connector name in the URL (" + connector + ")"); } else { connectorConfig.put(ConnectorConfig.NAME_CONFIG, connector); } herder.putConnectorConfig(connector, connectorConfig, true, cb); Herder.Created<ConnectorInfo> createdInfo = completeOrForwardRequest(cb, "/connectors/" + connector + "/config", "PUT", connectorConfig, new TypeReference<ConnectorInfo>() { }, new CreatedConnectorInfoTranslator(), forward); Response.ResponseBuilder response; if (createdInfo.created()) response = Response.created(URI.create("/connectors/" + connector)); else response = Response.ok(); return response.entity(createdInfo.result()).build(); } ConnectorsResource(Herder herder); @GET @Path("/") Collection<String> listConnectors(final @QueryParam("forward") Boolean forward); @POST @Path("/") Response createConnector(final @QueryParam("forward") Boolean forward, final CreateConnectorRequest createRequest); @GET @Path("/{connector}") ConnectorInfo getConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/config") Map<String, String> getConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/status") ConnectorStateInfo getConnectorStatus(final @PathParam("connector") String connector); @PUT @Path("/{connector}/config") Response putConnectorConfig(final @PathParam("connector") String connector, 
final @QueryParam("forward") Boolean forward, final Map<String, String> connectorConfig); @POST @Path("/{connector}/restart") void restartConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @PUT @Path("/{connector}/pause") Response pauseConnector(@PathParam("connector") String connector); @PUT @Path("/{connector}/resume") Response resumeConnector(@PathParam("connector") String connector); @GET @Path("/{connector}/tasks") List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @POST @Path("/{connector}/tasks") void putTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final List<Map<String, String>> taskConfigs); @GET @Path("/{connector}/tasks/{task}/status") ConnectorStateInfo.TaskState getTaskStatus(final @PathParam("connector") String connector, final @PathParam("task") Integer task); @POST @Path("/{connector}/tasks/{task}/restart") void restartTask(final @PathParam("connector") String connector, final @PathParam("task") Integer task, final @QueryParam("forward") Boolean forward); @DELETE @Path("/{connector}") void destroyConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); }
// Tests for putConnectorConfig: the happy path stubs the herder callback to report an existing
// (not newly created) connector and verifies the handler completes; the mismatch case sends a
// config body whose "name" differs from the URL's connector name and must raise
// BadRequestException before the herder is ever invoked.
@Test public void testPutConnectorConfig() throws Throwable { final Capture<Callback<Herder.Created<ConnectorInfo>>> cb = Capture.newInstance(); herder.putConnectorConfig(EasyMock.eq(CONNECTOR_NAME), EasyMock.eq(CONNECTOR_CONFIG), EasyMock.eq(true), EasyMock.capture(cb)); expectAndCallbackResult(cb, new Herder.Created<>(false, new ConnectorInfo(CONNECTOR_NAME, CONNECTOR_CONFIG, CONNECTOR_TASK_NAMES))); PowerMock.replayAll(); connectorsResource.putConnectorConfig(CONNECTOR_NAME, FORWARD, CONNECTOR_CONFIG); PowerMock.verifyAll(); } @Test(expected = BadRequestException.class) public void testPutConnectorConfigNameMismatch() throws Throwable { Map<String, String> connConfig = new HashMap<>(CONNECTOR_CONFIG); connConfig.put(ConnectorConfig.NAME_CONFIG, "mismatched-name"); connectorsResource.putConnectorConfig(CONNECTOR_NAME, FORWARD, connConfig); }
// Fragment of StoreChangelogReader: restores registered state stores from their changelog topics.
// Preconditions: the restore consumer must have no active subscription (IllegalStateException
// otherwise). Restorers whose checkpoint already covers the changelog end offset are marked
// restored and skipped. The remaining partitions are assigned to the consumer; each is seeked to
// its checkpoint (logging the restore range) or, with no checkpoint, to the beginning — those
// starting offsets are read back via consumer.position() afterwards. The poll loop then drains
// records until restorePartition() has removed every partition from the working set. The finally
// block always clears the consumer's assignment so the consumer can be reused, and logs timing.
StoreChangelogReader implements ChangelogReader { public void restore() { final long start = time.milliseconds(); try { if (!consumer.subscription().isEmpty()) { throw new IllegalStateException(String.format("Restore consumer should have not subscribed to any partitions (%s) beforehand", consumer.subscription())); } final Map<TopicPartition, Long> endOffsets = consumer.endOffsets(stateRestorers.keySet()); final Map<TopicPartition, StateRestorer> needsRestoring = new HashMap<>(); for (final Map.Entry<TopicPartition, Long> entry : endOffsets.entrySet()) { TopicPartition topicPartition = entry.getKey(); Long offset = entry.getValue(); final StateRestorer restorer = stateRestorers.get(topicPartition); if (restorer.checkpoint() >= offset) { restorer.setRestoredOffset(restorer.checkpoint()); } else { needsRestoring.put(topicPartition, restorer); } } log.info("{} Starting restoring state stores from changelog topics {}", logPrefix, needsRestoring.keySet()); consumer.assign(needsRestoring.keySet()); final List<StateRestorer> needsPositionUpdate = new ArrayList<>(); for (final StateRestorer restorer : needsRestoring.values()) { if (restorer.checkpoint() != StateRestorer.NO_CHECKPOINT) { consumer.seek(restorer.partition(), restorer.checkpoint()); logRestoreOffsets(restorer.partition(), restorer.checkpoint(), endOffsets.get(restorer.partition())); restorer.setStartingOffset(consumer.position(restorer.partition())); } else { consumer.seekToBeginning(Collections.singletonList(restorer.partition())); needsPositionUpdate.add(restorer); } } for (final StateRestorer restorer : needsPositionUpdate) { final long position = consumer.position(restorer.partition()); restorer.setStartingOffset(position); logRestoreOffsets(restorer.partition(), position, endOffsets.get(restorer.partition())); } final Set<TopicPartition> partitions = new HashSet<>(needsRestoring.keySet()); while (!partitions.isEmpty()) { final ConsumerRecords<byte[], byte[]> allRecords = consumer.poll(10); final 
Iterator<TopicPartition> partitionIterator = partitions.iterator(); while (partitionIterator.hasNext()) { restorePartition(endOffsets, allRecords, partitionIterator); } } } finally { consumer.assign(Collections.<TopicPartition>emptyList()); log.debug("{} Took {} ms to restore all active states", logPrefix, time.milliseconds() - start); } } StoreChangelogReader(final String threadId, final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs); StoreChangelogReader(final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs); @Override void validatePartitionExists(final TopicPartition topicPartition, final String storeName); @Override void register(final StateRestorer restorer); void restore(); @Override Map<TopicPartition, Long> restoredOffsets(); }
@Test
public void shouldThrowExceptionIfConsumerHasCurrentSubscription() throws Exception {
    // restore() requires a dedicated consumer; an existing subscription must be rejected.
    consumer.subscribe(Collections.singleton("sometopic"));
    boolean threwIllegalState = false;
    try {
        changelogReader.restore();
    } catch (final IllegalStateException e) {
        threwIllegalState = true; // expected: restore consumer already subscribed
    }
    if (!threwIllegalState) {
        fail("Should have thrown IllegalStateException");
    }
}
InternalTopicManager {
    /**
     * Returns the existing partition count for each of the given topics, fetched from
     * cluster metadata. Transient StreamsExceptions are retried up to MAX_TOPIC_READY_TRY
     * times with a 100 ms pause between attempts; after that a StreamsException is thrown.
     */
    public Map<String, Integer> getNumPartitions(final Set<String> topics) {
        int attempt = 0;
        while (attempt < MAX_TOPIC_READY_TRY) {
            try {
                final MetadataResponse metadata = streamsKafkaClient.fetchMetadata();
                final Map<String, Integer> partitionCounts = fetchExistingPartitionCountByTopic(metadata);
                // Drop entries for topics the caller did not ask about.
                partitionCounts.keySet().retainAll(topics);
                return partitionCounts;
            } catch (StreamsException ex) {
                log.warn("Could not get number of partitions: " + ex.getMessage() + " Retry #" + attempt);
            }
            time.sleep(100L);
            attempt++;
        }
        throw new StreamsException("Could not get number of partitions.");
    }
    InternalTopicManager(final StreamsKafkaClient streamsKafkaClient, final int replicationFactor, final long windowChangeLogAdditionalRetention, final Time time);
    void makeReady(final Map<InternalTopicConfig, Integer> topics);
    Map<String, Integer> getNumPartitions(final Set<String> topics);
    void close();
    static final String CLEANUP_POLICY_PROP;
    static final String RETENTION_MS;
    static final Long WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION_DEFAULT;
}
@Test
public void shouldReturnCorrectPartitionCounts() throws Exception {
    // A manager backed by the mocked client should report exactly one partition for the topic.
    final InternalTopicManager manager =
        new InternalTopicManager(streamsKafkaClient, 1, WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION_DEFAULT, time);
    final Map<String, Integer> counts = manager.getNumPartitions(Collections.singleton(topic));
    Assert.assertEquals(Collections.singletonMap(topic, 1), counts);
}
InternalTopicManager {
    /**
     * Ensures the given internal topics exist with the requested partition counts.
     * Each attempt fetches metadata, validates existing partition counts, verifies there are
     * enough brokers for the configured replication factor, and creates the missing topics.
     * Transient StreamsExceptions are retried up to MAX_TOPIC_READY_TRY times with a 100 ms
     * pause; after that a StreamsException is thrown.
     */
    public void makeReady(final Map<InternalTopicConfig, Integer> topics) {
        int attempt = 0;
        while (attempt < MAX_TOPIC_READY_TRY) {
            try {
                final MetadataResponse metadata = streamsKafkaClient.fetchMetadata();
                final Map<String, Integer> existingTopicPartitions = fetchExistingPartitionCountByTopic(metadata);
                final Map<InternalTopicConfig, Integer> topicsToBeCreated =
                    validateTopicPartitions(topics, existingTopicPartitions);
                // Refuse to create topics that could not be replicated as configured.
                if (metadata.brokers().size() < replicationFactor) {
                    throw new StreamsException("Found only " + metadata.brokers().size() + " brokers, "
                        + " but replication factor is " + replicationFactor + "."
                        + " Decrease replication factor for internal topics via StreamsConfig parameter \"replication.factor\""
                        + " or add more brokers to your cluster.");
                }
                streamsKafkaClient.createTopics(topicsToBeCreated, replicationFactor, windowChangeLogAdditionalRetention, metadata);
                return;
            } catch (StreamsException ex) {
                log.warn("Could not create internal topics: " + ex.getMessage() + " Retry #" + attempt);
            }
            time.sleep(100L);
            attempt++;
        }
        throw new StreamsException("Could not create internal topics.");
    }
    InternalTopicManager(final StreamsKafkaClient streamsKafkaClient, final int replicationFactor, final long windowChangeLogAdditionalRetention, final Time time);
    void makeReady(final Map<InternalTopicConfig, Integer> topics);
    Map<String, Integer> getNumPartitions(final Set<String> topics);
    void close();
    static final String CLEANUP_POLICY_PROP;
    static final String RETENTION_MS;
    static final Long WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION_DEFAULT;
}
// makeReady tests: requesting a compacted internal topic that does not yet exist must succeed,
// while requesting an existing topic with a different partition count (2 instead of 1) must
// surface a StreamsException — asserted via a flag because the retry loop wraps the failure.
@Test public void shouldCreateRequiredTopics() throws Exception { InternalTopicManager internalTopicManager = new InternalTopicManager(streamsKafkaClient, 1, WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION_DEFAULT, time); internalTopicManager.makeReady(Collections.singletonMap(new InternalTopicConfig(topic, Collections.singleton(InternalTopicConfig.CleanupPolicy.compact), null), 1)); } @Test public void shouldNotCreateTopicIfExistsWithDifferentPartitions() throws Exception { InternalTopicManager internalTopicManager = new InternalTopicManager(streamsKafkaClient, 1, WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION_DEFAULT, time); boolean exceptionWasThrown = false; try { internalTopicManager.makeReady(Collections.singletonMap(new InternalTopicConfig(topic, Collections.singleton(InternalTopicConfig.CleanupPolicy.compact), null), 2)); } catch (StreamsException e) { exceptionWasThrown = true; } Assert.assertTrue(exceptionWasThrown); }
AbstractTask {
    /**
     * Refreshes the state manager's offset limit for every partition of this task from the
     * consumer's committed offsets (0 when nothing has been committed yet).
     * Exception mapping per partition: AuthorizationException and any other KafkaException are
     * wrapped in ProcessorStateException with the task id and partition; WakeupException is
     * rethrown untouched so the caller's wakeup handling still works.
     */
    protected void updateOffsetLimits() {
        // Fix: the original format string had a second "{}" placeholder with no matching
        // argument, so SLF4J would render a literal "{}" in the log line.
        log.debug("{} Updating store offset limits", logPrefix);
        for (final TopicPartition partition : partitions) {
            try {
                final OffsetAndMetadata metadata = consumer.committed(partition);
                // No committed offset means the limit starts at 0.
                stateMgr.putOffsetLimit(partition, metadata != null ? metadata.offset() : 0L);
            } catch (final AuthorizationException e) {
                throw new ProcessorStateException(String.format("task [%s] AuthorizationException when initializing offsets for %s", id, partition), e);
            } catch (final WakeupException e) {
                // Must propagate so consumer.wakeup() semantics are preserved.
                throw e;
            } catch (final KafkaException e) {
                throw new ProcessorStateException(String.format("task [%s] Failed to initialize offsets for %s", id, partition), e);
            }
        }
    }
    AbstractTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final boolean isStandby, final StateDirectory stateDirectory, final ThreadCache cache, final StreamsConfig config);
    abstract void resume();
    abstract void commit();
    abstract void suspend();
    abstract void close(final boolean clean);
    final TaskId id();
    final String applicationId();
    final Set<TopicPartition> partitions();
    final ProcessorTopology topology();
    final ProcessorContext context();
    final ThreadCache cache();
    StateStore getStore(final String name);
    @Override String toString();
    String toString(final String indent);
}
// updateOffsetLimits error-mapping tests: AuthorizationException and generic KafkaException from
// the consumer must be wrapped in ProcessorStateException, while WakeupException must propagate
// unchanged so the stream thread's wakeup handling keeps working.
@Test(expected = ProcessorStateException.class) public void shouldThrowProcessorStateExceptionOnInitializeOffsetsWhenAuthorizationException() throws Exception { final Consumer consumer = mockConsumer(new AuthorizationException("blah")); final AbstractTask task = createTask(consumer); task.updateOffsetLimits(); } @Test(expected = ProcessorStateException.class) public void shouldThrowProcessorStateExceptionOnInitializeOffsetsWhenKafkaException() throws Exception { final Consumer consumer = mockConsumer(new KafkaException("blah")); final AbstractTask task = createTask(consumer); task.updateOffsetLimits(); } @Test(expected = WakeupException.class) public void shouldThrowWakeupExceptionOnInitializeOffsetsWhenWakeupException() throws Exception { final Consumer consumer = mockConsumer(new WakeupException()); final AbstractTask task = createTask(consumer); task.updateOffsetLimits(); }
// Fragment of TopologyBuilder: the simplest addSource(name, topics...) overload, delegating to
// the fullest variant with null offset reset, timestamp extractor and key/value deserializers
// (nulls mean the configured defaults are used downstream — presumably resolved at build time;
// confirm against the full addSource implementation). The rest of this fragment is the builder's
// public surface: source/sink/processor/state-store registration and topology introspection.
TopologyBuilder { public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... 
topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier, final String sourceName, final Deserializer keyDeserializer, final Deserializer valueDeserializer, final String topic, final String processorName, final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier, final String sourceName, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, final Deserializer valueDeserializer, final String topic, final String processorName, final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... 
parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
// addSource validation tests: an empty topic array, a duplicate source name, re-registering a
// topic already claimed by another source, a pattern overlapping a named topic (and vice versa),
// and a null source name must all be rejected — TopologyBuilderException for topology conflicts,
// NullPointerException for the null name.
@Test public void shouldNotAllowOffsetResetSourceWithoutTopics() { final TopologyBuilder builder = new TopologyBuilder(); final Serde<String> stringSerde = Serdes.String(); try { builder.addSource(TopologyBuilder.AutoOffsetReset.EARLIEST, "source", null, stringSerde.deserializer(), stringSerde.deserializer(), new String[]{}); fail("Should throw TopologyBuilderException with no topics"); } catch (TopologyBuilderException tpe) { } } @Test public void shouldNotAllowOffsetResetSourceWithDuplicateSourceName() { final TopologyBuilder builder = new TopologyBuilder(); final Serde<String> stringSerde = Serdes.String(); builder.addSource(TopologyBuilder.AutoOffsetReset.EARLIEST, "source", null, stringSerde.deserializer(), stringSerde.deserializer(), "topic-1"); try { builder.addSource(TopologyBuilder.AutoOffsetReset.LATEST, "source", null, stringSerde.deserializer(), stringSerde.deserializer(), "topic-2"); fail("Should throw TopologyBuilderException for duplicate source name"); } catch (TopologyBuilderException tpe) { } } @Test(expected = TopologyBuilderException.class) public void testAddSourceWithSameName() { final TopologyBuilder builder = new TopologyBuilder(); builder.addSource("source", "topic-1"); builder.addSource("source", "topic-2"); } @Test(expected = TopologyBuilderException.class) public void testAddSourceWithSameTopic() { final TopologyBuilder builder = new TopologyBuilder(); builder.addSource("source", "topic-1"); builder.addSource("source-2", "topic-1"); } @Test(expected = TopologyBuilderException.class) public void testPatternMatchesAlreadyProvidedTopicSource() { final TopologyBuilder builder = new TopologyBuilder(); builder.addSource("source-1", "foo"); builder.addSource("source-2", Pattern.compile("f.*")); } @Test(expected = TopologyBuilderException.class) public void testNamedTopicMatchesAlreadyProvidedPattern() { final TopologyBuilder builder = new TopologyBuilder(); builder.addSource("source-1", Pattern.compile("f.*")); builder.addSource("source-2", 
"foo"); } @Test(expected = NullPointerException.class) public void shouldNotAllowNullNameWhenAddingSource() throws Exception { final TopologyBuilder builder = new TopologyBuilder(); builder.addSource(null, Pattern.compile(".*")); }
// Fragment of TopologyBuilder: addProcessor registers a processor node. Rejects a null name or
// supplier (NullPointerException), a duplicate node name, a node listed as its own parent, and
// parents not added yet (TopologyBuilderException); on success it records the node factory and
// unites the node with its parents in the node grouper, which drives topic-group partitioning.
// NOTE(review): parentNames itself is not null-checked (unlike addStateStore's processorNames),
// so passing an explicit null varargs array would NPE — confirm whether that is intended.
TopologyBuilder { public synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames) { Objects.requireNonNull(name, "name must not be null"); Objects.requireNonNull(supplier, "supplier must not be null"); if (nodeFactories.containsKey(name)) throw new TopologyBuilderException("Processor " + name + " is already added."); for (final String parent : parentNames) { if (parent.equals(name)) { throw new TopologyBuilderException("Processor " + name + " cannot be a parent of itself."); } if (!nodeFactories.containsKey(parent)) { throw new TopologyBuilderException("Parent processor " + parent + " is not added yet."); } } nodeFactories.put(name, new ProcessorNodeFactory(name, parentNames, supplier)); nodeGrouper.add(name); nodeGrouper.unite(name, parentNames); return this; } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... 
topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier, final String sourceName, final Deserializer keyDeserializer, final Deserializer valueDeserializer, final String topic, final String processorName, final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier, final String sourceName, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, final Deserializer valueDeserializer, final String topic, final String processorName, final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized 
final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
// addProcessor validation tests: a parent that was never added, a node listed as its own parent,
// a null node name, and a null supplier must all be rejected (TopologyBuilderException for the
// topology conflicts, NullPointerException for the null arguments).
@Test(expected = TopologyBuilderException.class) public void testAddProcessorWithWrongParent() { final TopologyBuilder builder = new TopologyBuilder(); builder.addProcessor("processor", new MockProcessorSupplier(), "source"); } @Test(expected = TopologyBuilderException.class) public void testAddProcessorWithSelfParent() { final TopologyBuilder builder = new TopologyBuilder(); builder.addProcessor("processor", new MockProcessorSupplier(), "processor"); } @Test(expected = NullPointerException.class) public void shouldNotAllowNullNameWhenAddingProcessor() throws Exception { final TopologyBuilder builder = new TopologyBuilder(); builder.addProcessor(null, new ProcessorSupplier() { @Override public Processor get() { return null; } }); } @Test(expected = NullPointerException.class) public void shouldNotAllowNullProcessorSupplier() throws Exception { final TopologyBuilder builder = new TopologyBuilder(); builder.addProcessor("name", null); }
// Fragment of TopologyBuilder: the simplest addSink(name, topic, parents...) overload, delegating
// to the serializer-aware variant with null key/value serializers (nulls mean the configured
// defaults are used downstream — presumably resolved at build time; confirm against the full
// addSink implementation). The rest of this fragment is the builder's public surface.
TopologyBuilder { public synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames) { return addSink(name, topic, null, null, parentNames); } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... 
topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier, final String sourceName, final Deserializer keyDeserializer, final Deserializer valueDeserializer, final String topic, final String processorName, final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier, final String sourceName, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, final Deserializer valueDeserializer, final String topic, final String processorName, final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... 
parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
@Test(expected = TopologyBuilderException.class) public void testAddSinkWithWrongParent() { final TopologyBuilder builder = new TopologyBuilder(); builder.addSink("sink", "topic-2", "source"); } @Test(expected = TopologyBuilderException.class) public void testAddSinkWithSelfParent() { final TopologyBuilder builder = new TopologyBuilder(); builder.addSink("sink", "topic-2", "sink"); } @Test(expected = NullPointerException.class) public void shouldNotAllowNullNameWhenAddingSink() throws Exception { final TopologyBuilder builder = new TopologyBuilder(); builder.addSink(null, "topic"); } @Test(expected = NullPointerException.class) public void shouldNotAllowNullTopicWhenAddingSink() throws Exception { final TopologyBuilder builder = new TopologyBuilder(); builder.addSink("name", null); }
TopologyBuilder { public synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames) { Objects.requireNonNull(supplier, "supplier can't be null"); if (stateFactories.containsKey(supplier.name())) { throw new TopologyBuilderException("StateStore " + supplier.name() + " is already added."); } stateFactories.put(supplier.name(), new StateStoreFactory(supplier)); if (processorNames != null) { for (String processorName : processorNames) { connectProcessorAndStateStore(processorName, supplier.name()); } } return this; } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... 
topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier, final String sourceName, final Deserializer keyDeserializer, final Deserializer valueDeserializer, final String topic, final String processorName, final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier, final String sourceName, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, final Deserializer valueDeserializer, final String topic, final String processorName, final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... 
parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
@Test(expected = TopologyBuilderException.class) public void testAddStateStoreWithNonExistingProcessor() { final TopologyBuilder builder = new TopologyBuilder(); builder.addStateStore(new MockStateStoreSupplier("store", false), "no-such-processsor"); } @Test(expected = TopologyBuilderException.class) public void testAddStateStoreWithDuplicates() { final TopologyBuilder builder = new TopologyBuilder(); builder.addStateStore(new MockStateStoreSupplier("store", false)); builder.addStateStore(new MockStateStoreSupplier("store", false)); } @Test(expected = NullPointerException.class) public void shouldNotAddNullStateStoreSupplier() throws Exception { final TopologyBuilder builder = new TopologyBuilder(); builder.addStateStore(null); }
ConnectorsResource { @GET @Path("/{connector}/tasks") public List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<List<TaskInfo>> cb = new FutureCallback<>(); herder.taskConfigs(connector, cb); return completeOrForwardRequest(cb, "/connectors/" + connector + "/tasks", "GET", null, new TypeReference<List<TaskInfo>>() { }, forward); } ConnectorsResource(Herder herder); @GET @Path("/") Collection<String> listConnectors(final @QueryParam("forward") Boolean forward); @POST @Path("/") Response createConnector(final @QueryParam("forward") Boolean forward, final CreateConnectorRequest createRequest); @GET @Path("/{connector}") ConnectorInfo getConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/config") Map<String, String> getConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/status") ConnectorStateInfo getConnectorStatus(final @PathParam("connector") String connector); @PUT @Path("/{connector}/config") Response putConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final Map<String, String> connectorConfig); @POST @Path("/{connector}/restart") void restartConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @PUT @Path("/{connector}/pause") Response pauseConnector(@PathParam("connector") String connector); @PUT @Path("/{connector}/resume") Response resumeConnector(@PathParam("connector") String connector); @GET @Path("/{connector}/tasks") List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @POST @Path("/{connector}/tasks") void putTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean 
forward, final List<Map<String, String>> taskConfigs); @GET @Path("/{connector}/tasks/{task}/status") ConnectorStateInfo.TaskState getTaskStatus(final @PathParam("connector") String connector, final @PathParam("task") Integer task); @POST @Path("/{connector}/tasks/{task}/restart") void restartTask(final @PathParam("connector") String connector, final @PathParam("task") Integer task, final @QueryParam("forward") Boolean forward); @DELETE @Path("/{connector}") void destroyConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); }
@Test public void testGetConnectorTaskConfigs() throws Throwable { final Capture<Callback<List<TaskInfo>>> cb = Capture.newInstance(); herder.taskConfigs(EasyMock.eq(CONNECTOR_NAME), EasyMock.capture(cb)); expectAndCallbackResult(cb, TASK_INFOS); PowerMock.replayAll(); List<TaskInfo> taskInfos = connectorsResource.getTaskConfigs(CONNECTOR_NAME, FORWARD); assertEquals(TASK_INFOS, taskInfos); PowerMock.verifyAll(); } @Test(expected = NotFoundException.class) public void testGetConnectorTaskConfigsConnectorNotFound() throws Throwable { final Capture<Callback<List<TaskInfo>>> cb = Capture.newInstance(); herder.taskConfigs(EasyMock.eq(CONNECTOR_NAME), EasyMock.capture(cb)); expectAndCallbackException(cb, new NotFoundException("connector not found")); PowerMock.replayAll(); connectorsResource.getTaskConfigs(CONNECTOR_NAME, FORWARD); PowerMock.verifyAll(); }
TopologyBuilder { public synchronized Map<Integer, TopicsInfo> topicGroups() { final Map<Integer, TopicsInfo> topicGroups = new LinkedHashMap<>(); if (nodeGroups == null) nodeGroups = makeNodeGroups(); for (Map.Entry<Integer, Set<String>> entry : nodeGroups.entrySet()) { final Set<String> sinkTopics = new HashSet<>(); final Set<String> sourceTopics = new HashSet<>(); final Map<String, InternalTopicConfig> internalSourceTopics = new HashMap<>(); final Map<String, InternalTopicConfig> stateChangelogTopics = new HashMap<>(); for (String node : entry.getValue()) { final List<String> topics = nodeToSourceTopics.get(node); if (topics != null) { for (String topic : topics) { if (globalTopics.contains(topic)) { continue; } if (this.internalTopicNames.contains(topic)) { final String internalTopic = decorateTopic(topic); internalSourceTopics.put(internalTopic, new InternalTopicConfig(internalTopic, Collections.singleton(InternalTopicConfig.CleanupPolicy.delete), Collections.<String, String>emptyMap())); sourceTopics.add(internalTopic); } else { sourceTopics.add(topic); } } } final String topic = nodeToSinkTopic.get(node); if (topic != null) { if (internalTopicNames.contains(topic)) { sinkTopics.add(decorateTopic(topic)); } else { sinkTopics.add(topic); } } for (StateStoreFactory stateFactory : stateFactories.values()) { final StateStoreSupplier supplier = stateFactory.supplier; if (supplier.loggingEnabled() && stateFactory.users.contains(node)) { final String name = ProcessorStateManager.storeChangelogTopic(applicationId, supplier.name()); final InternalTopicConfig internalTopicConfig = createInternalTopicConfig(supplier, name); stateChangelogTopics.put(name, internalTopicConfig); } } } if (!sourceTopics.isEmpty()) { topicGroups.put(entry.getKey(), new TopicsInfo( Collections.unmodifiableSet(sinkTopics), Collections.unmodifiableSet(sourceTopics), Collections.unmodifiableMap(internalSourceTopics), Collections.unmodifiableMap(stateChangelogTopics))); } } return 
Collections.unmodifiableMap(topicGroups); } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... 
topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier, final String sourceName, final Deserializer keyDeserializer, final Deserializer valueDeserializer, final String topic, final String processorName, final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier, final String sourceName, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, final Deserializer valueDeserializer, final String topic, final String processorName, final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... 
parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
@Test public void testTopicGroups() { final TopologyBuilder builder = new TopologyBuilder(); builder.setApplicationId("X"); builder.addInternalTopic("topic-1x"); builder.addSource("source-1", "topic-1", "topic-1x"); builder.addSource("source-2", "topic-2"); builder.addSource("source-3", "topic-3"); builder.addSource("source-4", "topic-4"); builder.addSource("source-5", "topic-5"); builder.addProcessor("processor-1", new MockProcessorSupplier(), "source-1"); builder.addProcessor("processor-2", new MockProcessorSupplier(), "source-2", "processor-1"); builder.copartitionSources(mkList("source-1", "source-2")); builder.addProcessor("processor-3", new MockProcessorSupplier(), "source-3", "source-4"); Map<Integer, TopicsInfo> topicGroups = builder.topicGroups(); Map<Integer, TopicsInfo> expectedTopicGroups = new HashMap<>(); expectedTopicGroups.put(0, new TopicsInfo(Collections.<String>emptySet(), mkSet("topic-1", "X-topic-1x", "topic-2"), Collections.<String, InternalTopicConfig>emptyMap(), Collections.<String, InternalTopicConfig>emptyMap())); expectedTopicGroups.put(1, new TopicsInfo(Collections.<String>emptySet(), mkSet("topic-3", "topic-4"), Collections.<String, InternalTopicConfig>emptyMap(), Collections.<String, InternalTopicConfig>emptyMap())); expectedTopicGroups.put(2, new TopicsInfo(Collections.<String>emptySet(), mkSet("topic-5"), Collections.<String, InternalTopicConfig>emptyMap(), Collections.<String, InternalTopicConfig>emptyMap())); assertEquals(3, topicGroups.size()); assertEquals(expectedTopicGroups, topicGroups); Collection<Set<String>> copartitionGroups = builder.copartitionGroups(); assertEquals(mkSet(mkSet("topic-1", "X-topic-1x", "topic-2")), new HashSet<>(copartitionGroups)); }
TopologyBuilder { public synchronized ProcessorTopology build(final Integer topicGroupId) { Set<String> nodeGroup; if (topicGroupId != null) { nodeGroup = nodeGroups().get(topicGroupId); } else { final Set<String> globalNodeGroups = globalNodeGroups(); final Collection<Set<String>> values = nodeGroups().values(); nodeGroup = new HashSet<>(); for (Set<String> value : values) { nodeGroup.addAll(value); } nodeGroup.removeAll(globalNodeGroups); } return build(nodeGroup); } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... 
topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier, final String sourceName, final Deserializer keyDeserializer, final Deserializer valueDeserializer, final String topic, final String processorName, final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier, final String sourceName, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, final Deserializer valueDeserializer, final String topic, final String processorName, final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... 
parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
@Test public void testBuild() { final TopologyBuilder builder = new TopologyBuilder(); builder.addSource("source-1", "topic-1", "topic-1x"); builder.addSource("source-2", "topic-2"); builder.addSource("source-3", "topic-3"); builder.addSource("source-4", "topic-4"); builder.addSource("source-5", "topic-5"); builder.addProcessor("processor-1", new MockProcessorSupplier(), "source-1"); builder.addProcessor("processor-2", new MockProcessorSupplier(), "source-2", "processor-1"); builder.addProcessor("processor-3", new MockProcessorSupplier(), "source-3", "source-4"); builder.setApplicationId("X"); ProcessorTopology topology0 = builder.build(0); ProcessorTopology topology1 = builder.build(1); ProcessorTopology topology2 = builder.build(2); assertEquals(mkSet("source-1", "source-2", "processor-1", "processor-2"), nodeNames(topology0.processors())); assertEquals(mkSet("source-3", "source-4", "processor-3"), nodeNames(topology1.processors())); assertEquals(mkSet("source-5"), nodeNames(topology2.processors())); }
TopologyBuilder { public synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames) { Objects.requireNonNull(processorName, "processorName can't be null"); if (stateStoreNames != null) { for (String stateStoreName : stateStoreNames) { connectProcessorAndStateStore(processorName, stateStoreName); } } return this; } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... 
topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier, final String sourceName, final Deserializer keyDeserializer, final Deserializer valueDeserializer, final String topic, final String processorName, final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier, final String sourceName, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, final Deserializer valueDeserializer, final String topic, final String processorName, final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... 
parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
// connectProcessorAndStateStores must fail fast (NPE) when given a null processor name.
@Test(expected = NullPointerException.class)
public void shouldNotAllowNullProcessorNameWhenConnectingProcessorAndStateStores() throws Exception {
    final TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.connectProcessorAndStateStores(null, "store");
}
// --- Focal-class summary (generated) for TopologyBuilder; focal method: addInternalTopic. ---
// addInternalTopic null-checks the topic name, records it in internalTopicNames, and returns
// `this` for fluent chaining. Everything after the focal method body is a signature-only
// listing of the builder's public surface (sources, sinks, processors, state stores, topic
// groups, reset patterns) — declarations without bodies, kept for context only.
TopologyBuilder { public synchronized final TopologyBuilder addInternalTopic(final String topicName) { Objects.requireNonNull(topicName, "topicName can't be null"); this.internalTopicNames.add(topicName); return this; } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... 
topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier, final String sourceName, final Deserializer keyDeserializer, final Deserializer valueDeserializer, final String topic, final String processorName, final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier, final String sourceName, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, final Deserializer valueDeserializer, final String topic, final String processorName, final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... 
parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
// addInternalTopic must fail fast (NPE) when given a null topic name.
@Test(expected = NullPointerException.class)
public void shouldNotAddNullInternalTopic() throws Exception {
    final TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.addInternalTopic(null);
}
// --- Focal-class summary (generated) for TopologyBuilder; focal method: setApplicationId. ---
// setApplicationId null-checks and stores the application id, returning `this` for fluent
// chaining. The trailing member list is a signature-only summary of the builder's API.
TopologyBuilder { public synchronized final TopologyBuilder setApplicationId(final String applicationId) { Objects.requireNonNull(applicationId, "applicationId can't be null"); this.applicationId = applicationId; return this; } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... 
topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier, final String sourceName, final Deserializer keyDeserializer, final Deserializer valueDeserializer, final String topic, final String processorName, final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier, final String sourceName, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, final Deserializer valueDeserializer, final String topic, final String processorName, final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... 
parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
// setApplicationId must fail fast (NPE) when given a null application id.
@Test(expected = NullPointerException.class)
public void shouldNotSetApplicationIdToNull() throws Exception {
    final TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.setApplicationId(null);
}
// --- Focal-class summary (generated) for TopologyBuilder; focal method: addGlobalStore. ---
// This addGlobalStore overload delegates to the fuller overload, passing null for the
// TimestampExtractor so there is a single registration code path for global stores.
// The trailing member list is a signature-only summary of the builder's API.
TopologyBuilder { public synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier, final String sourceName, final Deserializer keyDeserializer, final Deserializer valueDeserializer, final String topic, final String processorName, final ProcessorSupplier stateUpdateSupplier) { return addGlobalStore(storeSupplier, sourceName, null, keyDeserializer, valueDeserializer, topic, processorName, stateUpdateSupplier); } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... 
topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier, final String sourceName, final Deserializer keyDeserializer, final Deserializer valueDeserializer, final String topic, final String processorName, final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier, final String sourceName, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, final Deserializer valueDeserializer, final String topic, final String processorName, final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final TimestampExtractor timestampExtractor, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... 
parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
// addGlobalStore must reject registration when the source node name equals the processor
// node name (node names must be unique in the topology).
// Fix: the original assigned the builder to an unused local (`topologyBuilder`); the call
// is expected to throw, so the result is never used — the dead assignment is removed.
@Test(expected = TopologyBuilderException.class)
public void shouldNotAllowToAddGlobalStoreWithSourceNameEqualsProcessorName() {
    final String sameNameForSourceAndProcessor = "sameName";
    new TopologyBuilder().addGlobalStore(
        new MockStateStoreSupplier("anyName", false, false),
        sameNameForSourceAndProcessor,
        null,
        null,
        "anyTopicName",
        sameNameForSourceAndProcessor,
        new MockProcessorSupplier());
}
// --- Focal-class summary (generated) for ConnectorsResource; focal method: putTaskConfigs. ---
// putTaskConfigs handles POST /connectors/{connector}/tasks: it hands the supplied task
// configs to the herder via a FutureCallback, then completeOrForwardRequest either waits
// for local completion or forwards the request to another worker (per the ?forward flag).
// The trailing member list is a signature-only summary of the REST resource's endpoints.
ConnectorsResource { @POST @Path("/{connector}/tasks") public void putTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final List<Map<String, String>> taskConfigs) throws Throwable { FutureCallback<Void> cb = new FutureCallback<>(); herder.putTaskConfigs(connector, taskConfigs, cb); completeOrForwardRequest(cb, "/connectors/" + connector + "/tasks", "POST", taskConfigs, forward); } ConnectorsResource(Herder herder); @GET @Path("/") Collection<String> listConnectors(final @QueryParam("forward") Boolean forward); @POST @Path("/") Response createConnector(final @QueryParam("forward") Boolean forward, final CreateConnectorRequest createRequest); @GET @Path("/{connector}") ConnectorInfo getConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/config") Map<String, String> getConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/status") ConnectorStateInfo getConnectorStatus(final @PathParam("connector") String connector); @PUT @Path("/{connector}/config") Response putConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final Map<String, String> connectorConfig); @POST @Path("/{connector}/restart") void restartConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @PUT @Path("/{connector}/pause") Response pauseConnector(@PathParam("connector") String connector); @PUT @Path("/{connector}/resume") Response resumeConnector(@PathParam("connector") String connector); @GET @Path("/{connector}/tasks") List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @POST @Path("/{connector}/tasks") void putTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean 
forward, final List<Map<String, String>> taskConfigs); @GET @Path("/{connector}/tasks/{task}/status") ConnectorStateInfo.TaskState getTaskStatus(final @PathParam("connector") String connector, final @PathParam("task") Integer task); @POST @Path("/{connector}/tasks/{task}/restart") void restartTask(final @PathParam("connector") String connector, final @PathParam("task") Integer task, final @QueryParam("forward") Boolean forward); @DELETE @Path("/{connector}") void destroyConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); }
// Happy path: the resource forwards the task configs to the herder and completes normally.
@Test
public void testPutConnectorTaskConfigs() throws Throwable {
    final Capture<Callback<Void>> callback = Capture.newInstance();
    herder.putTaskConfigs(EasyMock.eq(CONNECTOR_NAME), EasyMock.eq(TASK_CONFIGS), EasyMock.capture(callback));
    expectAndCallbackResult(callback, null);
    PowerMock.replayAll();

    connectorsResource.putTaskConfigs(CONNECTOR_NAME, FORWARD, TASK_CONFIGS);

    PowerMock.verifyAll();
}

// A NotFoundException raised by the herder callback must propagate out of the resource call.
@Test(expected = NotFoundException.class)
public void testPutConnectorTaskConfigsConnectorNotFound() throws Throwable {
    final Capture<Callback<Void>> callback = Capture.newInstance();
    herder.putTaskConfigs(EasyMock.eq(CONNECTOR_NAME), EasyMock.eq(TASK_CONFIGS), EasyMock.capture(callback));
    expectAndCallbackException(callback, new NotFoundException("not found"));
    PowerMock.replayAll();

    connectorsResource.putTaskConfigs(CONNECTOR_NAME, FORWARD, TASK_CONFIGS);

    PowerMock.verifyAll();
}
// --- Focal-class summary (generated) for ConnectorsResource; focal method: restartConnector. ---
// restartConnector handles POST /connectors/{connector}/restart: asks the herder to restart
// the connector through a FutureCallback, then completes locally or forwards the request
// (with a null body) to another worker depending on the ?forward flag.
// The trailing member list is a signature-only summary of the REST resource's endpoints.
ConnectorsResource { @POST @Path("/{connector}/restart") public void restartConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<Void> cb = new FutureCallback<>(); herder.restartConnector(connector, cb); completeOrForwardRequest(cb, "/connectors/" + connector + "/restart", "POST", null, forward); } ConnectorsResource(Herder herder); @GET @Path("/") Collection<String> listConnectors(final @QueryParam("forward") Boolean forward); @POST @Path("/") Response createConnector(final @QueryParam("forward") Boolean forward, final CreateConnectorRequest createRequest); @GET @Path("/{connector}") ConnectorInfo getConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/config") Map<String, String> getConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/status") ConnectorStateInfo getConnectorStatus(final @PathParam("connector") String connector); @PUT @Path("/{connector}/config") Response putConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final Map<String, String> connectorConfig); @POST @Path("/{connector}/restart") void restartConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @PUT @Path("/{connector}/pause") Response pauseConnector(@PathParam("connector") String connector); @PUT @Path("/{connector}/resume") Response resumeConnector(@PathParam("connector") String connector); @GET @Path("/{connector}/tasks") List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @POST @Path("/{connector}/tasks") void putTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final List<Map<String, String>> taskConfigs); @GET 
@Path("/{connector}/tasks/{task}/status") ConnectorStateInfo.TaskState getTaskStatus(final @PathParam("connector") String connector, final @PathParam("task") Integer task); @POST @Path("/{connector}/tasks/{task}/restart") void restartTask(final @PathParam("connector") String connector, final @PathParam("task") Integer task, final @QueryParam("forward") Boolean forward); @DELETE @Path("/{connector}") void destroyConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); }
// Tests for ConnectorsResource.restartConnector: not-found propagation, leader redirect,
// and owner redirect (the latter two stub RestServer.httpRequest to return HTTP 202).
// NOTE(review): several "http:" string literals below were truncated when this file was
// flattened — the redirect URLs (and possibly adjacent tokens) are missing. Reconstructing
// them would be guesswork, so the code is left byte-identical; restore from the original
// ConnectorsResourceTest before compiling.
@Test(expected = NotFoundException.class) public void testRestartConnectorNotFound() throws Throwable { final Capture<Callback<Void>> cb = Capture.newInstance(); herder.restartConnector(EasyMock.eq(CONNECTOR_NAME), EasyMock.capture(cb)); expectAndCallbackException(cb, new NotFoundException("not found")); PowerMock.replayAll(); connectorsResource.restartConnector(CONNECTOR_NAME, FORWARD); PowerMock.verifyAll(); } @Test public void testRestartConnectorLeaderRedirect() throws Throwable { final Capture<Callback<Void>> cb = Capture.newInstance(); herder.restartConnector(EasyMock.eq(CONNECTOR_NAME), EasyMock.capture(cb)); expectAndCallbackNotLeaderException(cb); EasyMock.expect(RestServer.httpRequest(EasyMock.eq("http: EasyMock.eq("POST"), EasyMock.isNull(), EasyMock.<TypeReference>anyObject())) .andReturn(new RestServer.HttpResponse<>(202, new HashMap<String, List<String>>(), null)); PowerMock.replayAll(); connectorsResource.restartConnector(CONNECTOR_NAME, null); PowerMock.verifyAll(); } @Test public void testRestartConnectorOwnerRedirect() throws Throwable { final Capture<Callback<Void>> cb = Capture.newInstance(); herder.restartConnector(EasyMock.eq(CONNECTOR_NAME), EasyMock.capture(cb)); String ownerUrl = "http: expectAndCallbackException(cb, new NotAssignedException("not owner test", ownerUrl)); EasyMock.expect(RestServer.httpRequest(EasyMock.eq("http: EasyMock.eq("POST"), EasyMock.isNull(), EasyMock.<TypeReference>anyObject())) .andReturn(new RestServer.HttpResponse<>(202, new HashMap<String, List<String>>(), null)); PowerMock.replayAll(); connectorsResource.restartConnector(CONNECTOR_NAME, true); PowerMock.verifyAll(); }
// --- Focal-class summary (generated) for ConnectorsResource; focal method: restartTask. ---
// restartTask handles POST /connectors/{connector}/tasks/{task}/restart: builds a
// ConnectorTaskId from the path params, asks the herder to restart that task, and then
// completes locally or forwards the request (null body) per the ?forward flag.
// The trailing member list is a signature-only summary of the REST resource's endpoints.
ConnectorsResource { @POST @Path("/{connector}/tasks/{task}/restart") public void restartTask(final @PathParam("connector") String connector, final @PathParam("task") Integer task, final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<Void> cb = new FutureCallback<>(); ConnectorTaskId taskId = new ConnectorTaskId(connector, task); herder.restartTask(taskId, cb); completeOrForwardRequest(cb, "/connectors/" + connector + "/tasks/" + task + "/restart", "POST", null, forward); } ConnectorsResource(Herder herder); @GET @Path("/") Collection<String> listConnectors(final @QueryParam("forward") Boolean forward); @POST @Path("/") Response createConnector(final @QueryParam("forward") Boolean forward, final CreateConnectorRequest createRequest); @GET @Path("/{connector}") ConnectorInfo getConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/config") Map<String, String> getConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/status") ConnectorStateInfo getConnectorStatus(final @PathParam("connector") String connector); @PUT @Path("/{connector}/config") Response putConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final Map<String, String> connectorConfig); @POST @Path("/{connector}/restart") void restartConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @PUT @Path("/{connector}/pause") Response pauseConnector(@PathParam("connector") String connector); @PUT @Path("/{connector}/resume") Response resumeConnector(@PathParam("connector") String connector); @GET @Path("/{connector}/tasks") List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @POST @Path("/{connector}/tasks") void putTaskConfigs(final 
@PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final List<Map<String, String>> taskConfigs); @GET @Path("/{connector}/tasks/{task}/status") ConnectorStateInfo.TaskState getTaskStatus(final @PathParam("connector") String connector, final @PathParam("task") Integer task); @POST @Path("/{connector}/tasks/{task}/restart") void restartTask(final @PathParam("connector") String connector, final @PathParam("task") Integer task, final @QueryParam("forward") Boolean forward); @DELETE @Path("/{connector}") void destroyConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); }
// Tests for ConnectorsResource.restartTask: not-found propagation, leader redirect, and
// owner redirect (the redirect cases stub RestServer.httpRequest to return HTTP 202).
// NOTE(review): as in the restartConnector tests above, the "http:" string literals were
// truncated by the extraction that flattened this file; the code is left byte-identical —
// restore the full URLs from the original ConnectorsResourceTest before compiling.
@Test(expected = NotFoundException.class) public void testRestartTaskNotFound() throws Throwable { ConnectorTaskId taskId = new ConnectorTaskId(CONNECTOR_NAME, 0); final Capture<Callback<Void>> cb = Capture.newInstance(); herder.restartTask(EasyMock.eq(taskId), EasyMock.capture(cb)); expectAndCallbackException(cb, new NotFoundException("not found")); PowerMock.replayAll(); connectorsResource.restartTask(CONNECTOR_NAME, 0, FORWARD); PowerMock.verifyAll(); } @Test public void testRestartTaskLeaderRedirect() throws Throwable { ConnectorTaskId taskId = new ConnectorTaskId(CONNECTOR_NAME, 0); final Capture<Callback<Void>> cb = Capture.newInstance(); herder.restartTask(EasyMock.eq(taskId), EasyMock.capture(cb)); expectAndCallbackNotLeaderException(cb); EasyMock.expect(RestServer.httpRequest(EasyMock.eq("http: EasyMock.eq("POST"), EasyMock.isNull(), EasyMock.<TypeReference>anyObject())) .andReturn(new RestServer.HttpResponse<>(202, new HashMap<String, List<String>>(), null)); PowerMock.replayAll(); connectorsResource.restartTask(CONNECTOR_NAME, 0, null); PowerMock.verifyAll(); } @Test public void testRestartTaskOwnerRedirect() throws Throwable { ConnectorTaskId taskId = new ConnectorTaskId(CONNECTOR_NAME, 0); final Capture<Callback<Void>> cb = Capture.newInstance(); herder.restartTask(EasyMock.eq(taskId), EasyMock.capture(cb)); String ownerUrl = "http: expectAndCallbackException(cb, new NotAssignedException("not owner test", ownerUrl)); EasyMock.expect(RestServer.httpRequest(EasyMock.eq("http: EasyMock.eq("POST"), EasyMock.isNull(), EasyMock.<TypeReference>anyObject())) .andReturn(new RestServer.HttpResponse<>(202, new HashMap<String, List<String>>(), null)); PowerMock.replayAll(); connectorsResource.restartTask(CONNECTOR_NAME, 0, true); PowerMock.verifyAll(); }
// --- Focal-class summary (generated) for ConnectorPluginsResource; focal method: validateConfigs. ---
// validateConfigs handles PUT /connector-plugins/{connectorType}/config/validate: when the
// submitted config names a connector class, its normalized name must end-match the
// normalized {connectorType} path segment (allowing simple names and aliases), otherwise a
// BadRequestException is thrown; matching requests delegate to herder.validateConnectorConfig.
ConnectorPluginsResource { @PUT @Path("/{connectorType}/config/validate") public ConfigInfos validateConfigs( final @PathParam("connectorType") String connType, final Map<String, String> connectorConfig ) throws Throwable { String includedConnType = connectorConfig.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG); if (includedConnType != null && !normalizedPluginName(includedConnType).endsWith(normalizedPluginName(connType))) { throw new BadRequestException( "Included connector type " + includedConnType + " does not match request type " + connType ); } return herder.validateConnectorConfig(connectorConfig); } ConnectorPluginsResource(Herder herder); @PUT @Path("/{connectorType}/config/validate") ConfigInfos validateConfigs( final @PathParam("connectorType") String connType, final Map<String, String> connectorConfig ); @GET @Path("/") List<ConnectorPluginInfo> listConnectorPlugins(); }
// --- ConnectorPluginsResourceTest: /config/validate endpoint behavior ---
// Each test stubs herder.validateConnectorConfig with an IAnswer that performs a real validation
// against ConnectorPluginsResourceTestConnector, merging the framework ConfigDef with the
// connector's own ConfigDef via AbstractHerder.generateResult, then asserts on the ConfigInfos.

// Validation of a config missing the connector classname: expects the error counts/groups/values
// captured in the PARTIAL_CONFIG_INFOS fixture (values compared as sets since order is unspecified).
@Test public void testValidateConfigWithSingleErrorDueToMissingConnectorClassname() throws Throwable { herder.validateConnectorConfig(EasyMock.eq(partialProps)); PowerMock.expectLastCall().andAnswer(new IAnswer<ConfigInfos>() { @Override public ConfigInfos answer() { ConfigDef connectorConfigDef = ConnectorConfig.configDef(); List<ConfigValue> connectorConfigValues = connectorConfigDef.validate(partialProps); Connector connector = new ConnectorPluginsResourceTestConnector(); Config config = connector.validate(partialProps); ConfigDef configDef = connector.config(); Map<String, ConfigDef.ConfigKey> configKeys = configDef.configKeys(); List<ConfigValue> configValues = config.configValues(); Map<String, ConfigDef.ConfigKey> resultConfigKeys = new HashMap<>(configKeys); resultConfigKeys.putAll(connectorConfigDef.configKeys()); configValues.addAll(connectorConfigValues); return AbstractHerder.generateResult( ConnectorPluginsResourceTestConnector.class.getName(), resultConfigKeys, configValues, Collections.singletonList("Test") ); } }); PowerMock.replayAll(); ConfigInfos configInfos = connectorPluginsResource.validateConfigs( ConnectorPluginsResourceTestConnector.class.getSimpleName(), partialProps ); assertEquals(PARTIAL_CONFIG_INFOS.name(), configInfos.name()); assertEquals(PARTIAL_CONFIG_INFOS.errorCount(), configInfos.errorCount()); assertEquals(PARTIAL_CONFIG_INFOS.groups(), configInfos.groups()); assertEquals( new HashSet<>(PARTIAL_CONFIG_INFOS.values()), new HashSet<>(configInfos.values()) ); PowerMock.verifyAll(); }

// A fully valid config addressed by the connector's simple class name: zero errors expected.
@Test public void testValidateConfigWithSimpleName() throws Throwable { herder.validateConnectorConfig(EasyMock.eq(props)); PowerMock.expectLastCall().andAnswer(new IAnswer<ConfigInfos>() { @Override public ConfigInfos answer() { ConfigDef connectorConfigDef = ConnectorConfig.configDef(); List<ConfigValue> connectorConfigValues = connectorConfigDef.validate(props); Connector connector = new ConnectorPluginsResourceTestConnector(); Config config = connector.validate(props); ConfigDef configDef = connector.config(); Map<String, ConfigDef.ConfigKey> configKeys = configDef.configKeys(); List<ConfigValue> configValues = config.configValues(); Map<String, ConfigDef.ConfigKey> resultConfigKeys = new HashMap<>(configKeys); resultConfigKeys.putAll(connectorConfigDef.configKeys()); configValues.addAll(connectorConfigValues); return AbstractHerder.generateResult( ConnectorPluginsResourceTestConnector.class.getName(), resultConfigKeys, configValues, Collections.singletonList("Test") ); } }); PowerMock.replayAll(); ConfigInfos configInfos = connectorPluginsResource.validateConfigs( ConnectorPluginsResourceTestConnector.class.getSimpleName(), props ); assertEquals(CONFIG_INFOS.name(), configInfos.name()); assertEquals(0, configInfos.errorCount()); assertEquals(CONFIG_INFOS.groups(), configInfos.groups()); assertEquals(new HashSet<>(CONFIG_INFOS.values()), new HashSet<>(configInfos.values())); PowerMock.verifyAll(); }

// Same valid config, but the connector addressed by its alias ("ConnectorPluginsResourceTest"):
// alias resolution must reach the same connector and produce the same result.
@Test public void testValidateConfigWithAlias() throws Throwable { herder.validateConnectorConfig(EasyMock.eq(props)); PowerMock.expectLastCall().andAnswer(new IAnswer<ConfigInfos>() { @Override public ConfigInfos answer() { ConfigDef connectorConfigDef = ConnectorConfig.configDef(); List<ConfigValue> connectorConfigValues = connectorConfigDef.validate(props); Connector connector = new ConnectorPluginsResourceTestConnector(); Config config = connector.validate(props); ConfigDef configDef = connector.config(); Map<String, ConfigDef.ConfigKey> configKeys = configDef.configKeys(); List<ConfigValue> configValues = config.configValues(); Map<String, ConfigDef.ConfigKey> resultConfigKeys = new HashMap<>(configKeys); resultConfigKeys.putAll(connectorConfigDef.configKeys()); configValues.addAll(connectorConfigValues); return AbstractHerder.generateResult( ConnectorPluginsResourceTestConnector.class.getName(), resultConfigKeys, configValues, Collections.singletonList("Test") ); } }); PowerMock.replayAll(); ConfigInfos configInfos = connectorPluginsResource.validateConfigs( "ConnectorPluginsResourceTest", props ); assertEquals(CONFIG_INFOS.name(), configInfos.name()); assertEquals(0, configInfos.errorCount()); assertEquals(CONFIG_INFOS.groups(), configInfos.groups()); assertEquals(new HashSet<>(CONFIG_INFOS.values()), new HashSet<>(configInfos.values())); PowerMock.verifyAll(); }

// A path type naming a class that does not exist on the classpath must be rejected with BadRequestException.
@Test(expected = BadRequestException.class) public void testValidateConfigWithNonExistentName() throws Throwable { herder.validateConnectorConfig(EasyMock.eq(props)); PowerMock.expectLastCall().andAnswer(new IAnswer<ConfigInfos>() { @Override public ConfigInfos answer() { ConfigDef connectorConfigDef = ConnectorConfig.configDef(); List<ConfigValue> connectorConfigValues = connectorConfigDef.validate(props); Connector connector = new ConnectorPluginsResourceTestConnector(); Config config = connector.validate(props); ConfigDef configDef = connector.config(); Map<String, ConfigDef.ConfigKey> configKeys = configDef.configKeys(); List<ConfigValue> configValues = config.configValues(); Map<String, ConfigDef.ConfigKey> resultConfigKeys = new HashMap<>(configKeys); resultConfigKeys.putAll(connectorConfigDef.configKeys()); configValues.addAll(connectorConfigValues); return AbstractHerder.generateResult( ConnectorPluginsResourceTestConnector.class.getName(), resultConfigKeys, configValues, Collections.singletonList("Test") ); } }); PowerMock.replayAll(); String customClassname = "com.custom.package." + ConnectorPluginsResourceTestConnector.class.getSimpleName(); connectorPluginsResource.validateConfigs(customClassname, props); PowerMock.verifyAll(); }

// A path alias that matches no registered plugin alias must likewise be rejected with BadRequestException.
@Test(expected = BadRequestException.class) public void testValidateConfigWithNonExistentAlias() throws Throwable { herder.validateConnectorConfig(EasyMock.eq(props)); PowerMock.expectLastCall().andAnswer(new IAnswer<ConfigInfos>() { @Override public ConfigInfos answer() { ConfigDef connectorConfigDef = ConnectorConfig.configDef(); List<ConfigValue> connectorConfigValues = connectorConfigDef.validate(props); Connector connector = new ConnectorPluginsResourceTestConnector(); Config config = connector.validate(props); ConfigDef configDef = connector.config(); Map<String, ConfigDef.ConfigKey> configKeys = configDef.configKeys(); List<ConfigValue> configValues = config.configValues(); Map<String, ConfigDef.ConfigKey> resultConfigKeys = new HashMap<>(configKeys); resultConfigKeys.putAll(connectorConfigDef.configKeys()); configValues.addAll(connectorConfigValues); return AbstractHerder.generateResult( ConnectorPluginsResourceTestConnector.class.getName(), resultConfigKeys, configValues, Collections.singletonList("Test") ); } }); PowerMock.replayAll(); connectorPluginsResource.validateConfigs("ConnectorPluginsTest", props); PowerMock.verifyAll(); }
AbstractHerder implements Herder, TaskStatus.Listener, ConnectorStatus.Listener {
    /**
     * Builds the REST-facing state snapshot for a connector: the connector's own state
     * plus the state of every task currently recorded for it, sorted for stable output.
     *
     * @throws NotFoundException if the status store has no entry for the connector
     */
    @Override
    public ConnectorStateInfo connectorStatus(String connName) {
        ConnectorStatus connStatus = statusBackingStore.get(connName);
        if (connStatus == null) {
            throw new NotFoundException("No status found for connector " + connName);
        }
        ConnectorStateInfo.ConnectorState connState = new ConnectorStateInfo.ConnectorState(
                connStatus.state().toString(), connStatus.workerId(), connStatus.trace());
        List<ConnectorStateInfo.TaskState> taskStates = new ArrayList<>();
        for (TaskStatus taskStatus : statusBackingStore.getAll(connName)) {
            taskStates.add(new ConnectorStateInfo.TaskState(
                    taskStatus.id().task(),
                    taskStatus.state().toString(),
                    taskStatus.workerId(),
                    taskStatus.trace()));
        }
        // TaskState sorts by task id, giving callers a deterministic ordering.
        Collections.sort(taskStates);
        return new ConnectorStateInfo(connName, connState, taskStates);
    }
    AbstractHerder(Worker worker, String workerId, StatusBackingStore statusBackingStore, ConfigBackingStore configBackingStore); @Override void onStartup(String connector); @Override void onPause(String connector); @Override void onResume(String connector); @Override void onShutdown(String connector); @Override void onFailure(String connector, Throwable cause); @Override void onStartup(ConnectorTaskId id); @Override void onFailure(ConnectorTaskId id, Throwable cause); @Override void onShutdown(ConnectorTaskId id); @Override void onResume(ConnectorTaskId id); @Override void onPause(ConnectorTaskId id); @Override void onDeletion(String connector); @Override void pauseConnector(String connector); @Override void resumeConnector(String connector); @Override Plugins plugins(); @Override ConnectorStateInfo connectorStatus(String connName); @Override ConnectorStateInfo.TaskState taskStatus(ConnectorTaskId id); @Override ConfigInfos validateConnectorConfig(Map<String, String> connectorProps); static ConfigInfos generateResult(String connType, Map<String, ConfigKey> configKeys, List<ConfigValue> configValues, List<String> groups); }
// connectorStatus() aggregates the connector state and the per-task states from the status store.
// Uses a partial mock of the abstract herder (only generation() stubbed); strict store mocks
// verify get() is consulted before getAll().
@Test public void connectorStatus() { ConnectorTaskId taskId = new ConnectorTaskId(connector, 0); ConfigBackingStore configStore = strictMock(ConfigBackingStore.class); StatusBackingStore statusStore = strictMock(StatusBackingStore.class); AbstractHerder herder = partialMockBuilder(AbstractHerder.class) .withConstructor(Worker.class, String.class, StatusBackingStore.class, ConfigBackingStore.class) .withArgs(worker, workerId, statusStore, configStore) .addMockedMethod("generation") .createMock(); EasyMock.expect(herder.generation()).andStubReturn(generation); EasyMock.expect(statusStore.get(connector)) .andReturn(new ConnectorStatus(connector, AbstractStatus.State.RUNNING, workerId, generation)); EasyMock.expect(statusStore.getAll(connector)) .andReturn(Collections.singletonList( new TaskStatus(taskId, AbstractStatus.State.UNASSIGNED, workerId, generation))); replayAll(); ConnectorStateInfo state = herder.connectorStatus(connector); assertEquals(connector, state.name()); assertEquals("RUNNING", state.connector().state()); assertEquals(1, state.tasks().size()); assertEquals(workerId, state.connector().workerId()); ConnectorStateInfo.TaskState taskState = state.tasks().get(0); assertEquals(0, taskState.id()); assertEquals("UNASSIGNED", taskState.state()); assertEquals(workerId, taskState.workerId()); verifyAll(); }
AbstractHerder implements Herder, TaskStatus.Listener, ConnectorStatus.Listener {
    /**
     * Looks up the current state of a single task from the status store and converts it
     * to the REST-facing TaskState representation.
     *
     * @throws NotFoundException if the status store has no entry for the task id
     */
    @Override
    public ConnectorStateInfo.TaskState taskStatus(ConnectorTaskId id) {
        TaskStatus taskStatus = statusBackingStore.get(id);
        if (taskStatus == null) {
            throw new NotFoundException("No status found for task " + id);
        }
        return new ConnectorStateInfo.TaskState(
                id.task(), taskStatus.state().toString(), taskStatus.workerId(), taskStatus.trace());
    }
    AbstractHerder(Worker worker, String workerId, StatusBackingStore statusBackingStore, ConfigBackingStore configBackingStore); @Override void onStartup(String connector); @Override void onPause(String connector); @Override void onResume(String connector); @Override void onShutdown(String connector); @Override void onFailure(String connector, Throwable cause); @Override void onStartup(ConnectorTaskId id); @Override void onFailure(ConnectorTaskId id, Throwable cause); @Override void onShutdown(ConnectorTaskId id); @Override void onResume(ConnectorTaskId id); @Override void onPause(ConnectorTaskId id); @Override void onDeletion(String connector); @Override void pauseConnector(String connector); @Override void resumeConnector(String connector); @Override Plugins plugins(); @Override ConnectorStateInfo connectorStatus(String connName); @Override ConnectorStateInfo.TaskState taskStatus(ConnectorTaskId id); @Override ConfigInfos validateConnectorConfig(Map<String, String> connectorProps); static ConfigInfos generateResult(String connType, Map<String, ConfigKey> configKeys, List<ConfigValue> configValues, List<String> groups); }
// taskStatus() reflects whatever the status store holds: here onFailure() writes a FAILED status
// (captured via putSafe) and the store replays that captured value on get(), so the returned
// TaskState must be FAILED with a non-null trace.
@Test public void taskStatus() { ConnectorTaskId taskId = new ConnectorTaskId("connector", 0); String workerId = "workerId"; ConfigBackingStore configStore = strictMock(ConfigBackingStore.class); StatusBackingStore statusStore = strictMock(StatusBackingStore.class); AbstractHerder herder = partialMockBuilder(AbstractHerder.class) .withConstructor(Worker.class, String.class, StatusBackingStore.class, ConfigBackingStore.class) .withArgs(worker, workerId, statusStore, configStore) .addMockedMethod("generation") .createMock(); EasyMock.expect(herder.generation()).andStubReturn(5); final Capture<TaskStatus> statusCapture = EasyMock.newCapture(); statusStore.putSafe(EasyMock.capture(statusCapture)); EasyMock.expectLastCall(); EasyMock.expect(statusStore.get(taskId)).andAnswer(new IAnswer<TaskStatus>() { @Override public TaskStatus answer() throws Throwable { return statusCapture.getValue(); } }); replayAll(); herder.onFailure(taskId, new RuntimeException()); ConnectorStateInfo.TaskState taskState = herder.taskStatus(taskId); assertEquals(workerId, taskState.workerId()); assertEquals("FAILED", taskState.state()); assertEquals(0, taskState.id()); assertNotNull(taskState.trace()); verifyAll(); }
AbstractHerder implements Herder, TaskStatus.Listener, ConnectorStatus.Listener {
    /**
     * Validates a candidate connector configuration and returns the merged validation result.
     *
     * Combines two validations: (1) the framework-level source/sink ConfigDef, enriched with
     * transformation configs derived from the props, and (2) the connector's own validate()/config().
     * Config keys, values, and groups from both are merged into a single ConfigInfos via
     * generateResult().
     *
     * @throws BadRequestException if the props contain no connector class
     */
    @Override
    public ConfigInfos validateConnectorConfig(Map<String, String> connectorProps) {
        String connType = connectorProps.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG);
        if (connType == null)
            throw new BadRequestException("Connector config " + connectorProps + " contains no connector type");
        List<ConfigValue> configValues = new ArrayList<>();
        // LinkedHashMap/LinkedHashSet preserve insertion order so the REST output is stable.
        Map<String, ConfigKey> configKeys = new LinkedHashMap<>();
        Set<String> allGroups = new LinkedHashSet<>();
        Connector connector = getConnector(connType);
        // Swap to the connector plugin's classloader for the duration of validation;
        // the finally block restores the caller's loader even if validation throws.
        ClassLoader savedLoader = plugins().compareAndSwapLoaders(connector);
        try {
            // Framework-level validation: base def depends on source vs. sink.
            ConfigDef baseConfigDef = (connector instanceof SourceConnector) ? SourceConnectorConfig.configDef() : SinkConnectorConfig.configDef();
            ConfigDef enrichedConfigDef = ConnectorConfig.enrich(plugins(), baseConfigDef, connectorProps, false);
            Map<String, ConfigValue> validatedConnectorConfig = validateBasicConnectorConfig( connector, enrichedConfigDef, connectorProps );
            configValues.addAll(validatedConnectorConfig.values());
            configKeys.putAll(enrichedConfigDef.configKeys());
            allGroups.addAll(enrichedConfigDef.groups());
            // Connector-level validation: the plugin's own config definition and checks.
            Config config = connector.validate(connectorProps);
            ConfigDef configDef = connector.config();
            configKeys.putAll(configDef.configKeys());
            allGroups.addAll(configDef.groups());
            configValues.addAll(config.configValues());
            return generateResult(connType, configKeys, configValues, new ArrayList<>(allGroups));
        } finally {
            Plugins.compareAndSwapLoaders(savedLoader);
        }
    }
    AbstractHerder(Worker worker, String workerId, StatusBackingStore statusBackingStore, ConfigBackingStore configBackingStore); @Override void onStartup(String connector); @Override void onPause(String connector); @Override void onResume(String connector); @Override void onShutdown(String connector); @Override void onFailure(String connector, Throwable cause); @Override void onStartup(ConnectorTaskId id); @Override void onFailure(ConnectorTaskId id, Throwable cause); @Override void onShutdown(ConnectorTaskId id); @Override void onResume(ConnectorTaskId id); @Override void onPause(ConnectorTaskId id); @Override void onDeletion(String connector); @Override void pauseConnector(String connector); @Override void resumeConnector(String connector); @Override Plugins plugins(); @Override ConnectorStateInfo connectorStatus(String connName); @Override ConnectorStateInfo.TaskState taskStatus(ConnectorTaskId id); @Override ConfigInfos validateConnectorConfig(Map<String, String> connectorProps); static ConfigInfos generateResult(String connType, Map<String, ConfigKey> configKeys, List<ConfigValue> configValues, List<String> groups); }
// --- AbstractHerderTest: validateConnectorConfig behavior ---

// An empty config has no connector class, which must be rejected as a bad request.
@Test(expected = BadRequestException.class) public void testConfigValidationEmptyConfig() { AbstractHerder herder = createConfigValidationHerder(TestSourceConnector.class); replayAll(); herder.validateConnectorConfig(new HashMap<String, String>()); verifyAll(); } 

// A config with only the class set still validates (no exception), but reports errors on
// the missing "name" and the connector's missing "required" keys; index positions below
// follow the merged key ordering produced by validateConnectorConfig.
@Test() public void testConfigValidationMissingName() { AbstractHerder herder = createConfigValidationHerder(TestSourceConnector.class); replayAll(); Map<String, String> config = Collections.singletonMap(ConnectorConfig.CONNECTOR_CLASS_CONFIG, TestSourceConnector.class.getName()); ConfigInfos result = herder.validateConnectorConfig(config); assertEquals(TestSourceConnector.class.getName(), result.name()); assertEquals(Arrays.asList(ConnectorConfig.COMMON_GROUP, ConnectorConfig.TRANSFORMS_GROUP), result.groups()); assertEquals(2, result.errorCount()); assertEquals(8, result.values().size()); assertEquals(ConnectorConfig.NAME_CONFIG, result.values().get(0).configValue().name()); assertEquals(1, result.values().get(0).configValue().errors().size()); assertEquals("required", result.values().get(6).configValue().name()); assertEquals(1, result.values().get(6).configValue().errors().size()); verifyAll(); } 

// Declaring transforms adds per-transform groups and config values to the result:
// xformA resolves to SampleTransformation (no errors), xformB has no type and must carry errors.
@Test() public void testConfigValidationTransformsExtendResults() { AbstractHerder herder = createConfigValidationHerder(TestSourceConnector.class); Set<PluginDesc<Transformation>> transformations = new HashSet<>(); transformations.add(new PluginDesc<Transformation>(SampleTransformation.class, "1.0", classLoader)); EasyMock.expect(plugins.transformations()).andReturn(transformations).times(2); replayAll(); Map<String, String> config = new HashMap<>(); config.put(ConnectorConfig.CONNECTOR_CLASS_CONFIG, TestSourceConnector.class.getName()); config.put(ConnectorConfig.NAME_CONFIG, "connector-name"); config.put(ConnectorConfig.TRANSFORMS_CONFIG, "xformA,xformB"); config.put(ConnectorConfig.TRANSFORMS_CONFIG + ".xformA.type", SampleTransformation.class.getName()); config.put("required", "value"); ConfigInfos result = herder.validateConnectorConfig(config); assertEquals(TestSourceConnector.class.getName(), result.name()); List<String> expectedGroups = Arrays.asList( ConnectorConfig.COMMON_GROUP, ConnectorConfig.TRANSFORMS_GROUP, "Transforms: xformA", "Transforms: xformB" ); assertEquals(expectedGroups, result.groups()); assertEquals(2, result.errorCount()); assertEquals(11, result.values().size()); assertEquals("transforms.xformA.type", result.values().get(6).configValue().name()); assertTrue(result.values().get(6).configValue().errors().isEmpty()); assertEquals("transforms.xformA.subconfig", result.values().get(7).configValue().name()); assertEquals("transforms.xformB.type", result.values().get(8).configValue().name()); assertFalse(result.values().get(8).configValue().errors().isEmpty()); verifyAll(); }
StandaloneHerder extends AbstractHerder {
    /**
     * Restarts the named connector: stops the running instance and starts it again with
     * its currently stored configuration. Completion (success or failure) is reported
     * through the callback.
     *
     * @param connName name of the connector to restart
     * @param cb       invoked exactly once with either null (success) or the failure cause
     */
    @Override
    public synchronized void restartConnector(String connName, Callback<Void> cb) {
        if (!configState.contains(connName)) {
            cb.onCompletion(new NotFoundException("Connector " + connName + " not found", null), null);
            // FIX: previously fell through after the not-found callback, operating on a
            // nonexistent connector and invoking the callback a second time.
            return;
        }
        Map<String, String> config = configState.connectorConfig(connName);
        worker.stopConnector(connName);
        if (startConnector(config))
            cb.onCompletion(null, null);
        else
            cb.onCompletion(new ConnectException("Failed to start connector: " + connName), null);
    }
    StandaloneHerder(Worker worker); StandaloneHerder(Worker worker, String workerId, StatusBackingStore statusBackingStore, MemoryConfigBackingStore configBackingStore); synchronized void start(); synchronized void stop(); @Override int generation(); @Override synchronized void connectors(Callback<Collection<String>> callback); @Override synchronized void connectorInfo(String connName, Callback<ConnectorInfo> callback); @Override void connectorConfig(String connName, final Callback<Map<String, String>> callback); @Override synchronized void deleteConnectorConfig(String connName, Callback<Created<ConnectorInfo>> callback); @Override synchronized void putConnectorConfig(String connName, final Map<String, String> config, boolean allowReplace, final Callback<Created<ConnectorInfo>> callback); @Override synchronized void requestTaskReconfiguration(String connName); @Override synchronized void taskConfigs(String connName, Callback<List<TaskInfo>> callback); @Override void putTaskConfigs(String connName, List<Map<String, String>> configs, Callback<Void> callback); @Override synchronized void restartTask(ConnectorTaskId taskId, Callback<Void> cb); @Override synchronized void restartConnector(String connName, Callback<Void> cb); }
// Happy-path restart: after the connector is added, restartConnector must stop the worker's
// instance and start it again with the same config in STARTED state, completing the callback.
@Test public void testRestartConnector() throws Exception { expectAdd(SourceSink.SOURCE); Map<String, String> config = connectorConfig(SourceSink.SOURCE); expectConfigValidation(config); worker.stopConnector(CONNECTOR_NAME); EasyMock.expectLastCall().andReturn(true); worker.startConnector(EasyMock.eq(CONNECTOR_NAME), EasyMock.eq(config), EasyMock.anyObject(HerderConnectorContext.class), EasyMock.eq(herder), EasyMock.eq(TargetState.STARTED)); EasyMock.expectLastCall().andReturn(true); PowerMock.replayAll(); herder.putConnectorConfig(CONNECTOR_NAME, config, false, createCallback); FutureCallback<Void> cb = new FutureCallback<>(); herder.restartConnector(CONNECTOR_NAME, cb); cb.get(1000L, TimeUnit.MILLISECONDS); PowerMock.verifyAll(); }
StandaloneHerder extends AbstractHerder {
    /**
     * Restarts a single task: stops it (waiting for shutdown) and starts it again with the
     * connector's current config, task config, and target state. Completion is reported
     * through the callback.
     *
     * @param taskId id of the task to restart
     * @param cb     invoked exactly once with either null (success) or the failure cause
     */
    @Override
    public synchronized void restartTask(ConnectorTaskId taskId, Callback<Void> cb) {
        if (!configState.contains(taskId.connector())) {
            cb.onCompletion(new NotFoundException("Connector " + taskId.connector() + " not found", null), null);
            // FIX: previously fell through after the not-found callback and kept going.
            return;
        }
        Map<String, String> taskConfigProps = configState.taskConfig(taskId);
        if (taskConfigProps == null) {
            cb.onCompletion(new NotFoundException("Task " + taskId + " not found", null), null);
            // FIX: previously fell through and attempted to restart with a null task config,
            // invoking the callback a second time.
            return;
        }
        Map<String, String> connConfigProps = configState.connectorConfig(taskId.connector());
        TargetState targetState = configState.targetState(taskId.connector());
        worker.stopAndAwaitTask(taskId);
        if (worker.startTask(taskId, connConfigProps, taskConfigProps, this, targetState))
            cb.onCompletion(null, null);
        else
            cb.onCompletion(new ConnectException("Failed to start task: " + taskId), null);
    }
    StandaloneHerder(Worker worker); StandaloneHerder(Worker worker, String workerId, StatusBackingStore statusBackingStore, MemoryConfigBackingStore configBackingStore); synchronized void start(); synchronized void stop(); @Override int generation(); @Override synchronized void connectors(Callback<Collection<String>> callback); @Override synchronized void connectorInfo(String connName, Callback<ConnectorInfo> callback); @Override void connectorConfig(String connName, final Callback<Map<String, String>> callback); @Override synchronized void deleteConnectorConfig(String connName, Callback<Created<ConnectorInfo>> callback); @Override synchronized void putConnectorConfig(String connName, final Map<String, String> config, boolean allowReplace, final Callback<Created<ConnectorInfo>> callback); @Override synchronized void requestTaskReconfiguration(String connName); @Override synchronized void taskConfigs(String connName, Callback<List<TaskInfo>> callback); @Override void putTaskConfigs(String connName, List<Map<String, String>> configs, Callback<Void> callback); @Override synchronized void restartTask(ConnectorTaskId taskId, Callback<Void> cb); @Override synchronized void restartConnector(String connName, Callback<Void> cb); }
// Happy-path task restart: restartTask must stop-and-await the running task, then start it
// again with the connector config, task config, and STARTED target state.
@Test public void testRestartTask() throws Exception { ConnectorTaskId taskId = new ConnectorTaskId(CONNECTOR_NAME, 0); expectAdd(SourceSink.SOURCE); Map<String, String> connectorConfig = connectorConfig(SourceSink.SOURCE); expectConfigValidation(connectorConfig); worker.stopAndAwaitTask(taskId); EasyMock.expectLastCall(); worker.startTask(taskId, connectorConfig, taskConfig(SourceSink.SOURCE), herder, TargetState.STARTED); EasyMock.expectLastCall().andReturn(true); PowerMock.replayAll(); herder.putConnectorConfig(CONNECTOR_NAME, connectorConfig, false, createCallback); FutureCallback<Void> cb = new FutureCallback<>(); herder.restartTask(taskId, cb); cb.get(1000L, TimeUnit.MILLISECONDS); PowerMock.verifyAll(); }
StandaloneHerder extends AbstractHerder {
    /**
     * Creates or replaces a connector configuration in standalone mode.
     *
     * Flow: validate the config (reporting errors through the callback and aborting on any),
     * then either create the connector or — if allowReplace — stop and replace the existing
     * instance, start it, reconfigure its tasks, and report the Created result. The callback
     * is invoked exactly once on every path.
     */
    @Override
    public synchronized void putConnectorConfig(String connName,
                                                final Map<String, String> config,
                                                boolean allowReplace,
                                                final Callback<Created<ConnectorInfo>> callback) {
        try {
            // Abort early if validation produced any errors; maybeAddConfigErrors has
            // already completed the callback in that case.
            if (maybeAddConfigErrors(validateConnectorConfig(config), callback)) {
                return;
            }
            boolean created = false;
            if (configState.contains(connName)) {
                // Replacing requires explicit opt-in; otherwise the request conflicts.
                if (!allowReplace) {
                    callback.onCompletion(new AlreadyExistsException("Connector " + connName + " already exists"), null);
                    return;
                }
                worker.stopConnector(connName);
            } else {
                created = true;
            }
            if (!startConnector(config)) {
                callback.onCompletion(new ConnectException("Failed to start connector: " + connName), null);
                return;
            }
            updateConnectorTasks(connName);
            callback.onCompletion(null, new Created<>(created, createConnectorInfo(connName)));
        } catch (ConnectException e) {
            // Any ConnectException from validation/start is surfaced via the callback, not thrown.
            callback.onCompletion(e, null);
        }
    }
    StandaloneHerder(Worker worker); StandaloneHerder(Worker worker, String workerId, StatusBackingStore statusBackingStore, MemoryConfigBackingStore configBackingStore); synchronized void start(); synchronized void stop(); @Override int generation(); @Override synchronized void connectors(Callback<Collection<String>> callback); @Override synchronized void connectorInfo(String connName, Callback<ConnectorInfo> callback); @Override void connectorConfig(String connName, final Callback<Map<String, String>> callback); @Override synchronized void deleteConnectorConfig(String connName, Callback<Created<ConnectorInfo>> callback); @Override synchronized void putConnectorConfig(String connName, final Map<String, String> config, boolean allowReplace, final Callback<Created<ConnectorInfo>> callback); @Override synchronized void requestTaskReconfiguration(String connName); @Override synchronized void taskConfigs(String connName, Callback<List<TaskInfo>> callback); @Override void putTaskConfigs(String connName, List<Map<String, String>> configs, Callback<Void> callback); @Override synchronized void restartTask(ConnectorTaskId taskId, Callback<Void> cb); @Override synchronized void restartConnector(String connName, Callback<Void> cb); }
// Replacing an existing connector config (allowReplace=true) stops and restarts the connector
// with the new config, updates task configs, and reports Created(false, ...) since the
// connector already existed; the captured start config must contain the new entry.
@Test public void testPutConnectorConfig() throws Exception { Map<String, String> connConfig = connectorConfig(SourceSink.SOURCE); Map<String, String> newConnConfig = new HashMap<>(connConfig); newConnConfig.put("foo", "bar"); Callback<Map<String, String>> connectorConfigCb = PowerMock.createMock(Callback.class); Callback<Herder.Created<ConnectorInfo>> putConnectorConfigCb = PowerMock.createMock(Callback.class); connector = PowerMock.createMock(BogusSourceConnector.class); expectAdd(SourceSink.SOURCE); Connector connectorMock = PowerMock.createMock(Connector.class); expectConfigValidation(connectorMock, true, connConfig); connectorConfigCb.onCompletion(null, connConfig); EasyMock.expectLastCall(); worker.stopConnector(CONNECTOR_NAME); EasyMock.expectLastCall().andReturn(true); Capture<Map<String, String>> capturedConfig = EasyMock.newCapture(); worker.startConnector(EasyMock.eq(CONNECTOR_NAME), EasyMock.capture(capturedConfig), EasyMock.<ConnectorContext>anyObject(), EasyMock.eq(herder), EasyMock.eq(TargetState.STARTED)); EasyMock.expectLastCall().andReturn(true); EasyMock.expect(worker.isRunning(CONNECTOR_NAME)).andReturn(true); EasyMock.expect(worker.connectorTaskConfigs(CONNECTOR_NAME, DEFAULT_MAX_TASKS, null)) .andReturn(singletonList(taskConfig(SourceSink.SOURCE))); worker.isSinkConnector(CONNECTOR_NAME); EasyMock.expectLastCall().andReturn(false); ConnectorInfo newConnInfo = new ConnectorInfo(CONNECTOR_NAME, newConnConfig, Arrays.asList(new ConnectorTaskId(CONNECTOR_NAME, 0))); putConnectorConfigCb.onCompletion(null, new Herder.Created<>(false, newConnInfo)); EasyMock.expectLastCall(); expectConfigValidation(connectorMock, false, newConnConfig); connectorConfigCb.onCompletion(null, newConnConfig); EasyMock.expectLastCall(); PowerMock.replayAll(); herder.putConnectorConfig(CONNECTOR_NAME, connConfig, false, createCallback); herder.connectorConfig(CONNECTOR_NAME, connectorConfigCb); herder.putConnectorConfig(CONNECTOR_NAME, newConnConfig, true, putConnectorConfigCb); assertEquals("bar", capturedConfig.getValue().get("foo")); herder.connectorConfig(CONNECTOR_NAME, connectorConfigCb); PowerMock.verifyAll(); } 

// A config whose connector-level validation reports errors must be rejected with a
// BadRequestException delivered to the callback (not thrown), including the error text
// and a pointer to the /config/validate endpoint.
@Test public void testCorruptConfig() { Map<String, String> config = new HashMap<>(); config.put(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME); config.put(ConnectorConfig.CONNECTOR_CLASS_CONFIG, BogusSinkConnector.class.getName()); Connector connectorMock = PowerMock.createMock(Connector.class); String error = "This is an error in your config!"; List<String> errors = new ArrayList<>(singletonList(error)); String key = "foo.invalid.key"; EasyMock.expect(connectorMock.validate(config)).andReturn( new Config( Arrays.asList(new ConfigValue(key, null, Collections.emptyList(), errors)) ) ); ConfigDef configDef = new ConfigDef(); configDef.define(key, ConfigDef.Type.STRING, ConfigDef.Importance.HIGH, ""); EasyMock.expect(worker.getPlugins()).andReturn(plugins).times(3); EasyMock.expect(plugins.compareAndSwapLoaders(connectorMock)).andReturn(delegatingLoader); EasyMock.expect(worker.getPlugins()).andStubReturn(plugins); EasyMock.expect(plugins.newConnector(EasyMock.anyString())).andReturn(connectorMock); EasyMock.expect(connectorMock.config()).andStubReturn(configDef); EasyMock.expect(Plugins.compareAndSwapLoaders(delegatingLoader)).andReturn(pluginLoader); Callback<Herder.Created<ConnectorInfo>> callback = PowerMock.createMock(Callback.class); Capture<BadRequestException> capture = Capture.newInstance(); callback.onCompletion( EasyMock.capture(capture), EasyMock.isNull(Herder.Created.class) ); PowerMock.replayAll(); herder.putConnectorConfig(CONNECTOR_NAME, config, true, callback); assertEquals( capture.getValue().getMessage(), "Connector configuration is invalid and contains the following 1 error(s):\n" + error + "\n" + "You can also find the above list of errors at the endpoint `/{connectorType}/config/validate`" ); PowerMock.verifyAll(); }
StandaloneHerder extends AbstractHerder {
    /**
     * Standalone mode derives task configurations from the connector configuration itself,
     * so writing them externally is never permitted.
     *
     * @throws UnsupportedOperationException always
     */
    @Override
    public void putTaskConfigs(String connName, List<Map<String, String>> configs, Callback<Void> callback) {
        final String message =
                "Kafka Connect in standalone mode does not support externally setting task configurations.";
        throw new UnsupportedOperationException(message);
    }
    StandaloneHerder(Worker worker); StandaloneHerder(Worker worker, String workerId, StatusBackingStore statusBackingStore, MemoryConfigBackingStore configBackingStore); synchronized void start(); synchronized void stop(); @Override int generation(); @Override synchronized void connectors(Callback<Collection<String>> callback); @Override synchronized void connectorInfo(String connName, Callback<ConnectorInfo> callback); @Override void connectorConfig(String connName, final Callback<Map<String, String>> callback); @Override synchronized void deleteConnectorConfig(String connName, Callback<Created<ConnectorInfo>> callback); @Override synchronized void putConnectorConfig(String connName, final Map<String, String> config, boolean allowReplace, final Callback<Created<ConnectorInfo>> callback); @Override synchronized void requestTaskReconfiguration(String connName); @Override synchronized void taskConfigs(String connName, Callback<List<TaskInfo>> callback); @Override void putTaskConfigs(String connName, List<Map<String, String>> configs, Callback<Void> callback); @Override synchronized void restartTask(ConnectorTaskId taskId, Callback<Void> cb); @Override synchronized void restartConnector(String connName, Callback<Void> cb); }
// Externally setting task configs is unsupported in standalone mode; the call must throw
// before touching the callback (hence no expectations are recorded on the mock).
@Test(expected = UnsupportedOperationException.class) public void testPutTaskConfigs() { Callback<Void> cb = PowerMock.createMock(Callback.class); PowerMock.replayAll(); herder.putTaskConfigs(CONNECTOR_NAME, Arrays.asList(singletonMap("config", "value")), cb); PowerMock.verifyAll(); }
PluginUtils {
    /**
     * Decides whether a class name should be loaded by the isolated plugin classloader.
     *
     * Everything is isolated by default; names matching the BLACKLIST regex are delegated
     * to the parent loader unless the WHITELIST regex explicitly pulls them back into
     * isolation. (Equivalent, by De Morgan, to !(blacklisted && !whitelisted).)
     */
    public static boolean shouldLoadInIsolation(String name) {
        if (name.matches(BLACKLIST)) {
            return name.matches(WHITELIST);
        }
        return true;
    }
    static boolean shouldLoadInIsolation(String name); static boolean isConcrete(Class<?> klass); static boolean isArchive(Path path); static boolean isClassFile(Path path); static List<Path> pluginLocations(Path topPath); static List<Path> pluginUrls(Path topPath); static String simpleName(PluginDesc<?> plugin); static String prunedName(PluginDesc<?> plugin); static boolean isAliasUnique( PluginDesc<U> alias, Collection<PluginDesc<U>> plugins ); }
// Tests for the isolation predicate: JDK packages, third-party logging, and
// Connect framework API classes must NOT load in isolation; bundled plugin
// packages (transforms, json, file, converters) MUST load in isolation.
// The final statement of the third test continues onto the next source line.
@Test public void testJavaLibraryClasses() throws Exception { assertFalse(PluginUtils.shouldLoadInIsolation("java.")); assertFalse(PluginUtils.shouldLoadInIsolation("java.lang.Object")); assertFalse(PluginUtils.shouldLoadInIsolation("java.lang.String")); assertFalse(PluginUtils.shouldLoadInIsolation("java.util.HashMap$Entry")); assertFalse(PluginUtils.shouldLoadInIsolation("java.io.Serializable")); assertFalse(PluginUtils.shouldLoadInIsolation("javax.rmi.")); assertFalse(PluginUtils.shouldLoadInIsolation( "javax.management.loading.ClassLoaderRepository") ); assertFalse(PluginUtils.shouldLoadInIsolation("org.omg.CORBA.")); assertFalse(PluginUtils.shouldLoadInIsolation("org.omg.CORBA.Object")); assertFalse(PluginUtils.shouldLoadInIsolation("org.w3c.dom.")); assertFalse(PluginUtils.shouldLoadInIsolation("org.w3c.dom.traversal.TreeWalker")); assertFalse(PluginUtils.shouldLoadInIsolation("org.xml.sax.")); assertFalse(PluginUtils.shouldLoadInIsolation("org.xml.sax.EntityResolver")); } @Test public void testThirdPartyClasses() throws Exception { assertFalse(PluginUtils.shouldLoadInIsolation("org.slf4j.")); assertFalse(PluginUtils.shouldLoadInIsolation("org.slf4j.LoggerFactory")); } @Test public void testConnectFrameworkClasses() throws Exception { assertFalse(PluginUtils.shouldLoadInIsolation("org.apache.kafka.common.")); assertFalse(PluginUtils.shouldLoadInIsolation( "org.apache.kafka.common.config.AbstractConfig") ); assertFalse(PluginUtils.shouldLoadInIsolation( "org.apache.kafka.common.config.ConfigDef$Type") ); assertFalse(PluginUtils.shouldLoadInIsolation( "org.apache.kafka.common.serialization.Deserializer") ); assertFalse(PluginUtils.shouldLoadInIsolation("org.apache.kafka.connect.")); assertFalse(PluginUtils.shouldLoadInIsolation( "org.apache.kafka.connect.connector.Connector") ); assertFalse(PluginUtils.shouldLoadInIsolation( "org.apache.kafka.connect.source.SourceConnector") ); assertFalse(PluginUtils.shouldLoadInIsolation( 
"org.apache.kafka.connect.sink.SinkConnector") ); assertFalse(PluginUtils.shouldLoadInIsolation("org.apache.kafka.connect.connector.Task")); assertFalse(PluginUtils.shouldLoadInIsolation( "org.apache.kafka.connect.source.SourceTask") ); assertFalse(PluginUtils.shouldLoadInIsolation("org.apache.kafka.connect.sink.SinkTask")); assertFalse(PluginUtils.shouldLoadInIsolation( "org.apache.kafka.connect.transforms.Transformation") ); assertFalse(PluginUtils.shouldLoadInIsolation( "org.apache.kafka.connect.storage.Converter") ); assertFalse(PluginUtils.shouldLoadInIsolation( "org.apache.kafka.connect.storage.OffsetBackingStore") ); } @Test public void testAllowedConnectFrameworkClasses() throws Exception { assertTrue(PluginUtils.shouldLoadInIsolation("org.apache.kafka.connect.transforms.")); assertTrue(PluginUtils.shouldLoadInIsolation( "org.apache.kafka.connect.transforms.ExtractField") ); assertTrue(PluginUtils.shouldLoadInIsolation( "org.apache.kafka.connect.transforms.ExtractField$Key") ); assertTrue(PluginUtils.shouldLoadInIsolation("org.apache.kafka.connect.json.")); assertTrue(PluginUtils.shouldLoadInIsolation( "org.apache.kafka.connect.json.JsonConverter") ); assertTrue(PluginUtils.shouldLoadInIsolation( "org.apache.kafka.connect.json.JsonConverter$21") ); assertTrue(PluginUtils.shouldLoadInIsolation("org.apache.kafka.connect.file.")); assertTrue(PluginUtils.shouldLoadInIsolation( "org.apache.kafka.connect.file.FileStreamSourceTask") ); assertTrue(PluginUtils.shouldLoadInIsolation( "org.apache.kafka.connect.file.FileStreamSinkConnector") ); assertTrue(PluginUtils.shouldLoadInIsolation("org.apache.kafka.connect.converters.")); assertTrue(PluginUtils.shouldLoadInIsolation( "org.apache.kafka.connect.converters.ByteArrayConverter") ); assertTrue(PluginUtils.shouldLoadInIsolation( "org.apache.kafka.connect.storage.StringConverter") ); }
// PluginDesc snippet: location() is a plain accessor for the loader-derived
// location string, serialized as the "location" JSON property; the bodiless
// signatures enumerate the rest of the class surface.
PluginDesc implements Comparable<PluginDesc<T>> { @JsonProperty("location") public String location() { return location; } PluginDesc(Class<? extends T> klass, String version, ClassLoader loader); @Override String toString(); Class<? extends T> pluginClass(); @JsonProperty("class") String className(); @JsonProperty("version") String version(); PluginType type(); @JsonProperty("type") String typeName(); @JsonProperty("location") String location(); @Override boolean equals(Object o); @Override int hashCode(); @Override int compareTo(PluginDesc other); }
// NOTE(review): the original span declared testRegularPluginDesc and
// testPluginDescWithNullVersion twice each (once with and once without
// "throws Exception"); duplicate method signatures in one class do not
// compile, so the redundant copies are removed and a single copy of each
// test is kept with identical assertions.

/**
 * A PluginDesc built from a plugin-path loader must report the plugin class,
 * the exact version string it was constructed with (regular, SNAPSHOT, or the
 * "no version" placeholder), and the loader's location.
 */
@Test
public void testRegularPluginDesc() {
    PluginDesc<Connector> connectorDesc = new PluginDesc<>(
            Connector.class,
            regularVersion,
            pluginLoader
    );
    assertPluginDesc(connectorDesc, Connector.class, regularVersion, pluginLoader.location());

    PluginDesc<Converter> converterDesc = new PluginDesc<>(
            Converter.class,
            snaphotVersion,
            pluginLoader
    );
    assertPluginDesc(converterDesc, Converter.class, snaphotVersion, pluginLoader.location());

    PluginDesc<Transformation> transformDesc = new PluginDesc<>(
            Transformation.class,
            noVersion,
            pluginLoader
    );
    assertPluginDesc(transformDesc, Transformation.class, noVersion, pluginLoader.location());
}

/**
 * A null version must be rendered as the string "null"; a plugin loaded by the
 * system loader must report the "classpath" location rather than a plugin path.
 */
@Test
public void testPluginDescWithNullVersion() {
    String nullVersion = "null";
    PluginDesc<SourceConnector> connectorDesc = new PluginDesc<>(
            SourceConnector.class,
            null,
            pluginLoader
    );
    assertPluginDesc(
            connectorDesc,
            SourceConnector.class,
            nullVersion,
            pluginLoader.location()
    );

    String location = "classpath";
    PluginDesc<Converter> converterDesc = new PluginDesc<>(
            Converter.class,
            null,
            systemLoader
    );
    assertPluginDesc(converterDesc, Converter.class, nullVersion, location);
}
// PluginDesc snippet: hashCode hashes klass, version, and type but deliberately
// excludes location, keeping it consistent with an equals() that treats two
// descriptors of the same class/version as equal regardless of which class
// loader supplied them (see the equality test that follows).
PluginDesc implements Comparable<PluginDesc<T>> { @Override public int hashCode() { return Objects.hash(klass, version, type); } PluginDesc(Class<? extends T> klass, String version, ClassLoader loader); @Override String toString(); Class<? extends T> pluginClass(); @JsonProperty("class") String className(); @JsonProperty("version") String version(); PluginType type(); @JsonProperty("type") String typeName(); @JsonProperty("location") String location(); @Override boolean equals(Object o); @Override int hashCode(); @Override int compareTo(PluginDesc other); }
// NOTE(review): the original span declared testPluginDescEquality twice (once
// with and once without "throws Exception"); duplicate method signatures do
// not compile, so a single copy is kept with identical assertions.

/**
 * Equality and hashCode of PluginDesc must ignore the class loader/location:
 * the same class+version from plugin path and classpath are equal, while the
 * same class with differing versions (null vs. the "no version" placeholder)
 * must not be equal.
 */
@Test
public void testPluginDescEquality() {
    PluginDesc<Connector> connectorDescPluginPath = new PluginDesc<>(
            Connector.class,
            snaphotVersion,
            pluginLoader
    );
    PluginDesc<Connector> connectorDescClasspath = new PluginDesc<>(
            Connector.class,
            snaphotVersion,
            systemLoader
    );
    assertEquals(connectorDescPluginPath, connectorDescClasspath);
    assertEquals(connectorDescPluginPath.hashCode(), connectorDescClasspath.hashCode());

    PluginDesc<Converter> converterDescPluginPath = new PluginDesc<>(
            Converter.class,
            noVersion,
            pluginLoader
    );
    PluginDesc<Converter> converterDescClasspath = new PluginDesc<>(
            Converter.class,
            noVersion,
            systemLoader
    );
    assertEquals(converterDescPluginPath, converterDescClasspath);
    assertEquals(converterDescPluginPath.hashCode(), converterDescClasspath.hashCode());

    PluginDesc<Transformation> transformDescPluginPath = new PluginDesc<>(
            Transformation.class,
            null,
            pluginLoader
    );
    PluginDesc<Transformation> transformDescClasspath = new PluginDesc<>(
            Transformation.class,
            noVersion,
            pluginLoader
    );
    assertNotEquals(transformDescPluginPath, transformDescClasspath);
}
// DistributedHerder snippet: restartConnector enqueues an asynchronous request.
// Ordered checks inside the request: (1) bail out if a rebalance is pending;
// (2) NotFoundException for an unknown connector; (3) if assigned locally,
// stop then start the connector, reporting success/ConnectException/any
// Throwable via the callback; (4) otherwise NotAssignedException (this worker
// is leader) or NotLeaderException (forwardable to the leader URL).
DistributedHerder extends AbstractHerder implements Runnable { @Override public void restartConnector(final String connName, final Callback<Void> callback) { addRequest(new Callable<Void>() { @Override public Void call() throws Exception { if (checkRebalanceNeeded(callback)) return null; if (!configState.connectors().contains(connName)) { callback.onCompletion(new NotFoundException("Unknown connector: " + connName), null); return null; } if (assignment.connectors().contains(connName)) { try { worker.stopConnector(connName); if (startConnector(connName)) callback.onCompletion(null, null); else callback.onCompletion(new ConnectException("Failed to start connector: " + connName), null); } catch (Throwable t) { callback.onCompletion(t, null); } } else if (isLeader()) { callback.onCompletion(new NotAssignedException("Cannot restart connector since it is not assigned to this member", member.ownerUrl(connName)), null); } else { callback.onCompletion(new NotLeaderException("Cannot restart connector since it is not assigned to this member", leaderUrl()), null); } return null; } }, forwardErrorCallback(callback)); } DistributedHerder(DistributedConfig config, Time time, Worker worker, StatusBackingStore statusBackingStore, ConfigBackingStore configBackingStore, String restUrl); DistributedHerder(DistributedConfig config, Worker worker, String workerId, StatusBackingStore statusBackingStore, ConfigBackingStore configBackingStore, WorkerGroupMember member, String restUrl, Time time); @Override void start(); @Override void run(); void tick(); void halt(); @Override void stop(); @Override void connectors(final Callback<Collection<String>> callback); @Override void connectorInfo(final String connName, final Callback<ConnectorInfo> callback); @Override void connectorConfig(String connName, final Callback<Map<String, String>> callback); @Override void deleteConnectorConfig(final String connName, final Callback<Created<ConnectorInfo>> callback); @Override void 
putConnectorConfig(final String connName, final Map<String, String> config, final boolean allowReplace, final Callback<Created<ConnectorInfo>> callback); @Override void requestTaskReconfiguration(final String connName); @Override void taskConfigs(final String connName, final Callback<List<TaskInfo>> callback); @Override void putTaskConfigs(final String connName, final List<Map<String, String>> configs, final Callback<Void> callback); @Override void restartConnector(final String connName, final Callback<Void> callback); @Override void restartTask(final ConnectorTaskId id, final Callback<Void> callback); @Override int generation(); }
// Test: mocks a rebalance that assigns CONN1 locally, then expects the restart
// path to stop and re-start CONN1; the FutureCallback completing within 1s
// confirms the request was processed by the second tick().
@Test public void testRestartConnector() throws Exception { EasyMock.expect(worker.connectorTaskConfigs(CONN1, MAX_TASKS, null)).andStubReturn(TASK_CONFIGS); EasyMock.expect(member.memberId()).andStubReturn("leader"); EasyMock.expect(worker.getPlugins()).andReturn(plugins); expectRebalance(1, singletonList(CONN1), Collections.<ConnectorTaskId>emptyList()); expectPostRebalanceCatchup(SNAPSHOT); member.poll(EasyMock.anyInt()); PowerMock.expectLastCall(); worker.startConnector(EasyMock.eq(CONN1), EasyMock.<Map<String, String>>anyObject(), EasyMock.<ConnectorContext>anyObject(), EasyMock.eq(herder), EasyMock.eq(TargetState.STARTED)); PowerMock.expectLastCall().andReturn(true); EasyMock.expect(worker.isRunning(CONN1)).andReturn(true); member.wakeup(); PowerMock.expectLastCall(); member.ensureActive(); PowerMock.expectLastCall(); member.poll(EasyMock.anyInt()); PowerMock.expectLastCall(); worker.stopConnector(CONN1); PowerMock.expectLastCall().andReturn(true); EasyMock.expect(worker.getPlugins()).andReturn(plugins); worker.startConnector(EasyMock.eq(CONN1), EasyMock.<Map<String, String>>anyObject(), EasyMock.<ConnectorContext>anyObject(), EasyMock.eq(herder), EasyMock.eq(TargetState.STARTED)); PowerMock.expectLastCall().andReturn(true); EasyMock.expect(worker.isRunning(CONN1)).andReturn(true); PowerMock.replayAll(); herder.tick(); FutureCallback<Void> callback = new FutureCallback<>(); herder.restartConnector(CONN1, callback); herder.tick(); callback.get(1000L, TimeUnit.MILLISECONDS); PowerMock.verifyAll(); }
// DistributedHerder snippet: restartTask mirrors restartConnector but for a
// single task id. Ordered checks: pending rebalance; unknown connector;
// unknown task (no stored config); locally-assigned -> stopAndAwaitTask then
// startTask, reporting success/ConnectException/Throwable via the callback;
// otherwise NotAssignedException (leader) or NotLeaderException (follower).
DistributedHerder extends AbstractHerder implements Runnable { @Override public void restartTask(final ConnectorTaskId id, final Callback<Void> callback) { addRequest(new Callable<Void>() { @Override public Void call() throws Exception { if (checkRebalanceNeeded(callback)) return null; if (!configState.connectors().contains(id.connector())) { callback.onCompletion(new NotFoundException("Unknown connector: " + id.connector()), null); return null; } if (configState.taskConfig(id) == null) { callback.onCompletion(new NotFoundException("Unknown task: " + id), null); return null; } if (assignment.tasks().contains(id)) { try { worker.stopAndAwaitTask(id); if (startTask(id)) callback.onCompletion(null, null); else callback.onCompletion(new ConnectException("Failed to start task: " + id), null); } catch (Throwable t) { callback.onCompletion(t, null); } } else if (isLeader()) { callback.onCompletion(new NotAssignedException("Cannot restart task since it is not assigned to this member", member.ownerUrl(id)), null); } else { callback.onCompletion(new NotLeaderException("Cannot restart task since it is not assigned to this member", leaderUrl()), null); } return null; } }, forwardErrorCallback(callback)); } DistributedHerder(DistributedConfig config, Time time, Worker worker, StatusBackingStore statusBackingStore, ConfigBackingStore configBackingStore, String restUrl); DistributedHerder(DistributedConfig config, Worker worker, String workerId, StatusBackingStore statusBackingStore, ConfigBackingStore configBackingStore, WorkerGroupMember member, String restUrl, Time time); @Override void start(); @Override void run(); void tick(); void halt(); @Override void stop(); @Override void connectors(final Callback<Collection<String>> callback); @Override void connectorInfo(final String connName, final Callback<ConnectorInfo> callback); @Override void connectorConfig(String connName, final Callback<Map<String, String>> callback); @Override void deleteConnectorConfig(final String 
connName, final Callback<Created<ConnectorInfo>> callback); @Override void putConnectorConfig(final String connName, final Map<String, String> config, final boolean allowReplace, final Callback<Created<ConnectorInfo>> callback); @Override void requestTaskReconfiguration(final String connName); @Override void taskConfigs(final String connName, final Callback<List<TaskInfo>> callback); @Override void putTaskConfigs(final String connName, final List<Map<String, String>> configs, final Callback<Void> callback); @Override void restartConnector(final String connName, final Callback<Void> callback); @Override void restartTask(final ConnectorTaskId id, final Callback<Void> callback); @Override int generation(); }
// Test: a rebalance assigns TASK0 locally; the restart request must stop and
// await the task, then start it again; the FutureCallback resolving within 1s
// confirms the request completed on the second tick().
@Test public void testRestartTask() throws Exception { EasyMock.expect(worker.connectorTaskConfigs(CONN1, MAX_TASKS, null)).andStubReturn(TASK_CONFIGS); EasyMock.expect(member.memberId()).andStubReturn("leader"); expectRebalance(1, Collections.<String>emptyList(), singletonList(TASK0)); expectPostRebalanceCatchup(SNAPSHOT); member.poll(EasyMock.anyInt()); PowerMock.expectLastCall(); worker.startTask(EasyMock.eq(TASK0), EasyMock.<Map<String, String>>anyObject(), EasyMock.<Map<String, String>>anyObject(), EasyMock.eq(herder), EasyMock.eq(TargetState.STARTED)); PowerMock.expectLastCall().andReturn(true); member.wakeup(); PowerMock.expectLastCall(); member.ensureActive(); PowerMock.expectLastCall(); member.poll(EasyMock.anyInt()); PowerMock.expectLastCall(); worker.stopAndAwaitTask(TASK0); PowerMock.expectLastCall(); worker.startTask(EasyMock.eq(TASK0), EasyMock.<Map<String, String>>anyObject(), EasyMock.<Map<String, String>>anyObject(), EasyMock.eq(herder), EasyMock.eq(TargetState.STARTED)); PowerMock.expectLastCall().andReturn(true); PowerMock.replayAll(); herder.tick(); FutureCallback<Void> callback = new FutureCallback<>(); herder.restartTask(TASK0, callback); herder.tick(); callback.get(1000L, TimeUnit.MILLISECONDS); PowerMock.verifyAll(); }
// DistributedHerder snippet: the no-delay addRequest overload simply delegates
// to the delayed variant with delay 0, enqueueing the action/callback pair.
DistributedHerder extends AbstractHerder implements Runnable { HerderRequest addRequest(Callable<Void> action, Callback<Void> callback) { return addRequest(0, action, callback); } DistributedHerder(DistributedConfig config, Time time, Worker worker, StatusBackingStore statusBackingStore, ConfigBackingStore configBackingStore, String restUrl); DistributedHerder(DistributedConfig config, Worker worker, String workerId, StatusBackingStore statusBackingStore, ConfigBackingStore configBackingStore, WorkerGroupMember member, String restUrl, Time time); @Override void start(); @Override void run(); void tick(); void halt(); @Override void stop(); @Override void connectors(final Callback<Collection<String>> callback); @Override void connectorInfo(final String connName, final Callback<ConnectorInfo> callback); @Override void connectorConfig(String connName, final Callback<Map<String, String>> callback); @Override void deleteConnectorConfig(final String connName, final Callback<Created<ConnectorInfo>> callback); @Override void putConnectorConfig(final String connName, final Map<String, String> config, final boolean allowReplace, final Callback<Created<ConnectorInfo>> callback); @Override void requestTaskReconfiguration(final String connName); @Override void taskConfigs(final String connName, final Callback<List<TaskInfo>> callback); @Override void putTaskConfigs(final String connName, final List<Map<String, String>> configs, final Callback<Void> callback); @Override void restartConnector(final String connName, final Callback<Void> callback); @Override void restartTask(final ConnectorTaskId id, final Callback<Void> callback); @Override int generation(); }
// Test: requests are polled in delay order (10, 100, 200, 200), and two
// requests with equal delay preserve their insertion order (req3 before req4).
@Test public void testRequestProcessingOrder() throws Exception { final DistributedHerder.HerderRequest req1 = herder.addRequest(100, null, null); final DistributedHerder.HerderRequest req2 = herder.addRequest(10, null, null); final DistributedHerder.HerderRequest req3 = herder.addRequest(200, null, null); final DistributedHerder.HerderRequest req4 = herder.addRequest(200, null, null); assertEquals(req2, herder.requests.pollFirst()); assertEquals(req1, herder.requests.pollFirst()); assertEquals(req3, herder.requests.pollFirst()); assertEquals(req4, herder.requests.pollFirst()); }
// DistributedHerder snippet: putConnectorConfig enqueues an asynchronous write.
// Ordered checks inside the request: (1) validateConnectorConfig; config errors
// short-circuit via maybeAddConfigErrors; (2) only the leader may write, else
// NotLeaderException with the leader URL for forwarding; (3) when allowReplace
// is false and the connector already exists, AlreadyExistsException; otherwise
// the config is persisted to configBackingStore and the callback receives a
// Created<ConnectorInfo> whose "created" flag is true only for new connectors.
DistributedHerder extends AbstractHerder implements Runnable { @Override public void putConnectorConfig(final String connName, final Map<String, String> config, final boolean allowReplace, final Callback<Created<ConnectorInfo>> callback) { log.trace("Submitting connector config write request {}", connName); addRequest( new Callable<Void>() { @Override public Void call() throws Exception { if (maybeAddConfigErrors(validateConnectorConfig(config), callback)) { return null; } log.trace("Handling connector config request {}", connName); if (!isLeader()) { callback.onCompletion(new NotLeaderException("Only the leader can set connector configs.", leaderUrl()), null); return null; } boolean exists = configState.contains(connName); if (!allowReplace && exists) { callback.onCompletion(new AlreadyExistsException("Connector " + connName + " already exists"), null); return null; } log.trace("Submitting connector config {} {} {}", connName, allowReplace, configState.connectors()); configBackingStore.putConnectorConfig(connName, config); ConnectorInfo info = new ConnectorInfo(connName, config, configState.tasks(connName)); callback.onCompletion(null, new Created<>(!exists, info)); return null; } }, forwardErrorCallback(callback) ); } DistributedHerder(DistributedConfig config, Time time, Worker worker, StatusBackingStore statusBackingStore, ConfigBackingStore configBackingStore, String restUrl); DistributedHerder(DistributedConfig config, Worker worker, String workerId, StatusBackingStore statusBackingStore, ConfigBackingStore configBackingStore, WorkerGroupMember member, String restUrl, Time time); @Override void start(); @Override void run(); void tick(); void halt(); @Override void stop(); @Override void connectors(final Callback<Collection<String>> callback); @Override void connectorInfo(final String connName, final Callback<ConnectorInfo> callback); @Override void connectorConfig(String connName, final Callback<Map<String, String>> callback); @Override void 
deleteConnectorConfig(final String connName, final Callback<Created<ConnectorInfo>> callback); @Override void putConnectorConfig(final String connName, final Map<String, String> config, final boolean allowReplace, final Callback<Created<ConnectorInfo>> callback); @Override void requestTaskReconfiguration(final String connName); @Override void taskConfigs(final String connName, final Callback<List<TaskInfo>> callback); @Override void putTaskConfigs(final String connName, final List<Map<String, String>> configs, final Callback<Void> callback); @Override void restartConnector(final String connName, final Callback<Void> callback); @Override void restartTask(final ConnectorTaskId id, final Callback<Void> callback); @Override int generation(); }
// Test: reads CONN1's current config, replaces it with CONN1_CONFIG_UPDATED
// (allowReplace=true), expects the backing-store write to trigger the config
// update listener, a stop/start of the connector, a Created<> result with
// created=false (pre-existing connector), and the updated config on re-read.
// The expectation setup continues onto the next source line.
@Test public void testPutConnectorConfig() throws Exception { EasyMock.expect(member.memberId()).andStubReturn("leader"); expectRebalance(1, Arrays.asList(CONN1), Collections.<ConnectorTaskId>emptyList()); expectPostRebalanceCatchup(SNAPSHOT); worker.startConnector(EasyMock.eq(CONN1), EasyMock.<Map<String, String>>anyObject(), EasyMock.<ConnectorContext>anyObject(), EasyMock.eq(herder), EasyMock.eq(TargetState.STARTED)); PowerMock.expectLastCall().andReturn(true); EasyMock.expect(worker.isRunning(CONN1)).andReturn(true); EasyMock.expect(worker.connectorTaskConfigs(CONN1, MAX_TASKS, null)).andReturn(TASK_CONFIGS); member.wakeup(); PowerMock.expectLastCall().anyTimes(); member.poll(EasyMock.anyInt()); PowerMock.expectLastCall(); member.ensureActive(); PowerMock.expectLastCall(); Connector connectorMock = PowerMock.createMock(Connector.class); EasyMock.expect(worker.getPlugins()).andReturn(plugins).times(5); EasyMock.expect(plugins.compareAndSwapLoaders(connectorMock)).andReturn(delegatingLoader); EasyMock.expect(plugins.newConnector(EasyMock.anyString())).andReturn(connectorMock); EasyMock.expect(connectorMock.config()).andReturn(new ConfigDef()); EasyMock.expect(connectorMock.validate(CONN1_CONFIG_UPDATED)).andReturn(new Config(Collections.<ConfigValue>emptyList())); EasyMock.expect(Plugins.compareAndSwapLoaders(delegatingLoader)).andReturn(pluginLoader); configBackingStore.putConnectorConfig(CONN1, CONN1_CONFIG_UPDATED); PowerMock.expectLastCall().andAnswer(new IAnswer<Object>() { @Override public Object answer() throws Throwable { configUpdateListener.onConnectorConfigUpdate(CONN1); return null; } }); EasyMock.expect(configBackingStore.snapshot()).andReturn(SNAPSHOT_UPDATED_CONN1_CONFIG); worker.stopConnector(CONN1); PowerMock.expectLastCall().andReturn(true); worker.startConnector(EasyMock.eq(CONN1), EasyMock.<Map<String, String>>anyObject(), EasyMock.<ConnectorContext>anyObject(), EasyMock.eq(herder), EasyMock.eq(TargetState.STARTED)); 
PowerMock.expectLastCall().andReturn(true); EasyMock.expect(worker.isRunning(CONN1)).andReturn(true); EasyMock.expect(worker.connectorTaskConfigs(CONN1, MAX_TASKS, null)).andReturn(TASK_CONFIGS); member.poll(EasyMock.anyInt()); PowerMock.expectLastCall(); member.ensureActive(); PowerMock.expectLastCall(); member.poll(EasyMock.anyInt()); PowerMock.expectLastCall(); PowerMock.replayAll(); FutureCallback<Map<String, String>> connectorConfigCb = new FutureCallback<>(); herder.connectorConfig(CONN1, connectorConfigCb); herder.tick(); assertTrue(connectorConfigCb.isDone()); assertEquals(CONN1_CONFIG, connectorConfigCb.get()); FutureCallback<Herder.Created<ConnectorInfo>> putConfigCb = new FutureCallback<>(); herder.putConnectorConfig(CONN1, CONN1_CONFIG_UPDATED, true, putConfigCb); herder.tick(); assertTrue(putConfigCb.isDone()); ConnectorInfo updatedInfo = new ConnectorInfo(CONN1, CONN1_CONFIG_UPDATED, Arrays.asList(TASK0, TASK1, TASK2)); assertEquals(new Herder.Created<>(false, updatedInfo), putConfigCb.get()); connectorConfigCb = new FutureCallback<>(); herder.connectorConfig(CONN1, connectorConfigCb); herder.tick(); assertTrue(connectorConfigCb.isDone()); assertEquals(CONN1_CONFIG_UPDATED, connectorConfigCb.get()); PowerMock.verifyAll(); }
WorkerCoordinator extends AbstractCoordinator implements Closeable { @Override public List<ProtocolMetadata> metadata() { configSnapshot = configStorage.snapshot(); ConnectProtocol.WorkerState workerState = new ConnectProtocol.WorkerState(restUrl, configSnapshot.offset()); ByteBuffer metadata = ConnectProtocol.serializeMetadata(workerState); return Collections.singletonList(new ProtocolMetadata(DEFAULT_SUBPROTOCOL, metadata)); } WorkerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, String restUrl, ConfigBackingStore configStorage, WorkerRebalanceListener listener); void requestRejoin(); @Override String protocolType(); void poll(long timeout); @Override List<ProtocolMetadata> metadata(); String memberId(); String ownerUrl(String connector); String ownerUrl(ConnectorTaskId task); static final String DEFAULT_SUBPROTOCOL; }
@Test public void testMetadata() { EasyMock.expect(configStorage.snapshot()).andReturn(configState1); PowerMock.replayAll(); List<ProtocolMetadata> serialized = coordinator.metadata(); assertEquals(1, serialized.size()); ProtocolMetadata defaultMetadata = serialized.get(0); assertEquals(WorkerCoordinator.DEFAULT_SUBPROTOCOL, defaultMetadata.name()); ConnectProtocol.WorkerState state = ConnectProtocol.deserializeMetadata(defaultMetadata.metadata()); assertEquals(1, state.offset()); PowerMock.verifyAll(); } @Test public void testLeaderPerformAssignment1() throws Exception { EasyMock.expect(configStorage.snapshot()).andReturn(configState1); PowerMock.replayAll(); coordinator.metadata(); Map<String, ByteBuffer> configs = new HashMap<>(); configs.put("leader", ConnectProtocol.serializeMetadata(new ConnectProtocol.WorkerState(LEADER_URL, 1L))); configs.put("member", ConnectProtocol.serializeMetadata(new ConnectProtocol.WorkerState(MEMBER_URL, 1L))); Map<String, ByteBuffer> result = Whitebox.invokeMethod(coordinator, "performAssignment", "leader", WorkerCoordinator.DEFAULT_SUBPROTOCOL, configs); ConnectProtocol.Assignment leaderAssignment = ConnectProtocol.deserializeAssignment(result.get("leader")); assertEquals(false, leaderAssignment.failed()); assertEquals("leader", leaderAssignment.leader()); assertEquals(1, leaderAssignment.offset()); assertEquals(Collections.singletonList(connectorId1), leaderAssignment.connectors()); assertEquals(Collections.emptyList(), leaderAssignment.tasks()); ConnectProtocol.Assignment memberAssignment = ConnectProtocol.deserializeAssignment(result.get("member")); assertEquals(false, memberAssignment.failed()); assertEquals("leader", memberAssignment.leader()); assertEquals(1, memberAssignment.offset()); assertEquals(Collections.emptyList(), memberAssignment.connectors()); assertEquals(Collections.singletonList(taskId1x0), memberAssignment.tasks()); PowerMock.verifyAll(); } @Test public void testLeaderPerformAssignment2() throws Exception { 
EasyMock.expect(configStorage.snapshot()).andReturn(configState2); PowerMock.replayAll(); coordinator.metadata(); Map<String, ByteBuffer> configs = new HashMap<>(); configs.put("leader", ConnectProtocol.serializeMetadata(new ConnectProtocol.WorkerState(LEADER_URL, 1L))); configs.put("member", ConnectProtocol.serializeMetadata(new ConnectProtocol.WorkerState(MEMBER_URL, 1L))); Map<String, ByteBuffer> result = Whitebox.invokeMethod(coordinator, "performAssignment", "leader", WorkerCoordinator.DEFAULT_SUBPROTOCOL, configs); ConnectProtocol.Assignment leaderAssignment = ConnectProtocol.deserializeAssignment(result.get("leader")); assertEquals(false, leaderAssignment.failed()); assertEquals("leader", leaderAssignment.leader()); assertEquals(1, leaderAssignment.offset()); assertEquals(Collections.singletonList(connectorId1), leaderAssignment.connectors()); assertEquals(Arrays.asList(taskId1x0, taskId2x0), leaderAssignment.tasks()); ConnectProtocol.Assignment memberAssignment = ConnectProtocol.deserializeAssignment(result.get("member")); assertEquals(false, memberAssignment.failed()); assertEquals("leader", memberAssignment.leader()); assertEquals(1, memberAssignment.offset()); assertEquals(Collections.singletonList(connectorId2), memberAssignment.connectors()); assertEquals(Collections.singletonList(taskId1x1), memberAssignment.tasks()); PowerMock.verifyAll(); } @Test public void testLeaderPerformAssignmentSingleTaskConnectors() throws Exception { EasyMock.expect(configStorage.snapshot()).andReturn(configStateSingleTaskConnectors); PowerMock.replayAll(); coordinator.metadata(); Map<String, ByteBuffer> configs = new HashMap<>(); configs.put("leader", ConnectProtocol.serializeMetadata(new ConnectProtocol.WorkerState(LEADER_URL, 1L))); configs.put("member", ConnectProtocol.serializeMetadata(new ConnectProtocol.WorkerState(MEMBER_URL, 1L))); Map<String, ByteBuffer> result = Whitebox.invokeMethod(coordinator, "performAssignment", "leader", 
WorkerCoordinator.DEFAULT_SUBPROTOCOL, configs); ConnectProtocol.Assignment leaderAssignment = ConnectProtocol.deserializeAssignment(result.get("leader")); assertEquals(false, leaderAssignment.failed()); assertEquals("leader", leaderAssignment.leader()); assertEquals(1, leaderAssignment.offset()); assertEquals(Arrays.asList(connectorId1, connectorId3), leaderAssignment.connectors()); assertEquals(Arrays.asList(taskId2x0), leaderAssignment.tasks()); ConnectProtocol.Assignment memberAssignment = ConnectProtocol.deserializeAssignment(result.get("member")); assertEquals(false, memberAssignment.failed()); assertEquals("leader", memberAssignment.leader()); assertEquals(1, memberAssignment.offset()); assertEquals(Collections.singletonList(connectorId2), memberAssignment.connectors()); assertEquals(Arrays.asList(taskId1x0, taskId3x0), memberAssignment.tasks()); PowerMock.verifyAll(); }
// WorkerCoordinator snippet: memberId carries a body; the bodiless signatures
// list the rest of the class surface for context.
WorkerCoordinator extends AbstractCoordinator implements Closeable {
    /**
     * Returns this worker's member id within the Connect group, or
     * JoinGroupRequest.UNKNOWN_MEMBER_ID when no generation has been
     * established yet (i.e. before the first successful group join).
     */
    public String memberId() {
        final Generation gen = generation();
        return (gen == null) ? JoinGroupRequest.UNKNOWN_MEMBER_ID : gen.memberId;
    }
    WorkerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, String restUrl, ConfigBackingStore configStorage, WorkerRebalanceListener listener);
    void requestRejoin();
    @Override String protocolType();
    void poll(long timeout);
    @Override List<ProtocolMetadata> metadata();
    String memberId();
    String ownerUrl(String connector);
    String ownerUrl(ConnectorTaskId task);
    static final String DEFAULT_SUBPROTOCOL;
}
@Test public void testJoinLeaderCannotAssign() { EasyMock.expect(configStorage.snapshot()).andReturn(configState1); EasyMock.expect(configStorage.snapshot()).andReturn(configState2); PowerMock.replayAll(); final String memberId = "member"; client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); client.prepareResponse(joinGroupFollowerResponse(1, memberId, "leader", Errors.NONE)); MockClient.RequestMatcher matcher = new MockClient.RequestMatcher() { @Override public boolean matches(AbstractRequest body) { SyncGroupRequest sync = (SyncGroupRequest) body; return sync.memberId().equals(memberId) && sync.generationId() == 1 && sync.groupAssignment().isEmpty(); } }; client.prepareResponse(matcher, syncGroupResponse(ConnectProtocol.Assignment.CONFIG_MISMATCH, "leader", 10L, Collections.<String>emptyList(), Collections.<ConnectorTaskId>emptyList(), Errors.NONE)); client.prepareResponse(joinGroupFollowerResponse(1, memberId, "leader", Errors.NONE)); client.prepareResponse(matcher, syncGroupResponse(ConnectProtocol.Assignment.NO_ERROR, "leader", 1L, Collections.<String>emptyList(), Collections.singletonList(taskId1x0), Errors.NONE)); coordinator.ensureActiveGroup(); PowerMock.verifyAll(); }
WorkerCoordinator extends AbstractCoordinator implements Closeable { public void requestRejoin() { rejoinRequested = true; } WorkerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, String restUrl, ConfigBackingStore configStorage, WorkerRebalanceListener listener); void requestRejoin(); @Override String protocolType(); void poll(long timeout); @Override List<ProtocolMetadata> metadata(); String memberId(); String ownerUrl(String connector); String ownerUrl(ConnectorTaskId task); static final String DEFAULT_SUBPROTOCOL; }
@Test public void testRejoinGroup() { EasyMock.expect(configStorage.snapshot()).andReturn(configState1); EasyMock.expect(configStorage.snapshot()).andReturn(configState1); PowerMock.replayAll(); client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); client.prepareResponse(joinGroupFollowerResponse(1, "consumer", "leader", Errors.NONE)); client.prepareResponse(syncGroupResponse(ConnectProtocol.Assignment.NO_ERROR, "leader", 1L, Collections.<String>emptyList(), Collections.singletonList(taskId1x0), Errors.NONE)); coordinator.ensureActiveGroup(); assertEquals(0, rebalanceListener.revokedCount); assertEquals(1, rebalanceListener.assignedCount); assertFalse(rebalanceListener.assignment.failed()); assertEquals(1L, rebalanceListener.assignment.offset()); assertEquals(Collections.emptyList(), rebalanceListener.assignment.connectors()); assertEquals(Collections.singletonList(taskId1x0), rebalanceListener.assignment.tasks()); coordinator.requestRejoin(); client.prepareResponse(joinGroupFollowerResponse(1, "consumer", "leader", Errors.NONE)); client.prepareResponse(syncGroupResponse(ConnectProtocol.Assignment.NO_ERROR, "leader", 1L, Collections.singletonList(connectorId1), Collections.<ConnectorTaskId>emptyList(), Errors.NONE)); coordinator.ensureActiveGroup(); assertEquals(1, rebalanceListener.revokedCount); assertEquals(Collections.emptyList(), rebalanceListener.revokedConnectors); assertEquals(Collections.singletonList(taskId1x0), rebalanceListener.revokedTasks); assertEquals(2, rebalanceListener.assignedCount); assertFalse(rebalanceListener.assignment.failed()); assertEquals(1L, rebalanceListener.assignment.offset()); assertEquals(Collections.singletonList(connectorId1), rebalanceListener.assignment.connectors()); assertEquals(Collections.emptyList(), rebalanceListener.assignment.tasks()); PowerMock.verifyAll(); }
SourceTaskOffsetCommitter { public void schedule(final ConnectorTaskId id, final WorkerSourceTask workerTask) { long commitIntervalMs = config.getLong(WorkerConfig.OFFSET_COMMIT_INTERVAL_MS_CONFIG); ScheduledFuture<?> commitFuture = commitExecutorService.scheduleWithFixedDelay(new Runnable() { @Override public void run() { commit(workerTask); } }, commitIntervalMs, commitIntervalMs, TimeUnit.MILLISECONDS); committers.put(id, commitFuture); } SourceTaskOffsetCommitter(WorkerConfig config, ScheduledExecutorService commitExecutorService, ConcurrentMap<ConnectorTaskId, ScheduledFuture<?>> committers); SourceTaskOffsetCommitter(WorkerConfig config); void close(long timeoutMs); void schedule(final ConnectorTaskId id, final WorkerSourceTask workerTask); void remove(ConnectorTaskId id); }
@Test public void testSchedule() throws Exception { Capture<Runnable> taskWrapper = EasyMock.newCapture(); ScheduledFuture commitFuture = PowerMock.createMock(ScheduledFuture.class); EasyMock.expect(executor.scheduleWithFixedDelay( EasyMock.capture(taskWrapper), eq(DEFAULT_OFFSET_COMMIT_INTERVAL_MS), eq(DEFAULT_OFFSET_COMMIT_INTERVAL_MS), eq(TimeUnit.MILLISECONDS)) ).andReturn(commitFuture); ConnectorTaskId taskId = PowerMock.createMock(ConnectorTaskId.class); WorkerSourceTask task = PowerMock.createMock(WorkerSourceTask.class); EasyMock.expect(committers.put(taskId, commitFuture)).andReturn(null); PowerMock.replayAll(); committer.schedule(taskId, task); assertTrue(taskWrapper.hasCaptured()); assertNotNull(taskWrapper.getValue()); PowerMock.verifyAll(); }
SourceTaskOffsetCommitter { public void remove(ConnectorTaskId id) { final ScheduledFuture<?> task = committers.remove(id); if (task == null) return; try { task.cancel(false); if (!task.isDone()) task.get(); } catch (CancellationException e) { log.trace("Offset commit thread was cancelled by another thread while removing connector task with id: {}", id); } catch (ExecutionException | InterruptedException e) { throw new ConnectException("Unexpected interruption in SourceTaskOffsetCommitter while removing task with id: " + id, e); } } SourceTaskOffsetCommitter(WorkerConfig config, ScheduledExecutorService commitExecutorService, ConcurrentMap<ConnectorTaskId, ScheduledFuture<?>> committers); SourceTaskOffsetCommitter(WorkerConfig config); void close(long timeoutMs); void schedule(final ConnectorTaskId id, final WorkerSourceTask workerTask); void remove(ConnectorTaskId id); }
@Test public void testRemove() throws Exception { ConnectorTaskId taskId = PowerMock.createMock(ConnectorTaskId.class); ScheduledFuture task = PowerMock.createMock(ScheduledFuture.class); EasyMock.expect(committers.remove(taskId)).andReturn(null); PowerMock.replayAll(); committer.remove(taskId); PowerMock.verifyAll(); PowerMock.resetAll(); EasyMock.expect(committers.remove(taskId)).andReturn(task); EasyMock.expect(task.cancel(eq(false))).andReturn(false); EasyMock.expect(task.isDone()).andReturn(false); EasyMock.expect(task.get()).andReturn(null); PowerMock.replayAll(); committer.remove(taskId); PowerMock.verifyAll(); PowerMock.resetAll(); EasyMock.expect(committers.remove(taskId)).andReturn(task); EasyMock.expect(task.cancel(eq(false))).andReturn(false); EasyMock.expect(task.isDone()).andReturn(false); EasyMock.expect(task.get()).andThrow(new CancellationException()); mockLog.trace(EasyMock.anyString(), EasyMock.<Object>anyObject()); PowerMock.expectLastCall(); PowerMock.replayAll(); committer.remove(taskId); PowerMock.verifyAll(); PowerMock.resetAll(); EasyMock.expect(committers.remove(taskId)).andReturn(task); EasyMock.expect(task.cancel(eq(false))).andReturn(false); EasyMock.expect(task.isDone()).andReturn(false); EasyMock.expect(task.get()).andThrow(new InterruptedException()); PowerMock.replayAll(); try { committer.remove(taskId); fail("Expected ConnectException to be raised"); } catch (ConnectException e) { } PowerMock.verifyAll(); }
KafkaConfigBackingStore implements ConfigBackingStore { @Override public void putConnectorConfig(String connector, Map<String, String> properties) { log.debug("Writing connector configuration {} for connector {} configuration", properties, connector); Struct connectConfig = new Struct(CONNECTOR_CONFIGURATION_V0); connectConfig.put("properties", properties); byte[] serializedConfig = converter.fromConnectData(topic, CONNECTOR_CONFIGURATION_V0, connectConfig); updateConnectorConfig(connector, serializedConfig); } KafkaConfigBackingStore(Converter converter, WorkerConfig config); static String TARGET_STATE_KEY(String connectorName); static String CONNECTOR_KEY(String connectorName); static String TASK_KEY(ConnectorTaskId taskId); static String COMMIT_TASKS_KEY(String connectorName); @Override void setUpdateListener(UpdateListener listener); @Override void start(); @Override void stop(); @Override ClusterConfigState snapshot(); @Override boolean contains(String connector); @Override void putConnectorConfig(String connector, Map<String, String> properties); @Override void removeConnectorConfig(String connector); @Override void removeTaskConfigs(String connector); @Override void putTaskConfigs(String connector, List<Map<String, String>> configs); @Override void refresh(long timeout, TimeUnit unit); @Override void putTargetState(String connector, TargetState state); static final String TARGET_STATE_PREFIX; static final String CONNECTOR_PREFIX; static final String TASK_PREFIX; static final String COMMIT_TASKS_PREFIX; static final Schema CONNECTOR_CONFIGURATION_V0; static final Schema TASK_CONFIGURATION_V0; static final Schema CONNECTOR_TASKS_COMMIT_V0; static final Schema TARGET_STATE_V0; }
// Verifies write/read-back of connector configs through the Kafka-backed store: two
// putConnectorConfig calls advance the snapshot offset to 1 then 2, and removing the
// second connector advances it to 4 (removal writes both config and target-state
// tombstones, per the expectConnectorRemoval expectation) and clears its config and
// target state. Mock expectation order is the contract; preserved verbatim.
@Test public void testPutConnectorConfig() throws Exception { expectConfigure(); expectStart(Collections.EMPTY_LIST, Collections.EMPTY_MAP); expectConvertWriteAndRead( CONNECTOR_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_CONFIGURATION_V0, CONFIGS_SERIALIZED.get(0), "properties", SAMPLE_CONFIGS.get(0)); configUpdateListener.onConnectorConfigUpdate(CONNECTOR_IDS.get(0)); EasyMock.expectLastCall(); expectConvertWriteAndRead( CONNECTOR_CONFIG_KEYS.get(1), KafkaConfigBackingStore.CONNECTOR_CONFIGURATION_V0, CONFIGS_SERIALIZED.get(1), "properties", SAMPLE_CONFIGS.get(1)); configUpdateListener.onConnectorConfigUpdate(CONNECTOR_IDS.get(1)); EasyMock.expectLastCall(); expectConnectorRemoval(CONNECTOR_CONFIG_KEYS.get(1), TARGET_STATE_KEYS.get(1)); configUpdateListener.onConnectorConfigRemove(CONNECTOR_IDS.get(1)); EasyMock.expectLastCall(); expectStop(); PowerMock.replayAll(); configStorage.setupAndCreateKafkaBasedLog(TOPIC, DEFAULT_DISTRIBUTED_CONFIG); configStorage.start(); ClusterConfigState configState = configStorage.snapshot(); assertEquals(-1, configState.offset()); assertNull(configState.connectorConfig(CONNECTOR_IDS.get(0))); assertNull(configState.connectorConfig(CONNECTOR_IDS.get(1))); configStorage.putConnectorConfig(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0)); configState = configStorage.snapshot(); assertEquals(1, configState.offset()); assertEquals(SAMPLE_CONFIGS.get(0), configState.connectorConfig(CONNECTOR_IDS.get(0))); assertNull(configState.connectorConfig(CONNECTOR_IDS.get(1))); configStorage.putConnectorConfig(CONNECTOR_IDS.get(1), SAMPLE_CONFIGS.get(1)); configState = configStorage.snapshot(); assertEquals(2, configState.offset()); assertEquals(SAMPLE_CONFIGS.get(0), configState.connectorConfig(CONNECTOR_IDS.get(0))); assertEquals(SAMPLE_CONFIGS.get(1), configState.connectorConfig(CONNECTOR_IDS.get(1))); configStorage.removeConnectorConfig(CONNECTOR_IDS.get(1)); configState = configStorage.snapshot(); assertEquals(4, configState.offset()); 
assertEquals(SAMPLE_CONFIGS.get(0), configState.connectorConfig(CONNECTOR_IDS.get(0))); assertNull(configState.connectorConfig(CONNECTOR_IDS.get(1))); assertNull(configState.targetState(CONNECTOR_IDS.get(1))); configStorage.stop(); PowerMock.verifyAll(); }
// Dataset extract of KafkaConfigBackingStore. Focal method putTaskConfigs(): first
// reads the config log to the end (bounded by READ_TO_END_TIMEOUT_MS), then writes one
// TASK_CONFIGURATION_V0 record per task config, reads to end again (only if any tasks
// were written), writes a CONNECTOR_TASKS_COMMIT_V0 record carrying the task count to
// make the new task set visible atomically, and reads to end once more. Any timeout,
// interruption, or execution failure is wrapped in ConnectException.
// Trailing bare declarations enumerate the public surface; not compilable bodies.
KafkaConfigBackingStore implements ConfigBackingStore { @Override public void putTaskConfigs(String connector, List<Map<String, String>> configs) { try { configLog.readToEnd().get(READ_TO_END_TIMEOUT_MS, TimeUnit.MILLISECONDS); } catch (InterruptedException | ExecutionException | TimeoutException e) { log.error("Failed to write root configuration to Kafka: ", e); throw new ConnectException("Error writing root configuration to Kafka", e); } int taskCount = configs.size(); int index = 0; for (Map<String, String> taskConfig: configs) { Struct connectConfig = new Struct(TASK_CONFIGURATION_V0); connectConfig.put("properties", taskConfig); byte[] serializedConfig = converter.fromConnectData(topic, TASK_CONFIGURATION_V0, connectConfig); log.debug("Writing configuration for task " + index + " configuration: " + taskConfig); ConnectorTaskId connectorTaskId = new ConnectorTaskId(connector, index); configLog.send(TASK_KEY(connectorTaskId), serializedConfig); index++; } try { if (taskCount > 0) { configLog.readToEnd().get(READ_TO_END_TIMEOUT_MS, TimeUnit.MILLISECONDS); } Struct connectConfig = new Struct(CONNECTOR_TASKS_COMMIT_V0); connectConfig.put("tasks", taskCount); byte[] serializedConfig = converter.fromConnectData(topic, CONNECTOR_TASKS_COMMIT_V0, connectConfig); log.debug("Writing commit for connector " + connector + " with " + taskCount + " tasks."); configLog.send(COMMIT_TASKS_KEY(connector), serializedConfig); configLog.readToEnd().get(READ_TO_END_TIMEOUT_MS, TimeUnit.MILLISECONDS); } catch (InterruptedException | ExecutionException | TimeoutException e) { log.error("Failed to write root configuration to Kafka: ", e); throw new ConnectException("Error writing root configuration to Kafka", e); } } KafkaConfigBackingStore(Converter converter, WorkerConfig config); static String TARGET_STATE_KEY(String connectorName); static String CONNECTOR_KEY(String connectorName); static String TASK_KEY(ConnectorTaskId taskId); static String COMMIT_TASKS_KEY(String connectorName); 
@Override void setUpdateListener(UpdateListener listener); @Override void start(); @Override void stop(); @Override ClusterConfigState snapshot(); @Override boolean contains(String connector); @Override void putConnectorConfig(String connector, Map<String, String> properties); @Override void removeConnectorConfig(String connector); @Override void removeTaskConfigs(String connector); @Override void putTaskConfigs(String connector, List<Map<String, String>> configs); @Override void refresh(long timeout, TimeUnit unit); @Override void putTargetState(String connector, TargetState state); static final String TARGET_STATE_PREFIX; static final String CONNECTOR_PREFIX; static final String TASK_PREFIX; static final String COMMIT_TASKS_PREFIX; static final Schema CONNECTOR_CONFIGURATION_V0; static final Schema TASK_CONFIGURATION_V0; static final Schema CONNECTOR_TASKS_COMMIT_V0; static final Schema TARGET_STATE_V0; }
// Verifies putTaskConfigs(): two task-config writes plus one commit record move the
// snapshot offset to 3, the listener is notified with both task ids, the task configs
// read back intact, and no connector is left inconsistent. The read-to-end /
// convert-write-read expectation sequence mirrors the production write protocol and is
// order-sensitive; preserved verbatim.
@Test public void testPutTaskConfigs() throws Exception { expectConfigure(); expectStart(Collections.EMPTY_LIST, Collections.EMPTY_MAP); expectReadToEnd(new LinkedHashMap<String, byte[]>()); expectConvertWriteRead( TASK_CONFIG_KEYS.get(0), KafkaConfigBackingStore.TASK_CONFIGURATION_V0, CONFIGS_SERIALIZED.get(0), "properties", SAMPLE_CONFIGS.get(0)); expectConvertWriteRead( TASK_CONFIG_KEYS.get(1), KafkaConfigBackingStore.TASK_CONFIGURATION_V0, CONFIGS_SERIALIZED.get(1), "properties", SAMPLE_CONFIGS.get(1)); expectReadToEnd(new LinkedHashMap<String, byte[]>()); expectConvertWriteRead( COMMIT_TASKS_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(2), "tasks", 2); configUpdateListener.onTaskConfigUpdate(Arrays.asList(TASK_IDS.get(0), TASK_IDS.get(1))); EasyMock.expectLastCall(); LinkedHashMap<String, byte[]> serializedConfigs = new LinkedHashMap<>(); serializedConfigs.put(TASK_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)); serializedConfigs.put(TASK_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(1)); serializedConfigs.put(COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(2)); expectReadToEnd(serializedConfigs); expectStop(); PowerMock.replayAll(); configStorage.setupAndCreateKafkaBasedLog(TOPIC, DEFAULT_DISTRIBUTED_CONFIG); configStorage.start(); whiteboxAddConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.EMPTY_LIST); ClusterConfigState configState = configStorage.snapshot(); assertEquals(-1, configState.offset()); assertNull(configState.taskConfig(TASK_IDS.get(0))); assertNull(configState.taskConfig(TASK_IDS.get(1))); List<Map<String, String>> taskConfigs = Arrays.asList(SAMPLE_CONFIGS.get(0), SAMPLE_CONFIGS.get(1)); configStorage.putTaskConfigs("connector1", taskConfigs); configState = configStorage.snapshot(); assertEquals(3, configState.offset()); String connectorName = CONNECTOR_IDS.get(0); assertEquals(Arrays.asList(connectorName), new ArrayList<>(configState.connectors())); 
assertEquals(Arrays.asList(TASK_IDS.get(0), TASK_IDS.get(1)), configState.tasks(connectorName)); assertEquals(SAMPLE_CONFIGS.get(0), configState.taskConfig(TASK_IDS.get(0))); assertEquals(SAMPLE_CONFIGS.get(1), configState.taskConfig(TASK_IDS.get(1))); assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors()); configStorage.stop(); PowerMock.verifyAll(); }
OffsetStorageWriter { public synchronized boolean beginFlush() { if (flushing()) { log.error("Invalid call to OffsetStorageWriter flush() while already flushing, the " + "framework should not allow this"); throw new ConnectException("OffsetStorageWriter is already flushing"); } if (data.isEmpty()) return false; assert !flushing(); toFlush = data; data = new HashMap<>(); return true; } OffsetStorageWriter(OffsetBackingStore backingStore, String namespace, Converter keyConverter, Converter valueConverter); synchronized void offset(Map<String, ?> partition, Map<String, ?> offset); synchronized boolean beginFlush(); Future<Void> doFlush(final Callback<Void> callback); synchronized void cancelFlush(); }
@Test public void testNoOffsetsToFlush() { PowerMock.replayAll(); assertFalse(writer.beginFlush()); PowerMock.verifyAll(); }
Time { public static int fromLogical(Schema schema, java.util.Date value) { if (schema.name() == null || !(schema.name().equals(LOGICAL_NAME))) throw new DataException("Requested conversion of Time object but the schema does not match."); Calendar calendar = Calendar.getInstance(UTC); calendar.setTime(value); long unixMillis = calendar.getTimeInMillis(); if (unixMillis < 0 || unixMillis > MILLIS_PER_DAY) { throw new DataException("Kafka Connect Time type should not have any date fields set to non-zero values."); } return (int) unixMillis; } static SchemaBuilder builder(); static int fromLogical(Schema schema, java.util.Date value); static java.util.Date toLogical(Schema schema, int value); static final String LOGICAL_NAME; static final Schema SCHEMA; }
@Test public void testFromLogical() { assertEquals(0, Time.fromLogical(Time.SCHEMA, EPOCH.getTime())); assertEquals(10000, Time.fromLogical(Time.SCHEMA, EPOCH_PLUS_TEN_THOUSAND_MILLIS.getTime())); } @Test(expected = DataException.class) public void testFromLogicalInvalidHasDateComponents() { Time.fromLogical(Time.SCHEMA, EPOCH_PLUS_DATE_COMPONENT.getTime()); }
Time { public static java.util.Date toLogical(Schema schema, int value) { if (schema.name() == null || !(schema.name().equals(LOGICAL_NAME))) throw new DataException("Requested conversion of Date object but the schema does not match."); if (value < 0 || value > MILLIS_PER_DAY) throw new DataException("Time values must use number of milliseconds greater than 0 and less than 86400000"); return new java.util.Date(value); } static SchemaBuilder builder(); static int fromLogical(Schema schema, java.util.Date value); static java.util.Date toLogical(Schema schema, int value); static final String LOGICAL_NAME; static final Schema SCHEMA; }
@Test public void testToLogical() { assertEquals(EPOCH.getTime(), Time.toLogical(Time.SCHEMA, 0)); assertEquals(EPOCH_PLUS_TEN_THOUSAND_MILLIS.getTime(), Time.toLogical(Time.SCHEMA, 10000)); }
Struct { @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Struct struct = (Struct) o; return Objects.equals(schema, struct.schema) && Arrays.equals(values, struct.values); } Struct(Schema schema); Schema schema(); Object get(String fieldName); Object get(Field field); Object getWithoutDefault(String fieldName); Byte getInt8(String fieldName); Short getInt16(String fieldName); Integer getInt32(String fieldName); Long getInt64(String fieldName); Float getFloat32(String fieldName); Double getFloat64(String fieldName); Boolean getBoolean(String fieldName); String getString(String fieldName); byte[] getBytes(String fieldName); @SuppressWarnings("unchecked") List<T> getArray(String fieldName); @SuppressWarnings("unchecked") Map<K, V> getMap(String fieldName); Struct getStruct(String fieldName); Struct put(String fieldName, Object value); Struct put(Field field, Object value); void validate(); @Override boolean equals(Object o); @Override int hashCode(); @Override String toString(); }
@Test public void testEquals() { Struct struct1 = new Struct(FLAT_STRUCT_SCHEMA) .put("int8", (byte) 12) .put("int16", (short) 12) .put("int32", 12) .put("int64", (long) 12) .put("float32", 12.f) .put("float64", 12.) .put("boolean", true) .put("string", "foobar") .put("bytes", ByteBuffer.wrap("foobar".getBytes())); Struct struct2 = new Struct(FLAT_STRUCT_SCHEMA) .put("int8", (byte) 12) .put("int16", (short) 12) .put("int32", 12) .put("int64", (long) 12) .put("float32", 12.f) .put("float64", 12.) .put("boolean", true) .put("string", "foobar") .put("bytes", ByteBuffer.wrap("foobar".getBytes())); Struct struct3 = new Struct(FLAT_STRUCT_SCHEMA) .put("int8", (byte) 12) .put("int16", (short) 12) .put("int32", 12) .put("int64", (long) 12) .put("float32", 12.f) .put("float64", 12.) .put("boolean", true) .put("string", "mismatching string") .put("bytes", ByteBuffer.wrap("foobar".getBytes())); assertEquals(struct1, struct2); assertNotEquals(struct1, struct3); List<Byte> array = Arrays.asList((byte) 1, (byte) 2); Map<Integer, String> map = Collections.singletonMap(1, "string"); struct1 = new Struct(NESTED_SCHEMA) .put("array", array) .put("map", map) .put("nested", new Struct(NESTED_CHILD_SCHEMA).put("int8", (byte) 12)); List<Byte> array2 = Arrays.asList((byte) 1, (byte) 2); Map<Integer, String> map2 = Collections.singletonMap(1, "string"); struct2 = new Struct(NESTED_SCHEMA) .put("array", array2) .put("map", map2) .put("nested", new Struct(NESTED_CHILD_SCHEMA).put("int8", (byte) 12)); List<Byte> array3 = Arrays.asList((byte) 1, (byte) 2, (byte) 3); Map<Integer, String> map3 = Collections.singletonMap(2, "string"); struct3 = new Struct(NESTED_SCHEMA) .put("array", array3) .put("map", map3) .put("nested", new Struct(NESTED_CHILD_SCHEMA).put("int8", (byte) 13)); assertEquals(struct1, struct2); assertNotEquals(struct1, struct3); }
ConnectSchema implements Schema { @Override public List<Field> fields() { if (type != Type.STRUCT) throw new DataException("Cannot list fields on non-struct type"); return fields; } ConnectSchema(Type type, boolean optional, Object defaultValue, String name, Integer version, String doc, Map<String, String> parameters, List<Field> fields, Schema keySchema, Schema valueSchema); ConnectSchema(Type type, boolean optional, Object defaultValue, String name, Integer version, String doc); ConnectSchema(Type type); @Override Type type(); @Override boolean isOptional(); @Override Object defaultValue(); @Override String name(); @Override Integer version(); @Override String doc(); @Override Map<String, String> parameters(); @Override List<Field> fields(); Field field(String fieldName); @Override Schema keySchema(); @Override Schema valueSchema(); static void validateValue(Schema schema, Object value); static void validateValue(String name, Schema schema, Object value); void validateValue(Object value); @Override ConnectSchema schema(); @Override boolean equals(Object o); @Override int hashCode(); @Override String toString(); static Type schemaType(Class<?> klass); }
@Test(expected = DataException.class) public void testFieldsOnlyValidForStructs() { Schema.INT8_SCHEMA.fields(); } @Test public void testEmptyStruct() { final ConnectSchema emptyStruct = new ConnectSchema(Schema.Type.STRUCT, false, null, null, null, null); assertEquals(0, emptyStruct.fields().size()); new Struct(emptyStruct); }
ConnectSchema implements Schema { public static void validateValue(Schema schema, Object value) { validateValue(null, schema, value); } ConnectSchema(Type type, boolean optional, Object defaultValue, String name, Integer version, String doc, Map<String, String> parameters, List<Field> fields, Schema keySchema, Schema valueSchema); ConnectSchema(Type type, boolean optional, Object defaultValue, String name, Integer version, String doc); ConnectSchema(Type type); @Override Type type(); @Override boolean isOptional(); @Override Object defaultValue(); @Override String name(); @Override Integer version(); @Override String doc(); @Override Map<String, String> parameters(); @Override List<Field> fields(); Field field(String fieldName); @Override Schema keySchema(); @Override Schema valueSchema(); static void validateValue(Schema schema, Object value); static void validateValue(String name, Schema schema, Object value); void validateValue(Object value); @Override ConnectSchema schema(); @Override boolean equals(Object o); @Override int hashCode(); @Override String toString(); static Type schemaType(Class<?> klass); }
// validateValue() test battery: one positive test accepting every schema type with a
// matching value (primitives, bytes/ByteBuffer, array, map, nested struct), followed by
// one expected-DataException test per mismatch case — wrong primitive width/type,
// CharBuffer for STRING, partial mismatches inside arrays and maps, and raw longs for
// the Date/Time/Timestamp logical types. Method boundaries are split across the
// collapsed lines below; preserved verbatim.
@Test public void testValidateValueMatchingType() { ConnectSchema.validateValue(Schema.INT8_SCHEMA, (byte) 1); ConnectSchema.validateValue(Schema.INT16_SCHEMA, (short) 1); ConnectSchema.validateValue(Schema.INT32_SCHEMA, 1); ConnectSchema.validateValue(Schema.INT64_SCHEMA, (long) 1); ConnectSchema.validateValue(Schema.FLOAT32_SCHEMA, 1.f); ConnectSchema.validateValue(Schema.FLOAT64_SCHEMA, 1.); ConnectSchema.validateValue(Schema.BOOLEAN_SCHEMA, true); ConnectSchema.validateValue(Schema.STRING_SCHEMA, "a string"); ConnectSchema.validateValue(Schema.BYTES_SCHEMA, "a byte array".getBytes()); ConnectSchema.validateValue(Schema.BYTES_SCHEMA, ByteBuffer.wrap("a byte array".getBytes())); ConnectSchema.validateValue(SchemaBuilder.array(Schema.INT32_SCHEMA).build(), Arrays.asList(1, 2, 3)); ConnectSchema.validateValue( SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.STRING_SCHEMA).build(), Collections.singletonMap(1, "value") ); Struct structValue = new Struct(STRUCT_SCHEMA) .put("first", 1) .put("second", "foo") .put("array", Arrays.asList(1, 2, 3)) .put("map", Collections.singletonMap(1, "value")) .put("nested", new Struct(FLAT_STRUCT_SCHEMA).put("field", 12)); ConnectSchema.validateValue(STRUCT_SCHEMA, structValue); } @Test(expected = DataException.class) public void testValidateValueMismatchInt8() { ConnectSchema.validateValue(Schema.INT8_SCHEMA, 1); } @Test(expected = DataException.class) public void testValidateValueMismatchInt16() { ConnectSchema.validateValue(Schema.INT16_SCHEMA, 1); } @Test(expected = DataException.class) public void testValidateValueMismatchInt32() { ConnectSchema.validateValue(Schema.INT32_SCHEMA, (long) 1); } @Test(expected = DataException.class) public void testValidateValueMismatchInt64() { ConnectSchema.validateValue(Schema.INT64_SCHEMA, 1); } @Test(expected = DataException.class) public void testValidateValueMismatchFloat() { ConnectSchema.validateValue(Schema.FLOAT32_SCHEMA, 1.0); } @Test(expected = DataException.class) public void 
testValidateValueMismatchDouble() { ConnectSchema.validateValue(Schema.FLOAT64_SCHEMA, 1.f); } @Test(expected = DataException.class) public void testValidateValueMismatchBoolean() { ConnectSchema.validateValue(Schema.BOOLEAN_SCHEMA, 1.f); } @Test(expected = DataException.class) public void testValidateValueMismatchString() { CharBuffer cbuf = CharBuffer.wrap("abc"); ConnectSchema.validateValue(Schema.STRING_SCHEMA, cbuf); } @Test(expected = DataException.class) public void testValidateValueMismatchBytes() { ConnectSchema.validateValue(Schema.BYTES_SCHEMA, new Object[]{1, "foo"}); } @Test(expected = DataException.class) public void testValidateValueMismatchArray() { ConnectSchema.validateValue(SchemaBuilder.array(Schema.INT32_SCHEMA).build(), Arrays.asList("a", "b", "c")); } @Test(expected = DataException.class) public void testValidateValueMismatchArraySomeMatch() { ConnectSchema.validateValue(SchemaBuilder.array(Schema.INT32_SCHEMA).build(), Arrays.asList(1, 2, "c")); } @Test(expected = DataException.class) public void testValidateValueMismatchMapKey() { ConnectSchema.validateValue(MAP_INT_STRING_SCHEMA, Collections.singletonMap("wrong key type", "value")); } @Test(expected = DataException.class) public void testValidateValueMismatchMapValue() { ConnectSchema.validateValue(MAP_INT_STRING_SCHEMA, Collections.singletonMap(1, 2)); } @Test(expected = DataException.class) public void testValidateValueMismatchMapSomeKeys() { Map<Object, String> data = new HashMap<>(); data.put(1, "abc"); data.put("wrong", "it's as easy as one two three"); ConnectSchema.validateValue(MAP_INT_STRING_SCHEMA, data); } @Test(expected = DataException.class) public void testValidateValueMismatchMapSomeValues() { Map<Integer, Object> data = new HashMap<>(); data.put(1, "abc"); data.put(2, "wrong".getBytes()); ConnectSchema.validateValue(MAP_INT_STRING_SCHEMA, data); } @Test(expected = DataException.class) public void testValidateValueMismatchDate() { ConnectSchema.validateValue(Date.SCHEMA, 
1000L); } @Test(expected = DataException.class) public void testValidateValueMismatchTime() { ConnectSchema.validateValue(Time.SCHEMA, 1000L); } @Test(expected = DataException.class) public void testValidateValueMismatchTimestamp() { ConnectSchema.validateValue(Timestamp.SCHEMA, 1000L); }
SchemaBuilder implements Schema { @Override public Map<String, String> parameters() { return parameters == null ? null : Collections.unmodifiableMap(parameters); } SchemaBuilder(Type type); @Override boolean isOptional(); SchemaBuilder optional(); SchemaBuilder required(); @Override Object defaultValue(); SchemaBuilder defaultValue(Object value); @Override String name(); SchemaBuilder name(String name); @Override Integer version(); SchemaBuilder version(Integer version); @Override String doc(); SchemaBuilder doc(String doc); @Override Map<String, String> parameters(); SchemaBuilder parameter(String propertyName, String propertyValue); SchemaBuilder parameters(Map<String, String> props); @Override Type type(); static SchemaBuilder type(Type type); static SchemaBuilder int8(); static SchemaBuilder int16(); static SchemaBuilder int32(); static SchemaBuilder int64(); static SchemaBuilder float32(); static SchemaBuilder float64(); static SchemaBuilder bool(); static SchemaBuilder string(); static SchemaBuilder bytes(); static SchemaBuilder struct(); SchemaBuilder field(String fieldName, Schema fieldSchema); List<Field> fields(); Field field(String fieldName); static SchemaBuilder array(Schema valueSchema); static SchemaBuilder map(Schema keySchema, Schema valueSchema); @Override Schema keySchema(); @Override Schema valueSchema(); Schema build(); @Override Schema schema(); }
@Test public void testParameters() { Map<String, String> expectedParameters = new HashMap<>(); expectedParameters.put("foo", "val"); expectedParameters.put("bar", "baz"); Schema schema = SchemaBuilder.string().parameter("foo", "val").parameter("bar", "baz").build(); assertTypeAndDefault(schema, Schema.Type.STRING, false, null); assertMetadata(schema, null, null, null, expectedParameters); schema = SchemaBuilder.string().parameters(expectedParameters).build(); assertTypeAndDefault(schema, Schema.Type.STRING, false, null); assertMetadata(schema, null, null, null, expectedParameters); }
Date { public static int fromLogical(Schema schema, java.util.Date value) { if (schema.name() == null || !(schema.name().equals(LOGICAL_NAME))) throw new DataException("Requested conversion of Date object but the schema does not match."); Calendar calendar = Calendar.getInstance(UTC); calendar.setTime(value); if (calendar.get(Calendar.HOUR_OF_DAY) != 0 || calendar.get(Calendar.MINUTE) != 0 || calendar.get(Calendar.SECOND) != 0 || calendar.get(Calendar.MILLISECOND) != 0) { throw new DataException("Kafka Connect Date type should not have any time fields set to non-zero values."); } long unixMillis = calendar.getTimeInMillis(); return (int) (unixMillis / MILLIS_PER_DAY); } static SchemaBuilder builder(); static int fromLogical(Schema schema, java.util.Date value); static java.util.Date toLogical(Schema schema, int value); static final String LOGICAL_NAME; static final Schema SCHEMA; }
@Test public void testFromLogical() { assertEquals(0, Date.fromLogical(Date.SCHEMA, EPOCH.getTime())); assertEquals(10000, Date.fromLogical(Date.SCHEMA, EPOCH_PLUS_TEN_THOUSAND_DAYS.getTime())); } @Test(expected = DataException.class) public void testFromLogicalInvalidHasTimeComponents() { Date.fromLogical(Date.SCHEMA, EPOCH_PLUS_TIME_COMPONENT.getTime()); }
Date { public static java.util.Date toLogical(Schema schema, int value) { if (schema.name() == null || !(schema.name().equals(LOGICAL_NAME))) throw new DataException("Requested conversion of Date object but the schema does not match."); return new java.util.Date(value * MILLIS_PER_DAY); } static SchemaBuilder builder(); static int fromLogical(Schema schema, java.util.Date value); static java.util.Date toLogical(Schema schema, int value); static final String LOGICAL_NAME; static final Schema SCHEMA; }
@Test public void testToLogical() { assertEquals(EPOCH.getTime(), Date.toLogical(Date.SCHEMA, 0)); assertEquals(EPOCH_PLUS_TEN_THOUSAND_DAYS.getTime(), Date.toLogical(Date.SCHEMA, 10000)); }
Timestamp { public static long fromLogical(Schema schema, java.util.Date value) { if (schema.name() == null || !(schema.name().equals(LOGICAL_NAME))) throw new DataException("Requested conversion of Timestamp object but the schema does not match."); return value.getTime(); } static SchemaBuilder builder(); static long fromLogical(Schema schema, java.util.Date value); static java.util.Date toLogical(Schema schema, long value); static final String LOGICAL_NAME; static final Schema SCHEMA; }
@Test public void testFromLogical() { assertEquals(0L, Timestamp.fromLogical(Timestamp.SCHEMA, EPOCH.getTime())); assertEquals(TOTAL_MILLIS, Timestamp.fromLogical(Timestamp.SCHEMA, EPOCH_PLUS_MILLIS.getTime())); }
Timestamp { public static java.util.Date toLogical(Schema schema, long value) { if (schema.name() == null || !(schema.name().equals(LOGICAL_NAME))) throw new DataException("Requested conversion of Timestamp object but the schema does not match."); return new java.util.Date(value); } static SchemaBuilder builder(); static long fromLogical(Schema schema, java.util.Date value); static java.util.Date toLogical(Schema schema, long value); static final String LOGICAL_NAME; static final Schema SCHEMA; }
@Test public void testToLogical() { assertEquals(EPOCH.getTime(), Timestamp.toLogical(Timestamp.SCHEMA, 0L)); assertEquals(EPOCH_PLUS_MILLIS.getTime(), Timestamp.toLogical(Timestamp.SCHEMA, TOTAL_MILLIS)); }
SchemaProjector { public static Object project(Schema source, Object record, Schema target) throws SchemaProjectorException { checkMaybeCompatible(source, target); if (source.isOptional() && !target.isOptional()) { if (target.defaultValue() != null) { if (record != null) { return projectRequiredSchema(source, record, target); } else { return target.defaultValue(); } } else { throw new SchemaProjectorException("Writer schema is optional, however, target schema does not provide a default value."); } } else { if (record != null) { return projectRequiredSchema(source, record, target); } else { return null; } } } static Object project(Schema source, Object record, Schema target); }
@Test public void testPrimitiveTypeProjection() throws Exception { Object projected; projected = SchemaProjector.project(Schema.BOOLEAN_SCHEMA, false, Schema.BOOLEAN_SCHEMA); assertEquals(false, projected); byte[] bytes = {(byte) 1, (byte) 2}; projected = SchemaProjector.project(Schema.BYTES_SCHEMA, bytes, Schema.BYTES_SCHEMA); assertEquals(bytes, projected); projected = SchemaProjector.project(Schema.STRING_SCHEMA, "abc", Schema.STRING_SCHEMA); assertEquals("abc", projected); projected = SchemaProjector.project(Schema.BOOLEAN_SCHEMA, false, Schema.OPTIONAL_BOOLEAN_SCHEMA); assertEquals(false, projected); projected = SchemaProjector.project(Schema.BYTES_SCHEMA, bytes, Schema.OPTIONAL_BYTES_SCHEMA); assertEquals(bytes, projected); projected = SchemaProjector.project(Schema.STRING_SCHEMA, "abc", Schema.OPTIONAL_STRING_SCHEMA); assertEquals("abc", projected); try { SchemaProjector.project(Schema.OPTIONAL_BOOLEAN_SCHEMA, false, Schema.BOOLEAN_SCHEMA); fail("Cannot project optional schema to schema with no default value."); } catch (DataException e) { } try { SchemaProjector.project(Schema.OPTIONAL_BYTES_SCHEMA, bytes, Schema.BYTES_SCHEMA); fail("Cannot project optional schema to schema with no default value."); } catch (DataException e) { } try { SchemaProjector.project(Schema.OPTIONAL_STRING_SCHEMA, "abc", Schema.STRING_SCHEMA); fail("Cannot project optional schema to schema with no default value."); } catch (DataException e) { } } @Test public void testNumericTypeProjection() throws Exception { Schema[] promotableSchemas = {Schema.INT8_SCHEMA, Schema.INT16_SCHEMA, Schema.INT32_SCHEMA, Schema.INT64_SCHEMA, Schema.FLOAT32_SCHEMA, Schema.FLOAT64_SCHEMA}; Schema[] promotableOptionalSchemas = {Schema.OPTIONAL_INT8_SCHEMA, Schema.OPTIONAL_INT16_SCHEMA, Schema.OPTIONAL_INT32_SCHEMA, Schema.OPTIONAL_INT64_SCHEMA, Schema.OPTIONAL_FLOAT32_SCHEMA, Schema.OPTIONAL_FLOAT64_SCHEMA}; Object[] values = {(byte) 127, (short) 255, 32767, 327890L, 1.2F, 1.2345}; Map<Object, List<?>> 
expectedProjected = new HashMap<>(); expectedProjected.put(values[0], Arrays.asList((byte) 127, (short) 127, 127, 127L, 127.F, 127.)); expectedProjected.put(values[1], Arrays.asList((short) 255, 255, 255L, 255.F, 255.)); expectedProjected.put(values[2], Arrays.asList(32767, 32767L, 32767.F, 32767.)); expectedProjected.put(values[3], Arrays.asList(327890L, 327890.F, 327890.)); expectedProjected.put(values[4], Arrays.asList(1.2F, 1.2)); expectedProjected.put(values[5], Arrays.asList(1.2345)); Object promoted; for (int i = 0; i < promotableSchemas.length; ++i) { Schema source = promotableSchemas[i]; List<?> expected = expectedProjected.get(values[i]); for (int j = i; j < promotableSchemas.length; ++j) { Schema target = promotableSchemas[j]; promoted = SchemaProjector.project(source, values[i], target); if (target.type() == Type.FLOAT64) { assertEquals((Double) (expected.get(j - i)), (double) promoted, 1e-6); } else { assertEquals(expected.get(j - i), promoted); } } for (int j = i; j < promotableOptionalSchemas.length; ++j) { Schema target = promotableOptionalSchemas[j]; promoted = SchemaProjector.project(source, values[i], target); if (target.type() == Type.FLOAT64) { assertEquals((Double) (expected.get(j - i)), (double) promoted, 1e-6); } else { assertEquals(expected.get(j - i), promoted); } } } for (int i = 0; i < promotableOptionalSchemas.length; ++i) { Schema source = promotableSchemas[i]; List<?> expected = expectedProjected.get(values[i]); for (int j = i; j < promotableOptionalSchemas.length; ++j) { Schema target = promotableOptionalSchemas[j]; promoted = SchemaProjector.project(source, values[i], target); if (target.type() == Type.FLOAT64) { assertEquals((Double) (expected.get(j - i)), (double) promoted, 1e-6); } else { assertEquals(expected.get(j - i), promoted); } } } Schema[] nonPromotableSchemas = {Schema.BOOLEAN_SCHEMA, Schema.BYTES_SCHEMA, Schema.STRING_SCHEMA}; for (Schema promotableSchema: promotableSchemas) { for (Schema nonPromotableSchema: 
nonPromotableSchemas) { Object dummy = new Object(); try { SchemaProjector.project(promotableSchema, dummy, nonPromotableSchema); fail("Cannot promote " + promotableSchema.type() + " to " + nonPromotableSchema.type()); } catch (DataException e) { } } } } @Test public void testStructAddField() throws Exception { Schema source = SchemaBuilder.struct() .field("field", Schema.INT32_SCHEMA) .build(); Struct sourceStruct = new Struct(source); sourceStruct.put("field", 1); Schema target = SchemaBuilder.struct() .field("field", Schema.INT32_SCHEMA) .field("field2", SchemaBuilder.int32().defaultValue(123).build()) .build(); Struct targetStruct = (Struct) SchemaProjector.project(source, sourceStruct, target); assertEquals(1, (int) targetStruct.getInt32("field")); assertEquals(123, (int) targetStruct.getInt32("field2")); Schema incompatibleTargetSchema = SchemaBuilder.struct() .field("field", Schema.INT32_SCHEMA) .field("field2", Schema.INT32_SCHEMA) .build(); try { SchemaProjector.project(source, sourceStruct, incompatibleTargetSchema); fail("Incompatible schema."); } catch (DataException e) { } } @Test public void testStructRemoveField() throws Exception { Schema source = SchemaBuilder.struct() .field("field", Schema.INT32_SCHEMA) .field("field2", Schema.INT32_SCHEMA) .build(); Struct sourceStruct = new Struct(source); sourceStruct.put("field", 1); sourceStruct.put("field2", 234); Schema target = SchemaBuilder.struct() .field("field", Schema.INT32_SCHEMA) .build(); Struct targetStruct = (Struct) SchemaProjector.project(source, sourceStruct, target); assertEquals(1, targetStruct.get("field")); try { targetStruct.get("field2"); fail("field2 is not part of the projected struct"); } catch (DataException e) { } } @Test public void testStructDefaultValue() throws Exception { Schema source = SchemaBuilder.struct().optional() .field("field", Schema.INT32_SCHEMA) .field("field2", Schema.INT32_SCHEMA) .build(); SchemaBuilder builder = SchemaBuilder.struct() .field("field", 
Schema.INT32_SCHEMA) .field("field2", Schema.INT32_SCHEMA); Struct defaultStruct = new Struct(builder).put("field", 12).put("field2", 345); builder.defaultValue(defaultStruct); Schema target = builder.build(); Object projected = SchemaProjector.project(source, null, target); assertEquals(defaultStruct, projected); Struct sourceStruct = new Struct(source).put("field", 45).put("field2", 678); Struct targetStruct = (Struct) SchemaProjector.project(source, sourceStruct, target); assertEquals(sourceStruct.get("field"), targetStruct.get("field")); assertEquals(sourceStruct.get("field2"), targetStruct.get("field2")); } @Test public void testNestedSchemaProjection() throws Exception { Schema sourceFlatSchema = SchemaBuilder.struct() .field("field", Schema.INT32_SCHEMA) .build(); Schema targetFlatSchema = SchemaBuilder.struct() .field("field", Schema.INT32_SCHEMA) .field("field2", SchemaBuilder.int32().defaultValue(123).build()) .build(); Schema sourceNestedSchema = SchemaBuilder.struct() .field("first", Schema.INT32_SCHEMA) .field("second", Schema.STRING_SCHEMA) .field("array", SchemaBuilder.array(Schema.INT32_SCHEMA).build()) .field("map", SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.STRING_SCHEMA).build()) .field("nested", sourceFlatSchema) .build(); Schema targetNestedSchema = SchemaBuilder.struct() .field("first", Schema.INT32_SCHEMA) .field("second", Schema.STRING_SCHEMA) .field("array", SchemaBuilder.array(Schema.INT32_SCHEMA).build()) .field("map", SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.STRING_SCHEMA).build()) .field("nested", targetFlatSchema) .build(); Struct sourceFlatStruct = new Struct(sourceFlatSchema); sourceFlatStruct.put("field", 113); Struct sourceNestedStruct = new Struct(sourceNestedSchema); sourceNestedStruct.put("first", 1); sourceNestedStruct.put("second", "abc"); sourceNestedStruct.put("array", Arrays.asList(1, 2)); sourceNestedStruct.put("map", Collections.singletonMap(5, "def")); sourceNestedStruct.put("nested", sourceFlatStruct); Struct 
targetNestedStruct = (Struct) SchemaProjector.project(sourceNestedSchema, sourceNestedStruct, targetNestedSchema); assertEquals(1, targetNestedStruct.get("first")); assertEquals("abc", targetNestedStruct.get("second")); assertEquals(Arrays.asList(1, 2), (List<Integer>) targetNestedStruct.get("array")); assertEquals(Collections.singletonMap(5, "def"), (Map<Integer, String>) targetNestedStruct.get("map")); Struct projectedStruct = (Struct) targetNestedStruct.get("nested"); assertEquals(113, projectedStruct.get("field")); assertEquals(123, projectedStruct.get("field2")); } @Test public void testLogicalTypeProjection() throws Exception { Schema[] logicalTypeSchemas = {Decimal.schema(2), Date.SCHEMA, Time.SCHEMA, Timestamp.SCHEMA}; Object projected; BigDecimal testDecimal = new BigDecimal(new BigInteger("156"), 2); projected = SchemaProjector.project(Decimal.schema(2), testDecimal, Decimal.schema(2)); assertEquals(testDecimal, projected); projected = SchemaProjector.project(Date.SCHEMA, 1000, Date.SCHEMA); assertEquals(1000, projected); projected = SchemaProjector.project(Time.SCHEMA, 231, Time.SCHEMA); assertEquals(231, projected); projected = SchemaProjector.project(Timestamp.SCHEMA, 34567L, Timestamp.SCHEMA); assertEquals(34567L, projected); Schema namedSchema = SchemaBuilder.int32().name("invalidLogicalTypeName").build(); for (Schema logicalTypeSchema: logicalTypeSchemas) { try { SchemaProjector.project(logicalTypeSchema, null, Schema.BOOLEAN_SCHEMA); fail("Cannot project logical types to non-logical types."); } catch (SchemaProjectorException e) { } try { SchemaProjector.project(logicalTypeSchema, null, namedSchema); fail("Reader name is not a valid logical type name."); } catch (SchemaProjectorException e) { } try { SchemaProjector.project(Schema.BOOLEAN_SCHEMA, null, logicalTypeSchema); fail("Cannot project non-logical types to logical types."); } catch (SchemaProjectorException e) { } } } @Test public void testArrayProjection() throws Exception { Schema source = 
SchemaBuilder.array(Schema.INT32_SCHEMA).build(); Object projected = SchemaProjector.project(source, Arrays.asList(1, 2, 3), source); assertEquals(Arrays.asList(1, 2, 3), (List<Integer>) projected); Schema optionalSource = SchemaBuilder.array(Schema.INT32_SCHEMA).optional().build(); Schema target = SchemaBuilder.array(Schema.INT32_SCHEMA).defaultValue(Arrays.asList(1, 2, 3)).build(); projected = SchemaProjector.project(optionalSource, Arrays.asList(4, 5), target); assertEquals(Arrays.asList(4, 5), (List<Integer>) projected); projected = SchemaProjector.project(optionalSource, null, target); assertEquals(Arrays.asList(1, 2, 3), (List<Integer>) projected); Schema promotedTarget = SchemaBuilder.array(Schema.INT64_SCHEMA).defaultValue(Arrays.asList(1L, 2L, 3L)).build(); projected = SchemaProjector.project(optionalSource, Arrays.asList(4, 5), promotedTarget); List<Long> expectedProjected = Arrays.asList(4L, 5L); assertEquals(expectedProjected, (List<Long>) projected); projected = SchemaProjector.project(optionalSource, null, promotedTarget); assertEquals(Arrays.asList(1L, 2L, 3L), (List<Long>) projected); Schema noDefaultValueTarget = SchemaBuilder.array(Schema.INT32_SCHEMA).build(); try { SchemaProjector.project(optionalSource, null, noDefaultValueTarget); fail("Target schema does not provide a default value."); } catch (SchemaProjectorException e) { } Schema nonPromotableTarget = SchemaBuilder.array(Schema.BOOLEAN_SCHEMA).build(); try { SchemaProjector.project(optionalSource, null, nonPromotableTarget); fail("Neither source type matches target type nor source type can be promoted to target type"); } catch (SchemaProjectorException e) { } } @Test public void testMapProjection() throws Exception { Schema source = SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.INT32_SCHEMA).optional().build(); Schema target = SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.INT32_SCHEMA).defaultValue(Collections.singletonMap(1, 2)).build(); Object projected = SchemaProjector.project(source, 
Collections.singletonMap(3, 4), target); assertEquals(Collections.singletonMap(3, 4), (Map<Integer, Integer>) projected); projected = SchemaProjector.project(source, null, target); assertEquals(Collections.singletonMap(1, 2), (Map<Integer, Integer>) projected); Schema promotedTarget = SchemaBuilder.map(Schema.INT64_SCHEMA, Schema.FLOAT32_SCHEMA).defaultValue( Collections.singletonMap(3L, 4.5F)).build(); projected = SchemaProjector.project(source, Collections.singletonMap(3, 4), promotedTarget); assertEquals(Collections.singletonMap(3L, 4.F), (Map<Long, Float>) projected); projected = SchemaProjector.project(source, null, promotedTarget); assertEquals(Collections.singletonMap(3L, 4.5F), (Map<Long, Float>) projected); Schema noDefaultValueTarget = SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.INT32_SCHEMA).build(); try { SchemaProjector.project(source, null, noDefaultValueTarget); fail("Reader does not provide a default value."); } catch (SchemaProjectorException e) { } Schema nonPromotableTarget = SchemaBuilder.map(Schema.BOOLEAN_SCHEMA, Schema.STRING_SCHEMA).build(); try { SchemaProjector.project(source, null, nonPromotableTarget); fail("Neither source type matches target type nor source type can be promoted to target type"); } catch (SchemaProjectorException e) { } } @Test public void testMaybeCompatible() throws Exception { Schema source = SchemaBuilder.int32().name("source").build(); Schema target = SchemaBuilder.int32().name("target").build(); try { SchemaProjector.project(source, 12, target); fail("Source name and target name mismatch."); } catch (SchemaProjectorException e) { } Schema targetWithParameters = SchemaBuilder.int32().parameters(Collections.singletonMap("key", "value")); try { SchemaProjector.project(source, 34, targetWithParameters); fail("Source parameters and target parameters mismatch."); } catch (SchemaProjectorException e) { } } @Test public void testPrimitiveTypeProjection() { Object projected; projected = 
SchemaProjector.project(Schema.BOOLEAN_SCHEMA, false, Schema.BOOLEAN_SCHEMA); assertEquals(false, projected); byte[] bytes = {(byte) 1, (byte) 2}; projected = SchemaProjector.project(Schema.BYTES_SCHEMA, bytes, Schema.BYTES_SCHEMA); assertEquals(bytes, projected); projected = SchemaProjector.project(Schema.STRING_SCHEMA, "abc", Schema.STRING_SCHEMA); assertEquals("abc", projected); projected = SchemaProjector.project(Schema.BOOLEAN_SCHEMA, false, Schema.OPTIONAL_BOOLEAN_SCHEMA); assertEquals(false, projected); projected = SchemaProjector.project(Schema.BYTES_SCHEMA, bytes, Schema.OPTIONAL_BYTES_SCHEMA); assertEquals(bytes, projected); projected = SchemaProjector.project(Schema.STRING_SCHEMA, "abc", Schema.OPTIONAL_STRING_SCHEMA); assertEquals("abc", projected); try { SchemaProjector.project(Schema.OPTIONAL_BOOLEAN_SCHEMA, false, Schema.BOOLEAN_SCHEMA); fail("Cannot project optional schema to schema with no default value."); } catch (DataException e) { } try { SchemaProjector.project(Schema.OPTIONAL_BYTES_SCHEMA, bytes, Schema.BYTES_SCHEMA); fail("Cannot project optional schema to schema with no default value."); } catch (DataException e) { } try { SchemaProjector.project(Schema.OPTIONAL_STRING_SCHEMA, "abc", Schema.STRING_SCHEMA); fail("Cannot project optional schema to schema with no default value."); } catch (DataException e) { } } @Test public void testNumericTypeProjection() { Schema[] promotableSchemas = {Schema.INT8_SCHEMA, Schema.INT16_SCHEMA, Schema.INT32_SCHEMA, Schema.INT64_SCHEMA, Schema.FLOAT32_SCHEMA, Schema.FLOAT64_SCHEMA}; Schema[] promotableOptionalSchemas = {Schema.OPTIONAL_INT8_SCHEMA, Schema.OPTIONAL_INT16_SCHEMA, Schema.OPTIONAL_INT32_SCHEMA, Schema.OPTIONAL_INT64_SCHEMA, Schema.OPTIONAL_FLOAT32_SCHEMA, Schema.OPTIONAL_FLOAT64_SCHEMA}; Object[] values = {(byte) 127, (short) 255, 32767, 327890L, 1.2F, 1.2345}; Map<Object, List<?>> expectedProjected = new HashMap<>(); expectedProjected.put(values[0], Arrays.asList((byte) 127, (short) 127, 127, 
127L, 127.F, 127.)); expectedProjected.put(values[1], Arrays.asList((short) 255, 255, 255L, 255.F, 255.)); expectedProjected.put(values[2], Arrays.asList(32767, 32767L, 32767.F, 32767.)); expectedProjected.put(values[3], Arrays.asList(327890L, 327890.F, 327890.)); expectedProjected.put(values[4], Arrays.asList(1.2F, 1.2)); expectedProjected.put(values[5], Arrays.asList(1.2345)); Object promoted; for (int i = 0; i < promotableSchemas.length; ++i) { Schema source = promotableSchemas[i]; List<?> expected = expectedProjected.get(values[i]); for (int j = i; j < promotableSchemas.length; ++j) { Schema target = promotableSchemas[j]; promoted = SchemaProjector.project(source, values[i], target); if (target.type() == Type.FLOAT64) { assertEquals((Double) (expected.get(j - i)), (double) promoted, 1e-6); } else { assertEquals(expected.get(j - i), promoted); } } for (int j = i; j < promotableOptionalSchemas.length; ++j) { Schema target = promotableOptionalSchemas[j]; promoted = SchemaProjector.project(source, values[i], target); if (target.type() == Type.FLOAT64) { assertEquals((Double) (expected.get(j - i)), (double) promoted, 1e-6); } else { assertEquals(expected.get(j - i), promoted); } } } for (int i = 0; i < promotableOptionalSchemas.length; ++i) { Schema source = promotableSchemas[i]; List<?> expected = expectedProjected.get(values[i]); for (int j = i; j < promotableOptionalSchemas.length; ++j) { Schema target = promotableOptionalSchemas[j]; promoted = SchemaProjector.project(source, values[i], target); if (target.type() == Type.FLOAT64) { assertEquals((Double) (expected.get(j - i)), (double) promoted, 1e-6); } else { assertEquals(expected.get(j - i), promoted); } } } Schema[] nonPromotableSchemas = {Schema.BOOLEAN_SCHEMA, Schema.BYTES_SCHEMA, Schema.STRING_SCHEMA}; for (Schema promotableSchema: promotableSchemas) { for (Schema nonPromotableSchema: nonPromotableSchemas) { Object dummy = new Object(); try { SchemaProjector.project(promotableSchema, dummy, 
nonPromotableSchema); fail("Cannot promote " + promotableSchema.type() + " to " + nonPromotableSchema.type()); } catch (DataException e) { } } } } @Test public void testStructAddField() { Schema source = SchemaBuilder.struct() .field("field", Schema.INT32_SCHEMA) .build(); Struct sourceStruct = new Struct(source); sourceStruct.put("field", 1); Schema target = SchemaBuilder.struct() .field("field", Schema.INT32_SCHEMA) .field("field2", SchemaBuilder.int32().defaultValue(123).build()) .build(); Struct targetStruct = (Struct) SchemaProjector.project(source, sourceStruct, target); assertEquals(1, (int) targetStruct.getInt32("field")); assertEquals(123, (int) targetStruct.getInt32("field2")); Schema incompatibleTargetSchema = SchemaBuilder.struct() .field("field", Schema.INT32_SCHEMA) .field("field2", Schema.INT32_SCHEMA) .build(); try { SchemaProjector.project(source, sourceStruct, incompatibleTargetSchema); fail("Incompatible schema."); } catch (DataException e) { } } @Test public void testStructRemoveField() { Schema source = SchemaBuilder.struct() .field("field", Schema.INT32_SCHEMA) .field("field2", Schema.INT32_SCHEMA) .build(); Struct sourceStruct = new Struct(source); sourceStruct.put("field", 1); sourceStruct.put("field2", 234); Schema target = SchemaBuilder.struct() .field("field", Schema.INT32_SCHEMA) .build(); Struct targetStruct = (Struct) SchemaProjector.project(source, sourceStruct, target); assertEquals(1, targetStruct.get("field")); try { targetStruct.get("field2"); fail("field2 is not part of the projected struct"); } catch (DataException e) { } } @Test public void testStructDefaultValue() { Schema source = SchemaBuilder.struct().optional() .field("field", Schema.INT32_SCHEMA) .field("field2", Schema.INT32_SCHEMA) .build(); SchemaBuilder builder = SchemaBuilder.struct() .field("field", Schema.INT32_SCHEMA) .field("field2", Schema.INT32_SCHEMA); Struct defaultStruct = new Struct(builder).put("field", 12).put("field2", 345); 
builder.defaultValue(defaultStruct); Schema target = builder.build(); Object projected = SchemaProjector.project(source, null, target); assertEquals(defaultStruct, projected); Struct sourceStruct = new Struct(source).put("field", 45).put("field2", 678); Struct targetStruct = (Struct) SchemaProjector.project(source, sourceStruct, target); assertEquals(sourceStruct.get("field"), targetStruct.get("field")); assertEquals(sourceStruct.get("field2"), targetStruct.get("field2")); } @Test public void testNestedSchemaProjection() { Schema sourceFlatSchema = SchemaBuilder.struct() .field("field", Schema.INT32_SCHEMA) .build(); Schema targetFlatSchema = SchemaBuilder.struct() .field("field", Schema.INT32_SCHEMA) .field("field2", SchemaBuilder.int32().defaultValue(123).build()) .build(); Schema sourceNestedSchema = SchemaBuilder.struct() .field("first", Schema.INT32_SCHEMA) .field("second", Schema.STRING_SCHEMA) .field("array", SchemaBuilder.array(Schema.INT32_SCHEMA).build()) .field("map", SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.STRING_SCHEMA).build()) .field("nested", sourceFlatSchema) .build(); Schema targetNestedSchema = SchemaBuilder.struct() .field("first", Schema.INT32_SCHEMA) .field("second", Schema.STRING_SCHEMA) .field("array", SchemaBuilder.array(Schema.INT32_SCHEMA).build()) .field("map", SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.STRING_SCHEMA).build()) .field("nested", targetFlatSchema) .build(); Struct sourceFlatStruct = new Struct(sourceFlatSchema); sourceFlatStruct.put("field", 113); Struct sourceNestedStruct = new Struct(sourceNestedSchema); sourceNestedStruct.put("first", 1); sourceNestedStruct.put("second", "abc"); sourceNestedStruct.put("array", Arrays.asList(1, 2)); sourceNestedStruct.put("map", Collections.singletonMap(5, "def")); sourceNestedStruct.put("nested", sourceFlatStruct); Struct targetNestedStruct = (Struct) SchemaProjector.project(sourceNestedSchema, sourceNestedStruct, targetNestedSchema); assertEquals(1, 
targetNestedStruct.get("first")); assertEquals("abc", targetNestedStruct.get("second")); assertEquals(Arrays.asList(1, 2), targetNestedStruct.get("array")); assertEquals(Collections.singletonMap(5, "def"), targetNestedStruct.get("map")); Struct projectedStruct = (Struct) targetNestedStruct.get("nested"); assertEquals(113, projectedStruct.get("field")); assertEquals(123, projectedStruct.get("field2")); } @Test public void testLogicalTypeProjection() { Schema[] logicalTypeSchemas = {Decimal.schema(2), Date.SCHEMA, Time.SCHEMA, Timestamp.SCHEMA}; Object projected; BigDecimal testDecimal = new BigDecimal(new BigInteger("156"), 2); projected = SchemaProjector.project(Decimal.schema(2), testDecimal, Decimal.schema(2)); assertEquals(testDecimal, projected); projected = SchemaProjector.project(Date.SCHEMA, 1000, Date.SCHEMA); assertEquals(1000, projected); projected = SchemaProjector.project(Time.SCHEMA, 231, Time.SCHEMA); assertEquals(231, projected); projected = SchemaProjector.project(Timestamp.SCHEMA, 34567L, Timestamp.SCHEMA); assertEquals(34567L, projected); java.util.Date date = new java.util.Date(); projected = SchemaProjector.project(Date.SCHEMA, date, Date.SCHEMA); assertEquals(date, projected); projected = SchemaProjector.project(Time.SCHEMA, date, Time.SCHEMA); assertEquals(date, projected); projected = SchemaProjector.project(Timestamp.SCHEMA, date, Timestamp.SCHEMA); assertEquals(date, projected); Schema namedSchema = SchemaBuilder.int32().name("invalidLogicalTypeName").build(); for (Schema logicalTypeSchema: logicalTypeSchemas) { try { SchemaProjector.project(logicalTypeSchema, null, Schema.BOOLEAN_SCHEMA); fail("Cannot project logical types to non-logical types."); } catch (SchemaProjectorException e) { } try { SchemaProjector.project(logicalTypeSchema, null, namedSchema); fail("Reader name is not a valid logical type name."); } catch (SchemaProjectorException e) { } try { SchemaProjector.project(Schema.BOOLEAN_SCHEMA, null, logicalTypeSchema); fail("Cannot 
project non-logical types to logical types."); } catch (SchemaProjectorException e) { } } } @Test public void testArrayProjection() { Schema source = SchemaBuilder.array(Schema.INT32_SCHEMA).build(); Object projected = SchemaProjector.project(source, Arrays.asList(1, 2, 3), source); assertEquals(Arrays.asList(1, 2, 3), projected); Schema optionalSource = SchemaBuilder.array(Schema.INT32_SCHEMA).optional().build(); Schema target = SchemaBuilder.array(Schema.INT32_SCHEMA).defaultValue(Arrays.asList(1, 2, 3)).build(); projected = SchemaProjector.project(optionalSource, Arrays.asList(4, 5), target); assertEquals(Arrays.asList(4, 5), projected); projected = SchemaProjector.project(optionalSource, null, target); assertEquals(Arrays.asList(1, 2, 3), projected); Schema promotedTarget = SchemaBuilder.array(Schema.INT64_SCHEMA).defaultValue(Arrays.asList(1L, 2L, 3L)).build(); projected = SchemaProjector.project(optionalSource, Arrays.asList(4, 5), promotedTarget); List<Long> expectedProjected = Arrays.asList(4L, 5L); assertEquals(expectedProjected, projected); projected = SchemaProjector.project(optionalSource, null, promotedTarget); assertEquals(Arrays.asList(1L, 2L, 3L), projected); Schema noDefaultValueTarget = SchemaBuilder.array(Schema.INT32_SCHEMA).build(); try { SchemaProjector.project(optionalSource, null, noDefaultValueTarget); fail("Target schema does not provide a default value."); } catch (SchemaProjectorException e) { } Schema nonPromotableTarget = SchemaBuilder.array(Schema.BOOLEAN_SCHEMA).build(); try { SchemaProjector.project(optionalSource, null, nonPromotableTarget); fail("Neither source type matches target type nor source type can be promoted to target type"); } catch (SchemaProjectorException e) { } } @Test public void testMapProjection() { Schema source = SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.INT32_SCHEMA).optional().build(); Schema target = SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.INT32_SCHEMA).defaultValue(Collections.singletonMap(1, 
2)).build(); Object projected = SchemaProjector.project(source, Collections.singletonMap(3, 4), target); assertEquals(Collections.singletonMap(3, 4), projected); projected = SchemaProjector.project(source, null, target); assertEquals(Collections.singletonMap(1, 2), projected); Schema promotedTarget = SchemaBuilder.map(Schema.INT64_SCHEMA, Schema.FLOAT32_SCHEMA).defaultValue( Collections.singletonMap(3L, 4.5F)).build(); projected = SchemaProjector.project(source, Collections.singletonMap(3, 4), promotedTarget); assertEquals(Collections.singletonMap(3L, 4.F), projected); projected = SchemaProjector.project(source, null, promotedTarget); assertEquals(Collections.singletonMap(3L, 4.5F), projected); Schema noDefaultValueTarget = SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.INT32_SCHEMA).build(); try { SchemaProjector.project(source, null, noDefaultValueTarget); fail("Reader does not provide a default value."); } catch (SchemaProjectorException e) { } Schema nonPromotableTarget = SchemaBuilder.map(Schema.BOOLEAN_SCHEMA, Schema.STRING_SCHEMA).build(); try { SchemaProjector.project(source, null, nonPromotableTarget); fail("Neither source type matches target type nor source type can be promoted to target type"); } catch (SchemaProjectorException e) { } } @Test public void testMaybeCompatible() { Schema source = SchemaBuilder.int32().name("source").build(); Schema target = SchemaBuilder.int32().name("target").build(); try { SchemaProjector.project(source, 12, target); fail("Source name and target name mismatch."); } catch (SchemaProjectorException e) { } Schema targetWithParameters = SchemaBuilder.int32().parameters(Collections.singletonMap("key", "value")); try { SchemaProjector.project(source, 34, targetWithParameters); fail("Source parameters and target parameters mismatch."); } catch (SchemaProjectorException e) { } } @Test public void testProjectMissingDefaultValuedStructField() { final Schema source = SchemaBuilder.struct().build(); final Schema target = 
SchemaBuilder.struct().field("id", SchemaBuilder.int64().defaultValue(42L).build()).build(); assertEquals(42L, (long) ((Struct) SchemaProjector.project(source, new Struct(source), target)).getInt64("id")); } @Test public void testProjectMissingOptionalStructField() { final Schema source = SchemaBuilder.struct().build(); final Schema target = SchemaBuilder.struct().field("id", SchemaBuilder.OPTIONAL_INT64_SCHEMA).build(); assertEquals(null, ((Struct) SchemaProjector.project(source, new Struct(source), target)).getInt64("id")); } @Test(expected = SchemaProjectorException.class) public void testProjectMissingRequiredField() { final Schema source = SchemaBuilder.struct().build(); final Schema target = SchemaBuilder.struct().field("id", SchemaBuilder.INT64_SCHEMA).build(); SchemaProjector.project(source, new Struct(source), target); }
ConnectorUtils { public static <T> List<List<T>> groupPartitions(List<T> elements, int numGroups) { if (numGroups <= 0) throw new IllegalArgumentException("Number of groups must be positive."); List<List<T>> result = new ArrayList<>(numGroups); int perGroup = elements.size() / numGroups; int leftover = elements.size() - (numGroups * perGroup); int assigned = 0; for (int group = 0; group < numGroups; group++) { int numThisGroup = group < leftover ? perGroup + 1 : perGroup; List<T> groupList = new ArrayList<>(numThisGroup); for (int i = 0; i < numThisGroup; i++) { groupList.add(elements.get(assigned)); assigned++; } result.add(groupList); } return result; } static List<List<T>> groupPartitions(List<T> elements, int numGroups); }
// Verifies groupPartitions() keeps element order, spreads the remainder one extra
// element per leading group, and pads with empty groups when numGroups exceeds
// the element count; a non-positive group count is rejected.
// NOTE(review): the original declared testGroupPartitions() twice — a compile
// error. The two bodies were identical except for raw Collections.EMPTY_LIST vs.
// the type-safe Collections.emptyList(); the type-safe variant is kept.
@Test
public void testGroupPartitions() {
    List<List<Integer>> grouped = ConnectorUtils.groupPartitions(FIVE_ELEMENTS, 1);
    assertEquals(Arrays.asList(FIVE_ELEMENTS), grouped);
    grouped = ConnectorUtils.groupPartitions(FIVE_ELEMENTS, 2);
    assertEquals(Arrays.asList(Arrays.asList(1, 2, 3), Arrays.asList(4, 5)), grouped);
    grouped = ConnectorUtils.groupPartitions(FIVE_ELEMENTS, 3);
    assertEquals(Arrays.asList(Arrays.asList(1, 2), Arrays.asList(3, 4), Arrays.asList(5)), grouped);
    grouped = ConnectorUtils.groupPartitions(FIVE_ELEMENTS, 5);
    assertEquals(Arrays.asList(Arrays.asList(1), Arrays.asList(2), Arrays.asList(3), Arrays.asList(4), Arrays.asList(5)), grouped);
    grouped = ConnectorUtils.groupPartitions(FIVE_ELEMENTS, 7);
    assertEquals(Arrays.asList(Arrays.asList(1), Arrays.asList(2), Arrays.asList(3), Arrays.asList(4), Arrays.asList(5), Collections.emptyList(), Collections.emptyList()), grouped);
}

// groupPartitions() must fail fast on a non-positive group count.
@Test(expected = IllegalArgumentException.class)
public void testGroupPartitionsInvalidCount() {
    ConnectorUtils.groupPartitions(FIVE_ELEMENTS, 0);
}
StringConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { try { return serializer.serialize(topic, value == null ? null : value.toString()); } catch (SerializationException e) { throw new DataException("Failed to serialize to a string: ", e); } } StringConverter(); @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); }
// Tests for StringConverter.fromConnectData(): values are rendered via toString()
// and encoded as UTF-8, nulls pass through, and the schema argument is ignored.
@Test
public void testStringToBytes() throws UnsupportedEncodingException {
    byte[] expected = SAMPLE_STRING.getBytes("UTF8");
    assertArrayEquals(expected, converter.fromConnectData(TOPIC, Schema.STRING_SCHEMA, SAMPLE_STRING));
}

@Test
public void testNonStringToBytes() throws UnsupportedEncodingException {
    // Non-string values are serialized from their toString() representation.
    byte[] expected = "true".getBytes("UTF8");
    assertArrayEquals(expected, converter.fromConnectData(TOPIC, Schema.BOOLEAN_SCHEMA, true));
}

@Test
public void testNullToBytes() {
    assertEquals(null, converter.fromConnectData(TOPIC, Schema.OPTIONAL_STRING_SCHEMA, null));
}

@Test
public void testToBytesIgnoresSchema() throws UnsupportedEncodingException {
    // A null schema must not change the serialized output.
    byte[] expected = "true".getBytes("UTF8");
    assertArrayEquals(expected, converter.fromConnectData(TOPIC, null, true));
}
StringConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { try { return new SchemaAndValue(Schema.OPTIONAL_STRING_SCHEMA, deserializer.deserialize(topic, value)); } catch (SerializationException e) { throw new DataException("Failed to deserialize string: ", e); } } StringConverter(); @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); }
// Tests for StringConverter.toConnectData(): bytes decode to an optional-string
// schema/value pair, with null bytes mapping to a null value.
@Test
public void testBytesToString() {
    SchemaAndValue result = converter.toConnectData(TOPIC, SAMPLE_STRING.getBytes());
    assertEquals(Schema.OPTIONAL_STRING_SCHEMA, result.schema());
    assertEquals(SAMPLE_STRING, result.value());
}

@Test
public void testBytesNullToString() {
    SchemaAndValue result = converter.toConnectData(TOPIC, null);
    assertEquals(Schema.OPTIONAL_STRING_SCHEMA, result.schema());
    assertEquals(null, result.value());
}
FileStreamSourceConnector extends SourceConnector { @Override public void start(Map<String, String> props) { filename = props.get(FILE_CONFIG); topic = props.get(TOPIC_CONFIG); if (topic == null || topic.isEmpty()) throw new ConnectException("FileStreamSourceConnector configuration must include 'topic' setting"); if (topic.contains(",")) throw new ConnectException("FileStreamSourceConnector should only have a single topic when used as a source."); } @Override String version(); @Override void start(Map<String, String> props); @Override Class<? extends Task> taskClass(); @Override List<Map<String, String>> taskConfigs(int maxTasks); @Override void stop(); @Override ConfigDef config(); static final String TOPIC_CONFIG; static final String FILE_CONFIG; }
// Starting the source connector with a comma-separated topic list must be rejected:
// a file source publishes to exactly one topic.
@Test(expected = ConnectException.class) public void testMultipleSourcesInvalid() { sourceProperties.put(FileStreamSourceConnector.TOPIC_CONFIG, MULTIPLE_TOPICS); connector.start(sourceProperties); }
FileStreamSourceConnector extends SourceConnector { @Override public Class<? extends Task> taskClass() { return FileStreamSourceTask.class; } @Override String version(); @Override void start(Map<String, String> props); @Override Class<? extends Task> taskClass(); @Override List<Map<String, String>> taskConfigs(int maxTasks); @Override void stop(); @Override ConfigDef config(); static final String TOPIC_CONFIG; static final String FILE_CONFIG; }
// taskClass() should report FileStreamSourceTask once the connector has been started.
@Test public void testTaskClass() { PowerMock.replayAll(); connector.start(sourceProperties); assertEquals(FileStreamSourceTask.class, connector.taskClass()); PowerMock.verifyAll(); }
FileStreamSinkConnector extends SinkConnector { @Override public Class<? extends Task> taskClass() { return FileStreamSinkTask.class; } @Override String version(); @Override void start(Map<String, String> props); @Override Class<? extends Task> taskClass(); @Override List<Map<String, String>> taskConfigs(int maxTasks); @Override void stop(); @Override ConfigDef config(); static final String FILE_CONFIG; }
// taskClass() should report FileStreamSinkTask once the connector has been started.
// NOTE(review): the original declared testTaskClass() twice — a compile error. The
// duplicates were identical except for PowerMock.replayAll()/verifyAll() vs. the
// statically-imported forms; the PowerMock-qualified variant is kept for
// consistency with the sibling source-connector test.
@Test
public void testTaskClass() {
    PowerMock.replayAll();
    connector.start(sinkProperties);
    assertEquals(FileStreamSinkTask.class, connector.taskClass());
    PowerMock.verifyAll();
}
FileStreamSourceTask extends SourceTask { @Override public void start(Map<String, String> props) { filename = props.get(FileStreamSourceConnector.FILE_CONFIG); if (filename == null || filename.isEmpty()) { stream = System.in; streamOffset = null; reader = new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8)); } topic = props.get(FileStreamSourceConnector.TOPIC_CONFIG); if (topic == null) throw new ConnectException("FileStreamSourceTask config missing topic setting"); } @Override String version(); @Override void start(Map<String, String> props); @Override List<SourceRecord> poll(); @Override void stop(); static final String FILENAME_FIELD; static final String POSITION_FIELD; }
// start() must fail fast when the mandatory topic setting has been removed.
@Test(expected = ConnectException.class) public void testMissingTopic() throws InterruptedException { replay(); config.remove(FileStreamSourceConnector.TOPIC_CONFIG); task.start(config); }
ApiVersions { public synchronized byte maxUsableProduceMagic() { return maxUsableProduceMagic; } synchronized void update(String nodeId, NodeApiVersions nodeApiVersions); synchronized void remove(String nodeId); synchronized NodeApiVersions get(String nodeId); synchronized byte maxUsableProduceMagic(); }
// maxUsableProduceMagic() should drop to the lowest magic supported by any known
// broker and recover once that broker is removed.
// NOTE(review): the original declared testMaxUsableProduceMagic() twice — a
// compile error. The variant calling NodeApiVersions.create(Collection) is kept,
// matching the factory signatures declared on NodeApiVersions in this file; the
// other variant used a three-argument create(short, short, short) overload that
// is not visible here.
@Test
public void testMaxUsableProduceMagic() {
    ApiVersions apiVersions = new ApiVersions();
    assertEquals(RecordBatch.CURRENT_MAGIC_VALUE, apiVersions.maxUsableProduceMagic());
    apiVersions.update("0", NodeApiVersions.create());
    assertEquals(RecordBatch.CURRENT_MAGIC_VALUE, apiVersions.maxUsableProduceMagic());
    // A broker capped at produce version 2 limits the usable magic to v1.
    apiVersions.update("1", NodeApiVersions.create(Collections.singleton(
            new ApiVersionsResponse.ApiVersion(ApiKeys.PRODUCE.id, (short) 0, (short) 2))));
    assertEquals(RecordBatch.MAGIC_VALUE_V1, apiVersions.maxUsableProduceMagic());
    apiVersions.remove("1");
    assertEquals(RecordBatch.CURRENT_MAGIC_VALUE, apiVersions.maxUsableProduceMagic());
}
ClientUtils { public static List<InetSocketAddress> parseAndValidateAddresses(List<String> urls) { List<InetSocketAddress> addresses = new ArrayList<>(); for (String url : urls) { if (url != null && !url.isEmpty()) { try { String host = getHost(url); Integer port = getPort(url); if (host == null || port == null) throw new ConfigException("Invalid url in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG + ": " + url); InetSocketAddress address = new InetSocketAddress(host, port); if (address.isUnresolved()) { log.warn("Removing server {} from {} as DNS resolution failed for {}", url, CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, host); } else { addresses.add(address); } } catch (IllegalArgumentException e) { throw new ConfigException("Invalid port in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG + ": " + url); } } } if (addresses.isEmpty()) throw new ConfigException("No resolvable bootstrap urls given in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG); return addresses; } static List<InetSocketAddress> parseAndValidateAddresses(List<String> urls); static void closeQuietly(Closeable c, String name, AtomicReference<Throwable> firstException); static ChannelBuilder createChannelBuilder(AbstractConfig config); }
// parseAndValidateAddresses() should accept IPv4, hostname, and bracketed IPv6
// forms, and drop entries whose hostnames fail DNS while keeping resolvable ones.
@Test
public void testParseAndValidateAddresses() {
    check("127.0.0.1:8000");
    check("mydomain.com:8080");
    check("[::1]:8000");
    check("[2001:db8:85a3:8d3:1319:8a2e:370:7348]:1234", "mydomain.com:10000");
    // The unresolvable hostname is discarded; only the valid address survives.
    List<InetSocketAddress> validatedAddresses =
            check("some.invalid.hostname.foo.bar.local:9999", "mydomain.com:10000");
    assertEquals(1, validatedAddresses.size());
    InetSocketAddress onlyAddress = validatedAddresses.get(0);
    assertEquals("mydomain.com", onlyAddress.getHostName());
    assertEquals(10000, onlyAddress.getPort());
}
NetworkClient implements KafkaClient { @Override public void close(String nodeId) { selector.close(nodeId); for (InFlightRequest request : inFlightRequests.clearAll(nodeId)) if (request.isInternalRequest && request.header.apiKey() == ApiKeys.METADATA.id) metadataUpdater.handleDisconnection(request.destination); connectionStates.remove(nodeId); } NetworkClient(Selectable selector, Metadata metadata, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions); NetworkClient(Selectable selector, Metadata metadata, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions, Sensor throttleTimeSensor); NetworkClient(Selectable selector, MetadataUpdater metadataUpdater, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions); private NetworkClient(MetadataUpdater metadataUpdater, Metadata metadata, Selectable selector, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions, Sensor throttleTimeSensor); @Override // 检测是否可以向一个Node发送请求 boolean ready(Node node, long now); @Override void disconnect(String nodeId); @Override void close(String nodeId); @Override long connectionDelay(Node node, long now); @Override boolean connectionFailed(Node node); @Override boolean isReady(Node node, long now); @Override void send(ClientRequest request, long now); 
@Override // 调用Selector.poll 进行网络IO // 执行完成后 触发回调逻辑 List<ClientResponse> poll(long timeout, long now); @Override int inFlightRequestCount(); @Override boolean hasInFlightRequests(); @Override int inFlightRequestCount(String node); @Override boolean hasInFlightRequests(String node); @Override boolean hasReadyNodes(); @Override void wakeup(); @Override void close(); @Override Node leastLoadedNode(long now); static AbstractResponse parseResponse(ByteBuffer responseBuffer, RequestHeader requestHeader); @Override ClientRequest newClientRequest(String nodeId, AbstractRequest.Builder<?> requestBuilder, long createdTimeMs, boolean expectResponse); @Override ClientRequest newClientRequest(String nodeId, AbstractRequest.Builder<?> requestBuilder, long createdTimeMs, boolean expectResponse, RequestCompletionHandler callback); boolean discoverBrokerVersions(); }
// close(nodeId) should abort the node's in-flight requests and leave the
// connection not ready.
@Test
public void testClose() {
    client.ready(node, time.milliseconds());
    awaitReady(client, node);
    client.poll(1, time.milliseconds());
    assertTrue("The client should be ready", client.isReady(node, time.milliseconds()));
    // Queue one produce request so there is something in flight to clear.
    ProduceRequest.Builder builder = new ProduceRequest.Builder(RecordBatch.CURRENT_MAGIC_VALUE, (short) 1, 1000,
            Collections.<TopicPartition, MemoryRecords>emptyMap());
    ClientRequest request = client.newClientRequest(node.idString(), builder, time.milliseconds(), true);
    client.send(request, time.milliseconds());
    assertEquals("There should be 1 in-flight request after send", 1, client.inFlightRequestCount(node.idString()));
    assertTrue(client.hasInFlightRequests(node.idString()));
    assertTrue(client.hasInFlightRequests());
    client.close(node.idString());
    assertEquals("There should be no in-flight request after close", 0, client.inFlightRequestCount(node.idString()));
    assertFalse(client.hasInFlightRequests(node.idString()));
    assertFalse(client.hasInFlightRequests());
    assertFalse("Connection should not be ready after close", client.isReady(node, 0));
}
NetworkClient implements KafkaClient { @Override public Node leastLoadedNode(long now) { List<Node> nodes = this.metadataUpdater.fetchNodes(); int inflight = Integer.MAX_VALUE; Node found = null; int offset = this.randOffset.nextInt(nodes.size()); for (int i = 0; i < nodes.size(); i++) { int idx = (offset + i) % nodes.size(); Node node = nodes.get(idx); int currInflight = this.inFlightRequests.count(node.idString()); if (currInflight == 0 && isReady(node, now)) { log.trace("Found least loaded node {} connected with no in-flight requests", node); return node; } else if (!this.connectionStates.isBlackedOut(node.idString(), now) && currInflight < inflight) { inflight = currInflight; found = node; } else if (log.isTraceEnabled()) { log.trace("Removing node {} from least loaded node selection: is-blacked-out: {}, in-flight-requests: {}", node, this.connectionStates.isBlackedOut(node.idString(), now), currInflight); } } if (found != null) log.trace("Found least loaded node {}", found); else log.trace("Least loaded node selection failed to find an available node"); return found; } NetworkClient(Selectable selector, Metadata metadata, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions); NetworkClient(Selectable selector, Metadata metadata, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions, Sensor throttleTimeSensor); NetworkClient(Selectable selector, MetadataUpdater metadataUpdater, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, 
ApiVersions apiVersions); private NetworkClient(MetadataUpdater metadataUpdater, Metadata metadata, Selectable selector, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions, Sensor throttleTimeSensor); @Override // 检测是否可以向一个Node发送请求 boolean ready(Node node, long now); @Override void disconnect(String nodeId); @Override void close(String nodeId); @Override long connectionDelay(Node node, long now); @Override boolean connectionFailed(Node node); @Override boolean isReady(Node node, long now); @Override void send(ClientRequest request, long now); @Override // 调用Selector.poll 进行网络IO // 执行完成后 触发回调逻辑 List<ClientResponse> poll(long timeout, long now); @Override int inFlightRequestCount(); @Override boolean hasInFlightRequests(); @Override int inFlightRequestCount(String node); @Override boolean hasInFlightRequests(String node); @Override boolean hasReadyNodes(); @Override void wakeup(); @Override void close(); @Override Node leastLoadedNode(long now); static AbstractResponse parseResponse(ByteBuffer responseBuffer, RequestHeader requestHeader); @Override ClientRequest newClientRequest(String nodeId, AbstractRequest.Builder<?> requestBuilder, long createdTimeMs, boolean expectResponse); @Override ClientRequest newClientRequest(String nodeId, AbstractRequest.Builder<?> requestBuilder, long createdTimeMs, boolean expectResponse, RequestCompletionHandler callback); boolean discoverBrokerVersions(); }
// leastLoadedNode() should return the single ready node, and null after that
// node's connection has been forcibly closed.
@Test
public void testLeastLoadedNode() {
    client.ready(node, time.milliseconds());
    awaitReady(client, node);
    client.poll(1, time.milliseconds());
    assertTrue("The client should be ready", client.isReady(node, time.milliseconds()));
    Node leastNode = client.leastLoadedNode(time.milliseconds());
    assertEquals("There should be one leastloadednode", leastNode.id(), node.id());
    // Force a disconnect and verify no node is selectable afterwards.
    time.sleep(reconnectBackoffMsTest);
    selector.close(node.idString());
    client.poll(1, time.milliseconds());
    assertFalse("After we forced the disconnection the client is no longer ready.", client.ready(node, time.milliseconds()));
    leastNode = client.leastLoadedNode(time.milliseconds());
    assertEquals("There should be NO leastloadednode", leastNode, null);
}
NetworkClient implements KafkaClient { @Override public long connectionDelay(Node node, long now) { return connectionStates.connectionDelay(node.idString(), now); } NetworkClient(Selectable selector, Metadata metadata, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions); NetworkClient(Selectable selector, Metadata metadata, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions, Sensor throttleTimeSensor); NetworkClient(Selectable selector, MetadataUpdater metadataUpdater, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions); private NetworkClient(MetadataUpdater metadataUpdater, Metadata metadata, Selectable selector, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions, Sensor throttleTimeSensor); @Override // 检测是否可以向一个Node发送请求 boolean ready(Node node, long now); @Override void disconnect(String nodeId); @Override void close(String nodeId); @Override long connectionDelay(Node node, long now); @Override boolean connectionFailed(Node node); @Override boolean isReady(Node node, long now); @Override void send(ClientRequest request, long now); @Override // 调用Selector.poll 进行网络IO // 执行完成后 触发回调逻辑 List<ClientResponse> poll(long timeout, long now); @Override int inFlightRequestCount(); @Override boolean hasInFlightRequests(); 
@Override int inFlightRequestCount(String node); @Override boolean hasInFlightRequests(String node); @Override boolean hasReadyNodes(); @Override void wakeup(); @Override void close(); @Override Node leastLoadedNode(long now); static AbstractResponse parseResponse(ByteBuffer responseBuffer, RequestHeader requestHeader); @Override ClientRequest newClientRequest(String nodeId, AbstractRequest.Builder<?> requestBuilder, long createdTimeMs, boolean expectResponse); @Override ClientRequest newClientRequest(String nodeId, AbstractRequest.Builder<?> requestBuilder, long createdTimeMs, boolean expectResponse, RequestCompletionHandler callback); boolean discoverBrokerVersions(); }
// connectionDelay() should be 0 for a never-connected node (connect immediately)
// and Long.MAX_VALUE for an established connection (no reconnect pending).
@Test
public void testConnectionDelay() {
    long now = time.milliseconds();
    long delay = client.connectionDelay(node, now);
    assertEquals(0, delay);
}

@Test
public void testConnectionDelayConnected() {
    awaitReady(client, node);
    long now = time.milliseconds();
    long delay = client.connectionDelay(node, now);
    assertEquals(Long.MAX_VALUE, delay);
}
NodeApiVersions { @Override public String toString() { return toString(false); } NodeApiVersions(Collection<ApiVersion> nodeApiVersions); static NodeApiVersions create(); static NodeApiVersions create(Collection<ApiVersion> overrides); short usableVersion(ApiKeys apiKey); short usableVersion(ApiKeys apiKey, Short desiredVersion); @Override String toString(); String toString(boolean lineBreaks); ApiVersion apiVersion(ApiKeys apiKey); }
// Tests for NodeApiVersions.toString(): each API renders as "Name(id): range" with
// UNSUPPORTED / UNKNOWN / unusable annotations where appropriate.
@Test
public void testUnsupportedVersionsToString() {
    NodeApiVersions versions = new NodeApiVersions(Collections.<ApiVersion>emptyList());
    StringBuilder expected = new StringBuilder();
    String prefix = "(";
    for (ApiKeys apiKey : ApiKeys.values()) {
        expected.append(prefix);
        expected.append(apiKey.name).append("(").append(apiKey.id).append("): UNSUPPORTED");
        prefix = ", ";
    }
    expected.append(")");
    assertEquals(expected.toString(), versions.toString());
}

@Test
public void testUnknownApiVersionsToString() {
    // An API id the client does not recognize is rendered as UNKNOWN(id).
    ApiVersion unknownApiVersion = new ApiVersion((short) 337, (short) 0, (short) 1);
    NodeApiVersions versions = new NodeApiVersions(Collections.singleton(unknownApiVersion));
    assertTrue(versions.toString().endsWith("UNKNOWN(337): 0 to 1)"));
}

@Test
public void testVersionsToString() {
    List<ApiVersion> versionList = new ArrayList<>();
    for (ApiKeys apiKey : ApiKeys.values()) {
        if (apiKey == ApiKeys.CONTROLLED_SHUTDOWN_KEY) {
            // Broker only supports an older range than the client.
            versionList.add(new ApiVersion(apiKey.id, (short) 0, (short) 0));
        } else if (apiKey == ApiKeys.DELETE_TOPICS) {
            // Broker only supports a newer range than the client.
            versionList.add(new ApiVersion(apiKey.id, (short) 10000, (short) 10001));
        } else {
            versionList.add(new ApiVersion(apiKey));
        }
    }
    NodeApiVersions versions = new NodeApiVersions(versionList);
    StringBuilder expected = new StringBuilder();
    String prefix = "(";
    for (ApiKeys apiKey : ApiKeys.values()) {
        expected.append(prefix);
        if (apiKey == ApiKeys.CONTROLLED_SHUTDOWN_KEY) {
            expected.append("ControlledShutdown(7): 0 [unusable: node too old]");
        } else if (apiKey == ApiKeys.DELETE_TOPICS) {
            expected.append("DeleteTopics(20): 10000 to 10001 [unusable: node too new]");
        } else {
            expected.append(apiKey.name).append("(").append(apiKey.id).append("): ");
            if (apiKey.oldestVersion() == apiKey.latestVersion()) {
                expected.append(apiKey.oldestVersion());
            } else {
                expected.append(apiKey.oldestVersion()).append(" to ").append(apiKey.latestVersion());
            }
            expected.append(" [usable: ").append(apiKey.latestVersion()).append("]");
        }
        prefix = ", ";
    }
    expected.append(")");
    assertEquals(expected.toString(), versions.toString());
}
NodeApiVersions { public short usableVersion(ApiKeys apiKey) { return usableVersion(apiKey, null); } NodeApiVersions(Collection<ApiVersion> nodeApiVersions); static NodeApiVersions create(); static NodeApiVersions create(Collection<ApiVersion> overrides); short usableVersion(ApiKeys apiKey); short usableVersion(ApiKeys apiKey, Short desiredVersion); @Override String toString(); String toString(boolean lineBreaks); ApiVersion apiVersion(ApiKeys apiKey); }
// Tests for usableVersion(): an API whose broker range does not overlap the
// client's is unusable; overlapping ranges yield the highest common version.
@Test
public void testUsableVersionCalculation() {
    List<ApiVersion> versionList = new ArrayList<>();
    versionList.add(new ApiVersion(ApiKeys.CONTROLLED_SHUTDOWN_KEY.id, (short) 0, (short) 0));
    versionList.add(new ApiVersion(ApiKeys.FETCH.id, (short) 1, (short) 2));
    NodeApiVersions versions = new NodeApiVersions(versionList);
    try {
        versions.usableVersion(ApiKeys.CONTROLLED_SHUTDOWN_KEY);
        Assert.fail("expected UnsupportedVersionException");
    } catch (UnsupportedVersionException e) {
        // expected: no overlap between broker and client ranges
    }
    assertEquals(2, versions.usableVersion(ApiKeys.FETCH));
}

@Test(expected = UnsupportedVersionException.class)
public void testUsableVersionCalculationNoKnownVersions() {
    List<ApiVersion> versionList = new ArrayList<>();
    NodeApiVersions versions = new NodeApiVersions(versionList);
    versions.usableVersion(ApiKeys.FETCH);
}

@Test
public void testUsableVersionLatestVersions() {
    // Start from the full advertised response, plus one unknown API id.
    List<ApiVersion> versionList = new LinkedList<>(ApiVersionsResponse.API_VERSIONS_RESPONSE.apiVersions());
    versionList.add(new ApiVersion((short) 100, (short) 0, (short) 1));
    NodeApiVersions versions = new NodeApiVersions(versionList);
    for (ApiKeys apiKey : ApiKeys.values()) {
        assertEquals(apiKey.latestVersion(), versions.usableVersion(apiKey));
    }
}
ConsumerConfig extends AbstractConfig { public static Map<String, Object> addDeserializerToConfig(Map<String, Object> configs, Deserializer<?> keyDeserializer, Deserializer<?> valueDeserializer) { Map<String, Object> newConfigs = new HashMap<String, Object>(); newConfigs.putAll(configs); if (keyDeserializer != null) newConfigs.put(KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer.getClass()); if (valueDeserializer != null) newConfigs.put(VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer.getClass()); return newConfigs; } ConsumerConfig(Map<?, ?> props); ConsumerConfig(Map<?, ?> props, boolean doLog); static Map<String, Object> addDeserializerToConfig(Map<String, Object> configs, Deserializer<?> keyDeserializer, Deserializer<?> valueDeserializer); static Properties addDeserializerToConfig(Properties properties, Deserializer<?> keyDeserializer, Deserializer<?> valueDeserializer); static Set<String> configNames(); static void main(String[] args); static final String GROUP_ID_CONFIG; static final String MAX_POLL_RECORDS_CONFIG; static final String MAX_POLL_INTERVAL_MS_CONFIG; static final String SESSION_TIMEOUT_MS_CONFIG; static final String HEARTBEAT_INTERVAL_MS_CONFIG; static final String BOOTSTRAP_SERVERS_CONFIG; static final String ENABLE_AUTO_COMMIT_CONFIG; static final String AUTO_COMMIT_INTERVAL_MS_CONFIG; static final String PARTITION_ASSIGNMENT_STRATEGY_CONFIG; static final String AUTO_OFFSET_RESET_CONFIG; static final String AUTO_OFFSET_RESET_DOC; static final String FETCH_MIN_BYTES_CONFIG; static final String FETCH_MAX_BYTES_CONFIG; static final int DEFAULT_FETCH_MAX_BYTES; static final String FETCH_MAX_WAIT_MS_CONFIG; static final String METADATA_MAX_AGE_CONFIG; static final String MAX_PARTITION_FETCH_BYTES_CONFIG; static final int DEFAULT_MAX_PARTITION_FETCH_BYTES; static final String SEND_BUFFER_CONFIG; static final String RECEIVE_BUFFER_CONFIG; static final String CLIENT_ID_CONFIG; static final String RECONNECT_BACKOFF_MS_CONFIG; static final String 
RECONNECT_BACKOFF_MAX_MS_CONFIG; static final String RETRY_BACKOFF_MS_CONFIG; static final String METRICS_SAMPLE_WINDOW_MS_CONFIG; static final String METRICS_NUM_SAMPLES_CONFIG; static final String METRICS_RECORDING_LEVEL_CONFIG; static final String METRIC_REPORTER_CLASSES_CONFIG; static final String CHECK_CRCS_CONFIG; static final String KEY_DESERIALIZER_CLASS_CONFIG; static final String KEY_DESERIALIZER_CLASS_DOC; static final String VALUE_DESERIALIZER_CLASS_CONFIG; static final String VALUE_DESERIALIZER_CLASS_DOC; static final String CONNECTIONS_MAX_IDLE_MS_CONFIG; static final String REQUEST_TIMEOUT_MS_CONFIG; static final String INTERCEPTOR_CLASSES_CONFIG; static final String INTERCEPTOR_CLASSES_DOC; static final String EXCLUDE_INTERNAL_TOPICS_CONFIG; static final boolean DEFAULT_EXCLUDE_INTERNAL_TOPICS; static final String ISOLATION_LEVEL_CONFIG; static final String ISOLATION_LEVEL_DOC; static final String DEFAULT_ISOLATION_LEVEL; }
@Test
public void testDeserializerToPropertyConfig() {
    // Properties overload: entries already present must survive, and explicit
    // deserializer instances must be recorded for the missing keys.
    // NOTE: assertEquals takes (expected, actual) in that order — the original
    // calls had them swapped, which inverts JUnit failure messages.
    Properties properties = new Properties();
    properties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClassName);
    properties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClassName);
    Properties newProperties = ConsumerConfig.addDeserializerToConfig(properties, null, null);
    assertEquals(keyDeserializerClassName, newProperties.get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG));
    assertEquals(valueDeserializerClassName, newProperties.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG));

    // Only the value entry is pre-set; the key deserializer comes from the instance.
    properties.clear();
    properties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClassName);
    newProperties = ConsumerConfig.addDeserializerToConfig(properties, keyDeserializer, null);
    assertEquals(keyDeserializerClassName, newProperties.get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG));
    assertEquals(valueDeserializerClassName, newProperties.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG));

    // Only the key entry is pre-set; the value deserializer comes from the instance.
    properties.clear();
    properties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClassName);
    newProperties = ConsumerConfig.addDeserializerToConfig(properties, null, valueDeserializer);
    assertEquals(keyDeserializerClassName, newProperties.get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG));
    assertEquals(valueDeserializerClassName, newProperties.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG));

    // Nothing pre-set; both entries come from the instances.
    properties.clear();
    newProperties = ConsumerConfig.addDeserializerToConfig(properties, keyDeserializer, valueDeserializer);
    assertEquals(keyDeserializerClassName, newProperties.get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG));
    assertEquals(valueDeserializerClassName, newProperties.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG));
}

@Test
public void testDeserializerToMapConfig() {
    // Map overload: same four scenarios as the Properties overload, but the
    // config values are Class objects rather than class-name strings.
    Map<String, Object> configs = new HashMap<>();
    configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClass);
    configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClass);
    Map<String, Object> newConfigs = ConsumerConfig.addDeserializerToConfig(configs, null, null);
    assertEquals(keyDeserializerClass, newConfigs.get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG));
    assertEquals(valueDeserializerClass, newConfigs.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG));

    configs.clear();
    configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClass);
    newConfigs = ConsumerConfig.addDeserializerToConfig(configs, keyDeserializer, null);
    assertEquals(keyDeserializerClass, newConfigs.get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG));
    assertEquals(valueDeserializerClass, newConfigs.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG));

    configs.clear();
    configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClass);
    newConfigs = ConsumerConfig.addDeserializerToConfig(configs, null, valueDeserializer);
    assertEquals(keyDeserializerClass, newConfigs.get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG));
    assertEquals(valueDeserializerClass, newConfigs.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG));

    configs.clear();
    newConfigs = ConsumerConfig.addDeserializerToConfig(configs, keyDeserializer, valueDeserializer);
    assertEquals(keyDeserializerClass, newConfigs.get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG));
    assertEquals(valueDeserializerClass, newConfigs.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG));
}
// Partition assignor that tries to keep each consumer's previous partitions
// ("sticky") while producing a balanced assignment across the group.
StickyAssignor extends AbstractPartitionAssignor {

    /**
     * Computes the partition assignment for the group.
     *
     * @param partitionsPerTopic partition count for every known topic
     * @param subscriptions      per-consumer topic subscriptions (may carry prior
     *                           assignment data consumed by prepopulateCurrentAssignments)
     * @return map of consumer id to the list of partitions assigned to it; every
     *         subscribed consumer appears as a key, possibly with an empty list
     */
    public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) {
        Map<String, List<TopicPartition>> currentAssignment = new HashMap<>();
        // partitionMovements tracks reassignments made below; semantics of
        // PartitionMovements are defined elsewhere — not visible here.
        partitionMovements = new PartitionMovements();
        // Seeds currentAssignment from prior-generation data carried in the
        // subscriptions (helper defined elsewhere in the class).
        prepopulateCurrentAssignments(subscriptions, currentAssignment);
        // Fresh assignment = no consumer reported any previous ownership.
        boolean isFreshAssignment = currentAssignment.isEmpty();

        // partition -> every consumer whose subscription could accept it.
        final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>();
        // consumer -> every partition its subscription could accept.
        final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>();

        // Register every existing partition with an (initially empty) consumer list.
        for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) {
            for (int i = 0; i < entry.getValue(); ++i)
                partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>());
        }

        // Fill both potential-ownership maps from the subscriptions, and make sure
        // every consumer has an entry in currentAssignment (empty if new).
        for (Entry<String, Subscription> entry: subscriptions.entrySet()) {
            String consumer = entry.getKey();
            consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>());
            for (String topic: entry.getValue().topics()) {
                for (int i = 0; i < partitionsPerTopic.get(topic); ++i) {
                    TopicPartition topicPartition = new TopicPartition(topic, i);
                    consumer2AllPotentialPartitions.get(consumer).add(topicPartition);
                    partition2AllPotentialConsumers.get(topicPartition).add(consumer);
                }
            }
            if (!currentAssignment.containsKey(consumer))
                currentAssignment.put(consumer, new ArrayList<TopicPartition>());
        }

        // Reverse index: partition -> consumer that currently owns it.
        Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>();
        for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet())
            for (TopicPartition topicPartition: entry.getValue())
                currentPartitionConsumer.put(topicPartition, entry.getKey());

        // Ordering produced by sortPartitions (defined elsewhere); presumably
        // chosen to improve balance/stickiness of the fill-in pass — not
        // verifiable from this chunk.
        List<TopicPartition> sortedPartitions = sortPartitions(
            currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions);
        List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions);

        // Prune stale state: drop departed consumers entirely; within surviving
        // consumers drop partitions that no longer exist or are no longer
        // subscribed; everything still validly owned leaves the unassigned pool.
        // NOTE: the iterator-removal interleaving with currentPartitionConsumer
        // bookkeeping is order-sensitive — do not reorder.
        for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) {
            Map.Entry<String, List<TopicPartition>> entry = it.next();
            if (!subscriptions.containsKey(entry.getKey())) {
                // Consumer left the group: release all of its partitions.
                for (TopicPartition topicPartition: entry.getValue())
                    currentPartitionConsumer.remove(topicPartition);
                it.remove();
            } else {
                for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) {
                    TopicPartition partition = partitionIter.next();
                    if (!partition2AllPotentialConsumers.containsKey(partition)) {
                        // Partition no longer exists (topic deleted/shrunk).
                        partitionIter.remove();
                        currentPartitionConsumer.remove(partition);
                    } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) {
                        // Owner unsubscribed from the topic; partition stays in
                        // the unassigned pool for someone else to pick up.
                        partitionIter.remove();
                    } else
                        // Still validly owned: keep the sticky assignment.
                        unassignedPartitions.remove(partition);
                }
            }
        }

        // Consumers ordered by SubscriptionComparator (defined elsewhere) over
        // the current assignment, then balanced via the balance helper.
        TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment));
        sortedCurrentSubscriptions.addAll(currentAssignment.keySet());
        balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions,
            consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer);
        return currentAssignment;
    }

    Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions);
    @Override void onAssignment(Assignment assignment);
    @Override Subscription subscription(Set<String> topics);
    @Override String name();
}
@Test
public void testOneConsumerNoTopic() {
    // A consumer with an empty subscription still shows up in the result,
    // just with no partitions.
    String member = "consumer";
    Map<String, Integer> partitionCounts = new HashMap<>();
    Map<String, Subscription> subs =
        Collections.singletonMap(member, new Subscription(Collections.<String>emptyList()));

    Map<String, List<TopicPartition>> result = assignor.assign(partitionCounts, subs);

    assertEquals(Collections.singleton(member), result.keySet());
    assertTrue(result.get(member).isEmpty());
    verifyValidityAndBalance(subs, result);
    assertTrue(isFullyBalanced(result));
}

@Test
public void testOneConsumerNonexistentTopic() {
    // Subscribing to a topic that has zero partitions yields an empty assignment.
    String member = "consumer";
    Map<String, Integer> partitionCounts = new HashMap<>();
    partitionCounts.put("topic", 0);
    Map<String, Subscription> subs =
        Collections.singletonMap(member, new Subscription(topics("topic")));

    Map<String, List<TopicPartition>> result = assignor.assign(partitionCounts, subs);

    assertEquals(Collections.singleton(member), result.keySet());
    assertTrue(result.get(member).isEmpty());
    verifyValidityAndBalance(subs, result);
    assertTrue(isFullyBalanced(result));
}

@Test
public void testOneConsumerOneTopic() {
    // The sole consumer receives every partition of its one topic.
    String member = "consumer";
    Map<String, Integer> partitionCounts = new HashMap<>();
    partitionCounts.put("topic", 3);
    Map<String, Subscription> subs =
        Collections.singletonMap(member, new Subscription(topics("topic")));

    Map<String, List<TopicPartition>> result = assignor.assign(partitionCounts, subs);

    assertEquals(partitions(tp("topic", 0), tp("topic", 1), tp("topic", 2)), result.get(member));
    verifyValidityAndBalance(subs, result);
    assertTrue(isFullyBalanced(result));
}

@Test
public void testOnlyAssignsPartitionsFromSubscribedTopics() {
    // Partitions of an unsubscribed topic must never leak into the assignment.
    String member = "consumer";
    Map<String, Integer> partitionCounts = new HashMap<>();
    partitionCounts.put("topic", 3);
    partitionCounts.put("other", 3);
    Map<String, Subscription> subs =
        Collections.singletonMap(member, new Subscription(topics("topic")));

    Map<String, List<TopicPartition>> result = assignor.assign(partitionCounts, subs);

    assertEquals(partitions(tp("topic", 0), tp("topic", 1), tp("topic", 2)), result.get(member));
    verifyValidityAndBalance(subs, result);
    assertTrue(isFullyBalanced(result));
}

@Test
public void testOneConsumerMultipleTopics() {
    // One consumer subscribed to two topics gets all partitions of both.
    String member = "consumer";
    Map<String, Integer> partitionCounts = new HashMap<>();
    partitionCounts.put("topic1", 1);
    partitionCounts.put("topic2", 2);
    Map<String, Subscription> subs =
        Collections.singletonMap(member, new Subscription(topics("topic1", "topic2")));

    Map<String, List<TopicPartition>> result = assignor.assign(partitionCounts, subs);

    assertEquals(partitions(tp("topic1", 0), tp("topic2", 0), tp("topic2", 1)), result.get(member));
    verifyValidityAndBalance(subs, result);
    assertTrue(isFullyBalanced(result));
}

@Test
public void testTwoConsumersOneTopicOnePartition() {
    // With a single partition, exactly one of the two consumers owns it.
    Map<String, Integer> partitionCounts = new HashMap<>();
    partitionCounts.put("topic", 1);
    Map<String, Subscription> subs = new HashMap<>();
    subs.put("consumer1", new Subscription(topics("topic")));
    subs.put("consumer2", new Subscription(topics("topic")));

    Map<String, List<TopicPartition>> result = assignor.assign(partitionCounts, subs);

    assertEquals(partitions(tp("topic", 0)), result.get("consumer1"));
    assertEquals(Collections.<TopicPartition>emptyList(), result.get("consumer2"));
    verifyValidityAndBalance(subs, result);
    assertTrue(isFullyBalanced(result));
}

@Test
public void testTwoConsumersOneTopicTwoPartitions() {
    // Two partitions split one each across the two consumers.
    Map<String, Integer> partitionCounts = new HashMap<>();
    partitionCounts.put("topic", 2);
    Map<String, Subscription> subs = new HashMap<>();
    subs.put("consumer1", new Subscription(topics("topic")));
    subs.put("consumer2", new Subscription(topics("topic")));

    Map<String, List<TopicPartition>> result = assignor.assign(partitionCounts, subs);

    assertEquals(partitions(tp("topic", 0)), result.get("consumer1"));
    assertEquals(partitions(tp("topic", 1)), result.get("consumer2"));
    verifyValidityAndBalance(subs, result);
    assertTrue(isFullyBalanced(result));
}

@Test
public void testMultipleConsumersMixedTopicSubscriptions() {
    // Heterogeneous subscriptions: consumer2 alone can take topic2, so it gets
    // both of its partitions while topic1 is spread over all three consumers.
    Map<String, Integer> partitionCounts = new HashMap<>();
    partitionCounts.put("topic1", 3);
    partitionCounts.put("topic2", 2);
    Map<String, Subscription> subs = new HashMap<>();
    subs.put("consumer1", new Subscription(topics("topic1")));
    subs.put("consumer2", new Subscription(topics("topic1", "topic2")));
    subs.put("consumer3", new Subscription(topics("topic1")));

    Map<String, List<TopicPartition>> result = assignor.assign(partitionCounts, subs);

    assertEquals(partitions(tp("topic1", 0), tp("topic1", 2)), result.get("consumer1"));
    assertEquals(partitions(tp("topic2", 0), tp("topic2", 1)), result.get("consumer2"));
    assertEquals(partitions(tp("topic1", 1)), result.get("consumer3"));
    verifyValidityAndBalance(subs, result);
    assertTrue(isFullyBalanced(result));
}

@Test
public void testTwoConsumersTwoTopicsSixPartitions() {
    // Six partitions over two identically-subscribed consumers: three each.
    Map<String, Integer> partitionCounts = new HashMap<>();
    partitionCounts.put("topic1", 3);
    partitionCounts.put("topic2", 3);
    Map<String, Subscription> subs = new HashMap<>();
    subs.put("consumer1", new Subscription(topics("topic1", "topic2")));
    subs.put("consumer2", new Subscription(topics("topic1", "topic2")));

    Map<String, List<TopicPartition>> result = assignor.assign(partitionCounts, subs);

    assertEquals(partitions(tp("topic1", 0), tp("topic1", 2), tp("topic2", 1)), result.get("consumer1"));
    assertEquals(partitions(tp("topic1", 1), tp("topic2", 0), tp("topic2", 2)), result.get("consumer2"));
    verifyValidityAndBalance(subs, result);
    assertTrue(isFullyBalanced(result));
}

@Test
public void testPoorRoundRobinAssignmentScenario() {
    // Topics alternate between 2 and 1 partitions; only validity/balance is
    // checked here (perfect balance is not expected for this layout).
    Map<String, Integer> partitionCounts = new HashMap<>();
    for (int t = 1; t <= 5; t++)
        partitionCounts.put(String.format("topic%d", t), (t % 2) + 1);
    Map<String, Subscription> subs = new HashMap<>();
    subs.put("consumer1", new Subscription(topics("topic1", "topic2", "topic3", "topic4", "topic5")));
    subs.put("consumer2", new Subscription(topics("topic1", "topic3", "topic5")));
    subs.put("consumer3", new Subscription(topics("topic1", "topic3", "topic5")));
    subs.put("consumer4", new Subscription(topics("topic1", "topic2", "topic3", "topic4", "topic5")));

    Map<String, List<TopicPartition>> result = assignor.assign(partitionCounts, subs);

    verifyValidityAndBalance(subs, result);
}