focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Incrementally decodes SPDY frames from {@code buffer}, dispatching decoded
 * frames and protocol errors to {@code delegate}.
 * <p>
 * Implemented as a state machine driven by the {@code state} field: each case
 * first checks that enough bytes are readable and simply returns when they are
 * not, so this method can be re-invoked as more data arrives. Data frames are
 * emitted in chunks of at most {@code maxChunkSize}; header blocks are emitted
 * in whatever chunks are readable. Once FRAME_ERROR is entered all remaining
 * input is discarded.
 * NOTE(review): relies on mutable decoder fields (state, streamId, flags,
 * length, numSettings, spdyVersion, maxChunkSize) initialized outside this
 * chunk -- confirm against the enclosing class.
 *
 * @param buffer accumulated network input; consumed as frames are decoded
 */
public void decode(ByteBuf buffer) { boolean last; int statusCode; while (true) { switch(state) { case READ_COMMON_HEADER: if (buffer.readableBytes() < SPDY_HEADER_SIZE) { return; } int frameOffset = buffer.readerIndex(); int flagsOffset = frameOffset + SPDY_HEADER_FLAGS_OFFSET; int lengthOffset = frameOffset + SPDY_HEADER_LENGTH_OFFSET; buffer.skipBytes(SPDY_HEADER_SIZE); boolean control = (buffer.getByte(frameOffset) & 0x80) != 0; int version; int type; if (control) { // Decode control frame common header version = getUnsignedShort(buffer, frameOffset) & 0x7FFF; type = getUnsignedShort(buffer, frameOffset + SPDY_HEADER_TYPE_OFFSET); streamId = 0; // Default to session Stream-ID } else { // Decode data frame common header version = spdyVersion; // Default to expected version type = SPDY_DATA_FRAME; streamId = getUnsignedInt(buffer, frameOffset); } flags = buffer.getByte(flagsOffset); length = getUnsignedMedium(buffer, lengthOffset); // Check version first then validity if (version != spdyVersion) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid SPDY Version"); } else if (!isValidFrameHeader(streamId, type, flags, length)) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid Frame Error"); } else { state = getNextState(type, length); } break; case READ_DATA_FRAME: if (length == 0) { state = State.READ_COMMON_HEADER; delegate.readDataFrame(streamId, hasFlag(flags, SPDY_DATA_FLAG_FIN), Unpooled.buffer(0)); break; } // Generate data frames that do not exceed maxChunkSize int dataLength = Math.min(maxChunkSize, length); // Wait until entire frame is readable if (buffer.readableBytes() < dataLength) { return; } ByteBuf data = buffer.alloc().buffer(dataLength); data.writeBytes(buffer, dataLength); length -= dataLength; if (length == 0) { state = State.READ_COMMON_HEADER; } last = length == 0 && hasFlag(flags, SPDY_DATA_FLAG_FIN); delegate.readDataFrame(streamId, last, data); break; case READ_SYN_STREAM_FRAME: if (buffer.readableBytes() < 10) { 
return; } int offset = buffer.readerIndex(); streamId = getUnsignedInt(buffer, offset); int associatedToStreamId = getUnsignedInt(buffer, offset + 4); byte priority = (byte) (buffer.getByte(offset + 8) >> 5 & 0x07); last = hasFlag(flags, SPDY_FLAG_FIN); boolean unidirectional = hasFlag(flags, SPDY_FLAG_UNIDIRECTIONAL); buffer.skipBytes(10); length -= 10; if (streamId == 0) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid SYN_STREAM Frame"); } else { state = State.READ_HEADER_BLOCK; delegate.readSynStreamFrame(streamId, associatedToStreamId, priority, last, unidirectional); } break; case READ_SYN_REPLY_FRAME: if (buffer.readableBytes() < 4) { return; } streamId = getUnsignedInt(buffer, buffer.readerIndex()); last = hasFlag(flags, SPDY_FLAG_FIN); buffer.skipBytes(4); length -= 4; if (streamId == 0) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid SYN_REPLY Frame"); } else { state = State.READ_HEADER_BLOCK; delegate.readSynReplyFrame(streamId, last); } break; case READ_RST_STREAM_FRAME: if (buffer.readableBytes() < 8) { return; } streamId = getUnsignedInt(buffer, buffer.readerIndex()); statusCode = getSignedInt(buffer, buffer.readerIndex() + 4); buffer.skipBytes(8); if (streamId == 0 || statusCode == 0) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid RST_STREAM Frame"); } else { state = State.READ_COMMON_HEADER; delegate.readRstStreamFrame(streamId, statusCode); } break; case READ_SETTINGS_FRAME: if (buffer.readableBytes() < 4) { return; } boolean clear = hasFlag(flags, SPDY_SETTINGS_CLEAR); numSettings = getUnsignedInt(buffer, buffer.readerIndex()); buffer.skipBytes(4); length -= 4; // Validate frame length against number of entries. Each ID/Value entry is 8 bytes. 
if ((length & 0x07) != 0 || length >> 3 != numSettings) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid SETTINGS Frame"); } else { state = State.READ_SETTING; delegate.readSettingsFrame(clear); } break; case READ_SETTING: if (numSettings == 0) { state = State.READ_COMMON_HEADER; delegate.readSettingsEnd(); break; } if (buffer.readableBytes() < 8) { return; } byte settingsFlags = buffer.getByte(buffer.readerIndex()); int id = getUnsignedMedium(buffer, buffer.readerIndex() + 1); int value = getSignedInt(buffer, buffer.readerIndex() + 4); boolean persistValue = hasFlag(settingsFlags, SPDY_SETTINGS_PERSIST_VALUE); boolean persisted = hasFlag(settingsFlags, SPDY_SETTINGS_PERSISTED); buffer.skipBytes(8); --numSettings; delegate.readSetting(id, value, persistValue, persisted); break; case READ_PING_FRAME: if (buffer.readableBytes() < 4) { return; } int pingId = getSignedInt(buffer, buffer.readerIndex()); buffer.skipBytes(4); state = State.READ_COMMON_HEADER; delegate.readPingFrame(pingId); break; case READ_GOAWAY_FRAME: if (buffer.readableBytes() < 8) { return; } int lastGoodStreamId = getUnsignedInt(buffer, buffer.readerIndex()); statusCode = getSignedInt(buffer, buffer.readerIndex() + 4); buffer.skipBytes(8); state = State.READ_COMMON_HEADER; delegate.readGoAwayFrame(lastGoodStreamId, statusCode); break; case READ_HEADERS_FRAME: if (buffer.readableBytes() < 4) { return; } streamId = getUnsignedInt(buffer, buffer.readerIndex()); last = hasFlag(flags, SPDY_FLAG_FIN); buffer.skipBytes(4); length -= 4; if (streamId == 0) { state = State.FRAME_ERROR; delegate.readFrameError("Invalid HEADERS Frame"); } else { state = State.READ_HEADER_BLOCK; delegate.readHeadersFrame(streamId, last); } break; case READ_WINDOW_UPDATE_FRAME: if (buffer.readableBytes() < 8) { return; } streamId = getUnsignedInt(buffer, buffer.readerIndex()); int deltaWindowSize = getUnsignedInt(buffer, buffer.readerIndex() + 4); buffer.skipBytes(8); if (deltaWindowSize == 0) { state = 
State.FRAME_ERROR; delegate.readFrameError("Invalid WINDOW_UPDATE Frame"); } else { state = State.READ_COMMON_HEADER; delegate.readWindowUpdateFrame(streamId, deltaWindowSize); } break; case READ_HEADER_BLOCK: if (length == 0) { state = State.READ_COMMON_HEADER; delegate.readHeaderBlockEnd(); break; } if (!buffer.isReadable()) { return; } int compressedBytes = Math.min(buffer.readableBytes(), length); ByteBuf headerBlock = buffer.alloc().buffer(compressedBytes); headerBlock.writeBytes(buffer, compressedBytes); length -= compressedBytes; delegate.readHeaderBlock(headerBlock); break; case DISCARD_FRAME: int numBytes = Math.min(buffer.readableBytes(), length); buffer.skipBytes(numBytes); length -= numBytes; if (length == 0) { state = State.READ_COMMON_HEADER; break; } return; case FRAME_ERROR: buffer.skipBytes(buffer.readableBytes()); return; default: throw new Error("Shouldn't reach here."); } } }
/**
 * Encodes a well-formed SYN_STREAM control frame (random odd stream id,
 * random associated stream id and priority, no flags) and verifies the
 * decoder reports it via readSynStreamFrame followed by readHeaderBlockEnd,
 * consuming the whole buffer.
 */
@Test public void testSpdySynStreamFrame() throws Exception { short type = 1; byte flags = 0; int length = 10; int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; int associatedToStreamId = RANDOM.nextInt() & 0x7FFFFFFF; byte priority = (byte) (RANDOM.nextInt() & 0x07); ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); encodeControlFrameHeader(buf, type, flags, length); buf.writeInt(streamId); buf.writeInt(associatedToStreamId); buf.writeByte(priority << 5); buf.writeByte(0); decoder.decode(buf); verify(delegate).readSynStreamFrame(streamId, associatedToStreamId, priority, false, false); verify(delegate).readHeaderBlockEnd(); assertFalse(buf.isReadable()); buf.release(); }
/**
 * Blocks until a connection is accepted, waiting effectively forever.
 * Delegates to {@code awaitConnection(long)} with the maximum timeout.
 *
 * @return the accepted socket
 * @throws InterruptedException if the waiting thread is interrupted
 */
public Socket awaitConnection() throws InterruptedException { return awaitConnection(Long.MAX_VALUE); }
/**
 * Verifies that awaitConnection() hands back an incoming socket within the
 * timeout and that the helper thread terminates.
 * NOTE(review): the Thread is constructed with no Runnable, so it exits
 * immediately after start() -- presumably a placeholder for a client that
 * connects to the connector; confirm against the full test class.
 */
@Test public void testConnect() throws Exception { Thread thread = new Thread(); thread.start(); Socket socket = connector.awaitConnection(2 * DELAY); assertNotNull(socket); thread.join(DELAY); assertFalse(thread.isAlive()); socket.close(); }
/**
 * Maps an exception / HTTP status / storage error message to a retry-reason
 * abbreviation. Every entry of {@code rankedReasonCategories} is consulted,
 * and the abbreviation of the LAST matching category wins (later entries in
 * the ranked list take precedence over earlier ones).
 *
 * @param ex the exception observed, may be null
 * @param statusCode the HTTP status code, may be null
 * @param storageErrorMessage the storage-layer error message, may be null
 * @return the winning abbreviation, or null when no category matches
 */
static String getAbbreviation(Exception ex, Integer statusCode, String storageErrorMessage) {
  String matched = null;
  for (RetryReasonCategory category : rankedReasonCategories) {
    String candidate = category.captureAndGetAbbreviation(ex, statusCode, storageErrorMessage);
    if (candidate != null) {
      matched = candidate;
    }
  }
  return matched;
}
/** An upper-cased "connection reset" SocketException must still map to the connection-reset abbreviation (matching is case-insensitive). */
@Test public void testConnectionResetRetryReason() { SocketException connReset = new SocketException(CONNECTION_RESET_MESSAGE.toUpperCase()); Assertions.assertThat(RetryReason.getAbbreviation(connReset, null, null)).isEqualTo(CONNECTION_RESET_ABBREVIATION); }
/**
 * Converts a portal-registry URL into its service id, which is simply the
 * URL's path component.
 * NOTE(review): "Serivce" in the method name is a typo, but renaming would
 * break existing callers -- left as is.
 */
public static String convertPortalRegistrySerivceId(URL url) { return url.getPath(); }
/** The service id derived from the fixture URL must equal the expected fixture id. */
@Test
public void testConvertServiceId() {
    final String actualServiceId = PortalRegistryUtils.convertPortalRegistrySerivceId(url);
    assertEquals(testServiceId, actualServiceId);
}
/**
 * Installs the file name parser, accepting only ConnectionFileNameParser
 * instances; any other parser type is rejected with IllegalArgumentException.
 */
@Override
protected void setFileNameParser( FileNameParser parser ) {
  if ( parser instanceof ConnectionFileNameParser ) {
    super.setFileNameParser( parser );
    return;
  }
  throw new IllegalArgumentException( "Argument 'parser' is not an instance of 'ConnectionFileNameParser'." );
}
/** setFileNameParser must reject a parser that is not a ConnectionFileNameParser. */
@Test( expected = IllegalArgumentException.class ) public void testSetFileNameParserThrowsIfIncorrectType() { try ( ConnectionFileProvider provider = new ConnectionFileProvider() ) { provider.setFileNameParser( mock( FileNameParser.class ) ); } }
/**
 * Sets the active farming contract (null clears it), persists the value via
 * setStoredContract, then recomputes the derived contract state.
 */
public void setContract(@Nullable Produce contract) { this.contract = contract; setStoredContract(contract); handleContractState(); }
/**
 * With a cabbage contract active and the tracked guild patch predicted to hold
 * harvestable cabbage, the contract summary must be COMPLETED.
 */
@Test public void cabbageContractCabbageHarvestableAndEmptyPatch() { final long unixNow = Instant.now().getEpochSecond(); final FarmingPatch patch = farmingGuildPatches.get(Varbits.FARMING_4773); assertNotNull(patch); when(farmingTracker.predictPatch(patch)) .thenReturn(new PatchPrediction(Produce.CABBAGE, CropState.HARVESTABLE, unixNow, 3, 3)); farmingContractManager.setContract(Produce.CABBAGE); assertEquals(SummaryState.COMPLETED, farmingContractManager.getSummary()); }
/**
 * Prints a table of all proxy instances (address, state, start time, last
 * heartbeat, version, revision) plus a serving/lost summary line.
 * The address column is widened to fit the longest address (min 24 chars).
 * Returns 0 on success, 1 on any failure.
 * NOTE(review): proxies whose state is neither ACTIVE nor LOST are counted in
 * neither bucket, so the totals line can under-report; also the failure path
 * uses printStackTrace() rather than mPrintStream -- both left unchanged since
 * the existing unit test pins the current output format.
 */
public int run() throws IOException { String[] header = new String[]{"Address", "State", "Start Time", "Last Heartbeat Time", "Version", "Revision"}; try { List<ProxyStatus> allProxyStatus = mMetaMasterClient.listProxyStatus(); int liveCount = 0; int lostCount = 0; int maxAddressLength = 24; for (ProxyStatus proxyStatus : allProxyStatus) { String state = proxyStatus.getState(); if (state.equals("ACTIVE")) { liveCount++; } else if (state.equals("LOST")) { lostCount++; } NetAddress address = proxyStatus.getAddress(); String addressStr = address.getHost() + ":" + address.getRpcPort(); if (maxAddressLength < addressStr.length()) { maxAddressLength = addressStr.length(); } } mPrintStream.printf("%s Proxy instances in the cluster, %s serving and %s lost%n%n", liveCount + lostCount, liveCount, lostCount); String format = "%-" + maxAddressLength + "s %-8s %-16s %-20s %-32s %-8s%n"; mPrintStream.printf(format, header); for (ProxyStatus proxyStatus : allProxyStatus) { NetAddress address = proxyStatus.getAddress(); BuildVersion version = proxyStatus.getVersion(); mPrintStream.printf(format, address.getHost() + ":" + address.getRpcPort(), proxyStatus.getState(), DATETIME_FORMAT.format(Instant.ofEpochMilli(proxyStatus.getStartTime())), DATETIME_FORMAT.format(Instant.ofEpochMilli(proxyStatus.getLastHeartbeatTime())), version.getVersion(), version.getRevision()); } return 0; } catch (Exception e) { e.printStackTrace(); return 1; } }
/**
 * Runs ProxyCommand against a stubbed client with one ACTIVE and one LOST
 * proxy and asserts the exact rendered table, line by line.
 */
@Test public void listProxyInstances() throws IOException { List<ProxyStatus> longInfoList = prepareInfoList(); Mockito.when(mMetaMasterClient.listProxyStatus()) .thenReturn(longInfoList); try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); PrintStream printStream = new PrintStream(outputStream, true, "utf-8")) { ProxyCommand proxyCommand = new ProxyCommand(mMetaMasterClient, printStream); proxyCommand.run(); String output = new String(outputStream.toByteArray(), StandardCharsets.UTF_8); // CHECKSTYLE.OFF: LineLengthExceed - Much more readable List<String> expectedOutput = Arrays.asList("2 Proxy instances in the cluster, 1 serving and 1 lost", "", "Address State Start Time Last Heartbeat Time Version Revision", "proxy-0:12345 ACTIVE 20230421-182944 20230421-183005 1.0 abc ", "proxy-1:23456 LOST 20230421-182707 20230421-190507 1.1 abc "); // CHECKSTYLE.ON: LineLengthExceed List<String> testOutput = Arrays.asList(output.split("\n")); Assert.assertThat(testOutput, IsIterableContainingInOrder.contains(expectedOutput.toArray())); } }
/** Returns an iterable view of this float array whose elements are compared with exact (bitwise-value) equality. */
public FloatArrayAsIterable usingExactEquality() { return new FloatArrayAsIterable(EXACT_EQUALITY_CORRESPONDENCE, iterableSubject()); }
/**
 * A long just above 2^24 has no exact float representation, so exact-equality
 * contains() must fail and surface an IllegalArgumentException explaining why.
 */
@Test public void usingExactEquality_contains_otherTypes_longOutOfRange() { long expected = (1L << 24) + 1L; float[] actual = array(1.0f, 2.0f, 3.0f); expectFailureWhenTestingThat(actual).usingExactEquality().contains(expected); assertFailureKeys( "value of", "expected to contain", "testing whether", "but was", "additionally, one or more exceptions were thrown while comparing elements", "first exception"); assertFailureValue("expected to contain", Long.toString(expected)); assertThatFailure() .factValue("first exception") .startsWith( "compare(" + actual[0] + ", " + expected + ") threw java.lang.IllegalArgumentException"); assertThatFailure() .factValue("first exception") .contains( "Expected value " + expected + " in assertion using exact float equality was a long with an absolute value " + "greater than 2^24 which has no exact float representation"); }
/**
 * Loads the list of Metadata objects stored in an extract file.
 * <p>
 * Validates that the file exists, carries a recognized format suffix and has
 * an acceptable length, transparently decompresses bz2/gz/gzip/zip suffixes,
 * then parses either a JSON metadata list or a text-based extract. Depending
 * on {@code alterMetadataList}, a JSON list may be truncated to its first
 * element (FIRST_ONLY) or have all entries' content concatenated into the
 * first element (CONCATENATE_CONTENT_INTO_FIRST).
 *
 * @param extractFile path to the extract file
 * @return the metadata list, or null when the compression type is unsupported
 * @throws ExtractReaderException for a missing file, bad suffix, zero-byte /
 *         too-short / too-long content, or any underlying IOException
 */
public List<Metadata> loadExtract(Path extractFile) throws ExtractReaderException {
    List<Metadata> metadataList = null;
    if (extractFile == null || !Files.isRegularFile(extractFile)) {
        throw new ExtractReaderException(ExtractReaderException.TYPE.NO_EXTRACT_FILE);
    }
    FileSuffixes fileSuffixes = parseSuffixes(extractFile
            .getFileName()
            .toString());
    if (fileSuffixes.format == null) {
        throw new ExtractReaderException(ExtractReaderException.TYPE.INCORRECT_EXTRACT_FILE_SUFFIX);
    }
    // (A second Files.isRegularFile check used to live here; it duplicated the
    // check above and has been removed.)
    long length = -1L;
    try {
        length = Files.size(extractFile);
    } catch (IOException e) {
        throw new ExtractReaderException(ExtractReaderException.TYPE.IO_EXCEPTION, e);
    }
    if (length == 0L) {
        throw new ExtractReaderException(ExtractReaderException.TYPE.ZERO_BYTE_EXTRACT_FILE);
    }
    if (minExtractLength > IGNORE_LENGTH && length < minExtractLength) {
        LOG.info("minExtractLength {} > IGNORE_LENGTH {} and length {} < minExtractLength {} for file '{}'",
                minExtractLength, IGNORE_LENGTH, length, minExtractLength, extractFile);
        throw new ExtractReaderException(ExtractReaderException.TYPE.EXTRACT_FILE_TOO_SHORT);
    }
    if (maxExtractLength > IGNORE_LENGTH && length > maxExtractLength) {
        LOG.info("maxExtractLength {} > IGNORE_LENGTH {} and length {} > maxExtractLength {} for file '{}'",
                maxExtractLength, IGNORE_LENGTH, length, maxExtractLength, extractFile);
        throw new ExtractReaderException(ExtractReaderException.TYPE.EXTRACT_FILE_TOO_LONG);
    }
    Reader reader = null;
    InputStream is = null;
    try {
        is = Files.newInputStream(extractFile);
        if (fileSuffixes.compression != null) {
            switch (fileSuffixes.compression) {
                case "bz2":
                    is = new BZip2CompressorInputStream(is);
                    break;
                case "gz":
                case "gzip":
                    is = new GzipCompressorInputStream(is);
                    break;
                case "zip":
                    // NOTE(review): ZCompressorInputStream decodes Unix-compress
                    // ".Z" streams, not ".zip" archives; a real zip file will fail
                    // here. Left unchanged pending confirmation of the intended
                    // suffix, but this looks like a bug.
                    is = new ZCompressorInputStream(is);
                    break;
                default:
                    LOG.warn("Can't yet process compression of type: {}", fileSuffixes.compression);
                    // Close the raw stream before bailing out; it used to leak
                    // on this path.
                    IOUtils.closeQuietly(is);
                    return metadataList;
            }
        }
        reader = new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8));
    } catch (IOException e) {
        // Close the partially-built stream chain before propagating.
        IOUtils.closeQuietly(is);
        throw new ExtractReaderException(ExtractReaderException.TYPE.IO_EXCEPTION, e);
    }
    try {
        if (fileSuffixes.format == FileSuffixes.FORMAT.JSON) {
            metadataList = JsonMetadataList.fromJson(reader);
            if (alterMetadataList.equals(ALTER_METADATA_LIST.FIRST_ONLY) && metadataList.size() > 1) {
                // Keep only the container (first) metadata entry.
                while (metadataList.size() > 1) {
                    metadataList.remove(metadataList.size() - 1);
                }
            } else if (alterMetadataList.equals(ALTER_METADATA_LIST.CONCATENATE_CONTENT_INTO_FIRST)
                    && metadataList.size() > 1) {
                // (was ALTER_METADATA_LIST.AS_IS.CONCATENATE_CONTENT_INTO_FIRST --
                // the same static constant reached through another constant; fixed
                // to a direct static reference.)
                // Fold every entry's content into the container entry, then drop
                // the rest.
                StringBuilder sb = new StringBuilder();
                Metadata containerMetadata = metadataList.get(0);
                for (Metadata m : metadataList) {
                    String c = m.get(TikaCoreProperties.TIKA_CONTENT);
                    if (c != null) {
                        sb.append(c);
                        sb.append(" ");
                    }
                }
                containerMetadata.set(TikaCoreProperties.TIKA_CONTENT, sb.toString());
                while (metadataList.size() > 1) {
                    metadataList.remove(metadataList.size() - 1);
                }
            }
        } else {
            metadataList = generateListFromTextFile(reader, fileSuffixes);
        }
    } catch (IOException e) {
        throw new ExtractReaderException(ExtractReaderException.TYPE.IO_EXCEPTION, e);
    } finally {
        IOUtils.closeQuietly(reader);
        IOUtils.closeQuietly(is);
    }
    return metadataList;
}
/**
 * Exercises ExtractReader's three list-altering modes against the same JSON
 * fixture: AS_IS keeps both entries; FIRST_ONLY drops the attachment entry;
 * CONCATENATE_CONTENT_INTO_FIRST merges the attachment's content into the
 * container entry.
 */
@Test public void testBasic() throws Exception { ExtractReader extractReader = new ExtractReader(); List<Metadata> metadataList = extractReader.loadExtract(testJsonFile); assertEquals(2, metadataList.size()); assertEquals(1, metadataList .get(0) .getValues(TikaCoreProperties.TIKA_CONTENT).length); assertEquals(1, metadataList .get(1) .getValues(TikaCoreProperties.TIKA_CONTENT).length); assertContains("fox", metadataList .get(0) .get(TikaCoreProperties.TIKA_CONTENT)); assertContains("attachment", metadataList .get(1) .get(TikaCoreProperties.TIKA_CONTENT)); extractReader = new ExtractReader(ExtractReader.ALTER_METADATA_LIST.FIRST_ONLY); metadataList = extractReader.loadExtract(testJsonFile); assertEquals(1, metadataList.size()); assertEquals(1, metadataList .get(0) .getValues(TikaCoreProperties.TIKA_CONTENT).length); assertContains("fox", metadataList .get(0) .get(TikaCoreProperties.TIKA_CONTENT)); assertNotContained("attachment", metadataList .get(0) .get(TikaCoreProperties.TIKA_CONTENT)); extractReader = new ExtractReader(ExtractReader.ALTER_METADATA_LIST.CONCATENATE_CONTENT_INTO_FIRST); metadataList = extractReader.loadExtract(testJsonFile); assertEquals(1, metadataList.size()); assertEquals(1, metadataList .get(0) .getValues(TikaCoreProperties.TIKA_CONTENT).length); assertContains("fox", metadataList .get(0) .get(TikaCoreProperties.TIKA_CONTENT)); assertContains("attachment", metadataList .get(0) .get(TikaCoreProperties.TIKA_CONTENT)); }
@Udf public <T> List<T> slice( @UdfParameter(description = "the input array") final List<T> in, @UdfParameter(description = "start index") final Integer from, @UdfParameter(description = "end index") final Integer to) { if (in == null) { return null; } try { // SQL systems are usually 1-indexed and are inclusive of end index final int start = from == null ? 0 : from - 1; final int end = to == null ? in.size() : to; return in.subList(start, end); } catch (final IndexOutOfBoundsException e) { return null; } }
/** slice(list, 2, 2) selects the single-element window [2, 2] under 1-indexed, end-inclusive SQL semantics. */
@Test public void shouldHandleIntegers() { // Given: final List<Integer> list = Lists.newArrayList(1, 2, 3); // When: final List<Integer> slice = new Slice().slice(list, 2, 2); // Then: assertThat(slice, is(Lists.newArrayList(2))); }
/** Aggregates with the given initializer, materializing with default (unspecified) key and value serdes. */
@Override public KTable<Windowed<K>, V> aggregate(final Initializer<V> initializer) { return aggregate(initializer, Materialized.with(null, null)); }
/** aggregate(null) must fail fast with a NullPointerException. */
@Test public void shouldNotHaveNullInitializerOnAggregate() { assertThrows(NullPointerException.class, () -> windowedCogroupedStream.aggregate(null)); }
/**
 * Hashes {@code string} (as UTF-8 bytes) with the named digest algorithm and
 * folds the digest into a long via {@code asLong}.
 *
 * @param algorithmName a MessageDigest algorithm name, e.g. "MD5" or "SHA-256"
 * @param string the text to hash
 * @return the digest folded to a long
 * @throws HoodieException if the algorithm is not available
 */
public static long getMessageDigestHash(final String algorithmName, final String string) {
    final MessageDigest md;
    try {
        md = MessageDigest.getInstance(algorithmName);
    } catch (NoSuchAlgorithmException e) {
        throw new HoodieException(e);
    }
    // md is definitely assigned here (getInstance never returns null and the
    // failure path throws), so the former Objects.requireNonNull(md) was dead
    // code and has been removed.
    return asLong(md.digest(getUTF8Bytes(string)));
}
/**
 * Pins known MD5-derived hash values for two strings and checks that a
 * different input or a different algorithm (SHA-256) produces different hashes.
 */
@Test public void testGetMessageDigestHash() { assertEquals(6808551913422584641L, NumericUtils.getMessageDigestHash("MD5", "This is a string")); assertEquals(2549749777095932358L, NumericUtils.getMessageDigestHash("MD5", "This is a test string")); assertNotEquals(1L, NumericUtils.getMessageDigestHash("MD5", "This")); assertNotEquals(6808551913422584641L, NumericUtils.getMessageDigestHash("SHA-256", "This is a string")); }
/**
 * Loads a KernelFunction definition from a YAML file located on the current
 * thread's context classpath (the Path is used as a classpath resource name,
 * not a filesystem path).
 *
 * @param filePath classpath-relative location of the YAML resource
 * @return the parsed kernel function
 * @throws IOException if the resource cannot be found or read
 */
public static <T> KernelFunction<T> fromYaml(Path filePath) throws IOException {
    ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
    try (InputStream inputStream = classLoader.getResourceAsStream(filePath.toString())) {
        // getResourceAsStream returns null for a missing resource; fail with a
        // clear IOException instead of an NPE deep inside the YAML parser.
        if (inputStream == null) {
            throw new IOException("Could not find resource on classpath: " + filePath);
        }
        return fromYaml(inputStream, null);
    }
}
/**
 * Auto-generated prototype for fromYaml -- currently {@code @Disabled} and
 * deliberately failing; needs a real fixture path and expected function
 * before it can be enabled.
 */
@Test @Disabled public void testFromYaml() throws Exception { System.out.println("fromYaml"); Path filePath = null; KernelFunction expResult = null; KernelFunction result = KernelFunctionYaml.fromYaml(filePath); assertEquals(expResult, result); // TODO review the generated test code and remove the default call to fail. fail("The test case is a prototype."); }
/**
 * Creates the reader iterator backed by a fresh ApplianceShuffleEntryReader,
 * initializing the per-dataset counter from the reader's dataset id before
 * delegating to {@code iterator(entryReader)}.
 */
@Override public GroupingShuffleReaderIterator<K, V> iterator() throws IOException { ApplianceShuffleEntryReader entryReader = new ApplianceShuffleEntryReader( shuffleReaderConfig, executionContext, operationContext, true); initCounter(entryReader.getDatasetId()); return iterator(entryReader); }
/**
 * Feeds two records through a GroupingShuffleReader and verifies every
 * dynamic-split request is rejected: past the stop position, at the current
 * record, before the current position, and after all input is consumed.
 * Also checks the underlying shuffle reader is closed when the iterator is.
 */
@Test public void testReadFromShuffleDataAndFailToSplit() throws Exception { PipelineOptions options = PipelineOptionsFactory.create(); BatchModeExecutionContext context = BatchModeExecutionContext.forTesting(options, "testStage"); final int kFirstShard = 0; TestShuffleReader shuffleReader = new TestShuffleReader(); final int kNumRecords = 2; for (int i = 0; i < kNumRecords; ++i) { byte[] key = CoderUtils.encodeToByteArray(BigEndianIntegerCoder.of(), i); shuffleReader.addEntry( newShuffleEntry(fabricatePosition(kFirstShard, key), key, EMPTY_BYTE_ARRAY, key)); } // Note that TestShuffleReader start/end positions are in the // space of keys not the positions (TODO: should probably always // use positions instead). String stop = encodeBase64URLSafeString(fabricatePosition(kNumRecords).getPosition().toByteArray()); TestOperationContext operationContext = TestOperationContext.create(); GroupingShuffleReader<Integer, Integer> groupingShuffleReader = new GroupingShuffleReader<>( options, null, null, stop, WindowedValue.getFullCoder( KvCoder.of( BigEndianIntegerCoder.of(), IterableCoder.of(BigEndianIntegerCoder.of())), IntervalWindow.getCoder()), context, operationContext, ShuffleReadCounterFactory.INSTANCE, false /* do not sort values */); assertFalse(shuffleReader.isClosed()); try (GroupingShuffleReaderIterator<Integer, Integer> iter = groupingShuffleReader.iterator(shuffleReader)) { // Poke the iterator so we can test dynamic splitting. assertTrue(iter.start()); // Cannot split since the value provided is past the current stop position. assertNull( iter.requestDynamicSplit( splitRequestAtPosition(makeShufflePosition(kNumRecords + 1, null)))); byte[] key = CoderUtils.encodeToByteArray(BigEndianIntegerCoder.of(), 0); // Cannot split since the split position is identical with the position of the record // that was just returned. 
assertNull( iter.requestDynamicSplit(splitRequestAtPosition(makeShufflePosition(kFirstShard, key)))); // Cannot split since the requested split position comes before current position assertNull( iter.requestDynamicSplit(splitRequestAtPosition(makeShufflePosition(kFirstShard, null)))); int numRecordsReturned = 1; // including start() above. for (; iter.advance(); ++numRecordsReturned) { iter.getCurrent().getValue(); // ignored } assertEquals(kNumRecords, numRecordsReturned); // Cannot split since all input was consumed. assertNull( iter.requestDynamicSplit(splitRequestAtPosition(makeShufflePosition(kFirstShard, null)))); } assertTrue(shuffleReader.isClosed()); }
/** Reads a MySQL INT24 column value: a 3-byte little-endian signed integer from the binlog packet payload. */
@Override public Serializable read(final MySQLBinlogColumnDef columnDef, final MySQLPacketPayload payload) { return payload.getByteBuf().readMediumLE(); }
/** read() must return the 3-byte little-endian value produced by ByteBuf.readMediumLE(). */
@Test void assertRead() { when(payload.getByteBuf()).thenReturn(byteBuf); when(byteBuf.readMediumLE()).thenReturn(1); MySQLInt24BinlogProtocolValue actual = new MySQLInt24BinlogProtocolValue(); assertThat(actual.read(columnDef, payload), is(1)); }
/** Test hook: empties both the write-side and read-side entity start-time caches. */
@VisibleForTesting void clearStartTimeCache() { startTimeWriteCache.clear(); startTimeReadCache.clear(); }
/**
 * Runs the inherited single-entity test, clears the start-time caches, and
 * runs it again to prove correctness does not depend on cached start times.
 */
@Test public void testGetSingleEntity() throws IOException { super.testGetSingleEntity(); ((RollingLevelDBTimelineStore)store).clearStartTimeCache(); super.testGetSingleEntity(); loadTestEntityData(); }
private static boolean canSatisfyConstraints(ApplicationId appId, PlacementConstraint constraint, SchedulerNode node, AllocationTagsManager atm, Optional<DiagnosticsCollector> dcOpt) throws InvalidAllocationTagsQueryException { if (constraint == null) { LOG.debug("Constraint is found empty during constraint validation for" + " app:{}", appId); return true; } // If this is a single constraint, transform to SingleConstraint SingleConstraintTransformer singleTransformer = new SingleConstraintTransformer(constraint); constraint = singleTransformer.transform(); AbstractConstraint sConstraintExpr = constraint.getConstraintExpr(); // TODO handle other type of constraints, e.g CompositeConstraint if (sConstraintExpr instanceof SingleConstraint) { SingleConstraint single = (SingleConstraint) sConstraintExpr; return canSatisfySingleConstraint(appId, single, node, atm, dcOpt); } else if (sConstraintExpr instanceof And) { And and = (And) sConstraintExpr; return canSatisfyAndConstraint(appId, and, node, atm, dcOpt); } else if (sConstraintExpr instanceof Or) { Or or = (Or) sConstraintExpr; return canSatisfyOrConstraint(appId, or, node, atm, dcOpt); } else { throw new InvalidAllocationTagsQueryException( "Unsupported type of constraint: " + sConstraintExpr.getClass().getSimpleName()); } }
/**
 * Places hbase-m containers on two nodes, then checks the AND constraint
 * (anti-affinity to hbase-m plus a spark cardinality bound): nodes hosting
 * hbase-m are rejected, and once n1 exceeds the spark cardinality it is
 * rejected too, leaving only the empty node n3 satisfiable.
 */
@Test public void testANDConstraintAssignment() throws InvalidAllocationTagsQueryException { AllocationTagsManager tm = new AllocationTagsManager(rmContext); PlacementConstraintManagerService pcm = new MemoryPlacementConstraintManager(); // Register App1 with anti-affinity constraint map. pcm.registerApplication(appId1, constraintMap3); RMNode n0r1 = rmNodes.get(0); RMNode n1r1 = rmNodes.get(1); RMNode n2r2 = rmNodes.get(2); RMNode n3r2 = rmNodes.get(3); /** * Place container: * n0: hbase-m(1) * n1: "" * n2: hbase-m(1) * n3: "" */ tm.addContainer(n0r1.getNodeID(), newContainerId(appId1, 0), ImmutableSet.of("hbase-m")); tm.addContainer(n2r2.getNodeID(), newContainerId(appId1, 1), ImmutableSet.of("hbase-m")); Assert.assertEquals(1L, tm.getAllocationTagsWithCount(n0r1.getNodeID()) .get("hbase-m").longValue()); Assert.assertEquals(1L, tm.getAllocationTagsWithCount(n2r2.getNodeID()) .get("hbase-m").longValue()); SchedulerNode schedulerNode0 =newSchedulerNode(n0r1.getHostName(), n0r1.getRackName(), n0r1.getNodeID()); SchedulerNode schedulerNode1 =newSchedulerNode(n1r1.getHostName(), n1r1.getRackName(), n1r1.getNodeID()); SchedulerNode schedulerNode2 =newSchedulerNode(n2r2.getHostName(), n2r2.getRackName(), n2r2.getNodeID()); SchedulerNode schedulerNode3 =newSchedulerNode(n3r2.getHostName(), n3r2.getRackName(), n3r2.getNodeID()); // Anti-affinity with hbase-m so it should not be able to be placed // onto n0 and n2 as they already have hbase-m allocated. 
Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1, createSchedulingRequest(sourceTag1), schedulerNode0, pcm, tm)); Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1, createSchedulingRequest(sourceTag1), schedulerNode1, pcm, tm)); Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1, createSchedulingRequest(sourceTag1), schedulerNode2, pcm, tm)); Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1, createSchedulingRequest(sourceTag1), schedulerNode3, pcm, tm)); /** * Place container: * n0: hbase-m(1) * n1: spark(3) * n2: hbase-m(1) * n3: "" */ for (int i=0; i<4; i++) { tm.addContainer(n1r1.getNodeID(), newContainerId(appId1, i+2), ImmutableSet.of("spark")); } Assert.assertEquals(4L, tm.getAllocationTagsWithCount(n1r1.getNodeID()) .get("spark").longValue()); // Violate cardinality constraint Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1, createSchedulingRequest(sourceTag1), schedulerNode0, pcm, tm)); Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1, createSchedulingRequest(sourceTag1), schedulerNode1, pcm, tm)); Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1, createSchedulingRequest(sourceTag1), schedulerNode2, pcm, tm)); Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1, createSchedulingRequest(sourceTag1), schedulerNode3, pcm, tm)); }
/**
 * Applies a single ZMQ socket option identified by {@code option} to this
 * options object, coercing {@code optval} to the expected type.
 * <p>
 * Returns true when the option was applied, false for a few options whose
 * value type was not recognized (allocator / selector chooser), and throws
 * IllegalArgumentException for out-of-range values or an unknown option id.
 * Several options have coupled side effects: the PLAIN/CURVE/GSSAPI options
 * also set {@code mechanism} and/or {@code asServer}; ZMQ_IPV4ONLY is
 * rewritten into its ZMQ_IPV6 replacement; ZMQ_DECODER/ZMQ_ENCODER flip the
 * socket into raw mode; ZMQ_HEARTBEAT_TTL converts milliseconds to
 * deciseconds before range-checking. Statement order within each case is
 * significant and left untouched.
 *
 * @param option a ZMQ.ZMQ_* option constant
 * @param optval the option value; expected type depends on the option
 * @return true if applied, false if the value type was not usable
 */
@SuppressWarnings("deprecation") public boolean setSocketOpt(int option, Object optval) { final ValueReference<Boolean> result = new ValueReference<>(false); switch (option) { case ZMQ.ZMQ_SNDHWM: sendHwm = (Integer) optval; if (sendHwm < 0) { throw new IllegalArgumentException("sendHwm " + optval); } return true; case ZMQ.ZMQ_RCVHWM: recvHwm = (Integer) optval; if (recvHwm < 0) { throw new IllegalArgumentException("recvHwm " + optval); } return true; case ZMQ.ZMQ_AFFINITY: affinity = (Long) optval; return true; case ZMQ.ZMQ_IDENTITY: byte[] val = parseBytes(option, optval); if (val == null || val.length > 255) { throw new IllegalArgumentException("identity must not be null or less than 255 " + optval); } identity = Arrays.copyOf(val, val.length); identitySize = (short) identity.length; return true; case ZMQ.ZMQ_RATE: rate = (Integer) optval; return true; case ZMQ.ZMQ_RECOVERY_IVL: recoveryIvl = (Integer) optval; return true; case ZMQ.ZMQ_SNDBUF: sndbuf = (Integer) optval; return true; case ZMQ.ZMQ_RCVBUF: rcvbuf = (Integer) optval; return true; case ZMQ.ZMQ_TOS: tos = (Integer) optval; return true; case ZMQ.ZMQ_LINGER: linger = (Integer) optval; return true; case ZMQ.ZMQ_RECONNECT_IVL: reconnectIvl = (Integer) optval; if (reconnectIvl < -1) { throw new IllegalArgumentException("reconnectIvl " + optval); } return true; case ZMQ.ZMQ_RECONNECT_IVL_MAX: reconnectIvlMax = (Integer) optval; if (reconnectIvlMax < 0) { throw new IllegalArgumentException("reconnectIvlMax " + optval); } return true; case ZMQ.ZMQ_BACKLOG: backlog = (Integer) optval; return true; case ZMQ.ZMQ_MAXMSGSIZE: maxMsgSize = (Long) optval; return true; case ZMQ.ZMQ_MULTICAST_HOPS: multicastHops = (Integer) optval; return true; case ZMQ.ZMQ_RCVTIMEO: recvTimeout = (Integer) optval; return true; case ZMQ.ZMQ_SNDTIMEO: sendTimeout = (Integer) optval; return true; /* Deprecated in favor of ZMQ_IPV6 */ case ZMQ.ZMQ_IPV4ONLY: return setSocketOpt(ZMQ.ZMQ_IPV6, !parseBoolean(option, optval)); /* To replace 
the somewhat surprising IPV4ONLY */ case ZMQ.ZMQ_IPV6: ipv6 = parseBoolean(option, optval); return true; case ZMQ.ZMQ_SOCKS_PROXY: socksProxyAddress = parseString(option, optval); return true; case ZMQ.ZMQ_TCP_KEEPALIVE: tcpKeepAlive = ((Number) optval).intValue(); if (tcpKeepAlive != -1 && tcpKeepAlive != 0 && tcpKeepAlive != 1) { throw new IllegalArgumentException("tcpKeepAlive only accepts one of -1,0,1 " + optval); } return true; case ZMQ.ZMQ_TCP_KEEPALIVE_CNT: this.tcpKeepAliveCnt = ((Number) optval).intValue(); return true; case ZMQ.ZMQ_TCP_KEEPALIVE_IDLE: this.tcpKeepAliveIdle = ((Number) optval).intValue(); return true; case ZMQ.ZMQ_TCP_KEEPALIVE_INTVL: this.tcpKeepAliveIntvl = ((Number) optval).intValue(); return true; case ZMQ.ZMQ_IMMEDIATE: immediate = parseBoolean(option, optval); return true; case ZMQ.ZMQ_DELAY_ATTACH_ON_CONNECT: immediate = !parseBoolean(option, optval); return true; case ZMQ.ZMQ_TCP_ACCEPT_FILTER: String filterStr = parseString(option, optval); if (filterStr == null) { tcpAcceptFilters.clear(); } else if (filterStr.isEmpty() || filterStr.length() > 255) { throw new IllegalArgumentException("tcp_accept_filter " + optval); } else { TcpAddressMask filter = new TcpAddressMask(filterStr, ipv6); tcpAcceptFilters.add(filter); } return true; case ZMQ.ZMQ_PLAIN_SERVER: asServer = parseBoolean(option, optval); mechanism = (asServer ? 
Mechanisms.PLAIN : Mechanisms.NULL); return true; case ZMQ.ZMQ_PLAIN_USERNAME: if (optval == null) { mechanism = Mechanisms.NULL; asServer = false; return true; } plainUsername = parseString(option, optval); asServer = false; mechanism = Mechanisms.PLAIN; return true; case ZMQ.ZMQ_PLAIN_PASSWORD: if (optval == null) { mechanism = Mechanisms.NULL; asServer = false; return true; } plainPassword = parseString(option, optval); asServer = false; mechanism = Mechanisms.PLAIN; return true; case ZMQ.ZMQ_ZAP_DOMAIN: String domain = parseString(option, optval); if (domain != null && domain.length() < 256) { zapDomain = domain; return true; } throw new IllegalArgumentException("zap domain length shall be < 256 : " + optval); case ZMQ.ZMQ_CURVE_SERVER: asServer = parseBoolean(option, optval); mechanism = (asServer ? Mechanisms.CURVE : Mechanisms.NULL); return true; case ZMQ.ZMQ_CURVE_PUBLICKEY: curvePublicKey = setCurveKey(option, optval, result); return result.get(); case ZMQ.ZMQ_CURVE_SECRETKEY: curveSecretKey = setCurveKey(option, optval, result); return result.get(); case ZMQ.ZMQ_CURVE_SERVERKEY: curveServerKey = setCurveKey(option, optval, result); if (curveServerKey == null) { asServer = false; } return result.get(); case ZMQ.ZMQ_CONFLATE: conflate = parseBoolean(option, optval); return true; case ZMQ.ZMQ_GSSAPI_SERVER: asServer = parseBoolean(option, optval); mechanism = Mechanisms.GSSAPI; return true; case ZMQ.ZMQ_GSSAPI_PRINCIPAL: gssPrincipal = parseString(option, optval); mechanism = Mechanisms.GSSAPI; return true; case ZMQ.ZMQ_GSSAPI_SERVICE_PRINCIPAL: gssServicePrincipal = parseString(option, optval); mechanism = Mechanisms.GSSAPI; return true; case ZMQ.ZMQ_GSSAPI_PLAINTEXT: gssPlaintext = parseBoolean(option, optval); return true; case ZMQ.ZMQ_HANDSHAKE_IVL: handshakeIvl = (Integer) optval; if (handshakeIvl < 0) { throw new IllegalArgumentException("handshakeIvl only accept positive values " + optval); } return true; case ZMQ.ZMQ_HEARTBEAT_IVL: heartbeatInterval 
= (Integer) optval; if (heartbeatInterval < 0) { throw new IllegalArgumentException("heartbeatInterval only accept positive values " + optval); } return true; case ZMQ.ZMQ_HEARTBEAT_TIMEOUT: heartbeatTimeout = (Integer) optval; if (heartbeatTimeout < 0) { throw new IllegalArgumentException("heartbeatTimeout only accept positive values " + optval); } return true; case ZMQ.ZMQ_HEARTBEAT_TTL: Integer value = (Integer) optval; // Convert this to deciseconds from milliseconds value /= 100; if (value >= 0 && value <= 6553) { heartbeatTtl = value; } else { throw new IllegalArgumentException("heartbeatTtl is out of range [0..655399]" + optval); } return true; case ZMQ.ZMQ_HEARTBEAT_CONTEXT: heartbeatContext = (byte[]) optval; if (heartbeatContext == null) { throw new IllegalArgumentException("heartbeatContext cannot be null"); } return true; case ZMQ.ZMQ_DECODER: decoder = checkCustomCodec(optval, IDecoder.class); rawSocket = true; // failure throws ZError.InstantiationException // if that line is reached, everything is fine return true; case ZMQ.ZMQ_ENCODER: encoder = checkCustomCodec(optval, IEncoder.class); rawSocket = true; // failure throws ZError.InstantiationException // if that line is reached, everything is fine return true; case ZMQ.ZMQ_MSG_ALLOCATOR: if (optval instanceof String) { try { allocator = allocator(Class.forName((String) optval)); return true; } catch (ClassNotFoundException e) { throw new IllegalArgumentException(e); } } else if (optval instanceof Class) { allocator = allocator((Class<?>) optval); return true; } else if (optval instanceof MsgAllocator) { allocator = (MsgAllocator) optval; return true; } return false; case ZMQ.ZMQ_MSG_ALLOCATION_HEAP_THRESHOLD: Integer allocationHeapThreshold = (Integer) optval; allocator = new MsgAllocatorThreshold(allocationHeapThreshold); return true; case ZMQ.ZMQ_SELECTOR_PROVIDERCHOOSER: if (optval instanceof String) { try { selectorChooser = chooser(Class.forName((String) optval)); return true; } catch 
(ClassNotFoundException e) { throw new IllegalArgumentException(e); } } else if (optval instanceof Class) { selectorChooser = chooser((Class<?>) optval); return true; } else if (optval instanceof SelectorProviderChooser) { selectorChooser = (SelectorProviderChooser) optval; return true; } return false; case ZMQ.ZMQ_HELLO_MSG: if (optval == null) { helloMsg = null; } else { byte[] bytes = parseBytes(option, optval); if (bytes.length == 0) { helloMsg = null; } else { helloMsg = new Msg(Arrays.copyOf(bytes, bytes.length)); } } return true; case ZMQ.ZMQ_DISCONNECT_MSG: if (optval == null) { disconnectMsg = null; } else { byte[] bytes = parseBytes(option, optval); if (bytes.length == 0) { disconnectMsg = null; } else { disconnectMsg = new Msg(Arrays.copyOf(bytes, bytes.length)); } } return true; case ZMQ.ZMQ_HICCUP_MSG: if (optval == null) { hiccupMsg = null; } else { byte[] bytes = parseBytes(option, optval); if (bytes.length == 0) { hiccupMsg = null; } else { hiccupMsg = new Msg(Arrays.copyOf(bytes, bytes.length)); } } return true; case ZMQ.ZMQ_AS_TYPE: this.asType = (Integer) optval; return true; case ZMQ.ZMQ_SELFADDR_PROPERTY_NAME: this.selfAddressPropertyName = parseString(option, optval); return true; default: throw new IllegalArgumentException("Unknown Option " + option); } }
@Test(expected = IllegalArgumentException.class)
public void testSelectorClassNameFailed() {
    // Passing a class name that does not implement SelectorProviderChooser
    // must be rejected with an IllegalArgumentException.
    final Options options = new Options();
    options.setSocketOpt(ZMQ.ZMQ_SELECTOR_PROVIDERCHOOSER, String.class.getName());
}
/**
 * Loads the SonarQube edition from the edition marker file on the classpath.
 * When the marker file is absent, the build is a Community edition.
 *
 * @param system accessor used to locate the classpath resource
 * @return the detected edition, never null
 * @throws IllegalStateException if the marker file exists but cannot be read
 */
public static SonarEdition loadEdition(System2 system) {
    URL editionResource = system.getResource(EDITION_FILE_PATH);
    if (editionResource == null) {
        // No marker file packaged: default to Community.
        return SonarEdition.COMMUNITY;
    }
    try (Scanner scanner = new Scanner(editionResource.openStream(), StandardCharsets.UTF_8)) {
        // The file's first line carries the edition name.
        return parseEdition(scanner.nextLine());
    } catch (IOException e) {
        throw new IllegalStateException(format(CAN_NOT_LOAD_FROM_CLASSPATH, EDITION_FILE_PATH), e);
    }
}
@Test
void load_edition_from_file_in_classpath() {
    // The runtime classpath always resolves to some edition (Community at minimum),
    // so the loader must never return null.
    assertThat(MetadataLoader.loadEdition(System2.INSTANCE)).isNotNull();
}
/**
 * Analyzes the given statement with the secondary flag disabled
 * (presumably describe/explain mode — confirm against the two-argument
 * overload's parameter name).
 *
 * @param statement the statement to analyze
 * @return the analysis result
 */
public Analysis analyze(Statement statement) {
    return analyze(statement, false);
}
// Window functions whose ORDER BY is a literal/constant are allowed by default
// but emit a performance warning; when the session property that permits them
// is disabled they must fail with WINDOW_FUNCTION_ORDERBY_LITERAL. ORDER BY on
// a real column is always accepted.
@Test
public void testWindowOrderByAnalysis() {
    // Default session: literal ORDER BY yields a performance warning only.
    assertHasWarning(analyzeWithWarnings("SELECT SUM(x) OVER (PARTITION BY y ORDER BY 1) AS s\n" +
            "FROM (values (1,10), (2, 10)) AS T(x, y)"), PERFORMANCE_WARNING,
            "ORDER BY literals/constants with window function:");
    assertHasWarning(analyzeWithWarnings("SELECT SUM(x) OVER (ORDER BY 1) AS s\n" +
            "FROM (values (1,10), (2, 10)) AS T(x, y)"), PERFORMANCE_WARNING,
            "ORDER BY literals/constants with window function:");

    // Now test for error when the session param is set to disallow this.
    Session session = testSessionBuilder(new SessionPropertyManager(new SystemSessionProperties(
            new QueryManagerConfig(),
            new TaskManagerConfig(),
            new MemoryManagerConfig(),
            new FeaturesConfig().setAllowWindowOrderByLiterals(false),
            new NodeMemoryConfig(),
            new WarningCollectorConfig(),
            new NodeSchedulerConfig(),
            new NodeSpillConfig(),
            new TracingConfig(),
            new CompilerConfig(),
            new HistoryBasedOptimizationConfig()))).build();
    assertFails(session, WINDOW_FUNCTION_ORDERBY_LITERAL,
            "SELECT SUM(x) OVER (PARTITION BY y ORDER BY 1) AS s\n" +
            "FROM (values (1,10), (2, 10)) AS T(x, y)");
    assertFails(session, WINDOW_FUNCTION_ORDERBY_LITERAL,
            "SELECT SUM(x) OVER (ORDER BY 1) AS s\n" +
            "FROM (values (1,10), (2, 10)) AS T(x, y)");
    // ORDER BY an actual column must still analyze cleanly under the strict session.
    analyze(session, "SELECT SUM(x) OVER (PARTITION BY y ORDER BY y) AS s\n" +
            "FROM (values (1,10), (2, 10)) AS T(x, y)");
}
@Override public Set<TransferItem> find(final CommandLine input, final TerminalAction action, final Path remote) { if(StringUtils.containsAny(remote.getName(), '*')) { // Treat asterisk as wildcard return Collections.singleton(new TransferItem(remote.getParent())); } return Collections.singleton(new TransferItem(remote)); }
// A plain remote path (no wildcard) must resolve to a single transfer item
// for exactly that file.
@Test
public void testFindFile() throws Exception {
    final CommandLineParser parser = new PosixParser();
    final CommandLine input = parser.parse(TerminalOptionsBuilder.options(),
            new String[]{"--delete", "rackspace://cdn.cyberduck.ch/remote"});
    assertTrue(new DeletePathFinder().find(input, TerminalAction.delete,
            new Path("/remote", EnumSet.of(Path.Type.file))).contains(
            new TransferItem(new Path("/remote", EnumSet.of(Path.Type.file)))
    ));
}
/**
 * Returns a fresh cursor over this hash-slot array's slots. The cursor must
 * be advanced before its key accessors are used (see the assertion tests).
 */
@Override
public HashSlotCursor12byteKey cursor() {
    return new CursorIntKey2();
}
// Calling key1() before advance() violates the cursor's protocol and must
// trip an assertion (AssertionError) when JVM assertions are enabled.
@Test(expected = AssertionError.class)
@RequireAssertEnabled
public void testCursor_key1_withoutAdvance() {
    HashSlotCursor12byteKey cursor = hsa.cursor();
    cursor.key1();
}
/**
 * Computes the F-score of the predictions against the ground truth using
 * this instance's configured beta and averaging strategy.
 *
 * @param truth      the ground-truth class labels
 * @param prediction the predicted class labels
 * @return the F-score
 */
@Override
public double score(int[] truth, int[] prediction) {
    return of(truth, prediction, beta, strategy);
}
// Macro-averaged F1 over a 6-class labeling (100 samples) with a handful of
// misclassifications; the expected score 0.8432 was computed externally.
@Test
public void testMacro() {
    System.out.println("Macro-FScore");
    int[] truth = {
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5
    };
    int[] prediction = {
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 2, 3, 2, 2, 2, 3, 1, 3, 3, 3, 4, 5, 4, 4, 4, 4, 1, 5, 5
    };
    FScore instance = new FScore(1.0, Averaging.Macro);
    double expResult = 0.8432;
    double result = instance.score(truth, prediction);
    assertEquals(expResult, result, 1E-4);
}
/**
 * Checks whether an array is {@code null} or contains no elements.
 *
 * @param array the array to test, may be {@code null}
 * @param <T>   the component type of the array
 * @return {@code true} if the array is {@code null} or empty
 */
public static <T> boolean isEmpty(T[] array) {
    if (array == null) {
        return true;
    }
    return array.length == 0;
}
@Test
public void isEmpty() {
    // Zero-length array -> empty.
    final Object[] array = {};
    Assert.assertTrue(CollectionKit.isEmpty(array));
    // An array holding a single null element still has length 1 -> not empty.
    final Object[] array2 = {null};
    Assert.assertFalse(CollectionKit.isEmpty(array2));
    // A null array reference counts as empty.
    final Object[] array3 = null;
    Assert.assertTrue(CollectionKit.isEmpty(array3));
    // Fresh collection -> empty.
    final ArrayList collection = new ArrayList();
    Assert.assertTrue(CollectionKit.isEmpty(collection));
    // A collection holding a null element is not empty.
    final ArrayList collection2 = new ArrayList();
    collection2.add(null);
    Assert.assertFalse(CollectionKit.isEmpty(collection2));
    // A null collection reference counts as empty.
    final ArrayList collection3 = null;
    Assert.assertTrue(CollectionKit.isEmpty(collection3));
}
/**
 * Validates parsed payload definitions before they are handed to the payload
 * generator. Every payload must declare a name, interpretation and execution
 * environments, at least one vulnerability type, and a payload string.
 * Callback-server payloads must reference the $TSUNAMI_PAYLOAD_TOKEN_URL
 * placeholder; all other payloads must declare how they are validated (and a
 * regex when the validation type is VALIDATION_REGEX).
 *
 * @param payloads the parsed payload definitions
 * @return an immutable copy of {@code payloads}
 * @throws IllegalArgumentException if any payload definition is malformed
 */
ImmutableList<PayloadDefinition> validatePayloads(List<PayloadDefinition> payloads) {
  for (PayloadDefinition p : payloads) {
    checkArgument(p.hasName(), "Parsed payload does not have a name.");
    checkArgument(
        p.getInterpretationEnvironment()
            != PayloadGeneratorConfig.InterpretationEnvironment
                .INTERPRETATION_ENVIRONMENT_UNSPECIFIED,
        "Parsed payload does not have an interpretation_environment.");
    checkArgument(
        p.getExecutionEnvironment()
            != PayloadGeneratorConfig.ExecutionEnvironment.EXECUTION_ENVIRONMENT_UNSPECIFIED,
        // Fixed typo in the error message: previously read "exeuction_environment".
        "Parsed payload does not have an execution_environment.");
    checkArgument(
        !p.getVulnerabilityTypeList().isEmpty(),
        "Parsed payload has no entries for vulnerability_type.");
    checkArgument(p.hasPayloadString(), "Parsed payload does not have a payload_string.");
    if (p.getUsesCallbackServer().getValue()) {
      // Callback payloads must embed the token URL so the server can correlate hits.
      checkArgument(
          p.getPayloadString().getValue().contains("$TSUNAMI_PAYLOAD_TOKEN_URL"),
          "Parsed payload uses callback server but $TSUNAMI_PAYLOAD_TOKEN_URL not found in"
              + " payload_string.");
    } else {
      checkArgument(
          p.getValidationType() != PayloadValidationType.VALIDATION_TYPE_UNSPECIFIED,
          "Parsed payload has no validation_type and does not use the callback server.");
      if (p.getValidationType() == PayloadValidationType.VALIDATION_REGEX) {
        checkArgument(
            p.hasValidationRegex(),
            "Parsed payload has no validation_regex but uses PayloadValidationType.REGEX");
      }
    }
  }
  return ImmutableList.copyOf(payloads);
}
// Two well-formed payloads (one callback-based, one validation-based) must
// pass validation unchanged and keep their input order.
@Test
public void validatePayloads_withGoodPayloads_returnsPayloads() throws IOException {
    PayloadDefinition p0 = goodCallbackDefinition.build();
    PayloadDefinition p1 = goodNoCallbackDefinition.build();
    ImmutableList<PayloadDefinition> payloads = module.validatePayloads(ImmutableList.of(p0, p1));
    assertThat(payloads).containsExactly(p0, p1).inOrder();
}
/**
 * Called when the partition request for this channel failed; asks the input
 * gate to check the producer's partition state for this channel.
 */
public void onFailedPartitionRequest() {
    inputGate.triggerPartitionStateCheck(partitionId, channelInfo);
}
// A failed partition request must trigger a producer-state check through the
// gate's PartitionProducerStateProvider.
@Test
void testOnFailedPartitionRequest() {
    final ResultPartitionID partitionId = new ResultPartitionID();
    final TestPartitionProducerStateProvider provider =
            new TestPartitionProducerStateProvider(partitionId);
    final SingleInputGate inputGate =
            new SingleInputGateBuilder().setPartitionProducerStateProvider(provider).build();
    final RemoteInputChannel ch = InputChannelBuilder.newBuilder()
            .setPartitionId(partitionId)
            .buildRemoteChannel(inputGate);
    ch.onFailedPartitionRequest();
    assertThat(provider.isInvoked()).isTrue();
}
/**
 * Sets the cycle-report flag (presumably whether metadata is re-reported
 * periodically — confirm against MetadataReportConfig).
 *
 * @param cycleReport the flag value; may be {@code null} to leave it unset
 * @return this builder for chaining
 */
public MetadataReportBuilder cycleReport(Boolean cycleReport) {
    this.cycleReport = cycleReport;
    return getThis();
}
// The builder must pass true, false and null (unset) through to the built config.
@Test
void cycleReport() {
    MetadataReportBuilder builder = new MetadataReportBuilder();
    builder.cycleReport(true);
    Assertions.assertTrue(builder.build().getCycleReport());
    builder.cycleReport(false);
    Assertions.assertFalse(builder.build().getCycleReport());
    builder.cycleReport(null);
    Assertions.assertNull(builder.build().getCycleReport());
}
/**
 * Fits a gradient tree boosting model with default hyper-parameters.
 *
 * @param formula the model formula (response ~ predictors)
 * @param data    the training data frame
 * @return the fitted model
 */
public static GradientTreeBoost fit(Formula formula, DataFrame data) {
    // Delegate to the property-driven overload with empty (default) settings.
    return fit(formula, data, new Properties());
}
// 10-fold cross-validation on the Breast Cancer dataset; the seed is fixed so
// the measured accuracy (~0.962) is repeatable.
@Test
public void testBreastCancer() {
    System.out.println("Breast Cancer");
    MathEx.setSeed(19650218); // to get repeatable results.
    ClassificationValidations<GradientTreeBoost> result =
            CrossValidation.classification(10, BreastCancer.formula, BreastCancer.data,
                    (f, x) -> GradientTreeBoost.fit(f, x, 100, 20, 6, 5, 0.05, 0.7));
    System.out.println(result);
    assertEquals(0.962, result.avg.accuracy, 0.003);
}
/**
 * Decodes SPDY frames from the given cumulative buffer, driving a state
 * machine and forwarding decoded frames to the delegate. The method returns
 * as soon as the buffer lacks a complete unit for the current state;
 * decoding resumes from that state on the next call.
 *
 * @param buffer the cumulative input buffer; consumed bytes are skipped
 */
public void decode(ByteBuf buffer) {
    boolean last;
    int statusCode;
    while (true) {
        switch(state) {
            case READ_COMMON_HEADER:
                // Need the full common header before classifying the frame.
                if (buffer.readableBytes() < SPDY_HEADER_SIZE) {
                    return;
                }
                int frameOffset = buffer.readerIndex();
                int flagsOffset = frameOffset + SPDY_HEADER_FLAGS_OFFSET;
                int lengthOffset = frameOffset + SPDY_HEADER_LENGTH_OFFSET;
                buffer.skipBytes(SPDY_HEADER_SIZE);

                // The high bit of the first byte distinguishes control from data frames.
                boolean control = (buffer.getByte(frameOffset) & 0x80) != 0;

                int version;
                int type;
                if (control) {
                    // Decode control frame common header
                    version = getUnsignedShort(buffer, frameOffset) & 0x7FFF;
                    type = getUnsignedShort(buffer, frameOffset + SPDY_HEADER_TYPE_OFFSET);
                    streamId = 0; // Default to session Stream-ID
                } else {
                    // Decode data frame common header
                    version = spdyVersion; // Default to expected version
                    type = SPDY_DATA_FRAME;
                    streamId = getUnsignedInt(buffer, frameOffset);
                }

                flags = buffer.getByte(flagsOffset);
                length = getUnsignedMedium(buffer, lengthOffset);

                // Check version first then validity
                if (version != spdyVersion) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid SPDY Version");
                } else if (!isValidFrameHeader(streamId, type, flags, length)) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid Frame Error");
                } else {
                    state = getNextState(type, length);
                }
                break;

            case READ_DATA_FRAME:
                if (length == 0) {
                    state = State.READ_COMMON_HEADER;
                    delegate.readDataFrame(streamId, hasFlag(flags, SPDY_DATA_FLAG_FIN), Unpooled.buffer(0));
                    break;
                }

                // Generate data frames that do not exceed maxChunkSize
                int dataLength = Math.min(maxChunkSize, length);

                // Wait until entire frame is readable
                if (buffer.readableBytes() < dataLength) {
                    return;
                }
                ByteBuf data = buffer.alloc().buffer(dataLength);
                data.writeBytes(buffer, dataLength);
                length -= dataLength;

                if (length == 0) {
                    state = State.READ_COMMON_HEADER;
                }

                // FIN is only reported on the final chunk of the frame.
                last = length == 0 && hasFlag(flags, SPDY_DATA_FLAG_FIN);

                delegate.readDataFrame(streamId, last, data);
                break;

            case READ_SYN_STREAM_FRAME:
                if (buffer.readableBytes() < 10) {
                    return;
                }
                int offset = buffer.readerIndex();
                streamId = getUnsignedInt(buffer, offset);
                int associatedToStreamId = getUnsignedInt(buffer, offset + 4);
                // Priority is carried in the top 3 bits of the 9th byte.
                byte priority = (byte) (buffer.getByte(offset + 8) >> 5 & 0x07);
                last = hasFlag(flags, SPDY_FLAG_FIN);
                boolean unidirectional = hasFlag(flags, SPDY_FLAG_UNIDIRECTIONAL);
                buffer.skipBytes(10);
                length -= 10;

                if (streamId == 0) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid SYN_STREAM Frame");
                } else {
                    state = State.READ_HEADER_BLOCK;
                    delegate.readSynStreamFrame(streamId, associatedToStreamId, priority, last, unidirectional);
                }
                break;

            case READ_SYN_REPLY_FRAME:
                if (buffer.readableBytes() < 4) {
                    return;
                }
                streamId = getUnsignedInt(buffer, buffer.readerIndex());
                last = hasFlag(flags, SPDY_FLAG_FIN);
                buffer.skipBytes(4);
                length -= 4;

                if (streamId == 0) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid SYN_REPLY Frame");
                } else {
                    state = State.READ_HEADER_BLOCK;
                    delegate.readSynReplyFrame(streamId, last);
                }
                break;

            case READ_RST_STREAM_FRAME:
                if (buffer.readableBytes() < 8) {
                    return;
                }
                streamId = getUnsignedInt(buffer, buffer.readerIndex());
                statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
                buffer.skipBytes(8);

                // Both the stream-id and the status code must be non-zero.
                if (streamId == 0 || statusCode == 0) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid RST_STREAM Frame");
                } else {
                    state = State.READ_COMMON_HEADER;
                    delegate.readRstStreamFrame(streamId, statusCode);
                }
                break;

            case READ_SETTINGS_FRAME:
                if (buffer.readableBytes() < 4) {
                    return;
                }
                boolean clear = hasFlag(flags, SPDY_SETTINGS_CLEAR);
                numSettings = getUnsignedInt(buffer, buffer.readerIndex());
                buffer.skipBytes(4);
                length -= 4;

                // Validate frame length against number of entries. Each ID/Value entry is 8 bytes.
                if ((length & 0x07) != 0 || length >> 3 != numSettings) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid SETTINGS Frame");
                } else {
                    state = State.READ_SETTING;
                    delegate.readSettingsFrame(clear);
                }
                break;

            case READ_SETTING:
                if (numSettings == 0) {
                    state = State.READ_COMMON_HEADER;
                    delegate.readSettingsEnd();
                    break;
                }

                if (buffer.readableBytes() < 8) {
                    return;
                }
                // Each setting entry: 1 byte flags, 3 bytes id, 4 bytes value.
                byte settingsFlags = buffer.getByte(buffer.readerIndex());
                int id = getUnsignedMedium(buffer, buffer.readerIndex() + 1);
                int value = getSignedInt(buffer, buffer.readerIndex() + 4);
                boolean persistValue = hasFlag(settingsFlags, SPDY_SETTINGS_PERSIST_VALUE);
                boolean persisted = hasFlag(settingsFlags, SPDY_SETTINGS_PERSISTED);
                buffer.skipBytes(8);
                --numSettings;

                delegate.readSetting(id, value, persistValue, persisted);
                break;

            case READ_PING_FRAME:
                if (buffer.readableBytes() < 4) {
                    return;
                }
                int pingId = getSignedInt(buffer, buffer.readerIndex());
                buffer.skipBytes(4);
                state = State.READ_COMMON_HEADER;
                delegate.readPingFrame(pingId);
                break;

            case READ_GOAWAY_FRAME:
                if (buffer.readableBytes() < 8) {
                    return;
                }
                int lastGoodStreamId = getUnsignedInt(buffer, buffer.readerIndex());
                statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
                buffer.skipBytes(8);
                state = State.READ_COMMON_HEADER;
                delegate.readGoAwayFrame(lastGoodStreamId, statusCode);
                break;

            case READ_HEADERS_FRAME:
                if (buffer.readableBytes() < 4) {
                    return;
                }
                streamId = getUnsignedInt(buffer, buffer.readerIndex());
                last = hasFlag(flags, SPDY_FLAG_FIN);
                buffer.skipBytes(4);
                length -= 4;

                if (streamId == 0) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid HEADERS Frame");
                } else {
                    state = State.READ_HEADER_BLOCK;
                    delegate.readHeadersFrame(streamId, last);
                }
                break;

            case READ_WINDOW_UPDATE_FRAME:
                if (buffer.readableBytes() < 8) {
                    return;
                }
                streamId = getUnsignedInt(buffer, buffer.readerIndex());
                int deltaWindowSize = getUnsignedInt(buffer, buffer.readerIndex() + 4);
                buffer.skipBytes(8);

                // A zero window delta is invalid; note that reserved high bits are
                // already stripped by getUnsignedInt.
                if (deltaWindowSize == 0) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid WINDOW_UPDATE Frame");
                } else {
                    state = State.READ_COMMON_HEADER;
                    delegate.readWindowUpdateFrame(streamId, deltaWindowSize);
                }
                break;

            case READ_HEADER_BLOCK:
                if (length == 0) {
                    state = State.READ_COMMON_HEADER;
                    delegate.readHeaderBlockEnd();
                    break;
                }

                if (!buffer.isReadable()) {
                    return;
                }
                // Forward whatever compressed header bytes are available so far.
                int compressedBytes = Math.min(buffer.readableBytes(), length);
                ByteBuf headerBlock = buffer.alloc().buffer(compressedBytes);
                headerBlock.writeBytes(buffer, compressedBytes);
                length -= compressedBytes;

                delegate.readHeaderBlock(headerBlock);
                break;

            case DISCARD_FRAME:
                // Skip the remainder of a frame whose payload is not needed.
                int numBytes = Math.min(buffer.readableBytes(), length);
                buffer.skipBytes(numBytes);
                length -= numBytes;
                if (length == 0) {
                    state = State.READ_COMMON_HEADER;
                    break;
                }
                return;

            case FRAME_ERROR:
                // After an error, drop all remaining input.
                buffer.skipBytes(buffer.readableBytes());
                return;

            default:
                throw new Error("Shouldn't reach here.");
        }
    }
}
// The reserved high bit of the stream-id and of the window delta in a
// WINDOW_UPDATE frame (control type 9) must be ignored by the decoder.
@Test
public void testReservedSpdyWindowUpdateFrameBits() throws Exception {
    short type = 9;
    byte flags = 0;
    int length = 8;
    int streamId = RANDOM.nextInt() & 0x7FFFFFFF;
    // Force the delta to be odd so it is never zero (zero would be invalid).
    int deltaWindowSize = RANDOM.nextInt() & 0x7FFFFFFF | 0x01;
    ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length);
    encodeControlFrameHeader(buf, type, flags, length);
    buf.writeInt(streamId | 0x80000000); // should ignore reserved bit
    buf.writeInt(deltaWindowSize | 0x80000000); // should ignore reserved bit
    decoder.decode(buf);
    verify(delegate).readWindowUpdateFrame(streamId, deltaWindowSize);
    assertFalse(buf.isReadable());
    buf.release();
}
/**
 * Computes the change contributed by a boolean flag between two observations.
 *
 * @param prev the previous value of the flag
 * @param next the new value of the flag
 * @return 1 on a false-to-true transition, -1 on true-to-false, 0 otherwise
 */
static int delta(boolean prev, boolean next) {
    // Encode each flag as 0/1 and subtract: covers all four transitions.
    final int before = prev ? 1 : 0;
    final int after = next ? 1 : 0;
    return after - before;
}
// Exhaustively check all four boolean transitions handled by delta().
@Test
public void testDelta() {
    assertEquals(0, ControllerMetricsChanges.delta(false, false));
    assertEquals(1, ControllerMetricsChanges.delta(false, true));
    assertEquals(-1, ControllerMetricsChanges.delta(true, false));
    assertEquals(0, ControllerMetricsChanges.delta(true, true));
}
/**
 * Binds this bit set to a region of the given memory segment.
 *
 * @param memorySegment the backing segment, must not be null
 * @param offset        the start offset within the segment, must be >= 0 and
 *                      leave at least {@code byteLength} bytes available
 * @throws IllegalArgumentException if any precondition is violated
 */
public void setMemorySegment(MemorySegment memorySegment, int offset) {
    Preconditions.checkArgument(memorySegment != null, "MemorySegment can not be null.");
    // Message fix: offset 0 is accepted, so the requirement is "non-negative",
    // not "positive" as the old message claimed.
    Preconditions.checkArgument(offset >= 0, "Offset should be a non-negative integer.");
    Preconditions.checkArgument(
            offset + byteLength <= memorySegment.size(),
            "Could not set MemorySegment, the remaining buffer is not enough.");
    this.memorySegment = memorySegment;
    this.offset = offset;
}
// Binding at offset 1 leaves fewer bytes than the bit set requires, so the
// precondition check must fail with IllegalArgumentException.
@TestTemplate
void verifyBitSetSize1() {
    assertThatThrownBy(() -> bitSet.setMemorySegment(memorySegment, 1))
            .isInstanceOf(IllegalArgumentException.class);
}
/**
 * Serializes a SCRAM credential as comma-separated {@code key=value} pairs;
 * the binary salt and key fields are base64-encoded, iterations is numeric.
 *
 * @param credential the credential to serialize
 * @return the textual representation
 */
public static String credentialToString(ScramCredential credential) {
    final Base64.Encoder encoder = Base64.getEncoder();
    final String salt = encoder.encodeToString(credential.salt());
    final String storedKey = encoder.encodeToString(credential.storedKey());
    final String serverKey = encoder.encodeToString(credential.serverKey());
    return String.format("%s=%s,%s=%s,%s=%s,%s=%d",
            SALT, salt,
            STORED_KEY, storedKey,
            SERVER_KEY, serverKey,
            ITERATIONS, credential.iterations());
}
// Two credentials derived from the same password must still serialize
// differently, because a fresh random salt is drawn each time.
@Test
public void generateCredential() {
    ScramCredential credential1 = formatter.generateCredential("password", 4096);
    ScramCredential credential2 = formatter.generateCredential("password", 4096);
    // Random salt should ensure that the credentials persisted are different every time
    assertNotEquals(ScramCredentialUtils.credentialToString(credential1),
            ScramCredentialUtils.credentialToString(credential2));
}
@Override public Set<Device> allocateDevices(Set<Device> availableDevices, int count, Map<String, String> env) { // Can consider topology, utilization.etc Set<Device> allocated = new HashSet<>(); int number = 0; for (Device d : availableDevices) { allocated.add(d); number++; if (number == count) { break; } } return allocated; }
// Requesting exactly as many devices as are available must allocate all of them.
@Test
public void testAllocateMultipleDevices() throws ResourceHandlerException, IOException {
    setupTestDirectoryWithScript();
    plugin = new NECVEPlugin(envProvider, defaultSearchDirs, udevUtil);
    Set<Device> available = new HashSet<>();
    Device device0 = getTestDevice(0);
    Device device1 = getTestDevice(1);
    available.add(device0);
    available.add(device1);
    Set<Device> allocated = plugin.allocateDevices(available, 2, env);
    assertEquals("No. of devices", 2, allocated.size());
    assertTrue("Device missing", allocated.contains(device0));
    assertTrue("Device missing", allocated.contains(device1));
}
/**
 * Replays a single coordinator record against the appropriate in-memory
 * manager. The record key's version acts as the record-type discriminator:
 * versions 0-1 are offset commits, 2 is classic group metadata, 3-8 are
 * consumer-group records and 9-14 are share-group records. A null record
 * value (tombstone) is forwarded as null via Utils.messageOrNull.
 *
 * @param offset        the offset of the record in the log
 * @param producerId    the producer id of the batch containing the record
 * @param producerEpoch the producer epoch of the batch containing the record
 * @param record        the record to replay
 * @throws IllegalStateException if the record type is unknown
 */
@Override
public void replay(
    long offset,
    long producerId,
    short producerEpoch,
    CoordinatorRecord record
) throws RuntimeException {
    ApiMessageAndVersion key = record.key();
    ApiMessageAndVersion value = record.value();

    switch (key.version()) {
        case 0:
        case 1:
            // Offset commit records go to the offset manager; all others below
            // go to the group metadata manager.
            offsetMetadataManager.replay(
                offset,
                producerId,
                (OffsetCommitKey) key.message(),
                (OffsetCommitValue) Utils.messageOrNull(value)
            );
            break;

        case 2:
            // Classic (pre-KIP-848) group metadata.
            groupMetadataManager.replay(
                (GroupMetadataKey) key.message(),
                (GroupMetadataValue) Utils.messageOrNull(value)
            );
            break;

        case 3:
            groupMetadataManager.replay(
                (ConsumerGroupMetadataKey) key.message(),
                (ConsumerGroupMetadataValue) Utils.messageOrNull(value)
            );
            break;

        case 4:
            groupMetadataManager.replay(
                (ConsumerGroupPartitionMetadataKey) key.message(),
                (ConsumerGroupPartitionMetadataValue) Utils.messageOrNull(value)
            );
            break;

        case 5:
            groupMetadataManager.replay(
                (ConsumerGroupMemberMetadataKey) key.message(),
                (ConsumerGroupMemberMetadataValue) Utils.messageOrNull(value)
            );
            break;

        case 6:
            groupMetadataManager.replay(
                (ConsumerGroupTargetAssignmentMetadataKey) key.message(),
                (ConsumerGroupTargetAssignmentMetadataValue) Utils.messageOrNull(value)
            );
            break;

        case 7:
            groupMetadataManager.replay(
                (ConsumerGroupTargetAssignmentMemberKey) key.message(),
                (ConsumerGroupTargetAssignmentMemberValue) Utils.messageOrNull(value)
            );
            break;

        case 8:
            groupMetadataManager.replay(
                (ConsumerGroupCurrentMemberAssignmentKey) key.message(),
                (ConsumerGroupCurrentMemberAssignmentValue) Utils.messageOrNull(value)
            );
            break;

        case 9:
            // Share-group record types start here.
            groupMetadataManager.replay(
                (ShareGroupPartitionMetadataKey) key.message(),
                (ShareGroupPartitionMetadataValue) Utils.messageOrNull(value)
            );
            break;

        case 10:
            groupMetadataManager.replay(
                (ShareGroupMemberMetadataKey) key.message(),
                (ShareGroupMemberMetadataValue) Utils.messageOrNull(value)
            );
            break;

        case 11:
            groupMetadataManager.replay(
                (ShareGroupMetadataKey) key.message(),
                (ShareGroupMetadataValue) Utils.messageOrNull(value)
            );
            break;

        case 12:
            groupMetadataManager.replay(
                (ShareGroupTargetAssignmentMetadataKey) key.message(),
                (ShareGroupTargetAssignmentMetadataValue) Utils.messageOrNull(value)
            );
            break;

        case 13:
            groupMetadataManager.replay(
                (ShareGroupTargetAssignmentMemberKey) key.message(),
                (ShareGroupTargetAssignmentMemberValue) Utils.messageOrNull(value)
            );
            break;

        case 14:
            groupMetadataManager.replay(
                (ShareGroupCurrentMemberAssignmentKey) key.message(),
                (ShareGroupCurrentMemberAssignmentValue) Utils.messageOrNull(value)
            );
            break;

        default:
            throw new IllegalStateException("Received an unknown record type "
                + key.version() + " in " + record);
    }
}
// replay() must reject a record whose key is null with a NullPointerException.
@Test
public void testReplayKeyCannotBeNull() {
    GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class);
    OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class);
    CoordinatorMetrics coordinatorMetrics = mock(CoordinatorMetrics.class);
    CoordinatorMetricsShard metricsShard = mock(CoordinatorMetricsShard.class);
    GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
        new LogContext(),
        groupMetadataManager,
        offsetMetadataManager,
        Time.SYSTEM,
        new MockCoordinatorTimer<>(Time.SYSTEM),
        mock(GroupCoordinatorConfig.class),
        coordinatorMetrics,
        metricsShard
    );
    assertThrows(NullPointerException.class, () -> coordinator.replay(
        0L,
        RecordBatch.NO_PRODUCER_ID,
        RecordBatch.NO_PRODUCER_EPOCH,
        new CoordinatorRecord(null, null))
    );
}
/**
 * Gets or creates the segment for the given id if it is still live, then
 * drops expired segments as of {@code streamTime}. Cleanup runs AFTER the
 * lookup, so the segment being requested is never reclaimed by this call.
 */
@Override
public TimestampedSegment getOrCreateSegmentIfLive(final long segmentId, final ProcessorContext context, final long streamTime) {
    final TimestampedSegment segment = super.getOrCreateSegmentIfLive(segmentId, context, streamTime);
    cleanupExpiredSegments(streamTime);
    return segment;
}
// A segment created via getOrCreateSegmentIfLive must be retrievable again
// through getSegmentForTimestamp.
@Test
public void shouldGetSegmentForTimestamp() {
    final TimestampedSegment segment = segments.getOrCreateSegmentIfLive(0, context, -1L);
    segments.getOrCreateSegmentIfLive(1, context, -1L);
    assertEquals(segment, segments.getSegmentForTimestamp(0L));
}
@Override
public void deleteDiyPage(Long id) {
    // Validate that the page exists (throws a service exception if not).
    validateDiyPageExists(id);
    // Delete the page record.
    diyPageMapper.deleteById(id);
}
@Test
public void testDeleteDiyPage_notExists() {
    // Prepare parameters: a random id that does not exist.
    Long id = randomLongId();
    // Invoke and assert the expected service exception.
    assertServiceException(() -> diyPageService.deleteDiyPage(id), DIY_PAGE_NOT_EXISTS);
}
/**
 * Checks whether the Nacos logging appender still needs to be installed into
 * the current Log4j2 configuration.
 *
 * @return {@code true} if no appender named APPENDER_MARK is registered yet
 */
@Override
public boolean isNeedReloadConfiguration() {
    final LoggerContext loggerContext = (LoggerContext) LogManager.getContext(false);
    final Configuration contextConfiguration = loggerContext.getConfiguration();
    // Only the appender names matter, so iterate values() directly instead of
    // walking the entrySet and ignoring the keys.
    for (Appender appender : contextConfiguration.getAppenders().values()) {
        if (APPENDER_MARK.equals(appender.getName())) {
            return false;
        }
    }
    return true;
}
// Reload is required until loadConfiguration() installs the marker appender;
// afterwards the appender is found and no reload is needed.
@Test
void testIsNeedReloadConfiguration() {
    assertTrue(log4J2NacosLoggingAdapter.isNeedReloadConfiguration());
    log4J2NacosLoggingAdapter.loadConfiguration(nacosLoggingProperties);
    assertFalse(log4J2NacosLoggingAdapter.isNeedReloadConfiguration());
}
/**
 * Computes the {@code QueryInfo} (query type plus previous / begin / end
 * instants and the ordering columns) for an incremental pull from a Hudi
 * source table. Falls back to a snapshot query when the requested begin
 * instant pre-dates the active timeline and the missing-checkpoint strategy
 * is not READ_LATEST.
 *
 * @param jssc                     Spark context used to obtain the Hadoop configuration
 * @param srcBasePath              base path of the source Hudi table
 * @param numInstantsPerFetch      maximum number of instants to consume per fetch; must be positive
 * @param beginInstant             checkpointed begin instant, if any
 * @param missingCheckpointStrategy what to do when no checkpoint exists
 * @param handlingMode             how hollow commits are handled
 * @param orderColumn              column used for ordering within the batch
 * @param keyColumn                record key column
 * @param limitColumn              column used for source-limit accounting
 * @param sourceLimitBasedBatching whether source-limit based batching is enabled
 * @param lastCheckpointKey        key of the last checkpoint, if any
 * @return the query info describing the next read
 */
public static QueryInfo generateQueryInfo(JavaSparkContext jssc, String srcBasePath,
                                          int numInstantsPerFetch, Option<String> beginInstant,
                                          MissingCheckpointStrategy missingCheckpointStrategy,
                                          HollowCommitHandling handlingMode,
                                          String orderColumn, String keyColumn, String limitColumn,
                                          boolean sourceLimitBasedBatching,
                                          Option<String> lastCheckpointKey) {
    ValidationUtils.checkArgument(numInstantsPerFetch > 0,
        "Make sure the config hoodie.streamer.source.hoodieincr.num_instants is set to a positive value");
    HoodieTableMetaClient srcMetaClient = HoodieTableMetaClient.builder()
        .setConf(HadoopFSUtils.getStorageConfWithCopy(jssc.hadoopConfiguration()))
        .setBasePath(srcBasePath).setLoadActiveTimelineOnLoad(true).build();

    HoodieTimeline completedCommitTimeline = srcMetaClient.getCommitsAndCompactionTimeline().filterCompletedInstants();
    final HoodieTimeline activeCommitTimeline = handleHollowCommitIfNeeded(completedCommitTimeline, srcMetaClient, handlingMode);
    // Under USE_TRANSITION_TIME the checkpoint tracks completion time rather
    // than the commit timestamp.
    Function<HoodieInstant, String> timestampForLastInstant = instant -> handlingMode == HollowCommitHandling.USE_TRANSITION_TIME
        ? instant.getCompletionTime() : instant.getTimestamp();

    // Resolve the begin instant: use the checkpoint when present, otherwise
    // apply the configured missing-checkpoint strategy.
    String beginInstantTime = beginInstant.orElseGet(() -> {
      if (missingCheckpointStrategy != null) {
        if (missingCheckpointStrategy == MissingCheckpointStrategy.READ_LATEST) {
          Option<HoodieInstant> lastInstant = activeCommitTimeline.lastInstant();
          return lastInstant.map(hoodieInstant -> getStrictlyLowerTimestamp(timestampForLastInstant.apply(hoodieInstant))).orElse(DEFAULT_BEGIN_TIMESTAMP);
        } else {
          return DEFAULT_BEGIN_TIMESTAMP;
        }
      } else {
        throw new IllegalArgumentException("Missing begin instant for incremental pull. For reading from latest "
            + "committed instant set hoodie.streamer.source.hoodieincr.missing.checkpoint.strategy to a valid value");
      }
    });

    // When `beginInstantTime` is present, `previousInstantTime` is set to the completed commit before `beginInstantTime` if that exists.
    // If there is no completed commit before `beginInstantTime`, e.g., `beginInstantTime` is the first commit in the active timeline,
    // `previousInstantTime` is set to `DEFAULT_BEGIN_TIMESTAMP`.
    String previousInstantTime = DEFAULT_BEGIN_TIMESTAMP;
    if (!beginInstantTime.equals(DEFAULT_BEGIN_TIMESTAMP)) {
      Option<HoodieInstant> previousInstant = activeCommitTimeline.findInstantBefore(beginInstantTime);
      if (previousInstant.isPresent()) {
        previousInstantTime = previousInstant.get().getTimestamp();
      } else {
        // if begin instant time matches first entry in active timeline, we can set previous = beginInstantTime - 1
        if (activeCommitTimeline.filterCompletedInstants().firstInstant().isPresent()
            && activeCommitTimeline.filterCompletedInstants().firstInstant().get().getTimestamp().equals(beginInstantTime)) {
          previousInstantTime = String.valueOf(Long.parseLong(beginInstantTime) - 1);
        }
      }
    }

    if (missingCheckpointStrategy == MissingCheckpointStrategy.READ_LATEST || !activeCommitTimeline.isBeforeTimelineStarts(beginInstantTime)) {
      Option<HoodieInstant> nthInstant;
      // When we are in the upgrade code path from non-sourcelimit-based batching to sourcelimit-based batching, we need to avoid fetching the commit
      // that is read already. Else we will have duplicates in append-only use case if we use "findInstantsAfterOrEquals".
      // As soon as we have a new format of checkpoint and a key we will move to the new code of fetching the current commit as well.
      if (sourceLimitBasedBatching && lastCheckpointKey.isPresent()) {
        nthInstant = Option.fromJavaOptional(activeCommitTimeline
            .findInstantsAfterOrEquals(beginInstantTime, numInstantsPerFetch).getInstantsAsStream().reduce((x, y) -> y));
      } else {
        nthInstant = Option.fromJavaOptional(activeCommitTimeline
            .findInstantsAfter(beginInstantTime, numInstantsPerFetch).getInstantsAsStream().reduce((x, y) -> y));
      }
      return new QueryInfo(DataSourceReadOptions.QUERY_TYPE_INCREMENTAL_OPT_VAL(), previousInstantTime, beginInstantTime,
          nthInstant.map(HoodieInstant::getTimestamp).orElse(beginInstantTime), orderColumn, keyColumn, limitColumn);
    } else {
      // when MissingCheckpointStrategy is set to read everything until latest, trigger snapshot query.
      Option<HoodieInstant> lastInstant = activeCommitTimeline.lastInstant();
      return new QueryInfo(DataSourceReadOptions.QUERY_TYPE_SNAPSHOT_OPT_VAL(), previousInstantTime, beginInstantTime,
          lastInstant.get().getTimestamp(), orderColumn, keyColumn, limitColumn);
    }
}
// Verifies the previous/start/end instants computed by generateQueryInfo for
// two consecutive commits, starting first from commit 1 and then from commit 2.
@Test
void testQueryInfoGeneration() throws IOException {
    String commitTimeForReads = "1";
    String commitTimeForWrites = "2";
    Pair<String, List<HoodieRecord>> inserts = writeS3MetadataRecords(commitTimeForReads);
    inserts = writeS3MetadataRecords(commitTimeForWrites);

    String startInstant = commitTimeForReads;
    String orderColumn = "_hoodie_commit_time";
    String keyColumn = "s3.object.key";
    String limitColumn = "s3.object.size";
    QueryInfo queryInfo = IncrSourceHelper.generateQueryInfo(jsc, basePath(), 5,
        Option.of(startInstant), null, TimelineUtils.HollowCommitHandling.BLOCK,
        orderColumn, keyColumn, limitColumn, true, Option.empty());
    // Starting at the first commit: previous = start - 1.
    assertEquals(String.valueOf(Integer.parseInt(commitTimeForReads) - 1), queryInfo.getPreviousInstant());
    assertEquals(commitTimeForReads, queryInfo.getStartInstant());
    assertEquals(commitTimeForWrites, queryInfo.getEndInstant());

    startInstant = commitTimeForWrites;
    queryInfo = IncrSourceHelper.generateQueryInfo(jsc, basePath(), 5,
        Option.of(startInstant), null, TimelineUtils.HollowCommitHandling.BLOCK,
        orderColumn, keyColumn, limitColumn, true, Option.empty());
    // Starting at the second commit: previous is the first commit.
    assertEquals(commitTimeForReads, queryInfo.getPreviousInstant());
    assertEquals(commitTimeForWrites, queryInfo.getStartInstant());
    assertEquals(commitTimeForWrites, queryInfo.getEndInstant());
}
/**
 * Reads entries [firstEntry, lastEntry] from the ledger, de-duplicating concurrent
 * reads of overlapping ranges: a caller either attaches as a listener to an existing
 * in-flight {@code PendingRead} that covers (part of) its range, or registers a new one.
 *
 * When the matched pending read covers only part of the requested range, the listener
 * is wrapped so that the missing prefix ("left") and/or suffix ("right") are fetched
 * with additional reads and the three partial results are concatenated in order before
 * invoking the caller's callback. On any failure, already-obtained entries are released
 * to avoid ref-count leaks before the failure is propagated.
 *
 * @param lh               ledger read handle
 * @param firstEntry       first entry id (inclusive)
 * @param lastEntry        last entry id (inclusive)
 * @param shouldCacheEntry whether read entries should be inserted into the cache
 * @param callback         completion callback receiving the full ordered entry list
 * @param ctx              opaque caller context passed back through the callback
 */
void readEntries(ReadHandle lh, long firstEntry, long lastEntry, boolean shouldCacheEntry,
                 final AsyncCallbacks.ReadEntriesCallback callback, Object ctx) {
    final PendingReadKey key = new PendingReadKey(firstEntry, lastEntry);
    // Per-ledger registry of in-flight reads, created lazily.
    Map<PendingReadKey, PendingRead> pendingReadsForLedger =
            cachedPendingReads.computeIfAbsent(lh.getId(), (l) -> new ConcurrentHashMap<>());
    boolean listenerAdded = false;
    // addListener can race with completion of the pending read; retry until the
    // listener is actually attached to a live pending read.
    while (!listenerAdded) {
        AtomicBoolean createdByThisThread = new AtomicBoolean();
        FindPendingReadOutcome findBestCandidateOutcome =
                findPendingRead(key, pendingReadsForLedger, createdByThisThread);
        PendingRead pendingRead = findBestCandidateOutcome.pendingRead;
        if (findBestCandidateOutcome.needsAdditionalReads()) {
            // The candidate pending read covers only part of [firstEntry, lastEntry];
            // wrap the callback to fill in the missing edges when it completes.
            AsyncCallbacks.ReadEntriesCallback wrappedCallback = new AsyncCallbacks.ReadEntriesCallback() {
                @Override
                public void readEntriesComplete(List<Entry> entries, Object ctx) {
                    PendingReadKey missingOnLeft = findBestCandidateOutcome.missingOnLeft;
                    PendingReadKey missingOnRight = findBestCandidateOutcome.missingOnRight;
                    if (missingOnRight != null && missingOnLeft != null) {
                        // Both edges missing: read left first, then right, then
                        // concatenate left + middle + right in entry order.
                        AsyncCallbacks.ReadEntriesCallback readFromLeftCallback =
                                new AsyncCallbacks.ReadEntriesCallback() {
                            @Override
                            public void readEntriesComplete(List<Entry> entriesFromLeft, Object dummyCtx1) {
                                AsyncCallbacks.ReadEntriesCallback readFromRightCallback =
                                        new AsyncCallbacks.ReadEntriesCallback() {
                                    @Override
                                    public void readEntriesComplete(List<Entry> entriesFromRight, Object dummyCtx2) {
                                        List<Entry> finalResult = new ArrayList<>(
                                                entriesFromLeft.size() + entries.size() + entriesFromRight.size());
                                        finalResult.addAll(entriesFromLeft);
                                        finalResult.addAll(entries);
                                        finalResult.addAll(entriesFromRight);
                                        callback.readEntriesComplete(finalResult, ctx);
                                    }

                                    @Override
                                    public void readEntriesFailed(ManagedLedgerException exception, Object dummyCtx3) {
                                        // Release everything obtained so far before failing.
                                        entries.forEach(Entry::release);
                                        entriesFromLeft.forEach(Entry::release);
                                        callback.readEntriesFailed(exception, ctx);
                                    }
                                };
                                rangeEntryCache.asyncReadEntry0(lh,
                                        missingOnRight.startEntry, missingOnRight.endEntry,
                                        shouldCacheEntry, readFromRightCallback, null);
                            }

                            @Override
                            public void readEntriesFailed(ManagedLedgerException exception, Object dummyCtx4) {
                                entries.forEach(Entry::release);
                                callback.readEntriesFailed(exception, ctx);
                            }
                        };
                        rangeEntryCache.asyncReadEntry0(lh, missingOnLeft.startEntry,
                                missingOnLeft.endEntry, shouldCacheEntry, readFromLeftCallback, null);
                    } else if (missingOnLeft != null) {
                        // Only the prefix is missing: prepend it to the shared result.
                        AsyncCallbacks.ReadEntriesCallback readFromLeftCallback =
                                new AsyncCallbacks.ReadEntriesCallback() {
                            @Override
                            public void readEntriesComplete(List<Entry> entriesFromLeft, Object dummyCtx5) {
                                List<Entry> finalResult = new ArrayList<>(entriesFromLeft.size() + entries.size());
                                finalResult.addAll(entriesFromLeft);
                                finalResult.addAll(entries);
                                callback.readEntriesComplete(finalResult, ctx);
                            }

                            @Override
                            public void readEntriesFailed(ManagedLedgerException exception, Object dummyCtx6) {
                                entries.forEach(Entry::release);
                                callback.readEntriesFailed(exception, ctx);
                            }
                        };
                        rangeEntryCache.asyncReadEntry0(lh, missingOnLeft.startEntry,
                                missingOnLeft.endEntry, shouldCacheEntry, readFromLeftCallback, null);
                    } else if (missingOnRight != null) {
                        // Only the suffix is missing: append it to the shared result.
                        AsyncCallbacks.ReadEntriesCallback readFromRightCallback =
                                new AsyncCallbacks.ReadEntriesCallback() {
                            @Override
                            public void readEntriesComplete(List<Entry> entriesFromRight, Object dummyCtx7) {
                                List<Entry> finalResult = new ArrayList<>(entriesFromRight.size() + entries.size());
                                finalResult.addAll(entries);
                                finalResult.addAll(entriesFromRight);
                                callback.readEntriesComplete(finalResult, ctx);
                            }

                            @Override
                            public void readEntriesFailed(ManagedLedgerException exception, Object dummyCtx8) {
                                entries.forEach(Entry::release);
                                callback.readEntriesFailed(exception, ctx);
                            }
                        };
                        rangeEntryCache.asyncReadEntry0(lh, missingOnRight.startEntry,
                                missingOnRight.endEntry, shouldCacheEntry, readFromRightCallback, null);
                    }
                }

                @Override
                public void readEntriesFailed(ManagedLedgerException exception, Object ctx) {
                    // Main pending read failed: no partial entries were delivered here,
                    // so just propagate the failure.
                    callback.readEntriesFailed(exception, ctx);
                }
            };
            listenerAdded = pendingRead.addListener(wrappedCallback, ctx, key.startEntry, key.endEntry);
        } else {
            // Exact or superset match: the pending read already covers the whole range.
            listenerAdded = pendingRead.addListener(callback, ctx, key.startEntry, key.endEntry);
        }
        if (createdByThisThread.get()) {
            // This thread registered a brand-new pending read: kick off the actual
            // storage read and attach its future so listeners are notified.
            CompletableFuture<List<EntryImpl>> readResult =
                    rangeEntryCache.readFromStorage(lh, firstEntry, lastEntry, shouldCacheEntry);
            pendingRead.attach(readResult);
        }
    }
}
@Test public void simpleConcurrentReadIncluding() throws Exception { long firstEntry = 100; long endEntry = 199; long firstEntrySecondRead = firstEntry + 10; long endEntrySecondRead = endEntry - 10; boolean shouldCacheEntry = false; PreparedReadFromStorage read1 = prepareReadFromStorage(lh, rangeEntryCache, firstEntry, endEntry, shouldCacheEntry); PendingReadsManager pendingReadsManager = new PendingReadsManager(rangeEntryCache); CapturingReadEntriesCallback callback = new CapturingReadEntriesCallback(); pendingReadsManager.readEntries(lh, firstEntry, endEntry, shouldCacheEntry, callback, CTX); CapturingReadEntriesCallback callback2 = new CapturingReadEntriesCallback(); pendingReadsManager.readEntries(lh, firstEntrySecondRead, endEntrySecondRead, shouldCacheEntry, callback2, CTX2); // complete the read from BK // only one read completes 2 callbacks read1.storageReadCompleted(); callback.get(); callback2.get(); assertSame(callback.getCtx(), CTX); assertSame(callback2.getCtx(), CTX2); verifyRange(callback.entries, firstEntry, endEntry); verifyRange(callback2.entries, firstEntrySecondRead, endEntrySecondRead); int pos = 0; for (long entry = firstEntry; entry <= endEntry; entry++) {; if (entry >= firstEntrySecondRead && entry <= endEntrySecondRead) { int posInSecondList = (int) (pos - (firstEntrySecondRead - firstEntry)); assertNotSame(callback.entries.get(pos), callback2.entries.get(posInSecondList)); assertEquals(callback.entries.get(pos).getEntryId(), callback2.entries.get(posInSecondList).getEntryId()); } pos++; } }
/**
 * Escapes the characters of {@code input} that must be escaped in JSON string
 * values, as determined by {@code getObligatoryEscapeCode}; all other characters
 * are copied through unchanged.
 *
 * @param input the raw string to escape (must not be null)
 * @return the JSON-safe escaped string
 */
public static String jsonEscapeString(String input) {
    int length = input.length();
    // Preallocate ~10% slack: most characters pass through unescaped, so the
    // result is usually only slightly longer than the input.
    int lengthWithLeeway = (int) (length * 1.1);
    StringBuilder sb = new StringBuilder(lengthWithLeeway);
    for (int i = 0; i < length; i++) {
        final char c = input.charAt(i);
        // Null means "no escaping required for this character".
        String escaped = getObligatoryEscapeCode(c);
        if (escaped == null) {
            sb.append(c);
        } else {
            sb.append(escaped);
        }
    }
    return sb.toString();
}
/**
 * Smoke test for {@code JsonEscapeUtil.jsonEscapeString}: plain text passes
 * through untouched and embedded double quotes are escaped as {@code \"}.
 */
@Test
public void smokeTestEscapeString() {
    assertEquals("abc", JsonEscapeUtil.jsonEscapeString("abc"));
    // Input contains literal double quotes; output must contain backslash-quote.
    assertEquals("{world: \\\"world\\\"}", JsonEscapeUtil.jsonEscapeString("{world: \"world\"}"));
    // (A third assertion duplicating this one via char concatenation was removed:
    // it compiled to a byte-identical expected string and added no coverage.)
}
/**
 * Builds a {@link JSONSchema} from the given schema definition, preferring any
 * caller-supplied reader/writer and otherwise defaulting to Jackson-based ones.
 *
 * @param schemaDefinition definition carrying the POJO class and optional codec overrides
 * @return a JSON schema for the definition's POJO type
 */
public static <T> JSONSchema<T> of(SchemaDefinition<T> schemaDefinition) {
    SchemaWriter<T> schemaWriter = schemaDefinition.getSchemaWriterOpt()
            .orElseGet(() -> new JacksonJsonWriter<>(jsonMapper()));
    SchemaReader<T> schemaReader = schemaDefinition.getSchemaReaderOpt()
            .orElseGet(() -> new JacksonJsonReader<>(jsonMapper(), schemaDefinition.getPojo()));
    return new JSONSchema<>(parseSchemaInfo(schemaDefinition, SchemaType.JSON),
            schemaDefinition.getPojo(), schemaReader, schemaWriter);
}
/**
 * Round-trip test: encoding a POJO through a JSON schema and decoding the bytes
 * must yield an object equal to the original.
 */
@Test
public void testEncodeAndDecodeObject() throws JsonProcessingException {
    JSONSchema<PC> schema = JSONSchema.of(SchemaDefinition.<PC>builder().withPojo(PC.class).build());
    Seller seller = new Seller("WA", "street", 98004);
    PC original = new PC("dell", "alienware", 2021, GPU.AMD, seller);
    byte[] bytes = schema.encode(original);
    PC decoded = schema.decode(bytes);
    assertEquals(decoded, original);
}
/**
 * HTML Link Parser pre-processor: inspects the previous HTTP sample's response,
 * collects candidate links (anchors, forms, framesets) that match the current
 * sampler's URL pattern, picks one at random, and rewrites the current sampler
 * in place to target it. Does nothing unless both the current sampler and the
 * previous result are HTTP-based.
 */
@Override
public void process() {
    JMeterContext context = getThreadContext();
    Sampler sam = context.getCurrentSampler();
    SampleResult res = context.getPreviousResult();
    HTTPSamplerBase sampler;
    HTTPSampleResult result;
    // Only applicable between two HTTP samples.
    if (!(sam instanceof HTTPSamplerBase) || !(res instanceof HTTPSampleResult)) {
        log.info("Can't apply HTML Link Parser when the previous" + " sampler run is not an HTTP Request.");
        return;
    } else {
        sampler = (HTTPSamplerBase) sam;
        result = (HTTPSampleResult) res;
    }
    List<HTTPSamplerBase> potentialLinks = new ArrayList<>();
    String responseText = result.getResponseDataAsString();
    // Skip any leading junk before the first tag; if no '<' at all, parse from 0.
    int index = responseText.indexOf('<'); // $NON-NLS-1$
    if (index == -1) {
        index = 0;
    }
    if (log.isDebugEnabled()) {
        log.debug("Check for matches against: "+sampler.toString());
    }
    Document html = (Document) HtmlParsingUtils.getDOM(responseText.substring(index));
    // Collect candidate target URLs from the three link-bearing element kinds.
    addAnchorUrls(html, result, sampler, potentialLinks);
    addFormUrls(html, result, sampler, potentialLinks);
    addFramesetUrls(html, result, sampler, potentialLinks);
    if (!potentialLinks.isEmpty()) {
        // Pick one matching link at random and retarget the current sampler.
        HTTPSamplerBase url = potentialLinks.get(ThreadLocalRandom.current().nextInt(potentialLinks.size()));
        if (log.isDebugEnabled()) {
            log.debug("Selected: "+url.toString());
        }
        sampler.setDomain(url.getDomain());
        sampler.setPath(url.getPath());
        if (url.getMethod().equals(HTTPConstants.POST)) {
            // POST: merge the selected form's values into the sampler's existing
            // arguments rather than replacing them wholesale.
            for (JMeterProperty jMeterProperty : sampler.getArguments()) {
                Argument arg = (Argument) jMeterProperty.getObjectValue();
                modifyArgument(arg, url.getArguments());
            }
        } else {
            // GET (and others): replace the argument set entirely.
            sampler.setArguments(url.getArguments());
        }
        sampler.setProtocol(url.getProtocol());
    } else {
        log.debug("No matches found");
    }
}
/**
 * Parses a response containing a single matching anchor and verifies the
 * sampler is rewritten to the resolved absolute URL of that link.
 */
@Test
public void testSimpleParse() throws Exception {
    HTTPSamplerBase config = makeUrlConfig(".*/index\\.html");
    HTTPSamplerBase context = makeContext("http://www.apache.org/subdir/previous.html");
    String responseText = "<html><head><title>Test page</title></head><body>"
            + "<a href=\"index.html\">Goto index page</a></body></html>";
    HTTPSampleResult sampleResult = new HTTPSampleResult();
    jmctx.setCurrentSampler(context);
    jmctx.setCurrentSampler(config);
    sampleResult.setResponseData(responseText, null);
    sampleResult.setSampleLabel(context.toString());
    sampleResult.setSamplerData(context.toString());
    sampleResult.setURL(context.getUrl());
    jmctx.setPreviousResult(sampleResult);
    parser.process();
    // Relative "index.html" must resolve against the previous page's directory.
    assertEquals("http://www.apache.org/subdir/index.html", config.getUrl().toString());
}
/**
 * Overwrites the incoming record's row kind with the configured target kind,
 * then forwards the same record downstream.
 */
@Override
public void processElement(StreamRecord<RowData> element) throws Exception {
    RowData row = element.getValue();
    // Mutation happens in place; the record object itself is re-emitted.
    row.setRowKind(targetRowKind);
    output.collect(element);
}
/**
 * For every possible target {@link RowKind}, feeds rows of all row kinds
 * through a {@code RowKindSetter} and verifies every output row carries the
 * target kind.
 */
@Test
public void testSetRowKind() throws Exception {
    // test set to all row kind
    for (RowKind targetRowKind : RowKind.values()) {
        RowKindSetter rowKindSetter = new RowKindSetter(targetRowKind);
        // try-with-resources closes the harness (and operator) per iteration.
        try (OneInputStreamOperatorTestHarness<RowData, RowData> operatorTestHarness =
                new OneInputStreamOperatorTestHarness<>(rowKindSetter)) {
            operatorTestHarness.open();
            // get the rows with all row kind
            List<RowData> rows = getRowsWithAllRowKind();
            for (RowData row : rows) {
                operatorTestHarness.processElement(new StreamRecord<>(row));
            }
            // verify the row kind of output
            verifyRowKind(operatorTestHarness.extractOutputValues(), targetRowKind);
        }
    }
}
/**
 * Complex-inline sharding routing.
 *
 * <p>Range conditions cannot be evaluated by an inline expression, so when any
 * range values are present this either returns all available targets (full
 * broadcast) or rejects the query, depending on the allow-range-query property.
 * For equality values, every combination of column values is flattened and fed
 * through the inline expression to compute the concrete target names.
 *
 * @throws UnsupportedSQLOperationException if a range query arrives while range
 *         queries are disabled
 * @throws MismatchedComplexInlineShardingAlgorithmColumnAndValueSizeException if the
 *         configured sharding columns do not match the provided value columns
 */
@Override
public Collection<String> doSharding(final Collection<String> availableTargetNames,
                                     final ComplexKeysShardingValue<Comparable<?>> shardingValue) {
    if (!shardingValue.getColumnNameAndRangeValuesMap().isEmpty()) {
        ShardingSpherePreconditions.checkState(allowRangeQuery,
                () -> new UnsupportedSQLOperationException(String.format(
                        "Since the property of `%s` is false, inline sharding algorithm can not tackle with range query",
                        ALLOW_RANGE_QUERY_KEY)));
        // Range query with inline expressions: cannot narrow down, route to all targets.
        return availableTargetNames;
    }
    Map<String, Collection<Comparable<?>>> columnNameAndShardingValuesMap =
            shardingValue.getColumnNameAndShardingValuesMap();
    // Either no sharding columns were configured, or all configured columns must
    // be present in the incoming values.
    ShardingSpherePreconditions.checkState(
            shardingColumns.isEmpty() || shardingColumns.size() == columnNameAndShardingValuesMap.size(),
            () -> new MismatchedComplexInlineShardingAlgorithmColumnAndValueSizeException(
                    shardingColumns.size(), columnNameAndShardingValuesMap.size()));
    // Cartesian-expand the per-column value sets, then evaluate the inline
    // expression for each combination.
    return flatten(columnNameAndShardingValuesMap).stream().map(this::doSharding).collect(Collectors.toList());
}
/**
 * With range queries enabled, a range condition (Range.all) must cause the
 * algorithm to fall back to routing to all available targets.
 */
@Test
void assertDoShardingWithRangeValue() {
    Properties props = PropertiesBuilder.build(
            new Property("algorithm-expression", "t_order_${type % 2}_${order_id % 2}"),
            new Property("sharding-columns", "type,order_id"),
            new Property("allow-range-query-with-inline-sharding", Boolean.TRUE.toString()));
    ComplexInlineShardingAlgorithm algorithm = (ComplexInlineShardingAlgorithm)
            TypedSPILoader.getService(ShardingAlgorithm.class, "COMPLEX_INLINE", props);
    List<String> availableTargetNames = Arrays.asList("t_order_0_0", "t_order_0_1", "t_order_1_0", "t_order_1_1");
    // Empty equality map + a Range.all() range value on "type" -> broadcast.
    Collection<String> actual = algorithm.doSharding(availableTargetNames,
            new ComplexKeysShardingValue<>("t_order", Collections.emptyMap(),
                    Collections.singletonMap("type", Range.all())));
    assertTrue(actual.containsAll(availableTargetNames));
}
/**
 * Gets the value of the named property from the given instance.
 *
 * @param instance the object to read the property from
 * @param pn       the property name
 * @return the property's current value
 * @throws NoSuchPropertyException  if the wrapped type has no such property
 * @throws IllegalArgumentException if the arguments are invalid for this wrapper
 */
public abstract Object getPropertyValue(Object instance, String pn) throws NoSuchPropertyException, IllegalArgumentException;
/**
 * {@code Object} exposes no properties, so any property lookup through its
 * wrapper must fail with {@link NoSuchPropertyException}.
 */
@Test
void testGetPropertyValue() throws Exception {
    Assertions.assertThrows(NoSuchPropertyException.class, () -> {
        Wrapper objectWrapper = Wrapper.getWrapper(Object.class);
        objectWrapper.getPropertyValue(null, null);
    });
}
/**
 * Returns the wrapped metric's current reading scaled by the configured factor.
 *
 * @return the delegate metric's value multiplied by {@code scaleFactor}
 */
@Override
public BigDecimal getValue() {
    final Number raw = this.metric.getValue();
    // Convert first so the multiplication happens in BigDecimal arithmetic.
    return convertToBigDecimal(raw).multiply(scaleFactor);
}
/**
 * After 10 seconds of uptime, a x10 up-scaled uptime metric must read 100.
 * compareTo is used (not equals) so BigDecimal scale differences are ignored.
 */
@Test
public void testGetValue() {
    UpScaledMetric upScaledMetric = new UpScaledMetric("up_scaled_metric", uptimeMetric, 10);
    clock.advance(Duration.ofSeconds(10));
    Assert.assertTrue(BigDecimal.valueOf(100.0d).compareTo(upScaledMetric.getValue()) == 0);
}
/**
 * Applies {@code consumer} to every element of {@code iterable}.
 * A null iterable is treated as empty (no-op).
 *
 * @param iterable elements to visit, may be null
 * @param consumer action applied to each element
 */
public static <T> void forEach(Iterable<T> iterable, Consumer<T> consumer) {
    if (iterable != null) {
        // Delegate to the Iterator-based overload.
        forEach(iterable.iterator(), consumer);
    }
}
/**
 * Exercises the Map-based {@code CollUtil.forEach} overload (key/value/index
 * consumer — note this is a different overload from the Iterable one) and
 * verifies the consumer observes the expected value for key "a".
 */
@Test
public void foreachTest() {
    final HashMap<String, String> map = MapUtil.newHashMap();
    map.put("a", "1");
    map.put("b", "2");
    map.put("c", "3");

    // Single-element array works around the effectively-final lambda restriction.
    final String[] result = new String[1];
    final String a = "a";
    CollUtil.forEach(map, (key, value, index) -> {
        if (a.equals(key)) {
            result[0] = value;
        }
    });
    assertEquals("1", result[0]);
}
/**
 * Lists the distribution methods supported for the given container.
 * CloudFront supports download and streaming distributions here, regardless
 * of the specific container.
 *
 * @param container the bucket/container (unused in the decision)
 * @return the supported distribution methods
 */
@Override
public List<Distribution.Method> getMethods(final Path container) {
    return Arrays.asList(Distribution.DOWNLOAD, Distribution.STREAMING);
}
/**
 * The CloudFront configuration must advertise exactly the DOWNLOAD and
 * STREAMING distribution methods for an S3 bucket path.
 */
@Test
public void testGetMethods() {
    final S3Session session = new S3Session(new Host(new S3Protocol(), new S3Protocol().getDefaultHostname()));
    assertEquals(Arrays.asList(Distribution.DOWNLOAD, Distribution.STREAMING),
            new CloudFrontDistributionConfiguration(session,
                    new S3LocationFeature(session),
                    new DisabledX509TrustManager(),
                    new DefaultX509KeyManager()
            // Volume-typed directory path stands in for a bucket.
            ).getMethods(new Path("/bbb", EnumSet.of(Path.Type.directory, Path.Type.volume))));
}
/**
 * Appends a value under the given header name, preserving any values already
 * present for that name (multi-valued headers).
 *
 * @param name  header name
 * @param value header value to append
 */
public void add(String name, String value) {
    // Create the value list lazily on first use of this header name; most
    // headers carry a single value, hence the initial capacity of 1.
    List<String> values = headers.computeIfAbsent(name, k -> new ArrayList<>(1));
    values.add(value);
}
/**
 * {@code add} must append to existing values rather than replace them:
 * single-value {@code get} returns the first value, while {@code getValues}
 * returns all values in insertion order.
 */
@Test
public void testAdd() {
    HttpHeaders headers = new HttpHeaders(2);
    headers.set("Connection", "Keep-Alive");
    headers.add("Connection", "close");
    Assert.assertEquals("Keep-Alive", headers.get("Connection"));
    Assert.assertEquals(Arrays.asList("Keep-Alive", "close"), headers.getValues("Connection"));
}
/**
 * Runs the version-change reconciliation pipeline: read the version currently
 * reported by the controller, list the broker pods, derive the from/to versions
 * from them, and finally compute the {@link KafkaVersionChange} to apply.
 * Each stage runs only after the previous one completes.
 *
 * @return a future completing with the prepared version change
 */
public Future<KafkaVersionChange> reconcile() {
    return getVersionFromController()
            .compose(i -> getPods())
            .compose(this::detectToAndFromVersions)
            .compose(i -> prepareVersionChange());
}
/**
 * When the Kafka CR specifies no versions but the cluster uniformly runs the
 * default version, reconciliation must be a no-op: from/to and all protocol,
 * message-format and metadata versions resolve to the defaults.
 */
@Test
public void testNoopWithoutVersion(VertxTestContext context) {
    String kafkaVersion = VERSIONS.defaultVersion().version();
    String interBrokerProtocolVersion = VERSIONS.defaultVersion().protocolVersion();
    String logMessageFormatVersion = VERSIONS.defaultVersion().messageVersion();

    // CR carries no version fields (all nulls); existing pods all run defaults.
    VersionChangeCreator vcc = mockVersionChangeCreator(
            mockKafka(null, null, null),
            mockNewCluster(
                    null,
                    mockSps(kafkaVersion),
                    mockUniformPods(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion)
            )
    );

    Checkpoint async = context.checkpoint();
    vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> {
        assertThat(c.from(), is(VERSIONS.defaultVersion()));
        assertThat(c.to(), is(VERSIONS.defaultVersion()));
        assertThat(c.interBrokerProtocolVersion(), is(VERSIONS.defaultVersion().protocolVersion()));
        assertThat(c.logMessageFormatVersion(), is(VERSIONS.defaultVersion().messageVersion()));
        assertThat(c.metadataVersion(), is(VERSIONS.defaultVersion().metadataVersion()));
        async.flag();
    })));
}
/**
 * Handles {@code DROP ... DELETE TOPIC}: deletes the source's Kafka topic and
 * its key/value schema subjects, then rewrites the statement without the
 * DELETE TOPIC clause so downstream execution only performs the metadata drop.
 *
 * <p>Subject deletion uses a Guava {@link Closer} so that BOTH subjects are
 * attempted even if the first deletion throws; the first exception is rethrown
 * (KsqlException as-is, anything else wrapped).
 *
 * @throws KsqlException for read-only sources, unknown sources (without IF EXISTS),
 *         or failures while deleting topic/subjects
 */
@SuppressWarnings({"unchecked", "UnstableApiUsage"})
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
    final ConfiguredStatement<T> statement) {
    // Only DROP statements carrying DELETE TOPIC are of interest.
    if (!(statement.getStatement() instanceof DropStatement)) {
        return statement;
    }

    final DropStatement dropStatement = (DropStatement) statement.getStatement();
    if (!dropStatement.isDeleteTopic()) {
        return statement;
    }

    final SourceName sourceName = dropStatement.getName();
    final DataSource source = metastore.getSource(sourceName);
    if (source != null) {
        if (source.isSource()) {
            throw new KsqlException("Cannot delete topic for read-only source: " + sourceName.text());
        }
        // Refuse if other queries/sources still reference the topic.
        checkTopicRefs(source);

        deleteTopic(source);

        // Closer runs both deletions even if the first fails, then rethrows.
        final Closer closer = Closer.create();
        closer.register(() -> deleteKeySubject(source));
        closer.register(() -> deleteValueSubject(source));
        try {
            closer.close();
        } catch (final KsqlException e) {
            throw e;
        } catch (final Exception e) {
            throw new KsqlException(e);
        }
    } else if (!dropStatement.getIfExists()) {
        throw new KsqlException("Could not find source to delete topic for: " + statement);
    }

    // Strip DELETE TOPIC so the remaining DROP is executed normally downstream.
    final T withoutDelete = (T) dropStatement.withoutDeleteClause();
    final String withoutDeleteText = SqlFormatter.formatSql(withoutDelete) + ";";

    return statement.withStatement(withoutDeleteText, withoutDelete);
}
/**
 * When the Kafka topic deletion fails, the injector must surface an error
 * naming the topic. NOTE(review): despite the method name, this scenario
 * simulates a deleteTopics failure (topic lookup succeeds) — consider renaming
 * or aligning the asserted message; confirm against the injector's behavior.
 */
@Test
public void shouldThrowIfTopicDoesNotExist() {
    // Given:
    final SourceName STREAM_1 = SourceName.of("stream1");
    final DataSource other1 = givenSource(STREAM_1, "topicName");
    when(metaStore.getSource(STREAM_1)).thenAnswer(inv -> other1);
    when(other1.getKafkaTopicName()).thenReturn("topicName");
    final ConfiguredStatement<DropStream> dropStatement = givenStatement(
        "DROP stream1 DELETE TOPIC;",
        new DropStream(SourceName.of("stream1"), true, true)
    );
    // Simulate the broker-side deletion failing.
    doThrow(RuntimeException.class).when(topicClient).deleteTopics(ImmutableList.of("topicName"));

    // When:
    final Exception e = assertThrows(
        RuntimeException.class,
        () -> deleteInjector.inject(dropStatement)
    );

    // Then:
    assertThat(e.getMessage(), containsString(""
        + "Could not delete the corresponding kafka topic: topicName"));
}
/**
 * Decodes inbound bytes from the client into database packets.
 * Returns without consuming anything while fewer bytes than a complete packet
 * header are available, so Netty re-invokes this when more data arrives.
 */
@Override
protected void decode(final ChannelHandlerContext context, final ByteBuf in, final List<Object> out) {
    if (!databasePacketCodecEngine.isValidHeader(in.readableBytes())) {
        // Incomplete header: wait for more bytes.
        return;
    }
    if (log.isDebugEnabled()) {
        log.debug("Read from client {} :\n{}", context.channel().id().asShortText(), ByteBufUtil.prettyHexDump(in));
    }
    databasePacketCodecEngine.decode(context, in, out);
}
/**
 * With a valid packet header present, decoding must be delegated to the
 * database-specific codec engine with the same buffer and output list.
 */
@Test
void assertDecodeWithValidHeader() {
    when(byteBuf.readableBytes()).thenReturn(1);
    when(databasePacketCodecEngine.isValidHeader(1)).thenReturn(true);
    packetCodec.decode(context, byteBuf, Collections.emptyList());
    verify(databasePacketCodecEngine).decode(context, byteBuf, Collections.emptyList());
}
/**
 * Produces the next generated value: a single number for single-valued fields,
 * or a multi-value collection of numbers otherwise.
 */
@Override
public Object next() {
    return _numberOfValuesPerEntry == 1
            ? getNextNumber()
            : MultiValueGeneratorHelper.generateMultiValueEntries(_numberOfValuesPerEntry, _random, this::getNextNumber);
}
/**
 * Multi-valued generation with an average of 2.4 values per entry: a stubbed
 * Random drives both the value sequence (cardinality 5, starting at 10) and
 * the per-entry size decision (doubles &lt; 0.4 produce 3 values, otherwise 2).
 */
@Test
public void testNextMultiValued() {
    Random random = mock(Random.class);
    when(random.nextInt(anyInt())).thenReturn(10); // initial value
    when(random.nextDouble()).thenReturn(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9); // for MV generation

    int cardinality = 5;
    double numValuesPerEntry = 2.4;

    NumberGenerator generator = new NumberGenerator(cardinality, FieldSpec.DataType.INT, numValuesPerEntry, random);

    // Values cycle through [10..14] (cardinality 5); entry sizes follow the
    // stubbed nextDouble() sequence against the 0.4 fractional threshold.
    int[][] expectedValues = { //
        {10, 11, 12}, // rnd < 0.4
        {13, 14, 10}, // rnd < 0.4
        {11, 12, 13}, // rnd < 0.4
        {14, 10, 11}, // rnd < 0.4
        {12, 13}, // rnd >= 0.4
        {14, 10}, // rnd >= 0.4
        {11, 12}, // rnd >= 0.4
        {13, 14}, // rnd >= 0.4
        {10, 11}, // rnd >= 0.4
        {12, 13}, // rnd >= 0.4
    };

    for (int[] expected : expectedValues) {
        List<Integer> actual = (List<Integer>) generator.next();
        assertEquals(actual.toArray(), expected);
    }
}
/**
 * Convenience factory for a FIRST pane: delegates to the index-taking overload
 * with pane index 0.
 *
 * <p>Non-first panes must supply explicit indices via the five-argument
 * overload — hence the isFirst precondition. The last argument is presumably
 * the non-speculative index: -1 for EARLY (speculative) panes, 0 otherwise —
 * TODO confirm against the five-argument createPane's contract.
 */
public static PaneInfo createPane(boolean isFirst, boolean isLast, Timing timing) {
    checkArgument(isFirst, "Indices must be provided for non-first pane info.");
    return createPane(isFirst, isLast, timing, 0, timing == Timing.EARLY ? -1 : 0);
}
/**
 * Pane infos built from the same arguments must be interned: two calls with
 * identical inputs return the very same instance (assertSame, not equals).
 */
@Test
public void testInterned() throws Exception {
    assertSame(
        PaneInfo.createPane(true, true, Timing.EARLY),
        PaneInfo.createPane(true, true, Timing.EARLY));
}
/**
 * Decides whether the job should be rescaled: any vertex whose parallelism
 * differs between the current and the proposed assignment triggers a rescale.
 *
 * @param currentParallelism parallelism currently in effect
 * @param newParallelism     proposed parallelism
 * @return true if at least one vertex's parallelism would change
 */
@Override
public boolean shouldRescale(
        VertexParallelism currentParallelism, VertexParallelism newParallelism) {
    for (JobVertexID vertexId : currentParallelism.getVertices()) {
        if (newParallelism.getParallelism(vertexId) != currentParallelism.getParallelism(vertexId)) {
            return true;
        }
    }
    return false;
}
/**
 * An increase in parallelism (1 -> 2) must be reported as requiring a rescale.
 */
@Test
void testScaleUp() {
    final RescalingController rescalingController = new EnforceParallelismChangeRescalingController();
    assertThat(rescalingController.shouldRescale(forParallelism(1), forParallelism(2)))
            .isTrue();
}
/**
 * Pages through config items matching the given filters.
 *
 * <p>Optional filters (appName, content, config_tags) come from
 * {@code configAdvanceInfo}. When tags are supplied the query joins through the
 * config-tags relation table; otherwise it queries the config table directly.
 * Every returned item's content is decrypted in place before the page is returned.
 *
 * @param pageNo            1-based page number
 * @param pageSize          rows per page
 * @param dataId            optional data id filter (blank = ignored)
 * @param group             optional group filter (blank = ignored)
 * @param tenant            tenant id; blank is normalized to the empty tenant
 * @param configAdvanceInfo optional advanced filters, may be null
 * @return the requested page with decrypted content
 * @throws CannotGetJdbcConnectionException rethrown after logging when the DB
 *         connection cannot be obtained
 */
@Override
public Page<ConfigInfo> findConfigInfo4Page(final int pageNo, final int pageSize, final String dataId,
        final String group, final String tenant, final Map<String, Object> configAdvanceInfo) {
    // Blank tenant is stored as the empty string, not null.
    String tenantTmp = StringUtils.isBlank(tenant) ? StringUtils.EMPTY : tenant;
    PaginationHelper<ConfigInfo> helper = createPaginationHelper();
    final String appName = configAdvanceInfo == null ? null : (String) configAdvanceInfo.get("appName");
    final String content = configAdvanceInfo == null ? null : (String) configAdvanceInfo.get("content");
    final String configTags = configAdvanceInfo == null ? null : (String) configAdvanceInfo.get("config_tags");
    MapperResult sql;
    MapperResult sqlCount;
    final MapperContext context = new MapperContext();
    // Tenant is always part of the WHERE clause; the rest are optional.
    context.putWhereParameter(FieldConstant.TENANT_ID, tenantTmp);
    if (StringUtils.isNotBlank(dataId)) {
        context.putWhereParameter(FieldConstant.DATA_ID, dataId);
    }
    if (StringUtils.isNotBlank(group)) {
        context.putWhereParameter(FieldConstant.GROUP_ID, group);
    }
    if (StringUtils.isNotBlank(appName)) {
        context.putWhereParameter(FieldConstant.APP_NAME, appName);
    }
    if (!StringUtils.isBlank(content)) {
        context.putWhereParameter(FieldConstant.CONTENT, content);
    }
    // OFFSET/LIMIT window for the requested page.
    context.setStartRow((pageNo - 1) * pageSize);
    context.setPageSize(pageSize);

    if (StringUtils.isNotBlank(configTags)) {
        // Tag filter present: query via the config-tags relation mapper.
        String[] tagArr = configTags.split(",");
        context.putWhereParameter(FieldConstant.TAG_ARR, tagArr);
        ConfigTagsRelationMapper configTagsRelationMapper = mapperManager.findMapper(
                dataSourceService.getDataSourceType(), TableConstant.CONFIG_TAGS_RELATION);
        sqlCount = configTagsRelationMapper.findConfigInfo4PageCountRows(context);
        sql = configTagsRelationMapper.findConfigInfo4PageFetchRows(context);
    } else {
        ConfigInfoMapper configInfoMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
                TableConstant.CONFIG_INFO);
        sqlCount = configInfoMapper.findConfigInfo4PageCountRows(context);
        sql = configInfoMapper.findConfigInfo4PageFetchRows(context);
    }
    try {
        Page<ConfigInfo> page = helper.fetchPageLimit(sqlCount, sql, pageNo, pageSize, CONFIG_INFO_ROW_MAPPER);
        // Decrypt each item's content before returning it to the caller.
        for (ConfigInfo configInfo : page.getPageItems()) {
            Pair<String, String> pair = EncryptionHandler.decryptHandler(configInfo.getDataId(),
                    configInfo.getEncryptedDataKey(), configInfo.getContent());
            configInfo.setContent(pair.getSecond());
        }
        return page;
    } catch (CannotGetJdbcConnectionException e) {
        LogUtil.FATAL_LOG.error("[db-error] ", e);
        throw e;
    }
}
@Test void testFindConfigInfo4Page() { String dataId = "dataId4567222"; String group = "group3456789"; String tenant = "tenant4567890"; //mock total count when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {tenant, dataId, group}), eq(Integer.class))).thenReturn( new Integer(9)); //mock page list List<ConfigInfo> result = new ArrayList<>(); result.add(createMockConfigInfo(0)); result.add(createMockConfigInfo(1)); result.add(createMockConfigInfo(2)); when(jdbcTemplate.query(anyString(), eq(new Object[] {tenant, dataId, group}), eq(CONFIG_INFO_ROW_MAPPER))).thenReturn(result); Map<String, Object> configAdvanceInfo = new HashMap<>(); Page<ConfigInfo> configInfo4Page = externalConfigInfoPersistService.findConfigInfo4Page(1, 3, dataId, group, tenant, configAdvanceInfo); assertEquals(result.size(), configInfo4Page.getPageItems().size()); assertEquals(9, configInfo4Page.getTotalCount()); }
/**
 * Creates a new meeting.
 *
 * <p>Responds 201 Created with a Location of {@code /meeting/{uuid}} and sets
 * an auth cookie (scoped to the meeting's path) carrying the creator's token.
 *
 * @param request validated meeting creation payload
 * @return the created meeting wrapped in the standard API envelope
 */
@PostMapping("/api/v1/meetings")
public ResponseEntity<MomoApiResponse<MeetingCreateResponse>> create(
        @RequestBody @Valid MeetingCreateRequest request
) {
    MeetingCreateResponse response = meetingService.create(request);
    // Cookie is restricted to this meeting's path so tokens don't leak across meetings.
    String path = cookieManager.pathOf(response.uuid());
    String cookie = cookieManager.createNewCookie(response.token(), path);
    return ResponseEntity.created(URI.create("/meeting/" + response.uuid()))
            .header(HttpHeaders.SET_COOKIE, cookie)
            .body(new MomoApiResponse<>(response));
}
/**
 * Locking a meeting requires host permission: a guest attendee's token on the
 * lock endpoint must yield 403 Forbidden.
 */
@DisplayName("약속을 잠글 때 호스트 권한이 없다면 403을 반환한다.")
@Test
void lockWithNoPermission() {
    Meeting meeting = meetingRepository.save(MeetingFixture.DINNER.create());
    // Attendee is a GUEST, not the host.
    Attendee attendee = attendeeRepository.save(AttendeeFixture.GUEST_PEDRO.create(meeting));
    String token = getToken(attendee, meeting);

    RestAssured.given().log().all()
            .cookie("ACCESS_TOKEN", token)
            .contentType(ContentType.JSON)
            .pathParam("uuid", meeting.getUuid())
            .when().patch("/api/v1/meetings/{uuid}/lock")
            .then().log().all()
            .statusCode(HttpStatus.FORBIDDEN.value());
}
/**
 * Decides whether the undo should proceed by comparing the undo-log snapshots
 * against the current table data.
 *
 * <ul>
 *   <li>before == after: nothing changed; skip undo (returns false).</li>
 *   <li>after == current: data is as expected; proceed with undo (returns true).</li>
 *   <li>after != current but before == current: already rolled back (or never
 *       applied); skip undo (returns false).</li>
 *   <li>otherwise: the row was modified by someone else — dirty data; abort
 *       with {@link SQLUndoDirtyException}.</li>
 * </ul>
 *
 * @param conn connection used to query the current records
 * @return true to continue the undo, false to skip it
 * @throws SQLException          on query failure
 * @throws SQLUndoDirtyException when current data matches neither snapshot
 */
protected boolean dataValidationAndGoOn(ConnectionProxy conn) throws SQLException {
    TableRecords beforeRecords = sqlUndoLog.getBeforeImage();
    TableRecords afterRecords = sqlUndoLog.getAfterImage();

    // Compare current data with before data
    // No need undo if the before data snapshot is equivalent to the after data snapshot.
    Result<Boolean> beforeEqualsAfterResult = DataCompareUtils.isRecordsEquals(beforeRecords, afterRecords);
    if (beforeEqualsAfterResult.getResult()) {
        if (LOGGER.isInfoEnabled()) {
            LOGGER.info("Stop rollback because there is no data change " +
                "between the before data snapshot and the after data snapshot.");
        }
        // no need continue undo.
        return false;
    }

    // Validate if data is dirty.
    TableRecords currentRecords = queryCurrentRecords(conn);
    // compare with current data and after image.
    Result<Boolean> afterEqualsCurrentResult = DataCompareUtils.isRecordsEquals(afterRecords, currentRecords);
    if (!afterEqualsCurrentResult.getResult()) {

        // If current data is not equivalent to the after data, then compare the current data with the before
        // data, too. No need continue to undo if current data is equivalent to the before data snapshot
        Result<Boolean> beforeEqualsCurrentResult = DataCompareUtils.isRecordsEquals(beforeRecords, currentRecords);
        if (beforeEqualsCurrentResult.getResult()) {
            if (LOGGER.isInfoEnabled()) {
                LOGGER.info("Stop rollback because there is no data change " +
                    "between the before data snapshot and the current data snapshot.");
            }
            // no need continue undo.
            return false;
        } else {
            // Current data matches neither snapshot: another transaction touched it.
            if (LOGGER.isInfoEnabled()) {
                if (StringUtils.isNotBlank(afterEqualsCurrentResult.getErrMsg())) {
                    LOGGER.info(afterEqualsCurrentResult.getErrMsg(), afterEqualsCurrentResult.getErrMsgParams());
                }
            }
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug("check dirty data failed, old and new data are not equal, " +
                    "tableName:[" + sqlUndoLog.getTableName() + "]," +
                    "oldRows:[" + JSON.toJSONString(afterRecords.getRows()) + "]," +
                    "newRows:[" + JSON.toJSONString(currentRecords.getRows()) + "].");
            }
            throw new SQLUndoDirtyException("Has dirty records when undo.");
        }
    }
    return true;
}
/**
 * Exercises the four decision branches of {@code dataValidationAndGoOn} for an
 * INSERT undo log: proceed (after == current), dirty-data failure (current
 * matches neither snapshot), skip because before == current, and skip because
 * before == after.
 */
@Test
public void dataValidationInsert() throws SQLException {
    // Build before/after snapshots around two inserted rows.
    TableRecords beforeImage = execQuery(tableMeta, "SELECT * FROM table_name WHERE id IN (12345, 12346);");
    execSQL("INSERT INTO table_name(id, name) VALUES (12345,'aaa');");
    execSQL("INSERT INTO table_name(id, name) VALUES (12346,'aaa');");
    TableRecords afterImage = execQuery(tableMeta, "SELECT * FROM table_name WHERE id IN (12345, 12346);");
    SQLUndoLog sqlUndoLog = new SQLUndoLog();
    sqlUndoLog.setSqlType(SQLType.INSERT);
    sqlUndoLog.setTableMeta(tableMeta);
    sqlUndoLog.setTableName("table_name");
    sqlUndoLog.setBeforeImage(beforeImage);
    sqlUndoLog.setAfterImage(afterImage);
    TestUndoExecutor spy = new TestUndoExecutor(sqlUndoLog, false);

    // case1: normal case  before:0 -> after:2 -> current:2
    Assertions.assertTrue(spy.dataValidationAndGoOn(connection));

    // case2: dirty data  before:0 -> after:2 -> current:2'
    execSQL("update table_name set name = 'yyy' where id in (12345, 12346);");
    try {
        Assertions.assertTrue(spy.dataValidationAndGoOn(connection));
        Assertions.fail();
    } catch (Exception e) {
        // Dirty data must surface as an SQLException (SQLUndoDirtyException).
        Assertions.assertTrue(e instanceof SQLException);
    }

    // case3: before == current  before:0 -> after:2 -> current:0
    execSQL("delete from table_name where id in (12345, 12346);");
    Assertions.assertFalse(spy.dataValidationAndGoOn(connection));

    // case 4: before == after  before:0 -> after:0
    afterImage = execQuery(tableMeta, "SELECT * FROM table_name WHERE id IN (12345, 12346);");
    sqlUndoLog.setAfterImage(afterImage);
    Assertions.assertFalse(spy.dataValidationAndGoOn(connection));
}
/**
 * Builds the result rows for SHOW SINGLE TABLE: one (table name, data source)
 * row per matching data node, sorted by table name. A LIKE pattern in the
 * statement filters by table name; otherwise the statement's own filter applies.
 */
@Override
public Collection<LocalDataQueryResultRow> getRows(final ShowSingleTableStatement sqlStatement, final ContextManager contextManager) {
    Collection<DataNode> matchedDataNodes = getPattern(sqlStatement)
            .map(optional -> getDataNodesWithLikePattern(
                    rule.getAttributes().getAttribute(DataNodeRuleAttribute.class).getAllDataNodes(), optional))
            .orElseGet(() -> getDataNodes(
                    rule.getAttributes().getAttribute(DataNodeRuleAttribute.class).getAllDataNodes(), sqlStatement));
    // Sort by table name for deterministic output, then project to result rows.
    return matchedDataNodes.stream()
            .sorted(Comparator.comparing(DataNode::getTableName))
            .map(each -> new LocalDataQueryResultRow(each.getTableName(), each.getDataSourceName()))
            .collect(Collectors.toList());
}
/**
 * SHOW SINGLE TABLE must return one row per single table, sorted by table name,
 * each carrying (table name, data source name).
 */
@Test
void assertGetRowData() throws SQLException {
    engine = setUp(mock(ShowSingleTableStatement.class));
    engine.executeQuery();
    Collection<LocalDataQueryResultRow> actual = engine.getRows();
    assertThat(actual.size(), is(2));
    Iterator<LocalDataQueryResultRow> iterator = actual.iterator();
    // Rows are sorted by table name: t_order before t_order_item.
    LocalDataQueryResultRow row = iterator.next();
    assertThat(row.getCell(1), is("t_order"));
    assertThat(row.getCell(2), is("ds_1"));
    row = iterator.next();
    assertThat(row.getCell(1), is("t_order_item"));
    assertThat(row.getCell(2), is("ds_2"));
}
/**
 * Creates a new 8-byte buffer containing the given long.
 * The caller owns the returned buffer and is responsible for releasing it.
 *
 * @param value the long to copy into a fresh buffer
 * @return a buffer whose readable bytes are exactly the encoded value
 */
public static ByteBuf copyLong(long value) {
    final ByteBuf result = buffer(8);
    result.writeLong(value);
    return result;
}
/**
 * copyLong must produce an 8-byte buffer whose single readable long is the
 * input value; reading it exhausts the buffer.
 */
@Test
public void testWrapSingleLong() {
    ByteBuf buffer = copyLong(42);
    assertEquals(8, buffer.capacity());
    assertEquals(42, buffer.readLong());
    assertFalse(buffer.isReadable());
    // Release to avoid a ref-count leak in the test run.
    buffer.release();
}
/**
 * Builds a failure response carrying the given message and the generic FAIL
 * result code.
 *
 * @param message human-readable failure description
 * @return a response marked as failed
 */
public static QueryServiceResponse buildFailResponse(String message) {
    QueryServiceResponse response = new QueryServiceResponse();
    response.setMessage(message);
    response.setResultCode(ResponseCode.FAIL.getCode());
    return response;
}
/**
 * A failure response must serialize with resultCode 500, the supplied message,
 * a default errorCode of 0, and success=false.
 */
@Test
void testSerializeFailResponse() throws JsonProcessingException {
    QueryServiceResponse response = QueryServiceResponse.buildFailResponse("test");
    String json = mapper.writeValueAsString(response);
    assertTrue(json.contains("\"resultCode\":500"));
    assertTrue(json.contains("\"errorCode\":0"));
    assertTrue(json.contains("\"message\":\"test\""));
    assertTrue(json.contains("\"success\":false"));
}
/**
 * Redeems a backup receipt for the given account: verifies the credential
 * presentation, rejects expired receipts and unsupported levels, records the
 * receipt serial to prevent double-redemption, and then merges the receipt's
 * backup voucher into the account.
 *
 * @param account                       the redeeming account
 * @param receiptCredentialPresentation zk receipt credential presentation
 * @return future completing when the account update has been applied
 * @throws io.grpc.StatusRuntimeException INVALID_ARGUMENT on verification
 *         failure, expiry, unknown level, or an already-redeemed serial
 */
public CompletableFuture<Void> redeemReceipt(
    final Account account,
    final ReceiptCredentialPresentation receiptCredentialPresentation) {
  try {
    serverZkReceiptOperations.verifyReceiptCredentialPresentation(receiptCredentialPresentation);
  } catch (VerificationFailedException e) {
    throw Status.INVALID_ARGUMENT
        .withDescription("receipt credential presentation verification failed")
        .asRuntimeException();
  }
  final ReceiptSerial receiptSerial = receiptCredentialPresentation.getReceiptSerial();
  final Instant receiptExpiration = Instant.ofEpochSecond(receiptCredentialPresentation.getReceiptExpirationTime());
  if (clock.instant().isAfter(receiptExpiration)) {
    throw Status.INVALID_ARGUMENT.withDescription("receipt is already expired").asRuntimeException();
  }

  final long receiptLevel = receiptCredentialPresentation.getReceiptLevel();

  // Only MEDIA-level receipts are redeemable here.
  if (BackupLevelUtil.fromReceiptLevel(receiptLevel) != BackupLevel.MEDIA) {
    throw Status.INVALID_ARGUMENT
        .withDescription("server does not recognize the requested receipt level")
        .asRuntimeException();
  }

  return redeemedReceiptsManager
      .put(receiptSerial, receiptExpiration.getEpochSecond(), receiptLevel, account.getUuid())
      .thenCompose(receiptAllowed -> {
        if (!receiptAllowed) {
          throw Status.INVALID_ARGUMENT
              .withDescription("receipt serial is already redeemed")
              .asRuntimeException();
        }
        return accountsManager.updateAsync(account, a -> {
          final Account.BackupVoucher newPayment = new Account.BackupVoucher(receiptLevel, receiptExpiration);
          final Account.BackupVoucher existingPayment = a.getBackupVoucher();
          // Fixed: mutate the updater's working copy `a`, not the captured
          // `account` — mixing the two broke the update-function contract
          // (the read already used `a`).
          a.setBackupVoucher(merge(existingPayment, newPayment));
        });
      })
      .thenRun(Util.NOOP);
}
// Verifies that redeeming an already-redeemed receipt serial fails with
// INVALID_ARGUMENT and never touches the accounts manager.
@Test
void receiptAlreadyRedeemed() throws InvalidInputException, VerificationFailedException {
    final Instant expirationTime = Instant.EPOCH.plus(Duration.ofDays(1));
    final BackupAuthManager authManager = create(BackupLevel.MESSAGES, false);
    final Account account = mock(Account.class);
    when(account.getUuid()).thenReturn(aci);
    clock.pin(Instant.EPOCH.plus(Duration.ofDays(1)));
    // NOTE(review): this stub is never exercised (the put below returns false, so
    // updateAsync is not reached) and verifyNoInteractions at the end relies on
    // Mockito ignoring stubbing calls — consider removing the stub for clarity.
    when(accountsManager.updateAsync(any(), any())).thenReturn(CompletableFuture.completedFuture(account));
    // put() returning false models "serial already redeemed".
    when(redeemedReceiptsManager.put(any(), eq(expirationTime.getEpochSecond()), eq(201L), eq(aci)))
        .thenReturn(CompletableFuture.completedFuture(false));
    final CompletableFuture<Void> result =
        authManager.redeemReceipt(account, receiptPresentation(201, expirationTime));
    assertThat(CompletableFutureTestUtil.assertFailsWithCause(StatusRuntimeException.class, result))
        .extracting(ex -> ex.getStatus().getCode())
        .isEqualTo(Status.Code.INVALID_ARGUMENT);
    verifyNoInteractions(accountsManager);
}
// Offers the item to this conveyor's single queue by delegating to the
// two-argument overload.
public boolean offer(E item) throws ConcurrentConveyorException {
    return offer(queue, item);
}
// Verifies that an offered item is accepted and comes back from the default
// queue by identity.
@Test
public void when_offer_then_poll() {
    // when
    boolean didOffer = conveyorSingleQueue.offer(item1);
    // then
    assertTrue(didOffer);
    assertSame(item1, defaultQ.poll());
}
/**
 * Compiles the segment's nested model to Java sources and returns the
 * class-name-to-source map; the compiled nested model is appended to
 * {@code nestedModels} by the common helper, and the segment model's fields
 * are propagated back onto the compilation DTO.
 *
 * @throws KiePMMLException when no KiePMMLModel can be produced for the segment
 */
public static Map<String, String> getSegmentSourcesMap(final SegmentCompilationDTO segmentCompilationDTO,
                                                       final List<KiePMMLModel> nestedModels) {
    logger.debug(GET_SEGMENT, segmentCompilationDTO.getSegment());
    final KiePMMLModel nestedModel =
            getFromCommonDataAndTransformationDictionaryAndModelWithSources(segmentCompilationDTO)
                    .orElseThrow(() -> new KiePMMLException("Failed to get the KiePMMLModel for segment "
                                                                    + segmentCompilationDTO.getModel().getModelName()));
    final Map<String, String> toReturn =
            getSegmentSourcesMapCommon(segmentCompilationDTO, nestedModels, nestedModel);
    // Make the nested model's fields visible to subsequent segment compilations.
    segmentCompilationDTO.addFields(getFieldsFromModel(segmentCompilationDTO.getModel()));
    return toReturn;
}
// Builds a SegmentCompilationDTO for the first segment of MINING_MODEL and
// checks that getSegmentSourcesMap produces the expected source entries.
@Test
void getSegmentSourcesMapHasSourcesWithKiePMMLModelClass() {
    final Segment segment = MINING_MODEL.getSegmentation().getSegments().get(0);
    final String regressionModelName = "CategoricalVariablesRegression";
    final String kiePMMLModelClass = PACKAGE_NAME + "." + regressionModelName;
    final Map<String, String> sourcesMap = new HashMap<>();
    sourcesMap.put(kiePMMLModelClass, String.format("public class %s {}", regressionModelName));
    final CommonCompilationDTO<MiningModel> source =
            CommonCompilationDTO.fromGeneratedPackageNameAndFields(PACKAGE_NAME, pmml, MINING_MODEL,
                                                                   new PMMLCompilationContextMock(), "FILENAME");
    final MiningModelCompilationDTO compilationDTO = MiningModelCompilationDTO.fromCompilationDTO(source);
    final SegmentCompilationDTO segmentCompilationDTO =
            SegmentCompilationDTO.fromGeneratedPackageNameAndFields(compilationDTO, segment,
                                                                    compilationDTO.getFields());
    // Uses the (dto, boolean) overload of getSegmentSourcesMap.
    final Map<String, String> retrieved =
            KiePMMLSegmentFactory.getSegmentSourcesMap(segmentCompilationDTO, true);
    commonEvaluateMap(retrieved, segment);
}
/**
 * ANDs together the Iceberg translations of the given Spark filters.
 * Starts from alwaysTrue so an empty array converts to a tautology; fails
 * fast if any single filter has no Iceberg equivalent.
 */
public static Expression convert(Filter[] filters) {
  Expression combined = Expressions.alwaysTrue();
  for (int i = 0; i < filters.length; i++) {
    Filter current = filters[i];
    Expression translated = convert(current);
    Preconditions.checkArgument(
        translated != null, "Cannot convert filter to Iceberg: %s", current);
    combined = Expressions.and(combined, translated);
  }
  return combined;
}
// A NOT wrapping an AND that contains an IN cannot be translated to Iceberg;
// convert must signal that by returning null.
@Test
public void testNestedInInsideNot() {
    Not filter = Not.apply(And.apply(EqualTo.apply("col1", 1), In.apply("col2", new Integer[] {1, 2})));
    Expression converted = SparkFilters.convert(filter);
    Assert.assertNull("Expression should not be converted", converted);
}
/**
 * Orders subscriptions lexicographically by the composite "topic@subString"
 * key. Note the comparison is over the joined string, not field-by-field,
 * which matters when one topic is a prefix of another.
 */
@Override
public int compareTo(SubscriptionData other) {
    String left = String.join("@", this.topic, this.subString);
    String right = String.join("@", other.topic, other.subString);
    return left.compareTo(right);
}
// compareTo must agree with comparing the concatenated "topic@subString" keys.
@Test
public void testCompareTo() {
    SubscriptionData subscriptionData = new SubscriptionData("TOPICA", "*");
    SubscriptionData subscriptionData1 = new SubscriptionData("TOPICBA", "*");
    // NOTE(review): the expected value compares against "TOPICB@*" while the second
    // subscription's topic is "TOPICBA" — both comparisons are negative here, but
    // the literal looks like a typo worth confirming.
    assertThat(subscriptionData.compareTo(subscriptionData1)).isEqualTo("TOPICA@*".compareTo("TOPICB@*"));
}
public static String findAddress(List<NodeAddress> addresses, NodeAddressType preferredAddressType) { if (addresses == null) { return null; } Map<String, String> addressMap = addresses.stream() .collect(Collectors.toMap(NodeAddress::getType, NodeAddress::getAddress, (address1, address2) -> { LOGGER.warnOp("Found multiple addresses with the same type. Only the first address '{}' will be used.", address1); return address1; })); // If user set preferred address type, we should check it first if (preferredAddressType != null && addressMap.containsKey(preferredAddressType.toValue())) { return addressMap.get(preferredAddressType.toValue()); } if (addressMap.containsKey("ExternalDNS")) { return addressMap.get("ExternalDNS"); } else if (addressMap.containsKey("ExternalIP")) { return addressMap.get("ExternalIP"); } else if (addressMap.containsKey("InternalDNS")) { return addressMap.get("InternalDNS"); } else if (addressMap.containsKey("InternalIP")) { return addressMap.get("InternalIP"); } else if (addressMap.containsKey("Hostname")) { return addressMap.get("Hostname"); } return null; }
// A null address list must yield null, not an exception.
@Test
public void testFindAddressNullWhenAddressesNull() {
    List<NodeAddress> addresses = null;
    String address = NodeUtils.findAddress(addresses, null);
    assertThat(address, is(nullValue()));
}
/**
 * Factory for Version values. The sentinel pair (UNKNOWN_VERSION, UNKNOWN_VERSION)
 * maps to the shared UNKNOWN instance; any other pair gets a fresh Version.
 */
public static Version of(int major, int minor) {
    boolean isUnknown = major == UNKNOWN_VERSION && minor == UNKNOWN_VERSION;
    return isUnknown ? UNKNOWN : new Version(major, minor);
}
// Parsing "3.0" must produce a value equal to the V3_0 constant.
@Test
public void test_ofString() {
    Version v = of("3.0");
    assertEquals(v, V3_0);
}
/**
 * Checks every expected decision result of the scenario against the actual DMN
 * evaluation outcome stored in the request context, recording one ScenarioResult
 * per expected fact mapping value.
 *
 * @throws ScenarioException when the DMN run produced no decision result for an
 *         expected decision name
 */
@Override
protected void verifyConditions(ScesimModelDescriptor scesimModelDescriptor,
                                ScenarioRunnerData scenarioRunnerData,
                                ExpressionEvaluatorFactory expressionEvaluatorFactory,
                                Map<String, Object> requestContext) {
    // The DMN result is stashed in the request context by the executable builder.
    DMNResult dmnResult = (DMNResult) requestContext.get(DMNScenarioExecutableBuilder.DMN_RESULT);
    List<DMNMessage> dmnMessages = dmnResult.getMessages();

    for (ScenarioExpect output : scenarioRunnerData.getExpects()) {
        FactIdentifier factIdentifier = output.getFactIdentifier();
        String decisionName = factIdentifier.getName();
        DMNDecisionResult decisionResult = dmnResult.getDecisionResultByName(decisionName);
        if (decisionResult == null) {
            throw new ScenarioException("DMN execution has not generated a decision result with name " + decisionName);
        }

        for (FactMappingValue expectedResult : output.getExpectedResult()) {
            ExpressionIdentifier expressionIdentifier = expectedResult.getExpressionIdentifier();
            // Expects are built from the descriptor, so a missing mapping is a
            // programming error, not a user error.
            FactMapping factMapping = scesimModelDescriptor.getFactMapping(factIdentifier, expressionIdentifier)
                    .orElseThrow(() -> new IllegalStateException("Wrong expression, this should not happen"));
            ExpressionEvaluator expressionEvaluator = expressionEvaluatorFactory.getOrCreate(expectedResult);
            ScenarioResult scenarioResult = fillResult(expectedResult,
                                                       () -> getSingleFactValueResult(factMapping,
                                                                                      expectedResult,
                                                                                      decisionResult,
                                                                                      dmnMessages,
                                                                                      expressionEvaluator),
                                                       expressionEvaluator);
            scenarioRunnerData.addResult(scenarioResult);
        }
    }
}
// Happy path: a succeeded decision whose result map matches the expectation
// produces exactly one passing ScenarioResult.
@Test
public void verifyConditions_checksArePerformed_success() {
    // test 5 - check are performed (but success)
    ScenarioRunnerData scenarioRunnerData = new ScenarioRunnerData();
    scenarioRunnerData.addExpect(new ScenarioExpect(personFactIdentifier, List.of(firstNameExpectedValue)));
    when(dmnResultMock.getDecisionResultByName(anyString())).thenReturn(dmnDecisionResultMock);
    when(dmnDecisionResultMock.getEvaluationStatus()).thenReturn(DecisionEvaluationStatus.SUCCEEDED);
    Map<String, Object> resultMap = new HashMap<>();
    resultMap.put("firstName", NAME);
    when(dmnDecisionResultMock.getResult()).thenReturn(resultMap);
    runnerHelper.verifyConditions(simulation.getScesimModelDescriptor(), scenarioRunnerData,
                                  expressionEvaluatorFactory, requestContextMock);
    assertThat(scenarioRunnerData.getResults()).hasSize(1);
    assertThat(scenarioRunnerData.getResults().get(0).getResult()).isTrue();
}
/**
 * Builds a UserIdentity from a GitHub user: provider id/login come from the
 * GsonUser, the display name is derived via generateName, and team memberships
 * (when provided) are mapped to group names.
 */
@Override
public UserIdentity create(GsonUser user, @Nullable String email, @Nullable List<GsonTeam> teams) {
    UserIdentity.Builder identityBuilder = UserIdentity.builder()
            .setProviderId(user.getId())
            .setProviderLogin(user.getLogin())
            .setName(generateName(user))
            .setEmail(email);

    // No team info means no groups at all (not an empty set).
    if (teams == null) {
        return identityBuilder.build();
    }

    identityBuilder.setGroups(teams.stream()
            .map(GithubTeamConverter::toGroupName)
            .collect(Collectors.toSet()));
    return identityBuilder.build();
}
// A GitHub user with an empty display name must fall back to the login.
@Test
public void empty_name_is_replaced_by_provider_login() {
    GsonUser gson = new GsonUser("ABCD", "octocat", "", "octocat@github.com");
    UserIdentity identity = underTest.create(gson, null, null);
    assertThat(identity.getName()).isEqualTo("octocat");
}
/**
 * Cancels this job with the given reason. Marks the cancelling flag for the
 * duration of the call, and — if the job is currently blocked waiting for
 * replica creation — releases that latch first so cancelImpl can acquire the
 * object lock promptly.
 */
@Override
public final boolean cancel(String errMsg) {
    isCancelling.set(true);
    try {
        // If waitingCreatingReplica == false, we will assume that
        // cancel thread will get the object lock very quickly.
        if (waitingCreatingReplica.get()) {
            Preconditions.checkState(createReplicaLatch != null);
            // Release the creating-replica wait with an OK status so the worker
            // thread unblocks and drops the lock.
            createReplicaLatch.countDownToZero(new Status(TStatusCode.OK, ""));
        }
        synchronized (this) {
            return cancelImpl(errMsg);
        }
    } finally {
        isCancelling.set(false);
    }
}
// Cancelling a schema-change job after its table was dropped must still move
// the job to CANCELLED.
@Test
public void testDropTableBeforeCancel() {
    db.dropTable(table.getName());
    schemaChangeJob.cancel("test");
    Assert.assertEquals(AlterJobV2.JobState.CANCELLED, schemaChangeJob.getJobState());
}
public static boolean isIPv6MixedAddress(final String input) { int splitIndex = input.lastIndexOf(':'); if (splitIndex == -1) { return false; } //the last part is a ipv4 address boolean ipv4PartValid = isIPv4Address(input.substring(splitIndex + 1)); String ipV6Part = input.substring(ZERO, splitIndex + 1); if (DOUBLE_COLON.equals(ipV6Part)) { return ipv4PartValid; } boolean ipV6UncompressedDetected = IPV6_MIXED_UNCOMPRESSED_REGEX.matcher(ipV6Part).matches(); boolean ipV6CompressedDetected = IPV6_MIXED_COMPRESSED_REGEX.matcher(ipV6Part).matches(); return ipv4PartValid && (ipV6UncompressedDetected || ipV6CompressedDetected); }
// A well-formed mixed address is accepted; a malformed tail (no valid IPv4
// part) is rejected.
@Test
public void isIPv6MixedAddress() {
    assertThat(NetAddressValidatorUtil.isIPv6MixedAddress("1:0:0:0:0:0:172.12.55.18")).isTrue();
    assertThat(NetAddressValidatorUtil.isIPv6MixedAddress("2001:DB8::8:800:200C141aA")).isFalse();
}
/**
 * Applies the configured HTTP client implementation (urlconnection or apache)
 * to the given AWS sync client builder.
 *
 * @throws IllegalArgumentException for any unrecognized client type
 */
public <T extends AwsSyncClientBuilder> void applyHttpClientConfigurations(T builder) {
    // NOTE(review): the resolved default is written back to the field, which
    // caches it for later calls — confirm this mutation is intentional rather
    // than using a local variable.
    if (Strings.isNullOrEmpty(httpClientType)) {
        httpClientType = CLIENT_TYPE_DEFAULT;
    }
    switch (httpClientType) {
        case CLIENT_TYPE_URLCONNECTION:
            // Loaded reflectively by class name so the dependency stays optional.
            UrlConnectionHttpClientConfigurations urlConnectionHttpClientConfigurations =
                    loadHttpClientConfigurations(UrlConnectionHttpClientConfigurations.class.getName());
            urlConnectionHttpClientConfigurations.configureHttpClientBuilder(builder);
            break;
        case CLIENT_TYPE_APACHE:
            ApacheHttpClientConfigurations apacheHttpClientConfigurations =
                    loadHttpClientConfigurations(ApacheHttpClientConfigurations.class.getName());
            apacheHttpClientConfigurations.configureHttpClientBuilder(builder);
            break;
        default:
            throw new IllegalArgumentException("Unrecognized HTTP client type " + httpClientType);
    }
}
// With client type "apache", the S3 builder must receive an Apache HTTP client
// builder.
@Test
public void testApacheHttpClientConfiguration() {
    Map<String, String> properties = Maps.newHashMap();
    properties.put(HttpClientProperties.CLIENT_TYPE, "apache");
    HttpClientProperties httpProperties = new HttpClientProperties(properties);
    S3ClientBuilder mockS3ClientBuilder = Mockito.mock(S3ClientBuilder.class);
    ArgumentCaptor<SdkHttpClient.Builder> httpClientBuilderCaptor =
            ArgumentCaptor.forClass(SdkHttpClient.Builder.class);
    httpProperties.applyHttpClientConfigurations(mockS3ClientBuilder);
    Mockito.verify(mockS3ClientBuilder).httpClientBuilder(httpClientBuilderCaptor.capture());
    SdkHttpClient.Builder capturedHttpClientBuilder = httpClientBuilderCaptor.getValue();
    assertThat(capturedHttpClientBuilder)
            .as("Should use apache http client")
            .isInstanceOf(ApacheHttpClient.Builder.class);
}
/**
 * Exports the proxy's current state as a gauge metric. Returns empty when the
 * context manager is not ready or no instance state is available; otherwise
 * resets the gauge and records the ordinal of the current state.
 */
@Override
public Optional<GaugeMetricFamilyMetricsCollector> export(final String pluginType) {
    if (null == ProxyContext.getInstance().getContextManager()) {
        return Optional.empty();
    }
    // Map over the optional state context instead of branching on isPresent().
    return ProxyContext.getInstance().getInstanceStateContext().map(stateContext -> {
        GaugeMetricFamilyMetricsCollector collector = MetricsCollectorRegistry.get(config, pluginType);
        collector.cleanMetrics();
        collector.addMetric(Collections.emptyList(), stateContext.getCurrentState().ordinal());
        return collector;
    });
}
// Without a context manager the exporter must report nothing.
@Test
void assertExportWithoutContextManager() {
    when(ProxyContext.getInstance().getContextManager()).thenReturn(null);
    assertFalse(new ProxyStateExporter().export("FIXTURE").isPresent());
}
// Compares the cluster's Cassandra version (fetched lazily via the commander)
// against the given version by delegating to the supplier-based overload.
public boolean isGreaterThan(String clusterId, Version version) {
    return isGreaterThan(() -> clusterVersionGetCommander.getCassandraVersion(clusterId), version);
}
// Strictly-greater semantics: 4.0.1 > 4.0.0 but not > 4.0.1 or > 4.0.2.
@Test
void isGreaterThan() {
    // given
    BDDMockito.when(clusterVersionGetCommander.getCassandraVersion(CLUSTER_ID)).thenReturn(Version.parse("4.0.1"));
    // when
    assertThat(clusterVersionEvaluator.isGreaterThan(CLUSTER_ID, Version.parse("4.0.0"))).isTrue();
    assertThat(clusterVersionEvaluator.isGreaterThan(CLUSTER_ID, Version.parse("4.0.1"))).isFalse();
    assertThat(clusterVersionEvaluator.isGreaterThan(CLUSTER_ID, Version.parse("4.0.2"))).isFalse();
}
/**
 * Sets an XMP property. The key is validated (checkKey throws for invalid
 * input), split into a prefix and local name, and resolved to a namespace URI
 * via the registry; keys with no registered namespace are silently skipped.
 */
@Override
public void set(String name, String value) {
    checkKey(name);
    String[] keyParts = splitKey(name);
    String ns = registry.getNamespaceURI(keyParts[0]);
    if (ns != null) {
        try {
            xmpData.setProperty(ns, keyParts[1], value);
        } catch (XMPException e) {
            // Ignore
            // NOTE(review): failures are swallowed by design here ("best effort"
            // metadata write, presumably) — confirm callers never need to know
            // the property was not stored.
        }
    }
}
// A null key must be rejected by checkKey with a PropertyTypeException.
@Test
public void set_nullInput_throw() {
    String notInitialized = null;
    assertThrows(PropertyTypeException.class, () -> {
        xmpMeta.set(notInitialized, "value");
    });
}
/**
 * Recomputes quota usage and collects blocks after a file diff is removed from
 * the snapshot feature. When the removed diff carries a snapshot INode, the
 * "old" usage is rebuilt from the union of all blocks across the file and all
 * diffs (using each block's own replication); otherwise the file's current
 * consumption is used. The delta between old and new usage is added to the
 * reclaim context's quota delta.
 */
public void updateQuotaAndCollectBlocks(INode.ReclaimContext reclaimContext,
    INodeFile file, FileDiff removed) {
  byte storagePolicyID = file.getStoragePolicyID();
  BlockStoragePolicy bsp = null;
  // Only resolve a storage policy when one is explicitly set on the file.
  if (storagePolicyID != HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
    bsp = reclaimContext.storagePolicySuite().
        getPolicy(file.getStoragePolicyID());
  }

  QuotaCounts oldCounts;
  if (removed.snapshotINode != null) {
    oldCounts = new QuotaCounts.Builder().build();
    // collect all distinct blocks
    Set<BlockInfo> allBlocks = new HashSet<BlockInfo>();
    if (file.getBlocks() != null) {
      allBlocks.addAll(Arrays.asList(file.getBlocks()));
    }
    if (removed.getBlocks() != null) {
      allBlocks.addAll(Arrays.asList(removed.getBlocks()));
    }
    for (FileDiff diff : diffs) {
      BlockInfo[] diffBlocks = diff.getBlocks();
      if (diffBlocks != null) {
        allBlocks.addAll(Arrays.asList(diffBlocks));
      }
    }
    for (BlockInfo b: allBlocks) {
      short replication = b.getReplication();
      // Incomplete blocks are charged at the preferred block size.
      long blockSize = b.isComplete() ? b.getNumBytes() : file
          .getPreferredBlockSize();
      oldCounts.addStorageSpace(blockSize * replication);
      if (bsp != null) {
        // Charge per-storage-type space only for types that enforce quotas.
        List<StorageType> oldTypeChosen = bsp.chooseStorageTypes(replication);
        for (StorageType t : oldTypeChosen) {
          if (t.supportTypeQuota()) {
            oldCounts.addTypeSpace(t, blockSize);
          }
        }
      }
    }
    // The snapshot INode's ACL feature goes away with the diff.
    AclFeature aclFeature = removed.getSnapshotINode().getAclFeature();
    if (aclFeature != null) {
      AclStorage.removeAclFeature(aclFeature);
    }
  } else {
    oldCounts = file.storagespaceConsumed(null);
  }

  getDiffs().combineAndCollectSnapshotBlocks(reclaimContext, file, removed);

  if (file.getBlocks() != null) {
    // After removal, blocks may need their replication factor bumped up to
    // the max still required by remaining diffs or the file itself.
    short replInDiff = getMaxBlockRepInDiffs(removed);
    short repl = (short) Math.max(file.getPreferredBlockReplication(),
                                  replInDiff);
    for (BlockInfo b : file.getBlocks()) {
      if (repl != b.getReplication()) {
        reclaimContext.collectedBlocks().addUpdateReplicationFactor(b, repl);
      }
    }
  }

  QuotaCounts current = file.storagespaceConsumed(bsp);
  reclaimContext.quotaDelta().add(oldCounts.subtract(current));
}
// Exercises updateQuotaAndCollectBlocks in two phases: first with no snapshot
// INode on the diff (delta must be zero), then with a snapshot INode carrying a
// higher replication (delta must reflect the replication and storage-type
// changes).
@Test
public void testUpdateQuotaAndCollectBlocks() {
    FileDiffList diffs = new FileDiffList();
    FileWithSnapshotFeature sf = new FileWithSnapshotFeature(diffs);
    FileDiff diff = mock(FileDiff.class);
    BlockStoragePolicySuite bsps = mock(BlockStoragePolicySuite.class);
    BlockStoragePolicy bsp = mock(BlockStoragePolicy.class);
    BlockInfo[] blocks = new BlockInfo[] {
        new BlockInfoContiguous(new Block(1, BLOCK_SIZE, 1), REPL_1)
    };
    BlockManager bm = mock(BlockManager.class);

    // No snapshot
    INodeFile file = mock(INodeFile.class);
    when(file.getFileWithSnapshotFeature()).thenReturn(sf);
    when(file.getBlocks()).thenReturn(blocks);
    when(file.getStoragePolicyID()).thenReturn((byte) 1);
    // The replication factor lives in the top bits of the packed header field.
    Whitebox.setInternalState(file, "header", (long) REPL_1 << 48);
    when(file.getPreferredBlockReplication()).thenReturn(REPL_1);
    when(bsps.getPolicy(anyByte())).thenReturn(bsp);
    INode.BlocksMapUpdateInfo collectedBlocks = mock(
        INode.BlocksMapUpdateInfo.class);
    ArrayList<INode> removedINodes = new ArrayList<>();
    INode.ReclaimContext ctx = new INode.ReclaimContext(
        bsps, collectedBlocks, removedINodes, null);
    sf.updateQuotaAndCollectBlocks(ctx, file, diff);
    QuotaCounts counts = ctx.quotaDelta().getCountsCopy();
    Assert.assertEquals(0, counts.getStorageSpace());
    Assert.assertTrue(counts.getTypeSpaces().allLessOrEqual(0));

    // INode only exists in the snapshot
    INodeFile snapshotINode = mock(INodeFile.class);
    Whitebox.setInternalState(snapshotINode, "header", (long) REPL_3 << 48);
    Whitebox.setInternalState(diff, "snapshotINode", snapshotINode);
    when(diff.getSnapshotINode()).thenReturn(snapshotINode);
    when(bsp.chooseStorageTypes(REPL_1))
        .thenReturn(Lists.newArrayList(SSD));
    when(bsp.chooseStorageTypes(REPL_3))
        .thenReturn(Lists.newArrayList(DISK));
    blocks[0].setReplication(REPL_3);
    sf.updateQuotaAndCollectBlocks(ctx, file, diff);
    counts = ctx.quotaDelta().getCountsCopy();
    // Storage delta reflects the replication bump; type space moves from SSD to DISK.
    Assert.assertEquals((REPL_3 - REPL_1) * BLOCK_SIZE,
                        counts.getStorageSpace());
    Assert.assertEquals(BLOCK_SIZE,
                        counts.getTypeSpaces().get(DISK));
    Assert.assertEquals(-BLOCK_SIZE,
                        counts.getTypeSpaces().get(SSD));
}
/**
 * Looks up the permission factory registered for the given service and creates
 * a permission with the supplied name and actions.
 *
 * @throws IllegalArgumentException when no factory is registered for the service
 */
public static Permission getPermission(String name, String serviceName, String... actions) {
    PermissionFactory factory = PERMISSION_FACTORY_MAP.get(serviceName);
    if (factory != null) {
        return factory.create(name, actions);
    }
    throw new IllegalArgumentException("No permissions found for service: " + serviceName);
}
// The cache service name must resolve to a CachePermission instance.
@Test
public void getPermission_Cache() {
    Permission permission = ActionConstants.getPermission("foo", ICacheService.SERVICE_NAME);
    assertNotNull(permission);
    assertTrue(permission instanceof CachePermission);
}
/**
 * Asks the broker to resume the half-message (transactional) check for the
 * given topic/message id. Returns true on a SUCCESS response; any other
 * response code is logged and reported as false.
 */
public boolean resumeCheckHalfMessage(final String addr, String topic, String msgId,
    final long timeoutMillis) throws RemotingException, InterruptedException {
    ResumeCheckHalfMessageRequestHeader requestHeader = new ResumeCheckHalfMessageRequestHeader();
    requestHeader.setTopic(topic);
    requestHeader.setMsgId(msgId);
    RemotingCommand request =
        RemotingCommand.createRequestCommand(RequestCode.RESUME_CHECK_HALF_MESSAGE, requestHeader);
    RemotingCommand response = this.remotingClient.invokeSync(
        MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), addr), request, timeoutMillis);
    // invokeSync either returns a response or throws; the assert documents that
    // invariant (only active with -ea).
    assert response != null;
    switch (response.getCode()) {
        case ResponseCode.SUCCESS: {
            return true;
        }
        default:
            log.error("Failed to resume half message check logic. Remark={}", response.getRemark());
            return false;
    }
}
// A SUCCESS broker response must make resumeCheckHalfMessage return true.
@Test
public void testResumeCheckHalfMessage_Success() throws InterruptedException, RemotingException {
    doAnswer(mock -> {
        RemotingCommand request = mock.getArgument(1);
        return createResumeSuccessResponse(request);
    }).when(remotingClient).invokeSync(anyString(), any(RemotingCommand.class), anyLong());
    boolean result = mqClientAPI.resumeCheckHalfMessage(brokerAddr, "topic", "test", 3000);
    assertThat(result).isEqualTo(true);
}
/**
 * Creates (or returns the cached) singleton extension instance, keyed by the
 * extension class's class loader and fully-qualified name. Only classes listed
 * in {@code extensionClassNames} (or all, when the list is empty) are cached.
 */
@Override
@SuppressWarnings("unchecked")
public <T> T create(Class<T> extensionClass) {
    String extensionClassName = extensionClass.getName();
    ClassLoader extensionClassLoader = extensionClass.getClassLoader();

    // computeIfAbsent replaces the containsKey/put pair: one lookup instead of
    // three, and no window between the check and the insert.
    Map<String, Object> classLoaderBucket =
            cache.computeIfAbsent(extensionClassLoader, classLoader -> new HashMap<>());

    if (classLoaderBucket.containsKey(extensionClassName)) {
        return (T) classLoaderBucket.get(extensionClassName);
    }

    T extension = super.create(extensionClass);
    // An empty allow-list means "cache everything".
    if (extensionClassNames.isEmpty() || extensionClassNames.contains(extensionClassName)) {
        classLoaderBucket.put(extensionClassName, extension);
    }
    return extension;
}
// The singleton factory must hand back the same instance for repeated requests
// of the same extension class.
@Test
public void create() {
    ExtensionFactory extensionFactory = new SingletonExtensionFactory(pluginManager);
    Object extensionOne = extensionFactory.create(TestExtension.class);
    Object extensionTwo = extensionFactory.create(TestExtension.class);
    assertSame(extensionOne, extensionTwo);
}
// Evaluates a PMML request by delegating to the shared Efesto execution helper.
@Override
public Optional<EfestoOutputPMML> evaluateInput(EfestoInput<PMMLRequestData> toEvaluate,
                                                EfestoRuntimeContext context) {
    return executeEfestoInput(toEvaluate, context);
}
// A well-formed PMML request for a known model must produce a present output.
@Test
void evaluateCorrectInput() {
    modelLocalUriId = getModelLocalUriIdFromPmmlIdFactory(FILE_NAME, MODEL_NAME);
    PMMLRequestData pmmlRequestData = getPMMLRequestDataWithInputData(MODEL_NAME, FILE_NAME);
    EfestoInput<PMMLRequestData> efestoInput = new BaseEfestoInput<>(modelLocalUriId, pmmlRequestData);
    Optional<EfestoOutputPMML> retrieved =
            kieRuntimeServicePMMLRequestData.evaluateInput(efestoInput,
                                                           getEfestoContext(memoryCompilerClassLoader));
    assertThat(retrieved).isNotNull().isPresent();
}
// Splits a batch of change events into SourceRecords groups so that schema
// changes are isolated between before/after watermark events; delegates to the
// stateful splitter.
Iterator<SourceRecords> splitSchemaChangeStream(List<DataChangeEvent> batchEvents) {
    return new SchemaChangeStreamSplitter().split(batchEvents);
}
// Exercises splitSchemaChangeStream over seven scenarios with mixed data,
// schema-change, unknown-schema-change, and heartbeat events, asserting how the
// splitter groups records and where it inserts schema-change before/after
// watermark events.
@Test
public void testSplitSchemaChangeStream() throws Exception {
    IncrementalSourceStreamFetcher fetcher = createFetcher();
    List<DataChangeEvent> inputEvents = new ArrayList<>();
    List<SourceRecords> records = new ArrayList<>();

    // Case 1: data-only batch -> single group, no watermarks.
    inputEvents.add(new DataChangeEvent(createDataEvent()));
    inputEvents.add(new DataChangeEvent(createDataEvent()));
    Iterator<SourceRecords> outputEvents = fetcher.splitSchemaChangeStream(inputEvents);
    outputEvents.forEachRemaining(records::add);
    Assertions.assertEquals(1, records.size());
    Assertions.assertEquals(2, records.get(0).getSourceRecordList().size());
    Assertions.assertTrue(
        SourceRecordUtils.isDataChangeRecord(records.get(0).getSourceRecordList().get(0)));
    Assertions.assertTrue(
        SourceRecordUtils.isDataChangeRecord(records.get(0).getSourceRecordList().get(1)));

    // Case 2: schema-change-only batch -> before-watermark group, then the
    // schema changes closed by an after-watermark.
    inputEvents = new ArrayList<>();
    records = new ArrayList<>();
    inputEvents.add(new DataChangeEvent(createSchemaChangeEvent()));
    inputEvents.add(new DataChangeEvent(createSchemaChangeEvent()));
    outputEvents = fetcher.splitSchemaChangeStream(inputEvents);
    outputEvents.forEachRemaining(records::add);
    Assertions.assertEquals(2, records.size());
    Assertions.assertEquals(1, records.get(0).getSourceRecordList().size());
    Assertions.assertTrue(
        WatermarkEvent.isSchemaChangeBeforeWatermarkEvent(
            records.get(0).getSourceRecordList().get(0)));
    Assertions.assertEquals(3, records.get(1).getSourceRecordList().size());
    Assertions.assertTrue(
        SourceRecordUtils.isSchemaChangeEvent(records.get(1).getSourceRecordList().get(0)));
    Assertions.assertTrue(
        SourceRecordUtils.isSchemaChangeEvent(records.get(1).getSourceRecordList().get(1)));
    Assertions.assertTrue(
        WatermarkEvent.isSchemaChangeAfterWatermarkEvent(
            records.get(1).getSourceRecordList().get(2)));

    // Case 3: data then schema changes (trailing unknown event) -> data group
    // ends with a before-watermark; schema group ends with an after-watermark.
    inputEvents = new ArrayList<>();
    records = new ArrayList<>();
    inputEvents.add(new DataChangeEvent(createDataEvent()));
    inputEvents.add(new DataChangeEvent(createDataEvent()));
    inputEvents.add(new DataChangeEvent(createSchemaChangeEvent()));
    inputEvents.add(new DataChangeEvent(createSchemaChangeEvent()));
    inputEvents.add(new DataChangeEvent(createSchemaChangeUnknownEvent()));
    outputEvents = fetcher.splitSchemaChangeStream(inputEvents);
    outputEvents.forEachRemaining(records::add);
    Assertions.assertEquals(2, records.size());
    Assertions.assertEquals(3, records.get(0).getSourceRecordList().size());
    Assertions.assertEquals(3, records.get(1).getSourceRecordList().size());
    Assertions.assertTrue(
        SourceRecordUtils.isDataChangeRecord(records.get(0).getSourceRecordList().get(0)));
    Assertions.assertTrue(
        SourceRecordUtils.isDataChangeRecord(records.get(0).getSourceRecordList().get(1)));
    Assertions.assertTrue(
        WatermarkEvent.isSchemaChangeBeforeWatermarkEvent(
            records.get(0).getSourceRecordList().get(2)));
    Assertions.assertTrue(
        SourceRecordUtils.isSchemaChangeEvent(records.get(1).getSourceRecordList().get(0)));
    Assertions.assertTrue(
        SourceRecordUtils.isSchemaChangeEvent(records.get(1).getSourceRecordList().get(1)));
    Assertions.assertTrue(
        WatermarkEvent.isSchemaChangeAfterWatermarkEvent(
            records.get(1).getSourceRecordList().get(2)));

    // Case 4: schema changes first, then data (trailing unknown event).
    inputEvents = new ArrayList<>();
    records = new ArrayList<>();
    inputEvents.add(new DataChangeEvent(createSchemaChangeEvent()));
    inputEvents.add(new DataChangeEvent(createSchemaChangeEvent()));
    inputEvents.add(new DataChangeEvent(createDataEvent()));
    inputEvents.add(new DataChangeEvent(createDataEvent()));
    inputEvents.add(new DataChangeEvent(createSchemaChangeUnknownEvent()));
    outputEvents = fetcher.splitSchemaChangeStream(inputEvents);
    outputEvents.forEachRemaining(records::add);
    Assertions.assertEquals(3, records.size());
    Assertions.assertEquals(1, records.get(0).getSourceRecordList().size());
    Assertions.assertEquals(3, records.get(1).getSourceRecordList().size());
    Assertions.assertEquals(2, records.get(2).getSourceRecordList().size());
    Assertions.assertTrue(
        WatermarkEvent.isSchemaChangeBeforeWatermarkEvent(
            records.get(0).getSourceRecordList().get(0)));
    Assertions.assertTrue(
        SourceRecordUtils.isSchemaChangeEvent(records.get(1).getSourceRecordList().get(0)));
    Assertions.assertTrue(
        SourceRecordUtils.isSchemaChangeEvent(records.get(1).getSourceRecordList().get(1)));
    Assertions.assertTrue(
        WatermarkEvent.isSchemaChangeAfterWatermarkEvent(
            records.get(1).getSourceRecordList().get(2)));
    Assertions.assertTrue(
        SourceRecordUtils.isDataChangeRecord(records.get(2).getSourceRecordList().get(0)));
    Assertions.assertTrue(
        SourceRecordUtils.isDataChangeRecord(records.get(2).getSourceRecordList().get(1)));

    // Case 5: data / schema-change pair / data -> three groups with watermarks
    // bracketing the contiguous schema changes.
    inputEvents = new ArrayList<>();
    records = new ArrayList<>();
    inputEvents.add(new DataChangeEvent(createDataEvent()));
    inputEvents.add(new DataChangeEvent(createSchemaChangeEvent()));
    inputEvents.add(new DataChangeEvent(createSchemaChangeEvent()));
    inputEvents.add(new DataChangeEvent(createDataEvent()));
    outputEvents = fetcher.splitSchemaChangeStream(inputEvents);
    outputEvents.forEachRemaining(records::add);
    Assertions.assertEquals(3, records.size());
    Assertions.assertEquals(2, records.get(0).getSourceRecordList().size());
    Assertions.assertEquals(3, records.get(1).getSourceRecordList().size());
    Assertions.assertEquals(1, records.get(2).getSourceRecordList().size());
    Assertions.assertTrue(
        SourceRecordUtils.isDataChangeRecord(records.get(0).getSourceRecordList().get(0)));
    Assertions.assertTrue(
        WatermarkEvent.isSchemaChangeBeforeWatermarkEvent(
            records.get(0).getSourceRecordList().get(1)));
    Assertions.assertTrue(
        SourceRecordUtils.isSchemaChangeEvent(records.get(1).getSourceRecordList().get(0)));
    Assertions.assertTrue(
        SourceRecordUtils.isSchemaChangeEvent(records.get(1).getSourceRecordList().get(1)));
    Assertions.assertTrue(
        WatermarkEvent.isSchemaChangeAfterWatermarkEvent(
            records.get(1).getSourceRecordList().get(2)));
    Assertions.assertTrue(
        SourceRecordUtils.isDataChangeRecord(records.get(2).getSourceRecordList().get(0)));

    // Case 6: alternating data and schema changes -> each schema change gets
    // its own bracketed group.
    inputEvents = new ArrayList<>();
    records = new ArrayList<>();
    inputEvents.add(new DataChangeEvent(createDataEvent()));
    inputEvents.add(new DataChangeEvent(createSchemaChangeEvent()));
    inputEvents.add(new DataChangeEvent(createDataEvent()));
    inputEvents.add(new DataChangeEvent(createSchemaChangeEvent()));
    outputEvents = fetcher.splitSchemaChangeStream(inputEvents);
    outputEvents.forEachRemaining(records::add);
    Assertions.assertEquals(4, records.size());
    Assertions.assertEquals(2, records.get(0).getSourceRecordList().size());
    Assertions.assertEquals(2, records.get(1).getSourceRecordList().size());
    Assertions.assertEquals(2, records.get(2).getSourceRecordList().size());
    Assertions.assertEquals(2, records.get(3).getSourceRecordList().size());
    Assertions.assertTrue(
        SourceRecordUtils.isDataChangeRecord(records.get(0).getSourceRecordList().get(0)));
    Assertions.assertTrue(
        WatermarkEvent.isSchemaChangeBeforeWatermarkEvent(
            records.get(0).getSourceRecordList().get(1)));
    Assertions.assertTrue(
        SourceRecordUtils.isSchemaChangeEvent(records.get(1).getSourceRecordList().get(0)));
    Assertions.assertTrue(
        WatermarkEvent.isSchemaChangeAfterWatermarkEvent(
            records.get(1).getSourceRecordList().get(1)));
    Assertions.assertTrue(
        SourceRecordUtils.isDataChangeRecord(records.get(2).getSourceRecordList().get(0)));
    Assertions.assertTrue(
        WatermarkEvent.isSchemaChangeBeforeWatermarkEvent(
            records.get(2).getSourceRecordList().get(1)));
    Assertions.assertTrue(
        SourceRecordUtils.isSchemaChangeEvent(records.get(3).getSourceRecordList().get(0)));
    Assertions.assertTrue(
        WatermarkEvent.isSchemaChangeAfterWatermarkEvent(
            records.get(3).getSourceRecordList().get(1)));

    // Case 7: interleaved heartbeats, data, and schema changes -> heartbeats
    // stay with the surrounding non-schema groups; 11 groups total.
    inputEvents = new ArrayList<>();
    records = new ArrayList<>();
    inputEvents.add(new DataChangeEvent(createHeartbeatEvent()));
    inputEvents.add(new DataChangeEvent(createDataEvent()));
    inputEvents.add(new DataChangeEvent(createSchemaChangeEvent()));
    inputEvents.add(new DataChangeEvent(createHeartbeatEvent()));
    inputEvents.add(new DataChangeEvent(createSchemaChangeEvent()));
    inputEvents.add(new DataChangeEvent(createDataEvent()));
    inputEvents.add(new DataChangeEvent(createDataEvent()));
    inputEvents.add(new DataChangeEvent(createSchemaChangeEvent()));
    inputEvents.add(new DataChangeEvent(createHeartbeatEvent()));
    inputEvents.add(new DataChangeEvent(createDataEvent()));
    inputEvents.add(new DataChangeEvent(createHeartbeatEvent()));
    inputEvents.add(new DataChangeEvent(createSchemaChangeEvent()));
    inputEvents.add(new DataChangeEvent(createSchemaChangeEvent()));
    inputEvents.add(new DataChangeEvent(createHeartbeatEvent()));
    inputEvents.add(new DataChangeEvent(createDataEvent()));
    inputEvents.add(new DataChangeEvent(createSchemaChangeEvent()));
    inputEvents.add(new DataChangeEvent(createDataEvent()));
    inputEvents.add(new DataChangeEvent(createHeartbeatEvent()));
    outputEvents = fetcher.splitSchemaChangeStream(inputEvents);
    outputEvents.forEachRemaining(records::add);
    Assertions.assertEquals(11, records.size());
    Assertions.assertEquals(3, records.get(0).getSourceRecordList().size());
    Assertions.assertTrue(
        SourceRecordUtils.isHeartbeatRecord(records.get(0).getSourceRecordList().get(0)));
    Assertions.assertTrue(
        SourceRecordUtils.isDataChangeRecord(records.get(0).getSourceRecordList().get(1)));
    Assertions.assertTrue(
        WatermarkEvent.isSchemaChangeBeforeWatermarkEvent(
            records.get(0).getSourceRecordList().get(2)));
    Assertions.assertEquals(2, records.get(1).getSourceRecordList().size());
    Assertions.assertTrue(
        SourceRecordUtils.isSchemaChangeEvent(records.get(1).getSourceRecordList().get(0)));
    Assertions.assertTrue(
        WatermarkEvent.isSchemaChangeAfterWatermarkEvent(
            records.get(1).getSourceRecordList().get(1)));
    Assertions.assertEquals(2, records.get(2).getSourceRecordList().size());
    Assertions.assertTrue(
        SourceRecordUtils.isHeartbeatRecord(records.get(2).getSourceRecordList().get(0)));
    Assertions.assertTrue(
        WatermarkEvent.isSchemaChangeBeforeWatermarkEvent(
            records.get(2).getSourceRecordList().get(1)));
    Assertions.assertEquals(2, records.get(3).getSourceRecordList().size());
    Assertions.assertTrue(
        SourceRecordUtils.isSchemaChangeEvent(records.get(3).getSourceRecordList().get(0)));
    Assertions.assertTrue(
        WatermarkEvent.isSchemaChangeAfterWatermarkEvent(
            records.get(3).getSourceRecordList().get(1)));
    Assertions.assertEquals(3, records.get(4).getSourceRecordList().size());
    Assertions.assertTrue(
        SourceRecordUtils.isDataChangeRecord(records.get(4).getSourceRecordList().get(0)));
    Assertions.assertTrue(
        SourceRecordUtils.isDataChangeRecord(records.get(4).getSourceRecordList().get(1)));
    Assertions.assertTrue(
        WatermarkEvent.isSchemaChangeBeforeWatermarkEvent(
            records.get(4).getSourceRecordList().get(2)));
    Assertions.assertEquals(2, records.get(5).getSourceRecordList().size());
    Assertions.assertTrue(
        SourceRecordUtils.isSchemaChangeEvent(records.get(5).getSourceRecordList().get(0)));
    Assertions.assertTrue(
        WatermarkEvent.isSchemaChangeAfterWatermarkEvent(
            records.get(5).getSourceRecordList().get(1)));
    Assertions.assertEquals(4, records.get(6).getSourceRecordList().size());
    Assertions.assertTrue(
        SourceRecordUtils.isHeartbeatRecord(records.get(6).getSourceRecordList().get(0)));
    Assertions.assertTrue(
        SourceRecordUtils.isDataChangeRecord(records.get(6).getSourceRecordList().get(1)));
    Assertions.assertTrue(
        SourceRecordUtils.isHeartbeatRecord(records.get(6).getSourceRecordList().get(2)));
    Assertions.assertTrue(
        WatermarkEvent.isSchemaChangeBeforeWatermarkEvent(
            records.get(6).getSourceRecordList().get(3)));
    Assertions.assertEquals(3, records.get(7).getSourceRecordList().size());
    Assertions.assertTrue(
        SourceRecordUtils.isSchemaChangeEvent(records.get(7).getSourceRecordList().get(0)));
    Assertions.assertTrue(
        SourceRecordUtils.isSchemaChangeEvent(records.get(7).getSourceRecordList().get(1)));
    Assertions.assertTrue(
        WatermarkEvent.isSchemaChangeAfterWatermarkEvent(
            records.get(7).getSourceRecordList().get(2)));
    Assertions.assertEquals(3, records.get(8).getSourceRecordList().size());
    Assertions.assertTrue(
        SourceRecordUtils.isHeartbeatRecord(records.get(8).getSourceRecordList().get(0)));
    Assertions.assertTrue(
        SourceRecordUtils.isDataChangeRecord(records.get(8).getSourceRecordList().get(1)));
    Assertions.assertTrue(
        WatermarkEvent.isSchemaChangeBeforeWatermarkEvent(
            records.get(8).getSourceRecordList().get(2)));
    Assertions.assertEquals(2, records.get(9).getSourceRecordList().size());
    Assertions.assertTrue(
        SourceRecordUtils.isSchemaChangeEvent(records.get(9).getSourceRecordList().get(0)));
    Assertions.assertTrue(
        WatermarkEvent.isSchemaChangeAfterWatermarkEvent(
            records.get(9).getSourceRecordList().get(1)));
    Assertions.assertEquals(2, records.get(10).getSourceRecordList().size());
    Assertions.assertTrue(
        SourceRecordUtils.isDataChangeRecord(records.get(10).getSourceRecordList().get(0)));
    Assertions.assertTrue(
        SourceRecordUtils.isHeartbeatRecord(records.get(10).getSourceRecordList().get(1)));
}
/**
 * Scans the full type hierarchy of the given class (superclasses and interfaces)
 * for annotations, delegating to {@link AnnotationScanner#TYPE_HIERARCHY}.
 *
 * @param targetClass the class whose hierarchy is scanned
 * @return the annotations found across the hierarchy, in scan order
 */
public static List<Annotation> scanClass(Class<?> targetClass) {
    return AnnotationScanner.TYPE_HIERARCHY.getAnnotationsIfSupport(targetClass);
}
@Test
public void scanClassTest() {
    // Hierarchy traversed by the scanner:
    // TargetClass -> TargetSuperClass ----------------------------------> SuperInterface
    //             -> TargetSuperInterface -> SuperTargetSuperInterface -> SuperInterface
    final List<Annotation> annotations = AnnotationUtil.scanClass(TargetClass.class);
    // Five distinct types in the hierarchy yield five annotations; the assertion
    // order below pins the scan order (class chain first, then interfaces).
    assertEquals(5, annotations.size());
    assertEquals("TargetClass", ((AnnotationForTest)annotations.get(0)).value());
    assertEquals("TargetSuperClass", ((AnnotationForTest)annotations.get(1)).value());
    assertEquals("TargetSuperInterface", ((AnnotationForTest)annotations.get(2)).value());
    assertEquals("SuperInterface", ((AnnotationForTest)annotations.get(3)).value());
    assertEquals("SuperTargetSuperInterface", ((AnnotationForTest)annotations.get(4)).value());
}
/**
 * Creates a {@link FopProducer} bound to this endpoint, the shared FOP factory
 * and the endpoint's extended output format.
 */
@Override
public Producer createProducer() throws Exception {
    return new FopProducer(this, fopFactory, outputType.getFormatExtended());
}
@Test
public void generatePdfFromXslfoWithSpecificText() throws Exception {
    if (!canTest()) {
        // cannot run on CI
        return;
    }
    // Send a minimal XSL-FO body through the fop:pdf endpoint's producer.
    Endpoint endpoint = context().getEndpoint("fop:pdf");
    Producer producer = endpoint.createProducer();
    Exchange exchange = new DefaultExchange(context);
    exchange.getIn().setBody(FopHelper.decorateTextWithXSLFO("Test Content"));

    producer.process(exchange);

    // Round-trip check: the text extracted from the generated PDF must match
    // the text that was wrapped into the XSL-FO input.
    PDDocument document = getDocumentFrom(exchange);
    String content = FopHelper.extractTextFrom(document);
    assertEquals("Test Content", content);
}
public boolean error(final SelectableChannel channel) { return error((Object) channel); }
@Test(timeout = 5000)
public void testError() {
    ZContext ctx = new ZContext();
    ZPoller poller = new ZPoller(ctx);
    try {
        Socket socket = ctx.createSocket(SocketType.XPUB);
        poller.register(socket, ZPoller.ERR);
        // No poll has been performed, so every error accessor must report false.
        boolean rc = poller.error(socket);
        assertThat(rc, is(false));
        rc = poller.isError(socket);
        assertThat(rc, is(false));
        rc = poller.pollerr(socket);
        assertThat(rc, is(false));
    } finally {
        // Close the poller before the context; ctx.close() also reclaims the socket.
        poller.close();
        ctx.close();
    }
}
/**
 * Creates a {@link KsqlTarget} for the given server, delegating to the two-arg
 * overload with an empty map (presumably additional request properties — confirm
 * against the overload's contract).
 *
 * @param server the URI of the ksql server
 * @return the target pointing at {@code server}
 */
public KsqlTarget target(final URI server) {
    return target(server, Collections.emptyMap());
}
@Test
public void shouldHandleArbitraryErrorsOnPostRequests() {
    // Given: the server replies with an uncommon HTTP status (417 Expectation Failed).
    server.setErrorCode(417);

    // When:
    KsqlTarget target = ksqlClient.target(serverUri);
    RestResponse<KsqlEntityList> response =
        target.postKsqlRequest("sql", Collections.emptyMap(), Optional.of(123L));

    // Then: the client maps the status to an erroneous response whose message
    // carries the standard reason phrase for the code.
    assertThat(server.getHttpMethod(), is(HttpMethod.POST));
    assertThat(response.isErroneous(), is(true));
    assertThat(response.getErrorMessage().getMessage(),
        is("The server returned an unexpected error: Expectation Failed"));
}
/**
 * Convenience overload that builds a {@link SourceDescription} without any
 * remote host statistics: it delegates to the full factory with empty stat
 * streams and a placeholder local host ({@code ("", 0)}).
 *
 * @param dataSource the source to describe
 * @param extended whether to include extended information
 * @param readQueries queries reading from the source
 * @param writeQueries queries writing to the source
 * @param topicDescription optional backing-topic description
 * @param queryOffsetSummaries offset summaries for related queries
 * @param sourceConstraints constraints applying to the source
 * @param metricCollectors collectors supplying local metrics
 */
public static SourceDescription create(
    final DataSource dataSource,
    final boolean extended,
    final List<RunningQuery> readQueries,
    final List<RunningQuery> writeQueries,
    final Optional<TopicDescription> topicDescription,
    final List<QueryOffsetSummary> queryOffsetSummaries,
    final List<String> sourceConstraints,
    final MetricCollectors metricCollectors
) {
    return create(
        dataSource,
        extended,
        readQueries,
        writeQueries,
        topicDescription,
        queryOffsetSummaries,
        sourceConstraints,
        Stream.empty(),
        Stream.empty(),
        new KsqlHostInfo("", 0),
        metricCollectors
    );
}
@Test
public void shouldReturnLocalStatsBasedOnKafkaTopic() {
    // Given: collectors that return distinct stat sets for the error flag
    // (true -> errorStats, false -> stats) keyed by the source's Kafka topic.
    final String kafkaTopicName = "kafka";
    final DataSource dataSource = buildDataSource(kafkaTopicName, Optional.empty());
    final MetricCollectors mock = Mockito.mock(MetricCollectors.class);
    Mockito.when(mock.getAndFormatStatsFor(anyString(), anyBoolean()))
        .thenReturn(mockStringStat);
    Mockito.when(mock.getStatsFor(dataSource.getKafkaTopicName(), true))
        .thenReturn(errorStats);
    Mockito.when(mock.getStatsFor(dataSource.getKafkaTopicName(), false))
        .thenReturn(stats);
    KsqlHostInfo localhost = new KsqlHostInfo("myhost", 10);

    // When
    final SourceDescription sourceDescription = SourceDescriptionFactory.create(
        dataSource,
        true,
        Collections.emptyList(),
        Collections.emptyList(),
        Optional.empty(),
        Collections.emptyList(),
        Collections.emptyList(),
        Stream.empty(),
        Stream.empty(),
        localhost,
        mock
    );

    // Then:
    // Legacy formatted-string stats still come through the mock.
    // TODO deprecate and remove
    assertThat(
        sourceDescription.getStatistics(),
        containsString(mockStringStat));
    assertThat(
        sourceDescription.getErrorStats(),
        containsString(mockStringStat));

    // Also check includes its own stats in cluster stats: every local stat,
    // attributed to localhost, must appear in the cluster-wide collections.
    final Stream<QueryHostStat> localStats = stats.stream()
        .map((s) -> QueryHostStat.fromStat(s, new KsqlHostInfoEntity(localhost)));
    assertThat(
        localStats.collect(Collectors.toList()),
        everyItem(isIn(sourceDescription.getClusterStatistics()))
    );
    final Stream<QueryHostStat> localErrors = errorStats.stream()
        .map((s) -> QueryHostStat.fromStat(s, new KsqlHostInfoEntity(localhost)));
    assertThat(
        localErrors.collect(Collectors.toList()),
        everyItem(isIn(sourceDescription.getClusterErrorStats()))
    );
}
/**
 * Submits the given operator chain for asynchronous execution and tracks the
 * resulting future so the chain can be looked up by id while running.
 *
 * <p>On completion the chain is either closed (clean finish) or cancelled
 * (error block returned, or an exception thrown while draining).
 */
public void register(OpChain operatorChain) {
    Future<?> scheduledFuture = _executorService.submit(new TraceRunnable() {
        @Override
        public void runJob() {
            boolean isFinished = false;
            TransferableBlock returnedErrorBlock = null;
            Throwable thrown = null;
            try {
                LOGGER.trace("({}): Executing", operatorChain);
                // Drain the chain's root operator until an end-of-stream block appears.
                TransferableBlock result = operatorChain.getRoot().nextBlock();
                while (!result.isEndOfStreamBlock()) {
                    result = operatorChain.getRoot().nextBlock();
                }
                isFinished = true;
                // The loop exits only on an end-of-stream block, so an error block
                // here is presumably a flavor of end-of-stream carrying exceptions.
                if (result.isErrorBlock()) {
                    returnedErrorBlock = result;
                    LOGGER.error("({}): Completed erroneously {} {}", operatorChain,
                        result.getQueryStats(), result.getExceptions());
                } else {
                    LOGGER.debug("({}): Completed {}", operatorChain, result.getQueryStats());
                }
            } catch (Exception e) {
                LOGGER.error("({}): Failed to execute operator chain!", operatorChain, e);
                thrown = e;
            } finally {
                // NOTE(review): this remove() can race with the put() below — if the
                // task completes before register() returns, the stale future may be
                // re-inserted into the map. Confirm whether callers tolerate this.
                _submittedOpChainMap.remove(operatorChain.getId());
                if (returnedErrorBlock != null || thrown != null) {
                    if (thrown == null) {
                        thrown = new RuntimeException(
                            "Error block " + returnedErrorBlock.getExceptions());
                    }
                    operatorChain.cancel(thrown);
                } else if (isFinished) {
                    operatorChain.close();
                }
            }
        }
    });
    _submittedOpChainMap.put(operatorChain.getId(), scheduledFuture);
}
@Test
public void shouldCallCloseOnOperatorsThatFinishSuccessfully()
    throws InterruptedException {
    OpChain opChain = getChain(_operatorA);
    OpChainSchedulerService schedulerService = new OpChainSchedulerService(_executor);

    // The operator immediately returns a clean end-of-stream block, so the
    // scheduler should take the success path and call close() (not cancel()).
    CountDownLatch latch = new CountDownLatch(1);
    Mockito.when(_operatorA.nextBlock())
        .thenReturn(TransferableBlockTestUtils.getEndOfStreamTransferableBlock(0));
    Mockito.doAnswer(inv -> {
        latch.countDown();
        return null;
    }).when(_operatorA).close();

    schedulerService.register(opChain);

    // close() runs on the scheduler's executor, so wait for it asynchronously.
    Assert.assertTrue(latch.await(10, TimeUnit.SECONDS),
        "expected await to be called in less than 10 seconds");
}
/**
 * Renders the JSON schema of a REST message class as a pretty-printed string
 * for the generated documentation.
 *
 * @param messageClass the message type to document
 * @param nestedAsyncOperationResultClass if non-null, its schema is embedded
 *     under the async-result "operation" property of the outer schema
 * @param emptyMessageClass the sentinel type whose schema is rendered as "{}"
 * @return the pretty-printed JSON schema, or "{}" for the empty message class
 */
@VisibleForTesting
static String createMessageHtmlEntry(
        Class<?> messageClass,
        @Nullable Class<?> nestedAsyncOperationResultClass,
        Class<?> emptyMessageClass) {
    JsonSchema schema = generateSchema(messageClass);

    // Splice the async-operation result schema into the outer object schema.
    if (nestedAsyncOperationResultClass != null) {
        JsonSchema innerSchema = generateSchema(nestedAsyncOperationResultClass);
        schema.asObjectSchema()
                .getProperties()
                .put(AsynchronousOperationResult.FIELD_NAME_OPERATION, innerSchema);
    }

    // If the message declares an additional-field type, document it as the
    // schema's additionalProperties.
    ApiSpecGeneratorUtils.findAdditionalFieldType(messageClass)
            .map(RestAPIDocGenerator::generateSchema)
            .ifPresent(
                    additionalFieldSchema ->
                            schema.asObjectSchema()
                                    .setAdditionalProperties(
                                            new ObjectSchema.SchemaAdditionalProperties(
                                                    additionalFieldSchema)));

    String json;
    if (messageClass == emptyMessageClass) {
        json = "{}";
    } else {
        try {
            json = mapper.writerWithDefaultPrettyPrinter().writeValueAsString(schema);
        } catch (JsonProcessingException e) {
            LOG.error(
                    "Failed to write message schema for class {}.",
                    messageClass.getCanonicalName(),
                    e);
            throw new RuntimeException(
                    "Failed to write message schema for class "
                            + messageClass.getCanonicalName()
                            + ".",
                    e);
        }
    }

    return json;
}
@Test
void testAdditionalFields() {
    // A message class with an additional-field type must surface it as the
    // schema's "additionalProperties" entry in the generated JSON.
    final String messageHtmlEntry =
            RestAPIDocGenerator.createMessageHtmlEntry(
                    TestAdditionalFields.class, null, EmptyRequestBody.class);

    assertThat(messageHtmlEntry)
            .isEqualTo(
                    "{\n"
                            + "  \"type\" : \"object\",\n"
                            + "  \"id\" : \"urn:jsonschema:org:apache:flink:docs:rest:RestAPIDocGeneratorTest:TestAdditionalFields\",\n"
                            + "  \"additionalProperties\" : {\n"
                            + "    \"type\" : \"string\"\n"
                            + "  }\n"
                            + "}");
}
/**
 * @return the current count of failed getActivities retrievals recorded by the
 *         underlying metric
 */
public int getActivitiesFailedRetrieved() {
    return numGetActivitiesFailedRetrieved.value();
}
@Test
public void testGetActivitiesRetrievedFailed() {
    // A failed getActivities call must increment the failure metric by exactly one.
    final long failedCountBefore = metrics.getActivitiesFailedRetrieved();
    badSubCluster.getActivitiesFailed();
    final long failedCountAfter = metrics.getActivitiesFailedRetrieved();
    Assert.assertEquals(failedCountBefore + 1, failedCountAfter);
}
/**
 * Issues the endpoint call against the registry's v2 API root over HTTPS.
 *
 * @return the result of the call
 * @throws IOException if the call fails with an I/O error
 * @throws RegistryException if the registry rejects the call
 */
T call() throws IOException, RegistryException {
    final String apiRouteBase =
        String.format("https://%s/v2/", registryEndpointRequestProperties.getServerUrl());
    final URL initialRequestUrl = registryEndpointProvider.getApiRoute(apiRouteBase);
    return call(initialRequestUrl);
}
@Test
public void testCall_credentialsNotSentOverHttp() throws IOException, RegistryException {
    // Simulate a 401 whose Authorization header was cleared because the
    // connection fell back to plain HTTP.
    ResponseException unauthorizedException =
        mockResponseException(HttpStatusCodes.STATUS_CODE_UNAUTHORIZED);
    Mockito.when(unauthorizedException.requestAuthorizationCleared()).thenReturn(true);
    setUpRegistryResponse(unauthorizedException);

    try {
        endpointCaller.call();
        Assert.fail("Call should have failed");

    } catch (RegistryCredentialsNotSentException ex) {
        // Expected: instead of surfacing a bare 401, the caller explains that
        // credentials were withheld over the insecure connection.
        Assert.assertEquals(
            "Required credentials for serverUrl/imageName were not sent because the connection was over HTTP",
            ex.getMessage());
    }
}
/**
 * Returns the parameter-qualifying views that the calling user is allowed to read.
 *
 * @param searchUser the authenticated user performing the lookup
 * @return the readable views, de-duplicated into a set
 */
@POST
@ApiOperation("Get all views that match given parameter value")
@NoAuditEvent("Only returning matching views, not changing any data")
public Collection<ViewParameterSummaryDTO> forParameter(@Context SearchUser searchUser) {
    return qualifyingViewsService.forValue().stream()
            .filter(view -> searchUser.canReadView(view))
            .collect(Collectors.toSet());
}
@Test
public void returnsNoViewsIfNoneArePermitted() {
    // A user without read permission on any of the matching views...
    final SearchUser searchUser = TestSearchUser.builder()
        .denyView("view1")
        .denyView("view2")
        .build();

    final QualifyingViewsService service = mockViewsService("view1", "view2");
    final QualifyingViewsResource resource = new QualifyingViewsResource(service);

    final Collection<ViewParameterSummaryDTO> result = resource.forParameter(searchUser);

    // ...must receive an empty result rather than the unfiltered list.
    assertThat(result).isEmpty();
}
/**
 * Builds the join parameters for the given key column and the two sides' schemas.
 *
 * <p>If the key column appears in neither schema, the value joiner reserves one
 * extra slot for it so it can be appended to the joined values.
 *
 * @param keyColName the name of the join key column
 * @param leftSchema schema of the left side
 * @param rightSchema schema of the right side
 * @return the assembled join parameters
 */
public static JoinParams create(
    final ColumnName keyColName,
    final LogicalSchema leftSchema,
    final LogicalSchema rightSchema
) {
    final int leftValueCount = leftSchema.value().size();
    final int rightValueCount = rightSchema.value().size();
    final boolean appendKey = neitherContain(keyColName, leftSchema, rightSchema);
    final int keySlots = appendKey ? 1 : 0;

    final KsqlValueJoiner joiner = new KsqlValueJoiner(leftValueCount, rightValueCount, keySlots);
    return new JoinParams(joiner, createSchema(keyColName, leftSchema, rightSchema));
}
@Test
public void shouldBuildCorrectRightKeyedSchema() {
    // Given: the join key is the right side's key column.
    final ColumnName keyName = Iterables.getOnlyElement(RIGHT_SCHEMA.key()).name();

    // When:
    final JoinParams joinParams = JoinParamsFactory.create(keyName, LEFT_SCHEMA, RIGHT_SCHEMA);

    // Then: right key becomes the schema key, and the value columns are the
    // left side's columns followed by the right side's (left key and right key
    // both appear as value columns as well).
    assertThat(joinParams.getSchema(), is(LogicalSchema.builder()
        .keyColumn(ColumnName.of("R_K"), SqlTypes.STRING)
        .valueColumn(ColumnName.of("L_BLUE"), SqlTypes.STRING)
        .valueColumn(ColumnName.of("L_GREEN"), SqlTypes.INTEGER)
        .valueColumn(ColumnName.of("L_K"), SqlTypes.STRING)
        .valueColumn(ColumnName.of("R_RED"), SqlTypes.BIGINT)
        .valueColumn(ColumnName.of("R_ORANGE"), SqlTypes.DOUBLE)
        .valueColumn(ColumnName.of("R_K"), SqlTypes.STRING)
        .build())
    );
}
/**
 * Post-interception hook: refreshes the cached wrapper for the intercepted
 * Kafka consumer and forwards to the configured handler, if any.
 *
 * @param context the interception context
 * @return the (unmodified) context
 */
@Override
public ExecuteContext after(ExecuteContext context) {
    // Skip calls that Sermant itself issued to Kafka, to avoid re-entrancy.
    if (InvokeUtils.isKafkaInvokeBySermant(Thread.currentThread().getStackTrace())) {
        return context;
    }
    // The consumer cache is keyed by the intercepted object's hashCode().
    KafkaConsumerWrapper kafkaConsumerWrapper = KafkaConsumerController.getKafkaConsumerCache()
            .get(context.getObject().hashCode());
    if (kafkaConsumerWrapper == null) {
        // Consumer was never registered; nothing to update.
        return context;
    }
    updateKafkaConsumerWrapper(kafkaConsumerWrapper);
    if (handler != null) {
        handler.doAfter(context);
    }
    return context;
}
@Test
public void testAfter() {
    ExecuteContext context = ExecuteContext.forMemberMethod(mockConsumer, null, null, null, null);
    interceptor.after(context);
    // After interception the wrapper should hold no partitions or topics and
    // must not be marked as assigned.
    Assert.assertEquals(0, kafkaConsumerWrapper.getOriginalPartitions().size());
    Assert.assertEquals(0, kafkaConsumerWrapper.getOriginalTopics().size());
    Assert.assertFalse(kafkaConsumerWrapper.isAssign());
}
/**
 * Checks whether this privilege set grants access to the given database,
 * either via the wildcard privilege or an explicit grant.
 *
 * @param database the database name to check
 * @return true if access is granted
 */
@Override
public boolean hasPrivileges(final String database) {
    if (databases.contains(AuthorityConstants.PRIVILEGE_WILDCARD)) {
        return true;
    }
    return databases.contains(database);
}
@Test
void assertHasPrivileges() {
    // A set granting exactly "foo_db" must report privileges for that database.
    final DatabasePermittedPrivileges privileges =
            new DatabasePermittedPrivileges(Collections.singleton("foo_db"));
    assertTrue(privileges.hasPrivileges("foo_db"));
}
/**
 * Fans the double metric out to every wrapped collector.
 *
 * @param descriptor identifies the metric being collected
 * @param value the metric value
 */
@Override
public void collectDouble(MetricDescriptor descriptor, double value) {
    for (MetricsCollector delegate : collectors) {
        delegate.collectDouble(descriptor, value);
    }
}
@Test
public void testCollectDouble() {
    compositeCollector.collectDouble(metricsDescriptor, 42.42D);
    // Both wrapped collectors must receive the same descriptor and value.
    verify(collectorMock1).collectDouble(metricsDescriptor, 42.42D);
    verify(collectorMock2).collectDouble(metricsDescriptor, 42.42D);
}
/**
 * Fetches the consume statistics from the given broker, delegating to the
 * admin implementation.
 *
 * @param brokerAddr address of the broker to query
 * @param isOrder whether to fetch stats for ordered consumption
 * @param timeoutMillis request timeout in milliseconds
 */
@Override
public ConsumeStatsList fetchConsumeStatsInBroker(final String brokerAddr, boolean isOrder,
    long timeoutMillis) throws RemotingConnectException, RemotingSendRequestException,
    RemotingTimeoutException, MQClientException, InterruptedException {
    return this.defaultMQAdminExtImpl.fetchConsumeStatsInBroker(brokerAddr, isOrder, timeoutMillis);
}
@Test
public void testFetchConsumeStatsInBroker() throws InterruptedException, RemotingTimeoutException,
    MQClientException, RemotingSendRequestException, RemotingConnectException {
    // Stub the client API so the broker returns a stats list tagged with its address.
    ConsumeStatsList result = new ConsumeStatsList();
    result.setBrokerAddr("127.0.0.1:10911");
    when(mqClientInstance.getMQClientAPIImpl()
        .fetchConsumeStatsInBroker("127.0.0.1:10911", false, 10000)).thenReturn(result);
    // The admin facade must pass the arguments through and return the stubbed result.
    ConsumeStatsList consumeStatsList =
        defaultMQAdminExt.fetchConsumeStatsInBroker("127.0.0.1:10911", false, 10000);
    assertThat(consumeStatsList.getBrokerAddr()).isEqualTo("127.0.0.1:10911");
}
/**
 * Handles POST /order: logs the incoming request body, delegates processing to
 * the order service, and returns the service's result with HTTP 200.
 *
 * @param request the raw request body; may be null since it is not required
 * @return 200 OK wrapping the order-processing result
 */
@PostMapping("/order")
public ResponseEntity<String> processOrder(@RequestBody(required = false) String request) {
    LOGGER.info("Received order request: {}", request);
    final String result = orderService.processOrder();
    LOGGER.info("Order processed result: {}", result);
    return ResponseEntity.ok(result);
}
@Test void ProcessOrderShouldReturnFailureStatusWhen() { // Arrange when(orderService.processOrder()).thenReturn("Order processing failed"); // Act ResponseEntity<String> response = orderController.processOrder("test order"); // Assert assertEquals("Order processing failed", response.getBody()); }
/**
 * Parses a maven-shade build-output file into dependencies grouped by module.
 *
 * <p>The line stream is opened in try-with-resources so the underlying file
 * handle is always released.
 *
 * @param buildOutput path to the shade build output
 * @return dependencies keyed by module name
 * @throws IOException if the file cannot be read
 */
public static Map<String, Set<Dependency>> parseShadeOutput(Path buildOutput) throws IOException {
    try (Stream<String> outputLines = Files.lines(buildOutput)) {
        return parseShadeOutput(outputLines);
    }
}
@Test
void testParsing() {
    final Map<String, Set<Dependency>> dependenciesByModule =
            ShadeParser.parseShadeOutput(getTestDependencyCopy());

    // Two modules are expected: m1 with two external dependencies (one carrying
    // a classifier), and m2 depending internally on m1.
    assertThat(dependenciesByModule).containsOnlyKeys("m1", "m2");
    assertThat(dependenciesByModule.get("m1"))
            .containsExactlyInAnyOrder(
                    Dependency.create("external", "dependency1", "2.1", null),
                    Dependency.create("external", "dependency4", "2.4", "classifier"));
    assertThat(dependenciesByModule.get("m2"))
            .containsExactlyInAnyOrder(Dependency.create("internal", "m1", "1.1", null));
}
static boolean isValidHostNameForSNI(String hostname) { // See https://datatracker.ietf.org/doc/html/rfc6066#section-3 return hostname != null && // SNI HostName has to be a FQDN according to TLS SNI Extension spec (see [1]), // which means that is has to have at least a host name and a domain part. hostname.indexOf('.') > 0 && !hostname.endsWith(".") && !hostname.startsWith("/") && !NetUtil.isValidIpV4Address(hostname) && !NetUtil.isValidIpV6Address(hostname); }
@Test
public void testValidHostNameForSni() {
    // Leading slash and trailing dot disqualify a name.
    assertFalse(SslUtils.isValidHostNameForSNI("/test.de"),
                "SNI domain can't start with /");
    assertFalse(SslUtils.isValidHostNameForSNI("test.de."),
                "SNI domain can't end with a dot/");
    assertTrue(SslUtils.isValidHostNameForSNI("test.de"));
    // see https://datatracker.ietf.org/doc/html/rfc6066#section-3
    // it has to be test.local to qualify as SNI
    assertFalse(SslUtils.isValidHostNameForSNI("test"),
                "SNI has to be FQDN");
}