focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
@Override
public boolean enableSendingOldValues(final boolean forceMaterialization) {
    // Materialized here: old values can be served from our own store.
    if (queryableName != null) {
        sendOldValues = true;
        return true;
    }
    // Not materialized: old values must be forwarded by the parent table.
    final boolean parentEnabled = parent.enableSendingOldValues(forceMaterialization);
    if (parentEnabled) {
        sendOldValues = true;
    }
    return sendOldValues;
}
// Verifies that enabling old-value propagation on a materialized mapValues()
// result does not force the upstream table to forward old values: the
// materialized store can serve them directly.
@Test
public void shouldNotEnableSendingOldValuesOnParentIfMapValuesMaterialized() {
    final StreamsBuilder builder = new StreamsBuilder();
    final String topic1 = "topic1";
    final KTableImpl<String, String, String> table1 =
        (KTableImpl<String, String, String>) builder.table(topic1, consumed);
    final KTableImpl<String, String, Integer> table2 =
        (KTableImpl<String, String, Integer>) table1.mapValues(
            s -> Integer.valueOf(s),
            Materialized.<String, Integer, KeyValueStore<Bytes, byte[]>>as("bob").withValueSerde(Serdes.Integer())
        );
    table2.enableSendingOldValues(true);
    // Parent stays untouched; only the materialized child sends old values.
    assertThat(table1.sendingOldValueEnabled(), is(false));
    assertThat(table2.sendingOldValueEnabled(), is(true));
    testSendingOldValues(builder, topic1, table2);
}
/**
 * Validates that the Kafka Streams state directory exists (creating it if
 * needed), is a directory, and is writable/executable by the KSQL server,
 * throwing {@link KsqlServerException} with remediation hints otherwise.
 *
 * Improvement: the three error messages previously repeated the same hint
 * strings inline; the shared suffixes are now built once, eliminating the
 * risk of the copies drifting apart. The rendered messages are byte-identical.
 */
@VisibleForTesting
static void enforceStreamStateDirAvailability(final File streamsStateDir) {
    // Hint on how to point KSQL at a different, writable directory.
    final String configHint =
        "\n or change it to a writable directory by setting '"
            + KsqlConfig.KSQL_STREAMS_PREFIX + StreamsConfig.STATE_DIR_CONFIG
            + "' config in the properties file.";
    // The first two failures can also be fixed via the parent directory, so
    // they carry an extra hint line.
    final String parentHint =
        "\n Make sure the directory exists and is writable for KSQL server "
            + "\n or its parent directory is writable by KSQL server"
            + configHint;
    if (!streamsStateDir.exists()) {
        final boolean mkDirSuccess = streamsStateDir.mkdirs();
        if (!mkDirSuccess) {
            throw new KsqlServerException(
                "Could not create the kafka streams state directory: "
                    + streamsStateDir.getPath()
                    + parentHint);
        }
    }
    if (!streamsStateDir.isDirectory()) {
        throw new KsqlServerException(
            streamsStateDir.getPath() + " is not a directory." + parentHint);
    }
    if (!streamsStateDir.canWrite() || !streamsStateDir.canExecute()) {
        throw new KsqlServerException(
            "The kafka streams state directory is not writable for KSQL server: "
                + streamsStateDir.getPath()
                + "\n Make sure the directory exists and is writable for KSQL server "
                + configHint);
    }
}
// Verifies the error raised when the configured streams state path exists but
// is not a directory, including the remediation hints in the message.
@Test
public void shouldFailIfStreamsStateDirectoryIsNotDirectory() {
    // Given:
    when(mockStreamsStateDir.isDirectory()).thenReturn(false);
    // When:
    final Exception e = assertThrows(
        KsqlServerException.class,
        () -> KsqlServerMain.enforceStreamStateDirAvailability(mockStreamsStateDir)
    );
    // Then:
    assertThat(e.getMessage(), containsString(
        "/var/lib/kafka-streams is not a directory.\n"
            + " Make sure the directory exists and is writable for KSQL server \n"
            + " or its parent directory is writable by KSQL server\n"
            + " or change it to a writable directory by setting 'ksql.streams.state.dir' config in"
            + " the properties file."));
}
// Template method: validates the source/target paths, delegates listing
// construction to the subclass hook, then records listing metadata (file
// path, total bytes, record count) in the job configuration and validates
// the final listing (e.g. for duplicates).
public final void buildListing(Path pathToListFile, DistCpContext distCpContext) throws IOException {
    validatePaths(distCpContext);
    doBuildListing(pathToListFile, distCpContext);
    Configuration config = getConf();
    // Publish listing location and totals for the DistCp job to consume.
    config.set(DistCpConstants.CONF_LABEL_LISTING_FILE_PATH, pathToListFile.toString());
    config.setLong(DistCpConstants.CONF_LABEL_TOTAL_BYTES_TO_BE_COPIED, getBytesToCopy());
    config.setLong(DistCpConstants.CONF_LABEL_TOTAL_NUMBER_OF_RECORDS, getNumberOfPaths());
    validateFinalListing(pathToListFile, distCpContext);
    LOG.info("Number of paths in the copy list: " + this.getNumberOfPaths());
}
// End-to-end check of buildListing: duplicate relative paths across source
// roots must be rejected, byte/path totals must still be recorded, and a
// listing over deleted sources must fail with InvalidInputException.
@Test(timeout=10000)
public void testBuildListing() {
    FileSystem fs = null;
    try {
        fs = FileSystem.get(getConf());
        List<Path> srcPaths = new ArrayList<Path>();
        Path p1 = new Path("/tmp/in/1");
        Path p2 = new Path("/tmp/in/2");
        Path p3 = new Path("/tmp/in2/2");
        Path target = new Path("/tmp/out/1");
        // Both source parents contain a file named "2" -> duplicate entries.
        srcPaths.add(p1.getParent());
        srcPaths.add(p3.getParent());
        TestDistCpUtils.createFile(fs, "/tmp/in/1");
        TestDistCpUtils.createFile(fs, "/tmp/in/2");
        TestDistCpUtils.createFile(fs, "/tmp/in2/2");
        fs.mkdirs(target);
        OutputStream out = fs.create(p1);
        out.write("ABC".getBytes());
        out.close();
        out = fs.create(p2);
        out.write("DEF".getBytes());
        out.close();
        out = fs.create(p3);
        out.write("GHIJ".getBytes());
        out.close();
        Path listingFile = new Path("/tmp/file");
        final DistCpOptions options = new DistCpOptions.Builder(srcPaths, target)
            .withSyncFolder(true)
            .build();
        CopyListing listing = new SimpleCopyListing(getConf(), CREDENTIALS);
        try {
            listing.buildListing(listingFile, new DistCpContext(options));
            Assert.fail("Duplicates not detected");
        } catch (DuplicateFileException ignore) {
            // expected: "2" exists under both source roots
        }
        // 3 + 3 + 4 bytes across the three files.
        assertThat(listing.getBytesToCopy()).isEqualTo(10);
        assertThat(listing.getNumberOfPaths()).isEqualTo(3);
        TestDistCpUtils.delete(fs, "/tmp");
        try {
            listing.buildListing(listingFile, new DistCpContext(options));
            Assert.fail("Invalid input not detected");
        } catch (InvalidInputException ignore) {
            // expected: sources were deleted above
        }
        TestDistCpUtils.delete(fs, "/tmp");
    } catch (IOException e) {
        LOG.error("Exception encountered ", e);
        Assert.fail("Test build listing failed");
    } finally {
        TestDistCpUtils.delete(fs, "/tmp");
    }
}
// Assigns each input row to its windows and emits it downstream. Event-time
// rows with a null rowtime are counted and dropped; processing-time rows are
// stamped with the current processing time. Timestamps are shifted to UTC
// before window assignment.
@Override
public void processElement(StreamRecord<RowData> element) throws Exception {
    RowData inputRow = element.getValue();
    long timestamp;
    if (windowAssigner.isEventTime()) {
        if (inputRow.isNullAt(rowtimeIndex)) {
            // null timestamp would be dropped
            numNullRowTimeRecordsDropped.inc();
            return;
        }
        // precision 3 -- millisecond timestamps
        timestamp = inputRow.getTimestamp(rowtimeIndex, 3).getMillisecond();
    } else {
        timestamp = getProcessingTimeService().getCurrentProcessingTime();
    }
    timestamp = toUtcTimestampMills(timestamp, shiftTimeZone);
    Collection<TimeWindow> elementWindows = windowAssigner.assignWindows(inputRow, timestamp);
    collect(inputRow, elementWindows);
}
// Sliding windows of size 3s / slide 1s: each element lands in three windows.
// Also verifies that a late element is still assigned and emitted (this
// operator does not drop late data).
@Test
public void testHopWindows() throws Exception {
    final SlidingWindowAssigner assigner =
        SlidingWindowAssigner.of(Duration.ofSeconds(3), Duration.ofSeconds(1));
    OneInputStreamOperatorTestHarness<RowData, RowData> testHarness =
        createTestHarness(assigner, shiftTimeZone);
    testHarness.setup(OUT_SERIALIZER);
    testHarness.open();
    // process elements
    ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
    testHarness.processElement(insertRecord("key1", 1, 20L));
    testHarness.processElement(insertRecord("key2", 1, 3999L));
    testHarness.processWatermark(new Watermark(999));
    // append 3 fields: window_start, window_end, window_time
    expectedOutput.add(
        insertRecord("key1", 1, 20L, localMills(-2000L), localMills(1000L), 999L));
    expectedOutput.add(
        insertRecord("key1", 1, 20L, localMills(-1000L), localMills(2000L), 1999L));
    expectedOutput.add(insertRecord("key1", 1, 20L, localMills(0L), localMills(3000L), 2999L));
    expectedOutput.add(
        insertRecord("key2", 1, 3999L, localMills(1000L), localMills(4000L), 3999L));
    expectedOutput.add(
        insertRecord("key2", 1, 3999L, localMills(2000L), localMills(5000L), 4999L));
    expectedOutput.add(
        insertRecord("key2", 1, 3999L, localMills(3000L), localMills(6000L), 5999L));
    expectedOutput.add(new Watermark(999));
    ASSERTER.assertOutputEqualsSorted(
        "Output was not correct.", expectedOutput, testHarness.getOutput());
    // late element would not be dropped
    testHarness.processElement(insertRecord("key2", 1, 80L));
    expectedOutput.add(
        insertRecord("key2", 1, 80L, localMills(-2000L), localMills(1000L), 999L));
    expectedOutput.add(
        insertRecord("key2", 1, 80L, localMills(-1000L), localMills(2000L), 1999L));
    expectedOutput.add(insertRecord("key2", 1, 80L, localMills(0L), localMills(3000L), 2999L));
    ASSERTER.assertOutputEqualsSorted(
        "Output was not correct.", expectedOutput, testHarness.getOutput());
    testHarness.close();
}
@Override
protected SchemaTransform from(ManagedConfig managedConfig) {
    // Fail fast on invalid configuration before resolving the provider.
    managedConfig.validate();
    final SchemaTransformProvider underlyingProvider =
        Preconditions.checkNotNull(
            getAllProviders().get(managedConfig.getTransformIdentifier()),
            "Could not find a transform with the identifier "
                + "%s. This could be either due to the dependency with the "
                + "transform not being available in the classpath or due to "
                + "the specified transform not being supported.",
            managedConfig.getTransformIdentifier());
    return new ManagedSchemaTransform(managedConfig, underlyingProvider);
}
// Smoke test: building a ManagedSchemaTransform from a YAML config file must
// not throw. NOTE(review): no assertions -- passes as long as from() succeeds.
@Test
public void testBuildWithYamlFile() throws URISyntaxException {
    String yamlConfigPath =
        Paths.get(getClass().getClassLoader().getResource("test_config.yaml").toURI())
            .toFile()
            .getAbsolutePath();
    ManagedConfig config =
        ManagedConfig.builder()
            .setTransformIdentifier(TestSchemaTransformProvider.IDENTIFIER)
            .setConfigUrl(yamlConfigPath)
            .build();
    new ManagedSchemaTransformProvider(null).from(config);
}
/**
 * Returns a new map whose keys have every occurrence of {@code oldChar}
 * replaced by {@code newChar}; values are carried over unchanged and the
 * input map is not modified.
 */
public static Map<String, Object> replaceKeyCharacter(Map<String, Object> map, char oldChar, char newChar) {
    final Map<String, Object> renamed = new HashMap<>(map.size());
    map.forEach((key, value) -> renamed.put(key.replace(oldChar, newChar), value));
    return renamed;
}
// NOTE(review): despite the name, replaceKeyCharacter returns a NEW map and
// does not mutate its (immutable) argument -- consider renaming this test.
@Test
public void renameKeyReturnsMutatedMap() {
    final Map<String, Object> map = ImmutableMap.of(
        "foo.bar", "test",
        "baz@quux", "test");
    // Only keys containing '.' are rewritten; other keys pass through as-is.
    assertThat(MapUtils.replaceKeyCharacter(map, '.', '_'))
        .hasSameSizeAs(map)
        .containsEntry("foo_bar", "test")
        .containsEntry("baz@quux", "test");
}
/**
 * Expands a glob pattern into the matching paths. A pattern without any glob
 * meta-characters is returned unchanged as a single-element list.
 */
public static ImmutableList<String> glob(final String glob) {
    final Path path = getGlobPath(glob);
    final int globIndex = getGlobIndex(path);
    final boolean isPlainPath = globIndex < 0;
    return isPlainPath ? of(glob) : doGlob(path, searchPath(path, globIndex));
}
// Globbing a relative pattern should return relative results containing the
// matched json files.
@Test
public void should_glob_relative_files() {
    ImmutableList<String> files = Globs.glob("src/test/resources/details/*.json");
    assertThat(files.contains("src/test/resources/details/foo.json"), is(true));
    assertThat(files.contains("src/test/resources/details/bar.json"), is(true));
}
protected void serviceStop() throws Exception { //stop all services that were started int numOfServicesToStop = serviceList.size(); if (LOG.isDebugEnabled()) { LOG.debug(getName() + ": stopping services, size=" + numOfServicesToStop); } stop(numOfServicesToStop, STOP_ONLY_STARTED_SERVICES); super.serviceStop(); }
// Stops a composite service where one child throws on stop; the manager must
// still drive every child to the STOPPED state.
@Test
public void testServiceStop() {
    ServiceManager serviceManager = new ServiceManager("ServiceManager");
    // Add services
    for (int i = 0; i < NUM_OF_SERVICES; i++) {
        CompositeServiceImpl service = new CompositeServiceImpl(i);
        if (i == FAILED_SERVICE_SEQ_NUMBER) {
            service.setThrowExceptionOnStop(true);
        }
        serviceManager.addTestService(service);
    }
    CompositeServiceImpl[] services = serviceManager.getServices().toArray(
        new CompositeServiceImpl[0]);
    Configuration conf = new Configuration();
    // Initialise the composite service
    serviceManager.init(conf);
    serviceManager.start();
    // Stop the composite service
    try {
        serviceManager.stop();
    } catch (ServiceTestRuntimeException e) {
        // expected: one child is configured to throw on stop
    }
    assertInState(STATE.STOPPED, services);
}
/** Parses the signature once per DoFn class; subsequent lookups hit the cache. */
public static <FnT extends DoFn<?, ?>> DoFnSignature getSignature(Class<FnT> fn) {
    return signatureCache.computeIfAbsent(fn, clazz -> parseSignature(clazz));
}
// Verifies that @FieldAccess fields are picked up by the signature parser and
// that schema element parameters are recorded for the @Element Row parameter.
@Test
public void testFieldAccess() throws IllegalAccessException {
    FieldAccessDescriptor descriptor = FieldAccessDescriptor.withFieldNames("foo", "bar");
    DoFn<String, String> doFn =
        new DoFn<String, String>() {
            @FieldAccess("foo")
            final FieldAccessDescriptor fieldAccess = descriptor;

            @ProcessElement
            public void process(@FieldAccess("foo") @Element Row row) {}
        };
    DoFnSignature sig = DoFnSignatures.getSignature(doFn.getClass());
    assertThat(sig.fieldAccessDeclarations().get("foo"), notNullValue());
    Field field = sig.fieldAccessDeclarations().get("foo").field();
    assertThat(field.getName(), equalTo("fieldAccess"));
    assertThat(field.get(doFn), equalTo(descriptor));
    assertFalse(sig.processElement().getSchemaElementParameters().isEmpty());
}
@Override public void put(String key, String value) { // Assume any header property that begins with 'Camel' is for internal use if (!key.startsWith("Camel")) { this.map.put(encodeDash(key), value); } }
// Later puts with the same key overwrite earlier values in the backing map.
@Test
public void putProperties() {
    CamelMessagingHeadersInjectAdapter adapter = new CamelMessagingHeadersInjectAdapter(map, true);
    adapter.put("key1", "value1");
    adapter.put("key2", "value2");
    adapter.put("key1", "value3");
    assertEquals("value3", map.get("key1"));
    assertEquals("value2", map.get("key2"));
}
public static String describe(Object o) { try { if (o.getClass().getMethod("toString").getDeclaringClass() != Object.class) { String str = o.toString(); if (str != null) { return str; } } } catch (Exception e) { // fallback } return o.getClass().getName(); }
// describe() falls back to the class name for Object's default toString and
// uses the overridden toString otherwise.
@Test
public void describe() {
    assertThat(ScannerUtils.describe(new Object())).isEqualTo("java.lang.Object");
    assertThat(ScannerUtils.describe(new TestClass())).isEqualTo("overridden");
}
// Returns a future that completes when the removal response has been
// resolved. Failure modes: the top-level request error; for a remove-all
// request, the first per-member error wrapped in KafkaException; for an
// explicit member list, the first requested member that carries an error.
public KafkaFuture<Void> all() {
    final KafkaFutureImpl<Void> result = new KafkaFutureImpl<>();
    this.future.whenComplete((memberErrors, throwable) -> {
        if (throwable != null) {
            result.completeExceptionally(throwable);
        } else {
            if (removeAll()) {
                // Remove-all: scan every member error returned by the broker.
                for (Map.Entry<MemberIdentity, Errors> entry: memberErrors.entrySet()) {
                    Exception exception = entry.getValue().exception();
                    if (exception != null) {
                        Throwable ex = new KafkaException("Encounter exception when trying to remove: " + entry.getKey(), exception);
                        result.completeExceptionally(ex);
                        return;
                    }
                }
            } else {
                // Explicit member list: fail on the first member with an error.
                for (MemberToRemove memberToRemove : memberInfos) {
                    if (maybeCompleteExceptionally(memberErrors, memberToRemove.toMemberIdentity(), result)) {
                        return;
                    }
                }
            }
            result.complete(null);
        }
    });
    return result;
}
// A group-level authorization failure surfaces through all() as a
// GroupAuthorizationException.
@Test
public void testTopLevelErrorConstructor() throws InterruptedException {
    memberFutures.completeExceptionally(Errors.GROUP_AUTHORIZATION_FAILED.exception());
    RemoveMembersFromConsumerGroupResult topLevelErrorResult =
        new RemoveMembersFromConsumerGroupResult(memberFutures, membersToRemove);
    TestUtils.assertFutureError(topLevelErrorResult.all(), GroupAuthorizationException.class);
}
@Udf(description = "When filtering a map, "
    + "the function provided must have a boolean result. "
    + "For each map entry, the function will be applied to the "
    + "key and value arguments in that order. The filtered map is returned."
)
public <K, V> Map<K, V> filterMap(
    @UdfParameter(description = "The map") final Map<K, V> map,
    @UdfParameter(description = "The lambda function") final BiFunction<K, V, Boolean> biFunction
) {
    // SQL null semantics: a null map or null predicate yields a null result.
    if (map == null || biFunction == null) {
        return null;
    }
    // NOTE: entries with null keys or values end in a NullPointerException,
    // raised either by the predicate or by Collectors.toMap (which rejects
    // nulls); callers depend on that behavior.
    return map.entrySet()
        .stream()
        .filter(e -> biFunction.apply(e.getKey(), e.getValue()))
        .collect(Collectors.toMap(Entry::getKey, Entry::getValue));
}
// filterMap must reject entries with null keys or null values: both cases end
// in a NullPointerException (from the predicate or from Collectors.toMap).
@Test
public void shouldThrowErrorOnNullMapInput() {
    final Map<Integer, Integer> m1 = new HashMap<>();
    m1.put(null, 4);
    final Map<String, String> m2 = new HashMap<>();
    m2.put("nope", null);
    assertThrows(NullPointerException.class, () -> udf.filterMap(m1, biFunction1()));
    assertThrows(NullPointerException.class, () -> udf.filterMap(m2, biFunction2()));
}
@Override
protected boolean hasPluginInfo() {
    // Defined iff the metadata store has an entry for this store's plugin id.
    final Object pluginInfo = metadataStore().getPluginInfo(getPluginId());
    return pluginInfo != null;
}
// hasPluginInfo() is true once the metadata store holds info for the store's
// plugin id.
@Test
public void shouldReturnTrueIfPluginInfoIsDefined() {
    final ArtifactPluginInfo pluginInfo =
        new ArtifactPluginInfo(pluginDescriptor("plugin_id"), null, null, null, null, null);
    store.setPluginInfo(pluginInfo);
    final ArtifactStore artifactStore = new ArtifactStore("id", "plugin_id");
    assertTrue(artifactStore.hasPluginInfo());
}
@Override
public boolean encode(
    @NonNull Resource<GifDrawable> resource, @NonNull File file, @NonNull Options options) {
    final GifDrawable drawable = resource.get();
    final Transformation<Bitmap> transformation = drawable.getFrameTransformation();
    // A unit transformation means the frames are untouched, so the original
    // GIF bytes can be written straight to disk.
    final boolean isUnitTransformation = transformation instanceof UnitTransformation;
    if (isUnitTransformation || !options.get(ENCODE_TRANSFORMATION)) {
        return writeDataDirect(drawable.getBuffer(), file);
    }
    return encodeTransformedToFile(drawable, file);
}
// When a non-unit frame transformation is set and transformation encoding is
// enabled, each decoded frame must be transformed at the drawable's intrinsic
// size before being handed to the GIF encoder.
@Test
public void testWritesTransformedBitmaps() {
    final Bitmap frame = Bitmap.createBitmap(100, 100, Bitmap.Config.ARGB_8888);
    when(decoder.getFrameCount()).thenReturn(1);
    when(decoder.getNextFrame()).thenReturn(frame);
    when(gifEncoder.start(any(OutputStream.class))).thenReturn(true);
    int expectedWidth = 123;
    int expectedHeight = 456;
    when(gifDrawable.getIntrinsicWidth()).thenReturn(expectedWidth);
    when(gifDrawable.getIntrinsicHeight()).thenReturn(expectedHeight);
    Bitmap transformedFrame = Bitmap.createBitmap(200, 200, Bitmap.Config.RGB_565);
    when(transformedResource.get()).thenReturn(transformedFrame);
    when(frameTransformation.transform(
        anyContext(), eq(frameResource), eq(expectedWidth), eq(expectedHeight)))
        .thenReturn(transformedResource);
    when(gifDrawable.getFrameTransformation()).thenReturn(frameTransformation);
    encoder.encode(resource, file, options);
    // The encoder must receive the transformed frame, not the raw one.
    verify(gifEncoder).addFrame(eq(transformedFrame));
}
// Creates a remote folder via the Storegate Files API, caches the returned
// file id for later lookups, and returns the folder with server-side
// attributes attached.
@Override
public Path mkdir(final Path folder, final TransferStatus status) throws BackgroundException {
    try {
        final FilesApi files = new FilesApi(session.getClient());
        final CreateFolderRequest request = new CreateFolderRequest();
        request.setName(folder.getName());
        request.setParentID(fileid.getFileId(folder.getParent()));
        final File f = files.filesCreateFolder(request);
        // Remember the new id so subsequent operations avoid a server lookup.
        fileid.cache(folder, f.getId());
        return folder.withAttributes(new StoregateAttributesFinderFeature(session, fileid).toAttributes(f));
    }
    catch(ApiException e) {
        throw new StoregateExceptionMappingService(fileid).map("Cannot create folder {0}", e, folder);
    }
}
// mkdir must succeed, be callable again for an existing folder, and the
// folder must be gone after deletion.
@Test
public void testCreateDirectory() throws Exception {
    final StoregateIdProvider nodeid = new StoregateIdProvider(session);
    final Path folder = new StoregateDirectoryFeature(session, nodeid).mkdir(
        new Path(String.format("/My files/%s", new AlphanumericRandomStringService().random()),
            EnumSet.of(Path.Type.directory)), new TransferStatus());
    assertTrue(new StoregateFindFeature(session, nodeid).find(folder));
    // Can create again regardless if exists
    new StoregateDirectoryFeature(session, nodeid).mkdir(folder, new TransferStatus());
    new StoregateDeleteFeature(session, nodeid).delete(Collections.singletonList(folder),
        new DisabledLoginCallback(), new Delete.DisabledCallback());
    assertFalse(new DefaultFindFeature(session).find(folder));
}
// Renders this path as a string. We can't use uri.toString(), which escapes
// everything, because we want illegal characters unescaped in the string,
// for glob processing, etc.
@Override
public String toString() {
    StringBuilder buffer = new StringBuilder();
    if (uri.getScheme() != null) {
        buffer.append(uri.getScheme())
            .append(":");
    }
    if (uri.getAuthority() != null) {
        buffer.append("//")
            .append(uri.getAuthority());
    }
    if (uri.getPath() != null) {
        String path = uri.getPath();
        if (path.indexOf('/')==0 &&
            hasWindowsDrive(path) &&       // has windows drive
            uri.getScheme() == null &&     // but no scheme
            uri.getAuthority() == null)    // or authority
            path = path.substring(1);      // remove slash before drive
        buffer.append(path);
    }
    if (uri.getFragment() != null) {
        buffer.append("#")
            .append(uri.getFragment());
    }
    return buffer.toString();
}
// Verifies Windows-style path normalization: backslashes become forward
// slashes and a leading slash before a drive letter is stripped.
// Fix: JUnit's assertEquals takes the EXPECTED value first; the original had
// the arguments reversed, producing misleading failure messages.
@Test (timeout = 5000)
public void testWindowsPaths() throws URISyntaxException, IOException {
    assumeWindows();
    assertEquals("c:/foo/bar", new Path("c:\\foo\\bar").toString());
    assertEquals("c:/foo/bar", new Path("c:/foo/bar").toString());
    assertEquals("c:/foo/bar", new Path("/c:/foo/bar").toString());
    assertEquals("file://c:/foo/bar", new Path("file://c:/foo/bar").toString());
}
// Exposes the raw protocol response data backing this response.
@Override
public OffsetFetchResponseData data() {
    return data;
}
// For response version 8+ the per-partition metadata may be null; the
// converted OffsetFetchResponseData must carry it through unchanged.
@Test
public void testNullableMetadataV8AndAbove() {
    PartitionData pd = new PartitionData(
        offset,
        leaderEpochOne,
        null,
        Errors.UNKNOWN_TOPIC_OR_PARTITION);
    // test PartitionData.equals with null metadata
    assertEquals(pd, pd);
    partitionDataMap.clear();
    partitionDataMap.put(new TopicPartition(topicOne, partitionOne), pd);
    OffsetFetchResponse response = new OffsetFetchResponse(
        throttleTimeMs,
        Collections.singletonMap(groupOne, Errors.GROUP_AUTHORIZATION_FAILED),
        Collections.singletonMap(groupOne, partitionDataMap));
    OffsetFetchResponseData expectedData = new OffsetFetchResponseData()
        .setGroups(Collections.singletonList(
            new OffsetFetchResponseGroup()
                .setGroupId(groupOne)
                .setTopics(Collections.singletonList(
                    new OffsetFetchResponseTopics()
                        .setName(topicOne)
                        .setPartitions(Collections.singletonList(
                            new OffsetFetchResponsePartitions()
                                .setPartitionIndex(partitionOne)
                                .setCommittedOffset(offset)
                                .setCommittedLeaderEpoch(leaderEpochOne.orElse(-1))
                                .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code())
                                .setMetadata(null)))))
                .setErrorCode(Errors.GROUP_AUTHORIZATION_FAILED.code())))
        .setThrottleTimeMs(throttleTimeMs);
    assertEquals(expectedData, response.data());
}
// Convenience overload that wires in the default SqlPredicate factory.
public static <K> KTableHolder<K> build(
    final KTableHolder<K> table,
    final TableFilter<K> step,
    final RuntimeBuildContext buildContext) {
    return build(table, step, buildContext, SqlPredicate::new);
}
// Verifies that the filter step registers a predicate with the
// materialization builder, and that the produced transformer delegates to
// the underlying predicate transformer for both empty and present results.
@Test
public void shouldFilterMaterialization() {
    // When:
    step.build(planBuilder, planInfo);
    // Then:
    verify(materializationBuilder).filter(
        predicateFactoryCaptor.capture(),
        eq(queryContext));
    // Given:
    final KsqlTransformer<Object, Optional<GenericRow>> predicate = predicateFactoryCaptor
        .getValue()
        .apply(processingLogger);
    when(preTransformer.transform(any(), any(), any())).thenReturn(Optional.empty());
    // When:
    Optional<GenericRow> result = predicate.transform(key, value, ctx);
    // Then:
    verify(preTransformer).transform(key, value, ctx);
    assertThat(result, is(Optional.empty()));
    // Given:
    when(preTransformer.transform(any(), any(), any()))
        .thenAnswer(inv -> Optional.of(inv.getArgument(1)));
    // When:
    result = predicate.transform(key, value, ctx);
    // Then:
    assertThat(result, is(Optional.of(value)));
}
// Spring bean: builds the TimeLimiterRegistry from configuration properties,
// registers event consumers for each instance, and applies the composite
// customizer while initializing the configured time limiters.
@Bean
public TimeLimiterRegistry timeLimiterRegistry(
    TimeLimiterConfigurationProperties timeLimiterConfigurationProperties,
    EventConsumerRegistry<TimeLimiterEvent> timeLimiterEventConsumerRegistry,
    RegistryEventConsumer<TimeLimiter> timeLimiterRegistryEventConsumer,
    @Qualifier("compositeTimeLimiterCustomizer") CompositeCustomizer<TimeLimiterConfigCustomizer> compositeTimeLimiterCustomizer) {
    TimeLimiterRegistry timeLimiterRegistry =
        createTimeLimiterRegistry(timeLimiterConfigurationProperties,
            timeLimiterRegistryEventConsumer, compositeTimeLimiterCustomizer);
    registerEventConsumer(timeLimiterRegistry, timeLimiterEventConsumerRegistry,
        timeLimiterConfigurationProperties);
    initTimeLimiterRegistry(timeLimiterRegistry, timeLimiterConfigurationProperties, compositeTimeLimiterCustomizer);
    return timeLimiterRegistry;
}
// Registry built from properties must expose both configured time limiters
// with their timeout durations, register an event consumer for each, and
// keep the configured aspect order.
@Test
public void testTimeLimiterRegistry() {
    // Given
    io.github.resilience4j.common.timelimiter.configuration.CommonTimeLimiterConfigurationProperties.InstanceProperties instanceProperties1 =
        new io.github.resilience4j.common.timelimiter.configuration.CommonTimeLimiterConfigurationProperties.InstanceProperties();
    instanceProperties1.setTimeoutDuration(Duration.ofSeconds(3));
    io.github.resilience4j.common.timelimiter.configuration.CommonTimeLimiterConfigurationProperties.InstanceProperties instanceProperties2 =
        new io.github.resilience4j.common.timelimiter.configuration.CommonTimeLimiterConfigurationProperties.InstanceProperties();
    instanceProperties2.setTimeoutDuration(Duration.ofSeconds(2));
    TimeLimiterConfigurationProperties timeLimiterConfigurationProperties = new TimeLimiterConfigurationProperties();
    timeLimiterConfigurationProperties.getInstances().put("backend1", instanceProperties1);
    timeLimiterConfigurationProperties.getInstances().put("backend2", instanceProperties2);
    timeLimiterConfigurationProperties.setTimeLimiterAspectOrder(200);
    TimeLimiterConfiguration timeLimiterConfiguration = new TimeLimiterConfiguration();
    DefaultEventConsumerRegistry<TimeLimiterEvent> eventConsumerRegistry = new DefaultEventConsumerRegistry<>();
    // When
    TimeLimiterRegistry timeLimiterRegistry =
        timeLimiterConfiguration.timeLimiterRegistry(timeLimiterConfigurationProperties, eventConsumerRegistry,
            new CompositeRegistryEventConsumer<>(emptyList()), compositeTimeLimiterCustomizerTestInstance());
    // Then
    assertThat(timeLimiterConfigurationProperties.getTimeLimiterAspectOrder()).isEqualTo(200);
    assertThat(timeLimiterRegistry.getAllTimeLimiters().size()).isEqualTo(2);
    TimeLimiter timeLimiter1 = timeLimiterRegistry.timeLimiter("backend1");
    assertThat(timeLimiter1).isNotNull();
    assertThat(timeLimiter1.getTimeLimiterConfig().getTimeoutDuration()).isEqualTo(Duration.ofSeconds(3));
    TimeLimiter timeLimiter2 = timeLimiterRegistry.timeLimiter("backend2");
    assertThat(timeLimiter2).isNotNull();
    assertThat(timeLimiter2.getTimeLimiterConfig().getTimeoutDuration()).isEqualTo(Duration.ofSeconds(2));
    assertThat(eventConsumerRegistry.getAllEventConsumer()).hasSize(2);
}
// Opens a read stream for the remote file via a signed download URI obtained
// from the Files API. Supports resuming: when the status requests append, a
// Range header is added (open-ended when the total length is unknown).
@Override
public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    try {
        final FileEntity entity = new FilesApi(new BrickApiClient(session))
            .download(StringUtils.removeStart(file.getAbsolute(), String.valueOf(Path.DELIMITER)),
                null, null, null, null);
        final HttpUriRequest request = new HttpGet(entity.getDownloadUri());
        if(status.isAppend()) {
            final HttpRange range = HttpRange.withStatus(status);
            final String header;
            if(TransferStatus.UNKNOWN_LENGTH == range.getEnd()) {
                // Open-ended range when the total length is not known.
                header = String.format("bytes=%d-", range.getStart());
            }
            else {
                header = String.format("bytes=%d-%d", range.getStart(), range.getEnd());
            }
            if(log.isDebugEnabled()) {
                log.debug(String.format("Add range header %s for file %s", header, file));
            }
            request.addHeader(new BasicHeader(HttpHeaders.RANGE, header));
            // Disable compression
            request.addHeader(new BasicHeader(HttpHeaders.ACCEPT_ENCODING, "identity"));
        }
        final HttpResponse response = session.getClient().execute(request);
        switch(response.getStatusLine().getStatusCode()) {
            case HttpStatus.SC_OK:
            case HttpStatus.SC_PARTIAL_CONTENT:
                // Stream releases the connection when fully read or closed.
                return new HttpMethodReleaseInputStream(response, status);
            default:
                throw new DefaultHttpResponseExceptionMappingService().map("Download {0} failed", new HttpResponseException(
                    response.getStatusLine().getStatusCode(), response.getStatusLine().getReasonPhrase()), file);
        }
    }
    catch(ApiException e) {
        throw new BrickExceptionMappingService().map("Download {0} failed", e, file);
    }
    catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map("Download {0} failed", e, file);
    }
}
// Reading from offset 100 with unknown total length (-1): the open-ended
// Range request must yield exactly the remainder of the uploaded content.
@Test
public void testReadRangeUnknownLength() throws Exception {
    final Path room = new BrickDirectoryFeature(session).mkdir(
        new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
    final Path test = new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    new BrickTouchFeature(session).touch(test, new TransferStatus());
    final Local local = new Local(System.getProperty("java.io.tmpdir"), new AlphanumericRandomStringService().random());
    final byte[] content = RandomUtils.nextBytes(1000);
    final OutputStream out = local.getOutputStream(false);
    assertNotNull(out);
    IOUtils.write(content, out);
    out.close();
    final TransferStatus upload = new TransferStatus().withLength(content.length);
    upload.setExists(true);
    new BrickUploadFeature(session, new BrickWriteFeature(session)).upload(
        test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED), new DisabledStreamListener(), upload,
        new DisabledConnectionCallback());
    // Unknown length with append set triggers the open-ended Range header.
    final TransferStatus status = new TransferStatus();
    status.setLength(-1L);
    status.setAppend(true);
    status.setOffset(100L);
    final InputStream in = new BrickReadFeature(session).read(test, status, new DisabledConnectionCallback());
    assertNotNull(in);
    final ByteArrayOutputStream buffer = new ByteArrayOutputStream(content.length - 100);
    new StreamCopier(status, status).transfer(in, buffer);
    final byte[] reference = new byte[content.length - 100];
    System.arraycopy(content, 100, reference, 0, content.length - 100);
    assertArrayEquals(reference, buffer.toByteArray());
    in.close();
    new BrickDeleteFeature(session).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Derives the split watermark as the minimum lower-bound statistic of the
 * event-time column across all files in the split, converted to millis.
 *
 * Fix: the checkArgument message template previously had only two %s
 * placeholders for three arguments, so the field id was rendered as a stray
 * trailing value instead of inside the message.
 */
@Override
public long extractWatermark(IcebergSourceSplit split) {
    return split.task().files().stream()
        .map(
            scanTask -> {
                // Column-level lower-bound stats must exist for the event time
                // field, otherwise no watermark can be derived for this file.
                Preconditions.checkArgument(
                    scanTask.file().lowerBounds() != null
                        && scanTask.file().lowerBounds().get(eventTimeFieldId) != null,
                    "Missing statistics for column name = %s (id = %s) in file = %s",
                    eventTimeFieldName,
                    eventTimeFieldId,
                    scanTask.file());
                return timeUnit.toMillis(
                    Conversions.fromByteBuffer(
                        Types.LongType.get(), scanTask.file().lowerBounds().get(eventTimeFieldId)));
            })
        .min(Comparator.comparingLong(l -> l))
        .get();
}
// A split with a single file: the watermark equals that file's minimum value
// for the configured column (already in milliseconds).
@TestTemplate
public void testSingle() throws IOException {
    ColumnStatsWatermarkExtractor extractor =
        new ColumnStatsWatermarkExtractor(SCHEMA, columnName, TimeUnit.MILLISECONDS);
    assertThat(extractor.extractWatermark(split(0)))
        .isEqualTo(MIN_VALUES.get(0).get(columnName).longValue());
}
// Advertises the media types this parser handles; the set is static and does
// not depend on the parse context.
public Set<MediaType> getSupportedTypes(ParseContext context) {
    return SUPPORTED_TYPES;
}
// XLSB handling: detector reports the binary-workbook media type,
// OfficeParser rejects it, OOXMLParser accepts it, and the auto-detect
// pipeline extracts the expected text.
@Test
public void testExcelXLSB() throws Exception {
    Detector detector = new DefaultDetector();
    Metadata m = new Metadata();
    m.add(TikaCoreProperties.RESOURCE_NAME_KEY, "excel.xlsb");
    // Should be detected correctly
    MediaType type;
    try (InputStream input = getResourceAsStream("/test-documents/testEXCEL.xlsb")) {
        type = detector.detect(input, m);
        assertEquals("application/vnd.ms-excel.sheet.binary.macroenabled.12", type.toString());
    }
    // OfficeParser won't handle it
    assertEquals(false, (new OfficeParser()).getSupportedTypes(new ParseContext()).contains(type));
    // OOXMLParser will (soon) handle it
    assertTrue((new OOXMLParser()).getSupportedTypes(new ParseContext()).contains(type));
    // AutoDetectParser doesn't break on it
    ParseContext context = new ParseContext();
    context.set(Locale.class, Locale.US);
    String content = getText("testEXCEL.xlsb", new Metadata(), context);
    assertContains("This is an example spreadsheet", content);
}
// Downloads a segment to a local file, round-robining across the resolved IP
// addresses of the download URI and retrying with exponential backoff.
// Temporary failures (404, 5xx, generic exceptions) are retried; permanent
// HTTP errors are rethrown immediately.
@Override
public void fetchSegmentToLocal(URI downloadURI, File dest) throws Exception {
    // Create a RoundRobinURIProvider to round robin IP addresses when retry uploading. Otherwise may always try to
    // download from a same broken host as: 1) DNS may not RR the IP addresses 2) OS cache the DNS resolution result.
    RoundRobinURIProvider uriProvider = new RoundRobinURIProvider(List.of(downloadURI), true);
    int retryCount = getRetryCount(uriProvider);
    _logger.info("Retry downloading for {} times. retryCount from pinot server config: {}, number of IP addresses for "
        + "download URI: {}", retryCount, _retryCount, uriProvider.numAddresses());
    RetryPolicies.exponentialBackoffRetryPolicy(retryCount, _retryWaitMs, _retryDelayScaleFactor).attempt(() -> {
        URI uri = uriProvider.next();
        try {
            String hostName = downloadURI.getHost();
            int port = downloadURI.getPort();
            // If the original download address is specified as host name, need add a "HOST" HTTP header to the
            // HTTP request. Otherwise, if the download address is a LB address, when the LB be configured as
            // "disallow direct access by IP address", downloading will fail.
            List<Header> httpHeaders = new LinkedList<>();
            if (!InetAddresses.isInetAddress(hostName)) {
                httpHeaders.add(new BasicHeader(HttpHeaders.HOST, hostName + ":" + port));
            }
            int statusCode = _httpClient.downloadFile(uri, dest, _authProvider, httpHeaders);
            _logger.info("Downloaded segment from: {} to: {} of size: {}; Response status code: {}", uri, dest,
                dest.length(), statusCode);
            return true;
        } catch (HttpErrorStatusException e) {
            int statusCode = e.getStatusCode();
            if (statusCode == HttpStatus.SC_NOT_FOUND || statusCode >= 500) {
                // Temporary exception
                // 404 is treated as a temporary exception, as the downloadURI may be backed by multiple hosts;
                // if a single host is down, we can retry with another host.
                _logger.warn("Got temporary error status code: {} while downloading segment from: {} to: {}",
                    statusCode, uri, dest, e);
                return false;
            } else {
                // Permanent exception
                _logger.error("Got permanent error status code: {} while downloading segment from: {} to: {}, won't retry",
                    statusCode, uri, dest, e);
                throw e;
            }
        } catch (Exception e) {
            // Treat any other failure (connection reset, timeout, ...) as retryable.
            _logger.warn("Caught exception while downloading segment from: {} to: {}", uri, dest, e);
            return false;
        }
    });
}
@Test public void testFetchSegmentToLocalSucceedAtFirstAttempt() throws Exception { FileUploadDownloadClient client = mock(FileUploadDownloadClient.class); when(client.downloadFile(any(), any(), any())).thenReturn(200); HttpSegmentFetcher segmentFetcher = getSegmentFetcher(client); List<URI> uris = List.of(new URI("http://h1:8080"), new URI("http://h2:8080")); segmentFetcher.fetchSegmentToLocal(SEGMENT_NAME, () -> uris, SEGMENT_FILE); }
@Nullable public static TraceContextOrSamplingFlags parseB3SingleFormat(CharSequence b3) { return parseB3SingleFormat(b3, 0, b3.length()); }
@Test void parseB3SingleFormat_parent_debug() { assertThat(parseB3SingleFormat(traceId + "-" + spanId + "-d-" + parentId).context()) .isEqualToComparingFieldByField(TraceContext.newBuilder() .traceId(Long.parseUnsignedLong(traceId, 16)) .parentId(Long.parseUnsignedLong(parentId, 16)) .spanId(Long.parseUnsignedLong(spanId, 16)) .debug(true).build() ); }
public boolean isUserAnAdmin(final CaseInsensitiveString userName, List<Role> memberRoles) { return adminsConfig.isAdmin(new AdminUser(userName), memberRoles); }
@Test public void shouldReturnTrueIfAnUserBelongsToAnAdminRole() { Authorization authorization = new Authorization(new AdminsConfig(new AdminRole(new CaseInsensitiveString("bar1")), new AdminRole(new CaseInsensitiveString("bar2")))); assertThat(authorization.isUserAnAdmin(new CaseInsensitiveString("foo1"), List.of(new RoleConfig(new CaseInsensitiveString("bar1")), new RoleConfig(new CaseInsensitiveString("bar1") ))), is(true)); assertThat(authorization.isUserAnAdmin(new CaseInsensitiveString("foo2"), List.of(new RoleConfig(new CaseInsensitiveString("bar2")))), is(true)); assertThat(authorization.isUserAnAdmin(new CaseInsensitiveString("foo3"), List.of(new RoleConfig(new CaseInsensitiveString("bar1")))), is(true)); assertThat(authorization.isUserAnAdmin(new CaseInsensitiveString("foo4"), new ArrayList<>()), is(false)); }
public boolean isBroadcast() { for (final byte b : this.address) { if (b != -1) { return false; } } return true; }
@Test public void testIsBroadcast() throws Exception { assertFalse(MAC_NORMAL.isBroadcast()); assertTrue(MAC_BCAST.isBroadcast()); assertFalse(MAC_MCAST.isBroadcast()); assertFalse(MAC_MCAST_2.isBroadcast()); assertFalse(MAC_LLDP.isBroadcast()); assertFalse(MAC_LLDP_2.isBroadcast()); assertFalse(MAC_LLDP_3.isBroadcast()); assertFalse(MAC_ONOS.isBroadcast()); }
public Set<ContentPackInstallation> findByContentPackIdAndRevision(ModelId id, int revision) { final DBQuery.Query query = DBQuery .is(ContentPackInstallation.FIELD_CONTENT_PACK_ID, id) .is(ContentPackInstallation.FIELD_CONTENT_PACK_REVISION, revision); try (final DBCursor<ContentPackInstallation> installations = dbCollection.find(query)) { return ImmutableSet.copyOf((Iterator<ContentPackInstallation>) installations); } }
@Test @MongoDBFixtures("ContentPackInstallationPersistenceServiceTest.json") public void findByContentPackIdAndRevisionWithInvalidId() { final Set<ContentPackInstallation> contentPack = persistenceService.findByContentPackIdAndRevision(ModelId.of("4e3d7025-881e-6870-da03-cafebabe0001"), 3); assertThat(contentPack).isEmpty(); }
public boolean matchStage(StageConfigIdentifier stageIdentifier, StageEvent event) { return this.event.include(event) && appliesTo(stageIdentifier.getPipelineName(), stageIdentifier.getStageName()); }
@Test void shouldNotMatchStageWithDifferentName() { NotificationFilter filter = new NotificationFilter("cruise", "xyz", StageEvent.All, false); assertThat(filter.matchStage(new StageConfigIdentifier("cruise", "dev"), StageEvent.All)).isFalse(); }
public CompletableFuture<CompletedCheckpoint> triggerCheckpoint(boolean isPeriodic) { return triggerCheckpointFromCheckpointThread(checkpointProperties, null, isPeriodic); }
@Test void testCheckpointStatsTrackerPendingCheckpointCallback() throws Exception { // set up the coordinator and validate the initial state CheckpointStatsTracker tracker = mock(CheckpointStatsTracker.class); CheckpointCoordinator checkpointCoordinator = new CheckpointCoordinatorBuilder() .setTimer(manuallyTriggeredScheduledExecutor) .setCheckpointStatsTracker(tracker) .build(EXECUTOR_RESOURCE.getExecutor()); when(tracker.reportPendingCheckpoint( anyLong(), anyLong(), any(CheckpointProperties.class), any(Map.class))) .thenReturn(mock(PendingCheckpointStats.class)); // Trigger a checkpoint and verify callback CompletableFuture<CompletedCheckpoint> checkpointFuture = checkpointCoordinator.triggerCheckpoint(false); manuallyTriggeredScheduledExecutor.triggerAll(); FutureUtils.throwIfCompletedExceptionally(checkpointFuture); verify(tracker, times(1)) .reportPendingCheckpoint( eq(1L), any(Long.class), eq( CheckpointProperties.forCheckpoint( CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION)), any()); }
@Override public AWSCredentials getCredentials() { return this.credentials.getCredentials(); }
@Test public void encryptSecretKeyFromPluginConfigUsingSystemSecret() { when(systemConfiguration.getPasswordSecret()).thenReturn("encryptionKey123"); final AWSPluginConfiguration config = AWSPluginConfiguration.createDefault() .toBuilder() .accessKey("MyAccessKey") .secretKey("aVerySecretKey", "encryptionKey123") .build(); AWSAuthProvider authProvider = createForConfig(config); assertThat(authProvider.getCredentials().getAWSSecretKey()).isEqualTo("aVerySecretKey"); assertThat(authProvider.getCredentials().getAWSAccessKeyId()).isEqualTo("MyAccessKey"); }
public static Validator mapWithIntKeyDoubleValue() { return (name, val) -> { if (!(val instanceof String)) { throw new ConfigException(name, val, "Must be a string"); } final String str = (String) val; final Map<String, String> map = KsqlConfig.parseStringAsMap(name, str); map.forEach((keyStr, valueStr) -> { try { Integer.parseInt(keyStr); } catch (NumberFormatException e) { throw new ConfigException(name, keyStr, "Not an int"); } try { Double.parseDouble(valueStr); } catch (NumberFormatException e) { throw new ConfigException(name, valueStr, "Not a double"); } }); }; }
@Test public void shouldParseIntKeyDoubleValueInMap() { // Given: final Validator validator = ConfigValidators.mapWithIntKeyDoubleValue(); validator.ensureValid("propName", "123:1.2,345:9.0"); }
public abstract List<String> cipherSuites();
@Test public void testSupportedCiphers() throws KeyManagementException, NoSuchAlgorithmException, SSLException { SSLContext jdkSslContext = SSLContext.getInstance("TLS"); jdkSslContext.init(null, null, null); SSLEngine sslEngine = jdkSslContext.createSSLEngine(); String unsupportedCipher = "TLS_DH_anon_WITH_DES_CBC_SHA"; IllegalArgumentException exception = null; try { sslEngine.setEnabledCipherSuites(new String[] {unsupportedCipher}); } catch (IllegalArgumentException e) { exception = e; } assumeTrue(exception != null); File keyFile = ResourcesUtil.getFile(getClass(), "test_unencrypted.pem"); File crtFile = ResourcesUtil.getFile(getClass(), "test.crt"); SslContext sslContext = newSslContext(crtFile, keyFile, null); assertFalse(sslContext.cipherSuites().contains(unsupportedCipher)); }
public TolerantFloatComparison isWithin(float tolerance) { return new TolerantFloatComparison() { @Override public void of(float expected) { Float actual = FloatSubject.this.actual; checkNotNull( actual, "actual value cannot be null. tolerance=%s expected=%s", tolerance, expected); checkTolerance(tolerance); if (!equalWithinTolerance(actual, expected, tolerance)) { failWithoutActual( fact("expected", floatToString(expected)), butWas(), fact("outside tolerance", floatToString(tolerance))); } } }; }
@Test public void isWithinZeroTolerance() { float max = Float.MAX_VALUE; assertThat(max).isWithin(0.0f).of(max); assertThat(NEARLY_MAX).isWithin(0.0f).of(NEARLY_MAX); assertThatIsWithinFails(max, 0.0f, NEARLY_MAX); assertThatIsWithinFails(NEARLY_MAX, 0.0f, max); float negativeMax = -1.0f * Float.MAX_VALUE; assertThat(negativeMax).isWithin(0.0f).of(negativeMax); assertThat(NEGATIVE_NEARLY_MAX).isWithin(0.0f).of(NEGATIVE_NEARLY_MAX); assertThatIsWithinFails(negativeMax, 0.0f, NEGATIVE_NEARLY_MAX); assertThatIsWithinFails(NEGATIVE_NEARLY_MAX, 0.0f, negativeMax); float min = Float.MIN_VALUE; assertThat(min).isWithin(0.0f).of(min); assertThat(JUST_OVER_MIN).isWithin(0.0f).of(JUST_OVER_MIN); assertThatIsWithinFails(min, 0.0f, JUST_OVER_MIN); assertThatIsWithinFails(JUST_OVER_MIN, 0.0f, min); float negativeMin = -1.0f * Float.MIN_VALUE; assertThat(negativeMin).isWithin(0.0f).of(negativeMin); assertThat(JUST_UNDER_NEGATIVE_MIN).isWithin(0.0f).of(JUST_UNDER_NEGATIVE_MIN); assertThatIsWithinFails(negativeMin, 0.0f, JUST_UNDER_NEGATIVE_MIN); assertThatIsWithinFails(JUST_UNDER_NEGATIVE_MIN, 0.0f, negativeMin); }
public static CloudConfiguration buildCloudConfigurationForStorage(Map<String, String> properties) { return buildCloudConfigurationForStorage(properties, false); }
@Test public void testHDFSCloudConfiguration() { Map<String, String> map = new HashMap<String, String>() { { put(CloudConfigurationConstants.HDFS_AUTHENTICATION, "simple"); put(CloudConfigurationConstants.HDFS_USERNAME, "XX"); put(CloudConfigurationConstants.HDFS_PASSWORD, "XX"); } }; CloudConfiguration cc = CloudConfigurationFactory.buildCloudConfigurationForStorage(map); Assert.assertEquals(cc.getCloudType(), CloudType.HDFS); TCloudConfiguration tc = new TCloudConfiguration(); cc.toThrift(tc); Configuration conf = new Configuration(); cc.applyToConfiguration(conf); cc.toFileStoreInfo(); Assert.assertEquals(cc.toConfString(), "HDFSCloudConfiguration{resources='', jars='', hdpuser='XX', cred=HDFSCloudCredential{authentication='simple', " + "username='XX', password='XX', krbPrincipal='', krbKeyTabFile='', krbKeyTabData=''}}"); map.clear(); cc = CloudConfigurationFactory.buildCloudConfigurationForStorage(map); Assert.assertEquals(CloudType.DEFAULT, cc.getCloudType()); cc = CloudConfigurationFactory.buildCloudConfigurationForStorage(map, true); Assert.assertEquals(CloudType.HDFS, cc.getCloudType()); }
@Override public BigDecimal getBigNumber( Object object ) throws KettleValueException { InetAddress address = getInternetAddress( object ); if ( null == address ) { return null; } BigInteger bi = BigInteger.ZERO; byte[] addr = address.getAddress(); for ( byte aByte : addr ) { bi = bi.shiftLeft( 8 ).add( BigInteger.valueOf( aByte & 0xFF ) ); } return new BigDecimal( bi ); }
@Test public void testGetBigNumber_Success() throws UnknownHostException, KettleValueException { ValueMetaInternetAddress vm = new ValueMetaInternetAddress(); String[] addresses = { // Some IPv6 addresses "1080:0:0:0:8:800:200C:417A", "1080::8:800:200C:417A", "::1", "0:0:0:0:0:0:0:1", "::", "0:0:0:0:0:0:0:0", "::d", // Some IPv4-mapped IPv6 addresses "::ffff:0:0", "::ffff:d", "::ffff:127.0.0.1", // Some IPv4-compatible IPv6 addresses "::0.0.0.0", "::255.255.0.10", // Some IPv4 addresses "192.168.10.0", "0.0.0.1", "0.0.0.0", "127.0.0.1", "255.255.0.10", "192.0.2.235" }; // No exception should be thrown in any of the following calls for ( String address : addresses ) { InetAddress addr = InetAddress.getByName( address ); vm.getBigNumber( addr ); } }
/**
 * Executes an INSERT VALUES statement: resolves the target data source, builds the Kafka
 * record from the supplied values, and produces it to the source's topic. All failures are
 * wrapped in a KsqlException whose message identifies the failed insert; authorization
 * failures get a more specific root cause so the user can tell missing ACLs apart from
 * other errors.
 *
 * @throws KsqlException if validation, record building, or producing fails
 */
@SuppressWarnings("unused") // Part of required API.
public void execute(
    final ConfiguredStatement<InsertValues> statement,
    final SessionProperties sessionProperties,
    final KsqlExecutionContext executionContext,
    final ServiceContext serviceContext
) {
  final InsertValues insertValues = statement.getStatement();
  final MetaStore metaStore = executionContext.getMetaStore();
  final KsqlConfig config = statement.getSessionConfig().getConfig(true);
  final DataSource dataSource = getDataSource(config, metaStore, insertValues);
  validateInsert(insertValues.getColumns(), dataSource);
  final ProducerRecord<byte[], byte[]> record =
      buildRecord(statement, metaStore, dataSource, serviceContext);
  try {
    producer.sendRecord(record, serviceContext, config.getProducerClientConfigProps());
  } catch (final TopicAuthorizationException e) {
    // TopicAuthorizationException does not give much detailed information about why it failed,
    // except which topics are denied. Here we just add the ACL to make the error message
    // consistent with other authorization error messages.
    final Exception rootCause = new KsqlTopicAuthorizationException(
        AclOperation.WRITE,
        e.unauthorizedTopics()
    );
    throw new KsqlException(createInsertFailedExceptionMessage(insertValues), rootCause);
  } catch (final ClusterAuthorizationException e) {
    // ClusterAuthorizationException is thrown when using idempotent producers
    // and either a topic write permission or a cluster-level idempotent write
    // permission (only applicable for broker versions no later than 2.8) is
    // missing. In this case, we include additional context to help the user
    // distinguish this type of failure from other permissions exceptions
    // such as the ones thrown above when TopicAuthorizationException is caught.
    throw new KsqlException(
        createInsertFailedExceptionMessage(insertValues),
        createClusterAuthorizationExceptionRootCause(dataSource)
    );
  } catch (final KafkaException e) {
    if (e.getCause() != null && e.getCause() instanceof ClusterAuthorizationException) {
      // The error message thrown when an idempotent producer is missing permissions
      // is (nondeterministically) inconsistent: it is either a raw ClusterAuthorizationException,
      // as checked for above, or a ClusterAuthorizationException wrapped inside a KafkaException.
      // ksqlDB handles these two the same way, accordingly.
      // See https://issues.apache.org/jira/browse/KAFKA-14138 for more.
      throw new KsqlException(
          createInsertFailedExceptionMessage(insertValues),
          createClusterAuthorizationExceptionRootCause(dataSource)
      );
    } else {
      throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
    }
  } catch (final Exception e) {
    // Catch-all: any other failure is surfaced with the standard insert-failed message.
    throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
  }
}
@Test public void shouldHandleNegativeValueExpression() { // Given: givenSourceStreamWithSchema(SCHEMA, SerdeFeatures.of(), SerdeFeatures.of()); final ConfiguredStatement<InsertValues> statement = givenInsertValues( ImmutableList.of(COL0, COL1), ImmutableList.of( new StringLiteral("str"), ArithmeticUnaryExpression.negative(Optional.empty(), new LongLiteral(1)) ) ); // When: executor.execute(statement, mock(SessionProperties.class), engine, serviceContext); // Then: verify(keySerializer).serialize(TOPIC_NAME, genericKey((String) null)); verify(valueSerializer).serialize(TOPIC_NAME, genericRow("str", -1L)); verify(producer).send(new ProducerRecord<>(TOPIC_NAME, null, 1L, KEY, VALUE)); }
public SAExposureConfig getExposureConfig() { return exposureConfig; }
@Test public void getExposureConfig() { SAExposureData exposureData = new SAExposureData("ExposeEvent"); Assert.assertNull(exposureData.getExposureConfig()); }
synchronized void ensureTokenInitialized() throws IOException { // we haven't inited yet, or we used to have a token but it expired if (!hasInitedToken || (action != null && !action.isValid())) { //since we don't already have a token, go get one Token<?> token = fs.getDelegationToken(null); // security might be disabled if (token != null) { fs.setDelegationToken(token); addRenewAction(fs); LOG.debug("Created new DT for {}", token.getService()); } hasInitedToken = true; } }
@Test public void testGetRemoteToken() throws IOException, URISyntaxException { Configuration conf = new Configuration(); DummyFs fs = spy(new DummyFs()); Token<TokenIdentifier> token = new Token<TokenIdentifier>(new byte[0], new byte[0], DummyFs.TOKEN_KIND, new Text("127.0.0.1:1234")); doReturn(token).when(fs).getDelegationToken(any()); doReturn(token).when(fs).getRenewToken(); fs.initialize(new URI("dummyfs://127.0.0.1:1234"), conf); fs.tokenAspect.ensureTokenInitialized(); // Select a token, store and renew it verify(fs).setDelegationToken(token); assertNotNull(Whitebox.getInternalState(fs.tokenAspect, "dtRenewer")); assertNotNull(Whitebox.getInternalState(fs.tokenAspect, "action")); }
@Override public void handle(ContainerLauncherEvent event) { try { eventQueue.put(event); } catch (InterruptedException e) { throw new YarnRuntimeException(e); } }
/**
 * A cleanup event arriving before the launch of the same container must not call
 * stopContainers; the subsequent launch succeeds, and a second cleanup then stops the
 * now-existing container.
 */
@Test(timeout = 5000)
public void testOutOfOrder() throws Exception {
  LOG.info("STARTING testOutOfOrder");
  AppContext mockContext = mock(AppContext.class);
  @SuppressWarnings("unchecked")
  EventHandler<Event> mockEventHandler = mock(EventHandler.class);
  when(mockContext.getEventHandler()).thenReturn(mockEventHandler);
  ContainerManagementProtocolClient mockCM =
      mock(ContainerManagementProtocolClient.class);
  ContainerLauncherImplUnderTest ut =
      new ContainerLauncherImplUnderTest(mockContext, mockCM);
  Configuration conf = new Configuration();
  ut.init(conf);
  ut.start();
  try {
    ContainerId contId = makeContainerId(0l, 0, 0, 1);
    TaskAttemptId taskAttemptId = makeTaskAttemptId(0l, 0, 0, TaskType.MAP, 0);
    String cmAddress = "127.0.0.1:8000";
    StartContainersResponse startResp =
        recordFactory.newRecordInstance(StartContainersResponse.class);
    startResp.setAllServicesMetaData(serviceResponse);
    // Cleanup before launch: must be a no-op on the container manager.
    LOG.info("inserting cleanup event");
    ContainerLauncherEvent mockCleanupEvent = mock(ContainerLauncherEvent.class);
    when(mockCleanupEvent.getType())
        .thenReturn(EventType.CONTAINER_REMOTE_CLEANUP);
    when(mockCleanupEvent.getContainerID())
        .thenReturn(contId);
    when(mockCleanupEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
    when(mockCleanupEvent.getContainerMgrAddress()).thenReturn(cmAddress);
    ut.handle(mockCleanupEvent);
    ut.waitForPoolToIdle();
    verify(mockCM, never()).stopContainers(any(StopContainersRequest.class));
    // Launch after the early cleanup: should proceed normally.
    LOG.info("inserting launch event");
    ContainerRemoteLaunchEvent mockLaunchEvent =
        mock(ContainerRemoteLaunchEvent.class);
    when(mockLaunchEvent.getType())
        .thenReturn(EventType.CONTAINER_REMOTE_LAUNCH);
    when(mockLaunchEvent.getContainerID())
        .thenReturn(contId);
    when(mockLaunchEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
    when(mockLaunchEvent.getContainerMgrAddress()).thenReturn(cmAddress);
    when(mockCM.startContainers(any(StartContainersRequest.class))).thenReturn(startResp);
    when(mockLaunchEvent.getContainerToken()).thenReturn(
        createNewContainerToken(contId, cmAddress));
    ut.handle(mockLaunchEvent);
    ut.waitForPoolToIdle();
    verify(mockCM).startContainers(any(StartContainersRequest.class));
    // Cleanup after launch: now the container exists and must be stopped.
    LOG.info("inserting cleanup event");
    ContainerLauncherEvent mockCleanupEvent2 =
        mock(ContainerLauncherEvent.class);
    when(mockCleanupEvent2.getType()).thenReturn(EventType.CONTAINER_REMOTE_CLEANUP);
    when(mockCleanupEvent2.getContainerID()).thenReturn(contId);
    when(mockCleanupEvent2.getTaskAttemptID()).thenReturn(taskAttemptId);
    when(mockCleanupEvent2.getContainerMgrAddress()).thenReturn(cmAddress);
    ut.handle(mockCleanupEvent2);
    ut.waitForPoolToIdle();
    // Verifies stopContainers is called on existing container
    verify(mockCM).stopContainers(any(StopContainersRequest.class));
  } finally {
    ut.stop();
  }
}
public synchronized int sendFetches() { final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests(); sendFetchesInternal( fetchRequests, (fetchTarget, data, clientResponse) -> { synchronized (Fetcher.this) { handleFetchSuccess(fetchTarget, data, clientResponse); } }, (fetchTarget, data, error) -> { synchronized (Fetcher.this) { handleFetchFailure(fetchTarget, data, error); } }); return fetchRequests.size(); }
@Test public void testFetchNormal() { buildFetcher(); assignFromUser(singleton(tp0)); subscriptions.seek(tp0, 0); // normal fetch assertEquals(1, sendFetches()); assertFalse(fetcher.hasCompletedFetches()); client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NONE, 100L, 0)); consumerClient.poll(time.timer(0)); assertTrue(fetcher.hasCompletedFetches()); Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords = fetchRecords(); assertTrue(partitionRecords.containsKey(tp0)); List<ConsumerRecord<byte[], byte[]>> records = partitionRecords.get(tp0); assertEquals(3, records.size()); assertEquals(4L, subscriptions.position(tp0).offset); // this is the next fetching position long offset = 1; for (ConsumerRecord<byte[], byte[]> record : records) { assertEquals(offset, record.offset()); offset += 1; } }
public static int appendToLabel( final AtomicBuffer metaDataBuffer, final int counterId, final String value) { Objects.requireNonNull(metaDataBuffer); if (counterId < 0) { throw new IllegalArgumentException("counter id " + counterId + " is negative"); } final int maxCounterId = (metaDataBuffer.capacity() / CountersReader.METADATA_LENGTH) - 1; if (counterId > maxCounterId) { throw new IllegalArgumentException( "counter id " + counterId + " out of range: 0 - maxCounterId=" + maxCounterId); } final int counterMetaDataOffset = CountersReader.metaDataOffset(counterId); final int state = metaDataBuffer.getIntVolatile(counterMetaDataOffset); if (CountersReader.RECORD_ALLOCATED != state) { throw new IllegalArgumentException("counter id " + counterId + " is not allocated, state: " + state); } final int existingLabelLength = metaDataBuffer.getInt(counterMetaDataOffset + CountersReader.LABEL_OFFSET); final int remainingLabelLength = CountersReader.MAX_LABEL_LENGTH - existingLabelLength; final int writtenLength = metaDataBuffer.putStringWithoutLengthAscii( counterMetaDataOffset + CountersReader.LABEL_OFFSET + SIZE_OF_INT + existingLabelLength, value, 0, remainingLabelLength); if (writtenLength > 0) { metaDataBuffer.putIntOrdered( counterMetaDataOffset + CountersReader.LABEL_OFFSET, existingLabelLength + writtenLength); } return writtenLength; }
@Test void appendToLabelShouldAddSuffix() { final CountersManager countersManager = new CountersManager( new UnsafeBuffer(new byte[CountersReader.METADATA_LENGTH]), new UnsafeBuffer(ByteBuffer.allocateDirect(CountersReader.COUNTER_LENGTH)), StandardCharsets.US_ASCII); final int counterId = countersManager.allocate("initial value: "); final int length = AeronCounters.appendToLabel(countersManager.metaDataBuffer(), counterId, "test"); assertEquals(4, length); assertEquals("initial value: test", countersManager.getCounterLabel(counterId)); }
@Override public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { try { decodeMsg(msg); } catch (Throwable t) { notifyAllChannelsOfErrorAndClose(t); } }
@Test void testNotifyCreditAvailableAfterReleased() throws Exception { final CreditBasedPartitionRequestClientHandler handler = new CreditBasedPartitionRequestClientHandler(); final EmbeddedChannel channel = new EmbeddedChannel(handler); final PartitionRequestClient client = new NettyPartitionRequestClient( channel, handler, mock(ConnectionID.class), mock(PartitionRequestClientFactory.class)); final NetworkBufferPool networkBufferPool = new NetworkBufferPool(10, 32); final SingleInputGate inputGate = createSingleInputGate(1, networkBufferPool); final RemoteInputChannel inputChannel = createRemoteInputChannel(inputGate, client); try { inputGate.setInputChannels(inputChannel); final BufferPool bufferPool = networkBufferPool.createBufferPool(6, 6); inputGate.setBufferPool(bufferPool); inputGate.setupChannels(); inputChannel.requestSubpartitions(); // This should send the partition request Object readFromOutbound = channel.readOutbound(); assertThat(readFromOutbound).isInstanceOf(PartitionRequest.class); assertThat(((PartitionRequest) readFromOutbound).credit).isEqualTo(2); // Trigger request floating buffers via buffer response to notify credits available final BufferResponse bufferResponse = createBufferResponse( TestBufferFactory.createBuffer(32), 0, inputChannel.getInputChannelId(), 1, new NetworkBufferAllocator(handler)); handler.channelRead(mock(ChannelHandlerContext.class), bufferResponse); assertThat(inputChannel.getUnannouncedCredit()).isEqualTo(2); // Release the input channel inputGate.close(); // it should send a close request after releasing the input channel, // but will not notify credits for a released input channel. readFromOutbound = channel.readOutbound(); assertThat(readFromOutbound).isInstanceOf(CloseRequest.class); channel.runPendingTasks(); assertThat((Object) channel.readOutbound()).isNull(); } finally { releaseResource(inputGate, networkBufferPool); channel.close(); } }
public static UUnary create(Kind unaryOp, UExpression expression) { checkArgument( UNARY_OP_CODES.containsKey(unaryOp), "%s is not a recognized unary operation", unaryOp); return new AutoValue_UUnary(unaryOp, expression); }
@Test public void equality() { ULiteral sevenLit = ULiteral.intLit(7); ULiteral threeLit = ULiteral.intLit(3); ULiteral falseLit = ULiteral.booleanLit(false); new EqualsTester() .addEqualityGroup(UUnary.create(Kind.UNARY_MINUS, sevenLit)) .addEqualityGroup(UUnary.create(Kind.UNARY_MINUS, threeLit)) .addEqualityGroup(UUnary.create(Kind.BITWISE_COMPLEMENT, sevenLit)) .addEqualityGroup(UUnary.create(Kind.LOGICAL_COMPLEMENT, falseLit)) .testEquals(); }
public static ImmutableByteSequence copyAndFit(ByteBuffer original, int bitWidth) throws ByteSequenceTrimException { checkArgument(original != null && original.capacity() > 0, "Cannot copy from an empty or null byte buffer"); checkArgument(bitWidth > 0, "bit-width must be a non-zero positive integer"); if (original.order() == ByteOrder.LITTLE_ENDIAN) { // FIXME: this can be improved, e.g. read bytes in reverse order from original byte[] newBytes = new byte[original.capacity()]; original.get(newBytes); reverse(newBytes); return internalCopyAndFit(ByteBuffer.wrap(newBytes), bitWidth); } else { return internalCopyAndFit(original.duplicate(), bitWidth); } }
@Test public void testCopyAndFit() throws Exception { int originalByteWidth = 3; int paddedByteWidth = 4; int trimmedByteWidth = 2; int indexFirstNonZeroByte = 1; byte byteValue = (byte) 1; byte[] arrayValue = new byte[originalByteWidth]; arrayValue[indexFirstNonZeroByte] = byteValue; ByteBuffer bufferValue = ByteBuffer.allocate(originalByteWidth).put(arrayValue); ImmutableByteSequence bsBuffer = ImmutableByteSequence.copyAndFit( bufferValue, originalByteWidth * 8); ImmutableByteSequence bsBufferTrimmed = ImmutableByteSequence.copyAndFit( bufferValue, trimmedByteWidth * 8); ImmutableByteSequence bsBufferPadded = ImmutableByteSequence.copyAndFit( bufferValue, paddedByteWidth * 8); assertThat("byte sequence of the byte buffer must be 3 bytes long", bsBuffer.size(), is(equalTo(originalByteWidth))); assertThat("byte sequence of the byte buffer must be 3 bytes long", bsBufferTrimmed.size(), is(equalTo(trimmedByteWidth))); assertThat("byte sequence of the byte buffer must be 3 bytes long", bsBufferPadded.size(), is(equalTo(paddedByteWidth))); String errStr = "incorrect byte sequence value"; assertThat(errStr, bsBuffer.asArray()[indexFirstNonZeroByte], is(equalTo(byteValue))); assertThat(errStr, bsBufferTrimmed.asArray()[indexFirstNonZeroByte - 1], is(equalTo(byteValue))); assertThat(errStr, bsBufferPadded.asArray()[indexFirstNonZeroByte + 1], is(equalTo(byteValue))); assertThat(errStr, bsBufferPadded.asArray()[paddedByteWidth - 1], is(equalTo((byte) 0x00))); }
@Override public String getServiceId() { return serviceId; }
@Test public void testGetServiceId() { assertThat(polarisRegistration1.getServiceId()).isEqualTo(SERVICE_PROVIDER); }
RegistryEndpointProvider<Optional<URL>> initializer() { return new Initializer(); }
@Test public void testInitializer_getApiRoute_nullSource() throws MalformedURLException { Assert.assertEquals( new URL("http://someApiBase/someImageName/blobs/uploads/"), testBlobPusher.initializer().getApiRoute("http://someApiBase/")); }
public synchronized int sendFetches() { final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests(); sendFetchesInternal( fetchRequests, (fetchTarget, data, clientResponse) -> { synchronized (Fetcher.this) { handleFetchSuccess(fetchTarget, data, clientResponse); } }, (fetchTarget, data, error) -> { synchronized (Fetcher.this) { handleFetchFailure(fetchTarget, data, error); } }); return fetchRequests.size(); }
/**
 * Completed fetches that raise an exception are removed from the queue if and only if they
 * contain no records: the out-of-range partition (tp0) throws once; the partition with
 * records (tp1) is consumed first; tp2 yields records without error; the partial-records
 * partition (tp3) keeps throwing on each poll attempt.
 */
@Test
public void testCompletedFetchRemoval() {
    // Ensure the removal of completed fetches that cause an Exception if and only if they contain empty records.
    buildFetcher(OffsetResetStrategy.NONE, new ByteArrayDeserializer(),
            new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED);
    assignFromUser(mkSet(tp0, tp1, tp2, tp3));
    subscriptions.seek(tp0, 1);
    subscriptions.seek(tp1, 1);
    subscriptions.seek(tp2, 1);
    subscriptions.seek(tp3, 1);
    assertEquals(1, sendFetches());
    Map<TopicIdPartition, FetchResponseData.PartitionData> partitions = new LinkedHashMap<>();
    partitions.put(tidp1, new FetchResponseData.PartitionData()
            .setPartitionIndex(tp1.partition())
            .setHighWatermark(100)
            .setRecords(records));
    partitions.put(tidp0, new FetchResponseData.PartitionData()
            .setPartitionIndex(tp0.partition())
            .setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code())
            .setHighWatermark(100));
    partitions.put(tidp2, new FetchResponseData.PartitionData()
            .setPartitionIndex(tp2.partition())
            .setHighWatermark(100)
            .setLastStableOffset(4)
            .setLogStartOffset(0)
            .setRecords(nextRecords));
    partitions.put(tidp3, new FetchResponseData.PartitionData()
            .setPartitionIndex(tp3.partition())
            .setHighWatermark(100)
            .setLastStableOffset(4)
            .setLogStartOffset(0)
            .setRecords(partialRecords));
    client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)));
    consumerClient.poll(time.timer(0));
    List<ConsumerRecord<byte[], byte[]>> fetchedRecords = new ArrayList<>();
    // First poll consumes tp1's records; the position advances past them.
    fetchRecordsInto(fetchedRecords);
    assertEquals(fetchedRecords.size(), subscriptions.position(tp1).offset - 1);
    assertEquals(4, subscriptions.position(tp1).offset);
    assertEquals(3, fetchedRecords.size());
    List<OffsetOutOfRangeException> oorExceptions = new ArrayList<>();
    try {
        fetchRecordsInto(fetchedRecords);
    } catch (OffsetOutOfRangeException oor) {
        oorExceptions.add(oor);
    }
    // Should have received one OffsetOutOfRangeException for partition tp1
    assertEquals(1, oorExceptions.size());
    OffsetOutOfRangeException oor = oorExceptions.get(0);
    assertTrue(oor.offsetOutOfRangePartitions().containsKey(tp0));
    assertEquals(oor.offsetOutOfRangePartitions().size(), 1);
    fetchRecordsInto(fetchedRecords);
    // Should not have received an Exception for tp2.
    assertEquals(6, subscriptions.position(tp2).offset);
    assertEquals(5, fetchedRecords.size());
    int numExceptionsExpected = 3;
    List<KafkaException> kafkaExceptions = new ArrayList<>();
    for (int i = 1; i <= numExceptionsExpected; i++) {
        try {
            fetchRecordsInto(fetchedRecords);
        } catch (KafkaException e) {
            kafkaExceptions.add(e);
        }
    }
    // Should have received as much as numExceptionsExpected Kafka exceptions for tp3.
    assertEquals(numExceptionsExpected, kafkaExceptions.size());
}
public static KeyFormat sanitizeKeyFormat( final KeyFormat keyFormat, final List<SqlType> newKeyColumnSqlTypes, final boolean allowKeyFormatChangeToSupportNewKeySchema ) { return sanitizeKeyFormatWrapping( !allowKeyFormatChangeToSupportNewKeySchema ? keyFormat : sanitizeKeyFormatForTypeCompatibility( sanitizeKeyFormatForMultipleColumns( keyFormat, newKeyColumnSqlTypes.size()), newKeyColumnSqlTypes ), newKeyColumnSqlTypes.size() == 1 ); }
@Test public void shouldConvertFormatForMulticolKeysWhenSanitizingFromKafkaFormat() { // Given: final KeyFormat format = KeyFormat.nonWindowed( FormatInfo.of(KafkaFormat.NAME), SerdeFeatures.of()); // When: final KeyFormat sanitized = SerdeFeaturesFactory.sanitizeKeyFormat(format, MULTI_SQL_TYPES, true); // Then: assertThat(sanitized.getFormatInfo(), equalTo(FormatInfo.of(JsonFormat.NAME))); assertThat(sanitized.getFeatures(), equalTo(SerdeFeatures.of())); }
static Map<String, String> generateConfig(int scale, Function<Integer, String> zkNodeAddress) { Map<String, String> servers = new HashMap<>(scale); for (int i = 0; i < scale; i++) { // The Zookeeper server IDs starts with 1, but pod index starts from 0 String key = String.format("server.%d", i + 1); String value = String.format("%s:%d:%d:participant;127.0.0.1:%d", zkNodeAddress.apply(i), ZookeeperCluster.CLUSTERING_PORT, ZookeeperCluster.LEADER_ELECTION_PORT, ZookeeperCluster.CLIENT_PLAINTEXT_PORT); servers.put(key, value); } return servers; }
/** A single-node cluster should produce exactly one "server.1" entry. */
@Test
public void testGenerateConfigOneNode() {
    Map<String, String> expected = new HashMap<>(3);
    expected.put("server.1", "my-cluster-zookeeper-0.my-cluster-zookeeper-nodes.myproject.svc:2888:3888:participant;127.0.0.1:12181");

    assertThat(ZookeeperScaler.generateConfig(1, zkNodeAddress), is(expected));
}
/**
 * Decides whether an I/O-failed request should be retried.
 *
 * <p>A request is NOT retried when the retry budget is exhausted, the exception is (or
 * is a subtype of) a configured non-retriable exception, or the request was cancelled.
 * Otherwise retry only idempotent HTTP methods.
 */
@Override
public boolean retryRequest(
    HttpRequest request, IOException exception, int execCount, HttpContext context) {
  if (execCount > maxRetries) {
    // Do not retry if over max retries
    return false;
  }

  // NOTE(review): the contains() check is an exact-class fast path; the isInstance
  // loop below would also cover it, so the first branch is functionally redundant.
  if (nonRetriableExceptions.contains(exception.getClass())) {
    return false;
  } else {
    for (Class<? extends IOException> rejectException : nonRetriableExceptions) {
      if (rejectException.isInstance(exception)) {
        return false;
      }
    }
  }
  if (request instanceof CancellableDependency
      && ((CancellableDependency) request).isCancelled()) {
    return false;
  }

  // Retry if the request is considered idempotent
  return Method.isIdempotent(request.getMethod());
}
/** A 502 Bad Gateway response should be considered retriable. */
@Test
public void testRetryBadGateway() {
  BasicHttpResponse response502 = new BasicHttpResponse(502, "Bad gateway failure");
  assertThat(retryStrategy.retryRequest(response502, 3, null)).isTrue();
}
/**
 * Converts a doc string to the requested target type using the registered
 * {@code DocStringType}s.
 *
 * <p>Requests for {@code DocString} itself are returned unconverted. Exactly one
 * registered converter must match; zero or multiple matches raise a
 * {@link CucumberDocStringException} with a registration hint.
 *
 * @param docString the doc string to convert
 * @param targetType the desired target type
 * @return the converted value
 * @throws CucumberDocStringException if no, or more than one, converter matches
 */
@SuppressWarnings("unchecked")
public <T> T convert(DocString docString, Type targetType) {
    if (DocString.class.equals(targetType)) {
        // Identity conversion: caller asked for the raw doc string
        return (T) docString;
    }

    List<DocStringType> docStringTypes = docStringTypeRegistry.lookup(docString.getContentType(), targetType);

    if (docStringTypes.isEmpty()) {
        // No converter registered; tailor the message to whether a content type was given
        if (docString.getContentType() == null) {
            throw new CucumberDocStringException(format(
                "It appears you did not register docstring type for %s",
                targetType.getTypeName()));
        }
        throw new CucumberDocStringException(format(
            "It appears you did not register docstring type for '%s' or %s",
            docString.getContentType(),
            targetType.getTypeName()));
    }
    if (docStringTypes.size() > 1) {
        // Ambiguous: suggest content types that would disambiguate
        List<String> suggestedContentTypes = suggestedContentTypes(docStringTypes);
        if (docString.getContentType() == null) {
            throw new CucumberDocStringException(format(
                "Multiple converters found for type %s, add one of the following content types to your docstring %s",
                targetType.getTypeName(),
                suggestedContentTypes));
        }
        throw new CucumberDocStringException(format(
            "Multiple converters found for type %s, and the content type '%s' did not match any of the registered types %s. Change the content type of the docstring or register a docstring type for '%s'",
            targetType.getTypeName(),
            docString.getContentType(),
            suggestedContentTypes,
            docString.getContentType()));
    }

    return (T) docStringTypes.get(0).transform(docString.getContent());
}
/** Converting to DocString itself must return the original instance unchanged. */
@Test
void doc_string_is_not_converted() {
    DocString docString = DocString.create("{\"hello\":\"world\"}");
    DocString converted = converter.convert(docString, DocString.class);
    assertThat(converted, is(docString));
}
/**
 * Records one occurrence: bumps the counter, stamps the first-seen time on the very
 * first increment, and advances the last-seen time monotonically.
 */
public void incrementCount() {
    long now = System.currentTimeMillis();
    // Only the thread that performs the first increment sets the first-seen timestamp;
    // compareAndSet guards against a later thread overwriting it.
    if (count.incrementAndGet() == 1) {
        first.compareAndSet(0L, now);
    }
    // max() keeps 'last' monotonic even if increments race with older timestamps
    last.updateAndGet(v -> Math.max(v, now));
}
/** Incrementing should raise the count from 0 to 1 and set a last-seen time after the initial first. */
@Test
void testIncrementCount() {
    PrioritizedFilterStatistics stats = new PrioritizedFilterStatistics("test");
    long first = stats.getFirst();
    Assertions.assertEquals(0, stats.getCount());
    stats.incrementCount();
    Assertions.assertEquals(1, stats.getCount());
    long last = stats.getLast();
    Assertions.assertTrue(last > first);
}
/**
 * Runs the configured Grok pattern against the input and returns one {@link Result}
 * per non-null flattened capture.
 */
@Override
protected Result[] run(String value) {
    final Grok grok = grokPatternRegistry.cachedGrokForPattern(this.pattern, this.namedCapturesOnly);

    // the extractor instance is rebuilt every second anyway
    final Match match = grok.match(value);
    final Map<String, Object> matches = match.captureFlattened();
    final List<Result> results = new ArrayList<>(matches.size());

    for (final Map.Entry<String, Object> entry : matches.entrySet()) {
        // never add null values to the results, those don't make sense for us
        if (entry.getValue() != null) {
            results.add(new Result(entry.getValue(), entry.getKey(), -1, -1));
        }
    }

    return results.toArray(new Result[0]);
}
/** Regression test: duplicate capture names in an OR must yield a single value, not [value, null]. */
@Test
public void testIssue4773() {
    // See: https://github.com/Graylog2/graylog2-server/issues/4773
    final Map<String, Object> config = new HashMap<>();
    config.put("named_captures_only", true);

    // Using an OR with the same named capture should only return one value "2015" instead of "[2015, null]"
    final GrokExtractor extractor = makeExtractor("(%{BASE10NUM:num}|%{BASE10NUM:num})", config);

    assertThat(extractor.run("2015"))
        .hasSize(1)
        .containsOnly(
            new Extractor.Result("2015", "num", -1, -1)
        );
}
/**
 * Lists Hazelcast endpoints by querying the pods in the configured namespace and
 * enriching them with public addresses.
 *
 * @return the discovered endpoints; REST failures are delegated to
 *         {@code handleKnownException}
 */
List<Endpoint> endpoints() {
    try {
        String urlString = String.format("%s/api/v1/namespaces/%s/pods", kubernetesMaster, namespace);
        return enrichWithPublicAddresses(parsePodsList(callGet(urlString)));
    } catch (RestClientException e) {
        // Known API errors (e.g. forbidden) are translated instead of propagated
        return handleKnownException(e);
    }
}
/**
 * With a service-per-pod label configured, endpoint discovery should match each pod's
 * NodePort service and expose the node's external IP plus the service's node port as
 * the public address.
 */
@Test
public void endpointsByNamespaceWithMultipleNodePortPublicIpMatchByServicePerPodLabel() throws JsonProcessingException {
    // given: a client configured with the service-per-pod label and stubbed API responses
    String servicePerPodLabel = "sample-service-per-pod-service-label";
    String servicePerPodLabelValue = "sample-service-per-pod-service-label-value";
    cleanUpClient();
    kubernetesClient = newKubernetesClient(false, servicePerPodLabel, servicePerPodLabelValue);
    stub(String.format("/api/v1/namespaces/%s/pods", NAMESPACE), podsListResponse());
    Map<String, String> queryParams = singletonMap("labelSelector",
        String.format("%s=%s", servicePerPodLabel, servicePerPodLabelValue));
    stub(String.format("/api/v1/namespaces/%s/endpoints", NAMESPACE), queryParams, endpointsListResponse());
    stub(String.format("/api/v1/namespaces/%s/services/hazelcast-0", NAMESPACE),
        service(servicePort(32123, 5701, 31916)));
    stub(String.format("/api/v1/namespaces/%s/services/service-1", NAMESPACE),
        service(servicePort(32124, 5701, 31917)));
    stub("/api/v1/nodes/node-name-1", node("node-name-1", "10.240.0.21", "35.232.226.200"));
    stub("/api/v1/nodes/node-name-2", node("node-name-2", "10.240.0.22", "35.232.226.201"));
    stub(String.format("/api/v1/namespaces/%s/pods/hazelcast-0", NAMESPACE),
        pod("hazelcast-0", NAMESPACE, "node-name-1", 5701));
    stub(String.format("/api/v1/namespaces/%s/pods/hazelcast-1", NAMESPACE),
        pod("hazelcast-1", NAMESPACE, "node-name-2", 5701));

    // when
    List<Endpoint> result = kubernetesClient.endpoints();

    // then: private addresses come from the pods, public addresses from node IP + node port
    assertThat(formatPrivate(result)).containsExactlyInAnyOrder(ready("192.168.0.25", 5701), ready("172.17.0.5", 5702));
    assertThat(formatPublic(result)).containsExactlyInAnyOrder(ready("35.232.226.200", 31916), ready("35.232.226.201", 31917));
}
/**
 * Updates an existing subnet after validating that its id, network id and CIDR are
 * all present.
 *
 * @param osSubnet the subnet to update; must be non-null with non-empty id,
 *                 network id and CIDR
 * @throws IllegalArgumentException if any required field is missing
 */
@Override
public void updateSubnet(Subnet osSubnet) {
    checkNotNull(osSubnet, ERR_NULL_SUBNET);
    checkArgument(!Strings.isNullOrEmpty(osSubnet.getId()), ERR_NULL_SUBNET_ID);
    checkArgument(!Strings.isNullOrEmpty(osSubnet.getNetworkId()), ERR_NULL_SUBNET_NET_ID);
    checkArgument(!Strings.isNullOrEmpty(osSubnet.getCidr()), ERR_NULL_SUBNET_CIDR);

    osNetworkStore.updateSubnet(osSubnet);
    log.info(String.format(MSG_SUBNET, osSubnet.getCidr(), MSG_UPDATED));
}
/** Updating a subnet without an id must be rejected with IllegalArgumentException. */
@Test(expected = IllegalArgumentException.class)
public void testUpdateSubnetWithNullId() {
    final Subnet testSubnet = NeutronSubnet.builder()
        .networkId(NETWORK_ID)
        .cidr("192.168.0.0/24")
        .build();
    target.updateSubnet(testSubnet);
}
/**
 * Dispatches authorization checks based on the concrete statement type.
 *
 * <p>Statement types without a dedicated validator fall through without any check.
 */
@Override
public void checkAuthorization(
    final KsqlSecurityContext securityContext,
    final MetaStore metaStore,
    final Statement statement
) {
  if (statement instanceof Query) {
    validateQuery(securityContext, metaStore, (Query)statement);
  } else if (statement instanceof InsertInto) {
    validateInsertInto(securityContext, metaStore, (InsertInto)statement);
  } else if (statement instanceof CreateAsSelect) {
    validateCreateAsSelect(securityContext, metaStore, (CreateAsSelect)statement);
  } else if (statement instanceof PrintTopic) {
    validatePrintTopic(securityContext, (PrintTopic)statement);
  } else if (statement instanceof CreateSource) {
    validateCreateSource(securityContext, (CreateSource)statement);
  }
}
/** INSERT INTO with all topic permissions granted should pass authorization without throwing. */
@Test
public void shouldInsertIntoWithAllPermissionsAllowed() {
  // Given:
  final Statement statement = givenStatement(String.format(
      "INSERT INTO %s SELECT * FROM %s;", AVRO_STREAM_TOPIC, KAFKA_STREAM_TOPIC)
  );

  // When/then:
  authorizationValidator.checkAuthorization(securityContext, metaStore, statement);
}
/**
 * Scales the ZooKeeper ensemble to the requested size.
 *
 * <p>Connects to ZooKeeper, reads the current server configuration, applies the
 * reconfiguration, and always closes the connection before completing the returned
 * future with the scaling outcome.
 *
 * @param scaleTo desired number of ZooKeeper servers
 * @return a future completed when scaling (and connection cleanup) finished
 */
public Future<Void> scale(int scaleTo) {
    return connect()
        .compose(zkAdmin -> {
            Promise<Void> scalePromise = Promise.promise();

            getCurrentConfig(zkAdmin)
                .compose(servers -> scaleTo(zkAdmin, servers, scaleTo))
                .onComplete(res -> closeConnection(zkAdmin)
                    .onComplete(closeResult -> {
                        // Ignoring the result of `closeConnection`
                        if (res.succeeded()) {
                            scalePromise.complete();
                        } else {
                            scalePromise.fail(res.cause());
                        }
                    }));

            return scalePromise.future();
        });
}
/**
 * Scaling against an unresolvable ZooKeeper host should fail with a connection
 * timeout message rather than hang.
 */
@Test
public void testConnectionToNonExistingHost(VertxTestContext context) {
    // Real "dummy" certificates to test the non-TLS connection error
    String certificate = """
            -----BEGIN CERTIFICATE-----
            MIIB+DCCAaKgAwIBAgIUM7rPDjaMHJdrfgoO6IDeE19O47EwDQYJKoZIhvcNAQEL
            BQAwQDEPMA0GA1UEAwwGY2xpZW50MQswCQYDVQQGEwJDWjEPMA0GA1UECAwGUHJh
            Z3VlMQ8wDQYDVQQHDAZQcmFndWUwHhcNMjQwNDMwMjMxNTM5WhcNNDQwNDI1MjMx
            NTM5WjBAMQ8wDQYDVQQDDAZjbGllbnQxCzAJBgNVBAYTAkNaMQ8wDQYDVQQIDAZQ
            cmFndWUxDzANBgNVBAcMBlByYWd1ZTBcMA0GCSqGSIb3DQEBAQUAA0sAMEgCQQDM
            EloEmtwrsWf5ry0iiLuf3H5GoSotCKzodWEXkVxZFjhscZZ5yon9JXp7rIiK4847
            yzAkMhw53+fur315jzsVAgMBAAGjdDByMB0GA1UdDgQWBBRU7rjtLujQcx/wAeqx
            Oy8OGJaWYjAfBgNVHSMEGDAWgBRU7rjtLujQcx/wAeqxOy8OGJaWYjAOBgNVHQ8B
            Af8EBAMCBaAwIAYDVR0lAQH/BBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMA0GCSqG
            SIb3DQEBCwUAA0EApdR0AvYNrxzv8v4iknZrMpjUe14Em5M40vhe/tzsI3NYvnCK
            eMYtGeFBbgBiG7R4nviUdbrXDqSeIfGQlZZpcA==
            -----END CERTIFICATE-----
            """;
    String privateKey = """
            -----BEGIN PRIVATE KEY-----
            MIIBVQIBADANBgkqhkiG9w0BAQEFAASCAT8wggE7AgEAAkEAzBJaBJrcK7Fn+a8t
            Ioi7n9x+RqEqLQis6HVhF5FcWRY4bHGWecqJ/SV6e6yIiuPOO8swJDIcOd/n7q99
            eY87FQIDAQABAkAZPaPYsfbNiLHdlic1AEiEq1cLEWAQFeSdE/egXKBZfEeDjfEr
            UYJY+GklzmVojaXOq1xZTJoiUwPnfvnoxwQBAiEA7hzOg38uXIEKClDnMrZatXcp
            e2jataWv8bEes6WOvIECIQDbZuXw5Ox38F3RnvEx/JxZoGb+zR+VGc3cxQXJA8mE
            lQIhAK7hH1d6oA02hK5A7xzSy1o9s4y83OzOTKOhJ2Bftq6BAiAKg+r/Walvsih8
            9HYw5B+GOCbXjXM3DS6Npy+4y6Kr5QIhAKmn4b+0Kwtwo1G7SUb7Gujkitg/K/fz
            xrwTW5qklBSa
            -----END PRIVATE KEY-----
            """;

    // Build an auth identity from the dummy PEM material wrapped in a Secret
    PemAuthIdentity pemAuthIdentity = PemAuthIdentity.clusterOperator(
            new SecretBuilder()
                    .withNewMetadata()
                        .withName("my-secret")
                    .endMetadata()
                    .addToData("cluster-operator.crt", Base64.getEncoder().encodeToString(certificate.getBytes(Charset.defaultCharset())))
                    .addToData("cluster-operator.key", Base64.getEncoder().encodeToString(privateKey.getBytes(Charset.defaultCharset())))
                    .build()
    );
    TlsPemIdentity pemIdentity = new TlsPemIdentity(dummyPemTrustSet, pemAuthIdentity);

    // Scaler pointed at a host that does not exist, with a 2s connection timeout
    ZookeeperScaler scaler = new ZookeeperScaler(new Reconciliation("test", "TestResource", "my-namespace", "my-resource"),
            vertx, new DefaultZooKeeperAdminProvider(), "i-do-not-exist.com:2181",
            null, pemIdentity, 2_000, 10_000);

    Checkpoint check = context.checkpoint();

    scaler.scale(5).onComplete(context.failing(cause -> context.verify(() -> {
        assertThat(cause.getMessage(), is("Failed to connect to Zookeeper i-do-not-exist.com:2181. Connection was not ready in 2000 ms."));
        check.flag();
    })));
}
/**
 * Returns the process-wide {@link SchedulerFactory} singleton
 * (initialization-on-demand holder idiom).
 */
public static SchedulerFactory getInstance() {
    return SchedulerFactoryInstance.lazyInstance;
}
/**
 * Verifies the singleton contract of {@link SchedulerFactory#getInstance()}:
 * repeated calls must return the very same instance.
 */
@Test
public void shouldBeSameSchedulerFactoryInstance() {
    SchedulerFactory instance = SchedulerFactory.getInstance();
    SchedulerFactory instance2 = SchedulerFactory.getInstance();
    // Use identity comparison: isEqualTo would also pass for two distinct-but-equal
    // instances and therefore would not actually prove the singleton property.
    assertThat(instance).isSameAs(instance2);
}
// Combines the parent's hash with the per-key map statistics so that equal
// statistics objects (per equals) hash identically.
public int hashCode() {
    return Objects.hash(super.hashCode(), mapStatistics);
}
/**
 * Builders fed identical key statistics must produce equal column statistics with
 * equal hash codes; differing key statistics must not.
 */
@Test(dataProvider = "keySupplier")
public void testEqualsHashCode(KeyInfo[] keys) {
    MapColumnStatisticsBuilder builder1 = new MapColumnStatisticsBuilder(true);
    builder1.addMapStatistics(keys[0], new ColumnStatistics(3L, null, 1L, 2L));
    builder1.addMapStatistics(keys[1], new ColumnStatistics(5L, null, 1L, 2L));
    builder1.increaseValueCount(8);
    ColumnStatistics columnStatistics1 = builder1.buildColumnStatistics();

    // same as builder1
    MapColumnStatisticsBuilder builder2 = new MapColumnStatisticsBuilder(true);
    builder2.addMapStatistics(keys[0], new ColumnStatistics(3L, null, 1L, 2L));
    builder2.addMapStatistics(keys[1], new ColumnStatistics(5L, null, 1L, 2L));
    builder2.increaseValueCount(8);
    ColumnStatistics columnStatistics2 = builder2.buildColumnStatistics();

    MapColumnStatisticsBuilder builder3 = new MapColumnStatisticsBuilder(true);
    builder3.addMapStatistics(keys[1], new ColumnStatistics(5L, null, 1L, 2L));
    builder3.addMapStatistics(keys[2], new ColumnStatistics(6L, null, 1L, 2L));
    builder3.increaseValueCount(11);
    ColumnStatistics columnStatistics3 = builder3.buildColumnStatistics();

    // 1 and 2 should be equal
    assertEquals(columnStatistics1, columnStatistics2);
    assertEquals(columnStatistics1.hashCode(), columnStatistics2.hashCode());

    // 2 and 3 should be not equal
    assertNotEquals(columnStatistics2, columnStatistics3);
    assertNotEquals(columnStatistics2.hashCode(), columnStatistics3.hashCode());
}
/**
 * Builds a table-table join of the given type.
 *
 * <p>For RIGHT joins the left/right schemas are swapped when computing the join
 * params, and the join is executed as a LEFT join with the operands reversed, since
 * Kafka Streams has no native right join for tables.
 *
 * @param left  holder of the left input table
 * @param right holder of the right input table
 * @param join  join descriptor (type and key column name)
 * @return an unmaterialized holder of the joined table
 */
public static <K> KTableHolder<K> build(
    final KTableHolder<K> left,
    final KTableHolder<K> right,
    final TableTableJoin<K> join
) {
  final LogicalSchema leftSchema;
  final LogicalSchema rightSchema;

  // RIGHT join: swap schemas so the joiner sees operands in execution order
  if (join.getJoinType().equals(RIGHT)) {
    leftSchema = right.getSchema();
    rightSchema = left.getSchema();
  } else {
    leftSchema = left.getSchema();
    rightSchema = right.getSchema();
  }

  final JoinParams joinParams = JoinParamsFactory
      .create(join.getKeyColName(), leftSchema, rightSchema);

  final KTable<K, GenericRow> result;
  switch (join.getJoinType()) {
    case INNER:
      result = left.getTable().join(right.getTable(), joinParams.getJoiner());
      break;
    case LEFT:
      result = left.getTable().leftJoin(right.getTable(), joinParams.getJoiner());
      break;
    case RIGHT:
      // Implemented as leftJoin with reversed operands
      result = right.getTable().leftJoin(left.getTable(), joinParams.getJoiner());
      break;
    case OUTER:
      result = left.getTable().outerJoin(right.getTable(), joinParams.getJoiner());
      break;
    default:
      throw new IllegalStateException("invalid join type: " + join.getJoinType());
  }

  return KTableHolder.unmaterialized(
      result,
      joinParams.getSchema(),
      left.getExecutionKeyFactory());
}
/** The joined schema should use the key columns plus left-then-right value columns. */
@Test
public void shouldReturnCorrectSchema() {
  // Given:
  givenInnerJoin(R_KEY);

  // When:
  final KTableHolder<Struct> result = join.build(planBuilder, planInfo);

  // Then:
  assertThat(
      result.getSchema(),
      is(LogicalSchema.builder()
          .keyColumns(RIGHT_SCHEMA.key())
          .valueColumns(LEFT_SCHEMA.value())
          .valueColumns(RIGHT_SCHEMA.value())
          .build()
      )
  );
}
/**
 * Concatenates the given JSON strings.
 *
 * <p>If every argument parses to a JSON object, the objects are merged; otherwise each
 * value is coerced to an array and the arrays are concatenated. Returns {@code null}
 * if the argument list itself, any argument, or any parse result is null/missing.
 *
 * @param jsonStrings the JSON documents to concatenate
 * @return the concatenated JSON document, or {@code null}
 */
@Udf
public String concat(@UdfParameter final String... jsonStrings) {
  if (jsonStrings == null) {
    return null;
  }

  final List<JsonNode> nodes = new ArrayList<>(jsonStrings.length);
  boolean allObjects = true;
  for (final String jsonString : jsonStrings) {
    if (jsonString == null) {
      return null;
    }

    final JsonNode node = UdfJsonMapper.parseJson(jsonString);
    if (node.isMissingNode()) {
      return null;
    }
    // A single non-object input switches the whole operation to array mode
    if (allObjects && !node.isObject()) {
      allObjects = false;
    }

    nodes.add(node);
  }

  JsonNode result = nodes.get(0);
  if (allObjects) {
    for (int i = 1; i < nodes.size(); i++) {
      result = concatObjects((ObjectNode) result, (ObjectNode) nodes.get(i));
    }
  } else {
    for (int i = 1; i < nodes.size(); i++) {
      result = concatArrays(toArrayNode(result), toArrayNode(nodes.get(i)));
    }
  }

  return UdfJsonMapper.writeValueAsJson(result);
}
/** Three scalar JSON values should be concatenated into a single array. */
@Test
public void shouldMerge3Args() {
  // When:
  final String result = udf.concat("1", "2", "3");

  // Then:
  assertEquals("[1,2,3]", result);
}
/**
 * Returns a delegating adapter that invokes the type's {@code @PostConstruct} method
 * after deserialization, or {@code null} if the type hierarchy declares none.
 */
@Override
public <T> TypeAdapter<T> create(Gson gson, TypeToken<T> type) {
    // Walk the class hierarchy (excluding Object) looking for a @PostConstruct method
    for (Class<?> t = type.getRawType(); (t != Object.class) && (t.getSuperclass() != null); t = t.getSuperclass()) {
        for (Method m : t.getDeclaredMethods()) {
            if (m.isAnnotationPresent(PostConstruct.class)) {
                m.setAccessible(true);
                TypeAdapter<T> delegate = gson.getDelegateAdapter(this, type);
                return new PostConstructAdapter<>(delegate, m);
            }
        }
    }
    // No hook found: let Gson pick another factory
    return null;
}
/** Round-trips a type containing a list of @PostConstruct-bearing elements. */
@Test
public void testList() {
    MultipleSandwiches sandwiches = new MultipleSandwiches(
        Arrays.asList(new Sandwich("white", "cheddar"), new Sandwich("whole wheat", "swiss")));

    Gson gson = new GsonBuilder().registerTypeAdapterFactory(new PostConstructAdapterFactory()).create();

    // Throws NullPointerException without the fix in https://github.com/google/gson/pull/1103
    String json = gson.toJson(sandwiches);
    assertThat(json)
        .isEqualTo(
            "{\"sandwiches\":[{\"bread\":\"white\",\"cheese\":\"cheddar\"},"
                + "{\"bread\":\"whole wheat\",\"cheese\":\"swiss\"}]}");

    MultipleSandwiches sandwichesFromJson = gson.fromJson(json, MultipleSandwiches.class);
    assertThat(sandwichesFromJson).isEqualTo(sandwiches);
}
/**
 * Releases this component's resources: closes the loggers first, then destroys the
 * connection factory.
 */
@Override
public void close() throws IOException {
    loggers.close();
    connectionFactory.destroy();
}
/**
 * After writing a newer unfinalized segment while one journal node was down, recovery
 * with that node back must adopt the newer segment's transactions.
 */
@Test
public void testNewerVersionOfSegmentWins() throws Exception {
    setupEdgeCaseOneJnHasSegmentWithAcceptedRecovery();

    // Now start writing again without JN0 present:
    cluster.getJournalNode(0).stopAndJoin(0);

    qjm = createSpyingQJM();
    try {
        assertEquals(100, QJMTestUtil.recoverAndReturnLastTxn(qjm));

        // Write segment but do not finalize
        writeSegment(cluster, qjm, 101, 50, false);
    } finally {
        qjm.close();
    }

    // Now try to recover a new writer, with JN0 present,
    // and ensure that all of the above-written transactions are recovered.
    cluster.restartJournalNode(0);

    qjm = createSpyingQJM();
    try {
        assertEquals(150, QJMTestUtil.recoverAndReturnLastTxn(qjm));
    } finally {
        qjm.close();
    }
}
/**
 * Converts a list of strings to a {@code String[]} array.
 *
 * <p>Generalized to accept any {@link List} implementation (previously only
 * {@code ArrayList}); existing callers remain source-compatible.
 *
 * @param src the strings to copy; must not be {@code null}
 * @return a new array containing the elements of {@code src} in order
 */
public static String[] toStringArray(List<String> src) {
    return src.toArray(new String[0]);
}
/** Table-driven check: conversion preserves length, order and contents for 0/1/2 elements. */
@Test
public void toStringArray() {
    class TestCase {
        String[] mExpected;

        public TestCase(String... strings) {
            mExpected = strings;
        }
    }
    List<TestCase> testCases = new ArrayList<>();
    testCases.add(new TestCase());
    testCases.add(new TestCase("foo"));
    testCases.add(new TestCase("foo", "bar"));
    for (TestCase testCase : testCases) {
        ArrayList<String> input = new ArrayList<>();
        Collections.addAll(input, testCase.mExpected);
        String[] got = CommonUtils.toStringArray(input);
        assertEquals(testCase.mExpected.length, got.length);
        for (int k = 0; k < got.length; k++) {
            assertEquals(testCase.mExpected[k], got[k]);
        }
    }
}
/**
 * Computes cluster health from application nodes only: runs every sub-check over the
 * application-node subset and merges the results, starting from GREEN.
 */
@Override
public Health check(Set<NodeHealth> nodeHealths) {
    // Only application nodes are relevant to this checker
    Set<NodeHealth> appNodes = nodeHealths.stream()
        .filter(s -> s.getDetails().getType() == NodeDetails.Type.APPLICATION)
        .collect(Collectors.toSet());
    return Arrays.stream(AppNodeClusterHealthSubChecks.values())
        .map(s -> s.check(appNodes))
        .reduce(Health.GREEN, HealthReducer::merge);
}
/** Two YELLOW application nodes should yield an overall YELLOW status with the matching cause. */
@Test
public void status_YELLOW_when_two_YELLOW_application_nodes() {
    Set<NodeHealth> nodeHealths = nodeHealths(YELLOW, YELLOW).collect(toSet());

    Health check = underTest.check(nodeHealths);

    assertThat(check)
        .forInput(nodeHealths)
        .hasStatus(Health.Status.YELLOW)
        .andCauses("Status of all application nodes is YELLOW");
}
/**
 * Maps a JSON-schema "type" node to a Java type.
 *
 * <p>Resolution order matters: explicit object types (or presence of "properties")
 * first, then "existingJavaType", then the primitive JSON types, arrays, and finally
 * a fallback to {@code Object}. A "format" or string "media" node can further refine
 * the type, but only when no explicit Java type was requested.
 */
@Override
public JType apply(String nodeName, JsonNode node, JsonNode parent, JClassContainer jClassContainer, Schema schema) {

    String propertyTypeName = getTypeName(node);

    JType type;

    if (propertyTypeName.equals("object") || node.has("properties") && node.path("properties").size() > 0) {

        type = ruleFactory.getObjectRule().apply(nodeName, node, parent, jClassContainer.getPackage(), schema);
    } else if (node.has("existingJavaType")) {
        String typeName = node.path("existingJavaType").asText();

        if (isPrimitive(typeName, jClassContainer.owner())) {
            type = primitiveType(typeName, jClassContainer.owner());
        } else {
            type = resolveType(jClassContainer, typeName);
        }
    } else if (propertyTypeName.equals("string")) {

        type = jClassContainer.owner().ref(String.class);
    } else if (propertyTypeName.equals("number")) {

        type = getNumberType(jClassContainer.owner(), ruleFactory.getGenerationConfig());
    } else if (propertyTypeName.equals("integer")) {

        type = getIntegerType(jClassContainer.owner(), node, ruleFactory.getGenerationConfig());
    } else if (propertyTypeName.equals("boolean")) {

        type = unboxIfNecessary(jClassContainer.owner().ref(Boolean.class), ruleFactory.getGenerationConfig());
    } else if (propertyTypeName.equals("array")) {

        type = ruleFactory.getArrayRule().apply(nodeName, node, parent, jClassContainer.getPackage(), schema);
    } else {
        // Unknown or "null" type: fall back to Object
        type = jClassContainer.owner().ref(Object.class);
    }

    // "format"/"media" refinements only apply when no explicit Java type was given
    if (!node.has("javaType") && !node.has("existingJavaType") && node.has("format")) {
        type = ruleFactory.getFormatRule().apply(nodeName, node.get("format"), node, type, schema);
    } else if (!node.has("javaType") && !node.has("existingJavaType") && propertyTypeName.equals("string") && node.has("media")) {
        type = ruleFactory.getMediaRule().apply(nodeName, node.get("media"), node, type, schema);
    }

    return type;
}
/** A JSON-schema "null" type should map to java.lang.Object (the fallback branch). */
@Test
public void applyGeneratesNullAsObject() {
    JPackage jpackage = new JCodeModel()._package(getClass().getPackage().getName());

    ObjectNode objectNode = new ObjectMapper().createObjectNode();
    objectNode.put("type", "null");

    JType result = rule.apply("fooBar", objectNode, null, jpackage, null);

    assertThat(result.fullName(), is(Object.class.getName()));
}
/**
 * Sets the transporter for the registry being built.
 *
 * @param transporter the transporter name
 * @return this builder, for chaining
 */
public RegistryBuilder transporter(String transporter) {
    this.transporter = transporter;
    return getThis();
}
/** The transporter set on the builder should be carried into the built config. */
@Test
void transporter() {
    RegistryBuilder builder = new RegistryBuilder();
    builder.transporter("transporter");
    Assertions.assertEquals("transporter", builder.build().getTransporter());
}
/**
 * Depth-first traversal of an Avro schema graph using an explicit stack instead of
 * recursion, with cycle protection for recursive schemas.
 *
 * <p>The stack mixes two element kinds: {@link Schema}s still to visit, and
 * {@link Supplier}s representing deferred {@code afterVisitNonTerminal} callbacks.
 * Visitor actions control the walk (continue, skip siblings, terminate).
 *
 * @param start   the root schema
 * @param visitor the visitor receiving terminal/non-terminal callbacks
 * @return the visitor's result via {@code visitor.get()}
 */
public static <T> T visit(final Schema start, final SchemaVisitor<T> visitor) {
    // Set of Visited Schemas (identity-based: schema graphs may contain cycles)
    IdentityHashMap<Schema, Schema> visited = new IdentityHashMap<>();
    // Stack that contains the Schemas to process and afterVisitNonTerminal
    // functions.
    // Deque<Either<Schema, Supplier<SchemaVisitorAction>>>
    // Using Either<...> has a cost we want to avoid...
    Deque<Object> dq = new ArrayDeque<>();
    dq.push(start);
    Object current;
    while ((current = dq.poll()) != null) {
        if (current instanceof Supplier) {
            // We are executing a non-terminal post visit.
            SchemaVisitor.SchemaVisitorAction action = ((Supplier<SchemaVisitor.SchemaVisitorAction>) current).get();
            switch (action) {
            case CONTINUE:
                break;
            case SKIP_SIBLINGS:
                // Drop pending sibling schemas; stop at the next deferred callback
                while (dq.peek() instanceof Schema) {
                    dq.remove();
                }
                break;
            case TERMINATE:
                return visitor.get();
            case SKIP_SUBTREE:
            default:
                throw new UnsupportedOperationException("Invalid action " + action);
            }
        } else {
            Schema schema = (Schema) current;
            boolean terminate;
            if (visited.containsKey(schema)) {
                // Already-seen schema is treated as a terminal to avoid infinite loops
                terminate = visitTerminal(visitor, schema, dq);
            } else {
                Schema.Type type = schema.getType();
                switch (type) {
                case ARRAY:
                    terminate = visitNonTerminal(visitor, schema, dq, Collections.singleton(schema.getElementType()));
                    visited.put(schema, schema);
                    break;
                case RECORD:
                    // Fields pushed in reverse so they pop in declaration order
                    terminate = visitNonTerminal(visitor, schema, dq, () -> schema.getFields().stream().map(Field::schema)
                        .collect(Collectors.toCollection(ArrayDeque::new)).descendingIterator());
                    visited.put(schema, schema);
                    break;
                case UNION:
                    terminate = visitNonTerminal(visitor, schema, dq, schema.getTypes());
                    visited.put(schema, schema);
                    break;
                case MAP:
                    terminate = visitNonTerminal(visitor, schema, dq, Collections.singleton(schema.getValueType()));
                    visited.put(schema, schema);
                    break;
                default:
                    terminate = visitTerminal(visitor, schema, dq);
                    break;
                }
            }
            if (terminate) {
                return visitor.get();
            }
        }
    }
    return visitor.get();
}
/** A record with one int field should be visited as the record followed by one terminal. */
@Test
public void testVisit4() {
    String s4 = "{\"type\": \"record\", \"name\": \"st1\", \"fields\": ["
        + "{\"name\": \"f1\", \"type\": \"int\"}" + "]}";
    Assert.assertEquals("st1.!", Schemas.visit(new Schema.Parser().parse(s4), new TestVisitor()));
}
/**
 * Looks up the value for a reference in the reverse cache.
 *
 * @param reference the cache reference to resolve
 * @return the cached value, or {@code null} (with a warning) when the reference maps
 *         to the missing-item sentinel
 */
public T lookup(final CacheReference<T> reference) {
    final T value = reverse.get(reference);
    // Identity comparison against the sentinel marking "known missing"
    if(MISSING_ITEM == value) {
        log.warn(String.format("Lookup failed for %s in reverse cache", reference));
        return null;
    }
    return value;
}
/** Lookup misses return null; after caching a directory listing, its children resolve. */
@Test
public void testLookup() {
    final Cache<Path> cache = new ReverseLookupCache<>(new PathCache(1), 1);
    assertNull(cache.lookup(new SimplePathPredicate(new Path("/", EnumSet.of(Path.Type.directory)))));
    final AttributedList<Path> list = new AttributedList<>();
    final Path directory = new Path("p", EnumSet.of(Path.Type.directory));
    final Path file1 = new Path(directory, "name1", EnumSet.of(Path.Type.file));
    list.add(file1);
    final Path file2 = new Path(directory, "name2", EnumSet.of(Path.Type.file));
    list.add(file2);
    cache.put(directory, list);
    assertNotNull(cache.lookup(new DefaultPathPredicate(file1)));
    assertNotNull(cache.lookup(new DefaultPathPredicate(file2)));
}
/**
 * Returns the indices of nodes that should be taken down for the given cluster state,
 * as computed by the availability calculation.
 */
public Set<Integer> nodesThatShouldBeDown(ClusterState state) {
    return calculate(state).nodesThatShouldBeDown();
}
/** A node in maintenance must not count as down, so no additional nodes are taken down. */
@Test
void maintenance_node_not_counted_as_down() {
    GroupAvailabilityCalculator calc = calcForHierarchicCluster(
        DistributionBuilder.withGroups(3).eachWithNodeCount(2), 0.99);

    assertThat(calc.nodesThatShouldBeDown(clusterState(
        "distributor:6 storage:6 .4.s:m")), equalTo(emptySet()));
}
/**
 * Tokenizes the pattern string by running a character-at-a-time state machine.
 *
 * <p>Each state delegates to its handler; on end-of-stream the buffered characters are
 * flushed according to the final state. Ending inside a format modifier or option is
 * a scan error.
 *
 * @return the token list
 * @throws ScanException if the pattern ends in an incomplete construct
 */
List<Token> tokenize() throws ScanException {
    List<Token> tokenList = new ArrayList<Token>();
    StringBuffer buf = new StringBuffer();

    while (pointer < patternLength) {
        char c = pattern.charAt(pointer);
        pointer++;

        switch (state) {
        case LITERAL_STATE:
            handleLiteralState(c, tokenList, buf);
            break;
        case FORMAT_MODIFIER_STATE:
            handleFormatModifierState(c, tokenList, buf);
            break;
        case OPTION_STATE:
            processOption(c, tokenList, buf);
            break;
        case KEYWORD_STATE:
            handleKeywordState(c, tokenList, buf);
            break;
        case RIGHT_PARENTHESIS_STATE:
            handleRightParenthesisState(c, tokenList, buf);
            break;
        default:
        }
    }

    // EOS: flush whatever remains in the buffer based on the terminal state
    switch (state) {
    case LITERAL_STATE:
        addValuedToken(Token.LITERAL, buf, tokenList);
        break;
    case KEYWORD_STATE:
        tokenList.add(new Token(Token.SIMPLE_KEYWORD, buf.toString()));
        break;
    case RIGHT_PARENTHESIS_STATE:
        tokenList.add(Token.RIGHT_PARENTHESIS_TOKEN);
        break;

    case FORMAT_MODIFIER_STATE:
    case OPTION_STATE:
        throw new ScanException("Unexpected end of pattern string");
    }

    return tokenList;
}
/** Composite keywords like %d(...) and %subst(...) should tokenize with nested content. */
@Test
public void compositedKeyword() throws ScanException {
    {
        List<Token> tl = new TokenStream("%d(A)", new AlmostAsIsEscapeUtil()).tokenize();
        List<Token> witness = new ArrayList<Token>();
        witness.add(Token.PERCENT_TOKEN);
        witness.add(new Token(Token.COMPOSITE_KEYWORD, "d"));
        witness.add(new Token(Token.LITERAL, "A"));
        witness.add(Token.RIGHT_PARENTHESIS_TOKEN);
        assertEquals(witness, tl);
    }
    {
        List<Token> tl = new TokenStream("a %subst(%b C)", new AlmostAsIsEscapeUtil()).tokenize();
        List<Token> witness = new ArrayList<Token>();
        witness.add(new Token(Token.LITERAL, "a "));
        witness.add(Token.PERCENT_TOKEN);
        witness.add(new Token(Token.COMPOSITE_KEYWORD, "subst"));
        witness.add(Token.PERCENT_TOKEN);
        witness.add(new Token(Token.SIMPLE_KEYWORD, "b"));
        witness.add(new Token(Token.LITERAL, " C"));
        witness.add(Token.RIGHT_PARENTHESIS_TOKEN);
        assertEquals(witness, tl);
    }
}
/**
 * Returns whether an ack with the given id is currently pending.
 */
public boolean containAck(String ackId) {
    return ackMap.containsKey(ackId);
}
/** containAck should delegate to the ack map's containsKey. */
@Test
void testContainAck() {
    when(ackMap.containsKey("1111")).thenReturn(true);
    assertTrue(udpConnector.containAck("1111"));
}
/**
 * Sets multiple key/value pairs.
 *
 * <p>Inside a transaction or pipeline each SET is written through the queue; otherwise
 * the SETs are issued as one batch and executed synchronously. Always reports success.
 */
@Override
public Boolean mSet(Map<byte[], byte[]> tuple) {
    if (isQueueing() || isPipelined()) {
        for (Entry<byte[], byte[]> entry: tuple.entrySet()) {
            write(entry.getKey(), StringCodec.INSTANCE, RedisCommands.SET, entry.getKey(), entry.getValue());
        }
        return true;
    }

    CommandBatchService es = new CommandBatchService(executorService);
    for (Entry<byte[], byte[]> entry: tuple.entrySet()) {
        es.writeAsync(entry.getKey(), StringCodec.INSTANCE, RedisCommands.SET, entry.getKey(), entry.getValue());
    }
    es.execute();
    return true;
}
/** mSet of ten pairs should make every key individually readable with its value. */
@Test
public void testMSet() {
    testInCluster(connection -> {
        Map<byte[], byte[]> map = new HashMap<>();
        for (int i = 0; i < 10; i++) {
            map.put(("test" + i).getBytes(), ("test" + i*100).getBytes());
        }
        connection.mSet(map);
        for (Map.Entry<byte[], byte[]> entry : map.entrySet()) {
            assertThat(connection.get(entry.getKey())).isEqualTo(entry.getValue());
        }
    });
}
/**
 * Creates a {@link Read} transform with defaults: certificate validation enabled and
 * the default retention policy.
 */
public static Read read() {
    return new AutoValue_InfluxDbIO_Read.Builder()
        .setDisableCertificateValidation(false)
        .setRetentionPolicy(DEFAULT_RETENTION_POLICY)
        .build();
}
/**
 * Runs a pipeline against a mocked InfluxDB client and checks that the read produces
 * the expected number of records.
 */
@Test
public void validateReadTest() {
    String influxHost = "http://localhost";
    String userName = "admin";
    String password = "admin";
    String influxDatabaseName = "testDataBase";
    InfluxDB influxDb = Mockito.mock(InfluxDB.class);
    // Stub the static factory so both connect() overloads yield the mock client
    PowerMockito.when(
            InfluxDBFactory.connect(
                anyString(), anyString(), anyString(), any(OkHttpClient.Builder.class)))
        .thenReturn(influxDb);
    PowerMockito.when(InfluxDBFactory.connect(anyString(), anyString(), anyString()))
        .thenReturn(influxDb);
    doReturn(getDatabase(influxDatabaseName)).when(influxDb).query(new Query("SHOW DATABASES"));
    doReturn(getDatabase(influxDatabaseName)).when(influxDb).query(new Query("SHOW SHARDS"));
    doReturn(mockResultForNumberAndSizeOfBlocks())
        .when(influxDb)
        .query(new Query("EXPLAIN SELECT * FROM cpu", influxDatabaseName));
    // 20 mocked rows for the actual data query
    doReturn(mockResult("cpu", 20))
        .when(influxDb)
        .query(new Query("SELECT * FROM cpu", influxDatabaseName));
    PCollection<Long> data =
        pipeline
            .apply(
                "Read data to InfluxDB",
                InfluxDbIO.read()
                    .withDataSourceConfiguration(
                        DataSourceConfiguration.create(
                            StaticValueProvider.of(influxHost),
                            StaticValueProvider.of(userName),
                            StaticValueProvider.of(password)))
                    .withDatabase(influxDatabaseName)
                    .withQuery("SELECT * FROM cpu"))
            .apply(Count.globally());
    PAssert.that(data).containsInAnyOrder(20L);
    PipelineResult result = pipeline.run();
    Assert.assertEquals(State.DONE, result.waitUntilFinish());
}
/**
 * Generates the current TOTP code for this entry.
 *
 * @return the OTP string
 * @throws OtpInfoException if the secret is invalid
 */
@Override
public String getOtp() throws OtpInfoException {
    checkSecret();

    try {
        OTP otp = TOTP.generateOTP(getSecret(), getAlgorithm(true), getDigits(), getPeriod());
        return otp.toString();
    } catch (InvalidKeyException | NoSuchAlgorithmException e) {
        // Secret/algorithm were already validated; reaching here is a programming error
        throw new RuntimeException(e);
    }
}
/** Generated OTPs should match the RFC test vectors for every supported algorithm. */
@Test
public void testTotpInfoOtp() throws OtpInfoException {
    for (TOTPTest.Vector vector : TOTPTest.VECTORS) {
        byte[] seed = TOTPTest.getSeed(vector.Algo);
        TotpInfo info = new TotpInfo(seed, vector.Algo, 8, TotpInfo.DEFAULT_PERIOD);
        assertEquals(vector.OTP, info.getOtp(vector.Time));
    }
}
/**
 * Exports application configuration for the given environments to the stream.
 *
 * @param outputStream destination for the exported archive
 * @param exportEnvs environments to export; when empty, all active environments
 *                   are exported instead
 */
public void exportData(OutputStream outputStream, List<Env> exportEnvs) {
    if (CollectionUtils.isEmpty(exportEnvs)) {
        // Default to every active environment
        exportEnvs = portalSettings.getActiveEnvs();
    }
    exportApps(exportEnvs, outputStream);
}
/**
 * End-to-end export/import round trip: exports two apps (clusters, namespaces, items)
 * to a zip, then imports the zip with all remote lookups stubbed to "not found" so
 * every entity is re-created, and verifies the creation call counts.
 *
 * NOTE(review): fileOutputStream and zipInputStream are never closed — consider
 * try-with-resources; also the broad catch around the import only prints the trace.
 */
@Test
public void testNamespaceExportImport() throws FileNotFoundException {
    File temporaryFolder = Files.newTemporaryFolder();
    temporaryFolder.deleteOnExit();
    String filePath = temporaryFolder + File.separator + "export.zip";

    // --- export config ---
    UserInfo userInfo = genUser();
    when(userInfoHolder.getUser()).thenReturn(userInfo);

    Env env = Env.DEV;
    String appId1 = "app1";
    String appId2 = "app2";
    App app1 = genApp(appId1, appId1, "org1", "org2");
    App app2 = genApp(appId2, appId2, "org1", "org2");
    List<App> exportApps = Lists.newArrayList(app1, app2);

    String appNamespaceName1 = "ns1";
    String appNamespaceName2 = "ns2";
    AppNamespace app1Namespace1 = genAppNamespace(appId1, appNamespaceName1, false);
    AppNamespace app1Namespace2 = genAppNamespace(appId1, appNamespaceName2, true);
    AppNamespace app2Namespace1 = genAppNamespace(appId2, appNamespaceName1, false);
    List<AppNamespace> appNamespaces = Lists.newArrayList(app1Namespace1, app1Namespace2, app2Namespace1);

    String clusterName1 = "c1";
    String clusterName2 = "c2";
    ClusterDTO app1Cluster1 = genCluster(clusterName1, appId1);
    ClusterDTO app1Cluster2 = genCluster(clusterName2, appId1);
    ClusterDTO app2Cluster1 = genCluster(clusterName1, appId2);
    ClusterDTO app2Cluster2 = genCluster(clusterName2, appId2);
    List<ClusterDTO> app1Clusters = Lists.newArrayList(app1Cluster1, app1Cluster2);
    List<ClusterDTO> app2Clusters = Lists.newArrayList(app2Cluster1, app2Cluster2);

    ItemBO item1 = genItem("k1", "v1");
    ItemBO item2 = genItem("k2", "v2");
    List<ItemBO> items = Lists.newArrayList(item1, item2);

    String namespaceName1 = "namespace1";
    String namespaceName2 = "namespace2";
    NamespaceBO app1Cluster1Namespace1 = genNamespace(app1, app1Cluster1, items, namespaceName1);
    NamespaceBO app1Cluster1Namespace2 = genNamespace(app1, app1Cluster1, items, namespaceName2);
    List<NamespaceBO> app1Cluster1Namespace = Lists.newArrayList(app1Cluster1Namespace1, app1Cluster1Namespace2);
    NamespaceBO app1Cluster2Namespace1 = genNamespace(app1, app1Cluster2, items, namespaceName1);
    List<NamespaceBO> app1Cluster2Namespace = Lists.newArrayList(app1Cluster2Namespace1);
    NamespaceBO app2Cluster1Namespace1 = genNamespace(app2, app1Cluster1, items, namespaceName1);
    List<NamespaceBO> app2Cluster1Namespace = Lists.newArrayList(app2Cluster1Namespace1);
    NamespaceBO app2Cluster2Namespace1 = genNamespace(app2, app1Cluster2, items, namespaceName1);
    NamespaceBO app2Cluster2Namespace2 = genNamespace(app2, app1Cluster2, items, namespaceName2);
    List<NamespaceBO> app2Cluster2Namespace = Lists.newArrayList(app2Cluster2Namespace1, app2Cluster2Namespace2);

    when(appService.findAll()).thenReturn(exportApps);
    when(appNamespaceService.findAll()).thenReturn(appNamespaces);
    when(permissionValidator.isAppAdmin(any())).thenReturn(true);
    when(clusterService.findClusters(env, appId1)).thenReturn(app1Clusters);
    when(clusterService.findClusters(env, appId2)).thenReturn(app2Clusters);
    when(namespaceService.findNamespaceBOs(appId1, Env.DEV, clusterName1, false)).thenReturn(app1Cluster1Namespace);
    when(namespaceService.findNamespaceBOs(appId1, Env.DEV, clusterName2, false)).thenReturn(app1Cluster2Namespace);
    when(namespaceService.findNamespaceBOs(appId2, Env.DEV, clusterName1, false)).thenReturn(app2Cluster1Namespace);
    when(namespaceService.findNamespaceBOs(appId2, Env.DEV, clusterName2, false)).thenReturn(app2Cluster2Namespace);

    FileOutputStream fileOutputStream = new FileOutputStream(filePath);
    configsExportService.exportData(fileOutputStream, Lists.newArrayList(Env.DEV));

    // --- import config ---
    // Stub all remote lookups to "missing"/failing so the import creates everything anew
    when(appNamespaceService.findByAppIdAndName(any(), any())).thenReturn(null);
    when(appNamespaceService.importAppNamespaceInLocal(any())).thenReturn(app1Namespace1);
    when(appService.load(any())).thenReturn(null);
    when(appService.load(any(), any())).thenThrow(new RuntimeException());
    when(clusterService.loadCluster(any(), any(), any())).thenThrow(new RuntimeException());
    when(namespaceService.loadNamespaceBaseInfo(any(), any(), any(), any())).thenThrow(new RuntimeException());
    when(namespaceService.createNamespace(any(), any())).thenReturn(genNamespaceDTO(1));
    when(itemService.findItems(any(), any(), any(), any())).thenReturn(Lists.newArrayList());
    HttpStatusCodeException itemNotFoundException = new HttpClientErrorException(HttpStatus.NOT_FOUND);
    when(itemService.loadItem(any(), any(), any(), any(), anyString())).thenThrow(itemNotFoundException);

    FileInputStream fileInputStream = new FileInputStream(filePath);
    ZipInputStream zipInputStream = new ZipInputStream(fileInputStream);
    try {
        configsImportService.importDataFromZipFile(Lists.newArrayList(Env.DEV), zipInputStream, false);
    } catch (Exception e) {
        e.printStackTrace();
    }

    // Verify every entity type was re-created the expected number of times
    verify(appNamespaceService, times(3)).importAppNamespaceInLocal(any());
    verify(applicationEventPublisher, times(3)).publishEvent(any());
    verify(appService, times(2)).createAppInRemote(any(), any());
    verify(clusterService, times(4)).createCluster(any(), any());
    verify(namespaceService, times(6)).createNamespace(any(), any());
    verify(roleInitializationService,times(6)).initNamespaceRoles(any(), any(), anyString());
    verify(roleInitializationService,times(6)).initNamespaceEnvRoles(any(), any(), anyString());
    verify(itemService, times(12)).createItem(any(), any(), any(), any(), any());
}
@Override public int hashCode() { if (value == null) { return 31; } // Using recommended hashing algorithm from Effective Java for longs and doubles if (isIntegral(this)) { long value = getAsNumber().longValue(); return (int) (value ^ (value >>> 32)); } if (value instanceof Number) { long value = Double.doubleToLongBits(getAsNumber().doubleValue()); return (int) (value ^ (value >>> 32)); } return value.hashCode(); }
@Test
public void testIntegerEqualsBigInteger() {
  // An int primitive and a BigInteger primitive with the same numeric value
  // must be equal and hash identically.
  JsonPrimitive intPrimitive = new JsonPrimitive(10);
  JsonPrimitive bigIntPrimitive = new JsonPrimitive(new BigInteger("10"));
  assertThat(intPrimitive).isEqualTo(bigIntPrimitive);
  assertThat(intPrimitive.hashCode()).isEqualTo(bigIntPrimitive.hashCode());
}
/**
 * Opens the remote file for reading, optionally resuming at the offset from
 * the transfer status via an HTTP Range request.
 */
@Override
public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    try {
        // Resolve the server-side resource id for the file
        final String resourceId = fileid.getFileId(file);
        // Fetch resource metadata including a download link
        final UiFsModel uiFsModel = new ListResourceApi(new EueApiClient(session)).resourceResourceIdGet(resourceId,
                null, null, null, null, null, null, Collections.singletonList(EueAttributesFinderFeature.OPTION_DOWNLOAD), null);
        final HttpUriRequest request = new HttpGet(uiFsModel.getUilink().getDownloadURI());
        if(status.isAppend()) {
            // Resume: request only the remaining byte range
            final HttpRange range = HttpRange.withStatus(status);
            final String header;
            if(TransferStatus.UNKNOWN_LENGTH == range.getEnd()) {
                // Open-ended range when the total length is unknown
                header = String.format("bytes=%d-", range.getStart());
            }
            else {
                header = String.format("bytes=%d-%d", range.getStart(), range.getEnd());
            }
            if(log.isDebugEnabled()) {
                log.debug(String.format("Add range header %s for file %s", header, file));
            }
            request.addHeader(new BasicHeader(HttpHeaders.RANGE, header));
            // Disable compression
            request.addHeader(new BasicHeader(HttpHeaders.ACCEPT_ENCODING, "identity"));
        }
        final HttpResponse response = session.getClient().execute(request);
        switch(response.getStatusLine().getStatusCode()) {
            case HttpStatus.SC_OK:
            case HttpStatus.SC_PARTIAL_CONTENT:
                // Stream that releases the underlying connection when closed
                return new HttpMethodReleaseInputStream(response);
            default:
                // Map any other status to a protocol failure
                throw new DefaultHttpResponseExceptionMappingService().map("Download {0} failed", new HttpResponseException(
                        response.getStatusLine().getStatusCode(), response.getStatusLine().getReasonPhrase()), file);
        }
    }
    catch(ApiException e) {
        throw new EueExceptionMappingService().map("Download {0} failed", e, file);
    }
    catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map("Download {0} failed", e, file);
    }
}
// Verifies that a ranged read starting at offset 100 returns exactly the
// remaining bytes of the uploaded content.
@Test
public void testReadRange() throws Exception {
    final EueResourceIdProvider fileid = new EueResourceIdProvider(session);
    final Path container = new EueDirectoryFeature(session, fileid).mkdir(
            new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
    final byte[] content = RandomUtils.nextBytes(1000);
    final Path test = createFile(fileid, new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), content);
    // Request a resume starting at byte 100
    final TransferStatus status = new TransferStatus();
    status.setLength(content.length);
    status.setAppend(true);
    status.setOffset(100L);
    final InputStream in = new EueReadFeature(session, fileid).read(test, status.withLength(content.length - 100), new DisabledConnectionCallback());
    assertNotNull(in);
    final ByteArrayOutputStream buffer = new ByteArrayOutputStream(content.length - 100);
    new StreamCopier(status, status).transfer(in, buffer);
    // Expected: the original content minus the first 100 bytes
    final byte[] reference = new byte[content.length - 100];
    System.arraycopy(content, 100, reference, 0, content.length - 100);
    assertArrayEquals(reference, buffer.toByteArray());
    in.close();
    new EueDeleteFeature(session, fileid).delete(Collections.singletonList(container), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Adds the element, then evicts the oldest entry if the bound is exceeded.
 * A {@code maxSize} of 0 therefore leaves the queue empty after every add.
 */
public void add(T element) {
    Preconditions.checkNotNull(element);
    // Enqueue first; only trim when the insert actually happened and the
    // size now exceeds the configured bound.
    final boolean inserted = elements.add(element);
    if (inserted && elements.size() > maxSize) {
        elements.poll();
    }
}
@Test
void testSizeWithMaxSize0() {
    // A zero-capacity queue must evict each element immediately after adding it.
    final BoundedFIFOQueue<Integer> zeroCapacityQueue = new BoundedFIFOQueue<>(0);
    assertThat(zeroCapacityQueue).isEmpty();
    zeroCapacityQueue.add(1);
    assertThat(zeroCapacityQueue).isEmpty();
}
@Override
public DescribeProducersResult describeProducers(Collection<TopicPartition> topicPartitions, DescribeProducersOptions options) {
    // Per-partition future so each partition's producer state can complete independently.
    AdminApiFuture.SimpleAdminApiFuture<TopicPartition, DescribeProducersResult.PartitionProducerState> apiFuture =
        DescribeProducersHandler.newFuture(topicPartitions);
    DescribeProducersHandler requestHandler = new DescribeProducersHandler(options, logContext);
    // Drive the request/retry loop within the caller-supplied timeout.
    invokeDriver(requestHandler, apiFuture, options.timeoutMs);
    return new DescribeProducersResult(apiFuture.all());
}
// Verifies that describeProducers re-resolves the partition leader and retries
// after the first request is disconnected, honoring the retry backoff.
@Test
public void testDescribeProducersRetryAfterDisconnect() throws Exception {
    MockTime time = new MockTime();
    int retryBackoffMs = 100;
    Cluster cluster = mockCluster(3, 0);
    Map<String, Object> configOverride = newStrMap(AdminClientConfig.RETRY_BACKOFF_MS_CONFIG, "" + retryBackoffMs);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, configOverride)) {
        TopicPartition topicPartition = new TopicPartition("foo", 0);
        Iterator<Node> nodeIterator = env.cluster().nodes().iterator();
        Node initialLeader = nodeIterator.next();
        expectMetadataRequest(env, topicPartition, initialLeader);
        List<ProducerState> expected = asList(
            new ProducerState(12345L, 15, 30, env.time().milliseconds(),
                OptionalInt.of(99), OptionalLong.empty()),
            new ProducerState(12345L, 15, 30, env.time().milliseconds(),
                OptionalInt.empty(), OptionalLong.of(23423L))
        );
        DescribeProducersResponse response = buildDescribeProducersResponse(
            topicPartition,
            expected
        );
        // First attempt: the leader disconnects (last argument true)
        env.kafkaClient().prepareResponseFrom(
            request -> {
                // We need a sleep here because the client will attempt to
                // backoff after the disconnect
                env.time().sleep(retryBackoffMs);
                return request instanceof DescribeProducersRequest;
            },
            response,
            initialLeader,
            true
        );
        // Retry: metadata is refreshed and the request goes to the new leader
        Node retryLeader = nodeIterator.next();
        expectMetadataRequest(env, topicPartition, retryLeader);
        env.kafkaClient().prepareResponseFrom(
            request -> request instanceof DescribeProducersRequest,
            response,
            retryLeader
        );
        DescribeProducersResult result = env.adminClient().describeProducers(singleton(topicPartition));
        KafkaFuture<DescribeProducersResult.PartitionProducerState> partitionFuture =
            result.partitionResult(topicPartition);
        assertEquals(new HashSet<>(expected), new HashSet<>(partitionFuture.get().activeProducers()));
    }
}
/**
 * Collects common sub-expressions (CSE) from the given expressions, grouped by
 * the level of the expression tree at which each CSE occurs, and allocates a
 * variable for each one. Lower-level CSE variables are substituted into
 * higher-level CSEs before those are recorded.
 *
 * @param expressions expressions to scan for common sub-expressions
 * @return map from tree level to (rewritten CSE -> allocated variable); empty
 *         when there are no expressions or no common sub-expressions
 */
public static Map<Integer, Map<RowExpression, VariableReferenceExpression>> collectCSEByLevel(List<? extends RowExpression> expressions)
{
    if (expressions.isEmpty()) {
        return ImmutableMap.of();
    }
    CommonSubExpressionCollector expressionCollector = new CommonSubExpressionCollector();
    expressions.forEach(expression -> expression.accept(expressionCollector, null));
    if (expressionCollector.cseByLevel.isEmpty()) {
        return ImmutableMap.of();
    }
    Map<Integer, Map<RowExpression, Integer>> cseByLevel = removeRedundantCSE(expressionCollector.cseByLevel, expressionCollector.expressionCount);
    VariableAllocator variableAllocator = new VariableAllocator();
    ImmutableMap.Builder<Integer, Map<RowExpression, VariableReferenceExpression>> commonSubExpressions = ImmutableMap.builder();
    // Accumulates CSE -> variable substitutions from lower levels to apply at higher levels.
    Map<RowExpression, VariableReferenceExpression> rewriteWith = new HashMap<>();
    int startCSELevel = cseByLevel.keySet().stream().reduce(Math::min).get();
    int maxCSELevel = cseByLevel.keySet().stream().reduce(Math::max).get();
    for (int i = startCSELevel; i <= maxCSELevel; i++) {
        if (cseByLevel.containsKey(i)) {
            // Rewrite this level's CSEs in terms of the variables allocated at lower levels.
            ExpressionRewriter rewriter = new ExpressionRewriter(rewriteWith);
            ImmutableMap.Builder<RowExpression, VariableReferenceExpression> expressionVariableMapBuilder = ImmutableMap.builder();
            for (Map.Entry<RowExpression, Integer> entry : cseByLevel.get(i).entrySet()) {
                RowExpression rewrittenExpression = entry.getKey().accept(rewriter, null);
                expressionVariableMapBuilder.put(rewrittenExpression, variableAllocator.newVariable(rewrittenExpression, "cse"));
            }
            Map<RowExpression, VariableReferenceExpression> expressionVariableMap = expressionVariableMapBuilder.build();
            commonSubExpressions.put(i, expressionVariableMap);
            // The map is already keyed exactly as needed; the previous
            // stream/collect into an identical ImmutableMap was redundant.
            rewriteWith.putAll(expressionVariableMap);
        }
    }
    return commonSubExpressions.build();
}
// Redundant sub-expressions (covered by a larger CSE) must be pruned from the result.
@Test
void testNoRedundantCSE() {
    List<RowExpression> expressions = ImmutableList.of(
        rowExpression("x * 2 + y + z"),
        rowExpression("(x * 2 + y + z) * 2"),
        rowExpression("x * 2"));
    Map<Integer, Map<RowExpression, VariableReferenceExpression>> cseByLevel = collectCSEByLevel(expressions);
    // x * 2 + y is redundant thus should not appear in results
    assertEquals(cseByLevel, ImmutableMap.of(
        3, ImmutableMap.of(rowExpression("\"multiply$cse\" + y + z"), rowExpression("\"add$cse\"")),
        1, ImmutableMap.of(rowExpression("x * 2"), rowExpression("\"multiply$cse\""))));
}
/** Starts a join specification keyed by the given {@code By} clause. */
public static Impl join(By clause) {
    final JoinArguments joinArguments = new JoinArguments(clause);
    return new Impl(joinArguments);
}
// When every joined input is marked as a side input there is no main input
// left to drive the join, which must be rejected.
@Test
@Category(NeedsRunner.class)
public void testNoMainInput() {
    PCollection<Row> pc1 =
        pipeline
            .apply(
                "Create1",
                Create.of(Row.withSchema(CG_SCHEMA_1).addValues("user1", 1, "us").build()))
            .setRowSchema(CG_SCHEMA_1);
    PCollection<Row> pc2 =
        pipeline
            .apply(
                "Create2",
                Create.of(Row.withSchema(CG_SCHEMA_2).addValues("user1", 9, "us").build()))
            .setRowSchema(CG_SCHEMA_2);
    PCollection<Row> pc3 =
        pipeline
            .apply(
                "Create3",
                Create.of(Row.withSchema(CG_SCHEMA_3).addValues("user1", 17, "us").build()))
            .setRowSchema(CG_SCHEMA_3);
    thrown.expect(IllegalArgumentException.class);
    // All three inputs declare withSideInput() — no main input remains
    PCollectionTuple.of("pc1", pc1, "pc2", pc2, "pc3", pc3)
        .apply(
            "CoGroup1",
            CoGroup.join("pc1", By.fieldNames("user", "country").withSideInput())
                .join("pc2", By.fieldNames("user2", "country2").withSideInput())
                .join("pc3", By.fieldNames("user3", "country3").withSideInput()));
    pipeline.run();
}
/** Returns an unconfigured read: no configuration, empty table id, full-table scan. */
public static Read read() {
    final Scan fullTableScan = new Scan();
    return new Read(null, "", fullTableScan);
}
// Builder methods must propagate table id and configuration into the Read.
@Test
public void testReadBuildsCorrectly() {
    HBaseIO.Read read = HBaseIO.read().withConfiguration(conf).withTableId("table");
    assertEquals("table", read.getTableId());
    assertNotNull("configuration", read.getConfiguration());
}
/** Resolves the remote host string for the given exchange. */
public static String acquireHost(final ServerWebExchange exchange) {
    final RemoteAddressResolver resolver = SpringBeanUtils.getInstance().getBean(RemoteAddressResolver.class);
    return resolver.resolve(exchange).getHostString();
}
@Test
public void acquireHostTest() {
    // The stubbed exchange resolves to the wildcard address.
    final String resolvedHost = HostAddressUtils.acquireHost(exchange);
    assertEquals("0.0.0.0", resolvedHost);
}
/**
 * Serializes a {@link SlaveServer} repository element into a {@link DataNode},
 * encrypting the password before storage.
 */
public DataNode elementToDataNode( RepositoryElementInterface element ) throws KettleException {
  SlaveServer slaveServer = (SlaveServer) element;
  DataNode rootNode = new DataNode( NODE_ROOT );
  /*
   * // Check for naming collision ObjectId slaveId = repo.getSlaveID(slaveServer.getName()); if (slaveId != null &&
   * slaveServer.getObjectId()!=null && !slaveServer.getObjectId().equals(slaveId)) { // We have a naming collision,
   * abort the save throw new KettleException("Failed to save object to repository. Object [" + slaveServer.getName()
   * + "] already exists."); }
   */
  // Create or version a new slave node
  rootNode.setProperty( PROP_HOST_NAME, slaveServer.getHostname() );
  rootNode.setProperty( PROP_PORT, slaveServer.getPort() );
  rootNode.setProperty( PROP_WEBAPP_NAME, slaveServer.getWebAppName() );
  rootNode.setProperty( PROP_USERNAME, slaveServer.getUsername() );
  // Password is encrypted unless it is a variable reference
  rootNode.setProperty( PROP_PASSWORD, Encr.encryptPasswordIfNotUsingVariables( slaveServer.getPassword() ) );
  rootNode.setProperty( PROP_PROXY_HOST_NAME, slaveServer.getProxyHostname() );
  rootNode.setProperty( PROP_PROXY_PORT, slaveServer.getProxyPort() );
  rootNode.setProperty( PROP_NON_PROXY_HOSTS, slaveServer.getNonProxyHosts() );
  rootNode.setProperty( PROP_MASTER, slaveServer.isMaster() );
  rootNode.setProperty( PROP_USE_HTTPS_PROTOCOL, slaveServer.isSslMode() );
  return rootNode;
}
// Every SlaveServer property must round-trip into the DataNode; the password
// is expected in its encrypted form.
@Test
public void testElementToDataNode() throws KettleException {
  DataNode dataNode = slaveDelegate.elementToDataNode( mockSlaveServer );
  Assert.assertEquals( PROP_HOST_NAME_VALUE, slaveDelegate.getString( dataNode, SlaveDelegate.PROP_HOST_NAME ) );
  Assert.assertEquals( PROP_USERNAME_VALUE, slaveDelegate.getString( dataNode, SlaveDelegate.PROP_USERNAME ) );
  Assert.assertEquals( Encr.encryptPasswordIfNotUsingVariables( PROP_PASSWORD_VALUE ),
      slaveDelegate.getString( dataNode, SlaveDelegate.PROP_PASSWORD ) );
  Assert.assertEquals( PROP_PORT_VALUE, slaveDelegate.getString( dataNode, SlaveDelegate.PROP_PORT ) );
  Assert.assertEquals( PROP_PROXY_HOST_NAME_VALUE,
      slaveDelegate.getString( dataNode, SlaveDelegate.PROP_PROXY_HOST_NAME ) );
  Assert.assertEquals( PROP_PROXY_PORT_VALUE, slaveDelegate.getString( dataNode, SlaveDelegate.PROP_PROXY_PORT ) );
  Assert.assertEquals( PROP_WEBAPP_NAME_VALUE, slaveDelegate.getString( dataNode, SlaveDelegate.PROP_WEBAPP_NAME ) );
  Assert.assertEquals( PROP_NON_PROXY_HOSTS_VALUE,
      slaveDelegate.getString( dataNode, SlaveDelegate.PROP_NON_PROXY_HOSTS ) );
  Assert.assertEquals( PROP_MASTER_VALUE, slaveDelegate.getBoolean( dataNode, SlaveDelegate.PROP_MASTER ) );
  Assert.assertEquals( PROP_USE_HTTPS_PROTOCOL_VALUE,
      slaveDelegate.getBoolean( dataNode, SlaveDelegate.PROP_USE_HTTPS_PROTOCOL ) );
}
/**
 * Returns an aggregate operation computing the arithmetic mean of the
 * {@code double} values extracted from the items.
 *
 * @param getDoubleValueFn extracts the value to average from each item
 * @throws ArithmeticException from the accumulate/combine steps if the item
 *         count overflows a {@code long}
 */
@Nonnull
public static <T> AggregateOperation1<T, LongDoubleAccumulator, Double> averagingDouble(
        @Nonnull ToDoubleFunctionEx<? super T> getDoubleValueFn
) {
    checkSerializable(getDoubleValueFn, "getDoubleValueFn");
    // count == accumulator.value1
    // sum == accumulator.value2
    return AggregateOperation
            .withCreate(LongDoubleAccumulator::new)
            .andAccumulate((LongDoubleAccumulator a, T item) -> {
                // a bit faster check than in addExact, specialized for increment
                if (a.getLong() == Long.MAX_VALUE) {
                    throw new ArithmeticException("Counter overflow");
                }
                a.setLong(a.getLong() + 1);
                a.setDouble(a.getDouble() + getDoubleValueFn.applyAsDouble(item));
            })
            .andCombine((a1, a2) -> {
                // Combining counts may overflow; detect via addExact
                a1.setLong(Math.addExact(a1.getLong(), a2.getLong()));
                a1.setDouble(a1.getDouble() + a2.getDouble());
            })
            .andDeduct((a1, a2) -> {
                a1.setLong(Math.subtractExact(a1.getLong(), a2.getLong()));
                a1.setDouble(a1.getDouble() - a2.getDouble());
            })
            // mean = sum / count (0.0 / 0 yields NaN when nothing was accumulated)
            .andExportFinish(a -> a.getDouble() / a.getLong());
}
@Test
public void when_averagingDouble_tooManyItems_then_exception() {
    // Given: an accumulator whose item count is already at Long.MAX_VALUE
    AggregateOperation1<Double, LongDoubleAccumulator, Double> aggrOp = averagingDouble(Double::doubleValue);
    LongDoubleAccumulator acc = new LongDoubleAccumulator(Long.MAX_VALUE, 0.0d);

    // When: accumulating one more item
    BiConsumerEx<? super LongDoubleAccumulator, ? super Double> biConsumerEx = aggrOp.accumulateFn();

    // Then: the counter overflow must be detected
    assertThrows(ArithmeticException.class, () -> biConsumerEx.accept(acc, 0.0d));
}
@Override
public String toString() {
    // Either the default Object representation or a time-zone aware format,
    // depending on the configured style flag.
    return useJdkToStringStyle ? super.toString() : toString(this.timeZone);
}
@Test
public void toStringTest2() {
    final DateTime dateTime = new DateTime("2017-01-05 12:34:23", DatePattern.NORM_DATETIME_FORMAT);
    // "+0800" style offset (no colon)
    final String zoneOffsetStr = dateTime.toString(DatePattern.UTC_WITH_ZONE_OFFSET_PATTERN);
    assertEquals("2017-01-05T12:34:23+0800", zoneOffsetStr);
    // "+08:00" style offset (with colon)
    final String xxxOffsetStr = dateTime.toString(DatePattern.UTC_WITH_XXX_OFFSET_PATTERN);
    assertEquals("2017-01-05T12:34:23+08:00", xxxOffsetStr);
}
/** Reads a transaction from the buffer using the current protocol version. */
public static Transaction read(ByteBuffer payload) throws BufferUnderflowException, ProtocolException {
    final int protocolVersion = ProtocolVersion.CURRENT.intValue();
    return Transaction.read(payload, protocolVersion);
}
// A transaction declaring an absurdly large input count must fail with
// BufferUnderflowException rather than attempting a huge allocation.
@Test
public void parseTransactionWithHugeDeclaredInputsSize() {
    Transaction tx = new HugeDeclaredSizeTransaction(true, false, false);
    byte[] serializedTx = tx.serialize();
    try {
        Transaction.read(ByteBuffer.wrap(serializedTx));
        fail("We expect BufferUnderflowException with the fixed code and OutOfMemoryError with the buggy code, so this is weird");
    } catch (BufferUnderflowException e) {
        //Expected, do nothing
    }
}
/** Fetches Kafka offsets for a batch of requests via the shared proxy API. */
public static List<PKafkaOffsetProxyResult> getBatchOffsets(List<PKafkaOffsetProxyRequest> requests) throws UserException {
    final List<PKafkaOffsetProxyResult> offsets = PROXY_API.getBatchOffsets(requests);
    return offsets;
}
// With no alive backend/compute node available, the proxy call must fail fast
// with a descriptive LoadException.
@Test
public void testNoAliveComputeNode() throws UserException {
    new Expectations() {
        {
            // Simulate: node lookup always returns null
            service.getBackendOrComputeNode(anyLong);
            result = null;
        }
    };
    KafkaUtil.ProxyAPI api = new KafkaUtil.ProxyAPI();
    LoadException e = Assert.assertThrows(LoadException.class, () -> api.getBatchOffsets(null));
    Assert.assertEquals(
            "Failed to send get kafka partition info request. err: No alive backends or compute nodes",
            e.getMessage());
}
/** Returns the log directories that reported errors. */
public Set<String> errorLogDirs() {
    return this.errorLogDirs;
}
// The EMPTY instance must report an empty (never null) set of error log dirs.
@Test
public void testErrorLogDirsForEmpty() {
    assertEquals(new HashSet<>(), EMPTY.errorLogDirs());
}
/**
 * Returns {@code perms} with every permission bit in {@code remove} cleared.
 *
 * @param perms  the current permission bit mask
 * @param remove the permission bits to clear
 * @return {@code perms} with the {@code remove} bits cleared; idempotent, and
 *         a no-op for bits that were not set
 */
public static int removeSpecificPerms(int perms, int remove) {
    // AND-NOT clears the requested bits. The previous XOR implementation would
    // *set* any bit in 'remove' that was not already present in 'perms'.
    return perms & ~remove;
}
@Test
public void testRemoveSpecificPerms() {
    // Removing CREATE from the full permission set must clear the CREATE bit.
    final int allPerms = Perms.ALL;
    final int permsToRemove = Perms.CREATE;
    final int resultingPerms = ZKUtil.removeSpecificPerms(allPerms, permsToRemove);
    assertEquals("Removal failed", 0, resultingPerms & Perms.CREATE);
}
public CustomToggle config(Properties properties) { for (String key : properties.stringPropertyNames()) { String value = properties.getProperty(key); key = key.toLowerCase(); // compare with legal configuration names for (Property p: Property.values()) { if (key.equals(p.key())) { String ability = key.split("\\.")[1]; if (key.contains("enabled") && value.equalsIgnoreCase("false")) { this.turnOff(ability); }else if (key.contains("toggle")) { this.toggle(ability, value); } } } } return this; }
@Test
public void canProcessAbilitiesThroughProperties() {
    Properties properties = new Properties();
    try {
        // Load the toggle configuration from the test fixture file.
        properties.load(Files.newInputStream(Paths.get(filename)));
        toggle = new CustomToggle().config(properties);
    } catch (IOException e) {
        System.out.println("No such file");
    }
    customBot = new DefaultBot(null, EMPTY, db, toggle);
    customBot.onRegister();
    // NOTE(review): assumes the fixture file results in an ability registered
    // under the name "restrict" — confirm against the properties fixture.
    String targetName = "restrict";
    assertTrue(customBot.getAbilities().containsKey(targetName));
}
/**
 * Parses and stores a property value, returning the previously stored value
 * (possibly {@code null}).
 */
public Object set(final String property, final Object value) {
    Objects.requireNonNull(value, "value");
    // Validate/coerce through the parser before storing.
    return props.put(property, parser.parse(property, value));
}
// A parser failure must propagate unchanged to the caller of set().
@Test(expected = IllegalArgumentException.class)
public void shouldThrowIfParserThrows() {
    // Given:
    when(parser.parse("prop-1", "new-val"))
        .thenThrow(new IllegalArgumentException("Boom"));

    // When:
    propsWithMockParser.set("prop-1", "new-val");
}
/** Creates a continue statement, with or without a target label. */
static UContinue create(@Nullable CharSequence label) {
    if (label == null) {
        return new AutoValue_UContinue(null);
    }
    return new AutoValue_UContinue(StringName.of(label));
}
// Round-trips a labeled continue through Java serialization and checks equality.
@Test
public void serialization() {
    SerializableTester.reserializeAndAssert(UContinue.create("bar"));
}
/**
 * Returns the configured password secret with surrounding whitespace removed.
 * NOTE(review): throws NullPointerException if the secret was never set —
 * confirm callers guarantee presence (e.g. via configuration validation).
 */
public String getPasswordSecret() {
    return passwordSecret.trim();
}
// A valid secret must pass configuration validation and be returned verbatim.
@Test
public void testPasswordSecretIsValid() throws ValidationException, RepositoryException {
    validProperties.put("password_secret", "abcdefghijklmnopqrstuvwxyz");
    Configuration configuration = initConfig(new Configuration(), validProperties);
    assertThat(configuration.getPasswordSecret()).isEqualTo("abcdefghijklmnopqrstuvwxyz");
}
/**
 * Broadcasts a project-rekey event to all registered listeners.
 *
 * @throws NullPointerException if {@code rekeyedProjects} is null (checked
 *         before the empty-set shortcut, even when there are no listeners)
 */
@Override
public void onProjectsRekeyed(Set<RekeyedProject> rekeyedProjects) {
    checkNotNull(rekeyedProjects, "rekeyedProjects can't be null");
    // Nothing to broadcast for an empty change set.
    if (rekeyedProjects.isEmpty()) {
        return;
    }
    // safelyCallListener presumably shields against a listener throwing, so
    // one failing listener does not block the others — confirm its contract.
    Arrays.stream(listeners)
        .forEach(safelyCallListener(listener -> listener.onProjectsRekeyed(rekeyedProjects)));
}
// The null check must fire before the empty-listener/empty-set shortcuts.
@Test
public void onProjectsRekeyed_throws_NPE_if_set_is_null_even_if_no_listeners() {
    assertThatThrownBy(() -> underTestNoListeners.onProjectsRekeyed(null))
        .isInstanceOf(NullPointerException.class)
        .hasMessage("rekeyedProjects can't be null");
}
/**
 * Builds a statement from the given parse tree, making the sources referenced
 * by the tree available to the build.
 */
public Statement buildStatement(final ParserRuleContext parseTree) {
    return build(Optional.of(getSources(parseTree)), parseTree);
}
// ASSERT SCHEMA must reject a schema id that is not an integer literal.
@Test
public void shouldThrowOnNonIntegerId() {
    // Given:
    final SingleStatementContext stmt = givenQuery("ASSERT SCHEMA ID FALSE TIMEOUT 10 SECONDS;");

    // When:
    final Exception e = assertThrows(KsqlException.class, () -> builder.buildStatement(stmt));

    // Then:
    assertThat(e.getMessage(), is("ID must be an integer"));
}
/**
 * Looks up subscribers by namespace and grouped service name, splitting the
 * name into its group and plain service parts before delegating.
 */
@Override
public Collection<Subscriber> getSubscribers(String namespaceId, String serviceName) {
    final String plainServiceName = NamingUtils.getServiceName(serviceName);
    final String group = NamingUtils.getGroupName(serviceName);
    return getSubscribers(Service.newService(namespaceId, group, plainServiceName));
}
@Test
void testGetSubscribersByService() {
    Collection<Subscriber> actual = subscriberService.getSubscribers(service);
    assertEquals(1, actual.size());
    // The subscriber reports the grouped (group@@service) name.
    assertEquals(service.getGroupedServiceName(), actual.iterator().next().getServiceName());
}
/**
 * Parses the given Lucene query string, collecting both the raw tokens seen
 * by the parser and the terms of the resulting query.
 *
 * @param query the raw query string
 * @return the parsed query together with its collected tokens and terms
 * @throws ParseException if the query is not valid Lucene syntax
 */
public ParsedQuery parse(final String query) throws ParseException {
    final TokenCollectingQueryParser parser = new TokenCollectingQueryParser(ParsedTerm.DEFAULT_FIELD, ANALYZER);
    parser.setSplitOnWhitespace(true);
    parser.setAllowLeadingWildcard(allowLeadingWildcard);
    final Query parsed = parser.parse(query);
    final ParsedQuery.Builder builder = ParsedQuery.builder().query(query);
    builder.tokensBuilder().addAll(parser.getTokens());
    // Walk the parsed query, correlating its terms back to the parser tokens.
    final TermCollectingQueryVisitor visitor = new TermCollectingQueryVisitor(ANALYZER, parser.getTokenLookup());
    parsed.visit(visitor);
    builder.termsBuilder().addAll(visitor.getParsedTerms());
    return builder.build();
}
// A dangling field prefix ("foo:") must fail with a message pinpointing the
// exact error position.
@Test
void testMatchingPositions() {
    assertThatThrownBy(() -> parser.parse("foo:"))
        .hasMessageContaining("Cannot parse 'foo:': Encountered \"<EOF>\" at line 1, column 3.");
}