focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
public void finish() throws IOException { if (finished) { return; } flush(); // Finish the stream with the terminatorValue. VarInt.encode(terminatorValue, os); if (!BUFFER_POOL.offer(buffer)) { // The pool is full, we can't store the buffer. We just drop the buffer. } finished = true; }
@Test public void testFinishingWhenFinishedIsNoOp() throws Exception { BufferedElementCountingOutputStream os = testValues(toBytes("a")); os.finish(); os.finish(); os.finish(); }
@Override public String getActions() { return actions; }
@Test void testActions() { assertEquals("A B C", new InstantiatableInstancePermission(getClass().getSimpleName(), "A", "B", "C").getActions()); }
Converter<E> compile() { head = tail = null; for (Node n = top; n != null; n = n.next) { switch (n.type) { case Node.LITERAL: addToList(new LiteralConverter<E>((String) n.getValue())); break; case Node.COMPOSITE_KEYWORD: CompositeNode cn = (CompositeNode) n; CompositeConverter<E> compositeConverter = createCompositeConverter(cn); if(compositeConverter == null) { addError("Failed to create converter for [%"+cn.getValue()+"] keyword"); addToList(new LiteralConverter<E>("%PARSER_ERROR["+cn.getValue()+"]")); break; } compositeConverter.setFormattingInfo(cn.getFormatInfo()); compositeConverter.setOptionList(cn.getOptions()); Compiler<E> childCompiler = new Compiler<E>(cn.getChildNode(), converterMap); childCompiler.setContext(context); Converter<E> childConverter = childCompiler.compile(); compositeConverter.setChildConverter(childConverter); addToList(compositeConverter); break; case Node.SIMPLE_KEYWORD: SimpleKeywordNode kn = (SimpleKeywordNode) n; DynamicConverter<E> dynaConverter = createConverter(kn); if (dynaConverter != null) { dynaConverter.setFormattingInfo(kn.getFormatInfo()); dynaConverter.setOptionList(kn.getOptions()); addToList(dynaConverter); } else { // if the appropriate dynaconverter cannot be found, then replace // it with a dummy LiteralConverter indicating an error. Converter<E> errConveter = new LiteralConverter<E>("%PARSER_ERROR[" + kn.getValue() + "]"); addStatus(new ErrorStatus("[" + kn.getValue() + "] is not a valid conversion word", this)); addToList(errConveter); } } } return head; }
@Test public void testCompositeFormatting() throws Exception { { Parser<Object> p = new Parser<Object>("xyz %4.10(ABC)"); p.setContext(context); Node t = p.parse(); Converter<Object> head = p.compile(t, converterMap); String result = write(head, new Object()); assertEquals("xyz ABC", result); } { Parser<Object> p = new Parser<Object>("xyz %-4.10(ABC)"); p.setContext(context); Node t = p.parse(); Converter<Object> head = p.compile(t, converterMap); String result = write(head, new Object()); assertEquals("xyz ABC ", result); } { Parser<Object> p = new Parser<Object>("xyz %.2(ABC %hello)"); p.setContext(context); Node t = p.parse(); Converter<Object> head = p.compile(t, converterMap); String result = write(head, new Object()); assertEquals("xyz lo", result); } { Parser<Object> p = new Parser<Object>("xyz %.-2(ABC)"); p.setContext(context); Node t = p.parse(); Converter<Object> head = p.compile(t, converterMap); String result = write(head, new Object()); assertEquals("xyz AB", result); } { Parser<Object> p = new Parser<Object>("xyz %30.30(ABC %20hello)"); p.setContext(context); Node t = p.parse(); Converter<Object> head = p.compile(t, converterMap); String result = write(head, new Object()); assertEquals("xyz ABC Hello", result); } }
public <T> CompletableFuture<T> scheduleWriteOperation( String name, TopicPartition tp, Duration timeout, CoordinatorWriteOperation<S, T, U> op ) { throwIfNotRunning(); log.debug("Scheduled execution of write operation {}.", name); CoordinatorWriteEvent<T> event = new CoordinatorWriteEvent<>(name, tp, timeout, op); enqueueLast(event); return event.future; }
@Test public void testScheduleWriteOpWhenInactive() { MockTimer timer = new MockTimer(); CoordinatorRuntime<MockCoordinatorShard, String> runtime = new CoordinatorRuntime.Builder<MockCoordinatorShard, String>() .withTime(timer.time()) .withTimer(timer) .withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT) .withLoader(new MockCoordinatorLoader()) .withEventProcessor(new DirectEventProcessor()) .withPartitionWriter(new MockPartitionWriter()) .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier()) .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) .withSerializer(new StringSerializer()) .build(); // Scheduling a write fails with a NotCoordinatorException because the coordinator // does not exist. CompletableFuture<String> write = runtime.scheduleWriteOperation("write", TP, DEFAULT_WRITE_TIMEOUT, state -> new CoordinatorResult<>(Collections.emptyList(), "response1")); assertFutureThrows(write, NotCoordinatorException.class); }
@Override public byte[] encode(ILoggingEvent event) { var baos = new ByteArrayOutputStream(); try (var generator = jsonFactory.createGenerator(baos)) { generator.writeStartObject(); // https://cloud.google.com/logging/docs/structured-logging#structured_logging_special_fields // https://github.com/googleapis/java-logging-logback/blob/main/src/main/java/com/google/cloud/logging/logback/LoggingAppender.java writeTimestamp(generator, event); writeSeverity(generator, event); writeLogger(generator, event); writeMessage(generator, event); writeThread(generator, event); writeServiceContext(generator); writeTraceContext(generator); var mdc = event.getMDCPropertyMap(); writeMdc(generator, mdc); writeKeyValue(generator, event); if ("ERROR".equals(event.getLevel().toString())) { writeError(generator, event, mdc); } writeStackTrace(generator, event); generator.writeEndObject(); generator.writeRaw('\n'); generator.flush(); } catch (NullPointerException | IOException e) { return logFallbackError(event, e); } return baos.toByteArray(); }
@Test void encode_error() { var e = mockEvent(); when(e.getLevel()).thenReturn(Level.ERROR); when(e.getFormattedMessage()).thenReturn("what a terrible failure"); var msg = encoder.encode(e); assertMatchesJson( """ {"@type":"type.googleapis.com/google.devtools.clouderrorreporting.v1beta1.ReportedErrorEvent","logger":"com.example.MyLogger","message":"what a terrible failure","severity":"ERROR","thread_name":"main","time":"2024-08-09T14:13:33Z"} """, msg); }
public HttpResponse get(Application application, String hostName, String serviceType, Path path, Query query) { return get(application, hostName, serviceType, path, query, null); }
@Test public void testNormalGetWithRewrite() throws Exception { ArgumentCaptor<HttpFetcher.Params> actualParams = ArgumentCaptor.forClass(HttpFetcher.Params.class); ArgumentCaptor<URI> actualUrl = ArgumentCaptor.forClass(URI.class); doAnswer(invoc -> new StaticResponse(200, "application/json", toJson(invoc.getArgument(1, URI.class), "/nested/path"))) .when(fetcher).get(actualParams.capture(), actualUrl.capture()); HttpResponse actualResponse = proxy.get(applicationMock, hostname, CLUSTERCONTROLLER_CONTAINER.serviceName, Path.parse("/service/path"), Query.parse("foo=%2F"), HttpURL.from(URI.create("https://api:666/api/path%2E/with?foo=%2F"))); assertEquals(1, actualUrl.getAllValues().size()); assertEquals(URI.create("http://" + hostname + ":" + port + "/service/path?foo=%2F"), actualUrl.getValue()); ByteArrayOutputStream buffer = new ByteArrayOutputStream(); actualResponse.render(buffer); assertEquals("{\"url\":\"https://api:666/api/path./with/nested/path?foo=%2F\"}", buffer.toString(UTF_8)); }
public static UBinary create(Kind binaryOp, UExpression lhs, UExpression rhs) { checkArgument( OP_CODES.containsKey(binaryOp), "%s is not a supported binary operation", binaryOp); return new AutoValue_UBinary(binaryOp, lhs, rhs); }
@Test public void equality() { ULiteral oneLit = ULiteral.intLit(1); ULiteral twoLit = ULiteral.intLit(2); ULiteral piLit = ULiteral.doubleLit(Math.PI); ULiteral trueLit = ULiteral.booleanLit(true); ULiteral falseLit = ULiteral.booleanLit(false); new EqualsTester() .addEqualityGroup(UBinary.create(Kind.PLUS, oneLit, twoLit)) .addEqualityGroup(UBinary.create(Kind.PLUS, oneLit, piLit)) .addEqualityGroup(UBinary.create(Kind.PLUS, piLit, twoLit)) .addEqualityGroup(UBinary.create(Kind.MINUS, oneLit, twoLit)) .addEqualityGroup(UBinary.create(Kind.XOR, oneLit, twoLit)) .addEqualityGroup(UBinary.create(Kind.CONDITIONAL_OR, trueLit, falseLit)) .addEqualityGroup(UBinary.create(Kind.OR, trueLit, falseLit)) .testEquals(); }
public static BigInteger signedMessageToKey(byte[] message, SignatureData signatureData) throws SignatureException { return signedMessageHashToKey(Hash.sha3(message), signatureData); }
@Test public void testSignedMessageToKey() throws SignatureException { Sign.SignatureData signatureData = Sign.signPrefixedMessage(TEST_MESSAGE, SampleKeys.KEY_PAIR); BigInteger key = Sign.signedPrefixedMessageToKey(TEST_MESSAGE, signatureData); assertEquals(key, (SampleKeys.PUBLIC_KEY)); }
@Override public <T> T target(FeignClientFactoryBean factory, Feign.Builder feign, FeignClientFactory context, Target.HardCodedTarget<T> target) { if (!(feign instanceof PolarisFeignCircuitBreaker.Builder)) { return feign.target(target); } PolarisFeignCircuitBreaker.Builder builder = (PolarisFeignCircuitBreaker.Builder) feign; String name = !StringUtils.hasText(factory.getContextId()) ? factory.getName() : factory.getContextId(); Class<?> fallback = factory.getFallback(); if (fallback != void.class) { return targetWithFallback(name, context, target, builder, fallback); } Class<?> fallbackFactory = factory.getFallbackFactory(); if (fallbackFactory != void.class) { return targetWithFallbackFactory(name, context, target, builder, fallbackFactory); } return builder(name, builder).target(target); }
@Test public void testTarget3() { PolarisFeignCircuitBreakerTargeter targeter = new PolarisFeignCircuitBreakerTargeter(circuitBreakerFactory, circuitBreakerNameResolver); FeignClientFactoryBean feignClientFactoryBean = mock(FeignClientFactoryBean.class); doReturn(void.class).when(feignClientFactoryBean).getFallback(); doReturn(TestApi.class).when(feignClientFactoryBean).getFallbackFactory(); doReturn("test").when(feignClientFactoryBean).getName(); FeignClientFactory feignClientFactory = mock(FeignClientFactory.class); doReturn(Object.class).when(feignClientFactory).getInstance("test", TestApi.class); assertThatThrownBy(() -> { targeter.target(feignClientFactoryBean, new PolarisFeignCircuitBreaker.Builder(), feignClientFactory, new Target.HardCodedTarget<>(TestApi.class, "/test")); }).isInstanceOf(IllegalStateException.class); }
@Override public SelType call(String methodName, SelType[] args) { if (args.length == 0 && "currentTimeMillis".equals(methodName)) { return SelLong.of(DateTimeUtils.currentTimeMillis()); } // no-op to support Arrays.asList if (args.length == 1 && "asList".equals(methodName)) { return args[0]; } throw new UnsupportedOperationException( type() + " DO NOT support calling method: " + methodName + " with args: " + Arrays.toString(args)); }
@Test(expected = UnsupportedOperationException.class) public void testCallCurrentTimeMillisWithWrongArgs() { SelMiscFunc.INSTANCE.call("currentTimeMillis", new SelType[1]); }
@ManagedOperation(description = "set job executor activate") public void setJobExecutorActivate(Boolean active) { if (active) jobExecutor.start(); else jobExecutor.shutdown(); }
@Test public void setJobExecutorActivateTrue() { jobExecutorMbean.setJobExecutorActivate(true); verify(jobExecutor).start(); jobExecutorMbean.setJobExecutorActivate(false); verify(jobExecutor).shutdown(); }
@Override public final void dispose() { if (unsubscribed.compareAndSet(false, true)) { if (AutoDisposeAndroidUtil.isMainThread()) { onDispose(); } else { AndroidSchedulers.mainThread().scheduleDirect(this::onDispose); } } }
@Test public void onDisposeFailsWhenMainThreadCheckNotSet() { try { new MainThreadDisposable() { @Override protected void onDispose() {} }.dispose(); throw new AssertionError("Expected to fail before this due to Looper not being stubbed!"); } catch (RuntimeException e) { // "Method myLooper in android.os.Looper not mocked..." // Not testing this exact message as it's an implementation detail of the test framework. } }
public OpenAPI read(Class<?> cls) { return read(cls, resolveApplicationPath(), null, false, null, null, new LinkedHashSet<String>(), new ArrayList<Parameter>(), new HashSet<Class<?>>()); }
@Test(description = "Constraints annotations in models") public void testTicket3731() { Reader reader = new Reader(new OpenAPI()); OpenAPI openAPI = reader.read(Ticket3731Resource.class); String yaml = "openapi: 3.0.1\n" + "paths:\n" + " /test/cart:\n" + " get:\n" + " summary: Get cart items\n" + " description: Paging follows RFC 5005.\n" + " operationId: getCart\n" + " parameters:\n" + " - name: pageSize\n" + " in: query\n" + " description: \"Number of items per page. Range[1, 200]\"\n" + " schema:\n" + " maximum: 200\n" + " minimum: 1\n" + " type: integer\n" + " format: int32\n" + " default: 50\n" + " responses:\n" + " default:\n" + " description: default response\n" + " content:\n" + " '*/*':\n" + " schema:\n" + " type: array\n" + " items:\n" + " type: string\n"; SerializationMatchers.assertEqualsToYaml(openAPI, yaml); reader = new Reader(new OpenAPI()); openAPI = reader.read(Ticket3731BisResource.class); SerializationMatchers.assertEqualsToYaml(openAPI, yaml); }
public static RowCoder of(Schema schema) { return new RowCoder(schema); }
@Test public void testNestedTypes() throws Exception { Schema nestedSchema = Schema.builder().addInt32Field("f1_int").addStringField("f1_str").build(); Schema schema = Schema.builder().addInt32Field("f_int").addRowField("nested", nestedSchema).build(); Row nestedRow = Row.withSchema(nestedSchema).addValues(18, "foobar").build(); Row row = Row.withSchema(schema).addValues(42, nestedRow).build(); CoderProperties.coderDecodeEncodeEqual(RowCoder.of(schema), row); }
@Override public SingleRuleConfiguration build() { return new SingleRuleConfiguration(); }
@SuppressWarnings("rawtypes") @Test void assertBuild() { DefaultDatabaseRuleConfigurationBuilder builder = OrderedSPILoader.getServices(DefaultDatabaseRuleConfigurationBuilder.class, Collections.singleton(new SingleRuleBuilder())).values().iterator().next(); assertThat(builder.build(), instanceOf(SingleRuleConfiguration.class)); }
@Override public String key() { return PropertyType.BOOLEAN.name(); }
@Test public void key() { assertThat(underTest.key()).isEqualTo("BOOLEAN"); }
@SuppressWarnings({"BooleanExpressionComplexity", "CyclomaticComplexity"}) public static boolean isScalablePushQuery( final Statement statement, final KsqlExecutionContext ksqlEngine, final KsqlConfig ksqlConfig, final Map<String, Object> overrides ) { if (!isPushV2Enabled(ksqlConfig, overrides)) { return false; } if (! (statement instanceof Query)) { return false; } final Query query = (Query) statement; final SourceFinder sourceFinder = new SourceFinder(); sourceFinder.process(query.getFrom(), null); // It will be present if it's not a join, which we don't handle if (!sourceFinder.getSourceName().isPresent()) { return false; } // Find all of the writers to this particular source. final SourceName sourceName = sourceFinder.getSourceName().get(); final Set<QueryId> upstreamQueries = ksqlEngine.getQueriesWithSink(sourceName); // See if the config or override have set the stream to be "latest" final boolean isLatest = isLatest(ksqlConfig, overrides); // Cannot be a pull query, i.e. must be a push return !query.isPullQuery() // Group by is not supported && !query.getGroupBy().isPresent() // Windowing is not supported && !query.getWindow().isPresent() // Having clause is not supported && !query.getHaving().isPresent() // Partition by is not supported && !query.getPartitionBy().isPresent() // There must be an EMIT CHANGES clause && (query.getRefinement().isPresent() && query.getRefinement().get().getOutputRefinement() == OutputRefinement.CHANGES) // Must be reading from "latest" && isLatest // We only handle a single sink source at the moment from a CTAS/CSAS && upstreamQueries.size() == 1 // ROWPARTITION and ROWOFFSET are not currently supported in SPQs && !containsDisallowedColumns(query); }
@Test public void shouldNotMakeQueryWithRowoffsetInWhereClauseScalablePush() { try(MockedStatic<ColumnExtractor> columnExtractor = mockStatic(ColumnExtractor.class)) { // Given: expectIsSPQ(ColumnName.of("foo"), columnExtractor); givenWhereClause(SystemColumns.ROWOFFSET_NAME, columnExtractor); // When: final boolean isScalablePush = ScalablePushUtil.isScalablePushQuery( query, ksqlEngine, ksqlConfig, overrides ); // Then: assert(!isScalablePush); } }
public OpenConfigAssignmentHandler addConfig(OpenConfigConfigOfAssignmentHandler config) { modelObject.config(config.getModelObject()); return this; }
@Test public void testAddConfig() { // test Handler OpenConfigAssignmentHandler assignment = new OpenConfigAssignmentHandler(2, parent); // call addConfig OpenConfigConfigOfAssignmentHandler configOfAssignment = new OpenConfigConfigOfAssignmentHandler(assignment); // expected ModelObject DefaultAssignment modelObject = new DefaultAssignment(); modelObject.index(2); DefaultConfig config = new DefaultConfig(); modelObject.config(config); assertEquals("[NG]addConfig:ModelObject(Config added) is not an expected one.\n", modelObject, assignment.getModelObject()); }
@Udf(description = "Returns a new string with all matches of regexp in str replaced with newStr") public String regexpReplace( @UdfParameter( description = "The source string. If null, then function returns null.") final String str, @UdfParameter( description = "The regexp to match." + " If null, then function returns null.") final String regexp, @UdfParameter( description = "The string to replace the matches with." + " If null, then function returns null.") final String newStr) { if (str == null || regexp == null || newStr == null) { return null; } try { return str.replaceAll(regexp, newStr); } catch (PatternSyntaxException e) { throw new KsqlFunctionException("Invalid regular expression pattern: " + regexp, e); } }
@Test public void shouldReplace() { assertThat(udf.regexpReplace("foobar", "foo", "bar"), is("barbar")); assertThat(udf.regexpReplace("foobar", "fooo", "bar"), is("foobar")); assertThat(udf.regexpReplace("foobar", "o", ""), is("fbar")); assertThat(udf.regexpReplace("abc", "", "n"), is("nanbncn")); assertThat(udf.regexpReplace("foobar", "(foo|bar)", "cat"), is("catcat")); assertThat(udf.regexpReplace("foobar", "^foo", "cat"), is("catbar")); assertThat(udf.regexpReplace("foobar", "^bar", "cat"), is("foobar")); assertThat(udf.regexpReplace("barbar", "bar$", "cat"), is("barcat")); assertThat(udf.regexpReplace("aababa", "ab", "xy"), is("axyxya")); assertThat(udf.regexpReplace("aababa", "(ab)+", "xy"), is("axya")); }
@Override public HttpResponseOutputStream<File> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException { final String location = new StoregateWriteFeature(session, fileid).start(file, status); final MultipartOutputStream proxy = new MultipartOutputStream(location, file, status); return new HttpResponseOutputStream<File>(new MemorySegementingOutputStream(proxy, new HostPreferences(session.getHost()).getInteger("storegate.upload.multipart.chunksize")), new StoregateAttributesFinderFeature(session, fileid), status) { @Override public File getStatus() { return proxy.getResult(); } }; }
@Test public void testReadWrite() throws Exception { final StoregateIdProvider nodeid = new StoregateIdProvider(session); final Path folder = new StoregateDirectoryFeature(session, nodeid).mkdir( new Path(String.format("/My files/%s", new AlphanumericRandomStringService().random()), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus()); final byte[] content = RandomUtils.nextBytes(524289); final TransferStatus status = new TransferStatus(); status.setLength(-1L); final Path test = new Path(folder, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file)); final StoregateMultipartWriteFeature writer = new StoregateMultipartWriteFeature(session, nodeid); final HttpResponseOutputStream<File> out = writer.write(test, status, new DisabledConnectionCallback()); assertNotNull(out); new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out); final String version = out.getStatus().getId(); assertNotNull(version); assertEquals(content.length, out.getStatus().getSize(), 0L); assertTrue(new DefaultFindFeature(session).find(test)); assertEquals(new StoregateAttributesFinderFeature(session, nodeid).toAttributes(out.getStatus()), new StoregateAttributesFinderFeature(session, nodeid).find(test)); final byte[] compare = new byte[content.length]; final InputStream stream = new StoregateReadFeature(session, nodeid).read(test, new TransferStatus().withLength(content.length), new DisabledConnectionCallback()); IOUtils.readFully(stream, compare); stream.close(); assertArrayEquals(content, compare); new StoregateDeleteFeature(session, nodeid).delete(Collections.singletonList(folder), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
@Override public String getOriginalHost() { try { if (originalHost == null) { originalHost = getOriginalHost(getHeaders(), getServerName()); } return originalHost; } catch (URISyntaxException e) { throw new IllegalArgumentException(e); } }
@Test void testGetOriginalHost_handlesNonRFC2396Hostnames() { config.setProperty("zuul.HttpRequestMessage.host.header.strict.validation", false); HttpQueryParams queryParams = new HttpQueryParams(); Headers headers = new Headers(); headers.add("Host", "my_underscore_endpoint.netflix.com"); request = new HttpRequestMessageImpl( new SessionContext(), "HTTP/1.1", "POST", "/some/where", queryParams, headers, "192.168.0.2", "https", 7002, "localhost"); assertEquals("my_underscore_endpoint.netflix.com", request.getOriginalHost()); headers = new Headers(); headers.add("Host", "my_underscore_endpoint.netflix.com:8080"); request = new HttpRequestMessageImpl( new SessionContext(), "HTTP/1.1", "POST", "/some/where", queryParams, headers, "192.168.0.2", "https", 7002, "localhost"); assertEquals("my_underscore_endpoint.netflix.com", request.getOriginalHost()); headers = new Headers(); headers.add("Host", "my_underscore_endpoint^including~more-chars.netflix.com"); request = new HttpRequestMessageImpl( new SessionContext(), "HTTP/1.1", "POST", "/some/where", queryParams, headers, "192.168.0.2", "https", 7002, "localhost"); assertEquals("my_underscore_endpoint^including~more-chars.netflix.com", request.getOriginalHost()); headers = new Headers(); headers.add("Host", "hostname%5Ewith-url-encoded.netflix.com"); request = new HttpRequestMessageImpl( new SessionContext(), "HTTP/1.1", "POST", "/some/where", queryParams, headers, "192.168.0.2", "https", 7002, "localhost"); assertEquals("hostname%5Ewith-url-encoded.netflix.com", request.getOriginalHost()); }
@Override public long put(final K key, final V value, final long timestamp) { return internal.put(key, value, timestamp); }
@Test public void shouldDelegateAndRecordMetricsOnPut() { when(inner.put(RAW_KEY, RAW_VALUE, TIMESTAMP)).thenReturn(PUT_RETURN_CODE_VALID_TO_UNDEFINED); final long validto = store.put(KEY, VALUE, TIMESTAMP); assertThat(validto, is(PUT_RETURN_CODE_VALID_TO_UNDEFINED)); assertThat((Double) getMetric("put-rate").metricValue(), greaterThan(0.0)); }
@Override public void handle(CommitterEvent event) { try { eventQueue.put(event); } catch (InterruptedException e) { throw new YarnRuntimeException(e); } }
@Test public void testCommitWindow() throws Exception { Configuration conf = new Configuration(); conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir); AsyncDispatcher dispatcher = new AsyncDispatcher(); dispatcher.init(conf); dispatcher.start(); TestingJobEventHandler jeh = new TestingJobEventHandler(); dispatcher.register(JobEventType.class, jeh); SystemClock clock = SystemClock.getInstance(); AppContext appContext = mock(AppContext.class); ApplicationAttemptId attemptid = ApplicationAttemptId.fromString( "appattempt_1234567890000_0001_0"); when(appContext.getApplicationID()).thenReturn(attemptid.getApplicationId()); when(appContext.getApplicationAttemptId()).thenReturn(attemptid); when(appContext.getEventHandler()).thenReturn( dispatcher.getEventHandler()); when(appContext.getClock()).thenReturn(clock); OutputCommitter committer = mock(OutputCommitter.class); TestingRMHeartbeatHandler rmhh = new TestingRMHeartbeatHandler(); CommitterEventHandler ceh = new CommitterEventHandler(appContext, committer, rmhh); ceh.init(conf); ceh.start(); // verify trying to commit when RM heartbeats are stale does not commit ceh.handle(new CommitterJobCommitEvent(null, null)); long timeToWaitMs = 5000; while (rmhh.getNumCallbacks() != 1 && timeToWaitMs > 0) { Thread.sleep(10); timeToWaitMs -= 10; } Assert.assertEquals("committer did not register a heartbeat callback", 1, rmhh.getNumCallbacks()); verify(committer, never()).commitJob(any(JobContext.class)); Assert.assertEquals("committer should not have committed", 0, jeh.numCommitCompletedEvents); // set a fresh heartbeat and verify commit completes rmhh.setLastHeartbeatTime(clock.getTime()); timeToWaitMs = 5000; while (jeh.numCommitCompletedEvents != 1 && timeToWaitMs > 0) { Thread.sleep(10); timeToWaitMs -= 10; } Assert.assertEquals("committer did not complete commit after RM hearbeat", 1, jeh.numCommitCompletedEvents); verify(committer, times(1)).commitJob(any()); //Clean up so we can try to commit again (Don't do this at home) 
cleanup(); // try to commit again and verify it goes through since the heartbeat // is still fresh ceh.handle(new CommitterJobCommitEvent(null, null)); timeToWaitMs = 5000; while (jeh.numCommitCompletedEvents != 2 && timeToWaitMs > 0) { Thread.sleep(10); timeToWaitMs -= 10; } Assert.assertEquals("committer did not commit", 2, jeh.numCommitCompletedEvents); verify(committer, times(2)).commitJob(any()); ceh.stop(); dispatcher.stop(); }
@Override public <T> ResponseFuture<T> sendRequest(Request<T> request, RequestContext requestContext) { doEvaluateDisruptContext(request, requestContext); return _client.sendRequest(request, requestContext); }
@Test public void testDisruptSourceAlreadySet() { when(_context.getLocalAttr(eq(DISRUPT_SOURCE_KEY))).thenReturn(any(String.class)); _client.sendRequest(_request, _context); verify(_context, never()).putLocalAttr(eq(DISRUPT_CONTEXT_KEY), any(String.class)); }
@Override public String getQueryLimitPart(int limit) { return Queries.postgresSqlLimitPart(limit); }
@Test void test() { assertTrue(jdbcCustomization.supportsExplicitQueryLimitPart()); Arrays.asList(1, 5, 20, 100) .forEach(it -> assertEquals(" LIMIT " + it, jdbcCustomization.getQueryLimitPart(it))); }
public int getKafkaBufferSize() { return _kafkaBufferSize; }
@Test public void testGetKafkaBufferSize() { // test default KafkaPartitionLevelStreamConfig config = getStreamConfig("topic", "host1", null, ""); Assert.assertEquals(KafkaStreamConfigProperties.LowLevelConsumer.KAFKA_BUFFER_SIZE_DEFAULT, config.getKafkaBufferSize()); config = getStreamConfig("topic", "host1", "", ""); Assert.assertEquals(KafkaStreamConfigProperties.LowLevelConsumer.KAFKA_BUFFER_SIZE_DEFAULT, config.getKafkaBufferSize()); config = getStreamConfig("topic", "host1", "bad value", ""); Assert.assertEquals(KafkaStreamConfigProperties.LowLevelConsumer.KAFKA_BUFFER_SIZE_DEFAULT, config.getKafkaBufferSize()); // correct config config = getStreamConfig("topic", "host1", "100", ""); Assert.assertEquals(100, config.getKafkaBufferSize()); }
public static Build withPropertyValue(String propertyValue) { return new Builder(propertyValue); }
@Test void it_should_return_transport_as_default_value_when_property_is_null() { //GIVEN String nullValue = null; //WHEN ElasticsearchClientType clientType = ElasticsearchClientTypeBuilder.withPropertyValue(nullValue).build(); //THEN assertEquals(TRANSPORT, clientType); }
public static long getNumSector(String requestSize, String sectorSize) { Double memSize = Double.parseDouble(requestSize); Double sectorBytes = Double.parseDouble(sectorSize); Double nSectors = memSize / sectorBytes; Double memSizeKB = memSize / 1024; Double memSizeGB = memSize / (1024 * 1024 * 1024); Double memSize100GB = memSizeGB / 100; // allocation bitmap file: one bit per sector Double allocBitmapSize = nSectors / 8; // extend overflow file: 4MB, plus 4MB per 100GB Double extOverflowFileSize = memSize100GB * 1024 * 1024 * 4; // journal file: 8MB, plus 8MB per 100GB Double journalFileSize = memSize100GB * 1024 * 1024 * 8; // catalog file: 10bytes per KB Double catalogFileSize = memSizeKB * 10; // hot files: 5bytes per KB Double hotFileSize = memSizeKB * 5; // quota users file and quota groups file Double quotaUsersFileSize = (memSizeGB * 256 + 1) * 64; Double quotaGroupsFileSize = (memSizeGB * 32 + 1) * 64; Double metadataSize = allocBitmapSize + extOverflowFileSize + journalFileSize + catalogFileSize + hotFileSize + quotaUsersFileSize + quotaGroupsFileSize; Double allocSize = memSize + metadataSize; Double numSectors = allocSize / sectorBytes; System.out.println(numSectors.longValue() + 1); // round up return numSectors.longValue() + 1; }
@Test public void getSectorTestTB() { String testRequestSize = "1099511627776"; // 1TB String testSectorSize = "512"; long result = HFSUtils.getNumSector(testRequestSize, testSectorSize); assertEquals(2179753739L, result); }
public static short translateBucketAcl(GSAccessControlList acl, String userId) { short mode = (short) 0; for (GrantAndPermission gp : acl.getGrantAndPermissions()) { Permission perm = gp.getPermission(); GranteeInterface grantee = gp.getGrantee(); if (perm.equals(Permission.PERMISSION_READ)) { if (isUserIdInGrantee(grantee, userId)) { // If the bucket is readable by the user, add r and x to the owner mode. mode |= (short) 0500; } } else if (perm.equals(Permission.PERMISSION_WRITE)) { if (isUserIdInGrantee(grantee, userId)) { // If the bucket is writable by the user, +w to the owner mode. mode |= (short) 0200; } } else if (perm.equals(Permission.PERMISSION_FULL_CONTROL)) { if (isUserIdInGrantee(grantee, userId)) { // If the user has full control to the bucket, +rwx to the owner mode. mode |= (short) 0700; } } } return mode; }
@Test public void translateAuthenticatedUserWritePermission() { GroupGrantee authenticatedUsersGrantee = GroupGrantee.AUTHENTICATED_USERS; mAcl.grantPermission(authenticatedUsersGrantee, Permission.PERMISSION_WRITE); assertEquals((short) 0200, GCSUtils.translateBucketAcl(mAcl, ID)); assertEquals((short) 0200, GCSUtils.translateBucketAcl(mAcl, OTHER_ID)); }
public static int listIndex(int i, int size) { return i < 0 ? size + i : i; }
@Test public void testListIndexOutOfBounds() { assertEquals(0, Accessors.listIndex(0, 10)); assertEquals(1, Accessors.listIndex(1, 10)); assertEquals(9, Accessors.listIndex(9, 10)); assertEquals(9, Accessors.listIndex(-1, 10)); assertEquals(1, Accessors.listIndex(-9, 10)); assertEquals(0, Accessors.listIndex(-10, 10)); }
public Operation parseMethod( Method method, List<Parameter> globalParameters, JsonView jsonViewAnnotation) { JavaType classType = TypeFactory.defaultInstance().constructType(method.getDeclaringClass()); return parseMethod( classType.getClass(), method, globalParameters, null, null, null, null, new ArrayList<>(), Optional.empty(), new HashSet<>(), new ArrayList<>(), false, null, null, jsonViewAnnotation, null, null); }
@Test(description = "Responses") public void testGetResponses() { Reader reader = new Reader(new OpenAPI()); Method[] methods = ResponsesResource.class.getMethods(); Operation responseOperation = reader.parseMethod(Arrays.stream(methods).filter( (method -> method.getName().equals("getResponses"))).findFirst().get(), null, null); assertNotNull(responseOperation); ApiResponses responses = responseOperation.getResponses(); assertEquals(RESPONSES_NUMBER, responses.size()); ApiResponse apiResponse = responses.get(RESPONSE_CODE_200); assertNotNull(apiResponse); assertEquals(RESPONSE_DESCRIPTION, apiResponse.getDescription()); }
/**
 * Emits the effective night-mode state as a de-duplicated boolean stream.
 *
 * <p>Combines three inputs: the user's night-mode preference string
 * ("never"/"always"/follow-system), an optional feature-enable boolean pref
 * (skipped when {@code enablePrefResId} is 0), and the OS night-mode signal
 * published by {@link AnyApplication}. The enable pref gates everything:
 * when it is false the stream emits false regardless of the other inputs.
 *
 * @param context any context; its application context must be AnyApplication
 * @param enablePrefResId boolean pref that gates the feature, or 0 for "always enabled"
 * @param defaultValueResId default for the enable pref
 * @return distinct-until-changed stream of the resolved night-mode state
 */
@CheckReturnValue
@NonNull
public static Observable<Boolean> observeNightModeState(
    @NonNull Context context, @StringRes int enablePrefResId, @BoolRes int defaultValueResId) {
  final Observable<Boolean> nightMode =
      ((AnyApplication) context.getApplicationContext()).getNightModeObservable();
  final RxSharedPrefs prefs = AnyApplication.prefs(context);
  return Observable.combineLatest(
          prefs
              .getString(
                  R.string.settings_key_night_mode, R.string.settings_default_night_mode_value)
              .asObservable(),
          // 0 means no gating preference was supplied -> treat as always enabled.
          enablePrefResId == 0
              ? Observable.just(true)
              : prefs.getBoolean(enablePrefResId, defaultValueResId).asObservable(),
          nightMode,
          (nightModePref, enabledPref, nightModeState) -> {
            if (!enabledPref) return false;
            switch (nightModePref) {
              case "never":
                return false;
              case "always":
                return true;
              default:
                // follow the OS-reported night mode
                return nightModeState;
            }
          })
      .distinctUntilChanged();
}
// With the preference set to "never", the observable must stay false through
// every OS UI-mode configuration change (night yes/no/undefined).
@Test
public void testNeverNightMode() {
  SharedPrefsHelper.setPrefsValue(R.string.settings_key_night_mode, "never");
  AtomicBoolean atomicBoolean = new AtomicBoolean();
  AnyApplication application = getApplicationContext();
  final Disposable subscribe =
      NightMode.observeNightModeState(application, 0, R.bool.settings_default_true)
          .subscribe(
              atomicBoolean::set,
              throwable -> {
                throw new RuntimeException(throwable);
              });
  Assert.assertFalse(atomicBoolean.get());
  application.onConfigurationChanged(configurationForNightMode(Configuration.UI_MODE_NIGHT_YES));
  Assert.assertFalse(atomicBoolean.get());
  application.onConfigurationChanged(configurationForNightMode(Configuration.UI_MODE_NIGHT_NO));
  Assert.assertFalse(atomicBoolean.get());
  application.onConfigurationChanged(
      configurationForNightMode(Configuration.UI_MODE_NIGHT_UNDEFINED));
  Assert.assertFalse(atomicBoolean.get());
  application.onConfigurationChanged(configurationForNightMode(Configuration.UI_MODE_NIGHT_YES));
  Assert.assertFalse(atomicBoolean.get());
  subscribe.dispose();
}
/**
 * Decides whether finishing a buffer should trigger spilling.
 *
 * <p>While the number of unspilled buffers is below the configured ratio of the
 * current pool size, explicitly returns {@code NO_ACTION}; at or above the
 * threshold, returns empty to defer the decision to the global strategy.
 *
 * @param numTotalUnSpillBuffers total buffers not yet spilled
 * @param currentPoolSize current buffer pool size
 * @return {@code NO_ACTION} below the threshold, otherwise empty
 */
@Override
public Optional<Decision> onBufferFinished(int numTotalUnSpillBuffers, int currentPoolSize) {
    final double spillingThreshold = numBuffersTriggerSpillingRatio * currentPoolSize;
    if (numTotalUnSpillBuffers < spillingThreshold) {
        return Optional.of(Decision.NO_ACTION);
    }
    return Optional.empty();
}
// At exactly the spilling threshold — and above it — the strategy must defer
// (return empty) rather than report NO_ACTION.
@Test
void testOnBufferFinishedUnSpillBufferEqualToOrGreatThenThreshold() {
  final int poolSize = 10;
  Optional<Decision> finishedDecision =
      spillStrategy.onBufferFinished(
          (int) (poolSize * NUM_BUFFERS_TRIGGER_SPILLING_RATIO), poolSize);
  assertThat(finishedDecision).isNotPresent();
  finishedDecision =
      spillStrategy.onBufferFinished(
          (int) (poolSize * NUM_BUFFERS_TRIGGER_SPILLING_RATIO) + 1, poolSize);
  assertThat(finishedDecision).isNotPresent();
}
/**
 * Compiles a point-to-point intent into installable link-collection intents.
 *
 * <p>Resolution order: (1) honor a suggested path when it satisfies the
 * constraints and is available; (2) zero-hop when ingress and egress share a
 * device; (3) an unprotected single path when protection is not required;
 * (4) a protected (disjoint) pair, falling back to a single path when no
 * disjoint path exists.
 */
@Override
public List<Intent> compile(PointToPointIntent intent, List<Intent> installable) {
    log.trace("compiling {} {}", intent, installable);
    ConnectPoint ingressPoint = intent.filteredIngressPoint().connectPoint();
    ConnectPoint egressPoint = intent.filteredEgressPoint().connectPoint();
    //TODO: handle protected path case with suggested path!!
    //Idea: use suggested path as primary and another path from path service as protection
    if (intent.suggestedPath() != null && intent.suggestedPath().size() > 0) {
        Path path = new DefaultPath(PID, intent.suggestedPath(), new ScalarWeight(1));
        //Check intent constraints against suggested path and suggested path availability
        if (checkPath(path, intent.constraints()) && pathAvailable(intent)) {
            allocateIntentBandwidth(intent, path);
            return asList(createLinkCollectionIntent(ImmutableSet.copyOf(intent.suggestedPath()),
                                                     DEFAULT_COST, intent));
        }
    }
    // Same device on both ends: no links to traverse.
    if (ingressPoint.deviceId().equals(egressPoint.deviceId())) {
        return createZeroHopLinkCollectionIntent(intent);
    }
    // proceed with no protected paths
    if (!ProtectionConstraint.requireProtectedPath(intent)) {
        return createUnprotectedLinkCollectionIntent(intent);
    }
    try {
        // attempt to compute and implement backup path
        return createProtectedIntent(ingressPoint, egressPoint, intent, installable);
    } catch (PathNotFoundException e) {
        log.warn("Could not find disjoint Path for {}", intent);
        // no disjoint path extant -- maximum one path exists between devices
        return createSinglePathIntent(ingressPoint, egressPoint, intent, installable);
    }
}
// When the requested bandwidth exceeds what the resource service can provide,
// compilation must fail with a PathNotFoundException rather than succeed.
@Test
public void testBandwidthConstrainedIntentFailure() {
  final double bpsTotal = 10.0;
  final ResourceService resourceService =
      MockResourceService.makeCustomBandwidthResourceService(bpsTotal);
  final List<Constraint> constraints =
      Collections.singletonList(new BandwidthConstraint(Bandwidth.bps(BPS_TO_RESERVE)));
  try {
    final PointToPointIntent intent =
        makeIntent(new ConnectPoint(DID_1, PORT_1), new ConnectPoint(DID_3, PORT_2), constraints);
    String[] hops = {S1, S2, S3};
    final PointToPointIntentCompiler compiler = makeCompiler(hops, resourceService);
    compiler.compile(intent, null);
    fail("Point to Point compilation with insufficient bandwidth does "
        + "not throw exception.");
  } catch (PathNotFoundException noPath) {
    assertThat(noPath.getMessage(), containsString("No path"));
  }
}
@Override public void deleteArticleCategory(Long id) { // 校验存在 validateArticleCategoryExists(id); // 校验是不是存在关联文章 Long count = articleService.getArticleCountByCategoryId(id); if (count > 0) { throw exception(ARTICLE_CATEGORY_DELETE_FAIL_HAVE_ARTICLES); } // 删除 articleCategoryMapper.deleteById(id); }
// Deleting a random, non-existent category id must raise ARTICLE_CATEGORY_NOT_EXISTS.
@Test
public void testDeleteArticleCategory_notExists() {
  // 准备参数
  Long id = randomLongId();
  // 调用, 并断言异常
  assertServiceException(() -> articleCategoryService.deleteArticleCategory(id),
      ARTICLE_CATEGORY_NOT_EXISTS);
}
/**
 * Returns the additional (side) inputs of the wrapped transform.
 *
 * <p>Pure forwarding: the delegate's map is returned unchanged.
 */
@Override
public Map<TupleTag<?>, PValue> getAdditionalInputs() {
  final Map<TupleTag<?>, PValue> additionalInputs = delegate().getAdditionalInputs();
  return additionalInputs;
}
// The forwarding wrapper must return exactly what the mocked delegate returns.
@Test
public void getAdditionalInputsDelegates() {
  Map<TupleTag<?>, PValue> additionalInputs =
      ImmutableMap.of(new TupleTag<>("test_tag"), Pipeline.create().apply(Create.of("1")));
  when(delegate.getAdditionalInputs()).thenReturn(additionalInputs);
  assertThat(forwarding.getAdditionalInputs(), equalTo(additionalInputs));
}
/**
 * Appends {@code payload} to the queue, prefixed with a 4-byte length header.
 *
 * <p>Fast path: when the head segment has room for header + payload, the data is
 * written in place and the head pointer advances. Otherwise the header+payload
 * is materialized into one buffer and written piecewise: first the tail of the
 * current head segment, then as many freshly-allocated segments as needed, the
 * last of which becomes the new head segment.
 *
 * @param payload the message bytes to enqueue; fully consumed on return
 * @throws QueueException if a new segment cannot be obtained from the pool
 */
public void enqueue(ByteBuffer payload) throws QueueException {
    final int messageSize = LENGTH_HEADER_SIZE + payload.remaining();
    if (headSegment.hasSpace(currentHeadPtr, messageSize)) {
        LOG.debug("Head segment has sufficient space for message length {}", LENGTH_HEADER_SIZE + payload.remaining());
        writeData(headSegment, currentHeadPtr.plus(1), payload);
        // move head segment
        currentHeadPtr = currentHeadPtr.moveForward(messageSize);
        return;
    }
    LOG.debug("Head segment doesn't have enough space");
    // The payload can't be fully contained in the current head segment and must be
    // split across one or more additional segments.
    final int dataSize = payload.remaining();
    // Materialize header + payload into a single buffer so splitting is a plain copy.
    final ByteBuffer rawData = (ByteBuffer) ByteBuffer.allocate(LENGTH_HEADER_SIZE + dataSize)
        .putInt(dataSize)
        .put(payload)
        .flip();
    // How many bytes still fit in the current head segment.
    long bytesRemainingInHeaderSegment = Math.min(rawData.remaining(), headSegment.bytesAfter(currentHeadPtr));
    LOG.trace("Writing partial payload to offset {} for {} bytes", currentHeadPtr, bytesRemainingInHeaderSegment);
    if (bytesRemainingInHeaderSegment > 0) {
        int copySize = (int) bytesRemainingInHeaderSegment;
        ByteBuffer slice = rawData.slice();
        slice.limit(copySize);
        writeDataNoHeader(headSegment, currentHeadPtr.plus(1), slice);
        currentHeadPtr = currentHeadPtr.moveForward(bytesRemainingInHeaderSegment);
        // No need to move newSegmentPointer the pointer because the last spinningMove has already moved it
        // shift forward the consumption point
        rawData.position(rawData.position() + copySize);
    }
    Segment newSegment = null;
    // Until the payload is completely stored, keep writing the remainder into
    // newly-allocated segments.
    while (rawData.hasRemaining()) {
        // Requesting the next free segment must be done under the pool's global lock.
        newSegment = queuePool.nextFreeSegment();
        // Notify the pool listener that a segment was created for this queue.
        allocationListener.segmentedCreated(name, newSegment);
        int copySize = (int) Math.min(rawData.remaining(), allocator.getSegmentSize());
        ByteBuffer slice = rawData.slice();
        slice.limit(copySize);
        currentHeadPtr = currentHeadPtr.moveForward(copySize);
        writeDataNoHeader(newSegment, newSegment.begin, slice);
        // The most recently allocated segment becomes the new head.
        headSegment = newSegment;
        // shift forward the consumption point
        rawData.position(rawData.position() + copySize);
    }
}
// A first enqueue on a fresh queue must create exactly the checkpoint file and
// one page file, with the page holding the 4-byte length header plus payload.
@Test
public void insertSomeDataIntoNewQueue() throws QueueException, IOException {
  final QueuePool queuePool = QueuePool.loadQueues(tempQueueFolder, PAGE_SIZE, SEGMENT_SIZE);
  final Queue queue = queuePool.getOrCreate("test");
  queue.enqueue(ByteBuffer.wrap("AAAA".getBytes(StandardCharsets.UTF_8)));
  // verify
  final HashSet<String> fileset = new HashSet<>(Arrays.asList(tempQueueFolder.toFile().list()));
  assertEquals(2, fileset.size());
  assertTrue(fileset.contains("checkpoint.properties"), "Checkpoint file must be created");
  assertTrue(fileset.contains("0.page"), "One page file must be created");
  final Path pageFile = tempQueueFolder.resolve("0.page");
  verifyFile(pageFile, 9, rawContent -> {
    assertEquals(4, rawContent.getInt(), "First 4 bytes contains the length");
    assertEquals('A', rawContent.get());
    assertEquals('A', rawContent.get());
    assertEquals('A', rawContent.get());
    assertEquals('A', rawContent.get());
    assertEquals(0, rawContent.get());
  });
}
/**
 * Serializes a {@link TimestampedKeyAndJoinSide} as
 * {@code [timestamp bytes][1-byte side flag][key bytes]}.
 *
 * <p>The side flag is 1 for the left side and 0 for the right side.
 *
 * @param topic topic associated with the data
 * @param data the value to serialize
 * @return the concatenated byte array
 */
@Override
public byte[] serialize(final String topic, final TimestampedKeyAndJoinSide<K> data) {
    // Encode the join side as a single flag byte.
    final byte sideFlag = data.isLeftSide() ? (byte) 1 : (byte) 0;
    final byte[] keyBytes = keySerializer.serialize(topic, data.getKey());
    final byte[] timestampBytes = timestampSerializer.serialize(topic, data.getTimestamp());
    final ByteBuffer buffer = ByteBuffer.allocate(timestampBytes.length + 1 + keyBytes.length);
    buffer.put(timestampBytes);
    buffer.put(sideFlag);
    buffer.put(keyBytes);
    return buffer.array();
}
// Serializing a TimestampedKeyAndJoinSide whose key is null must surface as a
// NullPointerException.
@Test
public void shouldThrowIfSerializeNullData() {
  assertThrows(NullPointerException.class,
      () -> STRING_SERDE.serializer().serialize(TOPIC, TimestampedKeyAndJoinSide.makeLeft(null, 0)));
}
/**
 * Resolves the inter-node listener URL using the default logger.
 *
 * @param portResolver resolves the port to use for a given listener URL
 * @return the inter-node listener URL
 */
public URL getInterNodeListener(
    final Function<URL, Integer> portResolver
) {
  return getInterNodeListener(portResolver, LOGGER);
}
// An advertised listener using the IPv6 wildcard address ([::]) must be
// rejected with a ConfigException naming the offending config.
@Test
public void shouldThrowIfExplicitInterNodeListenerHasIpv6WildcardAddress() {
  // Given:
  final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.<String, Object>builder()
      .putAll(MIN_VALID_CONFIGS)
      .put(ADVERTISED_LISTENER_CONFIG, "https://[::]:1236")
      .build()
  );
  // When:
  final Exception e = assertThrows(
      ConfigException.class,
      () -> config.getInterNodeListener(portResolver, logger)
  );
  // Then:
  assertThat(e.getMessage(), containsString("Invalid value https://[::]:1236 for configuration "
      + ADVERTISED_LISTENER_CONFIG + ": Can not be wildcard"));
}
/**
 * Converts an Avro record schema into a Parquet {@link MessageType}.
 *
 * @param avroSchema the Avro schema; must be of type RECORD
 * @return the equivalent Parquet message type
 * @throws IllegalArgumentException if the schema is not a record
 */
public MessageType convert(Schema avroSchema) {
  // Only record schemas can form a Parquet message root.
  if (avroSchema.getType() != Schema.Type.RECORD) {
    throw new IllegalArgumentException("Avro schema must be a record.");
  }
  final String name = avroSchema.getFullName();
  return new MessageType(name, convertFields(avroSchema.getFields(), ""));
}
// timestamp-micros must round-trip to int64 TIMESTAMP(MICROS,true); every other
// primitive must be rejected; and conversion must pick the logical-type name
// supported by the runtime Avro version (local-timestamp-micros only from 1.10+).
@Test
public void testTimestampMicrosType() throws Exception {
  Schema date = LogicalTypes.timestampMicros().addToSchema(Schema.create(LONG));
  Schema expected = Schema.createRecord(
      "myrecord", null, null, false, Arrays.asList(new Schema.Field("timestamp", date, null, null)));
  testRoundTripConversion(
      expected, "message myrecord {\n" + " required int64 timestamp (TIMESTAMP(MICROS,true));\n" + "}\n");
  for (PrimitiveTypeName primitive :
      new PrimitiveTypeName[] {INT32, INT96, FLOAT, DOUBLE, BOOLEAN, BINARY, FIXED_LEN_BYTE_ARRAY}) {
    final PrimitiveType type;
    if (primitive == FIXED_LEN_BYTE_ARRAY) {
      type = new PrimitiveType(REQUIRED, primitive, 12, "test", TIMESTAMP_MICROS);
    } else {
      type = new PrimitiveType(REQUIRED, primitive, "test", TIMESTAMP_MICROS);
    }
    assertThrows(
        "Should not allow TIMESTAMP_MICROS with " + primitive,
        IllegalArgumentException.class,
        () -> new AvroSchemaConverter().convert(message(type)));
  }
  // Test that conversions for timestamp types only use APIs that are available in the user's Avro version
  for (String avroVersion : ImmutableSet.of("1.7.0", "1.8.0", "1.9.0", "1.10.0", "1.11.0")) {
    Mockito.when(AvroRecordConverter.getRuntimeAvroVersion()).thenReturn(avroVersion);
    final Schema converted = new AvroSchemaConverter()
        .convert(Types.buildMessage()
            .addField(Types.primitive(INT64, Type.Repetition.REQUIRED)
                .as(LogicalTypeAnnotation.timestampType(
                    false, LogicalTypeAnnotation.TimeUnit.MICROS))
                .length(1)
                .named("timestamp_type"))
            .named("TestAvro"));
    assertEquals(
        avroVersion.matches("1\\.[789]\\.\\d+") ? "timestamp-micros" : "local-timestamp-micros",
        converted
            .getField("timestamp_type")
            .schema()
            .getLogicalType()
            .getName());
  }
}
/**
 * SQL {@code <} operator for SMALLINT operands.
 *
 * <p>SMALLINT values are carried as {@code long} in the engine's calling
 * convention; the comparison is a plain signed numeric comparison.
 *
 * @param left left operand
 * @param right right operand
 * @return true iff {@code left} is strictly less than {@code right}
 */
@ScalarOperator(LESS_THAN)
@SqlType(StandardTypes.BOOLEAN)
public static boolean lessThan(@SqlType(StandardTypes.SMALLINT) long left, @SqlType(StandardTypes.SMALLINT) long right)
{
    return Long.compare(left, right) < 0;
}
// Exercises SMALLINT '<' for equal values and both orderings.
@Test
public void testLessThan() {
  assertFunction("SMALLINT'37' < SMALLINT'37'", BOOLEAN, false);
  assertFunction("SMALLINT'37' < SMALLINT'17'", BOOLEAN, false);
  assertFunction("SMALLINT'17' < SMALLINT'37'", BOOLEAN, true);
  assertFunction("SMALLINT'17' < SMALLINT'17'", BOOLEAN, false);
}
/**
 * Validates an update to an existing sink config and merges the allowed changes.
 *
 * <p>Identity fields (tenant/namespace/name) and several behavioral fields
 * (subscription name, input topics, regex-ness, processing guarantees, retain
 * ordering, auto-ack) are immutable and raise {@link IllegalArgumentException}
 * when changed. All other non-empty fields of {@code newConfig} overwrite the
 * corresponding fields of a clone of {@code existingConfig}.
 *
 * @param existingConfig the currently deployed sink config
 * @param newConfig the requested update (may be sparse)
 * @return a merged config based on a clone of {@code existingConfig}
 * @throws IllegalArgumentException if an immutable field differs
 */
public static SinkConfig validateUpdate(SinkConfig existingConfig, SinkConfig newConfig) {
    SinkConfig mergedConfig = clone(existingConfig);
    // Identity must match exactly.
    if (!existingConfig.getTenant().equals(newConfig.getTenant())) {
        throw new IllegalArgumentException("Tenants differ");
    }
    if (!existingConfig.getNamespace().equals(newConfig.getNamespace())) {
        throw new IllegalArgumentException("Namespaces differ");
    }
    if (!existingConfig.getName().equals(newConfig.getName())) {
        throw new IllegalArgumentException("Sink Names differ");
    }
    if (!StringUtils.isEmpty(newConfig.getClassName())) {
        mergedConfig.setClassName(newConfig.getClassName());
    }
    // The source subscription name may be supplied, but never changed.
    if (!StringUtils.isEmpty(newConfig.getSourceSubscriptionName()) && !newConfig.getSourceSubscriptionName()
            .equals(existingConfig.getSourceSubscriptionName())) {
        throw new IllegalArgumentException("Subscription Name cannot be altered");
    }
    if (newConfig.getInputSpecs() == null) {
        newConfig.setInputSpecs(new HashMap<>());
    }
    if (mergedConfig.getInputSpecs() == null) {
        mergedConfig.setInputSpecs(new HashMap<>());
    }
    if (!StringUtils.isEmpty(newConfig.getLogTopic())) {
        mergedConfig.setLogTopic(newConfig.getLogTopic());
    }
    // Normalize the various input declarations (inputs, topicsPattern,
    // serde/schema maps) into input specs before comparing them below.
    if (newConfig.getInputs() != null) {
        newConfig.getInputs().forEach((topicName -> {
            newConfig.getInputSpecs().putIfAbsent(topicName,
                    ConsumerConfig.builder().isRegexPattern(false).build());
        }));
    }
    if (newConfig.getTopicsPattern() != null && !newConfig.getTopicsPattern().isEmpty()) {
        newConfig.getInputSpecs().put(newConfig.getTopicsPattern(),
                ConsumerConfig.builder()
                        .isRegexPattern(true)
                        .build());
    }
    if (newConfig.getTopicToSerdeClassName() != null) {
        newConfig.getTopicToSerdeClassName().forEach((topicName, serdeClassName) -> {
            newConfig.getInputSpecs().put(topicName,
                    ConsumerConfig.builder()
                            .serdeClassName(serdeClassName)
                            .isRegexPattern(false)
                            .build());
        });
    }
    if (newConfig.getTopicToSchemaType() != null) {
        newConfig.getTopicToSchemaType().forEach((topicName, schemaClassname) -> {
            newConfig.getInputSpecs().put(topicName,
                    ConsumerConfig.builder()
                            .schemaType(schemaClassname)
                            .isRegexPattern(false)
                            .build());
        });
    }
    // Input topics may only be refined, never added, and their regex-ness is fixed.
    if (!newConfig.getInputSpecs().isEmpty()) {
        SinkConfig finalMergedConfig = mergedConfig;
        newConfig.getInputSpecs().forEach((topicName, consumerConfig) -> {
            if (!existingConfig.getInputSpecs().containsKey(topicName)) {
                throw new IllegalArgumentException("Input Topics cannot be altered");
            }
            if (consumerConfig.isRegexPattern() != existingConfig.getInputSpecs().get(topicName).isRegexPattern()) {
                throw new IllegalArgumentException(
                        "isRegexPattern for input topic " + topicName + " cannot be altered");
            }
            finalMergedConfig.getInputSpecs().put(topicName, consumerConfig);
        });
    }
    if (newConfig.getProcessingGuarantees() != null && !newConfig.getProcessingGuarantees()
            .equals(existingConfig.getProcessingGuarantees())) {
        throw new IllegalArgumentException("Processing Guarantees cannot be altered");
    }
    if (newConfig.getConfigs() != null) {
        mergedConfig.setConfigs(newConfig.getConfigs());
    }
    if (newConfig.getSecrets() != null) {
        mergedConfig.setSecrets(newConfig.getSecrets());
    }
    if (newConfig.getParallelism() != null) {
        mergedConfig.setParallelism(newConfig.getParallelism());
    }
    if (newConfig.getRetainOrdering() != null && !newConfig.getRetainOrdering()
            .equals(existingConfig.getRetainOrdering())) {
        throw new IllegalArgumentException("Retain Ordering cannot be altered");
    }
    if (newConfig.getRetainKeyOrdering() != null && !newConfig.getRetainKeyOrdering()
            .equals(existingConfig.getRetainKeyOrdering())) {
        throw new IllegalArgumentException("Retain Key Ordering cannot be altered");
    }
    if (newConfig.getAutoAck() != null && !newConfig.getAutoAck().equals(existingConfig.getAutoAck())) {
        throw new IllegalArgumentException("AutoAck cannot be altered");
    }
    if (newConfig.getResources() != null) {
        mergedConfig
                .setResources(ResourceConfigUtils.merge(existingConfig.getResources(), newConfig.getResources()));
    }
    if (newConfig.getTimeoutMs() != null) {
        mergedConfig.setTimeoutMs(newConfig.getTimeoutMs());
    }
    if (newConfig.getCleanupSubscription() != null) {
        mergedConfig.setCleanupSubscription(newConfig.getCleanupSubscription());
    }
    if (!StringUtils.isEmpty(newConfig.getArchive())) {
        mergedConfig.setArchive(newConfig.getArchive());
    }
    if (!StringUtils.isEmpty(newConfig.getRuntimeFlags())) {
        mergedConfig.setRuntimeFlags(newConfig.getRuntimeFlags());
    }
    if (!StringUtils.isEmpty(newConfig.getCustomRuntimeOptions())) {
        mergedConfig.setCustomRuntimeOptions(newConfig.getCustomRuntimeOptions());
    }
    if (newConfig.getTransformFunction() != null) {
        mergedConfig.setTransformFunction(newConfig.getTransformFunction());
    }
    if (newConfig.getTransformFunctionClassName() != null) {
        mergedConfig.setTransformFunctionClassName(newConfig.getTransformFunctionClassName());
    }
    if (newConfig.getTransformFunctionConfig() != null) {
        mergedConfig.setTransformFunctionConfig(newConfig.getTransformFunctionConfig());
    }
    return mergedConfig;
}
// cleanupSubscription is a mutable field: the update must be applied, and once
// restored the merged config must equal the original.
@Test
public void testMergeDifferentCleanupSubscription() {
  SinkConfig sinkConfig = createSinkConfig();
  SinkConfig newSinkConfig = createUpdatedSinkConfig("cleanupSubscription", false);
  SinkConfig mergedConfig = SinkConfigUtils.validateUpdate(sinkConfig, newSinkConfig);
  assertFalse(mergedConfig.getCleanupSubscription());
  mergedConfig.setCleanupSubscription(sinkConfig.getCleanupSubscription());
  assertEquals(
      new Gson().toJson(sinkConfig),
      new Gson().toJson(mergedConfig)
  );
}
/**
 * Registers the components web-service controller and all of its actions.
 *
 * @param context the web-service definition context
 */
@Override
public void define(Context context) {
  final NewController controller =
    context.createController(CONTROLLER_COMPONENTS)
      .setSince("4.2")
      .setDescription("Get information about a component (file, directory, project, ...) and its ancestors or descendants. "
        + "Update a project or module key.");
  // Let each configured action define itself on the controller.
  for (int i = 0; i < actions.length; i++) {
    actions[i].define(controller);
  }
  controller.done();
}
// The controller must be registered under CONTROLLER_COMPONENTS with its
// description, since-version, and exactly the configured action.
@Test
public void define_controller() {
  WebService.Context context = new WebService.Context();
  new ComponentsWs(action).define(context);
  WebService.Controller controller = context.controller(CONTROLLER_COMPONENTS);
  assertThat(controller).isNotNull();
  assertThat(controller.description()).isNotEmpty();
  assertThat(controller.since()).isEqualTo("4.2");
  assertThat(controller.actions()).extracting(WebService.Action::key).containsExactly(actionKey);
}
/**
 * Creates a generator with no required fields and no overridden defaults.
 *
 * @param connector the source connector to generate config for
 * @param dbzConfigClass the Debezium config class to introspect
 * @return the config generator
 */
public static ConnectorConfigGenerator create(final SourceConnector connector, final Class<?> dbzConfigClass) {
  return create(connector, dbzConfigClass, Collections.emptySet(), Collections.emptyMap());
}
// Passing a class that is not a Debezium config class must be rejected with
// an IllegalArgumentException.
@Test
void testIfItHandlesWrongClassInput() {
  final MySqlConnector connector = new MySqlConnector();
  final Map<String, Object> overridenDefaultValues = Collections.emptyMap();
  final Set<String> requiredFields = Collections.emptySet();
  Class<?> clazz = getClass();
  assertThrows(IllegalArgumentException.class,
      () -> ConnectorConfigGenerator.create(connector, clazz, requiredFields, overridenDefaultValues));
}
/**
 * Registers (or replaces) the block handler for the given class/resource pair.
 *
 * @param clazz the class declaring the handler; must not be null
 * @param name the resource name; must not be blank
 * @param method the handler method to wrap and store
 * @throws IllegalArgumentException if {@code clazz} is null or {@code name} is blank
 * @deprecated kept for backward compatibility
 */
@Deprecated
static void updateBlockHandlerFor(Class<?> clazz, String name, Method method) {
    if (null == clazz || StringUtil.isBlank(name)) {
        throw new IllegalArgumentException("Bad argument");
    }
    final MethodWrapper wrapper = MethodWrapper.wrap(method);
    BLOCK_HANDLER_MAP.put(getKey(clazz, name), wrapper);
}
// A blank resource name must be rejected with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class)
public void testUpdateFallbackBadArgument() {
  ResourceMetadataRegistry.updateBlockHandlerFor(String.class, "", new Class[0],
      String.class.getMethods()[0]);
}
/**
 * Returns the Netty write-buffer low-water mark, falling back to the
 * compiled-in default when the property is not set.
 */
@Override
public int getNettyWriteBufferLowWaterMark() {
  return clientConfig.getPropertyAsInteger(WRITE_BUFFER_LOW_WATER_MARK,
      DEFAULT_WRITE_BUFFER_LOW_WATER_MARK);
}
// An explicitly configured low-water mark must override the default.
@Test
void testGetNettyWriteBufferLowWaterMarkOverride() {
  clientConfig.set(ConnectionPoolConfigImpl.WRITE_BUFFER_LOW_WATER_MARK, 10000);
  assertEquals(10000, connectionPoolConfig.getNettyWriteBufferLowWaterMark());
}
/**
 * Returns the cached POJO for the given ordinal.
 *
 * <p>Reads {@code cachedItems} once into a local so a concurrent detach cannot
 * invalidate the reference between the null check and the lookup.
 *
 * @param ordinal the ordinal of the object to fetch
 * @return the cached object at that ordinal
 * @throws IllegalStateException if the provider has been detached/uninitialized,
 *         or if {@code ordinal} is outside the bounds of the cache
 */
@Override
public T getHollowObject(int ordinal) {
    List<T> refCachedItems = cachedItems;
    if (refCachedItems == null) {
        throw new IllegalStateException(String.format(
                "HollowObjectCacheProvider for type %s has been detached or was not initialized",
                typeReadState == null ? null : typeReadState.getSchema().getName()));
    }
    // Bug fix: also reject negative ordinals here; previously they slipped past
    // the size check and surfaced as a raw IndexOutOfBoundsException from get().
    if (ordinal < 0 || refCachedItems.size() <= ordinal) {
        throw new IllegalStateException(String.format(
                "Ordinal %s is out of bounds for pojo cache array of size %s.",
                ordinal, refCachedItems.size()));
    }
    return refCachedItems.get(ordinal);
}
// An object added after the cache was pre-populated must be retrievable by its
// ordinal.
@Test
public void adding_withPreExisting() {
  TypeA a2 = typeA(2);
  prepopulate(typeA(0), typeA(1));
  notifyAdded(a2);
  assertEquals(a2, subject.get().getHollowObject(a2.ordinal));
}
/**
 * Routes a request URI to the resource method descriptor that handles it.
 *
 * <p>Splits the raw path on '/', looks the first segment up in the root
 * resource map (URL-decoded), and walks the remaining segments down the
 * resource tree.
 *
 * @param context the server resource context carrying the request URI
 * @return the matched resource method descriptor
 * @throws RoutingException 404 when the path is too short or no root resource matches
 */
public ResourceMethodDescriptor process(final ServerResourceContext context) {
    String path = context.getRequestURI().getRawPath();
    // Need at least "/x" to have a root segment.
    if (path.length() < 2) {
        throw new RoutingException(HttpStatus.S_404_NOT_FOUND.getCode());
    }
    if (path.charAt(0) == '/') {
        path = path.substring(1);
    }
    Queue<String> remainingPath = new LinkedList<>(Arrays.asList(SLASH_PATTERN.split(path)));
    String rootPath = "/" + remainingPath.poll();
    ResourceModel currentResource;
    try {
        currentResource =
            _pathRootResourceMap.get(URLDecoder.decode(rootPath, RestConstants.DEFAULT_CHARSET_NAME));
    } catch (UnsupportedEncodingException e) {
        throw new RestLiInternalException("UnsupportedEncodingException while trying to decode the root path", e);
    }
    if (currentResource == null) {
        throw new RoutingException(String.format("No root resource defined for path '%s'", rootPath),
            HttpStatus.S_404_NOT_FOUND.getCode());
    }
    return processResourceTree(currentResource, context, remainingPath);
}
// A request whose root resource exists but whose method cannot be matched must
// fail routing with a 400.
@Test
public void failsOnRootResourceMethodNotFound() throws URISyntaxException {
  final TestSetup setup = new TestSetup();
  setup.mockContextForMethodNotFound(setup._rootPath);
  final RestLiRouter router = setup._router;
  final ServerResourceContext context = setup._context;
  final RoutingException e = runAndCatch(() -> router.process(context), RoutingException.class);
  Assert.assertEquals(e.getStatus(), HttpStatus.S_400_BAD_REQUEST.getCode());
}
/**
 * Creates a producer bound to the given metrics endpoint.
 *
 * @param endpoint the endpoint this producer sends to
 */
public TimerProducer(MetricsEndpoint endpoint) {
  super(endpoint);
}
// The producer must be constructed and bound to the expected endpoint.
@Test
public void testTimerProducer() {
  assertThat(producer, is(notNullValue()));
  assertThat(producer.getEndpoint().equals(endpoint), is(true));
}
/**
 * Recursively deletes the given path if it exists, logging (not propagating)
 * any {@link IOException}.
 *
 * @param toDelete the path to remove
 */
public static void deletePathQuietly(String toDelete) {
    try {
        final Path pathToRemove = new Path(toDelete);
        final FileSystem fileSystem = pathToRemove.getFileSystem();
        if (fileSystem.exists(pathToRemove)) {
            // true -> delete directories recursively as well.
            fileSystem.delete(pathToRemove, true);
        }
    } catch (IOException e) {
        // Best-effort cleanup: swallow and log.
        LOG.error("Failed to delete files for {} ", toDelete, e);
    }
}
// deletePathQuietly must remove an existing file without throwing.
@Test
void testDeletePathQuietly() throws IOException {
  File testFile = new File(tempFolder.getPath(), "testFile");
  Files.createFile(testFile.toPath());
  assertThat(testFile).exists();
  SegmentPartitionFile.deletePathQuietly(testFile.getPath());
  assertThat(testFile).doesNotExist();
}
/**
 * Sends a Bot API request synchronously and returns its typed response.
 *
 * @param request the request to send
 * @return the API response for this request type
 */
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
  return api.send(request);
}
// Parses a recorded callbackQuery update, validates all of its fields, then
// answers it; since the query is stale the API is expected to reply 400.
@Test
public void answerCallback() {
  // callbackQuery sent by client after pressing on InlineKeyboardButton (used in sendGame() test)
  CallbackQuery callbackQuery = BotUtils.parseUpdate(testCallbackQuery).callbackQuery();
  assertNotNull(callbackQuery);
  assertFalse(callbackQuery.id().isEmpty());
  UserTest.checkUser(callbackQuery.from(), true);
  assertEquals(chatId, callbackQuery.from().id());
  MessageTest.checkMessage(callbackQuery.message());
  assertFalse(callbackQuery.chatInstance().isEmpty());
  assertEquals("pengrad_test_game", callbackQuery.gameShortName());
  assertNull(callbackQuery.inlineMessageId());
  assertNull(callbackQuery.data());
  BaseResponse response = bot.execute(new AnswerCallbackQuery(callbackQuery.id())
      .text("answer callback")
      .url("telegram.me/pengrad_test_bot?game=pengrad_test_game")
      .showAlert(false)
      .cacheTime(1));
  assertFalse(response.isOk());
  assertEquals(400, response.errorCode());
  assertEquals("Bad Request: query is too old and response timeout expired or query ID is invalid",
      response.description());
}
/**
 * Finds the actual table of {@code logicTable} that is bound to
 * {@code otherActualTable} of {@code otherLogicTable} on the same data source.
 *
 * <p>The position of the other actual table within its logic table's data
 * nodes determines the index of the bound actual table.
 *
 * @param dataSource the data source name
 * @param logicTable the logic table whose actual table is wanted
 * @param otherLogicTable the logic table the binding is anchored on
 * @param otherActualTable the known actual table of {@code otherLogicTable}
 * @return the bound actual table name
 * @throws ActualTableNotFoundException if {@code otherActualTable} is unknown
 * @throws BindingTableNotFoundException if {@code logicTable} is unknown
 */
public String getBindingActualTable(final String dataSource, final String logicTable,
                                    final String otherLogicTable, final String otherActualTable) {
    final ShardingTable otherTable = shardingTables.get(otherLogicTable);
    // Position of the known actual table within the other logic table, or -1.
    final int index = null == otherTable ? -1 : otherTable.findActualTableIndex(dataSource, otherActualTable);
    if (-1 == index) {
        throw new ActualTableNotFoundException(dataSource, otherActualTable);
    }
    final ShardingTable table = shardingTables.get(logicTable);
    if (null == table) {
        throw new BindingTableNotFoundException(dataSource, logicTable, otherActualTable);
    }
    // The bound actual table sits at the same position in this table's data nodes.
    return table.getActualDataNodes().get(index).getTableName();
}
// An unknown data source / actual table pair must raise ActualTableNotFoundException.
@Test
void assertGetBindingActualTablesFailureWhenNotFound() {
  assertThrows(ActualTableNotFoundException.class,
      () -> createBindingTableRule().getBindingActualTable("no_ds", "Sub_Logic_Table", "LOGIC_TABLE", "table_1"));
}
/**
 * Prepares the connection headers (and buffered body) for an HTTP PUT.
 *
 * <p>Two body sources are supported: a single file sent as the raw body (when
 * the sampler has no arguments), or the concatenated values of nameless
 * parameters. In either case the Content-Type may come from the file's mime
 * type unless the user already set one, and Content-Length plus doOutput are
 * set once a body is known.
 *
 * @param connection the connection to configure
 * @param sampler the sampler describing the request
 * @throws IOException if encoding the parameter body fails
 */
@Override
public void setHeaders(URLConnection connection, HTTPSamplerBase sampler) throws IOException {
    // Get the encoding to use for the request
    String contentEncoding = sampler.getContentEncoding();
    long contentLength = 0L;
    boolean hasPutBody = false;
    // Check if the header manager had a content type header
    // This allows the user to specify their own content-type for a PUT request
    String contentTypeHeader = connection.getRequestProperty(HTTPConstants.HEADER_CONTENT_TYPE);
    boolean hasContentTypeHeader = contentTypeHeader != null && contentTypeHeader.length() > 0;
    HTTPFileArg[] files = sampler.getHTTPFiles();
    // If there are no arguments, we can send a file as the body of the request
    if(sampler.getArguments() != null && sampler.getArguments().getArgumentCount() == 0
            && sampler.getSendFileAsPostBody()) {
        // If getSendFileAsPostBody returned true, it's sure that file is not null
        HTTPFileArg file = files[0];
        hasPutBody = true;
        if(!hasContentTypeHeader) {
            // Allow the mimetype of the file to control the content type
            if(file.getMimeType().length() > 0) {
                connection.setRequestProperty(HTTPConstants.HEADER_CONTENT_TYPE, file.getMimeType());
            }
        }
        // Create the content length we are going to write
        File inputFile = new File(file.getPath());
        contentLength = inputFile.length();
    } else if(sampler.getSendParameterValuesAsPostBody()) {
        hasPutBody = true;
        // Allow the mimetype of the file to control the content type
        // This is not obvious in GUI if you are not uploading any files,
        // but just sending the content of nameless parameters
        if(!hasContentTypeHeader && files.length == 1 && files[0].getMimeType().length() > 0) {
            connection.setRequestProperty(HTTPConstants.HEADER_CONTENT_TYPE, files[0].getMimeType());
        }
        // We create the post body content now, so we know the size
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        // Just append all the parameter values, and use that as the put body
        StringBuilder putBodyBuffer = new StringBuilder();
        for (JMeterProperty jMeterProperty : sampler.getArguments()) {
            HTTPArgument arg = (HTTPArgument) jMeterProperty.getObjectValue();
            putBodyBuffer.append(arg.getEncodedValue(contentEncoding));
        }
        bos.write(putBodyBuffer.toString().getBytes(contentEncoding));
        bos.flush();
        bos.close();
        // Keep the content, will be sent later
        formDataUrlEncoded = bos.toByteArray();
        contentLength = bos.toByteArray().length;
    }
    if(hasPutBody) {
        // Set the content length
        connection.setRequestProperty(HTTPConstants.HEADER_CONTENT_LENGTH, Long.toString(contentLength));
        // Make the connection ready for sending post data
        connection.setDoOutput(true);
    }
}
// With a single file and nameless parameters, the file's mime type must be used
// as the request Content-Type.
@Test
public void testSetHeadersWithParams() throws Exception {
  URLConnection uc = new NullURLConnection();
  HTTPSampler sampler = new HTTPSampler();
  sampler.setHTTPFiles(new HTTPFileArg[] { new HTTPFileArg("file2", "param2", "mime2") });
  Arguments arguments = new Arguments();
  arguments.addArgument(new HTTPArgument("", "parameter with no name"));
  sampler.setArguments(arguments);
  PutWriter pw = new PutWriter();
  pw.setHeaders(uc, sampler);
  assertEquals("mime2", uc.getRequestProperty(HTTPConstants.HEADER_CONTENT_TYPE));
}
/**
 * Parses the given SQL text into statements, delegating to the primary context.
 * Parsing is purely syntactic; referenced objects are not validated here.
 *
 * @param sql the SQL text to parse
 * @return the parsed statements
 */
@Override
public List<ParsedStatement> parse(final String sql) {
  return primaryContext.parse(sql);
}
// Parsing is syntactic only: a statement referencing a non-existent stream must
// still parse into one statement.
@Test
public void shouldBeAbleToParseInvalidThings() {
  // Given:
  setupKsqlEngineWithSharedRuntimeEnabled();
  // No Stream called 'I_DO_NOT_EXIST' exists
  // When:
  final List<ParsedStatement> parsed = ksqlEngine
      .parse("CREATE STREAM FOO AS SELECT * FROM I_DO_NOT_EXIST;");
  // Then:
  assertThat(parsed, hasSize(1));
}
/**
 * Runs the pipeline on the direct runner.
 *
 * <p>Round-trips the options through JSON (validating serializability), applies
 * runner-specific rewrites, builds the execution graph and keyed-value
 * tracking, validates display data, then starts the parallel executor.
 * When {@code blockOnRun} is set, waits for completion and rethrows user-code
 * failures as {@link PipelineExecutionException}.
 *
 * @param pipeline the pipeline to execute
 * @return the result handle for the running (or finished) pipeline
 * @throws IllegalArgumentException if the options cannot be serialized to JSON
 */
@Override
public DirectPipelineResult run(Pipeline pipeline) {
  try {
    // JSON round-trip both validates and normalizes the options.
    options =
        MAPPER
            .readValue(MAPPER.writeValueAsBytes(options), PipelineOptions.class)
            .as(DirectOptions.class);
  } catch (IOException e) {
    throw new IllegalArgumentException(
        "PipelineOptions specified failed to serialize to JSON.", e);
  }
  performRewrites(pipeline);
  MetricsEnvironment.setMetricsSupported(true);
  try {
    DirectGraphVisitor graphVisitor = new DirectGraphVisitor();
    pipeline.traverseTopologically(graphVisitor);
    @SuppressWarnings("rawtypes")
    KeyedPValueTrackingVisitor keyedPValueVisitor = KeyedPValueTrackingVisitor.create();
    pipeline.traverseTopologically(keyedPValueVisitor);
    DisplayDataValidator.validatePipeline(pipeline);
    DisplayDataValidator.validateOptions(options);
    ExecutorService metricsPool =
        Executors.newCachedThreadPool(
            new ThreadFactoryBuilder()
                .setThreadFactory(MoreExecutors.platformThreadFactory())
                .setDaemon(false) // otherwise you say you want to leak, please don't!
                .setNameFormat("direct-metrics-counter-committer")
                .build());
    DirectGraph graph = graphVisitor.getGraph();
    EvaluationContext context =
        EvaluationContext.create(
            clockSupplier.get(),
            Enforcement.bundleFactoryFor(enabledEnforcements, graph),
            graph,
            keyedPValueVisitor.getKeyedPValues(),
            metricsPool);
    TransformEvaluatorRegistry registry =
        TransformEvaluatorRegistry.javaSdkNativeRegistry(context, options);
    PipelineExecutor executor =
        ExecutorServiceParallelExecutor.create(
            options.getTargetParallelism(),
            registry,
            Enforcement.defaultModelEnforcements(enabledEnforcements),
            context,
            metricsPool);
    executor.start(graph, RootProviderRegistry.javaNativeRegistry(context, options));
    DirectPipelineResult result = new DirectPipelineResult(executor, context);
    if (options.isBlockOnRun()) {
      try {
        result.waitUntilFinish();
      } catch (UserCodeException userException) {
        // Unwrap the user's exception so callers see the actual cause.
        throw new PipelineExecutionException(userException.getCause());
      } catch (Throwable t) {
        if (t instanceof RuntimeException) {
          throw (RuntimeException) t;
        }
        throw new RuntimeException(t);
      }
    }
    return result;
  } finally {
    MetricsEnvironment.setMetricsSupported(false);
  }
}
// A DoFn whose populateDisplayData throws must fail pipeline validation, with
// the failure naming the DoFn class and carrying the original cause.
@Test
public void transformDisplayDataExceptionShouldFail() {
  DoFn<Integer, Integer> brokenDoFn =
      new DoFn<Integer, Integer>() {
        @ProcessElement
        public void processElement(ProcessContext c) throws Exception {}

        @Override
        public void populateDisplayData(DisplayData.Builder builder) {
          throw new RuntimeException("oh noes!");
        }
      };
  Pipeline p = getPipeline();
  p.apply(Create.of(1, 2, 3)).apply(ParDo.of(brokenDoFn));
  thrown.expectMessage(brokenDoFn.getClass().getName());
  thrown.expectCause(ThrowableMessageMatcher.hasMessage(is("oh noes!")));
  p.run();
}
/**
 * Builds the server diagnostic report asynchronously, running under the
 * request's subject for authorization.
 *
 * <p>Any failure from the report generation is rethrown as a completion
 * exception; on success the report file is wrapped into a REST response tagged
 * with this node's name.
 *
 * @param request the incoming REST request
 * @return a stage completing with the report response
 */
private CompletionStage<RestResponse> report(RestRequest request) {
  ServerManagement server = invocationHelper.getServer();
  return Security.doAs(request.getSubject(), () -> server.getServerReport().handle((path, t) -> {
    if (t != null) {
      throw CompletableFutures.asCompletionException(t);
    }
    return createReportResponse(request, path.toFile(), invocationHelper.getRestCacheManager().getNodeName());
  })
  );
}
@Test public void testServerReport() { CompletionStage<RestResponse> response = adminClient.server().report(); ResponseAssertion.assertThat(response).isOk(); ResponseAssertion.assertThat(response).hasContentType("application/gzip"); }
@Override public <T> void register(Class<T> remoteInterface, T object) { register(remoteInterface, object, 1); }
@Test public void testNoAckWithResultInvocationsAsync() throws InterruptedException, ExecutionException { RedissonClient server = createInstance(); RedissonClient client = createInstance(); try { server.getRemoteService().register(RemoteInterface.class, new RemoteImpl()); // no ack but an execution timeout of 1 second RemoteInvocationOptions options = RemoteInvocationOptions.defaults().noAck().expectResultWithin(1, TimeUnit.SECONDS); RemoteInterfaceAsync service = client.getRemoteService().get(RemoteInterfaceAsync.class, options); service.voidMethod("noAck", 100L).get(); assertThat(service.resultMethod(21L).get()).isEqualTo(42); try { service.errorMethod().get(); Assertions.fail(); } catch (Exception e) { assertThat(e.getCause().getMessage()).isEqualTo("Checking error throw"); } try { service.errorMethodWithCause().get(); Assertions.fail(); } catch (Exception e) { assertThat(e.getCause().getCause()).isInstanceOf(ArithmeticException.class); assertThat(e.getCause().getCause().getMessage()).isEqualTo("/ by zero"); } try { service.timeoutMethod().get(); Assertions.fail("noAck option should still wait for the server to return a response and throw if the execution timeout is exceeded"); } catch (Exception e) { assertThat(e.getCause()).isInstanceOf(RemoteServiceTimeoutException.class); } } finally { client.shutdown(); server.shutdown(); } }
public static ConjunctFuture<Void> completeAll( Collection<? extends CompletableFuture<?>> futuresToComplete) { return new CompletionConjunctFuture(futuresToComplete); }
@Test void testCompleteAllPartialExceptional() { final CompletableFuture<String> inputFuture1 = new CompletableFuture<>(); final CompletableFuture<Integer> inputFuture2 = new CompletableFuture<>(); final List<CompletableFuture<?>> futuresToComplete = Arrays.asList(inputFuture1, inputFuture2); final FutureUtils.ConjunctFuture<Void> completeFuture = FutureUtils.completeAll(futuresToComplete); assertThat(completeFuture).isNotDone(); assertThat(completeFuture.getNumFuturesCompleted()).isZero(); assertThat(completeFuture.getNumFuturesTotal()).isEqualTo(futuresToComplete.size()); final FlinkException testException1 = new FlinkException("Test exception 1"); inputFuture2.completeExceptionally(testException1); assertThat(completeFuture).isNotDone(); assertThat(completeFuture.getNumFuturesCompleted()).isOne(); inputFuture1.complete("foobar"); assertThat(completeFuture).isDone(); assertThat(completeFuture.getNumFuturesCompleted()).isEqualTo(2); assertThatFuture(completeFuture) .eventuallyFailsWith(ExecutionException.class) .withCause(testException1); }
@Override public String toString() { return toStringHelper(this) .add("flushPolicy", flushPolicy) .add("rowGroupMaxRowCount", rowGroupMaxRowCount) .add("dictionaryMaxMemory", dictionaryMaxMemory) .add("dictionaryMemoryAlmostFullRange", dictionaryMemoryAlmostFullRange) .add("dictionaryUsefulCheckPerChunkFrequency", dictionaryUsefulCheckPerChunkFrequency) .add("dictionaryUsefulCheckColumnSize", dictionaryUsefulCheckColumnSize) .add("maxStringStatisticsLimit", maxStringStatisticsLimit) .add("maxCompressionBufferSize", maxCompressionBufferSize) .add("compressionLevel", compressionLevel) .add("streamLayoutFactory", streamLayoutFactory) .add("integerDictionaryEncodingEnabled", integerDictionaryEncodingEnabled) .add("stringDictionarySortingEnabled", stringDictionarySortingEnabled) .add("stringDictionaryEncodingEnabled", stringDictionaryEncodingEnabled) .add("dwrfWriterOptions", dwrfWriterOptions) .add("ignoreDictionaryRowGroupSizes", ignoreDictionaryRowGroupSizes) .add("preserveDirectEncodingStripeCount", preserveDirectEncodingStripeCount) .add("flattenedColumns", flattenedColumns) .add("mapStatisticsEnabled", mapStatisticsEnabled) .add("maxFlattenedMapKeyCount", maxFlattenedMapKeyCount) .toString(); }
/** Pins the exact toString() rendering of a fully-configured OrcWriterOptions. */
@Test
public void testToString() {
    DataSize stripeMinSize = new DataSize(13, MEGABYTE);
    DataSize stripeMaxSize = new DataSize(27, MEGABYTE);
    int stripeMaxRowCount = 1_100_000;
    int rowGroupMaxRowCount = 15_000;
    DataSize dictionaryMaxMemory = new DataSize(13_000, KILOBYTE);
    DataSize dictionaryMemoryRange = new DataSize(1_000, KILOBYTE);
    int dictionaryUsefulCheckPerChunkFrequency = 9_999;
    DataSize dictionaryUsefulCheckColumnSize = new DataSize(1, MEGABYTE);
    DataSize stringMaxStatisticsLimit = new DataSize(128, BYTE);
    DataSize maxCompressionBufferSize = new DataSize(512, KILOBYTE);
    DataSize dwrfStripeCacheMaxSize = new DataSize(4, MEGABYTE);
    DwrfStripeCacheMode dwrfStripeCacheMode = DwrfStripeCacheMode.INDEX_AND_FOOTER;
    OptionalInt compressionLevel = OptionalInt.of(5);
    StreamLayoutFactory streamLayoutFactory = new ColumnSizeLayoutFactory();
    boolean integerDictionaryEncodingEnabled = false;
    boolean stringDictionarySortingEnabled = true;
    int preserveDirectEncodingStripeCount = 0;
    boolean mapStatisticsEnabled = true;
    int maxFlattenedMapKeyCount = 27;

    OrcWriterOptions writerOptions = OrcWriterOptions.builder()
            .withFlushPolicy(DefaultOrcWriterFlushPolicy.builder()
                    .withStripeMinSize(stripeMinSize)
                    .withStripeMaxSize(stripeMaxSize)
                    .withStripeMaxRowCount(stripeMaxRowCount)
                    .build())
            .withRowGroupMaxRowCount(rowGroupMaxRowCount)
            .withDictionaryMaxMemory(dictionaryMaxMemory)
            .withDictionaryMemoryAlmostFullRange(dictionaryMemoryRange)
            .withDictionaryUsefulCheckPerChunkFrequency(dictionaryUsefulCheckPerChunkFrequency)
            .withDictionaryUsefulCheckColumnSize(dictionaryUsefulCheckColumnSize)
            .withMaxStringStatisticsLimit(stringMaxStatisticsLimit)
            .withMaxCompressionBufferSize(maxCompressionBufferSize)
            .withCompressionLevel(compressionLevel)
            .withStreamLayoutFactory(streamLayoutFactory)
            .withIntegerDictionaryEncodingEnabled(integerDictionaryEncodingEnabled)
            .withStringDictionarySortingEnabled(stringDictionarySortingEnabled)
            .withDwrfStripeCacheEnabled(true)
            .withDwrfStripeCacheMaxSize(dwrfStripeCacheMaxSize)
            .withDwrfStripeCacheMode(dwrfStripeCacheMode)
            .withPreserveDirectEncodingStripeCount(preserveDirectEncodingStripeCount)
            .withFlattenedColumns(ImmutableSet.of(4))
            .withMapStatisticsEnabled(mapStatisticsEnabled)
            .withMaxFlattenedMapKeyCount(maxFlattenedMapKeyCount)
            .build();

    // Byte sizes render in the units chosen by DataSize; stripe sizes are shown in bytes.
    String expectedString = "OrcWriterOptions{flushPolicy=DefaultOrcWriterFlushPolicy{stripeMaxRowCount=1100000, " +
            "stripeMinBytes=13631488, stripeMaxBytes=28311552}, rowGroupMaxRowCount=15000, " +
            "dictionaryMaxMemory=13000kB, dictionaryMemoryAlmostFullRange=1000kB, dictionaryUsefulCheckPerChunkFrequency=9999, " +
            "dictionaryUsefulCheckColumnSize=1MB, maxStringStatisticsLimit=128B, maxCompressionBufferSize=512kB, " +
            "compressionLevel=OptionalInt[5], streamLayoutFactory=ColumnSizeLayoutFactory{}, integerDictionaryEncodingEnabled=false, " +
            "stringDictionarySortingEnabled=true, stringDictionaryEncodingEnabled=true, " +
            "dwrfWriterOptions=Optional[DwrfStripeCacheOptions{stripeCacheMode=INDEX_AND_FOOTER, stripeCacheMaxSize=4MB}], " +
            "ignoreDictionaryRowGroupSizes=false, preserveDirectEncodingStripeCount=0, flattenedColumns=[4], mapStatisticsEnabled=true, " +
            "maxFlattenedMapKeyCount=27}";
    assertEquals(expectedString, writerOptions.toString());
}
public static void move(String srcPath, String dstPath) throws IOException { Files.move(Paths.get(srcPath), Paths.get(dstPath), StandardCopyOption.REPLACE_EXISTING); }
@Test public void moveNonExistentFile() throws IOException { // ghostFile is never created, so deleting should fail File ghostFile = new File(mTestFolder.getRoot(), "ghost.txt"); File toFile = mTestFolder.newFile("to.txt"); mException.expect(IOException.class); FileUtils.move(ghostFile.getAbsolutePath(), toFile.getAbsolutePath()); fail("moving a non-existent file should have failed"); }
@Nullable @Override public BlobHttpContent getContent() { return null; }
@Test public void testGetContent() { Assert.assertNull(testBlobChecker.getContent()); }
public ParseResult parse(File file) throws IOException, SchemaParseException { return parse(file, null); }
@Test void testParseFile() throws IOException { Path tempFile = Files.createTempFile("TestSchemaParser", null); Files.write(tempFile, singletonList(SCHEMA_JSON)); Schema schema = new SchemaParser().parse(tempFile.toFile()).mainSchema(); assertEquals(SCHEMA_REAL, schema); }
@Override public boolean isEmpty() { return targetMap.isEmpty(); }
@Test void isEmpty() { Assertions.assertFalse(lowerCaseLinkHashMap.isEmpty()); Assertions.assertTrue(new LowerCaseLinkHashMap<>().isEmpty()); }
@Override public YamlShardingStrategyConfiguration swapToYamlConfiguration(final ShardingStrategyConfiguration data) { YamlShardingStrategyConfiguration result = new YamlShardingStrategyConfiguration(); if (data instanceof StandardShardingStrategyConfiguration) { result.setStandard(createYamlStandardShardingStrategyConfiguration((StandardShardingStrategyConfiguration) data)); } if (data instanceof ComplexShardingStrategyConfiguration) { result.setComplex(createYamlComplexShardingStrategyConfiguration((ComplexShardingStrategyConfiguration) data)); } if (data instanceof HintShardingStrategyConfiguration) { result.setHint(createYamlHintShardingStrategyConfiguration((HintShardingStrategyConfiguration) data)); } if (data instanceof NoneShardingStrategyConfiguration) { result.setNone(new YamlNoneShardingStrategyConfiguration()); } return result; }
@Test void assertSwapToYamlConfigurationForHintShardingStrategy() { ShardingStrategyConfiguration data = new HintShardingStrategyConfiguration("core_hint_fixture"); YamlShardingStrategyConfigurationSwapper swapper = new YamlShardingStrategyConfigurationSwapper(); YamlShardingStrategyConfiguration actual = swapper.swapToYamlConfiguration(data); assertThat(actual.getHint().getShardingAlgorithmName(), is("core_hint_fixture")); }
public static void setCurator(CuratorFramework curator) { CURATOR_TL.set(curator); }
@Test public void testACLs() throws Exception { DelegationTokenManager tm1; String connectString = zkServer.getConnectString(); Configuration conf = getSecretConf(connectString); RetryPolicy retryPolicy = new ExponentialBackoffRetry(1000, 3); String userPass = "myuser:mypass"; final ACL digestACL = new ACL(ZooDefs.Perms.ALL, new Id("digest", DigestAuthenticationProvider.generateDigest(userPass))); ACLProvider digestAclProvider = new ACLProvider() { @Override public List<ACL> getAclForPath(String path) { return getDefaultAcl(); } @Override public List<ACL> getDefaultAcl() { List<ACL> ret = new ArrayList<ACL>(); ret.add(digestACL); return ret; } }; CuratorFramework curatorFramework = CuratorFrameworkFactory.builder() .connectString(connectString) .retryPolicy(retryPolicy) .aclProvider(digestAclProvider) .authorization("digest", userPass.getBytes(StandardCharsets.UTF_8)) .build(); curatorFramework.start(); ZKDelegationTokenSecretManager.setCurator(curatorFramework); tm1 = new DelegationTokenManager(conf, new Text("bla")); tm1.init(); // check ACL String workingPath = conf.get(ZKDelegationTokenSecretManager.ZK_DTSM_ZNODE_WORKING_PATH); verifyACL(curatorFramework, "/" + workingPath, digestACL); tm1.destroy(); ZKDelegationTokenSecretManager.setCurator(null); curatorFramework.close(); }
@Override public Num calculate(BarSeries series, Position position) { return calculateProfit(series, position); }
@Test public void calculateWithOpenedPosition() { MockBarSeries series = new MockBarSeries(numFunction, 100, 95, 100, 80, 85, 70); // with base percentage should return 1 AnalysisCriterion retWithBase = getCriterion(); Position position1 = new Position(); assertNumEquals(1d, retWithBase.calculate(series, position1)); position1.operate(0); assertNumEquals(1d, retWithBase.calculate(series, position1)); // without base percentage should return 0 AnalysisCriterion retWithoutBase = getCriterion(false); Position position2 = new Position(); assertNumEquals(0, retWithoutBase.calculate(series, position2)); position2.operate(0); assertNumEquals(0, retWithoutBase.calculate(series, position2)); }
@Override public RuleNodePath getRuleNodePath() { return INSTANCE; }
@Test void assertNew() { RuleNodePathProvider ruleNodePathProvider = new ShadowRuleNodePathProvider(); RuleNodePath actualRuleNodePath = ruleNodePathProvider.getRuleNodePath(); assertThat(actualRuleNodePath.getNamedItems().size(), is(3)); assertTrue(actualRuleNodePath.getNamedItems().containsKey(ShadowRuleNodePathProvider.SHADOW_ALGORITHMS)); assertTrue(actualRuleNodePath.getNamedItems().containsKey(ShadowRuleNodePathProvider.TABLES)); assertTrue(actualRuleNodePath.getNamedItems().containsKey(ShadowRuleNodePathProvider.DATA_SOURCES)); assertThat(actualRuleNodePath.getUniqueItems().size(), is(1)); assertTrue(actualRuleNodePath.getUniqueItems().containsKey(ShadowRuleNodePathProvider.DEFAULT_SHADOW_ALGORITHM_NAME)); assertThat(actualRuleNodePath.getRoot().getRuleType(), is(ShadowRuleNodePathProvider.RULE_TYPE)); }
public void flipBit(int position) { bitSet.flip(position); }
@Test public static void testFlipBit() { Bitmap bitmap = new Bitmap(4096); for (int i = 0; i < 4096; i++) { bitmap.flipBit(i); assertTrue(bitmap.getBit(i)); bitmap.flipBit(i); assertFalse(bitmap.getBit(i)); bitmap.flipBit(i); assertTrue(bitmap.getBit(i)); } }
@Override public int getTransactionIsolation() { return Connection.TRANSACTION_NONE; }
@Test void assertGetTransactionIsolation() { assertThat(connection.getTransactionIsolation(), is(Connection.TRANSACTION_NONE)); }
public static String trimToType( String string, int trimType ) { switch ( trimType ) { case ValueMetaInterface.TRIM_TYPE_BOTH: return trim( string ); case ValueMetaInterface.TRIM_TYPE_LEFT: return ltrim( string ); case ValueMetaInterface.TRIM_TYPE_RIGHT: return rtrim( string ); case ValueMetaInterface.TRIM_TYPE_NONE: default: return string; } }
@Test public void testTrimToType() { final String source = " trim me hard "; assertEquals( "trim me hard", Const.trimToType( source, ValueMetaInterface.TRIM_TYPE_BOTH ) ); assertEquals( "trim me hard ", Const.trimToType( source, ValueMetaInterface.TRIM_TYPE_LEFT ) ); assertEquals( " trim me hard", Const.trimToType( source, ValueMetaInterface.TRIM_TYPE_RIGHT ) ); assertEquals( source, Const.trimToType( source, ValueMetaInterface.TRIM_TYPE_NONE ) ); }
public OpenAPI read(Class<?> cls) { return read(cls, resolveApplicationPath(), null, false, null, null, new LinkedHashSet<String>(), new ArrayList<Parameter>(), new HashSet<Class<?>>()); }
@Test(description = "Responses with array schema") public void testTicket2340() { Reader reader = new Reader(new OpenAPI()); OpenAPI openAPI = reader.read(Ticket2340Resource.class); String yaml = "openapi: 3.0.1\n" + "paths:\n" + " /test/test:\n" + " post:\n" + " operationId: getAnimal\n" + " requestBody:\n" + " content:\n" + " application/json:\n" + " schema:\n" + " $ref: '#/components/schemas/Animal'\n" + " responses:\n" + " default:\n" + " description: default response\n" + " content:\n" + " application/json:\n" + " schema:\n" + " type: string\n" + "components:\n" + " schemas:\n" + " Animal:\n" + " required:\n" + " - type\n" + " type: object\n" + " properties:\n" + " type:\n" + " type: string\n" + " discriminator:\n" + " propertyName: type\n" + " Cat:\n" + " type: object\n" + " allOf:\n" + " - $ref: '#/components/schemas/Animal'\n" + " - type: object\n" + " properties:\n" + " lives:\n" + " type: integer\n" + " format: int32\n" + " Dog:\n" + " type: object\n" + " allOf:\n" + " - $ref: '#/components/schemas/Animal'\n" + " - type: object\n" + " properties:\n" + " barkVolume:\n" + " type: number\n" + " format: double\n"; SerializationMatchers.assertEqualsToYaml(openAPI, yaml); }
public void connect() throws ConnectException { connect(s -> {}, t -> {}, () -> {}); }
@Test public void testInterruptCurrentThreadIfConnectionIsInterrupted() throws Exception { when(webSocketClient.connectBlocking()).thenThrow(new InterruptedException()); service.connect(); assertTrue(Thread.currentThread().isInterrupted(), "Interrupted flag was not set properly"); }
@Override public Num calculate(BarSeries series, Position position) { return series.one(); }
@Test public void calculateWithOnePosition() { MockBarSeries series = new MockBarSeries(numFunction, 100, 105, 110, 100, 95, 105); Position position = new Position(); AnalysisCriterion positionsCriterion = getCriterion(); assertNumEquals(1, positionsCriterion.calculate(series, position)); }
public static Type convertType(TypeInfo typeInfo) { switch (typeInfo.getOdpsType()) { case BIGINT: return Type.BIGINT; case INT: return Type.INT; case SMALLINT: return Type.SMALLINT; case TINYINT: return Type.TINYINT; case FLOAT: return Type.FLOAT; case DECIMAL: DecimalTypeInfo decimalTypeInfo = (DecimalTypeInfo) typeInfo; return ScalarType.createUnifiedDecimalType(decimalTypeInfo.getPrecision(), decimalTypeInfo.getScale()); case DOUBLE: return Type.DOUBLE; case CHAR: CharTypeInfo charTypeInfo = (CharTypeInfo) typeInfo; return ScalarType.createCharType(charTypeInfo.getLength()); case VARCHAR: VarcharTypeInfo varcharTypeInfo = (VarcharTypeInfo) typeInfo; return ScalarType.createVarcharType(varcharTypeInfo.getLength()); case STRING: case JSON: return ScalarType.createDefaultCatalogString(); case BINARY: return Type.VARBINARY; case BOOLEAN: return Type.BOOLEAN; case DATE: return Type.DATE; case TIMESTAMP: case DATETIME: return Type.DATETIME; case MAP: MapTypeInfo mapTypeInfo = (MapTypeInfo) typeInfo; return new MapType(convertType(mapTypeInfo.getKeyTypeInfo()), convertType(mapTypeInfo.getValueTypeInfo())); case ARRAY: ArrayTypeInfo arrayTypeInfo = (ArrayTypeInfo) typeInfo; return new ArrayType(convertType(arrayTypeInfo.getElementTypeInfo())); case STRUCT: StructTypeInfo structTypeInfo = (StructTypeInfo) typeInfo; List<Type> fieldTypeList = structTypeInfo.getFieldTypeInfos().stream().map(EntityConvertUtils::convertType) .collect(Collectors.toList()); return new StructType(fieldTypeList); default: return Type.VARCHAR; } }
@Test public void testConvertTypeCaseChar() { CharTypeInfo charTypeInfo = TypeInfoFactory.getCharTypeInfo(10); Type result = EntityConvertUtils.convertType(charTypeInfo); Type expectedType = ScalarType.createCharType(10); assertEquals(expectedType, result); }
public static void trimRecordTemplate(RecordTemplate recordTemplate, MaskTree override, final boolean failOnMismatch) { trimRecordTemplate(recordTemplate.data(), recordTemplate.schema(), override, failOnMismatch); }
@Test public void testRecursiveBasic() throws CloneNotSupportedException { LinkedListNode node1 = new LinkedListNode(); node1.setIntField(1); LinkedListNode node2 = new LinkedListNode(); node2.setIntField(2); node1.setNext(node2); RecordTemplate expected = node1.copy(); // Introduce bad elements. node1.data().put("foo", "bar"); node2.data().put("foo", "bar"); Assert.assertEquals(node1.getIntField().intValue(), 1); Assert.assertEquals(node1.getNext(), node2); Assert.assertEquals(node1.data().size(), 3); Assert.assertEquals(node2.getIntField().intValue(), 2); Assert.assertEquals(node2.getNext(), null); Assert.assertEquals(node2.data().size(), 2); RestUtils.trimRecordTemplate(node1, false); Assert.assertEquals(node1, expected); }
@Udf(description = "Converts a string representation of a date in the given format" + " into the number of milliseconds since 1970-01-01 00:00:00 UTC/GMT." + " Single quotes in the timestamp format can be escaped with ''," + " for example: 'yyyy-MM-dd''T''HH:mm:ssX'." + " The system default time zone is used when no time zone is explicitly provided.") public long stringToTimestamp( @UdfParameter( description = "The string representation of a date.") final String formattedTimestamp, @UdfParameter( description = "The format pattern should be in the format expected by" + " java.time.format.DateTimeFormatter.") final String formatPattern) { // NB: We do not perform a null here preferring to throw an exception as // there is no sentinel value for a "null" Date. try { final StringToTimestampParser timestampParser = parsers.get(formatPattern); return timestampParser.parse(formattedTimestamp); } catch (final ExecutionException | RuntimeException e) { throw new KsqlFunctionException("Failed to parse timestamp '" + formattedTimestamp + "' with formatter '" + formatPattern + "': " + e.getMessage(), e); } }
@Test public void shouldThrowIfFormatInvalid() { // When: final KsqlFunctionException e = assertThrows( KsqlFunctionException.class, () -> udf.stringToTimestamp("2021-12-01 12:10:11.123", "invalid") ); // Then: assertThat(e.getMessage(), containsString("Unknown pattern letter: i")); }
public void refresh() { kidElementCache = null; kidDirectoryCache = null; rd.clear(); populateChildren(); try { if ( obj != null ) { getRepositoryObjects(); } } catch ( KettleException ignored ) { // Ignored } fireCollectionChanged(); }
@Test public void testRefresh() throws Exception { RepositoryDirectory rd = Mockito.mock( RepositoryDirectory.class ); Mockito.when( rd.getObjectId() ).thenReturn( new LongObjectId( 0L ) ); UIRepositoryDirectory uiDir = new UIRepositoryDirectory( rd, null, null ); uiDir.populateChildren(); uiDir.getRepositoryObjects(); uiDir.clear(); Mockito.verify( rd ).getChildren(); Mockito.verify( rd ).getRepositoryObjects(); }
public String format() { StringBuilder builder = new StringBuilder(); for (RefeedActions.Entry entry : actions.getEntries()) { builder.append(entry.name() + ": Consider removing data and re-feed document type '" + entry.getDocumentType() + "' in cluster '" + entry.getClusterName() + "' because:\n"); int counter = 1; for (String message : entry.getMessages()) { builder.append(" " + (counter++) + ") " + message + "\n"); } } return builder.toString(); }
@Test public void formatting_of_single_action() { RefeedActions actions = new ConfigChangeActionsBuilder(). refeed(CHANGE_ID, CHANGE_MSG, DOC_TYPE, CLUSTER, SERVICE_NAME). build().getRefeedActions(); assertEquals("field-type-change: Consider removing data and re-feed document type 'music' in cluster 'foo' because:\n" + " 1) change\n", new RefeedActionsFormatter(actions).format()); }
public boolean hasCapacity(Node host, NodeResources requestedCapacity) { return availableCapacityOf(host).satisfies(requestedCapacity); }
@Test public void hasCapacity() { assertTrue(capacity.hasCapacity(host1, resources0)); assertTrue(capacity.hasCapacity(host1, resources1)); assertTrue(capacity.hasCapacity(host2, resources0)); assertTrue(capacity.hasCapacity(host2, resources1)); assertTrue(capacity.hasCapacity(host3, resources0)); assertTrue(capacity.hasCapacity(host3, resources1)); assertFalse(capacity.hasCapacity(host4, resources0)); // No IPs available assertFalse(capacity.hasCapacity(host4, resources1)); // No IPs available // Add a new node to host1 to deplete the memory resource Node nodeF = Node.reserve(List.of("::6"), "nodeF", "host1", resources0, NodeType.tenant).build(); nodes.add(nodeF); capacity = new HostCapacity(new LockedNodeList(nodes, () -> {}), hostResourcesCalculator); assertFalse(capacity.hasCapacity(host1, resources0)); assertFalse(capacity.hasCapacity(host1, resources1)); }
public boolean checkStateUpdater(final long now, final java.util.function.Consumer<Set<TopicPartition>> offsetResetter) { addTasksToStateUpdater(); if (stateUpdater.hasExceptionsAndFailedTasks()) { handleExceptionsFromStateUpdater(); } if (stateUpdater.restoresActiveTasks()) { handleRestoredTasksFromStateUpdater(now, offsetResetter); } return !stateUpdater.restoresActiveTasks() && !tasks.hasPendingTasksToInit(); }
@Test public void shouldReturnFalseFromCheckStateUpdaterIfActiveTasksAreRestoring() { when(stateUpdater.restoresActiveTasks()).thenReturn(true); final TasksRegistry tasks = mock(TasksRegistry.class); final TaskManager taskManager = setUpTaskManager(ProcessingMode.AT_LEAST_ONCE, tasks, true); assertFalse(taskManager.checkStateUpdater(time.milliseconds(), noOpResetter)); }
@Override public void handlerRule(final RuleData ruleData) { Optional.ofNullable(ruleData.getHandle()).ifPresent(s -> { SpringCloudRuleHandle springCloudRuleHandle = GsonUtils.getInstance().fromJson(s, SpringCloudRuleHandle.class); RULE_CACHED.get().cachedHandle(CacheKeyUtils.INST.getKey(ruleData), springCloudRuleHandle); }); }
@Test public void testHandlerRule() { ruleData.setSelectorId("1"); ruleData.setHandle("{\"urlPath\":\"test\"}"); ruleData.setId("test"); springCloudPluginDataHandler.handlerRule(ruleData); Supplier<CommonHandleCache<String, SpringCloudRuleHandle>> cache = SpringCloudPluginDataHandler.RULE_CACHED; Assertions.assertNotEquals(cache.get().obtainHandle("1_test"), null); }
public static Map<String, Object> getTopologySummary(TopologyPageInfo topologyPageInfo, String window, Map<String, Object> config, String remoteUser) { Map<String, Object> result = new HashMap(); Map<String, Object> topologyConf = (Map<String, Object>) JSONValue.parse(topologyPageInfo.get_topology_conf()); int messageTimeout = (int) topologyConf.get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS); Map<String, Object> unpackedTopologyPageInfo = unpackTopologyInfo(topologyPageInfo, window, config); result.putAll(unpackedTopologyPageInfo); result.put("user", remoteUser); result.put("window", window); result.put("windowHint", getWindowHint(window)); result.put("msgTimeout", messageTimeout); result.put("configuration", topologyConf); result.put("visualizationTable", new ArrayList()); result.put("schedulerDisplayResource", config.get(DaemonConfig.SCHEDULER_DISPLAY_RESOURCE)); result.put("bugtracker-url", config.get(DaemonConfig.UI_PROJECT_BUGTRACKER_URL)); result.put("central-log-url", config.get(DaemonConfig.UI_CENTRAL_LOGGING_URL)); return result; }
@Test void test_getTopologyBoltAggStatsMap_hasNoLastError() { // Define inputs final String expectedBoltId = "MyBoltId"; // Build stats instance for our bolt final ComponentAggregateStats aggregateStats = buildBoltAggregateStatsBase(); addBoltStats(expectedBoltId, aggregateStats); // Call method under test. final Map<String, Object> result = UIHelpers.getTopologySummary( topoPageInfo, WINDOW, new HashMap<>(), "spp" ); // Validate assertNotNull(result, "Should never return null"); // Validate our Bolt result final Map<String, Object> boltResult = getBoltStatsFromTopologySummaryResult(result, expectedBoltId); assertNotNull(boltResult, "Should have an entry for bolt"); // Verify each piece assertEquals(expectedBoltId, boltResult.get("boltId")); assertEquals(expectedBoltId, boltResult.get("encodedBoltId")); // Verify error fields exist, but are not populated. // These fields default to empty string. assertTrue(boltResult.containsKey("lastError")); assertEquals("", boltResult.get("lastError"), "Backwards compat. with API docs say this should be empty string when empty"); assertTrue(boltResult.containsKey("errorHost")); assertEquals("", boltResult.get("errorHost")); assertTrue(boltResult.containsKey("errorWorkerLogLink")); assertEquals("", boltResult.get("errorWorkerLogLink")); // These fields default to null. assertTrue(boltResult.containsKey("errorPort")); assertNull(boltResult.get("errorPort")); assertTrue(boltResult.containsKey("errorTime")); assertNull(boltResult.get("errorTime")); assertTrue(boltResult.containsKey("errorLapsedSecs")); assertNull(boltResult.get("errorLapsedSecs")); }
/**
 * Flattens a set of possibly-overlapping offset ranges into disjoint,
 * non-overlapping ranges, mapping each to the number of input ranges that
 * cover it. Empty input ranges are ignored.
 *
 * @param ranges the (possibly overlapping) input ranges
 * @return sorted map from disjoint range to its coverage count
 */
@VisibleForTesting
static SortedMap<OffsetRange, Integer> computeOverlappingRanges(Iterable<OffsetRange> ranges) {
  ImmutableSortedMap.Builder<OffsetRange, Integer> rval =
      ImmutableSortedMap.orderedBy(OffsetRangeComparator.INSTANCE);
  List<OffsetRange> sortedRanges = Lists.newArrayList(ranges);
  if (sortedRanges.isEmpty()) {
    return rval.build();
  }
  Collections.sort(sortedRanges, OffsetRangeComparator.INSTANCE);

  // Stores ranges in smallest 'from' and then smallest 'to' order
  // e.g. [2, 7), [3, 4), [3, 5), [3, 5), [3, 6), [4, 0)
  PriorityQueue<OffsetRange> rangesWithSameFrom =
      new PriorityQueue<>(OffsetRangeComparator.INSTANCE);
  Iterator<OffsetRange> iterator = sortedRanges.iterator();

  // Stored in reverse sorted order so that when we iterate and re-add them back to
  // overlappingRanges they are stored in sorted order from smallest to largest range.to
  List<OffsetRange> rangesToProcess = new ArrayList<>();
  while (iterator.hasNext()) {
    OffsetRange current = iterator.next();
    // Skip empty ranges
    if (current.getFrom() == current.getTo()) {
      continue;
    }

    // If the current range has a different 'from' then a prior range then we must produce
    // ranges in [rangesWithSameFrom.from, current.from)
    while (!rangesWithSameFrom.isEmpty()
        && rangesWithSameFrom.peek().getFrom() != current.getFrom()) {
      rangesToProcess.addAll(rangesWithSameFrom);
      Collections.sort(rangesToProcess, OffsetRangeComparator.INSTANCE);
      rangesWithSameFrom.clear();

      int i = 0;
      long lastTo = rangesToProcess.get(i).getFrom();

      // Output all the ranges that are strictly less then current.from
      // e.g. current.from := 7 for [3, 4), [3, 5), [3, 5), [3, 6) will produce
      // [3, 4) := 4
      // [4, 5) := 3
      // [5, 6) := 1
      for (; i < rangesToProcess.size(); ++i) {
        if (rangesToProcess.get(i).getTo() > current.getFrom()) {
          break;
        }
        // Output only the first of any subsequent duplicate ranges
        if (i == 0 || rangesToProcess.get(i - 1).getTo() != rangesToProcess.get(i).getTo()) {
          rval.put(
              new OffsetRange(lastTo, rangesToProcess.get(i).getTo()),
              rangesToProcess.size() - i);
          lastTo = rangesToProcess.get(i).getTo();
        }
      }

      // We exited the loop with 'to' > current.from, we must add the range [lastTo,
      // current.from) if it is non-empty
      if (lastTo < current.getFrom() && i != rangesToProcess.size()) {
        rval.put(new OffsetRange(lastTo, current.getFrom()), rangesToProcess.size() - i);
      }

      // The remaining ranges have a 'to' that is greater then 'current.from' and will overlap
      // with current so add them back to rangesWithSameFrom with the updated 'from'
      for (; i < rangesToProcess.size(); ++i) {
        rangesWithSameFrom.add(
            new OffsetRange(current.getFrom(), rangesToProcess.get(i).getTo()));
      }

      rangesToProcess.clear();
    }
    rangesWithSameFrom.add(current);
  }

  // Process the last chunk of overlapping ranges
  while (!rangesWithSameFrom.isEmpty()) {
    // This range always represents the range with the smallest 'to'
    OffsetRange current = rangesWithSameFrom.remove();

    rangesToProcess.addAll(rangesWithSameFrom);
    Collections.sort(rangesToProcess, OffsetRangeComparator.INSTANCE);
    rangesWithSameFrom.clear();

    rval.put(current, rangesToProcess.size() + 1 /* include current */);

    // Shorten all the remaining ranges such that they start with current.to
    for (OffsetRange rangeWithDifferentFrom : rangesToProcess) {
      // Skip any duplicates of current
      if (rangeWithDifferentFrom.getTo() > current.getTo()) {
        rangesWithSameFrom.add(new OffsetRange(current.getTo(), rangeWithDifferentFrom.getTo()));
      }
    }
    rangesToProcess.clear();
  }
  return rval.build();
}
@Test public void testOverlappingFroms() { Iterable<OffsetRange> ranges = Arrays.asList(range(0, 4), range(0, 8), range(0, 12)); Map<OffsetRange, Integer> nonOverlappingRangesToNumElementsPerPosition = computeOverlappingRanges(ranges); assertEquals( ImmutableMap.builder().put(range(0, 4), 3).put(range(4, 8), 2).put(range(8, 12), 1).build(), nonOverlappingRangesToNumElementsPerPosition); assertNonEmptyRangesAndPositions(ranges, nonOverlappingRangesToNumElementsPerPosition); }
@Subscribe public void onPostMenuSort(PostMenuSort postMenuSort) { // The menu is not rebuilt when it is open, so don't swap or else it will // repeatedly swap entries if (client.isMenuOpen()) { return; } MenuEntry[] menuEntries = client.getMenuEntries(); // Build option map for quick lookup in findIndex int idx = 0; optionIndexes.clear(); for (MenuEntry entry : menuEntries) { String option = Text.removeTags(entry.getOption()).toLowerCase(); optionIndexes.put(option, idx++); } // Perform swaps idx = 0; for (MenuEntry entry : menuEntries) { swapMenuEntry(null, menuEntries, idx++, entry); } if (config.removeDeadNpcMenus()) { removeDeadNpcs(); } }
@Test public void testSlayerMaster() { lenient().when(config.swapTrade()).thenReturn(true); when(config.swapAssignment()).thenReturn(true); entries = new MenuEntry[]{ menu("Cancel", "", MenuAction.CANCEL), menu("Rewards", "Duradel", MenuAction.NPC_FIFTH_OPTION), menu("Trade", "Duradel", MenuAction.NPC_FOURTH_OPTION), menu("Assignment", "Duradel", MenuAction.NPC_THIRD_OPTION), menu("Talk-to", "Duradel", MenuAction.NPC_FIRST_OPTION), }; menuEntrySwapperPlugin.onPostMenuSort(new PostMenuSort()); ArgumentCaptor<MenuEntry[]> argumentCaptor = ArgumentCaptor.forClass(MenuEntry[].class); verify(client).setMenuEntries(argumentCaptor.capture()); // check the assignment swap is hit first instead of trade assertArrayEquals(new MenuEntry[]{ menu("Cancel", "", MenuAction.CANCEL), menu("Rewards", "Duradel", MenuAction.NPC_FIFTH_OPTION), menu("Trade", "Duradel", MenuAction.NPC_FOURTH_OPTION), menu("Talk-to", "Duradel", MenuAction.NPC_FIRST_OPTION), menu("Assignment", "Duradel", MenuAction.NPC_THIRD_OPTION), }, argumentCaptor.getValue()); }
public static String processPattern(String pattern, TbMsg tbMsg) {
    try {
        // Resolve ${...} metadata templates first, then $[...] data templates.
        String resolved = processPattern(pattern, tbMsg.getMetaData());
        JsonNode data = JacksonUtil.toJsonNode(tbMsg.getData());
        if (!data.isObject()) {
            // Non-object payloads cannot be addressed by dotted paths.
            return resolved;
        }
        Matcher matcher = DATA_PATTERN.matcher(resolved);
        while (matcher.find()) {
            String path = matcher.group(2);
            JsonNode node = data;
            // Walk the dotted path; an empty segment or a dead end yields null.
            for (String segment : path.split("\\.")) {
                if (StringUtils.isEmpty(segment) || node == null) {
                    node = null;
                    break;
                }
                node = node.get(segment);
            }
            // Only scalar leaves are substituted; objects/arrays are left as-is.
            if (node != null && node.isValueNode()) {
                resolved = resolved.replace(formatDataVarTemplate(path), node.asText());
            }
        }
        return resolved;
    } catch (Exception e) {
        throw new RuntimeException("Failed to process pattern!", e);
    }
}
@Test
public void testComplexObjectReplacement() {
    String pattern = "ABC ${key} $[key1.key2.key3]";

    TbMsgMetaData metaData = new TbMsgMetaData();
    metaData.putValue("key", "metadata_value");

    // Build the nested payload {"key1":{"key2":{"key3":"value3"}}} inside out.
    ObjectNode innermost = JacksonUtil.newObjectNode();
    innermost.put("key3", "value3");
    ObjectNode middle = JacksonUtil.newObjectNode();
    middle.set("key2", innermost);
    ObjectNode payload = JacksonUtil.newObjectNode();
    payload.set("key1", middle);

    TbMsg msg = TbMsg.newMsg(TbMsgType.POST_TELEMETRY_REQUEST, TenantId.SYS_TENANT_ID,
            metaData, JacksonUtil.toString(payload));

    // Both the metadata template and the nested data template must resolve.
    Assertions.assertEquals("ABC metadata_value value3",
            TbNodeUtils.processPattern(pattern, msg));
}
/**
 * Converts the given query parameters to a {@link DataMap} using defaults:
 * no per-key coercion classes, protocol version 1.0.0, and the stock
 * projection DataMap serializer.
 *
 * @param queryParams the query parameters to convert
 * @return the converted DataMap
 */
public static DataMap convertToDataMap(Map<String, Object> queryParams) {
    return convertToDataMap(queryParams, Collections.<String, Class<?>>emptyMap(), AllProtocolVersions.RESTLI_PROTOCOL_1_0_0.getProtocolVersion(), RestLiProjectionDataMapSerializer.DEFAULT_SERIALIZER);
}
@Test (expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "Map key '1' is not of type String")
public void testNonStringKeyToDataMap() {
    // A nested map carrying a non-String key must be rejected with an IAE.
    Map<Object, Object> nonStringKeyed = new HashMap<>();
    nonStringKeyed.put(1, "numeric key");

    Map<String, Object> queryParams = new HashMap<>();
    queryParams.put("hashMapParam", nonStringKeyed);

    QueryParamsUtil.convertToDataMap(queryParams);
}
@Override
public boolean equals(Object o) {
    // Identity short-circuit.
    if (o == this) {
        return true;
    }
    // Strict class match: subclasses are never equal to this type.
    if (o == null || o.getClass() != getClass()) {
        return false;
    }
    PiExactFieldMatch other = (PiExactFieldMatch) o;
    return Objects.equal(fieldId(), other.fieldId())
            && Objects.equal(value, other.value);
}
/**
 * Verifies the equals()/hashCode() contract with Guava's EqualsTester:
 * piExactFieldMatch1 and its copy form one equality group, while
 * piExactFieldMatch2 forms a distinct one.
 */
@Test
public void testEquals() {
    new EqualsTester()
            .addEqualityGroup(piExactFieldMatch1, sameAsPiExactFieldMatch1)
            .addEqualityGroup(piExactFieldMatch2)
            .testEquals();
}
/**
 * Replaces the process-wide login user with the given UGI.
 *
 * <p>Intended for testing only; marked unstable.
 *
 * @param ugi the UserGroupInformation to install as the login user
 */
@InterfaceAudience.Private
@InterfaceStability.Unstable
@VisibleForTesting
public static void setLoginUser(UserGroupInformation ugi) {
    // if this is to become stable, should probably logout the currently
    // logged in ugi if it's different
    loginUserRef.set(ugi);
}
@Test(timeout=10000)
public void testSetLoginUser() throws IOException {
    // Installing a login user must be reflected by getLoginUser().
    UserGroupInformation remoteUser = UserGroupInformation.createRemoteUser("test-user");
    UserGroupInformation.setLoginUser(remoteUser);
    assertEquals(remoteUser, UserGroupInformation.getLoginUser());
}
/**
 * Finds a method on the given class by name and parameter types, delegating
 * to {@link ReflectUtil#getMethod(Class, String, Class[])}.
 *
 * @param clazz the class to search
 * @param methodName the method name
 * @param parameterTypes the parameter types of the target method
 * @return the matching Method, or null if not found (per ReflectUtil.getMethod)
 * @throws SecurityException if reflective access is denied
 */
public static Method getDeclaredMethod(Class<?> clazz, String methodName, Class<?>... parameterTypes) throws SecurityException {
    return ReflectUtil.getMethod(clazz, methodName, parameterTypes);
}
@Test
public void getDeclaredMethod() {
    // A method that exists nowhere in the hierarchy resolves to null.
    assertNull(ClassUtil.getDeclaredMethod(TestSubClass.class, "noMethod"));

    // Declared and inherited methods, public or private, are all resolvable.
    String[] resolvable = {
        "privateMethod", "publicMethod", "publicSubMethod", "privateSubMethod"
    };
    for (String name : resolvable) {
        assertNotNull(ClassUtil.getDeclaredMethod(TestSubClass.class, name));
    }
}
/**
 * Sets the dynamic flag on this builder.
 *
 * @param dynamic the value to set; may be null to leave the flag unset
 * @return this builder, for chaining
 */
public B dynamic(Boolean dynamic) {
    this.dynamic = dynamic;
    return getThis();
}
@Test
void dynamic() {
    // The flag set on the builder must round-trip through build().
    ServiceBuilder builder = new ServiceBuilder();
    Assertions.assertTrue(builder.dynamic(true).build().isDynamic());
    Assertions.assertFalse(builder.dynamic(false).build().isDynamic());
}
public static ApplicationBadgeLabeler get() {
    // Badge labeling is opt-in via the "queue.dock.badge" preference;
    // otherwise a no-op implementation is returned.
    return PreferencesFactory.get().getBoolean("queue.dock.badge")
            ? new ApplicationBadgeLabelerFactory().create()
            : new DisabledApplicationBadgeLabeler();
}
/**
 * Smoke test: the factory must always yield a non-null labeler, whichever
 * implementation the preference selects.
 */
@Test
public void testGet() {
    assertNotNull(ApplicationBadgeLabelerFactory.get());
}
/**
 * Computes the effective predicate of the given plan node by visiting it
 * with a fresh Visitor over the configured domain translator and
 * function/type manager.
 *
 * @param node the plan node to analyze
 * @return the row expression produced by the visitor for this node
 */
public RowExpression extract(PlanNode node) {
    return node.accept(new Visitor(domainTranslator, functionAndTypeManager), null);
}
@Test public void testProject() { PlanNode node = new ProjectNode(newId(), filter(baseTableScan, and( equals(AV, BV), equals(BV, CV), lessThan(CV, bigintLiteral(10)))), assignment(DV, AV, EV, CV)); RowExpression effectivePredicate = effectivePredicateExtractor.extract(node); // Rewrite in terms of project output symbols assertEquals(normalizeConjuncts(effectivePredicate), normalizeConjuncts( lessThan(DV, bigintLiteral(10)), equals(DV, EV))); }
/**
 * Returns the underlying {@link RemoteCacheManager} this instance wraps.
 *
 * @return the native cache manager held by this object
 */
public RemoteCacheManager getNativeCacheManager() {
    return this.nativeCacheManager;
}
@Test
public final void getNativeCacheShouldReturnTheRemoteCacheManagerSuppliedAtConstructionTime() {
    // The accessor must hand back the exact instance, not a copy or wrapper.
    assertSame(
            "getNativeCacheManager() should have returned the RemoteCacheManager supplied at construction time. However, it retuned a different one.",
            remoteCacheManager,
            objectUnderTest.getNativeCacheManager());
}
/**
 * Convenience hook for items arriving on ordinal 1; delegates to
 * {@link #tryProcess(int, Object)} with ordinal 1.
 *
 * @param item the received item
 * @return whatever tryProcess(1, item) returns
 * @throws Exception if processing fails
 */
protected boolean tryProcess1(@Nonnull Object item) throws Exception {
    return tryProcess(1, item);
}
@Test public void when_tryProcess1_then_delegatesToTryProcess() throws Exception { // When boolean done = p.tryProcess1(MOCK_ITEM); // Then assertTrue(done); p.validateReceptionOfItem(ORDINAL_1, MOCK_ITEM); }
public FederationPolicyManager getPolicyManager(String queueName) throws YarnException { FederationPolicyManager policyManager = policyManagerMap.get(queueName); // If we don't have the policy manager cached, pull configuration // from the FederationStateStore to create and cache it if (policyManager == null) { try { // If we don't have the configuration cached, pull it // from the stateStore SubClusterPolicyConfiguration conf = policyConfMap.get(queueName); if (conf == null) { conf = stateStore.getPolicyConfiguration(queueName); } // If configuration is still null, it does not exist in the // FederationStateStore if (conf == null) { LOG.info("Read null policy for queue {}.", queueName); return null; } // Generate PolicyManager based on PolicyManagerType. String policyManagerType = conf.getType(); policyManager = FederationPolicyUtils.instantiatePolicyManager(policyManagerType); policyManager.setQueue(queueName); // If PolicyManager supports Weighted PolicyInfo, it means that // we need to use this parameter to determine which sub-cluster the router goes to // or which sub-cluster the container goes to. if (policyManager.isSupportWeightedPolicyInfo()) { ByteBuffer weightedPolicyInfoParams = conf.getParams(); if (weightedPolicyInfoParams == null) { LOG.warn("Warning: Queue = {}, FederationPolicyManager {} WeightedPolicyInfo is empty.", queueName, policyManagerType); return null; } WeightedPolicyInfo weightedPolicyInfo = WeightedPolicyInfo.fromByteBuffer(conf.getParams()); policyManager.setWeightedPolicyInfo(weightedPolicyInfo); } else { LOG.warn("Warning: FederationPolicyManager of unsupported WeightedPolicyInfo type {}, " + "initialization may be incomplete.", policyManager.getClass()); } policyManagerMap.put(queueName, policyManager); policyConfMap.put(queueName, conf); } catch (YarnException e) { LOG.error("Error reading SubClusterPolicyConfiguration from state " + "store for queue: {}", queueName); throw e; } } return policyManager; }
@Test
public void testGetWeightedHomePolicyManager() throws YarnException {
    stateStore = new MemoryFederationStateStore();
    stateStore.init(new Configuration());

    // root.b uses WeightedHomePolicyManager.
    // Step1. Prepare router-policy weights: SC-1 -> 0.8, SC-2 -> 0.2.
    Map<SubClusterIdInfo, Float> routerWeights = new HashMap<>();
    routerWeights.put(new SubClusterIdInfo("SC-1"), 0.8f);
    routerWeights.put(new SubClusterIdInfo("SC-2"), 0.2f);
    WeightedPolicyInfo policyInfo = new WeightedPolicyInfo();
    policyInfo.setHeadroomAlpha(1);
    policyInfo.setRouterPolicyWeights(routerWeights);

    // Step2. Persist the policy configuration for root.b.
    String managerType = WeightedHomePolicyManager.class.getName();
    SubClusterPolicyConfiguration config =
        SubClusterPolicyConfiguration.newInstance("root.b", managerType, policyInfo.toByteBuffer());
    stateStore.setPolicyConfiguration(SetSubClusterPolicyConfigurationRequest.newInstance(config));

    // Step3. Load the FederationPolicyManager through the facade.
    facade.reinitialize(stateStore, conf);
    policyFacade = new GPGPolicyFacade(facade, conf);
    FederationPolicyManager policyManager = policyFacade.getPolicyManager("root.b");
    Assert.assertNotNull(policyManager);
    Assert.assertTrue(policyManager.isSupportWeightedPolicyInfo());
    WeightedPolicyInfo loadedInfo = policyManager.getWeightedPolicyInfo();
    Assert.assertNotNull(loadedInfo);

    // Step4. AMRM weights were never set, so they come back empty.
    Map<SubClusterIdInfo, Float> amrmWeights = loadedInfo.getAMRMPolicyWeights();
    Assert.assertNotNull(amrmWeights);
    Assert.assertEquals(0, amrmWeights.size());

    // Step5. Router weights round-trip with the values set in Step1.
    Map<SubClusterIdInfo, Float> loadedRouterWeights = loadedInfo.getRouterPolicyWeights();
    Assert.assertNotNull(loadedRouterWeights);
    Float sc1Weight = loadedRouterWeights.get(new SubClusterIdInfo("SC-1"));
    Float sc2Weight = loadedRouterWeights.get(new SubClusterIdInfo("SC-2"));
    Assert.assertEquals(0.8, sc1Weight, 0.001);
    Assert.assertEquals(0.2, sc2Weight, 0.001);
}
/**
 * Returns the configured GCS upload buffer size in bytes, or null when no
 * explicit size was set.
 */
@VisibleForTesting
@Nullable
Integer getUploadBufferSizeBytes() {
    return uploadBufferSizeBytes;
}
@Test
public void testUploadBufferSizeUserSpecified() {
    // A user-specified buffer size must be propagated into the GcsUtil.
    GcsOptions options = gcsOptionsWithTestCredential();
    options.setGcsUploadBufferSizeBytes(12345);
    assertEquals((Integer) 12345, options.getGcsUtil().getUploadBufferSizeBytes());
}
/**
 * Always returns false: no failure is treated as terminal by this adapter,
 * regardless of the throwable passed in.
 */
@Override
public boolean isTerminal(Throwable failure) {
    return false;
}
@Test
public void isTerminal() {
    ReliableMessageListenerAdapter<String> adapter =
            new ReliableMessageListenerAdapter<>(createMessageListenerMock());

    // The adapter never treats a failure as terminal, whatever its type.
    assertFalse(adapter.isTerminal(new RuntimeException()));
    assertFalse(adapter.isTerminal(new Exception()));
}
/**
 * Removes the named variable from this context's local variable map.
 *
 * @param name the variable name
 * @return the previous value, or null if the variable was not present
 */
@Override
public Object removeVariableLocally(String name) {
    return variables.remove(name);
}
@Test
public void testRemoveVariableLocally() {
    ProcessContextImpl ctx = new ProcessContextImpl();
    ctx.setVariable("key", "value");

    ctx.removeVariableLocally("key");

    // The only variable was removed, so the context must be empty.
    Assertions.assertEquals(0, ctx.getVariables().size());
}
/**
 * Maps an allocation size in bytes to its size-class index, delegating to
 * the arena's {@code sizeClass}.
 *
 * @param size requested size in bytes
 * @return the size-class index for {@code size}
 */
@Override
public int size2SizeIdx(int size) {
    return sizeClass.size2SizeIdx(size);
}
@Test
public void testSize2SizeIdx() {
    SizeClasses sizeClasses = new SizeClasses(PAGE_SIZE, PAGE_SHIFTS, CHUNK_SIZE, 0);
    PoolArena<ByteBuffer> arena = new PoolArena.DirectArena(null, sizeClasses);

    // Every size up to the chunk size must map to the smallest class that fits.
    for (int size = 0; size <= CHUNK_SIZE; size++) {
        int idx = arena.sizeClass.size2SizeIdx(size);
        // The chosen class is large enough...
        assertTrue(size <= arena.sizeClass.sizeIdx2size(idx));
        // ...and the next-smaller class (if any) is too small.
        if (idx > 0) {
            assertTrue(size > arena.sizeClass.sizeIdx2size(idx - 1));
        }
    }
}
public String getQualifiedName() {
    StringBuilder qualifiedName = new StringBuilder();
    // Optional owner prefix, e.g. "tbl." in "tbl.col".
    if (null != owner) {
        qualifiedName.append(owner.getIdentifier().getValueWithQuoteCharacters()).append('.');
    }
    qualifiedName.append(identifier.getValueWithQuoteCharacters());
    // Optional nested-object suffix, e.g. ".attr1.attr2".
    if (null != nestedObjectAttributes) {
        for (IdentifierValue each : nestedObjectAttributes) {
            qualifiedName.append('.').append(each.getValueWithQuoteCharacters());
        }
    }
    return qualifiedName.toString();
}
@Test
void assertGetQualifiedNameWithoutOwner() {
    // A bare column with no owner qualifies as just its own name.
    ColumnSegment columnSegment = new ColumnSegment(0, 0, new IdentifierValue("col"));
    assertThat(columnSegment.getQualifiedName(), is("col"));
}