focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
@Udf(description = "Converts a string representation of a date in the given format" + " into the number of milliseconds since 1970-01-01 00:00:00 UTC/GMT." + " Single quotes in the timestamp format can be escaped with ''," + " for example: 'yyyy-MM-dd''T''HH:mm:ssX'." + " The system default time zone is used when no time zone is explicitly provided.") public long stringToTimestamp( @UdfParameter( description = "The string representation of a date.") final String formattedTimestamp, @UdfParameter( description = "The format pattern should be in the format expected by" + " java.time.format.DateTimeFormatter.") final String formatPattern) { // NB: We do not perform a null here preferring to throw an exception as // there is no sentinel value for a "null" Date. try { final StringToTimestampParser timestampParser = parsers.get(formatPattern); return timestampParser.parse(formattedTimestamp); } catch (final ExecutionException | RuntimeException e) { throw new KsqlFunctionException("Failed to parse timestamp '" + formattedTimestamp + "' with formatter '" + formatPattern + "': " + e.getMessage(), e); } }
@Test public void shouldThrowIfParseFails() { // When: final KsqlFunctionException e = assertThrows( KsqlFunctionException.class, () -> udf.stringToTimestamp("invalid", "yyyy-MM-dd'T'HH:mm:ss.SSS") ); // Then: assertThat(e.getMessage(), containsString("Text 'invalid' could not be parsed at index 0")); }
@Override
public Date getScheduledDate() {
    // No persisted schedule time for this model; report the current instant.
    return new Date(System.currentTimeMillis());
}
@Test
public void shouldNotReturnNullForScheduledDate() {
    final PipelineInstanceModel preparing =
            PipelineInstanceModel.createPreparingToSchedule("pipeline-name", new StageInstanceModels());
    // Even a not-yet-scheduled pipeline must report some scheduled date.
    assertThat(preparing.getScheduledDate(), is(not(nullValue())));
}
/**
 * Deletes the given keys, returning the number of keys removed, or
 * {@code null} when running inside a transaction/pipeline (the commands are
 * only queued then, so no aggregated count is available yet).
 */
@Override
public Long del(byte[]... keys) {
    if (isQueueing() || isPipelined()) {
        for (byte[] key: keys) {
            write(key, LongCodec.INSTANCE, RedisCommands.DEL, key);
        }
        return null;
    }
    // Otherwise issue one DEL per key in a single batch round-trip and sum
    // the per-key reply counts.
    // NOTE(review): the queued path uses LongCodec while the batch path uses
    // StringCodec - presumably both decode DEL's integer reply; confirm intended.
    CommandBatchService es = new CommandBatchService(executorService);
    for (byte[] key: keys) {
        es.writeAsync(key, StringCodec.INSTANCE, RedisCommands.DEL, key);
    }
    BatchResult<Long> b = (BatchResult<Long>) es.execute();
    return b.getResponses().stream().collect(Collectors.summarizingLong(v -> v)).getSum();
}
@Test
public void testDel() {
    // Store ten keys, then delete them all in one call.
    final List<byte[]> stored = new ArrayList<>();
    for (int i = 0; i < 10; i++) {
        final byte[] key = ("test" + i).getBytes();
        stored.add(key);
        connection.set(key, ("test" + i).getBytes());
    }
    assertThat(connection.del(stored.toArray(new byte[0][]))).isEqualTo(10);
}
public QueueConnection queueConnection(QueueConnection connection) { // It is common to implement both interfaces if (connection instanceof XAQueueConnection) { return xaQueueConnection((XAQueueConnection) connection); } return TracingConnection.create(connection, this); }
@Test
void queueConnection_doesntDoubleWrap() {
  final QueueConnection once = jmsTracing.queueConnection(mock(QueueConnection.class));
  // Wrapping an already-traced connection must hand back the same instance.
  assertThat(jmsTracing.queueConnection(once)).isSameAs(once);
}
/**
 * Analyzes the given statement using the default mode (delegates to the
 * two-argument overload with its boolean flag set to {@code false}).
 */
public Analysis analyze(Statement statement) {
    return analyze(statement, false);
}
@Test
public void testUnnestInnerScalarAlias() {
    // The inner scalar alias "a" must be resolvable as the UNNEST argument.
    analyze("SELECT * FROM (SELECT array[1,2] a) a CROSS JOIN UNNEST(a) AS T(x)");
}
/**
 * Intercepts an Apache HttpClient call before execution, re-routing eligible
 * requests through the plugin's invoker service.
 */
@Override
public ExecuteContext doBefore(ExecuteContext context) {
    LogUtils.printHttpRequestBeforePoint(context);
    final InvokerService invokerService = PluginServiceManager.getPluginService(InvokerService.class);
    HttpHost httpHost = (HttpHost) context.getArguments()[0];
    final HttpRequest httpRequest = (HttpRequest) context.getArguments()[1];
    // Only intercept requests whose host matches the configured realm name.
    if (!PlugEffectWhiteBlackUtils.isHostEqualRealmName(httpHost.getHostName())) {
        return context;
    }
    final Map<String, String> hostAndPath = RequestInterceptorUtils.recoverUrl(httpRequest.getRequestLine()
            .getUri());
    if (hostAndPath.isEmpty()) {
        return context;
    }
    // Skip services excluded by the plug-effect white/black list.
    if (!PlugEffectWhiteBlackUtils.isPlugEffect(hostAndPath.get(HttpConstants.HTTP_URI_SERVICE))) {
        return context;
    }
    RequestInterceptorUtils.printRequestLog("HttpClient", hostAndPath);
    // Re-route through the invoker; any result is written back into the
    // execute context (or rethrown) by setResultOrThrow.
    invokerService.invoke(
            buildInvokerFunc(hostAndPath, httpRequest, context),
            buildExFunc(httpRequest, Thread.currentThread().getContextClassLoader()),
            hostAndPath.get(HttpConstants.HTTP_URI_SERVICE))
            .ifPresent(result -> this.setResultOrThrow(context, result, hostAndPath.get(HttpConstants.HTTP_URI_PATH)));
    return context;
}
@Test
public void doBefore() throws Exception {
    final HttpClient4xInterceptor interceptor = new HttpClient4xInterceptor();
    interceptor.ready();
    final ExecuteContext executeContext = buildContext();
    PlugEffectStrategyCache.INSTANCE.resolve(DynamicConfigEventType.CREATE, "");
    // Drive the interceptor through its full lifecycle.
    interceptor.doBefore(executeContext);
    interceptor.onThrow(executeContext);
    interceptor.after(executeContext);
}
synchronized void add(int splitCount) {
    // Ring buffer: overwrite the oldest slot once the array has wrapped.
    history[count % history.length] = splitCount;
    count++;
}
@Test
public void testNotFullHistory() {
    final EnumerationHistory history = new EnumerationHistory(3);
    history.add(1);
    history.add(2);
    // Only two of three slots are filled; the snapshot holds just those.
    testHistory(history, new int[] {1, 2});
}
public static Iterable<String> expandAtNFilepattern(String filepattern) {
  Matcher matcher = AT_N_SPEC.matcher(filepattern);
  if (!matcher.find()) {
    // No @N wildcard: the pattern expands to itself.
    return ImmutableList.of(filepattern);
  }
  ImmutableList.Builder<String> shards = ImmutableList.builder();
  int numShards = Integer.parseInt(matcher.group("N"));
  String formatString = "-%0" + getShardWidth(numShards, filepattern) + "d-of-%05d";
  for (int i = 0; i < numShards; ++i) {
    shards.add(
        AT_N_SPEC.matcher(filepattern).replaceAll(String.format(formatString, i, numShards)));
  }
  // A second wildcard is ambiguous and rejected (after expansion, matching
  // the original's behaviour of validating before returning).
  if (matcher.find()) {
    throw new IllegalArgumentException(
        "More than one @N wildcard found in filepattern: " + filepattern);
  }
  return shards.build();
}
@Test
public void testExpandAtNFilepatternTwoPatterns() throws Exception {
    // Two @N wildcards in a single pattern must be rejected.
    exception.expect(IllegalArgumentException.class);
    exception.expectMessage(
        "More than one @N wildcard found in filepattern: gs://bucket/object@10.@20.ism");
    Filepatterns.expandAtNFilepattern("gs://bucket/object@10.@20.ism");
}
/**
 * Reads the checkbox states into a permission set, delegating to the
 * one-argument overload with its flag set to {@code false}.
 */
public EnumSet<RepositoryFilePermission> processCheckboxes() {
    return processCheckboxes( false );
}
@Test
public void testProcessCheckboxesReadCheckedEnableAppropriateFalse() {
    // Only READ is checked.
    when( readCheckbox.isChecked() ).thenReturn( true );
    when( writeCheckbox.isChecked() ).thenReturn( false );
    when( deleteCheckbox.isChecked() ).thenReturn( false );
    when( manageCheckbox.isChecked() ).thenReturn( false );
    assertEquals( EnumSet.of( RepositoryFilePermission.READ ), permissionsCheckboxHandler.processCheckboxes() );
    // All checkboxes end up disabled, regardless of which were checked.
    verify( readCheckbox, times( 1 ) ).setDisabled( true );
    verify( writeCheckbox, times( 1 ) ).setDisabled( true );
    verify( deleteCheckbox, times( 1 ) ).setDisabled( true );
    verify( manageCheckbox, times( 1 ) ).setDisabled( true );
}
/**
 * Transforms the message body to or from the Google Sheets ValueRange model,
 * driven by exchange headers (range, majorDimension, spreadsheetId, ...).
 */
@Override
public void transform(Message message, DataType fromType, DataType toType) {
    final Optional<ValueRange> valueRange = getValueRangeBody(message);
    // Sheet coordinates and options come from headers, with defaults.
    String range = message.getHeader(GoogleSheetsConstants.PROPERTY_PREFIX + "range", "A:A").toString();
    String majorDimension = message
            .getHeader(GoogleSheetsConstants.PROPERTY_PREFIX + "majorDimension", RangeCoordinate.DIMENSION_ROWS).toString();
    String spreadsheetId = message.getHeader(GoogleSheetsConstants.PROPERTY_PREFIX + "spreadsheetId", "").toString();
    String[] columnNames = message.getHeader(GoogleSheetsConstants.PROPERTY_PREFIX + "columnNames", "A").toString().split(",");
    boolean splitResults = Boolean
            .parseBoolean(message.getHeader(GoogleSheetsConstants.PROPERTY_PREFIX + "splitResults", "false").toString());
    if (valueRange.isPresent()) {
        // Body is already a ValueRange: map it FROM the sheet model.
        message.setBody(
                transformFromValueRangeModel(message, valueRange.get(), spreadsheetId, range, majorDimension, columnNames));
    } else if (splitResults) {
        message.setBody(transformFromSplitValuesModel(message, spreadsheetId, range, majorDimension, columnNames));
    } else {
        // Default: build a ValueRange from the body for an update call.
        String valueInputOption = message.getHeader(GoogleSheetsConstants.PROPERTY_PREFIX + "valueInputOption", "USER_ENTERED").toString();
        message.setBody(
                transformToValueRangeModel(message, spreadsheetId, range, majorDimension, valueInputOption, columnNames));
    }
}
@Test
public void testTransformToValueRangeWithJsonObject() throws Exception {
    Exchange inbound = new DefaultExchange(camelContext);
    inbound.getMessage().setHeader(GoogleSheetsConstants.PROPERTY_PREFIX + "range", "A1:B2");
    String body = "{\"spreadsheetId\": \"" + spreadsheetId + "\", \"A\": \"a1\", \"B\": \"b1\" }";
    inbound.getMessage().setBody(body);
    transformer.transform(inbound.getMessage(), DataType.ANY, DataType.ANY);
    assertEquals ? // no-op
    Assertions.assertEquals("A1:B2", inbound.getMessage().getHeader(GoogleSheetsStreamConstants.RANGE));
    Assertions.assertEquals(RangeCoordinate.DIMENSION_ROWS, inbound.getMessage().getHeader(GoogleSheetsStreamConstants.MAJOR_DIMENSION));
    Assertions.assertEquals("USER_ENTERED", inbound.getMessage().getHeader(GoogleSheetsConstants.PROPERTY_PREFIX + "valueInputOption"));
    // The JSON object becomes a single row with the two requested columns.
    ValueRange valueRange = (ValueRange) inbound.getMessage().getHeader(GoogleSheetsConstants.PROPERTY_PREFIX + "values");
    Assertions.assertEquals(1L, valueRange.getValues().size());
    Assertions.assertEquals(2L, valueRange.getValues().get(0).size());
    Assertions.assertEquals("a1", valueRange.getValues().get(0).get(0));
    Assertions.assertEquals("b1", valueRange.getValues().get(0).get(1));
}
public SCM find(final String scmId) {
    // Linear scan; null when no SCM carries the given id.
    for (final SCM candidate : this) {
        if (candidate.getId().equals(scmId)) {
            return candidate;
        }
    }
    return null;
}
@Test
void shouldReturnNullIfNoMatchingSCMFound() {
    final SCMs empty = new SCMs();
    // An unknown id must yield null rather than throwing.
    assertThat(empty.find("not-found")).isNull();
}
/**
 * Validates the invocation arguments against JSR-303 constraints. Validation
 * groups are: {@code Default}, the service class, an optional per-method
 * marker class, plus any groups declared via {@code @MethodValidated}.
 *
 * @throws ValidationException when any constraint is violated; the message is
 *         the comma-joined list of violation messages
 */
@Override
public void validate(final String methodName, final Class<?>[] parameterTypes, final Object[] arguments) throws Exception {
    List<Class<?>> groups = new ArrayList<>();
    Class<?> methodClass = methodClass(methodName);
    if (Objects.nonNull(methodClass)) {
        groups.add(methodClass);
    }
    Set<ConstraintViolation<?>> violations = new HashSet<>();
    Method method = clazz.getMethod(methodName, parameterTypes);
    Class<?>[] methodClasses;
    if (method.isAnnotationPresent(MethodValidated.class)) {
        methodClasses = method.getAnnotation(MethodValidated.class).value();
        groups.addAll(Arrays.asList(methodClasses));
    }
    // add into default group
    groups.add(0, Default.class);
    groups.add(1, clazz);
    // convert list to array
    Class<?>[] classGroups = new Class<?>[groups.size()];
    classGroups = groups.toArray(classGroups);
    // Validate the synthesized parameter bean first, then each argument.
    Object parameterBean = getMethodParameterBean(clazz, method, arguments);
    if (parameterBean != null) {
        violations.addAll(validator.validate(parameterBean, classGroups));
    }
    for (Object arg : arguments) {
        validate(violations, arg, classGroups);
    }
    if (!violations.isEmpty()) {
        LOG.error("Failed to validate service: {}, method: {}, cause: {}", clazz.getName(), methodName, violations);
        StringBuilder validateError = new StringBuilder();
        violations.forEach(each -> validateError.append(each.getMessage()).append(","));
        throw new ValidationException(validateError.substring(0, validateError.length() - 1));
    }
}
@Test
public void testValidateWhenMeetsConstraintThenValidationFailed() throws Exception {
    final Class<?>[] parameterTypes = {MockValidationParameter.class};
    final Object[] arguments = {new MockValidationParameter("NotBeNull")};
    // The parameter violates methodTwo's constraints, so validation must fail.
    assertThrows(ValidationException.class,
            () -> apacheDubboClientValidatorUnderTest.validate("methodTwo", parameterTypes, arguments));
}
/**
 * Converts a Thrift-generated class into the equivalent Parquet message type
 * by first deriving its Thrift struct descriptor.
 */
public MessageType convert(Class<? extends TBase<?, ?>> thriftClass) {
    return convert(toStructType(thriftClass));
}
@Test
public void testToMessageType() throws Exception {
    // Expected Parquet schema for the Thrift AddressBook struct: field ids
    // carried over, lists mapped to repeated *_tuple groups.
    String expected = "message ParquetSchema {\n" + " optional group persons (LIST) = 1 {\n"
        + " repeated group persons_tuple {\n" + " required group name = 1 {\n"
        + " optional binary first_name (UTF8) = 1;\n" + " optional binary last_name (UTF8) = 2;\n"
        + " }\n" + " optional int32 id = 2;\n" + " optional binary email (UTF8) = 3;\n"
        + " optional group phones (LIST) = 4 {\n" + " repeated group phones_tuple {\n"
        + " optional binary number (UTF8) = 1;\n" + " optional binary type (ENUM) = 2;\n"
        + " }\n" + " }\n" + " }\n" + " }\n" + "}";
    ThriftSchemaConverter schemaConverter = new ThriftSchemaConverter();
    final MessageType converted = schemaConverter.convert(AddressBook.class);
    assertEquals(MessageTypeParser.parseMessageType(expected), converted);
}
@Udf
public final <T> T nullIf(
    @UdfParameter(description = "expression 1") final T expr1,
    @UdfParameter(description = "expression 2") final T expr2
) {
  // SQL NULLIF semantics: null when the expressions match (a null expr1
  // trivially yields null), expr1 otherwise.
  return expr1 == null || expr1.equals(expr2) ? null : expr1;
}
@Test
public void shouldReturnNullIfBothValuesAreNulls() {
    // NULLIF(null, null) is null by definition.
    assertThat(udf.nullIf(null, null), is(nullValue()));
}
/**
 * Maps a JPMML {@code TextIndex} onto its KiePMML counterpart, translating
 * the optional enum-like fields and escaping the word-separator regex (so it
 * can later be embedded safely in generated Java source).
 */
static KiePMMLTextIndex getKiePMMLTextIndex(final TextIndex textIndex) {
    // Each of the three derived values tolerates a missing source field.
    final LOCAL_TERM_WEIGHTS localTermWeights = textIndex.getLocalTermWeights() != null ? LOCAL_TERM_WEIGHTS.byName(textIndex.getLocalTermWeights().value()) : null;
    final COUNT_HITS countHits = textIndex.getCountHits() != null ? COUNT_HITS.byName(textIndex.getCountHits().value()) : null;
    final String wordSeparatorCharacterRE = textIndex.getWordSeparatorCharacterRE() != null ? StringEscapeUtils.escapeJava(textIndex.getWordSeparatorCharacterRE()) : null;
    return KiePMMLTextIndex.builder(textIndex.getTextField(),
                                    getKiePMMLExtensions(textIndex.getExtensions()),
                                    getKiePMMLExpression(textIndex.getExpression()))
            .withTextIndexNormalizations(getKiePMMLTextIndexNormalizations(textIndex.getTextIndexNormalizations()))
            .withLocalTermWeights(localTermWeights)
            .withIsCaseSensitive(textIndex.isCaseSensitive())
            .withMaxLevenshteinDistance(textIndex.getMaxLevenshteinDistance())
            .withCountHits(countHits)
            .withWordSeparatorCharacterRE(wordSeparatorCharacterRE)
            .withTokenize(textIndex.isTokenize())
            .build();
}
@Test
void getKiePMMLTextIndex() {
    // Round-trip a randomly populated TextIndex and verify field-by-field.
    final TextIndex toConvert = getRandomTextIndex();
    final KiePMMLTextIndex retrieved = KiePMMLTextIndexInstanceFactory.getKiePMMLTextIndex(toConvert);
    commonVerifyKiePMMLTextIndex(retrieved, toConvert);
}
public static Map<String, String> parseToMap(String attributesModification) { if (Strings.isNullOrEmpty(attributesModification)) { return new HashMap<>(); } // format: +key1=value1,+key2=value2,-key3,+key4=value4 Map<String, String> attributes = new HashMap<>(); String[] kvs = attributesModification.split(ATTR_ARRAY_SEPARATOR_COMMA); for (String kv : kvs) { String key; String value; if (kv.contains(ATTR_KEY_VALUE_EQUAL_SIGN)) { String[] splits = kv.split(ATTR_KEY_VALUE_EQUAL_SIGN); key = splits[0]; value = splits[1]; if (!key.contains(ATTR_ADD_PLUS_SIGN)) { throw new RuntimeException("add/alter attribute format is wrong: " + key); } } else { key = kv; value = ""; if (!key.contains(ATTR_DELETE_MINUS_SIGN)) { throw new RuntimeException("delete attribute format is wrong: " + key); } } String old = attributes.put(key, value); if (old != null) { throw new RuntimeException("key duplication: " + key); } } return attributes; }
@Test
public void parseToMap_NullString_ReturnsEmptyMap() {
    // A null spec parses to an empty (not null) map.
    final Map<String, String> result = AttributeParser.parseToMap(null);
    assertTrue(result.isEmpty());
}
/**
 * Fetches the windows for the given key in the range [timeFrom, timeTo] in
 * backward order, probing each underlying store until one yields results.
 */
@Override
public WindowStoreIterator<V> backwardFetch(final K key, final Instant timeFrom, final Instant timeTo) throws IllegalArgumentException {
    Objects.requireNonNull(key, "key can't be null");
    final List<ReadOnlyWindowStore<K, V>> stores = provider.stores(storeName, windowStoreType);
    for (final ReadOnlyWindowStore<K, V> windowStore : stores) {
        try {
            final WindowStoreIterator<V> result = windowStore.backwardFetch(key, timeFrom, timeTo);
            // Close empty iterators eagerly and keep probing the next store.
            if (!result.hasNext()) {
                result.close();
            } else {
                return result;
            }
        } catch (final InvalidStateStoreException e) {
            // NOTE(review): the caught cause 'e' is not chained into the new
            // exception; callers only see the re-discovery hint - confirm
            // dropping the cause is intentional.
            throw new InvalidStateStoreException(
                "State store is not available anymore and may have been migrated to another instance; " +
                    "please re-discover its location from the state metadata.");
        }
    }
    return KeyValueIterators.emptyWindowStoreIterator();
}
@Test
public void shouldBackwardFetchKeyRangeAcrossStoresWithNullKeyFromKeyTo() {
    // Spread data across two providers to prove results are merged across
    // stores; the expected ordering is as asserted below.
    final ReadOnlyWindowStoreStub<String, String> secondUnderlying = new ReadOnlyWindowStoreStub<>(WINDOW_SIZE);
    stubProviderTwo.addStore(storeName, secondUnderlying);
    underlyingWindowStore.put("a", "a", 0L);
    secondUnderlying.put("b", "b", 10L);
    secondUnderlying.put("c", "c", 10L);
    final List<KeyValue<Windowed<String>, String>> results =
        StreamsTestUtils.toList(windowStore.backwardFetch(null, null, ofEpochMilli(0), ofEpochMilli(10)));
    assertThat(results, equalTo(Arrays.asList(
        KeyValue.pair(new Windowed<>("a", new TimeWindow(0, WINDOW_SIZE)), "a"),
        KeyValue.pair(new Windowed<>("c", new TimeWindow(10, 10 + WINDOW_SIZE)), "c"),
        KeyValue.pair(new Windowed<>("b", new TimeWindow(10, 10 + WINDOW_SIZE)), "b"))));
}
/**
 * Infers a Beam {@code Schema} from a Java bean class via static schema
 * inference, using the supplied type supplier to enumerate field values.
 */
public static Schema schemaFromJavaBeanClass(
    TypeDescriptor<?> typeDescriptor, FieldValueTypeSupplier fieldValueTypeSupplier) {
  return StaticSchemaInference.schemaFromClass(typeDescriptor, fieldValueTypeSupplier);
}
@Test
public void testNestedBean() {
  // Getter-based inference must recurse into the nested bean's schema.
  final Schema inferred =
      JavaBeanUtils.schemaFromJavaBeanClass(
          new TypeDescriptor<NestedBean>() {}, GetterTypeSupplier.INSTANCE);
  SchemaTestUtils.assertSchemaEquivalent(NESTED_BEAN_SCHEMA, inferred);
}
@ScalarFunction
public static boolean isJson(String inputStr) {
  // Valid iff the string parses as JSON; any parse failure means "not JSON".
  try {
    JsonUtils.stringToJsonNode(inputStr);
  } catch (Exception ignored) {
    return false;
  }
  return true;
}
@Test(dataProvider = "isJson")
public void testIsJson(String input, boolean expectedValue) {
    // Data-driven: each provider row supplies a candidate string and verdict.
    assertEquals(StringFunctions.isJson(input), expectedValue);
}
/**
 * Encodes a Kubevirt load balancer to JSON. Description, members and rules
 * are emitted only when present/non-empty.
 */
@Override
public ObjectNode encode(KubevirtLoadBalancer lb, CodecContext context) {
    checkNotNull(lb, "Kubevirt load balancer cannot be null");
    ObjectNode result = context.mapper().createObjectNode()
            .put(NAME, lb.name())
            .put(VIP, lb.vip().toString())
            .put(NETWORK_ID, lb.networkId());
    // Optional scalar field.
    if (lb.description() != null) {
        result.put(DESCRIPTION, lb.description());
    }
    // Members serialize as a JSON array of IP strings.
    if (lb.members() != null && !lb.members().isEmpty()) {
        ArrayNode members = context.mapper().createArrayNode();
        for (IpAddress ip : lb.members()) {
            members.add(ip.toString());
        }
        result.set(MEMBERS, members);
    }
    // Rules delegate to their own registered codec.
    if (lb.rules() != null && !lb.rules().isEmpty()) {
        ArrayNode rules = context.mapper().createArrayNode();
        for (KubevirtLoadBalancerRule rule : lb.rules()) {
            ObjectNode ruleJson = context.codec(
                    KubevirtLoadBalancerRule.class).encode(rule, context);
            rules.add(ruleJson);
        }
        result.set(RULES, rules);
    }
    return result;
}
@Test
public void testKubevirtLoadBalancerEncode() {
    // Fully populated balancer: description, members and rules all present.
    KubevirtLoadBalancer lb = DefaultKubevirtLoadBalancer.builder()
            .name("lb-1")
            .networkId("net-1")
            .vip(IpAddress.valueOf("10.10.10.10"))
            .members(ImmutableSet.of(IpAddress.valueOf("10.10.10.11"),
                    IpAddress.valueOf("10.10.10.12")))
            .rules(ImmutableSet.of(RULE1, RULE2, RULE3))
            .description("network load balancer")
            .build();
    ObjectNode lbJson = kubevirtLoadBalancerCodec.encode(lb, context);
    assertThat(lbJson, matchesKubevirtLoadBalancer(lb));
}
public static String extractMinionInstanceTag(TableConfig tableConfig, String taskType) {
    TableTaskConfig taskConfig = tableConfig.getTaskConfig();
    if (taskConfig == null) {
        return CommonConstants.Helix.UNTAGGED_MINION_INSTANCE;
    }
    Map<String, String> taskTypeConfigs = taskConfig.getConfigsForTaskType(taskType);
    if (taskTypeConfigs == null || taskTypeConfigs.isEmpty()) {
        return CommonConstants.Helix.UNTAGGED_MINION_INSTANCE;
    }
    // Fall back to the untagged instance when no explicit tag is configured.
    return taskTypeConfigs.getOrDefault(PinotTaskManager.MINION_INSTANCE_TAG_CONFIG,
        CommonConstants.Helix.UNTAGGED_MINION_INSTANCE);
}
@Test
public void testExtractMinionInstanceTag() {
    // correct minionInstanceTag extraction
    Map<String, String> tableTaskConfigs = getDummyTaskConfig();
    tableTaskConfigs.put(PinotTaskManager.MINION_INSTANCE_TAG_CONFIG, "minionInstance1");
    TableTaskConfig tableTaskConfig =
        new TableTaskConfig(Collections.singletonMap(MinionConstants.MergeRollupTask.TASK_TYPE, tableTaskConfigs));
    TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName("sampleTable")
        .setTaskConfig(tableTaskConfig).build();
    assertEquals(TaskGeneratorUtils.extractMinionInstanceTag(tableConfig, MinionConstants.MergeRollupTask.TASK_TYPE),
        "minionInstance1");
    // no minionInstanceTag passed - falls back to the untagged instance
    tableTaskConfigs = getDummyTaskConfig();
    tableTaskConfig = new TableTaskConfig(Collections.singletonMap(MinionConstants.MergeRollupTask.TASK_TYPE, tableTaskConfigs));
    tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName("sampleTable")
        .setTaskConfig(tableTaskConfig).build();
    assertEquals(TaskGeneratorUtils.extractMinionInstanceTag(tableConfig, MinionConstants.MergeRollupTask.TASK_TYPE),
        CommonConstants.Helix.UNTAGGED_MINION_INSTANCE);
}
// Exposes the lazily-supplied client so tests can inspect its configuration.
@VisibleForTesting
AmazonS3 getAmazonS3Client() {
    return this.amazonS3.get();
}
@Test
public void testGetPathStyleAccessEnabled() throws URISyntaxException {
    final S3FileSystem s3FileSystem =
        new S3FileSystem(s3ConfigWithCustomEndpointAndPathStyleAccessEnabled("s3"));
    final URL s3Url = s3FileSystem.getAmazonS3Client().getUrl("bucket", "file");
    // Path-style access puts the bucket into the path, not the host.
    assertEquals("https://s3.custom.dns/bucket/file", s3Url.toURI().toString());
}
/**
 * Converts zero-based (row, col) coordinates to a spreadsheet cell name,
 * e.g. (0, 0) -> "A1", (9, 27) -> "AB10".
 *
 * <p>Generalized from the original, which only handled column indexes below
 * 26 + 26*26 + 26*26*26: this version supports any non-negative column via
 * the standard bijective base-26 expansion (A=1 ... Z=26, no zero digit).
 */
public static String rc2name( int row, int col ){
    StringBuilder letters = new StringBuilder();
    // Peel off the least-significant "digit" each round; the -1 accounts for
    // the bijective numbering's missing zero digit.
    for ( int c = col; c >= 0; c = c / 26 - 1 ) {
        letters.insert( 0, (char) ( 'A' + c % 26 ) );
    }
    // Rows are 1-based in cell names.
    return letters.append( row + 1 ).toString();
}
@Test
public void testRowColumnToCellNAme() {
    // Covers single-letter (A, K) and double-letter (AQ, AB, BB) columns,
    // plus the 1-based row suffix.
    assertThat(rc2name(0, 0)).isEqualTo("A1");
    assertThat(rc2name(0, 10)).isEqualTo("K1");
    assertThat(rc2name(0, 42)).isEqualTo("AQ1");
    assertThat(rc2name(9, 27)).isEqualTo("AB10");
    assertThat(rc2name(99, 53)).isEqualTo("BB100");
}
@SuppressWarnings("unchecked")
public static <T, S extends T> Optional<S> downcast(T obj, Class<S> klass) {
    Preconditions.checkArgument(obj != null);
    Objects.requireNonNull(klass);
    // Exact runtime-class match only: instances of subclasses of klass are
    // rejected, matching the original semantics.
    if (!obj.getClass().equals(klass)) {
        return Optional.empty();
    }
    return Optional.of((S) obj);
}
@Test
public void testDowncast() {
    final Number number = Integer.valueOf(10);
    // Exact runtime class matches; an unrelated class does not.
    Assert.assertTrue(Util.downcast(number, Integer.class).isPresent());
    Assert.assertFalse(Util.downcast(number, String.class).isPresent());
}
/**
 * Returns the merged value at the given column as a plain {@code Object}.
 *
 * @throws SQLException if reading from the underlying merge result set fails
 */
@Override
public Object getObject(final int columnIndex) throws SQLException {
    return mergeResultSet.getValue(columnIndex, Object.class);
}
@Test
void assertGetObjectWithString() throws SQLException {
    final String expected = "foo";
    when(mergeResultSet.getValue(1, String.class)).thenReturn(expected);
    // The typed getObject overload must pass the class through unchanged.
    assertThat(shardingSphereResultSet.getObject(1, String.class), is(expected));
}
@Override
public void modifyPath(Function<String, String> pathModifier) {
    // Rebuild the (immutable) URI with the modifier applied to its path only;
    // other components (query, etc.) are carried over by the builder.
    final HttpURI currentUri = request.getHttpURI();
    final String newPath = pathModifier.apply(currentUri.getPath());
    request.setHttpURI(HttpURI.build(currentUri).path(newPath).asImmutable());
}
@Test
public void shouldAlterRequestUriOnRequest() {
    when(request.getHttpURI()).thenReturn(HttpURI.from("foo/bar/baz?a=b&c=d"));

    jettyRequest.modifyPath(path -> path.replaceAll("^foo/bar/baz", "foo/junk"));

    verify(request).setHttpURI(capturedUri.capture());
    final HttpURI rewritten = capturedUri.getValue();
    // Only the path changes; the query string must survive untouched.
    assertThat(rewritten.getPath()).isEqualTo("foo/junk");
    assertThat(rewritten.getQuery()).isEqualTo("a=b&c=d");
}
/**
 * Projects a single source object into a row by binding it as the extraction
 * target and evaluating the configured predicate and projection expressions.
 * Presumably returns null when the predicate rejects the object - confirm
 * against ExpressionUtil.projection.
 */
public JetSqlRow project(Object object) {
    target.setTarget(object, null);
    return ExpressionUtil.projection(predicate, projection, this, evalContext);
}
@Test
public void test_project() {
    // Projector multiplies the single INT column by the constant 2.
    RowProjector projector = new RowProjector(
            new String[]{"target"},
            new QueryDataType[]{INT},
            new IdentityTarget(),
            null,
            singletonList(
                    MultiplyFunction.create(ColumnExpression.create(0, INT), ConstantExpression.create(2, INT), INT)
            ),
            mock(ExpressionEvalContext.class)
    );
    JetSqlRow row = projector.project(1);
    assertThat(row.getValues()).isEqualTo(new Object[]{2});
}
static void populateCorrectMiningModel(final MiningModel miningModel) {
    // Walk every segment, repairing ids, model names and target fields, and
    // recurse into nested MiningModels so the whole tree is normalized.
    final List<Segment> segments = miningModel.getSegmentation().getSegments();
    int index = 0;
    for (final Segment segment : segments) {
        populateCorrectSegmentId(segment, miningModel.getModelName(), index);
        final Model segmentModel = segment.getModel();
        populateMissingSegmentModelName(segmentModel, segment.getId());
        populateMissingTargetFieldInSegment(miningModel.getMiningSchema(), segmentModel);
        populateMissingPredictedOutputFieldTarget(segmentModel);
        if (segmentModel instanceof MiningModel) {
            populateCorrectMiningModel((MiningModel) segmentModel);
        }
        index++;
    }
}
@Test
void populateCorrectMiningModel() throws Exception {
    final InputStream inputStream = getFileInputStream(NO_MODELNAME_NO_SEGMENT_ID_NOSEGMENT_TARGET_FIELD_SAMPLE);
    final PMML pmml = org.jpmml.model.PMMLUtil.unmarshal(inputStream);
    final Model retrieved = pmml.getModels().get(0);
    assertThat(retrieved).isInstanceOf(MiningModel.class);
    MiningModel miningModel = (MiningModel) retrieved;
    // Precondition: ids, model names and target fields start out missing.
    miningModel.getSegmentation().getSegments().forEach(segment -> {
        assertThat(segment.getId()).isNull();
        assertThat(segment.getModel().getModelName()).isNull();
        assertThat(getMiningTargetFields(segment.getModel().getMiningSchema())).isEmpty();
    });
    KiePMMLUtil.populateCorrectMiningModel(miningModel);
    // Postcondition: every segment has been fully populated.
    miningModel.getSegmentation().getSegments().forEach(segment -> {
        assertThat(segment.getId()).isNotNull();
        assertThat(segment.getModel().getModelName()).isNotNull();
        assertThat(getMiningTargetFields(segment.getModel().getMiningSchema())).isNotEmpty();
    });
}
public double d(int[] x, int[] y) {
    if (x.length != y.length) {
        throw new IllegalArgumentException(String.format("Arrays have different length: x[%d], y[%d]", x.length, y.length));
    }
    if (weight != null && x.length != weight.length) {
        throw new IllegalArgumentException(String.format("Input vectors and weight vector have different length: %d, %d", x.length, weight.length));
    }
    // Manhattan distance: sum of (optionally weighted) absolute differences.
    double dist = 0.0;
    for (int i = 0; i < x.length; i++) {
        final double delta = Math.abs(x[i] - y[i]);
        dist += (weight == null) ? delta : weight[i] * delta;
    }
    return dist;
}
@Test
public void testDistanceInt() {
    System.out.println("distance");
    final int[] a = {1, 2, 3, 4};
    final int[] b = {4, 3, 2, 1};
    // |1-4| + |2-3| + |3-2| + |4-1| = 8
    assertEquals(8, new ManhattanDistance().d(a, b), 1E-6);
}
/**
 * Extracts the service UUID from SAML metadata: looks up the entity's first
 * role descriptor, finds the AttributeConsumingService with the given index,
 * and reads the well-known UUID RequestedAttribute. Returns null whenever
 * any step yields nothing.
 */
public String getServiceUUID(Connection connection, String serviceEntityId, int consumingServiceIdx) {
    if (connection == null || serviceEntityId == null) return null;
    EntityDescriptor serviceEntityDescriptor = resolveEntityDescriptorFromMetadata(connection, serviceEntityId);
    if (serviceEntityDescriptor == null || serviceEntityDescriptor.getRoleDescriptors() == null) {
        return null;
    }
    // Only the first role descriptor is inspected.
    List<XMLObject> list = serviceEntityDescriptor.getRoleDescriptors().get(0).getOrderedChildren();
    if (list == null) {
        return null;
    }
    Optional<XMLObject> xmlObject = list
            .stream()
            .filter(obj -> obj instanceof AttributeConsumingService && ((AttributeConsumingService) obj).getIndex() == consumingServiceIdx)
            .findFirst();
    if (xmlObject.isEmpty()) {
        return null;
    }
    AttributeConsumingService attributeConsumingService = (AttributeConsumingService) xmlObject.get();
    // The UUID travels as a RequestedAttribute with a well-known name; its
    // first attribute value carries the text content.
    Optional<RequestedAttribute> requestedAttribute = attributeConsumingService.getRequestedAttributes()
            .stream()
            .filter(o -> o.getName().equals(SAML_SERVICE_UUID_NAME))
            .findFirst();
    return requestedAttribute.isEmpty() ? null : ((XSAny) requestedAttribute.get().getAttributeValues().get(0)).getTextContent();
}
@Test
void metadataResponseMapperMapsSuccessResponseTest() {
    String samlData = "samlData";
    Connection connection = newConnection(SAML_COMBICONNECT, true, true, true);
    Service service = newService(true);
    String requestStatus = "OK";
    SamlMetadataResponse metadataResponse = samlMetadataResponseMapper.mapSuccessResponse(samlData, connection, service, requestStatus);
    // Every mapped field must mirror its source; no error is expected.
    assertEquals(metadataResponse.getSamlMetadata(), samlData);
    assertEquals(metadataResponse.getFederationName(), connection.getSsoDomain());
    assertEquals(metadataResponse.getMinimumReliabilityLevel(), service.getMinimumReliabilityLevel());
    assertEquals(metadataResponse.getEncryptionIdType(), service.getEncryptionIdType().name());
    assertEquals(metadataResponse.getAppReturnUrl(), service.getAppReturnUrl());
    assertEquals(metadataResponse.getAppActive(), service.getAppActive());
    assertEquals(metadataResponse.getServiceName(), service.getName());
    assertEquals(metadataResponse.getLegacyWebserviceId(), service.getLegacyServiceId());
    assertEquals(metadataResponse.getRequestStatus(), requestStatus);
    assertNull(metadataResponse.getErrorDescription());
    assertEquals(metadataResponse.getServiceUuid(), service.getServiceUuid());
    assertEquals(metadataResponse.getPermissionQuestion(), service.getPermissionQuestion());
    assertEquals(metadataResponse.getProtocolType(), connection.getProtocolType());
}
public DataPoint addDataPoint(Object label) {
    // Labels and data points are parallel lists; keep them in lock-step.
    final DataPoint dataPoint = new DataPoint();
    labels.add(label);
    dataPoints.add(dataPoint);
    return dataPoint;
}
@Test
public void testAddDataPoint() {
    cm = new ChartModel(FOO, BAR, ZOO);
    long time = System.currentTimeMillis();
    // Three data points, each carrying a value for every series.
    cm.addDataPoint(time)
        .data(FOO, VALUES1[0])
        .data(BAR, VALUES2[0])
        .data(ZOO, VALUES3[0]);
    cm.addDataPoint(time + 1)
        .data(FOO, VALUES1[1])
        .data(BAR, VALUES2[1])
        .data(ZOO, VALUES3[1]);
    cm.addDataPoint(time + 2)
        .data(FOO, VALUES1[2])
        .data(BAR, VALUES2[2])
        .data(ZOO, VALUES3[2]);
    assertEquals("Wrong result", 3, cm.getDataPoints()[0].size());
    assertEquals("Wrong result", 3, cm.getDataPoints()[1].size());
    assertEquals("Wrong result", 3, cm.getDataPoints()[2].size());
    assertEquals("Wrong result", 3, cm.getDataPoints().length);
}
/**
 * Acquires the lock, blocking uninterruptibly from the caller's perspective
 * (the {@link java.util.concurrent.locks.Lock#lock()} contract forbids
 * checked exceptions).
 */
@Override
public void lock() {
    try {
        // -1 presumably selects the default lease/watchdog timeout - confirm
        // against lockInterruptibly's contract.
        lockInterruptibly(-1, null);
    } catch (InterruptedException e) {
        // Restore the interrupt flag and chain the cause instead of silently
        // discarding both, so callers can still observe the interruption.
        Thread.currentThread().interrupt();
        throw new IllegalStateException(e);
    }
}
@Test
public void testIsHeldByCurrentThreadOtherThread() throws InterruptedException {
    RLock lock = redisson.getSpinLock("lock");
    lock.lock();
    // While held by this thread, another thread must not appear as holder.
    Thread t = new Thread() {
        public void run() {
            RLock lock = redisson.getSpinLock("lock");
            Assertions.assertFalse(lock.isHeldByCurrentThread());
        }
        ;
    };
    t.start();
    t.join();
    lock.unlock();
    // After release, no thread holds it either.
    Thread t2 = new Thread() {
        public void run() {
            RLock lock = redisson.getSpinLock("lock");
            Assertions.assertFalse(lock.isHeldByCurrentThread());
        }
        ;
    };
    t2.start();
    t2.join();
}
// The disabled OSGi framework cannot be started; always fails fast.
@Override
public void start() throws BundleException {
    throw newException();
}
@Test
void require_that_start_throws_exception() throws BundleException {
    final DisableOsgiFramework framework = new DisableOsgiFramework();
    // Starting a disabled framework must fail immediately.
    assertThrows(RuntimeException.class, framework::start);
}
/**
 * Converts a scanner-report measure into a core measure, dispatching on the
 * metric's declared value type.
 *
 * @throws NullPointerException     if {@code metric} is null
 * @throws IllegalArgumentException for unsupported value types
 */
public Optional<Measure> toMeasure(@Nullable ScannerReport.Measure batchMeasure, Metric metric) {
    Objects.requireNonNull(metric);
    if (batchMeasure == null) {
        return Optional.empty();
    }
    Measure.NewMeasureBuilder builder = Measure.newMeasureBuilder();
    switch (metric.getType().getValueType()) {
        case INT:
            return toIntegerMeasure(builder, batchMeasure);
        case LONG:
            return toLongMeasure(builder, batchMeasure);
        case DOUBLE:
            return toDoubleMeasure(builder, batchMeasure);
        case BOOLEAN:
            return toBooleanMeasure(builder, batchMeasure);
        case STRING:
            return toStringMeasure(builder, batchMeasure);
        case LEVEL:
            return toLevelMeasure(builder, batchMeasure);
        case NO_VALUE:
            return toNoValueMeasure(builder);
        default:
            throw new IllegalArgumentException("Unsupported Measure.ValueType " + metric.getType().getValueType());
    }
}
@Test
public void toMeasure_throws_NPE_if_both_arguments_are_null() {
    // The metric argument is mandatory even when the measure itself is null.
    assertThatThrownBy(() -> underTest.toMeasure(null, null))
        .isInstanceOf(NullPointerException.class);
}
/**
 * Clamps {@code value} into the inclusive range [{@code min}, {@code max}].
 * As in the original, the max bound wins when min > max.
 */
public static int intClamp(int value, int min, int max) {
    if (value > max) {
        return max;
    }
    return Math.max(value, min);
}
@Test
void testIntClamp() {
    assertEquals(5, NumberUtil.intClamp(100, 0, 5));  // above range -> max
    assertEquals(5, NumberUtil.intClamp(-25, 5, 10)); // below range -> min
    assertEquals(5, NumberUtil.intClamp(5, 0, 10));   // inside range -> value
}
/**
 * Queries the device's alert-filter configuration over NETCONF. Returns null
 * when this node is not the mastership owner or on communication failure.
 */
@Override
public String getAlertFilter() {
    DriverHandler handler = handler();
    NetconfController controller = handler.get(NetconfController.class);
    MastershipService mastershipService = handler.get(MastershipService.class);
    DeviceId ncDeviceId = handler.data().deviceId();
    checkNotNull(controller, "Netconf controller is null");
    String reply = null;
    // Only the mastership owner may talk to the device.
    if (!mastershipService.isLocalMaster(ncDeviceId)) {
        log.warn("Not master for {} Use {} to execute command",
                 ncDeviceId,
                 mastershipService.getMasterFor(ncDeviceId));
        return null;
    }
    try {
        // Request skeleton: a volt-ne wrapper around an empty alert-filter tag.
        StringBuilder request = new StringBuilder();
        request.append(VOLT_NE_OPEN + VOLT_NE_NAMESPACE);
        request.append(ANGLE_RIGHT + NEW_LINE);
        request.append(buildStartTag(VOLT_ALERTS))
            .append(buildEmptyTag(ALERT_FILTER))
            .append(buildEndTag(VOLT_ALERTS))
            .append(VOLT_NE_CLOSE);
        reply = controller
                .getDevicesMap()
                .get(ncDeviceId)
                .getSession()
                .get(request.toString(), REPORT_ALL);
    } catch (NetconfException e) {
        // Best-effort: communication failures are logged, null is returned.
        log.error("Cannot communicate to device {} exception {}", ncDeviceId, e);
    }
    return reply;
}
@Test
public void testGetAlertFilter() throws Exception {
    // Smoke test: building and sending the alert-filter request must not throw.
    // NOTE(review): no assertion on the reply here — presumably the mocked NETCONF
    // session validates the request content; confirm against the test fixture.
    voltConfig.getAlertFilter();
}
/**
 * Reads an integer of the requested format from this data, starting at the given
 * byte offset.
 *
 * @param formatType one of the FORMAT_* integer format constants (signed/unsigned,
 *                   8/16/24/32-bit, little- or big-endian)
 * @param offset     byte offset to read from; must be non-negative
 * @return the decoded value, or {@code null} when there are not enough bytes left
 *         for the requested format or the format is unknown
 */
@Nullable
public Integer getIntValue(@IntFormat final int formatType, @IntRange(from = 0) final int offset) {
	// Not enough bytes remaining for this format -> no value.
	if ((offset + getTypeLen(formatType)) > size())
		return null;

	return switch (formatType) {
		case FORMAT_UINT8 -> unsignedByteToInt(mValue[offset]);

		case FORMAT_UINT16_LE -> unsignedBytesToInt(mValue[offset], mValue[offset + 1]);

		case FORMAT_UINT16_BE -> unsignedBytesToInt(mValue[offset + 1], mValue[offset]);

		case FORMAT_UINT24_LE -> unsignedBytesToInt(
				mValue[offset], mValue[offset + 1], mValue[offset + 2], (byte) 0
		);

		case FORMAT_UINT24_BE -> unsignedBytesToInt(
				mValue[offset + 2], mValue[offset + 1], mValue[offset], (byte) 0
		);

		case FORMAT_UINT32_LE -> unsignedBytesToInt(
				mValue[offset], mValue[offset + 1], mValue[offset + 2], mValue[offset + 3]
		);

		case FORMAT_UINT32_BE -> unsignedBytesToInt(
				mValue[offset + 3], mValue[offset + 2], mValue[offset + 1], mValue[offset]
		);

		case FORMAT_SINT8 -> unsignedToSigned(unsignedByteToInt(mValue[offset]), 8);

		case FORMAT_SINT16_LE -> unsignedToSigned(unsignedBytesToInt(mValue[offset], mValue[offset + 1]), 16);

		case FORMAT_SINT16_BE -> unsignedToSigned(unsignedBytesToInt(mValue[offset + 1], mValue[offset]), 16);

		case FORMAT_SINT24_LE -> unsignedToSigned(unsignedBytesToInt(
				mValue[offset], mValue[offset + 1], mValue[offset + 2], (byte) 0
		), 24);

		// Fix: big-endian 24-bit must mirror FORMAT_UINT24_BE — the most significant
		// data byte is at `offset`, and the zero pad occupies the (unused) top byte
		// slot of the helper. The previous code passed the pad as the LOW byte,
		// shifting the value up by 8 bits and leaking mValue[offset] into bit 24+.
		case FORMAT_SINT24_BE -> unsignedToSigned(unsignedBytesToInt(
				mValue[offset + 2], mValue[offset + 1], mValue[offset], (byte) 0
		), 24);

		case FORMAT_SINT32_LE -> unsignedToSigned(unsignedBytesToInt(
				mValue[offset], mValue[offset + 1], mValue[offset + 2], mValue[offset + 3]
		), 32);

		case FORMAT_SINT32_BE -> unsignedToSigned(unsignedBytesToInt(
				mValue[offset + 3], mValue[offset + 2], mValue[offset + 1], mValue[offset]
		), 32);

		// Unknown format constant.
		default -> null;
	};
}
@Test
public void getValue_UINT24() {
    // Little-endian 24-bit read: bytes 0x03 0x02 0x01 decode to 0x010203.
    final Data data = new Data(new byte[] { 0x03, 0x02, 0x01 });
    final int value = data.getIntValue(Data.FORMAT_UINT24_LE, 0);
    assertEquals(0x010203, value);
}
/**
 * Dumps the scheduler logs of every active sub-cluster for the given period.
 * Validates that {@code time} is a positive integer, fans the request out to all
 * active sub-clusters concurrently, and concatenates the per-cluster replies as
 * "subClusterId&lt;id&gt; : &lt;msg&gt;; ". Failures are counted in router metrics,
 * audit-logged and rethrown as runtime exceptions.
 *
 * @param time the dump period in seconds; must parse as an integer &gt; 0
 * @param hsr  the incoming servlet request, cloned before being forwarded
 */
@Override
public String dumpSchedulerLogs(String time, HttpServletRequest hsr) throws IOException {
  // Step1. We will check the time parameter to
  // ensure that the time parameter is not empty and greater than 0.
  if (StringUtils.isBlank(time)) {
    routerMetrics.incrDumpSchedulerLogsFailedRetrieved();
    RouterAuditLogger.logFailure(getUser().getShortUserName(), DUMP_SCHEDULERLOGS, UNKNOWN,
        TARGET_WEB_SERVICE, "Parameter error, the time is empty or null.");
    throw new IllegalArgumentException("Parameter error, the time is empty or null.");
  }
  try {
    int period = Integer.parseInt(time);
    if (period <= 0) {
      throw new IllegalArgumentException("time must be greater than 0.");
    }
  } catch (NumberFormatException e) {
    // Non-numeric input: count the failure, audit it, and report a clear message.
    routerMetrics.incrDumpSchedulerLogsFailedRetrieved();
    RouterAuditLogger.logFailure(getUser().getShortUserName(), DUMP_SCHEDULERLOGS, UNKNOWN,
        TARGET_WEB_SERVICE, e.getLocalizedMessage());
    throw new IllegalArgumentException("time must be a number.");
  } catch (IllegalArgumentException e) {
    // Non-positive period thrown above: record it, then rethrow unchanged.
    routerMetrics.incrDumpSchedulerLogsFailedRetrieved();
    RouterAuditLogger.logFailure(getUser().getShortUserName(), DUMP_SCHEDULERLOGS, UNKNOWN,
        TARGET_WEB_SERVICE, e.getLocalizedMessage());
    throw e;
  }

  // Step2. Call dumpSchedulerLogs of each subcluster.
  try {
    long startTime = clock.getTime();
    Collection<SubClusterInfo> subClustersActive = federationFacade.getActiveSubClusters();
    // Clone the request so each concurrent remote invocation gets its own copy.
    final HttpServletRequest hsrCopy = clone(hsr);
    Class[] argsClasses = new Class[]{String.class, HttpServletRequest.class};
    Object[] args = new Object[]{time, hsrCopy};
    ClientMethod remoteMethod = new ClientMethod("dumpSchedulerLogs", argsClasses, args);
    Map<SubClusterInfo, String> dumpSchedulerLogsMap = invokeConcurrent(
        subClustersActive, remoteMethod, String.class);
    StringBuilder stringBuilder = new StringBuilder();
    // Concatenate each sub-cluster's reply; iteration order is whatever the map yields.
    dumpSchedulerLogsMap.forEach((subClusterInfo, msg) -> {
      SubClusterId subClusterId = subClusterInfo.getSubClusterId();
      stringBuilder.append("subClusterId")
          .append(subClusterId).append(" : ").append(msg).append("; ");
    });
    long stopTime = clock.getTime();
    RouterAuditLogger.logSuccess(getUser().getShortUserName(), DUMP_SCHEDULERLOGS,
        TARGET_WEB_SERVICE);
    routerMetrics.succeededDumpSchedulerLogsRetrieved(stopTime - startTime);
    return stringBuilder.toString();
  } catch (IllegalArgumentException e) {
    routerMetrics.incrDumpSchedulerLogsFailedRetrieved();
    RouterAuditLogger.logFailure(getUser().getShortUserName(), DUMP_SCHEDULERLOGS, UNKNOWN,
        TARGET_WEB_SERVICE, e.getLocalizedMessage());
    RouterServerUtil.logAndThrowRunTimeException(e,
        "Unable to dump SchedulerLogs by time: %s.", time);
  } catch (YarnException e) {
    routerMetrics.incrDumpSchedulerLogsFailedRetrieved();
    RouterAuditLogger.logFailure(getUser().getShortUserName(), DUMP_SCHEDULERLOGS, UNKNOWN,
        TARGET_WEB_SERVICE, e.getLocalizedMessage());
    RouterServerUtil.logAndThrowRunTimeException(e,
        "dumpSchedulerLogs by time = %s error .", time);
  }
  // Unreachable in practice (logAndThrowRunTimeException throws), kept as a safety net.
  routerMetrics.incrDumpSchedulerLogsFailedRetrieved();
  RouterAuditLogger.logFailure(getUser().getShortUserName(), DUMP_SCHEDULERLOGS, UNKNOWN,
      TARGET_WEB_SERVICE, "dumpSchedulerLogs Failed.");
  throw new RuntimeException("dumpSchedulerLogs Failed.");
}
@Test
public void testDumpSchedulerLogs() throws Exception {
  // Each active sub-cluster must contribute its own entry to the aggregated reply.
  HttpServletRequest mockHsr = mockHttpServletRequestByUserName("admin");
  String dumpSchedulerLogsMsg = interceptor.dumpSchedulerLogs("1", mockHsr);
  // We cannot guarantee the calling order of the sub-clusters,
  // We guarantee that the returned result contains the information of each subCluster.
  Assert.assertNotNull(dumpSchedulerLogsMsg);
  subClusters.forEach(subClusterId -> {
    String subClusterMsg =
        "subClusterId" + subClusterId + " : Capacity scheduler logs are being created.; ";
    Assert.assertTrue(dumpSchedulerLogsMsg.contains(subClusterMsg));
  });
}
/**
 * Splits a list into consecutive sublists of at most {@code size} elements each.
 * Alias that delegates to {@code partition(list, size)}.
 *
 * @param list the list to split
 * @param size the maximum size of each sublist
 * @return the list of sublists
 */
public static <T> List<List<T>> split(List<T> list, int size) {
    return partition(list, size);
}
@Test
public void splitTest() {
    // A null input yields the empty result.
    List<List<Object>> lists = ListUtil.split(null, 3);
    assertEquals(ListUtil.empty(), lists);

    lists = ListUtil.split(Arrays.asList(1, 2, 3, 4), 1);
    assertEquals("[[1], [2], [3], [4]]", lists.toString());
    lists = ListUtil.split(Arrays.asList(1, 2, 3, 4), 2);
    assertEquals("[[1, 2], [3, 4]]", lists.toString());
    // The last partition may be shorter than the requested size.
    lists = ListUtil.split(Arrays.asList(1, 2, 3, 4), 3);
    assertEquals("[[1, 2, 3], [4]]", lists.toString());
    lists = ListUtil.split(Arrays.asList(1, 2, 3, 4), 4);
    assertEquals("[[1, 2, 3, 4]]", lists.toString());
    // A size larger than the list produces a single partition.
    lists = ListUtil.split(Arrays.asList(1, 2, 3, 4), 5);
    assertEquals("[[1, 2, 3, 4]]", lists.toString());
}
/**
 * Creates an unconfigured {@link Write} transform.
 * The configuration and table id start empty and must be supplied via the
 * transform's with* methods before the transform is applied.
 */
public static Write write() {
  return new Write(null /* Configuration */, "");
}
@Test
public void testWriting() throws Exception {
  // Write 100 mutations for a single key into a fresh table, then verify the
  // scan returns exactly that many results.
  final String table = tmpTable.getName();
  final String key = "key";
  final String value = "value";
  final int numMutations = 100;
  createTable(table);
  p.apply("multiple rows", Create.of(makeMutations(key, value, numMutations)))
      .apply("write", HBaseIO.write().withConfiguration(conf).withTableId(table));
  p.run().waitUntilFinish();
  List<Result> results = readTable(table, new Scan());
  assertEquals(numMutations, results.size());
}
/**
 * Generates the HTML documentation tables for the config options of the given
 * class. Without a {@code @ConfigGroups} annotation, a single default table is
 * produced; otherwise one table per non-empty group plus a trailing default
 * table for the ungrouped leftovers. Empty groups are omitted.
 */
@VisibleForTesting
static List<Tuple2<ConfigGroup, String>> generateTablesForClass(
        Class<?> optionsClass, Collection<OptionWithMetaInfo> optionWithMetaInfos) {
    List<OptionWithMetaInfo> documentedOptions = selectOptionsToDocument(optionWithMetaInfos);
    if (documentedOptions.isEmpty()) {
        return Collections.emptyList();
    }

    ConfigGroups groupsAnnotation = optionsClass.getAnnotation(ConfigGroups.class);
    if (groupsAnnotation == null) {
        // No grouping requested: everything goes into one default (null-group) table.
        sortOptions(documentedOptions);
        return Collections.singletonList(Tuple2.of(null, toHtmlTable(documentedOptions)));
    }

    List<Tuple2<ConfigGroup, String>> result =
            new ArrayList<>(groupsAnnotation.groups().length + 1);
    Tree groupTree = new Tree(groupsAnnotation.groups(), documentedOptions);
    for (ConfigGroup group : groupsAnnotation.groups()) {
        List<OptionWithMetaInfo> groupOptions = groupTree.findConfigOptions(group);
        if (!groupOptions.isEmpty()) {
            sortOptions(groupOptions);
            result.add(Tuple2.of(group, toHtmlTable(groupOptions)));
        }
    }
    // Options not claimed by any group end up in the default table (null key).
    List<OptionWithMetaInfo> defaultOptions = groupTree.getDefaultOptions();
    if (!defaultOptions.isEmpty()) {
        sortOptions(defaultOptions);
        result.add(Tuple2.of(null, toHtmlTable(defaultOptions)));
    }
    return result;
}
@Test
void testConfigGroupWithEnumConstantExclusion() {
    // Excluded enum constants must not show up in the generated "Possible values"
    // lists; the remaining constants (and their descriptions) must render verbatim.
    final String expectedTable =
            "<table class=\"configuration table table-bordered\">\n"
                    + " <thead>\n"
                    + " <tr>\n"
                    + " <th class=\"text-left\" style=\"width: 20%\">Key</th>\n"
                    + " <th class=\"text-left\" style=\"width: 15%\">Default</th>\n"
                    + " <th class=\"text-left\" style=\"width: 10%\">Type</th>\n"
                    + " <th class=\"text-left\" style=\"width: 55%\">Description</th>\n"
                    + " </tr>\n"
                    + " </thead>\n"
                    + " <tbody>\n"
                    + " <tr>\n"
                    + " <td><h5>exclude.enum</h5></td>\n"
                    + " <td style=\"word-wrap: break-word;\">VALUE_1</td>\n"
                    + " <td><p>Enum</p></td>\n"
                    + " <td>Description<br /><br />Possible values:<ul><li>\"VALUE_1\"</li><li>\"VALUE_3\"</li></ul></td>\n"
                    + " </tr>\n"
                    + " <tr>\n"
                    + " <td><h5>exclude.enum.desc</h5></td>\n"
                    + " <td style=\"word-wrap: break-word;\">(none)</td>\n"
                    + " <td><p>Enum</p></td>\n"
                    + " <td>Description<br /><br />Possible values:<ul><li>\"A\": First letter of the alphabet</li><li>\"B\": Second letter of the alphabet</li></ul></td>\n"
                    + " </tr>\n"
                    + " <tr>\n"
                    + " <td><h5>exclude.enum.list</h5></td>\n"
                    + " <td style=\"word-wrap: break-word;\">VALUE_1;<wbr>VALUE_3</td>\n"
                    + " <td><p>List&lt;Enum&gt;</p></td>\n"
                    + " <td>Description<br /><br />Possible values:<ul><li>\"VALUE_1\"</li><li>\"VALUE_3\"</li></ul></td>\n"
                    + " </tr>\n"
                    + " </tbody>\n"
                    + "</table>\n";
    // Only the first (and only) generated table is compared.
    final String htmlTable =
            ConfigOptionsDocGenerator.generateTablesForClass(
                            TestConfigGroupWithEnumConstantExclusion.class,
                            ConfigurationOptionLocator.extractConfigOptions(
                                    TestConfigGroupWithEnumConstantExclusion.class))
                    .get(0)
                    .f1;
    assertThat(htmlTable).isEqualTo(expectedTable);
}
/**
 * Opens a multipart upload to S3 for the given file and returns an output
 * stream that buffers into part-sized segments. The returned stream's
 * {@code getStatus()} reflects the final length, ETag and (if any) version id
 * once the upload response has been received.
 */
@Override
public HttpResponseOutputStream<StorageObject> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    final S3Object object = new S3WriteFeature(session, acl).getDetails(file, status);
    // ID for the initiated multipart upload.
    final MultipartUpload multipart;
    try {
        final Path bucket = containerService.getContainer(file);
        multipart = session.getClient().multipartStartUpload(
            bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), object);
        if(log.isDebugEnabled()) {
            log.debug(String.format("Multipart upload started for %s with ID %s",
                multipart.getObjectKey(), multipart.getUploadId()));
        }
    }
    catch(ServiceException e) {
        throw new S3ExceptionMappingService().map("Upload {0} failed", e, file);
    }
    final MultipartOutputStream proxy = new MultipartOutputStream(multipart, file, status);
    // Wrap the part-writing proxy in a segmenting stream so that writes are cut
    // into chunks of the configured multipart size.
    return new HttpResponseOutputStream<StorageObject>(new MemorySegementingOutputStream(proxy,
        new HostPreferences(session.getHost()).getInteger("s3.upload.multipart.size")),
        new S3AttributesAdapter(session.getHost()), status) {
        @Override
        public StorageObject getStatus() {
            // Response is only available after the upload has completed.
            if(proxy.getResponse() != null) {
                if(log.isDebugEnabled()) {
                    log.debug(String.format("Received response %s", proxy.getResponse()));
                }
                object.setContentLength(proxy.getOffset());
                object.setETag(proxy.getResponse().getEtag());
                if(proxy.getResponse().getVersionId() != null) {
                    object.addMetadata(S3Object.S3_VERSION_ID, proxy.getResponse().getVersionId());
                }
            }
            return object;
        }
    };
}
@Test
public void testWriteVirtualHost() throws Exception {
    // Upload 6 MiB through the multipart write feature against a virtual-host style
    // endpoint, then verify size, checksum, and a byte-for-byte read-back.
    final S3AccessControlListFeature acl = new S3AccessControlListFeature(virtualhost);
    final S3MultipartWriteFeature feature = new S3MultipartWriteFeature(virtualhost, acl);
    final TransferStatus status = new TransferStatus();
    // Unknown length: the multipart writer must handle streaming uploads.
    status.setLength(-1L);
    final Path file = new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    final HttpResponseOutputStream<StorageObject> out = feature.write(file, status, new DisabledConnectionCallback());
    final byte[] content = RandomUtils.nextBytes(6 * 1024 * 1024);
    final ByteArrayInputStream in = new ByteArrayInputStream(content);
    final TransferStatus progress = new TransferStatus();
    final BytecountStreamListener count = new BytecountStreamListener();
    new StreamCopier(new TransferStatus(), progress).withListener(count).transfer(in, out);
    assertEquals(content.length, count.getSent());
    in.close();
    out.close();
    assertNotNull(out.getStatus());
    assertEquals(content.length, out.getStatus().getContentLength());
    assertTrue(new S3FindFeature(virtualhost, acl).find(file));
    final PathAttributes attr = new S3AttributesFinderFeature(virtualhost, acl).find(file);
    assertEquals(status.getResponse().getChecksum(), attr.getChecksum());
    assertEquals(content.length, attr.getSize());
    // Read back and compare the full payload.
    final byte[] compare = new byte[content.length];
    final InputStream stream = new S3ReadFeature(virtualhost).read(file,
        new TransferStatus().withLength(content.length), new DisabledConnectionCallback());
    IOUtils.readFully(stream, compare);
    stream.close();
    assertArrayEquals(content, compare);
    new S3DefaultDeleteFeature(virtualhost).delete(Collections.singletonList(file),
        new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Lists a directory. At the root, either enumerates all buckets or — when a
 * bucket name is embedded in the endpoint hostname — lists that bucket's objects
 * directly. Below the root, always lists objects.
 */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    if(directory.isRoot()) {
        final String bucket = RequestEntityRestStorageService.findBucketInHostname(session.getHost());
        if(StringUtils.isEmpty(bucket)) {
            if(log.isDebugEnabled()) {
                log.debug(String.format("No bucket name in host %s", session.getHost().getHostname()));
            }
            // List all buckets
            try {
                return new S3BucketListService(session).list(directory, listener);
            }
            catch(InteroperabilityException e) {
                // Bucket set in hostname that leads to parser failure for XML reply
                log.warn(String.format("Failure %s listing buckets", e));
                // Fall back to listing objects of the (implicit) bucket instead.
                try {
                    return this.listObjects(directory, listener);
                }
                catch(BackgroundException ignored) {
                    log.warn(String.format("Ignore failure %s listing objects", ignored));
                    // Throw original failure
                    throw e;
                }
            }
        }
        // If bucket is specified in hostname, try to connect to this particular bucket only.
    }
    return this.listObjects(directory, listener);
}
@Test
public void testListMultipartUploadDot() throws Exception {
    // An in-progress multipart upload whose key is "<placeholder>/." must surface
    // as a "." entry when listing the placeholder directory.
    final Path bucket = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final S3AccessControlListFeature acl = new S3AccessControlListFeature(session);
    final Path placeholder = new S3DirectoryFeature(session, new S3WriteFeature(session, acl), acl).mkdir(
        new Path(bucket, new AsciiRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.placeholder)),
        new TransferStatus());
    final MultipartUpload multipart = session.getClient().multipartStartUpload(bucket.getName(),
        String.format("%s/.", new S3PathContainerService(session.getHost()).getKey(placeholder)),
        Collections.emptyMap());
    assertNotNull(new S3ListService(session, acl).list(placeholder, new DisabledListProgressListener())
        .find(path -> path.getName().equals(".")));
    // Clean up: abort the multipart upload and delete the placeholder.
    new S3DefaultMultipartService(session).delete(multipart);
    new S3DefaultDeleteFeature(session).delete(Collections.singletonList(placeholder),
        new DisabledPasswordCallback(), new Delete.DisabledCallback());
}
/**
 * Resolves a DMN {@code Import} against a collection of candidate models.
 * Matching is by namespace first; when several models share the namespace, the
 * optional {@code drools:modelName} attribute disambiguates. Returns the matched
 * model on the right, or a human-readable error message on the left.
 *
 * @param importElement the import to resolve; its parent must be a {@link Definitions}
 * @param dmns          the candidate models
 * @param idExtractor   maps a candidate to a QName (namespace URI + model name)
 */
public static <T> Either<String, T> resolveImportDMN(Import importElement, Collection<T> dmns, Function<T, QName> idExtractor) {
    final String importerDMNNamespace = ((Definitions) importElement.getParent()).getNamespace();
    final String importerDMNName = ((Definitions) importElement.getParent()).getName();
    final String importNamespace = importElement.getNamespace();
    final String importName = importElement.getName();
    final String importLocationURI = importElement.getLocationURI(); // This is optional
    final String importModelName = importElement.getAdditionalAttributes().get(TImport.MODELNAME_QNAME);
    LOGGER.debug("Resolving an Import in DMN Model with name={} and namespace={}. " +
                 "Importing a DMN model with namespace={} name={} locationURI={}, modelName={}",
                 importerDMNName, importerDMNNamespace, importNamespace, importName, importLocationURI, importModelName);
    // Candidates living in the imported namespace.
    List<T> matchingDMNList = dmns.stream()
            .filter(m -> idExtractor.apply(m).getNamespaceURI().equals(importNamespace))
            .toList();
    if (matchingDMNList.size() == 1) {
        T located = matchingDMNList.get(0);
        // Check if the located DMN Model in the NS, correspond for the import `drools:modelName`.
        if (importModelName == null || idExtractor.apply(located).getLocalPart().equals(importModelName)) {
            LOGGER.debug("DMN Model with name={} and namespace={} successfully imported a DMN " +
                         "with namespace={} name={} locationURI={}, modelName={}",
                         importerDMNName, importerDMNNamespace, importNamespace, importName, importLocationURI, importModelName);
            return Either.ofRight(located);
        } else {
            // Single namespace match but the modelName hint disagrees: report the mismatch.
            LOGGER.error("DMN Model with name={} and namespace={} can't import a DMN with namespace={}, name={}, modelName={}, " +
                         "located within namespace only {} but does not match for the actual modelName",
                         importerDMNName, importerDMNNamespace, importNamespace, importName, importModelName, idExtractor.apply(located));
            return Either.ofLeft(String.format(
                    "DMN Model with name=%s and namespace=%s can't import a DMN with namespace=%s, name=%s, modelName=%s, " +
                    "located within namespace only %s but does not match for the actual modelName",
                    importerDMNName, importerDMNNamespace, importNamespace, importName, importModelName, idExtractor.apply(located)));
        }
    } else {
        // Zero or several namespace matches: fall back to matching on modelName too.
        List<T> usingNSandName = matchingDMNList.stream()
                .filter(dmn -> idExtractor.apply(dmn).getLocalPart().equals(importModelName))
                .toList();
        if (usingNSandName.size() == 1) {
            LOGGER.debug("DMN Model with name={} and namespace={} successfully imported a DMN " +
                         "with namespace={} name={} locationURI={}, modelName={}",
                         importerDMNName, importerDMNNamespace, importNamespace, importName, importLocationURI, importModelName);
            return Either.ofRight(usingNSandName.get(0));
        } else if (usingNSandName.isEmpty()) {
            // Nothing matches at all.
            LOGGER.error("DMN Model with name={} and namespace={} failed to import a DMN with namespace={} name={} locationURI={}, modelName={}.",
                         importerDMNName, importerDMNNamespace, importNamespace, importName, importLocationURI, importModelName);
            return Either.ofLeft(String.format(
                    "DMN Model with name=%s and namespace=%s failed to import a DMN with namespace=%s name=%s locationURI=%s, modelName=%s. ",
                    importerDMNName, importerDMNNamespace, importNamespace, importName, importLocationURI, importModelName));
        } else {
            // Multiple candidates with the same namespace and modelName: ambiguous.
            LOGGER.error("DMN Model with name={} and namespace={} detected a collision ({} elements) trying to import a DMN with namespace={} name={} locationURI={}, modelName={}",
                         importerDMNName, importerDMNNamespace, usingNSandName.size(), importNamespace, importName, importLocationURI, importModelName);
            return Either.ofLeft(String.format(
                    "DMN Model with name=%s and namespace=%s detected a collision trying to import a DMN with %s namespace, " +
                    "%s name and modelName %s. There are %s DMN files with the same namespace in your project. " +
                    "Please change the DMN namespaces and make them unique to fix this issue.",
                    importerDMNName, importerDMNNamespace, importNamespace, importName, importModelName, usingNSandName.size()));
        }
    }
}
@Test
void nSonly() {
    // Import declares only the namespace (no name, no modelName hint); resolution
    // must find the single candidate registered under "ns1".
    final Import i = makeImport("ns1", null, null);
    final List<QName> available = Arrays.asList(new QName("ns1", "m1"),
                                                new QName("ns2", "m2"),
                                                new QName("ns3", "m3"));
    final Either<String, QName> result = ImportDMNResolverUtil.resolveImportDMN(i, available, Function.identity());
    assertThat(result.isRight()).isTrue();
    assertThat(result.getOrElse(null)).isEqualTo(new QName("ns1", "m1"));
}
/**
 * Allocates a new, zero-initialized byte array of the requested length.
 *
 * @param size the desired array length; a negative value triggers the JVM's
 *             NegativeArraySizeException
 * @return a fresh byte array of the given size
 */
public static byte[] createByteArray( int size ) {
  return new byte[size];
}
@Test
public void testCreateByteArray() {
  // The factory must honour the requested length.
  assertTrue( Const.createByteArray( 5 ).length == 5 );
}
/**
 * Returns the average utilisation of {@code used} against this total as a
 * percentage. Memory is supplied separately; every resource dimension whose
 * total is zero is excluded from the average. Throws (via
 * throwBecauseUsedIsNotSubsetOfTotal) when any used dimension exceeds its total.
 *
 * @param used          the resources in use
 * @param totalMemoryMb total memory in MB for this set
 * @param usedMemoryMb  memory in use in MB
 * @return the average used percentage over all non-zero-total dimensions,
 *         or 100.0 when every dimension was skipped
 */
public double calculateAveragePercentageUsedBy(NormalizedResources used, double totalMemoryMb, double usedMemoryMb) {
    int skippedResourceTypes = 0;
    double total = 0.0;
    // Memory dimension: validate then accumulate its used fraction.
    if (usedMemoryMb > totalMemoryMb) {
        throwBecauseUsedIsNotSubsetOfTotal(used, totalMemoryMb, usedMemoryMb);
    }
    if (totalMemoryMb != 0.0) {
        total += usedMemoryMb / totalMemoryMb;
    } else {
        skippedResourceTypes++;
    }
    // CPU dimension.
    double totalCpu = getTotalCpu();
    if (used.getTotalCpu() > getTotalCpu()) {
        throwBecauseUsedIsNotSubsetOfTotal(used, totalMemoryMb, usedMemoryMb);
    }
    if (totalCpu != 0.0) {
        total += used.getTotalCpu() / getTotalCpu();
    } else {
        skippedResourceTypes++;
    }
    // Generic resource dimensions stored positionally in otherResources.
    if (used.otherResources.length > otherResources.length) {
        throwBecauseUsedIsNotSubsetOfTotal(used, totalMemoryMb, usedMemoryMb);
    }
    for (int i = 0; i < otherResources.length; i++) {
        double totalValue = otherResources[i];
        double usedValue;
        if (i >= used.otherResources.length) {
            //Resources missing from used are using none of that resource
            usedValue = 0.0;
        } else {
            usedValue = used.otherResources[i];
        }
        if (usedValue > totalValue) {
            throwBecauseUsedIsNotSubsetOfTotal(used, totalMemoryMb, usedMemoryMb);
        }
        if (totalValue == 0.0) {
            //Skip any resources where the total is 0, the percent used for this resource isn't meaningful.
            //We fall back to prioritizing by cpu, memory and any other resources by ignoring this value
            skippedResourceTypes++;
            continue;
        }
        total += usedValue / totalValue;
    }
    //Adjust the divisor for the average to account for any skipped resources (those where the total was 0)
    int divisor = 2 + otherResources.length - skippedResourceTypes;
    if (divisor == 0) {
        /*
         * This is an arbitrary choice to make the result consistent with calculateMin. Any value would be valid here, becase there are
         * no (non-zero) resources in the total set of resources, so we're trying to average 0 values.
         */
        return 100.0;
    } else {
        return (total * 100.0) / divisor;
    }
}
@Test
public void testCalculateAvgWithOnlyCpu() {
    // Memory totals are 0, so the memory dimension is skipped and the average is
    // driven by CPU alone: 1 of 2 CPU in use -> 50%.
    NormalizedResources resources =
        new NormalizedResources(normalize(Collections.singletonMap(Constants.COMMON_CPU_RESOURCE_NAME, 2)));
    NormalizedResources usedResources =
        new NormalizedResources(normalize(Collections.singletonMap(Constants.COMMON_CPU_RESOURCE_NAME, 1)));
    double avg = resources.calculateAveragePercentageUsedBy(usedResources, 0, 0);
    assertThat(avg, is(50.0));
}
/**
 * Parses a single "field:value" filter expression into a {@link Filter}.
 * Values recognised as range expressions produce a {@link RangeFilter} — with an
 * open lower bound when the value starts with the range separator, an open upper
 * bound when it ends with it, and both bounds otherwise. All other values yield
 * a {@link SingleValueFilter}.
 *
 * @throws IllegalArgumentException when the expression lacks the separator or
 *                                  either side of it is empty
 */
public Filter parseSingleExpression(final String filterExpression, final List<EntityAttribute> attributes) {
    if (!filterExpression.contains(FIELD_AND_VALUE_SEPARATOR)) {
        throw new IllegalArgumentException(WRONG_FILTER_EXPR_FORMAT_ERROR_MSG);
    }
    final String[] parts = filterExpression.split(FIELD_AND_VALUE_SEPARATOR, 2);
    final String field = parts[0];
    if (field == null || field.isEmpty()) {
        throw new IllegalArgumentException(WRONG_FILTER_EXPR_FORMAT_ERROR_MSG);
    }
    final String value = parts[1];
    if (value == null || value.isEmpty()) {
        throw new IllegalArgumentException(WRONG_FILTER_EXPR_FORMAT_ERROR_MSG);
    }

    final EntityAttribute attribute = getAttributeMetaData(attributes, field);
    final SearchQueryField.Type type = attribute.type();

    if (!isRangeValueExpression(value, type)) {
        return new SingleValueFilter(attribute.id(), extractValue(type, value));
    }
    if (value.startsWith(RANGE_VALUES_SEPARATOR)) {
        // "<sep>hi" — open-ended lower bound.
        return new RangeFilter(attribute.id(),
                null,
                extractValue(type, value.substring(RANGE_VALUES_SEPARATOR.length())));
    }
    if (value.endsWith(RANGE_VALUES_SEPARATOR)) {
        // "lo<sep>" — open-ended upper bound.
        return new RangeFilter(attribute.id(),
                extractValue(type, value.substring(0, value.length() - RANGE_VALUES_SEPARATOR.length())),
                null);
    }
    // "lo<sep>hi" — both bounds present.
    final String[] bounds = value.split(RANGE_VALUES_SEPARATOR);
    return new RangeFilter(attribute.id(),
            extractValue(type, bounds[0]),
            extractValue(type, bounds[1]));
}
@Test
void parsesFilterExpressionCorrectlyForIntRanges() {
    // "number:42<sep>53" on an INT attribute must parse into RangeFilter(42, 53).
    final List<EntityAttribute> entityAttributes = List.of(EntityAttribute.builder()
        .id("number")
        .title("Number")
        .type(SearchQueryField.Type.INT)
        .filterable(true)
        .build());
    assertEquals(
        new RangeFilter("number", 42, 53),
        toTest.parseSingleExpression("number:42" + RANGE_VALUES_SEPARATOR + "53",
            entityAttributes));
}
/**
 * Encodes an index key as a self-describing string of the form
 * {@code "<TYPE>:<KEY>"} (or {@code "T:<KEY_CLASS>:<KEY>"} for custom types with
 * a registered {@code Transformer}).
 *
 * @throws org.infinispan.commons.CacheException-style error (via
 *         CONTAINER.noTransformerForKey) when the key type is unsupported and no
 *         transformer is registered for it
 */
public String keyToString(Object key) {
    // This string should be in the format of:
    // "<TYPE>:<KEY>" for internally supported types or "T:<KEY_CLASS>:<KEY>" for custom types
    // e.g.:
    // "S:my string key"
    // "I:75"
    // "D:5.34"
    // "B:f"
    // "T:com.myorg.MyType:STRING_GENERATED_BY_TRANSFORMER_FOR_MY_TYPE"
    // First going to check if the key is a primitive or a String. Otherwise, check if it's a transformable.
    // If none of those conditions are satisfied, we'll throw a CacheException.
    // Using 'X' for Shorts and 'Y' for Bytes because 'S' is used for Strings and 'B' is being used for Booleans.
    if (key instanceof byte[])
        return "A:" + Base64.getEncoder().encodeToString((byte[]) key); //todo [anistor] need to profile Base64 versus simple hex encoding of the raw bytes
    if (key instanceof String)
        return "S:" + key;
    else if (key instanceof Integer)
        return "I:" + key;
    else if (key instanceof Boolean)
        return "B:" + key;
    else if (key instanceof Long)
        return "L:" + key;
    else if (key instanceof Float)
        return "F:" + key;
    else if (key instanceof Double)
        return "D:" + key;
    else if (key instanceof Short)
        return "X:" + key;
    else if (key instanceof Byte)
        return "Y:" + key;
    else if (key instanceof Character)
        return "C:" + key;
    else if (key instanceof UUID)
        return "U:" + key;
    else {
        // Custom type: delegate to a registered transformer, if any.
        Transformer t = getTransformer(key.getClass());
        if (t != null) {
            return "T:" + key.getClass().getName() + ":" + t.toString(key);
        } else {
            throw CONTAINER.noTransformerForKey(key.getClass().getName());
        }
    }
}
@Test(expectedExceptions = IllegalArgumentException.class)
public void testKeyToStringWithDefaultTransformerForNonSerializableObject() {
    // No transformer is registered for NonSerializableKey, so keyToString must
    // reject it (presumably noTransformerForKey produces an IllegalArgumentException).
    NonSerializableKey key = new NonSerializableKey("test");
    keyTransformationHandler.keyToString(key);
}
/** Dismisses the currently shown dialog by delegating to the dialog controller. */
public void dismiss() {
  mDialogController.dismiss();
}
@Test
public void testDismiss() {
    // Launch the search dialog, then verify dismiss() actually hides it.
    Application context = ApplicationProvider.getApplicationContext();
    // NOTE(review): shadowApplication appears unused in this test.
    ShadowApplication shadowApplication = Shadows.shadowOf(context);
    final AddOnStoreSearchController underTest = new AddOnStoreSearchController(context, "add on");
    underTest.searchForAddOns();
    Assert.assertNotSame(
        GeneralDialogTestUtil.NO_DIALOG, GeneralDialogTestUtil.getLatestShownDialog());
    underTest.dismiss();
    Assert.assertSame(
        GeneralDialogTestUtil.NO_DIALOG, GeneralDialogTestUtil.getLatestShownDialog());
}
/**
 * Tests the given path for a match by wrapping it in a predicate of the same
 * kind and comparing via this class's equals() semantics.
 */
@Override
public boolean test(final Path test) {
    return this.equals(new DefaultPathPredicate(test));
}
@Test
public void testPredicateFileIdDirectory() {
    // Same path + same file id -> match; same path but different file id -> no match.
    final Path t = new Path("/f", EnumSet.of(Path.Type.directory), new PathAttributes().withFileId("1"));
    assertTrue(new DefaultPathPredicate(t).test(t));
    assertTrue(new DefaultPathPredicate(t).test(new Path("/f", EnumSet.of(Path.Type.directory), new PathAttributes().withFileId("1"))));
    assertFalse(new DefaultPathPredicate(t).test(new Path("/f", EnumSet.of(Path.Type.directory), new PathAttributes().withFileId("2"))));
}
String substituteParametersInSqlString(String sql, SqlParameterSource paramSource) { ParsedSql parsedSql = NamedParameterUtils.parseSqlStatement(sql); List<SqlParameter> declaredParams = NamedParameterUtils.buildSqlParameterList(parsedSql, paramSource); if (declaredParams.isEmpty()) { return sql; } for (SqlParameter parSQL: declaredParams) { String paramName = parSQL.getName(); if (!paramSource.hasValue(paramName)) { continue; } Object value = paramSource.getValue(paramName); if (value instanceof SqlParameterValue) { value = ((SqlParameterValue)value).getValue(); } if (!(value instanceof Iterable)) { String ValueForSQLQuery = getValueForSQLQuery(value); sql = sql.replace(":" + paramName, ValueForSQLQuery); continue; } //Iterable int count = 0; String valueArrayStr = ""; for (Object valueTemp: (Iterable)value) { if (count > 0) { valueArrayStr+=", "; } String valueForSQLQuery = getValueForSQLQuery(valueTemp); valueArrayStr += valueForSQLQuery; ++count; } sql = sql.replace(":" + paramName, valueArrayStr); } return sql; }
@Test
public void substituteParametersInSqlString_DoubleLongType() {
    // Doubles are rendered in Java's canonical form (2.1E-7); longs verbatim.
    double sum = 0.00000021d;
    long price = 100000;
    String sql = "Select * from Table Where sum = :sum AND price = :price";
    String sqlToUse = "Select * from Table Where sum = 2.1E-7 AND price = 100000";
    ctx.addDoubleParameter("sum", sum);
    ctx.addLongParameter("price", price);
    String sqlToUseResult = queryLog.substituteParametersInSqlString(sql, ctx);
    assertEquals(sqlToUse, sqlToUseResult);
}
/**
 * Joins the underlying future and passes the result through resolve() before
 * returning it (deserializing the value if this future was created to do so).
 */
@Override
public V joinInternal() {
    return resolve(super.joinInternal());
}
@Test
public void test_joinInternal() {
    // joinInternal must yield the completed value (resolved through the
    // serialization service according to the deserialize flag).
    Object value = "value";
    DeserializingCompletableFuture<Object> future =
        new DeserializingCompletableFuture<>(serializationService, deserialize);
    future.complete(value);
    assertEquals(value, future.joinInternal());
}
/**
 * Reads the {@code host_machine} JVM system property.
 * Empty and absent values are normalised to {@code null}.
 */
@VisibleForTesting
static String parseHostMachine() {
    final String hostMachine = System.getProperty("host_machine");
    return StringUtils.isEmpty(hostMachine) ? null : hostMachine;
}
@Test
public void parseHostMachine() {
    // Save and restore the system property so other tests are unaffected.
    String old = System.getProperty("host_machine");
    try {
        System.setProperty("host_machine", "xxx");
        Assert.assertEquals("xxx", SystemInfo.parseHostMachine());
    } finally {
        if (old == null) {
            System.clearProperty("host_machine");
        } else {
            System.setProperty("host_machine", old);
        }
    }
    // NOTE(review): this assumes the cached host machine was initialised while the
    // property was unset — confirm SystemInfo's initialisation order.
    Assert.assertTrue(StringUtils.isEmpty(SystemInfo.getHostMachine()));
}
/**
 * Collects the ancestors of the given node that are reachable exclusively
 * through input edges whose dam behavior is weaker than {@code safeDamBehavior}
 * ("pipelined" edges), stopping at boundary nodes. A visited node with no such
 * traversable inputs is itself recorded as an ancestor.
 */
@VisibleForTesting
List<ExecNode<?>> calculatePipelinedAncestors(ExecNode<?> node) {
    List<ExecNode<?>> ret = new ArrayList<>();
    AbstractExecNodeExactlyOnceVisitor ancestorVisitor =
        new AbstractExecNodeExactlyOnceVisitor() {
            @Override
            protected void visitNode(ExecNode<?> node) {
                boolean hasAncestor = false;
                // Boundary nodes terminate the traversal: they contribute themselves.
                if (!boundaries.contains(node)) {
                    List<InputProperty> inputProperties = node.getInputProperties();
                    for (int i = 0; i < inputProperties.size(); i++) {
                        // we only go through PIPELINED edges
                        if (inputProperties
                                .get(i)
                                .getDamBehavior()
                                .stricterOrEqual(safeDamBehavior)) {
                            continue;
                        }
                        hasAncestor = true;
                        node.getInputEdges().get(i).getSource().accept(this);
                    }
                }
                // No traversable input -> this node is a pipelined ancestor itself.
                if (!hasAncestor) {
                    ret.add(node);
                }
            }
        };
    node.accept(ancestorVisitor);
    return ret;
}
@Test
void testCalculatePipelinedAncestors() {
    // P = InputProperty.DamBehavior.PIPELINED, E = InputProperty.DamBehavior.END_INPUT
    //
    // 0 ------P----> 1 -E--> 2
    //  \-----P----> 3 -P-/
    //   4 -E-> 5 -P-/  /
    //   6 -----E-----/
    TestingBatchExecNode[] nodes = new TestingBatchExecNode[7];
    for (int i = 0; i < nodes.length; i++) {
        nodes[i] = new TestingBatchExecNode("TestingBatchExecNode" + i);
    }
    nodes[1].addInput(nodes[0]);
    nodes[2].addInput(
        nodes[1],
        InputProperty.builder().damBehavior(InputProperty.DamBehavior.END_INPUT).build());
    nodes[2].addInput(nodes[3]);
    nodes[3].addInput(nodes[0]);
    nodes[3].addInput(nodes[5]);
    nodes[3].addInput(
        nodes[6],
        InputProperty.builder().damBehavior(InputProperty.DamBehavior.END_INPUT).build());
    nodes[5].addInput(
        nodes[4],
        InputProperty.builder().damBehavior(InputProperty.DamBehavior.END_INPUT).build());

    TestingInputPriorityConflictResolver resolver =
        new TestingInputPriorityConflictResolver(
            Collections.singletonList(nodes[2]),
            Collections.emptySet(),
            InputProperty.DamBehavior.END_INPUT);

    // From node 2, only PIPELINED edges are walked: 2 -> 3 -> {0, 5}. Edges to 1,
    // 6 and 4 are END_INPUT and therefore cut off.
    List<ExecNode<?>> ancestors = resolver.calculatePipelinedAncestors(nodes[2]);
    assertThat(ancestors).hasSize(2);
    assertThat(ancestors).contains(nodes[0]);
    assertThat(ancestors).contains(nodes[5]);
}
/**
 * Builds a dot-separated full field name from a stack of field-name segments.
 * The deque is traversed from its last element to its first (via
 * {@code descendingIterator}), so pushing "a", "b", "c" yields "c.b.a".
 *
 * @param fieldNames the segments, innermost pushed last
 * @return the joined name, or the empty string for an empty deque
 */
public static String createFullName(Deque<String> fieldNames) {
  if (fieldNames.isEmpty()) {
    return "";
  }
  // StringJoiner handles both the single-segment and multi-segment cases,
  // replacing the manual iterator/StringBuilder loop.
  StringJoiner joiner = new StringJoiner(".");
  fieldNames.descendingIterator().forEachRemaining(joiner::add);
  return joiner.toString();
}
@Test
void testCreateFullName() {
  // The deque is joined from its last element to its first: [a, b, c] -> "c.b.a".
  String result = HoodieAvroUtils.createFullName(new ArrayDeque<>(Arrays.asList("a", "b", "c")));
  String resultSingle = HoodieAvroUtils.createFullName(new ArrayDeque<>(Collections.singletonList("a")));
  assertEquals("c.b.a", result);
  assertEquals("a", resultSingle);
}
/**
 * Starts the IRC consumer: registers the event listener, waits out the
 * configured command timeout (to let the server finish its connection banner),
 * optionally identifies/enforces the nick with NickServ, then joins the
 * configured channels.
 */
@Override
protected void doStart() throws Exception {
    super.doStart();
    listener = getListener();
    connection.addIRCEventListener(listener);
    LOG.debug("Sleeping for {} seconds before sending commands.", configuration.getCommandTimeout() / 1000);
    // sleep for a few seconds as the server sometimes takes a moment to fully connect, print banners, etc after connection established
    try {
        Thread.sleep(configuration.getCommandTimeout());
    } catch (InterruptedException ex) {
        // Restore the interrupt flag so callers can observe the interruption.
        LOG.info("Interrupted while sleeping before sending commands");
        Thread.currentThread().interrupt();
    }
    if (ObjectHelper.isNotEmpty(configuration.getNickPassword())) {
        LOG.debug("Identifying and enforcing nick with NickServ.");
        // Identify nick and enforce, https://meta.wikimedia.org/wiki/IRC/Instructions#Register_your_nickname.2C_identify.2C_and_enforce
        connection.doPrivmsg("nickserv", "identify " + configuration.getNickPassword());
        connection.doPrivmsg("nickserv", "set enforce on");
    }
    endpoint.joinChannels();
}
@Test
public void doStartTest() throws Exception {
    // Starting the consumer must register the IRC listener and join channels.
    consumer.doStart();

    verify(connection).addIRCEventListener(listener);
    verify(endpoint).joinChannels();
}
/**
 * No-offset (keyset) pagination over products: returns up to {@code pageSize}
 * rows with id below {@code productId} (or the newest page when it is null),
 * newest first, filtered by {@code categoryId} (null matches uncategorized).
 */
public List<ProductPagingSimpleResponse> findAllWithPagingByCategoryId(
        final Long memberId,
        final Long productId,
        final Long categoryId,
        final int pageSize
) {
    QProduct product = QProduct.product;
    QProductImage productImage = QProductImage.productImage;
    QMember member = QMember.member;
    QProductLike productLike = QProductLike.productLike;

    return jpaQueryFactory.select(Projections.constructor(ProductPagingSimpleResponse.class,
                    product.id,
                    // Sentinel image id when the product has no image row.
                    new CaseBuilder()
                            .when(productImage.id.isNull())
                            .then(NOT_FOUND_IMAGE_NUMBER)
                            .otherwise(productImage.id).as("imageId"),
                    // Prefix stored image names with the S3 bucket URL; literal "null" otherwise.
                    new CaseBuilder()
                            .when(productImage.uniqueName.isNull())
                            .then("null")
                            .otherwise(
                                    stringTemplate("CONCAT('https://atwozimage.s3.ap-northeast-2.amazonaws.com/', {0})",
                                            productImage.uniqueName)
                            ).as("uniqueName"),
                    product.description.location,
                    product.description.title,
                    product.price.price,
                    product.statisticCount.visitedCount,
                    product.statisticCount.contactCount,
                    product.productStatus,
                    member.id,
                    member.nickname,
                    product.statisticCount.likedCount,
                    isLikedAlreadyByMe(memberId),
                    product.createdAt
            ))
            .from(product)
            .leftJoin(member).on(product.memberId.eq(member.id))
            // Join the caller's own like row (if any) to compute "liked by me".
            .leftJoin(productLike).on(productLike.productId.eq(product.id).and(productLike.memberId.eq(memberId)))
            .leftJoin(productImage).on(productImage.product.id.eq(product.id))
            .where(
                    // ltProductId(null) presumably yields no predicate (first page) — confirm in helper.
                    ltProductId(productId),
                    categoryId != null ? product.categoryId.eq(categoryId) : product.categoryId.isNull()
            )
            .orderBy(product.id.desc())
            .limit(pageSize)
            .fetch();
}
@Test
void no_offset_페이징_첫_조회() {
    // given: 20 products in category 1, ids 1..20
    for (long i = 1L; i <= 20L; i++) {
        productRepository.save(Product.builder()
                .id(i)
                .categoryId(1L)
                .memberId(1L)
                .description(new Description("title", "content", Location.BUILDING_CENTER))
                .statisticCount(StatisticCount.createDefault())
                .price(new Price(10000))
                .productStatus(ProductStatus.WAITING)
                .build()
        );
    }

    // when: first page (productId == null) of size 10
    List<ProductPagingSimpleResponse> result = productQueryRepository.findAllWithPagingByCategoryId(1L, null, 1L, 10);

    // then: newest-first page covering ids 20..11
    assertSoftly(softly -> {
        softly.assertThat(result).hasSize(10);
        softly.assertThat(result.get(0).id()).isEqualTo(20L);
        softly.assertThat(result.get(9).id()).isEqualTo(11L);
    });
}
/**
 * Diffs two table schemas; returns the merged schema when they differ, or an
 * empty Optional when they are equivalent (no update required).
 */
public static Optional<TableSchema> getUpdatedSchema(
    TableSchema oldSchema, TableSchema newSchema) {
  Result diff = getUpdatedSchema(oldSchema.getFieldsList(), newSchema.getFieldsList());
  if (diff.isEquivalent()) {
    return Optional.empty();
  }
  return diff.getFields()
      .map(fields -> TableSchema.newBuilder().addAllFields(fields).build());
}
@Test
public void testEquivalentSchema() {
    // Schemas whose fields differ only in ordering (both at the top level and
    // inside the nested STRUCT) must be reported as equivalent: no update.
    TableSchema baseSchema1 =
        TableSchema.newBuilder()
            .addFields(
                TableFieldSchema.newBuilder().setName("a").setType(TableFieldSchema.Type.STRING))
            .addFields(
                TableFieldSchema.newBuilder().setName("b").setType(TableFieldSchema.Type.STRING))
            .addFields(
                TableFieldSchema.newBuilder().setName("c").setType(TableFieldSchema.Type.STRING))
            .build();
    TableSchema schema1 =
        TableSchema.newBuilder()
            .addFields(
                TableFieldSchema.newBuilder().setName("a").setType(TableFieldSchema.Type.STRING))
            .addFields(
                TableFieldSchema.newBuilder().setName("b").setType(TableFieldSchema.Type.STRING))
            .addFields(
                TableFieldSchema.newBuilder().setName("c").setType(TableFieldSchema.Type.STRING))
            .addFields(
                TableFieldSchema.newBuilder()
                    .setName("nested")
                    .setType(TableFieldSchema.Type.STRUCT)
                    .addAllFields(baseSchema1.getFieldsList()))
            .build();

    // Same fields as above, but reversed order at every level.
    TableSchema baseSchema2 =
        TableSchema.newBuilder()
            .addFields(
                TableFieldSchema.newBuilder().setName("c").setType(TableFieldSchema.Type.STRING))
            .addFields(
                TableFieldSchema.newBuilder().setName("b").setType(TableFieldSchema.Type.STRING))
            .addFields(
                TableFieldSchema.newBuilder().setName("a").setType(TableFieldSchema.Type.STRING))
            .build();
    TableSchema schema2 =
        TableSchema.newBuilder()
            .addFields(
                TableFieldSchema.newBuilder()
                    .setName("nested")
                    .setType(TableFieldSchema.Type.STRUCT)
                    .addAllFields(baseSchema2.getFieldsList()))
            .addFields(
                TableFieldSchema.newBuilder().setName("b").setType(TableFieldSchema.Type.STRING))
            .addFields(
                TableFieldSchema.newBuilder().setName("c").setType(TableFieldSchema.Type.STRING))
            .addFields(
                TableFieldSchema.newBuilder().setName("a").setType(TableFieldSchema.Type.STRING))
            .build();

    assertFalse(TableSchemaUpdateUtils.getUpdatedSchema(schema1, schema2).isPresent());
}
@Override public boolean retryRequest( HttpRequest request, IOException exception, int execCount, HttpContext context) { if (execCount > maxRetries) { // Do not retry if over max retries return false; } if (nonRetriableExceptions.contains(exception.getClass())) { return false; } else { for (Class<? extends IOException> rejectException : nonRetriableExceptions) { if (rejectException.isInstance(exception)) { return false; } } } if (request instanceof CancellableDependency && ((CancellableDependency) request).isCancelled()) { return false; } // Retry if the request is considered idempotent return Method.isIdempotent(request.getMethod()); }
@Test
public void noRetryOnConnectionClosed() {
    // Even a first-attempt idempotent GET must not be retried when it failed
    // with a ConnectionClosedException.
    HttpGet request = new HttpGet("/");

    assertThat(retryStrategy.retryRequest(request, new ConnectionClosedException(), 1, null))
        .isFalse();
}
/**
 * Pre-execution hook: extracts the SQL about to run and rejects it when it is
 * a write operation against a write-prohibited database.
 *
 * @param context intercepted execution context; arguments[0] is the Query,
 *                arguments[1] its ParameterList
 * @return the (possibly throwable-carrying) context
 */
@Override
public ExecuteContext doBefore(ExecuteContext context) {
    DatabaseInfo databaseInfo = getDataBaseInfo(context);
    String database = databaseInfo.getDatabaseName();
    Query query = (Query) context.getArguments()[0];
    String sql = query.toString((ParameterList) context.getArguments()[1]);
    // openGauss and PostgreSQL keep separate prohibition lists; the check
    // itself is identical, so pick the matching list and apply it once
    // (the original duplicated the whole call in both branches).
    handleWriteOperationIfWriteDisabled(sql, database,
            isGaussdbDatabase
                    ? DatabaseWriteProhibitionManager.getOpenGaussProhibitionDatabases()
                    : DatabaseWriteProhibitionManager.getPostgreSqlProhibitionDatabases(),
            context);
    return context;
}
@Test
public void testDoBefore() throws Exception {
    // Case 1: write prohibition switch off -> no exception.
    GLOBAL_CONFIG.setEnablePostgreSqlWriteProhibition(false);
    ExecuteContext context = ExecuteContext.forMemberMethod(queryExecutor, methodMock, argument, null, null);
    queryExecutorImplInterceptor.before(context);
    Assert.assertNull(context.getThrowableOut());

    // Case 2: switch still off, prohibition set contains the intercepted
    // database -> still no exception.
    Set<String> databases = new HashSet<>();
    databases.add("database-test");
    GLOBAL_CONFIG.setPostgreSqlDatabases(databases);
    queryExecutorImplInterceptor.before(context);
    Assert.assertNull(context.getThrowableOut());

    // Case 3: switch on and the set contains the intercepted database ->
    // the write is rejected with an explanatory exception.
    GLOBAL_CONFIG.setEnablePostgreSqlWriteProhibition(true);
    context = ExecuteContext.forMemberMethod(queryExecutor, methodMock, argument, null, null);
    queryExecutorImplInterceptor.before(context);
    Assert.assertEquals("Database prohibit to write, database: database-test",
            context.getThrowableOut().getMessage());

    // Case 4: switch on, set contains the database, but the SQL is read-only
    // -> allowed.
    Query readQuery = new BatchedQuery(new NativeQuery(READ_SQL, null), null, 0, 0, false);
    context = ExecuteContext.forMemberMethod(queryExecutor, methodMock,
            new Object[]{readQuery, null, null, null, null, null, null}, null, null);
    queryExecutorImplInterceptor.before(context);
    Assert.assertNull(context.getThrowableOut());

    // Case 5: switch on but the prohibition set does not contain the
    // intercepted database -> allowed.
    GLOBAL_CONFIG.setPostgreSqlDatabases(new HashSet<>());
    context = ExecuteContext.forMemberMethod(queryExecutor, methodMock, argument, null, null);
    queryExecutorImplInterceptor.before(context);
    Assert.assertNull(context.getThrowableOut());
}
/**
 * Renders all BMP peer-header fields (flags, type, distinguisher, address,
 * AS, BGP id and timestamp parts) for logging and diagnostics.
 */
@Override
public String toString() {
    return MoreObjects.toStringHelper(getClass())
        .add("flags", flags)
        .add("type", type)
        .add("peerDistinguisher", Arrays.toString(peerDistinguisher))
        .add("peerAddress", peerAddress.getHostAddress())
        .add("peerAs", peerAs)
        .add("peerBgpId", peerBgpId)
        .add("seconds", seconds)
        .add("microseconds", microseconds)
        .toString();
}
@Test
public void testToStringBmp() throws Exception {
    BmpPeer bmpPeer = deserializer.deserialize(headerBytes, 0, headerBytes.length);
    String str = bmpPeer.toString();
    // Every field rendered by BmpPeer#toString must reflect the values parsed
    // from the serialized header bytes.
    assertTrue(StringUtils.contains(str, "flags=" + flags));
    assertTrue(StringUtils.contains(str, "type=" + type));
    assertTrue(StringUtils.contains(str, "peerDistinguisher=" + Arrays.toString(peerDistinguisher)));
    assertTrue(StringUtils.contains(str, "peerAddress=" + peerAddress.getHostAddress()));
    assertTrue(StringUtils.contains(str, "peerAs=" + peerAs));
    assertTrue(StringUtils.contains(str, "peerBgpId=" + peerBgpId));
    assertTrue(StringUtils.contains(str, "seconds=" + seconds));
    assertTrue(StringUtils.contains(str, "microseconds=" + microseconds));
}
/**
 * Creates a backend handler for a raw SQL string: empty statements are
 * skipped outright; DistSQL bypasses the bind engine; everything else is
 * bound against the current database metadata and dispatched by query context.
 *
 * @throws SQLException if handler creation fails downstream
 */
public static ProxyBackendHandler newInstance(final DatabaseType databaseType, final String sql, final SQLStatement sqlStatement,
                                              final ConnectionSession connectionSession, final HintValueContext hintValueContext) throws SQLException {
    // Empty statements require no execution at all.
    if (sqlStatement instanceof EmptyStatement) {
        return new SkipBackendHandler(sqlStatement);
    }
    SQLStatementContext sqlStatementContext = sqlStatement instanceof DistSQLStatement
            ? new DistSQLStatementContext((DistSQLStatement) sqlStatement)
            : new SQLBindEngine(ProxyContext.getInstance().getContextManager().getMetaDataContexts().getMetaData(),
                    connectionSession.getCurrentDatabaseName(), hintValueContext).bind(sqlStatement, Collections.emptyList());
    QueryContext queryContext = new QueryContext(sqlStatementContext, sql, Collections.emptyList(), hintValueContext,
            connectionSession.getConnectionContext(), ProxyContext.getInstance().getContextManager().getMetaDataContexts().getMetaData());
    // Remember the query context on the session before delegating to the
    // context-based factory overload.
    connectionSession.setQueryContext(queryContext);
    return newInstance(databaseType, queryContext, connectionSession, false);
}
@Test
void assertNewInstanceWithEmptyString() throws SQLException {
    // An empty SQL string parses to an EmptyStatement, which must be skipped.
    ProxyBackendHandler handler = ProxyBackendHandlerFactory.newInstance(
            databaseType, "", new EmptyStatement(), connectionSession, new HintValueContext());
    assertThat(handler, instanceOf(SkipBackendHandler.class));
}
/**
 * Sets the {@code MergePolicyConfig} for this map configuration.
 *
 * @param mergePolicyConfig the merge policy configuration to set (must not be {@code null})
 * @return this {@code MapConfig} instance for call chaining
 */
public MapConfig setMergePolicyConfig(MergePolicyConfig mergePolicyConfig) {
    this.mergePolicyConfig = checkNotNull(mergePolicyConfig, "mergePolicyConfig cannot be null!");
    return this;
}
@Test
public void testSetMergePolicyConfig() {
    // The setter must store the given policy class name and batch size verbatim.
    MergePolicyConfig mergePolicyConfig = new MergePolicyConfig()
            .setPolicy(PassThroughMergePolicy.class.getName())
            .setBatchSize(2342);

    MapConfig config = new MapConfig();
    config.setMergePolicyConfig(mergePolicyConfig);

    assertEquals(PassThroughMergePolicy.class.getName(), config.getMergePolicyConfig().getPolicy());
    assertEquals(2342, config.getMergePolicyConfig().getBatchSize());
}
/**
 * A topic id is contained iff the image resolves it to a topic whose name is
 * in the backing name set; non-Uuid arguments are never contained.
 */
@Override
public boolean contains(Object o) {
    if (!(o instanceof Uuid)) {
        return false;
    }
    TopicImage topicImage = image.getTopic((Uuid) o);
    return topicImage != null && topicNames.contains(topicImage.name());
}
@Test
public void testContains() {
    Uuid fooUuid = Uuid.randomUuid();
    Uuid barUuid = Uuid.randomUuid();
    Uuid bazUuid = Uuid.randomUuid();
    Uuid quxUuid = Uuid.randomUuid();
    // Note: bazUuid is registered in the image under the name "qux", which is
    // NOT in the name set below, so it must not be reported as contained.
    TopicsImage topicsImage =
        new MetadataImageBuilder()
            .addTopic(fooUuid, "foo", 3)
            .addTopic(barUuid, "bar", 3)
            .addTopic(bazUuid, "qux", 3)
            .build()
            .topics();

    Set<Uuid> topicIds = new TopicIds(mkSet("foo", "bar", "baz"), topicsImage);

    assertTrue(topicIds.contains(fooUuid));
    assertTrue(topicIds.contains(barUuid));
    assertFalse(topicIds.contains(bazUuid));
    // quxUuid is entirely unknown to the image.
    assertFalse(topicIds.contains(quxUuid));
}
/**
 * Creates a field-based {@code Getter}: values are read reflectively via
 * {@code field::get} and the concrete getter is constructed as a
 * {@code FieldGetter} bound to the parent chain and modifier (e.g. "[any]").
 *
 * @throws Exception if reflective access or type resolution fails downstream
 */
public static Getter newFieldGetter(Object object, Getter parent, Field field, String modifier) throws Exception {
    return newGetter(object, parent, modifier, field.getType(), field::get,
        (t, et) -> new FieldGetter(parent, field, modifier, t, et));
}
@Test
public void newFieldGetter_whenExtractingFromEmpty_Array_FieldAndParentIsNonEmptyMultiResult_thenInferReturnType() throws Exception {
    // Even with an empty attributes array, the getter reports Integer as the
    // return type — presumably inferred from the field's component type;
    // confirm against GetterFactory.newGetter.
    OuterObject object = new OuterObject("name", InnerObject.emptyInner("inner"));
    Getter parentGetter = GetterFactory.newFieldGetter(object, null, innersArrayField, "[any]");
    Getter innerObjectNameGetter = GetterFactory.newFieldGetter(object, parentGetter, innerAttributesArrayField, "[any]");
    Class<?> returnType = innerObjectNameGetter.getReturnType();
    assertEquals(Integer.class, returnType);
}
/**
 * Returns an independent copy backed by a fresh set; mutations of the copy do
 * not affect this instance.
 */
@Override
public FinishedTriggersSet copy() {
    return fromSet(Sets.newHashSet(finishedTriggers));
}
@Test
public void testCopy() throws Exception {
    // The copy must use a distinct backing set, not alias the original's.
    FinishedTriggersSet finishedSet = FinishedTriggersSet.fromSet(new HashSet<>());
    assertThat(
        finishedSet.copy().getFinishedTriggers(),
        not(theInstance(finishedSet.getFinishedTriggers())));
}
/**
 * Demonstrates the Converter pattern: one DTO converted to an entity, then a
 * batch of entities converted back to DTOs, logging each step.
 */
public static void main(String[] args) {
    Converter<UserDto, User> userConverter = new UserConverter();

    var dtoUser = new UserDto("John", "Doe", true, "whatever[at]wherever.com");
    var user = userConverter.convertFromDto(dtoUser);
    LOGGER.info("Entity converted from DTO: {}", user);

    List<User> users = List.of(
        new User("Camile", "Tough", false, "124sad"),
        new User("Marti", "Luther", true, "42309fd"),
        new User("Kate", "Smith", true, "if0243"));

    LOGGER.info("Domain entities:");
    for (User domainUser : users) {
        LOGGER.info(domainUser.toString());
    }

    LOGGER.info("DTO entities converted from domain:");
    List<UserDto> dtoEntities = userConverter.createFromEntities(users);
    for (UserDto dto : dtoEntities) {
        LOGGER.info(dto.toString());
    }
}
@Test
void shouldExecuteApplicationWithoutException() {
    // Smoke test: the demo's main method must complete without throwing.
    assertDoesNotThrow(() -> App.main(new String[]{}));
}
/**
 * Reads the plugin properties file from {@code pluginPath} and maps its
 * entries onto a {@code PluginDescriptor}.
 */
@Override
public PluginDescriptor find(Path pluginPath) {
    Properties properties = readProperties(pluginPath);
    return createPluginDescriptor(properties);
}
@Test
public void testFind() throws Exception {
    PluginDescriptorFinder descriptorFinder = new PropertiesPluginDescriptorFinder();

    PluginDescriptor plugin1 = descriptorFinder.find(pluginsPath.resolve("test-plugin-1"));
    PluginDescriptor plugin2 = descriptorFinder.find(pluginsPath.resolve("test-plugin-2"));

    // Plugin 1: fully populated descriptor including dependencies, one of
    // which carries a version-support constraint.
    assertEquals("test-plugin-1", plugin1.getPluginId());
    assertEquals("Test Plugin 1", plugin1.getPluginDescription());
    assertEquals(TestPlugin.class.getName(), plugin1.getPluginClass());
    assertEquals("0.0.1", plugin1.getVersion());
    assertEquals("Decebal Suiu", plugin1.getProvider());
    assertEquals(2, plugin1.getDependencies().size());
    assertEquals("test-plugin-2", plugin1.getDependencies().get(0).getPluginId());
    assertEquals("test-plugin-3", plugin1.getDependencies().get(1).getPluginId());
    assertEquals("~1.0", plugin1.getDependencies().get(1).getPluginVersionSupport());
    assertEquals("Apache-2.0", plugin1.getLicense());
    assertEquals(">=1", plugin1.getRequires());
    assertTrue(versionManager.checkVersionConstraint("1.0.0", plugin1.getRequires()));
    assertFalse(versionManager.checkVersionConstraint("0.1.0", plugin1.getRequires()));

    // Plugin 2: minimal descriptor; optional fields fall back to defaults.
    assertEquals("test-plugin-2", plugin2.getPluginId());
    assertEquals("", plugin2.getPluginDescription());
    assertEquals(TestPlugin.class.getName(), plugin2.getPluginClass());
    assertEquals("0.0.1", plugin2.getVersion());
    assertEquals("Decebal Suiu", plugin2.getProvider());
    assertEquals(0, plugin2.getDependencies().size());
    assertEquals("*", plugin2.getRequires()); // Default is *
    assertTrue(versionManager.checkVersionConstraint("1.0.0", plugin2.getRequires()));
}
/**
 * Attempts to unblock a term buffer at {@code blockedOffset} where a
 * publisher appears stalled, by overwriting the stalled claim with a padding
 * frame so consumers can make progress.
 */
public static Status unblock(
    final UnsafeBuffer logMetaDataBuffer,
    final UnsafeBuffer termBuffer,
    final int blockedOffset,
    final int tailOffset,
    final int termId)
{
    Status status = NO_ACTION;
    int frameLength = frameLengthVolatile(termBuffer, blockedOffset);

    if (frameLength < 0)
    {
        // Negative length marks a claimed-but-uncommitted message: the full
        // claimed length is known, so pad it out immediately.
        resetHeader(logMetaDataBuffer, termBuffer, blockedOffset, termId, -frameLength);
        status = UNBLOCKED;
    }
    else if (0 == frameLength)
    {
        // Length not yet written: scan forward frame-by-frame for the next
        // non-zero length to determine how much space was claimed.
        int currentOffset = blockedOffset + FRAME_ALIGNMENT;

        while (currentOffset < tailOffset)
        {
            frameLength = frameLengthVolatile(termBuffer, currentOffset);

            if (frameLength != 0)
            {
                // Re-confirm nothing was committed behind us before padding the gap.
                if (scanBackToConfirmZeroed(termBuffer, currentOffset, blockedOffset))
                {
                    final int length = currentOffset - blockedOffset;
                    resetHeader(logMetaDataBuffer, termBuffer, blockedOffset, termId, length);
                    status = UNBLOCKED;
                }

                break;
            }

            currentOffset += FRAME_ALIGNMENT;
        }

        if (currentOffset == termBuffer.capacity())
        {
            // Scanned to the end of the term: pad to the end if the blocked
            // slot is still uncommitted (re-checked volatile).
            if (0 == frameLengthVolatile(termBuffer, blockedOffset))
            {
                final int length = currentOffset - blockedOffset;
                resetHeader(logMetaDataBuffer, termBuffer, blockedOffset, termId, length);
                status = UNBLOCKED_TO_END;
            }
        }
    }

    return status;
}
@Test
void shouldTakeNoActionToEndOfPartitionIfMessageNonCommittedAfterScan() {
    // First volatile read returns 0 (not yet written); the re-read returns a
    // negative (uncommitted) length, so the unblocker must leave the term
    // buffer untouched.
    final int messageLength = HEADER_LENGTH * 4;
    final int termOffset = TERM_BUFFER_CAPACITY - messageLength;
    final int tailOffset = TERM_BUFFER_CAPACITY;

    when(mockTermBuffer.getIntVolatile(termOffset))
        .thenReturn(0)
        .thenReturn(-messageLength);

    assertEquals(
        NO_ACTION,
        TermUnblocker.unblock(mockLogMetaDataBuffer, mockTermBuffer, termOffset, tailOffset, TERM_ID));
}
/**
 * Performs a controlled rolling update of the ZooKeeper ensemble: lists the
 * pods, determines which need a restart, rolls non-leader pods first (missing
 * and non-ready pods before healthy ones) and the leader last.
 */
public Future<Void> maybeRollingUpdate(Reconciliation reconciliation, int replicas,
                                       Labels selectorLabels, Function<Pod, List<String>> podRestart,
                                       TlsPemIdentity coTlsPemIdentity) {
    String namespace = reconciliation.namespace();

    // We prepare the list of expected Pods. This is needed as we need to account for pods which might be missing.
    // We need to wait for them before rolling any running pods to avoid problems.
    List<String> expectedPodNames = new ArrayList<>();
    for (int i = 0; i < replicas; i++) {
        expectedPodNames.add(KafkaResources.zookeeperPodName(reconciliation.name(), i));
    }

    return podOperator.listAsync(namespace, selectorLabels)
            .compose(pods -> {
                // Build a roll context entry for every expected pod.
                ZookeeperClusterRollContext clusterRollContext = new ZookeeperClusterRollContext();

                for (String podName : expectedPodNames) {
                    Pod pod = pods.stream().filter(p -> podName.equals(p.getMetadata().getName())).findFirst().orElse(null);

                    if (pod != null) {
                        List<String> restartReasons = podRestart.apply(pod);
                        final boolean ready = podOperator.isReady(namespace, pod.getMetadata().getName());
                        ZookeeperPodContext podContext = new ZookeeperPodContext(podName, restartReasons, true, ready);
                        if (restartReasons != null && !restartReasons.isEmpty()) {
                            LOGGER.debugCr(reconciliation, "Pod {} should be rolled due to {}", podContext.getPodName(), restartReasons);
                        } else {
                            LOGGER.debugCr(reconciliation, "Pod {} does not need to be rolled", podContext.getPodName());
                        }
                        clusterRollContext.add(podContext);
                    } else {
                        // Pod does not exist, but we still add it to the roll context because we should not roll
                        // any other pods before it is ready
                        LOGGER.debugCr(reconciliation, "Pod {} does not exist and cannot be rolled", podName);
                        ZookeeperPodContext podContext = new ZookeeperPodContext(podName, null, false, false);
                        clusterRollContext.add(podContext);
                    }
                }

                // A null context downstream means "nothing to restart".
                if (clusterRollContext.requiresRestart()) {
                    return Future.succeededFuture(clusterRollContext);
                } else {
                    return Future.succeededFuture(null);
                }
            }).compose(clusterRollContext -> {
                if (clusterRollContext != null) {
                    Promise<Void> promise = Promise.promise();
                    Future<String> leaderFuture = leaderFinder.findZookeeperLeader(reconciliation, clusterRollContext.podNames(), coTlsPemIdentity);

                    leaderFuture.compose(leader -> {
                        LOGGER.debugCr(reconciliation, "Zookeeper leader is " + (ZookeeperLeaderFinder.UNKNOWN_LEADER.equals(leader) ? "unknown" : "pod " + leader));
                        Future<Void> fut = Future.succeededFuture();

                        // Then roll each non-leader pod => the leader is rolled last
                        for (ZookeeperPodContext podContext : clusterRollContext.getPodContextsWithNonExistingAndNonReadyFirst()) {
                            if (podContext.requiresRestart() && !podContext.getPodName().equals(leader)) {
                                LOGGER.debugCr(reconciliation, "Pod {} needs to be restarted", podContext.getPodName());
                                // roll the pod and wait until it is ready
                                // this prevents rolling into faulty state (note: this applies just for ZK pods)
                                fut = fut.compose(ignore -> restartPod(reconciliation, podContext.getPodName(), podContext.reasonsToRestart));
                            } else {
                                if (podContext.requiresRestart()) {
                                    LOGGER.debugCr(reconciliation, "Deferring restart of leader {}", podContext.getPodName());
                                } else {
                                    LOGGER.debugCr(reconciliation, "Pod {} does not need to be restarted", podContext.getPodName());
                                }
                                // Still wait for the pod to be ready before moving on.
                                fut = fut.compose(ignore -> podOperator.readiness(reconciliation, reconciliation.namespace(), podContext.getPodName(), READINESS_POLLING_INTERVAL_MS, operationTimeoutMs));
                            }
                        }

                        // Check if we have a leader and if it needs rolling
                        if (ZookeeperLeaderFinder.UNKNOWN_LEADER.equals(leader) || clusterRollContext.get(leader) == null || !clusterRollContext.get(leader).requiresRestart()) {
                            return fut;
                        } else {
                            // Roll the leader pod
                            return fut.compose(ar -> {
                                // the leader is rolled as the last
                                LOGGER.debugCr(reconciliation, "Restarting leader pod (previously deferred) {}", leader);
                                return restartPod(reconciliation, leader, clusterRollContext.get(leader).reasonsToRestart);
                            });
                        }
                    }).onComplete(promise);

                    return promise.future();
                } else {
                    return Future.succeededFuture();
                }
            });
}
@Test
public void testNonReadinessOfPodCanPreventAllPodRestarts(VertxTestContext context) {
    // One follower pod never becomes ready and readiness checks fail, so the
    // roller must give up without restarting any pod at all.
    final String followerPodNonReady = "name-zookeeper-1";
    final String leaderPodNeedsRestart = "name-zookeeper-2";
    final String followerPodNeedsRestart = "name-zookeeper-0";
    final Set<String> needsRestart = Set.of(followerPodNeedsRestart, leaderPodNeedsRestart);
    Function<Pod, List<String>> shouldRestart = pod -> {
        if (needsRestart.contains(pod.getMetadata().getName())) {
            return List.of("Should restart");
        } else {
            return List.of();
        }
    };

    PodOperator podOperator = mock(PodOperator.class);
    when(podOperator.isReady(any(), eq(followerPodNeedsRestart))).thenReturn(true);
    when(podOperator.isReady(any(), eq(followerPodNonReady))).thenReturn(false);
    when(podOperator.isReady(any(), eq(leaderPodNeedsRestart))).thenReturn(true);
    when(podOperator.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(PODS));
    // Every readiness wait fails, which should abort the rolling update.
    when(podOperator.readiness(any(), any(), any(), anyLong(), anyLong())).thenReturn(Future.failedFuture("failure"));

    ZookeeperLeaderFinder leaderFinder = mock(ZookeeperLeaderFinder.class);
    when(leaderFinder.findZookeeperLeader(any(), any(), any())).thenReturn(Future.succeededFuture(leaderPodNeedsRestart));

    MockZooKeeperRoller roller = new MockZooKeeperRoller(podOperator, leaderFinder, 300_00L);

    roller.maybeRollingUpdate(Reconciliation.DUMMY_RECONCILIATION, 3, DUMMY_SELECTOR, shouldRestart, DUMMY_IDENTITY)
            .onComplete(context.failing(v -> context.verify(() -> {
                // No pod may have been restarted despite two requesting it.
                assertThat(roller.podRestarts.size(), is(0));
                context.completeNow();
            })));
}
/**
 * Builds the full argument array for a resource method invocation: positional
 * arguments come first, then each remaining declared parameter is filled from
 * the request context according to its {@code ParamType} (keys, headers,
 * projections, paging, attachments, query/action params, …), falling back to
 * defaults or null for optional parameters.
 */
@SuppressWarnings("deprecation")
static Object[] buildArgs(final Object[] positionalArguments,
                          final ResourceMethodDescriptor resourceMethod,
                          final ServerResourceContext context,
                          final DynamicRecordTemplate template,
                          final ResourceMethodConfig resourceMethodConfig) {
    List<Parameter<?>> parameters = resourceMethod.getParameters();
    Object[] arguments = Arrays.copyOf(positionalArguments, parameters.size());

    fixUpComplexKeySingletonArraysInArguments(arguments);

    // Tracks whether any parameter consumes request attachments; if none does
    // but attachments were sent, the request is rejected at the end.
    boolean attachmentsDesired = false;
    for (int i = positionalArguments.length; i < parameters.size(); ++i) {
        Parameter<?> param = parameters.get(i);
        try {
            if (param.getParamType() == Parameter.ParamType.KEY || param.getParamType() == Parameter.ParamType.ASSOC_KEY_PARAM) {
                Object value = context.getPathKeys().get(param.getName());
                if (value != null) {
                    arguments[i] = value;
                    continue;
                }
            } else if (param.getParamType() == Parameter.ParamType.CALLBACK) {
                continue;
            } else if (param.getParamType() == Parameter.ParamType.PARSEQ_CONTEXT_PARAM || param.getParamType() == Parameter.ParamType.PARSEQ_CONTEXT) {
                continue; // don't know what to fill in yet
            } else if (param.getParamType() == Parameter.ParamType.HEADER) {
                HeaderParam headerParam = param.getAnnotations().get(HeaderParam.class);
                String value = context.getRequestHeaders().get(headerParam.value());
                arguments[i] = value;
                continue;
            }
            //Since we have multiple different types of MaskTrees that can be passed into resource methods,
            //we must evaluate based on the param type (annotation used)
            else if (param.getParamType() == Parameter.ParamType.PROJECTION || param.getParamType() == Parameter.ParamType.PROJECTION_PARAM) {
                arguments[i] = context.getProjectionMask();
                continue;
            } else if (param.getParamType() == Parameter.ParamType.METADATA_PROJECTION_PARAM) {
                arguments[i] = context.getMetadataProjectionMask();
                continue;
            } else if (param.getParamType() == Parameter.ParamType.PAGING_PROJECTION_PARAM) {
                arguments[i] = context.getPagingProjectionMask();
                continue;
            } else if (param.getParamType() == Parameter.ParamType.CONTEXT || param.getParamType() == Parameter.ParamType.PAGING_CONTEXT_PARAM) {
                PagingContext ctx = RestUtils.getPagingContext(context, (PagingContext) param.getDefaultValue());
                arguments[i] = ctx;
                continue;
            } else if (param.getParamType() == Parameter.ParamType.PATH_KEYS || param.getParamType() == Parameter.ParamType.PATH_KEYS_PARAM) {
                arguments[i] = context.getPathKeys();
                continue;
            } else if (param.getParamType() == Parameter.ParamType.PATH_KEY_PARAM) {
                Object value = context.getPathKeys().get(param.getName());
                if (value != null) {
                    arguments[i] = value;
                    continue;
                }
            } else if (param.getParamType() == Parameter.ParamType.RESOURCE_CONTEXT || param.getParamType() == Parameter.ParamType.RESOURCE_CONTEXT_PARAM) {
                arguments[i] = context;
                continue;
            } else if (param.getParamType() == Parameter.ParamType.VALIDATOR_PARAM) {
                RestLiDataValidator validator = new RestLiDataValidator(resourceMethod.getResourceModel().getResourceClass().getAnnotations(),
                        resourceMethod.getResourceModel().getValueClass(), resourceMethod.getMethodType());
                arguments[i] = validator;
                continue;
            } else if (param.getParamType() == Parameter.ParamType.RESTLI_ATTACHMENTS_PARAM) {
                arguments[i] = context.getRequestAttachmentReader();
                attachmentsDesired = true;
                continue;
            } else if (param.getParamType() == Parameter.ParamType.UNSTRUCTURED_DATA_WRITER_PARAM) {
                // The OutputStream is passed to the resource implementation in a synchronous call. Upon return of the
                // resource method, all the bytes would haven't written to the OutputStream. The EntityStream would have
                // contained all the bytes by the time data is requested. The ownership of the OutputStream is passed to
                // the ByteArrayOutputStreamWriter, which is responsible of closing the OutputStream if necessary.
                ByteArrayOutputStream out = new ByteArrayOutputStream();
                context.setResponseEntityStream(EntityStreams.newEntityStream(new ByteArrayOutputStreamWriter(out)));
                arguments[i] = new UnstructuredDataWriter(out, context);
                continue;
            } else if (param.getParamType() == Parameter.ParamType.UNSTRUCTURED_DATA_REACTIVE_READER_PARAM) {
                arguments[i] = new UnstructuredDataReactiveReader(context.getRequestEntityStream(), context.getRawRequest().getHeader(RestConstants.HEADER_CONTENT_TYPE));
                continue;
            } else if (param.getParamType() == Parameter.ParamType.POST) {
                // handle action parameters
                if (template != null) {
                    DataMap data = template.data();
                    if (data.containsKey(param.getName())) {
                        arguments[i] = template.getValue(param);
                        continue;
                    }
                }
            } else if (param.getParamType() == Parameter.ParamType.QUERY) {
                Object value;
                if (DataTemplate.class.isAssignableFrom(param.getType())) {
                    value = buildDataTemplateArgument(context.getStructuredParameter(param.getName()), param, resourceMethodConfig.shouldValidateQueryParams());
                } else {
                    value = buildRegularArgument(context, param, resourceMethodConfig.shouldValidateQueryParams());
                }

                if (value != null) {
                    arguments[i] = value;
                    continue;
                }
            } else if (param.getParamType() == Parameter.ParamType.BATCH || param.getParamType() == Parameter.ParamType.RESOURCE_KEY) {
                // should not come to this routine since it should be handled by passing in positionalArguments
                throw new RoutingException("Parameter '" + param.getName() + "' should be passed in as a positional argument",
                        HttpStatus.S_400_BAD_REQUEST.getCode());
            } else {
                // unknown param type
                throw new RoutingException(
                        "Parameter '" + param.getName() + "' has an unknown parameter type '" + param.getParamType().name() + "'",
                        HttpStatus.S_400_BAD_REQUEST.getCode());
            }
        } catch (TemplateRuntimeException e) {
            throw new RoutingException("Parameter '" + param.getName() + "' is invalid", HttpStatus.S_400_BAD_REQUEST.getCode());
        }

        try {
            // Handling null-valued parameters not provided in resource context or entity body
            // check if it is optional parameter
            if (param.isOptional() && param.hasDefaultValue()) {
                arguments[i] = param.getDefaultValue();
            } else if (param.isOptional() && !param.getType().isPrimitive()) {
                // optional primitive parameter must have default value or provided
                arguments[i] = null;
            } else {
                throw new RoutingException("Parameter '" + param.getName() + "' is required", HttpStatus.S_400_BAD_REQUEST.getCode());
            }
        } catch (ResourceConfigException e) {
            // Parameter default value format exception should result in server error code 500.
            throw new RestLiServiceException(HttpStatus.S_500_INTERNAL_SERVER_ERROR,
                    "Parameter '" + param.getName() + "' default value is invalid", e);
        }
    }

    //Verify that if the resource method did not expect attachments, and attachments were present, that we drain all
    //incoming attachments and send back a bad request. We must take precaution here since simply ignoring the request
    //attachments is not correct behavior here. Ignoring other request level constructs such as headers or query parameters
    //that were not needed is safe, but not for request attachments.
    if (!attachmentsDesired && context.getRequestAttachmentReader() != null) {
        throw new RestLiServiceException(HttpStatus.S_400_BAD_REQUEST,
                "Resource method endpoint invoked does not accept any request attachments.");
    }
    return arguments;
}
@Test
@SuppressWarnings("deprecation")
public void testPagingContextParamType() {
    // Both PAGING_CONTEXT_PARAM and the deprecated CONTEXT param type must be
    // filled with the default PagingContext when start/count are absent.
    String testParamKey = "testParam";
    ServerResourceContext mockResourceContext = EasyMock.createMock(ServerResourceContext.class);
    PagingContext pagingContext = new PagingContext(RestConstants.DEFAULT_START, RestConstants.DEFAULT_COUNT, false, false);
    EasyMock.expect(mockResourceContext.getParameter(RestConstants.START_PARAM)).andReturn(null).anyTimes();
    EasyMock.expect(mockResourceContext.getParameter(RestConstants.COUNT_PARAM)).andReturn(null).anyTimes();
    EasyMock.expect(mockResourceContext.getRequestAttachmentReader()).andReturn(null);
    EasyMock.replay(mockResourceContext);
    List<Parameter<?>> parameters = new ArrayList<>();
    Parameter<PagingContext> param1 = new Parameter<>(testParamKey, PagingContext.class, null, false, null,
            Parameter.ParamType.PAGING_CONTEXT_PARAM, false, AnnotationSet.EMPTY);
    Parameter<PagingContext> param2 = new Parameter<>(testParamKey, PagingContext.class, null, false, null,
            Parameter.ParamType.CONTEXT, false, AnnotationSet.EMPTY);
    parameters.add(param1);
    parameters.add(param2);
    Object[] results = ArgumentBuilder.buildArgs(new Object[0], getMockResourceMethod(parameters), mockResourceContext, null,
            getMockResourceMethodConfig(false));
    Assert.assertEquals(results[0], pagingContext);
    Assert.assertEquals(results[1], pagingContext);
}
/**
 * Enforces search-version constraints before the resource is invoked:
 * method-level annotations take precedence over class-level ones. The
 * repeatable container (SupportedSearchVersions) is checked alongside the
 * single form so both usages are honored.
 */
@Override
public void filter(ContainerRequestContext requestContext) throws IOException {
    if (resourceInfo.getResourceMethod().isAnnotationPresent(SupportedSearchVersion.class)
            || resourceInfo.getResourceMethod().isAnnotationPresent(SupportedSearchVersions.class)) {
        checkVersion(resourceInfo.getResourceMethod().getAnnotationsByType(SupportedSearchVersion.class));
    } else if (resourceInfo.getResourceClass().isAnnotationPresent(SupportedSearchVersion.class)
            || resourceInfo.getResourceClass().isAnnotationPresent(SupportedSearchVersions.class)) {
        checkVersion(resourceInfo.getResourceClass().getAnnotationsByType(SupportedSearchVersion.class));
    }
}
@Test
public void testFilterWithInvalidVersion() throws Exception {
    // A resource method requiring Elasticsearch ^7 running against ES 6 must
    // fail the request with an InternalServerErrorException naming the constraint.
    final Method resourceMethod = TestResourceWithMethodAnnotationRequiresES7.class.getMethod("methodWithAnnotation");
    when(resourceInfo.getResourceMethod()).thenReturn(resourceMethod);
    when(versionProvider.get()).thenReturn(elasticSearchV6);

    Exception exception = assertThrows(InternalServerErrorException.class, () -> {
        filter.filter(requestContext);
    });
    assertTrue(exception.getMessage().contains("Elasticsearch ^7"));
    verify(versionProvider, times(1)).get();
}
/**
 * Builds the INSERT OVERWRITE statement used to manually refresh a materialized table.
 *
 * @param tableIdentifier fully qualified identifier of the materialized table
 * @param definitionQuery the table's defining SELECT query, wrapped as a sub-query
 * @param partitionSpec partition key/value pairs; when non-empty a WHERE clause
 *     restricting the overwrite to those partitions is appended (in iteration order)
 * @param dynamicOptions dynamic table options folded into the target table reference
 * @return the complete refresh statement text
 */
@VisibleForTesting
protected static String getRefreshStatement(
        ObjectIdentifier tableIdentifier,
        String definitionQuery,
        Map<String, String> partitionSpec,
        Map<String, String> dynamicOptions) {
    // Target table reference, enriched with any dynamic options.
    String targetTable = generateTableWithDynamicOptions(tableIdentifier, dynamicOptions);
    StringBuilder statement =
            new StringBuilder(
                    String.format(
                            "INSERT OVERWRITE %s\n SELECT * FROM (%s)",
                            targetTable, definitionQuery));
    if (!partitionSpec.isEmpty()) {
        // Join "key = 'value'" conditions with AND, preserving map iteration order.
        statement.append("\n WHERE ");
        boolean first = true;
        for (Map.Entry<String, String> entry : partitionSpec.entrySet()) {
            if (!first) {
                statement.append(" AND ");
            }
            statement.append(String.format("%s = '%s'", entry.getKey(), entry.getValue()));
            first = false;
        }
    }
    return statement.toString();
}
// Pins the exact SQL text produced by getRefreshStatement: the bare INSERT OVERWRITE
// form for an empty partition spec, and the WHERE-augmented form for a two-entry
// LinkedHashMap (whose insertion order fixes the AND-joined predicate order).
@Test void testGetManuallyRefreshStatement() { ObjectIdentifier tableIdentifier = ObjectIdentifier.of("catalog", "database", "my_materialized_table"); String query = "SELECT * FROM my_source_table"; assertThat( MaterializedTableManager.getRefreshStatement( tableIdentifier, query, Collections.emptyMap(), Collections.emptyMap())) .isEqualTo( "INSERT OVERWRITE `catalog`.`database`.`my_materialized_table`\n" + " SELECT * FROM (SELECT * FROM my_source_table)"); Map<String, String> partitionSpec = new LinkedHashMap<>(); partitionSpec.put("k1", "v1"); partitionSpec.put("k2", "v2"); assertThat( MaterializedTableManager.getRefreshStatement( tableIdentifier, query, partitionSpec, Collections.emptyMap())) .isEqualTo( "INSERT OVERWRITE `catalog`.`database`.`my_materialized_table`\n" + " SELECT * FROM (SELECT * FROM my_source_table)\n" + " WHERE k1 = 'v1' AND k2 = 'v2'"); }
// Pulls the timer checkpoint from the master broker and copies its fields into the
// local checkpoint. Only runs on a slave (masterAddr set) whose timer message store
// exists and is not itself running the dequeue. Any failure is logged and swallowed:
// this is a best-effort periodic sync, not a hard consistency guarantee.
// masterAddr is captured into a local first — presumably to guard against a
// concurrent update between the null check and the RPC; TODO confirm.
public void syncTimerCheckPoint() { String masterAddrBak = this.masterAddr; if (masterAddrBak != null) { try { if (null != brokerController.getMessageStore().getTimerMessageStore() && !brokerController.getTimerMessageStore().isShouldRunningDequeue()) { TimerCheckpoint checkpoint = this.brokerController.getBrokerOuterAPI().getTimerCheckPoint(masterAddrBak); if (null != this.brokerController.getTimerCheckpoint()) { this.brokerController.getTimerCheckpoint().setLastReadTimeMs(checkpoint.getLastReadTimeMs()); this.brokerController.getTimerCheckpoint().setMasterTimerQueueOffset(checkpoint.getMasterTimerQueueOffset()); this.brokerController.getTimerCheckpoint().getDataVersion().assignNewOne(checkpoint.getDataVersion()); } } } catch (Exception e) { LOGGER.error("syncTimerCheckPoint Exception, {}", masterAddrBak, e); } } }
// Smoke-tests syncTimerCheckPoint against a mocked outer API: after the sync the
// local checkpoint's data version state is the one copied from the (fresh) master
// checkpoint, i.e. state version 0.
@Test public void testSyncTimerCheckPoint() throws RemotingConnectException, RemotingSendRequestException, RemotingTimeoutException, MQBrokerException, InterruptedException { when(brokerOuterAPI.getTimerCheckPoint(anyString())).thenReturn(timerCheckpoint); slaveSynchronize.syncTimerCheckPoint(); Assert.assertEquals(0, timerCheckpoint.getDataVersion().getStateVersion()); }
/**
 * Static factory: builds a {@code TableElements} from the given elements.
 * Takes an immutable, null-hostile snapshot of the varargs array so later
 * mutation of the caller's array cannot affect the instance.
 */
public static TableElements of(final TableElement... elements) {
  final ImmutableList<TableElement> snapshot = ImmutableList.copyOf(elements);
  return new TableElements(snapshot);
}
// Verifies TableElements is iterable and yields its elements in insertion order.
// Note this exercises the Iterable-accepting overload of of(), not the varargs one.
@Test public void shouldIterateElements() { // Given: final TableElement te1 = tableElement("k0", STRING_TYPE, KEY_CONSTRAINT); final TableElement te2 = tableElement("v0", INT_TYPE); // When: final Iterable<TableElement> iterable = TableElements.of(ImmutableList.of(te1, te2)); // Then: assertThat(iterable, contains(te1, te2)); }
/**
 * Converts an array of Spark filters into a single Iceberg expression by AND-ing
 * the individual conversions together, starting from alwaysTrue().
 *
 * @throws IllegalArgumentException if any filter has no Iceberg equivalent
 */
public static Expression convert(Filter[] filters) {
  Expression combined = Expressions.alwaysTrue();
  for (Filter sparkFilter : filters) {
    Expression icebergExpr = convert(sparkFilter);
    // Fail fast on the first filter that cannot be translated.
    Preconditions.checkArgument(
        icebergExpr != null, "Cannot convert filter to Iceberg: %s", sparkFilter);
    combined = Expressions.and(combined, icebergExpr);
  }
  return combined;
}
// Verifies that both java.time.LocalDate and java.sql.Date filter literals convert
// to the same Iceberg expression as a raw epoch-day literal (compared via toString).
@Test public void testDateFilterConversion() { LocalDate localDate = LocalDate.parse("2018-10-18"); Date date = Date.valueOf(localDate); long epochDay = localDate.toEpochDay(); Expression localDateExpression = SparkFilters.convert(GreaterThan.apply("x", localDate)); Expression dateExpression = SparkFilters.convert(GreaterThan.apply("x", date)); Expression rawExpression = Expressions.greaterThan("x", epochDay); Assert.assertEquals( "Generated localdate expression should be correct", rawExpression.toString(), localDateExpression.toString()); Assert.assertEquals( "Generated date expression should be correct", rawExpression.toString(), dateExpression.toString()); }
/**
 * Converts the timestamp value to a BigDecimal by reusing the integer conversion
 * (whose unit depends on the configured conversion mode) and widening the result.
 * Returns null when the integer conversion yields null.
 */
@Override
public BigDecimal getBigNumber( Object object ) throws KettleValueException {
  final Long timestampAsLong = getInteger( object );
  return timestampAsLong == null ? null : BigDecimal.valueOf( timestampAsLong );
}
// With the conversion mode system property set to NANOSECONDS, getBigNumber must
// return the timestamp expressed as nanoseconds. (Relies on the property being read
// at conversion time; the property is not reset afterwards here.)
@Test public void testConvertTimestampToBigNumber_Nanoseconds() throws KettleValueException { System.setProperty( Const.KETTLE_TIMESTAMP_NUMBER_CONVERSION_MODE, Const.KETTLE_TIMESTAMP_NUMBER_CONVERSION_MODE_NANOSECONDS ); ValueMetaTimestamp valueMetaTimestamp = new ValueMetaTimestamp(); BigDecimal result = valueMetaTimestamp.getBigNumber( TIMESTAMP_WITH_NANOSECONDS ); assertEquals( BigDecimal.valueOf( TIMESTAMP_AS_NANOSECONDS ), result ); }
public Properties translate(Properties source) { Properties hikariProperties = new Properties(); // Iterate over source Properties and translate from HZ to Hikari source.forEach((key, value) -> { String keyString = (String) key; if (PROPERTY_MAP.containsKey(keyString)) { hikariProperties.put(keyString, value); } else if (keyString.startsWith(HIKARI_PREFIX)) { String keyNoPrefix = keyString.substring(HIKARI_PREFIX.length()); hikariProperties.put(keyNoPrefix, source.get(keyString)); } else { hikariProperties.put("dataSource." + keyString, value); } }); int cnt = poolCounter.getAndIncrement(); hikariProperties.put("poolName", "HikariPool-" + cnt + "-" + name); return hikariProperties; }
// An unrecognised key must be forwarded as a "dataSource."-prefixed property, and
// HikariConfig must then expose it via getDataSourceProperties() under the bare name.
@Test public void testUnknownProperty() { // Unknown Hikari property is considered as DataSource property String unknownProperty = "unknownProperty"; Properties hzProperties = new Properties(); hzProperties.put(unknownProperty, "value"); Properties hikariProperties = hikariTranslator.translate(hzProperties); assertThat(hikariProperties).containsEntry("dataSource.unknownProperty", "value"); HikariConfig hikariConfig = new HikariConfig(hikariProperties); // Get DataSource for verification Properties dataSourceProperties = hikariConfig.getDataSourceProperties(); assertThat(dataSourceProperties.getProperty(unknownProperty)).isEqualTo("value"); }
/**
 * Returns true when {@code host} is a complete dotted-quad address per IP_PATTERN.
 * A null host is never complete. NOTE(review): despite the name, only the IP
 * pattern is checked — DNS host names are not matched; confirm that is intended.
 */
public static boolean isCompleteHost(final String host) {
    return host != null && IP_PATTERN.matcher(host).matches();
}
// A full IPv4 literal is complete; truncated/partial dotted forms are not.
@Test public void testIsCompleteHost() { assertTrue(IpUtils.isCompleteHost("192.168.1.166")); assertFalse(IpUtils.isCompleteHost("192.168.")); assertFalse(IpUtils.isCompleteHost("192..")); }
// Evaluates a parameter from a signal dependency on the shared executor, bounding
// the lookup with TIMEOUT_IN_MILLIS. Any failure (timeout, interruption, execution
// error) is wrapped into a MaestroInternalError carrying both identifiers so the
// caller sees a single, uniform failure type.
Object getFromSignalDependency(String signalDependencyName, String paramName) { try { return executor .submit(() -> fromSignalDependency(signalDependencyName, paramName)) .get(TIMEOUT_IN_MILLIS, TimeUnit.MILLISECONDS); } catch (Exception e) { throw new MaestroInternalError( e, "getFromSignalDependency throws an exception for signalDependencyName=[%s], paramName=[%s]", signalDependencyName, paramName); } }
// Happy-path: the evaluated result of the stubbed signal-dependency parameter map
// is returned verbatim for the matching dependency name and parameter key.
@Test public void testGetFromSignalDependency() { when(signalDependenciesParams.get("dev/foo/bar")) .thenReturn( Collections.singletonList( Collections.singletonMap( "param1", StringParameter.builder().evaluatedResult("hello").build()))); assertEquals("hello", paramExtension.getFromSignalDependency("dev/foo/bar", "param1")); }
/**
 * Returns the lower limit of this number-tree node, i.e. the first entry of its
 * /Limits array, or null when the node has no /Limits entry (e.g. the root node).
 */
public String getLowerLimit() {
    final COSArray limits = node.getCOSArray(COSName.LIMITS);
    return limits == null ? null : limits.getString(0);
}
@Test void testLowerLimit() throws IOException { assertEquals("Actinium", this.node5.getLowerLimit()); assertEquals("Actinium", this.node2.getLowerLimit()); assertEquals("Xenon", this.node24.getLowerLimit()); assertEquals("Xenon", this.node4.getLowerLimit()); assertEquals(null, this.node1.getLowerLimit()); }
// Field count of the record, delegated to the struct object inspector's field list.
@Override public int size() { return soi.getAllStructFieldRefs().size(); }
// A LazyHCatRecord built from the fixture record/inspector exposes 4 fields.
@Test public void testSize() throws Exception { HCatRecord r = new LazyHCatRecord(getHCatRecord(), getObjectInspector()); Assert.assertEquals(4, r.size()); }
// Convenience overload: collects up to `count` dates matching the cron pattern
// between `start` and the end of start's year, delegating to the ranged overload.
public static List<Date> matchedDates(String patternStr, Date start, int count, boolean isMatchSecond) { return matchedDates(patternStr, start, DateUtil.endOfYear(start), count, isMatchSecond); }
// For an every-30-seconds pattern (within 8-18h), the five matches after the given
// start are exactly the next 30-second boundaries, in order.
@Test public void matchedDatesTest() { //测试每30秒执行 List<Date> matchedDates = CronPatternUtil.matchedDates("0/30 * 8-18 * * ?", DateUtil.parse("2018-10-15 14:33:22"), 5, true); assertEquals(5, matchedDates.size()); assertEquals("2018-10-15 14:33:30", matchedDates.get(0).toString()); assertEquals("2018-10-15 14:34:00", matchedDates.get(1).toString()); assertEquals("2018-10-15 14:34:30", matchedDates.get(2).toString()); assertEquals("2018-10-15 14:35:00", matchedDates.get(3).toString()); assertEquals("2018-10-15 14:35:30", matchedDates.get(4).toString()); }
// Post-subscription interceptor hook. Skips re-entrant calls originating from
// Sermant itself (detected via the current stack trace) and consumers that were
// never cached. Otherwise it records the consumer's current subscription as its
// "original topics", then either delegates to an external handler or applies the
// prohibition check that may unsubscribe prohibited topics on every subscribe call.
@Override public ExecuteContext after(ExecuteContext context) { if (InvokeUtils.isKafkaInvokeBySermant(Thread.currentThread().getStackTrace())) { return context; } KafkaConsumerWrapper kafkaConsumerWrapper = KafkaConsumerController.getKafkaConsumerCache() .get(context.getObject().hashCode()); if (kafkaConsumerWrapper == null) { return context; } kafkaConsumerWrapper.setOriginalTopics(kafkaConsumerWrapper.getKafkaConsumer().subscription()); if (handler != null) { handler.doAfter(context); } else { LOGGER.info("Try to check if it is need to disable consumption after assignment..."); // The host application checks whether it is necessary to unsubscribe from the Topic every time it // subscribes KafkaConsumerController.disableConsumption(kafkaConsumerWrapper, ProhibitionConfigManager.getKafkaProhibitionTopics()); } return context; }
// After the interceptor runs, the cached wrapper for the mock consumer must hold
// the consumer's subscription set as its original topics.
@Test public void testAfter() { ExecuteContext context = ExecuteContext.forMemberMethod(mockConsumer, null, null, null, null); interceptor.after(context); Assert.assertEquals(topics, KafkaConsumerController.getKafkaConsumerCache().get(mockConsumer.hashCode()).getOriginalTopics()); }
/**
 * Resolves a node id to its host name by looking the node's JSON document up and
 * reading its "host" field. Returns empty when the node is unknown or the
 * document has no "host" field.
 */
@Override
public Optional<String> nodeIdToHostName(String nodeId) {
    return nodeById(nodeId)
            .map(node -> node.path("host"))
            .filter(hostNode -> !hostNode.isMissingNode())
            .map(hostNode -> hostNode.asText());
}
@Test void handlesMissingHostField() throws Exception { mockNodesResponse(); assertThat(this.clusterAdapter.nodeIdToHostName(nodeId)).isEmpty(); }
// Deep-copies a Data object. DataComplex values (DataMap/DataList) are cloned and
// then asked to copy their referenced children; the alreadyCopied identity table
// both preserves shared references and terminates on cyclic structures by returning
// the existing clone for an object seen before. Primitives are returned as-is
// (they are immutable). Any other value type is rejected.
static <T> T copy(T object, DataComplexTable alreadyCopied) throws CloneNotSupportedException { if (object == null) { return null; } else if (isComplex(object)) { DataComplex src = (DataComplex) object; @SuppressWarnings("unchecked") T found = (T) alreadyCopied.get(src); if (found != null) { return found; } else { DataComplex clone = src.clone(); alreadyCopied.put(src, clone); if (clone instanceof DataMap) { ((DataMap)clone).copyReferencedObjects(alreadyCopied); } else if (clone instanceof DataList) { ((DataList)clone).copyReferencedObjects(alreadyCopied); } @SuppressWarnings("unchecked") T converted = (T) clone; return converted; } } else if (isPrimitive(object)) { return object; } else { throw new CloneNotSupportedException("Illegal value encountered: " + object); } }
// A copied DataList is equal to the original by value but is a distinct instance,
// evidenced by differing dataComplexHashCode identity hashes.
@Test public void testListClonesHaveDifferentHashValues() throws CloneNotSupportedException { DataList originalList = new DataList(); originalList.add("value"); DataList copyList = originalList.copy(); // The objects should be "equal," but not identical. assertTrue(copyList.equals(originalList)); assertFalse(copyList.dataComplexHashCode() == originalList.dataComplexHashCode()); }
/**
 * Resolves class bytecode, trying sources in order of preference:
 * client cache first; then (unless providing is OFF) the parent class loader;
 * then, only in LOCAL_AND_CACHED_CLASSES mode, the member cache.
 * Returns null when no source can supply the class.
 */
public ClassData getClassDataOrNull(String className) {
    final ClassData fromClientCache = loadBytecodesFromClientCache(className);
    if (fromClientCache != null) {
        return fromClientCache;
    }
    if (providerMode == UserCodeDeploymentConfig.ProviderMode.OFF) {
        // Providing is disabled: never fall through to parent or cache lookups.
        return null;
    }
    final ClassData fromParent = loadBytecodesFromParent(className);
    if (fromParent != null) {
        return fromParent;
    }
    if (providerMode == UserCodeDeploymentConfig.ProviderMode.LOCAL_AND_CACHED_CLASSES) {
        return loadBytecodesFromCache(className);
    }
    return null;
}
// With provider mode OFF, getClassDataOrNull must return null even though the
// class exists in the (non-client-cache) sources configured for the provider.
@Test public void givenProviderModeSetToOFF_whenMapClassContainsClass_thenReturnNull() { UserCodeDeploymentConfig.ProviderMode providerMode = OFF; String className = "className"; ClassSource classSource = newMockClassSource(); ClassLoader parent = getClass().getClassLoader(); ClassDataProvider provider = createClassDataProvider(providerMode, className, classSource, parent); ClassData classData = provider.getClassDataOrNull(className); assertNull(classData); }
// Routes a Kettle logging event to one of three loggers based on the owning
// logging object's type: trans/step/database -> transLogger, job/jobentry ->
// jobLogger, and everything else (including events whose logging object was
// already discarded, or GENERAL objects when the corresponding system property
// is "Y") -> the general DI logger. Non-LogMessage payloads are ignored.
@Override public void eventAdded( KettleLoggingEvent event ) { Object messageObject = event.getMessage(); checkNotNull( messageObject, "Expected log message to be defined." ); if ( messageObject instanceof LogMessage ) { LogMessage message = (LogMessage) messageObject; LoggingObjectInterface loggingObject = logObjProvider.apply( message.getLogChannelId() ); if ( loggingObject == null || ( loggingObject.getObjectType() == GENERAL && "Y".equals( EnvUtil.getSystemProperty( Const.KETTLE_LOG_GENERAL_OBJECTS_TO_DI_LOGGER ) ) ) ) { // this can happen if logObject has been discarded while log events are still in flight. logToLogger( diLogger, message.getLevel(), message.getSubject() + " " + message.getMessage() ); } else if ( loggingObject.getObjectType() == TRANS || loggingObject.getObjectType() == STEP || loggingObject.getObjectType() == DATABASE ) { logToLogger( transLogger, message.getLevel(), loggingObject, message ); } else if ( loggingObject.getObjectType() == JOB || loggingObject.getObjectType() == JOBENTRY ) { logToLogger( jobLogger, message.getLevel(), loggingObject, message ); } } }
// JOB-typed logging objects must be routed exclusively to the job logger (info for
// BASIC, error for ERROR level), with no traffic to the DI or trans loggers.
@Test public void testAddLogEventJob() { when( logObjProvider.apply( logChannelId ) ).thenReturn( loggingObject ); when( loggingObject.getObjectType() ).thenReturn( LoggingObjectType.JOB ); when( loggingObject.getFilename() ).thenReturn( "filename" ); when( message.getLevel() ).thenReturn( LogLevel.BASIC ); listener.eventAdded( logEvent ); verify( jobLogger ).info( "[filename] " + msgText ); when( message.getLevel() ).thenReturn( LogLevel.ERROR ); listener.eventAdded( logEvent ); verify( jobLogger ).error( "[filename] " + msgText ); verifyNoInteractions( diLogger ); verifyNoInteractions( transLogger ); }
// Renders Markdown input to HTML. The input is HTML-escaped first so raw markup in
// the source cannot inject tags; only the Markdown conversion produces HTML.
public static String convertToHtml(String input) { return new Markdown().convert(StringEscapeUtils.escapeHtml4(input)); }
// Balanced double-backtick spans become <code> elements; an unterminated span is
// left verbatim rather than partially decorated.
@Test public void shouldDecorateCode() { assertThat(Markdown.convertToHtml("This is a ``line of code``")).isEqualTo("This is a <code>line of code</code>"); assertThat(Markdown.convertToHtml("This is not a ``line of code")).isEqualTo("This is not a ``line of code"); }
// Global toggle: stores whether deep-link install-source tracking is enabled.
public static void enableDeepLinkInstallSource(boolean enable) { mEnableDeepLinkInstallSource = enable; }
// Smoke test: enabling the flag must not throw. No observable getter is asserted here.
@Test public void enableDeepLinkInstallSource() { DeepLinkManager.enableDeepLinkInstallSource(true); }
// Commits all Iceberg output tables of the job. The context is first enriched with
// the Tez vertex id. Table commits run in parallel on a table executor (sized by the
// number of outputs) while per-table file operations use a shared file executor;
// failures stop remaining work and propagate. Outputs with no serialized table in
// the config are skipped. Both executors are shut down in a finally block, and the
// collected per-table job locations are cleaned up after a successful commit.
@Override public void commitJob(JobContext originalContext) throws IOException { JobContext jobContext = TezUtil.enrichContextWithVertexId(originalContext); JobConf jobConf = jobContext.getJobConf(); long startTime = System.currentTimeMillis(); LOG.info("Committing job {} has started", jobContext.getJobID()); Collection<String> outputs = HiveIcebergStorageHandler.outputTables(jobContext.getJobConf()); Collection<String> jobLocations = new ConcurrentLinkedQueue<>(); ExecutorService fileExecutor = fileExecutor(jobConf); ExecutorService tableExecutor = tableExecutor(jobConf, outputs.size()); try { // Commits the changes for the output tables in parallel Tasks.foreach(outputs) .throwFailureWhenFinished() .stopOnFailure() .executeWith(tableExecutor) .run( output -> { Table table = HiveIcebergStorageHandler.table(jobConf, output); if (table != null) { String catalogName = HiveIcebergStorageHandler.catalogName(jobConf, output); jobLocations.add( generateJobLocation(table.location(), jobConf, jobContext.getJobID())); commitTable( table.io(), fileExecutor, jobContext, output, table.location(), catalogName); } else { LOG.info( "CommitJob found no serialized table in config for table: {}. Skipping job commit.", output); } }); } finally { fileExecutor.shutdown(); if (tableExecutor != null) { tableExecutor.shutdown(); } } LOG.info( "Commit took {} ms for job {}", System.currentTimeMillis() - startTime, jobContext.getJobID()); cleanup(jobContext, jobLocations); }
// Simulates task retries: aborted attempts leave no files, un-aborted failed
// attempts leave orphan data files that must not be read, and only the final
// committed attempt's records become visible after commitJob.
@Test public void testRetryTask() throws IOException { HiveIcebergOutputCommitter committer = new HiveIcebergOutputCommitter(); Table table = table(temp.toFile().getPath(), false); JobConf conf = jobConf(table, 2); // Write records and abort the tasks writeRecords(table.name(), 2, 0, false, true, conf); HiveIcebergTestUtils.validateFiles(table, conf, JOB_ID, 0); HiveIcebergTestUtils.validateData(table, Collections.emptyList(), 0); // Write records but do not abort the tasks // The data files remain since we can not identify them but should not be read writeRecords(table.name(), 2, 1, false, false, conf); HiveIcebergTestUtils.validateFiles(table, conf, JOB_ID, 2); HiveIcebergTestUtils.validateData(table, Collections.emptyList(), 0); // Write and commit the records List<Record> expected = writeRecords(table.name(), 2, 2, true, false, conf); committer.commitJob(new JobContextImpl(conf, JOB_ID)); HiveIcebergTestUtils.validateFiles(table, conf, JOB_ID, 4); HiveIcebergTestUtils.validateData(table, expected, 0); }
/**
 * Returns the configured new-code reference branch, or empty when the holder has
 * not been initialized yet.
 */
@Override
public Optional<String> getNewCodeReferenceBranch() {
    return newCodeReferenceBranch.isInitialized()
        ? Optional.of(newCodeReferenceBranch.getProperty())
        : Optional.empty();
}
// Uninitialized holder => getNewCodeReferenceBranch yields Optional.empty().
@Test public void get_new_code_reference_branch_return_empty_when_holder_is_not_initialized() { assertThat(underTest.getNewCodeReferenceBranch()).isEmpty(); }
// Enqueues a network buffer received for this remote input channel.
// Responsibilities, in order: reject out-of-sequence buffers; validate
// blocking-upstream events carry no backlog; under the receivedBuffers lock,
// drop the buffer if the channel was already released, enqueue it (priority
// buffers and announcements go through addPriorityBuffer), track queue size,
// detect checkpoint barriers for channel-state persistence, and bump the
// expected sequence number; outside the lock, fire priority/non-empty
// notifications and forward the sender backlog. The recycleBuffer flag ensures
// the buffer is recycled on every path that did not hand ownership to a queue.
public void onBuffer(Buffer buffer, int sequenceNumber, int backlog, int subpartitionId) throws IOException { boolean recycleBuffer = true; try { if (expectedSequenceNumber != sequenceNumber) { onError(new BufferReorderingException(expectedSequenceNumber, sequenceNumber)); return; } if (buffer.getDataType().isBlockingUpstream()) { onBlockingUpstream(); checkArgument(backlog == 0, "Illegal number of backlog: %s, should be 0.", backlog); } final boolean wasEmpty; boolean firstPriorityEvent = false; synchronized (receivedBuffers) { NetworkActionsLogger.traceInput( "RemoteInputChannel#onBuffer", buffer, inputGate.getOwningTaskName(), channelInfo, channelStatePersister, sequenceNumber); // Similar to notifyBufferAvailable(), make sure that we never add a buffer // after releaseAllResources() released all buffers from receivedBuffers // (see above for details). if (isReleased.get()) { return; } wasEmpty = receivedBuffers.isEmpty(); SequenceBuffer sequenceBuffer = new SequenceBuffer(buffer, sequenceNumber, subpartitionId); DataType dataType = buffer.getDataType(); if (dataType.hasPriority()) { firstPriorityEvent = addPriorityBuffer(sequenceBuffer); recycleBuffer = false; } else { receivedBuffers.add(sequenceBuffer); recycleBuffer = false; if (dataType.requiresAnnouncement()) { firstPriorityEvent = addPriorityBuffer(announce(sequenceBuffer)); } } totalQueueSizeInBytes += buffer.getSize(); final OptionalLong barrierId = channelStatePersister.checkForBarrier(sequenceBuffer.buffer); if (barrierId.isPresent() && barrierId.getAsLong() > lastBarrierId) { // checkpoint was not yet started by task thread, // so remember the numbers of buffers to spill for the time when // it will be started lastBarrierId = barrierId.getAsLong(); lastBarrierSequenceNumber = sequenceBuffer.sequenceNumber; } channelStatePersister.maybePersist(buffer); ++expectedSequenceNumber; } if (firstPriorityEvent) { notifyPriorityEvent(sequenceNumber); } if (wasEmpty) { notifyChannelNonEmpty(); } if (backlog >= 
0) { onSenderBacklog(backlog); } } finally { if (recycleBuffer) { buffer.recycleBuffer(); } } }
// A non-priority checkpoint barrier makes the gate available but not
// priority-available; a subsequent priority barrier triggers priority availability.
@Test void testNotifyOnPriority() throws IOException { SingleInputGate inputGate = new SingleInputGateBuilder().build(); RemoteInputChannel channel = InputChannelTestUtils.createRemoteInputChannel(inputGate, 0); CheckpointOptions options = new CheckpointOptions(CHECKPOINT, getDefault()); assertPriorityAvailability( inputGate, false, false, () -> assertAvailability( inputGate, false, true, () -> { channel.onBuffer( toBuffer( new CheckpointBarrier(1L, 123L, options), false), 0, 0, 0); })); assertPriorityAvailability( inputGate, false, true, () -> assertAvailability( inputGate, true, true, () -> { channel.onBuffer( toBuffer( new CheckpointBarrier(2L, 123L, options), true), 1, 0, 0); })); }
// Convenience overload: builds the Command using the execution context's own
// service context.
public Command create( final ConfiguredStatement<? extends Statement> statement, final KsqlExecutionContext context) { return create(statement, context.getServiceContext(), context); }
// Creating a command for a planned query must both plan the statement and execute
// the resulting plan (wrapped with the session config) against the context.
@Test public void shouldValidatePlannedQuery() { // Given: givenPlannedQuery(); // When: commandFactory.create(configuredStatement, executionContext); // Then: verify(executionContext).plan(serviceContext, configuredStatement); verify(executionContext).execute( serviceContext, ConfiguredKsqlPlan.of(A_PLAN, SessionConfig.of(config, overrides)) ); }
// Exports the referenced transformation for remote execution. Loads the TransMeta,
// recursively exports its own resources, then rewrites its filename to a
// ${Internal.Entry.Current.Directory}-relative path, resets its repository
// directory to root, and — as side effects on this job entry — forces FILENAME
// specification mode and points this entry's filename at the exported file.
// Returns the exporter-proposed file name.
@Override public String exportResources( VariableSpace space, Map<String, ResourceDefinition> definitions, ResourceNamingInterface namingInterface, Repository repository, IMetaStore metaStore ) throws KettleException { // Try to load the transformation from repository or file. // Modify this recursively too... // // AGAIN: there is no need to clone this job entry because the caller is responsible for this. // // First load the transformation metadata... // copyVariablesFrom( space ); TransMeta transMeta = getTransMeta( repository, space ); // Also go down into the transformation and export the files there. (mapping recursively down) // String proposedNewFilename = transMeta.exportResources( transMeta, definitions, namingInterface, repository, metaStore ); // To get a relative path to it, we inject ${Internal.Entry.Current.Directory} // String newFilename = "${" + Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY + "}/" + proposedNewFilename; // Set the correct filename inside the XML. // transMeta.setFilename( newFilename ); // exports always reside in the root directory, in case we want to turn this into a file repository... // transMeta.setRepositoryDirectory( new RepositoryDirectory() ); // export to filename ALWAYS (this allows the exported XML to be executed remotely) // setSpecificationMethod( ObjectLocationSpecificationMethod.FILENAME ); // change it in the job entry // filename = newFilename; return proposedNewFilename; }
// (Ignored: mocking limitations.) Would verify that exportResources rewrites the
// TransMeta filename to the internal-directory-relative path and switches the job
// entry to FILENAME specification mode.
@Ignore( "Test can't be properly mocked" ) @Test public void testExportResources() throws KettleException { JobEntryTrans jobEntryTrans = spy( getJobEntryTrans() ); TransMeta transMeta = mock( TransMeta.class ); String testName = "test"; doReturn( transMeta ).when( jobEntryTrans ).getTransMeta( any( Repository.class ), any( VariableSpace.class ) ); when( transMeta.exportResources( any( TransMeta.class ), any( Map.class ), any( ResourceNamingInterface.class ), any( Repository.class ), any( IMetaStore.class ) ) ).thenReturn( testName ); jobEntryTrans.exportResources( null, null, null, null, null ); verify( transMeta ).setFilename( "${" + Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY + "}/" + testName ); verify( jobEntryTrans ).setSpecificationMethod( ObjectLocationSpecificationMethod.FILENAME ); }
@Override public Optional<KsqlConstants.PersistentQueryType> getPersistentQueryType() { if (!queryPlan.isPresent()) { return Optional.empty(); } // CREATE_AS and CREATE_SOURCE commands contain a DDL command and a Query plan. if (ddlCommand.isPresent()) { if (ddlCommand.get() instanceof CreateTableCommand && ((CreateTableCommand) ddlCommand.get()).getIsSource()) { return Optional.of(KsqlConstants.PersistentQueryType.CREATE_SOURCE); } else { return Optional.of(KsqlConstants.PersistentQueryType.CREATE_AS); } } else { // INSERT INTO persistent queries are the only queries types that exist without a // DDL command linked to the plan. return Optional.of(KsqlConstants.PersistentQueryType.INSERT); } }
// A plan with a CreateStreamCommand (not a source table) and a query plan is
// classified as a CREATE_AS persistent query.
@Test public void shouldReturnCreateAsPersistentQueryTypeOnCreateStream() { // Given: final CreateStreamCommand ddlCommand = Mockito.mock(CreateStreamCommand.class); final KsqlPlanV1 plan = new KsqlPlanV1( "stmt", Optional.of(ddlCommand), Optional.of(queryPlan1)); // When/Then: assertThat(plan.getPersistentQueryType(), is(Optional.of(KsqlConstants.PersistentQueryType.CREATE_AS))); }
// Overall migration progress in whole percent. An index set with no entries is
// immediately 100% (also avoids division by zero). Otherwise the per-index
// progress values are summed (indices with a null progress are skipped in the sum
// — NOTE(review): the divisor is still the full index count, so null-progress
// indices count as 0%; confirm that is intended) and averaged with 4-digit
// precision before scaling to a percentage.
@JsonProperty("progress") public int progress() { if (indices.isEmpty()) { return 100; // avoid division by zero. No indices == migration is immediately done } final BigDecimal sum = indices.stream() .filter(i -> i.progress() != null) .map(RemoteReindexMigration::indexProgress) .reduce(BigDecimal.ZERO, BigDecimal::add); return sum.divide(BigDecimal.valueOf(indices.size()), 4, RoundingMode.HALF_UP).scaleByPowerOfTen(2).intValue(); }
// Five indices at FINISHED/FINISHED/ERROR/RUNNING/NOT_STARTED average to 60%.
@Test void testProgress() { final RemoteReindexMigration migration = withIndices( index("one", RemoteReindexingMigrationAdapter.Status.FINISHED), index("two", RemoteReindexingMigrationAdapter.Status.FINISHED), index("three", RemoteReindexingMigrationAdapter.Status.ERROR), index("four", RemoteReindexingMigrationAdapter.Status.RUNNING), index("five", RemoteReindexingMigrationAdapter.Status.NOT_STARTED) ); Assertions.assertThat(migration.progress()).isEqualTo(60); }
// Void-returning convenience overload: adapts the Function to a Callable returning
// null and delegates to the Callable-based retry implementation.
public static void executeWithRetries( final Function function, final RetryBehaviour retryBehaviour ) throws Exception { executeWithRetries(() -> { function.call(); return null; }, retryBehaviour); }
// An ExecutionException wrapping a retriable cause must be retried until the
// callable eventually succeeds (after 4 failures here); success means no throw.
@Test public void shouldRetryIfSupplierThrowsExecutionExceptionWrapingRetriable() throws Exception { // Given: final AtomicInteger counts = new AtomicInteger(5); final Callable<Object> throwsExecutionExceptionThenSucceeds = () -> { if (counts.decrementAndGet() == 0) { return null; } throw new ExecutionException(new TestRetriableException("Test should retry")); }; // When: ExecutorUtil.executeWithRetries(throwsExecutionExceptionThenSucceeds, ON_RETRYABLE, () -> SMALL_RETRY_BACKOFF); // Then: Succeeded, i.e. did not throw. }
// Convenience overload: routes with no query parameters.
public RouteResult<T> route(HttpMethod method, String path) { return route(method, path, Collections.emptyMap()); }
// A splat ("*") route must capture the whole remaining path (including slashes)
// into the "*" path parameter.
@Test void testSplatWildcard() { RouteResult<String> routed = router.route(GET, "/download/foo/bar.png"); assertThat(routed.target()).isEqualTo("download"); assertThat(routed.pathParams()).hasSize(1); assertThat(routed.pathParams().get("*")).isEqualTo("foo/bar.png"); }
// Static accessor-style helper: sets the call timeout (presumably milliseconds —
// unit not visible here) on the given operation.
public static void setCallTimeout(Operation op, long callTimeout) { op.setCallTimeout(callTimeout); }
// The helper must write the timeout through to the operation's own field.
@Test public void testSetCallTimeout() { Operation operation = new DummyOperation(); setCallTimeout(operation, 10); assertEquals(10, operation.getCallTimeout()); }