Columns: focal_method (string, lengths 13 to 60.9k), test_case (string, lengths 25 to 109k)
@Override public String get(String url, String path) throws RestClientException { return get(url, path, String.class); }
@Test public void testGet() throws Exception { String str = restClientTemplate.get("http://localhost:9990", "/api", String.class); System.out.println(str); assertNotNull(str); }
public void check( List<CheckResultInterface> remarks, TransMeta transMeta, StepMeta stepinfo, RowMetaInterface prev, String[] input, String[] output, RowMetaInterface info, VariableSpace space, Repository repository, IMetaStore metaStore ) { CheckResult cr; // Check output fields if ( prev != null && prev.size() > 0 ) { cr = new CheckResult( CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( PKG, "XMLOutputMeta.CheckResult.FieldsReceived", "" + prev.size() ), stepinfo ); remarks.add( cr ); String error_message = ""; boolean error_found = false; // Starting from selected fields in ... for ( int i = 0; i < outputFields.length; i++ ) { int idx = prev.indexOfValue( outputFields[i].getFieldName() ); if ( idx < 0 ) { error_message += "\t\t" + outputFields[i].getFieldName() + Const.CR; error_found = true; } } if ( error_found ) { error_message = BaseMessages.getString( PKG, "XMLOutputMeta.CheckResult.FieldsNotFound", error_message ); cr = new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, error_message, stepinfo ); remarks.add( cr ); } else { cr = new CheckResult( CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( PKG, "XMLOutputMeta.CheckResult.AllFieldsFound" ), stepinfo ); remarks.add( cr ); } } // See if we have input streams leading to this step! if ( input.length > 0 ) { cr = new CheckResult( CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( PKG, "XMLOutputMeta.CheckResult.ExpectedInputOk" ), stepinfo ); remarks.add( cr ); } else { cr = new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages.getString( PKG, "XMLOutputMeta.CheckResult.ExpectedInputError" ), stepinfo ); remarks.add( cr ); } cr = new CheckResult( CheckResultInterface.TYPE_RESULT_COMMENT, BaseMessages.getString( PKG, "XMLOutputMeta.CheckResult.FilesNotChecked" ), stepinfo ); remarks.add( cr ); }
@Test public void testCheck() throws Exception { XMLOutputMeta xmlOutputMeta = new XMLOutputMeta(); xmlOutputMeta.setDefault(); TransMeta transMeta = mock( TransMeta.class ); StepMeta stepInfo = mock( StepMeta.class ); RowMetaInterface prev = mock( RowMetaInterface.class ); Repository repos = mock( Repository.class ); IMetaStore metastore = mock( IMetaStore.class ); RowMetaInterface info = mock( RowMetaInterface.class ); ArrayList<CheckResultInterface> remarks = new ArrayList<>(); xmlOutputMeta.check( remarks, transMeta, stepInfo, prev, new String[] { "input" }, new String[] { "output" }, info, new Variables(), repos, metastore ); assertEquals( 2, remarks.size() ); assertEquals( "Step is receiving info from other steps.", remarks.get( 0 ).getText() ); assertEquals( "File specifications are not checked.", remarks.get( 1 ).getText() ); XMLField xmlField = new XMLField(); xmlField.setFieldName( "aField" ); xmlField.setType( 1 ); xmlField.setLength( 10 ); xmlField.setPrecision( 3 ); xmlOutputMeta.setOutputFields( new XMLField[] { xmlField } ); when( prev.size() ).thenReturn( 1 ); remarks.clear(); xmlOutputMeta.check( remarks, transMeta, stepInfo, prev, new String[] { "input" }, new String[] { "output" }, info, new Variables(), repos, metastore ); assertEquals( 4, remarks.size() ); assertEquals( "Step is connected to previous one, receiving 1 fields", remarks.get( 0 ).getText() ); assertEquals( "All output fields are found in the input stream.", remarks.get( 1 ).getText() ); assertEquals( "Step is receiving info from other steps.", remarks.get( 2 ).getText() ); assertEquals( "File specifications are not checked.", remarks.get( 3 ).getText() ); }
@Override public void createFunction(SqlInvokedFunction function, boolean replace) { checkCatalog(function); checkFunctionLanguageSupported(function); checkArgument(!function.hasVersion(), "function '%s' is already versioned", function); QualifiedObjectName functionName = function.getFunctionId().getFunctionName(); checkFieldLength("Catalog name", functionName.getCatalogName(), MAX_CATALOG_NAME_LENGTH); checkFieldLength("Schema name", functionName.getSchemaName(), MAX_SCHEMA_NAME_LENGTH); if (!functionNamespaceDao.functionNamespaceExists(functionName.getCatalogName(), functionName.getSchemaName())) { throw new PrestoException(NOT_FOUND, format("Function namespace not found: %s", functionName.getCatalogSchemaName())); } checkFieldLength("Function name", functionName.getObjectName(), MAX_FUNCTION_NAME_LENGTH); if (function.getParameters().size() > MAX_PARAMETER_COUNT) { throw new PrestoException(GENERIC_USER_ERROR, format("Function has more than %s parameters: %s", MAX_PARAMETER_COUNT, function.getParameters().size())); } for (Parameter parameter : function.getParameters()) { checkFieldLength("Parameter name", parameter.getName(), MAX_PARAMETER_NAME_LENGTH); } checkFieldLength( "Parameter type list", function.getFunctionId().getArgumentTypes().stream() .map(TypeSignature::toString) .collect(joining(",")), MAX_PARAMETER_TYPES_LENGTH); checkFieldLength("Return type", function.getSignature().getReturnType().toString(), MAX_RETURN_TYPE_LENGTH); jdbi.useTransaction(handle -> { FunctionNamespaceDao transactionDao = handle.attach(functionNamespaceDaoClass); Optional<SqlInvokedFunctionRecord> latestVersion = transactionDao.getLatestRecordForUpdate(hash(function.getFunctionId()), function.getFunctionId()); if (!replace && latestVersion.isPresent() && !latestVersion.get().isDeleted()) { throw new PrestoException(ALREADY_EXISTS, "Function already exists: " + function.getFunctionId()); } if (!latestVersion.isPresent() || !latestVersion.get().getFunction().hasSameDefinitionAs(function)) { long newVersion = latestVersion.map(SqlInvokedFunctionRecord::getFunction).map(MySqlFunctionNamespaceManager::getLongVersion).orElse(0L) + 1; insertSqlInvokedFunction(transactionDao, function, newVersion); } else if (latestVersion.get().isDeleted()) { SqlInvokedFunction latest = latestVersion.get().getFunction(); checkState(latest.hasVersion(), "Function version missing: %s", latest.getFunctionId()); transactionDao.setDeletionStatus(hash(latest.getFunctionId()), latest.getFunctionId(), getLongVersion(latest), false); } }); refreshFunctionsCache(functionName); }
@Test(expectedExceptions = PrestoException.class, expectedExceptionsMessageRegExp = "Error getting FunctionMetadata for handle: unittest\\.memory\\.power_tower\\(double\\):2") public void testInvalidFunctionHandle() { createFunction(FUNCTION_POWER_TOWER_DOUBLE, true); SqlFunctionHandle functionHandle = new SqlFunctionHandle(FUNCTION_POWER_TOWER_DOUBLE.getFunctionId(), "2"); functionNamespaceManager.getFunctionMetadata(functionHandle); }
@Override public List<PinotTaskConfig> generateTasks(List<TableConfig> tableConfigs) { String taskType = MinionConstants.UpsertCompactionTask.TASK_TYPE; List<PinotTaskConfig> pinotTaskConfigs = new ArrayList<>(); for (TableConfig tableConfig : tableConfigs) { if (!validate(tableConfig)) { LOGGER.warn("Validation failed for table {}. Skipping..", tableConfig.getTableName()); continue; } String tableNameWithType = tableConfig.getTableName(); LOGGER.info("Start generating task configs for table: {}", tableNameWithType); if (tableConfig.getTaskConfig() == null) { LOGGER.warn("Task config is null for table: {}", tableNameWithType); continue; } Map<String, String> taskConfigs = tableConfig.getTaskConfig().getConfigsForTaskType(taskType); List<SegmentZKMetadata> allSegments = _clusterInfoAccessor.getSegmentsZKMetadata(tableNameWithType); // Get completed segments and filter out the segments based on the buffer time configuration List<SegmentZKMetadata> completedSegments = getCompletedSegments(taskConfigs, allSegments, System.currentTimeMillis()); if (completedSegments.isEmpty()) { LOGGER.info("No completed segments were eligible for compaction for table: {}", tableNameWithType); continue; } // Only schedule 1 task of this type, per table Map<String, TaskState> incompleteTasks = TaskGeneratorUtils.getIncompleteTasks(taskType, tableNameWithType, _clusterInfoAccessor); if (!incompleteTasks.isEmpty()) { LOGGER.warn("Found incomplete tasks: {} for same table: {} and task type: {}. Skipping task generation.", incompleteTasks.keySet(), tableNameWithType, taskType); continue; } // get server to segment mappings PinotHelixResourceManager pinotHelixResourceManager = _clusterInfoAccessor.getPinotHelixResourceManager(); Map<String, List<String>> serverToSegments = pinotHelixResourceManager.getServerToSegmentsMap(tableNameWithType); BiMap<String, String> serverToEndpoints; try { serverToEndpoints = pinotHelixResourceManager.getDataInstanceAdminEndpoints(serverToSegments.keySet()); } catch (InvalidConfigException e) { throw new RuntimeException(e); } ServerSegmentMetadataReader serverSegmentMetadataReader = new ServerSegmentMetadataReader(_clusterInfoAccessor.getExecutor(), _clusterInfoAccessor.getConnectionManager()); // By default, we use 'snapshot' for validDocIdsType. This means that we will use the validDocIds bitmap from // the snapshot from Pinot segment. This will require 'enableSnapshot' from UpsertConfig to be set to true. String validDocIdsTypeStr = taskConfigs.getOrDefault(UpsertCompactionTask.VALID_DOC_IDS_TYPE, ValidDocIdsType.SNAPSHOT.toString()); ValidDocIdsType validDocIdsType = ValidDocIdsType.valueOf(validDocIdsTypeStr.toUpperCase()); // Number of segments to query per server request. If a table has a lot of segments, then we might send a // huge payload to pinot-server in request. Batching the requests will help in reducing the payload size. 
int numSegmentsBatchPerServerRequest = Integer.parseInt( taskConfigs.getOrDefault(UpsertCompactionTask.NUM_SEGMENTS_BATCH_PER_SERVER_REQUEST, String.valueOf(DEFAULT_NUM_SEGMENTS_BATCH_PER_SERVER_REQUEST))); // Validate that the snapshot is enabled if validDocIdsType is validDocIdsSnapshot if (validDocIdsType == ValidDocIdsType.SNAPSHOT) { UpsertConfig upsertConfig = tableConfig.getUpsertConfig(); Preconditions.checkNotNull(upsertConfig, "UpsertConfig must be provided for UpsertCompactionTask"); Preconditions.checkState(upsertConfig.isEnableSnapshot(), String.format( "'enableSnapshot' from UpsertConfig must be enabled for UpsertCompactionTask with validDocIdsType = %s", validDocIdsType)); } else if (validDocIdsType == ValidDocIdsType.IN_MEMORY_WITH_DELETE) { UpsertConfig upsertConfig = tableConfig.getUpsertConfig(); Preconditions.checkNotNull(upsertConfig, "UpsertConfig must be provided for UpsertCompactionTask"); Preconditions.checkNotNull(upsertConfig.getDeleteRecordColumn(), String.format("deleteRecordColumn must be provided for " + "UpsertCompactionTask with validDocIdsType = %s", validDocIdsType)); } Map<String, List<ValidDocIdsMetadataInfo>> validDocIdsMetadataList = serverSegmentMetadataReader.getSegmentToValidDocIdsMetadataFromServer(tableNameWithType, serverToSegments, serverToEndpoints, null, 60_000, validDocIdsType.toString(), numSegmentsBatchPerServerRequest); Map<String, SegmentZKMetadata> completedSegmentsMap = completedSegments.stream().collect(Collectors.toMap(SegmentZKMetadata::getSegmentName, Function.identity())); SegmentSelectionResult segmentSelectionResult = processValidDocIdsMetadata(taskConfigs, completedSegmentsMap, validDocIdsMetadataList); if (!segmentSelectionResult.getSegmentsForDeletion().isEmpty()) { pinotHelixResourceManager.deleteSegments(tableNameWithType, segmentSelectionResult.getSegmentsForDeletion(), "0d"); LOGGER.info( "Deleted segments containing only invalid records for table: {}, number of segments to be deleted: {}", tableNameWithType, segmentSelectionResult.getSegmentsForDeletion()); } int numTasks = 0; int maxTasks = getMaxTasks(taskType, tableNameWithType, taskConfigs); for (SegmentZKMetadata segment : segmentSelectionResult.getSegmentsForCompaction()) { if (numTasks == maxTasks) { break; } if (StringUtils.isBlank(segment.getDownloadUrl())) { LOGGER.warn("Skipping segment {} for task {} as download url is empty", segment.getSegmentName(), taskType); continue; } Map<String, String> configs = new HashMap<>(getBaseTaskConfigs(tableConfig, List.of(segment.getSegmentName()))); configs.put(MinionConstants.DOWNLOAD_URL_KEY, segment.getDownloadUrl()); configs.put(MinionConstants.UPLOAD_URL_KEY, _clusterInfoAccessor.getVipUrl() + "/segments"); configs.put(MinionConstants.ORIGINAL_SEGMENT_CRC_KEY, String.valueOf(segment.getCrc())); configs.put(UpsertCompactionTask.VALID_DOC_IDS_TYPE, validDocIdsType.toString()); pinotTaskConfigs.add(new PinotTaskConfig(UpsertCompactionTask.TASK_TYPE, configs)); numTasks++; } LOGGER.info("Finished generating {} tasks configs for table: {}", numTasks, tableNameWithType); } return pinotTaskConfigs; }
@Test public void testGenerateTasksWithConsumingSegment() { SegmentZKMetadata consumingSegment = new SegmentZKMetadata("testTable__0"); consumingSegment.setStatus(CommonConstants.Segment.Realtime.Status.IN_PROGRESS); when(_mockClusterInfoAccessor.getSegmentsZKMetadata(REALTIME_TABLE_NAME)).thenReturn( Lists.newArrayList(consumingSegment)); when(_mockClusterInfoAccessor.getIdealState(REALTIME_TABLE_NAME)).thenReturn( getIdealState(REALTIME_TABLE_NAME, Lists.newArrayList("testTable__0"))); _taskGenerator.init(_mockClusterInfoAccessor); List<PinotTaskConfig> pinotTaskConfigs = _taskGenerator.generateTasks(Lists.newArrayList(_tableConfig)); assertEquals(pinotTaskConfigs.size(), 0); }
@Override public ReadBufferResult readBuffer( TieredStoragePartitionId partitionId, TieredStorageSubpartitionId subpartitionId, int segmentId, int bufferIndex, MemorySegment memorySegment, BufferRecycler recycler, @Nullable ReadProgress readProgress, @Nullable CompositeBuffer partialBuffer) throws IOException { lazyInitializeFileChannel(); // Get the read offset, including the start offset, the end offset Tuple2<Long, Long> startAndEndOffset = getReadStartAndEndOffset(subpartitionId, bufferIndex, readProgress, partialBuffer); if (startAndEndOffset == null) { return null; } long readStartOffset = startAndEndOffset.f0; long readEndOffset = startAndEndOffset.f1; int numBytesToRead = Math.min(memorySegment.size(), (int) (readEndOffset - readStartOffset)); if (numBytesToRead == 0) { return null; } List<Buffer> readBuffers = new LinkedList<>(); ByteBuffer byteBuffer = memorySegment.wrap(0, numBytesToRead); fileChannel.position(readStartOffset); // Read data to the memory segment, note the read size is numBytesToRead readFileDataToBuffer(memorySegment, recycler, byteBuffer); // Slice the read memory segment to multiple small network buffers and add them to // readBuffers Tuple2<Integer, Integer> partial = sliceBuffer(byteBuffer, memorySegment, partialBuffer, recycler, readBuffers); return getReadBufferResult( readBuffers, readStartOffset, readEndOffset, numBytesToRead, partial.f0, partial.f1); }
@Test void testReadProgress() throws IOException { long currentFileOffset = 0; ProducerMergedPartitionFileReader.ProducerMergedReadProgress readProgress = null; CompositeBuffer partialBuffer = null; for (int bufferIndex = 0; bufferIndex < DEFAULT_BUFFER_NUMBER; ) { PartitionFileReader.ReadBufferResult readBufferResult = readBuffer(bufferIndex, DEFAULT_SUBPARTITION_ID, readProgress, partialBuffer); assertThat(readBufferResult).isNotNull(); assertThat(readBufferResult.getReadProgress()) .isInstanceOf( ProducerMergedPartitionFileReader.ProducerMergedReadProgress.class); readProgress = (ProducerMergedPartitionFileReader.ProducerMergedReadProgress) readBufferResult.getReadProgress(); for (Buffer buffer : readBufferResult.getReadBuffers()) { if (buffer instanceof CompositeBuffer) { partialBuffer = (CompositeBuffer) buffer; if (partialBuffer.missingLength() == 0) { bufferIndex++; currentFileOffset += partialBuffer.readableBytes() + HEADER_LENGTH; partialBuffer.recycleBuffer(); partialBuffer = null; } } else { bufferIndex++; currentFileOffset += buffer.readableBytes() + HEADER_LENGTH; buffer.recycleBuffer(); } } assertThat(readProgress.getCurrentBufferOffset()).isEqualTo(currentFileOffset); } }
@Udf(description = "Returns all substrings of the input that match the given regex pattern") public List<String> regexpExtractAll( @UdfParameter(description = "The regex pattern") final String pattern, @UdfParameter(description = "The input string to apply regex on") final String input ) { return regexpExtractAll(pattern, input, 0); }
@Test public void shouldReturnNullOnNullValue() { assertNull(udf.regexpExtractAll(null, null)); assertNull(udf.regexpExtractAll(null, null, null)); assertNull(udf.regexpExtractAll(null, "", 1)); assertNull(udf.regexpExtractAll("some string", null, 1)); assertNull(udf.regexpExtractAll("some string", "", null)); }
@Override public void asyncGetCursorInfo(String ledgerName, String cursorName, MetaStoreCallback<ManagedCursorInfo> callback) { String path = PREFIX + ledgerName + "/" + cursorName; if (log.isDebugEnabled()) { log.debug("Reading from {}", path); } store.get(path) .thenAcceptAsync(optRes -> { if (optRes.isPresent()) { try { ManagedCursorInfo info = parseManagedCursorInfo(optRes.get().getValue()); callback.operationComplete(info, optRes.get().getStat()); } catch (InvalidProtocolBufferException e) { callback.operationFailed(getException(e)); } } else { callback.operationFailed(new MetadataNotFoundException("Cursor metadata not found")); } }, executor.chooseThread(ledgerName)) .exceptionally(ex -> { executor.executeOrdered(ledgerName, () -> callback.operationFailed(getException(ex))); return null; }); }
@Test(timeOut = 20000) void readMalformedCursorNode() throws Exception { MetaStore store = new MetaStoreImpl(metadataStore, executor); metadataStore.put("/managed-ledgers/my_test", "".getBytes(), Optional.empty()).join(); metadataStore.put("/managed-ledgers/my_test/c1", "non-valid".getBytes(), Optional.empty()).join(); final CountDownLatch latch = new CountDownLatch(1); store.asyncGetCursorInfo("my_test", "c1", new MetaStoreCallback<MLDataFormats.ManagedCursorInfo>() { public void operationFailed(MetaStoreException e) { // Ok latch.countDown(); } public void operationComplete(ManagedCursorInfo result, Stat version) { fail("Operation should have failed"); } }); latch.await(); }
public SearchSourceBuilder create(SearchesConfig config) { return create(SearchCommand.from(config)); }
@Test void searchIncludesTimerange() { final SearchSourceBuilder search = this.searchRequestFactory.create(ChunkCommand.builder() .indices(Collections.singleton("graylog_0")) .range(RANGE) .build()); assertJsonPath(search, request -> { request.jsonPathAsListOf("$.query.bool.filter..range.timestamp.from", String.class) .containsExactly("2020-07-23 11:03:32.243"); request.jsonPathAsListOf("$.query.bool.filter..range.timestamp.to", String.class) .containsExactly("2020-07-23 11:08:32.243"); }); }
@Nonnull public static Number rem(@Nonnull Number first, @Nonnull Number second) { // Check for widest types first, go down the type list to narrower types until reaching int. if (second instanceof Double || first instanceof Double) { return first.doubleValue() % second.doubleValue(); } else if (second instanceof Float || first instanceof Float) { return first.floatValue() % second.floatValue(); } else if (second instanceof Long || first instanceof Long) { return first.longValue() % second.longValue(); } else { return first.intValue() % second.intValue(); } }
@Test void testRem() { assertEquals(4, NumberUtil.rem(13, 9)); assertEquals(4D, NumberUtil.rem(13, 9.0D)); assertEquals(4F, NumberUtil.rem(13, 9.0F)); assertEquals(4L, NumberUtil.rem(13, 9L)); }
public String toString(Object object) { return toJson(object); }
@Test public void test2() { TestBean testBean = new TestBean(); testBean.setDateTime(LocalDateTime.now()); System.out.println(JsonKit.toString(testBean)); }
protected void setValues( JobEntryUnZip jobEntryUnZip, String zipFile, String destinationDirectory ) { jobEntryUnZip.setZipFilename( zipFile ); jobEntryUnZip.setWildcardSource( "" ); jobEntryUnZip.setWildcardExclude( "" ); jobEntryUnZip.setSourceDirectory( destinationDirectory ); jobEntryUnZip.setMoveToDirectory( "" ); }
@Test public void testSetValues() { String destinationDirectory = "/tmp/some/path"; String zipFile = "/quotes/to/be/or/not/to/be/that/is/the/question.zip"; String empty = ""; JobEntryUnZip jobEntryUnZip = new JobEntryUnZip(); zipService.setValues( jobEntryUnZip, zipFile, destinationDirectory); assertEquals( zipFile, jobEntryUnZip.getZipFilename() ); assertEquals( empty, jobEntryUnZip.getWildcardSource() ); assertEquals( empty, jobEntryUnZip.getWildcardExclude() ); assertEquals( destinationDirectory, jobEntryUnZip.getSourceDirectory() ); assertEquals( empty, jobEntryUnZip.getMoveToDirectory() ); }
public static Read read() { return new Read(null, "", new Scan()); }
@Test public void testReadingEmptyTable() throws Exception { final String table = tmpTable.getName(); createTable(table); runReadTest( HBaseIO.read().withConfiguration(conf).withTableId(table), false, new ArrayList<>()); runReadTest(HBaseIO.read().withConfiguration(conf).withTableId(table), true, new ArrayList<>()); }
@Override public Matrix clone() { return new Matrix(single.clone()); }
@Test void testConstructionAndCopy() throws Exception { Matrix m1 = new Matrix(); assertMatrixIsPristine(m1); Matrix m2 = m1.clone(); assertNotSame(m1, m2); assertMatrixIsPristine(m2); }
@Nonnull public <K, V> Consumer<K, V> newConsumer() { return newConsumer(EMPTY_PROPERTIES); }
@Test public void should_create_new_consumer_for_each_call() { kafkaDataConnection = createNonSharedKafkaDataConnection(); try (Consumer<Object, Object> c1 = kafkaDataConnection.newConsumer(); Consumer<Object, Object> c2 = kafkaDataConnection.newConsumer()) { assertThat(c1).isNotSameAs(c2); } }
@Override public long recalculateRevision() { return revision.addAndGet(1); }
@Test void testRecalculateRevisionAsync() throws InterruptedException { assertEquals(0, connectionBasedClient.getRevision()); for (int i = 0; i < 10; i++) { Thread thread = new Thread(() -> { for (int j = 0; j < 10; j++) { connectionBasedClient.recalculateRevision(); } }); thread.start(); } TimeUnit.SECONDS.sleep(1); assertEquals(100, connectionBasedClient.getRevision()); }
@Override public List<String> getPartitionKeysByValue(String dbName, String tableName, List<Optional<String>> partitionValues) { DatabaseTableName databaseTableName = DatabaseTableName.of(dbName, tableName); HivePartitionValue hivePartitionValue = HivePartitionValue.of(databaseTableName, partitionValues); if (metastore instanceof CachingHiveMetastore) { Table table = getTable(dbName, tableName); if (table.isHiveTable() && !((HiveTable) table).isUseMetadataCache()) { invalidatePartitionKeys(hivePartitionValue); } } // update last access time lastAccessTimeMap.put(databaseTableName, System.currentTimeMillis()); // first check if the all partition keys are cached HivePartitionValue allPartitionValue = HivePartitionValue.of(databaseTableName, HivePartitionValue.ALL_PARTITION_VALUES); if (partitionKeysCache.asMap().containsKey(allPartitionValue)) { List<String> allPartitionNames = get(partitionKeysCache, allPartitionValue); if (partitionValues.stream().noneMatch(Optional::isPresent)) { // no need to filter partition names by values return allPartitionNames; } return PartitionUtil.getFilteredPartitionKeys(allPartitionNames, partitionValues); } return get(partitionKeysCache, hivePartitionValue); }
@Test public void testGetPartitionKeys() { CachingHiveMetastore cachingHiveMetastore = new CachingHiveMetastore( metastore, executor, expireAfterWriteSec, refreshAfterWriteSec, 1000, false); Assert.assertEquals(Lists.newArrayList("col1"), cachingHiveMetastore.getPartitionKeysByValue("db1", "tbl1", HivePartitionValue.ALL_PARTITION_VALUES)); }
public OpenAPI filter(OpenAPI openAPI, OpenAPISpecFilter filter, Map<String, List<String>> params, Map<String, String> cookies, Map<String, List<String>> headers) { OpenAPI filteredOpenAPI = filterOpenAPI(filter, openAPI, params, cookies, headers); if (filteredOpenAPI == null) { return filteredOpenAPI; } OpenAPI clone = new OpenAPI(); clone.info(filteredOpenAPI.getInfo()); clone.openapi(filteredOpenAPI.getOpenapi()); clone.jsonSchemaDialect(filteredOpenAPI.getJsonSchemaDialect()); clone.setSpecVersion(filteredOpenAPI.getSpecVersion()); clone.setExtensions(filteredOpenAPI.getExtensions()); clone.setExternalDocs(filteredOpenAPI.getExternalDocs()); clone.setSecurity(filteredOpenAPI.getSecurity()); clone.setServers(filteredOpenAPI.getServers()); clone.tags(filteredOpenAPI.getTags() == null ? null : new ArrayList<>(openAPI.getTags())); final Set<String> allowedTags = new HashSet<>(); final Set<String> filteredTags = new HashSet<>(); Paths clonedPaths = new Paths(); if (filteredOpenAPI.getPaths() != null) { for (String resourcePath : filteredOpenAPI.getPaths().keySet()) { PathItem pathItem = filteredOpenAPI.getPaths().get(resourcePath); PathItem filteredPathItem = filterPathItem(filter, pathItem, resourcePath, params, cookies, headers); PathItem clonedPathItem = cloneFilteredPathItem(filter,filteredPathItem, resourcePath, params, cookies, headers, allowedTags, filteredTags); if (clonedPathItem != null) { if (!clonedPathItem.readOperations().isEmpty()) { clonedPaths.addPathItem(resourcePath, clonedPathItem); } } } clone.paths(clonedPaths); } filteredTags.removeAll(allowedTags); final List<Tag> tags = clone.getTags(); if (tags != null && !filteredTags.isEmpty()) { tags.removeIf(tag -> filteredTags.contains(tag.getName())); if (clone.getTags().isEmpty()) { clone.setTags(null); } } if (filteredOpenAPI.getWebhooks() != null) { for (String resourcePath : filteredOpenAPI.getWebhooks().keySet()) { PathItem pathItem = filteredOpenAPI.getWebhooks().get(resourcePath); PathItem filteredPathItem = filterPathItem(filter, pathItem, resourcePath, params, cookies, headers); PathItem clonedPathItem = cloneFilteredPathItem(filter,filteredPathItem, resourcePath, params, cookies, headers, allowedTags, filteredTags); if (clonedPathItem != null) { if (!clonedPathItem.readOperations().isEmpty()) { clone.addWebhooks(resourcePath, clonedPathItem); } } } } if (filteredOpenAPI.getComponents() != null) { clone.components(new Components()); clone.getComponents().setSchemas(filterComponentsSchema(filter, filteredOpenAPI.getComponents().getSchemas(), params, cookies, headers)); clone.getComponents().setSecuritySchemes(filteredOpenAPI.getComponents().getSecuritySchemes()); clone.getComponents().setCallbacks(filteredOpenAPI.getComponents().getCallbacks()); clone.getComponents().setExamples(filteredOpenAPI.getComponents().getExamples()); clone.getComponents().setExtensions(filteredOpenAPI.getComponents().getExtensions()); clone.getComponents().setHeaders(filteredOpenAPI.getComponents().getHeaders()); clone.getComponents().setLinks(filteredOpenAPI.getComponents().getLinks()); clone.getComponents().setParameters(filteredOpenAPI.getComponents().getParameters()); clone.getComponents().setRequestBodies(filteredOpenAPI.getComponents().getRequestBodies()); clone.getComponents().setResponses(filteredOpenAPI.getComponents().getResponses()); clone.getComponents().setPathItems(filteredOpenAPI.getComponents().getPathItems()); } if (filter.isRemovingUnreferencedDefinitions()) { clone = removeBrokenReferenceDefinitions(clone); } return clone; }
@Test(description = "it should change away with a new operation") public void changeGetResources() throws IOException { final OpenAPI openAPI = getOpenAPI(RESOURCE_PATH); OpenAPI filter = new SpecFilter().filter(openAPI, new ChangeGetOperationsFilter(), null, null, null); assertOperations(filter, CHANGED_OPERATION_ID, CHANGED_OPERATION_DESCRIPTION); }
public String build( final String cellValue ) { switch ( type ) { case FORALL: return buildForAll( cellValue ); case INDEXED: return buildMulti( cellValue ); default: return buildSingle( cellValue ); } }
@Test public void testMultiPlaceHolderEscapedComma() { final String snippet = "rulesOutputRouting.set( $1, $2, $3, $4, $5 );"; final SnippetBuilder snip = new SnippetBuilder(snippet); final String result = snip.build("\"80\",\"Department Manager\",toa.getPersonExpense().getEntityCode(\"Part Of\",\"Office\"),10004,30"); assertThat(result).isEqualTo("rulesOutputRouting.set( \"80\", \"Department Manager\", toa.getPersonExpense().getEntityCode(\"Part Of\",\"Office\"), 10004, 30 );"); }
public Locality locality() { return locality; }
@Test void localityCannotBeNull() { Assertions.assertThrows(NullPointerException.class, () -> DefaultBot.getDefaultBuilder().locality(null).build()); }
@Override public BytesInput getBytes() { // The Page Header should include: blockSizeInValues, numberOfMiniBlocks, totalValueCount if (deltaValuesToFlush != 0) { flushBlockBuffer(); } return BytesInput.concat( config.toBytesInput(), BytesInput.fromUnsignedVarInt(totalValueCount), BytesInput.fromZigZagVarInt(firstValue), BytesInput.from(baos)); }
@Test public void shouldSkipN() throws IOException { int[] data = new int[5 * blockSize + 1]; for (int i = 0; i < data.length; i++) { data[i] = i * 32; } writeData(data); reader = new DeltaBinaryPackingValuesReader(); reader.initFromPage(100, writer.getBytes().toInputStream()); int skipCount; for (int i = 0; i < data.length; i += skipCount + 1) { skipCount = (data.length - i) / 2; assertEquals(i * 32, reader.readInteger()); reader.skip(skipCount); } }
@Override public KsqlVersionMetrics collectMetrics() { final KsqlVersionMetrics metricsRecord = new KsqlVersionMetrics(); metricsRecord.setTimestamp(TimeUnit.MILLISECONDS.toSeconds(clock.millis())); metricsRecord.setConfluentPlatformVersion(AppInfo.getVersion()); metricsRecord.setKsqlComponentType(moduleType.name()); metricsRecord.setIsActive(activenessSupplier.get()); return metricsRecord; }
@Test public void shouldSetActivenessToFalse() { // Given: when(activenessStatusSupplier.get()).thenReturn(false); // When: final KsqlVersionMetrics metrics = basicCollector.collectMetrics(); // Then: assertThat(metrics.getIsActive(), is(false)); }
@Override public Set<V> removeAll(Object key) { return (Set<V>) get(removeAllAsync(key)); }
@Test public void testRemoveAll() { RSetMultimap<SimpleKey, SimpleValue> map = redisson.getSetMultimap("test1"); map.put(new SimpleKey("0"), new SimpleValue("1")); map.put(new SimpleKey("0"), new SimpleValue("2")); map.put(new SimpleKey("0"), new SimpleValue("3")); Set<SimpleValue> values = map.removeAll(new SimpleKey("0")); assertThat(values).containsOnly(new SimpleValue("1"), new SimpleValue("2"), new SimpleValue("3")); assertThat(map.size()).isZero(); Set<SimpleValue> values2 = map.removeAll(new SimpleKey("0")); assertThat(values2).isEmpty(); }
public static CopyFilter getCopyFilter(Configuration conf) { String filtersClassName = conf .get(DistCpConstants.CONF_LABEL_FILTERS_CLASS); if (filtersClassName != null) { try { Class<? extends CopyFilter> filtersClass = conf .getClassByName(filtersClassName) .asSubclass(CopyFilter.class); filtersClassName = filtersClass.getName(); Constructor<? extends CopyFilter> constructor = filtersClass .getDeclaredConstructor(Configuration.class); return constructor.newInstance(conf); } catch (Exception e) { LOG.error(DistCpConstants.CLASS_INSTANTIATION_ERROR_MSG + filtersClassName, e); throw new RuntimeException( DistCpConstants.CLASS_INSTANTIATION_ERROR_MSG + filtersClassName, e); } } else { return getDefaultCopyFilter(conf); } }
@Test public void testGetCopyFilterWrongClassType() throws Exception { final String filterName = "org.apache.hadoop.tools." + "TestCopyFilter.FilterNotExtendingCopyFilter"; Configuration configuration = new Configuration(false); configuration.set(DistCpConstants.CONF_LABEL_FILTERS_CLASS, filterName); intercept(RuntimeException.class, DistCpConstants.CLASS_INSTANTIATION_ERROR_MSG + filterName, () -> CopyFilter.getCopyFilter(configuration)); }
public static boolean isNative(RunnerApi.PTransform pTransform) { // TODO(https://github.com/apache/beam/issues/20192) Use default (context) classloader. Iterator<IsNativeTransform> matchers = ServiceLoader.load(IsNativeTransform.class, NativeTransforms.class.getClassLoader()) .iterator(); while (matchers.hasNext()) { if (matchers.next().test(pTransform)) { return true; } } return false; }
@Test public void testMatch() { Assert.assertTrue( NativeTransforms.isNative( RunnerApi.PTransform.newBuilder() .setSpec(RunnerApi.FunctionSpec.newBuilder().setUrn("test").build()) .build())); }
@Override public Object toKsqlRow(final Schema connectSchema, final Object connectData) { if (connectData == null) { return null; } return toKsqlValue(schema, connectSchema, connectData, ""); }
@Test public void shouldThrowOnTypeMismatch() { // Given: final Schema schema = SchemaBuilder.struct() .field("FIELD", SchemaBuilder.OPTIONAL_INT32_SCHEMA) .optional() .build(); final Schema badSchema = SchemaBuilder.struct() .field("FIELD", SchemaBuilder.OPTIONAL_STRING_SCHEMA) .optional() .build(); final Struct badData = new Struct(badSchema); badData.put("FIELD", "fubar"); final ConnectDataTranslator connectToKsqlTranslator = new ConnectDataTranslator(schema); // When: final DataException e = assertThrows( DataException.class, () -> connectToKsqlTranslator.toKsqlRow(badSchema, badData) ); // Then: assertThat(e.getMessage(), containsString(Schema.Type.STRING.getName())); assertThat(e.getMessage(), containsString(Schema.Type.INT32.getName())); assertThat(e.getMessage(), containsString("FIELD")); }
public String get(Component c) { return get(Group.Alphabetic, c); }
@Test public void testValueOf() { PersonName pn = new PersonName( "Adams^John Robert Quincy^^Rev.^B.A. M.Div."); assertEquals("Adams", pn.get(PersonName.Component.FamilyName)); assertEquals("John Robert Quincy", pn.get(PersonName.Component.GivenName)); assertEquals("Rev.", pn.get(PersonName.Component.NamePrefix)); assertEquals("B.A. M.Div.", pn.get(PersonName.Component.NameSuffix)); }
public static Type fromHiveTypeToArrayType(String typeStr) { if (HIVE_UNSUPPORTED_TYPES.stream().anyMatch(typeStr.toUpperCase()::contains)) { return Type.UNKNOWN_TYPE; } Matcher matcher = Pattern.compile(ARRAY_PATTERN).matcher(typeStr.toLowerCase(Locale.ROOT)); Type itemType; if (matcher.find()) { Type innerType = fromHiveType(matcher.group(1)); if (Type.UNKNOWN_TYPE.equals(innerType)) { itemType = Type.UNKNOWN_TYPE; } else { itemType = new ArrayType(innerType); } } else { throw new StarRocksConnectorException("Failed to get ArrayType at " + typeStr); } return itemType; }
@Test public void testArrayString() { ScalarType itemType = ScalarType.createType(PrimitiveType.DATE); ArrayType arrayType = new ArrayType(new ArrayType(itemType)); String typeStr = "Array<Array<date>>"; Type resType = fromHiveTypeToArrayType(typeStr); Assert.assertEquals(arrayType, resType); itemType = ScalarType.createDefaultCatalogString(); arrayType = new ArrayType(itemType); typeStr = "Array<string>"; resType = fromHiveTypeToArrayType(typeStr); Assert.assertEquals(arrayType, resType); itemType = ScalarType.createType(PrimitiveType.INT); arrayType = new ArrayType(new ArrayType(new ArrayType(itemType))); typeStr = "array<Array<Array<int>>>"; resType = fromHiveTypeToArrayType(typeStr); Assert.assertEquals(arrayType, resType); itemType = ScalarType.createType(PrimitiveType.BIGINT); arrayType = new ArrayType(new ArrayType(new ArrayType(itemType))); typeStr = "array<Array<Array<bigint>>>"; resType = fromHiveTypeToArrayType(typeStr); Assert.assertEquals(arrayType, resType); itemType = ScalarType.createUnifiedDecimalType(4, 2); Assert.assertEquals(new ArrayType(new ArrayType(itemType)), fromHiveTypeToArrayType("array<Array<decimal(4, 2)>>")); }
@Override public Result invoke(final Invocation invocation) throws RpcException { checkWhetherDestroyed(); // binding attachments into invocation. // Map<String, Object> contextAttachments = RpcContext.getClientAttachment().getObjectAttachments(); // if (contextAttachments != null && contextAttachments.size() != 0) { // ((RpcInvocation) invocation).addObjectAttachmentsIfAbsent(contextAttachments); // } InvocationProfilerUtils.enterDetailProfiler(invocation, () -> "Router route."); List<Invoker<T>> invokers = list(invocation); InvocationProfilerUtils.releaseDetailProfiler(invocation); checkInvokers(invokers, invocation); LoadBalance loadbalance = initLoadBalance(invokers, invocation); RpcUtils.attachInvocationIdIfAsync(getUrl(), invocation); InvocationProfilerUtils.enterDetailProfiler( invocation, () -> "Cluster " + this.getClass().getName() + " invoke."); try { return doInvoke(invocation, invokers, loadbalance); } finally { InvocationProfilerUtils.releaseDetailProfiler(invocation); } }
@Test void testTimeoutExceptionCode() { List<Invoker<DemoService>> invokers = new ArrayList<Invoker<DemoService>>(); invokers.add(new Invoker<DemoService>() { @Override public Class<DemoService> getInterface() { return DemoService.class; } public URL getUrl() { return URL.valueOf("dubbo://" + NetUtils.getLocalHost() + ":20880/" + DemoService.class.getName()); } @Override public boolean isAvailable() { return false; } @Override public Result invoke(Invocation invocation) throws RpcException { throw new RpcException(RpcException.TIMEOUT_EXCEPTION, "test timeout"); } @Override public void destroy() {} }); Directory<DemoService> directory = new StaticDirectory<DemoService>(invokers); FailoverClusterInvoker<DemoService> failoverClusterInvoker = new FailoverClusterInvoker<DemoService>(directory); RpcInvocation invocation = new RpcInvocation("sayHello", DemoService.class.getName(), "", new Class<?>[0], new Object[0]); try { failoverClusterInvoker.invoke(invocation); Assertions.fail(); } catch (RpcException e) { Assertions.assertEquals(RpcException.TIMEOUT_EXCEPTION, e.getCode()); } ForkingClusterInvoker<DemoService> forkingClusterInvoker = new ForkingClusterInvoker<DemoService>(directory); invocation = new RpcInvocation("sayHello", DemoService.class.getName(), "", new Class<?>[0], new Object[0]); try { forkingClusterInvoker.invoke(invocation); Assertions.fail(); } catch (RpcException e) { Assertions.assertEquals(RpcException.TIMEOUT_EXCEPTION, e.getCode()); } FailfastClusterInvoker<DemoService> failfastClusterInvoker = new FailfastClusterInvoker<DemoService>(directory); invocation = new RpcInvocation("sayHello", DemoService.class.getName(), "", new Class<?>[0], new Object[0]); try { failfastClusterInvoker.invoke(invocation); Assertions.fail(); } catch (RpcException e) { Assertions.assertEquals(RpcException.TIMEOUT_EXCEPTION, e.getCode()); } }
public static void rethrowIfUnrecoverable(Throwable exception) { if (exception instanceof OutOfMemoryError) { ExceptionUtils.throwAsUncheckedException(exception); } }
@Test void ignoresThrowable() { assertDoesNotThrow(() -> rethrowIfUnrecoverable(new Throwable())); }
@Override public UUID generateId() { long counterValue = counter.incrementAndGet(); if (counterValue == MAX_COUNTER_VALUE) { throw new CucumberException( "Out of " + IncrementingUuidGenerator.class.getSimpleName() + " capacity. Please generate using a new instance or use another " + UuidGenerator.class.getSimpleName() + " implementation."); } long leastSigBits = counterValue | 0x8000000000000000L; // set variant return new UUID(msb, leastSigBits); }
@Test void same_thread_generates_different_UuidGenerators() { // Given/When List<UUID> uuids = IntStream.rangeClosed(1, 10) .mapToObj(i -> new IncrementingUuidGenerator().generateId()) .collect(Collectors.toList()); // Then checkUuidProperties(uuids); }
static String headerLine(CSVFormat csvFormat) { return String.join(String.valueOf(csvFormat.getDelimiter()), csvFormat.getHeader()); }
@Test public void givenNoTrim_keepsSpaces() { CSVFormat csvFormat = csvFormat().withTrim(false); PCollection<String> input = pipeline.apply( Create.of( headerLine(csvFormat), " a ,1,1.1", "b, 2 ,2.2", "c,3, 3.3 ")); CsvIOStringToCsvRecord underTest = new CsvIOStringToCsvRecord(csvFormat); CsvIOParseResult<List<String>> result = input.apply(underTest); PAssert.that(result.getOutput()) .containsInAnyOrder( Arrays.asList( Arrays.asList(" a ", "1", "1.1"), Arrays.asList("b", " 2 ", "2.2"), Arrays.asList("c", "3", " 3.3 "))); PAssert.that(result.getErrors()).empty(); pipeline.run(); }
public static void urlEncode(String str, StringBuilder sb) { for (int idx = 0; idx < str.length(); ++idx) { char c = str.charAt(idx); if ('+' == c) { sb.append("%2B"); } else if ('%' == c) { sb.append("%25"); } else { sb.append(c); } } }
@Test public void urlEncode() { String str = "hello+World%"; String expected = "hello%2BWorld%25"; StringBuilder stringBuilder = new StringBuilder(); GroupKey.urlEncode(str, stringBuilder); Assert.isTrue(stringBuilder.toString().contains(expected)); }
@Override public void updateTask(Task task) { Map.Entry<String, Long> taskInDb = getTaskChecksumAndUpdateTime(task.getTaskId()); String taskCheckSum = computeChecksum(task); if (taskInDb != null) { long updateInterval = task.getUpdateTime() - taskInDb.getValue(); if (taskCheckSum.equals(taskInDb.getKey()) && updateInterval < maxTaskUpdateInterval) { LOG.debug( "task has the same checksum and update interval {} is less than max interval {} millis and skip update", updateInterval, maxTaskUpdateInterval); return; } LOG.info( "update task [{}] with checksum=[{}] with an update interval=[{}]", task.getTaskId(), taskCheckSum, updateInterval); } task.setWorkerId(taskCheckSum); super.updateTask(task); }
@Test public void testUpdateTask() { // should update DB if this is new maestroExecutionDao.updateTask(task); Task actual = maestroExecutionDao.getTask(TEST_TASK_ID); assertEquals("b1a2db354f803423e990fad1b9265b6f", actual.getWorkerId()); assertEquals(1, actual.getPollCount()); assertEquals(0, actual.getUpdateTime()); // should update DB if there is a change task.getOutputData().put("bat", true); maestroExecutionDao.updateTask(task); actual = maestroExecutionDao.getTask(TEST_TASK_ID); assertEquals("2858a83f63639306837f276e545b57ed", actual.getWorkerId()); assertEquals(2, actual.getPollCount()); assertEquals(0, actual.getUpdateTime()); // no real DB update if only poll count is updated task.setPollCount(10); maestroExecutionDao.updateTask(task); actual = maestroExecutionDao.getTask(TEST_TASK_ID); assertEquals("2858a83f63639306837f276e545b57ed", actual.getWorkerId()); assertEquals(2, actual.getPollCount()); assertEquals(0, actual.getUpdateTime()); // no real DB update if task updates workerId task.setPollCount(10); task.setWorkerId("foo"); maestroExecutionDao.updateTask(task); actual = maestroExecutionDao.getTask(TEST_TASK_ID); assertEquals("2858a83f63639306837f276e545b57ed", actual.getWorkerId()); assertEquals(2, actual.getPollCount()); assertEquals(0, actual.getUpdateTime()); // no real DB update if only update time is updated within the max update interval task.setPollCount(10); task.setUpdateTime(MAX_UPDATE_INTERVAL - 1); maestroExecutionDao.updateTask(task); actual = maestroExecutionDao.getTask(TEST_TASK_ID); assertEquals("2858a83f63639306837f276e545b57ed", actual.getWorkerId()); assertEquals(2, actual.getPollCount()); assertEquals(0, actual.getUpdateTime()); // should update if the update time is larger than max update interval task.setPollCount(10); task.setUpdateTime(MAX_UPDATE_INTERVAL + 1); maestroExecutionDao.updateTask(task); actual = maestroExecutionDao.getTask(TEST_TASK_ID); assertEquals("2858a83f63639306837f276e545b57ed", actual.getWorkerId()); assertEquals(11, actual.getPollCount()); assertEquals(MAX_UPDATE_INTERVAL + 1, actual.getUpdateTime()); }
public void updateDetectNewPartitionWatermark(Instant watermark) { writeToMdTableWatermarkHelper(getFullDetectNewPartition(), watermark, null); }
@Test public void testUpdateDetectNewPartitionWatermark() { Instant watermark = Instant.now(); metadataTableDao.updateDetectNewPartitionWatermark(watermark); Row row = dataClient.readRow( metadataTableAdminDao.getTableId(), metadataTableDao .getChangeStreamNamePrefix() .concat(MetadataTableAdminDao.DETECT_NEW_PARTITION_SUFFIX)); assertNull(MetadataTableEncoder.parseTokenFromRow(row)); assertEquals(watermark, parseWatermarkFromRow(row)); }
@Override public ConfigOperateResult insertOrUpdateTag(final ConfigInfo configInfo, final String tag, final String srcIp, final String srcUser) { if (findConfigInfo4TagState(configInfo.getDataId(), configInfo.getGroup(), configInfo.getTenant(), tag) == null) { return addConfigInfo4Tag(configInfo, tag, srcIp, srcUser); } else { return updateConfigInfo4Tag(configInfo, tag, srcIp, srcUser); } }
@Test void testInsertOrUpdateTagOfAdd() { String dataId = "dataId111222"; String group = "group"; String tenant = "tenant"; String appName = "appname1234"; String content = "c12345"; ConfigInfo configInfo = new ConfigInfo(dataId, group, tenant, appName, content); configInfo.setEncryptedDataKey("key23456"); //mock query config state empty and return obj after insert ConfigInfoStateWrapper configInfoStateWrapper = new ConfigInfoStateWrapper(); configInfoStateWrapper.setLastModified(System.currentTimeMillis()); configInfoStateWrapper.setId(234567890L); String tag = "tag123"; Mockito.when(databaseOperate.queryOne(anyString(), eq(new Object[] {dataId, group, tenant, tag}), eq(CONFIG_INFO_STATE_WRAPPER_ROW_MAPPER))).thenReturn(null).thenReturn(configInfoStateWrapper); String srcIp = "ip345678"; String srcUser = "user1234567"; ConfigOperateResult configOperateResult = embeddedConfigInfoTagPersistService.insertOrUpdateTag(configInfo, tag, srcIp, srcUser); //mock insert invoked. embeddedStorageContextHolderMockedStatic.verify( () -> EmbeddedStorageContextHolder.addSqlContext(anyString(), eq(dataId), eq(group), eq(tenant), eq(tag), eq(appName), eq(content), eq(MD5Utils.md5Hex(content, Constants.PERSIST_ENCODE)), eq(srcIp), eq(srcUser), any(Timestamp.class), any(Timestamp.class)), times(1)); assertEquals(configInfoStateWrapper.getId(), configOperateResult.getId()); assertEquals(configInfoStateWrapper.getLastModified(), configOperateResult.getLastModified()); }
@Override public final int position() { return pos; }
@Test(expected = IllegalArgumentException.class) public void testPositionNewPos_negativeNewPos() { in.position(-1); }
@Override public void createFunction(SqlInvokedFunction function, boolean replace) { checkCatalog(function); checkFunctionLanguageSupported(function); checkArgument(!function.hasVersion(), "function '%s' is already versioned", function); QualifiedObjectName functionName = function.getFunctionId().getFunctionName(); checkFieldLength("Catalog name", functionName.getCatalogName(), MAX_CATALOG_NAME_LENGTH); checkFieldLength("Schema name", functionName.getSchemaName(), MAX_SCHEMA_NAME_LENGTH); if (!functionNamespaceDao.functionNamespaceExists(functionName.getCatalogName(), functionName.getSchemaName())) { throw new PrestoException(NOT_FOUND, format("Function namespace not found: %s", functionName.getCatalogSchemaName())); } checkFieldLength("Function name", functionName.getObjectName(), MAX_FUNCTION_NAME_LENGTH); if (function.getParameters().size() > MAX_PARAMETER_COUNT) { throw new PrestoException(GENERIC_USER_ERROR, format("Function has more than %s parameters: %s", MAX_PARAMETER_COUNT, function.getParameters().size())); } for (Parameter parameter : function.getParameters()) { checkFieldLength("Parameter name", parameter.getName(), MAX_PARAMETER_NAME_LENGTH); } checkFieldLength( "Parameter type list", function.getFunctionId().getArgumentTypes().stream() .map(TypeSignature::toString) .collect(joining(",")), MAX_PARAMETER_TYPES_LENGTH); checkFieldLength("Return type", function.getSignature().getReturnType().toString(), MAX_RETURN_TYPE_LENGTH); jdbi.useTransaction(handle -> { FunctionNamespaceDao transactionDao = handle.attach(functionNamespaceDaoClass); Optional<SqlInvokedFunctionRecord> latestVersion = transactionDao.getLatestRecordForUpdate(hash(function.getFunctionId()), function.getFunctionId()); if (!replace && latestVersion.isPresent() && !latestVersion.get().isDeleted()) { throw new PrestoException(ALREADY_EXISTS, "Function already exists: " + function.getFunctionId()); } if (!latestVersion.isPresent() || !latestVersion.get().getFunction().hasSameDefinitionAs(function)) { long newVersion = latestVersion.map(SqlInvokedFunctionRecord::getFunction).map(MySqlFunctionNamespaceManager::getLongVersion).orElse(0L) + 1; insertSqlInvokedFunction(transactionDao, function, newVersion); } else if (latestVersion.get().isDeleted()) { SqlInvokedFunction latest = latestVersion.get().getFunction(); checkState(latest.hasVersion(), "Function version missing: %s", latest.getFunctionId()); transactionDao.setDeletionStatus(hash(latest.getFunctionId()), latest.getFunctionId(), getLongVersion(latest), false); } }); refreshFunctionsCache(functionName); }
@Test(expectedExceptions = PrestoException.class, expectedExceptionsMessageRegExp = "Schema name exceeds max length of 128.*") public void testSchemaNameTooLong() { QualifiedObjectName functionName = QualifiedObjectName.valueOf(TEST_CATALOG, dummyString(129), "tangent"); createFunction(createFunctionTangent(functionName), false); }
public boolean init( StepMetaInterface smi, StepDataInterface sdi ) { meta = (TableOutputMeta) smi; data = (TableOutputData) sdi; if ( super.init( smi, sdi ) ) { try { data.commitSize = Integer.parseInt( environmentSubstitute( meta.getCommitSize() ) ); data.databaseMeta = meta.getDatabaseMeta(); DatabaseInterface dbInterface = data.databaseMeta.getDatabaseInterface(); // Batch updates are not supported on PostgreSQL (and look-a-likes) // together with error handling (PDI-366). // For these situations we can use savepoints to help out. data.useSafePoints = data.databaseMeta.getDatabaseInterface().useSafePoints() && getStepMeta().isDoingErrorHandling(); // Get the boolean that indicates whether or not we can/should release // savepoints during data load. data.releaseSavepoint = dbInterface.releaseSavepoint(); // Disable batch mode in case // - we use an unlimited commit size // - if we need to pick up auto-generated keys // - if you are running the transformation as a single database transaction (unique connections) // - if we are reverting to save-points data.batchMode = meta.useBatchUpdate() && data.commitSize > 0 && !meta.isReturningGeneratedKeys() && !getTransMeta().isUsingUniqueConnections() && !data.useSafePoints; // Per PDI-6211 : give a warning that batch mode operation in combination with step error handling can lead to // incorrectly processed rows. if ( getStepMeta().isDoingErrorHandling() && !dbInterface.supportsErrorHandlingOnBatchUpdates() ) { log.logMinimal( BaseMessages.getString( PKG, "TableOutput.Warning.ErrorHandlingIsNotFullySupportedWithBatchProcessing" ) ); } if ( !dbInterface.supportsStandardTableOutput() ) { throw new KettleException( dbInterface.getUnsupportedTableOutputMessage() ); } if ( log.isBasic() ) { logBasic( "Connected to database [" + meta.getDatabaseMeta() + "] (commit=" + data.commitSize + ")" ); } // Postpone commit as long as possible. PDI-2091 if ( data.commitSize == 0 ) { data.commitSize = Integer.MAX_VALUE; } data.db.setCommitSize( data.commitSize ); if ( !meta.isPartitioningEnabled() && !meta.isTableNameInField() ) { data.tableName = environmentSubstitute( meta.getTableName() ); } return true; } catch ( KettleException e ) { logError( "An error occurred intialising this step: " + e.getMessage() ); stopAll(); setErrors( 1 ); } } return false; }
@Test public void testInit_unsupportedConnection() { TableOutputMeta meta = mock( TableOutputMeta.class ); TableOutputData data = mock( TableOutputData.class ); DatabaseInterface dbInterface = mock( DatabaseInterface.class ); doNothing().when( tableOutputSpy ).logError( anyString() ); when( meta.getCommitSize() ).thenReturn( "1" ); when( meta.getDatabaseMeta() ).thenReturn( databaseMeta ); when( databaseMeta.getDatabaseInterface() ).thenReturn( dbInterface ); String unsupportedTableOutputMessage = "unsupported exception"; when( dbInterface.getUnsupportedTableOutputMessage() ).thenReturn( unsupportedTableOutputMessage ); //Will cause the Kettle Exception when( dbInterface.supportsStandardTableOutput() ).thenReturn( false ); tableOutputSpy.init( meta, data ); KettleException ke = new KettleException( unsupportedTableOutputMessage ); verify( tableOutputSpy, times( 1 ) ).logError( "An error occurred intialising this step: " + ke.getMessage() ); }
@Override public RedisClusterNode clusterGetNodeForSlot(int slot) { Iterable<RedisClusterNode> res = clusterGetNodes(); for (RedisClusterNode redisClusterNode : res) { if (redisClusterNode.isMaster() && redisClusterNode.getSlotRange().contains(slot)) { return redisClusterNode; } } return null; }
@Test public void testClusterGetNodeForSlot() { RedisClusterNode node1 = connection.clusterGetNodeForSlot(1); RedisClusterNode node2 = connection.clusterGetNodeForSlot(16000); assertThat(node1.getId()).isNotEqualTo(node2.getId()); }
@Override public String getName() { return this.name; }
@Test public void allCircuitBreakerStatesAllowTransitionToMetricsOnlyMode() { for (final CircuitBreaker.State state : CircuitBreaker.State.values()) { assertThatNoException().isThrownBy(() -> CircuitBreaker.StateTransition.transitionBetween(circuitBreaker.getName(), state, CircuitBreaker.State.METRICS_ONLY)); } }
public static SlotManagerConfiguration fromConfiguration( Configuration configuration, WorkerResourceSpec defaultWorkerResourceSpec) throws ConfigurationException { final Time rpcTimeout = Time.fromDuration(configuration.get(RpcOptions.ASK_TIMEOUT_DURATION)); final Time taskManagerTimeout = Time.fromDuration(configuration.get(ResourceManagerOptions.TASK_MANAGER_TIMEOUT)); final Duration requirementCheckDelay = configuration.get(ResourceManagerOptions.REQUIREMENTS_CHECK_DELAY); final Duration declareNeededResourceDelay = configuration.get(ResourceManagerOptions.DECLARE_NEEDED_RESOURCE_DELAY); boolean waitResultConsumedBeforeRelease = configuration.get(ResourceManagerOptions.TASK_MANAGER_RELEASE_WHEN_RESULT_CONSUMED); TaskManagerLoadBalanceMode taskManagerLoadBalanceMode = TaskManagerLoadBalanceMode.loadFromConfiguration(configuration); int numSlotsPerWorker = configuration.get(TaskManagerOptions.NUM_TASK_SLOTS); int minSlotNum = configuration.get(ResourceManagerOptions.MIN_SLOT_NUM); int maxSlotNum = configuration.get(ResourceManagerOptions.MAX_SLOT_NUM); int redundantTaskManagerNum = configuration.get(ResourceManagerOptions.REDUNDANT_TASK_MANAGER_NUM); return new SlotManagerConfiguration( rpcTimeout, taskManagerTimeout, requirementCheckDelay, declareNeededResourceDelay, waitResultConsumedBeforeRelease, taskManagerLoadBalanceMode, defaultWorkerResourceSpec, numSlotsPerWorker, minSlotNum, maxSlotNum, getMinTotalCpu(configuration, defaultWorkerResourceSpec, minSlotNum), getMaxTotalCpu(configuration, defaultWorkerResourceSpec, maxSlotNum), getMinTotalMem(configuration, defaultWorkerResourceSpec, minSlotNum), getMaxTotalMem(configuration, defaultWorkerResourceSpec, maxSlotNum), redundantTaskManagerNum); }
@Test void testComputeMinMaxMemoryIsInvalid() { final Configuration configuration = new Configuration(); final MemorySize minMemorySize = MemorySize.ofMebiBytes(500); final MemorySize maxMemorySize = MemorySize.ofMebiBytes(700); final int numSlots = 3; configuration.set(ResourceManagerOptions.MIN_TOTAL_MEM, minMemorySize); configuration.set(ResourceManagerOptions.MAX_TOTAL_MEM, maxMemorySize); assertThatIllegalStateException() .isThrownBy( () -> SlotManagerConfiguration.fromConfiguration( configuration, new WorkerResourceSpec.Builder() .setNumSlots(numSlots) .setTaskHeapMemoryMB(100) .setManagedMemoryMB(100) .setNetworkMemoryMB(100) .setTaskOffHeapMemoryMB(100) .build())); }
@Override public boolean tryFence(HAServiceTarget target, String args) { ProcessBuilder builder; String cmd = parseArgs(target.getTransitionTargetHAStatus(), args); if (!Shell.WINDOWS) { builder = new ProcessBuilder("bash", "-e", "-c", cmd); } else { builder = new ProcessBuilder("cmd.exe", "/c", cmd); } setConfAsEnvVars(builder.environment()); addTargetInfoAsEnvVars(target, builder.environment()); Process p; try { p = builder.start(); p.getOutputStream().close(); } catch (IOException e) { LOG.warn("Unable to execute " + cmd, e); return false; } String pid = tryGetPid(p); LOG.info("Launched fencing command '" + cmd + "' with " + ((pid != null) ? ("pid " + pid) : "unknown pid")); String logPrefix = abbreviate(cmd, ABBREV_LENGTH); if (pid != null) { logPrefix = "[PID " + pid + "] " + logPrefix; } // Pump logs to stderr StreamPumper errPumper = new StreamPumper( LOG, logPrefix, p.getErrorStream(), StreamPumper.StreamType.STDERR); errPumper.start(); StreamPumper outPumper = new StreamPumper( LOG, logPrefix, p.getInputStream(), StreamPumper.StreamType.STDOUT); outPumper.start(); int rc; try { rc = p.waitFor(); errPumper.join(); outPumper.join(); } catch (InterruptedException ie) { LOG.warn("Interrupted while waiting for fencing command: " + cmd); return false; } return rc == 0; }
@Test public void testBasicSuccessFailure() { assertTrue(fencer.tryFence(TEST_TARGET, "echo")); assertFalse(fencer.tryFence(TEST_TARGET, "exit 1")); // bad path should also fail assertFalse(fencer.tryFence(TEST_TARGET, "xxxxxxxxxxxx")); }
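A note on the two StreamPumper threads in the fencer above: if a child process's stdout or stderr pipe fills up and nobody reads it, the child can block and waitFor() may never return. Below is a minimal, hypothetical sketch of that launch-and-drain pattern in plain Java (not Hadoop's StreamPumper API), assuming a POSIX shell is available:

// Minimal sketch (assumes bash on PATH); drains both pipes so waitFor() cannot deadlock.
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;

public class DrainExample {
    private static Thread drain(InputStream in, String prefix) {
        Thread t = new Thread(() -> {
            try (BufferedReader r = new BufferedReader(new InputStreamReader(in))) {
                String line;
                while ((line = r.readLine()) != null) {
                    System.err.println(prefix + line);
                }
            } catch (IOException ignored) {
                // The stream closes when the child exits.
            }
        });
        t.start();
        return t;
    }

    public static void main(String[] args) throws Exception {
        Process p = new ProcessBuilder("bash", "-c", "echo out; echo err 1>&2").start();
        p.getOutputStream().close(); // nothing is fed to the child's stdin
        Thread out = drain(p.getInputStream(), "[STDOUT] ");
        Thread err = drain(p.getErrorStream(), "[STDERR] ");
        int rc = p.waitFor();
        out.join();
        err.join();
        System.out.println("exit code: " + rc); // 0 means the command (the fence) succeeded
    }
}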
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException { final Map<Path, List<ObjectKeyAndVersion>> map = new HashMap<>(); final List<Path> containers = new ArrayList<>(); for(Path file : files.keySet()) { if(containerService.isContainer(file)) { containers.add(file); continue; } callback.delete(file); final Path bucket = containerService.getContainer(file); if(file.getType().contains(Path.Type.upload)) { // In-progress multipart upload try { multipartService.delete(new MultipartUpload(file.attributes().getVersionId(), bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file))); } catch(NotfoundException ignored) { log.warn(String.format("Ignore failure deleting multipart upload %s", file)); } } else { final List<ObjectKeyAndVersion> keys = new ArrayList<>(); // Always returning 204 even if the key does not exist. Does not return 404 for non-existing keys keys.add(new ObjectKeyAndVersion(containerService.getKey(file), file.attributes().getVersionId())); if(map.containsKey(bucket)) { map.get(bucket).addAll(keys); } else { map.put(bucket, keys); } } } // Iterate over all containers and delete list of keys for(Map.Entry<Path, List<ObjectKeyAndVersion>> entry : map.entrySet()) { final Path container = entry.getKey(); final List<ObjectKeyAndVersion> keys = entry.getValue(); this.delete(container, keys, prompt); } for(Path file : containers) { callback.delete(file); // Finally delete bucket itself try { final String bucket = containerService.getContainer(file).getName(); session.getClient().deleteBucket(bucket); session.getClient().getRegionEndpointCache().removeRegionForBucketName(bucket); } catch(ServiceException e) { throw new S3ExceptionMappingService().map("Cannot delete {0}", e, file); } } }
@Test public void testDeleteFileBackslash() throws Exception { final Path container = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.volume, Path.Type.directory)); final Path test = new Path(container, String.format("%s\\%s", new AlphanumericRandomStringService().random(), new AlphanumericRandomStringService().random()), EnumSet.of(Path.Type.file)); new S3TouchFeature(session, new S3AccessControlListFeature(session)).touch(test, new TransferStatus()); assertTrue(new S3FindFeature(session, new S3AccessControlListFeature(session)).find(test)); new S3MultipleDeleteFeature(session, new S3AccessControlListFeature(session)).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback()); assertFalse(new S3FindFeature(session, new S3AccessControlListFeature(session)).find(test)); }
@Override public int getGroupKeyLength() { return 0; }
@Test public void testGetGroupKeyLength() { // Run the test final int result = _selectionResultSetUnderTest.getGroupKeyLength(); // Verify the results assertEquals(0, result); }
public EpochEntry findEpochEntryByOffset(final long offset) { this.readLock.lock(); try { if (!this.epochMap.isEmpty()) { for (Map.Entry<Integer, EpochEntry> entry : this.epochMap.entrySet()) { if (entry.getValue().getStartOffset() <= offset && entry.getValue().getEndOffset() > offset) { return new EpochEntry(entry.getValue()); } } } return null; } finally { this.readLock.unlock(); } }
@Test public void testFindEpochEntryByOffset() { final EpochEntry entry = this.epochCache.findEpochEntryByOffset(350); assertEquals(entry.getEpoch(), 2); assertEquals(entry.getStartOffset(), 300); assertEquals(entry.getEndOffset(), 500); }
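The lookup above treats each epoch as the half-open interval [startOffset, endOffset), so an offset equal to an entry's end offset belongs to the next epoch. A standalone sketch of that containment check follows; the epoch values (2, 300, 500) merely mirror what the test fixture appears to contain, and the Epoch record here is hypothetical, not RocketMQ's EpochEntry class:

// Hypothetical illustration of the [start, end) range check used above.
import java.util.List;

public class EpochLookup {
    record Epoch(int epoch, long start, long end) { }

    static Epoch find(List<Epoch> epochs, long offset) {
        for (Epoch e : epochs) {
            if (e.start() <= offset && offset < e.end()) { // half-open: [start, end)
                return e;
            }
        }
        return null; // no epoch covers this offset
    }

    public static void main(String[] args) {
        List<Epoch> epochs = List.of(new Epoch(1, 100, 300), new Epoch(2, 300, 500));
        System.out.println(find(epochs, 350)); // Epoch[epoch=2, start=300, end=500]
        System.out.println(find(epochs, 300)); // the boundary offset falls in epoch 2, not 1
    }
}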
@Override public void convertWeightsForChildQueues(FSQueue queue, CapacitySchedulerConfiguration csConfig) { List<FSQueue> children = queue.getChildQueues(); if (queue instanceof FSParentQueue || !children.isEmpty()) { QueuePath queuePath = new QueuePath(queue.getName()); if (queue.getName().equals(ROOT_QUEUE)) { csConfig.setNonLabeledQueueWeight(queuePath, queue.getWeight()); } children.forEach(fsQueue -> csConfig.setNonLabeledQueueWeight( new QueuePath(fsQueue.getName()), fsQueue.getWeight())); csConfig.setAutoQueueCreationV2Enabled(queuePath, true); } }
@Test public void testAutoCreateV2FlagOnParent() { FSQueue root = createFSQueues(1); converter.convertWeightsForChildQueues(root, csConfig); assertTrue("root autocreate v2 enabled", csConfig.isAutoQueueCreationV2Enabled(ROOT)); }
@Override public int hashCode() { return serializer != null ? serializer.hashCode() : 0; }
@Test public void testAdaptorEqualAndHashCode() { StreamSerializerAdapter theOther = new StreamSerializerAdapter(serializer); StreamSerializerAdapter theEmptyOne = new StreamSerializerAdapter(null); assertEquals(adapter, adapter); assertEquals(adapter, theOther); assertNotEquals(null, adapter); assertNotEquals("Not An Adaptor", adapter); assertNotEquals(adapter, theEmptyOne); assertEquals(adapter.hashCode(), serializer.hashCode()); assertEquals(0, theEmptyOne.hashCode()); }
public PullResult pullKernelImpl( final MessageQueue mq, final String subExpression, final String expressionType, final long subVersion, final long offset, final int maxNums, final int maxSizeInBytes, final int sysFlag, final long commitOffset, final long brokerSuspendMaxTimeMillis, final long timeoutMillis, final CommunicationMode communicationMode, final PullCallback pullCallback ) throws MQClientException, RemotingException, MQBrokerException, InterruptedException { FindBrokerResult findBrokerResult = this.mQClientFactory.findBrokerAddressInSubscribe(this.mQClientFactory.getBrokerNameFromMessageQueue(mq), this.recalculatePullFromWhichNode(mq), false); if (null == findBrokerResult) { this.mQClientFactory.updateTopicRouteInfoFromNameServer(mq.getTopic()); findBrokerResult = this.mQClientFactory.findBrokerAddressInSubscribe(this.mQClientFactory.getBrokerNameFromMessageQueue(mq), this.recalculatePullFromWhichNode(mq), false); } if (findBrokerResult != null) { { // check version if (!ExpressionType.isTagType(expressionType) && findBrokerResult.getBrokerVersion() < MQVersion.Version.V4_1_0_SNAPSHOT.ordinal()) { throw new MQClientException("The broker[" + mq.getBrokerName() + ", " + findBrokerResult.getBrokerVersion() + "] does not upgrade to support for filter message by " + expressionType, null); } } int sysFlagInner = sysFlag; if (findBrokerResult.isSlave()) { sysFlagInner = PullSysFlag.clearCommitOffsetFlag(sysFlagInner); } PullMessageRequestHeader requestHeader = new PullMessageRequestHeader(); requestHeader.setConsumerGroup(this.consumerGroup); requestHeader.setTopic(mq.getTopic()); requestHeader.setQueueId(mq.getQueueId()); requestHeader.setQueueOffset(offset); requestHeader.setMaxMsgNums(maxNums); requestHeader.setSysFlag(sysFlagInner); requestHeader.setCommitOffset(commitOffset); requestHeader.setSuspendTimeoutMillis(brokerSuspendMaxTimeMillis); requestHeader.setSubscription(subExpression); requestHeader.setSubVersion(subVersion); requestHeader.setMaxMsgBytes(maxSizeInBytes); requestHeader.setExpressionType(expressionType); requestHeader.setBrokerName(mq.getBrokerName()); String brokerAddr = findBrokerResult.getBrokerAddr(); if (PullSysFlag.hasClassFilterFlag(sysFlagInner)) { brokerAddr = computePullFromWhichFilterServer(mq.getTopic(), brokerAddr); } PullResult pullResult = this.mQClientFactory.getMQClientAPIImpl().pullMessage( brokerAddr, requestHeader, timeoutMillis, communicationMode, pullCallback); return pullResult; } throw new MQClientException("The broker[" + mq.getBrokerName() + "] not exist", null); }
@Test public void testPullKernelImpl() throws Exception { PullCallback pullCallback = mock(PullCallback.class); when(mQClientFactory.getMQClientAPIImpl()).thenReturn(mqClientAPIImpl); PullResult actual = pullAPIWrapper.pullKernelImpl(createMessageQueue(), "", "", 1L, 1L, 1, 1, PullSysFlag.buildSysFlag(false, false, false, true), 1L, System.currentTimeMillis(), defaultTimeout, CommunicationMode.ASYNC, pullCallback); assertNull(actual); verify(mqClientAPIImpl, times(1)).pullMessage(eq(defaultBroker), any(PullMessageRequestHeader.class), eq(defaultTimeout), any(CommunicationMode.class), any(PullCallback.class)); }
public static Set<X509Certificate> filterValid( X509Certificate... certificates ) { final Set<X509Certificate> results = new HashSet<>(); if (certificates != null) { for ( X509Certificate certificate : certificates ) { if ( certificate == null ) { continue; } try { certificate.checkValidity(); } catch ( CertificateExpiredException | CertificateNotYetValidException e ) { // Not yet or no longer valid. Don't include in result. continue; } results.add( certificate ); } } return results; }
@Test public void testFilterValidEmpty() throws Exception { // Setup fixture. final Collection<X509Certificate> input = new ArrayList<>(); // Execute system under test. final Collection<X509Certificate> result = CertificateUtils.filterValid( input ); // Verify results. assertTrue( result.isEmpty() ); }
public boolean allFieldsNoLessThan(final ResourceProfile other) { checkNotNull(other, "Cannot compare null resources"); if (this.equals(ANY)) { return true; } if (this.equals(other)) { return true; } if (this.equals(UNKNOWN)) { return false; } if (other.equals(UNKNOWN)) { return true; } if (cpuCores.getValue().compareTo(other.cpuCores.getValue()) >= 0 && taskHeapMemory.compareTo(other.taskHeapMemory) >= 0 && taskOffHeapMemory.compareTo(other.taskOffHeapMemory) >= 0 && managedMemory.compareTo(other.managedMemory) >= 0 && networkMemory.compareTo(other.networkMemory) >= 0) { for (Map.Entry<String, ExternalResource> resource : other.extendedResources.entrySet()) { if (!extendedResources.containsKey(resource.getKey()) || extendedResources .get(resource.getKey()) .getValue() .compareTo(resource.getValue().getValue()) < 0) { return false; } } return true; } return false; }
@Test void testUnknownNoLessThanUnknown() { assertThat(ResourceProfile.UNKNOWN.allFieldsNoLessThan(ResourceProfile.UNKNOWN)).isTrue(); }
public static int[] computePhysicalIndices( List<TableColumn> logicalColumns, DataType physicalType, Function<String, String> nameRemapping) { Map<TableColumn, Integer> physicalIndexLookup = computePhysicalIndices(logicalColumns.stream(), physicalType, nameRemapping); return logicalColumns.stream().mapToInt(physicalIndexLookup::get).toArray(); }
@Test void testFieldMappingLegacyDecimalType() { int[] indices = TypeMappingUtils.computePhysicalIndices( TableSchema.builder() .field("f0", DECIMAL(38, 18)) .build() .getTableColumns(), ROW(FIELD("f0", TypeConversions.fromLegacyInfoToDataType(Types.BIG_DEC))), Function.identity()); assertThat(indices).isEqualTo(new int[] {0}); }
private NodesSpecification(ClusterResources min, ClusterResources max, IntRange groupSize, boolean dedicated, Version version, boolean required, boolean canFail, boolean exclusive, Optional<DockerImage> dockerImageRepo, Optional<String> combinedId, Optional<CloudAccount> cloudAccount, boolean hasCountAttribute) { if (max.smallerThan(min)) throw new IllegalArgumentException("Max resources must be larger or equal to min resources, but " + max + " is smaller than " + min); if (min.nodes() < 1) throw new IllegalArgumentException("Min node count cannot be less than 1, but is " + min.nodes()); // Non-scaled resources must be equal if ( ! min.nodeResources().justNonNumbers().equals(max.nodeResources().justNonNumbers())) throw new IllegalArgumentException("Min and max resources must have the same non-numeric settings, but " + "min is " + min + " and max " + max); if (min.nodeResources().bandwidthGbps() != max.nodeResources().bandwidthGbps()) throw new IllegalArgumentException("Min and max resources must have the same bandwidth, but " + "min is " + min + " and max " + max); this.min = min; this.max = max; this.groupSize = groupSize; this.dedicated = dedicated; this.version = version; this.required = required; this.canFail = canFail; this.exclusive = exclusive; this.dockerImageRepo = dockerImageRepo; this.combinedId = combinedId; this.cloudAccount = cloudAccount; this.hasCountAttribute = hasCountAttribute; }
@Test void invalidResources() { assertThrows(IllegalArgumentException.class, () -> nodesSpecification("<nodes><resources vcpu='-1' /></nodes>")); assertThrows(IllegalArgumentException.class, () -> nodesSpecification("<nodes><resources vcpu='' /></nodes>")); assertThrows(IllegalArgumentException.class, () -> nodesSpecification("<nodes><resources memory='-1' /></nodes>")); assertThrows(IllegalArgumentException.class, () -> nodesSpecification("<nodes><resources memory='1x' /></nodes>")); assertThrows(IllegalArgumentException.class, () -> nodesSpecification("<nodes><resources memory='' /></nodes>")); assertThrows(IllegalArgumentException.class, () -> nodesSpecification("<nodes><resources vcpu='[-1,]' /></nodes>")); assertThrows(IllegalArgumentException.class, () -> nodesSpecification("<nodes><resources vcpu='[1,0.5]' /></nodes>")); assertThrows(IllegalArgumentException.class, () -> nodesSpecification("<nodes><resources memory='[,-1b]' /></nodes>")); assertThrows(IllegalArgumentException.class, () -> nodesSpecification("<nodes><resources memory='[1mb,999kb]' /></nodes>")); assertThrows(IllegalArgumentException.class, () -> nodesSpecification("<nodes><resources memory='b' /></nodes>")); assertThrows(IllegalArgumentException.class, () -> nodesSpecification("<nodes><resources memory='Yb' /></nodes>")); assertThrows(IllegalArgumentException.class, () -> nodesSpecification("<nodes count='[0, 1]'></nodes>")); }
public static void runCommand(Config config) throws TerseException { try { ManifestWorkspace workspace = new ManifestWorkspace(config.out); ClassLoader parent = ConnectPluginPath.class.getClassLoader(); ServiceLoaderScanner serviceLoaderScanner = new ServiceLoaderScanner(); ReflectionScanner reflectionScanner = new ReflectionScanner(); PluginSource classpathSource = PluginUtils.classpathPluginSource(parent); ManifestWorkspace.SourceWorkspace<?> classpathWorkspace = workspace.forSource(classpathSource); PluginScanResult classpathPlugins = discoverPlugins(classpathSource, reflectionScanner, serviceLoaderScanner); Map<Path, Set<Row>> rowsByLocation = new LinkedHashMap<>(); Set<Row> classpathRows = enumerateRows(classpathWorkspace, classpathPlugins); rowsByLocation.put(null, classpathRows); ClassLoaderFactory factory = new ClassLoaderFactory(); try (DelegatingClassLoader delegatingClassLoader = factory.newDelegatingClassLoader(parent)) { beginCommand(config); for (Path pluginLocation : config.locations) { PluginSource source = PluginUtils.isolatedPluginSource(pluginLocation, delegatingClassLoader, factory); ManifestWorkspace.SourceWorkspace<?> pluginWorkspace = workspace.forSource(source); PluginScanResult plugins = discoverPlugins(source, reflectionScanner, serviceLoaderScanner); Set<Row> rows = enumerateRows(pluginWorkspace, plugins); rowsByLocation.put(pluginLocation, rows); for (Row row : rows) { handlePlugin(config, row); } } endCommand(config, workspace, rowsByLocation); } } catch (Throwable e) { failCommand(config, e); } }
@Test public void testNoArguments() { CommandResult res = runCommand(); assertNotEquals(0, res.returnCode); }
public boolean tryUnblockFailedWorkflowInstance( String workflowId, long workflowInstanceId, long workflowRunId, TimelineEvent event) { int updated = withMetricLogError( () -> withRetryableUpdate( UNBLOCK_INSTANCE_FAILED_STATUS, stmt -> { int idx = 0; stmt.setString(++idx, toJson(event)); stmt.setString(++idx, workflowId); stmt.setLong(++idx, workflowInstanceId); stmt.setLong(++idx, workflowRunId); }), "tryUnblockFailedWorkflowInstance", "Failed to try to unblock the failed workflow instance [{}][{}][{}]", workflowId, workflowInstanceId, workflowRunId); return updated == SUCCESS_WRITE_SIZE; }
@Test public void testTryUnblockFailedWorkflowInstance() { int cnt = instanceDao.terminateQueuedInstances( TEST_WORKFLOW_ID, 2, WorkflowInstance.Status.FAILED, "test-reason"); assertEquals(1L, cnt); String status = instanceDao.getWorkflowInstanceRawStatus(TEST_WORKFLOW_ID, 1L, 1L); assertEquals("FAILED", status); boolean ret = instanceDao.tryUnblockFailedWorkflowInstance(TEST_WORKFLOW_ID, 1L, 1L, null); assertTrue(ret); status = instanceDao.getWorkflowInstanceRawStatus(TEST_WORKFLOW_ID, 1L, 1L); assertEquals("FAILED_1", status); }
public List<Modification> parse(String svnLogOutput, String path, SAXBuilder builder) { try { Document document = builder.build(new StringReader(svnLogOutput)); return parseDOMTree(document, path); } catch (Exception e) { throw bomb("Unable to parse svn log output: " + svnLogOutput, e); } }
@Test public void shouldParseBJCruiseLogCorrectly() { String firstChangeLog = """ <?xml version="1.0"?> <log> <logentry revision="11238"> <author>yxchu</author> <date>2008-10-21T14:00:16.598195Z</date> <paths> <path action="M">/trunk/test/unit/card_selection_test.rb</path> <path action="M">/trunk/test/functional/cards_controller_quick_add_test.rb</path> <path action="M">/trunk/app/controllers/cards_controller.rb</path> </paths> <msg>#2761, fix random test failure and add quick add card type to session</msg> </logentry> </log>"""; String secondChangeLog = """ <?xml version="1.0"?> <log> <logentry revision="11239"> <author>yxchu</author> <date>2008-10-21T14:00:36.209014Z</date> <paths> <path action="M">/trunk/test/unit/card_selection_test.rb</path> </paths> <msg>still fix test</msg> </logentry> <logentry revision="11240"> <author>yxchu</author> <date>2008-10-21T14:00:47.614448Z</date> <paths> <path action="M">/trunk/test/unit/card_selection_test.rb</path> </paths> <msg>fix test remove messaging helper</msg> </logentry> </log>"""; SvnLogXmlParser parser = new SvnLogXmlParser(); List<Modification> mods = parser.parse(firstChangeLog, ".", new SAXBuilder()); assertThat(mods.get(0).getUserName()).isEqualTo("yxchu"); List<Modification> mods2 = parser.parse(secondChangeLog, ".", new SAXBuilder()); assertThat(mods2.size()).isEqualTo(2); }
public Span toSpan(TraceContext context) { return toSpan(null, context); }
@Test void toSpan() { TraceContext context = tracer.newTrace().context(); assertThat(tracer.toSpan(context)) .isInstanceOf(RealSpan.class) .extracting(Span::context) .isEqualTo(context); }
public static <T extends PipelineOptions> T as(Class<T> klass) { return new Builder().as(klass); }
@Test public void testGettersWithMultipleDefaults() throws Exception { expectedException.expect(IllegalArgumentException.class); // Make sure the error message says what the problem is, generally expectedException.expectMessage("contradictory annotations"); // Make sure the error message gives actionable details about what annotations were // contradictory. // Note that the quotes in the unparsed string are present in Java 11 but absent in Java 8 expectedException.expectMessage( anyOf( containsString("Default.String(value=\"abc\")"), containsString("Default.String(value=abc)"))); expectedException.expectMessage("Default.Integer(value=0)"); // When we attempt to create, we should error at this moment. PipelineOptionsFactory.as(GettersWithMultipleDefault.class); }
@Override public HttpRestResult<String> httpDelete(String path, Map<String, String> headers, Map<String, String> paramValues, String encode, long readTimeoutMs) throws Exception { final long endTime = System.currentTimeMillis() + readTimeoutMs; String currentServerAddr = serverListMgr.getCurrentServerAddr(); int maxRetry = this.maxRetry; HttpClientConfig httpConfig = HttpClientConfig.builder() .setReadTimeOutMillis(Long.valueOf(readTimeoutMs).intValue()) .setConTimeOutMillis(ConfigHttpClientManager.getInstance().getConnectTimeoutOrDefault(100)).build(); do { try { Header newHeaders = Header.newInstance(); if (headers != null) { newHeaders.addAll(headers); } Query query = Query.newInstance().initParams(paramValues); HttpRestResult<String> result = nacosRestTemplate.delete(getUrl(currentServerAddr, path), httpConfig, newHeaders, query, String.class); if (isFail(result)) { LOGGER.error("[NACOS ConnectException] currentServerAddr: {}, httpCode: {}", serverListMgr.getCurrentServerAddr(), result.getCode()); } else { // Update the currently available server addr serverListMgr.updateCurrentServerAddr(currentServerAddr); return result; } } catch (ConnectException connectException) { LOGGER.error("[NACOS ConnectException httpDelete] currentServerAddr:{}, err : {}", serverListMgr.getCurrentServerAddr(), ExceptionUtil.getStackTrace(connectException)); } catch (SocketTimeoutException stoe) { LOGGER.error("[NACOS SocketTimeoutException httpDelete] currentServerAddr:{}, err : {}", serverListMgr.getCurrentServerAddr(), ExceptionUtil.getStackTrace(stoe)); } catch (Exception ex) { LOGGER.error("[NACOS Exception httpDelete] currentServerAddr: " + serverListMgr.getCurrentServerAddr(), ex); throw ex; } if (serverListMgr.getIterator().hasNext()) { currentServerAddr = serverListMgr.getIterator().next(); } else { maxRetry--; if (maxRetry < 0) { throw new ConnectException( "[NACOS HTTP-DELETE] The maximum number of tolerable server reconnection errors has been reached"); } serverListMgr.refreshCurrentServerAddr(); } } while (System.currentTimeMillis() <= endTime); LOGGER.error("no available server"); throw new ConnectException("no available server"); }
@Test void testHttpDeleteWithRequestException() throws Exception { assertThrows(NacosException.class, () -> { when(nacosRestTemplate.<String>delete(eq(SERVER_ADDRESS_1 + "/test"), any(HttpClientConfig.class), any(Header.class), any(Query.class), eq(String.class))).thenThrow(new ConnectException(), new SocketTimeoutException(), new NacosException()); serverHttpAgent.httpDelete("/test", Collections.emptyMap(), Collections.emptyMap(), "UTF-8", 1000); }); }
BackgroundJobRunner getBackgroundJobRunner(Job job) { assertJobExists(job.getJobDetails()); return backgroundJobRunners.stream() .filter(jobRunner -> jobRunner.supports(job)) .findFirst() .orElseThrow(() -> problematicConfigurationException("Could not find a BackgroundJobRunner: either no JobActivator is registered, your Background Job Class is not registered within the IoC container or your Job does not have a default no-arg constructor.")); }
@Test void getBackgroundJobRunnerForIoCJobWithInstance() { final Job job = anEnqueuedJob() .withJobDetails(() -> testServiceForIoC.doWork()) .build(); assertThat(backgroundJobServer.getBackgroundJobRunner(job)) .isNotNull() .isInstanceOf(BackgroundJobWithIocRunner.class); }
public static Read<JmsRecord> read() { return new AutoValue_JmsIO_Read.Builder<JmsRecord>() .setMaxNumRecords(Long.MAX_VALUE) .setCoder(SerializableCoder.of(JmsRecord.class)) .setCloseTimeout(DEFAULT_CLOSE_TIMEOUT) .setRequiresDeduping(false) .setMessageMapper( new MessageMapper<JmsRecord>() { @Override public JmsRecord mapMessage(Message message) throws Exception { TextMessage textMessage = (TextMessage) message; Map<String, Object> properties = new HashMap<>(); @SuppressWarnings("rawtypes") Enumeration propertyNames = textMessage.getPropertyNames(); while (propertyNames.hasMoreElements()) { String propertyName = (String) propertyNames.nextElement(); properties.put(propertyName, textMessage.getObjectProperty(propertyName)); } return new JmsRecord( textMessage.getJMSMessageID(), textMessage.getJMSTimestamp(), textMessage.getJMSCorrelationID(), textMessage.getJMSReplyTo(), textMessage.getJMSDestination(), textMessage.getJMSDeliveryMode(), textMessage.getJMSRedelivered(), textMessage.getJMSType(), textMessage.getJMSExpiration(), textMessage.getJMSPriority(), properties, textMessage.getText()); } }) .build(); }
@Test public void testReadMessagesWithCFProviderFn() throws Exception { long count = 5; produceTestMessages(count, JmsIOTest::createTextMessage); PCollection<JmsRecord> output = pipeline.apply( JmsIO.read() .withConnectionFactoryProviderFn( toSerializableFunction(commonJms::createConnectionFactory)) .withQueue(QUEUE) .withUsername(USERNAME) .withPassword(PASSWORD) .withMaxNumRecords(count)); PAssert.thatSingleton(output.apply("Count", Count.globally())).isEqualTo(count); pipeline.run(); assertQueueIsEmpty(); }
public Map<String, String> clientTags() { return clientTags; }
@Test public void shouldReturnEmptyClientTagsMapByDefault() { assertTrue(new ClientState().clientTags().isEmpty()); }
public void requireAtLeast(final int requiredMajor, final int requiredMinor) { final Version required = new Version(requiredMajor, requiredMinor); if (this.compareTo(required) < 0) { throw new UnsupportedOperationException( "This operation requires API version at least " + requiredMajor + "." + requiredMinor + ", currently configured for " + major + "." + minor); } }
@Test public void shouldObserveApiLimitsOnMinorVersions() { assertThrows(UnsupportedOperationException.class, () -> V35_0.requireAtLeast(35, 1)); }
@Override public PMML_MODEL getPMMLModelType() { return PMML_MODEL.REGRESSION_MODEL; }
@Test void getPMMLModelType() { assertThat(executor.getPMMLModelType()).isEqualTo(PMML_MODEL.REGRESSION_MODEL); }
public static double pow2(double x) { return x * x; }
@Test public void testPow2() { System.out.println("pow2"); assertEquals(0, MathEx.pow2(0), 1E-10); assertEquals(1, MathEx.pow2(1), 1E-10); assertEquals(4, MathEx.pow2(2), 1E-10); assertEquals(9, MathEx.pow2(3), 1E-10); }
@Override public void notifyCheckpointComplete(long completedCheckpointId) { synchronized (uploadedSstFiles) { // FLINK-23949: materializedSstFiles.keySet().contains(completedCheckpointId) make sure // the notified checkpointId is not a savepoint, otherwise next checkpoint will // degenerate into a full checkpoint if (completedCheckpointId > lastCompletedCheckpointId && uploadedSstFiles.containsKey(completedCheckpointId)) { uploadedSstFiles .keySet() .removeIf(checkpointId -> checkpointId < completedCheckpointId); lastCompletedCheckpointId = completedCheckpointId; } } }
@Test void testCheckpointIsIncremental() throws Exception { try (CloseableRegistry closeableRegistry = new CloseableRegistry(); RocksIncrementalSnapshotStrategy<?> checkpointSnapshotStrategy = createSnapshotStrategy()) { FsCheckpointStreamFactory checkpointStreamFactory = createFsCheckpointStreamFactory(); // make and notify checkpoint with id 1 snapshot(1L, checkpointSnapshotStrategy, checkpointStreamFactory, closeableRegistry); checkpointSnapshotStrategy.notifyCheckpointComplete(1L); // notify savepoint with id 2 checkpointSnapshotStrategy.notifyCheckpointComplete(2L); // make checkpoint with id 3 IncrementalRemoteKeyedStateHandle incrementalRemoteKeyedStateHandle3 = snapshot( 3L, checkpointSnapshotStrategy, checkpointStreamFactory, closeableRegistry); // If 3rd checkpoint's full size > checkpointed size, it means 3rd checkpoint is // incremental. assertThat(incrementalRemoteKeyedStateHandle3.getStateSize()) .isGreaterThan(incrementalRemoteKeyedStateHandle3.getCheckpointedSize()); } }
@Override public long reservePermission(final int permits) { long timeoutInNanos = state.get().config.getTimeoutDuration().toNanos(); State modifiedState = updateStateWithBackOff(permits, timeoutInNanos); boolean canAcquireImmediately = modifiedState.nanosToWait <= 0; if (canAcquireImmediately) { publishRateLimiterAcquisitionEvent(true, permits); return 0; } boolean canAcquireInTime = timeoutInNanos >= modifiedState.nanosToWait; if (canAcquireInTime) { publishRateLimiterAcquisitionEvent(true, permits); return modifiedState.nanosToWait; } publishRateLimiterAcquisitionEvent(false, permits); return -1; }
@Test
public void reserveManyCyclesIfWeightGreaterThanLimitPerPeriod() throws Exception {
    setup(Duration.ofNanos(CYCLE_IN_NANOS * 5));
    setTimeOnNanos(CYCLE_IN_NANOS);
    long nanosToWait = rateLimiter.reservePermission(PERMISSIONS_RER_CYCLE * 3);
    then(nanosToWait).isGreaterThan(CYCLE_IN_NANOS);
}
@VisibleForTesting protected ClientRequestInterceptor createRequestInterceptorChain() { Configuration conf = getConfig(); return RouterServerUtil.createRequestInterceptorChain(conf, YarnConfiguration.ROUTER_CLIENTRM_INTERCEPTOR_CLASS_PIPELINE, YarnConfiguration.DEFAULT_ROUTER_CLIENTRM_INTERCEPTOR_CLASS, ClientRequestInterceptor.class); }
@Test public void testRequestInterceptorChainCreation() throws Exception { ClientRequestInterceptor root = super.getRouterClientRMService().createRequestInterceptorChain(); int index = 0; while (root != null) { // The current pipeline is: // PassThroughClientRequestInterceptor - index = 0 // PassThroughClientRequestInterceptor - index = 1 // PassThroughClientRequestInterceptor - index = 2 // MockClientRequestInterceptor - index = 3 switch (index) { case 0: // Fall to the next case case 1: // Fall to the next case case 2: // If index is equal to 0,1 or 2 we fall in this check Assert.assertEquals(PassThroughClientRequestInterceptor.class.getName(), root.getClass().getName()); break; case 3: Assert.assertEquals(MockClientRequestInterceptor.class.getName(), root.getClass().getName()); break; default: Assert.fail(); } root = root.getNextInterceptor(); index++; } Assert.assertEquals("The number of interceptors in chain does not match", 4, index); }
@SuppressWarnings("unchecked") public static <S, F> S visit(final Schema schema, final Visitor<S, F> visitor) { final BiFunction<Visitor<?, ?>, Schema, Object> handler = HANDLER.get(schema.type()); if (handler == null) { throw new UnsupportedOperationException("Unsupported schema type: " + schema.type()); } return (S) handler.apply(visitor, schema); }
@Test public void shouldVisitInt8() { // Given: final Schema schema = Schema.OPTIONAL_INT8_SCHEMA; when(visitor.visitInt8(any())).thenReturn("Expected"); // When: final String result = SchemaWalker.visit(schema, visitor); // Then: verify(visitor).visitInt8(same(schema)); assertThat(result, is("Expected")); }
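SchemaWalker.visit above dispatches through a static HANDLER map keyed by schema type rather than a switch statement, which keeps the dispatch table in one place. A minimal, generic sketch of that table-driven visitor pattern follows; all names here are hypothetical, not the ksqlDB API:

// Hypothetical table-driven visitor dispatch in the style of the HANDLER map above.
import java.util.Map;
import java.util.function.BiFunction;

public class TableDispatch {
    interface Visitor<S> {
        S visitInt(Integer i);
        S visitText(String s);
    }

    private static final Map<Class<?>, BiFunction<Visitor<?>, Object, Object>> HANDLER = Map.of(
            Integer.class, (v, o) -> v.visitInt((Integer) o),
            String.class, (v, o) -> v.visitText((String) o));

    @SuppressWarnings("unchecked")
    static <S> S visit(Object value, Visitor<S> visitor) {
        BiFunction<Visitor<?>, Object, Object> handler = HANDLER.get(value.getClass());
        if (handler == null) {
            throw new UnsupportedOperationException("Unsupported type: " + value.getClass());
        }
        return (S) handler.apply(visitor, value);
    }

    public static void main(String[] args) {
        Visitor<String> describe = new Visitor<>() {
            public String visitInt(Integer i) { return "int:" + i; }
            public String visitText(String s) { return "text:" + s; }
        };
        System.out.println(visit(42, describe));   // int:42
        System.out.println(visit("hi", describe)); // text:hi
    }
}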
public Node pop() { return nodes.poll(); }
@Test void require_SearcherNodes_ordered_by_insertion_order() { int priority = 0; ComponentNode a = new ComponentNode<>(createFakeComponentB("1"), priority++); ComponentNode b = new ComponentNode<>(createFakeComponentA("2"), priority++); ComponentNode c = new ComponentNode<>(createFakeComponentA("03"), priority++); addNodes(a, b, c); assertEquals(a, pop()); assertEquals(b, pop()); assertEquals(c, pop()); }
@Udf(description = "Converts a TIMESTAMP value into the" + " string representation of the timestamp in the given format. Single quotes in the" + " timestamp format can be escaped with '', for example: 'yyyy-MM-dd''T''HH:mm:ssX'" + " The system default time zone is used when no time zone is explicitly provided." + " The format pattern should be in the format expected" + " by java.time.format.DateTimeFormatter") public String formatTimestamp( @UdfParameter( description = "TIMESTAMP value.") final Timestamp timestamp, @UdfParameter( description = "The format pattern should be in the format expected by" + " java.time.format.DateTimeFormatter.") final String formatPattern) { return formatTimestamp(timestamp, formatPattern, ZoneId.of("GMT").getId()); }
@Test public void shouldThrowIfInvalidTimeZone() { // When: final KsqlException e = assertThrows( KsqlFunctionException.class, () -> udf.formatTimestamp(new Timestamp(1638360611123L), "yyyy-MM-dd HH:mm:ss.SSS", "PST") ); // Then: assertThat(e.getMessage(), containsString("Unknown time-zone ID: PST")); }
public int indexOf(PDPage page) { SearchContext context = new SearchContext(page); if (findPage(context, root)) { return context.index; } return -1; }
@Test void indexOfPageFromOutlineDestination() throws IOException { doc = Loader.loadPDF(RandomAccessReadBuffer.createBufferFromStream( TestPDPageTree.class.getResourceAsStream("with_outline.pdf"))); PDDocumentOutline outline = doc.getDocumentCatalog().getDocumentOutline(); for (PDOutlineItem current : outline.children()) { if (current.getTitle().contains("Second")) { assertEquals(2, doc.getPages().indexOf(current.findDestinationPage(doc))); } } }
public static <T extends GeneratedMessageV3> ProtobufNativeSchema<T> of(Class<T> pojo) { return of(pojo, new HashMap<>()); }
@Test public void testSchema() { ProtobufNativeSchema<org.apache.pulsar.client.schema.proto.Test.TestMessage> protobufSchema = ProtobufNativeSchema.of(org.apache.pulsar.client.schema.proto.Test.TestMessage.class); assertEquals(protobufSchema.getSchemaInfo().getType(), SchemaType.PROTOBUF_NATIVE); assertNotNull(ProtobufNativeSchemaUtils.deserialize(protobufSchema.getSchemaInfo().getSchema())); assertEquals(new String(protobufSchema.getSchemaInfo().getSchema(), StandardCharsets.UTF_8), EXPECTED_SCHEMA_JSON); }
@Deprecated public static UnboundedSource<Long, CounterMark> unbounded() { return unboundedWithTimestampFn(new NowTimestampFn()); }
@Test @Category({ ValidatesRunner.class, UsesStatefulParDo.class // This test fails if State is unsupported despite no direct usage. }) public void testUnboundedSourceSplits() throws Exception { long numElements = 1000; int numSplits = 10; UnboundedSource<Long, ?> initial = CountingSource.unbounded(); List<? extends UnboundedSource<Long, ?>> splits = initial.split(numSplits, p.getOptions()); assertEquals("Expected exact splitting", numSplits, splits.size()); long elementsPerSplit = numElements / numSplits; assertEquals("Expected even splits", numElements, elementsPerSplit * numSplits); PCollectionList<Long> pcollections = PCollectionList.empty(p); for (int i = 0; i < splits.size(); ++i) { pcollections = pcollections.and( p.apply("split" + i, Read.from(splits.get(i)).withMaxNumRecords(elementsPerSplit))); } PCollection<Long> input = pcollections.apply(Flatten.pCollections()); addCountingAsserts(input, numElements); p.run(); }
@Override String getFileName(double lat, double lon) { int intKey = calcIntKey(lat, lon); String str = areas.get(intKey); if (str == null) return null; int minLat = Math.abs(down(lat)); int minLon = Math.abs(down(lon)); str += "/"; if (lat >= 0) str += "N"; else str += "S"; if (minLat < 10) str += "0"; str += minLat; if (lon >= 0) str += "E"; else str += "W"; if (minLon < 10) str += "0"; if (minLon < 100) str += "0"; str += minLon; return str; }
@Disabled @Test public void testGetEleHorizontalBorder() { instance = new SRTMProvider(); // Border between the tiles N42E011 and N42E012 assertEquals("Eurasia/N42E011", instance.getFileName(42.1, 11.999999)); assertEquals(324, instance.getEle(42.1, 11.999999), precision); assertEquals("Eurasia/N42E012", instance.getFileName(42.1, 12.000001)); assertEquals(324, instance.getEle(42.1, 12.000001), precision); }
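The file name above is assembled from the floored coordinates: a hemisphere letter, the absolute latitude zero-padded to two digits, and the absolute longitude zero-padded to three. A compact, hypothetical reformulation with String.format follows; it assumes the original's unseen down() helper floors toward negative infinity:

// Hypothetical sketch of the tile-name suffix built above; assumes down() behaves like Math.floor().
public class TileName {
    static String name(double lat, double lon) {
        int latDeg = (int) Math.floor(lat); // degree of the tile's south-west corner
        int lonDeg = (int) Math.floor(lon);
        return String.format("%s%02d%s%03d",
                lat >= 0 ? "N" : "S", Math.abs(latDeg),
                lon >= 0 ? "E" : "W", Math.abs(lonDeg));
    }

    public static void main(String[] args) {
        System.out.println(name(42.1, 11.999999)); // N42E011, matching the disabled test above
        System.out.println(name(42.1, 12.000001)); // N42E012
    }
}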
@Override public Response getListingJson(Application app, ServletConfig sc, HttpHeaders headers, UriInfo uriInfo) throws JsonProcessingException { Response response = getListingJsonResponse(app, context, sc, headers, uriInfo); response.getHeaders().add("Access-Control-Allow-Origin", "*"); response.getHeaders().add("Access-Control-Allow-Headers", "x-requested-with, ssi-token"); response.getHeaders().add("Access-Control-Max-Age", "3600"); response.getHeaders().add("Access-Control-Allow-Methods", "GET,POST,PUT,DELETE,OPTIONS"); return response; }
@Test void test() throws Exception { DubboSwaggerApiListingResource resource = new DubboSwaggerApiListingResource(); app = mock(Application.class); sc = mock(ServletConfig.class); Set<Class<?>> sets = new HashSet<Class<?>>(); sets.add(SwaggerService.class); when(sc.getServletContext()).thenReturn(mock(ServletContext.class)); when(app.getClasses()).thenReturn(sets); Response response = resource.getListingJson(app, sc, null, new ResteasyUriInfo(new URI("http://rest.test"))); Assertions.assertNotNull(response); Swagger swagger = (Swagger) response.getEntity(); Assertions.assertEquals("SwaggerService", swagger.getTags().get(0).getName()); Assertions.assertEquals( "/demoService/hello", swagger.getPaths().keySet().toArray()[0].toString()); }
@Override
@CacheEvict(value = RedisKeyConstants.MAIL_ACCOUNT, key = "#id")
public void deleteMailAccount(Long id) {
    // Verify the account exists
    validateMailAccountExists(id);
    // Verify no mail templates still reference this account
    if (mailTemplateService.getMailTemplateCountByAccountId(id) > 0) {
        throw exception(MAIL_ACCOUNT_RELATE_TEMPLATE_EXISTS);
    }
    // Delete the account
    mailAccountMapper.deleteById(id);
}
@Test
public void testDeleteMailAccount_notExists() {
    // Prepare parameters
    Long id = randomLongId();
    // Invoke and assert the expected exception
    assertServiceException(() -> mailAccountService.deleteMailAccount(id), MAIL_ACCOUNT_NOT_EXISTS);
}
public static Autoscaling empty() { return empty(""); }
@Test public void test_changing_exclusivity() { var min = new ClusterResources( 2, 1, new NodeResources( 3, 8, 100, 1)); var max = new ClusterResources(20, 1, new NodeResources(100, 1000, 1000, 1)); var fixture = DynamicProvisioningTester.fixture() .awsProdSetup(true) .cluster(clusterSpec(true)) .capacity(Capacity.from(min, max)) .initialResources(Optional.empty()) .build(); fixture.tester().assertResources("Initial deployment at minimum", 2, 1, 4, 8, 100, fixture.currentResources().advertisedResources()); fixture.tester().deploy(fixture.applicationId(), clusterSpec(false), fixture.capacity()); fixture.loader().applyLoad(new Load(0.1, 0.1, 0.1, 0, 0), 5); fixture.tester().assertResources("Exclusive nodes makes no difference here", 2, 1, 4, 8, 100.0, fixture.autoscale()); fixture.tester().deploy(fixture.applicationId(), clusterSpec(true), fixture.capacity()); fixture.loader().applyLoad(new Load(0.1, 0.1, 0.1, 0, 0), 5); fixture.tester().assertResources("Reverts to the initial resources", 2, 1, 4, 8, 100, fixture.currentResources().advertisedResources()); }
@Override public String get(final Scope scope, final ConnectionSession connectionSession, final MySQLSystemVariable variable) { return Scope.GLOBAL == scope ? variable.getDefaultValue() : connectionSession.getIsolationLevel().orElse(TransactionIsolationLevel.REPEATABLE_READ).getIsolationLevel(); }
@Test void assertGetGlobalValue() { assertThat(new TransactionIsolationValueProvider().get(Scope.GLOBAL, null, MySQLSystemVariable.TRANSACTION_ISOLATION), is("REPEATABLE-READ")); assertThat(new TransactionIsolationValueProvider().get(Scope.GLOBAL, null, MySQLSystemVariable.TX_ISOLATION), is("REPEATABLE-READ")); }
@Override public SparkTable loadTable(Identifier ident) throws NoSuchTableException { Pair<Table, Long> table = load(ident); return new SparkTable(table.first(), table.second(), false /* refresh eagerly */); }
@Test public void testTimeTravel() { sql("CREATE TABLE %s (id INT, dep STRING) USING iceberg", tableName); Table table = validationCatalog.loadTable(tableIdent); sql("INSERT INTO TABLE %s VALUES (1, 'hr')", tableName); table.refresh(); Snapshot firstSnapshot = table.currentSnapshot(); waitUntilAfter(firstSnapshot.timestampMillis()); sql("INSERT INTO TABLE %s VALUES (2, 'hr')", tableName); table.refresh(); Snapshot secondSnapshot = table.currentSnapshot(); waitUntilAfter(secondSnapshot.timestampMillis()); sql("INSERT INTO TABLE %s VALUES (3, 'hr')", tableName); table.refresh(); try { TABLE_CACHE.add("key", table); assertEquals( "Should have expected rows in 3rd snapshot", ImmutableList.of(row(1, "hr"), row(2, "hr"), row(3, "hr")), sql("SELECT * FROM testcache.key ORDER BY id")); assertEquals( "Should have expected rows in 2nd snapshot", ImmutableList.of(row(1, "hr"), row(2, "hr")), sql( "SELECT * FROM testcache.`key#at_timestamp_%s` ORDER BY id", secondSnapshot.timestampMillis())); assertEquals( "Should have expected rows in 1st snapshot", ImmutableList.of(row(1, "hr")), sql( "SELECT * FROM testcache.`key#snapshot_id_%d` ORDER BY id", firstSnapshot.snapshotId())); } finally { TABLE_CACHE.remove("key"); } }
public DdlCommandResult execute( final String sql, final DdlCommand ddlCommand, final boolean withQuery, final Set<SourceName> withQuerySources ) { return execute(sql, ddlCommand, withQuery, withQuerySources, false); }
@Test public void shouldAddTableWithCorrectSql() { // Given: givenCreateTable(); // When: cmdExec.execute(SQL_TEXT, createTable, false, NO_QUERY_SOURCES); // Then: assertThat(metaStore.getSource(TABLE_NAME).getSqlExpression(), is(SQL_TEXT)); }
public static Ip4Prefix valueOf(int address, int prefixLength) { return new Ip4Prefix(Ip4Address.valueOf(address), prefixLength); }
@Test(expected = NullPointerException.class) public void testInvalidValueOfNullString() { Ip4Prefix ipPrefix; String fromString; fromString = null; ipPrefix = Ip4Prefix.valueOf(fromString); }
@Override public Map<String, ScannerPlugin> installRequiredPlugins() { LOG.info("Loading required plugins"); InstallResult result = installPlugins(p -> p.getRequiredForLanguages() == null || p.getRequiredForLanguages().isEmpty()); LOG.debug("Plugins not loaded because they are optional: {}", result.skippedPlugins); return result.installedPluginsByKey; }
@Test public void fail_if_json_of_installed_plugins_is_not_valid() { WsTestUtil.mockReader(wsClient, "api/plugins/installed", new StringReader("not json")); assertThatThrownBy(() -> underTest.installRequiredPlugins()) .isInstanceOf(IllegalStateException.class) .hasMessage("Fail to parse response of api/plugins/installed"); }
public static int[] computePhysicalIndices( List<TableColumn> logicalColumns, DataType physicalType, Function<String, String> nameRemapping) { Map<TableColumn, Integer> physicalIndexLookup = computePhysicalIndices(logicalColumns.stream(), physicalType, nameRemapping); return logicalColumns.stream().mapToInt(physicalIndexLookup::get).toArray(); }
@Test void testFieldMappingLegacyDecimalTypeNotMatchingPrecision() { assertThatThrownBy( () -> TypeMappingUtils.computePhysicalIndices( TableSchema.builder() .field("f0", DECIMAL(38, 10)) .build() .getTableColumns(), ROW( FIELD( "f0", TypeConversions.fromLegacyInfoToDataType( Types.BIG_DEC))), Function.identity())) .isInstanceOf(ValidationException.class) .hasMessage( "Type DECIMAL(38, 10) of table field 'f0' does not match with the " + "physical type LEGACY('DECIMAL', 'DECIMAL') of the " + "'f0' field of the TableSource return type.") .cause() .isInstanceOf(ValidationException.class) .hasMessage("Legacy decimal type can only be mapped to DECIMAL(38, 18)."); }
public <T> OutputSampler<T> sampleOutput(String pcollectionId, Coder<T> coder) { return (OutputSampler<T>) outputSamplers.computeIfAbsent( pcollectionId, k -> new OutputSampler<>( coder, this.maxSamples, this.sampleEveryN, this.onlySampleExceptions)); }
@Test public void testMultipleOutputs() throws Exception { DataSampler sampler = new DataSampler(); VarIntCoder coder = VarIntCoder.of(); sampler.sampleOutput("pcollection-id-1", coder).sample(globalWindowedValue(1)); sampler.sampleOutput("pcollection-id-2", coder).sample(globalWindowedValue(2)); BeamFnApi.InstructionResponse samples = getAllSamples(sampler); assertHasSamples(samples, "pcollection-id-1", Collections.singleton(encodeInt(1))); assertHasSamples(samples, "pcollection-id-2", Collections.singleton(encodeInt(2))); }
public static <T extends PipelineOptions> T validate(Class<T> klass, PipelineOptions options) { return validate(klass, options, false); }
@Test public void testWhenOptionIsDefinedInMultipleSuperInterfacesAndIsNotPresentFailsRequirement() { RightOptions rightOptions = PipelineOptionsFactory.as(RightOptions.class); rightOptions.setBoth("foo"); rightOptions.setRunner(CrashingRunner.class); expectedException.expect(IllegalArgumentException.class); expectedException.expectMessage("Missing required value for group"); expectedException.expectMessage("getFoo"); PipelineOptionsValidator.validate(JoinedOptions.class, rightOptions); }
static <T extends Type> String buildMethodSignature( String methodName, List<TypeReference<T>> parameters) { StringBuilder result = new StringBuilder(); result.append(methodName); result.append("("); String params = parameters.stream().map(Utils::getTypeName).collect(Collectors.joining(",")); result.append(params); result.append(")"); return result.toString(); }
@Test void testBuildMethodSignatureWithDynamicArrays() { assertEquals( "nazzEvent2((((string,string)[])[],uint256)[])", EventEncoder.buildMethodSignature( AbiV2TestFixture.nazzEvent2.getName(), AbiV2TestFixture.nazzEvent2.getParameters())); }
@Override public void handlerRule(final RuleData ruleData) { Optional.ofNullable(ruleData.getHandle()).ifPresent(s -> { RewriteHandle rewriteHandle = GsonUtils.getInstance().fromJson(s, RewriteHandle.class); CACHED_HANDLE.get().cachedHandle(CacheKeyUtils.INST.getKey(ruleData), rewriteHandle); }); }
@Test public void testHandlerRule() { ruleData.setSelectorId("1"); ruleData.setHandle("{\"urlPath\":\"test\"}"); ruleData.setId("test"); rewritePluginDataHandler.handlerRule(ruleData); Supplier<CommonHandleCache<String, RewriteHandle>> cache = RewritePluginDataHandler.CACHED_HANDLE; Assertions.assertNotNull(cache.get().obtainHandle("1_test")); }
@Override public boolean register(final Application application) { return SMAppService.loginItemServiceWithIdentifier(application.getIdentifier()).registerAndReturnError(null); }
@Test public void testRegister() { assumeFalse(Factory.Platform.osversion.matches("(10|11|12)\\..*")); assertFalse(new SMAppServiceApplicationLoginRegistry().register( new Application("bundle.helper"))); }
@Override public boolean equals(@Nullable Object other) { if (!(other instanceof PCollectionTuple)) { return false; } PCollectionTuple that = (PCollectionTuple) other; return this.pipeline.equals(that.pipeline) && this.pcollectionMap.equals(that.pcollectionMap); }
@Test public void testEquals() { TestPipeline p = TestPipeline.create(); TupleTag<Long> longTag = new TupleTag<>(); PCollection<Long> longs = p.apply(GenerateSequence.from(0)); TupleTag<String> strTag = new TupleTag<>(); PCollection<String> strs = p.apply(Create.of("foo", "bar")); EqualsTester tester = new EqualsTester(); // Empty tuples in the same pipeline are equal tester.addEqualityGroup(PCollectionTuple.empty(p), PCollectionTuple.empty(p)); tester.addEqualityGroup( PCollectionTuple.of(longTag, longs).and(strTag, strs), PCollectionTuple.of(longTag, longs).and(strTag, strs)); tester.addEqualityGroup(PCollectionTuple.of(longTag, longs)); tester.addEqualityGroup(PCollectionTuple.of(strTag, strs)); TestPipeline otherPipeline = TestPipeline.create(); // Empty tuples in different pipelines are not equal tester.addEqualityGroup(PCollectionTuple.empty(otherPipeline)); tester.testEquals(); }
public static boolean endsWith(CharSequence s, char c) { int len = s.length(); return len > 0 && s.charAt(len - 1) == c; }
@Test public void testEndsWith() { assertFalse(StringUtil.endsWith("", 'u')); assertTrue(StringUtil.endsWith("u", 'u')); assertTrue(StringUtil.endsWith("-u", 'u')); assertFalse(StringUtil.endsWith("-", 'u')); assertFalse(StringUtil.endsWith("u-", 'u')); }
@SuppressWarnings("unused") // Required for automatic type inference public static <K> Builder0<K> forClass(final Class<K> type) { return new Builder0<>(); }
@Test(expected = IllegalArgumentException.class) public void shouldThrowIfHandlerSupplierThrows1() { HandlerMaps.forClass(BaseType.class).withArgType(String.class) .put(LeafTypeA.class, () -> { throw new RuntimeException("Boom"); }) .build(); }
public ContentPackUninstallDetails getUninstallDetails(ContentPack contentPack, ContentPackInstallation installation) { if (contentPack instanceof ContentPackV1 contentPackV1) { return getUninstallDetails(contentPackV1, installation); } else { throw new IllegalArgumentException("Unsupported content pack version: " + contentPack.version()); } }
@Test public void getUninstallDetails() { /* Test will be uninstalled */ when(contentPackInstallService.countInstallationOfEntityById(ModelId.of("dead-beef1"))).thenReturn((long) 1); ContentPackUninstallDetails expect = ContentPackUninstallDetails.create(nativeEntityDescriptors); ContentPackUninstallDetails result = contentPackService.getUninstallDetails(contentPack, contentPackInstallation); assertThat(result).isEqualTo(expect); /* Test nothing will be uninstalled */ when(contentPackInstallService.countInstallationOfEntityById(ModelId.of("dead-beef1"))).thenReturn((long) 2); ContentPackUninstallDetails expectNon = ContentPackUninstallDetails.create(ImmutableSet.of()); ContentPackUninstallDetails resultNon = contentPackService.getUninstallDetails(contentPack, contentPackInstallation); assertThat(resultNon).isEqualTo(expectNon); }
public ConsumerStatsManager getConsumerStatsManager() { return this.defaultMQPushConsumerImpl.getConsumerStatsManager(); }
@Test
public void testPullMessage_ConsumeSuccess() throws Exception {
    final CountDownLatch countDownLatch = new CountDownLatch(1);
    final AtomicReference<MessageExt> messageAtomic = new AtomicReference<>();
    ConsumeMessageConcurrentlyService normalService = new ConsumeMessageConcurrentlyService(pushConsumer.getDefaultMQPushConsumerImpl(), new MessageListenerConcurrently() {
        @Override
        public ConsumeConcurrentlyStatus consumeMessage(List<MessageExt> msgs, ConsumeConcurrentlyContext context) {
            messageAtomic.set(msgs.get(0));
            countDownLatch.countDown();
            return ConsumeConcurrentlyStatus.CONSUME_SUCCESS;
        }
    });
    pushConsumer.getDefaultMQPushConsumerImpl().setConsumeMessageService(normalService);
    PullMessageService pullMessageService = mQClientFactory.getPullMessageService();
    pullMessageService.executePullRequestImmediately(createPullRequest());
    countDownLatch.await();
    Thread.sleep(1000);
    ConsumeStatus stats = normalService.getConsumerStatsManager().consumeStatus(pushConsumer.getDefaultMQPushConsumerImpl().groupName(), topic);
    ConsumerStatsManager mgr = normalService.getConsumerStatsManager();
    Field statItemSetField = mgr.getClass().getDeclaredField("topicAndGroupConsumeOKTPS");
    statItemSetField.setAccessible(true);
    StatsItemSet itemSet = (StatsItemSet) statItemSetField.get(mgr);
    StatsItem item = itemSet.getAndCreateStatsItem(topic + "@" + pushConsumer.getDefaultMQPushConsumerImpl().groupName());
    assertThat(item.getValue().sum()).isGreaterThan(0L);
    MessageExt msg = messageAtomic.get();
    assertThat(msg).isNotNull();
    assertThat(msg.getTopic()).isEqualTo(topic);
    assertThat(msg.getBody()).isEqualTo(new byte[] { 'a' });
}
public ReviewGroupResponse getReviewGroupSummary(String reviewRequestCode) { ReviewGroup reviewGroup = reviewGroupRepository.findByReviewRequestCode(reviewRequestCode) .orElseThrow(() -> new ReviewGroupNotFoundByReviewRequestCodeException(reviewRequestCode)); return new ReviewGroupResponse(reviewGroup.getReviewee(), reviewGroup.getProjectName()); }
@Test void 리뷰_요청_코드에_대한_리뷰_그룹이_존재하지_않을_경우_예외가_발생한다() { // given, when, then assertThatThrownBy(() -> reviewGroupLookupService.getReviewGroupSummary("reviewRequestCode")) .isInstanceOf(ReviewGroupNotFoundByReviewRequestCodeException.class); }
public static boolean isDirectory(URL resourceURL) throws URISyntaxException { final String protocol = resourceURL.getProtocol(); switch (protocol) { case "jar": try { final JarURLConnection jarConnection = (JarURLConnection) resourceURL.openConnection(); final JarEntry entry = jarConnection.getJarEntry(); if (entry.isDirectory()) { return true; } // WARNING! Heuristics ahead. // It turns out that JarEntry#isDirectory() really just tests whether the filename ends in a '/'. // If you try to open the same URL without a trailing '/', it'll succeed — but the result won't be // what you want. We try to get around this by calling getInputStream() on the file inside the jar. // This seems to return null for directories (though that behavior is undocumented as far as I // can tell). If you have a better idea, please improve this. final String relativeFilePath = entry.getName(); final JarFile jarFile = jarConnection.getJarFile(); final ZipEntry zipEntry = jarFile.getEntry(relativeFilePath); final InputStream inputStream = jarFile.getInputStream(zipEntry); return inputStream == null; } catch (IOException e) { throw new ResourceNotFoundException(e); } case "file": return new File(resourceURL.toURI()).isDirectory(); default: throw new IllegalArgumentException("Unsupported protocol " + resourceURL.getProtocol() + " for resource " + resourceURL); } }
@Test void isDirectoryReturnsFalseForURLEncodedFilesInJars() throws Exception { final URL url = new URL("jar:" + resourceJar.toExternalForm() + "!/file%20with%20space.txt"); assertThat(url.getProtocol()).isEqualTo("jar"); assertThat(ResourceURL.isDirectory(url)).isFalse(); }
public FEELFnResult<Boolean> invoke(@ParameterName( "list" ) List list) { if ( list == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null")); } boolean result = true; boolean containsNull = false; // Spec. definition: return false if any item is false, else true if all items are true, else null for ( final Object element : list ) { if (element != null && !(element instanceof Boolean)) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "an element in the list is not a Boolean")); } else { if (element != null) { result &= (Boolean) element; } else if (!containsNull) { containsNull = true; } } } if (containsNull && result) { return FEELFnResult.ofResult( null ); } else { return FEELFnResult.ofResult( result ); } }
@Test void invokeArrayParamReturnTrue() { FunctionTestUtil.assertResult(allFunction.invoke(new Object[]{Boolean.TRUE, Boolean.TRUE}), true); }
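The invoke method above implements FEEL's three-valued semantics for all(): any false makes the result false even when nulls are present, all true gives true, and true mixed with null gives null. A self-contained sketch of that truth table follows (a hypothetical helper, not the Drools FEEL API):

// Hypothetical sketch of FEEL all() ternary logic: false dominates, then null, then true.
import java.util.Arrays;
import java.util.List;

public class TernaryAll {
    static Boolean all(List<Boolean> values) {
        boolean sawNull = false;
        for (Boolean v : values) {
            if (Boolean.FALSE.equals(v)) {
                return false; // any false wins, even alongside nulls
            }
            if (v == null) {
                sawNull = true;
            }
        }
        return sawNull ? null : true; // all true => true; true plus null => null
    }

    public static void main(String[] args) {
        System.out.println(all(Arrays.asList(true, true)));         // true
        System.out.println(all(Arrays.asList(true, null)));         // null
        System.out.println(all(Arrays.asList(true, null, false)));  // false
    }
}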
@Override @NotNull public BTreeMutable getMutableCopy() { final BTreeMutable result = new BTreeMutable(this); result.addExpiredLoggable(rootLoggable); return result; }
@Test public void testGetReturnsFirstSortedDuplicate() { tm = new BTreeEmpty(log, createTestSplittingPolicy(), true, 1).getMutableCopy(); List<INode> l = new ArrayList<>(); l.add(kv("1", "1")); l.add(kv("2", "2")); l.add(kv("3", "3")); l.add(kv("5", "51")); l.add(kv("5", "52")); l.add(kv("5", "53")); l.add(kv("5", "54")); l.add(kv("5", "55")); l.add(kv("5", "56")); l.add(kv("5", "57")); l.add(kv("7", "7")); for (INode ln : l) { getTreeMutable().add(ln); } valueEquals("51", tm.get(key("5"))); }
public static String parametersToString(Object... objs) { StringBuilder sb = new StringBuilder("("); if (objs != null) { for (int k = 0; k < objs.length; k++) { if (k != 0) { sb.append(", "); } if (objs[k] == null) { sb.append("null"); } else { sb.append(objs[k].toString()); } } } sb.append(")"); return sb.toString(); }
@Test public void parametersToString() { class TestCase { String mExpected; Object[] mInput; public TestCase(String expected, Object[] objs) { mExpected = expected; mInput = objs; } } List<TestCase> testCases = new ArrayList<>(); testCases.add(new TestCase("()", null)); testCases.add(new TestCase("(null)", new Object[] {null})); testCases.add(new TestCase("()", new Object[] {""})); testCases.add(new TestCase("(foo)", new Object[] {"foo"})); testCases.add(new TestCase("(foo, bar)", new Object[] {"foo", "bar"})); testCases.add(new TestCase("(foo, , bar)", new Object[] {"foo", "", "bar"})); testCases.add(new TestCase("(, foo, )", new Object[] {"", "foo", ""})); testCases.add(new TestCase("(, , )", new Object[] {"", "", ""})); testCases.add(new TestCase("(1)", new Object[] {1})); testCases.add(new TestCase("(1, 2, 3)", new Object[] {1, 2, 3})); for (TestCase testCase : testCases) { assertEquals(testCase.mExpected, FormatUtils.parametersToString(testCase.mInput)); } }
@Deprecated public List<Pet> findPetsByTags(List<String> tags) throws RestClientException { return findPetsByTagsWithHttpInfo(tags).getBody(); }
@Test public void findPetsByTagsTest() { List<String> tags = null; List<Pet> response = api.findPetsByTags(tags); // TODO: test validations }
public void doGet( HttpServletRequest request, HttpServletResponse response ) throws ServletException,
    IOException {
  if ( isJettyMode() && !request.getContextPath().startsWith( CONTEXT_PATH ) ) {
    return;
  }

  if ( log.isDebug() ) {
    logDebug( BaseMessages.getString( PKG, "RunJobServlet.Log.RunJobRequested" ) );
  }

  // Options taken from PAN
  //
  String[] knownOptions = new String[] { "job", "level", };

  String transOption = request.getParameter( "job" );
  String levelOption = request.getParameter( "level" );

  response.setStatus( HttpServletResponse.SC_OK );

  PrintWriter out = response.getWriter();

  SlaveServerConfig serverConfig = transformationMap.getSlaveServerConfig();
  try {
    Repository slaveServerRepository = serverConfig.getRepository();
    if ( slaveServerRepository == null || !slaveServerRepository.isConnected() ) {
      response.setStatus( HttpServletResponse.SC_UNAUTHORIZED );
      out.println( new WebResult( WebResult.STRING_ERROR, BaseMessages.getString(
        PKG, "RunJobServlet.Error.UnableToConnectToRepository", serverConfig.getRepositoryId() ) ) );
      return;
    }
    if ( transOption == null ) {
      response.setStatus( HttpServletResponse.SC_BAD_REQUEST );
      out.println( new WebResult( WebResult.STRING_ERROR, BaseMessages.getString(
        PKG, "RunJobServlet.Error.MissingMandatoryParameterJob" ) ) );
      return;
    }
    final JobMeta jobMeta = loadJob( slaveServerRepository, transOption );

    // Set the servlet parameters as variables in the job
    //
    String[] parameters = jobMeta.listParameters();
    Enumeration<?> parameterNames = request.getParameterNames();
    while ( parameterNames.hasMoreElements() ) {
      String parameter = (String) parameterNames.nextElement();
      String[] values = request.getParameterValues( parameter );

      // Ignore the known options. set the rest as variables
      //
      if ( Const.indexOfString( parameter, knownOptions ) < 0 ) {
        // If it's a job parameter, set it, otherwise simply set the variable
        //
        if ( Const.indexOfString( parameter, parameters ) < 0 ) {
          jobMeta.setVariable( parameter, values[0] );
        } else {
          jobMeta.setParameterValue( parameter, values[0] );
        }
      }
    }

    JobExecutionConfiguration jobExecutionConfiguration = new JobExecutionConfiguration();
    if ( levelOption != null && !isValidLogLevel( levelOption ) ) {
      response.setStatus( HttpServletResponse.SC_BAD_REQUEST );
      out.println( new WebResult( WebResult.STRING_ERROR, BaseMessages.getString(
        PKG, "RunJobServlet.Error.InvalidLogLevel" ) ) );
      return;
    }
    LogLevel logLevel = LogLevel.getLogLevelForCode( levelOption );
    jobExecutionConfiguration.setLogLevel( logLevel );

    // Create new repository connection for this job
    //
    final Repository repository = jobExecutionConfiguration.connectRepository(
      serverConfig.getRepositoryId(), serverConfig.getRepositoryUsername(), serverConfig.getRepositoryPassword() );

    JobConfiguration jobConfiguration = new JobConfiguration( jobMeta, jobExecutionConfiguration );

    String carteObjectId = UUID.randomUUID().toString();
    SimpleLoggingObject servletLoggingObject =
      new SimpleLoggingObject( CONTEXT_PATH, LoggingObjectType.CARTE, null );
    servletLoggingObject.setContainerObjectId( carteObjectId );
    servletLoggingObject.setLogLevel( logLevel );

    // Create the job and store it in the list...
    //
    final Job job = new Job( repository, jobMeta, servletLoggingObject );

    // Setting variables
    //
    job.initializeVariablesFrom( null );
    job.getJobMeta().setInternalKettleVariables( job );
    job.injectVariables( jobConfiguration.getJobExecutionConfiguration().getVariables() );

    // Also copy the parameters over...
    //
    job.copyParametersFrom( jobMeta );
    job.clearParameters();
    /*
     * String[] parameterNames = job.listParameters(); for (int idx = 0; idx < parameterNames.length; idx++) { // Grab
     * the parameter value set in the job entry // String thisValue =
     * jobExecutionConfiguration.getParams().get(parameterNames[idx]); if (!Utils.isEmpty(thisValue)) { // Set the
     * value as specified by the user in the job entry // jobMeta.setParameterValue(parameterNames[idx], thisValue); }
     * }
     */
    jobMeta.activateParameters();

    job.setSocketRepository( getSocketRepository() );

    JobMap jobMap = getJobMap();

    jobMap.addJob( job.getJobname(), carteObjectId, job, jobConfiguration );

    // Disconnect from the job's repository when the job finishes.
    //
    job.addJobListener( new JobAdapter() {
      public void jobFinished( Job job ) {
        repository.disconnect();
      }
    } );

    String message = "Job '" + job.getJobname() + "' was added to the list with id " + carteObjectId;
    logBasic( message );

    try {
      runJob( job );

      WebResult webResult = new WebResult( WebResult.STRING_OK, "Job started", carteObjectId );
      out.println( webResult.getXML() );
      out.flush();

    } catch ( Exception executionException ) {
      response.setStatus( HttpServletResponse.SC_INTERNAL_SERVER_ERROR );
      String logging = KettleLogStore.getAppender().getBuffer( job.getLogChannelId(), false ).toString();
      out.println( new WebResult( WebResult.STRING_ERROR, BaseMessages.getString(
        PKG, "RunJobServlet.Error.ErrorExecutingJob", serverConfig.getRepositoryId(), logging ) ) );
    }
  } catch ( IdNotFoundException idEx ) {
    response.setStatus( HttpServletResponse.SC_UNAUTHORIZED );
    out.println( new WebResult( WebResult.STRING_ERROR, BaseMessages.getString(
      PKG, "RunJobServlet.Error.UnableToRunJob", serverConfig.getRepositoryId() ) ) );
  } catch ( Exception ex ) {
    if ( ex.getMessage().contains( UNAUTHORIZED_ACCESS_TO_REPOSITORY ) ) {
      response.setStatus( HttpServletResponse.SC_UNAUTHORIZED );
      out.println( new WebResult( WebResult.STRING_ERROR, BaseMessages.getString(
        PKG, "RunJobServlet.Error.UnableToConnectToRepository", serverConfig.getRepositoryId() ) ) );
      return;
    } else if ( ex.getMessage().contains( UNABLE_TO_LOAD_JOB ) ) {
      response.setStatus( HttpServletResponse.SC_NOT_FOUND );
      out.println( new WebResult( WebResult.STRING_ERROR, BaseMessages.getString(
        PKG, "RunJobServlet.Error.UnableToFindJob", serverConfig.getRepositoryId() ) ) );
      return;
    }
    response.setStatus( HttpServletResponse.SC_INTERNAL_SERVER_ERROR );
    out.println( new WebResult( WebResult.STRING_ERROR, BaseMessages.getString(
      PKG, "RunJobServlet.Error.UnexpectedError", Const.CR + Const.getStackTracker( ex ) ) ) );
  }
}
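For context, this handler backs Carte's run-job endpoint. A sketch of a typical invocation — assuming the servlet is mounted at the standard Carte context path /kettle/runJob/, with the repository job path and log level shown purely for illustration — would be:

GET /kettle/runJob/?job=/home/admin/sample_job&level=Debug

Per the parameter loop above, any additional query parameters beyond the known "job" and "level" options are passed through to the job, either as named parameters (when the job declares them) or as plain variables.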
@Test
public void doGetMissingMandatoryParamJobTest() throws Exception {
  HttpServletRequest mockHttpServletRequest = mock( HttpServletRequest.class );
  HttpServletResponse mockHttpServletResponse = mock( HttpServletResponse.class );
  TransformationMap transformationMap = mock( TransformationMap.class );
  SlaveServerConfig slaveServerConfig = mock( SlaveServerConfig.class );
  Repository repository = mock( Repository.class );
  setInternalState( runJobServlet, "transformationMap", transformationMap );
  KettleLogStore.init();
  StringWriter out = new StringWriter();
  PrintWriter printWriter = new PrintWriter( out );

  when( mockHttpServletRequest.getParameter( "job" ) ).thenReturn( null );
  when( mockHttpServletRequest.getParameter( "level" ) ).thenReturn( "BASIC" );
  when( mockHttpServletResponse.getWriter() ).thenReturn( printWriter );
  when( transformationMap.getSlaveServerConfig() ).thenReturn( slaveServerConfig );
  when( slaveServerConfig.getRepository() ).thenReturn( repository );
  when( repository.isConnected() ).thenReturn( true );

  runJobServlet.doGet( mockHttpServletRequest, mockHttpServletResponse );

  verify( mockHttpServletResponse ).setStatus( HttpServletResponse.SC_OK );
  verify( mockHttpServletResponse ).setStatus( HttpServletResponse.SC_BAD_REQUEST );
}
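The suite could be extended along the same lines. As an illustrative sketch — not part of the original test class, reusing the runJobServlet fixture and setInternalState helper assumed by the test above — the unauthorized branch can be driven by a repository that reports itself as disconnected, which should yield SC_UNAUTHORIZED:

@Test
public void doGetRepositoryNotConnectedTest() throws Exception {
  HttpServletRequest mockHttpServletRequest = mock( HttpServletRequest.class );
  HttpServletResponse mockHttpServletResponse = mock( HttpServletResponse.class );
  TransformationMap transformationMap = mock( TransformationMap.class );
  SlaveServerConfig slaveServerConfig = mock( SlaveServerConfig.class );
  Repository repository = mock( Repository.class );
  setInternalState( runJobServlet, "transformationMap", transformationMap );
  KettleLogStore.init();
  PrintWriter printWriter = new PrintWriter( new StringWriter() );

  when( mockHttpServletResponse.getWriter() ).thenReturn( printWriter );
  when( transformationMap.getSlaveServerConfig() ).thenReturn( slaveServerConfig );
  when( slaveServerConfig.getRepository() ).thenReturn( repository );
  // "job"/"level" are left unstubbed (the mock returns null): the connection
  // check fires before the job parameter is validated, so they never matter.
  when( repository.isConnected() ).thenReturn( false );

  runJobServlet.doGet( mockHttpServletRequest, mockHttpServletResponse );

  verify( mockHttpServletResponse ).setStatus( HttpServletResponse.SC_UNAUTHORIZED );
}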