Columns: focal_method (string, lengths 13–60.9k) | test_case (string, lengths 25–109k)
@Override protected void write(final MySQLPacketPayload payload) { payload.writeInt4(capabilityFlags); payload.writeInt4(maxPacketSize); payload.writeInt1(characterSet); payload.writeReserved(23); payload.writeStringNul(username); writeAuthResponse(payload); writeDatabase(payload); writeAuthPluginName(payload); }
@Test void assertWriteWithClientPluginAuthLenencClientData() { MySQLHandshakeResponse41Packet actual = new MySQLHandshakeResponse41Packet(100, MySQLConstants.DEFAULT_CHARSET.getId(), "root"); actual.setCapabilityFlags(MySQLCapabilityFlag.CLIENT_PLUGIN_AUTH_LENENC_CLIENT_DATA.getValue()); actual.setAuthResponse(new byte[]{1}); actual.write(payload); verify(payload).writeInt4(MySQLCapabilityFlag.CLIENT_PLUGIN_AUTH_LENENC_CLIENT_DATA.getValue()); verify(payload).writeInt4(100); verify(payload).writeInt1(MySQLConstants.DEFAULT_CHARSET.getId()); verify(payload).writeReserved(23); verify(payload).writeStringNul("root"); verify(payload).writeStringLenenc(new String(new byte[]{1})); }
@Override public void setMaxParallelism(int maxParallelism) { maxParallelism = normalizeAndCheckMaxParallelism(maxParallelism); Optional<String> validationResult = rescaleMaxValidator.apply(maxParallelism); if (validationResult.isPresent()) { throw new IllegalArgumentException( String.format( "Rescaling max parallelism from %s to %s is not allowed: %s", this.maxParallelism, maxParallelism, validationResult.get())); } this.maxParallelism = maxParallelism; }
@Test void setMaxOutOfBounds() { DefaultVertexParallelismInfo info = new DefaultVertexParallelismInfo(1, 1, ALWAYS_VALID); assertThatThrownBy(() -> info.setMaxParallelism(-4)) .withFailMessage("not in valid bounds") .isInstanceOf(IllegalArgumentException.class); }
public static List<FieldInfo> buildSourceSchemaEntity(final LogicalSchema schema) { final List<FieldInfo> allFields = schema.columns().stream() .map(EntityUtil::toFieldInfo) .collect(Collectors.toList()); if (allFields.isEmpty()) { throw new IllegalArgumentException("Root schema should contain columns: " + schema); } return allFields; }
@Test public void shouldBuildCorrectStructField() { // Given: final LogicalSchema schema = LogicalSchema.builder() .valueColumn(ColumnName.of("field"), SqlTypes.struct() .field("innerField", SqlTypes.STRING) .build()) .build(); // When: final List<FieldInfo> fields = EntityUtil.buildSourceSchemaEntity(schema); // Then: assertThat(fields, hasSize(1)); assertThat(fields.get(0).getName(), equalTo("field")); assertThat(fields.get(0).getSchema().getTypeName(), equalTo("STRUCT")); assertThat(fields.get(0).getSchema().getFields().get().size(), equalTo(1)); final FieldInfo inner = fields.get(0).getSchema().getFields().get().get(0); assertThat(inner.getSchema().getTypeName(), equalTo("STRING")); assertThat(inner.getType(), equalTo(Optional.empty())); assertThat(fields.get(0).getSchema().getMemberSchema(), equalTo(Optional.empty())); }
public static boolean isTimeoutException(Throwable throwable) { return throwable instanceof SocketTimeoutException || throwable instanceof ConnectTimeoutException || throwable instanceof TimeoutException || throwable.getCause() instanceof TimeoutException; }
@Test void testIsTimeoutException() { assertFalse(HttpUtils.isTimeoutException(new NacosRuntimeException(0))); assertTrue(HttpUtils.isTimeoutException(new TimeoutException())); assertTrue(HttpUtils.isTimeoutException(new SocketTimeoutException())); assertTrue(HttpUtils.isTimeoutException(new ConnectTimeoutException())); assertTrue(HttpUtils.isTimeoutException(new NacosRuntimeException(0, new TimeoutException()))); }
public void publishArtifacts(List<ArtifactPlan> artifactPlans, EnvironmentVariableContext environmentVariableContext) { final File pluggableArtifactFolder = publishPluggableArtifacts(artifactPlans, environmentVariableContext); try { final List<ArtifactPlan> mergedPlans = artifactPlanFilter.getBuiltInMergedArtifactPlans(artifactPlans); if (isMetadataFolderEmpty(pluggableArtifactFolder)) { LOGGER.info("Pluggable metadata folder is empty."); } else if (pluggableArtifactFolder != null) { mergedPlans.add(0, new ArtifactPlan(ArtifactPlanType.file, format("%s%s*", pluggableArtifactFolder.getName(), File.separator), PLUGGABLE_ARTIFACT_METADATA_FOLDER)); } for (ArtifactPlan artifactPlan : mergedPlans) { try { artifactPlan.publishBuiltInArtifacts(goPublisher, workingDirectory); } catch (Exception e) { failedArtifact.add(artifactPlan); } } if (!failedArtifact.isEmpty()) { StringBuilder builder = new StringBuilder(); for (ArtifactPlan artifactPlan : failedArtifact) { artifactPlan.printArtifactInfo(builder); } throw new RuntimeException(format("[%s] Uploading finished. Failed to upload %s.", PRODUCT_NAME, builder)); } } finally { FileUtils.deleteQuietly(pluggableArtifactFolder); } }
@Test public void shouldPublishPluggableArtifactsAndUploadMetadataFileToServer() { final ArtifactStore s3ArtifactStore = new ArtifactStore("s3", "cd.go.s3", create("access_key", false, "some-key")); final ArtifactStore dockerArtifactStore = new ArtifactStore("docker", "cd.go.docker", create("registry-url", false, "docker.io")); final ArtifactStores artifactStores = new ArtifactStores(s3ArtifactStore, dockerArtifactStore); final ArtifactPlan s3ArtifactPlan = new ArtifactPlan(new PluggableArtifactConfig("installers", "s3", create("Baz", true, "Car"))); final ArtifactPlan dockerArtifactPlan = new ArtifactPlan(new PluggableArtifactConfig("test-reports", "docker", create("junit", false, "junit.xml"))); when(artifactExtension.publishArtifact(eq("cd.go.s3"), eq(s3ArtifactPlan), eq(s3ArtifactStore), anyString(), eq(env))) .thenReturn(new PublishArtifactResponse(Collections.singletonMap("src", "s3://dist"))); when(artifactExtension.publishArtifact(eq("cd.go.docker"), eq(dockerArtifactPlan), eq(dockerArtifactStore), anyString(), eq(env))) .thenReturn(new PublishArtifactResponse(Collections.singletonMap("image", "alpine"))); new ArtifactsPublisher(publisher, artifactExtension, artifactStores, registry, workingFolder) .publishArtifacts(Arrays.asList(s3ArtifactPlan, dockerArtifactPlan), env); assertThat(uploadedPluggableMetadataFiles(publisher.publishedFiles())).containsExactlyInAnyOrder("cd.go.s3.json", "cd.go.docker.json"); }
public byte[] encode(String val, String delimiters) { return codecs[0].encode(val); }
@Test public void testEncodeChinesePersonNameGBK() { assertArrayEquals(CHINESE_PERSON_NAME_GB18030_BYTES, gbk().encode(CHINESE_PERSON_NAME_GB18030, PN_DELIMS)); }
public void execute() { new PathAwareCrawler<>( FormulaExecutorComponentVisitor.newBuilder(metricRepository, measureRepository).buildFor(formulas)) .visit(treeRootHolder.getReportTreeRoot()); }
@Test public void dont_compute_duplicated_blocks_for_test_files() { duplicationRepository.addDuplication(FILE_5_REF, new TextBlock(1, 1), new TextBlock(3, 3)); duplicationRepository.addDuplication(FILE_5_REF, new TextBlock(2, 2), new TextBlock(3, 3)); underTest.execute(); assertRawMeasureValue(FILE_5_REF, DUPLICATED_BLOCKS_KEY, 0); assertRawMeasureValue(FILE_5_REF, DUPLICATED_FILES_KEY, 0); }
@Udf public <T> List<T> distinct( @UdfParameter(description = "Array of values to distinct") final List<T> input) { if (input == null) { return null; } final Set<T> distinctVals = Sets.newLinkedHashSetWithExpectedSize(input.size()); distinctVals.addAll(input); return new ArrayList<>(distinctVals); }
@SuppressWarnings("unchecked") @Test public void shouldDistinctArrayOfMaps() { final Map<String, Integer> map1 = ImmutableMap.of("foo", 1, "bar", 2, "baz", 3); final Map<String, Integer> map2 = ImmutableMap.of("foo", 10, "baz", 3); final Map<String, Integer> map3 = ImmutableMap.of("foo", 1, "bar", 2, "baz", 3); final List<Map<String, Integer>> result = udf.distinct(Arrays.asList(map1, map2, map3)); assertThat(result, contains(map1, map2)); }
@Override public TenantPackageDO validTenantPackage(Long id) { TenantPackageDO tenantPackage = tenantPackageMapper.selectById(id); if (tenantPackage == null) { throw exception(TENANT_PACKAGE_NOT_EXISTS); } if (tenantPackage.getStatus().equals(CommonStatusEnum.DISABLE.getStatus())) { throw exception(TENANT_PACKAGE_DISABLE, tenantPackage.getName()); } return tenantPackage; }
@Test public void testValidTenantPackage_success() { // mock data TenantPackageDO dbTenantPackage = randomPojo(TenantPackageDO.class, o -> o.setStatus(CommonStatusEnum.ENABLE.getStatus())); tenantPackageMapper.insert(dbTenantPackage);// @Sql: first insert an existing record // invoke TenantPackageDO result = tenantPackageService.validTenantPackage(dbTenantPackage.getId()); // assert assertPojoEquals(dbTenantPackage, result); }
public RuntimeOptionsBuilder parse(Class<?> clazz) { RuntimeOptionsBuilder args = new RuntimeOptionsBuilder(); for (Class<?> classWithOptions = clazz; hasSuperClass( classWithOptions); classWithOptions = classWithOptions.getSuperclass()) { CucumberOptions options = requireNonNull(optionsProvider).getOptions(classWithOptions); if (options != null) { addDryRun(options, args); addMonochrome(options, args); addTags(classWithOptions, options, args); addPlugins(options, args); addPublish(options, args); addName(options, args); addSnippets(options, args); addGlue(options, args); addFeatures(options, args); addObjectFactory(options, args); addUuidGenerator(options, args); } } addDefaultFeaturePathIfNoFeaturePathIsSpecified(args, clazz); addDefaultGlueIfNoOverridingGlueIsSpecified(args, clazz); return args; }
@Test void create_with_extra_glue() { RuntimeOptions runtimeOptions = parser().parse(ClassWithExtraGlue.class).build(); assertThat(runtimeOptions.getGlue(), contains(uri("classpath:/app/features/hooks"), uri("classpath:/io/cucumber/core/options"))); }
@Override public ShenyuContext decorator(final ShenyuContext shenyuContext, final MetaData metaData) { String path = shenyuContext.getPath(); shenyuContext.setMethod(path); shenyuContext.setRealUrl(path); shenyuContext.setRpcType(RpcTypeEnum.WEB_SOCKET.getName()); shenyuContext.setModule(Optional.ofNullable(metaData).map(MetaData::getAppName) .orElse(String.format("%s-%s", PluginEnum.WEB_SOCKET.getName(), shenyuContext.getRpcType()))); return shenyuContext; }
@Test public void testDecorator() { MetaData metaData = null; ShenyuContext shenyuContext = new ShenyuContext(); webSocketShenyuContextDecorator.decorator(shenyuContext, metaData); Assertions.assertNull(shenyuContext.getMethod()); Assertions.assertEquals(shenyuContext.getRpcType(), "websocket"); Assertions.assertEquals(shenyuContext.getModule(), "websocket-websocket"); }
@Override public SchemaKStream<?> buildStream(final PlanBuildContext builderContext) { final Stacker contextStacker = builderContext.buildNodeContext(getId().toString()); return getSource().buildStream(builderContext) .filter( getPredicate(), contextStacker ); }
@Test public void shouldApplyFilterCorrectly() { // When: node.buildStream(planBuildContext); // Then: verify(sourceNode).buildStream(planBuildContext); verify(schemaKStream).filter(predicate, stacker); }
public static SchemaAndValue parseString(String value) { if (value == null) { return NULL_SCHEMA_AND_VALUE; } if (value.isEmpty()) { return new SchemaAndValue(Schema.STRING_SCHEMA, value); } ValueParser parser = new ValueParser(new Parser(value)); return parser.parse(false); }
@Test @Timeout(5) public void shouldNotEncounterInfiniteLoop() { // This byte sequence gets parsed as CharacterIterator.DONE and can cause issues if // comparisons to that character are done to check if the end of a string has been reached. // For more information, see https://issues.apache.org/jira/browse/KAFKA-10574 byte[] bytes = new byte[] {-17, -65, -65}; String str = new String(bytes, StandardCharsets.UTF_8); SchemaAndValue schemaAndValue = Values.parseString(str); assertEquals(Type.STRING, schemaAndValue.schema().type()); assertEquals(str, schemaAndValue.value()); }
@Override public List<FetchArtifactEnvironmentVariable> getFetchArtifactEnvironmentVariablesFromResponseBody(String responseBody) { List<FetchArtifactEnvironmentVariable> result = DEFAULT_GSON.fromJson(responseBody, new TypeToken<List<FetchArtifactEnvironmentVariable>>() {}.getType()); return Optional.ofNullable(result).orElse(List.of()); }
@Test public void fetchArtifactMessage_shouldDeserializeAndAssumeEmpty() { Assertions.assertThat(new ArtifactMessageConverterV2().getFetchArtifactEnvironmentVariablesFromResponseBody("")).isEmpty(); }
public StringSubject factValue(String key) { return doFactValue(key, null); }
@Test public void factValueIntFailNotEnoughWithKey() { Object unused = expectFailureWhenTestingThat(fact("foo", "the foo")).factValue("foo", 5); assertFailureKeys("for key", "index too high", "fact count was"); assertFailureValue("for key", "foo"); assertFailureValue("index too high", "5"); assertFailureValue("fact count was", "1"); }
public void importCounters(String[] counterNames, String[] counterKinds, long[] counterDeltas) { final int length = counterNames.length; if (counterKinds.length != length || counterDeltas.length != length) { throw new AssertionError("array lengths do not match"); } for (int i = 0; i < length; ++i) { final CounterName name = CounterName.named(counterPrefix + counterNames[i]); final String kind = counterKinds[i]; final long delta = counterDeltas[i]; switch (kind) { case "sum": counterFactory.longSum(name).addValue(delta); break; case "max": counterFactory.longMax(name).addValue(delta); break; case "min": counterFactory.longMin(name).addValue(delta); break; default: throw new IllegalArgumentException("unsupported counter kind: " + kind); } } }
@Test public void testMultipleCounters() throws Exception { String[] names = {"sum_counter", "max_counter", "min_counter"}; String[] kinds = {"sum", "max", "min"}; long[] deltas = {100, 200, 300}; counters.importCounters(names, kinds, deltas); Counter<Long, Long> sumCounter = getCounter("sum_counter"); assertNotNull(sumCounter); counterSet.extractUpdates(false, mockUpdateExtractor); verify(mockUpdateExtractor) .longSum(named("stageName-systemName-dataset-sum_counter"), false, 100L); verify(mockUpdateExtractor) .longMax(named("stageName-systemName-dataset-max_counter"), false, 200L); verify(mockUpdateExtractor) .longMin(named("stageName-systemName-dataset-min_counter"), false, 300L); verifyNoMoreInteractions(mockUpdateExtractor); }
@VisibleForTesting Set<Artifact> getProjectDependencies() { return session.getProjects().stream() .map(MavenProject::getArtifact) .filter(artifact -> !artifact.equals(project.getArtifact())) .filter(artifact -> artifact.getFile() != null) .collect(Collectors.toSet()); }
@Test public void testGetProjectDependencies() { MavenProject rootPomProject = mock(MavenProject.class); MavenProject jibSubModule = mock(MavenProject.class); MavenProject sharedLibSubModule = mock(MavenProject.class); when(mockMavenSession.getProjects()) .thenReturn(Arrays.asList(rootPomProject, sharedLibSubModule, jibSubModule)); Artifact nullFileArtifact = mock(Artifact.class); Artifact projectJar = newArtifact("com.test", "my-app", "1.0"); Artifact sharedLibJar = newArtifact("com.test", "shared-lib", "1.0"); when(rootPomProject.getArtifact()).thenReturn(nullFileArtifact); when(jibSubModule.getArtifact()).thenReturn(projectJar); when(sharedLibSubModule.getArtifact()).thenReturn(sharedLibJar); when(mockMavenProject.getArtifact()).thenReturn(projectJar); assertThat(mavenProjectProperties.getProjectDependencies()).containsExactly(sharedLibJar); }
@Override public void run() { if (backgroundJobServer.isNotReadyToProcessJobs()) return; try (PeriodicTaskRunInfo runInfo = taskStatistics.startRun(backgroundJobServerConfiguration())) { tasks.forEach(task -> task.run(runInfo)); runInfo.markRunAsSucceeded(); } catch (Exception e) { taskStatistics.handleException(e); if (taskStatistics.hasTooManyExceptions()) { if (e instanceof StorageException) { LOGGER.error("FATAL - JobRunr encountered too many storage exceptions. Shutting down. Did you know JobRunr Pro has built-in database fault tolerance? Check out https://www.jobrunr.io/en/documentation/pro/database-fault-tolerance/", e); } else { LOGGER.error("FATAL - JobRunr encountered too many processing exceptions. Shutting down.", shouldNotHappenException(e)); } backgroundJobServer.stop(); } else { LOGGER.warn(JobRunrException.SHOULD_NOT_HAPPEN_MESSAGE + " - Processing will continue.", e); } } }
@Test void jobHandlerStopsBackgroundJobServerIfTooManyExceptions() { Task mockedTask = mockTaskThatThrows(new SevereJobRunrException("Could not resolve ConcurrentJobModificationException", new UnresolvableConcurrentJobModificationException(emptyList(), null))); JobHandler jobHandler = createJobHandlerWithTask(mockedTask); for (int i = 0; i <= 5; i++) { jobHandler.run(); } verify(backgroundJobServer).stop(); assertThat(logger).hasErrorMessage("FATAL - JobRunr encountered too many processing exceptions. Shutting down."); }
@Override public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException { for(Path file : files.keySet()) { if(file.isFile() || file.isSymbolicLink()) { callback.delete(file); try { session.sftp().remove(file.getAbsolute()); } catch(IOException e) { throw new SFTPExceptionMappingService().map("Cannot delete {0}", e, file); } } } for(Path file : files.keySet()) { if(file.isDirectory() && !file.isSymbolicLink()) { callback.delete(file); try { session.sftp().removeDir(file.getAbsolute()); } catch(IOException e) { throw new SFTPExceptionMappingService().map("Cannot delete {0}", e, file); } } } }
@Test(expected = NotfoundException.class) public void testDeleteNotFound() throws Exception { final Path test = new Path(new SFTPHomeDirectoryService(session).find(), "t", EnumSet.of(Path.Type.file)); try { new SFTPDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback()); } catch(NotfoundException e) { assertEquals("Cannot delete t.", e.getMessage()); throw e; } }
@Override public boolean tryToLockWebLeader() { return webLeaderLocked.compareAndSet(false, true); }
@Test public void tryToLockWebLeader_returns_true_if_first_call() { assertThat(underTest.tryToLockWebLeader()).isTrue(); // next calls return false assertThat(underTest.tryToLockWebLeader()).isFalse(); assertThat(underTest.tryToLockWebLeader()).isFalse(); }
@Override public ExecuteResult execute(final ServiceContext serviceContext, final ConfiguredKsqlPlan plan, final boolean restoreInProgress) { try { final ExecuteResult result = EngineExecutor .create(primaryContext, serviceContext, plan.getConfig()) .execute(plan.getPlan(), restoreInProgress); return result; } catch (final KsqlStatementException e) { throw e; } catch (final KsqlException e) { // add the statement text to the KsqlException throw new KsqlStatementException( e.getMessage(), e.getMessage(), plan.getPlan().getStatementText(), e.getCause() ); } }
@Test public void shouldMaintainOrderOfReturnedQueries() { // Given: setupKsqlEngineWithSharedRuntimeEnabled(); // When: final List<QueryMetadata> queries = KsqlEngineTestUtil.execute( serviceContext, ksqlEngine, "create stream foo as select * from orders;" + "create stream bar as select * from orders;", ksqlConfig, Collections.emptyMap()); // Then: assertThat(queries, hasSize(2)); assertThat(queries.get(0).getStatementString(), containsString("CREATE STREAM FOO")); assertThat(queries.get(1).getStatementString(), containsString("CREATE STREAM BAR")); }
@Override public boolean init( StepMetaInterface smi, StepDataInterface sdi ) { meta = (GetRepositoryNamesMeta) smi; data = (GetRepositoryNamesData) sdi; if ( super.init( smi, sdi ) ) { try { // Get the repository objects from the repository... // data.list = getRepositoryObjects(); } catch ( Exception e ) { logError( "Error initializing step: ", e ); return false; } data.rownr = 1L; data.filenr = 0; return true; } return false; }
@Test public void testGetRepoList_transOnly_Extended() throws KettleException { init( repoExtended, "/", true, ".*", "", Transformations, 2 ); }
public DirectoryEntry lookUp( File workingDirectory, JimfsPath path, Set<? super LinkOption> options) throws IOException { checkNotNull(path); checkNotNull(options); DirectoryEntry result = lookUp(workingDirectory, path, options, 0); if (result == null) { // an intermediate file in the path did not exist or was not a directory throw new NoSuchFileException(path.toString()); } return result; }
@Test public void testLookup_relative() throws IOException { assertExists(lookup("one"), "work", "one"); assertExists(lookup("one/two/three"), "two", "three"); }
public List<Region> findRegionsByKeyRange(final byte[] startKey, final byte[] endKey) { final StampedLock stampedLock = this.stampedLock; final long stamp = stampedLock.readLock(); try { final byte[] realStartKey = BytesUtil.nullToEmpty(startKey); final NavigableMap<byte[], Long> subRegionMap; if (endKey == null) { subRegionMap = this.rangeTable.tailMap(realStartKey, false); } else { subRegionMap = this.rangeTable.subMap(realStartKey, false, endKey, true); } final List<Region> regionList = Lists.newArrayListWithCapacity(subRegionMap.size() + 1); final Map.Entry<byte[], Long> headEntry = this.rangeTable.floorEntry(realStartKey); if (headEntry == null) { reportFail(startKey); throw reject(startKey, "fail to find region by startKey"); } regionList.add(safeCopy(this.regionTable.get(headEntry.getValue()))); for (final Long regionId : subRegionMap.values()) { regionList.add(safeCopy(this.regionTable.get(regionId))); } return regionList; } finally { stampedLock.unlockRead(stamp); } }
@Test public void findRegionsByKeyRangeTest() { // case-1 { RegionRouteTable table = new RegionRouteTable(); Region region = makeRegion(-1, null, null); table.addOrUpdateRegion(region); List<Region> regionList = table.findRegionsByKeyRange(KeyValueTool.makeKey("a"), KeyValueTool.makeKey("w")); assertEquals(1, regionList.size()); } // case-2 { RegionRouteTable table = new RegionRouteTable(); Region r1 = makeRegion(1, null, KeyValueTool.makeKey("c")); Region r2 = makeRegion(2, KeyValueTool.makeKey("c"), KeyValueTool.makeKey("e")); Region r3 = makeRegion(3, KeyValueTool.makeKey("e"), null); table.addOrUpdateRegion(r1); table.addOrUpdateRegion(r2); table.addOrUpdateRegion(r3); List<Region> foundList = table.findRegionsByKeyRange(KeyValueTool.makeKey("adc"), KeyValueTool.makeKey("def")); assertEquals(2, foundList.size()); assertEquals(r1.getId(), foundList.get(0).getId()); assertEquals(r2.getId(), foundList.get(1).getId()); foundList = table.findRegionsByKeyRange(KeyValueTool.makeKey("c"), KeyValueTool.makeKey("def")); assertEquals(1, foundList.size()); assertEquals(r2.getId(), foundList.get(0).getId()); } // case-3 { RegionRouteTable table = new RegionRouteTable(); Region r1 = makeRegion(1, null, KeyValueTool.makeKey("c")); Region r2 = makeRegion(2, KeyValueTool.makeKey("c"), KeyValueTool.makeKey("e")); Region r3 = makeRegion(3, KeyValueTool.makeKey("e"), KeyValueTool.makeKey("g")); Region r4 = makeRegion(4, KeyValueTool.makeKey("g"), KeyValueTool.makeKey("i")); Region r5 = makeRegion(5, KeyValueTool.makeKey("i"), KeyValueTool.makeKey("k")); Region r6 = makeRegion(6, KeyValueTool.makeKey("k"), KeyValueTool.makeKey("n")); Region r7 = makeRegion(7, KeyValueTool.makeKey("n"), null); table.addOrUpdateRegion(r1); table.addOrUpdateRegion(r2); table.addOrUpdateRegion(r3); table.addOrUpdateRegion(r4); table.addOrUpdateRegion(r5); table.addOrUpdateRegion(r6); table.addOrUpdateRegion(r7); List<Region> foundList = table.findRegionsByKeyRange(KeyValueTool.makeKey("adc"), KeyValueTool.makeKey("def")); assertEquals(2, foundList.size()); assertEquals(r1.getId(), foundList.get(0).getId()); assertEquals(r2.getId(), foundList.get(1).getId()); foundList = table.findRegionsByKeyRange(KeyValueTool.makeKey("c"), KeyValueTool.makeKey("kf")); assertEquals(5, foundList.size()); assertEquals(r2.getId(), foundList.get(0).getId()); assertEquals(r3.getId(), foundList.get(1).getId()); assertEquals(r4.getId(), foundList.get(2).getId()); assertEquals(r5.getId(), foundList.get(3).getId()); assertEquals(r6.getId(), foundList.get(4).getId()); } }
public void validateAndMergeOutputParams(StepRuntimeSummary runtimeSummary) { Optional<String> externalJobId = extractExternalJobId(runtimeSummary); if (externalJobId.isPresent()) { Optional<OutputData> outputDataOpt = outputDataDao.getOutputDataForExternalJob(externalJobId.get(), ExternalJobType.TITUS); outputDataOpt.ifPresent( outputData -> { ParamsMergeHelper.mergeOutputDataParams( runtimeSummary.getParams(), outputData.getParams()); }); } }
@Test public void testUndefinedOutputParameter() { setupOutputDataDao(); runtimeSummary = runtimeSummaryBuilder().artifacts(artifacts).build(); AssertHelper.assertThrows( "throws validation error if output param not defined", MaestroValidationException.class, "Invalid output parameter [str_param], not defined in params", () -> outputDataManager.validateAndMergeOutputParams(runtimeSummary)); }
public static String formatIso8601ForCCTray(Date date) { if (date == null) { return null; } return formatterUtc.print(date.getTime()); }
@Test public void shouldSerializeDateForCcTray() { Date date = new DateTime("2008-12-09T18:56:14+08:00").toDate(); assertThat(DateUtils.formatIso8601ForCCTray(date), is("2008-12-09T10:56:14Z")); }
@VisibleForTesting void validateEmailUnique(Long id, String email) { if (StrUtil.isBlank(email)) { return; } AdminUserDO user = userMapper.selectByEmail(email); if (user == null) { return; } // if id is null, there is no need to check whether the existing user has the same id if (id == null) { throw exception(USER_EMAIL_EXISTS); } if (!user.getId().equals(id)) { throw exception(USER_EMAIL_EXISTS); } }
@Test public void testValidateEmailUnique_emailExistsForUpdate() { // prepare parameters Long id = randomLongId(); String email = randomString(); // mock data userMapper.insert(randomAdminUserDO(o -> o.setEmail(email))); // invoke and assert the exception assertServiceException(() -> userService.validateEmailUnique(id, email), USER_EMAIL_EXISTS); }
public Fetch<K, V> collectFetch(final FetchBuffer fetchBuffer) { final Fetch<K, V> fetch = Fetch.empty(); final Queue<CompletedFetch> pausedCompletedFetches = new ArrayDeque<>(); int recordsRemaining = fetchConfig.maxPollRecords; try { while (recordsRemaining > 0) { final CompletedFetch nextInLineFetch = fetchBuffer.nextInLineFetch(); if (nextInLineFetch == null || nextInLineFetch.isConsumed()) { final CompletedFetch completedFetch = fetchBuffer.peek(); if (completedFetch == null) break; if (!completedFetch.isInitialized()) { try { fetchBuffer.setNextInLineFetch(initialize(completedFetch)); } catch (Exception e) { // Remove a completedFetch upon a parse with exception if (1) it contains no completedFetch, and // (2) there are no fetched completedFetch with actual content preceding this exception. // The first condition ensures that the completedFetches is not stuck with the same completedFetch // in cases such as the TopicAuthorizationException, and the second condition ensures that no // potential data loss due to an exception in a following record. if (fetch.isEmpty() && FetchResponse.recordsOrFail(completedFetch.partitionData).sizeInBytes() == 0) fetchBuffer.poll(); throw e; } } else { fetchBuffer.setNextInLineFetch(completedFetch); } fetchBuffer.poll(); } else if (subscriptions.isPaused(nextInLineFetch.partition)) { // when the partition is paused we add the records back to the completedFetches queue instead of draining // them so that they can be returned on a subsequent poll if the partition is resumed at that time log.debug("Skipping fetching records for assigned partition {} because it is paused", nextInLineFetch.partition); pausedCompletedFetches.add(nextInLineFetch); fetchBuffer.setNextInLineFetch(null); } else { final Fetch<K, V> nextFetch = fetchRecords(nextInLineFetch, recordsRemaining); recordsRemaining -= nextFetch.numRecords(); fetch.add(nextFetch); } } } catch (KafkaException e) { if (fetch.isEmpty()) throw e; } finally { // add any polled completed fetches for paused partitions back to the completed fetches queue to be // re-evaluated in the next poll fetchBuffer.addAll(pausedCompletedFetches); } return fetch; }
@Test public void testCollectFetchInitializationWithNullPosition() { final TopicPartition topicPartition0 = new TopicPartition("topic", 0); final SubscriptionState subscriptions = mock(SubscriptionState.class); when(subscriptions.hasValidPosition(topicPartition0)).thenReturn(true); when(subscriptions.positionOrNull(topicPartition0)).thenReturn(null); final FetchCollector<String, String> fetchCollector = createFetchCollector(subscriptions); final Records records = createRecords(); FetchResponseData.PartitionData partitionData = new FetchResponseData.PartitionData() .setPartitionIndex(topicPartition0.partition()) .setHighWatermark(1000) .setRecords(records); final CompletedFetch completedFetch = new CompletedFetchBuilder() .partitionData(partitionData) .partition(topicPartition0).build(); final FetchBuffer fetchBuffer = mock(FetchBuffer.class); when(fetchBuffer.nextInLineFetch()).thenReturn(null); when(fetchBuffer.peek()).thenReturn(completedFetch).thenReturn(null); final Fetch<String, String> fetch = fetchCollector.collectFetch(fetchBuffer); assertTrue(fetch.isEmpty()); verify(fetchBuffer).setNextInLineFetch(null); }
public static Object convertValue(final Object value, final Class<?> convertType) throws SQLFeatureNotSupportedException { ShardingSpherePreconditions.checkNotNull(convertType, () -> new SQLFeatureNotSupportedException("Type can not be null")); if (null == value) { return convertNullValue(convertType); } if (value.getClass() == convertType) { return value; } if (value instanceof LocalDateTime) { return convertLocalDateTimeValue((LocalDateTime) value, convertType); } if (value instanceof Timestamp) { return convertTimestampValue((Timestamp) value, convertType); } if (URL.class.equals(convertType)) { return convertURL(value); } if (value instanceof Number) { return convertNumberValue(value, convertType); } if (value instanceof Date) { return convertDateValue((Date) value, convertType); } if (value instanceof byte[]) { return convertByteArrayValue((byte[]) value, convertType); } if (boolean.class.equals(convertType)) { return convertBooleanValue(value); } if (String.class.equals(convertType)) { return value.toString(); } try { return convertType.cast(value); } catch (final ClassCastException ignored) { throw new SQLFeatureNotSupportedException("getObject with type"); } }
@SneakyThrows(MalformedURLException.class) @Test void assertConvertURLValue() throws SQLException { String urlString = "https://shardingsphere.apache.org/"; URL url = (URL) ResultSetUtils.convertValue(urlString, URL.class); assertThat(url, is(new URL(urlString))); }
public void poll(RequestFuture<?> future) { while (!future.isDone()) poll(time.timer(Long.MAX_VALUE), future); }
@Test public void testMetadataFailurePropagated() { KafkaException metadataException = new KafkaException(); metadata.fatalError(metadataException); Exception exc = assertThrows(Exception.class, () -> consumerClient.poll(time.timer(Duration.ZERO))); assertEquals(metadataException, exc); }
String resolveKey(String notifierDescriptorName) { return notifierDescriptorName + ".json"; }
@Test void resolveKeyTest() { assertThat(notifierConfigStore.resolveKey("fake-notifier")) .isEqualTo("fake-notifier.json"); assertThat(notifierConfigStore.resolveKey("other-notifier")) .isEqualTo("other-notifier.json"); }
@Override public ActivitiesInfo getActivities(HttpServletRequest hsr, String nodeId, String groupBy) { try { // Check the parameters to ensure that the parameters are not empty Validate.checkNotNullAndNotEmpty(nodeId, "nodeId"); Validate.checkNotNullAndNotEmpty(groupBy, "groupBy"); // Query SubClusterInfo according to id, // if the nodeId cannot get SubClusterInfo, an exception will be thrown directly. // Call the corresponding subCluster to get ActivitiesInfo. long startTime = clock.getTime(); DefaultRequestInterceptorREST interceptor = getOrCreateInterceptorByNodeId(nodeId); final HttpServletRequest hsrCopy = clone(hsr); ActivitiesInfo activitiesInfo = interceptor.getActivities(hsrCopy, nodeId, groupBy); if (activitiesInfo != null) { long stopTime = clock.getTime(); RouterAuditLogger.logSuccess(getUser().getShortUserName(), GET_ACTIVITIES, TARGET_WEB_SERVICE); routerMetrics.succeededGetActivitiesLatencyRetrieved(stopTime - startTime); return activitiesInfo; } } catch (IllegalArgumentException | NotFoundException e) { routerMetrics.incrGetActivitiesFailedRetrieved(); RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_ACTIVITIES, UNKNOWN, TARGET_WEB_SERVICE, e.getLocalizedMessage()); throw e; } RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_ACTIVITIES, UNKNOWN, TARGET_WEB_SERVICE, "getActivities Failed."); routerMetrics.incrGetActivitiesFailedRetrieved(); throw new RuntimeException("getActivities Failed."); }
@Test public void testGetActivitiesError() throws Exception { // nodeId is empty LambdaTestUtils.intercept(IllegalArgumentException.class, "'nodeId' must not be empty.", () -> interceptor.getActivities(null, "", "DIAGNOSTIC")); // groupBy is empty LambdaTestUtils.intercept(IllegalArgumentException.class, "'groupBy' must not be empty.", () -> interceptor.getActivities(null, "1", "")); // groupBy value is wrong LambdaTestUtils.intercept(IllegalArgumentException.class, "Got invalid groupBy: TEST1, valid groupBy types: [DIAGNOSTIC]", () -> interceptor.getActivities(null, "1", "TEST1")); }
@Override public List<Integer> applyTransforms(List<Integer> originalGlyphIds) { List<Integer> intermediateGlyphsFromGsub = originalGlyphIds; for (String feature : FEATURES_IN_ORDER) { if (!gsubData.isFeatureSupported(feature)) { LOG.debug("the feature {} was not found", feature); continue; } LOG.debug("applying the feature {}", feature); ScriptFeature scriptFeature = gsubData.getFeature(feature); intermediateGlyphsFromGsub = applyGsubFeature(scriptFeature, intermediateGlyphsFromGsub); } return Collections.unmodifiableList(repositionGlyphs(intermediateGlyphsFromGsub)); }
@Test void testApplyTransforms_ra_e_hosshu() { // given List<Integer> glyphsAfterGsub = Arrays.asList(352, 108, 87, 101); // when List<Integer> result = gsubWorkerForBengali.applyTransforms(getGlyphIds("রুপো")); // then assertEquals(glyphsAfterGsub, result); }
public static Map<String, Object> toValueMap(ReferenceMap m, Map<String, ValueReference> parameters) { final ImmutableMap.Builder<String, Object> mapBuilder = ImmutableMap.builder(); for (Map.Entry<String, Reference> entry : m.entrySet()) { final Object value = valueOf(entry.getValue(), parameters); if (value != null) { mapBuilder.put(entry.getKey(), value); } } return mapBuilder.build(); }
@Test public void toValueMapWithMissingParameter() { final Map<String, ValueReference> parameters = Collections.emptyMap(); final ReferenceMap map = new ReferenceMap(Collections.singletonMap("param", ValueReference.createParameter("STRING"))); assertThatThrownBy(() -> ReferenceMapUtils.toValueMap(map, parameters)) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Missing parameter STRING"); }
protected Session openSession() throws RepositoryException { if (ObjectHelper.isEmpty(getJcrEndpoint().getWorkspaceName())) { return getJcrEndpoint().getRepository().login(getJcrEndpoint().getCredentials()); } else { return getJcrEndpoint().getRepository().login(getJcrEndpoint().getCredentials(), getJcrEndpoint().getWorkspaceName()); } }
@Test public void testNodeTypeIsSpecified() throws Exception { Exchange exchange = createExchangeWithBody("Test"); exchange.getIn().removeHeader("testClass"); //there is no definition of such property in nt:resource exchange.getIn().setHeader(JcrConstants.JCR_NODE_NAME, "typedNode"); exchange.getIn().setHeader(JcrConstants.JCR_NODE_TYPE, "nt:folder"); Exchange out = template.send("direct:a", exchange); assertNotNull(out); String uuid = out.getMessage().getBody(String.class); Session session = openSession(); try { Node node = session.getNodeByIdentifier(uuid); assertNotNull(node); assertEquals("/home/test/typedNode", node.getPath()); assertEquals("nt:folder", node.getPrimaryNodeType().getName()); } finally { if (session != null && session.isLive()) { session.logout(); } } }
@Override public AppResponse process(Flow flow, AppSessionRequest request) { if (appSession.getRegistrationId() == null) { return new NokResponse(); } Map<String, String> result = digidClient.getExistingAccount(appSession.getRegistrationId(), appSession.getLanguage()); if (result.get(lowerUnderscore(STATUS)).equals("OK") && result.get(lowerUnderscore(ACCOUNT_ID)) != null) { appSession.setAccountId(Long.valueOf(result.get(lowerUnderscore(ACCOUNT_ID)))); digidClient.remoteLog("54", Map.of(lowerUnderscore(ACCOUNT_ID), appSession.getAccountId())); return new OkResponse(); } else if (result.get(lowerUnderscore(STATUS)).equals("PENDING")) { // switch state to require replace action appSession.setState(State.EXISTING_ACCOUNT_FOUND.name()); return new StatusResponse("PENDING"); } else { return new NokResponse(); } }
@Test void processNOKResponseTest(){ when(digidClientMock.getExistingAccount(1337L, "NL")).thenReturn(Map.of( lowerUnderscore(STATUS), "OK" )); AppResponse appResponse = checkExistingAccount.process(flowMock, null); assertTrue(appResponse instanceof NokResponse); assertEquals("NOK", ((StatusResponse) appResponse).getStatus()); }
public void openFile() { openFile( false ); }
@Test public void testLoadLastUsedTransLocalNoRepositoryAtStartup() throws Exception { String repositoryName = null; String fileName = "fileName"; setLoadLastUsedJobLocalWithRepository( false, repositoryName, null, fileName, true, true ); verify( spoon ).openFile( fileName, null, false ); }
@Override public PageData<DashboardInfo> findDashboardsByTenantId(UUID tenantId, PageLink pageLink) { return DaoUtil.toPageData(dashboardInfoRepository .findByTenantId( tenantId, pageLink.getTextSearch(), DaoUtil.toPageable(pageLink))); }
@Test public void testFindDashboardsByTenantId() { UUID tenantId1 = Uuids.timeBased(); UUID tenantId2 = Uuids.timeBased(); for (int i = 0; i < 20; i++) { createDashboard(tenantId1, i); createDashboard(tenantId2, i * 2); } PageLink pageLink = new PageLink(15, 0, "DASHBOARD"); PageData<DashboardInfo> dashboardInfos1 = dashboardInfoDao.findDashboardsByTenantId(tenantId1, pageLink); Assert.assertEquals(15, dashboardInfos1.getData().size()); PageData<DashboardInfo> dashboardInfos2 = dashboardInfoDao.findDashboardsByTenantId(tenantId1, pageLink.nextPageLink()); Assert.assertEquals(5, dashboardInfos2.getData().size()); }
public static <T, S> T copy(S source, T target, String... ignore) { return copy(source, target, DEFAULT_CONVERT, ignore); }
@Test @SneakyThrows public void testCrossClassLoader() { URL clazz = new File("target/test-classes").getAbsoluteFile().toURI().toURL(); System.out.println(clazz); URLClassLoader loader = new URLClassLoader(new URL[]{ clazz }, ClassUtils.getDefaultClassLoader()){ @Override protected Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException { try { Class<?> clazz = loadSelfClass(name); if (null != clazz) { if (resolve) { resolveClass(clazz); } return clazz; } } catch (Throwable ignore) { } return super.loadClass(name, resolve); } @SneakyThrows public synchronized Class<?> loadSelfClass(String name) { Class<?> clazz = super.findLoadedClass(name); if (clazz == null) { clazz = super.findClass(name); resolveClass(clazz); } return clazz; } @Override public Enumeration<URL> getResources(String name) throws IOException { return findResources(name); } @Override public URL getResource(String name) { return findResource(name); } }; Class<?> sourceClass = loader.loadClass(Source.class.getName()); Assert.assertNotSame(sourceClass, Source.class); Object source = sourceClass.newInstance(); FastBeanCopier.copy(Collections.singletonMap("name","测试"),source); Map<String,Object> map = FastBeanCopier.copy(source,new HashMap<>()); System.out.println(map); loader.close(); map = FastBeanCopier.copy(source,new HashMap<>()); System.out.println(map); }
protected List<List<Comparable>> getTableTransInfo(long txnId) throws AnalysisException { List<List<Comparable>> tableInfos = new ArrayList<>(); readLock(); try { TransactionState transactionState = unprotectedGetTransactionState(txnId); if (null == transactionState) { throw new AnalysisException("Transaction[" + txnId + "] does not exist."); } for (Map.Entry<Long, TableCommitInfo> entry : transactionState.getIdToTableCommitInfos().entrySet()) { List<Comparable> tableInfo = new ArrayList<>(); tableInfo.add(entry.getKey()); tableInfo.add(Joiner.on(", ").join(entry.getValue().getIdToPartitionCommitInfo().values().stream().map( PartitionCommitInfo::getPartitionId).collect(Collectors.toList()))); tableInfos.add(tableInfo); } } finally { readUnlock(); } return tableInfos; }
@Test public void testGetTableTransInfo() throws AnalysisException { DatabaseTransactionMgr masterDbTransMgr = masterTransMgr.getDatabaseTransactionMgr(GlobalStateMgrTestUtil.testDbId1); Long txnId = lableToTxnId.get(GlobalStateMgrTestUtil.testTxnLable1); List<List<Comparable>> tableTransInfos = masterDbTransMgr.getTableTransInfo(txnId); assertEquals(1, tableTransInfos.size()); List<Comparable> tableTransInfo = tableTransInfos.get(0); assertEquals(2, tableTransInfo.size()); assertEquals(2L, tableTransInfo.get(0)); assertEquals("3", tableTransInfo.get(1)); }
@Override public void init(TbContext ctx, TbNodeConfiguration configuration) throws TbNodeException { this.config = TbNodeUtils.convert(configuration, TbSendRestApiCallReplyNodeConfiguration.class); }
@Test public void givenDefaultConfig_whenInit_thenDoesNotThrowException() { var configuration = new TbNodeConfiguration(JacksonUtil.valueToTree(config)); assertThatNoException().isThrownBy(() -> node.init(ctxMock, configuration)); }
@VisibleForTesting @CheckForNull CharsetHandler getHandler(Dialect dialect) { switch (dialect.getId()) { case H2.ID: // nothing to check return null; case Oracle.ID: return new OracleCharsetHandler(sqlExecutor); case PostgreSql.ID: return new PostgresCharsetHandler(sqlExecutor, new PostgresMetadataReader(sqlExecutor)); case MsSql.ID: return new MssqlCharsetHandler(sqlExecutor, new MssqlMetadataReader(sqlExecutor)); default: throw new IllegalArgumentException("Database not supported: " + dialect.getId()); } }
@Test public void getHandler_throws_IAE_if_unsupported_db() { Dialect unsupportedDialect = mock(Dialect.class); when(unsupportedDialect.getId()).thenReturn("foo"); assertThatThrownBy(() -> underTest.getHandler(unsupportedDialect)) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Database not supported: foo"); }
@Override public ClusterInfo clusterGetClusterInfo() { RFuture<Map<String, String>> f = executorService.readAsync((String)null, StringCodec.INSTANCE, RedisCommands.CLUSTER_INFO); Map<String, String> entries = syncFuture(f); Properties props = new Properties(); for (Entry<String, String> entry : entries.entrySet()) { props.setProperty(entry.getKey(), entry.getValue()); } return new ClusterInfo(props); }
@Test public void testClusterGetClusterInfo() { ClusterInfo info = connection.clusterGetClusterInfo(); assertThat(info.getSlotsFail()).isEqualTo(0); assertThat(info.getSlotsOk()).isEqualTo(MasterSlaveConnectionManager.MAX_SLOT); assertThat(info.getSlotsAssigned()).isEqualTo(MasterSlaveConnectionManager.MAX_SLOT); }
@PostMapping("/check-token") @PermitAll @Operation(summary = "校验访问令牌") @Parameter(name = "token", required = true, description = "访问令牌", example = "biu") public CommonResult<OAuth2OpenCheckTokenRespVO> checkToken(HttpServletRequest request, @RequestParam("token") String token) { // 校验客户端 String[] clientIdAndSecret = obtainBasicAuthorization(request); oauth2ClientService.validOAuthClientFromCache(clientIdAndSecret[0], clientIdAndSecret[1], null, null, null); // 校验令牌 OAuth2AccessTokenDO accessTokenDO = oauth2TokenService.checkAccessToken(token); Assert.notNull(accessTokenDO, "访问令牌不能为空"); // 防御性检查 return success(OAuth2OpenConvert.INSTANCE.convert2(accessTokenDO)); }
@Test public void testCheckToken() { // prepare parameters HttpServletRequest request = mockRequest("demo_client_id", "demo_client_secret"); String token = randomString(); // mock methods OAuth2AccessTokenDO accessTokenDO = randomPojo(OAuth2AccessTokenDO.class).setUserType(UserTypeEnum.ADMIN.getValue()).setExpiresTime(LocalDateTimeUtil.of(1653485731195L)); when(oauth2TokenService.checkAccessToken(eq(token))).thenReturn(accessTokenDO); // invoke CommonResult<OAuth2OpenCheckTokenRespVO> result = oauth2OpenController.checkToken(request, token); // assert assertEquals(0, result.getCode()); assertPojoEquals(accessTokenDO, result.getData()); assertEquals(1653485731L, result.getData().getExp()); // a few milliseconds may elapse during execution }
@Override public void write(final OutputStream out) { // CHECKSTYLE_RULES.ON: CyclomaticComplexity try { out.write("[".getBytes(StandardCharsets.UTF_8)); write(out, buildHeader()); final BlockingRowQueue rowQueue = queryMetadata.getRowQueue(); while (!connectionClosed && queryMetadata.isRunning() && !limitReached && !complete) { final KeyValueMetadata<List<?>, GenericRow> row = rowQueue.poll( disconnectCheckInterval, TimeUnit.MILLISECONDS ); if (row != null) { write(out, buildRow(row)); } else { // If no new rows have been written, the user may have terminated the connection without // us knowing. Check by trying to write a single newline. out.write("\n".getBytes(StandardCharsets.UTF_8)); out.flush(); } drainAndThrowOnError(out); } if (connectionClosed) { return; } drain(out); if (limitReached) { objectMapper.writeValue(out, StreamedRow.finalMessage("Limit Reached")); } else if (complete) { objectMapper.writeValue(out, StreamedRow.finalMessage("Query Completed")); } out.write("]\n".getBytes(StandardCharsets.UTF_8)); out.flush(); } catch (final EOFException exception) { // The user has terminated the connection; we can stop writing log.warn("Query terminated due to exception:" + exception.toString()); } catch (final InterruptedException exception) { // The most likely cause of this is the server shutting down. Should just try to close // gracefully, without writing any more to the connection stream. log.warn("Interrupted while writing to connection stream"); } catch (final Exception exception) { log.error("Exception occurred while writing to connection stream: ", exception); outputException(out, exception); } finally { close(); } }
@Test public void shouldWriteAnyPendingRowsBeforeReportingException() { // Given: doAnswer(streamRows("Row1", "Row2", "Row3")) .when(rowQueue).drainTo(any()); createWriter(); givenUncaughtException(new KsqlException("Server went Boom")); // When: writer.write(out); // Then: final List<String> lines = getOutput(out); assertThat(lines, contains( containsString("header"), containsString("Row1"), containsString("Row2"), containsString("Row3"), containsString("Server went Boom") )); }
@Override public YamlShardingStrategyConfiguration swapToYamlConfiguration(final ShardingStrategyConfiguration data) { YamlShardingStrategyConfiguration result = new YamlShardingStrategyConfiguration(); if (data instanceof StandardShardingStrategyConfiguration) { result.setStandard(createYamlStandardShardingStrategyConfiguration((StandardShardingStrategyConfiguration) data)); } if (data instanceof ComplexShardingStrategyConfiguration) { result.setComplex(createYamlComplexShardingStrategyConfiguration((ComplexShardingStrategyConfiguration) data)); } if (data instanceof HintShardingStrategyConfiguration) { result.setHint(createYamlHintShardingStrategyConfiguration((HintShardingStrategyConfiguration) data)); } if (data instanceof NoneShardingStrategyConfiguration) { result.setNone(new YamlNoneShardingStrategyConfiguration()); } return result; }
@Test void assertSwapToYamlConfigurationForStandardShardingStrategy() { ShardingStrategyConfiguration data = new StandardShardingStrategyConfiguration("order_id", "core_standard_fixture"); YamlShardingStrategyConfigurationSwapper swapper = new YamlShardingStrategyConfigurationSwapper(); YamlShardingStrategyConfiguration actual = swapper.swapToYamlConfiguration(data); assertThat(actual.getStandard().getShardingColumn(), is("order_id")); assertThat(actual.getStandard().getShardingAlgorithmName(), is("core_standard_fixture")); }
@Override public String getContextName() { return this.contextName; }
@Test public void testGetContextName() { assertEquals(contextName, v3SnmpConfiguration.getContextName()); }
public RegistryBuilder address(String address) { this.address = address; return getThis(); }
@Test void address() { RegistryBuilder builder = new RegistryBuilder(); builder.address("address"); Assertions.assertEquals("address", builder.build().getAddress()); }
@Override public FSDataOutputStream create(Path file, FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException { final FTPClient client = connect(); Path workDir = new Path(client.printWorkingDirectory()); Path absolute = makeAbsolute(workDir, file); FileStatus status; try { status = getFileStatus(client, file); } catch (FileNotFoundException fnfe) { status = null; } if (status != null) { if (overwrite && !status.isDirectory()) { delete(client, file, false); } else { disconnect(client); throw new FileAlreadyExistsException("File already exists: " + file); } } Path parent = absolute.getParent(); if (parent == null || !mkdirs(client, parent, FsPermission.getDirDefault())) { parent = (parent == null) ? new Path("/") : parent; disconnect(client); throw new IOException("create(): Mkdirs failed to create: " + parent); } client.allocate(bufferSize); // Change to parent directory on the server. Only then can we write to the // file on the server by opening up an OutputStream. As a side effect the // working directory on the server is changed to the parent directory of the // file. The FTP client connection is closed when close() is called on the // FSDataOutputStream. client.changeWorkingDirectory(parent.toUri().getPath()); OutputStream outputStream = client.storeFileStream(file.getName()); if (!FTPReply.isPositivePreliminary(client.getReplyCode())) { // The ftpClient is an inconsistent state. Must close the stream // which in turn will logout and disconnect from FTP server if (outputStream != null) { IOUtils.closeStream(outputStream); } disconnect(client); throw new IOException("Unable to create file: " + file + ", Aborting"); } FSDataOutputStream fos = new FSDataOutputStream(outputStream, statistics) { @Override public void close() throws IOException { super.close(); if (!client.isConnected()) { throw new FTPException("Client not connected"); } boolean cmdCompleted = client.completePendingCommand(); disconnect(client); if (!cmdCompleted) { throw new FTPException("Could not complete transfer, Reply Code - " + client.getReplyCode()); } } }; return fos; }
@Test public void testCreateWithoutWritePermissions() throws Exception { BaseUser user = server.addUser("test", "password"); Configuration configuration = new Configuration(); configuration.set("fs.defaultFS", "ftp:///"); configuration.set("fs.ftp.host", "localhost"); configuration.setInt("fs.ftp.host.port", server.getPort()); configuration.set("fs.ftp.user.localhost", user.getName()); configuration.set("fs.ftp.password.localhost", user.getPassword()); configuration.setBoolean("fs.ftp.impl.disable.cache", true); FileSystem fs = FileSystem.get(configuration); byte[] bytesExpected = "hello world".getBytes(StandardCharsets.UTF_8); LambdaTestUtils.intercept( IOException.class, "Unable to create file: test1.txt, Aborting", () -> { try (FSDataOutputStream out = fs.create(new Path("test1.txt"))) { out.write(bytesExpected); } } ); }
public static String generateSparkAppId(final SparkApplication app) { long attemptId = ModelUtils.getAttemptId(app); String preferredId = String.format("%s-%d", app.getMetadata().getName(), attemptId); if (preferredId.length() > DEFAULT_ID_LENGTH_LIMIT) { int preferredIdPrefixLength = DEFAULT_ID_LENGTH_LIMIT - DEFAULT_HASH_BASED_IDENTIFIER_LENGTH_LIMIT - 1; String preferredIdPrefix = preferredId.substring(0, preferredIdPrefixLength); return generateHashBasedId( preferredIdPrefix, app.getMetadata().getNamespace(), app.getMetadata().getName(), String.valueOf(attemptId)); } else { return preferredId; } }
@Test void sparkAppIdShouldBeDeterministicPerAppPerAttempt() { SparkApplication mockApp1 = mock(SparkApplication.class); SparkApplication mockApp2 = mock(SparkApplication.class); ApplicationStatus mockStatus1 = mock(ApplicationStatus.class); ApplicationStatus mockStatus2 = mock(ApplicationStatus.class); String appName1 = "app1"; String appName2 = "app2"; ObjectMeta appMeta1 = new ObjectMetaBuilder().withName(appName1).withNamespace("ns").build(); ObjectMeta appMeta2 = new ObjectMetaBuilder().withName(appName2).withNamespace("ns").build(); when(mockApp1.getMetadata()).thenReturn(appMeta1); when(mockApp2.getMetadata()).thenReturn(appMeta2); when(mockApp1.getStatus()).thenReturn(mockStatus1); when(mockApp2.getStatus()).thenReturn(mockStatus2); String appId1 = SparkAppSubmissionWorker.generateSparkAppId(mockApp1); String appId2 = SparkAppSubmissionWorker.generateSparkAppId(mockApp2); assertNotEquals(appId1, appId2); assertTrue(appId1.contains(appName1)); assertTrue(appId1.length() <= DEFAULT_ID_LENGTH_LIMIT); assertTrue(appId2.length() <= DEFAULT_ID_LENGTH_LIMIT); // multiple invoke shall give same result assertEquals( appId1, SparkAppSubmissionWorker.generateSparkAppId(mockApp1), "Multiple invoke of generateSparkAppId shall give same result."); assertEquals( appId2, SparkAppSubmissionWorker.generateSparkAppId(mockApp2), "Multiple invoke of generateSparkAppId shall give same result."); ApplicationAttemptSummary mockAttempt = mock(ApplicationAttemptSummary.class); AttemptInfo mockAttemptInfo = mock(AttemptInfo.class); when(mockAttempt.getAttemptInfo()).thenReturn(mockAttemptInfo); when(mockAttemptInfo.getId()).thenReturn(2L); when(mockStatus1.getCurrentAttemptSummary()).thenReturn(mockAttempt); when(mockStatus2.getCurrentAttemptSummary()).thenReturn(mockAttempt); String appId1Attempt2 = SparkAppSubmissionWorker.generateSparkAppId(mockApp1); assertTrue(appId1Attempt2.contains(appName1)); assertNotEquals(appId1, appId1Attempt2); assertTrue(appId1Attempt2.length() <= DEFAULT_ID_LENGTH_LIMIT); String appId2Attempt2 = SparkAppSubmissionWorker.generateSparkAppId(mockApp2); assertNotEquals(appId2, appId2Attempt2); assertEquals(appId2Attempt2, SparkAppSubmissionWorker.generateSparkAppId(mockApp2)); assertTrue(appId2Attempt2.length() <= DEFAULT_ID_LENGTH_LIMIT); assertEquals(appId1Attempt2, SparkAppSubmissionWorker.generateSparkAppId(mockApp1)); }
public static MySQLCommandPacket newInstance(final MySQLCommandPacketType commandPacketType, final MySQLPacketPayload payload, final ConnectionSession connectionSession) { switch (commandPacketType) { case COM_QUIT: return new MySQLComQuitPacket(); case COM_INIT_DB: return new MySQLComInitDbPacket(payload); case COM_FIELD_LIST: return new MySQLComFieldListPacket(payload); case COM_QUERY: return new MySQLComQueryPacket(payload); case COM_STMT_PREPARE: return new MySQLComStmtPreparePacket(payload); case COM_STMT_EXECUTE: MySQLServerPreparedStatement serverPreparedStatement = connectionSession.getServerPreparedStatementRegistry().getPreparedStatement(payload.getByteBuf().getIntLE(payload.getByteBuf().readerIndex())); return new MySQLComStmtExecutePacket(payload, serverPreparedStatement.getSqlStatementContext().getSqlStatement().getParameterCount()); case COM_STMT_SEND_LONG_DATA: return new MySQLComStmtSendLongDataPacket(payload); case COM_STMT_RESET: return new MySQLComStmtResetPacket(payload); case COM_STMT_CLOSE: return new MySQLComStmtClosePacket(payload); case COM_SET_OPTION: return new MySQLComSetOptionPacket(payload); case COM_PING: return new MySQLComPingPacket(); case COM_RESET_CONNECTION: return new MySQLComResetConnectionPacket(); default: return new MySQLUnsupportedCommandPacket(commandPacketType); } }
@Test void assertNewInstanceWithComDropDbPacket() { assertThat(MySQLCommandPacketFactory.newInstance(MySQLCommandPacketType.COM_DROP_DB, payload, connectionSession), instanceOf(MySQLUnsupportedCommandPacket.class)); }
public byte[] encode(String val, String delimiters) { return codecs[0].encode(val); }
@Test public void testEncodeKoreanPersonName() { assertArrayEquals(KOREAN_PERSON_NAME_BYTES, ksx1001().encode(KOREAN_PERSON_NAME, PN_DELIMS)); }
public static Collection<AndPredicate> getAndPredicates(final ExpressionSegment expression) { Collection<AndPredicate> result = new LinkedList<>(); extractAndPredicates(result, expression); return result; }
@Test void assertExtractAndPredicatesAndCondition() { ColumnSegment columnSegment1 = new ColumnSegment(28, 35, new IdentifierValue("order_id")); ParameterMarkerExpressionSegment parameterMarkerExpressionSegment1 = new ParameterMarkerExpressionSegment(39, 39, 0); ExpressionSegment leftExpressionSegment = new BinaryOperationExpression(28, 39, columnSegment1, parameterMarkerExpressionSegment1, "=", "order_id=?"); ColumnSegment columnSegment2 = new ColumnSegment(45, 50, new IdentifierValue("status")); ParameterMarkerExpressionSegment parameterMarkerExpressionSegment2 = new ParameterMarkerExpressionSegment(54, 54, 1); ExpressionSegment rightExpressionSegment = new BinaryOperationExpression(28, 39, columnSegment2, parameterMarkerExpressionSegment2, "=", "status=?"); BinaryOperationExpression expression = new BinaryOperationExpression(28, 54, leftExpressionSegment, rightExpressionSegment, "AND", "order_id=? AND status=?"); Collection<AndPredicate> actual = ExpressionExtractUtils.getAndPredicates(expression); assertThat(actual.size(), is(1)); AndPredicate andPredicate = actual.iterator().next(); assertThat(andPredicate.getPredicates().size(), is(2)); Iterator<ExpressionSegment> iterator = andPredicate.getPredicates().iterator(); assertThat(iterator.next(), is(leftExpressionSegment)); assertThat(iterator.next(), is(rightExpressionSegment)); }
@Override public int getMaxParallelism() { return parallelismInfo.getMaxParallelism(); }
@Test void testFallingBackToDefaultMaxParallelism() throws Exception { final int defaultMaxParallelism = 13; final ExecutionJobVertex ejv = createDynamicExecutionJobVertex(-1, -1, defaultMaxParallelism); assertThat(ejv.getMaxParallelism()).isEqualTo(defaultMaxParallelism); }
public static Resumed resumed(XmlPullParser parser) throws XmlPullParserException, IOException { ParserUtils.assertAtStartTag(parser); long h = ParserUtils.getLongAttribute(parser, "h"); String previd = parser.getAttributeValue("", "previd"); parser.next(); ParserUtils.assertAtEndTag(parser); return new Resumed(h, previd); }
@Test public void testParseResumed() throws Exception { long handledPackets = 42; String previousID = "zid615d9"; String resumedStanza = XMLBuilder.create("resumed") .a("xmlns", "urn:xmpp:sm:3") .a("h", String.valueOf(handledPackets)) .a("previd", previousID) .asString(outputProperties); StreamManagement.Resumed resumedPacket = ParseStreamManagement.resumed( PacketParserUtils.getParserFor(resumedStanza)); assertNotNull(resumedPacket); assertEquals(handledPackets, resumedPacket.getHandledCount()); assertEquals(previousID, resumedPacket.getPrevId()); }
static void divide(Slice dividend, int dividendScaleFactor, Slice divisor, int divisorScaleFactor, Slice quotient, Slice remainder) { divide(getRawLong(dividend, 0), getRawLong(dividend, 1), dividendScaleFactor, getRawLong(divisor, 0), getRawLong(divisor, 1), divisorScaleFactor, quotient, remainder); }
@Test public void testDivide() { // simple cases assertDivideAllSigns("0", "10"); assertDivideAllSigns("5", "10"); assertDivideAllSigns("50", "100"); assertDivideAllSigns("99", "10"); assertDivideAllSigns("95", "10"); assertDivideAllSigns("91", "10"); assertDivideAllSigns("1000000000000000000000000", "10"); assertDivideAllSigns("1000000000000000000000000", "3"); assertDivideAllSigns("1000000000000000000000000", "9"); assertDivideAllSigns("1000000000000000000000000", "100000000000000000000000"); assertDivideAllSigns("1000000000000000000000000", "333333333333333333333333"); assertDivideAllSigns("1000000000000000000000000", "111111111111111111111111"); // dividend < divisor assertDivideAllSigns(new int[] {4, 3, 2, 0}, new int[] {4, 3, 2, 1}); assertDivideAllSigns(new int[] {4, 3, 0, 0}, new int[] {4, 3, 2, 0}); assertDivideAllSigns(new int[] {4, 0, 0, 0}, new int[] {4, 3, 0, 0}); assertDivideAllSigns(new int[] {0, 0, 0, 0}, new int[] {4, 0, 0, 0}); // different lengths assertDivideAllSigns(new int[] {1423957378, 1765820914, 0xFFFFFFFF, 0}, new int[] {4, 0x0000FFFF, 0, 0}); assertDivideAllSigns(new int[] {1423957378, 1765820914, 0xFFFFFFFF, 0}, new int[] {2042457708, 0, 0, 0}); assertDivideAllSigns(new int[] {1423957378, -925263858, 0, 0}, new int[] {2042457708, 0, 0, 0}); assertDivideAllSigns(new int[] {0xFFFFFFFF, 0, 0, 0}, new int[] {2042457708, 0, 0, 0}); // single int divisor assertDivideAllSigns(new int[] {1423957378, -1444436990, -925263858, 1106345725}, new int[] {2042457708, 0, 0, 0}); assertDivideAllSigns(new int[] {0, 0xF7000000, 0, 0x39000000}, new int[] {-1765820914, 0, 0, 0}); assertDivideAllSigns(new int[] {-1981284352, -1966660860, 0, 0}, new int[] {-1794967296, 0, 0, 0}); // normalization scale = 1 assertDivideAllSigns(new int[] {0x0FF00210, 0xF7001230, 0xFB00AC00, 0x39003500}, new int[] {-1765820914, 2042457708, 0xFFFFFFFF, 0}); assertDivideAllSigns(new int[] {0x0FF00210, 0xF7001230, 0xFB00AC00, 0x39003500}, new int[] {-1765820914, 0xFFFFFF00, 0, 0}); assertDivideAllSigns(new int[] {0x0FF00210, 0xF7001230, 0xFB00AC00, 0x39003500}, new int[] {-1765820914, 0xFF000000, 0, 0}); // normalization scale > 1 assertDivideAllSigns(new int[] {0x0FF00210, 0xF7001230, 0xFB00AC00, 0x39003500}, new int[] {-1765820914, 2042457708, 0xFFFFFFFF, 0x7FFFFFFF}); assertDivideAllSigns(new int[] {0x0FF00210, 0xF7001230, 0xFB00AC00, 0x39003500}, new int[] {-1765820914, 2042457708, 0x4FFFFFFF, 0}); assertDivideAllSigns(new int[] {0x0FF00210, 0xF7001230, 0xFB00AC00, 0x39003500}, new int[] {-1765820914, 2042457708, 0x0000FFFF, 0}); // normalization scale signed overflow assertDivideAllSigns(new int[] {1, 1, 1, 0x7FFFFFFF}, new int[] {0xFFFFFFFF, 1, 0, 0}); // u2 = v1 assertDivideAllSigns(new int[] {0, 0x8FFFFFFF, 0x8FFFFFFF, 0}, new int[] {0xFFFFFFFF, 0x8FFFFFFF, 0, 0}); // qhat is greater than q by 1 assertDivideAllSigns(new int[] {1, 1, 0xFFFFFFFF, 0}, new int[] {0xFFFFFFFF, 0x7FFFFFFF, 0, 0}); // qhat is greater than q by 2 assertDivideAllSigns(new int[] {1, 1, 0xFFFFFFFF, 0}, new int[] {0xFFFFFFFF, 0x7FFFFFFF, 0, 0}); // overflow after multiplyAndSubtract assertDivideAllSigns(new int[] {0x00000003, 0x00000000, 0x80000000, 0}, new int[] {0x00000001, 0x00000000, 0x20000000, 0}); assertDivideAllSigns(new int[] {0x00000003, 0x00000000, 0x00008000, 0}, new int[] {0x00000001, 0x00000000, 0x00002000, 0}); assertDivideAllSigns(new int[] {0, 0, 0x00008000, 0x00007fff}, new int[] {1, 0, 0x00008000, 0}); // test cases from http://www.hackersdelight.org/hdcodetxt/divmnu64.c.txt // license: http://www.hackersdelight.org/permissions.htm assertDivideAllSigns(new int[] {3, 0, 0, 0}, new int[] {2, 0, 0, 0}); assertDivideAllSigns(new int[] {3, 0, 0, 0}, new int[] {3, 0, 0, 0}); assertDivideAllSigns(new int[] {3, 0, 0, 0}, new int[] {4, 0, 0, 0}); assertDivideAllSigns(new int[] {3, 0, 0, 0}, new int[] {0xffffffff, 0, 0, 0}); assertDivideAllSigns(new int[] {0xffffffff, 0, 0, 0}, new int[] {1, 0, 0, 0}); assertDivideAllSigns(new int[] {0xffffffff, 0, 0, 0}, new int[] {0xffffffff, 0, 0, 0}); assertDivideAllSigns(new int[] {0xffffffff, 0, 0, 0}, new int[] {3, 0, 0, 0}); assertDivideAllSigns(new int[] {0xffffffff, 0xffffffff, 0, 0}, new int[] {1, 0, 0, 0}); assertDivideAllSigns(new int[] {0xffffffff, 0xffffffff, 0, 0}, new int[] {0xffffffff, 0, 0, 0}); assertDivideAllSigns(new int[] {0xffffffff, 0xfffffffe, 0, 0}, new int[] {0xffffffff, 0, 0, 0}); assertDivideAllSigns(new int[] {0x00005678, 0x00001234, 0, 0}, new int[] {0x00009abc, 0, 0, 0}); assertDivideAllSigns(new int[] {0, 0, 0, 0}, new int[] {0, 1, 0, 0}); assertDivideAllSigns(new int[] {0, 7, 0, 0}, new int[] {0, 3, 0, 0}); assertDivideAllSigns(new int[] {5, 7, 0, 0}, new int[] {0, 3, 0, 0}); assertDivideAllSigns(new int[] {0, 6, 0, 0}, new int[] {0, 2, 0, 0}); assertDivideAllSigns(new int[] {0x80000000, 0, 0, 0}, new int[] {0x40000001, 0, 0, 0}); assertDivideAllSigns(new int[] {0x00000000, 0x80000000, 0, 0}, new int[] {0x40000001, 0, 0, 0}); assertDivideAllSigns(new int[] {0x00000000, 0x80000000, 0, 0}, new int[] {0x00000001, 0x40000000, 0, 0}); assertDivideAllSigns(new int[] {0x0000789a, 0x0000bcde, 0, 0}, new int[] {0x0000789a, 0x0000bcde, 0, 0}); assertDivideAllSigns(new int[] {0x0000789b, 0x0000bcde, 0, 0}, new int[] {0x0000789a, 0x0000bcde, 0, 0}); assertDivideAllSigns(new int[] {0x00007899, 0x0000bcde, 0, 0}, new int[] {0x0000789a, 0x0000bcde, 0, 0}); assertDivideAllSigns(new int[] {0x0000ffff, 0x0000ffff, 0, 0}, new int[] {0x0000ffff, 0x0000ffff, 0, 0}); assertDivideAllSigns(new int[] {0x0000ffff, 0x0000ffff, 0, 0}, new int[] {0x00000000, 0x0000ffff, 0, 0}); assertDivideAllSigns(new int[] {0x000089ab, 0x00004567, 0x00000123, 0}, new int[] {0x00000000, 0x00000001, 0, 0}); assertDivideAllSigns(new int[] {0x000089ab, 0x00004567, 0x00000123, 0}, new int[] {0x00000000, 0x00000001, 0, 0}); assertDivideAllSigns(new int[] {0x00000000, 0x0000fffe, 0x00008000, 0}, new int[] {0x0000ffff, 0x00008000, 0, 0}); assertDivideAllSigns(new int[] {0x00000003, 0x00000000, 0x80000000, 0}, new int[] {0x00000001, 0x00000000, 0x20000000, 0}); assertDivideAllSigns(new int[] {0x00000003, 0x00000000, 0x00008000, 0}, new int[] {0x00000001, 0x00000000, 0x00002000, 0}); assertDivideAllSigns(new int[] {0, 0, 0x00008000, 0x00007fff}, new int[] {1, 0, 0x00008000, 0}); assertDivideAllSigns(new int[] {0, 0x0000fffe, 0, 0x00008000}, new int[] {0x0000ffff, 0, 0x00008000, 0}); assertDivideAllSigns(new int[] {0, 0xfffffffe, 0, 0x80000000}, new int[] {0x0000ffff, 0, 0x80000000, 0}); assertDivideAllSigns(new int[] {0, 0xfffffffe, 0, 0x80000000}, new int[] {0xffffffff, 0, 0x80000000, 0}); // with rescale assertDivideAllSigns("100000000000000000000000", 10, "111111111111111111111111", 10); assertDivideAllSigns("100000000000000000000000", 10, "111111111111", 22); assertDivideAllSigns("99999999999999999999999999999999999999", 37, "99999999999999999999999999999999999999", 37); assertDivideAllSigns("99999999999999999999999999999999999999", 2, "99999999999999999999999999999999999999", 1); assertDivideAllSigns("99999999999999999999999999999999999999", 37, "9", 37);
assertDivideAllSigns("99999999999999999999999999999999999999", 37, "1", 37); assertDivideAllSigns("11111111111111111111111111111111111111", 37, "2", 37); assertDivideAllSigns("11111111111111111111111111111111111111", 37, "2", 1); assertDivideAllSigns("97764425639372288753711864842425458618", 36, "32039006229599111733094986468789901155", 0); assertDivideAllSigns("34354576602352622842481633786816220283", 0, "31137583115118564930544829855652258045", 0); assertDivideAllSigns("96690614752287690630596513604374991473", 0, "10039352042372909488692220528497751229", 0); assertDivideAllSigns("87568357716090115374029040878755891076", 0, "46106713604991337798209343815577148589", 0); }
@Override public TimestampedKeyValueStore<K, V> build() { KeyValueStore<Bytes, byte[]> store = storeSupplier.get(); if (!(store instanceof TimestampedBytesStore)) { if (store.persistent()) { store = new KeyValueToTimestampedKeyValueByteStoreAdapter(store); } else { store = new InMemoryTimestampedKeyValueStoreMarker(store); } } return new MeteredTimestampedKeyValueStore<>( maybeWrapCaching(maybeWrapLogging(store)), storeSupplier.metricsScope(), time, keySerde, valueSerde); }
@Test public void shouldHaveChangeLoggingStoreWhenLoggingEnabled() { setUp(); final TimestampedKeyValueStore<String, String> store = builder .withLoggingEnabled(Collections.emptyMap()) .build(); final StateStore wrapped = ((WrappedStateStore) store).wrapped(); assertThat(store, instanceOf(MeteredTimestampedKeyValueStore.class)); assertThat(wrapped, instanceOf(ChangeLoggingTimestampedKeyValueBytesStore.class)); assertThat(((WrappedStateStore) wrapped).wrapped(), CoreMatchers.equalTo(inner)); }
public Canvas canvas() { Canvas canvas = new Canvas(getLowerBound(), getUpperBound()); canvas.add(this); if (name != null) { canvas.setTitle(name); } return canvas; }
@Test public void testScatter() throws Exception { System.out.println("Scatter"); var canvas = ScatterPlot.of(iris, "sepallength", "sepalwidth", "class", '*').canvas(); canvas.setAxisLabels("sepallength", "sepalwidth"); canvas.window(); }
public MessagesRequestSpec simpleQueryParamsToFullRequestSpecification(final String query, final Set<String> streams, final String timerangeKeyword, final List<String> fields, final String sort, final SortSpec.Direction sortOrder, final int from, final int size) { return new MessagesRequestSpec(query, streams, timerangeParser.parseTimeRange(timerangeKeyword), sort, sortOrder, from, size, fields); }
@Test void usesProperDefaults() { AggregationRequestSpec aggregationRequestSpec = toTest.simpleQueryParamsToFullRequestSpecification(null, null, null, List.of("http_method"), null); assertThat(aggregationRequestSpec).isEqualTo(new AggregationRequestSpec( "*", Set.of(), DEFAULT_TIMERANGE, List.of(new Grouping("http_method")), List.of(new Metric("count", null)) ) ); aggregationRequestSpec = toTest.simpleQueryParamsToFullRequestSpecification(null, null, null, List.of("http_method"), List.of()); assertThat(aggregationRequestSpec).isEqualTo(new AggregationRequestSpec( "*", Set.of(), DEFAULT_TIMERANGE, List.of(new Grouping("http_method")), List.of(new Metric("count", null)) ) ); final MessagesRequestSpec messagesRequestSpec = toTest.simpleQueryParamsToFullRequestSpecification(null, null, null, null, null, null, 0, 10); assertThat(messagesRequestSpec).isEqualTo(new MessagesRequestSpec( "*", Set.of(), DEFAULT_TIMERANGE, DEFAULT_SORT, DEFAULT_SORT_ORDER, 0, 10, DEFAULT_FIELDS ) ); }
@Override public Result invoke(Invocation invocation) throws RpcException { Result result; String value = getUrl().getMethodParameter( RpcUtils.getMethodName(invocation), MOCK_KEY, Boolean.FALSE.toString()) .trim(); if (ConfigUtils.isEmpty(value)) { // no mock result = this.invoker.invoke(invocation); } else if (value.startsWith(FORCE_KEY)) { if (logger.isWarnEnabled()) { logger.warn( CLUSTER_FAILED_MOCK_REQUEST, "force mock", "", "force-mock: " + RpcUtils.getMethodName(invocation) + " force-mock enabled , url : " + getUrl()); } // force:direct mock result = doMockInvoke(invocation, null); } else { // fail-mock try { result = this.invoker.invoke(invocation); // fix:#4585 if (result.getException() != null && result.getException() instanceof RpcException) { RpcException rpcException = (RpcException) result.getException(); if (rpcException.isBiz()) { throw rpcException; } else { result = doMockInvoke(invocation, rpcException); } } } catch (RpcException e) { if (e.isBiz()) { throw e; } if (logger.isWarnEnabled()) { logger.warn( CLUSTER_FAILED_MOCK_REQUEST, "failed to mock invoke", "", "fail-mock: " + RpcUtils.getMethodName(invocation) + " fail-mock enabled , url : " + getUrl(), e); } result = doMockInvoke(invocation, e); } } return result; }
@Test void testMockInvokerFromOverride_Invoke_force_throw() { URL url = URL.valueOf("remote://1.2.3.4/" + IHelloService.class.getName()) .addParameter( REFER_KEY, URL.encode( PATH_KEY + "=" + IHelloService.class.getName() + "&" + "getBoolean2.mock=force:throw ")) .addParameter("invoke_return_error", "true"); Invoker<IHelloService> cluster = getClusterInvoker(url); // Configured with mock RpcInvocation invocation = new RpcInvocation(); invocation.setMethodName("getBoolean2"); try { cluster.invoke(invocation); Assertions.fail(); } catch (RpcException e) { Assertions.assertFalse(e.isBiz(), "not custom exception"); } }
@Override public boolean isDirectory(URI uri) throws IOException { try { String prefix = normalizeToDirectoryPrefix(uri); if (prefix.equals(DELIMITER)) { return true; } ListObjectsV2Request listObjectsV2Request = ListObjectsV2Request.builder().bucket(uri.getHost()).prefix(prefix).maxKeys(2).build(); ListObjectsV2Response listObjectsV2Response = _s3Client.listObjectsV2(listObjectsV2Request); return listObjectsV2Response.hasContents(); } catch (NoSuchKeyException e) { LOGGER.error("Could not get directory entry for {}", uri); return false; } }
@Test public void testIsDirectory() throws Exception { String[] originalFiles = new String[]{"a-dir.txt", "b-dir.txt", "c-dir.txt"}; String folder = "my-files-dir"; String childFolder = "my-files-dir-child"; for (String fileName : originalFiles) { String folderName = folder + DELIMITER + childFolder; createEmptyFile(folderName, fileName); } boolean isBucketDir = _s3PinotFS.isDirectory(URI.create(String.format(DIR_FORMAT, SCHEME, BUCKET))); boolean isDir = _s3PinotFS.isDirectory(URI.create(String.format(FILE_FORMAT, SCHEME, BUCKET, folder))); boolean isDirChild = _s3PinotFS.isDirectory( URI.create(String.format(FILE_FORMAT, SCHEME, BUCKET, folder + DELIMITER + childFolder))); boolean notIsDir = _s3PinotFS.isDirectory(URI.create( String.format(FILE_FORMAT, SCHEME, BUCKET, folder + DELIMITER + childFolder + DELIMITER + "a-delete.txt"))); Assert.assertTrue(isBucketDir); Assert.assertTrue(isDir); Assert.assertTrue(isDirChild); Assert.assertFalse(notIsDir); }
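A minimal standalone sketch of the same prefix-listing idea, assuming the AWS SDK v2 types already used by the focal method; the prefix normalization below is a simplified stand-in for normalizeToDirectoryPrefix, not the real implementation.

import java.net.URI;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.ListObjectsV2Request;

public class S3DirectoryCheckSketch {

    // A "directory" exists on S3 if at least one key starts with its prefix.
    static boolean isDirectory(S3Client s3, URI uri) {
        String prefix = uri.getPath().replaceAll("^/+", "");
        if (!prefix.isEmpty() && !prefix.endsWith("/")) {
            prefix += "/";
        }
        ListObjectsV2Request request = ListObjectsV2Request.builder()
                .bucket(uri.getHost())
                .prefix(prefix)
                .maxKeys(2)
                .build();
        return s3.listObjectsV2(request).hasContents();
    }
}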
public String process(final Expression expression) { return formatExpression(expression); }
@Test public void shouldGenerateCorrectCodeForComparisonWithNegativeNumbers() { // Given: final Expression expression = new ComparisonExpression( ComparisonExpression.Type.GREATER_THAN, COL3, new DoubleLiteral(-10.0) ); // When: final String javaExpression = sqlToJavaVisitor.process(expression); // Then: assertThat( javaExpression, equalTo( "((((Object)(((java.lang.Double) arguments.get(\"COL3\")))) == null || ((Object)(-1E1)) == null) ? false : (((java.lang.Double) arguments.get(\"COL3\")) > -1E1))")); }
public String doGetConfig(HttpServletRequest request, HttpServletResponse response, String dataId, String group, String tenant, String tag, String isNotify, String clientIp) throws IOException, ServletException { return doGetConfig(request, response, dataId, group, tenant, tag, isNotify, clientIp, false); }
@Test void testDoGetConfigFormalV2() throws Exception { String dataId = "dataId1234552333V2"; String group = "group"; String tenant = "tenant"; configCacheServiceMockedStatic.when(() -> ConfigCacheService.tryConfigReadLock(GroupKey2.getKey(dataId, group, tenant))) .thenReturn(1); //mock cache item . CacheItem cacheItem = new CacheItem("test"); cacheItem.setBeta(false); String md5 = "md5wertyui"; String content = "content345678"; cacheItem.getConfigCache().setMd5Utf8(md5); long ts = System.currentTimeMillis(); cacheItem.getConfigCache().setLastModifiedTs(ts); cacheItem.getConfigCache().setEncryptedDataKey("key2345678"); configCacheServiceMockedStatic.when(() -> ConfigCacheService.getContentCache(GroupKey.getKeyTenant(dataId, group, tenant))) .thenReturn(cacheItem); MockHttpServletRequest request = new MockHttpServletRequest(); MockHttpServletResponse response = new MockHttpServletResponse(); when(configRocksDbDiskService.getContent(dataId, group, tenant)).thenReturn(content); String actualValue = configServletInner.doGetConfig(request, response, dataId, group, tenant, null, "true", "localhost", true); assertEquals(JacksonUtils.toJson(Result.success(content)), response.getContentAsString()); assertEquals(HttpServletResponse.SC_OK + "", actualValue); assertEquals(md5, response.getHeader(CONTENT_MD5)); assertEquals("key2345678", response.getHeader("Encrypted-Data-Key")); assertEquals(MediaType.APPLICATION_JSON, response.getHeader(HttpHeaderConsts.CONTENT_TYPE)); }
public List<PartitionInfo> getTopicMetadata(String topic, boolean allowAutoTopicCreation, Timer timer) { MetadataRequest.Builder request = new MetadataRequest.Builder(Collections.singletonList(topic), allowAutoTopicCreation); Map<String, List<PartitionInfo>> topicMetadata = getTopicMetadata(request, timer); return topicMetadata.get(topic); }
@Test public void testGetTopicMetadataOfflinePartitions() { buildFetcher(); assignFromUser(singleton(tp0)); MetadataResponse originalResponse = newMetadataResponse(Errors.NONE); //baseline ok response //create a response based on the above one with all partitions being leaderless List<MetadataResponse.TopicMetadata> altTopics = new ArrayList<>(); for (MetadataResponse.TopicMetadata item : originalResponse.topicMetadata()) { List<MetadataResponse.PartitionMetadata> partitions = item.partitionMetadata(); List<MetadataResponse.PartitionMetadata> altPartitions = new ArrayList<>(); for (MetadataResponse.PartitionMetadata p : partitions) { altPartitions.add(new MetadataResponse.PartitionMetadata( p.error, p.topicPartition, Optional.empty(), //no leader Optional.empty(), p.replicaIds, p.inSyncReplicaIds, p.offlineReplicaIds )); } MetadataResponse.TopicMetadata alteredTopic = new MetadataResponse.TopicMetadata( item.error(), item.topic(), item.isInternal(), altPartitions ); altTopics.add(alteredTopic); } Node controller = originalResponse.controller(); MetadataResponse altered = RequestTestUtils.metadataResponse( originalResponse.brokers(), originalResponse.clusterId(), controller != null ? controller.id() : MetadataResponse.NO_CONTROLLER_ID, altTopics); client.prepareResponse(altered); List<PartitionInfo> topicMetadata = topicMetadataFetcher.getTopicMetadata(topicName, false, time.timer(5000L)); assertNotNull(topicMetadata); assertFalse(topicMetadata.isEmpty()); //noinspection ConstantConditions assertEquals(metadata.fetch().partitionCountForTopic(topicName).longValue(), topicMetadata.size()); }
@ApiOperation(value = "Delete a group", tags = { "Groups" }, code = 204) @ApiResponses(value = { @ApiResponse(code = 204, message = "Indicates the group was found and has been deleted. Response-body is intentionally empty."), @ApiResponse(code = 404, message = "Indicates the requested group does not exist.") }) @DeleteMapping("/identity/groups/{groupId}") @ResponseStatus(HttpStatus.NO_CONTENT) public void deleteGroup(@ApiParam(name = "groupId") @PathVariable String groupId) { Group group = getGroupFromRequest(groupId); if (restApiInterceptor != null) { restApiInterceptor.deleteGroup(group); } identityService.deleteGroup(group.getId()); }
@Test public void testDeleteGroup() throws Exception { try { Group testGroup = identityService.newGroup("testgroup"); testGroup.setName("Test group"); testGroup.setType("Test type"); identityService.saveGroup(testGroup); closeResponse(executeRequest(new HttpDelete(SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_GROUP, "testgroup")), HttpStatus.SC_NO_CONTENT)); assertThat(identityService.createGroupQuery().groupId("testgroup").singleResult()).isNull(); } finally { try { identityService.deleteGroup("testgroup"); } catch (Throwable ignore) { // Ignore, since the group may not have been created in the test // or already deleted } } }
public void setCalendar( int recordsFilter, GregorianCalendar startDate, GregorianCalendar endDate ) throws KettleException { this.startDate = startDate; this.endDate = endDate; this.recordsFilter = recordsFilter; if ( this.startDate == null || this.endDate == null ) { throw new KettleException( BaseMessages.getString( PKG, "SalesforceInput.Error.EmptyStartDateOrEndDate" ) ); } if ( this.startDate.getTime().compareTo( this.endDate.getTime() ) >= 0 ) { throw new KettleException( BaseMessages.getString( PKG, "SalesforceInput.Error.WrongDates" ) ); } // Calculate difference in days long diffDays = ( this.endDate.getTime().getTime() - this.startDate.getTime().getTime() ) / ( 24 * 60 * 60 * 1000 ); if ( diffDays > 30 ) { throw new KettleException( BaseMessages.getString( PKG, "SalesforceInput.Error.StartDateTooOlder" ) ); } }
@Test public void testSetCalendar() { SalesforceConnection conn = mock( SalesforceConnection.class, Mockito.CALLS_REAL_METHODS ); // Test valid data try { conn.setCalendar( new Random().nextInt( SalesforceConnectionUtils.recordsFilterDesc.length ), new GregorianCalendar( 2016, Calendar.JANUARY, 1 ), new GregorianCalendar( 2016, Calendar.JANUARY, 31 ) ); // No errors detected } catch ( KettleException e ) { fail(); } // Test reversed dates (should fail) try { conn.setCalendar( new Random().nextInt( SalesforceConnectionUtils.recordsFilterDesc.length ), new GregorianCalendar( 2016, Calendar.JANUARY, 31 ), new GregorianCalendar( 2016, Calendar.JANUARY, 1 ) ); fail(); } catch ( KettleException expected ) { // Ignore, expected result } // Test null start date (should fail) try { conn.setCalendar( new Random().nextInt( SalesforceConnectionUtils.recordsFilterDesc.length ), null, new GregorianCalendar( 2016, Calendar.JANUARY, 31 ) ); fail(); } catch ( KettleException expected ) { // Ignore, expected result } // Test null end date (should fail) try { conn.setCalendar( new Random().nextInt( SalesforceConnectionUtils.recordsFilterDesc.length ), new GregorianCalendar( 2016, Calendar.JANUARY, 1 ), null ); fail(); } catch ( KettleException expected ) { // Ignore, expected result } }
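The test above never exercises the 30-day window check in setCalendar; a hypothetical extra case reusing the same mocking pattern could look like this (the dates are roughly 74 days apart, so the call must fail).

// Hypothetical additional test; filter index 0 is assumed to be valid
@Test
public void testSetCalendarRangeWiderThan30Days() {
    SalesforceConnection conn = mock( SalesforceConnection.class, Mockito.CALLS_REAL_METHODS );
    try {
        conn.setCalendar( 0, new GregorianCalendar( 2016, Calendar.JANUARY, 1 ),
            new GregorianCalendar( 2016, Calendar.MARCH, 15 ) );
        fail();
    } catch ( KettleException expected ) {
        // Expected: ranges wider than 30 days are rejected
    }
}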
@Override public String getPrivProtocol() { return privProtocol; }
@Test public void testGetPrivProtocol() { assertEquals(privProtocol, defaultSnmpv3Device.getPrivProtocol()); }
public static KeyStore loadKeyStore(File certificateChainFile, File privateKeyFile, String keyPassword) throws IOException, GeneralSecurityException { PrivateKey key; try { key = createPrivateKey(privateKeyFile, keyPassword); } catch (OperatorCreationException | IOException | GeneralSecurityException | PKCSException e) { throw new GeneralSecurityException("Private Key issues", e); } List<X509Certificate> certificateChain = readCertificateChain(certificateChainFile); if (certificateChain.isEmpty()) { throw new CertificateException("Certificate file does not contain any certificates: " + certificateChainFile); } KeyStore keyStore = KeyStore.getInstance("JKS"); keyStore.load(null, null); keyStore.setKeyEntry("key", key, keyPassword.toCharArray(), certificateChain.stream().toArray(Certificate[]::new)); return keyStore; }
@Test void testParsingPKCS8WithWrongPassword() throws IOException, GeneralSecurityException { assertThrows(GeneralSecurityException.class, () -> { PEMImporter.loadKeyStore(pemCert, privkeyWithPasswordPKCS8, "nottest"); }); }
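A small usage sketch, assuming PEM files on disk: the keystore returned by loadKeyStore can seed a standard JSSE KeyManagerFactory. The file paths and the "changeit" password are placeholders, not values from the test fixture.

import java.io.File;
import java.security.KeyStore;
import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.SSLContext;

public class PemKeyStoreUsageSketch {
    public static void main(String[] args) throws Exception {
        // loadKeyStore is the focal method above; it stores the key under alias "key"
        KeyStore keyStore = PEMImporter.loadKeyStore(
                new File("server-chain.pem"), new File("server-key.pem"), "changeit");
        KeyManagerFactory keyManagerFactory =
                KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
        keyManagerFactory.init(keyStore, "changeit".toCharArray());
        SSLContext sslContext = SSLContext.getInstance("TLS");
        sslContext.init(keyManagerFactory.getKeyManagers(), null, null);
        System.out.println("TLS context ready: " + sslContext.getProtocol());
    }
}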
@Override public List<PrivilegedOperation> bootstrap(Configuration configuration) throws ResourceHandlerException { conf = configuration; cGroupsHandler .initializeCGroupController(CGroupsHandler.CGroupController.NET_CLS); this.tagMappingManager = createNetworkTagMappingManager(conf); this.tagMappingManager.initialize(conf); return null; }
@Test public void testBootstrap() { NetworkPacketTaggingHandlerImpl handlerImpl = createNetworkPacketTaggingHandlerImpl(); try { handlerImpl.bootstrap(conf); verify(cGroupsHandlerMock).initializeCGroupController( eq(CGroupsHandler.CGroupController.NET_CLS)); verifyNoMoreInteractions(cGroupsHandlerMock); } catch (ResourceHandlerException e) { LOG.error("Unexpected exception: " + e); Assert.fail("Caught unexpected ResourceHandlerException!"); } }
@Override public EncodedMessage transform(ActiveMQMessage message) throws Exception { if (message == null) { return null; } long messageFormat = 0; Header header = null; Properties properties = null; Map<Symbol, Object> daMap = null; Map<Symbol, Object> maMap = null; Map<String,Object> apMap = null; Map<Object, Object> footerMap = null; Section body = convertBody(message); if (message.isPersistent()) { if (header == null) { header = new Header(); } header.setDurable(true); } byte priority = message.getPriority(); if (priority != Message.DEFAULT_PRIORITY) { if (header == null) { header = new Header(); } header.setPriority(UnsignedByte.valueOf(priority)); } String type = message.getType(); if (type != null) { if (properties == null) { properties = new Properties(); } properties.setSubject(type); } MessageId messageId = message.getMessageId(); if (messageId != null) { if (properties == null) { properties = new Properties(); } properties.setMessageId(getOriginalMessageId(message)); } ActiveMQDestination destination = message.getDestination(); if (destination != null) { if (properties == null) { properties = new Properties(); } properties.setTo(destination.getQualifiedName()); if (maMap == null) { maMap = new HashMap<>(); } maMap.put(JMS_DEST_TYPE_MSG_ANNOTATION, destinationType(destination)); } ActiveMQDestination replyTo = message.getReplyTo(); if (replyTo != null) { if (properties == null) { properties = new Properties(); } properties.setReplyTo(replyTo.getQualifiedName()); if (maMap == null) { maMap = new HashMap<>(); } maMap.put(JMS_REPLY_TO_TYPE_MSG_ANNOTATION, destinationType(replyTo)); } String correlationId = message.getCorrelationId(); if (correlationId != null) { if (properties == null) { properties = new Properties(); } try { properties.setCorrelationId(AMQPMessageIdHelper.INSTANCE.toIdObject(correlationId)); } catch (AmqpProtocolException e) { properties.setCorrelationId(correlationId); } } long expiration = message.getExpiration(); if (expiration != 0) { long ttl = expiration - System.currentTimeMillis(); if (ttl < 0) { ttl = 1; } if (header == null) { header = new Header(); } header.setTtl(new UnsignedInteger((int) ttl)); if (properties == null) { properties = new Properties(); } properties.setAbsoluteExpiryTime(new Date(expiration)); } long timeStamp = message.getTimestamp(); if (timeStamp != 0) { if (properties == null) { properties = new Properties(); } properties.setCreationTime(new Date(timeStamp)); } // JMSX Message Properties int deliveryCount = message.getRedeliveryCounter(); if (deliveryCount > 0) { if (header == null) { header = new Header(); } header.setDeliveryCount(UnsignedInteger.valueOf(deliveryCount)); } String userId = message.getUserID(); if (userId != null) { if (properties == null) { properties = new Properties(); } properties.setUserId(new Binary(userId.getBytes(StandardCharsets.UTF_8))); } String groupId = message.getGroupID(); if (groupId != null) { if (properties == null) { properties = new Properties(); } properties.setGroupId(groupId); } int groupSequence = message.getGroupSequence(); if (groupSequence > 0) { if (properties == null) { properties = new Properties(); } properties.setGroupSequence(UnsignedInteger.valueOf(groupSequence)); } final Map<String, Object> entries; try { entries = message.getProperties(); } catch (IOException e) { throw JMSExceptionSupport.create(e); } for (Map.Entry<String, Object> entry : entries.entrySet()) { String key = entry.getKey(); Object value = entry.getValue(); if (key.startsWith(JMS_AMQP_PREFIX)) { if (key.startsWith(NATIVE, JMS_AMQP_PREFIX_LENGTH)) { // skip transformer appended properties continue; } else if (key.startsWith(ORIGINAL_ENCODING, JMS_AMQP_PREFIX_LENGTH)) { // skip transformer appended properties continue; } else if (key.startsWith(MESSAGE_FORMAT, JMS_AMQP_PREFIX_LENGTH)) { messageFormat = (long) TypeConversionSupport.convert(entry.getValue(), Long.class); continue; } else if (key.startsWith(HEADER, JMS_AMQP_PREFIX_LENGTH)) { if (header == null) { header = new Header(); } continue; } else if (key.startsWith(PROPERTIES, JMS_AMQP_PREFIX_LENGTH)) { if (properties == null) { properties = new Properties(); } continue; } else if (key.startsWith(MESSAGE_ANNOTATION_PREFIX, JMS_AMQP_PREFIX_LENGTH)) { if (maMap == null) { maMap = new HashMap<>(); } String name = key.substring(JMS_AMQP_MESSAGE_ANNOTATION_PREFIX.length()); maMap.put(Symbol.valueOf(name), value); continue; } else if (key.startsWith(FIRST_ACQUIRER, JMS_AMQP_PREFIX_LENGTH)) { if (header == null) { header = new Header(); } header.setFirstAcquirer((boolean) TypeConversionSupport.convert(value, Boolean.class)); continue; } else if (key.startsWith(CONTENT_TYPE, JMS_AMQP_PREFIX_LENGTH)) { if (properties == null) { properties = new Properties(); } properties.setContentType(Symbol.getSymbol((String) TypeConversionSupport.convert(value, String.class))); continue; } else if (key.startsWith(CONTENT_ENCODING, JMS_AMQP_PREFIX_LENGTH)) { if (properties == null) { properties = new Properties(); } properties.setContentEncoding(Symbol.getSymbol((String) TypeConversionSupport.convert(value, String.class))); continue; } else if (key.startsWith(REPLYTO_GROUP_ID, JMS_AMQP_PREFIX_LENGTH)) { if (properties == null) { properties = new Properties(); } properties.setReplyToGroupId((String) TypeConversionSupport.convert(value, String.class)); continue; } else if (key.startsWith(DELIVERY_ANNOTATION_PREFIX, JMS_AMQP_PREFIX_LENGTH)) { if (daMap == null) { daMap = new HashMap<>(); } String name = key.substring(JMS_AMQP_DELIVERY_ANNOTATION_PREFIX.length()); daMap.put(Symbol.valueOf(name), value); continue; } else if (key.startsWith(FOOTER_PREFIX, JMS_AMQP_PREFIX_LENGTH)) { if (footerMap == null) { footerMap = new HashMap<>(); } String name = key.substring(JMS_AMQP_FOOTER_PREFIX.length()); footerMap.put(Symbol.valueOf(name), value); continue; } } else if (key.startsWith(AMQ_SCHEDULED_MESSAGE_PREFIX )) { // strip off the scheduled message properties continue; } // The property didn't map into any other slot so we store it in the // Application Properties section of the message.
if (apMap == null) { apMap = new HashMap<>(); } apMap.put(key, value); int messageType = message.getDataStructureType(); if (messageType == CommandTypes.ACTIVEMQ_MESSAGE) { // Type of command to recognize advisory message Object data = message.getDataStructure(); if(data != null) { apMap.put("ActiveMqDataStructureType", data.getClass().getSimpleName()); } } } final AmqpWritableBuffer buffer = new AmqpWritableBuffer(); encoder.setByteBuffer(buffer); if (header != null) { encoder.writeObject(header); } if (daMap != null) { encoder.writeObject(new DeliveryAnnotations(daMap)); } if (maMap != null) { encoder.writeObject(new MessageAnnotations(maMap)); } if (properties != null) { encoder.writeObject(properties); } if (apMap != null) { encoder.writeObject(new ApplicationProperties(apMap)); } if (body != null) { encoder.writeObject(body); } if (footerMap != null) { encoder.writeObject(new Footer(footerMap)); } return new EncodedMessage(messageFormat, buffer.getArray(), 0, buffer.getArrayLength()); }
@Test public void testConvertTextMessageToAmqpMessageWithNoBody() throws Exception { ActiveMQTextMessage outbound = createTextMessage(); outbound.onSend(); outbound.storeContent(); JMSMappingOutboundTransformer transformer = new JMSMappingOutboundTransformer(); EncodedMessage encoded = transformer.transform(outbound); assertNotNull(encoded); Message amqp = encoded.decode(); assertNotNull(amqp.getBody()); assertTrue(amqp.getBody() instanceof AmqpValue); assertNull(((AmqpValue) amqp.getBody()).getValue()); }
public static Object project(Schema source, Object record, Schema target) throws SchemaProjectorException { checkMaybeCompatible(source, target); if (source.isOptional() && !target.isOptional()) { if (target.defaultValue() != null) { if (record != null) { return projectRequiredSchema(source, record, target); } else { return target.defaultValue(); } } else { throw new SchemaProjectorException("Writer schema is optional, however, target schema does not provide a default value."); } } else { if (record != null) { return projectRequiredSchema(source, record, target); } else { return null; } } }
@Test public void testStructRemoveField() { Schema source = SchemaBuilder.struct() .field("field", Schema.INT32_SCHEMA) .field("field2", Schema.INT32_SCHEMA) .build(); Struct sourceStruct = new Struct(source); sourceStruct.put("field", 1); sourceStruct.put("field2", 234); Schema target = SchemaBuilder.struct() .field("field", Schema.INT32_SCHEMA) .build(); Struct targetStruct = (Struct) SchemaProjector.project(source, sourceStruct, target); assertEquals(1, targetStruct.get("field")); assertThrows(DataException.class, () -> targetStruct.get("field2"), "field2 is not part of the projected struct"); }
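The focal method also covers the optional-writer/required-reader branch, which this test does not touch; a minimal sketch of that branch, assuming Kafka Connect's SchemaBuilder API.

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;

public class ProjectionDefaultValueSketch {
    public static void main(String[] args) {
        Schema source = SchemaBuilder.int32().optional().build();
        Schema target = SchemaBuilder.int32().defaultValue(42).build();
        // With a null record, project() falls back to the target's default value.
        Object projected = SchemaProjector.project(source, null, target);
        System.out.println(projected); // prints 42
    }
}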
public static LocalRetryableExecution executeLocallyWithRetry(NodeEngine nodeEngine, Operation operation) { if (operation.getOperationResponseHandler() != null) { throw new IllegalArgumentException("Operation must not have a response handler set"); } if (!operation.returnsResponse()) { throw new IllegalArgumentException("Operation must return a response"); } if (operation.validatesTarget()) { throw new IllegalArgumentException("Operation must not validate the target"); } final LocalRetryableExecution execution = new LocalRetryableExecution(nodeEngine, operation); execution.run(); return execution; }
@Test(expected = IllegalArgumentException.class) public void executeLocallyWithRetryFailsWhenOperationDoesNotReturnResponse() { final Operation op = new Operation() { @Override public boolean returnsResponse() { return false; } }; executeLocallyWithRetry(null, op); }
@SuppressWarnings("argument") static Status runSqlLine( String[] args, @Nullable InputStream inputStream, @Nullable OutputStream outputStream, @Nullable OutputStream errorStream) throws IOException { String[] modifiedArgs = checkConnectionArgs(args); SqlLine sqlLine = new SqlLine(); if (outputStream != null) { sqlLine.setOutputStream(new PrintStream(outputStream, false, StandardCharsets.UTF_8.name())); } if (errorStream != null) { sqlLine.setErrorStream(new PrintStream(errorStream, false, StandardCharsets.UTF_8.name())); } return sqlLine.begin(modifiedArgs, inputStream, true); }
@Test public void testSqlLine_fixedWindow() throws Exception { ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); String[] args = buildArgs( "CREATE EXTERNAL TABLE table_test (col_a VARCHAR, col_b TIMESTAMP) TYPE 'test';", "INSERT INTO table_test SELECT '3', TIMESTAMP '2018-07-01 21:26:06';", "INSERT INTO table_test SELECT '3', TIMESTAMP '2018-07-01 21:26:07';", "SELECT TUMBLE_START(col_b, INTERVAL '1' SECOND), count(*) FROM table_test " + "GROUP BY TUMBLE(col_b, INTERVAL '1' SECOND);"); BeamSqlLine.runSqlLine(args, null, byteArrayOutputStream, null); List<List<String>> lines = toLines(byteArrayOutputStream); assertThat( Arrays.asList( Arrays.asList("2018-07-01 21:26:06", "1"), Arrays.asList("2018-07-01 21:26:07", "1")), everyItem(is(oneOf(lines.toArray())))); }
@SuppressWarnings({"checkstyle:npathcomplexity", "checkstyle:cyclomaticcomplexity", "checkstyle:methodlength"}) void planMigrations(int partitionId, PartitionReplica[] oldReplicas, PartitionReplica[] newReplicas, MigrationDecisionCallback callback) { assert oldReplicas.length == newReplicas.length : "Replica addresses with different lengths! Old: " + Arrays.toString(oldReplicas) + ", New: " + Arrays.toString(newReplicas); if (logger.isFinestEnabled()) { logger.finest("partitionId=%d, Initial state: %s", partitionId, Arrays.toString(oldReplicas)); logger.finest("partitionId=%d, Final state: %s", partitionId, Arrays.toString(newReplicas)); } initState(oldReplicas); assertNoDuplicate(partitionId, oldReplicas, newReplicas); // fix cyclic partition replica movements if (fixCycle(oldReplicas, newReplicas)) { if (logger.isFinestEnabled()) { logger.finest("partitionId=%d, Final state (after cycle fix): %s", partitionId, Arrays.toString(newReplicas)); } } int currentIndex = 0; while (currentIndex < oldReplicas.length) { if (logger.isFinestEnabled()) { logger.finest("partitionId=%d, Current index: %d, state: %s", partitionId, currentIndex, Arrays.toString(state)); } assertNoDuplicate(partitionId, oldReplicas, newReplicas); if (newReplicas[currentIndex] == null) { if (state[currentIndex] != null) { // replica owner is removed and no one will own this replica logger.finest("partitionId=%d, New address is null at index: %d", partitionId, currentIndex); callback.migrate(state[currentIndex], currentIndex, -1, null, -1, -1); state[currentIndex] = null; } currentIndex++; continue; } if (state[currentIndex] == null) { int i = getReplicaIndex(state, newReplicas[currentIndex]); if (i == -1) { // fresh replica copy is needed, so COPY replica to newReplicas[currentIndex] from partition owner logger.finest("partitionId=%d, COPY %s to index: %d", partitionId, newReplicas[currentIndex], currentIndex); callback.migrate(null, -1, -1, newReplicas[currentIndex], -1, currentIndex); state[currentIndex] = newReplicas[currentIndex]; currentIndex++; continue; } if (i > currentIndex) { // SHIFT UP replica from i to currentIndex, copy data from partition owner logger.finest("partitionId=%d, SHIFT UP-2 %s from old addresses index: %d to index: %d", partitionId, state[i], i, currentIndex); callback.migrate(null, -1, -1, state[i], i, currentIndex); state[currentIndex] = state[i]; state[i] = null; continue; } throw new AssertionError("partitionId=" + partitionId + "Migration decision algorithm failed during SHIFT UP! INITIAL: " + Arrays.toString(oldReplicas) + ", CURRENT: " + Arrays.toString(state) + ", FINAL: " + Arrays.toString(newReplicas)); } if (newReplicas[currentIndex].equals(state[currentIndex])) { // no change, no action needed currentIndex++; continue; } if (getReplicaIndex(newReplicas, state[currentIndex]) == -1 && getReplicaIndex(state, newReplicas[currentIndex]) == -1) { // MOVE partition replica from its old owner to new owner logger.finest("partitionId=%d, MOVE %s to index: %d", partitionId, newReplicas[currentIndex], currentIndex); callback.migrate(state[currentIndex], currentIndex, -1, newReplicas[currentIndex], -1, currentIndex); state[currentIndex] = newReplicas[currentIndex]; currentIndex++; continue; } if (getReplicaIndex(state, newReplicas[currentIndex]) == -1) { int newIndex = getReplicaIndex(newReplicas, state[currentIndex]); assert newIndex > currentIndex : "partitionId=" + partitionId + ", Migration decision algorithm failed during SHIFT DOWN! 
INITIAL: " + Arrays.toString(oldReplicas) + ", CURRENT: " + Arrays.toString(state) + ", FINAL: " + Arrays.toString(newReplicas); if (state[newIndex] == null) { // it is a SHIFT DOWN logger.finest("partitionId=%d, SHIFT DOWN %s to index: %d, COPY %s to index: %d", partitionId, state[currentIndex], newIndex, newReplicas[currentIndex], currentIndex); callback.migrate(state[currentIndex], currentIndex, newIndex, newReplicas[currentIndex], -1, currentIndex); state[newIndex] = state[currentIndex]; } else { logger.finest("partitionId=%d, MOVE-3 %s to index: %d", partitionId, newReplicas[currentIndex], currentIndex); callback.migrate(state[currentIndex], currentIndex, -1, newReplicas[currentIndex], -1, currentIndex); } state[currentIndex] = newReplicas[currentIndex]; currentIndex++; continue; } planMigrations(partitionId, oldReplicas, newReplicas, callback, currentIndex); } assert Arrays.equals(state, newReplicas) : "partitionId=" + partitionId + ", Migration decisions failed! INITIAL: " + Arrays.toString(oldReplicas) + " CURRENT: " + Arrays.toString(state) + ", FINAL: " + Arrays.toString(newReplicas); }
@Test public void test_SHIFT_DOWN_performedBy_MOVE() throws UnknownHostException { final PartitionReplica[] oldReplicas = { new PartitionReplica(new Address("localhost", 5701), uuids[0]), new PartitionReplica(new Address("localhost", 5702), uuids[1]), new PartitionReplica(new Address("localhost", 5703), uuids[2]), null, null, null, null, }; final PartitionReplica[] newReplicas = {new PartitionReplica(new Address("localhost", 5704), uuids[3]), new PartitionReplica(new Address("localhost", 5701), uuids[0]), new PartitionReplica(new Address("localhost", 5702), uuids[1]), null, null, null, null, }; migrationPlanner.planMigrations(0, oldReplicas, newReplicas, callback); verify(callback).migrate(new PartitionReplica(new Address("localhost", 5701), uuids[0]), 0, -1, new PartitionReplica(new Address("localhost", 5704), uuids[3]), -1, 0); verify(callback).migrate(new PartitionReplica(new Address("localhost", 5702), uuids[1]), 1, -1, new PartitionReplica(new Address("localhost", 5701), uuids[0]), -1, 1); verify(callback).migrate(new PartitionReplica(new Address("localhost", 5703), uuids[2]), 2, -1, new PartitionReplica(new Address("localhost", 5702), uuids[1]), -1, 2); }
public static InternalRequestSignature fromHeaders(Crypto crypto, byte[] requestBody, HttpHeaders headers) { if (headers == null) { return null; } String signatureAlgorithm = headers.getHeaderString(SIGNATURE_ALGORITHM_HEADER); String encodedSignature = headers.getHeaderString(SIGNATURE_HEADER); if (signatureAlgorithm == null || encodedSignature == null) { return null; } Mac mac; try { mac = crypto.mac(signatureAlgorithm); } catch (NoSuchAlgorithmException e) { throw new BadRequestException(e.getMessage()); } byte[] decodedSignature; try { decodedSignature = Base64.getDecoder().decode(encodedSignature); } catch (IllegalArgumentException e) { throw new BadRequestException(e.getMessage()); } return new InternalRequestSignature( requestBody, mac, decodedSignature ); }
@Test public void fromHeadersShouldThrowExceptionOnInvalidSignatureAlgorithm() { assertThrows(BadRequestException.class, () -> InternalRequestSignature.fromHeaders(crypto, REQUEST_BODY, internalRequestHeaders(ENCODED_SIGNATURE, "doesn'texist"))); }
public ClusterSerdes init(Environment env, ClustersProperties clustersProperties, int clusterIndex) { ClustersProperties.Cluster clusterProperties = clustersProperties.getClusters().get(clusterIndex); log.debug("Configuring serdes for cluster {}", clusterProperties.getName()); var globalPropertiesResolver = new PropertyResolverImpl(env); var clusterPropertiesResolver = new PropertyResolverImpl(env, "kafka.clusters." + clusterIndex); Map<String, SerdeInstance> registeredSerdes = new LinkedHashMap<>(); // initializing serdes from config if (clusterProperties.getSerde() != null) { for (int i = 0; i < clusterProperties.getSerde().size(); i++) { ClustersProperties.SerdeConfig serdeConfig = clusterProperties.getSerde().get(i); if (Strings.isNullOrEmpty(serdeConfig.getName())) { throw new ValidationException("'name' property not set for serde: " + serdeConfig); } if (registeredSerdes.containsKey(serdeConfig.getName())) { throw new ValidationException("Multiple serdes with same name: " + serdeConfig.getName()); } var instance = createSerdeFromConfig( serdeConfig, new PropertyResolverImpl(env, "kafka.clusters." + clusterIndex + ".serde." + i + ".properties"), clusterPropertiesResolver, globalPropertiesResolver ); registeredSerdes.put(serdeConfig.getName(), instance); } } // initializing remaining built-in serdes with empty selection patterns builtInSerdeClasses.forEach((name, clazz) -> { if (!registeredSerdes.containsKey(name)) { BuiltInSerde serde = createSerdeInstance(clazz); if (autoConfigureSerde(serde, clusterPropertiesResolver, globalPropertiesResolver)) { registeredSerdes.put(name, new SerdeInstance(name, serde, null, null, null)); } } }); registerTopicRelatedSerde(registeredSerdes); return new ClusterSerdes( registeredSerdes, Optional.ofNullable(clusterProperties.getDefaultKeySerde()) .map(name -> Preconditions.checkNotNull(registeredSerdes.get(name), "Default key serde not found")) .orElse(null), Optional.ofNullable(clusterProperties.getDefaultValueSerde()) .map(name -> Preconditions.checkNotNull(registeredSerdes.get(name), "Default value serde not found")) .or(() -> Optional.ofNullable(registeredSerdes.get(SchemaRegistrySerde.name()))) .or(() -> Optional.ofNullable(registeredSerdes.get(ProtobufFileSerde.name()))) .orElse(null), createFallbackSerde() ); }
@Test void pluggedSerdesInitializedByLoader() { ClustersProperties.SerdeConfig customSerdeConfig = new ClustersProperties.SerdeConfig(); customSerdeConfig.setName("MyPluggedSerde"); customSerdeConfig.setFilePath("/custom.jar"); customSerdeConfig.setClassName("org.test.MyPluggedSerde"); customSerdeConfig.setTopicKeysPattern("keys"); customSerdeConfig.setTopicValuesPattern("values"); when(customSerdeLoaderMock.loadAndConfigure(anyString(), anyString(), any(), any(), any())) .thenReturn(new CustomSerdeLoader.CustomSerde(new StringSerde(), new URLClassLoader(new URL[]{}))); var serdes = init(customSerdeConfig); SerdeInstance customSerdeInstance = serdes.serdes.get("MyPluggedSerde"); verifyPatternsMatch(customSerdeConfig, customSerdeInstance); assertThat(customSerdeInstance.classLoader).isNotNull(); verify(customSerdeLoaderMock).loadAndConfigure( eq(customSerdeConfig.getClassName()), eq(customSerdeConfig.getFilePath()), any(), any(), any() ); }
@Override protected FieldValue doGet(String fieldName, EventWithContext eventWithContext) { final ImmutableMap.Builder<String, Object> dataModelBuilder = ImmutableMap.builder(); if (eventWithContext.messageContext().isPresent()) { dataModelBuilder.put("source", eventWithContext.messageContext().get().getFields()); } else if (eventWithContext.eventContext().isPresent()) { dataModelBuilder.put("source", eventWithContext.eventContext().get().toDto().fields()); } final ImmutableMap<String, Object> dataModel = dataModelBuilder.build(); if (!isValidTemplate(config.template(), dataModel)) { return FieldValue.error(); } try { return FieldValue.string(templateEngine.transform(config.template(), dataModel)); } catch (Exception e) { LOG.error("Couldn't render field template \"{}\"", config.template(), e); return FieldValue.error(); } }
@Test public void templateNumberFormatting() { final TestEvent event = new TestEvent(); final EventWithContext eventWithContext = EventWithContext.create(event, newMessage(ImmutableMap.of("count", 10241234, "avg", 1024.42))); final FieldValue fieldValue = newTemplate("count: ${source.count} avg: ${source.avg}").doGet("test", eventWithContext); assertThat(fieldValue.value()).isEqualTo("count: 10241234 avg: 1024.42"); }
static CatalogLoader createCatalogLoader( String name, Map<String, String> properties, Configuration hadoopConf) { String catalogImpl = properties.get(CatalogProperties.CATALOG_IMPL); if (catalogImpl != null) { String catalogType = properties.get(ICEBERG_CATALOG_TYPE); Preconditions.checkArgument( catalogType == null, "Cannot create catalog %s, both catalog-type and catalog-impl are set: catalog-type=%s, catalog-impl=%s", name, catalogType, catalogImpl); return CatalogLoader.custom(name, properties, hadoopConf, catalogImpl); } String catalogType = properties.getOrDefault(ICEBERG_CATALOG_TYPE, ICEBERG_CATALOG_TYPE_HIVE); switch (catalogType.toLowerCase(Locale.ENGLISH)) { case ICEBERG_CATALOG_TYPE_HIVE: // The values of properties 'uri', 'warehouse', 'hive-conf-dir' are allowed to be null, in // that case it will // fallback to parse those values from hadoop configuration which is loaded from classpath. String hiveConfDir = properties.get(HIVE_CONF_DIR); String hadoopConfDir = properties.get(HADOOP_CONF_DIR); Configuration newHadoopConf = mergeHiveConf(hadoopConf, hiveConfDir, hadoopConfDir); return CatalogLoader.hive(name, newHadoopConf, properties); case ICEBERG_CATALOG_TYPE_HADOOP: return CatalogLoader.hadoop(name, hadoopConf, properties); case ICEBERG_CATALOG_TYPE_REST: return CatalogLoader.rest(name, hadoopConf, properties); default: throw new UnsupportedOperationException( "Unknown catalog-type: " + catalogType + " (Must be 'hive', 'hadoop' or 'rest')"); } }
@Test public void testCreateCatalogHadoop() { String catalogName = "hadoopCatalog"; props.put( FlinkCatalogFactory.ICEBERG_CATALOG_TYPE, FlinkCatalogFactory.ICEBERG_CATALOG_TYPE_HADOOP); Catalog catalog = FlinkCatalogFactory.createCatalogLoader(catalogName, props, new Configuration()) .loadCatalog(); assertThat(catalog).isNotNull().isInstanceOf(HadoopCatalog.class); }
public static IPMappingAddress ipv4MappingAddress(IpPrefix ip) { return new IPMappingAddress(ip, MappingAddress.Type.IPV4); }
@Test public void testIpv4MappingAddressMethod() { IpPrefix ip = IpPrefix.valueOf("1.2.3.4/24"); MappingAddress mappingAddress = MappingAddresses.ipv4MappingAddress(ip); IPMappingAddress ipMappingAddress = checkAndConvert(mappingAddress, MappingAddress.Type.IPV4, IPMappingAddress.class); assertThat(ipMappingAddress.ip(), is(equalTo(ip))); }
@Override public int curThreadNum() { return (int)curThreadNum.sum(); }
@Test public void testStatisticLongAdder() throws InterruptedException { AtomicInteger atomicInteger = new AtomicInteger(0); StatisticNode statisticNode = new StatisticNode(); ExecutorService bizEs1 = new ThreadPoolExecutor(THREAD_COUNT, THREAD_COUNT, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>()); ExecutorService bizEs2 = new ThreadPoolExecutor(THREAD_COUNT, THREAD_COUNT, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>()); int taskCount = 100; for (int i = 0; i < taskCount; i++) { int op = i % 2; bizEs2.submit(new StatisticAtomicIntegerTask(atomicInteger, op, i)); bizEs1.submit(new StatisticLongAdderTask(statisticNode, op, i)); } Thread.sleep(5000); log("LongAdder totalCost : " + StatisticLongAdderTask.totalCost() + "ms"); log("AtomicInteger totalCost : " + StatisticAtomicIntegerTask.totalCost() + "ms"); Assert.assertEquals(statisticNode.curThreadNum(), atomicInteger.get()); }
@Override public void save(final CallContext ctx) { val webContext = ctx.webContext(); val sessionStore = ctx.sessionStore(); val requestedUrl = getRequestedUrl(webContext, sessionStore); if (WebContextHelper.isPost(webContext)) { LOGGER.debug("requestedUrl with data: {}", requestedUrl); val formPost = HttpActionHelper.buildFormPostContent(webContext); sessionStore.set(webContext, Pac4jConstants.REQUESTED_URL, new OkAction(formPost)); } else { LOGGER.debug("requestedUrl: {}", requestedUrl); sessionStore.set(webContext, Pac4jConstants.REQUESTED_URL, requestedUrl); } }
@Test public void testSavePost() { val context = MockWebContext.create().setFullRequestURL(PAC4J_URL).setRequestMethod("POST"); context.addRequestParameter(KEY, VALUE); val sessionStore = new MockSessionStore(); handler.save(new CallContext(context, sessionStore)); WithContentAction action = (OkAction) sessionStore.get(context, Pac4jConstants.REQUESTED_URL).get(); assertEquals(FORM_DATA, action.getContent()); }
public static int check(String passwd) { if (null == passwd) { throw new IllegalArgumentException("password is empty"); } int len = passwd.length(); int level = 0; // increase points if (countLetter(passwd, CHAR_TYPE.NUM) > 0) { level++; } if (countLetter(passwd, CHAR_TYPE.SMALL_LETTER) > 0) { level++; } if (len > 4 && countLetter(passwd, CHAR_TYPE.CAPITAL_LETTER) > 0) { level++; } if (len > 6 && countLetter(passwd, CHAR_TYPE.OTHER_CHAR) > 0) { level++; } if (len > 4 && countLetter(passwd, CHAR_TYPE.NUM) > 0 && countLetter(passwd, CHAR_TYPE.SMALL_LETTER) > 0 || countLetter(passwd, CHAR_TYPE.NUM) > 0 && countLetter(passwd, CHAR_TYPE.CAPITAL_LETTER) > 0 || countLetter(passwd, CHAR_TYPE.NUM) > 0 && countLetter(passwd, CHAR_TYPE.OTHER_CHAR) > 0 || countLetter(passwd, CHAR_TYPE.SMALL_LETTER) > 0 && countLetter(passwd, CHAR_TYPE.CAPITAL_LETTER) > 0 || countLetter(passwd, CHAR_TYPE.SMALL_LETTER) > 0 && countLetter(passwd, CHAR_TYPE.OTHER_CHAR) > 0 || countLetter(passwd, CHAR_TYPE.CAPITAL_LETTER) > 0 && countLetter(passwd, CHAR_TYPE.OTHER_CHAR) > 0) { level++; } if (len > 6 && countLetter(passwd, CHAR_TYPE.NUM) > 0 && countLetter(passwd, CHAR_TYPE.SMALL_LETTER) > 0 && countLetter(passwd, CHAR_TYPE.CAPITAL_LETTER) > 0 || countLetter(passwd, CHAR_TYPE.NUM) > 0 && countLetter(passwd, CHAR_TYPE.SMALL_LETTER) > 0 && countLetter(passwd, CHAR_TYPE.OTHER_CHAR) > 0 || countLetter(passwd, CHAR_TYPE.NUM) > 0 && countLetter(passwd, CHAR_TYPE.CAPITAL_LETTER) > 0 && countLetter(passwd, CHAR_TYPE.OTHER_CHAR) > 0 || countLetter(passwd, CHAR_TYPE.SMALL_LETTER) > 0 && countLetter(passwd, CHAR_TYPE.CAPITAL_LETTER) > 0 && countLetter(passwd, CHAR_TYPE.OTHER_CHAR) > 0) { level++; } if (len > 8 && countLetter(passwd, CHAR_TYPE.NUM) > 0 && countLetter(passwd, CHAR_TYPE.SMALL_LETTER) > 0 && countLetter(passwd, CHAR_TYPE.CAPITAL_LETTER) > 0 && countLetter(passwd, CHAR_TYPE.OTHER_CHAR) > 0) { level++; } if (len > 6 && countLetter(passwd, CHAR_TYPE.NUM) >= 3 && countLetter(passwd, CHAR_TYPE.SMALL_LETTER) >= 3 || countLetter(passwd, CHAR_TYPE.NUM) >= 3 && countLetter(passwd, CHAR_TYPE.CAPITAL_LETTER) >= 3 || countLetter(passwd, CHAR_TYPE.NUM) >= 3 && countLetter(passwd, CHAR_TYPE.OTHER_CHAR) >= 2 || countLetter(passwd, CHAR_TYPE.SMALL_LETTER) >= 3 && countLetter(passwd, CHAR_TYPE.CAPITAL_LETTER) >= 3 || countLetter(passwd, CHAR_TYPE.SMALL_LETTER) >= 3 && countLetter(passwd, CHAR_TYPE.OTHER_CHAR) >= 2 || countLetter(passwd, CHAR_TYPE.CAPITAL_LETTER) >= 3 && countLetter(passwd, CHAR_TYPE.OTHER_CHAR) >= 2) { level++; } if (len > 8 && countLetter(passwd, CHAR_TYPE.NUM) >= 2 && countLetter(passwd, CHAR_TYPE.SMALL_LETTER) >= 2 && countLetter(passwd, CHAR_TYPE.CAPITAL_LETTER) >= 2 || countLetter(passwd, CHAR_TYPE.NUM) >= 2 && countLetter(passwd, CHAR_TYPE.SMALL_LETTER) >= 2 && countLetter(passwd, CHAR_TYPE.OTHER_CHAR) >= 2 || countLetter(passwd, CHAR_TYPE.NUM) >= 2 && countLetter(passwd, CHAR_TYPE.CAPITAL_LETTER) >= 2 && countLetter(passwd, CHAR_TYPE.OTHER_CHAR) >= 2 || countLetter(passwd, CHAR_TYPE.SMALL_LETTER) >= 2 && countLetter(passwd, CHAR_TYPE.CAPITAL_LETTER) >= 2 && countLetter(passwd, CHAR_TYPE.OTHER_CHAR) >= 2) { level++; } if (len > 10 && countLetter(passwd, CHAR_TYPE.NUM) >= 2 && countLetter(passwd, CHAR_TYPE.SMALL_LETTER) >= 2 && countLetter(passwd, CHAR_TYPE.CAPITAL_LETTER) >= 2 && countLetter(passwd, CHAR_TYPE.OTHER_CHAR) >= 2) { level++; } if (countLetter(passwd, CHAR_TYPE.OTHER_CHAR) >= 3) { level++; } if (countLetter(passwd, CHAR_TYPE.OTHER_CHAR) >= 6) { level++; } if (len > 12) { level++; if (len >= 16) { level++; } } // decrease points if ("abcdefghijklmnopqrstuvwxyz".indexOf(passwd) > 0 || "ABCDEFGHIJKLMNOPQRSTUVWXYZ".indexOf(passwd) > 0) { level--; } if ("qwertyuiop".indexOf(passwd) > 0 || "asdfghjkl".indexOf(passwd) > 0 || "zxcvbnm".indexOf(passwd) > 0) { level--; } if (StrUtil.isNumeric(passwd) && ("01234567890".indexOf(passwd) > 0 || "09876543210".indexOf(passwd) > 0)) { level--; } if (countLetter(passwd, CHAR_TYPE.NUM) == len || countLetter(passwd, CHAR_TYPE.SMALL_LETTER) == len || countLetter(passwd, CHAR_TYPE.CAPITAL_LETTER) == len) { level--; } if (len % 2 == 0) { // aaabbb String part1 = passwd.substring(0, len / 2); String part2 = passwd.substring(len / 2); if (part1.equals(part2)) { level--; } if (StrUtil.isCharEquals(part1) && StrUtil.isCharEquals(part2)) { level--; } } if (len % 3 == 0) { // ababab String part1 = passwd.substring(0, len / 3); String part2 = passwd.substring(len / 3, len / 3 * 2); String part3 = passwd.substring(len / 3 * 2); if (part1.equals(part2) && part2.equals(part3)) { level--; } } if (StrUtil.isNumeric(passwd) && len >= 6 && len <= 8) { // 19881010 or 881010 int year = 0; if (len == 8 || len == 6) { year = Integer.parseInt(passwd.substring(0, len - 4)); } int size = sizeOfInt(year); int month = Integer.parseInt(passwd.substring(size, size + 2)); int day = Integer.parseInt(passwd.substring(size + 2, len)); if (year >= 1950 && year < 2050 && month >= 1 && month <= 12 && day >= 1 && day <= 31) { level--; } } for (String s : DICTIONARY) { if (passwd.equals(s) || s.contains(passwd)) { level--; break; } } if (len <= 6) { level--; if (len <= 4) { level--; if (len <= 3) { level = 0; } } } if (StrUtil.isCharEquals(passwd)) { level = 0; } if (level < 0) { level = 0; } return level; }
@Test
public void strengthTest() {
    String passwd = "2hAj5#mne-ix.86H";
    assertEquals(13, PasswdStrength.check(passwd));
}
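A minimal usage sketch for the strength checker above; only the last value (13) is taken from the test, the weaker inputs are printed rather than asserted because their exact scores are not given in the source.

// Usage sketch (assumes PasswdStrength and its CHAR_TYPE helpers are on the classpath).
public class PasswdStrengthDemo {
    public static void main(String[] args) {
        System.out.println(PasswdStrength.check("123456"));            // numeric-only and short: heavily penalized
        System.out.println(PasswdStrength.check("qwerty"));            // keyboard sequence: penalized
        System.out.println(PasswdStrength.check("2hAj5#mne-ix.86H"));  // 13, per the test above
    }
}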
@Override
public void delete(RuleDao nativeEntity) {
    ruleService.delete(nativeEntity.id());
}
@Test
@MongoDBFixtures("PipelineRuleFacadeTest.json")
public void delete() throws NotFoundException {
    final RuleDao ruleDao = ruleService.loadByName("debug");

    assertThat(ruleService.loadAll()).hasSize(2);
    facade.delete(ruleDao);

    assertThatThrownBy(() -> ruleService.loadByName("debug"))
            .isInstanceOf(NotFoundException.class);
    assertThat(ruleService.loadAll()).hasSize(1);
}
@VisibleForTesting
static String toString(@Nullable TaskManagerLocation location) {
    // '(unassigned)' being the default value is added to support backward-compatibility for the
    // deprecated fields
    return location != null ? location.getEndpoint() : "(unassigned)";
}
@Test
void testTaskManagerLocationHandling() {
    final TaskManagerLocation taskManagerLocation = new LocalTaskManagerLocation();
    assertThat(JobExceptionsHandler.toString(taskManagerLocation))
            .isEqualTo(
                    String.format(
                            "%s:%s",
                            taskManagerLocation.getFQDNHostname(),
                            taskManagerLocation.dataPort()));
}
@Override
public void checkBeforeUpdate(final DropBroadcastTableRuleStatement sqlStatement) {
    if (!sqlStatement.isIfExists()) {
        checkBroadcastTableRuleExist(sqlStatement);
    }
}
@Test
void assertCheckSQLStatementWithoutToBeDroppedRule() {
    DropBroadcastTableRuleStatement sqlStatement = new DropBroadcastTableRuleStatement(false, Collections.singleton("t_address"));
    BroadcastRule rule = mock(BroadcastRule.class);
    when(rule.getConfiguration()).thenReturn(new BroadcastRuleConfiguration(Collections.emptyList()));
    executor.setRule(rule);
    assertThrows(MissingRequiredRuleException.class, () -> executor.checkBeforeUpdate(sqlStatement));
}
public static Interval of(String interval, TimeRange timeRange) {
    switch (timeRange.type()) {
        case TimeRange.KEYWORD:
            return timestampInterval(interval);
        case TimeRange.ABSOLUTE:
            return ofAbsoluteRange(interval, (AbsoluteRange) timeRange);
        case TimeRange.RELATIVE:
            return ofRelativeRange(interval, (RelativeRange) timeRange);
    }
    throw new RuntimeException("Unable to parse time range type: " + timeRange.type());
}
@Test
public void returnsParsedIntervalIfRelativeRangeButBelowLimit() {
    final RelativeRange relativeRange = RelativeRange.create(450);

    final Interval interval = ApproximatedAutoIntervalFactory.of("minute", relativeRange);

    assertThat(interval).isEqualTo(TimeUnitInterval.create(TimeUnitInterval.IntervalUnit.MINUTES, 1));
}
public static String getExtension(final URI file) {
    return file == null ? null : getExtension(file.toString());
}
@Test
void shouldGetExtension() {
    assertThat(FileUtils.getExtension((String) null), nullValue());
    assertThat(FileUtils.getExtension(""), nullValue());
    assertThat(FileUtils.getExtension("/file/hello"), nullValue());
    assertThat(FileUtils.getExtension("/file/hello.txt"), is(".txt"));
}
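A short sketch of how the URI overload above rides on the String overload exercised by the test; the sample URIs are illustrative, not taken from the source.

import java.net.URI;

// The URI overload stringifies first, so the results line up with the
// String cases asserted in the test above.
public class ExtensionDemo {
    public static void main(String[] args) {
        System.out.println(FileUtils.getExtension(URI.create("file:///file/hello.txt"))); // ".txt"
        System.out.println(FileUtils.getExtension(URI.create("file:///file/hello")));     // null
        System.out.println(FileUtils.getExtension((URI) null));                           // null, via the null guard
    }
}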
@Override
public boolean remove(long key1, long key2) {
    return super.remove0(key1, key2);
}
@Test(expected = AssertionError.class)
@RequireAssertEnabled
public void testRemove_whenDisposed() {
    hsa.dispose();
    hsa.remove(1, 1);
}
@Override
public AttributedList<Path> read(final Path directory, final List<String> replies) throws FTPInvalidListException {
    final AttributedList<Path> children = new AttributedList<Path>();
    // At least one entry successfully parsed
    boolean success = false;
    // Call hook for those implementors which need to perform some action upon the list after it has been created
    // from the server stream, but before any clients see the list
    parser.preParse(replies);
    for(String line : replies) {
        final FTPFile f = parser.parseFTPEntry(line);
        if(null == f) {
            continue;
        }
        final String name = f.getName();
        if(!success) {
            if(lenient) {
                // Workaround for #2410. STAT only returns ls of directory itself
                // Workaround for #2434. STAT of symbolic link directory only lists the directory itself.
                if(directory.getName().equals(name)) {
                    log.warn(String.format("Skip %s matching parent directory name", f.getName()));
                    continue;
                }
                if(name.contains(String.valueOf(Path.DELIMITER))) {
                    if(!name.startsWith(directory.getAbsolute() + Path.DELIMITER)) {
                        // Workaround for #2434.
                        log.warn(String.format("Skip %s with delimiter in name", name));
                        continue;
                    }
                }
            }
        }
        success = true;
        if(name.equals(".") || name.equals("..")) {
            if(log.isDebugEnabled()) {
                log.debug(String.format("Skip %s", f.getName()));
            }
            continue;
        }
        final Path parsed = new Path(directory, PathNormalizer.name(name),
                f.getType() == FTPFile.DIRECTORY_TYPE ? EnumSet.of(Path.Type.directory) : EnumSet.of(Path.Type.file));
        switch(f.getType()) {
            case FTPFile.SYMBOLIC_LINK_TYPE:
                parsed.setType(EnumSet.of(Path.Type.file, Path.Type.symboliclink));
                // Symbolic link target may be an absolute or relative path
                final String target = f.getLink();
                if(StringUtils.isBlank(target)) {
                    log.warn(String.format("Missing symbolic link target for %s", parsed));
                    final EnumSet<Path.Type> type = parsed.getType();
                    type.remove(Path.Type.symboliclink);
                }
                else if(StringUtils.startsWith(target, String.valueOf(Path.DELIMITER))) {
                    parsed.setSymlinkTarget(new Path(PathNormalizer.normalize(target), EnumSet.of(Path.Type.file)));
                }
                else if(StringUtils.equals("..", target)) {
                    parsed.setSymlinkTarget(directory);
                }
                else if(StringUtils.equals(".", target)) {
                    parsed.setSymlinkTarget(parsed);
                }
                else {
                    parsed.setSymlinkTarget(new Path(directory, target, EnumSet.of(Path.Type.file)));
                }
                break;
        }
        if(parsed.isFile()) {
            parsed.attributes().setSize(f.getSize());
        }
        parsed.attributes().setOwner(f.getUser());
        parsed.attributes().setGroup(f.getGroup());
        Permission.Action u = Permission.Action.none;
        if(f.hasPermission(FTPFile.USER_ACCESS, FTPFile.READ_PERMISSION)) {
            u = u.or(Permission.Action.read);
        }
        if(f.hasPermission(FTPFile.USER_ACCESS, FTPFile.WRITE_PERMISSION)) {
            u = u.or(Permission.Action.write);
        }
        if(f.hasPermission(FTPFile.USER_ACCESS, FTPFile.EXECUTE_PERMISSION)) {
            u = u.or(Permission.Action.execute);
        }
        Permission.Action g = Permission.Action.none;
        if(f.hasPermission(FTPFile.GROUP_ACCESS, FTPFile.READ_PERMISSION)) {
            g = g.or(Permission.Action.read);
        }
        if(f.hasPermission(FTPFile.GROUP_ACCESS, FTPFile.WRITE_PERMISSION)) {
            g = g.or(Permission.Action.write);
        }
        if(f.hasPermission(FTPFile.GROUP_ACCESS, FTPFile.EXECUTE_PERMISSION)) {
            g = g.or(Permission.Action.execute);
        }
        Permission.Action o = Permission.Action.none;
        if(f.hasPermission(FTPFile.WORLD_ACCESS, FTPFile.READ_PERMISSION)) {
            o = o.or(Permission.Action.read);
        }
        if(f.hasPermission(FTPFile.WORLD_ACCESS, FTPFile.WRITE_PERMISSION)) {
            o = o.or(Permission.Action.write);
        }
        if(f.hasPermission(FTPFile.WORLD_ACCESS, FTPFile.EXECUTE_PERMISSION)) {
            o = o.or(Permission.Action.execute);
        }
        final Permission permission = new Permission(u, g, o);
        if(f instanceof FTPExtendedFile) {
            permission.setSetuid(((FTPExtendedFile) f).isSetuid());
            permission.setSetgid(((FTPExtendedFile) f).isSetgid());
            permission.setSticky(((FTPExtendedFile) f).isSticky());
        }
        if(!Permission.EMPTY.equals(permission)) {
            parsed.attributes().setPermission(permission);
        }
        final Calendar timestamp = f.getTimestamp();
        if(timestamp != null) {
            parsed.attributes().setModificationDate(timestamp.getTimeInMillis());
        }
        children.add(parsed);
    }
    if(!success) {
        throw new FTPInvalidListException(children);
    }
    return children;
}
@Test(expected = FTPInvalidListException.class)
public void test3243() throws Exception {
    Path path = new Path("/SunnyD", EnumSet.of(Path.Type.directory));
    assertEquals("SunnyD", path.getName());
    assertEquals("/SunnyD", path.getAbsolute());
    new FTPListResponseReader(new FTPParserSelector().getParser("UNIX")).read(path, Collections.singletonList(
            " drwxrwx--x 1 owner group 512 Jun 12 15:40 SunnyD"));
}
public static boolean isJsonValid(String schemaText, String jsonText) throws IOException {
    return isJsonValid(schemaText, jsonText, null);
}
@Test
void testValidateJsonSuccess() {
    boolean valid = false;
    String schemaText = null;
    String jsonText = "{\"name\": \"307\", \"model\": \"Peugeot 307\", \"year\": 2003}";

    try {
        // Load schema from file.
        schemaText = FileUtils
                .readFileToString(new File("target/test-classes/io/github/microcks/util/car-schema.json"));
        // Validate Json according schema.
        valid = JsonSchemaValidator.isJsonValid(schemaText, jsonText);
    } catch (Exception e) {
        fail("Exception should not be thrown");
    }

    // Assert Json object is valid.
    assertTrue(valid);
}
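A compact sketch of the same two-argument overload with an inline schema; the schema body below is an assumption standing in for the car-schema.json fixture, which is not shown in the source.

// Sketch: validate against an inline schema instead of the file fixture.
// Requires java.io.IOException on the caller's signature, as in the overload above.
static void validateInline() throws IOException {
    String schemaText = "{"
            + "\"type\": \"object\","
            + "\"properties\": {\"name\": {\"type\": \"string\"}, \"year\": {\"type\": \"integer\"}},"
            + "\"required\": [\"name\"]"
            + "}";
    boolean ok = JsonSchemaValidator.isJsonValid(schemaText, "{\"name\": \"307\", \"year\": 2003}");
    boolean ko = JsonSchemaValidator.isJsonValid(schemaText, "{\"year\": 2003}");
    System.out.println(ok + " / " + ko); // expected: true / false (missing required "name")
}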
boolean canFilterPlayer(String playerName) {
    boolean isMessageFromSelf = playerName.equals(client.getLocalPlayer().getName());
    return !isMessageFromSelf
            && (config.filterFriends() || !client.isFriended(playerName, false))
            && (config.filterFriendsChat() || !isFriendsChatMember(playerName))
            && (config.filterClanChat() || !isClanChatMember(playerName));
}
@Test
public void testMessageFromFriendsChatIsFiltered() {
    when(client.isFriended("B0aty", false)).thenReturn(false);
    when(chatFilterConfig.filterFriendsChat()).thenReturn(true);
    assertTrue(chatFilterPlugin.canFilterPlayer("B0aty"));
}
@Converter
public static Map<String, Object> toMap(final Struct struct) {
    final HashMap<String, Object> fieldsToValues = new HashMap<>();
    struct.schema().fields().forEach(field -> {
        Object value = struct.get(field);
        // recursive call if we have nested structs
        if (value instanceof Struct) {
            fieldsToValues.put(field.name(), toMap((Struct) value));
        } else {
            fieldsToValues.put(field.name(), value);
        }
    });
    return fieldsToValues;
}
@Test
void testToMapNestedStruct() {
    Schema detailsSchema = SchemaBuilder.struct().field("age", SchemaBuilder.INT32_SCHEMA).build();
    Schema valueSchema = SchemaBuilder.struct()
            .name("valueSchema")
            .field("id", SchemaBuilder.INT32_SCHEMA)
            .field("name", SchemaBuilder.STRING_SCHEMA)
            .field("isAdult", SchemaBuilder.BOOLEAN_SCHEMA)
            .field("details", detailsSchema)
            .build();
    Struct inputValue = new Struct(valueSchema)
            .put("id", 12)
            .put("name", "jane doe")
            .put("isAdult", true)
            .put("details", new Struct(detailsSchema).put("age", 30));

    final Map<String, Object> outputValue = DebeziumTypeConverter.toMap(inputValue);

    assertEquals("jane doe", outputValue.get("name"));
    assertEquals(12, outputValue.get("id"));
    assertTrue((Boolean) outputValue.get("isAdult"));
    assertEquals(30, ((Map) outputValue.get("details")).get("age"));
}
public String prometheusName(String recordName, String metricName) {
    String baseName = StringUtils.capitalize(recordName) + StringUtils.capitalize(metricName);
    String[] parts = SPLIT_PATTERN.split(baseName);
    String joined = String.join("_", parts).toLowerCase();
    return DELIMITERS.matcher(joined).replaceAll("_");
}
@Test
public void testNamingCamelCase() {
    PrometheusMetricsSink sink = new PrometheusMetricsSink();

    Assert.assertEquals("rpc_time_some_metrics",
            sink.prometheusName("RpcTime", "SomeMetrics"));
    Assert.assertEquals("om_rpc_time_om_info_keys",
            sink.prometheusName("OMRpcTime", "OMInfoKeys"));
    Assert.assertEquals("rpc_time_small",
            sink.prometheusName("RpcTime", "small"));
}
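For reference, a self-contained sketch of the camel-case splitting technique the sink relies on; the SPLIT regex and the delimiter handling here are assumptions reconstructed from the expected outputs, not the sink's actual SPLIT_PATTERN and DELIMITERS constants.

import java.util.regex.Pattern;

public final class PrometheusNaming {
    // Assumed split: before an uppercase letter that starts a new word, so that
    // "OMRpcTime" -> OM|Rpc|Time and "RpcTime" -> Rpc|Time.
    private static final Pattern SPLIT =
            Pattern.compile("(?<!(^|[A-Z]))(?=[A-Z])|(?<!^)(?=[A-Z][a-z])");

    static String prometheusName(String recordName, String metricName) {
        String base = capitalize(recordName) + capitalize(metricName);
        String joined = String.join("_", SPLIT.split(base)).toLowerCase();
        return joined.replaceAll("[^a-z0-9_]", "_"); // assumed delimiter replacement
    }

    private static String capitalize(String s) {
        return s.isEmpty() ? s : Character.toUpperCase(s.charAt(0)) + s.substring(1);
    }

    public static void main(String[] args) {
        System.out.println(prometheusName("OMRpcTime", "OMInfoKeys")); // om_rpc_time_om_info_keys
        System.out.println(prometheusName("RpcTime", "small"));        // rpc_time_small
    }
}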
@Override
public void commitJob(JobContext originalContext) throws IOException {
    JobContext jobContext = TezUtil.enrichContextWithVertexId(originalContext);
    JobConf jobConf = jobContext.getJobConf();

    long startTime = System.currentTimeMillis();
    LOG.info("Committing job {} has started", jobContext.getJobID());

    Collection<String> outputs = HiveIcebergStorageHandler.outputTables(jobContext.getJobConf());
    Collection<String> jobLocations = new ConcurrentLinkedQueue<>();

    ExecutorService fileExecutor = fileExecutor(jobConf);
    ExecutorService tableExecutor = tableExecutor(jobConf, outputs.size());
    try {
        // Commits the changes for the output tables in parallel
        Tasks.foreach(outputs)
                .throwFailureWhenFinished()
                .stopOnFailure()
                .executeWith(tableExecutor)
                .run(
                        output -> {
                            Table table = HiveIcebergStorageHandler.table(jobConf, output);
                            if (table != null) {
                                String catalogName = HiveIcebergStorageHandler.catalogName(jobConf, output);
                                jobLocations.add(
                                        generateJobLocation(table.location(), jobConf, jobContext.getJobID()));
                                commitTable(
                                        table.io(), fileExecutor, jobContext, output, table.location(), catalogName);
                            } else {
                                LOG.info(
                                        "CommitJob found no serialized table in config for table: {}. Skipping job commit.",
                                        output);
                            }
                        });
    } finally {
        fileExecutor.shutdown();
        if (tableExecutor != null) {
            tableExecutor.shutdown();
        }
    }

    LOG.info(
            "Commit took {} ms for job {}",
            System.currentTimeMillis() - startTime,
            jobContext.getJobID());

    cleanup(jobContext, jobLocations);
}
@Test
public void testSuccessfulPartitionedWrite() throws IOException {
    HiveIcebergOutputCommitter committer = new HiveIcebergOutputCommitter();
    Table table = table(temp.toFile().getPath(), true);

    JobConf conf = jobConf(table, 1);
    List<Record> expected = writeRecords(table.name(), 1, 0, true, false, conf);
    committer.commitJob(new JobContextImpl(conf, JOB_ID));

    HiveIcebergTestUtils.validateFiles(table, conf, JOB_ID, 3);
    HiveIcebergTestUtils.validateData(table, expected, 0);
}
@Override
public void open() {
    super.open();
    for (String propertyKey : properties.stringPropertyNames()) {
        LOGGER.debug("propertyKey: {}", propertyKey);
        String[] keyValue = propertyKey.split("\\.", 2);
        if (2 == keyValue.length) {
            LOGGER.debug("key: {}, value: {}", keyValue[0], keyValue[1]);

            Properties prefixProperties;
            if (basePropertiesMap.containsKey(keyValue[0])) {
                prefixProperties = basePropertiesMap.get(keyValue[0]);
            } else {
                prefixProperties = new Properties();
                basePropertiesMap.put(keyValue[0].trim(), prefixProperties);
            }
            prefixProperties.put(keyValue[1].trim(), getProperty(propertyKey));
        }
    }

    Set<String> removeKeySet = new HashSet<>();
    for (String key : basePropertiesMap.keySet()) {
        if (!COMMON_KEY.equals(key)) {
            Properties properties = basePropertiesMap.get(key);
            if (!properties.containsKey(DRIVER_KEY) || !properties.containsKey(URL_KEY)) {
                LOGGER.error("{} will be ignored. {}.{} and {}.{} is mandatory.",
                        key, key, DRIVER_KEY, key, URL_KEY);
                removeKeySet.add(key);
            }
        }
    }

    for (String key : removeKeySet) {
        basePropertiesMap.remove(key);
    }
    LOGGER.debug("JDBC PropertiesMap: {}", basePropertiesMap);

    setMaxLineResults();
    setMaxRows();

    // TODO(zjffdu) Set different sql splitter for different sql dialects.
    this.sqlSplitter = new SqlSplitter();
}
@Test
void testColumnAliasQuery() throws IOException, InterpreterException {
    Properties properties = new Properties();
    properties.setProperty("common.max_count", "1000");
    properties.setProperty("common.max_retry", "3");
    properties.setProperty("default.driver", "org.h2.Driver");
    properties.setProperty("default.url", getJdbcConnection());
    properties.setProperty("default.user", "");
    properties.setProperty("default.password", "");
    JDBCInterpreter t = new JDBCInterpreter(properties);
    t.open();

    String sqlQuery = "select NAME as SOME_OTHER_NAME from test_table limit 1";
    InterpreterResult interpreterResult = t.interpret(sqlQuery, context);
    List<InterpreterResultMessage> resultMessages = context.out.toInterpreterResultMessage();

    assertEquals(InterpreterResult.Code.SUCCESS, interpreterResult.code(), interpreterResult.toString());
    assertEquals(InterpreterResult.Type.TABLE, resultMessages.get(0).getType());
    assertEquals("SOME_OTHER_NAME\na_name\n", resultMessages.get(0).getData());
}
public static void close(final AutoCloseable closeable) {
    if (null == closeable) {
        return;
    }
    try {
        closeable.close();
        // CHECKSTYLE:OFF
    } catch (final Exception ignored) {
        // CHECKSTYLE:ON
    }
}
@Test
void assertCloseWithNullResource() {
    assertDoesNotThrow(() -> QuietlyCloser.close(null));
}
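A typical call site for the closer above; the stream and path are illustrative.

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;

// Release the resource in finally without masking the primary exception;
// a failing close() is deliberately swallowed, and null is tolerated.
static void readQuietly() throws IOException {
    InputStream in = null;
    try {
        in = Files.newInputStream(Paths.get("data.bin")); // illustrative path
        // ... consume the stream ...
    } finally {
        QuietlyCloser.close(in);
    }
}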
static String generateIndexName(String baseString) {
    return generateResourceId(
            baseString,
            ILLEGAL_INDEX_NAME_CHARS,
            REPLACE_INDEX_NAME_CHAR,
            MAX_INDEX_NAME_LENGTH,
            TIME_FORMAT);
}
@Test
public void testGenerateIndexNameShouldReplaceStar() {
    String testBaseString = "Test*DB*Name";
    String actual = generateIndexName(testBaseString);
    assertThat(actual).matches("test-db-name-\\d{8}-\\d{6}-\\d{6}");
}
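A hedged sketch of the sanitize-then-timestamp pattern generateResourceId implements; the character handling and the yyyyMMdd-HHmmss-SSSSSS format are assumptions inferred from the regex the test expects, not the actual ILLEGAL_INDEX_NAME_CHARS, REPLACE_INDEX_NAME_CHAR, or TIME_FORMAT constants.

import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;

// Assumed behavior: lowercase, swap illegal characters for '-', then append
// a timestamp matching "\\d{8}-\\d{6}-\\d{6}".
static String generateIndexNameSketch(String base) {
    String cleaned = base.toLowerCase().replaceAll("[^a-z0-9]+", "-");
    String stamp = LocalDateTime.now()
            .format(DateTimeFormatter.ofPattern("yyyyMMdd-HHmmss-SSSSSS"));
    return cleaned + "-" + stamp;
}
// generateIndexNameSketch("Test*DB*Name") -> e.g. "test-db-name-20240101-120000-000123"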
@Override
public ExecuteResult execute(
        final ServiceContext serviceContext,
        final ConfiguredKsqlPlan plan,
        final boolean restoreInProgress) {
    try {
        final ExecuteResult result = EngineExecutor
                .create(primaryContext, serviceContext, plan.getConfig())
                .execute(plan.getPlan(), restoreInProgress);
        return result;
    } catch (final KsqlStatementException e) {
        throw e;
    } catch (final KsqlException e) {
        // add the statement text to the KsqlException
        throw new KsqlStatementException(
                e.getMessage(),
                e.getMessage(),
                plan.getPlan().getStatementText(),
                e.getCause());
    }
}
@Test
public void shouldThrowOnNoneExecutableDdlStatement() {
    // When:
    setupKsqlEngineWithSharedRuntimeEnabled();
    final KsqlStatementException e = assertThrows(
            KsqlStatementException.class,
            () -> KsqlEngineTestUtil.execute(
                    serviceContext,
                    ksqlEngine,
                    "SHOW STREAMS;",
                    ksqlConfig,
                    Collections.emptyMap()));

    // Then:
    assertThat(e, rawMessage(is("Statement not executable")));
    assertThat(e, statementText(is("SHOW STREAMS;")));
}
@Override
protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
    AsyncByteArrayFeeder streamFeeder = streamReader.getInputFeeder();
    logger.info("Decoding XMPP data..");
    byte[] buffer = new byte[in.readableBytes()];
    in.readBytes(buffer);
    logger.debug("Buffer length: {}", buffer.length);
    try {
        streamFeeder.feedInput(buffer, 0, buffer.length);
    } catch (XMLStreamException exception) {
        logger.info(exception.getMessage());
        in.skipBytes(in.readableBytes());
        logger.info("Bytes skipped");
        throw exception;
    }
    while (streamReader.hasNext() && streamReader.next() != AsyncXMLStreamReader.EVENT_INCOMPLETE) {
        out.add(allocator.allocate(streamReader));
    }
}
@Test
public void testDecodeNoChannel() throws Exception {
    XmlStreamDecoder decoder = new XmlStreamDecoder();
    List<Object> list = Lists.newArrayList();
    decoder.decode(new ActiveChannelHandlerContextAdapter(), Unpooled.buffer(), list);
    assertThat(list.size(), is(0));
}
@VisibleForTesting
static List<String> getJmResourceParams(Configuration configuration) {
    JobManagerProcessSpec jobManagerProcessSpec =
            JobManagerProcessUtils.processSpecFromConfigWithNewOptionToInterpretLegacyHeap(
                    configuration, JobManagerOptions.JVM_HEAP_MEMORY);
    logMasterConfiguration(jobManagerProcessSpec);
    return Arrays.asList(
            JobManagerProcessUtils.generateJvmParametersStr(jobManagerProcessSpec, configuration),
            JobManagerProcessUtils.generateDynamicConfigsStr(jobManagerProcessSpec));
}
@Test
void testJmLegacyHeapOptionSetsNewJvmHeap() {
    Configuration configuration = new Configuration();
    MemorySize heapSize = MemorySize.ofMebiBytes(10);
    configuration.set(JobManagerOptions.JOB_MANAGER_HEAP_MEMORY, heapSize);

    String jvmArgsLine = BashJavaUtils.getJmResourceParams(configuration).get(0);
    Map<String, String> jvmArgs = ConfigurationUtils.parseJvmArgString(jvmArgsLine);

    String heapSizeStr = Long.toString(heapSize.getBytes());
    assertThat(jvmArgs.get("-Xmx")).isEqualTo(heapSizeStr);
    assertThat(jvmArgs.get("-Xms")).isEqualTo(heapSizeStr);
}