Dataset schema:
  focal_method: string, lengths 13 to 60.9k characters
  test_case: string, lengths 25 to 109k characters
// Returns the process-wide RepositoryMetadataStore singleton held in the static field.
public static RepositoryMetadataStore getInstance() { return repositoryMetadataStore; }
// Looks up metadata for an id that was never registered and expects a null
// result rather than an exception (map-style miss semantics).
@Test public void shouldReturnNullForMetadataIfPluginIdIsNonExistent() { assertNull(RepositoryMetadataStore.getInstance().getMetadata("non-existent-plugin-id")); }
@Override public GcsResourceId resolve(String other, ResolveOptions resolveOptions) { checkState( isDirectory(), String.format("Expected the gcsPath is a directory, but had [%s].", gcsPath)); checkArgument( resolveOptions.equals(StandardResolveOptions.RESOLVE_FILE) || resolveOptions.equals(StandardResolveOptions.RESOLVE_DIRECTORY), String.format("ResolveOptions: [%s] is not supported.", resolveOptions)); if (resolveOptions.equals(StandardResolveOptions.RESOLVE_FILE)) { checkArgument( !other.endsWith("/"), "The resolved file: [%s] should not end with '/'.", other); return fromGcsPath(gcsPath.resolve(other)); } else { // StandardResolveOptions.RESOLVE_DIRECTORY if (other.endsWith("/")) { // other already contains the delimiter for gcs. // It is not recommended for callers to set the delimiter. // However, we consider it as a valid input. return fromGcsPath(gcsPath.resolve(other)); } else { return fromGcsPath(gcsPath.resolve(other + "/")); } } }
/**
 * Covers resolve() for plain paths, chained directory resolution, absolute
 * inputs, a bucket root with no trailing '/', and unicode path segments.
 */
@Test
public void testResolve() {
  // Tests for common gcs paths.
  assertEquals(
      toResourceIdentifier("gs://bucket/tmp/aa"),
      toResourceIdentifier("gs://bucket/tmp/")
          .resolve("aa", StandardResolveOptions.RESOLVE_FILE));
  assertEquals(
      toResourceIdentifier("gs://bucket/tmp/aa/bb/cc/"),
      toResourceIdentifier("gs://bucket/tmp/")
          .resolve("aa", StandardResolveOptions.RESOLVE_DIRECTORY)
          .resolve("bb", StandardResolveOptions.RESOLVE_DIRECTORY)
          .resolve("cc", StandardResolveOptions.RESOLVE_DIRECTORY));
  // Tests absolute path.
  assertEquals(
      toResourceIdentifier("gs://bucket/tmp/aa"),
      toResourceIdentifier("gs://bucket/tmp/bb/")
          .resolve("gs://bucket/tmp/aa", StandardResolveOptions.RESOLVE_FILE));
  // Tests bucket with no ending '/'.
  assertEquals(
      toResourceIdentifier("gs://my_bucket/tmp"),
      toResourceIdentifier("gs://my_bucket").resolve("tmp", StandardResolveOptions.RESOLVE_FILE));
  // Tests path with unicode
  assertEquals(
      toResourceIdentifier("gs://bucket/输出 目录/输出 文件01.txt"),
      toResourceIdentifier("gs://bucket/输出 目录/")
          .resolve("输出 文件01.txt", StandardResolveOptions.RESOLVE_FILE));
}
/**
 * Opens a download stream for a Google Cloud Storage object, honoring
 * requester-pays buckets, object generations (versioning) and ranged reads.
 *
 * @param file     remote object to read
 * @param status   transfer state; a zero length short-circuits to an empty stream,
 *                 append mode triggers a Range request
 * @param callback unused here (no user interaction required)
 * @throws BackgroundException wrapping any underlying IOException
 */
@Override
public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
  try {
    // Zero-byte downloads never hit the network.
    if(0L == status.getLength()) {
      return new NullInputStream(0L);
    }
    final Storage.Objects.Get request = session.getClient().objects().get(
        containerService.getContainer(file).getName(), containerService.getKey(file));
    // Requester-pays buckets require billing the authenticated project/user.
    if(containerService.getContainer(file).attributes().getCustom().containsKey(GoogleStorageAttributesFinderFeature.KEY_REQUESTER_PAYS)) {
      request.setUserProject(session.getHost().getCredentials().getUsername());
    }
    final VersioningConfiguration versioning = null != session.getFeature(Versioning.class)
        ? session.getFeature(Versioning.class).getConfiguration(containerService.getContainer(file))
        : VersioningConfiguration.empty();
    // Pin the read to a specific object generation when a version id is present.
    if(versioning.isEnabled()) {
      if(StringUtils.isNotBlank(file.attributes().getVersionId())) {
        request.setGeneration(Long.parseLong(file.attributes().getVersionId()));
      }
    }
    if(status.isAppend()) {
      final HttpRange range = HttpRange.withStatus(status);
      final String header;
      // Open-ended range when the total length is unknown.
      if(TransferStatus.UNKNOWN_LENGTH == range.getEnd()) {
        header = String.format("bytes=%d-", range.getStart());
      }
      else {
        header = String.format("bytes=%d-%d", range.getStart(), range.getEnd());
      }
      if(log.isDebugEnabled()) {
        log.debug(String.format("Add range header %s for file %s", header, file));
      }
      final HttpHeaders headers = request.getRequestHeaders();
      headers.setRange(header);
      // Disable compression
      headers.setAcceptEncoding("identity");
    }
    return request.executeMediaAsInputStream();
  }
  catch(IOException e) {
    throw new GoogleStorageExceptionMappingService().map("Download {0} failed", e, file);
  }
}
/**
 * Uploads random content, then closes the read stream without consuming it and
 * verifies no bytes were read — i.e. closing releases the HTTP entity cleanly.
 */
@Test
public void testReadCloseReleaseEntity() throws Exception {
  final Path container = new Path("cyberduck-test-eu", EnumSet.of(Path.Type.directory, Path.Type.volume));
  final Path file = new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
  final int length = 2048;
  final byte[] content = RandomUtils.nextBytes(length);
  final TransferStatus status = new TransferStatus().withLength(content.length);
  status.setChecksum(new SHA256ChecksumCompute().compute(new ByteArrayInputStream(content), status));
  final OutputStream out = new GoogleStorageWriteFeature(session).write(file, status, new DisabledConnectionCallback());
  new StreamCopier(new TransferStatus(), new TransferStatus()).transfer(new ByteArrayInputStream(content), out);
  final CountingInputStream in = new CountingInputStream(new GoogleStorageReadFeature(session).read(file, status, new DisabledConnectionCallback()));
  in.close();
  // Closing immediately must not pull any bytes off the wire.
  assertEquals(0L, in.getByteCount(), 0L);
  new GoogleStorageDeleteFeature(session).delete(Collections.singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * State machine for a tracked local resource. Performs per-event-type
 * bookkeeping, forwards the event to the LocalizedResource itself, then
 * handles post-event cleanup (RELEASE) and persistence (LOCALIZED).
 */
@Override
public synchronized void handle(ResourceEvent event) {
  LocalResourceRequest req = event.getLocalResourceRequest();
  LocalizedResource rsrc = localrsrc.get(req);
  switch (event.getType()) {
  case LOCALIZED:
    if (useLocalCacheDirectoryManager) {
      inProgressLocalResourcesMap.remove(req);
    }
    break;
  case REQUEST:
    // A cached entry whose on-disk file has vanished is localized again.
    if (rsrc != null && (!isResourcePresent(rsrc))) {
      LOG.info("Resource " + rsrc.getLocalPath() + " is missing, localizing it again");
      removeResource(req);
      rsrc = null;
    }
    if (null == rsrc) {
      rsrc = new LocalizedResource(req, dispatcher);
      localrsrc.put(req, rsrc);
    }
    break;
  case RELEASE:
    if (null == rsrc) {
      // The container sent a release event on a resource which
      // 1) Failed
      // 2) Removed for some reason (ex. disk is no longer accessible)
      ResourceReleaseEvent relEvent = (ResourceReleaseEvent) event;
      LOG.info("Container " + relEvent.getContainer()
          + " sent RELEASE event on a resource request " + req
          + " not present in cache.");
      return;
    }
    break;
  case LOCALIZATION_FAILED:
    /*
     * If resource localization fails then Localized resource will be
     * removed from local cache.
     */
    removeResource(req);
    break;
  case RECOVERED:
    if (rsrc != null) {
      LOG.warn("Ignoring attempt to recover existing resource " + rsrc);
      return;
    }
    rsrc = recoverResource(req, (ResourceRecoveredEvent) event);
    localrsrc.put(req, rsrc);
    break;
  }
  if (rsrc == null) {
    LOG.warn("Received " + event.getType() + " event for request " + req
        + " but localized resource is missing");
    return;
  }
  rsrc.handle(event);

  // Remove the resource if its downloading and its reference count has
  // become 0 after RELEASE. This maybe because a container was killed while
  // localizing and no other container is referring to the resource.
  // NOTE: This should NOT be done for public resources since the
  // download is not associated with a container-specific localizer.
  if (event.getType() == ResourceEventType.RELEASE) {
    if (rsrc.getState() == ResourceState.DOWNLOADING
        && rsrc.getRefCount() <= 0
        && rsrc.getRequest().getVisibility() != LocalResourceVisibility.PUBLIC) {
      removeResource(req);
    }
  }

  if (event.getType() == ResourceEventType.LOCALIZED) {
    if (rsrc.getLocalPath() != null) {
      try {
        // Persist successful localization so it survives an NM restart.
        stateStore.finishResourceLocalization(user, appId, buildLocalizedResourceProto(rsrc));
      } catch (IOException ioe) {
        LOG.error("Error storing resource state for " + rsrc, ioe);
      }
    } else {
      LOG.warn("Resource " + rsrc + " localized without a location");
    }
  }
}
/**
 * Releases a PUBLIC resource while it is still downloading, then lets the
 * download complete; the tracker must cope with the out-of-order sequence.
 */
@Test
@SuppressWarnings("unchecked")
public void testReleaseWhileDownloading() throws Exception {
  String user = "testuser";
  DrainDispatcher dispatcher = null;
  try {
    Configuration conf = new Configuration();
    dispatcher = createDispatcher(conf);
    EventHandler<LocalizerEvent> localizerEventHandler = mock(EventHandler.class);
    EventHandler<LocalizerEvent> containerEventHandler = mock(EventHandler.class);
    dispatcher.register(LocalizerEventType.class, localizerEventHandler);
    dispatcher.register(ContainerEventType.class, containerEventHandler);
    ContainerId cId = BuilderUtils.newContainerId(1, 1, 1, 1);
    LocalizerContext lc = new LocalizerContext(user, cId, null);
    LocalResourceRequest req = createLocalResourceRequest(user, 1, 1, LocalResourceVisibility.PUBLIC);
    LocalizedResource lr = createLocalizedResource(req, dispatcher);
    ConcurrentMap<LocalResourceRequest, LocalizedResource> localrsrc =
        new ConcurrentHashMap<LocalResourceRequest, LocalizedResource>();
    localrsrc.put(req, lr);
    LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user, null,
        dispatcher, localrsrc, false, conf, new NMNullStateStoreService(), null);
    // request the resource
    ResourceEvent reqEvent = new ResourceRequestEvent(req, LocalResourceVisibility.PUBLIC, lc);
    tracker.handle(reqEvent);
    // release the resource
    ResourceEvent relEvent = new ResourceReleaseEvent(req, cId);
    tracker.handle(relEvent);
    // download completing after release
    ResourceLocalizedEvent rle = new ResourceLocalizedEvent(req, new Path("file:///tmp/r1"), 1);
    tracker.handle(rle);
    dispatcher.await();
  } finally {
    if (dispatcher != null) {
      dispatcher.stop();
    }
  }
}
// Accessor for the numeric identifier backing this instance.
public long identifier() { return identifier; }
// The identifier passed at construction is returned unchanged by identifier().
@Test void identifier() { LocalComponentIdDrlSession retrieved = new LocalComponentIdDrlSession(basePath, identifier); assertThat(retrieved.identifier()).isEqualTo(identifier); }
/**
 * Builds the Derby paging query for group capacity rows: seek past the
 * ID bound from the context, then fetch one page of (id, group_id).
 */
@Override
public MapperResult selectGroupInfoBySize(MapperContext context) {
    // Keyset pagination: first placeholder is the id lower bound, second the page size.
    final String query =
            "SELECT id, group_id FROM group_capacity WHERE id > ? OFFSET 0 ROWS FETCH NEXT ? ROWS ONLY";
    return new MapperResult(
            query,
            CollectionUtils.list(context.getWhereParameter(FieldConstant.ID), context.getPageSize()));
}
/** Verifies the Derby paging SQL and its ordered parameter list. */
@Test
void testSelectGroupInfoBySize() {
    Object id = 1;
    context.putWhereParameter(FieldConstant.ID, id);
    MapperResult mapperResult = groupCapacityMapperByDerby.selectGroupInfoBySize(context);
    assertEquals("SELECT id, group_id FROM group_capacity WHERE id > ? OFFSET 0 ROWS FETCH NEXT ? ROWS ONLY",
            mapperResult.getSql());
    // NOTE(review): GMT_CREATE is put after the result was already built, so it
    // cannot influence mapperResult — looks like leftover copy-paste; confirm.
    context.putWhereParameter(FieldConstant.GMT_CREATE, createTime);
    assertArrayEquals(new Object[] {id, pageSize}, mapperResult.getParamList().toArray());
}
/**
 * Dispatches the transaction control statement to the matching handler and
 * returns an update response header.
 *
 * <p>COMMIT is special-cased: it resolves its own SQLStatement before
 * committing and wraps that statement instead of the generic tclStatement.
 *
 * @throws SQLException including SQLFeatureNotSupportedException for
 *     operation types without a handler
 */
@Override
public ResponseHeader execute() throws SQLException {
  switch (operationType) {
    case BEGIN:
      handleBegin();
      break;
    case SAVEPOINT:
      handleSavepoint();
      break;
    case ROLLBACK_TO_SAVEPOINT:
      handleRollbackToSavepoint();
      break;
    case RELEASE_SAVEPOINT:
      handleReleaseSavepoint();
      break;
    case COMMIT:
      // Returns early with the commit-specific statement in the header.
      SQLStatement sqlStatement = getSQLStatementByCommit();
      backendTransactionManager.commit();
      return new UpdateResponseHeader(sqlStatement);
    case ROLLBACK:
      backendTransactionManager.rollback();
      break;
    case SET_AUTOCOMMIT:
      handleSetAutoCommit();
      break;
    default:
      throw new SQLFeatureNotSupportedException(operationType.name());
  }
  return new UpdateResponseHeader(tclStatement);
}
/** A BEGIN handled through the backend produces an UpdateResponseHeader. */
@Test
void assertExecute() throws SQLException {
  ProxyDatabaseConnectionManager databaseConnectionManager = mock(ProxyDatabaseConnectionManager.class);
  when(connectionSession.getDatabaseConnectionManager()).thenReturn(databaseConnectionManager);
  when(databaseConnectionManager.getConnectionSession()).thenReturn(connectionSession);
  ContextManager contextManager = mockContextManager();
  when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager);
  assertThat(new TransactionBackendHandler(mock(TCLStatement.class), TransactionOperationType.BEGIN, connectionSession).execute(),
      instanceOf(UpdateResponseHeader.class));
}
/**
 * Initializes and runs the namespace processor, mapping failures to exit
 * statuses; always shuts down the dispatcher.
 *
 * @return the processor's exit status, or ILLEGAL_ARGUMENTS / IO_EXCEPTION
 *     when the corresponding exception is caught
 */
private ExitStatus run() {
  try {
    init();
    return new Processor().processNamespace().getExitStatus();
  } catch (IllegalArgumentException e) {
    System.out.println(e + ". Exiting ...");
    return ExitStatus.ILLEGAL_ARGUMENTS;
  } catch (IOException e) {
    // IO failures are also logged, not only printed to stdout.
    System.out.println(e + ". Exiting ...");
    LOG.error(e + ". Exiting ...");
    return ExitStatus.IO_EXCEPTION;
  } finally {
    dispatcher.shutdownNow();
  }
}
/**
 * HDFS-8147: deletes a block file so every move attempt fails, then checks
 * the mover gives up with NO_MOVE_PROGRESS after the configured retries.
 */
@Test
public void testMoverFailedRetry() throws Exception {
  // HDFS-8147
  final Configuration conf = new HdfsConfiguration();
  initConf(conf);
  conf.set(DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY, "2");
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(3)
      .storageTypes(
          new StorageType[][] {{StorageType.DISK, StorageType.ARCHIVE},
              {StorageType.DISK, StorageType.ARCHIVE},
              {StorageType.DISK, StorageType.ARCHIVE}}).build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String file = "/testMoverFailedRetry";
    // write to DISK
    final FSDataOutputStream out = dfs.create(new Path(file), (short) 2);
    out.writeChars("testMoverFailedRetry");
    out.close();
    // Delete block file so, block move will fail with FileNotFoundException
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
    cluster.corruptBlockOnDataNodesByDeletingBlockFile(lb.getBlock());
    // move to ARCHIVE
    dfs.setStoragePolicy(new Path(file), "COLD");
    int rc = ToolRunner.run(conf, new Mover.Cli(), new String[] {"-p", file.toString()});
    Assert.assertEquals("Movement should fail after some retry",
        ExitStatus.NO_MOVE_PROGRESS.getExitCode(), rc);
  } finally {
    cluster.shutdown();
  }
}
/**
 * Rebuilds the command topic from a backup: drop any existing topic,
 * recreate it, then replay the backed-up commands into it.
 *
 * @param backupCommands ordered key/value command records to replay
 */
public void restore(final List<Pair<byte[], byte[]>> backupCommands) {
  // Delete the command topic
  deleteCommandTopicIfExists();
  // Create the command topic
  KsqlInternalTopicUtils.ensureTopic(commandTopicName, serverConfig, topicClient);
  // Restore the commands
  restoreCommandTopic(backupCommands);
}
/**
 * An InterruptedException while waiting on the second send must abort the
 * transaction, close the producer, and surface as a KsqlException.
 */
@Test
public void shouldThrowWhenRestoreIsInterrupted() throws Exception {
  // Given:
  when(topicClient.isTopicExists(COMMAND_TOPIC_NAME)).thenReturn(false);
  doThrow(new InterruptedException("fail")).when(future2).get();
  // When:
  final Exception e = assertThrows(
      KsqlException.class,
      () -> restoreCommandTopic.restore(BACKUP_COMMANDS));
  // Then:
  assertThat(e.getMessage(), containsString("Restore process was interrupted."));
  verifyCreateCommandTopic();
  // Exact producer lifecycle: commit the first record's transaction, then
  // abort and close when the second record's future fails.
  final InOrder inOrder = inOrder(kafkaProducer, future1, future2);
  inOrder.verify(kafkaProducer).initTransactions();
  inOrder.verify(kafkaProducer).beginTransaction();
  inOrder.verify(kafkaProducer).send(RECORD_1);
  inOrder.verify(future1).get();
  inOrder.verify(kafkaProducer).commitTransaction();
  inOrder.verify(kafkaProducer).beginTransaction();
  inOrder.verify(kafkaProducer).send(RECORD_2);
  inOrder.verify(future2).get();
  inOrder.verify(kafkaProducer).abortTransaction();
  inOrder.verify(kafkaProducer).close();
  verifyNoMoreInteractions(kafkaProducer, future1, future2);
  verifyNoMoreInteractions(future3);
}
public void createControlFile( String filename, Object[] row, OraBulkLoaderMeta meta ) throws KettleException { FileWriter fw = null; try { File controlFile = new File( getFileObject( filename, getTransMeta() ).getURL().getFile() ); // Need to ensure that the parent directory they set exists for the control file. controlFile.getParentFile().mkdirs(); controlFile.createNewFile(); fw = new FileWriter( controlFile ); fw.write( getControlFileContents( meta, getInputRowMeta(), row ) ); } catch ( IOException ex ) { throw new KettleException( ex.getMessage(), ex ); } finally { try { if ( fw != null ) { fw.close(); } } catch ( Exception ex ) { // Ignore errors } } }
/**
 * Stubs the control-file contents, runs createControlFile against a temp
 * path, and verifies the file exists with exactly those contents.
 */
@Test
public void testCreateControlFile() throws Exception {
  // Create a tempfile, so we can use the temp file path when we run the createControlFile method
  String tempTrueControlFilepath = tempControlFile.getAbsolutePath() + "A.txt";
  String expectedControlContents = "test";
  OraBulkLoaderMeta oraBulkLoaderMeta = mock( OraBulkLoaderMeta.class );
  RowMetaInterface rowMetaInterface = mock( RowMetaInterface.class );
  Object[] objectRow = {};
  doReturn( rowMetaInterface ).when( oraBulkLoader ).getInputRowMeta();
  doReturn( expectedControlContents ).when( oraBulkLoader )
      .getControlFileContents( oraBulkLoaderMeta, rowMetaInterface, objectRow );
  oraBulkLoader.createControlFile( tempTrueControlFilepath, objectRow, oraBulkLoaderMeta );
  assertTrue( Files.exists( Paths.get( tempTrueControlFilepath ) ) );
  File tempTrueControlFile = new File( tempTrueControlFilepath );
  String tempTrueControlFileContents = new String( Files.readAllBytes( tempTrueControlFile.toPath() ) );
  assertEquals( expectedControlContents, tempTrueControlFileContents );
  // Clean up the file the test created.
  tempTrueControlFile.delete();
}
/**
 * Returns a validator that accepts only a String encoding a map whose
 * values all parse as doubles; any other shape fails with ConfigException.
 */
public static Validator mapWithDoubleValue() {
  return (name, val) -> {
    if (!(val instanceof String)) {
      throw new ConfigException(name, val, "Must be a string");
    }
    final Map<String, String> parsed = KsqlConfig.parseStringAsMap(name, (String) val);
    // Each map value must be a parseable double.
    for (final Map.Entry<String, String> entry : parsed.entrySet()) {
      final String candidate = entry.getValue();
      try {
        Double.parseDouble(candidate);
      } catch (final NumberFormatException e) {
        throw new ConfigException(name, candidate, "Not a double");
      }
    }
  };
}
/** A map entry with a non-numeric value fails validation with "Not a double". */
@Test
public void shouldThrowOnBadDoubleValueInMap() {
  // Given:
  final Validator validator = ConfigValidators.mapWithDoubleValue();
  // When:
  final Exception e = assertThrows(
      ConfigException.class,
      () -> validator.ensureValid("propName", "foo:abc")
  );
  // Then:
  assertThat(e.getMessage(),
      containsString("Invalid value abc for configuration propName: Not a double"));
}
/**
 * Builds the proc result for the compute-node listing: headers from
 * getMetadata(), one row per compute node.
 *
 * @return a BaseProcResult populated with defensive copies of each node row
 * @throws AnalysisException propagated from underlying lookups
 */
@Override
public ProcResult fetchResult() throws AnalysisException {
  BaseProcResult result = new BaseProcResult();
  result.setNames(getMetadata());
  // Copy each row via the copy constructor (replaces the verbose
  // presize-then-addAll) so the result cannot alias the shared node lists.
  for (List<String> computeNodeInfo : getClusterComputeNodesInfos()) {
    result.addRow(new ArrayList<>(computeNodeInfo));
  }
  return result;
}
/**
 * Exercises fetchResult() in shared-data mode with a mocked warehouse
 * manager and two compute node ids; only checks it completes without error.
 */
@Test
public void testWarehouse(@Mocked WarehouseManager warehouseManager) throws AnalysisException {
  new Expectations() {
    {
      systemInfoService.getComputeNodeIds(anyBoolean);
      result = Lists.newArrayList(1000L, 1001L);
    }
  };
  new Expectations() {
    {
      RunMode.isSharedDataMode();
      minTimes = 0;
      result = true;
      globalStateMgr.getWarehouseMgr();
      minTimes = 0;
      result = warehouseManager;
      warehouseManager.getWarehouse(anyLong);
      minTimes = 0;
      result = new DefaultWarehouse(WarehouseManager.DEFAULT_WAREHOUSE_ID,
          WarehouseManager.DEFAULT_WAREHOUSE_NAME);
    }
  };
  ComputeNodeProcDir dir = new ComputeNodeProcDir(systemInfoService);
  // NOTE(review): result is unused — presumably this is a smoke test; consider asserting on it.
  ProcResult result = dir.fetchResult();
}
/**
 * Collects the scannable classes from the JAX-RS Application: its declared
 * classes plus the runtime classes of its singletons, minus ignored names.
 */
@Override
public Set<Class<?>> classes() {
  final Set<Class<?>> discovered = new HashSet<>();
  if (application == null) {
    // No application configured: nothing to scan.
    return discovered;
  }
  final Set<Class<?>> declared = application.getClasses();
  if (declared != null) {
    for (final Class<?> candidate : declared) {
      if (!isIgnored(candidate.getName())) {
        discovered.add(candidate);
      }
    }
  }
  final Set<Object> singletons = application.getSingletons();
  if (singletons != null) {
    for (final Object singleton : singletons) {
      if (!isIgnored(singleton.getClass().getName())) {
        discovered.add(singleton.getClass());
      }
    }
  }
  return discovered;
}
// Scanner backed only by an Application should yield exactly the one declared resource class.
@Test(description = "scan classes from Application only") public void shouldScanClassesApplicationOnly() throws Exception { assertEquals(scanner.classes().size(), 1); assertTrue(scanner.classes().contains(ResourceInPackageA.class)); }
/**
 * Detects the host OS family from the {@code os.name} system property.
 *
 * @return a 3-flag array: [0]=Windows, [1]=Linux, [2]=macOS; all false when
 *     the name matches none of them
 */
// visible for testing
static boolean[] parseOSName() {
  boolean[] result = new boolean[] { false, false, false };
  // Locale.ROOT avoids the Turkish dotless-i bug: under a tr_TR default
  // locale, "WINDOWS".toLowerCase() yields "wındows" and would never match.
  String osName = System.getProperty("os.name").toLowerCase(java.util.Locale.ROOT);
  if (osName.contains("windows")) {
    result[0] = true;
  } else if (osName.contains("linux")) {
    result[1] = true;
  } else if (osName.contains("mac")) {
    result[2] = true;
  }
  return result;
}
/**
 * Overrides os.name to hit each OS branch, restoring the original property
 * afterwards so other tests are unaffected.
 */
@Test
public void parseOSName() {
  String old = System.getProperty("os.name");
  try {
    System.setProperty("os.name", "windows7");
    Assert.assertTrue(SystemInfo.parseOSName()[0]);
    System.setProperty("os.name", "linux123");
    Assert.assertTrue(SystemInfo.parseOSName()[1]);
    System.setProperty("os.name", "mac osx123");
    Assert.assertTrue(SystemInfo.parseOSName()[2]);
  } finally {
    // Restore (or clear) the property even when an assertion fails.
    if (old == null) {
      System.clearProperty("os.name");
    } else {
      System.setProperty("os.name", old);
    }
  }
}
/**
 * Opens a download stream over an FTP data connection: switches to binary
 * mode, applies a restart offset for resumed transfers, and wraps the stream
 * so the final FTP reply is validated on close.
 *
 * @throws BackgroundException when binary mode cannot be set or I/O fails
 */
@Override
public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
  try {
    // Binary transfer type is mandatory; a refusal is surfaced as an FTP error.
    if(!session.getClient().setFileType(FTP.BINARY_FILE_TYPE)) {
      throw new FTPException(session.getClient().getReplyCode(), session.getClient().getReplyString());
    }
    // Resume support: REST offset before opening the data connection.
    if(status.isAppend()) {
      session.getClient().setRestartOffset(status.getOffset());
    }
    final InputStream in = new DataConnectionActionExecutor(session).data(new DataConnectionAction<InputStream>() {
      @Override
      public InputStream execute() throws BackgroundException {
        try {
          return session.getClient().retrieveFileStream(file.getAbsolute());
        }
        catch(IOException e) {
          throw new FTPExceptionMappingService().map(e);
        }
      }
    });
    return new ReadReplyInputStream(in, status);
  }
  catch(IOException e) {
    throw new FTPExceptionMappingService().map("Download {0} failed", e, file);
  }
}
/**
 * Uploads 2KiB of random data, then reads it back from offset 2 with a
 * limited length and checks the bytes match the corresponding source slice.
 */
@Test
public void testReadRange() throws Exception {
  final Path test = new Path(new FTPWorkdirService(session).find(), UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
  new FTPTouchFeature(session).touch(test, new TransferStatus());
  final byte[] content = RandomUtils.nextBytes(2048);
  final OutputStream out = new FTPWriteFeature(session).write(test, new TransferStatus().withLength(content.length), new DisabledConnectionCallback());
  assertNotNull(out);
  new StreamCopier(new TransferStatus(), new TransferStatus()).transfer(new ByteArrayInputStream(content), out);
  out.close();
  final TransferStatus status = new TransferStatus();
  // Partial read with offset and not full content length
  final long limit = content.length - 100;
  status.setLength(limit);
  status.setAppend(true);
  final long offset = 2L;
  status.setOffset(offset);
  final InputStream in = new FTPReadFeature(session).read(test, status, new DisabledConnectionCallback());
  assertNotNull(in);
  final ByteArrayOutputStream download = new ByteArrayOutputStream();
  new StreamCopier(status, status).withLimit(limit).transfer(in, download);
  // Expected bytes: content[offset .. offset+limit).
  final byte[] reference = new byte[(int) limit];
  System.arraycopy(content, (int) offset, reference, 0, (int) limit);
  assertArrayEquals(reference, download.toByteArray());
  new FTPDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Adds the given rule to the cache; a null argument is silently ignored.
 *
 * @param ruleData rule to cache, may be null
 */
public void cacheRuleData(final RuleData ruleData) {
    // Explicit null guard instead of the Optional.ofNullable(...).ifPresent(...) chain.
    if (ruleData != null) {
        ruleAccept(ruleData);
    }
}
/**
 * Two rules cached under the same selector id must accumulate, in insertion
 * order, in the internal selector-id -> rule-list map (read via reflection).
 */
@Test
public void testCacheRuleData() throws NoSuchFieldException, IllegalAccessException {
  RuleData firstCachedRuleData = RuleData.builder().id("1").selectorId(mockSelectorId1).sort(1).build();
  BaseDataCache.getInstance().cacheRuleData(firstCachedRuleData);
  ConcurrentHashMap<String, List<RuleData>> ruleMap = getFieldByName(ruleMapStr);
  assertEquals(Lists.newArrayList(firstCachedRuleData), ruleMap.get(mockSelectorId1));
  RuleData secondCachedRuleData = RuleData.builder().id("2").selectorId(mockSelectorId1).sort(2).build();
  BaseDataCache.getInstance().cacheRuleData(secondCachedRuleData);
  assertEquals(Lists.newArrayList(firstCachedRuleData, secondCachedRuleData), ruleMap.get(mockSelectorId1));
}
/**
 * Selects the message router implementation from the producer configuration.
 *
 * <p>CustomPartition requires a non-null user-supplied router; SinglePartition
 * pins all messages to one randomly chosen partition; anything else falls
 * through to round-robin (the default).
 */
private MessageRouter getMessageRouter() {
  MessageRouter messageRouter;
  MessageRoutingMode messageRouteMode = conf.getMessageRoutingMode();
  switch (messageRouteMode) {
    case CustomPartition:
      messageRouter = Objects.requireNonNull(conf.getCustomMessageRouter());
      break;
    case SinglePartition:
      // Random start partition spreads load across producers.
      messageRouter = new SinglePartitionMessageRouterImpl(
          ThreadLocalRandom.current().nextInt(topicMetadata.numPartitions()), conf.getHashingScheme());
      break;
    case RoundRobinPartition:
    default:
      // Switch frequency is configured in micros but the router takes millis.
      messageRouter = new RoundRobinPartitionMessageRouterImpl(
          conf.getHashingScheme(),
          ThreadLocalRandom.current().nextInt(topicMetadata.numPartitions()),
          conf.isBatchingEnabled(),
          TimeUnit.MICROSECONDS.toMillis(conf.batchingPartitionSwitchFrequencyIntervalMicros()));
  }
  return messageRouter;
}
// RoundRobinPartition routing mode must yield a RoundRobinPartitionMessageRouterImpl.
@Test
public void testRoundRobinPartitionMessageRouterImplInstance() throws NoSuchFieldException, IllegalAccessException {
  ProducerConfigurationData producerConfigurationData = new ProducerConfigurationData();
  producerConfigurationData.setMessageRoutingMode(MessageRoutingMode.RoundRobinPartition);
  MessageRouter messageRouter = getMessageRouter(producerConfigurationData);
  assertTrue(messageRouter instanceof RoundRobinPartitionMessageRouterImpl);
}
/**
 * Asserts that {@code obj} is non-null.
 *
 * @param obj     the value to check
 * @param message exception message used when the check fails
 * @throws IllegalArgumentException if {@code obj} is null
 */
public static void notNull(Object obj, String message) {
    if (obj != null) {
        return;
    }
    throw new IllegalArgumentException(message);
}
// NOTE(review): this exercises an overload taking a pre-built exception (not
// the String-message overload) — it expects the supplied IllegalStateException
// to be thrown as-is for a null argument; confirm against that overload.
@Test void testNotNull2() { Assertions.assertThrows( IllegalStateException.class, () -> notNull(null, new IllegalStateException("null object"))); }
// Returns a fresh cursor over this hash-slot array's 16-byte-key slots.
@Override public HashSlotCursor16byteKey cursor() { return new CursorLongKey2(); }
// A cursor over an empty hash-slot array must report no elements on the first advance().
@Test public void testCursor_advance_whenEmpty() { HashSlotCursor16byteKey cursor = hsa.cursor(); assertFalse(cursor.advance()); }
/**
 * Assigns replicas for every partition of the requested topic via rack-aware
 * striped placement.
 *
 * @throws InvalidReplicationFactorException when the replication factor is
 *     non-positive, all brokers are fenced, or there are too few brokers
 */
@Override
public TopicAssignment place(
    PlacementSpec placement,
    ClusterDescriber cluster
) throws InvalidReplicationFactorException {
  RackList rackList = new RackList(random, cluster.usableBrokers());
  // Validate before placing anything.
  throwInvalidReplicationFactorIfNonPositive(placement.numReplicas());
  throwInvalidReplicationFactorIfZero(rackList.numUnfencedBrokers());
  throwInvalidReplicationFactorIfTooFewBrokers(placement.numReplicas(), rackList.numTotalBrokers());
  List<List<Integer>> placements = new ArrayList<>(placement.numPartitions());
  for (int partition = 0; partition < placement.numPartitions(); partition++) {
    placements.add(rackList.place(placement.numReplicas()));
  }
  return new TopicAssignment(
      placements.stream().map(replicas -> new PartitionAssignment(replicas, cluster)).collect(Collectors.toList())
  );
}
// With every usable broker fenced, placement must fail fast with a clear message.
@Test
public void testAllBrokersFenced() {
  MockRandom random = new MockRandom();
  StripedReplicaPlacer placer = new StripedReplicaPlacer(random);
  assertEquals("All brokers are currently fenced.",
      assertThrows(InvalidReplicationFactorException.class,
          () -> place(placer, 0, 1, (short) 1, Arrays.asList(
              new UsableBroker(11, Optional.of("1"), true),
              new UsableBroker(10, Optional.of("1"), true)))).getMessage());
}
/**
 * Decorates a supplier so that any Throwable it raises is mapped to a
 * fallback value by the given handler.
 *
 * @param supplier         the supplier to protect
 * @param exceptionHandler maps the thrown Throwable to a recovery value
 * @return a supplier that never propagates the original Throwable itself
 */
public static <T> CheckedSupplier<T> recover(CheckedSupplier<T> supplier, CheckedFunction<Throwable, T> exceptionHandler) {
    return () -> {
        T value;
        try {
            value = supplier.get();
        } catch (Throwable throwable) {
            // Every failure, including Errors, is routed through the handler.
            value = exceptionHandler.apply(throwable);
        }
        return value;
    };
}
// Recovery scoped to IllegalArgumentException must NOT catch a RuntimeException,
// so the original exception propagates out of the decorated supplier.
@Test(expected = RuntimeException.class)
public void shouldRethrowException2() throws Throwable {
  CheckedSupplier<String> callable = () -> {
    throw new RuntimeException("BAM!");
  };
  CheckedSupplier<String> callableWithRecovery = CheckedFunctionUtils.recover(callable, IllegalArgumentException.class, (ex) -> "Bla");
  callableWithRecovery.get();
}
/**
 * Resolves the framework/application/module model references visible from
 * this processor's scope model. Narrower scopes expose their enclosing
 * wider-scope models; the reverse is never populated.
 */
private void initialize() {
  // NOTE: Do not create a new model or use the default application/module model here!
  // Only the visible and only matching scope model can be injected, that is, module -> application -> framework.
  // The converse is a one-to-many relationship and cannot be injected.
  // One framework may have multiple applications, and one application may have multiple modules.
  // So, the spi extension/bean of application scope can be injected its application model and framework model,
  // but the spi extension/bean of framework scope cannot be injected an application or module model.
  if (scopeModel instanceof FrameworkModel) {
    frameworkModel = (FrameworkModel) scopeModel;
  } else if (scopeModel instanceof ApplicationModel) {
    applicationModel = (ApplicationModel) scopeModel;
    frameworkModel = applicationModel.getFrameworkModel();
  } else if (scopeModel instanceof ModuleModel) {
    moduleModel = (ModuleModel) scopeModel;
    applicationModel = moduleModel.getApplicationModel();
    frameworkModel = applicationModel.getFrameworkModel();
  }
}
/**
 * Verifies scope resolution for each model kind: a framework-scope processor
 * sees only the framework; application scope additionally sees its framework;
 * module scope sees module, application and framework.
 */
@Test
void testInitialize() {
    ScopeModelAwareExtensionProcessor processor1 = new ScopeModelAwareExtensionProcessor(frameworkModel);
    Assertions.assertEquals(processor1.getFrameworkModel(), frameworkModel);
    Assertions.assertEquals(processor1.getScopeModel(), frameworkModel);
    Assertions.assertNull(processor1.getApplicationModel());
    Assertions.assertNull(processor1.getModuleModel());

    ScopeModelAwareExtensionProcessor processor2 = new ScopeModelAwareExtensionProcessor(applicationModel);
    Assertions.assertEquals(processor2.getApplicationModel(), applicationModel);
    Assertions.assertEquals(processor2.getScopeModel(), applicationModel);
    Assertions.assertEquals(processor2.getFrameworkModel(), frameworkModel);
    Assertions.assertNull(processor2.getModuleModel());

    ScopeModelAwareExtensionProcessor processor3 = new ScopeModelAwareExtensionProcessor(moduleModel);
    Assertions.assertEquals(processor3.getModuleModel(), moduleModel);
    Assertions.assertEquals(processor3.getScopeModel(), moduleModel);
    // Fixed copy-paste bug: these two assertions previously re-checked
    // processor2 instead of the module-scope processor3.
    Assertions.assertEquals(processor3.getApplicationModel(), applicationModel);
    Assertions.assertEquals(processor3.getFrameworkModel(), frameworkModel);
}
/**
 * Validates a reservation update: the reservation must exist in the
 * reservation system and the new definition must be valid against its plan.
 *
 * @return the plan owning the reservation being updated
 * @throws YarnException when the reservation or its definition is invalid
 */
public Plan validateReservationUpdateRequest(
    ReservationSystem reservationSystem, ReservationUpdateRequest request)
    throws YarnException {
  ReservationId reservationId = request.getReservationId();
  Plan plan = validateReservation(reservationSystem, reservationId,
      AuditConstants.UPDATE_RESERVATION_REQUEST);
  validateReservationDefinition(reservationId,
      request.getReservationDefinition(), plan,
      AuditConstants.UPDATE_RESERVATION_REQUEST);
  return plan;
}
// A well-formed update request must validate without exception and return a non-null plan.
@Test
public void testUpdateReservationNormal() {
  ReservationUpdateRequest request = createSimpleReservationUpdateRequest(1, 1, 1, 5, 3);
  Plan plan = null;
  try {
    plan = rrValidator.validateReservationUpdateRequest(rSystem, request);
  } catch (YarnException e) {
    Assert.fail(e.getMessage());
  }
  Assert.assertNotNull(plan);
}
// Issues a synchronous GET to the given path, with no request body, and
// deserializes the response body into the requested type.
private <T> RestResponse<T> get(final String path, final Class<T> type) { return executeRequestSync(HttpMethod.GET, path, null, r -> deserialize(r.getBody(), type), Optional.empty()); }
/**
 * A chunked query POST that hits a non-OK status and then an exception in
 * the handler must surface an error mentioning the POST path.
 */
@Test
public void shouldPostQueryRequest_chunkHandler_nonOkStatusCode() {
  when(httpClientResponse.statusCode()).thenReturn(BAD_REQUEST.code());
  ksqlTarget = new KsqlTarget(httpClient, socketAddress, localProperties,
      authHeader, HOST, Collections.emptyMap(), RequestOptions.DEFAULT_TIMEOUT);
  executor.submit(this::expectPostQueryRequestChunkHandler);
  assertThatEventually(requestStarted::get, is(true));
  // Simulate a transport failure mid-stream.
  exceptionCaptor.getValue().handle(new RuntimeException("Error!"));
  assertThatEventually(error::get, notNullValue());
  assertThat(error.get().getMessage(), containsString("Error issuing POST to KSQL server. path:/query"));
}
/**
 * Derives concurrency tag permits for a step at runtime.
 *
 * <p>When instance_step_concurrency is enabled: an explicit step_concurrency
 * yields a workflow-level tag, and leaf steps additionally get a tag keyed by
 * correlation id. When disabled: a workflow-level tag (defaulted if unset)
 * plus a per-step default tag are always produced.
 */
@JsonIgnore
public List<Tag> deriveRuntimeTagPermits(StepRuntimeSummary runtimeSummary) {
  List<Tag> runtimeTagPermits = new ArrayList<>();
  Long stepConcurrency = runProperties.getStepConcurrency();
  if (instanceStepConcurrency != null) {
    // instance_step_concurrency is enabled.
    // if step_concurrency is set, then set a tag permit for it
    if (stepConcurrency != null) {
      runtimeTagPermits.add(buildATag(workflowId, stepConcurrency));
    }
    // only if instance_step_concurrency is set for a leaf step, then add a tag permit for it.
    if (runtimeSummary.getType().isLeaf()) {
      runtimeTagPermits.add(buildATag(correlationId, instanceStepConcurrency));
    }
  } else {
    // instance_step_concurrency is disabled.
    // if step_concurrency is unset, use the default, then set a tag permit for it
    if (stepConcurrency == null) {
      stepConcurrency = Defaults.DEFAULT_STEP_CONCURRENCY;
    }
    runtimeTagPermits.add(buildATag(workflowId, stepConcurrency));
    // always add a default step level step concurrency limit with the default value.
    runtimeTagPermits.add(
        buildATag(
            workflowId + ":" + runtimeSummary.getStepId(), Defaults.DEFAULT_STEP_CONCURRENCY));
  }
  return runtimeTagPermits;
}
@Test
public void testDeriveRuntimeTagPermits() throws Exception {
    WorkflowSummary summary =
        loadObject("fixtures/parameters/sample-wf-summary-params.json", WorkflowSummary.class);
    summary.setCorrelationId("correlation_id");
    // t1: workflow-level permit; t2: per-step default permit; t3: correlation-id permit.
    Tag t1 = new Tag();
    t1.setName(Constants.MAESTRO_PREFIX + summary.getWorkflowId());
    t1.setPermit((int) Defaults.DEFAULT_STEP_CONCURRENCY);
    Tag t2 = new Tag();
    t2.setName(Constants.MAESTRO_PREFIX + summary.getWorkflowId() + ":stepid");
    t2.setPermit((int) Defaults.DEFAULT_STEP_CONCURRENCY);
    Tag t3 = new Tag();
    t3.setName(Constants.MAESTRO_PREFIX + summary.getCorrelationId());
    t3.setPermit((int) Defaults.DEFAULT_INSTANCE_STEP_CONCURRENCY);
    StepRuntimeSummary runtimeSummary =
        StepRuntimeSummary.builder().stepId("stepid").type(StepType.NOOP).build();
    // set default (workflow level and step level) permits if no concurrences are set
    assertEquals(Arrays.asList(t1, t2), summary.deriveRuntimeTagPermits(runtimeSummary));
    // set permit for workflow level and step level step_concurrency
    summary.getRunProperties().setStepConcurrency(10L);
    t1.setPermit(10);
    assertEquals(Arrays.asList(t1, t2), summary.deriveRuntimeTagPermits(runtimeSummary));
    // set permit for instance_step_concurrency only when workflow level and step level
    // step_concurrency are not set
    summary.getRunProperties().setStepConcurrency(null);
    summary.setInstanceStepConcurrency(15L);
    t3.setPermit(15);
    assertEquals(Collections.singletonList(t3), summary.deriveRuntimeTagPermits(runtimeSummary));
    // set tag permits for both step_concurrency and instance_step_concurrency
    summary.getRunProperties().setStepConcurrency(10L);
    summary.setInstanceStepConcurrency(15L);
    t3.setPermit(15);
    assertEquals(Arrays.asList(t1, t3), summary.deriveRuntimeTagPermits(runtimeSummary));
    // don't set tag permits for instance_step_concurrency if non-leaf steps
    runtimeSummary = StepRuntimeSummary.builder().stepId("stepid").type(StepType.FOREACH).build();
    summary.getRunProperties().setStepConcurrency(10L);
    summary.setInstanceStepConcurrency(15L);
    assertEquals(Collections.singletonList(t1), summary.deriveRuntimeTagPermits(runtimeSummary));
    // don't set any tag permit if no step_concurrency set for non-leaf steps
    summary.getRunProperties().setStepConcurrency(null);
    summary.setInstanceStepConcurrency(15L);
    assertEquals(Collections.emptyList(), summary.deriveRuntimeTagPermits(runtimeSummary));
}
/**
 * Fetches all instances of the given service with no cluster filter
 * (delegates with an empty cluster list).
 *
 * @param serviceName name of the service to query
 * @return all registered instances of the service
 * @throws NacosException if the lookup fails
 */
@Override
public List<Instance> getAllInstances(String serviceName) throws NacosException {
    return getAllInstances(serviceName, new ArrayList<>());
}
@Test
void testGetAllInstances8() throws NacosException {
    // given
    String serviceName = "service1";
    String groupName = "group1";
    List<String> clusterList = Arrays.asList("cluster1", "cluster2");
    // when
    client.getAllInstances(serviceName, groupName, clusterList, false);
    // then: the proxy must be queried exactly once, with clusters joined by commas.
    verify(proxy, times(1)).queryInstancesOfService(serviceName, groupName, "cluster1,cluster2", false);
}
/**
 * Stops the scheduled executor and closes the underlying web3j service.
 * An {@link IOException} raised while closing is surfaced as an unchecked
 * exception (callers are not expected to recover from it).
 */
@Override
public void shutdown() {
    scheduledExecutorService.shutdown();
    try {
        web3jService.close();
    } catch (IOException e) {
        throw new RuntimeException("Failed to close web3j service", e);
    }
}
@Test
public void testStopExecutorOnShutdown() throws Exception {
    web3j.shutdown();
    // Shutdown must stop the scheduler AND close the underlying service.
    verify(scheduledExecutorService).shutdown();
    verify(service).close();
}
/**
 * Resolves which Kafka Streams host owns the state-store partition for the
 * given key. With shared runtimes enabled, the named-topology variant is used
 * (scoped by query id); otherwise the plain Kafka Streams lookup applies.
 *
 * @param key the key whose owning host is looked up
 * @return the key's query metadata, or {@code KeyQueryMetadata.NOT_AVAILABLE}
 *         if streams is not in a queryable state (client should retry)
 */
@VisibleForTesting
protected KeyQueryMetadata getKeyQueryMetadata(final KsqlKey key) {
    if (sharedRuntimesEnabled && kafkaStreams instanceof KafkaStreamsNamedTopologyWrapper) {
        return ((KafkaStreamsNamedTopologyWrapper) kafkaStreams)
            .queryMetadataForKey(storeName, key.getKey(), keySerializer, queryId);
    }
    try {
        return kafkaStreams.queryMetadataForKey(storeName, key.getKey(), keySerializer);
    } catch (IllegalStateException e) {
        // We may be in `PENDING_SHUTDOWN` here, which means that we should let the client retry,
        // as this will be happening during a roll.
        return KeyQueryMetadata.NOT_AVAILABLE;
    }
}
@Test
public void shouldUseNamedTopologyWhenSharedRuntimeIsEnabledForQueryMetadataForKey() {
    // Given: a locator built with sharedRuntimesEnabled=true and a named-topology wrapper.
    final KsLocator locator = new KsLocator(STORE_NAME, kafkaStreamsNamedTopologyWrapper,
        topology, keySerializer, LOCAL_HOST_URL, true, "queryId");

    // When:
    locator.getKeyQueryMetadata(KEY);

    // Then: the query-id-scoped named-topology lookup must be used.
    Mockito.verify(kafkaStreamsNamedTopologyWrapper)
        .queryMetadataForKey(STORE_NAME, KEY.getKey(), keySerializer, "queryId");
}
/**
 * Computes the best route from s to t plus up to {@code maxPaths - 1}
 * alternative routes, using a bidirectional CH search followed by a
 * via-node ("plateau") style enumeration of candidate detours.
 *
 * @param s start node id
 * @param t target node id
 * @return the best path first, followed by accepted alternatives; empty if
 *         no route exists at all
 */
List<AlternativeInfo> calcAlternatives(final int s, final int t) {
    // First, do a regular bidirectional route search
    checkAlreadyRun();
    init(s, 0, t, 0);
    runAlgo();
    final Path bestPath = extractPath();
    if (!bestPath.isFound()) {
        return Collections.emptyList();
    }
    alternatives.add(new AlternativeInfo(bestPath, 0));
    final ArrayList<PotentialAlternativeInfo> potentialAlternativeInfos = new ArrayList<>();
    // Index the backward search tree by node so forward-tree entries can be joined with it.
    final Map<Integer, SPTEntry> bestWeightMapByNode = new HashMap<>();
    bestWeightMapTo.forEach((IntObjectPredicate<SPTEntry>) (key, value) -> {
        bestWeightMapByNode.put(value.adjNode, value);
        return true;
    });
    bestWeightMapFrom.forEach((IntObjectPredicate<SPTEntry>) (wurst, fromSPTEntry) -> {
        SPTEntry toSPTEntry = bestWeightMapByNode.get(fromSPTEntry.adjNode);
        if (toSPTEntry == null)
            return true;
        // Discard candidates whose combined weight already exceeds the acceptance threshold.
        if (fromSPTEntry.getWeightOfVisitedPath() + toSPTEntry.getWeightOfVisitedPath() > bestPath.getWeight() * maxWeightFactor)
            return true;
        // This gives us a path s -> v -> t, but since we are using contraction hierarchies,
        // s -> v and v -> t need not be shortest paths. In fact, they can sometimes be pretty strange.
        // We still use this preliminary path to filter for shared path length with other alternatives,
        // so we don't have to work so much.
        Path preliminaryRoute = createPathExtractor().extract(fromSPTEntry, toSPTEntry, fromSPTEntry.getWeightOfVisitedPath() + toSPTEntry.getWeightOfVisitedPath());
        double preliminaryShare = calculateShare(preliminaryRoute);
        if (preliminaryShare > maxShareFactor) {
            return true;
        }
        assert fromSPTEntry.adjNode == toSPTEntry.adjNode;
        PotentialAlternativeInfo potentialAlternativeInfo = new PotentialAlternativeInfo();
        potentialAlternativeInfo.v = fromSPTEntry.adjNode;
        potentialAlternativeInfo.edgeIn = getIncomingEdge(fromSPTEntry);
        // Ranking heuristic: weight of the candidate plus a penalty for overlap with the best path.
        potentialAlternativeInfo.weight = 2 * (fromSPTEntry.getWeightOfVisitedPath() + toSPTEntry.getWeightOfVisitedPath()) + preliminaryShare;
        potentialAlternativeInfos.add(potentialAlternativeInfo);
        return true;
    });
    // Examine the most promising (lowest heuristic weight) candidates first.
    potentialAlternativeInfos.sort(Comparator.comparingDouble(o -> o.weight));
    for (PotentialAlternativeInfo potentialAlternativeInfo : potentialAlternativeInfos) {
        int v = potentialAlternativeInfo.v;
        int tailSv = potentialAlternativeInfo.edgeIn;
        // Okay, now we want the s -> v -> t shortest via-path, so we route s -> v and v -> t
        // and glue them together.
        DijkstraBidirectionEdgeCHNoSOD svRouter = new DijkstraBidirectionEdgeCHNoSOD(graph);
        final Path suvPath = svRouter.calcPath(s, v, ANY_EDGE, tailSv);
        extraVisitedNodes += svRouter.getVisitedNodes();
        int u = graph.getBaseGraph().getEdgeIteratorState(tailSv, v).getBaseNode();
        DijkstraBidirectionEdgeCHNoSOD vtRouter = new DijkstraBidirectionEdgeCHNoSOD(graph);
        final Path uvtPath = vtRouter.calcPath(u, t, tailSv, ANY_EDGE);
        Path path = concat(graph.getBaseGraph(), suvPath, uvtPath);
        extraVisitedNodes += vtRouter.getVisitedNodes();
        // Reject detours that are too long relative to the unshared part of the best path.
        double sharedDistanceWithShortest = sharedDistanceWithShortest(path);
        double detourLength = path.getDistance() - sharedDistanceWithShortest;
        double directLength = bestPath.getDistance() - sharedDistanceWithShortest;
        if (detourLength > directLength * maxWeightFactor) {
            continue;
        }
        // Reject alternatives that overlap too much with already-accepted paths.
        double share = calculateShare(path);
        if (share > maxShareFactor) {
            continue;
        }
        // This is the final test we need: Discard paths that are not "locally shortest" around v.
        // So move a couple of nodes to the left and right from v on our path,
        // route, and check if v is on the shortest path.
        final IntIndexedContainer svNodes = suvPath.calcNodes();
        int vIndex = svNodes.size() - 1;
        if (!tTest(path, vIndex))
            continue;
        alternatives.add(new AlternativeInfo(path, share));
        if (alternatives.size() >= maxPaths)
            break;
    }
    return alternatives;
}
@Test
public void testCalcAlternatives() {
    BaseGraph g = createTestGraph(em);
    // Loosen the acceptance thresholds so the small test graph yields alternatives.
    PMap hints = new PMap();
    hints.putObject("alternative_route.max_weight_factor", 4);
    hints.putObject("alternative_route.local_optimality_factor", 0.5);
    hints.putObject("alternative_route.max_paths", 4);
    RoutingCHGraph routingCHGraph = prepareCH(g);
    AlternativeRouteEdgeCH altDijkstra = new AlternativeRouteEdgeCH(routingCHGraph, hints);
    List<AlternativeRouteEdgeCH.AlternativeInfo> pathInfos = altDijkstra.calcAlternatives(5, 10);
    assertEquals(2, pathInfos.size());
    // Best path first, then the accepted alternative.
    assertEquals(IntArrayList.from(5, 6, 7, 8, 4, 10), pathInfos.get(0).path.calcNodes());
    assertEquals(IntArrayList.from(5, 1, 9, 2, 3, 4, 10), pathInfos.get(1).path.calcNodes());
    // 3 -> 4 -> 11 is forbidden
    // 6 -> 3 -> 4 is forbidden
}
/**
 * Generates an instance id for the given instance, using the generator type
 * configured on the instance or the default generator when none is configured.
 *
 * @param instance the instance to generate an id for
 * @return the generated instance id
 */
public static String generateInstanceId(Instance instance) {
    final String configured = instance.getInstanceIdGenerator();
    // Blank/absent generator type falls back to the platform default.
    final String generatorType = StringUtils.isBlank(configured)
            ? Constants.DEFAULT_INSTANCE_ID_GENERATOR
            : configured;
    return INSTANCE.getInstanceIdGenerator(generatorType).generateInstanceId(instance);
}
@Test
void testGenerateInstanceId() {
    Instance instance = new Instance();
    instance.setServiceName("service");
    instance.setClusterName("cluster");
    instance.setIp("1.1.1.1");
    instance.setPort(1000);
    // Default generator format: ip#port#cluster#service.
    assertThat(InstanceIdGeneratorManager.generateInstanceId(instance), is("1.1.1.1#1000#cluster#service"));
}
@CheckForNull public String get() { // branches will be empty in CE if (branchConfiguration.isPullRequest() || branches.isEmpty()) { return null; } return Optional.ofNullable(getFromProperties()).orElseGet(this::loadWs); }
@Test
public void get_uses_scanner_property_with_higher_priority() {
    when(branchConfiguration.branchType()).thenReturn(BranchType.BRANCH);
    when(branchConfiguration.branchName()).thenReturn(BRANCH_KEY);
    // Server says "main", scanner property says "master2" — property must win.
    when(newCodePeriodLoader.load(PROJECT_KEY, BRANCH_KEY))
        .thenReturn(createResponse(NewCodePeriods.NewCodePeriodType.REFERENCE_BRANCH, "main"));
    when(configuration.get("sonar.newCode.referenceBranch")).thenReturn(Optional.of("master2"));
    assertThat(referenceBranchSupplier.get()).isEqualTo("master2");
}
/**
 * Callback fired when a plugin/bundle jar appears on disk: builds its
 * descriptor, validates it (bundled-plugin conflicts, OS and GoCD version
 * compatibility) and registers/loads the plugin. Start/finish of the load is
 * always logged, even when validation throws.
 *
 * @param bundleOrPluginFileDetails location and metadata of the new jar
 */
@Override
public void pluginJarAdded(BundleOrPluginFileDetails bundleOrPluginFileDetails) {
    final GoPluginBundleDescriptor bundleDescriptor = goPluginBundleDescriptorBuilder.build(bundleOrPluginFileDetails);
    try {
        LOGGER.info("Plugin load starting: {}", bundleOrPluginFileDetails.file());
        // An external plugin must not shadow/replace a bundled one.
        validateIfExternalPluginRemovingBundledPlugin(bundleDescriptor);
        validatePluginCompatibilityWithCurrentOS(bundleDescriptor);
        validatePluginCompatibilityWithGoCD(bundleDescriptor);
        addPlugin(bundleOrPluginFileDetails, bundleDescriptor);
    } finally {
        LOGGER.info("Plugin load finished: {}", bundleOrPluginFileDetails.file());
    }
}
@Test
void shouldCopyPluginToBundlePathAndInformRegistryAndUpdateTheOSGiManifestWhenAPluginIsAdded() throws Exception {
    String pluginId = "testplugin.descriptorValidator";
    File pluginJarFile = new File(pluginWorkDir, PLUGIN_JAR_FILE_NAME);
    File expectedBundleDirectory = new File(bundleDir, PLUGIN_JAR_FILE_NAME);

    copyPluginToTheDirectory(pluginWorkDir, PLUGIN_JAR_FILE_NAME);
    String pluginJarFileLocation = pluginJarFile.getAbsolutePath();
    GoPluginBundleDescriptor descriptor = new GoPluginBundleDescriptor(GoPluginDescriptor.builder()
            .id(pluginId)
            .bundleLocation(expectedBundleDirectory)
            .pluginJarFileLocation(pluginJarFileLocation)
            .isBundledPlugin(true)
            .build());

    // New plugin (not yet in the registry), so the listener must load it.
    when(goPluginBundleDescriptorBuilder.build(new BundleOrPluginFileDetails(pluginJarFile, true, pluginWorkDir))).thenReturn(descriptor);
    when(registry.getPluginByIdOrFileName(pluginId, PLUGIN_JAR_FILE_NAME)).thenReturn(null);
    doNothing().when(registry).loadPlugin(descriptor);

    listener.pluginJarAdded(new BundleOrPluginFileDetails(pluginJarFile, true, pluginWorkDir));

    // Side effects: jar exploded into the bundle dir, registry informed,
    // OSGi manifest regenerated, plugin loaded, activator jar dropped into lib/.
    assertThat(expectedBundleDirectory).exists();
    verify(registry).getPluginByIdOrFileName(pluginId, PLUGIN_JAR_FILE_NAME);
    verify(registry).loadPlugin(descriptor);
    verify(osgiManifestGenerator).updateManifestOf(descriptor);
    verify(pluginLoader).loadPlugin(descriptor);
    verifyNoMoreInteractions(osgiManifestGenerator);
    verifyNoMoreInteractions(registry);
    assertThat(new File(expectedBundleDirectory, "lib/go-plugin-activator.jar")).exists();
}
/**
 * Builds the SQL used to scan one chunk (split) of the given table, bounded
 * below/above by the split key unless this is the first/last split.
 * Delegates to {@code buildSplitQuery} — trailing arguments (-1, true) are
 * that method's limit/flag parameters; see its definition for exact semantics.
 *
 * @param tableId      fully-qualified table to scan
 * @param rowType      row type whose field names form the split-key columns
 * @param isFirstSplit true if this split has no lower bound
 * @param isLastSplit  true if this split has no upper bound
 * @return the parameterized scan SQL for this split
 */
public static String buildSplitScanQuery(
        TableId tableId, SeaTunnelRowType rowType, boolean isFirstSplit, boolean isLastSplit) {
    return buildSplitQuery(tableId, rowType, isFirstSplit, isLastSplit, -1, true);
}
@Test
public void testSplitScanQuery() {
    // Middle split: bounded below and above.
    String splitScanSQL =
        SqlServerUtils.buildSplitScanQuery(
            TableId.parse("db1.schema1.table1"),
            new SeaTunnelRowType(
                new String[] {"id"}, new SeaTunnelDataType[] {BasicType.LONG_TYPE}),
            false,
            false);
    Assertions.assertEquals(
        "SELECT * FROM [schema1].[table1] WHERE [id] >= ? AND NOT ([id] = ?) AND [id] <= ?",
        splitScanSQL);
    // Sole split: no bounds at all.
    splitScanSQL =
        SqlServerUtils.buildSplitScanQuery(
            TableId.parse("db1.schema1.table1"),
            new SeaTunnelRowType(
                new String[] {"id"}, new SeaTunnelDataType[] {BasicType.LONG_TYPE}),
            true,
            true);
    Assertions.assertEquals("SELECT * FROM [schema1].[table1]", splitScanSQL);
    // First split: only an upper bound.
    splitScanSQL =
        SqlServerUtils.buildSplitScanQuery(
            TableId.parse("db1.schema1.table1"),
            new SeaTunnelRowType(
                new String[] {"id"}, new SeaTunnelDataType[] {BasicType.LONG_TYPE}),
            true,
            false);
    Assertions.assertEquals(
        "SELECT * FROM [schema1].[table1] WHERE [id] <= ? AND NOT ([id] = ?)", splitScanSQL);
    // Last split: only a lower bound.
    splitScanSQL =
        SqlServerUtils.buildSplitScanQuery(
            TableId.parse("db1.schema1.table1"),
            new SeaTunnelRowType(
                new String[] {"id"}, new SeaTunnelDataType[] {BasicType.LONG_TYPE}),
            false,
            true);
    Assertions.assertEquals("SELECT * FROM [schema1].[table1] WHERE [id] >= ?", splitScanSQL);
}
/**
 * Routes the RPC invocation to the synchronous or asynchronous handling path,
 * depending on the invoke mode resolved from the invoker URL and invocation.
 *
 * @param invoker    the target invoker
 * @param invocation the invocation being dispatched
 * @return the invocation result
 * @throws RpcException if the underlying invoke fails
 */
@Override
public Result invoke(Invoker<?> invoker, Invocation invocation) throws RpcException {
    final InvokeMode mode = RpcUtils.getInvokeMode(invoker.getUrl(), invocation);
    return InvokeMode.SYNC == mode
            ? syncInvoke(invoker, invocation)
            : asyncInvoke(invoker, invocation);
}
@Test
public void testInvokeAsync() {
    Invocation invocation = DubboTestUtil.getDefaultMockInvocationOne();
    Invoker invoker = DubboTestUtil.getDefaultMockInvoker();
    // Mark the invocation as async so the filter takes the async path.
    when(invocation.getAttachment(ASYNC_KEY)).thenReturn(Boolean.TRUE.toString());
    final Result result = mock(Result.class);
    when(invoker.invoke(invocation)).thenAnswer(invocationOnMock -> {
        // Verify the Sentinel entry structure while the call is in flight.
        verifyInvocationStructureForAsyncCall(invoker, invocation);
        return result;
    });
    consumerFilter.invoke(invoker, invocation);
    verify(invoker).invoke(invocation);
    // The Sentinel context must still be present after the call.
    Context context = ContextUtil.getContext();
    assertNotNull(context);
}
/**
 * Builds a metric name from the fully-qualified name of the given class and
 * the supplied name parts (delegates to the String-based overload).
 *
 * @param klass class whose FQN becomes the name prefix
 * @param names additional dot-joined name parts
 * @return the composed metric name
 */
public static MetricName name(Class<?> klass, String... names) {
    return name(klass.getName(), names);
}
@Test
@SuppressWarnings("NullArgumentToVariableArgMethod")
public void elidesNullValuesFromNamesWhenOnlyOneNullPassedIn() throws Exception {
    // A single null vararg element must be skipped, leaving only the base name.
    assertThat(name("one", (String) null))
            .isEqualTo(MetricName.build("one"));
}
/**
 * A queue is empty when the head points at the same element the tail does.
 * When the tail is still at its first-usage position the pointers compare
 * directly; otherwise the head must be advanced by one before comparing.
 */
public boolean isEmpty() {
    return (isTailFirstUsage(currentTailPtr) ? currentHeadPtr : currentHeadPtr.moveForward(1))
            .compareTo(currentTailPtr) == 0;
}
@Test
public void newlyQueueIsEmpty() throws QueueException {
    final QueuePool queuePool = QueuePool.loadQueues(tempQueueFolder, PAGE_SIZE, SEGMENT_SIZE);
    // getOrCreate on a fresh pool creates the queue; it must start out empty.
    final Queue queue = queuePool.getOrCreate("test");
    assertTrue(queue.isEmpty(), "Freshly created queue must be empty");
}
/**
 * Returns the fully-qualified runtime class name of {@code value}, or
 * {@code null} when {@code value} itself is {@code null}.
 *
 * @param value any object, possibly null
 * @return the class name, or null for a null input
 */
public static <T> @Nullable String getClassNameOrNull(@Nullable T value) {
    return value == null ? null : value.getClass().getName();
}
@Test
public void testGetClassNameOrNullClassName() {
    // A non-null value yields its fully-qualified runtime class name.
    assertEquals("java.lang.String", SingleStoreUtil.getClassNameOrNull("asd"));
}
/**
 * Registers a node in the consistent-hash ring along with
 * {@code VIRTUAL_NODE_SIZE} virtual replicas (to even out key distribution).
 * The backing map is cleared first so repeated adds rebuild the ring from
 * scratch instead of accumulating stale entries.
 *
 * @param key   hash position / identity of the node
 * @param value the node's value (e.g. its address)
 */
@Override
public void add(long key, String value) {
    // fix https://github.com/crossoverJie/cim/issues/79
    sortArrayMap.clear();
    for (int i = 0; i < VIRTUAL_NODE_SIZE; i++) {
        // Virtual replica positions are derived from a synthetic "vir<key><i>" label.
        Long hash = super.hash("vir" + key + i);
        sortArrayMap.add(hash, value);
    }
    sortArrayMap.add(key, value);
}
@Test
public void getFirstNodeValue2() {
    AbstractConsistentHash map = new SortArrayMapConsistentHash();
    // Ring of ten nodes 127.0.0.0 .. 127.0.0.9.
    List<String> strings = new ArrayList<String>();
    for (int i = 0; i < 10; i++) {
        strings.add("127.0.0." + i);
    }
    // "zhangsan2" must consistently map to the same node.
    String process = map.process(strings, "zhangsan2");
    System.out.println(process);
    Assert.assertEquals("127.0.0.3", process);
}
/**
 * Enables/disables sending an {@code If-None-Match} header (conditional GET
 * based on a previously recorded ETag). The flag is stored in the keyed
 * properties map under "sendIfNoneMatch".
 */
public void setSendIfNoneMatch(boolean sendIfNoneMatch) {
    kp.put("sendIfNoneMatch", sendIfNoneMatch);
}
@Test
public void testSendIfNoneMatch() throws Exception {
    fetcher().setSendIfNoneMatch(true);
    CrawlURI curi = makeCrawlURI("http://localhost:7777/if-none-match");
    // First fetch: no history yet, so no If-None-Match header may be sent,
    // but the server's ETag must be recorded on the response.
    fetcher().process(curi);
    assertFalse(httpRequestString(curi).toLowerCase().contains("if-none-match: "));
    assertTrue(curi.getHttpResponseHeader("etag").equals(ETAG_TEST_VALUE));
    runDefaultChecks(curi, "requestLine");

    // Record the fetch so the next request can be conditional.
    FetchHistoryProcessor fetchHistoryProcessor = new FetchHistoryProcessor();
    fetchHistoryProcessor.process(curi);

    // Second fetch: must send If-None-Match with the recorded ETag.
    fetcher().process(curi);

    // logger.info("\n" + httpRequestString(curi));
    // logger.info("\n" + rawResponseString(curi));

    assertTrue(httpRequestString(curi).contains("If-None-Match: " + ETAG_TEST_VALUE + "\r\n"));
    assertNull(curi.getRevisitProfile());

    // After history processing, the 304-style revisit must be classified
    // as ServerNotModifiedRevisit carrying the ETag (no Last-Modified here).
    fetchHistoryProcessor.process(curi);
    assertNotNull(curi.getRevisitProfile());
    assertTrue(curi.getRevisitProfile() instanceof ServerNotModifiedRevisit);
    ServerNotModifiedRevisit revisit = (ServerNotModifiedRevisit) curi.getRevisitProfile();
    assertEquals(ETAG_TEST_VALUE, revisit.getETag());
    assertNull(revisit.getLastModified());
}
/**
 * Hash code consistent with equals: derived from the CONTENTS of the salt,
 * stored-key and server-key arrays (via {@code Arrays.hashCode}, since raw
 * array hashCode is identity-based) plus the iteration count.
 */
@Override
public int hashCode() {
    return Objects.hash(
        Arrays.hashCode(salt),
        Arrays.hashCode(storedKey),
        Arrays.hashCode(serverKey),
        iterations
    );
}
@Test
public void testEqualsAndHashCode() {
    // Two credentials built from distinct but content-equal arrays must be
    // equal and hash equally (i.e. equality is content-based, not identity-based).
    byte[] salt1 = {1, 2, 3};
    byte[] storedKey1 = {4, 5, 6};
    byte[] serverKey1 = {7, 8, 9};
    int iterations1 = 1000;
    byte[] salt2 = {1, 2, 3};
    byte[] storedKey2 = {4, 5, 6};
    byte[] serverKey2 = {7, 8, 9};
    int iterations2 = 1000;
    ScramCredentialData data1 = new ScramCredentialData(salt1, storedKey1, serverKey1, iterations1);
    ScramCredentialData data2 = new ScramCredentialData(salt2, storedKey2, serverKey2, iterations2);
    assertEquals(data1, data2);
    assertEquals(data1.hashCode(), data2.hashCode());
}
/**
 * Registers JobRunr metrics beans, but only when Micrometer metrics support
 * is available. Background-job-server metrics are added only when that server
 * is enabled in the build-time configuration.
 *
 * @return the additional-bean build item, or {@code null} when Micrometer is
 *         not supported (no beans contributed)
 */
@BuildStep
AdditionalBeanBuildItem addMetrics(Optional<MetricsCapabilityBuildItem> metricsCapability,
                                   JobRunrBuildTimeConfiguration jobRunrBuildTimeConfiguration) {
    if (metricsCapability.isPresent() && metricsCapability.get().metricsSupported(MetricsFactory.MICROMETER)) {
        final AdditionalBeanBuildItem.Builder additionalBeanBuildItemBuilder = AdditionalBeanBuildItem.builder()
                .setUnremovable()
                .addBeanClasses(JobRunrMetricsStarter.class)
                .addBeanClasses(JobRunrMetricsProducer.StorageProviderMetricsProducer.class);

        // Server-side metrics only make sense when the background job server runs.
        if (jobRunrBuildTimeConfiguration.backgroundJobServer().enabled()) {
            additionalBeanBuildItemBuilder.addBeanClasses(JobRunrMetricsProducer.BackgroundJobServerMetricsProducer.class);
        }
        return additionalBeanBuildItemBuilder
                .build();
    }
    return null;
}
@Test
void addMetricsDoesNotAddMetricsIfEnabledButNoMicroMeterSupport() {
    // Capability present but metricsSupported(...) == false -> no beans produced.
    final AdditionalBeanBuildItem metricsBeanBuildItem =
        jobRunrExtensionProcessor.addMetrics(
            Optional.of(new MetricsCapabilityBuildItem(toSupport -> false)),
            jobRunrBuildTimeConfiguration);

    assertThat(metricsBeanBuildItem).isNull();
}
/**
 * Resolves the set of entities the given entity depends on (for grants
 * sharing), via the content-pack dependency resolver. Dependencies that are
 * irrelevant for sharing are filtered out; stream-ref entities are mapped
 * back to the actual streams; titles come from entity excerpts and owners
 * from the grant service.
 *
 * @param entity the entity whose dependencies should be resolved
 * @return descriptors of all relevant dependencies (never contains the input entity)
 */
public ImmutableSet<EntityDescriptor> resolve(GRN entity) {
    // TODO: Replace entity excerpt usage with GRNDescriptors once we implemented GRN descriptors for every entity
    final ImmutableMap<GRN, Optional<String>> entityExcerpts = contentPackService.listAllEntityExcerpts().stream()
        // TODO: Use the GRNRegistry instead of manually building a GRN. Requires all entity types to be in the registry.
        .collect(ImmutableMap.toImmutableMap(
            e -> GRNType.create(e.type().name(), e.type().name() + ":").newGRNBuilder().entity(e.id().id()).build(),
            v -> Optional.ofNullable(v.title())));

    final Set<org.graylog2.contentpacks.model.entities.EntityDescriptor> descriptors =
        contentPackService.resolveEntities(Collections.singleton(
            org.graylog2.contentpacks.model.entities.EntityDescriptor.builder()
                .id(ModelId.of(entity.entity()))
                // TODO: This is a hack! Until we stop using the content-pack dependency resolver, we have to use a different version for dashboards here
                .type(ModelType.of(entity.type(), "dashboard".equals(entity.type()) ? "2" : "1")) // TODO: Any way of NOT hardcoding the version here?
                .build()));

    final ImmutableSet<GRN> dependencies = descriptors.stream()
        .filter(dep -> {
            // Filter dependencies that aren't needed for grants sharing
            // TODO This is another reason why we shouldn't be using the content pack resolver ¯\_(ツ)_/¯
            final Set<ModelType> ignoredDeps = IGNORED_DEPENDENCIES.getOrDefault(entity.grnType(), ImmutableSet.of());
            return !ignoredDeps.contains(dep.type());
        })
        // TODO: Work around from using the content pack dependency resolver:
        // We've added stream_title content pack entities in https://github.com/Graylog2/graylog2-server/pull/17089,
        // but in this context we want to return the actual dependent Stream to add additional permissions to.
        .map(descriptor -> ModelTypes.STREAM_REF_V1.equals(descriptor.type())
            ? org.graylog2.contentpacks.model.entities.EntityDescriptor.create(descriptor.id(), ModelTypes.STREAM_V1)
            : descriptor)
        .map(descriptor -> grnRegistry.newGRN(descriptor.type().name(), descriptor.id().id()))
        .filter(dependency -> !entity.equals(dependency)) // Don't include the given entity in dependencies
        .collect(ImmutableSet.toImmutableSet());

    final Map<GRN, Set<GRN>> targetOwners = grantService.getOwnersForTargets(dependencies);

    return dependencies.stream()
        .map(dependency -> {
            // Prefer the excerpt title; distinguish "known entity without title"
            // from "entity we have no excerpt for" in the placeholder text.
            String title = entityExcerpts.get(dependency) != null
                ? entityExcerpts.get(dependency).orElse("unnamed dependency: <" + dependency + ">")
                : "unknown dependency: <" + dependency + ">";
            return EntityDescriptor.create(
                dependency,
                title,
                getOwners(targetOwners.get(dependency))
            );
        })
        .collect(ImmutableSet.toImmutableSet());
}
@Test
@DisplayName("Try a regular depency resolve")
void resolve() {
    final String TEST_TITLE = "Test Stream Title";
    // Excerpt supplying the stream's title.
    final EntityExcerpt streamExcerpt = EntityExcerpt.builder()
        .type(ModelTypes.STREAM_V1)
        .id(ModelId.of("54e3deadbeefdeadbeefaffe"))
        .title(TEST_TITLE).build();
    when(contentPackService.listAllEntityExcerpts()).thenReturn(ImmutableSet.of(streamExcerpt));

    // The resolver reports a single stream dependency.
    final EntityDescriptor streamDescriptor =
        EntityDescriptor.builder().type(ModelTypes.STREAM_V1).id(ModelId.of("54e3deadbeefdeadbeefaffe")).build();
    when(contentPackService.resolveEntities(any())).thenReturn(ImmutableSet.of(streamDescriptor));

    when(grnDescriptorService.getDescriptor(any(GRN.class))).thenAnswer(a -> {
        GRN grnArg = a.getArgument(0);
        return GRNDescriptor.builder().grn(grnArg).title("dummy").build();
    });

    final GRN dashboard = grnRegistry.newGRN("dashboard", "33e3deadbeefdeadbeefaffe");
    final ImmutableSet<org.graylog.security.entities.EntityDescriptor> missingDependencies =
        entityDependencyResolver.resolve(dashboard);

    // Exactly one dependency: the stream, with its excerpt title and owner.
    assertThat(missingDependencies).hasSize(1);
    assertThat(missingDependencies.asList().get(0)).satisfies(descriptor -> {
        assertThat(descriptor.id().toString()).isEqualTo("grn::::stream:54e3deadbeefdeadbeefaffe");
        assertThat(descriptor.title()).isEqualTo(TEST_TITLE);
        assertThat(descriptor.owners()).hasSize(1);
        assertThat(descriptor.owners().asList().get(0).grn().toString()).isEqualTo("grn::::user:jane");
    });
}
/** Returns the shared verbose logger instance (a process-wide singleton). */
public static Logger verboseLogger() {
    return VERBOSE_ANDROID_LOGGER;
}
@Test
public void verboseLoggerReturnsSameInstance() {
    // The verbose logger is a singleton: repeated calls return the same object.
    Logger logger1 = Loggers.verboseLogger();
    Logger logger2 = Loggers.verboseLogger();
    assertThat(logger1, sameInstance(logger2));
}
/** Private constructor: Main is a static entry-point/utility class and must not be instantiated. */
private Main() {
    // Utility Class.
}
@Test
public void runsAgainstRelease() throws Exception {
    final File pwd = temp.newFolder();
    // Smoke test: main() must complete without throwing when pointed at
    // release 5.5.0 with a fresh temporary working directory.
    Main.main(
        String.format("--%s=5.5.0", UserInput.DISTRIBUTION_VERSION_PARAM),
        String.format("--workdir=%s", pwd.getAbsolutePath())
    );
}
/**
 * Creates the shadow route engine matching the statement type of the query:
 * one engine per DML kind (insert/delete/update/select), with a non-DML
 * fallback for everything else.
 *
 * @param queryContext the query being routed
 * @return the shadow route engine for the statement type
 */
public static ShadowRouteEngine newInstance(final QueryContext queryContext) {
    SQLStatement sqlStatement = queryContext.getSqlStatementContext().getSqlStatement();
    if (sqlStatement instanceof InsertStatement) {
        return createShadowInsertStatementRoutingEngine(queryContext);
    }
    if (sqlStatement instanceof DeleteStatement) {
        return createShadowDeleteStatementRoutingEngine(queryContext);
    }
    if (sqlStatement instanceof UpdateStatement) {
        return createShadowUpdateStatementRoutingEngine(queryContext);
    }
    if (sqlStatement instanceof SelectStatement) {
        return createShadowSelectStatementRoutingEngine(queryContext);
    }
    return createShadowNonMDLStatementRoutingEngine(queryContext);
}
@Test
void assertNewInstance() {
    // Each DML statement kind must map to its dedicated shadow routing engine.
    ShadowRouteEngine shadowInsertRouteEngine = ShadowRouteEngineFactory.newInstance(
        new QueryContext(createInsertSqlStatementContext(), "", Collections.emptyList(), new HintValueContext(), mockConnectionContext(), mock(ShardingSphereMetaData.class)));
    assertThat(shadowInsertRouteEngine, instanceOf(ShadowInsertStatementRoutingEngine.class));
    ShadowRouteEngine shadowUpdateRouteEngine = ShadowRouteEngineFactory.newInstance(
        new QueryContext(createUpdateSqlStatementContext(), "", Collections.emptyList(), new HintValueContext(), mockConnectionContext(), mock(ShardingSphereMetaData.class)));
    assertThat(shadowUpdateRouteEngine, instanceOf(ShadowUpdateStatementRoutingEngine.class));
    ShadowRouteEngine shadowDeleteRouteEngine = ShadowRouteEngineFactory.newInstance(
        new QueryContext(createDeleteSqlStatementContext(), "", Collections.emptyList(), new HintValueContext(), mockConnectionContext(), mock(ShardingSphereMetaData.class)));
    assertThat(shadowDeleteRouteEngine, instanceOf(ShadowDeleteStatementRoutingEngine.class));
    ShadowRouteEngine shadowSelectRouteEngine = ShadowRouteEngineFactory.newInstance(
        new QueryContext(createSelectSqlStatementContext(), "", Collections.emptyList(), new HintValueContext(), mockConnectionContext(), mock(ShardingSphereMetaData.class)));
    assertThat(shadowSelectRouteEngine, instanceOf(ShadowSelectStatementRoutingEngine.class));
}
/** Forwards a TRACE-level message to the wrapped logger unchanged. */
@Override
public void trace(String msg) {
    logger.trace(msg);
}
@Test
void testMarkerTraceWithFormat() {
    jobRunrDashboardLogger.trace(marker, "trace with {}", "format");
    // Marker, format string and argument must be forwarded verbatim to SLF4J.
    verify(slfLogger).trace(marker, "trace with {}", "format");
}
/**
 * Derives the Beam Schema for the given protobuf message type.
 * checkForDynamicType presumably rejects dynamic proto message types (which
 * carry no compile-time descriptor) — see its definition to confirm.
 *
 * @param typeDescriptor descriptor of a generated protobuf Message subclass
 * @return the translated schema
 */
@Override
public <T> @Nullable Schema schemaFor(TypeDescriptor<T> typeDescriptor) {
    checkForDynamicType(typeDescriptor);
    // Cast is safe for generated Message subclasses, which is what the
    // dynamic-type check above is guarding for.
    return ProtoSchemaTranslator.getSchema((Class<Message>) typeDescriptor.getRawType());
}
@Test
public void testRequiredPrimitiveSchema() {
    // The proto with required primitive fields must translate to the expected schema.
    Schema schema = new ProtoMessageSchema().schemaFor(TypeDescriptor.of(RequiredPrimitive.class));
    assertEquals(REQUIRED_PRIMITIVE_SCHEMA, schema);
}
/**
 * Strips all HTML tags from the content, keeping the text between them.
 * {@code RE_HTML_MARK} is the tag-matching regex constant declared on this class.
 *
 * @param content text possibly containing HTML markup
 * @return the content with all tags removed
 */
public static String cleanHtmlTag(String content) {
    return content.replaceAll(RE_HTML_MARK, "");
}
@Test
public void cleanHtmlTagTest() {
    // Unclosed tag
    String str = "pre<img src=\"xxx/dfdsfds/test.jpg\">";
    String result = HtmlUtil.cleanHtmlTag(str);
    assertEquals("pre", result);

    // Closed tag (no attributes)
    str = "pre<img>";
    result = HtmlUtil.cleanHtmlTag(str);
    assertEquals("pre", result);

    // Self-closing tag with attributes
    str = "pre<img src=\"xxx/dfdsfds/test.jpg\" />";
    result = HtmlUtil.cleanHtmlTag(str);
    assertEquals("pre", result);

    // Self-closing tag without attributes
    str = "pre<img />";
    result = HtmlUtil.cleanHtmlTag(str);
    assertEquals("pre", result);

    // Tag with inner content: only the tags go, the content stays
    str = "pre<div class=\"test_div\">dfdsfdsfdsf</div>";
    result = HtmlUtil.cleanHtmlTag(str);
    assertEquals("predfdsfdsfdsf", result);

    // With line breaks: whitespace inside content is preserved
    str = "pre<div class=\"test_div\">\r\n\t\tdfdsfdsfdsf\r\n</div><div class=\"test_div\">BBBB</div>";
    result = HtmlUtil.cleanHtmlTag(str);
    assertEquals("pre\r\n\t\tdfdsfdsfdsf\r\nBBBB", result);
}
/**
 * Splits a logger name into its parts at every separator position reported
 * by {@code getSeparatorIndexOf}. The trailing part (after the last
 * separator, or the whole name if there is none) is always included.
 *
 * @param loggerName the dotted/nested logger name
 * @return the ordered list of name parts
 */
public static List<String> computeNameParts(String loggerName) {
    List<String> parts = new ArrayList<String>();
    int start = 0;
    int separator;
    while ((separator = getSeparatorIndexOf(loggerName, start)) != -1) {
        parts.add(loggerName.substring(start, separator));
        start = separator + 1;
    }
    // Remainder after the final separator (or the entire name).
    parts.add(loggerName.substring(start));
    return parts;
}
@Test
public void supportNestedClasses() {
    // The '$' separating a nested class name must act as a separator like '.'.
    List<String> witnessList = new ArrayList<String>();
    witnessList.add("com");
    witnessList.add("foo");
    witnessList.add("Bar");
    witnessList.add("Nested");
    List<String> partList = LoggerNameUtil.computeNameParts("com.foo.Bar$Nested");
    assertEquals(witnessList, partList);
}
/**
 * Intercepts only the {@code @Extension} annotation; any other annotation is
 * passed through to the superclass. For {@code @Extension}, the array
 * attributes {@code ordinal}, {@code plugins} and {@code points} are read
 * into the {@code extensionInfo} being built.
 */
@Override
public AnnotationVisitor visitAnnotation(String descriptor, boolean visible) {
    //if (!descriptor.equals("Lorg/pf4j/Extension;")) {
    if (!Type.getType(descriptor).getClassName().equals(Extension.class.getName())) {
        return super.visitAnnotation(descriptor, visible);
    }
    return new AnnotationVisitor(ASM_VERSION) {

        @Override
        public AnnotationVisitor visitArray(final String name) {
            // Only the three known array attributes are captured.
            if ("ordinal".equals(name) || "plugins".equals(name) || "points".equals(name)) {
                return new AnnotationVisitor(ASM_VERSION, super.visitArray(name)) {

                    @Override
                    public void visit(String key, Object value) {
                        log.debug("Load annotation attribute {} = {} ({})", name, value, value.getClass().getName());
                        if ("ordinal".equals(name)) {
                            extensionInfo.ordinal = Integer.parseInt(value.toString());
                        } else if ("plugins".equals(name)) {
                            // "plugins" values may arrive as a single String or a String[].
                            if (value instanceof String) {
                                log.debug("Found plugin {}", value);
                                extensionInfo.plugins.add((String) value);
                            } else if (value instanceof String[]) {
                                log.debug("Found plugins {}", Arrays.toString((String[]) value));
                                extensionInfo.plugins.addAll(Arrays.asList((String[]) value));
                            } else {
                                log.debug("Found plugin {}", value.toString());
                                extensionInfo.plugins.add(value.toString());
                            }
                        } else {
                            // "points" entries are ASM Type values — record the class name.
                            String pointClassName = ((Type) value).getClassName();
                            log.debug("Found point " + pointClassName);
                            extensionInfo.points.add(pointClassName);
                        }
                        super.visit(key, value);
                    }
                };
            }
            return super.visitArray(name);
        }
    };
}
@Test
void visitAnnotationShouldReturnExtensionAnnotationVisitor() {
    ExtensionInfo extensionInfo = new ExtensionInfo("org.pf4j.asm.ExtensionInfo");
    ClassVisitor extensionVisitor = new ExtensionVisitor(extensionInfo);
    // The @Extension descriptor must yield the dedicated (non-null) visitor.
    AnnotationVisitor returnedVisitor = extensionVisitor.visitAnnotation("Lorg/pf4j/Extension;", true);
    assertNotNull(returnedVisitor);
}
/**
 * Fetches the job's metrics from the Dataflow service, caching the result
 * once the job reaches a terminal state (metrics can no longer change then,
 * so later calls avoid the remote round trip).
 *
 * @return the job metrics (possibly a cached snapshot)
 * @throws IOException if the service call fails
 */
private JobMetrics getJobMetrics() throws IOException {
    if (cachedMetricResults != null) {
        // Metric results have been cached after the job ran.
        return cachedMetricResults;
    }
    JobMetrics result = dataflowClient.getJobMetrics(dataflowPipelineJob.getJobId());
    if (dataflowPipelineJob.getState().isTerminal()) {
        // Add current query result to the cache.
        cachedMetricResults = result;
    }
    return result;
}
@Test
public void testIgnoreDistributionButGetCounterUpdates() throws IOException {
    // Map the internal step name "s2" back to the user-visible "myStepName".
    AppliedPTransform<?, ?, ?> myStep = mock(AppliedPTransform.class);
    when(myStep.getFullName()).thenReturn("myStepName");
    BiMap<AppliedPTransform<?, ?, ?>, String> transformStepNames = HashBiMap.create();
    transformStepNames.put(myStep, "s2");

    JobMetrics jobMetrics = new JobMetrics();
    DataflowClient dataflowClient = mock(DataflowClient.class);
    when(dataflowClient.getJobMetrics(JOB_ID)).thenReturn(jobMetrics);
    DataflowPipelineJob job = mock(DataflowPipelineJob.class);
    DataflowPipelineOptions options = mock(DataflowPipelineOptions.class);
    when(options.isStreaming()).thenReturn(false);
    when(job.getDataflowOptions()).thenReturn(options);
    when(job.getState()).thenReturn(State.RUNNING);
    when(job.getJobId()).thenReturn(JOB_ID);
    when(job.getTransformStepNames()).thenReturn(transformStepNames);

    // The parser relies on the fact that one tentative and one committed metric update exist in
    // the job metrics results.
    // "otherCounter[MIN]" looks like a distribution component and must be ignored.
    jobMetrics.setMetrics(
        ImmutableList.of(
            makeCounterMetricUpdate("counterName", "counterNamespace", "s2", 1233L, false),
            makeCounterMetricUpdate("counterName", "counterNamespace", "s2", 1233L, true),
            makeCounterMetricUpdate("otherCounter[MIN]", "otherNamespace", "s2", 0L, false),
            makeCounterMetricUpdate("otherCounter[MIN]", "otherNamespace", "s2", 0L, true)));

    DataflowMetrics dataflowMetrics = new DataflowMetrics(job, dataflowClient);
    MetricQueryResults result = dataflowMetrics.allMetrics();
    // Only the plain counter survives, reported both attempted and committed.
    assertThat(
        result.getCounters(),
        containsInAnyOrder(
            attemptedMetricsResult("counterNamespace", "counterName", "myStepName", 1233L)));
    assertThat(
        result.getCounters(),
        containsInAnyOrder(
            committedMetricsResult("counterNamespace", "counterName", "myStepName", 1233L)));
}
public static String buildURIFromPattern(String pattern, List<Parameter> parameters) { if (parameters != null) { // Browse parameters and choose between template or query one. for (Parameter parameter : parameters) { String wadlTemplate = "{" + parameter.getName() + "}"; String swaggerTemplate = "/:" + parameter.getName(); if (pattern.contains(wadlTemplate)) { // It's a template parameter. pattern = pattern.replace(wadlTemplate, encodePath(parameter.getValue())); } else if (pattern.contains(swaggerTemplate)) { // It's a template parameter. pattern = pattern.replace(":" + parameter.getName(), encodePath(parameter.getValue())); } else { // It's a query parameter, ensure we have started delimiting them. if (!pattern.contains("?")) { pattern += "?"; } if (pattern.contains("=")) { pattern += "&"; } pattern += parameter.getName() + "=" + encodeValue(parameter.getValue()); } } } return pattern; }
@Test
void testBuildURIFromPatternWithNoParameters() {
    // An empty parameter list must leave template placeholders untouched
    // and, in particular, must not blow up with a NullPointerException.
    final String pattern = "http://localhost:8080/blog/{year}/{month}";
    try {
        URIBuilder.buildURIFromPattern(pattern, new ArrayList<Parameter>());
    } catch (NullPointerException npe) {
        fail("buildURIFromPattern should not fail with no parameters");
    }
}
/**
 * Builds and writes the scanner-report metadata block: project identity and
 * analysis date, branch / new-code reference information, SCM details,
 * quality profiles per language, and the installed plugins.
 */
@Override
public void publish(ScannerReportWriter writer) {
    AbstractProjectOrModule rootProject = moduleHierarchy.root();
    ScannerReport.Metadata.Builder builder = ScannerReport.Metadata.newBuilder()
        .setAnalysisDate(projectInfo.getAnalysisDate().getTime())
        // Here we want key without branch
        .setProjectKey(rootProject.key())
        .setCrossProjectDuplicationActivated(cpdSettings.isCrossProjectDuplicationEnabled())
        .setRootComponentRef(rootProject.scannerId());
    // Version and build string are optional; write them only when present.
    projectInfo.getProjectVersion().ifPresent(builder::setProjectVersion);
    projectInfo.getBuildString().ifPresent(builder::setBuildString);
    if (branchConfiguration.branchName() != null) {
        addBranchInformation(builder);
    }
    String newCodeReferenceBranch = referenceBranchSupplier.getFromProperties();
    if (newCodeReferenceBranch != null) {
        builder.setNewCodeReferenceBranch(newCodeReferenceBranch);
    }
    addScmInformation(builder);
    addNotAnalyzedFileCountsByLanguage(builder);
    // One quality-profile entry per language.
    for (QProfile qp : qProfiles.findAll()) {
        builder.putQprofilesPerLanguage(qp.getLanguage(), ScannerReport.Metadata.QProfile.newBuilder()
            .setKey(qp.getKey())
            .setLanguage(qp.getLanguage())
            .setName(qp.getName())
            .setRulesUpdatedAt(qp.getRulesUpdatedAt().getTime()).build());
    }
    // Record every installed plugin with its update timestamp.
    for (Entry<String, ScannerPlugin> pluginEntry : pluginRepository.getPluginsByKey().entrySet()) {
        builder.putPluginsByKey(pluginEntry.getKey(), ScannerReport.Metadata.Plugin.newBuilder()
            .setKey(pluginEntry.getKey())
            .setUpdatedAt(pluginEntry.getValue().getUpdatedAt()).build());
    }
    addRelativePathFromScmRoot(builder);
    writer.writeMetadata(builder.build());
}
@Test
public void should_not_crash_when_scm_provider_does_not_support_relativePathFromScmRoot() {
    // A provider implementing only key() falls back to the base-class behaviour,
    // which offers no relative path from the SCM root.
    ScmProvider providerWithoutRelativePath = new ScmProvider() {
        @Override
        public String key() {
            return "foo";
        }
    };
    when(scmConfiguration.provider()).thenReturn(providerWithoutRelativePath);

    underTest.publish(writer);

    // Publishing must succeed and simply leave the field empty.
    ScannerReport.Metadata metadata = reader.readMetadata();
    assertThat(metadata.getRelativePathFromScmRoot()).isEmpty();
}
/**
 * Generates the Java source map for the given regression-model compilation
 * request, wrapping any I/O failure in a {@link KiePMMLException}.
 *
 * @param compilationDTO compilation context for the regression model
 * @return map of fully-qualified class name to generated source code
 */
@Override
public Map<String, String> getSourcesMap(final CompilationDTO<RegressionModel> compilationDTO) {
    logger.trace("getKiePMMLModelWithSources {} {} {} {}",
                 compilationDTO.getPackageName(),
                 compilationDTO.getFields(),
                 compilationDTO.getModel(),
                 compilationDTO.getPmmlContext());
    try {
        final RegressionCompilationDTO regressionCompilationDTO =
                RegressionCompilationDTO.fromCompilationDTO(compilationDTO);
        return KiePMMLRegressionModelFactory.getKiePMMLRegressionModelSourcesMap(regressionCompilationDTO);
    } catch (IOException e) {
        // Surface source-generation I/O problems as the project's unchecked exception.
        throw new KiePMMLException(e);
    }
}
/**
 * End-to-end check: loading a PMML regression model must yield a non-empty
 * sources map whose classes compile and are Serializable.
 */
@Test
void getKiePMMLModelWithSources() throws Exception {
    // The sample file must contain exactly one RegressionModel.
    final PMML pmml = TestUtils.loadFromFile(SOURCE_1);
    assertThat(pmml).isNotNull();
    assertThat(pmml.getModels()).hasSize(1);
    assertThat(pmml.getModels().get(0)).isInstanceOf(RegressionModel.class);
    RegressionModel regressionModel = (RegressionModel) pmml.getModels().get(0);
    final CommonCompilationDTO<RegressionModel> compilationDTO =
        CommonCompilationDTO.fromGeneratedPackageNameAndFields(PACKAGE_NAME, pmml, regressionModel,
                                                               new PMMLCompilationContextMock(), SOURCE_1);
    final KiePMMLModelWithSources retrieved = PROVIDER.getKiePMMLModelWithSources(compilationDTO);
    assertThat(retrieved).isNotNull();
    // Generated sources must exist and be non-empty...
    final Map<String, String> sourcesMap = retrieved.getSourcesMap();
    assertThat(sourcesMap).isNotNull();
    assertThat(sourcesMap).isNotEmpty();
    // ...and actually compile; every generated class must be Serializable.
    ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
    final Map<String, Class<?>> compiled = KieMemoryCompiler.compile(sourcesMap, classLoader);
    for (Class<?> clazz : compiled.values()) {
        assertThat(clazz).isInstanceOf(Serializable.class);
    }
}
/**
 * Computes the order-by clauses Firestore adds implicitly: every field used
 * in an inequality filter plus the document name ({@code __name__}), minus
 * anything the query already orders by explicitly. Added clauses inherit the
 * direction of the last explicit order-by (ascending when there is none).
 *
 * @param query the structured query to inspect
 * @return the additional implicit {@link Order} clauses (possibly empty)
 */
static List<Order> getImplicitOrderBy(StructuredQuery query) {
    // Fields constrained by inequalities must appear in the ordering.
    List<OrderByFieldPath> requiredOrders = new ArrayList<>();
    if (query.hasWhere()) {
        fillInequalityFields(query.getWhere(), requiredOrders);
    }
    Collections.sort(requiredOrders);

    // The document name is always ordered last unless already required.
    boolean hasDocumentName = false;
    for (OrderByFieldPath path : requiredOrders) {
        if (path.isDocumentName()) {
            hasDocumentName = true;
            break;
        }
    }
    if (!hasDocumentName) {
        requiredOrders.add(OrderByFieldPath.fromString("__name__"));
    }

    // Drop whatever the query already orders by explicitly.
    for (Order order : query.getOrderByList()) {
        requiredOrders.remove(OrderByFieldPath.fromString(order.getField().getFieldPath()));
    }

    List<Order> implicitOrders = new ArrayList<>();
    if (!requiredOrders.isEmpty()) {
        // Implicit clauses follow the direction of the last explicit clause.
        Direction direction =
            query.getOrderByCount() == 0
                ? Direction.ASCENDING
                : query.getOrderByList().get(query.getOrderByCount() - 1).getDirection();
        for (OrderByFieldPath field : requiredOrders) {
            implicitOrders.add(
                Order.newBuilder()
                    .setDirection(direction)
                    .setField(
                        FieldReference.newBuilder().setFieldPath(field.getOriginalString()).build())
                    .build());
        }
    }
    return implicitOrders;
}
/**
 * When {@code __name__} already appears in an inequality filter, the implicit
 * order-by list must keep it (in sorted position) rather than appending a
 * duplicate at the end.
 */
@Test
public void getImplicitOrderBy_nameInWhere() {
    // Extend the shared test query's WHERE with a "__name__ != ''" filter.
    StructuredQuery.Builder builder = testQuery.toBuilder();
    builder
        .getWhereBuilder()
        .getCompositeFilterBuilder()
        .addFilters(
            Filter.newBuilder()
                .setFieldFilter(
                    FieldFilter.newBuilder()
                        .setField(FieldReference.newBuilder().setFieldPath("__name__"))
                        .setOp(FieldFilter.Operator.NOT_EQUAL)
                        .setValue(Value.newBuilder().setStringValue(""))));
    testQuery = builder.build();
    // WHERE (`z€a`.a.a != "" AND `b` > "") AND c == "" AND `z$` > "456" AND `z` > "123" AND z IS
    // NOT NAN AND __name__ != "" ORDER BY b DESC
    // -> (ORDER BY b DESC) + __name__ DESC, `z` DESC, `z$` DESC, `z€a`.a.a DESC
    List<Order> expected =
        ImmutableList.of(
            Order.newBuilder()
                .setField(FieldReference.newBuilder().setFieldPath("__name__"))
                .setDirection(Direction.DESCENDING)
                .build(),
            Order.newBuilder()
                .setField(FieldReference.newBuilder().setFieldPath("`z`"))
                .setDirection(Direction.DESCENDING)
                .build(),
            Order.newBuilder()
                .setField(FieldReference.newBuilder().setFieldPath("`z$`"))
                .setDirection(Direction.DESCENDING)
                .build(),
            Order.newBuilder()
                .setField(FieldReference.newBuilder().setFieldPath("`z€a`.a.a"))
                .setDirection(Direction.DESCENDING)
                .build());
    List<Order> actual = QueryUtils.getImplicitOrderBy(testQuery);
    assertEquals(expected, actual);
}
/**
 * Returns the table entry for {@code jobId}, creating it (via the supplied
 * services factory) on first access.
 *
 * @param jobId job to look up
 * @param jobServicesSupplier invoked only when the job is not yet registered;
 *        may throw the checked exception {@code E}
 * @return the existing or newly created job entry
 * @throws E propagated from the supplier on creation
 */
@Override
public <E extends Exception> Job getOrCreateJob(
        JobID jobId, SupplierWithException<? extends JobTable.JobServices, E> jobServicesSupplier)
        throws E {
    // Map.computeIfAbsent is not usable here: the supplier throws a checked exception.
    JobOrConnection existing = jobs.get(jobId);
    if (existing != null) {
        return existing;
    }
    JobOrConnection created = new JobOrConnection(jobId, jobServicesSupplier.get());
    jobs.put(jobId, created);
    return created;
}
/** A second connect attempt on an already-connected job must be rejected. */
@Test
void connectJob_Connected_Fails() {
    final JobTable.Job job = jobTable.getOrCreateJob(jobId, DEFAULT_JOB_SERVICES_SUPPLIER);
    connectJob(job, ResourceID.generate());
    // Connecting again with a different resource manager must fail fast.
    assertThatThrownBy(() -> connectJob(job, ResourceID.generate()))
        .isInstanceOf(IllegalStateException.class);
}
/**
 * Creates a {@code term} query matching documents whose field {@code name}
 * contains the exact string {@code value}.
 *
 * @param name field name to match against
 * @param value exact term value
 * @return a new {@link TermQueryBuilder}
 */
public static TermQueryBuilder termQuery(String name, String value) {
    return new TermQueryBuilder(name, value);
}
/**
 * Serialization of term queries: strings stay quoted, while every numeric
 * type (byte, short, int, long, float, double, BigDecimal, BigInteger) and
 * AtomicBoolean are rendered as bare JSON literals.
 */
@Test
public void testTermQuery() throws Exception {
    // String values keep their quotes, regardless of field/value position.
    assertEquals("{\"term\":{\"k\":\"aaaa\"}}", toJson(QueryBuilders.termQuery("k", "aaaa")));
    assertEquals("{\"term\":{\"aaaa\":\"k\"}}", toJson(QueryBuilders.termQuery("aaaa", "k")));
    // Integral types serialize unquoted.
    assertEquals("{\"term\":{\"k\":0}}", toJson(QueryBuilders.termQuery("k", (byte) 0)));
    assertEquals("{\"term\":{\"k\":123}}", toJson(QueryBuilders.termQuery("k", (long) 123)));
    assertEquals("{\"term\":{\"k\":41}}", toJson(QueryBuilders.termQuery("k", (short) 41)));
    assertEquals("{\"term\":{\"k\":128}}", toJson(QueryBuilders.termQuery("k", 128)));
    // Floating-point types serialize unquoted as well.
    assertEquals("{\"term\":{\"k\":42.42}}", toJson(QueryBuilders.termQuery("k", 42.42D)));
    assertEquals("{\"term\":{\"k\":1.1}}", toJson(QueryBuilders.termQuery("k", 1.1F)));
    // Arbitrary-precision numbers and AtomicBoolean are supported too.
    assertEquals("{\"term\":{\"k\":1}}", toJson(QueryBuilders.termQuery("k", new BigDecimal(1))));
    assertEquals("{\"term\":{\"k\":121}}", toJson(QueryBuilders.termQuery("k", new BigInteger("121"))));
    assertEquals("{\"term\":{\"k\":true}}", toJson(QueryBuilders.termQuery("k", new AtomicBoolean(true))));
}
/**
 * Queries the broker's SEMP API for the given queue on the configured
 * message VPN and wraps the raw HTTP reply in a {@link BrokerResponse}.
 *
 * @param queueName queue to look up
 * @return parsed broker response
 * @throws IOException if the HTTP request fails
 */
BrokerResponse getQueueResponse(String queueName) throws IOException {
    GenericUrl requestUrl = new GenericUrl(baseUrl + getQueueEndpoint(messageVpn, queueName));
    return BrokerResponse.fromHttpResponse(executeGet(requestUrl));
}
@Test public void testExecuteStatus3xx() { MockHttpTransport transport = new MockHttpTransport() { @Override public LowLevelHttpRequest buildRequest(String method, String url) { return new MockLowLevelHttpRequest() { @Override public LowLevelHttpResponse execute() { MockLowLevelHttpResponse response = new MockLowLevelHttpResponse(); response.setStatusCode(301); response.setContentType(Json.MEDIA_TYPE); response.setContent( "{\"meta\":{\"error\":{\"code\":301,\"description\":\"some" + " error\",\"status\":\"xx\"}}}"); return response; } }; } }; HttpRequestFactory requestFactory = transport.createRequestFactory(); SempBasicAuthClientExecutor client = new SempBasicAuthClientExecutor( "http://host", "username", "password", "vpnName", requestFactory); assertThrows(HttpResponseException.class, () -> client.getQueueResponse("queue")); }
/**
 * Reports whether this session window overlaps {@code other}; window ends
 * are treated as inclusive, so touching windows overlap.
 *
 * @param other the window to compare with; must also be a {@code SessionWindow}
 * @return {@code true} if the windows share at least one point in time
 * @throws IllegalArgumentException if {@code other} is of a different window type
 */
public boolean overlap(final Window other) throws IllegalArgumentException {
    if (getClass() != other.getClass()) {
        throw new IllegalArgumentException("Cannot compare windows of different type. Other window has type " + other.getClass() + ".");
    }
    final SessionWindow that = (SessionWindow) other;
    // Overlap unless one window ends strictly before the other starts
    // (De Morgan of the original "!(that.end < start || end < that.start)").
    return that.endMs >= startMs && endMs >= that.startMs;
}
/**
 * Any window whose start lies inside [start, end] overlaps this window,
 * regardless of where it ends — including one starting exactly at the end.
 */
@Test
public void shouldOverlapIfOtherWindowStartIsWithinThisWindow() {
    /*
     * This: [-------]
     * Other: [-------]
     */
    // Other starts at this window's start.
    assertTrue(window.overlap(new SessionWindow(start, end + 1)));
    assertTrue(window.overlap(new SessionWindow(start, 150)));
    // Other starts strictly inside.
    assertTrue(window.overlap(new SessionWindow(75, end + 1)));
    assertTrue(window.overlap(new SessionWindow(75, 150)));
    // Other starts exactly at this window's (inclusive) end.
    assertTrue(window.overlap(new SessionWindow(end, end + 1)));
    assertTrue(window.overlap(new SessionWindow(end, 150)));
}
/**
 * Intentional no-op: this implementation discards all plugin registrations.
 * NOTE(review): presumably an empty/disabled manager variant — confirm
 * against the enclosing class's documentation.
 */
@Override
public void register(ThreadPoolPlugin plugin) {
}
@Test
public void testRegister() {
    // The no-op manager must stay empty even after a registration attempt.
    TestPlugin plugin = new TestPlugin();
    manager.register(plugin);
    Assert.assertTrue(isEmpty(manager));
}
/**
 * Scans change payloads for the model with the given id. Each payload is a
 * {@link DiffPayload} holding either a single model or a map of models by id.
 *
 * @param payloads change payloads, each expected to be a {@link DiffPayload}
 * @param modelId id of the model to look for
 * @return the matching model, or {@code null} if no payload contains it
 */
@Nullable
public static EpoxyModel<?> getModelFromPayload(List<Object> payloads, long modelId) {
    if (payloads.isEmpty()) {
        return null;
    }
    for (Object payload : payloads) {
        DiffPayload diffPayload = (DiffPayload) payload;
        EpoxyModel<?> single = diffPayload.singleModel;
        if (single != null) {
            // Single-model payload: only a match on id counts.
            if (single.id() == modelId) {
                return single;
            }
            continue;
        }
        // Multi-model payload: look the id up in the map.
        EpoxyModel<?> fromMap = diffPayload.modelsById.get(modelId);
        if (fromMap != null) {
            return fromMap;
        }
    }
    return null;
}
@Test
public void getSingleModelsFromMultipleDiffPayloads() {
    // Two payloads, each carrying one model.
    TestModel firstModel = new TestModel();
    TestModel secondModel = new TestModel();
    DiffPayload firstPayload = diffPayloadWithModels(firstModel);
    DiffPayload secondPayload = diffPayloadWithModels(secondModel);
    List<Object> payloads = payloadsWithDiffPayloads(firstPayload, secondPayload);

    // Lookup by id must locate each model in its own payload.
    EpoxyModel<?> resolvedFirst = getModelFromPayload(payloads, firstModel.id());
    EpoxyModel<?> resolvedSecond = getModelFromPayload(payloads, secondModel.id());
    assertEquals(firstModel, resolvedFirst);
    assertEquals(secondModel, resolvedSecond);
}
/** Performs one unit of work: a single poll with the maximum possible timeout. */
@Override
public void doWork() {
    pollOnce(Long.MAX_VALUE);
}
/**
 * When the target node is not ready and the connection has failed with an
 * authentication error, a single doWork() iteration must complete the
 * handler with a disconnected response instead of sending the request.
 */
@Test
public void testShouldCallCompletionHandlerWithDisconnectedResponseWhenNodeNotReady() {
    final AbstractRequest.Builder<?> request = new StubRequestBuilder<>();
    final Node node = new Node(1, "", 8080);
    final RequestAndCompletionHandler handler =
        new RequestAndCompletionHandler(time.milliseconds(), node, request, completionHandler);
    final TestInterBrokerSendThread sendThread = new TestInterBrokerSendThread();

    final ClientRequest clientRequest =
        new ClientRequest("dest", request, 0, "1", 0, true, requestTimeoutMs, handler.handler);
    when(networkClient.newClientRequest(
        ArgumentMatchers.eq("1"),
        same(handler.request),
        anyLong(),
        ArgumentMatchers.eq(true),
        ArgumentMatchers.eq(requestTimeoutMs),
        same(handler.handler)
    )).thenReturn(clientRequest);

    // Node never becomes ready; the connection is reported failed with an auth error.
    when(networkClient.ready(node, time.milliseconds())).thenReturn(false);
    when(networkClient.connectionDelay(any(), anyLong())).thenReturn(0L);
    when(networkClient.poll(anyLong(), anyLong())).thenReturn(Collections.emptyList());
    when(networkClient.connectionFailed(node)).thenReturn(true);
    when(networkClient.authenticationException(node)).thenReturn(new AuthenticationException(""));

    sendThread.enqueue(handler);
    sendThread.doWork();

    // Verify the exact network-client interactions of one iteration.
    verify(networkClient)
        .newClientRequest(
            ArgumentMatchers.eq("1"),
            same(handler.request),
            anyLong(),
            ArgumentMatchers.eq(true),
            ArgumentMatchers.eq(requestTimeoutMs),
            same(handler.handler));
    verify(networkClient).ready(any(), anyLong());
    verify(networkClient).connectionDelay(any(), anyLong());
    verify(networkClient).poll(anyLong(), anyLong());
    verify(networkClient).connectionFailed(any());
    verify(networkClient).authenticationException(any());
    verifyNoMoreInteractions(networkClient);

    // The handler must have been completed with a disconnected response.
    assertTrue(completionHandler.executedWithDisconnectedResponse);
}
/**
 * Starts the component: registers the config and interface listeners, then
 * performs the initial connectivity setup.
 * NOTE(review): listeners are registered before setUpConnectivity(),
 * presumably so updates arriving during setup are not missed — confirm the
 * ordering is intentional.
 */
public void start() {
    configService.addListener(configListener);
    interfaceService.addListener(interfaceListener);
    setUpConnectivity();
}
@Test public void testConnectionSetup() { reset(intentSynchronizer); // Setup the expected intents for (Intent intent : intentList) { intentSynchronizer.submit(eqExceptId(intent)); } replay(intentSynchronizer); // Running the interface to be tested. peerConnectivityManager.start(); verify(intentSynchronizer); }
/**
 * Strips scheme and authority from {@code path} by taking the path component
 * of its URI form, e.g. {@code file:///foo/bar} becomes {@code /foo/bar}.
 *
 * @param path path that may carry a scheme
 * @return the bare path component
 */
public static String getPathWithoutScheme(Path path) {
    return path.toUri().getPath();
}
@Test
public void testGetPathWithoutSchemaThatContainsSchema() {
    // The "file://" scheme (with empty authority) must be stripped,
    // leaving only the absolute path.
    final Path schemedPath = new Path("file:///foo/bar/baz");
    assertEquals("/foo/bar/baz", HadoopUtils.getPathWithoutScheme(schemedPath));
}
/**
 * Injects the value under {@code inspector} through {@code inserter};
 * an invalid inspector is silently ignored.
 *
 * @param inspector source value to inject
 * @param inserter destination insertion point
 */
public void inject(Inspector inspector, Inserter inserter) {
    if (inspector.valid()) {
        injectValue(inserter, inspector, null);
    }
}
/**
 * Injecting each fixture value into an array must append them in call order,
 * preserving every value type (nix, bool, long, double, string, data,
 * nested array, nested object).
 */
@Test
public void injectIntoArray() {
    f2.slime1.setArray();
    // Append one value of every supported kind, in a fixed order.
    inject(f1.empty.get(), new ArrayInserter(f2.slime1.get()));
    inject(f1.nixValue.get(), new ArrayInserter(f2.slime1.get()));
    inject(f1.boolValue.get(), new ArrayInserter(f2.slime1.get()));
    inject(f1.longValue.get(), new ArrayInserter(f2.slime1.get()));
    inject(f1.doubleValue.get(), new ArrayInserter(f2.slime1.get()));
    inject(f1.stringValue.get(), new ArrayInserter(f2.slime1.get()));
    inject(f1.dataValue.get(), new ArrayInserter(f2.slime1.get()));
    inject(f1.arrayValue.get(), new ArrayInserter(f2.slime1.get()));
    inject(f1.objectValue.get(), new ArrayInserter(f2.slime1.get()));
    // Entries must appear at the index corresponding to injection order.
    assertEquals(f1.empty.get().toString(), f2.slime1.get().entry(0).toString());
    assertEquals(f1.nixValue.get().toString(), f2.slime1.get().entry(1).toString());
    assertEquals(f1.boolValue.get().toString(), f2.slime1.get().entry(2).toString());
    assertEquals(f1.longValue.get().toString(), f2.slime1.get().entry(3).toString());
    assertEquals(f1.doubleValue.get().toString(), f2.slime1.get().entry(4).toString());
    assertEquals(f1.stringValue.get().toString(), f2.slime1.get().entry(5).toString());
    assertEquals(f1.dataValue.get().toString(), f2.slime1.get().entry(6).toString());
    assertEquals(f1.arrayValue.get().toString(), f2.slime1.get().entry(7).toString());
    assertEqualTo(f1.objectValue.get(), f2.slime1.get().entry(8));
}
/**
 * Returns a read-only view of the extension points. Callers cannot mutate
 * the list; changes to the backing list remain visible through the view.
 *
 * @return unmodifiable view of the points list
 */
public List<String> getPoints() {
    return Collections.unmodifiableList(points);
}
/** An ExtensionInfo created with only a class name must expose no points. */
@Test
void getPointsShouldReturnEmptyListWhenNotSet() {
    ExtensionInfo info = new ExtensionInfo("org.pf4j.asm.ExtensionInfo");
    assertTrue(info.getPoints().isEmpty());
}
/**
 * Returns the registered runner for {@code jobId}. The registration check
 * runs first, so unknown jobs fail there rather than returning {@code null}
 * (the accompanying test expects a {@code NoSuchElementException}).
 *
 * @param jobId job whose runner is requested
 * @return the registered {@link JobManagerRunner}
 */
@Override
public JobManagerRunner get(JobID jobId) {
    assertJobRegistered(jobId);
    final JobManagerRunner runner = this.jobManagerRunners.get(jobId);
    return runner;
}
@Test
void testGetOnNonExistingJobManagerRunner() {
    // Looking up a job id that was never registered must fail loudly.
    final JobID unknownJobId = new JobID();
    assertThatThrownBy(() -> testInstance.get(unknownJobId))
        .isInstanceOf(NoSuchElementException.class);
}
/**
 * Evaluates this deny directive. Returns {@code DENY} when the action, type
 * and resource all match, or — for elastic-agent-profile requests — when the
 * action matches and the enclosing resource matches; {@code SKIP} otherwise.
 *
 * @param action requested action
 * @param aClass type of the entity the request targets
 * @param resource resource identifier of the target
 * @param resourceToOperateWithin enclosing resource (e.g. cluster profile id)
 * @return {@code DENY} or {@code SKIP}
 */
@Override
public Result apply(String action, Class<? extends Validatable> aClass, String resource,
                    String resourceToOperateWithin) {
    final boolean actionMatches = matchesAction(action);
    // Direct match on the directive's own type/resource.
    if (actionMatches && matchesType(aClass) && matchesResource(resource)) {
        return Result.DENY;
    }
    // Elastic-agent-profile requests are denied via their enclosing resource.
    if (actionMatches
        && isRequestForElasticAgentProfiles(aClass)
        && matchesResource(resourceToOperateWithin)) {
        return Result.DENY;
    }
    return Result.SKIP;
}
/**
 * A deny on administering cluster profiles matching "team1_*" must cascade:
 * it denies view and administer of those cluster profiles, and of elastic
 * agent profiles operating within them — everything else is skipped.
 * NOTE(review): "view" being denied implies the directive's action matching
 * treats administer as covering view — confirm in matchesAction.
 */
@Test
void forAdministerOfWildcardDefinedClusterProfile() {
    Deny directive = new Deny("administer", "cluster_profile", "team1_*");

    // "view" requests, with and without an enclosing team1/team2 cluster.
    Result viewAllElasticAgentProfiles = directive.apply("view", ElasticProfile.class, "*", null);
    Result viewAllElasticAgentProfilesUnderTeam1 = directive.apply("view", ElasticProfile.class, "*", "team1_uat");
    Result viewAllElasticAgentProfilesUnderTeam2 = directive.apply("view", ElasticProfile.class, "*", "team2_uat");
    Result viewAllClusterProfiles = directive.apply("view", ClusterProfile.class, "*", null);
    Result viewTeam1ClusterProfile = directive.apply("view", ClusterProfile.class, "team1_uat", null);
    Result viewTeam2ClusterProfile = directive.apply("view", ClusterProfile.class, "team2_uat", null);

    // "administer" requests for the same combinations.
    Result administerAllElasticAgentProfiles = directive.apply("administer", ElasticProfile.class, "*", null);
    Result administerAllElasticAgentProfilesUnderTeam1 = directive.apply("administer", ElasticProfile.class, "*", "team1_uat");
    Result administerAllElasticAgentProfilesUnderTeam2 = directive.apply("administer", ElasticProfile.class, "*", "team2_uat");
    Result administerAllClusterProfiles = directive.apply("administer", ClusterProfile.class, "*", null);
    Result administerTeam1ClusterProfile = directive.apply("administer", ClusterProfile.class, "team1_uat", null);
    Result administerTeam2ClusterProfile = directive.apply("administer", ClusterProfile.class, "team2_uat", null);

    // Only requests touching a "team1_*" cluster profile are denied.
    assertThat(viewAllElasticAgentProfiles).isEqualTo(Result.SKIP);
    assertThat(viewAllElasticAgentProfilesUnderTeam1).isEqualTo(Result.DENY);
    assertThat(viewAllElasticAgentProfilesUnderTeam2).isEqualTo(Result.SKIP);
    assertThat(viewAllClusterProfiles).isEqualTo(Result.SKIP);
    assertThat(viewTeam1ClusterProfile).isEqualTo(Result.DENY);
    assertThat(viewTeam2ClusterProfile).isEqualTo(Result.SKIP);

    assertThat(administerAllElasticAgentProfiles).isEqualTo(Result.SKIP);
    assertThat(administerAllElasticAgentProfilesUnderTeam1).isEqualTo(Result.DENY);
    assertThat(administerAllElasticAgentProfilesUnderTeam2).isEqualTo(Result.SKIP);
    assertThat(administerAllClusterProfiles).isEqualTo(Result.SKIP);
    assertThat(administerTeam1ClusterProfile).isEqualTo(Result.DENY);
    assertThat(administerTeam2ClusterProfile).isEqualTo(Result.SKIP);
}
/**
 * Handles a POP message request end-to-end: validates broker, topic and
 * consumer-group permissions and limits, builds the subscription filter,
 * pops messages (randomly visiting retry topics first), and finally either
 * returns found messages, enters long polling, or replies with an error.
 * Returns {@code null} because the response is written asynchronously.
 */
@Override
public RemotingCommand processRequest(final ChannelHandlerContext ctx, RemotingCommand request)
    throws RemotingCommandException {
    final long beginTimeMills = this.brokerController.getMessageStore().now();
    // Stamp the arrival time when the client did not set BORN_TIME (or sent "0").
    request.addExtFieldIfNotExist(BORN_TIME, String.valueOf(System.currentTimeMillis()));
    if (Objects.equals(request.getExtFields().get(BORN_TIME), "0")) {
        request.addExtField(BORN_TIME, String.valueOf(System.currentTimeMillis()));
    }
    Channel channel = ctx.channel();
    RemotingCommand response = RemotingCommand.createResponseCommand(PopMessageResponseHeader.class);
    final PopMessageResponseHeader responseHeader = (PopMessageResponseHeader) response.readCustomHeader();
    final PopMessageRequestHeader requestHeader =
        (PopMessageRequestHeader) request.decodeCommandCustomHeader(PopMessageRequestHeader.class, true);
    // Accumulators for per-queue offset bookkeeping reported back to the client.
    StringBuilder startOffsetInfo = new StringBuilder(64);
    StringBuilder msgOffsetInfo = new StringBuilder(64);
    StringBuilder orderCountInfo = null;
    if (requestHeader.isOrder()) {
        orderCountInfo = new StringBuilder(64);
    }
    brokerController.getConsumerManager().compensateBasicConsumerInfo(requestHeader.getConsumerGroup(),
        ConsumeType.CONSUME_POP, MessageModel.CLUSTERING);
    response.setOpaque(request.getOpaque());
    if (brokerController.getBrokerConfig().isEnablePopLog()) {
        POP_LOGGER.info("receive PopMessage request command, {}", request);
    }
    // ---- Request validation: timeout, broker/topic/group permissions, limits ----
    if (requestHeader.isTimeoutTooMuch()) {
        response.setCode(ResponseCode.POLLING_TIMEOUT);
        response.setRemark(String.format("the broker[%s] pop message is timeout too much",
            this.brokerController.getBrokerConfig().getBrokerIP1()));
        return response;
    }
    if (!PermName.isReadable(this.brokerController.getBrokerConfig().getBrokerPermission())) {
        response.setCode(ResponseCode.NO_PERMISSION);
        response.setRemark(String.format("the broker[%s] pop message is forbidden",
            this.brokerController.getBrokerConfig().getBrokerIP1()));
        return response;
    }
    if (requestHeader.getMaxMsgNums() > 32) {
        response.setCode(ResponseCode.SYSTEM_ERROR);
        response.setRemark(String.format("the broker[%s] pop message's num is greater than 32",
            this.brokerController.getBrokerConfig().getBrokerIP1()));
        return response;
    }
    // POP depends on the timer wheel (invisible-time scheduling); reject when disabled.
    if (!brokerController.getMessageStore().getMessageStoreConfig().isTimerWheelEnable()) {
        response.setCode(ResponseCode.SYSTEM_ERROR);
        response.setRemark(String.format("the broker[%s] pop message is forbidden because timerWheelEnable is false",
            this.brokerController.getBrokerConfig().getBrokerIP1()));
        return response;
    }
    TopicConfig topicConfig = this.brokerController.getTopicConfigManager().selectTopicConfig(requestHeader.getTopic());
    if (null == topicConfig) {
        POP_LOGGER.error("The topic {} not exist, consumer: {} ", requestHeader.getTopic(),
            RemotingHelper.parseChannelRemoteAddr(channel));
        response.setCode(ResponseCode.TOPIC_NOT_EXIST);
        response.setRemark(String.format("topic[%s] not exist, apply first please! %s",
            requestHeader.getTopic(), FAQUrl.suggestTodo(FAQUrl.APPLY_TOPIC_URL)));
        return response;
    }
    if (!PermName.isReadable(topicConfig.getPerm())) {
        response.setCode(ResponseCode.NO_PERMISSION);
        response.setRemark("the topic[" + requestHeader.getTopic() + "] peeking message is forbidden");
        return response;
    }
    if (requestHeader.getQueueId() >= topicConfig.getReadQueueNums()) {
        String errorInfo = String.format("queueId[%d] is illegal, topic:[%s] topicConfig.readQueueNums:[%d] " +
                "consumer:[%s]",
            requestHeader.getQueueId(), requestHeader.getTopic(), topicConfig.getReadQueueNums(),
            channel.remoteAddress());
        POP_LOGGER.warn(errorInfo);
        response.setCode(ResponseCode.SYSTEM_ERROR);
        response.setRemark(errorInfo);
        return response;
    }
    SubscriptionGroupConfig subscriptionGroupConfig =
        this.brokerController.getSubscriptionGroupManager().findSubscriptionGroupConfig(requestHeader.getConsumerGroup());
    if (null == subscriptionGroupConfig) {
        response.setCode(ResponseCode.SUBSCRIPTION_GROUP_NOT_EXIST);
        response.setRemark(String.format("subscription group [%s] does not exist, %s",
            requestHeader.getConsumerGroup(), FAQUrl.suggestTodo(FAQUrl.SUBSCRIPTION_GROUP_NOT_EXIST)));
        return response;
    }
    if (!subscriptionGroupConfig.isConsumeEnable()) {
        response.setCode(ResponseCode.NO_PERMISSION);
        response.setRemark("subscription group no permission, " + requestHeader.getConsumerGroup());
        return response;
    }
    // ---- Build subscription data and, for filtered pops, the message filter ----
    BrokerConfig brokerConfig = brokerController.getBrokerConfig();
    SubscriptionData subscriptionData = null;
    ExpressionMessageFilter messageFilter = null;
    if (requestHeader.getExp() != null && !requestHeader.getExp().isEmpty()) {
        try {
            subscriptionData = FilterAPI.build(requestHeader.getTopic(), requestHeader.getExp(),
                requestHeader.getExpType());
            brokerController.getConsumerManager().compensateSubscribeData(requestHeader.getConsumerGroup(),
                requestHeader.getTopic(), subscriptionData);
            // The retry topic always subscribes to everything.
            String retryTopic = KeyBuilder.buildPopRetryTopic(requestHeader.getTopic(),
                requestHeader.getConsumerGroup(), brokerConfig.isEnableRetryTopicV2());
            SubscriptionData retrySubscriptionData = FilterAPI.build(retryTopic, SubscriptionData.SUB_ALL,
                requestHeader.getExpType());
            brokerController.getConsumerManager().compensateSubscribeData(requestHeader.getConsumerGroup(),
                retryTopic, retrySubscriptionData);
            ConsumerFilterData consumerFilterData = null;
            if (!ExpressionType.isTagType(subscriptionData.getExpressionType())) {
                // Non-TAG expressions (e.g. SQL92) need compiled filter data.
                consumerFilterData = ConsumerFilterManager.build(
                    requestHeader.getTopic(), requestHeader.getConsumerGroup(), requestHeader.getExp(),
                    requestHeader.getExpType(), System.currentTimeMillis()
                );
                if (consumerFilterData == null) {
                    POP_LOGGER.warn("Parse the consumer's subscription[{}] failed, group: {}",
                        requestHeader.getExp(), requestHeader.getConsumerGroup());
                    response.setCode(ResponseCode.SUBSCRIPTION_PARSE_FAILED);
                    response.setRemark("parse the consumer's subscription failed");
                    return response;
                }
            }
            messageFilter = new ExpressionMessageFilter(subscriptionData, consumerFilterData,
                brokerController.getConsumerFilterManager());
        } catch (Exception e) {
            POP_LOGGER.warn("Parse the consumer's subscription[{}] error, group: {}",
                requestHeader.getExp(), requestHeader.getConsumerGroup());
            response.setCode(ResponseCode.SUBSCRIPTION_PARSE_FAILED);
            response.setRemark("parse the consumer's subscription failed");
            return response;
        }
    } else {
        // No expression supplied: fall back to a match-all TAG subscription.
        try {
            subscriptionData = FilterAPI.build(requestHeader.getTopic(), "*", ExpressionType.TAG);
            brokerController.getConsumerManager().compensateSubscribeData(requestHeader.getConsumerGroup(),
                requestHeader.getTopic(), subscriptionData);
            String retryTopic = KeyBuilder.buildPopRetryTopic(requestHeader.getTopic(),
                requestHeader.getConsumerGroup(), brokerConfig.isEnableRetryTopicV2());
            SubscriptionData retrySubscriptionData = FilterAPI.build(retryTopic, "*", ExpressionType.TAG);
            brokerController.getConsumerManager().compensateSubscribeData(requestHeader.getConsumerGroup(),
                retryTopic, retrySubscriptionData);
        } catch (Exception e) {
            POP_LOGGER.warn("Build default subscription error, group: {}", requestHeader.getConsumerGroup());
        }
    }
    // ---- Choose the revive queue and pop messages ----
    int randomQ = random.nextInt(100);
    int reviveQid;
    if (requestHeader.isOrder()) {
        reviveQid = KeyBuilder.POP_ORDER_REVIVE_QUEUE;
    } else {
        reviveQid = (int) Math.abs(ckMessageNumber.getAndIncrement()
            % this.brokerController.getBrokerConfig().getReviveQueueNum());
    }
    GetMessageResult getMessageResult = new GetMessageResult(requestHeader.getMaxMsgNums());
    ExpressionMessageFilter finalMessageFilter = messageFilter;
    StringBuilder finalOrderCountInfo = orderCountInfo;
    // Due to the design of the fields startOffsetInfo, msgOffsetInfo, and orderCountInfo,
    // a single POP request could only invoke the popMsgFromQueue method once
    // for either a normal topic or a retry topic's queue. Retry topics v1 and v2 are
    // considered the same type because they share the same retry flag in previous fields.
    // Therefore, needRetryV1 is designed as a subset of needRetry, and within a single request,
    // only one type of retry topic is able to call popMsgFromQueue.
    boolean needRetry = randomQ % 5 == 0;
    boolean needRetryV1 = false;
    if (brokerConfig.isEnableRetryTopicV2() && brokerConfig.isRetrieveMessageFromPopRetryTopicV1()) {
        needRetryV1 = randomQ % 2 == 0;
    }
    long popTime = System.currentTimeMillis();
    CompletableFuture<Long> getMessageFuture = CompletableFuture.completedFuture(0L);
    // Randomly visit the retry topic first (never for ordered consumption).
    if (needRetry && !requestHeader.isOrder()) {
        if (needRetryV1) {
            String retryTopic = KeyBuilder.buildPopRetryTopicV1(requestHeader.getTopic(), requestHeader.getConsumerGroup());
            getMessageFuture = popMsgFromTopic(retryTopic, true, getMessageResult, requestHeader, reviveQid, channel,
                popTime, finalMessageFilter, startOffsetInfo, msgOffsetInfo, orderCountInfo, randomQ, getMessageFuture);
        } else {
            String retryTopic = KeyBuilder.buildPopRetryTopic(requestHeader.getTopic(),
                requestHeader.getConsumerGroup(), brokerConfig.isEnableRetryTopicV2());
            getMessageFuture = popMsgFromTopic(retryTopic, true, getMessageResult, requestHeader, reviveQid, channel,
                popTime, finalMessageFilter, startOffsetInfo, msgOffsetInfo, orderCountInfo, randomQ, getMessageFuture);
        }
    }
    if (requestHeader.getQueueId() < 0) {
        // read all queue
        getMessageFuture = popMsgFromTopic(topicConfig, false, getMessageResult, requestHeader, reviveQid, channel,
            popTime, finalMessageFilter, startOffsetInfo, msgOffsetInfo, orderCountInfo, randomQ, getMessageFuture);
    } else {
        int queueId = requestHeader.getQueueId();
        getMessageFuture = getMessageFuture.thenCompose(restNum ->
            popMsgFromQueue(topicConfig.getTopicName(), requestHeader.getAttemptId(), false, getMessageResult,
                requestHeader, queueId, restNum, reviveQid, channel, popTime, finalMessageFilter,
                startOffsetInfo, msgOffsetInfo, finalOrderCountInfo));
    }
    // if not full , fetch retry again
    if (!needRetry && getMessageResult.getMessageMapedList().size() < requestHeader.getMaxMsgNums()
        && !requestHeader.isOrder()) {
        if (needRetryV1) {
            String retryTopicV1 = KeyBuilder.buildPopRetryTopicV1(requestHeader.getTopic(), requestHeader.getConsumerGroup());
            getMessageFuture = popMsgFromTopic(retryTopicV1, true, getMessageResult, requestHeader, reviveQid, channel,
                popTime, finalMessageFilter, startOffsetInfo, msgOffsetInfo, orderCountInfo, randomQ, getMessageFuture);
        } else {
            String retryTopic = KeyBuilder.buildPopRetryTopic(requestHeader.getTopic(),
                requestHeader.getConsumerGroup(), brokerConfig.isEnableRetryTopicV2());
            getMessageFuture = popMsgFromTopic(retryTopic, true, getMessageResult, requestHeader, reviveQid, channel,
                popTime, finalMessageFilter, startOffsetInfo, msgOffsetInfo, orderCountInfo, randomQ, getMessageFuture);
        }
    }
    // ---- Assemble and send the response asynchronously ----
    final RemotingCommand finalResponse = response;
    SubscriptionData finalSubscriptionData = subscriptionData;
    getMessageFuture.thenApply(restNum -> {
        if (!getMessageResult.getMessageBufferList().isEmpty()) {
            finalResponse.setCode(ResponseCode.SUCCESS);
            getMessageResult.setStatus(GetMessageStatus.FOUND);
            if (restNum > 0) {
                // all queue pop can not notify specified queue pop, and vice versa
                popLongPollingService.notifyMessageArriving(
                    requestHeader.getTopic(), requestHeader.getQueueId(), requestHeader.getConsumerGroup(),
                    null, 0L, null, null);
            }
        } else {
            // Nothing found: attempt long polling before reporting "no message".
            PollingResult pollingResult = popLongPollingService.polling(
                ctx, request, new PollingHeader(requestHeader), finalSubscriptionData, finalMessageFilter);
            if (PollingResult.POLLING_SUC == pollingResult) {
                if (restNum > 0) {
                    popLongPollingService.notifyMessageArriving(
                        requestHeader.getTopic(), requestHeader.getQueueId(), requestHeader.getConsumerGroup(),
                        null, 0L, null, null);
                }
                // Long polling took over; nothing to write now.
                return null;
            } else if (PollingResult.POLLING_FULL == pollingResult) {
                finalResponse.setCode(ResponseCode.POLLING_FULL);
            } else {
                finalResponse.setCode(ResponseCode.POLLING_TIMEOUT);
            }
            getMessageResult.setStatus(GetMessageStatus.NO_MESSAGE_IN_QUEUE);
        }
        responseHeader.setInvisibleTime(requestHeader.getInvisibleTime());
        responseHeader.setPopTime(popTime);
        responseHeader.setReviveQid(reviveQid);
        responseHeader.setRestNum(restNum);
        responseHeader.setStartOffsetInfo(startOffsetInfo.toString());
        responseHeader.setMsgOffsetInfo(msgOffsetInfo.toString());
        if (requestHeader.isOrder() && finalOrderCountInfo != null) {
            responseHeader.setOrderCountInfo(finalOrderCountInfo.toString());
        }
        finalResponse.setRemark(getMessageResult.getStatus().name());
        switch (finalResponse.getCode()) {
            case ResponseCode.SUCCESS:
                if (this.brokerController.getBrokerConfig().isTransferMsgByHeap()) {
                    // Heap transfer: copy message bytes into the response body.
                    final byte[] r = this.readGetMessageResult(getMessageResult, requestHeader.getConsumerGroup(),
                        requestHeader.getTopic(), requestHeader.getQueueId());
                    this.brokerController.getBrokerStatsManager().incGroupGetLatency(requestHeader.getConsumerGroup(),
                        requestHeader.getTopic(), requestHeader.getQueueId(),
                        (int) (this.brokerController.getMessageStore().now() - beginTimeMills));
                    finalResponse.setBody(r);
                } else {
                    // Zero-copy transfer from page cache; the response is written here,
                    // so returning null skips the writeResponse in thenAccept below.
                    final GetMessageResult tmpGetMessageResult = getMessageResult;
                    try {
                        FileRegion fileRegion =
                            new ManyMessageTransfer(finalResponse.encodeHeader(getMessageResult.getBufferTotalSize()),
                                getMessageResult);
                        channel.writeAndFlush(fileRegion)
                            .addListener((ChannelFutureListener) future -> {
                                tmpGetMessageResult.release();
                                Attributes attributes = RemotingMetricsManager.newAttributesBuilder()
                                    .put(LABEL_REQUEST_CODE, RemotingHelper.getRequestCodeDesc(request.getCode()))
                                    .put(LABEL_RESPONSE_CODE, RemotingHelper.getResponseCodeDesc(finalResponse.getCode()))
                                    .put(LABEL_RESULT, RemotingMetricsManager.getWriteAndFlushResult(future))
                                    .build();
                                RemotingMetricsManager.rpcLatency.record(
                                    request.getProcessTimer().elapsed(TimeUnit.MILLISECONDS), attributes);
                                if (!future.isSuccess()) {
                                    POP_LOGGER.error("Fail to transfer messages from page cache to {}",
                                        channel.remoteAddress(), future.cause());
                                }
                            });
                    } catch (Throwable e) {
                        POP_LOGGER.error("Error occurred when transferring messages from page cache", e);
                        getMessageResult.release();
                    }
                    return null;
                }
                break;
            default:
                return finalResponse;
        }
        return finalResponse;
    }).thenAccept(result -> NettyRemotingAbstract.writeResponse(channel, request, result));
    // Response is written asynchronously above.
    return null;
}
// POP consumption must be rejected with SYSTEM_ERROR when the broker's timer
// wheel is disabled (the processor forbids pop in that configuration).
@Test
public void testProcessRequest_whenTimerWheelIsFalse() throws RemotingCommandException {
    MessageStoreConfig messageStoreConfig = new MessageStoreConfig();
    messageStoreConfig.setTimerWheelEnable(false);
    when(messageStore.getMessageStoreConfig()).thenReturn(messageStoreConfig);
    final RemotingCommand request = createPopMsgCommand();
    RemotingCommand response = popMessageProcessor.processRequest(handlerContext, request);
    assertThat(response).isNotNull();
    assertThat(response.getCode()).isEqualTo(ResponseCode.SYSTEM_ERROR);
    assertThat(response.getRemark()).contains("pop message is forbidden because timerWheelEnable is false");
}
/**
 * Runs {@code apply()} and rethrows any exception that was captured into the
 * {@code thrown} field during the attempt; otherwise returns the outcome.
 */
@VisibleForTesting
boolean applyRaisingException() throws Exception {
    final Boolean result = apply();
    if (thrown == null) {
        return result;
    }
    throw thrown;
}
// When the wrapped stream raises an IOException while reading past the
// threshold, applyRaisingException must propagate it and the drainer must
// have aborted the stream.
@Test
public void testReadFailure() throws Throwable {
    int threshold = 50;
    SDKStreamDrainer drainer = new SDKStreamDrainer("s3://example/",
        new FakeSDKInputStream(BYTES, threshold),
        false,
        BYTES,
        EMPTY_INPUT_STREAM_STATISTICS, "test");
    intercept(IOException.class, "",
        () -> drainer.applyRaisingException());
    assertAborted(drainer);
}
/**
 * Opens {@code sourceFile} on whichever filesystem the path resolves to and
 * streams its contents through the configured checksum implementation.
 *
 * @param sourceFile file to checksum
 * @return the computed checksum string
 * @throws IOException if the file cannot be opened or read
 */
@Override
public String getFileChecksum(Path sourceFile) throws IOException {
    final FileSystem sourceFs = sourceFile.getFileSystem(this.conf);
    try (FSDataInputStream input = sourceFs.open(sourceFile)) {
        return this.checksum.computeChecksum(input);
    }
}
// Requesting a checksum for a missing path must propagate FileNotFoundException.
@Test(expected = FileNotFoundException.class)
public void testNonexistantFileChecksum() throws Exception {
    Path file = new Path(TEST_ROOT_DIR, "non-existant-file");
    client.getFileChecksum(file);
}
/**
 * Static factory for an {@link InMemorySorter} configured with the given options.
 *
 * @param options sorter configuration
 * @return a new sorter instance
 */
public static InMemorySorter create(Options options) {
    return new InMemorySorter(options);
}
// A sorter built with default options must handle a single-element input.
@Test
public void testSingleElement() throws Exception {
    SorterTestUtils.testSingleElement(InMemorySorter.create(new InMemorySorter.Options()));
}
/**
 * Starts the embedded database lazily: only when none is running yet and the
 * configured JDBC URL matches the embedded-database URL prefix. Idempotent.
 */
@Override
public void start() {
    if (embeddedDatabase != null) {
        return;
    }
    final String jdbcUrl = config.get(JDBC_URL.getKey()).get();
    if (startsWith(jdbcUrl, URL_PREFIX)) {
        embeddedDatabase = createEmbeddedDatabase();
        embeddedDatabase.start();
    }
}
// "jdbc:h2:mem" does not match the factory's embedded-database URL prefix, so
// the embedded database must never be started.
@Test
public void should_not_start_mem_h2_database() {
    settings.setProperty(JDBC_URL.getKey(), "jdbc:h2:mem");
    EmbeddedDatabase embeddedDatabase = mock(EmbeddedDatabase.class);
    EmbeddedDatabaseFactory databaseFactory = new EmbeddedDatabaseFactory(settings.asConfig(), system2) {
        @Override
        EmbeddedDatabase createEmbeddedDatabase() {
            return embeddedDatabase;
        }
    };
    databaseFactory.start();
    verify(embeddedDatabase, never()).start();
}
/**
 * Returns the value adjacent to {@code value} in the domain of the given
 * orderable type ({@code isPrevious} selects predecessor vs. successor), or
 * {@link Optional#empty()} for orderable types with no supported discrete
 * neighbor (e.g. VARCHAR, BOOLEAN, decimals).
 *
 * @param type the value's type; must be orderable
 * @param value the non-null starting value
 * @param isPrevious true for the preceding value, false for the following one
 * @throws IllegalStateException if {@code type} is not orderable
 */
public static Optional<Object> getAdjacentValue(Type type, Object value, boolean isPrevious)
{
    if (!type.isOrderable()) {
        throw new IllegalStateException("Type is not orderable: " + type);
    }
    requireNonNull(value, "value is null");

    if (type.equals(BIGINT) || type instanceof TimestampType) {
        return getBigintAdjacentValue(value, isPrevious);
    }

    if (type.equals(INTEGER) || type.equals(DATE)) {
        return getIntegerAdjacentValue(value, isPrevious);
    }

    if (type.equals(SMALLINT)) {
        return getSmallIntAdjacentValue(value, isPrevious);
    }

    if (type.equals(TINYINT)) {
        return getTinyIntAdjacentValue(value, isPrevious);
    }

    if (type.equals(DOUBLE)) {
        return getDoubleAdjacentValue(value, isPrevious);
    }

    if (type.equals(REAL)) {
        return getRealAdjacentValue(value, isPrevious);
    }

    return Optional.empty();
}
// Types without a supported discrete adjacent value (varchar, boolean, time,
// short and long decimals) must yield Optional.empty().
@Test
public void testNextValueForOtherType()
{
    assertThat(getAdjacentValue(VARCHAR, "anystr", false))
            .isEmpty();

    assertThat(getAdjacentValue(BOOLEAN, true, false))
            .isEmpty();

    assertThat(getAdjacentValue(TIME, 123L, false))
            .isEmpty();

    assertThat(getAdjacentValue(DecimalType.createDecimalType(8, 2), 12345L, false))
            .isEmpty();

    assertThat(getAdjacentValue(DecimalType.createDecimalType(20, 2), encodeScaledValue(new BigDecimal(111111111111111123.45)), false))
            .isEmpty();
}
/**
 * Executes all search types of the given query as a single Elasticsearch
 * multi-search request and converts each sub-response through its registered
 * search type handler, collecting per-search-type results and errors.
 */
@WithSpan
@Override
public QueryResult doRun(SearchJob job, Query query, ESGeneratedQueryContext queryContext) {
    // Short-circuit: nothing to search for.
    if (query.searchTypes().isEmpty()) {
        return QueryResult.builder()
                .query(query)
                .searchTypes(Collections.emptyMap())
                .errors(new HashSet<>(queryContext.errors()))
                .build();
    }
    LOG.debug("Running query {} for job {}", query.id(), job.getId());
    final HashMap<String, SearchType.Result> resultsMap = Maps.newHashMap();

    final Set<String> affectedIndices = indexLookup.indexNamesForStreamsInTimeRange(query.usedStreamIds(), query.timerange());

    final Map<String, SearchSourceBuilder> searchTypeQueries = queryContext.searchTypeQueries();
    final List<String> searchTypeIds = new ArrayList<>(searchTypeQueries.keySet());

    // Build one SearchRequest per search type, in searchTypeIds order (the
    // multi-search response items are matched back by this index later).
    final List<SearchRequest> searches = searchTypeIds
            .stream()
            .map(searchTypeId -> {
                // Narrow the index set for search types that override streams or
                // time range; otherwise reuse the query-wide index set.
                final Set<String> affectedIndicesForSearchType = query.searchTypes().stream()
                        .filter(s -> s.id().equalsIgnoreCase(searchTypeId)).findFirst()
                        .flatMap(searchType -> {
                            if (searchType.effectiveStreams().isEmpty()
                                    && query.globalOverride().flatMap(GlobalOverride::timerange).isEmpty()
                                    && searchType.timerange().isEmpty()) {
                                return Optional.empty();
                            }
                            return Optional.of(indexLookup.indexNamesForStreamsInTimeRange(query.effectiveStreams(searchType), query.effectiveTimeRange(searchType)));
                        })
                        .orElse(affectedIndices);

                // Empty index set: search the literal empty index name instead —
                // presumably so the request matches nothing rather than all
                // indices; confirm against ES semantics.
                Set<String> indices = affectedIndicesForSearchType.isEmpty() ? Collections.singleton("") : affectedIndicesForSearchType;
                return new SearchRequest()
                        .source(searchTypeQueries.get(searchTypeId))
                        .indices(indices.toArray(new String[0]))
                        .indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN);
            })
            .toList();

    //ES does not support per-request cancel_after_time_interval. We have to use simplified solution - the whole multi-search will be cancelled if it takes more than configured max. exec. time.
    final PlainActionFuture<MultiSearchResponse> mSearchFuture = client.cancellableMsearch(searches);
    job.setSearchEngineTaskFuture(mSearchFuture);
    final List<MultiSearchResponse.Item> results = getResults(mSearchFuture, job.getCancelAfterSeconds(), searches.size());

    for (SearchType searchType : query.searchTypes()) {
        final String searchTypeId = searchType.id();
        final Provider<ESSearchTypeHandler<? extends SearchType>> handlerProvider = elasticsearchSearchTypeHandlers.get(searchType.type());
        if (handlerProvider == null) {
            LOG.error("Unknown search type '{}', cannot convert query result.", searchType.type());
            // no need to add another error here, as the query generation code will have added the error about the missing handler already
            continue;
        }
        if (isSearchTypeWithError(queryContext, searchTypeId)) {
            LOG.error("Failed search type '{}', cannot convert query result, skipping.", searchType.type());
            // no need to add another error here, as the query generation code will have added the error about the missing handler already
            continue;
        }

        // we create a new instance because some search type handlers might need to track information between generating the query and
        // processing its result, such as aggregations, which depend on the name and type
        final ESSearchTypeHandler<? extends SearchType> handler = handlerProvider.get();
        final int searchTypeIndex = searchTypeIds.indexOf(searchTypeId);
        final MultiSearchResponse.Item multiSearchResponse = results.get(searchTypeIndex);
        if (multiSearchResponse.isFailure()) {
            ElasticsearchException e = new ElasticsearchException("Search type returned error: ", multiSearchResponse.getFailure());
            queryContext.addError(SearchTypeErrorParser.parse(query, searchTypeId, e));
        } else if (checkForFailedShards(multiSearchResponse).isPresent()) {
            ElasticsearchException e = checkForFailedShards(multiSearchResponse).get();
            queryContext.addError(SearchTypeErrorParser.parse(query, searchTypeId, e));
        } else {
            try {
                final SearchType.Result searchTypeResult = handler.extractResult(job, query, searchType, multiSearchResponse.getResponse(), queryContext);
                if (searchTypeResult != null) {
                    resultsMap.put(searchTypeId, searchTypeResult);
                }
            } catch (Exception e) {
                LOG.warn("Unable to extract results: ", e);
                queryContext.addError(new SearchTypeError(query, searchTypeId, e));
            }
        }
    }

    LOG.debug("Query {} ran for job {}", query.id(), job.getId());

    return QueryResult.builder()
            .query(query)
            .searchTypes(resultsMap)
            .errors(new HashSet<>(queryContext.errors()))
            .build();
}
// A query with no search types must still produce a (trivially empty) result
// via doRun's early return, without issuing a multi-search.
@Test
public void executesSearchForEmptySearchTypes() {
    final Query query = Query.builder()
            .id("query1")
            .query(ElasticsearchQueryString.of(""))
            .timerange(RelativeRange.create(300))
            .build();
    final Search search = Search.builder().queries(ImmutableSet.of(query)).build();
    final SearchJob job = new SearchJob("deadbeef", search, "admin", "test-node-id");

    final ESGeneratedQueryContext queryContext = mock(ESGeneratedQueryContext.class);

    final QueryResult queryResult = backend.doRun(job, query, queryContext);

    assertThat(queryResult).isNotNull();
    assertThat(queryResult.searchTypes()).isEmpty();
    assertThat(queryResult.executionStats()).isNotNull();
    assertThat(queryResult.errors()).isEmpty();
}
/**
 * Builds the POP retry topic name for the given topic and consumer group,
 * using the v2 naming scheme when enabled and the v1 scheme otherwise.
 */
public static String buildPopRetryTopic(String topic, String cid, boolean enableRetryV2) {
    return enableRetryV2 ? buildPopRetryTopicV2(topic, cid) : buildPopRetryTopicV1(topic, cid);
}
// V2 retry topic layout: RETRY_GROUP_TOPIC_PREFIX + group + "+" + topic.
@Test
public void testBuildPopRetryTopic() {
    assertThat(KeyBuilder.buildPopRetryTopicV2(topic, group)).isEqualTo(MixAll.RETRY_GROUP_TOPIC_PREFIX + group + "+" + topic);
}
/** Returns the identifier of the key backing this instance. */
@Override
public String getKeyId() {
    return this.keyId;
}
// The service must expose a non-blank key id.
@Test
void shouldGetKeyIdFromJwk() {
    assertTrue(StringUtils.hasText(service.getKeyId()));
}
/**
 * Registers the given file's parent directory with the watch service for
 * create/delete/modify events and records the file as watched.
 *
 * @param file an existing regular file to watch
 * @throws IOException if {@code file} is not a regular file or registration fails
 */
public void watch(File file) throws IOException {
    if (!file.isFile()) {
        throw new IOException(file.getName() + " is not a file");
    }
    // Resolve the absolute path first: File#getParentFile() returns null for a
    // bare relative name such as "foo", which previously caused an NPE before
    // the (unreachable) null check on getAbsolutePath()'s result.
    File parentDir = file.getAbsoluteFile().getParentFile();
    String dirString = (parentDir == null) ? "/" : parentDir.getAbsolutePath();
    Path dir = FileSystems.getDefault().getPath(dirString);
    LOGGER.info("watch {}", dir);
    WatchKey key = dir.register(watcher, ENTRY_CREATE, ENTRY_DELETE, ENTRY_MODIFY);
    synchronized (watchKeys) {
        watchKeys.put(key, new File(dirString));
        watchFiles.add(file);
    }
}
// Modifying a watched file must trigger that file's change callback; changes
// to an unwatched sibling file must not be reported.
@Test
void test() throws IOException, InterruptedException {
    assertNull(fileChanged);
    assertEquals(0, numChanged.get());
    // create new file
    File file1 = new File(tmpDir.toFile(), "test1");
    file1.createNewFile();
    File file2 = new File(tmpDir.toFile(), "test2");
    file2.createNewFile();
    watcher.watch(file1);
    FileOutputStream out1 = new FileOutputStream(file1);
    out1.write(1);
    out1.close();
    FileOutputStream out2 = new FileOutputStream(file2);
    out2.write(1);
    out2.close();
    // Wait (bounded) for the asynchronous watch service to deliver the event.
    synchronized (this) {
        wait(30 * 1000);
    }
    assertNotNull(fileChanged);
    assertEquals(fileChanged, file1);
    assertTrue(numChanged.get() >= 1, "Changes: " + numChanged.get());
}
/**
 * On application startup, instantiates each metrics binder bean that is
 * resolvable in this deployment (the get() call triggers its registration).
 */
void startup(@Observes StartupEvent event) {
    if (storageProviderMetricsBinderInstance.isResolvable()) {
        storageProviderMetricsBinderInstance.get();
        LOGGER.debug("JobRunr StorageProvider MicroMeter Metrics enabled");
    }
    if (backgroundJobServerMetricsBinderInstance.isResolvable()) {
        backgroundJobServerMetricsBinderInstance.get();
        LOGGER.debug("JobRunr BackgroundJobServer MicroMeter Metrics enabled");
    }
}
// When the background-job-server binder bean is not resolvable, startup must
// not attempt to instantiate it.
@Test
void metricsStarterDoesNotStartBackgroundJobServerMetricsBinderIfNotAvailable() {
    jobRunrMetricsStarter.startup(new StartupEvent());
    verify(backgroundJobServerMetricsBinderInstance, never()).get();
}
/**
 * Returns the vertex's task-manager-location future only when it has already
 * completed successfully; pending or exceptionally completed futures are
 * filtered out (treated as "no location available").
 */
@Override
public Optional<CompletableFuture<TaskManagerLocation>> getTaskManagerLocation(
        ExecutionVertexID executionVertexId) {
    return inputsLocationsRetriever
            .getTaskManagerLocation(executionVertexId)
            .filter(future -> future.isDone() && !future.isCompletedExceptionally());
}
// Without any location recorded in the underlying retriever, the wrapper must
// report no location.
@Test
void testNoInputLocation() {
    TestingInputsLocationsRetriever originalLocationRetriever = getOriginalLocationRetriever();
    InputsLocationsRetriever availableInputsLocationsRetriever =
            new AvailableInputsLocationsRetriever(originalLocationRetriever);
    assertThat(availableInputsLocationsRetriever.getTaskManagerLocation(EV1)).isNotPresent();
}
/**
 * Decodes a raw message's payload (using the configured charset) into a
 * {@link Message}, timing the parse step. The remote sender address, when
 * available, and the raw message's receive timestamp are passed to the parser.
 */
@Nullable
@Override
public Message decode(@Nonnull RawMessage rawMessage) {
    final String msg = new String(rawMessage.getPayload(), charset);
    try (Timer.Context ignored = this.decodeTime.time()) {
        final ResolvableInetSocketAddress address = rawMessage.getRemoteAddress();
        final InetSocketAddress remoteAddress;
        if (address == null) {
            remoteAddress = null;
        } else {
            remoteAddress = address.getInetSocketAddress();
        }
        return parse(msg, remoteAddress == null ? null : remoteAddress.getAddress(), rawMessage.getTimestamp());
    }
}
// Regression test for Graylog2/graylog2-server#3502: this syslog message must
// parse into the expected message text, timestamp (converted to UTC), source
// and facility/level fields.
@Test
public void testIssue3502() throws Exception {
    // https://github.com/Graylog2/graylog2-server/issues/3502
    final RawMessage rawMessage = buildRawMessage("<6>0 2017-02-15T16:01:07.000+01:00 hostname test - - - test 4");
    final Message message = codec.decode(rawMessage);
    assertNotNull(message);
    assertEquals("test 4", message.getMessage());
    assertEquals(new DateTime(2017, 2, 15, 15, 1, 7, DateTimeZone.UTC), message.getTimestamp());
    assertEquals("hostname", message.getSource());
    assertEquals(6, message.getField("level"));
    assertEquals("kernel", message.getField("facility"));
    assertEquals("test", message.getField("application_name"));
    assertEquals(0, message.getField("facility_num"));
}
/**
 * Writes the object to the wrapped ObjectOutputStream and flushes immediately.
 * preventMemoryLeak() is then invoked — presumably to periodically reset the
 * stream's back-reference table; confirm against its definition.
 */
@Override
public void write(Object object) throws IOException {
    objectOutputStream.writeObject(object);
    objectOutputStream.flush();
    preventMemoryLeak();
}
// write() must delegate the object to the wrapped ObjectOutputStream.
@Test
public void writesToUnderlyingObjectOutputStream() throws IOException {
    // given
    ObjectWriter objectWriter = new AutoFlushingObjectWriter(objectOutputStream, 2);
    String object = "foo";

    // when
    objectWriter.write(object);

    // then
    verify(objectOutputStream).writeObjectOverride(object);
}
/**
 * Escapes {@code value} for CSV without trimming surrounding whitespace
 * (delegates to {@code escapeCsv(value, false)}).
 */
public static CharSequence escapeCsv(CharSequence value) {
    return escapeCsv(value, false);
}
// A value containing a carriage return must be wrapped in double quotes.
// NOTE(review): escapeCsv(value, expected) is presumably a local assertion
// helper comparing actual vs. expected escaping — confirm in the test class.
@Test
public void escapeCsvWithCarriageReturn() {
    CharSequence value = "some text\r more text";
    CharSequence expected = "\"some text\r more text\"";
    escapeCsv(value, expected);
}
public static void main(String[] args) throws Exception { String usage = "Usage: MapFile inFile outFile"; if (args.length != 2) { System.err.println(usage); System.exit(-1); } String in = args[0]; String out = args[1]; Configuration conf = new Configuration(); FileSystem fs = FileSystem.getLocal(conf); MapFile.Reader reader = null; try { reader = new MapFile.Reader(fs, in, conf); WritableComparable<?> key = ReflectionUtils.newInstance( reader.getKeyClass().asSubclass(WritableComparable.class), conf); Writable value = ReflectionUtils.newInstance(reader.getValueClass() .asSubclass(Writable.class), conf); try (MapFile.Writer writer = new MapFile.Writer(conf, fs, out, reader.getKeyClass().asSubclass(WritableComparable.class), reader.getValueClass())) { while (reader.next(key, value)) { // copy all entries writer.append(key, value); } } } finally { IOUtils.cleanupWithLogger(LOG, reader); } }
// MapFile.main must copy a MapFile's entries without throwing (here input and
// output point to the same path).
@Test
public void testMainMethodMapFile() {
    String inFile = "mainMethodMapFile.mapfile";
    String path = new Path(TEST_DIR, inFile).toString();
    String[] args = { path, path };
    MapFile.Writer writer = null;
    try {
        writer = createWriter(inFile, IntWritable.class, Text.class);
        writer.append(new IntWritable(1), new Text("test_text1"));
        writer.append(new IntWritable(2), new Text("test_text2"));
        writer.close();
        MapFile.main(args);
    } catch (Exception ex) {
        fail("testMainMethodMapFile error !!!");
    } finally {
        IOUtils.cleanupWithLogger(null, writer);
    }
}
/** The wizard step is complete once this keyboard is the device's default IME. */
@Override
protected boolean isStepCompleted(@NonNull Context context) {
    return SetupSupport.isThisKeyboardSetAsDefaultIME(context);
}
// With the keyboard enabled but not set as default IME, the step must report
// incomplete, show the "switch off" icon, and wire the same click handler to
// both the state icon and the action link.
@Test
public void testKeyboardEnabledButNotDefault() {
    final String flatASKComponent =
        new ComponentName(BuildConfig.APPLICATION_ID, SoftKeyboard.class.getName())
            .flattenToString();
    Settings.Secure.putString(
        getApplicationContext().getContentResolver(),
        Settings.Secure.ENABLED_INPUT_METHODS,
        flatASKComponent);

    WizardPageSwitchToKeyboardFragment fragment = startFragment();
    Assert.assertFalse(fragment.isStepCompleted(getApplicationContext()));

    ImageView stateIcon = fragment.getView().findViewById(R.id.step_state_icon);
    Assert.assertNotNull(stateIcon);
    Assert.assertEquals(
        R.drawable.ic_wizard_switch_off,
        Shadows.shadowOf(stateIcon.getDrawable()).getCreatedFromResId());
    Assert.assertTrue(stateIcon.isClickable());

    View.OnClickListener stateIconClickHandler = Shadows.shadowOf(stateIcon).getOnClickListener();
    View.OnClickListener linkClickHandler =
        Shadows.shadowOf((View) fragment.getView().findViewById(R.id.go_to_switch_keyboard_action))
            .getOnClickListener();
    Assert.assertNotNull(stateIconClickHandler);
    Assert.assertSame(stateIconClickHandler, linkClickHandler);
}
/**
 * Validates an update of an existing FunctionConfig and merges the permitted
 * changes into a copy of the existing config.
 *
 * Immutable properties must not differ (tenant/namespace/name, input topics
 * and their regex flag, output serde/schema, processing guarantees, ordering
 * flags, runtime, autoAck, subscription name); mutable properties from
 * {@code newConfig}, when set, override the existing values.
 *
 * Note: this method mutates {@code newConfig}'s inputSpecs map while
 * normalizing the various input declaration styles.
 *
 * @return the merged configuration
 * @throws IllegalArgumentException if an immutable property was altered
 */
public static FunctionConfig validateUpdate(FunctionConfig existingConfig, FunctionConfig newConfig) {
    FunctionConfig mergedConfig = existingConfig.toBuilder().build();
    // Identity must match exactly.
    if (!existingConfig.getTenant().equals(newConfig.getTenant())) {
        throw new IllegalArgumentException("Tenants differ");
    }
    if (!existingConfig.getNamespace().equals(newConfig.getNamespace())) {
        throw new IllegalArgumentException("Namespaces differ");
    }
    if (!existingConfig.getName().equals(newConfig.getName())) {
        throw new IllegalArgumentException("Function Names differ");
    }
    if (!StringUtils.isEmpty(newConfig.getClassName())) {
        mergedConfig.setClassName(newConfig.getClassName());
    }
    if (!StringUtils.isEmpty(newConfig.getJar())) {
        mergedConfig.setJar(newConfig.getJar());
    }
    if (newConfig.getInputSpecs() == null) {
        newConfig.setInputSpecs(new HashMap<>());
    }
    if (mergedConfig.getInputSpecs() == null) {
        mergedConfig.setInputSpecs(new HashMap<>());
    }
    // Normalize every input declaration style (inputs, topicsPattern,
    // customSerdeInputs, customSchemaInputs) into newConfig's inputSpecs map.
    if (newConfig.getInputs() != null) {
        newConfig.getInputs().forEach((topicName -> {
            newConfig.getInputSpecs().put(topicName,
                    ConsumerConfig.builder().isRegexPattern(false).build());
        }));
    }
    if (newConfig.getTopicsPattern() != null && !newConfig.getTopicsPattern().isEmpty()) {
        newConfig.getInputSpecs().put(newConfig.getTopicsPattern(),
                ConsumerConfig.builder()
                        .isRegexPattern(true)
                        .build());
    }
    if (newConfig.getCustomSerdeInputs() != null) {
        newConfig.getCustomSerdeInputs().forEach((topicName, serdeClassName) -> {
            newConfig.getInputSpecs().put(topicName,
                    ConsumerConfig.builder()
                            .serdeClassName(serdeClassName)
                            .isRegexPattern(false)
                            .build());
        });
    }
    if (newConfig.getCustomSchemaInputs() != null) {
        newConfig.getCustomSchemaInputs().forEach((topicName, schemaClassname) -> {
            newConfig.getInputSpecs().put(topicName,
                    ConsumerConfig.builder()
                            .schemaType(schemaClassname)
                            .isRegexPattern(false)
                            .build());
        });
    }
    // Input topics may be reconfigured but never added, and the literal/regex
    // nature of each topic must be preserved.
    if (!newConfig.getInputSpecs().isEmpty()) {
        newConfig.getInputSpecs().forEach((topicName, consumerConfig) -> {
            if (!existingConfig.getInputSpecs().containsKey(topicName)) {
                throw new IllegalArgumentException("Input Topics cannot be altered");
            }
            if (consumerConfig.isRegexPattern() != existingConfig.getInputSpecs().get(topicName).isRegexPattern()) {
                throw new IllegalArgumentException(
                        "isRegexPattern for input topic " + topicName + " cannot be altered");
            }
            mergedConfig.getInputSpecs().put(topicName, consumerConfig);
        });
    }
    if (!StringUtils.isEmpty(newConfig.getOutputSerdeClassName()) && !newConfig.getOutputSerdeClassName()
            .equals(existingConfig.getOutputSerdeClassName())) {
        throw new IllegalArgumentException("Output Serde mismatch");
    }
    if (!StringUtils.isEmpty(newConfig.getOutputSchemaType()) && !newConfig.getOutputSchemaType()
            .equals(existingConfig.getOutputSchemaType())) {
        throw new IllegalArgumentException("Output Schema mismatch");
    }
    if (!StringUtils.isEmpty(newConfig.getLogTopic())) {
        mergedConfig.setLogTopic(newConfig.getLogTopic());
    }
    if (newConfig.getProcessingGuarantees() != null && !newConfig.getProcessingGuarantees()
            .equals(existingConfig.getProcessingGuarantees())) {
        throw new IllegalArgumentException("Processing Guarantees cannot be altered");
    }
    if (newConfig.getRetainOrdering() != null && !newConfig.getRetainOrdering()
            .equals(existingConfig.getRetainOrdering())) {
        throw new IllegalArgumentException("Retain Ordering cannot be altered");
    }
    if (newConfig.getRetainKeyOrdering() != null && !newConfig.getRetainKeyOrdering()
            .equals(existingConfig.getRetainKeyOrdering())) {
        throw new IllegalArgumentException("Retain Key Ordering cannot be altered");
    }
    if (!StringUtils.isEmpty(newConfig.getOutput())) {
        mergedConfig.setOutput(newConfig.getOutput());
    }
    if (newConfig.getUserConfig() != null) {
        mergedConfig.setUserConfig(newConfig.getUserConfig());
    }
    if (newConfig.getSecrets() != null) {
        mergedConfig.setSecrets(newConfig.getSecrets());
    }
    if (newConfig.getRuntime() != null && !newConfig.getRuntime().equals(existingConfig.getRuntime())) {
        throw new IllegalArgumentException("Runtime cannot be altered");
    }
    if (newConfig.getAutoAck() != null && !newConfig.getAutoAck().equals(existingConfig.getAutoAck())) {
        throw new IllegalArgumentException("AutoAck cannot be altered");
    }
    if (newConfig.getMaxMessageRetries() != null) {
        mergedConfig.setMaxMessageRetries(newConfig.getMaxMessageRetries());
    }
    if (!StringUtils.isEmpty(newConfig.getDeadLetterTopic())) {
        mergedConfig.setDeadLetterTopic(newConfig.getDeadLetterTopic());
    }
    if (!StringUtils.isEmpty(newConfig.getSubName()) && !newConfig.getSubName()
            .equals(existingConfig.getSubName())) {
        throw new IllegalArgumentException("Subscription Name cannot be altered");
    }
    if (newConfig.getParallelism() != null) {
        mergedConfig.setParallelism(newConfig.getParallelism());
    }
    if (newConfig.getResources() != null) {
        // Resources are merged field-by-field rather than replaced wholesale.
        mergedConfig
                .setResources(ResourceConfigUtils.merge(existingConfig.getResources(), newConfig.getResources()));
    }
    if (newConfig.getWindowConfig() != null) {
        mergedConfig.setWindowConfig(newConfig.getWindowConfig());
    }
    if (newConfig.getTimeoutMs() != null) {
        mergedConfig.setTimeoutMs(newConfig.getTimeoutMs());
    }
    if (newConfig.getCleanupSubscription() != null) {
        mergedConfig.setCleanupSubscription(newConfig.getCleanupSubscription());
    }
    if (!StringUtils.isEmpty(newConfig.getRuntimeFlags())) {
        mergedConfig.setRuntimeFlags(newConfig.getRuntimeFlags());
    }
    if (!StringUtils.isEmpty(newConfig.getCustomRuntimeOptions())) {
        mergedConfig.setCustomRuntimeOptions(newConfig.getCustomRuntimeOptions());
    }
    if (newConfig.getProducerConfig() != null) {
        mergedConfig.setProducerConfig(newConfig.getProducerConfig());
    }
    return mergedConfig;
}
// Parallelism is mutable: the updated value must appear in the merged config,
// and with it reverted the merged config must equal the original.
@Test
public void testMergeDifferentParallelism() {
    FunctionConfig functionConfig = createFunctionConfig();
    FunctionConfig newFunctionConfig = createUpdatedFunctionConfig("parallelism", 101);
    FunctionConfig mergedConfig = FunctionConfigUtils.validateUpdate(functionConfig, newFunctionConfig);
    assertEquals(
            mergedConfig.getParallelism(),
            Integer.valueOf(101)
    );
    mergedConfig.setParallelism(functionConfig.getParallelism());
    assertEquals(
            new Gson().toJson(functionConfig),
            new Gson().toJson(mergedConfig)
    );
}
/**
 * Parses a Measurement Interval characteristic value: exactly two bytes,
 * interpreted as an unsigned 16-bit little-endian interval. Any other length
 * is routed to the invalid-data callback.
 */
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
    super.onDataReceived(device, data);
    if (data.size() == 2) {
        final int interval = data.getIntValue(Data.FORMAT_UINT16_LE, 0);
        onMeasurementIntervalReceived(device, interval);
    } else {
        onInvalidDataReceived(device, data);
    }
}
// A 1-byte payload is too short for a UINT16 interval: the invalid-data path
// must be taken (callback not invoked, response not valid).
@Test
public void onInvalidDataReceived() {
    final ProfileReadResponse response = new MeasurementIntervalDataCallback() {
        @Override
        public void onMeasurementIntervalReceived(@NonNull final BluetoothDevice device, final int interval) {
            called = true;
        }
    };
    called = false;
    final Data data = new Data(new byte[] { 60 });
    response.onDataReceived(null, data);
    assertFalse(called);
    assertFalse(response.isValid());
}
/**
 * Generates a token only for UPDATE statements whose first simple table is
 * covered by an encrypt table rule.
 */
@Override
public boolean isGenerateSQLToken(final SQLStatementContext sqlStatementContext) {
    return sqlStatementContext instanceof UpdateStatementContext
            && encryptRule.findEncryptTable(((TableAvailable) sqlStatementContext).getTablesContext()
                    .getSimpleTables().iterator().next().getTableName().getIdentifier().getValue()).isPresent();
}
// An UPDATE statement context (on an encrypted table) must trigger token generation.
@Test
void assertIsGenerateSQLTokenUpdateSQLSuccess() {
    assertTrue(tokenGenerator.isGenerateSQLToken(updateStatementContext));
}
/**
 * Builds a resource id from {@code baseString}: lower-cases it, replaces all
 * {@code illegalChars} with {@code replaceChar}, truncates so the final id
 * fits within {@code targetLength}, then appends the current date/time (in
 * TIME_ZONE) formatted with {@code timeFormat}, separated by {@code replaceChar}.
 *
 * NOTE(review): if targetLength is smaller than the formatted timestamp length
 * plus one, the substring bound goes negative and subSequence throws —
 * presumably callers pass generous limits; confirm.
 *
 * @throws IllegalArgumentException if {@code baseString} is empty
 */
public static String generateResourceId(
    String baseString,
    Pattern illegalChars,
    String replaceChar,
    int targetLength,
    DateTimeFormatter timeFormat) {
    // first, make sure the baseString, typically the test ID, is not empty
    checkArgument(baseString.length() != 0, "baseString cannot be empty.");
    // next, replace all illegal characters from given string with given replacement character
    String illegalCharsRemoved =
        illegalChars.matcher(baseString.toLowerCase()).replaceAll(replaceChar);
    // finally, append the date/time and return the substring that does not exceed the length limit
    LocalDateTime localDateTime = LocalDateTime.now(ZoneId.of(TIME_ZONE));
    String timeAddOn = localDateTime.format(timeFormat);
    return illegalCharsRemoved.subSequence(
            0, min(targetLength - timeAddOn.length() - 1, illegalCharsRemoved.length()))
        + replaceChar
        + localDateTime.format(timeFormat);
}
// Upper-case letters must be lower-cased; the id ends with a timestamp suffix
// matching the "-dddddddd-dddddd-dddddd" pattern.
@Test
public void testGenerateResourceIdShouldReplaceUpperCaseLettersWithLowerCase() {
    String testBaseString = "Test-Instance";

    String actual =
        generateResourceId(
            testBaseString,
            ILLEGAL_INSTANCE_CHARS,
            REPLACE_INSTANCE_CHAR,
            MAX_INSTANCE_ID_LENGTH,
            TIME_FORMAT);

    assertThat(actual).matches("test-instance-\\d{8}-\\d{6}-\\d{6}");
}
/**
 * Computes the axis-aligned bounding rectangle of all segment endpoints.
 *
 * Bug fix: the max accumulators were initialized to {@code Double.MIN_VALUE},
 * which is the smallest *positive* double — for all-negative coordinates the
 * computed maxima were wrong. Infinities are the correct identity elements.
 */
public Rectangle getBounds() {
    double minX = Double.POSITIVE_INFINITY;
    double minY = Double.POSITIVE_INFINITY;
    double maxX = Double.NEGATIVE_INFINITY;
    double maxY = Double.NEGATIVE_INFINITY;
    for (LineSegment segment : this.segments) {
        minX = Math.min(minX, Math.min(segment.start.x, segment.end.x));
        minY = Math.min(minY, Math.min(segment.start.y, segment.end.y));
        maxX = Math.max(maxX, Math.max(segment.start.x, segment.end.x));
        maxY = Math.max(maxY, Math.max(segment.start.y, segment.end.y));
    }
    return new Rectangle(minX, minY, maxX, maxY);
}
// Bounds of the polyline (0,0)-(1,0)-(1,1) must be the unit square.
@Test
public void boundsTest() {
    Point point1 = new Point(0, 0);
    Point point2 = new Point(1, 0);
    Point point3 = new Point(1, 1);
    LineString lineString = new LineString();
    lineString.segments.add(new LineSegment(point1, point2));
    lineString.segments.add(new LineSegment(point2, point3));
    Assert.assertEquals(new Rectangle(0, 0, 1, 1), lineString.getBounds());
}
/**
 * Merges the listener group-key status maps of all {@code sampleResults} on
 * top of {@code sampleCollectResult}'s map and returns a new SampleResult
 * holding the union (later entries win on key collisions).
 *
 * Bug fix: the previous implementation aliased the input's map and called
 * putAll on it directly, mutating {@code sampleCollectResult} as a side
 * effect. The merge now happens in a fresh map.
 */
public SampleResult mergeSampleResult(SampleResult sampleCollectResult, List<SampleResult> sampleResults) {
    SampleResult mergeResult = new SampleResult();
    Map<String, String> listenersGroupkeyStatus = new HashMap<>(10);
    Map<String, String> collected = sampleCollectResult.getLisentersGroupkeyStatus();
    if (collected != null && !collected.isEmpty()) {
        listenersGroupkeyStatus.putAll(collected);
    }
    for (SampleResult sampleResult : sampleResults) {
        listenersGroupkeyStatus.putAll(sampleResult.getLisentersGroupkeyStatus());
    }
    mergeResult.setLisentersGroupkeyStatus(listenersGroupkeyStatus);
    return mergeResult;
}
// Merging must union all listener maps: 2+2+2 entries with a base, 2+2 when
// merging onto an empty SampleResult.
@Test
void testMergeSampleResult() throws Exception {
    SampleResult sampleResult1 = new SampleResult();
    Map<String, String> listener1 = new HashMap<>();
    listener1.put("config1", "md51123");
    listener1.put("config11", "md5123123");
    sampleResult1.setLisentersGroupkeyStatus(listener1);
    SampleResult sampleResult2 = new SampleResult();
    Map<String, String> listener2 = new HashMap<>();
    listener2.put("config22", "md51123");
    listener2.put("config2", "md5123123");
    sampleResult2.setLisentersGroupkeyStatus(listener2);
    List<SampleResult> sampleResults = new ArrayList<>();
    sampleResults.add(sampleResult2);
    SampleResult sampleResult3 = new SampleResult();
    Map<String, String> listener3 = new HashMap<>();
    listener3.put("config33", "md51123");
    listener3.put("config3", "md5123123");
    sampleResults.add(sampleResult3);
    sampleResult3.setLisentersGroupkeyStatus(listener3);
    //sampleResult ips is null
    SampleResult sampleResultMerge1 = configSubService.mergeSampleResult(sampleResult1, sampleResults);
    assertEquals(6, sampleResultMerge1.getLisentersGroupkeyStatus().size());
    SampleResult sampleResultMerge2 = configSubService.mergeSampleResult(new SampleResult(), sampleResults);
    assertEquals(4, sampleResultMerge2.getLisentersGroupkeyStatus().size());
}
/**
 * Maps an HTTP error response to a BackgroundException by wrapping it in an
 * S3ServiceException carrying the status code, the (buffered) response body
 * when present, and MinIO-specific error headers.
 */
public BackgroundException map(HttpResponse response) throws IOException {
    final S3ServiceException failure;
    if (null == response.getEntity()) {
        failure = new S3ServiceException(response.getStatusLine().getReasonPhrase());
    } else {
        // Buffer the entity so it can safely be consumed into the error message.
        EntityUtils.updateEntity(response, new BufferedHttpEntity(response.getEntity()));
        failure = new S3ServiceException(response.getStatusLine().getReasonPhrase(),
                EntityUtils.toString(response.getEntity()));
    }
    failure.setResponseCode(response.getStatusLine().getStatusCode());
    if (response.containsHeader(MINIO_ERROR_CODE)) {
        failure.setErrorCode(response.getFirstHeader(MINIO_ERROR_CODE).getValue());
    }
    if (response.containsHeader(MINIO_ERROR_DESCRIPTION)) {
        failure.setErrorMessage(response.getFirstHeader(MINIO_ERROR_DESCRIPTION).getValue());
    }
    return this.map(failure);
}
// The deepest cause's message must surface in the mapped exception's detail,
// with the standard provider-assistance suffix appended.
@Test
public void testAlgorithmFailure() {
    assertEquals("EC AlgorithmParameters not available. Please contact your web hosting service provider for assistance.",
        new S3ExceptionMappingService().map(new S3ServiceException(
            new SSLException(
                new RuntimeException(
                    new NoSuchAlgorithmException("EC AlgorithmParameters not available")
                )
            ))).getDetail());
}
/**
 * Resolves the message key from the locale's resource bundle and, for a
 * non-blank key, substitutes the message's arguments into the bundle string
 * via String#formatted.
 */
public static String formatLocalizedErrorMessage(Message localizedErrorMessage, Locale locale) {
    var bundle = ResourceBundle.getBundle(BUNDLE, locale);
    var key = localizedErrorMessage.messageKey();
    var message = bundle.getString(key);
    if (!key.isBlank()) {
        message = message.formatted((Object[]) localizedErrorMessage.args());
    }
    return message;
}
// The German locale must resolve the German bundle string with the redirect
// URI substituted into its placeholder.
@Test
void test_negotiatePreferredLocales_errorWithContent() {
    var errorMessage = new Message("error.badRedirect", String.valueOf(BASE_URI));
    var locale = Locale.GERMANY;

    var result = LocaleUtils.formatLocalizedErrorMessage(errorMessage, locale);

    var expected = "Ungültige redirect_uri='%s'. Übergebener Link ist nicht gültig.".formatted(BASE_URI);
    assertEquals(expected, result);
}
/**
 * Cancels a delegation token: decodes its identifier, synchronizes the local
 * cache with ZooKeeper for that identifier, then delegates the actual cancel
 * to the parent implementation.
 */
@Override
public TokenIdent cancelToken(Token<TokenIdent> token, String canceller) throws IOException {
    ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
    DataInputStream in = new DataInputStream(buf);
    TokenIdent id = createIdentifier();
    id.readFields(in);
    syncLocalCacheWithZk(id);
    return super.cancelToken(token, canceller);
}
// Token managers joining the ZK-backed secret manager later must see tokens
// created — and cancellations performed — by earlier managers, and tokens
// created by a new manager must verify on the old one too.
@SuppressWarnings("unchecked")
@Test
public void testNodeUpAferAWhile() throws Exception {
    for (int i = 0; i < TEST_RETRIES; i++) {
        String connectString = zkServer.getConnectString();
        Configuration conf = getSecretConf(connectString);
        DelegationTokenManager tm1 = new DelegationTokenManager(conf, new Text("bla"));
        tm1.init();
        Token<DelegationTokenIdentifier> token1 =
            (Token<DelegationTokenIdentifier>) tm1.createToken(
                UserGroupInformation.getCurrentUser(), "foo");
        Assert.assertNotNull(token1);
        Token<DelegationTokenIdentifier> token2 =
            (Token<DelegationTokenIdentifier>) tm1.createToken(
                UserGroupInformation.getCurrentUser(), "bar");
        Assert.assertNotNull(token2);
        Token<DelegationTokenIdentifier> token3 =
            (Token<DelegationTokenIdentifier>) tm1.createToken(
                UserGroupInformation.getCurrentUser(), "boo");
        Assert.assertNotNull(token3);

        tm1.verifyToken(token1);
        tm1.verifyToken(token2);
        tm1.verifyToken(token3);

        // Cancel one token
        tm1.cancelToken(token1, "foo");

        // Start second node after some time..
        Thread.sleep(1000);
        DelegationTokenManager tm2 = new DelegationTokenManager(conf, new Text("bla"));
        tm2.init();

        tm2.verifyToken(token2);
        tm2.verifyToken(token3);
        try {
            verifyTokenFail(tm2, token1);
            fail("Expected InvalidToken");
        } catch (SecretManager.InvalidToken it) {
            // Ignore
        }

        // Create a new token thru the new ZKDTSM
        Token<DelegationTokenIdentifier> token4 =
            (Token<DelegationTokenIdentifier>) tm2.createToken(
                UserGroupInformation.getCurrentUser(), "xyz");
        Assert.assertNotNull(token4);
        tm2.verifyToken(token4);
        tm1.verifyToken(token4);

        // Bring down tm2
        verifyDestroy(tm2, conf);

        // Start third node after some time..
        Thread.sleep(1000);
        DelegationTokenManager tm3 = new DelegationTokenManager(conf, new Text("bla"));
        tm3.init();

        tm3.verifyToken(token2);
        tm3.verifyToken(token3);
        tm3.verifyToken(token4);
        try {
            verifyTokenFail(tm3, token1);
            fail("Expected InvalidToken");
        } catch (SecretManager.InvalidToken it) {
            // Ignore
        }
        verifyDestroy(tm3, conf);
        verifyDestroy(tm1, conf);
    }
}
/** Submits the callable for immediate execution (a schedule with zero delay). */
@Override
public <T> Future<T> submit(Callable<T> callable) {
    return schedule(callable, 0, TimeUnit.SECONDS);
}
@Test
public void submit() throws Exception {
    final CountTask countTask = new CountTask();
    final ControllableScheduler controllableScheduler = new ControllableScheduler();

    controllableScheduler.submit(countTask);

    // The task is queued but not yet executed.
    assertFalse(controllableScheduler.schedulerIsIdle());

    controllableScheduler.runNextPendingCommand();

    // Running the single pending command executes the task exactly once
    // and drains the queue.
    assertEquals(1, countTask.runTimes());
    assertTrue(controllableScheduler.schedulerIsIdle());
}
@SuppressWarnings("unused") // Part of required API. public void execute( final ConfiguredStatement<InsertValues> statement, final SessionProperties sessionProperties, final KsqlExecutionContext executionContext, final ServiceContext serviceContext ) { final InsertValues insertValues = statement.getStatement(); final MetaStore metaStore = executionContext.getMetaStore(); final KsqlConfig config = statement.getSessionConfig().getConfig(true); final DataSource dataSource = getDataSource(config, metaStore, insertValues); validateInsert(insertValues.getColumns(), dataSource); final ProducerRecord<byte[], byte[]> record = buildRecord(statement, metaStore, dataSource, serviceContext); try { producer.sendRecord(record, serviceContext, config.getProducerClientConfigProps()); } catch (final TopicAuthorizationException e) { // TopicAuthorizationException does not give much detailed information about why it failed, // except which topics are denied. Here we just add the ACL to make the error message // consistent with other authorization error messages. final Exception rootCause = new KsqlTopicAuthorizationException( AclOperation.WRITE, e.unauthorizedTopics() ); throw new KsqlException(createInsertFailedExceptionMessage(insertValues), rootCause); } catch (final ClusterAuthorizationException e) { // ClusterAuthorizationException is thrown when using idempotent producers // and either a topic write permission or a cluster-level idempotent write // permission (only applicable for broker versions no later than 2.8) is // missing. In this case, we include additional context to help the user // distinguish this type of failure from other permissions exceptions // such as the ones thrown above when TopicAuthorizationException is caught. 
throw new KsqlException( createInsertFailedExceptionMessage(insertValues), createClusterAuthorizationExceptionRootCause(dataSource) ); } catch (final KafkaException e) { if (e.getCause() != null && e.getCause() instanceof ClusterAuthorizationException) { // The error message thrown when an idempotent producer is missing permissions // is (nondeterministically) inconsistent: it is either a raw ClusterAuthorizationException, // as checked for above, or a ClusterAuthorizationException wrapped inside a KafkaException. // ksqlDB handles these two the same way, accordingly. // See https://issues.apache.org/jira/browse/KAFKA-14138 for more. throw new KsqlException( createInsertFailedExceptionMessage(insertValues), createClusterAuthorizationExceptionRootCause(dataSource) ); } else { throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e); } } catch (final Exception e) { throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e); } }
@Test public void shouldHandleAllSortsOfLiterals() { // Given: final ConfiguredStatement<InsertValues> statement = givenInsertValues( ImmutableList.of(COL1, COL0), ImmutableList.of( new LongLiteral(2L), new StringLiteral("str")) ); // When: executor.execute(statement, mock(SessionProperties.class), engine, serviceContext); // Then: verify(valueSerializer).serialize(TOPIC_NAME, genericRow("str", 2L)); verify(producer).send(new ProducerRecord<>(TOPIC_NAME, null, 1L, KEY, VALUE)); }
@VisibleForTesting static boolean hasEnoughCurvature(final int[] xs, final int[] ys, final int middlePointIndex) { // Calculate the radianValue formed between middlePointIndex, and one point in either // direction final int startPointIndex = middlePointIndex - CURVATURE_NEIGHBORHOOD; final int startX = xs[startPointIndex]; final int startY = ys[startPointIndex]; final int endPointIndex = middlePointIndex + CURVATURE_NEIGHBORHOOD; final int endX = xs[endPointIndex]; final int endY = ys[endPointIndex]; final int middleX = xs[middlePointIndex]; final int middleY = ys[middlePointIndex]; final int firstSectionXDiff = startX - middleX; final int firstSectionYDiff = startY - middleY; final double firstSectionLength = Math.sqrt(firstSectionXDiff * firstSectionXDiff + firstSectionYDiff * firstSectionYDiff); final int secondSectionXDiff = endX - middleX; final int secondSectionYDiff = endY - middleY; final double secondSectionLength = Math.sqrt( secondSectionXDiff * secondSectionXDiff + secondSectionYDiff * secondSectionYDiff); final double dotProduct = firstSectionXDiff * secondSectionXDiff + firstSectionYDiff * secondSectionYDiff; final double radianValue = Math.acos(dotProduct / firstSectionLength / secondSectionLength); return radianValue <= CURVATURE_THRESHOLD; }
@Test public void testHasEnoughCurvature9Degrees() { final int[] Xs = new int[3]; final int[] Ys = new int[3]; // https://www.triangle-calculator.com/?what=&q=A%3D171%2C+b%3D100%2C+c%3D100&submit=Solve // A[100; 0] B[0; 0] C[198.769; 15.643] Xs[0] = 0; Ys[0] = 0; Xs[1] = 100; Ys[1] = 0; Xs[2] = 198; Ys[2] = 16; Assert.assertFalse(GestureTypingDetector.hasEnoughCurvature(Xs, Ys, 1)); Xs[0] = 0; Ys[0] = 0; Xs[1] = 100; Ys[1] = 0; Xs[2] = 198; Ys[2] = -16; Assert.assertFalse(GestureTypingDetector.hasEnoughCurvature(Xs, Ys, 1)); }
/**
 * Returns a masked version of the given KSQL statement with sensitive values
 * replaced, falling back to a coarse masking if parsing fails for any reason.
 */
public static String getMaskedStatement(final String query) {
  // StackOverflowError is caught deliberately: deeply nested statements can
  // exhaust the stack during parsing, and masking must never fail outright.
  try {
    return new Visitor().visit(DefaultKsqlParser.getParseTree(query));
  } catch (final Exception | StackOverflowError e) {
    return fallbackMasking(query);
  }
}
@Test public void shouldMaskInsertStatement() { // Given final String query = "--this is a comment. \n" + "INSERT INTO foo (KEY_COL, COL_A) VALUES (\"key\", \"A\");"; // When final String maskedQuery = QueryMask.getMaskedStatement(query); // Then final String expected = "INSERT INTO `FOO` (`KEY_COL`, `COL_A`) VALUES ('[value]', '[value]');"; assertThat(maskedQuery, is(expected)); }
public String getTopicName(AsyncMockDefinition definition, EventMessage eventMessage) { logger.debugf("AsyncAPI Operation {%s}", definition.getOperation().getName()); // Produce service name part of topic name. String serviceName = definition.getOwnerService().getName().replace(" ", ""); serviceName = serviceName.replace("-", ""); // Produce version name part of topic name. String versionName = definition.getOwnerService().getVersion().replace(" ", ""); versionName = versionName.replace(".", ""); // Produce operation name part of topic name. String operationName = ProducerManager.getDestinationOperationPart(definition.getOperation(), eventMessage); // Aggregate the 3 parts using '-' as delimiter. return serviceName + "-" + versionName + "-" + operationName.replace("/", "-"); }
@Test
void testTopicNameWithPart() {
  final AmazonSNSProducerManager producerManager = new AmazonSNSProducerManager();

  final Service service = new Service();
  service.setName("Pastry orders API");
  service.setVersion("0.1.0");

  final Operation operation = new Operation();
  operation.setName("SUBSCRIBE pastry/orders");
  operation.setMethod("SUBSCRIBE");
  operation.setDispatcher("URI_PARTS");
  operation.setResourcePaths(Set.of("pastry/orders/{orderId}"));
  service.addOperation(operation);

  final EventMessage eventMessage = new EventMessage();
  eventMessage.setName("Sample");
  eventMessage.setDispatchCriteria("/orderId=123-456-789");

  final AsyncMockDefinition definition =
      new AsyncMockDefinition(service, operation, List.of(eventMessage));

  // Expected format: <service>-<version>-<operation part>, all sanitized.
  assertEquals("PastryordersAPI-010-pastry-orders-123-456-789",
      producerManager.getTopicName(definition, eventMessage));
}
/**
 * Creates the thread-level sensor recording average and maximum process
 * latency for the given thread.
 *
 * @param threadId the thread the sensor is scoped to
 * @param streamsMetrics the metrics registry to create the sensor in
 * @return the configured sensor
 */
public static Sensor processLatencySensor(final String threadId,
                                          final StreamsMetricsImpl streamsMetrics) {
  // The same "process-latency" name keys both the sensor and its metrics.
  final String operationName = PROCESS + LATENCY_SUFFIX;
  final Sensor sensor =
      streamsMetrics.threadLevelSensor(threadId, operationName, RecordingLevel.INFO);
  addAvgAndMaxToSensor(
      sensor,
      THREAD_LEVEL_GROUP,
      streamsMetrics.threadLevelTagMap(threadId),
      operationName,
      PROCESS_AVG_LATENCY_DESCRIPTION,
      PROCESS_MAX_LATENCY_DESCRIPTION
  );
  return sensor;
}
@Test
public void shouldGetProcessLatencySensor() {
  final String sensorName = "process" + LATENCY_SUFFIX;
  when(streamsMetrics.threadLevelSensor(THREAD_ID, sensorName, RecordingLevel.INFO))
      .thenReturn(expectedSensor);
  when(streamsMetrics.threadLevelTagMap(THREAD_ID)).thenReturn(tagMap);

  try (final MockedStatic<StreamsMetricsImpl> streamsMetricsStaticMock =
           mockStatic(StreamsMetricsImpl.class)) {
    final Sensor sensor = ThreadMetrics.processLatencySensor(THREAD_ID, streamsMetrics);

    // The avg/max latency metrics must be registered on the sensor with the
    // documented descriptions.
    streamsMetricsStaticMock.verify(
        () -> StreamsMetricsImpl.addAvgAndMaxToSensor(
            expectedSensor,
            THREAD_LEVEL_GROUP,
            tagMap,
            sensorName,
            "The average process latency",
            "The maximum process latency"
        )
    );
    assertThat(sensor, is(expectedSensor));
  }
}