comment
stringlengths
22
3.02k
method_body
stringlengths
46
368k
target_code
stringlengths
0
181
method_body_after
stringlengths
12
368k
context_before
stringlengths
11
634k
context_after
stringlengths
11
632k
Should move validation to the maxAutoLockRenewDuration setter.
public ServiceBusReceiverAsyncClient buildAsyncClient() { final MessagingEntityType entityType = validateEntityPaths(logger, connectionStringEntityName, topicName, queueName); final String entityPath = getEntityPath(logger, entityType, queueName, topicName, subscriptionName, ...
validateAndThrow(maxAutoLockRenewDuration);
public ServiceBusReceiverAsyncClient buildAsyncClient() { final MessagingEntityType entityType = validateEntityPaths(logger, connectionStringEntityName, topicName, queueName); final String entityPath = getEntityPath(logger, entityType, queueName, topicName, subscriptionName, ...
class ServiceBusSessionReceiverClientBuilder { private Integer maxConcurrentSessions = null; private int prefetchCount = DEFAULT_PREFETCH_COUNT; private String queueName; private ReceiveMode receiveMode = ReceiveMode.PEEK_LOCK; private String sessionId; private String su...
class ServiceBusSessionReceiverClientBuilder { private Integer maxConcurrentSessions = null; private int prefetchCount = DEFAULT_PREFETCH_COUNT; private String queueName; private ReceiveMode receiveMode = ReceiveMode.PEEK_LOCK; private String sessionId; private String su...
This is only used in dev so we are alright. Also it's the exact same logic.
public DevClasspathStaticHandler(Set<String> generatedResources, DevClasspathStaticHandlerOptions options) { this.generatedResources = generatedResources; this.compressedMediaTypes = options.getCompressMediaTypes(); this.currentClassLoader = Thread.currentThread().getContextClassLoader(); ...
this.compressedMediaTypes = options.getCompressMediaTypes();
public DevClasspathStaticHandler(Set<String> generatedResources, DevClasspathStaticHandlerOptions options) { this.generatedResources = generatedResources; this.compressedMediaTypes = options.getCompressMediaTypes(); this.currentClassLoader = Thread.currentThread().getContextClassLoader(); ...
class DevClasspathStaticHandler implements Handler<RoutingContext> { private static final Logger LOG = Logger.getLogger(DevClasspathStaticHandler.class); private static final Set<HttpMethod> ALLOWED_HTTP_METHODS = Set.of(HttpMethod.GET, HttpMethod.HEAD, HttpMethod.OPTIONS); private static final int HTTP_ST...
class DevClasspathStaticHandler implements Handler<RoutingContext> { private static final Logger LOG = Logger.getLogger(DevClasspathStaticHandler.class); private static final int HTTP_STATUS_OK = 200; private static final int HTTP_STATUS_NO_CONTENT = 204; private static final String ALLOW_HEADER = "All...
You'll have the previous behaviour, which seems to timeout frequently. I'll keep the warning for now, as running w/o a Vert.x context seems tricky for other reasons as well.
public HealthCheckResponse call() { HealthCheckResponseBuilder builder = HealthCheckResponse.named(healthCheckResponseName); builder.up(); for (Map.Entry<String, Pool> pgPoolEntry : pools.entrySet()) { final String dataSourceName = pgPoolEntry.getKey(); final Pool pgPool...
log.warn("Vert.x context unavailable to perform healthcheck of reactive datasource `" + dataSourceName + "`. This is unlikely to work correctly.");
public HealthCheckResponse call() { HealthCheckResponseBuilder builder = HealthCheckResponse.named(healthCheckResponseName); builder.up(); for (Map.Entry<String, Pool> pgPoolEntry : pools.entrySet()) { final String dataSourceName = pgPoolEntry.getKey(); final Pool pgPool...
class ReactiveDatasourceHealthCheck implements HealthCheck { private static final Logger log = Logger.getLogger(ReactiveDatasourceHealthCheck.class); private final Map<String, Pool> pools = new ConcurrentHashMap<>(); private final String healthCheckResponseName; private final String healthCheckSQL; ...
class ReactiveDatasourceHealthCheck implements HealthCheck { private static final Logger log = Logger.getLogger(ReactiveDatasourceHealthCheck.class); private final Map<String, Pool> pools = new ConcurrentHashMap<>(); private final String healthCheckResponseName; private final String healthCheckSQL; ...
Indeed looks like a rebasing error. A bit tricky one to get right because we changed the way the container removal worked before the rebasing was done. We were using the `--rm` initially but before the rebase we changed it to remove the container explicitly via `docker remove <container_id>`.
public RemoteEnvironment createEnvironment(Environment environment) throws Exception { Preconditions.checkState( environment .getUrn() .equals(BeamUrns.getUrn(RunnerApi.StandardEnvironments.Environments.DOCKER)), "The passed environment does not contain a DockerPayload."); ...
dockerOptsBuilder.add("--rm");
public RemoteEnvironment createEnvironment(Environment environment) throws Exception { Preconditions.checkState( environment .getUrn() .equals(BeamUrns.getUrn(RunnerApi.StandardEnvironments.Environments.DOCKER)), "The passed environment does not contain a DockerPayload."); ...
class DockerEnvironmentFactory implements EnvironmentFactory { private static final Logger LOG = LoggerFactory.getLogger(DockerEnvironmentFactory.class); static DockerEnvironmentFactory forServicesWithDocker( DockerCommand docker, GrpcFnServer<FnApiControlClientPoolService> controlServiceServer, ...
class DockerEnvironmentFactory implements EnvironmentFactory { private static final Logger LOG = LoggerFactory.getLogger(DockerEnvironmentFactory.class); static DockerEnvironmentFactory forServicesWithDocker( DockerCommand docker, GrpcFnServer<FnApiControlClientPoolService> controlServiceServer, ...
```suggestion "There should be always data available because the test utilizes no rate-limiting strategy and splits are provided.") ```
void testReaderCheckpoints() throws Exception { final int numCycles = 3; final long from = 0; final long mid = 156; final long to = 383; final long elementsPerCycle = (to - from + 1) / numCycles; final TestingReaderOutput<Long> out = new TestingReaderOutput<>(); ...
"There should be always data available because the test doesn't rely on any no rate-limiting strategy and splits are provided.")
void testReaderCheckpoints() throws Exception { final int numCycles = 3; final long from = 0; final long mid = 156; final long to = 383; final long elementsPerCycle = (to - from + 1) / numCycles; final TestingReaderOutput<Long> out = new TestingReaderOutput<>(); ...
class DataGeneratorSourceTest { @Test @DisplayName("Correctly restores SplitEnumerator from a snapshot.") void testRestoreEnumerator() throws Exception { final GeneratorFunction<Long, Long> generatorFunctionStateless = index -> index; final DataGeneratorSource<Long> dataGeneratorSource = ...
class DataGeneratorSourceTest { @Test @DisplayName("Correctly restores SplitEnumerator from a snapshot.") void testRestoreEnumerator() throws Exception { final GeneratorFunction<Long, Long> generatorFunctionStateless = index -> index; final DataGeneratorSource<Long> dataGeneratorSource = ...
I'm not too familiar with loggers and junit extensions, but isn't this going to leak in the testsuite? Looks like it will keep adding new InMemoryLogHandler instances, and also it's not emptying each of them after usage. I suppose the `rootLogger` is a global static so this might get nasty?
public void beforeEach(ExtensionContext extensionContext) throws Exception { rootLogger.addHandler(inMemoryLogHandler); if (archiveProducer == null) { throw new RuntimeException("QuarkusDevModeTest does not have archive producer set"); } if (logFileName != null) { ...
rootLogger.addHandler(inMemoryLogHandler);
public void beforeEach(ExtensionContext extensionContext) throws Exception { rootLogger.addHandler(inMemoryLogHandler); if (archiveProducer == null) { throw new RuntimeException("QuarkusDevModeTest does not have archive producer set"); } if (logFileName != null) { ...
class QuarkusDevModeTest implements BeforeEachCallback, AfterEachCallback, TestInstanceFactory { private static final Logger rootLogger; static { System.setProperty("java.util.logging.manager", "org.jboss.logmanager.LogManager"); rootLogger = LogManager.getLogManager().getLogger(""); ...
class QuarkusDevModeTest implements BeforeEachCallback, AfterEachCallback, TestInstanceFactory { private static final Logger rootLogger; static { System.setProperty("java.util.logging.manager", "org.jboss.logmanager.LogManager"); rootLogger = LogManager.getLogManager().getLogger(""); ...
Fixed, I also changed the test to use equals as the results are repeatable.
public void testAdaptiveLoadBalancer() { LoadBalancer lb = new AdaptiveLoadBalancer("foo"); List<Mirror.Entry> entries = Arrays.asList(new Mirror.Entry("foo/0/default", "tcp/bar:1"), new Mirror.Entry("foo/1/default", "tcp/bar:2"), new Mirror.Entry("foo/2/default", "tcp/b...
assertTrue(10 > Math.abs(metrics.sent() - 3333));
public void testAdaptiveLoadBalancer() { LoadBalancer lb = new AdaptiveLoadBalancer("foo", new Random(1)); List<Mirror.Entry> entries = Arrays.asList(new Mirror.Entry("foo/0/default", "tcp/bar:1"), new Mirror.Entry("foo/1/default", "tcp/bar:2"), new Mirror.Entry("foo/2/d...
class LoadBalancerTestCase { @Test public void requireThatParseExceptionIsReadable() { assertIllegalArgument("foo", "bar", "Expected recipient on the form 'foo/x/[y.]number/z', got 'bar'."); assertIllegalArgument("foo", "foobar", "Expected recipient on the form 'foo/x/[y.]number/z', got 'foobar...
class LoadBalancerTestCase { @Test public void requireThatParseExceptionIsReadable() { assertIllegalArgument("foo", "bar", "Expected recipient on the form 'foo/x/[y.]number/z', got 'bar'."); assertIllegalArgument("foo", "foobar", "Expected recipient on the form 'foo/x/[y.]number/z', got 'foobar...
Because to test the new override the e2e policy has to be applied to client level - and then it can have side effects because the client instance is often used for an entire test class.
protected static void truncateCollection(CosmosAsyncContainer cosmosContainer) { CosmosContainerProperties cosmosContainerProperties = cosmosContainer.read().block().getProperties(); String cosmosContainerId = cosmosContainerProperties.getId(); logger.info("Truncating collection {} ...", cosmosC...
options.setCosmosEndToEndOperationLatencyPolicyConfig(
protected static void truncateCollection(CosmosAsyncContainer cosmosContainer) { CosmosContainerProperties cosmosContainerProperties = cosmosContainer.read().block().getProperties(); String cosmosContainerId = cosmosContainerProperties.getId(); logger.info("Truncating collection {} ...", cosmosC...
class DatabaseManagerImpl implements CosmosDatabaseForTest.DatabaseManager { public static DatabaseManagerImpl getInstance(CosmosAsyncClient client) { return new DatabaseManagerImpl(client); } private final CosmosAsyncClient client; private DatabaseManagerImpl(CosmosAsyncCl...
class DatabaseManagerImpl implements CosmosDatabaseForTest.DatabaseManager { public static DatabaseManagerImpl getInstance(CosmosAsyncClient client) { return new DatabaseManagerImpl(client); } private final CosmosAsyncClient client; private DatabaseManagerImpl(CosmosAsyncCl...
`/` means `or`. `and/or` is `and or or`, which can be simplified to `or`. (`or` is not `xor` in English)
private static Set<String> findEnabledReportersInConfiguration(Configuration configuration, String includedReportersString) { Set<String> includedReporters = reporterListPattern.splitAsStream(includedReportersString) .filter(r -> !r.isEmpty()) .collect(Collectors.toSet()); Set<String> namedOrderedReporte...
LOG.info("Excluding reporter {}, not configured in reporter list ({}).", reporterName, includedReportersString);
private static Set<String> findEnabledReportersInConfiguration(Configuration configuration, String includedReportersString) { Set<String> includedReporters = reporterListPattern.splitAsStream(includedReportersString) .filter(r -> !r.isEmpty()) .collect(Collectors.toSet()); Set<String> namedOrderedReporte...
class ReporterSetup { private static final Logger LOG = LoggerFactory.getLogger(ReporterSetup.class); private static final Pattern reporterListPattern = Pattern.compile("\\s*,\\s*"); private static final Pattern reporterClassPattern = Pattern.compile( Pattern.quote(ConfigConstants.METRICS_REPORTER_PREFIX) +...
class ReporterSetup { private static final Logger LOG = LoggerFactory.getLogger(ReporterSetup.class); private static final Pattern reporterListPattern = Pattern.compile("\\s*,\\s*"); private static final Pattern reporterClassPattern = Pattern.compile( Pattern.quote(ConfigConstants.METRICS_REPORTER_PREFIX) +...
Yes, good catch, I've forgotten about it.
public void broadcastEvent(AbstractEvent event, boolean isPriorityEvent) throws IOException { try (BufferConsumer eventBufferConsumer = EventSerializer.toBufferConsumer(event, isPriorityEvent)) { AbstractEvent deserializedEvent = EventSe...
eventBufferConsumer.build(), getClass().getClassLoader());
public void broadcastEvent(AbstractEvent event, boolean isPriorityEvent) throws IOException { try (BufferConsumer eventBufferConsumer = EventSerializer.toBufferConsumer(event, isPriorityEvent)) { Buffer buffer = eventBufferConsumer.build(); try ...
class RecordOrEventCollectingResultPartitionWriter<T> extends AbstractCollectingResultPartitionWriter { private final Collection<Object> output; private final NonReusingDeserializationDelegate<T> delegate; private final RecordDeserializer<DeserializationDelegate<T>> deserializer = new Sp...
class RecordOrEventCollectingResultPartitionWriter<T> extends AbstractCollectingResultPartitionWriter { private final Collection<Object> output; private final NonReusingDeserializationDelegate<T> delegate; private final RecordDeserializer<DeserializationDelegate<T>> deserializer = new Sp...
Sorry for the late addition: You might want to test some more extreme values. VarIntCoder is variable sized, so around those size boundaries might be good to test. https://github.com/apache/beam/blob/50fcf55d60e8fa7a8399d63e030c930a2d45402a/sdks/java/core/src/main/java/org/apache/beam/sdk/coders/VarIntCoder.java#L29
public void testSingleCallOrderingWithShuffle() { List<Integer> perKeyElements = Lists.newArrayList(-8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8); Collections.shuffle(perKeyElements); List<String> allKeys = Lists.newArrayList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).stream() ...
Lists.newArrayList(-8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8);
public void testSingleCallOrderingWithShuffle() { List<Integer> perKeyElements = Lists.newArrayList(-8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 123456789); Collections.shuffle(perKeyElements); List<String> allKeys = Lists.newArrayList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).stream...
class VerifyDoFn<T> extends DoFn<KV<String, T>, KV<String, Boolean>> { private final List<T> perKeyElements; VerifyDoFn(List<T> perKeyElements) { this.perKeyElements = perKeyElements; } @StateId("matchedElements") private final StateSpec<ValueState<Integer>> elementsMatchedState = StateSpec...
class VerifyDoFn<T> extends DoFn<KV<String, T>, KV<String, Boolean>> { private final List<T> perKeyElements; VerifyDoFn(List<T> perKeyElements) { this.perKeyElements = perKeyElements; } @StateId("matchedElements") private final StateSpec<ValueState<Integer>> elementsMatchedState = StateSpec...
Can we assert the content of the file?
public static void main(String[] args) throws IOException { List<URL> resources = Collections .list(UberJarMain.class.getClassLoader().getResources("META-INF/cxf/cxf.fixml")); System.out.println("RESOURCES: " + resources.size()); }
System.out.println("RESOURCES: " + resources.size());
public static void main(String[] args) throws IOException { List<URL> resources = Collections .list(UberJarMain.class.getClassLoader().getResources("META-INF/cxf/cxf.fixml")); System.out.println("RESOURCES: " + resources.size()); }
class UberJarMain { }
class UberJarMain { }
The only usage I found you can safely treat a character as a human, and not an well aged java core developer... `ParseException p = new ParseException("Could not parse '" + Text.truncate(expression, 50) + "'");`
public boolean annotate(StringFieldValue text) { if (text.getSpanTree(SpanTrees.LINGUISTICS) != null) return true; Tokenizer tokenizer = factory.getTokenizer(); String input = (text.getString().length() <= config.getMaxTokenizeLength()) ? text.getString() : Tex...
: Text.safeSubstring(text.getString(), config.getMaxTokenizeLength());
public boolean annotate(StringFieldValue text) { if (text.getSpanTree(SpanTrees.LINGUISTICS) != null) return true; Tokenizer tokenizer = factory.getTokenizer(); String input = (text.getString().length() <= config.getMaxTokenizeLength()) ? text.getString() : Tex...
class TermOccurrences { final Map<String, Integer> termOccurrences = new HashMap<>(); final int maxOccurrences; public TermOccurrences(int maxOccurences) { this.maxOccurrences = maxOccurences; } boolean termCountBelowLimit(String term) { String lowerCas...
class TermOccurrences { final Map<String, Integer> termOccurrences = new HashMap<>(); final int maxOccurrences; public TermOccurrences(int maxOccurences) { this.maxOccurrences = maxOccurences; } boolean termCountBelowLimit(String term) { String lowerCas...
⛏️ Shall we also say around line 1170 that the first iteration always gets defined here? I suggest this because this comment only says do not skip but not the reason but still programmers have to read the code and identify why it is saying not to skip.
private void defineTypeNodes(List<BLangNode> typeDefs, SymbolEnv env) { if (typeDefs.isEmpty()) { return; } this.unresolvedTypes = new ArrayList<>(typeDefs.size()); this.unresolvedRecordDueToFields = new HashSet<>(typeDefs.size()); this.resolveRecordsUnresolvedDueToF...
&& i != 0) {
private void defineTypeNodes(List<BLangNode> typeDefs, SymbolEnv env) { if (typeDefs.isEmpty()) { return; } this.unresolvedTypes = new ArrayList<>(typeDefs.size()); this.unresolvedRecordDueToFields = new HashSet<>(typeDefs.size()); this.resolveRecordsUnresolvedDueToF...
class SymbolEnter extends BLangNodeVisitor { private static final CompilerContext.Key<SymbolEnter> SYMBOL_ENTER_KEY = new CompilerContext.Key<>(); private final SymbolTable symTable; private final Names names; private final SymbolResolver symResolver; private final BLangDiagnosticLog d...
class SymbolEnter extends BLangNodeVisitor { private static final CompilerContext.Key<SymbolEnter> SYMBOL_ENTER_KEY = new CompilerContext.Key<>(); private final SymbolTable symTable; private final Names names; private final SymbolResolver symResolver; private final BLangDiagnosticLog d...
Yes, good point, will remove that
public byte[] toJson() { try { Slime slime = new Slime(); toSlime(slime.setObject()); return SlimeUtils.toJsonBytes(slime); } catch (IOException e) { throw new RuntimeException("Serialization of " + this + " to json failed", e); } }
throw new RuntimeException("Serialization of " + this + " to json failed", e);
public byte[] toJson() { try { Slime slime = new Slime(); toSlime(slime.setObject()); return SlimeUtils.toJsonBytes(slime); } catch (IOException e) { throw new RuntimeException("Serialization of session data to json failed", e); } }
class for session information, typically parameters supplied in a deployment request that needs * to be persisted in ZooKeeper. These will be used when creating a new session based on an existing one. * * @author hmusum */ public record SessionData(ApplicationId applicationId, FileReferenc...
class for session information, typically parameters supplied in a deployment request that needs * to be persisted in ZooKeeper. These will be used when creating a new session based on an existing one. * * @author hmusum */ public record SessionData(ApplicationId applicationId, FileReferenc...
Thus iterating chars for comparison is rarely right: you must use the codepoint static methods on `Character` to ask if a codepoint is on 1 or 2 chars. Now, it's possible that the 2-char codepoints have both chars greater than the ascii range, and you're only escaping (5) ascii values so your code may work, but we shou...
private String doEscape(CharSequence value, int index, StringBuilder builder) { int length = value.length(); while (++index < length) { char c = value.charAt(index); String replacement = replacements.get(c); if (replacement != null) { builder.append(re...
char c = value.charAt(index);
private String doEscape(CharSequence value, int index, StringBuilder builder) { int length = value.length(); while (++index < length) { char c = value.charAt(index); String replacement = replacements.get(c); if (replacement != null) { builder.append(re...
class Escaper { private final Map<Character, String> replacements; /** * * @param replacements */ private Escaper(Map<Character, String> replacements) { this.replacements = replacements.isEmpty() ? Collections.emptyMap() : new HashMap<>(replacements); } /** ...
class Escaper { private final Map<Character, String> replacements; /** * * @param replacements */ private Escaper(Map<Character, String> replacements) { this.replacements = replacements.isEmpty() ? Collections.emptyMap() : new HashMap<>(replacements); } /** ...
Put null checking in method at the first place.
public ScoringParameter(String name, GeoPoint value) { this(name, toLonLatStrings(value)); }
this(name, toLonLatStrings(value));
public ScoringParameter(String name, GeoPoint value) { this(name, toLonLatStrings(value)); }
class with the given name and GeographyPoint value. * * @param name Name of the scoring parameter. * @param value Value of the scoring parameter. */
class with the given name and GeographyPoint value. * * @param name Name of the scoring parameter. * @param value Value of the scoring parameter. */
This list looks to exist only for this error message (which appears to be an internal error), this is going to get out of sync. You might want to just remove it.
private Value mapObjectToValue(Object value) { if (value == null) { return Value.newBuilder().build(); } if (Boolean.class.equals(value.getClass())) { return makeValue((Boolean) value).build(); } else if (Byte.class.equals(value.getClass())) { return makeVa...
+ SUPPORTED_JAVA_TYPES.toString());
private Value mapObjectToValue(Object value) { if (value == null) { return Value.newBuilder().build(); } if (Boolean.class.equals(value.getClass())) { return makeValue((Boolean) value).build(); } else if (Byte.class.equals(value.getClass())) { return makeVa...
class RowToEntityConverter extends DoFn<Row, Entity> { private final List<Class> SUPPORTED_JAVA_TYPES = ImmutableList.<Class>builder() .add( Boolean.class, Byte.class, Long.class, Integer.class, Short...
class RowToEntityConverter extends DoFn<Row, Entity> { private final boolean useNonRandomKey; RowToEntityConverter(boolean useNonRandomKey) { super(); this.useNonRandomKey = useNonRandomKey; } @DoFn.ProcessElement public void processElement(ProcessContext context) { ...
SO this won't work before all deployemnts are re-done, but I guess that's OK?
protected double maintain() { Map<ZoneId, Set<TenantName>> tenantsByZone = new HashMap<>(); Map<ZoneId, Set<CloudAccount>> accountsByZone = new HashMap<>(); controller().zoneRegistry().zonesIncludingSystem().reachable().zones().forEach(zone -> { tenantsByZone.put(zone.getVirtualId()...
else tenantsByZone.get(deployment.zone()).add(instance.id().tenant());
protected double maintain() { Map<ZoneId, Set<TenantName>> tenantsByZone = new HashMap<>(); Map<ZoneId, Set<CloudAccount>> accountsByZone = new HashMap<>(); controller().zoneRegistry().zonesIncludingSystem().reachable().zones().forEach(zone -> { tenantsByZone.put(zone.getVirtualId()...
class ArchiveUriUpdater extends ControllerMaintainer { private static final Set<TenantName> INFRASTRUCTURE_TENANTS = Set.of(SystemApplication.TENANT); private final ApplicationController applications; private final NodeRepository nodeRepository; private final CuratorArchiveBucketDb archiveBucketDb; ...
class ArchiveUriUpdater extends ControllerMaintainer { private static final Set<TenantName> INFRASTRUCTURE_TENANTS = Set.of(SystemApplication.TENANT); private final ApplicationController applications; private final NodeRepository nodeRepository; private final CuratorArchiveBucketDb archiveBucketDb; ...
From the provided code patch, here are some suggestions for improvement: 1. Naming: - Consider renaming the method `hasBooleanTypePartitionColumn()` to provide more clarity about its purpose. 2. Code style and best practices: - Format the ternary operator in the `getProperties()` method for improved readability...
public void modifyTableSchema(String dbName, String tableName, HiveTable updatedTable) { ImmutableList.Builder<Column> fullSchemaTemp = ImmutableList.builder(); ImmutableMap.Builder<String, Column> nameToColumnTemp = ImmutableMap.builder(); ImmutableList.Builder<String> dataColumnNamesTemp = Imm...
ImmutableMap.Builder<String, Column> nameToColumnTemp = ImmutableMap.builder();
public void modifyTableSchema(String dbName, String tableName, HiveTable updatedTable) { ImmutableList.Builder<Column> fullSchemaTemp = ImmutableList.builder(); ImmutableMap.Builder<String, Column> nameToColumnTemp = ImmutableMap.builder(); ImmutableList.Builder<String> dataColumnNamesTemp = Imm...
class HiveTable extends Table implements HiveMetaStoreTable { private static final Logger LOG = LogManager.getLogger(HiveTable.class); private static final String JSON_KEY_HIVE_DB = "hiveDb"; private static final String JSON_KEY_HIVE_TABLE = "hiveTable"; private static final String JSON_KEY_RESOURCE_NA...
class HiveTable extends Table implements HiveMetaStoreTable { private static final Logger LOG = LogManager.getLogger(HiveTable.class); private static final String JSON_KEY_HIVE_DB = "hiveDb"; private static final String JSON_KEY_HIVE_TABLE = "hiveTable"; private static final String JSON_KEY_RESOURCE_NA...
This seems to rely on very particular behavior of the embedded executor. Can we achieve the same by delaying the acknowledgement of the job submission? That'd seem more intuitive to me.
void testApplicationIsStoppedWhenStoppingBootstrap() throws Exception { final AtomicBoolean shutdownCalled = new AtomicBoolean(false); final CompletableFuture<JobStatus> getJobStatusFuture = new CompletableFuture<>(); final TestingDispatcherGateway.Builder dispatcherBuilder = run...
getJobStatusFuture.complete(JobStatus.RUNNING);
void testApplicationIsStoppedWhenStoppingBootstrap() throws Exception { final AtomicBoolean shutdownCalled = new AtomicBoolean(false); final TestingDispatcherGateway.Builder dispatcherBuilder = runningJobGatewayBuilder() .setClusterShutdownFunction( ...
class ApplicationDispatcherBootstrapTest { private static final int TIMEOUT_SECONDS = 10; private final ScheduledExecutorService executor = Executors.newScheduledThreadPool(4); private final ScheduledExecutor scheduledExecutor = new ScheduledExecutorServiceAdapter(executor); @AfterEach ...
class ApplicationDispatcherBootstrapTest { private static final int TIMEOUT_SECONDS = 10; private final ScheduledExecutorService executor = Executors.newScheduledThreadPool(4); private final ScheduledExecutor scheduledExecutor = new ScheduledExecutorServiceAdapter(executor); @AfterEach ...
Please check for status code 200 as well
public void createResources() throws CosmosException { final String containerName = _configuration.getCollectionId(); final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); final CollectionAttributes collectionAttributes = _entityConfiguration.collectionAttribu...
if (response.isPresent()) {
public void createResources() throws CosmosException { final String containerName = _configuration.getCollectionId(); final CosmosAsyncDatabase database = _client.getDatabase(_configuration.getDatabaseId()); final CollectionAttributes collectionAttributes = _entityConfiguration.collectionAttribu...
class CollectionResourceManager implements ResourceManager { private static final Logger LOGGER = LoggerFactory.getLogger(CollectionResourceManager.class); private static final Duration RESOURCE_CRUD_WAIT_TIME = Duration.ofSeconds(30); private final Configuration _configuration; private final EntityCon...
class CollectionResourceManager implements ResourceManager { private static final Logger LOGGER = LoggerFactory.getLogger(CollectionResourceManager.class); private static final Duration RESOURCE_CRUD_WAIT_TIME = Duration.ofSeconds(30); private final Configuration _configuration; private final EntityCon...
There can be instances where the parent is not visited. The ExternalTreeList Node implementation is one such instance. (The apply method does not visit the transformer). Hense, the parent will not be visited. IMO, to handle such scenarios a loop is required since we can't guarantee that the ContextTypeResolver always r...
public Optional<TypeSymbol> getContextType() { if (this.isContextTypeFound) { return this.contextType; } this.isContextTypeFound = true; if (this.getNodeAtCursor() == null) { this.contextType = Optional.empty(); return this.contextType; } ...
do {
public Optional<TypeSymbol> getContextType() { if (this.isContextTypeFound) { return Optional.ofNullable(this.contextType); } this.isContextTypeFound = true; if (this.getNodeAtCursor() == null) { this.contextType = null; return Optional.ofNullable(co...
class BallerinaCompletionContextImpl extends CompletionContextImpl implements BallerinaCompletionContext { private final List<Node> resolverChain = new ArrayList<>(); private Token tokenAtCursor; private NonTerminalNode nodeAtCursor; private boolean isContextTypeFound = false; private Optional<Type...
class BallerinaCompletionContextImpl extends CompletionContextImpl implements BallerinaCompletionContext { private final List<Node> resolverChain = new ArrayList<>(); private Token tokenAtCursor; private NonTerminalNode nodeAtCursor; private boolean isContextTypeFound = false; private TypeSymbol co...
Addressed with https://github.com/ballerina-platform/ballerina-lang/pull/31829/commits/111094729f69d3296a3c694581dabfdebc283d93
public void visit(BIRBasicBlock birBasicBlock) { addCpAndWriteString(birBasicBlock.id.value); buf.writeInt(birBasicBlock.instructions.size() + 1); birBasicBlock.instructions.forEach(instruction -> { writePosition(instruction.pos); ...
writeScopes(terminator);
public void visit(BIRBasicBlock birBasicBlock) { addCpAndWriteString(birBasicBlock.id.value); buf.writeInt(birBasicBlock.instructions.size() + 1); birBasicBlock.instructions.forEach(instruction -> { writePosition(instruction.pos); ...
class BIRInstructionWriter extends BIRVisitor { private ByteBuf buf; private ByteBuf scopeBuf; private ConstantPool cp; private BIRBinaryWriter binaryWriter; private int instructionOffset; private Set<BirScope> completedScopeSet; private int scopeCount; BIRInstructionWriter(ByteBuf buf...
class BIRInstructionWriter extends BIRVisitor { private ByteBuf buf; private ByteBuf scopeBuf; private ConstantPool cp; private BIRBinaryWriter binaryWriter; private int instructionOffset; private Set<BirScope> completedScopeSet; private int scopeCount; BIRInstructionWriter(ByteBuf buf...
can you actually remove a public method? I suspect that you need to make it a no-op that logs a warning
public WriteWithResults withWriteResults() { return new WriteWithResults( getBigtableConfig(), getBigtableWriteOptions(), getServiceFactory(), getBadRecordErrorHandler(), getBadRecordRouter()); }
getBadRecordErrorHandler(),
public WriteWithResults withWriteResults() { return new WriteWithResults( getBigtableConfig(), getBigtableWriteOptions(), getServiceFactory(), getBadRecordErrorHandler(), getBadRecordRouter()); }
class Builder { abstract Builder setBigtableConfig(BigtableConfig bigtableConfig); abstract Builder setBigtableWriteOptions(BigtableWriteOptions writeOptions); abstract Builder setServiceFactory(BigtableServiceFactory factory); abstract Builder setBadRecordErrorHandler(ErrorHandler<BadRecord...
class Builder { abstract Builder setBigtableConfig(BigtableConfig bigtableConfig); abstract Builder setBigtableWriteOptions(BigtableWriteOptions writeOptions); abstract Builder setServiceFactory(BigtableServiceFactory factory); abstract Builder setBadRecordErrorHandler(ErrorHandler<BadRecord...
but what if there is a valid uri next to it eg: `xmlns 5 "myxml" as ns1;` ?
private boolean isServiceDeclStart(ParserRuleContext currentContext, int lookahead) { switch (peek(lookahead + 1).kind) { case IDENTIFIER_TOKEN: SyntaxKind tokenAfterIdentifier = peek(lookahead + 2).kind; switch (tokenAfterIdentifier) { ca...
namespaceUri = parseSimpleConstExpr();
private boolean isServiceDeclStart(ParserRuleContext currentContext, int lookahead) { switch (peek(lookahead + 1).kind) { case IDENTIFIER_TOKEN: SyntaxKind tokenAfterIdentifier = peek(lookahead + 2).kind; switch (tokenAfterIdentifier) { ca...
class BallerinaParser extends AbstractParser { private static final OperatorPrecedence DEFAULT_OP_PRECEDENCE = OperatorPrecedence.DEFAULT; protected BallerinaParser(AbstractTokenReader tokenReader) { super(tokenReader, new BallerinaParserErrorHandler(tokenReader)); } /** * Start parsing ...
class BallerinaParser extends AbstractParser { private static final OperatorPrecedence DEFAULT_OP_PRECEDENCE = OperatorPrecedence.DEFAULT; protected BallerinaParser(AbstractTokenReader tokenReader) { super(tokenReader, new BallerinaParserErrorHandler(tokenReader)); } /** * Start parsing ...
since `%s` can become large, should we also do some line break and indention here
public String asSerializableString(String table) { switch (type) { case SLIDE: return String.format( "HOP((%s), DESCRIPTOR(%s), %s, %s)", table, timeAttribute.asSerializableString(), ...
table,
public String asSerializableString(String table) { switch (type) { case SLIDE: return String.format( "HOP((%s\n), DESCRIPTOR(%s), %s, %s)", OperationUtils.indent(table), timeAttribute.asSe...
class ResolvedGroupWindow { private final WindowType type; private final String alias; private final FieldReferenceExpression timeAttribute; private final ValueLiteralExpression slide; private final ValueLiteralExpression size; private final ValueLiteralExpression gap; ...
class ResolvedGroupWindow { private final WindowType type; private final String alias; private final FieldReferenceExpression timeAttribute; private final ValueLiteralExpression slide; private final ValueLiteralExpression size; private final ValueLiteralExpression gap; ...
What's the meaning of finstId?
public Future<PFetchDataResult> fetchData(PFetchDataRequest request, RpcCallback<PFetchDataResult> callback) { String fid = DebugUtil.printId(request.finstId); String queryId = resultSinkInstanceToQueryId.get(fid); if (queryId == null) { LOG.warn("no queryId found for...
LOG.warn("no progress found for finstId {} queryId", fid, queryId);
public Future<PFetchDataResult> fetchData(PFetchDataRequest request, RpcCallback<PFetchDataResult> callback) { String fid = DebugUtil.printId(request.finstId); String queryId = resultSinkInstanceToQueryId.get(fid); if (queryId == null) { LOG.warn("no queryId found for...
class PseudoPBackendService implements PBackendServiceAsync { private final ExecutorService executor; PseudoPBackendService() { executor = Executors.newSingleThreadExecutor(new ThreadFactory() { @Override public Thread newThread(@NotNull Runnable r) { ...
class PseudoPBackendService implements PBackendServiceAsync { private final ExecutorService executor; PseudoPBackendService() { executor = Executors.newSingleThreadExecutor(new ThreadFactory() { @Override public Thread newThread(@NotNull Runnable r) { ...
I think we shouldn't log unexpected exceptions on `debug` level.
private void startTriggeringCheckpoint(CheckpointTriggerRequest request) { try { synchronized (lock) { preCheckGlobalState(request.isPeriodic); } final Execution[] executions = getTriggerExecutions(); final Map<ExecutionAttemptID, ExecutionVertex> ackTasks = getAckTasks(); Preconditions.check...
LOG.warn("Error encountered during shutdown", error);
private void startTriggeringCheckpoint(CheckpointTriggerRequest request) { try { synchronized (lock) { preCheckGlobalState(request.isPeriodic); } final Execution[] executions = getTriggerExecutions(); final Map<ExecutionAttemptID, ExecutionVertex> ackTasks = getAckTasks(); Preconditions.check...
class CheckpointCoordinator { private static final Logger LOG = LoggerFactory.getLogger(CheckpointCoordinator.class); /** The number of recent checkpoints whose IDs are remembered. */ private static final int NUM_GHOST_CHECKPOINT_IDS = 16; /** Coordinator-wide lock to safeguard the checkpoint updates. */ pri...
class CheckpointCoordinator { private static final Logger LOG = LoggerFactory.getLogger(CheckpointCoordinator.class); /** The number of recent checkpoints whose IDs are remembered. */ private static final int NUM_GHOST_CHECKPOINT_IDS = 16; /** Coordinator-wide lock to safeguard the checkpoint updates. */ pri...
nit: no need for the `Mono.defer` here. `Mono.defer` should only be used if the value being set in the `Mono.just` is expensive to compute to defer that computation until the Mono, or Flux, is subscribed. This can just be `Mono.just`. And actually, in this case, this would make performance worse as `Mono.defer` isn't ...
private Mono<Boolean> isValidKeyLocallyAvailable() { if (localOperationNotSupported) { return Mono.defer(() -> Mono.just(false)); } boolean keyNotAvailable = (key == null && keyCollection != null); if (keyNotAvailable) { if (keyCollection.equals(CryptographyClie...
return Mono.defer(() -> Mono.just(false));
private Mono<Boolean> isValidKeyLocallyAvailable() { if (localOperationNotSupported) { return Mono.just(false); } boolean keyNotAvailable = (key == null && keyCollection != null); if (keyNotAvailable) { if (Objects.equals(keyCollection, CryptographyClientImpl.SE...
class CryptographyAsyncClient { private static final ClientLogger LOGGER = new ClientLogger(CryptographyAsyncClient.class); private final String keyCollection; private final HttpPipeline pipeline; private boolean localOperationNotSupported = false; private LocalKeyCryptographyClient localKeyCrypto...
class CryptographyAsyncClient { private static final ClientLogger LOGGER = new ClientLogger(CryptographyAsyncClient.class); private final String keyCollection; private final HttpPipeline pipeline; private volatile boolean localOperationNotSupported = false; private LocalKeyCryptographyClient local...
That would make JdbcIOTest stuck again. This piece of code already exists in JdbcIOTest.beforeClass. What happens is that the timeout setting set once in JdbcIOTest.beforeClass somehow gets reset if JdbcIOExceptionHandlingParameterizedTest is running, as both uses same embed derby server.
public static void beforeClass() { System.setProperty("derby.locks.waitTimeout", "2"); System.setProperty("derby.stream.error.file", "build/derby.log"); }
System.setProperty("derby.stream.error.file", "build/derby.log");
public static void beforeClass() { System.setProperty("derby.locks.waitTimeout", "2"); System.setProperty("derby.stream.error.file", "build/derby.log"); }
class JdbcIOExceptionHandlingParameterizedTest { @Rule public final transient ExpectedLogs expectedLogs = ExpectedLogs.none(JdbcIO.class); private static final JdbcIO.DataSourceConfiguration DATA_SOURCE_CONFIGURATION = JdbcIO.DataSourceConfiguration.create( "org.apache.derby.jdbc.EmbeddedDriver", "...
class JdbcIOExceptionHandlingParameterizedTest { @Rule public final transient ExpectedLogs expectedLogs = ExpectedLogs.none(JdbcIO.class); private static final JdbcIO.DataSourceConfiguration DATA_SOURCE_CONFIGURATION = JdbcIO.DataSourceConfiguration.create( "org.apache.derby.jdbc.EmbeddedDriver", "...
why do we want both dot graph and json graph?
public PortablePipelineResult runPortablePipeline(RunnerApi.Pipeline pipeline, JobInfo jobInfo) { final String dotGraph = PipelineDotRenderer.toDotString(pipeline); LOG.info("Portable pipeline to run dot graph:\n{}", dotGraph); final String jsonGraph = PipelineJsonRenderer.toJsonString(pipeline); LOG.i...
LOG.info("Portable pipeline to run dot graph:\n{}", dotGraph);
public PortablePipelineResult runPortablePipeline(RunnerApi.Pipeline pipeline, JobInfo jobInfo) { final String dotGraph = PipelineDotRenderer.toDotString(pipeline); LOG.info("Portable pipeline to run DOT graph:\n{}", dotGraph); final String jsonGraph = PipelineJsonRenderer.toJsonString(pipeline); LOG.i...
class SamzaRunner extends PipelineRunner<SamzaPipelineResult> { private static final Logger LOG = LoggerFactory.getLogger(SamzaRunner.class); private static final String BEAM_DOT_GRAPH = "beamDotGraph"; private static final String BEAM_JSON_GRAPH = "beamJsonGraph"; public static SamzaRunner fromOptions(Pipelin...
class SamzaRunner extends PipelineRunner<SamzaPipelineResult> { private static final Logger LOG = LoggerFactory.getLogger(SamzaRunner.class); private static final String BEAM_DOT_GRAPH = "beamDotGraph"; private static final String BEAM_JSON_GRAPH = "beamJsonGraph"; public static SamzaRunner fromOptions(Pipelin...
Do we have a better way to differentiate a resource function
private void emitTransactionParticipantBeginIfApplicable(BLangBlockStmt body) { BLangNode parent = body.parent; if (parent == null) { return; } if (parent instanceof BLangFunction) { Set<Flag> flagSet = ((BLangFunction) parent).flagSet; if (fl...
if (flagSet != null && flagSet.contains(Flag.RESOURCE)) {
private void emitTransactionParticipantBeginIfApplicable(BLangBlockStmt body) { BLangNode parent = body.parent; if (parent == null || parent.getKind() != NodeKind.FUNCTION) { return; } BLangFunction function = (BLangFunction) parent; List<BLangAnnotationAttachment> pa...
class CodeGenerator extends BLangNodeVisitor { private static final CompilerContext.Key<CodeGenerator> CODE_GENERATOR_KEY = new CompilerContext.Key<>(); /** * This structure holds current package-level variable indexes. */ private VariableIndex pvIndexes = new VariableIndex(PACKAGE); ...
class CodeGenerator extends BLangNodeVisitor { private static final CompilerContext.Key<CodeGenerator> CODE_GENERATOR_KEY = new CompilerContext.Key<>(); /** * This structure holds current package-level variable indexes. */ private VariableIndex pvIndexes = new VariableIndex(PACKAGE); ...
This is OK for creating table/partition. But for alter operation, there is a daemon scheduler to add/remove replicas on be to match the replication_num property. This check cannot cover the alter case.
private List<Long> chosenBackendIdBySeq(int replicationNum) throws DdlException { List<Long> chosenBackendIds = systemInfoService.seqChooseBackendIds(replicationNum, true, true); if (!CollectionUtils.isEmpty(chosenBackendIds)) { return chosenBackendIds; } else if (rep...
if (!CollectionUtils.isEmpty(chosenBackendIds)) {
private List<Long> chosenBackendIdBySeq(int replicationNum) throws DdlException { List<Long> chosenBackendIds = systemInfoService.seqChooseBackendIds(replicationNum, true, true); if (!CollectionUtils.isEmpty(chosenBackendIds)) { return chosenBackendIds; } else if (rep...
class LocalMetastore implements ConnectorMetadata { private static final Logger LOG = LogManager.getLogger(LocalMetastore.class); private final ConcurrentHashMap<Long, Database> idToDb = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, Database> fullNameToDb = new ConcurrentHashMap<>(); ...
class LocalMetastore implements ConnectorMetadata { private static final Logger LOG = LogManager.getLogger(LocalMetastore.class); private final ConcurrentHashMap<Long, Database> idToDb = new ConcurrentHashMap<>(); private final ConcurrentHashMap<String, Database> fullNameToDb = new ConcurrentHashMap<>(); ...
So we removed the dependency on semconv package in this PR https://github.com/Azure/azure-sdk-for-java/pull/18940 since it was moved out of the core oTel lib and was to be managed independently (possible breaking changes). I missed doing these updates to the exporter lib in the last PR so covering them now.
private void applySemanticConventions(Attributes attributes, RemoteDependencyData remoteDependencyData, SpanKind spanKind) { String httpMethod = attributes.get(AttributeKey.stringKey("http.method")); if (httpMethod != null) { applyHttpClientSpan(attributes, remoteDependencyData); ...
String httpMethod = attributes.get(AttributeKey.stringKey("http.method"));
private void applySemanticConventions(Attributes attributes, RemoteDependencyData remoteDependencyData, SpanKind spanKind) { String httpMethod = attributes.get(AttributeKey.stringKey("http.method")); if (httpMethod != null) { applyHttpClientSpan(attributes, remoteDependencyData); ...
class AzureMonitorTraceExporter implements SpanExporter { private static final Pattern COMPONENT_PATTERN = Pattern .compile("io\\.opentelemetry\\.javaagent\\.([^0-9]*)(-[0-9.]*)?"); private static final Set<String> SQL_DB_SYSTEMS; private static final Set<String> STANDARD_ATTRIBUTE_PREFIXES; ...
class AzureMonitorTraceExporter implements SpanExporter { private static final Pattern COMPONENT_PATTERN = Pattern .compile("io\\.opentelemetry\\.javaagent\\.([^0-9]*)(-[0-9.]*)?"); private static final Set<String> SQL_DB_SYSTEMS; private static final Set<String> STANDARD_ATTRIBUTE_PREFIXES; ...
we haven't used peek() inside the resumeParsing method. Let's stick to a single pattern.
public STNode resumeParsing(ParserRuleContext context, Object... args) { switch (context) { case FUNC_BODY: return parseFunctionBody((boolean) args[0]); case OPEN_BRACE: return parseOpenBrace(); case CLOSE_BRACE: return...
return parseTupleTypeDescOrListConstructorMember(peek().kind, (STNode) args[0]);
public STNode resumeParsing(ParserRuleContext context, Object... args) { switch (context) { case FUNC_BODY: return parseFunctionBody((boolean) args[0]); case OPEN_BRACE: return parseOpenBrace(); case CLOSE_BRACE: return...
class BallerinaParser extends AbstractParser { private static final OperatorPrecedence DEFAULT_OP_PRECEDENCE = OperatorPrecedence.DEFAULT; protected BallerinaParser(AbstractTokenReader tokenReader) { super(tokenReader, new BallerinaParserErrorHandler(tokenReader)); } /** * Start parsing ...
class BallerinaParser extends AbstractParser { private static final OperatorPrecedence DEFAULT_OP_PRECEDENCE = OperatorPrecedence.DEFAULT; protected BallerinaParser(AbstractTokenReader tokenReader) { super(tokenReader, new BallerinaParserErrorHandler(tokenReader)); } /** * Start parsing ...
Just a question, how will we handle error if we could `actionCompleted.compareAndSet(false, true)` but throwing a unchecked exception within the suceess statement.
public void run() { LOG.debug("starting attempt {}", attemptNumber); if (!actionCompleted.get()) { Optional<ScheduledFuture<?>> timeoutFuture = scheduleTimeout(); try { Result result = action.tryExecute(); if (actionComplete...
handleError(e);
public void run() { LOG.debug("starting attempt {}", attemptNumber); if (actionCompleted.get()) { return; } Optional<ScheduledFuture<?>> timeoutFuture = scheduleTimeout(); try { Result result = action.tryExecute(); ...
class RetriableActionAttempt<Result> implements Runnable { private final RetriableAction<Result> action; private final ScheduledExecutorService blockingExecutor; private final ScheduledExecutorService timer; private final int attemptNumber; private final RetryPolicy retryPolicy; ...
class RetriableActionAttempt<Result> implements Runnable { private final RetriableAction<Result> action; private final ScheduledExecutorService blockingExecutor; private final ScheduledExecutorService timer; private final int attemptNumber; private final RetryPolicy retryPolicy; ...
This isn't in sync with what is deployed now, but we should never trigger things that haven't already passed this check at triggering-time. I will fix this when I add specific versions for system and staging tests, based on what will be deployed in production.
private void validateChange(Application application, ZoneId zone, Version version) { if ( ! application.deploymentJobs().isDeployableTo(zone.environment(), application.change())) { throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + ...
if ( ! application.deploymentJobs().isDeployableTo(zone.environment(), application.change())) {
private void validateChange(Application application, ZoneId zone, Version version) { if ( ! application.deploymentJobs().isDeployableTo(zone.environment(), application.change())) { throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone + ...
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For permanent storage */ private final ControllerDb db; /** For working memory storage and shari...
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For permanent storage */ private final ControllerDb db; /** For working memory storage and shari...
> How about only print out the id of the db? There should be some simpler and safer way to identify db. As an open source project that can be privatized deployed, I think our log on exception should be as detailed as possible because those logs are all we can get from the users.
public boolean close() { boolean closeSuccess = true; lock.writeLock().lock(); try { closing = true; LOG.info("start to close log databases"); for (CloseSafeDatabase db : openedDatabases) { try { db.close(); ...
LOG.error("Error closing db", exception);
public boolean close() { boolean closeSuccess = true; lock.writeLock().lock(); try { closing = true; LOG.info("start to close log databases"); for (CloseSafeDatabase db : openedDatabases) { try { db.close(); ...
class BDBEnvironment { private static final Logger LOG = LogManager.getLogger(BDBEnvironment.class); private static final int RETRY_TIME = 3; private static final int MEMORY_CACHE_PERCENT = 20; public static final String STARROCKS_JOURNAL_GROUP = "PALO_JOURNAL_GROUP"; private ReplicatedEnvironment...
class BDBEnvironment { private static final Logger LOG = LogManager.getLogger(BDBEnvironment.class); private static final int RETRY_TIME = 3; private static final int MEMORY_CACHE_PERCENT = 20; public static final String STARROCKS_JOURNAL_GROUP = "PALO_JOURNAL_GROUP"; private ReplicatedEnvironment...
FYI typo introduced from structured renaming
public Optional<JvmMemoryPercentage> getMemoryPercentage() { if (memoryPercentage != null) return Optional.of(JvmMemoryPercentage.of(memoryPercentage)); if (isHostedVespa()) { int heapSizePercentageOfAvailable = heapSizePercentageOfAvailable(); if (getContainers().isEmpty()) ret...
public Optional<JvmMemoryPercentage> getMemoryPercentage() { if (memoryPercentage != null) return Optional.of(JvmMemoryPercentage.of(memoryPercentage)); if (isHostedVespa()) { int heapSizePercentageOfAvailable = heapSizePercentageOfAvailable(); if (getContainers().isEmpty()) ret...
class ApplicationContainerCluster extends ContainerCluster<ApplicationContainer> implements ApplicationBundlesConfig.Producer, QrStartConfig.Producer, RankProfilesConfig.Producer, RankingConstantsConfig.Producer, OnnxModelsConfig.Producer, RankingExpressionsConfig.Produce...
class ApplicationContainerCluster extends ContainerCluster<ApplicationContainer> implements ApplicationBundlesConfig.Producer, QrStartConfig.Producer, RankProfilesConfig.Producer, RankingConstantsConfig.Producer, OnnxModelsConfig.Producer, RankingExpressionsConfig.Produce...
Though is it safe to reuse the same flux for each iteration? Or is the flux "used up" after the first upload so we would need to create a new one every time?
public Mono<Void> runAsync() { return blobAsyncClient.upload(randomByteBufferFlux, null, true).then(); }
return blobAsyncClient.upload(randomByteBufferFlux, null, true).then();
public Mono<Void> runAsync() { return blobAsyncClient.upload(randomByteBufferFlux, null, true).then(); }
class UploadBlobTest extends BlobTestBase<PerfStressOptions> { private final Flux<ByteBuffer> randomByteBufferFlux; public UploadBlobTest(PerfStressOptions options) { super(options); this.randomByteBufferFlux = createRandomByteBufferFlux(options.getSize()); } @Override public void...
class UploadBlobTest extends BlobTestBase<PerfStressOptions> { private final Flux<ByteBuffer> randomByteBufferFlux; public UploadBlobTest(PerfStressOptions options) { super(options); this.randomByteBufferFlux = createRandomByteBufferFlux(options.getSize()); } @Override public void...
In case there is no value, set `false`?
public void readFields(DataInput in) throws IOException { super.readFields(in); backupTimestamp = Text.readString(in); jobInfo = BackupJobInfo.read(in); allowLoad = in.readBoolean(); state = RestoreJobState.valueOf(Text.readString(in)); if (in.readBoolean()) { ...
String value = Text.readString(in);
public void readFields(DataInput in) throws IOException { super.readFields(in); backupTimestamp = Text.readString(in); jobInfo = BackupJobInfo.read(in); allowLoad = in.readBoolean(); state = RestoreJobState.valueOf(Text.readString(in)); if (in.readBoolean()) { ...
class RestoreJob extends AbstractJob { private static final String PROP_RESERVE_REPLICA = "reserve_replica"; private static final Logger LOG = LogManager.getLogger(RestoreJob.class); public enum RestoreJobState { PENDING, SNAPSHOTING, DOWN...
class RestoreJob extends AbstractJob { private static final String PROP_RESERVE_REPLICA = "reserve_replica"; private static final Logger LOG = LogManager.getLogger(RestoreJob.class); public enum RestoreJobState { PENDING, SNAPSHOTING, DOWN...
```suggestion Optional<Module> jamm = ModuleLayer.boot().findModule("jamm"); if (!jamm.isPresent()) { ... log warning about expected to find jamm module ... return; } ```
public static void premain(String argument, Instrumentation instrumentation) { Set<Module> modulesToOpen = new HashSet<>(); Module jamm = ModuleLayer.boot().findModule("jamm").orElse(null); ModuleLayer.boot() .modules() .forEach( module -> { if (!module.getDescripto...
Module jamm = ModuleLayer.boot().findModule("jamm").orElse(null);
public static void premain(String argument, Instrumentation instrumentation) { Set<Module> modulesToOpen = new HashSet<>(); Optional<Module> jamm = ModuleLayer.boot().findModule("jamm"); if (!jamm.isPresent()) { System.err.println("Jamm module expected, but not found"); return; } Module...
class OpenModuleAgent { }
class OpenModuleAgent { }
Sure thing, I will do that!
public Object create() { RestClientBuilder builder = RestClientBuilder.newBuilder(); String baseUrl = getBaseUrl(); try { return builder.baseUrl(new URL(baseUrl)).build(proxyType); } catch (MalformedURLException e) { throw new IllegalArgumentException("The value o...
throw new IllegalArgumentException(baseUrl + " requires SSL support but it is disabled. You probably have set shamrock.ssl.native to false.");
public Object create() { RestClientBuilder builder = RestClientBuilder.newBuilder(); String baseUrl = getBaseUrl(); try { return builder.baseUrl(new URL(baseUrl)).build(proxyType); } catch (MalformedURLException e) { throw new IllegalArgumentException("The value o...
class RestClientBase { public static final String REST_URL_FORMAT = "%s/mp-rest/url"; private final Class<?> proxyType; private final Config config; public RestClientBase(Class<?> proxyType) { this.proxyType = proxyType; this.config = ConfigProvider.getConfig(); } priv...
class RestClientBase { public static final String REST_URL_FORMAT = "%s/mp-rest/url"; private final Class<?> proxyType; private final Config config; public RestClientBase(Class<?> proxyType) { this.proxyType = proxyType; this.config = ConfigProvider.getConfig(); } priv...
If we move this logic to a function, we can improve the readability of the code. WDYT?
public void visit(BLangListConstructorExpr listConstructor) { BType actualType = symTable.semanticError; resultType = symTable.semanticError; int expTypeTag = expType.tag; if ((expTypeTag == TypeTags.ANY || expTypeTag == TypeTags.ANYDATA || expTypeTag == TypeTags.NONE)...
return;
public void visit(BLangListConstructorExpr listConstructor) { BType actualType = symTable.semanticError; resultType = symTable.semanticError; int expTypeTag = expType.tag; if ((expTypeTag == TypeTags.ANY || expTypeTag == TypeTags.ANYDATA || expTypeTag == TypeTags.NONE)...
class TypeChecker extends BLangNodeVisitor { private static final CompilerContext.Key<TypeChecker> TYPE_CHECKER_KEY = new CompilerContext.Key<>(); private static final String TABLE_TNAME = "table"; private Names names; private SymbolTable symTable; private SymbolEnter symbolEnter; ...
class TypeChecker extends BLangNodeVisitor { private static final CompilerContext.Key<TypeChecker> TYPE_CHECKER_KEY = new CompilerContext.Key<>(); private static final String TABLE_TNAME = "table"; private Names names; private SymbolTable symTable; private SymbolEnter symbolEnter; ...
the index isn't relevant to this test.
public void testPortConflictHandling() throws Exception { ReporterSetup reporterSetup1 = ReporterSetup.forReporter("test1", new JMXReporter("9020-9035")); ReporterSetup reporterSetup2 = ReporterSetup.forReporter("test2", new JMXReporter("9020-9035")); MetricRegistryImpl reg = new MetricRegistryImpl( MetricReg...
new ReporterScopedSettings(0, '.'),
public void testPortConflictHandling() throws Exception { ReporterSetup reporterSetup1 = ReporterSetup.forReporter("test1", new JMXReporter("9020-9035")); ReporterSetup reporterSetup2 = ReporterSetup.forReporter("test2", new JMXReporter("9020-9035")); MetricRegistryImpl reg = new MetricRegistryImpl( MetricReg...
class JMXReporterTest extends TestLogger { @Test public void testReplaceInvalidChars() { assertEquals("", JMXReporter.replaceInvalidChars("")); assertEquals("abc", JMXReporter.replaceInvalidChars("abc")); assertEquals("abc", JMXReporter.replaceInvalidChars("abc\"")); assertEquals("abc", JMXReporter.replaceIn...
class JMXReporterTest extends TestLogger { @Test public void testReplaceInvalidChars() { assertEquals("", JMXReporter.replaceInvalidChars("")); assertEquals("abc", JMXReporter.replaceInvalidChars("abc")); assertEquals("abc", JMXReporter.replaceInvalidChars("abc\"")); assertEquals("abc", JMXReporter.replaceIn...
Consider adding an assert for the number of dispatchers to ensure that we actually check something in the loop.
public void requireThatDispatchTuningIsApplied() throws ParseException { ContentCluster cluster = newContentCluster(joinLines("<search>", "</search>"), joinLines("<tuning>", "</tuning>")); for (Dispatch tld : cluster.getSearch().getIndexed().getTLDs()) { ...
for (Dispatch tld : cluster.getSearch().getIndexed().getTLDs()) {
public void requireThatDispatchTuningIsApplied() throws ParseException { ContentCluster cluster = newContentCluster(joinLines("<search>", "</search>"), joinLines("<tuning>", "</tuning>")); assertEquals(1, cluster.getSearch().getIndexed().getTLDs().size()); ...
class ClusterTest { @Test public void requireThatContentSearchIsApplied() throws ParseException { ContentCluster cluster = newContentCluster(joinLines("<search>", " <query-timeout>1.1</query-timeout>", " <visibility-delay>2.3</visibility-delay>", "</sea...
class ClusterTest { @Test public void requireThatContentSearchIsApplied() throws ParseException { ContentCluster cluster = newContentCluster(joinLines("<search>", " <query-timeout>1.1</query-timeout>", " <visibility-delay>2.3</visibility-delay>", "</sea...
The most risky bug in this code is: Incorrect handling and combination of OFFSET and LIMIT values. You can modify the code like this: ``` @@ -367,7 +368,17 @@ protected ParseNode visitQuerySpecification(QuerySpecification node, ParseTreeCo } else { resultSelectRelation.setOrderBy(new ArrayList<>(...
protected ParseNode visitLimit(Limit node, ParseTreeContext context) { long limit = ((LiteralExpr) visit(node.getRowCount(), context)).getLongValue(); return new LimitElement(0, limit); }
long limit = ((LiteralExpr) visit(node.getRowCount(), context)).getLongValue();
protected ParseNode visitLimit(Limit node, ParseTreeContext context) { long limit = ((LiteralExpr) visit(node.getRowCount(), context)).getLongValue(); return new LimitElement(0, limit); }
class AstBuilder extends AstVisitor<ParseNode, ParseTreeContext> { private final long sqlMode; public AstBuilder(long sqlMode) { this.sqlMode = sqlMode; } private static final ImmutableMap<Join.Type, JoinOperator> JOIN_TYPE_MAP = ImmutableMap.<Join.Type, JoinOperator>builder(). ...
class AstBuilder extends AstVisitor<ParseNode, ParseTreeContext> { private final long sqlMode; public AstBuilder(long sqlMode) { this.sqlMode = sqlMode; } private static final ImmutableMap<Join.Type, JoinOperator> JOIN_TYPE_MAP = ImmutableMap.<Join.Type, JoinOperator>builder(). ...
I think we should extract this partition value parsing logic to a method. You can see `toListPartitionItem()` in `HiveMetaStoreCache`, it also handle the special char: `// hive partition value maybe contains special characters like '=' and '/'`
private void getPartitionColumnStats() throws Exception { Set<String> partitionNames = table.getPartitionNames(); Set<String> ndvPartValues = Sets.newHashSet(); long numNulls = 0; long dataSize = 0; String min = null; String max = null; for (String names : partiti...
private void getPartitionColumnStats() throws Exception { Set<String> partitionNames = table.getPartitionNames(); Set<String> ndvPartValues = Sets.newHashSet(); long numNulls = 0; long dataSize = 0; String min = null; String max = null; for (String names : partiti...
class HMSAnalysisTask extends BaseAnalysisTask { private static final Logger LOG = LogManager.getLogger(HMSAnalysisTask.class); public static final String TOTAL_SIZE = "totalSize"; public static final String NUM_ROWS = "numRows"; public static final String NUM_FILES = "numFiles"; public static fina...
class HMSAnalysisTask extends BaseAnalysisTask { private static final Logger LOG = LogManager.getLogger(HMSAnalysisTask.class); private static final String NDV_MULTIPLY_THRESHOLD = "0.3"; private static final String ANALYZE_TABLE_TEMPLATE = "INSERT INTO " + "${internalDB}.${columnSta...
```suggestion if (parentCtx != ParserRuleContext.QUERY_EXPRESSION) { ``` btw, why do we need to check the query ctx here? Any sample?
private ParserRuleContext getNextRuleForVarName() { ParserRuleContext parentCtx = getParentContext(); switch (parentCtx) { case ASSIGNMENT_STMT: return ParserRuleContext.ASSIGNMENT_STMT_RHS; case CALL_STMT: return ParserRuleContext.ARG_LIST; ...
if (!(parentCtx == ParserRuleContext.QUERY_EXPRESSION)) {
private ParserRuleContext getNextRuleForVarName() { ParserRuleContext parentCtx = getParentContext(); switch (parentCtx) { case ASSIGNMENT_STMT: return ParserRuleContext.ASSIGNMENT_STMT_RHS; case CALL_STMT: return ParserRuleContext.ARG_LIST; ...
class BallerinaParserErrorHandler extends AbstractParserErrorHandler { /** * FUNC_DEF_OR_FUNC_TYPE --> When a func-def and func-type-desc are possible. * e.g: start of a module level construct that starts with 'function' keyword. */ private static final ParserRuleContext[] FUNC_TYPE_OR_DEF_OPTIO...
class BallerinaParserErrorHandler extends AbstractParserErrorHandler { /** * FUNC_DEF_OR_FUNC_TYPE --> When a func-def and func-type-desc are possible. * e.g: start of a module level construct that starts with 'function' keyword. */ private static final ParserRuleContext[] FUNC_TYPE_OR_DEF_OPTIO...
```suggestion LOG.error("The configuration item \"cloud_native_hdfs_url\" is empty."); ```
public void initialize(BDBEnvironment environment, String baseImageDir) throws IOException { journalSystem = new BDBJEJournalSystem(environment); imageDir = baseImageDir + IMAGE_SUBDIR; com.staros.util.Config.STARMGR_IP = FrontendOptions.getLocalHostAddress(); com.star...
LOG.error("HDFS url is empty.");
public void initialize(BDBEnvironment environment, String baseImageDir) throws IOException { journalSystem = new BDBJEJournalSystem(environment); imageDir = baseImageDir + IMAGE_SUBDIR; com.staros.util.Config.STARMGR_IP = FrontendOptions.getLocalHostAddress(); com.star...
class SingletonHolder { private static final StarMgrServer INSTANCE = new StarMgrServer(); }
class SingletonHolder { private static final StarMgrServer INSTANCE = new StarMgrServer(); }
I think it isn't a good idea to use `getInstance()` here, because this may lead some error if another `Catalog` exist in system, and it is strange to call `getInstance()` to get itself. In addition I think it is better to limit use of singleton as little as possible. So, could you find another way to write unit tes...
public void createDb(CreateDbStmt stmt) throws DdlException { final String clusterName = stmt.getClusterName(); String fullDbName = stmt.getFullDbName(); long id = 0L; if (!tryLock(false)) { throw new DdlException("Failed to acquire catalog lock. Try again"); } ...
Catalog.getInstance().getEditLog().logCreateDb(db);
public void createDb(CreateDbStmt stmt) throws DdlException { final String clusterName = stmt.getClusterName(); String fullDbName = stmt.getFullDbName(); long id = 0L; if (!tryLock(false)) { throw new DdlException("Failed to acquire catalog lock. Try again"); } ...
class SingletonHolder { private static final Catalog INSTANCE = new Catalog(); }
class SingletonHolder { private static final Catalog INSTANCE = new Catalog(); }
I think it could just be removed, due to other list will not deal with 404.
public PagedFlux<VirtualMachineExtensionImage> listByRegionAsync(String regionName) { return PagedConverter .flatMapPage( publishers.listByRegionAsync(regionName), virtualMachinePublisher -> virtualMachinePublisher .extensio...
e -> e.getResponse().getStatusCode() == 404 ? Flux.empty() : Flux.error(e))
public PagedFlux<VirtualMachineExtensionImage> listByRegionAsync(String regionName) { return PagedConverter .flatMapPage( publishers.listByRegionAsync(regionName), virtualMachinePublisher -> virtualMachinePublisher .extensio...
class VirtualMachineExtensionImagesImpl implements VirtualMachineExtensionImages { private final VirtualMachinePublishers publishers; public VirtualMachineExtensionImagesImpl(VirtualMachinePublishers publishers) { this.publishers = publishers; } @Override public PagedIterable<VirtualMachin...
class VirtualMachineExtensionImagesImpl implements VirtualMachineExtensionImages { private final VirtualMachinePublishers publishers; public VirtualMachineExtensionImagesImpl(VirtualMachinePublishers publishers) { this.publishers = publishers; } @Override public PagedIterable<VirtualMachin...
Duh, I should have checked that, thanks! :)
public String finalName() { if (finalName == null || finalName.length() == 0) return project.getName() + "-" + project.getVersion(); else return finalName; }
return project.getName() + "-" + project.getVersion();
public String finalName() { if (finalName == null || finalName.length() == 0) return project.getName() + "-" + project.getVersion(); else return finalName; }
class QuarkusPluginExtension { private final Project project; private String transformedClassesDirectory = "transformed-classes"; private String wiringClassesDirectory = "wiring-classes"; private String libDir = "lib"; private String outputDirectory; private String finalName; private ...
class QuarkusPluginExtension { private final Project project; private String transformedClassesDirectory = "transformed-classes"; private String wiringClassesDirectory = "wiring-classes"; private String libDir = "lib"; private String outputDirectory; private String finalName; private ...
Ah, didn't see that it's printing the file path. In that case, we don't need the word `module`.
public static FunctionInfo getMainFunctionInfo(PackageInfo entryPkgInfo) { String errorMsg = "'main' function not found in '" + entryPkgInfo.getProgramFile().getProgramFilePath() + "'"; FunctionInfo functionInfo = entryPkgInfo.getFunctionInfo(MAIN_FUNCTION_NAME); if (functionInfo == null) { ...
String errorMsg = "'main' function not found in '" + entryPkgInfo.getProgramFile().getProgramFilePath() + "'";
public static FunctionInfo getMainFunctionInfo(PackageInfo entryPkgInfo) { String errorMsg = "'main' function not found in '" + entryPkgInfo.getProgramFile().getProgramFilePath() + "'"; FunctionInfo functionInfo = entryPkgInfo.getFunctionInfo(MAIN_FUNCTION_NAME); if (functionInfo == null) { ...
class BLangProgramRunner { public static void runService(ProgramFile programFile) { if (!programFile.isServiceEPAvailable()) { throw new BallerinaException("no services found in '" + programFile.getProgramFilePath() + "'"); } PackageInfo servicesPackage = programFile.g...
class BLangProgramRunner { public static void runService(ProgramFile programFile) { if (!programFile.isServiceEPAvailable()) { throw new BallerinaException("no services found in '" + programFile.getProgramFilePath() + "'"); } PackageInfo servicesPackage = programFile.g...
I haven't understood the control flow here. Is this line really executed? Did you want to put it into `finally`?
public void testBufferRecycledOnFailure() throws IOException { FailingChannelStateSerializer serializer = new FailingChannelStateSerializer(); TestRecoveredChannelStateHandler handler = new TestRecoveredChannelStateHandler(); try (FSDataInputStream stream = geStream(serializer, 10)) { new ChannelStateChunkRea...
checkState(serializer.failed);
public void testBufferRecycledOnFailure() throws IOException, InterruptedException { FailingChannelStateSerializer serializer = new FailingChannelStateSerializer(); TestRecoveredChannelStateHandler handler = new TestRecoveredChannelStateHandler(); try (FSDataInputStream stream = getStream(serializer, 10)) { n...
class ChannelStateChunkReaderTest { @Test(expected = TestException.class) @Test public void testBuffersNotRequestedForEmptyStream() throws IOException { ChannelStateSerializer serializer = new ChannelStateSerializerImpl(); TestRecoveredChannelStateHandler handler = new TestRecoveredChannelStateHandler(); ...
class ChannelStateChunkReaderTest { @Test(expected = TestException.class) @Test public void testBuffersNotRequestedForEmptyStream() throws IOException, InterruptedException { ChannelStateSerializer serializer = new ChannelStateSerializerImpl(); TestRecoveredChannelStateHandler handler = new TestRecoveredChan...
If it's not necessary, I don't see why we would do this. Registering types for reflection has a cost.
void build(BuildProducer<ReflectiveClassBuildItem> reflectiveClass) { final String driverName = "org.postgresql.Driver"; reflectiveClass.produce(ReflectiveClassBuildItem.builder(driverName).build()); final String[] pgObjectClasses =...
"org.postgresql.jdbc.PgResultSet.NullObject"
void build(BuildProducer<ReflectiveClassBuildItem> reflectiveClass) { final String driverName = "org.postgresql.Driver"; reflectiveClass.produce(ReflectiveClassBuildItem.builder(driverName).build()); final String[] pgObjectClasses =...
class PostgreSQLJDBCReflections { @BuildStep }
class PostgreSQLJDBCReflections { @BuildStep }
> Good catch and thanks for your contribution. Only two minor comments left, please consider to modify the error message. ok, have modified.
public DataStreamSource<String> readTextFile(String filePath, String charsetName) { Preconditions.checkArgument(!StringUtils.isNullOrWhitespaceOnly(filePath), "The file path must not be blank."); TextInputFormat format = new TextInputFormat(new Path(filePath)); format.setFilesFilter(FilePathFilter.createDefaultF...
Preconditions.checkArgument(!StringUtils.isNullOrWhitespaceOnly(filePath), "The file path must not be blank.");
public DataStreamSource<String> readTextFile(String filePath, String charsetName) { Preconditions.checkArgument(!StringUtils.isNullOrWhitespaceOnly(filePath), "The file path must not be null or blank."); TextInputFormat format = new TextInputFormat(new Path(filePath)); format.setFilesFilter(FilePathFilter.create...
class (as given in * {@link
class (as given in * {@link
@FroMage Right, but the `referer` header has a single value, or have I missed your point ? I mean, this referer check is not guaranteed to succeed, but it is not a big problem - since the only thing we are deciding upon here is how to fail, see my response to Pedro as well: whether to return `401` immediately or redire...
private boolean isRedirectFromProvider(RoutingContext context, TenantConfigContext configContext) { String referer = context.request().getHeader(HttpHeaders.REFERER); return referer != null && referer.startsWith(configContext.provider.getMetadata().getAuthorizationUri()); }
return referer != null && referer.startsWith(configContext.provider.getMetadata().getAuthorizationUri());
private boolean isRedirectFromProvider(RoutingContext context, TenantConfigContext configContext) { String referer = context.request().getHeader(HttpHeaders.REFERER); return referer != null && referer.startsWith(configContext.provider.getMetadata().getAuthorizationUri()); }
class CodeAuthenticationMechanism extends AbstractOidcAuthenticationMechanism { static final String AMP = "&"; static final String EQ = "="; static final String UNDERSCORE = "_"; static final String COOKIE_DELIM = "|"; static final Pattern COOKIE_PATTERN = Pattern.compile("\\" + COOKIE_DELIM); ...
class CodeAuthenticationMechanism extends AbstractOidcAuthenticationMechanism { static final String AMP = "&"; static final String EQ = "="; static final String UNDERSCORE = "_"; static final String COOKIE_DELIM = "|"; static final Pattern COOKIE_PATTERN = Pattern.compile("\\" + COOKIE_DELIM); ...
Many local variables here that are not part of cleanup can be moved a lot closer to their actual usage or even omitted when just passing the new object to a constructor directly.
public RocksDBKeyedStateBackend<K> build() throws BackendBuildingException { RocksDBKeyedStateBackend<K> backend = null; RocksDBWriteBatchWrapper writeBatchWrapper = null; ColumnFamilyHandle defaultColumnFamilyHandle = null; RocksDBNativeMetricMonitor nativeMetricMonitor = nativeMetricOptions.isEnabled() ? n...
LinkedHashMap<String, StateColumnFamilyHandle> kvStateInformation = new LinkedHashMap<>();
public RocksDBKeyedStateBackend<K> build() throws BackendBuildingException { RocksDBWriteBatchWrapper writeBatchWrapper = null; ColumnFamilyHandle defaultColumnFamilyHandle = null; RocksDBNativeMetricMonitor nativeMetricMonitor = null; CloseableRegistry cancelStreamRegistry = new CloseableRegistry(); Write...
class RocksDBKeyedStateBackendBuilder<K> extends AbstractKeyedStateBackendBuilder<K> { private static final Logger LOG = LoggerFactory.getLogger(RocksDBKeyedStateBackendBuilder.class); public static final String DB_INSTANCE_DIR_STRING = "db"; /** String that identifies the operator that owns this backend. */ priv...
class RocksDBKeyedStateBackendBuilder<K> extends AbstractKeyedStateBackendBuilder<K> { private static final Logger LOG = LoggerFactory.getLogger(RocksDBKeyedStateBackendBuilder.class); public static final String DB_INSTANCE_DIR_STRING = "db"; /** String that identifies the operator that owns this backend. */ priv...
```suggestion @FunctionalInterface public interface RangeEndEstimator { long estimate(); } ```
public SplitResult<OffsetRange> trySplit(double fractionOfRemainder) { if (range.getTo() != Long.MAX_VALUE || range.getTo() == range.getFrom()) { return super.trySplit(fractionOfRemainder); } long cur = (lastAttemptedOffset == null) ? range.getFrom() - 1 : lastAttemptedOffset; if (cur =...
long cur = (lastAttemptedOffset == null) ? range.getFrom() - 1 : lastAttemptedOffset;
public SplitResult<OffsetRange> trySplit(double fractionOfRemainder) { if (range.getTo() != Long.MAX_VALUE || range.getTo() == range.getFrom()) { return super.trySplit(fractionOfRemainder); } if (lastAttemptedOffset != null && lastAttemptedOffset == Long.MAX_VALUE) { return null; }...
class GrowableOffsetRangeTracker extends OffsetRangeTracker { /** * An interface that should be implemented to fetch estimated end offset of range. * * <p>{@code estimateRangeEnd} is called to give te end offset when {@code trySplit} or {@code * getProgress} is invoked. The end offset is exclusive for the...
class GrowableOffsetRangeTracker extends OffsetRangeTracker { /** * Provides the estimated end offset of the range. * * <p>{@link * * required to monotonically increase as it will only be taken into consideration when the * estimated end offset is larger than the current position. Returning {@code...
I believe it's fine for now, but we may need to add a hint (a file) in the service bindings.
public Optional<ServiceBindingConfigSource> convert(List<ServiceBinding> serviceBindings) { Optional<ServiceBinding> matchingByType = ServiceBinding.singleMatchingByType("kafka", serviceBindings); if (!matchingByType.isPresent()) { return Optional.empty(); } Map<String, Stri...
String.format("org.apache.kafka.common.security.plain.PlainLoginModule required %s %s", user, password));
public Optional<ServiceBindingConfigSource> convert(List<ServiceBinding> serviceBindings) { Optional<ServiceBinding> matchingByType = ServiceBinding.singleMatchingByType("kafka", serviceBindings); if (!matchingByType.isPresent()) { return Optional.empty(); } Map<String, Stri...
class KafkaBindingConverter implements ServiceBindingConverter { @Override }
class KafkaBindingConverter implements ServiceBindingConverter { @Override }
I agree, but without the refactoring, I don't think we can avoid spaghetti. I'd suggest to do a refactoring as a separate task.
void randomEmit(T record, int targetChannelIndex) throws IOException, InterruptedException { tryFinishCurrentBufferBuilder(targetChannelIndex); randomTriggered = true; emit(record, targetChannelIndex); randomTriggered = false; if (bufferBuilder != null) { for (int index = 0; index < numberOfChannels; ind...
addBufferConsumer(randomTriggeredConsumer.copyAndSync(bufferBuilder), index);
void randomEmit(T record, int targetChannelIndex) throws IOException, InterruptedException { tryFinishCurrentBufferBuilder(targetChannelIndex); randomTriggered = true; emit(record, targetChannelIndex); randomTriggered = false; if (bufferBuilder != null) { for (int index = 0; index < numberOfChannels; ind...
class BroadcastRecordWriter<T extends IOReadableWritable> extends RecordWriter<T> { /** The current buffer builder shared for all the channels. */ @Nullable private BufferBuilder bufferBuilder; /** * The flag for judging whether {@link * is triggered by {@link */ private boolean randomTriggered; privat...
class BroadcastRecordWriter<T extends IOReadableWritable> extends RecordWriter<T> { /** The current buffer builder shared for all the channels. */ @Nullable private BufferBuilder bufferBuilder; /** * The flag for judging whether {@link * is triggered by {@link */ private boolean randomTriggered; privat...
+1 for having the if then the logic is more clear for the reader. A comment above the if will also help for reading the code
public void execute() { if (this.helpFlag) { String commandUsageInfo = BLauncherCmd.getCommandUsageInfo(TEST_COMMAND); this.errStream.println(commandUsageInfo); return; } String[] args = LaunchUtils .initConfigurations(this.argList == null ? n...
if (listGroups) {
public void execute() { if (this.helpFlag) { String commandUsageInfo = BLauncherCmd.getCommandUsageInfo(TEST_COMMAND); this.errStream.println(commandUsageInfo); return; } String[] args = LaunchUtils .initConfigurations(this.argList == null ? n...
class TestCommand implements BLauncherCmd { private final PrintStream outStream; private final PrintStream errStream; private Path sourceRootPath; private boolean exitWhenFinish; private boolean skipCopyLibsFromDist; private Task testTask; public TestCommand() { this.sourceRootPath...
class TestCommand implements BLauncherCmd { private final PrintStream outStream; private final PrintStream errStream; private Path sourceRootPath; private boolean exitWhenFinish; private boolean skipCopyLibsFromDist; public TestCommand() { this.sourceRootPath = Paths.get(System.getProp...
Shall we remove this `PackageFile.MAGIC_VALUE` & `PackageFile.LANG_VERSION` as they are not used in other places? Can be done through another PR as well.
public static byte[] writePackage(PackageFile packageFile) throws IOException { ByteArrayOutputStream byteArrayOS = new ByteArrayOutputStream(); try (DataOutputStream dataOutStream = new DataOutputStream(byteArrayOS)) { dataOutStream.write(packageFile.pkgBinaryContent); return by...
dataOutStream.write(packageFile.pkgBinaryContent);
public static byte[] writePackage(PackageFile packageFile) throws IOException { ByteArrayOutputStream byteArrayOS = new ByteArrayOutputStream(); try (DataOutputStream dataOutStream = new DataOutputStream(byteArrayOS)) { dataOutStream.write(packageFile.pkgBinaryContent); return by...
class PackageFileWriter { public static byte[] writePackage(BIRPackageFile packageFile) throws IOException { ByteArrayOutputStream byteArrayOS = new ByteArrayOutputStream(); try (DataOutputStream dataOutStream = new DataOutputStream(byteArrayOS)) { dataOutStream.write(packageFile....
class PackageFileWriter { public static byte[] writePackage(BIRPackageFile packageFile) throws IOException { ByteArrayOutputStream byteArrayOS = new ByteArrayOutputStream(); try (DataOutputStream dataOutStream = new DataOutputStream(byteArrayOS)) { dataOutStream.write(packageFile....
Don't you need the sorting by precedence done in L809 before this?
public void visit(BLangPackage pkgNode) { if (pkgNode.completedPhases.contains(CompilerPhase.DESUGAR)) { result = pkgNode; return; } observabilityDesugar.addObserveInternalModuleImport(pkgNode); code2CloudDesugar.addCode2CloudModuleImport(pkgNode); creat...
rewrite(pkgNode.typeDefinitions, env);
public void visit(BLangPackage pkgNode) { if (pkgNode.completedPhases.contains(CompilerPhase.DESUGAR)) { result = pkgNode; return; } observabilityDesugar.addObserveInternalModuleImport(pkgNode); code2CloudDesugar.addCode2CloudModuleImport(pkgNode); creat...
class definition node for which the initializer is created * @param env The env for the type node * @return The generated initializer method */ private BLangFunction createGeneratedInitializerFunction(BLangClassDefinition classDefinition, SymbolEnv env) { BLangFunction generatedIni...
class definition node for which the initializer is created * @param env The env for the type node * @return The generated initializer method */ private BLangFunction createGeneratedInitializerFunction(BLangClassDefinition classDefinition, SymbolEnv env) { BLangFunction generatedIni...
I've made changes to ensure that we finalize the request after the error response was sent.
protected void respondAsLeader(ChannelHandlerContext ctx, RoutedRequest routedRequest, T gateway) { HttpRequest httpRequest = routedRequest.getRequest(); if (log.isTraceEnabled()) { log.trace("Received request " + httpRequest.uri() + '.'); } FileUploads uploadedFiles = null; try { inFlightRequestTracke...
finalizeRequestProcessing(finalUploadedFiles);
protected void respondAsLeader(ChannelHandlerContext ctx, RoutedRequest routedRequest, T gateway) { HttpRequest httpRequest = routedRequest.getRequest(); if (log.isTraceEnabled()) { log.trace("Received request " + httpRequest.uri() + '.'); } FileUploads uploadedFiles = null; try { inFlightRequestTracke...
class AbstractHandler<T extends RestfulGateway, R extends RequestBody, M extends MessageParameters> extends RedirectHandler<T> implements AutoCloseableAsync { protected final Logger log = LoggerFactory.getLogger(getClass()); protected static final ObjectMapper MAPPER = RestMapperUtils.getStrictObjectMapper(); pri...
class AbstractHandler<T extends RestfulGateway, R extends RequestBody, M extends MessageParameters> extends RedirectHandler<T> implements AutoCloseableAsync { protected final Logger log = LoggerFactory.getLogger(getClass()); protected static final ObjectMapper MAPPER = RestMapperUtils.getStrictObjectMapper(); pri...
Ah sorry about that, thank you
public BigDecimal toInputType(BigDecimal base) { checkArgument( base == null || (base.precision() <= precision && base.scale() <= scale) || base.round(new MathContext(precision)).compareTo(base) == 0, "Expected BigDecimal base to be null or have precision <= %s (...
return base;
public BigDecimal toInputType(BigDecimal base) { if (precision != -1) { checkArgument( base == null || (base.precision() <= precision && base.scale() <= scale) || base.round(new MathContext(precision)).compareTo(base) == 0, "Expected BigDecimal base to be...
class FixedPrecisionNumeric extends PassThroughLogicalType<BigDecimal> { public static final String IDENTIFIER = "beam:logical_type:fixed_decimal:v1"; /** * Identifier of the unspecified precision numeric type. It corresponds to Java SDK's {@link * FieldType * type in order to be compatible with exi...
class FixedPrecisionNumeric extends PassThroughLogicalType<BigDecimal> { public static final String IDENTIFIER = "beam:logical_type:fixed_decimal:v1"; /** * Identifier of the unspecified precision numeric type. It corresponds to Java SDK's {@link * FieldType * type in order to be compatible...
nit: consistent use of `this`. In the method above, you did not reference member variables with `this`. In general, unless there is a naming conflict, I would remove `this`.
public String getFullyQualifiedNamespace() { return this.endpoint.getHost(); }
return this.endpoint.getHost();
public String getFullyQualifiedNamespace() { return endpoint.getHost(); }
class EventHubConnectionStringProperties { private final URI endpoint; private final String entityPath; private final String sharedAccessKeyName; private final String sharedAccessKey; private final String sharedAccessSignature; private EventHubConnectionStringProperties(ConnectionStringProperti...
class EventHubConnectionStringProperties { private final URI endpoint; private final String entityPath; private final String sharedAccessKeyName; private final String sharedAccessKey; private EventHubConnectionStringProperties(ConnectionStringProperties properties) { this.endpoint = propert...
Do you know what's expected here? When used in distributed mode, the protocol is a hand-made protocol on top of TCP.
protected String protocol(final Message message) { return null; }
return null;
protected String protocol(final Message message) { return null; }
class EventBusAttributesExtractor extends MessagingAttributesExtractor<Message, Message> { private final MessageOperation operation; public EventBusAttributesExtractor(final MessageOperation operation) { this.operation = operation; } @Override public MessageOperatio...
class EventBusAttributesExtractor extends MessagingAttributesExtractor<Message, Message> { private final MessageOperation operation; public EventBusAttributesExtractor(final MessageOperation operation) { this.operation = operation; } @Override public MessageOperatio...
But how can this floating credit be assigned to this blocked `RemoteInputChannel`? Wouldn't it cause the same deadlock, when floating buffers are assigned to blocked channels and job/task can not make any progress? It sounds like maybe this should have been handled sooner when trying to increase `unannouncedCredit`?
public void resumeConsumption() throws IOException { checkState(!isReleased.get(), "Channel released."); checkPartitionRequestQueueInitialized(); if (initialCredit == 0) { unannouncedCredit.set(0); } partitionRequestClient.resumeConsumption(this); ...
partitionRequestClient.resumeConsumption(this);
public void resumeConsumption() throws IOException { checkState(!isReleased.get(), "Channel released."); checkPartitionRequestQueueInitialized(); if (initialCredit == 0) { unannouncedCredit.set(0); } ...
class RemoteInputChannel extends InputChannel { private static final Logger LOG = LoggerFactory.getLogger(RemoteInputChannel.class); private static final int NONE = -1; /** ID to distinguish this channel from other channels sharing the same TCP connection. */ private final InputChannelID id = new Inpu...
class RemoteInputChannel extends InputChannel { private static final Logger LOG = LoggerFactory.getLogger(RemoteInputChannel.class); private static final int NONE = -1; /** ID to distinguish this channel from other channels sharing the same TCP connection. */ private final InputChannelID id = new Inpu...
If the shortest mitigation is empty it means that the situation can be mitigated in 0 moves, i.e it is already fine. This is an inconsistency that will only arise if some change happened to mitigate it between when the nodes were read by CapacityChecker and by this, or if either the CapacityChecker or this has a bug. I...
private Move findMitigatingMove(CapacityChecker.HostFailurePath failurePath) { Optional<Node> nodeWhichCantMove = failurePath.failureReason.tenant; if (nodeWhichCantMove.isEmpty()) return Move.empty(); Node node = nodeWhichCantMove.get(); NodeList allNodes = nodeRepository().list(); ...
if (shortestMitigation == null || shortestMitigation.size() > mitigation.size())
private Move findMitigatingMove(CapacityChecker.HostFailurePath failurePath) { Optional<Node> nodeWhichCantMove = failurePath.failureReason.tenant; if (nodeWhichCantMove.isEmpty()) return Move.empty(); Node node = nodeWhichCantMove.get(); NodeList allNodes = nodeRepository().list(); ...
class SpareCapacityMaintainer extends NodeRepositoryMaintainer { private final int maxIterations; private final Deployer deployer; private final Metric metric; public SpareCapacityMaintainer(Deployer deployer, NodeRepository nodeRepository, ...
class SpareCapacityMaintainer extends NodeRepositoryMaintainer { private final int maxIterations; private final Deployer deployer; private final Metric metric; public SpareCapacityMaintainer(Deployer deployer, NodeRepository nodeRepository, ...
Since we are iterating the original member type list we wont me missing any types
private BType getExpectedTypeForInferredTypedescMember(BUnionType originalType, BType expType, BType member) { if (expType == null || !this.isInvocation || !Symbols.isFlagOn(member.flags, Flags.PARAMETERIZED)) { return null; } BType impliedExpType = Types.getImpliedType(expType); ...
originalTypesSet.addAll(typesToAdd);
private BType getExpectedTypeForInferredTypedescMember(BUnionType originalType, BType expType, BType member) { if (expType == null || !this.isInvocation || !Symbols.isFlagOn(member.flags, Flags.PARAMETERIZED)) { return null; } BType impliedExpType = Types.getImpliedType(expType); ...
class Unifier implements BTypeVisitor<BType, BType> { private Map<String, BType> paramValueTypes; private Set<BType> visitedTypes = new HashSet<>(); private boolean isInvocation; private BLangInvocation invocation; private BLangFunction function; private SymbolTable symbolTable; private Sym...
class Unifier implements BTypeVisitor<BType, BType> { private Map<String, BType> paramValueTypes; private Set<BType> visitedTypes = new HashSet<>(); private boolean isInvocation; private BLangInvocation invocation; private BLangFunction function; private SymbolTable symbolTable; private Sym...
This should check for `startsWith` instead of `equals` as sometimes the service returns `text/event-stream; charset=utf8` as the content type.
private static boolean isTextEventStream(okhttp3.Headers responseHeaders) { return responseHeaders != null && responseHeaders.get(HeaderName.CONTENT_TYPE.toString()) != null && Objects.equals(responseHeaders.get(HeaderName.CONTENT_TYPE.toString()), ContentType.TEXT_EVENT_STREAM); }
Objects.equals(responseHeaders.get(HeaderName.CONTENT_TYPE.toString()), ContentType.TEXT_EVENT_STREAM);
private static boolean isTextEventStream(okhttp3.Headers responseHeaders) { if (responseHeaders != null) { return ServerSentEventUtil .isTextEventStreamContentType(responseHeaders.get(HttpHeaderName.CONTENT_TYPE.toString())); } return false; }
class OkHttpHttpClient implements HttpClient { private static final ClientLogger LOGGER = new ClientLogger(OkHttpHttpClient.class); private static final byte[] EMPTY_BODY = new byte[0]; private static final RequestBody EMPTY_REQUEST_BODY = RequestBody.create(EMPTY_BODY); final OkHttpClient httpClient; ...
class OkHttpHttpClient implements HttpClient { private static final ClientLogger LOGGER = new ClientLogger(OkHttpHttpClient.class); private static final byte[] EMPTY_BODY = new byte[0]; private static final RequestBody EMPTY_REQUEST_BODY = RequestBody.create(EMPTY_BODY); final OkHttpClient httpClient; ...
Well -1 == 0xff so I am pretty sure there should not be an issue, I will change it if you think is more readable.
private static byte[] unsignedCopyAndIncrement(byte[] input) { byte[] copy = Arrays.copyOf(input, input.length); if (copy == null) { throw new IllegalArgumentException("cannot increment null array"); } else { for (int i = copy.length - 1; i >= 0; --i) { if (copy[i] != -1) { ++c...
if (copy[i] != -1) {
private static byte[] unsignedCopyAndIncrement(byte[] input) { if (input.length == 0) { return new byte[] {0}; } byte[] copy = Arrays.copyOf(input, input.length); for (int i = copy.length - 1; i >= 0; --i) { if (copy[i] != (byte) 0xff) { ++copy[i]; return copy; } ...
class ByteKeyRangeTracker extends RestrictionTracker<ByteKeyRange, ByteKey> { private ByteKeyRange range; @Nullable private ByteKey lastClaimedKey = null; @Nullable private ByteKey lastAttemptedKey = null; private ByteKeyRangeTracker(ByteKeyRange range) { this.range = checkNotNull(range); } public sta...
class ByteKeyRangeTracker extends RestrictionTracker<ByteKeyRange, ByteKey> { private ByteKeyRange range; @Nullable private ByteKey lastClaimedKey = null; @Nullable private ByteKey lastAttemptedKey = null; private ByteKeyRangeTracker(ByteKeyRange range) { this.range = checkNotNull(range); } public sta...
1. Yes agreed, I have made the change. 2. Shifted `columnMetaDataMap` declaration to `setUp`
public void assertGetColumnMetaDataGivenColumnIndex() { List<String> pipelineTableColumns = pipelineTableMetaData.getColumnNames(); PipelineColumnMetaData actual = columnMetaDataMap.get(pipelineTableColumns.get(0)); assertThat(actual.getOrdinalPosition(), is(0)); assertThat(actual.getNam...
assertThat(actual.getOrdinalPosition(), is(0));
public void assertGetColumnMetaDataGivenColumnIndex() { PipelineColumnMetaData actual = pipelineTableMetaData.getColumnMetaData(0); assertThat(actual.getOrdinalPosition(), is(1)); assertThat(actual.getName(), is("test")); assertThat(actual.getDataType(), is(Types.INTEGER)); asser...
class PipelineTableMetaDataTest { private PipelineTableMetaData pipelineTableMetaData; private Map<String, PipelineColumnMetaData> columnMetaDataMap; @Before public void setUp() { PipelineColumnMetaData pipelineColumnMetaData = new PipelineColumnMetaData(0, "test", Types.INTEGER, "INTEGER", tru...
class PipelineTableMetaDataTest { private PipelineTableMetaData pipelineTableMetaData; @Before public void setUp() { pipelineTableMetaData = new PipelineTableMetaData("test_data", Collections.singletonMap("test", new PipelineColumnMetaData(1, "test", Types.INTEGER, "INTEGER", true))); } @...
Should be use `if else` for these conditions?
public Type clickhouseTypeToDoris(JdbcFieldSchema fieldSchema) { String ckType = fieldSchema.getDataTypeName(); if (ckType.startsWith("LowCardinality")) { ckType = ckType.substring(15, ckType.length() - 1); } if (ckType.startsWith("Nullable")) { ckType = ckType.su...
int precision = Integer.parseInt(accuracy[0]);
public Type clickhouseTypeToDoris(JdbcFieldSchema fieldSchema) { String ckType = fieldSchema.getDataTypeName(); if (ckType.startsWith("LowCardinality")) { ckType = ckType.substring(15, ckType.length() - 1); if (ckType.startsWith("Nullable")) { ckType = ckType.subs...
class JdbcFieldSchema { private String columnName; private int dataType; private String dataTypeName; private int columnSize; private int decimalDigits; private int numPrecRadix; private String remarks; ...
class JdbcFieldSchema { private String columnName; private int dataType; private String dataTypeName; private int columnSize; private int decimalDigits; private int numPrecRadix; private String remarks; ...
@iocanel can you please add a check for annotations as well?
public void assertGeneratedResources() throws IOException { Path kubernetesDir = prodModeTestResults.getBuildDir().resolve("kubernetes"); assertThat(kubernetesDir) .isDirectoryContaining(p -> p.getFileName().endsWith("openshift.json")) .isDirectoryContaining(p -> p.getFil...
assertThat(m.getLabels()).contains(entry("foo", "bar"));
public void assertGeneratedResources() throws IOException { Path kubernetesDir = prodModeTestResults.getBuildDir().resolve("kubernetes"); assertThat(kubernetesDir) .isDirectoryContaining(p -> p.getFileName().endsWith("openshift.json")) .isDirectoryContaining(p -> p.getFil...
class OpenshiftWithApplicationPropertiesTest { @RegisterExtension static final QuarkusProdModeTest config = new QuarkusProdModeTest() .setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class).addClasses(GreetingResource.class)) .setApplicationName("openshift") .setAppli...
class OpenshiftWithApplicationPropertiesTest { @RegisterExtension static final QuarkusProdModeTest config = new QuarkusProdModeTest() .setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class).addClasses(GreetingResource.class)) .setApplicationName("openshift") .setAppli...
The original code will set the MetaContext of the current thread to `null`, any subsequent invoking of the following methods will throw NullPointerException: https://github.com/StarRocks/starrocks/blob/d350c6cf0979eefe05dec25faa72bd141bbafb9a/fe/fe-core/src/main/java/com/starrocks/catalog/Catalog.java#L704 https://gi...
public static boolean copy(Writable orig, Writable dest, Class c) { MetaContext oldContext = MetaContext.get(); MetaContext metaContext = new MetaContext(); metaContext.setMetaVersion(FeConstants.meta_version); metaContext.setStarRocksMetaVersion(FeConstants.starrocks_meta_versi...
MetaContext oldContext = MetaContext.get();
public static boolean copy(Writable orig, Writable dest, Class c) { MetaContext oldContext = MetaContext.get(); MetaContext metaContext = new MetaContext(); metaContext.setMetaVersion(FeConstants.meta_version); metaContext.setStarRocksMetaVersion(FeConstants.starrocks_meta_versi...
class must have method "readFields(DataInput)"
class must have method "readFields(DataInput)"
Isn't it incomplete though? The state which is usually specified within the quotes isn't specified here?
public SemanticModel getSemanticModel(ModuleId moduleId) { ModuleContext moduleContext = this.rootPackageContext.moduleContext(moduleId); if (moduleContext.compilationState() != ModuleCompilationState.COMPILED) { throw new IllegalStateException("Semantic model cannot be ret...
: (" " + moduleContext.compilationState().name())) + "'. " +
public SemanticModel getSemanticModel(ModuleId moduleId) { ModuleContext moduleContext = this.rootPackageContext.moduleContext(moduleId); if (moduleContext.compilationState() != ModuleCompilationState.COMPILED) { throw new IllegalStateException("Semantic model cannot be ret...
class PackageCompilation { private final PackageContext rootPackageContext; private final PackageResolution packageResolution; private final CompilationOptions compilationOptions; private CompilerContext compilerContext; private Map<TargetPlatform, CompilerBackend> compilerBackends; private Lis...
class PackageCompilation { private final PackageContext rootPackageContext; private final PackageResolution packageResolution; private final CompilationOptions compilationOptions; private CompilerContext compilerContext; private Map<TargetPlatform, CompilerBackend> compilerBackends; private Lis...
The most risky bug in this code is: Potential loss of TaskRuns when `replayCreateTaskRun` rejects a task due to `arrangeTaskRun` returning false. You can modify the code like this: ```java if (!taskRunManager.arrangeTaskRun(taskRun)) { LOG.warn("Submit task run to pending queue failed, reject the submit:{}", taskR...
public void replayUpdateTaskRun(TaskRunStatusChange statusChange) { Constants.TaskRunState fromStatus = statusChange.getFromStatus(); Constants.TaskRunState toStatus = statusChange.getToStatus(); Long taskId = statusChange.getTaskId(); LOG.info("replayUpdateTaskRun:" + statusChange); ...
taskRunManager.getPendingTaskRunMap().remove(taskId);
public void replayUpdateTaskRun(TaskRunStatusChange statusChange) { Constants.TaskRunState fromStatus = statusChange.getFromStatus(); Constants.TaskRunState toStatus = statusChange.getToStatus(); Long taskId = statusChange.getTaskId(); LOG.info("replayUpdateTaskRun:" + statusChange); ...
class TaskManager implements MemoryTrackable { private static final Logger LOG = LogManager.getLogger(TaskManager.class); private final Map<Long, Task> idToTaskMap; private final Map<String, Task> nameToTaskMap; private final Map<Long, ScheduledFuture<?>> periodFutureMap; priv...
class TaskManager implements MemoryTrackable { private static final Logger LOG = LogManager.getLogger(TaskManager.class); private final Map<Long, Task> idToTaskMap; private final Map<String, Task> nameToTaskMap; private final Map<Long, ScheduledFuture<?>> periodFutureMap; priv...
I don't entirely understand the question. Can someone please provide a bit more context?
public CloseHandlesInfo(Integer closedHandles) { this.closedHandles = closedHandles; this.failedHandles = 0; }
this.failedHandles = 0;
public CloseHandlesInfo(Integer closedHandles) { this.closedHandles = closedHandles; this.failedHandles = 0; }
class CloseHandlesInfo { private final Integer closedHandles; private final Integer failedHandles; /** * Creates an instance of information about close handles. * * @param closedHandles The numbers of handles closed. */ /** * Creates an instance of information about clos...
class CloseHandlesInfo { private final Integer closedHandles; private final Integer failedHandles; /** * Creates an instance of information about close handles. * * @param closedHandles The numbers of handles closed. * Note : Failed handles was added as a parameter, default value for f...
Hmm that's a good point. It is redundant to specify both - specifying `jdbcType` that conflicts with `driverClassName` won't affect the code path which will uses the `driverClassName` parameter and ignores the `jdbcType`. This just might be misleading behavior, but I can remove the error for now since it isn't technica...
public void validate() throws IllegalArgumentException { if (Strings.isNullOrEmpty(getDriverClassName()) && Strings.isNullOrEmpty(getJdbcType())) { throw new IllegalArgumentException( "Either JDBC Driver class name or JDBC type must be set."); } if (Strings.isNullOrEmpty(getJdbcUrl...
if (Strings.isNullOrEmpty(getDriverClassName()) && Strings.isNullOrEmpty(getJdbcType())) {
public void validate() throws IllegalArgumentException { if (Strings.isNullOrEmpty(getJdbcUrl())) { throw new IllegalArgumentException("JDBC URL cannot be blank"); } boolean driverClassNamePresent = !Strings.isNullOrEmpty(getDriverClassName()); boolean jdbcTypePresent = !Strings.isNullO...
class JdbcReadSchemaTransformConfiguration implements Serializable { @Nullable public abstract String getDriverClassName(); @Nullable public abstract String getJdbcType(); public abstract String getJdbcUrl(); @Nullable public abstract String getUsername(); @Nullable public abstra...
class JdbcReadSchemaTransformConfiguration implements Serializable { @Nullable public abstract String getDriverClassName(); @Nullable public abstract String getJdbcType(); public abstract String getJdbcUrl(); @Nullable public abstract String getUsername(); @Nullable public abstra...
FLINK-14869 is opened to track this issue.
public ResourceSpec subtract(final ResourceSpec other) { checkNotNull(other, "Cannot subtract null resources"); if (this.equals(UNKNOWN) || other.equals(UNKNOWN)) { return UNKNOWN; } checkArgument(other.lessThanOrEqual(this), "Cannot subtract a larger ResourceSpec from this one."); final ResourceSpec ta...
return subtracted.getValue().compareTo(BigDecimal.ZERO) == 0 ? null : subtracted;
public ResourceSpec subtract(final ResourceSpec other) { checkNotNull(other, "Cannot subtract null resources"); if (this.equals(UNKNOWN) || other.equals(UNKNOWN)) { return UNKNOWN; } checkArgument(other.lessThanOrEqual(this), "Cannot subtract a larger ResourceSpec from this one."); final ResourceSpec ta...
class ResourceSpec implements Serializable { private static final long serialVersionUID = 1L; /** * A ResourceSpec that indicates an unknown set of resources. */ public static final ResourceSpec UNKNOWN = new ResourceSpec(); /** * The default ResourceSpec used for operators and transformation functions. ...
class ResourceSpec implements Serializable { private static final long serialVersionUID = 1L; /** * A ResourceSpec that indicates an unknown set of resources. */ public static final ResourceSpec UNKNOWN = new ResourceSpec(); /** * The default ResourceSpec used for operators and transformation functions. ...
What would be the consequence if we try to depend on the node kind instead of the code action node kind?
public static CodeActionNodeType codeActionNodeType(Node node) { if (node == null) { return CodeActionNodeType.NONE; } switch (node.kind()) { case SERVICE_DECLARATION: return CodeActionNodeType.SERVICE; case FUNCTION_DEFINITION: ...
case SERVICE_DECLARATION:
public static CodeActionNodeType codeActionNodeType(Node node) { if (node == null) { return CodeActionNodeType.NONE; } switch (node.kind()) { case SERVICE_DECLARATION: return CodeActionNodeType.SERVICE; case FUNCTION_DEFINITION: ...
class CodeActionUtil { private CodeActionUtil() { } /** * Get the top level node type at the cursor line. * * @param node node * @return {@link String} Top level node type */ /** * Translates ballerina diagnostics into lsp4j diagnostics. * * @param balle...
class CodeActionUtil { private CodeActionUtil() { } /** * Get the top level node type at the cursor line. * * @param node node * @return {@link String} Top level node type */ /** * Translates ballerina diagnostics into lsp4j diagnostics. * * @param balle...
Yes. The insert stmt's database would be set as the current database.
public static void analyze(CreatePipeStmt stmt, ConnectContext context) { analyzeProperties(stmt.getProperties()); Map<String, String> properties = stmt.getProperties(); InsertStmt insertStmt = stmt.getInsertStmt(); stmt.setTargetTable(insertStmt.getTableName()); String insertSq...
analyzePipeName(stmt.getPipeName(), insertStmt.getTableName().getDb());
public static void analyze(CreatePipeStmt stmt, ConnectContext context) { analyzeProperties(stmt.getProperties()); Map<String, String> properties = stmt.getProperties(); InsertStmt insertStmt = stmt.getInsertStmt(); stmt.setTargetTable(insertStmt.getTableName()); String insertSq...
class PipeAnalyzer { public static final String TASK_VARIABLES_PREFIX = "TASK."; public static final String PROPERTY_AUTO_INGEST = "auto_ingest"; public static final String PROPERTY_POLL_INTERVAL = "poll_interval"; public static final String PROPERTY_BATCH_SIZE = "batch_size"; public static final S...
class PipeAnalyzer { public static final String TASK_VARIABLES_PREFIX = "TASK."; public static final String PROPERTY_AUTO_INGEST = "auto_ingest"; public static final String PROPERTY_POLL_INTERVAL = "poll_interval"; public static final String PROPERTY_BATCH_SIZE = "batch_size"; public static final S...
Looks like the PR build failed due to spotbugs. So, the fix is working!
private void checkDigestLength(byte[] digest) { if (digest.length != getDigestLength()) { throw new IllegalArgumentException("Invalid digest length."); } }
if (digest.length != getDigestLength()) {
private void checkDigestLength(byte[] digest) { if (digest.length != getDigestLength()) { throw new IllegalArgumentException("Invalid digest length."); } }
class EcdsaSignatureTransform implements ISignatureTransform { private static final String ALGORITHM = "NONEwithECDSA"; private final KeyPair keyPair; private final Provider provider; private final Ecdsa algorithm; EcdsaSignatureTransform(KeyPair keyPair, Provider provider, Ecds...
class EcdsaSignatureTransform implements ISignatureTransform { private static final String ALGORITHM = "NONEwithECDSA"; private final KeyPair keyPair; private final Provider provider; private final Ecdsa algorithm; EcdsaSignatureTransform(KeyPair keyPair, Provider provider, Ecds...
The directories should point to `balo` and `caches` here right?
public void testCleanCommand() { CleanCommand cleanCommand = new CleanCommand(Paths.get(System.getProperty("user.dir")), false); new CommandLine(cleanCommand).parse("--sourceroot", this.testResources.resolve("valid-project").toString()); cleanCommand.execute(); Path bin = this.testResour...
+ File.separator + ProjectDirConstants.BIN_DIR_NAME);
public void testCleanCommand() { CleanCommand cleanCommand = new CleanCommand(Paths.get(System.getProperty("user.dir")), false); new CommandLine(cleanCommand).parse("--sourceroot", this.testResources.resolve("valid-project").toString()); cleanCommand.execute(); Path bin = this.testResour...
class BuildCommandTest extends CommandTest { private Path moduleBalo; private Path tplModuleBalo; private Path testResources; @BeforeClass public void setup() throws IOException { super.setup(); try { this.testResources = super.tmpDir.resolve("build-test-resources"); ...
class BuildCommandTest extends CommandTest { private Path moduleBalo; private Path tplModuleBalo; private Path testResources; @BeforeClass public void setup() throws IOException { super.setup(); try { this.testResources = super.tmpDir.resolve("build-test-resources"); ...
Well, it all depends on what your service returns so I prefer to do it on a per call basis. I'm not very familiar with this health thing so I don't know if it can also produce XML. But I decided to go per call to be on the safe side. I can change it if you prefer though.
public void testHealth() { try { RestAssured.defaultParser = Parser.JSON; RestAssured.when().get("/health").then() .body("outcome", is("UP"), "checks.state", contains("UP"), "checks.name", contains("basi...
RestAssured.defaultParser = Parser.JSON;
public void testHealth() { try { RestAssured.defaultParser = Parser.JSON; RestAssured.when().get("/health").then() .body("outcome", is("UP"), "checks.state", contains("UP"), "checks.name", contains("basi...
class HealthUnitTest { @Deployment public static JavaArchive deploy() { return ShrinkWrap.create(JavaArchive.class) .addClasses(BasicHealthCheck.class) .addAsManifestResource(EmptyAsset.INSTANCE, "beans.xml"); } @Test }
class HealthUnitTest { @Deployment public static JavaArchive deploy() { return ShrinkWrap.create(JavaArchive.class) .addClasses(BasicHealthCheck.class) .addAsManifestResource(EmptyAsset.INSTANCE, "beans.xml"); } @Test }
It should not be necessary to set the same value as the default in the def file. We should instead avoid setting the wrong values at higher levels in the model. The handling of `getMemoryPercentage` in `ContainerCluster.getConfig(QrStartConfig)` should be moved to the `ApplicationContainerCluster` class. Only those clu...
public void getConfig(QrStartConfig.Builder builder) { builder.jvm.heapsize(512); builder.jvm.heapSizeAsPercentageOfPhysicalMemory(0); builder.jvm.availableProcessors(2); builder.jvm.verbosegc(false); }
builder.jvm.heapSizeAsPercentageOfPhysicalMemory(0);
public void getConfig(QrStartConfig.Builder builder) { builder.jvm .verbosegc(false) .availableProcessors(2) .heapsize(512) .heapSizeAsPercentageOfPhysicalMemory(0); }
class ClusterControllerContainer extends Container implements BundlesConfig.Producer, ZookeeperServerConfig.Producer, QrStartConfig.Producer { private static final ComponentSpecification CLUSTERCONTROLLER_BUNDLE = new ComponentSpecification("clustercontroller-apps"); private static final...
class ClusterControllerContainer extends Container implements BundlesConfig.Producer, ZookeeperServerConfig.Producer, QrStartConfig.Producer { private static final ComponentSpecification CLUSTERCONTROLLER_BUNDLE = new ComponentSpecification("clustercontroller-apps"); private static final...
No, we don't want to do this. We had something similar before. This is an exception for panache.
private void registerConditionalDependencies(Project project) { ConditionalDependenciesEnabler conditionalDependenciesEnabler = new ConditionalDependenciesEnabler(project); ApplicationDeploymentClasspathBuilder deploymentClasspathBuilder = new ApplicationDeploymentClasspathBuilder( proje...
project.getConfigurations().getByName(DEV_MODE_CONFIGURATION_NAME).getIncoming().beforeResolve((devDependencies) -> {
private void registerConditionalDependencies(Project project) { ConditionalDependenciesEnabler conditionalDependenciesEnabler = new ConditionalDependenciesEnabler(project); ApplicationDeploymentClasspathBuilder deploymentClasspathBuilder = new ApplicationDeploymentClasspathBuilder( proje...
class QuarkusPlugin implements Plugin<Project> { public static final String ID = "io.quarkus"; public static final String QUARKUS_PACKAGE_TYPE = "quarkus.package.type"; public static final String EXTENSION_NAME = "quarkus"; public static final String LIST_EXTENSIONS_TASK_NAME = "listExtensions"; p...
class QuarkusPlugin implements Plugin<Project> { public static final String ID = "io.quarkus"; public static final String QUARKUS_PACKAGE_TYPE = "quarkus.package.type"; public static final String EXTENSION_NAME = "quarkus"; public static final String LIST_EXTENSIONS_TASK_NAME = "listExtensions"; p...
I'm curious what the intention of this comment is: Doesn't it just describe what the next line does? It feels a bit obsolete. WDYT?
public void testWriteFile() throws Exception { File file = TEMPORARY_FOLDER.newFolder(); Path basePath = new Path(file.toURI()); List<String> data = Arrays.asList("first line", "second line", "third line"); StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecuti...
public void testWriteFile() throws Exception { File file = TEMPORARY_FOLDER.newFolder(); Path basePath = new Path(file.toURI()); List<String> data = Arrays.asList("first line", "second line", "third line"); StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnviron...
class HadoopPathBasedPartFileWriterTest extends AbstractTestBase { @Rule public final Timeout timeoutPerTest = Timeout.seconds(2000); @Test public void testPendingFileRecoverableSerializer() throws IOException { HadoopPathBasedPendingFileRecoverable recoverable = new HadoopPathBased...
class HadoopPathBasedPartFileWriterTest extends AbstractTestBase { @Rule public final Timeout timeoutPerTest = Timeout.seconds(2000); @Test public void testPendingFileRecoverableSerializer() throws IOException { HadoopPathBasedPendingFileRecoverable recoverable = new HadoopPathBased...
A bit out-of-scope, but I'm wondering whether we should also assert the completion of the `CompletableFuture` that is returned by `closeAsync` here? ...just to be sure that the service has actually stopped properly after the completion of the `terminationFuture` succeeded. WDYT? :thinking:
void testActorTerminationWhenServiceShutdown() throws Exception { final ActorSystem rpcActorSystem = AkkaUtils.createDefaultActorSystem(); final RpcService rpcService = new AkkaRpcService( rpcActorSystem, AkkaRpcServiceConfiguration.defaultConfiguration()); ...
rpcService.closeAsync();
void testActorTerminationWhenServiceShutdown() throws Exception { final ActorSystem rpcActorSystem = AkkaUtils.createDefaultActorSystem(); final RpcService rpcService = new AkkaRpcService( rpcActorSystem, AkkaRpcServiceConfiguration.defaultConfiguration()); ...
class AkkaRpcActorTest { private static final Logger LOG = LoggerFactory.getLogger(AkkaRpcActorTest.class); private static Duration timeout = Duration.ofSeconds(10L); private static AkkaRpcService akkaRpcService; @BeforeAll static void setup() { akkaRpcService = ...
class AkkaRpcActorTest { private static final Logger LOG = LoggerFactory.getLogger(AkkaRpcActorTest.class); private static Duration timeout = Duration.ofSeconds(10L); private static AkkaRpcService akkaRpcService; @BeforeAll static void setup() { akkaRpcService = ...
We can't since it's an override.
public void testRegisterHealthOnBlockingThreadStep1() { try { Thread.sleep(5000); } catch (InterruptedException e) { throw new RuntimeException(e); } try { RestAssured.defaultParser = Parser.JSON; for (int i = 0; i < ...
throw new RuntimeException(e);
public void testRegisterHealthOnBlockingThreadStep1() { given() .when().get("/start-health") .then().statusCode(200); try { RestAssured.defaultParser = Parser.JSON; for (int i = 0; i < 3; i++) { RestAssured.wh...
class BlockingNonBlockingTest { private static final java.util.logging.Logger rootLogger = java.util.logging.LogManager.getLogManager() .getLogger("io.vertx.core"); private static final InMemoryLogHandler inMemoryLogHandler = new InMemoryLogHandler( record -> record.getLevel().intValue(...
class BlockingNonBlockingTest { @RegisterExtension static final QuarkusUnitTest config = new QuarkusUnitTest() .withApplicationRoot((jar) -> jar .addClasses(BlockingHealthCheck.class, Routes.class) .addAsManifestResource(EmptyAsset.INSTANCE, "beans.xml")); ...
Yeah, I just noticed that I deactivated this test.
void testMetricsInitialization() { }
void testMetricsInitialization() { assertNull(getGaugeValueOrNull("mongodb.connection-pool.size", getTags())); assertNull(getGaugeValueOrNull("mongodb.connection-pool.checked-out-count", getTags())); String name = client.listDatabaseNames().first(); assertEquals(1L, getGaugeVa...
class MongoMetricsTest extends MongoTestBase { @Inject MongoClient client; @RegisterExtension static final QuarkusUnitTest config = new QuarkusUnitTest() .setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class).addClasses(MongoTestBase.class)) .withConfigurationResource("...
class MongoMetricsTest extends MongoTestBase { @Inject MongoClient client; @Inject @RegistryType(type = MetricRegistry.Type.VENDOR) MetricRegistry registry; @RegisterExtension static final QuarkusUnitTest config = new QuarkusUnitTest() .setArchiveProducer(() -> ShrinkWrap.crea...
```suggestion final PreferredLocationsRetriever locationsRetriever = new DefaultPreferredLocationsRetriever( ```
public void testStateLocationsWillBeReturnedIfExist() { final TaskManagerLocation stateLocation = new LocalTaskManagerLocation(); final TestingInputsLocationsRetriever.Builder locationRetrieverBuilder = new TestingInputsLocationsRetriever.Builder(); final ExecutionVertexID consumerId = new ExecutionVertexID(...
final DefaultPreferredLocationsRetriever locationsRetriever = new DefaultPreferredLocationsRetriever(
public void testStateLocationsWillBeReturnedIfExist() { final TaskManagerLocation stateLocation = new LocalTaskManagerLocation(); final TestingInputsLocationsRetriever.Builder locationRetrieverBuilder = new TestingInputsLocationsRetriever.Builder(); final ExecutionVertexID consumerId = new ExecutionVertexID(...
class DefaultPreferredLocationsRetrieverTest extends TestLogger { @Test @Test public void testInputLocationsIgnoresEdgeOfTooManyLocations() { final TestingInputsLocationsRetriever.Builder locationRetrieverBuilder = new TestingInputsLocationsRetriever.Builder(); final ExecutionVertexID consumerId = new Ex...
class DefaultPreferredLocationsRetrieverTest extends TestLogger { @Test @Test public void testInputLocationsIgnoresEdgeOfTooManyLocations() { final TestingInputsLocationsRetriever.Builder locationRetrieverBuilder = new TestingInputsLocationsRetriever.Builder(); final ExecutionVertexID consumerId = new Ex...
nit: "Flink configuration directory (%s) in environment should not be null"
public static void main(String[] args) { EnvironmentInformation.logEnvironmentInfo(LOG, KubernetesSessionClusterEntrypoint.class.getSimpleName(), args); SignalHandler.register(LOG); JvmShutdownSafeguard.installAsShutdownHook(LOG); final String configDir = System.getenv(ConfigConstants.ENV_FLINK_CONF_DIR); ...
"%s environment should not be null!", ConfigConstants.ENV_FLINK_CONF_DIR);
public static void main(String[] args) { EnvironmentInformation.logEnvironmentInfo(LOG, KubernetesSessionClusterEntrypoint.class.getSimpleName(), args); SignalHandler.register(LOG); JvmShutdownSafeguard.installAsShutdownHook(LOG); final String configDir = System.getenv(ConfigConstants.ENV_FLINK_CONF_DIR); ...
class KubernetesSessionClusterEntrypoint extends SessionClusterEntrypoint { public KubernetesSessionClusterEntrypoint(Configuration configuration) { super(configuration); } @Override protected DispatcherResourceManagerComponentFactory createDispatcherResourceManagerComponentFactory(Configuration configuration) ...
class KubernetesSessionClusterEntrypoint extends SessionClusterEntrypoint { public KubernetesSessionClusterEntrypoint(Configuration configuration) { super(configuration); } @Override protected DispatcherResourceManagerComponentFactory createDispatcherResourceManagerComponentFactory(Configuration configuration) ...
Makes sense. Sounds good to me!
public void executeBundles(ProcessContext context) { String body = context.element(); try { mapper.readTree(body); client.executeFhirBundle(fhirStore, body); } catch (IOException | HealthcareHttpException e) { failedBundles.inc(); context.output...
client.executeFhirBundle(fhirStore, body);
public void executeBundles(ProcessContext context) { String body = context.element(); try { mapper.readTree(body); client.executeFhirBundle(fhirStore.get(), body); } catch (IOException | HealthcareHttpException e) { failedBundles.inc(); context....
class ExecuteBundlesFn extends DoFn<String, HealthcareIOError<String>> { private Counter failedBundles = Metrics.counter(ExecuteBundlesFn.class, "failed-bundles"); private transient HealthcareApiClient client; private final ObjectMapper mapper = new ObjectMapper(); /** The Fhir store. */ ...
class ExecuteBundlesFn extends DoFn<String, HealthcareIOError<String>> { private Counter failedBundles = Metrics.counter(ExecuteBundlesFn.class, "failed-bundles"); private transient HealthcareApiClient client; private final ObjectMapper mapper = new ObjectMapper(); /** The Fhir store. */ ...
Yes. I asked about this even with the older APIs. Their resolver on the backend checks the operation against the last path part. All the other APIs with optional versions also list them as required in swagger. You could elide the double slash (e.g. "/keys/{key-name}//export") in the URL in that case, but in my experien...
public Mono<KeyVaultKey> exportKey(String name, String environment) { try { return exportKeyWithResponse(name, "", environment).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } }
return exportKeyWithResponse(name, "", environment).flatMap(FluxUtil::toMono);
public Mono<KeyVaultKey> exportKey(String name, String environment) { try { return exportKeyWithResponse(name, "", environment).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } }
class KeyAsyncClient { private final String apiVersion; static final String ACCEPT_LANGUAGE = "en-US"; static final int DEFAULT_MAX_PAGE_RESULTS = 25; static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; static final String KEY_VAULT_SCOPE = "https: private static final ...
class KeyAsyncClient { private final String apiVersion; static final String ACCEPT_LANGUAGE = "en-US"; static final int DEFAULT_MAX_PAGE_RESULTS = 25; static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; static final String KEY_VAULT_SCOPE = "https: private static final ...
NIT: I'm wondering if something like ``` assertEquals( uploader.getBasePath().getPath(), String.format( "/%s/%s/%s", rootPath, jobID.toHexString(), StateChangeFsUploader.PATH_SUFFIX)); ``` will be more readable, WDYT?
void testBasePath() throws IOException { JobID jobID = JobID.generate(); Path oriBasePath = new Path("file: ChangelogStorageMetricGroup metrics = new ChangelogStorageMetricGroup(createUnregisteredTaskManagerJobMetricGroup()); StateChangeFsUploader uploader = ...
assertThat(basePath.getParent().getParent()).isEqualTo(oriBasePath);
void testBasePath() throws IOException { JobID jobID = JobID.generate(); String rootPath = "/dstl-root-path"; Path oriBasePath = new Path(rootPath); ChangelogStorageMetricGroup metrics = new ChangelogStorageMetricGroup(createUnregisteredTaskManagerJobMetricGroup()); ...
class StateChangeFsUploaderTest { @Test }
class StateChangeFsUploaderTest { @Test }