comment
stringlengths
16
8.84k
method_body
stringlengths
37
239k
target_code
stringlengths
0
242
method_body_after
stringlengths
29
239k
context_before
stringlengths
14
424k
context_after
stringlengths
14
284k
Can't we simply return `values.clone`?
public byte[] getBytes() { BByteArray copy = (BByteArray) this.copy(); return copy.values; }
return copy.values;
public byte[] getBytes() { return values.clone(); }
class BByteArray extends BNewArray { private static BType arrayType = new BArrayType(BTypes.typeByte); private byte[] values; public BByteArray(byte[] values) { this.values = values; this.size = values.length; } public BByteArray() { values = (byte[]) newArrayInstance(Byte.TYPE); } public void add(long index, byte valu...
class BByteArray extends BNewArray { private static BType arrayType = new BArrayType(BTypes.typeByte); private byte[] values; public BByteArray(byte[] values) { this.values = values; this.size = values.length; } public BByteArray() { values = (byte[]) newArrayInstance(Byte.TYPE); } public void add(long index, byte valu...
I think we can remove the DEFAULT_CLUSTER also. The feature is useless but only has compatibility.
public TGetDBPrivsResult getDBPrivs(TGetDBPrivsParams params) throws TException { LOG.debug("get database privileges request: {}", params); TGetDBPrivsResult result = new TGetDBPrivsResult(); List<TDBPrivDesc> tDBPrivs = Lists.newArrayList(); result.setDb_privs(tDBPrivs); UserIdentity currentUser = UserIdentity.fromThr...
String clusterPrefix = SystemInfoService.DEFAULT_CLUSTER + ClusterNamespace.CLUSTER_DELIMITER;
public TGetDBPrivsResult getDBPrivs(TGetDBPrivsParams params) throws TException { LOG.debug("get database privileges request: {}", params); TGetDBPrivsResult result = new TGetDBPrivsResult(); List<TDBPrivDesc> tDBPrivs = Lists.newArrayList(); result.setDb_privs(tDBPrivs); UserIdentity currentUser = UserIdentity.fromThr...
class FrontendServiceImpl implements FrontendService.Iface { private static final Logger LOG = LogManager.getLogger(LeaderImpl.class); private LeaderImpl leaderImpl; private ExecuteEnv exeEnv; public FrontendServiceImpl(ExecuteEnv exeEnv) { leaderImpl = new LeaderImpl(); this.exeEnv = exeEnv; } @Override public TGetDbs...
class FrontendServiceImpl implements FrontendService.Iface { private static final Logger LOG = LogManager.getLogger(LeaderImpl.class); private LeaderImpl leaderImpl; private ExecuteEnv exeEnv; public FrontendServiceImpl(ExecuteEnv exeEnv) { leaderImpl = new LeaderImpl(); this.exeEnv = exeEnv; } @Override public TGetDbs...
the `hashCode` is also inefficient
public int hashCode() { return Objects.hashCode(this.toString()); }
return Objects.hashCode(this.toString());
public int hashCode() { return Objects.hashCode(this.sql); }
class AstKey { private final ParseNode parseNode; public AstKey(ParseNode parseNode) { this.parseNode = parseNode; } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || ! (o instanceof AstKey)) { return false; } AstKey other = (AstKey) o; return this.toString().equals(other.toSt...
class AstKey { private final String sql; public AstKey(ParseNode parseNode) { this.sql = new AstToSQLBuilder.AST2SQLBuilderVisitor(true, false).visit(parseNode); } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || ! (o instanceof AstKey)) { return false; } AstKey other = (AstK...
I think this deserves a "final" here, and the line below too.
public static void main(String[] args) throws Exception { ServerConfiguration configuration = new ServerConfiguration(); CmdLineParser parser = new CmdLineParser(configuration); try { parser.parseArgument(args); fromConfig(configuration).run(); } catch (CmdLineException e) { LOG.error("Unable to parse command line argu...
ServerConfiguration configuration = new ServerConfiguration();
public static void main(String[] args) throws Exception { final ServerConfiguration configuration = new ServerConfiguration(); final CmdLineParser parser = new CmdLineParser(configuration); try { parser.parseArgument(args); fromConfig(configuration).run(); } catch (CmdLineException e) { LOG.error("Unable to parse comma...
class ServerConfiguration { @Option(name = "--job-port", usage = "The job service port. (Default: 11440)") private int jobPort = 11440; @Option(name = "--control-port", usage = "The FnControl port. (Default: 11441)") private int controlPort = 11441; }
class ServerConfiguration { @Option(name = "--job-port", usage = "The job service port. (Default: 11440)") private int jobPort = 11440; @Option(name = "--control-port", usage = "The FnControl port. (Default: 11441)") private int controlPort = 11441; }
The above TODO can be removed.
protected void createColumnAndViewDefs(Analyzer analyzer) throws AnalysisException, UserException { if (cols != null) { if (cols.size() != viewDefStmt.getColLabels().size()) { ErrorReport.reportAnalysisException(ErrorCode.ERR_VIEW_WRONG_LIST); } for (int i = 0; i < cols.size(); ++i) { Type type = viewDefStmt.getBaseTbl...
Type type = viewDefStmt.getBaseTblResultExprs().get(i).getType().clone();
protected void createColumnAndViewDefs(Analyzer analyzer) throws AnalysisException, UserException { if (cols != null) { if (cols.size() != viewDefStmt.getColLabels().size()) { ErrorReport.reportAnalysisException(ErrorCode.ERR_VIEW_WRONG_LIST); } for (int i = 0; i < cols.size(); ++i) { Type type = viewDefStmt.getBaseTbl...
class BaseViewStmt extends DdlStmt { private static final Logger LOG = LogManager.getLogger(BaseViewStmt.class); protected final TableName tableName; protected final List<ColWithComment> cols; protected final QueryStmt viewDefStmt; protected final List<Column> finalCols; protected String originalViewDef; protected Stri...
class BaseViewStmt extends DdlStmt { private static final Logger LOG = LogManager.getLogger(BaseViewStmt.class); protected final TableName tableName; protected final List<ColWithComment> cols; protected final QueryStmt viewDefStmt; protected final List<Column> finalCols; protected String originalViewDef; protected Stri...
Then maybe we substitute `Set<CoLocationGroupDesc>` with `Map<AbstractID, CoLocationGroupDesc>`? ``` final Set<CoLocationGroupDesc> coLocationGroupDescs = getVertices() .stream() .map(vertex -> CoLocationGroupDesc.from(vertex.getCoLocationGroup())) .collect(Collectors.toMap(CoLocationGroupDesc::getId, d -> ...
public Set<CoLocationGroupDesc> getCoLocationGroupDescriptors() { final Set<CoLocationGroup> coLocationGroups = new HashSet<>(); for (JobVertex vertex : getVertices()) { CoLocationGroup coLocationGroup = vertex.getCoLocationGroup(); if (coLocationGroup != null) { coLocationGroups.add(coLocationGroup); } } final Set<CoL...
}
public Set<CoLocationGroupDesc> getCoLocationGroupDescriptors() { final Set<CoLocationGroupDesc> coLocationGroups = IterableUtils .toStream(getVertices()) .map(JobVertex::getCoLocationGroup) .filter(Objects::nonNull) .distinct() .map(CoLocationGroupDesc::from) .collect(Collectors.toSet()); return Collections.unmodifiab...
class JobGraph implements Serializable { private static final long serialVersionUID = 1L; /** List of task vertices included in this job graph. */ private final Map<JobVertexID, JobVertex> taskVertices = new LinkedHashMap<JobVertexID, JobVertex>(); /** The job configuration attached to this job. */ private final Config...
class JobGraph implements Serializable { private static final long serialVersionUID = 1L; /** List of task vertices included in this job graph. */ private final Map<JobVertexID, JobVertex> taskVertices = new LinkedHashMap<JobVertexID, JobVertex>(); /** The job configuration attached to this job. */ private final Config...
Oh, I see! You cache the `Uni` and resubscribe to it every time (well Quarkus does). That's actually interesting, but I'm not sure it's what the user would expect. Imagine: ```java @GET public Uni<String> callMyRemoteService() { return webClient.send().map(r -> r.bodyAsString()); } ``` Basically, it calls a rem...
public Uni<String> cachedMethod(String key) { invocations++; return Uni.createFrom().item(() -> new String()); }
return Uni.createFrom().item(() -> new String());
public Uni<String> cachedMethod(String key) { invocations++; return Uni.createFrom().item(() -> { subscriptions++; return "" + subscriptions; }); }
class CachedService { private int invocations; @CacheResult(cacheName = "test-cache") public int getInvocations() { return invocations; } }
class CachedService { private int invocations; private int subscriptions; @CacheResult(cacheName = "test-cache") public int getInvocations() { return invocations; } }
If you could abstract the query schedule strategy like `Presto` and refactor this class, which would be very great.
private void computeFragmentExecParams() throws Exception { computeFragmentHosts(); instanceIds.clear(); for (FragmentExecParams params : fragmentExecParamsMap.values()) { if (LOG.isDebugEnabled()) { LOG.debug("fragment {} has instances {}", params.fragment.getFragmentId(), params.instanceExecParams.size()); } for (int...
if (bucketShuffleJoinController.isBucketShuffleJoin(destFragment.getFragmentId().asInt())) {
private void computeFragmentExecParams() throws Exception { computeFragmentHosts(); instanceIds.clear(); for (FragmentExecParams params : fragmentExecParamsMap.values()) { if (LOG.isDebugEnabled()) { LOG.debug("fragment {} has instances {}", params.fragment.getFragmentId(), params.instanceExecParams.size()); } for (int...
class Coordinator { private static final Logger LOG = LogManager.getLogger(Coordinator.class); private static final DateFormat DATE_FORMAT = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); private static String localIP = FrontendOptions.getLocalHostAddress(); private static Random instanceRandom = new Random(); Status que...
class Coordinator { private static final Logger LOG = LogManager.getLogger(Coordinator.class); private static final DateFormat DATE_FORMAT = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); private static String localIP = FrontendOptions.getLocalHostAddress(); private static Random instanceRandom = new Random(); Status que...
Currently, the shift operation for tuples is not supported by the runtime. Therefore the runtime checks if the object performing the shift is an array and if not, throws an `OperationNotSupported` exception. The changes proposed in this PR allows the use of shift operation on tuples but this also allows the risk of run...
private void validateTupleSizeAndInherentType() { int numOfMandatoryTypes = this.tupleType.getTupleTypes().size(); if (numOfMandatoryTypes >= this.getLength()) { throw ErrorHelper.getRuntimeException( getModulePrefixedReason(ARRAY_LANG_LIB, SIZE_MISMATCH_ERROR_IDENTIFIER), ErrorCodes.INVALID_MEMBER_SIZE, numOfMandatory...
this.tupleType.getRestType() : this.tupleType.getTupleTypes().get(i));
private void validateTupleSizeAndInherentType() { List<Type> tupleTypesList = this.tupleType.getTupleTypes(); int numOfMandatoryTypes = tupleTypesList.size(); if (numOfMandatoryTypes >= this.getLength()) { throw ErrorHelper.getRuntimeException(getModulePrefixedReason(ARRAY_LANG_LIB, OPERATION_NOT_SUPPORTED_IDENTIFIER),...
class TupleValueImpl extends AbstractArrayValue { protected TupleType tupleType; protected Type type; Object[] refValues; private final int minSize; private final boolean hasRestElement; private BTypedesc typedesc; private TypedescValueImpl inherentType; public TupleValueImpl(Object[] values, TupleType type) { this.ref...
class TupleValueImpl extends AbstractArrayValue { protected TupleType tupleType; protected Type type; Object[] refValues; private final int minSize; private final boolean hasRestElement; private BTypedesc typedesc; private TypedescValueImpl inherentType; public TupleValueImpl(Object[] values, TupleType type) { this.ref...
These two return two different results. Original one is : 2 ^ (tryCount-1) Now: (tryCount -1) ^ 2 What if tryCount = 1, does delay expect to be negative?
long calculateDelayInMs(int tryCount) { long delay; switch (this.retryPolicyType) { case EXPONENTIAL: delay = ((tryCount - 1) * (tryCount - 1) - 1L) * this.retryDelayInMs; break; case FIXED: delay = this.retryDelayInMs; break; default: throw logger.logExceptionAsError(new IllegalArgumentException("Invalid retry policy ...
delay = ((tryCount - 1) * (tryCount - 1) - 1L) * this.retryDelayInMs;
long calculateDelayInMs(int tryCount) { long delay; switch (this.retryPolicyType) { case EXPONENTIAL: delay = ((tryCount - 1) * (tryCount - 1) - 1L) * this.retryDelayInMs; break; case FIXED: delay = this.retryDelayInMs; break; default: throw logger.logExceptionAsError(new IllegalArgumentException("Invalid retry policy ...
class RequestRetryOptions { private final ClientLogger logger = new ClientLogger(RequestRetryOptions.class); private final int maxTries; private final int tryTimeout; private final long retryDelayInMs; private final long maxRetryDelayInMs; private final RetryPolicyType retryPolicyType; private final String secondaryHos...
class RequestRetryOptions { private final ClientLogger logger = new ClientLogger(RequestRetryOptions.class); private final int maxTries; private final int tryTimeout; private final long retryDelayInMs; private final long maxRetryDelayInMs; private final RetryPolicyType retryPolicyType; private final String secondaryHos...
when we add multiLayerProjection, we do not modify its original projects. That is why only translator need to be modified.
public PlanFragment visitPhysicalProject(PhysicalProject<? extends Plan> project, PlanTranslatorContext context) { if (project.child(0) instanceof AbstractPhysicalJoin) { ((AbstractPhysicalJoin<?, ?>) project.child(0)).setShouldTranslateOutput(false); } if (project.child(0) instanceof PhysicalFilter) { if (project.chil...
if (project.hasMultiLayerProjection() && !(inputFragment instanceof MultiCastPlanFragment)) {
public PlanFragment visitPhysicalProject(PhysicalProject<? extends Plan> project, PlanTranslatorContext context) { if (project.child(0) instanceof AbstractPhysicalJoin) { ((AbstractPhysicalJoin<?, ?>) project.child(0)).setShouldTranslateOutput(false); } if (project.child(0) instanceof PhysicalFilter) { if (project.chil...
class PhysicalPlanTranslator extends DefaultPlanVisitor<PlanFragment, PlanTranslatorContext> { private static final Logger LOG = LogManager.getLogger(PhysicalPlanTranslator.class); private final StatsErrorEstimator statsErrorEstimator; private final PlanTranslatorContext context; public PhysicalPlanTranslator() { this(...
class PhysicalPlanTranslator extends DefaultPlanVisitor<PlanFragment, PlanTranslatorContext> { private static final Logger LOG = LogManager.getLogger(PhysicalPlanTranslator.class); private final StatsErrorEstimator statsErrorEstimator; private final PlanTranslatorContext context; public PhysicalPlanTranslator() { this(...
Why have we commented out these lines?
public void process(ServiceNode serviceNode, List<AnnotationAttachmentNode> annotations) { List<BLangFunction> resources = (List<BLangFunction>) serviceNode.getResources(); resources.forEach(res -> validate(serviceNode.getName().getValue(), res, this.diagnosticLog)); }
public void process(ServiceNode serviceNode, List<AnnotationAttachmentNode> annotations) { List<BLangFunction> resources = (List<BLangFunction>) serviceNode.getResources(); resources.forEach(res -> validate(serviceNode.getName().getValue(), res, this.diagnosticLog)); }
class SocketCompilerPlugin extends AbstractCompilerPlugin { private static final String INVALID_RESOURCE_SIGNATURE = "Invalid resource signature for %s in service %s. "; private DiagnosticLog diagnosticLog = null; private int resourceCount = 0; @Override public void init(DiagnosticLog diagnosticLog) { this.diagnosticLo...
class SocketCompilerPlugin extends AbstractCompilerPlugin { private static final String INVALID_RESOURCE_SIGNATURE = "Invalid resource signature for %s in service %s. "; private DiagnosticLog diagnosticLog = null; private int resourceCount = 0; @Override public void init(DiagnosticLog diagnosticLog) { this.diagnosticLo...
WDYM? The class now *potentially* holds two annotation targets. So we first try to create the String representation with the method param AT and if not present, we fallback to the original target.
public String getTargetInfo() { if (target == null) { return ""; } if (methodParameterTarget != null && Kind.METHOD_PARAMETER.equals(methodParameterTarget.kind())) { String method = methodParameterTarget.asMethodParameter().method().name(); if (method.equals(Methods.INIT)) { method = " constructor"; } else { method = "...
}
public String getTargetInfo() { if (target == null) { return ""; } switch (target.kind()) { case FIELD: return target.asField().declaringClass().name() + " case METHOD: String param = target.asMethod().parameterName(position); if (param == null || param.isBlank()) { param = "arg" + position; } String method = target.as...
class InjectionPointInfo { private static boolean isNamedWithoutValue(AnnotationInstance annotation) { if (annotation.name().equals(DotNames.NAMED)) { AnnotationValue name = annotation.value(); return name == null || name.asString().isEmpty(); } return false; } static InjectionPointInfo fromField(FieldInfo field, Class...
class InjectionPointInfo { private static boolean isNamedWithoutValue(AnnotationInstance annotation) { if (annotation.name().equals(DotNames.NAMED)) { AnnotationValue name = annotation.value(); return name == null || name.asString().isEmpty(); } return false; } static InjectionPointInfo fromField(FieldInfo field, Class...
Oh, that was because I don't use the consumer created in beforeTest() and create my own. I'll remove it. It's not necessary.
public void receiveUntilTimeoutMultipleTimes() throws IOException { this.consumer.close(); this.consumer = null; final int numberOfEvents = 15; final int numberOfEvents2 = 3; final String partitionId = "1"; final List<EventData> events = getEventsAsList(numberOfEvents, TestUtils.MESSAGE_TRACKING_ID); final List<EventDa...
this.consumer.close();
public void receiveUntilTimeoutMultipleTimes() { final int numberOfEvents = 15; final int numberOfEvents2 = 3; final String partitionId = "1"; final List<EventData> events = getEventsAsList(numberOfEvents); final List<EventData> events2 = getEventsAsList(numberOfEvents2); final EventHubConsumer consumer = client.create...
class EventHubConsumerIntegrationTest extends IntegrationTestBase { private static final String PARTITION_ID = "0"; private static final int NUMBER_OF_EVENTS = 10; private static final AtomicBoolean HAS_PUSHED_EVENTS = new AtomicBoolean(); private static volatile IntegrationTestEventData testData = null; private EventH...
class EventHubConsumerIntegrationTest extends IntegrationTestBase { private static final String PARTITION_ID = "0"; private static final int NUMBER_OF_EVENTS = 10; private static final AtomicBoolean HAS_PUSHED_EVENTS = new AtomicBoolean(); private static volatile IntegrationTestEventData testData = null; private EventH...
Yea, recording the annotation processing flow and redirecting it to a different element was my preference as well, but I could not see a way to do this because in order to redirect the annotation processing we would need the Visitor instance from the generated accessor method so we could delegate to it. However, the vi...
public FieldVisitor visitField(int access, String name, String descriptor, String signature, Object value) { FieldVisitor superVisitor = super.visitField(access, name, descriptor, signature, value); EntityField ef = fields.get(name); if (fields == null || ef == null) return superVisitor; ef.signature = signature; retur...
return new AnnotationVisitor(Opcodes.ASM7) {
public FieldVisitor visitField(int access, String name, String descriptor, String signature, Object value) { FieldVisitor superVisitor = super.visitField(access, name, descriptor, signature, value); EntityField ef = fields.get(name); if (fields == null || ef == null) return superVisitor; ef.signature = signature; retur...
class PanacheEntityClassVisitor<EntityFieldType extends EntityField> extends ClassVisitor { protected Type thisClass; protected Map<String, ? extends EntityFieldType> fields; private Set<String> methods = new HashSet<>(); private MetamodelInfo<?> modelInfo; private ClassInfo panacheEntityBaseClassInfo; public PanacheEn...
class PanacheEntityClassVisitor<EntityFieldType extends EntityField> extends ClassVisitor { protected Type thisClass; protected Map<String, ? extends EntityFieldType> fields; private Set<String> methods = new HashSet<>(); private MetamodelInfo<?> modelInfo; private ClassInfo panacheEntityBaseClassInfo; public PanacheEn...
We will probably want to add configuration properties for these but I'll let our community add them if they need them.
public QuarkusPathLocationScanner(Collection<Location> locations) { LOGGER.debugv("Locations: {0}", locations); this.scannedResources = new ArrayList<>(); ClassLoader classLoader = Thread.currentThread().getContextClassLoader(); FileSystemScanner fileSystemScanner = null; for (String migrationFile : applicationMigratio...
fileSystemScanner = new FileSystemScanner(StandardCharsets.UTF_8, false, false, false);
public QuarkusPathLocationScanner(Collection<Location> locations) { LOGGER.debugv("Locations: {0}", locations); this.scannedResources = new ArrayList<>(); ClassLoader classLoader = Thread.currentThread().getContextClassLoader(); FileSystemScanner fileSystemScanner = null; for (String migrationFile : applicationMigratio...
class QuarkusPathLocationScanner implements ResourceAndClassScanner { private static final Logger LOGGER = Logger.getLogger(QuarkusPathLocationScanner.class); private static final String LOCATION_SEPARATOR = "/"; private static Collection<String> applicationMigrationFiles = Collections.emptyList(); private static Colle...
class QuarkusPathLocationScanner implements ResourceAndClassScanner { private static final Logger LOGGER = Logger.getLogger(QuarkusPathLocationScanner.class); private static final String LOCATION_SEPARATOR = "/"; private static Collection<String> applicationMigrationFiles = Collections.emptyList(); private static Colle...
Not sure If I understand how more than one node can be set to maintenance (as is the goal when allowing more than one group to be down at a time) if that is the case? This methods checks both wanted state and actual state, so name is a bit misleading.
private Result checkAllNodesAreUp(ClusterState clusterState) { if (moreThanOneGroupAllowedToBeDown()) return allowSettingOfWantedState(); for (NodeInfo storageNodeInfo : clusterInfo.getStorageNodeInfos()) { State wantedState = storageNodeInfo.getUserWantedState().getState(); if (wantedState != UP && wantedState != RETI...
if (moreThanOneGroupAllowedToBeDown()) return allowSettingOfWantedState();
private Result checkAllNodesAreUp(ClusterState clusterState) { for (NodeInfo storageNodeInfo : clusterInfo.getStorageNodeInfos()) { State wantedState = storageNodeInfo.getUserWantedState().getState(); if (wantedState != UP && wantedState != RETIRED) { return createDisallowed("Another storage node wants state " + wanted...
class Result { public enum Action { MUST_SET_WANTED_STATE, ALREADY_SET, DISALLOWED } private final Action action; private final String reason; private Result(Action action, String reason) { this.action = action; this.reason = reason; } public static Result createDisallowed(String reason) { return new Result(Action.DISA...
class Result { public enum Action { MUST_SET_WANTED_STATE, ALREADY_SET, DISALLOWED } private final Action action; private final String reason; private Result(Action action, String reason) { this.action = action; this.reason = reason; } public static Result createDisallowed(String reason) { return new Result(Action.DISA...
will there be readme.md file? pls help to add more specific description on uasge of user token. keyvault, mysql flexible, network feature
public static void main(String[] args) throws Exception { TokenCredential credential = new DefaultAzureCredentialBuilder() .authorityHost(AzureAuthorityHosts.AZURE_PUBLIC_CLOUD) .build(); AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE); HttpPipelinePolicy userTokenPolicy = (context, next) -> { Mono<Stri...
public static void main(String[] args) throws Exception { TokenCredential credential = new DefaultAzureCredentialBuilder() .authorityHost(AzureAuthorityHosts.AZURE_PUBLIC_CLOUD) .build(); AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE); HttpPipelinePolicy userTokenPolicy = new UserTokenPolicy(credential...
class CreateServiceLinker { private static final String USER_TOKEN_HEADER = "x-ms-serviceconnector-user-token"; /** * Main entry point. * * @param args the parameters */ private static void creatSpringCloudAndSQLConnection(AzureResourceManager azureResourceManager, ServiceLinkerManager serviceLinkerManager) { String re...
class CreateServiceLinker { private static final String USER_TOKEN_HEADER = "x-ms-serviceconnector-user-token"; /** * Main entry point. * * @param args the parameters */ private static void createSpringCloudAndSQLConnection(AzureResourceManager azureResourceManager, ServiceLinkerManager serviceLinkerManager) { String r...
Ah, URI#getPath returns a string. > we can use artifactFilePath.getPath() sounds good 👍
public CompletableFuture<JobID> submitJob(@Nonnull JobGraph jobGraph) { CompletableFuture<java.nio.file.Path> jobGraphFileFuture = CompletableFuture.supplyAsync( () -> { try { final java.nio.file.Path jobGraphFile = Files.createTempFile("flink-jobgraph", ".bin"); try (ObjectOutputStream objectOut = new ObjectOutputStre...
artifactFilePath.toUri().getPath()),
public CompletableFuture<JobID> submitJob(@Nonnull JobGraph jobGraph) { CompletableFuture<java.nio.file.Path> jobGraphFileFuture = CompletableFuture.supplyAsync( () -> { try { final java.nio.file.Path jobGraphFile = Files.createTempFile("flink-jobgraph", ".bin"); try (ObjectOutputStream objectOut = new ObjectOutputStre...
class RestClusterClient<T> implements ClusterClient<T> { private static final Logger LOG = LoggerFactory.getLogger(RestClusterClient.class); private final RestClusterClientConfiguration restClusterClientConfiguration; private final Configuration configuration; private final RestClient restClient; private final Executor...
class RestClusterClient<T> implements ClusterClient<T> { private static final Logger LOG = LoggerFactory.getLogger(RestClusterClient.class); private final RestClusterClientConfiguration restClusterClientConfiguration; private final Configuration configuration; private final RestClient restClient; private final Executor...
> How do you know you are getting metrics from the pools and they reflect correct values? Atm I trust the Micormeter code do do that. Let me try and add something ...
public void testNettyEventExecutorMetrics() { testNettyMetrics(2L, NettyEventExecutorMetrics.class); }
testNettyMetrics(2L, NettyEventExecutorMetrics.class);
public void testNettyEventExecutorMetrics() { testNettyMetrics(2L, NettyEventExecutorMetrics.class); }
class NettyMetricsTest { @RegisterExtension static final QuarkusUnitTest config = new QuarkusUnitTest() .withConfigurationResource("test-logging.properties") .overrideConfigKey("quarkus.micrometer.binder-enabled-default", "false") .overrideConfigKey("quarkus.micrometer.binder.netty.enabled", "true") .overrideConfigKey(...
class NettyMetricsTest { @RegisterExtension static final QuarkusUnitTest config = new QuarkusUnitTest() .withApplicationRoot(jar -> jar.addClasses(HelloResource.class)) .withConfigurationResource("test-logging.properties") .overrideConfigKey("quarkus.micrometer.binder-enabled-default", "false") .overrideConfigKey("quar...
@tuichenchuxin Please cast project to ColumnSegment.
private ASTNode createProjection(final ProjectionContext ctx, final AliasSegment alias, final ASTNode projection) { if (projection instanceof AggregationProjectionSegment) { ((AggregationProjectionSegment) projection).setAlias(alias); return projection; } if (projection instanceof ExpressionProjectionSegment) { ((Expre...
ExpressionProjectionSegment result = new ExpressionProjectionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.getText(), (ExpressionSegment) projection);
private ASTNode createProjection(final ProjectionContext ctx, final AliasSegment alias, final ASTNode projection) { if (projection instanceof AggregationProjectionSegment) { ((AggregationProjectionSegment) projection).setAlias(alias); return projection; } if (projection instanceof ExpressionProjectionSegment) { ((Expre...
class MySQLStatementSQLVisitor extends MySQLStatementBaseVisitor<ASTNode> { private int currentParameterIndex; public MySQLStatementSQLVisitor(final Properties props) { } @Override public final ASTNode visitParameterMarker(final ParameterMarkerContext ctx) { return new ParameterMarkerValue(currentParameterIndex++); } @...
class MySQLStatementSQLVisitor extends MySQLStatementBaseVisitor<ASTNode> { private int currentParameterIndex; public MySQLStatementSQLVisitor(final Properties props) { } @Override public final ASTNode visitParameterMarker(final ParameterMarkerContext ctx) { return new ParameterMarkerValue(currentParameterIndex++); } @...
```suggestion supportedPlatforms.add(ProgramFileConstants.ANY_PLATFORM); ``` Can we use this content without adding a String here?
private NativeDependencyResolverImpl(BuildContext buildContext, boolean skipCopyLibsFromDist) { CompilerContext context = buildContext.get(BuildContextField.COMPILER_CONTEXT); this.buildContext = buildContext; this.manifest = ManifestProcessor.getInstance(context).getManifest(); this.sourceRootPath = buildContext.get(B...
supportedPlatforms.add("any");
private NativeDependencyResolverImpl(BuildContext buildContext, boolean skipCopyLibsFromDist) { CompilerContext context = buildContext.get(BuildContextField.COMPILER_CONTEXT); this.buildContext = buildContext; this.manifest = ManifestProcessor.getInstance(context).getManifest(); this.sourceRootPath = buildContext.get(B...
class NativeDependencyResolverImpl implements NativeDependencyResolver { private static final CompilerContext.Key<NativeDependencyResolver> JAR_RESOLVER_KEY = new CompilerContext.Key<>(); private List<String> supportedPlatforms = Arrays.stream(ProgramFileConstants.SUPPORTED_PLATFORMS) .collect(Collectors.toList()); pri...
class NativeDependencyResolverImpl implements NativeDependencyResolver { private static final CompilerContext.Key<NativeDependencyResolver> JAR_RESOLVER_KEY = new CompilerContext.Key<>(); private List<String> supportedPlatforms = Arrays.stream(ProgramFileConstants.SUPPORTED_PLATFORMS) .collect(Collectors.toList()); pri...
Don't we need to check for `ERROR_VARIABLE` here? Is it final by default? I think it is better to add a default clause as well which throws an error to make sure that no unexpected value gets ignored here. WDYT?
private void recursivelySetFinalFlag(BLangVariable variable) { switch (variable.getKind()) { case VARIABLE: variable.symbol.flags |= Flags.FINAL; break; case TUPLE_VARIABLE: ((BLangTupleVariable) variable).memberVariables.forEach(this::recursivelySetFinalFlag); break; case RECORD_VARIABLE: ((BLangRecordVariable) variab...
switch (variable.getKind()) {
private void recursivelySetFinalFlag(BLangVariable variable) { if (variable == null) { return; } switch (variable.getKind()) { case VARIABLE: if (variable.symbol == null) { return; } variable.symbol.flags |= Flags.FINAL; break; case TUPLE_VARIABLE: BLangTupleVariable tupleVariable = (BLangTupleVariable) variable; tuple...
class SemanticAnalyzer extends BLangNodeVisitor { private static final CompilerContext.Key<SemanticAnalyzer> SYMBOL_ANALYZER_KEY = new CompilerContext.Key<>(); private static final String ANONYMOUS_RECORD_NAME = "anonymous-record"; private static final String NULL_LITERAL = "null"; private SymbolTable symTable; private...
class SemanticAnalyzer extends BLangNodeVisitor { private static final CompilerContext.Key<SemanticAnalyzer> SYMBOL_ANALYZER_KEY = new CompilerContext.Key<>(); private static final String ANONYMOUS_RECORD_NAME = "anonymous-record"; private static final String NULL_LITERAL = "null"; private static final String LEFT_BRAC...
Maybe you could use `Utils.toSqlString` here — and consider renaming this function to a more descriptive name.
/**
 * Renders this cost-state event as a human-readable string for logging and debugging.
 */
public String toString() {
    StringBuilder text = new StringBuilder("CostStateEvent{");
    text.append("groupExpression=").append(getGroupExpression());
    text.append(", cost=").append(cost);
    text.append(", physicalProperties=").append(physicalProperties);
    text.append('}');
    return text.toString();
}
+ '}';
// Builds the debug representation via the shared Utils.toSqlString helper so all optimizer events format consistently.
public String toString() { return Utils.toSqlString("CostStateEvent", "groupExpression", getGroupExpression(), "cost", cost, "physicalProperties", physicalProperties); }
class CostStateUpdateEvent extends StateEvent { private final double cost; private final PhysicalProperties physicalProperties; private CostStateUpdateEvent(GroupExpression groupExpression, double cost, PhysicalProperties physicalProperties) { super(groupExpression); this.cost = cost; this.physicalProperties = physical...
class CostStateUpdateEvent extends StateEvent { private final double cost; private final PhysicalProperties physicalProperties; private CostStateUpdateEvent(GroupExpression groupExpression, double cost, PhysicalProperties physicalProperties) { super(groupExpression); this.cost = cost; this.physicalProperties = physical...
Why are elements added into the same `mysqlTypeCodes` list in `PrepareStmt`? Could this cause the list size to grow larger than the number of placeholders?
private void handleExecute() { packetBuf = packetBuf.order(ByteOrder.LITTLE_ENDIAN); int stmtId = packetBuf.getInt(); packetBuf.get(); packetBuf.getInt(); PrepareStmtContext prepareCtx = ctx.getPreparedStmt(String.valueOf(stmtId)); if (null == prepareCtx) { ctx.getState().setError("msg: Not Found prepared statement, st...
.forEach(i -> prepareCtx.getStmt().addMysqlTypeCodes((int) packetBuf.getChar()));
private void handleExecute() { packetBuf = packetBuf.order(ByteOrder.LITTLE_ENDIAN); int stmtId = packetBuf.getInt(); packetBuf.get(); packetBuf.getInt(); PrepareStmtContext prepareCtx = ctx.getPreparedStmt(String.valueOf(stmtId)); if (null == prepareCtx) { ctx.getState().setError("msg: Not Found prepared statement, st...
class ConnectProcessor { private static final Logger LOG = LogManager.getLogger(ConnectProcessor.class); protected final ConnectContext ctx; private ByteBuffer packetBuf; protected StmtExecutor executor = null; public ConnectProcessor(ConnectContext context) { this.ctx = context; } private void handleInitDb() { String ...
class ConnectProcessor { private static final Logger LOG = LogManager.getLogger(ConnectProcessor.class); protected final ConnectContext ctx; private ByteBuffer packetBuf; protected StmtExecutor executor = null; public ConnectProcessor(ConnectContext context) { this.ctx = context; } private void handleInitDb() { String ...
How do followers recover the default warehouse?
private void transferToLeader() { FrontendNodeType oldType = feType; if (replayer != null) { replayer.setStop(); try { replayer.join(); } catch (InterruptedException e) { LOG.warn("got exception when stopping the replayer thread", e); } replayer = null; } isReady.set(false); try { journal.open(); if (!haProtocol.fencin...
updateDefaultWarehouse();
private void transferToLeader() { FrontendNodeType oldType = feType; if (replayer != null) { replayer.setStop(); try { replayer.join(); } catch (InterruptedException e) { LOG.warn("got exception when stopping the replayer thread", e); } replayer = null; } isReady.set(false); try { journal.open(); if (!haProtocol.fencin...
class SingletonHolder { private static final GlobalStateMgr INSTANCE = new GlobalStateMgr(); }
class SingletonHolder { private static final GlobalStateMgr INSTANCE = new GlobalStateMgr(); }
How about changing the usage of `CompletableFuture` to [1]? Having a completed future will avoid the async aspect of the futures. [1] https://github.com/ballerina-platform/ballerina-lang/blob/master/language-server/modules/langserver-core/src/main/java/org/ballerinalang/langserver/BallerinaTextDocumentService.java#L13...
public CompletableFuture<VariablesResponse> variables(VariablesArguments args) { VariablesResponse variablesResponse = new VariablesResponse(); try { Integer frameId = scopeIdToFrameIds.get(args.getVariablesReference()); if (frameId == null) { variablesResponse.setVariables(computeChildVariables(args)); return Completa...
return CompletableFuture.completedFuture(variablesResponse);
public CompletableFuture<VariablesResponse> variables(VariablesArguments args) { VariablesResponse variablesResponse = new VariablesResponse(); try { Integer frameId = scopeIdToFrameIds.get(args.getVariablesReference()); if (frameId == null) { variablesResponse.setVariables(computeChildVariables(args)); return Completa...
class JBallerinaDebugServer implements IDebugProtocolServer { private IDebugProtocolClient client; private ClientConfigHolder clientConfigHolder; private DebugExecutionManager executionManager; private JDIEventProcessor eventProcessor; private final ExecutionContext context; private ThreadReferenceProxyImpl activeThrea...
class JBallerinaDebugServer implements IDebugProtocolServer { private IDebugProtocolClient client; private ClientConfigHolder clientConfigHolder; private DebugExecutionManager executionManager; private JDIEventProcessor eventProcessor; private final ExecutionContext context; private ThreadReferenceProxyImpl activeThrea...
would rather keep the comment and delete the commented code.
protected void runPendingJob() throws AlterCancelException { long numTablets = 0; AgentBatchTask batchTask = new AgentBatchTask(); MarkedCountDownLatch<Long, Long> countDownLatch; try (ReadLockedDatabase db = getReadLockedDatabase(dbId)) { LakeTable table = getTableOrThrow(db, tableId); Preconditions.checkState(table.g...
protected void runPendingJob() throws AlterCancelException { long numTablets = 0; AgentBatchTask batchTask = new AgentBatchTask(); MarkedCountDownLatch<Long, Long> countDownLatch; try (ReadLockedDatabase db = getReadLockedDatabase(dbId)) { LakeTable table = getTableOrThrow(db, tableId); Preconditions.checkState(table.g...
class LakeTableSchemaChangeJob extends AlterJobV2 { private static final Logger LOG = LogManager.getLogger(LakeTableSchemaChangeJob.class); @SerializedName(value = "partitionIndexTabletMap") private Table<Long, Long, Map<Long, Long>> partitionIndexTabletMap = HashBasedTable.create(); @SerializedName(value = "partitionI...
class LakeTableSchemaChangeJob extends AlterJobV2 { private static final Logger LOG = LogManager.getLogger(LakeTableSchemaChangeJob.class); @SerializedName(value = "partitionIndexTabletMap") private Table<Long, Long, Map<Long, Long>> partitionIndexTabletMap = HashBasedTable.create(); @SerializedName(value = "partitionI...
If the job is cancelled by the user, `clearJob()` will be called twice. Set these strings to empty to ensure the idempotency of the `clearJob` function.
private void clearJob() { Preconditions.checkState(state == JobState.FINISHED || state == JobState.CANCELLED); LOG.debug("kill etl job and delete etl files. id: {}, state: {}", id, state); SparkEtlJobHandler handler = new SparkEtlJobHandler(); if (state == JobState.CANCELLED) { if ((!Strings.isNullOrEmpty(appId) && spa...
appId = "";
private void clearJob() { Preconditions.checkState(state == JobState.FINISHED || state == JobState.CANCELLED); LOG.debug("kill etl job and delete etl files. id: {}, state: {}", id, state); SparkEtlJobHandler handler = new SparkEtlJobHandler(); if (state == JobState.CANCELLED) { if ((!Strings.isNullOrEmpty(appId) && spa...
class SparkLoadJob extends BulkLoadJob { private static final Logger LOG = LogManager.getLogger(SparkLoadJob.class); private SparkResource sparkResource; private long etlStartTimestamp = -1; private String appId = ""; private String etlOutputPath = ""; private Map<String, Pair<String, Long>> tabletMetaToFileInfo = Maps...
class SparkLoadJob extends BulkLoadJob { private static final Logger LOG = LogManager.getLogger(SparkLoadJob.class); private SparkResource sparkResource; private long etlStartTimestamp = -1; private String appId = ""; private String etlOutputPath = ""; private Map<String, Pair<String, Long>> tabletMetaToFileInfo = Maps...
Is this necessary, given that we'll call `FactoryUtil.validateFactoryOptions` and `FactoryUtil.validateUnconsumedKeys` later on?
public DynamicTableSource createDynamicTableSource(Context context) { createTableFactoryHelper(this, context).validateExcept(FIELDS); Configuration options = new Configuration(); context.getCatalogTable().getOptions().forEach(options::setString); TableSchema schema = TableSchemaUtils.getPhysicalSchema(context.getCatalo...
createTableFactoryHelper(this, context).validateExcept(FIELDS);
public DynamicTableSource createDynamicTableSource(Context context) { Configuration options = new Configuration(); context.getCatalogTable().getOptions().forEach(options::setString); TableSchema schema = TableSchemaUtils.getPhysicalSchema(context.getCatalogTable().getSchema()); DataGenerator[] fieldGenerators = new Dat...
class DataGenTableSourceFactory implements DynamicTableSourceFactory { public static final String IDENTIFIER = "datagen"; public static final Long ROWS_PER_SECOND_DEFAULT_VALUE = 10000L; public static final ConfigOption<Long> ROWS_PER_SECOND = key("rows-per-second") .longType() .defaultValue(ROWS_PER_SECOND_DEFAULT_VAL...
class DataGenTableSourceFactory implements DynamicTableSourceFactory { public static final String IDENTIFIER = "datagen"; public static final Long ROWS_PER_SECOND_DEFAULT_VALUE = 10000L; public static final int RANDOM_STRING_LENGTH_DEFAULT = 100; public static final ConfigOption<Long> ROWS_PER_SECOND = key("rows-per-se...
nit: can use `hasSize` This also applies to below lines.
void testLazyInitialization() throws Exception { final int parallelism = 3; final int configuredMaxParallelism = 12; final ExecutionJobVertex ejv = createDynamicExecutionJobVertex(parallelism, configuredMaxParallelism, -1); Assertions.assertThat(ejv.getParallelism()).isEqualTo(parallelism); Assertions.assertThat(ejv.ge...
Assertions.assertThat(ejv.getTaskVertices().length).isEqualTo(3);
void testLazyInitialization() throws Exception { final int parallelism = 3; final int configuredMaxParallelism = 12; final ExecutionJobVertex ejv = createDynamicExecutionJobVertex(parallelism, configuredMaxParallelism, -1); assertThat(ejv.getParallelism()).isEqualTo(parallelism); assertThat(ejv.getMaxParallelism()).isE...
class ExecutionJobVertexTest { @RegisterExtension static final TestExecutorExtension<ScheduledExecutorService> EXECUTOR_RESOURCE = TestingUtils.defaultExecutorExtension(); @Test void testParallelismGreaterThanMaxParallelism() { JobVertex jobVertex = new JobVertex("testVertex"); jobVertex.setInvokableClass(AbstractInvok...
class ExecutionJobVertexTest { @RegisterExtension static final TestExecutorExtension<ScheduledExecutorService> EXECUTOR_RESOURCE = TestingUtils.defaultExecutorExtension(); @Test void testParallelismGreaterThanMaxParallelism() { JobVertex jobVertex = new JobVertex("testVertex"); jobVertex.setInvokableClass(AbstractInvok...
`describe formatted tbl` => `DESCRIBE FORMATTED `TBL`` ?
/**
 * Verifies that DESCRIBE statements round-trip through the Hive dialect parser.
 */
public void testDescribeTable() {
    sql("describe tbl").ok("DESCRIBE `TBL`");
    sql("describe extended tbl").ok("DESCRIBE EXTENDED `TBL`");
    // "describe formatted" must unparse as DESCRIBE FORMATTED, not DESCRIBE EXTENDED.
    sql("describe formatted tbl").ok("DESCRIBE FORMATTED `TBL`");
}
}
// Verifies DESCRIBE [EXTENDED|FORMATTED] statements parse and unparse with the expected keyword preserved.
public void testDescribeTable() { sql("describe tbl").ok("DESCRIBE `TBL`"); sql("describe extended tbl").ok("DESCRIBE EXTENDED `TBL`"); sql("describe formatted tbl").ok("DESCRIBE FORMATTED `TBL`"); }
class FlinkHiveSqlParserImplTest extends SqlParserTest { @Override protected SqlParserImplFactory parserImplFactory() { return FlinkHiveSqlParserImpl.FACTORY; } @Override public void testDescribeStatement() { } @Override public void testTableHintsInInsert() { } @Override public void testDescribeSchema() { } @Test publi...
class FlinkHiveSqlParserImplTest extends SqlParserTest { @Override protected SqlParserImplFactory parserImplFactory() { return FlinkHiveSqlParserImpl.FACTORY; } @Override public void testDescribeStatement() { } @Override public void testTableHintsInInsert() { } @Override public void testDescribeSchema() { } @Test publi...
we should call `view.unsynchronizedGetNumberOfQueuedBuffers` instead.
/**
 * Returns the number of buffers queued in the consumed subpartition, or 0 if no view is attached.
 * NOTE(review): assumes {@code subpartitionView} can be cleared concurrently — the field is read
 * once into a local so the null check and the call operate on the same snapshot, avoiding an NPE.
 */
public int unsynchronizedGetNumberOfQueuedBuffers() {
    ResultSubpartitionView view = subpartitionView;
    if (view != null) {
        // Use the local snapshot, not the field, so a concurrent reset cannot slip in between.
        return view.unsynchronizedGetNumberOfQueuedBuffers();
    }
    return 0;
}
return subpartitionView.unsynchronizedGetNumberOfQueuedBuffers();
// Best-effort, lock-free count of buffers queued in the attached subpartition view (0 when detached).
public int unsynchronizedGetNumberOfQueuedBuffers() {
    final ResultSubpartitionView currentView = subpartitionView;
    return currentView == null ? 0 : currentView.unsynchronizedGetNumberOfQueuedBuffers();
}
class LocalInputChannel extends InputChannel implements BufferAvailabilityListener { private static final Logger LOG = LoggerFactory.getLogger(LocalInputChannel.class); private final Object requestLock = new Object(); /** The local partition manager. */ private final ResultPartitionManager partitionManager; /** Task ev...
class LocalInputChannel extends InputChannel implements BufferAvailabilityListener { private static final Logger LOG = LoggerFactory.getLogger(LocalInputChannel.class); private final Object requestLock = new Object(); /** The local partition manager. */ private final ResultPartitionManager partitionManager; /** Task ev...
That's a very good point and I have to admit that I need to check the code and tests...
public FieldInjector(Field field, Object testInstance) throws Exception { this.field = field; ArcContainer container = Arc.container(); BeanManager beanManager = container.beanManager(); java.lang.reflect.Type requiredType = field.getGenericType(); Annotation[] qualifiers = getQualifiers(field, beanManager); Object inj...
if (isListRequiredType(requiredType)) {
public FieldInjector(Field field, Object testInstance) throws Exception { this.field = field; ArcContainer container = Arc.container(); BeanManager beanManager = container.beanManager(); java.lang.reflect.Type requiredType = field.getGenericType(); Annotation[] qualifiers = getQualifiers(field, beanManager); Object inj...
class FieldInjector { private final Field field; private final List<InstanceHandle<?>> unsetHandles; void unset(Object testInstance) throws Exception { for (InstanceHandle<?> handle : unsetHandles) { if (handle.getBean() != null && handle.getBean().getScope().equals(Dependent.class)) { try { handle.destroy(); } catch (...
class FieldInjector { private final Field field; private final List<InstanceHandle<?>> unsetHandles; void unset(Object testInstance) throws Exception { for (InstanceHandle<?> handle : unsetHandles) { if (handle.getBean() != null && handle.getBean().getScope().equals(Dependent.class)) { try { handle.destroy(); } catch (...
Why check `isMaterializedView` inside this function instead of checking it at the call site and calling `getPartitionNamesToRefreshForMv` directly?
public Set<String> getUpdatedPartitionNamesOfTable(Table base, boolean withMv) { if (!base.isLocalTable()) { return Sets.newHashSet(); } OlapTable baseTable = (OlapTable) base; Map<String, BasePartitionInfo> baseTableVisibleVersionMap = getRefreshScheme() .getAsyncRefreshContext() .getBaseTableVisibleVersionMap() .comp...
Set<String> partitionNames = ((MaterializedView) baseTable).getPartitionNamesToRefreshForMv();
public Set<String> getUpdatedPartitionNamesOfTable(Table base, boolean withMv) { if (!base.isLocalTable()) { return Sets.newHashSet(); } OlapTable baseTable = (OlapTable) base; Map<String, BasePartitionInfo> baseTableVisibleVersionMap = getRefreshScheme() .getAsyncRefreshContext() .getBaseTableVisibleVersionMap() .comp...
class MvRefreshScheme { @SerializedName(value = "type") private RefreshType type; @SerializedName(value = "asyncRefreshContext") private AsyncRefreshContext asyncRefreshContext; @SerializedName(value = "lastRefreshTime") private long lastRefreshTime; public MvRefreshScheme() { this.type = RefreshType.ASYNC; this.asyncR...
class MvRefreshScheme { @SerializedName(value = "type") private RefreshType type; @SerializedName(value = "asyncRefreshContext") private AsyncRefreshContext asyncRefreshContext; @SerializedName(value = "lastRefreshTime") private long lastRefreshTime; public MvRefreshScheme() { this.type = RefreshType.ASYNC; this.asyncR...
```suggestion final Optional<AllocatedSlot> freedSlot = slotPool.freeReservedSlot(allocationId, currentTime); ```
public ResourceCounter freeReservedSlot(AllocationID allocationId, @Nullable Throwable cause, long currentTime) { LOG.debug("Release slot {}.", allocationId); final Optional<AllocatedSlot> releasedSlot = slotPool.freeReservedSlot(allocationId, currentTime); Optional<ResourceCounter> previouslyFulfilledRequirement = rel...
final Optional<AllocatedSlot> releasedSlot = slotPool.freeReservedSlot(allocationId, currentTime);
public ResourceCounter freeReservedSlot(AllocationID allocationId, @Nullable Throwable cause, long currentTime) { LOG.debug("Free reserved slot {}.", allocationId); final Optional<AllocatedSlot> freedSlot = slotPool.freeReservedSlot(allocationId, currentTime); Optional<ResourceCounter> previouslyFulfilledRequirement = ...
class DefaultDeclarativeSlotPool implements DeclarativeSlotPool { private static final Logger LOG = LoggerFactory.getLogger(DefaultDeclarativeSlotPool.class); private final Consumer<? super Collection<ResourceRequirement>> notifyNewResourceRequirements; private final Consumer<? super Collection<? extends PhysicalSlot>>...
class DefaultDeclarativeSlotPool implements DeclarativeSlotPool { private static final Logger LOG = LoggerFactory.getLogger(DefaultDeclarativeSlotPool.class); private final Consumer<? super Collection<ResourceRequirement>> notifyNewResourceRequirements; private final Consumer<? super Collection<? extends PhysicalSlot>>...
We can revisit this separately.
public void visit(BLangTypeDefinition astTypeDefinition) { BType type = getDefinedType(astTypeDefinition); Name displayName = astTypeDefinition.symbol.name; if (type.tag == TypeTags.RECORD) { BRecordType recordType = (BRecordType) type; if (recordType.shouldPrintShape()) { displayName = new Name(recordType.toString());...
&& !(Symbols.isFlagOn(typeSymbol.flags, Flags.CLASS))) {
public void visit(BLangTypeDefinition astTypeDefinition) { BType type = getDefinedType(astTypeDefinition); Name displayName = astTypeDefinition.symbol.name; if (type.tag == TypeTags.RECORD) { BRecordType recordType = (BRecordType) type; if (recordType.shouldPrintShape()) { displayName = new Name(recordType.toString());...
class BIRGen extends BLangNodeVisitor { private static final CompilerContext.Key<BIRGen> BIR_GEN = new CompilerContext.Key<>(); public static final String DEFAULT_WORKER_NAME = "function"; public static final String CLONE_READ_ONLY = "cloneReadOnly"; private BIRGenEnv env; private Names names; private final SymbolTable...
class BIRGen extends BLangNodeVisitor { private static final CompilerContext.Key<BIRGen> BIR_GEN = new CompilerContext.Key<>(); public static final String DEFAULT_WORKER_NAME = "function"; public static final String CLONE_READ_ONLY = "cloneReadOnly"; private BIRGenEnv env; private Names names; private final SymbolTable...
I think I misunderstood, removed it.
/**
 * Returns whether the given {@link DoFn} declares any state cell of type {@link MultimapState}.
 * Requiring time-sorted input is unrelated to multimap state and must not be OR-ed in here.
 */
public static boolean usesMultimapState(DoFn<?, ?> doFn) {
    return usesGivenStateClass(doFn, MultimapState.class);
}
return usesGivenStateClass(doFn, MultimapState.class) || requiresTimeSortedInput(doFn);
// True iff the DoFn declares at least one state cell of type MultimapState.
public static boolean usesMultimapState(DoFn<?, ?> doFn) { return usesGivenStateClass(doFn, MultimapState.class); }
class %s: timer declaration field %s is not accessible.", format(DoFn.class), target.getClass().getName(), timerFamilyDeclaration.field().getName())); } } public static boolean isSplittable(DoFn<?, ?> doFn) { return signatureForDoFn(doFn).processElement().isSplittable(); }
class %s: timer declaration field %s is not accessible.", format(DoFn.class), target.getClass().getName(), timerFamilyDeclaration.field().getName())); } } public static boolean isSplittable(DoFn<?, ?> doFn) { return signatureForDoFn(doFn).processElement().isSplittable(); }
this description is used only for the purpose of logging if the connection is lost (see [ZooKeeperMultipleComponentLeaderElectionDriver:175](https://github.com/apache/flink/blob/23d942cb6bba947ca3844687a65e8d0451c62041/flink-runtime/src/main/java/org/apache/flink/runtime/leaderelection/ZooKeeperMultipleComponentLeaderE...
private MultipleComponentLeaderElectionService getOrInitializeSingleLeaderElectionService() { if (multipleComponentLeaderElectionService == null) { try { multipleComponentLeaderElectionService = new DefaultMultipleComponentLeaderElectionService( fatalErrorHandler, "Single leader election service.", new ZooKeeperMultipl...
"Single leader election service.",
private MultipleComponentLeaderElectionService getOrInitializeSingleLeaderElectionService() { synchronized (lock) { if (multipleComponentLeaderElectionService == null) { try { multipleComponentLeaderElectionService = new DefaultMultipleComponentLeaderElectionService( fatalErrorHandler, new ZooKeeperMultipleComponentLea...
class ZooKeeperMultipleComponentLeaderElectionHaServices extends AbstractZooKeeperHaServices { private final Object lock = new Object(); private final CuratorFramework leaderNamespacedCuratorFramework; private final FatalErrorHandler fatalErrorHandler; @Nullable @GuardedBy("lock") private MultipleComponentLeaderElectio...
class ZooKeeperMultipleComponentLeaderElectionHaServices extends AbstractZooKeeperHaServices { private final Object lock = new Object(); private final CuratorFramework leaderNamespacedCuratorFramework; private final FatalErrorHandler fatalErrorHandler; @Nullable @GuardedBy("lock") private MultipleComponentLeaderElectio...
It would be better to have this logic in one place and not distributed across two different classes (head is being marked as ended in the processor, while non heads are marked via task). Also what you proposed adds an (additional) cyclic dependency between Task and Processor, which is also not good. Maybe the best ap...
private boolean checkFinished() throws Exception { boolean isFinished = input.isFinished(); if (isFinished) { if (streamOperator instanceof BoundedOneInput) { synchronized (lock) { ((BoundedOneInput) streamOperator).endInput(); } } } return isFinished; }
if (streamOperator instanceof BoundedOneInput) {
private boolean checkFinished() throws Exception { boolean isFinished = input.isFinished(); if (isFinished) { synchronized (lock) { operatorChain.endInput(1); } } return isFinished; }
class StreamInputProcessor<IN> { private static final Logger LOG = LoggerFactory.getLogger(StreamInputProcessor.class); private final StreamTaskInput input; private final Object lock; /** Valve that controls how watermarks and stream statuses are forwarded. */ private StatusWatermarkValve statusWatermarkValve; private ...
class StreamInputProcessor<IN> { private static final Logger LOG = LoggerFactory.getLogger(StreamInputProcessor.class); private final StreamTaskInput input; private final Object lock; private final OperatorChain<?, ?> operatorChain; /** Valve that controls how watermarks and stream statuses are forwarded. */ private St...
OpenJDK has `com.sun.management` available and is giving numbers. I will try with another JDK, maybe IBM's.
private void printSystemInformation(StringBuilder stringBuilder) { try { long totalMemory = Runtime.getRuntime().totalMemory() / 1024; long freeMemory = Runtime.getRuntime().freeMemory() / 1024; long maxMemory = Runtime.getRuntime().maxMemory() / 1024; String used_Memory = totalMemory - freeMemory + " KB"; String avail...
OperatingSystemMXBean mbean = (com.sun.management.OperatingSystemMXBean)
private void printSystemInformation(StringBuilder stringBuilder) { try { long totalMemory = Runtime.getRuntime().totalMemory() / 1024; long freeMemory = Runtime.getRuntime().freeMemory() / 1024; long maxMemory = Runtime.getRuntime().maxMemory() / 1024; String usedMemory = totalMemory - freeMemory + " KB"; String availa...
class ClientSideRequestStatistics { private final static int MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING = 10; private final static DateTimeFormatter responseTimeFormatter = DateTimeFormatter.ofPattern("dd MMM yyyy HH:mm:ss.SSS").withLocale(Locale.US); private ZonedDateTime requestStartTime; private ZonedDateTime requestEn...
class ClientSideRequestStatistics { private final static int MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING = 10; private final static DateTimeFormatter responseTimeFormatter = DateTimeFormatter.ofPattern("dd MMM yyyy HH:mm:ss.SSS").withLocale(Locale.US); private final static OperatingSystemMXBean mbean = (com.sun.management....
The diff output you posted indicates modifications in a snippet of Java code likely from a version control system such as git. Here are some observations and suggestions for improvements: 1. **Alignment and Indentation:** - The alignment correction for the string literals is an improvement to code readability. 2....
private static void collectTableMetrics(MetricVisitor visitor, boolean minifyTableMetrics) { GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); List<String> dbNames = globalStateMgr.getDbNames(); for (String dbName : dbNames) { Database db = GlobalStateMgr.getCurrentState().getDb(dbName); if (null == db)...
}
private static void collectTableMetrics(MetricVisitor visitor, boolean minifyTableMetrics) { GlobalStateMgr globalStateMgr = GlobalStateMgr.getCurrentState(); List<String> dbNames = globalStateMgr.getDbNames(); for (String dbName : dbNames) { Database db = GlobalStateMgr.getCurrentState().getDb(dbName); if (null == db)...
class MetricRepo { private static final Logger LOG = LogManager.getLogger(MetricRepo.class); private static final MetricRegistry METRIC_REGISTER = new MetricRegistry(); private static final StarRocksMetricRegistry STARROCKS_METRIC_REGISTER = new StarRocksMetricRegistry(); public static volatile boolean isInit = false; ...
class MetricRepo { private static final Logger LOG = LogManager.getLogger(MetricRepo.class); private static final MetricRegistry METRIC_REGISTER = new MetricRegistry(); private static final StarRocksMetricRegistry STARROCKS_METRIC_REGISTER = new StarRocksMetricRegistry(); public static volatile boolean isInit = false; ...
Actually, the JsonParser would detect this ...
static void fillTensor(TokenBuffer buffer, TensorFieldValue tensorFieldValue) { Tensor.Builder builder = Tensor.Builder.of(tensorFieldValue.getDataType().getTensorType()); expectOneOf(buffer.current(), JsonToken.START_OBJECT, JsonToken.START_ARRAY); int initNesting = buffer.nesting(); while ( ! buffer.isEmpty()) { Supp...
throw new IllegalArgumentException("incomplete JSON structure for " + tensorFieldValue);
static void fillTensor(TokenBuffer buffer, TensorFieldValue tensorFieldValue) { Tensor.Builder builder = Tensor.Builder.of(tensorFieldValue.getDataType().getTensorType()); expectOneOf(buffer.current(), JsonToken.START_OBJECT, JsonToken.START_ARRAY); int initNesting = buffer.nesting(); while (true) { Supplier<Token> loo...
class TensorReader { public static final String TENSOR_TYPE = "type"; public static final String TENSOR_ADDRESS = "address"; public static final String TENSOR_CELLS = "cells"; public static final String TENSOR_VALUES = "values"; public static final String TENSOR_BLOCKS = "blocks"; public static final String TENSOR_VALU...
class TensorReader { public static final String TENSOR_TYPE = "type"; public static final String TENSOR_ADDRESS = "address"; public static final String TENSOR_CELLS = "cells"; public static final String TENSOR_VALUES = "values"; public static final String TENSOR_BLOCKS = "blocks"; public static final String TENSOR_VALU...
Fixed (Introduced separate `roCellContainng` to handle the readonly case)
public static CellSemType cellContaining(Env env, SemType ty, CellAtomicType.CellMutability mut) { assert !(ty instanceof CellSemType); CellAtomicType atomicCell = CellAtomicType.from(ty, mut); TypeAtom atom = env.cellAtom(atomicCell); BddNode bdd = bddAtom(atom); ComplexSemType complexSemType = PredefinedType.basicSub...
TypeAtom atom = env.cellAtom(atomicCell);
public static CellSemType cellContaining(Env env, SemType ty, CellAtomicType.CellMutability mut) { assert Core.isNever(ty) || !Core.isSubtypeSimple(ty, PredefinedType.CELL); CellAtomicType atomicCell = CellAtomicType.from(ty, mut); TypeAtom atom = env.cellAtom(atomicCell); BddNode bdd = bddAtom(atom); ComplexSemType co...
/**
 * Helpers for constructing cell semtypes with a fixed mutability.
 */
class CellSubtype {
    /**
     * Creates a cell containing {@code ty} with the default (limited) mutability.
     * The previous default of {@code CELL_MUT_NONE} produced read-only cells everywhere;
     * read-only construction is now explicit via {@link #roCellContaining}.
     */
    public static CellSemType cellContaining(Env env, SemType ty) {
        return cellContaining(env, ty, CELL_MUT_LIMITED);
    }

    /**
     * Creates a read-only cell (no mutability) containing {@code ty}.
     */
    public static CellSemType roCellContaining(Env env, SemType ty) {
        return cellContaining(env, ty, CELL_MUT_NONE);
    }
}
// Helpers for constructing cell semtypes with a fixed mutability: the default variant uses limited
// mutability, while roCellContaining builds the read-only (no-mutability) form.
class CellSubtype { public static CellSemType cellContaining(Env env, SemType ty) { return cellContaining(env, ty, CELL_MUT_LIMITED); } public static CellSemType roCellContaining(Env env, SemType ty) { return cellContaining(env, ty, CELL_MUT_NONE); } }
According to this, order doesn't matter. https://stackoverflow.com/a/37986481/4220757
private static Mono<Void> generateEvents(AtomicBoolean isRunning) { final Logger logger = LoggerFactory.getLogger("Producer"); final Scheduler scheduler = Schedulers.elastic(); final Duration operationTimeout = Duration.ofSeconds(5); final String[] machineIds = new String[]{"2A", "9B", "6C"}; final Random random = new ...
}).subscribeOn(scheduler)
private static Mono<Void> generateEvents(AtomicBoolean isRunning) { final Logger logger = LoggerFactory.getLogger("Producer"); final Scheduler scheduler = Schedulers.elastic(); final Duration operationTimeout = Duration.ofSeconds(5); final String[] machineIds = new String[]{"2A", "9B", "6C"}; final Random random = new ...
class EventProcessorClientAggregateEventsSample { private static final Duration REPORTING_INTERVAL = Duration.ofSeconds(5); private static final String EH_CONNECTION_STRING = "Endpoint={endpoint};SharedAccessKeyName={sharedAccessKeyName};" + "SharedAccessKey={sharedAccessKey};EntityPath={eventHubName}"; /** * Main meth...
class EventProcessorClientAggregateEventsSample { private static final Duration REPORTING_INTERVAL = Duration.ofSeconds(5); private static final String EH_CONNECTION_STRING = "Endpoint={endpoint};SharedAccessKeyName={sharedAccessKeyName};" + "SharedAccessKey={sharedAccessKey};EntityPath={eventHubName}"; /** * Main meth...
`ScheduledUnit` will just use the `ExecutionVertexID` of the given `Execution`, so that the location preference in this execution param will not take effect. The location preference takes effect in the following slotProfile param(e.g. `slotProfileForLocation(loc1)`)
public void testGetsNonLocalFromSharingGroupFirst() throws Exception { JobVertexID jid1 = new JobVertexID(); JobVertexID jid2 = new JobVertexID(); JobVertexID jid3 = new JobVertexID(); TaskManagerLocation loc1 = testingSlotProvider.addTaskManager(1); TaskManagerLocation loc2 = testingSlotProvider.addTaskManager(1); ass...
new ScheduledUnit(getExecution(jid3, 1, 2, sharingGroup), sharingGroup.getSlotSharingGroupId(), cc2), slotProfileForLocation(loc1), TestingUtils.infiniteTime()).get();
public void testGetsNonLocalFromSharingGroupFirst() throws Exception { JobVertexID jid1 = new JobVertexID(); JobVertexID jid2 = new JobVertexID(); JobVertexID jid3 = new JobVertexID(); TaskManagerLocation loc1 = testingSlotProvider.addTaskManager(1); TaskManagerLocation loc2 = testingSlotProvider.addTaskManager(1); ass...
class ScheduleWithCoLocationHintTest extends SchedulerTestBase { @Override protected ComponentMainThreadExecutor getComponentMainThreadExecutor() { return ComponentMainThreadExecutorServiceAdapter.forMainThread(); } @Test public void scheduleAllSharedAndCoLocated() throws Exception { JobVertexID jid1 = new JobVertexID(...
class ScheduleWithCoLocationHintTest extends SchedulerTestBase { @Override protected ComponentMainThreadExecutor getComponentMainThreadExecutor() { return ComponentMainThreadExecutorServiceAdapter.forMainThread(); } @Test public void scheduleAllSharedAndCoLocated() throws Exception { JobVertexID jid1 = new JobVertexID(...
Should the user's initial workload group default to `normal`?
public void update(List<Pair<String, String>> properties) throws UserException { long newMaxConn = this.commonProperties.getMaxConn(); long newMaxQueryInstances = this.commonProperties.getMaxQueryInstances(); String sqlBlockRules = this.commonProperties.getSqlBlockRules(); int cpuResourceLimit = this.commonProperties.g...
String workloadGroup = this.commonProperties.getWorkloadGroup();
public void update(List<Pair<String, String>> properties) throws UserException { long newMaxConn = this.commonProperties.getMaxConn(); long newMaxQueryInstances = this.commonProperties.getMaxQueryInstances(); String sqlBlockRules = this.commonProperties.getSqlBlockRules(); int cpuResourceLimit = this.commonProperties.g...
class UserProperty implements Writable { private static final String PROP_MAX_USER_CONNECTIONS = "max_user_connections"; private static final String PROP_MAX_QUERY_INSTANCES = "max_query_instances"; private static final String PROP_RESOURCE_TAGS = "resource_tags"; private static final String PROP_RESOURCE = "resource";...
class UserProperty implements Writable { private static final String PROP_MAX_USER_CONNECTIONS = "max_user_connections"; private static final String PROP_MAX_QUERY_INSTANCES = "max_query_instances"; private static final String PROP_RESOURCE_TAGS = "resource_tags"; private static final String PROP_RESOURCE = "resource";...
That's a sanity check I copied over from the old `ClassPathPackagedProgramRetrieverTest`. I thought it's reasonable to check whether the jar loading without modifying the `java.class.path` System property. Isn't that essentially the same as what you suggest? ...just with less code?
public void testJarFromSystemClasspath() throws MalformedURLException { classpathProvider.setSystemClasspathWithTwoEntryClasses(); final Collection<String> systemClasspath = StreamSupport.stream( FromClasspathEntryClassInformationProvider.extractSystemClasspath() .spliterator(), false) .map(File::getName) .collect(Coll...
false)
public void testJarFromSystemClasspath() throws MalformedURLException { multipleEntryClassesClasspathProvider.setSystemClasspath(); final Collection<String> systemClasspath = StreamSupport.stream( FromClasspathEntryClassInformationProvider.extractSystemClasspath() .spliterator(), false) .map(File::getName) .collect(Col...
class FromClasspathEntryClassInformationProviderTest extends TestLogger { @Rule public final ClasspathProvider classpathProvider = new ClasspathProvider(); @Test public void testJobClassOnUserClasspathWithExplicitJobClassName() throws MalformedURLException, FlinkException { FromClasspathEntryClassInformationProvider te...
class FromClasspathEntryClassInformationProviderTest extends TestLogger { @Rule public ClasspathProvider noEntryClassClasspathProvider = ClasspathProvider.createWithNoEntryClass(); @Rule public ClasspathProvider singleEntryClassClasspathProvider = ClasspathProvider.createWithSingleEntryClass(); @Rule public ClasspathPr...
The space after `reason` should not be emitted when the details record is empty.
public String stringValue() { return "error " + reason + " " + Optional.ofNullable(details).map(Object::toString).orElse(""); }
return "error " + reason + " " + Optional.ofNullable(details).map(Object::toString).orElse("");
public String stringValue() { return "error " + reason + Optional.ofNullable(details).map(details -> " " + details).orElse(""); }
class ErrorValue extends RuntimeException implements RefValue { private static final long serialVersionUID = 1L; private final BType type; private final String reason; private final Object details; public ErrorValue(String reason, Object details) { super(reason); this.type = new BErrorType(TypeConstants.ERROR, BTypes.t...
class ErrorValue extends RuntimeException implements RefValue { private static final long serialVersionUID = 1L; private final BType type; private final String reason; private final Object details; public ErrorValue(String reason, Object details) { super(reason); this.type = new BErrorType(TypeConstants.ERROR, BTypes.t...
The same comment about moving it inside `SavepointFormatType`
public SavepointOptions(CommandLine line) { super(line); args = line.getArgs(); dispose = line.hasOption(SAVEPOINT_DISPOSE_OPTION.getOpt()); disposeSavepointPath = line.getOptionValue(SAVEPOINT_DISPOSE_OPTION.getOpt()); jarFile = line.getOptionValue(JAR_OPTION.getOpt()); if (line.hasOption(SAVEPOINT_FORMAT_OPTION)) { f...
formatType = SavepointFormatType.DEFAULT;
public SavepointOptions(CommandLine line) { super(line); args = line.getArgs(); dispose = line.hasOption(SAVEPOINT_DISPOSE_OPTION.getOpt()); disposeSavepointPath = line.getOptionValue(SAVEPOINT_DISPOSE_OPTION.getOpt()); jarFile = line.getOptionValue(JAR_OPTION.getOpt()); if (line.hasOption(SAVEPOINT_FORMAT_OPTION)) { f...
class SavepointOptions extends CommandLineOptions { private final String[] args; private final SavepointFormatType formatType; private boolean dispose; private String disposeSavepointPath; private String jarFile; public String[] getArgs() { return args == null ? new String[0] : args; } public boolean isDispose() { retu...
class SavepointOptions extends CommandLineOptions { private final String[] args; private final SavepointFormatType formatType; private boolean dispose; private String disposeSavepointPath; private String jarFile; public String[] getArgs() { return args == null ? new String[0] : args; } public boolean isDispose() { retu...
The testSuiteMap contains 'TestSuites' for each module. If the map is empty, then the TestRunner shouldn't be running at all since no TestSuites have been initialized at compile time. Even if the TestSuite has no tests, it will still count as part of the map. So having the map empty and running the TestRunner is the p...
public static void main(String[] args) throws IOException { int exitStatus = 0; int result; Path testCache = Paths.get(args[0]); String target = args[1]; boolean report = Boolean.valueOf(args[2]); boolean coverage = Boolean.valueOf(args[3]); if (report || coverage) { testReport = new TestReport(); } out.println(); out....
exitStatus = 1;
public static void main(String[] args) throws IOException { int exitStatus = 0; int result; if (args.length >= 4) { Path testCache = Paths.get(args[0]); String target = args[1]; boolean report = Boolean.valueOf(args[2]); boolean coverage = Boolean.valueOf(args[3]); if (report || coverage) { testReport = new TestReport(...
class Main { private static final PrintStream out = System.out; static TestReport testReport; static ClassLoader classLoader; private static int startTestSuit(Path sourceRootPath, TestSuite testSuite, Path jsonTmpSummaryPath, ClassLoader classLoader) throws IOException { int exitStatus = 0; try { TesterinaUtils.execute...
class Main { private static final PrintStream out = System.out; static TestReport testReport; static ClassLoader classLoader; private static int startTestSuit(Path sourceRootPath, TestSuite testSuite, Path jsonTmpSummaryPath, ClassLoader classLoader) throws IOException { int exitStatus = 0; try { TesterinaUtils.execute...
Consider something like ```suggestion return find(tenant, zoneBuckets).map(ArchiveBucket::bucketArn).orElseGet(() -> assignToBucket(zoneId, tenant)); ``` even better if `find()` does the mapping since both users just want the bucket ARN anyway...
private String findOrAssignBucket(ZoneId zoneId, TenantName tenant) { var zoneBuckets = curatorDb.readArchiveBuckets(zoneId); if (find(tenant, zoneBuckets).isPresent()) return find(tenant, zoneBuckets).get().bucketArn(); else return assignToBucket(zoneId, tenant); }
else return assignToBucket(zoneId, tenant);
private String findOrAssignBucket(ZoneId zoneId, TenantName tenant) { var zoneBuckets = curatorDb.readArchiveBuckets(zoneId); return find(tenant, zoneBuckets).orElseGet(() -> assignToBucket(zoneId, tenant)); }
class CuratorArchiveBucketDb extends AbstractComponent implements ArchiveBucketDb { /** * Due to policy limits, we can't put data for more than this many tenants in a bucket. * Policy size limit is 20kb, with approx. 500 bytes of policy required per tenant = 40 tenants. * We set the maximum a bit lower to have a solid ...
class CuratorArchiveBucketDb implements ArchiveBucketDb { /** * Due to policy limits, we can't put data for more than this many tenants in a bucket. * Policy size limit is 20kb, with approx. 500 bytes of policy required per tenant = 40 tenants. * We set the maximum a bit lower to have a solid margin of error. */ privat...
These two asserts only check the counts. We need to assert the content as well, to verify that the nodes were updated correctly.
public void testSeparatedListNodeAllNodeModification() { SyntaxTree syntaxTree = parseFile("separated_node_list_modify.bal"); ModulePartNode oldRoot = syntaxTree.rootNode(); ModuleVariableDeclarationNode oldModuleVariableDeclarationNode = (ModuleVariableDeclarationNode) oldRoot.members().get(0); ListConstructorExpressi...
newSeperatedlistNode.expressions().size());
public void testSeparatedListNodeAllNodeModification() { SyntaxTree syntaxTree = parseFile("separated_node_list_modify_all_nodes.bal"); ModulePartNode oldRoot = syntaxTree.rootNode(); WhiteSpaceMinutiaeRemover whiteSpaceMinutiaeRemover = new WhiteSpaceMinutiaeRemover(); ModulePartNode newRoot = (ModulePartNode) oldRoot...
class SyntaxTreeModifierTest extends AbstractSyntaxTreeAPITest { @Test public void testVarDeclStmtModification() { SyntaxTree syntaxTree = parseFile("variable_decl_stmt_modify.bal"); ModulePartNode oldRoot = syntaxTree.rootNode(); VariableDeclModifier variableDeclModifier = new VariableDeclModifier(); ModulePartNode ne...
class SyntaxTreeModifierTest extends AbstractSyntaxTreeAPITest { @Test public void testVarDeclStmtModification() { SyntaxTree syntaxTree = parseFile("variable_decl_stmt_modify.bal"); ModulePartNode oldRoot = syntaxTree.rootNode(); VariableDeclModifier variableDeclModifier = new VariableDeclModifier(); ModulePartNode ne...
No, NA would pick up the context set during the last HA tick.
public NodeAgentContext nextContext() throws InterruptedException { synchronized (monitor) { nextContext = null; Duration untilNextContext = Duration.ZERO; while (setAndGetIsFrozen(wantFrozen) || nextContext == null || (untilNextContext = Duration.between(Instant.now(), nextContextAt)).toMillis() > 0) { if (pendingInte...
currentContext = nextContext;
public NodeAgentContext nextContext() throws InterruptedException { synchronized (monitor) { nextContext = null; Duration untilNextContext = Duration.ZERO; while (setAndGetIsFrozen(wantFrozen) || nextContext == null || (untilNextContext = Duration.between(Instant.now(), nextContextAt)).toMillis() > 0) { if (pendingInte...
class NodeAgentContextManager implements NodeAgentContextSupplier, NodeAgentScheduler { private final Object monitor = new Object(); private final Clock clock; private NodeAgentContext currentContext; private NodeAgentContext nextContext; private Instant nextContextAt; private boolean wantFrozen = false; private boolea...
class NodeAgentContextManager implements NodeAgentContextSupplier, NodeAgentScheduler { private final Object monitor = new Object(); private final Clock clock; private NodeAgentContext currentContext; private NodeAgentContext nextContext; private Instant nextContextAt; private boolean wantFrozen = false; private boolea...
Each Iceberg split contains data files, delete files (for upsert), schema string. Each data file also contains stats for every column. if the table is wide (many columns), each split may go over 10 KB
public byte[] serialize(HybridSourceEnumeratorState enumState) throws IOException { try (ByteArrayOutputStream baos = new ByteArrayOutputStream(); DataOutputStream out = new DataOutputStream(baos)) { out.writeInt(enumState.getCurrentSourceIndex()); out.writeInt(enumState.wrappedStateSerializerVersion()); out.writeInt(e...
out.writeInt(enumState.getWrappedState().length);
public byte[] serialize(HybridSourceEnumeratorState enumState) throws IOException { try (ByteArrayOutputStream baos = new ByteArrayOutputStream(); DataOutputStream out = new DataOutputStream(baos)) { out.writeInt(enumState.getCurrentSourceIndex()); out.writeInt(enumState.getWrappedStateSerializerVersion()); out.writeIn...
class HybridSourceEnumeratorStateSerializer implements SimpleVersionedSerializer<HybridSourceEnumeratorState> { private static final int CURRENT_VERSION = 0; public HybridSourceEnumeratorStateSerializer() {} @Override public int getVersion() { return CURRENT_VERSION; } @Override @Override public HybridSourceEnumeratorS...
class HybridSourceEnumeratorStateSerializer implements SimpleVersionedSerializer<HybridSourceEnumeratorState> { private static final int CURRENT_VERSION = 0; public HybridSourceEnumeratorStateSerializer() {} @Override public int getVersion() { return CURRENT_VERSION; } @Override @Override public HybridSourceEnumeratorS...
There are still some `assertEquals` usages remaining.
public void histogram() { assertEquals(metricsTrackerFacade.getMetricsTrackerManager().getClass(), MetricsTrackerManagerFixture2.class); ((MetricsTrackerManagerFixture2) metricsTrackerFacade.getMetricsTrackerManager()).setMetricsTrackerFactory(new MetricsTrackerFactoryFixture2()); HistogramMetricsTrackerDelegate delega...
assertEquals(metricsTrackerFacade.getMetricsTrackerManager().getClass(), MetricsTrackerManagerFixture2.class);
public void histogram() { assertThat(metricsTrackerFacade.getMetricsTrackerManager().getClass().getName(), is(MetricsTrackerManagerFixture2.class.getName())); ((MetricsTrackerManagerFixture2) metricsTrackerFacade.getMetricsTrackerManager()).setMetricsTrackerFactory(new MetricsTrackerFactoryFixture2()); HistogramMetrics...
class MetricsTrackerFacadeTest { private MetricsTrackerFacade metricsTrackerFacade = MetricsTrackerFacade.getInstance(); @Before public void setUp() { MetricsConfiguration metricsConfiguration = new MetricsConfiguration("fixture", null, null, null); metricsTrackerFacade.init(metricsConfiguration); assertThat(metricsTra...
class MetricsTrackerFacadeTest { private MetricsTrackerFacade metricsTrackerFacade = MetricsTrackerFacade.getInstance(); @Before public void setUp() { MetricsConfiguration metricsConfiguration = new MetricsConfiguration("fixture", null, null, null); metricsTrackerFacade.init(metricsConfiguration); assertThat(metricsTra...
```suggestion if (!isAllowNull && !EngineType.supportNotNullColumn(engineName)) { ```
public void analyze(boolean isOlap, boolean isInternalCatalog, String engineName) throws AnalysisException { if (isInternalCatalog) { if (!isAllowNull) { if (!EngineType.supportNotNullColumn(engineName)) { throw new AnalysisException(String.format("All columns must be nullable for external table. " + "Column %s is not ...
if (!EngineType.supportNotNullColumn(engineName)) {
public void analyze(boolean isOlap, boolean isInternalCatalog, String engineName) throws AnalysisException { if (isInternalCatalog) { if (!isAllowNull && !EngineType.supportNotNullColumn(engineName)) { throw new AnalysisException(String.format("All columns must be nullable for external table. " + "Column %s is not null...
class DefaultValueDef { public boolean isSet; public Expr expr; public DefaultValueDef(boolean isSet, Expr expr) { this.isSet = isSet; if (expr != null) { this.expr = expr; } else { this.expr = NullLiteral.create(Type.VARCHAR); } } private static final String ZERO = new String(new byte[] {0}); public static DefaultValu...
class DefaultValueDef { public boolean isSet; public Expr expr; public DefaultValueDef(boolean isSet, Expr expr) { this.isSet = isSet; if (expr != null) { this.expr = expr; } else { this.expr = NullLiteral.create(Type.VARCHAR); } } private static final String ZERO = new String(new byte[] {0}); public static DefaultValu...
No change has been made to the `objectType.flags` value. If you look at line 1655, we explicitly set the flags value there, so this line didn't have any impact anyway.
public BType readType(int cpI) throws IOException { byte tag = inputStream.readByte(); Name name = names.fromString(getStringCPEntryValue(inputStream)); var flags = inputStream.readLong(); int typeFlags = inputStream.readInt(); switch (tag) { case TypeTags.INT: return typeParamAnalyzer.getNominalType(symTable.intType, ...
for (int i = 0; i < fieldCount; i++) {
public BType readType(int cpI) throws IOException { byte tag = inputStream.readByte(); Name name = names.fromString(getStringCPEntryValue(inputStream)); var flags = inputStream.readLong(); int typeFlags = inputStream.readInt(); switch (tag) { case TypeTags.INT: return typeParamAnalyzer.getNominalType(symTable.intType, ...
class BIRTypeReader { private DataInputStream inputStream; public BIRTypeReader(DataInputStream inputStream) { this.inputStream = inputStream; } private BType readTypeFromCp() throws IOException { return readBType(inputStream); } private BInvokableType setTSymbolForInvokableType(BInvokableType bInvokableType, BType ret...
class BIRTypeReader { private DataInputStream inputStream; public BIRTypeReader(DataInputStream inputStream) { this.inputStream = inputStream; } private BType readTypeFromCp() throws IOException { return readBType(inputStream); } private BInvokableType setTSymbolForInvokableType(BInvokableType bInvokableType, BType ret...
Please add a method on environment instead of matching the free form and human-readable region.
private void createContainerData(ContainerNodeSpec nodeSpec) { ContainerData containerData = ContainerData.createClean(environment, ContainerName.fromHostname(nodeSpec.hostname)); if (nodeSpec.nodeType.equals(NodeType.config.name())) { logger.info("Creating files needed by config server"); new ConfigServerContainerData...
if (environment.getRegion().startsWith("aws-")) {
private void createContainerData(ContainerNodeSpec nodeSpec) { ContainerData containerData = ContainerData.createClean(environment, ContainerName.fromHostname(nodeSpec.hostname)); if (nodeSpec.nodeType.equals(NodeType.config.name())) { logger.info("Creating files needed by config server"); new ConfigServerContainerData...
class CpuUsageReporter { private long containerKernelUsage = 0; private long totalContainerUsage = 0; private long totalSystemUsage = 0; private long deltaContainerKernelUsage; private long deltaContainerUsage; private long deltaSystemUsage; private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, ...
class CpuUsageReporter { private long containerKernelUsage = 0; private long totalContainerUsage = 0; private long totalSystemUsage = 0; private long deltaContainerKernelUsage; private long deltaContainerUsage; private long deltaSystemUsage; private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, ...
To me it would make sense to auto-add `/` like for the `*Page` variables.
public FormAuthenticationMechanism get() { String key; if (!httpConfiguration.encryptionKey.isPresent()) { if (encryptionKey != null) { key = encryptionKey; } else { byte[] data = new byte[32]; new SecureRandom().nextBytes(data); key = encryptionKey = Base64.getEncoder().encodeToString(data); log.warn("Encryption key w...
String postLocation = form.postLocation;
public FormAuthenticationMechanism get() { String key; if (!httpConfiguration.encryptionKey.isPresent()) { if (encryptionKey != null) { key = encryptionKey; } else { byte[] data = new byte[32]; new SecureRandom().nextBytes(data); key = encryptionKey = Base64.getEncoder().encodeToString(data); log.warn("Encryption key w...
class HttpSecurityRecorder { private static final Logger log = Logger.getLogger(HttpSecurityRecorder.class); protected static final Consumer<Throwable> NOOP_CALLBACK = new Consumer<Throwable>() { @Override public void accept(Throwable throwable) { } }; static volatile String encryptionKey; public Handler<RoutingContext...
class HttpSecurityRecorder { private static final Logger log = Logger.getLogger(HttpSecurityRecorder.class); protected static final Consumer<Throwable> NOOP_CALLBACK = new Consumer<Throwable>() { @Override public void accept(Throwable throwable) { } }; static volatile String encryptionKey; public Handler<RoutingContext...
Yes — the catalog, db, and table names are case-sensitive.
public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } PaimonTable that = (PaimonTable) o; return catalogName.equals(that.catalogName) && databaseName.equals(that.databaseName) && tableName.equals(that.tableName) && createTime == that.createTime;...
return catalogName.equals(that.catalogName) &&
public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } PaimonTable that = (PaimonTable) o; return catalogName.equals(that.catalogName) && databaseName.equals(that.databaseName) && tableName.equals(that.tableName) && createTime == that.createTime;...
class PaimonTable extends Table { private final String catalogName; private final String databaseName; private final String tableName; private final AbstractFileStoreTable paimonNativeTable; private final List<String> partColumnNames; private final List<String> paimonFieldNames; private long latestSnapshotId; public Pa...
class PaimonTable extends Table { private final String catalogName; private final String databaseName; private final String tableName; private final AbstractFileStoreTable paimonNativeTable; private final List<String> partColumnNames; private final List<String> paimonFieldNames; private long latestSnapshotId; public Pa...
Not sure, if I understand your question. Before applying any specs the table has a schema as described by ``` contextResolvedTable.getResolvedSchema() .toSourceRowDataType() .getLogicalType() ``` Only once the `ReadingMetadataSpec` is applied it will change the type to `spec.getProdu...
private DynamicTableSource getTableSource(FlinkContext context, FlinkTypeFactory typeFactory) { if (tableSource == null) { DynamicTableSourceFactory factory = context.getModuleManager() .getFactory(Module::getTableSourceFactory) .orElse(null); if (factory == null) { Catalog catalog = context.getCatalogManager() .getCat...
.getLogicalType();
private DynamicTableSource getTableSource(FlinkContext context, FlinkTypeFactory typeFactory) { if (tableSource == null) { DynamicTableSourceFactory factory = context.getModuleManager() .getFactory(Module::getTableSourceFactory) .orElse(null); if (factory == null) { Catalog catalog = context.getCatalogManager() .getCat...
class DynamicTableSourceSpec extends DynamicTableSpecBase { public static final String FIELD_NAME_CATALOG_TABLE = "table"; public static final String FIELD_NAME_SOURCE_ABILITIES = "abilities"; private final ContextResolvedTable contextResolvedTable; private final @Nullable List<SourceAbilitySpec> sourceAbilities; priva...
class DynamicTableSourceSpec extends DynamicTableSpecBase { public static final String FIELD_NAME_CATALOG_TABLE = "table"; public static final String FIELD_NAME_SOURCE_ABILITIES = "abilities"; private final ContextResolvedTable contextResolvedTable; private final @Nullable List<SourceAbilitySpec> sourceAbilities; priva...
IIUC, if we set `azure.activedirectory.tenant-id=common`, then the multi-tenant application will function.
private ClientRegistration azureClientRegistration() { String tenantId = aadAuthenticationProperties.getTenantId().trim(); Assert.hasText(tenantId, "azure.activedirectory.tenant-id should have text."); Assert.doesNotContain(tenantId, " ", "azure.activedirectory.tenant-id should not contain ' '."); Assert.doesNotContain...
return ClientRegistration.withRegistrationId("azure")
private ClientRegistration azureClientRegistration() { String tenantId = aadAuthenticationProperties.getTenantId().trim(); Assert.hasText(tenantId, "azure.activedirectory.tenant-id should have text."); Assert.doesNotContain(tenantId, " ", "azure.activedirectory.tenant-id should not contain ' '."); Assert.doesNotContain...
class AADOAuth2AutoConfiguration { private final AADAuthenticationProperties aadAuthenticationProperties; private final ServiceEndpointsProperties serviceEndpointsProperties; public AADOAuth2AutoConfiguration(AADAuthenticationProperties aadAuthProperties, ServiceEndpointsProperties serviceEndpointsProperties) { this.aa...
class AADOAuth2AutoConfiguration { private final AADAuthenticationProperties aadAuthenticationProperties; private final ServiceEndpointsProperties serviceEndpointsProperties; public AADOAuth2AutoConfiguration(AADAuthenticationProperties aadAuthProperties, ServiceEndpointsProperties serviceEndpointsProperties) { this.aa...
You are right, I will add a checkArgument.
void requestExclusiveBuffers(int numExclusiveBuffers) throws IOException { if (numExclusiveBuffers <= 0) { return; } Collection<MemorySegment> segments = globalPool.requestMemorySegments(numExclusiveBuffers); synchronized (bufferQueue) { for (MemorySegment segment : segments) { bufferQueue.addExclusiveBuffer( new Netwo...
if (numExclusiveBuffers <= 0) {
void requestExclusiveBuffers(int numExclusiveBuffers) throws IOException { checkArgument(numExclusiveBuffers >= 0, "Num exclusive buffers must be non-negative."); if (numExclusiveBuffers == 0) { return; } Collection<MemorySegment> segments = globalPool.requestMemorySegments(numExclusiveBuffers); synchronized (bufferQue...
class BufferManager implements BufferListener, BufferRecycler { /** The available buffer queue wraps both exclusive and requested floating buffers. */ private final AvailableBufferQueue bufferQueue = new AvailableBufferQueue(); /** The buffer provider for requesting exclusive buffers. */ private final MemorySegmentProv...
class BufferManager implements BufferListener, BufferRecycler { /** The available buffer queue wraps both exclusive and requested floating buffers. */ private final AvailableBufferQueue bufferQueue = new AvailableBufferQueue(); /** The buffer provider for requesting exclusive buffers. */ private final MemorySegmentProv...
Do you mean that it should report statistics on disk usage instead of the data size?
public void init() throws LoadBalanceException { Backend be = infoService.getBackend(beId); if (be == null) { throw new LoadBalanceException("backend " + beId + " does not exist"); } isAvailable = be.isScheduleAvailable() && be.isLoadAvailable() && be.isQueryAvailable(); ImmutableMap<String, DiskInfo> disks = be.getDis...
diskInfo.getTotalCapacityB(), diskInfo.getDiskUsedCapacityB(), diskInfo.getState());
public void init() throws LoadBalanceException { Backend be = infoService.getBackend(beId); if (be == null) { throw new LoadBalanceException("backend " + beId + " does not exist"); } isAvailable = be.isScheduleAvailable() && be.isLoadAvailable() && be.isQueryAvailable(); ImmutableMap<String, DiskInfo> disks = be.getDis...
class LoadScore { public double replicaNumCoefficient = 0.5; public double capacityCoefficient = 0.5; public double score = 0.0; public static final LoadScore DUMMY = new LoadScore(); }
class LoadScore { public double replicaNumCoefficient = 0.5; public double capacityCoefficient = 0.5; public double score = 0.0; public static final LoadScore DUMMY = new LoadScore(); }
If an old-version frontend forwards to a new version, `request.catalog` will be null. Will it throw an NPE?
public TMasterOpResult proxyExecute(TMasterOpRequest request) { ctx.setCurrentCatalog(request.catalog); ctx.setDatabase(request.db); ctx.setQualifiedUser(request.user); ctx.setGlobalStateMgr(GlobalStateMgr.getCurrentState()); ctx.getState().reset(); if (request.isSetResourceInfo()) { ctx.getSessionVariable().setResourc...
ctx.setCurrentCatalog(request.catalog);
public TMasterOpResult proxyExecute(TMasterOpRequest request) { ctx.setCurrentCatalog(request.catalog); if (ctx.getCurrentCatalog() == null) { TMasterOpResult result = new TMasterOpResult(); ctx.getState().setError( "Missing current catalog. You need to upgrade this Frontend to the same version as Leader Frontend."); r...
class ConnectProcessor { private static final Logger LOG = LogManager.getLogger(ConnectProcessor.class); private final ConnectContext ctx; private ByteBuffer packetBuf; private StmtExecutor executor = null; public ConnectProcessor(ConnectContext context) { this.ctx = context; } private void handleInitDb() { String iden...
class ConnectProcessor { private static final Logger LOG = LogManager.getLogger(ConnectProcessor.class); private final ConnectContext ctx; private ByteBuffer packetBuf; private StmtExecutor executor = null; public ConnectProcessor(ConnectContext context) { this.ctx = context; } private void handleInitDb() { String iden...
Printing `acquiredResources` can actually produce quite some text. Hence, I am wondering whether we should really include this information in every `NoResourceAvailableException` since it will be logged for every request later on. Maybe it could be better to log it once in this method and then fail the requests w/o thi...
private void failPendingRequests(Collection<ResourceRequirement> acquiredResources) { if (!pendingRequests.isEmpty()) { final NoResourceAvailableException cause = new NoResourceAvailableException( "Could not acquire the minimum required resources. Acquired: " + acquiredResources + ". Current slot pool status: " + getSl...
+ getSlotServiceStatus());
private void failPendingRequests(Collection<ResourceRequirement> acquiredResources) { if (!pendingRequests.isEmpty()) { final NoResourceAvailableException cause = new NoResourceAvailableException( "Could not acquire the minimum required resources. Acquired: " + acquiredResources + ". Current slot pool status: " + getSl...
class DeclarativeSlotPoolBridge extends DeclarativeSlotPoolService implements SlotPool { private final Map<SlotRequestId, PendingRequest> pendingRequests; private final Map<SlotRequestId, AllocationID> fulfilledRequests; private final Time idleSlotTimeout; @Nullable private ComponentMainThreadExecutor componentMainThre...
class DeclarativeSlotPoolBridge extends DeclarativeSlotPoolService implements SlotPool { private final Map<SlotRequestId, PendingRequest> pendingRequests; private final Map<SlotRequestId, AllocationID> fulfilledRequests; private final Time idleSlotTimeout; @Nullable private ComponentMainThreadExecutor componentMainThre...
should this path be more generic?
public void connectionAttributes() throws Exception { Map<String, String> attributes = new HashMap<String, String>(); attributes.put("_connector_name", "Apache Beam SingleStoreDB I/O"); attributes.put("_connector_version", ReleaseInfo.getReleaseInfo().getVersion()); attributes.put("_product_version", ReleaseInfo.getRel...
File file = new File("/home/amakarovych-ua/Test/log");
public void connectionAttributes() throws Exception { Map<String, String> attributes = new HashMap<String, String>(); attributes.put("_connector_name", "Apache Beam SingleStoreDB I/O"); attributes.put("_connector_version", ReleaseInfo.getReleaseInfo().getVersion()); attributes.put("_product_version", ReleaseInfo.getRel...
class SingleStoreIOConnectionAttributesIT { private static String serverName; private static String username; private static String password; private static Integer port; @BeforeClass public static void setup() throws Exception { SingleStoreIOTestPipelineOptions options; try { options = readIOTestPipelineOptions(Single...
class SingleStoreIOConnectionAttributesIT { private static String serverName; private static String username; private static String password; private static Integer port; @BeforeClass public static void setup() throws Exception { SingleStoreIOTestPipelineOptions options; try { options = readIOTestPipelineOptions(Single...
We are using the targetType in the switch case
private TypeTestResult checkBuiltInIntSubtypeWidenPossible(BType actualType, BType targetType) { int actualTag = getReferredType(actualType).tag; switch (targetType.tag) { case TypeTags.INT: if (actualTag == TypeTags.BYTE || TypeTags.isIntegerTypeTag(actualTag)) { return TypeTestResult.TRUE; } break; case TypeTags.SIGN...
int actualTag = getReferredType(actualType).tag;
private TypeTestResult checkBuiltInIntSubtypeWidenPossible(BType actualType, BType targetType) { int actualTag = getReferredType(actualType).tag; switch (targetType.tag) { case TypeTags.INT: if (actualTag == TypeTags.BYTE || TypeTags.isIntegerTypeTag(actualTag)) { return TypeTestResult.TRUE; } break; case TypeTags.SIGN...
class Types { private static final CompilerContext.Key<Types> TYPES_KEY = new CompilerContext.Key<>(); private final Unifier unifier; private SymbolTable symTable; private SymbolResolver symResolver; private BLangDiagnosticLog dlog; private Names names; private int finiteTypeCount = 0; private BUnionType expandedXMLBui...
class Types { private static final CompilerContext.Key<Types> TYPES_KEY = new CompilerContext.Key<>(); private final Unifier unifier; private SymbolTable symTable; private SymbolResolver symResolver; private BLangDiagnosticLog dlog; private Names names; private int finiteTypeCount = 0; private BUnionType expandedXMLBui...
Could you extract the common code of initializing the environment?
public void testNonMaterialization() throws Exception { StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); env.enableCheckpointing(10) .enableChangelogStateBackend(true) .getCheckpointConfig() .setCheckpointStorage(TEMPORARY_FOLDER.newFolder().toURI()); env.setStateBackend(new Delega...
env.configure(
public void testNonMaterialization() throws Exception { File checkpointFolder = TEMPORARY_FOLDER.newFolder(); SharedReference<AtomicBoolean> hasMaterialization = sharedObjects.add(new AtomicBoolean(true)); StreamExecutionEnvironment env = getEnv(delegatedStateBackend, checkpointFolder, 1000, 1, Long.MAX_VALUE, 0); wait...
class ChangelogPeriodicMaterializationITCase extends ChangelogPeriodicMaterializationTestBase { private static final AtomicBoolean triggerDelegatedSnapshot = new AtomicBoolean(); public ChangelogPeriodicMaterializationITCase(AbstractStateBackend delegatedStateBackend) { super(delegatedStateBackend); } @Before public vo...
class ChangelogPeriodicMaterializationITCase extends ChangelogPeriodicMaterializationTestBase { public ChangelogPeriodicMaterializationITCase(AbstractStateBackend delegatedStateBackend) { super(delegatedStateBackend); } /** Recovery from checkpoint only containing non-materialized state. */ @Test /** Recovery from chec...
The test is now failing to build, please address the precommit failures (e.g. https://github.com/apache/beam/actions/runs/6344373926/job/17234456430?pr=28513)
public void testActiveThreadMetric() throws Exception { int maxThreads = 5; int threadExpirationSec = 60; BoundedQueueExecutor executor = new BoundedQueueExecutor( maxThreads, threadExpirationSec, TimeUnit.SECONDS, maxThreads, 10000000, new ThreadFactoryBuilder() .setNameFormat("DataflowWorkUnits-%d") .setDaemon(true) ...
MockActiveWork m1 =
public void testActiveThreadMetric() throws Exception { int maxThreads = 5; int threadExpirationSec = 60; BoundedQueueExecutor executor = new BoundedQueueExecutor( maxThreads, threadExpirationSec, TimeUnit.SECONDS, maxThreads, 10000000, new ThreadFactoryBuilder() .setNameFormat("DataflowWorkUnits-%d") .setDaemon(true) ...
class MockActiveWork extends StreamingDataflowWorker.Work { public static volatile boolean exit; public MockActiveWork(long workToken) { super( Windmill.WorkItem.newBuilder().setKey(ByteString.EMPTY).setWorkToken(workToken).build(), Instant::now, Collections.emptyList()); exit = false; } @Override public void run() {} ...
class StreamingDataflowWorkerTest { private static final Logger LOG = LoggerFactory.getLogger(StreamingDataflowWorkerTest.class); private static final IntervalWindow DEFAULT_WINDOW = new IntervalWindow(new Instant(1234), Duration.millis(1000)); private static final IntervalWindow WINDOW_AT_ZERO = new IntervalWindow(new...
Thanks for this simplification ! Indeed, only needed to test at the source level, not need for the higher level stuff (server, pipeline, sending a message)
public void testCheckpointCoderIsSane() { SqsUnboundedSource source = new SqsUnboundedSource(mock(SqsIO.Read.class)); CoderProperties.coderSerializable(source.getCheckpointMarkCoder()); }
SqsUnboundedSource source = new SqsUnboundedSource(mock(SqsIO.Read.class));
public void testCheckpointCoderIsSane() { SqsUnboundedSource source = new SqsUnboundedSource(mock(SqsIO.Read.class)); CoderProperties.coderSerializable(source.getCheckpointMarkCoder()); }
class SqsUnboundedSourceTest { @Test }
class SqsUnboundedSourceTest { @Test }
LGTM. I extracted a single method in Write to populate the displayData with parameters to avoid repeating code.
public void populateDisplayData(DisplayData.Builder builder) { super.populateDisplayData(builder); spec.getSpannerConfig().populateDisplayData(builder); builder.add( DisplayData.item("batchSizeBytes", spec.getBatchSizeBytes()) .withLabel("Max batch size in sytes")); builder.add( DisplayData.item("maxNumMutations", spec...
builder.add(
public void populateDisplayData(DisplayData.Builder builder) { super.populateDisplayData(builder); populateDisplayDataWithParamaters(builder); }
class Builder { abstract Builder setSpannerConfig(SpannerConfig spannerConfig); abstract Builder setBatchSizeBytes(long batchSizeBytes); abstract Builder setMaxNumMutations(long maxNumMutations); abstract Builder setMaxNumRows(long maxNumRows); abstract Builder setFailureMode(FailureMode failureMode); abstract Builder ...
class Builder { abstract Builder setSpannerConfig(SpannerConfig spannerConfig); abstract Builder setBatchSizeBytes(long batchSizeBytes); abstract Builder setMaxNumMutations(long maxNumMutations); abstract Builder setMaxNumRows(long maxNumRows); abstract Builder setFailureMode(FailureMode failureMode); abstract Builder ...
We don't in any place doing this currently, given this is happening during shutdown maybe it's best we don't?
public static ExecutorService addShutdownHookSafely(ExecutorService executorService, Duration shutdownTimeout) { if (executorService == null) { return null; } Objects.requireNonNull(shutdownTimeout, "'shutdownTimeout' cannot be null."); if (shutdownTimeout.isZero() || shutdownTimeout.isNegative()) { throw new IllegalAr...
Thread.currentThread().interrupt();
public static ExecutorService addShutdownHookSafely(ExecutorService executorService, Duration shutdownTimeout) { if (executorService == null) { return null; } Objects.requireNonNull(shutdownTimeout, "'shutdownTimeout' cannot be null."); if (shutdownTimeout.isZero() || shutdownTimeout.isNegative()) { throw new IllegalAr...
class from an array of Objects. * * @param args Array of objects to search through to find the first instance of the given `clazz` type. * @param clazz The type trying to be found. * @param <T> Generic type * @return The first object of the desired type, otherwise null. */ public static <T> T findFirstOfType(Object[] a...
class from an array of Objects. * * @param args Array of objects to search through to find the first instance of the given `clazz` type. * @param clazz The type trying to be found. * @param <T> Generic type * @return The first object of the desired type, otherwise null. */ public static <T> T findFirstOfType(Object[] a...
use name is not good idea, if we have same name output in producer
public Rule build() { return logicalCTEConsumer().thenApply(ctx -> { LogicalCTEConsumer cteConsumer = ctx.root; int refCount = ctx.cascadesContext.cteReferencedCount(cteConsumer.getCteId()); if (ConnectContext.get().getSessionVariable().enablePipelineEngine && refCount > ConnectContext.get().getSessionVariable().inline...
}
public Rule build() { return logicalCTEConsumer().thenApply(ctx -> { LogicalCTEConsumer cteConsumer = ctx.root; int refCount = ctx.cascadesContext.cteReferencedCount(cteConsumer.getCteId()); /* * Current we only implement CTE Materialize on pipeline engine and only materialize those CTE whose * refCount > NereidsRewr...
class InlineCTE extends OneRewriteRuleFactory { @Override }
class InlineCTE extends OneRewriteRuleFactory { @Override }
should be contains BoundStar? what about `select *, c1 from t group by 1`
private Plan bindAggregate(MatchingContext<LogicalAggregate<Plan>> ctx) { LogicalAggregate<Plan> agg = ctx.root; CascadesContext cascadesContext = ctx.cascadesContext; SimpleExprAnalyzer aggOutputAnalyzer = buildSimpleExprAnalyzer( agg, cascadesContext, agg.children(), true, true); List<NamedExpression> boundAggOutput ...
if (boundAggOutput.size() == 1 && boundAggOutput.get(0) instanceof BoundStar) {
private Plan bindAggregate(MatchingContext<LogicalAggregate<Plan>> ctx) { LogicalAggregate<Plan> agg = ctx.root; CascadesContext cascadesContext = ctx.cascadesContext; SimpleExprAnalyzer aggOutputAnalyzer = buildSimpleExprAnalyzer( agg, cascadesContext, agg.children(), true, true); List<NamedExpression> boundAggOutput ...
class BindExpression implements AnalysisRuleFactory { public static final Logger LOG = LogManager.getLogger(NereidsPlanner.class); @Override public List<Rule> buildRules() { /* * some rules not only depends on the condition Plan::canBind, for example, * BINDING_FILTER_SLOT need transform 'filter(unix_timestamp() > 100)...
class BindExpression implements AnalysisRuleFactory { public static final Logger LOG = LogManager.getLogger(NereidsPlanner.class); @Override public List<Rule> buildRules() { /* * some rules not only depends on the condition Plan::canBind, for example, * BINDING_FILTER_SLOT need transform 'filter(unix_timestamp() > 100)...
According to the JavaDoc of AssertJ, as() should be before isFalse(): ```suggestion assertThat(reader.isClosed) .as("The reader should have not been closed.") .isFalse(); ```
public void testIdleShutdownSplitFetcherWaitsUntilRecordProcessed() throws Exception { final String splitId = "testSplit"; final AwaitingReader<Integer, TestingSourceSplit> reader = new AwaitingReader<>( new IOException("Should not happen"), new RecordsBySplits<>( Collections.emptyMap(), Collections.singleton(splitId))...
.as("The reader should have not been closed.");
public void testIdleShutdownSplitFetcherWaitsUntilRecordProcessed() throws Exception { final String splitId = "testSplit"; final AwaitingReader<Integer, TestingSourceSplit> reader = new AwaitingReader<>( new IOException("Should not happen"), new RecordsBySplits<>( Collections.emptyMap(), Collections.singleton(splitId))...
class SplitFetcherManagerTest { @Test public void testExceptionPropagationFirstFetch() throws Exception { testExceptionPropagation(); } @Test public void testExceptionPropagationSuccessiveFetch() throws Exception { testExceptionPropagation( new TestingRecordsWithSplitIds<>("testSplit", 1, 2, 3, 4), new TestingRecordsWi...
class SplitFetcherManagerTest { @Test public void testExceptionPropagationFirstFetch() throws Exception { testExceptionPropagation(); } @Test public void testExceptionPropagationSuccessiveFetch() throws Exception { testExceptionPropagation( new TestingRecordsWithSplitIds<>("testSplit", 1, 2, 3, 4), new TestingRecordsWi...
Why is the continuation range needed? Isn't the information in the FeedRange enough (getFeedRange)?
private Mono<RxDocumentServiceRequest> populateHeaders(RxDocumentServiceRequest request, RequestVerb httpMethod) { request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123()); if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null || this.cosmosAuthorizationTokenResolver != nul...
final Range<String> continuationRange = request.getContinuationRange();
private Mono<RxDocumentServiceRequest> populateHeaders(RxDocumentServiceRequest request, RequestVerb httpMethod) { request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123()); if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null || this.cosmosAuthorizationTokenResolver != nul...
class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener, DiagnosticsClientContext { private static final AtomicInteger activeClientsCnt = new AtomicInteger(0); private static final AtomicInteger clientIdGenerator = new AtomicInteger(0); private static final Range<String...
class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener, DiagnosticsClientContext { private static final AtomicInteger activeClientsCnt = new AtomicInteger(0); private static final AtomicInteger clientIdGenerator = new AtomicInteger(0); private static final Range<String...
👍good catch, `Thread.sleep(50);` is deleted, so the `InterruptedException` wouldn't throw.
private void seizeCapacity() { if (currentContext.isKeyOccupied()) { return; } RecordContext<R, K> storedContext = currentContext; try { while (inFlightRecordNum.get() > maxInFlightRecordNum) { if (!mailboxExecutor.tryYield()) { triggerIfNeeded(true); Thread.sleep(50); } } setCurrentContext(storedContext); inFlightReco...
} catch (InterruptedException e) {
private void seizeCapacity() { if (currentContext.isKeyOccupied()) { return; } RecordContext<R, K> storedContext = currentContext; try { while (inFlightRecordNum.get() > maxInFlightRecordNum) { if (!mailboxExecutor.tryYield()) { triggerIfNeeded(true); Thread.sleep(1); } } } catch (InterruptedException ignored) { } setC...
class AsyncExecutionController<R, K> { private static final Logger LOG = LoggerFactory.getLogger(AsyncExecutionController.class); public static final int DEFAULT_BATCH_SIZE = 1000; public static final int DEFAULT_MAX_IN_FLIGHT_RECORD_NUM = 6000; /** * The batch size. When the number of state requests in the active buff...
class AsyncExecutionController<R, K> { private static final Logger LOG = LoggerFactory.getLogger(AsyncExecutionController.class); public static final int DEFAULT_BATCH_SIZE = 1000; public static final int DEFAULT_MAX_IN_FLIGHT_RECORD_NUM = 6000; /** * The batch size. When the number of state requests in the active buff...
You can use Objects.requireNonNull to ensure that the parameter cannot be null
public InPredicate(Expression compareExpr, List<Expression> optionsList) { super(new Builder<Expression>().add(compareExpr).addAll(optionsList).build().toArray(new Expression[0])); this.compareExpr = compareExpr; this.optionsList = ImmutableList.copyOf(Objects.requireNonNull(optionsList, "In list cannot be null")); }
this.compareExpr = compareExpr;
public InPredicate(Expression compareExpr, List<Expression> optionsList) { super(new Builder<Expression>().add(compareExpr).addAll(optionsList).build().toArray(new Expression[0])); this.compareExpr = Objects.requireNonNull(compareExpr, "Compare Expr cannot be null"); this.options = ImmutableList.copyOf(Objects.requireN...
class InPredicate extends Expression { private Expression compareExpr; private List<Expression> optionsList; public <R, C> R accept(ExpressionVisitor<R, C> visitor, C context) { return visitor.visitInPredicate(this, context); } @Override public DataType getDataType() throws UnboundException { return BooleanType.INSTANC...
class InPredicate extends Expression { private final Expression compareExpr; private final List<Expression> options; public <R, C> R accept(ExpressionVisitor<R, C> visitor, C context) { return visitor.visitInPredicate(this, context); } @Override public DataType getDataType() throws UnboundException { return BooleanType...
Logic aggregate above could have project operator.
private Void tryGatherForBroadcastJoin(PhysicalHashJoinOperator node, ExpressionContext context) { List<Pair<PhysicalPropertySet, List<PhysicalPropertySet>>> result = Lists.newArrayList(); if ((context.getChildOperator(0) instanceof LogicalAggregationOperator)) { LogicalAggregationOperator childOperator = (LogicalAggre...
if ((context.getChildOperator(0) instanceof LogicalAggregationOperator)) {
private Void tryGatherForBroadcastJoin(PhysicalHashJoinOperator node, ExpressionContext context) { List<Pair<PhysicalPropertySet, List<PhysicalPropertySet>>> result = Lists.newArrayList(); if (context.getChildLogicalProperty(0).isGatherToOneInstance()) { for (Pair<PhysicalPropertySet, List<PhysicalPropertySet>> outputI...
class ChildPropertyDeriver extends OperatorVisitor<Void, ExpressionContext> { private PhysicalPropertySet requirements; private List<Pair<PhysicalPropertySet, List<PhysicalPropertySet>>> outputInputProps; private final TaskContext taskContext; private final OptimizerContext context; public ChildPropertyDeriver(TaskCont...
class ChildPropertyDeriver extends OperatorVisitor<Void, ExpressionContext> { private PhysicalPropertySet requirements; private List<Pair<PhysicalPropertySet, List<PhysicalPropertySet>>> outputInputProps; private final TaskContext taskContext; private final OptimizerContext context; public ChildPropertyDeriver(TaskCont...
Failed to drop catalog xxxxx
public ShowResultSet visitDropCatalogStatement(DropCatalogStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { String catalogName = stmt.getName(); if (!context.getGlobalStateMgr().getCatalogMgr().catalogExists(catalogName)) { if (stmt.isIfExists()) { LOG.info("drop catalog[{}] which does n...
LOG.info("drop catalog[{}] which does not exist", catalogName);
public ShowResultSet visitDropCatalogStatement(DropCatalogStmt stmt, ConnectContext context) { ErrorReport.wrapWithRuntimeException(() -> { String catalogName = stmt.getName(); if (!context.getGlobalStateMgr().getCatalogMgr().catalogExists(catalogName)) { if (stmt.isIfExists()) { LOG.info("drop catalog[{}] which does n...
class StmtExecutorVisitor implements AstVisitor<ShowResultSet, ConnectContext> { private static final Logger LOG = LogManager.getLogger(StmtExecutorVisitor.class); private static final StmtExecutorVisitor INSTANCE = new StmtExecutorVisitor(); public static StmtExecutorVisitor getInstance() { return INSTANCE; } protecte...
class StmtExecutorVisitor implements AstVisitor<ShowResultSet, ConnectContext> { private static final Logger LOG = LogManager.getLogger(StmtExecutorVisitor.class); private static final StmtExecutorVisitor INSTANCE = new StmtExecutorVisitor(); public static StmtExecutorVisitor getInstance() { return INSTANCE; } protecte...
not sure your meaning, `the union of targetColumns and all auto-increment key columns are all the table columns` this is not ture.
public static void analyze(InsertStmt insertStmt, ConnectContext session) { QueryRelation query = insertStmt.getQueryStatement().getQueryRelation(); new QueryAnalyzer(session).analyze(insertStmt.getQueryStatement()); List<Table> tables = new ArrayList<>(); AnalyzerUtils.collectSpecifyExternalTables(insertStmt.getQueryS...
if (numSpecifiedKeyColumns != olapTable.getKeysNum()) {
public static void analyze(InsertStmt insertStmt, ConnectContext session) { QueryRelation query = insertStmt.getQueryStatement().getQueryRelation(); new QueryAnalyzer(session).analyze(insertStmt.getQueryStatement()); List<Table> tables = new ArrayList<>(); AnalyzerUtils.collectSpecifyExternalTables(insertStmt.getQueryS...
class InsertAnalyzer { private static void checkStaticKeyPartitionInsert(InsertStmt insertStmt, Table table, PartitionNames targetPartitionNames) { List<String> partitionColNames = targetPartitionNames.getPartitionColNames(); List<Expr> partitionColValues = targetPartitionNames.getPartitionColValues(); List<String> tab...
class InsertAnalyzer { private static void checkStaticKeyPartitionInsert(InsertStmt insertStmt, Table table, PartitionNames targetPartitionNames) { List<String> partitionColNames = targetPartitionNames.getPartitionColNames(); List<Expr> partitionColValues = targetPartitionNames.getPartitionColValues(); List<String> tab...
Please perform the doc validation soon after annotation validation
public void visit(BLangFunction funcNode) { SymbolEnv funcEnv = SymbolEnv.createFunctionEnv(funcNode, funcNode.symbol.scope, env); funcNode.annAttachments.forEach(annotationAttachment -> { annotationAttachment.attachmentPoint = new BLangAnnotationAttachmentPoint(BLangAnnotationAttachmentPoint.AttachmentPoint.FUNCTION, ...
funcNode.docAttachments.forEach(doc -> analyzeDef(doc, funcEnv));
public void visit(BLangFunction funcNode) { SymbolEnv funcEnv = SymbolEnv.createFunctionEnv(funcNode, funcNode.symbol.scope, env); funcNode.annAttachments.forEach(annotationAttachment -> { annotationAttachment.attachmentPoint = new BLangAnnotationAttachmentPoint(BLangAnnotationAttachmentPoint.AttachmentPoint.FUNCTION, ...
class SemanticAnalyzer extends BLangNodeVisitor { private static final CompilerContext.Key<SemanticAnalyzer> SYMBOL_ANALYZER_KEY = new CompilerContext.Key<>(); private SymbolTable symTable; private SymbolEnter symbolEnter; private Names names; private SymbolResolver symResolver; private TypeChecker typeChecker; private...
class SemanticAnalyzer extends BLangNodeVisitor { private static final CompilerContext.Key<SemanticAnalyzer> SYMBOL_ANALYZER_KEY = new CompilerContext.Key<>(); private SymbolTable symTable; private SymbolEnter symbolEnter; private Names names; private SymbolResolver symResolver; private TypeChecker typeChecker; private...
But it only does it if the input `isFinished()`, so that should be a non issue.
public boolean processInput() throws Exception { initializeNumRecordsIn(); StreamElement recordOrMark = input.pollNextNullable(); if (recordOrMark != null) { int channel = input.getLastChannel(); checkState(channel != StreamTaskInput.UNSPECIFIED); processElement(recordOrMark, channel); } checkFinished(); return recordO...
checkFinished();
public boolean processInput() throws Exception { initializeNumRecordsIn(); StreamElement recordOrMark = input.pollNextNullable(); if (recordOrMark != null) { int channel = input.getLastChannel(); checkState(channel != StreamTaskInput.UNSPECIFIED); processElement(recordOrMark, channel); } checkFinished(); return recordO...
class StreamOneInputProcessor<IN> implements StreamInputProcessor { private static final Logger LOG = LoggerFactory.getLogger(StreamOneInputProcessor.class); private final StreamTaskInput input; private final Object lock; private final OperatorChain<?, ?> operatorChain; /** Valve that controls how watermarks and stream...
class StreamOneInputProcessor<IN> implements StreamInputProcessor { private static final Logger LOG = LoggerFactory.getLogger(StreamOneInputProcessor.class); private final StreamTaskInput input; private final Object lock; private final OperatorChain<?, ?> operatorChain; /** Valve that controls how watermarks and stream...
For the extension tests, I wonder if we would have to dump the full test deployment classpath as we might add some deployment extensions for testing.
protected void doExecute() throws MojoExecutionException, MojoFailureException { final String lifecyclePhase = mojoExecution.getLifecyclePhase(); if (mode == null) { if (lifecyclePhase == null) { mode = "NORMAL"; } else { mode = lifecyclePhase.contains("test") ? "TEST" : "NORMAL"; } } final LaunchMode launchMode = Laun...
final List<String> deps = new ArrayList<>();
protected void doExecute() throws MojoExecutionException, MojoFailureException { final String lifecyclePhase = mojoExecution.getLifecyclePhase(); if (mode == null) { if (lifecyclePhase == null) { mode = "NORMAL"; } else { mode = lifecyclePhase.contains("test") ? "TEST" : "NORMAL"; } } final LaunchMode launchMode = Laun...
class TrackConfigChangesMojo extends QuarkusBootstrapMojo { /** * Skip the execution of this mojo */ @Parameter(defaultValue = "false", property = "quarkus.track-config-changes.skip") boolean skip = false; @Parameter(property = "launchMode") String mode; @Parameter(property = "quarkus.track-config-changes.outputDirecto...
class TrackConfigChangesMojo extends QuarkusBootstrapMojo { /** * Skip the execution of this mojo */ @Parameter(defaultValue = "false", property = "quarkus.track-config-changes.skip") boolean skip = false; @Parameter(property = "launchMode") String mode; @Parameter(property = "quarkus.track-config-changes.outputDirecto...
```suggestion if (!extracted.isPresent()) { ```
public Rule build() { return logicalFilter().when(filter -> !filter.isSingleTableExpressionExtracted()).then(filter -> { List<Expression> conjuncts = ExpressionUtils.extractConjunction(filter.getPredicates()) .stream().collect(Collectors.toList()); List<Expression> redundants = Lists.newArrayList(); for (Expression con...
if (! extracted.isPresent()) {
public Rule build() { return logicalFilter().whenNot(LogicalFilter::isSingleTableExpressionExtracted).then(filter -> { List<Expression> conjuncts = ExpressionUtils.extractConjunction(filter.getPredicates()) .stream().collect(Collectors.toList()); List<Expression> redundants = Lists.newArrayList(); for (Expression conju...
class ExtractSingleTableExpressionFromDisjunction extends OneRewriteRuleFactory { @Override private String getSlotQualifierAsString(SlotReference slotReference) { StringBuilder builder = new StringBuilder(); for (String q : slotReference.getQualifier()) { builder.append(q).append('.'); } return builder.toString(); } pr...
class ExtractSingleTableExpressionFromDisjunction extends OneRewriteRuleFactory { @Override private String getSlotQualifierAsString(SlotReference slotReference) { StringBuilder builder = new StringBuilder(); for (String q : slotReference.getQualifier()) { builder.append(q).append('.'); } return builder.toString(); } pr...
Personal preference: I would prefer that to be ```suggestion reflection.produce(new ReflectiveClassBuildItem(true, true, "com.sun.jndi.ldap.LdapCtxFactory")); ```
void registerForReflection(BuildProducer<ReflectiveClassBuildItem> reflection) { reflection.produce(new ReflectiveClassBuildItem(true, true, QuarkusDirContextFactory.INITIAL_CONTEXT_FACTORY)); reflection.produce(new ReflectiveClassBuildItem(false, false, "com.sun.jndi.dns.DnsContextFactory")); reflection.produce(new Re...
reflection.produce(new ReflectiveClassBuildItem(true, true, QuarkusDirContextFactory.INITIAL_CONTEXT_FACTORY));
void registerForReflection(BuildProducer<ReflectiveClassBuildItem> reflection) { reflection.produce(new ReflectiveClassBuildItem(true, true, QuarkusDirContextFactory.INITIAL_CONTEXT_FACTORY)); reflection.produce(new ReflectiveClassBuildItem(false, false, "com.sun.jndi.dns.DnsContextFactory")); reflection.produce(new Re...
class ElytronSecurityLdapProcessor { @BuildStep() FeatureBuildItem feature() { return new FeatureBuildItem(Feature.SECURITY_LDAP); } @BuildStep AllowJNDIBuildItem enableJndi() { return new AllowJNDIBuildItem(); } /** * Check to see if a LdapRealmConfig was specified and enabled and create a * {@linkplain org.wildfly.se...
class ElytronSecurityLdapProcessor { @BuildStep() FeatureBuildItem feature() { return new FeatureBuildItem(Feature.SECURITY_LDAP); } @BuildStep AllowJNDIBuildItem enableJndi() { return new AllowJNDIBuildItem(); } /** * Check to see if a LdapRealmConfig was specified and enabled and create a * {@linkplain org.wildfly.se...
```suggestion "or by adding '%s' or '%s' to your Quarkus configuration", ```
private void configureBaseUrl(RestClientBuilder builder) { Optional<String> propertyOptional = getOptionalProperty(REST_URI_FORMAT, String.class); if (!propertyOptional.isPresent()) { propertyOptional = getOptionalProperty(REST_URL_FORMAT, String.class); } if (((baseUriFromAnnotation == null) || baseUriFromAnnotation.i...
"or by adding '%s' or '%s'to your Quarkus configuration",
private void configureBaseUrl(RestClientBuilder builder) { Optional<String> propertyOptional = getOptionalProperty(REST_URI_FORMAT, String.class); if (!propertyOptional.isPresent()) { propertyOptional = getOptionalProperty(REST_URL_FORMAT, String.class); } if (((baseUriFromAnnotation == null) || baseUriFromAnnotation.i...
class RestClientBase { public static final String MP_REST = "mp-rest"; public static final String REST_URL_FORMAT = "%s/" + MP_REST + "/url"; public static final String REST_URI_FORMAT = "%s/" + MP_REST + "/uri"; public static final String REST_CONNECT_TIMEOUT_FORMAT = "%s/" + MP_REST + "/connectTimeout"; public static...
class RestClientBase { public static final String MP_REST = "mp-rest"; public static final String REST_URL_FORMAT = "%s/" + MP_REST + "/url"; public static final String REST_URI_FORMAT = "%s/" + MP_REST + "/uri"; public static final String REST_CONNECT_TIMEOUT_FORMAT = "%s/" + MP_REST + "/connectTimeout"; public static...
What should I do instead?
private Table toCalciteTable(String tableName, Entry entry) { if (entry.getSchema().getColumnsCount() == 0) { throw new UnsupportedOperationException( "Entry doesn't have a schema. Please attach a schema to '" + tableName + "' in Data Catalog: " + entry.toString()); } Schema schema = SchemaUtils.fromDataCatalog(entry.g...
return tableBuilder.get().schema(schema).name(tableName).build();
private Table toCalciteTable(String tableName, Entry entry) { if (entry.getSchema().getColumnsCount() == 0) { throw new UnsupportedOperationException( "Entry doesn't have a schema. Please attach a schema to '" + tableName + "' in Data Catalog: " + entry.toString()); } Schema schema = SchemaUtils.fromDataCatalog(entry.g...
class DataCatalogTableProvider extends FullNameTableProvider { private static final TableFactory PUBSUB_TABLE_FACTORY = new PubsubTableFactory(); private static final TableFactory GCS_TABLE_FACTORY = new GcsTableFactory(); private final Map<String, TableProvider> delegateProviders; private final DataCatalogBlockingStub...
class DataCatalogTableProvider extends FullNameTableProvider { private static final TableFactory PUBSUB_TABLE_FACTORY = new PubsubTableFactory(); private static final TableFactory GCS_TABLE_FACTORY = new GcsTableFactory(); private static final Map<String, TableProvider> DELEGATE_PROVIDERS = Stream.of(new PubsubJsonTabl...
I just added `CallCountOutputStream` to test the proper number of call count.
public void testWrite() throws IOException { ByteArrayOutputStream expected = new ByteArrayOutputStream(); ByteArrayOutputStream actual = new ByteArrayOutputStream(); UnownedOutputStream osActual = new UnownedOutputStream(actual); byte[] data0 = "Hello World!".getBytes(StandardCharsets.UTF_8); byte[] data1 = "Welcome!"...
assertArrayEquals(expected.toByteArray(), actual.toByteArray());
public void testWrite() throws IOException { CallCountOutputStream fsCount = new CallCountOutputStream(); FilterOutputStream fs = new FilterOutputStream(fsCount); CallCountOutputStream osCount = new CallCountOutputStream(); UnownedOutputStream os = new UnownedOutputStream(osCount); byte[] data = "Hello World!".getBytes...
class UnownedOutputStreamTest { @Rule public ExpectedException expectedException = ExpectedException.none(); private ByteArrayOutputStream baos; private UnownedOutputStream os; @Before public void setup() { baos = new ByteArrayOutputStream(); os = new UnownedOutputStream(baos); } @Test public void testHashCodeEqualsAnd...
class UnownedOutputStreamTest { @Rule public ExpectedException expectedException = ExpectedException.none(); private ByteArrayOutputStream baos; private UnownedOutputStream os; @Before public void setup() { baos = new ByteArrayOutputStream(); os = new UnownedOutputStream(baos); } @Test public void testHashCodeEqualsAnd...
I'm not sure it would be good to emit info logs for split requests. These are per subtask and there could be hundreds at a time depending on parallelism.
public void handleSplitRequest(int subtaskId, String requesterHostname) { LOG.debug( "handleSplitRequest subtask={} sourceIndex={} pendingSplits={}", subtaskId, currentSourceIndex, pendingSplits); assignPendingSplits(subtaskId); currentEnumerator.handleSplitRequest(subtaskId, requesterHostname); }
pendingSplits);
public void handleSplitRequest(int subtaskId, String requesterHostname) { LOG.debug( "handleSplitRequest subtask={} sourceIndex={} pendingSplits={}", subtaskId, currentSourceIndex, pendingSplits); Preconditions.checkState(pendingSplits.isEmpty() || !pendingSplits.containsKey(subtaskId)); currentEnumerator.handleSplitRe...
class HybridSourceSplitEnumerator<SplitT extends SourceSplit> implements SplitEnumerator<HybridSourceSplit<SplitT>, HybridSourceEnumeratorState> { private static final Logger LOG = LoggerFactory.getLogger(HybridSourceSplitEnumerator.class); private final SplitEnumeratorContext<HybridSourceSplit> context; private final ...
class HybridSourceSplitEnumerator implements SplitEnumerator<HybridSourceSplit, HybridSourceEnumeratorState> { private static final Logger LOG = LoggerFactory.getLogger(HybridSourceSplitEnumerator.class); private final SplitEnumeratorContext<HybridSourceSplit> context; private final List<HybridSource.SourceListEntry> s...
I'm not sure that would simplify anything in this case. Am I missing something though?
protected static void assertEventRoutesEqual(EventRoute expected, String expectedId, EventRoute actual) { assertEquals(expectedId, actual.getId()); assertEquals(expected.getEndpointName(), actual.getEndpointName()); assertEquals(expected.getFilter(), actual.getFilter()); }
assertEquals(expectedId, actual.getId());
protected static void assertEventRoutesEqual(EventRoute expected, String expectedId, EventRoute actual) { assertEquals(expectedId, actual.getId()); assertEquals(expected.getEndpointName(), actual.getEndpointName()); assertEquals(expected.getFilter(), actual.getFilter()); }
class EventRoutesTestBase extends DigitalTwinsTestBase { private final ClientLogger logger = new ClientLogger(EventRoutesTestBase.class); static final String EVENT_ROUTE_ENDPOINT_NAME = "someEventHubEndpoint"; static final String FILTER = "$eventType = 'DigitalTwinTelemetryMessages' or $eventType = 'DigitalTwinLifecycl...
class EventRoutesTestBase extends DigitalTwinsTestBase { private final ClientLogger logger = new ClientLogger(EventRoutesTestBase.class); static final String EVENT_ROUTE_ENDPOINT_NAME = "someEventHubEndpoint"; static final String FILTER = "$eventType = 'DigitalTwinTelemetryMessages' or $eventType = 'DigitalTwinLifecycl...
`values::add` instead of `Consumer<Elements>`? Your choice if you still want to provide the StreamObserverClientFactory type instead of a lambda as visible in the old code.
public void testOutboundObserver() { final Collection<BeamFnApi.Elements> values = new ArrayList<>(); BeamFnDataGrpcMultiplexer multiplexer = new BeamFnDataGrpcMultiplexer( DESCRIPTOR, new StreamObserverClientFactory<Elements, Elements>() { @Override public StreamObserver<Elements> outboundObserverFor( StreamObserver<E...
return TestStreams.withOnNext(
public void testOutboundObserver() { final Collection<BeamFnApi.Elements> values = new ArrayList<>(); BeamFnDataGrpcMultiplexer multiplexer = new BeamFnDataGrpcMultiplexer( DESCRIPTOR, inboundObserver -> TestStreams.withOnNext(values::add).build()); multiplexer.getOutboundObserver().onNext(ELEMENTS); assertThat(values,...
class BeamFnDataGrpcMultiplexerTest { private static final Endpoints.ApiServiceDescriptor DESCRIPTOR = Endpoints.ApiServiceDescriptor.newBuilder().setUrl("test").build(); private static final LogicalEndpoint OUTPUT_LOCATION = LogicalEndpoint.of( "777L", BeamFnApi.Target.newBuilder() .setName("name") .setPrimitiveTransf...
class BeamFnDataGrpcMultiplexerTest { private static final Endpoints.ApiServiceDescriptor DESCRIPTOR = Endpoints.ApiServiceDescriptor.newBuilder().setUrl("test").build(); private static final LogicalEndpoint OUTPUT_LOCATION = LogicalEndpoint.of( "777L", BeamFnApi.Target.newBuilder() .setName("name") .setPrimitiveTransf...
Not true, `orElseGet` can only use in the direct value. In this situation, we need get configuration in the optional value, it is unnecessary to mock the configuration when rule absent.
public void init(final ShardingSphereDatabase database, final SQLStatement sqlStatement) { Optional<ShardingRule> rule = database.getRuleMetaData().findSingleRule(ShardingRule.class); data = rule.map(shardingRule -> ((ShardingRuleConfiguration) shardingRule.getConfiguration()).getScaling().entrySet().iterator()).orElse...
data = rule.map(shardingRule -> ((ShardingRuleConfiguration) shardingRule.getConfiguration()).getScaling().entrySet().iterator()).orElse(Collections.emptyIterator());
public void init(final ShardingSphereDatabase database, final SQLStatement sqlStatement) { Optional<ShardingRule> rule = database.getRuleMetaData().findSingleRule(ShardingRule.class); data = rule.map(optional -> ((ShardingRuleConfiguration) optional.getConfiguration()).getScaling().entrySet().iterator()).orElse(Collect...
class ShardingScalingRulesQueryResultSet implements DistSQLResultSet { private Iterator<Entry<String, OnRuleAlteredActionConfiguration>> data; @Override @Override public Collection<String> getColumnNames() { return Arrays.asList("name", "input", "output", "stream_channel", "completion_detector", "data_consistency_check...
class ShardingScalingRulesQueryResultSet implements DistSQLResultSet { private Iterator<Entry<String, OnRuleAlteredActionConfiguration>> data; @Override @Override public Collection<String> getColumnNames() { return Arrays.asList("name", "input", "output", "stream_channel", "completion_detector", "data_consistency_check...
Okay @wgy8283335, I'll put `DeleteStatement` as a member in the `MergeStatement`. So that we can visit `deleteWhereClause` and put it inside `DeleteStatement`'s `WhereSegment`.
public ASTNode visitMerge(final MergeContext ctx) { OracleMergeStatement result = new OracleMergeStatement(); result.setTarget((SimpleTableSegment) visit(ctx.intoClause())); result.setSource((TableSegment) visit(ctx.usingClause())); result.setExpr((ExpressionSegment) (visit(ctx.usingClause().expr()))); if (null != ctx....
result.getUpdate().setWhere((WhereSegment) visit(ctx.mergeUpdateClause().deleteWhereClause()));
public ASTNode visitMerge(final MergeContext ctx) { OracleMergeStatement result = new OracleMergeStatement(); result.setTarget((SimpleTableSegment) visit(ctx.intoClause())); result.setSource((TableSegment) visit(ctx.usingClause())); result.setExpr((ExpressionSegment) (visit(ctx.usingClause().expr()))); if (null != ctx....
class OracleDMLStatementSQLVisitor extends OracleStatementSQLVisitor implements DMLSQLVisitor, SQLStatementVisitor { public OracleDMLStatementSQLVisitor(final Properties props) { super(props); } @Override public ASTNode visitInsert(final InsertContext ctx) { if (null != ctx.insertSingleTable()) { OracleInsertStatement ...
class OracleDMLStatementSQLVisitor extends OracleStatementSQLVisitor implements DMLSQLVisitor, SQLStatementVisitor { public OracleDMLStatementSQLVisitor(final Properties props) { super(props); } @Override public ASTNode visitInsert(final InsertContext ctx) { if (null != ctx.insertSingleTable()) { OracleInsertStatement ...
Refactored to remove `requestStartTime` from the dataclass
private void flush() throws InterruptedException { RequestInfo requestInfo = createRequestInfo(); while (rateLimitingStrategy.shouldBlock(requestInfo)) { mailboxExecutor.yield(); requestInfo = createRequestInfo(); } List<RequestEntryT> batch = createNextAvailableBatch(requestInfo); int batchSize = batch.size(); if (bat...
requestInfo.setRequestStartTime(timestampOfRequest);
private void flush() throws InterruptedException { RequestInfo requestInfo = createRequestInfo(); while (rateLimitingStrategy.shouldBlock(requestInfo)) { mailboxExecutor.yield(); requestInfo = createRequestInfo(); } List<RequestEntryT> batch = createNextAvailableBatch(requestInfo); if (batch.size() == 0) { return; } in...
class AsyncSinkWriter<InputT, RequestEntryT extends Serializable> implements StatefulSink.StatefulSinkWriter<InputT, BufferedRequestState<RequestEntryT>> { private final MailboxExecutor mailboxExecutor; private final ProcessingTimeService timeService; /* The timestamp of the previous batch of records was sent from this...
class AsyncSinkWriter<InputT, RequestEntryT extends Serializable> implements StatefulSink.StatefulSinkWriter<InputT, BufferedRequestState<RequestEntryT>> { private final MailboxExecutor mailboxExecutor; private final ProcessingTimeService timeService; /* The timestamp of the previous batch of records was sent from this...
you are right.`SqlToOperationConverter:557` is the root cause. I find the test case does not covert the full change path (not the whole commit), such as I remove the line in `TableEnvironmentImpl#1052`, ``` final CatalogFactory factory = TableFactoryService.find( CatalogFactory.class, properties); ``` The test ...
public void testCreateCatalogFromUserClassLoader() throws Exception { final String className = "UserCatalogFactory"; URLClassLoader classLoader = ClassLoaderUtils.withRoot(temporaryFolder.newFolder()) .addResource("META-INF/services/org.apache.flink.table.factories.TableFactory", "UserCatalogFactory") .addClass( classN...
try (TemporaryClassLoaderContext context = TemporaryClassLoaderContext.of(classLoader)) {
public void testCreateCatalogFromUserClassLoader() throws Exception { final String className = "UserCatalogFactory"; URLClassLoader classLoader = ClassLoaderUtils.withRoot(temporaryFolder.newFolder()) .addResource("META-INF/services/org.apache.flink.table.factories.TableFactory", "UserCatalogFactory") .addClass( classN...
class CatalogITCase { @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); @Test public void testCreateCatalog() { String name = "c1"; TableEnvironment tableEnv = getTableEnvironment(); String ddl = String.format("create catalog %s with('type'='%s')", name, CATALOG_TYPE_VALUE_GENERIC_IN_MEMORY); tableE...
class CatalogITCase { @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); @Test public void testCreateCatalog() { String name = "c1"; TableEnvironment tableEnv = getTableEnvironment(); String ddl = String.format("create catalog %s with('type'='%s')", name, CATALOG_TYPE_VALUE_GENERIC_IN_MEMORY); tableE...
```suggestion throw new AnalysisException("Float or double can not used as a sort key, use decimal instead."); ```
private void analyzeOrderByClause() throws AnalysisException { if (selectStmt.getOrderByElements() == null) { /** * The keys type of Materialized view is aggregation. * All of group by columns are keys of materialized view. */ if (mvKeysType == KeysType.AGG_KEYS) { for (MVColumnItem mvColumnItem : mvColumnItemList) { i...
throw new AnalysisException("Float or double can not used as a key, use decimal instead.");
private void analyzeOrderByClause() throws AnalysisException { if (selectStmt.getOrderByElements() == null) { supplyOrderColumn(); return; } List<OrderByElement> orderByElements = selectStmt.getOrderByElements(); if (orderByElements.size() > mvColumnItemList.size()) { throw new AnalysisException("The number of columns ...
class CreateMaterializedViewStmt extends DdlStmt { public static final String MATERIALIZED_VIEW_NAME_PRFIX = "__doris_materialized_view_"; private String mvName; private SelectStmt selectStmt; private Map<String, String> properties; private int beginIndexOfAggregation = -1; /** * origin stmt: select k1, k2, v1, sum(v2)...
class CreateMaterializedViewStmt extends DdlStmt { public static final String MATERIALIZED_VIEW_NAME_PRFIX = "__doris_materialized_view_"; private String mvName; private SelectStmt selectStmt; private Map<String, String> properties; private int beginIndexOfAggregation = -1; /** * origin stmt: select k1, k2, v1, sum(v2)...
Exactly, so if the prefetch is enabled and drain couldn't emit downstream, we break from the while-loop in line 216; that way, we don't consider the message as consumed, hence increment won't happen.
private void drainQueue() { if (isTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = bufferMessages.isEmpty(); SynchronousReceiveWork currentDownstream = null; while (numberRequested != 0L && !isEmpty) { if (isTerminated()) { break; } long numberConsumed = 0L; while (numberRequested...
numberConsumed++;
private void drainQueue() { if (isTerminated()) { return; } long numberRequested = REQUESTED.get(this); boolean isEmpty = bufferMessages.isEmpty(); SynchronousReceiveWork currentDownstream = null; while (numberRequested != 0L && !isEmpty) { if (isTerminated()) { break; } long numberConsumed = 0L; while (numberRequested...
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private final ClientLogger logger = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLi...
class SynchronousMessageSubscriber extends BaseSubscriber<ServiceBusReceivedMessage> { private final ClientLogger logger = new ClientLogger(SynchronousMessageSubscriber.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final AtomicInteger wip = new AtomicInteger(); private final ConcurrentLi...