comment
stringlengths 22
3.02k
| method_body
stringlengths 46
368k
| target_code
stringlengths 0
181
| method_body_after
stringlengths 12
368k
| context_before
stringlengths 11
634k
| context_after
stringlengths 11
632k
|
|---|---|---|---|---|---|
Good catch. Yes, if an external table provider implements the interface, then it will get loaded here, I haven't thought about it. I can think of multiple ways to handle it. One is, as you suggested, to have a separate interface, another is to introduce something like a "@DoNotAutoLoad" annotation. I think it can be addressed separately.
|
public Schema create(SchemaPlus parentSchema, String name, Map<String, Object> operand) {
return this;
}
|
return this;
|
public Schema create(SchemaPlus parentSchema, String name, Map<String, Object> operand) {
return this;
}
|
class LoadAllProviders extends InitialEmptySchema implements SchemaFactory {
@Override
public TableProvider toTableProvider(JdbcConnection connection) {
MetaStore metaStore = new InMemoryMetaStore();
for (TableProvider provider :
ServiceLoader.load(TableProvider.class, getClass().getClassLoader())) {
metaStore.registerProvider(provider);
}
return metaStore;
}
/** This is what Calcite calls to create an instance of the default top level schema. */
@Override
}
|
class AllProviders extends InitialEmptySchema implements SchemaFactory {
/**
* We call this in {@link
* by Calcite to a configured table provider. At this point we have a connection open and can
* use it to configure Beam schemas, e.g. with pipeline options.
*
* <p><i>Note:</i> this loads ALL available table providers marked with
* {@code @AutoService(TableProvider.class)}
*/
@Override
public TableProvider getTableProvider() {
MetaStore metaStore = new InMemoryMetaStore();
for (TableProvider provider :
ServiceLoader.load(TableProvider.class, getClass().getClassLoader())) {
metaStore.registerProvider(provider);
}
return metaStore;
}
/** This is what Calcite calls to create an instance of the default top level schema. */
@Override
}
|
Sorry I missed this but I think we need something better here. Because the exception is caught globally not only for the close() now and it could happen in the code that is in the try. So I would rather have an IllegalStateException with a proper message and the cause.
|
private List<String> getChangeLogs(LiquibaseMongodbBuildTimeConfig liquibaseBuildConfig) {
ChangeLogParameters changeLogParameters = new ChangeLogParameters();
ChangeLogParserFactory changeLogParserFactory = ChangeLogParserFactory.getInstance();
try (var classLoaderResourceAccessor = new ClassLoaderResourceAccessor(
Thread.currentThread().getContextClassLoader())) {
Set<String> resources = new LinkedHashSet<>(
findAllChangeLogFiles(liquibaseBuildConfig.changeLog, changeLogParserFactory,
classLoaderResourceAccessor, changeLogParameters));
LOGGER.debugf("Liquibase changeLogs: %s", resources);
return new ArrayList<>(resources);
} catch (Exception ex) {
throw new AssertionError(ex);
}
}
|
throw new AssertionError(ex);
|
private List<String> getChangeLogs(LiquibaseMongodbBuildTimeConfig liquibaseBuildConfig) {
ChangeLogParameters changeLogParameters = new ChangeLogParameters();
ChangeLogParserFactory changeLogParserFactory = ChangeLogParserFactory.getInstance();
try (var classLoaderResourceAccessor = new ClassLoaderResourceAccessor(
Thread.currentThread().getContextClassLoader())) {
Set<String> resources = new LinkedHashSet<>(
findAllChangeLogFiles(liquibaseBuildConfig.changeLog, changeLogParserFactory,
classLoaderResourceAccessor, changeLogParameters));
LOGGER.debugf("Liquibase changeLogs: %s", resources);
return new ArrayList<>(resources);
} catch (Exception ex) {
throw new IllegalStateException(
"Error while loading the liquibase changelogs: %s".formatted(ex.getMessage()), ex);
}
}
|
class for reflection while also registering fields for reflection
addService(services, reflective, liquibase.precondition.Precondition.class.getName(), true);
addService(services, reflective, liquibase.command.CommandStep.class.getName(), false,
"liquibase.command.core.StartH2CommandStep");
var dependencies = curateOutcome.getApplicationModel().getDependencies();
resource.produce(NativeImageResourceBuildItem.ofDependencyResources(
dependencies, LIQUIBASE_ARTIFACT, LIQUIBASE_PROPERTIES));
resource.produce(NativeImageResourceBuildItem.ofDependencyResources(
dependencies, LIQUIBASE_ARTIFACT, LIQUIBASE_DB_CHANGELOG_XSD));
resource.produce(NativeImageResourceBuildItem.ofDependencyResources(
dependencies, LIQUIBASE_MONGODB_ARTIFACT, LIQUIBASE_MONGODB_CHANGELOG_XSD));
resource.produce(NativeImageResourceBuildItem.ofDependencyResources(
dependencies, LIQUIBASE_MONGODB_ARTIFACT, LIQUIBASE_MONGODB_PARSER_XSD));
services.produce(ServiceProviderBuildItem.allProvidersOfDependency(dependencies, LIQUIBASE_ARTIFACT));
services.produce(ServiceProviderBuildItem.allProvidersOfDependency(dependencies, LIQUIBASE_MONGODB_ARTIFACT));
resourceBundle.produce(new NativeImageResourceBundleBuildItem("liquibase/i18n/liquibase-core"));
resourceBundle.produce(new NativeImageResourceBundleBuildItem("liquibase/i18n/liquibase-mongo"));
}
private void addService(BuildProducer<ServiceProviderBuildItem> services,
BuildProducer<ReflectiveClassBuildItem> reflective, String serviceClassName,
boolean shouldRegisterFieldForReflection, String... excludedImpls) {
try {
String service = ServiceProviderBuildItem.SPI_ROOT + serviceClassName;
Set<String> implementations = ServiceUtil.classNamesNamedIn(Thread.currentThread().getContextClassLoader(),
service);
if (excludedImpls.length > 0) {
implementations = new HashSet<>(implementations);
Arrays.asList(excludedImpls).forEach(implementations::remove);
}
services.produce(new ServiceProviderBuildItem(serviceClassName, implementations.toArray(new String[0])));
reflective.produce(ReflectiveClassBuildItem.builder(
implementations.toArray(new String[0]))
.constructors().methods().fields(shouldRegisterFieldForReflection).build());
} catch (IOException ex) {
throw new IllegalStateException(ex);
}
}
|
class for reflection while also registering fields for reflection
addService(services, reflective, liquibase.precondition.Precondition.class.getName(), true);
addService(services, reflective, liquibase.command.CommandStep.class.getName(), false,
"liquibase.command.core.StartH2CommandStep");
var dependencies = curateOutcome.getApplicationModel().getRuntimeDependencies();
resource.produce(NativeImageResourceBuildItem.ofDependencyResources(
dependencies, LIQUIBASE_ARTIFACT, LIQUIBASE_RESOURCE_FILTER));
resource.produce(NativeImageResourceBuildItem.ofDependencyResources(
dependencies, LIQUIBASE_MONGODB_ARTIFACT, LIQUIBASE_MONGODB_RESOURCE_FILTER));
services.produce(ServiceProviderBuildItem.allProvidersOfDependencies(
dependencies, List.of(LIQUIBASE_ARTIFACT, LIQUIBASE_MONGODB_ARTIFACT)));
resourceBundle.produce(new NativeImageResourceBundleBuildItem("liquibase/i18n/liquibase-core"));
resourceBundle.produce(new NativeImageResourceBundleBuildItem("liquibase/i18n/liquibase-mongo"));
}
private void addService(BuildProducer<ServiceProviderBuildItem> services,
BuildProducer<ReflectiveClassBuildItem> reflective, String serviceClassName,
boolean shouldRegisterFieldForReflection, String... excludedImpls) {
try {
String service = ServiceProviderBuildItem.SPI_ROOT + serviceClassName;
Set<String> implementations = ServiceUtil.classNamesNamedIn(Thread.currentThread().getContextClassLoader(),
service);
if (excludedImpls.length > 0) {
implementations = new HashSet<>(implementations);
Arrays.asList(excludedImpls).forEach(implementations::remove);
}
services.produce(new ServiceProviderBuildItem(serviceClassName, implementations.toArray(new String[0])));
reflective.produce(ReflectiveClassBuildItem.builder(
implementations.toArray(new String[0]))
.constructors().methods().fields(shouldRegisterFieldForReflection).build());
} catch (IOException ex) {
throw new IllegalStateException(ex);
}
}
|
https://github.com/reactor/reactor-core/blob/6058a391f614de6213fb85970272fc5b342bd181/reactor-core/src/main/java/reactor/util/concurrent/Queues.java#L88 Looks like that'd be 256 by default. Pretty high. We limit number of buffers. Shouldn't max concurrency == numBuffers ? Or maxConcurrency be <= numbBuffers with numBuffers being default ?
|
public static ParallelTransferOptions populateAndApplyDefaults(ParallelTransferOptions other) {
other = other == null ? new ParallelTransferOptions(null, null, null) : other;
return new ParallelTransferOptions(
other.getBlockSize() == null ? Integer.valueOf(BlobAsyncClient.BLOB_DEFAULT_UPLOAD_BLOCK_SIZE)
: other.getBlockSize(),
other.getNumBuffers() == null ? Integer.valueOf(BlobAsyncClient.BLOB_DEFAULT_NUMBER_OF_BUFFERS)
: other.getNumBuffers(),
other.getProgressReceiver(),
other.getMaxSingleUploadSize() == null ? Integer.valueOf(BlockBlobAsyncClient.MAX_UPLOAD_BLOB_BYTES)
: other.getMaxSingleUploadSize(),
other.getMaxConcurrency() == null ? Integer.valueOf(Queues.SMALL_BUFFER_SIZE) : other.getMaxConcurrency());
}
|
other.getMaxConcurrency() == null ? Integer.valueOf(Queues.SMALL_BUFFER_SIZE) : other.getMaxConcurrency());
|
public static ParallelTransferOptions populateAndApplyDefaults(ParallelTransferOptions other) {
other = other == null ? new ParallelTransferOptions(null, null, null) : other;
return new ParallelTransferOptions(
other.getBlockSize() == null ? Integer.valueOf(BlobAsyncClient.BLOB_DEFAULT_UPLOAD_BLOCK_SIZE)
: other.getBlockSize(),
other.getMaxConcurrency() == null ? Integer.valueOf(BlobAsyncClient.BLOB_DEFAULT_NUMBER_OF_BUFFERS)
: other.getMaxConcurrency(),
other.getProgressReceiver(),
other.getMaxSingleUploadSize() == null ? Integer.valueOf(BlockBlobAsyncClient.MAX_UPLOAD_BLOB_BYTES)
: other.getMaxSingleUploadSize());
}
|
class ModelHelper {
/**
* Determines whether or not the passed authority is IP style, that is, it is of the format {@code <host>:<port>}.
*
* @param authority The authority of a URL.
* @throws MalformedURLException If the authority is malformed.
* @return Whether the authority is IP style.
*/
public static boolean determineAuthorityIsIpStyle(String authority) throws MalformedURLException {
return new URL("http:
}
/**
* Fills in default values for a ParallelTransferOptions where no value has been set. This will construct a new
* object for safety.
*
* @param other The options to fill in defaults.
* @return An object with defaults filled in for null values in the original.
*/
/**
* Transforms a blob type into a common type.
* @param blobOptions {@link ParallelTransferOptions}
* @return {@link com.azure.storage.common.ParallelTransferOptions}
*/
public static com.azure.storage.common.ParallelTransferOptions wrapBlobOptions(
ParallelTransferOptions blobOptions) {
Integer blockSize = blobOptions.getBlockSize();
Integer numBuffers = blobOptions.getNumBuffers();
com.azure.storage.common.ProgressReceiver wrappedReceiver = blobOptions.getProgressReceiver() == null
? null
: blobOptions.getProgressReceiver()::reportProgress;
Integer maxSingleUploadSize = blobOptions.getMaxSingleUploadSize();
return new com.azure.storage.common.ParallelTransferOptions(blockSize, numBuffers, wrappedReceiver,
maxSingleUploadSize);
}
}
|
class ModelHelper {
/**
* Determines whether or not the passed authority is IP style, that is, it is of the format {@code <host>:<port>}.
*
* @param authority The authority of a URL.
* @throws MalformedURLException If the authority is malformed.
* @return Whether the authority is IP style.
*/
public static boolean determineAuthorityIsIpStyle(String authority) throws MalformedURLException {
return new URL("http:
}
/**
* Fills in default values for a ParallelTransferOptions where no value has been set. This will construct a new
* object for safety.
*
* @param other The options to fill in defaults.
* @return An object with defaults filled in for null values in the original.
*/
/**
* Transforms a blob type into a common type.
* @param blobOptions {@link ParallelTransferOptions}
* @return {@link com.azure.storage.common.ParallelTransferOptions}
*/
public static com.azure.storage.common.ParallelTransferOptions wrapBlobOptions(
ParallelTransferOptions blobOptions) {
Integer blockSize = blobOptions.getBlockSize();
Integer numBuffers = blobOptions.getMaxConcurrency();
com.azure.storage.common.ProgressReceiver wrappedReceiver = blobOptions.getProgressReceiver() == null
? null
: blobOptions.getProgressReceiver()::reportProgress;
Integer maxSingleUploadSize = blobOptions.getMaxSingleUploadSize();
return new com.azure.storage.common.ParallelTransferOptions(blockSize, numBuffers, wrappedReceiver,
maxSingleUploadSize);
}
}
|
Needed because ttl is milliseconds as integer, so it can't support time to live of more than 50 days. That's why we had to change it.
|
public static Message convertAmqpMessageToBrokeredMessage(org.apache.qpid.proton.message.Message amqpMessage, byte[] deliveryTag) {
Message brokeredMessage;
Section body = amqpMessage.getBody();
if (body != null) {
if (body instanceof Data) {
Binary messageData = ((Data) body).getValue();
brokeredMessage = new Message(Utils.fromBinay(messageData.getArray()));
} else if (body instanceof AmqpValue) {
Object messageData = ((AmqpValue) body).getValue();
brokeredMessage = new Message(MessageBody.fromValueData(messageData));
} else if (body instanceof AmqpSequence) {
List<Object> messageData = ((AmqpSequence) body).getValue();
brokeredMessage = new Message(Utils.fromSequence(messageData));
} else {
brokeredMessage = new Message();
}
} else {
brokeredMessage = new Message();
}
ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties();
if (applicationProperties != null) {
brokeredMessage.setProperties(applicationProperties.getValue());
}
brokeredMessage.setDeliveryCount(amqpMessage.getDeliveryCount() + 1);
long ttlMillis = amqpMessage.getTtl();
if (ttlMillis > 0l) {
brokeredMessage.setTimeToLive(Duration.ofMillis(ttlMillis));
}
if (amqpMessage.getCreationTime() != 0l && amqpMessage.getExpiryTime() != 0l) {
ttlMillis = amqpMessage.getExpiryTime() - amqpMessage.getCreationTime();
if (ttlMillis > 0l) {
brokeredMessage.setTimeToLive(Duration.ofMillis(ttlMillis));
}
}
Object messageId = amqpMessage.getMessageId();
if (messageId != null) {
brokeredMessage.setMessageId(messageId.toString());
}
brokeredMessage.setContentType(amqpMessage.getContentType());
Object correlationId = amqpMessage.getCorrelationId();
if (correlationId != null) {
brokeredMessage.setCorrelationId(correlationId.toString());
}
Properties properties = amqpMessage.getProperties();
if (properties != null) {
brokeredMessage.setTo(properties.getTo());
}
brokeredMessage.setLabel(amqpMessage.getSubject());
brokeredMessage.setReplyTo(amqpMessage.getReplyTo());
brokeredMessage.setReplyToSessionId(amqpMessage.getReplyToGroupId());
brokeredMessage.setSessionId(amqpMessage.getGroupId());
MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations();
if (messageAnnotations != null) {
Map<Symbol, Object> messageAnnotationsMap = messageAnnotations.getValue();
if (messageAnnotationsMap != null) {
for (Map.Entry<Symbol, Object> entry : messageAnnotationsMap.entrySet()) {
String entryName = entry.getKey().toString();
switch (entryName) {
case ClientConstants.ENQUEUEDTIMEUTCNAME:
brokeredMessage.setEnqueuedTimeUtc(((Date) entry.getValue()).toInstant());
break;
case ClientConstants.SCHEDULEDENQUEUETIMENAME:
brokeredMessage.setScheduledEnqueueTimeUtc(((Date) entry.getValue()).toInstant());
break;
case ClientConstants.SEQUENCENUBMERNAME:
brokeredMessage.setSequenceNumber((long) entry.getValue());
break;
case ClientConstants.LOCKEDUNTILNAME:
brokeredMessage.setLockedUntilUtc(((Date) entry.getValue()).toInstant());
break;
case ClientConstants.PARTITIONKEYNAME:
brokeredMessage.setPartitionKey((String) entry.getValue());
break;
case ClientConstants.VIAPARTITIONKEYNAME:
brokeredMessage.setViaPartitionKey((String) entry.getValue());
break;
case ClientConstants.DEADLETTERSOURCENAME:
brokeredMessage.setDeadLetterSource((String) entry.getValue());
break;
default:
break;
}
}
}
}
if (deliveryTag != null && deliveryTag.length == ClientConstants.LOCKTOKENSIZE) {
UUID lockToken = Util.convertDotNetBytesToUUID(deliveryTag);
brokeredMessage.setLockToken(lockToken);
} else {
brokeredMessage.setLockToken(ClientConstants.ZEROLOCKTOKEN);
}
brokeredMessage.setDeliveryTag(deliveryTag);
return brokeredMessage;
}
|
ttlMillis = amqpMessage.getExpiryTime() - amqpMessage.getCreationTime();
|
public static Message convertAmqpMessageToBrokeredMessage(org.apache.qpid.proton.message.Message amqpMessage, byte[] deliveryTag) {
Message brokeredMessage;
Section body = amqpMessage.getBody();
if (body != null) {
if (body instanceof Data) {
Binary messageData = ((Data) body).getValue();
brokeredMessage = new Message(Utils.fromBinay(messageData.getArray()));
} else if (body instanceof AmqpValue) {
Object messageData = ((AmqpValue) body).getValue();
brokeredMessage = new Message(MessageBody.fromValueData(messageData));
} else if (body instanceof AmqpSequence) {
List<Object> messageData = ((AmqpSequence) body).getValue();
brokeredMessage = new Message(Utils.fromSequence(messageData));
} else {
brokeredMessage = new Message();
}
} else {
brokeredMessage = new Message();
}
ApplicationProperties applicationProperties = amqpMessage.getApplicationProperties();
if (applicationProperties != null) {
brokeredMessage.setProperties(applicationProperties.getValue());
}
brokeredMessage.setDeliveryCount(amqpMessage.getDeliveryCount() + 1);
long ttlMillis = amqpMessage.getTtl();
if (ttlMillis > 0l) {
brokeredMessage.setTimeToLive(Duration.ofMillis(ttlMillis));
}
if (amqpMessage.getCreationTime() != 0l && amqpMessage.getExpiryTime() != 0l) {
ttlMillis = amqpMessage.getExpiryTime() - amqpMessage.getCreationTime();
if (ttlMillis > 0l) {
brokeredMessage.setTimeToLive(Duration.ofMillis(ttlMillis));
}
}
Object messageId = amqpMessage.getMessageId();
if (messageId != null) {
brokeredMessage.setMessageId(messageId.toString());
}
brokeredMessage.setContentType(amqpMessage.getContentType());
Object correlationId = amqpMessage.getCorrelationId();
if (correlationId != null) {
brokeredMessage.setCorrelationId(correlationId.toString());
}
Properties properties = amqpMessage.getProperties();
if (properties != null) {
brokeredMessage.setTo(properties.getTo());
}
brokeredMessage.setLabel(amqpMessage.getSubject());
brokeredMessage.setReplyTo(amqpMessage.getReplyTo());
brokeredMessage.setReplyToSessionId(amqpMessage.getReplyToGroupId());
brokeredMessage.setSessionId(amqpMessage.getGroupId());
MessageAnnotations messageAnnotations = amqpMessage.getMessageAnnotations();
if (messageAnnotations != null) {
Map<Symbol, Object> messageAnnotationsMap = messageAnnotations.getValue();
if (messageAnnotationsMap != null) {
for (Map.Entry<Symbol, Object> entry : messageAnnotationsMap.entrySet()) {
String entryName = entry.getKey().toString();
switch (entryName) {
case ClientConstants.ENQUEUEDTIMEUTCNAME:
brokeredMessage.setEnqueuedTimeUtc(((Date) entry.getValue()).toInstant());
break;
case ClientConstants.SCHEDULEDENQUEUETIMENAME:
brokeredMessage.setScheduledEnqueueTimeUtc(((Date) entry.getValue()).toInstant());
break;
case ClientConstants.SEQUENCENUBMERNAME:
brokeredMessage.setSequenceNumber((long) entry.getValue());
break;
case ClientConstants.LOCKEDUNTILNAME:
brokeredMessage.setLockedUntilUtc(((Date) entry.getValue()).toInstant());
break;
case ClientConstants.PARTITIONKEYNAME:
brokeredMessage.setPartitionKey((String) entry.getValue());
break;
case ClientConstants.VIAPARTITIONKEYNAME:
brokeredMessage.setViaPartitionKey((String) entry.getValue());
break;
case ClientConstants.DEADLETTERSOURCENAME:
brokeredMessage.setDeadLetterSource((String) entry.getValue());
break;
default:
break;
}
}
}
}
if (deliveryTag != null && deliveryTag.length == ClientConstants.LOCKTOKENSIZE) {
UUID lockToken = Util.convertDotNetBytesToUUID(deliveryTag);
brokeredMessage.setLockToken(lockToken);
} else {
brokeredMessage.setLockToken(ClientConstants.ZEROLOCKTOKEN);
}
brokeredMessage.setDeliveryTag(deliveryTag);
return brokeredMessage;
}
|
class MessageConverter {
public static org.apache.qpid.proton.message.Message convertBrokeredMessageToAmqpMessage(Message brokeredMessage) {
org.apache.qpid.proton.message.Message amqpMessage = Proton.message();
MessageBody body = brokeredMessage.getMessageBody();
if (body != null) {
if (body.getBodyType() == MessageBodyType.VALUE) {
amqpMessage.setBody(new AmqpValue(body.getValueData()));
} else if (body.getBodyType() == MessageBodyType.SEQUENCE) {
amqpMessage.setBody(new AmqpSequence(Utils.getSequenceFromMessageBody(body)));
} else {
amqpMessage.setBody(new Data(new Binary(Utils.getDataFromMessageBody(body))));
}
}
if (brokeredMessage.getProperties() != null) {
amqpMessage.setApplicationProperties(new ApplicationProperties(brokeredMessage.getProperties()));
}
if (brokeredMessage.getTimeToLive() != null) {
long ttlMillis = brokeredMessage.getTimeToLive().toMillis();
if (ttlMillis > ClientConstants.UNSIGNED_INT_MAX_VALUE) {
ttlMillis = ClientConstants.UNSIGNED_INT_MAX_VALUE;
}
amqpMessage.setTtl(ttlMillis);
Instant creationTime = Instant.now();
Instant absoluteExpiryTime = creationTime.plus(brokeredMessage.getTimeToLive());
amqpMessage.setCreationTime(creationTime.toEpochMilli());
amqpMessage.setExpiryTime(absoluteExpiryTime.toEpochMilli());
}
amqpMessage.setMessageId(brokeredMessage.getMessageId());
amqpMessage.setContentType(brokeredMessage.getContentType());
amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId());
amqpMessage.setSubject(brokeredMessage.getLabel());
amqpMessage.getProperties().setTo(brokeredMessage.getTo());
amqpMessage.setReplyTo(brokeredMessage.getReplyTo());
amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId());
amqpMessage.setGroupId(brokeredMessage.getSessionId());
Map<Symbol, Object> messageAnnotationsMap = new HashMap<>();
if (brokeredMessage.getScheduledEnqueueTimeUtc() != null) {
messageAnnotationsMap.put(Symbol.valueOf(ClientConstants.SCHEDULEDENQUEUETIMENAME), Date.from(brokeredMessage.getScheduledEnqueueTimeUtc()));
}
if (!StringUtil.isNullOrEmpty(brokeredMessage.getPartitionKey())) {
messageAnnotationsMap.put(Symbol.valueOf(ClientConstants.PARTITIONKEYNAME), brokeredMessage.getPartitionKey());
}
if (!StringUtil.isNullOrEmpty(brokeredMessage.getViaPartitionKey())) {
messageAnnotationsMap.put(Symbol.valueOf(ClientConstants.VIAPARTITIONKEYNAME), brokeredMessage.getViaPartitionKey());
}
amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap));
return amqpMessage;
}
public static Message convertAmqpMessageToBrokeredMessage(org.apache.qpid.proton.message.Message amqpMessage) {
return convertAmqpMessageToBrokeredMessage(amqpMessage, null);
}
public static Message convertAmqpMessageToBrokeredMessage(MessageWithDeliveryTag amqpMessageWithDeliveryTag) {
org.apache.qpid.proton.message.Message amqpMessage = amqpMessageWithDeliveryTag.getMessage();
byte[] deliveryTag = amqpMessageWithDeliveryTag.getDeliveryTag();
return convertAmqpMessageToBrokeredMessage(amqpMessage, deliveryTag);
}
public static Message convertAmqpMessageToBrokeredMessage(MessageWithLockToken amqpMessageWithLockToken) {
Message convertedMessage = convertAmqpMessageToBrokeredMessage(amqpMessageWithLockToken.getMessage(), null);
convertedMessage.setLockToken(amqpMessageWithLockToken.getLockToken());
return convertedMessage;
}
}
|
class MessageConverter {
public static org.apache.qpid.proton.message.Message convertBrokeredMessageToAmqpMessage(Message brokeredMessage) {
org.apache.qpid.proton.message.Message amqpMessage = Proton.message();
MessageBody body = brokeredMessage.getMessageBody();
if (body != null) {
if (body.getBodyType() == MessageBodyType.VALUE) {
amqpMessage.setBody(new AmqpValue(body.getValueData()));
} else if (body.getBodyType() == MessageBodyType.SEQUENCE) {
amqpMessage.setBody(new AmqpSequence(Utils.getSequenceFromMessageBody(body)));
} else {
amqpMessage.setBody(new Data(new Binary(Utils.getDataFromMessageBody(body))));
}
}
if (brokeredMessage.getProperties() != null) {
amqpMessage.setApplicationProperties(new ApplicationProperties(brokeredMessage.getProperties()));
}
if (brokeredMessage.getTimeToLive() != null) {
long ttlMillis = brokeredMessage.getTimeToLive().toMillis();
if (ttlMillis > ClientConstants.UNSIGNED_INT_MAX_VALUE) {
ttlMillis = ClientConstants.UNSIGNED_INT_MAX_VALUE;
}
amqpMessage.setTtl(ttlMillis);
Instant creationTime = Instant.now();
Instant absoluteExpiryTime = creationTime.plus(brokeredMessage.getTimeToLive());
amqpMessage.setCreationTime(creationTime.toEpochMilli());
amqpMessage.setExpiryTime(absoluteExpiryTime.toEpochMilli());
}
amqpMessage.setMessageId(brokeredMessage.getMessageId());
amqpMessage.setContentType(brokeredMessage.getContentType());
amqpMessage.setCorrelationId(brokeredMessage.getCorrelationId());
amqpMessage.setSubject(brokeredMessage.getLabel());
amqpMessage.getProperties().setTo(brokeredMessage.getTo());
amqpMessage.setReplyTo(brokeredMessage.getReplyTo());
amqpMessage.setReplyToGroupId(brokeredMessage.getReplyToSessionId());
amqpMessage.setGroupId(brokeredMessage.getSessionId());
Map<Symbol, Object> messageAnnotationsMap = new HashMap<>();
if (brokeredMessage.getScheduledEnqueueTimeUtc() != null) {
messageAnnotationsMap.put(Symbol.valueOf(ClientConstants.SCHEDULEDENQUEUETIMENAME), Date.from(brokeredMessage.getScheduledEnqueueTimeUtc()));
}
if (!StringUtil.isNullOrEmpty(brokeredMessage.getPartitionKey())) {
messageAnnotationsMap.put(Symbol.valueOf(ClientConstants.PARTITIONKEYNAME), brokeredMessage.getPartitionKey());
}
if (!StringUtil.isNullOrEmpty(brokeredMessage.getViaPartitionKey())) {
messageAnnotationsMap.put(Symbol.valueOf(ClientConstants.VIAPARTITIONKEYNAME), brokeredMessage.getViaPartitionKey());
}
amqpMessage.setMessageAnnotations(new MessageAnnotations(messageAnnotationsMap));
return amqpMessage;
}
public static Message convertAmqpMessageToBrokeredMessage(org.apache.qpid.proton.message.Message amqpMessage) {
return convertAmqpMessageToBrokeredMessage(amqpMessage, null);
}
public static Message convertAmqpMessageToBrokeredMessage(MessageWithDeliveryTag amqpMessageWithDeliveryTag) {
org.apache.qpid.proton.message.Message amqpMessage = amqpMessageWithDeliveryTag.getMessage();
byte[] deliveryTag = amqpMessageWithDeliveryTag.getDeliveryTag();
return convertAmqpMessageToBrokeredMessage(amqpMessage, deliveryTag);
}
public static Message convertAmqpMessageToBrokeredMessage(MessageWithLockToken amqpMessageWithLockToken) {
Message convertedMessage = convertAmqpMessageToBrokeredMessage(amqpMessageWithLockToken.getMessage(), null);
convertedMessage.setLockToken(amqpMessageWithLockToken.getLockToken());
return convertedMessage;
}
}
|
Int compare date shoule cast int, because int are 4byte, date is 16byte。 int is more chance to SIMD
|
public static boolean canCompareDate(PrimitiveType t1, PrimitiveType t2) {
if (t1 == PrimitiveType.DATE) {
if (t2 == PrimitiveType.DATE || t2.isStringType() || t2.isIntegerType()) {
return true;
}
return false;
} else if (t2 == PrimitiveType.DATE) {
if (t1.isStringType() || t1.isIntegerType()) {
return true;
}
return false;
} else {
return false;
}
}
|
if (t2 == PrimitiveType.DATE || t2.isStringType() || t2.isIntegerType()) {
|
public static boolean canCompareDate(PrimitiveType t1, PrimitiveType t2) {
return (t1 == PrimitiveType.DATE && t2 == PrimitiveType.DATE);
}
|
class Type {
private static final Logger LOG = LogManager.getLogger(Type.class);
public static int MAX_NESTING_DEPTH = 2;
public static final ScalarType INVALID = new ScalarType(PrimitiveType.INVALID_TYPE);
public static final ScalarType NULL = new ScalarType(PrimitiveType.NULL_TYPE);
public static final ScalarType BOOLEAN = new ScalarType(PrimitiveType.BOOLEAN);
public static final ScalarType TINYINT = new ScalarType(PrimitiveType.TINYINT);
public static final ScalarType SMALLINT = new ScalarType(PrimitiveType.SMALLINT);
public static final ScalarType INT = new ScalarType(PrimitiveType.INT);
public static final ScalarType BIGINT = new ScalarType(PrimitiveType.BIGINT);
public static final ScalarType LARGEINT = new ScalarType(PrimitiveType.LARGEINT);
public static final ScalarType FLOAT = new ScalarType(PrimitiveType.FLOAT);
public static final ScalarType DOUBLE = new ScalarType(PrimitiveType.DOUBLE);
public static final ScalarType DATE = new ScalarType(PrimitiveType.DATE);
public static final ScalarType DATETIME = new ScalarType(PrimitiveType.DATETIME);
public static final ScalarType TIME = new ScalarType(PrimitiveType.TIME);
public static final ScalarType STRING = new ScalarType(PrimitiveType.STRING);
public static final ScalarType DEFAULT_DECIMALV2 = (ScalarType)
ScalarType.createDecimalV2Type(ScalarType.DEFAULT_PRECISION,
ScalarType.DEFAULT_SCALE);
public static final ScalarType DECIMALV2 = DEFAULT_DECIMALV2;
public static final ScalarType DEFAULT_VARCHAR = ScalarType.createVarcharType(-1);
public static final ScalarType VARCHAR = ScalarType.createVarcharType(-1);
public static final ScalarType HLL = ScalarType.createHllType();
public static final ScalarType CHAR = (ScalarType) ScalarType.createCharType(-1);
public static final ScalarType BITMAP = new ScalarType(PrimitiveType.BITMAP);
public static final ScalarType ALL = new ScalarType(PrimitiveType.ALL);
public static final MapType Map = new MapType();
private static ArrayList<ScalarType> integerTypes;
private static ArrayList<ScalarType> numericTypes;
private static ArrayList<ScalarType> supportedTypes;
static {
integerTypes = Lists.newArrayList();
integerTypes.add(TINYINT);
integerTypes.add(SMALLINT);
integerTypes.add(INT);
integerTypes.add(BIGINT);
integerTypes.add(LARGEINT);
numericTypes = Lists.newArrayList();
numericTypes.add(TINYINT);
numericTypes.add(SMALLINT);
numericTypes.add(INT);
numericTypes.add(BIGINT);
numericTypes.add(LARGEINT);
numericTypes.add(FLOAT);
numericTypes.add(DOUBLE);
numericTypes.add(DECIMALV2);
supportedTypes = Lists.newArrayList();
supportedTypes.add(NULL);
supportedTypes.add(BOOLEAN);
supportedTypes.add(TINYINT);
supportedTypes.add(SMALLINT);
supportedTypes.add(INT);
supportedTypes.add(BIGINT);
supportedTypes.add(LARGEINT);
supportedTypes.add(FLOAT);
supportedTypes.add(DOUBLE);
supportedTypes.add(VARCHAR);
supportedTypes.add(HLL);
supportedTypes.add(BITMAP);
supportedTypes.add(CHAR);
supportedTypes.add(DATE);
supportedTypes.add(DATETIME);
supportedTypes.add(DECIMALV2);
supportedTypes.add(TIME);
supportedTypes.add(STRING);
}
public static ArrayList<ScalarType> getIntegerTypes() {
return integerTypes;
}
public static ArrayList<ScalarType> getNumericTypes() {
return numericTypes;
}
public static ArrayList<ScalarType> getSupportedTypes() {
return supportedTypes;
}
/**
* The output of this is stored directly in the hive metastore as the column type.
* The string must match exactly.
*/
public final String toSql() { return toSql(0); }
/**
* Recursive helper for toSql() to be implemented by subclasses. Keeps track of the
* nesting depth and terminates the recursion if MAX_NESTING_DEPTH is reached.
*/
protected abstract String toSql(int depth);
/**
* Same as toSql() but adds newlines and spaces for better readability of nested types.
*/
public String prettyPrint() { return prettyPrint(0); }
/**
* Pretty prints this type with lpad number of leading spaces. Used to implement
* prettyPrint() with space-indented nested types.
*/
protected abstract String prettyPrint(int lpad);
public boolean isInvalid() {
return isScalarType(PrimitiveType.INVALID_TYPE);
}
public boolean isValid() {
return !isInvalid();
}
public boolean isNull() {
return isScalarType(PrimitiveType.NULL_TYPE);
}
public boolean isBoolean() {
return isScalarType(PrimitiveType.BOOLEAN);
}
public boolean isDecimalV2() {
return isScalarType(PrimitiveType.DECIMALV2);
}
public boolean isWildcardDecimal() { return false; }
public boolean isWildcardVarchar() { return false; }
public boolean isWildcardChar() { return false; }
public boolean isStringType() {
return isScalarType(PrimitiveType.VARCHAR)
|| isScalarType(PrimitiveType.CHAR)
|| isScalarType(PrimitiveType.STRING);
}
public boolean isOnlyMetricType() {
return isScalarType(PrimitiveType.HLL) || isScalarType(PrimitiveType.BITMAP);
}
    /** User-facing error text shown when HLL/BITMAP columns are used outside their dedicated functions. */
    public static final String OnlyMetricTypeErrorMsg =
            "Doris hll and bitmap column must use with specific function, and don't support filter or group by." +
                    "please run 'help hll' or 'help bitmap' in your mysql client.";
    /** True if this is the HLL scalar type. */
    public boolean isHllType() {
        return isScalarType(PrimitiveType.HLL);
    }
    /** True if this is the BITMAP scalar type. */
    public boolean isBitmapType() {
        return isScalarType(PrimitiveType.BITMAP);
    }
    /** True if this type is a ScalarType instance (as opposed to a complex/collection type). */
    public boolean isScalarType() {
        return this instanceof ScalarType;
    }
    /** True if this is a ScalarType with exactly the given primitive type. */
    public boolean isScalarType(PrimitiveType t) {
        return isScalarType() && ((ScalarType) this).getPrimitiveType() == t;
    }
public boolean isFixedPointType() {
return isScalarType(PrimitiveType.TINYINT) || isScalarType(PrimitiveType.SMALLINT) ||
isScalarType(PrimitiveType.INT) || isScalarType(PrimitiveType.BIGINT) ||
isScalarType(PrimitiveType.LARGEINT);
}
    /** True for FLOAT and DOUBLE. */
    public boolean isFloatingPointType() {
        return isScalarType(PrimitiveType.FLOAT) || isScalarType(PrimitiveType.DOUBLE);
    }
public boolean isIntegerType() {
return isScalarType(PrimitiveType.TINYINT) || isScalarType(PrimitiveType.SMALLINT)
|| isScalarType(PrimitiveType.INT) || isScalarType(PrimitiveType.BIGINT);
}
public boolean isInteger32Type() {
return isScalarType(PrimitiveType.TINYINT) || isScalarType(PrimitiveType.SMALLINT)
|| isScalarType(PrimitiveType.INT);
}
    /** True if this is the LARGEINT (128-bit integer) scalar type. */
    public boolean isLargeIntType() {
        return isScalarType(PrimitiveType.LARGEINT);
    }
    /** True for fixed-length types; the base class answers false, subclasses override. */
    public boolean isFixedLengthType() {
        return false;
    }
    /** True for any numeric type: integrals, floating point, or DECIMALV2. */
    public boolean isNumericType() {
        return isFixedPointType() || isFloatingPointType() || isDecimalV2();
    }
    /** True for types with a native machine representation: integrals, floats, and BOOLEAN. */
    public boolean isNativeType() {
        return isFixedPointType() || isFloatingPointType() || isBoolean();
    }
    /** True for DATE or DATETIME. */
    public boolean isDateType() {
        return isScalarType(PrimitiveType.DATE) || isScalarType(PrimitiveType.DATETIME);
    }
    /** True only for DATETIME (not DATE). */
    public boolean isDatetime() {
        return isScalarType(PrimitiveType.DATETIME);
    }
    /** True if this is the TIME scalar type. */
    public boolean isTime() {
        return isScalarType(PrimitiveType.TIME);
    }
    /** True for non-scalar types: structs and collections (array/map/multi-row). */
    public boolean isComplexType() {
        return isStructType() || isCollectionType();
    }
    /** True for MAP, ARRAY, or MULTI_ROW types. */
    public boolean isCollectionType() {
        return isMapType() || isArrayType() || isMultiRowType();
    }
    /** True if this is a MapType instance. */
    public boolean isMapType() {
        return this instanceof MapType;
    }
    /** True if this is an ArrayType instance. */
    public boolean isArrayType() {
        return this instanceof ArrayType;
    }
    /** True if this is a MultiRowType instance. */
    public boolean isMultiRowType() {
        return this instanceof MultiRowType;
    }
    /** True if this is a StructType instance. */
    public boolean isStructType() {
        return this instanceof StructType;
    }
    /** True only for DATE (not DATETIME). */
    public boolean isDate() {
        return isScalarType(PrimitiveType.DATE);
    }
    /**
     * Returns true if this type is supported in table metadata. It does not mean we
     * can manipulate data of this type. For tables that contain columns with
     * unsupported types, we can safely skip over them.
     */
    public boolean isSupported() {
        return true;
    }
public int getLength() { return -1; }
    /**
     * Indicates whether we support partitioning tables on columns of this type.
     * The base class answers false; partitionable subclasses override.
     */
    public boolean supportsTablePartitioning() {
        return false;
    }
    /** Primitive type of this type; the base class returns INVALID_TYPE, ScalarType overrides. */
    public PrimitiveType getPrimitiveType() {
        return PrimitiveType.INVALID_TYPE;
    }
    /**
     * Returns the size in bytes of the fixed-length portion that a slot of this type
     * occupies in a tuple.
     *
     * @throws IllegalStateException if no slot size is defined for this type
     */
    public int getSlotSize() {
        if (isCollectionType()) {
            // NOTE(review): 16 presumably mirrors the backend's collection slot layout
            // (e.g. pointer + length) -- confirm against the BE tuple layout.
            return 16;
        }
        throw new IllegalStateException("getSlotSize() not implemented for type " + toSql());
    }
public TTypeDesc toThrift() {
TTypeDesc container = new TTypeDesc();
container.setTypes(new ArrayList<TTypeNode>());
toThrift(container);
return container;
}
    /** Thrift column-type form; the base class has none and returns null, subclasses override. */
    public TColumnType toColumnTypeThrift() {
        return null;
    }
    /**
     * Subclasses should override this method to add themselves to the thrift container.
     *
     * @param container flattened descriptor this type appends its node(s) to
     */
    public abstract void toThrift(TTypeDesc container);
    /**
     * Returns true if this type is equal to t, or if t is a wildcard variant of this
     * type. Subclasses should override this as appropriate. The default implementation
     * here is to avoid special-casing logic in callers for concrete types.
     */
    public boolean matchesType(Type t) {
        return false;
    }
/**
* Returns true if t1 can be implicitly cast to t2 according to Impala's casting rules.
* Implicit casts are always allowed when no loss of precision would result (i.e. every
* value of t1 can be represented exactly by a value of t2). Implicit casts are allowed
* in certain other cases such as casting numeric types to floating point types and
* converting strings to timestamps.
* If strict is true, only consider casts that result in no loss of precision.
* TODO: Support casting of non-scalar types.
*/
public static boolean isImplicitlyCastable(Type t1, Type t2, boolean strict) {
if (t1.isScalarType() && t2.isScalarType()) {
return ScalarType.isImplicitlyCastable((ScalarType) t1, (ScalarType) t2, strict);
}
if (t1.isComplexType() || t2.isComplexType()) {
if (t1.isArrayType() && t2.isArrayType()) {
return true;
} else if (t1.isMapType() && t2.isMapType()) {
return true;
} else if (t1.isStructType() && t2.isStructType()) {
return true;
}
return false;
}
return false;
}
public static boolean canCastTo(Type t1, Type t2) {
if (t1.isScalarType() && t2.isScalarType()) {
return ScalarType.canCastTo((ScalarType) t1, (ScalarType) t2);
}
return false;
}
    /**
     * Return type t such that values from both t1 and t2 can be assigned to t without an
     * explicit cast. If strict, does not consider conversions that would result in loss
     * of precision (e.g. converting decimal to float). Returns INVALID_TYPE if there is
     * no such type or if any of t1 and t2 is INVALID_TYPE.
     * TODO: Support non-scalar types.
     */
    public static Type getAssignmentCompatibleType(Type t1, Type t2, boolean strict) {
        if (t1.isScalarType() && t2.isScalarType()) {
            return ScalarType.getAssignmentCompatibleType((ScalarType) t1, (ScalarType) t2, strict);
        }
        // Non-scalar operands currently never have a common assignable type.
        return ScalarType.INVALID;
    }
/**
* Returns null if this expr is not instance of StringLiteral or StringLiteral
* inner value could not parse to long. otherwise return parsed Long result.
*/
public static Long tryParseToLong(Expr expectStringExpr){
if (expectStringExpr instanceof StringLiteral) {
String value = ((StringLiteral)expectStringExpr).getValue();
return Longs.tryParse(value);
}
return null;
}
    /**
     * Returns true if this type exceeds the MAX_NESTING_DEPTH, false otherwise.
     */
    public boolean exceedsMaxNestingDepth() {
        return exceedsMaxNestingDepth(0);
    }
    /**
     * Helper for exceedsMaxNestingDepth(). Recursively computes the max nesting depth,
     * terminating early if MAX_NESTING_DEPTH is reached. Returns true if this type
     * exceeds the MAX_NESTING_DEPTH, false otherwise.
     *
     * Examples of types and their nesting depth:
     * INT --> 1
     * STRUCT<f1:INT> --> 2
     * STRUCT<f1:STRUCT<f2:INT>> --> 3
     * ARRAY<INT> --> 2
     * ARRAY<STRUCT<f1:INT>> --> 3
     * MAP<STRING,INT> --> 2
     * MAP<STRING,STRUCT<f1:INT>> --> 3
     *
     * @param d depth already consumed by enclosing types
     */
    private boolean exceedsMaxNestingDepth(int d) {
        if (d >= MAX_NESTING_DEPTH) return true;
        if (isStructType()) {
            StructType structType = (StructType) this;
            for (StructField f : structType.getFields()) {
                if (f.getType().exceedsMaxNestingDepth(d + 1)) {
                    return true;
                }
            }
        } else if (isArrayType()) {
            ArrayType arrayType = (ArrayType) this;
            if (arrayType.getItemType().exceedsMaxNestingDepth(d + 1)) {
                return true;
            }
        } else if (isMultiRowType()) {
            MultiRowType multiRowType = (MultiRowType) this;
            if (multiRowType.getItemType().exceedsMaxNestingDepth(d + 1)) {
                return true;
            }
        } else if (isMapType()) {
            // Only the value side can nest further; map keys are scalar.
            MapType mapType = (MapType) this;
            if (mapType.getValueType().exceedsMaxNestingDepth(d + 1)) {
                return true;
            }
        } else {
            Preconditions.checkState(isScalarType());
        }
        return false;
    }
    /**
     * Maps a PrimitiveType enum value to its canonical Type instance. Scalars map to
     * the shared singletons; ARRAY/MAP/STRUCT map to fresh (empty) container instances.
     * Returns null for values with no mapping (e.g. INVALID_TYPE, NULL_TYPE).
     */
    public static Type fromPrimitiveType(PrimitiveType type) {
        switch(type) {
            case BOOLEAN:
                return Type.BOOLEAN;
            case TINYINT:
                return Type.TINYINT;
            case SMALLINT:
                return Type.SMALLINT;
            case INT:
                return Type.INT;
            case BIGINT:
                return Type.BIGINT;
            case LARGEINT:
                return Type.LARGEINT;
            case FLOAT:
                return Type.FLOAT;
            case DOUBLE:
                return Type.DOUBLE;
            case DATE:
                return Type.DATE;
            case DATETIME:
                return Type.DATETIME;
            case TIME:
                return Type.TIME;
            case DECIMALV2:
                return Type.DECIMALV2;
            case CHAR:
                return Type.CHAR;
            case VARCHAR:
                return Type.VARCHAR;
            case STRING:
                return Type.STRING;
            case HLL:
                return Type.HLL;
            case ARRAY:
                return ArrayType.create();
            case MAP:
                return new MapType();
            case STRUCT:
                return new StructType();
            case BITMAP:
                return Type.BITMAP;
            default:
                return null;
        }
    }
    /** Array overload of {@link #toThrift(ArrayList)}. */
    public static List<TTypeDesc> toThrift(Type[] types) {
        return toThrift(Lists.newArrayList(types));
    }
public static List<TTypeDesc> toThrift(ArrayList<Type> types) {
ArrayList<TTypeDesc> result = Lists.newArrayList();
for (Type t: types) {
result.add(t.toThrift());
}
return result;
}
    /** Deserializes a flattened thrift type descriptor; asserts all nodes were consumed. */
    public static Type fromThrift(TTypeDesc thrift) {
        Preconditions.checkState(thrift.types.size() > 0);
        Pair<Type, Integer> t = fromThrift(thrift, 0);
        Preconditions.checkState(t.second.equals(thrift.getTypesSize()));
        return t.first;
    }
/**
* Constructs a ColumnType rooted at the TTypeNode at nodeIdx in TColumnType.
* Returned pair: The resulting ColumnType and the next nodeIdx that is not a child
* type of the result.
*/
protected static Pair<Type, Integer> fromThrift(TTypeDesc col, int nodeIdx) {
TTypeNode node = col.getTypes().get(nodeIdx);
Type type = null;
int tmpNodeIdx = nodeIdx;
switch (node.getType()) {
case SCALAR: {
Preconditions.checkState(node.isSetScalarType());
TScalarType scalarType = node.getScalarType();
if (scalarType.getType() == TPrimitiveType.CHAR) {
Preconditions.checkState(scalarType.isSetLen());
type = ScalarType.createCharType(scalarType.getLen());
} else if (scalarType.getType() == TPrimitiveType.VARCHAR) {
Preconditions.checkState(scalarType.isSetLen());
type = ScalarType.createVarcharType(scalarType.getLen());
} else if (scalarType.getType() == TPrimitiveType.HLL) {
type = ScalarType.createHllType();
} else if (scalarType.getType() == TPrimitiveType.DECIMALV2) {
Preconditions.checkState(scalarType.isSetPrecision()
&& scalarType.isSetPrecision());
type = ScalarType.createDecimalV2Type(scalarType.getPrecision(),
scalarType.getScale());
} else {
type = ScalarType.createType(
PrimitiveType.fromThrift(scalarType.getType()));
}
++tmpNodeIdx;
break;
}
case ARRAY: {
Preconditions.checkState(tmpNodeIdx + 1 < col.getTypesSize());
Pair<Type, Integer> childType = fromThrift(col, tmpNodeIdx + 1);
type = new ArrayType(childType.first);
tmpNodeIdx = childType.second;
break;
}
case MAP: {
Preconditions.checkState(tmpNodeIdx + 2 < col.getTypesSize());
Pair<Type, Integer> keyType = fromThrift(col, tmpNodeIdx + 1);
Pair<Type, Integer> valueType = fromThrift(col, keyType.second);
type = new MapType(keyType.first, valueType.first);
tmpNodeIdx = valueType.second;
break;
}
case STRUCT: {
Preconditions.checkState(tmpNodeIdx + node.getStructFieldsSize() < col.getTypesSize());
ArrayList<StructField> structFields = Lists.newArrayList();
++tmpNodeIdx;
for (int i = 0; i < node.getStructFieldsSize(); ++i) {
TStructField thriftField = node.getStructFields().get(i);
String name = thriftField.getName();
String comment = null;
if (thriftField.isSetComment()) {
comment = thriftField.getComment();
}
Pair<Type, Integer> res = fromThrift(col, tmpNodeIdx);
tmpNodeIdx = res.second.intValue();
structFields.add(new StructField(name, res.first, comment));
}
type = new StructType(structFields);
break;
}
}
return new Pair<Type, Integer>(type, tmpNodeIdx);
}
    /**
     * Utility function to get the primitive type of a thrift type that is known
     * to be scalar (exactly one node, of SCALAR kind).
     */
    public TPrimitiveType getTPrimitiveType(TTypeDesc ttype) {
        Preconditions.checkState(ttype.getTypesSize() == 1);
        Preconditions.checkState(ttype.types.get(0).getType() == TTypeNodeType.SCALAR);
        return ttype.types.get(0).scalar_type.getType();
    }
    /**
     * JDBC data type description
     * Returns the column size for this type.
     * For numeric data this is the maximum precision.
     * For character data this is the length in characters.
     * For datetime types this is the length in characters of the String representation
     * (assuming the maximum allowed precision of the fractional seconds component).
     * For binary data this is the length in bytes.
     * Null is returned for data types where the column size is not applicable.
     */
    public Integer getColumnSize() {
        if (!isScalarType()) return null;
        if (isNumericType()) return getPrecision();
        ScalarType t = (ScalarType) this;
        switch (t.getPrimitiveType()) {
            case CHAR:
            case VARCHAR:
            case STRING:
            case HLL:
                return t.getLength();
            default:
                return null;
        }
    }
    /**
     * JDBC data type description
     * For numeric types, returns the maximum precision for this type.
     * For non-numeric types, returns null.
     */
    public Integer getPrecision() {
        if (!isScalarType()) return null;
        ScalarType t = (ScalarType) this;
        // Values below are the maximum count of decimal digits each type can hold.
        switch (t.getPrimitiveType()) {
            case TINYINT:
                return 3;
            case SMALLINT:
                return 5;
            case INT:
                return 10;
            case BIGINT:
                return 19;
            case FLOAT:
                return 7;
            case DOUBLE:
                return 15;
            case DECIMALV2:
                return t.decimalPrecision();
            default:
                return null;
        }
    }
    /**
     * JDBC data type description
     * Returns the number of fractional digits for this type, or null if not applicable.
     * For timestamp/time types, returns the number of digits in the fractional seconds
     * component.
     */
    public Integer getDecimalDigits() {
        if (!isScalarType()) return null;
        ScalarType t = (ScalarType) this;
        switch (t.getPrimitiveType()) {
            case BOOLEAN:
            case TINYINT:
            case SMALLINT:
            case INT:
            case BIGINT:
                return 0;
            case FLOAT:
                return 7;
            case DOUBLE:
                return 15;
            case DECIMALV2:
                return t.decimalScale();
            default:
                return null;
        }
    }
    /**
     * JDBC data type description
     * For numeric data types, either 10 or 2. If it is 10, the values in COLUMN_SIZE
     * and DECIMAL_DIGITS give the number of decimal digits allowed for the column.
     * For example, a DECIMAL(12,5) column would return a NUM_PREC_RADIX of 10,
     * a COLUMN_SIZE of 12, and a DECIMAL_DIGITS of 5; a FLOAT column could return
     * a NUM_PREC_RADIX of 10, a COLUMN_SIZE of 15, and a DECIMAL_DIGITS of NULL.
     * If it is 2, the values in COLUMN_SIZE and DECIMAL_DIGITS give the number of bits
     * allowed in the column. For example, a FLOAT column could return a RADIX of 2,
     * a COLUMN_SIZE of 53, and a DECIMAL_DIGITS of NULL. NULL is returned for data
     * types where NUM_PREC_RADIX is not applicable.
     */
    public Integer getNumPrecRadix() {
        if (!isScalarType()) return null;
        ScalarType t = (ScalarType) this;
        // All numeric types here report decimal (base-10) precision.
        switch (t.getPrimitiveType()) {
            case TINYINT:
            case SMALLINT:
            case INT:
            case BIGINT:
            case FLOAT:
            case DOUBLE:
            case DECIMALV2:
                return 10;
            default:
                return null;
        }
    }
    /**
     * Matrix that records "smallest" assignment-compatible type of two types
     * (INVALID_TYPE if no such type exists, ie, if the input types are fundamentally
     * incompatible). A value of any of the two types could be assigned to a slot
     * of the assignment-compatible type. For strict compatibility, this can be done
     * without any loss of precision. For non-strict compatibility, there may be loss of
     * precision, e.g. if converting from BIGINT to FLOAT.
     *
     * We chose not to follow MySQL's type casting behavior as described here:
     * https://dev.mysql.com/doc/refman/8.0/en/type-conversion.html
     * for the following reasons:
     * conservative casting in arithmetic exprs: TINYINT + TINYINT -> BIGINT
     * comparison of many types as double: INT < FLOAT -> comparison as DOUBLE
     * special cases when dealing with dates and timestamps.
     */
    protected static PrimitiveType[][] compatibilityMatrix;
    /**
     * If we are checking in strict mode, any non-null entry in this matrix overrides
     * compatibilityMatrix. If the entry is null, the entry in compatibility matrix
     * is valid.
     */
    protected static PrimitiveType[][] strictCompatibilityMatrix;
    static {
        // Both matrices are indexed by PrimitiveType ordinals. Entries are filled only
        // for the upper triangle (i <= j); lookups are expected to order their indices.
        compatibilityMatrix = new
                PrimitiveType[PrimitiveType.values().length][PrimitiveType.values().length];
        strictCompatibilityMatrix = new
                PrimitiveType[PrimitiveType.values().length][PrimitiveType.values().length];
        // Every type is trivially compatible with itself.
        for (int i = 0; i < PrimitiveType.values().length; ++i) {
            compatibilityMatrix[i][i] = PrimitiveType.values()[i];
        }
        compatibilityMatrix[BOOLEAN.ordinal()][TINYINT.ordinal()] = PrimitiveType.TINYINT;
        compatibilityMatrix[BOOLEAN.ordinal()][SMALLINT.ordinal()] = PrimitiveType.SMALLINT;
        compatibilityMatrix[BOOLEAN.ordinal()][INT.ordinal()] = PrimitiveType.INT;
        compatibilityMatrix[BOOLEAN.ordinal()][BIGINT.ordinal()] = PrimitiveType.BIGINT;
        compatibilityMatrix[BOOLEAN.ordinal()][LARGEINT.ordinal()] = PrimitiveType.LARGEINT;
        compatibilityMatrix[BOOLEAN.ordinal()][FLOAT.ordinal()] = PrimitiveType.FLOAT;
        compatibilityMatrix[BOOLEAN.ordinal()][DOUBLE.ordinal()] = PrimitiveType.DOUBLE;
        compatibilityMatrix[BOOLEAN.ordinal()][DATE.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[BOOLEAN.ordinal()][DATETIME.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[BOOLEAN.ordinal()][CHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[BOOLEAN.ordinal()][VARCHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[BOOLEAN.ordinal()][HLL.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[BOOLEAN.ordinal()][TIME.ordinal()] = PrimitiveType.DOUBLE;
        compatibilityMatrix[BOOLEAN.ordinal()][BITMAP.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[BOOLEAN.ordinal()][STRING.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[TINYINT.ordinal()][SMALLINT.ordinal()] = PrimitiveType.SMALLINT;
        compatibilityMatrix[TINYINT.ordinal()][INT.ordinal()] = PrimitiveType.INT;
        compatibilityMatrix[TINYINT.ordinal()][BIGINT.ordinal()] = PrimitiveType.BIGINT;
        compatibilityMatrix[TINYINT.ordinal()][LARGEINT.ordinal()] = PrimitiveType.LARGEINT;
        compatibilityMatrix[TINYINT.ordinal()][FLOAT.ordinal()] = PrimitiveType.FLOAT;
        compatibilityMatrix[TINYINT.ordinal()][DOUBLE.ordinal()] = PrimitiveType.DOUBLE;
        compatibilityMatrix[TINYINT.ordinal()][DATE.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[TINYINT.ordinal()][DATETIME.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[TINYINT.ordinal()][CHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[TINYINT.ordinal()][VARCHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[TINYINT.ordinal()][DECIMALV2.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[TINYINT.ordinal()][HLL.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[TINYINT.ordinal()][TIME.ordinal()] = PrimitiveType.DOUBLE;
        compatibilityMatrix[TINYINT.ordinal()][BITMAP.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[TINYINT.ordinal()][STRING.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[SMALLINT.ordinal()][INT.ordinal()] = PrimitiveType.INT;
        compatibilityMatrix[SMALLINT.ordinal()][BIGINT.ordinal()] = PrimitiveType.BIGINT;
        compatibilityMatrix[SMALLINT.ordinal()][LARGEINT.ordinal()] = PrimitiveType.LARGEINT;
        compatibilityMatrix[SMALLINT.ordinal()][FLOAT.ordinal()] = PrimitiveType.FLOAT;
        compatibilityMatrix[SMALLINT.ordinal()][DOUBLE.ordinal()] = PrimitiveType.DOUBLE;
        compatibilityMatrix[SMALLINT.ordinal()][DATE.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[SMALLINT.ordinal()][DATETIME.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[SMALLINT.ordinal()][CHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[SMALLINT.ordinal()][VARCHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[SMALLINT.ordinal()][DECIMALV2.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[SMALLINT.ordinal()][HLL.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[SMALLINT.ordinal()][TIME.ordinal()] = PrimitiveType.DOUBLE;
        compatibilityMatrix[SMALLINT.ordinal()][BITMAP.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[SMALLINT.ordinal()][STRING.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[INT.ordinal()][BIGINT.ordinal()] = PrimitiveType.BIGINT;
        compatibilityMatrix[INT.ordinal()][LARGEINT.ordinal()] = PrimitiveType.LARGEINT;
        compatibilityMatrix[INT.ordinal()][FLOAT.ordinal()] = PrimitiveType.FLOAT;
        // In strict mode INT+FLOAT widens to DOUBLE to avoid precision loss.
        strictCompatibilityMatrix[INT.ordinal()][FLOAT.ordinal()] = PrimitiveType.DOUBLE;
        compatibilityMatrix[INT.ordinal()][DOUBLE.ordinal()] = PrimitiveType.DOUBLE;
        compatibilityMatrix[INT.ordinal()][DATE.ordinal()] = PrimitiveType.INT;
        compatibilityMatrix[INT.ordinal()][DATETIME.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[INT.ordinal()][CHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[INT.ordinal()][VARCHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[INT.ordinal()][DECIMALV2.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[INT.ordinal()][HLL.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[INT.ordinal()][TIME.ordinal()] = PrimitiveType.DOUBLE;
        compatibilityMatrix[INT.ordinal()][BITMAP.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[INT.ordinal()][STRING.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[BIGINT.ordinal()][LARGEINT.ordinal()] = PrimitiveType.LARGEINT;
        compatibilityMatrix[BIGINT.ordinal()][FLOAT.ordinal()] = PrimitiveType.FLOAT;
        strictCompatibilityMatrix[BIGINT.ordinal()][FLOAT.ordinal()] = PrimitiveType.DOUBLE;
        compatibilityMatrix[BIGINT.ordinal()][DOUBLE.ordinal()] = PrimitiveType.DOUBLE;
        compatibilityMatrix[BIGINT.ordinal()][DATE.ordinal()] = PrimitiveType.BIGINT;
        compatibilityMatrix[BIGINT.ordinal()][DATETIME.ordinal()] = PrimitiveType.BIGINT;
        compatibilityMatrix[BIGINT.ordinal()][CHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[BIGINT.ordinal()][VARCHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[BIGINT.ordinal()][DECIMALV2.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[BIGINT.ordinal()][HLL.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[BIGINT.ordinal()][TIME.ordinal()] = PrimitiveType.DOUBLE;
        compatibilityMatrix[BIGINT.ordinal()][BITMAP.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[BIGINT.ordinal()][STRING.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[LARGEINT.ordinal()][FLOAT.ordinal()] = PrimitiveType.DOUBLE;
        compatibilityMatrix[LARGEINT.ordinal()][DOUBLE.ordinal()] = PrimitiveType.DOUBLE;
        compatibilityMatrix[LARGEINT.ordinal()][DATE.ordinal()] = PrimitiveType.LARGEINT;
        compatibilityMatrix[LARGEINT.ordinal()][DATETIME.ordinal()] = PrimitiveType.LARGEINT;
        compatibilityMatrix[LARGEINT.ordinal()][CHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[LARGEINT.ordinal()][VARCHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[LARGEINT.ordinal()][DECIMALV2.ordinal()] = PrimitiveType.DECIMALV2;
        compatibilityMatrix[LARGEINT.ordinal()][HLL.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[LARGEINT.ordinal()][TIME.ordinal()] = PrimitiveType.DOUBLE;
        compatibilityMatrix[LARGEINT.ordinal()][BITMAP.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[LARGEINT.ordinal()][STRING.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[FLOAT.ordinal()][DOUBLE.ordinal()] = PrimitiveType.DOUBLE;
        compatibilityMatrix[FLOAT.ordinal()][DATE.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[FLOAT.ordinal()][DATETIME.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[FLOAT.ordinal()][CHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[FLOAT.ordinal()][VARCHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[FLOAT.ordinal()][DECIMALV2.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[FLOAT.ordinal()][HLL.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[FLOAT.ordinal()][TIME.ordinal()] = PrimitiveType.DOUBLE;
        compatibilityMatrix[FLOAT.ordinal()][BITMAP.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[FLOAT.ordinal()][STRING.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[DOUBLE.ordinal()][DATE.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[DOUBLE.ordinal()][DATETIME.ordinal()] = PrimitiveType.DOUBLE ;
        compatibilityMatrix[DOUBLE.ordinal()][CHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[DOUBLE.ordinal()][VARCHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[DOUBLE.ordinal()][DECIMALV2.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[DOUBLE.ordinal()][HLL.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[DOUBLE.ordinal()][TIME.ordinal()] = PrimitiveType.DOUBLE;
        compatibilityMatrix[DOUBLE.ordinal()][BITMAP.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[DOUBLE.ordinal()][STRING.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[DATE.ordinal()][DATETIME.ordinal()] = PrimitiveType.DATETIME;
        compatibilityMatrix[DATE.ordinal()][CHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[DATE.ordinal()][VARCHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[DATE.ordinal()][DECIMALV2.ordinal()] = PrimitiveType.DECIMALV2;
        compatibilityMatrix[DATE.ordinal()][HLL.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[DATE.ordinal()][TIME.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[DATE.ordinal()][BITMAP.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[DATE.ordinal()][STRING.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[DATETIME.ordinal()][CHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[DATETIME.ordinal()][VARCHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[DATETIME.ordinal()][DECIMALV2.ordinal()] = PrimitiveType.DECIMALV2;
        compatibilityMatrix[DATETIME.ordinal()][HLL.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[DATETIME.ordinal()][TIME.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[DATETIME.ordinal()][BITMAP.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[DATETIME.ordinal()][STRING.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[CHAR.ordinal()][VARCHAR.ordinal()] = PrimitiveType.VARCHAR;
        compatibilityMatrix[CHAR.ordinal()][DECIMALV2.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[CHAR.ordinal()][HLL.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[CHAR.ordinal()][TIME.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[CHAR.ordinal()][BITMAP.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[CHAR.ordinal()][STRING.ordinal()] = PrimitiveType.STRING;
        compatibilityMatrix[VARCHAR.ordinal()][DECIMALV2.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[VARCHAR.ordinal()][HLL.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[VARCHAR.ordinal()][TIME.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[VARCHAR.ordinal()][BITMAP.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[VARCHAR.ordinal()][STRING.ordinal()] = PrimitiveType.STRING;
        compatibilityMatrix[STRING.ordinal()][HLL.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[STRING.ordinal()][TIME.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[STRING.ordinal()][BITMAP.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[DECIMALV2.ordinal()][HLL.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[DECIMALV2.ordinal()][TIME.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[DECIMALV2.ordinal()][BITMAP.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[DECIMALV2.ordinal()][STRING.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[HLL.ordinal()][TIME.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[HLL.ordinal()][BITMAP.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[HLL.ordinal()][STRING.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[BITMAP.ordinal()][TIME.ordinal()] = PrimitiveType.INVALID_TYPE;
        compatibilityMatrix[BITMAP.ordinal()][STRING.ordinal()] = PrimitiveType.INVALID_TYPE;
        // NOTE(review): this overrides the diagonal TIME/TIME entry (set to TIME by the
        // self-compatibility loop above) with INVALID_TYPE -- confirm this is intentional.
        compatibilityMatrix[TIME.ordinal()][TIME.ordinal()] = PrimitiveType.INVALID_TYPE;
        // Sanity check: every remaining upper-triangle entry must have been filled in.
        for (int i = 0; i < PrimitiveType.values().length - 2; ++i) {
            for (int j = i; j < PrimitiveType.values().length - 2; ++j) {
                PrimitiveType t1 = PrimitiveType.values()[i];
                PrimitiveType t2 = PrimitiveType.values()[j];
                if (t1 == PrimitiveType.INVALID_TYPE ||
                        t2 == PrimitiveType.INVALID_TYPE) continue;
                if (t1 == PrimitiveType.NULL_TYPE || t2 == PrimitiveType.NULL_TYPE) continue;
                if (t1 == PrimitiveType.ARRAY || t2 == PrimitiveType.ARRAY) continue;
                if (t1 == PrimitiveType.DECIMALV2 || t2 == PrimitiveType.DECIMALV2) continue;
                if (t1 == PrimitiveType.TIME || t2 == PrimitiveType.TIME) continue;
                // NOTE(review): duplicate ARRAY guard (already checked above).
                if (t1 == PrimitiveType.ARRAY || t2 == PrimitiveType.ARRAY) continue;
                if (t1 == PrimitiveType.MAP || t2 == PrimitiveType.MAP) continue;
                if (t1 == PrimitiveType.STRUCT || t2 == PrimitiveType.STRUCT) continue;
                Preconditions.checkNotNull(compatibilityMatrix[i][j]);
            }
        }
    }
    /**
     * Widened type used to hold intermediate/aggregate results of this type:
     * integers widen to BIGINT, floats to DOUBLE, date/char/metric families to
     * VARCHAR; DECIMALV2 and STRING map to themselves.
     */
    public Type getResultType() {
        switch (this.getPrimitiveType()) {
            case BOOLEAN:
            case TINYINT:
            case SMALLINT:
            case INT:
            case BIGINT:
                return BIGINT;
            case LARGEINT:
                return LARGEINT;
            case FLOAT:
            case DOUBLE:
                return DOUBLE;
            case DATE:
            case DATETIME:
            case TIME:
            case CHAR:
            case VARCHAR:
            case HLL:
            case BITMAP:
                return VARCHAR;
            case DECIMALV2:
                return DECIMALV2;
            case STRING:
                return STRING;
            default:
                return INVALID;
        }
    }
    /**
     * Picks the common type two operands are converted to for comparison.
     * The rules are checked in order: NULL adopts the other side's type, then date
     * comparisons, then string pairs, then numeric widenings; DOUBLE is the fallback.
     */
    public static Type getCmpType(Type t1, Type t2) {
        // NULL compares as whatever the other operand is.
        if (t1.getPrimitiveType() == PrimitiveType.NULL_TYPE) {
            return t2;
        }
        if (t2.getPrimitiveType() == PrimitiveType.NULL_TYPE) {
            return t1;
        }
        if (canCompareDate(t1.getPrimitiveType(), t2.getPrimitiveType())) {
            return Type.DATE;
        }
        if (canCompareDatetime(t1.getPrimitiveType(), t2.getPrimitiveType())) {
            return Type.DATETIME;
        }
        // Following logic is compatible with the result-type widening above.
        PrimitiveType t1ResultType = t1.getResultType().getPrimitiveType();
        PrimitiveType t2ResultType = t2.getResultType().getPrimitiveType();
        if (t1ResultType == PrimitiveType.VARCHAR && t2ResultType == PrimitiveType.VARCHAR) {
            return Type.VARCHAR;
        }
        if ((t1ResultType == PrimitiveType.STRING && t2ResultType == PrimitiveType.STRING)
                || (t1ResultType == PrimitiveType.STRING && t2ResultType == PrimitiveType.VARCHAR)
                || (t1ResultType == PrimitiveType.VARCHAR && t2ResultType == PrimitiveType.STRING)) {
            return Type.STRING;
        }
        // int family vs char family compares using the string side's type.
        if ((t1ResultType.isFixedPointType() && t2ResultType.isCharFamily())
                || (t2ResultType.isFixedPointType() && t1ResultType.isCharFamily())) {
            return t1.isStringType() ? t1 : t2;
        }
        if (t1ResultType == PrimitiveType.BIGINT && t2ResultType == PrimitiveType.BIGINT) {
            return getAssignmentCompatibleType(t1, t2, false);
        }
        if ((t1ResultType == PrimitiveType.BIGINT || t1ResultType == PrimitiveType.DECIMALV2)
                && (t2ResultType == PrimitiveType.BIGINT || t2ResultType == PrimitiveType.DECIMALV2)) {
            return Type.DECIMALV2;
        }
        if ((t1ResultType == PrimitiveType.BIGINT || t1ResultType == PrimitiveType.LARGEINT)
                && (t2ResultType == PrimitiveType.BIGINT || t2ResultType == PrimitiveType.LARGEINT)) {
            return Type.LARGEINT;
        }
        return Type.DOUBLE;
    }
public static boolean canCompareDatetime(PrimitiveType t1, PrimitiveType t2) {
if (t1.isDateType()) {
if (t2.isDateType() || t2.isStringType() || t2.isIntegerType()) {
return true;
}
return false;
} else if (t2.isDateType()) {
if (t1.isStringType() || t1.isIntegerType()) {
return true;
}
return false;
} else {
return false;
}
}
public Type getMaxResolutionType() {
Preconditions.checkState(true, "must implemented");
return null;
}
    /**
     * Widened type used for numeric aggregate results (e.g. SUM): integers widen to
     * BIGINT/LARGEINT, everything float-like and string-like to DOUBLE, DECIMALV2 to itself.
     */
    public Type getNumResultType() {
        switch (getPrimitiveType()) {
            case BOOLEAN:
            case TINYINT:
            case SMALLINT:
            case INT:
            case BIGINT:
                return Type.BIGINT;
            case LARGEINT:
                return Type.LARGEINT;
            case FLOAT:
            case DOUBLE:
            case DATE:
            case DATETIME:
            case TIME:
            case CHAR:
            case VARCHAR:
            case STRING:
            case HLL:
                return Type.DOUBLE;
            case DECIMALV2:
                return Type.DECIMALV2;
            default:
                return Type.INVALID;
        }
    }
    /** Extra storage-layout bytes for this type; the base class reports 0, subclasses override. */
    public int getStorageLayoutBytes() {
        return 0;
    }
public int getIndexSize() {
if (this.getPrimitiveType() == PrimitiveType.CHAR) {
return ((ScalarType) this).getLength();
} else {
return this.getPrimitiveType().getOlapColumnIndexSize();
}
}
}
|
class Type {
private static final Logger LOG = LogManager.getLogger(Type.class);
// Maximum nesting depth permitted for complex types (see exceedsMaxNestingDepth).
public static int MAX_NESTING_DEPTH = 2;
// Shared singleton instances for every scalar type.
public static final ScalarType INVALID = new ScalarType(PrimitiveType.INVALID_TYPE);
public static final ScalarType NULL = new ScalarType(PrimitiveType.NULL_TYPE);
public static final ScalarType BOOLEAN = new ScalarType(PrimitiveType.BOOLEAN);
public static final ScalarType TINYINT = new ScalarType(PrimitiveType.TINYINT);
public static final ScalarType SMALLINT = new ScalarType(PrimitiveType.SMALLINT);
public static final ScalarType INT = new ScalarType(PrimitiveType.INT);
public static final ScalarType BIGINT = new ScalarType(PrimitiveType.BIGINT);
public static final ScalarType LARGEINT = new ScalarType(PrimitiveType.LARGEINT);
public static final ScalarType FLOAT = new ScalarType(PrimitiveType.FLOAT);
public static final ScalarType DOUBLE = new ScalarType(PrimitiveType.DOUBLE);
public static final ScalarType DATE = new ScalarType(PrimitiveType.DATE);
public static final ScalarType DATETIME = new ScalarType(PrimitiveType.DATETIME);
public static final ScalarType TIME = new ScalarType(PrimitiveType.TIME);
public static final ScalarType STRING = new ScalarType(PrimitiveType.STRING);
// Decimal with the default precision/scale; DECIMALV2 is an alias for it.
public static final ScalarType DEFAULT_DECIMALV2 = (ScalarType)
        ScalarType.createDecimalV2Type(ScalarType.DEFAULT_PRECISION,
                ScalarType.DEFAULT_SCALE);
public static final ScalarType DECIMALV2 = DEFAULT_DECIMALV2;
// Length -1 denotes a wildcard/unspecified length.
public static final ScalarType DEFAULT_VARCHAR = ScalarType.createVarcharType(-1);
public static final ScalarType VARCHAR = ScalarType.createVarcharType(-1);
public static final ScalarType HLL = ScalarType.createHllType();
public static final ScalarType CHAR = (ScalarType) ScalarType.createCharType(-1);
public static final ScalarType BITMAP = new ScalarType(PrimitiveType.BITMAP);
public static final ScalarType ALL = new ScalarType(PrimitiveType.ALL);
public static final MapType Map = new MapType();
// Populated by the static initializer below.
private static ArrayList<ScalarType> integerTypes;
private static ArrayList<ScalarType> numericTypes;
private static ArrayList<ScalarType> supportedTypes;
static {
    // Integer types ordered from narrowest to widest.
    integerTypes = Lists.newArrayList(TINYINT, SMALLINT, INT, BIGINT, LARGEINT);
    // All numeric types: the integer types plus floating point and decimal.
    numericTypes = Lists.newArrayList(
            TINYINT, SMALLINT, INT, BIGINT, LARGEINT, FLOAT, DOUBLE, DECIMALV2);
    // Every scalar type a column may legally be declared with.
    supportedTypes = Lists.newArrayList(
            NULL, BOOLEAN, TINYINT, SMALLINT, INT, BIGINT, LARGEINT, FLOAT, DOUBLE,
            VARCHAR, HLL, BITMAP, CHAR, DATE, DATETIME, DECIMALV2, TIME, STRING);
}
/** Returns the integer scalar types, narrowest first. Note: the internal list is returned directly. */
public static ArrayList<ScalarType> getIntegerTypes() {
    return integerTypes;
}
/** Returns all numeric scalar types (integers, floats, decimal). */
public static ArrayList<ScalarType> getNumericTypes() {
    return numericTypes;
}
/** Returns every scalar type supported for column declarations. */
public static ArrayList<ScalarType> getSupportedTypes() {
    return supportedTypes;
}
/**
 * Returns the SQL representation of this type. The output of this is stored
 * directly in the hive metastore as the column type, so the string must match
 * exactly.
 */
public final String toSql() { return toSql(0); }
/**
 * Recursive helper for toSql() to be implemented by subclasses. Keeps track of the
 * nesting depth and terminates the recursion if MAX_NESTING_DEPTH is reached.
 */
protected abstract String toSql(int depth);
/**
 * Same as toSql() but adds newlines and spaces for better readability of nested types.
 */
public String prettyPrint() { return prettyPrint(0); }
/**
 * Pretty prints this type with lpad number of leading spaces. Used to implement
 * prettyPrint() with space-indented nested types.
 */
protected abstract String prettyPrint(int lpad);
/** Returns true if this is the INVALID_TYPE sentinel. */
public boolean isInvalid() {
    return isScalarType(PrimitiveType.INVALID_TYPE);
}
public boolean isValid() {
    return !isInvalid();
}
/** Returns true if this is the NULL_TYPE sentinel (type of a NULL literal). */
public boolean isNull() {
    return isScalarType(PrimitiveType.NULL_TYPE);
}
public boolean isBoolean() {
    return isScalarType(PrimitiveType.BOOLEAN);
}
public boolean isDecimalV2() {
    return isScalarType(PrimitiveType.DECIMALV2);
}
// Wildcard variants carry unspecified precision/length; the base class has none.
public boolean isWildcardDecimal() { return false; }
public boolean isWildcardVarchar() { return false; }
public boolean isWildcardChar() { return false; }
/** Returns true for VARCHAR, CHAR and STRING. */
public boolean isStringType() {
    return isScalarType(PrimitiveType.VARCHAR)
            || isScalarType(PrimitiveType.CHAR)
            || isScalarType(PrimitiveType.STRING);
}
/** Returns true for types (HLL, BITMAP) only usable with their dedicated functions. */
public boolean isOnlyMetricType() {
    return isScalarType(PrimitiveType.HLL) || isScalarType(PrimitiveType.BITMAP);
}
public static final String OnlyMetricTypeErrorMsg =
        "Doris hll and bitmap column must use with specific function, and don't support filter or group by." +
                "please run 'help hll' or 'help bitmap' in your mysql client.";
public boolean isHllType() {
    return isScalarType(PrimitiveType.HLL);
}
public boolean isBitmapType() {
    return isScalarType(PrimitiveType.BITMAP);
}
/** Returns true if this is a scalar (non-complex) type. */
public boolean isScalarType() {
    return this instanceof ScalarType;
}
/** Returns true if this is a scalar type with the given primitive type. */
public boolean isScalarType(PrimitiveType t) {
    return isScalarType() && ((ScalarType) this).getPrimitiveType() == t;
}
/** Returns true for all exact integer types (TINYINT .. LARGEINT). */
public boolean isFixedPointType() {
    return isScalarType(PrimitiveType.TINYINT) || isScalarType(PrimitiveType.SMALLINT) ||
            isScalarType(PrimitiveType.INT) || isScalarType(PrimitiveType.BIGINT) ||
            isScalarType(PrimitiveType.LARGEINT);
}
public boolean isFloatingPointType() {
    return isScalarType(PrimitiveType.FLOAT) || isScalarType(PrimitiveType.DOUBLE);
}
/** Returns true for TINYINT .. BIGINT; note LARGEINT is excluded here. */
public boolean isIntegerType() {
    return isScalarType(PrimitiveType.TINYINT) || isScalarType(PrimitiveType.SMALLINT)
            || isScalarType(PrimitiveType.INT) || isScalarType(PrimitiveType.BIGINT);
}
/** Returns true for integer types that fit in 32 bits (TINYINT, SMALLINT, INT). */
public boolean isInteger32Type() {
    return isScalarType(PrimitiveType.TINYINT) || isScalarType(PrimitiveType.SMALLINT)
            || isScalarType(PrimitiveType.INT);
}
public boolean isLargeIntType() {
    return isScalarType(PrimitiveType.LARGEINT);
}
/** Base implementation: only specific subclasses represent fixed-length types. */
public boolean isFixedLengthType() {
    return false;
}
public boolean isNumericType() {
    return isFixedPointType() || isFloatingPointType() || isDecimalV2();
}
/** Returns true for types with a native machine representation (ints, floats, bool). */
public boolean isNativeType() {
    return isFixedPointType() || isFloatingPointType() || isBoolean();
}
public boolean isDateType() {
    return isScalarType(PrimitiveType.DATE) || isScalarType(PrimitiveType.DATETIME);
}
public boolean isDatetime() {
    return isScalarType(PrimitiveType.DATETIME);
}
public boolean isTime() {
    return isScalarType(PrimitiveType.TIME);
}
/** A complex type is a struct or a collection (array/map/multi-row). */
public boolean isComplexType() {
    return isStructType() || isCollectionType();
}
public boolean isCollectionType() {
    return isMapType() || isArrayType() || isMultiRowType();
}
public boolean isMapType() {
    return this instanceof MapType;
}
public boolean isArrayType() {
    return this instanceof ArrayType;
}
public boolean isMultiRowType() {
    return this instanceof MultiRowType;
}
public boolean isStructType() {
    return this instanceof StructType;
}
public boolean isDate() {
    return isScalarType(PrimitiveType.DATE);
}
/**
 * Returns true if this type is supported in the metadata. It does not mean we
 * can manipulate data of this type. For tables that contain columns with these
 * types, we can safely skip over them.
 */
public boolean isSupported() {
    return true;
}
/** Returns the declared length, or -1 when length does not apply to this type. */
public int getLength() { return -1; }
/**
 * Indicates whether we support partitioning tables on columns of this type.
 */
public boolean supportsTablePartitioning() {
    return false;
}
/** Base implementation; scalar subclasses override to return their real primitive type. */
public PrimitiveType getPrimitiveType() {
    return PrimitiveType.INVALID_TYPE;
}
/**
 * Returns the size in bytes of the fixed-length portion that a slot of this type
 * occupies in a tuple.
 */
public int getSlotSize() {
    if (isCollectionType()) {
        // Collections occupy a fixed 16-byte slot; presumably a pointer-plus-length
        // pair in the backend tuple layout — confirm against the BE definition.
        return 16;
    }
    // Scalar subclasses must override; reaching here is a programming error.
    throw new IllegalStateException("getSlotSize() not implemented for type " + toSql());
}
/** Serializes this type into a fresh TTypeDesc container (pre-order node list). */
public TTypeDesc toThrift() {
    TTypeDesc container = new TTypeDesc();
    container.setTypes(new ArrayList<TTypeNode>());
    toThrift(container);
    return container;
}
/** Base implementation returns null; subclasses with a column representation override. */
public TColumnType toColumnTypeThrift() {
    return null;
}
/**
 * Subclasses should override this method to add themselves to the thrift container.
 */
public abstract void toThrift(TTypeDesc container);
/**
 * Returns true if this type is equal to t, or if t is a wildcard variant of this
 * type. Subclasses should override this as appropriate. The default implementation
 * here is to avoid special-casing logic in callers for concrete types.
 */
public boolean matchesType(Type t) {
    return false;
}
/**
 * Returns true if t1 can be implicitly cast to t2. Implicit casts are always
 * allowed when no loss of precision would result (i.e. every value of t1 can be
 * represented exactly by a value of t2), plus certain lossy cases such as
 * numeric-to-floating-point and string-to-timestamp conversion.
 * If strict is true, only casts that lose no precision are considered.
 * TODO: Support casting of non-scalar types.
 */
public static boolean isImplicitlyCastable(Type t1, Type t2, boolean strict) {
    // Scalar-to-scalar casts are delegated entirely to ScalarType.
    if (t1.isScalarType() && t2.isScalarType()) {
        return ScalarType.isImplicitlyCastable((ScalarType) t1, (ScalarType) t2, strict);
    }
    // Complex types are only castable to the same kind of complex type.
    if (t1.isComplexType() || t2.isComplexType()) {
        return (t1.isArrayType() && t2.isArrayType())
                || (t1.isMapType() && t2.isMapType())
                || (t1.isStructType() && t2.isStructType());
    }
    return false;
}
/** Returns true if an explicit cast from t1 to t2 is allowed (scalar types only). */
public static boolean canCastTo(Type t1, Type t2) {
    if (t1.isScalarType() && t2.isScalarType()) {
        return ScalarType.canCastTo((ScalarType) t1, (ScalarType) t2);
    }
    // Non-scalar casts are not supported.
    return false;
}
/**
 * Return type t such that values from both t1 and t2 can be assigned to t without an
 * explicit cast. If strict, does not consider conversions that would result in loss
 * of precision (e.g. converting decimal to float). Returns INVALID_TYPE if there is
 * no such type or if any of t1 and t2 is INVALID_TYPE.
 * TODO: Support non-scalar types.
 */
public static Type getAssignmentCompatibleType(Type t1, Type t2, boolean strict) {
    if (t1.isScalarType() && t2.isScalarType()) {
        return ScalarType.getAssignmentCompatibleType((ScalarType) t1, (ScalarType) t2, strict);
    }
    return ScalarType.INVALID;
}
/**
 * Parses the given expression as a long when possible.
 *
 * @return the parsed Long when expectStringExpr is a StringLiteral whose value
 *         parses as a long; null otherwise.
 */
public static Long tryParseToLong(Expr expectStringExpr) {
    if (!(expectStringExpr instanceof StringLiteral)) {
        return null;
    }
    String literalValue = ((StringLiteral) expectStringExpr).getValue();
    // Longs.tryParse returns null instead of throwing on malformed input.
    return Longs.tryParse(literalValue);
}
/**
 * Returns true if this type exceeds the MAX_NESTING_DEPTH, false otherwise.
 */
public boolean exceedsMaxNestingDepth() {
    return exceedsMaxNestingDepth(0);
}
/**
 * Helper for exceedsMaxNestingDepth(). Recursively computes the max nesting depth,
 * terminating early if MAX_NESTING_DEPTH is reached. Returns true if this type
 * exceeds the MAX_NESTING_DEPTH, false otherwise.
 *
 * Examples of types and their nesting depth:
 * INT --> 1
 * STRUCT<f1:INT> --> 2
 * STRUCT<f1:STRUCT<f2:INT>> --> 3
 * ARRAY<INT> --> 2
 * ARRAY<STRUCT<f1:INT>> --> 3
 * MAP<STRING,INT> --> 2
 * MAP<STRING,STRUCT<f1:INT>> --> 3
 *
 * @param d the depth already accumulated above this type.
 */
private boolean exceedsMaxNestingDepth(int d) {
    // Early exit: anything at or beyond the limit is too deep.
    if (d >= MAX_NESTING_DEPTH) return true;
    if (isStructType()) {
        // A struct is too deep if any of its fields is.
        StructType structType = (StructType) this;
        for (StructField f : structType.getFields()) {
            if (f.getType().exceedsMaxNestingDepth(d + 1)) {
                return true;
            }
        }
    } else if (isArrayType()) {
        ArrayType arrayType = (ArrayType) this;
        if (arrayType.getItemType().exceedsMaxNestingDepth(d + 1)) {
            return true;
        }
    } else if (isMultiRowType()) {
        MultiRowType multiRowType = (MultiRowType) this;
        if (multiRowType.getItemType().exceedsMaxNestingDepth(d + 1)) {
            return true;
        }
    } else if (isMapType()) {
        // Only the value type can nest further; map keys are scalar.
        MapType mapType = (MapType) this;
        if (mapType.getValueType().exceedsMaxNestingDepth(d + 1)) {
            return true;
        }
    } else {
        // Every remaining case must be a scalar, which never nests.
        Preconditions.checkState(isScalarType());
    }
    return false;
}
/**
 * Maps a PrimitiveType to the corresponding Type singleton, or a fresh container
 * type for ARRAY/MAP/STRUCT. Returns null for values with no mapping.
 */
public static Type fromPrimitiveType(PrimitiveType type) {
    switch (type) {
        case BOOLEAN: return Type.BOOLEAN;
        case TINYINT: return Type.TINYINT;
        case SMALLINT: return Type.SMALLINT;
        case INT: return Type.INT;
        case BIGINT: return Type.BIGINT;
        case LARGEINT: return Type.LARGEINT;
        case FLOAT: return Type.FLOAT;
        case DOUBLE: return Type.DOUBLE;
        case DATE: return Type.DATE;
        case DATETIME: return Type.DATETIME;
        case TIME: return Type.TIME;
        case DECIMALV2: return Type.DECIMALV2;
        case CHAR: return Type.CHAR;
        case VARCHAR: return Type.VARCHAR;
        case STRING: return Type.STRING;
        case HLL: return Type.HLL;
        // Container types get a new instance each call.
        case ARRAY: return ArrayType.create();
        case MAP: return new MapType();
        case STRUCT: return new StructType();
        case BITMAP: return Type.BITMAP;
        default: return null;
    }
}
/** Serializes an array of types; convenience overload delegating to the list form. */
public static List<TTypeDesc> toThrift(Type[] types) {
    return toThrift(Lists.newArrayList(types));
}
/** Serializes each type to its thrift descriptor, preserving order. */
public static List<TTypeDesc> toThrift(ArrayList<Type> types) {
    ArrayList<TTypeDesc> result = Lists.newArrayList();
    for (Type t: types) {
        result.add(t.toThrift());
    }
    return result;
}
/**
 * Deserializes a Type from a thrift descriptor. The descriptor must contain at
 * least one node, and the decoded tree must consume every node exactly once.
 */
public static Type fromThrift(TTypeDesc thrift) {
    Preconditions.checkState(thrift.types.size() > 0);
    Pair<Type, Integer> t = fromThrift(thrift, 0);
    // All nodes must have been consumed by the recursive decode.
    Preconditions.checkState(t.second.equals(thrift.getTypesSize()));
    return t.first;
}
/**
 * Constructs a Type rooted at the TTypeNode at nodeIdx in the descriptor's
 * flattened pre-order node list.
 *
 * @param col     thrift type descriptor holding the flattened node list.
 * @param nodeIdx index of the node to decode.
 * @return pair of the decoded Type and the index of the first node that is not
 *         part of the decoded type's subtree.
 */
protected static Pair<Type, Integer> fromThrift(TTypeDesc col, int nodeIdx) {
    TTypeNode node = col.getTypes().get(nodeIdx);
    Type type = null;
    int tmpNodeIdx = nodeIdx;
    switch (node.getType()) {
        case SCALAR: {
            Preconditions.checkState(node.isSetScalarType());
            TScalarType scalarType = node.getScalarType();
            if (scalarType.getType() == TPrimitiveType.CHAR) {
                Preconditions.checkState(scalarType.isSetLen());
                type = ScalarType.createCharType(scalarType.getLen());
            } else if (scalarType.getType() == TPrimitiveType.VARCHAR) {
                Preconditions.checkState(scalarType.isSetLen());
                type = ScalarType.createVarcharType(scalarType.getLen());
            } else if (scalarType.getType() == TPrimitiveType.HLL) {
                type = ScalarType.createHllType();
            } else if (scalarType.getType() == TPrimitiveType.DECIMALV2) {
                // Bug fix: the original checked isSetPrecision() twice; a decimal
                // requires BOTH precision and scale to be present.
                Preconditions.checkState(scalarType.isSetPrecision()
                        && scalarType.isSetScale());
                type = ScalarType.createDecimalV2Type(scalarType.getPrecision(),
                        scalarType.getScale());
            } else {
                type = ScalarType.createType(
                        PrimitiveType.fromThrift(scalarType.getType()));
            }
            ++tmpNodeIdx;
            break;
        }
        case ARRAY: {
            // An ARRAY node is followed by its single item-type subtree.
            Preconditions.checkState(tmpNodeIdx + 1 < col.getTypesSize());
            Pair<Type, Integer> childType = fromThrift(col, tmpNodeIdx + 1);
            type = new ArrayType(childType.first);
            tmpNodeIdx = childType.second;
            break;
        }
        case MAP: {
            // A MAP node is followed by the key-type subtree, then the value-type subtree.
            Preconditions.checkState(tmpNodeIdx + 2 < col.getTypesSize());
            Pair<Type, Integer> keyType = fromThrift(col, tmpNodeIdx + 1);
            Pair<Type, Integer> valueType = fromThrift(col, keyType.second);
            type = new MapType(keyType.first, valueType.first);
            tmpNodeIdx = valueType.second;
            break;
        }
        case STRUCT: {
            // A STRUCT node carries field metadata; each field's type subtree follows.
            Preconditions.checkState(tmpNodeIdx + node.getStructFieldsSize() < col.getTypesSize());
            ArrayList<StructField> structFields = Lists.newArrayList();
            ++tmpNodeIdx;
            for (int i = 0; i < node.getStructFieldsSize(); ++i) {
                TStructField thriftField = node.getStructFields().get(i);
                String name = thriftField.getName();
                String comment = null;
                if (thriftField.isSetComment()) {
                    comment = thriftField.getComment();
                }
                Pair<Type, Integer> res = fromThrift(col, tmpNodeIdx);
                tmpNodeIdx = res.second.intValue();
                structFields.add(new StructField(name, res.first, comment));
            }
            type = new StructType(structFields);
            break;
        }
    }
    return new Pair<Type, Integer>(type, tmpNodeIdx);
}
/**
 * Utility function to get the primitive type of a thrift type that is known
 * to be scalar. The descriptor must contain exactly one SCALAR node.
 */
public TPrimitiveType getTPrimitiveType(TTypeDesc ttype) {
    Preconditions.checkState(ttype.getTypesSize() == 1);
    Preconditions.checkState(ttype.types.get(0).getType() == TTypeNodeType.SCALAR);
    return ttype.types.get(0).scalar_type.getType();
}
/**
 * JDBC data type description
 * Returns the column size for this type.
 * For numeric data this is the maximum precision.
 * For character data this is the length in characters.
 * For datetime types this is the length in characters of the String representation
 * (assuming the maximum allowed precision of the fractional seconds component).
 * For binary data this is the length in bytes.
 * Null is returned for data types where the column size is not applicable.
 */
public Integer getColumnSize() {
    if (!isScalarType()) return null;
    if (isNumericType()) return getPrecision();
    ScalarType t = (ScalarType) this;
    switch (t.getPrimitiveType()) {
        case CHAR:
        case VARCHAR:
        case STRING:
        case HLL:
            return t.getLength();
        default:
            // Not applicable (booleans, dates, etc.).
            return null;
    }
}
/**
 * JDBC data type description
 * For numeric types, returns the maximum precision (number of decimal digits)
 * for this type. For non-numeric types, returns null.
 */
public Integer getPrecision() {
    if (!isScalarType()) return null;
    ScalarType t = (ScalarType) this;
    switch (t.getPrimitiveType()) {
        case TINYINT:
            return 3;
        case SMALLINT:
            return 5;
        case INT:
            return 10;
        case BIGINT:
            return 19;
        case FLOAT:
            return 7;
        case DOUBLE:
            return 15;
        case DECIMALV2:
            // Decimals carry their declared precision.
            return t.decimalPrecision();
        default:
            return null;
    }
}
/**
 * JDBC data type description
 * Returns the number of fractional digits for this type, or null if not applicable.
 * For timestamp/time types, returns the number of digits in the fractional seconds
 * component.
 */
public Integer getDecimalDigits() {
    if (!isScalarType()) return null;
    ScalarType t = (ScalarType) this;
    switch (t.getPrimitiveType()) {
        case BOOLEAN:
        case TINYINT:
        case SMALLINT:
        case INT:
        case BIGINT:
            // Exact integer types have no fractional digits.
            return 0;
        case FLOAT:
            return 7;
        case DOUBLE:
            return 15;
        case DECIMALV2:
            // Decimals carry their declared scale.
            return t.decimalScale();
        default:
            return null;
    }
}
/**
 * JDBC data type description
 * For numeric data types, either 10 or 2. If it is 10, the values in COLUMN_SIZE
 * and DECIMAL_DIGITS give the number of decimal digits allowed for the column.
 * For example, a DECIMAL(12,5) column would return a NUM_PREC_RADIX of 10,
 * a COLUMN_SIZE of 12, and a DECIMAL_DIGITS of 5; a FLOAT column could return
 * a NUM_PREC_RADIX of 10, a COLUMN_SIZE of 15, and a DECIMAL_DIGITS of NULL.
 * If it is 2, the values in COLUMN_SIZE and DECIMAL_DIGITS give the number of bits
 * allowed in the column. For example, a FLOAT column could return a RADIX of 2,
 * a COLUMN_SIZE of 53, and a DECIMAL_DIGITS of NULL. NULL is returned for data
 * types where NUM_PREC_RADIX is not applicable.
 */
public Integer getNumPrecRadix() {
    if (!isScalarType()) return null;
    ScalarType t = (ScalarType) this;
    switch (t.getPrimitiveType()) {
        case TINYINT:
        case SMALLINT:
        case INT:
        case BIGINT:
        case FLOAT:
        case DOUBLE:
        case DECIMALV2:
            // All numeric types here report decimal (base-10) precision.
            return 10;
        default:
            // Non-numeric type.
            return null;
    }
}
/**
 * Matrix that records the "smallest" assignment-compatible type of two types
 * (INVALID_TYPE if no such type exists, i.e. if the input types are fundamentally
 * incompatible). A value of any of the two types could be assigned to a slot
 * of the assignment-compatible type. For strict compatibility, this can be done
 * without any loss of precision. For non-strict compatibility, there may be loss of
 * precision, e.g. if converting from BIGINT to FLOAT.
 *
 * We chose not to follow MySQL's type casting behavior as described here:
 * http://dev.mysql.com/doc/refman/5.6/en/type-conversion.html
 * for the following reasons:
 * conservative casting in arithmetic exprs: TINYINT + TINYINT -> BIGINT
 * comparison of many types as double: INT < FLOAT -> comparison as DOUBLE
 * special cases when dealing with dates and timestamps.
 */
protected static PrimitiveType[][] compatibilityMatrix;
/**
 * If we are checking in strict mode, any non-null entry in this matrix overrides
 * compatibilityMatrix. If the entry is null, the entry in compatibilityMatrix
 * is valid.
 */
protected static PrimitiveType[][] strictCompatibilityMatrix;
static {
compatibilityMatrix = new
PrimitiveType[PrimitiveType.values().length][PrimitiveType.values().length];
strictCompatibilityMatrix = new
PrimitiveType[PrimitiveType.values().length][PrimitiveType.values().length];
for (int i = 0; i < PrimitiveType.values().length; ++i) {
compatibilityMatrix[i][i] = PrimitiveType.values()[i];
}
compatibilityMatrix[BOOLEAN.ordinal()][TINYINT.ordinal()] = PrimitiveType.TINYINT;
compatibilityMatrix[BOOLEAN.ordinal()][SMALLINT.ordinal()] = PrimitiveType.SMALLINT;
compatibilityMatrix[BOOLEAN.ordinal()][INT.ordinal()] = PrimitiveType.INT;
compatibilityMatrix[BOOLEAN.ordinal()][BIGINT.ordinal()] = PrimitiveType.BIGINT;
compatibilityMatrix[BOOLEAN.ordinal()][LARGEINT.ordinal()] = PrimitiveType.LARGEINT;
compatibilityMatrix[BOOLEAN.ordinal()][FLOAT.ordinal()] = PrimitiveType.FLOAT;
compatibilityMatrix[BOOLEAN.ordinal()][DOUBLE.ordinal()] = PrimitiveType.DOUBLE;
compatibilityMatrix[BOOLEAN.ordinal()][DATE.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[BOOLEAN.ordinal()][DATETIME.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[BOOLEAN.ordinal()][CHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[BOOLEAN.ordinal()][VARCHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[BOOLEAN.ordinal()][HLL.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[BOOLEAN.ordinal()][TIME.ordinal()] = PrimitiveType.DOUBLE;
compatibilityMatrix[BOOLEAN.ordinal()][BITMAP.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[BOOLEAN.ordinal()][STRING.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[TINYINT.ordinal()][SMALLINT.ordinal()] = PrimitiveType.SMALLINT;
compatibilityMatrix[TINYINT.ordinal()][INT.ordinal()] = PrimitiveType.INT;
compatibilityMatrix[TINYINT.ordinal()][BIGINT.ordinal()] = PrimitiveType.BIGINT;
compatibilityMatrix[TINYINT.ordinal()][LARGEINT.ordinal()] = PrimitiveType.LARGEINT;
compatibilityMatrix[TINYINT.ordinal()][FLOAT.ordinal()] = PrimitiveType.FLOAT;
compatibilityMatrix[TINYINT.ordinal()][DOUBLE.ordinal()] = PrimitiveType.DOUBLE;
compatibilityMatrix[TINYINT.ordinal()][DATE.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[TINYINT.ordinal()][DATETIME.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[TINYINT.ordinal()][CHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[TINYINT.ordinal()][VARCHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[TINYINT.ordinal()][DECIMALV2.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[TINYINT.ordinal()][HLL.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[TINYINT.ordinal()][TIME.ordinal()] = PrimitiveType.DOUBLE;
compatibilityMatrix[TINYINT.ordinal()][BITMAP.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[TINYINT.ordinal()][STRING.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[SMALLINT.ordinal()][INT.ordinal()] = PrimitiveType.INT;
compatibilityMatrix[SMALLINT.ordinal()][BIGINT.ordinal()] = PrimitiveType.BIGINT;
compatibilityMatrix[SMALLINT.ordinal()][LARGEINT.ordinal()] = PrimitiveType.LARGEINT;
compatibilityMatrix[SMALLINT.ordinal()][FLOAT.ordinal()] = PrimitiveType.FLOAT;
compatibilityMatrix[SMALLINT.ordinal()][DOUBLE.ordinal()] = PrimitiveType.DOUBLE;
compatibilityMatrix[SMALLINT.ordinal()][DATE.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[SMALLINT.ordinal()][DATETIME.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[SMALLINT.ordinal()][CHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[SMALLINT.ordinal()][VARCHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[SMALLINT.ordinal()][DECIMALV2.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[SMALLINT.ordinal()][HLL.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[SMALLINT.ordinal()][TIME.ordinal()] = PrimitiveType.DOUBLE;
compatibilityMatrix[SMALLINT.ordinal()][BITMAP.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[SMALLINT.ordinal()][STRING.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[INT.ordinal()][BIGINT.ordinal()] = PrimitiveType.BIGINT;
compatibilityMatrix[INT.ordinal()][LARGEINT.ordinal()] = PrimitiveType.LARGEINT;
compatibilityMatrix[INT.ordinal()][FLOAT.ordinal()] = PrimitiveType.FLOAT;
strictCompatibilityMatrix[INT.ordinal()][FLOAT.ordinal()] = PrimitiveType.DOUBLE;
compatibilityMatrix[INT.ordinal()][DOUBLE.ordinal()] = PrimitiveType.DOUBLE;
compatibilityMatrix[INT.ordinal()][DATE.ordinal()] = PrimitiveType.INT;
compatibilityMatrix[INT.ordinal()][DATETIME.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[INT.ordinal()][CHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[INT.ordinal()][VARCHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[INT.ordinal()][DECIMALV2.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[INT.ordinal()][HLL.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[INT.ordinal()][TIME.ordinal()] = PrimitiveType.DOUBLE;
compatibilityMatrix[INT.ordinal()][BITMAP.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[INT.ordinal()][STRING.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[BIGINT.ordinal()][LARGEINT.ordinal()] = PrimitiveType.LARGEINT;
compatibilityMatrix[BIGINT.ordinal()][FLOAT.ordinal()] = PrimitiveType.FLOAT;
strictCompatibilityMatrix[BIGINT.ordinal()][FLOAT.ordinal()] = PrimitiveType.DOUBLE;
compatibilityMatrix[BIGINT.ordinal()][DOUBLE.ordinal()] = PrimitiveType.DOUBLE;
compatibilityMatrix[BIGINT.ordinal()][DATE.ordinal()] = PrimitiveType.BIGINT;
compatibilityMatrix[BIGINT.ordinal()][DATETIME.ordinal()] = PrimitiveType.BIGINT;
compatibilityMatrix[BIGINT.ordinal()][CHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[BIGINT.ordinal()][VARCHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[BIGINT.ordinal()][DECIMALV2.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[BIGINT.ordinal()][HLL.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[BIGINT.ordinal()][TIME.ordinal()] = PrimitiveType.DOUBLE;
compatibilityMatrix[BIGINT.ordinal()][BITMAP.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[BIGINT.ordinal()][STRING.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[LARGEINT.ordinal()][FLOAT.ordinal()] = PrimitiveType.DOUBLE;
compatibilityMatrix[LARGEINT.ordinal()][DOUBLE.ordinal()] = PrimitiveType.DOUBLE;
compatibilityMatrix[LARGEINT.ordinal()][DATE.ordinal()] = PrimitiveType.LARGEINT;
compatibilityMatrix[LARGEINT.ordinal()][DATETIME.ordinal()] = PrimitiveType.LARGEINT;
compatibilityMatrix[LARGEINT.ordinal()][CHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[LARGEINT.ordinal()][VARCHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[LARGEINT.ordinal()][DECIMALV2.ordinal()] = PrimitiveType.DECIMALV2;
compatibilityMatrix[LARGEINT.ordinal()][HLL.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[LARGEINT.ordinal()][TIME.ordinal()] = PrimitiveType.DOUBLE;
compatibilityMatrix[LARGEINT.ordinal()][BITMAP.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[LARGEINT.ordinal()][STRING.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[FLOAT.ordinal()][DOUBLE.ordinal()] = PrimitiveType.DOUBLE;
compatibilityMatrix[FLOAT.ordinal()][DATE.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[FLOAT.ordinal()][DATETIME.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[FLOAT.ordinal()][CHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[FLOAT.ordinal()][VARCHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[FLOAT.ordinal()][DECIMALV2.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[FLOAT.ordinal()][HLL.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[FLOAT.ordinal()][TIME.ordinal()] = PrimitiveType.DOUBLE;
compatibilityMatrix[FLOAT.ordinal()][BITMAP.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[FLOAT.ordinal()][STRING.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[DOUBLE.ordinal()][DATE.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[DOUBLE.ordinal()][DATETIME.ordinal()] = PrimitiveType.DOUBLE ;
compatibilityMatrix[DOUBLE.ordinal()][CHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[DOUBLE.ordinal()][VARCHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[DOUBLE.ordinal()][DECIMALV2.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[DOUBLE.ordinal()][HLL.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[DOUBLE.ordinal()][TIME.ordinal()] = PrimitiveType.DOUBLE;
compatibilityMatrix[DOUBLE.ordinal()][BITMAP.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[DOUBLE.ordinal()][STRING.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[DATE.ordinal()][DATETIME.ordinal()] = PrimitiveType.DATETIME;
compatibilityMatrix[DATE.ordinal()][CHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[DATE.ordinal()][VARCHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[DATE.ordinal()][DECIMALV2.ordinal()] = PrimitiveType.DECIMALV2;
compatibilityMatrix[DATE.ordinal()][HLL.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[DATE.ordinal()][TIME.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[DATE.ordinal()][BITMAP.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[DATE.ordinal()][STRING.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[DATETIME.ordinal()][CHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[DATETIME.ordinal()][VARCHAR.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[DATETIME.ordinal()][DECIMALV2.ordinal()] = PrimitiveType.DECIMALV2;
compatibilityMatrix[DATETIME.ordinal()][HLL.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[DATETIME.ordinal()][TIME.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[DATETIME.ordinal()][BITMAP.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[DATETIME.ordinal()][STRING.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[CHAR.ordinal()][VARCHAR.ordinal()] = PrimitiveType.VARCHAR;
compatibilityMatrix[CHAR.ordinal()][DECIMALV2.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[CHAR.ordinal()][HLL.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[CHAR.ordinal()][TIME.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[CHAR.ordinal()][BITMAP.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[CHAR.ordinal()][STRING.ordinal()] = PrimitiveType.STRING;
compatibilityMatrix[VARCHAR.ordinal()][DECIMALV2.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[VARCHAR.ordinal()][HLL.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[VARCHAR.ordinal()][TIME.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[VARCHAR.ordinal()][BITMAP.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[VARCHAR.ordinal()][STRING.ordinal()] = PrimitiveType.STRING;
compatibilityMatrix[STRING.ordinal()][HLL.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[STRING.ordinal()][TIME.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[STRING.ordinal()][BITMAP.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[DECIMALV2.ordinal()][HLL.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[DECIMALV2.ordinal()][TIME.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[DECIMALV2.ordinal()][BITMAP.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[DECIMALV2.ordinal()][STRING.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[HLL.ordinal()][TIME.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[HLL.ordinal()][BITMAP.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[HLL.ordinal()][STRING.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[BITMAP.ordinal()][TIME.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[BITMAP.ordinal()][STRING.ordinal()] = PrimitiveType.INVALID_TYPE;
compatibilityMatrix[TIME.ordinal()][TIME.ordinal()] = PrimitiveType.INVALID_TYPE;
for (int i = 0; i < PrimitiveType.values().length - 2; ++i) {
for (int j = i; j < PrimitiveType.values().length - 2; ++j) {
PrimitiveType t1 = PrimitiveType.values()[i];
PrimitiveType t2 = PrimitiveType.values()[j];
if (t1 == PrimitiveType.INVALID_TYPE ||
t2 == PrimitiveType.INVALID_TYPE) continue;
if (t1 == PrimitiveType.NULL_TYPE || t2 == PrimitiveType.NULL_TYPE) continue;
if (t1 == PrimitiveType.ARRAY || t2 == PrimitiveType.ARRAY) continue;
if (t1 == PrimitiveType.DECIMALV2 || t2 == PrimitiveType.DECIMALV2) continue;
if (t1 == PrimitiveType.TIME || t2 == PrimitiveType.TIME) continue;
if (t1 == PrimitiveType.ARRAY || t2 == PrimitiveType.ARRAY) continue;
if (t1 == PrimitiveType.MAP || t2 == PrimitiveType.MAP) continue;
if (t1 == PrimitiveType.STRUCT || t2 == PrimitiveType.STRUCT) continue;
Preconditions.checkNotNull(compatibilityMatrix[i][j]);
}
}
}
/**
 * Maps this type to the widened type used for results: integer types widen to
 * BIGINT, floating point to DOUBLE, character/temporal/HLL/BITMAP types to
 * VARCHAR; LARGEINT, DECIMALV2 and STRING map to themselves.
 *
 * @return the widened result type, or INVALID for unmapped primitive types
 */
public Type getResultType() {
    final PrimitiveType primitive = this.getPrimitiveType();
    switch (primitive) {
        case LARGEINT:
            return LARGEINT;
        case DECIMALV2:
            return DECIMALV2;
        case STRING:
            return STRING;
        case BOOLEAN:
        case TINYINT:
        case SMALLINT:
        case INT:
        case BIGINT:
            // All fixed-point integers accumulate as BIGINT.
            return BIGINT;
        case FLOAT:
        case DOUBLE:
            return DOUBLE;
        case DATE:
        case DATETIME:
        case TIME:
        case CHAR:
        case VARCHAR:
        case HLL:
        case BITMAP:
            // Temporal and character-family types resolve to VARCHAR.
            return VARCHAR;
        default:
            return INVALID;
    }
}
/**
 * Returns the common type two operands should be cast to for a comparison,
 * falling back to DOUBLE when no more specific rule applies.
 *
 * @param t1 left operand type
 * @param t2 right operand type
 * @return the comparison type
 */
public static Type getCmpType(Type t1, Type t2) {
    // NULL takes on the other operand's type.
    if (t1.getPrimitiveType() == PrimitiveType.NULL_TYPE) {
        return t2;
    }
    if (t2.getPrimitiveType() == PrimitiveType.NULL_TYPE) {
        return t1;
    }
    // Date/datetime comparisons (including against strings/integers) are
    // performed in the temporal domain, not the numeric rules below.
    if (canCompareDate(t1.getPrimitiveType(), t2.getPrimitiveType())) {
        return Type.DATE;
    }
    if (canCompareDatetime(t1.getPrimitiveType(), t2.getPrimitiveType())) {
        return Type.DATETIME;
    }
    // Remaining rules are driven by each side's widened result type.
    PrimitiveType t1ResultType = t1.getResultType().getPrimitiveType();
    PrimitiveType t2ResultType = t2.getResultType().getPrimitiveType();
    if (t1ResultType == PrimitiveType.VARCHAR && t2ResultType == PrimitiveType.VARCHAR) {
        return Type.VARCHAR;
    }
    // STRING wins whenever either side resolves to STRING.
    if ((t1ResultType == PrimitiveType.STRING && t2ResultType == PrimitiveType.STRING)
            || (t1ResultType == PrimitiveType.STRING && t2ResultType == PrimitiveType.VARCHAR)
            || (t1ResultType == PrimitiveType.VARCHAR && t2ResultType == PrimitiveType.STRING)) {
        return Type.STRING;
    }
    // Integer vs. character family: compare as the string side's own type.
    if ((t1ResultType.isFixedPointType() && t2ResultType.isCharFamily())
            || (t2ResultType.isFixedPointType() && t1ResultType.isCharFamily())) {
        return t1.isStringType() ? t1 : t2;
    }
    if (t1ResultType == PrimitiveType.BIGINT && t2ResultType == PrimitiveType.BIGINT) {
        return getAssignmentCompatibleType(t1, t2, false);
    }
    // BIGINT mixed with DECIMALV2 compares as DECIMALV2 ...
    if ((t1ResultType == PrimitiveType.BIGINT || t1ResultType == PrimitiveType.DECIMALV2)
            && (t2ResultType == PrimitiveType.BIGINT || t2ResultType == PrimitiveType.DECIMALV2)) {
        return Type.DECIMALV2;
    }
    // ... and BIGINT mixed with LARGEINT as LARGEINT.
    if ((t1ResultType == PrimitiveType.BIGINT || t1ResultType == PrimitiveType.LARGEINT)
            && (t2ResultType == PrimitiveType.BIGINT || t2ResultType == PrimitiveType.LARGEINT)) {
        return Type.LARGEINT;
    }
    // Everything else (floating point and mixed numeric) compares as DOUBLE.
    return Type.DOUBLE;
}
/**
 * Whether values of the two primitive types can be compared in the datetime
 * domain. A date-typed operand is comparable with another date type, a string,
 * or an integer; all other combinations are not.
 *
 * @param t1 left operand's primitive type
 * @param t2 right operand's primitive type
 * @return true if the pair is comparable as datetime
 */
public static boolean canCompareDatetime(PrimitiveType t1, PrimitiveType t2) {
    // Idiom fix: collapse "if (cond) return true; return false;" chains into
    // direct boolean returns; accepted combinations are unchanged.
    if (t1.isDateType()) {
        return t2.isDateType() || t2.isStringType() || t2.isIntegerType();
    }
    if (t2.isDateType()) {
        // t2 is the date side here, so only a string/integer t1 qualifies
        // (t1 being a date was already handled above).
        return t1.isStringType() || t1.isIntegerType();
    }
    return false;
}
/**
 * Returns the maximum-resolution type for this type. The base implementation
 * returns null; subclasses are presumably expected to override it.
 */
public Type getMaxResolutionType() {
    // NOTE(review): checkState(true, ...) can never fail, so this line is a
    // no-op rather than an "unimplemented" guard. checkState(false, ...) (or
    // an abstract method) was likely intended — confirm with callers before
    // changing, since they may rely on the null return.
    Preconditions.checkState(true, "must implemented");
    return null;
}
/**
 * Result type used for numeric computations on this type: integer types widen
 * to BIGINT, LARGEINT and DECIMALV2 map to themselves, and all floating,
 * temporal, character and HLL types fall back to DOUBLE.
 *
 * @return the numeric result type, or Type.INVALID for unmapped types
 */
public Type getNumResultType() {
    final PrimitiveType primitive = getPrimitiveType();
    switch (primitive) {
        case LARGEINT:
            return Type.LARGEINT;
        case DECIMALV2:
            return Type.DECIMALV2;
        case BOOLEAN:
        case TINYINT:
        case SMALLINT:
        case INT:
        case BIGINT:
            return Type.BIGINT;
        case FLOAT:
        case DOUBLE:
        case DATE:
        case DATETIME:
        case TIME:
        case CHAR:
        case VARCHAR:
        case STRING:
        case HLL:
            return Type.DOUBLE;
        default:
            return Type.INVALID;
    }
}
/**
 * Number of bytes this type occupies in the storage layout. The base
 * implementation returns 0; concrete type subclasses presumably override
 * this with their real sizes — confirm against ScalarType/ArrayType.
 */
public int getStorageLayoutBytes() {
    return 0;
}
/**
 * Size in bytes of this type's entry in the OLAP column index: CHAR uses its
 * declared length, every other type uses the primitive type's fixed index size.
 */
public int getIndexSize() {
    final PrimitiveType primitive = this.getPrimitiveType();
    return primitive == PrimitiveType.CHAR
            ? ((ScalarType) this).getLength()
            : primitive.getOlapColumnIndexSize();
}
}
|
`config.getOptionalValue()` throws `IllegalArgumentException` if the property cannot be converted to the specified type. This was a bug in our integration — the SmallRye Config extension catches that exception as well: https://github.com/smallrye/smallrye-config/blob/master/implementation/src/main/java/io/smallrye/config/inject/ConfigExtension.java#L93-L104
|
/**
 * Validates at deployment time that every injected config property has a value
 * convertible to its declared type.
 *
 * @param properties map of property name to the set of declared type names it
 *     is injected as
 * @throws DeploymentException if a property is missing or its value cannot be
 *     converted to the declared type
 */
public void validateConfigProperties(Map<String, Set<String>> properties) {
    Config config = ConfigProviderResolver.instance().getConfig();
    ClassLoader cl = Thread.currentThread().getContextClassLoader();
    if (cl == null) {
        // BUG FIX: the fallback class loader was computed but never assigned,
        // leaving cl null and breaking load() below when there is no TCCL.
        cl = ConfigDeploymentTemplate.class.getClassLoader();
    }
    for (Entry<String, Set<String>> entry : properties.entrySet()) {
        for (String propertyType : entry.getValue()) {
            Class<?> propertyClass = load(propertyType, cl);
            // Arrays and generic containers cannot be converted directly;
            // fall back to checking that a raw String value exists.
            if (propertyClass.isArray() || propertyClass.getTypeParameters().length > 0) {
                propertyClass = String.class;
            }
            try {
                if (!config.getOptionalValue(entry.getKey(), propertyClass).isPresent()) {
                    // FIX: report the specific failing type, not the whole set.
                    throw new DeploymentException(
                            "No config value of type " + propertyType + " exists for: " + entry.getKey());
                }
            } catch (IllegalArgumentException e) {
                // getOptionalValue throws IllegalArgumentException when the
                // value exists but cannot be converted to propertyClass.
                throw new DeploymentException(e);
            }
        }
    }
}
|
throw new DeploymentException(e);
|
/**
 * Validates at deployment time that every injected config property has a value
 * convertible to its declared type.
 *
 * @param properties map of property name to the set of declared type names it
 *     is injected as
 * @throws DeploymentException if a property is missing or its value cannot be
 *     converted to the declared type
 */
public void validateConfigProperties(Map<String, Set<String>> properties) {
    Config config = ConfigProviderResolver.instance().getConfig();
    ClassLoader cl = Thread.currentThread().getContextClassLoader();
    if (cl == null) {
        // BUG FIX: the fallback class loader was computed but never assigned,
        // leaving cl null and breaking load() below when there is no TCCL.
        cl = ConfigDeploymentTemplate.class.getClassLoader();
    }
    for (Entry<String, Set<String>> entry : properties.entrySet()) {
        for (String propertyType : entry.getValue()) {
            Class<?> propertyClass = load(propertyType, cl);
            // Arrays and generic containers cannot be converted directly;
            // fall back to checking that a raw String value exists.
            if (propertyClass.isArray() || propertyClass.getTypeParameters().length > 0) {
                propertyClass = String.class;
            }
            try {
                if (!config.getOptionalValue(entry.getKey(), propertyClass).isPresent()) {
                    // FIX: report the specific failing type, not the whole set.
                    throw new DeploymentException(
                            "No config value of type " + propertyType + " exists for: " + entry.getKey());
                }
            } catch (IllegalArgumentException e) {
                // getOptionalValue throws IllegalArgumentException when the
                // value exists but cannot be converted to propertyClass.
                throw new DeploymentException(e);
            }
        }
    }
}
|
class ConfigDeploymentTemplate {
    /**
     * Resolves a config property type name to a {@code Class}, handling the
     * primitive type names that {@code Class.forName} cannot load.
     *
     * @param className binary name or primitive name of the property type
     * @param cl class loader used for non-primitive lookups
     * @return the resolved class
     * @throws IllegalStateException if the class cannot be found
     */
    private Class<?> load(String className, ClassLoader cl) {
        switch (className) {
            case "boolean":
                return boolean.class;
            case "byte":
                return byte.class;
            case "short":
                return short.class;
            case "int":
                return int.class;
            case "long":
                return long.class;
            case "float":
                return float.class;
            case "double":
                return double.class;
            case "char":
                return char.class;
            case "void":
                return void.class;
        }
        try {
            return Class.forName(className, true, cl);
        } catch (ClassNotFoundException e) {
            // BUG FIX: preserve the original cause instead of discarding it.
            throw new IllegalStateException("Unable to load the config property type: " + className, e);
        }
    }
}
|
class ConfigDeploymentTemplate {
    /**
     * Resolves a config property type name to a {@code Class}, handling the
     * primitive type names that {@code Class.forName} cannot load.
     *
     * @param className binary name or primitive name of the property type
     * @param cl class loader used for non-primitive lookups
     * @return the resolved class
     * @throws IllegalStateException if the class cannot be found
     */
    private Class<?> load(String className, ClassLoader cl) {
        switch (className) {
            case "boolean":
                return boolean.class;
            case "byte":
                return byte.class;
            case "short":
                return short.class;
            case "int":
                return int.class;
            case "long":
                return long.class;
            case "float":
                return float.class;
            case "double":
                return double.class;
            case "char":
                return char.class;
            case "void":
                return void.class;
        }
        try {
            return Class.forName(className, true, cl);
        } catch (ClassNotFoundException e) {
            // BUG FIX: preserve the original cause instead of discarding it.
            throw new IllegalStateException("Unable to load the config property type: " + className, e);
        }
    }
}
|
The public OpenAI service doesn't have an endpoint. If the user provides an `endpoint`, we assume they are targeting the Azure OpenAI service; otherwise, we target the OpenAI service.
|
/**
 * Assembles the HTTP pipeline used by the built client, in the standard Azure
 * SDK policy order: user agent/request-id/headers, per-call policies, retry,
 * per-retry policies, logging last. Policy order is behavior-significant.
 */
private HttpPipeline createHttpPipeline() {
    // Fall back to global configuration / fresh option objects when unset.
    Configuration buildConfiguration =
            (configuration == null) ? Configuration.getGlobalConfiguration() : configuration;
    HttpLogOptions localHttpLogOptions = this.httpLogOptions == null ? new HttpLogOptions() : this.httpLogOptions;
    ClientOptions localClientOptions = this.clientOptions == null ? new ClientOptions() : this.clientOptions;
    List<HttpPipelinePolicy> policies = new ArrayList<>();
    String clientName = PROPERTIES.getOrDefault(SDK_NAME, "UnknownName");
    String clientVersion = PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion");
    String applicationId = CoreUtils.getApplicationId(localClientOptions, localHttpLogOptions);
    policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, buildConfiguration));
    policies.add(new RequestIdPolicy());
    policies.add(new AddHeadersFromContextPolicy());
    // Custom headers supplied through ClientOptions.
    HttpHeaders headers = new HttpHeaders();
    localClientOptions.getHeaders().forEach(header -> headers.set(header.getName(), header.getValue()));
    if (headers.getSize() > 0) {
        policies.add(new AddHeadersPolicy(headers));
    }
    this.pipelinePolicies.stream()
            .filter(p -> p.getPipelinePosition() == HttpPipelinePosition.PER_CALL)
            .forEach(p -> policies.add(p));
    HttpPolicyProviders.addBeforeRetryPolicies(policies);
    policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions, new RetryPolicy()));
    policies.add(new AddDatePolicy());
    policies.add(new CookiePolicy());
    if (keyCredential != null) {
        KeyCredentialPolicy keyCredentialPolicy;
        // An explicit endpoint means the Azure OpenAI service, which expects
        // the key in an "api-key" header; no endpoint means the public OpenAI
        // service, which expects "Authorization: Bearer <key>".
        if (endpoint != null) {
            keyCredentialPolicy = new KeyCredentialPolicy("api-key", keyCredential);
        } else {
            keyCredentialPolicy = new KeyCredentialPolicy("Authorization", keyCredential, "Bearer");
        }
        policies.add(keyCredentialPolicy);
    }
    if (tokenCredential != null) {
        // AAD token authentication (Azure), scoped to DEFAULT_SCOPES.
        policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, DEFAULT_SCOPES));
    }
    this.pipelinePolicies.stream()
            .filter(p -> p.getPipelinePosition() == HttpPipelinePosition.PER_RETRY)
            .forEach(p -> policies.add(p));
    HttpPolicyProviders.addAfterRetryPolicies(policies);
    policies.add(new HttpLoggingPolicy(httpLogOptions));
    HttpPipeline httpPipeline =
            new HttpPipelineBuilder()
                    .policies(policies.toArray(new HttpPipelinePolicy[0]))
                    .httpClient(httpClient)
                    .clientOptions(localClientOptions)
                    .build();
    return httpPipeline;
}
|
if (endpoint != null) {
|
/**
 * Assembles the HTTP pipeline used by the built client, in the standard Azure
 * SDK policy order: user agent/request-id/headers, per-call policies, retry,
 * per-retry policies, logging last. Policy order is behavior-significant.
 */
private HttpPipeline createHttpPipeline() {
    // Fall back to global configuration / fresh option objects when unset.
    Configuration buildConfiguration =
            (configuration == null) ? Configuration.getGlobalConfiguration() : configuration;
    HttpLogOptions localHttpLogOptions = this.httpLogOptions == null ? new HttpLogOptions() : this.httpLogOptions;
    ClientOptions localClientOptions = this.clientOptions == null ? new ClientOptions() : this.clientOptions;
    List<HttpPipelinePolicy> policies = new ArrayList<>();
    String clientName = PROPERTIES.getOrDefault(SDK_NAME, "UnknownName");
    String clientVersion = PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion");
    String applicationId = CoreUtils.getApplicationId(localClientOptions, localHttpLogOptions);
    policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, buildConfiguration));
    policies.add(new RequestIdPolicy());
    policies.add(new AddHeadersFromContextPolicy());
    // Custom headers supplied through ClientOptions.
    HttpHeaders headers = new HttpHeaders();
    localClientOptions.getHeaders().forEach(header -> headers.set(header.getName(), header.getValue()));
    if (headers.getSize() > 0) {
        policies.add(new AddHeadersPolicy(headers));
    }
    this.pipelinePolicies.stream()
            .filter(p -> p.getPipelinePosition() == HttpPipelinePosition.PER_CALL)
            .forEach(p -> policies.add(p));
    HttpPolicyProviders.addBeforeRetryPolicies(policies);
    policies.add(ClientBuilderUtil.validateAndGetRetryPolicy(retryPolicy, retryOptions, new RetryPolicy()));
    policies.add(new AddDatePolicy());
    policies.add(new CookiePolicy());
    if (keyCredential != null) {
        // Non-Azure (public OpenAI) expects "Authorization: Bearer <key>";
        // Azure OpenAI expects the key in an "api-key" header.
        policies.add(
                useNonAzureOpenAIService()
                        ? new KeyCredentialPolicy("Authorization", keyCredential, "Bearer")
                        : new KeyCredentialPolicy("api-key", keyCredential));
    }
    if (tokenCredential != null) {
        // AAD token authentication (Azure), scoped to DEFAULT_SCOPES.
        policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, DEFAULT_SCOPES));
    }
    this.pipelinePolicies.stream()
            .filter(p -> p.getPipelinePosition() == HttpPipelinePosition.PER_RETRY)
            .forEach(p -> policies.add(p));
    HttpPolicyProviders.addAfterRetryPolicies(policies);
    policies.add(new HttpLoggingPolicy(httpLogOptions));
    HttpPipeline httpPipeline =
            new HttpPipelineBuilder()
                    .policies(policies.toArray(new HttpPipelinePolicy[0]))
                    .httpClient(httpClient)
                    .clientOptions(localClientOptions)
                    .build();
    return httpPipeline;
}
|
/**
 * Fluent builder for {@code OpenAIClient} / {@code OpenAIAsyncClient}. Selects the
 * Azure OpenAI implementation when an endpoint is configured, otherwise the
 * non-Azure (public OpenAI) implementation. Mostly code-generated ({@code @Generated}).
 */
class OpenAIClientBuilder
        implements HttpTrait<OpenAIClientBuilder>,
                ConfigurationTrait<OpenAIClientBuilder>,
                TokenCredentialTrait<OpenAIClientBuilder>,
                AzureKeyCredentialTrait<OpenAIClientBuilder>,
                EndpointTrait<OpenAIClientBuilder> {
    @Generated private static final String SDK_NAME = "name";
    @Generated private static final String SDK_VERSION = "version";
    // NOTE(review): this literal appears truncated in this copy ("https:");
    // verify the full AAD scope URL against the upstream source.
    @Generated
    private static final String[] DEFAULT_SCOPES = new String[] {"https:
    @Generated
    private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-ai-openai.properties");
    @Generated private final List<HttpPipelinePolicy> pipelinePolicies;
    /** Create an instance of the OpenAIClientBuilder. */
    @Generated
    public OpenAIClientBuilder() {
        this.pipelinePolicies = new ArrayList<>();
    }
    /*
     * The HTTP pipeline to send requests through.
     */
    @Generated private HttpPipeline pipeline;
    /** {@inheritDoc}. */
    @Generated
    @Override
    public OpenAIClientBuilder pipeline(HttpPipeline pipeline) {
        if (this.pipeline != null && pipeline == null) {
            LOGGER.info("HttpPipeline is being set to 'null' when it was previously configured.");
        }
        this.pipeline = pipeline;
        return this;
    }
    /*
     * The HTTP client used to send the request.
     */
    @Generated private HttpClient httpClient;
    /** {@inheritDoc}. */
    @Generated
    @Override
    public OpenAIClientBuilder httpClient(HttpClient httpClient) {
        this.httpClient = httpClient;
        return this;
    }
    /*
     * The logging configuration for HTTP requests and responses.
     */
    @Generated private HttpLogOptions httpLogOptions;
    /** {@inheritDoc}. */
    @Generated
    @Override
    public OpenAIClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
        this.httpLogOptions = httpLogOptions;
        return this;
    }
    /*
     * The client options such as application ID and custom headers to set on a request.
     */
    @Generated private ClientOptions clientOptions;
    /** {@inheritDoc}. */
    @Generated
    @Override
    public OpenAIClientBuilder clientOptions(ClientOptions clientOptions) {
        this.clientOptions = clientOptions;
        return this;
    }
    /*
     * The retry options to configure retry policy for failed requests.
     */
    @Generated private RetryOptions retryOptions;
    /** {@inheritDoc}. */
    @Generated
    @Override
    public OpenAIClientBuilder retryOptions(RetryOptions retryOptions) {
        this.retryOptions = retryOptions;
        return this;
    }
    /** {@inheritDoc}. */
    @Generated
    @Override
    public OpenAIClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
        Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null.");
        pipelinePolicies.add(customPolicy);
        return this;
    }
    /*
     * The configuration store that is used during construction of the service client.
     */
    @Generated private Configuration configuration;
    /** {@inheritDoc}. */
    @Generated
    @Override
    public OpenAIClientBuilder configuration(Configuration configuration) {
        this.configuration = configuration;
        return this;
    }
    /*
     * The TokenCredential used for authentication.
     */
    @Generated private TokenCredential tokenCredential;
    /** {@inheritDoc}. */
    @Generated
    @Override
    public OpenAIClientBuilder credential(TokenCredential tokenCredential) {
        this.tokenCredential = tokenCredential;
        return this;
    }
    /*
     * The AzureKeyCredential used for authentication.
     */
    @Generated private AzureKeyCredential azureKeyCredential;
    /** {@inheritDoc}. */
    @Override
    public OpenAIClientBuilder credential(AzureKeyCredential azureKeyCredential) {
        // Delegates to the KeyCredential overload so Azure and non-Azure keys
        // share one storage field.
        return this.credential((KeyCredential) azureKeyCredential);
    }
    /**
     * The KeyCredential used for OpenAi authentication. It could be either of Azure or Non-Azure OpenAI API key.
     */
    private KeyCredential keyCredential;
    /**
     * The KeyCredential used for OpenAi authentication. It could be either of Azure or Non-Azure OpenAI API key.
     *
     * @param keyCredential The credential for OpenAI authentication.
     * @return the object itself.
     */
    public OpenAIClientBuilder credential(KeyCredential keyCredential) {
        this.keyCredential = keyCredential;
        return this;
    }
    /*
     * The service endpoint
     */
    @Generated private String endpoint;
    /** {@inheritDoc}. */
    @Generated
    @Override
    public OpenAIClientBuilder endpoint(String endpoint) {
        this.endpoint = endpoint;
        return this;
    }
    /*
     * Service version
     */
    @Generated private OpenAIServiceVersion serviceVersion;
    /**
     * Sets Service version.
     *
     * @param serviceVersion the serviceVersion value.
     * @return the OpenAIClientBuilder.
     */
    @Generated
    public OpenAIClientBuilder serviceVersion(OpenAIServiceVersion serviceVersion) {
        this.serviceVersion = serviceVersion;
        return this;
    }
    /*
     * The retry policy that will attempt to retry failed requests, if applicable.
     */
    @Generated private RetryPolicy retryPolicy;
    /**
     * Sets The retry policy that will attempt to retry failed requests, if applicable.
     *
     * @param retryPolicy the retryPolicy value.
     * @return the OpenAIClientBuilder.
     */
    @Generated
    public OpenAIClientBuilder retryPolicy(RetryPolicy retryPolicy) {
        this.retryPolicy = retryPolicy;
        return this;
    }
    /**
     * Builds an instance of OpenAIClientImpl with the provided parameters.
     *
     * @return an instance of OpenAIClientImpl.
     */
    @Generated
    private OpenAIClientImpl buildInnerClient() {
        // A user-supplied pipeline wins over the one assembled by this builder.
        HttpPipeline localPipeline = (pipeline != null) ? pipeline : createHttpPipeline();
        OpenAIServiceVersion localServiceVersion =
                (serviceVersion != null) ? serviceVersion : OpenAIServiceVersion.getLatest();
        OpenAIClientImpl client =
                new OpenAIClientImpl(
                        localPipeline,
                        JacksonAdapter.createDefaultSerializerAdapter(),
                        this.endpoint,
                        localServiceVersion);
        return client;
    }
    // Builds the implementation client that targets the public (non-Azure)
    // OpenAI service; it needs neither endpoint nor service version.
    @Generated
    private NonAzureOpenAIClientImpl buildInnerNonAzureOpenAIClient() {
        HttpPipeline localPipeline = (pipeline != null) ? pipeline : createHttpPipeline();
        NonAzureOpenAIClientImpl client =
                new NonAzureOpenAIClientImpl(localPipeline, JacksonAdapter.createDefaultSerializerAdapter());
        return client;
    }
    /**
     * Builds an instance of OpenAIAsyncClient class.
     *
     * @return an instance of OpenAIAsyncClient.
     */
    public OpenAIAsyncClient buildAsyncClient() {
        // An explicit endpoint selects the Azure OpenAI implementation;
        // otherwise the non-Azure (public OpenAI) implementation is used.
        if (this.endpoint != null) {
            return new OpenAIAsyncClient(buildInnerClient());
        }
        return new OpenAIAsyncClient(buildInnerNonAzureOpenAIClient());
    }
    /**
     * Builds an instance of OpenAIClient class.
     *
     * @return an instance of OpenAIClient.
     */
    public OpenAIClient buildClient() {
        // Same endpoint-based selection as buildAsyncClient().
        if (this.endpoint != null) {
            return new OpenAIClient(buildInnerClient());
        }
        return new OpenAIClient(buildInnerNonAzureOpenAIClient());
    }
    private static final ClientLogger LOGGER = new ClientLogger(OpenAIClientBuilder.class);
}
|
/**
 * Fluent builder for {@code OpenAIClient} / {@code OpenAIAsyncClient}. Selects the
 * non-Azure (public OpenAI) implementation when {@link #useNonAzureOpenAIService()}
 * is true, otherwise the Azure OpenAI implementation. Mostly code-generated.
 */
class OpenAIClientBuilder
        implements HttpTrait<OpenAIClientBuilder>,
                ConfigurationTrait<OpenAIClientBuilder>,
                TokenCredentialTrait<OpenAIClientBuilder>,
                AzureKeyCredentialTrait<OpenAIClientBuilder>,
                EndpointTrait<OpenAIClientBuilder> {
    @Generated private static final String SDK_NAME = "name";
    @Generated private static final String SDK_VERSION = "version";
    // NOTE(review): this literal appears truncated in this copy ("https:");
    // verify the full AAD scope URL against the upstream source.
    @Generated
    private static final String[] DEFAULT_SCOPES = new String[] {"https:
    @Generated
    private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-ai-openai.properties");
    @Generated private final List<HttpPipelinePolicy> pipelinePolicies;
    /** Create an instance of the OpenAIClientBuilder. */
    @Generated
    public OpenAIClientBuilder() {
        this.pipelinePolicies = new ArrayList<>();
    }
    /*
     * The HTTP pipeline to send requests through.
     */
    @Generated private HttpPipeline pipeline;
    /** {@inheritDoc}. */
    @Generated
    @Override
    public OpenAIClientBuilder pipeline(HttpPipeline pipeline) {
        if (this.pipeline != null && pipeline == null) {
            LOGGER.info("HttpPipeline is being set to 'null' when it was previously configured.");
        }
        this.pipeline = pipeline;
        return this;
    }
    /*
     * The HTTP client used to send the request.
     */
    @Generated private HttpClient httpClient;
    /** {@inheritDoc}. */
    @Generated
    @Override
    public OpenAIClientBuilder httpClient(HttpClient httpClient) {
        this.httpClient = httpClient;
        return this;
    }
    /*
     * The logging configuration for HTTP requests and responses.
     */
    @Generated private HttpLogOptions httpLogOptions;
    /** {@inheritDoc}. */
    @Generated
    @Override
    public OpenAIClientBuilder httpLogOptions(HttpLogOptions httpLogOptions) {
        this.httpLogOptions = httpLogOptions;
        return this;
    }
    /*
     * The client options such as application ID and custom headers to set on a request.
     */
    @Generated private ClientOptions clientOptions;
    /** {@inheritDoc}. */
    @Generated
    @Override
    public OpenAIClientBuilder clientOptions(ClientOptions clientOptions) {
        this.clientOptions = clientOptions;
        return this;
    }
    /*
     * The retry options to configure retry policy for failed requests.
     */
    @Generated private RetryOptions retryOptions;
    /** {@inheritDoc}. */
    @Generated
    @Override
    public OpenAIClientBuilder retryOptions(RetryOptions retryOptions) {
        this.retryOptions = retryOptions;
        return this;
    }
    /** {@inheritDoc}. */
    @Generated
    @Override
    public OpenAIClientBuilder addPolicy(HttpPipelinePolicy customPolicy) {
        Objects.requireNonNull(customPolicy, "'customPolicy' cannot be null.");
        pipelinePolicies.add(customPolicy);
        return this;
    }
    /*
     * The configuration store that is used during construction of the service client.
     */
    @Generated private Configuration configuration;
    /** {@inheritDoc}. */
    @Generated
    @Override
    public OpenAIClientBuilder configuration(Configuration configuration) {
        this.configuration = configuration;
        return this;
    }
    /*
     * The TokenCredential used for authentication.
     */
    @Generated private TokenCredential tokenCredential;
    /** {@inheritDoc}. */
    @Generated
    @Override
    public OpenAIClientBuilder credential(TokenCredential tokenCredential) {
        this.tokenCredential = tokenCredential;
        return this;
    }
    /*
     * The AzureKeyCredential used for authentication.
     */
    @Generated private AzureKeyCredential azureKeyCredential;
    /** {@inheritDoc}. */
    @Override
    public OpenAIClientBuilder credential(AzureKeyCredential azureKeyCredential) {
        // Delegates to the KeyCredential overload so Azure and non-Azure keys
        // share one storage field.
        return this.credential((KeyCredential) azureKeyCredential);
    }
    /** The KeyCredential used for OpenAi authentication. It could be either of Azure or Non-Azure OpenAI API key. */
    private KeyCredential keyCredential;
    /**
     * The KeyCredential used for OpenAi authentication. It could be either of Azure or Non-Azure OpenAI API key.
     *
     * @param keyCredential The credential for OpenAI authentication.
     * @return the object itself.
     */
    public OpenAIClientBuilder credential(KeyCredential keyCredential) {
        this.keyCredential = keyCredential;
        return this;
    }
    /*
     * The service endpoint
     */
    @Generated private String endpoint;
    /** {@inheritDoc}. */
    @Generated
    @Override
    public OpenAIClientBuilder endpoint(String endpoint) {
        this.endpoint = endpoint;
        return this;
    }
    /*
     * Service version
     */
    @Generated private OpenAIServiceVersion serviceVersion;
    /**
     * Sets Service version.
     *
     * @param serviceVersion the serviceVersion value.
     * @return the OpenAIClientBuilder.
     */
    @Generated
    public OpenAIClientBuilder serviceVersion(OpenAIServiceVersion serviceVersion) {
        this.serviceVersion = serviceVersion;
        return this;
    }
    /*
     * The retry policy that will attempt to retry failed requests, if applicable.
     */
    @Generated private RetryPolicy retryPolicy;
    /**
     * Sets The retry policy that will attempt to retry failed requests, if applicable.
     *
     * @param retryPolicy the retryPolicy value.
     * @return the OpenAIClientBuilder.
     */
    @Generated
    public OpenAIClientBuilder retryPolicy(RetryPolicy retryPolicy) {
        this.retryPolicy = retryPolicy;
        return this;
    }
    /**
     * Builds an instance of OpenAIClientImpl with the provided parameters.
     *
     * @return an instance of OpenAIClientImpl.
     */
    @Generated
    private OpenAIClientImpl buildInnerClient() {
        // A user-supplied pipeline wins over the one assembled by this builder.
        HttpPipeline localPipeline = (pipeline != null) ? pipeline : createHttpPipeline();
        OpenAIServiceVersion localServiceVersion =
                (serviceVersion != null) ? serviceVersion : OpenAIServiceVersion.getLatest();
        OpenAIClientImpl client =
                new OpenAIClientImpl(
                        localPipeline,
                        JacksonAdapter.createDefaultSerializerAdapter(),
                        this.endpoint,
                        localServiceVersion);
        return client;
    }
    // Builds the implementation client that targets the public (non-Azure)
    // OpenAI service; it needs neither endpoint nor service version.
    private NonAzureOpenAIClientImpl buildInnerNonAzureOpenAIClient() {
        HttpPipeline localPipeline = (pipeline != null) ? pipeline : createHttpPipeline();
        NonAzureOpenAIClientImpl client =
                new NonAzureOpenAIClientImpl(localPipeline, JacksonAdapter.createDefaultSerializerAdapter());
        return client;
    }
    /**
     * Builds an instance of OpenAIAsyncClient class.
     *
     * @return an instance of OpenAIAsyncClient.
     */
    public OpenAIAsyncClient buildAsyncClient() {
        return useNonAzureOpenAIService()
                ? new OpenAIAsyncClient(buildInnerNonAzureOpenAIClient())
                : new OpenAIAsyncClient(buildInnerClient());
    }
    /**
     * Builds an instance of OpenAIClient class.
     *
     * @return an instance of OpenAIClient.
     */
    public OpenAIClient buildClient() {
        return useNonAzureOpenAIService()
                ? new OpenAIClient(buildInnerNonAzureOpenAIClient())
                : new OpenAIClient(buildInnerClient());
    }
    private static final ClientLogger LOGGER = new ClientLogger(OpenAIClientBuilder.class);
    /**
     * Whether the public (non-Azure) OpenAI service should be targeted: true when
     * no endpoint is set, or when the endpoint starts with the public OpenAI
     * endpoint prefix. (Original Javadoc was truncated in this copy.)
     */
    private boolean useNonAzureOpenAIService() {
        // NOTE(review): OPEN_AI_ENDPOINT is declared elsewhere in this file/package.
        return endpoint == null || endpoint.startsWith(OPEN_AI_ENDPOINT);
    }
}
|
Updated to do an exact match first, then a relaxed match.
|
/**
 * Finds the default API version for REST calls against the resource identified
 * by {@code id}, walking up parent resources until a matching resource type is
 * found in the provider metadata.
 *
 * @param id the resource ID
 * @param provider the resource provider metadata
 * @return the default (or first listed) API version, or null if id/provider is null
 */
public static String defaultApiVersion(String id, Provider provider) {
    if (id == null || provider == null) {
        return null;
    }
    ResourceId resourceId = ResourceId.fromString(id);
    String resourceTypeWithoutNamespace = getFullResourceTypeWithoutNamespace(resourceId);
    for (ProviderResourceType prt : provider.resourceTypes()) {
        // BUG FIX: only accept an exact (case-insensitive) resource-type match.
        // The previous relaxed check, fullResourceType.contains(prt.resourceType()),
        // could match an unrelated type whose name is a substring of this
        // resource's type and return the wrong API version.
        if (prt.resourceType().equalsIgnoreCase(resourceTypeWithoutNamespace)) {
            return prt.defaultApiVersion() == null ? prt.apiVersions().get(0) : prt.defaultApiVersion();
        }
    }
    // No match at this level: try the parent resource, or fall back to the
    // provider's first resource type.
    ResourceId parent = resourceId.parent();
    if (parent != null && !CoreUtils.isNullOrEmpty(parent.id())) {
        return defaultApiVersion(parent.id(), provider);
    } else {
        return provider.resourceTypes().get(0).apiVersions().get(0);
    }
}
|
if (prt.resourceType().equalsIgnoreCase(resourceTypeWithoutNamespace)
|
/**
 * Determines the default API version for REST requests against the resource
 * with the given ID, recursing to the parent resource when this level's type
 * is not present in the provider metadata.
 *
 * @param id the resource ID
 * @param provider the resource provider metadata
 * @return the default (or first listed) API version, or null if id/provider is null
 */
public static String defaultApiVersion(String id, Provider provider) {
    if (id == null || provider == null) {
        return null;
    }
    final ResourceId resourceId = ResourceId.fromString(id);
    final String typeWithoutNamespace = getFullResourceTypeWithoutNamespace(resourceId);
    // Exact (case-insensitive) match against the provider's resource types.
    for (ProviderResourceType candidate : provider.resourceTypes()) {
        if (!candidate.resourceType().equalsIgnoreCase(typeWithoutNamespace)) {
            continue;
        }
        String declaredDefault = candidate.defaultApiVersion();
        return declaredDefault != null ? declaredDefault : candidate.apiVersions().get(0);
    }
    // Nothing matched at this level; consult the parent resource if present,
    // otherwise fall back to the provider's first listed resource type.
    ResourceId parent = resourceId.parent();
    if (parent == null || CoreUtils.isNullOrEmpty(parent.id())) {
        return provider.resourceTypes().get(0).apiVersions().get(0);
    }
    return defaultApiVersion(parent.id(), provider);
}
|
/** Static helpers for parsing and constructing Azure resource ID strings. */
class ResourceUtils {
    // Utility class: no instances.
    private ResourceUtils() {
    }
    /**
     * Extract resource group from a resource ID string.
     *
     * @param id the resource ID string
     * @return the resource group name
     */
    public static String groupFromResourceId(String id) {
        return (id != null) ? ResourceId.fromString(id).resourceGroupName() : null;
    }
    /**
     * Extract the subscription ID from a resource ID string.
     *
     * @param id the resource ID string
     * @return the subscription ID
     */
    public static String subscriptionFromResourceId(String id) {
        return (id != null) ? ResourceId.fromString(id).subscriptionId() : null;
    }
    /**
     * Extract resource provider from a resource ID string.
     *
     * @param id the resource ID string
     * @return the resource provider namespace
     */
    public static String resourceProviderFromResourceId(String id) {
        return (id != null) ? ResourceId.fromString(id).providerNamespace() : null;
    }
    /**
     * Extract resource type from a resource ID string.
     *
     * @param id the resource ID string
     * @return the resource type
     */
    public static String resourceTypeFromResourceId(String id) {
        return (id != null) ? ResourceId.fromString(id).resourceType() : null;
    }
    /**
     * Extract parent resource ID from a resource ID string.
     * E.g. subscriptions/s/resourcegroups/r/foos/foo/bars/bar will return
     * subscriptions/s/resourcegroups/r/foos/foo.
     *
     * @param id the resource ID string
     * @return the parent resource ID, or null for a top-level resource
     */
    public static String parentResourceIdFromResourceId(String id) {
        if (id == null) {
            return null;
        }
        ResourceId resourceId = ResourceId.fromString(id);
        if (resourceId.parent() != null) {
            return ResourceId.fromString(id).parent().id();
        }
        return null;
    }
    /**
     * Extract parent resource path from a resource ID string.
     * E.g. subscriptions/s/resourcegroups/r/foos/foo/bars/bar will return foos/foo.
     *
     * @param id the resource ID string
     * @return the parent's "type/name" path, or "" when there is no parent
     */
    public static String parentRelativePathFromResourceId(String id) {
        if (id == null) {
            return null;
        }
        ResourceId parent = ResourceId.fromString(id).parent();
        if (parent != null) {
            return parent.resourceType() + "/" + parent.name();
        }
        return "";
    }
    /**
     * Extract the relative path to the current resource provider.
     * E.g. subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.Foo/foos/foo1 will return foos/foo1.
     *
     * @param id the id of the resource
     * @return the relative path, or "" when no provider segment is present
     */
    public static String relativePathFromResourceId(String id) {
        if (id == null) {
            return null;
        }
        // Split on the "/providers/<namespace>/" segment; limit 2 keeps any
        // further "/providers/" occurrences inside the tail intact.
        String[] paths = id.split("/providers/" + resourceProviderFromResourceId(id) + "/", 2);
        if (paths.length == 1) {
            return "";
        } else {
            return paths[1];
        }
    }
    /**
     * Extract information from a resource ID string with the resource type
     * as the identifier.
     *
     * @param id the resource ID
     * @param identifier the identifier to match, e.g. "resourceGroups", "storageAccounts"
     * @return the information extracted from the identifier
     */
    public static String extractFromResourceId(String id, String identifier) {
        if (id == null || identifier == null) {
            return id;
        }
        // Matches "<identifier>/<segment>" where the segment is made of word
        // characters, '-', '.' and '_'.
        Pattern pattern = Pattern.compile(identifier + "/[-\\w._]+");
        Matcher matcher = pattern.matcher(id);
        if (matcher.find()) {
            return matcher.group().split("/")[1];
        } else {
            return null;
        }
    }
    /**
     * Extract name of the resource from a resource ID.
     *
     * @param id the resource ID
     * @return the name of the resource
     */
    public static String nameFromResourceId(String id) {
        return (id != null) ? ResourceId.fromString(id).name() : null;
    }
    // NOTE(review): the Javadoc below documents defaultApiVersion(id, provider),
    // which is not declared in this copy of the class — it appears detached from
    // its method; confirm against the upstream source.
    /**
     * Find out the default api version to make a REST request with from
     * the resource provider.
     *
     * @param id the resource ID
     * @param provider the resource provider
     * @return the default api version to use
     */
    /**
     * Creates a resource ID from information of a generic resource.
     *
     * @param subscriptionId the subscription UUID
     * @param resourceGroupName the resource group name
     * @param resourceProviderNamespace the resource provider namespace
     * @param resourceType the type of the resource or nested resource
     * @param resourceName name of the resource or nested resource
     * @param parentResourcePath parent resource's relative path to the provider,
     * if the resource is a generic resource
     * @return the resource ID string
     */
    public static String constructResourceId(
            final String subscriptionId,
            final String resourceGroupName,
            final String resourceProviderNamespace,
            final String resourceType,
            final String resourceName,
            final String parentResourcePath) {
        // Prefix the parent path with '/' only when one was supplied, so the
        // format string below never produces a double slash.
        String prefixedParentPath = parentResourcePath;
        if (parentResourcePath != null && !parentResourcePath.isEmpty()) {
            prefixedParentPath = "/" + parentResourcePath;
        }
        return String.format(
                "/subscriptions/%s/resourcegroups/%s/providers/%s%s/%s/%s",
                subscriptionId,
                resourceGroupName,
                resourceProviderNamespace,
                prefixedParentPath,
                resourceType,
                resourceName);
    }
    /**
     * Ensure that the resource ID is properly encoded for path parameter.
     *
     * @param resourceId the resource ID, whether url-encoded or not
     * @return properly encoded resource ID
     */
    public static String encodeResourceId(String resourceId) {
        if (resourceId == null) {
            return null;
        }
        // Only spaces are encoded here; other reserved characters are left
        // untouched (idempotent for already-encoded IDs).
        return resourceId.replaceAll(" ", "%20");
    }
    // Strips the provider namespace, returning the lower-cased resource type
    // path after the first '/' of the full resource type.
    private static String getFullResourceTypeWithoutNamespace(ResourceId resourceId) {
        return resourceId.fullResourceType()
                .substring(resourceId.fullResourceType().indexOf("/") + 1)
                .toLowerCase(Locale.ROOT);
    }
}
/**
 * Utility methods for dissecting and assembling Azure resource ID strings.
 * All extraction methods are null-tolerant: a {@code null} ID yields {@code null}.
 */
class ResourceUtils {
    private ResourceUtils() {
        // static utility class; never instantiated
    }

    /**
     * Extract resource group from a resource ID string.
     *
     * @param id the resource ID string
     * @return the resource group name
     */
    public static String groupFromResourceId(String id) {
        return (id != null) ? ResourceId.fromString(id).resourceGroupName() : null;
    }

    /**
     * Extract the subscription ID from a resource ID string.
     *
     * @param id the resource ID string
     * @return the subscription ID
     */
    public static String subscriptionFromResourceId(String id) {
        return (id != null) ? ResourceId.fromString(id).subscriptionId() : null;
    }

    /**
     * Extract resource provider from a resource ID string.
     *
     * @param id the resource ID string
     * @return the resource provider namespace
     */
    public static String resourceProviderFromResourceId(String id) {
        return (id != null) ? ResourceId.fromString(id).providerNamespace() : null;
    }

    /**
     * Extract resource type from a resource ID string.
     *
     * @param id the resource ID string
     * @return the resource type
     */
    public static String resourceTypeFromResourceId(String id) {
        return (id != null) ? ResourceId.fromString(id).resourceType() : null;
    }

    /**
     * Extract parent resource ID from a resource ID string.
     * E.g. subscriptions/s/resourcegroups/r/foos/foo/bars/bar will return
     * subscriptions/s/resourcegroups/r/foos/foo.
     *
     * @param id the resource ID string
     * @return the parent resource ID, or {@code null} when there is no parent
     */
    public static String parentResourceIdFromResourceId(String id) {
        if (id == null) {
            return null;
        }
        ResourceId resourceId = ResourceId.fromString(id);
        if (resourceId.parent() != null) {
            // Reuse the already-parsed ResourceId instead of parsing the string again.
            return resourceId.parent().id();
        }
        return null;
    }

    /**
     * Extract parent resource path from a resource ID string.
     * E.g. subscriptions/s/resourcegroups/r/foos/foo/bars/bar will return foos/foo.
     *
     * @param id the resource ID string
     * @return the parent's "type/name" pair, or "" when there is no parent
     */
    public static String parentRelativePathFromResourceId(String id) {
        if (id == null) {
            return null;
        }
        ResourceId parent = ResourceId.fromString(id).parent();
        if (parent != null) {
            return parent.resourceType() + "/" + parent.name();
        }
        return "";
    }

    /**
     * Extract the relative path to the current resource provider.
     * E.g. subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.Foo/foos/foo1 will return foos/foo1.
     *
     * @param id the id of the resource
     * @return the relative path, or "" when the provider segment is not followed by anything
     */
    public static String relativePathFromResourceId(String id) {
        if (id == null) {
            return null;
        }
        // Quote the namespace: provider namespaces such as "Microsoft.Foo" contain
        // '.', which would otherwise be interpreted as a regex metacharacter.
        String[] paths =
            id.split("/providers/" + Pattern.quote(resourceProviderFromResourceId(id)) + "/", 2);
        if (paths.length == 1) {
            return "";
        } else {
            return paths[1];
        }
    }

    /**
     * Extract information from a resource ID string with the resource type
     * as the identifier.
     *
     * @param id the resource ID
     * @param identifier the identifier to match, e.g. "resourceGroups", "storageAccounts"
     * @return the information extracted from the identifier, or {@code null} when absent
     */
    public static String extractFromResourceId(String id, String identifier) {
        if (id == null || identifier == null) {
            return id;
        }
        // Matches "<identifier>/<segment>" where the segment is made of word
        // characters plus '-', '.' and '_'.
        Pattern pattern = Pattern.compile(identifier + "/[-\\w._]+");
        Matcher matcher = pattern.matcher(id);
        if (matcher.find()) {
            return matcher.group().split("/")[1];
        } else {
            return null;
        }
    }

    /**
     * Extract name of the resource from a resource ID.
     *
     * @param id the resource ID
     * @return the name of the resource
     */
    public static String nameFromResourceId(String id) {
        return (id != null) ? ResourceId.fromString(id).name() : null;
    }

    /**
     * Creates a resource ID from information of a generic resource.
     *
     * @param subscriptionId the subscription UUID
     * @param resourceGroupName the resource group name
     * @param resourceProviderNamespace the resource provider namespace
     * @param resourceType the type of the resource or nested resource
     * @param resourceName name of the resource or nested resource
     * @param parentResourcePath parent resource's relative path to the provider,
     *     if the resource is a generic resource
     * @return the resource ID string
     */
    public static String constructResourceId(
            final String subscriptionId,
            final String resourceGroupName,
            final String resourceProviderNamespace,
            final String resourceType,
            final String resourceName,
            final String parentResourcePath) {
        String prefixedParentPath = parentResourcePath;
        if (parentResourcePath != null && !parentResourcePath.isEmpty()) {
            prefixedParentPath = "/" + parentResourcePath;
        }
        return String.format(
            "/subscriptions/%s/resourcegroups/%s/providers/%s%s/%s/%s",
            subscriptionId,
            resourceGroupName,
            resourceProviderNamespace,
            prefixedParentPath,
            resourceType,
            resourceName);
    }

    /**
     * Ensure that the resource ID is properly encoded for path parameter.
     * Only literal spaces are escaped; an already-encoded ID is not double-encoded.
     *
     * @param resourceId the resource ID, whether url-encoded or not
     * @return properly encoded resource ID
     */
    public static String encodeResourceId(String resourceId) {
        if (resourceId == null) {
            return null;
        }
        // Literal replacement; replaceAll would needlessly treat " " as a regex.
        return resourceId.replace(" ", "%20");
    }

    /** Strips the provider namespace prefix from the full resource type, lower-cased. */
    private static String getFullResourceTypeWithoutNamespace(ResourceId resourceId) {
        return resourceId.fullResourceType()
            .substring(resourceId.fullResourceType().indexOf("/") + 1)
            .toLowerCase(Locale.ROOT);
    }
}
|
The callbacks of `with(...)` might be called right after the subscription if the data is already there, or if the workload it triggers is synchronous (e.g. you compute some value in-memory and there is no async I/O call involved).
|
/**
 * Builds the dev-console POST handler for cache actions.
 * "clearCache" invalidates all entries; "refresh" just reports the current state.
 * Responds with a small JSON payload describing the cache, or an error payload
 * with NOT_FOUND / INTERNAL_SERVER_ERROR status.
 */
public Handler<RoutingContext> clearCacheHandler() {
    return new DevConsolePostHandler() {
        @Override
        protected void handlePost(RoutingContext event, MultiMap form) {
            String cacheName = form.get("name");
            Optional<Cache> cache = CaffeineCacheSupplier.cacheManager().getCache(cacheName);
            if (cache.isPresent() && cache.get() instanceof CaffeineCache) {
                CaffeineCacheImpl caffeineCache = (CaffeineCacheImpl) cache.get();
                String action = form.get("action");
                if (action.equalsIgnoreCase("clearCache")) {
                    // The response is ended from the subscriber callback; note the
                    // callback may run synchronously if invalidation completes immediately.
                    caffeineCache.invalidateAll().subscribe().with(ignored -> {
                        endResponse(event, OK, createResponseMessage(caffeineCache));
                    });
                } else if (action.equalsIgnoreCase("refresh")) {
                    // "refresh" only reports the cache state; nothing is invalidated.
                    endResponse(event, OK, createResponseMessage(caffeineCache));
                } else {
                    String errorMessage = "Invalid action: " + action;
                    endResponse(event, INTERNAL_SERVER_ERROR, createResponseError(cacheName, errorMessage));
                }
            } else {
                String errorMessage = "Cache for " + cacheName + " not found";
                endResponse(event, NOT_FOUND, createResponseError(cacheName, errorMessage));
            }
        }

        // Writes the status code and terminates the response with the given body.
        private void endResponse(RoutingContext event, HttpResponseStatus status, String message) {
            event.response().setStatusCode(status.code());
            event.response().end(message);
        }

        // Success payload: {"name": ..., "size": ...}
        private String createResponseMessage(CaffeineCacheImpl cache) {
            Json.JsonObjectBuilder object = Json.object();
            object.put("name", cache.getName());
            object.put("size", cache.getSize());
            return object.build();
        }

        // Error payload: {"name": ..., "error": ...}
        private String createResponseError(String name, String error) {
            Json.JsonObjectBuilder object = Json.object();
            object.put("name", name);
            object.put("error", error);
            return object.build();
        }
    };
}
|
endResponse(event, NOT_FOUND, createResponseError(cacheName, errorMessage));
|
/**
 * Builds the dev-console POST handler for cache actions.
 * "clearCache" invalidates all entries; "refresh" just reports the current state.
 * Responds with a small JSON payload describing the cache, or an error payload
 * with NOT_FOUND / INTERNAL_SERVER_ERROR status.
 */
public Handler<RoutingContext> clearCacheHandler() {
    return new DevConsolePostHandler() {
        @Override
        protected void handlePost(RoutingContext event, MultiMap form) {
            String cacheName = form.get("name");
            Optional<Cache> cache = CaffeineCacheSupplier.cacheManager().getCache(cacheName);
            if (cache.isPresent() && cache.get() instanceof CaffeineCache) {
                CaffeineCacheImpl caffeineCache = (CaffeineCacheImpl) cache.get();
                String action = form.get("action");
                if (action.equalsIgnoreCase("clearCache")) {
                    // The response is ended from the subscriber callback; note the
                    // callback may run synchronously if invalidation completes immediately.
                    caffeineCache.invalidateAll().subscribe().with(ignored -> {
                        endResponse(event, OK, createResponseMessage(caffeineCache));
                    });
                } else if (action.equalsIgnoreCase("refresh")) {
                    // "refresh" only reports the cache state; nothing is invalidated.
                    endResponse(event, OK, createResponseMessage(caffeineCache));
                } else {
                    String errorMessage = "Invalid action: " + action;
                    endResponse(event, INTERNAL_SERVER_ERROR, createResponseError(cacheName, errorMessage));
                }
            } else {
                String errorMessage = "Cache for " + cacheName + " not found";
                endResponse(event, NOT_FOUND, createResponseError(cacheName, errorMessage));
            }
        }

        // Writes the status code and terminates the response with the given body.
        private void endResponse(RoutingContext event, HttpResponseStatus status, String message) {
            event.response().setStatusCode(status.code());
            event.response().end(message);
        }

        @Override
        protected void actionSuccess(RoutingContext event) {
            // Intentionally empty: handlePost ends the response itself (possibly
            // asynchronously), so the base class must not write a default success
            // response here. NOTE(review): confirm against DevConsolePostHandler's
            // default behavior.
        }

        // Success payload: {"name": ..., "size": ...}
        private String createResponseMessage(CaffeineCacheImpl cache) {
            Json.JsonObjectBuilder object = Json.object();
            object.put("name", cache.getName());
            object.put("size", cache.getSize());
            return object.build();
        }

        // Error payload: {"name": ..., "error": ...}
        private String createResponseError(String name, String error) {
            Json.JsonObjectBuilder object = Json.object();
            object.put("name", name);
            object.put("error", error);
            return object.build();
        }
    };
}
|
// Recorder backing the cache dev console; currently declares no recorded state.
class CacheDevConsoleRecorder {
}
|
// Recorder backing the cache dev console; currently declares no recorded state.
class CacheDevConsoleRecorder {
}
|
Can you add the expected formats to the error message, so that users receive actionable errors?
|
/**
 * Parses a table specification string into a {@link TableReference}.
 *
 * <p>Accepted formats: {@code [project_id]:[dataset_id].[table_id]},
 * {@code [project_id].[dataset_id].[table_id]}, or {@code [dataset_id].[table_id]}.
 *
 * @param tableSpec the table specification to parse
 * @return the parsed table reference
 * @throws IllegalArgumentException if the spec matches none of the accepted formats
 */
public static TableReference parseTableSpec(String tableSpec) {
    Matcher match = BigQueryIO.TABLE_SPEC.matcher(tableSpec);
    if (!match.matches()) {
        // List the accepted formats so users receive an actionable error.
        throw new IllegalArgumentException(
            String.format(
                "Table specification [%s] is not in one of the expected formats ("
                    + " [project_id]:[dataset_id].[table_id],"
                    + " [project_id].[dataset_id].[table_id],"
                    + " [dataset_id].[table_id])",
                tableSpec));
    }
    TableReference ref = new TableReference();
    ref.setProjectId(match.group("PROJECT"));
    return ref.setDatasetId(match.group("DATASET")).setTableId(match.group("TABLE"));
}
|
"Table reference is not in the expected " + "format: " + tableSpec);
|
/**
 * Parses a table specification string into a {@link TableReference}.
 *
 * <p>Accepted formats: {@code [project_id]:[dataset_id].[table_id]},
 * {@code [project_id].[dataset_id].[table_id]}, or {@code [dataset_id].[table_id]}.
 *
 * @param tableSpec the table specification to parse
 * @return the parsed table reference
 * @throws IllegalArgumentException if the spec matches none of the accepted formats
 */
public static TableReference parseTableSpec(String tableSpec) {
    Matcher match = BigQueryIO.TABLE_SPEC.matcher(tableSpec);
    if (!match.matches()) {
        // The message enumerates the accepted formats so the error is actionable.
        throw new IllegalArgumentException(
            String.format(
                "Table specification [%s] is not in one of the expected formats ("
                    + " [project_id]:[dataset_id].[table_id],"
                    + " [project_id].[dataset_id].[table_id],"
                    + " [dataset_id].[table_id])",
                tableSpec));
    }
    TableReference ref = new TableReference();
    // PROJECT may be absent in the [dataset_id].[table_id] form; the named group
    // then yields null — NOTE(review): confirm TABLE_SPEC marks it optional.
    ref.setProjectId(match.group("PROJECT"));
    return ref.setDatasetId(match.group("DATASET")).setTableId(match.group("TABLE"));
}
|
/**
 * Identifies one attempt of a retried job: a stable id prefix plus a retry
 * counter. The full job id is the prefix and index joined by a dash.
 */
class RetryJobId {
    private final String jobIdPrefix;
    private final int retryIndex;

    RetryJobId(String jobIdPrefix, int retryIndex) {
        this.jobIdPrefix = jobIdPrefix;
        this.retryIndex = retryIndex;
    }

    String getJobIdPrefix() {
        return jobIdPrefix;
    }

    int getRetryIndex() {
        return retryIndex;
    }

    /** Full job id, e.g. "prefix-0" for the first attempt. */
    String getJobId() {
        StringBuilder fullId = new StringBuilder(jobIdPrefix);
        fullId.append('-').append(retryIndex);
        return fullId.toString();
    }

    @Override
    public String toString() {
        // Delegate so logging shows the same id used for job submission.
        return getJobId();
    }
}
|
/**
 * Identifies one attempt of a retried job: a stable id prefix plus a retry
 * counter. The full job id is the prefix and index joined by a dash.
 */
class RetryJobId {
    private final String jobIdPrefix;
    private final int retryIndex;

    RetryJobId(String jobIdPrefix, int retryIndex) {
        this.jobIdPrefix = jobIdPrefix;
        this.retryIndex = retryIndex;
    }

    String getJobIdPrefix() {
        return jobIdPrefix;
    }

    int getRetryIndex() {
        return retryIndex;
    }

    /** Full job id, e.g. "prefix-0" for the first attempt. */
    String getJobId() {
        StringBuilder fullId = new StringBuilder(jobIdPrefix);
        fullId.append('-').append(retryIndex);
        return fullId.toString();
    }

    @Override
    public String toString() {
        // Delegate so logging shows the same id used for job submission.
        return getJobId();
    }
}
|
@michalvavrik How does it look now? I've added an NPE check for `next` and an ISE catch block for `remove`.
|
/**
 * Evicts every cache entry whose time-to-live has elapsed, keeping the
 * separate size counter in step with the map.
 */
private void removeInvalidEntries() {
    // Snapshot the clock once so all entries are judged against the same instant.
    long now = now();
    for (Iterator<Map.Entry<String, CacheEntry<T>>> it = cacheMap.entrySet().iterator(); it.hasNext();) {
        Map.Entry<String, CacheEntry<T>> next = it.next();
        if (isEntryExpired(next.getValue(), now)) {
            // Remove through the iterator so the traversal stays valid.
            it.remove();
            size.decrementAndGet();
        }
    }
}
|
long now = now();
|
/**
 * Evicts every cache entry whose time-to-live has elapsed, keeping the
 * separate size counter in step with the map.
 */
private void removeInvalidEntries() {
    // Snapshot the clock once so all entries are judged against the same instant.
    long now = now();
    for (Iterator<Map.Entry<String, CacheEntry<T>>> it = cacheMap.entrySet().iterator(); it.hasNext();) {
        Map.Entry<String, CacheEntry<T>> next = it.next();
        // Defensive: it.next() on a ConcurrentHashMap iterator should not return
        // null — NOTE(review): confirm whether this guard is actually reachable.
        if (next != null) {
            if (isEntryExpired(next.getValue(), now)) {
                try {
                    it.remove();
                    size.decrementAndGet();
                } catch (IllegalStateException ex) {
                    // Intentionally ignored: best-effort cleanup. NOTE(review):
                    // confirm this can occur here — Iterator.remove only throws
                    // ISE when next() has not been called or remove() is repeated.
                }
            }
        }
    }
}
|
/**
 * Bounded in-memory cache with a fixed time-to-live per entry and an optional
 * periodic clean-up timer. Backed by a ConcurrentHashMap; the live entry count
 * is tracked separately in an AtomicInteger so the capacity check is cheap.
 *
 * @param <T> type of the cached values
 */
class MemoryCache<T> {
    // Id of the periodic clean-up timer; null when no timer is scheduled.
    private volatile Long timerId = null;
    // Fixed: the map was previously allocated twice (field initializer AND init())
    // and the declaration carried a stray double semicolon; a single final
    // initialization is sufficient.
    private final Map<String, CacheEntry<T>> cacheMap = new ConcurrentHashMap<>();
    // Number of live entries, kept in step with cacheMap.
    private final AtomicInteger size = new AtomicInteger();
    private final Duration cacheTimeToLive;
    private final int cacheSize;

    public MemoryCache(Vertx vertx, Optional<Duration> cleanUpTimerInterval,
            Duration cacheTimeToLive, int cacheSize) {
        this.cacheTimeToLive = cacheTimeToLive;
        this.cacheSize = cacheSize;
        init(vertx, cleanUpTimerInterval);
    }

    // Schedules the periodic clean-up task when an interval is configured.
    private void init(Vertx vertx, Optional<Duration> cleanUpTimerInterval) {
        if (cleanUpTimerInterval.isPresent()) {
            timerId = vertx.setPeriodic(cleanUpTimerInterval.get().toMillis(), new Handler<Long>() {
                @Override
                public void handle(Long event) {
                    removeInvalidEntries();
                }
            });
        }
    }

    /**
     * Adds an entry, clearing the whole cache first when it is full.
     * No-op when the configured capacity is zero or negative.
     */
    public void add(String key, T result) {
        if (cacheSize > 0) {
            if (!prepareSpaceForNewCacheEntry()) {
                // Cache is full: drop everything rather than evict selectively.
                clearCache();
            }
            cacheMap.put(key, new CacheEntry<T>(result));
        }
    }

    /** Removes and returns the value cached under the key, or null when absent. */
    public T remove(String key) {
        CacheEntry<T> entry = removeCacheEntry(key);
        return entry == null ? null : entry.result;
    }

    /** Returns the cached value for the key, or null when absent. */
    public T get(String key) {
        CacheEntry<T> entry = cacheMap.get(key);
        return entry == null ? null : entry.result;
    }

    public boolean containsKey(String key) {
        return cacheMap.containsKey(key);
    }

    // CAS-increments the size counter unless the cache is already at capacity.
    // Returns false (leaving the counter untouched) when full.
    private boolean prepareSpaceForNewCacheEntry() {
        int currentSize;
        do {
            currentSize = size.get();
            if (currentSize == cacheSize) {
                return false;
            }
        } while (!size.compareAndSet(currentSize, currentSize + 1));
        return true;
    }

    private CacheEntry<T> removeCacheEntry(String token) {
        CacheEntry<T> entry = cacheMap.remove(token);
        if (entry != null) {
            size.decrementAndGet();
        }
        return entry;
    }

    // True when the entry's creation time plus the TTL lies before 'now'.
    private boolean isEntryExpired(CacheEntry<T> entry, long now) {
        return entry.createdTime + cacheTimeToLive.toMillis() < now;
    }

    private static long now() {
        return System.currentTimeMillis();
    }

    // Value holder stamping each entry with its creation time for TTL checks.
    private static class CacheEntry<T> {
        volatile T result;
        long createdTime = System.currentTimeMillis();

        public CacheEntry(T result) {
            this.result = result;
        }
    }

    public int getCacheSize() {
        return cacheMap.size();
    }

    public void clearCache() {
        cacheMap.clear();
        size.set(0);
    }

    /** Cancels the clean-up timer if one is currently scheduled. */
    public void stopTimer(Vertx vertx) {
        if (timerId != null && vertx.cancelTimer(timerId)) {
            timerId = null;
        }
    }

    public boolean isTimerRunning() {
        return timerId != null;
    }
}
|
/**
 * Bounded in-memory cache with a fixed time-to-live per entry and an optional
 * periodic clean-up timer. Backed by a ConcurrentHashMap; the live entry count
 * is tracked separately in an AtomicInteger so the capacity check is cheap.
 *
 * @param <T> type of the cached values
 */
class MemoryCache<T> {
    // Id of the periodic clean-up timer; null when no timer is scheduled.
    private volatile Long timerId = null;
    private final Map<String, CacheEntry<T>> cacheMap = new ConcurrentHashMap<>();
    // Number of live entries, kept in step with cacheMap.
    private AtomicInteger size = new AtomicInteger();
    private final Duration cacheTimeToLive;
    private final int cacheSize;

    public MemoryCache(Vertx vertx, Optional<Duration> cleanUpTimerInterval,
            Duration cacheTimeToLive, int cacheSize) {
        this.cacheTimeToLive = cacheTimeToLive;
        this.cacheSize = cacheSize;
        init(vertx, cleanUpTimerInterval);
    }

    // Schedules the periodic clean-up task when an interval is configured.
    private void init(Vertx vertx, Optional<Duration> cleanUpTimerInterval) {
        if (cleanUpTimerInterval.isPresent()) {
            timerId = vertx.setPeriodic(cleanUpTimerInterval.get().toMillis(), new Handler<Long>() {
                @Override
                public void handle(Long event) {
                    removeInvalidEntries();
                }
            });
        }
    }

    /**
     * Adds an entry, clearing the whole cache first when it is full.
     * No-op when the configured capacity is zero or negative.
     */
    public void add(String key, T result) {
        if (cacheSize > 0) {
            if (!prepareSpaceForNewCacheEntry()) {
                // Cache is full: drop everything rather than evict selectively.
                clearCache();
            }
            cacheMap.put(key, new CacheEntry<T>(result));
        }
    }

    /** Removes and returns the value cached under the key, or null when absent. */
    public T remove(String key) {
        CacheEntry<T> entry = removeCacheEntry(key);
        return entry == null ? null : entry.result;
    }

    /** Returns the cached value for the key, or null when absent. */
    public T get(String key) {
        CacheEntry<T> entry = cacheMap.get(key);
        return entry == null ? null : entry.result;
    }

    public boolean containsKey(String key) {
        return cacheMap.containsKey(key);
    }

    // CAS-increments the size counter unless the cache is already at capacity.
    // Returns false (leaving the counter untouched) when full.
    private boolean prepareSpaceForNewCacheEntry() {
        int currentSize;
        do {
            currentSize = size.get();
            if (currentSize == cacheSize) {
                return false;
            }
        } while (!size.compareAndSet(currentSize, currentSize + 1));
        return true;
    }

    private CacheEntry<T> removeCacheEntry(String token) {
        CacheEntry<T> entry = cacheMap.remove(token);
        if (entry != null) {
            size.decrementAndGet();
        }
        return entry;
    }

    // True when the entry's creation time plus the TTL lies before 'now'.
    private boolean isEntryExpired(CacheEntry<T> entry, long now) {
        return entry.createdTime + cacheTimeToLive.toMillis() < now;
    }

    private static long now() {
        return System.currentTimeMillis();
    }

    // Value holder stamping each entry with its creation time for TTL checks.
    private static class CacheEntry<T> {
        volatile T result;
        long createdTime = System.currentTimeMillis();

        public CacheEntry(T result) {
            this.result = result;
        }
    }

    public int getCacheSize() {
        return cacheMap.size();
    }

    public void clearCache() {
        cacheMap.clear();
        size.set(0);
    }

    /** Cancels the clean-up timer if one is currently scheduled. */
    public void stopTimer(Vertx vertx) {
        if (timerId != null && vertx.cancelTimer(timerId)) {
            timerId = null;
        }
    }

    public boolean isTimerRunning() {
        return timerId != null;
    }
}
|
Suggest using `getCatalogOrException()`. But consider that if this is a replay thread, the exception should be ignored when the catalog does not exist. The same suggestion applies to the other exception-handling logic in this method.
|
/**
 * Applies the auto_analyze_policy property to an external table, typically when
 * replaying a ModifyTablePropertyOperationLog entry. Invalid input is logged and
 * skipped rather than failing the caller.
 */
private void setExternalTableAutoAnalyze(Map<String, String> properties, ModifyTablePropertyOperationLog info) {
    if (properties.size() != 1) {
        LOG.warn("External table property should contain exactly 1 entry.");
        // Fixed: previously fell through and applied the property anyway.
        return;
    }
    if (!properties.containsKey(PropertyAnalyzer.PROPERTIES_AUTO_ANALYZE_POLICY)) {
        LOG.warn("External table property should only contain auto_analyze_policy");
        // Fixed: previously fell through with a null value.
        return;
    }
    String value = properties.get(PropertyAnalyzer.PROPERTIES_AUTO_ANALYZE_POLICY);
    if (!PropertyAnalyzer.ENABLE_AUTO_ANALYZE_POLICY.equalsIgnoreCase(value)
            && !PropertyAnalyzer.DISABLE_AUTO_ANALYZE_POLICY.equalsIgnoreCase(value)
            && !PropertyAnalyzer.USE_CATALOG_AUTO_ANALYZE_POLICY.equalsIgnoreCase(value)) {
        LOG.warn("External table property should be 'enable', 'disable' or 'base_on_catalog'");
        return;
    }
    try {
        // Fail with a clear error when the catalog is missing instead of
        // dereferencing a null catalog below; during replay the exception is
        // swallowed by the catch so replay continues.
        CatalogIf catalog = Env.getCurrentEnv().getCatalogMgr()
                .getCatalogOrException(info.getCtlName(),
                        ctlName -> new DdlException("Unknown catalog " + ctlName));
        // "base_on_catalog" is stored as null so the catalog-level policy applies.
        value = value.equalsIgnoreCase(PropertyAnalyzer.USE_CATALOG_AUTO_ANALYZE_POLICY) ? null : value;
        ((ExternalCatalog) catalog).setAutoAnalyzePolicy(info.getDbName(), info.getTableName(), value);
    } catch (Exception e) {
        LOG.warn("Failed to replay external table set property.", e);
    }
}
|
CatalogIf catalog = Env.getCurrentEnv().getCatalogMgr().getCatalog(info.getCtlName());
|
/**
 * Applies the auto_analyze_policy property to an external table, typically when
 * replaying a ModifyTablePropertyOperationLog entry. Invalid input is logged and
 * skipped rather than failing the caller.
 */
private void setExternalTableAutoAnalyze(Map<String, String> properties, ModifyTablePropertyOperationLog info) {
    if (properties.size() != 1) {
        LOG.warn("External table property should contain exactly 1 entry.");
        return;
    }
    if (!properties.containsKey(PropertyAnalyzer.PROPERTIES_AUTO_ANALYZE_POLICY)) {
        LOG.warn("External table property should only contain auto_analyze_policy");
        return;
    }
    String value = properties.get(PropertyAnalyzer.PROPERTIES_AUTO_ANALYZE_POLICY);
    if (!PropertyAnalyzer.ENABLE_AUTO_ANALYZE_POLICY.equalsIgnoreCase(value)
            && !PropertyAnalyzer.DISABLE_AUTO_ANALYZE_POLICY.equalsIgnoreCase(value)
            && !PropertyAnalyzer.USE_CATALOG_AUTO_ANALYZE_POLICY.equalsIgnoreCase(value)) {
        LOG.warn("External table property should be 'enable', 'disable' or 'base_on_catalog'");
        return;
    }
    try {
        // Fails with a clear error when the catalog is missing; the catch below
        // swallows it so edit-log replay is never aborted by a stale catalog name.
        CatalogIf catalog = Env.getCurrentEnv().getCatalogMgr()
                .getCatalogOrException(info.getCtlName(),
                        ctlName -> new DdlException("Unknown catalog " + ctlName));
        // "base_on_catalog" is stored as null so the catalog-level policy applies.
        value = value.equalsIgnoreCase(PropertyAnalyzer.USE_CATALOG_AUTO_ANALYZE_POLICY) ? null : value;
        ((ExternalCatalog) catalog).setAutoAnalyzePolicy(info.getDbName(), info.getTableName(), value);
    } catch (Exception e) {
        LOG.warn("Failed to replay external table set property.", e);
    }
}
|
// Initialization-on-demand holder: the JVM defers creating INSTANCE until this
// class is first referenced, giving lazy, thread-safe singleton initialization
// without explicit locking.
class SingletonHolder {
    private static final Env INSTANCE = EnvFactory.getInstance().createEnv(false);
}
|
// Initialization-on-demand holder: the JVM defers creating INSTANCE until this
// class is first referenced, giving lazy, thread-safe singleton initialization
// without explicit locking.
class SingletonHolder {
    private static final Env INSTANCE = EnvFactory.getInstance().createEnv(false);
}
|
I think only HiveTableSource performs parallelism inference. But this test won't use HiveTableSource, right?
|
/**
 * Registers a fresh Hive catalog with the table environment, makes it current,
 * and disables Hive source parallelism inference before running the base init.
 */
public void init() throws IOException {
    hiveCatalog = HiveTestUtils.createHiveCatalog();
    tEnv().registerCatalog(hiveCatalog.getName(), hiveCatalog);
    tEnv().useCatalog(hiveCatalog.getName());
    // Disable inference so the parallelism configured by the test is used as-is.
    tEnv().getConfig().getConfiguration().set(
        HiveOptions.TABLE_EXEC_HIVE_INFER_SOURCE_PARALLELISM, false);
    super.init();
}
|
HiveOptions.TABLE_EXEC_HIVE_INFER_SOURCE_PARALLELISM, false);
|
/**
 * Registers a fresh Hive catalog with the table environment, makes it current,
 * and disables Hive source parallelism inference before running the base init.
 */
public void init() throws IOException {
    hiveCatalog = HiveTestUtils.createHiveCatalog();
    tEnv().registerCatalog(hiveCatalog.getName(), hiveCatalog);
    tEnv().useCatalog(hiveCatalog.getName());
    // Disable inference so the parallelism configured by the test is used as-is.
    tEnv().getConfig().getConfiguration().set(
        HiveOptions.TABLE_EXEC_HIVE_INFER_SOURCE_PARALLELISM, false);
    super.init();
}
|
/** IT case verifying auto-compaction on a Hive sink table for several storage formats. */
class HiveSinkCompactionITCase extends CompactionITCaseBase {
    @Parameterized.Parameters(name = "format = {0}")
    public static Collection<String> parameters() {
        return Arrays.asList("sequencefile", "parquet");
    }

    // Storage format under test, injected per parameterized run.
    @Parameterized.Parameter
    public String format;

    private HiveCatalog hiveCatalog;

    // NOTE(review): @Override and @Before stacked on a method also annotated
    // @After look like a setup method may have been removed above — confirm the
    // intended test lifecycle annotations against the full source.
    @Override
    @Before
    @After
    public void tearDown() throws TableNotExistException {
        if (hiveCatalog != null) {
            hiveCatalog.dropTable(new ObjectPath(tEnv().getCurrentDatabase(), "sink_table"), true);
            hiveCatalog.close();
        }
    }

    // Creates the sink table in Hive dialect; 'part' chooses whether column c
    // is a partition column or a regular column.
    private void create(String path, boolean part) {
        tEnv().getConfig().setSqlDialect(SqlDialect.HIVE);
        tEnv().executeSql("CREATE TABLE sink_table (a int, b string" +
            (part ? "" : ",c string") + ") " +
            (part ? "partitioned by (c string) " : "") +
            " stored as " + format +
            " location '" + path + "'" +
            " TBLPROPERTIES (" +
            "'sink.partition-commit.policy.kind'='metastore,success-file'," +
            "'auto-compaction'='true'," +
            "'compaction.file-size' = '128MB'," +
            "'sink.rolling-policy.file-size' = '1b'" +
            ")");
        tEnv().getConfig().setSqlDialect(SqlDialect.DEFAULT);
    }

    @Override
    protected String partitionField() {
        return "c";
    }

    @Override
    protected void createTable(String path) {
        create(path, false);
    }

    @Override
    protected void createPartitionTable(String path) {
        create(path, true);
    }
}
|
/** IT case verifying auto-compaction on a Hive sink table for several storage formats. */
class HiveSinkCompactionITCase extends CompactionITCaseBase {
    @Parameterized.Parameters(name = "format = {0}")
    public static Collection<String> parameters() {
        return Arrays.asList("sequencefile", "parquet");
    }

    // Storage format under test, injected per parameterized run.
    @Parameterized.Parameter
    public String format;

    private HiveCatalog hiveCatalog;

    // NOTE(review): @Override and @Before stacked on a method also annotated
    // @After look like a setup method may have been removed above — confirm the
    // intended test lifecycle annotations against the full source.
    @Override
    @Before
    @After
    public void tearDown() throws TableNotExistException {
        if (hiveCatalog != null) {
            hiveCatalog.dropTable(new ObjectPath(tEnv().getCurrentDatabase(), "sink_table"), true);
            hiveCatalog.close();
        }
    }

    // Creates the sink table in Hive dialect; 'part' chooses whether column c
    // is a partition column or a regular column.
    private void create(String path, boolean part) {
        tEnv().getConfig().setSqlDialect(SqlDialect.HIVE);
        tEnv().executeSql("CREATE TABLE sink_table (a int, b string" +
            (part ? "" : ",c string") + ") " +
            (part ? "partitioned by (c string) " : "") +
            " stored as " + format +
            " location '" + path + "'" +
            " TBLPROPERTIES (" +
            "'sink.partition-commit.policy.kind'='metastore,success-file'," +
            "'auto-compaction'='true'," +
            "'compaction.file-size' = '128MB'," +
            "'sink.rolling-policy.file-size' = '1b'" +
            ")");
        tEnv().getConfig().setSqlDialect(SqlDialect.DEFAULT);
    }

    @Override
    protected String partitionField() {
        return "c";
    }

    @Override
    protected void createTable(String path) {
        create(path, false);
    }

    @Override
    protected void createPartitionTable(String path) {
        create(path, true);
    }
}
|
Instead of a, b, c, shall we say val1, val2 and val3, since that makes more sense? Or use something more meaningful, like hexStringArray, base64StringArray and numArray.
|
/**
 * Verifies that invoking "testByteArrayReturn" yields three BByteArray values
 * whose contents match the expected hex-decoded, base64-decoded and literal
 * byte sequences.
 */
public void testByteArrayReturn() {
    byte[] expectedHexBytes = ByteArrayUtils.hexStringToByteArray("aaabcfccadafcd341a4bdfabcd8912df");
    byte[] expectedBase64Bytes = ByteArrayUtils.decodeBase64("aGVsbG8gYmFsbGVyaW5hICEhIQ==");
    byte[] expectedLiteralBytes = new byte[]{3, 4, 5, 6, 7, 8, 9};
    BValue[] returns = BRunUtil.invoke(result, "testByteArrayReturn", new BValue[]{});
    Assert.assertEquals(returns.length, 3);
    // Every returned value must be a byte array before the casts below.
    for (BValue returned : returns) {
        Assert.assertSame(returned.getClass(), BByteArray.class);
    }
    BByteArray byteArray1 = (BByteArray) returns[0];
    BByteArray byteArray2 = (BByteArray) returns[1];
    BByteArray byteArray3 = (BByteArray) returns[2];
    Assert.assertEquals(expectedHexBytes.length, byteArray1.size());
    ByteArrayUtils.assertJBytesWithBBytes(expectedHexBytes, byteArray1.getBytes());
    Assert.assertEquals(expectedBase64Bytes.length, byteArray2.size());
    ByteArrayUtils.assertJBytesWithBBytes(expectedBase64Bytes, byteArray2.getBytes());
    Assert.assertEquals(expectedLiteralBytes.length, byteArray3.size());
    ByteArrayUtils.assertJBytesWithBBytes(expectedLiteralBytes, byteArray3.getBytes());
}
|
byte[] c = new byte[]{3, 4, 5, 6, 7, 8, 9};
|
/**
 * Verifies that invoking "testByteArrayReturn" yields three BByteArray values
 * whose contents match the expected hex-decoded, base64-decoded and literal
 * byte sequences.
 */
public void testByteArrayReturn() {
    byte[] bytes1 = ByteArrayUtils.hexStringToByteArray("aaabcfccadafcd341a4bdfabcd8912df");
    byte[] bytes2 = ByteArrayUtils.decodeBase64("aGVsbG8gYmFsbGVyaW5hICEhIQ==");
    byte[] bytes3 = new byte[]{3, 4, 5, 6, 7, 8, 9};
    BValue[] returns = BRunUtil.invoke(result, "testByteArrayReturn", new BValue[]{});
    Assert.assertEquals(returns.length, 3);
    // Every returned value must be a byte array before the casts below.
    Assert.assertSame(returns[0].getClass(), BByteArray.class);
    Assert.assertSame(returns[1].getClass(), BByteArray.class);
    Assert.assertSame(returns[2].getClass(), BByteArray.class);
    BByteArray byteArray1 = (BByteArray) returns[0];
    BByteArray byteArray2 = (BByteArray) returns[1];
    BByteArray byteArray3 = (BByteArray) returns[2];
    Assert.assertEquals(bytes1.length, byteArray1.size());
    ByteArrayUtils.assertJBytesWithBBytes(bytes1, byteArray1.getBytes());
    Assert.assertEquals(bytes2.length, byteArray2.size());
    ByteArrayUtils.assertJBytesWithBBytes(bytes2, byteArray2.getBytes());
    Assert.assertEquals(bytes3.length, byteArray3.size());
    ByteArrayUtils.assertJBytesWithBBytes(bytes3, byteArray3.getBytes());
}
|
/**
 * Tests for byte-array (blob) values: parameter passing, globals, tuple and
 * array returns, struct fields, hex formatting, assignment, defaults and
 * literals, all executed against the compiled byte-array-value.bal program.
 */
class BByteArrayValueTest {
    private CompileResult result;

    @BeforeClass
    public void setup() {
        result = BCompileUtil.compile("test-src/types/byte/byte-array-value.bal");
    }

    @Test(description = "Test blob value assignment")
    public void testBlobParameter() {
        byte[] bytes = "string".getBytes();
        BValue[] args = {ByteArrayUtils.createBByteArray(bytes)};
        BValue[] returns = BRunUtil.invoke(result, "testBlobParameter", args);
        Assert.assertEquals(returns.length, 1);
        assertResult(bytes, returns[0]);
    }

    @Test(description = "Test blob array")
    public void testBlobArray() {
        byte[] bytes1 = "string1".getBytes();
        byte[] bytes2 = "string2".getBytes();
        BByteArray byteArray1 = ByteArrayUtils.createBByteArray(bytes1);
        BByteArray byteArray2 = ByteArrayUtils.createBByteArray(bytes2);
        BValue[] args = {byteArray1, byteArray2};
        BValue[] returns = BRunUtil.invoke(result, "testBlobParameterArray", args);
        Assert.assertEquals(returns.length, 2);
        Assert.assertSame(returns[0].getClass(), BByteArray.class);
        Assert.assertSame(returns[1].getClass(), BByteArray.class);
        BByteArray blob1 = (BByteArray) returns[0];
        BByteArray blob2 = (BByteArray) returns[1];
        ByteArrayUtils.assertJBytesWithBBytes(bytes1, blob1);
        ByteArrayUtils.assertJBytesWithBBytes(bytes2, blob2);
    }

    @Test(description = "Test blob global variable1")
    public void testBlobGlobalVariable1() {
        byte[] bytes = "string".getBytes();
        BValue[] args = {ByteArrayUtils.createBByteArray(bytes)};
        BValue[] returns = BRunUtil.invoke(result, "testGlobalVariable1", args);
        Assert.assertEquals(returns.length, 1);
        assertResult(bytes, returns[0]);
    }

    @Test(description = "Test blob global variable2")
    public void testBlobGlobalVariable2() {
        String b16 = "aeeecdefabcd12345567888822";
        byte[] bytes = ByteArrayUtils.hexStringToByteArray(b16);
        BValue[] returns = BRunUtil.invoke(result, "testGlobalVariable2", new BValue[]{});
        Assert.assertEquals(returns.length, 1);
        assertResult(bytes, returns[0]);
    }

    @Test(description = "Test blob global variable3")
    public void testBlobGlobalVariable3() {
        String b64 = "aGVsbG8gYmFsbGVyaW5hICEhIQ==";
        byte[] bytes = ByteArrayUtils.decodeBase64(b64);
        BValue[] returns = BRunUtil.invoke(result, "testGlobalVariable3", new BValue[]{});
        Assert.assertEquals(returns.length, 1);
        assertResult(bytes, returns[0]);
    }

    @Test(description = "Test blob tuple return 1")
    public void testBlobReturnTuple1() {
        String b1 = "aaabafac23345678";
        String b2 = "a4f5njn/jnfvr+d=";
        byte[] bytes1 = ByteArrayUtils.hexStringToByteArray(b1);
        byte[] bytes2 = ByteArrayUtils.decodeBase64(b2);
        BValue[] returns = BRunUtil.invoke(result, "testBlobReturnTuple1", new BValue[]{});
        Assert.assertEquals(returns.length, 2);
        Assert.assertSame(returns[0].getClass(), BByteArray.class);
        Assert.assertSame(returns[1].getClass(), BByteArray.class);
        BByteArray blob1 = (BByteArray) returns[0];
        BByteArray blob2 = (BByteArray) returns[1];
        ByteArrayUtils.assertJBytesWithBBytes(bytes1, blob1);
        ByteArrayUtils.assertJBytesWithBBytes(bytes2, blob2);
    }

    @Test(description = "Test blob tuple return 2")
    public void testBlobReturnTuple2() {
        String b0 = "aaab";
        String b1 = "a4f5";
        String b2 = "aeeecdefabcd12345567888822";
        String b3 = "aGVsbG8gYmFsbGVyaW5hICEhIQ==";
        byte[] bytes0 = ByteArrayUtils.hexStringToByteArray(b0);
        byte[] bytes1 = ByteArrayUtils.decodeBase64(b1);
        byte[] bytes2 = ByteArrayUtils.hexStringToByteArray(b2);
        byte[] bytes3 = ByteArrayUtils.decodeBase64(b3);
        BValue[] returns = BRunUtil.invoke(result, "testBlobReturnTuple2", new BValue[]{});
        Assert.assertEquals(returns.length, 4);
        assertResult(bytes0, returns[0]);
        assertResult(bytes1, returns[1]);
        assertResult(bytes2, returns[2]);
        assertResult(bytes3, returns[3]);
    }

    // Shared check: the returned value is a BByteArray with exactly these bytes.
    private void assertResult(byte[] bytes, BValue aReturn) {
        Assert.assertSame(aReturn.getClass(), BByteArray.class);
        BByteArray blob = (BByteArray) aReturn;
        ByteArrayUtils.assertJBytesWithBBytes(bytes, blob);
    }

    @Test(description = "Test return blob array")
    public void testBlobReturnArray() {
        String b1 = "aaab34dfca1267";
        String b2 = "aaabcfccadafcd34bdfabcdferf=";
        byte[] bytes1 = ByteArrayUtils.hexStringToByteArray(b1);
        byte[] bytes2 = ByteArrayUtils.decodeBase64(b2);
        BValue[] returns = BRunUtil.invoke(result, "testBlobReturnArray", new BValue[]{});
        Assert.assertEquals(returns.length, 2);
        Assert.assertSame(returns[0].getClass(), BByteArray.class);
        Assert.assertSame(returns[1].getClass(), BByteArray.class);
        BByteArray blobArray1 = (BByteArray) returns[0];
        BByteArray blobArray2 = (BByteArray) returns[1];
        ByteArrayUtils.assertJBytesWithBBytes(bytes1, blobArray1);
        ByteArrayUtils.assertJBytesWithBBytes(bytes2, blobArray2);
    }

    @Test(description = "Test blob field variable 1")
    public void testBlobField1() {
        byte[] bytes = "string".getBytes();
        BValue[] args = {ByteArrayUtils.createBByteArray(bytes)};
        BValue[] returns = BRunUtil.invoke(result, "testBlobField1", args);
        Assert.assertEquals(returns.length, 1);
        assertResult(bytes, returns[0]);
    }

    @Test(description = "Test blob global variable 2")
    public void testBlobField2() {
        String b16 = "aaabcfccadafcd341a4bdfabcd8912df";
        byte[] bytes = ByteArrayUtils.hexStringToByteArray(b16);
        BValue[] returns = BRunUtil.invoke(result, "testBlobField2", new BValue[]{});
        Assert.assertEquals(returns.length, 1);
        assertResult(bytes, returns[0]);
    }

    @Test(description = "Test blob hexadecimal format")
    public void testBlobHexFormat() {
        String b16 = "aaabcfccadafcd341a4bdfabcd8912df";
        String b64 = "aGVsbG8gYmFsbGVyaW5hICEhIQ==";
        String b64HexStr = ByteArrayUtils.byteArrayToHexString(ByteArrayUtils.decodeBase64(b64));
        BValue[] returns = BRunUtil.invoke(result, "testBlobHexFormat", new BValue[]{});
        Assert.assertEquals(returns.length, 3);
        BString blob1 = (BString) returns[0];
        BString blob2 = (BString) returns[1];
        BString blob3 = (BString) returns[2];
        Assert.assertEquals(blob1.stringValue(), b16, "Invalid value returned.");
        Assert.assertEquals(blob2.stringValue(), b16.toUpperCase(), "Invalid value returned.");
        Assert.assertEquals(blob3.stringValue(), b64HexStr, "Invalid value returned.");
    }

    @Test(description = "Test blob assign")
    public void testBlobAssign() {
        String b16 = "aaabcfccadafcd341a4bdfabcd8912df";
        String b64 = "aGVsbG8gYmFsbGVyaW5hICEhIQ==";
        byte[] bytes1 = ByteArrayUtils.hexStringToByteArray(b16);
        byte[] bytes2 = ByteArrayUtils.decodeBase64(b64);
        BValue[] returns = BRunUtil.invoke(result, "testBlobAssign", new BValue[]{});
        Assert.assertEquals(returns.length, 2);
        BByteArray blob1 = (BByteArray) returns[0];
        BByteArray blob2 = (BByteArray) returns[1];
        // The program swaps the two values, so the expectations are crossed.
        ByteArrayUtils.assertJBytesWithBBytes(bytes2, blob1);
        ByteArrayUtils.assertJBytesWithBBytes(bytes1, blob2);
    }

    @Test(description = "Test blob default value")
    public void testBlobDefaultValue() {
        String b0 = "aaab";
        String b1 = "aGVsbG8gYmFsbGVyaW5hICEhIQ==";
        byte[] empty = new byte[0];
        byte[] bytes0 = ByteArrayUtils.hexStringToByteArray(b0);
        byte[] bytes1 = ByteArrayUtils.decodeBase64(b1);
        BValue[] returns = BRunUtil.invoke(result, "testBlobDefaultValue", new BValue[]{});
        Assert.assertEquals(returns.length, 8);
        assertResult(empty, returns[0]);
        assertResult(empty, returns[1]);
        assertResult(bytes0, returns[2]);
        assertResult(bytes1, returns[3]);
        assertResult(empty, returns[4]);
        assertResult(bytes0, returns[5]);
        assertResult(bytes1, returns[6]);
        assertResult(empty, returns[7]);
    }

    @Test(description = "Test byte array literal value")
    public void testByteArrayLiteral() {
        byte[] bytes = new byte[]{1, 27, 34, (byte) 145, (byte) 224};
        BValue[] returns = BRunUtil.invoke(result, "testByteArrayLiteral", new BValue[]{});
        Assert.assertEquals(returns.length, 1);
        assertResult(bytes, returns[0]);
    }

    // NOTE(review): this annotation has no method attached — the annotated test
    // method appears to be missing here; confirm against the full source.
    @Test(description = "Test return byte array")
}
|
/**
 * TestNG test class for Ballerina blob/byte-array values: compiles
 * byte-array-value.bal once and invokes its functions, comparing the returned
 * Ballerina values against Java byte arrays built from the same hex/base64 inputs.
 */
class BByteArrayValueTest {
// Compiled test program, shared by all test methods.
private CompileResult result;
@BeforeClass
public void setup() {
result = BCompileUtil.compile("test-src/types/byte/byte-array-value.bal");
}
@Test(description = "Test blob value assignment")
public void testBlobParameter() {
byte[] bytes = "string".getBytes();
BValue[] args = {ByteArrayUtils.createBByteArray(bytes)};
BValue[] returns = BRunUtil.invoke(result, "testBlobParameter", args);
Assert.assertEquals(returns.length, 1);
assertResult(bytes, returns[0]);
}
@Test(description = "Test blob array")
public void testBlobArray() {
byte[] bytes1 = "string1".getBytes();
byte[] bytes2 = "string2".getBytes();
BByteArray byteArray1 = ByteArrayUtils.createBByteArray(bytes1);
BByteArray byteArray2 = ByteArrayUtils.createBByteArray(bytes2);
BValue[] args = {byteArray1, byteArray2};
BValue[] returns = BRunUtil.invoke(result, "testBlobParameterArray", args);
Assert.assertEquals(returns.length, 2);
Assert.assertSame(returns[0].getClass(), BByteArray.class);
Assert.assertSame(returns[1].getClass(), BByteArray.class);
BByteArray blob1 = (BByteArray) returns[0];
BByteArray blob2 = (BByteArray) returns[1];
ByteArrayUtils.assertJBytesWithBBytes(bytes1, blob1);
ByteArrayUtils.assertJBytesWithBBytes(bytes2, blob2);
}
@Test(description = "Test blob global variable1")
public void testBlobGlobalVariable1() {
byte[] bytes = "string".getBytes();
BValue[] args = {ByteArrayUtils.createBByteArray(bytes)};
BValue[] returns = BRunUtil.invoke(result, "testGlobalVariable1", args);
Assert.assertEquals(returns.length, 1);
assertResult(bytes, returns[0]);
}
@Test(description = "Test blob global variable2")
public void testBlobGlobalVariable2() {
// Hex-encoded literal; compared against the Ballerina-side global value.
String b16 = "aeeecdefabcd12345567888822";
byte[] bytes = ByteArrayUtils.hexStringToByteArray(b16);
BValue[] returns = BRunUtil.invoke(result, "testGlobalVariable2", new BValue[]{});
Assert.assertEquals(returns.length, 1);
assertResult(bytes, returns[0]);
}
@Test(description = "Test blob global variable3")
public void testBlobGlobalVariable3() {
// Base64-encoded literal; compared against the Ballerina-side global value.
String b64 = "aGVsbG8gYmFsbGVyaW5hICEhIQ==";
byte[] bytes = ByteArrayUtils.decodeBase64(b64);
BValue[] returns = BRunUtil.invoke(result, "testGlobalVariable3", new BValue[]{});
Assert.assertEquals(returns.length, 1);
assertResult(bytes, returns[0]);
}
@Test(description = "Test blob tuple return 1")
public void testBlobReturnTuple1() {
String b1 = "aaabafac23345678";
String b2 = "a4f5njn/jnfvr+d=";
byte[] bytes1 = ByteArrayUtils.hexStringToByteArray(b1);
byte[] bytes2 = ByteArrayUtils.decodeBase64(b2);
BValue[] returns = BRunUtil.invoke(result, "testBlobReturnTuple1", new BValue[]{});
Assert.assertEquals(returns.length, 2);
Assert.assertSame(returns[0].getClass(), BByteArray.class);
Assert.assertSame(returns[1].getClass(), BByteArray.class);
BByteArray blob1 = (BByteArray) returns[0];
BByteArray blob2 = (BByteArray) returns[1];
ByteArrayUtils.assertJBytesWithBBytes(bytes1, blob1);
ByteArrayUtils.assertJBytesWithBBytes(bytes2, blob2);
}
@Test(description = "Test blob tuple return 2")
public void testBlobReturnTuple2() {
String b0 = "aaab";
String b1 = "a4f5";
String b2 = "aeeecdefabcd12345567888822";
String b3 = "aGVsbG8gYmFsbGVyaW5hICEhIQ==";
byte[] bytes0 = ByteArrayUtils.hexStringToByteArray(b0);
byte[] bytes1 = ByteArrayUtils.decodeBase64(b1);
byte[] bytes2 = ByteArrayUtils.hexStringToByteArray(b2);
byte[] bytes3 = ByteArrayUtils.decodeBase64(b3);
BValue[] returns = BRunUtil.invoke(result, "testBlobReturnTuple2", new BValue[]{});
Assert.assertEquals(returns.length, 4);
assertResult(bytes0, returns[0]);
assertResult(bytes1, returns[1]);
assertResult(bytes2, returns[2]);
assertResult(bytes3, returns[3]);
}
// Shared helper: asserts the returned value is a BByteArray whose contents
// match the given Java byte array.
private void assertResult(byte[] bytes, BValue aReturn) {
Assert.assertSame(aReturn.getClass(), BByteArray.class);
BByteArray blob = (BByteArray) aReturn;
ByteArrayUtils.assertJBytesWithBBytes(bytes, blob);
}
@Test(description = "Test return blob array")
public void testBlobReturnArray() {
String b1 = "aaab34dfca1267";
String b2 = "aaabcfccadafcd34bdfabcdferf=";
byte[] bytes1 = ByteArrayUtils.hexStringToByteArray(b1);
byte[] bytes2 = ByteArrayUtils.decodeBase64(b2);
BValue[] returns = BRunUtil.invoke(result, "testBlobReturnArray", new BValue[]{});
Assert.assertEquals(returns.length, 2);
Assert.assertSame(returns[0].getClass(), BByteArray.class);
Assert.assertSame(returns[1].getClass(), BByteArray.class);
BByteArray blobArray1 = (BByteArray) returns[0];
BByteArray blobArray2 = (BByteArray) returns[1];
ByteArrayUtils.assertJBytesWithBBytes(bytes1, blobArray1);
ByteArrayUtils.assertJBytesWithBBytes(bytes2, blobArray2);
}
@Test(description = "Test blob field variable 1")
public void testBlobField1() {
byte[] bytes = "string".getBytes();
BValue[] args = {ByteArrayUtils.createBByteArray(bytes)};
BValue[] returns = BRunUtil.invoke(result, "testBlobField1", args);
Assert.assertEquals(returns.length, 1);
assertResult(bytes, returns[0]);
}
// NOTE(review): the description string says "global variable 2" but this method
// exercises testBlobField2 — description looks copy-pasted; confirm before renaming.
@Test(description = "Test blob global variable 2")
public void testBlobField2() {
String b16 = "aaabcfccadafcd341a4bdfabcd8912df";
byte[] bytes = ByteArrayUtils.hexStringToByteArray(b16);
BValue[] returns = BRunUtil.invoke(result, "testBlobField2", new BValue[]{});
Assert.assertEquals(returns.length, 1);
assertResult(bytes, returns[0]);
}
@Test(description = "Test blob hexadecimal format")
public void testBlobHexFormat() {
String b16 = "aaabcfccadafcd341a4bdfabcd8912df";
String b64 = "aGVsbG8gYmFsbGVyaW5hICEhIQ==";
// Expected hex string of the base64-decoded payload.
String b64HexStr = ByteArrayUtils.byteArrayToHexString(ByteArrayUtils.decodeBase64(b64));
BValue[] returns = BRunUtil.invoke(result, "testBlobHexFormat", new BValue[]{});
Assert.assertEquals(returns.length, 3);
BString blob1 = (BString) returns[0];
BString blob2 = (BString) returns[1];
BString blob3 = (BString) returns[2];
Assert.assertEquals(blob1.stringValue(), b16, "Invalid value returned.");
Assert.assertEquals(blob2.stringValue(), b16.toUpperCase(), "Invalid value returned.");
Assert.assertEquals(blob3.stringValue(), b64HexStr, "Invalid value returned.");
}
@Test(description = "Test blob assign")
public void testBlobAssign() {
String b16 = "aaabcfccadafcd341a4bdfabcd8912df";
String b64 = "aGVsbG8gYmFsbGVyaW5hICEhIQ==";
byte[] bytes1 = ByteArrayUtils.hexStringToByteArray(b16);
byte[] bytes2 = ByteArrayUtils.decodeBase64(b64);
BValue[] returns = BRunUtil.invoke(result, "testBlobAssign", new BValue[]{});
Assert.assertEquals(returns.length, 2);
BByteArray blob1 = (BByteArray) returns[0];
BByteArray blob2 = (BByteArray) returns[1];
// Expected values are deliberately crossed: the Ballerina function swaps the
// two blobs before returning them.
ByteArrayUtils.assertJBytesWithBBytes(bytes2, blob1);
ByteArrayUtils.assertJBytesWithBBytes(bytes1, blob2);
}
@Test(description = "Test blob default value")
public void testBlobDefaultValue() {
String b0 = "aaab";
String b1 = "aGVsbG8gYmFsbGVyaW5hICEhIQ==";
byte[] empty = new byte[0];
byte[] bytes0 = ByteArrayUtils.hexStringToByteArray(b0);
byte[] bytes1 = ByteArrayUtils.decodeBase64(b1);
BValue[] returns = BRunUtil.invoke(result, "testBlobDefaultValue", new BValue[]{});
Assert.assertEquals(returns.length, 8);
assertResult(empty, returns[0]);
assertResult(empty, returns[1]);
assertResult(bytes0, returns[2]);
assertResult(bytes1, returns[3]);
assertResult(empty, returns[4]);
assertResult(bytes0, returns[5]);
assertResult(bytes1, returns[6]);
assertResult(empty, returns[7]);
}
@Test(description = "Test byte array literal value")
public void testByteArrayLiteral() {
byte[] bytes = new byte[]{1, 27, 34, (byte) 145, (byte) 224};
BValue[] returns = BRunUtil.invoke(result, "testByteArrayLiteral", new BValue[]{});
Assert.assertEquals(returns.length, 1);
assertResult(bytes, returns[0]);
}
// NOTE(review): dangling annotation — the annotated test method body is missing
// here; this snippet appears truncated. Confirm against the full source file.
@Test(description = "Test equality of byte array returned and their size")
}
|
I think `isDefaultActionUnavailable()` is not the best choice here because suspension is a temporary state; some input may come after this check. What about using `mailboxProcessor.isMailboxLoopRunning()` instead? It is updated on `InputStatus.END_OF_INPUT` which seems exactly what is needed here.
|
/**
 * Blocks until the task has drained all queued input and its mailbox loop has
 * stopped processing it, then rethrows any error raised on the task thread.
 *
 * @throws Exception if the task thread terminated with an error
 */
public void waitForInputProcessing() throws Exception {
    // Phase 1: wait until every input gate queue has been drained by the task.
    while (taskThread.isAlive()) {
        boolean allEmpty = true;
        for (int i = 0; i < numInputGates; i++) {
            if (!inputGates[i].allQueuesEmpty()) {
                allEmpty = false;
            }
        }
        if (allEmpty) {
            break;
        }
    }
    // Phase 2: ask the mailbox thread itself whether input processing has finished.
    final AtomicBoolean allInputProcessed = new AtomicBoolean();
    final MailboxProcessor mailboxProcessor = taskThread.task.mailboxProcessor;
    final MailboxExecutor mailboxExecutor = mailboxProcessor.getMainMailboxExecutor();
    while (taskThread.isAlive()) {
        try {
            final CountDownLatch latch = new CountDownLatch(1);
            mailboxExecutor.execute(() -> {
                // Check whether the mailbox loop has terminated (updated on
                // InputStatus.END_OF_INPUT) instead of isDefaultActionUnavailable():
                // suspension is only a temporary state and more input may still
                // arrive after that check, so it can report completion too early.
                allInputProcessed.set(!mailboxProcessor.isMailboxLoopRunning());
                latch.countDown();
            }, "query-whether-processInput-has-suspend-itself");
            // Bounded wait: the mail may never run if the mailbox shuts down concurrently.
            latch.await(1, TimeUnit.SECONDS);
        } catch (RejectedExecutionException ex) {
            // Mailbox is already closed; keep polling until the task thread exits.
        }
        if (allInputProcessed.get()) {
            break;
        }
        try {
            Thread.sleep(1);
        } catch (InterruptedException ignored) {
            // Best-effort polling loop: swallow and retry until the task finishes.
        }
    }
    Throwable error = taskThread.getError();
    if (error != null) {
        throw new Exception("Exception in the task thread", error);
    }
}
|
allInputProcessed.set(mailboxProcessor.isDefaultActionUnavailable());
|
/**
 * Blocks until the task has drained all queued input and the mailbox default
 * action reports itself unavailable, then rethrows any error raised on the
 * task thread.
 *
 * @throws Exception if the task thread terminated with an error
 */
public void waitForInputProcessing() throws Exception {
// Phase 1: wait until every input gate queue has been drained by the task.
while (taskThread.isAlive()) {
boolean allEmpty = true;
for (int i = 0; i < numInputGates; i++) {
if (!inputGates[i].allQueuesEmpty()) {
allEmpty = false;
}
}
if (allEmpty) {
break;
}
}
// Phase 2: query, from the mailbox thread itself, whether the default action has suspended.
final AtomicBoolean allInputProcessed = new AtomicBoolean();
final MailboxProcessor mailboxProcessor = taskThread.task.mailboxProcessor;
final MailboxExecutor mailboxExecutor = mailboxProcessor.getMainMailboxExecutor();
while (taskThread.isAlive()) {
try {
final CountDownLatch latch = new CountDownLatch(1);
mailboxExecutor.execute(() -> {
// NOTE(review): suspension is a temporary state and more input may arrive
// after this check; !mailboxProcessor.isMailboxLoopRunning() (updated on
// InputStatus.END_OF_INPUT) may be the more reliable signal — confirm.
allInputProcessed.set(mailboxProcessor.isDefaultActionUnavailable());
latch.countDown();
}, "query-whether-processInput-has-suspend-itself");
// Bounded wait: the mail may never run if the mailbox shuts down concurrently.
latch.await(1, TimeUnit.SECONDS);
} catch (RejectedExecutionException ex) {
// Mailbox is already closed; keep polling until the task thread exits.
}
if (allInputProcessed.get()) {
break;
}
try {
Thread.sleep(1);
} catch (InterruptedException ignored) {}
}
Throwable error = taskThread.getError();
if (error != null) {
throw new Exception("Exception in the task thread", error);
}
}
|
class StreamTaskTestHarness<OUT> {
public static final int DEFAULT_MEMORY_MANAGER_SIZE = 1024 * 1024;
public static final int DEFAULT_NETWORK_BUFFER_SIZE = 1024;
// Factory creating the StreamTask under test from a (mock) environment.
private final FunctionWithException<Environment, ? extends StreamTask<OUT, ?>, Exception> taskFactory;
public long memorySize;
public int bufferSize;
protected StreamMockEnvironment mockEnv;
protected ExecutionConfig executionConfig;
public Configuration jobConfig;
public Configuration taskConfig;
// View over taskConfig used to configure the stream operator chain.
protected StreamConfig streamConfig;
protected TestTaskStateManager taskStateManager;
private TypeSerializer<OUT> outputSerializer;
private TypeSerializer<StreamElement> outputStreamRecordSerializer;
// Collects everything the task emits (records and events interleaved).
private LinkedBlockingQueue<Object> outputList;
protected TaskThread taskThread;
protected int numInputGates;
protected int numInputChannelsPerGate;
// Guards against configuring the singleton operator chain twice.
private boolean setupCalled = false;
@SuppressWarnings("rawtypes")
protected StreamTestSingleInputGate[] inputGates;
// Convenience constructor: local recovery disabled.
public StreamTaskTestHarness(
FunctionWithException<Environment, ? extends StreamTask<OUT, ?>, Exception> taskFactory,
TypeInformation<OUT> outputType) {
this(taskFactory, outputType, TestLocalRecoveryConfig.disabled());
}
// Convenience constructor: local recovery rooted at the given directory.
public StreamTaskTestHarness(
FunctionWithException<Environment, ? extends StreamTask<OUT, ?>, Exception> taskFactory,
TypeInformation<OUT> outputType,
File localRootDir) {
this(taskFactory, outputType, new LocalRecoveryConfig(true, new LocalRecoveryDirectoryProviderImpl(localRootDir, new JobID(), new JobVertexID(), 0)));
}
// Main constructor: wires default sizes, fresh configurations, and the
// serializers used to read the task's output.
public StreamTaskTestHarness(
FunctionWithException<Environment, ? extends StreamTask<OUT, ?>, Exception> taskFactory,
TypeInformation<OUT> outputType,
LocalRecoveryConfig localRecoveryConfig) {
this.taskFactory = checkNotNull(taskFactory);
this.memorySize = DEFAULT_MEMORY_MANAGER_SIZE;
this.bufferSize = DEFAULT_NETWORK_BUFFER_SIZE;
this.jobConfig = new Configuration();
this.taskConfig = new Configuration();
this.executionConfig = new ExecutionConfig();
streamConfig = new StreamConfig(taskConfig);
streamConfig.setStateBackendUsesManagedMemory(true);
streamConfig.setManagedMemoryFractionOperatorOfUseCase(ManagedMemoryUseCase.STATE_BACKEND, 1.0);
outputSerializer = outputType.createSerializer(executionConfig);
// Output is read back as StreamElements (records + watermarks etc.).
outputStreamRecordSerializer = new StreamElementSerializer<>(outputSerializer);
this.taskStateManager = new TestTaskStateManager(localRecoveryConfig);
}
/** Returns the mock environment the task runs in (null before {@code invoke}). */
public StreamMockEnvironment getEnvironment() {
return mockEnv;
}
/** Returns the timer service of the running task. */
public TimerService getTimerService() {
return taskThread.task.getTimerService();
}
/** Returns the head (main) operator of the running task's chain. */
@SuppressWarnings("unchecked")
public <OP extends StreamOperator<OUT>> OP getHeadOperator() {
return (OP) taskThread.task.getMainOperator();
}
/**
* This must be overwritten for OneInputStreamTask or TwoInputStreamTask test harnesses.
*/
protected void initializeInputs() throws IOException, InterruptedException {}
public TestTaskStateManager getTaskStateManager() {
return taskStateManager;
}
/** Registers a restore snapshot for the given checkpoint id with the state manager. */
public void setTaskStateSnapshot(long checkpointId, TaskStateSnapshot taskStateSnapshot) {
taskStateManager.setReportedCheckpointId(checkpointId);
taskStateManager.setJobManagerTaskStateSnapshotsByCheckpointId(
Collections.singletonMap(checkpointId, taskStateSnapshot));
}
// Creates the queue that captures the task's output and registers it with the mock environment.
private void initializeOutput() {
outputList = new LinkedBlockingQueue<>();
mockEnv.addOutput(outputList, outputStreamRecordSerializer);
}
/**
* Users of the test harness can call this utility method to setup the stream config
* if there will only be a single operator to be tested. The method will setup the
* outgoing network connection for the operator.
*
* <p>For more advanced test cases such as testing chains of multiple operators with the harness,
* please manually configure the stream config.
*/
public void setupOutputForSingletonOperatorChain() {
Preconditions.checkState(!setupCalled, "This harness was already setup.");
setupCalled = true;
streamConfig.setChainStart();
streamConfig.setTimeCharacteristic(TimeCharacteristic.EventTime);
streamConfig.setNumberOfOutputs(1);
streamConfig.setTypeSerializerOut(outputSerializer);
streamConfig.setVertexID(0);
streamConfig.setOperatorID(new OperatorID(4711L, 123L));
// Dummy operator/nodes only exist so a valid output StreamEdge can be built.
StreamOperator<OUT> dummyOperator = new AbstractStreamOperator<OUT>() {
private static final long serialVersionUID = 1L;
};
List<StreamEdge> outEdgesInOrder = new LinkedList<>();
StreamNode sourceVertexDummy = new StreamNode(0, "group", null, dummyOperator, "source dummy", SourceStreamTask.class);
StreamNode targetVertexDummy = new StreamNode(1, "group", null, dummyOperator, "target dummy", SourceStreamTask.class);
outEdgesInOrder.add(new StreamEdge(sourceVertexDummy, targetVertexDummy, 0, new BroadcastPartitioner<>(), null /* output tag */));
streamConfig.setOutEdgesInOrder(outEdgesInOrder);
streamConfig.setNonChainedOutputs(outEdgesInOrder);
}
// Builds a fresh mock environment from the harness' current configuration.
public StreamMockEnvironment createEnvironment() {
return new StreamMockEnvironment(
jobConfig,
taskConfig,
executionConfig,
memorySize,
new MockInputSplitProvider(),
bufferSize,
taskStateManager);
}
/**
* Invoke the Task. This resets the output of any previous invocation. This will start a new
* Thread to execute the Task in. Use {@link
* Task thread to finish running.
*
*/
public Thread invoke() throws Exception {
return invoke(createEnvironment());
}
/**
* Invoke the Task. This resets the output of any previous invocation. This will start a new
* Thread to execute the Task in. Use {@link
* Task thread to finish running.
*
*/
// NOTE(review): the two javadoc blocks above are cut off mid-{@link} — looks like
// an extraction artifact; restore the full reference from the original source.
public Thread invoke(StreamMockEnvironment mockEnv) throws Exception {
// A harness instance supports only a single invocation.
checkState(this.mockEnv == null);
checkState(this.taskThread == null);
this.mockEnv = checkNotNull(mockEnv);
initializeInputs();
initializeOutput();
taskThread = new TaskThread(() -> taskFactory.apply(mockEnv));
taskThread.start();
// Busy-wait until the task factory has produced the task instance.
while (taskThread.task == null) {
Thread.sleep(10L);
}
return taskThread;
}
/**
* Waits for the task completion.
*/
public void waitForTaskCompletion() throws Exception {
waitForTaskCompletion(Long.MAX_VALUE);
}
public void waitForTaskCompletion(long timeout) throws Exception {
waitForTaskCompletion(timeout, false);
}
/**
* Waits for the task completion. If this does not happen within the timeout, then a
* TimeoutException is thrown.
*
* @param timeout Timeout for the task completion
*/
public void waitForTaskCompletion(long timeout, boolean ignoreCancellationException) throws Exception {
Preconditions.checkState(taskThread != null, "Task thread was not started.");
taskThread.join(timeout);
if (taskThread.getError() != null) {
// Optionally tolerate CancelTaskException anywhere in the failure cause chain.
if (!ignoreCancellationException || !ExceptionUtils.findThrowable(taskThread.getError(), CancelTaskException.class).isPresent()) {
throw new Exception("error in task", taskThread.getError());
}
}
}
/**
* Waits for the task to be running.
*/
public void waitForTaskRunning() throws Exception {
Preconditions.checkState(taskThread != null, "Task thread was not started.")
;
// Poll until the task reports running, failing fast if the thread dies first.
while (!streamTaskIsRunningOrFail()) {
Thread.sleep(10);
}
}
/** Returns the StreamTask instance started by {@code invoke}. */
public StreamTask<OUT, ?> getTask() {
return taskThread.task;
}
/**
* Get all the output from the task. This contains StreamRecords and Events interleaved. Use
* {@link org.apache.flink.streaming.util.TestHarnessUtil
|
class StreamTaskTestHarness<OUT> {
// NOTE(review): duplicate of the StreamTaskTestHarness fields/constructors above
// (dataset "context_after" copy) — kept verbatim.
public static final int DEFAULT_MEMORY_MANAGER_SIZE = 1024 * 1024;
public static final int DEFAULT_NETWORK_BUFFER_SIZE = 1024;
private final FunctionWithException<Environment, ? extends StreamTask<OUT, ?>, Exception> taskFactory;
public long memorySize;
public int bufferSize;
protected StreamMockEnvironment mockEnv;
protected ExecutionConfig executionConfig;
public Configuration jobConfig;
public Configuration taskConfig;
protected StreamConfig streamConfig;
protected TestTaskStateManager taskStateManager;
private TypeSerializer<OUT> outputSerializer;
private TypeSerializer<StreamElement> outputStreamRecordSerializer;
private LinkedBlockingQueue<Object> outputList;
protected TaskThread taskThread;
protected int numInputGates;
protected int numInputChannelsPerGate;
private boolean setupCalled = false;
@SuppressWarnings("rawtypes")
protected StreamTestSingleInputGate[] inputGates;
// Convenience constructor: local recovery disabled.
public StreamTaskTestHarness(
FunctionWithException<Environment, ? extends StreamTask<OUT, ?>, Exception> taskFactory,
TypeInformation<OUT> outputType) {
this(taskFactory, outputType, TestLocalRecoveryConfig.disabled());
}
// Convenience constructor: local recovery rooted at the given directory.
public StreamTaskTestHarness(
FunctionWithException<Environment, ? extends StreamTask<OUT, ?>, Exception> taskFactory,
TypeInformation<OUT> outputType,
File localRootDir) {
this(taskFactory, outputType, new LocalRecoveryConfig(true, new LocalRecoveryDirectoryProviderImpl(localRootDir, new JobID(), new JobVertexID(), 0)));
}
// Main constructor: wires default sizes, fresh configurations, and serializers.
public StreamTaskTestHarness(
FunctionWithException<Environment, ? extends StreamTask<OUT, ?>, Exception> taskFactory,
TypeInformation<OUT> outputType,
LocalRecoveryConfig localRecoveryConfig) {
this.taskFactory = checkNotNull(taskFactory);
this.memorySize = DEFAULT_MEMORY_MANAGER_SIZE;
this.bufferSize = DEFAULT_NETWORK_BUFFER_SIZE;
this.jobConfig = new Configuration();
this.taskConfig = new Configuration();
this.executionConfig = new ExecutionConfig();
streamConfig = new StreamConfig(taskConfig);
streamConfig.setStateBackendUsesManagedMemory(true);
streamConfig.setManagedMemoryFractionOperatorOfUseCase(ManagedMemoryUseCase.STATE_BACKEND, 1.0);
outputSerializer = outputType.createSerializer(executionConfig);
outputStreamRecordSerializer = new StreamElementSerializer<>(outputSerializer);
this.taskStateManager = new TestTaskStateManager(localRecoveryConfig);
}
// NOTE(review): duplicate of the accessor methods above (dataset "context_after" copy).
public StreamMockEnvironment getEnvironment() {
return mockEnv;
}
public TimerService getTimerService() {
return taskThread.task.getTimerService();
}
@SuppressWarnings("unchecked")
public <OP extends StreamOperator<OUT>> OP getHeadOperator() {
return (OP) taskThread.task.getMainOperator();
}
/**
* This must be overwritten for OneInputStreamTask or TwoInputStreamTask test harnesses.
*/
protected void initializeInputs() throws IOException, InterruptedException {}
public TestTaskStateManager getTaskStateManager() {
return taskStateManager;
}
public void setTaskStateSnapshot(long checkpointId, TaskStateSnapshot taskStateSnapshot) {
taskStateManager.setReportedCheckpointId(checkpointId);
taskStateManager.setJobManagerTaskStateSnapshotsByCheckpointId(
Collections.singletonMap(checkpointId, taskStateSnapshot));
}
// Creates the output-capture queue and registers it with the mock environment.
private void initializeOutput() {
outputList = new LinkedBlockingQueue<>();
mockEnv.addOutput(outputList, outputStreamRecordSerializer);
}
/**
* Users of the test harness can call this utility method to setup the stream config
* if there will only be a single operator to be tested. The method will setup the
* outgoing network connection for the operator.
*
* <p>For more advanced test cases such as testing chains of multiple operators with the harness,
* please manually configure the stream config.
*/
// NOTE(review): duplicate of setupOutputForSingletonOperatorChain above
// (dataset "context_after" copy) — kept verbatim.
public void setupOutputForSingletonOperatorChain() {
Preconditions.checkState(!setupCalled, "This harness was already setup.");
setupCalled = true;
streamConfig.setChainStart();
streamConfig.setTimeCharacteristic(TimeCharacteristic.EventTime);
streamConfig.setNumberOfOutputs(1);
streamConfig.setTypeSerializerOut(outputSerializer);
streamConfig.setVertexID(0);
streamConfig.setOperatorID(new OperatorID(4711L, 123L));
StreamOperator<OUT> dummyOperator = new AbstractStreamOperator<OUT>() {
private static final long serialVersionUID = 1L;
};
List<StreamEdge> outEdgesInOrder = new LinkedList<>();
StreamNode sourceVertexDummy = new StreamNode(0, "group", null, dummyOperator, "source dummy", SourceStreamTask.class);
StreamNode targetVertexDummy = new StreamNode(1, "group", null, dummyOperator, "target dummy", SourceStreamTask.class);
outEdgesInOrder.add(new StreamEdge(sourceVertexDummy, targetVertexDummy, 0, new BroadcastPartitioner<>(), null /* output tag */));
streamConfig.setOutEdgesInOrder(outEdgesInOrder);
streamConfig.setNonChainedOutputs(outEdgesInOrder);
}
public StreamMockEnvironment createEnvironment() {
return new StreamMockEnvironment(
jobConfig,
taskConfig,
executionConfig,
memorySize,
new MockInputSplitProvider(),
bufferSize,
taskStateManager);
}
/**
* Invoke the Task. This resets the output of any previous invocation. This will start a new
* Thread to execute the Task in. Use {@link
* Task thread to finish running.
*
*/
// NOTE(review): duplicate of the invoke/waitForTaskCompletion methods above
// (dataset "context_after" copy); the javadoc is cut off mid-{@link} — extraction artifact.
public Thread invoke() throws Exception {
return invoke(createEnvironment());
}
/**
* Invoke the Task. This resets the output of any previous invocation. This will start a new
* Thread to execute the Task in. Use {@link
* Task thread to finish running.
*
*/
public Thread invoke(StreamMockEnvironment mockEnv) throws Exception {
checkState(this.mockEnv == null);
checkState(this.taskThread == null);
this.mockEnv = checkNotNull(mockEnv);
initializeInputs();
initializeOutput();
taskThread = new TaskThread(() -> taskFactory.apply(mockEnv));
taskThread.start();
// Busy-wait until the task factory has produced the task instance.
while (taskThread.task == null) {
Thread.sleep(10L);
}
return taskThread;
}
/**
* Waits for the task completion.
*/
public void waitForTaskCompletion() throws Exception {
waitForTaskCompletion(Long.MAX_VALUE);
}
public void waitForTaskCompletion(long timeout) throws Exception {
waitForTaskCompletion(timeout, false);
}
/**
* Waits for the task completion. If this does not happen within the timeout, then a
* TimeoutException is thrown.
*
* @param timeout Timeout for the task completion
*/
public void waitForTaskCompletion(long timeout, boolean ignoreCancellationException) throws Exception {
Preconditions.checkState(taskThread != null, "Task thread was not started.");
taskThread.join(timeout);
if (taskThread.getError() != null) {
if (!ignoreCancellationException || !ExceptionUtils.findThrowable(taskThread.getError(), CancelTaskException.class).isPresent()) {
throw new Exception("error in task", taskThread.getError());
}
}
}
/**
* Waits for the task to be running.
*/
// NOTE(review): duplicate of waitForTaskRunning/getTask above (dataset "context_after" copy).
public void waitForTaskRunning() throws Exception {
Preconditions.checkState(taskThread != null, "Task thread was not started.");
StreamTask<?, ?> streamTask = taskThread.task;
while (!streamTask.isRunning()) {
Thread.sleep(10);
// Fail fast if the task thread died before ever reaching the running state.
if (!taskThread.isAlive()) {
if (taskThread.getError() != null) {
throw new Exception("Task Thread failed due to an error.", taskThread.getError());
} else {
throw new Exception("Task Thread unexpectedly shut down.");
}
}
}
}
public StreamTask<OUT, ?> getTask() {
return taskThread.task;
}
/**
* Get all the output from the task. This contains StreamRecords and Events interleaved. Use
* {@link org.apache.flink.streaming.util.TestHarnessUtil
|
OK, I will add a unit test now.
|
/**
 * Fetches up to {@code batchSize} records from the queue, waiting at most the given
 * timeout overall. May return fewer records (possibly none) if the timeout elapses first.
 *
 * @param batchSize maximum number of records to return
 * @param timeout   maximum total time to wait
 * @param timeUnit  unit of {@code timeout}
 * @return the records fetched within the time budget
 */
public List<Record> fetchRecords(final int batchSize, final int timeout, final TimeUnit timeUnit) {
    List<Record> result = new LinkedList<>();
    long start = System.currentTimeMillis();
    long timeoutMillis = timeUnit.toMillis(timeout);
    int recordsCount = 0;
    while (batchSize > recordsCount) {
        List<Record> records = queue.poll();
        if (null == records || records.isEmpty()) {
            // Sleep no longer than the remaining time budget, so that timeouts
            // shorter than 100 ms are honored instead of oversleeping a fixed 100 ms.
            long remainingMillis = timeoutMillis - (System.currentTimeMillis() - start);
            if (remainingMillis <= 0) {
                break;
            }
            TimeUnit.MILLISECONDS.sleep(Math.min(100L, remainingMillis));
        } else {
            recordsCount += records.size();
            result.addAll(records);
        }
        if (timeoutMillis <= System.currentTimeMillis() - start) {
            break;
        }
    }
    return result;
}
|
TimeUnit.MILLISECONDS.sleep(100L);
|
/**
 * Fetches up to {@code batchSize} records from the queue, waiting at most the given
 * timeout overall; may return fewer records if the timeout elapses first.
 */
public List<Record> fetchRecords(final int batchSize, final int timeout, final TimeUnit timeUnit) {
List<Record> result = new LinkedList<>();
long start = System.currentTimeMillis();
int recordsCount = 0;
while (batchSize > recordsCount) {
List<Record> records = queue.poll();
if (null == records || records.isEmpty()) {
// Sleep at most 100 ms, but never longer than the configured timeout.
TimeUnit.MILLISECONDS.sleep(Math.min(100, timeUnit.toMillis(timeout)));
} else {
recordsCount += records.size();
result.addAll(records);
}
// Stop once the total time budget is spent.
if (timeUnit.toMillis(timeout) <= System.currentTimeMillis() - start) {
break;
}
}
return result;
}
|
/**
 * In-memory pipeline channel backed by a bounded blocking queue; pushing blocks
 * when the queue is full, and acknowledgements are forwarded to the given callback.
 */
class SimpleMemoryPipelineChannel implements PipelineChannel {
private final BlockingQueue<List<Record>> queue;
private final AckCallback ackCallback;
public SimpleMemoryPipelineChannel(final int blockQueueSize, final AckCallback ackCallback) {
this.queue = new ArrayBlockingQueue<>(blockQueueSize);
this.ackCallback = ackCallback;
}
// Blocks until queue space is available; InterruptedException is rethrown unchecked.
@SneakyThrows(InterruptedException.class)
@Override
public void pushRecords(final List<Record> records) {
queue.put(records);
}
// NOTE(review): the annotations below appear to belong to a fetchRecords(...)
// override whose body is missing here — this snippet looks truncated (note the
// doubled @Override); confirm against the full source file.
@SneakyThrows(InterruptedException.class)
@Override
@Override
public void ack(final List<Record> records) {
ackCallback.onAck(records);
}
@Override
public void close() {
queue.clear();
}
}
|
// NOTE(review): duplicate of the SimpleMemoryPipelineChannel class above
// (dataset "context_after" copy) — kept verbatim, including the truncated
// doubled @Override before ack(...).
class SimpleMemoryPipelineChannel implements PipelineChannel {
private final BlockingQueue<List<Record>> queue;
private final AckCallback ackCallback;
public SimpleMemoryPipelineChannel(final int blockQueueSize, final AckCallback ackCallback) {
this.queue = new ArrayBlockingQueue<>(blockQueueSize);
this.ackCallback = ackCallback;
}
@SneakyThrows(InterruptedException.class)
@Override
public void pushRecords(final List<Record> records) {
queue.put(records);
}
@SneakyThrows(InterruptedException.class)
@Override
@Override
public void ack(final List<Record> records) {
ackCallback.onAck(records);
}
@Override
public void close() {
queue.clear();
}
}
|
An empty list means the solution needs no moves; null means there are no solutions. I don't understand the second paragraph: if the result is null, we continue because we found no solution. Otherwise, we compare it to the current best, if any.
|
/**
 * Finds the first move of the shortest move sequence that frees capacity for the
 * node which could not be relocated in the given failure path, or an empty move
 * when no mitigation is needed or none can be found.
 */
private Move findMitigatingMove(CapacityChecker.HostFailurePath failurePath) {
    Optional<Node> stuckTenant = failurePath.failureReason.tenant;
    if (stuckTenant.isEmpty()) {
        return Move.empty();
    }
    Node tenant = stuckTenant.get();
    NodeList allNodes = nodeRepository().list();
    HostCapacity hostCapacity = new HostCapacity(allNodes, nodeRepository().resourcesCalculator());
    Set<Node> spareHosts =
            hostCapacity.findSpareHosts(allNodes.hosts().satisfies(tenant.resources()).asList(), 2);
    List<Node> candidateHosts = allNodes.hosts().except(spareHosts).asList();
    CapacitySolver solver = new CapacitySolver(hostCapacity, maxIterations);
    // Keep the shortest non-null mitigation over all spare hosts
    // (null means "no solution for this spare host").
    List<Move> best = null;
    for (Node spareHost : spareHosts) {
        List<Move> candidate = solver.makeRoomFor(tenant, spareHost, candidateHosts, List.of(), List.of());
        if (candidate == null) {
            continue;
        }
        if (best == null || candidate.size() < best.size()) {
            best = candidate;
        }
    }
    // An empty list means the solution needs no moves; null means no solution exists.
    return (best == null || best.isEmpty()) ? Move.empty() : best.get(0);
}
|
if (shortestMitigation == null || shortestMitigation.size() > mitigation.size())
|
/**
 * Finds the first move of the shortest move sequence that frees capacity for the
 * node which could not be relocated in the given failure path, or an empty move
 * when no mitigation is needed or none can be found.
 */
private Move findMitigatingMove(CapacityChecker.HostFailurePath failurePath) {
Optional<Node> nodeWhichCantMove = failurePath.failureReason.tenant;
if (nodeWhichCantMove.isEmpty()) return Move.empty();
Node node = nodeWhichCantMove.get();
NodeList allNodes = nodeRepository().list();
HostCapacity hostCapacity = new HostCapacity(allNodes, nodeRepository().resourcesCalculator());
Set<Node> spareHosts = hostCapacity.findSpareHosts(allNodes.hosts().satisfies(node.resources()).asList(), 2);
List<Node> hosts = allNodes.hosts().except(spareHosts).asList();
CapacitySolver capacitySolver = new CapacitySolver(hostCapacity, maxIterations);
// Track the shortest non-null mitigation; null means "no solution for this spare host".
List<Move> shortestMitigation = null;
for (Node spareHost : spareHosts) {
List<Move> mitigation = capacitySolver.makeRoomFor(node, spareHost, hosts, List.of(), List.of());
if (mitigation == null) continue;
if (shortestMitigation == null || shortestMitigation.size() > mitigation.size())
shortestMitigation = mitigation;
}
// An empty list means the solution needs no moves; null means no solution exists.
if (shortestMitigation == null || shortestMitigation.isEmpty()) return Move.empty();
return shortestMitigation.get(0);
}
|
class SpareCapacityMaintainer extends NodeRepositoryMaintainer {
// Budget for the capacity solver's search (guards against combinatorial blow-up).
private final int maxIterations;
private final Deployer deployer;
private final Metric metric;
public SpareCapacityMaintainer(Deployer deployer,
NodeRepository nodeRepository,
Metric metric,
Duration interval) {
this(deployer, nodeRepository, metric, interval,
10_000
);
}
// Full constructor allowing the solver iteration budget to be set (used by tests).
public SpareCapacityMaintainer(Deployer deployer,
NodeRepository nodeRepository,
Metric metric,
Duration interval,
int maxIterations) {
super(nodeRepository, interval);
this.deployer = deployer;
this.metric = metric;
this.maxIterations = maxIterations;
}
/**
 * Reports overcommitted hosts and the current spare host capacity, and — when no
 * spare capacity is left — attempts a single mitigating node move to regain some.
 */
@Override
protected void maintain() {
    // Spare-capacity management is only meaningful when tenants share hosts.
    if ( ! nodeRepository().zone().getCloud().allowHostSharing()) return;
    CapacityChecker capacityChecker = new CapacityChecker(nodeRepository());
    List<Node> overcommittedHosts = capacityChecker.findOvercommittedHosts();
    if ( ! overcommittedHosts.isEmpty()) {
        log.log(Level.WARNING, String.format("%d nodes are overcommitted! [ %s ]",
                                             overcommittedHosts.size(),
                                             overcommittedHosts.stream().map(Node::hostname).collect(Collectors.joining(", "))));
    }
    metric.set("overcommittedHosts", overcommittedHosts.size(), null);
    Optional<CapacityChecker.HostFailurePath> failurePath = capacityChecker.worstCaseHostLossLeadingToFailure();
    if (failurePath.isPresent()) {
        // Number of hosts we could lose beyond the first before failing.
        int spareHostCapacity = failurePath.get().hostsCausingFailure.size() - 1;
        if (spareHostCapacity == 0) {
            // Try to regain one host worth of spare capacity by moving a node.
            Move move = findMitigatingMove(failurePath.get());
            if (moving(move)) {
                spareHostCapacity++;
            }
        }
        metric.set("spareHostCapacity", spareHostCapacity, null);
    }
}
// Returns whether the given move counts as in progress: empty moves do not,
// already-retired nodes count as moving, otherwise the move is executed (dry run off).
private boolean moving(Move move) {
if (move.isEmpty()) return false;
// NOTE(review): allocation() is unwrapped with get() — presumably a Move's node
// is always allocated; confirm, else this can throw NoSuchElementException.
if (move.node().allocation().get().membership().retired()) return true;
return move.execute(false, Agent.SpareCapacityMaintainer, deployer, metric, nodeRepository());
}
private static class CapacitySolver {
private final HostCapacity hostCapacity;
// Hard cap on solver steps; findRoomFor bails out with null once exceeded.
private final int maxIterations;
private int iterations = 0;
CapacitySolver(HostCapacity hostCapacity, int maxIterations) {
this.hostCapacity = hostCapacity;
this.maxIterations = maxIterations;
}
/** The map of subproblem solutions already found. The value is null when there is no solution. */
private Map<SolutionKey, List<Move>> solutions = new HashMap<>();
/**
 * Finds the shortest sequence of moves which makes room for the given node on the given host,
 * assuming the given moves already made over the given hosts' current allocation.
 *
 * @param node the node to make room for
 * @param host the target host to make room on
 * @param hosts the hosts onto which we can move nodes
 * @param movesConsidered the moves already being considered to add as part of this scenario
 *                        (after any moves made by this)
 * @param movesMade the moves already made in this scenario
 * @return the list of movesMade with the moves needed for this appended, in the order they should be performed,
 *         or null if no sequence could be found
 */
List<Move> makeRoomFor(Node node, Node host, List<Node> hosts, List<Move> movesConsidered, List<Move> movesMade) {
    SolutionKey solutionKey = new SolutionKey(node, host, movesConsidered, movesMade);
    // Use containsKey rather than get() == null to decide whether to solve:
    // a cached null is a valid entry meaning "no solution", and treating it as
    // "not yet computed" would re-run the expensive search on every lookup.
    if ( ! solutions.containsKey(solutionKey)) {
        solutions.put(solutionKey, findRoomFor(node, host, hosts, movesConsidered, movesMade));
    }
    return solutions.get(solutionKey);
}
private List<Move> findRoomFor(Node node, Node host, List<Node> hosts,
List<Move> movesConsidered, List<Move> movesMade) {
if (iterations++ > maxIterations)
return null;
if ( ! host.resources().satisfies(node.resources())) return null;
NodeResources freeCapacity = freeCapacityWith(movesMade, host);
if (freeCapacity.satisfies(node.resources())) return List.of();
List<Move> shortest = null;
for (var i = subsets(hostCapacity.allNodes().childrenOf(host), 5); i.hasNext(); ) {
List<Node> childrenToMove = i.next();
if ( ! addResourcesOf(childrenToMove, freeCapacity).satisfies(node.resources())) continue;
List<Move> moves = move(childrenToMove, host, hosts, movesConsidered, movesMade);
if (moves == null) continue;
if (shortest == null || moves.size() < shortest.size())
shortest = moves;
}
if (shortest == null) return null;
return append(movesMade, shortest);
}
private List<Move> move(List<Node> nodes, Node host, List<Node> hosts, List<Move> movesConsidered, List<Move> movesMade) {
List<Move> moves = new ArrayList<>();
for (Node childToMove : nodes) {
List<Move> childMoves = move(childToMove, host, hosts, movesConsidered, append(movesMade, moves));
if (childMoves == null) return null;
moves.addAll(childMoves);
}
return moves;
}
private List<Move> move(Node node, Node host, List<Node> hosts, List<Move> movesConsidered, List<Move> movesMade) {
if (contains(node, movesConsidered)) return null;
if (contains(node, movesMade)) return null;
List<Move> shortest = null;
for (Node target : hosts) {
if (target.equals(host)) continue;
Move move = new Move(node, host, target);
List<Move> childMoves = makeRoomFor(node, target, hosts, append(movesConsidered, move), movesMade);
if (childMoves == null) continue;
if (shortest == null || shortest.size() > childMoves.size() + 1) {
shortest = new ArrayList<>(childMoves);
shortest.add(move);
}
}
return shortest;
}
private boolean contains(Node node, List<Move> moves) {
return moves.stream().anyMatch(move -> move.node().equals(node));
}
private NodeResources addResourcesOf(List<Node> nodes, NodeResources resources) {
for (Node node : nodes)
resources = resources.add(node.resources());
return resources;
}
private Iterator<List<Node>> subsets(NodeList nodes, int maxSize) {
return new SubsetIterator(nodes.asList(), maxSize);
}
private List<Move> append(List<Move> a, List<Move> b) {
List<Move> list = new ArrayList<>();
list.addAll(a);
list.addAll(b);
return list;
}
private List<Move> append(List<Move> moves, Move move) {
List<Move> list = new ArrayList<>(moves);
list.add(move);
return list;
}
private NodeResources freeCapacityWith(List<Move> moves, Node host) {
NodeResources resources = hostCapacity.freeCapacityOf(host);
for (Move move : moves) {
if ( ! move.toHost().equals(host)) continue;
resources = resources.subtract(move.node().resources());
}
for (Move move : moves) {
if ( ! move.fromHost().equals(host)) continue;
resources = resources.add(move.fromHost().resources());
}
return resources;
}
}
private static class SolutionKey {
private final Node node;
private final Node host;
private final List<Move> movesConsidered;
private final List<Move> movesMade;
private final int hash;
public SolutionKey(Node node, Node host, List<Move> movesConsidered, List<Move> movesMade) {
this.node = node;
this.host = host;
this.movesConsidered = movesConsidered;
this.movesMade = movesMade;
hash = Objects.hash(node, host, movesConsidered, movesMade);
}
@Override
public int hashCode() { return hash; }
@Override
public boolean equals(Object o) {
if (o == this) return true;
if (o == null || o.getClass() != this.getClass()) return false;
SolutionKey other = (SolutionKey)o;
if ( ! other.node.equals(this.node)) return false;
if ( ! other.host.equals(this.host)) return false;
if ( ! other.movesConsidered.equals(this.movesConsidered)) return false;
if ( ! other.movesMade.equals(this.movesMade)) return false;
return true;
}
}
private static class SubsetIterator implements Iterator<List<Node>> {
private final List<Node> nodes;
private final int maxLength;
private int i = 0;
private List<Node> next = null;
public SubsetIterator(List<Node> nodes, int maxLength) {
this.nodes = new ArrayList<>(nodes.subList(0, Math.min(nodes.size(), 31)));
this.maxLength = maxLength;
}
@Override
public boolean hasNext() {
if (next != null) return true;
while (++i < 1<<nodes.size()) {
int ones = onesIn(i);
if (ones > maxLength) continue;
next = new ArrayList<>(ones);
for (int position = 0; position < nodes.size(); position++) {
if (hasOneAtPosition(position, i))
next.add(nodes.get(position));
}
return true;
}
return false;
}
@Override
public List<Node> next() {
if ( ! hasNext()) throw new IllegalStateException("No more elements");
var current = next;
next = null;
return current;
}
private boolean hasOneAtPosition(int position, int number) {
return (number & (1 << position)) > 0;
}
private int onesIn(int number) {
int ones = 0;
for (int position = 0; Math.pow(2, position) <= number; position++) {
if (hasOneAtPosition(position, number))
ones++;
}
return ones;
}
}
}
|
/**
 * Maintains a "spareHostCapacity" metric: the number of hosts which can be lost without any
 * node failing to find a new home. When there is no spare capacity, it attempts a single
 * mitigating node move to create some.
 */
class SpareCapacityMaintainer extends NodeRepositoryMaintainer {

    // Upper bound on search iterations in the capacity solver, to bound run time
    private final int maxIterations;
    private final Deployer deployer;
    private final Metric metric;

    public SpareCapacityMaintainer(Deployer deployer,
                                   NodeRepository nodeRepository,
                                   Metric metric,
                                   Duration interval) {
        this(deployer, nodeRepository, metric, interval,
             10_000 // default iteration budget for the capacity solver
            );
    }

    public SpareCapacityMaintainer(Deployer deployer,
                                   NodeRepository nodeRepository,
                                   Metric metric,
                                   Duration interval,
                                   int maxIterations) {
        super(nodeRepository, interval);
        this.deployer = deployer;
        this.metric = metric;
        this.maxIterations = maxIterations;
    }

    @Override
    protected void maintain() {
        // Spare capacity is only meaningful when multiple nodes may share a host
        if ( ! nodeRepository().zone().getCloud().allowHostSharing()) return;
        CapacityChecker capacityChecker = new CapacityChecker(nodeRepository());

        // Report hosts whose children use more resources than the host provides
        List<Node> overcommittedHosts = capacityChecker.findOvercommittedHosts();
        if (overcommittedHosts.size() != 0) {
            log.log(Level.WARNING, String.format("%d nodes are overcommitted! [ %s ]",
                                                 overcommittedHosts.size(),
                                                 overcommittedHosts.stream().map(Node::hostname).collect(Collectors.joining(", "))));
        }
        metric.set("overcommittedHosts", overcommittedHosts.size(), null);

        Optional<CapacityChecker.HostFailurePath> failurePath = capacityChecker.worstCaseHostLossLeadingToFailure();
        if (failurePath.isPresent()) {
            // hostsCausingFailure.size() hosts lost leads to failure, so one fewer can be lost safely
            int spareHostCapacity = failurePath.get().hostsCausingFailure.size() - 1;
            if (spareHostCapacity == 0) {
                // No headroom: try to create some by moving a node
                Move move = findMitigatingMove(failurePath.get());
                if (moving(move)) {
                    // A move is in progress; count it as one unit of spare capacity
                    spareHostCapacity++;
                }
            }
            metric.set("spareHostCapacity", spareHostCapacity, null);
        }
    }

    /** Returns whether the given mitigating move is (or is about to be) in progress. */
    private boolean moving(Move move) {
        if (move.isEmpty()) return false;
        // NOTE(review): assumes a retired membership implies a move is already pending -- confirm
        if (move.node().allocation().get().membership().retired()) return true;
        return move.execute(false, Agent.SpareCapacityMaintainer, deployer, metric, nodeRepository());
    }

    /**
     * Searches for the shortest sequence of node moves which frees enough capacity on a target
     * host for a given node. Subproblem solutions are memoized, and total work is bounded by
     * {@code maxIterations} over the lifetime of one solver instance.
     */
    private static class CapacitySolver {

        private final HostCapacity hostCapacity;
        private final int maxIterations;

        // Number of findRoomFor invocations so far; the search gives up when this exceeds maxIterations
        private int iterations = 0;

        CapacitySolver(HostCapacity hostCapacity, int maxIterations) {
            this.hostCapacity = hostCapacity;
            this.maxIterations = maxIterations;
        }

        /** The map of subproblem solutions already found. The value is null when there is no solution. */
        // NOTE(review): a null value is indistinguishable from an absent key in makeRoomFor below,
        // so "no solution" results are recomputed on every lookup; consider containsKey if that matters.
        private Map<SolutionKey, List<Move>> solutions = new HashMap<>();

        /**
         * Finds the shortest sequence of moves which makes room for the given node on the given host,
         * assuming the given moves already made over the given hosts' current allocation.
         *
         * @param node the node to make room for
         * @param host the target host to make room on
         * @param hosts the hosts onto which we can move nodes
         * @param movesConsidered the moves already being considered to add as part of this scenario
         *                        (after any moves made by this)
         * @param movesMade the moves already made in this scenario
         * @return the list of movesMade with the moves needed for this appended, in the order they should be performed,
         *         or null if no sequence could be found
         */
        List<Move> makeRoomFor(Node node, Node host, List<Node> hosts, List<Move> movesConsidered, List<Move> movesMade) {
            SolutionKey solutionKey = new SolutionKey(node, host, movesConsidered, movesMade);
            List<Move> solution = solutions.get(solutionKey);
            if (solution == null) {
                solution = findRoomFor(node, host, hosts, movesConsidered, movesMade);
                solutions.put(solutionKey, solution);
            }
            return solution;
        }

        private List<Move> findRoomFor(Node node, Node host, List<Node> hosts,
                                       List<Move> movesConsidered, List<Move> movesMade) {
            if (iterations++ > maxIterations)
                return null;
            if ( ! host.resources().satisfies(node.resources())) return null;
            NodeResources freeCapacity = freeCapacityWith(movesMade, host);
            if (freeCapacity.satisfies(node.resources())) return List.of(); // already enough room
            List<Move> shortest = null;
            // Try to move away each subset (of size up to 5) of the children currently on this host
            for (var i = subsets(hostCapacity.allNodes().childrenOf(host), 5); i.hasNext(); ) {
                List<Node> childrenToMove = i.next();
                // Skip subsets which cannot possibly free enough capacity
                if ( ! addResourcesOf(childrenToMove, freeCapacity).satisfies(node.resources())) continue;
                List<Move> moves = move(childrenToMove, host, hosts, movesConsidered, movesMade);
                if (moves == null) continue;
                if (shortest == null || moves.size() < shortest.size())
                    shortest = moves;
            }
            if (shortest == null) return null;
            return append(movesMade, shortest);
        }

        /** Moves all the given nodes off the host, or returns null if any of them cannot be moved. */
        private List<Move> move(List<Node> nodes, Node host, List<Node> hosts, List<Move> movesConsidered, List<Move> movesMade) {
            List<Move> moves = new ArrayList<>();
            for (Node childToMove : nodes) {
                List<Move> childMoves = move(childToMove, host, hosts, movesConsidered, append(movesMade, moves));
                if (childMoves == null) return null;
                moves.addAll(childMoves);
            }
            return moves;
        }

        /** Finds the shortest move sequence which moves the given node off the host, or null if none exists. */
        private List<Move> move(Node node, Node host, List<Node> hosts, List<Move> movesConsidered, List<Move> movesMade) {
            // Never move a node which is already part of this scenario, to avoid cycles
            if (contains(node, movesConsidered)) return null;
            if (contains(node, movesMade)) return null;
            List<Move> shortest = null;
            for (Node target : hosts) {
                if (target.equals(host)) continue; // moving within the same host frees nothing
                Move move = new Move(node, host, target);
                List<Move> childMoves = makeRoomFor(node, target, hosts, append(movesConsidered, move), movesMade);
                if (childMoves == null) continue;
                if (shortest == null || shortest.size() > childMoves.size() + 1) {
                    shortest = new ArrayList<>(childMoves);
                    shortest.add(move);
                }
            }
            return shortest;
        }

        private boolean contains(Node node, List<Move> moves) {
            return moves.stream().anyMatch(move -> move.node().equals(node));
        }

        /** Returns the given resources with the resources of all the given nodes added. */
        private NodeResources addResourcesOf(List<Node> nodes, NodeResources resources) {
            for (Node node : nodes)
                resources = resources.add(node.resources());
            return resources;
        }

        private Iterator<List<Node>> subsets(NodeList nodes, int maxSize) {
            return new SubsetIterator(nodes.asList(), maxSize);
        }

        private List<Move> append(List<Move> a, List<Move> b) {
            List<Move> list = new ArrayList<>();
            list.addAll(a);
            list.addAll(b);
            return list;
        }

        private List<Move> append(List<Move> moves, Move move) {
            List<Move> list = new ArrayList<>(moves);
            list.add(move);
            return list;
        }

        /** Returns the free capacity of the given host after applying the given moves. */
        private NodeResources freeCapacityWith(List<Move> moves, Node host) {
            NodeResources resources = hostCapacity.freeCapacityOf(host);
            for (Move move : moves) {
                if ( ! move.toHost().equals(host)) continue;
                resources = resources.subtract(move.node().resources());
            }
            for (Move move : moves) {
                if ( ! move.fromHost().equals(host)) continue;
                // A move away from this host frees the moved node's resources
                resources = resources.add(move.node().resources());
            }
            return resources;
        }
    }

    /** Memoization key for CapacitySolver: identifies a subproblem by its full input. */
    private static class SolutionKey {

        private final Node node;
        private final Node host;
        private final List<Move> movesConsidered;
        private final List<Move> movesMade;

        // Precomputed once, since keys are hashed repeatedly during the search
        private final int hash;

        public SolutionKey(Node node, Node host, List<Move> movesConsidered, List<Move> movesMade) {
            this.node = node;
            this.host = host;
            this.movesConsidered = movesConsidered;
            this.movesMade = movesMade;
            hash = Objects.hash(node, host, movesConsidered, movesMade);
        }

        @Override
        public int hashCode() { return hash; }

        @Override
        public boolean equals(Object o) {
            if (o == this) return true;
            if (o == null || o.getClass() != this.getClass()) return false;
            SolutionKey other = (SolutionKey)o;
            if ( ! other.node.equals(this.node)) return false;
            if ( ! other.host.equals(this.host)) return false;
            if ( ! other.movesConsidered.equals(this.movesConsidered)) return false;
            if ( ! other.movesMade.equals(this.movesMade)) return false;
            return true;
        }
    }

    /**
     * Iterates over all subsets of the given node list having at most maxLength elements.
     * Subsets are derived from the set bits of an int counter, so only the first 31 nodes
     * of the list are considered.
     */
    private static class SubsetIterator implements Iterator<List<Node>> {

        private final List<Node> nodes;
        private final int maxLength;

        // A counter whose binary representation selects which nodes to include in the next subset
        private int i = 0;

        // The next subset to return, or null when not computed yet
        private List<Node> next = null;

        public SubsetIterator(List<Node> nodes, int maxLength) {
            // Cap at 31 nodes so the subset bitmask always fits in a positive int
            this.nodes = new ArrayList<>(nodes.subList(0, Math.min(nodes.size(), 31)));
            this.maxLength = maxLength;
        }

        @Override
        public boolean hasNext() {
            if (next != null) return true;
            // Advance to the next counter value with at most maxLength bits set
            while (++i < 1<<nodes.size()) {
                int ones = Integer.bitCount(i);
                if (ones > maxLength) continue;
                next = new ArrayList<>(ones);
                for (int position = 0; position < nodes.size(); position++) {
                    if (hasOneAtPosition(position, i))
                        next.add(nodes.get(position));
                }
                return true;
            }
            return false;
        }

        @Override
        public List<Node> next() {
            // NOTE(review): the Iterator contract specifies NoSuchElementException here
            if ( ! hasNext()) throw new IllegalStateException("No more elements");
            var current = next;
            next = null;
            return current;
        }

        private boolean hasOneAtPosition(int position, int number) {
            return (number & (1 << position)) > 0;
        }
    }
}
|
I think the rollup job can be changed to work the same way as the schema change job.
|
/**
 * Processes an ALTER TABLE statement: validates that the alter clauses do not conflict
 * (only one category -- schema change, materialized view/rollup, partition, rename or
 * property change -- is allowed per statement), checks cluster capacity and database
 * quota, then dispatches to the matching handler.
 *
 * @param stmt the analyzed ALTER TABLE statement
 * @throws UserException if the db/table does not exist, clauses conflict, indexes are
 *         duplicated/missing, the table is in a non-NORMAL state, or it is required to
 *         be stable but is not
 */
public void processAlterTable(AlterTableStmt stmt) throws UserException {
    TableName dbTableName = stmt.getTbl();
    String dbName = dbTableName.getDb();
    final String clusterName = stmt.getClusterName();

    Database db = Catalog.getInstance().getDb(dbName);
    if (db == null) {
        ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName);
    }

    Catalog.getCurrentSystemInfo().checkClusterCapacity(clusterName);

    // Category flags; exactly one of these may be set by the validation loop below
    boolean hasSchemaChange = false;
    boolean hasAddMaterializedView = false;
    boolean hasDropRollup = false;
    boolean hasPartition = false;
    boolean hasRename = false;
    boolean hasModifyProp = false;

    List<AlterClause> alterClauses = stmt.getOps();

    // Pure drop operations only release space, so they are exempt from the quota check
    boolean allIsDropOps = true;
    for (AlterClause alterClause : alterClauses) {
        if (!(alterClause instanceof DropPartitionClause)
                && !(alterClause instanceof DropRollupClause)) {
            allIsDropOps = false;
            break;
        }
    }
    if (!allIsDropOps) {
        db.checkQuota();
    }

    boolean needTableStable = false;
    for (AlterClause alterClause : alterClauses) {
        if (!needTableStable) {
            needTableStable = ((AlterTableClause) alterClause).isNeedTableStable();
        }
        if ((alterClause instanceof AddColumnClause
                || alterClause instanceof AddColumnsClause
                || alterClause instanceof DropColumnClause
                || alterClause instanceof ModifyColumnClause
                || alterClause instanceof ReorderColumnsClause
                || alterClause instanceof CreateIndexClause
                || alterClause instanceof DropIndexClause)
                && !hasAddMaterializedView && !hasDropRollup && !hasPartition && !hasRename) {
            hasSchemaChange = true;
            if (alterClause instanceof CreateIndexClause) {
                Table table = db.getTable(dbTableName.getTbl());
                if (!(table instanceof OlapTable)) {
                    throw new AnalysisException("create index only support in olap table at current version.");
                }
                // Reject a new index whose name or exact column set already exists
                List<Index> indexes = ((OlapTable) table).getIndexes();
                IndexDef indexDef = ((CreateIndexClause) alterClause).getIndexDef();
                Set<String> newColset = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
                newColset.addAll(indexDef.getColumns());
                for (Index idx : indexes) {
                    if (idx.getIndexName().equalsIgnoreCase(indexDef.getIndexName())) {
                        throw new AnalysisException("index `" + indexDef.getIndexName() + "` already exist.");
                    }
                    Set<String> idxSet = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
                    idxSet.addAll(idx.getColumns());
                    if (newColset.equals(idxSet)) {
                        throw new AnalysisException("index for columns (" + String
                                .join(",", indexDef.getColumns()) + " ) already exist.");
                    }
                }
            } else if (alterClause instanceof DropIndexClause) {
                Table table = db.getTable(dbTableName.getTbl());
                if (!(table instanceof OlapTable)) {
                    throw new AnalysisException("drop index only support in olap table at current version.");
                }
                // The index to drop must exist (name match is case-insensitive)
                String indexName = ((DropIndexClause) alterClause).getIndexName();
                List<Index> indexes = ((OlapTable) table).getIndexes();
                Index found = null;
                for (Index idx : indexes) {
                    if (idx.getIndexName().equalsIgnoreCase(indexName)) {
                        found = idx;
                        break;
                    }
                }
                if (found == null) {
                    throw new AnalysisException("index " + indexName + " does not exist");
                }
            }
        } else if ((alterClause instanceof AddRollupClause)
                && !hasSchemaChange && !hasAddMaterializedView && !hasDropRollup
                && !hasPartition && !hasRename && !hasModifyProp) {
            hasAddMaterializedView = true;
        } else if (alterClause instanceof DropRollupClause && !hasSchemaChange && !hasAddMaterializedView && !hasDropRollup
                && !hasPartition && !hasRename && !hasModifyProp) {
            hasDropRollup = true;
        } else if (alterClause instanceof AddPartitionClause && !hasSchemaChange && !hasAddMaterializedView && !hasDropRollup
                && !hasPartition && !hasRename && !hasModifyProp) {
            hasPartition = true;
        } else if (alterClause instanceof DropPartitionClause && !hasSchemaChange && !hasAddMaterializedView && !hasDropRollup
                && !hasPartition && !hasRename && !hasModifyProp) {
            hasPartition = true;
        } else if (alterClause instanceof ModifyPartitionClause && !hasSchemaChange && !hasAddMaterializedView
                && !hasDropRollup && !hasPartition && !hasRename && !hasModifyProp) {
            hasPartition = true;
        } else if ((alterClause instanceof TableRenameClause || alterClause instanceof RollupRenameClause
                || alterClause instanceof PartitionRenameClause || alterClause instanceof ColumnRenameClause)
                && !hasSchemaChange && !hasAddMaterializedView && !hasDropRollup && !hasPartition && !hasRename
                && !hasModifyProp) {
            hasRename = true;
        } else if (alterClause instanceof ModifyTablePropertiesClause && !hasSchemaChange && !hasAddMaterializedView
                && !hasDropRollup && !hasPartition && !hasRename && !hasModifyProp) {
            hasModifyProp = true;
        } else {
            throw new DdlException("Conflicting alter clauses. see help for more information");
        }
    }

    boolean hasAddPartition = false;
    String tableName = dbTableName.getTbl();
    db.writeLock();
    try {
        Table table = db.getTable(tableName);
        if (table == null) {
            ErrorReport.reportDdlException(ErrorCode.ERR_BAD_TABLE_ERROR, tableName);
        }
        if (table.getType() != TableType.OLAP) {
            throw new DdlException("Do not support alter non-OLAP table[" + tableName + "]");
        }
        OlapTable olapTable = (OlapTable) table;
        if (olapTable.getPartitions().size() == 0 && !hasPartition) {
            throw new DdlException("table with empty parition cannot do schema change. [" + tableName + "]");
        }
        if (olapTable.getState() != OlapTableState.NORMAL) {
            throw new DdlException("Table[" + table.getName() + "]'s state is not NORMAL. Do not allow doing ALTER ops");
        }
        // Fix: schema change and materialized view jobs check table stability themselves,
        // so the up-front stability check only applies to the remaining operations.
        if (needTableStable && !hasSchemaChange && !hasAddMaterializedView) {
            boolean isStable = olapTable.isStable(Catalog.getCurrentSystemInfo(),
                    Catalog.getCurrentCatalog().getTabletScheduler(),
                    db.getClusterName());
            if (!isStable) {
                throw new DdlException("table [" + olapTable.getName() + "] is not stable."
                        + " Some tablets of this table may not be healthy or are being scheduled."
                        + " You need to repair the table first"
                        + " or stop cluster balance. See 'help admin;'.");
            }
        }
        // Dispatch to the handler for the single category detected above
        if (hasSchemaChange || hasModifyProp) {
            schemaChangeHandler.process(alterClauses, clusterName, db, olapTable);
        } else if (hasAddMaterializedView || hasDropRollup) {
            materializedViewHandler.process(alterClauses, clusterName, db, olapTable);
        } else if (hasPartition) {
            Preconditions.checkState(alterClauses.size() == 1);
            AlterClause alterClause = alterClauses.get(0);
            if (alterClause instanceof DropPartitionClause) {
                DynamicPartitionUtil.checkAlterAllowed(olapTable);
                Catalog.getInstance().dropPartition(db, olapTable, ((DropPartitionClause) alterClause));
            } else if (alterClause instanceof ModifyPartitionClause) {
                DynamicPartitionUtil.checkAlterAllowed(olapTable);
                Catalog.getInstance().modifyPartition(db, olapTable, ((ModifyPartitionClause) alterClause));
            } else {
                // AddPartitionClause is handled below, outside the db write lock
                hasAddPartition = true;
            }
        } else if (hasRename) {
            processRename(db, olapTable, alterClauses);
        }
    } finally {
        db.writeUnlock();
    }

    // Adding a partition is done without holding the db write lock
    if (hasAddPartition) {
        Preconditions.checkState(alterClauses.size() == 1);
        AlterClause alterClause = alterClauses.get(0);
        if (alterClause instanceof AddPartitionClause) {
            DynamicPartitionUtil.checkAlterAllowed((OlapTable) db.getTable(tableName));
            Catalog.getInstance().addPartition(db, tableName, (AddPartitionClause) alterClause);
        } else {
            Preconditions.checkState(false);
        }
    }
}
|
if (needTableStable && !hasSchemaChange) {
|
/**
 * Processes an ALTER TABLE statement: validates that the alter clauses do not conflict
 * (only one category -- schema change, materialized view/rollup, partition, rename or
 * property change -- is allowed per statement), checks cluster capacity and database
 * quota, then dispatches to the matching handler.
 *
 * @param stmt the analyzed ALTER TABLE statement
 * @throws UserException if the db/table does not exist, clauses conflict, indexes are
 *         duplicated/missing, the table is in a non-NORMAL state, or it is required to
 *         be stable but is not
 */
public void processAlterTable(AlterTableStmt stmt) throws UserException {
    TableName dbTableName = stmt.getTbl();
    String dbName = dbTableName.getDb();
    final String clusterName = stmt.getClusterName();
    Database db = Catalog.getInstance().getDb(dbName);
    if (db == null) {
        ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName);
    }
    Catalog.getCurrentSystemInfo().checkClusterCapacity(clusterName);
    // Category flags; exactly one of these may be set by the validation loop below
    boolean hasSchemaChange = false;
    boolean hasAddMaterializedView = false;
    boolean hasDropRollup = false;
    boolean hasPartition = false;
    boolean hasRename = false;
    boolean hasModifyProp = false;
    List<AlterClause> alterClauses = stmt.getOps();
    // Pure drop operations only release space, so they are exempt from the quota check
    boolean allIsDropOps = true;
    for (AlterClause alterClause : alterClauses) {
        if (!(alterClause instanceof DropPartitionClause)
                && !(alterClause instanceof DropRollupClause)) {
            allIsDropOps = false;
            break;
        }
    }
    if (!allIsDropOps) {
        db.checkQuota();
    }
    boolean needTableStable = false;
    for (AlterClause alterClause : alterClauses) {
        if (!needTableStable) {
            needTableStable = ((AlterTableClause) alterClause).isNeedTableStable();
        }
        if ((alterClause instanceof AddColumnClause
                || alterClause instanceof AddColumnsClause
                || alterClause instanceof DropColumnClause
                || alterClause instanceof ModifyColumnClause
                || alterClause instanceof ReorderColumnsClause
                || alterClause instanceof CreateIndexClause
                || alterClause instanceof DropIndexClause)
                && !hasAddMaterializedView && !hasDropRollup && !hasPartition && !hasRename) {
            hasSchemaChange = true;
            if (alterClause instanceof CreateIndexClause) {
                Table table = db.getTable(dbTableName.getTbl());
                if (!(table instanceof OlapTable)) {
                    throw new AnalysisException("create index only support in olap table at current version.");
                }
                // Reject a new index whose name or exact column set already exists
                List<Index> indexes = ((OlapTable) table).getIndexes();
                IndexDef indexDef = ((CreateIndexClause) alterClause).getIndexDef();
                Set<String> newColset = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
                newColset.addAll(indexDef.getColumns());
                for (Index idx : indexes) {
                    if (idx.getIndexName().equalsIgnoreCase(indexDef.getIndexName())) {
                        throw new AnalysisException("index `" + indexDef.getIndexName() + "` already exist.");
                    }
                    Set<String> idxSet = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
                    idxSet.addAll(idx.getColumns());
                    if (newColset.equals(idxSet)) {
                        throw new AnalysisException("index for columns (" + String
                                .join(",", indexDef.getColumns()) + " ) already exist.");
                    }
                }
            } else if (alterClause instanceof DropIndexClause) {
                Table table = db.getTable(dbTableName.getTbl());
                if (!(table instanceof OlapTable)) {
                    throw new AnalysisException("drop index only support in olap table at current version.");
                }
                // The index to drop must exist (name match is case-insensitive)
                String indexName = ((DropIndexClause) alterClause).getIndexName();
                List<Index> indexes = ((OlapTable) table).getIndexes();
                Index found = null;
                for (Index idx : indexes) {
                    if (idx.getIndexName().equalsIgnoreCase(indexName)) {
                        found = idx;
                        break;
                    }
                }
                if (found == null) {
                    throw new AnalysisException("index " + indexName + " does not exist");
                }
            }
        } else if ((alterClause instanceof AddRollupClause)
                && !hasSchemaChange && !hasAddMaterializedView && !hasDropRollup
                && !hasPartition && !hasRename && !hasModifyProp) {
            hasAddMaterializedView = true;
        } else if (alterClause instanceof DropRollupClause && !hasSchemaChange && !hasAddMaterializedView && !hasDropRollup
                && !hasPartition && !hasRename && !hasModifyProp) {
            hasDropRollup = true;
        } else if (alterClause instanceof AddPartitionClause && !hasSchemaChange && !hasAddMaterializedView && !hasDropRollup
                && !hasPartition && !hasRename && !hasModifyProp) {
            hasPartition = true;
        } else if (alterClause instanceof DropPartitionClause && !hasSchemaChange && !hasAddMaterializedView && !hasDropRollup
                && !hasPartition && !hasRename && !hasModifyProp) {
            hasPartition = true;
        } else if (alterClause instanceof ModifyPartitionClause && !hasSchemaChange && !hasAddMaterializedView
                && !hasDropRollup && !hasPartition && !hasRename && !hasModifyProp) {
            hasPartition = true;
        } else if ((alterClause instanceof TableRenameClause || alterClause instanceof RollupRenameClause
                || alterClause instanceof PartitionRenameClause || alterClause instanceof ColumnRenameClause)
                && !hasSchemaChange && !hasAddMaterializedView && !hasDropRollup && !hasPartition && !hasRename
                && !hasModifyProp) {
            hasRename = true;
        } else if (alterClause instanceof ModifyTablePropertiesClause && !hasSchemaChange && !hasAddMaterializedView
                && !hasDropRollup && !hasPartition && !hasRename && !hasModifyProp) {
            hasModifyProp = true;
        } else {
            throw new DdlException("Conflicting alter clauses. see help for more information");
        }
    }
    boolean hasAddPartition = false;
    String tableName = dbTableName.getTbl();
    db.writeLock();
    try {
        Table table = db.getTable(tableName);
        if (table == null) {
            ErrorReport.reportDdlException(ErrorCode.ERR_BAD_TABLE_ERROR, tableName);
        }
        if (table.getType() != TableType.OLAP) {
            throw new DdlException("Do not support alter non-OLAP table[" + tableName + "]");
        }
        OlapTable olapTable = (OlapTable) table;
        if (olapTable.getPartitions().size() == 0 && !hasPartition) {
            throw new DdlException("table with empty parition cannot do schema change. [" + tableName + "]");
        }
        if (olapTable.getState() != OlapTableState.NORMAL) {
            throw new DdlException("Table[" + table.getName() + "]'s state is not NORMAL. Do not allow doing ALTER ops");
        }
        // Schema change and materialized view jobs check table stability themselves,
        // so the up-front stability check only applies to the remaining operations.
        if (needTableStable && !hasSchemaChange && !hasAddMaterializedView) {
            boolean isStable = olapTable.isStable(Catalog.getCurrentSystemInfo(),
                    Catalog.getCurrentCatalog().getTabletScheduler(),
                    db.getClusterName());
            if (!isStable) {
                throw new DdlException("table [" + olapTable.getName() + "] is not stable."
                        + " Some tablets of this table may not be healthy or are being scheduled."
                        + " You need to repair the table first"
                        + " or stop cluster balance. See 'help admin;'.");
            }
        }
        // Dispatch to the handler for the single category detected above
        if (hasSchemaChange || hasModifyProp) {
            schemaChangeHandler.process(alterClauses, clusterName, db, olapTable);
        } else if (hasAddMaterializedView || hasDropRollup) {
            materializedViewHandler.process(alterClauses, clusterName, db, olapTable);
        } else if (hasPartition) {
            Preconditions.checkState(alterClauses.size() == 1);
            AlterClause alterClause = alterClauses.get(0);
            if (alterClause instanceof DropPartitionClause) {
                DynamicPartitionUtil.checkAlterAllowed(olapTable);
                Catalog.getInstance().dropPartition(db, olapTable, ((DropPartitionClause) alterClause));
            } else if (alterClause instanceof ModifyPartitionClause) {
                DynamicPartitionUtil.checkAlterAllowed(olapTable);
                Catalog.getInstance().modifyPartition(db, olapTable, ((ModifyPartitionClause) alterClause));
            } else {
                // AddPartitionClause is handled below, outside the db write lock
                hasAddPartition = true;
            }
        } else if (hasRename) {
            processRename(db, olapTable, alterClauses);
        }
    } finally {
        db.writeUnlock();
    }
    // Adding a partition is done without holding the db write lock
    if (hasAddPartition) {
        Preconditions.checkState(alterClauses.size() == 1);
        AlterClause alterClause = alterClauses.get(0);
        if (alterClause instanceof AddPartitionClause) {
            DynamicPartitionUtil.checkAlterAllowed((OlapTable) db.getTable(tableName));
            Catalog.getInstance().addPartition(db, tableName, (AddPartitionClause) alterClause);
        } else {
            Preconditions.checkState(false);
        }
    }
}
|
/**
 * Coordinates ALTER operations, delegating to a schema change handler, a materialized
 * view (rollup) handler and a cluster/system handler.
 */
class Alter {
    private static final Logger LOG = LogManager.getLogger(Alter.class);

    private AlterHandler schemaChangeHandler;
    private AlterHandler materializedViewHandler;
    private SystemHandler clusterHandler;

    public Alter() {
        schemaChangeHandler = new SchemaChangeHandler();
        materializedViewHandler = new MaterializedViewHandler();
        clusterHandler = new SystemHandler();
    }

    /** Starts all alter handlers. */
    public void start() {
        schemaChangeHandler.start();
        materializedViewHandler.start();
        clusterHandler.start();
    }

    /**
     * Creates a materialized view on an OLAP table. Requires the table to be in NORMAL
     * state and stable, and checks cluster capacity and db quota first.
     *
     * @throws DdlException if the table is not OLAP, not NORMAL, or not stable
     */
    public void processCreateMaterializedView(CreateMaterializedViewStmt stmt) throws DdlException, AnalysisException {
        String tableName = stmt.getBaseIndexName();
        Database db = Catalog.getInstance().getDb(stmt.getDBName());
        // NOTE(review): db (and table below) may be null if they do not exist,
        // which would throw NPE rather than a DdlException -- confirm callers guarantee existence.
        Catalog.getCurrentSystemInfo().checkClusterCapacity(stmt.getClusterName());
        db.checkQuota();
        db.writeLock();
        try {
            Table table = db.getTable(tableName);
            if (table.getType() != TableType.OLAP) {
                throw new DdlException("Do not support alter non-OLAP table[" + tableName + "]");
            }
            OlapTable olapTable = (OlapTable) table;
            if (olapTable.getState() != OlapTableState.NORMAL) {
                throw new DdlException("Table[" + table.getName() + "]'s state is not NORMAL. "
                        + "Do not allow doing materialized view");
            }
            // The materialized view job requires all tablets to be healthy and unscheduled
            boolean isStable = olapTable.isStable(Catalog.getCurrentSystemInfo(),
                    Catalog.getCurrentCatalog().getTabletScheduler(),
                    db.getClusterName());
            if (!isStable) {
                throw new DdlException("table [" + olapTable.getName() + "] is not stable."
                        + " Some tablets of this table may not be healthy or are being "
                        + "scheduled."
                        + " You need to repair the table first"
                        + " or stop cluster balance. See 'help admin;'.");
            }
            ((MaterializedViewHandler)materializedViewHandler).processCreateMaterializedView(stmt, db, olapTable);
        } finally {
            db.writeUnlock();
        }
    }

    /**
     * Alters the definition of an existing VIEW, replacing its inline definition and
     * full schema under the db write lock.
     */
    public void processAlterView(AlterViewStmt stmt, ConnectContext ctx) throws UserException {
        TableName dbTableName = stmt.getTbl();
        String dbName = dbTableName.getDb();
        Database db = Catalog.getInstance().getDb(dbName);
        if (db == null) {
            ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName);
        }
        String tableName = dbTableName.getTbl();
        db.writeLock();
        try {
            Table table = db.getTable(tableName);
            if (table == null) {
                ErrorReport.reportDdlException(ErrorCode.ERR_BAD_TABLE_ERROR, tableName);
            }
            if (table.getType() != TableType.VIEW) {
                throw new DdlException("The specified table [" + tableName + "] is not a view");
            }
            View view = (View) table;
            modifyViewDef(db, view, stmt.getInlineViewDef(), ctx.getSessionVariable().getSqlMode(), stmt.getColumns());
        } finally {
            db.writeUnlock();
        }
    }

    /**
     * Applies a new inline definition and schema to the view, re-registers it in the db,
     * and writes the change to the edit log. Caller holds the db write lock.
     */
    private void modifyViewDef(Database db, View view, String inlineViewDef, long sqlMode, List<Column> newFullSchema) throws DdlException {
        String viewName = view.getName();
        view.setInlineViewDefWithSqlMode(inlineViewDef, sqlMode);
        try {
            view.init();
        } catch (UserException e) {
            throw new DdlException("failed to init view stmt", e);
        }
        view.setNewFullSchema(newFullSchema);
        // Re-register the view so the db picks up the new definition
        db.dropTable(viewName);
        db.createTable(view);
        AlterViewInfo alterViewInfo = new AlterViewInfo(db.getId(), view.getId(), inlineViewDef, newFullSchema, sqlMode);
        Catalog.getInstance().getEditLog().logModifyViewDef(alterViewInfo);
        LOG.info("modify view[{}] definition to {}", viewName, inlineViewDef);
    }

    /** Replays a view definition change from the edit log (no new edit-log entry is written). */
    public void replayModifyViewDef(AlterViewInfo alterViewInfo) throws DdlException {
        long dbId = alterViewInfo.getDbId();
        long tableId = alterViewInfo.getTableId();
        String inlineViewDef = alterViewInfo.getInlineViewDef();
        List<Column> newFullSchema = alterViewInfo.getNewFullSchema();
        Database db = Catalog.getInstance().getDb(dbId);
        db.writeLock();
        try {
            View view = (View) db.getTable(tableId);
            String viewName = view.getName();
            view.setInlineViewDefWithSqlMode(inlineViewDef, alterViewInfo.getSqlMode());
            try {
                view.init();
            } catch (UserException e) {
                throw new DdlException("failed to init view stmt", e);
            }
            view.setNewFullSchema(newFullSchema);
            // Re-register the view so the db picks up the new definition
            db.dropTable(viewName);
            db.createTable(view);
            LOG.info("replay modify view[{}] definition to {}", viewName, inlineViewDef);
        } finally {
            db.writeUnlock();
        }
    }

    /** Forwards an ALTER SYSTEM statement to the cluster handler. */
    public void processAlterCluster(AlterSystemStmt stmt) throws UserException {
        clusterHandler.process(Arrays.asList(stmt.getAlterClause()), stmt.getClusterName(), null, null);
    }

    /** Applies the single rename clause in the given list; other clause types are a programming error. */
    private void processRename(Database db, OlapTable table, List<AlterClause> alterClauses) throws DdlException {
        for (AlterClause alterClause : alterClauses) {
            if (alterClause instanceof TableRenameClause) {
                Catalog.getInstance().renameTable(db, table, (TableRenameClause) alterClause);
                break;
            } else if (alterClause instanceof RollupRenameClause) {
                Catalog.getInstance().renameRollup(db, table, (RollupRenameClause) alterClause);
                break;
            } else if (alterClause instanceof PartitionRenameClause) {
                Catalog.getInstance().renamePartition(db, table, (PartitionRenameClause) alterClause);
                break;
            } else if (alterClause instanceof ColumnRenameClause) {
                Catalog.getInstance().renameColumn(db, table, (ColumnRenameClause) alterClause);
                break;
            } else {
                Preconditions.checkState(false);
            }
        }
    }

    public AlterHandler getSchemaChangeHandler() {
        return this.schemaChangeHandler;
    }

    public AlterHandler getMaterializedViewHandler() {
        return this.materializedViewHandler;
    }

    public AlterHandler getClusterHandler() {
        return this.clusterHandler;
    }
}
|
/**
 * Front-end entry point for ALTER-family DDL: CREATE MATERIALIZED VIEW,
 * ALTER VIEW, renames and ALTER SYSTEM. Validation is performed here under the
 * database write lock; the actual work is delegated to three dedicated
 * handlers, each running its own background job thread.
 */
class Alter {
    private static final Logger LOG = LogManager.getLogger(Alter.class);
    private AlterHandler schemaChangeHandler;       // ALTER TABLE schema-change jobs
    private AlterHandler materializedViewHandler;   // rollup / materialized-view jobs
    private SystemHandler clusterHandler;           // ALTER SYSTEM (cluster/node) ops
    public Alter() {
        schemaChangeHandler = new SchemaChangeHandler();
        materializedViewHandler = new MaterializedViewHandler();
        clusterHandler = new SystemHandler();
    }
    /** Starts the background job threads of all three handlers. */
    public void start() {
        schemaChangeHandler.start();
        materializedViewHandler.start();
        clusterHandler.start();
    }
    /**
     * Validates and submits a CREATE MATERIALIZED VIEW job on an OLAP table.
     * The base table must be in NORMAL state and stable (all tablets healthy,
     * none being scheduled) before the job is handed to the MV handler.
     *
     * @throws DdlException if the table is not OLAP, not NORMAL, or unstable
     */
    public void processCreateMaterializedView(CreateMaterializedViewStmt stmt) throws DdlException, AnalysisException {
        String tableName = stmt.getBaseIndexName();
        Database db = Catalog.getInstance().getDb(stmt.getDBName());
        // Capacity/quota checks happen before taking the db write lock.
        Catalog.getCurrentSystemInfo().checkClusterCapacity(stmt.getClusterName());
        db.checkQuota();
        db.writeLock();
        try {
            Table table = db.getTable(tableName);
            if (table.getType() != TableType.OLAP) {
                throw new DdlException("Do not support alter non-OLAP table[" + tableName + "]");
            }
            OlapTable olapTable = (OlapTable) table;
            if (olapTable.getState() != OlapTableState.NORMAL) {
                throw new DdlException("Table[" + table.getName() + "]'s state is not NORMAL. "
                        + "Do not allow doing materialized view");
            }
            // Refuse to start while tablets are unhealthy or being rebalanced;
            // the MV build requires a stable base table.
            boolean isStable = olapTable.isStable(Catalog.getCurrentSystemInfo(),
                    Catalog.getCurrentCatalog().getTabletScheduler(),
                    db.getClusterName());
            if (!isStable) {
                throw new DdlException("table [" + olapTable.getName() + "] is not stable."
                        + " Some tablets of this table may not be healthy or are being "
                        + "scheduled."
                        + " You need to repair the table first"
                        + " or stop cluster balance. See 'help admin;'.");
            }
            ((MaterializedViewHandler)materializedViewHandler).processCreateMaterializedView(stmt, db, olapTable);
        } finally {
            db.writeUnlock();
        }
    }
    /**
     * Handles ALTER VIEW: replaces the view's inline definition and full schema.
     *
     * @throws UserException if the db/table is missing or the target is not a view
     */
    public void processAlterView(AlterViewStmt stmt, ConnectContext ctx) throws UserException {
        TableName dbTableName = stmt.getTbl();
        String dbName = dbTableName.getDb();
        Database db = Catalog.getInstance().getDb(dbName);
        if (db == null) {
            ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbName);
        }
        String tableName = dbTableName.getTbl();
        db.writeLock();
        try {
            Table table = db.getTable(tableName);
            if (table == null) {
                ErrorReport.reportDdlException(ErrorCode.ERR_BAD_TABLE_ERROR, tableName);
            }
            if (table.getType() != TableType.VIEW) {
                throw new DdlException("The specified table [" + tableName + "] is not a view");
            }
            View view = (View) table;
            // Record the session's SQL mode so the definition re-parses
            // identically on replay.
            modifyViewDef(db, view, stmt.getInlineViewDef(), ctx.getSessionVariable().getSqlMode(), stmt.getColumns());
        } finally {
            db.writeUnlock();
        }
    }
    /**
     * Applies a new inline definition/schema to a view (caller holds the db
     * write lock), swaps it into the db via drop + create, and writes an
     * edit-log entry so follower FEs can replay the change.
     */
    private void modifyViewDef(Database db, View view, String inlineViewDef, long sqlMode, List<Column> newFullSchema) throws DdlException {
        String viewName = view.getName();
        view.setInlineViewDefWithSqlMode(inlineViewDef, sqlMode);
        try {
            view.init();
        } catch (UserException e) {
            throw new DdlException("failed to init view stmt", e);
        }
        view.setNewFullSchema(newFullSchema);
        db.dropTable(viewName);
        db.createTable(view);
        AlterViewInfo alterViewInfo = new AlterViewInfo(db.getId(), view.getId(), inlineViewDef, newFullSchema, sqlMode);
        Catalog.getInstance().getEditLog().logModifyViewDef(alterViewInfo);
        LOG.info("modify view[{}] definition to {}", viewName, inlineViewDef);
    }
    /**
     * Replays a logged view-definition change on a non-master FE. Mirrors
     * {@link #modifyViewDef} but does not write a new edit-log entry.
     */
    public void replayModifyViewDef(AlterViewInfo alterViewInfo) throws DdlException {
        long dbId = alterViewInfo.getDbId();
        long tableId = alterViewInfo.getTableId();
        String inlineViewDef = alterViewInfo.getInlineViewDef();
        List<Column> newFullSchema = alterViewInfo.getNewFullSchema();
        Database db = Catalog.getCurrentCatalog().getDb(dbId);
        db.writeLock();
        try {
            View view = (View) db.getTable(tableId);
            String viewName = view.getName();
            // Restore the SQL mode the definition was originally written with.
            view.setInlineViewDefWithSqlMode(inlineViewDef, alterViewInfo.getSqlMode());
            try {
                view.init();
            } catch (UserException e) {
                throw new DdlException("failed to init view stmt", e);
            }
            view.setNewFullSchema(newFullSchema);
            db.dropTable(viewName);
            db.createTable(view);
            LOG.info("replay modify view[{}] definition to {}", viewName, inlineViewDef);
        } finally {
            db.writeUnlock();
        }
    }
    /** Handles ALTER SYSTEM by delegating its single clause to the cluster handler. */
    public void processAlterCluster(AlterSystemStmt stmt) throws UserException {
        clusterHandler.process(Arrays.asList(stmt.getAlterClause()), stmt.getClusterName(), null, null);
    }
    /**
     * Dispatches the first recognized rename clause (table / rollup / partition /
     * column) to the corresponding catalog rename operation.
     */
    private void processRename(Database db, OlapTable table, List<AlterClause> alterClauses) throws DdlException {
        for (AlterClause alterClause : alterClauses) {
            if (alterClause instanceof TableRenameClause) {
                Catalog.getInstance().renameTable(db, table, (TableRenameClause) alterClause);
                break;
            } else if (alterClause instanceof RollupRenameClause) {
                Catalog.getInstance().renameRollup(db, table, (RollupRenameClause) alterClause);
                break;
            } else if (alterClause instanceof PartitionRenameClause) {
                Catalog.getInstance().renamePartition(db, table, (PartitionRenameClause) alterClause);
                break;
            } else if (alterClause instanceof ColumnRenameClause) {
                Catalog.getInstance().renameColumn(db, table, (ColumnRenameClause) alterClause);
                break;
            } else {
                // Unknown clause type is a programming error in the caller.
                Preconditions.checkState(false);
            }
        }
    }
    public AlterHandler getSchemaChangeHandler() {
        return this.schemaChangeHandler;
    }
    public AlterHandler getMaterializedViewHandler() {
        return this.materializedViewHandler;
    }
    public AlterHandler getClusterHandler() {
        return this.clusterHandler;
    }
}
|
5 is the default priority value; it turns out we can go lower than the default, so I'll change this.
|
public int getPriority() {
return 9;
}
|
return 9;
|
    /**
     * Provider priority for gRPC's load-balancer registry; 4 keeps this below
     * the default priority of 5 (see review note above this method's history).
     */
    public int getPriority() {
        return 4;
    }
|
/**
 * gRPC {@link LoadBalancerProvider} backed by SmallRye Stork. Registers the
 * Stork balancing policy, parses its per-service configuration ("service-name")
 * and creates a {@link LoadBalancer} whose picker delegates instance selection
 * to the Stork load balancer configured for that service.
 */
class GrpcLoadBalancerProvider extends LoadBalancerProvider {
    private static final Logger log = Logger.getLogger(GrpcLoadBalancerProvider.class);

    @Override
    public boolean isAvailable() {
        return true;
    }

    // FIX: the previous revision had two consecutive @Override annotations with
    // no method between them (a compile error) — the getPriority() override had
    // been dropped. Restored below with the reviewed priority value.
    @Override
    public int getPriority() {
        // 5 is the registry default; this provider is selected by policy name,
        // so it can safely sit below the default.
        return 4;
    }

    @Override
    public String getPolicyName() {
        // Use the shared constant so the policy name stays in sync with the
        // Stork name resolver / service config that references it.
        return Stork.STORK;
    }

    /**
     * Parses the service config for this policy; only "service-name" is read.
     * Returns an INTERNAL error when it is missing or unreadable.
     */
    @Override
    public NameResolver.ConfigOrError parseLoadBalancingPolicyConfig(Map<String, ?> rawConfig) {
        String serviceName;
        try {
            serviceName = JsonUtil.getString(rawConfig, "service-name");
        } catch (RuntimeException e) {
            log.error("Failed to parse Stork configuration: " + rawConfig, e);
            return NameResolver.ConfigOrError.fromError(Status.INTERNAL);
        }
        if (serviceName == null) {
            log.error("No 'service-name' defined in the Stork for gRPC configuration: " + rawConfig);
            return NameResolver.ConfigOrError.fromError(Status.INTERNAL);
        }
        return NameResolver.ConfigOrError
                .fromConfig(new StorkLoadBalancerConfig(serviceName));
    }

    @Override
    public LoadBalancer newLoadBalancer(LoadBalancer.Helper helper) {
        return new LoadBalancer() {
            String serviceName;

            @Override
            public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) {
                List<EquivalentAddressGroup> addresses = resolvedAddresses.getAddresses();
                // FIX: guard the cast — a foreign or missing config previously
                // failed later with a confusing ClassCastException/NPE.
                Object loadBalancerConfig = resolvedAddresses.getLoadBalancingPolicyConfig();
                if (!(loadBalancerConfig instanceof StorkLoadBalancerConfig)) {
                    throw new IllegalStateException("invalid configuration for a Stork Load Balancer : " + loadBalancerConfig);
                }
                StorkLoadBalancerConfig config = (StorkLoadBalancerConfig) loadBalancerConfig;
                Map<ServiceInstance, Subchannel> subChannels = new TreeMap<>(Comparator.comparingLong(ServiceInstance::getId));
                // Instances whose subchannel is currently READY.
                Set<ServiceInstance> activeSubchannels = Collections.newSetFromMap(new ConcurrentHashMap<>());
                AtomicReference<ConnectivityState> state = new AtomicReference<>(ConnectivityState.CONNECTING);
                serviceName = config.serviceName;
                final StorkSubchannelPicker picker = new StorkSubchannelPicker(subChannels, serviceName, activeSubchannels);
                for (EquivalentAddressGroup addressGroup : addresses) {
                    ServiceInstance serviceInstance = addressGroup.getAttributes()
                            .get(GrpcStorkServiceDiscovery.SERVICE_INSTANCE);
                    CreateSubchannelArgs subChannelArgs = CreateSubchannelArgs.newBuilder()
                            .setAddresses(addressGroup)
                            .setAttributes(addressGroup.getAttributes())
                            .build();
                    Subchannel subchannel = helper.createSubchannel(subChannelArgs);
                    subchannel.start(new SubchannelStateListener() {
                        @Override
                        public void onSubchannelState(ConnectivityStateInfo stateInfo) {
                            if (stateInfo.getState() == TRANSIENT_FAILURE || stateInfo.getState() == IDLE) {
                                Status status = stateInfo.getStatus();
                                log.error("gRPC Sub Channel failed", status == null ? null : status.getCause());
                                helper.refreshNameResolution();
                            }
                            switch (stateInfo.getState()) {
                                case READY:
                                    activeSubchannels.add(serviceInstance);
                                    if (state.getAndSet(ConnectivityState.READY) != ConnectivityState.READY) {
                                        helper.updateBalancingState(state.get(), picker);
                                    }
                                    break;
                                case CONNECTING:
                                case TRANSIENT_FAILURE:
                                case IDLE:
                                case SHUTDOWN:
                                    activeSubchannels.remove(serviceInstance);
                                    log.debugf("subchannel changed state to %s", stateInfo.getState());
                                    // Only report a non-READY state once the last
                                    // active subchannel is gone.
                                    if (activeSubchannels.isEmpty()
                                            && state.compareAndSet(ConnectivityState.READY, stateInfo.getState())) {
                                        helper.updateBalancingState(state.get(), picker);
                                    }
                                    break;
                            }
                        }
                    });
                    subChannels.put(serviceInstance, subchannel);
                }
                helper.updateBalancingState(state.get(), picker);
            }

            @Override
            public void handleNameResolutionError(Status error) {
                log.errorf("Name resolution failed for service '%s'", serviceName);
            }

            @Override
            public void shutdown() {
                log.debugf("Shutting down load balancer for service '%s'", serviceName);
            }
        };
    }

    /** Parsed policy configuration: just the Stork service name. */
    static class StorkLoadBalancerConfig {
        final String serviceName;

        StorkLoadBalancerConfig(String serviceName) {
            this.serviceName = serviceName;
        }
    }

    /** Picker that asks the Stork load balancer to choose a service instance. */
    static class StorkSubchannelPicker extends LoadBalancer.SubchannelPicker {
        private final Map<ServiceInstance, LoadBalancer.Subchannel> subChannels;
        private final String serviceName;
        private final Set<ServiceInstance> activeServerInstances;

        StorkSubchannelPicker(Map<ServiceInstance, LoadBalancer.Subchannel> subChannels,
                String serviceName, Set<ServiceInstance> activeServerInstances) {
            this.subChannels = subChannels;
            this.serviceName = serviceName;
            this.activeServerInstances = activeServerInstances;
        }

        @Override
        public LoadBalancer.PickResult pickSubchannel(LoadBalancer.PickSubchannelArgs args) {
            ServiceInstance serviceInstance = pickServerInstance();
            LoadBalancer.Subchannel subchannel = subChannels.get(serviceInstance);
            return LoadBalancer.PickResult.withSubchannel(subchannel);
        }

        private ServiceInstance pickServerInstance() {
            io.smallrye.stork.LoadBalancer lb = Stork.getInstance().getService(serviceName).getLoadBalancer();
            // Prefer instances with a READY subchannel; fall back to all known
            // instances when none are currently active.
            Set<ServiceInstance> toChooseFrom = this.activeServerInstances;
            if (activeServerInstances.isEmpty()) {
                toChooseFrom = subChannels.keySet();
            }
            return lb.selectServiceInstance(toChooseFrom);
        }
    }
}
|
/**
 * gRPC {@link LoadBalancerProvider} backed by SmallRye Stork. Registers the
 * Stork balancing policy, parses its per-service configuration ("service-name")
 * and creates a {@link LoadBalancer} whose picker delegates instance selection
 * to the Stork load balancer configured for that service.
 */
class GrpcLoadBalancerProvider extends LoadBalancerProvider {
    private static final Logger log = Logger.getLogger(GrpcLoadBalancerProvider.class);

    @Override
    public boolean isAvailable() {
        return true;
    }

    // FIX: the previous revision had two consecutive @Override annotations with
    // no method between them (a compile error) — the getPriority() override had
    // been dropped. Restored below with the reviewed priority value.
    @Override
    public int getPriority() {
        // 5 is the registry default; this provider is selected by policy name,
        // so it can safely sit below the default.
        return 4;
    }

    @Override
    public String getPolicyName() {
        // Shared constant keeps the policy name in sync with the Stork name
        // resolver / service config that references it.
        return Stork.STORK;
    }

    /**
     * Parses the service config for this policy; only "service-name" is read.
     * Returns an INTERNAL error when it is missing or unreadable.
     */
    @Override
    public NameResolver.ConfigOrError parseLoadBalancingPolicyConfig(Map<String, ?> rawConfig) {
        String serviceName;
        try {
            serviceName = JsonUtil.getString(rawConfig, "service-name");
        } catch (RuntimeException e) {
            log.error("Failed to parse Stork configuration: " + rawConfig, e);
            return NameResolver.ConfigOrError.fromError(Status.INTERNAL);
        }
        if (serviceName == null) {
            log.error("No 'service-name' defined in the Stork for gRPC configuration: " + rawConfig);
            return NameResolver.ConfigOrError.fromError(Status.INTERNAL);
        }
        return NameResolver.ConfigOrError
                .fromConfig(new StorkLoadBalancerConfig(serviceName));
    }

    @Override
    public LoadBalancer newLoadBalancer(LoadBalancer.Helper helper) {
        return new LoadBalancer() {
            String serviceName;

            @Override
            public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) {
                List<EquivalentAddressGroup> addresses = resolvedAddresses.getAddresses();
                // Guard the cast: a foreign or missing config would otherwise
                // fail later with a confusing ClassCastException/NPE.
                Object loadBalancerConfig = resolvedAddresses.getLoadBalancingPolicyConfig();
                if (!(loadBalancerConfig instanceof StorkLoadBalancerConfig)) {
                    throw new IllegalStateException("invalid configuration for a Stork Load Balancer : " + loadBalancerConfig);
                }
                StorkLoadBalancerConfig config = (StorkLoadBalancerConfig) loadBalancerConfig;
                Map<ServiceInstance, Subchannel> subChannels = new TreeMap<>(Comparator.comparingLong(ServiceInstance::getId));
                // Instances whose subchannel is currently READY.
                Set<ServiceInstance> activeSubchannels = Collections.newSetFromMap(new ConcurrentHashMap<>());
                AtomicReference<ConnectivityState> state = new AtomicReference<>(ConnectivityState.CONNECTING);
                serviceName = config.serviceName;
                final StorkSubchannelPicker picker = new StorkSubchannelPicker(subChannels, serviceName, activeSubchannels);
                for (EquivalentAddressGroup addressGroup : addresses) {
                    ServiceInstance serviceInstance = addressGroup.getAttributes()
                            .get(GrpcStorkServiceDiscovery.SERVICE_INSTANCE);
                    CreateSubchannelArgs subChannelArgs = CreateSubchannelArgs.newBuilder()
                            .setAddresses(addressGroup)
                            .setAttributes(addressGroup.getAttributes())
                            .build();
                    Subchannel subchannel = helper.createSubchannel(subChannelArgs);
                    subchannel.start(new SubchannelStateListener() {
                        @Override
                        public void onSubchannelState(ConnectivityStateInfo stateInfo) {
                            if (stateInfo.getState() == TRANSIENT_FAILURE || stateInfo.getState() == IDLE) {
                                Status status = stateInfo.getStatus();
                                log.error("gRPC Sub Channel failed", status == null ? null : status.getCause());
                                helper.refreshNameResolution();
                            }
                            switch (stateInfo.getState()) {
                                case READY:
                                    activeSubchannels.add(serviceInstance);
                                    if (state.getAndSet(ConnectivityState.READY) != ConnectivityState.READY) {
                                        helper.updateBalancingState(state.get(), picker);
                                    }
                                    break;
                                case CONNECTING:
                                case TRANSIENT_FAILURE:
                                case IDLE:
                                case SHUTDOWN:
                                    activeSubchannels.remove(serviceInstance);
                                    log.debugf("subchannel changed state to %s", stateInfo.getState());
                                    // Only report a non-READY state once the last
                                    // active subchannel is gone.
                                    if (activeSubchannels.isEmpty()
                                            && state.compareAndSet(ConnectivityState.READY, stateInfo.getState())) {
                                        helper.updateBalancingState(state.get(), picker);
                                    }
                                    break;
                            }
                        }
                    });
                    subChannels.put(serviceInstance, subchannel);
                }
                helper.updateBalancingState(state.get(), picker);
            }

            @Override
            public void handleNameResolutionError(Status error) {
                log.errorf("Name resolution failed for service '%s'", serviceName);
            }

            @Override
            public void shutdown() {
                log.debugf("Shutting down load balancer for service '%s'", serviceName);
            }
        };
    }

    /** Parsed policy configuration: just the Stork service name. */
    static class StorkLoadBalancerConfig {
        final String serviceName;

        StorkLoadBalancerConfig(String serviceName) {
            this.serviceName = serviceName;
        }
    }

    /** Picker that asks the Stork load balancer to choose a service instance. */
    static class StorkSubchannelPicker extends LoadBalancer.SubchannelPicker {
        private final Map<ServiceInstance, LoadBalancer.Subchannel> subChannels;
        private final String serviceName;
        private final Set<ServiceInstance> activeServerInstances;

        StorkSubchannelPicker(Map<ServiceInstance, LoadBalancer.Subchannel> subChannels,
                String serviceName, Set<ServiceInstance> activeServerInstances) {
            this.subChannels = subChannels;
            this.serviceName = serviceName;
            this.activeServerInstances = activeServerInstances;
        }

        @Override
        public LoadBalancer.PickResult pickSubchannel(LoadBalancer.PickSubchannelArgs args) {
            ServiceInstance serviceInstance = pickServerInstance();
            LoadBalancer.Subchannel subchannel = subChannels.get(serviceInstance);
            return LoadBalancer.PickResult.withSubchannel(subchannel);
        }

        private ServiceInstance pickServerInstance() {
            io.smallrye.stork.LoadBalancer lb = Stork.getInstance().getService(serviceName).getLoadBalancer();
            // Prefer instances with a READY subchannel; fall back to all known
            // instances when none are currently active.
            Set<ServiceInstance> toChooseFrom = this.activeServerInstances;
            if (activeServerInstances.isEmpty()) {
                toChooseFrom = subChannels.keySet();
            }
            return lb.selectServiceInstance(toChooseFrom);
        }
    }
}
|
Using `StringWriter` (instead of storing the properties directly to a file/OutputStream) because of https://github.com/codejive/java-properties/issues/23
|
public void writeToDisk() throws IOException {
if (rootProjectPath != null) {
Files.write(rootProjectPath.resolve(getSettingsGradlePath()), getModel().getRootSettingsContent().getBytes());
if (hasRootProjectFile(GRADLE_PROPERTIES_PATH)) {
try (StringWriter sw = new StringWriter()) {
getModel().getRootPropertiesContent().store(sw, "Gradle properties");
Files.writeString(rootProjectPath.resolve(GRADLE_PROPERTIES_PATH),
sw.toString());
}
}
} else {
writeToProjectFile(getSettingsGradlePath(), getModel().getSettingsContent().getBytes());
if (hasProjectFile(GRADLE_PROPERTIES_PATH)) {
try (StringWriter sw = new StringWriter()) {
getModel().getPropertiesContent().store(sw, "Gradle properties");
writeToProjectFile(GRADLE_PROPERTIES_PATH, sw.toString());
}
}
}
writeToProjectFile(getBuildGradlePath(), getModel().getBuildContent());
}
|
sw.toString());
|
    /**
     * Persists the in-memory model: the settings file, gradle.properties (when
     * present) and finally the build script. With a root project configured,
     * settings and properties are written to the root project instead.
     *
     * @throws IOException if any of the files cannot be written
     */
    public void writeToDisk() throws IOException {
        if (rootProjectPath != null) {
            // Multi-module layout: settings and properties live in the root project.
            Files.write(rootProjectPath.resolve(getSettingsGradlePath()), getModel().getRootSettingsContent().getBytes());
            if (hasRootProjectFile(GRADLE_PROPERTIES_PATH)) {
                // Serialize via StringWriter rather than an OutputStream — see
                // https://github.com/codejive/java-properties/issues/23.
                try (StringWriter sw = new StringWriter()) {
                    getModel().getRootPropertiesContent().store(sw, "Gradle properties");
                    Files.writeString(rootProjectPath.resolve(GRADLE_PROPERTIES_PATH),
                            sw.toString());
                }
            }
        } else {
            writeToProjectFile(getSettingsGradlePath(), getModel().getSettingsContent().getBytes());
            if (hasProjectFile(GRADLE_PROPERTIES_PATH)) {
                try (StringWriter sw = new StringWriter()) {
                    getModel().getPropertiesContent().store(sw, "Gradle properties");
                    writeToProjectFile(GRADLE_PROPERTIES_PATH, sw.toString());
                }
            }
        }
        writeToProjectFile(getBuildGradlePath(), getModel().getBuildContent());
    }
class AbstractGradleBuildFile extends BuildFile {
private static final Pattern DEPENDENCIES_SECTION = Pattern.compile("^[\\t ]*dependencies\\s*\\{\\s*$", Pattern.MULTILINE);
private static final String GRADLE_PROPERTIES_PATH = "gradle.properties";
private final Path rootProjectPath;
private final AtomicReference<Model> modelReference = new AtomicReference<>();
public AbstractGradleBuildFile(final Path projectDirPath, final ExtensionCatalog catalog) {
this(projectDirPath, catalog, null);
}
public AbstractGradleBuildFile(final Path projectDirPath, final ExtensionCatalog catalog,
Path rootProjectPath) {
super(projectDirPath, catalog);
this.rootProjectPath = rootProjectPath;
}
abstract String getSettingsGradlePath();
abstract String getBuildGradlePath();
@Override
static boolean containsProperty(ArtifactCoords coords) {
return coords.getGroupId().charAt(0) == '$' || coords.getArtifactId().charAt(0) == '$'
|| coords.getVersion() != null && coords.getVersion().charAt(0) == '$';
}
static String createDependencyCoordinatesString(ArtifactCoords coords, boolean managed, char quoteChar) {
StringBuilder newDependency = new StringBuilder().append(quoteChar)
.append(coords.getGroupId()).append(":").append(coords.getArtifactId());
if (!managed &&
(coords.getVersion() != null && !coords.getVersion().isEmpty())) {
newDependency.append(":").append(coords.getVersion());
}
boolean isBOM = "pom".equals(coords.getType());
if (isBOM && !managed) {
return String.format("enforcedPlatform(%s)", newDependency.append(quoteChar).toString());
}
return newDependency.append(quoteChar).toString();
}
static boolean addDependencyInModel(Model model, String newDependency) {
StringBuilder buildContent = new StringBuilder(model.getBuildContent());
Matcher matcher = DEPENDENCIES_SECTION.matcher(buildContent);
if (matcher.find()) {
int nextLine = buildContent.indexOf("\n", matcher.start()) + 1;
buildContent.insert(nextLine, newDependency);
} else {
buildContent.append("dependencies {").append(System.lineSeparator())
.append(newDependency)
.append("}").append(System.lineSeparator());
}
model.setBuildContent(buildContent.toString());
return true;
}
static String getProperty(Model model, String propertyName) {
final String property = model.getPropertiesContent().getProperty(propertyName);
if (property != null || model.getRootPropertiesContent() == null) {
return property;
}
return model.getRootPropertiesContent().getProperty(propertyName);
}
@Override
protected void removeDependency(ArtifactKey key) {
StringBuilder newBuildContent = new StringBuilder();
Scanner scanner = new Scanner(getModel().getBuildContent());
while (scanner.hasNextLine()) {
String line = scanner.nextLine();
if (!line.contains(key.getGroupId() + ":" + key.getArtifactId())) {
newBuildContent.append(line).append(System.lineSeparator());
}
}
scanner.close();
getModel().setBuildContent(newBuildContent.toString());
}
@Override
public String getProperty(String propertyName) {
return getProperty(getModel(), propertyName);
}
Model getModel() {
return modelReference.updateAndGet(model -> {
if (model == null) {
try {
return readModel();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
return model;
});
}
@Override
protected void refreshData() {
this.modelReference.set(null);
}
private boolean hasRootProjectFile(final String fileName) {
if (rootProjectPath == null) {
return false;
}
final Path filePath = rootProjectPath.resolve(fileName);
return Files.exists(filePath);
}
private byte[] readRootProjectFile(final String fileName) throws IOException {
if (rootProjectPath == null) {
throw new IllegalStateException("There is no rootProject defined in this GradleBuildFile");
}
final Path filePath = rootProjectPath.resolve(fileName);
return Files.readAllBytes(filePath);
}
private Model readModel() throws IOException {
String settingsContent = "";
String buildContent = "";
Properties propertiesContent = new Properties();
String rootSettingsContent = null;
Properties rootPropertiesContent = null;
if (hasProjectFile(getSettingsGradlePath())) {
final byte[] settings = readProjectFile(getSettingsGradlePath());
settingsContent = new String(settings, StandardCharsets.UTF_8);
}
if (hasRootProjectFile(getSettingsGradlePath())) {
final byte[] settings = readRootProjectFile(getSettingsGradlePath());
rootSettingsContent = new String(settings, StandardCharsets.UTF_8);
}
if (hasProjectFile(getBuildGradlePath())) {
final byte[] build = readProjectFile(getBuildGradlePath());
buildContent = new String(build, StandardCharsets.UTF_8);
}
if (hasProjectFile(GRADLE_PROPERTIES_PATH)) {
final byte[] properties = readProjectFile(GRADLE_PROPERTIES_PATH);
propertiesContent.load(new ByteArrayInputStream(properties));
}
if (hasRootProjectFile(GRADLE_PROPERTIES_PATH)) {
final byte[] properties = readRootProjectFile(GRADLE_PROPERTIES_PATH);
rootPropertiesContent = new Properties();
rootPropertiesContent.load(new ByteArrayInputStream(properties));
}
return new Model(settingsContent, buildContent, propertiesContent, rootSettingsContent, rootPropertiesContent);
}
protected String getBuildContent() {
return getModel().getBuildContent();
}
static class Model {
private String settingsContent;
private String buildContent;
private final Properties propertiesContent;
private final String rootSettingsContent;
private final Properties rootPropertiesContent;
public Model(String settingsContent, String buildContent, Properties propertiesContent, String rootSettingsContent,
Properties rootPropertiesContent) {
this.settingsContent = settingsContent;
this.buildContent = buildContent;
this.propertiesContent = propertiesContent;
this.rootSettingsContent = rootSettingsContent;
this.rootPropertiesContent = rootPropertiesContent;
}
public String getSettingsContent() {
return settingsContent;
}
public String getBuildContent() {
return buildContent;
}
public Properties getPropertiesContent() {
return propertiesContent;
}
public String getRootSettingsContent() {
return rootSettingsContent;
}
public Properties getRootPropertiesContent() {
return rootPropertiesContent;
}
public void setSettingsContent(String settingsContent) {
this.settingsContent = settingsContent;
}
public void setBuildContent(String buildContent) {
this.buildContent = buildContent;
}
}
}
|
class AbstractGradleBuildFile extends BuildFile {
private static final Pattern DEPENDENCIES_SECTION = Pattern.compile("^[\\t ]*dependencies\\s*\\{\\s*$", Pattern.MULTILINE);
private static final String GRADLE_PROPERTIES_PATH = "gradle.properties";
private final Path rootProjectPath;
private final AtomicReference<Model> modelReference = new AtomicReference<>();
public AbstractGradleBuildFile(final Path projectDirPath, final ExtensionCatalog catalog) {
this(projectDirPath, catalog, null);
}
public AbstractGradleBuildFile(final Path projectDirPath, final ExtensionCatalog catalog,
Path rootProjectPath) {
super(projectDirPath, catalog);
this.rootProjectPath = rootProjectPath;
}
abstract String getSettingsGradlePath();
abstract String getBuildGradlePath();
@Override
static boolean containsProperty(ArtifactCoords coords) {
return coords.getGroupId().charAt(0) == '$' || coords.getArtifactId().charAt(0) == '$'
|| coords.getVersion() != null && coords.getVersion().charAt(0) == '$';
}
static String createDependencyCoordinatesString(ArtifactCoords coords, boolean managed, char quoteChar) {
StringBuilder newDependency = new StringBuilder().append(quoteChar)
.append(coords.getGroupId()).append(":").append(coords.getArtifactId());
if (!managed &&
(coords.getVersion() != null && !coords.getVersion().isEmpty())) {
newDependency.append(":").append(coords.getVersion());
}
boolean isBOM = "pom".equals(coords.getType());
if (isBOM && !managed) {
return String.format("enforcedPlatform(%s)", newDependency.append(quoteChar).toString());
}
return newDependency.append(quoteChar).toString();
}
static boolean addDependencyInModel(Model model, String newDependency) {
StringBuilder buildContent = new StringBuilder(model.getBuildContent());
Matcher matcher = DEPENDENCIES_SECTION.matcher(buildContent);
if (matcher.find()) {
int nextLine = buildContent.indexOf("\n", matcher.start()) + 1;
buildContent.insert(nextLine, newDependency);
} else {
buildContent.append("dependencies {").append(System.lineSeparator())
.append(newDependency)
.append("}").append(System.lineSeparator());
}
model.setBuildContent(buildContent.toString());
return true;
}
static String getProperty(Model model, String propertyName) {
final String property = model.getPropertiesContent().getProperty(propertyName);
if (property != null || model.getRootPropertiesContent() == null) {
return property;
}
return model.getRootPropertiesContent().getProperty(propertyName);
}
@Override
protected void removeDependency(ArtifactKey key) {
StringBuilder newBuildContent = new StringBuilder();
Scanner scanner = new Scanner(getModel().getBuildContent());
while (scanner.hasNextLine()) {
String line = scanner.nextLine();
if (!line.contains(key.getGroupId() + ":" + key.getArtifactId())) {
newBuildContent.append(line).append(System.lineSeparator());
}
}
scanner.close();
getModel().setBuildContent(newBuildContent.toString());
}
@Override
public String getProperty(String propertyName) {
return getProperty(getModel(), propertyName);
}
Model getModel() {
return modelReference.updateAndGet(model -> {
if (model == null) {
try {
return readModel();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
return model;
});
}
@Override
protected void refreshData() {
this.modelReference.set(null);
}
private boolean hasRootProjectFile(final String fileName) {
if (rootProjectPath == null) {
return false;
}
final Path filePath = rootProjectPath.resolve(fileName);
return Files.exists(filePath);
}
private byte[] readRootProjectFile(final String fileName) throws IOException {
if (rootProjectPath == null) {
throw new IllegalStateException("There is no rootProject defined in this GradleBuildFile");
}
final Path filePath = rootProjectPath.resolve(fileName);
return Files.readAllBytes(filePath);
}
private Model readModel() throws IOException {
String settingsContent = "";
String buildContent = "";
Properties propertiesContent = new Properties();
String rootSettingsContent = null;
Properties rootPropertiesContent = null;
if (hasProjectFile(getSettingsGradlePath())) {
final byte[] settings = readProjectFile(getSettingsGradlePath());
settingsContent = new String(settings, StandardCharsets.UTF_8);
}
if (hasRootProjectFile(getSettingsGradlePath())) {
final byte[] settings = readRootProjectFile(getSettingsGradlePath());
rootSettingsContent = new String(settings, StandardCharsets.UTF_8);
}
if (hasProjectFile(getBuildGradlePath())) {
final byte[] build = readProjectFile(getBuildGradlePath());
buildContent = new String(build, StandardCharsets.UTF_8);
}
if (hasProjectFile(GRADLE_PROPERTIES_PATH)) {
final byte[] properties = readProjectFile(GRADLE_PROPERTIES_PATH);
propertiesContent.load(new ByteArrayInputStream(properties));
}
if (hasRootProjectFile(GRADLE_PROPERTIES_PATH)) {
final byte[] properties = readRootProjectFile(GRADLE_PROPERTIES_PATH);
rootPropertiesContent = new Properties();
rootPropertiesContent.load(new ByteArrayInputStream(properties));
}
return new Model(settingsContent, buildContent, propertiesContent, rootSettingsContent, rootPropertiesContent);
}
protected String getBuildContent() {
return getModel().getBuildContent();
}
static class Model {
private String settingsContent;
private String buildContent;
private final Properties propertiesContent;
private final String rootSettingsContent;
private final Properties rootPropertiesContent;
public Model(String settingsContent, String buildContent, Properties propertiesContent, String rootSettingsContent,
Properties rootPropertiesContent) {
this.settingsContent = settingsContent;
this.buildContent = buildContent;
this.propertiesContent = propertiesContent;
this.rootSettingsContent = rootSettingsContent;
this.rootPropertiesContent = rootPropertiesContent;
}
public String getSettingsContent() {
return settingsContent;
}
public String getBuildContent() {
return buildContent;
}
public Properties getPropertiesContent() {
return propertiesContent;
}
public String getRootSettingsContent() {
return rootSettingsContent;
}
public Properties getRootPropertiesContent() {
return rootPropertiesContent;
}
public void setSettingsContent(String settingsContent) {
this.settingsContent = settingsContent;
}
public void setBuildContent(String buildContent) {
this.buildContent = buildContent;
}
}
}
|
@c15yi OK — a jose4j instance for verifying a signature can be built manually for this case, with a custom basic resolver delegating to your new method; I can take care of it later.
|
public Uni<VerificationKeyResolver> resolve(TokenCredential tokenCred) {
JsonObject headers = OidcUtils.decodeJwtHeaders(tokenCred.getToken());
Key key = findKeyInTheCache(headers);
if (key != null) {
return Uni.createFrom().item(new SingleKeyVerificationKeyResolver(key));
}
if (chainResolverFallback != null && headers.containsKey(HeaderParameterNames.X509_CERTIFICATE_CHAIN)
&& Collections.disjoint(KEY_HEADERS, headers.fieldNames())) {
return getChainResolver();
}
return client.getJsonWebKeySet(new OidcRequestContextProperties(
Map.of(OidcRequestContextProperties.TOKEN, tokenCred.getToken(),
OidcRequestContextProperties.TOKEN_CREDENTIAL, tokenCred)))
.onItem().transformToUni(new Function<JsonWebKeySet, Uni<? extends VerificationKeyResolver>>() {
@Override
public Uni<? extends VerificationKeyResolver> apply(JsonWebKeySet jwks) {
Key newKey = null;
String kid = headers.getString(HeaderParameterNames.KEY_ID);
if (kid != null) {
newKey = getKeyWithId(jwks, kid);
if (newKey == null) {
return Uni.createFrom().failure(
new UnresolvableKeyException(String.format("JWK with kid '%s' is not available", kid)));
} else {
cache.add(kid, newKey);
}
}
String thumbprint = null;
if (newKey == null) {
thumbprint = headers.getString(HeaderParameterNames.X509_CERTIFICATE_SHA256_THUMBPRINT);
if (thumbprint != null) {
newKey = getKeyWithS256Thumbprint(jwks, thumbprint);
if (newKey == null) {
return Uni.createFrom().failure(
new UnresolvableKeyException(String.format(
"JWK with the SHA256 certificate thumbprint '%s' is not available",
thumbprint)));
} else {
cache.add(thumbprint, newKey);
}
}
}
if (newKey == null) {
thumbprint = headers.getString(HeaderParameterNames.X509_CERTIFICATE_THUMBPRINT);
if (thumbprint != null) {
newKey = getKeyWithThumbprint(jwks, thumbprint);
if (newKey == null) {
return Uni.createFrom().failure(new UnresolvableKeyException(
String.format("JWK with the certificate thumbprint '%s' is not available",
thumbprint)));
} else {
cache.add(thumbprint, newKey);
}
}
}
if (newKey == null && kid == null && thumbprint == null) {
newKey = jwks.getKeyWithoutKeyIdAndThumbprint("RSA");
}
if (newKey == null && chainResolverFallback != null) {
return getChainResolver();
}
if (newKey == null) {
return Uni.createFrom().failure(new UnresolvableKeyException(
"JWK is not available, neither 'kid' nor 'x5t
} else {
return Uni.createFrom().item(new SingleKeyVerificationKeyResolver(newKey));
}
}
});
}
|
/**
 * Resolves a JWT verification key for the given token credential.
 * Resolution order: the in-memory key cache, the certificate-chain fallback resolver
 * (only when the token carries just an 'x5c' header and none of the key-identifying
 * headers), and finally a fresh JWK set fetched from the OIDC provider.
 *
 * NOTE(review): a few string literals below appear truncated in this extract — confirm
 * against the original file.
 *
 * @param tokenCred the token whose JOSE headers identify the verification key
 * @return a Uni producing a resolver for the matched key, or a failure when no key matches
 */
public Uni<VerificationKeyResolver> resolve(TokenCredential tokenCred) {
    JsonObject headers = OidcUtils.decodeJwtHeaders(tokenCred.getToken());
    // Fast path: a key cached under this token's 'kid' or certificate thumbprint.
    Key key = findKeyInTheCache(headers);
    if (key != null) {
        return Uni.createFrom().item(new SingleKeyVerificationKeyResolver(key));
    }
    // Only a certificate chain is present (no kid/x5t/x5tS256): delegate to the chain resolver.
    if (chainResolverFallback != null && headers.containsKey(HeaderParameterNames.X509_CERTIFICATE_CHAIN)
            && Collections.disjoint(KEY_HEADERS, headers.fieldNames())) {
        return getChainResolver();
    }
    // Refresh the JWK set from the provider and match a key against the token headers.
    return client.getJsonWebKeySet(new OidcRequestContextProperties(
            Map.of(OidcRequestContextProperties.TOKEN, tokenCred.getToken(),
                    OidcRequestContextProperties.TOKEN_CREDENTIAL, tokenCred)))
            .onItem().transformToUni(new Function<JsonWebKeySet, Uni<? extends VerificationKeyResolver>>() {
                @Override
                public Uni<? extends VerificationKeyResolver> apply(JsonWebKeySet jwks) {
                    Key newKey = null;
                    // 1. 'kid' header: a declared-but-missing key id is a hard failure.
                    String kid = headers.getString(HeaderParameterNames.KEY_ID);
                    if (kid != null) {
                        newKey = getKeyWithId(jwks, kid);
                        if (newKey == null) {
                            return Uni.createFrom().failure(
                                    new UnresolvableKeyException(String.format("JWK with kid '%s' is not available", kid)));
                        } else {
                            cache.add(kid, newKey);
                        }
                    }
                    // 2. SHA-256 certificate thumbprint header.
                    String thumbprint = null;
                    if (newKey == null) {
                        thumbprint = headers.getString(HeaderParameterNames.X509_CERTIFICATE_SHA256_THUMBPRINT);
                        if (thumbprint != null) {
                            newKey = getKeyWithS256Thumbprint(jwks, thumbprint);
                            if (newKey == null) {
                                return Uni.createFrom().failure(
                                        new UnresolvableKeyException(String.format(
                                                "JWK with the SHA256 certificate thumbprint '%s' is not available",
                                                thumbprint)));
                            } else {
                                cache.add(thumbprint, newKey);
                            }
                        }
                    }
                    // 3. SHA-1 certificate thumbprint header.
                    if (newKey == null) {
                        thumbprint = headers.getString(HeaderParameterNames.X509_CERTIFICATE_THUMBPRINT);
                        if (thumbprint != null) {
                            newKey = getKeyWithThumbprint(jwks, thumbprint);
                            if (newKey == null) {
                                return Uni.createFrom().failure(new UnresolvableKeyException(
                                        String.format("JWK with the certificate thumbprint '%s' is not available",
                                                thumbprint)));
                            } else {
                                cache.add(thumbprint, newKey);
                            }
                        }
                    }
                    // 4. No identifying headers at all: fall back to a single RSA key without id/thumbprint.
                    if (newKey == null && kid == null && thumbprint == null) {
                        newKey = jwks.getKeyWithoutKeyIdAndThumbprint("RSA");
                    }
                    // 5. Still nothing: use the certificate-chain resolver when configured.
                    if (newKey == null && chainResolverFallback != null) {
                        return getChainResolver();
                    }
                    if (newKey == null) {
                        // NOTE(review): literal below looks truncated in this extract.
                        return Uni.createFrom().failure(new UnresolvableKeyException(
                                "JWK is not available, neither 'kid' nor 'x5t
                    } else {
                        return Uni.createFrom().item(new SingleKeyVerificationKeyResolver(newKey));
                    }
                }
            });
}
|
/**
 * Resolves JWT verification keys on demand: keys are looked up in a JWK set by the token's
 * 'kid', 'x5t#S256' or 'x5t' header, cached in memory, with an optional fallback to a
 * certificate-chain ('x5c') based resolver when a trust store is configured.
 */
class DynamicVerificationKeyResolver {
    private static final Logger LOG = Logger.getLogger(DynamicVerificationKeyResolver.class);
    // JOSE headers that can directly identify a verification key.
    private static final Set<String> KEY_HEADERS = Set.of(HeaderParameterNames.KEY_ID,
            HeaderParameterNames.X509_CERTIFICATE_SHA256_THUMBPRINT,
            HeaderParameterNames.X509_CERTIFICATE_THUMBPRINT);
    private final OidcProviderClient client;
    // Cache of previously resolved keys, keyed by 'kid' or certificate thumbprint.
    private final MemoryCache<Key> cache;
    private final boolean tryAll;
    // Fallback resolver driven by the 'x5c' chain; null when no trust store file is configured.
    final CertChainPublicKeyResolver chainResolverFallback;
    /**
     * @param client OIDC provider client used to fetch JWK sets
     * @param config tenant configuration supplying cache and trust-store settings
     */
    public DynamicVerificationKeyResolver(OidcProviderClient client, OidcTenantConfig config) {
        this.client = client;
        this.tryAll = config.jwks.tryAll;
        this.cache = new MemoryCache<Key>(client.getVertx(), config.jwks.cleanUpTimerInterval,
                config.jwks.cacheTimeToLive, config.jwks.cacheSize);
        if (config.certificateChain.trustStoreFile.isPresent()) {
            chainResolverFallback = new CertChainPublicKeyResolver(config);
        } else {
            chainResolverFallback = null;
        }
    }
    // Wraps the configured chain resolver in a Uni. NOTE(review): the debug literal below
    // appears truncated in this extract.
    private Uni<VerificationKeyResolver> getChainResolver() {
        LOG.debug("JWK is not available, neither 'kid' nor 'x5t
                + " falling back to the certificate chain resolver");
        return Uni.createFrom().item(chainResolverFallback);
    }
    // Looks a key up by its 'kid'; returns null when the header is absent.
    private static Key getKeyWithId(JsonWebKeySet jwks, String kid) {
        if (kid != null) {
            return jwks.getKeyWithId(kid);
        } else {
            LOG.debug("Token 'kid' header is not set");
            return null;
        }
    }
    // Looks a key up by its SHA-1 certificate thumbprint; null when the header is absent.
    private Key getKeyWithThumbprint(JsonWebKeySet jwks, String thumbprint) {
        if (thumbprint != null) {
            return jwks.getKeyWithThumbprint(thumbprint);
        } else {
            LOG.debug("Token 'x5t' header is not set");
            return null;
        }
    }
    // Looks a key up by its SHA-256 certificate thumbprint; null when the header is absent.
    private Key getKeyWithS256Thumbprint(JsonWebKeySet jwks, String thumbprint) {
        if (thumbprint != null) {
            return jwks.getKeyWithS256Thumbprint(thumbprint);
        } else {
            LOG.debug("Token 'x5tS256' header is not set");
            return null;
        }
    }
    // Checks the cache for a key matching kid, then x5t#S256, then x5t; null on a miss.
    private Key findKeyInTheCache(JsonObject headers) {
        String kid = headers.getString(HeaderParameterNames.KEY_ID);
        if (kid != null && cache.containsKey(kid)) {
            return cache.get(kid);
        }
        String thumbprint = headers.getString(HeaderParameterNames.X509_CERTIFICATE_SHA256_THUMBPRINT);
        if (thumbprint != null && cache.containsKey(thumbprint)) {
            return cache.get(thumbprint);
        }
        thumbprint = headers.getString(HeaderParameterNames.X509_CERTIFICATE_THUMBPRINT);
        if (thumbprint != null && cache.containsKey(thumbprint)) {
            return cache.get(thumbprint);
        }
        return null;
    }
    /** Resolver that always returns the single, already-resolved key. */
    static class SingleKeyVerificationKeyResolver implements VerificationKeyResolver {
        private Key key;
        SingleKeyVerificationKeyResolver(Key key) {
            this.key = key;
        }
        @Override
        public Key resolveKey(JsonWebSignature jws, List<JsonWebStructure> nestingContext)
                throws UnresolvableKeyException {
            return key;
        }
    }
    // Stops the cache's clean-up timer on application shutdown.
    void shutdown(@Observes ShutdownEvent event, Vertx vertx) {
        cache.stopTimer(vertx);
    }
}
|
/**
 * Resolves JWT verification keys on demand: keys are looked up in a JWK set by the token's
 * 'kid', 'x5t#S256' or 'x5t' header, cached in memory, with an optional fallback to a
 * certificate-chain ('x5c') based resolver when a trust store is configured.
 */
class DynamicVerificationKeyResolver {
    private static final Logger LOG = Logger.getLogger(DynamicVerificationKeyResolver.class);
    // JOSE headers that can directly identify a verification key.
    private static final Set<String> KEY_HEADERS = Set.of(HeaderParameterNames.KEY_ID,
            HeaderParameterNames.X509_CERTIFICATE_SHA256_THUMBPRINT,
            HeaderParameterNames.X509_CERTIFICATE_THUMBPRINT);
    private final OidcProviderClient client;
    // Cache of previously resolved keys, keyed by 'kid' or certificate thumbprint.
    private final MemoryCache<Key> cache;
    private final boolean tryAll;
    // Fallback resolver driven by the 'x5c' chain; null when no trust store file is configured.
    final CertChainPublicKeyResolver chainResolverFallback;
    /**
     * @param client OIDC provider client used to fetch JWK sets
     * @param config tenant configuration supplying cache and trust-store settings
     */
    public DynamicVerificationKeyResolver(OidcProviderClient client, OidcTenantConfig config) {
        this.client = client;
        this.tryAll = config.jwks.tryAll;
        this.cache = new MemoryCache<Key>(client.getVertx(), config.jwks.cleanUpTimerInterval,
                config.jwks.cacheTimeToLive, config.jwks.cacheSize);
        if (config.certificateChain.trustStoreFile.isPresent()) {
            chainResolverFallback = new CertChainPublicKeyResolver(config);
        } else {
            chainResolverFallback = null;
        }
    }
    // Wraps the configured chain resolver in a Uni. NOTE(review): the debug literal below
    // appears truncated in this extract.
    private Uni<VerificationKeyResolver> getChainResolver() {
        LOG.debug("JWK is not available, neither 'kid' nor 'x5t
                + " falling back to the certificate chain resolver");
        return Uni.createFrom().item(chainResolverFallback);
    }
    // Looks a key up by its 'kid'; returns null when the header is absent.
    private static Key getKeyWithId(JsonWebKeySet jwks, String kid) {
        if (kid != null) {
            return jwks.getKeyWithId(kid);
        } else {
            LOG.debug("Token 'kid' header is not set");
            return null;
        }
    }
    // Looks a key up by its SHA-1 certificate thumbprint; null when the header is absent.
    private Key getKeyWithThumbprint(JsonWebKeySet jwks, String thumbprint) {
        if (thumbprint != null) {
            return jwks.getKeyWithThumbprint(thumbprint);
        } else {
            LOG.debug("Token 'x5t' header is not set");
            return null;
        }
    }
    // Looks a key up by its SHA-256 certificate thumbprint; null when the header is absent.
    private Key getKeyWithS256Thumbprint(JsonWebKeySet jwks, String thumbprint) {
        if (thumbprint != null) {
            return jwks.getKeyWithS256Thumbprint(thumbprint);
        } else {
            LOG.debug("Token 'x5tS256' header is not set");
            return null;
        }
    }
    // Checks the cache for a key matching kid, then x5t#S256, then x5t; null on a miss.
    private Key findKeyInTheCache(JsonObject headers) {
        String kid = headers.getString(HeaderParameterNames.KEY_ID);
        if (kid != null && cache.containsKey(kid)) {
            return cache.get(kid);
        }
        String thumbprint = headers.getString(HeaderParameterNames.X509_CERTIFICATE_SHA256_THUMBPRINT);
        if (thumbprint != null && cache.containsKey(thumbprint)) {
            return cache.get(thumbprint);
        }
        thumbprint = headers.getString(HeaderParameterNames.X509_CERTIFICATE_THUMBPRINT);
        if (thumbprint != null && cache.containsKey(thumbprint)) {
            return cache.get(thumbprint);
        }
        return null;
    }
    /** Resolver that always returns the single, already-resolved key. */
    static class SingleKeyVerificationKeyResolver implements VerificationKeyResolver {
        private Key key;
        SingleKeyVerificationKeyResolver(Key key) {
            this.key = key;
        }
        @Override
        public Key resolveKey(JsonWebSignature jws, List<JsonWebStructure> nestingContext)
                throws UnresolvableKeyException {
            return key;
        }
    }
    // Stops the cache's clean-up timer on application shutdown.
    void shutdown(@Observes ShutdownEvent event, Vertx vertx) {
        cache.stopTimer(vertx);
    }
}
|
|
That was the other option I pondered. I decided to go with this one.
|
/**
 * Determines the Java target for the project being created and delegates to the
 * create-project handler.
 *
 * <p>If the explicitly requested target (or, failing that, the running JVM's
 * {@code java.version}) parses to a major version below 11, the target is set to Java 8;
 * otherwise it defaults to Java 11.
 *
 * @return the outcome of the create-project command
 * @throws QuarkusCommandException if project creation fails
 */
public QuarkusCommandOutcome execute() throws QuarkusCommandException {
    Matcher matcher = JAVA_VERSION_PATTERN
            .matcher(this.javaTarget != null ? this.javaTarget : System.getProperty("java.version", ""));
    if (matcher.matches() && Integer.parseInt(matcher.group(1)) < 11) {
        // Use the canonical single-number release value: "8" is understood by both
        // Maven and Gradle, so the build-tool-specific "1.8"/"1_8" split is unnecessary.
        invocation.setProperty(JAVA_TARGET, "8");
    } else {
        invocation.setProperty(JAVA_TARGET, "11");
    }
    return new CreateProjectCommandHandler().execute(invocation);
}
|
invocation.setProperty(JAVA_TARGET, invocation.getBuildTool() == BuildTool.MAVEN ? "1.8" : "1_8");
|
/**
 * Determines the Java target for the project being created and delegates to the
 * create-project handler.
 *
 * <p>If the explicitly requested target (or, failing that, the running JVM's
 * {@code java.version}) parses to a major version below 11, the target is set to "8";
 * otherwise it defaults to "11".
 *
 * @return the outcome of the create-project command
 * @throws QuarkusCommandException if project creation fails
 */
public QuarkusCommandOutcome execute() throws QuarkusCommandException {
    Matcher matcher = JAVA_VERSION_PATTERN
            .matcher(this.javaTarget != null ? this.javaTarget : System.getProperty("java.version", ""));
    if (matcher.matches() && Integer.parseInt(matcher.group(1)) < 11) {
        // Single-number release value works for both Maven and Gradle.
        invocation.setProperty(JAVA_TARGET, "8");
    } else {
        invocation.setProperty(JAVA_TARGET, "11");
    }
    return new CreateProjectCommandHandler().execute(invocation);
}
|
class name");
}
setProperty(CLASS_NAME, className);
return this;
}
/**
 * Records the requested extensions on the invocation. Currently this only flags
 * Spring-style extension sets by setting {@code IS_SPRING} — TODO confirm the
 * intended replacement API for callers migrating off this method.
 *
 * @param extensions the extension identifiers requested for the project
 * @return this instance, for call chaining
 * @deprecated in 1.3.0.CR
 */
@Deprecated
public CreateProject extensions(Set<String> extensions) {
    if (isSpringStyle(extensions)) {
        invocation.setValue(IS_SPRING, true);
    }
    return this;
}
|
class name");
}
setProperty(CLASS_NAME, className);
return this;
}
/**
 * Records the requested extensions on the invocation. Currently this only flags
 * Spring-style extension sets by setting {@code IS_SPRING} — TODO confirm the
 * intended replacement API for callers migrating off this method.
 *
 * @param extensions the extension identifiers requested for the project
 * @return this instance, for call chaining
 * @deprecated in 1.3.0.CR
 */
@Deprecated
public CreateProject extensions(Set<String> extensions) {
    if (isSpringStyle(extensions)) {
        invocation.setValue(IS_SPRING, true);
    }
    return this;
}
|
Since this is a replay code path, there is no need to print the detailed stack trace.
|
/**
 * Replays star manager journal entries from the next unreplayed id up to the given
 * journal id.
 *
 * <p>Interruption or journal inconsistency is fatal (the process exits); any other
 * failure is rethrown as a {@link StarException}.
 *
 * @param journalId the last journal id (inclusive) to replay
 * @throws StarException if reading or replaying the journal fails
 */
public void replayTo(long journalId) throws StarException {
    JournalCursor cursor = null;
    try {
        cursor = bdbjeJournal.read(replayedJournalId.get() + 1, journalId);
        replayJournal(cursor);
    } catch (InterruptedException | JournalInconsistentException e) {
        LOG.warn("got interrupt exception or inconsistent exception when replay star mgr journal, {}.",
                e.getMessage());
        Util.stdoutWithTime(e.getMessage());
        System.exit(-1);
    } catch (Exception e) {
        // Replay code path: the exception is rethrown to the caller, so logging the
        // message alone is enough — no need to print the detailed stack trace.
        LOG.warn("got exception when replay star mgr journal, {}.", e.getMessage());
        throw new StarException(ExceptionCode.JOURNAL, e.getMessage());
    } finally {
        // Always release the cursor, even on the fatal paths above that don't exit.
        if (cursor != null) {
            cursor.close();
        }
    }
}
|
LOG.warn("got exception when replay star mgr journal", e);
|
/**
 * Replays star manager journal entries from the next unreplayed id up to the given
 * journal id.
 *
 * <p>Interruption or journal inconsistency is fatal (the process exits); any other
 * failure is rethrown as a {@link StarException}.
 *
 * @param journalId the last journal id (inclusive) to replay
 * @throws StarException if reading or replaying the journal fails
 */
public void replayTo(long journalId) throws StarException {
    JournalCursor cursor = null;
    try {
        cursor = bdbjeJournal.read(replayedJournalId.get() + 1, journalId);
        replayJournal(cursor);
    } catch (InterruptedException | JournalInconsistentException e) {
        LOG.warn("got interrupt exception or inconsistent exception when replay star mgr journal, {}.",
                e.getMessage());
        Util.stdoutWithTime(e.getMessage());
        System.exit(-1);
    } catch (Exception e) {
        // NOTE(review): replay path rethrows the exception — the full stack trace logged
        // here may be unnecessary; logging just the message would suffice. Confirm.
        LOG.warn("got exception when replay star mgr journal", e);
        throw new StarException(ExceptionCode.JOURNAL, e.getMessage());
    } finally {
        // Always release the cursor.
        if (cursor != null) {
            cursor.close();
        }
    }
}
|
/**
 * BDBJE-backed journal system for the star manager: the leader writes journal entries
 * through a {@link JournalWriter}; followers replay them via a background daemon.
 */
class StarOSBDBJEJournalSystem implements JournalSystem {
    private static final String JOURNAL_PREFIX = "starmgr_";
    // Replay daemon cycle interval, in milliseconds.
    private static final int REPLAY_INTERVAL_MS = 1;
    private static final Logger LOG = LogManager.getLogger(StarOSBDBJEJournalSystem.class);
    private BDBJEJournal bdbjeJournal;
    private JournalWriter journalWriter;
    private EditLog editLog;
    // Id of the last journal entry replayed locally.
    private AtomicLong replayedJournalId;
    // Background replay daemon; non-null only while this node is a follower.
    private Daemon replayer;
    /** Leader-capable constructor: sets up the write queue and journal writer. */
    public StarOSBDBJEJournalSystem(BDBEnvironment environment) {
        BlockingQueue<JournalTask> journalQueue = new ArrayBlockingQueue<JournalTask>(Config.metadata_journal_queue_size);
        bdbjeJournal = new BDBJEJournal(environment, JOURNAL_PREFIX);
        journalWriter = new JournalWriter(bdbjeJournal, journalQueue);
        editLog = new EditLog(journalQueue);
        replayedJournalId = new AtomicLong(0L);
        replayer = null;
    }
    /** Replay-only constructor: no journal writer is created. */
    public StarOSBDBJEJournalSystem(BDBJEJournal journal) {
        bdbjeJournal = journal;
        replayedJournalId = new AtomicLong(0L);
        editLog = new EditLog(null);
    }
    public long getReplayId() {
        return replayedJournalId.get();
    }
    public void setReplayId(long replayId) {
        replayedJournalId.set(replayId);
    }
    /**
     * Transitions this node to leader: stops the follower replayer, replays the journal
     * to its end, then starts the journal writer. Any failure here is fatal.
     */
    @java.lang.SuppressWarnings("squid:S2142")
    public void onBecomeLeader() {
        if (replayer != null) {
            replayer.setStop();
            try {
                replayer.join();
            } catch (InterruptedException e) {
                LOG.warn("got exception when stopping the star mgr replayer thread, {}.", e.getMessage());
            }
            replayer = null;
        }
        try {
            bdbjeJournal.open();
            long replayStartTime = System.currentTimeMillis();
            // Catch up to the very end of the journal before accepting writes.
            replayTo(JournalCursor.CURSOR_END_KEY);
            long replayEndTime = System.currentTimeMillis();
            LOG.info("finish star manager replay in " + (replayEndTime - replayStartTime) + " msec.");
            journalWriter.init(bdbjeJournal.getMaxJournalId());
            journalWriter.startDaemon();
        } catch (Exception e) {
            LOG.warn("star mgr prepare journal failed before becoming leader, {}.", e.getMessage());
            Util.stdoutWithTime(e.getMessage());
            System.exit(-1);
        }
    }
    /**
     * Transitions this node to follower: starts a daemon that continuously replays new
     * journal entries. Inconsistency or interruption is fatal; other errors back off 5s.
     */
    @java.lang.SuppressWarnings("squid:S2142")
    public void onBecomeFollower() {
        if (replayer == null) {
            replayer = new Daemon("star mgr replayer", REPLAY_INTERVAL_MS) {
                // Cursor is created lazily on the first cycle and refreshed afterwards.
                private JournalCursor cursor = null;
                @Override
                protected void runOneCycle() {
                    try {
                        if (cursor == null) {
                            LOG.info("star mgr start to replay from {}", replayedJournalId.get() + 1);
                            cursor = bdbjeJournal.read(replayedJournalId.get() + 1, -1);
                        } else {
                            cursor.refresh();
                        }
                        replayJournal(cursor);
                    } catch (JournalInconsistentException | InterruptedException e) {
                        LOG.warn("got interrupt exception or inconsistent exception when replay star mgr journal, {}.",
                                e.getMessage());
                        Util.stdoutWithTime(e.getMessage());
                        System.exit(-1);
                    } catch (Throwable e) {
                        LOG.error("star mgr replayer thread catch an exception when replay journal, {}.", e.getMessage());
                        try {
                            // Back off before the next replay attempt.
                            Thread.sleep(5000);
                        } catch (InterruptedException e1) {
                            LOG.error("star mgr replayer sleep got exception, {}.", e1.getMessage());
                        }
                    }
                }
                @Override
                public void run() {
                    super.run();
                    // Daemon stopped: release the cursor.
                    if (cursor != null) {
                        cursor.close();
                        LOG.info("star mgr quit replay at {}.", replayedJournalId.get());
                    }
                }
            };
            replayer.start();
        }
    }
    /** Synchronously writes one journal entry; failures become StarException. */
    public void write(Journal journal) throws StarException {
        try {
            editLog.logStarMgrOperation(new StarMgrJournal(journal));
        } catch (Exception e) {
            throw new StarException(ExceptionCode.JOURNAL, e.getMessage());
        }
    }
    /** Asynchronously writes one journal entry; the future completes when persisted. */
    @Override
    public Future<Boolean> writeAsync(Journal journal) throws StarException {
        try {
            return editLog.logStarMgrOperationNoWait(new StarMgrJournal(journal));
        } catch (Exception e) {
            throw new StarException(ExceptionCode.JOURNAL, e.getMessage());
        }
    }
    /**
     * Drains the cursor, applying each entry and advancing replayedJournalId.
     *
     * @return true if at least one entry was replayed
     */
    @java.lang.SuppressWarnings("squid:S2142")
    private boolean replayJournal(JournalCursor cursor)
            throws JournalException, InterruptedException, JournalInconsistentException {
        long startReplayId = replayedJournalId.get();
        long startTime = System.currentTimeMillis();
        while (true) {
            JournalEntity entity = cursor.next();
            if (entity == null) {
                break;
            }
            editLog.loadJournal(GlobalStateMgr.getCurrentState(), entity);
            replayedJournalId.incrementAndGet();
            LOG.debug("star mgr journal {} replayed.", replayedJournalId);
        }
        long cost = System.currentTimeMillis() - startTime;
        if (cost >= 1000) {
            LOG.warn("star mgr replay journal cost too much time: {} replayedJournalId: {}.", cost, replayedJournalId);
        }
        if (replayedJournalId.get() - startReplayId > 0) {
            LOG.info("star mgr replayed journal from {} to {}.", startReplayId, replayedJournalId);
            return true;
        }
        return false;
    }
    public BDBJEJournal getJournal() {
        return bdbjeJournal;
    }
}
|
/**
 * BDBJE-backed journal system for the star manager: the leader writes journal entries
 * through a {@link JournalWriter}; followers replay them via a background daemon.
 */
class StarOSBDBJEJournalSystem implements JournalSystem {
    private static final String JOURNAL_PREFIX = "starmgr_";
    // Replay daemon cycle interval, in milliseconds.
    private static final int REPLAY_INTERVAL_MS = 1;
    private static final Logger LOG = LogManager.getLogger(StarOSBDBJEJournalSystem.class);
    private BDBJEJournal bdbjeJournal;
    private JournalWriter journalWriter;
    private EditLog editLog;
    // Id of the last journal entry replayed locally.
    private AtomicLong replayedJournalId;
    // Background replay daemon; non-null only while this node is a follower.
    private Daemon replayer;
    /** Leader-capable constructor: sets up the write queue and journal writer. */
    public StarOSBDBJEJournalSystem(BDBEnvironment environment) {
        BlockingQueue<JournalTask> journalQueue = new ArrayBlockingQueue<JournalTask>(Config.metadata_journal_queue_size);
        bdbjeJournal = new BDBJEJournal(environment, JOURNAL_PREFIX);
        journalWriter = new JournalWriter(bdbjeJournal, journalQueue);
        editLog = new EditLog(journalQueue);
        replayedJournalId = new AtomicLong(0L);
        replayer = null;
    }
    /** Replay-only constructor: no journal writer is created. */
    public StarOSBDBJEJournalSystem(BDBJEJournal journal) {
        bdbjeJournal = journal;
        replayedJournalId = new AtomicLong(0L);
        editLog = new EditLog(null);
    }
    public long getReplayId() {
        return replayedJournalId.get();
    }
    public void setReplayId(long replayId) {
        replayedJournalId.set(replayId);
    }
    /**
     * Transitions this node to leader: stops the follower replayer, replays the journal
     * to its end, then starts the journal writer. Any failure here is fatal.
     */
    @java.lang.SuppressWarnings("squid:S2142")
    public void onBecomeLeader() {
        if (replayer != null) {
            replayer.setStop();
            try {
                replayer.join();
            } catch (InterruptedException e) {
                LOG.warn("got exception when stopping the star mgr replayer thread, {}.", e.getMessage());
            }
            replayer = null;
        }
        try {
            bdbjeJournal.open();
            long replayStartTime = System.currentTimeMillis();
            // Catch up to the very end of the journal before accepting writes.
            replayTo(JournalCursor.CURSOR_END_KEY);
            long replayEndTime = System.currentTimeMillis();
            LOG.info("finish star manager replay in " + (replayEndTime - replayStartTime) + " msec.");
            journalWriter.init(bdbjeJournal.getMaxJournalId());
            journalWriter.startDaemon();
        } catch (Exception e) {
            LOG.warn("star mgr prepare journal failed before becoming leader, {}.", e.getMessage());
            Util.stdoutWithTime(e.getMessage());
            System.exit(-1);
        }
    }
    /**
     * Transitions this node to follower: starts a daemon that continuously replays new
     * journal entries. Inconsistency or interruption is fatal; other errors back off 5s.
     */
    @java.lang.SuppressWarnings("squid:S2142")
    public void onBecomeFollower() {
        if (replayer == null) {
            replayer = new Daemon("star mgr replayer", REPLAY_INTERVAL_MS) {
                // Cursor is created lazily on the first cycle and refreshed afterwards.
                private JournalCursor cursor = null;
                @Override
                protected void runOneCycle() {
                    try {
                        if (cursor == null) {
                            LOG.info("star mgr start to replay from {}", replayedJournalId.get() + 1);
                            cursor = bdbjeJournal.read(replayedJournalId.get() + 1, -1);
                        } else {
                            cursor.refresh();
                        }
                        replayJournal(cursor);
                    } catch (JournalInconsistentException | InterruptedException e) {
                        LOG.warn("got interrupt exception or inconsistent exception when replay star mgr journal, {}.",
                                e.getMessage());
                        Util.stdoutWithTime(e.getMessage());
                        System.exit(-1);
                    } catch (Throwable e) {
                        LOG.error("star mgr replayer thread catch an exception when replay journal, {}.", e.getMessage());
                        try {
                            // Back off before the next replay attempt.
                            Thread.sleep(5000);
                        } catch (InterruptedException e1) {
                            LOG.error("star mgr replayer sleep got exception, {}.", e1.getMessage());
                        }
                    }
                }
                @Override
                public void run() {
                    super.run();
                    // Daemon stopped: release the cursor.
                    if (cursor != null) {
                        cursor.close();
                        LOG.info("star mgr quit replay at {}.", replayedJournalId.get());
                    }
                }
            };
            replayer.start();
        }
    }
    /** Synchronously writes one journal entry; failures become StarException. */
    public void write(Journal journal) throws StarException {
        try {
            editLog.logStarMgrOperation(new StarMgrJournal(journal));
        } catch (Exception e) {
            throw new StarException(ExceptionCode.JOURNAL, e.getMessage());
        }
    }
    /** Asynchronously writes one journal entry; the future completes when persisted. */
    @Override
    public Future<Boolean> writeAsync(Journal journal) throws StarException {
        try {
            return editLog.logStarMgrOperationNoWait(new StarMgrJournal(journal));
        } catch (Exception e) {
            throw new StarException(ExceptionCode.JOURNAL, e.getMessage());
        }
    }
    /**
     * Drains the cursor, applying each entry and advancing replayedJournalId.
     *
     * @return true if at least one entry was replayed
     */
    @java.lang.SuppressWarnings("squid:S2142")
    private boolean replayJournal(JournalCursor cursor)
            throws JournalException, InterruptedException, JournalInconsistentException {
        long startReplayId = replayedJournalId.get();
        long startTime = System.currentTimeMillis();
        while (true) {
            JournalEntity entity = cursor.next();
            if (entity == null) {
                break;
            }
            // NOTE(review): null is passed where a GlobalStateMgr was previously supplied —
            // confirm loadJournal tolerates a null state manager in this code path.
            editLog.loadJournal(null /* GlobalStateMgr */, entity);
            replayedJournalId.incrementAndGet();
            LOG.debug("star mgr journal {} replayed.", replayedJournalId);
        }
        long cost = System.currentTimeMillis() - startTime;
        if (cost >= 1000) {
            LOG.warn("star mgr replay journal cost too much time: {} replayedJournalId: {}.", cost, replayedJournalId);
        }
        if (replayedJournalId.get() - startReplayId > 0) {
            LOG.info("star mgr replayed journal from {} to {}.", startReplayId, replayedJournalId);
            return true;
        }
        return false;
    }
    public BDBJEJournal getJournal() {
        return bdbjeJournal;
    }
}
|
We could use a local variable or a constant for the text being sent instead of repeating the literal.
|
/**
 * Sends a fixed set of WebSocket frames (5 text, 1 ping, 1 pong, 1 binary, 1 close
 * message), then scrapes the metrics endpoint and compares the published metrics with
 * the expected ones.
 *
 * NOTE(review): several string literals below (the ws/http URLs and the line-filter
 * prefix) appear truncated in this extract — confirm against the original file.
 *
 * @throws Exception on connection or I/O failure
 */
public void testMetrics() throws Exception {
    // Name the payloads once instead of repeating the literals.
    final String message = "ds";
    final String closeMessage = "closeMe";
    WebSocketTestClient client = new WebSocketTestClient("ws:
    client.handshake();
    client.sendText(message);
    client.sendText(message);
    client.sendText(message);
    client.sendText(message);
    client.sendText(message);
    client.sendPing(SENDING_BYTE_BUFFER);
    client.sendPong(SENDING_BYTE_BUFFER);
    client.sendBinary(SENDING_BYTE_BUFFER);
    client.sendText(closeMessage);
    client.shutDown();
    URL metricsEndPoint = new URL("http:
    BufferedReader reader = new BufferedReader(new InputStreamReader(metricsEndPoint.openConnection()
            .getInputStream()));
    List<String> metricsList = reader.lines().filter(s -> !s.startsWith("
    Assert.assertTrue(metricsList.size() != 0);
    int count = 0;
    for (String line : metricsList) {
        // Each metrics line is "<key> <value>".
        int index = line.lastIndexOf(" ");
        String key = line.substring(0, index);
        String value = line.substring(index + 1);
        String metric = getMetricName(key);
        String connectionID = getTag(key, "connectionID");
        String clientOrServer = getTag(key, "client_or_server");
        String service = getTag(key, "service");
        String type = getTag(key, "type");
        String[] tags = {connectionID, clientOrServer, service, type};
        key = generateNewKey(metric, tags);
        // Normalize the connection id so keys are comparable across runs.
        key = key.replaceAll("connectionID=\"[^\"]*\",", "connectionID=connection_id,");
        String actualValue = expectedMetrics.get(key);
        if (actualValue != null) {
            count++;
            logger.info(key + " -- " + value);
            Assert.assertEquals(value, actualValue, "Unexpected value found for metric " + key + ".");
        }
    }
    Assert.assertEquals(count, expectedMetrics.size(), "metrics count is not equal to the expected metrics count.");
    reader.close();
}
|
client.sendText("ds");
|
/**
 * Sends a fixed set of WebSocket frames (5 text, 1 ping, 1 pong, 1 binary, 1 close
 * message), waits briefly, then scrapes the metrics endpoint and compares the
 * published metrics with the expected ones.
 *
 * NOTE(review): several string literals below (the ws/http URLs and the line-filter
 * prefix) appear truncated in this extract — confirm against the original file.
 *
 * @throws Exception on connection or I/O failure
 */
public void testMetrics() throws Exception {
    WebSocketTestClient client = new WebSocketTestClient("ws:
    client.handshake();
    client.sendText(MESSAGE);
    client.sendText(MESSAGE);
    client.sendText(MESSAGE);
    client.sendText(MESSAGE);
    client.sendText(MESSAGE);
    client.sendPing(SENDING_BYTE_BUFFER);
    client.sendPong(SENDING_BYTE_BUFFER);
    client.sendBinary(SENDING_BYTE_BUFFER);
    client.sendText(CLOSE_MESSAGE);
    // Give the server up to 10s to publish metrics before shutting the client down.
    CountDownLatch countDownLatch = new CountDownLatch(1);
    countDownLatch.await(10, TimeUnit.SECONDS);
    client.shutDown();
    URL metricsEndPoint = new URL("http:
    BufferedReader reader = new BufferedReader(new InputStreamReader(metricsEndPoint.openConnection()
            .getInputStream()));
    List<String> metricsList = reader.lines().filter(s -> !s.startsWith("
    Assert.assertTrue(metricsList.size() != 0);
    int count = 0;
    for (String line : metricsList) {
        // Each metrics line is "<key> <value>".
        int index = line.lastIndexOf(" ");
        String key = line.substring(0, index);
        String value = line.substring(index + 1);
        String metric = getMetricName(key);
        String connectionID = getTag(key, WebSocketObservabilityConstants.TAG_CONNECTION_ID);
        String clientOrServer = getTag(key, WebSocketObservabilityConstants.TAG_CONTEXT);
        String service = getTag(key, WebSocketObservabilityConstants.TAG_SERVICE);
        String type = getTag(key, WebSocketObservabilityConstants.TAG_MESSAGE_TYPE);
        String[] tags = {connectionID, clientOrServer, service, type};
        key = generateNewKey(metric, tags);
        String actualValue = expectedMetrics.get(key);
        if (actualValue != null) {
            count++;
            logger.info(key + " -- " + value);
            Assert.assertEquals(value, actualValue, "Unexpected value found for metric " + key + ".");
        }
    }
    Assert.assertEquals(count, expectedMetrics.size(), "metrics count is not equal to the expected metrics count.");
    reader.close();
}
|
/**
 * Integration test case verifying the WebSocket metrics published by a Ballerina server:
 * starts a server with metrics enabled, drives traffic, and checks the scraped metrics
 * against a table of expected key/value pairs.
 */
class WebSocketMetricsTestCase extends BaseTest {
    private static BServerInstance serverInstance;
    private static final Logger logger = LoggerFactory.getLogger(WebSocketMetricsTestCase.class);
    private static final String RESOURCE_LOCATION = "src" + File.separator + "test" + File.separator +
            "resources" + File.separator + "observability" + File.separator + "metrics" + File.separator;
    // Expected metric key -> expected value, populated by addMetrics().
    private Map<String, String> expectedMetrics = new HashMap<>();
    private static final ByteBuffer SENDING_BYTE_BUFFER = ByteBuffer.wrap(new byte[]{1, 2, 3, 4, 5});
    // Starts the test server with metrics enabled and prepares the expected metrics table.
    @BeforeGroups(value = "websocket-metrics-test", alwaysRun = true)
    private void setup() throws Exception {
        serverInstance = new BServerInstance(balServer);
        String balFile = new File(RESOURCE_LOCATION + "websocket-metrics-test.bal").getAbsolutePath();
        List<String> args = new ArrayList<>();
        args.add("--" + ObservabilityConstants.CONFIG_METRICS_ENABLED + "=true");
        serverInstance.startServer(balFile, null, args.toArray(new String[args.size()]), new int[] { 9090 });
        addMetrics();
    }
    // NOTE(review): @Test appears to directly precede cleanup() in this extract — the test
    // method body likely sits between them in the original file; confirm.
    @Test
    @AfterGroups(value = "websocket-metrics-test", alwaysRun = true)
    private void cleanup() throws Exception {
        serverInstance.shutdownServer();
    }
    // Fills expectedMetrics with the key/value pairs the test traffic should produce.
    private void addMetrics() {
        expectedMetrics.put("ws_messages_received_value{connectionID=connection_id,client_or_server=\"server\"," +
                "service=\"/basic/ws\",type=\"binary\",}", "1.0");
        expectedMetrics.put("ws_messages_sent_value{connectionID=connection_id,client_or_server=\"server\"," +
                "service=\"/basic/ws\",type=\"text\",}", "5.0");
        expectedMetrics.put("ws_connections_value{connectionID=connection_id,client_or_server=\"server\"," +
                "service=\"/basic/ws\",}", "0.0");
        expectedMetrics.put("ws_messages_received_value{connectionID=connection_id,client_or_server=\"server\"," +
                "service=\"/basic/ws\",type=\"text\",}", "6.0");
        expectedMetrics.put("ws_messages_sent_value{connectionID=connection_id,client_or_server=\"server\"," +
                "service=\"/basic/ws\",type=\"binary\",}", "1.0");
        expectedMetrics.put("ws_messages_sent_value{connectionID=connection_id,client_or_server=\"server\"," +
                "service=\"/basic/ws\",type=\"close\",}", "1.0");
        expectedMetrics.put("ws_messages_received_value{connectionID=connection_id,client_or_server=\"server\"," +
                "service=\"/basic/ws\",type=\"ping\",}", "1.0");
        expectedMetrics.put("ws_messages_received_value{connectionID=connection_id,client_or_server=\"server\"," +
                "service=\"/basic/ws\",type=\"pong\",}", "1.0");
        expectedMetrics.put("ws_errors_value{connectionID=connection_id,client_or_server=\"server\"," +
                "service=\"/basic/ws\",type=\"close\",}", "2.0");
        expectedMetrics.put("ws_messages_sent_value{connectionID=connection_id,client_or_server=\"server\"," +
                "service=\"/basic/ws\",type=\"pong\",}", "1.0");
    }
    // Extracts the metric name (everything before the tag block) from a metrics key.
    private String getMetricName(String key) {
        int index = key.lastIndexOf("{");
        return key.substring(0, index);
    }
    // Returns the 'tag="value",' fragment for the given tag, or "" when absent.
    private String getTag(String key, String tag) {
        Pattern connectionIDPattern = Pattern.compile(tag + "=\"[^\"]*\",");
        Matcher connectionIDMatcher = connectionIDPattern.matcher(key);
        if (connectionIDMatcher.find()) {
            return connectionIDMatcher.group(0);
        }
        return "";
    }
    // Rebuilds a metrics key from a metric name and an ordered list of tag fragments.
    private String generateNewKey(String metric, String[] tags) {
        String key = metric + "{";
        for (String tag: tags) {
            key = key + tag;
        }
        key = key + "}";
        return key;
    }
}
|
/**
 * Integration test case verifying the WebSocket metrics published by a Ballerina server:
 * starts a server with metrics enabled, drives traffic, and checks the scraped metrics
 * against a table of expected key/value pairs.
 */
class WebSocketMetricsTestCase extends BaseTest {
    private static BServerInstance serverInstance;
    private static final Logger logger = LoggerFactory.getLogger(WebSocketMetricsTestCase.class);
    // Payload sent as a regular text frame.
    private static final String MESSAGE = "test message";
    // Payload that asks the server to close the connection.
    private static final String CLOSE_MESSAGE = "closeMe";
    private static final String RESOURCE_LOCATION = "src" + File.separator + "test" + File.separator +
            "resources" + File.separator + "observability" + File.separator + "metrics" + File.separator;
    // Expected metric key -> expected value, populated by addMetrics().
    private Map<String, String> expectedMetrics = new HashMap<>();
    private static final ByteBuffer SENDING_BYTE_BUFFER = ByteBuffer.wrap(new byte[]{1, 2, 3, 4, 5});
    // Starts the test server with metrics enabled and prepares the expected metrics table.
    @BeforeGroups(value = "websocket-metrics-test", alwaysRun = true)
    private void setup() throws Exception {
        serverInstance = new BServerInstance(balServer);
        String balFile = new File(RESOURCE_LOCATION + "websocket-metrics-test.bal").getAbsolutePath();
        List<String> args = new ArrayList<>();
        args.add("--" + ObservabilityConstants.CONFIG_METRICS_ENABLED + "=true");
        args.add("--b7a.log.console.loglevel=INFO");
        serverInstance.startServer(balFile, null, args.toArray(new String[args.size()]), new int[] { 9090 });
        addMetrics();
    }
    /**
     * Creates a new WebSocket client and connects to the server. Sends 5 text messages, 1 ping, 1 ping, 1 binary, 1
     * close message and then closes the connection.
     *
     * Checks whether the published metrics are the same as the expected metrics.
     *
     * @throws Exception Error when executing the commands.
     */
    @Test
    @AfterGroups(value = "websocket-metrics-test", alwaysRun = true)
    private void cleanup() throws Exception {
        serverInstance.shutdownServer();
    }
    // Fills expectedMetrics with the key/value pairs the test traffic should produce.
    private void addMetrics() {
        expectedMetrics.put("ws_messages_received_value{client_or_server=\"server\"," +
                "service=\"/basic/ws\",type=\"binary\",}", "1.0");
        expectedMetrics.put("ws_messages_sent_value{client_or_server=\"server\"," +
                "service=\"/basic/ws\",type=\"text\",}", "5.0");
        expectedMetrics.put("ws_connections_value{client_or_server=\"server\"," +
                "service=\"/basic/ws\",}", "0.0");
        expectedMetrics.put("ws_messages_received_value{client_or_server=\"server\"," +
                "service=\"/basic/ws\",type=\"text\",}", "6.0");
        expectedMetrics.put("ws_messages_sent_value{client_or_server=\"server\"," +
                "service=\"/basic/ws\",type=\"binary\",}", "1.0");
        expectedMetrics.put("ws_messages_sent_value{client_or_server=\"server\"," +
                "service=\"/basic/ws\",type=\"close\",}", "1.0");
        expectedMetrics.put("ws_messages_received_value{client_or_server=\"server\"," +
                "service=\"/basic/ws\",type=\"pong\",}", "1.0");
        expectedMetrics.put("ws_errors_value{client_or_server=\"server\"," +
                "service=\"/basic/ws\",type=\"close\",}", "2.0");
    }
    // Extracts the metric name (everything before the tag block) from a metrics key.
    private String getMetricName(String key) {
        int index = key.lastIndexOf("{");
        return key.substring(0, index);
    }
    // Returns the 'tag="value",' fragment for the given tag, or "" when absent.
    private String getTag(String key, String tag) {
        Pattern connectionIDPattern = Pattern.compile(tag + "=\"[^\"]*\",");
        Matcher connectionIDMatcher = connectionIDPattern.matcher(key);
        if (connectionIDMatcher.find()) {
            return connectionIDMatcher.group(0);
        }
        return "";
    }
    // Rebuilds a metrics key from a metric name and an ordered list of tag fragments.
    private String generateNewKey(String metric, String[] tags) {
        String key = metric + "{";
        for (String tag: tags) {
            key = key + tag;
        }
        key = key + "}";
        return key;
    }
}
|
Should we add a comment here? If I'm not mistaken, I made the same suggestion earlier. The reasoning may not be immediately obvious to readers.
|
/**
 * Decides whether the type of the expression assigned to an optional record field should
 * be widened with nil.
 *
 * @param assignNode the assignment statement under analysis
 * @return true when a nullable expression is being assigned to an optional record field
 *         whose (implied) type is a simple basic type
 */
private static boolean shouldWidenExpressionTypeWithNil(BLangAssignment assignNode) {
    // Widening is only relevant when a nullable expression is assigned to an optional field.
    if (!assignNode.expr.getBType().isNullable() || !isAssignmentToOptionalField(assignNode)) {
        return false;
    }
    // The assignment target is a field access on a record; look up the field's implied type.
    BLangFieldBasedAccess fieldAccessNode = (BLangFieldBasedAccess) assignNode.varRef;
    BRecordType recordType = (BRecordType) Types.getImpliedType(fieldAccessNode.expr.getBType());
    BField field = recordType.fields.get(fieldAccessNode.field.value);
    BType fieldType = Types.getImpliedType(field.getType());
    // Only simple basic typed fields qualify for widening — TODO confirm the rationale.
    return TypeTags.isSimpleBasicType(fieldType.tag);
}
|
return false;
|
/**
 * Decides whether the type of the expression assigned to an optional record field should
 * be widened with nil.
 *
 * @param assignNode the assignment statement under analysis
 * @return true when a nullable expression is being assigned to an optional record field
 *         whose (implied) type is a simple basic type
 */
private static boolean shouldWidenExpressionTypeWithNil(BLangAssignment assignNode) {
    // Widening is only relevant when a nullable expression is assigned to an optional field.
    if (!assignNode.expr.getBType().isNullable() || !isAssignmentToOptionalField(assignNode)) {
        return false;
    }
    // The assignment target is a field access on a record; look up the field's implied type.
    BLangFieldBasedAccess fieldAccessNode = (BLangFieldBasedAccess) assignNode.varRef;
    BRecordType recordType = (BRecordType) Types.getImpliedType(fieldAccessNode.expr.getBType());
    BField field = recordType.fields.get(fieldAccessNode.field.value);
    BType fieldType = Types.getImpliedType(field.getType());
    // Only simple basic typed fields qualify for widening — TODO confirm the rationale.
    return TypeTags.isSimpleBasicType(fieldType.tag);
}
|
class definition node for which the initializer is created
* @param env The env for the type node
* @return The generated initializer method
*/
private BLangFunction createGeneratedInitializerFunction(BLangClassDefinition classDefinition, SymbolEnv env) {
    BLangFunction generatedInitFunc = createInitFunctionForClassDefn(classDefinition, env);
    // No user-defined init function: the generated initializer stands alone, just rewrite it.
    if (classDefinition.initFunction == null) {
        return rewrite(generatedInitFunc, env);
    }
    // Otherwise wire the generated initializer up with the user-defined init function.
    return wireUpGeneratedInitFunction(generatedInitFunc,
            (BObjectTypeSymbol) classDefinition.symbol, classDefinition.initFunction);
}
|
class definition node for which the initializer is created
* @param env The env for the type node
* @return The generated initializer method
*/
private BLangFunction createGeneratedInitializerFunction(BLangClassDefinition classDefinition, SymbolEnv env) {
    BLangFunction generatedInitFunc = createInitFunctionForClassDefn(classDefinition, env);
    // No user-defined init function: the generated initializer stands alone, just rewrite it.
    if (classDefinition.initFunction == null) {
        return rewrite(generatedInitFunc, env);
    }
    // Otherwise wire the generated initializer up with the user-defined init function.
    return wireUpGeneratedInitFunction(generatedInitFunc,
            (BObjectTypeSymbol) classDefinition.symbol, classDefinition.initFunction);
}
|
Don't you want to sample the value with the windowing information attached? The runner would be responsible for pulling out the attributes that are being sampled that can be introspected and/or saved.
|
/**
 * Counts the element once per window, lazily records its encoded size, optionally
 * samples the value, then forwards it to every registered consumer with that
 * consumer's execution state active.
 */
public void accept(WindowedValue<T> input) throws Exception {
    this.elementCountCounter.inc(input.getWindows().size());
    // Lazy update: the encoding cost is only paid if this element is chosen for sampling.
    this.sampledByteSizeDistribution.tryUpdate(input.getValue(), coder);
    if (outputSampler != null) {
        outputSampler.sample(input.getValue());
    }
    for (ConsumerAndMetadata consumerAndMetadata : consumerAndMetadatas) {
        ExecutionState state = consumerAndMetadata.getExecutionState();
        state.activate();
        try {
            consumerAndMetadata.getConsumer().accept(input);
        } finally {
            // Always deactivate, even when the consumer throws.
            state.deactivate();
        }
        this.sampledByteSizeDistribution.finishLazyUpdate();
    }
}
|
outputSampler.sample(input.getValue());
|
/**
 * Counts the element once per window, lazily records its encoded size, optionally
 * samples the value, then forwards it to the single delegate consumer with this
 * receiver's execution state active.
 */
public void accept(WindowedValue<T> input) throws Exception {
    this.elementCountCounter.inc(input.getWindows().size());
    // Lazy update: the encoding cost is only paid if this element is chosen for sampling.
    this.sampledByteSizeDistribution.tryUpdate(input.getValue(), this.coder);
    if (outputSampler != null) {
        outputSampler.sample(input.getValue());
    }
    executionState.activate();
    try {
        this.delegate.accept(input);
    } finally {
        // Always deactivate, even when the delegate throws.
        executionState.deactivate();
    }
    this.sampledByteSizeDistribution.finishLazyUpdate();
}
|
/**
 * An {@link FnDataReceiver} wrapper that registers element-count and sampled-byte-size
 * metrics for a PCollection (and optionally samples output values) before delegating
 * every element to the wrapped consumer.
 */
class MetricTrackingFnDataReceiver<T> implements FnDataReceiver<WindowedValue<T>> {
    // Consumer this receiver forwards every accepted element to.
    private final FnDataReceiver<WindowedValue<T>> delegate;
    // Execution state activated around the delegate call for attribution.
    private final ExecutionState executionState;
    private final BundleCounter elementCountCounter;
    private final SampleByteSizeDistribution<T> sampledByteSizeDistribution;
    // Coder used to lazily estimate encoded element sizes.
    private final Coder<T> coder;
    // Non-null only when data sampling is enabled for this PCollection.
    private final @Nullable OutputSampler<T> outputSampler;

    public MetricTrackingFnDataReceiver(
            String pCollectionId,
            Coder<T> coder,
            ConsumerAndMetadata consumerAndMetadata,
            @Nullable DataSampler dataSampler) {
        this.delegate = consumerAndMetadata.getConsumer();
        this.executionState = consumerAndMetadata.getExecutionState();
        HashMap<String, String> labels = new HashMap<>();
        labels.put(Labels.PCOLLECTION, pCollectionId);
        // Element-count metric, registered for bundle-progress reporting.
        MonitoringInfoMetricName elementCountMetricName =
                MonitoringInfoMetricName.named(MonitoringInfoConstants.Urns.ELEMENT_COUNT, labels);
        String elementCountShortId =
                shortIdMap.getOrCreateShortId(
                        new SimpleMonitoringInfoBuilder()
                                .setUrn(MonitoringInfoConstants.Urns.ELEMENT_COUNT)
                                .setType(TypeUrns.SUM_INT64_TYPE)
                                .setLabels(labels)
                                .build());
        this.elementCountCounter =
                Metrics.bundleProcessingThreadCounter(elementCountShortId, elementCountMetricName);
        bundleProgressReporterRegistrar.register(elementCountCounter);
        // Sampled encoded-byte-size distribution metric.
        MonitoringInfoMetricName sampledByteSizeMetricName =
                MonitoringInfoMetricName.named(Urns.SAMPLED_BYTE_SIZE, labels);
        String sampledByteSizeShortId =
                shortIdMap.getOrCreateShortId(
                        new SimpleMonitoringInfoBuilder()
                                .setUrn(Urns.SAMPLED_BYTE_SIZE)
                                .setType(TypeUrns.DISTRIBUTION_INT64_TYPE)
                                .setLabels(labels)
                                .build());
        BundleDistribution sampledByteSizeUnderlyingDistribution =
                Metrics.bundleProcessingThreadDistribution(
                        sampledByteSizeShortId, sampledByteSizeMetricName);
        this.sampledByteSizeDistribution =
                new SampleByteSizeDistribution<>(sampledByteSizeUnderlyingDistribution);
        bundleProgressReporterRegistrar.register(sampledByteSizeUnderlyingDistribution);
        this.coder = coder;
        if (dataSampler == null) {
            this.outputSampler = null;
        } else {
            this.outputSampler = dataSampler.sampleOutput(pCollectionId, coder);
        }
    }

    @Override
    // NOTE(review): the overridden method body is elided in this excerpt.
}
|
/**
 * Receiver wrapper that tracks per-PCollection metrics (element count, sampled
 * encoded byte size) and optional output sampling, then hands each element to a
 * single delegate consumer.
 */
class MetricTrackingFnDataReceiver<T> implements FnDataReceiver<WindowedValue<T>> {
    // The wrapped consumer all elements are forwarded to.
    private final FnDataReceiver<WindowedValue<T>> delegate;
    // Activated around delegate calls so work is attributed correctly.
    private final ExecutionState executionState;
    private final BundleCounter elementCountCounter;
    private final SampleByteSizeDistribution<T> sampledByteSizeDistribution;
    // Used for lazy encoded-size estimation of sampled elements.
    private final Coder<T> coder;
    // Null unless a DataSampler was supplied for this PCollection.
    private final @Nullable OutputSampler<T> outputSampler;

    public MetricTrackingFnDataReceiver(
            String pCollectionId,
            Coder<T> coder,
            ConsumerAndMetadata consumerAndMetadata,
            @Nullable DataSampler dataSampler) {
        this.delegate = consumerAndMetadata.getConsumer();
        this.executionState = consumerAndMetadata.getExecutionState();
        HashMap<String, String> labels = new HashMap<>();
        labels.put(Labels.PCOLLECTION, pCollectionId);
        // ELEMENT_COUNT monitoring info, registered for bundle-progress reporting.
        MonitoringInfoMetricName elementCountMetricName =
                MonitoringInfoMetricName.named(MonitoringInfoConstants.Urns.ELEMENT_COUNT, labels);
        String elementCountShortId =
                shortIdMap.getOrCreateShortId(
                        new SimpleMonitoringInfoBuilder()
                                .setUrn(MonitoringInfoConstants.Urns.ELEMENT_COUNT)
                                .setType(TypeUrns.SUM_INT64_TYPE)
                                .setLabels(labels)
                                .build());
        this.elementCountCounter =
                Metrics.bundleProcessingThreadCounter(elementCountShortId, elementCountMetricName);
        bundleProgressReporterRegistrar.register(elementCountCounter);
        // SAMPLED_BYTE_SIZE distribution monitoring info.
        MonitoringInfoMetricName sampledByteSizeMetricName =
                MonitoringInfoMetricName.named(Urns.SAMPLED_BYTE_SIZE, labels);
        String sampledByteSizeShortId =
                shortIdMap.getOrCreateShortId(
                        new SimpleMonitoringInfoBuilder()
                                .setUrn(Urns.SAMPLED_BYTE_SIZE)
                                .setType(TypeUrns.DISTRIBUTION_INT64_TYPE)
                                .setLabels(labels)
                                .build());
        BundleDistribution sampledByteSizeUnderlyingDistribution =
                Metrics.bundleProcessingThreadDistribution(
                        sampledByteSizeShortId, sampledByteSizeMetricName);
        this.sampledByteSizeDistribution =
                new SampleByteSizeDistribution<>(sampledByteSizeUnderlyingDistribution);
        bundleProgressReporterRegistrar.register(sampledByteSizeUnderlyingDistribution);
        this.coder = coder;
        if (dataSampler == null) {
            this.outputSampler = null;
        } else {
            this.outputSampler = dataSampler.sampleOutput(pCollectionId, coder);
        }
    }

    @Override
    // NOTE(review): the overridden method body is elided in this excerpt.
}
|
Also drop the ":\n" suffix from the log message — that formatting is not yours to add :-)
|
/**
 * Fetches current deployment metrics for every deployment of every application
 * and stores them, re-reading each application under its lock so concurrent
 * updates are not clobbered.
 */
protected void maintain() {
    for (Application application : controller().applications().asList()) {
        for (Deployment deployment : application.deployments().values()) {
            try {
                MetricsService.DeploymentMetrics metrics = controller().metricsService()
                        .getDeploymentMetrics(application.id(), deployment.zone());
                DeploymentMetrics appMetrics = new DeploymentMetrics(metrics.queriesPerSecond(), metrics.writesPerSecond(),
                        metrics.documentCount(), metrics.queryLatencyMillis(), metrics.writeLatencyMillis());
                try (Lock lock = controller().applications().lock(application.id())) {
                    // Re-read under the lock; use get() rather than require() so a
                    // concurrently deleted application yields null instead of throwing,
                    // which keeps the null-check below reachable.
                    application = controller().applications().get(application.id()).orElse(null);
                    if (application == null)
                        break;
                    // The deployment may also have been removed in the meantime.
                    deployment = application.deployments().get(deployment.zone());
                    if (deployment == null)
                        continue;
                    controller().applications().store(application.with(deployment.withMetrics(appMetrics)), lock);
                }
            }
            catch (UncheckedIOException e) {
                // Best effort: log (without a trailing ":\n" — the logger formats the cause)
                // and let the next maintenance run retry.
                log.log(Level.WARNING, "Timed out talking to YAMAS; retrying in " + maintenanceInterval(), e);
            }
        }
    }
}
|
log.log(Level.WARNING, "Timed out talking to YAMAS; retrying in " + maintenanceInterval() + ":\n", e);
|
/**
 * Fetches current deployment metrics for every deployment of every application
 * and stores them on the deployment, re-reading the application under its lock
 * to avoid clobbering concurrent updates.
 */
protected void maintain() {
    for (Application application : controller().applications().asList()) {
        for (Deployment deployment : application.deployments().values()) {
            try {
                MetricsService.DeploymentMetrics metrics = controller().metricsService()
                        .getDeploymentMetrics(application.id(), deployment.zone());
                DeploymentMetrics appMetrics = new DeploymentMetrics(metrics.queriesPerSecond(), metrics.writesPerSecond(),
                        metrics.documentCount(), metrics.queryLatencyMillis(), metrics.writeLatencyMillis());
                try (Lock lock = controller().applications().lock(application.id())) {
                    // Re-read under the lock; the application may have been deleted meanwhile.
                    application = controller().applications().get(application.id()).orElse(null);
                    if (application == null)
                        break;
                    // The deployment may also have been removed meanwhile.
                    deployment = application.deployments().get(deployment.zone());
                    if (deployment == null)
                        continue;
                    controller().applications().store(application.with(deployment.withMetrics(appMetrics)), lock);
                }
            }
            catch (UncheckedIOException e) {
                // Best effort: log and let the next maintenance run retry.
                log.log(Level.WARNING, "Timed out talking to YAMAS; retrying in " + maintenanceInterval(), e);
            }
        }
    }
}
|
/** Periodic maintenance job that pulls deployment metrics and stores them per deployment. */
class DeploymentMetricsMaintainer extends Maintainer {
    private static final Logger log = Logger.getLogger(DeploymentMetricsMaintainer.class.getName());
    DeploymentMetricsMaintainer(Controller controller, Duration duration, JobControl jobControl) {
        super(controller, duration, jobControl);
    }
    @Override
    // NOTE(review): the overridden maintain() body is elided in this excerpt.
}
|
/** Maintainer that periodically refreshes stored deployment metrics for all applications. */
class DeploymentMetricsMaintainer extends Maintainer {
    private static final Logger log = Logger.getLogger(DeploymentMetricsMaintainer.class.getName());
    DeploymentMetricsMaintainer(Controller controller, Duration duration, JobControl jobControl) {
        super(controller, duration, jobControl);
    }
    @Override
    // NOTE(review): the overridden maintain() body is elided in this excerpt.
}
|
Thanks for the reminder. I also compiled `flink-sql-connector-hive-connector-3.1.2`, and the same exception was thrown. I then decompiled and inspected the class file `org.apache.hadoop.hive.ql.metadata.VirtualColumn` in `flink-sql-connector-hive-connector-3.1.2.jar`, and found the line `public static final org.apache.flink.hive.shaded.com.google.common.collect.ImmutableSet<java.lang.String>`. This shaded type differs from the type of `VirtualColumn.VIRTUAL_COLUMN_NAMES` referenced in the `checkColumnName` method, which is why the exception is thrown. Replacing the field reference with a method call solves this problem.
|
/** Rejects column names that collide with Hive virtual columns. */
private static void checkColumnName(String columnName) throws SemanticException {
    ColumnInfo aliasHolder = new ColumnInfo();
    aliasHolder.setAlias(columnName);
    boolean isVirtual = VirtualColumn.isVirtualColumnBasedOnAlias(aliasHolder);
    if (isVirtual) {
        throw new SemanticException("Invalid column name " + columnName);
    }
}
|
ColumnInfo columnInfo = new ColumnInfo();
|
/** Fails when {@code columnName} would shadow a Hive virtual column. */
private static void checkColumnName(String columnName) throws SemanticException {
    ColumnInfo probe = new ColumnInfo();
    probe.setAlias(columnName);
    if (!VirtualColumn.isVirtualColumnBasedOnAlias(probe)) {
        return;
    }
    throw new SemanticException("Invalid column name " + columnName);
}
|
class HiveParserBaseSemanticAnalyzer {
private static final Logger LOG = LoggerFactory.getLogger(HiveParserBaseSemanticAnalyzer.class);
private HiveParserBaseSemanticAnalyzer() {}
/** Parses column schemas from {@code ast}, lower-casing the column names. */
public static List<FieldSchema> getColumns(HiveParserASTNode ast) throws SemanticException {
    return getColumns(ast, true);
}
/**
 * Parses column schemas from {@code ast}, discarding any primary-key /
 * not-null constraint information found alongside them.
 */
public static List<FieldSchema> getColumns(HiveParserASTNode ast, boolean lowerCase)
        throws SemanticException {
    return getColumns(ast, lowerCase, new ArrayList<>(), new ArrayList<>());
}
/**
 * Renders a type AST node as a Hive type string, recursing into
 * list/map/struct/union element types.
 */
public static String getTypeStringFromAST(HiveParserASTNode typeNode) throws SemanticException {
    switch (typeNode.getType()) {
        case HiveASTParser.TOK_LIST:
            return serdeConstants.LIST_TYPE_NAME
                    + "<"
                    + getTypeStringFromAST((HiveParserASTNode) typeNode.getChild(0))
                    + ">";
        case HiveASTParser.TOK_MAP:
            return serdeConstants.MAP_TYPE_NAME
                    + "<"
                    + getTypeStringFromAST((HiveParserASTNode) typeNode.getChild(0))
                    + ","
                    + getTypeStringFromAST((HiveParserASTNode) typeNode.getChild(1))
                    + ">";
        case HiveASTParser.TOK_STRUCT:
            return getStructTypeStringFromAST(typeNode);
        case HiveASTParser.TOK_UNIONTYPE:
            return getUnionTypeStringFromAST(typeNode);
        default:
            // Primitive / remaining types are resolved by the DDL analyzer.
            return HiveParserDDLSemanticAnalyzer.getTypeName(typeNode);
    }
}
/** Renders a TOK_STRUCT node as a Hive type string, e.g. {@code struct<a:int,b:string>}. */
private static String getStructTypeStringFromAST(HiveParserASTNode typeNode)
        throws SemanticException {
    typeNode = (HiveParserASTNode) typeNode.getChild(0);
    int fieldCount = typeNode.getChildCount();
    if (fieldCount <= 0) {
        throw new SemanticException("empty struct not allowed.");
    }
    StringBuilder sb = new StringBuilder(serdeConstants.STRUCT_TYPE_NAME).append("<");
    for (int i = 0; i < fieldCount; i++) {
        if (i > 0) {
            sb.append(",");
        }
        HiveParserASTNode field = (HiveParserASTNode) typeNode.getChild(i);
        sb.append(unescapeIdentifier(field.getChild(0).getText()))
                .append(":")
                .append(getTypeStringFromAST((HiveParserASTNode) field.getChild(1)));
    }
    return sb.append(">").toString();
}
/**
 * Renders a TOK_UNIONTYPE node as a Hive type string, e.g. {@code uniontype<int,string>}.
 *
 * @throws SemanticException if the union declares no alternatives
 */
private static String getUnionTypeStringFromAST(HiveParserASTNode typeNode)
        throws SemanticException {
    typeNode = (HiveParserASTNode) typeNode.getChild(0);
    int children = typeNode.getChildCount();
    if (children <= 0) {
        throw new SemanticException("empty union not allowed.");
    }
    StringBuilder buffer = new StringBuilder(serdeConstants.UNION_TYPE_NAME).append("<");
    for (int i = 0; i < children; i++) {
        buffer.append(getTypeStringFromAST((HiveParserASTNode) typeNode.getChild(i)));
        if (i < children - 1) {
            buffer.append(",");
        }
    }
    // Return directly rather than round-tripping through a reused local variable.
    return buffer.append(">").toString();
}
/**
 * Extracts column definitions from a column-list AST node.
 *
 * @param ast node whose children are column definitions and table constraints
 * @param lowerCase whether column names should be lower-cased
 * @param primaryKeys receives any PRIMARY KEY constraints found
 * @param notNulls receives any NOT NULL constraints found
 * @return the parsed column schemas, in declaration order
 * @throws SemanticException on FOREIGN KEY (unsupported), invalid names, or
 *     unsupported column constraints
 */
public static List<FieldSchema> getColumns(
        HiveParserASTNode ast,
        boolean lowerCase,
        List<PrimaryKey> primaryKeys,
        List<NotNullConstraint> notNulls)
        throws SemanticException {
    List<FieldSchema> colList = new ArrayList<>();
    int numCh = ast.getChildCount();
    List<PKInfo> pkInfos = new ArrayList<>();
    Map<String, FieldSchema> nametoFS = new HashMap<>();
    Tree parent = ast.getParent();
    for (int i = 0; i < numCh; i++) {
        FieldSchema col = new FieldSchema();
        HiveParserASTNode child = (HiveParserASTNode) ast.getChild(i);
        if (child.getToken().getType() == HiveASTParser.TOK_PRIMARY_KEY) {
            processPrimaryKeyInfos(child, pkInfos);
        } else if (child.getToken().getType() == HiveASTParser.TOK_FOREIGN_KEY) {
            throw new SemanticException("FOREIGN KEY is not supported.");
        } else {
            Tree grandChild = child.getChild(0);
            if (grandChild != null) {
                String name = grandChild.getText();
                if (lowerCase) {
                    name = name.toLowerCase();
                }
                // Reject names that collide with Hive virtual columns.
                checkColumnName(name);
                col.setName(unescapeIdentifier(name));
                HiveParserASTNode typeChild = (HiveParserASTNode) (child.getChild(1));
                col.setType(getTypeStringFromAST(typeChild));
                HiveParserASTNode constraintChild = null;
                // Child layout: name, type, [comment], [constraint].
                if (child.getChildCount() == 4) {
                    col.setComment(unescapeSQLString(child.getChild(2).getText()));
                    constraintChild = (HiveParserASTNode) child.getChild(3);
                } else if (child.getChildCount() == 3
                        && ((HiveParserASTNode) child.getChild(2)).getToken().getType()
                                == HiveASTParser.StringLiteral) {
                    col.setComment(unescapeSQLString(child.getChild(2).getText()));
                } else if (child.getChildCount() == 3) {
                    constraintChild = (HiveParserASTNode) child.getChild(2);
                }
                if (constraintChild != null) {
                    String[] qualifiedTabName =
                            getQualifiedTableName((HiveParserASTNode) parent.getChild(0));
                    switch (constraintChild.getToken().getType()) {
                        case HiveASTParser.TOK_NOT_NULL:
                            notNulls.add(
                                    processNotNull(
                                            constraintChild,
                                            qualifiedTabName[0],
                                            qualifiedTabName[1],
                                            col.getName()));
                            break;
                        default:
                            throw new SemanticException(
                                    "Unsupported constraint node: " + constraintChild);
                    }
                }
            }
            nametoFS.put(col.getName(), col);
            colList.add(col);
        }
    }
    // Resolve collected PK column names against the parsed columns.
    if (!pkInfos.isEmpty()) {
        processPrimaryKeys((HiveParserASTNode) parent, pkInfos, primaryKeys, nametoFS);
    }
    return colList;
}
/**
 * Parses a NOT NULL constraint node into a {@link NotNullConstraint}.
 * ENABLE/NOVALIDATE/NORELY children are accepted but keep the defaults
 * (enable=true, validate=false, rely=false).
 */
private static NotNullConstraint processNotNull(
        HiveParserASTNode nnNode, String dbName, String tblName, String colName)
        throws SemanticException {
    boolean enable = true;
    boolean validate = false;
    boolean rely = false;
    for (int i = 0; i < nnNode.getChildCount(); i++) {
        HiveParserASTNode child = (HiveParserASTNode) nnNode.getChild(i);
        switch (child.getToken().getType()) {
            case HiveASTParser.TOK_ENABLE:
            case HiveASTParser.TOK_NOVALIDATE:
            case HiveASTParser.TOK_NORELY:
                // Defaults already match these options.
                break;
            case HiveASTParser.TOK_DISABLE:
                enable = false;
                break;
            case HiveASTParser.TOK_VALIDATE:
                validate = true;
                break;
            case HiveASTParser.TOK_RELY:
                rely = true;
                break;
            default:
                throw new SemanticException(
                        "Unexpected node for NOT NULL constraint: " + child);
        }
    }
    return new NotNullConstraint(dbName, tblName, colName, null, enable, validate, rely);
}
/**
 * Resolves collected PK column names against the parsed columns and emits a
 * {@link PrimaryKey} constraint per column for the table referenced by {@code parent}.
 *
 * @throws SemanticException if a PK column is not among the table's columns
 */
private static void processPrimaryKeys(
        HiveParserASTNode parent,
        List<PKInfo> pkInfos,
        List<PrimaryKey> primaryKeys,
        Map<String, FieldSchema> nametoFS)
        throws SemanticException {
    // Removed an unused local counter (`int cnt = 1;`) that was never read.
    String[] qualifiedTabName = getQualifiedTableName((HiveParserASTNode) parent.getChild(0));
    for (PKInfo pkInfo : pkInfos) {
        String pk = pkInfo.colName;
        if (!nametoFS.containsKey(pk)) {
            throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(pk));
        }
        primaryKeys.add(
                new PrimaryKey(
                        qualifiedTabName[0],
                        qualifiedTabName[1],
                        pk,
                        pkInfo.constraintName,
                        false,
                        false,
                        pkInfo.rely));
    }
}
/**
 * Parses a TOK_PRIMARY_KEY node: collects the PK column names into {@code pkInfos}
 * and applies the shared constraint options. ENABLE and VALIDATE are rejected
 * as unsupported.
 */
private static void processPrimaryKeyInfos(HiveParserASTNode pkNode, List<PKInfo> pkInfos)
        throws SemanticException {
    String userSpecifiedName = null;
    boolean enable = true;
    boolean validate = false;
    boolean rely = false;
    for (int i = 0; i < pkNode.getChildCount(); i++) {
        HiveParserASTNode child = (HiveParserASTNode) pkNode.getChild(i);
        switch (child.getType()) {
            case HiveASTParser.TOK_ENABLE:
            case HiveASTParser.TOK_NOVALIDATE:
            case HiveASTParser.TOK_NORELY:
                // Defaults already match these options.
                break;
            case HiveASTParser.TOK_DISABLE:
                enable = false;
                break;
            case HiveASTParser.TOK_VALIDATE:
                validate = true;
                break;
            case HiveASTParser.TOK_RELY:
                rely = true;
                break;
            case HiveASTParser.TOK_CONSTRAINT_NAME:
                userSpecifiedName =
                        unescapeIdentifier(child.getChild(0).getText().toLowerCase());
                break;
            case HiveASTParser.TOK_TABCOLNAME:
                for (int j = 0; j < child.getChildCount(); j++) {
                    String colName = child.getChild(j).getText();
                    // Reject virtual-column collisions before recording the PK column.
                    checkColumnName(colName);
                    pkInfos.add(new PKInfo(unescapeIdentifier(colName.toLowerCase())));
                }
                break;
            default:
                throw new SemanticException(
                        "Unexpected node for PRIMARY KEY constraint: " + child);
        }
    }
    // Only DISABLE NOVALIDATE primary keys are supported.
    if (enable) {
        throw new SemanticException(
                "Invalid Primary Key syntax ENABLE feature not supported yet");
    }
    if (validate) {
        throw new SemanticException(
                "Invalid Primary Key syntax VALIDATE feature not supported yet");
    }
    if (pkInfos.isEmpty()) {
        throw new SemanticException("No column specified as the primary key");
    }
    // Propagate the shared options to every collected PK column.
    for (PKInfo pkInfo : pkInfos) {
        pkInfo.constraintName = userSpecifiedName;
        pkInfo.rely = rely;
    }
}
/** Joins {db, table} into "db.table"; rejects anything not made of exactly two parts. */
public static String getDotName(String[] qname) throws SemanticException {
    String joined = StringUtils.join(qname, ".");
    if (qname.length != 2) {
        throw new SemanticException(ErrorMsg.INVALID_TABLE_NAME, joined);
    }
    return joined;
}
/**
 * Converts parsed key/value property pairs into a map.
 *
 * @param prop AST node whose children are key/value pairs
 * @param mapProp map that receives the mappings; a pair without a value maps to null
 */
public static void readProps(HiveParserASTNode prop, Map<String, String> mapProp) {
    int pairCount = prop.getChildCount();
    for (int idx = 0; idx < pairCount; idx++) {
        Tree pair = prop.getChild(idx);
        String key = unescapeSQLString(pair.getChild(0).getText());
        Tree valueNode = pair.getChild(1);
        String value = valueNode == null ? null : unescapeSQLString(valueNode.getText());
        mapProp.put(key, value);
    }
}
/**
 * Splits a TOK_TABNAME node into {dbName, tableName}; a single-part name is
 * resolved against the current database via {@code Utilities.getDbTableName}.
 */
public static String[] getQualifiedTableName(HiveParserASTNode tabNameNode)
        throws SemanticException {
    boolean isTabName = tabNameNode.getType() == HiveASTParser.TOK_TABNAME;
    int parts = tabNameNode.getChildCount();
    if (!isTabName || (parts != 1 && parts != 2)) {
        throw new SemanticException(
                HiveParserErrorMsg.getMsg(ErrorMsg.INVALID_TABLE_NAME, tabNameNode));
    }
    if (parts == 1) {
        return Utilities.getDbTableName(unescapeIdentifier(tabNameNode.getChild(0).getText()));
    }
    return new String[] {
        unescapeIdentifier(tabNameNode.getChild(0).getText()),
        unescapeIdentifier(tabNameNode.getChild(1).getText())
    };
}
/**
 * Decodes a charset-prefixed literal (_charset'literal' or _charset 0xHEX) into a
 * (charset, decoded string) pair.
 *
 * @param charSetName the charset token, including its leading underscore
 * @param charSetString either a quoted literal or a 0x-prefixed hex string
 * @throws SemanticException if the charset name is not a supported encoding
 */
public static Tuple2<String, String> charSetString(String charSetName, String charSetString)
        throws SemanticException {
    try {
        // Drop the leading '_' of the charset token.
        charSetName = charSetName.substring(1);
        if (charSetString.charAt(0) == '\'') {
            return Tuple2.of(
                    charSetName,
                    new String(unescapeSQLString(charSetString).getBytes(), charSetName));
        } else {
            // Hex form: 0xDEADBEEF -> raw bytes -> string in the given charset.
            assert charSetString.charAt(0) == '0';
            assert charSetString.charAt(1) == 'x';
            charSetString = charSetString.substring(2);
            byte[] bArray = new byte[charSetString.length() / 2];
            int j = 0;
            for (int i = 0; i < charSetString.length(); i += 2) {
                int val =
                        Character.digit(charSetString.charAt(i), 16) * 16
                                + Character.digit(charSetString.charAt(i + 1), 16);
                // Map unsigned 0..255 into Java's signed byte range.
                if (val > 127) {
                    val = val - 256;
                }
                bArray[j++] = (byte) val;
            }
            return Tuple2.of(charSetName, new String(bArray, charSetName));
        }
    } catch (UnsupportedEncodingException e) {
        throw new SemanticException(e);
    }
}
/** Strips surrounding quotes from a literal; delegates to {@link PlanUtils#stripQuotes}. */
public static String stripQuotes(String val) {
    return PlanUtils.stripQuotes(val);
}
/**
 * Removes one pair of enclosing backticks from an identifier, if present.
 * Backticks let users escape table/column names and aliases that coincide
 * with Hive keywords.
 *
 * @param val the possibly back-quoted identifier; may be null
 * @return the identifier without its enclosing backticks, or null for null input
 */
public static String unescapeIdentifier(String val) {
    if (val == null) {
        return null;
    }
    boolean backquoted = val.charAt(0) == '`' && val.charAt(val.length() - 1) == '`';
    return backquoted ? val.substring(1, val.length() - 1) : val;
}
/**
 * Returns the table name from a table node without its schema qualifier:
 * both "db.table" and "table" yield "table".
 *
 * @param node the table node
 * @return the unqualified, unescaped table name
 */
public static String getUnescapedUnqualifiedTableName(HiveParserASTNode node) {
    assert node.getChildCount() <= 2;
    // A two-child node is (db, table); keep only the table part.
    HiveParserASTNode tableNode =
            node.getChildCount() == 2 ? (HiveParserASTNode) node.getChild(1) : node;
    return getUnescapedName(tableNode);
}
/**
 * Gets the dequoted name from a table/column node.
 *
 * @param tableOrColumnNode the table or column node
 * @return for a table node, "db.tab" or "tab"; for a column node, the column name
 */
public static String getUnescapedName(HiveParserASTNode tableOrColumnNode) {
    return getUnescapedName(tableOrColumnNode, null);
}
/**
 * Gets the dequoted name from a table/column node, qualifying bare table names
 * with {@code currentDatabase} when one is supplied.
 */
public static String getUnescapedName(
        HiveParserASTNode tableOrColumnNode, String currentDatabase) {
    int tokenType = tableOrColumnNode.getToken().getType();
    if (tokenType == HiveASTParser.StringLiteral) {
        return unescapeSQLString(tableOrColumnNode.getText());
    }
    if (tokenType != HiveASTParser.TOK_TABNAME) {
        return unescapeIdentifier(tableOrColumnNode.getText());
    }
    Map.Entry<String, String> dbTablePair = getDbTableNamePair(tableOrColumnNode);
    String tableName = dbTablePair.getValue();
    String qualifier = dbTablePair.getKey() != null ? dbTablePair.getKey() : currentDatabase;
    return qualifier == null ? tableName : qualifier + "." + tableName;
}
/** Splits a TOK_TABNAME node into a (db, table) pair; db is null when unqualified. */
public static Map.Entry<String, String> getDbTableNamePair(HiveParserASTNode tableNameNode) {
    assert (tableNameNode.getToken().getType() == HiveASTParser.TOK_TABNAME);
    boolean qualified = tableNameNode.getChildCount() == 2;
    String dbName = qualified ? unescapeIdentifier(tableNameNode.getChild(0).getText()) : null;
    String tableName =
            unescapeIdentifier(tableNameNode.getChild(qualified ? 1 : 0).getText());
    return Pair.of(dbName, tableName);
}
/**
 * Unescapes a quoted SQL string literal: skips text outside the quotes, drops the
 * enclosing quotes, and resolves backslash escapes (4-digit unicode, 3-digit octal,
 * and single-character escapes).
 */
@SuppressWarnings("nls")
public static String unescapeSQLString(String b) {
    Character enclosure = null;
    StringBuilder sb = new StringBuilder(b.length());
    for (int i = 0; i < b.length(); i++) {
        char currentChar = b.charAt(i);
        if (enclosure == null) {
            // Skip everything until the opening quote (single or double).
            if (currentChar == '\'' || b.charAt(i) == '\"') {
                enclosure = currentChar;
            }
            continue;
        }
        if (enclosure.equals(currentChar)) {
            // Closing quote: drop it and leave "inside literal" state.
            enclosure = null;
            continue;
        }
        // Backslash-u followed by four hex digits: unicode escape.
        if (currentChar == '\\' && (i + 6 < b.length()) && b.charAt(i + 1) == 'u') {
            int code = 0;
            int base = i + 2;
            for (int j = 0; j < 4; j++) {
                int digit = Character.digit(b.charAt(j + base), 16);
                code = (code << 4) + digit;
            }
            sb.append((char) code);
            i += 5;
            continue;
        }
        // Backslash followed by three octal digits in the range 000-177.
        if (currentChar == '\\' && (i + 4 < b.length())) {
            char i1 = b.charAt(i + 1);
            char i2 = b.charAt(i + 2);
            char i3 = b.charAt(i + 3);
            if ((i1 >= '0' && i1 <= '1')
                    && (i2 >= '0' && i2 <= '7')
                    && (i3 >= '0' && i3 <= '7')) {
                byte bVal = (byte) ((i3 - '0') + ((i2 - '0') * 8) + ((i1 - '0') * 8 * 8));
                byte[] bValArr = new byte[1];
                bValArr[0] = bVal;
                String tmp = new String(bValArr);
                sb.append(tmp);
                i += 3;
                continue;
            }
        }
        // Single-character escapes.
        if (currentChar == '\\' && (i + 2 < b.length())) {
            char n = b.charAt(i + 1);
            switch (n) {
                case '0':
                    sb.append("\0");
                    break;
                case '\'':
                    sb.append("'");
                    break;
                case '"':
                    sb.append("\"");
                    break;
                case 'b':
                    sb.append("\b");
                    break;
                case 'n':
                    sb.append("\n");
                    break;
                case 'r':
                    sb.append("\r");
                    break;
                case 't':
                    sb.append("\t");
                    break;
                case 'Z':
                    sb.append("\u001A");
                    break;
                case '\\':
                    sb.append("\\");
                    break;
                case '%':
                    // %, _ keep their backslash so LIKE wildcards survive unescaping.
                    sb.append("\\%");
                    break;
                case '_':
                    sb.append("\\_");
                    break;
                default:
                    sb.append(n);
            }
            i++;
        } else {
            sb.append(currentChar);
        }
    }
    return sb.toString();
}
/**
 * Validates a partition spec against the table: checks partition column names
 * (requiring a full spec when {@code shouldBeFull}) and then the value types.
 */
public static void validatePartSpec(
        Table tbl,
        Map<String, String> partSpec,
        HiveParserASTNode astNode,
        HiveConf conf,
        boolean shouldBeFull,
        FrameworkConfig frameworkConfig,
        RelOptCluster cluster)
        throws SemanticException {
    tbl.validatePartColumnNames(partSpec, shouldBeFull);
    validatePartColumnType(tbl, partSpec, astNode, conf, frameworkConfig, cluster);
}
/**
 * Recursively walks partition-spec AST nodes and collects an expression for each
 * static partition value into {@code astExprNodeMap}.
 *
 * @return true when the spec contains no dynamic (value-less) partitions
 */
private static boolean getPartExprNodeDesc(
        HiveParserASTNode astNode,
        HiveConf conf,
        Map<HiveParserASTNode, ExprNodeDesc> astExprNodeMap,
        FrameworkConfig frameworkConfig,
        RelOptCluster cluster)
        throws SemanticException {
    if (astNode == null) {
        return true;
    } else if ((astNode.getChildren() == null) || (astNode.getChildren().size() == 0)) {
        // A childless TOK_PARTVAL denotes a dynamic partition.
        return astNode.getType() != HiveASTParser.TOK_PARTVAL;
    }
    HiveParserTypeCheckCtx typeCheckCtx =
            new HiveParserTypeCheckCtx(null, frameworkConfig, cluster);
    String defaultPartitionName = HiveConf.getVar(conf, HiveConf.ConfVars.DEFAULTPARTITIONNAME);
    boolean result = true;
    for (Node childNode : astNode.getChildren()) {
        HiveParserASTNode childASTNode = (HiveParserASTNode) childNode;
        if (childASTNode.getType() != HiveASTParser.TOK_PARTVAL) {
            result =
                    getPartExprNodeDesc(
                                    childASTNode,
                                    conf,
                                    astExprNodeMap,
                                    frameworkConfig,
                                    cluster)
                            && result;
        } else {
            // A TOK_PARTVAL with only the column child (no value) is dynamic.
            boolean isDynamicPart = childASTNode.getChildren().size() <= 1;
            result = !isDynamicPart && result;
            if (!isDynamicPart) {
                HiveParserASTNode partVal =
                        (HiveParserASTNode) childASTNode.getChildren().get(1);
                // Skip the reserved default-partition value.
                if (!defaultPartitionName.equalsIgnoreCase(
                        unescapeSQLString(partVal.getText()))) {
                    astExprNodeMap.put(
                            (HiveParserASTNode) childASTNode.getChildren().get(0),
                            HiveParserTypeCheckProcFactory.genExprNode(partVal, typeCheckCtx)
                                    .get(partVal));
                }
            }
        }
    }
    return result;
}
/** Drops one pair of enclosing backticks, if both are present. */
private static String stripIdentifierQuotes(String val) {
    boolean quoted = val.charAt(0) == '`' && val.charAt(val.length() - 1) == '`';
    return quoted ? val.substring(1, val.length() - 1) : val;
}
/** Collects all DISTINCT aggregation expressions (TOK_FUNCTIONDI) from the tree map. */
static List<HiveParserASTNode> doPhase1GetDistinctFuncExprs(
        HashMap<String, HiveParserASTNode> aggregationTrees) {
    List<HiveParserASTNode> distinctExprs = new ArrayList<>();
    for (HiveParserASTNode tree : aggregationTrees.values()) {
        if (tree.getToken().getType() == HiveASTParser.TOK_FUNCTIONDI) {
            distinctExprs.add(tree);
        }
    }
    return distinctExprs;
}
/** Returns the alias of a TOK_TABREF: the explicit alias if given, else the bare table name. */
static String findSimpleTableName(HiveParserASTNode tabref, int aliasIndex) {
    assert tabref.getType() == HiveASTParser.TOK_TABREF;
    if (aliasIndex != 0) {
        return unescapeIdentifier(tabref.getChild(aliasIndex).getText());
    }
    return getUnescapedUnqualifiedTableName((HiveParserASTNode) (tabref.getChild(0)));
}
/**
 * Scans a TOK_TABREF's children and returns the child indices of
 * {alias, table properties, bucket sample, split sample}; sample/props indices
 * are -1 when absent, the alias index defaults to 0.
 */
static int[] findTabRefIdxs(HiveParserASTNode tabref) {
    assert tabref.getType() == HiveASTParser.TOK_TABREF;
    int aliasIndex = 0;
    int propsIndex = -1;
    int tsampleIndex = -1;
    int ssampleIndex = -1;
    for (int index = 1; index < tabref.getChildCount(); index++) {
        HiveParserASTNode ct = (HiveParserASTNode) tabref.getChild(index);
        switch (ct.getToken().getType()) {
            case HiveASTParser.TOK_TABLEBUCKETSAMPLE:
                tsampleIndex = index;
                break;
            case HiveASTParser.TOK_TABLESPLITSAMPLE:
                ssampleIndex = index;
                break;
            case HiveASTParser.TOK_TABLEPROPERTIES:
                propsIndex = index;
                break;
            default:
                aliasIndex = index;
                break;
        }
    }
    return new int[] {aliasIndex, propsIndex, tsampleIndex, ssampleIndex};
}
/**
 * Renders a VALUES-clause literal AST node back to its textual form.
 * Only numeric, string, boolean, negated, and NULL literals are supported.
 */
static String unparseExprForValuesClause(HiveParserASTNode expr) throws SemanticException {
    switch (expr.getToken().getType()) {
        case HiveASTParser.Number:
            return expr.getText();
        case HiveASTParser.StringLiteral:
            return unescapeSQLString(expr.getText());
        case HiveASTParser.KW_FALSE:
            // FALSE is rendered as the empty string — presumably relying on "" casting
            // to boolean false downstream. NOTE(review): confirm this is intended.
            return "";
        case HiveASTParser.KW_TRUE:
            return "TRUE";
        case HiveASTParser.MINUS:
            return "-"
                    + unparseExprForValuesClause((HiveParserASTNode) expr.getChildren().get(0));
        case HiveASTParser.TOK_NULL:
            return null;
        default:
            throw new SemanticException(
                    "Expression of type " + expr.getText() + " not supported in insert/values");
    }
}
/**
 * Returns Hive's generated internal column name for position {@code pos};
 * delegates to {@link HiveConf#getColumnInternalName}.
 */
public static String getColumnInternalName(int pos) {
    return HiveConf.getColumnInternalName(pos);
}
/**
 * Grouping-set bitmasks for ROLLUP over {@code size} keys:
 * 0, 1, 3, 7, ... i.e. (2^i - 1) for each i in [0, size].
 */
public static List<Integer> getGroupingSetsForRollup(int size) {
    List<Integer> masks = new ArrayList<>(size + 1);
    for (int i = 0; i <= size; i++) {
        masks.add((1 << i) - 1);
    }
    return masks;
}
/** Grouping-set bitmasks for CUBE over {@code size} keys: all 2^size subsets, 0..2^size-1. */
public static List<Integer> getGroupingSetsForCube(int size) {
    int count = 1 << size;
    List<Integer> masks = new ArrayList<>(count);
    for (int mask = 0; mask < count; mask++) {
        masks.add(mask);
    }
    return masks;
}
/**
 * Translates explicit GROUPING SETS expressions into bitmaps over the GROUP BY keys.
 * A bit is cleared for each key that participates in the grouping set.
 *
 * @throws SemanticException if a set references an expression not in GROUP BY,
 *     or if every resulting set is empty
 */
public static List<Integer> getGroupingSets(
        List<HiveParserASTNode> groupByExpr, HiveParserQBParseInfo parseInfo, String dest)
        throws SemanticException {
    // Map each GROUP BY expression (by its tree form) to its position.
    Map<String, Integer> exprPos = new HashMap<>();
    for (int i = 0; i < groupByExpr.size(); ++i) {
        HiveParserASTNode node = groupByExpr.get(i);
        exprPos.put(node.toStringTree(), i);
    }
    HiveParserASTNode root = parseInfo.getGroupByForClause(dest);
    List<Integer> result = new ArrayList<>(root == null ? 0 : root.getChildCount());
    if (root != null) {
        for (int i = 0; i < root.getChildCount(); ++i) {
            HiveParserASTNode child = (HiveParserASTNode) root.getChild(i);
            if (child.getType() != HiveASTParser.TOK_GROUPING_SETS_EXPRESSION) {
                continue;
            }
            // Start with all bits set, then clear one bit per referenced key.
            int bitmap = com.google.common.math.IntMath.pow(2, groupByExpr.size()) - 1;
            for (int j = 0; j < child.getChildCount(); ++j) {
                String treeAsString = child.getChild(j).toStringTree();
                Integer pos = exprPos.get(treeAsString);
                if (pos == null) {
                    throw new SemanticException(
                            HiveParserUtils.generateErrorMessage(
                                    (HiveParserASTNode) child.getChild(j),
                                    ErrorMsg.HIVE_GROUPING_SETS_EXPR_NOT_IN_GROUPBY
                                            .getErrorCodedMsg()));
                }
                bitmap = HiveParserUtils.unsetBit(bitmap, groupByExpr.size() - pos - 1);
            }
            result.add(bitmap);
        }
    }
    if (checkForEmptyGroupingSets(
            result, com.google.common.math.IntMath.pow(2, groupByExpr.size()) - 1)) {
        throw new SemanticException("Empty grouping sets not allowed");
    }
    return result;
}
/** True iff every bitmap equals the all-keys mask (vacuously true for an empty list). */
private static boolean checkForEmptyGroupingSets(List<Integer> bitmaps, int groupingIdAllSet) {
    for (int mask : bitmaps) {
        if (mask != groupingIdAllSet) {
            return false;
        }
    }
    return true;
}
/**
 * Returns the effective GROUP BY expressions for {@code dest}: for SELECT DISTINCT,
 * every select expression acts as a grouping key; otherwise the explicit GROUP BY
 * children are returned, excluding GROUPING SETS expression nodes.
 */
public static List<HiveParserASTNode> getGroupByForClause(
        HiveParserQBParseInfo parseInfo, String dest) {
    if (parseInfo.getSelForClause(dest).getToken().getType() == HiveASTParser.TOK_SELECTDI) {
        HiveParserASTNode selectExprs = parseInfo.getSelForClause(dest);
        List<HiveParserASTNode> result =
                new ArrayList<>(selectExprs == null ? 0 : selectExprs.getChildCount());
        if (selectExprs != null) {
            for (int i = 0; i < selectExprs.getChildCount(); ++i) {
                // Query hints are not grouping keys.
                if (((HiveParserASTNode) selectExprs.getChild(i)).getToken().getType()
                        == HiveASTParser.QUERY_HINT) {
                    continue;
                }
                HiveParserASTNode grpbyExpr =
                        (HiveParserASTNode) selectExprs.getChild(i).getChild(0);
                result.add(grpbyExpr);
            }
        }
        return result;
    } else {
        HiveParserASTNode grpByExprs = parseInfo.getGroupByForClause(dest);
        List<HiveParserASTNode> result =
                new ArrayList<>(grpByExprs == null ? 0 : grpByExprs.getChildCount());
        if (grpByExprs != null) {
            for (int i = 0; i < grpByExprs.getChildCount(); ++i) {
                HiveParserASTNode grpbyExpr = (HiveParserASTNode) grpByExprs.getChild(i);
                if (grpbyExpr.getType() != HiveASTParser.TOK_GROUPING_SETS_EXPRESSION) {
                    result.add(grpbyExpr);
                }
            }
        }
        return result;
    }
}
/** Builds the lower-cased alias id, prefixing the query-block id when present. */
static String getAliasId(String alias, HiveParserQB qb) {
    String qbId = qb.getId();
    return (qbId == null ? alias : qbId + ":" + alias).toLowerCase();
}
/**
 * Converts a Hive window boundary spec into a Calcite {@link RexWindowBound}
 * (UNBOUNDED / CURRENT ROW / offset PRECEDING|FOLLOWING).
 *
 * @return the bound, or null when {@code spec} is null
 */
public static RexWindowBound getBound(
        HiveParserWindowingSpec.BoundarySpec spec, RelOptCluster cluster) {
    RexWindowBound res = null;
    if (spec != null) {
        SqlParserPos dummyPos = new SqlParserPos(1, 1);
        // Amount literal; null means unbounded. NOTE(review): an amount of 0 is also
        // mapped to null, i.e. treated like UNBOUNDED — confirm this is intended.
        SqlNode amt =
                spec.getAmt() == 0
                                || spec.getAmt()
                                        == HiveParserWindowingSpec.BoundarySpec.UNBOUNDED_AMOUNT
                        ? null
                        : SqlLiteral.createExactNumeric(
                                String.valueOf(spec.getAmt()), new SqlParserPos(2, 2));
        RexNode amtLiteral =
                amt == null
                        ? null
                        : cluster.getRexBuilder()
                                .makeLiteral(
                                        spec.getAmt(),
                                        cluster.getTypeFactory()
                                                .createSqlType(SqlTypeName.INTEGER),
                                        true);
        switch (spec.getDirection()) {
            case PRECEDING:
                if (amt == null) {
                    res =
                            RexWindowBound.create(
                                    SqlWindow.createUnboundedPreceding(dummyPos), null);
                } else {
                    SqlCall call = (SqlCall) SqlWindow.createPreceding(amt, dummyPos);
                    res =
                            RexWindowBound.create(
                                    call,
                                    cluster.getRexBuilder()
                                            .makeCall(call.getOperator(), amtLiteral));
                }
                break;
            case CURRENT:
                res = RexWindowBound.create(SqlWindow.createCurrentRow(dummyPos), null);
                break;
            case FOLLOWING:
                if (amt == null) {
                    res =
                            RexWindowBound.create(
                                    SqlWindow.createUnboundedFollowing(dummyPos), null);
                } else {
                    SqlCall call = (SqlCall) SqlWindow.createFollowing(amt, dummyPos);
                    res =
                            RexWindowBound.create(
                                    call,
                                    cluster.getRexBuilder()
                                            .makeCall(call.getOperator(), amtLiteral));
                }
                break;
        }
    }
    return res;
}
public static Phase1Ctx initPhase1Ctx() {
Phase1Ctx ctx1 = new Phase1Ctx();
ctx1.nextNum = 0;
ctx1.dest = "reduce";
return ctx1;
}
static void warn(String msg) {
SessionState.getConsole().printInfo(String.format("Warning: %s", msg));
}
static void handleQueryWindowClauses(
HiveParserQB qb, HiveParserBaseSemanticAnalyzer.Phase1Ctx ctx1, HiveParserASTNode node)
throws SemanticException {
HiveParserWindowingSpec spec = qb.getWindowingSpec(ctx1.dest);
for (int i = 0; i < node.getChildCount(); i++) {
processQueryWindowClause(spec, (HiveParserASTNode) node.getChild(i));
}
}
    /**
     * Rewrites positional aliases (constant numbers) in GROUP BY and ORDER BY clauses into the
     * select-list expressions they refer to, walking the whole AST iteratively.
     *
     * <p>Whether rewriting happens is controlled by HIVE_GROUPBY_ORDERBY_POSITION_ALIAS and the
     * finer-grained "hive.groupby.position.alias" / "hive.orderby.position.alias" settings
     * (group-by defaults to disabled, order-by to enabled). When rewriting is disabled, a
     * warning is printed and the constant is left in place.
     *
     * @param ast root of the (sub)tree to rewrite; mutated in place
     * @param conf configuration consulted for the position-alias switches
     * @throws SemanticException if a position is out of the select list's 1..N range, or an
     *     order-by position alias is combined with a SELECT * reference
     */
    public static void processPositionAlias(HiveParserASTNode ast, HiveConf conf)
            throws SemanticException {
        boolean isBothByPos =
                HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_GROUPBY_ORDERBY_POSITION_ALIAS);
        boolean isGbyByPos =
                isBothByPos
                        || Boolean.parseBoolean(conf.get("hive.groupby.position.alias", "false"));
        boolean isObyByPos =
                isBothByPos
                        || Boolean.parseBoolean(conf.get("hive.orderby.position.alias", "true"));
        // Explicit stack instead of recursion: depth-first traversal over the AST.
        Deque<HiveParserASTNode> stack = new ArrayDeque<>();
        stack.push(ast);
        while (!stack.isEmpty()) {
            HiveParserASTNode next = stack.pop();
            if (next.getChildCount() == 0) {
                continue;
            }
            boolean isAllCol;
            HiveParserASTNode selectNode = null;
            HiveParserASTNode groupbyNode = null;
            HiveParserASTNode orderbyNode = null;
            // Locate the SELECT / GROUP BY / ORDER BY children of this node, if any.
            int childCount = next.getChildCount();
            for (int childPos = 0; childPos < childCount; ++childPos) {
                HiveParserASTNode node = (HiveParserASTNode) next.getChild(childPos);
                int type = node.getToken().getType();
                if (type == HiveASTParser.TOK_SELECT) {
                    selectNode = node;
                } else if (type == HiveASTParser.TOK_GROUPBY) {
                    groupbyNode = node;
                } else if (type == HiveASTParser.TOK_ORDERBY) {
                    orderbyNode = node;
                }
            }
            if (selectNode != null) {
                int selectExpCnt = selectNode.getChildCount();
                // Replace position aliases in GROUP BY with the select expression at that index.
                if (groupbyNode != null) {
                    for (int childPos = 0; childPos < groupbyNode.getChildCount(); ++childPos) {
                        HiveParserASTNode node = (HiveParserASTNode) groupbyNode.getChild(childPos);
                        if (node.getToken().getType() == HiveASTParser.Number) {
                            if (isGbyByPos) {
                                int pos = Integer.parseInt(node.getText());
                                if (pos > 0 && pos <= selectExpCnt) {
                                    // Positions are 1-based; splice in the select expression.
                                    groupbyNode.setChild(
                                            childPos, selectNode.getChild(pos - 1).getChild(0));
                                } else {
                                    throw new SemanticException(
                                            ErrorMsg.INVALID_POSITION_ALIAS_IN_GROUPBY.getMsg(
                                                    "Position alias: "
                                                            + pos
                                                            + " does not exist\n"
                                                            + "The Select List is indexed from 1 to "
                                                            + selectExpCnt));
                                }
                            } else {
                                warn(
                                        "Using constant number "
                                                + node.getText()
                                                + " in group by. If you try to use position alias when hive.groupby.position.alias is false, the position alias will be ignored.");
                            }
                        }
                    }
                }
                // Replace position aliases in ORDER BY, unless the select list contains '*'
                // (in which case positions are ambiguous and rejected).
                if (orderbyNode != null) {
                    isAllCol = false;
                    for (int childPos = 0; childPos < selectNode.getChildCount(); ++childPos) {
                        HiveParserASTNode node =
                                (HiveParserASTNode) selectNode.getChild(childPos).getChild(0);
                        if (node != null
                                && node.getToken().getType() == HiveASTParser.TOK_ALLCOLREF) {
                            isAllCol = true;
                        }
                    }
                    for (int childPos = 0; childPos < orderbyNode.getChildCount(); ++childPos) {
                        HiveParserASTNode colNode =
                                (HiveParserASTNode) orderbyNode.getChild(childPos).getChild(0);
                        HiveParserASTNode node = (HiveParserASTNode) colNode.getChild(0);
                        if (node != null && node.getToken().getType() == HiveASTParser.Number) {
                            if (isObyByPos) {
                                if (!isAllCol) {
                                    int pos = Integer.parseInt(node.getText());
                                    if (pos > 0 && pos <= selectExpCnt) {
                                        colNode.setChild(
                                                0, selectNode.getChild(pos - 1).getChild(0));
                                    } else {
                                        throw new SemanticException(
                                                ErrorMsg.INVALID_POSITION_ALIAS_IN_ORDERBY.getMsg(
                                                        "Position alias: "
                                                                + pos
                                                                + " does not exist\n"
                                                                + "The Select List is indexed from 1 to "
                                                                + selectExpCnt));
                                    }
                                } else {
                                    throw new SemanticException(
                                            ErrorMsg.NO_SUPPORTED_ORDERBY_ALLCOLREF_POS.getMsg());
                                }
                            } else {
                                warn(
                                        "Using constant number "
                                                + node.getText()
                                                + " in order by. If you try to use position alias when hive.orderby.position.alias is false, the position alias will be ignored.");
                            }
                        }
                    }
                }
            }
            // Push children in reverse so they are visited left-to-right.
            for (int i = next.getChildren().size() - 1; i >= 0; i--) {
                stack.push((HiveParserASTNode) next.getChildren().get(i));
            }
        }
    }
static PartitionSpec processPartitionSpec(HiveParserASTNode node) {
PartitionSpec pSpec = new PartitionSpec();
int exprCnt = node.getChildCount();
for (int i = 0; i < exprCnt; i++) {
PartitionExpression exprSpec = new PartitionExpression();
exprSpec.setExpression((HiveParserASTNode) node.getChild(i));
pSpec.addExpression(exprSpec);
}
return pSpec;
}
static OrderSpec processOrderSpec(HiveParserASTNode sortNode) {
OrderSpec oSpec = new OrderSpec();
int exprCnt = sortNode.getChildCount();
for (int i = 0; i < exprCnt; i++) {
OrderExpression exprSpec = new OrderExpression();
HiveParserASTNode orderSpec = (HiveParserASTNode) sortNode.getChild(i);
HiveParserASTNode nullOrderSpec = (HiveParserASTNode) orderSpec.getChild(0);
exprSpec.setExpression((HiveParserASTNode) nullOrderSpec.getChild(0));
if (orderSpec.getType() == HiveASTParser.TOK_TABSORTCOLNAMEASC) {
exprSpec.setOrder(Order.ASC);
} else {
exprSpec.setOrder(Order.DESC);
}
if (nullOrderSpec.getType() == HiveASTParser.TOK_NULLS_FIRST) {
exprSpec.setNullOrder(NullOrder.NULLS_FIRST);
} else {
exprSpec.setNullOrder(NullOrder.NULLS_LAST);
}
oSpec.addExpression(exprSpec);
}
return oSpec;
}
static PartitioningSpec processPTFPartitionSpec(HiveParserASTNode pSpecNode) {
PartitioningSpec partitioning = new PartitioningSpec();
HiveParserASTNode firstChild = (HiveParserASTNode) pSpecNode.getChild(0);
int type = firstChild.getType();
if (type == HiveASTParser.TOK_DISTRIBUTEBY || type == HiveASTParser.TOK_CLUSTERBY) {
PartitionSpec pSpec = processPartitionSpec(firstChild);
partitioning.setPartSpec(pSpec);
HiveParserASTNode sortNode =
pSpecNode.getChildCount() > 1
? (HiveParserASTNode) pSpecNode.getChild(1)
: null;
if (sortNode != null) {
OrderSpec oSpec = processOrderSpec(sortNode);
partitioning.setOrderSpec(oSpec);
}
} else if (type == HiveASTParser.TOK_SORTBY || type == HiveASTParser.TOK_ORDERBY) {
OrderSpec oSpec = processOrderSpec(firstChild);
partitioning.setOrderSpec(oSpec);
}
return partitioning;
}
static HiveParserWindowingSpec.WindowFunctionSpec processWindowFunction(
HiveParserASTNode node, HiveParserASTNode wsNode) throws SemanticException {
HiveParserWindowingSpec.WindowFunctionSpec wfSpec =
new HiveParserWindowingSpec.WindowFunctionSpec();
switch (node.getType()) {
case HiveASTParser.TOK_FUNCTIONSTAR:
wfSpec.setStar(true);
break;
case HiveASTParser.TOK_FUNCTIONDI:
wfSpec.setDistinct(true);
break;
}
wfSpec.setExpression(node);
HiveParserASTNode nameNode = (HiveParserASTNode) node.getChild(0);
wfSpec.setName(nameNode.getText());
for (int i = 1; i < node.getChildCount() - 1; i++) {
HiveParserASTNode child = (HiveParserASTNode) node.getChild(i);
wfSpec.addArg(child);
}
if (wsNode != null) {
HiveParserWindowingSpec.WindowSpec ws = processWindowSpec(wsNode);
wfSpec.setWindowSpec(ws);
}
return wfSpec;
}
static boolean containsLeadLagUDF(HiveParserASTNode expressionTree) {
int exprTokenType = expressionTree.getToken().getType();
if (exprTokenType == HiveASTParser.TOK_FUNCTION) {
assert (expressionTree.getChildCount() != 0);
if (expressionTree.getChild(0).getType() == HiveASTParser.Identifier) {
String functionName = unescapeIdentifier(expressionTree.getChild(0).getText());
functionName = functionName.toLowerCase();
if (FunctionRegistry.LAG_FUNC_NAME.equals(functionName)
|| FunctionRegistry.LEAD_FUNC_NAME.equals(functionName)) {
return true;
}
}
}
for (int i = 0; i < expressionTree.getChildCount(); i++) {
if (containsLeadLagUDF((HiveParserASTNode) expressionTree.getChild(i))) {
return true;
}
}
return false;
}
static void processQueryWindowClause(HiveParserWindowingSpec spec, HiveParserASTNode node)
throws SemanticException {
HiveParserASTNode nameNode = (HiveParserASTNode) node.getChild(0);
HiveParserASTNode wsNode = (HiveParserASTNode) node.getChild(1);
if (spec.getWindowSpecs() != null
&& spec.getWindowSpecs().containsKey(nameNode.getText())) {
throw new SemanticException(
HiveParserUtils.generateErrorMessage(
nameNode,
"Duplicate definition of window "
+ nameNode.getText()
+ " is not allowed"));
}
HiveParserWindowingSpec.WindowSpec ws = processWindowSpec(wsNode);
spec.addWindowSpec(nameNode.getText(), ws);
}
static HiveParserWindowingSpec.WindowSpec processWindowSpec(HiveParserASTNode node)
throws SemanticException {
boolean hasSrcId = false, hasPartSpec = false, hasWF = false;
int srcIdIdx = -1, partIdx = -1, wfIdx = -1;
for (int i = 0; i < node.getChildCount(); i++) {
int type = node.getChild(i).getType();
switch (type) {
case HiveASTParser.Identifier:
hasSrcId = true;
srcIdIdx = i;
break;
case HiveASTParser.TOK_PARTITIONINGSPEC:
hasPartSpec = true;
partIdx = i;
break;
case HiveASTParser.TOK_WINDOWRANGE:
case HiveASTParser.TOK_WINDOWVALUES:
hasWF = true;
wfIdx = i;
break;
}
}
HiveParserWindowingSpec.WindowSpec ws = new HiveParserWindowingSpec.WindowSpec();
if (hasSrcId) {
HiveParserASTNode nameNode = (HiveParserASTNode) node.getChild(srcIdIdx);
ws.setSourceId(nameNode.getText());
}
if (hasPartSpec) {
HiveParserASTNode partNode = (HiveParserASTNode) node.getChild(partIdx);
PartitioningSpec partitioning = processPTFPartitionSpec(partNode);
ws.setPartitioning(partitioning);
}
if (hasWF) {
HiveParserASTNode wfNode = (HiveParserASTNode) node.getChild(wfIdx);
HiveParserWindowingSpec.WindowFrameSpec wfSpec = processWindowFrame(wfNode);
ws.setWindowFrame(wfSpec);
}
return ws;
}
static HiveParserWindowingSpec.WindowFrameSpec processWindowFrame(HiveParserASTNode node)
throws SemanticException {
int type = node.getType();
HiveParserWindowingSpec.BoundarySpec start = null, end = null;
start = processBoundary((HiveParserASTNode) node.getChild(0));
if (node.getChildCount() > 1) {
end = processBoundary((HiveParserASTNode) node.getChild(1));
}
return new HiveParserWindowingSpec.WindowFrameSpec(
type == HiveASTParser.TOK_WINDOWVALUES
? HiveParserWindowingSpec.WindowType.RANGE
: HiveParserWindowingSpec.WindowType.ROWS,
start,
end);
}
static HiveParserWindowingSpec.BoundarySpec processBoundary(HiveParserASTNode node)
throws SemanticException {
HiveParserWindowingSpec.BoundarySpec bs = new HiveParserWindowingSpec.BoundarySpec();
int type = node.getType();
boolean hasAmt = true;
switch (type) {
case HiveASTParser.KW_PRECEDING:
bs.setDirection(WindowingSpec.Direction.PRECEDING);
break;
case HiveASTParser.KW_FOLLOWING:
bs.setDirection(WindowingSpec.Direction.FOLLOWING);
break;
case HiveASTParser.KW_CURRENT:
bs.setDirection(WindowingSpec.Direction.CURRENT);
hasAmt = false;
break;
}
if (hasAmt) {
HiveParserASTNode amtNode = (HiveParserASTNode) node.getChild(0);
if (amtNode.getType() == HiveASTParser.KW_UNBOUNDED) {
bs.setAmt(HiveParserWindowingSpec.BoundarySpec.UNBOUNDED_AMOUNT);
} else {
int amt = Integer.parseInt(amtNode.getText());
if (amt <= 0) {
throw new SemanticException(
"Window Frame Boundary Amount must be a positive integer, provided amount is: "
+ amt);
}
bs.setAmt(amt);
}
}
return bs;
}
public static void removeOBInSubQuery(HiveParserQBExpr qbExpr) {
if (qbExpr == null) {
return;
}
if (qbExpr.getOpcode() == HiveParserQBExpr.Opcode.NULLOP) {
HiveParserQB subQB = qbExpr.getQB();
HiveParserQBParseInfo parseInfo = subQB.getParseInfo();
String alias = qbExpr.getAlias();
Map<String, HiveParserASTNode> destToOrderBy = parseInfo.getDestToOrderBy();
Map<String, HiveParserASTNode> destToSortBy = parseInfo.getDestToSortBy();
final String warning =
"WARNING: Order/Sort by without limit in sub query or view ["
+ alias
+ "] is removed, as it's pointless and bad for performance.";
if (destToOrderBy != null) {
for (String dest : destToOrderBy.keySet()) {
if (parseInfo.getDestLimit(dest) == null) {
removeASTChild(destToOrderBy.get(dest));
destToOrderBy.remove(dest);
LOG.warn(warning);
}
}
}
if (destToSortBy != null) {
for (String dest : destToSortBy.keySet()) {
if (parseInfo.getDestLimit(dest) == null) {
removeASTChild(destToSortBy.get(dest));
destToSortBy.remove(dest);
LOG.warn(warning);
}
}
}
for (String subAlias : subQB.getSubqAliases()) {
removeOBInSubQuery(subQB.getSubqForAlias(subAlias));
}
} else {
removeOBInSubQuery(qbExpr.getQBExpr1());
removeOBInSubQuery(qbExpr.getQBExpr2());
}
}
public static TableType obtainTableType(Table tabMetaData) {
if (tabMetaData.getStorageHandler() != null
&& tabMetaData
.getStorageHandler()
.toString()
.equals(HiveParserConstants.DRUID_HIVE_STORAGE_HANDLER_ID)) {
return TableType.DRUID;
}
return TableType.NATIVE;
}
/* This method returns the flip big-endian representation of value */
public static ImmutableBitSet convert(int value, int length) {
BitSet bits = new BitSet();
for (int index = length - 1; index >= 0; index--) {
if (value % 2 != 0) {
bits.set(index);
}
value = value >>> 1;
}
bits.flip(0, length);
return ImmutableBitSet.fromBitSet(bits);
}
public static Map<String, Integer> buildHiveColNameToInputPosMap(
List<ExprNodeDesc> colList, HiveParserRowResolver inputRR) {
Map<Integer, ExprNodeDesc> hashCodeToColumnDesc = new HashMap<>();
HiveParserExprNodeDescUtils.getExprNodeColumnDesc(colList, hashCodeToColumnDesc);
Map<String, Integer> res = new HashMap<>();
String exprNodecolName;
for (ExprNodeDesc exprDesc : hashCodeToColumnDesc.values()) {
exprNodecolName = ((ExprNodeColumnDesc) exprDesc).getColumn();
res.put(exprNodecolName, inputRR.getPosition(exprNodecolName));
}
return res;
}
public static Map<String, Integer> buildHiveToCalciteColumnMap(HiveParserRowResolver rr) {
Map<String, Integer> map = new HashMap<>();
for (ColumnInfo ci : rr.getRowSchema().getSignature()) {
map.put(ci.getInternalName(), rr.getPosition(ci.getInternalName()));
}
return Collections.unmodifiableMap(map);
}
public static org.apache.calcite.util.Pair<List<CorrelationId>, ImmutableBitSet>
getCorrelationUse(RexCall call) {
List<CorrelationId> correlIDs = new ArrayList<>();
ImmutableBitSet.Builder requiredColumns = ImmutableBitSet.builder();
call.accept(new HiveParserUtils.CorrelationCollector(correlIDs, requiredColumns));
if (correlIDs.isEmpty()) {
return null;
}
return org.apache.calcite.util.Pair.of(correlIDs, requiredColumns.build());
}
public static boolean topLevelConjunctCheck(
HiveParserASTNode searchCond, ObjectPair<Boolean, Integer> subqInfo) {
if (searchCond.getType() == HiveASTParser.KW_OR) {
subqInfo.setFirst(Boolean.TRUE);
if (subqInfo.getSecond() > 1) {
return false;
}
}
if (searchCond.getType() == HiveASTParser.TOK_SUBQUERY_EXPR) {
subqInfo.setSecond(subqInfo.getSecond() + 1);
return subqInfo.getSecond() <= 1 || !subqInfo.getFirst();
}
for (int i = 0; i < searchCond.getChildCount(); i++) {
boolean validSubQuery =
topLevelConjunctCheck((HiveParserASTNode) searchCond.getChild(i), subqInfo);
if (!validSubQuery) {
return false;
}
}
return true;
}
public static void addToGBExpr(
HiveParserRowResolver groupByOutputRowResolver,
HiveParserRowResolver groupByInputRowResolver,
HiveParserASTNode grpbyExpr,
ExprNodeDesc grpbyExprNDesc,
List<ExprNodeDesc> gbExprNDescLst,
List<String> outputColumnNames) {
int i = gbExprNDescLst.size();
String field = getColumnInternalName(i);
outputColumnNames.add(field);
gbExprNDescLst.add(grpbyExprNDesc);
ColumnInfo outColInfo = new ColumnInfo(field, grpbyExprNDesc.getTypeInfo(), null, false);
groupByOutputRowResolver.putExpression(grpbyExpr, outColInfo);
addAlternateGByKeyMappings(
grpbyExpr, outColInfo, groupByInputRowResolver, groupByOutputRowResolver);
}
public static int getWindowSpecIndx(HiveParserASTNode wndAST) {
int wi = wndAST.getChildCount() - 1;
if (wi <= 0 || (wndAST.getChild(wi).getType() != HiveASTParser.TOK_WINDOWSPEC)) {
wi = -1;
}
return wi;
}
    /**
     * Registers extra (tab_alias, col_alias) lookups for a group-by key in the output row
     * resolver, so later references like "b.x" or "x" resolve to the same column info.
     */
    private static void addAlternateGByKeyMappings(
            HiveParserASTNode gByExpr,
            ColumnInfo colInfo,
            HiveParserRowResolver inputRR,
            HiveParserRowResolver outputRR) {
        // Case 1: qualified reference "tab.col" — take both aliases straight from the AST.
        if (gByExpr.getType() == HiveASTParser.DOT
                && gByExpr.getChild(0).getType() == HiveASTParser.TOK_TABLE_OR_COL) {
            String tabAlias =
                    HiveParserBaseSemanticAnalyzer.unescapeIdentifier(
                            gByExpr.getChild(0).getChild(0).getText().toLowerCase());
            String colAlias =
                    HiveParserBaseSemanticAnalyzer.unescapeIdentifier(
                            gByExpr.getChild(1).getText().toLowerCase());
            outputRR.put(tabAlias, colAlias, colInfo);
        } else if (gByExpr.getType() == HiveASTParser.TOK_TABLE_OR_COL) {
            // Case 2: bare column reference — recover the table alias from the input resolver.
            String colAlias =
                    HiveParserBaseSemanticAnalyzer.unescapeIdentifier(
                            gByExpr.getChild(0).getText().toLowerCase());
            String tabAlias = null;
            /*
             * If the input to the GBy has a table alias for the column, then add an entry based on that tab_alias.
             * For e.g. this query: select b.x, count(*) from t1 b group by x needs (tab_alias=b, col_alias=x) in the
             * GBy RR. tab_alias=b comes from looking at the HiveParserRowResolver that is the
             * ancestor before any GBy/ReduceSinks added for the GBY operation.
             */
            try {
                ColumnInfo pColInfo = inputRR.get(tabAlias, colAlias);
                tabAlias = pColInfo == null ? null : pColInfo.getTabAlias();
            } catch (SemanticException se) {
                // Best-effort lookup: if the resolver cannot answer, fall back to a null
                // table alias rather than failing the analysis.
            }
            outputRR.put(tabAlias, colAlias, colInfo);
        }
    }
    /**
     * Detects references to SELECT-list aliases inside the HAVING clause. Hive historically
     * permits this non-standard usage, so each occurrence is only logged as a warning; the
     * matching alias is also registered in {@code inputRR} so the reference resolves.
     *
     * @param qb query block whose expression-to-alias map is consulted
     * @param havingExpr the HAVING clause AST to scan
     * @param inputRR row resolver the aliases are added to (mutated)
     * @param semanticAnalyzer used to recover the original HAVING text for the warning message
     */
    public static void validateNoHavingReferenceToAlias(
            HiveParserQB qb,
            HiveParserASTNode havingExpr,
            HiveParserRowResolver inputRR,
            HiveParserSemanticAnalyzer semanticAnalyzer)
            throws SemanticException {
        HiveParserQBParseInfo qbPI = qb.getParseInfo();
        Map<HiveParserASTNode, String> exprToAlias = qbPI.getAllExprToColumnAlias();
        for (Map.Entry<HiveParserASTNode, String> exprAndAlias : exprToAlias.entrySet()) {
            final HiveParserASTNode expr = exprAndAlias.getKey();
            final String alias = exprAndAlias.getValue();
            // Make the aliased expression resolvable under its alias (empty table prefix).
            if (inputRR.getExpression(expr) != null) {
                inputRR.put("", alias, inputRR.getExpression(expr));
            }
            final Set<Object> aliasReferences = new HashSet<>();
            // Visitor that records every TOK_TABLE_OR_COL node whose identifier equals the alias.
            TreeVisitorAction action =
                    new TreeVisitorAction() {
                        @Override
                        public Object pre(Object t) {
                            if (HiveASTParseDriver.ADAPTOR.getType(t)
                                    == HiveASTParser.TOK_TABLE_OR_COL) {
                                Object c = HiveASTParseDriver.ADAPTOR.getChild(t, 0);
                                if (c != null
                                        && HiveASTParseDriver.ADAPTOR.getType(c)
                                                == HiveASTParser.Identifier
                                        && HiveASTParseDriver.ADAPTOR.getText(c).equals(alias)) {
                                    aliasReferences.add(t);
                                }
                            }
                            return t;
                        }

                        @Override
                        public Object post(Object t) {
                            return t;
                        }
                    };
            new TreeVisitor(HiveASTParseDriver.ADAPTOR).visit(havingExpr, action);
            // Non-standard but tolerated: warn with the original HAVING text for context.
            if (aliasReferences.size() > 0) {
                String havingClause =
                        semanticAnalyzer
                                .ctx
                                .getTokenRewriteStream()
                                .toString(
                                        havingExpr.getTokenStartIndex(),
                                        havingExpr.getTokenStopIndex());
                String msg =
                        String.format(
                                "Encountered Select alias '%s' in having clause '%s'"
                                        + " This is non standard behavior.",
                                alias, havingClause);
                LOG.warn(msg);
            }
        }
    }
public static List<RexNode> getPartitionKeys(
PartitionSpec partitionSpec,
HiveParserRexNodeConverter converter,
HiveParserRowResolver inputRR,
HiveParserTypeCheckCtx typeCheckCtx,
HiveParserSemanticAnalyzer semanticAnalyzer)
throws SemanticException {
List<RexNode> res = new ArrayList<>();
if (partitionSpec != null) {
List<PartitionExpression> expressions = partitionSpec.getExpressions();
for (PartitionExpression expression : expressions) {
typeCheckCtx.setAllowStatefulFunctions(true);
ExprNodeDesc exp =
semanticAnalyzer.genExprNodeDesc(
expression.getExpression(), inputRR, typeCheckCtx);
res.add(converter.convert(exp));
}
}
return res;
}
public static List<RexFieldCollation> getOrderKeys(
OrderSpec orderSpec,
HiveParserRexNodeConverter converter,
HiveParserRowResolver inputRR,
HiveParserTypeCheckCtx typeCheckCtx,
HiveParserSemanticAnalyzer semanticAnalyzer)
throws SemanticException {
List<RexFieldCollation> orderKeys = new ArrayList<>();
if (orderSpec != null) {
List<OrderExpression> oExprs = orderSpec.getExpressions();
for (OrderExpression oExpr : oExprs) {
typeCheckCtx.setAllowStatefulFunctions(true);
ExprNodeDesc exp =
semanticAnalyzer.genExprNodeDesc(
oExpr.getExpression(), inputRR, typeCheckCtx);
RexNode ordExp = converter.convert(exp);
Set<SqlKind> flags = new HashSet<>();
if (oExpr.getOrder() == Order.DESC) {
flags.add(SqlKind.DESCENDING);
}
if (oExpr.getNullOrder() == NullOrder.NULLS_FIRST) {
flags.add(SqlKind.NULLS_FIRST);
} else if (oExpr.getNullOrder() == NullOrder.NULLS_LAST) {
flags.add(SqlKind.NULLS_LAST);
} else {
throw new SemanticException(
"Unexpected null ordering option: " + oExpr.getNullOrder());
}
orderKeys.add(new RexFieldCollation(ordExp, flags));
}
}
return orderKeys;
}
    /**
     * Resolves an aggregate/window function call from its AST into an {@link AggInfo}:
     * parameters, return type, name, and distinct/all-columns flags.
     *
     * <p>Ranking functions get a fixed return type (double for percent_rank, int otherwise)
     * and take the window's order expressions as parameters. Other functions are resolved
     * through the UDAF evaluator machinery; if that fails, the expression is re-typechecked as
     * a GenericUDF to obtain the return type.
     *
     * @param aggAst the function call AST (child 0 is the function name)
     * @param aggFnLstArgIndx index of the last argument child to collect (children 1..n)
     * @param winFuncSpec window spec; its order expressions feed ranking functions
     */
    public static AggInfo getHiveAggInfo(
            HiveParserASTNode aggAst,
            int aggFnLstArgIndx,
            HiveParserRowResolver inputRR,
            HiveParserWindowingSpec.WindowFunctionSpec winFuncSpec,
            HiveParserSemanticAnalyzer semanticAnalyzer,
            FrameworkConfig frameworkConfig,
            RelOptCluster cluster)
            throws SemanticException {
        AggInfo aInfo;

        // 1. Collect the aggregation function's parameters from the AST children.
        ArrayList<ExprNodeDesc> aggParameters = new ArrayList<>();
        for (int i = 1; i <= aggFnLstArgIndx; i++) {
            HiveParserASTNode paraExpr = (HiveParserASTNode) aggAst.getChild(i);
            ExprNodeDesc paraExprNode = semanticAnalyzer.genExprNodeDesc(paraExpr, inputRR);
            aggParameters.add(paraExprNode);
        }

        // 2. Determine the return type.
        boolean isDistinct = aggAst.getType() == HiveASTParser.TOK_FUNCTIONDI;
        TypeInfo udafRetType = null;

        String aggName = unescapeIdentifier(aggAst.getChild(0).getText());
        boolean isAllColumns = false;
        if (FunctionRegistry.isRankingFunction(aggName)) {
            // Ranking functions have fixed return types and operate over the order keys.
            if (aggName.equalsIgnoreCase("percent_rank")) {
                udafRetType = TypeInfoFactory.doubleTypeInfo;
            } else {
                udafRetType = TypeInfoFactory.intTypeInfo;
            }
            for (OrderExpression orderExpr : winFuncSpec.windowSpec.getOrder().getExpressions()) {
                aggParameters.add(
                        semanticAnalyzer.genExprNodeDesc(orderExpr.getExpression(), inputRR));
            }
        } else {
            try {
                isAllColumns = aggAst.getType() == HiveASTParser.TOK_FUNCTIONSTAR;
                GenericUDAFEvaluator.Mode amode =
                        HiveParserUtils.groupByDescModeToUDAFMode(
                                GroupByDesc.Mode.COMPLETE, isDistinct);
                GenericUDAFEvaluator genericUDAFEvaluator;
                if (aggName.toLowerCase().equals(FunctionRegistry.LEAD_FUNC_NAME)
                        || aggName.toLowerCase().equals(FunctionRegistry.LAG_FUNC_NAME)) {
                    // lead/lag use the windowing evaluator; their list return type is
                    // unwrapped to the element type.
                    ArrayList<ObjectInspector> originalParameterTypeInfos =
                            HiveParserUtils.getWritableObjectInspector(aggParameters);
                    genericUDAFEvaluator =
                            FunctionRegistry.getGenericWindowingEvaluator(
                                    aggName, originalParameterTypeInfos, isDistinct, isAllColumns);
                    HiveParserBaseSemanticAnalyzer.GenericUDAFInfo udaf =
                            HiveParserUtils.getGenericUDAFInfo(
                                    genericUDAFEvaluator, amode, aggParameters);
                    udafRetType = ((ListTypeInfo) udaf.returnType).getListElementTypeInfo();
                } else {
                    genericUDAFEvaluator =
                            HiveParserUtils.getGenericUDAFEvaluator(
                                    aggName,
                                    aggParameters,
                                    aggAst,
                                    isDistinct,
                                    isAllColumns,
                                    frameworkConfig.getOperatorTable());
                    HiveParserBaseSemanticAnalyzer.GenericUDAFInfo udaf =
                            HiveParserUtils.getGenericUDAFInfo(
                                    genericUDAFEvaluator, amode, aggParameters);
                    // Pivoting UDAFs return a list whose element type is the actual result type.
                    if (HiveParserUtils.pivotResult(aggName)) {
                        udafRetType = ((ListTypeInfo) udaf.returnType).getListElementTypeInfo();
                    } else {
                        udafRetType = udaf.returnType;
                    }
                }
            } catch (Exception e) {
                // Fall through to the GenericUDF path below; the failure is expected for
                // functions that are not UDAFs.
                LOG.debug(
                        "CBO: Couldn't Obtain UDAF evaluators for "
                                + aggName
                                + ", trying to translate to GenericUDF");
            }

            // 3. Fallback: type-check the expression itself to obtain a return type.
            if (udafRetType == null) {
                HiveParserTypeCheckCtx tcCtx =
                        new HiveParserTypeCheckCtx(inputRR, frameworkConfig, cluster);
                tcCtx.setAllowStatefulFunctions(true);
                tcCtx.setAllowDistinctFunctions(false);
                ExprNodeDesc exp =
                        semanticAnalyzer.genExprNodeDesc(
                                (HiveParserASTNode) aggAst.getChild(0), inputRR, tcCtx);
                udafRetType = exp.getTypeInfo();
            }
        }

        aInfo = new AggInfo(aggParameters, udafRetType, aggName, isDistinct, isAllColumns, null);
        return aInfo;
    }
    /**
     * Builds a Calcite Values RelNode for a VALUES clause. Each string cell is converted into a
     * RexLiteral of the column's Hive type; {@code null} cells become null literals, and
     * decimals that do not fit the target type are also rendered as nulls. The overall row
     * type is the least-restrictive combination of all per-row types, and the produced
     * column infos are registered in {@code rowResolver} under {@code tabAlias}.
     *
     * @param tabAlias alias the generated columns are registered under
     * @param tmpTable temp table carrying the target column types
     * @param rowResolver mutated: receives one ColumnInfo per generated column
     * @param values rows of string-rendered cell values; each row must match the column count
     */
    public static RelNode genValues(
            String tabAlias,
            Table tmpTable,
            HiveParserRowResolver rowResolver,
            RelOptCluster cluster,
            List<List<String>> values) {
        List<TypeInfo> tmpTableTypes =
                tmpTable.getCols().stream()
                        .map(f -> TypeInfoUtils.getTypeInfoFromTypeString(f.getType()))
                        .collect(Collectors.toList());

        RexBuilder rexBuilder = cluster.getRexBuilder();
        // Hive primitive types mapped to the Calcite types the literals must conform to.
        List<RelDataType> calciteTargetTypes =
                tmpTableTypes.stream()
                        .map(
                                ti ->
                                        HiveParserTypeConverter.convert(
                                                (PrimitiveTypeInfo) ti,
                                                rexBuilder.getTypeFactory()))
                        .collect(Collectors.toList());
        // Calcite field names are ordinal-derived (EXPR$0, ...).
        List<String> calciteFieldNames =
                IntStream.range(0, calciteTargetTypes.size())
                        .mapToObj(SqlUtil::deriveAliasFromOrdinal)
                        .collect(Collectors.toList());

        // 1. Convert each row of string cells into RexLiterals, tracking each row's type.
        List<RelDataType> calciteRowTypes = new ArrayList<>();

        List<List<RexLiteral>> rows = new ArrayList<>();
        for (List<String> value : values) {
            Preconditions.checkArgument(
                    value.size() == tmpTableTypes.size(),
                    String.format(
                            "Values table col length (%d) and data length (%d) mismatch",
                            tmpTableTypes.size(), value.size()));
            List<RexLiteral> row = new ArrayList<>();
            for (int i = 0; i < tmpTableTypes.size(); i++) {
                PrimitiveTypeInfo primitiveTypeInfo = (PrimitiveTypeInfo) tmpTableTypes.get(i);
                RelDataType calciteType = calciteTargetTypes.get(i);
                String col = value.get(i);
                if (col == null) {
                    row.add(rexBuilder.makeNullLiteral(calciteType));
                } else {
                    switch (primitiveTypeInfo.getPrimitiveCategory()) {
                        case BYTE:
                        case SHORT:
                        case INT:
                        case LONG:
                            row.add(rexBuilder.makeExactLiteral(new BigDecimal(col), calciteType));
                            break;
                        case DECIMAL:
                            // Out-of-range decimal values degrade to null literals.
                            BigDecimal bigDec = new BigDecimal(col);
                            row.add(
                                    SqlTypeUtil.isValidDecimalValue(bigDec, calciteType)
                                            ? rexBuilder.makeExactLiteral(bigDec, calciteType)
                                            : rexBuilder.makeNullLiteral(calciteType));
                            break;
                        case FLOAT:
                        case DOUBLE:
                            row.add(rexBuilder.makeApproxLiteral(new BigDecimal(col), calciteType));
                            break;
                        case BOOLEAN:
                            row.add(rexBuilder.makeLiteral(Boolean.parseBoolean(col)));
                            break;
                        default:
                            // All remaining categories are rendered as character literals.
                            row.add(
                                    rexBuilder.makeCharLiteral(
                                            HiveParserUtils.asUnicodeString(col)));
                    }
                }
            }

            calciteRowTypes.add(
                    rexBuilder
                            .getTypeFactory()
                            .createStructType(
                                    row.stream()
                                            .map(RexLiteral::getType)
                                            .collect(Collectors.toList()),
                                    calciteFieldNames));
            rows.add(row);
        }

        // 2. Unify all row types and expose the columns through the row resolver.
        RelDataType calciteRowType = rexBuilder.getTypeFactory().leastRestrictive(calciteRowTypes);
        for (int i = 0; i < calciteFieldNames.size(); i++) {
            ColumnInfo colInfo =
                    new ColumnInfo(
                            calciteFieldNames.get(i),
                            HiveParserTypeConverter.convert(
                                    calciteRowType.getFieldList().get(i).getType()),
                            tabAlias,
                            false);
            rowResolver.put(tabAlias, calciteFieldNames.get(i), colInfo);
        }
        return HiveParserUtils.genValuesRelNode(
                cluster,
                rexBuilder.getTypeFactory().createStructType(calciteRowType.getFieldList()),
                rows);
    }
    /**
     * Type-checks the static partition values of an INSERT against the table's partition
     * column types (when HIVE_TYPE_CHECK_ON_INSERT is enabled), converting each value to the
     * declared column type and normalizing {@code partSpec} in place with the converted
     * string representation.
     *
     * @param partSpec mutated: values are replaced by their type-converted string form
     * @throws SemanticException when a value cannot be converted to the partition column type
     */
    private static void validatePartColumnType(
            Table tbl,
            Map<String, String> partSpec,
            HiveParserASTNode astNode,
            HiveConf conf,
            FrameworkConfig frameworkConfig,
            RelOptCluster cluster)
            throws SemanticException {
        if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_TYPE_CHECK_ON_INSERT)) {
            return;
        }

        Map<HiveParserASTNode, ExprNodeDesc> astExprNodeMap = new HashMap<>();
        // Dynamic partition columns have no constant value; only the constant ones are checked.
        if (!getPartExprNodeDesc(astNode, conf, astExprNodeMap, frameworkConfig, cluster)) {
            LOG.warn(
                    "Dynamic partitioning is used; only validating "
                            + astExprNodeMap.size()
                            + " columns");
        }

        if (astExprNodeMap.isEmpty()) {
            return; // All columns are dynamic, nothing to do.
        }
        // Partition columns, converted to lowercase for case-insensitive matching.
        List<FieldSchema> parts = tbl.getPartitionKeys();
        Map<String, String> partCols = new HashMap<>(parts.size());
        for (FieldSchema col : parts) {
            partCols.put(col.getName(), col.getType().toLowerCase());
        }
        for (Map.Entry<HiveParserASTNode, ExprNodeDesc> astExprNodePair :
                astExprNodeMap.entrySet()) {
            String astKeyName = astExprNodePair.getKey().toString().toLowerCase();
            if (astExprNodePair.getKey().getType() == HiveASTParser.Identifier) {
                astKeyName = stripIdentifierQuotes(astKeyName);
            }
            String colType = partCols.get(astKeyName);
            ObjectInspector inputOI =
                    TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(
                            astExprNodePair.getValue().getTypeInfo());

            TypeInfo expectedType = TypeInfoUtils.getTypeInfoFromTypeString(colType);
            ObjectInspector outputOI =
                    TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(expectedType);
            //  Since partVal is a constant, it is safe to cast ExprNodeDesc to
            // ExprNodeConstantDesc.
            Object value = ((ExprNodeConstantDesc) astExprNodePair.getValue()).getValue();
            Object convertedValue = value;
            if (!inputOI.getTypeName().equals(outputOI.getTypeName())) {
                // Convert the literal to the declared partition column type; a null result
                // means the value is not representable in that type.
                convertedValue =
                        ObjectInspectorConverters.getConverter(inputOI, outputOI).convert(value);
                if (convertedValue == null) {
                    throw new SemanticException(
                            ErrorMsg.PARTITION_SPEC_TYPE_MISMATCH,
                            astKeyName,
                            inputOI.getTypeName(),
                            outputOI.getTypeName());
                }

                if (!convertedValue.toString().equals(value.toString())) {
                    //  value might have been changed because of the normalization in conversion
                    LOG.warn(
                            "Partition "
                                    + astKeyName
                                    + " expects type "
                                    + outputOI.getTypeName()
                                    + " but input value is in type "
                                    + inputOI.getTypeName()
                                    + ". Convert "
                                    + value.toString()
                                    + " to "
                                    + convertedValue.toString());
                }
            }

            if (!convertedValue.toString().equals(partSpec.get(astKeyName))) {
                LOG.warn(
                        "Partition Spec "
                                + astKeyName
                                + "="
                                + partSpec.get(astKeyName)
                                + " has been changed to "
                                + astKeyName
                                + "="
                                + convertedValue.toString());
            }
            partSpec.put(astKeyName, convertedValue.toString());
        }
    }
private static void errorPartSpec(Map<String, String> partSpec, List<FieldSchema> parts)
throws SemanticException {
StringBuilder sb = new StringBuilder("Partition columns in the table schema are: (");
for (FieldSchema fs : parts) {
sb.append(fs.getName()).append(", ");
}
sb.setLength(sb.length() - 2);
sb.append("), while the partitions specified in the query are: (");
Iterator<String> itrPsKeys = partSpec.keySet().iterator();
while (itrPsKeys.hasNext()) {
sb.append(itrPsKeys.next()).append(", ");
}
sb.setLength(sb.length() - 2);
sb.append(").");
throw new SemanticException(ErrorMsg.PARTSPEC_DIFFER_FROM_SCHEMA.getMsg(sb.toString()));
}
/** Counterpart of hive's BaseSemanticAnalyzer.TableSpec. */
public static class TableSpec {
public String tableName;
public Table tableHandle;
public Map<String, String> partSpec;
public Partition partHandle;
public int numDynParts;
public List<Partition>
partitions;
/** SpecType. */
public enum SpecType {
TABLE_ONLY,
STATIC_PARTITION,
DYNAMIC_PARTITION
}
public TableSpec.SpecType specType;
public TableSpec(
Hive db,
HiveConf conf,
HiveParserASTNode ast,
FrameworkConfig frameworkConfig,
RelOptCluster cluster)
throws SemanticException {
this(db, conf, ast, true, false, frameworkConfig, cluster);
}
public TableSpec(
Hive db,
HiveConf conf,
HiveParserASTNode ast,
boolean allowDynamicPartitionsSpec,
boolean allowPartialPartitionsSpec,
FrameworkConfig frameworkConfig,
RelOptCluster cluster)
throws SemanticException {
assert (ast.getToken().getType() == HiveASTParser.TOK_TAB
|| ast.getToken().getType() == HiveASTParser.TOK_TABLE_PARTITION
|| ast.getToken().getType() == HiveASTParser.TOK_TABTYPE
|| ast.getToken().getType() == HiveASTParser.TOK_CREATETABLE
|| ast.getToken().getType() == HiveASTParser.TOK_CREATE_MATERIALIZED_VIEW);
int childIndex = 0;
numDynParts = 0;
try {
tableName = getUnescapedName((HiveParserASTNode) ast.getChild(0));
boolean testMode = conf.getBoolVar(HiveConf.ConfVars.HIVETESTMODE);
if (testMode) {
tableName = conf.getVar(HiveConf.ConfVars.HIVETESTMODEPREFIX) + tableName;
}
if (ast.getToken().getType() != HiveASTParser.TOK_CREATETABLE
&& ast.getToken().getType() != HiveASTParser.TOK_CREATE_MATERIALIZED_VIEW) {
tableHandle = db.getTable(tableName);
}
} catch (InvalidTableException ite) {
throw new SemanticException(
HiveParserErrorMsg.getMsg(ErrorMsg.INVALID_TABLE, ast.getChild(0)), ite);
} catch (HiveException e) {
throw new SemanticException("Error while retrieving table metadata", e);
}
if (ast.getChildCount() == 2
&& ast.getToken().getType() != HiveASTParser.TOK_CREATETABLE
&& ast.getToken().getType() != HiveASTParser.TOK_CREATE_MATERIALIZED_VIEW) {
childIndex = 1;
HiveParserASTNode partspec = (HiveParserASTNode) ast.getChild(1);
partitions = new ArrayList<Partition>();
Map<String, String> tmpPartSpec = new HashMap<>(partspec.getChildCount());
for (int i = 0; i < partspec.getChildCount(); ++i) {
HiveParserASTNode partspecVal = (HiveParserASTNode) partspec.getChild(i);
String val = null;
String colName =
unescapeIdentifier(partspecVal.getChild(0).getText().toLowerCase());
if (partspecVal.getChildCount() < 2) {
if (allowDynamicPartitionsSpec) {
++numDynParts;
} else {
throw new SemanticException(
ErrorMsg.INVALID_PARTITION.getMsg(
" - Dynamic partitions not allowed"));
}
} else {
val = stripQuotes(partspecVal.getChild(1).getText());
}
tmpPartSpec.put(colName, val);
}
validatePartSpec(
tableHandle, tmpPartSpec, ast, conf, false, frameworkConfig, cluster);
List<FieldSchema> parts = tableHandle.getPartitionKeys();
partSpec = new LinkedHashMap<String, String>(partspec.getChildCount());
for (FieldSchema fs : parts) {
String partKey = fs.getName();
partSpec.put(partKey, tmpPartSpec.get(partKey));
}
if (numDynParts > 0) {
int numStaPart = parts.size() - numDynParts;
if (numStaPart == 0
&& conf.getVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE)
.equalsIgnoreCase("strict")) {
throw new SemanticException(
ErrorMsg.DYNAMIC_PARTITION_STRICT_MODE.getMsg());
}
if (partSpec.keySet().size() != parts.size()) {
errorPartSpec(partSpec, parts);
}
Iterator<String> itrPsKeys = partSpec.keySet().iterator();
for (FieldSchema fs : parts) {
if (!itrPsKeys.next().toLowerCase().equals(fs.getName().toLowerCase())) {
errorPartSpec(partSpec, parts);
}
}
for (FieldSchema fs : parts) {
if (partSpec.get(fs.getName().toLowerCase()) == null) {
if (numStaPart > 0) {
throw new SemanticException(
HiveParserErrorMsg.getMsg(
ErrorMsg.PARTITION_DYN_STA_ORDER,
ast.getChild(childIndex)));
}
break;
} else {
--numStaPart;
}
}
partHandle = null;
specType = TableSpec.SpecType.DYNAMIC_PARTITION;
} else {
try {
if (allowPartialPartitionsSpec) {
partitions = db.getPartitions(tableHandle, partSpec);
} else {
partHandle = db.getPartition(tableHandle, partSpec, false);
if (partHandle == null) {
partHandle = new Partition(tableHandle, partSpec, null);
} else {
partitions.add(partHandle);
}
}
} catch (HiveException e) {
throw new SemanticException(
HiveParserErrorMsg.getMsg(
ErrorMsg.INVALID_PARTITION, ast.getChild(childIndex)),
e);
}
specType = TableSpec.SpecType.STATIC_PARTITION;
}
} else {
specType = TableSpec.SpecType.TABLE_ONLY;
}
}
        /**
         * Returns the (partition column name -> value) spec; values are null for dynamic
         * partition columns.
         */
        public Map<String, String> getPartSpec() {
            return this.partSpec;
        }
        /** Replaces the partition spec of this table spec. */
        public void setPartSpec(Map<String, String> partSpec) {
            this.partSpec = partSpec;
        }
@Override
public String toString() {
if (partHandle != null) {
return partHandle.toString();
} else {
return tableHandle.toString();
}
}
}
    /**
     * Counterpart of hive's BaseSemanticAnalyzer.AnalyzeRewriteContext. Carries the target of an
     * ANALYZE ... COMPUTE STATISTICS rewrite.
     */
    public static class AnalyzeRewriteContext {

        // Name of the table being analyzed.
        private String tableName;
        // Names of the columns whose statistics are computed.
        private List<String> colName;
        // Types of the corresponding columns in colName.
        private List<String> colType;
        // Presumably true for table-level stats, false for column-level — TODO confirm at callers.
        private boolean tblLvl;

        public String getTableName() {
            return tableName;
        }

        public void setTableName(String tableName) {
            this.tableName = tableName;
        }

        public List<String> getColName() {
            return colName;
        }

        public void setColName(List<String> colName) {
            this.colName = colName;
        }

        public boolean isTblLvl() {
            return tblLvl;
        }

        public void setTblLvl(boolean isTblLvl) {
            this.tblLvl = isTblLvl;
        }

        public List<String> getColType() {
            return colType;
        }

        public void setColType(List<String> colType) {
            this.colType = colType;
        }
    }
    /**
     * Counterpart of hive's BaseSemanticAnalyzer.PKInfo. Intermediate holder for one primary-key
     * column parsed from a PRIMARY KEY clause.
     */
    private static class PKInfo {
        // Lower-cased, unescaped primary-key column name.
        public String colName;
        // User-specified constraint name; null when none was given.
        public String constraintName;
        // Whether the RELY option was specified for the constraint.
        public boolean rely;

        public PKInfo(String colName) {
            this.colName = colName;
        }

        public PKInfo(String colName, String constraintName, boolean rely) {
            this.colName = colName;
            this.constraintName = constraintName;
            this.rely = rely;
        }
    }
    /**
     * Counterpart of hive's SemanticAnalyzer.CTEClause. One node in the graph of WITH-clause
     * (common table expression) definitions.
     */
    static class CTEClause {
        CTEClause(String alias, HiveParserASTNode cteNode) {
            this.alias = alias;
            this.cteNode = cteNode;
        }

        // Alias of the CTE; null for the synthetic root node (rendered as "<root>").
        String alias;
        // AST of the CTE definition.
        HiveParserASTNode cteNode;
        // Whether this CTE should be materialized.
        boolean materialize;
        // Reference count for this CTE.
        int reference;
        // Analyzed query-block expression of the CTE body.
        HiveParserQBExpr qbExpr;
        // Related clauses in the CTE graph (edge direction not evident here — confirm at callers).
        List<CTEClause> parents = new ArrayList<>();

        @Override
        public String toString() {
            return alias == null ? "<root>" : alias;
        }
    }
    /**
     * Counterpart of hive's SemanticAnalyzer.Phase1Ctx. Mutable state threaded through phase-1
     * analysis; see {@code initPhase1Ctx()} for the initial values.
     */
    public static class Phase1Ctx {
        // Name of the current destination clause (initialized to "reduce").
        String dest;
        // Counter starting at 0; presumably used to generate unique ids — confirm at call sites.
        int nextNum;
    }
    /**
     * Counterpart of hive's SemanticAnalyzer.GenericUDAFInfo. Bundles the resolved pieces of one
     * aggregate function call.
     */
    public static class GenericUDAFInfo {
        // Parameter expressions, presumably converted to evaluator-expected types — TODO confirm.
        public ArrayList<ExprNodeDesc> convertedParameters;
        // Resolved evaluator for the aggregate function.
        public GenericUDAFEvaluator genericUDAFEvaluator;
        // Result type of the aggregation.
        public TypeInfo returnType;
    }
/** Counterpart of hive's CalcitePlanner.TableType. */
public enum TableType {
DRUID,
NATIVE
}
    /**
     * Counterpart of hive's CalcitePlanner.AggInfo. Immutable description of a single aggregate
     * call (UDF name, parameters, result type, DISTINCT/star flags and output alias).
     */
    public static class AggInfo {
        // Parameter expressions of the aggregate call.
        private final List<ExprNodeDesc> aggParams;
        // Result type of the aggregation.
        private final TypeInfo returnType;
        // Name of the aggregate UDF (e.g. "sum").
        private final String udfName;
        // Whether DISTINCT was specified.
        private final boolean distinct;
        // Whether the call is over all columns, e.g. "count(*)".
        private final boolean isAllColumns;
        // Output alias of the aggregate.
        private final String alias;

        public AggInfo(
                List<ExprNodeDesc> aggParams,
                TypeInfo returnType,
                String udfName,
                boolean isDistinct,
                boolean isAllColumns,
                String alias) {
            this.aggParams = aggParams;
            this.returnType = returnType;
            this.udfName = udfName;
            distinct = isDistinct;
            this.isAllColumns = isAllColumns;
            this.alias = alias;
        }

        public List<ExprNodeDesc> getAggParams() {
            return aggParams;
        }

        public TypeInfo getReturnType() {
            return returnType;
        }

        public String getUdfName() {
            return udfName;
        }

        public boolean isDistinct() {
            return distinct;
        }

        public boolean isAllColumns() {
            return isAllColumns;
        }

        public String getAlias() {
            return alias;
        }
    }
    /**
     * Counterpart of hive's BaseSemanticAnalyzer.RowFormatParams. Holds the delimiters parsed
     * from a ROW FORMAT DELIMITED clause; unset delimiters stay null.
     */
    public static class HiveParserRowFormatParams {
        String fieldDelim = null;
        String fieldEscape = null;
        String collItemDelim = null;
        String mapKeyDelim = null;
        String lineDelim = null;
        String nullFormat = null;

        public String getFieldDelim() {
            return fieldDelim;
        }

        public String getFieldEscape() {
            return fieldEscape;
        }

        public String getCollItemDelim() {
            return collItemDelim;
        }

        public String getMapKeyDelim() {
            return mapKeyDelim;
        }

        public String getLineDelim() {
            return lineDelim;
        }

        public String getNullFormat() {
            return nullFormat;
        }

        /**
         * Parses a row-format AST node and records each delimiter found in its children.
         *
         * @throws SemanticException if LINES TERMINATED BY is anything other than newline, or an
         *     unknown token appears
         */
        public void analyzeRowFormat(HiveParserASTNode child) throws SemanticException {
            // The actual delimiter list is the first child of the passed node.
            child = (HiveParserASTNode) child.getChild(0);
            int numChildRowFormat = child.getChildCount();
            for (int numC = 0; numC < numChildRowFormat; numC++) {
                HiveParserASTNode rowChild = (HiveParserASTNode) child.getChild(numC);
                switch (rowChild.getToken().getType()) {
                    case HiveASTParser.TOK_TABLEROWFORMATFIELD:
                        fieldDelim = unescapeSQLString(rowChild.getChild(0).getText());
                        // Optional second child is the escape character.
                        if (rowChild.getChildCount() >= 2) {
                            fieldEscape = unescapeSQLString(rowChild.getChild(1).getText());
                        }
                        break;
                    case HiveASTParser.TOK_TABLEROWFORMATCOLLITEMS:
                        collItemDelim = unescapeSQLString(rowChild.getChild(0).getText());
                        break;
                    case HiveASTParser.TOK_TABLEROWFORMATMAPKEYS:
                        mapKeyDelim = unescapeSQLString(rowChild.getChild(0).getText());
                        break;
                    case HiveASTParser.TOK_TABLEROWFORMATLINES:
                        lineDelim = unescapeSQLString(rowChild.getChild(0).getText());
                        // Only "\n" (or its code point, "10") is a legal line terminator.
                        if (!lineDelim.equals("\n") && !lineDelim.equals("10")) {
                            throw new SemanticException(
                                    HiveParserUtils.generateErrorMessage(
                                            rowChild,
                                            ErrorMsg.LINES_TERMINATED_BY_NON_NEWLINE.getMsg()));
                        }
                        break;
                    case HiveASTParser.TOK_TABLEROWFORMATNULL:
                        nullFormat = unescapeSQLString(rowChild.getChild(0).getText());
                        break;
                    default:
                        throw new AssertionError("Unknown Token: " + rowChild);
                }
            }
        }
    }
    /**
     * Counterpart of hive's SQLPrimaryKey. Immutable description of one primary-key column of a
     * table. Serializable; field names are part of the serialized form.
     */
    public static class PrimaryKey implements Serializable {

        private static final long serialVersionUID = 3036210046732750293L;

        // Database and table the constraint belongs to.
        private final String dbName;
        private final String tblName;
        // Name of the primary-key column.
        private final String pk;
        // Constraint name; may be null when not user-specified.
        private final String constraintName;
        // ENABLE / VALIDATE / RELY flags of the constraint.
        private final boolean enable;
        private final boolean validate;
        private final boolean rely;

        public PrimaryKey(
                String dbName,
                String tblName,
                String pk,
                String constraintName,
                boolean enable,
                boolean validate,
                boolean rely) {
            this.dbName = dbName;
            this.tblName = tblName;
            this.pk = pk;
            this.constraintName = constraintName;
            this.enable = enable;
            this.validate = validate;
            this.rely = rely;
        }

        public String getDbName() {
            return dbName;
        }

        public String getTblName() {
            return tblName;
        }

        public String getPk() {
            return pk;
        }

        public String getConstraintName() {
            return constraintName;
        }

        public boolean isEnable() {
            return enable;
        }

        public boolean isValidate() {
            return validate;
        }

        public boolean isRely() {
            return rely;
        }
    }
    /**
     * Counterpart of hive's SQLNotNullConstraint. Immutable description of a NOT NULL constraint
     * on one column. Serializable; field names are part of the serialized form.
     */
    public static class NotNullConstraint implements Serializable {

        private static final long serialVersionUID = 7642343368203203950L;

        // Database and table the constraint belongs to.
        private final String dbName;
        private final String tblName;
        // Name of the constrained column.
        private final String colName;
        // Constraint name; may be null when not user-specified.
        private final String constraintName;
        // ENABLE / VALIDATE / RELY flags of the constraint.
        private final boolean enable;
        private final boolean validate;
        private final boolean rely;

        public NotNullConstraint(
                String dbName,
                String tblName,
                String colName,
                String constraintName,
                boolean enable,
                boolean validate,
                boolean rely) {
            this.dbName = dbName;
            this.tblName = tblName;
            this.colName = colName;
            this.constraintName = constraintName;
            this.enable = enable;
            this.validate = validate;
            this.rely = rely;
        }

        public String getDbName() {
            return dbName;
        }

        public String getTblName() {
            return tblName;
        }

        public String getColName() {
            return colName;
        }

        public String getConstraintName() {
            return constraintName;
        }

        public boolean isEnable() {
            return enable;
        }

        public boolean isValidate() {
            return validate;
        }

        public boolean isRely() {
            return rely;
        }
    }
}
|
class HiveParserBaseSemanticAnalyzer {
private static final Logger LOG = LoggerFactory.getLogger(HiveParserBaseSemanticAnalyzer.class);
private HiveParserBaseSemanticAnalyzer() {}
    /** Extracts column schemas from a column-list AST node, lower-casing column names. */
    public static List<FieldSchema> getColumns(HiveParserASTNode ast) throws SemanticException {
        return getColumns(ast, true);
    }
    /**
     * Extracts column schemas from a column-list AST node, discarding any primary-key / NOT NULL
     * constraint information found on the way.
     *
     * @param lowerCase whether column names should be lower-cased
     */
    public static List<FieldSchema> getColumns(HiveParserASTNode ast, boolean lowerCase)
            throws SemanticException {
        return getColumns(ast, lowerCase, new ArrayList<>(), new ArrayList<>());
    }
public static String getTypeStringFromAST(HiveParserASTNode typeNode) throws SemanticException {
switch (typeNode.getType()) {
case HiveASTParser.TOK_LIST:
return serdeConstants.LIST_TYPE_NAME
+ "<"
+ getTypeStringFromAST((HiveParserASTNode) typeNode.getChild(0))
+ ">";
case HiveASTParser.TOK_MAP:
return serdeConstants.MAP_TYPE_NAME
+ "<"
+ getTypeStringFromAST((HiveParserASTNode) typeNode.getChild(0))
+ ","
+ getTypeStringFromAST((HiveParserASTNode) typeNode.getChild(1))
+ ">";
case HiveASTParser.TOK_STRUCT:
return getStructTypeStringFromAST(typeNode);
case HiveASTParser.TOK_UNIONTYPE:
return getUnionTypeStringFromAST(typeNode);
default:
return HiveParserDDLSemanticAnalyzer.getTypeName(typeNode);
}
}
private static String getStructTypeStringFromAST(HiveParserASTNode typeNode)
throws SemanticException {
String typeStr = serdeConstants.STRUCT_TYPE_NAME + "<";
typeNode = (HiveParserASTNode) typeNode.getChild(0);
int children = typeNode.getChildCount();
if (children <= 0) {
throw new SemanticException("empty struct not allowed.");
}
StringBuilder buffer = new StringBuilder(typeStr);
for (int i = 0; i < children; i++) {
HiveParserASTNode child = (HiveParserASTNode) typeNode.getChild(i);
buffer.append(unescapeIdentifier(child.getChild(0).getText())).append(":");
buffer.append(getTypeStringFromAST((HiveParserASTNode) child.getChild(1)));
if (i < children - 1) {
buffer.append(",");
}
}
buffer.append(">");
return buffer.toString();
}
private static String getUnionTypeStringFromAST(HiveParserASTNode typeNode)
throws SemanticException {
String typeStr = serdeConstants.UNION_TYPE_NAME + "<";
typeNode = (HiveParserASTNode) typeNode.getChild(0);
int children = typeNode.getChildCount();
if (children <= 0) {
throw new SemanticException("empty union not allowed.");
}
StringBuilder buffer = new StringBuilder(typeStr);
for (int i = 0; i < children; i++) {
buffer.append(getTypeStringFromAST((HiveParserASTNode) typeNode.getChild(i)));
if (i < children - 1) {
buffer.append(",");
}
}
buffer.append(">");
typeStr = buffer.toString();
return typeStr;
}
    /**
     * Parses the column list of a CREATE TABLE AST into {@link FieldSchema}s, collecting
     * primary-key and NOT NULL constraint information on the way.
     *
     * @param ast the column-list node whose children are column definitions / constraints
     * @param lowerCase whether column names should be lower-cased
     * @param primaryKeys receives the parsed primary-key constraints
     * @param notNulls receives the parsed NOT NULL constraints
     * @throws SemanticException on FOREIGN KEY (unsupported), invalid columns or constraints
     */
    public static List<FieldSchema> getColumns(
            HiveParserASTNode ast,
            boolean lowerCase,
            List<PrimaryKey> primaryKeys,
            List<NotNullConstraint> notNulls)
            throws SemanticException {
        List<FieldSchema> colList = new ArrayList<>();
        int numCh = ast.getChildCount();
        List<PKInfo> pkInfos = new ArrayList<>();
        Map<String, FieldSchema> nametoFS = new HashMap<>();
        Tree parent = ast.getParent();

        for (int i = 0; i < numCh; i++) {
            FieldSchema col = new FieldSchema();
            HiveParserASTNode child = (HiveParserASTNode) ast.getChild(i);
            if (child.getToken().getType() == HiveASTParser.TOK_PRIMARY_KEY) {
                // Table-level PRIMARY KEY clause: collect now, resolve against columns at the end.
                processPrimaryKeyInfos(child, pkInfos);
            } else if (child.getToken().getType() == HiveASTParser.TOK_FOREIGN_KEY) {
                throw new SemanticException("FOREIGN KEY is not supported.");
            } else {
                // Regular column definition: child 0 = name, child 1 = type,
                // optional comment and/or column-level constraint follow.
                Tree grandChild = child.getChild(0);
                if (grandChild != null) {
                    String name = grandChild.getText();
                    if (lowerCase) {
                        name = name.toLowerCase();
                    }
                    checkColumnName(name);
                    col.setName(unescapeIdentifier(name));
                    HiveParserASTNode typeChild = (HiveParserASTNode) (child.getChild(1));
                    col.setType(getTypeStringFromAST(typeChild));
                    // With 4 children: comment then constraint; with 3: either a string-literal
                    // comment or a constraint node.
                    HiveParserASTNode constraintChild = null;
                    if (child.getChildCount() == 4) {
                        col.setComment(unescapeSQLString(child.getChild(2).getText()));
                        constraintChild = (HiveParserASTNode) child.getChild(3);
                    } else if (child.getChildCount() == 3
                            && ((HiveParserASTNode) child.getChild(2)).getToken().getType()
                                    == HiveASTParser.StringLiteral) {
                        col.setComment(unescapeSQLString(child.getChild(2).getText()));
                    } else if (child.getChildCount() == 3) {
                        constraintChild = (HiveParserASTNode) child.getChild(2);
                    }
                    if (constraintChild != null) {
                        String[] qualifiedTabName =
                                getQualifiedTableName((HiveParserASTNode) parent.getChild(0));
                        // Only NOT NULL is supported as a column-level constraint here.
                        switch (constraintChild.getToken().getType()) {
                            case HiveASTParser.TOK_NOT_NULL:
                                notNulls.add(
                                        processNotNull(
                                                constraintChild,
                                                qualifiedTabName[0],
                                                qualifiedTabName[1],
                                                col.getName()));
                                break;
                            default:
                                throw new SemanticException(
                                        "Unsupported constraint node: " + constraintChild);
                        }
                    }
                }
                nametoFS.put(col.getName(), col);
                colList.add(col);
            }
        }
        if (!pkInfos.isEmpty()) {
            processPrimaryKeys((HiveParserASTNode) parent, pkInfos, primaryKeys, nametoFS);
        }
        return colList;
    }
private static NotNullConstraint processNotNull(
HiveParserASTNode nnNode, String dbName, String tblName, String colName)
throws SemanticException {
boolean enable = true;
boolean validate = false;
boolean rely = false;
for (int i = 0; i < nnNode.getChildCount(); i++) {
HiveParserASTNode child = (HiveParserASTNode) nnNode.getChild(i);
switch (child.getToken().getType()) {
case HiveASTParser.TOK_ENABLE:
case HiveASTParser.TOK_NOVALIDATE:
case HiveASTParser.TOK_NORELY:
break;
case HiveASTParser.TOK_DISABLE:
enable = false;
break;
case HiveASTParser.TOK_VALIDATE:
validate = true;
break;
case HiveASTParser.TOK_RELY:
rely = true;
break;
default:
throw new SemanticException(
"Unexpected node for NOT NULL constraint: " + child);
}
}
return new NotNullConstraint(dbName, tblName, colName, null, enable, validate, rely);
}
private static void processPrimaryKeys(
HiveParserASTNode parent,
List<PKInfo> pkInfos,
List<PrimaryKey> primaryKeys,
Map<String, FieldSchema> nametoFS)
throws SemanticException {
int cnt = 1;
String[] qualifiedTabName = getQualifiedTableName((HiveParserASTNode) parent.getChild(0));
for (PKInfo pkInfo : pkInfos) {
String pk = pkInfo.colName;
if (nametoFS.containsKey(pk)) {
PrimaryKey currPrimaryKey =
new PrimaryKey(
qualifiedTabName[0],
qualifiedTabName[1],
pk,
pkInfo.constraintName,
false,
false,
pkInfo.rely);
primaryKeys.add(currPrimaryKey);
} else {
throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(pk));
}
}
}
    /**
     * Parses a table-level PRIMARY KEY clause: collects the key columns into {@code pkInfos} and
     * applies the clause-wide constraint name and RELY flag to each of them.
     *
     * @throws SemanticException if ENABLE or VALIDATE is requested (unsupported), no column is
     *     specified, or an unknown token appears
     */
    private static void processPrimaryKeyInfos(HiveParserASTNode pkNode, List<PKInfo> pkInfos)
            throws SemanticException {
        String userSpecifiedName = null;
        // Defaults: ENABLE, NOVALIDATE, NORELY.
        boolean enable = true;
        boolean validate = false;
        boolean rely = false;
        for (int i = 0; i < pkNode.getChildCount(); i++) {
            HiveParserASTNode child = (HiveParserASTNode) pkNode.getChild(i);
            switch (child.getType()) {
                case HiveASTParser.TOK_ENABLE:
                case HiveASTParser.TOK_NOVALIDATE:
                case HiveASTParser.TOK_NORELY:
                    // These match the defaults; nothing to record.
                    break;
                case HiveASTParser.TOK_DISABLE:
                    enable = false;
                    break;
                case HiveASTParser.TOK_VALIDATE:
                    validate = true;
                    break;
                case HiveASTParser.TOK_RELY:
                    rely = true;
                    break;
                case HiveASTParser.TOK_CONSTRAINT_NAME:
                    userSpecifiedName =
                            unescapeIdentifier(child.getChild(0).getText().toLowerCase());
                    break;
                case HiveASTParser.TOK_TABCOLNAME:
                    // The list of primary-key columns.
                    for (int j = 0; j < child.getChildCount(); j++) {
                        String colName = child.getChild(j).getText();
                        checkColumnName(colName);
                        pkInfos.add(new PKInfo(unescapeIdentifier(colName.toLowerCase())));
                    }
                    break;
                default:
                    throw new SemanticException(
                            "Unexpected node for PRIMARY KEY constraint: " + child);
            }
        }
        // Only DISABLE NOVALIDATE primary keys are accepted.
        if (enable) {
            throw new SemanticException(
                    "Invalid Primary Key syntax ENABLE feature not supported yet");
        }
        if (validate) {
            throw new SemanticException(
                    "Invalid Primary Key syntax VALIDATE feature not supported yet");
        }
        if (pkInfos.isEmpty()) {
            throw new SemanticException("No column specified as the primary key");
        }
        // Propagate the clause-level name and RELY flag to every key column.
        for (PKInfo pkInfo : pkInfos) {
            pkInfo.constraintName = userSpecifiedName;
            pkInfo.rely = rely;
        }
    }
public static String getDotName(String[] qname) throws SemanticException {
String genericName = StringUtils.join(qname, ".");
if (qname.length != 2) {
throw new SemanticException(ErrorMsg.INVALID_TABLE_NAME, genericName);
}
return genericName;
}
/**
* Converts parsed key/value properties pairs into a map.
*
* @param prop HiveParserASTNode parent of the key/value pairs
* @param mapProp property map which receives the mappings
*/
public static void readProps(HiveParserASTNode prop, Map<String, String> mapProp) {
for (int propChild = 0; propChild < prop.getChildCount(); propChild++) {
String key = unescapeSQLString(prop.getChild(propChild).getChild(0).getText());
String value = null;
if (prop.getChild(propChild).getChild(1) != null) {
value = unescapeSQLString(prop.getChild(propChild).getChild(1).getText());
}
mapProp.put(key, value);
}
}
public static String[] getQualifiedTableName(HiveParserASTNode tabNameNode)
throws SemanticException {
if (tabNameNode.getType() != HiveASTParser.TOK_TABNAME
|| (tabNameNode.getChildCount() != 1 && tabNameNode.getChildCount() != 2)) {
throw new SemanticException(
HiveParserErrorMsg.getMsg(ErrorMsg.INVALID_TABLE_NAME, tabNameNode));
}
if (tabNameNode.getChildCount() == 2) {
String dbName = unescapeIdentifier(tabNameNode.getChild(0).getText());
String tableName = unescapeIdentifier(tabNameNode.getChild(1).getText());
return new String[] {dbName, tableName};
}
String tableName = unescapeIdentifier(tabNameNode.getChild(0).getText());
return Utilities.getDbTableName(tableName);
}
    /**
     * Decodes a charset-prefixed string literal into (charset name, decoded value). The value is
     * either a quoted string or a hex literal starting with "0x".
     *
     * @param charSetName charset token; its leading character (presumably the "_" prefix) is
     *     stripped
     * @param charSetString the literal to decode
     * @throws SemanticException if the charset is unsupported
     */
    public static Tuple2<String, String> charSetString(String charSetName, String charSetString)
            throws SemanticException {
        try {
            // Skip the leading marker character of the charset token.
            charSetName = charSetName.substring(1);
            if (charSetString.charAt(0) == '\'') {
                // Quoted form: unescape, then re-decode the raw bytes in the given charset.
                return Tuple2.of(
                        charSetName,
                        new String(unescapeSQLString(charSetString).getBytes(), charSetName));
            } else {
                // Hex form "0xDEADBEEF": decode each pair of hex digits into one byte.
                assert charSetString.charAt(0) == '0';
                assert charSetString.charAt(1) == 'x';
                charSetString = charSetString.substring(2);

                byte[] bArray = new byte[charSetString.length() / 2];
                int j = 0;
                for (int i = 0; i < charSetString.length(); i += 2) {
                    int val =
                            Character.digit(charSetString.charAt(i), 16) * 16
                                    + Character.digit(charSetString.charAt(i + 1), 16);
                    // Fold values above 127 into the signed byte range.
                    if (val > 127) {
                        val = val - 256;
                    }
                    bArray[j++] = (byte) val;
                }
                return Tuple2.of(charSetName, new String(bArray, charSetName));
            }
        } catch (UnsupportedEncodingException e) {
            throw new SemanticException(e);
        }
    }
    /** Strips enclosing quotes from {@code val}; delegates to Hive's PlanUtils. */
    public static String stripQuotes(String val) {
        return PlanUtils.stripQuotes(val);
    }
/**
* Remove the encapsulating "`" pair from the identifier. We allow users to use "`" to escape
* identifier for table names, column names and aliases, in case that coincide with Hive
* language keywords.
*/
public static String unescapeIdentifier(String val) {
if (val == null) {
return null;
}
if (val.charAt(0) == '`' && val.charAt(val.length() - 1) == '`') {
val = val.substring(1, val.length() - 1);
}
return val;
}
/**
* Get the unqualified name from a table node. This method works for table names qualified with
* their schema (e.g., "db.table") and table names without schema qualification. In both cases,
* it returns the table name without the schema.
*
* @param node the table node
* @return the table name without schema qualification (i.e., if name is "db.table" or "table",
* returns "table")
*/
public static String getUnescapedUnqualifiedTableName(HiveParserASTNode node) {
assert node.getChildCount() <= 2;
if (node.getChildCount() == 2) {
node = (HiveParserASTNode) node.getChild(1);
}
return getUnescapedName(node);
}
    /**
     * Gets the dequoted name from a table/column node, without qualifying unqualified table names
     * with any database.
     *
     * @param tableOrColumnNode the table or column node
     * @return for a table node, "db.tab" or "tab"; for a column node, the column name
     */
    public static String getUnescapedName(HiveParserASTNode tableOrColumnNode) {
        return getUnescapedName(tableOrColumnNode, null);
    }
public static String getUnescapedName(
HiveParserASTNode tableOrColumnNode, String currentDatabase) {
int tokenType = tableOrColumnNode.getToken().getType();
if (tokenType == HiveASTParser.TOK_TABNAME) {
Map.Entry<String, String> dbTablePair = getDbTableNamePair(tableOrColumnNode);
String dbName = dbTablePair.getKey();
String tableName = dbTablePair.getValue();
if (dbName != null) {
return dbName + "." + tableName;
}
if (currentDatabase != null) {
return currentDatabase + "." + tableName;
}
return tableName;
} else if (tokenType == HiveASTParser.StringLiteral) {
return unescapeSQLString(tableOrColumnNode.getText());
}
return unescapeIdentifier(tableOrColumnNode.getText());
}
public static Map.Entry<String, String> getDbTableNamePair(HiveParserASTNode tableNameNode) {
assert (tableNameNode.getToken().getType() == HiveASTParser.TOK_TABNAME);
if (tableNameNode.getChildCount() == 2) {
String dbName = unescapeIdentifier(tableNameNode.getChild(0).getText());
String tableName = unescapeIdentifier(tableNameNode.getChild(1).getText());
return Pair.of(dbName, tableName);
} else {
String tableName = unescapeIdentifier(tableNameNode.getChild(0).getText());
return Pair.of(null, tableName);
}
}
    /**
     * Unescapes a SQL string literal: copies characters between the enclosing quotes (' or ")
     * while decoding escape sequences — backslash-u followed by 4 hex digits (unicode),
     * backslash followed by 3 octal digits (leading digit 0-1), and single-character escapes
     * such as backslash-n.
     */
    @SuppressWarnings("nls")
    public static String unescapeSQLString(String b) {
        // Tracks the quote character that opened the current literal; null while outside quotes.
        Character enclosure = null;

        StringBuilder sb = new StringBuilder(b.length());
        for (int i = 0; i < b.length(); i++) {
            char currentChar = b.charAt(i);
            if (enclosure == null) {
                // Skip everything until an opening quote is seen.
                if (currentChar == '\'' || b.charAt(i) == '\"') {
                    enclosure = currentChar;
                }
                continue;
            }

            if (enclosure.equals(currentChar)) {
                // Matching closing quote: drop it and leave quoted mode.
                enclosure = null;
                continue;
            }

            if (currentChar == '\\' && (i + 6 < b.length()) && b.charAt(i + 1) == 'u') {
                // Unicode escape: decode the 4 hex digits following backslash-u.
                int code = 0;
                int base = i + 2;
                for (int j = 0; j < 4; j++) {
                    int digit = Character.digit(b.charAt(j + base), 16);
                    code = (code << 4) + digit;
                }
                sb.append((char) code);
                i += 5;
                continue;
            }

            if (currentChar == '\\' && (i + 4 < b.length())) {
                // Possible octal escape: backslash followed by 3 octal digits, first in 0-1.
                char i1 = b.charAt(i + 1);
                char i2 = b.charAt(i + 2);
                char i3 = b.charAt(i + 3);
                if ((i1 >= '0' && i1 <= '1')
                        && (i2 >= '0' && i2 <= '7')
                        && (i3 >= '0' && i3 <= '7')) {
                    byte bVal = (byte) ((i3 - '0') + ((i2 - '0') * 8) + ((i1 - '0') * 8 * 8));
                    byte[] bValArr = new byte[1];
                    bValArr[0] = bVal;
                    String tmp = new String(bValArr);
                    sb.append(tmp);
                    i += 3;
                    continue;
                }
            }

            if (currentChar == '\\' && (i + 2 < b.length())) {
                // Single-character escapes; an unknown escape keeps the escaped char as-is.
                char n = b.charAt(i + 1);
                switch (n) {
                    case '0':
                        sb.append("\0");
                        break;
                    case '\'':
                        sb.append("'");
                        break;
                    case '"':
                        sb.append("\"");
                        break;
                    case 'b':
                        sb.append("\b");
                        break;
                    case 'n':
                        sb.append("\n");
                        break;
                    case 'r':
                        sb.append("\r");
                        break;
                    case 't':
                        sb.append("\t");
                        break;
                    case 'Z':
                        sb.append("\u001A");
                        break;
                    case '\\':
                        sb.append("\\");
                        break;
                    // The following 2 lines are exactly like MySQL.
                    case '%':
                        sb.append("\\%");
                        break;
                    case '_':
                        sb.append("\\_");
                        break;
                    default:
                        sb.append(n);
                }
                i++;
            } else {
                sb.append(currentChar);
            }
        }
        return sb.toString();
    }
    /**
     * Validates a partition spec against a table: column names via
     * {@code Table#validatePartColumnNames}, then value types via the internal type check.
     *
     * @param shouldBeFull passed through to the name validation (whether the spec must cover all
     *     partition columns)
     */
    public static void validatePartSpec(
            Table tbl,
            Map<String, String> partSpec,
            HiveParserASTNode astNode,
            HiveConf conf,
            boolean shouldBeFull,
            FrameworkConfig frameworkConfig,
            RelOptCluster cluster)
            throws SemanticException {
        tbl.validatePartColumnNames(partSpec, shouldBeFull);
        validatePartColumnType(tbl, partSpec, astNode, conf, frameworkConfig, cluster);
    }
    /**
     * Recursively walks the AST collecting typed constant expressions for static partition values
     * into {@code astExprNodeMap}, keyed by the partition-column node. Values equal to the
     * configured default partition name are skipped.
     *
     * @return true iff no dynamic (value-less) TOK_PARTVAL node was encountered anywhere
     */
    private static boolean getPartExprNodeDesc(
            HiveParserASTNode astNode,
            HiveConf conf,
            Map<HiveParserASTNode, ExprNodeDesc> astExprNodeMap,
            FrameworkConfig frameworkConfig,
            RelOptCluster cluster)
            throws SemanticException {
        if (astNode == null) {
            return true;
        } else if ((astNode.getChildren() == null) || (astNode.getChildren().size() == 0)) {
            // A childless TOK_PARTVAL is a dynamic partition column.
            return astNode.getType() != HiveASTParser.TOK_PARTVAL;
        }

        HiveParserTypeCheckCtx typeCheckCtx =
                new HiveParserTypeCheckCtx(null, frameworkConfig, cluster);
        String defaultPartitionName = HiveConf.getVar(conf, HiveConf.ConfVars.DEFAULTPARTITIONNAME);
        boolean result = true;
        for (Node childNode : astNode.getChildren()) {
            HiveParserASTNode childASTNode = (HiveParserASTNode) childNode;

            if (childASTNode.getType() != HiveASTParser.TOK_PARTVAL) {
                // Not a partition value node: recurse; any dynamic part below flips the result.
                result =
                        getPartExprNodeDesc(
                                        childASTNode,
                                        conf,
                                        astExprNodeMap,
                                        frameworkConfig,
                                        cluster)
                                && result;
            } else {
                // TOK_PARTVAL with a single child has no value and is therefore dynamic.
                boolean isDynamicPart = childASTNode.getChildren().size() <= 1;
                result = !isDynamicPart && result;
                if (!isDynamicPart) {
                    HiveParserASTNode partVal =
                            (HiveParserASTNode) childASTNode.getChildren().get(1);
                    // Skip the default-partition sentinel value.
                    if (!defaultPartitionName.equalsIgnoreCase(
                            unescapeSQLString(partVal.getText()))) {
                        astExprNodeMap.put(
                                (HiveParserASTNode) childASTNode.getChildren().get(0),
                                HiveParserTypeCheckProcFactory.genExprNode(partVal, typeCheckCtx)
                                        .get(partVal));
                    }
                }
            }
        }
        return result;
    }
private static String stripIdentifierQuotes(String val) {
if ((val.charAt(0) == '`' && val.charAt(val.length() - 1) == '`')) {
val = val.substring(1, val.length() - 1);
}
return val;
}
static List<HiveParserASTNode> doPhase1GetDistinctFuncExprs(
HashMap<String, HiveParserASTNode> aggregationTrees) {
List<HiveParserASTNode> exprs = new ArrayList<>();
for (Map.Entry<String, HiveParserASTNode> entry : aggregationTrees.entrySet()) {
HiveParserASTNode value = entry.getValue();
if (value.getToken().getType() == HiveASTParser.TOK_FUNCTIONDI) {
exprs.add(value);
}
}
return exprs;
}
static String findSimpleTableName(HiveParserASTNode tabref, int aliasIndex) {
assert tabref.getType() == HiveASTParser.TOK_TABREF;
HiveParserASTNode tableTree = (HiveParserASTNode) (tabref.getChild(0));
String alias;
if (aliasIndex != 0) {
alias = unescapeIdentifier(tabref.getChild(aliasIndex).getText());
} else {
alias = getUnescapedUnqualifiedTableName(tableTree);
}
return alias;
}
static int[] findTabRefIdxs(HiveParserASTNode tabref) {
assert tabref.getType() == HiveASTParser.TOK_TABREF;
int aliasIndex = 0;
int propsIndex = -1;
int tsampleIndex = -1;
int ssampleIndex = -1;
for (int index = 1; index < tabref.getChildCount(); index++) {
HiveParserASTNode ct = (HiveParserASTNode) tabref.getChild(index);
if (ct.getToken().getType() == HiveASTParser.TOK_TABLEBUCKETSAMPLE) {
tsampleIndex = index;
} else if (ct.getToken().getType() == HiveASTParser.TOK_TABLESPLITSAMPLE) {
ssampleIndex = index;
} else if (ct.getToken().getType() == HiveASTParser.TOK_TABLEPROPERTIES) {
propsIndex = index;
} else {
aliasIndex = index;
}
}
return new int[] {aliasIndex, propsIndex, tsampleIndex, ssampleIndex};
}
    /**
     * Renders a constant expression from a VALUES clause back into textual form.
     *
     * <p>NOTE(review): KW_FALSE is rendered as the empty string while KW_TRUE is "TRUE" —
     * presumably because casting any non-empty string to boolean yields true; confirm against
     * Hive's boolean-cast semantics. TOK_NULL is rendered as Java {@code null}.
     *
     * @throws SemanticException for expression types not supported in insert/values
     */
    static String unparseExprForValuesClause(HiveParserASTNode expr) throws SemanticException {
        switch (expr.getToken().getType()) {
            case HiveASTParser.Number:
                return expr.getText();
            case HiveASTParser.StringLiteral:
                return unescapeSQLString(expr.getText());
            case HiveASTParser.KW_FALSE:
                // See the note above: empty string stands in for FALSE.
                return "";
            case HiveASTParser.KW_TRUE:
                return "TRUE";
            case HiveASTParser.MINUS:
                return "-"
                        + unparseExprForValuesClause((HiveParserASTNode) expr.getChildren().get(0));
            case HiveASTParser.TOK_NULL:
                return null;
            default:
                throw new SemanticException(
                        "Expression of type " + expr.getText() + " not supported in insert/values");
        }
    }
    /** Returns Hive's internal column name for position {@code pos}; delegates to HiveConf. */
    public static String getColumnInternalName(int pos) {
        return HiveConf.getColumnInternalName(pos);
    }
public static List<Integer> getGroupingSetsForRollup(int size) {
List<Integer> groupingSetKeys = new ArrayList<>();
for (int i = 0; i <= size; i++) {
groupingSetKeys.add((1 << i) - 1);
}
return groupingSetKeys;
}
public static List<Integer> getGroupingSetsForCube(int size) {
int count = 1 << size;
List<Integer> results = new ArrayList<>(count);
for (int i = 0; i < count; ++i) {
results.add(i);
}
return results;
}
    /**
     * Computes the bitmask for each GROUPING SETS expression of the given destination. Each mask
     * starts with all {@code n} bits set (2^n - 1) and every group-by expression present in the
     * set clears its bit; bit k corresponds to group-by expression n-1-k.
     *
     * @throws SemanticException if a set references an expression that is not in GROUP BY, or if
     *     all resulting sets are empty
     */
    public static List<Integer> getGroupingSets(
            List<HiveParserASTNode> groupByExpr, HiveParserQBParseInfo parseInfo, String dest)
            throws SemanticException {
        // Map each group-by expression (by its tree string) to its position.
        Map<String, Integer> exprPos = new HashMap<>();
        for (int i = 0; i < groupByExpr.size(); ++i) {
            HiveParserASTNode node = groupByExpr.get(i);
            exprPos.put(node.toStringTree(), i);
        }

        HiveParserASTNode root = parseInfo.getGroupByForClause(dest);
        List<Integer> result = new ArrayList<>(root == null ? 0 : root.getChildCount());
        if (root != null) {
            for (int i = 0; i < root.getChildCount(); ++i) {
                HiveParserASTNode child = (HiveParserASTNode) root.getChild(i);
                if (child.getType() != HiveASTParser.TOK_GROUPING_SETS_EXPRESSION) {
                    continue;
                }
                // Start with all bits set; clear one bit per expression in this grouping set.
                int bitmap = com.google.common.math.IntMath.pow(2, groupByExpr.size()) - 1;
                for (int j = 0; j < child.getChildCount(); ++j) {
                    String treeAsString = child.getChild(j).toStringTree();
                    Integer pos = exprPos.get(treeAsString);
                    if (pos == null) {
                        throw new SemanticException(
                                HiveParserUtils.generateErrorMessage(
                                        (HiveParserASTNode) child.getChild(j),
                                        ErrorMsg.HIVE_GROUPING_SETS_EXPR_NOT_IN_GROUPBY
                                                .getErrorCodedMsg()));
                    }
                    bitmap = HiveParserUtils.unsetBit(bitmap, groupByExpr.size() - pos - 1);
                }
                result.add(bitmap);
            }
        }
        if (checkForEmptyGroupingSets(
                result, com.google.common.math.IntMath.pow(2, groupByExpr.size()) - 1)) {
            throw new SemanticException("Empty grouping sets not allowed");
        }
        return result;
    }
private static boolean checkForEmptyGroupingSets(List<Integer> bitmaps, int groupingIdAllSet) {
boolean ret = true;
for (int mask : bitmaps) {
ret &= mask == groupingIdAllSet;
}
return ret;
}
    /**
     * Returns the group-by expressions for a destination. For SELECT DISTINCT (TOK_SELECTDI) the
     * select expressions themselves serve as grouping keys (query hints skipped); otherwise the
     * explicit GROUP BY children are returned, excluding GROUPING SETS expression nodes.
     */
    public static List<HiveParserASTNode> getGroupByForClause(
            HiveParserQBParseInfo parseInfo, String dest) {
        if (parseInfo.getSelForClause(dest).getToken().getType() == HiveASTParser.TOK_SELECTDI) {
            HiveParserASTNode selectExprs = parseInfo.getSelForClause(dest);
            List<HiveParserASTNode> result =
                    new ArrayList<>(selectExprs == null ? 0 : selectExprs.getChildCount());
            if (selectExprs != null) {
                for (int i = 0; i < selectExprs.getChildCount(); ++i) {
                    // Skip query hints; they are not expressions.
                    if (((HiveParserASTNode) selectExprs.getChild(i)).getToken().getType()
                            == HiveASTParser.QUERY_HINT) {
                        continue;
                    }
                    // Each select expression node wraps the actual expression as child 0.
                    HiveParserASTNode grpbyExpr =
                            (HiveParserASTNode) selectExprs.getChild(i).getChild(0);
                    result.add(grpbyExpr);
                }
            }
            return result;
        } else {
            HiveParserASTNode grpByExprs = parseInfo.getGroupByForClause(dest);
            List<HiveParserASTNode> result =
                    new ArrayList<>(grpByExprs == null ? 0 : grpByExprs.getChildCount());
            if (grpByExprs != null) {
                for (int i = 0; i < grpByExprs.getChildCount(); ++i) {
                    HiveParserASTNode grpbyExpr = (HiveParserASTNode) grpByExprs.getChild(i);
                    // GROUPING SETS expressions are handled separately (see getGroupingSets).
                    if (grpbyExpr.getType() != HiveASTParser.TOK_GROUPING_SETS_EXPRESSION) {
                        result.add(grpbyExpr);
                    }
                }
            }
            return result;
        }
    }
static String getAliasId(String alias, HiveParserQB qb) {
return (qb.getId() == null ? alias : qb.getId() + ":" + alias).toLowerCase();
}
    /**
     * Translates a Hive window boundary spec into a Calcite {@link RexWindowBound}: unbounded or
     * offset PRECEDING/FOLLOWING, or CURRENT ROW. Returns null when {@code spec} is null.
     */
    public static RexWindowBound getBound(
            HiveParserWindowingSpec.BoundarySpec spec, RelOptCluster cluster) {
        RexWindowBound res = null;

        if (spec != null) {
            SqlParserPos dummyPos = new SqlParserPos(1, 1);
            // Amount 0 and the UNBOUNDED sentinel both mean "no offset literal".
            SqlNode amt =
                    spec.getAmt() == 0
                                    || spec.getAmt()
                                            == HiveParserWindowingSpec.BoundarySpec.UNBOUNDED_AMOUNT
                            ? null
                            : SqlLiteral.createExactNumeric(
                                    String.valueOf(spec.getAmt()), new SqlParserPos(2, 2));
            RexNode amtLiteral =
                    amt == null
                            ? null
                            : cluster.getRexBuilder()
                                    .makeLiteral(
                                            spec.getAmt(),
                                            cluster.getTypeFactory()
                                                    .createSqlType(SqlTypeName.INTEGER),
                                            true);

            switch (spec.getDirection()) {
                case PRECEDING:
                    if (amt == null) {
                        res =
                                RexWindowBound.create(
                                        SqlWindow.createUnboundedPreceding(dummyPos), null);
                    } else {
                        // Bounded: wrap the offset literal into a PRECEDING call.
                        SqlCall call = (SqlCall) SqlWindow.createPreceding(amt, dummyPos);
                        res =
                                RexWindowBound.create(
                                        call,
                                        cluster.getRexBuilder()
                                                .makeCall(call.getOperator(), amtLiteral));
                    }
                    break;
                case CURRENT:
                    res = RexWindowBound.create(SqlWindow.createCurrentRow(dummyPos), null);
                    break;
                case FOLLOWING:
                    if (amt == null) {
                        res =
                                RexWindowBound.create(
                                        SqlWindow.createUnboundedFollowing(dummyPos), null);
                    } else {
                        // Bounded: wrap the offset literal into a FOLLOWING call.
                        SqlCall call = (SqlCall) SqlWindow.createFollowing(amt, dummyPos);
                        res =
                                RexWindowBound.create(
                                        call,
                                        cluster.getRexBuilder()
                                                .makeCall(call.getOperator(), amtLiteral));
                    }
                    break;
            }
        }

        return res;
    }
public static Phase1Ctx initPhase1Ctx() {
Phase1Ctx ctx1 = new Phase1Ctx();
ctx1.nextNum = 0;
ctx1.dest = "reduce";
return ctx1;
}
    /** Prints a "Warning: ..." message to the Hive session console. */
    static void warn(String msg) {
        SessionState.getConsole().printInfo(String.format("Warning: %s", msg));
    }
static void handleQueryWindowClauses(
HiveParserQB qb, HiveParserBaseSemanticAnalyzer.Phase1Ctx ctx1, HiveParserASTNode node)
throws SemanticException {
HiveParserWindowingSpec spec = qb.getWindowingSpec(ctx1.dest);
for (int i = 0; i < node.getChildCount(); i++) {
processQueryWindowClause(spec, (HiveParserASTNode) node.getChild(i));
}
}
    /**
     * Rewrites numeric position aliases in GROUP BY / ORDER BY clauses into the select-list
     * expressions they refer to (positions are 1-based indexes into the select list).
     *
     * <p>Whether positions are treated as aliases is controlled by
     * {@code hive.groupby.orderby.position.alias} (enables both clauses at once) or the
     * per-clause {@code hive.groupby.position.alias} / {@code hive.orderby.position.alias}
     * settings. When a clause's flag is off, a numeric position is kept as a plain constant
     * and a warning is printed.
     *
     * @param ast root of the AST (sub)tree; rewritten in place
     * @param conf Hive configuration holding the position-alias flags
     * @throws SemanticException if a position is out of the select-list range, or an ORDER BY
     *     position is used together with {@code SELECT *}
     */
    public static void processPositionAlias(HiveParserASTNode ast, HiveConf conf)
            throws SemanticException {
        boolean isBothByPos =
                HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_GROUPBY_ORDERBY_POSITION_ALIAS);
        boolean isGbyByPos =
                isBothByPos
                        || Boolean.parseBoolean(conf.get("hive.groupby.position.alias", "false"));
        // Note: unlike group-by, the order-by position alias defaults to enabled here.
        boolean isObyByPos =
                isBothByPos
                        || Boolean.parseBoolean(conf.get("hive.orderby.position.alias", "true"));
        // Iterative depth-first traversal so every (nested) query block is visited.
        Deque<HiveParserASTNode> stack = new ArrayDeque<>();
        stack.push(ast);
        while (!stack.isEmpty()) {
            HiveParserASTNode next = stack.pop();
            if (next.getChildCount() == 0) {
                continue;
            }
            boolean isAllCol;
            HiveParserASTNode selectNode = null;
            HiveParserASTNode groupbyNode = null;
            HiveParserASTNode orderbyNode = null;
            // Locate the SELECT / GROUP BY / ORDER BY children of this node, if present.
            int childCount = next.getChildCount();
            for (int childPos = 0; childPos < childCount; ++childPos) {
                HiveParserASTNode node = (HiveParserASTNode) next.getChild(childPos);
                int type = node.getToken().getType();
                if (type == HiveASTParser.TOK_SELECT) {
                    selectNode = node;
                } else if (type == HiveASTParser.TOK_GROUPBY) {
                    groupbyNode = node;
                } else if (type == HiveASTParser.TOK_ORDERBY) {
                    orderbyNode = node;
                }
            }
            if (selectNode != null) {
                int selectExpCnt = selectNode.getChildCount();
                // Replace GROUP BY position constants with the referenced select expression.
                if (groupbyNode != null) {
                    for (int childPos = 0; childPos < groupbyNode.getChildCount(); ++childPos) {
                        HiveParserASTNode node = (HiveParserASTNode) groupbyNode.getChild(childPos);
                        if (node.getToken().getType() == HiveASTParser.Number) {
                            if (isGbyByPos) {
                                int pos = Integer.parseInt(node.getText());
                                if (pos > 0 && pos <= selectExpCnt) {
                                    // getChild(0) strips the select item's alias wrapper.
                                    groupbyNode.setChild(
                                            childPos, selectNode.getChild(pos - 1).getChild(0));
                                } else {
                                    throw new SemanticException(
                                            ErrorMsg.INVALID_POSITION_ALIAS_IN_GROUPBY.getMsg(
                                                    "Position alias: "
                                                            + pos
                                                            + " does not exist\n"
                                                            + "The Select List is indexed from 1 to "
                                                            + selectExpCnt));
                                }
                            } else {
                                warn(
                                        "Using constant number "
                                                + node.getText()
                                                + " in group by. If you try to use position alias when hive.groupby.position.alias is false, the position alias will be ignored.");
                            }
                        }
                    }
                }
                // Replace ORDER BY position constants, unless the select list contains '*'
                // (then positions cannot be resolved and an error is raised).
                if (orderbyNode != null) {
                    isAllCol = false;
                    for (int childPos = 0; childPos < selectNode.getChildCount(); ++childPos) {
                        HiveParserASTNode node =
                                (HiveParserASTNode) selectNode.getChild(childPos).getChild(0);
                        if (node != null
                                && node.getToken().getType() == HiveASTParser.TOK_ALLCOLREF) {
                            isAllCol = true;
                        }
                    }
                    for (int childPos = 0; childPos < orderbyNode.getChildCount(); ++childPos) {
                        // Each order item wraps: order node -> null-order node -> expression.
                        HiveParserASTNode colNode =
                                (HiveParserASTNode) orderbyNode.getChild(childPos).getChild(0);
                        HiveParserASTNode node = (HiveParserASTNode) colNode.getChild(0);
                        if (node != null && node.getToken().getType() == HiveASTParser.Number) {
                            if (isObyByPos) {
                                if (!isAllCol) {
                                    int pos = Integer.parseInt(node.getText());
                                    if (pos > 0 && pos <= selectExpCnt) {
                                        colNode.setChild(
                                                0, selectNode.getChild(pos - 1).getChild(0));
                                    } else {
                                        throw new SemanticException(
                                                ErrorMsg.INVALID_POSITION_ALIAS_IN_ORDERBY.getMsg(
                                                        "Position alias: "
                                                                + pos
                                                                + " does not exist\n"
                                                                + "The Select List is indexed from 1 to "
                                                                + selectExpCnt));
                                    }
                                } else {
                                    throw new SemanticException(
                                            ErrorMsg.NO_SUPPORTED_ORDERBY_ALLCOLREF_POS.getMsg());
                                }
                            } else {
                                warn(
                                        "Using constant number "
                                                + node.getText()
                                                + " in order by. If you try to use position alias when hive.orderby.position.alias is false, the position alias will be ignored.");
                            }
                        }
                    }
                }
            }
            // Push children in reverse so they are visited in left-to-right order.
            for (int i = next.getChildren().size() - 1; i >= 0; i--) {
                stack.push((HiveParserASTNode) next.getChildren().get(i));
            }
        }
    }
static PartitionSpec processPartitionSpec(HiveParserASTNode node) {
PartitionSpec pSpec = new PartitionSpec();
int exprCnt = node.getChildCount();
for (int i = 0; i < exprCnt; i++) {
PartitionExpression exprSpec = new PartitionExpression();
exprSpec.setExpression((HiveParserASTNode) node.getChild(i));
pSpec.addExpression(exprSpec);
}
return pSpec;
}
static OrderSpec processOrderSpec(HiveParserASTNode sortNode) {
OrderSpec oSpec = new OrderSpec();
int exprCnt = sortNode.getChildCount();
for (int i = 0; i < exprCnt; i++) {
OrderExpression exprSpec = new OrderExpression();
HiveParserASTNode orderSpec = (HiveParserASTNode) sortNode.getChild(i);
HiveParserASTNode nullOrderSpec = (HiveParserASTNode) orderSpec.getChild(0);
exprSpec.setExpression((HiveParserASTNode) nullOrderSpec.getChild(0));
if (orderSpec.getType() == HiveASTParser.TOK_TABSORTCOLNAMEASC) {
exprSpec.setOrder(Order.ASC);
} else {
exprSpec.setOrder(Order.DESC);
}
if (nullOrderSpec.getType() == HiveASTParser.TOK_NULLS_FIRST) {
exprSpec.setNullOrder(NullOrder.NULLS_FIRST);
} else {
exprSpec.setNullOrder(NullOrder.NULLS_LAST);
}
oSpec.addExpression(exprSpec);
}
return oSpec;
}
static PartitioningSpec processPTFPartitionSpec(HiveParserASTNode pSpecNode) {
PartitioningSpec partitioning = new PartitioningSpec();
HiveParserASTNode firstChild = (HiveParserASTNode) pSpecNode.getChild(0);
int type = firstChild.getType();
if (type == HiveASTParser.TOK_DISTRIBUTEBY || type == HiveASTParser.TOK_CLUSTERBY) {
PartitionSpec pSpec = processPartitionSpec(firstChild);
partitioning.setPartSpec(pSpec);
HiveParserASTNode sortNode =
pSpecNode.getChildCount() > 1
? (HiveParserASTNode) pSpecNode.getChild(1)
: null;
if (sortNode != null) {
OrderSpec oSpec = processOrderSpec(sortNode);
partitioning.setOrderSpec(oSpec);
}
} else if (type == HiveASTParser.TOK_SORTBY || type == HiveASTParser.TOK_ORDERBY) {
OrderSpec oSpec = processOrderSpec(firstChild);
partitioning.setOrderSpec(oSpec);
}
return partitioning;
}
static HiveParserWindowingSpec.WindowFunctionSpec processWindowFunction(
HiveParserASTNode node, HiveParserASTNode wsNode) throws SemanticException {
HiveParserWindowingSpec.WindowFunctionSpec wfSpec =
new HiveParserWindowingSpec.WindowFunctionSpec();
switch (node.getType()) {
case HiveASTParser.TOK_FUNCTIONSTAR:
wfSpec.setStar(true);
break;
case HiveASTParser.TOK_FUNCTIONDI:
wfSpec.setDistinct(true);
break;
}
wfSpec.setExpression(node);
HiveParserASTNode nameNode = (HiveParserASTNode) node.getChild(0);
wfSpec.setName(nameNode.getText());
for (int i = 1; i < node.getChildCount() - 1; i++) {
HiveParserASTNode child = (HiveParserASTNode) node.getChild(i);
wfSpec.addArg(child);
}
if (wsNode != null) {
HiveParserWindowingSpec.WindowSpec ws = processWindowSpec(wsNode);
wfSpec.setWindowSpec(ws);
}
return wfSpec;
}
static boolean containsLeadLagUDF(HiveParserASTNode expressionTree) {
int exprTokenType = expressionTree.getToken().getType();
if (exprTokenType == HiveASTParser.TOK_FUNCTION) {
assert (expressionTree.getChildCount() != 0);
if (expressionTree.getChild(0).getType() == HiveASTParser.Identifier) {
String functionName = unescapeIdentifier(expressionTree.getChild(0).getText());
functionName = functionName.toLowerCase();
if (FunctionRegistry.LAG_FUNC_NAME.equals(functionName)
|| FunctionRegistry.LEAD_FUNC_NAME.equals(functionName)) {
return true;
}
}
}
for (int i = 0; i < expressionTree.getChildCount(); i++) {
if (containsLeadLagUDF((HiveParserASTNode) expressionTree.getChild(i))) {
return true;
}
}
return false;
}
static void processQueryWindowClause(HiveParserWindowingSpec spec, HiveParserASTNode node)
throws SemanticException {
HiveParserASTNode nameNode = (HiveParserASTNode) node.getChild(0);
HiveParserASTNode wsNode = (HiveParserASTNode) node.getChild(1);
if (spec.getWindowSpecs() != null
&& spec.getWindowSpecs().containsKey(nameNode.getText())) {
throw new SemanticException(
HiveParserUtils.generateErrorMessage(
nameNode,
"Duplicate definition of window "
+ nameNode.getText()
+ " is not allowed"));
}
HiveParserWindowingSpec.WindowSpec ws = processWindowSpec(wsNode);
spec.addWindowSpec(nameNode.getText(), ws);
}
static HiveParserWindowingSpec.WindowSpec processWindowSpec(HiveParserASTNode node)
throws SemanticException {
boolean hasSrcId = false, hasPartSpec = false, hasWF = false;
int srcIdIdx = -1, partIdx = -1, wfIdx = -1;
for (int i = 0; i < node.getChildCount(); i++) {
int type = node.getChild(i).getType();
switch (type) {
case HiveASTParser.Identifier:
hasSrcId = true;
srcIdIdx = i;
break;
case HiveASTParser.TOK_PARTITIONINGSPEC:
hasPartSpec = true;
partIdx = i;
break;
case HiveASTParser.TOK_WINDOWRANGE:
case HiveASTParser.TOK_WINDOWVALUES:
hasWF = true;
wfIdx = i;
break;
}
}
HiveParserWindowingSpec.WindowSpec ws = new HiveParserWindowingSpec.WindowSpec();
if (hasSrcId) {
HiveParserASTNode nameNode = (HiveParserASTNode) node.getChild(srcIdIdx);
ws.setSourceId(nameNode.getText());
}
if (hasPartSpec) {
HiveParserASTNode partNode = (HiveParserASTNode) node.getChild(partIdx);
PartitioningSpec partitioning = processPTFPartitionSpec(partNode);
ws.setPartitioning(partitioning);
}
if (hasWF) {
HiveParserASTNode wfNode = (HiveParserASTNode) node.getChild(wfIdx);
HiveParserWindowingSpec.WindowFrameSpec wfSpec = processWindowFrame(wfNode);
ws.setWindowFrame(wfSpec);
}
return ws;
}
static HiveParserWindowingSpec.WindowFrameSpec processWindowFrame(HiveParserASTNode node)
throws SemanticException {
int type = node.getType();
HiveParserWindowingSpec.BoundarySpec start = null, end = null;
start = processBoundary((HiveParserASTNode) node.getChild(0));
if (node.getChildCount() > 1) {
end = processBoundary((HiveParserASTNode) node.getChild(1));
}
return new HiveParserWindowingSpec.WindowFrameSpec(
type == HiveASTParser.TOK_WINDOWVALUES
? HiveParserWindowingSpec.WindowType.RANGE
: HiveParserWindowingSpec.WindowType.ROWS,
start,
end);
}
static HiveParserWindowingSpec.BoundarySpec processBoundary(HiveParserASTNode node)
throws SemanticException {
HiveParserWindowingSpec.BoundarySpec bs = new HiveParserWindowingSpec.BoundarySpec();
int type = node.getType();
boolean hasAmt = true;
switch (type) {
case HiveASTParser.KW_PRECEDING:
bs.setDirection(WindowingSpec.Direction.PRECEDING);
break;
case HiveASTParser.KW_FOLLOWING:
bs.setDirection(WindowingSpec.Direction.FOLLOWING);
break;
case HiveASTParser.KW_CURRENT:
bs.setDirection(WindowingSpec.Direction.CURRENT);
hasAmt = false;
break;
}
if (hasAmt) {
HiveParserASTNode amtNode = (HiveParserASTNode) node.getChild(0);
if (amtNode.getType() == HiveASTParser.KW_UNBOUNDED) {
bs.setAmt(HiveParserWindowingSpec.BoundarySpec.UNBOUNDED_AMOUNT);
} else {
int amt = Integer.parseInt(amtNode.getText());
if (amt <= 0) {
throw new SemanticException(
"Window Frame Boundary Amount must be a positive integer, provided amount is: "
+ amt);
}
bs.setAmt(amt);
}
}
return bs;
}
public static void removeOBInSubQuery(HiveParserQBExpr qbExpr) {
if (qbExpr == null) {
return;
}
if (qbExpr.getOpcode() == HiveParserQBExpr.Opcode.NULLOP) {
HiveParserQB subQB = qbExpr.getQB();
HiveParserQBParseInfo parseInfo = subQB.getParseInfo();
String alias = qbExpr.getAlias();
Map<String, HiveParserASTNode> destToOrderBy = parseInfo.getDestToOrderBy();
Map<String, HiveParserASTNode> destToSortBy = parseInfo.getDestToSortBy();
final String warning =
"WARNING: Order/Sort by without limit in sub query or view ["
+ alias
+ "] is removed, as it's pointless and bad for performance.";
if (destToOrderBy != null) {
for (String dest : destToOrderBy.keySet()) {
if (parseInfo.getDestLimit(dest) == null) {
removeASTChild(destToOrderBy.get(dest));
destToOrderBy.remove(dest);
LOG.warn(warning);
}
}
}
if (destToSortBy != null) {
for (String dest : destToSortBy.keySet()) {
if (parseInfo.getDestLimit(dest) == null) {
removeASTChild(destToSortBy.get(dest));
destToSortBy.remove(dest);
LOG.warn(warning);
}
}
}
for (String subAlias : subQB.getSubqAliases()) {
removeOBInSubQuery(subQB.getSubqForAlias(subAlias));
}
} else {
removeOBInSubQuery(qbExpr.getQBExpr1());
removeOBInSubQuery(qbExpr.getQBExpr2());
}
}
public static TableType obtainTableType(Table tabMetaData) {
if (tabMetaData.getStorageHandler() != null
&& tabMetaData
.getStorageHandler()
.toString()
.equals(HiveParserConstants.DRUID_HIVE_STORAGE_HANDLER_ID)) {
return TableType.DRUID;
}
return TableType.NATIVE;
}
/* This method returns the flip big-endian representation of value */
public static ImmutableBitSet convert(int value, int length) {
BitSet bits = new BitSet();
for (int index = length - 1; index >= 0; index--) {
if (value % 2 != 0) {
bits.set(index);
}
value = value >>> 1;
}
bits.flip(0, length);
return ImmutableBitSet.fromBitSet(bits);
}
public static Map<String, Integer> buildHiveColNameToInputPosMap(
List<ExprNodeDesc> colList, HiveParserRowResolver inputRR) {
Map<Integer, ExprNodeDesc> hashCodeToColumnDesc = new HashMap<>();
HiveParserExprNodeDescUtils.getExprNodeColumnDesc(colList, hashCodeToColumnDesc);
Map<String, Integer> res = new HashMap<>();
String exprNodecolName;
for (ExprNodeDesc exprDesc : hashCodeToColumnDesc.values()) {
exprNodecolName = ((ExprNodeColumnDesc) exprDesc).getColumn();
res.put(exprNodecolName, inputRR.getPosition(exprNodecolName));
}
return res;
}
public static Map<String, Integer> buildHiveToCalciteColumnMap(HiveParserRowResolver rr) {
Map<String, Integer> map = new HashMap<>();
for (ColumnInfo ci : rr.getRowSchema().getSignature()) {
map.put(ci.getInternalName(), rr.getPosition(ci.getInternalName()));
}
return Collections.unmodifiableMap(map);
}
public static org.apache.calcite.util.Pair<List<CorrelationId>, ImmutableBitSet>
getCorrelationUse(RexCall call) {
List<CorrelationId> correlIDs = new ArrayList<>();
ImmutableBitSet.Builder requiredColumns = ImmutableBitSet.builder();
call.accept(new HiveParserUtils.CorrelationCollector(correlIDs, requiredColumns));
if (correlIDs.isEmpty()) {
return null;
}
return org.apache.calcite.util.Pair.of(correlIDs, requiredColumns.build());
}
public static boolean topLevelConjunctCheck(
HiveParserASTNode searchCond, ObjectPair<Boolean, Integer> subqInfo) {
if (searchCond.getType() == HiveASTParser.KW_OR) {
subqInfo.setFirst(Boolean.TRUE);
if (subqInfo.getSecond() > 1) {
return false;
}
}
if (searchCond.getType() == HiveASTParser.TOK_SUBQUERY_EXPR) {
subqInfo.setSecond(subqInfo.getSecond() + 1);
return subqInfo.getSecond() <= 1 || !subqInfo.getFirst();
}
for (int i = 0; i < searchCond.getChildCount(); i++) {
boolean validSubQuery =
topLevelConjunctCheck((HiveParserASTNode) searchCond.getChild(i), subqInfo);
if (!validSubQuery) {
return false;
}
}
return true;
}
public static void addToGBExpr(
HiveParserRowResolver groupByOutputRowResolver,
HiveParserRowResolver groupByInputRowResolver,
HiveParserASTNode grpbyExpr,
ExprNodeDesc grpbyExprNDesc,
List<ExprNodeDesc> gbExprNDescLst,
List<String> outputColumnNames) {
int i = gbExprNDescLst.size();
String field = getColumnInternalName(i);
outputColumnNames.add(field);
gbExprNDescLst.add(grpbyExprNDesc);
ColumnInfo outColInfo = new ColumnInfo(field, grpbyExprNDesc.getTypeInfo(), null, false);
groupByOutputRowResolver.putExpression(grpbyExpr, outColInfo);
addAlternateGByKeyMappings(
grpbyExpr, outColInfo, groupByInputRowResolver, groupByOutputRowResolver);
}
public static int getWindowSpecIndx(HiveParserASTNode wndAST) {
int wi = wndAST.getChildCount() - 1;
if (wi <= 0 || (wndAST.getChild(wi).getType() != HiveASTParser.TOK_WINDOWSPEC)) {
wi = -1;
}
return wi;
}
private static void addAlternateGByKeyMappings(
HiveParserASTNode gByExpr,
ColumnInfo colInfo,
HiveParserRowResolver inputRR,
HiveParserRowResolver outputRR) {
if (gByExpr.getType() == HiveASTParser.DOT
&& gByExpr.getChild(0).getType() == HiveASTParser.TOK_TABLE_OR_COL) {
String tabAlias =
HiveParserBaseSemanticAnalyzer.unescapeIdentifier(
gByExpr.getChild(0).getChild(0).getText().toLowerCase());
String colAlias =
HiveParserBaseSemanticAnalyzer.unescapeIdentifier(
gByExpr.getChild(1).getText().toLowerCase());
outputRR.put(tabAlias, colAlias, colInfo);
} else if (gByExpr.getType() == HiveASTParser.TOK_TABLE_OR_COL) {
String colAlias =
HiveParserBaseSemanticAnalyzer.unescapeIdentifier(
gByExpr.getChild(0).getText().toLowerCase());
String tabAlias = null;
/*
* If the input to the GBy has a table alias for the column, then add an entry based on that tab_alias.
* For e.g. this query: select b.x, count(*) from t1 b group by x needs (tab_alias=b, col_alias=x) in the
* GBy RR. tab_alias=b comes from looking at the HiveParserRowResolver that is the
* ancestor before any GBy/ReduceSinks added for the GBY operation.
*/
try {
ColumnInfo pColInfo = inputRR.get(tabAlias, colAlias);
tabAlias = pColInfo == null ? null : pColInfo.getTabAlias();
} catch (SemanticException se) {
}
outputRR.put(tabAlias, colAlias, colInfo);
}
}
    /**
     * Detects references to SELECT-list aliases inside a HAVING clause. Despite the name,
     * this does not reject the query: it only logs a warning that such references are
     * non-standard behavior, and it also registers each aliased select expression into
     * {@code inputRR} (under the empty table alias) so the reference can still resolve.
     *
     * @param qb query block whose parse info holds the expression-to-alias mapping
     * @param havingExpr the HAVING clause AST
     * @param inputRR row resolver for the HAVING input; mutated as described above
     * @param semanticAnalyzer used to recover the original HAVING text for the warning
     */
    public static void validateNoHavingReferenceToAlias(
            HiveParserQB qb,
            HiveParserASTNode havingExpr,
            HiveParserRowResolver inputRR,
            HiveParserSemanticAnalyzer semanticAnalyzer)
            throws SemanticException {
        HiveParserQBParseInfo qbPI = qb.getParseInfo();
        Map<HiveParserASTNode, String> exprToAlias = qbPI.getAllExprToColumnAlias();
        for (Map.Entry<HiveParserASTNode, String> exprAndAlias : exprToAlias.entrySet()) {
            final HiveParserASTNode expr = exprAndAlias.getKey();
            final String alias = exprAndAlias.getValue();
            // Make the aliased select expression resolvable under the empty table alias.
            if (inputRR.getExpression(expr) != null) {
                inputRR.put("", alias, inputRR.getExpression(expr));
            }
            final Set<Object> aliasReferences = new HashSet<>();
            // Visitor collects every TOK_TABLE_OR_COL whose identifier matches the alias.
            TreeVisitorAction action =
                    new TreeVisitorAction() {
                        @Override
                        public Object pre(Object t) {
                            if (HiveASTParseDriver.ADAPTOR.getType(t)
                                    == HiveASTParser.TOK_TABLE_OR_COL) {
                                Object c = HiveASTParseDriver.ADAPTOR.getChild(t, 0);
                                if (c != null
                                        && HiveASTParseDriver.ADAPTOR.getType(c)
                                                == HiveASTParser.Identifier
                                        && HiveASTParseDriver.ADAPTOR.getText(c).equals(alias)) {
                                    aliasReferences.add(t);
                                }
                            }
                            return t;
                        }

                        @Override
                        public Object post(Object t) {
                            return t;
                        }
                    };
            new TreeVisitor(HiveASTParseDriver.ADAPTOR).visit(havingExpr, action);
            if (aliasReferences.size() > 0) {
                // Recover the original HAVING text from the token stream for the warning.
                String havingClause =
                        semanticAnalyzer
                                .ctx
                                .getTokenRewriteStream()
                                .toString(
                                        havingExpr.getTokenStartIndex(),
                                        havingExpr.getTokenStopIndex());
                String msg =
                        String.format(
                                "Encountered Select alias '%s' in having clause '%s'"
                                        + " This is non standard behavior.",
                                alias, havingClause);
                LOG.warn(msg);
            }
        }
    }
public static List<RexNode> getPartitionKeys(
PartitionSpec partitionSpec,
HiveParserRexNodeConverter converter,
HiveParserRowResolver inputRR,
HiveParserTypeCheckCtx typeCheckCtx,
HiveParserSemanticAnalyzer semanticAnalyzer)
throws SemanticException {
List<RexNode> res = new ArrayList<>();
if (partitionSpec != null) {
List<PartitionExpression> expressions = partitionSpec.getExpressions();
for (PartitionExpression expression : expressions) {
typeCheckCtx.setAllowStatefulFunctions(true);
ExprNodeDesc exp =
semanticAnalyzer.genExprNodeDesc(
expression.getExpression(), inputRR, typeCheckCtx);
res.add(converter.convert(exp));
}
}
return res;
}
public static List<RexFieldCollation> getOrderKeys(
OrderSpec orderSpec,
HiveParserRexNodeConverter converter,
HiveParserRowResolver inputRR,
HiveParserTypeCheckCtx typeCheckCtx,
HiveParserSemanticAnalyzer semanticAnalyzer)
throws SemanticException {
List<RexFieldCollation> orderKeys = new ArrayList<>();
if (orderSpec != null) {
List<OrderExpression> oExprs = orderSpec.getExpressions();
for (OrderExpression oExpr : oExprs) {
typeCheckCtx.setAllowStatefulFunctions(true);
ExprNodeDesc exp =
semanticAnalyzer.genExprNodeDesc(
oExpr.getExpression(), inputRR, typeCheckCtx);
RexNode ordExp = converter.convert(exp);
Set<SqlKind> flags = new HashSet<>();
if (oExpr.getOrder() == Order.DESC) {
flags.add(SqlKind.DESCENDING);
}
if (oExpr.getNullOrder() == NullOrder.NULLS_FIRST) {
flags.add(SqlKind.NULLS_FIRST);
} else if (oExpr.getNullOrder() == NullOrder.NULLS_LAST) {
flags.add(SqlKind.NULLS_LAST);
} else {
throw new SemanticException(
"Unexpected null ordering option: " + oExpr.getNullOrder());
}
orderKeys.add(new RexFieldCollation(ordExp, flags));
}
}
return orderKeys;
}
    /**
     * Builds an {@link AggInfo} for an aggregate or windowing function call.
     *
     * <p>Return-type resolution goes through three stages: ranking functions get a fixed
     * int/double type (with the window's order expressions appended as parameters); other
     * functions try the UDAF evaluator path (with special handling for LEAD/LAG); and if that
     * fails, the call is re-evaluated as a plain GenericUDF expression.
     *
     * @param aggAst the function-call AST node (child 0 is the function name)
     * @param aggFnLstArgIndx index of the last argument child; children 1..aggFnLstArgIndx
     *     are the function's parameters
     * @param inputRR row resolver for the function's input
     * @param winFuncSpec window spec of the call; only read for ranking functions — TODO
     *     confirm it may be null for plain aggregates
     * @param semanticAnalyzer used to translate argument ASTs into expression descriptors
     * @param frameworkConfig supplies the operator table for UDAF lookup
     * @param cluster used for type-checking in the GenericUDF fallback
     */
    public static AggInfo getHiveAggInfo(
            HiveParserASTNode aggAst,
            int aggFnLstArgIndx,
            HiveParserRowResolver inputRR,
            HiveParserWindowingSpec.WindowFunctionSpec winFuncSpec,
            HiveParserSemanticAnalyzer semanticAnalyzer,
            FrameworkConfig frameworkConfig,
            RelOptCluster cluster)
            throws SemanticException {
        AggInfo aInfo;

        // 1. Translate the argument ASTs into expression descriptors.
        ArrayList<ExprNodeDesc> aggParameters = new ArrayList<>();
        for (int i = 1; i <= aggFnLstArgIndx; i++) {
            HiveParserASTNode paraExpr = (HiveParserASTNode) aggAst.getChild(i);
            ExprNodeDesc paraExprNode = semanticAnalyzer.genExprNodeDesc(paraExpr, inputRR);
            aggParameters.add(paraExprNode);
        }

        // 2. Determine the return type.
        boolean isDistinct = aggAst.getType() == HiveASTParser.TOK_FUNCTIONDI;
        TypeInfo udafRetType = null;
        String aggName = unescapeIdentifier(aggAst.getChild(0).getText());
        boolean isAllColumns = false;
        if (FunctionRegistry.isRankingFunction(aggName)) {
            // Ranking functions have fixed return types; percent_rank is a double,
            // the rest are ints.
            if (aggName.equalsIgnoreCase("percent_rank")) {
                udafRetType = TypeInfoFactory.doubleTypeInfo;
            } else {
                udafRetType = TypeInfoFactory.intTypeInfo;
            }
            // The window's ORDER BY expressions become additional parameters.
            for (OrderExpression orderExpr : winFuncSpec.windowSpec.getOrder().getExpressions()) {
                aggParameters.add(
                        semanticAnalyzer.genExprNodeDesc(orderExpr.getExpression(), inputRR));
            }
        } else {
            // Try to derive the return type from the UDAF evaluator.
            try {
                isAllColumns = aggAst.getType() == HiveASTParser.TOK_FUNCTIONSTAR;
                GenericUDAFEvaluator.Mode amode =
                        HiveParserUtils.groupByDescModeToUDAFMode(
                                GroupByDesc.Mode.COMPLETE, isDistinct);
                GenericUDAFEvaluator genericUDAFEvaluator;
                if (aggName.toLowerCase().equals(FunctionRegistry.LEAD_FUNC_NAME)
                        || aggName.toLowerCase().equals(FunctionRegistry.LAG_FUNC_NAME)) {
                    // LEAD/LAG resolve through the windowing evaluator; their declared
                    // return type is a list, so unwrap the element type.
                    ArrayList<ObjectInspector> originalParameterTypeInfos =
                            HiveParserUtils.getWritableObjectInspector(aggParameters);
                    genericUDAFEvaluator =
                            FunctionRegistry.getGenericWindowingEvaluator(
                                    aggName, originalParameterTypeInfos, isDistinct, isAllColumns);
                    HiveParserBaseSemanticAnalyzer.GenericUDAFInfo udaf =
                            HiveParserUtils.getGenericUDAFInfo(
                                    genericUDAFEvaluator, amode, aggParameters);
                    udafRetType = ((ListTypeInfo) udaf.returnType).getListElementTypeInfo();
                } else {
                    genericUDAFEvaluator =
                            HiveParserUtils.getGenericUDAFEvaluator(
                                    aggName,
                                    aggParameters,
                                    aggAst,
                                    isDistinct,
                                    isAllColumns,
                                    frameworkConfig.getOperatorTable());
                    HiveParserBaseSemanticAnalyzer.GenericUDAFInfo udaf =
                            HiveParserUtils.getGenericUDAFInfo(
                                    genericUDAFEvaluator, amode, aggParameters);
                    // Pivot-result functions also declare a list type; unwrap the element.
                    if (HiveParserUtils.pivotResult(aggName)) {
                        udafRetType = ((ListTypeInfo) udaf.returnType).getListElementTypeInfo();
                    } else {
                        udafRetType = udaf.returnType;
                    }
                }
            } catch (Exception e) {
                // Deliberately swallowed: fall through to the GenericUDF path below.
                LOG.debug(
                        "CBO: Couldn't Obtain UDAF evaluators for "
                                + aggName
                                + ", trying to translate to GenericUDF");
            }

            // 3. Fallback: type-check the call as a plain GenericUDF expression.
            if (udafRetType == null) {
                HiveParserTypeCheckCtx tcCtx =
                        new HiveParserTypeCheckCtx(inputRR, frameworkConfig, cluster);
                tcCtx.setAllowStatefulFunctions(true);
                tcCtx.setAllowDistinctFunctions(false);
                ExprNodeDesc exp =
                        semanticAnalyzer.genExprNodeDesc(
                                (HiveParserASTNode) aggAst.getChild(0), inputRR, tcCtx);
                udafRetType = exp.getTypeInfo();
            }
        }
        aInfo = new AggInfo(aggParameters, udafRetType, aggName, isDistinct, isAllColumns, null);
        return aInfo;
    }
    /**
     * Generates a Values {@link RelNode} for a VALUES clause, coercing each string cell to
     * the target column's primitive type, and registers the produced columns (under
     * {@code tabAlias}) in {@code rowResolver}.
     *
     * @param tabAlias alias under which the generated columns are registered
     * @param tmpTable temp table whose column types define the target types of each cell
     * @param rowResolver mutated: one ColumnInfo is added per generated column
     * @param cluster supplies the RexBuilder / type factory
     * @param values the literal rows; each inner list must match the column count, and a
     *     null cell becomes a NULL literal
     * @return a values rel node whose row type is the least-restrictive type across all rows
     */
    public static RelNode genValues(
            String tabAlias,
            Table tmpTable,
            HiveParserRowResolver rowResolver,
            RelOptCluster cluster,
            List<List<String>> values) {
        // Target Hive types come from the temp table's column definitions.
        List<TypeInfo> tmpTableTypes =
                tmpTable.getCols().stream()
                        .map(f -> TypeInfoUtils.getTypeInfoFromTypeString(f.getType()))
                        .collect(Collectors.toList());

        RexBuilder rexBuilder = cluster.getRexBuilder();
        // Corresponding Calcite types (columns are assumed primitive here).
        List<RelDataType> calciteTargetTypes =
                tmpTableTypes.stream()
                        .map(
                                ti ->
                                        HiveParserTypeConverter.convert(
                                                (PrimitiveTypeInfo) ti,
                                                rexBuilder.getTypeFactory()))
                        .collect(Collectors.toList());
        // Field names are ordinal-derived (EXPR$0, EXPR$1, ... style per Calcite's SqlUtil).
        List<String> calciteFieldNames =
                IntStream.range(0, calciteTargetTypes.size())
                        .mapToObj(SqlUtil::deriveAliasFromOrdinal)
                        .collect(Collectors.toList());

        // Convert each row of string cells into RexLiterals of the target types.
        List<RelDataType> calciteRowTypes = new ArrayList<>();
        List<List<RexLiteral>> rows = new ArrayList<>();
        for (List<String> value : values) {
            Preconditions.checkArgument(
                    value.size() == tmpTableTypes.size(),
                    String.format(
                            "Values table col length (%d) and data length (%d) mismatch",
                            tmpTableTypes.size(), value.size()));
            List<RexLiteral> row = new ArrayList<>();
            for (int i = 0; i < tmpTableTypes.size(); i++) {
                PrimitiveTypeInfo primitiveTypeInfo = (PrimitiveTypeInfo) tmpTableTypes.get(i);
                RelDataType calciteType = calciteTargetTypes.get(i);
                String col = value.get(i);
                if (col == null) {
                    row.add(rexBuilder.makeNullLiteral(calciteType));
                } else {
                    switch (primitiveTypeInfo.getPrimitiveCategory()) {
                        case BYTE:
                        case SHORT:
                        case INT:
                        case LONG:
                            row.add(rexBuilder.makeExactLiteral(new BigDecimal(col), calciteType));
                            break;
                        case DECIMAL:
                            // Out-of-range decimals degrade to NULL rather than failing.
                            BigDecimal bigDec = new BigDecimal(col);
                            row.add(
                                    SqlTypeUtil.isValidDecimalValue(bigDec, calciteType)
                                            ? rexBuilder.makeExactLiteral(bigDec, calciteType)
                                            : rexBuilder.makeNullLiteral(calciteType));
                            break;
                        case FLOAT:
                        case DOUBLE:
                            row.add(rexBuilder.makeApproxLiteral(new BigDecimal(col), calciteType));
                            break;
                        case BOOLEAN:
                            row.add(rexBuilder.makeLiteral(Boolean.parseBoolean(col)));
                            break;
                        default:
                            // Everything else (strings, chars, ...) becomes a char literal.
                            row.add(
                                    rexBuilder.makeCharLiteral(
                                            HiveParserUtils.asUnicodeString(col)));
                    }
                }
            }
            // Track each row's struct type so a common type can be derived afterwards.
            calciteRowTypes.add(
                    rexBuilder
                            .getTypeFactory()
                            .createStructType(
                                    row.stream()
                                            .map(RexLiteral::getType)
                                            .collect(Collectors.toList()),
                                    calciteFieldNames));
            rows.add(row);
        }

        // The values node's row type is the least-restrictive type over all rows.
        RelDataType calciteRowType = rexBuilder.getTypeFactory().leastRestrictive(calciteRowTypes);
        // Register the generated columns with the row resolver under the given alias.
        for (int i = 0; i < calciteFieldNames.size(); i++) {
            ColumnInfo colInfo =
                    new ColumnInfo(
                            calciteFieldNames.get(i),
                            HiveParserTypeConverter.convert(
                                    calciteRowType.getFieldList().get(i).getType()),
                            tabAlias,
                            false);
            rowResolver.put(tabAlias, calciteFieldNames.get(i), colInfo);
        }
        return HiveParserUtils.genValuesRelNode(
                cluster,
                rexBuilder.getTypeFactory().createStructType(calciteRowType.getFieldList()),
                rows);
    }
    /**
     * Validates (and normalizes) the static partition values of an INSERT against the
     * table's declared partition column types. A no-op unless
     * {@code hive.typecheck.on.insert} is enabled. When a value's type differs from the
     * column type, the value is converted and {@code partSpec} is updated in place with the
     * converted string; an unconvertible value raises a type-mismatch error.
     *
     * @param tbl target table providing the partition key types
     * @param partSpec partition column -> value map; mutated with normalized values
     * @param astNode the partition-spec AST used to extract constant value expressions
     * @throws SemanticException if a value cannot be converted to the column's type
     */
    private static void validatePartColumnType(
            Table tbl,
            Map<String, String> partSpec,
            HiveParserASTNode astNode,
            HiveConf conf,
            FrameworkConfig frameworkConfig,
            RelOptCluster cluster)
            throws SemanticException {
        if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_TYPE_CHECK_ON_INSERT)) {
            return;
        }

        // Extract constant expressions for the statically-specified partition values;
        // a false return means dynamic partitions are present (only static ones checked).
        Map<HiveParserASTNode, ExprNodeDesc> astExprNodeMap = new HashMap<>();
        if (!getPartExprNodeDesc(astNode, conf, astExprNodeMap, frameworkConfig, cluster)) {
            LOG.warn(
                    "Dynamic partitioning is used; only validating "
                            + astExprNodeMap.size()
                            + " columns");
        }

        if (astExprNodeMap.isEmpty()) {
            return;
        }
        // Lower-cased partition column name -> declared type string.
        List<FieldSchema> parts = tbl.getPartitionKeys();
        Map<String, String> partCols = new HashMap<>(parts.size());
        for (FieldSchema col : parts) {
            partCols.put(col.getName(), col.getType().toLowerCase());
        }
        for (Map.Entry<HiveParserASTNode, ExprNodeDesc> astExprNodePair :
                astExprNodeMap.entrySet()) {
            String astKeyName = astExprNodePair.getKey().toString().toLowerCase();
            if (astExprNodePair.getKey().getType() == HiveASTParser.Identifier) {
                astKeyName = stripIdentifierQuotes(astKeyName);
            }
            String colType = partCols.get(astKeyName);
            // Input inspector reflects the value's actual type, output the declared type.
            ObjectInspector inputOI =
                    TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(
                            astExprNodePair.getValue().getTypeInfo());

            TypeInfo expectedType = TypeInfoUtils.getTypeInfoFromTypeString(colType);
            ObjectInspector outputOI =
                    TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(expectedType);
            Object value = ((ExprNodeConstantDesc) astExprNodePair.getValue()).getValue();
            Object convertedValue = value;
            if (!inputOI.getTypeName().equals(outputOI.getTypeName())) {
                // Convert the constant to the declared column type; null means the value
                // is not representable in that type.
                convertedValue =
                        ObjectInspectorConverters.getConverter(inputOI, outputOI).convert(value);
                if (convertedValue == null) {
                    throw new SemanticException(
                            ErrorMsg.PARTITION_SPEC_TYPE_MISMATCH,
                            astKeyName,
                            inputOI.getTypeName(),
                            outputOI.getTypeName());
                }

                if (!convertedValue.toString().equals(value.toString())) {
                    // Lossy conversion (e.g. truncation): warn but proceed.
                    LOG.warn(
                            "Partition "
                                    + astKeyName
                                    + " expects type "
                                    + outputOI.getTypeName()
                                    + " but input value is in type "
                                    + inputOI.getTypeName()
                                    + ". Convert "
                                    + value.toString()
                                    + " to "
                                    + convertedValue.toString());
                }
            }

            if (!convertedValue.toString().equals(partSpec.get(astKeyName))) {
                LOG.warn(
                        "Partition Spec "
                                + astKeyName
                                + "="
                                + partSpec.get(astKeyName)
                                + " has been changed to "
                                + astKeyName
                                + "="
                                + convertedValue.toString());
            }
            partSpec.put(astKeyName, convertedValue.toString());
        }
    }
private static void errorPartSpec(Map<String, String> partSpec, List<FieldSchema> parts)
throws SemanticException {
StringBuilder sb = new StringBuilder("Partition columns in the table schema are: (");
for (FieldSchema fs : parts) {
sb.append(fs.getName()).append(", ");
}
sb.setLength(sb.length() - 2);
sb.append("), while the partitions specified in the query are: (");
Iterator<String> itrPsKeys = partSpec.keySet().iterator();
while (itrPsKeys.hasNext()) {
sb.append(itrPsKeys.next()).append(", ");
}
sb.setLength(sb.length() - 2);
sb.append(").");
throw new SemanticException(ErrorMsg.PARTSPEC_DIFFER_FROM_SCHEMA.getMsg(sb.toString()));
}
/** Counterpart of hive's BaseSemanticAnalyzer.TableSpec. */
public static class TableSpec {
public String tableName;
public Table tableHandle;
public Map<String, String> partSpec;
public Partition partHandle;
public int numDynParts;
public List<Partition>
partitions;
/** SpecType. */
public enum SpecType {
TABLE_ONLY,
STATIC_PARTITION,
DYNAMIC_PARTITION
}
public TableSpec.SpecType specType;
public TableSpec(
Hive db,
HiveConf conf,
HiveParserASTNode ast,
FrameworkConfig frameworkConfig,
RelOptCluster cluster)
throws SemanticException {
this(db, conf, ast, true, false, frameworkConfig, cluster);
}
public TableSpec(
Hive db,
HiveConf conf,
HiveParserASTNode ast,
boolean allowDynamicPartitionsSpec,
boolean allowPartialPartitionsSpec,
FrameworkConfig frameworkConfig,
RelOptCluster cluster)
throws SemanticException {
assert (ast.getToken().getType() == HiveASTParser.TOK_TAB
|| ast.getToken().getType() == HiveASTParser.TOK_TABLE_PARTITION
|| ast.getToken().getType() == HiveASTParser.TOK_TABTYPE
|| ast.getToken().getType() == HiveASTParser.TOK_CREATETABLE
|| ast.getToken().getType() == HiveASTParser.TOK_CREATE_MATERIALIZED_VIEW);
int childIndex = 0;
numDynParts = 0;
try {
tableName = getUnescapedName((HiveParserASTNode) ast.getChild(0));
boolean testMode = conf.getBoolVar(HiveConf.ConfVars.HIVETESTMODE);
if (testMode) {
tableName = conf.getVar(HiveConf.ConfVars.HIVETESTMODEPREFIX) + tableName;
}
if (ast.getToken().getType() != HiveASTParser.TOK_CREATETABLE
&& ast.getToken().getType() != HiveASTParser.TOK_CREATE_MATERIALIZED_VIEW) {
tableHandle = db.getTable(tableName);
}
} catch (InvalidTableException ite) {
throw new SemanticException(
HiveParserErrorMsg.getMsg(ErrorMsg.INVALID_TABLE, ast.getChild(0)), ite);
} catch (HiveException e) {
throw new SemanticException("Error while retrieving table metadata", e);
}
if (ast.getChildCount() == 2
&& ast.getToken().getType() != HiveASTParser.TOK_CREATETABLE
&& ast.getToken().getType() != HiveASTParser.TOK_CREATE_MATERIALIZED_VIEW) {
childIndex = 1;
HiveParserASTNode partspec = (HiveParserASTNode) ast.getChild(1);
partitions = new ArrayList<Partition>();
Map<String, String> tmpPartSpec = new HashMap<>(partspec.getChildCount());
for (int i = 0; i < partspec.getChildCount(); ++i) {
HiveParserASTNode partspecVal = (HiveParserASTNode) partspec.getChild(i);
String val = null;
String colName =
unescapeIdentifier(partspecVal.getChild(0).getText().toLowerCase());
if (partspecVal.getChildCount() < 2) {
if (allowDynamicPartitionsSpec) {
++numDynParts;
} else {
throw new SemanticException(
ErrorMsg.INVALID_PARTITION.getMsg(
" - Dynamic partitions not allowed"));
}
} else {
val = stripQuotes(partspecVal.getChild(1).getText());
}
tmpPartSpec.put(colName, val);
}
validatePartSpec(
tableHandle, tmpPartSpec, ast, conf, false, frameworkConfig, cluster);
List<FieldSchema> parts = tableHandle.getPartitionKeys();
partSpec = new LinkedHashMap<String, String>(partspec.getChildCount());
for (FieldSchema fs : parts) {
String partKey = fs.getName();
partSpec.put(partKey, tmpPartSpec.get(partKey));
}
if (numDynParts > 0) {
int numStaPart = parts.size() - numDynParts;
if (numStaPart == 0
&& conf.getVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE)
.equalsIgnoreCase("strict")) {
throw new SemanticException(
ErrorMsg.DYNAMIC_PARTITION_STRICT_MODE.getMsg());
}
if (partSpec.keySet().size() != parts.size()) {
errorPartSpec(partSpec, parts);
}
Iterator<String> itrPsKeys = partSpec.keySet().iterator();
for (FieldSchema fs : parts) {
if (!itrPsKeys.next().toLowerCase().equals(fs.getName().toLowerCase())) {
errorPartSpec(partSpec, parts);
}
}
for (FieldSchema fs : parts) {
if (partSpec.get(fs.getName().toLowerCase()) == null) {
if (numStaPart > 0) {
throw new SemanticException(
HiveParserErrorMsg.getMsg(
ErrorMsg.PARTITION_DYN_STA_ORDER,
ast.getChild(childIndex)));
}
break;
} else {
--numStaPart;
}
}
partHandle = null;
specType = TableSpec.SpecType.DYNAMIC_PARTITION;
} else {
try {
if (allowPartialPartitionsSpec) {
partitions = db.getPartitions(tableHandle, partSpec);
} else {
partHandle = db.getPartition(tableHandle, partSpec, false);
if (partHandle == null) {
partHandle = new Partition(tableHandle, partSpec, null);
} else {
partitions.add(partHandle);
}
}
} catch (HiveException e) {
throw new SemanticException(
HiveParserErrorMsg.getMsg(
ErrorMsg.INVALID_PARTITION, ast.getChild(childIndex)),
e);
}
specType = TableSpec.SpecType.STATIC_PARTITION;
}
} else {
specType = TableSpec.SpecType.TABLE_ONLY;
}
}
public Map<String, String> getPartSpec() {
return this.partSpec;
}
    /** Overrides the partition spec computed during analysis. */
    public void setPartSpec(Map<String, String> partSpec) {
        this.partSpec = partSpec;
    }
@Override
public String toString() {
if (partHandle != null) {
return partHandle.toString();
} else {
return tableHandle.toString();
}
}
}
/** Counterpart of hive's BaseSemanticAnalyzer.AnalyzeRewriteContext. */
public static class AnalyzeRewriteContext {
private String tableName;
private List<String> colName;
private List<String> colType;
private boolean tblLvl;
public String getTableName() {
return tableName;
}
public void setTableName(String tableName) {
this.tableName = tableName;
}
public List<String> getColName() {
return colName;
}
public void setColName(List<String> colName) {
this.colName = colName;
}
public boolean isTblLvl() {
return tblLvl;
}
public void setTblLvl(boolean isTblLvl) {
this.tblLvl = isTblLvl;
}
public List<String> getColType() {
return colType;
}
public void setColType(List<String> colType) {
this.colType = colType;
}
}
/** Counterpart of hive's BaseSemanticAnalyzer.PKInfo. */
private static class PKInfo {
public String colName;
public String constraintName;
public boolean rely;
public PKInfo(String colName) {
this.colName = colName;
}
public PKInfo(String colName, String constraintName, boolean rely) {
this.colName = colName;
this.constraintName = constraintName;
this.rely = rely;
}
}
/** Counterpart of hive's SemanticAnalyzer.CTEClause. */
static class CTEClause {
CTEClause(String alias, HiveParserASTNode cteNode) {
this.alias = alias;
this.cteNode = cteNode;
}
String alias;
HiveParserASTNode cteNode;
boolean materialize;
int reference;
HiveParserQBExpr qbExpr;
List<CTEClause> parents = new ArrayList<>();
@Override
public String toString() {
return alias == null ? "<root>" : alias;
}
}
    /** Counterpart of hive's SemanticAnalyzer.Phase1Ctx. */
    public static class Phase1Ctx {
        // Current destination clause name — presumably an "insclause-N" style id; TODO confirm against callers.
        String dest;
        // Counter used to generate the next destination id.
        int nextNum;
    }
    /** Counterpart of hive's SemanticAnalyzer.GenericUDAFInfo. Resolved info for one aggregate function call. */
    public static class GenericUDAFInfo {
        // Aggregation arguments after conversion to the types the evaluator expects.
        public ArrayList<ExprNodeDesc> convertedParameters;
        // Evaluator instance for the aggregate function.
        public GenericUDAFEvaluator genericUDAFEvaluator;
        // Return type of the aggregation.
        public TypeInfo returnType;
    }
/** Counterpart of hive's CalcitePlanner.TableType. */
public enum TableType {
DRUID,
NATIVE
}
/** Counterpart of hive's CalcitePlanner.AggInfo. */
public static class AggInfo {
private final List<ExprNodeDesc> aggParams;
private final TypeInfo returnType;
private final String udfName;
private final boolean distinct;
private final boolean isAllColumns;
private final String alias;
public AggInfo(
List<ExprNodeDesc> aggParams,
TypeInfo returnType,
String udfName,
boolean isDistinct,
boolean isAllColumns,
String alias) {
this.aggParams = aggParams;
this.returnType = returnType;
this.udfName = udfName;
distinct = isDistinct;
this.isAllColumns = isAllColumns;
this.alias = alias;
}
public List<ExprNodeDesc> getAggParams() {
return aggParams;
}
public TypeInfo getReturnType() {
return returnType;
}
public String getUdfName() {
return udfName;
}
public boolean isDistinct() {
return distinct;
}
public boolean isAllColumns() {
return isAllColumns;
}
public String getAlias() {
return alias;
}
}
/** Counterpart of hive's BaseSemanticAnalyzer.RowFormatParams. */
public static class HiveParserRowFormatParams {
String fieldDelim = null;
String fieldEscape = null;
String collItemDelim = null;
String mapKeyDelim = null;
String lineDelim = null;
String nullFormat = null;
public String getFieldDelim() {
return fieldDelim;
}
public String getFieldEscape() {
return fieldEscape;
}
public String getCollItemDelim() {
return collItemDelim;
}
public String getMapKeyDelim() {
return mapKeyDelim;
}
public String getLineDelim() {
return lineDelim;
}
public String getNullFormat() {
return nullFormat;
}
public void analyzeRowFormat(HiveParserASTNode child) throws SemanticException {
child = (HiveParserASTNode) child.getChild(0);
int numChildRowFormat = child.getChildCount();
for (int numC = 0; numC < numChildRowFormat; numC++) {
HiveParserASTNode rowChild = (HiveParserASTNode) child.getChild(numC);
switch (rowChild.getToken().getType()) {
case HiveASTParser.TOK_TABLEROWFORMATFIELD:
fieldDelim = unescapeSQLString(rowChild.getChild(0).getText());
if (rowChild.getChildCount() >= 2) {
fieldEscape = unescapeSQLString(rowChild.getChild(1).getText());
}
break;
case HiveASTParser.TOK_TABLEROWFORMATCOLLITEMS:
collItemDelim = unescapeSQLString(rowChild.getChild(0).getText());
break;
case HiveASTParser.TOK_TABLEROWFORMATMAPKEYS:
mapKeyDelim = unescapeSQLString(rowChild.getChild(0).getText());
break;
case HiveASTParser.TOK_TABLEROWFORMATLINES:
lineDelim = unescapeSQLString(rowChild.getChild(0).getText());
if (!lineDelim.equals("\n") && !lineDelim.equals("10")) {
throw new SemanticException(
HiveParserUtils.generateErrorMessage(
rowChild,
ErrorMsg.LINES_TERMINATED_BY_NON_NEWLINE.getMsg()));
}
break;
case HiveASTParser.TOK_TABLEROWFORMATNULL:
nullFormat = unescapeSQLString(rowChild.getChild(0).getText());
break;
default:
throw new AssertionError("Unknown Token: " + rowChild);
}
}
}
}
    /** Counterpart of hive's SQLPrimaryKey. Immutable description of one primary-key column of a table. */
    public static class PrimaryKey implements Serializable {
        private static final long serialVersionUID = 3036210046732750293L;

        // Qualifying database and table, plus the primary-key column name.
        private final String dbName;
        private final String tblName;
        private final String pk;
        private final String constraintName;
        // Constraint characteristics (enable/validate/rely flags).
        private final boolean enable;
        private final boolean validate;
        private final boolean rely;

        public PrimaryKey(
                String dbName,
                String tblName,
                String pk,
                String constraintName,
                boolean enable,
                boolean validate,
                boolean rely) {
            this.dbName = dbName;
            this.tblName = tblName;
            this.pk = pk;
            this.constraintName = constraintName;
            this.enable = enable;
            this.validate = validate;
            this.rely = rely;
        }

        public String getDbName() {
            return dbName;
        }

        public String getTblName() {
            return tblName;
        }

        public String getPk() {
            return pk;
        }

        public String getConstraintName() {
            return constraintName;
        }

        public boolean isEnable() {
            return enable;
        }

        public boolean isValidate() {
            return validate;
        }

        public boolean isRely() {
            return rely;
        }
    }
    /** Counterpart of hive's SQLNotNullConstraint. Immutable description of a NOT NULL constraint on one column. */
    public static class NotNullConstraint implements Serializable {
        private static final long serialVersionUID = 7642343368203203950L;

        // Qualifying database and table, plus the constrained column name.
        private final String dbName;
        private final String tblName;
        private final String colName;
        private final String constraintName;
        // Constraint characteristics (enable/validate/rely flags).
        private final boolean enable;
        private final boolean validate;
        private final boolean rely;

        public NotNullConstraint(
                String dbName,
                String tblName,
                String colName,
                String constraintName,
                boolean enable,
                boolean validate,
                boolean rely) {
            this.dbName = dbName;
            this.tblName = tblName;
            this.colName = colName;
            this.constraintName = constraintName;
            this.enable = enable;
            this.validate = validate;
            this.rely = rely;
        }

        public String getDbName() {
            return dbName;
        }

        public String getTblName() {
            return tblName;
        }

        public String getColName() {
            return colName;
        }

        public String getConstraintName() {
            return constraintName;
        }

        public boolean isEnable() {
            return enable;
        }

        public boolean isValidate() {
            return validate;
        }

        public boolean isRely() {
            return rely;
        }
    }
}
|
It's not clear from this code, but the `HealthChecker` is shared, so it cannot be closed here.
|
public void stop() {
filebeatRestarter.shutdown();
if (!terminated.compareAndSet(false, true)) {
throw new RuntimeException("Can not re-stop a node agent.");
}
signalWorkToBeDone();
do {
try {
loopThread.join();
filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
} catch (InterruptedException e) {
context.log(logger, LogLevel.ERROR,
"Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown");
}
} while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
healthChecker.ifPresent(HealthChecker::close);
context.log(logger, "Stopped");
}
|
healthChecker.ifPresent(HealthChecker::close);
|
    /**
     * Stops this node agent: prevents any further ticks, then blocks until both the converge
     * loop thread and the filebeat-restarter scheduler have fully terminated. The shared
     * HealthChecker is intentionally NOT closed here; its owner is responsible for that.
     *
     * @throws RuntimeException if the agent was already stopped
     */
    public void stop() {
        filebeatRestarter.shutdown();
        if (!terminated.compareAndSet(false, true)) {
            throw new RuntimeException("Can not re-stop a node agent.");
        }
        signalWorkToBeDone();
        // An interrupt does not abort the shutdown: log it and retry until both are done.
        do {
            try {
                loopThread.join();
                filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
            } catch (InterruptedException e) {
                context.log(logger, LogLevel.ERROR,
                        "Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown");
            }
        } while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
        context.log(logger, "Stopped");
    }
|
class NodeAgentImpl implements NodeAgent {
private static final long BYTES_IN_GB = 1_000_000_000L;
private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());
private final AtomicBoolean terminated = new AtomicBoolean(false);
private boolean isFrozen = true;
private boolean wantFrozen = false;
private boolean workToDoNow = true;
private boolean expectNodeNotInNodeRepo = false;
private final Object monitor = new Object();
private DockerImage imageBeingDownloaded = null;
private final NodeAgentContext context;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final StorageMaintainer storageMaintainer;
private final Clock clock;
private final Duration timeBetweenEachConverge;
private final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer;
private final Optional<AclMaintainer> aclMaintainer;
private int numberOfUnhandledException = 0;
private Instant lastConverge;
private Node.State lastState = null;
private final Thread loopThread;
private final Optional<HealthChecker> healthChecker;
private final ScheduledExecutorService filebeatRestarter =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
private Consumer<String> serviceRestarter;
private Optional<Future<?>> currentFilebeatRestarter = Optional.empty();
private boolean hasResumedNode = false;
private boolean hasStartedServices = true;
/**
* ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
* NodeAgent explicitly starting it.
* STARTING state is set just before we attempt to start a container, if successful we move to the next state.
* Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
* NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
* to get updated state of the container.
*/
enum ContainerState {
ABSENT,
STARTING,
UNKNOWN
}
private ContainerState containerState = UNKNOWN;
private NodeSpec lastNode = null;
private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();
    /**
     * Creates a node agent for one host.
     *
     * @param timeBetweenEachConverge interval between converge attempts when no work is signaled
     * @param healthChecker optional health checker; shared (see note elsewhere in this file),
     *     so it must not be closed by this agent
     */
    public NodeAgentImpl(
            final NodeAgentContext context,
            final NodeRepository nodeRepository,
            final Orchestrator orchestrator,
            final DockerOperations dockerOperations,
            final StorageMaintainer storageMaintainer,
            final Clock clock,
            final Duration timeBetweenEachConverge,
            final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer,
            final Optional<AclMaintainer> aclMaintainer,
            final Optional<HealthChecker> healthChecker) {
        this.context = context;
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.dockerOperations = dockerOperations;
        this.storageMaintainer = storageMaintainer;
        this.clock = clock;
        this.timeBetweenEachConverge = timeBetweenEachConverge;
        this.lastConverge = clock.instant();
        this.athenzCredentialsMaintainer = athenzCredentialsMaintainer;
        this.aclMaintainer = aclMaintainer;
        this.healthChecker = healthChecker;

        // Converge loop; started by start(). NOTE(review): a Throwable escaping tick() ends this
        // loop permanently — tick() catches Exception broadly, so only Errors should get here.
        this.loopThread = new Thread(() -> {
            try {
                while (!terminated.get()) tick();
            } catch (Throwable t) {
                numberOfUnhandledException++;
                context.log(logger, LogLevel.ERROR, "Unhandled throwable, ignoring", t);
            }
        });
        this.loopThread.setName("tick-" + context.hostname());
    }
    @Override
    public boolean setFrozen(boolean frozen) {
        synchronized (monitor) {
            if (wantFrozen != frozen) {
                wantFrozen = frozen;
                context.log(logger, LogLevel.DEBUG, wantFrozen ? "Freezing" : "Unfreezing");
                signalWorkToBeDone();
            }

            // Reports whether the agent has actually reached the requested state;
            // the tick loop applies wantFrozen -> isFrozen asynchronously.
            return isFrozen == frozen;
        }
    }
    @Override
    public void start() {
        context.log(logger, "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms");
        loopThread.start();

        // Restart command used by the scheduled filebeat restarter: runs
        // "service <name> restart" as root inside the container; failures are only logged.
        serviceRestarter = service -> {
            try {
                ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
                        context, "service", service, "restart");

                if (!processResult.isSuccess()) {
                    context.log(logger, LogLevel.ERROR, "Failed to restart service " + service + ": " + processResult);
                }
            } catch (Exception e) {
                context.log(logger, LogLevel.ERROR, "Failed to restart service " + service, e);
            }
        };
    }
    // NOTE(review): @Override on a package-private method — verify this actually overrides a
    // superclass/interface member; it may be a leftover from an edit.
    @Override
    void startServicesIfNeeded() {
        if (!hasStartedServices) {
            context.log(logger, "Starting services");
            dockerOperations.startServices(context);
            hasStartedServices = true;
        }
    }
    /** Runs the optional node resume program once per container start, and arms the daily filebeat restart. */
    void resumeNodeIfNeeded(NodeSpec node) {
        if (!hasResumedNode) {
            if (!currentFilebeatRestarter.isPresent()) {
                storageMaintainer.writeMetricsConfig(context, node);
                // Restart filebeat once a day (best effort; serviceRestarter only logs failures).
                currentFilebeatRestarter = Optional.of(filebeatRestarter.scheduleWithFixedDelay(
                        () -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS));
            }
            context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command");
            dockerOperations.resumeNode(context);
            hasResumedNode = true;
        }
    }
    /**
     * Computes which node attributes (restart/reboot generation, docker image) have converged
     * to their wanted values and publishes the new values to the node repository if anything changed.
     */
    private void updateNodeRepoWithCurrentAttributes(final NodeSpec node) {
        final NodeAttributes currentNodeAttributes = new NodeAttributes();
        final NodeAttributes newNodeAttributes = new NodeAttributes();

        if (!Objects.equals(node.getCurrentRestartGeneration(), node.getWantedRestartGeneration())) {
            currentNodeAttributes.withRestartGeneration(node.getCurrentRestartGeneration());
            newNodeAttributes.withRestartGeneration(node.getWantedRestartGeneration());
        }
        if (!Objects.equals(node.getCurrentRebootGeneration(), node.getWantedRebootGeneration())) {
            currentNodeAttributes.withRebootGeneration(node.getCurrentRebootGeneration());
            newNodeAttributes.withRebootGeneration(node.getWantedRebootGeneration());
        }

        // Only report the wanted image as current while containerState is UNKNOWN (i.e. not absent/starting).
        Optional<DockerImage> actualDockerImage = node.getWantedDockerImage().filter(n -> containerState == UNKNOWN);
        if (!Objects.equals(node.getCurrentDockerImage(), actualDockerImage)) {
            currentNodeAttributes.withDockerImage(node.getCurrentDockerImage().orElse(new DockerImage("")));
            newNodeAttributes.withDockerImage(actualDockerImage.orElse(new DockerImage("")));
        }

        publishStateToNodeRepoIfChanged(currentNodeAttributes, newNodeAttributes);
    }
private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes, NodeAttributes newAttributes) {
if (!currentAttributes.equals(newAttributes)) {
context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
currentAttributes, newAttributes);
nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes);
}
}
    /** Creates and starts the container for the given node spec, resetting per-container bookkeeping. */
    private void startContainer(NodeSpec node) {
        ContainerData containerData = createContainerData(context, node);
        dockerOperations.createContainer(context, node, containerData);
        dockerOperations.startContainer(context);
        lastCpuMetric = new CpuUsageReporter(); // fresh CPU deltas for the new container

        // hasStartedServices = true: presumably services come up as part of container start — confirm.
        hasStartedServices = true;
        hasResumedNode = false;
        context.log(logger, "Container successfully started, new containerState is " + containerState);
    }
    /**
     * Removes the existing container if required (see shouldRemoveContainer); otherwise restarts
     * its services when the restart generation was bumped. Returns the container if it is still
     * present after this call, empty otherwise.
     */
    private Optional<Container> removeContainerIfNeededUpdateContainerState(NodeSpec node, Optional<Container> existingContainer) {
        return existingContainer
                .flatMap(container -> removeContainerIfNeeded(node, container))
                .map(container -> {
                    shouldRestartServices(node).ifPresent(restartReason -> {
                        context.log(logger, "Will restart services: " + restartReason);
                        restartServices(node, container);
                    });
                    return container;
                });
    }
    /**
     * Returns the reason services should be restarted (wanted restart generation is ahead of the
     * current one), or empty if no restart is needed.
     */
    private Optional<String> shouldRestartServices(NodeSpec node) {
        if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty();

        // NOTE(review): assumes current restart generation is present whenever wanted is — confirm.
        if (node.getCurrentRestartGeneration().get() < node.getWantedRestartGeneration().get()) {
            return Optional.of("Restart requested - wanted restart generation has been bumped: "
                    + node.getCurrentRestartGeneration().get() + " -> " + node.getWantedRestartGeneration().get());
        }
        return Optional.empty();
    }
    /** Restarts vespa in the container; only acts on a running container of an active node. */
    private void restartServices(NodeSpec node, Container existingContainer) {
        if (existingContainer.state.isRunning() && node.getState() == Node.State.active) {
            context.log(logger, "Restarting services");
            // Ask the Orchestrator for permission (suspend) before restarting.
            orchestratorSuspendNode();
            dockerOperations.restartVespa(context);
        }
    }
    @Override
    public void stopServices() {
        context.log(logger, "Stopping services");
        if (containerState == ABSENT) return;
        try {
            hasStartedServices = hasResumedNode = false;
            dockerOperations.stopServices(context);
        } catch (ContainerNotFoundException e) {
            // Container disappeared under us; remember that so later calls skip docker entirely.
            containerState = ABSENT;
        }
    }
    @Override
    public void suspend() {
        context.log(logger, "Suspending services on node");
        if (containerState == ABSENT) return;
        try {
            hasResumedNode = false;
            dockerOperations.suspendNode(context);
        } catch (ContainerNotFoundException e) {
            // Container disappeared under us; remember that so later calls skip docker entirely.
            containerState = ABSENT;
        } catch (RuntimeException e) {
            // Suspend is best effort; removal/stop can still proceed.
            context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e);
        }
    }
    /**
     * Returns the reason the existing container must be removed (wrong node state, wrong image,
     * not running, wrong resource allocation, reboot wanted, or a previously failed start),
     * or empty if it can be kept.
     */
    private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) {
        final Node.State nodeState = node.getState();
        if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
            return Optional.of("Node in state " + nodeState + ", container should no longer be running");
        }
        if (node.getWantedDockerImage().isPresent() && !node.getWantedDockerImage().get().equals(existingContainer.image)) {
            return Optional.of("The node is supposed to run a new Docker image: "
                    + existingContainer.image.asString() + " -> " + node.getWantedDockerImage().get().asString());
        }
        if (!existingContainer.state.isRunning()) {
            return Optional.of("Container no longer running");
        }

        ContainerResources wantedContainerResources = ContainerResources.from(
                node.getMinCpuCores(), node.getMinMainMemoryAvailableGb());
        if (!wantedContainerResources.equals(existingContainer.resources)) {
            return Optional.of("Container should be running with different resource allocation, wanted: " +
                    wantedContainerResources + ", actual: " + existingContainer.resources);
        }

        if (node.getCurrentRebootGeneration() < node.getWantedRebootGeneration()) {
            return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d",
                    node.getCurrentRebootGeneration(), node.getWantedRebootGeneration()));
        }

        // STARTING at this point means a previous start attempt never completed (see ContainerState).
        if (containerState == STARTING) return Optional.of("Container failed to start");
        return Optional.empty();
    }
    /**
     * Removes the container when shouldRemoveContainer() gives a reason: suspends/stops services
     * (best effort), cancels the filebeat restarter, salvages core dumps, and deletes the container.
     * Returns the container if it was kept, empty if it was removed.
     */
    private Optional<Container> removeContainerIfNeeded(NodeSpec node, Container existingContainer) {
        Optional<String> removeReason = shouldRemoveContainer(node, existingContainer);
        if (removeReason.isPresent()) {
            context.log(logger, "Will remove container: " + removeReason.get());

            if (existingContainer.state.isRunning()) {
                if (node.getState() == Node.State.active) {
                    // Active node: get Orchestrator permission before taking it down.
                    orchestratorSuspendNode();
                }

                try {
                    if (node.getState() != Node.State.dirty) {
                        suspend();
                    }
                    stopServices();
                } catch (Exception e) {
                    // Removal proceeds even if graceful shutdown fails.
                    context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e);
                }
            }

            stopFilebeatSchedulerIfNeeded();
            storageMaintainer.handleCoreDumpsForContainer(context, node, Optional.of(existingContainer));
            dockerOperations.removeContainer(context, existingContainer);
            containerState = ABSENT;
            context.log(logger, "Container successfully removed, new containerState is " + containerState);
            return Optional.empty();
        }
        return Optional.of(existingContainer);
    }
    /** Kicks off an async docker pull of the wanted image if it differs from the current one. */
    private void scheduleDownLoadIfNeeded(NodeSpec node) {
        if (node.getCurrentDockerImage().equals(node.getWantedDockerImage())) return;

        if (dockerOperations.pullImageAsyncIfNeeded(node.getWantedDockerImage().get())) {
            // Pull started or still in progress: remember it so isDownloadingImage() reports true.
            imageBeingDownloaded = node.getWantedDockerImage().get();
        } else if (imageBeingDownloaded != null) {
            // Pull finished; clear the in-progress marker.
            imageBeingDownloaded = null;
        }
    }
    /** Wakes up the converge loop so it runs a tick immediately instead of sleeping out the interval. */
    private void signalWorkToBeDone() {
        synchronized (monitor) {
            if (!workToDoNow) {
                workToDoNow = true;
                context.log(logger, LogLevel.DEBUG, "Signaling work to be done");
                monitor.notifyAll();
            }
        }
    }
    /**
     * One iteration of the converge loop: sleeps until the next converge interval (or until work
     * is signaled), applies any pending freeze/unfreeze request, then runs converge() unless
     * frozen. All expected exception types are caught and logged so the loop never dies here.
     */
    void tick() {
        boolean isFrozenCopy;
        synchronized (monitor) {
            // Sleep out the remainder of the interval, unless work was signaled explicitly.
            while (!workToDoNow) {
                long remainder = timeBetweenEachConverge
                        .minus(Duration.between(lastConverge, clock.instant()))
                        .toMillis();
                if (remainder > 0) {
                    try {
                        monitor.wait(remainder); // woken early by signalWorkToBeDone()
                    } catch (InterruptedException e) {
                        context.log(logger, LogLevel.ERROR, "Interrupted while sleeping before tick, ignoring");
                    }
                } else break;
            }
            lastConverge = clock.instant();
            workToDoNow = false;

            // Apply a pending freeze/unfreeze request while holding the monitor.
            if (isFrozen != wantFrozen) {
                isFrozen = wantFrozen;
                context.log(logger, "Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
            }
            isFrozenCopy = isFrozen;
        }

        // NOTE(review): 'converged' is written but never read in this method — possibly leftover; confirm.
        boolean converged = false;
        if (isFrozenCopy) {
            context.log(logger, LogLevel.DEBUG, "tick: isFrozen");
        } else {
            try {
                converge();
                converged = true;
            } catch (OrchestratorException e) {
                // Expected when the Orchestrator denies a suspend; just log the reason.
                context.log(logger, e.getMessage());
            } catch (ContainerNotFoundException e) {
                containerState = ABSENT;
                context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState);
            } catch (DockerException e) {
                numberOfUnhandledException++;
                context.log(logger, LogLevel.ERROR, "Caught a DockerException", e);
            } catch (Exception e) {
                numberOfUnhandledException++;
                context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring.", e);
            }
        }
    }
private String stateDescription(Node.State state) {
return state == null ? "[absent]" : state.toString();
}
    /**
     * Brings the local container state in line with the node repository's wanted state for this
     * host. Called from tick(); exceptions propagate to tick(), which logs and counts them.
     */
    void converge() {
        final Optional<NodeSpec> optionalNode = nodeRepository.getOptionalNode(context.hostname().value());

        Node.State newState = optionalNode.map(NodeSpec::getState).orElse(null);
        if (newState != lastState) {
            context.log(logger, LogLevel.INFO, "State changed: " + stateDescription(lastState) + " -> " + stateDescription(newState));
            lastState = newState;
        }

        // expectNodeNotInNodeRepo is set at the end of the dirty branch below; the node being
        // absent from the repo is then expected, so do nothing.
        if (!optionalNode.isPresent() && expectNodeNotInNodeRepo) return;

        final NodeSpec node = optionalNode.orElseThrow(() ->
                new IllegalStateException(String.format("Node '%s' missing from node repository", context.hostname())));
        expectNodeNotInNodeRepo = false;

        Optional<Container> container = getContainer();
        if (!node.equals(lastNode)) {
            // Node spec changed since last tick; refresh metrics config for a running container.
            if (container.map(c -> c.state.isRunning()).orElse(false)) {
                storageMaintainer.writeMetricsConfig(context, node);
            }

            context.log(logger, LogLevel.DEBUG, "Loading new node spec: " + node.toString());
            lastNode = node;
        }

        switch (node.getState()) {
            case ready:
            case reserved:
            case parked:
            case failed:
                // No container should run in these states.
                removeContainerIfNeededUpdateContainerState(node, container);
                updateNodeRepoWithCurrentAttributes(node);
                break;
            case active:
                storageMaintainer.handleCoreDumpsForContainer(context, node, container);

                // Clean up old files once disk utilization crosses 80% of the allocation.
                storageMaintainer.getDiskUsageFor(context)
                        .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb())
                        .filter(diskUtil -> diskUtil >= 0.8)
                        .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context));

                scheduleDownLoadIfNeeded(node);
                if (isDownloadingImage()) {
                    // Wait for the pull to finish; converge again on a later tick.
                    context.log(logger, LogLevel.DEBUG, "Waiting for image to download " + imageBeingDownloaded.asString());
                    return;
                }
                container = removeContainerIfNeededUpdateContainerState(node, container);
                if (! container.isPresent()) {
                    // (Re)create the container. STARTING marks an in-flight start so a failed
                    // attempt is detected and removed next time (see shouldRemoveContainer).
                    containerState = STARTING;
                    startContainer(node);
                    containerState = UNKNOWN;
                    aclMaintainer.ifPresent(AclMaintainer::converge);
                }

                startServicesIfNeeded();
                resumeNodeIfNeeded(node);
                athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context));
                healthChecker.ifPresent(checker -> checker.verifyHealth(context));

                // Node is up: report converged attributes and tell the Orchestrator to resume it.
                updateNodeRepoWithCurrentAttributes(node);
                context.log(logger, "Call resume against Orchestrator");
                orchestrator.resume(context.hostname().value());
                break;
            case inactive:
                removeContainerIfNeededUpdateContainerState(node, container);
                updateNodeRepoWithCurrentAttributes(node);
                break;
            case provisioned:
                nodeRepository.setNodeState(context.hostname().value(), Node.State.dirty);
                break;
            case dirty:
                removeContainerIfNeededUpdateContainerState(node, container);
                context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready");
                athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context));
                storageMaintainer.archiveNodeStorage(context);
                updateNodeRepoWithCurrentAttributes(node);
                nodeRepository.setNodeState(context.hostname().value(), Node.State.ready);
                expectNodeNotInNodeRepo = true;
                break;
            default:
                throw new RuntimeException("UNKNOWN STATE " + node.getState().name());
        }
    }
private void stopFilebeatSchedulerIfNeeded() {
if (currentFilebeatRestarter.isPresent()) {
currentFilebeatRestarter.get().cancel(true);
currentFilebeatRestarter = Optional.empty();
}
}
    /**
     * Samples docker container stats and publishes system, memory, disk and per-network-interface
     * metrics for this node. No-op when there is no node spec yet or the container is absent/starting.
     */
    @SuppressWarnings("unchecked")
    public void updateContainerNodeMetrics() {
        final NodeSpec node = lastNode;
        if (node == null || containerState != UNKNOWN) return;

        Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context);
        if (!containerStats.isPresent()) return;

        Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
                .add("host", context.hostname().value())
                .add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType()))
                .add("state", node.getState().toString());
        node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
        node.getAllowedToBeDown().ifPresent(allowed ->
                dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
        Dimensions dimensions = dimensionsBuilder.build();

        ContainerStats stats = containerStats.get();
        final String APP = MetricReceiverWrapper.APPLICATION_NODE;
        // Raw docker stats values (nested maps as returned by the docker stats API).
        final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
        final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
        final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
        final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
        final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
        final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
        final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
        final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB);
        final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context);

        lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);

        // Scale CPU usage to the share of the host that is allocated to this node.
        final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores;
        double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
        double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;

        // Page cache is subtracted: it does not count as "used" memory.
        long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
        double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
        Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);

        List<DimensionMetrics> metrics = new ArrayList<>();
        DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
                .withMetric("mem.limit", memoryTotalBytes)
                .withMetric("mem.used", memoryTotalBytesUsed)
                .withMetric("mem.util", 100 * memoryUsageRatio)
                .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
                .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
                .withMetric("disk.limit", diskTotalBytes);
        diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
        diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
        metrics.add(systemMetricsBuilder.build());

        // One metrics set per network interface.
        stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
            Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
            Map<String, Number> infStats = (Map<String, Number>) interfaceStats;

            DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
                    .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
                    .withMetric("net.in.errors", infStats.get("rx_errors").longValue())
                    .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
                    .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
                    .withMetric("net.out.errors", infStats.get("tx_errors").longValue())
                    .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
                    .build();
            metrics.add(networkMetrics);
        });

        pushMetricsToContainer(metrics);
    }
    /** Forwards the collected metrics into the container via vespa-rpc-invoke (secret-agent report format). */
    private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
        StringBuilder params = new StringBuilder();
        try {
            for (DimensionMetrics dimensionMetrics : metrics) {
                params.append(dimensionMetrics.toSecretAgentReport());
            }
            String wrappedMetrics = "s:" + params.toString();

            // 2s RPC timeout, 5s exec timeout; failures are only logged.
            String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
            dockerOperations.executeCommandInContainerAsRoot(context, 5L, command);
        } catch (DockerExecTimeoutException | JsonProcessingException e) {
            context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e);
        }
    }
    /** Looks up the container from the docker daemon; caches ABSENT to skip the lookup on later calls. */
    private Optional<Container> getContainer() {
        if (containerState == ABSENT) return Optional.empty();
        Optional<Container> container = dockerOperations.getContainer(context);
        if (! container.isPresent()) containerState = ABSENT;
        return container;
    }
    @Override
    public boolean isDownloadingImage() {
        // Non-null only while an async docker pull is in flight (see scheduleDownLoadIfNeeded).
        return imageBeingDownloaded != null;
    }
@Override
public int getAndResetNumberOfUnhandledExceptions() {
int temp = numberOfUnhandledException;
numberOfUnhandledException = 0;
return temp;
}
/**
 * Keeps the previously sampled cumulative CPU counters and, on each update, derives the
 * deltas between the last two samples. Ratios are computed from those deltas; before two
 * samples have been taken the system delta is zero and the ratios are NaN.
 */
class CpuUsageReporter {
    private long containerKernelUsage = 0;
    private long totalContainerUsage = 0;
    private long totalSystemUsage = 0;

    private long deltaContainerKernelUsage;
    private long deltaContainerUsage;
    private long deltaSystemUsage;

    private void updateCpuDeltas(long newSystemUsage, long newContainerUsage, long newKernelUsage) {
        // Suppress the system delta on the very first sample (no previous reading yet),
        // which makes the ratio getters report NaN until a second sample arrives.
        deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (newSystemUsage - this.totalSystemUsage);
        deltaContainerUsage = newContainerUsage - this.totalContainerUsage;
        deltaContainerKernelUsage = newKernelUsage - this.containerKernelUsage;

        this.totalSystemUsage = newSystemUsage;
        this.totalContainerUsage = newContainerUsage;
        this.containerKernelUsage = newKernelUsage;
    }

    /**
     * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
     * in the time between the last two times updateCpuDeltas() was called. This is calculated
     * by dividing the CPU time used by the container with the CPU time used by the entire system.
     */
    double getCpuUsageRatio() {
        if (deltaSystemUsage == 0) {
            return Double.NaN;
        }
        return (double) deltaContainerUsage / deltaSystemUsage;
    }

    /** Same as {@link #getCpuUsageRatio()} but restricted to kernel-mode CPU time. */
    double getCpuKernelUsageRatio() {
        if (deltaSystemUsage == 0) {
            return Double.NaN;
        }
        return (double) deltaContainerKernelUsage / deltaSystemUsage;
    }
}
/**
 * Asks the Orchestrator for permission to suspend this node. Presumably the Orchestrator
 * rejects by throwing (an OrchestratorException is handled by the tick loop) — confirm.
 */
private void orchestratorSuspendNode() {
    String hostname = context.hostname().value();
    context.log(logger, "Ask Orchestrator for permission to suspend node");
    orchestrator.suspend(hostname);
}
/**
 * Returns the ContainerData used when creating the container. The default implementation
 * does not support injecting files into the container; presumably subclasses override
 * this to provide real file content — confirm.
 */
protected ContainerData createContainerData(NodeAgentContext context, NodeSpec node) {
    return (containerPath, fileContent) -> {
        throw new UnsupportedOperationException("addFile not implemented");
    };
}
}
|
/**
 * Converges a single node towards its wanted state as published by the node repository:
 * manages the node's Docker container lifecycle, service start/stop/resume, Athenz
 * credentials, ACLs, metrics reporting and attribute write-back.
 *
 * <p>A dedicated tick thread repeatedly calls {@link #converge()} at a fixed interval, or
 * sooner when work is signaled. The frozen/work flags are coordinated through {@code monitor}.
 */
class NodeAgentImpl implements NodeAgent {
    // 1 GB in bytes; used to convert the node spec's disk size (GB) to bytes.
    private static final long BYTES_IN_GB = 1_000_000_000L;

    private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());

    // When set, the tick loop thread exits.
    private final AtomicBoolean terminated = new AtomicBoolean(false);

    // Frozen-state handshake; these three flags are read/written under 'monitor'.
    private boolean isFrozen = true;
    private boolean wantFrozen = false;
    private boolean workToDoNow = true;

    // Set after a dirty node has been handed back to the repo as ready; the node is then
    // expected to disappear from the node repository.
    private boolean expectNodeNotInNodeRepo = false;

    private final Object monitor = new Object();

    // Non-null while a docker image pull is in progress.
    private DockerImage imageBeingDownloaded = null;

    private final NodeAgentContext context;
    private final NodeRepository nodeRepository;
    private final Orchestrator orchestrator;
    private final DockerOperations dockerOperations;
    private final StorageMaintainer storageMaintainer;
    private final Clock clock;
    private final Duration timeBetweenEachConverge;
    private final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer;
    private final Optional<AclMaintainer> aclMaintainer;

    // Incremented on unexpected exceptions; read and cleared by
    // getAndResetNumberOfUnhandledExceptions().
    private int numberOfUnhandledException = 0;
    private Instant lastConverge;

    private final Thread loopThread;
    private final Optional<HealthChecker> healthChecker;

    // Daemon scheduler used to periodically restart filebeat inside the container.
    private final ScheduledExecutorService filebeatRestarter =
            Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
    private Consumer<String> serviceRestarter;
    private Optional<Future<?>> currentFilebeatRestarter = Optional.empty();

    private boolean hasResumedNode = false;
    private boolean hasStartedServices = true;

    /**
     * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
     * NodeAgent explicitly starting it.
     * STARTING state is set just before we attempt to start a container, if successful we move to the next state.
     * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
     * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
     * to get updated state of the container.
     */
    enum ContainerState {
        ABSENT,
        STARTING,
        UNKNOWN
    }

    private ContainerState containerState = UNKNOWN;

    // Last node spec seen from the node repository; used to detect and log spec changes.
    private NodeSpec lastNode = null;
    private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();

    public NodeAgentImpl(
            final NodeAgentContext context,
            final NodeRepository nodeRepository,
            final Orchestrator orchestrator,
            final DockerOperations dockerOperations,
            final StorageMaintainer storageMaintainer,
            final Clock clock,
            final Duration timeBetweenEachConverge,
            final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer,
            final Optional<AclMaintainer> aclMaintainer,
            final Optional<HealthChecker> healthChecker) {
        this.context = context;
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.dockerOperations = dockerOperations;
        this.storageMaintainer = storageMaintainer;
        this.clock = clock;
        this.timeBetweenEachConverge = timeBetweenEachConverge;
        this.lastConverge = clock.instant();
        this.athenzCredentialsMaintainer = athenzCredentialsMaintainer;
        this.aclMaintainer = aclMaintainer;
        this.healthChecker = healthChecker;

        // The tick loop. Any throwable that escapes tick() is counted and logged,
        // after which this thread exits.
        this.loopThread = new Thread(() -> {
            try {
                while (!terminated.get()) tick();
            } catch (Throwable t) {
                numberOfUnhandledException++;
                context.log(logger, LogLevel.ERROR, "Unhandled throwable, ignoring", t);
            }
        });
        this.loopThread.setName("tick-" + context.hostname());
    }

    /**
     * Requests the agent to freeze (stop converging) or unfreeze.
     * Returns whether the agent has already reached the requested frozen state.
     */
    @Override
    public boolean setFrozen(boolean frozen) {
        synchronized (monitor) {
            if (wantFrozen != frozen) {
                wantFrozen = frozen;
                context.log(logger, LogLevel.DEBUG, wantFrozen ? "Freezing" : "Unfreezing");
                signalWorkToBeDone();
            }

            return isFrozen == frozen;
        }
    }

    /** Starts the tick loop thread and sets up the in-container service restarter. */
    @Override
    public void start() {
        context.log(logger, "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms");
        loopThread.start();

        serviceRestarter = service -> {
            try {
                ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
                        context, "service", service, "restart");

                if (!processResult.isSuccess()) {
                    context.log(logger, LogLevel.ERROR, "Failed to restart service " + service + ": " + processResult);
                }
            } catch (Exception e) {
                context.log(logger, LogLevel.ERROR, "Failed to restart service " + service, e);
            }
        };
    }

    // Starts services in the container unless they have already been started.
    @Override
    void startServicesIfNeeded() {
        if (!hasStartedServices) {
            context.log(logger, "Starting services");
            dockerOperations.startServices(context);
            hasStartedServices = true;
        }
    }

    // Runs the optional resume command once per container lifetime, scheduling a daily
    // filebeat restart the first time around.
    void resumeNodeIfNeeded(NodeSpec node) {
        if (!hasResumedNode) {
            if (!currentFilebeatRestarter.isPresent()) {
                storageMaintainer.writeMetricsConfig(context, node);
                currentFilebeatRestarter = Optional.of(filebeatRestarter.scheduleWithFixedDelay(
                        () -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS));
            }

            context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command");
            dockerOperations.resumeNode(context);
            hasResumedNode = true;
        }
    }

    // Pushes restart/reboot generation and docker image back to the node repository when
    // the node's current values differ from the wanted ones.
    private void updateNodeRepoWithCurrentAttributes(final NodeSpec node) {
        final NodeAttributes currentNodeAttributes = new NodeAttributes();
        final NodeAttributes newNodeAttributes = new NodeAttributes();

        if (!Objects.equals(node.getCurrentRestartGeneration(), node.getWantedRestartGeneration())) {
            currentNodeAttributes.withRestartGeneration(node.getCurrentRestartGeneration());
            newNodeAttributes.withRestartGeneration(node.getWantedRestartGeneration());
        }

        if (!Objects.equals(node.getCurrentRebootGeneration(), node.getWantedRebootGeneration())) {
            currentNodeAttributes.withRebootGeneration(node.getCurrentRebootGeneration());
            newNodeAttributes.withRebootGeneration(node.getWantedRebootGeneration());
        }

        // Only report the wanted image as current if the container state is trustworthy.
        Optional<DockerImage> actualDockerImage = node.getWantedDockerImage().filter(n -> containerState == UNKNOWN);
        if (!Objects.equals(node.getCurrentDockerImage(), actualDockerImage)) {
            currentNodeAttributes.withDockerImage(node.getCurrentDockerImage().orElse(new DockerImage("")));
            newNodeAttributes.withDockerImage(actualDockerImage.orElse(new DockerImage("")));
        }

        publishStateToNodeRepoIfChanged(currentNodeAttributes, newNodeAttributes);
    }

    // Writes new attributes to the node repository only when something actually changed.
    private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes, NodeAttributes newAttributes) {
        if (!currentAttributes.equals(newAttributes)) {
            context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
                    currentAttributes, newAttributes);
            nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes);
        }
    }

    // Creates and starts the container, resetting per-container bookkeeping.
    private void startContainer(NodeSpec node) {
        ContainerData containerData = createContainerData(context, node);
        dockerOperations.createContainer(context, node, containerData);
        dockerOperations.startContainer(context);
        lastCpuMetric = new CpuUsageReporter();

        hasStartedServices = true; // Automatically started with the container
        hasResumedNode = false;
        context.log(logger, "Container successfully started, new containerState is " + containerState);
    }

    // Removes the container if required; for a surviving container, restarts services when
    // the restart generation demands it.
    private Optional<Container> removeContainerIfNeededUpdateContainerState(NodeSpec node, Optional<Container> existingContainer) {
        return existingContainer
                .flatMap(container -> removeContainerIfNeeded(node, container))
                .map(container -> {
                    shouldRestartServices(node).ifPresent(restartReason -> {
                        context.log(logger, "Will restart services: " + restartReason);
                        restartServices(node, container);
                    });
                    return container;
                });
    }

    // Returns a human-readable reason if the wanted restart generation is ahead of current.
    private Optional<String> shouldRestartServices(NodeSpec node) {
        if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty();

        if (node.getCurrentRestartGeneration().get() < node.getWantedRestartGeneration().get()) {
            return Optional.of("Restart requested - wanted restart generation has been bumped: "
                    + node.getCurrentRestartGeneration().get() + " -> " + node.getWantedRestartGeneration().get());
        }
        return Optional.empty();
    }

    // Restarts Vespa in the container after getting Orchestrator permission to suspend.
    private void restartServices(NodeSpec node, Container existingContainer) {
        if (existingContainer.state.isRunning() && node.getState() == Node.State.active) {
            context.log(logger, "Restarting services");
            // Since we are restarting the services we need to suspend the node.
            orchestratorSuspendNode();
            dockerOperations.restartVespa(context);
        }
    }

    /** Stops services in the container; tolerates the container having disappeared. */
    @Override
    public void stopServices() {
        context.log(logger, "Stopping services");
        if (containerState == ABSENT) return;
        try {
            hasStartedServices = hasResumedNode = false;
            dockerOperations.stopServices(context);
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
        }
    }

    /** Suspends services in the container; best effort, failures are logged and ignored. */
    @Override
    public void suspend() {
        context.log(logger, "Suspending services on node");
        if (containerState == ABSENT) return;
        try {
            hasResumedNode = false;
            dockerOperations.suspendNode(context);
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
        } catch (RuntimeException e) {
            // It's bad to continue as-if nothing happened, but on the other hand if we do not proceed to
            // remove container, we will not be able to upgrade to fix any problems in the suspend logic!
            context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e);
        }
    }

    // Returns a reason to remove the container, or empty if it should be kept.
    private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) {
        final Node.State nodeState = node.getState();
        if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
            return Optional.of("Node in state " + nodeState + ", container should no longer be running");
        }
        if (node.getWantedDockerImage().isPresent() && !node.getWantedDockerImage().get().equals(existingContainer.image)) {
            return Optional.of("The node is supposed to run a new Docker image: "
                    + existingContainer.image.asString() + " -> " + node.getWantedDockerImage().get().asString());
        }
        if (!existingContainer.state.isRunning()) {
            return Optional.of("Container no longer running");
        }

        ContainerResources wantedContainerResources = ContainerResources.from(
                node.getMinCpuCores(), node.getMinMainMemoryAvailableGb());
        if (!wantedContainerResources.equals(existingContainer.resources)) {
            return Optional.of("Container should be running with different resource allocation, wanted: " +
                    wantedContainerResources + ", actual: " + existingContainer.resources);
        }

        if (node.getCurrentRebootGeneration() < node.getWantedRebootGeneration()) {
            return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d",
                    node.getCurrentRebootGeneration(), node.getWantedRebootGeneration()));
        }

        // A container in STARTING state means a previous start attempt did not complete.
        if (containerState == STARTING) return Optional.of("Container failed to start");
        return Optional.empty();
    }

    // Removes the container (after suspending/stopping services and archiving core dumps)
    // if shouldRemoveContainer() gives a reason; otherwise returns the container unchanged.
    private Optional<Container> removeContainerIfNeeded(NodeSpec node, Container existingContainer) {
        Optional<String> removeReason = shouldRemoveContainer(node, existingContainer);
        if (removeReason.isPresent()) {
            context.log(logger, "Will remove container: " + removeReason.get());

            if (existingContainer.state.isRunning()) {
                if (node.getState() == Node.State.active) {
                    orchestratorSuspendNode();
                }

                try {
                    if (node.getState() != Node.State.dirty) {
                        suspend();
                    }
                    stopServices();
                } catch (Exception e) {
                    context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e);
                }
            }

            stopFilebeatSchedulerIfNeeded();
            storageMaintainer.handleCoreDumpsForContainer(context, node, Optional.of(existingContainer));
            dockerOperations.removeContainer(context, existingContainer);
            containerState = ABSENT;
            context.log(logger, "Container successfully removed, new containerState is " + containerState);
            return Optional.empty();
        }
        return Optional.of(existingContainer);
    }

    // Kicks off an async image pull when the wanted image differs from the current one.
    private void scheduleDownLoadIfNeeded(NodeSpec node) {
        if (node.getCurrentDockerImage().equals(node.getWantedDockerImage())) return;

        if (dockerOperations.pullImageAsyncIfNeeded(node.getWantedDockerImage().get())) {
            imageBeingDownloaded = node.getWantedDockerImage().get();
        } else if (imageBeingDownloaded != null) { // Image was downloading, but now it's ready
            imageBeingDownloaded = null;
        }
    }

    // Wakes up the tick loop so it converges immediately instead of waiting out the interval.
    private void signalWorkToBeDone() {
        synchronized (monitor) {
            if (!workToDoNow) {
                workToDoNow = true;
                context.log(logger, LogLevel.DEBUG, "Signaling work to be done");
                monitor.notifyAll();
            }
        }
    }

    // One iteration of the agent loop: waits until the converge interval has elapsed (or work
    // is signaled), applies pending freeze/unfreeze, then converges unless frozen. Known
    // exception types are handled; unexpected ones are counted.
    void tick() {
        boolean isFrozenCopy;
        synchronized (monitor) {
            while (!workToDoNow) {
                long remainder = timeBetweenEachConverge
                        .minus(Duration.between(lastConverge, clock.instant()))
                        .toMillis();
                if (remainder > 0) {
                    try {
                        monitor.wait(remainder);
                    } catch (InterruptedException e) {
                        context.log(logger, LogLevel.ERROR, "Interrupted while sleeping before tick, ignoring");
                    }
                } else break;
            }
            lastConverge = clock.instant();
            workToDoNow = false;

            if (isFrozen != wantFrozen) {
                isFrozen = wantFrozen;
                context.log(logger, "Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
            }
            isFrozenCopy = isFrozen;
        }

        boolean converged = false;
        if (isFrozenCopy) {
            context.log(logger, LogLevel.DEBUG, "tick: isFrozen");
        } else {
            try {
                converge();
                converged = true;
            } catch (OrchestratorException e) {
                context.log(logger, e.getMessage());
            } catch (ContainerNotFoundException e) {
                containerState = ABSENT;
                context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState);
            } catch (DockerException e) {
                numberOfUnhandledException++;
                context.log(logger, LogLevel.ERROR, "Caught a DockerException", e);
            } catch (Exception e) {
                numberOfUnhandledException++;
                context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring.", e);
            }
        }
    }

    // Fetches the node spec from the node repository and drives the node towards its wanted
    // state based on node.getState(). Throws if the node is unexpectedly missing from the repo.
    void converge() {
        final Optional<NodeSpec> optionalNode = nodeRepository.getOptionalNode(context.hostname().value());

        // We just removed the node from node repo, so this is expected until NodeAdmin stop this NodeAgent
        if (!optionalNode.isPresent() && expectNodeNotInNodeRepo) {
            context.log(logger, LogLevel.INFO, "Node removed from node repo (as expected)");
            return;
        }

        final NodeSpec node = optionalNode.orElseThrow(() ->
                new IllegalStateException(String.format("Node '%s' missing from node repository", context.hostname())));
        expectNodeNotInNodeRepo = false;

        Optional<Container> container = getContainer();
        if (!node.equals(lastNode)) {
            logChangesToNodeSpec(lastNode, node);

            // Every time the node spec changes, we should clear the metrics for this container as the dimensions
            // will change and we will be reporting duplicate metrics.
            if (container.map(c -> c.state.isRunning()).orElse(false)) {
                storageMaintainer.writeMetricsConfig(context, node);
            }

            lastNode = node;
        }

        switch (node.getState()) {
            case ready:
            case reserved:
            case parked:
            case failed:
                removeContainerIfNeededUpdateContainerState(node, container);
                updateNodeRepoWithCurrentAttributes(node);
                break;
            case active:
                storageMaintainer.handleCoreDumpsForContainer(context, node, container);

                // Clean up old files if disk utilization is at or above 80%.
                storageMaintainer.getDiskUsageFor(context)
                        .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb())
                        .filter(diskUtil -> diskUtil >= 0.8)
                        .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context));

                scheduleDownLoadIfNeeded(node);
                if (isDownloadingImage()) {
                    context.log(logger, LogLevel.DEBUG, "Waiting for image to download " + imageBeingDownloaded.asString());
                    return;
                }
                container = removeContainerIfNeededUpdateContainerState(node, container);
                if (! container.isPresent()) {
                    containerState = STARTING;
                    startContainer(node);
                    containerState = UNKNOWN;
                    aclMaintainer.ifPresent(AclMaintainer::converge);
                }

                startServicesIfNeeded();
                resumeNodeIfNeeded(node);
                athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context));
                healthChecker.ifPresent(checker -> checker.verifyHealth(context));

                // Because it's more important to stop a bad release from rolling out in prod,
                // we put the resume call last. So if we fail after updating the node repo attributes
                // but before resume, the app may go through the tenant pipeline but will halt in prod.
                //
                // Note that this problem exists only because there are 2 different mechanisms
                // that should really be parts of a single mechanism:
                //  - The content of node repo is used to determine whether a new Vespa+application
                //    has been successfully rolled out.
                //  - Slobrok and internal orchestrator state is used to determine whether
                //    to allow upgrade (suspend).
                updateNodeRepoWithCurrentAttributes(node);
                context.log(logger, "Call resume against Orchestrator");
                orchestrator.resume(context.hostname().value());
                break;
            case inactive:
                removeContainerIfNeededUpdateContainerState(node, container);
                updateNodeRepoWithCurrentAttributes(node);
                break;
            case provisioned:
                nodeRepository.setNodeState(context.hostname().value(), Node.State.dirty);
                break;
            case dirty:
                removeContainerIfNeededUpdateContainerState(node, container);
                context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready");
                athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context));
                storageMaintainer.archiveNodeStorage(context);
                updateNodeRepoWithCurrentAttributes(node);
                nodeRepository.setNodeState(context.hostname().value(), Node.State.ready);
                expectNodeNotInNodeRepo = true;
                break;
            default:
                throw new RuntimeException("UNKNOWN STATE " + node.getState().name());
        }
    }

    // Logs a diff of the interesting fields between the previous and current node spec.
    private void logChangesToNodeSpec(NodeSpec lastNode, NodeSpec node) {
        StringBuilder builder = new StringBuilder();
        appendIfDifferent(builder, "state", lastNode, node, NodeSpec::getState);
        if (builder.length() > 0) {
            context.log(logger, LogLevel.INFO, "Changes to node: " + builder.toString());
        }
    }

    // Renders a nullable field value for logging.
    private static <T> String fieldDescription(T value) {
        return value == null ? "[absent]" : value.toString();
    }

    // Appends "name old -> new" to builder when the extracted field differs between specs.
    private <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) {
        T oldValue = oldNode == null ? null : getter.apply(oldNode);
        T newValue = getter.apply(newNode);
        if (!Objects.equals(oldValue, newValue)) {
            if (builder.length() > 0) {
                builder.append(", ");
            }
            builder.append(name).append(" ").append(fieldDescription(oldValue)).append(" -> ").append(fieldDescription(newValue));
        }
    }

    // Cancels the scheduled daily filebeat restart, if one is active.
    private void stopFilebeatSchedulerIfNeeded() {
        if (currentFilebeatRestarter.isPresent()) {
            currentFilebeatRestarter.get().cancel(true);
            currentFilebeatRestarter = Optional.empty();
        }
    }

    // Samples container stats from the docker daemon, derives CPU/memory/disk/network metrics
    // and pushes them into the container. No-op unless the container state is trustworthy.
    // The unchecked casts navigate the untyped stats maps returned by the docker API.
    @SuppressWarnings("unchecked")
    public void updateContainerNodeMetrics() {
        final NodeSpec node = lastNode;
        if (node == null || containerState != UNKNOWN) return;

        Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context);
        if (!containerStats.isPresent()) return;

        Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
                .add("host", context.hostname().value())
                .add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType()))
                .add("state", node.getState().toString());
        node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
        node.getAllowedToBeDown().ifPresent(allowed ->
                dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
        Dimensions dimensions = dimensionsBuilder.build();

        ContainerStats stats = containerStats.get();
        final String APP = MetricReceiverWrapper.APPLICATION_NODE;
        final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
        final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
        final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
        final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
        final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
        final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
        final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
        final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB);
        final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context);

        lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);

        // Ratio of CPU cores allocated to this container to total number of CPU cores on this host
        final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores;
        double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
        double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;

        long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
        double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
        Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);

        List<DimensionMetrics> metrics = new ArrayList<>();
        DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
                .withMetric("mem.limit", memoryTotalBytes)
                .withMetric("mem.used", memoryTotalBytesUsed)
                .withMetric("mem.util", 100 * memoryUsageRatio)
                .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
                .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
                .withMetric("disk.limit", diskTotalBytes);

        diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
        diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
        metrics.add(systemMetricsBuilder.build());

        stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
            Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
            Map<String, Number> infStats = (Map<String, Number>) interfaceStats;

            DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
                    .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
                    .withMetric("net.in.errors", infStats.get("rx_errors").longValue())
                    .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
                    .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
                    .withMetric("net.out.errors", infStats.get("tx_errors").longValue())
                    .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
                    .build();
            metrics.add(networkMetrics);
        });

        pushMetricsToContainer(metrics);
    }

    // Serializes the metrics as secret-agent reports and hands them to the in-container
    // metrics RPC endpoint. Timeouts and serialization failures are logged, not propagated.
    private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
        StringBuilder params = new StringBuilder();
        try {
            for (DimensionMetrics dimensionMetrics : metrics) {
                params.append(dimensionMetrics.toSecretAgentReport());
            }
            String wrappedMetrics = "s:" + params.toString();

            // Push metrics to the metrics proxy in each container.
            // TODO Push metrics to config server
            String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
            dockerOperations.executeCommandInContainerAsRoot(context, 5L, command);
        } catch (DockerExecTimeoutException | JsonProcessingException e) {
            context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e);
        }
    }

    // Returns the current container from the docker daemon, downgrading containerState to
    // ABSENT when the daemon reports none.
    private Optional<Container> getContainer() {
        if (containerState == ABSENT) return Optional.empty();
        Optional<Container> container = dockerOperations.getContainer(context);
        if (! container.isPresent()) containerState = ABSENT;
        return container;
    }

    /** Returns whether an image pull is currently in progress. */
    @Override
    public boolean isDownloadingImage() {
        return imageBeingDownloaded != null;
    }

    /** Returns the unhandled-exception count and resets it to zero. */
    @Override
    public int getAndResetNumberOfUnhandledExceptions() {
        int temp = numberOfUnhandledException;
        numberOfUnhandledException = 0;
        return temp;
    }

    // Tracks cumulative CPU counters between samples and exposes per-interval usage ratios.
    // Ratios are NaN until two samples have been recorded.
    class CpuUsageReporter {
        private long containerKernelUsage = 0;
        private long totalContainerUsage = 0;
        private long totalSystemUsage = 0;

        private long deltaContainerKernelUsage;
        private long deltaContainerUsage;
        private long deltaSystemUsage;

        private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
            // Suppress the system delta on the first sample (no previous reading yet).
            deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
            deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
            deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;

            this.totalSystemUsage = totalSystemUsage;
            this.totalContainerUsage = totalContainerUsage;
            this.containerKernelUsage = containerKernelUsage;
        }

        /**
         * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
         * in the time between the last two times updateCpuDeltas() was called. This is calculated
         * by dividing the CPU time used by the container with the CPU time used by the entire system.
         */
        double getCpuUsageRatio() {
            return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
        }

        /** Same as {@link #getCpuUsageRatio()} but restricted to kernel-mode CPU time. */
        double getCpuKernelUsageRatio() {
            return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
        }
    }

    // Asks the Orchestrator for permission to suspend this node before disruptive operations.
    private void orchestratorSuspendNode() {
        context.log(logger, "Ask Orchestrator for permission to suspend node");
        orchestrator.suspend(context.hostname().value());
    }

    // Default ContainerData: file injection into the container is not supported.
    protected ContainerData createContainerData(NodeAgentContext context, NodeSpec node) {
        return (pathInContainer, data) -> {
            throw new UnsupportedOperationException("addFile not implemented");
        };
    }
}
|
Not particularly picky about this, but since we're now throwing on finding the decryption policy, could we just throw the exception inside the for loop as soon as we encounter it? That way we don't need to loop over the rest of the policies if an earlier policy was a decryption policy.
|
/**
 * Builds the {@link HttpPipeline} the encrypted blob client will use.
 *
 * <p>If the caller supplied a pre-built pipeline, that pipeline is reused with a
 * {@link BlobDecryptionPolicy} prepended so decryption sees the raw response before any
 * other policy. A pipeline that already contains a decryption policy is rejected, since
 * its key configuration might conflict with the key information passed to this builder.
 * Otherwise a pipeline is assembled from the individually configured builder options.
 *
 * @return the {@link HttpPipeline} to use for requests.
 * @throws IllegalArgumentException If the supplied pipeline already contains a
 * {@link BlobDecryptionPolicy}.
 */
private HttpPipeline getHttpPipeline() {
    if (httpPipeline != null) {
        List<HttpPipelinePolicy> policies = new ArrayList<>();
        for (int i = 0; i < httpPipeline.getPolicyCount(); i++) {
            HttpPipelinePolicy currPolicy = httpPipeline.getPolicy(i);
            // Fail fast on the first decryption policy found: no need to scan the remaining
            // policies, and logging through the ClientLogger matches the builder's other
            // validation failures.
            if (currPolicy instanceof BlobDecryptionPolicy) {
                throw logger.logExceptionAsError(new IllegalArgumentException("The passed pipeline was already "
                    + "configured for encryption/decryption in a way that might conflict with the passed key "
                    + "information. Please ensure that the passed pipeline is not already configured for "
                    + "encryption/decryption"));
            }
            policies.add(currPolicy);
        }
        // Decryption must run first so it processes the response body before any other policy.
        policies.add(0, new BlobDecryptionPolicy(keyWrapper, keyResolver));

        return new HttpPipelineBuilder()
            .httpClient(httpPipeline.getHttpClient())
            .policies(policies.toArray(new HttpPipelinePolicy[0]))
            .build();
    }

    // No pipeline supplied: assemble one from the configured options.
    Configuration userAgentConfiguration = (configuration == null) ? Configuration.NONE : configuration;

    List<HttpPipelinePolicy> policies = new ArrayList<>();

    policies.add(new BlobDecryptionPolicy(keyWrapper, keyResolver));
    String clientName = USER_AGENT_PROPERTIES.getOrDefault(SDK_NAME, "UnknownName");
    String clientVersion = USER_AGENT_PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion");
    policies.add(new UserAgentPolicy(logOptions.getApplicationId(), clientName, clientVersion,
        userAgentConfiguration));
    policies.add(new RequestIdPolicy());
    policies.add(new AddDatePolicy());

    // At most one credential is configured (the credential setters null out the others).
    if (storageSharedKeyCredential != null) {
        policies.add(new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential));
    } else if (tokenCredential != null) {
        // Bearer tokens must only ever be sent over HTTPS.
        BuilderHelper.httpsValidation(tokenCredential, "bearer token", endpoint, logger);
        policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", endpoint)));
    } else if (sasTokenCredential != null) {
        policies.add(new SasTokenCredentialPolicy(sasTokenCredential));
    }

    HttpPolicyProviders.addBeforeRetryPolicies(policies);
    policies.add(new RequestRetryPolicy(retryOptions));

    policies.addAll(additionalPolicies);

    HttpPolicyProviders.addAfterRetryPolicies(policies);
    policies.add(new ResponseValidationPolicyBuilder()
        .addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID)
        .addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256)
        .build());

    policies.add(new HttpLoggingPolicy(logOptions));
    policies.add(new ScrubEtagPolicy());

    return new HttpPipelineBuilder()
        .policies(policies.toArray(new HttpPipelinePolicy[0]))
        .httpClient(httpClient)
        .build();
}
|
if (!decryptionPolicyPresent) {
|
/**
 * Builds the {@link HttpPipeline} the encrypted blob client will use.
 *
 * <p>If the caller supplied a pre-built pipeline, that pipeline is reused with a
 * {@link BlobDecryptionPolicy} prepended; a pipeline that already decrypts is rejected
 * because its key configuration might conflict with this builder's key information.
 * Otherwise a pipeline is assembled from the individually configured builder options.
 *
 * @return the {@link HttpPipeline} to use for requests.
 * @throws IllegalArgumentException If the supplied pipeline already contains a
 * {@link BlobDecryptionPolicy}.
 */
private HttpPipeline getHttpPipeline() {
    // Reuse a caller-supplied pipeline, adding only the decryption policy.
    if (httpPipeline != null) {
        List<HttpPipelinePolicy> policies = new ArrayList<>();
        boolean decryptionPolicyPresent = false;
        for (int i = 0; i < httpPipeline.getPolicyCount(); i++) {
            HttpPipelinePolicy currPolicy = httpPipeline.getPolicy(i);
            // Fail fast on the first decryption policy found — no need to scan the rest.
            if (currPolicy instanceof BlobDecryptionPolicy) {
                throw logger.logExceptionAsError(new IllegalArgumentException("The passed pipeline was already"
                    + " configured for encryption/decryption in a way that might conflict with the passed key "
                    + "information. Please ensure that the passed pipeline is not already configured for "
                    + "encryption/decryption"));
            }
            policies.add(currPolicy);
        }
        // Decryption runs first so it processes the response before any other policy.
        policies.add(0, new BlobDecryptionPolicy(keyWrapper, keyResolver));
        return new HttpPipelineBuilder()
            .httpClient(httpPipeline.getHttpClient())
            .policies(policies.toArray(new HttpPipelinePolicy[0]))
            .build();
    }

    // No pipeline supplied: assemble one from the configured options.
    Configuration userAgentConfiguration = (configuration == null) ? Configuration.NONE : configuration;
    List<HttpPipelinePolicy> policies = new ArrayList<>();
    policies.add(new BlobDecryptionPolicy(keyWrapper, keyResolver));
    String clientName = USER_AGENT_PROPERTIES.getOrDefault(SDK_NAME, "UnknownName");
    String clientVersion = USER_AGENT_PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion");
    policies.add(new UserAgentPolicy(logOptions.getApplicationId(), clientName, clientVersion,
        userAgentConfiguration));
    policies.add(new RequestIdPolicy());
    HttpPolicyProviders.addBeforeRetryPolicies(policies);
    policies.add(new RequestRetryPolicy(retryOptions));
    policies.add(new AddDatePolicy());
    // At most one credential is configured (the credential setters null out the others).
    if (storageSharedKeyCredential != null) {
        policies.add(new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential));
    } else if (tokenCredential != null) {
        // Bearer tokens must only ever be sent over HTTPS.
        BuilderHelper.httpsValidation(tokenCredential, "bearer token", endpoint, logger);
        policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", endpoint)));
    } else if (sasTokenCredential != null) {
        policies.add(new SasTokenCredentialPolicy(sasTokenCredential));
    }
    policies.addAll(additionalPolicies);
    HttpPolicyProviders.addAfterRetryPolicies(policies);
    policies.add(new ResponseValidationPolicyBuilder()
        .addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID)
        .addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256)
        .build());
    policies.add(new HttpLoggingPolicy(logOptions));
    policies.add(new ScrubEtagPolicy());
    return new HttpPipelineBuilder()
        .policies(policies.toArray(new HttpPipelinePolicy[0]))
        .httpClient(httpClient)
        .build();
}
|
class EncryptedBlobClientBuilder {
private final ClientLogger logger = new ClientLogger(EncryptedBlobClientBuilder.class);
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private String endpoint;
private String accountName;
private String containerName;
private String blobName;
private String snapshot;
private StorageSharedKeyCredential storageSharedKeyCredential;
private TokenCredential tokenCredential;
private SasTokenCredential sasTokenCredential;
private HttpClient httpClient;
private final List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>();
private HttpLogOptions logOptions;
private RequestRetryOptions retryOptions = new RequestRetryOptions();
private HttpPipeline httpPipeline;
private Configuration configuration;
private AsyncKeyEncryptionKey keyWrapper;
private AsyncKeyEncryptionKeyResolver keyResolver;
private String keyWrapAlgorithm;
private BlobServiceVersion version;
private CpkInfo customerProvidedKey;
/**
 * Creates a new instance of the EncryptedBlobClientBuilder, initialized with
 * the default HTTP log options.
 */
public EncryptedBlobClientBuilder() {
    this.logOptions = getDefaultHttpLogOptions();
}
/**
 * Creates a {@link EncryptedBlobClient} based on options set in the Builder.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient}
 *
 * @return a {@link EncryptedBlobClient} created from the configurations in this builder.
 * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}.
 */
public EncryptedBlobClient buildEncryptedBlobClient() {
    // The sync client is a thin wrapper around the async client.
    return new EncryptedBlobClient(buildEncryptedBlobAsyncClient());
}
/**
 * Creates a {@link EncryptedBlobAsyncClient} based on options set in the Builder.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient}
 *
 * @return a {@link EncryptedBlobAsyncClient} created from the configurations in this builder.
 * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}.
 * @throws IllegalArgumentException If the configured encryption key parameters are invalid.
 */
public EncryptedBlobAsyncClient buildEncryptedBlobAsyncClient() {
    Objects.requireNonNull(blobName, "'blobName' cannot be null.");
    checkValidEncryptionParameters();

    /*
    Implicit and explicit root container access are functionally equivalent, but explicit references are easier
    to read and debug.
    */
    if (CoreUtils.isNullOrEmpty(containerName)) {
        containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME;
    }

    // Fall back to the latest known service version when none was set explicitly.
    BlobServiceVersion serviceVersion = version != null ? version : BlobServiceVersion.getLatest();

    return new EncryptedBlobAsyncClient(getHttpPipeline(),
        String.format("%s/%s/%s", endpoint, containerName, blobName), serviceVersion, accountName, containerName,
        blobName, snapshot, customerProvidedKey, keyWrapper, keyWrapAlgorithm);
}
/**
* Sets the encryption key parameters for the client
*
* @param key An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content encryption key
* @param keyWrapAlgorithm The {@link String} used to wrap the key.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder key(AsyncKeyEncryptionKey key, String keyWrapAlgorithm) {
    // The key and its wrap algorithm travel together: a wrapping key without an algorithm
    // is rejected at build time by checkValidEncryptionParameters().
    this.keyWrapper = key;
    this.keyWrapAlgorithm = keyWrapAlgorithm;
    return this;
}
/**
* Sets the encryption parameters for this client
*
* @param keyResolver The key resolver used to select the correct key for decrypting existing blobs.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder keyResolver(AsyncKeyEncryptionKeyResolver keyResolver) {
    // At least one of the wrapping key or this resolver must be set before building a
    // client (validated in checkValidEncryptionParameters()).
    this.keyResolver = keyResolver;
    return this;
}
/**
 * Validates the configured client-side encryption settings before a client is built.
 *
 * @throws IllegalArgumentException if neither a key nor a key resolver was supplied, or if a
 * key was supplied without a wrap algorithm.
 */
private void checkValidEncryptionParameters() {
    boolean hasKey = this.keyWrapper != null;
    // Without either a key or a resolver there is no way to protect or recover content keys.
    if (!hasKey && this.keyResolver == null) {
        throw logger.logExceptionAsError(new IllegalArgumentException("Key and KeyResolver cannot both be null"));
    }
    // A wrapping key is unusable unless the algorithm to wrap with is also known.
    if (hasKey && this.keyWrapAlgorithm == null) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("Key Wrap Algorithm must be specified with a Key."));
    }
}
/**
* Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service.
*
* @param credential {@link StorageSharedKeyCredential}.
* @return the updated EncryptedBlobClientBuilder
* @throws NullPointerException If {@code credential} is {@code null}.
*/
public EncryptedBlobClientBuilder credential(StorageSharedKeyCredential credential) {
    this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
    // Credential types are mutually exclusive: setting one clears the others.
    this.tokenCredential = null;
    this.sasTokenCredential = null;
    return this;
}
/**
* Sets the {@link TokenCredential} used to authorize requests sent to the service.
*
* @param credential {@link TokenCredential}.
* @return the updated EncryptedBlobClientBuilder
* @throws NullPointerException If {@code credential} is {@code null}.
*/
public EncryptedBlobClientBuilder credential(TokenCredential credential) {
    this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
    // Credential types are mutually exclusive: setting one clears the others.
    this.storageSharedKeyCredential = null;
    this.sasTokenCredential = null;
    return this;
}
/**
* Sets the SAS token used to authorize requests sent to the service.
*
* @param sasToken The SAS token to use for authenticating requests.
* @return the updated EncryptedBlobClientBuilder
* @throws NullPointerException If {@code sasToken} is {@code null}.
*/
public EncryptedBlobClientBuilder sasToken(String sasToken) {
    this.sasTokenCredential = new SasTokenCredential(Objects.requireNonNull(sasToken,
        "'sasToken' cannot be null."));
    // Credential types are mutually exclusive: setting one clears the others.
    this.storageSharedKeyCredential = null;
    this.tokenCredential = null;
    return this;
}
/**
* Clears the credential used to authorize the request.
*
* <p>This is for blobs that are publicly accessible.</p>
*
* @return the updated EncryptedBlobClientBuilder
*/
public EncryptedBlobClientBuilder setAnonymousAccess() {
    // Clears every credential type so requests are sent unauthenticated
    // (only valid against publicly accessible blobs).
    this.storageSharedKeyCredential = null;
    this.tokenCredential = null;
    this.sasTokenCredential = null;
    return this;
}
/**
* Sets the connection string to connect to the service.
*
* @param connectionString Connection string of the storage account.
* @return the updated EncryptedBlobClientBuilder
* @throws IllegalArgumentException If {@code connectionString} is invalid.
*/
public EncryptedBlobClientBuilder connectionString(String connectionString) {
    // Parse the connection string and make sure a blob endpoint can be derived from it.
    StorageConnectionString parsed = StorageConnectionString.create(connectionString, logger);
    StorageEndpoint blobEndpoint = parsed.getBlobEndpoint();
    if (blobEndpoint == null || blobEndpoint.getPrimaryUri() == null) {
        throw logger.logExceptionAsError(new IllegalArgumentException(
            "connectionString missing required settings to derive blob service endpoint."));
    }
    this.endpoint(blobEndpoint.getPrimaryUri());
    String parsedAccountName = parsed.getAccountName();
    if (parsedAccountName != null) {
        this.accountName = parsedAccountName;
    }
    // Adopt whichever credential type the connection string carries, if any.
    StorageAuthenticationSettings auth = parsed.getStorageAuthSettings();
    if (auth.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) {
        this.credential(new StorageSharedKeyCredential(
            auth.getAccount().getName(), auth.getAccount().getAccessKey()));
    } else if (auth.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) {
        this.sasToken(auth.getSasToken());
    }
    return this;
}
/**
* Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name)
*
* <p>If the endpoint is to a blob in the root container, this method will fail as it will interpret the blob name
* as the container name. With only one path element, it is impossible to distinguish between a container name and a
* blob in the root container, so it is assumed to be the container name as this is much more common. When working
* with blobs in the root container, it is best to set the endpoint to the account url and specify the blob name
* separately using the {@link EncryptedBlobClientBuilder#blobName(String) blobName} setter.
*
* @param endpoint URL of the service
* @return the updated EncryptedBlobClientBuilder object
* @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL.
*/
public EncryptedBlobClientBuilder endpoint(String endpoint) {
    BlobUrlParts parts;
    try {
        parts = BlobUrlParts.parse(new URL(endpoint));
    } catch (MalformedURLException ex) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed."));
    }
    // Split the URL into its account / container / blob / snapshot components.
    this.accountName = parts.getAccountName();
    this.endpoint = BuilderHelper.getEndpoint(parts);
    this.containerName = parts.getBlobContainerName();
    this.blobName = Utility.urlEncode(parts.getBlobName());
    this.snapshot = parts.getSnapshot();
    // A SAS embedded in the URL becomes this builder's credential.
    String embeddedSas = parts.getCommonSasQueryParameters().encode();
    if (!CoreUtils.isNullOrEmpty(embeddedSas)) {
        this.sasToken(embeddedSas);
    }
    return this;
}
/**
* Sets the name of the container that contains the blob.
*
* @param containerName Name of the container. If the value {@code null} or empty the root container, {@code $root},
* will be used.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder containerName(String containerName) {
    // A null/empty value is resolved to the root container ($root) at build time
    // (see buildEncryptedBlobAsyncClient()).
    this.containerName = containerName;
    return this;
}
/**
* Sets the name of the blob.
*
* @param blobName Name of the blob.
* @return the updated EncryptedBlobClientBuilder object
* @throws NullPointerException If {@code blobName} is {@code null}
*/
public EncryptedBlobClientBuilder blobName(String blobName) {
    Objects.requireNonNull(blobName, "'blobName' cannot be null.");
    // Decode first so an already URL-encoded name is not double-encoded.
    this.blobName = Utility.urlEncode(Utility.urlDecode(blobName));
    return this;
}
/**
* Sets the snapshot identifier of the blob.
*
* @param snapshot Snapshot identifier for the blob.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder snapshot(String snapshot) {
    // Passed through to the client at build time; null targets the base blob.
    this.snapshot = snapshot;
    return this;
}
/**
* Sets the {@link HttpClient} to use for sending a receiving requests to and from the service.
*
* @param httpClient HttpClient to use for requests.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder httpClient(HttpClient httpClient) {
    // Warn when a previously configured client is being cleared; null is still accepted.
    if (httpClient == null && this.httpClient != null) {
        logger.info("'httpClient' is being set to 'null' when it was previously configured.");
    }
    this.httpClient = httpClient;
    return this;
}
/**
* Adds a {@link HttpPipelinePolicy} to apply on each request sent. The policy will be added after the retry policy.
* If the method is called multiple times, all policies will be added and their order preserved.
*
* @param pipelinePolicy a pipeline policy
* @return the updated EncryptedBlobClientBuilder object
* @throws NullPointerException If {@code pipelinePolicy} is {@code null}.
*/
public EncryptedBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) {
    // Policies accumulate across calls; their addition order is preserved.
    this.additionalPolicies.add(Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null"))
;
    return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return the updated EncryptedBlobClientBuilder object
* @throws NullPointerException If {@code logOptions} is {@code null}.
*/
public EncryptedBlobClientBuilder httpLogOptions(HttpLogOptions logOptions) {
    // Replaces the defaults installed by the constructor (getDefaultHttpLogOptions()).
    this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
    return this;
}
/**
* Gets the default Storage whitelist log headers and query parameters.
*
* @return the default http log options.
*/
public static HttpLogOptions getDefaultHttpLogOptions() {
    // Static: the defaults come from BuilderHelper, independent of any builder state.
    return BuilderHelper.getDefaultHttpLogOptions();
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder configuration(Configuration configuration) {
    // Stored as-is; null is permitted and falls back to environment defaults at build time.
    // NOTE(review): fallback behavior happens in getHttpPipeline(), not visible here — confirm.
    this.configuration = configuration;
    return this;
}
/**
* Sets the request retry options for all the requests made through the client.
*
* @param retryOptions {@link RequestRetryOptions}.
* @return the updated EncryptedBlobClientBuilder object
* @throws NullPointerException If {@code retryOptions} is {@code null}.
*/
public EncryptedBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) {
    // Replaces the default RequestRetryOptions created at field initialization.
    this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null.");
    return this;
}
/**
* Sets the {@link HttpPipeline} to use for the service client, and adds a decryption policy if one is not present.
* Note that the underlying pipeline should not already be configured for encryption/decryption.
* <p>
* If {@code pipeline} is set, all other settings are ignored, aside from {@link
* and {@link
*
* @param httpPipeline HttpPipeline to use for sending service requests and receiving responses.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder pipeline(HttpPipeline httpPipeline) {
    // Warn when a previously configured pipeline is being cleared; null is still accepted.
    if (httpPipeline == null && this.httpPipeline != null) {
        logger.info("HttpPipeline is being set to 'null' when it was previously configured.");
    }
    this.httpPipeline = httpPipeline;
    return this;
}
/**
* Sets the {@link BlobServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link BlobServiceVersion} of the service to be used when making requests.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder serviceVersion(BlobServiceVersion version) {
    // When left unset, buildEncryptedBlobAsyncClient() falls back to BlobServiceVersion.getLatest().
    this.version = version;
    return this;
}
/**
* Sets the {@link CustomerProvidedKey customer provided key} that is used to encrypt blob contents on the server.
*
* @param customerProvidedKey {@link CustomerProvidedKey}
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder customerProvidedKey(CustomerProvidedKey customerProvidedKey) {
    // Translate the public CustomerProvidedKey type into the internal CpkInfo
    // representation; null clears any previously configured key.
    this.customerProvidedKey = customerProvidedKey == null
        ? null
        : new CpkInfo()
            .setEncryptionKey(customerProvidedKey.getKey())
            .setEncryptionKeySha256(customerProvidedKey.getKeySha256())
            .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm());
    return this;
}
/**
* Configures the builder based on the passed {@link BlobClient}. This will set the {@link HttpPipeline},
* {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the underlying
* pipeline should not already be configured for encryption/decryption.
*
* <p>If {@code pipeline} is set, all other settings are ignored, aside from {@link
* {@link
*
* <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and
* encryption scope properties from the provided client. To set CPK, please use
* {@link
*
* @param blobClient BlobClient used to configure the builder.
* @return the updated EncryptedBlobClientBuilder object
* @throws NullPointerException If {@code containerClient} is {@code null}.
*/
public EncryptedBlobClientBuilder blobClient(BlobClient blobClient) {
    Objects.requireNonNull(blobClient);
    // Copies the pipeline, blob URL and service version; CPK is deliberately not copied
    // (see method javadoc).
    return client(blobClient.getHttpPipeline(), blobClient.getBlobUrl(), blobClient.getServiceVersion());
}
/**
* Configures the builder based on the passed {@link BlobAsyncClient}. This will set the {@link HttpPipeline},
* {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the underlying
* pipeline should not already be configured for encryption/decryption.
*
* <p>If {@code pipeline} is set, all other settings are ignored, aside from {@link
* {@link
*
* <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and
* encryption scope properties from the provided client. To set CPK, please use
* {@link
*
* @param blobAsyncClient BlobAsyncClient used to configure the builder.
* @return the updated EncryptedBlobClientBuilder object
* @throws NullPointerException If {@code containerClient} is {@code null}.
*/
public EncryptedBlobClientBuilder blobAsyncClient(BlobAsyncClient blobAsyncClient) {
    Objects.requireNonNull(blobAsyncClient);
    // Copies the pipeline, blob URL and service version; CPK is deliberately not copied
    // (see method javadoc).
    return client(blobAsyncClient.getHttpPipeline(), blobAsyncClient.getBlobUrl(),
        blobAsyncClient.getServiceVersion());
}
/**
* Helper method to transform a regular client into an encrypted client
*
* @param httpPipeline {@link HttpPipeline}
* @param endpoint The endpoint.
* @param version {@link BlobServiceVersion} of the service to be used when making requests.
* @return the updated EncryptedBlobClientBuilder object
*/
private EncryptedBlobClientBuilder client(HttpPipeline httpPipeline, String endpoint, BlobServiceVersion version) {
    // Reuse the regular client's endpoint, service version and pipeline for this builder.
    return this.endpoint(endpoint)
        .serviceVersion(version)
        .pipeline(httpPipeline);
}
}
|
class EncryptedBlobClientBuilder {
private final ClientLogger logger = new ClientLogger(EncryptedBlobClientBuilder.class);
private static final String SDK_NAME = "name";
private static final String SDK_VERSION = "version";
private String endpoint;
private String accountName;
private String containerName;
private String blobName;
private String snapshot;
private StorageSharedKeyCredential storageSharedKeyCredential;
private TokenCredential tokenCredential;
private SasTokenCredential sasTokenCredential;
private HttpClient httpClient;
private final List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>();
private HttpLogOptions logOptions;
private RequestRetryOptions retryOptions = new RequestRetryOptions();
private HttpPipeline httpPipeline;
private Configuration configuration;
private AsyncKeyEncryptionKey keyWrapper;
private AsyncKeyEncryptionKeyResolver keyResolver;
private String keyWrapAlgorithm;
private BlobServiceVersion version;
private CpkInfo customerProvidedKey;
/**
* Creates a new instance of the EncryptedBlobClientBuilder
*/
public EncryptedBlobClientBuilder() {
logOptions = getDefaultHttpLogOptions();
}
/**
* Creates a {@link EncryptedBlobClient} based on options set in the Builder.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient}
*
* @return a {@link EncryptedBlobClient} created from the configurations in this builder.
* @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}.
*/
public EncryptedBlobClient buildEncryptedBlobClient() {
return new EncryptedBlobClient(buildEncryptedBlobAsyncClient());
}
/**
* Creates a {@link EncryptedBlobAsyncClient} based on options set in the Builder.
*
* <p><strong>Code Samples</strong></p>
*
* {@codesnippet com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient}
*
* @return a {@link EncryptedBlobAsyncClient} created from the configurations in this builder.
* @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}.
*/
public EncryptedBlobAsyncClient buildEncryptedBlobAsyncClient() {
Objects.requireNonNull(blobName, "'blobName' cannot be null.");
checkValidEncryptionParameters();
/*
Implicit and explicit root container access are functionally equivalent, but explicit references are easier
to read and debug.
*/
if (CoreUtils.isNullOrEmpty(containerName)) {
containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME;
}
BlobServiceVersion serviceVersion = version != null ? version : BlobServiceVersion.getLatest();
return new EncryptedBlobAsyncClient(getHttpPipeline(),
String.format("%s/%s/%s", endpoint, containerName, blobName), serviceVersion, accountName, containerName,
blobName, snapshot, customerProvidedKey, keyWrapper, keyWrapAlgorithm);
}
/**
* Sets the encryption key parameters for the client
*
* @param key An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content encryption key
* @param keyWrapAlgorithm The {@link String} used to wrap the key.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder key(AsyncKeyEncryptionKey key, String keyWrapAlgorithm) {
this.keyWrapper = key;
this.keyWrapAlgorithm = keyWrapAlgorithm;
return this;
}
/**
* Sets the encryption parameters for this client
*
* @param keyResolver The key resolver used to select the correct key for decrypting existing blobs.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder keyResolver(AsyncKeyEncryptionKeyResolver keyResolver) {
this.keyResolver = keyResolver;
return this;
}
private void checkValidEncryptionParameters() {
if (this.keyWrapper == null && this.keyResolver == null) {
throw logger.logExceptionAsError(new IllegalArgumentException("Key and KeyResolver cannot both be null"));
}
if (this.keyWrapper != null && this.keyWrapAlgorithm == null) {
throw logger.logExceptionAsError(
new IllegalArgumentException("Key Wrap Algorithm must be specified with a Key."));
}
}
/**
* Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service.
*
* @param credential {@link StorageSharedKeyCredential}.
* @return the updated EncryptedBlobClientBuilder
* @throws NullPointerException If {@code credential} is {@code null}.
*/
public EncryptedBlobClientBuilder credential(StorageSharedKeyCredential credential) {
this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
this.tokenCredential = null;
this.sasTokenCredential = null;
return this;
}
/**
* Sets the {@link TokenCredential} used to authorize requests sent to the service.
*
* @param credential {@link TokenCredential}.
* @return the updated EncryptedBlobClientBuilder
* @throws NullPointerException If {@code credential} is {@code null}.
*/
public EncryptedBlobClientBuilder credential(TokenCredential credential) {
this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
this.storageSharedKeyCredential = null;
this.sasTokenCredential = null;
return this;
}
/**
* Sets the SAS token used to authorize requests sent to the service.
*
* @param sasToken The SAS token to use for authenticating requests.
* @return the updated EncryptedBlobClientBuilder
* @throws NullPointerException If {@code sasToken} is {@code null}.
*/
public EncryptedBlobClientBuilder sasToken(String sasToken) {
this.sasTokenCredential = new SasTokenCredential(Objects.requireNonNull(sasToken,
"'sasToken' cannot be null."));
this.storageSharedKeyCredential = null;
this.tokenCredential = null;
return this;
}
/**
* Clears the credential used to authorize the request.
*
* <p>This is for blobs that are publicly accessible.</p>
*
* @return the updated EncryptedBlobClientBuilder
*/
public EncryptedBlobClientBuilder setAnonymousAccess() {
this.storageSharedKeyCredential = null;
this.tokenCredential = null;
this.sasTokenCredential = null;
return this;
}
/**
* Sets the connection string to connect to the service.
*
* @param connectionString Connection string of the storage account.
* @return the updated EncryptedBlobClientBuilder
* @throws IllegalArgumentException If {@code connectionString} is invalid.
*/
public EncryptedBlobClientBuilder connectionString(String connectionString) {
StorageConnectionString storageConnectionString
= StorageConnectionString.create(connectionString, logger);
StorageEndpoint endpoint = storageConnectionString.getBlobEndpoint();
if (endpoint == null || endpoint.getPrimaryUri() == null) {
throw logger
.logExceptionAsError(new IllegalArgumentException(
"connectionString missing required settings to derive blob service endpoint."));
}
this.endpoint(endpoint.getPrimaryUri());
if (storageConnectionString.getAccountName() != null) {
this.accountName = storageConnectionString.getAccountName();
}
StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings();
if (authSettings.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) {
this.credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(),
authSettings.getAccount().getAccessKey()));
} else if (authSettings.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) {
this.sasToken(authSettings.getSasToken());
}
return this;
}
/**
* Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name)
*
* <p>If the endpoint is to a blob in the root container, this method will fail as it will interpret the blob name
* as the container name. With only one path element, it is impossible to distinguish between a container name and a
* blob in the root container, so it is assumed to be the container name as this is much more common. When working
* with blobs in the root container, it is best to set the endpoint to the account url and specify the blob name
* separately using the {@link EncryptedBlobClientBuilder#blobName(String) blobName} setter.
*
* @param endpoint URL of the service
* @return the updated EncryptedBlobClientBuilder object
* @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL.
*/
public EncryptedBlobClientBuilder endpoint(String endpoint) {
try {
URL url = new URL(endpoint);
BlobUrlParts parts = BlobUrlParts.parse(url);
this.accountName = parts.getAccountName();
this.endpoint = BuilderHelper.getEndpoint(parts);
this.containerName = parts.getBlobContainerName();
this.blobName = Utility.urlEncode(parts.getBlobName());
this.snapshot = parts.getSnapshot();
String sasToken = parts.getCommonSasQueryParameters().encode();
if (!CoreUtils.isNullOrEmpty(sasToken)) {
this.sasToken(sasToken);
}
} catch (MalformedURLException ex) {
throw logger.logExceptionAsError(
new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed."));
}
return this;
}
/**
* Sets the name of the container that contains the blob.
*
* @param containerName Name of the container. If the value {@code null} or empty the root container, {@code $root},
* will be used.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder containerName(String containerName) {
this.containerName = containerName;
return this;
}
/**
* Sets the name of the blob.
*
* @param blobName Name of the blob.
* @return the updated EncryptedBlobClientBuilder object
* @throws NullPointerException If {@code blobName} is {@code null}
*/
public EncryptedBlobClientBuilder blobName(String blobName) {
this.blobName = Utility.urlEncode(Utility.urlDecode(Objects.requireNonNull(blobName,
"'blobName' cannot be null.")));
return this;
}
/**
* Sets the snapshot identifier of the blob.
*
* @param snapshot Snapshot identifier for the blob.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder snapshot(String snapshot) {
this.snapshot = snapshot;
return this;
}
/**
* Sets the {@link HttpClient} to use for sending a receiving requests to and from the service.
*
* @param httpClient HttpClient to use for requests.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder httpClient(HttpClient httpClient) {
if (this.httpClient != null && httpClient == null) {
logger.info("'httpClient' is being set to 'null' when it was previously configured.");
}
this.httpClient = httpClient;
return this;
}
/**
* Adds a {@link HttpPipelinePolicy} to apply on each request sent. The policy will be added after the retry policy.
* If the method is called multiple times, all policies will be added and their order preserved.
*
* @param pipelinePolicy a pipeline policy
* @return the updated EncryptedBlobClientBuilder object
* @throws NullPointerException If {@code pipelinePolicy} is {@code null}.
*/
public EncryptedBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) {
this.additionalPolicies.add(Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null"));
return this;
}
/**
* Sets the {@link HttpLogOptions} for service requests.
*
* @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
* @return the updated EncryptedBlobClientBuilder object
* @throws NullPointerException If {@code logOptions} is {@code null}.
*/
public EncryptedBlobClientBuilder httpLogOptions(HttpLogOptions logOptions) {
this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
return this;
}
/**
* Gets the default Storage whitelist log headers and query parameters.
*
* @return the default http log options.
*/
public static HttpLogOptions getDefaultHttpLogOptions() {
return BuilderHelper.getDefaultHttpLogOptions();
}
/**
* Sets the configuration object used to retrieve environment configuration values during building of the client.
*
* @param configuration Configuration store used to retrieve environment configurations.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder configuration(Configuration configuration) {
this.configuration = configuration;
return this;
}
/**
* Sets the request retry options for all the requests made through the client.
*
* @param retryOptions {@link RequestRetryOptions}.
* @return the updated EncryptedBlobClientBuilder object
* @throws NullPointerException If {@code retryOptions} is {@code null}.
*/
public EncryptedBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) {
this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null.");
return this;
}
/**
* Sets the {@link HttpPipeline} to use for the service client, and adds a decryption policy if one is not present.
* Note that the underlying pipeline should not already be configured for encryption/decryption.
* <p>
* If {@code pipeline} is set, all other settings are ignored, aside from {@link
* and {@link
*
* @param httpPipeline HttpPipeline to use for sending service requests and receiving responses.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder pipeline(HttpPipeline httpPipeline) {
if (this.httpPipeline != null && httpPipeline == null) {
logger.info("HttpPipeline is being set to 'null' when it was previously configured.");
}
this.httpPipeline = httpPipeline;
return this;
}
/**
* Sets the {@link BlobServiceVersion} that is used when making API requests.
* <p>
* If a service version is not provided, the service version that will be used will be the latest known service
* version based on the version of the client library being used. If no service version is specified, updating to a
* newer version of the client library will have the result of potentially moving to a newer service version.
* <p>
* Targeting a specific service version may also mean that the service will return an error for newer APIs.
*
* @param version {@link BlobServiceVersion} of the service to be used when making requests.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder serviceVersion(BlobServiceVersion version) {
this.version = version;
return this;
}
/**
* Sets the {@link CustomerProvidedKey customer provided key} that is used to encrypt blob contents on the server.
*
* @param customerProvidedKey {@link CustomerProvidedKey}
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder customerProvidedKey(CustomerProvidedKey customerProvidedKey) {
if (customerProvidedKey == null) {
this.customerProvidedKey = null;
} else {
this.customerProvidedKey = new CpkInfo()
.setEncryptionKey(customerProvidedKey.getKey())
.setEncryptionKeySha256(customerProvidedKey.getKeySha256())
.setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm());
}
return this;
}
/**
* Configures the builder based on the passed {@link BlobClient}. This will set the {@link HttpPipeline},
* {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the underlying
* pipeline should not already be configured for encryption/decryption.
*
* <p>If {@code pipeline} is set, all other settings are ignored, aside from {@link
* {@link
*
* <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and
* encryption scope properties from the provided client. To set CPK, please use
* {@link
*
* @param blobClient BlobClient used to configure the builder.
* @return the updated EncryptedBlobClientBuilder object
* @throws NullPointerException If {@code containerClient} is {@code null}.
*/
public EncryptedBlobClientBuilder blobClient(BlobClient blobClient) {
Objects.requireNonNull(blobClient);
return client(blobClient.getHttpPipeline(), blobClient.getBlobUrl(), blobClient.getServiceVersion());
}
/**
* Configures the builder based on the passed {@link BlobAsyncClient}. This will set the {@link HttpPipeline},
* {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the underlying
* pipeline should not already be configured for encryption/decryption.
*
* <p>If {@code pipeline} is set, all other settings are ignored, aside from {@link
* {@link
*
* <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and
* encryption scope properties from the provided client. To set CPK, please use
* {@link
*
* @param blobAsyncClient BlobAsyncClient used to configure the builder.
* @return the updated EncryptedBlobClientBuilder object
* @throws NullPointerException If {@code containerClient} is {@code null}.
*/
public EncryptedBlobClientBuilder blobAsyncClient(BlobAsyncClient blobAsyncClient) {
Objects.requireNonNull(blobAsyncClient);
return client(blobAsyncClient.getHttpPipeline(), blobAsyncClient.getBlobUrl(),
blobAsyncClient.getServiceVersion());
}
/**
* Helper method to transform a regular client into an encrypted client
*
* @param httpPipeline {@link HttpPipeline}
* @param endpoint The endpoint.
* @param version {@link BlobServiceVersion} of the service to be used when making requests.
* @return the updated EncryptedBlobClientBuilder object
*/
private EncryptedBlobClientBuilder client(HttpPipeline httpPipeline, String endpoint, BlobServiceVersion version) {
this.endpoint(endpoint);
this.serviceVersion(version);
return this.pipeline(httpPipeline);
}
}
|
I think I got it now. Thanks
|
private Mono<Void> addIntendedCollectionRidAndSessionToken(RxDocumentServiceRequest request) {
    // Apply the session token to the request, then add the intended collection rid.
    Mono<Void> sessionTokenApplied = applySessionToken(request);
    return sessionTokenApplied.then(addIntendedCollectionRid(request));
}
|
return applySessionToken(request).then(addIntendedCollectionRid(request));
|
private Mono<Void> addIntendedCollectionRidAndSessionToken(RxDocumentServiceRequest request) {
return applySessionToken(request).then(addIntendedCollectionRid(request));
}
|
/**
 * Store model that executes document service requests against the Cosmos gateway.
 * Class body continues beyond this chunk; only the fields and constructor are visible here.
 */
class RxGatewayStoreModel implements RxStoreModel {
    // Shared empty payload constant — presumably used for body-less responses; confirm at usage sites.
    private final static byte[] EMPTY_BYTE_ARRAY = {};
    private final DiagnosticsClientContext clientContext;
    private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class);
    // Headers attached to every gateway request (cache-control, version, user agent, ...).
    private final Map<String, String> defaultHeaders;
    private final HttpClient httpClient;
    private final QueryCompatibilityMode queryCompatibilityMode;
    private final GlobalEndpointManager globalEndpointManager;
    private ConsistencyLevel defaultConsistencyLevel;
    private ISessionContainer sessionContainer;
    // The fields below are not final: usage/initialization is outside this chunk.
    private ThroughputControlStore throughputControlStore;
    private boolean useMultipleWriteLocations;
    private RxPartitionKeyRangeCache partitionKeyRangeCache;
    private GatewayServiceConfigurationReader gatewayServiceConfigurationReader;
    private RxClientCollectionCache collectionCache;
public RxGatewayStoreModel(
DiagnosticsClientContext clientContext,
ISessionContainer sessionContainer,
ConsistencyLevel defaultConsistencyLevel,
QueryCompatibilityMode queryCompatibilityMode,
UserAgentContainer userAgentContainer,
GlobalEndpointManager globalEndpointManager,
HttpClient httpClient,
ApiType apiType) {
this.clientContext = clientContext;
this.defaultHeaders = new HashMap<>();
this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL,
"no-cache");
this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION,
HttpConstants.Versions.CURRENT_VERSION);
if (apiType != null){
this.defaultHeaders.put(HttpConstants.HttpHeaders.API_TYPE, apiType.toString());
}
if (userAgentContainer == null) {
userAgentContainer = new UserAgentContainer();
}
this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent());
if (defaultConsistencyLevel != null) {
this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL,
defaultConsistencyLevel.toString());
}
this.defaultConsistencyLevel = defaultConsistencyLevel;
this.globalEndpointManager = globalEndpointManager;
this.queryCompatibilityMode = queryCompatibilityMode;
this.httpClient = httpClient;
this.sessionContainer = sessionContainer;
}
void setGatewayServiceConfigurationReader(GatewayServiceConfigurationReader gatewayServiceConfigurationReader) {
this.gatewayServiceConfigurationReader = gatewayServiceConfigurationReader;
}
public void setPartitionKeyRangeCache(RxPartitionKeyRangeCache partitionKeyRangeCache) {
this.partitionKeyRangeCache = partitionKeyRangeCache;
}
public void setUseMultipleWriteLocations(boolean useMultipleWriteLocations) {
this.useMultipleWriteLocations = useMultipleWriteLocations;
}
public void setCollectionCache(RxClientCollectionCache collectionCache) {
this.collectionCache = collectionCache;
}
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.POST);
}
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.PATCH);
}
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.POST);
}
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.GET);
}
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.PUT);
}
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.DELETE);
}
private Mono<RxDocumentServiceResponse> deleteByPartitionKey(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.POST);
}
private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.POST);
}
private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.GET);
}
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
if(request.getOperationType() != OperationType.QueryPlan) {
request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true");
}
switch (this.queryCompatibilityMode) {
case SqlQuery:
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
RuntimeConstants.MediaTypes.SQL);
break;
case Default:
case Query:
default:
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
RuntimeConstants.MediaTypes.QUERY_JSON);
break;
}
return this.performRequest(request, HttpMethod.POST);
}
public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) {
try {
if (request.requestContext.cosmosDiagnostics == null) {
request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics();
}
URI uri = getUri(request);
request.requestContext.resourcePhysicalAddress = uri.toString();
if (this.throughputControlStore != null) {
return this.throughputControlStore.processRequest(request, performRequestInternal(request, method, uri));
}
return this.performRequestInternal(request, method, uri);
} catch (Exception e) {
return Mono.error(e);
}
}
/**
* Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse.
*
* @param request
* @param method
* @param requestUri
* @return Flux<RxDocumentServiceResponse>
*/
public Mono<RxDocumentServiceResponse> performRequestInternal(RxDocumentServiceRequest request, HttpMethod method, URI requestUri) {
try {
HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders());
Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux();
HttpRequest httpRequest = new HttpRequest(method,
requestUri,
requestUri.getPort(),
httpHeaders,
contentAsByteArray);
Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds());
if (OperationType.QueryPlan.equals(request.getOperationType())) {
responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds());
} else if (request.isAddressRefresh()) {
responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds());
}
Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout);
return toDocumentServiceResponse(httpResponseMono, request, httpRequest);
} catch (Exception e) {
return Mono.error(e);
}
}
private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) {
HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size());
for (Entry<String, String> entry : this.defaultHeaders.entrySet()) {
if (!headers.containsKey(entry.getKey())) {
httpHeaders.set(entry.getKey(), entry.getValue());
}
}
if (headers != null) {
for (Entry<String, String> entry : headers.entrySet()) {
if (entry.getValue() == null) {
httpHeaders.set(entry.getKey(), "");
} else {
httpHeaders.set(entry.getKey(), entry.getValue());
}
}
}
return httpHeaders;
}
private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException {
URI rootUri = request.getEndpointOverride();
if (rootUri == null) {
if (request.getIsMedia()) {
rootUri = this.globalEndpointManager.getWriteEndpoints().get(0);
} else {
rootUri = this.globalEndpointManager.resolveServiceEndpoint(request);
}
}
String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed);
if(request.getResourceType().equals(ResourceType.DatabaseAccount)) {
path = StringUtils.EMPTY;
}
return new URI("https",
null,
rootUri.getHost(),
rootUri.getPort(),
ensureSlashPrefixed(path),
null,
null);
}
private String ensureSlashPrefixed(String path) {
if (path == null) {
return null;
}
if (path.startsWith("/")) {
return path;
}
return "/" + path;
}
/**
* Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable.
*
*
* Once the customer code subscribes to the observable returned by the CRUD APIs,
* the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made.
*
* @param httpResponseMono
* @param request
* @return {@link Mono}
*/
private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono,
RxDocumentServiceRequest request,
HttpRequest httpRequest) {
return httpResponseMono.flatMap(httpResponse -> {
HttpHeaders httpResponseHeaders = httpResponse.headers();
int httpResponseStatus = httpResponse.statusCode();
Mono<byte[]> contentObservable = httpResponse
.bodyAsByteArray()
.switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY));
return contentObservable
.map(content -> {
ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord();
if (reactorNettyRequestRecord != null) {
reactorNettyRequestRecord.setTimeCompleted(Instant.now());
BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics,
reactorNettyRequestRecord.takeTimelineSnapshot());
}
validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content);
StoreResponse rsp = new StoreResponse(httpResponseStatus,
HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()),
content);
DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot());
if (request.requestContext.cosmosDiagnostics != null) {
BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null);
DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics);
}
return rsp;
})
.single();
}).map(rsp -> {
if (httpRequest.reactorNettyRequestRecord() != null) {
return new RxDocumentServiceResponse(this.clientContext, rsp,
httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot());
} else {
return new RxDocumentServiceResponse(this.clientContext, rsp);
}
}).onErrorResume(throwable -> {
Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
if (!(unwrappedException instanceof Exception)) {
logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
return Mono.error(unwrappedException);
}
Exception exception = (Exception) unwrappedException;
CosmosException dce;
if (!(exception instanceof CosmosException)) {
logger.error("Network failure", exception);
dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, 0, exception);
BridgeInternal.setRequestHeaders(dce, request.getHeaders());
} else {
dce = (CosmosException) exception;
}
if (WebExceptionUtility.isNetworkFailure(dce)) {
if (WebExceptionUtility.isReadTimeoutException(dce)) {
BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
} else {
BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
}
}
if (request.requestContext.cosmosDiagnostics != null) {
if (BridgeInternal.getClientSideRequestStatics(request.requestContext.cosmosDiagnostics).getGatewayRequestTimeline() == null && httpRequest.reactorNettyRequestRecord() != null) {
BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics,
httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot());
}
BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce);
BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics);
}
return Mono.error(dce);
});
}
private void validateOrThrow(RxDocumentServiceRequest request,
HttpResponseStatus status,
HttpHeaders headers,
byte[] bodyAsBytes) {
int statusCode = status.code();
if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) {
String statusCodeString = status.reasonPhrase() != null
? status.reasonPhrase().replace(" ", "")
: "";
String body = bodyAsBytes != null ? new String(bodyAsBytes) : null;
CosmosError cosmosError;
cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError();
cosmosError = new CosmosError(statusCodeString,
String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString),
cosmosError.getPartitionedQueryExecutionInfo());
CosmosException dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, statusCode, cosmosError, headers.toMap());
BridgeInternal.setRequestHeaders(dce, request.getHeaders());
throw dce;
}
}
private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) {
switch (request.getOperationType()) {
case Create:
case Batch:
return this.create(request);
case Patch:
return this.patch(request);
case Upsert:
return this.upsert(request);
case Delete:
if (request.getResourceType() == ResourceType.PartitionKey) {
return this.deleteByPartitionKey(request);
}
return this.delete(request);
case ExecuteJavaScript:
return this.execute(request);
case Read:
return this.read(request);
case ReadFeed:
return this.readFeed(request);
case Replace:
return this.replace(request);
case SqlQuery:
case Query:
case QueryPlan:
return this.query(request);
default:
throw new IllegalStateException("Unknown operation setType " + request.getOperationType());
}
}
private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) {
Callable<Mono<RxDocumentServiceResponse>> funcDelegate = () -> invokeAsyncInternal(request).single();
return BackoffRetryUtility.executeRetry(funcDelegate, new WebExceptionRetryPolicy(BridgeInternal.getRetryContext(request.requestContext.cosmosDiagnostics)));
}
@Override
public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) {
Mono<RxDocumentServiceResponse> responseObs = this.addIntendedCollectionRidAndSessionToken(request).then(invokeAsync(request));
return responseObs.onErrorResume(
e -> {
CosmosException dce = Utils.as(e, CosmosException.class);
if (dce == null) {
logger.error("unexpected failure {}", e.getMessage(), e);
return Mono.error(e);
}
if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) &&
(dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED ||
dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT ||
(
dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND &&
!Exceptions.isSubStatusCode(dce,
HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) {
this.captureSessionToken(request, dce.getResponseHeaders());
}
if (Exceptions.isThroughputControlRequestRateTooLargeException(dce)) {
BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce);
BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics);
}
return Mono.error(dce);
}
).flatMap(response ->
this.captureSessionTokenAndHandlePartitionSplit(request, response.getResponseHeaders()).then(Mono.just(response))
);
}
@Override
public void enableThroughputControl(ThroughputControlStore throughputControlStore) {
}
private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) {
if (request.getResourceType() == ResourceType.DocumentCollection &&
request.getOperationType() == OperationType.Delete) {
String resourceId;
if (request.getIsNameBased()) {
resourceId = responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID);
} else {
resourceId = request.getResourceId();
}
this.sessionContainer.clearTokenByResourceId(resourceId);
} else {
this.sessionContainer.setSessionToken(request, responseHeaders);
}
}
private Mono<Void> captureSessionTokenAndHandlePartitionSplit(RxDocumentServiceRequest request,
Map<String, String> responseHeaders) {
this.captureSessionToken(request, responseHeaders);
if (request.requestContext.resolvedPartitionKeyRange != null &&
StringUtils.isNotEmpty(request.requestContext.resolvedCollectionRid) &&
StringUtils.isNotEmpty(responseHeaders.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID)) &&
!responseHeaders.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID).equals(request.requestContext.resolvedPartitionKeyRange.getId())) {
return this.partitionKeyRangeCache.refreshAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request.requestContext.resolvedCollectionRid)
.flatMap(collectionRoutingMapValueHolder -> Mono.empty());
}
return Mono.empty();
}
private Mono<Void> addIntendedCollectionRid(RxDocumentServiceRequest request) {
if (this.collectionCache != null && request.getResourceType().equals(ResourceType.Document)) {
return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request).flatMap(documentCollectionValueHolder -> {
if (StringUtils.isEmpty(request.getHeaders().get(INTENDED_COLLECTION_RID_HEADER))) {
request.getHeaders().put(INTENDED_COLLECTION_RID_HEADER,
request.requestContext.resolvedCollectionRid);
} else {
request.intendedCollectionRidPassedIntoSDK = true;
}
return Mono.empty();
});
}
return Mono.empty();
}
private Mono<Void> applySessionToken(RxDocumentServiceRequest request) {
Map<String, String> headers = request.getHeaders();
Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null");
if (isMasterOperation(request.getResourceType(), request.getOperationType())) {
if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) {
request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
}
return Mono.empty();
}
boolean sessionConsistency = RequestHelper.getConsistencyLevelToUse(this.gatewayServiceConfigurationReader,
request) == ConsistencyLevel.SESSION;
if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) {
if (!sessionConsistency ||
(!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)){
request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
}
return Mono.empty();
}
if (!sessionConsistency ||
(!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)) {
return Mono.empty();
}
if (this.collectionCache != null && this.partitionKeyRangeCache != null) {
return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request).
flatMap(collectionValueHolder -> {
if(collectionValueHolder== null || collectionValueHolder.v == null) {
String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
if (!Strings.isNullOrEmpty(sessionToken)) {
headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
}
return Mono.empty();
}
return partitionKeyRangeCache.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
collectionValueHolder.v.getResourceId(),
null,
null).flatMap(collectionRoutingMapValueHolder -> {
if (collectionRoutingMapValueHolder == null || collectionRoutingMapValueHolder.v == null) {
String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
if (!Strings.isNullOrEmpty(sessionToken)) {
headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
}
return Mono.empty();
}
String partitionKeyRangeId =
request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID);
PartitionKeyInternal partitionKeyInternal = request.getPartitionKeyInternal();
if (StringUtils.isNotEmpty(partitionKeyRangeId)) {
PartitionKeyRange range =
collectionRoutingMapValueHolder.v.getRangeByPartitionKeyRangeId(partitionKeyRangeId);
request.requestContext.resolvedPartitionKeyRange = range;
} else if (partitionKeyInternal != null) {
String effectivePartitionKeyString = PartitionKeyInternalHelper
.getEffectivePartitionKeyString(
partitionKeyInternal,
collectionValueHolder.v.getPartitionKey());
PartitionKeyRange range =
collectionRoutingMapValueHolder.v.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
request.requestContext.resolvedPartitionKeyRange = range;
} else {
String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
if (!Strings.isNullOrEmpty(sessionToken)) {
headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
}
return Mono.empty();
}
SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer);
return Mono.empty();
});
});
} else {
String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
if (!Strings.isNullOrEmpty(sessionToken)) {
headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
}
return Mono.empty();
}
}
private static boolean isMasterOperation(ResourceType resourceType, OperationType operationType) {
return ReplicatedResourceClientUtils.isMasterResource(resourceType) ||
isStoredProcedureMasterOperation(resourceType, operationType) ||
operationType == OperationType.QueryPlan;
}
private static boolean isStoredProcedureMasterOperation(ResourceType resourceType, OperationType operationType) {
return resourceType == ResourceType.StoredProcedure && operationType != OperationType.ExecuteJavaScript;
}
}
|
class RxGatewayStoreModel implements RxStoreModel {
private final static byte[] EMPTY_BYTE_ARRAY = {};
private final DiagnosticsClientContext clientContext;
private final Logger logger = LoggerFactory.getLogger(RxGatewayStoreModel.class);
private final Map<String, String> defaultHeaders;
private final HttpClient httpClient;
private final QueryCompatibilityMode queryCompatibilityMode;
private final GlobalEndpointManager globalEndpointManager;
private ConsistencyLevel defaultConsistencyLevel;
private ISessionContainer sessionContainer;
private ThroughputControlStore throughputControlStore;
private boolean useMultipleWriteLocations;
private RxPartitionKeyRangeCache partitionKeyRangeCache;
private GatewayServiceConfigurationReader gatewayServiceConfigurationReader;
private RxClientCollectionCache collectionCache;
public RxGatewayStoreModel(
DiagnosticsClientContext clientContext,
ISessionContainer sessionContainer,
ConsistencyLevel defaultConsistencyLevel,
QueryCompatibilityMode queryCompatibilityMode,
UserAgentContainer userAgentContainer,
GlobalEndpointManager globalEndpointManager,
HttpClient httpClient,
ApiType apiType) {
this.clientContext = clientContext;
this.defaultHeaders = new HashMap<>();
this.defaultHeaders.put(HttpConstants.HttpHeaders.CACHE_CONTROL,
"no-cache");
this.defaultHeaders.put(HttpConstants.HttpHeaders.VERSION,
HttpConstants.Versions.CURRENT_VERSION);
if (apiType != null){
this.defaultHeaders.put(HttpConstants.HttpHeaders.API_TYPE, apiType.toString());
}
if (userAgentContainer == null) {
userAgentContainer = new UserAgentContainer();
}
this.defaultHeaders.put(HttpConstants.HttpHeaders.USER_AGENT, userAgentContainer.getUserAgent());
if (defaultConsistencyLevel != null) {
this.defaultHeaders.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL,
defaultConsistencyLevel.toString());
}
this.defaultConsistencyLevel = defaultConsistencyLevel;
this.globalEndpointManager = globalEndpointManager;
this.queryCompatibilityMode = queryCompatibilityMode;
this.httpClient = httpClient;
this.sessionContainer = sessionContainer;
}
void setGatewayServiceConfigurationReader(GatewayServiceConfigurationReader gatewayServiceConfigurationReader) {
this.gatewayServiceConfigurationReader = gatewayServiceConfigurationReader;
}
public void setPartitionKeyRangeCache(RxPartitionKeyRangeCache partitionKeyRangeCache) {
this.partitionKeyRangeCache = partitionKeyRangeCache;
}
public void setUseMultipleWriteLocations(boolean useMultipleWriteLocations) {
this.useMultipleWriteLocations = useMultipleWriteLocations;
}
boolean isUseMultipleWriteLocations() {
return useMultipleWriteLocations;
}
RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
return partitionKeyRangeCache;
}
GatewayServiceConfigurationReader getGatewayServiceConfigurationReader() {
return gatewayServiceConfigurationReader;
}
RxClientCollectionCache getCollectionCache() {
return collectionCache;
}
public void setCollectionCache(RxClientCollectionCache collectionCache) {
this.collectionCache = collectionCache;
}
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.POST);
}
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.PATCH);
}
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.POST);
}
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.GET);
}
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.PUT);
}
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.DELETE);
}
private Mono<RxDocumentServiceResponse> deleteByPartitionKey(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.POST);
}
private Mono<RxDocumentServiceResponse> execute(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.POST);
}
private Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
return this.performRequest(request, HttpMethod.GET);
}
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
if(request.getOperationType() != OperationType.QueryPlan) {
request.getHeaders().put(HttpConstants.HttpHeaders.IS_QUERY, "true");
}
switch (this.queryCompatibilityMode) {
case SqlQuery:
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
RuntimeConstants.MediaTypes.SQL);
break;
case Default:
case Query:
default:
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE,
RuntimeConstants.MediaTypes.QUERY_JSON);
break;
}
return this.performRequest(request, HttpMethod.POST);
}
public Mono<RxDocumentServiceResponse> performRequest(RxDocumentServiceRequest request, HttpMethod method) {
try {
if (request.requestContext.cosmosDiagnostics == null) {
request.requestContext.cosmosDiagnostics = clientContext.createDiagnostics();
}
URI uri = getUri(request);
request.requestContext.resourcePhysicalAddress = uri.toString();
if (this.throughputControlStore != null) {
return this.throughputControlStore.processRequest(request, performRequestInternal(request, method, uri));
}
return this.performRequestInternal(request, method, uri);
} catch (Exception e) {
return Mono.error(e);
}
}
/**
* Given the request it creates an flux which upon subscription issues HTTP call and emits one RxDocumentServiceResponse.
*
* @param request
* @param method
* @param requestUri
* @return Flux<RxDocumentServiceResponse>
*/
public Mono<RxDocumentServiceResponse> performRequestInternal(RxDocumentServiceRequest request, HttpMethod method, URI requestUri) {
try {
HttpHeaders httpHeaders = this.getHttpRequestHeaders(request.getHeaders());
Flux<byte[]> contentAsByteArray = request.getContentAsByteArrayFlux();
HttpRequest httpRequest = new HttpRequest(method,
requestUri,
requestUri.getPort(),
httpHeaders,
contentAsByteArray);
Duration responseTimeout = Duration.ofSeconds(Configs.getHttpResponseTimeoutInSeconds());
if (OperationType.QueryPlan.equals(request.getOperationType())) {
responseTimeout = Duration.ofSeconds(Configs.getQueryPlanResponseTimeoutInSeconds());
} else if (request.isAddressRefresh()) {
responseTimeout = Duration.ofSeconds(Configs.getAddressRefreshResponseTimeoutInSeconds());
}
Mono<HttpResponse> httpResponseMono = this.httpClient.send(httpRequest, responseTimeout);
return toDocumentServiceResponse(httpResponseMono, request, httpRequest);
} catch (Exception e) {
return Mono.error(e);
}
}
private HttpHeaders getHttpRequestHeaders(Map<String, String> headers) {
HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size());
for (Entry<String, String> entry : this.defaultHeaders.entrySet()) {
if (!headers.containsKey(entry.getKey())) {
httpHeaders.set(entry.getKey(), entry.getValue());
}
}
if (headers != null) {
for (Entry<String, String> entry : headers.entrySet()) {
if (entry.getValue() == null) {
httpHeaders.set(entry.getKey(), "");
} else {
httpHeaders.set(entry.getKey(), entry.getValue());
}
}
}
return httpHeaders;
}
private URI getUri(RxDocumentServiceRequest request) throws URISyntaxException {
URI rootUri = request.getEndpointOverride();
if (rootUri == null) {
if (request.getIsMedia()) {
rootUri = this.globalEndpointManager.getWriteEndpoints().get(0);
} else {
rootUri = this.globalEndpointManager.resolveServiceEndpoint(request);
}
}
String path = PathsHelper.generatePath(request.getResourceType(), request, request.isFeed);
if(request.getResourceType().equals(ResourceType.DatabaseAccount)) {
path = StringUtils.EMPTY;
}
return new URI("https",
null,
rootUri.getHost(),
rootUri.getPort(),
ensureSlashPrefixed(path),
null,
null);
}
private String ensureSlashPrefixed(String path) {
if (path == null) {
return null;
}
if (path.startsWith("/")) {
return path;
}
return "/" + path;
}
/**
* Transforms the reactor netty's client response Observable to RxDocumentServiceResponse Observable.
*
*
* Once the customer code subscribes to the observable returned by the CRUD APIs,
* the subscription goes up till it reaches the source reactor netty's observable, and at that point the HTTP invocation will be made.
*
* @param httpResponseMono
* @param request
* @return {@link Mono}
*/
private Mono<RxDocumentServiceResponse> toDocumentServiceResponse(Mono<HttpResponse> httpResponseMono,
RxDocumentServiceRequest request,
HttpRequest httpRequest) {
return httpResponseMono.flatMap(httpResponse -> {
HttpHeaders httpResponseHeaders = httpResponse.headers();
int httpResponseStatus = httpResponse.statusCode();
Mono<byte[]> contentObservable = httpResponse
.bodyAsByteArray()
.switchIfEmpty(Mono.just(EMPTY_BYTE_ARRAY));
return contentObservable
.map(content -> {
ReactorNettyRequestRecord reactorNettyRequestRecord = httpResponse.request().reactorNettyRequestRecord();
if (reactorNettyRequestRecord != null) {
reactorNettyRequestRecord.setTimeCompleted(Instant.now());
BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics,
reactorNettyRequestRecord.takeTimelineSnapshot());
}
validateOrThrow(request, HttpResponseStatus.valueOf(httpResponseStatus), httpResponseHeaders, content);
StoreResponse rsp = new StoreResponse(httpResponseStatus,
HttpUtils.unescape(httpResponseHeaders.toMap().entrySet()),
content);
DirectBridgeInternal.setRequestTimeline(rsp, reactorNettyRequestRecord.takeTimelineSnapshot());
if (request.requestContext.cosmosDiagnostics != null) {
BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, rsp, null);
DirectBridgeInternal.setCosmosDiagnostics(rsp, request.requestContext.cosmosDiagnostics);
}
return rsp;
})
.single();
}).map(rsp -> {
if (httpRequest.reactorNettyRequestRecord() != null) {
return new RxDocumentServiceResponse(this.clientContext, rsp,
httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot());
} else {
return new RxDocumentServiceResponse(this.clientContext, rsp);
}
}).onErrorResume(throwable -> {
Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
if (!(unwrappedException instanceof Exception)) {
logger.error("Unexpected failure {}", unwrappedException.getMessage(), unwrappedException);
return Mono.error(unwrappedException);
}
Exception exception = (Exception) unwrappedException;
CosmosException dce;
if (!(exception instanceof CosmosException)) {
logger.error("Network failure", exception);
dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, 0, exception);
BridgeInternal.setRequestHeaders(dce, request.getHeaders());
} else {
dce = (CosmosException) exception;
}
if (WebExceptionUtility.isNetworkFailure(dce)) {
if (WebExceptionUtility.isReadTimeoutException(dce)) {
BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_READ_TIMEOUT);
} else {
BridgeInternal.setSubStatusCode(dce, HttpConstants.SubStatusCodes.GATEWAY_ENDPOINT_UNAVAILABLE);
}
}
if (request.requestContext.cosmosDiagnostics != null) {
if (BridgeInternal.getClientSideRequestStatics(request.requestContext.cosmosDiagnostics).getGatewayRequestTimeline() == null && httpRequest.reactorNettyRequestRecord() != null) {
BridgeInternal.setGatewayRequestTimelineOnDiagnostics(request.requestContext.cosmosDiagnostics,
httpRequest.reactorNettyRequestRecord().takeTimelineSnapshot());
}
BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce);
BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics);
}
return Mono.error(dce);
});
}
/**
 * Throws a {@link CosmosException} when the gateway HTTP response indicates an error.
 *
 * <p>For status codes at or above {@code MINIMUM_STATUSCODE_AS_ERROR_GATEWAY} the response
 * body is parsed into a {@link CosmosError} (or an empty one when the body is blank), the
 * status reason phrase is appended to the error message, and the resulting exception is
 * stamped with the request headers before being thrown.
 *
 * @param request     the originating service request (supplies physical address and headers)
 * @param status      HTTP status of the gateway response
 * @param headers     response headers, copied onto the thrown exception
 * @param bodyAsBytes raw response body; may be null
 * @throws CosmosException when the status code is in the error range
 */
private void validateOrThrow(RxDocumentServiceRequest request,
                             HttpResponseStatus status,
                             HttpHeaders headers,
                             byte[] bodyAsBytes) {
    int statusCode = status.code();
    if (statusCode >= HttpConstants.StatusCodes.MINIMUM_STATUSCODE_AS_ERROR_GATEWAY) {
        // Reason phrase with spaces stripped, e.g. "Not Found" -> "NotFound".
        String statusCodeString = status.reasonPhrase() != null
            ? status.reasonPhrase().replace(" ", "")
            : "";
        // Decode explicitly as UTF-8: the gateway returns UTF-8 JSON. The previous
        // new String(bodyAsBytes) used the platform default charset, which can mangle
        // non-ASCII error payloads on non-UTF-8 JVMs.
        String body = bodyAsBytes != null
            ? new String(bodyAsBytes, java.nio.charset.StandardCharsets.UTF_8)
            : null;
        CosmosError cosmosError;
        cosmosError = (StringUtils.isNotEmpty(body)) ? new CosmosError(body) : new CosmosError();
        // Re-wrap so the message carries the status code alongside the server message.
        cosmosError = new CosmosError(statusCodeString,
            String.format("%s, StatusCode: %s", cosmosError.getMessage(), statusCodeString),
            cosmosError.getPartitionedQueryExecutionInfo());
        CosmosException dce = BridgeInternal.createCosmosException(request.requestContext.resourcePhysicalAddress, statusCode, cosmosError, headers.toMap());
        BridgeInternal.setRequestHeaders(dce, request.getHeaders());
        throw dce;
    }
}
/**
 * Routes a service request to the gateway handler that matches its operation type.
 *
 * @param request the request to dispatch
 * @return the publisher produced by the matching handler
 * @throws IllegalStateException when the operation type is not recognized
 */
private Mono<RxDocumentServiceResponse> invokeAsyncInternal(RxDocumentServiceRequest request) {
    final OperationType operationType = request.getOperationType();
    if (operationType == OperationType.Create || operationType == OperationType.Batch) {
        return this.create(request);
    }
    if (operationType == OperationType.Patch) {
        return this.patch(request);
    }
    if (operationType == OperationType.Upsert) {
        return this.upsert(request);
    }
    if (operationType == OperationType.Delete) {
        // Deleting an entire logical partition is a distinct gateway operation.
        return request.getResourceType() == ResourceType.PartitionKey
            ? this.deleteByPartitionKey(request)
            : this.delete(request);
    }
    if (operationType == OperationType.ExecuteJavaScript) {
        return this.execute(request);
    }
    if (operationType == OperationType.Read) {
        return this.read(request);
    }
    if (operationType == OperationType.ReadFeed) {
        return this.readFeed(request);
    }
    if (operationType == OperationType.Replace) {
        return this.replace(request);
    }
    if (operationType == OperationType.SqlQuery
        || operationType == OperationType.Query
        || operationType == OperationType.QueryPlan) {
        return this.query(request);
    }
    throw new IllegalStateException("Unknown operation setType " + operationType);
}
/**
 * Dispatches the request with web-exception retry semantics; each retry attempt
 * re-invokes {@link #invokeAsyncInternal} and expects exactly one response element.
 */
private Mono<RxDocumentServiceResponse> invokeAsync(RxDocumentServiceRequest request) {
    final WebExceptionRetryPolicy retryPolicy =
        new WebExceptionRetryPolicy(BridgeInternal.getRetryContext(request.requestContext.cosmosDiagnostics));
    return BackoffRetryUtility.executeRetry(() -> invokeAsyncInternal(request).single(), retryPolicy);
}
/**
 * Entry point for gateway request processing: stamps intended-collection-rid and session
 * headers, dispatches with retries, then captures session tokens and reacts to partition
 * splits on both the success and certain failure paths.
 */
@Override
public Mono<RxDocumentServiceResponse> processMessage(RxDocumentServiceRequest request) {
    Mono<RxDocumentServiceResponse> responseObs = this.addIntendedCollectionRidAndSessionToken(request).then(invokeAsync(request));
    return responseObs.onErrorResume(
        e -> {
            CosmosException dce = Utils.as(e, CosmosException.class);
            if (dce == null) {
                // Anything that is not a CosmosException is unexpected here; log and rethrow as-is.
                logger.error("unexpected failure {}", e.getMessage(), e);
                return Mono.error(e);
            }
            // For non-master resources, 412 / 409, and 404s other than
            // READ_SESSION_NOT_AVAILABLE are "logical" failures whose responses still carry
            // a usable session token, so capture it before propagating the error.
            if ((!ReplicatedResourceClientUtils.isMasterResource(request.getResourceType())) &&
                (dce.getStatusCode() == HttpConstants.StatusCodes.PRECONDITION_FAILED ||
                    dce.getStatusCode() == HttpConstants.StatusCodes.CONFLICT ||
                    (
                        dce.getStatusCode() == HttpConstants.StatusCodes.NOTFOUND &&
                            !Exceptions.isSubStatusCode(dce,
                                HttpConstants.SubStatusCodes.READ_SESSION_NOT_AVAILABLE)))) {
                this.captureSessionToken(request, dce.getResponseHeaders());
            }
            // Throughput-control 429s never reached the wire, so record diagnostics here;
            // other failure paths record them closer to the HTTP call.
            if (Exceptions.isThroughputControlRequestRateTooLargeException(dce)) {
                BridgeInternal.recordGatewayResponse(request.requestContext.cosmosDiagnostics, request, null, dce);
                BridgeInternal.setCosmosDiagnostics(dce, request.requestContext.cosmosDiagnostics);
            }
            return Mono.error(dce);
        }
    ).flatMap(response ->
        // On success: capture the session token and, if the response came from an
        // unexpected partition key range, refresh the routing map before emitting.
        this.captureSessionTokenAndHandlePartitionSplit(request, response.getResponseHeaders()).then(Mono.just(response))
    );
}
// Intentionally a no-op: this store model does not hold a ThroughputControlStore.
// NOTE(review): presumably throughput control is wired elsewhere for the gateway
// path (e.g. by a wrapping store model) — confirm before relying on this.
@Override
public void enableThroughputControl(ThroughputControlStore throughputControlStore) {
}
/**
 * Records the session token from a response, or clears cached tokens for a
 * collection that has just been deleted.
 */
private void captureSessionToken(RxDocumentServiceRequest request, Map<String, String> responseHeaders) {
    final boolean isCollectionDelete =
        request.getResourceType() == ResourceType.DocumentCollection
            && request.getOperationType() == OperationType.Delete;
    if (!isCollectionDelete) {
        this.sessionContainer.setSessionToken(request, responseHeaders);
        return;
    }
    // Name-based requests don't carry the rid; take it from the response's owner id.
    final String resourceId = request.getIsNameBased()
        ? responseHeaders.get(HttpConstants.HttpHeaders.OWNER_ID)
        : request.getResourceId();
    this.sessionContainer.clearTokenByResourceId(resourceId);
}
/**
 * Captures the session token from the response and, when the response arrived from a
 * different partition key range than the one resolved for the request (a likely
 * split/merge), refreshes the cached routing map for the collection.
 */
private Mono<Void> captureSessionTokenAndHandlePartitionSplit(RxDocumentServiceRequest request,
                                                              Map<String, String> responseHeaders) {
    this.captureSessionToken(request, responseHeaders);
    if (request.requestContext.resolvedPartitionKeyRange == null
        || StringUtils.isEmpty(request.requestContext.resolvedCollectionRid)) {
        return Mono.empty();
    }
    final String rangeIdFromResponse =
        responseHeaders.get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID);
    if (StringUtils.isEmpty(rangeIdFromResponse)
        || rangeIdFromResponse.equals(request.requestContext.resolvedPartitionKeyRange.getId())) {
        // Response matches the resolved range (or carries no range id) — nothing to refresh.
        return Mono.empty();
    }
    return this.partitionKeyRangeCache
        .refreshAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
            request.requestContext.resolvedCollectionRid)
        .flatMap(collectionRoutingMapValueHolder -> Mono.empty());
}
/**
 * For document requests, stamps the intended-collection-rid header unless the caller
 * already supplied one, in which case the request is flagged so the SDK knows the
 * rid was passed in externally.
 */
private Mono<Void> addIntendedCollectionRid(RxDocumentServiceRequest request) {
    if (this.collectionCache != null && request.getResourceType().equals(ResourceType.Document)) {
        return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request).flatMap(documentCollectionValueHolder -> {
            if (StringUtils.isEmpty(request.getHeaders().get(INTENDED_COLLECTION_RID_HEADER))) {
                // NOTE(review): this relies on resolveCollectionAsync having populated
                // request.requestContext.resolvedCollectionRid as a side effect — the
                // resolved holder itself is unused. Confirm that contract holds.
                request.getHeaders().put(INTENDED_COLLECTION_RID_HEADER,
                    request.requestContext.resolvedCollectionRid);
            } else {
                // Header was supplied by the caller; remember that for downstream handling.
                request.intendedCollectionRidPassedIntoSDK = true;
            }
            return Mono.empty();
        });
    }
    return Mono.empty();
}
/**
 * Decides whether the outgoing request needs a session token header and, if so,
 * resolves the most specific one available (partition-local when the partition key
 * range can be resolved, otherwise the global token).
 *
 * <p>Master operations never carry a session token; non-session-consistency requests
 * (and non-read, non-batch writes without multi-write) have any caller-supplied
 * token stripped.
 */
private Mono<Void> applySessionToken(RxDocumentServiceRequest request) {
    Map<String, String> headers = request.getHeaders();
    Objects.requireNonNull(headers, "RxDocumentServiceRequest::headers is required and cannot be null");
    if (isMasterOperation(request.getResourceType(), request.getOperationType())) {
        // Master operations must not carry a session token; strip any the caller set.
        if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) {
            request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
        }
        return Mono.empty();
    }
    boolean sessionConsistency = RequestHelper.getConsistencyLevelToUse(this.gatewayServiceConfigurationReader,
        request) == ConsistencyLevel.SESSION;
    if (!Strings.isNullOrEmpty(request.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN))) {
        // Caller supplied a token: keep it only when session consistency applies and the
        // request is a read, a batch, or multi-write is enabled; otherwise strip it.
        if (!sessionConsistency ||
            (!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)){
            request.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
        }
        return Mono.empty();
    }
    // No caller-supplied token. Same eligibility check: if a token is not needed, bail out.
    if (!sessionConsistency ||
        (!request.isReadOnlyRequest() && request.getOperationType() != OperationType.Batch && !this.useMultipleWriteLocations)) {
        return Mono.empty();
    }
    if (this.collectionCache != null && this.partitionKeyRangeCache != null) {
        // Try to resolve the collection and routing map so we can attach the most
        // specific (partition-local) session token. Every fallback path below uses
        // the global token instead.
        return this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request).
            flatMap(collectionValueHolder -> {
                if (collectionValueHolder == null || collectionValueHolder.v == null) {
                    // Collection unknown — fall back to the global session token.
                    String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
                    if (!Strings.isNullOrEmpty(sessionToken)) {
                        headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
                    }
                    return Mono.empty();
                }
                return partitionKeyRangeCache.tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                    collectionValueHolder.v.getResourceId(),
                    null,
                    null).flatMap(collectionRoutingMapValueHolder -> {
                    if (collectionRoutingMapValueHolder == null || collectionRoutingMapValueHolder.v == null) {
                        // Routing map unknown — fall back to the global session token.
                        String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
                        if (!Strings.isNullOrEmpty(sessionToken)) {
                            headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
                        }
                        return Mono.empty();
                    }
                    String partitionKeyRangeId =
                        request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY_RANGE_ID);
                    PartitionKeyInternal partitionKeyInternal = request.getPartitionKeyInternal();
                    if (StringUtils.isNotEmpty(partitionKeyRangeId)) {
                        // Caller targeted an explicit range id; resolve it against the map.
                        PartitionKeyRange range =
                            collectionRoutingMapValueHolder.v.getRangeByPartitionKeyRangeId(partitionKeyRangeId);
                        request.requestContext.resolvedPartitionKeyRange = range;
                        if (request.requestContext.resolvedPartitionKeyRange == null) {
                            // Range id not in the map (possibly split); use the raw id.
                            SessionTokenHelper.setPartitionLocalSessionToken(request, partitionKeyRangeId,
                                sessionContainer);
                        } else {
                            SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer);
                        }
                    } else if (partitionKeyInternal != null) {
                        // Route by partition key value: compute the effective partition key
                        // and resolve the owning range.
                        String effectivePartitionKeyString = PartitionKeyInternalHelper
                            .getEffectivePartitionKeyString(
                                partitionKeyInternal,
                                collectionValueHolder.v.getPartitionKey());
                        PartitionKeyRange range =
                            collectionRoutingMapValueHolder.v.getRangeByEffectivePartitionKey(effectivePartitionKeyString);
                        request.requestContext.resolvedPartitionKeyRange = range;
                        SessionTokenHelper.setPartitionLocalSessionToken(request, sessionContainer);
                    } else {
                        // Neither range id nor partition key — global token fallback.
                        String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
                        if (!Strings.isNullOrEmpty(sessionToken)) {
                            headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
                        }
                    }
                    return Mono.empty();
                });
            });
    } else {
        // Caches unavailable — global token is the best we can do.
        String sessionToken = this.sessionContainer.resolveGlobalSessionToken(request);
        if (!Strings.isNullOrEmpty(sessionToken)) {
            headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, sessionToken);
        }
        return Mono.empty();
    }
}
/**
 * True when the request targets master (metadata) resources, is a non-execute
 * stored-procedure operation, or is a query-plan request.
 */
private static boolean isMasterOperation(ResourceType resourceType, OperationType operationType) {
    if (ReplicatedResourceClientUtils.isMasterResource(resourceType)) {
        return true;
    }
    if (isStoredProcedureMasterOperation(resourceType, operationType)) {
        return true;
    }
    return operationType == OperationType.QueryPlan;
}
/**
 * Stored-procedure CRUD is a master operation; only executing one
 * (ExecuteJavaScript) goes through the data path.
 */
private static boolean isStoredProcedureMasterOperation(ResourceType resourceType, OperationType operationType) {
    if (resourceType != ResourceType.StoredProcedure) {
        return false;
    }
    return operationType != OperationType.ExecuteJavaScript;
}
}
|
Actually, it was the same. This method declares a dependency on the compile classpath, which makes sure that Gradle resolves all dependencies — including included builds (which was not always guaranteed). It has no effect on the task's own compile classpath.
|
/**
 * Returns the compile classpath of the main source set. Declaring this as a task
 * input forces Gradle to resolve the classpath (building included builds) before
 * the task runs.
 */
public FileCollection getClasspath() {
    final SourceSet mainSourceSet =
        QuarkusGradleUtils.getSourceSet(getProject(), SourceSet.MAIN_SOURCE_SET_NAME);
    return mainSourceSet.getCompileClasspath();
}
|
return QuarkusGradleUtils.getSourceSet(getProject(), SourceSet.MAIN_SOURCE_SET_NAME).getCompileClasspath();
|
// Exposes the main source set's compile classpath as a task input, forcing Gradle
// to resolve it (and build included builds) before this task executes.
public FileCollection getClasspath() {
    return QuarkusGradleUtils.getSourceSet(getProject(), SourceSet.MAIN_SOURCE_SET_NAME).getCompileClasspath();
}
|
/**
 * Gradle task that bootstraps a curated Quarkus application and runs the
 * deployment-side code generators (e.g. gRPC) so generated sources land in the
 * quarkus-generated-sources (or quarkus-test-generated-sources) source set.
 */
class QuarkusGenerateCode extends QuarkusTask {

    public static final String QUARKUS_GENERATED_SOURCES = "quarkus-generated-sources";
    public static final String QUARKUS_TEST_GENERATED_SOURCES = "quarkus-test-generated-sources";
    public static final String[] CODE_GENERATION_PROVIDER = new String[] { "grpc" };
    public static final String INIT_AND_RUN = "initAndRun";

    // Directories scanned for code-generation inputs (e.g. proto files).
    private Set<Path> sourcesDirectories;
    // Callback used to register generated source directories with the build.
    private Consumer<Path> sourceRegistrar;
    // When true, generate into the test source set instead of main.
    private boolean test = false;

    public QuarkusGenerateCode() {
        super("Performs Quarkus pre-build preparations, such as sources generation");
    }

    /**
     * Bootstraps the curated application and reflectively invokes
     * {@code CodeGenerator#initAndRun} on the deployment classloader.
     *
     * <p>Fix: the previous version carried a stray {@code @CompileClasspath}
     * annotation and a stale {@code @return} javadoc left over from a removed
     * {@code getClasspath()} getter — {@code @CompileClasspath} is an input
     * annotation for property getters and is invalid on a void task action.
     *
     * @throws GradleException if bootstrap fails, the expected source set or
     *                         generator entry point is missing, or generation fails
     */
    @TaskAction
    public void prepareQuarkus() {
        getLogger().lifecycle("preparing quarkus application");

        final AppArtifact appArtifact = extension().getAppArtifact();
        appArtifact.setPaths(QuarkusGradleUtils.getOutputPaths(getProject()));
        final AppModelResolver modelResolver = extension().getAppModelResolver();
        final Properties realProperties = getBuildSystemProperties(appArtifact);

        Path buildDir = getProject().getBuildDir().toPath();
        try (CuratedApplication appCreationContext = QuarkusBootstrap.builder()
                .setBaseClassLoader(getClass().getClassLoader())
                .setAppModelResolver(modelResolver)
                .setTargetDirectory(buildDir)
                .setBaseName(extension().finalName())
                .setBuildSystemProperties(realProperties)
                .setAppArtifact(appArtifact)
                .setLocalProjectDiscovery(false)
                .setIsolateDeployment(true)
                .build().bootstrap()) {

            final Convention convention = getProject().getConvention();
            JavaPluginConvention javaConvention = convention.findPlugin(JavaPluginConvention.class);
            if (javaConvention != null) {
                final String generateSourcesDir = test ? QUARKUS_TEST_GENERATED_SOURCES : QUARKUS_GENERATED_SOURCES;
                final SourceSet generatedSources = javaConvention.getSourceSets().findByName(generateSourcesDir);
                // Guard: findByName returns null when the source set is absent; fail with a
                // clear message instead of an NPE on the next line.
                if (generatedSources == null) {
                    throw new GradleException("Missing source set: " + generateSourcesDir);
                }
                List<Path> paths = new ArrayList<>();
                generatedSources.getOutput()
                    .filter(f -> f.getName().equals(generateSourcesDir))
                    .forEach(f -> paths.add(f.toPath()));
                if (paths.isEmpty()) {
                    throw new GradleException("Failed to create quarkus-generated-sources");
                }

                getLogger().debug("Will trigger preparing sources for source directory: {} buildDir: {}",
                    sourcesDirectories, getProject().getBuildDir().getAbsolutePath());

                // The generator must run on the deployment classloader, so look the
                // entry point up reflectively rather than calling it directly.
                QuarkusClassLoader deploymentClassLoader = appCreationContext.createDeploymentClassLoader();
                Class<?> codeGenerator = deploymentClassLoader.loadClass(CodeGenerator.class.getName());
                Optional<Method> initAndRun = Arrays.stream(codeGenerator.getMethods())
                    .filter(m -> m.getName().equals(INIT_AND_RUN))
                    .findAny();
                if (!initAndRun.isPresent()) {
                    throw new GradleException("Failed to find " + INIT_AND_RUN + " method in " + CodeGenerator.class.getName());
                }
                initAndRun.get().invoke(null, deploymentClassLoader,
                    sourcesDirectories,
                    paths.iterator().next(),
                    buildDir,
                    sourceRegistrar,
                    appCreationContext.getAppModel(),
                    realProperties);
            }
        } catch (BootstrapException | IllegalAccessException | InvocationTargetException | ClassNotFoundException e) {
            throw new GradleException("Failed to generate sources in the QuarkusPrepare task", e);
        }
    }

    public void setSourcesDirectories(Set<Path> sourcesDirectories) {
        this.sourcesDirectories = sourcesDirectories;
    }

    public void setSourceRegistrar(Consumer<Path> sourceRegistrar) {
        this.sourceRegistrar = sourceRegistrar;
    }

    public void setTest(boolean test) {
        this.test = test;
    }
}
|
/**
 * Gradle task that bootstraps a curated Quarkus application and runs the
 * deployment-side code generators (e.g. gRPC) so generated sources land in the
 * quarkus-generated-sources (or quarkus-test-generated-sources) source set.
 */
class QuarkusGenerateCode extends QuarkusTask {

    public static final String QUARKUS_GENERATED_SOURCES = "quarkus-generated-sources";
    public static final String QUARKUS_TEST_GENERATED_SOURCES = "quarkus-test-generated-sources";
    public static final String[] CODE_GENERATION_PROVIDER = new String[] { "grpc" };
    public static final String INIT_AND_RUN = "initAndRun";

    // Directories scanned for code-generation inputs (e.g. proto files).
    private Set<Path> sourcesDirectories;
    // Callback used to register generated source directories with the build.
    private Consumer<Path> sourceRegistrar;
    // When true, generate into the test source set instead of main.
    private boolean test = false;

    public QuarkusGenerateCode() {
        super("Performs Quarkus pre-build preparations, such as sources generation");
    }

    /**
     * Creates a dependency on classpath resolution. This makes sure included builds
     * are built before this task runs.
     *
     * NOTE(review): this javadoc (and the {@code @CompileClasspath} annotation below)
     * appears to be left over from a removed {@code getClasspath()} getter —
     * {@code @CompileClasspath} is an input annotation for property getters, not for
     * a void task action. Confirm and clean up.
     */
    @CompileClasspath
    @TaskAction
    public void prepareQuarkus() {
        getLogger().lifecycle("preparing quarkus application");

        final AppArtifact appArtifact = extension().getAppArtifact();
        appArtifact.setPaths(QuarkusGradleUtils.getOutputPaths(getProject()));
        final AppModelResolver modelResolver = extension().getAppModelResolver();
        final Properties realProperties = getBuildSystemProperties(appArtifact);

        Path buildDir = getProject().getBuildDir().toPath();
        try (CuratedApplication appCreationContext = QuarkusBootstrap.builder()
                .setBaseClassLoader(getClass().getClassLoader())
                .setAppModelResolver(modelResolver)
                .setTargetDirectory(buildDir)
                .setBaseName(extension().finalName())
                .setBuildSystemProperties(realProperties)
                .setAppArtifact(appArtifact)
                .setLocalProjectDiscovery(false)
                .setIsolateDeployment(true)
                .build().bootstrap()) {

            final Convention convention = getProject().getConvention();
            JavaPluginConvention javaConvention = convention.findPlugin(JavaPluginConvention.class);
            if (javaConvention != null) {
                final String generateSourcesDir = test ? QUARKUS_TEST_GENERATED_SOURCES : QUARKUS_GENERATED_SOURCES;
                // NOTE(review): findByName may return null, which would NPE below — verify
                // the source set is always registered before this task runs.
                final SourceSet generatedSources = javaConvention.getSourceSets().findByName(generateSourcesDir);
                List<Path> paths = new ArrayList<>();
                generatedSources.getOutput()
                    .filter(f -> f.getName().equals(generateSourcesDir))
                    .forEach(f -> paths.add(f.toPath()));
                if (paths.isEmpty()) {
                    throw new GradleException("Failed to create quarkus-generated-sources");
                }

                getLogger().debug("Will trigger preparing sources for source directory: {} buildDir: {}",
                    sourcesDirectories, getProject().getBuildDir().getAbsolutePath());

                // The generator must run on the deployment classloader, so look the
                // entry point up reflectively rather than calling it directly.
                QuarkusClassLoader deploymentClassLoader = appCreationContext.createDeploymentClassLoader();
                Class<?> codeGenerator = deploymentClassLoader.loadClass(CodeGenerator.class.getName());
                Optional<Method> initAndRun = Arrays.stream(codeGenerator.getMethods())
                    .filter(m -> m.getName().equals(INIT_AND_RUN))
                    .findAny();
                if (!initAndRun.isPresent()) {
                    throw new GradleException("Failed to find " + INIT_AND_RUN + " method in " + CodeGenerator.class.getName());
                }
                initAndRun.get().invoke(null, deploymentClassLoader,
                    sourcesDirectories,
                    paths.iterator().next(),
                    buildDir,
                    sourceRegistrar,
                    appCreationContext.getAppModel(),
                    realProperties);
            }
        } catch (BootstrapException | IllegalAccessException | InvocationTargetException | ClassNotFoundException e) {
            throw new GradleException("Failed to generate sources in the QuarkusPrepare task", e);
        }
    }

    public void setSourcesDirectories(Set<Path> sourcesDirectories) {
        this.sourcesDirectories = sourcesDirectories;
    }

    public void setSourceRegistrar(Consumer<Path> sourceRegistrar) {
        this.sourceRegistrar = sourceRegistrar;
    }

    public void setTest(boolean test) {
        this.test = test;
    }
}
|
Why are UniqueKey and PrimaryKey handled differently here — the UNIQUE_KEYS test expects BITMAP value columns to analyze successfully, while the PRIMARY_KEYS test expects an AnalysisException?
|
// A BITMAP value column on a PRIMARY_KEYS table is expected to be rejected
// during analysis.
public void testBitmapWithPrimaryKey() throws Exception {
    ColumnDef col3 = new ColumnDef("col3", new TypeDef(ScalarType.createType(PrimitiveType.BITMAP)));
    cols.add(col3);
    CreateTableStmt stmt = new CreateTableStmt(false, false, tblNameNoDb, cols, "olap",
        new KeysDesc(KeysType.PRIMARY_KEYS, colsName), null,
        hashDistributioin, null, null, "");
    // Register the expectation before analyze(), which should throw.
    expectedEx.expect(AnalysisException.class);
    stmt.analyze(analyzer);
}
|
expectedEx.expect(AnalysisException.class);
|
// No exception expectation here: a BITMAP value column on a PRIMARY_KEYS table is
// expected to analyze successfully — presumably this variant reflects relaxed
// validation; confirm against the analyzer's current rules.
public void testBitmapWithPrimaryKey() throws Exception {
    ColumnDef col3 = new ColumnDef("col3", new TypeDef(ScalarType.createType(PrimitiveType.BITMAP)));
    cols.add(col3);
    CreateTableStmt stmt = new CreateTableStmt(false, false, tblNameNoDb, cols, "olap",
        new KeysDesc(KeysType.PRIMARY_KEYS, colsName), null,
        hashDistributioin, null, null, "");
    stmt.analyze(analyzer);
}
|
/**
 * Unit tests for {@code CreateTableStmt.analyze}: valid statements, rollups,
 * default-db resolution, and rejection of invalid key/aggregate combinations for
 * BITMAP / HLL / PERCENTILE / ARRAY columns across the key types.
 *
 * <p>Fixes: removed a duplicated {@code @Test} annotation (annotations are not
 * repeatable — it did not compile) and corrected the misspelled field
 * {@code hashDistributioin} to {@code hashDistribution}.
 */
class CreateTableStmtTest {
    private static final Logger LOG = LoggerFactory.getLogger(CreateTableStmtTest.class);

    // Shared fixture, rebuilt per test by setUp().
    private TableName tblName;
    private TableName tblNameNoDb;
    private List<ColumnDef> cols;
    private List<ColumnDef> invalidCols;
    private List<String> colsName;
    private List<String> invalidColsName;
    private HashDistributionDesc hashDistribution;
    private Analyzer analyzer;

    @Mocked
    private Auth auth;
    @Mocked
    private ConnectContext ctx;

    @Rule
    public ExpectedException expectedEx = ExpectedException.none();

    @Before
    public void setUp() {
        analyzer = AccessTestUtil.fetchAdminAnalyzer(false);
        tblName = new TableName("db1", "table1");
        tblNameNoDb = new TableName("", "table1");
        cols = Lists.newArrayList();
        cols.add(new ColumnDef("col1", new TypeDef(ScalarType.createType(PrimitiveType.INT))));
        cols.add(new ColumnDef("col2", new TypeDef(ScalarType.createCharType(10))));
        colsName = Lists.newArrayList();
        colsName.add("col1");
        colsName.add("col2");
        // invalidCols deliberately duplicates "col2" to exercise duplicate-column checks.
        invalidCols = Lists.newArrayList();
        invalidCols.add(new ColumnDef("col1", new TypeDef(ScalarType.createType(PrimitiveType.INT))));
        invalidCols.add(new ColumnDef("col2", new TypeDef(ScalarType.createCharType(10))));
        invalidCols.add(new ColumnDef("col2", new TypeDef(ScalarType.createCharType(10))));
        invalidColsName = Lists.newArrayList();
        invalidColsName.add("col1");
        invalidColsName.add("col2");
        invalidColsName.add("col2");
        hashDistribution = new HashDistributionDesc(10, Lists.newArrayList("col1"));
        MockedAuth.mockedAuth(auth);
        MockedAuth.mockedConnectContext(ctx, "root", "192.168.1.1");
    }

    @Test
    public void testNormal() throws UserException {
        CreateTableStmt stmt = new CreateTableStmt(false, false, tblName, cols, "olap",
            new KeysDesc(KeysType.AGG_KEYS, colsName), null,
            hashDistribution, null, null, "");
        stmt.analyze(analyzer);
        Assert.assertEquals("testCluster:db1", stmt.getDbName());
        Assert.assertEquals("table1", stmt.getTableName());
        Assert.assertNull(stmt.getProperties());
    }

    @Test
    public void testCreateTableWithRollup() throws UserException {
        List<AlterClause> ops = Lists.newArrayList();
        ops.add(new AddRollupClause("index1", Lists.newArrayList("col1", "col2"), null, "table1", null));
        ops.add(new AddRollupClause("index2", Lists.newArrayList("col2", "col3"), null, "table1", null));
        CreateTableStmt stmt = new CreateTableStmt(false, false, tblName, cols, "olap",
            new KeysDesc(KeysType.AGG_KEYS, colsName), null,
            hashDistribution, null, null, "", ops);
        stmt.analyze(analyzer);
        Assert.assertEquals("testCluster:db1", stmt.getDbName());
        Assert.assertEquals("table1", stmt.getTableName());
        Assert.assertNull(stmt.getProperties());
        Assert.assertTrue(stmt.toSql()
            .contains("rollup( `index1` (`col1`, `col2`) FROM `table1`, `index2` (`col2`, `col3`) FROM `table1`)"));
    }

    @Test
    public void testDefaultDbNormal() throws UserException {
        // Empty table-name db: analysis should fall back to the analyzer's default db.
        CreateTableStmt stmt = new CreateTableStmt(false, false, tblNameNoDb, cols, "olap",
            new KeysDesc(KeysType.AGG_KEYS, colsName), null,
            hashDistribution, null, null, "");
        stmt.analyze(analyzer);
        Assert.assertEquals("testDb", stmt.getDbName());
        Assert.assertEquals("table1", stmt.getTableName());
        Assert.assertNull(stmt.getPartitionDesc());
        Assert.assertNull(stmt.getProperties());
    }

    @Test(expected = AnalysisException.class)
    public void testNoDb(@Mocked Analyzer noDbAnalyzer) throws UserException {
        // Analyzer with no default db: analysis must fail.
        new Expectations() {
            {
                noDbAnalyzer.getDefaultDb();
                minTimes = 0;
                result = "";

                noDbAnalyzer.getClusterName();
                minTimes = 0;
                result = "cluster";
            }
        };
        CreateTableStmt stmt = new CreateTableStmt(false, false, tblNameNoDb, cols, "olap",
            new KeysDesc(KeysType.AGG_KEYS, colsName), null,
            hashDistribution, null, null, "");
        stmt.analyze(noDbAnalyzer);
    }

    @Test(expected = AnalysisException.class)
    public void testEmptyCol() throws UserException {
        List<ColumnDef> emptyCols = Lists.newArrayList();
        CreateTableStmt stmt = new CreateTableStmt(false, false, tblNameNoDb, emptyCols, "olap",
            new KeysDesc(), null,
            hashDistribution, null, null, "");
        stmt.analyze(analyzer);
    }

    @Test(expected = AnalysisException.class)
    public void testDupCol() throws UserException {
        CreateTableStmt stmt = new CreateTableStmt(false, false, tblNameNoDb, invalidCols, "olap",
            new KeysDesc(KeysType.AGG_KEYS, invalidColsName), null,
            hashDistribution, null, null, "");
        stmt.analyze(analyzer);
    }

    @Test
    public void testBitmapKey() throws Exception {
        ColumnDef bitmap = new ColumnDef("col3", new TypeDef(ScalarType.createType(PrimitiveType.BITMAP)));
        cols.add(bitmap);
        colsName.add("col3");
        CreateTableStmt stmt = new CreateTableStmt(false, false, tblNameNoDb, cols, "olap",
            new KeysDesc(KeysType.AGG_KEYS, colsName), null,
            hashDistribution, null, null, "");
        expectedEx.expect(AnalysisException.class);
        expectedEx.expectMessage("Invalid data type of key column 'col3': 'BITMAP'");
        stmt.analyze(analyzer);
        // Unreachable: analyze() above throws the expected exception, and JUnit
        // recreates the fixture per test anyway.
        cols.remove(bitmap);
    }

    @Test
    public void testHLLKey() throws Exception {
        ColumnDef hll = new ColumnDef("col3", new TypeDef(ScalarType.createType(PrimitiveType.HLL)));
        cols.add(hll);
        colsName.add("col3");
        CreateTableStmt stmt = new CreateTableStmt(false, false, tblNameNoDb, cols, "olap",
            new KeysDesc(KeysType.AGG_KEYS, colsName), null,
            hashDistribution, null, null, "");
        expectedEx.expect(AnalysisException.class);
        expectedEx.expectMessage("Invalid data type of key column 'col3': 'HLL'");
        stmt.analyze(analyzer);
    }

    @Test
    public void testPercentileKey() throws Exception {
        ColumnDef bitmap = new ColumnDef("col3", new TypeDef(ScalarType.createType(PrimitiveType.PERCENTILE)));
        cols.add(bitmap);
        colsName.add("col3");
        CreateTableStmt stmt = new CreateTableStmt(false, false, tblNameNoDb, cols, "olap",
            new KeysDesc(KeysType.AGG_KEYS, colsName), null,
            hashDistribution, null, null, "");
        expectedEx.expect(AnalysisException.class);
        expectedEx.expectMessage("Invalid data type of key column 'col3': 'PERCENTILE'");
        stmt.analyze(analyzer);
    }

    @Test
    public void testArrayKey() throws Exception {
        ColumnDef col3 = new ColumnDef("col3", new TypeDef(new ArrayType(ScalarType.createType(PrimitiveType.INT))));
        cols.add(col3);
        colsName.add("col3");
        CreateTableStmt stmt = new CreateTableStmt(false, false, tblNameNoDb, cols, "olap",
            new KeysDesc(KeysType.AGG_KEYS, colsName), null,
            hashDistribution, null, null, "");
        expectedEx.expect(AnalysisException.class);
        expectedEx.expectMessage("Invalid data type of key column 'col3': 'ARRAY<INT>'");
        stmt.analyze(analyzer);
    }

    @Test
    public void testBitmapWithoutAggregateMethod() throws Exception {
        ColumnDef col3 = new ColumnDef("col3", new TypeDef(ScalarType.createType(PrimitiveType.BITMAP)));
        cols.add(col3);
        CreateTableStmt stmt = new CreateTableStmt(false, false, tblNameNoDb, cols, "olap",
            new KeysDesc(KeysType.AGG_KEYS, colsName), null,
            hashDistribution, null, null, "");
        expectedEx.expect(AnalysisException.class);
        expectedEx.expectMessage("AGG_KEYS table should specify aggregate type for non-key column[col3]");
        stmt.analyze(analyzer);
    }

    @Test
    public void testBitmapWithDuplicateKey() throws Exception {
        ColumnDef col3 = new ColumnDef("col3", new TypeDef(ScalarType.createType(PrimitiveType.BITMAP)));
        cols.add(col3);
        CreateTableStmt stmt = new CreateTableStmt(false, false, tblNameNoDb, cols, "olap",
            new KeysDesc(KeysType.DUP_KEYS, colsName), null,
            hashDistribution, null, null, "");
        expectedEx.expect(AnalysisException.class);
        expectedEx.expectMessage("No aggregate function specified for 'col3'");
        stmt.analyze(analyzer);
    }

    @Test
    public void testBitmapWithUniqueKey() throws Exception {
        // BITMAP value columns are allowed on UNIQUE_KEYS tables.
        ColumnDef col3 = new ColumnDef("col3", new TypeDef(ScalarType.createType(PrimitiveType.BITMAP)));
        cols.add(col3);
        CreateTableStmt stmt = new CreateTableStmt(false, false, tblNameNoDb, cols, "olap",
            new KeysDesc(KeysType.UNIQUE_KEYS, colsName), null,
            hashDistribution, null, null, "");
        stmt.analyze(analyzer);
    }

    @Test
    public void testHLLWithoutAggregateMethod() throws Exception {
        ColumnDef col3 = new ColumnDef("col3", new TypeDef(ScalarType.createType(PrimitiveType.HLL)));
        cols.add(col3);
        CreateTableStmt stmt = new CreateTableStmt(false, false, tblNameNoDb, cols, "olap",
            new KeysDesc(KeysType.DUP_KEYS, colsName), null,
            hashDistribution, null, null, "");
        expectedEx.expect(AnalysisException.class);
        expectedEx.expectMessage("No aggregate function specified for 'col3'");
        stmt.analyze(analyzer);
    }

    @Test
    public void testPercentileWithoutAggregateMethod() throws Exception {
        ColumnDef percentile = new ColumnDef("col3", new TypeDef(ScalarType.createType(PrimitiveType.PERCENTILE)));
        cols.add(percentile);
        CreateTableStmt stmt = new CreateTableStmt(false, false, tblNameNoDb, cols, "olap",
            new KeysDesc(KeysType.DUP_KEYS, colsName), null,
            hashDistribution, null, null, "");
        expectedEx.expect(AnalysisException.class);
        expectedEx.expectMessage("No aggregate function specified for 'col3'");
        stmt.analyze(analyzer);
    }

    @Test
    public void testInvalidAggregateFuncForBitmap() throws Exception {
        ColumnDef col3 = new ColumnDef("col3", new TypeDef(ScalarType.createType(PrimitiveType.BITMAP)));
        col3.setAggregateType(AggregateType.SUM);
        cols.add(col3);
        CreateTableStmt stmt = new CreateTableStmt(false, false, tblNameNoDb, cols, "olap",
            new KeysDesc(KeysType.AGG_KEYS, colsName), null,
            hashDistribution, null, null, "");
        expectedEx.expect(AnalysisException.class);
        expectedEx.expectMessage("Invalid aggregate function 'SUM' for 'col3'");
        stmt.analyze(analyzer);
    }

    @Test
    public void testInvalidAggregateFuncForHLL() throws Exception {
        ColumnDef col3 = new ColumnDef("col3", new TypeDef(ScalarType.createType(PrimitiveType.HLL)));
        col3.setAggregateType(AggregateType.SUM);
        cols.add(col3);
        CreateTableStmt stmt = new CreateTableStmt(false, false, tblNameNoDb, cols, "olap",
            new KeysDesc(KeysType.AGG_KEYS, colsName), null,
            hashDistribution, null, null, "");
        expectedEx.expect(AnalysisException.class);
        expectedEx.expectMessage("Invalid aggregate function 'SUM' for 'col3'");
        stmt.analyze(analyzer);
    }

    @Test
    public void testInvalidAggregateFuncForArray() throws Exception {
        ColumnDef col3 = new ColumnDef("col3", new TypeDef(new ArrayType(ScalarType.createType(PrimitiveType.INT))));
        col3.setAggregateType(AggregateType.SUM);
        cols.add(col3);
        CreateTableStmt stmt = new CreateTableStmt(false, false, tblNameNoDb, cols, "olap",
            new KeysDesc(KeysType.AGG_KEYS, colsName), null,
            hashDistribution, null, null, "");
        expectedEx.expect(AnalysisException.class);
        expectedEx.expectMessage("Invalid aggregate function 'SUM' for 'col3'");
        stmt.analyze(analyzer);
    }

    @Test
    public void testPrimaryKeyNullable() throws Exception {
        // Primary key columns must not be nullable.
        cols = Lists.newArrayList();
        cols.add(new ColumnDef("col1", new TypeDef(ScalarType.createType(PrimitiveType.INT)), true, null, true,
            DefaultValueDef.NOT_SET, ""));
        colsName = Lists.newArrayList();
        colsName.add("col1");
        CreateTableStmt stmt = new CreateTableStmt(false, false, tblName, cols, "olap",
            new KeysDesc(KeysType.PRIMARY_KEYS, colsName), null,
            hashDistribution, null, null, "");
        expectedEx.expect(AnalysisException.class);
        stmt.analyze(analyzer);
    }

    @Test
    public void testPrimaryKeyChar() throws Exception {
        // CHAR is not a valid primary key column type.
        cols = Lists.newArrayList();
        cols.add(new ColumnDef("col1", new TypeDef(ScalarType.createCharType(10)), true, null, false,
            DefaultValueDef.NOT_SET, ""));
        colsName = Lists.newArrayList();
        colsName.add("col1");
        CreateTableStmt stmt = new CreateTableStmt(false, false, tblName, cols, "olap",
            new KeysDesc(KeysType.PRIMARY_KEYS, colsName), null,
            hashDistribution, null, null, "");
        expectedEx.expect(AnalysisException.class);
        stmt.analyze(analyzer);
    }
}
|
class CreateTableStmtTest {
private static final Logger LOG = LoggerFactory.getLogger(CreateTableStmtTest.class);
private TableName tblName;
private TableName tblNameNoDb;
private List<ColumnDef> cols;
private List<ColumnDef> invalidCols;
private List<String> colsName;
private List<String> invalidColsName;
private HashDistributionDesc hashDistributioin;
private Analyzer analyzer;
@Mocked
private Auth auth;
@Mocked
private ConnectContext ctx;
@Rule
public ExpectedException expectedEx = ExpectedException.none();
// Rebuilds the shared fixture before each test. Note: unlike the sibling test
// class above, col2 here is VARCHAR(10) (createVarcharType) rather than CHAR(10)
// — presumably deliberate; confirm before unifying.
@Before
public void setUp() {
    analyzer = AccessTestUtil.fetchAdminAnalyzer(false);
    tblName = new TableName("db1", "table1");
    tblNameNoDb = new TableName("", "table1");
    cols = Lists.newArrayList();
    cols.add(new ColumnDef("col1", new TypeDef(ScalarType.createType(PrimitiveType.INT))));
    cols.add(new ColumnDef("col2", new TypeDef(ScalarType.createVarcharType(10))));
    colsName = Lists.newArrayList();
    colsName.add("col1");
    colsName.add("col2");
    // invalidCols deliberately duplicates "col2" to exercise duplicate-column checks.
    invalidCols = Lists.newArrayList();
    invalidCols.add(new ColumnDef("col1", new TypeDef(ScalarType.createType(PrimitiveType.INT))));
    invalidCols.add(new ColumnDef("col2", new TypeDef(ScalarType.createCharType(10))));
    invalidCols.add(new ColumnDef("col2", new TypeDef(ScalarType.createCharType(10))));
    invalidColsName = Lists.newArrayList();
    invalidColsName.add("col1");
    invalidColsName.add("col2");
    invalidColsName.add("col2");
    hashDistributioin = new HashDistributionDesc(10, Lists.newArrayList("col1"));
    MockedAuth.mockedAuth(auth);
    MockedAuth.mockedConnectContext(ctx, "root", "192.168.1.1");
}
// Happy path: a valid AGG_KEYS statement analyzes and resolves db/table names.
@Test
public void testNormal() throws UserException {
    final KeysDesc aggKeys = new KeysDesc(KeysType.AGG_KEYS, colsName);
    final CreateTableStmt createStmt = new CreateTableStmt(false, false, tblName, cols, "olap",
        aggKeys, null, hashDistributioin, null, null, "");
    createStmt.analyze(analyzer);
    Assert.assertEquals("testCluster:db1", createStmt.getDbName());
    Assert.assertEquals("table1", createStmt.getTableName());
    Assert.assertNull(createStmt.getProperties());
}
// A statement with two rollup clauses analyzes and round-trips them through toSql().
@Test
public void testCreateTableWithRollup() throws UserException {
    final List<AlterClause> rollups = Lists.newArrayList(
        new AddRollupClause("index1", Lists.newArrayList("col1", "col2"), null, "table1", null),
        new AddRollupClause("index2", Lists.newArrayList("col2", "col3"), null, "table1", null));
    final CreateTableStmt createStmt = new CreateTableStmt(false, false, tblName, cols, "olap",
        new KeysDesc(KeysType.AGG_KEYS, colsName), null,
        hashDistributioin, null, null, "", rollups);
    createStmt.analyze(analyzer);
    Assert.assertEquals("testCluster:db1", createStmt.getDbName());
    Assert.assertEquals("table1", createStmt.getTableName());
    Assert.assertNull(createStmt.getProperties());
    Assert.assertTrue(createStmt.toSql()
        .contains("rollup( `index1` (`col1`, `col2`) FROM `table1`, `index2` (`col2`, `col3`) FROM `table1`)"));
}
@Test
public void testDefaultDbNormal() throws UserException {
CreateTableStmt stmt = new CreateTableStmt(false, false, tblNameNoDb, cols, "olap",
new KeysDesc(KeysType.AGG_KEYS, colsName), null,
hashDistributioin, null, null, "");
stmt.analyze(analyzer);
Assert.assertEquals("testDb", stmt.getDbName());
Assert.assertEquals("table1", stmt.getTableName());
Assert.assertNull(stmt.getPartitionDesc());
Assert.assertNull(stmt.getProperties());
}
@Test(expected = AnalysisException.class)
public void testNoDb(@Mocked Analyzer noDbAnalyzer) throws UserException {
new Expectations() {
{
noDbAnalyzer.getDefaultDb();
minTimes = 0;
result = "";
noDbAnalyzer.getClusterName();
minTimes = 0;
result = "cluster";
}
};
CreateTableStmt stmt = new CreateTableStmt(false, false, tblNameNoDb, cols, "olap",
new KeysDesc(KeysType.AGG_KEYS, colsName), null,
hashDistributioin, null, null, "");
stmt.analyze(noDbAnalyzer);
}
@Test(expected = AnalysisException.class)
public void testEmptyCol() throws UserException {
List<ColumnDef> emptyCols = Lists.newArrayList();
CreateTableStmt stmt = new CreateTableStmt(false, false, tblNameNoDb, emptyCols, "olap",
new KeysDesc(), null,
hashDistributioin, null, null, "");
stmt.analyze(analyzer);
}
@Test(expected = AnalysisException.class)
public void testDupCol() throws UserException {
CreateTableStmt stmt = new CreateTableStmt(false, false, tblNameNoDb, invalidCols, "olap",
new KeysDesc(KeysType.AGG_KEYS, invalidColsName), null,
hashDistributioin, null, null, "");
stmt.analyze(analyzer);
}
@Test
public void testBitmapKey() throws Exception {
ColumnDef bitmap = new ColumnDef("col3", new TypeDef(ScalarType.createType(PrimitiveType.BITMAP)));
cols.add(bitmap);
colsName.add("col3");
CreateTableStmt stmt = new CreateTableStmt(false, false, tblNameNoDb, cols, "olap",
new KeysDesc(KeysType.AGG_KEYS, colsName), null,
hashDistributioin, null, null, "");
expectedEx.expect(AnalysisException.class);
expectedEx.expectMessage("Invalid data type of key column 'col3': 'BITMAP'");
stmt.analyze(analyzer);
cols.remove(bitmap);
}
@Test
public void testHLLKey() throws Exception {
ColumnDef hll = new ColumnDef("col3", new TypeDef(ScalarType.createType(PrimitiveType.HLL)));
cols.add(hll);
colsName.add("col3");
CreateTableStmt stmt = new CreateTableStmt(false, false, tblNameNoDb, cols, "olap",
new KeysDesc(KeysType.AGG_KEYS, colsName), null,
hashDistributioin, null, null, "");
expectedEx.expect(AnalysisException.class);
expectedEx.expectMessage("Invalid data type of key column 'col3': 'HLL'");
stmt.analyze(analyzer);
}
@Test
public void testPercentileKey() throws Exception {
ColumnDef bitmap = new ColumnDef("col3", new TypeDef(ScalarType.createType(PrimitiveType.PERCENTILE)));
cols.add(bitmap);
colsName.add("col3");
CreateTableStmt stmt = new CreateTableStmt(false, false, tblNameNoDb, cols, "olap",
new KeysDesc(KeysType.AGG_KEYS, colsName), null,
hashDistributioin, null, null, "");
expectedEx.expect(AnalysisException.class);
expectedEx.expectMessage("Invalid data type of key column 'col3': 'PERCENTILE'");
stmt.analyze(analyzer);
}
@Test
public void testArrayKey() throws Exception {
ColumnDef col3 = new ColumnDef("col3", new TypeDef(new ArrayType(ScalarType.createType(PrimitiveType.INT))));
cols.add(col3);
colsName.add("col3");
CreateTableStmt stmt = new CreateTableStmt(false, false, tblNameNoDb, cols, "olap",
new KeysDesc(KeysType.AGG_KEYS, colsName), null,
hashDistributioin, null, null, "");
expectedEx.expect(AnalysisException.class);
expectedEx.expectMessage("Invalid data type of key column 'col3': 'ARRAY<INT>'");
stmt.analyze(analyzer);
}
@Test
public void testBitmapWithoutAggregateMethod() throws Exception {
ColumnDef col3 = new ColumnDef("col3", new TypeDef(ScalarType.createType(PrimitiveType.BITMAP)));
cols.add(col3);
CreateTableStmt stmt = new CreateTableStmt(false, false, tblNameNoDb, cols, "olap",
new KeysDesc(KeysType.AGG_KEYS, colsName), null,
hashDistributioin, null, null, "");
expectedEx.expect(AnalysisException.class);
expectedEx.expectMessage("AGG_KEYS table should specify aggregate type for non-key column[col3]");
stmt.analyze(analyzer);
}
@Test
@Test
public void testBitmapWithUniqueKey() throws Exception {
ColumnDef col3 = new ColumnDef("col3", new TypeDef(ScalarType.createType(PrimitiveType.BITMAP)));
cols.add(col3);
CreateTableStmt stmt = new CreateTableStmt(false, false, tblNameNoDb, cols, "olap",
new KeysDesc(KeysType.UNIQUE_KEYS, colsName), null,
hashDistributioin, null, null, "");
stmt.analyze(analyzer);
}
@Test
public void testBitmapWithDuplicateKey() throws Exception {
ColumnDef col3 = new ColumnDef("col3", new TypeDef(ScalarType.createType(PrimitiveType.BITMAP)));
cols.add(col3);
CreateTableStmt stmt = new CreateTableStmt(false, false, tblNameNoDb, cols, "olap",
new KeysDesc(KeysType.DUP_KEYS, colsName), null,
hashDistributioin, null, null, "");
expectedEx.expect(AnalysisException.class);
expectedEx.expectMessage("No aggregate function specified for 'col3'");
stmt.analyze(analyzer);
}
@Test
public void testHLLWithoutAggregateMethod() throws Exception {
ColumnDef col3 = new ColumnDef("col3", new TypeDef(ScalarType.createType(PrimitiveType.HLL)));
cols.add(col3);
CreateTableStmt stmt = new CreateTableStmt(false, false, tblNameNoDb, cols, "olap",
new KeysDesc(KeysType.DUP_KEYS, colsName), null,
hashDistributioin, null, null, "");
expectedEx.expect(AnalysisException.class);
expectedEx.expectMessage("No aggregate function specified for 'col3'");
stmt.analyze(analyzer);
}
@Test
public void testPercentileWithoutAggregateMethod() throws Exception {
ColumnDef percentile = new ColumnDef("col3", new TypeDef(ScalarType.createType(PrimitiveType.PERCENTILE)));
cols.add(percentile);
CreateTableStmt stmt = new CreateTableStmt(false, false, tblNameNoDb, cols, "olap",
new KeysDesc(KeysType.DUP_KEYS, colsName), null,
hashDistributioin, null, null, "");
expectedEx.expect(AnalysisException.class);
expectedEx.expectMessage("No aggregate function specified for 'col3'");
stmt.analyze(analyzer);
}
@Test
public void testInvalidAggregateFuncForBitmap() throws Exception {
ColumnDef col3 = new ColumnDef("col3", new TypeDef(ScalarType.createType(PrimitiveType.BITMAP)));
col3.setAggregateType(AggregateType.SUM);
cols.add(col3);
CreateTableStmt stmt = new CreateTableStmt(false, false, tblNameNoDb, cols, "olap",
new KeysDesc(KeysType.AGG_KEYS, colsName), null,
hashDistributioin, null, null, "");
expectedEx.expect(AnalysisException.class);
expectedEx.expectMessage("Invalid aggregate function 'SUM' for 'col3'");
stmt.analyze(analyzer);
}
@Test
public void testInvalidAggregateFuncForHLL() throws Exception {
ColumnDef col3 = new ColumnDef("col3", new TypeDef(ScalarType.createType(PrimitiveType.HLL)));
col3.setAggregateType(AggregateType.SUM);
cols.add(col3);
CreateTableStmt stmt = new CreateTableStmt(false, false, tblNameNoDb, cols, "olap",
new KeysDesc(KeysType.AGG_KEYS, colsName), null,
hashDistributioin, null, null, "");
expectedEx.expect(AnalysisException.class);
expectedEx.expectMessage("Invalid aggregate function 'SUM' for 'col3'");
stmt.analyze(analyzer);
}
@Test
public void testInvalidAggregateFuncForArray() throws Exception {
ColumnDef col3 = new ColumnDef("col3", new TypeDef(new ArrayType(ScalarType.createType(PrimitiveType.INT))));
col3.setAggregateType(AggregateType.SUM);
cols.add(col3);
CreateTableStmt stmt = new CreateTableStmt(false, false, tblNameNoDb, cols, "olap",
new KeysDesc(KeysType.AGG_KEYS, colsName), null,
hashDistributioin, null, null, "");
expectedEx.expect(AnalysisException.class);
expectedEx.expectMessage("Invalid aggregate function 'SUM' for 'col3'");
stmt.analyze(analyzer);
}
@Test
public void testPrimaryKeyNullable() throws Exception {
cols = Lists.newArrayList();
cols.add(new ColumnDef("col1", new TypeDef(ScalarType.createType(PrimitiveType.INT)), true, null, true,
DefaultValueDef.NOT_SET, ""));
colsName = Lists.newArrayList();
colsName.add("col1");
CreateTableStmt stmt = new CreateTableStmt(false, false, tblName, cols, "olap",
new KeysDesc(KeysType.PRIMARY_KEYS, colsName), null,
hashDistributioin, null, null, "");
expectedEx.expect(AnalysisException.class);
stmt.analyze(analyzer);
}
@Test
public void testPrimaryKeyChar() throws Exception {
cols = Lists.newArrayList();
cols.add(new ColumnDef("col1", new TypeDef(ScalarType.createCharType(10)), true, null, false,
DefaultValueDef.NOT_SET, ""));
colsName = Lists.newArrayList();
colsName.add("col1");
CreateTableStmt stmt = new CreateTableStmt(false, false, tblName, cols, "olap",
new KeysDesc(KeysType.PRIMARY_KEYS, colsName), null,
hashDistributioin, null, null, "");
expectedEx.expect(AnalysisException.class);
stmt.analyze(analyzer);
}
}
|
The ideal way is to keep a default configuration and return the missing values from it.
|
/**
 * Looks up and Base64-decodes the token text registered under {@code key}.
 *
 * <p>Returns {@code null} for a {@code null} key, and the key itself
 * (undecoded) when the bundle has no entry for it.
 */
public String getProperty(String key) {
    if (key == null) {
        return null;
    }
    if (!this.properties.containsKey(key)) {
        // Missing entry: fall back to echoing the key.
        return key;
    }
    return decodeTokenText(this.properties.getString(key));
}
|
return key;
|
/**
 * Returns the token text registered under {@code key} in the configured
 * locale's bundle, falling back to the default bundle when absent.
 */
public String getProperty(String key) {
    if (!this.properties.containsKey(key)) {
        // A missing key indicates a configuration gap: trips under -ea,
        // otherwise the default bundle supplies the value.
        assert false;
        return this.defaultProperties.getString(key);
    }
    return this.properties.getString(key);
}
|
/**
 * Lazily-initialised singleton exposing parser token strings from the "token"
 * resource bundle for the configured locale. Thread-safe initialisation via
 * double-checked locking on a volatile instance field.
 */
class ParserConfigurations {
    private ResourceBundle properties = null;
    private static volatile ParserConfigurations instance = null;
    private static String language = "en";
    private static String country = "LK";

    private ParserConfigurations() {
        Locale currentLanguage;
        if (language == null || country == null) {
            // Defensive fallback; with the current constants this branch is unreachable.
            currentLanguage = Locale.getDefault();
        } else {
            currentLanguage = new Locale(language, country);
        }
        this.properties = ResourceBundle.getBundle("token", currentLanguage);
    }

    /** Returns the lazily-created singleton instance. */
    public static ParserConfigurations getInstance() {
        if (instance == null) {
            synchronized (ParserConfigurations.class) {
                if (instance == null) {
                    instance = new ParserConfigurations();
                }
            }
        }
        return instance;
    }

    /**
     * Decodes Base64-encoded token text. The charset is pinned to UTF-8 so the
     * result no longer depends on the platform default encoding.
     */
    public String decodeTokenText(String encodedTokenText) {
        return new String(Base64.getDecoder().decode(encodedTokenText),
                java.nio.charset.StandardCharsets.UTF_8);
    }
}
|
/**
 * Singleton holding parser token bundles: one for the configured locale and
 * one default (en_LK) bundle used as a fallback source.
 */
class ParserConfigurations {
    private ResourceBundle properties;
    private ResourceBundle defaultProperties;
    private static volatile ParserConfigurations instance = null;
    private static String language = "en";
    private static String country = "LK";

    private ParserConfigurations() {
        // Configured-locale bundle plus the en_LK bundle as the default.
        this.properties = ResourceBundle.getBundle("token", new Locale(language, country));
        this.defaultProperties = ResourceBundle.getBundle("token", new Locale("en", "LK"));
    }

    /** Returns the lazily-created singleton (double-checked locking on a volatile field). */
    public static ParserConfigurations getInstance() {
        ParserConfigurations result = instance;
        if (result == null) {
            synchronized (ParserConfigurations.class) {
                result = instance;
                if (result == null) {
                    instance = result = new ParserConfigurations();
                }
            }
        }
        return result;
    }
}
|
The check is too strict, then. We won't see `RUNNING` immediately after starting the container. We need something like a repeated check that times out after some maximum startup time.
|
/**
 * Creates a new, active {@code RemoteEnvironment} backed by a local Docker container.
 *
 * <p>The container is started with the runner's gRPC endpoints (logging, artifact,
 * provision, control) passed as arguments, then we wait for the worker to connect back
 * over the control channel. The container is not required to be RUNNING immediately
 * after {@code docker run} returns; liveness is only verified when a wait for the
 * control connection times out, so slow image pulls and slow startups are tolerated.
 *
 * @throws Exception if the container cannot be started or fails before connecting;
 *     any started container is killed (best effort) before the exception propagates.
 */
public RemoteEnvironment createEnvironment(Environment environment) throws Exception {
    Preconditions.checkState(
        environment
            .getUrn()
            .equals(BeamUrns.getUrn(RunnerApi.StandardEnvironments.Environments.DOCKER)),
        "The passed environment does not contain a DockerPayload.");
    final RunnerApi.DockerPayload dockerPayload =
        RunnerApi.DockerPayload.parseFrom(environment.getPayload());
    final String workerId = idGenerator.getId();

    // Assemble the docker invocation: image, docker flags, and worker arguments.
    String containerImage = dockerPayload.getContainerImage();
    String loggingEndpoint = loggingServiceServer.getApiServiceDescriptor().getUrl();
    String artifactEndpoint = retrievalServiceServer.getApiServiceDescriptor().getUrl();
    String provisionEndpoint = provisioningServiceServer.getApiServiceDescriptor().getUrl();
    String controlEndpoint = controlServiceServer.getApiServiceDescriptor().getUrl();
    ImmutableList.Builder<String> dockerArgsBuilder =
        ImmutableList.<String>builder()
            .addAll(gcsCredentialArgs())
            .add("--network=host")
            .add("--env=DOCKER_MAC_CONTAINER=" + System.getenv("DOCKER_MAC_CONTAINER"));
    if (!retainDockerContainer) {
        dockerArgsBuilder.add("--rm");
    }
    List<String> args =
        ImmutableList.of(
            String.format("--id=%s", workerId),
            String.format("--logging_endpoint=%s", loggingEndpoint),
            String.format("--artifact_endpoint=%s", artifactEndpoint),
            String.format("--provision_endpoint=%s", provisionEndpoint),
            String.format("--control_endpoint=%s", controlEndpoint));

    LOG.debug("Creating Docker Container with ID {}", workerId);
    String containerId = null;
    InstructionRequestHandler instructionHandler = null;
    try {
        containerId = docker.runImage(containerImage, dockerArgsBuilder.build(), args);
        LOG.debug("Created Docker Container with Container ID {}", containerId);
        while (instructionHandler == null) {
            try {
                // Long-poll for the worker to phone home over the control channel.
                instructionHandler = clientSource.take(workerId, Duration.ofMinutes(1));
            } catch (TimeoutException timeoutEx) {
                // Only check container liveness after a timeout: the container may
                // legitimately not be RUNNING right after `docker run` returns.
                Preconditions.checkArgument(
                    docker.isContainerRunning(containerId),
                    "No container running for id " + containerId);
                LOG.info(
                    "Still waiting for startup of environment {} for worker id {}",
                    dockerPayload.getContainerImage(),
                    workerId);
            } catch (InterruptedException interruptEx) {
                Thread.currentThread().interrupt();
                throw new RuntimeException(interruptEx);
            }
        }
    } catch (Exception e) {
        // Best-effort cleanup of any started container before propagating.
        if (containerId != null) {
            try {
                docker.killContainer(containerId);
            } catch (Exception dockerException) {
                e.addSuppressed(dockerException);
            }
        }
        throw e;
    }
    return DockerContainerEnvironment.create(docker, environment, containerId, instructionHandler);
}
|
docker.isContainerRunning(containerId), "No container running for id " + containerId);
|
/**
 * Creates a new, active {@code RemoteEnvironment} backed by a local Docker container.
 *
 * <p>Starts the container with the runner's gRPC endpoints (logging, artifact,
 * provision, control) as arguments, then waits for the worker to connect over the
 * control channel. Container liveness is only verified after a wait times out, so a
 * container that is not RUNNING immediately after startup is tolerated.
 *
 * @throws Exception if the container cannot be started or dies before connecting;
 *     a started container is killed (best effort) before the exception propagates.
 */
public RemoteEnvironment createEnvironment(Environment environment) throws Exception {
    // This factory only handles docker payloads.
    Preconditions.checkState(
        environment
            .getUrn()
            .equals(BeamUrns.getUrn(RunnerApi.StandardEnvironments.Environments.DOCKER)),
        "The passed environment does not contain a DockerPayload.");
    final RunnerApi.DockerPayload dockerPayload =
        RunnerApi.DockerPayload.parseFrom(environment.getPayload());
    final String workerId = idGenerator.getId();
    // Assemble docker flags and worker arguments (service endpoints).
    String containerImage = dockerPayload.getContainerImage();
    String loggingEndpoint = loggingServiceServer.getApiServiceDescriptor().getUrl();
    String artifactEndpoint = retrievalServiceServer.getApiServiceDescriptor().getUrl();
    String provisionEndpoint = provisioningServiceServer.getApiServiceDescriptor().getUrl();
    String controlEndpoint = controlServiceServer.getApiServiceDescriptor().getUrl();
    ImmutableList.Builder<String> dockerArgsBuilder =
        ImmutableList.<String>builder()
            .addAll(gcsCredentialArgs())
            .add("--network=host")
            .add("--env=DOCKER_MAC_CONTAINER=" + System.getenv("DOCKER_MAC_CONTAINER"));
    if (!retainDockerContainer) {
        // Auto-remove the container on exit unless the user asked to keep it.
        dockerArgsBuilder.add("--rm");
    }
    List<String> args =
        ImmutableList.of(
            String.format("--id=%s", workerId),
            String.format("--logging_endpoint=%s", loggingEndpoint),
            String.format("--artifact_endpoint=%s", artifactEndpoint),
            String.format("--provision_endpoint=%s", provisionEndpoint),
            String.format("--control_endpoint=%s", controlEndpoint));
    LOG.debug("Creating Docker Container with ID {}", workerId);
    String containerId = null;
    InstructionRequestHandler instructionHandler = null;
    try {
        containerId = docker.runImage(containerImage, dockerArgsBuilder.build(), args);
        LOG.debug("Created Docker Container with Container ID {}", containerId);
        while (instructionHandler == null) {
            try {
                // Long-poll for the worker to phone home over the control channel.
                instructionHandler = clientSource.take(workerId, Duration.ofMinutes(1));
            } catch (TimeoutException timeoutEx) {
                // Check liveness only after a timeout; the container may not be
                // RUNNING immediately after `docker run` returns.
                Preconditions.checkArgument(
                    docker.isContainerRunning(containerId), "No container running for id " + containerId);
                LOG.info(
                    "Still waiting for startup of environment {} for worker id {}",
                    dockerPayload.getContainerImage(),
                    workerId);
            } catch (InterruptedException interruptEx) {
                // Restore the interrupt flag before rethrowing.
                Thread.currentThread().interrupt();
                throw new RuntimeException(interruptEx);
            }
        }
    } catch (Exception e) {
        // Best-effort cleanup of any started container before propagating.
        if (containerId != null) {
            try {
                docker.killContainer(containerId);
            } catch (Exception dockerException) {
                e.addSuppressed(dockerException);
            }
        }
        throw e;
    }
    return DockerContainerEnvironment.create(docker, environment, containerId, instructionHandler);
}
|
/**
 * An {@code EnvironmentFactory} that creates SDK worker environments backed by local
 * Docker containers, wiring them to the runner's gRPC services (control, logging,
 * artifact retrieval, provisioning).
 */
class DockerEnvironmentFactory implements EnvironmentFactory {
    private static final Logger LOG = LoggerFactory.getLogger(DockerEnvironmentFactory.class);

    /** Creates a factory bound to the given docker command, service servers, and client source. */
    static DockerEnvironmentFactory forServicesWithDocker(
        DockerCommand docker,
        GrpcFnServer<FnApiControlClientPoolService> controlServiceServer,
        GrpcFnServer<GrpcLoggingService> loggingServiceServer,
        GrpcFnServer<ArtifactRetrievalService> retrievalServiceServer,
        GrpcFnServer<StaticGrpcProvisionService> provisioningServiceServer,
        ControlClientPool.Source clientSource,
        IdGenerator idGenerator,
        boolean retainDockerContainer) {
        return new DockerEnvironmentFactory(
            docker,
            controlServiceServer,
            loggingServiceServer,
            retrievalServiceServer,
            provisioningServiceServer,
            idGenerator,
            clientSource,
            retainDockerContainer);
    }

    private final DockerCommand docker;
    private final GrpcFnServer<FnApiControlClientPoolService> controlServiceServer;
    private final GrpcFnServer<GrpcLoggingService> loggingServiceServer;
    private final GrpcFnServer<ArtifactRetrievalService> retrievalServiceServer;
    private final GrpcFnServer<StaticGrpcProvisionService> provisioningServiceServer;
    private final IdGenerator idGenerator;
    private final ControlClientPool.Source clientSource;
    // When true, containers are started without --rm and are retained after shutdown.
    private final boolean retainDockerContainer;

    private DockerEnvironmentFactory(
        DockerCommand docker,
        GrpcFnServer<FnApiControlClientPoolService> controlServiceServer,
        GrpcFnServer<GrpcLoggingService> loggingServiceServer,
        GrpcFnServer<ArtifactRetrievalService> retrievalServiceServer,
        GrpcFnServer<StaticGrpcProvisionService> provisioningServiceServer,
        IdGenerator idGenerator,
        ControlClientPool.Source clientSource,
        boolean retainDockerContainer) {
        this.docker = docker;
        this.controlServiceServer = controlServiceServer;
        this.loggingServiceServer = loggingServiceServer;
        this.retrievalServiceServer = retrievalServiceServer;
        this.provisioningServiceServer = provisioningServiceServer;
        this.idGenerator = idGenerator;
        this.clientSource = clientSource;
        this.retainDockerContainer = retainDockerContainer;
    }

    /** Creates a new, active {@link RemoteEnvironment} backed by a local Docker container. */
    // NOTE(review): this @Override has no method attached in this excerpt; the
    // createEnvironment implementation appears to have been elided — confirm
    // against the full source file.
    @Override

    /**
     * Returns docker --mount arguments binding the local gcloud config directory into
     * the container, or an empty list when no local gcloud config directory exists.
     */
    private List<String> gcsCredentialArgs() {
        String dockerGcloudConfig = "/root/.config/gcloud";
        String localGcloudConfig =
            firstNonNull(
                System.getenv("CLOUDSDK_CONFIG"),
                Paths.get(System.getProperty("user.home"), ".config", "gcloud").toString());
        // Only bind-mount credentials if the local config directory actually exists.
        if (Files.exists(Paths.get(localGcloudConfig))) {
            return ImmutableList.of(
                "--mount",
                String.format("type=bind,src=%s,dst=%s", localGcloudConfig, dockerGcloudConfig));
        } else {
            return ImmutableList.of();
        }
    }

    /**
     * NOTE: Deployment on Macs is intended for local development. As of 18.03, Docker-for-Mac does
     * not implement host networking (--networking=host is effectively a no-op). Instead, we use a
     * special DNS entry that points to the host:
     * https: (link truncated in this excerpt — presumably the Docker-for-Mac networking docs). The
     * hostname has historically changed between versions, so this is subject to breakages and will
     * likely only support the latest version at any time.
     */
    private static class DockerOnMac {
        private static final String DOCKER_FOR_MAC_HOST = "host.docker.internal";
        private static final boolean RUNNING_INSIDE_DOCKER_ON_MAC =
            "1".equals(System.getenv("DOCKER_MAC_CONTAINER"));
        // Fixed port range cycled round-robin when running inside Docker-on-Mac.
        private static final int MAC_PORT_START = 8100;
        private static final int MAC_PORT_END = 8200;
        private static final AtomicInteger MAC_PORT = new AtomicInteger(MAC_PORT_START);

        private static ServerFactory getServerFactory() {
            ServerFactory.UrlFactory dockerUrlFactory =
                (host, port) -> HostAndPort.fromParts(DOCKER_FOR_MAC_HOST, port).toString();
            if (RUNNING_INSIDE_DOCKER_ON_MAC) {
                return ServerFactory.createWithUrlFactoryAndPortSupplier(
                    dockerUrlFactory,
                    () -> MAC_PORT.getAndUpdate(val -> val == MAC_PORT_END ? MAC_PORT_START : val + 1));
            } else {
                return ServerFactory.createWithUrlFactory(dockerUrlFactory);
            }
        }
    }

    /** Provider for DockerEnvironmentFactory. */
    public static class Provider implements EnvironmentFactory.Provider {
        private final boolean retainDockerContainer;

        public Provider(PipelineOptions options) {
            this.retainDockerContainer =
                options.as(ManualDockerEnvironmentOptions.class).getRetainDockerContainers();
        }

        @Override
        public EnvironmentFactory createEnvironmentFactory(
            GrpcFnServer<FnApiControlClientPoolService> controlServiceServer,
            GrpcFnServer<GrpcLoggingService> loggingServiceServer,
            GrpcFnServer<ArtifactRetrievalService> retrievalServiceServer,
            GrpcFnServer<StaticGrpcProvisionService> provisioningServiceServer,
            ControlClientPool clientPool,
            IdGenerator idGenerator) {
            return DockerEnvironmentFactory.forServicesWithDocker(
                DockerCommand.getDefault(),
                controlServiceServer,
                loggingServiceServer,
                retrievalServiceServer,
                provisioningServiceServer,
                clientPool.getSource(),
                idGenerator,
                retainDockerContainer);
        }

        @Override
        public ServerFactory getServerFactory() {
            // Host networking works on Linux; Docker-on-Mac needs the URL-rewriting factory.
            switch (getPlatform()) {
                case LINUX:
                    return ServerFactory.createDefault();
                case MAC:
                    return DockerOnMac.getServerFactory();
                default:
                    LOG.warn("Unknown Docker platform. Falling back to default server factory");
                    return ServerFactory.createDefault();
            }
        }

        private static Platform getPlatform() {
            String osName = System.getProperty("os.name").toLowerCase();
            // DOCKER_MAC_CONTAINER signals we are already running inside Docker-on-Mac.
            if (osName.startsWith("mac") || DockerOnMac.RUNNING_INSIDE_DOCKER_ON_MAC) {
                return Platform.MAC;
            } else if (osName.startsWith("linux")) {
                return Platform.LINUX;
            }
            return Platform.OTHER;
        }

        private enum Platform {
            MAC,
            LINUX,
            OTHER,
        }
    }
}
|
/**
 * An {@code EnvironmentFactory} that creates SDK worker environments backed by local
 * Docker containers, wiring them to the runner's gRPC services (control, logging,
 * artifact retrieval, provisioning).
 */
class DockerEnvironmentFactory implements EnvironmentFactory {
    private static final Logger LOG = LoggerFactory.getLogger(DockerEnvironmentFactory.class);

    /** Creates a factory bound to the given docker command, service servers, and client source. */
    static DockerEnvironmentFactory forServicesWithDocker(
        DockerCommand docker,
        GrpcFnServer<FnApiControlClientPoolService> controlServiceServer,
        GrpcFnServer<GrpcLoggingService> loggingServiceServer,
        GrpcFnServer<ArtifactRetrievalService> retrievalServiceServer,
        GrpcFnServer<StaticGrpcProvisionService> provisioningServiceServer,
        ControlClientPool.Source clientSource,
        IdGenerator idGenerator,
        boolean retainDockerContainer) {
        return new DockerEnvironmentFactory(
            docker,
            controlServiceServer,
            loggingServiceServer,
            retrievalServiceServer,
            provisioningServiceServer,
            idGenerator,
            clientSource,
            retainDockerContainer);
    }

    private final DockerCommand docker;
    private final GrpcFnServer<FnApiControlClientPoolService> controlServiceServer;
    private final GrpcFnServer<GrpcLoggingService> loggingServiceServer;
    private final GrpcFnServer<ArtifactRetrievalService> retrievalServiceServer;
    private final GrpcFnServer<StaticGrpcProvisionService> provisioningServiceServer;
    private final IdGenerator idGenerator;
    private final ControlClientPool.Source clientSource;
    // When true, containers are started without --rm and are retained after shutdown.
    private final boolean retainDockerContainer;

    private DockerEnvironmentFactory(
        DockerCommand docker,
        GrpcFnServer<FnApiControlClientPoolService> controlServiceServer,
        GrpcFnServer<GrpcLoggingService> loggingServiceServer,
        GrpcFnServer<ArtifactRetrievalService> retrievalServiceServer,
        GrpcFnServer<StaticGrpcProvisionService> provisioningServiceServer,
        IdGenerator idGenerator,
        ControlClientPool.Source clientSource,
        boolean retainDockerContainer) {
        this.docker = docker;
        this.controlServiceServer = controlServiceServer;
        this.loggingServiceServer = loggingServiceServer;
        this.retrievalServiceServer = retrievalServiceServer;
        this.provisioningServiceServer = provisioningServiceServer;
        this.idGenerator = idGenerator;
        this.clientSource = clientSource;
        this.retainDockerContainer = retainDockerContainer;
    }

    /** Creates a new, active {@link RemoteEnvironment} backed by a local Docker container. */
    // NOTE(review): this @Override has no method attached in this excerpt; the
    // createEnvironment implementation appears to have been elided — confirm
    // against the full source file.
    @Override

    /**
     * Returns docker --mount arguments binding the local gcloud config directory into
     * the container, or an empty list when no local gcloud config directory exists.
     */
    private List<String> gcsCredentialArgs() {
        String dockerGcloudConfig = "/root/.config/gcloud";
        String localGcloudConfig =
            firstNonNull(
                System.getenv("CLOUDSDK_CONFIG"),
                Paths.get(System.getProperty("user.home"), ".config", "gcloud").toString());
        // Only bind-mount credentials if the local config directory actually exists.
        if (Files.exists(Paths.get(localGcloudConfig))) {
            return ImmutableList.of(
                "--mount",
                String.format("type=bind,src=%s,dst=%s", localGcloudConfig, dockerGcloudConfig));
        } else {
            return ImmutableList.of();
        }
    }

    /**
     * NOTE: Deployment on Macs is intended for local development. As of 18.03, Docker-for-Mac does
     * not implement host networking (--networking=host is effectively a no-op). Instead, we use a
     * special DNS entry that points to the host:
     * https: (link truncated in this excerpt — presumably the Docker-for-Mac networking docs). The
     * hostname has historically changed between versions, so this is subject to breakages and will
     * likely only support the latest version at any time.
     */
    private static class DockerOnMac {
        private static final String DOCKER_FOR_MAC_HOST = "host.docker.internal";
        private static final boolean RUNNING_INSIDE_DOCKER_ON_MAC =
            "1".equals(System.getenv("DOCKER_MAC_CONTAINER"));
        // Fixed port range cycled round-robin when running inside Docker-on-Mac.
        private static final int MAC_PORT_START = 8100;
        private static final int MAC_PORT_END = 8200;
        private static final AtomicInteger MAC_PORT = new AtomicInteger(MAC_PORT_START);

        private static ServerFactory getServerFactory() {
            ServerFactory.UrlFactory dockerUrlFactory =
                (host, port) -> HostAndPort.fromParts(DOCKER_FOR_MAC_HOST, port).toString();
            if (RUNNING_INSIDE_DOCKER_ON_MAC) {
                return ServerFactory.createWithUrlFactoryAndPortSupplier(
                    dockerUrlFactory,
                    () -> MAC_PORT.getAndUpdate(val -> val == MAC_PORT_END ? MAC_PORT_START : val + 1));
            } else {
                return ServerFactory.createWithUrlFactory(dockerUrlFactory);
            }
        }
    }

    /** Provider for DockerEnvironmentFactory. */
    public static class Provider implements EnvironmentFactory.Provider {
        private final boolean retainDockerContainer;

        public Provider(PipelineOptions options) {
            this.retainDockerContainer =
                options.as(ManualDockerEnvironmentOptions.class).getRetainDockerContainers();
        }

        @Override
        public EnvironmentFactory createEnvironmentFactory(
            GrpcFnServer<FnApiControlClientPoolService> controlServiceServer,
            GrpcFnServer<GrpcLoggingService> loggingServiceServer,
            GrpcFnServer<ArtifactRetrievalService> retrievalServiceServer,
            GrpcFnServer<StaticGrpcProvisionService> provisioningServiceServer,
            ControlClientPool clientPool,
            IdGenerator idGenerator) {
            return DockerEnvironmentFactory.forServicesWithDocker(
                DockerCommand.getDefault(),
                controlServiceServer,
                loggingServiceServer,
                retrievalServiceServer,
                provisioningServiceServer,
                clientPool.getSource(),
                idGenerator,
                retainDockerContainer);
        }

        @Override
        public ServerFactory getServerFactory() {
            // Host networking works on Linux; Docker-on-Mac needs the URL-rewriting factory.
            switch (getPlatform()) {
                case LINUX:
                    return ServerFactory.createDefault();
                case MAC:
                    return DockerOnMac.getServerFactory();
                default:
                    LOG.warn("Unknown Docker platform. Falling back to default server factory");
                    return ServerFactory.createDefault();
            }
        }

        private static Platform getPlatform() {
            String osName = System.getProperty("os.name").toLowerCase();
            // DOCKER_MAC_CONTAINER signals we are already running inside Docker-on-Mac.
            if (osName.startsWith("mac") || DockerOnMac.RUNNING_INSIDE_DOCKER_ON_MAC) {
                return Platform.MAC;
            } else if (osName.startsWith("linux")) {
                return Platform.LINUX;
            }
            return Platform.OTHER;
        }

        private enum Platform {
            MAC,
            LINUX,
            OTHER,
        }
    }
}
`s/Failed reload/Failed to reload/` or `s/Failed reload/Failed reload of/`
|
/**
 * Reloads TLS crypto material from the configured options file.
 *
 * <p>Catches {@code Throwable} so a bad reload never kills the scheduler thread;
 * the error is logged and the previous material remains in effect.
 */
public void run() {
    try {
        reloadCryptoMaterial(TransportSecurityOptions.fromJsonFile(tlsOptionsConfigFile), trustManager, keyManager);
    } catch (Throwable t) {
        // Fixed wording: "Failed reload" -> "Failed to reload".
        log.log(Level.SEVERE, String.format("Failed to reload crypto material (path='%s'): %s", tlsOptionsConfigFile, t.getMessage()), t);
    }
}
|
log.log(Level.SEVERE, String.format("Failed reload crypto material (path='%s'): %s", tlsOptionsConfigFile, t.getMessage()), t);
|
/**
 * Reloads TLS crypto material from the configured options file, logging (but
 * never propagating) any failure so the scheduling thread stays alive.
 */
public void run() {
    try {
        TransportSecurityOptions options = TransportSecurityOptions.fromJsonFile(tlsOptionsConfigFile);
        reloadCryptoMaterial(options, trustManager, keyManager);
    } catch (Throwable t) {
        String message =
                String.format("Failed to reload crypto material (path='%s'): %s", tlsOptionsConfigFile, t.getMessage());
        log.log(Level.SEVERE, message, t);
    }
}
|
// Periodically re-reads TLS crypto material; intended to be scheduled on an executor.
class CryptoMaterialReloader implements Runnable {
    // NOTE(review): the @Override below has no method attached in this excerpt;
    // the run() body appears to have been elided — confirm against the full source.
    @Override
}
|
// Periodically re-reads TLS crypto material; intended to be scheduled on an executor.
class CryptoMaterialReloader implements Runnable {
    // NOTE(review): the @Override below has no method attached in this excerpt;
    // the run() body appears to have been elided — confirm against the full source.
    @Override
}
|
This no longer works after parallel steps were introduced. It is also superseded by validateSteps, which is called after this method in the constructor.
|
// Adds missing required steps (staging before prod, system test before staging)
// and reorders steps so system test runs first and staging second.
// NOTE(review): this completion logic reportedly no longer works now that
// parallel steps exist, and is superseded by validateSteps(), which runs after
// this method in the constructor — candidate for removal; confirm before relying on it.
private static List<Step> completeSteps(List<Step> steps) {
    // A prod deployment implies a staging step; add one if it is missing.
    if (steps.stream().anyMatch(step -> step.deploysTo(Environment.prod)) &&
        steps.stream().noneMatch(step -> step.deploysTo(Environment.staging))) {
        steps.add(new DeclaredZone(Environment.staging));
    }
    // A staging deployment implies a system test step; add one if it is missing.
    if (steps.stream().anyMatch(step -> step.deploysTo(Environment.staging)) &&
        steps.stream().noneMatch(step -> step.deploysTo(Environment.test))) {
        steps.add(new DeclaredZone(Environment.test));
    }
    // Move the system test step to the front, then staging right after it.
    DeclaredZone testStep = remove(Environment.test, steps);
    if (testStep != null)
        steps.add(0, testStep);
    DeclaredZone stagingStep = remove(Environment.staging, steps);
    if (stagingStep != null)
        steps.add(1, stagingStep);
    return steps;
}
|
steps.stream().noneMatch(step -> step.deploysTo(Environment.staging))) {
|
/**
 * Adds implied required steps (staging when prod is deployed to, system test
 * when staging is deployed to) and reorders steps so the system test step runs
 * first and the staging step second. Returns the same (mutated) list.
 */
private static List<Step> completeSteps(List<Step> steps) {
    boolean deploysToProd = steps.stream().anyMatch(step -> step.deploysTo(Environment.prod));
    if (deploysToProd && steps.stream().noneMatch(step -> step.deploysTo(Environment.staging))) {
        steps.add(new DeclaredZone(Environment.staging));
    }
    boolean deploysToStaging = steps.stream().anyMatch(step -> step.deploysTo(Environment.staging));
    if (deploysToStaging && steps.stream().noneMatch(step -> step.deploysTo(Environment.test))) {
        steps.add(new DeclaredZone(Environment.test));
    }
    // System test first, staging second, everything else in declared order.
    DeclaredZone systemTest = remove(Environment.test, steps);
    if (systemTest != null) {
        steps.add(0, systemTest);
    }
    DeclaredZone staging = remove(Environment.staging, steps);
    if (staging != null) {
        steps.add(1, staging);
    }
    return steps;
}
|
class DeploymentSpec {

    /** The empty deployment spec, specifying no zones or rotation, and defaults for all settings */
    public static final DeploymentSpec empty = new DeploymentSpec(Optional.empty(),
                                                                  UpgradePolicy.defaultPolicy,
                                                                  Collections.emptyList(),
                                                                  Collections.emptyList(),
                                                                  "<deployment version='1.0'/>");

    private final Optional<String> globalServiceId;
    private final UpgradePolicy upgradePolicy;
    private final List<ChangeBlocker> changeBlockers;
    private final List<Step> steps;
    private final String xmlForm; // the raw XML this was parsed from, or null

    /** Creates a spec without an XML form, e.g. when constructed programmatically */
    public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy,
                          List<ChangeBlocker> changeBlockers, List<Step> steps) {
        this(globalServiceId, upgradePolicy, changeBlockers, steps, null);
    }

    /**
     * Creates a deployment spec.
     *
     * @param globalServiceId the id of the service to expose through global routing, if any
     * @param upgradePolicy the policy controlling when this application upgrades
     * @param changeBlockers time windows in which version and/or revision changes are blocked
     * @param steps the deployment steps; missing required steps are added and steps reordered
     * @param xmlForm the XML this spec was created from, or null if not created from XML
     * @throws IllegalArgumentException if the total delay exceeds 24 hours or a zone is declared twice
     */
    public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy,
                          List<ChangeBlocker> changeBlockers, List<Step> steps, String xmlForm) {
        validateTotalDelay(steps);
        this.globalServiceId = globalServiceId;
        this.upgradePolicy = upgradePolicy;
        this.changeBlockers = changeBlockers;
        this.steps = ImmutableList.copyOf(completeSteps(new ArrayList<>(steps)));
        this.xmlForm = xmlForm;
        validateZones(this.steps);
    }

    /** Throws an IllegalArgumentException if the total delay exceeds 24 hours */
    private static void validateTotalDelay(List<Step> steps) {
        long totalDelaySeconds = steps.stream()
                                      .filter(step -> step instanceof Delay)
                                      .mapToLong(delay -> ((Delay) delay).duration().getSeconds())
                                      .sum();
        if (totalDelaySeconds > Duration.ofHours(24).getSeconds())
            throw new IllegalArgumentException("The total delay specified is " + Duration.ofSeconds(totalDelaySeconds) +
                                               " but max 24 hours is allowed");
    }

    /** Throws an IllegalArgumentException if any production zone is declared multiple times */
    private static void validateZones(List<Step> steps) {
        Set<DeclaredZone> zones = new HashSet<>();
        for (Step step : steps)
            for (DeclaredZone zone : step.zones())
                ensureUnique(zone, zones);
    }

    private static void ensureUnique(DeclaredZone zone, Set<DeclaredZone> zones) {
        if ( ! zones.add(zone))
            throw new IllegalArgumentException(zone + " is listed twice in deployment.xml");
    }

    /**
     * Adds missing required steps and reorders steps to a permissible order.
     *
     * <p>NOTE(review): this method was called from the constructor but missing from the class
     * (only its javadoc remained); restored here from the orphaned fragment above it — confirm
     * against the original source. A test step is always required and runs first, and any spec
     * deploying to prod also requires a staging step, which runs second.
     */
    private static List<Step> completeSteps(List<Step> steps) {
        // Add required steps if absent
        if (steps.stream().noneMatch(step -> step.deploysTo(Environment.test)))
            steps.add(new DeclaredZone(Environment.test));
        if (steps.stream().anyMatch(step -> step.deploysTo(Environment.prod))
            && steps.stream().noneMatch(step -> step.deploysTo(Environment.staging)))
            steps.add(new DeclaredZone(Environment.staging));

        // Reorder: test first, then staging, then the rest in declared order
        DeclaredZone testStep = remove(Environment.test, steps);
        if (testStep != null)
            steps.add(0, testStep);
        DeclaredZone stagingStep = remove(Environment.staging, steps);
        if (stagingStep != null)
            steps.add(1, stagingStep);
        return steps;
    }

    /**
     * Removes the first occurrence of a deployment step to the given environment and returns it.
     *
     * @return the removed step, or null if it is not present
     */
    private static DeclaredZone remove(Environment environment, List<Step> steps) {
        for (int i = 0; i < steps.size(); i++) {
            if (steps.get(i).deploysTo(environment))
                return (DeclaredZone) steps.remove(i);
        }
        return null;
    }

    /** Returns the ID of the service to expose through global routing, if present */
    public Optional<String> globalServiceId() { return globalServiceId; }

    /** Returns the upgrade policy of this, which is defaultPolicy if none is specified */
    public UpgradePolicy upgradePolicy() { return upgradePolicy; }

    /** Returns whether upgrade can occur at the given instant */
    public boolean canUpgradeAt(Instant instant) {
        return changeBlockers.stream().filter(ChangeBlocker::blocksVersions)
                             .noneMatch(block -> block.window().includes(instant));
    }

    /** Returns whether an application revision change can occur at the given instant */
    public boolean canChangeRevisionAt(Instant instant) {
        return changeBlockers.stream().filter(ChangeBlocker::blocksRevisions)
                             .noneMatch(block -> block.window().includes(instant));
    }

    /** Returns time windows where upgrades are disallowed */
    public List<ChangeBlocker> changeBlocker() { return changeBlockers; }

    /** Returns the deployment steps of this in the order they will be performed */
    public List<Step> steps() { return steps; }

    /** Returns all the DeclaredZone deployment steps in the order they are declared */
    public List<DeclaredZone> zones() {
        return steps.stream()
                    .flatMap(step -> step.zones().stream())
                    .collect(Collectors.toList());
    }

    /** Returns the XML form of this spec, or null if it was not created by fromXml, nor is empty */
    public String xmlForm() { return xmlForm; }

    /** Returns whether this deployment spec specifies the given zone, either implicitly or explicitly */
    public boolean includes(Environment environment, Optional<RegionName> region) {
        for (Step step : steps)
            if (step.deploysTo(environment, region)) return true;
        return false;
    }

    /**
     * Creates a deployment spec from XML.
     *
     * @throws IllegalArgumentException if the XML is invalid
     */
    public static DeploymentSpec fromXml(Reader reader) {
        return new DeploymentSpecXmlReader().read(reader);
    }

    /**
     * Creates a deployment spec from XML, with validation enabled.
     *
     * @throws IllegalArgumentException if the XML is invalid
     */
    public static DeploymentSpec fromXml(String xmlForm) {
        return fromXml(xmlForm, true);
    }

    /**
     * Creates a deployment spec from XML.
     *
     * @param validate whether to validate the input
     * @throws IllegalArgumentException if the XML is invalid
     */
    public static DeploymentSpec fromXml(String xmlForm, boolean validate) {
        return new DeploymentSpecXmlReader(validate).read(xmlForm);
    }

    /**
     * Returns the concatenated messages of the given throwable and all its causes,
     * separated by ": ", skipping null messages and consecutive duplicates.
     */
    public static String toMessageString(Throwable t) {
        StringBuilder b = new StringBuilder();
        String lastMessage = null;
        String message;
        for (; t != null; t = t.getCause()) {
            message = t.getMessage();
            if (message == null) continue;
            if (message.equals(lastMessage)) continue;
            if (b.length() > 0) {
                b.append(": ");
            }
            b.append(message);
            lastMessage = message;
        }
        return b.toString();
    }

    /**
     * This may be invoked by a continuous build. Exits with 0 if the zone given by
     * args[1] (and optionally args[2]) is included in the deployment spec file given
     * by args[0], and with 1 otherwise or on any error.
     */
    public static void main(String[] args) {
        if (args.length != 2 && args.length != 3) {
            System.err.println("Usage: DeploymentSpec [file] [environment] [region]?" +
                               "Returns 0 if the specified zone matches the deployment spec, 1 otherwise");
            System.exit(1);
        }
        try (BufferedReader reader = new BufferedReader(new FileReader(args[0]))) {
            DeploymentSpec spec = DeploymentSpec.fromXml(reader);
            Environment environment = Environment.from(args[1]);
            Optional<RegionName> region = args.length == 3 ? Optional.of(RegionName.from(args[2])) : Optional.empty();
            if (spec.includes(environment, region))
                System.exit(0);
            else
                System.exit(1);
        }
        catch (Exception e) {
            System.err.println("Exception checking deployment spec: " + toMessageString(e));
            System.exit(1);
        }
    }

    /** A deployment step */
    public abstract static class Step {

        /** Returns whether this step deploys to the given environment in any region */
        public final boolean deploysTo(Environment environment) {
            return deploysTo(environment, Optional.empty());
        }

        /** Returns whether this step deploys to the given environment, and (if specified) region */
        public abstract boolean deploysTo(Environment environment, Optional<RegionName> region);

        /** Returns the zones deployed to in this step */
        public List<DeclaredZone> zones() { return Collections.emptyList(); }

    }

    /** A deployment step which is to wait for some time before progressing to the next step */
    public static class Delay extends Step {

        private final Duration duration;

        public Delay(Duration duration) {
            this.duration = duration;
        }

        /** Returns how long to wait before proceeding to the next step */
        public Duration duration() { return duration; }

        @Override
        public boolean deploysTo(Environment environment, Optional<RegionName> region) { return false; }

    }

    /** A deployment step which is to run deployment in a particular zone */
    public static class DeclaredZone extends Step {

        private final Environment environment;
        private final Optional<RegionName> region; // empty iff environment != prod
        private final boolean active;

        public DeclaredZone(Environment environment) {
            this(environment, Optional.empty(), false);
        }

        public DeclaredZone(Environment environment, Optional<RegionName> region, boolean active) {
            if (environment != Environment.prod && region.isPresent())
                throw new IllegalArgumentException("Non-prod environments cannot specify a region");
            if (environment == Environment.prod && ! region.isPresent())
                throw new IllegalArgumentException("Prod environments must be specified with a region");
            this.environment = environment;
            this.region = region;
            this.active = active;
        }

        public Environment environment() { return environment; }

        /** The region name, or empty if not declared */
        public Optional<RegionName> region() { return region; }

        /** Returns whether this zone should receive production traffic */
        public boolean active() { return active; }

        @Override
        public List<DeclaredZone> zones() { return Collections.singletonList(this); }

        @Override
        public boolean deploysTo(Environment environment, Optional<RegionName> region) {
            if (environment != this.environment) return false;
            if (region.isPresent() && ! region.equals(this.region)) return false;
            return true;
        }

        /** Identity is environment + region; 'active' is not part of it (consistent with equals) */
        @Override
        public int hashCode() {
            return Objects.hash(environment, region);
        }

        @Override
        public boolean equals(Object o) {
            if (o == this) return true;
            if ( ! (o instanceof DeclaredZone)) return false;
            DeclaredZone other = (DeclaredZone) o;
            return this.environment == other.environment && this.region.equals(other.region());
        }

        @Override
        public String toString() {
            return environment + (region.isPresent() ? "." + region.get() : "");
        }

    }

    /** A deployment step which is to run deployment to multiple zones in parallel */
    public static class ParallelZones extends Step {

        private final List<DeclaredZone> zones;

        public ParallelZones(List<DeclaredZone> zones) {
            this.zones = ImmutableList.copyOf(zones);
        }

        @Override
        public List<DeclaredZone> zones() { return this.zones; }

        @Override
        public boolean deploysTo(Environment environment, Optional<RegionName> region) {
            return zones.stream().anyMatch(zone -> zone.deploysTo(environment, region));
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if ( ! (o instanceof ParallelZones)) return false;
            ParallelZones that = (ParallelZones) o;
            return Objects.equals(zones, that.zones);
        }

        @Override
        public int hashCode() {
            return Objects.hash(zones);
        }

    }

    /** Controls when this application will be upgraded to new Vespa versions */
    public enum UpgradePolicy {
        /** Canary: Applications with this policy will upgrade before any other */
        canary,
        /** Default: Will upgrade after all canary applications upgraded successfully. The default. */
        defaultPolicy,
        /** Will upgrade after most default applications upgraded successfully */
        conservative
    }

    /** A blocking of changes in a given time window */
    public static class ChangeBlocker {

        private final boolean revision;
        private final boolean version;
        private final TimeWindow window;

        public ChangeBlocker(boolean revision, boolean version, TimeWindow window) {
            this.revision = revision;
            this.version = version;
            this.window = window;
        }

        /** Returns whether application revision changes are blocked in the window */
        public boolean blocksRevisions() { return revision; }

        /** Returns whether platform version changes are blocked in the window */
        public boolean blocksVersions() { return version; }

        /** Returns the time window in which changes are blocked */
        public TimeWindow window() { return window; }

    }

}
|
class DeploymentSpec {

    /** The empty deployment spec, specifying no zones or rotation, and defaults for all settings */
    public static final DeploymentSpec empty = new DeploymentSpec(Optional.empty(),
                                                                  UpgradePolicy.defaultPolicy,
                                                                  Collections.emptyList(),
                                                                  Collections.emptyList(),
                                                                  "<deployment version='1.0'/>");

    private final Optional<String> globalServiceId;
    private final UpgradePolicy upgradePolicy;
    private final List<ChangeBlocker> changeBlockers;
    private final List<Step> steps;
    private final String xmlForm; // the raw XML this was parsed from, or null

    /** Creates a spec without an XML form, e.g. when constructed programmatically */
    public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy,
                          List<ChangeBlocker> changeBlockers, List<Step> steps) {
        this(globalServiceId, upgradePolicy, changeBlockers, steps, null);
    }

    /**
     * Creates a deployment spec.
     *
     * @param globalServiceId the id of the service to expose through global routing, if any
     * @param upgradePolicy the policy controlling when this application upgrades
     * @param changeBlockers time windows in which version and/or revision changes are blocked
     * @param steps the deployment steps; missing required steps are added and steps reordered
     * @param xmlForm the XML this spec was created from, or null if not created from XML
     * @throws IllegalArgumentException if the total delay exceeds 24 hours or a zone is declared twice
     */
    public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy,
                          List<ChangeBlocker> changeBlockers, List<Step> steps, String xmlForm) {
        validateTotalDelay(steps);
        this.globalServiceId = globalServiceId;
        this.upgradePolicy = upgradePolicy;
        this.changeBlockers = changeBlockers;
        this.steps = ImmutableList.copyOf(completeSteps(new ArrayList<>(steps)));
        this.xmlForm = xmlForm;
        validateZones(this.steps);
    }

    /** Throws an IllegalArgumentException if the total delay exceeds 24 hours */
    private static void validateTotalDelay(List<Step> steps) {
        long totalDelaySeconds = steps.stream()
                                      .filter(step -> step instanceof Delay)
                                      .mapToLong(delay -> ((Delay) delay).duration().getSeconds())
                                      .sum();
        if (totalDelaySeconds > Duration.ofHours(24).getSeconds())
            throw new IllegalArgumentException("The total delay specified is " + Duration.ofSeconds(totalDelaySeconds) +
                                               " but max 24 hours is allowed");
    }

    /** Throws an IllegalArgumentException if any production zone is declared multiple times */
    private static void validateZones(List<Step> steps) {
        Set<DeclaredZone> zones = new HashSet<>();
        for (Step step : steps)
            for (DeclaredZone zone : step.zones())
                ensureUnique(zone, zones);
    }

    private static void ensureUnique(DeclaredZone zone, Set<DeclaredZone> zones) {
        if ( ! zones.add(zone))
            throw new IllegalArgumentException(zone + " is listed twice in deployment.xml");
    }

    /**
     * Adds missing required steps and reorders steps to a permissible order.
     *
     * <p>NOTE(review): this method was called from the constructor but missing from the class
     * (only its javadoc remained); restored here from the orphaned fragment above it — confirm
     * against the original source. A test step is always required and runs first, and any spec
     * deploying to prod also requires a staging step, which runs second.
     */
    private static List<Step> completeSteps(List<Step> steps) {
        // Add required steps if absent
        if (steps.stream().noneMatch(step -> step.deploysTo(Environment.test)))
            steps.add(new DeclaredZone(Environment.test));
        if (steps.stream().anyMatch(step -> step.deploysTo(Environment.prod))
            && steps.stream().noneMatch(step -> step.deploysTo(Environment.staging)))
            steps.add(new DeclaredZone(Environment.staging));

        // Reorder: test first, then staging, then the rest in declared order
        DeclaredZone testStep = remove(Environment.test, steps);
        if (testStep != null)
            steps.add(0, testStep);
        DeclaredZone stagingStep = remove(Environment.staging, steps);
        if (stagingStep != null)
            steps.add(1, stagingStep);
        return steps;
    }

    /**
     * Removes the first occurrence of a deployment step to the given environment and returns it.
     *
     * @return the removed step, or null if it is not present
     */
    private static DeclaredZone remove(Environment environment, List<Step> steps) {
        for (int i = 0; i < steps.size(); i++) {
            if (steps.get(i).deploysTo(environment))
                return (DeclaredZone) steps.remove(i);
        }
        return null;
    }

    /** Returns the ID of the service to expose through global routing, if present */
    public Optional<String> globalServiceId() { return globalServiceId; }

    /** Returns the upgrade policy of this, which is defaultPolicy if none is specified */
    public UpgradePolicy upgradePolicy() { return upgradePolicy; }

    /** Returns whether upgrade can occur at the given instant */
    public boolean canUpgradeAt(Instant instant) {
        return changeBlockers.stream().filter(ChangeBlocker::blocksVersions)
                             .noneMatch(block -> block.window().includes(instant));
    }

    /** Returns whether an application revision change can occur at the given instant */
    public boolean canChangeRevisionAt(Instant instant) {
        return changeBlockers.stream().filter(ChangeBlocker::blocksRevisions)
                             .noneMatch(block -> block.window().includes(instant));
    }

    /** Returns time windows where upgrades are disallowed */
    public List<ChangeBlocker> changeBlocker() { return changeBlockers; }

    /** Returns the deployment steps of this in the order they will be performed */
    public List<Step> steps() { return steps; }

    /** Returns all the DeclaredZone deployment steps in the order they are declared */
    public List<DeclaredZone> zones() {
        return steps.stream()
                    .flatMap(step -> step.zones().stream())
                    .collect(Collectors.toList());
    }

    /** Returns the XML form of this spec, or null if it was not created by fromXml, nor is empty */
    public String xmlForm() { return xmlForm; }

    /** Returns whether this deployment spec specifies the given zone, either implicitly or explicitly */
    public boolean includes(Environment environment, Optional<RegionName> region) {
        for (Step step : steps)
            if (step.deploysTo(environment, region)) return true;
        return false;
    }

    /**
     * Creates a deployment spec from XML.
     *
     * @throws IllegalArgumentException if the XML is invalid
     */
    public static DeploymentSpec fromXml(Reader reader) {
        return new DeploymentSpecXmlReader().read(reader);
    }

    /**
     * Creates a deployment spec from XML, with validation enabled.
     *
     * @throws IllegalArgumentException if the XML is invalid
     */
    public static DeploymentSpec fromXml(String xmlForm) {
        return fromXml(xmlForm, true);
    }

    /**
     * Creates a deployment spec from XML.
     *
     * @param validate whether to validate the input
     * @throws IllegalArgumentException if the XML is invalid
     */
    public static DeploymentSpec fromXml(String xmlForm, boolean validate) {
        return new DeploymentSpecXmlReader(validate).read(xmlForm);
    }

    /**
     * Returns the concatenated messages of the given throwable and all its causes,
     * separated by ": ", skipping null messages and consecutive duplicates.
     */
    public static String toMessageString(Throwable t) {
        StringBuilder b = new StringBuilder();
        String lastMessage = null;
        String message;
        for (; t != null; t = t.getCause()) {
            message = t.getMessage();
            if (message == null) continue;
            if (message.equals(lastMessage)) continue;
            if (b.length() > 0) {
                b.append(": ");
            }
            b.append(message);
            lastMessage = message;
        }
        return b.toString();
    }

    /**
     * This may be invoked by a continuous build. Exits with 0 if the zone given by
     * args[1] (and optionally args[2]) is included in the deployment spec file given
     * by args[0], and with 1 otherwise or on any error.
     */
    public static void main(String[] args) {
        if (args.length != 2 && args.length != 3) {
            System.err.println("Usage: DeploymentSpec [file] [environment] [region]?" +
                               "Returns 0 if the specified zone matches the deployment spec, 1 otherwise");
            System.exit(1);
        }
        try (BufferedReader reader = new BufferedReader(new FileReader(args[0]))) {
            DeploymentSpec spec = DeploymentSpec.fromXml(reader);
            Environment environment = Environment.from(args[1]);
            Optional<RegionName> region = args.length == 3 ? Optional.of(RegionName.from(args[2])) : Optional.empty();
            if (spec.includes(environment, region))
                System.exit(0);
            else
                System.exit(1);
        }
        catch (Exception e) {
            System.err.println("Exception checking deployment spec: " + toMessageString(e));
            System.exit(1);
        }
    }

    /** A deployment step */
    public abstract static class Step {

        /** Returns whether this step deploys to the given environment in any region */
        public final boolean deploysTo(Environment environment) {
            return deploysTo(environment, Optional.empty());
        }

        /** Returns whether this step deploys to the given environment, and (if specified) region */
        public abstract boolean deploysTo(Environment environment, Optional<RegionName> region);

        /** Returns the zones deployed to in this step */
        public List<DeclaredZone> zones() { return Collections.emptyList(); }

    }

    /** A deployment step which is to wait for some time before progressing to the next step */
    public static class Delay extends Step {

        private final Duration duration;

        public Delay(Duration duration) {
            this.duration = duration;
        }

        /** Returns how long to wait before proceeding to the next step */
        public Duration duration() { return duration; }

        @Override
        public boolean deploysTo(Environment environment, Optional<RegionName> region) { return false; }

    }

    /** A deployment step which is to run deployment in a particular zone */
    public static class DeclaredZone extends Step {

        private final Environment environment;
        private final Optional<RegionName> region; // empty iff environment != prod
        private final boolean active;

        public DeclaredZone(Environment environment) {
            this(environment, Optional.empty(), false);
        }

        public DeclaredZone(Environment environment, Optional<RegionName> region, boolean active) {
            if (environment != Environment.prod && region.isPresent())
                throw new IllegalArgumentException("Non-prod environments cannot specify a region");
            if (environment == Environment.prod && ! region.isPresent())
                throw new IllegalArgumentException("Prod environments must be specified with a region");
            this.environment = environment;
            this.region = region;
            this.active = active;
        }

        public Environment environment() { return environment; }

        /** The region name, or empty if not declared */
        public Optional<RegionName> region() { return region; }

        /** Returns whether this zone should receive production traffic */
        public boolean active() { return active; }

        @Override
        public List<DeclaredZone> zones() { return Collections.singletonList(this); }

        @Override
        public boolean deploysTo(Environment environment, Optional<RegionName> region) {
            if (environment != this.environment) return false;
            if (region.isPresent() && ! region.equals(this.region)) return false;
            return true;
        }

        /** Identity is environment + region; 'active' is not part of it (consistent with equals) */
        @Override
        public int hashCode() {
            return Objects.hash(environment, region);
        }

        @Override
        public boolean equals(Object o) {
            if (o == this) return true;
            if ( ! (o instanceof DeclaredZone)) return false;
            DeclaredZone other = (DeclaredZone) o;
            return this.environment == other.environment && this.region.equals(other.region());
        }

        @Override
        public String toString() {
            return environment + (region.isPresent() ? "." + region.get() : "");
        }

    }

    /** A deployment step which is to run deployment to multiple zones in parallel */
    public static class ParallelZones extends Step {

        private final List<DeclaredZone> zones;

        public ParallelZones(List<DeclaredZone> zones) {
            this.zones = ImmutableList.copyOf(zones);
        }

        @Override
        public List<DeclaredZone> zones() { return this.zones; }

        @Override
        public boolean deploysTo(Environment environment, Optional<RegionName> region) {
            return zones.stream().anyMatch(zone -> zone.deploysTo(environment, region));
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if ( ! (o instanceof ParallelZones)) return false;
            ParallelZones that = (ParallelZones) o;
            return Objects.equals(zones, that.zones);
        }

        @Override
        public int hashCode() {
            return Objects.hash(zones);
        }

    }

    /** Controls when this application will be upgraded to new Vespa versions */
    public enum UpgradePolicy {
        /** Canary: Applications with this policy will upgrade before any other */
        canary,
        /** Default: Will upgrade after all canary applications upgraded successfully. The default. */
        defaultPolicy,
        /** Will upgrade after most default applications upgraded successfully */
        conservative
    }

    /** A blocking of changes in a given time window */
    public static class ChangeBlocker {

        private final boolean revision;
        private final boolean version;
        private final TimeWindow window;

        public ChangeBlocker(boolean revision, boolean version, TimeWindow window) {
            this.revision = revision;
            this.version = version;
            this.window = window;
        }

        /** Returns whether application revision changes are blocked in the window */
        public boolean blocksRevisions() { return revision; }

        /** Returns whether platform version changes are blocked in the window */
        public boolean blocksVersions() { return version; }

        /** Returns the time window in which changes are blocked */
        public TimeWindow window() { return window; }

    }

}
|
IMO, that copied code could be much simpler for Quarkus, but I copied it 1:1 instead. I'm not sure what the general policy is here.
|
/**
 * Returns the value of the given property from the merged contents of every
 * {@code liquibase.build.properties} resource visible to the current class loader,
 * or {@code "UNKNOWN"} if the property is absent.
 *
 * <p>The properties are loaded lazily on first call and cached in the static
 * {@code liquibaseBuildProperties} field. NOTE(review): this lazy initialization is
 * not thread-safe; concurrent first calls may load the resources more than once.
 * If loading fails, the error is logged and lookups fall back to "UNKNOWN".
 *
 * @param propertyId the property key to look up
 * @return the property value, or "UNKNOWN" when not present
 */
private static String getBuildInfo(String propertyId) {
    if (liquibaseBuildProperties == null) {
        try {
            liquibaseBuildProperties = new Properties();
            final Enumeration<URL> propertiesUrls = Scope.getCurrentScope().getClassLoader()
                    .getResources("liquibase.build.properties");
            while (propertiesUrls.hasMoreElements()) {
                final URL url = propertiesUrls.nextElement();
                // try-with-resources closes each stream even if load() throws.
                // URL.openStream() never returns null (it throws IOException on
                // failure), so no null check is needed here.
                try (InputStream buildProperties = url.openStream()) {
                    liquibaseBuildProperties.load(buildProperties);
                }
            }
        } catch (IOException e) {
            Scope.getCurrentScope().getLog(LiquibaseUtil.class).severe("Cannot read liquibase.build.properties", e);
        }
    }
    return liquibaseBuildProperties.getProperty(propertyId, "UNKNOWN");
}
|
/**
 * Looks up a build property by key, reading every {@code liquibase.build.properties}
 * resource on the classpath into a shared cache on first use.
 *
 * @param propertyId the key of the build property
 * @return the cached property value, or "UNKNOWN" when the key is missing
 */
private static String getBuildInfo(String propertyId) {
    // Lazily populate the shared Properties cache from all matching resources.
    if (liquibaseBuildProperties == null) {
        try {
            liquibaseBuildProperties = new Properties();
            final Enumeration<URL> resources = Scope.getCurrentScope().getClassLoader()
                    .getResources("liquibase.build.properties");
            while (resources.hasMoreElements()) {
                try (InputStream stream = resources.nextElement().openStream()) {
                    if (stream != null) {
                        liquibaseBuildProperties.load(stream);
                    }
                }
            }
        } catch (IOException e) {
            Scope.getCurrentScope().getLog(LiquibaseUtil.class).severe("Cannot read liquibase.build.properties", e);
        }
    }
    final String value = liquibaseBuildProperties.getProperty(propertyId);
    return value == null ? "UNKNOWN" : value;
}
|
// GraalVM native-image substitution class: @Alias re-exposes a field of the
// substituted Liquibase class, and @Substitute replaces a method body at image
// build time (presumably LiquibaseUtil's getBuildInfo — TODO confirm).
class SubstituteLiquibaseUtil {
    // Aliased to the static Properties cache of the substituted class.
    @Alias
    private static Properties liquibaseBuildProperties;
    // NOTE(review): no method follows this annotation here — the substituted
    // method body appears to be maintained separately; as written, this class
    // will not compile. Confirm the @Substitute method is present in the real file.
    @Substitute
}
|
// GraalVM native-image substitution class: @Alias re-exposes a field of the
// substituted Liquibase class, and @Substitute replaces a method body at image
// build time (presumably LiquibaseUtil's getBuildInfo — TODO confirm).
class SubstituteLiquibaseUtil {
    // Aliased to the static Properties cache of the substituted class.
    @Alias
    private static Properties liquibaseBuildProperties;
    // NOTE(review): no method follows this annotation here — the substituted
    // method body appears to be maintained separately; as written, this class
    // will not compile. Confirm the @Substitute method is present in the real file.
    @Substitute
}
|
|
There is duplicate code here and above for creating the `AccessToken`, which is a possible source of bugs in the future if they are not kept in sync. Consider creating a method to centralise it.
|
/**
 * Asynchronously acquires an access token from the Azure Instance Metadata Service
 * (IMDS) endpoint, retrying transient HTTP failures up to options.getMaxRetry() times.
 *
 * @param request the details of the token request; its scopes are converted to an
 *                AAD resource string for the IMDS query
 * @return a Publisher that emits an AccessToken, or an error if the endpoint is
 *         unreachable or all retries are exhausted
 */
public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) {
    String resource = ScopeUtil.scopesToResource(request.getScopes());
    StringBuilder payload = new StringBuilder();
    // Minimum backoff to apply when IMDS reports HTTP 410 (service upgrading): 70s.
    final int imdsUpgradeTimeInMs = 70 * 1000;
    try {
        // Build the URL-encoded query string: api-version, resource, optional client_id.
        payload.append("api-version=");
        payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
        payload.append("&resource=");
        payload.append(URLEncoder.encode(resource, "UTF-8"));
        if (clientId != null) {
            payload.append("&client_id=");
            payload.append(URLEncoder.encode(clientId, "UTF-8"));
        }
    } catch (IOException exception) {
        return Mono.error(exception);
    }
    return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> {
        int retry = 1;
        while (retry <= options.getMaxRetry()) {
            URL url = null;
            HttpURLConnection connection = null;
            try {
                // NOTE(review): the URL string literal is truncated in this source
                // (the part after "http:" is missing, likely stripped with comments);
                // presumably the IMDS token endpoint — confirm against the original.
                url =
                    new URL(String.format("http:
                        payload.toString()));
                connection = (HttpURLConnection) url.openConnection();
                connection.setRequestMethod("GET");
                // IMDS requires the Metadata:true header and rejects requests without it.
                connection.setRequestProperty("Metadata", "true");
                connection.connect();
                // Read the entire response body as one token ("\\A" = beginning of input).
                Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
                    .useDelimiter("\\A");
                String result = s.hasNext() ? s.next() : "";
                MSIToken msiToken = SERIALIZER_ADAPTER.<MSIToken>deserialize(result, MSIToken.class, SerializerEncoding.JSON);
                // Token expiry: IMDS expiresAt + 2 minutes, minus the configured
                // refresh-before-expiry offset.
                return new AccessToken(msiToken.getToken(),
                    msiToken.getExpiresAt().plusMinutes(2).minus(options.getRefreshBeforeExpiry()));
            } catch (IOException exception) {
                if (connection == null) {
                    throw logger.logExceptionAsError(new RuntimeException(
                        String.format("Could not connect to the url: %s.", url), exception));
                }
                int responseCode = connection.getResponseCode();
                // Retryable: 410 (IMDS upgrading), 429 (throttled), 404, and all 5xx.
                if (responseCode == 410
                    || responseCode == 429
                    || responseCode == 404
                    || (responseCode >= 500 && responseCode <= 599)) {
                    // Randomized backoff. NOTE(review): getNano() returns the
                    // nanosecond-of-second part, so /1000 yields microseconds, not
                    // milliseconds as the variable name suggests — looks like a unit
                    // bug; confirm intent before changing.
                    int retryTimeoutInMs = options.getRetryTimeout()
                        .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000;
                    // For 410, wait at least the IMDS upgrade window.
                    retryTimeoutInMs =
                        (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs
                            : retryTimeoutInMs;
                    retry++;
                    if (retry > options.getMaxRetry()) {
                        break;
                    } else {
                        sleep(retryTimeoutInMs);
                    }
                } else {
                    // Non-retryable status: surface as a configuration error.
                    throw logger.logExceptionAsError(new RuntimeException(
                        "Couldn't acquire access token from IMDS, verify your objectId, "
                            + "clientId or msiResourceId", exception));
                }
            } finally {
                if (connection != null) {
                    connection.disconnect();
                }
            }
        }
        throw logger.logExceptionAsError(new RuntimeException(
            String.format("MSI: Failed to acquire tokens after retrying %s times",
                options.getMaxRetry())));
    }));
}
|
msiToken.getExpiresAt().plusMinutes(2).minus(options.getRefreshBeforeExpiry()));
|
/**
 * Asynchronously acquires an access token from the Azure Instance Metadata Service
 * (IMDS) endpoint, retrying transient HTTP failures up to options.getMaxRetry() times.
 *
 * @param request the details of the token request; its scopes are converted to an
 *                AAD resource string for the IMDS query
 * @return a Publisher that emits an AccessToken, or an error if the endpoint is
 *         unreachable or all retries are exhausted
 */
public Mono<AccessToken> authenticateToIMDSEndpoint(TokenRequestContext request) {
    String resource = ScopeUtil.scopesToResource(request.getScopes());
    StringBuilder payload = new StringBuilder();
    // Minimum backoff to apply when IMDS reports HTTP 410 (service upgrading): 70s.
    final int imdsUpgradeTimeInMs = 70 * 1000;
    try {
        // Build the URL-encoded query string: api-version, resource, optional client_id.
        payload.append("api-version=");
        payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
        payload.append("&resource=");
        payload.append(URLEncoder.encode(resource, "UTF-8"));
        if (clientId != null) {
            payload.append("&client_id=");
            payload.append(URLEncoder.encode(clientId, "UTF-8"));
        }
    } catch (IOException exception) {
        return Mono.error(exception);
    }
    return checkIMDSAvailable().flatMap(available -> Mono.fromCallable(() -> {
        int retry = 1;
        while (retry <= options.getMaxRetry()) {
            URL url = null;
            HttpURLConnection connection = null;
            try {
                // NOTE(review): the URL string literal is truncated in this source
                // (the part after "http:" is missing, likely stripped with comments);
                // presumably the IMDS token endpoint — confirm against the original.
                url =
                    new URL(String.format("http:
                        payload.toString()));
                connection = (HttpURLConnection) url.openConnection();
                connection.setRequestMethod("GET");
                // IMDS requires the Metadata:true header and rejects requests without it.
                connection.setRequestProperty("Metadata", "true");
                connection.connect();
                // Read the entire response body as one token ("\\A" = beginning of input).
                Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
                    .useDelimiter("\\A");
                String result = s.hasNext() ? s.next() : "";
                MSIToken msiToken = SERIALIZER_ADAPTER.deserialize(result,
                    MSIToken.class, SerializerEncoding.JSON);
                // Wrap in IdentityToken, passing options — presumably it applies the
                // refresh/expiry offsets internally; confirm against IdentityToken.
                return new IdentityToken(msiToken.getToken(), msiToken.getExpiresAt(), options);
            } catch (IOException exception) {
                if (connection == null) {
                    throw logger.logExceptionAsError(new RuntimeException(
                        String.format("Could not connect to the url: %s.", url), exception));
                }
                int responseCode = connection.getResponseCode();
                // Retryable: 410 (IMDS upgrading), 429 (throttled), 404, and all 5xx.
                if (responseCode == 410
                    || responseCode == 429
                    || responseCode == 404
                    || (responseCode >= 500 && responseCode <= 599)) {
                    // Randomized backoff. NOTE(review): getNano() returns the
                    // nanosecond-of-second part, so /1000 yields microseconds, not
                    // milliseconds as the variable name suggests — looks like a unit
                    // bug; confirm intent before changing.
                    int retryTimeoutInMs = options.getRetryTimeout()
                        .apply(Duration.ofSeconds(RANDOM.nextInt(retry))).getNano() / 1000;
                    // For 410, wait at least the IMDS upgrade window.
                    retryTimeoutInMs =
                        (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs
                            : retryTimeoutInMs;
                    retry++;
                    if (retry > options.getMaxRetry()) {
                        break;
                    } else {
                        sleep(retryTimeoutInMs);
                    }
                } else {
                    // Non-retryable status: surface as a configuration error.
                    throw logger.logExceptionAsError(new RuntimeException(
                        "Couldn't acquire access token from IMDS, verify your objectId, "
                            + "clientId or msiResourceId", exception));
                }
            } finally {
                if (connection != null) {
                    connection.disconnect();
                }
            }
        }
        throw logger.logExceptionAsError(new RuntimeException(
            String.format("MSI: Failed to acquire tokens after retrying %s times",
                options.getMaxRetry())));
    }));
}
|
class IdentityClient {
private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter();
private static final Random RANDOM = new Random();
private final ClientLogger logger = new ClientLogger(IdentityClient.class);
private final IdentityClientOptions options;
private final PublicClientApplication publicClientApplication;
private final String tenantId;
private final String clientId;
/**
* Creates an IdentityClient with the given options.
*
* @param tenantId the tenant ID of the application.
* @param clientId the client ID of the application.
* @param options the options configuring the client.
*/
IdentityClient(String tenantId, String clientId, IdentityClientOptions options) {
if (tenantId == null) {
tenantId = "common";
}
if (options == null) {
options = new IdentityClientOptions();
}
this.tenantId = tenantId;
this.clientId = clientId;
this.options = options;
if (clientId == null) {
this.publicClientApplication = null;
} else {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId;
PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId);
if (options.getHttpPipeline() != null) {
publicClientApplicationBuilder = publicClientApplicationBuilder
.httpClient(new HttpPipelineAdapter(options.getHttpPipeline()));
}
try {
publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl);
} catch (MalformedURLException e) {
throw logger.logExceptionAsWarning(new IllegalStateException(e));
}
if (options.getProxyOptions() != null) {
publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
this.publicClientApplication = publicClientApplicationBuilder.build();
}
}
/**
* Asynchronously acquire a token from Active Directory with a client secret.
*
* @param clientSecret the client secret of the application
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
try {
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromSecret(clientSecret))
.authority(authorityUrl);
if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
if (options.getHttpPipeline() != null) {
applicationBuilder.httpClient(new HttpPipelineAdapter(options.getHttpPipeline()));
}
ConfidentialClientApplication application = applicationBuilder.build();
return Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
} catch (MalformedURLException e) {
return Mono.error(e);
}
}
/**
* Asynchronously acquire a token from Active Directory with a PKCS12 certificate.
*
* @param pfxCertificatePath the path to the PKCS12 certificate of the application
* @param pfxCertificatePassword the password protecting the PFX certificate
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword,
TokenRequestContext request) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
return Mono.fromCallable(() -> {
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
new FileInputStream(pfxCertificatePath), pfxCertificatePassword))
.authority(authorityUrl);
if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
if (options.getHttpPipeline() != null) {
applicationBuilder.httpClient(new HttpPipelineAdapter(options.getHttpPipeline()));
}
return applicationBuilder.build();
}).flatMap(application -> Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build())))
.map(ar -> new MsalToken(ar, options));
}
/**
* Asynchronously acquire a token from Active Directory with a PEM certificate.
*
* @param pemCertificatePath the path to the PEM certificate of the application
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) {
String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
try {
byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath));
ConfidentialClientApplication.Builder applicationBuilder =
ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
CertificateUtil.privateKeyFromPem(pemCertificateBytes),
CertificateUtil.publicKeyFromPem(pemCertificateBytes)))
.authority(authorityUrl);
if (options.getProxyOptions() != null) {
applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
}
if (options.getHttpPipeline() != null) {
applicationBuilder.httpClient(new HttpPipelineAdapter(options.getHttpPipeline()));
}
ConfidentialClientApplication application = applicationBuilder.build();
return Mono.fromFuture(application.acquireToken(
ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
.build()))
.map(ar -> new MsalToken(ar, options));
} catch (IOException e) {
return Mono.error(e);
}
}
/**
* Asynchronously acquire a token from Active Directory with a username and a password.
*
* @param request the details of the token request
* @param username the username of the user
* @param password the password of the user
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request,
                                                        String username, String password) {
    // Build the resource-owner-password-credentials request explicitly for readability.
    UserNamePasswordParameters parameters = UserNamePasswordParameters
        .builder(new HashSet<>(request.getScopes()), username, password.toCharArray())
        .build();
    // Hand the future off to Reactor and wrap the MSAL result as an MsalToken.
    return Mono.fromFuture(publicClientApplication.acquireToken(parameters))
        .map(result -> new MsalToken(result, options));
}
/**
* Asynchronously acquire a token from the currently logged in client.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) {
    HashSet<String> scopes = new HashSet<>(request.getScopes());
    // Scope the silent request to the cached account when one is available.
    final SilentParameters parameters = msalToken.getAccount() == null
        ? SilentParameters.builder(scopes).build()
        : SilentParameters.builder(scopes, msalToken.getAccount()).build();
    // Defer so the silent acquisition happens per subscription.
    return Mono.defer(() -> {
        try {
            return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters))
                .map(result -> new MsalToken(result, options));
        } catch (MalformedURLException e) {
            return Mono.error(e);
        }
    });
}
/**
* Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
* a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
* different device.
*
* @param request the details of the token request
* @param deviceCodeConsumer the user provided closure that will consume the device code challenge
* @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device
* code expires
*/
public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request,
                                                  Consumer<DeviceCodeInfo> deviceCodeConsumer) {
    // Defer the token request until subscription; MSAL invokes the callback with the
    // device-code challenge, which is adapted to the public DeviceCodeInfo type.
    return Mono.fromFuture(() -> publicClientApplication.acquireToken(
        DeviceCodeFlowParameters.builder(
            new HashSet<>(request.getScopes()),
            challenge -> deviceCodeConsumer.accept(new DeviceCodeInfo(
                challenge.userCode(),
                challenge.deviceCode(),
                challenge.verificationUri(),
                OffsetDateTime.now().plusSeconds(challenge.expiresIn()),
                challenge.message())))
            .build()))
        .map(result -> new MsalToken(result, options));
}
/**
* Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow.
*
* @param request the details of the token request
* @param authorizationCode the oauth2 authorization code
* @param redirectUrl the redirectUrl where the authorization code is sent to
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode,
                                                         URI redirectUrl) {
    // Exchange the OAuth2 authorization code for tokens, scoped to the request's scopes.
    return Mono.fromFuture(() -> {
        AuthorizationCodeParameters parameters = AuthorizationCodeParameters
            .builder(authorizationCode, redirectUrl)
            .scopes(new HashSet<>(request.getScopes()))
            .build();
        return publicClientApplication.acquireToken(parameters);
    }).map(result -> new MsalToken(result, options));
}
/**
* Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
* credential will run a minimal local HttpServer at the given port, so {@code http:
* listed as a valid reply URL for the application.
*
* @param request the details of the token request
* @param port the port on which the HTTP server is listening
* @return a Publisher that emits an AccessToken
*/
public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) {
    // Tenant-specific authority, trailing slashes trimmed from the configured host.
    String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
    // Start a one-shot local HTTP listener on the given port to receive the
    // authorization-code redirect from AAD.
    return AuthorizationCodeListener.create(port)
        .flatMap(server -> {
            URI redirectUri;
            String browserUri;
            try {
                // NOTE(review): the format string below is truncated in this copy
                // (everything after "//" appears stripped) — restore from upstream.
                redirectUri = new URI(String.format("http:
                browserUri =
                    String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt"
                        + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s",
                        authorityUrl,
                        clientId,
                        redirectUri.toString(),
                        UUID.randomUUID(),
                        String.join(" ", request.getScopes()));
            } catch (URISyntaxException e) {
                // Tear down the listener before propagating the error.
                return server.dispose().then(Mono.error(e));
            }
            return server.listen()
                // Open the system browser on a dedicated thread while the listener waits.
                .mergeWith(Mono.<String>fromRunnable(() -> {
                    try {
                        openUrl(browserUri);
                    } catch (IOException e) {
                        throw logger.logExceptionAsError(new IllegalStateException(e));
                    }
                }).subscribeOn(Schedulers.newSingle("browser")))
                .next()
                // Exchange the received code for tokens; dispose the server on both
                // the success and the error path.
                .flatMap(code -> authenticateWithAuthorizationCode(request, code, redirectUri))
                .onErrorResume(t -> server.dispose().then(Mono.error(t)))
                .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken)));
        });
}
/**
* Asynchronously acquire a token from the App Service Managed Service Identity endpoint.
*
* @param msiEndpoint the endpoint to acquire token from
* @param msiSecret the secret to acquire token with
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret,
                                                               TokenRequestContext request) {
    // Defer ALL blocking work (query building + HTTP round trip) until subscription.
    // The original executed the HTTP call eagerly on the assembling thread, which
    // violates the Mono contract; the sibling copy of this class already wraps the
    // same logic in Mono.fromCallable — this makes the two consistent. Checked
    // exceptions thrown inside the callable are delivered as onError signals.
    return Mono.fromCallable(() -> {
        String resource = ScopeUtil.scopesToResource(request.getScopes());
        StringBuilder payload = new StringBuilder();
        payload.append("resource=");
        payload.append(URLEncoder.encode(resource, "UTF-8"));
        payload.append("&api-version=");
        payload.append(URLEncoder.encode("2017-09-01", "UTF-8"));
        if (clientId != null) {
            payload.append("&clientid=");
            payload.append(URLEncoder.encode(clientId, "UTF-8"));
        }
        HttpURLConnection connection = null;
        try {
            URL url = new URL(String.format("%s?%s", msiEndpoint, payload));
            connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("GET");
            if (msiSecret != null) {
                // App Service MSI authenticates the caller with this shared-secret header.
                connection.setRequestProperty("Secret", msiSecret);
            }
            connection.setRequestProperty("Metadata", "true");
            connection.connect();
            String result;
            // try-with-resources closes the Scanner (and the underlying stream),
            // which the original leaked.
            try (Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
                    .useDelimiter("\\A")) {
                result = s.hasNext() ? s.next() : "";
            }
            MSIToken msiToken = SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
            // Preserve the original expiry adjustment: pad by 2 minutes, then subtract
            // the configured refresh-before-expiry offset.
            return new AccessToken(msiToken.getToken(),
                msiToken.getExpiresAt().plusMinutes(2).minus(options.getRefreshBeforeExpiry()));
        } finally {
            if (connection != null) {
                connection.disconnect();
            }
        }
    });
}
/**
* Asynchronously acquire a token from the Virtual Machine IMDS endpoint.
*
* @param request the details of the token request
* @return a Publisher that emits an AccessToken
*/
private Mono<Boolean> checkIMDSAvailable() {
    // Probe the Azure Instance Metadata Service (IMDS) to find out whether we are
    // running on an Azure VM; emits true when a connection succeeds.
    StringBuilder payload = new StringBuilder();
    try {
        payload.append("api-version=");
        payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
    } catch (IOException exception) {
        return Mono.error(exception);
    }
    return Mono.fromCallable(() -> {
        HttpURLConnection connection = null;
        // NOTE(review): the IMDS URL literal below is truncated in this copy
        // (everything after "//" appears stripped) — restore from upstream.
        URL url = new URL(String.format("http:
        payload.toString()));
        try {
            connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("GET");
            // Short timeout: if IMDS is unreachable we want to fail fast.
            connection.setConnectTimeout(500);
            connection.connect();
        } finally {
            if (connection != null) {
                connection.disconnect();
            }
        }
        // Reaching this point means the endpoint accepted a connection.
        return true;
    });
}
/**
 * Sleeps for the given number of milliseconds.
 *
 * @param millis how long to sleep
 * @throws IllegalStateException if the thread is interrupted while sleeping
 */
private static void sleep(int millis) {
    try {
        Thread.sleep(millis);
    } catch (InterruptedException ex) {
        // Restore the interrupt flag so callers up the stack can still observe it
        // (the original swallowed it), then surface the interruption unchecked.
        Thread.currentThread().interrupt();
        throw new IllegalStateException(ex);
    }
}
/** Maps the SDK proxy configuration onto a {@link java.net.Proxy}. */
private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions proxyOptions) {
    // SOCKS4/SOCKS5 become SOCKS proxies; HTTP and anything unrecognized fall back
    // to an HTTP proxy, exactly as before.
    switch (proxyOptions.getType()) {
        case SOCKS4:
        case SOCKS5:
            return new Proxy(Type.SOCKS, proxyOptions.getAddress());
        default:
            return new Proxy(Type.HTTP, proxyOptions.getAddress());
    }
}
/**
 * Opens the platform's default browser at the given URL.
 *
 * <p>Uses the {@code String[]} overload of {@link Runtime#exec} so the URL is passed
 * as a single argument instead of being tokenized on whitespace by
 * {@code Runtime.exec(String)}.
 *
 * @param url the URL to open
 * @throws IOException if the launcher process cannot be started
 */
void openUrl(String url) throws IOException {
    Runtime rt = Runtime.getRuntime();
    String os = System.getProperty("os.name").toLowerCase(Locale.ROOT);
    if (os.contains("win")) {
        rt.exec(new String[] {"rundll32", "url.dll,FileProtocolHandler", url});
    } else if (os.contains("mac")) {
        rt.exec(new String[] {"open", url});
    } else if (os.contains("nix") || os.contains("nux")) {
        rt.exec(new String[] {"xdg-open", url});
    } else {
        // No known launcher for this OS: ask the user to open the URL manually.
        logger.error("Browser could not be opened - please open {} in a browser on this device.", url);
    }
}
}
|
/**
 * Low-level client that acquires Azure Active Directory tokens through MSAL
 * (client secret / certificate / username-password / device code / browser),
 * or through managed-identity endpoints.
 *
 * NOTE(review): several string literals in this copy are truncated (text after
 * "//" appears stripped by whatever produced this file) and must be restored
 * from upstream before this compiles; see the inline notes below.
 */
class IdentityClient {
    private static final SerializerAdapter SERIALIZER_ADAPTER = JacksonAdapter.createDefaultSerializerAdapter();
    // NOTE(review): RANDOM is not referenced anywhere in this class — candidate for removal.
    private static final Random RANDOM = new Random();
    private final ClientLogger logger = new ClientLogger(IdentityClient.class);
    private final IdentityClientOptions options;
    private final PublicClientApplication publicClientApplication;
    private final String tenantId;
    private final String clientId;
    // Adapter bridging azure-core's HttpPipeline to MSAL's HTTP client; stays null
    // when a plain java.net proxy is configured instead of a pipeline/client.
    private HttpPipelineAdapter httpPipelineAdapter;
    /**
     * Creates an IdentityClient with the given options.
     *
     * @param tenantId the tenant ID of the application.
     * @param clientId the client ID of the application.
     * @param options the options configuring the client.
     */
    IdentityClient(String tenantId, String clientId, IdentityClientOptions options) {
        if (tenantId == null) {
            tenantId = "common";
        }
        if (options == null) {
            options = new IdentityClientOptions();
        }
        this.tenantId = tenantId;
        this.clientId = clientId;
        this.options = options;
        if (clientId == null) {
            // Without a client id the public-client (user) flows are unavailable.
            this.publicClientApplication = null;
        } else {
            String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/organizations/" + tenantId;
            PublicClientApplication.Builder publicClientApplicationBuilder = PublicClientApplication.builder(clientId);
            try {
                publicClientApplicationBuilder = publicClientApplicationBuilder.authority(authorityUrl);
            } catch (MalformedURLException e) {
                throw logger.logExceptionAsWarning(new IllegalStateException(e));
            }
            // HTTP transport precedence: explicit pipeline > explicit client > proxy > default client.
            HttpPipeline httpPipeline = options.getHttpPipeline();
            if (httpPipeline != null) {
                httpPipelineAdapter = new HttpPipelineAdapter(httpPipeline);
                publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
            } else {
                HttpClient httpClient = options.getHttpClient();
                if (httpClient != null) {
                    httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(httpClient));
                    publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
                } else if (options.getProxyOptions() != null) {
                    publicClientApplicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
                } else {
                    httpPipelineAdapter = new HttpPipelineAdapter(setupPipeline(HttpClient.createDefault()));
                    publicClientApplicationBuilder.httpClient(httpPipelineAdapter);
                }
            }
            this.publicClientApplication = publicClientApplicationBuilder.build();
        }
    }
    /**
     * Asynchronously acquire a token from Active Directory with a client secret.
     *
     * @param clientSecret the client secret of the application
     * @param request the details of the token request
     * @return a Publisher that emits an AccessToken
     */
    public Mono<AccessToken> authenticateWithClientSecret(String clientSecret, TokenRequestContext request) {
        String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
        try {
            ConfidentialClientApplication.Builder applicationBuilder =
                ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromSecret(clientSecret))
                    .authority(authorityUrl);
            if (httpPipelineAdapter != null) {
                applicationBuilder.httpClient(httpPipelineAdapter);
            } else if (options.getProxyOptions() != null) {
                applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
            }
            ConfidentialClientApplication application = applicationBuilder.build();
            // Client-credentials grant; result wrapped as an MsalToken.
            return Mono.fromFuture(application.acquireToken(
                ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                    .build()))
                .map(ar -> new MsalToken(ar, options));
        } catch (MalformedURLException e) {
            return Mono.error(e);
        }
    }
    // Builds a minimal azure-core pipeline (retry + logging) around the given client.
    private HttpPipeline setupPipeline(HttpClient httpClient) {
        List<HttpPipelinePolicy> policies = new ArrayList<>();
        HttpLogOptions httpLogOptions = new HttpLogOptions();
        HttpPolicyProviders.addBeforeRetryPolicies(policies);
        policies.add(new RetryPolicy());
        HttpPolicyProviders.addAfterRetryPolicies(policies);
        policies.add(new HttpLoggingPolicy(httpLogOptions));
        return new HttpPipelineBuilder().httpClient(httpClient)
            .policies(policies.toArray(new HttpPipelinePolicy[0])).build();
    }
    /**
     * Asynchronously acquire a token from Active Directory with a PKCS12 certificate.
     *
     * @param pfxCertificatePath the path to the PKCS12 certificate of the application
     * @param pfxCertificatePassword the password protecting the PFX certificate
     * @param request the details of the token request
     * @return a Publisher that emits an AccessToken
     */
    public Mono<AccessToken> authenticateWithPfxCertificate(String pfxCertificatePath, String pfxCertificatePassword,
                                                            TokenRequestContext request) {
        String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
        // Certificate is opened lazily, inside fromCallable, on subscription.
        return Mono.fromCallable(() -> {
            ConfidentialClientApplication.Builder applicationBuilder =
                ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
                    new FileInputStream(pfxCertificatePath), pfxCertificatePassword))
                    .authority(authorityUrl);
            if (httpPipelineAdapter != null) {
                applicationBuilder.httpClient(httpPipelineAdapter);
            } else if (options.getProxyOptions() != null) {
                applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
            }
            return applicationBuilder.build();
        }).flatMap(application -> Mono.fromFuture(application.acquireToken(
            ClientCredentialParameters.builder(new HashSet<>(request.getScopes())).build())))
            .map(ar -> new MsalToken(ar, options));
    }
    /**
     * Asynchronously acquire a token from Active Directory with a PEM certificate.
     *
     * @param pemCertificatePath the path to the PEM certificate of the application
     * @param request the details of the token request
     * @return a Publisher that emits an AccessToken
     */
    public Mono<AccessToken> authenticateWithPemCertificate(String pemCertificatePath, TokenRequestContext request) {
        String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
        try {
            // NOTE(review): the PEM file is read eagerly, before subscription —
            // inconsistent with the PFX variant above; confirm intended.
            byte[] pemCertificateBytes = Files.readAllBytes(Paths.get(pemCertificatePath));
            ConfidentialClientApplication.Builder applicationBuilder =
                ConfidentialClientApplication.builder(clientId, ClientCredentialFactory.createFromCertificate(
                    CertificateUtil.privateKeyFromPem(pemCertificateBytes),
                    CertificateUtil.publicKeyFromPem(pemCertificateBytes)))
                    .authority(authorityUrl);
            if (httpPipelineAdapter != null) {
                applicationBuilder.httpClient(httpPipelineAdapter);
            } else if (options.getProxyOptions() != null) {
                applicationBuilder.proxy(proxyOptionsToJavaNetProxy(options.getProxyOptions()));
            }
            ConfidentialClientApplication application = applicationBuilder.build();
            return Mono.fromFuture(application.acquireToken(
                ClientCredentialParameters.builder(new HashSet<>(request.getScopes()))
                    .build()))
                .map(ar -> new MsalToken(ar, options));
        } catch (IOException e) {
            return Mono.error(e);
        }
    }
    /**
     * Asynchronously acquire a token from Active Directory with a username and a password.
     *
     * @param request the details of the token request
     * @param username the username of the user
     * @param password the password of the user
     * @return a Publisher that emits an AccessToken
     */
    public Mono<MsalToken> authenticateWithUsernamePassword(TokenRequestContext request,
                                                            String username, String password) {
        return Mono.fromFuture(publicClientApplication.acquireToken(
            UserNamePasswordParameters.builder(new HashSet<>(request.getScopes()), username, password.toCharArray())
                .build()))
            .map(ar -> new MsalToken(ar, options));
    }
    /**
     * Asynchronously acquire a token from the currently logged in client.
     *
     * @param request the details of the token request
     * @return a Publisher that emits an AccessToken
     */
    public Mono<MsalToken> authenticateWithUserRefreshToken(TokenRequestContext request, MsalToken msalToken) {
        SilentParameters parameters;
        // Scope the silent request to the cached account when one is available.
        if (msalToken.getAccount() != null) {
            parameters = SilentParameters.builder(new HashSet<>(request.getScopes()), msalToken.getAccount()).build();
        } else {
            parameters = SilentParameters.builder(new HashSet<>(request.getScopes())).build();
        }
        return Mono.defer(() -> {
            try {
                return Mono.fromFuture(publicClientApplication.acquireTokenSilently(parameters))
                    .map(ar -> new MsalToken(ar, options));
            } catch (MalformedURLException e) {
                return Mono.error(e);
            }
        });
    }
    /**
     * Asynchronously acquire a token from Active Directory with a device code challenge. Active Directory will provide
     * a device code for login and the user must meet the challenge by authenticating in a browser on the current or a
     * different device.
     *
     * @param request the details of the token request
     * @param deviceCodeConsumer the user provided closure that will consume the device code challenge
     * @return a Publisher that emits an AccessToken when the device challenge is met, or an exception if the device
     * code expires
     */
    public Mono<MsalToken> authenticateWithDeviceCode(TokenRequestContext request,
                                                      Consumer<DeviceCodeInfo> deviceCodeConsumer) {
        return Mono.fromFuture(() -> {
            // MSAL invokes the callback with the challenge; adapt it to DeviceCodeInfo.
            DeviceCodeFlowParameters parameters = DeviceCodeFlowParameters.builder(new HashSet<>(request.getScopes()),
                dc -> deviceCodeConsumer.accept(new DeviceCodeInfo(dc.userCode(), dc.deviceCode(),
                    dc.verificationUri(), OffsetDateTime.now().plusSeconds(dc.expiresIn()), dc.message()))).build();
            return publicClientApplication.acquireToken(parameters);
        }).map(ar -> new MsalToken(ar, options));
    }
    /**
     * Asynchronously acquire a token from Active Directory with an authorization code from an oauth flow.
     *
     * @param request the details of the token request
     * @param authorizationCode the oauth2 authorization code
     * @param redirectUrl the redirectUrl where the authorization code is sent to
     * @return a Publisher that emits an AccessToken
     */
    public Mono<MsalToken> authenticateWithAuthorizationCode(TokenRequestContext request, String authorizationCode,
                                                             URI redirectUrl) {
        return Mono.fromFuture(() -> publicClientApplication.acquireToken(
            AuthorizationCodeParameters.builder(authorizationCode, redirectUrl)
                .scopes(new HashSet<>(request.getScopes()))
                .build()))
            .map(ar -> new MsalToken(ar, options));
    }
    /**
     * Asynchronously acquire a token from Active Directory by opening a browser and wait for the user to login. The
     * credential will run a minimal local HttpServer at the given port, so {@code http://localhost:{port}}
     * must be listed as a valid reply URL for the application.
     *
     * @param request the details of the token request
     * @param port the port on which the HTTP server is listening
     * @return a Publisher that emits an AccessToken
     */
    public Mono<MsalToken> authenticateWithBrowserInteraction(TokenRequestContext request, int port) {
        String authorityUrl = options.getAuthorityHost().replaceAll("/+$", "") + "/" + tenantId;
        // One-shot local listener receives the authorization-code redirect from AAD.
        return AuthorizationCodeListener.create(port)
            .flatMap(server -> {
                URI redirectUri;
                String browserUri;
                try {
                    // NOTE(review): format string truncated in this copy — restore from upstream.
                    redirectUri = new URI(String.format("http:
                    browserUri =
                        String.format("%s/oauth2/v2.0/authorize?response_type=code&response_mode=query&prompt"
                            + "=select_account&client_id=%s&redirect_uri=%s&state=%s&scope=%s",
                            authorityUrl,
                            clientId,
                            redirectUri.toString(),
                            UUID.randomUUID(),
                            String.join(" ", request.getScopes()));
                } catch (URISyntaxException e) {
                    // Tear down the listener before propagating the error.
                    return server.dispose().then(Mono.error(e));
                }
                return server.listen()
                    // Open the system browser on a dedicated thread alongside the listener.
                    .mergeWith(Mono.<String>fromRunnable(() -> {
                        try {
                            openUrl(browserUri);
                        } catch (IOException e) {
                            throw logger.logExceptionAsError(new IllegalStateException(e));
                        }
                    }).subscribeOn(Schedulers.newSingle("browser")))
                    .next()
                    // Exchange the received code for tokens; always dispose the server.
                    .flatMap(code -> authenticateWithAuthorizationCode(request, code, redirectUri))
                    .onErrorResume(t -> server.dispose().then(Mono.error(t)))
                    .flatMap(msalToken -> server.dispose().then(Mono.just(msalToken)));
            });
    }
    /**
     * Asynchronously acquire a token from the App Service Managed Service Identity endpoint.
     *
     * @param msiEndpoint the endpoint to acquire token from
     * @param msiSecret the secret to acquire token with
     * @param request the details of the token request
     * @return a Publisher that emits an AccessToken
     */
    public Mono<AccessToken> authenticateToManagedIdentityEndpoint(String msiEndpoint, String msiSecret,
                                                                   TokenRequestContext request) {
        // All blocking work (query building + HTTP round trip) is deferred to subscription.
        return Mono.fromCallable(() -> {
            String resource = ScopeUtil.scopesToResource(request.getScopes());
            HttpURLConnection connection = null;
            StringBuilder payload = new StringBuilder();
            payload.append("resource=");
            payload.append(URLEncoder.encode(resource, "UTF-8"));
            payload.append("&api-version=");
            payload.append(URLEncoder.encode("2017-09-01", "UTF-8"));
            if (clientId != null) {
                payload.append("&clientid=");
                payload.append(URLEncoder.encode(clientId, "UTF-8"));
            }
            try {
                URL url = new URL(String.format("%s?%s", msiEndpoint, payload));
                connection = (HttpURLConnection) url.openConnection();
                connection.setRequestMethod("GET");
                if (msiSecret != null) {
                    // App Service MSI authenticates the caller with this shared-secret header.
                    connection.setRequestProperty("Secret", msiSecret);
                }
                connection.setRequestProperty("Metadata", "true");
                connection.connect();
                // NOTE(review): the Scanner (and underlying stream) is never closed.
                Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())
                    .useDelimiter("\\A");
                String result = s.hasNext() ? s.next() : "";
                MSIToken msiToken = SERIALIZER_ADAPTER.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
                return new IdentityToken(msiToken.getToken(), msiToken.getExpiresAt(), options);
            } finally {
                if (connection != null) {
                    connection.disconnect();
                }
            }
        });
    }
    /**
     * Asynchronously acquire a token from the Virtual Machine IMDS endpoint.
     *
     * @param request the details of the token request
     * @return a Publisher that emits an AccessToken
     */
    private Mono<Boolean> checkIMDSAvailable() {
        // Probe the Azure Instance Metadata Service; emits true on a successful connect.
        StringBuilder payload = new StringBuilder();
        try {
            payload.append("api-version=");
            payload.append(URLEncoder.encode("2018-02-01", "UTF-8"));
        } catch (IOException exception) {
            return Mono.error(exception);
        }
        return Mono.fromCallable(() -> {
            HttpURLConnection connection = null;
            // NOTE(review): the IMDS URL literal below is truncated in this copy —
            // restore from upstream.
            URL url = new URL(String.format("http:
            payload.toString()));
            try {
                connection = (HttpURLConnection) url.openConnection();
                connection.setRequestMethod("GET");
                // Short timeout: fail fast when IMDS is unreachable.
                connection.setConnectTimeout(500);
                connection.connect();
            } finally {
                if (connection != null) {
                    connection.disconnect();
                }
            }
            return true;
        });
    }
    // Sleeps for the given milliseconds; interruption is surfaced unchecked.
    // NOTE(review): the interrupt flag is swallowed here — consider re-interrupting.
    private static void sleep(int millis) {
        try {
            Thread.sleep(millis);
        } catch (InterruptedException ex) {
            throw new IllegalStateException(ex);
        }
    }
    // Maps the SDK proxy configuration onto a java.net.Proxy; anything that is not
    // an explicit SOCKS variant becomes an HTTP proxy.
    private static Proxy proxyOptionsToJavaNetProxy(ProxyOptions options) {
        switch (options.getType()) {
            case SOCKS4:
            case SOCKS5:
                return new Proxy(Type.SOCKS, options.getAddress());
            case HTTP:
            default:
                return new Proxy(Type.HTTP, options.getAddress());
        }
    }
    // Opens the platform browser at the given URL.
    // NOTE(review): Runtime.exec(String) tokenizes on whitespace — the String[]
    // overload would be safer for arbitrary URLs.
    void openUrl(String url) throws IOException {
        Runtime rt = Runtime.getRuntime();
        String os = System.getProperty("os.name").toLowerCase(Locale.ROOT);
        if (os.contains("win")) {
            rt.exec("rundll32 url.dll,FileProtocolHandler " + url);
        } else if (os.contains("mac")) {
            rt.exec("open " + url);
        } else if (os.contains("nix") || os.contains("nux")) {
            rt.exec("xdg-open " + url);
        } else {
            logger.error("Browser could not be opened - please open {} in a browser on this device.", url);
        }
    }
}
|
_Maybe_ INFO — but why log a warning at all for something that is expected and not a problem?
|
void converge() {
    // Drives the node toward the state the node repository wants, one tick at a time.
    final Optional<ContainerNodeSpec> nodeSpecOptional = nodeRepository.getContainerNodeSpec(hostname);
    // If the node was just handed back (marked ready below), it is expected to be
    // absent from the node repo — that is not an error.
    if (!nodeSpecOptional.isPresent() && expectNodeNotInNodeRepo) return;
    final ContainerNodeSpec nodeSpec = nodeSpecOptional.orElseThrow(() ->
        new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname)));
    expectNodeNotInNodeRepo = false;
    Optional<Container> container = getContainer();
    if (!nodeSpec.equals(lastNodeSpec)) {
        addDebugMessage("Loading new node spec: " + nodeSpec.toString());
        lastNodeSpec = nodeSpec;
        // Re-write metrics config whenever the spec changes while a container exists.
        if (container.isPresent()) {
            storageMaintainer.writeMetricsConfig(containerName, nodeSpec);
        }
    }
    switch (nodeSpec.nodeState) {
        case ready:
        case reserved:
        case parked:
        case failed:
            // Non-running states: ensure no container is up, then report attributes.
            removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            break;
        case active:
            storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false);
            // Reclaim space once disk utilization reaches 80% of the node's allocation.
            storageMaintainer.getDiskUsageFor(containerName)
                .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / nodeSpec.minDiskAvailableGb)
                .filter(diskUtil -> diskUtil >= 0.8)
                .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(containerName));
            scheduleDownLoadIfNeeded(nodeSpec);
            // Do nothing further this tick while the wanted image is still downloading.
            if (isDownloadingImage()) {
                addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString());
                return;
            }
            container = removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            if (! container.isPresent()) {
                storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false);
                startContainer(nodeSpec);
            }
            runLocalResumeScriptIfNeeded();
            // Report attributes before telling the Orchestrator we are resumed —
            // NOTE(review): ordering appears deliberate; confirm before changing.
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            logger.info("Call resume against Orchestrator");
            orchestrator.resume(hostname);
            break;
        case inactive:
            removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            break;
        case provisioned:
            nodeRepository.markAsDirty(hostname);
            break;
        case dirty:
            removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready");
            storageMaintainer.cleanupNodeStorage(containerName, nodeSpec);
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            nodeRepository.markNodeAvailableForNewAllocation(hostname);
            // The node will now disappear from the node repo; tolerate that next tick.
            expectNodeNotInNodeRepo = true;
            break;
        default:
            throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name());
    }
}
|
if (!nodeSpecOptional.isPresent() && expectNodeNotInNodeRepo) return;
|
void converge() {
    // Drives the node toward the state the node repository wants, one tick at a time.
    final Optional<ContainerNodeSpec> nodeSpecOptional = nodeRepository.getContainerNodeSpec(hostname);
    // If the node was just handed back (marked ready below), it is expected to be
    // absent from the node repo — that is not an error.
    if (!nodeSpecOptional.isPresent() && expectNodeNotInNodeRepo) return;
    final ContainerNodeSpec nodeSpec = nodeSpecOptional.orElseThrow(() ->
        new IllegalStateException(String.format("Node '%s' missing from node repository.", hostname)));
    expectNodeNotInNodeRepo = false;
    Optional<Container> container = getContainer();
    if (!nodeSpec.equals(lastNodeSpec)) {
        addDebugMessage("Loading new node spec: " + nodeSpec.toString());
        lastNodeSpec = nodeSpec;
        // Re-write metrics config whenever the spec changes while a container exists.
        if (container.isPresent()) {
            storageMaintainer.writeMetricsConfig(containerName, nodeSpec);
        }
    }
    switch (nodeSpec.nodeState) {
        case ready:
        case reserved:
        case parked:
        case failed:
            // Non-running states: ensure no container is up, then report attributes.
            removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            break;
        case active:
            storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false);
            // Reclaim space once disk utilization reaches 80% of the node's allocation.
            storageMaintainer.getDiskUsageFor(containerName)
                .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / nodeSpec.minDiskAvailableGb)
                .filter(diskUtil -> diskUtil >= 0.8)
                .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(containerName));
            scheduleDownLoadIfNeeded(nodeSpec);
            // Do nothing further this tick while the wanted image is still downloading.
            if (isDownloadingImage()) {
                addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString());
                return;
            }
            container = removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            if (! container.isPresent()) {
                storageMaintainer.handleCoreDumpsForContainer(containerName, nodeSpec, false);
                startContainer(nodeSpec);
            }
            runLocalResumeScriptIfNeeded();
            // Report attributes before telling the Orchestrator we are resumed —
            // NOTE(review): ordering appears deliberate; confirm before changing.
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            logger.info("Call resume against Orchestrator");
            orchestrator.resume(hostname);
            break;
        case inactive:
            removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            break;
        case provisioned:
            nodeRepository.markAsDirty(hostname);
            break;
        case dirty:
            removeContainerIfNeededUpdateContainerState(nodeSpec, container);
            logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready");
            storageMaintainer.cleanupNodeStorage(containerName, nodeSpec);
            updateNodeRepoWithCurrentAttributes(nodeSpec);
            nodeRepository.markNodeAvailableForNewAllocation(hostname);
            // The node will now disappear from the node repo; tolerate that next tick.
            expectNodeNotInNodeRepo = true;
            break;
        default:
            throw new RuntimeException("UNKNOWN STATE " + nodeSpec.nodeState.name());
    }
}
|
class NodeAgentImpl implements NodeAgent {
private static final long BYTES_IN_GB = 1_000_000_000L;
private final AtomicBoolean terminated = new AtomicBoolean(false);
private boolean isFrozen = true;
private boolean wantFrozen = false;
private boolean workToDoNow = true;
private boolean expectNodeNotInNodeRepo = false;
private final Object monitor = new Object();
private final PrefixLogger logger;
private DockerImage imageBeingDownloaded = null;
private final ContainerName containerName;
private final String hostname;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final StorageMaintainer storageMaintainer;
private final Runnable aclMaintainer;
private final Environment environment;
private final Clock clock;
private final Duration timeBetweenEachConverge;
private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
private final LinkedList<String> debugMessages = new LinkedList<>();
private int numberOfUnhandledException = 0;
private Instant lastConverge;
private final Thread loopThread;
private final ScheduledExecutorService filebeatRestarter =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
private Consumer<String> serviceRestarter;
private Future<?> currentFilebeatRestarter;
private boolean resumeScriptRun = false;
/**
* ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
* NodeAgent explicitly starting it.
* Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
* NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
* to get updated state of the container.
*/
enum ContainerState {
ABSENT,
UNKNOWN
}
private ContainerState containerState = UNKNOWN;
private NodeAttributes lastAttributesSet = null;
private ContainerNodeSpec lastNodeSpec = null;
private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();
/**
 * Creates a node agent for the node with the given hostname; collaborators are injected.
 * The tick loop thread is created here but not started until {@code start()} is called.
 */
public NodeAgentImpl(
        final String hostName,
        final NodeRepository nodeRepository,
        final Orchestrator orchestrator,
        final DockerOperations dockerOperations,
        final StorageMaintainer storageMaintainer,
        final Runnable aclMaintainer,
        final Environment environment,
        final Clock clock,
        final Duration timeBetweenEachConverge) {
    this.containerName = ContainerName.fromHostname(hostName);
    this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName);
    this.hostname = hostName;
    this.nodeRepository = nodeRepository;
    this.orchestrator = orchestrator;
    this.dockerOperations = dockerOperations;
    this.storageMaintainer = storageMaintainer;
    this.aclMaintainer = aclMaintainer;
    this.environment = environment;
    this.clock = clock;
    this.timeBetweenEachConverge = timeBetweenEachConverge;
    this.lastConverge = clock.instant();
    // The tick loop runs until stop() flips the terminated flag.
    this.loopThread = new Thread(() -> {
        while (!terminated.get()) tick();
    });
    this.loopThread.setName("tick-" + hostname);
}
@Override
public boolean setFrozen(boolean frozen) {
    // Records the wanted frozen state and returns whether the agent has already
    // reached it; the actual transition happens in the tick loop.
    synchronized (monitor) {
        if (wantFrozen != frozen) {
            wantFrozen = frozen;
            addDebugMessage(wantFrozen ? "Freezing" : "Unfreezing");
            // Wake the tick loop so the new wanted state is acted on promptly.
            signalWorkToBeDone();
        }
        return isFrozen == frozen;
    }
}
private void addDebugMessage(String message) {
    // Keeps a bounded (~1000 entries) in-memory log of recent events for debugInfo().
    synchronized (debugMessages) {
        while (debugMessages.size() > 1000) {
            debugMessages.pop();
        }
        logger.debug(message);
        // sdf (SimpleDateFormat, not thread-safe) is only ever used while holding
        // the debugMessages lock, which makes this use safe.
        debugMessages.add("[" + sdf.format(new Date()) + "] " + message);
    }
}
@Override
public Map<String, Object> debugInfo() {
    // Snapshot of the agent's state for debugging/introspection endpoints.
    Map<String, Object> debug = new LinkedHashMap<>();
    debug.put("Hostname", hostname);
    debug.put("isFrozen", isFrozen);
    debug.put("wantFrozen", wantFrozen);
    debug.put("terminated", terminated);
    debug.put("workToDoNow", workToDoNow);
    synchronized (debugMessages) {
        // Copy under the lock so callers get a stable snapshot.
        debug.put("History", new LinkedList<>(debugMessages));
    }
    // lastNodeSpec is null until the first successful converge; the original
    // dereferenced it unconditionally and could NPE if called early.
    debug.put("Node repo state", lastNodeSpec == null ? "unknown" : lastNodeSpec.nodeState.name());
    return debug;
}
@Override
public void start() {
    // Starts the tick loop and prepares the in-container service restarter.
    String message = "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms";
    logger.info(message);
    addDebugMessage(message);
    loopThread.start();
    // Restarts a named service inside the container via "service <name> restart";
    // failures are logged, never propagated.
    serviceRestarter = service -> {
        try {
            ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
                containerName, "service", service, "restart");
            if (!processResult.isSuccess()) {
                logger.error("Failed to restart service " + service + ": " + processResult);
            }
        } catch (Exception e) {
            logger.error("Failed to restart service " + service, e);
        }
    };
}
/**
 * Stops the agent and blocks until both the converge thread and the filebeat-restart
 * scheduler have terminated. May only be called once.
 *
 * @throws RuntimeException if the agent was already stopped
 */
@Override
public void stop() {
    addDebugMessage("Stopping");
    // Stop accepting new scheduled restarts before tearing down the loop.
    filebeatRestarter.shutdown();
    if (!terminated.compareAndSet(false, true)) {
        throw new RuntimeException("Can not re-stop a node agent.");
    }
    // Wake the tick thread so it observes the terminated flag promptly.
    signalWorkToBeDone();
    // Interrupts are deliberately swallowed: the loop keeps re-joining until both the
    // converge thread and the scheduler have actually finished (re-interrupting here
    // would make join() throw again immediately and busy-loop).
    do {
        try {
            loopThread.join();
            filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } catch (InterruptedException e) {
            logger.error("Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown");
        }
    } while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
    logger.info("Stopped");
}
/** Runs the node's optional resume program once per container lifetime. */
private void runLocalResumeScriptIfNeeded() {
    if (resumeScriptRun) return;
    addDebugMessage("Starting optional node program resume command");
    dockerOperations.resumeNode(containerName);
    // Flag is reset by startContainer() when a new container comes up.
    resumeScriptRun = true;
}
/**
 * Builds the node attributes to report back to the node repository and publishes them
 * if they differ from the last published set. While the container is absent, the image
 * and Vespa version are reported as empty.
 */
private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) {
    boolean containerExists = (containerState != ABSENT);
    DockerImage reportedImage =
            nodeSpec.wantedDockerImage.filter(image -> containerExists).orElse(new DockerImage(""));
    String reportedVespaVersion =
            nodeSpec.wantedVespaVersion.filter(version -> containerExists).orElse("");
    final NodeAttributes nodeAttributes = new NodeAttributes()
            .withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
            .withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
            .withDockerImage(reportedImage)
            .withVespaVersion(reportedVespaVersion);
    publishStateToNodeRepoIfChanged(nodeAttributes);
}
/**
 * Pushes the given attributes to the node repository, but only when they differ from the
 * previously published set, to avoid redundant node-repo writes every tick.
 */
private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) {
    if (currentAttributes.equals(lastAttributesSet)) return;
    logger.info("Publishing new set of attributes to node repo: "
            + lastAttributesSet + " -> " + currentAttributes);
    addDebugMessage("Publishing new set of attributes to node repo: {" +
            lastAttributesSet + "} -> {" + currentAttributes + "}");
    nodeRepository.updateNodeAttributes(hostname, currentAttributes);
    lastAttributesSet = currentAttributes;
}
/**
 * Starts the container for this node and (re)initializes everything tied to a fresh
 * container: ACLs, the CPU-metrics baseline, the daily filebeat-restart task, the
 * metrics/filebeat config files and the resume-script flag.
 */
private void startContainer(ContainerNodeSpec nodeSpec) {
    aclMaintainer.run();
    dockerOperations.startContainer(containerName, nodeSpec);
    // New container => fresh CPU delta baseline.
    lastCpuMetric = new CpuUsageReporter();

    // Schedule a filebeat restart once per day (first run after one day).
    // NOTE(review): the daily cadence is what the schedule shows; the reason is assumed
    // to be config/certificate pickup — confirm.
    currentFilebeatRestarter = filebeatRestarter.scheduleWithFixedDelay(() -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS);
    storageMaintainer.writeMetricsConfig(containerName, nodeSpec);
    storageMaintainer.writeFilebeatConfig(containerName, nodeSpec);

    // Allow the resume script to run once for this new container.
    resumeScriptRun = false;
    containerState = UNKNOWN;
    logger.info("Container successfully started, new containerState is " + containerState);
}
/**
 * Removes the existing container if required; if it is kept, restarts its services when
 * the restart generation has been bumped.
 *
 * @return the surviving container, or empty if there was none or it was removed
 */
private Optional<Container> removeContainerIfNeededUpdateContainerState(ContainerNodeSpec nodeSpec, Optional<Container> existingContainer) {
    if (!existingContainer.isPresent()) return Optional.empty();

    Optional<Container> remaining = removeContainerIfNeeded(nodeSpec, existingContainer.get());
    if (!remaining.isPresent()) return Optional.empty();

    Container container = remaining.get();
    Optional<String> restartReason = shouldRestartServices(nodeSpec);
    if (restartReason.isPresent()) {
        logger.info("Will restart services for container " + container + ": " + restartReason.get());
        restartServices(nodeSpec, container);
    }
    return Optional.of(container);
}
/**
 * Decides whether services should be restarted: yes when a wanted restart generation
 * exists and the current generation is either absent or behind it.
 *
 * @return the human-readable restart reason, or empty if no restart is needed
 */
private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) {
    if (!nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty();

    if (!nodeSpec.currentRestartGeneration.isPresent() ||
            nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) {
        // Use orElse(null) in the message: the previous unconditional .get() threw
        // NoSuchElementException precisely when currentRestartGeneration was absent,
        // which is one of the two conditions that reach this branch.
        return Optional.of("Restart requested - wanted restart generation has been bumped: "
                + nodeSpec.currentRestartGeneration.orElse(null) + " -> " + nodeSpec.wantedRestartGeneration.get());
    }
    return Optional.empty();
}
/**
 * Restarts Vespa services in the container, but only when the container is running and
 * the node is active; the node is suspended in the orchestrator first.
 */
private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) {
    boolean containerRunning = existingContainer.state.isRunning();
    boolean nodeActive = nodeSpec.nodeState == Node.State.active;
    if (!containerRunning || !nodeActive) return;

    ContainerName containerName = existingContainer.name;
    logger.info("Restarting services for " + containerName);
    // Ask the orchestrator for permission before disrupting an active node.
    orchestratorSuspendNode();
    dockerOperations.restartVespaOnNode(containerName);
}
/**
 * Stops all Vespa services inside this node's container. The node is suspended
 * (best effort) before the services are taken down — the order matters.
 */
@Override
public void stopServices() {
    logger.info("Stopping services for " + containerName);
    dockerOperations.trySuspendNode(containerName);
    dockerOperations.stopServicesOnNode(containerName);
}
/**
 * Decides whether the existing container must be removed, checking (in order): node
 * state, wanted Docker image, container liveness, and resource allocation.
 *
 * @return the human-readable removal reason, or empty if the container may be kept
 */
private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) {
    Node.State nodeState = nodeSpec.nodeState;
    if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
        return Optional.of("Node in state " + nodeState + ", container should no longer be running");
    }

    boolean imageMismatch = nodeSpec.wantedDockerImage.isPresent()
            && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image);
    if (imageMismatch) {
        return Optional.of("The node is supposed to run a new Docker image: "
                + existingContainer + " -> " + nodeSpec.wantedDockerImage.get());
    }

    if (!existingContainer.state.isRunning()) {
        return Optional.of("Container no longer running");
    }

    // Re-create the container when its CPU/memory allocation no longer matches the spec.
    ContainerResources wantedContainerResources = ContainerResources.from(
            nodeSpec.minCpuCores, nodeSpec.minMainMemoryAvailableGb);
    if (!wantedContainerResources.equals(existingContainer.resources)) {
        return Optional.of("Container should be running with different resource allocation, wanted: " +
                wantedContainerResources + ", actual: " + existingContainer.resources);
    }

    return Optional.empty();
}
/**
 * Removes the container if {@link #shouldRemoveContainer} gives a reason to, after first
 * suspending the node in the orchestrator (active nodes only) and stopping its services.
 *
 * @return the container if it was kept, empty if it was removed
 */
private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec, Container existingContainer) {
    Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer);
    if (removeReason.isPresent()) {
        logger.info("Will remove container " + existingContainer + ": " + removeReason.get());

        if (existingContainer.state.isRunning()) {
            if (nodeSpec.nodeState == Node.State.active) {
                // Only active nodes need orchestrator permission before going down.
                orchestratorSuspendNode();
            }
            try {
                stopServices();
            } catch (Exception e) {
                // Best effort: removal must proceed even if a clean service stop fails.
                logger.info("Failed stopping services, ignoring", e);
            }
        }
        // The scheduled filebeat restart belongs to the old container; cancel it first.
        if (currentFilebeatRestarter != null) currentFilebeatRestarter.cancel(true);
        dockerOperations.removeContainer(existingContainer, nodeSpec);
        containerState = ABSENT;
        logger.info("Container successfully removed, new containerState is " + containerState);
        return Optional.empty();
    }
    return Optional.of(existingContainer);
}
/**
 * Kicks off an async pull of the wanted Docker image when it differs from the current one,
 * and tracks whether a download is in flight (read by {@code isDownloadingImage()}).
 */
private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) {
    if (nodeSpec.currentDockerImage.equals(nodeSpec.wantedDockerImage)) return;
    // Guard the Optional: the old code called wantedDockerImage.get() unconditionally and
    // threw NoSuchElementException when a current image existed but no wanted image was set.
    if (!nodeSpec.wantedDockerImage.isPresent()) return;

    if (dockerOperations.pullImageAsyncIfNeeded(nodeSpec.wantedDockerImage.get())) {
        imageBeingDownloaded = nodeSpec.wantedDockerImage.get();
    } else if (imageBeingDownloaded != null) { // Image was downloading, but now it's ready
        imageBeingDownloaded = null;
    }
}
/** Wakes the converge loop so it runs a tick immediately instead of waiting out the interval. */
private void signalWorkToBeDone() {
    synchronized (monitor) {
        if (workToDoNow) return; // already signalled
        workToDoNow = true;
        addDebugMessage("Signaling work to be done");
        monitor.notifyAll();
    }
}
/**
 * One iteration of the converge loop: waits until either the converge interval has elapsed
 * or work has been signalled, acknowledges any freeze/unfreeze request, then runs
 * {@code converge()} with per-exception-type handling. Package-private for tests.
 */
void tick() {
    boolean isFrozenCopy;
    synchronized (monitor) {
        // Wait out the remainder of the interval, unless work was signalled meanwhile.
        while (!workToDoNow) {
            long remainder = timeBetweenEachConverge
                    .minus(Duration.between(lastConverge, clock.instant()))
                    .toMillis();
            if (remainder > 0) {
                try {
                    monitor.wait(remainder); // may also wake on signalWorkToBeDone()
                } catch (InterruptedException e) {
                    logger.error("Interrupted, but ignoring this: " + hostname);
                }
            } else break;
        }
        lastConverge = clock.instant();
        workToDoNow = false;

        // Acknowledge a pending freeze request while still holding the monitor.
        if (isFrozen != wantFrozen) {
            isFrozen = wantFrozen;
            logger.info("Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
        }
        isFrozenCopy = isFrozen;
    }

    if (isFrozenCopy) {
        addDebugMessage("tick: isFrozen");
    } else {
        try {
            converge();
        } catch (OrchestratorException e) {
            // Orchestrator denied a suspend request; benign, retry next tick.
            logger.info(e.getMessage());
            addDebugMessage(e.getMessage());
        } catch (DockerException e) {
            numberOfUnhandledException++;
            // Docker state is now unknown; force a fresh container query next tick.
            containerState = UNKNOWN;
            logger.error("Caught a DockerException, resetting containerState to " + containerState, e);
        } catch (Exception e) {
            numberOfUnhandledException++;
            logger.error("Unhandled exception, ignoring.", e);
            addDebugMessage(e.getMessage());
        } catch (Throwable t) {
            // Errors (OOM, linkage failures) leave the JVM in an undefined state; bail hard.
            logger.error("Unhandled throwable, taking down system.", t);
            System.exit(234);
        }
    }
}
/**
 * Collects CPU, memory, disk and network statistics for this node's container from the
 * Docker daemon and pushes them into the container's metrics agent. No-op while the
 * container is absent or before the first node spec has been fetched.
 */
@SuppressWarnings("unchecked")
public void updateContainerNodeMetrics() {
    final ContainerNodeSpec nodeSpec = lastNodeSpec;
    if (nodeSpec == null || containerState == ABSENT) return;

    Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
    if (!containerStats.isPresent()) return;

    Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
            .add("host", hostname)
            .add("role", "tenants")
            .add("state", nodeSpec.nodeState.toString())
            .add("parentHostname", environment.getParentHostHostname());
    Dimensions dimensions = dimensionsBuilder.build();

    Docker.ContainerStats stats = containerStats.get();
    final String APP = MetricReceiverWrapper.APPLICATION_NODE;
    // The stats maps mirror Docker's stats JSON; the casts below assume that layout
    // (nested maps with Number leaves) — NOTE(review): confirm against the Docker client used.
    final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
    final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
    final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
    final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
    final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
    final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
    final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
    final long diskTotalBytes = (long) (nodeSpec.minDiskAvailableGb * BYTES_IN_GB);
    final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(containerName);

    lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);

    // Scale CPU usage so 100% corresponds to the cores allocated to this node,
    // not to the whole host.
    final double allocatedCpuRatio = nodeSpec.minCpuCores / totalNumCpuCores;
    double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
    double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;

    // Docker's "usage" includes page cache; subtract it to report actually-used memory.
    long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
    double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
    Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);

    List<DimensionMetrics> metrics = new ArrayList<>();
    DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
            .withMetric("mem.limit", memoryTotalBytes)
            .withMetric("mem.used", memoryTotalBytesUsed)
            .withMetric("mem.util", 100 * memoryUsageRatio)
            .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
            .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
            .withMetric("disk.limit", diskTotalBytes);

    diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
    diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
    metrics.add(systemMetricsBuilder.build());

    // One metrics packet per network interface, tagged with the interface name.
    stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
        Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
        Map<String, Number> infStats = (Map<String, Number>) interfaceStats;

        DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
                .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
                .withMetric("net.in.errors", infStats.get("rx_errors").longValue())
                .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
                .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
                .withMetric("net.out.errors", infStats.get("tx_errors").longValue())
                .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
                .build();
        metrics.add(networkMetrics);
    });

    pushMetricsToContainer(metrics);
}
/**
 * Serializes the given metrics as secret-agent reports and pushes them into the
 * container's metrics proxy via vespa-rpc-invoke. Failures are logged and swallowed.
 */
private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
    StringBuilder reports = new StringBuilder();
    try {
        for (DimensionMetrics metric : metrics) {
            reports.append(metric.toSecretAgentReport());
        }
        // "s:" prefix marks the argument as a string for vespa-rpc-invoke.
        String wrappedMetrics = "s:" + reports;

        String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
        dockerOperations.executeCommandInContainerAsRoot(containerName, 5L, command);
    } catch (DockerExecTimeoutException | JsonProcessingException e) {
        logger.warning("Unable to push metrics to container: " + containerName, e);
    }
}
/**
 * Returns the container for this node as reported by the Docker daemon, caching
 * definite absence in {@code containerState} to skip the daemon query next time.
 */
private Optional<Container> getContainer() {
    if (containerState == ABSENT) return Optional.empty();

    Optional<Container> found = dockerOperations.getContainer(containerName);
    if (!found.isPresent()) {
        containerState = ABSENT; // only this agent can bring it back
    }
    return found;
}
/** Returns the hostname of the node this agent manages. */
@Override
public String getHostname() {
    return hostname;
}
/** Returns true while an async Docker image pull scheduled by this agent is in flight. */
@Override
public boolean isDownloadingImage() {
    return imageBeingDownloaded != null;
}
/**
 * Returns the number of unhandled exceptions counted since the last call and resets
 * the counter.
 * NOTE(review): the read-then-reset is not atomic and the field is not volatile, so an
 * increment from the tick thread can be lost around the reset — confirm callers tolerate
 * this, or consider an AtomicInteger.
 */
@Override
public int getAndResetNumberOfUnhandledExceptions() {
    int temp = numberOfUnhandledException;
    numberOfUnhandledException = 0;
    return temp;
}
/**
 * Tracks cumulative CPU counters from Docker stats and computes the deltas between the
 * two most recent samples, from which usage ratios are derived.
 */
class CpuUsageReporter {
    // Cumulative counters from the previous sample.
    private long containerKernelUsage = 0;
    private long totalContainerUsage = 0;
    private long totalSystemUsage = 0;

    // Deltas between the last two samples.
    private long deltaContainerKernelUsage;
    private long deltaContainerUsage;
    private long deltaSystemUsage;

    private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
        // On the very first sample there is no previous system counter, so the system
        // delta is forced to 0, making the ratios below report NaN instead of garbage.
        deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
        deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
        deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;

        this.totalSystemUsage = totalSystemUsage;
        this.totalContainerUsage = totalContainerUsage;
        this.containerKernelUsage = containerKernelUsage;
    }

    /**
     * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
     * in the time between the last two times updateCpuDeltas() was called. This is calculated
     * by dividing the CPU time used by the container with the CPU time used by the entire system.
     * Returns NaN before two samples have been taken.
     */
    double getCpuUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
    }

    /** Same as {@link #getCpuUsageRatio()} but counting only kernel-mode CPU time. */
    double getCpuKernelUsageRatio() {
        return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
    }
}
/**
 * Asks the orchestrator for permission to suspend this node before disruptive operations.
 * NOTE(review): the orchestrator client presumably throws OrchestratorException when
 * permission is denied (tick() catches that type) — confirm.
 */
private void orchestratorSuspendNode() {
    logger.info("Ask Orchestrator for permission to suspend node " + hostname);
    orchestrator.suspend(hostname);
}
}
|
class NodeAgentImpl implements NodeAgent {
private static final long BYTES_IN_GB = 1_000_000_000L;
private final AtomicBoolean terminated = new AtomicBoolean(false);
private boolean isFrozen = true;
private boolean wantFrozen = false;
private boolean workToDoNow = true;
private boolean expectNodeNotInNodeRepo = false;
private final Object monitor = new Object();
private final PrefixLogger logger;
private DockerImage imageBeingDownloaded = null;
private final ContainerName containerName;
private final String hostname;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
private final StorageMaintainer storageMaintainer;
private final Runnable aclMaintainer;
private final Environment environment;
private final Clock clock;
private final Duration timeBetweenEachConverge;
private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
private final LinkedList<String> debugMessages = new LinkedList<>();
private int numberOfUnhandledException = 0;
private Instant lastConverge;
private final Thread loopThread;
private final ScheduledExecutorService filebeatRestarter =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
private Consumer<String> serviceRestarter;
private Future<?> currentFilebeatRestarter;
private boolean resumeScriptRun = false;
/**
* ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
* NodeAgent explicitly starting it.
* Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
* NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
* to get updated state of the container.
*/
enum ContainerState {
ABSENT,
UNKNOWN
}
private ContainerState containerState = UNKNOWN;
private NodeAttributes lastAttributesSet = null;
private ContainerNodeSpec lastNodeSpec = null;
private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();
public NodeAgentImpl(
final String hostName,
final NodeRepository nodeRepository,
final Orchestrator orchestrator,
final DockerOperations dockerOperations,
final StorageMaintainer storageMaintainer,
final Runnable aclMaintainer,
final Environment environment,
final Clock clock,
final Duration timeBetweenEachConverge) {
this.containerName = ContainerName.fromHostname(hostName);
this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, containerName);
this.hostname = hostName;
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.dockerOperations = dockerOperations;
this.storageMaintainer = storageMaintainer;
this.aclMaintainer = aclMaintainer;
this.environment = environment;
this.clock = clock;
this.timeBetweenEachConverge = timeBetweenEachConverge;
this.lastConverge = clock.instant();
this.loopThread = new Thread(() -> {
while (!terminated.get()) tick();
});
this.loopThread.setName("tick-" + hostname);
}
@Override
public boolean setFrozen(boolean frozen) {
synchronized (monitor) {
if (wantFrozen != frozen) {
wantFrozen = frozen;
addDebugMessage(wantFrozen ? "Freezing" : "Unfreezing");
signalWorkToBeDone();
}
return isFrozen == frozen;
}
}
private void addDebugMessage(String message) {
synchronized (debugMessages) {
while (debugMessages.size() > 1000) {
debugMessages.pop();
}
logger.debug(message);
debugMessages.add("[" + sdf.format(new Date()) + "] " + message);
}
}
@Override
public Map<String, Object> debugInfo() {
Map<String, Object> debug = new LinkedHashMap<>();
debug.put("Hostname", hostname);
debug.put("isFrozen", isFrozen);
debug.put("wantFrozen", wantFrozen);
debug.put("terminated", terminated);
debug.put("workToDoNow", workToDoNow);
synchronized (debugMessages) {
debug.put("History", new LinkedList<>(debugMessages));
}
debug.put("Node repo state", lastNodeSpec.nodeState.name());
return debug;
}
@Override
public void start() {
String message = "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms";
logger.info(message);
addDebugMessage(message);
loopThread.start();
serviceRestarter = service -> {
try {
ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
containerName, "service", service, "restart");
if (!processResult.isSuccess()) {
logger.error("Failed to restart service " + service + ": " + processResult);
}
} catch (Exception e) {
logger.error("Failed to restart service " + service, e);
}
};
}
@Override
public void stop() {
addDebugMessage("Stopping");
filebeatRestarter.shutdown();
if (!terminated.compareAndSet(false, true)) {
throw new RuntimeException("Can not re-stop a node agent.");
}
signalWorkToBeDone();
do {
try {
loopThread.join();
filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
} catch (InterruptedException e) {
logger.error("Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown");
}
} while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
logger.info("Stopped");
}
private void runLocalResumeScriptIfNeeded() {
if (! resumeScriptRun) {
addDebugMessage("Starting optional node program resume command");
dockerOperations.resumeNode(containerName);
resumeScriptRun = true;
}
}
private void updateNodeRepoWithCurrentAttributes(final ContainerNodeSpec nodeSpec) {
final NodeAttributes nodeAttributes = new NodeAttributes()
.withRestartGeneration(nodeSpec.wantedRestartGeneration.orElse(null))
.withRebootGeneration(nodeSpec.wantedRebootGeneration.orElse(0L))
.withDockerImage(nodeSpec.wantedDockerImage.filter(node -> containerState != ABSENT).orElse(new DockerImage("")))
.withVespaVersion(nodeSpec.wantedVespaVersion.filter(node -> containerState != ABSENT).orElse(""));
publishStateToNodeRepoIfChanged(nodeAttributes);
}
private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes) {
if (!currentAttributes.equals(lastAttributesSet)) {
logger.info("Publishing new set of attributes to node repo: "
+ lastAttributesSet + " -> " + currentAttributes);
addDebugMessage("Publishing new set of attributes to node repo: {" +
lastAttributesSet + "} -> {" + currentAttributes + "}");
nodeRepository.updateNodeAttributes(hostname, currentAttributes);
lastAttributesSet = currentAttributes;
}
}
private void startContainer(ContainerNodeSpec nodeSpec) {
aclMaintainer.run();
dockerOperations.startContainer(containerName, nodeSpec);
lastCpuMetric = new CpuUsageReporter();
currentFilebeatRestarter = filebeatRestarter.scheduleWithFixedDelay(() -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS);
storageMaintainer.writeMetricsConfig(containerName, nodeSpec);
storageMaintainer.writeFilebeatConfig(containerName, nodeSpec);
resumeScriptRun = false;
containerState = UNKNOWN;
logger.info("Container successfully started, new containerState is " + containerState);
}
private Optional<Container> removeContainerIfNeededUpdateContainerState(ContainerNodeSpec nodeSpec, Optional<Container> existingContainer) {
return existingContainer
.flatMap(container -> removeContainerIfNeeded(nodeSpec, container))
.map(container -> {
shouldRestartServices(nodeSpec).ifPresent(restartReason -> {
logger.info("Will restart services for container " + container + ": " + restartReason);
restartServices(nodeSpec, container);
});
return container;
});
}
private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) {
if (!nodeSpec.wantedRestartGeneration.isPresent()) return Optional.empty();
if (!nodeSpec.currentRestartGeneration.isPresent() ||
nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) {
return Optional.of("Restart requested - wanted restart generation has been bumped: "
+ nodeSpec.currentRestartGeneration.get() + " -> " + nodeSpec.wantedRestartGeneration.get());
}
return Optional.empty();
}
private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer) {
if (existingContainer.state.isRunning() && nodeSpec.nodeState == Node.State.active) {
ContainerName containerName = existingContainer.name;
logger.info("Restarting services for " + containerName);
orchestratorSuspendNode();
dockerOperations.restartVespaOnNode(containerName);
}
}
@Override
public void stopServices() {
logger.info("Stopping services for " + containerName);
dockerOperations.trySuspendNode(containerName);
dockerOperations.stopServicesOnNode(containerName);
}
private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Container existingContainer) {
final Node.State nodeState = nodeSpec.nodeState;
if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
return Optional.of("Node in state " + nodeState + ", container should no longer be running");
}
if (nodeSpec.wantedDockerImage.isPresent() && !nodeSpec.wantedDockerImage.get().equals(existingContainer.image)) {
return Optional.of("The node is supposed to run a new Docker image: "
+ existingContainer + " -> " + nodeSpec.wantedDockerImage.get());
}
if (!existingContainer.state.isRunning()) {
return Optional.of("Container no longer running");
}
ContainerResources wantedContainerResources = ContainerResources.from(
nodeSpec.minCpuCores, nodeSpec.minMainMemoryAvailableGb);
if (!wantedContainerResources.equals(existingContainer.resources)) {
return Optional.of("Container should be running with different resource allocation, wanted: " +
wantedContainerResources + ", actual: " + existingContainer.resources);
}
return Optional.empty();
}
private Optional<Container> removeContainerIfNeeded(ContainerNodeSpec nodeSpec, Container existingContainer) {
Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer);
if (removeReason.isPresent()) {
logger.info("Will remove container " + existingContainer + ": " + removeReason.get());
if (existingContainer.state.isRunning()) {
if (nodeSpec.nodeState == Node.State.active) {
orchestratorSuspendNode();
}
try {
stopServices();
} catch (Exception e) {
logger.info("Failed stopping services, ignoring", e);
}
}
if (currentFilebeatRestarter != null) currentFilebeatRestarter.cancel(true);
dockerOperations.removeContainer(existingContainer, nodeSpec);
containerState = ABSENT;
logger.info("Container successfully removed, new containerState is " + containerState);
return Optional.empty();
}
return Optional.of(existingContainer);
}
private void scheduleDownLoadIfNeeded(ContainerNodeSpec nodeSpec) {
if (nodeSpec.currentDockerImage.equals(nodeSpec.wantedDockerImage)) return;
if (dockerOperations.pullImageAsyncIfNeeded(nodeSpec.wantedDockerImage.get())) {
imageBeingDownloaded = nodeSpec.wantedDockerImage.get();
} else if (imageBeingDownloaded != null) {
imageBeingDownloaded = null;
}
}
private void signalWorkToBeDone() {
synchronized (monitor) {
if (!workToDoNow) {
workToDoNow = true;
addDebugMessage("Signaling work to be done");
monitor.notifyAll();
}
}
}
void tick() {
boolean isFrozenCopy;
synchronized (monitor) {
while (!workToDoNow) {
long remainder = timeBetweenEachConverge
.minus(Duration.between(lastConverge, clock.instant()))
.toMillis();
if (remainder > 0) {
try {
monitor.wait(remainder);
} catch (InterruptedException e) {
logger.error("Interrupted, but ignoring this: " + hostname);
}
} else break;
}
lastConverge = clock.instant();
workToDoNow = false;
if (isFrozen != wantFrozen) {
isFrozen = wantFrozen;
logger.info("Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
}
isFrozenCopy = isFrozen;
}
if (isFrozenCopy) {
addDebugMessage("tick: isFrozen");
} else {
try {
converge();
} catch (OrchestratorException e) {
logger.info(e.getMessage());
addDebugMessage(e.getMessage());
} catch (DockerException e) {
numberOfUnhandledException++;
containerState = UNKNOWN;
logger.error("Caught a DockerException, resetting containerState to " + containerState, e);
} catch (Exception e) {
numberOfUnhandledException++;
logger.error("Unhandled exception, ignoring.", e);
addDebugMessage(e.getMessage());
} catch (Throwable t) {
logger.error("Unhandled throwable, taking down system.", t);
System.exit(234);
}
}
}
@SuppressWarnings("unchecked")
public void updateContainerNodeMetrics() {
final ContainerNodeSpec nodeSpec = lastNodeSpec;
if (nodeSpec == null || containerState == ABSENT) return;
Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
if (!containerStats.isPresent()) return;
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
.add("host", hostname)
.add("role", "tenants")
.add("state", nodeSpec.nodeState.toString())
.add("parentHostname", environment.getParentHostHostname());
Dimensions dimensions = dimensionsBuilder.build();
Docker.ContainerStats stats = containerStats.get();
final String APP = MetricReceiverWrapper.APPLICATION_NODE;
final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
final long diskTotalBytes = (long) (nodeSpec.minDiskAvailableGb * BYTES_IN_GB);
final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(containerName);
lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);
final double allocatedCpuRatio = nodeSpec.minCpuCores / totalNumCpuCores;
double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;
long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);
List<DimensionMetrics> metrics = new ArrayList<>();
DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
.withMetric("mem.limit", memoryTotalBytes)
.withMetric("mem.used", memoryTotalBytesUsed)
.withMetric("mem.util", 100 * memoryUsageRatio)
.withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
.withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
.withMetric("disk.limit", diskTotalBytes);
diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
metrics.add(systemMetricsBuilder.build());
stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
.withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
.withMetric("net.in.errors", infStats.get("rx_errors").longValue())
.withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
.withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
.withMetric("net.out.errors", infStats.get("tx_errors").longValue())
.withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
.build();
metrics.add(networkMetrics);
});
pushMetricsToContainer(metrics);
}
private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
StringBuilder params = new StringBuilder();
try {
for (DimensionMetrics dimensionMetrics : metrics) {
params.append(dimensionMetrics.toSecretAgentReport());
}
String wrappedMetrics = "s:" + params.toString();
String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
dockerOperations.executeCommandInContainerAsRoot(containerName, 5L, command);
} catch (DockerExecTimeoutException | JsonProcessingException e) {
logger.warning("Unable to push metrics to container: " + containerName, e);
}
}
private Optional<Container> getContainer() {
if (containerState == ABSENT) return Optional.empty();
Optional<Container> container = dockerOperations.getContainer(containerName);
if (! container.isPresent()) containerState = ABSENT;
return container;
}
@Override
public String getHostname() {
return hostname;
}
@Override
public boolean isDownloadingImage() {
return imageBeingDownloaded != null;
}
@Override
public int getAndResetNumberOfUnhandledExceptions() {
int temp = numberOfUnhandledException;
numberOfUnhandledException = 0;
return temp;
}
/**
 * Tracks CPU usage deltas between consecutive samples for the managed container.
 * Totals are cumulative counters; each update computes the difference since the
 * previous sample so usage ratios can be derived for that interval.
 */
class CpuUsageReporter {
    private long containerKernelUsage = 0;
    private long totalContainerUsage = 0;
    private long totalSystemUsage = 0;

    private long deltaContainerKernelUsage;
    private long deltaContainerUsage;
    private long deltaSystemUsage;

    private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
        // On the very first sample there is no previous system total to diff
        // against, so the system delta is reported as zero.
        if (this.totalSystemUsage == 0) {
            deltaSystemUsage = 0;
        } else {
            deltaSystemUsage = totalSystemUsage - this.totalSystemUsage;
        }
        deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
        deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;

        this.totalSystemUsage = totalSystemUsage;
        this.totalContainerUsage = totalContainerUsage;
        this.containerKernelUsage = containerKernelUsage;
    }

    /**
     * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
     * in the time between the last two times updateCpuDeltas() was called. This is calculated
     * by dividing the CPU time used by the container with the CPU time used by the entire system.
     * Returns NaN until two samples have been taken.
     */
    double getCpuUsageRatio() {
        if (deltaSystemUsage == 0) {
            return Double.NaN;
        }
        return (double) deltaContainerUsage / deltaSystemUsage;
    }

    /** Same as {@link #getCpuUsageRatio()} but for kernel-mode CPU time only. */
    double getCpuKernelUsageRatio() {
        if (deltaSystemUsage == 0) {
            return Double.NaN;
        }
        return (double) deltaContainerKernelUsage / deltaSystemUsage;
    }
}
/** Asks the Orchestrator for permission to suspend this node; throws if denied. */
private void orchestratorSuspendNode() {
    String message = "Ask Orchestrator for permission to suspend node " + hostname;
    logger.info(message);
    orchestrator.suspend(hostname);
}
}
|
We can directly return instruction.pos here instead of storing it in a local variable and breaking out of the loop.
|
/**
 * Resolves the source position to use for instructions desugared into the given
 * basic block: the position of the first instruction that carries one, falling
 * back to the terminator's position when no instruction has a position.
 *
 * @param basicBlock the basic block to resolve a position for
 * @return the first non-null instruction position, or the terminator's position
 */
private Location getDesugaredPosition(BIRBasicBlock basicBlock) {
    for (BIRNonTerminator instruction : basicBlock.instructions) {
        if (instruction.pos != null) {
            // Return directly — no need for a mutable local and a break.
            return instruction.pos;
        }
    }
    return basicBlock.terminator.pos;
}
|
desugaredPos = instruction.pos;
|
/**
 * Picks the position for desugared code in this basic block: prefer the first
 * instruction that has a position, otherwise use the terminator's position.
 */
private Location getDesugaredPosition(BIRBasicBlock basicBlock) {
    for (BIRNonTerminator ins : basicBlock.instructions) {
        Location pos = ins.pos;
        if (pos != null) {
            return pos;
        }
    }
    return basicBlock.terminator.pos;
}
|
class JvmObservabilityGen {
// Name of the entry-point function that receives main-specific instrumentation.
private static final String ENTRY_POINT_MAIN_METHOD_NAME = "main";
// Prefix for basic blocks inserted by this observability desugar pass.
private static final String NEW_BB_PREFIX = "observabilityDesugaredBB";
// Unique-ID tags used to key the shared temp locals for invocation vs. function-body
// instrumentation (see injectCheckErrorCalls / injectReportErrorCall).
private static final String INVOCATION_INSTRUMENTATION_TYPE = "invocation";
private static final String FUNC_BODY_INSTRUMENTATION_TYPE = "funcBody";
// Keys into tempLocalVarsMap for the position-argument operands.
private static final String FILE_NAME_STRING = "fileName";
private static final String START_LINE_STRING = "startLine";
private static final String START_COLUMN_STRING = "startCol";
// Synthetic position attached to compile-time constant loads (no real source location).
private static final Location COMPILE_TIME_CONST_POS =
        new BLangDiagnosticLocation(null, -1, -1, -1, -1, 0, 0);
private final PackageCache packageCache;
private final SymbolTable symbolTable;
// Monotonic counters used to generate unique names for desugared artifacts.
private int lambdaIndex;
private int desugaredBBIndex;
private int localVarIndex;
private int constantIndex;
private int defaultServiceIndex;
// Cache of compile-time constant values -> global operand; materialized into the
// package init function at the end of instrumentPackage().
private final Map<Object, BIROperand> compileTimeConstants;
// Service class name -> attach point path/literal, collected from service declarations.
private final Map<Name, String> svcAttachPoints;
// Shared temporary local variables keyed by purpose (see the *_STRING keys above).
private final Map<String, BIROperand> tempLocalVarsMap;
// Basic block -> its predecessors, rebuilt per function by populatePredecessorMap().
private final Map<BIRBasicBlock, List<BIRBasicBlock>> predecessorMap;
/**
 * Creates an observability instrumentation pass over BIR.
 *
 * @param packageCache cache used to resolve package symbols for generated lambdas
 * @param symbolTable  symbol table providing the built-in types used by injected code
 */
JvmObservabilityGen(PackageCache packageCache, SymbolTable symbolTable) {
    this.packageCache = packageCache;
    this.symbolTable = symbolTable;

    this.compileTimeConstants = new HashMap<>();
    this.svcAttachPoints = new HashMap<>();
    this.tempLocalVarsMap = new HashMap<>();
    this.predecessorMap = new HashMap<>();

    // All name-generation counters start from zero.
    this.lambdaIndex = 0;
    this.desugaredBBIndex = 0;
    this.localVarIndex = 0;
    this.constantIndex = 0;
    this.defaultServiceIndex = 0;
}
/**
 * Instrument the package by rewriting the BIR to add relevant Observability related instructions.
 *
 * <p>Phases: (1) rewrite every module-level function (checkpoints for main, async-call
 * wrappers, invocation observation, and body observation for main/workers);
 * (2) collect service attach points; (3) rewrite functions attached to classes/services;
 * (4) materialize all collected compile-time constants in the package init function.
 *
 * @param pkg The package to instrument
 */
public void instrumentPackage(BIRPackage pkg) {
    initializeTempLocalVariables();
    for (int i = 0; i < pkg.functions.size(); i++) {
        localVarIndex = 0;
        BIRFunction func = pkg.functions.get(i);
        // Checkpoints are only injected into the entry point at module level.
        if (ENTRY_POINT_MAIN_METHOD_NAME.equals(func.name.value)) {
            rewriteControlFlowInvocation(func, pkg);
        }
        rewriteAsyncInvocations(func, null, pkg);
        rewriteObservableFunctionInvocations(func, pkg);
        if (ENTRY_POINT_MAIN_METHOD_NAME.equals(func.name.value)) {
            rewriteObservableFunctionBody(func, pkg, null, func.name.value, null, false, false, true, false);
        } else if ((func.flags & Flags.WORKER) == Flags.WORKER) {
            rewriteObservableFunctionBody(func, pkg, null, func.workerName.value, null, false, false, false, true);
        }
    }
    // Remember each service class's attach point so a default service name can be derived.
    for (BIRNode.BIRServiceDeclaration serviceDecl : pkg.serviceDecls) {
        List<String> attachPoint = serviceDecl.attachPoint;
        String attachPointLiteral = serviceDecl.attachPointLiteral;
        if (attachPoint != null) {
            svcAttachPoints.put(serviceDecl.associatedClassName, "/" + String.join("/", attachPoint));
        } else if (attachPointLiteral != null) {
            svcAttachPoints.put(serviceDecl.associatedClassName, attachPointLiteral);
        }
    }
    for (BIRTypeDefinition typeDef : pkg.typeDefs) {
        BType bType = JvmCodeGenUtil.getReferredType(typeDef.type);
        // Skip plain (non-class) object type definitions — they carry no executable bodies.
        if ((typeDef.flags & Flags.CLASS) != Flags.CLASS && bType.tag == TypeTags.OBJECT) {
            continue;
        }
        boolean isService = (bType.flags & Flags.SERVICE) == Flags.SERVICE;
        String serviceName = null;
        if (isService) {
            // Prefer the "label" from the display annotation as the service name.
            for (BIRNode.BIRAnnotationAttachment annotationAttachment : typeDef.annotAttachments) {
                if (DISPLAY_ANNOTATION.equals(annotationAttachment.annotTagRef.value)) {
                    BIRNode.ConstValue annotValue =
                            ((BIRNode.BIRConstAnnotationAttachment) annotationAttachment).annotValue;
                    Map<String, BIRNode.ConstValue> annotationMap =
                            (Map<String, BIRNode.ConstValue>) annotValue.value;
                    serviceName = annotationMap.get("label").value.toString();
                    break;
                }
            }
            // Fall back to the attach point, then to a generated org_module_svc_N name.
            if (serviceName == null) {
                String basePath = this.svcAttachPoints.get(typeDef.name);
                serviceName = Objects.requireNonNullElseGet(basePath, () ->
                        pkg.packageID.orgName.value + "_" + pkg.packageID.name.value + "_svc_" +
                                defaultServiceIndex++);
            }
        }
        for (int i = 0; i < typeDef.attachedFuncs.size(); i++) {
            BIRFunction func = typeDef.attachedFuncs.get(i);
            localVarIndex = 0;
            if (isService && ((func.flags & Flags.RESOURCE) == Flags.RESOURCE ||
                    (func.flags & Flags.REMOTE) == Flags.REMOTE)) {
                rewriteControlFlowInvocation(func, pkg);
            }
            rewriteAsyncInvocations(func, typeDef, pkg);
            rewriteObservableFunctionInvocations(func, pkg);
            if (isService) {
                if ((func.flags & Flags.RESOURCE) == Flags.RESOURCE) {
                    rewriteObservableFunctionBody(func, pkg, typeDef, func.name.value, serviceName,
                            true, false, false, false);
                } else if ((func.flags & Flags.REMOTE) == Flags.REMOTE) {
                    rewriteObservableFunctionBody(func, pkg, typeDef, func.name.value, serviceName,
                            false, true, false, false);
                }
            }
        }
    }
    // Emit const-load instructions for every cached observability constant into the
    // first basic block of the package init function (functions.get(0)).
    BIRFunction initFunc = pkg.functions.get(0);
    BIRBasicBlock constInitBB = initFunc.basicBlocks.get(0);
    for (Map.Entry<Object, BIROperand> entry : compileTimeConstants.entrySet()) {
        BIROperand operand = entry.getValue();
        ConstantLoad constLoadIns = new ConstantLoad(COMPILE_TIME_CONST_POS, entry.getKey(),
                operand.variableDcl.type, operand);
        constInitBB.instructions.add(constLoadIns);
    }
}
/**
 * Adds checkpoint (Java interop) calls to the basic blocks of a function.
 *
 * <p>For every basic block with a usable source position (from its first positioned
 * instruction, otherwise from its terminator), the position-argument constants are
 * loaded and a checkpoint call is recorded. When some predecessor already loaded the
 * exact same position, the const-load step is skipped and only the checkpoint is added.
 *
 * @param func The function of which the instructions should be rewritten
 * @param pkg The package containing the function
 */
private void rewriteControlFlowInvocation(BIRFunction func, BIRPackage pkg) {
    populatePredecessorMap(func.basicBlocks);
    for (Map.Entry<BIRBasicBlock, List<BIRBasicBlock>> entry : this.predecessorMap.entrySet()) {
        BIRBasicBlock basicBlock = entry.getKey();
        Location position = getDesugaredPosition(basicBlock);
        if (position == null || position.lineRange().startLine().line() < 0) {
            continue;  // no real source position — nothing to checkpoint
        }
        int checkpointInsOffset = 0;
        if (!desugaredPosAlreadyLoaded(position, entry.getValue())) {
            updatePositionArgsConstLoadIns(position, basicBlock);
            // Skip past the two const-load instructions that were just added.
            checkpointInsOffset = 2;
        }
        injectCheckpointCall(basicBlock, pkg, checkpointInsOffset);
    }
}
/**
 * Checks whether any predecessor basic block resolves to exactly the same desugared
 * position, in which case the position constants are already loaded on that path.
 */
private boolean desugaredPosAlreadyLoaded(Location desugaredPos, List<BIRBasicBlock> predecessors) {
    return predecessors.stream()
            .map(this::getDesugaredPosition)
            .anyMatch(pos -> pos != null && pos.equals(desugaredPos));
}
/**
 * Inject a checkpoint JI method call into a basic block's instruction list.
 *
 * <p>The call receives the package id plus the shared fileName/startLine/startCol
 * temp locals, which are expected to hold the current position values.
 *
 * @param currentBB The basic block to which the checkpoint call should be injected
 * @param pkg The package the invocation belongs to
 * @param offset The instruction index at which to insert the call
 */
private void injectCheckpointCall(BIRBasicBlock currentBB, BIRPackage pkg, int offset) {
    BIROperand pkgOperand = generateGlobalConstantOperand(pkg, symbolTable.stringType,
            generatePackageId(pkg.packageID));
    JMethodCallInstruction checkpointCall = new JMethodCallInstruction(null);
    checkpointCall.invocationType = INVOKESTATIC;
    checkpointCall.jClassName = OBSERVE_UTILS;
    checkpointCall.jMethodVMSig = CHECKPOINT_CALL;
    checkpointCall.name = RECORD_CHECKPOINT_METHOD;
    checkpointCall.args = new ArrayList<>(Arrays.asList(
            pkgOperand,
            tempLocalVarsMap.get(FILE_NAME_STRING),
            tempLocalVarsMap.get(START_LINE_STRING),
            tempLocalVarsMap.get(START_COLUMN_STRING)));
    currentBB.instructions.add(offset, checkpointCall);
}
/**
 * Rebuilds {@code predecessorMap} (block -> list of predecessor blocks) for a function.
 * Every block gets an entry, even when it has no predecessors.
 */
private void populatePredecessorMap(List<BIRBasicBlock> basicBlocks) {
    this.predecessorMap.clear();
    for (BIRBasicBlock predecessor : basicBlocks) {
        // Ensure an (empty) entry exists even for blocks nobody jumps to.
        this.predecessorMap.computeIfAbsent(predecessor, unused -> new ArrayList<>());
        for (BIRBasicBlock successor : predecessor.terminator.getNextBasicBlocks()) {
            this.predecessorMap.computeIfAbsent(successor, unused -> new ArrayList<>()).add(predecessor);
        }
    }
}
/**
 * Rewrite the invocations in the function bodies to call a lambda asynchronously which in turn calls the
 * actual function synchronously. This is done so that the actual invocation can be observed accurately.
 * Without this wrapper, the start and end time recorded would only reflect the time it took to give the async
 * invocation to the scheduler. However, we require the actual time it took for the invocation.
 *
 * @param func The function of which the instructions in the body should be rewritten
 * @param attachedTypeDef The type definition to which the function was attached to or null
 * @param pkg The package containing the function
 */
private void rewriteAsyncInvocations(BIRFunction func, BIRTypeDefinition attachedTypeDef, BIRPackage pkg) {
    PackageID packageID = pkg.packageID;
    Name org = new Name(Utils.decodeIdentifier(packageID.orgName.getValue()));
    Name module = new Name(Utils.decodeIdentifier(packageID.name.getValue()));
    PackageID currentPkgId = new PackageID(org, module, module, packageID.version, packageID.sourceFileName,
            packageID.isTestPkg);
    // The generated wrapper lambda is added either to the module functions or to the
    // attached functions of the type the original function belongs to.
    BSymbol functionOwner;
    List<BIRFunction> scopeFunctionsList;
    if (attachedTypeDef == null) {
        functionOwner = packageCache.getSymbol(currentPkgId);
        scopeFunctionsList = pkg.functions;
    } else {
        functionOwner = attachedTypeDef.type.tsymbol;
        scopeFunctionsList = attachedTypeDef.attachedFuncs;
    }
    for (BIRBasicBlock currentBB : func.basicBlocks) {
        if (currentBB.terminator.kind != InstructionKind.ASYNC_CALL
                || !isObservable((AsyncCall) currentBB.terminator)) {
            continue;
        }
        AsyncCall asyncCallIns = (AsyncCall) currentBB.terminator;
        /*
         * The wrapper function generated below invokes the actual function synchronously, allowing the
         * instrumentation to record the actual start and end times of the function. The wrapper function
         * is invoked asynchronously preserving the asynchronous behaviour.
         */
        BType returnType = ((BFutureType) asyncCallIns.lhsOp.variableDcl.type).constraint;
        List<BType> argTypes = new ArrayList<>();
        for (BIROperand arg : asyncCallIns.args) {
            BType type = arg.variableDcl.type;
            argTypes.add(type);
        }
        // Wrapper lambda: same argument types as the async call, returning the future's constraint.
        Name lambdaName = new Name("$lambda$observability" + lambdaIndex++ + "$" +
                asyncCallIns.name.value.replace(".", "_"));
        BInvokableType bInvokableType = new BInvokableType(argTypes, null,
                returnType, null);
        BIRFunction desugaredFunc = new BIRFunction(asyncCallIns.pos, lambdaName, 0, bInvokableType,
                func.workerName, 0, VIRTUAL);
        desugaredFunc.receiver = func.receiver;
        scopeFunctionsList.add(desugaredFunc);
        // Return variable of the wrapper, receiving the synchronous call's result.
        BIRVariableDcl funcReturnVariableDcl = new BIRVariableDcl(returnType,
                new Name("$" + lambdaName.value + "$retVal"), VarScope.FUNCTION, VarKind.RETURN);
        BIROperand funcReturnOperand = new BIROperand(funcReturnVariableDcl);
        desugaredFunc.localVars.add(funcReturnVariableDcl);
        desugaredFunc.returnVariable = funcReturnVariableDcl;
        // Symbol for the wrapper so it can be resolved like a regular function.
        BInvokableSymbol invokableSymbol = new BInvokableSymbol(SymTag.FUNCTION, 0, lambdaName,
                currentPkgId, bInvokableType, functionOwner,
                desugaredFunc.pos, VIRTUAL);
        invokableSymbol.retType = funcReturnVariableDcl.type;
        invokableSymbol.kind = SymbolKind.FUNCTION;
        List<BVarSymbol> list = new ArrayList<>();
        for (BIROperand arg : asyncCallIns.args) {
            BVarSymbol bVarSymbol = new BVarSymbol(0, arg.variableDcl.name, currentPkgId, arg.variableDcl.type,
                    invokableSymbol, arg.pos, VIRTUAL);
            list.add(bVarSymbol);
        }
        invokableSymbol.params = list;
        invokableSymbol.scope = new Scope(invokableSymbol);
        invokableSymbol.params.forEach(param -> invokableSymbol.scope.define(param.name, param));
        if (attachedTypeDef == null) {
            functionOwner.scope.define(lambdaName, invokableSymbol);
        }
        // Mirror the async call's args as parameters of the wrapper; SELF is passed
        // through as the receiver rather than a regular parameter.
        List<BIROperand> funcParamOperands = new ArrayList<>();
        Name selfArgName = new Name("%self");
        for (int i = 0; i < asyncCallIns.args.size(); i++) {
            BIROperand arg = asyncCallIns.args.get(i);
            BIRFunctionParameter funcParam;
            if (arg.variableDcl.kind == VarKind.SELF) {
                funcParam = new BIRFunctionParameter(asyncCallIns.pos, arg.variableDcl.type, selfArgName,
                        VarScope.FUNCTION, VarKind.SELF, selfArgName.value, false);
            } else {
                // NOTE(review): "%d" here looks like a leftover printf-style format
                // specifier — this produces names like "$funcParam%d0". Probably
                // intended "$funcParam" + i; confirm no other code matches on the
                // literal "%d" before changing generated names.
                Name argName = new Name("$funcParam%d" + i);
                funcParam = new BIRFunctionParameter(asyncCallIns.pos, arg.variableDcl.type,
                        argName, VarScope.FUNCTION, VarKind.ARG, argName.value, false);
                desugaredFunc.localVars.add(funcParam);
                desugaredFunc.parameters.add(funcParam);
                desugaredFunc.requiredParams.add(new BIRParameter(asyncCallIns.pos, argName, 0));
                desugaredFunc.argsCount++;
            }
            funcParamOperands.add(new BIROperand(funcParam));
        }
        // Wrapper body: one block doing the synchronous call, one block returning.
        BIRBasicBlock callInsBB = insertBasicBlock(desugaredFunc, 0);
        BIRBasicBlock returnInsBB = insertBasicBlock(desugaredFunc, 1);
        callInsBB.terminator = new Call(asyncCallIns.pos, InstructionKind.CALL, asyncCallIns.isVirtual,
                asyncCallIns.calleePkg, asyncCallIns.name, funcParamOperands, funcReturnOperand,
                returnInsBB, asyncCallIns.calleeAnnotAttachments, asyncCallIns.calleeFlags);
        returnInsBB.terminator = new Return(asyncCallIns.pos);
        // Redirect the original async call to target the wrapper lambda instead.
        asyncCallIns.name = lambdaName;
        asyncCallIns.calleePkg = currentPkgId;
        asyncCallIns.isVirtual = attachedTypeDef != null;
        if (attachedTypeDef != null) {
            asyncCallIns.args.add(0, new BIROperand(new BIRVariableDcl(attachedTypeDef.type, selfArgName,
                    VarScope.FUNCTION, VarKind.SELF)));
        }
    }
}
/**
 * Rewrite a function so that the internal body will be observed. This adds the relevant start and stop calls at
 * the beginning and return basic blocks of the function.
 * This is only to be used in service resource functions, workers and main method.
 * This method expects that Observable invocations had already been instrumented properly before this method is
 * called. This is because the uncaught panics thrown from such observable invocations are reported to the
 * observation covering the function body by using the re-panic terminators which gets added in
 * rewriteObservableFunctionInvocations method.
 *
 * @param func The function to instrument
 * @param pkg The package which contains the function
 * @param attachedTypeDef The type definition the function is attached to
 * @param functionName The name of the function which will be observed
 * @param serviceName The name of the service the function is attached to, or null when not a service function
 * @param isResource True if the function is a resource function
 * @param isRemote True if the function is a remote function
 * @param isMainEntryPoint True if the function is the main entry point
 * @param isWorker True if the function was a worker
 */
private void rewriteObservableFunctionBody(BIRFunction func, BIRPackage pkg, BIRTypeDefinition attachedTypeDef,
                                           String functionName, String serviceName, boolean isResource,
                                           boolean isRemote, boolean isMainEntryPoint, boolean isWorker) {
    // Phase 1: split the first basic block and inject the start-observation call
    // so the observation begins before any user code runs.
    {
        BIRBasicBlock startBB = func.basicBlocks.get(0);
        BIRBasicBlock newStartBB = insertBasicBlock(func, 1);
        swapBasicBlockContent(func, startBB, newStartBB);
        if (isResource || isRemote) {
            String resourcePathOrFunction = functionName;
            String resourceAccessor = null;
            if (isResource) {
                // Resolve the resource path ("/seg1/seg2") and accessor (get/post/...)
                // from the matching attached function's symbol.
                for (BAttachedFunction attachedFunc : ((BClassSymbol) attachedTypeDef.type.tsymbol).attachedFuncs) {
                    if (Objects.equals(attachedFunc.funcName.value, functionName)) {
                        BResourceFunction resourceFunction = (BResourceFunction) attachedFunc;
                        StringBuilder resourcePathOrFunctionBuilder = new StringBuilder();
                        for (Name name : resourceFunction.resourcePath) {
                            resourcePathOrFunctionBuilder.append("/").append(name.value);
                        }
                        resourcePathOrFunction = resourcePathOrFunctionBuilder.toString();
                        resourceAccessor = resourceFunction.accessor.value;
                        break;
                    }
                }
            }
            injectStartResourceObservationCall(func, startBB, serviceName, resourcePathOrFunction, resourceAccessor,
                    isResource, isRemote, pkg, func.pos);
        } else {
            BIROperand objectTypeOperand = generateGlobalConstantOperand(pkg, symbolTable.nilType, null);
            injectStartCallableObservationCall(func, startBB, null, false, isMainEntryPoint, isWorker,
                    objectTypeOperand, functionName, pkg, func.pos);
        }
        startBB.terminator.thenBB = newStartBB;
    }
    // Phase 2: intercept every RETURN and PANIC terminator so the observation is
    // stopped (with the error reported, where applicable) before the function exits.
    boolean isErrorCheckRequired = isErrorAssignable(func.returnVariable);
    BIROperand returnValOperand = new BIROperand(func.returnVariable);
    int i = 1;
    while (i < func.basicBlocks.size()) {
        BIRBasicBlock currentBB = func.basicBlocks.get(i);
        if (currentBB.terminator.kind == InstructionKind.RETURN) {
            if (isErrorCheckRequired) {
                // Return value may be an error: check it, report if so, then stop.
                BIRBasicBlock errorReportBB = insertBasicBlock(func, i + 1);
                BIRBasicBlock observeEndBB = insertBasicBlock(func, i + 2);
                BIRBasicBlock newCurrentBB = insertBasicBlock(func, i + 3);
                swapBasicBlockTerminator(func, currentBB, newCurrentBB);
                injectCheckErrorCalls(func, currentBB, errorReportBB, observeEndBB, null,
                        returnValOperand, FUNC_BODY_INSTRUMENTATION_TYPE);
                injectReportErrorCall(func, errorReportBB, null, returnValOperand,
                        FUNC_BODY_INSTRUMENTATION_TYPE);
                injectStopObservationCall(observeEndBB, null);
                observeEndBB.terminator.thenBB = newCurrentBB;
                errorReportBB.terminator.thenBB = observeEndBB;
                i += 3;  // skip the blocks just inserted
            } else {
                BIRBasicBlock newCurrentBB = insertBasicBlock(func, i + 1);
                swapBasicBlockTerminator(func, currentBB, newCurrentBB);
                injectStopObservationCall(currentBB, null);
                currentBB.terminator.thenBB = newCurrentBB;
                i += 1;
            }
        } else if (currentBB.terminator.kind == InstructionKind.PANIC) {
            // Stop the observation with the panicked error before re-panicking.
            Panic panicCall = (Panic) currentBB.terminator;
            BIRBasicBlock newCurrentBB = insertBasicBlock(func, i + 1);
            swapBasicBlockTerminator(func, currentBB, newCurrentBB);
            injectStopObservationWithErrorCall(func, currentBB, newCurrentBB.terminator.pos,
                    panicCall.errorOp, FUNC_BODY_INSTRUMENTATION_TYPE);
            currentBB.terminator.thenBB = newCurrentBB;
            i += 1;
        }
        i++;
    }
    // Phase 3: add a function-wide error-table entry so uncaught panics anywhere in
    // the body stop the observation (with the error) and then re-panic.
    {
        int initialBBCount = func.basicBlocks.size();
        BIRBasicBlock startBB = func.basicBlocks.get(0);
        BIRBasicBlock endBB = func.basicBlocks.get(initialBBCount - 1);
        BIRBasicBlock observeEndBB = insertBasicBlock(func, initialBBCount);
        BIRBasicBlock rePanicBB = insertBasicBlock(func, initialBBCount + 1);
        BIROperand trappedErrorOperand = generateTempLocalVariable(func, "functionTrappedError",
                symbolTable.errorOrNilType);
        injectStopObservationWithErrorCall(func, observeEndBB, null, trappedErrorOperand,
                FUNC_BODY_INSTRUMENTATION_TYPE);
        rePanicBB.terminator = new Panic(null, trappedErrorOperand);
        BIRErrorEntry errorEntry = new BIRErrorEntry(startBB, endBB, trappedErrorOperand, observeEndBB);
        func.errorTable.add(errorEntry);
        observeEndBB.terminator.thenBB = rePanicBB;
    }
}
/**
 * Re-write the relevant basic blocks in the list of basic blocks to observe function invocations.
 *
 * <p>Each observable CALL terminator is wrapped with a start-observation block before it
 * and a stop-observation block after it (with error checking/reporting when the call's
 * result can be an error). A panic trap is also added so panics escaping the call stop
 * the observation with the error before re-panicking.
 *
 * @param func The function of which the instructions in the body should be instrumented
 * @param pkg The package which contains the instruction which will be observed
 */
private void rewriteObservableFunctionInvocations(BIRFunction func, BIRPackage pkg) {
    int i = 0;
    while (i < func.basicBlocks.size()) {
        BIRBasicBlock currentBB = func.basicBlocks.get(i);
        if (currentBB.terminator.kind == InstructionKind.CALL && isObservable((Call) currentBB.terminator)) {
            Call callIns = (Call) currentBB.terminator;
            Location desugaredInsPosition = callIns.pos;
            // The original call moves to newCurrentBB; observeStartBB runs before it.
            BIRBasicBlock observeStartBB = insertBasicBlock(func, i + 1);
            int newCurrentIndex = i + 2;
            BIRBasicBlock newCurrentBB = insertBasicBlock(func, newCurrentIndex);
            swapBasicBlockTerminator(func, currentBB, newCurrentBB);
            {
                // Determine the observed object (receiver for virtual calls, nil
                // otherwise) and the action name (strip the type prefix if present).
                BIROperand objectTypeOperand;
                String action;
                if (callIns.isVirtual) {
                    objectTypeOperand = callIns.args.get(0);
                    if (callIns.name.value.contains(".")) {
                        String[] split = callIns.name.value.split("\\.");
                        action = split[1];
                    } else {
                        action = callIns.name.value;
                    }
                } else {
                    objectTypeOperand = generateGlobalConstantOperand(pkg, symbolTable.nilType, null);
                    action = callIns.name.value;
                }
                currentBB.terminator = new GOTO(desugaredInsPosition, observeStartBB);
                BIRBasicBlock observeEndBB;
                boolean isRemote = callIns.calleeFlags.contains(Flag.REMOTE);
                Location originalInsPos = callIns.pos;
                if (isErrorAssignable(callIns.lhsOp.variableDcl)) {
                    // Result can be an error: start -> call -> check -> (report) -> stop.
                    BIRBasicBlock errorCheckBB = insertBasicBlock(func, i + 3);
                    BIRBasicBlock errorReportBB = insertBasicBlock(func, i + 4);
                    observeEndBB = insertBasicBlock(func, i + 5);
                    injectStartCallableObservationCall(func, observeStartBB, desugaredInsPosition,
                            isRemote, false, false, objectTypeOperand, action, pkg,
                            originalInsPos);
                    injectCheckErrorCalls(func, errorCheckBB, errorReportBB, observeEndBB,
                            desugaredInsPosition, callIns.lhsOp, INVOCATION_INSTRUMENTATION_TYPE);
                    injectReportErrorCall(func, errorReportBB, desugaredInsPosition, callIns.lhsOp,
                            INVOCATION_INSTRUMENTATION_TYPE);
                    injectStopObservationCall(observeEndBB, desugaredInsPosition);
                    observeEndBB.terminator.thenBB = newCurrentBB.terminator.thenBB;
                    errorReportBB.terminator.thenBB = observeEndBB;
                    newCurrentBB.terminator.thenBB = errorCheckBB;
                    observeStartBB.terminator.thenBB = newCurrentBB;
                    i += 5;  // skip past the inserted blocks
                } else {
                    // Result cannot be an error: start -> call -> stop.
                    observeEndBB = insertBasicBlock(func, i + 3);
                    injectStartCallableObservationCall(func, observeStartBB, desugaredInsPosition,
                            isRemote, false, false, objectTypeOperand, action, pkg,
                            originalInsPos);
                    injectStopObservationCall(observeEndBB, desugaredInsPosition);
                    observeEndBB.terminator.thenBB = newCurrentBB.terminator.thenBB;
                    newCurrentBB.terminator.thenBB = observeEndBB;
                    observeStartBB.terminator.thenBB = newCurrentBB;
                    i += 3;
                }
                fixErrorTable(func, currentBB, observeEndBB);
            }
            {
                /*
                 * Adding panic traps for the invocations. These report the error to the Observation covering
                 * the invocation. In the above instrumentation, only errors returned by functions are
                 * considered.
                 */
                Optional<BIRErrorEntry> existingEE = Optional.empty();
                for (BIRErrorEntry birErrorEntry : func.errorTable) {
                    if (isBBCoveredInErrorEntry(birErrorEntry, func.basicBlocks, newCurrentBB)) {
                        existingEE = Optional.of(birErrorEntry);
                        break;
                    }
                }
                Location desugaredInsPos = callIns.pos;
                if (existingEE.isPresent()) {
                    // The call is already covered by a trap: extend that trap's target
                    // to stop the observation before running the original handler.
                    BIRErrorEntry errorEntry = existingEE.get();
                    int eeTargetIndex = func.basicBlocks.indexOf(errorEntry.targetBB);
                    if (eeTargetIndex == -1) {
                        throw new BLangCompilerException("Invalid Error Entry pointing to non-existent " +
                                "target Basic Block " + errorEntry.targetBB.id);
                    }
                    BIRBasicBlock observeEndBB = insertBasicBlock(func, eeTargetIndex + 1);
                    BIRBasicBlock newTargetBB = insertBasicBlock(func, eeTargetIndex + 2);
                    swapBasicBlockContent(func, errorEntry.targetBB, newTargetBB);
                    injectCheckErrorCalls(func, errorEntry.targetBB, observeEndBB, newTargetBB,
                            desugaredInsPos, errorEntry.errorOp, INVOCATION_INSTRUMENTATION_TYPE);
                    injectStopObservationWithErrorCall(func, observeEndBB, desugaredInsPos,
                            errorEntry.errorOp, INVOCATION_INSTRUMENTATION_TYPE);
                    observeEndBB.terminator.thenBB = newTargetBB;
                    fixErrorTable(func, errorEntry.targetBB, newTargetBB);
                } else {
                    // No existing trap: add one that stops the observation with the
                    // trapped error and then re-panics.
                    BIRBasicBlock errorCheckBB = insertBasicBlock(func, newCurrentIndex + 1);
                    BIRBasicBlock observeEndBB = insertBasicBlock(func, newCurrentIndex + 2);
                    BIRBasicBlock rePanicBB = insertBasicBlock(func, newCurrentIndex + 3);
                    BIROperand trappedErrorOperand = generateTempLocalVariable(func, "trappedError",
                            symbolTable.errorOrNilType);
                    injectCheckErrorCalls(func, errorCheckBB, observeEndBB, newCurrentBB.terminator.thenBB,
                            newCurrentBB.terminator.pos, trappedErrorOperand,
                            INVOCATION_INSTRUMENTATION_TYPE);
                    injectStopObservationWithErrorCall(func, observeEndBB, newCurrentBB.terminator.pos,
                            trappedErrorOperand, INVOCATION_INSTRUMENTATION_TYPE);
                    rePanicBB.terminator = new Panic(newCurrentBB.terminator.pos, trappedErrorOperand);
                    BIRErrorEntry errorEntry = new BIRErrorEntry(newCurrentBB, newCurrentBB,
                            trappedErrorOperand, errorCheckBB);
                    func.errorTable.add(errorEntry);
                    newCurrentBB.terminator.thenBB = errorCheckBB;
                    observeEndBB.terminator.thenBB = rePanicBB;
                    i += 3;
                }
            }
        }
        i += 1;
    }
}
/**
 * Inject a start-resource-observation call as the terminator of a basic block.
 *
 * @param func Bir Function
 * @param observeStartBB The basic block to which the start observation call should be injected
 * @param serviceName The service to which the instruction was attached to
 * @param resourcePathOrFunction The resource path or function name
 * @param resourceAccessor The resource accessor if this is a resource
 * @param isResource True if the function is a resource
 * @param isRemote True if the function is a remote
 * @param pkg The package the invocation belongs to
 * @param originalInsPosition The source code position of the invocation
 */
private void injectStartResourceObservationCall(BIRFunction func, BIRBasicBlock observeStartBB, String serviceName,
                                                String resourcePathOrFunction, String resourceAccessor,
                                                boolean isResource, boolean isRemote, BIRPackage pkg,
                                                Location originalInsPosition) {
    // Constants are generated in a fixed order so that the numbering of the
    // generated $observabilityConst globals stays deterministic.
    BIROperand serviceNameOperand = generateGlobalConstantOperand(pkg, symbolTable.stringType, serviceName);
    BIROperand resourcePathOrFunctionOperand = generateGlobalConstantOperand(pkg, symbolTable.stringType,
            resourcePathOrFunction);
    BIROperand resourceAccessorOperand = generateGlobalConstantOperand(pkg, symbolTable.stringType,
            resourceAccessor);
    BIROperand isResourceOperand = generateGlobalConstantOperand(pkg, symbolTable.booleanType, isResource);
    BIROperand isRemoteOperand = generateGlobalConstantOperand(pkg, symbolTable.booleanType, isRemote);

    JIMethodCall startObservation = new JIMethodCall(null);
    startObservation.invocationType = INVOKESTATIC;
    startObservation.jClassName = OBSERVE_UTILS;
    startObservation.jMethodVMSig = START_RESOURCE_OBSERVATION;
    startObservation.name = START_RESOURCE_OBSERVATION_METHOD;

    // Position arguments first, followed by the resource metadata.
    List<BIROperand> args = generatePositionArgs(pkg, func, observeStartBB, originalInsPosition);
    args.addAll(Arrays.asList(serviceNameOperand, resourcePathOrFunctionOperand,
            resourceAccessorOperand, isResourceOperand, isRemoteOperand));
    startObservation.args = args;
    observeStartBB.terminator = startObservation;
}
/**
 * Inject a start-callable-observation call as the terminator of a basic block.
 *
 * @param func Bir Function
 * @param observeStartBB The basic block to which the start observation call should be injected
 * @param desugaredInsLocation The position of all instructions, variables declarations, terminators to be generated
 * @param isRemote True if a remote function will be observed by the observation
 * @param isMainEntryPoint True if the main function will be observed by the observation
 * @param isWorker True if a worker function will be observed by the observation
 * @param objectOperand The object the function was attached to
 * @param action The name of the action which will be observed
 * @param pkg The package the invocation belongs to
 * @param originalInsPosition The source code position of the invocation
 */
private void injectStartCallableObservationCall(BIRFunction func, BIRBasicBlock observeStartBB,
                                                Location desugaredInsLocation, boolean isRemote,
                                                boolean isMainEntryPoint, boolean isWorker,
                                                BIROperand objectOperand, String action,
                                                BIRPackage pkg, Location originalInsPosition) {
    // Constants are generated in a fixed order to keep global-constant numbering stable.
    BIROperand actionOperand = generateGlobalConstantOperand(pkg, symbolTable.stringType, action);
    BIROperand isMainEntryPointOperand = generateGlobalConstantOperand(pkg, symbolTable.booleanType,
            isMainEntryPoint);
    BIROperand isRemoteOperand = generateGlobalConstantOperand(pkg, symbolTable.booleanType, isRemote);
    BIROperand isWorkerOperand = generateGlobalConstantOperand(pkg, symbolTable.booleanType, isWorker);

    JIMethodCall startObservation = new JIMethodCall(desugaredInsLocation);
    startObservation.invocationType = INVOKESTATIC;
    startObservation.jClassName = OBSERVE_UTILS;
    startObservation.jMethodVMSig = START_CALLABLE_OBSERVATION;
    startObservation.name = START_CALLABLE_OBSERVATION_METHOD;

    // Position arguments first, then the callable metadata.
    List<BIROperand> args = generatePositionArgs(pkg, func, observeStartBB, originalInsPosition);
    args.addAll(Arrays.asList(objectOperand, actionOperand, isMainEntryPointOperand,
            isRemoteOperand, isWorkerOperand));
    startObservation.args = args;
    observeStartBB.terminator = startObservation;
}
/**
 * Inject a branch that tests whether a value is an error and jumps accordingly.
 *
 * @param func The BIR function in which the call is injected
 * @param errorCheckBB The basic block to which the error check should be injected
 * @param isErrorBB The basic block to which errors should go to
 * @param noErrorBB The basic block to which no errors should go to
 * @param pos The position of all instructions, variables declarations, terminators, etc.
 * @param valueOperand Operand for passing the value which should be checked if it is an error
 * @param uniqueId A unique ID to identify the check error call
 */
private void injectCheckErrorCalls(BIRFunction func, BIRBasicBlock errorCheckBB, BIRBasicBlock isErrorBB,
                                   BIRBasicBlock noErrorBB, Location pos, BIROperand valueOperand,
                                   String uniqueId) {
    // Reuse the shared "$isError" temp local keyed by the instrumentation type.
    BIROperand isErrorOperand = tempLocalVarsMap.get(uniqueId + "$isError");
    addLocalVarIfAbsent(func, isErrorOperand.variableDcl);
    errorCheckBB.instructions.add(
            new TypeTest(pos, symbolTable.errorType, isErrorOperand, valueOperand));
    errorCheckBB.terminator = new Branch(pos, isErrorOperand, isErrorBB, noErrorBB);
}
/**
 * Inject a report-error call into a basic block.
 *
 * @param func The BIR function in which the call is injected
 * @param errorReportBB The basic block to which the report error call should be injected
 * @param pos The position of all instructions, variables declarations, terminators, etc.
 * @param errorOperand Operand for passing the error
 * @param uniqueId A unique ID to identify the check error call
 */
private void injectReportErrorCall(BIRFunction func, BIRBasicBlock errorReportBB, Location pos,
                                   BIROperand errorOperand, String uniqueId) {
    // Cast to the error type before handing the value to the observe runtime.
    BIROperand castedErrorOperand = tempLocalVarsMap.get(uniqueId + "$castedError");
    addLocalVarIfAbsent(func, castedErrorOperand.variableDcl);
    errorReportBB.instructions.add(
            new TypeCast(pos, castedErrorOperand, errorOperand, symbolTable.errorType, false));

    JIMethodCall reportError = new JIMethodCall(pos);
    reportError.invocationType = INVOKESTATIC;
    reportError.jClassName = OBSERVE_UTILS;
    reportError.jMethodVMSig = ERROR_CALL;
    reportError.name = REPORT_ERROR_METHOD;
    reportError.args = Collections.singletonList(castedErrorOperand);
    errorReportBB.terminator = reportError;
}
/**
 * Inject a stop-observation call as the terminator of a basic block.
 *
 * @param observeEndBB The basic block to which the stop observation call should be injected
 * @param pos The position of all instructions, variables declarations, terminators, etc.
 */
private void injectStopObservationCall(BIRBasicBlock observeEndBB, Location pos) {
    JIMethodCall stopObservation = new JIMethodCall(pos);
    stopObservation.invocationType = INVOKESTATIC;
    stopObservation.jClassName = OBSERVE_UTILS;
    stopObservation.jMethodVMSig = STOP_OBSERVATION;
    stopObservation.name = STOP_OBSERVATION_METHOD;
    stopObservation.args = Collections.emptyList();
    observeEndBB.terminator = stopObservation;
}
/**
 * Inject a stop-observation-with-error call into a basic block.
 *
 * @param func The BIR function in which the call is injected
 * @param observeEndBB The basic block to which the stop observation call should be injected
 * @param pos The position of all instructions, variables declarations, terminators, etc.
 * @param errorOperand Operand for passing the error
 * @param uniqueId A unique ID to identify the check error call
 */
private void injectStopObservationWithErrorCall(BIRFunction func, BIRBasicBlock observeEndBB, Location pos,
                                                BIROperand errorOperand, String uniqueId) {
    // Cast to the error type before passing it to the observe runtime.
    BIROperand castedErrorOperand = tempLocalVarsMap.get(uniqueId + "$castedError");
    addLocalVarIfAbsent(func, castedErrorOperand.variableDcl);
    observeEndBB.instructions.add(
            new TypeCast(pos, castedErrorOperand, errorOperand, symbolTable.errorType, false));

    JIMethodCall stopWithError = new JIMethodCall(pos);
    stopWithError.invocationType = INVOKESTATIC;
    stopWithError.jClassName = OBSERVE_UTILS;
    stopWithError.jMethodVMSig = ERROR_CALL;
    stopWithError.name = STOP_OBSERVATION_WITH_ERROR_METHOD;
    stopWithError.args = Collections.singletonList(castedErrorOperand);
    observeEndBB.terminator = stopWithError;
}
/**
 * Generate (or reuse) a module-level constant operand for a compile-time known value.
 *
 * <p>Values are cached in {@code compileTimeConstants}; the actual constant-load instructions
 * are emitted later into the init function by the caller of this pass.
 *
 * @param pkg           The package which should contain the constant
 * @param constantType  The type of the constant
 * @param constantValue The compile-time value the operand will carry
 * @return The operand referring to the global constant
 */
private BIROperand generateGlobalConstantOperand(BIRPackage pkg, BType constantType, Object constantValue) {
    BIROperand cachedOperand = compileTimeConstants.get(constantValue);
    if (cachedOperand != null) {
        return cachedOperand;
    }
    // First time this value is seen: declare a fresh global constant for it.
    Name constName = new Name("$observabilityConst" + constantIndex++);
    BIRGlobalVariableDcl constVarDcl = new BIRGlobalVariableDcl(COMPILE_TIME_CONST_POS, 0,
            constantType, pkg.packageID, constName, constName,
            VarScope.GLOBAL, VarKind.CONSTANT, "", VIRTUAL);
    pkg.globalVars.add(constVarDcl);
    BIROperand constOperand = new BIROperand(constVarDcl);
    compileTimeConstants.put(constantValue, constOperand);
    return constOperand;
}
/**
 * Create a fresh basic block and insert it into the function at the given index.
 *
 * @param func        The function to which the basic block should be added
 * @param insertIndex The index at which the basic block should be inserted
 * @return The newly inserted basic block
 */
private BIRBasicBlock insertBasicBlock(BIRFunction func, int insertIndex) {
    // Desugared BBs get globally unique IDs via the running desugaredBBIndex counter.
    BIRBasicBlock injectedBB = new BIRBasicBlock(new Name(NEW_BB_PREFIX + desugaredBBIndex++));
    func.basicBlocks.add(insertIndex, injectedBB);
    return injectedBB;
}
/**
 * Swap the effective content of two basic blocks.
 *
 * @param func The BIR function
 * @param firstBB The first BB of which content should end up in second BB
 * @param secondBB The second BB of which content should end up in first BB
 */
private void swapBasicBlockContent(BIRFunction func, BIRBasicBlock firstBB, BIRBasicBlock secondBB) {
    // Exchange the non-terminator instruction lists of the two BBs.
    List<BIRNonTerminator> firstBBInstructions = firstBB.instructions;
    firstBB.instructions = secondBB.instructions;
    secondBB.instructions = firstBBInstructions;
    resetEndBasicBlock(func, firstBB, secondBB);
    // NOTE(review): swapBasicBlockTerminator() internally calls resetEndBasicBlock() with the
    // same arguments again; that second call is a no-op after the one above — confirm whether
    // both calls are intentional.
    swapBasicBlockTerminator(func, firstBB, secondBB);
}
/**
 * Exchange the terminators of two basic blocks and fix up local-variable end BBs accordingly.
 *
 * @param func     The BIR function owning both basic blocks
 * @param firstBB  The first BB; receives the second BB's terminator
 * @param secondBB The second BB; receives the first BB's terminator
 */
private void swapBasicBlockTerminator(BIRFunction func, BIRBasicBlock firstBB, BIRBasicBlock secondBB) {
    BIRTerminator heldTerminator = firstBB.terminator;
    firstBB.terminator = secondBB.terminator;
    secondBB.terminator = heldTerminator;
    // Local variables that ended at firstBB must now end at secondBB.
    resetEndBasicBlock(func, firstBB, secondBB);
}
/**
 * Re-point local variables whose scope ended at {@code firstBB} so they end at
 * {@code secondBB} instead. Used after basic block content/terminator swaps.
 *
 * @param func     The BIR function whose local variables are fixed up
 * @param firstBB  The BB previously recorded as the scope-ending BB
 * @param secondBB The BB that should now be recorded as the scope-ending BB
 */
private void resetEndBasicBlock(BIRFunction func, BIRBasicBlock firstBB, BIRBasicBlock secondBB) {
    for (BIRVariableDcl localVariable : func.localVars) {
        if (localVariable.endBB == firstBB) {
            localVariable.endBB = secondBB;
        }
    }
}
/**
 * Fix the ending BB of error entries in the error table of a function.
 * When desugared instructions are appended after a BB that was the end of a trap range,
 * the trap range must be extended to cover the newly added BB.
 *
 * @param func  The function whose error table should be fixed
 * @param oldBB The previous ending BB of the error entries to fix
 * @param newBB The BB that should become the new ending BB
 */
private void fixErrorTable(BIRFunction func, BIRBasicBlock oldBB, BIRBasicBlock newBB) {
    for (BIRErrorEntry tableEntry : func.errorTable) {
        if (tableEntry.endBB == oldBB) {
            tableEntry.endBB = newBB;
        }
    }
}
/**
 * Check if a call instruction is observable.
 *
 * <p>A call is observable when the callee is flagged remote, or when the callee carries the
 * observability annotation identified by {@code OBSERVABLE_ANNOTATION}.
 *
 * @param callIns The call instruction to check
 * @return True if the call instruction is observable
 */
private boolean isObservable(Call callIns) {
    if (callIns.calleeFlags.contains(Flag.REMOTE)) {
        // Remote calls are always observable; no need to scan the annotations.
        return true;
    }
    for (BIRAnnotationAttachment annot : callIns.calleeAnnotAttachments) {
        // Build the fully qualified annotation name (package prefix + tag) for comparison.
        String qualifiedAnnotName = JvmCodeGenUtil.getPackageName(
                new PackageID(annot.annotPkgId.orgName, annot.annotPkgId.name, Names.EMPTY))
                + annot.annotTagRef.value;
        if (OBSERVABLE_ANNOTATION.equals(qualifiedAnnotName)) {
            return true;
        }
    }
    return false;
}
/**
 * Check if an error is assignable to a variable declaration.
 *
 * @param variableDcl The variable declaration which should be checked
 * @return True if an error can be assigned and false otherwise
 */
private boolean isErrorAssignable(BIRVariableDcl variableDcl) {
    if (variableDcl.type instanceof BErrorType) {
        return true;
    }
    if (variableDcl.type instanceof BUnionType) {
        // An error is assignable to a union if any of its member types is an error type.
        for (BType memberType : ((BUnionType) variableDcl.type).getMemberTypes()) {
            if (memberType instanceof BErrorType) {
                return true;
            }
        }
    }
    return false;
}
/**
 * Check if a basic block is covered into an error entry.
 *
 * @param errorEntry The error entry from the error table
 * @param basicBlocksList The basic blocks list which contains the basic block to be checked for
 * @param basicBlock The basic block which should be checked for
 * @return True if the basic block is covered in the error entry
 */
private boolean isBBCoveredInErrorEntry(BIRErrorEntry errorEntry, List<BIRBasicBlock> basicBlocksList,
                                        BIRBasicBlock basicBlock) {
    // Fast path: the searched BB is one of the range boundaries itself.
    boolean isCovered = Objects.equals(basicBlock, errorEntry.trapBB)
            || Objects.equals(basicBlock, errorEntry.endBB);
    if (!isCovered) {
        /*
         * Traverse in the same way MethodGen.generateBasicBlocks traverses through basic blocks to generate
         * method body to check if the basic block is covered in the error entry.
         */
        // First, advance to the start of the protected range (the trap BB).
        int i = 0;
        for (; i < basicBlocksList.size(); i++) {
            BIRBasicBlock currentBB = basicBlocksList.get(i);
            if (currentBB == errorEntry.trapBB) {
                break;
            }
        }
        // Then scan forward: hitting the searched BB before the range end means it is covered.
        for (; i < basicBlocksList.size(); i++) {
            BIRBasicBlock currentBB = basicBlocksList.get(i);
            if (currentBB == basicBlock) {
                isCovered = true;
                break;
            }
            if (currentBB == errorEntry.endBB) {
                break;
            }
        }
    }
    return isCovered;
}
/**
 * Generate an ID for a ballerina module in {@code org/name:version} form.
 *
 * @param pkg The module for which the ID should be generated
 * @return The generated ID
 */
private String generatePackageId(PackageID pkg) {
    return String.format("%s/%s:%s", pkg.orgName.value, pkg.name.value, pkg.version.value);
}
/**
 * Generate operands for a source location.
 *
 * <p>Emits constant loads for the file name, start line and start column into
 * {@code observeStartBB}, and returns those operands together with the package-ID constant.
 *
 * @param pkg            BIR package
 * @param func           BIR function
 * @param observeStartBB Basic block receiving the constant-load instructions
 * @param pos            Source location to encode
 * @return Operands for package ID, source file name, start line and start column (in order)
 */
private List<BIROperand> generatePositionArgs(BIRPackage pkg, BIRFunction func, BIRBasicBlock observeStartBB,
                                              Location pos) {
    BIROperand packageOperand = generateGlobalConstantOperand(pkg, symbolTable.stringType,
            generatePackageId(pkg.packageID));
    BIROperand fileOperand = getTempLocalVariable(FILE_NAME_STRING, pos, pos.lineRange().filePath(),
            symbolTable.stringType, observeStartBB);
    addLocalVarIfAbsent(func, fileOperand.variableDcl);
    // Positions are converted from 0-based to 1-based before being loaded.
    BIROperand lineOperand = getTempLocalVariable(START_LINE_STRING, pos,
            pos.lineRange().startLine().line() + 1, symbolTable.intType, observeStartBB);
    addLocalVarIfAbsent(func, lineOperand.variableDcl);
    BIROperand columnOperand = getTempLocalVariable(START_COLUMN_STRING, pos,
            pos.lineRange().startLine().offset() + 1, symbolTable.intType, observeStartBB);
    addLocalVarIfAbsent(func, columnOperand.variableDcl);
    return new ArrayList<>(Arrays.asList(packageOperand, fileOperand, lineOperand, columnOperand));
}
/**
 * Fetch the shared temp local variable registered under {@code name} and emit a constant load
 * of {@code value} into it at the end of {@code currentBB}.
 *
 * @param name         Key of the pre-registered temp variable (see initializeTempLocalVariables)
 * @param pos          Position of the generated constant-load instruction
 * @param value        Compile-time value to load
 * @param variableType Type of the loaded value
 * @param currentBB    Basic block receiving the constant-load instruction
 * @return Operand referring to the shared temp variable
 */
private BIROperand getTempLocalVariable(String name, Location pos, Object value, BType variableType,
                                        BIRBasicBlock currentBB) {
    BIROperand tempOperand = tempLocalVarsMap.get(name);
    addConstantLoadIns(pos, value, variableType, tempOperand, currentBB);
    return tempOperand;
}
// Prepends fresh constant loads (indices 0 and 1) of the 1-based start line/column of pos into
// currentBB, so a checkpoint call inserted at a fixed offset afterwards sees this BB's position.
private void updatePositionArgsConstLoadIns(Location pos, BIRBasicBlock currentBB) {
    BIROperand startLineOperand = tempLocalVarsMap.get(START_LINE_STRING);
    BIROperand startColOperand = tempLocalVarsMap.get(START_COLUMN_STRING);
    addConstantLoadIns(pos, pos.lineRange().startLine().line() + 1, symbolTable.intType,
            startLineOperand, currentBB, 0);
    addConstantLoadIns(pos, pos.lineRange().startLine().offset() + 1, symbolTable.intType,
            startColOperand, currentBB, 1);
}
/** Append a constant load of {@code value} into {@code birOperand} at the end of {@code currentBB}. */
private void addConstantLoadIns(Location pos, Object value, BType variableType, BIROperand birOperand,
                                BIRBasicBlock currentBB) {
    currentBB.instructions.add(new ConstantLoad(pos, value, variableType, birOperand));
}
/** Insert a constant load of {@code value} into {@code birOperand} at {@code index} of {@code currentBB}. */
private void addConstantLoadIns(Location pos, Object value, BType variableType, BIROperand birOperand,
                                BIRBasicBlock currentBB, int index) {
    currentBB.instructions.add(index, new ConstantLoad(pos, value, variableType, birOperand));
}
/** Add {@code variableDcl} to the function's local variables unless it is already present. */
private void addLocalVarIfAbsent(BIRFunction func, BIRVariableDcl variableDcl) {
    List<BIRVariableDcl> localVars = func.localVars;
    if (!localVars.contains(variableDcl)) {
        localVars.add(variableDcl);
    }
}
/**
 * Initializes the temporary local variables which can be reused.
 *
 */
private void initializeTempLocalVariables() {
    // Position operands shared by checkpoint and observation-start instrumentation.
    generateTempLocalVariable(FILE_NAME_STRING, symbolTable.stringType);
    generateTempLocalVariable(START_LINE_STRING, symbolTable.intType);
    generateTempLocalVariable(START_COLUMN_STRING, symbolTable.intType);
    // Error-handling temps, one pair per instrumentation type (function body / invocation).
    generateTempLocalVariable(FUNC_BODY_INSTRUMENTATION_TYPE + "$castedError", symbolTable.errorType);
    generateTempLocalVariable(INVOCATION_INSTRUMENTATION_TYPE + "$castedError", symbolTable.errorType);
    generateTempLocalVariable(FUNC_BODY_INSTRUMENTATION_TYPE + "$isError", symbolTable.booleanType);
    generateTempLocalVariable(INVOCATION_INSTRUMENTATION_TYPE + "$isError", symbolTable.booleanType);
}
// Declares a reusable function-scope temp variable named "$observability$<name>" and registers
// its operand in tempLocalVarsMap under the given key.
private void generateTempLocalVariable(String name, BType variableType) {
    BIRVariableDcl tempVarDcl = new BIRVariableDcl(variableType, new Name("$observability$" + name),
            VarScope.FUNCTION, VarKind.TEMP);
    tempLocalVarsMap.put(name, new BIROperand(tempVarDcl));
}
/**
 * Generate a temporary function scope variable.
 *
 * @param func         The BIR function to which the variable should be added
 * @param name         The base name of the variable
 * @param variableType The type of the variable
 * @return The generated operand for the variable declaration
 */
private BIROperand generateTempLocalVariable(BIRFunction func, String name, BType variableType) {
    // Suffix with a per-function counter so repeated instrumentation sites get distinct names.
    BIRVariableDcl tempVarDcl = new BIRVariableDcl(variableType,
            new Name("$observability$" + name + "$" + localVarIndex++), VarScope.FUNCTION, VarKind.TEMP);
    func.localVars.add(tempVarDcl);
    return new BIROperand(tempVarDcl);
}
}
|
/**
 * Rewrites the BIR of a module to inject observability-related instrumentation:
 * observation start/stop calls, error reporting, and checkpoint recording.
 */
class JvmObservabilityGen {
    // Name of the module entry point; its body receives function-body instrumentation.
    private static final String ENTRY_POINT_MAIN_METHOD_NAME = "main";
    // ID prefix for basic blocks inserted by this desugar pass.
    private static final String NEW_BB_PREFIX = "observabilityDesugaredBB";
    // Instrumentation-type tags used to key the shared error temp variables.
    private static final String INVOCATION_INSTRUMENTATION_TYPE = "invocation";
    private static final String FUNC_BODY_INSTRUMENTATION_TYPE = "funcBody";
    // Keys of the shared position temp variables (see initializeTempLocalVariables()).
    private static final String FILE_NAME_STRING = "fileName";
    private static final String START_LINE_STRING = "startLine";
    private static final String START_COLUMN_STRING = "startCol";
    // Placeholder position attached to generated compile-time constant declarations.
    private static final Location COMPILE_TIME_CONST_POS =
            new BLangDiagnosticLocation(null, -1, -1, -1, -1, 0, 0);
    private final PackageCache packageCache;
    private final SymbolTable symbolTable;
    // Running counters used to generate unique names/IDs during desugaring.
    private int lambdaIndex;
    private int desugaredBBIndex;
    private int localVarIndex;        // reset to 0 per instrumented function
    private int constantIndex;
    private int defaultServiceIndex;  // used for generated default service names
    // Cache mapping compile-time constant values to their global-variable operands.
    private final Map<Object, BIROperand> compileTimeConstants;
    // Service class name -> attach point (base path or literal); used for default service names.
    private final Map<Name, String> svcAttachPoints;
    // Reusable temp local variables keyed by purpose (see initializeTempLocalVariables()).
    private final Map<String, BIROperand> tempLocalVarsMap;
    // Basic block -> predecessor basic blocks (rebuilt by populatePredecessorMap()).
    private final Map<BIRBasicBlock, List<BIRBasicBlock>> predecessorMap;
    JvmObservabilityGen(PackageCache packageCache, SymbolTable symbolTable) {
        this.compileTimeConstants = new HashMap<>();
        this.svcAttachPoints = new HashMap<>();
        this.tempLocalVarsMap = new HashMap<>();
        this.predecessorMap = new HashMap<>();
        this.packageCache = packageCache;
        this.symbolTable = symbolTable;
        this.lambdaIndex = 0;
        this.desugaredBBIndex = 0;
        this.constantIndex = 0;
        this.localVarIndex = 0;
        this.defaultServiceIndex = 0;
    }
/**
 * Instrument the package by rewriting the BIR to add relevant Observability related instructions.
 *
 * @param pkg The package to instrument
 */
public void instrumentPackage(BIRPackage pkg) {
    initializeTempLocalVariables();
    // Pass 1: instrument the module-level functions (main entry point, workers, observable calls).
    for (int i = 0; i < pkg.functions.size(); i++) {
        localVarIndex = 0;
        BIRFunction func = pkg.functions.get(i);
        if (ENTRY_POINT_MAIN_METHOD_NAME.equals(func.name.value)) {
            rewriteControlFlowInvocation(func, pkg);
        }
        rewriteAsyncInvocations(func, null, pkg);
        rewriteObservableFunctionInvocations(func, pkg);
        if (ENTRY_POINT_MAIN_METHOD_NAME.equals(func.name.value)) {
            rewriteObservableFunctionBody(func, pkg, null, func.name.value, null, false, false, true, false);
        } else if ((func.flags & Flags.WORKER) == Flags.WORKER) {
            rewriteObservableFunctionBody(func, pkg, null, func.workerName.value, null, false, false, false, true);
        }
    }
    // Record service attach points (base path or literal) keyed by the service's class name;
    // used below to derive a default service name when no display label is present.
    for (BIRNode.BIRServiceDeclaration serviceDecl : pkg.serviceDecls) {
        List<String> attachPoint = serviceDecl.attachPoint;
        String attachPointLiteral = serviceDecl.attachPointLiteral;
        if (attachPoint != null) {
            svcAttachPoints.put(serviceDecl.associatedClassName, "/" + String.join("/", attachPoint));
        } else if (attachPointLiteral != null) {
            svcAttachPoints.put(serviceDecl.associatedClassName, attachPointLiteral);
        }
    }
    // Pass 2: instrument functions attached to type definitions (resource/remote service methods).
    for (BIRTypeDefinition typeDef : pkg.typeDefs) {
        BType bType = JvmCodeGenUtil.getReferredType(typeDef.type);
        if ((typeDef.flags & Flags.CLASS) != Flags.CLASS && bType.tag == TypeTags.OBJECT) {
            continue;
        }
        boolean isService = (bType.flags & Flags.SERVICE) == Flags.SERVICE;
        String serviceName = null;
        if (isService) {
            // Prefer the "label" field of the display annotation as the service name.
            for (BIRNode.BIRAnnotationAttachment annotationAttachment : typeDef.annotAttachments) {
                if (DISPLAY_ANNOTATION.equals(annotationAttachment.annotTagRef.value)) {
                    BIRNode.ConstValue annotValue =
                            ((BIRNode.BIRConstAnnotationAttachment) annotationAttachment).annotValue;
                    Map<String, BIRNode.ConstValue> annotationMap =
                            (Map<String, BIRNode.ConstValue>) annotValue.value;
                    serviceName = annotationMap.get("label").value.toString();
                    break;
                }
            }
            if (serviceName == null) {
                // Fall back to the recorded attach point, then to a generated unique default name.
                String basePath = this.svcAttachPoints.get(typeDef.name);
                serviceName = Objects.requireNonNullElseGet(basePath, () ->
                        pkg.packageID.orgName.value + "_" + pkg.packageID.name.value + "_svc_" +
                                defaultServiceIndex++);
            }
        }
        for (int i = 0; i < typeDef.attachedFuncs.size(); i++) {
            BIRFunction func = typeDef.attachedFuncs.get(i);
            localVarIndex = 0;
            if (isService && ((func.flags & Flags.RESOURCE) == Flags.RESOURCE ||
                    (func.flags & Flags.REMOTE) == Flags.REMOTE)) {
                rewriteControlFlowInvocation(func, pkg);
            }
            rewriteAsyncInvocations(func, typeDef, pkg);
            rewriteObservableFunctionInvocations(func, pkg);
            if (isService) {
                if ((func.flags & Flags.RESOURCE) == Flags.RESOURCE) {
                    rewriteObservableFunctionBody(func, pkg, typeDef, func.name.value, serviceName,
                            true, false, false, false);
                } else if ((func.flags & Flags.REMOTE) == Flags.REMOTE) {
                    rewriteObservableFunctionBody(func, pkg, typeDef, func.name.value, serviceName,
                            false, true, false, false);
                }
            }
        }
    }
    // Finally, emit constant loads for all cached compile-time constants into the first BB of the
    // first function of the module (presumably the module init function — confirm ordering).
    BIRFunction initFunc = pkg.functions.get(0);
    BIRBasicBlock constInitBB = initFunc.basicBlocks.get(0);
    for (Map.Entry<Object, BIROperand> entry : compileTimeConstants.entrySet()) {
        BIROperand operand = entry.getValue();
        ConstantLoad constLoadIns = new ConstantLoad(COMPILE_TIME_CONST_POS, entry.getKey(),
                operand.variableDcl.type, operand);
        constInitBB.instructions.add(constLoadIns);
    }
}
/**
 * Adding Java Interop calls to basic blocks.
 * Here the JI calls are added for all kinds of terminators.
 * First we check if there are position details for instructions, if present we add the JI calls with those
 * positions else, we consider the terminator position to create the JI call.
 *
 * @param func The function of which the instructions should be rewritten
 * @param pkg The package containing the function
 */
private void rewriteControlFlowInvocation(BIRFunction func, BIRPackage pkg) {
    populatePredecessorMap(func.basicBlocks);
    for (Map.Entry<BIRBasicBlock, List<BIRBasicBlock>> entry : this.predecessorMap.entrySet()) {
        BIRBasicBlock currentBB = entry.getKey();
        Location desugaredPos = getDesugaredPosition(currentBB);
        // Only instrument BBs with a valid (non-negative line) position.
        if (desugaredPos != null && desugaredPos.lineRange().startLine().line() >= 0) {
            List<BIRBasicBlock> predecessors = entry.getValue();
            int callInsOffset = 0;
            // Reload the position constants only when no predecessor already loaded the same
            // position. The two constant loads are prepended at indices 0 and 1, so the
            // checkpoint call must then be placed at offset 2 to follow them.
            if (!desugaredPosAlreadyLoaded(desugaredPos, predecessors)) {
                updatePositionArgsConstLoadIns(desugaredPos, currentBB);
                callInsOffset = 2;
            }
            injectCheckpointCall(currentBB, pkg, callInsOffset);
        }
    }
}
/**
 * Check whether any predecessor BB carries the same desugared position, in which case the
 * position constants do not need to be reloaded for the current BB.
 *
 * @param desugaredPos The position the current BB would load
 * @param predecessors The predecessor basic blocks of the current BB
 * @return True if some predecessor already has an equal desugared position
 */
private boolean desugaredPosAlreadyLoaded(Location desugaredPos, List<BIRBasicBlock> predecessors) {
    for (BIRBasicBlock predecessor : predecessors) {
        Location predecessorPos = getDesugaredPosition(predecessor);
        if (predecessorPos != null && predecessorPos.equals(desugaredPos)) {
            return true;
        }
    }
    return false;
}
/**
 * Inject a checkpoint JI method call into a basic block.
 *
 * @param currentBB The basic block to which the checkpoint call should be injected
 * @param pkg       The package the invocation belongs to
 * @param offset    Index at which the record-checkpoint instruction is inserted
 */
private void injectCheckpointCall(BIRBasicBlock currentBB, BIRPackage pkg, int offset) {
    BIROperand packageOperand = generateGlobalConstantOperand(pkg, symbolTable.stringType,
            generatePackageId(pkg.packageID));
    JMethodCallInstruction checkpointCall = new JMethodCallInstruction(null);
    checkpointCall.invocationType = INVOKESTATIC;
    checkpointCall.jClassName = OBSERVE_UTILS;
    checkpointCall.jMethodVMSig = CHECKPOINT_CALL;
    checkpointCall.name = RECORD_CHECKPOINT_METHOD;
    // Args: package ID plus the shared file/line/column temp operands.
    checkpointCall.args = new ArrayList<>(Arrays.asList(packageOperand,
            tempLocalVarsMap.get(FILE_NAME_STRING),
            tempLocalVarsMap.get(START_LINE_STRING),
            tempLocalVarsMap.get(START_COLUMN_STRING)));
    currentBB.instructions.add(offset, checkpointCall);
}
// Rebuilds predecessorMap: for every BB, collects the BBs whose terminators can jump to it.
private void populatePredecessorMap(List<BIRBasicBlock> basicBlocks) {
    this.predecessorMap.clear();
    for (BIRBasicBlock predecessor : basicBlocks) {
        // Ensure every BB gets an entry, even when it has no predecessors.
        this.predecessorMap.computeIfAbsent(predecessor, k -> new ArrayList<>());
        for (BIRBasicBlock successor : predecessor.terminator.getNextBasicBlocks()) {
            this.predecessorMap.computeIfAbsent(successor, k -> new ArrayList<>()).add(predecessor);
        }
    }
}
/**
 * Rewrite the invocations in the function bodies to call a lambda asynchronously which in turn calls the
 * actual function synchronously. This is done so that the actual invocation can be observed accurately.
 * Without this wrapper, the start and end time recorded would only reflect the time it took to give the async
 * invocation to the scheduler. However, we require the actual time it took for the invocation.
 *
 * @param func The function of which the instructions in the body should be rewritten
 * @param attachedTypeDef The type definition to which the function was attached to or null
 * @param pkg The package containing the function
 */
private void rewriteAsyncInvocations(BIRFunction func, BIRTypeDefinition attachedTypeDef, BIRPackage pkg) {
    PackageID packageID = pkg.packageID;
    Name org = new Name(Utils.decodeIdentifier(packageID.orgName.getValue()));
    Name module = new Name(Utils.decodeIdentifier(packageID.name.getValue()));
    PackageID currentPkgId = new PackageID(org, module, module, packageID.version, packageID.sourceFileName,
            packageID.isTestPkg);
    // The generated wrapper lambda is added either to the module's functions or to the attached
    // type's functions, with the matching owner symbol.
    BSymbol functionOwner;
    List<BIRFunction> scopeFunctionsList;
    if (attachedTypeDef == null) {
        functionOwner = packageCache.getSymbol(currentPkgId);
        scopeFunctionsList = pkg.functions;
    } else {
        functionOwner = attachedTypeDef.type.tsymbol;
        scopeFunctionsList = attachedTypeDef.attachedFuncs;
    }
    for (BIRBasicBlock currentBB : func.basicBlocks) {
        if (currentBB.terminator.kind != InstructionKind.ASYNC_CALL
                || !isObservable((AsyncCall) currentBB.terminator)) {
            continue;
        }
        AsyncCall asyncCallIns = (AsyncCall) currentBB.terminator;
        /*
         * The wrapper function generated below invokes the actual function synchronously, allowing the
         * instrumentation to record the actual start and end times of the function. The wrapper function
         * is invoked asynchronously preserving the asynchronous behaviour.
         */
        BType returnType = ((BFutureType) asyncCallIns.lhsOp.variableDcl.type).constraint;
        List<BType> argTypes = new ArrayList<>();
        for (BIROperand arg : asyncCallIns.args) {
            BType type = arg.variableDcl.type;
            argTypes.add(type);
        }
        Name lambdaName = new Name("$lambda$observability" + lambdaIndex++ + "$" +
                asyncCallIns.name.value.replace(".", "_"));
        BInvokableType bInvokableType = new BInvokableType(argTypes, null,
                returnType, null);
        BIRFunction desugaredFunc = new BIRFunction(asyncCallIns.pos, lambdaName, 0, bInvokableType,
                func.workerName, 0, VIRTUAL);
        desugaredFunc.receiver = func.receiver;
        scopeFunctionsList.add(desugaredFunc);
        // Return variable of the wrapper, typed with the future's constraint type.
        BIRVariableDcl funcReturnVariableDcl = new BIRVariableDcl(returnType,
                new Name("$" + lambdaName.value + "$retVal"), VarScope.FUNCTION, VarKind.RETURN);
        BIROperand funcReturnOperand = new BIROperand(funcReturnVariableDcl);
        desugaredFunc.localVars.add(funcReturnVariableDcl);
        desugaredFunc.returnVariable = funcReturnVariableDcl;
        // Symbol for the wrapper so it can be resolved like a regular function.
        BInvokableSymbol invokableSymbol = new BInvokableSymbol(SymTag.FUNCTION, 0, lambdaName,
                currentPkgId, bInvokableType, functionOwner,
                desugaredFunc.pos, VIRTUAL);
        invokableSymbol.retType = funcReturnVariableDcl.type;
        invokableSymbol.kind = SymbolKind.FUNCTION;
        List<BVarSymbol> list = new ArrayList<>();
        for (BIROperand arg : asyncCallIns.args) {
            BVarSymbol bVarSymbol = new BVarSymbol(0, arg.variableDcl.name, currentPkgId, arg.variableDcl.type,
                    invokableSymbol, arg.pos, VIRTUAL);
            list.add(bVarSymbol);
        }
        invokableSymbol.params = list;
        invokableSymbol.scope = new Scope(invokableSymbol);
        invokableSymbol.params.forEach(param -> invokableSymbol.scope.define(param.name, param));
        if (attachedTypeDef == null) {
            functionOwner.scope.define(lambdaName, invokableSymbol);
        }
        // Mirror the async call's arguments as the wrapper's parameters.
        List<BIROperand> funcParamOperands = new ArrayList<>();
        Name selfArgName = new Name("%self");
        for (int i = 0; i < asyncCallIns.args.size(); i++) {
            BIROperand arg = asyncCallIns.args.get(i);
            BIRFunctionParameter funcParam;
            if (arg.variableDcl.kind == VarKind.SELF) {
                funcParam = new BIRFunctionParameter(asyncCallIns.pos, arg.variableDcl.type, selfArgName,
                        VarScope.FUNCTION, VarKind.SELF, selfArgName.value, false);
            } else {
                // Fix: drop the leftover "%d" format specifier that was being concatenated into
                // the generated name (previously produced names like "$funcParam%d0").
                Name argName = new Name("$funcParam" + i);
                funcParam = new BIRFunctionParameter(asyncCallIns.pos, arg.variableDcl.type,
                        argName, VarScope.FUNCTION, VarKind.ARG, argName.value, false);
                desugaredFunc.localVars.add(funcParam);
                desugaredFunc.parameters.add(funcParam);
                desugaredFunc.requiredParams.add(new BIRParameter(asyncCallIns.pos, argName, 0));
                desugaredFunc.argsCount++;
            }
            funcParamOperands.add(new BIROperand(funcParam));
        }
        // Wrapper body: synchronously call the original target, then return its value.
        BIRBasicBlock callInsBB = insertBasicBlock(desugaredFunc, 0);
        BIRBasicBlock returnInsBB = insertBasicBlock(desugaredFunc, 1);
        callInsBB.terminator = new Call(asyncCallIns.pos, InstructionKind.CALL, asyncCallIns.isVirtual,
                asyncCallIns.calleePkg, asyncCallIns.name, funcParamOperands, funcReturnOperand,
                returnInsBB, asyncCallIns.calleeAnnotAttachments, asyncCallIns.calleeFlags);
        returnInsBB.terminator = new Return(asyncCallIns.pos);
        // Retarget the original async call to invoke the wrapper lambda instead.
        asyncCallIns.name = lambdaName;
        asyncCallIns.calleePkg = currentPkgId;
        asyncCallIns.isVirtual = attachedTypeDef != null;
        if (attachedTypeDef != null) {
            asyncCallIns.args.add(0, new BIROperand(new BIRVariableDcl(attachedTypeDef.type, selfArgName,
                    VarScope.FUNCTION, VarKind.SELF)));
        }
    }
}
/**
 * Rewrite a function so that the internal body will be observed. This adds the relevant start and stop calls at
 * the beginning and return basic blocks of the function.
 * This is only to be used in service resource functions, workers and main method.
 * This method expects that Observable invocations had already been instrumented properly before this method is
 * called. This is because the uncaught panics thrown from such observable invocations are reported to the
 * observation covering the function body by using the re-panic terminators which gets added in
 * rewriteObservableFunctionInvocations method.
 *
 * @param func The function to instrument
 * @param pkg The package which contains the function
 * @param attachedTypeDef The type definition the function is attached to
 * @param functionName The name of the function which will be observed
 * @param serviceName The name of the service the function belongs to (null for non-service functions)
 * @param isResource True if the function is a resource function
 * @param isRemote True if the function is a remote function
 * @param isMainEntryPoint True if the function is the main entry point
 * @param isWorker True if the function was a worker
 */
private void rewriteObservableFunctionBody(BIRFunction func, BIRPackage pkg, BIRTypeDefinition attachedTypeDef,
                                           String functionName, String serviceName, boolean isResource,
                                           boolean isRemote, boolean isMainEntryPoint, boolean isWorker) {
    // Inject the observation-start call: move the original first BB's content into a fresh BB
    // and place the start call in front of it.
    {
        BIRBasicBlock startBB = func.basicBlocks.get(0);
        BIRBasicBlock newStartBB = insertBasicBlock(func, 1);
        swapBasicBlockContent(func, startBB, newStartBB);
        if (isResource || isRemote) {
            String resourcePathOrFunction = functionName;
            String resourceAccessor = null;
            if (isResource) {
                // For resources, derive the "/seg1/seg2" path and the accessor (e.g. get/post)
                // from the matching attached resource function.
                for (BAttachedFunction attachedFunc : ((BClassSymbol) attachedTypeDef.type.tsymbol).attachedFuncs) {
                    if (Objects.equals(attachedFunc.funcName.value, functionName)) {
                        BResourceFunction resourceFunction = (BResourceFunction) attachedFunc;
                        StringBuilder resourcePathOrFunctionBuilder = new StringBuilder();
                        for (Name name : resourceFunction.resourcePath) {
                            resourcePathOrFunctionBuilder.append("/").append(name.value);
                        }
                        resourcePathOrFunction = resourcePathOrFunctionBuilder.toString();
                        resourceAccessor = resourceFunction.accessor.value;
                        break;
                    }
                }
            }
            injectStartResourceObservationCall(func, startBB, serviceName, resourcePathOrFunction, resourceAccessor,
                    isResource, isRemote, pkg, func.pos);
        } else {
            BIROperand objectTypeOperand = generateGlobalConstantOperand(pkg, symbolTable.nilType, null);
            injectStartCallableObservationCall(func, startBB, null, false, isMainEntryPoint, isWorker,
                    objectTypeOperand, functionName, pkg, func.pos);
        }
        startBB.terminator.thenBB = newStartBB;
    }
    boolean isErrorCheckRequired = isErrorAssignable(func.returnVariable);
    BIROperand returnValOperand = new BIROperand(func.returnVariable);
    // Inject the observation-stop calls before every RETURN and PANIC terminator. Index
    // arithmetic skips over the BBs inserted in each iteration.
    int i = 1;
    while (i < func.basicBlocks.size()) {
        BIRBasicBlock currentBB = func.basicBlocks.get(i);
        if (currentBB.terminator.kind == InstructionKind.RETURN) {
            if (isErrorCheckRequired) {
                // Returned value may be an error: branch to report-error before stopping.
                BIRBasicBlock errorReportBB = insertBasicBlock(func, i + 1);
                BIRBasicBlock observeEndBB = insertBasicBlock(func, i + 2);
                BIRBasicBlock newCurrentBB = insertBasicBlock(func, i + 3);
                swapBasicBlockTerminator(func, currentBB, newCurrentBB);
                injectCheckErrorCalls(func, currentBB, errorReportBB, observeEndBB, null,
                        returnValOperand, FUNC_BODY_INSTRUMENTATION_TYPE);
                injectReportErrorCall(func, errorReportBB, null, returnValOperand,
                        FUNC_BODY_INSTRUMENTATION_TYPE);
                injectStopObservationCall(observeEndBB, null);
                observeEndBB.terminator.thenBB = newCurrentBB;
                errorReportBB.terminator.thenBB = observeEndBB;
                i += 3;
            } else {
                BIRBasicBlock newCurrentBB = insertBasicBlock(func, i + 1);
                swapBasicBlockTerminator(func, currentBB, newCurrentBB);
                injectStopObservationCall(currentBB, null);
                currentBB.terminator.thenBB = newCurrentBB;
                i += 1;
            }
        } else if (currentBB.terminator.kind == InstructionKind.PANIC) {
            // Stop the observation with the panic error before re-throwing.
            Panic panicCall = (Panic) currentBB.terminator;
            BIRBasicBlock newCurrentBB = insertBasicBlock(func, i + 1);
            swapBasicBlockTerminator(func, currentBB, newCurrentBB);
            injectStopObservationWithErrorCall(func, currentBB, newCurrentBB.terminator.pos,
                    panicCall.errorOp, FUNC_BODY_INSTRUMENTATION_TYPE);
            currentBB.terminator.thenBB = newCurrentBB;
            i += 1;
        }
        i++;
    }
    // Wrap the whole body in a trap so uncaught panics stop the observation with the error
    // before being re-panicked.
    {
        int initialBBCount = func.basicBlocks.size();
        BIRBasicBlock startBB = func.basicBlocks.get(0);
        BIRBasicBlock endBB = func.basicBlocks.get(initialBBCount - 1);
        BIRBasicBlock observeEndBB = insertBasicBlock(func, initialBBCount);
        BIRBasicBlock rePanicBB = insertBasicBlock(func, initialBBCount + 1);
        BIROperand trappedErrorOperand = generateTempLocalVariable(func, "functionTrappedError",
                symbolTable.errorOrNilType);
        injectStopObservationWithErrorCall(func, observeEndBB, null, trappedErrorOperand,
                FUNC_BODY_INSTRUMENTATION_TYPE);
        rePanicBB.terminator = new Panic(null, trappedErrorOperand);
        BIRErrorEntry errorEntry = new BIRErrorEntry(startBB, endBB, trappedErrorOperand, observeEndBB);
        func.errorTable.add(errorEntry);
        observeEndBB.terminator.thenBB = rePanicBB;
    }
}
/**
 * Re-write the relevant basic blocks in the list of basic blocks to observe function invocations.
 *
 * @param func The function of which the instructions in the body should be instrumented
 * @param pkg The package which contains the instruction which will be observed
 */
private void rewriteObservableFunctionInvocations(BIRFunction func, BIRPackage pkg) {
    int i = 0;
    while (i < func.basicBlocks.size()) {
        BIRBasicBlock currentBB = func.basicBlocks.get(i);
        if (currentBB.terminator.kind == InstructionKind.CALL && isObservable((Call) currentBB.terminator)) {
            Call callIns = (Call) currentBB.terminator;
            Location desugaredInsPosition = callIns.pos;
            // Move the observable call into a fresh BB (newCurrentBB) so it can be surrounded
            // by observation-start and observation-stop BBs.
            BIRBasicBlock observeStartBB = insertBasicBlock(func, i + 1);
            int newCurrentIndex = i + 2;
            BIRBasicBlock newCurrentBB = insertBasicBlock(func, newCurrentIndex);
            swapBasicBlockTerminator(func, currentBB, newCurrentBB);
            {
                BIROperand objectTypeOperand;
                String action;
                if (callIns.isVirtual) {
                    // Virtual calls carry the receiver as the first arg; a qualified name of
                    // the form "<type>.<method>" is reduced to the method part.
                    objectTypeOperand = callIns.args.get(0);
                    if (callIns.name.value.contains(".")) {
                        String[] split = callIns.name.value.split("\\.");
                        action = split[1];
                    } else {
                        action = callIns.name.value;
                    }
                } else {
                    objectTypeOperand = generateGlobalConstantOperand(pkg, symbolTable.nilType, null);
                    action = callIns.name.value;
                }
                currentBB.terminator = new GOTO(desugaredInsPosition, observeStartBB);
                BIRBasicBlock observeEndBB;
                boolean isRemote = callIns.calleeFlags.contains(Flag.REMOTE);
                Location originalInsPos = callIns.pos;
                if (isErrorAssignable(callIns.lhsOp.variableDcl)) {
                    // Result may be an error: check it and report before stopping the observation.
                    BIRBasicBlock errorCheckBB = insertBasicBlock(func, i + 3);
                    BIRBasicBlock errorReportBB = insertBasicBlock(func, i + 4);
                    observeEndBB = insertBasicBlock(func, i + 5);
                    injectStartCallableObservationCall(func, observeStartBB, desugaredInsPosition,
                            isRemote, false, false, objectTypeOperand, action, pkg,
                            originalInsPos);
                    injectCheckErrorCalls(func, errorCheckBB, errorReportBB, observeEndBB,
                            desugaredInsPosition, callIns.lhsOp, INVOCATION_INSTRUMENTATION_TYPE);
                    injectReportErrorCall(func, errorReportBB, desugaredInsPosition, callIns.lhsOp,
                            INVOCATION_INSTRUMENTATION_TYPE);
                    injectStopObservationCall(observeEndBB, desugaredInsPosition);
                    observeEndBB.terminator.thenBB = newCurrentBB.terminator.thenBB;
                    errorReportBB.terminator.thenBB = observeEndBB;
                    newCurrentBB.terminator.thenBB = errorCheckBB;
                    observeStartBB.terminator.thenBB = newCurrentBB;
                    i += 5;
                } else {
                    observeEndBB = insertBasicBlock(func, i + 3);
                    injectStartCallableObservationCall(func, observeStartBB, desugaredInsPosition,
                            isRemote, false, false, objectTypeOperand, action, pkg,
                            originalInsPos);
                    injectStopObservationCall(observeEndBB, desugaredInsPosition);
                    observeEndBB.terminator.thenBB = newCurrentBB.terminator.thenBB;
                    newCurrentBB.terminator.thenBB = observeEndBB;
                    observeStartBB.terminator.thenBB = newCurrentBB;
                    i += 3;
                }
                fixErrorTable(func, currentBB, observeEndBB);
            }
            {
                /*
                 * Adding panic traps for the invocations. These report the error to the Observation covering
                 * the invocation. In the above instrumentation, only errors returned by functions are
                 * considered.
                 */
                Optional<BIRErrorEntry> existingEE = Optional.empty();
                for (BIRErrorEntry birErrorEntry : func.errorTable) {
                    if (isBBCoveredInErrorEntry(birErrorEntry, func.basicBlocks, newCurrentBB)) {
                        existingEE = Optional.of(birErrorEntry);
                        break;
                    }
                }
                Location desugaredInsPos = callIns.pos;
                if (existingEE.isPresent()) {
                    // The call is already inside a trap: extend its target to first stop the
                    // observation with the trapped error before the original handler runs.
                    BIRErrorEntry errorEntry = existingEE.get();
                    int eeTargetIndex = func.basicBlocks.indexOf(errorEntry.targetBB);
                    if (eeTargetIndex == -1) {
                        throw new BLangCompilerException("Invalid Error Entry pointing to non-existent " +
                                "target Basic Block " + errorEntry.targetBB.id);
                    }
                    BIRBasicBlock observeEndBB = insertBasicBlock(func, eeTargetIndex + 1);
                    BIRBasicBlock newTargetBB = insertBasicBlock(func, eeTargetIndex + 2);
                    swapBasicBlockContent(func, errorEntry.targetBB, newTargetBB);
                    injectCheckErrorCalls(func, errorEntry.targetBB, observeEndBB, newTargetBB,
                            desugaredInsPos, errorEntry.errorOp, INVOCATION_INSTRUMENTATION_TYPE);
                    injectStopObservationWithErrorCall(func, observeEndBB, desugaredInsPos,
                            errorEntry.errorOp, INVOCATION_INSTRUMENTATION_TYPE);
                    observeEndBB.terminator.thenBB = newTargetBB;
                    fixErrorTable(func, errorEntry.targetBB, newTargetBB);
                } else {
                    // No covering trap: add one around the call BB which stops the observation
                    // with the trapped error and then re-panics it.
                    BIRBasicBlock errorCheckBB = insertBasicBlock(func, newCurrentIndex + 1);
                    BIRBasicBlock observeEndBB = insertBasicBlock(func, newCurrentIndex + 2);
                    BIRBasicBlock rePanicBB = insertBasicBlock(func, newCurrentIndex + 3);
                    BIROperand trappedErrorOperand = generateTempLocalVariable(func, "trappedError",
                            symbolTable.errorOrNilType);
                    injectCheckErrorCalls(func, errorCheckBB, observeEndBB, newCurrentBB.terminator.thenBB,
                            newCurrentBB.terminator.pos, trappedErrorOperand,
                            INVOCATION_INSTRUMENTATION_TYPE);
                    injectStopObservationWithErrorCall(func, observeEndBB, newCurrentBB.terminator.pos,
                            trappedErrorOperand, INVOCATION_INSTRUMENTATION_TYPE);
                    rePanicBB.terminator = new Panic(newCurrentBB.terminator.pos, trappedErrorOperand);
                    BIRErrorEntry errorEntry = new BIRErrorEntry(newCurrentBB, newCurrentBB,
                            trappedErrorOperand, errorCheckBB);
                    func.errorTable.add(errorEntry);
                    newCurrentBB.terminator.thenBB = errorCheckBB;
                    observeEndBB.terminator.thenBB = rePanicBB;
                    // Skip over the three BBs inserted above.
                    i += 3;
                }
            }
        }
        i += 1;
    }
}
/**
* Inject start observation call to a basic block.
* @param func Bir Function
* @param observeStartBB The basic block to which the start observation call should be injected
* @param serviceName The service to which the instruction was attached to
* @param resourcePathOrFunction The resource path or function name
* @param resourceAccessor The resource accessor if this is a resource
* @param isResource True if the function is a resource
* @param isRemote True if the function is a remote
* @param pkg The package the invocation belongs to
* @param originalInsPosition The source code position of the invocation
*/
private void injectStartResourceObservationCall(BIRFunction func, BIRBasicBlock observeStartBB, String serviceName,
String resourcePathOrFunction, String resourceAccessor,
boolean isResource, boolean isRemote, BIRPackage pkg,
Location originalInsPosition) {
BIROperand serviceNameOperand = generateGlobalConstantOperand(pkg, symbolTable.stringType, serviceName);
BIROperand resourcePathOrFunctionOperand = generateGlobalConstantOperand(pkg, symbolTable.stringType,
resourcePathOrFunction);
BIROperand resourceAccessorOperand = generateGlobalConstantOperand(pkg, symbolTable.stringType,
resourceAccessor);
BIROperand isResourceOperand = generateGlobalConstantOperand(pkg, symbolTable.booleanType, isResource);
BIROperand isRemoteOperand = generateGlobalConstantOperand(pkg, symbolTable.booleanType, isRemote);
JIMethodCall observeStartCallTerminator = new JIMethodCall(null);
observeStartCallTerminator.invocationType = INVOKESTATIC;
observeStartCallTerminator.jClassName = OBSERVE_UTILS;
observeStartCallTerminator.jMethodVMSig = START_RESOURCE_OBSERVATION;
observeStartCallTerminator.name = START_RESOURCE_OBSERVATION_METHOD;
List<BIROperand> positionOperands = generatePositionArgs(pkg, func, observeStartBB, originalInsPosition);
List<BIROperand> otherOperands = Arrays.asList(serviceNameOperand, resourcePathOrFunctionOperand,
resourceAccessorOperand, isResourceOperand, isRemoteOperand);
positionOperands.addAll(otherOperands);
observeStartCallTerminator.args = positionOperands;
observeStartBB.terminator = observeStartCallTerminator;
}
/**
* Inject start observation call to a basic block.
*
* @param func Bir Function
* @param observeStartBB The basic block to which the start observation call should be injected
* @param desugaredInsLocation The position of all instructions, variables declarations, terminators to be generated
* @param isRemote True if a remote function will be observed by the observation
* @param isMainEntryPoint True if the main function will be observed by the observation
* @param isWorker True if a worker function will be observed by the observation
* @param objectOperand The object the function was attached to
* @param action The name of the action which will be observed
* @param pkg The package the invocation belongs to
* @param originalInsPosition The source code position of the invocation
*/
private void injectStartCallableObservationCall(BIRFunction func, BIRBasicBlock observeStartBB,
Location desugaredInsLocation, boolean isRemote,
boolean isMainEntryPoint, boolean isWorker,
BIROperand objectOperand, String action,
BIRPackage pkg, Location originalInsPosition) {
BIROperand actionOperand = generateGlobalConstantOperand(pkg, symbolTable.stringType, action);
BIROperand isMainEntryPointOperand = generateGlobalConstantOperand(pkg, symbolTable.booleanType,
isMainEntryPoint);
BIROperand isRemoteOperand = generateGlobalConstantOperand(pkg, symbolTable.booleanType, isRemote);
BIROperand isWorkerOperand = generateGlobalConstantOperand(pkg, symbolTable.booleanType, isWorker);
JIMethodCall observeStartCallTerminator = new JIMethodCall(desugaredInsLocation);
observeStartCallTerminator.invocationType = INVOKESTATIC;
observeStartCallTerminator.jClassName = OBSERVE_UTILS;
observeStartCallTerminator.jMethodVMSig = START_CALLABLE_OBSERVATION;
observeStartCallTerminator.name = START_CALLABLE_OBSERVATION_METHOD;
List<BIROperand> positionOperands = generatePositionArgs(pkg, func, observeStartBB, originalInsPosition);
List<BIROperand> otherOperands = Arrays.asList(objectOperand, actionOperand, isMainEntryPointOperand,
isRemoteOperand, isWorkerOperand);
positionOperands.addAll(otherOperands);
observeStartCallTerminator.args = positionOperands;
observeStartBB.terminator = observeStartCallTerminator;
}
/**
* Inject branch condition for checking if a value is an error.
*
* @param func The BIR function in which the call is injected
* @param errorCheckBB The basic block to which the error check should be injected
* @param isErrorBB The basic block to which errors should go to
* @param noErrorBB The basic block to which no errors should go to
* @param pos The position of all instructions, variables declarations, terminators, etc.
* @param valueOperand Operand for passing the value which should be checked if it is an error
* @param uniqueId A unique ID to identify the check error call
*/
private void injectCheckErrorCalls(BIRFunction func, BIRBasicBlock errorCheckBB, BIRBasicBlock isErrorBB,
BIRBasicBlock noErrorBB, Location pos, BIROperand valueOperand,
String uniqueId) {
BIROperand isErrorOperand = tempLocalVarsMap.get(uniqueId + "$isError");
addLocalVarIfAbsent(func, isErrorOperand.variableDcl);
TypeTest errorTypeTestInstruction = new TypeTest(pos, symbolTable.errorType, isErrorOperand, valueOperand);
errorCheckBB.instructions.add(errorTypeTestInstruction);
errorCheckBB.terminator = new Branch(pos, isErrorOperand, isErrorBB, noErrorBB);
}
/**
* Inject report error call.
*
* @param func The BIR function in which the call is injected
* @param errorReportBB The basic block to which the report error call should be injected
* @param pos The position of all instructions, variables declarations, terminators, etc.
* @param errorOperand Operand for passing the error
* @param uniqueId A unique ID to identify the check error call
*/
private void injectReportErrorCall(BIRFunction func, BIRBasicBlock errorReportBB, Location pos,
BIROperand errorOperand, String uniqueId) {
BIROperand castedErrorOperand = tempLocalVarsMap.get(uniqueId + "$castedError");
addLocalVarIfAbsent(func, castedErrorOperand.variableDcl);
TypeCast errorCastInstruction = new TypeCast(pos, castedErrorOperand, errorOperand, symbolTable.errorType,
false);
errorReportBB.instructions.add(errorCastInstruction);
JIMethodCall reportErrorCallTerminator = new JIMethodCall(pos);
reportErrorCallTerminator.invocationType = INVOKESTATIC;
reportErrorCallTerminator.jClassName = OBSERVE_UTILS;
reportErrorCallTerminator.jMethodVMSig = ERROR_CALL;
reportErrorCallTerminator.name = REPORT_ERROR_METHOD;
reportErrorCallTerminator.args = Collections.singletonList(castedErrorOperand);
errorReportBB.terminator = reportErrorCallTerminator;
}
/**
* Inject a stop observation call to a basic block.
*
* @param observeEndBB The basic block to which the stop observation call should be injected
* @param pos The position of all instructions, variables declarations, terminators, etc.
*/
private void injectStopObservationCall(BIRBasicBlock observeEndBB, Location pos) {
JIMethodCall observeEndCallTerminator = new JIMethodCall(pos);
observeEndCallTerminator.invocationType = INVOKESTATIC;
observeEndCallTerminator.jClassName = OBSERVE_UTILS;
observeEndCallTerminator.jMethodVMSig = STOP_OBSERVATION;
observeEndCallTerminator.name = STOP_OBSERVATION_METHOD;
observeEndCallTerminator.args = Collections.emptyList();
observeEndBB.terminator = observeEndCallTerminator;
}
/**
* Inject stop observation with an error call.
*
* @param func The BIR function in which the call is injected
* @param observeEndBB The basic block to which the stop observation call should be injected
* @param pos The position of all instructions, variables declarations, terminators, etc.
* @param errorOperand Operand for passing the error
* @param uniqueId A unique ID to identify the check error call
*/
private void injectStopObservationWithErrorCall(BIRFunction func, BIRBasicBlock observeEndBB, Location pos,
BIROperand errorOperand, String uniqueId) {
BIROperand castedErrorOperand = tempLocalVarsMap.get(uniqueId + "$castedError");
addLocalVarIfAbsent(func, castedErrorOperand.variableDcl);
TypeCast errorCastInstruction = new TypeCast(pos, castedErrorOperand, errorOperand, symbolTable.errorType,
false);
observeEndBB.instructions.add(errorCastInstruction);
JIMethodCall observeEndBBCallTerminator = new JIMethodCall(pos);
observeEndBBCallTerminator.invocationType = INVOKESTATIC;
observeEndBBCallTerminator.jClassName = OBSERVE_UTILS;
observeEndBBCallTerminator.jMethodVMSig = ERROR_CALL;
observeEndBBCallTerminator.name = STOP_OBSERVATION_WITH_ERROR_METHOD;
observeEndBBCallTerminator.args = Collections.singletonList(castedErrorOperand);
observeEndBB.terminator = observeEndBBCallTerminator;
}
/**
* Generate a constant operand from a compile-time known value.
*
* @param pkg The package which should contain the constant
* @param constantType The type of the constant
* @param constantValue The constant value which should end up being passed in the operand
* @return The generated operand which will pass the constant
*/
private BIROperand generateGlobalConstantOperand(BIRPackage pkg, BType constantType, Object constantValue) {
return compileTimeConstants.computeIfAbsent(constantValue, k -> {
PackageID pkgId = pkg.packageID;
Name name = new Name("$observabilityConst" + constantIndex++);
BIRGlobalVariableDcl constLoadVariableDcl =
new BIRGlobalVariableDcl(COMPILE_TIME_CONST_POS, 0,
constantType, pkgId, name, name,
VarScope.GLOBAL, VarKind.CONSTANT, "", VIRTUAL);
pkg.globalVars.add(constLoadVariableDcl);
return new BIROperand(constLoadVariableDcl);
});
}
/**
* Create and insert a new basic block into a function in the specified index.
*
* @param func The function to which the basic block should be injected
* @param insertIndex The index at which the basic block should be injected
* @return The injected new BB
*/
private BIRBasicBlock insertBasicBlock(BIRFunction func, int insertIndex) {
BIRBasicBlock newBB = new BIRBasicBlock(new Name(NEW_BB_PREFIX + desugaredBBIndex++));
func.basicBlocks.add(insertIndex, newBB);
return newBB;
}
/**
* Swap the effective content of two basic blocks.
*
* @param func The BIR function
* @param firstBB The first BB of which content should end up in second BB
* @param secondBB The second BB of which content should end up in first BB
*/
private void swapBasicBlockContent(BIRFunction func, BIRBasicBlock firstBB, BIRBasicBlock secondBB) {
List<BIRNonTerminator> firstBBInstructions = firstBB.instructions;
firstBB.instructions = secondBB.instructions;
secondBB.instructions = firstBBInstructions;
resetEndBasicBlock(func, firstBB, secondBB);
swapBasicBlockTerminator(func, firstBB, secondBB);
}
/**
* Swap the terminators of two basic blocks.
*
* @param func The BIR function
* @param firstBB The first BB of which terminator should end up in second BB
* @param secondBB The second BB of which terminator should end up in first BB
*/
private void swapBasicBlockTerminator(BIRFunction func, BIRBasicBlock firstBB, BIRBasicBlock secondBB) {
BIRTerminator firstBBTerminator = firstBB.terminator;
firstBB.terminator = secondBB.terminator;
secondBB.terminator = firstBBTerminator;
resetEndBasicBlock(func, firstBB, secondBB);
}
/**
* Reset endBBs of local variables after swapping basic blocks content.
*
* @param func The BIR function
* @param firstBB The first BB of which content should end up in second BB
* @param secondBB The second BB of which content should end up in first BB
*/
private void resetEndBasicBlock(BIRFunction func, BIRBasicBlock firstBB, BIRBasicBlock secondBB) {
for (BIRVariableDcl localVar : func.localVars) {
if (localVar.endBB == firstBB) {
localVar.endBB = secondBB;
}
}
}
/**
* Fix the ending BB of error entries in the error table of a function.
     * When desugared instructions are added after the original BB, and the original BB was the
     * ending BB of a trap, the trap's ending BB changes. The error table entries need to be
     * adjusted accordingly.
*
* @param func The function of which the error table should be fixed
* @param oldBB The old ending BB of error entries to be fixed
* @param newBB The new ending BB which should be updated to in the error entries to be fixed
*/
private void fixErrorTable(BIRFunction func, BIRBasicBlock oldBB, BIRBasicBlock newBB) {
for (BIRErrorEntry errorEntry : func.errorTable) {
if (errorEntry.endBB == oldBB) {
errorEntry.endBB = newBB;
}
}
}
/**
* Check if a call instruction is observable.
*
* @param callIns The call instruction to check
* @return True if the call instruction is observable
*/
private boolean isObservable(Call callIns) {
boolean isRemote = callIns.calleeFlags.contains(Flag.REMOTE);
boolean isObservableAnnotationPresent = false;
for (BIRAnnotationAttachment annot : callIns.calleeAnnotAttachments) {
if (OBSERVABLE_ANNOTATION.equals(
JvmCodeGenUtil.getPackageName(
new PackageID(annot.annotPkgId.orgName, annot.annotPkgId.name, Names.EMPTY)) +
annot.annotTagRef.value)) {
isObservableAnnotationPresent = true;
break;
}
}
return isRemote || isObservableAnnotationPresent;
}
/**
     * Check if an error is assignable to a variable declaration.
*
* @param variableDcl The variable declaration which should be checked.
* @return True if an error can be assigned and false otherwise
*/
private boolean isErrorAssignable(BIRVariableDcl variableDcl) {
boolean isErrorAssignable = false;
if (variableDcl.type instanceof BUnionType) {
BUnionType returnUnionType = (BUnionType) variableDcl.type;
boolean b = false;
for (BType type : returnUnionType.getMemberTypes()) {
if (type instanceof BErrorType) {
b = true;
break;
}
}
isErrorAssignable = b;
} else if (variableDcl.type instanceof BErrorType) {
isErrorAssignable = true;
}
return isErrorAssignable;
}
/**
* Check if a basic block is covered into an error entry.
*
* @param errorEntry The error entry from the error table
* @param basicBlocksList The basic blocks list which contains the basic block to be checked for
* @param basicBlock The basic block which should be checked for
* @return True if the basic block is covered in the error entry
*/
private boolean isBBCoveredInErrorEntry(BIRErrorEntry errorEntry, List<BIRBasicBlock> basicBlocksList,
BIRBasicBlock basicBlock) {
boolean isCovered = Objects.equals(basicBlock, errorEntry.trapBB)
|| Objects.equals(basicBlock, errorEntry.endBB);
if (!isCovered) {
/*
* Traverse in the same way MethodGen.generateBasicBlocks traverses through basic blocks to generate
* method body to check if the basic block is covered in the error entry.
*/
int i = 0;
for (; i < basicBlocksList.size(); i++) {
BIRBasicBlock currentBB = basicBlocksList.get(i);
if (currentBB == errorEntry.trapBB) {
break;
}
}
for (; i < basicBlocksList.size(); i++) {
BIRBasicBlock currentBB = basicBlocksList.get(i);
if (currentBB == basicBlock) {
isCovered = true;
break;
}
if (currentBB == errorEntry.endBB) {
break;
}
}
}
return isCovered;
}
/**
* Generate a ID for a ballerina module.
*
* @param pkg The module for which the ID should be generated
* @return The generated ID
*/
private String generatePackageId(PackageID pkg) {
return pkg.orgName.value + "/" + pkg.name.value + ":" + pkg.version.value;
}
/**
* Generate operands for location.
*
* @param pkg Bir package
* @param func Bir Function
* @param observeStartBB Observe start basic block
* @param pos Location
* @return List of operands for source file name, position start line and start column
*/
private List<BIROperand> generatePositionArgs(BIRPackage pkg, BIRFunction func, BIRBasicBlock observeStartBB,
Location pos) {
BIROperand pkgOperand = generateGlobalConstantOperand(pkg, symbolTable.stringType,
generatePackageId(pkg.packageID));
BIROperand fileNameOperand = getTempLocalVariable(FILE_NAME_STRING, pos, pos.lineRange().filePath(),
symbolTable.stringType, observeStartBB);
addLocalVarIfAbsent(func, fileNameOperand.variableDcl);
BIROperand startLineOperand = getTempLocalVariable(START_LINE_STRING, pos,
pos.lineRange().startLine().line() + 1, symbolTable.intType, observeStartBB);
addLocalVarIfAbsent(func, startLineOperand.variableDcl);
BIROperand startColOperand = getTempLocalVariable(START_COLUMN_STRING, pos,
pos.lineRange().startLine().offset() + 1, symbolTable.intType, observeStartBB);
addLocalVarIfAbsent(func, startColOperand.variableDcl);
return new ArrayList<>(Arrays.asList(pkgOperand, fileNameOperand, startLineOperand, startColOperand));
}
private BIROperand getTempLocalVariable(String name, Location pos, Object value, BType variableType,
BIRBasicBlock currentBB) {
BIROperand birOperand = tempLocalVarsMap.get(name);
addConstantLoadIns(pos, value, variableType, birOperand, currentBB);
return birOperand;
}
    // Inserts fresh start-line/start-column constant loads at the head (indices 0 and 1) of the
    // given BB's instruction list, targeting the shared position temp variables.
    // NOTE(review): this inserts rather than replaces existing loads — presumably the callers
    // arrange for the BB to need exactly this; confirm against call sites.
    private void updatePositionArgsConstLoadIns(Location pos, BIRBasicBlock currentBB) {
        addConstantLoadIns(pos, pos.lineRange().startLine().line() + 1, symbolTable.intType,
                tempLocalVarsMap.get(START_LINE_STRING), currentBB, 0);
        addConstantLoadIns(pos, pos.lineRange().startLine().offset() + 1, symbolTable.intType,
                tempLocalVarsMap.get(START_COLUMN_STRING), currentBB, 1);
    }
private void addConstantLoadIns(Location pos, Object value, BType variableType, BIROperand birOperand,
BIRBasicBlock currentBB) {
ConstantLoad constantLoad = new ConstantLoad(pos, value, variableType, birOperand);
currentBB.instructions.add(constantLoad);
}
private void addConstantLoadIns(Location pos, Object value, BType variableType, BIROperand birOperand,
BIRBasicBlock currentBB, int index) {
ConstantLoad constantLoad = new ConstantLoad(pos, value, variableType, birOperand);
currentBB.instructions.add(index, constantLoad);
}
private void addLocalVarIfAbsent(BIRFunction func, BIRVariableDcl variableDcl) {
if (!func.localVars.contains(variableDcl)) {
func.localVars.add(variableDcl);
}
}
/**
* Initializes the temporary local variables which can be reused.
*
*/
private void initializeTempLocalVariables() {
generateTempLocalVariable(FILE_NAME_STRING, symbolTable.stringType);
generateTempLocalVariable(START_LINE_STRING, symbolTable.intType);
generateTempLocalVariable(START_COLUMN_STRING, symbolTable.intType);
generateTempLocalVariable(FUNC_BODY_INSTRUMENTATION_TYPE + "$castedError", symbolTable.errorType);
generateTempLocalVariable(INVOCATION_INSTRUMENTATION_TYPE + "$castedError", symbolTable.errorType);
generateTempLocalVariable(FUNC_BODY_INSTRUMENTATION_TYPE + "$isError", symbolTable.booleanType);
generateTempLocalVariable(INVOCATION_INSTRUMENTATION_TYPE + "$isError", symbolTable.booleanType);
}
private void generateTempLocalVariable(String name, BType variableType) {
BIRVariableDcl variableDcl = new BIRVariableDcl(variableType, new Name("$observability$" + name),
VarScope.FUNCTION, VarKind.TEMP);
BIROperand birOperand = new BIROperand(variableDcl);
tempLocalVarsMap.put(name, birOperand);
}
/**
* Generate a temporary function scope variable.
*
* @param func The BIR function to which the variable should be added
* @param name The name of the variable
* @param variableType The type of the variable
* @return The generated operand for the variable declaration
*/
private BIROperand generateTempLocalVariable(BIRFunction func, String name, BType variableType) {
Name variableName = new Name("$observability$" + name + "$" + localVarIndex++);
BIRVariableDcl variableDcl = new BIRVariableDcl(variableType, variableName, VarScope.FUNCTION, VarKind.TEMP);
func.localVars.add(variableDcl);
return new BIROperand(variableDcl);
}
}
|
Yes, it is. `connection.action()` returns a `BuildActionExecuter`, and the docs for its `forTasks` method state: `Specifies the tasks to execute before executing the BuildAction.` https://docs.gradle.org/current/javadoc/org/gradle/tooling/BuildActionExecuter.html
|
public static QuarkusModel create(File projectDir, String mode, List<String> jvmArgs, String... tasks) {
    // Connect to the Gradle project, then run the model build action; forTasks() schedules the
    // given tasks to run before the action executes (Gradle Tooling API contract).
    GradleConnector connector = GradleConnector.newConnector().forProjectDirectory(projectDir);
    try (ProjectConnection connection = connector.connect()) {
        return connection.action(new QuarkusModelBuildAction(mode))
                .forTasks(tasks)
                .addJvmArguments(jvmArgs)
                .run();
    }
}
|
return connection.action(new QuarkusModelBuildAction(mode)).forTasks(tasks).addJvmArguments(jvmArgs).run();
|
/**
 * Builds the Quarkus model for the project via a Gradle Tooling API build action.
 *
 * @param projectDir root directory of the Gradle project
 * @param mode       launch mode passed to the build action
 * @param jvmArgs    extra JVM arguments for the Gradle daemon invocation
 * @param tasks      tasks to execute before the build action runs
 */
public static QuarkusModel create(File projectDir, String mode, List<String> jvmArgs, String... tasks) {
    try (ProjectConnection connection = GradleConnector.newConnector()
            .forProjectDirectory(projectDir)
            .connect()) {
        // forTasks() schedules the tasks to run before the BuildAction executes (Tooling API contract).
        return connection.action(new QuarkusModelBuildAction(mode)).forTasks(tasks).addJvmArguments(jvmArgs).run();
    }
}
|
class QuarkusGradleModelFactory {

    /** Convenience overload: build the model without extra JVM arguments. */
    public static QuarkusModel create(File projectDir, String mode, String... tasks) {
        return create(projectDir, mode, Collections.emptyList(), tasks);
    }

    /** Fetches the {@code QuarkusModel} directly via the Tooling API model builder. */
    public static QuarkusModel createForTasks(File projectDir, String... tasks) {
        GradleConnector connector = GradleConnector.newConnector().forProjectDirectory(projectDir);
        try (ProjectConnection connection = connector.connect()) {
            ModelBuilder<QuarkusModel> modelBuilder = connection.model(QuarkusModel.class);
            // Only schedule tasks when some were actually requested.
            if (tasks.length > 0) {
                modelBuilder.forTasks(tasks);
            }
            return modelBuilder.get();
        }
    }
}
|
/** Factory for fetching the Quarkus model from a Gradle project via the Tooling API. */
class QuarkusGradleModelFactory {
    /** Convenience overload: builds the model without extra JVM arguments. */
    public static QuarkusModel create(File projectDir, String mode, String... tasks) {
        return create(projectDir, mode, Collections.emptyList(), tasks);
    }
    /** Fetches the {@code QuarkusModel} directly via the Tooling API model builder. */
    public static QuarkusModel createForTasks(File projectDir, String... tasks) {
        try (ProjectConnection connection = GradleConnector.newConnector()
                .forProjectDirectory(projectDir)
                .connect()) {
            final ModelBuilder<QuarkusModel> modelBuilder = connection.model(QuarkusModel.class);
            // Only schedule tasks when some were actually requested.
            if (tasks.length != 0) {
                modelBuilder.forTasks(tasks);
            }
            return modelBuilder.get();
        }
    }
}
|
I was unsure whether this exception type is appropriate here, or whether an `Optional` should be returned instead. However, the result is ultimately consumed at the upper level while the reason for the failure is only known here — so with an `Optional`, analyzing the exception/empty buffers later would be harder.
|
/**
 * Collects the in-flight data buffers to be spilled for the given checkpoint.
 * Caller must hold the {@code receivedBuffers} lock ("unsafe" suffix).
 *
 * @param checkpointId the checkpoint for which in-flight buffers are requested
 * @return retained data buffers that {@code shouldBeSpilled} accepts, in queue order
 * @throws CheckpointException if the checkpoint was already subsumed by a newer barrier
 */
private List<Buffer> getInflightBuffersUnsafe(long checkpointId) throws CheckpointException {
    assert Thread.holdsLock(receivedBuffers);

    if (checkpointId < lastBarrierId) {
        // Fixed message grammar: "was likely been overwritten" -> "was likely overwritten".
        throw new CheckpointException(
            String.format("Sequence number for checkpoint %d is not known (it was likely overwritten by a newer checkpoint %d)", checkpointId, lastBarrierId),
            CheckpointFailureReason.CHECKPOINT_SUBSUMED);
    }

    final List<Buffer> inflightBuffers = new ArrayList<>();
    Iterator<SequenceBuffer> iterator = receivedBuffers.iterator();
    // Skip the priority elements at the head of the queue; only regular data buffers are considered.
    Iterators.advance(iterator, receivedBuffers.getNumPriorityElements());

    while (iterator.hasNext()) {
        SequenceBuffer sequenceBuffer = iterator.next();
        if (sequenceBuffer.buffer.isBuffer()) {
            if (shouldBeSpilled(sequenceBuffer.sequenceNumber)) {
                // Retain: the caller takes shared ownership of the spilled buffer.
                inflightBuffers.add(sequenceBuffer.buffer.retainBuffer());
            } else {
                break;  // buffers are in sequence order; nothing later needs spilling
            }
        }
    }
    return inflightBuffers;
}
|
throw new CheckpointException(
|
/**
 * Collects the in-flight data buffers to be spilled for the given checkpoint.
 * Caller must hold the {@code receivedBuffers} lock ("unsafe" suffix).
 *
 * @param checkpointId the checkpoint for which in-flight buffers are requested
 * @return retained data buffers that {@code shouldBeSpilled} accepts, in queue order
 * @throws CheckpointException if the checkpoint was already subsumed by a newer barrier
 */
private List<Buffer> getInflightBuffersUnsafe(long checkpointId) throws CheckpointException {
    assert Thread.holdsLock(receivedBuffers);
    if (checkpointId < lastBarrierId) {
        throw new CheckpointException(
            String.format("Sequence number for checkpoint %d is not known (it was likely been overwritten by a newer checkpoint %d)", checkpointId, lastBarrierId),
            CheckpointFailureReason.CHECKPOINT_SUBSUMED);
    }
    final List<Buffer> inflightBuffers = new ArrayList<>();
    Iterator<SequenceBuffer> iterator = receivedBuffers.iterator();
    // Skip the priority elements at the head of the queue; only regular data buffers are considered.
    Iterators.advance(iterator, receivedBuffers.getNumPriorityElements());
    while (iterator.hasNext()) {
        SequenceBuffer sequenceBuffer = iterator.next();
        if (sequenceBuffer.buffer.isBuffer()) {
            if (shouldBeSpilled(sequenceBuffer.sequenceNumber)) {
                // Retain: the caller takes shared ownership of the spilled buffer.
                inflightBuffers.add(sequenceBuffer.buffer.retainBuffer());
            } else {
                break;  // buffers are in sequence order; nothing later needs spilling
            }
        }
    }
    return inflightBuffers;
}
|
class RemoteInputChannel extends InputChannel {
    /** Sentinel used for the barrier tracking fields before any barrier has been seen. */
    private static final int NONE = -1;
    /** ID to distinguish this channel from other channels sharing the same TCP connection. */
    private final InputChannelID id = new InputChannelID();
    /** The connection to use to request the remote partition. */
    private final ConnectionID connectionId;
    /** The connection manager to use connect to the remote partition provider. */
    private final ConnectionManager connectionManager;
    /**
     * The received buffers. Received buffers are enqueued by the network I/O thread and the queue
     * is consumed by the receiving task thread.
     */
    private final PrioritizedDeque<SequenceBuffer> receivedBuffers = new PrioritizedDeque<>();
    /**
     * Flag indicating whether this channel has been released. Either called by the receiving task
     * thread or the task manager actor.
     */
    private final AtomicBoolean isReleased = new AtomicBoolean();
    /** Client to establish a (possibly shared) TCP connection and request the partition. */
    private volatile PartitionRequestClient partitionRequestClient;
    /**
     * The next expected sequence number for the next buffer.
     */
    private int expectedSequenceNumber = 0;
    /** The initial number of exclusive buffers assigned to this channel. */
    private final int initialCredit;
    /** The number of available buffers that have not been announced to the producer yet. */
    private final AtomicInteger unannouncedCredit = new AtomicInteger(0);
    /** Manages the exclusive and floating buffers of this channel. */
    private final BufferManager bufferManager;
    // Sequence number of the buffer carrying the most recent barrier (NONE before any barrier);
    // NOTE(review): presumably updated on barrier receipt — confirm against the enqueue path.
    @GuardedBy("receivedBuffers")
    private int lastBarrierSequenceNumber = NONE;
    // ID of the most recent checkpoint barrier (NONE before any barrier); compared against
    // requested checkpoint IDs in getInflightBuffersUnsafe.
    @GuardedBy("receivedBuffers")
    private long lastBarrierId = NONE;
    /** Persists channel state (in-flight buffers) for unaligned checkpoints. */
    private final ChannelStatePersister channelStatePersister;
    /**
     * Creates a remote input channel for the given partition behind the given connection.
     * The buffer manager starts with 0 buffers; exclusive buffers are assigned later in
     * {@code setup()} (NOTE(review): confirm the trailing 0 is the initial buffer count of
     * the {@code BufferManager} constructor).
     */
    public RemoteInputChannel(
        SingleInputGate inputGate,
        int channelIndex,
        ResultPartitionID partitionId,
        ConnectionID connectionId,
        ConnectionManager connectionManager,
        int initialBackOff,
        int maxBackoff,
        int networkBuffersPerChannel,
        Counter numBytesIn,
        Counter numBuffersIn,
        ChannelStateWriter stateWriter) {
        super(inputGate, channelIndex, partitionId, initialBackOff, maxBackoff, numBytesIn, numBuffersIn);
        this.initialCredit = networkBuffersPerChannel;
        this.connectionId = checkNotNull(connectionId);
        this.connectionManager = checkNotNull(connectionManager);
        this.bufferManager = new BufferManager(inputGate.getMemorySegmentProvider(), this, 0);
        this.channelStatePersister = new ChannelStatePersister(stateWriter, getChannelInfo());
    }
    // Test hook: lets tests fast-forward the next expected buffer sequence number.
    @VisibleForTesting
    void setExpectedSequenceNumber(int expectedSequenceNumber) {
        this.expectedSequenceNumber = expectedSequenceNumber;
    }
/**
* Setup includes assigning exclusive buffers to this input channel, and this method should be called only once
* after this input channel is created.
*/
    @Override
    void setup() throws IOException {
        // Guard against double-setup: exclusive buffers must not have been requested already.
        checkState(bufferManager.unsynchronizedGetAvailableExclusiveBuffers() == 0,
            "Bug in input channel setup logic: exclusive buffers have already been set for this input channel.");
        // Assign the channel's initial credit worth of exclusive buffers.
        bufferManager.requestExclusiveBuffers(initialCredit);
    }
/**
* Requests a remote subpartition.
*/
@VisibleForTesting
@Override
public void requestSubpartition(int subpartitionIndex) throws IOException, InterruptedException {
if (partitionRequestClient == null) {
try {
partitionRequestClient = connectionManager.createPartitionRequestClient(connectionId);
} catch (IOException e) {
throw new PartitionConnectionException(partitionId, e);
}
partitionRequestClient.requestSubpartition(partitionId, subpartitionIndex, this, 0);
}
}
/**
* Retriggers a remote subpartition request.
*/
void retriggerSubpartitionRequest(int subpartitionIndex) throws IOException {
checkPartitionRequestQueueInitialized();
if (increaseBackoff()) {
partitionRequestClient.requestSubpartition(
partitionId, subpartitionIndex, this, getCurrentBackoff());
} else {
failPartitionRequest();
}
}
@Override
Optional<BufferAndAvailability> getNextBuffer() throws IOException {
checkPartitionRequestQueueInitialized();
final SequenceBuffer next;
final DataType nextDataType;
synchronized (receivedBuffers) {
next = receivedBuffers.poll();
nextDataType = receivedBuffers.peek() != null ? receivedBuffers.peek().buffer.getDataType() : DataType.NONE;
}
if (next == null) {
if (isReleased.get()) {
throw new CancelTaskException("Queried for a buffer after channel has been released.");
}
return Optional.empty();
}
numBytesIn.inc(next.buffer.getSize());
numBuffersIn.inc();
return Optional.of(new BufferAndAvailability(next.buffer, nextDataType, 0, next.sequenceNumber));
}
    @Override
    void sendTaskEvent(TaskEvent event) throws IOException {
        // Events may only be sent while the channel is alive and the request queue is set up.
        checkState(!isReleased.get(), "Tried to send task event to producer after channel has been released.");
        checkPartitionRequestQueueInitialized();
        partitionRequestClient.sendTaskEvent(partitionId, event, this);
    }
    @Override
    public boolean isReleased() {
        // Lock-free read of the release flag.
        return isReleased.get();
    }
/**
* Releases all exclusive and floating buffers, closes the partition request client.
*/
@Override
void releaseAllResources() throws IOException {
if (isReleased.compareAndSet(false, true)) {
final ArrayDeque<Buffer> releasedBuffers;
synchronized (receivedBuffers) {
releasedBuffers = receivedBuffers.stream().map(sb -> sb.buffer)
.collect(Collectors.toCollection(ArrayDeque::new));
receivedBuffers.clear();
}
bufferManager.releaseAllBuffers(releasedBuffers);
if (partitionRequestClient != null) {
partitionRequestClient.close(this);
} else {
connectionManager.closeOpenChannelConnections(connectionId);
}
}
}
    // Marks this channel as failed because the requested partition could not be obtained.
    private void failPartitionRequest() {
        setError(new PartitionNotFoundException(partitionId));
    }
@Override
public String toString() {
return "RemoteInputChannel [" + partitionId + " at " + connectionId + "]";
}
/**
* Enqueue this input channel in the pipeline for notifying the producer of unannounced credit.
*/
    private void notifyCreditAvailable() throws IOException {
        checkPartitionRequestQueueInitialized();
        // Enqueue this channel so the netty pipeline announces the unannounced credit.
        partitionRequestClient.notifyCreditAvailable(this);
    }
@VisibleForTesting
public int getNumberOfAvailableBuffers() {
return bufferManager.getNumberOfAvailableBuffers();
}
@VisibleForTesting
public int getNumberOfRequiredBuffers() {
return bufferManager.unsynchronizedGetNumberOfRequiredBuffers();
}
@VisibleForTesting
public int getSenderBacklog() {
return getNumberOfRequiredBuffers() - initialCredit;
}
@VisibleForTesting
boolean isWaitingForFloatingBuffers() {
return bufferManager.unsynchronizedIsWaitingForFloatingBuffers();
}
@VisibleForTesting
public Buffer getNextReceivedBuffer() {
final SequenceBuffer sequenceBuffer = receivedBuffers.poll();
return sequenceBuffer != null ? sequenceBuffer.buffer : null;
}
// Test hook: exposes the channel's buffer manager.
@VisibleForTesting
BufferManager getBufferManager() {
return bufferManager;
}
// Test hook: exposes the (possibly still null) partition request client.
@VisibleForTesting
PartitionRequestClient getPartitionRequestClient() {
return partitionRequestClient;
}
/**
* The unannounced credit is increased by the given amount and might notify
* increased credit to the producer.
*/
@Override
public void notifyBufferAvailable(int numAvailableBuffers) throws IOException {
// Only the transition from 0 unannounced credits triggers a notification; otherwise a
// notification covering the accumulated credit is already pending.
if (numAvailableBuffers > 0 && unannouncedCredit.getAndAdd(numAvailableBuffers) == 0) {
notifyCreditAvailable();
}
}
@Override
public void resumeConsumption() throws IOException {
// Must not be called on a released channel or before the request client exists.
checkState(!isReleased.get(), "Channel released.");
checkPartitionRequestQueueInitialized();
partitionRequestClient.resumeConsumption(this);
}
/**
* Gets the currently unannounced credit (atomic read).
*
* @return Credit which was not announced to the sender yet.
*/
public int getUnannouncedCredit() {
return unannouncedCredit.get();
}
/**
* Gets the unannounced credit and resets it to <tt>0</tt> atomically.
*
* @return Credit which was not announced to the sender yet.
*/
public int getAndResetUnannouncedCredit() {
return unannouncedCredit.getAndSet(0);
}
/**
* Gets the current number of received buffers which have not been processed yet.
*
* @return Buffers queued for processing.
*/
public int getNumberOfQueuedBuffers() {
// Synchronized to obtain a consistent size with respect to concurrent enqueues.
synchronized (receivedBuffers) {
return receivedBuffers.size();
}
}
@Override
public int unsynchronizedGetNumberOfQueuedBuffers() {
// Best-effort read without holding the lock; Math.max defends against a
// transiently inconsistent value — presumably size() cannot go negative,
// but this matches the other unsynchronized accessors.
return Math.max(0, receivedBuffers.size());
}
// Exclusive buffers in use = initial credit minus the exclusive buffers still available.
public int unsynchronizedGetExclusiveBuffersUsed() {
return Math.max(0, initialCredit - bufferManager.unsynchronizedGetAvailableExclusiveBuffers());
}
// Best-effort count of floating buffers available to this channel.
public int unsynchronizedGetFloatingBuffersAvailable() {
return Math.max(0, bufferManager.unsynchronizedGetFloatingBuffersAvailable());
}
// ID distinguishing this channel from others sharing the same TCP connection.
public InputChannelID getInputChannelId() {
return id;
}
// The fixed number of exclusive buffers initially assigned to this channel.
public int getInitialCredit() {
return initialCredit;
}
// Returns the gate's buffer provider, or null once this channel has been released.
public BufferProvider getBufferProvider() throws IOException {
if (isReleased.get()) {
return null;
}
return inputGate.getBufferProvider();
}
/**
* Requests buffer from input channel directly for receiving network data.
* It should always return an available buffer in credit-based mode unless
* the channel has been released.
*
* @return The available buffer, or {@code null} if none is available.
*/
@Nullable
public Buffer requestBuffer() {
return bufferManager.requestBuffer();
}
/**
* Receives the backlog from the producer's buffer response. If the number of available
* buffers is less than backlog + initialCredit, it will request floating buffers from
* the buffer manager, and then notify unannounced credits to the producer.
*
* @param backlog The number of unsent buffers in the producer's sub partition.
*/
void onSenderBacklog(int backlog) throws IOException {
int numRequestedBuffers = bufferManager.requestFloatingBuffers(backlog + initialCredit);
// Same 0-transition pattern as notifyBufferAvailable(): notify only when no
// credit announcement is currently pending.
if (numRequestedBuffers > 0 && unannouncedCredit.getAndAdd(numRequestedBuffers) == 0) {
notifyCreditAvailable();
}
}
/**
* Called by the network thread when a buffer arrives. Verifies the sequence number,
* enqueues the buffer (priority buffers jump the queue), persists channel state if a
* checkpoint is in progress, and triggers the availability/priority notifications.
*
* <p>The buffer is recycled here if it was rejected (re-ordering, released channel, or
* an exception before it was enqueued); otherwise ownership passes to the queue.
*/
public void onBuffer(Buffer buffer, int sequenceNumber, int backlog) throws IOException {
boolean recycleBuffer = true;
try {
if (expectedSequenceNumber != sequenceNumber) {
onError(new BufferReorderingException(expectedSequenceNumber, sequenceNumber));
return;
}
final boolean wasEmpty;
boolean firstPriorityEvent = false;
synchronized (receivedBuffers) {
// Releasing changes the state of receivedBuffers; drop the buffer if released.
if (isReleased.get()) {
return;
}
wasEmpty = receivedBuffers.isEmpty();
SequenceBuffer sequenceBuffer = new SequenceBuffer(buffer, sequenceNumber);
DataType dataType = buffer.getDataType();
if (dataType.hasPriority()) {
// Priority buffers (e.g. unaligned barriers) overtake regular buffers.
firstPriorityEvent = addPriorityBuffer(sequenceBuffer);
}
else {
receivedBuffers.add(sequenceBuffer);
channelStatePersister.maybePersist(buffer);
if (dataType.requiresAnnouncement()) {
// Enqueue an EventAnnouncement ahead of the barrier itself.
firstPriorityEvent = addPriorityBuffer(announce(sequenceBuffer));
}
}
++expectedSequenceNumber;
}
// From here on the queue owns the buffer.
recycleBuffer = false;
// Notifications are issued outside the lock to avoid lock-order issues.
if (firstPriorityEvent) {
notifyPriorityEvent(sequenceNumber);
}
if (wasEmpty) {
notifyChannelNonEmpty();
}
if (backlog >= 0) {
onSenderBacklog(backlog);
}
} finally {
if (recycleBuffer) {
buffer.recycleBuffer();
}
}
}
/**
* Adds the buffer to the priority section of the queue and records barrier bookkeeping
* used by {@link #shouldBeSpilled(int)}.
*
* @return {@code true} if this was first priority buffer added.
*/
private boolean addPriorityBuffer(SequenceBuffer sequenceBuffer) throws IOException {
receivedBuffers.addPriorityElement(sequenceBuffer);
// Remember id/sequence number of the newest barrier seen so far (guarded by receivedBuffers).
channelStatePersister
.checkForBarrier(sequenceBuffer.buffer)
.filter(id -> id > lastBarrierId)
.ifPresent(id -> {
lastBarrierId = id;
lastBarrierSequenceNumber = sequenceBuffer.sequenceNumber;
});
return receivedBuffers.getNumPriorityElements() == 1;
}
// Wraps a CheckpointBarrier buffer into an EventAnnouncement carrying the same
// sequence number, so the announcement can overtake the in-order barrier.
private SequenceBuffer announce(SequenceBuffer sequenceBuffer) throws IOException {
checkState(!sequenceBuffer.buffer.isBuffer(), "Only a CheckpointBarrier can be announced but found %s", sequenceBuffer.buffer);
AbstractEvent event = EventSerializer.fromBuffer(
sequenceBuffer.buffer,
getClass().getClassLoader());
checkState(event instanceof CheckpointBarrier, "Only a CheckpointBarrier can be announced but found %s", sequenceBuffer.buffer);
CheckpointBarrier barrier = (CheckpointBarrier) event;
return new SequenceBuffer(
EventSerializer.toBuffer(new EventAnnouncement(barrier, sequenceBuffer.sequenceNumber), true),
sequenceBuffer.sequenceNumber);
}
/**
* Spills all queued buffers on checkpoint start. If barrier has already been received (and reordered), spill only
* the overtaken buffers.
*/
public void checkpointStarted(CheckpointBarrier barrier) throws CheckpointException {
synchronized (receivedBuffers) {
channelStatePersister.startPersisting(
barrier.getId(),
getInflightBuffersUnsafe(barrier.getId()));
}
}
// Stops persisting for the given checkpoint and resets the barrier bookkeeping
// if the stopped checkpoint is the last one this channel saw.
public void checkpointStopped(long checkpointId) {
synchronized (receivedBuffers) {
channelStatePersister.stopPersisting(checkpointId);
if (lastBarrierId == checkpointId) {
lastBarrierId = NONE;
lastBarrierSequenceNumber = NONE;
}
}
}
// Test hook: locked variant of getInflightBuffersUnsafe(...).
// NOTE(review): getInflightBuffersUnsafe is referenced but not visible in this snippet —
// presumably defined elsewhere in the class; verify.
@VisibleForTesting
List<Buffer> getInflightBuffers(long checkpointId) throws CheckpointException {
synchronized (receivedBuffers) {
return getInflightBuffersUnsafe(checkpointId);
}
}
/**
* Decides whether the buffer with the given {@code sequenceNumber} should be spilled on
* checkpoint, given {@link #lastBarrierSequenceNumber}.
*
* <p>If no {@link CheckpointBarrier} has been received yet
* ({@code lastBarrierSequenceNumber == NONE}), everything must be spilled. Otherwise only
* buffers overtaken by the barrier (smaller sequence number) are spilled; wrap-around of the
* {@code int} sequence number near {@code Integer.MAX_VALUE} is handled explicitly below.
*/
private boolean shouldBeSpilled(int sequenceNumber) {
if (lastBarrierSequenceNumber == NONE) {
return true;
}
// Overflow detection assumes the queue is much smaller than half the int range.
checkState(
receivedBuffers.size() < Integer.MAX_VALUE / 2,
"Too many buffers for sequenceNumber overflow detection code to work correctly");
boolean possibleOverflowAfterOvertaking = Integer.MAX_VALUE / 2 < lastBarrierSequenceNumber;
boolean possibleOverflowBeforeOvertaking = lastBarrierSequenceNumber < -Integer.MAX_VALUE / 2;
if (possibleOverflowAfterOvertaking) {
// Barrier near MAX_VALUE: numbers that already wrapped (negative) are after the barrier.
return sequenceNumber < lastBarrierSequenceNumber && sequenceNumber > 0;
}
else if (possibleOverflowBeforeOvertaking) {
// Barrier already wrapped: large positive numbers are before the barrier.
return sequenceNumber < lastBarrierSequenceNumber || sequenceNumber > 0;
}
else {
return sequenceNumber < lastBarrierSequenceNumber;
}
}
// Handles a buffer response that carried no data: advances the sequence number
// (or fails on re-ordering) and still processes the sender's backlog.
public void onEmptyBuffer(int sequenceNumber, int backlog) throws IOException {
boolean success = false;
synchronized (receivedBuffers) {
if (!isReleased.get()) {
if (expectedSequenceNumber == sequenceNumber) {
expectedSequenceNumber++;
success = true;
} else {
onError(new BufferReorderingException(expectedSequenceNumber, sequenceNumber));
}
}
}
// Backlog handling happens outside the lock, mirroring onBuffer().
if (success && backlog >= 0) {
onSenderBacklog(backlog);
}
}
// Asks the gate to check the producer's partition state after a failed request.
public void onFailedPartitionRequest() {
inputGate.triggerPartitionStateCheck(partitionId);
}
// Propagates an asynchronous error to the consuming task.
public void onError(Throwable cause) {
setError(cause);
}
// Rethrows any previously recorded error and verifies the request client exists.
private void checkPartitionRequestQueueInitialized() throws IOException {
checkError();
checkState(partitionRequestClient != null,
"Bug: partitionRequestClient is not initialized before processing data and no error is detected.");
}
/** Signals that a buffer arrived with an unexpected sequence number. */
private static class BufferReorderingException extends IOException {
private static final long serialVersionUID = -888282210356266816L;
private final int expectedSequenceNumber;
private final int actualSequenceNumber;
BufferReorderingException(int expectedSequenceNumber, int actualSequenceNumber) {
this.expectedSequenceNumber = expectedSequenceNumber;
this.actualSequenceNumber = actualSequenceNumber;
}
@Override
public String getMessage() {
return String.format("Buffer re-ordering: expected buffer with sequence number %d, but received %d.",
expectedSequenceNumber, actualSequenceNumber);
}
}
/** Pairs a received buffer with the sequence number it arrived under. */
private static final class SequenceBuffer {
final Buffer buffer;
final int sequenceNumber;
private SequenceBuffer(Buffer buffer, int sequenceNumber) {
this.buffer = buffer;
this.sequenceNumber = sequenceNumber;
}
}
}
|
/**
* An input channel that reads data from a remote partition producer over the network,
* using credit-based flow control: exclusive buffers grant the initial credit, floating
* buffers are requested on demand based on the sender's backlog, and unannounced credit
* is pushed to the producer via the partition request client.
*/
class RemoteInputChannel extends InputChannel {
private static final int NONE = -1;
/** ID to distinguish this channel from other channels sharing the same TCP connection. */
private final InputChannelID id = new InputChannelID();
/** The connection to use to request the remote partition. */
private final ConnectionID connectionId;
/** The connection manager to use connect to the remote partition provider. */
private final ConnectionManager connectionManager;
/**
* The received buffers. Received buffers are enqueued by the network I/O thread and the queue
* is consumed by the receiving task thread.
*/
private final PrioritizedDeque<SequenceBuffer> receivedBuffers = new PrioritizedDeque<>();
/**
* Flag indicating whether this channel has been released. Either called by the receiving task
* thread or the task manager actor.
*/
private final AtomicBoolean isReleased = new AtomicBoolean();
/** Client to establish a (possibly shared) TCP connection and request the partition. */
private volatile PartitionRequestClient partitionRequestClient;
/**
* The next expected sequence number for the next buffer.
*/
private int expectedSequenceNumber = 0;
/** The initial number of exclusive buffers assigned to this channel. */
private final int initialCredit;
/** The number of available buffers that have not been announced to the producer yet. */
private final AtomicInteger unannouncedCredit = new AtomicInteger(0);
private final BufferManager bufferManager;
// Sequence number of the newest checkpoint barrier seen (NONE if none yet).
@GuardedBy("receivedBuffers")
private int lastBarrierSequenceNumber = NONE;
// Checkpoint id of the newest barrier seen (NONE if none yet).
@GuardedBy("receivedBuffers")
private long lastBarrierId = NONE;
private final ChannelStatePersister channelStatePersister;
public RemoteInputChannel(
SingleInputGate inputGate,
int channelIndex,
ResultPartitionID partitionId,
ConnectionID connectionId,
ConnectionManager connectionManager,
int initialBackOff,
int maxBackoff,
int networkBuffersPerChannel,
Counter numBytesIn,
Counter numBuffersIn,
ChannelStateWriter stateWriter) {
super(inputGate, channelIndex, partitionId, initialBackOff, maxBackoff, numBytesIn, numBuffersIn);
this.initialCredit = networkBuffersPerChannel;
this.connectionId = checkNotNull(connectionId);
this.connectionManager = checkNotNull(connectionManager);
this.bufferManager = new BufferManager(inputGate.getMemorySegmentProvider(), this, 0);
this.channelStatePersister = new ChannelStatePersister(stateWriter, getChannelInfo());
}
@VisibleForTesting
void setExpectedSequenceNumber(int expectedSequenceNumber) {
this.expectedSequenceNumber = expectedSequenceNumber;
}
/**
* Setup includes assigning exclusive buffers to this input channel, and this method should be called only once
* after this input channel is created.
*/
@Override
void setup() throws IOException {
checkState(bufferManager.unsynchronizedGetAvailableExclusiveBuffers() == 0,
"Bug in input channel setup logic: exclusive buffers have already been set for this input channel.")
;
bufferManager.requestExclusiveBuffers(initialCredit);
}
/**
* Requests a remote subpartition. Creates the partition request client lazily on the
* first call; subsequent calls are no-ops.
*/
@VisibleForTesting
@Override
public void requestSubpartition(int subpartitionIndex) throws IOException, InterruptedException {
if (partitionRequestClient == null) {
try {
partitionRequestClient = connectionManager.createPartitionRequestClient(connectionId);
} catch (IOException e) {
throw new PartitionConnectionException(partitionId, e);
}
partitionRequestClient.requestSubpartition(partitionId, subpartitionIndex, this, 0);
}
}
/**
* Retriggers a remote subpartition request, failing the channel once backoff is exhausted.
*/
void retriggerSubpartitionRequest(int subpartitionIndex) throws IOException {
checkPartitionRequestQueueInitialized();
if (increaseBackoff()) {
partitionRequestClient.requestSubpartition(
partitionId, subpartitionIndex, this, getCurrentBackoff());
} else {
failPartitionRequest();
}
}
@Override
Optional<BufferAndAvailability> getNextBuffer() throws IOException {
checkPartitionRequestQueueInitialized();
final SequenceBuffer next;
final DataType nextDataType;
// Poll the next buffer and peek the data type of the one after it under the lock.
synchronized (receivedBuffers) {
next = receivedBuffers.poll();
nextDataType = receivedBuffers.peek() != null ? receivedBuffers.peek().buffer.getDataType() : DataType.NONE;
}
if (next == null) {
if (isReleased.get()) {
throw new CancelTaskException("Queried for a buffer after channel has been released.");
}
return Optional.empty();
}
numBytesIn.inc(next.buffer.getSize());
numBuffersIn.inc();
return Optional.of(new BufferAndAvailability(next.buffer, nextDataType, 0, next.sequenceNumber));
}
@Override
void sendTaskEvent(TaskEvent event) throws IOException {
checkState(!isReleased.get(), "Tried to send task event to producer after channel has been released.");
checkPartitionRequestQueueInitialized();
partitionRequestClient.sendTaskEvent(partitionId, event, this);
}
@Override
public boolean isReleased() {
return isReleased.get();
}
/**
* Releases all exclusive and floating buffers, closes the partition request client.
* Safe to call multiple times; only the first call performs the release.
*/
@Override
void releaseAllResources() throws IOException {
if (isReleased.compareAndSet(false, true)) {
final ArrayDeque<Buffer> releasedBuffers;
// Drain queued buffers under the lock; recycle them outside of it.
synchronized (receivedBuffers) {
releasedBuffers = receivedBuffers.stream().map(sb -> sb.buffer)
.collect(Collectors.toCollection(ArrayDeque::new));
receivedBuffers.clear();
}
bufferManager.releaseAllBuffers(releasedBuffers);
// The client is null if the partition request was never issued.
if (partitionRequestClient != null) {
partitionRequestClient.close(this);
} else {
connectionManager.closeOpenChannelConnections(connectionId);
}
}
}
private void failPartitionRequest() {
setError(new PartitionNotFoundException(partitionId));
}
@Override
public String toString() {
return "RemoteInputChannel [" + partitionId + " at " + connectionId + "]";
}
/**
* Enqueue this input channel in the pipeline for notifying the producer of unannounced credit.
*/
private void notifyCreditAvailable() throws IOException {
checkPartitionRequestQueueInitialized();
partitionRequestClient.notifyCreditAvailable(this);
}
@VisibleForTesting
public int getNumberOfAvailableBuffers() {
return bufferManager.getNumberOfAvailableBuffers();
}
@VisibleForTesting
public int getNumberOfRequiredBuffers() {
return bufferManager.unsynchronizedGetNumberOfRequiredBuffers();
}
@VisibleForTesting
public int getSenderBacklog() {
return getNumberOfRequiredBuffers() - initialCredit;
}
@VisibleForTesting
boolean isWaitingForFloatingBuffers() {
return bufferManager.unsynchronizedIsWaitingForFloatingBuffers();
}
@VisibleForTesting
public Buffer getNextReceivedBuffer() {
final SequenceBuffer sequenceBuffer = receivedBuffers.poll();
return sequenceBuffer != null ? sequenceBuffer.buffer : null;
}
@VisibleForTesting
BufferManager getBufferManager() {
return bufferManager;
}
@VisibleForTesting
PartitionRequestClient getPartitionRequestClient() {
return partitionRequestClient;
}
/**
* The unannounced credit is increased by the given amount and might notify
* increased credit to the producer.
*/
@Override
public void notifyBufferAvailable(int numAvailableBuffers) throws IOException {
// Only the transition from 0 unannounced credits triggers a notification.
if (numAvailableBuffers > 0 && unannouncedCredit.getAndAdd(numAvailableBuffers) == 0) {
notifyCreditAvailable();
}
}
@Override
public void resumeConsumption() throws IOException {
checkState(!isReleased.get(), "Channel released.");
checkPartitionRequestQueueInitialized();
partitionRequestClient.resumeConsumption(this);
}
/**
* Gets the currently unannounced credit.
*
* @return Credit which was not announced to the sender yet.
*/
public int getUnannouncedCredit() {
return unannouncedCredit.get();
}
/**
* Gets the unannounced credit and resets it to <tt>0</tt> atomically.
*
* @return Credit which was not announced to the sender yet.
*/
public int getAndResetUnannouncedCredit() {
return unannouncedCredit.getAndSet(0);
}
/**
* Gets the current number of received buffers which have not been processed yet.
*
* @return Buffers queued for processing.
*/
public int getNumberOfQueuedBuffers() {
synchronized (receivedBuffers) {
return receivedBuffers.size();
}
}
@Override
public int unsynchronizedGetNumberOfQueuedBuffers() {
// Best-effort read without holding the lock.
return Math.max(0, receivedBuffers.size());
}
public int unsynchronizedGetExclusiveBuffersUsed() {
return Math.max(0, initialCredit - bufferManager.unsynchronizedGetAvailableExclusiveBuffers());
}
public int unsynchronizedGetFloatingBuffersAvailable() {
return Math.max(0, bufferManager.unsynchronizedGetFloatingBuffersAvailable());
}
public InputChannelID getInputChannelId() {
return id;
}
public int getInitialCredit() {
return initialCredit;
}
public BufferProvider getBufferProvider() throws IOException {
if (isReleased.get()) {
return null;
}
return inputGate.getBufferProvider();
}
/**
* Requests buffer from input channel directly for receiving network data.
* It should always return an available buffer in credit-based mode unless
* the channel has been released.
*
* @return The available buffer.
*/
@Nullable
public Buffer requestBuffer() {
return bufferManager.requestBuffer();
}
/**
* Receives the backlog from the producer's buffer response. If the number of available
* buffers is less than backlog + initialCredit, it will request floating buffers from
* the buffer manager, and then notify unannounced credits to the producer.
*
* @param backlog The number of unsent buffers in the producer's sub partition.
*/
void onSenderBacklog(int backlog) throws IOException {
int numRequestedBuffers = bufferManager.requestFloatingBuffers(backlog + initialCredit);
if (numRequestedBuffers > 0 && unannouncedCredit.getAndAdd(numRequestedBuffers) == 0) {
notifyCreditAvailable();
}
}
/**
* Called by the network thread when a buffer arrives: checks ordering, enqueues (priority
* buffers overtake), persists state if checkpointing, and notifies listeners outside the lock.
*/
public void onBuffer(Buffer buffer, int sequenceNumber, int backlog) throws IOException {
boolean recycleBuffer = true;
try {
if (expectedSequenceNumber != sequenceNumber) {
onError(new BufferReorderingException(expectedSequenceNumber, sequenceNumber));
return;
}
final boolean wasEmpty;
boolean firstPriorityEvent = false;
synchronized (receivedBuffers) {
if (isReleased.get()) {
return;
}
wasEmpty = receivedBuffers.isEmpty();
SequenceBuffer sequenceBuffer = new SequenceBuffer(buffer, sequenceNumber);
DataType dataType = buffer.getDataType();
if (dataType.hasPriority()) {
firstPriorityEvent = addPriorityBuffer(sequenceBuffer);
}
else {
receivedBuffers.add(sequenceBuffer);
channelStatePersister.maybePersist(buffer);
if (dataType.requiresAnnouncement()) {
firstPriorityEvent = addPriorityBuffer(announce(sequenceBuffer));
}
}
++expectedSequenceNumber;
}
// From here on the queue owns the buffer.
recycleBuffer = false;
if (firstPriorityEvent) {
notifyPriorityEvent(sequenceNumber);
}
if (wasEmpty) {
notifyChannelNonEmpty();
}
if (backlog >= 0) {
onSenderBacklog(backlog);
}
} finally {
if (recycleBuffer) {
buffer.recycleBuffer();
}
}
}
/**
* @return {@code true} if this was first priority buffer added.
*/
private boolean addPriorityBuffer(SequenceBuffer sequenceBuffer) throws IOException {
receivedBuffers.addPriorityElement(sequenceBuffer);
channelStatePersister
.checkForBarrier(sequenceBuffer.buffer)
.filter(id -> id > lastBarrierId)
.ifPresent(id -> {
lastBarrierId = id;
lastBarrierSequenceNumber = sequenceBuffer.sequenceNumber;
});
return receivedBuffers.getNumPriorityElements() == 1;
}
// Wraps a CheckpointBarrier buffer into an EventAnnouncement with the same sequence number.
private SequenceBuffer announce(SequenceBuffer sequenceBuffer) throws IOException {
checkState(!sequenceBuffer.buffer.isBuffer(), "Only a CheckpointBarrier can be announced but found %s", sequenceBuffer.buffer);
AbstractEvent event = EventSerializer.fromBuffer(
sequenceBuffer.buffer,
getClass().getClassLoader());
checkState(event instanceof CheckpointBarrier, "Only a CheckpointBarrier can be announced but found %s", sequenceBuffer.buffer);
CheckpointBarrier barrier = (CheckpointBarrier) event;
return new SequenceBuffer(
EventSerializer.toBuffer(new EventAnnouncement(barrier, sequenceBuffer.sequenceNumber), true),
sequenceBuffer.sequenceNumber);
}
/**
* Spills all queued buffers on checkpoint start. If barrier has already been received (and reordered), spill only
* the overtaken buffers.
*/
public void checkpointStarted(CheckpointBarrier barrier) throws CheckpointException {
synchronized (receivedBuffers) {
channelStatePersister.startPersisting(
barrier.getId(),
getInflightBuffersUnsafe(barrier.getId()));
}
}
public void checkpointStopped(long checkpointId) {
synchronized (receivedBuffers) {
channelStatePersister.stopPersisting(checkpointId);
if (lastBarrierId == checkpointId) {
lastBarrierId = NONE;
lastBarrierSequenceNumber = NONE;
}
}
}
// NOTE(review): getInflightBuffersUnsafe(...) is referenced here but not defined in this
// snippet — presumably elided; verify against the full class.
@VisibleForTesting
List<Buffer> getInflightBuffers(long checkpointId) throws CheckpointException {
synchronized (receivedBuffers) {
return getInflightBuffersUnsafe(checkpointId);
}
}
/**
* Decides whether the buffer with the given {@code sequenceNumber} should be spilled on
* checkpoint, given {@link #lastBarrierSequenceNumber}.
*
* <p>If no {@link CheckpointBarrier} has been received yet
* ({@code lastBarrierSequenceNumber == NONE}), everything must be spilled. Otherwise only
* buffers overtaken by the barrier (smaller sequence number) are spilled; wrap-around of
* the {@code int} sequence number near {@code Integer.MAX_VALUE} is handled explicitly.
*/
private boolean shouldBeSpilled(int sequenceNumber) {
if (lastBarrierSequenceNumber == NONE) {
return true;
}
checkState(
receivedBuffers.size() < Integer.MAX_VALUE / 2,
"Too many buffers for sequenceNumber overflow detection code to work correctly");
boolean possibleOverflowAfterOvertaking = Integer.MAX_VALUE / 2 < lastBarrierSequenceNumber;
boolean possibleOverflowBeforeOvertaking = lastBarrierSequenceNumber < -Integer.MAX_VALUE / 2;
if (possibleOverflowAfterOvertaking) {
return sequenceNumber < lastBarrierSequenceNumber && sequenceNumber > 0;
}
else if (possibleOverflowBeforeOvertaking) {
return sequenceNumber < lastBarrierSequenceNumber || sequenceNumber > 0;
}
else {
return sequenceNumber < lastBarrierSequenceNumber;
}
}
// Handles an empty buffer response: advances the sequence number and processes the backlog.
public void onEmptyBuffer(int sequenceNumber, int backlog) throws IOException {
boolean success = false;
synchronized (receivedBuffers) {
if (!isReleased.get()) {
if (expectedSequenceNumber == sequenceNumber) {
expectedSequenceNumber++;
success = true;
} else {
onError(new BufferReorderingException(expectedSequenceNumber, sequenceNumber));
}
}
}
if (success && backlog >= 0) {
onSenderBacklog(backlog);
}
}
public void onFailedPartitionRequest() {
inputGate.triggerPartitionStateCheck(partitionId);
}
public void onError(Throwable cause) {
setError(cause);
}
private void checkPartitionRequestQueueInitialized() throws IOException {
checkError();
checkState(partitionRequestClient != null,
"Bug: partitionRequestClient is not initialized before processing data and no error is detected.");
}
/** Signals that a buffer arrived with an unexpected sequence number. */
private static class BufferReorderingException extends IOException {
private static final long serialVersionUID = -888282210356266816L;
private final int expectedSequenceNumber;
private final int actualSequenceNumber;
BufferReorderingException(int expectedSequenceNumber, int actualSequenceNumber) {
this.expectedSequenceNumber = expectedSequenceNumber;
this.actualSequenceNumber = actualSequenceNumber;
}
@Override
public String getMessage() {
return String.format("Buffer re-ordering: expected buffer with sequence number %d, but received %d.",
expectedSequenceNumber, actualSequenceNumber);
}
}
/** Pairs a received buffer with the sequence number it arrived under. */
private static final class SequenceBuffer {
final Buffer buffer;
final int sequenceNumber;
private SequenceBuffer(Buffer buffer, int sequenceNumber) {
this.buffer = buffer;
this.sequenceNumber = sequenceNumber;
}
}
}
|
Also, it can't really break anything, can it? In my view, this is how the `checkPermission` method should have looked in the first place.
|
/**
 * Checks the given permission against the lazily resolved {@link SecurityIdentity}.
 * The identity is obtained asynchronously so blocking identity providers are not
 * triggered eagerly.
 */
public Uni<Boolean> checkPermission(Permission permission) {
    // Lambda form of the original anonymous Function — behaviorally identical.
    return association.getDeferredIdentity()
            .flatMap(identity -> identity.checkPermission(permission));
}
|
return association.getDeferredIdentity()
|
/**
 * Resolves the current {@link SecurityIdentity} asynchronously and delegates the
 * permission check to it.
 */
public Uni<Boolean> checkPermission(Permission permission) {
    // Lambda instead of an anonymous Function instance; same semantics.
    return association.getDeferredIdentity()
            .flatMap(identity -> identity.checkPermission(permission));
}
|
/**
 * A delegating {@link SecurityIdentity} that resolves the current identity from the
 * request-scoped {@link SecurityIdentityAssociation} on every call.
 *
 * <p>Fix: the original contained a duplicated {@code @Override} annotation (a compile
 * error) where the reactive {@code checkPermission} override was missing; the override
 * is restored here.
 */
class SecurityIdentityProxy implements SecurityIdentity {
    @Inject
    SecurityIdentityAssociation association;

    @Override
    public Principal getPrincipal() {
        return association.getIdentity().getPrincipal();
    }

    @Override
    public boolean isAnonymous() {
        return association.getIdentity().isAnonymous();
    }

    @Override
    public Set<String> getRoles() {
        return association.getIdentity().getRoles();
    }

    @Override
    public boolean hasRole(String role) {
        return association.getIdentity().hasRole(role);
    }

    @Override
    public <T extends Credential> T getCredential(Class<T> credentialType) {
        return association.getIdentity().getCredential(credentialType);
    }

    @Override
    public Set<Credential> getCredentials() {
        return association.getIdentity().getCredentials();
    }

    @Override
    public <T> T getAttribute(String name) {
        return association.getIdentity().getAttribute(name);
    }

    @Override
    public Map<String, Object> getAttributes() {
        return association.getIdentity().getAttributes();
    }

    @Override
    public Uni<Boolean> checkPermission(Permission permission) {
        // Resolve the identity lazily so blocking identity providers are not triggered eagerly.
        return association.getDeferredIdentity()
                .flatMap(identity -> identity.checkPermission(permission));
    }

    @Override
    public boolean checkPermissionBlocking(Permission permission) {
        return association.getIdentity().checkPermissionBlocking(permission);
    }
}
|
/**
 * A delegating {@link SecurityIdentity} that resolves the current identity from the
 * request-scoped {@link SecurityIdentityAssociation} on every call.
 *
 * <p>Fix: the original contained a duplicated {@code @Override} annotation (a compile
 * error) where the reactive {@code checkPermission} override was missing; the override
 * is restored here.
 */
class SecurityIdentityProxy implements SecurityIdentity {
    @Inject
    SecurityIdentityAssociation association;

    @Override
    public Principal getPrincipal() {
        return association.getIdentity().getPrincipal();
    }

    @Override
    public boolean isAnonymous() {
        return association.getIdentity().isAnonymous();
    }

    @Override
    public Set<String> getRoles() {
        return association.getIdentity().getRoles();
    }

    @Override
    public boolean hasRole(String role) {
        return association.getIdentity().hasRole(role);
    }

    @Override
    public <T extends Credential> T getCredential(Class<T> credentialType) {
        return association.getIdentity().getCredential(credentialType);
    }

    @Override
    public Set<Credential> getCredentials() {
        return association.getIdentity().getCredentials();
    }

    @Override
    public <T> T getAttribute(String name) {
        return association.getIdentity().getAttribute(name);
    }

    @Override
    public Map<String, Object> getAttributes() {
        return association.getIdentity().getAttributes();
    }

    @Override
    public Uni<Boolean> checkPermission(Permission permission) {
        // Resolve the identity lazily so blocking identity providers are not triggered eagerly.
        return association.getDeferredIdentity()
                .flatMap(identity -> identity.checkPermission(permission));
    }

    @Override
    public boolean checkPermissionBlocking(Permission permission) {
        return association.getIdentity().checkPermissionBlocking(permission);
    }
}
|
Should this getter always return a new serializer instance, or could a single shared instance be cached?
|
/**
* Returns the serializer for {@link KafkaCommittable}s.
*
* <p>NOTE(review): a fresh {@link KafkaCommittableSerializer} is created on every call —
* presumably the serializer is stateless so this is safe; confirm before caching a shared instance.
*/
public SimpleVersionedSerializer<KafkaCommittable> getCommittableSerializer() {
return new KafkaCommittableSerializer();
}
|
return new KafkaCommittableSerializer();
|
/**
* Returns the serializer for {@link KafkaCommittable}s.
*
* <p>NOTE(review): a new instance per call — presumably stateless and safe; verify
* before switching to a cached singleton.
*/
public SimpleVersionedSerializer<KafkaCommittable> getCommittableSerializer() {
return new KafkaCommittableSerializer();
}
|
/**
 * A Flink sink that writes records to Kafka with the configured {@link DeliveryGuarantee},
 * supporting stateful writers and two-phase commit for exactly-once delivery.
 *
 * <p>Fix: the original contained a duplicated {@code @Internal @Override} annotation pair
 * (a compile error) where {@code getCommittableSerializer()} was missing; the method is
 * restored here.
 *
 * @param <IN> type of incoming records
 */
class KafkaSink<IN>
        implements StatefulSink<IN, KafkaWriterState>,
                TwoPhaseCommittingSink<IN, KafkaCommittable> {

    private final DeliveryGuarantee deliveryGuarantee;
    private final KafkaRecordSerializationSchema<IN> recordSerializer;
    private final Properties kafkaProducerConfig;
    private final String transactionalIdPrefix;

    KafkaSink(
            DeliveryGuarantee deliveryGuarantee,
            Properties kafkaProducerConfig,
            String transactionalIdPrefix,
            KafkaRecordSerializationSchema<IN> recordSerializer) {
        this.deliveryGuarantee = deliveryGuarantee;
        this.kafkaProducerConfig = kafkaProducerConfig;
        this.transactionalIdPrefix = transactionalIdPrefix;
        this.recordSerializer = recordSerializer;
    }

    /**
     * Create a {@link KafkaSinkBuilder} to construct a new {@link KafkaSink}.
     *
     * @param <IN> type of incoming records
     * @return {@link KafkaSinkBuilder}
     */
    public static <IN> KafkaSinkBuilder<IN> builder() {
        return new KafkaSinkBuilder<>();
    }

    @Internal
    @Override
    public Committer<KafkaCommittable> createCommitter() throws IOException {
        return new KafkaCommitter(kafkaProducerConfig);
    }

    @Internal
    @Override
    public SimpleVersionedSerializer<KafkaCommittable> getCommittableSerializer() {
        return new KafkaCommittableSerializer();
    }

    @Internal
    @Override
    public KafkaWriter<IN> createWriter(InitContext context) throws IOException {
        return new KafkaWriter<IN>(
                deliveryGuarantee,
                kafkaProducerConfig,
                transactionalIdPrefix,
                context,
                recordSerializer,
                context.asSerializationSchemaInitializationContext(),
                Collections.emptyList());
    }

    @Internal
    @Override
    public KafkaWriter<IN> restoreWriter(
            InitContext context, Collection<KafkaWriterState> recoveredState) throws IOException {
        return new KafkaWriter<>(
                deliveryGuarantee,
                kafkaProducerConfig,
                transactionalIdPrefix,
                context,
                recordSerializer,
                context.asSerializationSchemaInitializationContext(),
                recoveredState);
    }

    @Internal
    @Override
    public SimpleVersionedSerializer<KafkaWriterState> getWriterStateSerializer() {
        return new KafkaWriterStateSerializer();
    }
}
|
/**
 * A Flink sink that writes records to Kafka with the configured {@link DeliveryGuarantee},
 * supporting stateful writers and two-phase commit for exactly-once delivery.
 *
 * <p>Fix: the original contained a duplicated {@code @Internal @Override} annotation pair
 * (a compile error) where {@code getCommittableSerializer()} was missing; the method is
 * restored here.
 *
 * @param <IN> type of incoming records
 */
class KafkaSink<IN>
        implements StatefulSink<IN, KafkaWriterState>,
                TwoPhaseCommittingSink<IN, KafkaCommittable> {

    private final DeliveryGuarantee deliveryGuarantee;
    private final KafkaRecordSerializationSchema<IN> recordSerializer;
    private final Properties kafkaProducerConfig;
    private final String transactionalIdPrefix;

    KafkaSink(
            DeliveryGuarantee deliveryGuarantee,
            Properties kafkaProducerConfig,
            String transactionalIdPrefix,
            KafkaRecordSerializationSchema<IN> recordSerializer) {
        this.deliveryGuarantee = deliveryGuarantee;
        this.kafkaProducerConfig = kafkaProducerConfig;
        this.transactionalIdPrefix = transactionalIdPrefix;
        this.recordSerializer = recordSerializer;
    }

    /**
     * Create a {@link KafkaSinkBuilder} to construct a new {@link KafkaSink}.
     *
     * @param <IN> type of incoming records
     * @return {@link KafkaSinkBuilder}
     */
    public static <IN> KafkaSinkBuilder<IN> builder() {
        return new KafkaSinkBuilder<>();
    }

    @Internal
    @Override
    public Committer<KafkaCommittable> createCommitter() throws IOException {
        return new KafkaCommitter(kafkaProducerConfig);
    }

    @Internal
    @Override
    public SimpleVersionedSerializer<KafkaCommittable> getCommittableSerializer() {
        return new KafkaCommittableSerializer();
    }

    @Internal
    @Override
    public KafkaWriter<IN> createWriter(InitContext context) throws IOException {
        return new KafkaWriter<IN>(
                deliveryGuarantee,
                kafkaProducerConfig,
                transactionalIdPrefix,
                context,
                recordSerializer,
                context.asSerializationSchemaInitializationContext(),
                Collections.emptyList());
    }

    @Internal
    @Override
    public KafkaWriter<IN> restoreWriter(
            InitContext context, Collection<KafkaWriterState> recoveredState) throws IOException {
        return new KafkaWriter<>(
                deliveryGuarantee,
                kafkaProducerConfig,
                transactionalIdPrefix,
                context,
                recordSerializer,
                context.asSerializationSchemaInitializationContext(),
                recoveredState);
    }

    @Internal
    @Override
    public SimpleVersionedSerializer<KafkaWriterState> getWriterStateSerializer() {
        return new KafkaWriterStateSerializer();
    }
}
|
With `AssertJ` it would look more natural: ``` assertThat(logManager).isInstanceOf(org.jboss.logmanager.LogManager.class); ```
|
/**
 * Verifies that the JBoss LogManager is installed, the delayed handler is registered on the
 * root logger, and the console handler uses the expected level and pattern.
 *
 * <p>Fix: JUnit's {@code assertEquals} takes {@code (expected, actual)} — the original call
 * for the pattern had the arguments swapped, which would produce a misleading failure message.
 */
public void consoleOutputTest() {
    LogManager logManager = LogManager.getLogManager();
    Assertions.assertTrue(logManager instanceof org.jboss.logmanager.LogManager);

    DelayedHandler delayedHandler = InitialConfigurator.DELAYED_HANDLER;
    // The delayed handler must be attached to the root logger.
    Assertions.assertTrue(
            Arrays.asList(Logger.getLogger("").getHandlers()).contains(delayedHandler));

    // Fail with a clear message instead of NoSuchElementException if no ConsoleHandler exists.
    Handler handler = Arrays.stream(delayedHandler.getHandlers())
            .filter(h -> h instanceof ConsoleHandler)
            .findFirst()
            .orElseThrow(() -> new AssertionError("No ConsoleHandler configured"));
    Assertions.assertEquals(Level.ALL, handler.getLevel());

    Formatter formatter = handler.getFormatter();
    Assertions.assertTrue(formatter instanceof PatternFormatter);
    PatternFormatter patternFormatter = (PatternFormatter) formatter;
    // expected first, actual second.
    Assertions.assertEquals(
            "%d{yyyy-MM-dd HH:mm:ss,SSS} %-5p [%c{3.}] (%t) %s%e%n",
            patternFormatter.getPattern());
}
|
Assertions.assertTrue(logManager instanceof org.jboss.logmanager.LogManager);
|
// AssertJ variant: verifies JBoss LogManager is installed, the delayed handler is on the
// root logger, and the console handler has the expected level and pattern.
public void consoleOutputTest() {
    LogManager logManager = LogManager.getLogManager();
    assertThat(logManager).isInstanceOf(org.jboss.logmanager.LogManager.class);
    DelayedHandler delayedHandler = InitialConfigurator.DELAYED_HANDLER;
    assertThat(Logger.getLogger("").getHandlers()).contains(delayedHandler);
    // Fix: the original used Optional.get() unchecked and then asserted isNotNull(), which
    // could never fail; fail with a descriptive error when no console handler is present.
    Handler handler = Arrays.stream(delayedHandler.getHandlers())
            .filter(h -> h instanceof ConsoleHandler)
            .findFirst()
            .orElseThrow(() -> new AssertionError("no ConsoleHandler registered on the delayed handler"));
    assertThat(handler.getLevel()).isEqualTo(Level.ALL);
    Formatter formatter = handler.getFormatter();
    assertThat(formatter).isInstanceOf(PatternFormatter.class);
    PatternFormatter patternFormatter = (PatternFormatter) formatter;
    assertThat(patternFormatter.getPattern()).isEqualTo("%d{yyyy-MM-dd HH:mm:ss,SSS} %-5p [%c{3.}] (%t) %s%e%n");
}
|
// Quarkus unit test harness for console logging output.
class ConsoleHandlerTest {
// Boots a minimal archive whose application.properties enables console-output logging.
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class)
.addAsResource("application-console-output.properties", "application.properties"))
@Test
// NOTE(review): the @Test method body is elided in this excerpt.
}
|
// Quarkus unit test harness for console logging output.
class ConsoleHandlerTest {
// Boots a minimal archive whose application.properties enables console-output logging.
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest()
.setArchiveProducer(() -> ShrinkWrap.create(JavaArchive.class)
.addAsResource("application-console-output.properties", "application.properties"))
@Test
// NOTE(review): the @Test method body is elided in this excerpt.
}
|
The same iterator comment applies here: iterate these lists by index instead of for-each, to avoid allocating an Iterator on every call of this hot path.
|
/**
 * Counts how many of the requested {@code mediaTypes} are compatible with at least one
 * entry of {@code produces}. Each requested media type is counted at most once.
 */
private static int countMatchingMediaTypes(List<MediaType> produces, List<MediaType> mediaTypes) {
    int count = 0;
    // Index-based iteration avoids allocating an Iterator per list on this frequently
    // invoked path (same rationale as the reviewer's iterator comment; assumes the
    // lists are RandomAccess — TODO confirm at the call sites).
    for (int i = 0; i < mediaTypes.size(); i++) {
        MediaType mediaType = mediaTypes.get(i);
        for (int j = 0; j < produces.size(); j++) {
            if (mediaType.isCompatible(produces.get(j))) {
                count++;
                break; // stop at the first compatible producer for this media type
            }
        }
    }
    return count;
}
|
for (MediaType mediaType : mediaTypes) {
|
// Counts how many of the requested mediaTypes are compatible with at least one entry of
// produces; each requested media type is counted at most once.
// Index-based loops are deliberate: they avoid allocating an Iterator per call on this
// hot path (presumably the lists are RandomAccess — confirm at the call sites).
private static int countMatchingMediaTypes(List<MediaType> produces, List<MediaType> mediaTypes) {
int count = 0;
for (int i = 0; i < mediaTypes.size(); i++) {
MediaType mediaType = mediaTypes.get(i);
for (int j = 0; j < produces.size(); j++) {
MediaType produce = produces.get(j);
if (mediaType.isCompatible(produce)) {
count++;
// Stop at the first compatible producer for this media type.
break;
}
}
}
return count;
}
|
/**
 * Orders media types by preference: quality parameter first, then specificity (concrete
 * types before wildcards, composite subtypes ranked), then number of non-quality
 * parameters.
 * NOTE(review): compare() names its parameters (mediaType2, mediaType) in swapped order,
 * which appears intentional so that more-preferred types sort first — confirm before
 * "fixing".
 */
class MediaTypeComparator implements Comparator<MediaType>, Serializable {
private static final long serialVersionUID = -5828700121582498092L;
// Name of the quality parameter consulted on each media type.
private final String parameterName;
public MediaTypeComparator(String parameterName) {
this.parameterName = parameterName;
}
public int compare(MediaType mediaType2, MediaType mediaType) {
// getQTypeWithParamInfo uses 2.0f as a sentinel meaning "no quality parameter present".
float q = getQTypeWithParamInfo(mediaType, parameterName);
boolean wasQ = q != 2.0f;
if (q == 2.0f)
q = 1.0f; // absent quality defaults to 1.0
float q2 = getQTypeWithParamInfo(mediaType2, parameterName);
boolean wasQ2 = q2 != 2.0f;
if (q2 == 2.0f)
q2 = 1.0f;
// 1) Higher quality wins.
if (q < q2)
return -1;
if (q > q2)
return 1;
// 2) Concrete types before full wildcards, then before wildcard subtypes.
if (mediaType.isWildcardType() && !mediaType2.isWildcardType())
return -1;
if (!mediaType.isWildcardType() && mediaType2.isWildcardType())
return 1;
if (mediaType.isWildcardSubtype() && !mediaType2.isWildcardSubtype())
return -1;
if (!mediaType.isWildcardSubtype() && mediaType2.isWildcardSubtype())
return 1;
// 3) Composite subtypes (e.g. "x+y" forms) ranked against plain subtypes,
//    then the partially-wildcarded composite variants.
if (isComposite(mediaType.getSubtype()) && !isComposite(mediaType2.getSubtype()))
return -1;
if (!isComposite(mediaType.getSubtype()) && isComposite(mediaType2.getSubtype()))
return 1;
if (isCompositeWildcardSubtype(mediaType.getSubtype()) && !isCompositeWildcardSubtype(mediaType2.getSubtype()))
return -1;
if (!isCompositeWildcardSubtype(mediaType.getSubtype()) && isCompositeWildcardSubtype(mediaType2.getSubtype()))
return 1;
if (isWildcardCompositeSubtype(mediaType.getSubtype()) && !isWildcardCompositeSubtype(mediaType2.getSubtype()))
return -1;
if (!isWildcardCompositeSubtype(mediaType.getSubtype()) && isWildcardCompositeSubtype(mediaType2.getSubtype()))
return 1;
// 4) More non-quality parameters means more specific, so it sorts ahead.
int numNonQ = 0;
if (mediaType.getParameters() != null) {
numNonQ = mediaType.getParameters().size();
if (wasQ)
numNonQ--; // exclude the quality parameter itself from the count
}
int numNonQ2 = 0;
if (mediaType2.getParameters() != null) {
numNonQ2 = mediaType2.getParameters().size();
if (wasQ2)
numNonQ2--;
}
if (numNonQ < numNonQ2)
return -1;
if (numNonQ > numNonQ2)
return 1;
return 0;
}
}
|
/**
 * Orders media types by preference: quality parameter first, then specificity (concrete
 * types before wildcards, composite subtypes ranked), then number of non-quality
 * parameters.
 * NOTE(review): compare() names its parameters (mediaType2, mediaType) in swapped order,
 * which appears intentional so that more-preferred types sort first — confirm before
 * "fixing".
 */
class MediaTypeComparator implements Comparator<MediaType>, Serializable {
private static final long serialVersionUID = -5828700121582498092L;
// Name of the quality parameter consulted on each media type.
private final String parameterName;
public MediaTypeComparator(String parameterName) {
this.parameterName = parameterName;
}
public int compare(MediaType mediaType2, MediaType mediaType) {
// getQTypeWithParamInfo uses 2.0f as a sentinel meaning "no quality parameter present".
float q = getQTypeWithParamInfo(mediaType, parameterName);
boolean wasQ = q != 2.0f;
if (q == 2.0f)
q = 1.0f; // absent quality defaults to 1.0
float q2 = getQTypeWithParamInfo(mediaType2, parameterName);
boolean wasQ2 = q2 != 2.0f;
if (q2 == 2.0f)
q2 = 1.0f;
// 1) Higher quality wins.
if (q < q2)
return -1;
if (q > q2)
return 1;
// 2) Concrete types before full wildcards, then before wildcard subtypes.
if (mediaType.isWildcardType() && !mediaType2.isWildcardType())
return -1;
if (!mediaType.isWildcardType() && mediaType2.isWildcardType())
return 1;
if (mediaType.isWildcardSubtype() && !mediaType2.isWildcardSubtype())
return -1;
if (!mediaType.isWildcardSubtype() && mediaType2.isWildcardSubtype())
return 1;
// 3) Composite subtypes (e.g. "x+y" forms) ranked against plain subtypes,
//    then the partially-wildcarded composite variants.
if (isComposite(mediaType.getSubtype()) && !isComposite(mediaType2.getSubtype()))
return -1;
if (!isComposite(mediaType.getSubtype()) && isComposite(mediaType2.getSubtype()))
return 1;
if (isCompositeWildcardSubtype(mediaType.getSubtype()) && !isCompositeWildcardSubtype(mediaType2.getSubtype()))
return -1;
if (!isCompositeWildcardSubtype(mediaType.getSubtype()) && isCompositeWildcardSubtype(mediaType2.getSubtype()))
return 1;
if (isWildcardCompositeSubtype(mediaType.getSubtype()) && !isWildcardCompositeSubtype(mediaType2.getSubtype()))
return -1;
if (!isWildcardCompositeSubtype(mediaType.getSubtype()) && isWildcardCompositeSubtype(mediaType2.getSubtype()))
return 1;
// 4) More non-quality parameters means more specific, so it sorts ahead.
int numNonQ = 0;
if (mediaType.getParameters() != null) {
numNonQ = mediaType.getParameters().size();
if (wasQ)
numNonQ--; // exclude the quality parameter itself from the count
}
int numNonQ2 = 0;
if (mediaType2.getParameters() != null) {
numNonQ2 = mediaType2.getParameters().size();
if (wasQ2)
numNonQ2--;
}
if (numNonQ < numNonQ2)
return -1;
if (numNonQ > numNonQ2)
return 1;
return 0;
}
}
|
Not all tuples should fail right? Shouldn't we be able to remove all from a tuple with only a rest field? ``` [int...] i = [2, 3]; i.removeAll(); // this is valid? ```
|
/**
 * Rejects a length change when this value's inherent type fixes its size.
 * Per review: a tuple with a rest type ([T...]) may be resized, as long as the new
 * length does not drop below the number of fixed member types; only rest-less tuples
 * (and closed-sealed arrays) are size-frozen.
 */
private void checkFixedLength(long length) {
    if (arrayType == null) {
        // No inherent type recorded; nothing to enforce (also avoids the NPE the
        // original hit when casting a null arrayType below).
        return;
    }
    if (arrayType.getTag() == TypeTags.TUPLE_TAG) {
        BTupleType tupleType = (BTupleType) arrayType;
        if (tupleType.getRestType() == null || length < tupleType.getTupleTypes().size()) {
            throw BLangExceptionHelper.getRuntimeException(BallerinaErrorReasons.INHERENT_TYPE_VIOLATION_ERROR,
                    RuntimeErrors.CANNOT_CHANGE_TUPLE_SIZE);
        }
    } else if (((BArrayType) arrayType).getState() == ArrayState.CLOSED_SEALED) {
        throw BLangExceptionHelper.getRuntimeException(BallerinaErrorReasons.INHERENT_TYPE_VIOLATION_ERROR,
                RuntimeErrors.ILLEGAL_ARRAY_SIZE, length);
    }
}
|
if (arrayType != null && arrayType.getTag() == TypeTags.TUPLE_TAG) {
|
/**
 * Rejects a length change when this value's inherent type fixes its size.
 * Per review: a tuple with a rest type ([T...]) may be resized, as long as the new
 * length does not drop below the number of fixed member types; only rest-less tuples
 * (and closed-sealed arrays) are size-frozen.
 */
private void checkFixedLength(long length) {
    if (arrayType == null) {
        // No inherent type recorded; nothing to enforce (also avoids the NPE the
        // original hit when casting a null arrayType below).
        return;
    }
    if (arrayType.getTag() == TypeTags.TUPLE_TAG) {
        BTupleType tupleType = (BTupleType) arrayType;
        if (tupleType.getRestType() == null || length < tupleType.getTupleTypes().size()) {
            throw BLangExceptionHelper.getRuntimeException(BallerinaErrorReasons.INHERENT_TYPE_VIOLATION_ERROR,
                    RuntimeErrors.CANNOT_CHANGE_TUPLE_SIZE);
        }
    } else if (((BArrayType) arrayType).getState() == ArrayState.CLOSED_SEALED) {
        throw BLangExceptionHelper.getRuntimeException(BallerinaErrorReasons.INHERENT_TYPE_VIOLATION_ERROR,
                RuntimeErrors.ILLEGAL_ARRAY_SIZE, length);
    }
}
|
// Runtime representation of a Ballerina array or tuple value. Exactly one of the typed
// backing stores below is active (depending on elementType); the others stay null.
class ArrayValue implements RefValue, CollectionValue {
// Largest array the runtime will allocate (mirrors the common JDK max-array-size guard).
static final int SYSTEM_ARRAY_MAX = Integer.MAX_VALUE - 8;
protected BType arrayType;
// Freeze (immutability) state; volatile because freezing may race with readers.
private volatile Status freezeStatus = new Status(State.UNFROZEN);
/**
* The maximum size of arrays to allocate.
* <p>
* This is same as Java
*/
protected int maxArraySize = SYSTEM_ARRAY_MAX;
// Initial backing-store capacity when no size is known up front.
private static final int DEFAULT_ARRAY_SIZE = 100;
// Logical element count; may be smaller than the backing store's physical length.
protected int size = 0;
// Boxed store for reference-typed (and tuple) elements.
Object[] refValues;
private long[] intValues;
private boolean[] booleanValues;
private byte[] byteValues;
private double[] floatValues;
private String[] stringValues;
public BType elementType;
// Rest type of a tuple ([T...]); null for fixed tuples and plain arrays.
private BType tupleRestType;
// Wraps an existing Object[] as an array/tuple of the given type (no copy).
public ArrayValue(Object[] values, BType type) {
this.refValues = values;
this.arrayType = type;
this.size = values.length;
if (type.getTag() == TypeTags.ARRAY_TAG) {
this.elementType = ((BArrayType) type).getElementType();
}
}
// Wraps a long[] as a Ballerina int array (no copy).
public ArrayValue(long[] values) {
this.intValues = values;
this.size = values.length;
setArrayElementType(BTypes.typeInt);
}
// Wraps a boolean[] as a Ballerina boolean array (no copy).
public ArrayValue(boolean[] values) {
this.booleanValues = values;
this.size = values.length;
setArrayElementType(BTypes.typeBoolean);
}
// Wraps a byte[] as a Ballerina byte array (no copy).
public ArrayValue(byte[] values) {
this.byteValues = values;
this.size = values.length;
setArrayElementType(BTypes.typeByte);
}
// Wraps a double[] as a Ballerina float array (no copy).
public ArrayValue(double[] values) {
this.floatValues = values;
this.size = values.length;
setArrayElementType(BTypes.typeFloat);
}
// Wraps a String[] as a Ballerina string array (no copy).
public ArrayValue(String[] values) {
this.stringValues = values;
this.size = values.length;
setArrayElementType(BTypes.typeString);
}
// Allocates an empty value of the given type, choosing the backing store from the type:
// primitive element types get primitive arrays; tuples/unions/other types get Object[].
public ArrayValue(BType type) {
if (type.getTag() == TypeTags.INT_TAG) {
intValues = (long[]) newArrayInstance(Long.TYPE);
setArrayElementType(type);
} else if (type.getTag() == TypeTags.BOOLEAN_TAG) {
booleanValues = (boolean[]) newArrayInstance(Boolean.TYPE);
setArrayElementType(type);
} else if (type.getTag() == TypeTags.BYTE_TAG) {
byteValues = (byte[]) newArrayInstance(Byte.TYPE);
setArrayElementType(type);
} else if (type.getTag() == TypeTags.FLOAT_TAG) {
floatValues = (double[]) newArrayInstance(Double.TYPE);
setArrayElementType(type);
} else if (type.getTag() == TypeTags.STRING_TAG) {
stringValues = (String[]) newArrayInstance(String.class);
setArrayElementType(type);
} else {
this.arrayType = type;
if (type.getTag() == TypeTags.ARRAY_TAG) {
BArrayType arrayType = (BArrayType) type;
this.elementType = arrayType.getElementType();
// Closed-sealed ("T[n]") arrays are allocated at their fixed size.
if (arrayType.getState() == ArrayState.CLOSED_SEALED) {
this.size = maxArraySize = arrayType.getSize();
}
initArrayValues(this.elementType);
} else if (type.getTag() == TypeTags.TUPLE_TAG) {
BTupleType tupleType = (BTupleType) type;
tupleRestType = tupleType.getRestType();
size = tupleType.getTupleTypes().size();
// A rest type allows the tuple to grow; otherwise its size is fixed.
maxArraySize = (tupleRestType != null) ? maxArraySize : size;
refValues = (Object[]) newArrayInstance(Object.class);
// Pre-populate each fixed member slot with that member type's empty value.
AtomicInteger counter = new AtomicInteger(0);
tupleType.getTupleTypes()
.forEach(memType -> refValues[counter.getAndIncrement()] = memType.getEmptyValue());
} else if (type.getTag() == TypeTags.UNION_TAG) {
// NOTE(review): sizing by the number of union members and initializing a
// store per member type looks unusual — confirm the intended semantics.
BUnionType unionType = (BUnionType) type;
this.size = maxArraySize = unionType.getMemberTypes().size();
unionType.getMemberTypes().forEach(this::initArrayValues);
} else {
refValues = (Object[]) newArrayInstance(Object.class);
}
}
}
// Allocates the backing store matching the element type; any non-primitive
// Ballerina type (including XML) falls back to a boxed Object[] store.
private void initArrayValues(BType elementType) {
    int tag = elementType.getTag();
    if (tag == TypeTags.INT_TAG) {
        intValues = (long[]) newArrayInstance(Long.TYPE);
    } else if (tag == TypeTags.FLOAT_TAG) {
        floatValues = (double[]) newArrayInstance(Double.TYPE);
    } else if (tag == TypeTags.STRING_TAG) {
        stringValues = (String[]) newArrayInstance(String.class);
    } else if (tag == TypeTags.BOOLEAN_TAG) {
        booleanValues = (boolean[]) newArrayInstance(Boolean.TYPE);
    } else if (tag == TypeTags.BYTE_TAG) {
        byteValues = (byte[]) newArrayInstance(Byte.TYPE);
    } else {
        // Covers XML and every other reference type.
        refValues = (Object[]) newArrayInstance(Object.class);
    }
}
// Untyped empty array backed by Object[].
public ArrayValue() {
refValues = (Object[]) newArrayInstance(Object.class);
}
// Allocates a value of the given type with an explicit size (-1 means "use default").
public ArrayValue(BType type, long size) {
this.arrayType = type;
if (type.getTag() == TypeTags.ARRAY_TAG) {
elementType = ((BArrayType) type).getElementType();
if (size != -1) {
this.size = maxArraySize = (int) size;
}
initArrayValues(elementType);
} else if (type.getTag() == TypeTags.TUPLE_TAG) {
tupleRestType = ((BTupleType) type).getRestType();
if (size != -1) {
this.size = (int) size;
// A rest type keeps the tuple growable; otherwise the size is fixed.
maxArraySize = (tupleRestType != null) ? maxArraySize : (int) size;
}
refValues = (Object[]) newArrayInstance(Object.class);
} else {
if (size != -1) {
this.size = maxArraySize = (int) size;
}
refValues = (Object[]) newArrayInstance(Object.class);
}
}
// Returns the element at index, dispatching to the typed accessor that matches this
// array's element type; boxed/reference arrays go through getRefValue.
public Object getValue(long index) {
    if (elementType == null) {
        return getRefValue(index);
    }
    switch (elementType.getTag()) {
        case TypeTags.INT_TAG:
            return getInt(index);
        case TypeTags.BOOLEAN_TAG:
            return getBoolean(index);
        case TypeTags.BYTE_TAG:
            return getByte(index);
        case TypeTags.FLOAT_TAG:
            return getFloat(index);
        case TypeTags.STRING_TAG:
            return getString(index);
        default:
            return getRefValue(index);
    }
}
// Returns the element at index from the boxed store; when this array uses a primitive
// backing store (refValues == null), falls back to the typed dispatch in getValue.
public Object getRefValue(long index) {
rangeCheckForGet(index, size);
if (refValues == null) {
return getValue(index);
}
return refValues[(int) index];
}
// Typed getters: read from the primitive store when the element type matches,
// otherwise unbox from the Object[] store.
public long getInt(long index) {
rangeCheckForGet(index, size);
if (elementType.getTag() == TypeTags.INT_TAG) {
return intValues[(int) index];
} else {
return (Long) refValues[(int) index];
}
}
public boolean getBoolean(long index) {
rangeCheckForGet(index, size);
if (elementType.getTag() == TypeTags.BOOLEAN_TAG) {
return booleanValues[(int) index];
} else {
return (Boolean) refValues[(int) index];
}
}
public byte getByte(long index) {
rangeCheckForGet(index, size);
if (elementType.getTag() == TypeTags.BYTE_TAG) {
return byteValues[(int) index];
} else {
return (Byte) refValues[(int) index];
}
}
public double getFloat(long index) {
rangeCheckForGet(index, size);
if (elementType.getTag() == TypeTags.FLOAT_TAG) {
return floatValues[(int) index];
} else {
return (Double) refValues[(int) index];
}
}
public String getString(long index) {
rangeCheckForGet(index, size);
if (elementType.getTag() == TypeTags.STRING_TAG) {
return stringValues[(int) index];
} else {
return (String) refValues[(int) index];
}
}
// Returns the raw stored element (boxed for primitives) without typed-getter fallbacks;
// requires elementType to be non-null.
public Object get(long index) {
rangeCheckForGet(index, size);
switch (this.elementType.getTag()) {
case TypeTags.INT_TAG:
return intValues[(int) index];
case TypeTags.BOOLEAN_TAG:
return booleanValues[(int) index];
case TypeTags.BYTE_TAG:
return byteValues[(int) index];
case TypeTags.FLOAT_TAG:
return floatValues[(int) index];
case TypeTags.STRING_TAG:
return stringValues[(int) index];
default:
return refValues[(int) index];
}
}
// Stores a reference value at index, growing/filling the array as needed.
// Rejects the write if the value is frozen.
public void add(long index, Object value) {
handleFrozenArrayValue();
prepareForAdd(index, refValues.length);
refValues[(int) index] = value;
}
// Stores an int value at index, growing/filling the array as needed.
public void add(long index, long value) {
handleFrozenArrayValue();
prepareForAdd(index, intValues.length);
intValues[(int) index] = value;
}
// Stores a boolean value at index, growing/filling the array as needed.
public void add(long index, boolean value) {
    if (elementType.getTag() == TypeTags.INT_TAG) {
        // Bug fix: the original called add(index, value), which resolved back to this
        // same (long, boolean) overload and recursed forever. Store the boolean in its
        // integer form instead so the (long, long) overload is selected.
        add(index, value ? 1L : 0L);
        return;
    }
    handleFrozenArrayValue();
    prepareForAdd(index, booleanValues.length);
    booleanValues[(int) index] = value;
}
// Stores a byte value at index, growing/filling the array as needed.
public void add(long index, byte value) {
handleFrozenArrayValue();
prepareForAdd(index, byteValues.length);
byteValues[(int) index] = value;
}
// Stores a float (double) value at index, growing/filling the array as needed.
public void add(long index, double value) {
handleFrozenArrayValue();
prepareForAdd(index, floatValues.length);
floatValues[(int) index] = value;
}
// Stores a string value at index, growing/filling the array as needed.
public void add(long index, String value) {
handleFrozenArrayValue();
prepareForAdd(index, stringValues.length);
stringValues[(int) index] = value;
}
// Appends a reference value at the current end of the array.
public void append(Object value) {
add(size, value);
}
// Removes and returns the element at index, shifting later elements one slot left.
public Object shift(long index) {
handleFrozenArrayValue();
Object val = get(index);
shiftArray((int) index, getArrayFromType(elementType.getTag()));
return val;
}
// Compacts the backing store after removing the element at index, then shrinks size.
private void shiftArray(int index, Object arr) {
int nElemsToBeMoved = this.size - 1 - index;
if (nElemsToBeMoved >= 0) {
System.arraycopy(arr, index + 1, arr, index, nElemsToBeMoved);
}
this.size--;
}
// Inserts all elements of vals at index, shifting existing elements right, then copies
// the new elements in via the store matching this array's element type.
public void unshift(long index, ArrayValue vals) {
handleFrozenArrayValue();
unshiftArray(index, vals.size, getCurrentArrayLength());
switch (elementType.getTag()) {
case TypeTags.INT_TAG:
addToIntArray(vals, (int) index);
break;
case TypeTags.BOOLEAN_TAG:
addToBooleanArray(vals, (int) index);
break;
case TypeTags.BYTE_TAG:
addToByteArray(vals, (int) index);
break;
case TypeTags.FLOAT_TAG:
addToFloatArray(vals, (int) index);
break;
case TypeTags.STRING_TAG:
addToStringArray(vals, (int) index);
break;
default:
addToRefArray(vals, (int) index);
}
}
// Copies vals element-by-element into this array starting at startIndex.
private void addToIntArray(ArrayValue vals, int startIndex) {
int endIndex = startIndex + vals.size;
for (int i = startIndex, j = 0; i < endIndex; i++, j++) {
add(i, vals.getInt(j));
}
}
private void addToFloatArray(ArrayValue vals, int startIndex) {
int endIndex = startIndex + vals.size;
for (int i = startIndex, j = 0; i < endIndex; i++, j++) {
add(i, vals.getFloat(j));
}
}
private void addToStringArray(ArrayValue vals, int startIndex) {
int endIndex = startIndex + vals.size;
for (int i = startIndex, j = 0; i < endIndex; i++, j++) {
add(i, vals.getString(j));
}
}
// Writes bytes directly into the backing store (bypasses add(); caller has already
// opened the gap via unshiftArray).
private void addToByteArray(ArrayValue vals, int startIndex) {
int endIndex = startIndex + vals.size;
byte[] bytes = vals.getBytes();
for (int i = startIndex, j = 0; i < endIndex; i++, j++) {
this.byteValues[i] = bytes[j];
}
}
private void addToBooleanArray(ArrayValue vals, int startIndex) {
int endIndex = startIndex + vals.size;
for (int i = startIndex, j = 0; i < endIndex; i++, j++) {
add(i, vals.getBoolean(j));
}
}
private void addToRefArray(ArrayValue vals, int startIndex) {
int endIndex = startIndex + vals.size;
for (int i = startIndex, j = 0; i < endIndex; i++, j++) {
add(i, vals.getRefValue(j));
}
}
// Opens a gap of unshiftByN slots at index by growing the store (if necessary) and
// shifting existing elements right; rejects indices beyond the resulting last index.
private void unshiftArray(long index, int unshiftByN, int arrLength) {
int lastIndex = size() + unshiftByN - 1;
prepareForConsecutiveMultiAdd(lastIndex, arrLength);
Object arr = getArrayFromType(elementType.getTag());
if (index > lastIndex) {
throw BLangExceptionHelper.getRuntimeException(BallerinaErrorReasons.INDEX_OUT_OF_RANGE_ERROR,
RuntimeErrors.INDEX_NUMBER_TOO_LARGE, index);
}
int i = (int) index;
System.arraycopy(arr, i, arr, i + unshiftByN, this.size - i);
}
// Returns the backing store that corresponds to the given element-type tag.
private Object getArrayFromType(int typeTag) {
switch (typeTag) {
case TypeTags.INT_TAG:
return intValues;
case TypeTags.BOOLEAN_TAG:
return booleanValues;
case TypeTags.BYTE_TAG:
return byteValues;
case TypeTags.FLOAT_TAG:
return floatValues;
case TypeTags.STRING_TAG:
return stringValues;
default:
return refValues;
}
}
// Returns the physical capacity of the active backing store.
private int getCurrentArrayLength() {
switch (elementType.getTag()) {
case TypeTags.INT_TAG:
return intValues.length;
case TypeTags.BOOLEAN_TAG:
return booleanValues.length;
case TypeTags.BYTE_TAG:
return byteValues.length;
case TypeTags.FLOAT_TAG:
return floatValues.length;
case TypeTags.STRING_TAG:
return stringValues.length;
default:
return refValues.length;
}
}
@Override
public String stringValue() {
    // Space-separated string form of the array. Primitive-backed arrays render directly
    // from their typed stores; JSON arrays delegate to the JSON serializer.
    if (elementType != null) {
        StringJoiner sj = new StringJoiner(" ");
        if (elementType.getTag() == TypeTags.INT_TAG) {
            for (int i = 0; i < size; i++) {
                sj.add(Long.toString(intValues[i]));
            }
            return sj.toString();
        } else if (elementType.getTag() == TypeTags.BOOLEAN_TAG) {
            for (int i = 0; i < size; i++) {
                sj.add(Boolean.toString(booleanValues[i]));
            }
            return sj.toString();
        } else if (elementType.getTag() == TypeTags.BYTE_TAG) {
            // Bytes are rendered unsigned (0..255).
            for (int i = 0; i < size; i++) {
                sj.add(Long.toString(Byte.toUnsignedLong(byteValues[i])));
            }
            return sj.toString();
        } else if (elementType.getTag() == TypeTags.FLOAT_TAG) {
            for (int i = 0; i < size; i++) {
                sj.add(Double.toString(floatValues[i]));
            }
            return sj.toString();
        } else if (elementType.getTag() == TypeTags.STRING_TAG) {
            for (int i = 0; i < size; i++) {
                sj.add(stringValues[i]);
            }
            return sj.toString();
        }
    }
    if (getElementType(arrayType).getTag() == TypeTags.JSON_TAG) {
        return getJSONString();
    }
    // Cleanup: the original chose the StringJoiner in a tuple/non-tuple conditional whose
    // two branches were byte-identical; the redundant branch is removed.
    StringJoiner sj = new StringJoiner(" ");
    for (int i = 0; i < size; i++) {
        if (refValues[i] != null) {
            sj.add((refValues[i] instanceof RefValue) ? ((RefValue) refValues[i]).stringValue() :
                    (refValues[i] instanceof String) ? (String) refValues[i] : refValues[i].toString());
        } else {
            // Ballerina nil prints as "()".
            sj.add("()");
        }
    }
    return sj.toString();
}
// Returns the inherent (array/tuple) type of this value.
@Override
public BType getType() {
return arrayType;
}
// Returns the logical element count.
@Override
public int size() {
return size;
}
// True when the array holds no elements.
public boolean isEmpty() {
return size == 0;
}
// Stamps (re-types in place) this value to the given type, recursively stamping member
// values. unresolvedValues tracks in-progress values to break reference cycles.
@Override
public void stamp(BType type, List<TypeValuePair> unresolvedValues) {
if (type.getTag() == TypeTags.TUPLE_TAG) {
// Tuples need a boxed store; migrate off a primitive backing store first.
if (elementType != null && isBasicType(elementType)) {
moveBasicTypeArrayToRefValueArray();
}
Object[] arrayValues = this.getValues();
for (int i = 0; i < this.size(); i++) {
if (arrayValues[i] instanceof RefValue) {
BType memberType = ((BTupleType) type).getTupleTypes().get(i);
// anydata/json members are narrowed to the concrete matching type; note this
// mutates the member list of the target tuple type in place.
if (memberType.getTag() == TypeTags.ANYDATA_TAG || memberType.getTag() == TypeTags.JSON_TAG) {
memberType = TypeConverter.resolveMatchingTypeForUnion(arrayValues[i], memberType);
((BTupleType) type).getTupleTypes().set(i, memberType);
}
((RefValue) arrayValues[i]).stamp(memberType, unresolvedValues);
}
}
} else if (type.getTag() == TypeTags.JSON_TAG) {
// A primitive-backed array stamped to json keeps its store and just becomes json[].
if (elementType != null && isBasicType(elementType) && !isBasicType(type)) {
moveBasicTypeArrayToRefValueArray();
this.arrayType = new BArrayType(type);
return;
}
Object[] arrayValues = this.getValues();
for (int i = 0; i < this.size(); i++) {
if (arrayValues[i] instanceof RefValue) {
((RefValue) arrayValues[i]).stamp(TypeConverter.resolveMatchingTypeForUnion(arrayValues[i], type),
unresolvedValues);
}
}
type = new BArrayType(type);
} else if (type.getTag() == TypeTags.UNION_TAG) {
// Pick the first union member this value looks like and stamp to it.
for (BType memberType : ((BUnionType) type).getMemberTypes()) {
if (TypeChecker.checkIsLikeType(this, memberType, new ArrayList<>())) {
this.stamp(memberType, unresolvedValues);
type = memberType;
break;
}
}
} else if (type.getTag() == TypeTags.ANYDATA_TAG) {
type = TypeConverter.resolveMatchingTypeForUnion(this, type);
this.stamp(type, unresolvedValues);
} else {
// Array target: switch the backing store representation if the element kind
// (primitive vs. reference) changes, then stamp members recursively.
BType arrayElementType = ((BArrayType) type).getElementType();
if (elementType != null && isBasicType(elementType)) {
if (isBasicType(arrayElementType)) {
this.arrayType = type;
return;
}
moveBasicTypeArrayToRefValueArray();
this.arrayType = type;
return;
}
if (isBasicType(arrayElementType) &&
(arrayType.getTag() == TypeTags.TUPLE_TAG || !isBasicType(elementType))) {
moveRefValueArrayToBasicTypeArray(type, arrayElementType);
return;
}
Object[] arrayValues = this.getValues();
for (int i = 0; i < this.size(); i++) {
if (arrayValues[i] instanceof RefValue) {
((RefValue) arrayValues[i]).stamp(arrayElementType, unresolvedValues);
}
}
}
this.arrayType = type;
}
// Deep-copies this value. Frozen values are shared as-is; refs maps already-copied
// values so reference cycles and aliasing are preserved in the copy.
@Override
public Object copy(Map<Object, Object> refs) {
if (isFrozen()) {
return this;
}
if (refs.containsKey(this)) {
return refs.get(this);
}
// Primitive-backed arrays copy their typed store directly.
if (elementType != null) {
ArrayValue valueArray = null;
if (elementType.getTag() == TypeTags.INT_TAG) {
valueArray = new ArrayValue(Arrays.copyOf(intValues, intValues.length));
} else if (elementType.getTag() == TypeTags.BOOLEAN_TAG) {
valueArray = new ArrayValue(Arrays.copyOf(booleanValues, booleanValues.length));
} else if (elementType.getTag() == TypeTags.BYTE_TAG) {
valueArray = new ArrayValue(Arrays.copyOf(byteValues, byteValues.length));
} else if (elementType.getTag() == TypeTags.FLOAT_TAG) {
valueArray = new ArrayValue(Arrays.copyOf(floatValues, floatValues.length));
} else if (elementType.getTag() == TypeTags.STRING_TAG) {
valueArray = new ArrayValue(Arrays.copyOf(stringValues, stringValues.length));
}
if (valueArray != null) {
valueArray.size = this.size;
refs.put(this, valueArray);
return valueArray;
}
}
// Reference-backed arrays: register the copy in refs BEFORE copying members so a
// member that points back to this array resolves to the copy, not an infinite loop.
Object[] values = new Object[size];
ArrayValue refValueArray = new ArrayValue(values, arrayType);
refValueArray.size = this.size;
refs.put(this, refValueArray);
int bound = this.size;
IntStream.range(0, bound).forEach(i -> {
Object value = this.refValues[i];
if (value instanceof RefValue) {
values[i] = ((RefValue) value).copy(refs);
} else {
values[i] = value;
}
});
return refValueArray;
}
// Deep-copies this value and freezes the copy (no-op freeze if already frozen).
@Override
public Object frozenCopy(Map<Object, Object> refs) {
ArrayValue copy = (ArrayValue) copy(refs);
if (!copy.isFrozen()) {
copy.freezeDirect();
}
return copy;
}
@Override
public String toString() {
return stringValue();
}
// Exposes the boxed backing store directly (null for primitive-backed arrays).
public Object[] getValues() {
return refValues;
}
// Returns a defensive copy of the byte store, trimmed to the logical size.
public byte[] getBytes() {
byte[] bytes = new byte[this.size];
System.arraycopy(byteValues, 0, bytes, 0, this.size);
return bytes;
}
// Returns a defensive copy of the string store, trimmed to the logical size.
public String[] getStringArray() {
return Arrays.copyOf(stringValues, size);
}
// Returns a defensive copy of the int store, trimmed to the logical size.
public long[] getLongArray() {
return Arrays.copyOf(intValues, size);
}
// Writes this value to the stream: byte arrays as raw bytes; everything else as the
// string form encoded with the platform default charset.
@Override
public void serialize(OutputStream outputStream) {
if (elementType.getTag() == TypeTags.BYTE_TAG) {
try {
outputStream.write(byteValues);
} catch (IOException e) {
throw new BallerinaException("error occurred while writing the binary content to the output stream", e);
}
} else {
try {
outputStream.write(this.toString().getBytes(Charset.defaultCharset()));
} catch (IOException e) {
throw new BallerinaException("error occurred while serializing data", e);
}
}
}
// Re-allocates the active backing store to newLength, preserving existing contents.
public void resizeInternalArray(int newLength) {
// Tuples always use the boxed store.
if (arrayType.getTag() == TypeTags.TUPLE_TAG) {
refValues = Arrays.copyOf(refValues, newLength);
} else {
if (elementType != null) {
switch (elementType.getTag()) {
case TypeTags.INT_TAG:
intValues = Arrays.copyOf(intValues, newLength);
break;
case TypeTags.BOOLEAN_TAG:
booleanValues = Arrays.copyOf(booleanValues, newLength);
break;
case TypeTags.BYTE_TAG:
byteValues = Arrays.copyOf(byteValues, newLength);
break;
case TypeTags.FLOAT_TAG:
floatValues = Arrays.copyOf(floatValues, newLength);
break;
case TypeTags.STRING_TAG:
stringValues = Arrays.copyOf(stringValues, newLength);
break;
default:
refValues = Arrays.copyOf(refValues, newLength);
break;
}
} else {
refValues = Arrays.copyOf(refValues, newLength);
}
}
}
// Fills the gap between the current size and index with the element type's filler
// (zero) value, so a write past the end leaves no uninitialized slots.
private void fillValues(int index) {
if (index <= size) {
return;
}
if (arrayType.getTag() == TypeTags.TUPLE_TAG) {
// Only rest-typed slots can be filled; fixed members were initialized at creation.
if (tupleRestType != null) {
Arrays.fill(refValues, size, index, tupleRestType.getZeroValue());
}
} else {
int typeTag = elementType.getTag();
if (typeTag == TypeTags.STRING_TAG) {
Arrays.fill(stringValues, size, index, BLangConstants.STRING_EMPTY_VALUE);
return;
}
// Numeric/boolean primitive stores are already zero-initialized by the JVM.
if (typeTag == TypeTags.INT_TAG || typeTag == TypeTags.BYTE_TAG || typeTag == TypeTags.FLOAT_TAG ||
typeTag == TypeTags.BOOLEAN_TAG) {
return;
}
Arrays.fill(refValues, size, index, elementType.getZeroValue());
}
}
// Returns the inherent array/tuple type.
public BType getArrayType() {
return arrayType;
}
// Validates a read index against the logical size, raising the tuple- or
// array-specific out-of-range error.
private void rangeCheckForGet(long index, int size) {
rangeCheck(index, size);
if (index < 0 || index >= size) {
if (arrayType != null && arrayType.getTag() == TypeTags.TUPLE_TAG) {
throw BLangExceptionHelper.getRuntimeException(BallerinaErrorReasons.INDEX_OUT_OF_RANGE_ERROR,
RuntimeErrors.TUPLE_INDEX_OUT_OF_RANGE, index, size);
}
throw BLangExceptionHelper.getRuntimeException(BallerinaErrorReasons.INDEX_OUT_OF_RANGE_ERROR,
RuntimeErrors.ARRAY_INDEX_OUT_OF_RANGE, index, size);
}
}
// Validates an index against int bounds and the maximum permitted size; rest-typed
// tuples are allowed to grow past their fixed member count.
private void rangeCheck(long index, int size) {
if (index > Integer.MAX_VALUE || index < Integer.MIN_VALUE) {
throw BLangExceptionHelper.getRuntimeException(BallerinaErrorReasons.INDEX_OUT_OF_RANGE_ERROR,
RuntimeErrors.INDEX_NUMBER_TOO_LARGE, index);
}
if (arrayType != null && arrayType.getTag() == TypeTags.TUPLE_TAG) {
if ((((BTupleType) arrayType).getRestType() == null && index >= maxArraySize) || (int) index < 0) {
throw BLangExceptionHelper.getRuntimeException(BallerinaErrorReasons.INDEX_OUT_OF_RANGE_ERROR,
RuntimeErrors.TUPLE_INDEX_OUT_OF_RANGE, index, size);
}
} else {
if ((int) index < 0 || index >= maxArraySize) {
throw BLangExceptionHelper.getRuntimeException(BallerinaErrorReasons.INDEX_OUT_OF_RANGE_ERROR,
RuntimeErrors.ARRAY_INDEX_OUT_OF_RANGE, index, size);
}
}
}
// Rejects a write past the end when the element (or tuple rest) type has no filler
// value, since the gap could not be initialized to anything meaningful.
private void fillerValueCheck(int index, int size) {
if (arrayType != null && arrayType.getTag() == TypeTags.TUPLE_TAG) {
if (!TypeChecker.hasFillerValue(tupleRestType) && (index > size)) {
throw BLangExceptionHelper.getRuntimeException(BallerinaErrorReasons.ILLEGAL_LIST_INSERTION_ERROR,
RuntimeErrors.ILLEGAL_TUPLE_INSERTION, size, index + 1);
}
} else {
if (!TypeChecker.hasFillerValue(elementType) && (index > size)) {
throw BLangExceptionHelper.getRuntimeException(BallerinaErrorReasons.ILLEGAL_LIST_INSERTION_ERROR,
RuntimeErrors.ILLEGAL_ARRAY_INSERTION, size, index + 1);
}
}
}
// Allocates a backing store of the given component type sized to the known element
// count, or DEFAULT_ARRAY_SIZE when the size is not yet known.
Object newArrayInstance(Class<?> componentType) {
return (size > 0) ?
Array.newInstance(componentType, size) : Array.newInstance(componentType, DEFAULT_ARRAY_SIZE);
}
// Records the element type and derives the array type from it.
private void setArrayElementType(BType type) {
this.arrayType = new BArrayType(type);
this.elementType = type;
}
// Serializes this value as a JSON string using the runtime's JSON generator.
public String getJSONString() {
ByteArrayOutputStream byteOut = new ByteArrayOutputStream();
JSONGenerator gen = new JSONGenerator(byteOut);
try {
gen.serialize(this);
gen.flush();
} catch (IOException e) {
throw new BallerinaException("Error in converting JSON to a string: " + e.getMessage(), e);
}
return new String(byteOut.toByteArray());
}
// Unwraps nested array types down to the innermost (non-array) element type,
// e.g. int[][] -> int. Iterative form of the original tail recursion.
private BType getElementType(BType type) {
    BType current = type;
    while (current.getTag() == TypeTags.ARRAY_TAG) {
        current = ((BArrayType) current).getElementType();
    }
    return current;
}
/**
 * Util method to handle frozen array values.
 * Raises a Ballerina error when this value is frozen (or mid-freeze) and therefore
 * must not be mutated; no-op otherwise.
 */
private void handleFrozenArrayValue() {
synchronized (this) {
try {
if (this.freezeStatus.getState() != State.UNFROZEN) {
FreezeUtils.handleInvalidUpdate(freezeStatus.getState());
}
} catch (BLangFreezeException e) {
// Re-surface the freeze violation as a regular Ballerina error value.
throw BallerinaErrors.createError(e.getMessage(), e.getDetail());
}
}
}
// Validates a write at index and readies the backing store: bounds/filler checks,
// capacity growth, gap filling, then logical-size extension.
protected void prepareForAdd(long index, int currentArraySize) {
int intIndex = (int) index;
rangeCheck(index, size);
fillerValueCheck(intIndex, size);
ensureCapacity(intIndex + 1, currentArraySize);
fillValues(intIndex);
resetSize(intIndex);
}
/**
* Same as {@code prepareForAdd}, except fillerValueCheck is not performed as we are guaranteed to add
* elements to consecutive positions.
*
* @param index last index after add operation completes
* @param currentArraySize current array size
*/
void prepareForConsecutiveMultiAdd(long index, int currentArraySize) {
int intIndex = (int) index;
rangeCheck(index, size);
ensureCapacity(intIndex + 1, currentArraySize);
resetSize(intIndex);
}
// Grows the backing store (1.5x, at least requestedCapacity, capped at maxArraySize)
// for growable values: unsealed arrays and tuples. Sealed arrays are never resized.
private void ensureCapacity(int requestedCapacity, int currentArraySize) {
if ((requestedCapacity) - currentArraySize > 0) {
if ((this.arrayType.getTag() == TypeTags.ARRAY_TAG
&& ((BArrayType) this.arrayType).getState() == ArrayState.UNSEALED)
|| this.arrayType.getTag() == TypeTags.TUPLE_TAG) {
int newArraySize = currentArraySize + (currentArraySize >> 1);
newArraySize = Math.max(newArraySize, requestedCapacity);
newArraySize = Math.min(newArraySize, maxArraySize);
resizeInternalArray(newArraySize);
}
}
}
// Extends the logical size so it covers a freshly written index; never shrinks.
private void resetSize(int index) {
    if (size <= index) {
        size = index + 1;
    }
}
/**
 * {@inheritDoc}
 */
@Override
public synchronized void attemptFreeze(Status freezeStatus) {
if (!FreezeUtils.isOpenForFreeze(this.freezeStatus, freezeStatus)) {
return;
}
this.freezeStatus = freezeStatus;
// Tags above BOOLEAN_TAG (and untyped arrays) may hold reference elements that must
// be frozen recursively — presumably the lower tags are all primitives; confirm.
if (elementType == null || elementType.getTag() > TypeTags.BOOLEAN_TAG) {
for (int i = 0; i < this.size; i++) {
Object value = this.getRefValue(i);
if (value instanceof RefValue) {
((RefValue) value).attemptFreeze(freezeStatus);
}
}
}
}
/**
 * {@inheritDoc}
 */
@Override
public void freezeDirect() {
if (isFrozen()) {
return;
}
this.freezeStatus.setFrozen();
// Tags above BOOLEAN_TAG (and untyped arrays) may hold reference elements that must
// be frozen recursively — presumably the lower tags are all primitives; confirm.
if (elementType == null || elementType.getTag() > TypeTags.BOOLEAN_TAG) {
for (int i = 0; i < this.size; i++) {
Object value = this.getRefValue(i);
if (value instanceof RefValue) {
((RefValue) value).freezeDirect();
}
}
}
}
/**
 * {@inheritDoc}
 */
@Override
public synchronized boolean isFrozen() {
return this.freezeStatus.isFrozen();
}
// True for types backed by a primitive store (tags up to BOOLEAN_TAG, excluding
// decimal, which is reference-backed).
private boolean isBasicType(BType type) {
return type.getTag() <= TypeTags.BOOLEAN_TAG && type.getTag() != TypeTags.DECIMAL_TAG;
}
// Migrates a primitive backing store into a boxed Object[] store (used before stamping
// to a reference-element type); the old store is released and elementType cleared.
private void moveBasicTypeArrayToRefValueArray() {
refValues = new Object[this.size];
if (elementType == BTypes.typeBoolean) {
for (int i = 0; i < this.size(); i++) {
refValues[i] = booleanValues[i];
}
booleanValues = null;
}
if (elementType == BTypes.typeInt) {
for (int i = 0; i < this.size(); i++) {
refValues[i] = intValues[i];
}
intValues = null;
}
if (elementType == BTypes.typeString) {
System.arraycopy(stringValues, 0, refValues, 0, this.size());
stringValues = null;
}
if (elementType == BTypes.typeFloat) {
for (int i = 0; i < this.size(); i++) {
refValues[i] = floatValues[i];
}
floatValues = null;
}
if (elementType == BTypes.typeByte) {
for (int i = 0; i < this.size(); i++) {
refValues[i] = (byteValues[i]);
}
byteValues = null;
}
elementType = null;
}
/**
 * Migrates boxed values out of {@code refValues} into the primitive backing
 * array matching {@code arrayElementType}, records the new element/array
 * types, and drops the ref array.
 *
 * <p>Fix: the FLOAT branch previously cast the boxed element to {@code float}.
 * Ballerina floats are stored as {@code java.lang.Double}, so an
 * Object-to-float cast throws {@code ClassCastException} (and would lose
 * precision even if it did not); the cast is now to {@code double}.
 *
 * @param type             the new array type to record
 * @param arrayElementType the element type deciding which primitive store to use
 */
private void moveRefValueArrayToBasicTypeArray(BType type, BType arrayElementType) {
    Object[] arrayValues = this.getValues();
    switch (arrayElementType.getTag()) {
        case TypeTags.INT_TAG:
            intValues = (long[]) newArrayInstance(Long.TYPE);
            for (int i = 0; i < this.size(); i++) {
                intValues[i] = ((long) arrayValues[i]);
            }
            break;
        case TypeTags.FLOAT_TAG:
            floatValues = (double[]) newArrayInstance(Double.TYPE);
            for (int i = 0; i < this.size(); i++) {
                floatValues[i] = ((double) arrayValues[i]);
            }
            break;
        case TypeTags.BOOLEAN_TAG:
            booleanValues = new boolean[this.size()];
            for (int i = 0; i < this.size(); i++) {
                booleanValues[i] = ((boolean) arrayValues[i]);
            }
            break;
        case TypeTags.STRING_TAG:
            stringValues = (String[]) newArrayInstance(String.class);
            for (int i = 0; i < this.size(); i++) {
                stringValues[i] = (String) arrayValues[i];
            }
            break;
        case TypeTags.BYTE_TAG:
            byteValues = (byte[]) newArrayInstance(Byte.TYPE);
            for (int i = 0; i < this.size(); i++) {
                byteValues[i] = (byte) arrayValues[i];
            }
            break;
        default:
            // No primitive representation for this tag; nothing to migrate.
            break;
    }
    this.elementType = arrayElementType;
    this.arrayType = type;
    refValues = null;
}
// Returns a fresh iterator over the current elements; the iteration length is
// captured once at construction time by ArrayIterator.
@Override
public IteratorValue getIterator() {
return new ArrayIterator(this);
}
// Resizes the array to the requested length: newly exposed slots are filled
// with the element type's filler value, and shrinking truncates. Frozen
// arrays are rejected up front by handleFrozenArrayValue.
public void setLength(long length) {
handleFrozenArrayValue();
int newLength = (int) length;
// checkFixedLength is declared outside this view; presumably it rejects
// resizing sealed / fixed-length arrays -- TODO confirm.
checkFixedLength(length);
rangeCheck(length, size);
fillerValueCheck(newLength, size);
resizeInternalArray(newLength);
fillValues(newLength);
size = newLength;
}
/**
 * {@code ArrayIterator} provides iterator implementation for Ballerina array values.
 *
 * <p>Fix: {@code next()} previously compared the cursor with {@code ==}, so calling
 * {@code next()} again after it had already returned {@code null} walked past the end
 * and triggered an out-of-range failure instead of returning {@code null} again.
 * The comparison is now {@code >=}, making repeated calls on an exhausted iterator safe.
 *
 * @since 0.995.0
 */
static class ArrayIterator implements IteratorValue {
    ArrayValue array;
    long cursor = 0;
    long length;

    ArrayIterator(ArrayValue value) {
        this.array = value;
        // Length is fixed at construction; later growth of the array is not iterated.
        this.length = value.size();
    }

    @Override
    public Object next() {
        long cursor = this.cursor++;
        if (cursor >= length) {
            return null;
        }
        return array.getValue(cursor);
    }

    @Override
    public boolean hasNext() {
        return cursor < length;
    }
}
}
|
class ArrayValue implements RefValue, CollectionValue {
static final int SYSTEM_ARRAY_MAX = Integer.MAX_VALUE - 8;
protected BType arrayType;
private volatile Status freezeStatus = new Status(State.UNFROZEN);
/**
* The maximum size of arrays to allocate.
* <p>
* This is same as Java
*/
protected int maxArraySize = SYSTEM_ARRAY_MAX;
private static final int DEFAULT_ARRAY_SIZE = 100;
protected int size = 0;
Object[] refValues;
private long[] intValues;
private boolean[] booleanValues;
private byte[] byteValues;
private double[] floatValues;
private String[] stringValues;
public BType elementType;
private BType tupleRestType;
public ArrayValue(Object[] values, BType type) {
this.refValues = values;
this.arrayType = type;
this.size = values.length;
if (type.getTag() == TypeTags.ARRAY_TAG) {
this.elementType = ((BArrayType) type).getElementType();
}
}
public ArrayValue(long[] values) {
this.intValues = values;
this.size = values.length;
setArrayElementType(BTypes.typeInt);
}
public ArrayValue(boolean[] values) {
this.booleanValues = values;
this.size = values.length;
setArrayElementType(BTypes.typeBoolean);
}
public ArrayValue(byte[] values) {
this.byteValues = values;
this.size = values.length;
setArrayElementType(BTypes.typeByte);
}
public ArrayValue(double[] values) {
this.floatValues = values;
this.size = values.length;
setArrayElementType(BTypes.typeFloat);
}
public ArrayValue(String[] values) {
this.stringValues = values;
this.size = values.length;
setArrayElementType(BTypes.typeString);
}
public ArrayValue(BType type) {
if (type.getTag() == TypeTags.INT_TAG) {
intValues = (long[]) newArrayInstance(Long.TYPE);
setArrayElementType(type);
} else if (type.getTag() == TypeTags.BOOLEAN_TAG) {
booleanValues = (boolean[]) newArrayInstance(Boolean.TYPE);
setArrayElementType(type);
} else if (type.getTag() == TypeTags.BYTE_TAG) {
byteValues = (byte[]) newArrayInstance(Byte.TYPE);
setArrayElementType(type);
} else if (type.getTag() == TypeTags.FLOAT_TAG) {
floatValues = (double[]) newArrayInstance(Double.TYPE);
setArrayElementType(type);
} else if (type.getTag() == TypeTags.STRING_TAG) {
stringValues = (String[]) newArrayInstance(String.class);
setArrayElementType(type);
} else {
this.arrayType = type;
if (type.getTag() == TypeTags.ARRAY_TAG) {
BArrayType arrayType = (BArrayType) type;
this.elementType = arrayType.getElementType();
if (arrayType.getState() == ArrayState.CLOSED_SEALED) {
this.size = maxArraySize = arrayType.getSize();
}
initArrayValues(this.elementType);
} else if (type.getTag() == TypeTags.TUPLE_TAG) {
BTupleType tupleType = (BTupleType) type;
tupleRestType = tupleType.getRestType();
size = tupleType.getTupleTypes().size();
maxArraySize = (tupleRestType != null) ? maxArraySize : size;
refValues = (Object[]) newArrayInstance(Object.class);
AtomicInteger counter = new AtomicInteger(0);
tupleType.getTupleTypes()
.forEach(memType -> refValues[counter.getAndIncrement()] = memType.getEmptyValue());
} else if (type.getTag() == TypeTags.UNION_TAG) {
BUnionType unionType = (BUnionType) type;
this.size = maxArraySize = unionType.getMemberTypes().size();
unionType.getMemberTypes().forEach(this::initArrayValues);
} else {
refValues = (Object[]) newArrayInstance(Object.class);
}
}
}
private void initArrayValues(BType elementType) {
switch (elementType.getTag()) {
case TypeTags.INT_TAG:
intValues = (long[]) newArrayInstance(Long.TYPE);
break;
case TypeTags.FLOAT_TAG:
floatValues = (double[]) newArrayInstance(Double.TYPE);
break;
case TypeTags.STRING_TAG:
stringValues = (String[]) newArrayInstance(String.class);
break;
case TypeTags.BOOLEAN_TAG:
booleanValues = (boolean[]) newArrayInstance(Boolean.TYPE);
break;
case TypeTags.BYTE_TAG:
byteValues = (byte[]) newArrayInstance(Byte.TYPE);
break;
case TypeTags.XML_TAG:
refValues = (Object[]) newArrayInstance(Object.class);
break;
default:
refValues = (Object[]) newArrayInstance(Object.class);
}
}
public ArrayValue() {
refValues = (Object[]) newArrayInstance(Object.class);
}
public ArrayValue(BType type, long size) {
this.arrayType = type;
if (type.getTag() == TypeTags.ARRAY_TAG) {
elementType = ((BArrayType) type).getElementType();
if (size != -1) {
this.size = maxArraySize = (int) size;
}
initArrayValues(elementType);
} else if (type.getTag() == TypeTags.TUPLE_TAG) {
tupleRestType = ((BTupleType) type).getRestType();
if (size != -1) {
this.size = (int) size;
maxArraySize = (tupleRestType != null) ? maxArraySize : (int) size;
}
refValues = (Object[]) newArrayInstance(Object.class);
} else {
if (size != -1) {
this.size = maxArraySize = (int) size;
}
refValues = (Object[]) newArrayInstance(Object.class);
}
}
public Object getValue(long index) {
if (elementType != null) {
if (elementType.getTag() == TypeTags.INT_TAG) {
return getInt(index);
} else if (elementType.getTag() == TypeTags.BOOLEAN_TAG) {
return getBoolean(index);
} else if (elementType.getTag() == TypeTags.BYTE_TAG) {
return getByte(index);
} else if (elementType.getTag() == TypeTags.FLOAT_TAG) {
return getFloat(index);
} else if (elementType.getTag() == TypeTags.STRING_TAG) {
return getString(index);
} else {
return getRefValue(index);
}
}
return getRefValue(index);
}
public Object getRefValue(long index) {
rangeCheckForGet(index, size);
if (refValues == null) {
return getValue(index);
}
return refValues[(int) index];
}
public long getInt(long index) {
rangeCheckForGet(index, size);
if (elementType.getTag() == TypeTags.INT_TAG) {
return intValues[(int) index];
} else {
return (Long) refValues[(int) index];
}
}
public boolean getBoolean(long index) {
rangeCheckForGet(index, size);
if (elementType.getTag() == TypeTags.BOOLEAN_TAG) {
return booleanValues[(int) index];
} else {
return (Boolean) refValues[(int) index];
}
}
public byte getByte(long index) {
rangeCheckForGet(index, size);
if (elementType.getTag() == TypeTags.BYTE_TAG) {
return byteValues[(int) index];
} else {
return (Byte) refValues[(int) index];
}
}
public double getFloat(long index) {
rangeCheckForGet(index, size);
if (elementType.getTag() == TypeTags.FLOAT_TAG) {
return floatValues[(int) index];
} else {
return (Double) refValues[(int) index];
}
}
public String getString(long index) {
rangeCheckForGet(index, size);
if (elementType.getTag() == TypeTags.STRING_TAG) {
return stringValues[(int) index];
} else {
return (String) refValues[(int) index];
}
}
public Object get(long index) {
rangeCheckForGet(index, size);
switch (this.elementType.getTag()) {
case TypeTags.INT_TAG:
return intValues[(int) index];
case TypeTags.BOOLEAN_TAG:
return booleanValues[(int) index];
case TypeTags.BYTE_TAG:
return byteValues[(int) index];
case TypeTags.FLOAT_TAG:
return floatValues[(int) index];
case TypeTags.STRING_TAG:
return stringValues[(int) index];
default:
return refValues[(int) index];
}
}
public void add(long index, Object value) {
handleFrozenArrayValue();
prepareForAdd(index, refValues.length);
refValues[(int) index] = value;
}
public void add(long index, long value) {
handleFrozenArrayValue();
prepareForAdd(index, intValues.length);
intValues[(int) index] = value;
}
/**
 * Adds a boolean element at the given index.
 *
 * <p>When the element type is {@code int}, the boolean is stored in the long
 * backing array as 1 (true) / 0 (false).
 *
 * <p>Fix: the int-element branch previously called {@code add(index, value)},
 * which re-enters this same boolean overload and recurses until the stack
 * overflows. It now forwards an actual long value to the long overload.
 */
public void add(long index, boolean value) {
    if (elementType.getTag() == TypeTags.INT_TAG) {
        add(index, value ? 1L : 0L);
        return;
    }
    handleFrozenArrayValue();
    prepareForAdd(index, booleanValues.length);
    booleanValues[(int) index] = value;
}
public void add(long index, byte value) {
handleFrozenArrayValue();
prepareForAdd(index, byteValues.length);
byteValues[(int) index] = value;
}
public void add(long index, double value) {
handleFrozenArrayValue();
prepareForAdd(index, floatValues.length);
floatValues[(int) index] = value;
}
public void add(long index, String value) {
handleFrozenArrayValue();
prepareForAdd(index, stringValues.length);
stringValues[(int) index] = value;
}
public void append(Object value) {
add(size, value);
}
public Object shift(long index) {
handleFrozenArrayValue();
Object val = get(index);
shiftArray((int) index, getArrayFromType(elementType.getTag()));
return val;
}
private void shiftArray(int index, Object arr) {
int nElemsToBeMoved = this.size - 1 - index;
if (nElemsToBeMoved >= 0) {
System.arraycopy(arr, index + 1, arr, index, nElemsToBeMoved);
}
this.size--;
}
public void unshift(long index, ArrayValue vals) {
handleFrozenArrayValue();
unshiftArray(index, vals.size, getCurrentArrayLength());
switch (elementType.getTag()) {
case TypeTags.INT_TAG:
addToIntArray(vals, (int) index);
break;
case TypeTags.BOOLEAN_TAG:
addToBooleanArray(vals, (int) index);
break;
case TypeTags.BYTE_TAG:
addToByteArray(vals, (int) index);
break;
case TypeTags.FLOAT_TAG:
addToFloatArray(vals, (int) index);
break;
case TypeTags.STRING_TAG:
addToStringArray(vals, (int) index);
break;
default:
addToRefArray(vals, (int) index);
}
}
private void addToIntArray(ArrayValue vals, int startIndex) {
int endIndex = startIndex + vals.size;
for (int i = startIndex, j = 0; i < endIndex; i++, j++) {
add(i, vals.getInt(j));
}
}
private void addToFloatArray(ArrayValue vals, int startIndex) {
int endIndex = startIndex + vals.size;
for (int i = startIndex, j = 0; i < endIndex; i++, j++) {
add(i, vals.getFloat(j));
}
}
private void addToStringArray(ArrayValue vals, int startIndex) {
int endIndex = startIndex + vals.size;
for (int i = startIndex, j = 0; i < endIndex; i++, j++) {
add(i, vals.getString(j));
}
}
private void addToByteArray(ArrayValue vals, int startIndex) {
int endIndex = startIndex + vals.size;
byte[] bytes = vals.getBytes();
for (int i = startIndex, j = 0; i < endIndex; i++, j++) {
this.byteValues[i] = bytes[j];
}
}
private void addToBooleanArray(ArrayValue vals, int startIndex) {
int endIndex = startIndex + vals.size;
for (int i = startIndex, j = 0; i < endIndex; i++, j++) {
add(i, vals.getBoolean(j));
}
}
private void addToRefArray(ArrayValue vals, int startIndex) {
int endIndex = startIndex + vals.size;
for (int i = startIndex, j = 0; i < endIndex; i++, j++) {
add(i, vals.getRefValue(j));
}
}
private void unshiftArray(long index, int unshiftByN, int arrLength) {
int lastIndex = size() + unshiftByN - 1;
prepareForConsecutiveMultiAdd(lastIndex, arrLength);
Object arr = getArrayFromType(elementType.getTag());
if (index > lastIndex) {
throw BLangExceptionHelper.getRuntimeException(BallerinaErrorReasons.INDEX_OUT_OF_RANGE_ERROR,
RuntimeErrors.INDEX_NUMBER_TOO_LARGE, index);
}
int i = (int) index;
System.arraycopy(arr, i, arr, i + unshiftByN, this.size - i);
}
private Object getArrayFromType(int typeTag) {
switch (typeTag) {
case TypeTags.INT_TAG:
return intValues;
case TypeTags.BOOLEAN_TAG:
return booleanValues;
case TypeTags.BYTE_TAG:
return byteValues;
case TypeTags.FLOAT_TAG:
return floatValues;
case TypeTags.STRING_TAG:
return stringValues;
default:
return refValues;
}
}
private int getCurrentArrayLength() {
switch (elementType.getTag()) {
case TypeTags.INT_TAG:
return intValues.length;
case TypeTags.BOOLEAN_TAG:
return booleanValues.length;
case TypeTags.BYTE_TAG:
return byteValues.length;
case TypeTags.FLOAT_TAG:
return floatValues.length;
case TypeTags.STRING_TAG:
return stringValues.length;
default:
return refValues.length;
}
}
/**
 * Renders the array as a space-separated string of its elements.
 *
 * <p>Primitive-backed arrays are formatted directly from their backing
 * arrays; JSON arrays are serialized via {@link #getJSONString()}; all other
 * ref-value arrays are rendered element-by-element, printing {@code ()} for
 * null slots.
 *
 * <p>Fix: the previous code chose between two StringJoiner instances based on
 * whether this is a tuple, but both branches constructed an identical
 * {@code new StringJoiner(" ")} -- the conditional was dead code and has been
 * collapsed into a single construction.
 */
@Override
public String stringValue() {
    if (elementType != null) {
        StringJoiner sj = new StringJoiner(" ");
        if (elementType.getTag() == TypeTags.INT_TAG) {
            for (int i = 0; i < size; i++) {
                sj.add(Long.toString(intValues[i]));
            }
            return sj.toString();
        } else if (elementType.getTag() == TypeTags.BOOLEAN_TAG) {
            for (int i = 0; i < size; i++) {
                sj.add(Boolean.toString(booleanValues[i]));
            }
            return sj.toString();
        } else if (elementType.getTag() == TypeTags.BYTE_TAG) {
            for (int i = 0; i < size; i++) {
                sj.add(Long.toString(Byte.toUnsignedLong(byteValues[i])));
            }
            return sj.toString();
        } else if (elementType.getTag() == TypeTags.FLOAT_TAG) {
            for (int i = 0; i < size; i++) {
                sj.add(Double.toString(floatValues[i]));
            }
            return sj.toString();
        } else if (elementType.getTag() == TypeTags.STRING_TAG) {
            for (int i = 0; i < size; i++) {
                sj.add(stringValues[i]);
            }
            return sj.toString();
        }
    }
    if (getElementType(arrayType).getTag() == TypeTags.JSON_TAG) {
        return getJSONString();
    }
    StringJoiner sj = new StringJoiner(" ");
    for (int i = 0; i < size; i++) {
        if (refValues[i] != null) {
            sj.add((refValues[i] instanceof RefValue) ? ((RefValue) refValues[i]).stringValue() :
                    (refValues[i] instanceof String) ? (String) refValues[i] : refValues[i].toString());
        } else {
            sj.add("()");
        }
    }
    return sj.toString();
}
@Override
public BType getType() {
return arrayType;
}
@Override
public int size() {
return size;
}
public boolean isEmpty() {
return size == 0;
}
@Override
public void stamp(BType type, List<TypeValuePair> unresolvedValues) {
if (type.getTag() == TypeTags.TUPLE_TAG) {
if (elementType != null && isBasicType(elementType)) {
moveBasicTypeArrayToRefValueArray();
}
Object[] arrayValues = this.getValues();
for (int i = 0; i < this.size(); i++) {
if (arrayValues[i] instanceof RefValue) {
BType memberType = ((BTupleType) type).getTupleTypes().get(i);
if (memberType.getTag() == TypeTags.ANYDATA_TAG || memberType.getTag() == TypeTags.JSON_TAG) {
memberType = TypeConverter.resolveMatchingTypeForUnion(arrayValues[i], memberType);
((BTupleType) type).getTupleTypes().set(i, memberType);
}
((RefValue) arrayValues[i]).stamp(memberType, unresolvedValues);
}
}
} else if (type.getTag() == TypeTags.JSON_TAG) {
if (elementType != null && isBasicType(elementType) && !isBasicType(type)) {
moveBasicTypeArrayToRefValueArray();
this.arrayType = new BArrayType(type);
return;
}
Object[] arrayValues = this.getValues();
for (int i = 0; i < this.size(); i++) {
if (arrayValues[i] instanceof RefValue) {
((RefValue) arrayValues[i]).stamp(TypeConverter.resolveMatchingTypeForUnion(arrayValues[i], type),
unresolvedValues);
}
}
type = new BArrayType(type);
} else if (type.getTag() == TypeTags.UNION_TAG) {
for (BType memberType : ((BUnionType) type).getMemberTypes()) {
if (TypeChecker.checkIsLikeType(this, memberType, new ArrayList<>())) {
this.stamp(memberType, unresolvedValues);
type = memberType;
break;
}
}
} else if (type.getTag() == TypeTags.ANYDATA_TAG) {
type = TypeConverter.resolveMatchingTypeForUnion(this, type);
this.stamp(type, unresolvedValues);
} else {
BType arrayElementType = ((BArrayType) type).getElementType();
if (elementType != null && isBasicType(elementType)) {
if (isBasicType(arrayElementType)) {
this.arrayType = type;
return;
}
moveBasicTypeArrayToRefValueArray();
this.arrayType = type;
return;
}
if (isBasicType(arrayElementType) &&
(arrayType.getTag() == TypeTags.TUPLE_TAG || !isBasicType(elementType))) {
moveRefValueArrayToBasicTypeArray(type, arrayElementType);
return;
}
Object[] arrayValues = this.getValues();
for (int i = 0; i < this.size(); i++) {
if (arrayValues[i] instanceof RefValue) {
((RefValue) arrayValues[i]).stamp(arrayElementType, unresolvedValues);
}
}
}
this.arrayType = type;
}
@Override
public Object copy(Map<Object, Object> refs) {
if (isFrozen()) {
return this;
}
if (refs.containsKey(this)) {
return refs.get(this);
}
if (elementType != null) {
ArrayValue valueArray = null;
if (elementType.getTag() == TypeTags.INT_TAG) {
valueArray = new ArrayValue(Arrays.copyOf(intValues, intValues.length));
} else if (elementType.getTag() == TypeTags.BOOLEAN_TAG) {
valueArray = new ArrayValue(Arrays.copyOf(booleanValues, booleanValues.length));
} else if (elementType.getTag() == TypeTags.BYTE_TAG) {
valueArray = new ArrayValue(Arrays.copyOf(byteValues, byteValues.length));
} else if (elementType.getTag() == TypeTags.FLOAT_TAG) {
valueArray = new ArrayValue(Arrays.copyOf(floatValues, floatValues.length));
} else if (elementType.getTag() == TypeTags.STRING_TAG) {
valueArray = new ArrayValue(Arrays.copyOf(stringValues, stringValues.length));
}
if (valueArray != null) {
valueArray.size = this.size;
refs.put(this, valueArray);
return valueArray;
}
}
Object[] values = new Object[size];
ArrayValue refValueArray = new ArrayValue(values, arrayType);
refValueArray.size = this.size;
refs.put(this, refValueArray);
int bound = this.size;
IntStream.range(0, bound).forEach(i -> {
Object value = this.refValues[i];
if (value instanceof RefValue) {
values[i] = ((RefValue) value).copy(refs);
} else {
values[i] = value;
}
});
return refValueArray;
}
@Override
public Object frozenCopy(Map<Object, Object> refs) {
ArrayValue copy = (ArrayValue) copy(refs);
if (!copy.isFrozen()) {
copy.freezeDirect();
}
return copy;
}
@Override
public String toString() {
return stringValue();
}
public Object[] getValues() {
return refValues;
}
public byte[] getBytes() {
byte[] bytes = new byte[this.size];
System.arraycopy(byteValues, 0, bytes, 0, this.size);
return bytes;
}
public String[] getStringArray() {
return Arrays.copyOf(stringValues, size);
}
public long[] getLongArray() {
return Arrays.copyOf(intValues, size);
}
@Override
public void serialize(OutputStream outputStream) {
if (elementType.getTag() == TypeTags.BYTE_TAG) {
try {
outputStream.write(byteValues);
} catch (IOException e) {
throw new BallerinaException("error occurred while writing the binary content to the output stream", e);
}
} else {
try {
outputStream.write(this.toString().getBytes(Charset.defaultCharset()));
} catch (IOException e) {
throw new BallerinaException("error occurred while serializing data", e);
}
}
}
public void resizeInternalArray(int newLength) {
if (arrayType.getTag() == TypeTags.TUPLE_TAG) {
refValues = Arrays.copyOf(refValues, newLength);
} else {
if (elementType != null) {
switch (elementType.getTag()) {
case TypeTags.INT_TAG:
intValues = Arrays.copyOf(intValues, newLength);
break;
case TypeTags.BOOLEAN_TAG:
booleanValues = Arrays.copyOf(booleanValues, newLength);
break;
case TypeTags.BYTE_TAG:
byteValues = Arrays.copyOf(byteValues, newLength);
break;
case TypeTags.FLOAT_TAG:
floatValues = Arrays.copyOf(floatValues, newLength);
break;
case TypeTags.STRING_TAG:
stringValues = Arrays.copyOf(stringValues, newLength);
break;
default:
refValues = Arrays.copyOf(refValues, newLength);
break;
}
} else {
refValues = Arrays.copyOf(refValues, newLength);
}
}
}
private void fillValues(int index) {
if (index <= size) {
return;
}
if (arrayType.getTag() == TypeTags.TUPLE_TAG) {
if (tupleRestType != null) {
Arrays.fill(refValues, size, index, tupleRestType.getZeroValue());
}
} else {
int typeTag = elementType.getTag();
if (typeTag == TypeTags.STRING_TAG) {
Arrays.fill(stringValues, size, index, BLangConstants.STRING_EMPTY_VALUE);
return;
}
if (typeTag == TypeTags.INT_TAG || typeTag == TypeTags.BYTE_TAG || typeTag == TypeTags.FLOAT_TAG ||
typeTag == TypeTags.BOOLEAN_TAG) {
return;
}
Arrays.fill(refValues, size, index, elementType.getZeroValue());
}
}
public BType getArrayType() {
return arrayType;
}
// Bounds check for read access: first applies the generic range check, then
// additionally rejects reads at or beyond the current logical size, raising a
// tuple- or array-specific out-of-range error depending on the array type.
private void rangeCheckForGet(long index, int size) {
rangeCheck(index, size);
if (index < 0 || index >= size) {
if (arrayType != null && arrayType.getTag() == TypeTags.TUPLE_TAG) {
throw BLangExceptionHelper.getRuntimeException(BallerinaErrorReasons.INDEX_OUT_OF_RANGE_ERROR,
RuntimeErrors.TUPLE_INDEX_OUT_OF_RANGE, index, size);
}
throw BLangExceptionHelper.getRuntimeException(BallerinaErrorReasons.INDEX_OUT_OF_RANGE_ERROR,
RuntimeErrors.ARRAY_INDEX_OUT_OF_RANGE, index, size);
}
}
private void rangeCheck(long index, int size) {
if (index > Integer.MAX_VALUE || index < Integer.MIN_VALUE) {
throw BLangExceptionHelper.getRuntimeException(BallerinaErrorReasons.INDEX_OUT_OF_RANGE_ERROR,
RuntimeErrors.INDEX_NUMBER_TOO_LARGE, index);
}
if (arrayType != null && arrayType.getTag() == TypeTags.TUPLE_TAG) {
if ((((BTupleType) arrayType).getRestType() == null && index >= maxArraySize) || (int) index < 0) {
throw BLangExceptionHelper.getRuntimeException(BallerinaErrorReasons.INDEX_OUT_OF_RANGE_ERROR,
RuntimeErrors.TUPLE_INDEX_OUT_OF_RANGE, index, size);
}
} else {
if ((int) index < 0 || index >= maxArraySize) {
throw BLangExceptionHelper.getRuntimeException(BallerinaErrorReasons.INDEX_OUT_OF_RANGE_ERROR,
RuntimeErrors.ARRAY_INDEX_OUT_OF_RANGE, index, size);
}
}
}
private void fillerValueCheck(int index, int size) {
if (arrayType != null && arrayType.getTag() == TypeTags.TUPLE_TAG) {
if (!TypeChecker.hasFillerValue(tupleRestType) && (index > size)) {
throw BLangExceptionHelper.getRuntimeException(BallerinaErrorReasons.ILLEGAL_LIST_INSERTION_ERROR,
RuntimeErrors.ILLEGAL_TUPLE_INSERTION, size, index + 1);
}
} else {
if (!TypeChecker.hasFillerValue(elementType) && (index > size)) {
throw BLangExceptionHelper.getRuntimeException(BallerinaErrorReasons.ILLEGAL_LIST_INSERTION_ERROR,
RuntimeErrors.ILLEGAL_ARRAY_INSERTION, size, index + 1);
}
}
}
Object newArrayInstance(Class<?> componentType) {
return (size > 0) ?
Array.newInstance(componentType, size) : Array.newInstance(componentType, DEFAULT_ARRAY_SIZE);
}
private void setArrayElementType(BType type) {
this.arrayType = new BArrayType(type);
this.elementType = type;
}
public String getJSONString() {
ByteArrayOutputStream byteOut = new ByteArrayOutputStream();
JSONGenerator gen = new JSONGenerator(byteOut);
try {
gen.serialize(this);
gen.flush();
} catch (IOException e) {
throw new BallerinaException("Error in converting JSON to a string: " + e.getMessage(), e);
}
return new String(byteOut.toByteArray());
}
private BType getElementType(BType type) {
if (type.getTag() != TypeTags.ARRAY_TAG) {
return type;
}
return getElementType(((BArrayType) type).getElementType());
}
/**
* Util method to handle frozen array values.
*/
private void handleFrozenArrayValue() {
synchronized (this) {
try {
if (this.freezeStatus.getState() != State.UNFROZEN) {
FreezeUtils.handleInvalidUpdate(freezeStatus.getState());
}
} catch (BLangFreezeException e) {
throw BallerinaErrors.createError(e.getMessage(), e.getDetail());
}
}
}
protected void prepareForAdd(long index, int currentArraySize) {
int intIndex = (int) index;
rangeCheck(index, size);
fillerValueCheck(intIndex, size);
ensureCapacity(intIndex + 1, currentArraySize);
fillValues(intIndex);
resetSize(intIndex);
}
/**
* Same as {@code prepareForAdd}, except fillerValueCheck is not performed as we are guaranteed to add
* elements to consecutive positions.
*
* @param index last index after add operation completes
* @param currentArraySize current array size
*/
void prepareForConsecutiveMultiAdd(long index, int currentArraySize) {
int intIndex = (int) index;
rangeCheck(index, size);
ensureCapacity(intIndex + 1, currentArraySize);
resetSize(intIndex);
}
private void ensureCapacity(int requestedCapacity, int currentArraySize) {
if ((requestedCapacity) - currentArraySize > 0) {
if ((this.arrayType.getTag() == TypeTags.ARRAY_TAG
&& ((BArrayType) this.arrayType).getState() == ArrayState.UNSEALED)
|| this.arrayType.getTag() == TypeTags.TUPLE_TAG) {
int newArraySize = currentArraySize + (currentArraySize >> 1);
newArraySize = Math.max(newArraySize, requestedCapacity);
newArraySize = Math.min(newArraySize, maxArraySize);
resizeInternalArray(newArraySize);
}
}
}
private void resetSize(int index) {
if (index >= size) {
size = index + 1;
}
}
/**
* {@inheritDoc}
*/
@Override
public synchronized void attemptFreeze(Status freezeStatus) {
if (!FreezeUtils.isOpenForFreeze(this.freezeStatus, freezeStatus)) {
return;
}
this.freezeStatus = freezeStatus;
if (elementType == null || elementType.getTag() > TypeTags.BOOLEAN_TAG) {
for (int i = 0; i < this.size; i++) {
Object value = this.getRefValue(i);
if (value instanceof RefValue) {
((RefValue) value).attemptFreeze(freezeStatus);
}
}
}
}
/**
* {@inheritDoc}
*/
@Override
public void freezeDirect() {
if (isFrozen()) {
return;
}
this.freezeStatus.setFrozen();
if (elementType == null || elementType.getTag() > TypeTags.BOOLEAN_TAG) {
for (int i = 0; i < this.size; i++) {
Object value = this.getRefValue(i);
if (value instanceof RefValue) {
((RefValue) value).freezeDirect();
}
}
}
}
/**
* {@inheritDoc}
*/
@Override
public synchronized boolean isFrozen() {
return this.freezeStatus.isFrozen();
}
private boolean isBasicType(BType type) {
return type.getTag() <= TypeTags.BOOLEAN_TAG && type.getTag() != TypeTags.DECIMAL_TAG;
}
private void moveBasicTypeArrayToRefValueArray() {
refValues = new Object[this.size];
if (elementType == BTypes.typeBoolean) {
for (int i = 0; i < this.size(); i++) {
refValues[i] = booleanValues[i];
}
booleanValues = null;
}
if (elementType == BTypes.typeInt) {
for (int i = 0; i < this.size(); i++) {
refValues[i] = intValues[i];
}
intValues = null;
}
if (elementType == BTypes.typeString) {
System.arraycopy(stringValues, 0, refValues, 0, this.size());
stringValues = null;
}
if (elementType == BTypes.typeFloat) {
for (int i = 0; i < this.size(); i++) {
refValues[i] = floatValues[i];
}
floatValues = null;
}
if (elementType == BTypes.typeByte) {
for (int i = 0; i < this.size(); i++) {
refValues[i] = (byteValues[i]);
}
byteValues = null;
}
elementType = null;
}
/**
 * Migrates boxed values out of {@code refValues} into the primitive backing
 * array matching {@code arrayElementType}, records the new element/array
 * types, and drops the ref array.
 *
 * <p>Fix: the FLOAT branch previously cast the boxed element to {@code float}.
 * Ballerina floats are stored as {@code java.lang.Double}, so an
 * Object-to-float cast throws {@code ClassCastException} (and would lose
 * precision even if it did not); the cast is now to {@code double}.
 *
 * @param type             the new array type to record
 * @param arrayElementType the element type deciding which primitive store to use
 */
private void moveRefValueArrayToBasicTypeArray(BType type, BType arrayElementType) {
    Object[] arrayValues = this.getValues();
    switch (arrayElementType.getTag()) {
        case TypeTags.INT_TAG:
            intValues = (long[]) newArrayInstance(Long.TYPE);
            for (int i = 0; i < this.size(); i++) {
                intValues[i] = ((long) arrayValues[i]);
            }
            break;
        case TypeTags.FLOAT_TAG:
            floatValues = (double[]) newArrayInstance(Double.TYPE);
            for (int i = 0; i < this.size(); i++) {
                floatValues[i] = ((double) arrayValues[i]);
            }
            break;
        case TypeTags.BOOLEAN_TAG:
            booleanValues = new boolean[this.size()];
            for (int i = 0; i < this.size(); i++) {
                booleanValues[i] = ((boolean) arrayValues[i]);
            }
            break;
        case TypeTags.STRING_TAG:
            stringValues = (String[]) newArrayInstance(String.class);
            for (int i = 0; i < this.size(); i++) {
                stringValues[i] = (String) arrayValues[i];
            }
            break;
        case TypeTags.BYTE_TAG:
            byteValues = (byte[]) newArrayInstance(Byte.TYPE);
            for (int i = 0; i < this.size(); i++) {
                byteValues[i] = (byte) arrayValues[i];
            }
            break;
        default:
            // No primitive representation for this tag; nothing to migrate.
            break;
    }
    this.elementType = arrayElementType;
    this.arrayType = type;
    refValues = null;
}
@Override
public IteratorValue getIterator() {
return new ArrayIterator(this);
}
public void setLength(long length) {
handleFrozenArrayValue();
int newLength = (int) length;
checkFixedLength(length);
rangeCheck(length, size);
fillerValueCheck(newLength, size);
resizeInternalArray(newLength);
fillValues(newLength);
size = newLength;
}
/**
 * {@code ArrayIterator} provides iterator implementation for Ballerina array values.
 *
 * <p>Fix: {@code next()} previously compared the cursor with {@code ==}, so calling
 * {@code next()} again after it had already returned {@code null} walked past the end
 * and triggered an out-of-range failure instead of returning {@code null} again.
 * The comparison is now {@code >=}, making repeated calls on an exhausted iterator safe.
 *
 * @since 0.995.0
 */
static class ArrayIterator implements IteratorValue {
    ArrayValue array;
    long cursor = 0;
    long length;

    ArrayIterator(ArrayValue value) {
        this.array = value;
        // Length is fixed at construction; later growth of the array is not iterated.
        this.length = value.size();
    }

    @Override
    public Object next() {
        long cursor = this.cursor++;
        if (cursor >= length) {
            return null;
        }
        return array.getValue(cursor);
    }

    @Override
    public boolean hasNext() {
        return cursor < length;
    }
}
}
|
I find the way we check for incompatibility confusing at the moment. There are at least two places that throw a similar exception: 1. `VersionedIOReadableWritable`, from `getIncompatibleVersionError`; 2. here, from the `read` method. I think 1) is actually dead code now, since we declare in `TypeSerializerSnapshotSerializationProxy` that we support version 1. Moreover, I find the method `KeyedBackendSerializationProxy#getIncompatibleVersionError` misleading. At the very least, the `Ops, this should not happen...` part is unnecessary: you already get a message from `VersionedIOReadableWritable` saying the version is unsupported, so this text adds no context. Lastly, if you keep the `getIncompatibleVersionError` method, I'd rename it to something along the lines of "extra information", e.g. `getAdditionalDetailsForIncompatibleVersion`.
|
/**
 * Restores the serializer snapshot previously written by {@code write}.
 *
 * <p>{@code super.read} first consumes the proxy's own format version; the
 * switch below then dispatches on that version.
 *
 * @param in source view positioned at a serialized snapshot
 * @throws IOException on read errors, on the dropped v1 format, or on an
 *     unrecognized version
 */
public void read(DataInputView in) throws IOException {
super.read(in);
final int version = getReadVersion();
switch (version) {
case 2:
// Current format: delegate to the versioned-snapshot reader.
serializerSnapshot = deserializeV2(in, userCodeClassLoader);
break;
case 1:
// v1 carried the legacy TypeSerializerConfigSnapshot layout, whose
// support has been dropped; the message points users at the migration path.
throw new IOException(
String.format(
"No longer supported version [%d] for TypeSerializerSnapshot. "
+ "Please migrate away from the old TypeSerializerConfigSnapshot "
+ "and use Flink 1.16 for the migration",
version));
default:
throw new IOException(
"Unrecognized version for TypeSerializerSnapshot format: " + version);
}
}
|
version));
|
/**
 * Restores the serializer snapshot previously written by {@code write}.
 *
 * <p>{@code super.read} consumes the proxy's own format version first; only
 * the current (v2) layout is understood here. NOTE(review): presumably
 * versions outside getCompatibleVersions() are already rejected inside
 * super.read -- confirm against VersionedIOReadableWritable.
 *
 * @param in source view positioned at a serialized snapshot
 * @throws IOException on read errors or an unrecognized version
 */
public void read(DataInputView in) throws IOException {
    super.read(in);
    final int readVersion = getReadVersion();
    if (readVersion == 2) {
        serializerSnapshot = deserializeV2(in, userCodeClassLoader);
    } else {
        throw new IOException(
                "Unrecognized version for TypeSerializerSnapshot format: " + readVersion);
    }
}
|
/**
 * Serialization proxy for a {@link TypeSerializerSnapshot}: pairs the snapshot with the
 * version handling of {@link VersionedIOReadableWritable} so it can be written to and
 * restored from serialized streams.
 *
 * @param <T> the type handled by the wrapped serializer snapshot
 */
class TypeSerializerSnapshotSerializationProxy<T>
        extends VersionedIOReadableWritable {

    /** Current version of this proxy's own binary layout. */
    private static final int VERSION = 2;

    private ClassLoader userCodeClassLoader;
    private TypeSerializerSnapshot<T> serializerSnapshot;
    @Nullable private TypeSerializer<T> serializer;

    /** Constructor for reading serializers. */
    TypeSerializerSnapshotSerializationProxy(
            ClassLoader userCodeClassLoader,
            @Nullable TypeSerializer<T> existingPriorSerializer) {
        this.userCodeClassLoader = Preconditions.checkNotNull(userCodeClassLoader);
        this.serializer = existingPriorSerializer;
    }

    /** Constructor for writing out serializers. */
    TypeSerializerSnapshotSerializationProxy(
            TypeSerializerSnapshot<T> serializerConfigSnapshot, TypeSerializer<T> serializer) {
        this.serializerSnapshot = Preconditions.checkNotNull(serializerConfigSnapshot);
        this.serializer = Preconditions.checkNotNull(serializer);
    }

    /**
     * Binary format layout of a written serializer snapshot is as follows:
     *
     * <ul>
     *   <li>1. Format version of this util.
     *   <li>2. Name of the TypeSerializerSnapshot class.
     *   <li>3. The version of the TypeSerializerSnapshot's binary format.
     *   <li>4. The actual serializer snapshot data.
     * </ul>
     */
    @SuppressWarnings("deprecation")
    @Override
    public void write(DataOutputView out) throws IOException {
        super.write(out);
        TypeSerializerSnapshot.writeVersionedSnapshot(out, serializerSnapshot);
    }

    // Fix: a stray @SuppressWarnings("unchecked") and a duplicated @Override were left
    // dangling here (apparently where a read(...) method was removed). @Override is not
    // a repeatable annotation, so the duplicate does not compile; a single @Override on
    // getVersion remains.
    @Override
    public int getVersion() {
        return VERSION;
    }

    @Override
    public int[] getCompatibleVersions() {
        return new int[] {VERSION, 1};
    }

    TypeSerializerSnapshot<T> getSerializerSnapshot() {
        return serializerSnapshot;
    }

    /** Deserialization path for Flink versions 1.7+. */
    @VisibleForTesting
    static <T> TypeSerializerSnapshot<T> deserializeV2(DataInputView in, ClassLoader cl)
            throws IOException {
        return TypeSerializerSnapshot.readVersionedSnapshot(in, cl);
    }
}
|
/**
 * Versioned serialization proxy for a {@link TypeSerializerSnapshot}. This revision only
 * accepts format version 2 and reports a dedicated migration message for version 1.
 */
class TypeSerializerSnapshotSerializationProxy<T>
        extends VersionedIOReadableWritable {

    /** Current binary format version of this proxy. */
    private static final int VERSION = 2;

    // Class loader used to resolve the snapshot class on the read path.
    private ClassLoader userCodeClassLoader;
    // The snapshot being read or written.
    private TypeSerializerSnapshot<T> serializerSnapshot;

    /** Constructor for reading serializers. */
    TypeSerializerSnapshotSerializationProxy(ClassLoader userCodeClassLoader) {
        this.userCodeClassLoader = Preconditions.checkNotNull(userCodeClassLoader);
    }

    /** Constructor for writing out serializers. */
    TypeSerializerSnapshotSerializationProxy(
            TypeSerializerSnapshot<T> serializerConfigSnapshot) {
        this.serializerSnapshot = Preconditions.checkNotNull(serializerConfigSnapshot);
    }

    /**
     * Binary format layout of a written serializer snapshot is as follows:
     *
     * <ul>
     *   <li>1. Format version of this util.
     *   <li>2. Name of the TypeSerializerSnapshot class.
     *   <li>3. The version of the TypeSerializerSnapshot's binary format.
     *   <li>4. The actual serializer snapshot data.
     * </ul>
     */
    @SuppressWarnings("deprecation")
    @Override
    public void write(DataOutputView out) throws IOException {
        super.write(out);
        TypeSerializerSnapshot.writeVersionedSnapshot(out, serializerSnapshot);
    }

    // Fix: the original had a stray @SuppressWarnings("unchecked") and a duplicated
    // @Override here; @Override is not a repeatable annotation, so the duplicate
    // does not compile. A single @Override is kept.
    @Override
    public int getVersion() {
        return VERSION;
    }

    @Override
    public int[] getCompatibleVersions() {
        // Version 1 is no longer compatible; see getAdditionalDetailsForIncompatibleVersion.
        return new int[] {VERSION};
    }

    @Override
    public Optional<String> getAdditionalDetailsForIncompatibleVersion(int readVersion) {
        if (readVersion == 1) {
            return Optional.of(
                    "As of Flink 1.17 TypeSerializerConfigSnapshot is no longer supported. "
                            + "In order to upgrade Flink to 1.17+ you need to first migrate your "
                            + "serializers to use TypeSerializerSnapshot instead. Please first take a "
                            + "savepoint in Flink 1.16 without using TypeSerializerConfigSnapshot. "
                            + "After that you can use this savepoint to upgrade to Flink 1.17.");
        }
        return Optional.empty();
    }

    TypeSerializerSnapshot<T> getSerializerSnapshot() {
        return serializerSnapshot;
    }

    /** Deserialization path for Flink versions 1.7+. */
    @VisibleForTesting
    static <T> TypeSerializerSnapshot<T> deserializeV2(DataInputView in, ClassLoader cl)
            throws IOException {
        return TypeSerializerSnapshot.readVersionedSnapshot(in, cl);
    }
}
|
Checkpoints are always correct because of the following properties: 1) shardIteratorsMap is always in a consistent state, i.e. it is guaranteed not to contain any two shards that are in a parent-child relation. This is a requirement for the current logic to properly traverse through splits and merges. 2) All records read from a particular shard are removed from the queue in the advance() method before its shardIterator is removed from shardIteratorsMap. This guarantees that checkpoints will contain a particular shardIterator at least as long as there are any unread records from that shard. So even if shardIteratorsMap is modified an unlimited number of times during any other action, it will always be consistent. The only risk of an outdated checkpoint is potential duplicates during restoration.
|
/**
 * Reports whether every shard handled by this pool has caught up with the stream tip.
 * isUpToDate() is deliberately invoked on every iterator (no short-circuiting), matching
 * the original accumulate-with-&= behavior.
 */
boolean allShardsUpToDate() {
  ImmutableMap<String, ShardRecordsIterator> iterators = shardIteratorsMap.get();
  boolean allUpToDate = true;
  for (ShardRecordsIterator iterator : iterators.values()) {
    if (!iterator.isUpToDate()) {
      allUpToDate = false;
    }
  }
  return allUpToDate;
}
|
shardsUpToDate &= shardRecordsIterator.isUpToDate();
|
/** Returns true only when every shard iterator handled by this pool reports it is up to date. */
boolean allShardsUpToDate() {
  boolean shardsUpToDate = true;
  ImmutableMap<String, ShardRecordsIterator> currentShardIterators = shardIteratorsMap.get();
  for (ShardRecordsIterator shardRecordsIterator : currentShardIterators.values()) {
    // Uses &= (not &&) so isUpToDate() is called for every shard, never short-circuited.
    shardsUpToDate &= shardRecordsIterator.isUpToDate();
  }
  return shardsUpToDate;
}
|
/**
 * Internal pool of reader threads, one per Kinesis shard, that continuously read records
 * from the shards handled by this pool into a shared bounded buffer.
 */
class ShardReadersPool {

  private static final Logger LOG = LoggerFactory.getLogger(ShardReadersPool.class);
  private static final int DEFAULT_CAPACITY_PER_SHARD = 10_000;

  /**
   * Executor service for running the threads that read records from shards handled by this pool.
   * Each thread runs the {@link ShardReadersPool#readLoop(ShardRecordsIterator)} method and
   * handles exactly one shard.
   */
  private final ExecutorService executorService;

  /**
   * A reference to a bounded buffer for read records. Its capacity is related to the number of
   * shards and new buffer is created with each shard split/merge operation. Records are added to
   * this buffer within the {@link ShardReadersPool#readLoop(ShardRecordsIterator)} method and
   * removed in the {@link ShardReadersPool#nextRecord()} method.
   */
  private final AtomicReference<BlockingQueue<KinesisRecord>> recordsQueue;

  /**
   * A reference to an immutable mapping of {@link ShardRecordsIterator} instances to shard ids.
   * This map is replaced with a new one when resharding operation on any handled shard occurs.
   */
  private final AtomicReference<ImmutableMap<String, ShardRecordsIterator>> shardIteratorsMap;

  /**
   * A map for keeping the current number of records stored in a buffer per shard.
   */
  private final ConcurrentMap<String, AtomicInteger> numberOfRecordsInAQueueByShard;

  private final SimplifiedKinesisClient kinesis;
  private final KinesisReaderCheckpoint initialCheckpoint;
  private final int queueCapacityPerShard;
  // Flipped to false in stop(); read loops exit once they observe it.
  private final AtomicBoolean poolOpened = new AtomicBoolean(true);

  ShardReadersPool(SimplifiedKinesisClient kinesis, KinesisReaderCheckpoint initialCheckpoint) {
    this(kinesis, initialCheckpoint, DEFAULT_CAPACITY_PER_SHARD);
  }

  ShardReadersPool(SimplifiedKinesisClient kinesis, KinesisReaderCheckpoint initialCheckpoint,
      int queueCapacityPerShard) {
    this.kinesis = kinesis;
    this.initialCheckpoint = initialCheckpoint;
    this.queueCapacityPerShard = queueCapacityPerShard;
    this.executorService = Executors.newCachedThreadPool();
    this.numberOfRecordsInAQueueByShard = new ConcurrentHashMap<>();
    this.recordsQueue = new AtomicReference<>();
    this.shardIteratorsMap = new AtomicReference<>();
  }

  /** Creates shard iterators from the initial checkpoint and starts one reader thread per shard. */
  void start() throws TransientKinesisException {
    ImmutableMap.Builder<String, ShardRecordsIterator> shardsMap = ImmutableMap.builder();
    for (ShardCheckpoint checkpoint : initialCheckpoint) {
      shardsMap.put(checkpoint.getShardId(), createShardIterator(kinesis, checkpoint));
    }
    shardIteratorsMap.set(shardsMap.build());
    if (!shardIteratorsMap.get().isEmpty()) {
      // Queue capacity scales with the current number of shards.
      BlockingQueue<KinesisRecord> queue = new ArrayBlockingQueue<>(
          queueCapacityPerShard * shardIteratorsMap.get().size());
      recordsQueue.set(queue);
      startReadingShards(shardIteratorsMap.get().values());
    }
  }

  private void startReadingShards(Iterable<ShardRecordsIterator> shardRecordsIterators) {
    for (final ShardRecordsIterator recordsIterator : shardRecordsIterators) {
      numberOfRecordsInAQueueByShard.put(recordsIterator.getShardId(), new AtomicInteger());
      executorService.submit(new Runnable() {
        @Override
        public void run() {
          readLoop(recordsIterator);
        }
      });
    }
  }

  // Main loop of one shard-reader thread: reads batches and enqueues records until the pool
  // is stopped, the thread is interrupted, or the shard gets closed (split/merge).
  private void readLoop(ShardRecordsIterator shardRecordsIterator) {
    while (poolOpened.get()) {
      try {
        List<KinesisRecord> kinesisRecords;
        try {
          kinesisRecords = shardRecordsIterator.readNextBatch();
        } catch (KinesisShardClosedException e) {
          LOG.info("Shard iterator for {} shard is closed, finishing the read loop",
              shardRecordsIterator.getShardId(), e);
          // Drain this shard's records from the queue before switching to its successors,
          // which keeps checkpoints consistent.
          waitUntilAllShardRecordsRead(shardRecordsIterator);
          readFromSuccessiveShards(shardRecordsIterator);
          break;
        }
        for (KinesisRecord kinesisRecord : kinesisRecords) {
          recordsQueue.get().put(kinesisRecord);
          numberOfRecordsInAQueueByShard.get(kinesisRecord.getShardId()).incrementAndGet();
        }
      } catch (TransientKinesisException e) {
        LOG.warn("Transient exception occurred.", e);
      } catch (InterruptedException e) {
        LOG.warn("Thread was interrupted, finishing the read loop", e);
        break;
      } catch (Throwable e) {
        LOG.error("Unexpected exception occurred", e);
      }
    }
    LOG.info("Kinesis Shard read loop has finished");
  }

  // Returns the next buffered record, waiting up to 1 second; absent if none is available.
  CustomOptional<KinesisRecord> nextRecord() {
    try {
      if (recordsQueue.get() == null) {
        return CustomOptional.absent();
      }
      KinesisRecord record = recordsQueue.get().poll(1, TimeUnit.SECONDS);
      if (record == null) {
        return CustomOptional.absent();
      }
      shardIteratorsMap.get().get(record.getShardId()).ackRecord(record);
      numberOfRecordsInAQueueByShard.get(record.getShardId()).decrementAndGet();
      return CustomOptional.of(record);
    } catch (InterruptedException e) {
      LOG.warn("Interrupted while waiting for KinesisRecord from the buffer");
      return CustomOptional.absent();
    }
  }

  // Stops all reader threads and waits (up to 3 attempts of 10 seconds each) for termination.
  void stop() {
    LOG.info("Closing shard iterators pool");
    poolOpened.set(false);
    executorService.shutdownNow();
    boolean isShutdown = false;
    int attemptsLeft = 3;
    while (!isShutdown && attemptsLeft-- > 0) {
      try {
        isShutdown = executorService.awaitTermination(10, TimeUnit.SECONDS);
      } catch (InterruptedException e) {
        LOG.error("Interrupted while waiting for the executor service to shutdown");
        throw new RuntimeException(e);
      }
      if (!isShutdown && attemptsLeft > 0) {
        LOG.warn("Executor service is taking long time to shutdown, will retry. {} attempts left",
            attemptsLeft);
      }
    }
  }

  // Snapshots the current per-shard checkpoints into a KinesisReaderCheckpoint.
  KinesisReaderCheckpoint getCheckpointMark() {
    ImmutableMap<String, ShardRecordsIterator> currentShardIterators = shardIteratorsMap.get();
    return new KinesisReaderCheckpoint(transform(currentShardIterators.values(),
        new Function<ShardRecordsIterator, ShardCheckpoint>() {
          @Override
          public ShardCheckpoint apply(ShardRecordsIterator shardRecordsIterator) {
            checkArgument(shardRecordsIterator != null, "shardRecordsIterator can not be null");
            return shardRecordsIterator.getCheckpoint();
          }
        }));
  }

  ShardRecordsIterator createShardIterator(SimplifiedKinesisClient kinesis,
      ShardCheckpoint checkpoint) throws TransientKinesisException {
    return new ShardRecordsIterator(checkpoint, kinesis);
  }

  /**
   * Waits until all records read from given shardRecordsIterator are taken from
   * {@link ShardReadersPool#recordsQueue} and acknowledged in
   * {@link ShardReadersPool#nextRecord()}.
   * Uses {@link ShardReadersPool#numberOfRecordsInAQueueByShard} counters to track this.
   */
  private void waitUntilAllShardRecordsRead(ShardRecordsIterator shardRecordsIterator)
      throws InterruptedException {
    while (!allShardRecordsRead(shardRecordsIterator)) {
      Thread.sleep(TimeUnit.SECONDS.toMillis(1));
    }
  }

  private boolean allShardRecordsRead(final ShardRecordsIterator shardRecordsIterator) {
    return numberOfRecordsInAQueueByShard.get(shardRecordsIterator.getShardId()).get() == 0;
  }

  /**
   * <p>
   * Tries to find successors of a given shard and start reading them. Each closed shard can have
   * 0, 1 or 2 successors
   * <ul>
   * <li>0 successors - when shard was merged with another shard and this one is considered
   * adjacent by merge operation</li>
   * <li>1 successor - when shard was merged with another shard and this one is considered a
   * parent by merge operation</li>
   * <li>2 successors - when shard was split into two shards</li>
   * </ul>
   * </p>
   * <p>
   * Once shard successors are established, the transition to reading new shards can begin.
   * The {@link ShardReadersPool#shardIteratorsMap}
   * and {@link ShardReadersPool#numberOfRecordsInAQueueByShard} collections must
   * remain in sync. Therefore the operation is synchronized on this {@link ShardReadersPool}
   * instance. During this operation, the immutable {@link ShardReadersPool#shardIteratorsMap}
   * is replaced with a new one holding references to {@link ShardRecordsIterator} instances for
   * open shards only. The {@link ShardReadersPool#recordsQueue} is replaced with a new queue with
   * capacity adjusted to the new number of open shards and records remaining in previous queue are
   * drained to the new one. Also, the counter for already closed shard is removed from
   * {@link ShardReadersPool#numberOfRecordsInAQueueByShard}.
   * </p>
   * <p>
   * Finally when atomic update is finished, new threads are spawned for reading the successive
   * shards. The thread that handled reading from already closed shard can finally complete.
   * </p>
   */
  private void readFromSuccessiveShards(final ShardRecordsIterator closedShardIterator)
      throws TransientKinesisException {
    List<ShardRecordsIterator> successiveShardRecordIterators = closedShardIterator
        .findSuccessiveShardRecordIterators();
    synchronized (this) {
      ImmutableMap.Builder<String, ShardRecordsIterator> shardsMap = ImmutableMap.builder();
      Iterable<ShardRecordsIterator> allShards = Iterables
          .concat(shardIteratorsMap.get().values(), successiveShardRecordIterators);
      for (ShardRecordsIterator iterator : allShards) {
        // Keep every iterator except the one for the shard that just closed.
        if (!closedShardIterator.getShardId().equals(iterator.getShardId())) {
          shardsMap.put(iterator.getShardId(), iterator);
        }
      }
      shardIteratorsMap.set(shardsMap.build());
      numberOfRecordsInAQueueByShard.remove(closedShardIterator.getShardId());
      BlockingQueue<KinesisRecord> previousRecordsQueue = recordsQueue.get();
      int capacity = queueCapacityPerShard * shardIteratorsMap.get().size();
      if (capacity > 0) {
        BlockingQueue<KinesisRecord> newRecordsQueue = new ArrayBlockingQueue<>(capacity);
        recordsQueue.set(newRecordsQueue);
        // Drain remaining records into the resized queue before readers continue.
        do {
          try {
            KinesisRecord record = previousRecordsQueue.poll(500, TimeUnit.MILLISECONDS);
            if (record != null) {
              newRecordsQueue.put(record);
            }
          } catch (InterruptedException e) {
            LOG.warn("Thread was interrupted during resharding operation, stopping", e);
            return;
          }
        } while (!previousRecordsQueue.isEmpty());
      } else {
        // No open shards remain, so no queue is needed.
        recordsQueue.set(null);
      }
    }
    startReadingShards(successiveShardRecordIterators);
  }
}
|
/**
 * Internal pool of reader threads, one per Kinesis shard, that continuously read records
 * from the shards handled by this pool into a shared bounded buffer.
 */
class ShardReadersPool {

  private static final Logger LOG = LoggerFactory.getLogger(ShardReadersPool.class);
  private static final int DEFAULT_CAPACITY_PER_SHARD = 10_000;

  /**
   * Executor service for running the threads that read records from shards handled by this pool.
   * Each thread runs the {@link ShardReadersPool#readLoop(ShardRecordsIterator)} method and
   * handles exactly one shard.
   */
  private final ExecutorService executorService;

  /**
   * A Bounded buffer for read records. Records are added to this buffer within the
   * {@link ShardReadersPool#readLoop(ShardRecordsIterator)} method and removed
   * in the {@link ShardReadersPool#nextRecord()} method.
   */
  // NOTE(review): assigned exactly once in start(); assumes start() is always called before
  // nextRecord()/readLoop — confirm with callers.
  private BlockingQueue<KinesisRecord> recordsQueue;

  /**
   * A reference to an immutable mapping of {@link ShardRecordsIterator} instances to shard ids.
   * This map is replaced with a new one when resharding operation on any handled shard occurs.
   */
  private final AtomicReference<ImmutableMap<String, ShardRecordsIterator>> shardIteratorsMap;

  /**
   * A map for keeping the current number of records stored in a buffer per shard.
   */
  private final ConcurrentMap<String, AtomicInteger> numberOfRecordsInAQueueByShard;

  private final SimplifiedKinesisClient kinesis;
  private final KinesisReaderCheckpoint initialCheckpoint;
  private final int queueCapacityPerShard;
  // Flipped to false in stop(); read loops exit once they observe it.
  private final AtomicBoolean poolOpened = new AtomicBoolean(true);

  ShardReadersPool(SimplifiedKinesisClient kinesis, KinesisReaderCheckpoint initialCheckpoint) {
    this(kinesis, initialCheckpoint, DEFAULT_CAPACITY_PER_SHARD);
  }

  ShardReadersPool(SimplifiedKinesisClient kinesis, KinesisReaderCheckpoint initialCheckpoint,
      int queueCapacityPerShard) {
    this.kinesis = kinesis;
    this.initialCheckpoint = initialCheckpoint;
    this.queueCapacityPerShard = queueCapacityPerShard;
    this.executorService = Executors.newCachedThreadPool();
    this.numberOfRecordsInAQueueByShard = new ConcurrentHashMap<>();
    this.shardIteratorsMap = new AtomicReference<>();
  }

  /** Creates shard iterators from the initial checkpoint and starts one reader thread per shard. */
  void start() throws TransientKinesisException {
    ImmutableMap.Builder<String, ShardRecordsIterator> shardsMap = ImmutableMap.builder();
    for (ShardCheckpoint checkpoint : initialCheckpoint) {
      shardsMap.put(checkpoint.getShardId(), createShardIterator(kinesis, checkpoint));
    }
    shardIteratorsMap.set(shardsMap.build());
    if (!shardIteratorsMap.get().isEmpty()) {
      // Queue capacity scales with the current number of shards.
      recordsQueue = new ArrayBlockingQueue<>(
          queueCapacityPerShard * shardIteratorsMap.get().size());
      startReadingShards(shardIteratorsMap.get().values());
    } else {
      // No shards to read: keep a minimal queue so nextRecord() never sees null.
      recordsQueue = new ArrayBlockingQueue<>(1);
    }
  }

  private void startReadingShards(Iterable<ShardRecordsIterator> shardRecordsIterators) {
    for (final ShardRecordsIterator recordsIterator : shardRecordsIterators) {
      numberOfRecordsInAQueueByShard.put(recordsIterator.getShardId(), new AtomicInteger());
      executorService.submit(() -> readLoop(recordsIterator));
    }
  }

  // Main loop of one shard-reader thread: reads batches and enqueues records until the pool
  // is stopped, the thread is interrupted, or the shard gets closed (split/merge).
  private void readLoop(ShardRecordsIterator shardRecordsIterator) {
    while (poolOpened.get()) {
      try {
        List<KinesisRecord> kinesisRecords;
        try {
          kinesisRecords = shardRecordsIterator.readNextBatch();
        } catch (KinesisShardClosedException e) {
          LOG.info("Shard iterator for {} shard is closed, finishing the read loop",
              shardRecordsIterator.getShardId(), e);
          // Drain this shard's records from the queue before switching to its successors,
          // which keeps checkpoints consistent.
          waitUntilAllShardRecordsRead(shardRecordsIterator);
          readFromSuccessiveShards(shardRecordsIterator);
          break;
        }
        for (KinesisRecord kinesisRecord : kinesisRecords) {
          recordsQueue.put(kinesisRecord);
          numberOfRecordsInAQueueByShard.get(kinesisRecord.getShardId()).incrementAndGet();
        }
      } catch (TransientKinesisException e) {
        LOG.warn("Transient exception occurred.", e);
      } catch (InterruptedException e) {
        LOG.warn("Thread was interrupted, finishing the read loop", e);
        break;
      } catch (Throwable e) {
        LOG.error("Unexpected exception occurred", e);
      }
    }
    LOG.info("Kinesis Shard read loop has finished");
  }

  // Returns the next buffered record, waiting up to 1 second; absent if none is available.
  CustomOptional<KinesisRecord> nextRecord() {
    try {
      KinesisRecord record = recordsQueue.poll(1, TimeUnit.SECONDS);
      if (record == null) {
        return CustomOptional.absent();
      }
      shardIteratorsMap.get().get(record.getShardId()).ackRecord(record);
      numberOfRecordsInAQueueByShard.get(record.getShardId()).decrementAndGet();
      return CustomOptional.of(record);
    } catch (InterruptedException e) {
      LOG.warn("Interrupted while waiting for KinesisRecord from the buffer");
      return CustomOptional.absent();
    }
  }

  // Stops all reader threads and waits (up to 3 attempts of 10 seconds each) for termination.
  void stop() {
    LOG.info("Closing shard iterators pool");
    poolOpened.set(false);
    executorService.shutdownNow();
    boolean isShutdown = false;
    int attemptsLeft = 3;
    while (!isShutdown && attemptsLeft-- > 0) {
      try {
        isShutdown = executorService.awaitTermination(10, TimeUnit.SECONDS);
      } catch (InterruptedException e) {
        LOG.error("Interrupted while waiting for the executor service to shutdown");
        throw new RuntimeException(e);
      }
      if (!isShutdown && attemptsLeft > 0) {
        LOG.warn("Executor service is taking long time to shutdown, will retry. {} attempts left",
            attemptsLeft);
      }
    }
  }

  // Snapshots the current per-shard checkpoints into a KinesisReaderCheckpoint.
  KinesisReaderCheckpoint getCheckpointMark() {
    ImmutableMap<String, ShardRecordsIterator> currentShardIterators = shardIteratorsMap.get();
    return new KinesisReaderCheckpoint(
        currentShardIterators.values().stream().map(shardRecordsIterator -> {
          checkArgument(shardRecordsIterator != null, "shardRecordsIterator can not be null");
          return shardRecordsIterator.getCheckpoint();
        }).collect(Collectors.toList()));
  }

  ShardRecordsIterator createShardIterator(SimplifiedKinesisClient kinesis,
      ShardCheckpoint checkpoint) throws TransientKinesisException {
    return new ShardRecordsIterator(checkpoint, kinesis);
  }

  /**
   * Waits until all records read from given shardRecordsIterator are taken from
   * {@link ShardReadersPool#recordsQueue} and acknowledged in
   * {@link ShardReadersPool#nextRecord()}.
   * Uses {@link ShardReadersPool#numberOfRecordsInAQueueByShard} counters to track this.
   */
  private void waitUntilAllShardRecordsRead(ShardRecordsIterator shardRecordsIterator)
      throws InterruptedException {
    AtomicInteger numberOfShardRecordsInAQueue = numberOfRecordsInAQueueByShard
        .get(shardRecordsIterator.getShardId());
    while (!(numberOfShardRecordsInAQueue.get() == 0)) {
      Thread.sleep(TimeUnit.SECONDS.toMillis(1));
    }
  }

  /**
   * <p>
   * Tries to find successors of a given shard and start reading them. Each closed shard can have
   * 0, 1 or 2 successors
   * <ul>
   * <li>0 successors - when shard was merged with another shard and this one is considered
   * adjacent by merge operation</li>
   * <li>1 successor - when shard was merged with another shard and this one is considered a
   * parent by merge operation</li>
   * <li>2 successors - when shard was split into two shards</li>
   * </ul>
   * </p>
   * <p>
   * Once shard successors are established, the transition to reading new shards can begin.
   * During this operation, the immutable {@link ShardReadersPool#shardIteratorsMap}
   * is replaced with a new one holding references to {@link ShardRecordsIterator} instances for
   * open shards only. Potentially there might be more shard iterators closing at the same time so
   * {@link ShardReadersPool#shardIteratorsMap} is updated in a compare-and-set loop to not lose
   * the updates. Then, the counter for already closed shard is removed from
   * {@link ShardReadersPool#numberOfRecordsInAQueueByShard}.
   * </p>
   * <p>
   * Finally when update is finished, new threads are spawned for reading the successive shards.
   * The thread that handled reading from already closed shard can finally complete.
   * </p>
   */
  private void readFromSuccessiveShards(final ShardRecordsIterator closedShardIterator)
      throws TransientKinesisException {
    List<ShardRecordsIterator> successiveShardRecordIterators = closedShardIterator
        .findSuccessiveShardRecordIterators();
    ImmutableMap<String, ShardRecordsIterator> current;
    ImmutableMap<String, ShardRecordsIterator> updated;
    // CAS loop: retry the map swap until no concurrent resharding update interleaves.
    do {
      current = shardIteratorsMap.get();
      updated = createMapWithSuccessiveShards(current, closedShardIterator,
          successiveShardRecordIterators);
    } while (!shardIteratorsMap.compareAndSet(current, updated));
    numberOfRecordsInAQueueByShard.remove(closedShardIterator.getShardId());
    startReadingShards(successiveShardRecordIterators);
  }

  // Builds a new iterators map containing all current open shards plus the successors,
  // excluding the shard that just closed.
  private ImmutableMap<String, ShardRecordsIterator> createMapWithSuccessiveShards(
      ImmutableMap<String, ShardRecordsIterator> current, ShardRecordsIterator closedShardIterator,
      List<ShardRecordsIterator> successiveShardRecordIterators) throws TransientKinesisException {
    ImmutableMap.Builder<String, ShardRecordsIterator> shardsMap = ImmutableMap.builder();
    Iterable<ShardRecordsIterator> allShards = Iterables
        .concat(current.values(), successiveShardRecordIterators);
    for (ShardRecordsIterator iterator : allShards) {
      if (!closedShardIterator.getShardId().equals(iterator.getShardId())) {
        shardsMap.put(iterator.getShardId(), iterator);
      }
    }
    return shardsMap.build();
  }
}
|
We usually include `test` in the file name. ```suggestion CompileResult result = BCompileUtil.compile("test-src/klass/resource-method-assignability-negative-test.bal"); ``` Also, although it was not introduced by this PR, we use underscores in bal file names.
|
// Negative test: object values whose resource/remote methods do not match the target
// type must be rejected by the compiler with the expected diagnostics.
// NOTE(review): by convention the .bal file name should include `test` and use
// underscores rather than hyphens.
public void testResourceMethodsDoesNotAffectAssignability() {
    CompileResult result = BCompileUtil.compile("test-src/klass/resource-method-assignability-negative.bal");
    int index = 0;
    validateError(result, index++, "incompatible types: expected 'Foo', found 'Bar'", 38, 13);
    validateError(result, index++, "incompatible types: expected 'Bar', found 'isolated object { " +
            "public function hello () returns (); function foo () returns (int); }'", 40, 15);
    // Also asserts that no unexpected extra errors were reported.
    Assert.assertEquals(index, result.getErrorCount());
}
|
CompileResult result = BCompileUtil.compile("test-src/klass/resource-method-assignability-negative.bal");
|
// Negative test: object values with mismatched resource methods (different accessors,
// path segments, path/rest parameter types, or extra resource methods) must not be
// assignable to the target object type. Each validateError call pins one expected
// diagnostic with its message, line and column.
public void testResourceMethodsDoesNotAffectAssignability() {
    CompileResult result = BCompileUtil.compile("test-src/klass/resource_method_assignability_negative_test.bal");
    int index = 0;
    validateError(result, index++, "incompatible types: expected 'Foo', found 'Bar'", 38, 13);
    validateError(result, index++, "incompatible types: expected 'Bar', found 'isolated object { " +
            "public function hello () returns (); function foo () returns (int); }'",
            40, 15);
    validateError(result, index++, "incompatible types: expected 'object { resource function get " +
            ".() returns (); }', found 'isolated object { resource function post .() returns (); }'",
            56, 11);
    validateError(result, index++, "incompatible types: expected 'object { resource function get " +
            "[int]() returns (); }', found 'isolated object { resource function get [string]() returns (); }'",
            63, 11);
    validateError(result, index++, "incompatible types: expected 'object { resource function get " +
            "foo/[int]() returns (); }', found 'isolated object { resource function get foo/[string]() returns " +
            "(); }'", 70, 11);
    validateError(result, index++, "incompatible types: expected 'object { resource function get " +
            "[int]() returns (); }', found 'isolated object { resource function get [string]() returns (); }'",
            78, 11);
    validateError(result, index++, "incompatible types: expected 'object { resource function get " +
            "foo/[int]() returns (); }', found 'isolated object { resource function get foo/[string]() " +
            "returns (); }'", 85, 11);
    validateError(result, index++, "incompatible types: expected 'object { resource function get " +
            "[int]() returns (); }', found 'isolated object { resource function get [byte]() returns (); }'",
            93, 11);
    validateError(result, index++, "incompatible types: expected 'object { resource function get " +
            "[int]() returns (); }', found 'isolated object { resource function get [string]() returns (); " +
            "resource function post [int]() returns (); }'", 100, 11);
    validateError(result, index++, "incompatible types: expected 'object { resource function get " +
            "[int]() returns (); }', found 'isolated object { resource function get [string]() returns (); " +
            "resource function post [int]() returns (); }'", 109, 11);
    validateError(result, index++, "incompatible types: expected 'object { resource function get " +
            "bar/[int...]() returns (); }', found 'isolated object { resource function get bar/[string...]() " +
            "returns (); }'", 118, 11);
    validateError(result, index++, "incompatible types: expected 'object { resource function get " +
            "bar/[int...]() returns (); }', found 'isolated object { resource function get bar/[byte...]() " +
            "returns (); }'", 125, 11);
    validateError(result, index++, "incompatible types: expected 'object { resource function get " +
            "bar/[int...]() returns (); }', found 'isolated object { resource function get bar/[int]() " +
            "returns (); }'", 132, 11);
    validateError(result, index++, "incompatible types: expected 'object { resource function get " +
            "foo/[int]() returns (); }', found 'isolated object { resource function get foo2/[int]() returns " +
            "(); }'", 139, 11);
    validateError(result, index++, "incompatible types: expected 'object { resource function get " +
            "foo/[int]() returns (); }', found 'isolated object { resource function get foo/[string]() returns " +
            "(); }'", 146, 11);
    validateError(result, index++, "incompatible types: expected 'object { resource function get " +
            ".(int) returns (); }', found 'isolated object { resource function get .() returns (); }'",
            153, 11);
    validateError(result, index++, "incompatible types: expected 'object { resource function get " +
            ".() returns (); }', found 'isolated object { resource function get .(int) returns (); }'",
            160, 11);
    validateError(result, index++, "incompatible types: expected 'object { resource function get " +
            ".(int...) returns (); }', found 'isolated object { resource function get .(int) returns (); }'",
            167, 11);
    validateError(result, index++, "incompatible types: expected 'object { resource function get " +
            "foo() returns (); }', found 'isolated object { resource function get foo(int) returns (); }'",
            174, 11);
    validateError(result, index++, "incompatible types: expected 'object { resource function get " +
            "[int]() returns (); }', found 'isolated object { resource function get .(int) returns (); }'",
            181, 11);
    validateError(result, index++, "incompatible types: expected 'object { resource function get " +
            "[int](int) returns (); }', found 'isolated object { resource function get .(int) returns (); }'",
            188, 11);
    validateError(result, index++, "incompatible types: expected 'object { resource function get " +
            ".(int) returns (); }', found 'isolated object { resource function get [int]() returns (); }'",
            195, 11);
    validateError(result, index++, "incompatible types: expected 'object { resource function get " +
            ".(int) returns (); }', found 'isolated object { resource function get [int]() returns (); }'",
            202, 11);
    validateError(result, index++, "incompatible types: expected 'object { resource function get " +
            ".() returns (); }', found 'isolated object { resource function post .() returns (); }'",
            209, 11);
    validateError(result, index++, "incompatible types: expected 'object { resource function get " +
            "[int]() returns (); }', found 'isolated object { resource function get [string]() returns (); }'",
            216, 11);
    validateError(result, index++, "incompatible types: expected 'object { resource function get " +
            "foo/[int]() returns (); }', found 'isolated object { resource function get foo/[string]() returns " +
            "(); }'", 223, 11);
    validateError(result, index++, "incompatible types: expected 'object { resource function get " +
            "[int]() returns (); }', found 'isolated object { resource function get [string]() returns (); }'",
            230, 11);
    validateError(result, index++, "incompatible types: expected 'object { resource function get " +
            "[int]() returns (); }', found 'isolated object { resource function get [int]() returns (); }'",
            237, 11);
    // Also asserts that no unexpected extra errors were reported.
    Assert.assertEquals(index, result.getErrorCount());
}
class ServiceClassTest {
@Test
public void testBasicStructAsObject() {
    // Smoke test: compile a simple service class and run its object-value assertions.
    CompileResult compileResult = BCompileUtil.compile("test-src/klass/simple_service_class.bal");
    BRunUtil.invoke(compileResult, "testServiceObjectValue");
}
// Negative test: resource path parameters must be one of the supported simple types.
// Fixes: (1) the @Test annotation was duplicated, which does not compile since @Test is
// not a repeatable annotation; (2) the final assertion compared against a hard-coded
// count instead of the compiler's actual error count, so unexpected extra diagnostics
// would have gone unnoticed — now consistent with the sibling tests in this class.
@Test
public void testResourcePathParamNegative() {
    CompileResult result = BCompileUtil.compile("test-src/klass/simple_service_class_neg_path_param.bal");
    int index = 0;
    validateError(result, index++,
            "only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as path params, " +
                    "found 'json'", 37, 32);
    validateError(result, index++,
            "only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as path params, " +
                    "found 'anydata'", 37, 41);
    validateError(result, index++,
            "only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as rest path param, " +
                    "found 'anydata'", 37, 65);
    Assert.assertEquals(result.getErrorCount(), index);
}
@Test
public void testServiceObjectAndUsingServiceObjectAsATypeInclusionNegative() {
    // Negative test: classes including a service type must implement all of its methods,
    // and incompatible service object values must not be assignable.
    CompileResult result =
            BCompileUtil.compile("test-src/klass/service_type_resource_method_decl_neg.bal");
    int index = 0;
    // NOTE(review): 'onMesage' below mirrors the method name in the .bal test source —
    // confirm the spelling there is intentional.
    validateError(result, index++, "no implementation found for the method 'onMesage' of class 'SClass'",
            23, 1);
    validateError(result, index++, "no implementation found for the method 'resource function get " +
            "foo/[string]() returns ()' of class 'SClass'", 23, 1);
    validateError(result, index++, "no implementation found for the method 'resource function get " +
            "foo/bar() returns ()' of class 'SClass'", 23, 1);
    validateError(result, index++, "no implementation found for the method 'resource function get " +
            "foo() returns (string)' of class 'RTwo'", 34, 1);
    validateError(result, index++, "no implementation found for the method 'resource function get " +
            "foo() returns (string)' of class 'RTypeImpl'", 43, 1);
    validateError(result, index++, "no implementation found for the method 'resource function do " +
            "f() returns (int)' of class 'Do'", 56, 1);
    validateError(result, index++, "no implementation found for the method 'resource function done " +
            "f() returns (int)' of class 'Do'", 56, 1);
    validateError(result, index++, "incompatible types: expected 'Foo', found 'Bar'", 80, 13);
    validateError(result, index++, "incompatible types: expected 'Foo', found 'Baz'", 88, 13);
    // Also asserts that no unexpected extra errors were reported.
    Assert.assertEquals(result.getErrorCount(), index);
}
@Test
public void testResourceFunctionWithInvalidPathParam() {
CompileResult result =
BCompileUtil.compile("test-src/klass/resource_function_with_invalid_path_param_type_negative.bal");
int index = 0;
validateError(result, index++, "only 'int', 'string', 'float', 'boolean', 'decimal' types " +
"are supported as path params, found 'other'",
24, 29);
validateError(result, index++, "undefined module 'module1'",
24, 29);
validateError(result, index++, "unknown type 'RequestMessage'", 24, 29);
BAssertUtil.validateError(result, index++,
"redeclared symbol 'a'", 35, 56);
BAssertUtil.validateError(result, index++,
"redeclared symbol 'name'", 39, 69);
BAssertUtil.validateError(result, index++,
"redeclared symbol '$anonType$_2.$get$path$*$foo2'", 43, 27);
BAssertUtil.validateError(result, index++,
"resource path segment is not allowed after resource path rest parameter",
47, 47);
BAssertUtil.validateError(result, index++,
"only 'int', 'string', 'float', 'boolean', 'decimal' types " +
"are supported as path params, found 'string?'", 51, 38);
BAssertUtil.validateError(result, index++,
"missing resource path in resource accessor definition",
55, 27);
BAssertUtil.validateError(result, index++,
"only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as" +
" path params, found 'record {| int a; anydata...; |}'", 59, 43);
BAssertUtil.validateError(result, index++,
"only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as" +
" path params, found 'record {| int a; anydata...; |}'", 63, 44);
BAssertUtil.validateError(result, index++,
"only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as rest path " +
"param, found 'xml'", 67, 40);
BAssertUtil.validateError(result, index++,
"only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as path " +
"params, found 'xml'", 71, 41);
BAssertUtil.validateError(result, index++,
"redeclared symbol '$anonType$_2.$get$xmlPath2$*'", 75, 27);
BAssertUtil.validateError(result, index++,
"only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as path" +
" params, found 'xml'", 75, 41);
BAssertUtil.validateError(result, index++,
"only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as path " +
"params, found 'map<string>'", 79, 40);
BAssertUtil.validateError(result, index++,
"only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as path " +
"params, found 'map<string>'", 83, 41);
BAssertUtil.validateError(result, index++,
"only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as path " +
"params, found '(int|error)'", 87, 47);
BAssertUtil.validateError(result, index++,
"only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as path " +
"params, found '(int|error)'", 91, 48);
BAssertUtil.validateError(result, index++,
"only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as path " +
"params, found 'error'", 95, 42);
BAssertUtil.validateError(result, index++,
"only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as path " +
"params, found 'error'", 99, 43);
Assert.assertEquals(result.getErrorCount(), index);
}
@AfterClass
public void reset() {
ServiceValue.reset();
}
}
|
/**
 * Compiler/runtime tests for Ballerina service classes: construction, resource
 * path-parameter validation, and service-object type-inclusion diagnostics.
 */
class ServiceClassTest {

    /** Smoke test: a simple service class value can be constructed and invoked. */
    @Test
    public void testBasicStructAsObject() {
        CompileResult compileResult = BCompileUtil.compile("test-src/klass/simple_service_class.bal");
        BRunUtil.invoke(compileResult, "testServiceObjectValue");
    }

    // NOTE(review): the original source carried a duplicate @Test annotation on this method,
    // which does not compile (TestNG's @Test is not @Repeatable); the duplicate was removed.
    @Test
    public void testResourcePathParamNegative() {
        CompileResult result = BCompileUtil.compile("test-src/klass/simple_service_class_neg_path_param.bal");
        int index = 0;
        validateError(result, index++,
                "only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as path params, " +
                        "found 'json'", 37, 32);
        validateError(result, index++,
                "only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as path params, " +
                        "found 'anydata'", 37, 41);
        validateError(result, index++,
                "only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as rest path param, " +
                        "found 'anydata'", 37, 65);
        // NOTE(review): checks only the running index, not result.getErrorCount() like the
        // other tests — confirm whether that asymmetry is intentional.
        Assert.assertEquals(index, 3);
    }

    @Test
    public void testServiceObjectAndUsingServiceObjectAsATypeInclusionNegative() {
        CompileResult result =
                BCompileUtil.compile("test-src/klass/service_type_resource_method_decl_neg.bal");
        int index = 0;
        validateError(result, index++, "no implementation found for the method 'onMesage' of class 'SClass'",
                23, 1);
        validateError(result, index++, "no implementation found for the method 'resource function get " +
                "foo/[string]() returns ()' of class 'SClass'", 23, 1);
        validateError(result, index++, "no implementation found for the method 'resource function get " +
                "foo/bar() returns ()' of class 'SClass'", 23, 1);
        validateError(result, index++, "no implementation found for the method 'resource function get " +
                "foo() returns (string)' of class 'RTwo'", 34, 1);
        validateError(result, index++, "no implementation found for the method 'resource function get " +
                "foo() returns (string)' of class 'RTypeImpl'", 43, 1);
        validateError(result, index++, "no implementation found for the method 'resource function do " +
                "f() returns (int)' of class 'Do'", 56, 1);
        validateError(result, index++, "no implementation found for the method 'resource function done " +
                "f() returns (int)' of class 'Do'", 56, 1);
        validateError(result, index++, "incompatible types: expected 'Foo', found 'Bar'", 80, 13);
        validateError(result, index++, "incompatible types: expected 'Foo', found 'Baz'", 88, 13);
        Assert.assertEquals(result.getErrorCount(), index);
    }

    // NOTE(review): calls below were unified to the statically imported validateError;
    // the original mixed bare validateError with qualified BAssertUtil.validateError.
    @Test
    public void testResourceFunctionWithInvalidPathParam() {
        CompileResult result =
                BCompileUtil.compile("test-src/klass/resource_function_with_invalid_path_param_type_negative.bal");
        int index = 0;
        validateError(result, index++, "only 'int', 'string', 'float', 'boolean', 'decimal' types " +
                        "are supported as path params, found 'other'",
                24, 29);
        validateError(result, index++, "undefined module 'module1'",
                24, 29);
        validateError(result, index++, "unknown type 'RequestMessage'", 24, 29);
        validateError(result, index++,
                "redeclared symbol 'a'", 35, 56);
        validateError(result, index++,
                "redeclared symbol 'name'", 39, 69);
        validateError(result, index++,
                "redeclared symbol '$anonType$_2.$get$path$*$foo2'", 43, 27);
        validateError(result, index++,
                "resource path segment is not allowed after resource path rest parameter",
                47, 47);
        validateError(result, index++,
                "only 'int', 'string', 'float', 'boolean', 'decimal' types " +
                        "are supported as path params, found 'string?'", 51, 38);
        validateError(result, index++,
                "missing resource path in resource accessor definition",
                55, 27);
        validateError(result, index++,
                "only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as" +
                        " path params, found 'record {| int a; anydata...; |}'", 59, 43);
        validateError(result, index++,
                "only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as" +
                        " path params, found 'record {| int a; anydata...; |}'", 63, 44);
        validateError(result, index++,
                "only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as rest path " +
                        "param, found 'xml'", 67, 40);
        validateError(result, index++,
                "only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as path " +
                        "params, found 'xml'", 71, 41);
        validateError(result, index++,
                "redeclared symbol '$anonType$_2.$get$xmlPath2$*'", 75, 27);
        validateError(result, index++,
                "only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as path" +
                        " params, found 'xml'", 75, 41);
        validateError(result, index++,
                "only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as path " +
                        "params, found 'map<string>'", 79, 40);
        validateError(result, index++,
                "only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as path " +
                        "params, found 'map<string>'", 83, 41);
        validateError(result, index++,
                "only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as path " +
                        "params, found '(int|error)'", 87, 47);
        validateError(result, index++,
                "only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as path " +
                        "params, found '(int|error)'", 91, 48);
        validateError(result, index++,
                "only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as path " +
                        "params, found 'error'", 95, 42);
        validateError(result, index++,
                "only 'int', 'string', 'float', 'boolean', 'decimal' types are supported as path " +
                        "params, found 'error'", 99, 43);
        Assert.assertEquals(result.getErrorCount(), index);
    }

    @Test
    public void testResourceMethodAssignability() {
        CompileResult compileResult = BCompileUtil.compile(
                "test-src/klass/resource_method_assignability_test.bal");
        BRunUtil.invoke(compileResult, "testResourceMethodAssignability");
    }

    /** Clears state accumulated in ServiceValue so later test classes start clean. */
    @AfterClass
    public void reset() {
        ServiceValue.reset();
    }
}
|
There's a compilation error after the parameter type changed to the `CheckpointType` enum.
|
/**
 * Builds a fully populated {@link CheckpointingStatistics} fixture (completed, savepoint,
 * failed, restored and pending checkpoints) for the marshalling round-trip test.
 *
 * <p>NOTE(review): updated to the changed constructor signatures — {@code Summary} now takes
 * five {@code MinMaxAvgStatistics}, {@code TaskCheckpointStatistics} carries the additional
 * persisted-data fields, and the checkpoint type is the {@code CheckpointType} enum instead
 * of a plain String (the old String arguments no longer compiled).
 */
protected CheckpointingStatistics getTestResponseInstance() throws Exception {
	final CheckpointingStatistics.Counts counts = new CheckpointingStatistics.Counts(1, 2, 3, 4, 5);
	final CheckpointingStatistics.Summary summary = new CheckpointingStatistics.Summary(
		new MinMaxAvgStatistics(1L, 1L, 1L),
		new MinMaxAvgStatistics(2L, 2L, 2L),
		new MinMaxAvgStatistics(3L, 3L, 3L),
		new MinMaxAvgStatistics(4L, 4L, 4L),
		new MinMaxAvgStatistics(5L, 5L, 5L));
	final Map<JobVertexID, TaskCheckpointStatistics> checkpointStatisticsPerTask = new HashMap<>(2);
	checkpointStatisticsPerTask.put(
		new JobVertexID(),
		new TaskCheckpointStatistics(
			1L,
			CheckpointStatsStatus.COMPLETED,
			1L,
			2L,
			3L,
			4L,
			7,
			8,
			5,
			6));
	checkpointStatisticsPerTask.put(
		new JobVertexID(),
		new TaskCheckpointStatistics(
			1L,
			CheckpointStatsStatus.COMPLETED,
			2L,
			3L,
			4L,
			5L,
			8,
			9,
			6,
			7));
	final CheckpointStatistics.CompletedCheckpointStatistics completed = new CheckpointStatistics.CompletedCheckpointStatistics(
		1L,
		CheckpointStatsStatus.COMPLETED,
		false,
		42L,
		41L,
		1337L,
		1L,
		0L,
		43L,
		44L,
		10,
		10,
		CheckpointType.CHECKPOINT,
		Collections.emptyMap(),
		null,
		false);
	final CheckpointStatistics.CompletedCheckpointStatistics savepoint = new CheckpointStatistics.CompletedCheckpointStatistics(
		2L,
		CheckpointStatsStatus.COMPLETED,
		true,
		11L,
		10L,
		43L,
		1L,
		0L,
		31337L,
		4244L,
		9,
		9,
		CheckpointType.SAVEPOINT,
		checkpointStatisticsPerTask,
		"externalPath",
		false);
	final CheckpointStatistics.FailedCheckpointStatistics failed = new CheckpointStatistics.FailedCheckpointStatistics(
		3L,
		CheckpointStatsStatus.FAILED,
		false,
		5L,
		10L,
		4L,
		2L,
		0L,
		21L,
		22L,
		11,
		9,
		CheckpointType.CHECKPOINT,
		Collections.emptyMap(),
		100L,
		"Test failure");
	CheckpointingStatistics.RestoredCheckpointStatistics restored = new CheckpointingStatistics.RestoredCheckpointStatistics(
		4L,
		1445L,
		true,
		"foobar");
	CheckpointStatistics.PendingCheckpointStatistics pending = new CheckpointStatistics.PendingCheckpointStatistics(
		5L,
		CheckpointStatsStatus.IN_PROGRESS,
		false,
		42L,
		41L,
		1337L,
		1L,
		0L,
		15L,
		16L,
		10,
		10,
		CheckpointType.CHECKPOINT,
		Collections.emptyMap());
	final CheckpointingStatistics.LatestCheckpoints latestCheckpoints = new CheckpointingStatistics.LatestCheckpoints(
		completed,
		savepoint,
		failed,
		restored);
	return new CheckpointingStatistics(
		counts,
		summary,
		latestCheckpoints,
		Arrays.asList(completed, savepoint, failed, pending));
}
|
"Checkpoint",
|
/**
 * Builds a fully populated {@link CheckpointingStatistics} fixture for the marshalling
 * round-trip test: one completed checkpoint, one savepoint, one failed, one restored and
 * one pending checkpoint, plus per-task statistics for the savepoint.
 *
 * <p>All numeric values are arbitrary distinct literals so that field mix-ups show up as
 * marshalling differences.
 */
protected CheckpointingStatistics getTestResponseInstance() throws Exception {
	final CheckpointingStatistics.Counts counts = new CheckpointingStatistics.Counts(1, 2, 3, 4, 5);
	final CheckpointingStatistics.Summary summary = new CheckpointingStatistics.Summary(
		new MinMaxAvgStatistics(1L, 1L, 1L),
		new MinMaxAvgStatistics(2L, 2L, 2L),
		new MinMaxAvgStatistics(3L, 3L, 3L),
		new MinMaxAvgStatistics(4L, 4L, 4L),
		new MinMaxAvgStatistics(5L, 5L, 5L));
	// Two tasks under fresh (random) vertex ids; only attached to the savepoint below.
	final Map<JobVertexID, TaskCheckpointStatistics> checkpointStatisticsPerTask = new HashMap<>(2);
	checkpointStatisticsPerTask.put(
		new JobVertexID(),
		new TaskCheckpointStatistics(
			1L,
			CheckpointStatsStatus.COMPLETED,
			1L,
			2L,
			3L,
			4L,
			7,
			8,
			5,
			6));
	checkpointStatisticsPerTask.put(
		new JobVertexID(),
		new TaskCheckpointStatistics(
			1L,
			CheckpointStatsStatus.COMPLETED,
			2L,
			3L,
			4L,
			5L,
			8,
			9,
			6,
			7));
	// Completed checkpoint with no per-task detail and no external path.
	final CheckpointStatistics.CompletedCheckpointStatistics completed = new CheckpointStatistics.CompletedCheckpointStatistics(
		1L,
		CheckpointStatsStatus.COMPLETED,
		false,
		42L,
		41L,
		1337L,
		1L,
		0L,
		43L,
		44L,
		10,
		10,
		CheckpointType.CHECKPOINT,
		Collections.emptyMap(),
		null,
		false);
	// Savepoint carrying the per-task statistics and an external path.
	final CheckpointStatistics.CompletedCheckpointStatistics savepoint = new CheckpointStatistics.CompletedCheckpointStatistics(
		2L,
		CheckpointStatsStatus.COMPLETED,
		true,
		11L,
		10L,
		43L,
		1L,
		0L,
		31337L,
		4244L,
		9,
		9,
		CheckpointType.SAVEPOINT,
		checkpointStatisticsPerTask,
		"externalPath",
		false);
	final CheckpointStatistics.FailedCheckpointStatistics failed = new CheckpointStatistics.FailedCheckpointStatistics(
		3L,
		CheckpointStatsStatus.FAILED,
		false,
		5L,
		10L,
		4L,
		2L,
		0L,
		21L,
		22L,
		11,
		9,
		CheckpointType.CHECKPOINT,
		Collections.emptyMap(),
		100L,
		"Test failure");
	CheckpointingStatistics.RestoredCheckpointStatistics restored = new CheckpointingStatistics.RestoredCheckpointStatistics(
		4L,
		1445L,
		true,
		"foobar");
	CheckpointStatistics.PendingCheckpointStatistics pending = new CheckpointStatistics.PendingCheckpointStatistics(
		5L,
		CheckpointStatsStatus.IN_PROGRESS,
		false,
		42L,
		41L,
		1337L,
		1L,
		0L,
		15L,
		16L,
		10,
		10,
		CheckpointType.CHECKPOINT,
		Collections.emptyMap()
	);
	final CheckpointingStatistics.LatestCheckpoints latestCheckpoints = new CheckpointingStatistics.LatestCheckpoints(
		completed,
		savepoint,
		failed,
		restored);
	return new CheckpointingStatistics(
		counts,
		summary,
		latestCheckpoints,
		Arrays.asList(completed, savepoint, failed, pending));
}
|
class CheckpointingStatisticsTest extends RestResponseMarshallingTestBase<CheckpointingStatistics> {
@Override
protected Class<CheckpointingStatistics> getTestResponseClass() {
return CheckpointingStatistics.class;
}
@Override
}
|
class CheckpointingStatisticsTest extends RestResponseMarshallingTestBase<CheckpointingStatistics> {
@Override
protected Class<CheckpointingStatistics> getTestResponseClass() {
return CheckpointingStatistics.class;
}
@Override
}
|
There are only three places where BACKQUOTE is used. This label cannot be added arbitrarily; it can only be used after AS.
|
/**
 * Renders a SELECT relation back to SQL text.
 *
 * <p>Every output column is given an explicit backquoted alias (either the user's alias
 * or the printed form of the expression) so the generated text survives re-parsing.
 */
public String visitSelect(SelectRelation stmt, Void context) {
    StringBuilder sqlBuilder = new StringBuilder();
    SelectList selectList = stmt.getSelectList();
    sqlBuilder.append("SELECT ");
    if (selectList.isDistinct()) {
        sqlBuilder.append("DISTINCT");
    }
    List<String> selectListString = new ArrayList<>();
    for (int i = 0; i < selectList.getItems().size(); ++i) {
        SelectListItem item = selectList.getItems().get(i);
        if (item.isStar()) {
            // '*' is expanded to the concrete output columns of the underlying relation.
            List<Expr> outputExpression = SelectAnalyzer.expandStar(item, stmt.getRelation());
            for (Expr expr : outputExpression) {
                selectListString.add(visit(expr) + " AS `" + toSQL(expr) + "`");
            }
        } else {
            if (item.getAlias() != null) {
                selectListString.add((visit(item.getExpr()) + " AS `" + item.getAlias()) + "`");
            } else {
                // No user alias: use the printed expression itself as the alias.
                selectListString.add(visit(item.getExpr()) + " AS `" + toSQL(item.getExpr()) + "`");
            }
        }
    }
    sqlBuilder.append(Joiner.on(", ").join(selectListString));
    if (stmt.getRelation() != null) {
        sqlBuilder.append(" FROM ");
        sqlBuilder.append(visit(stmt.getRelation()));
    }
    if (stmt.hasWhereClause()) {
        sqlBuilder.append(" WHERE ");
        sqlBuilder.append(visit(stmt.getWhereClause()));
    }
    if (stmt.hasGroupByClause()) {
        sqlBuilder.append(" GROUP BY ");
        sqlBuilder.append(stmt.getGroupByClause().toSql());
    }
    if (stmt.hasHavingClause()) {
        sqlBuilder.append(" HAVING ");
        sqlBuilder.append(visit(stmt.getHavingClause()));
    }
    return sqlBuilder.toString();
}
|
selectListString.add(visit(expr) + " AS `" + toSQL(expr) + "`");
|
/**
 * Renders a SELECT relation back to SQL text.
 *
 * <p>Every output column receives an explicit backquoted alias — the user's alias when one
 * was given, otherwise the printed form of the expression — so the text survives re-parsing.
 */
public String visitSelect(SelectRelation stmt, Void context) {
    StringBuilder sql = new StringBuilder("SELECT ");
    SelectList selectList = stmt.getSelectList();
    if (selectList.isDistinct()) {
        sql.append("DISTINCT");
    }

    List<String> columns = new ArrayList<>();
    for (SelectListItem item : selectList.getItems()) {
        if (item.isStar()) {
            // '*' expands to the concrete output columns of the underlying relation.
            for (Expr expr : SelectAnalyzer.expandStar(item, stmt.getRelation())) {
                columns.add(visit(expr) + " AS `" + AST2SQL.toString(expr) + "`");
            }
        } else if (item.getAlias() != null) {
            columns.add(visit(item.getExpr()) + " AS `" + item.getAlias() + "`");
        } else {
            // No user alias: the printed expression doubles as the alias.
            columns.add(visit(item.getExpr()) + " AS `" + AST2SQL.toString(item.getExpr()) + "`");
        }
    }
    sql.append(String.join(", ", columns));

    if (stmt.getRelation() != null) {
        sql.append(" FROM ").append(visit(stmt.getRelation()));
    }
    if (stmt.hasWhereClause()) {
        sql.append(" WHERE ").append(visit(stmt.getWhereClause()));
    }
    if (stmt.hasGroupByClause()) {
        sql.append(" GROUP BY ").append(stmt.getGroupByClause().toSql());
    }
    if (stmt.hasHavingClause()) {
        sql.append(" HAVING ").append(visit(stmt.getHavingClause()));
    }
    return sql.toString();
}
|
/**
 * Rebuilds the SQL text of a view definition from the analyzed AST.
 *
 * <p>NOTE(review): two fixes versus the original — (1) a duplicated {@code @Override}
 * annotation on visitSubquery was removed (not a repeatable annotation type, so a compile
 * error); (2) multi-row VALUES now separates its row tuples with commas.
 */
class ViewDefBuilderVisitor extends AstVisitor<String, Void> {
    private final ConnectContext session;

    public ViewDefBuilderVisitor(ConnectContext session) {
        this.session = session;
    }

    @Override
    public String visitNode(ParseNode node, Void context) {
        return "";
    }

    @Override
    public String visitQueryStatement(QueryStatement stmt, Void context) {
        StringBuilder sqlBuilder = new StringBuilder();
        QueryRelation queryRelation = stmt.getQueryRelation();
        if (queryRelation.hasWithClause()) {
            sqlBuilder.append("WITH ");
            List<String> cteStrings =
                    queryRelation.getCteRelations().stream().map(this::visit).collect(Collectors.toList());
            sqlBuilder.append(Joiner.on(",").join(cteStrings));
        }
        sqlBuilder.append(visit(queryRelation));
        if (queryRelation.hasOrderByClause()) {
            List<OrderByElement> sortClause = queryRelation.getOrderBy();
            sqlBuilder.append(" order by ");
            for (int i = 0; i < sortClause.size(); ++i) {
                sqlBuilder.append(visit(sortClause.get(i).getExpr()));
                sqlBuilder.append((sortClause.get(i).getIsAsc()) ? " asc" : " desc");
                sqlBuilder.append((i + 1 != sortClause.size()) ? ", " : "");
            }
        }
        if (queryRelation.getLimit() != null) {
            sqlBuilder.append(queryRelation.getLimit().toSql());
        }
        return sqlBuilder.toString();
    }

    @Override
    public String visitCTE(CTERelation relation, Void context) {
        StringBuilder sqlBuilder = new StringBuilder();
        sqlBuilder.append(relation.getName());
        // A CTE referenced in a FROM clause renders as a name (plus optional alias);
        // the defining occurrence renders its full "name (cols) AS (query)" form.
        if (relation.isResolvedInFromClause()) {
            if (relation.getAliasWithoutNameRewrite() != null) {
                sqlBuilder.append(" AS ").append(relation.getAliasWithoutNameRewrite());
            }
            return sqlBuilder.toString();
        }
        if (relation.getColumnOutputNames() != null) {
            sqlBuilder.append("(").append(Joiner.on(", ").join(relation.getColumnOutputNames())).append(")");
        }
        sqlBuilder.append(" AS (").append(visit(new QueryStatement(relation.getCteQuery()))).append(") ");
        return sqlBuilder.toString();
    }

    @Override
    public String visitSubquery(SubqueryRelation subquery, Void context) {
        StringBuilder sqlBuilder = new StringBuilder();
        sqlBuilder.append("(");
        sqlBuilder.append(visit(new QueryStatement(subquery.getQuery())));
        sqlBuilder.append(")");
        sqlBuilder.append(" ").append(subquery.getAlias());
        return sqlBuilder.toString();
    }

    @Override
    public String visitJoin(JoinRelation relation, Void context) {
        StringBuilder sqlBuilder = new StringBuilder();
        sqlBuilder.append(visit(relation.getLeft())).append(" ");
        sqlBuilder.append(relation.getType());
        if (relation.getJoinHint() != null && !relation.getJoinHint().isEmpty()) {
            sqlBuilder.append(" [").append(relation.getJoinHint()).append("]");
        }
        sqlBuilder.append(" ");
        sqlBuilder.append(visit(relation.getRight())).append(" ");
        if (relation.getUsingColNames() != null) {
            sqlBuilder.append("USING (").append(Joiner.on(", ").join(relation.getUsingColNames())).append(")");
        }
        if (relation.getOnPredicate() != null) {
            sqlBuilder.append("ON ").append(visit(relation.getOnPredicate()));
        }
        return sqlBuilder.toString();
    }

    @Override
    public String visitUnion(UnionRelation relation, Void context) {
        return processSetOp(relation);
    }

    @Override
    public String visitExcept(ExceptRelation relation, Void context) {
        return processSetOp(relation);
    }

    @Override
    public String visitIntersect(IntersectRelation relation, Void context) {
        return processSetOp(relation);
    }

    /** Shared rendering for UNION / EXCEPT / INTERSECT chains. */
    private String processSetOp(SetOperationRelation relation) {
        StringBuilder sqlBuilder = new StringBuilder();
        sqlBuilder.append(visit(relation.getRelations().get(0)));
        for (int i = 1; i < relation.getRelations().size(); ++i) {
            if (relation instanceof UnionRelation) {
                sqlBuilder.append(" UNION ");
            } else if (relation instanceof ExceptRelation) {
                sqlBuilder.append(" EXCEPT ");
            } else {
                sqlBuilder.append(" INTERSECT ");
            }
            sqlBuilder.append(relation.getQualifier() == SetQualifier.ALL ? "ALL " : "");
            Relation setChildRelation = relation.getRelations().get(i);
            // Nested set operations need parentheses to preserve grouping.
            if (setChildRelation instanceof SetOperationRelation) {
                sqlBuilder.append("(");
            }
            sqlBuilder.append(visit(setChildRelation));
            if (setChildRelation instanceof SetOperationRelation) {
                sqlBuilder.append(")");
            }
        }
        return sqlBuilder.toString();
    }

    @Override
    public String visitTable(TableRelation node, Void outerScope) {
        StringBuilder sqlBuilder = new StringBuilder();
        sqlBuilder.append(node.getName());
        if (node.getAliasWithoutNameRewrite() != null) {
            sqlBuilder.append(" AS ");
            sqlBuilder.append(node.getAliasWithoutNameRewrite());
        }
        return sqlBuilder.toString();
    }

    @Override
    public String visitValues(ValuesRelation node, Void scope) {
        StringBuilder sqlBuilder = new StringBuilder();
        if (node.getRows().size() == 1) {
            // A single-row VALUES is rendered as a constant SELECT with aliased columns.
            sqlBuilder.append("SELECT ");
            List<String> fieldLis = Lists.newArrayList();
            for (int i = 0; i < node.getRows().get(0).size(); ++i) {
                String field = visit(node.getRows().get(0).get(i));
                String alias = " AS `" + node.getColumnOutputNames().get(i) + "`";
                fieldLis.add(field + alias);
            }
            sqlBuilder.append(Joiner.on(", ").join(fieldLis));
        } else {
            sqlBuilder.append("VALUES(");
            for (int i = 0; i < node.getRows().size(); ++i) {
                // Fix: separate row tuples with commas. The original emitted
                // "VALUES((1)(2))" for multi-row input, which is not parseable SQL.
                if (i != 0) {
                    sqlBuilder.append(", ");
                }
                sqlBuilder.append("(");
                List<String> rowStrings =
                        node.getRows().get(i).stream().map(Expr::toSql).collect(Collectors.toList());
                sqlBuilder.append(Joiner.on(", ").join(rowStrings));
                sqlBuilder.append(")");
            }
            sqlBuilder.append(")");
        }
        return sqlBuilder.toString();
    }

    @Override
    public String visitTableFunction(TableFunctionRelation node, Void scope) {
        StringBuilder sqlBuilder = new StringBuilder();
        sqlBuilder.append(node.getFunctionName());
        sqlBuilder.append("(");
        sqlBuilder.append(node.getFunctionParams());
        sqlBuilder.append(")");
        return sqlBuilder.toString();
    }

    @Override
    public String visitExpression(Expr expr, Void context) {
        return expr.toSql();
    }

    @Override
    public String visitSlot(SlotRef expr, Void context) {
        return expr.toSql();
    }

    @Override
    public String visitExistsPredicate(ExistsPredicate node, Void context) {
        StringBuilder strBuilder = new StringBuilder();
        if (node.isNotExists()) {
            strBuilder.append("NOT ");
        }
        strBuilder.append("EXISTS ");
        strBuilder.append(visit(node.getChild(0)));
        return strBuilder.toString();
    }

    @Override
    public String visitInPredicate(InPredicate node, Void context) {
        StringBuilder strBuilder = new StringBuilder();
        String notStr = (node.isNotIn()) ? "NOT " : "";
        strBuilder.append(visit(node.getChild(0))).append(" ").append(notStr).append("IN (");
        // NOTE(review): list elements use toSql() while the left operand uses visit() —
        // confirm this asymmetry is intentional.
        for (int i = 1; i < node.getChildren().size(); ++i) {
            strBuilder.append(node.getChild(i).toSql());
            strBuilder.append((i + 1 != node.getChildren().size()) ? ", " : "");
        }
        strBuilder.append(")");
        return strBuilder.toString();
    }

    @Override
    public String visitBinaryPredicate(BinaryPredicate node, Void context) {
        return visit(node.getChild(0)) + " " + node.getOp().toString() + " " + visit(node.getChild(1));
    }

    @Override
    public String visitSubquery(Subquery subquery, Void context) {
        return "(" + visit(new QueryStatement(subquery.getQueryBlock())) + ")";
    }
}
|
/**
 * Builds view-definition SQL; inherits the generic SQL rendering from
 * {@code AST2SQL.SQLLabelBuilderImpl} and overrides a few leaf cases.
 *
 * <p>NOTE(review): a duplicated {@code @Override} annotation on visitExpression was
 * removed — {@code @Override} is not a repeatable annotation type, so the duplicate
 * was a compile error.
 */
class ViewDefBuilderVisitor extends AST2SQL.SQLLabelBuilderImpl {

    @Override
    public String visitNode(ParseNode node, Void context) {
        return "";
    }

    @Override
    public String visitExpression(Expr expr, Void context) {
        return expr.toSql();
    }

    @Override
    public String visitSlot(SlotRef expr, Void context) {
        return expr.toSql();
    }
}
|
`throw new NumberFormatException("invalid id: " + id + " " + e.getMessage());`
|
/**
 * Parses a query id of the form "<hi>-<lo>" (two '-'-separated hex-encoded unsigned
 * longs) into a TUniqueId.
 *
 * @param id the textual id, e.g. "a1b2c3-d4e5f6"
 * @return the parsed id
 * @throws NumberFormatException if the id is null/empty, does not split into exactly
 *         two parts, or either part is not valid unsigned hex
 */
public static TUniqueId parseTUniqueIdFromString(String id) {
    if (Strings.isNullOrEmpty(id)) {
        throw new NumberFormatException("invalid query id");
    }
    String[] parts = id.split("-");
    if (parts.length != 2) {
        throw new NumberFormatException("invalid query id");
    }
    TUniqueId uniqueId = new TUniqueId();
    try {
        // Unsigned parse: the full 64-bit range is valid for each half.
        uniqueId.setHi(Long.parseUnsignedLong(parts[0], 16));
        uniqueId.setLo(Long.parseUnsignedLong(parts[1], 16));
    } catch (NumberFormatException e) {
        throw new NumberFormatException("invalid query id:" + e.getMessage());
    }
    return uniqueId;
}
|
throw new NumberFormatException("invalid query id:" + e.getMessage());
|
/**
 * Parses a query id of the form "<hi>-<lo>" (two '-'-separated hex-encoded unsigned
 * longs) into a TUniqueId.
 *
 * @param id the textual id, e.g. "a1b2c3-d4e5f6"
 * @return the parsed id
 * @throws NumberFormatException if the id is null/empty, does not split into exactly
 *         two parts, or either part is not valid unsigned hex
 */
public static TUniqueId parseTUniqueIdFromString(String id) {
    if (Strings.isNullOrEmpty(id)) {
        throw new NumberFormatException("invalid query id");
    }
    String[] parts = id.split("-");
    if (parts.length != 2) {
        throw new NumberFormatException("invalid query id");
    }
    TUniqueId uniqueId = new TUniqueId();
    try {
        // Unsigned parse: the full 64-bit range is valid for each half.
        uniqueId.setHi(Long.parseUnsignedLong(parts[0], 16));
        uniqueId.setLo(Long.parseUnsignedLong(parts[1], 16));
    } catch (NumberFormatException e) {
        // Include the offending input so callers can tell which id failed to parse.
        throw new NumberFormatException("invalid id: " + id + " " + e.getMessage());
    }
    return uniqueId;
}
|
/**
 * Helpers for human-readable rendering of counts, durations, byte sizes, stack traces
 * and unique ids.
 */
class DebugUtil {
    public static final DecimalFormat DECIMAL_FORMAT_SCALE_3 = new DecimalFormat("0.000");
    // Decimal magnitude thresholds used by getUint (counts).
    public static int THOUSAND = 1000;
    public static int MILLION = 1000 * THOUSAND;
    public static int BILLION = 1000 * MILLION;
    // Time thresholds in milliseconds used by printTimeMs.
    public static int SECOND = 1000;
    public static int MINUTE = 60 * SECOND;
    public static int HOUR = 60 * MINUTE;
    // Binary size thresholds in bytes used by getByteUint.
    public static long KILOBYTE = 1024;
    public static long MEGABYTE = 1024 * KILOBYTE;
    public static long GIGABYTE = 1024 * MEGABYTE;
    public static long TERABYTE = 1024 * GIGABYTE;

    /**
     * Scales a count into a (value, unit) pair with unit one of "", "K", "M", "B".
     */
    public static Pair<Double, String> getUint(long value) {
        Double doubleValue = Double.valueOf(value);
        String unit = "";
        if (value >= BILLION) {
            unit = "B";
            doubleValue /= BILLION;
        } else if (value >= MILLION) {
            unit = "M";
            doubleValue /= MILLION;
        } else if (value >= THOUSAND) {
            unit = "K";
            doubleValue /= THOUSAND;
        }
        Pair<Double, String> returnValue = Pair.of(doubleValue, unit);
        return returnValue;
    }

    /**
     * Appends a compact duration string ("1h2m", "3s", "45ms") for the given
     * milliseconds to the builder.
     *
     * <p>NOTE(review): once an hour component is printed, seconds and milliseconds are
     * intentionally suppressed (and ms is suppressed once minutes appear) — confirm this
     * truncation is the desired display behavior.
     */
    public static void printTimeMs(long value, StringBuilder builder) {
        long newValue = value;
        if (newValue == 0) {
            builder.append("0");
        } else {
            boolean hour = false;
            boolean minute = false;
            if (newValue >= HOUR) {
                builder.append(newValue / HOUR).append("h");
                newValue %= HOUR;
                hour = true;
            }
            if (newValue >= MINUTE) {
                builder.append(newValue / MINUTE).append("m");
                newValue %= MINUTE;
                minute = true;
            }
            if (!hour && newValue >= SECOND) {
                builder.append(newValue / SECOND).append("s");
                newValue %= SECOND;
            }
            if (!hour && !minute) {
                builder.append(newValue).append("ms");
            }
        }
    }

    /** Convenience wrapper around {@link #printTimeMs} returning a String. */
    public static String getPrettyStringMs(long timestampMs) {
        StringBuilder builder = new StringBuilder();
        printTimeMs(timestampMs, builder);
        return builder.toString();
    }

    /**
     * Scales a byte count into a (value, unit) pair with unit one of "", "B", "KB",
     * "MB", "GB", "TB".
     *
     * <p>NOTE(review): thresholds use strict '>' (unlike getUint's '>='), so e.g.
     * exactly 1024 bytes renders as "1024 B" — confirm intended.
     */
    public static Pair<Double, String> getByteUint(long value) {
        Double doubleValue = Double.valueOf(value);
        String unit = "";
        if (value == 0) {
            unit = "";
        } else if (value > TERABYTE) {
            unit = "TB";
            doubleValue /= TERABYTE;
        } else if (value > GIGABYTE) {
            unit = "GB";
            doubleValue /= GIGABYTE;
        } else if (value > MEGABYTE) {
            unit = "MB";
            doubleValue /= MEGABYTE;
        } else if (value > KILOBYTE) {
            unit = "KB";
            doubleValue /= KILOBYTE;
        } else {
            unit = "B";
        }
        Pair<Double, String> returnValue = Pair.of(doubleValue, unit);
        return returnValue;
    }

    /** Formats a byte count as e.g. "1.500 MB" with three decimal places. */
    public static String printByteWithUnit(long value) {
        Pair<Double, String> quotaUnitPair = getByteUint(value);
        String readableQuota = DebugUtil.DECIMAL_FORMAT_SCALE_3.format(quotaUnitPair.first) + " "
                + quotaUnitPair.second;
        return readableQuota;
    }

    /** Prints a TUniqueId as "<hi-hex>-<lo-hex>"; returns "" for null. */
    public static String printId(final TUniqueId id) {
        if (id == null) {
            return "";
        }
        StringBuilder builder = new StringBuilder();
        builder.append(Long.toHexString(id.hi)).append("-").append(Long.toHexString(id.lo));
        return builder.toString();
    }

    /** Prints a UUID in the same "<hi-hex>-<lo-hex>" form as a TUniqueId. */
    public static String printId(final UUID id) {
        TUniqueId tUniqueId = new TUniqueId(id.getMostSignificantBits(), id.getLeastSignificantBits());
        StringBuilder builder = new StringBuilder();
        builder.append(Long.toHexString(tUniqueId.hi)).append("-").append(Long.toHexString(tUniqueId.lo));
        return builder.toString();
    }

    /** Prints a protobuf PUniqueId as "<hi-hex>-<lo-hex>". */
    public static String printId(final Types.PUniqueId id) {
        StringBuilder builder = new StringBuilder();
        builder.append(Long.toHexString(id.getHi())).append("-").append(Long.toHexString(id.getLo()));
        return builder.toString();
    }

    /** Returns the full stack trace of the exception as a String. */
    public static String getStackTrace(Exception e) {
        StringWriter sw = new StringWriter();
        e.printStackTrace(new PrintWriter(sw));
        return sw.toString();
    }
}
|
class DebugUtil {
public static final DecimalFormat DECIMAL_FORMAT_SCALE_3 = new DecimalFormat("0.000");
public static int THOUSAND = 1000;
public static int MILLION = 1000 * THOUSAND;
public static int BILLION = 1000 * MILLION;
public static int SECOND = 1000;
public static int MINUTE = 60 * SECOND;
public static int HOUR = 60 * MINUTE;
public static long KILOBYTE = 1024;
public static long MEGABYTE = 1024 * KILOBYTE;
public static long GIGABYTE = 1024 * MEGABYTE;
public static long TERABYTE = 1024 * GIGABYTE;
public static Pair<Double, String> getUint(long value) {
Double doubleValue = Double.valueOf(value);
String unit = "";
if (value >= BILLION) {
unit = "B";
doubleValue /= BILLION;
} else if (value >= MILLION) {
unit = "M";
doubleValue /= MILLION;
} else if (value >= THOUSAND) {
unit = "K";
doubleValue /= THOUSAND;
}
Pair<Double, String> returnValue = Pair.of(doubleValue, unit);
return returnValue;
}
public static void printTimeMs(long value, StringBuilder builder) {
long newValue = value;
if (newValue == 0) {
builder.append("0");
} else {
boolean hour = false;
boolean minute = false;
if (newValue >= HOUR) {
builder.append(newValue / HOUR).append("h");
newValue %= HOUR;
hour = true;
}
if (newValue >= MINUTE) {
builder.append(newValue / MINUTE).append("m");
newValue %= MINUTE;
minute = true;
}
if (!hour && newValue >= SECOND) {
builder.append(newValue / SECOND).append("s");
newValue %= SECOND;
}
if (!hour && !minute) {
builder.append(newValue).append("ms");
}
}
}
public static String getPrettyStringMs(long timestampMs) {
StringBuilder builder = new StringBuilder();
printTimeMs(timestampMs, builder);
return builder.toString();
}
public static Pair<Double, String> getByteUint(long value) {
Double doubleValue = Double.valueOf(value);
String unit = "";
if (value == 0) {
unit = "";
} else if (value > TERABYTE) {
unit = "TB";
doubleValue /= TERABYTE;
} else if (value > GIGABYTE) {
unit = "GB";
doubleValue /= GIGABYTE;
} else if (value > MEGABYTE) {
unit = "MB";
doubleValue /= MEGABYTE;
} else if (value > KILOBYTE) {
unit = "KB";
doubleValue /= KILOBYTE;
} else {
unit = "B";
}
Pair<Double, String> returnValue = Pair.of(doubleValue, unit);
return returnValue;
}
public static String printByteWithUnit(long value) {
Pair<Double, String> quotaUnitPair = getByteUint(value);
String readableQuota = DebugUtil.DECIMAL_FORMAT_SCALE_3.format(quotaUnitPair.first) + " "
+ quotaUnitPair.second;
return readableQuota;
}
public static String printId(final TUniqueId id) {
if (id == null) {
return "";
}
StringBuilder builder = new StringBuilder();
builder.append(Long.toHexString(id.hi)).append("-").append(Long.toHexString(id.lo));
return builder.toString();
}
public static String printId(final UUID id) {
TUniqueId tUniqueId = new TUniqueId(id.getMostSignificantBits(), id.getLeastSignificantBits());
StringBuilder builder = new StringBuilder();
builder.append(Long.toHexString(tUniqueId.hi)).append("-").append(Long.toHexString(tUniqueId.lo));
return builder.toString();
}
public static String printId(final Types.PUniqueId id) {
StringBuilder builder = new StringBuilder();
builder.append(Long.toHexString(id.getHi())).append("-").append(Long.toHexString(id.getLo()));
return builder.toString();
}
/**
 * Captures the full stack trace of the given exception as a string,
 * in the same format printStackTrace() writes to stderr.
 *
 * @param e the exception to render
 * @return the stack trace text
 */
public static String getStackTrace(Exception e) {
    StringWriter buffer = new StringWriter();
    PrintWriter printer = new PrintWriter(buffer);
    e.printStackTrace(printer);
    printer.flush();
    return buffer.toString();
}
}
|
Missing space between text end and URL
|
private void validateMajorVersion(TenantAndApplicationId id, Submission submission) {
submission.applicationPackage().deploymentSpec().majorVersion().ifPresent(explicitMajor -> {
if (explicitMajor < 8)
controller.notificationsDb().setNotification(NotificationSource.from(id),
Type.applicationPackage,
Notification.Level.warning,
"Vespa 7 will soon be end of life, upgrade to Vespa 8 now:" +
"https:
else
controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.applicationPackage);
});
}
|
"Vespa 7 will soon be end of life, upgrade to Vespa 8 now:" +
|
private void validateMajorVersion(TenantAndApplicationId id, Submission submission) {
submission.applicationPackage().deploymentSpec().majorVersion().ifPresent(explicitMajor -> {
if (explicitMajor < 8)
controller.notificationsDb().setNotification(NotificationSource.from(id),
Type.applicationPackage,
Notification.Level.warning,
"Vespa 7 will soon be end of life, upgrade to Vespa 8 now:" +
"https:
else
controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.applicationPackage);
});
}
|
/**
 * Coordinates the lifecycle of deployment job runs: starting and aborting runs,
 * recording logs and test reports, keeping a bounded run history, and cleaning up
 * data for applications that no longer exist. All mutations of run state go
 * through curator locks, to serialize concurrent controllers.
 */
class JobController {

    /** Runs older than this are pruned from history (see finish() for the single-success exception). */
    public static final Duration maxHistoryAge = Duration.ofDays(60);

    private static final Logger log = Logger.getLogger(JobController.class.getName());

    // Number of historic runs retained per job; larger in CD systems.
    private final int historyLength;
    private final Controller controller;
    private final CuratorDb curator;
    private final BufferedLogStore logs;
    private final TesterCloud cloud;
    private final JobMetrics metric;

    // Callback used to advance a run; a no-op until installed via setRunner.
    private final AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { });

    public JobController(Controller controller) {
        this.historyLength = controller.system().isCd() ? 256 : 64;
        this.controller = controller;
        this.curator = controller.curator();
        this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore());
        this.cloud = controller.serviceRegistry().testerCloud();
        this.metric = new JobMetrics(controller.metric(), controller::system);
    }

    /** The tester cloud used for driving test runs. */
    public TesterCloud cloud() { return cloud; }

    /** Maximum number of historic runs kept per job. */
    public int historyLength() { return historyLength; }

    /** Installs the callback which advances active runs. */
    public void setRunner(Consumer<Run> runner) { this.runner.set(runner); }

    /** Rewrite all job data with the newest format. */
    public void updateStorage() {
        for (ApplicationId id : instances())
            for (JobType type : jobs(id)) {
                locked(id, type, runs -> {
                    // Reading and re-writing the last run migrates it to the current serialization format.
                    curator.readLastRun(id, type).ifPresent(curator::writeLastRun);
                });
            }
    }

    /** Returns all entries currently logged for the given run. */
    public Optional<RunLog> details(RunId id) {
        return details(id, -1);
    }

    /** Returns the logged entries for the given run, which are after the given id threshold. */
    public Optional<RunLog> details(RunId id, long after) {
        try (Mutex __ = curator.lock(id.application(), id.type())) {
            Run run = runs(id.application(), id.type()).get(id);
            if (run == null)
                return Optional.empty();

            // Active runs are read from the still-appending buffer; finished runs from the archive.
            return active(id).isPresent()
                    ? Optional.of(logs.readActive(id.application(), id.type(), after))
                    : logs.readFinished(id, after);
        }
    }

    /** Stores the given log entries for the given run and step. */
    public void log(RunId id, Step step, List<LogEntry> entries) {
        locked(id, __ -> {
            logs.append(id.application(), id.type(), step, entries, true);
            return __;
        });
    }

    /** Stores the given log messages for the given run and step. */
    public void log(RunId id, Step step, Level level, List<String> messages) {
        log(id, step, messages.stream()
                              .map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message))
                              .collect(toList()));
    }

    /** Stores the given log message for the given run and step. */
    public void log(RunId id, Step step, Level level, String message) {
        log(id, step, level, Collections.singletonList(message));
    }

    /** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */
    public void updateVespaLog(RunId id) {
        locked(id, run -> {
            if ( ! run.hasStep(copyVespaLogs))
                return run;

            storeVespaLogs(id);

            ZoneId zone = id.type().zone();
            Optional<Deployment> deployment = Optional.ofNullable(controller.applications().requireInstance(id.application())
                                                                            .deployments().get(zone));
            // Nothing to fetch when the deployment is gone, or predates this run.
            if (deployment.isEmpty() || deployment.get().at().isBefore(run.start()))
                return run;

            List<LogEntry> log;
            Instant deployedAt;
            Instant from;
            if ( ! run.id().type().isProduction()) {
                deployedAt = run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal)).flatMap(StepInfo::startTime).orElseThrow();
                // Continue from the last seen entry, or from slightly before deployment to catch early entries.
                from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10);
                log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer()
                                                       .getLogs(new DeploymentId(id.application(), zone),
                                                                Map.of("from", Long.toString(from.toEpochMilli()))),
                                             from);
            }
            else
                log = List.of();

            if (id.type().isTest()) {
                deployedAt = run.stepInfo(installTester).flatMap(StepInfo::startTime).orElseThrow();
                from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10);
                List<LogEntry> testerLog = LogEntry.parseVespaLog(controller.serviceRegistry().configServer()
                                                                            .getLogs(new DeploymentId(id.tester().id(), zone),
                                                                                     Map.of("from", Long.toString(from.toEpochMilli()))),
                                                                  from);

                // Exclude the very newest entries, so the merged, sorted stream stays stable across fetches.
                Instant justNow = controller.clock().instant().minusSeconds(2);
                log = Stream.concat(log.stream(), testerLog.stream())
                            .filter(entry -> entry.at().isBefore(justNow))
                            .sorted(comparing(LogEntry::at))
                            .collect(toUnmodifiableList());
            }
            if (log.isEmpty())
                return run;

            logs.append(id.application(), id.type(), Step.copyVespaLogs, log, false);
            return run.with(log.get(log.size() - 1).at());
        });
    }

    /** Returns Vespa logs for the run: archived when log copying succeeded, else read live from the log server. */
    public InputStream getVespaLogs(RunId id, long fromMillis, boolean tester) {
        Run run = run(id);
        return run.stepStatus(copyVespaLogs).map(succeeded::equals).orElse(false)
                ? controller.serviceRegistry().runDataStore().getLogs(id, tester)
                : getVespaLogsFromLogserver(run, fromMillis, tester).orElse(InputStream.nullInputStream());
    }

    /** Start of the relevant install step, shifted 10 s back — presumably slack for clock skew; TODO confirm. */
    public static Optional<Instant> deploymentCompletedAt(Run run, boolean tester) {
        return (tester ? run.stepInfo(installTester)
                       : run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal)))
                .flatMap(StepInfo::startTime).map(start -> start.minusSeconds(10));
    }

    /** Copies Vespa logs for the run (and its tester, if any) from the log server to the run data store. */
    public void storeVespaLogs(RunId id) {
        Run run = run(id);
        if ( ! id.type().isProduction()) {
            getVespaLogsFromLogserver(run, 0, false).ifPresent(logs -> {
                try (logs) {
                    controller.serviceRegistry().runDataStore().putLogs(id, false, logs);
                }
                catch (IOException e) {
                    throw new UncheckedIOException(e);
                }
            });
        }
        if (id.type().isTest()) {
            getVespaLogsFromLogserver(run, 0, true).ifPresent(logs -> {
                try (logs) {
                    controller.serviceRegistry().runDataStore().putLogs(id, true, logs);
                }
                catch (IOException e) {
                    throw new UncheckedIOException(e);
                }
            });
        }
    }

    // Reads logs from the config server for the deployment (or its tester), bounded by deployment time and run end.
    private Optional<InputStream> getVespaLogsFromLogserver(Run run, long fromMillis, boolean tester) {
        return deploymentCompletedAt(run, tester).map(at ->
                controller.serviceRegistry().configServer().getLogs(new DeploymentId(tester ? run.id().tester().id() : run.id().application(),
                                                                                     run.id().type().zone()),
                                                                    Map.of("from", Long.toString(Math.max(fromMillis, at.toEpochMilli())),
                                                                           "to", Long.toString(run.end().orElse(controller.clock().instant()).toEpochMilli()))));
    }

    /** Fetches any new test log entries, and records the id of the last of these, for continuation. */
    public void updateTestLog(RunId id) {
        locked(id, run -> {
            Optional<Step> step = Stream.of(endStagingSetup, endTests)
                                        .filter(run.readySteps()::contains)
                                        .findAny();
            if (step.isEmpty())
                return run;

            List<LogEntry> entries = cloud.getLog(new DeploymentId(id.tester().id(), id.type().zone()),
                                                  run.lastTestLogEntry());
            if (entries.isEmpty())
                return run;

            logs.append(id.application(), id.type(), step.get(), entries, false);
            return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong());
        });
    }

    /** Stores the test report from the tester for the given run, when one is available. */
    public void updateTestReport(RunId id) {
        locked(id, run -> {
            Optional<TestReport> report = cloud.getTestReport(new DeploymentId(id.tester().id(), id.type().zone()));
            if (report.isEmpty()) {
                return run;
            }
            logs.writeTestReport(id, report.get());
            return run;
        });
    }

    /** Returns the stored test reports for the given run, if any. */
    public Optional<String> getTestReports(RunId id) {
        return logs.readTestReports(id);
    }

    /** Stores the given certificate as the tester certificate for this run, or throws if it's already set. */
    public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) {
        locked(id, run -> run.with(testerCertificate));
    }

    /** Returns a list of all instances of applications which have registered. */
    public List<ApplicationId> instances() {
        return controller.applications().readable().stream()
                         .flatMap(application -> application.instances().values().stream())
                         .map(Instance::id)
                         .collect(toUnmodifiableList());
    }

    /** Returns all job types which have been run for the given application. */
    private List<JobType> jobs(ApplicationId id) {
        return JobType.allIn(controller.zoneRegistry()).stream()
                      .filter(type -> last(id, type).isPresent())
                      .collect(toUnmodifiableList());
    }

    /** Returns an immutable map of all known runs for the given application and job type. */
    public NavigableMap<RunId, Run> runs(JobId id) {
        return runs(id.application(), id.type());
    }

    /** Lists the start time of non-redeployment runs of the given job, in order of increasing age. */
    public List<Instant> jobStarts(JobId id) {
        return runs(id).descendingMap().values().stream()
                       .filter(run -> ! run.isRedeployment())
                       .map(Run::start)
                       .collect(toUnmodifiableList());
    }

    /** Returns when given deployment last started deploying, falling back to time of deployment if it cannot be determined from job runs */
    public Instant lastDeploymentStart(ApplicationId instanceId, Deployment deployment) {
        return jobStarts(new JobId(instanceId, JobType.deploymentTo(deployment.zone()))).stream()
                .findFirst()
                .orElseGet(deployment::at);
    }

    /** Returns an immutable map of all known runs for the given application and job type. */
    public NavigableMap<RunId, Run> runs(ApplicationId id, JobType type) {
        ImmutableSortedMap.Builder<RunId, Run> runs = ImmutableSortedMap.orderedBy(Comparator.comparing(RunId::number));
        Optional<Run> last = last(id, type);
        // The last run is stored separately, and its copy overrides any stale historic copy of itself.
        curator.readHistoricRuns(id, type).forEach((runId, run) -> {
            if (last.isEmpty() || ! runId.equals(last.get().id()))
                runs.put(runId, run);
        });
        last.ifPresent(run -> runs.put(run.id(), run));
        return runs.build();
    }

    /** Returns the run with the given id, or throws if no such run exists. */
    public Run run(RunId id) {
        return runs(id.application(), id.type()).values().stream()
                                                .filter(run -> run.id().equals(id))
                                                .findAny()
                                                .orElseThrow(() -> new NoSuchElementException("no run with id '" + id + "' exists"));
    }

    /** Returns the last run of the given type, for the given application, if one has been run. */
    public Optional<Run> last(JobId job) {
        return curator.readLastRun(job.application(), job.type());
    }

    /** Returns the last run of the given type, for the given application, if one has been run. */
    public Optional<Run> last(ApplicationId id, JobType type) {
        return curator.readLastRun(id, type);
    }

    /** Returns the last completed of the given job. */
    public Optional<Run> lastCompleted(JobId id) {
        return JobStatus.lastCompleted(runs(id));
    }

    /** Returns the first failing of the given job. */
    public Optional<Run> firstFailing(JobId id) {
        return JobStatus.firstFailing(runs(id));
    }

    /** Returns the last success of the given job. */
    public Optional<Run> lastSuccess(JobId id) {
        return JobStatus.lastSuccess(runs(id));
    }

    /** Returns the run with the given id, provided it is still active. */
    public Optional<Run> active(RunId id) {
        return last(id.application(), id.type())
                .filter(run -> ! run.hasEnded())
                .filter(run -> run.id().equals(id));
    }

    /** Returns a list of all active runs. */
    public List<Run> active() {
        return controller.applications().idList().stream()
                         .flatMap(id -> active(id).stream())
                         .toList();
    }

    /** Returns a list of all active runs for the given application. */
    public List<Run> active(TenantAndApplicationId id) {
        return controller.applications().requireApplication(id).instances().keySet().stream()
                         .flatMap(name -> JobType.allIn(controller.zoneRegistry()).stream()
                                                 .map(type -> last(id.instance(name), type))
                                                 .flatMap(Optional::stream)
                                                 .filter(run -> ! run.hasEnded()))
                         .toList();
    }

    /** Returns a list of all active runs for the given instance. */
    public List<Run> active(ApplicationId id) {
        return JobType.allIn(controller.zoneRegistry()).stream()
                      .map(type -> last(id, type))
                      .flatMap(Optional::stream)
                      .filter(run -> ! run.hasEnded())
                      .toList();
    }

    /** Returns the job status of the given job, possibly empty. */
    public JobStatus jobStatus(JobId id) {
        return new JobStatus(id, runs(id));
    }

    /** Returns the deployment status of the given application. */
    public DeploymentStatus deploymentStatus(Application application) {
        VersionStatus versionStatus = controller.readVersionStatus();
        return deploymentStatus(application, versionStatus, controller.systemVersion(versionStatus));
    }

    // Builds a DeploymentStatus from an already-read version status, to avoid repeated reads.
    private DeploymentStatus deploymentStatus(Application application, VersionStatus versionStatus, Version systemVersion) {
        return new DeploymentStatus(application,
                                    this::jobStatus,
                                    controller.zoneRegistry(),
                                    versionStatus,
                                    systemVersion,
                                    instance -> controller.applications().versionCompatibility(application.id().instance(instance)),
                                    controller.clock().instant());
    }

    /** Adds deployment status to each of the given applications. */
    public DeploymentStatusList deploymentStatuses(ApplicationList applications, VersionStatus versionStatus) {
        Version systemVersion = controller.systemVersion(versionStatus);
        return DeploymentStatusList.from(applications.asList().stream()
                                                     .map(application -> deploymentStatus(application, versionStatus, systemVersion))
                                                     .toList());
    }

    /** Adds deployment status to each of the given applications. Calling this will do an implicit read of the controller's version status */
    public DeploymentStatusList deploymentStatuses(ApplicationList applications) {
        VersionStatus versionStatus = controller.readVersionStatus();
        return deploymentStatuses(applications, versionStatus);
    }

    /** Changes the status of the given step, for the given run, provided it is still active. */
    public void update(RunId id, RunStatus status, LockedStep step) {
        locked(id, run -> run.with(status, step));
    }

    /**
     * Changes the status of the given run to inactive, and stores it as a historic run.
     * Throws TimeoutException if some step in this job is still being run.
     */
    public void finish(RunId id) throws TimeoutException {
        Deque<Mutex> locks = new ArrayDeque<>();
        try {
            // Locking the report step and all its prerequisites proves no step of this run is still executing.
            Run unlockedRun = run(id);
            locks.push(curator.lock(id.application(), id.type(), report));
            for (Step step : report.allPrerequisites(unlockedRun.steps().keySet()))
                locks.push(curator.lock(id.application(), id.type(), step));

            locked(id, run -> {
                // A run marked for reset starts over instead of finishing.
                if (run.status() == reset) {
                    for (Step step : run.steps().keySet())
                        // NOTE(review): this line is truncated in the source (unterminated string literal) —
                        // restore the original log message from version control.
                        log(id, step, INFO, List.of("
                    return run.reset();
                }
                if (run.status() == running && run.stepStatuses().values().stream().anyMatch(not(succeeded::equals))) return run;

                Run finishedRun = run.finished(controller.clock().instant());
                locked(id.application(), id.type(), runs -> {
                    runs.put(run.id(), finishedRun);
                    long last = id.number();
                    long successes = runs.values().stream().filter(Run::hasSucceeded).count();
                    var oldEntries = runs.entrySet().iterator();
                    // Prune runs beyond the history length or age limit.
                    for (var old = oldEntries.next();
                            old.getKey().number() <= last - historyLength
                         || old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge));
                         old = oldEntries.next()) {

                        // NOTE(review): keeps the single success alive, but the explicit oldEntries.next() here
                        // advances past an extra entry before the loop's update clause advances again — confirm
                        // this cannot skip entries that should be pruned, or exhaust the iterator.
                        if (   successes == 1
                            && old.getValue().hasSucceeded()
                            && ! old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge))) {
                            oldEntries.next();
                            continue;
                        }

                        logs.delete(old.getKey());
                        oldEntries.remove();
                    }
                });
                logs.flush(id);
                metric.jobFinished(run.id().job(), finishedRun.status());

                pruneRevisions(unlockedRun);

                return finishedRun;
            });
        }
        finally {
            // Release every acquired lock, even when one of them fails to close.
            for (Mutex lock : locks) {
                try {
                    lock.close();
                } catch (Throwable t) {
                    log.log(WARNING, "Failed to close the lock " + lock + ": the lock may or may not " +
                                     "have been released in ZooKeeper, and if not this controller " +
                                     "must be restarted to release the lock", t);
                }
            }
        }
    }

    /** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */
    public void abort(RunId id, String reason) {
        locked(id, run -> {
            run.stepStatuses().entrySet().stream()
               .filter(entry -> entry.getValue() == unfinished)
               .forEach(entry -> log(id, entry.getKey(), INFO, "Aborting run: " + reason));
            return run.aborted();
        });
    }

    /** Accepts and stores a new application package and test jar pair under a generated application version key. */
    public ApplicationVersion submit(TenantAndApplicationId id, Submission submission, long projectId) {
        ApplicationController applications = controller.applications();
        AtomicReference<ApplicationVersion> version = new AtomicReference<>();
        applications.lockApplicationOrThrow(id, application -> {
            Optional<ApplicationVersion> previousVersion = application.get().revisions().last();
            Optional<ApplicationPackage> previousPackage = previousVersion.flatMap(previous -> applications.applicationStore().find(id.tenant(), id.application(), previous.buildNumber().getAsLong()))
                                                                          .map(ApplicationPackage::new);
            long previousBuild = previousVersion.map(latestVersion -> latestVersion.buildNumber().getAsLong()).orElse(0L);
            version.set(submission.toApplicationVersion(1 + previousBuild));

            // Store the package, its test package, and a diff against the previous package, for inspection.
            byte[] diff = previousPackage.map(previous -> ApplicationPackageDiff.diff(previous, submission.applicationPackage()))
                                         .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(submission.applicationPackage()));
            applications.applicationStore().put(id.tenant(),
                                                id.application(),
                                                version.get().id(),
                                                submission.applicationPackage().zippedContent(),
                                                submission.testPackage(),
                                                diff);
            applications.applicationStore().putMeta(id.tenant(),
                                                    id.application(),
                                                    controller.clock().instant(),
                                                    submission.applicationPackage().metaDataZip());

            application = application.withProjectId(projectId == -1 ? OptionalLong.empty() : OptionalLong.of(projectId));
            application = application.withRevisions(revisions -> revisions.with(version.get()));
            application = withPrunedPackages(application, version.get().id());

            validate(id, submission);

            applications.storeWithUpdatedConfig(application, submission.applicationPackage());
            if (application.get().projectId().isPresent())
                applications.deploymentTrigger().triggerNewRevision(id);
        });
        return version.get();
    }

    // Runs all submission validations, each of which updates notifications for problems it finds.
    private void validate(TenantAndApplicationId id, Submission submission) {
        validateTests(id, submission);
        validateParentVersion(id, submission);
        validateMajorVersion(id, submission);
    }

    // Warns when the test package has problems; clears the warning otherwise.
    private void validateTests(TenantAndApplicationId id, Submission submission) {
        TestSummary testSummary = TestPackage.validateTests(submission.applicationPackage().deploymentSpec(), submission.testPackage());
        if (testSummary.problems().isEmpty())
            controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.testPackage);
        else
            controller.notificationsDb().setNotification(NotificationSource.from(id),
                                                         Type.testPackage,
                                                         Notification.Level.warning,
                                                         testSummary.problems());
    }

    // Warns when the package was compiled against an older major than the current system; clears the warning otherwise.
    private void validateParentVersion(TenantAndApplicationId id, Submission submission) {
        submission.applicationPackage().parentVersion().ifPresent(parent -> {
            if (parent.getMajor() < controller.readSystemVersion().getMajor())
                controller.notificationsDb().setNotification(NotificationSource.from(id),
                                                             Type.submission,
                                                             Notification.Level.warning,
                                                             "Parent version used to compile the application is on a " +
                                                             "lower major version than the current Vespa Cloud version");
            else
                controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.submission);
        });
    }

    // Removes stored packages for revisions older than the oldest one still deployed or targeted by a change.
    private LockedApplication withPrunedPackages(LockedApplication application, RevisionId latest){
        TenantAndApplicationId id = application.get().id();
        Application wrapped = application.get();
        RevisionId oldestDeployed = application.get().oldestDeployedRevision()
                                               .or(() -> wrapped.instances().values().stream()
                                                                .flatMap(instance -> instance.change().revision().stream())
                                                                .min(naturalOrder()))
                                               .orElse(latest);
        controller.applications().applicationStore().prune(id.tenant(), id.application(), oldestDeployed);

        for (ApplicationVersion version : application.get().revisions().withPackage())
            if (version.id().compareTo(oldestDeployed) < 0)
                application = application.withRevisions(revisions -> revisions.with(version.withoutPackage()));
        return application;
    }

    /** Forget revisions no longer present in any relevant job history. */
    private void pruneRevisions(Run run) {
        TenantAndApplicationId applicationId = TenantAndApplicationId.from(run.id().application());
        boolean isProduction = run.versions().targetRevision().isProduction();
        // Find the oldest revision still referenced by the relevant job histories, and forget everything older.
        (isProduction ? deploymentStatus(controller.applications().requireApplication(applicationId)).jobs().asList().stream()
                      : Stream.of(jobStatus(run.id().job())))
                .flatMap(jobs -> jobs.runs().values().stream())
                .map(r -> r.versions().targetRevision())
                .filter(id -> id.isProduction() == isProduction)
                .min(naturalOrder())
                .ifPresent(oldestRevision -> {
                    controller.applications().lockApplicationOrThrow(applicationId, application -> {
                        if (isProduction) {
                            controller.applications().applicationStore().pruneDiffs(run.id().application().tenant(), run.id().application().application(), oldestRevision.number());
                            controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision)));
                        }
                        else {
                            controller.applications().applicationStore().pruneDevDiffs(new DeploymentId(run.id().application(), run.id().job().type().zone()), oldestRevision.number());
                            controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision, run.id().job())));
                        }
                    });
                });
    }

    /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */
    public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, Optional<String> reason) {
        start(id, type, versions, isRedeployment, JobProfile.of(type), reason);
    }

    /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */
    public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, JobProfile profile, Optional<String> reason) {
        ApplicationVersion revision = controller.applications().requireApplication(TenantAndApplicationId.from(id)).revisions().get(versions.targetRevision());
        if (revision.compileVersion()
                    .map(version -> controller.applications().versionCompatibility(id).refuse(versions.targetPlatform(), version))
                    .orElse(false))
            throw new IllegalArgumentException("Will not start " + type + " for " + id + " with incompatible platform version (" +
                                               versions.targetPlatform() + ") " + "and compile versions (" + revision.compileVersion().get() + ")");

        locked(id, type, __ -> {
            Optional<Run> last = last(id, type);
            if (last.flatMap(run -> active(run.id())).isPresent())
                throw new IllegalArgumentException("Cannot start " + type + " for " + id + "; it is already running!");

            RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1);
            curator.writeLastRun(Run.initial(newId, versions, isRedeployment, controller.clock().instant(), profile, reason));
            metric.jobStarted(newId.job());
        });
    }

    /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */
    public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) {
        deploy(id, type, platform, applicationPackage, false);
    }

    /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment.*/
    public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage, boolean dryRun) {
        if ( ! controller.zoneRegistry().hasZone(type.zone()))
            throw new IllegalArgumentException(type.zone() + " is not present in this system");

        // Ensure the instance exists before deploying to it.
        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            if ( ! application.get().instances().containsKey(id.instance()))
                application = controller.applications().withNewInstance(application, id);
            controller.applications().store(application);
        });

        DeploymentId deploymentId = new DeploymentId(id, type.zone());
        Optional<Run> lastRun = last(id, type);
        lastRun.filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id(), Duration.ofMinutes(2)));

        // Dev revisions are numbered sequentially per job, continuing from the previous run's target revision.
        long build = 1 + lastRun.map(run -> run.versions().targetRevision().number()).orElse(0L);
        RevisionId revisionId = RevisionId.forDevelopment(build, new JobId(id, type));
        ApplicationVersion version = ApplicationVersion.forDevelopment(revisionId, applicationPackage.compileVersion());

        byte[] diff = getDiff(applicationPackage, deploymentId, lastRun);

        controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            controller.applications().applicationStore().putDev(deploymentId, version.id(), applicationPackage.zippedContent(), diff);
            Version targetPlatform = platform.orElseGet(() -> findTargetPlatform(applicationPackage, deploymentId, application.get().get(id.instance())));
            controller.applications().store(application.withRevisions(revisions -> revisions.with(version)));
            start(id,
                  type,
                  new Versions(targetPlatform, version.id(), lastRun.map(run -> run.versions().targetPlatform()), lastRun.map(run -> run.versions().targetRevision())),
                  false,
                  dryRun ? JobProfile.developmentDryRun : JobProfile.development,
                  Optional.empty());
        });

        // Kick the run off immediately, rather than waiting for the next maintenance tick.
        locked(id, type, __ -> {
            runner.get().accept(last(id, type).get());
        });
    }

    /* Application package diff against previous version, or against empty version if previous does not exist or is invalid */
    private byte[] getDiff(ApplicationPackage applicationPackage, DeploymentId deploymentId, Optional<Run> lastRun) {
        return lastRun.map(run -> run.versions().targetRevision())
                      .map(prevVersion -> {
                          ApplicationPackage previous;
                          try {
                              previous = new ApplicationPackage(controller.applications().applicationStore().get(deploymentId, prevVersion));
                          } catch (IllegalArgumentException e) {
                              // The previous package is missing or unreadable; diff against nothing instead.
                              return ApplicationPackageDiff.diffAgainstEmpty(applicationPackage);
                          }
                          return ApplicationPackageDiff.diff(previous, applicationPackage);
                      })
                      .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(applicationPackage));
    }

    // Picks the newest deployable platform compatible with the package's compile version and its pinned major, if any.
    private Version findTargetPlatform(ApplicationPackage applicationPackage, DeploymentId id, Optional<Instance> instance) {
        List<Version> versions = controller.readVersionStatus().deployableVersions().stream()
                                           .map(VespaVersion::versionNumber)
                                           .collect(toList());
        // The currently deployed platform, if any, is always a candidate.
        instance.map(Instance::deployments)
                .map(deployments -> deployments.get(id.zoneId()))
                .map(Deployment::version)
                .ifPresent(versions::add);
        if (versions.isEmpty())
            throw new IllegalStateException("no deployable platform version found in the system");

        VersionCompatibility compatibility = controller.applications().versionCompatibility(id.applicationId());
        List<Version> compatibleVersions = new ArrayList<>();
        for (Version target : reversed(versions))
            if (applicationPackage.compileVersion().isEmpty() || compatibility.accept(target, applicationPackage.compileVersion().get()))
                compatibleVersions.add(target);
        if (compatibleVersions.isEmpty())
            throw new IllegalArgumentException("no platforms are compatible with compile version " + applicationPackage.compileVersion().get());

        Optional<Integer> major = applicationPackage.deploymentSpec().majorVersion();
        List<Version> versionOnRightMajor = new ArrayList<>();
        for (Version target : reversed(versions))
            if (major.isEmpty() || major.get() == target.getMajor())
                versionOnRightMajor.add(target);
        if (versionOnRightMajor.isEmpty())
            throw new IllegalArgumentException("no platforms were found for major version " + major.get() + " specified in deployment.xml");

        for (Version target : compatibleVersions)
            if (versionOnRightMajor.contains(target))
                return target;
        throw new IllegalArgumentException("no platforms on major version " + major.get() + " specified in deployment.xml " +
                                           "are compatible with compile version " + applicationPackage.compileVersion().get());
    }

    /** Aborts a run and waits for it complete. */
    private void abortAndWait(RunId id, Duration timeout) {
        abort(id, "replaced by new deployment");
        runner.get().accept(last(id.application(), id.type()).get());

        // Poll until the run has ended, or the deadline passes.
        Instant doom = controller.clock().instant().plus(timeout);
        Duration sleep = Duration.ofMillis(100);
        while ( ! last(id.application(), id.type()).get().hasEnded()) {
            if (controller.clock().instant().plus(sleep).isAfter(doom))
                throw new UncheckedTimeoutException("timeout waiting for " + id + " to abort and finish");

            try {
                Thread.sleep(sleep.toMillis());
            }
            catch (InterruptedException e) {
                // Restore the interrupt flag before propagating.
                Thread.currentThread().interrupt();
                throw new RuntimeException(e);
            }
        }
    }

    /** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */
    public void collectGarbage() {
        Set<ApplicationId> applicationsToBuild = new HashSet<>(instances());
        curator.applicationsWithJobs().stream()
               .filter(id -> ! applicationsToBuild.contains(id))
               .forEach(id -> {
                   try {
                       TesterId tester = TesterId.of(id);
                       for (JobType type : jobs(id))
                           locked(id, type, deactivateTester, __ -> {
                               try (Mutex ___ = curator.lock(id, type)) {
                                   try {
                                       deactivateTester(tester, type);
                                   }
                                   catch (Exception e) {
                                       // Best effort: deactivation may fail, e.g. when the zone is gone —
                                       // cleanup continues regardless. NOTE(review): consider logging this.
                                   }
                                   curator.deleteRunData(id, type);
                               }
                           });
                       logs.delete(id);
                       curator.deleteRunData(id);
                   }
                   catch (Exception e) {
                       log.log(WARNING, "failed cleaning up after deleted application", e);
                   }
               });
    }

    /** Deactivates the tester deployment of the given tester and job type. */
    public void deactivateTester(TesterId id, JobType type) {
        controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), type.zone()));
    }

    /** Locks all runs and modifies the list of historic runs for the given application and job type. */
    private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) {
        try (Mutex __ = curator.lock(id, type)) {
            SortedMap<RunId, Run> runs = new TreeMap<>(curator.readHistoricRuns(id, type));
            modifications.accept(runs);
            curator.writeHistoricRuns(id, type, runs.values());
        }
    }

    /** Locks and modifies the run with the given id, provided it is still active. */
    public void locked(RunId id, UnaryOperator<Run> modifications) {
        try (Mutex __ = curator.lock(id.application(), id.type())) {
            active(id).ifPresent(run -> {
                Run modified = modifications.apply(run);
                if (modified != null) curator.writeLastRun(modified);
            });
        }
    }

    /** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */
    public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException {
        try (Mutex lock = curator.lock(id, type, step)) {
            // Acquiring and immediately releasing each prerequisite's lock proves none of them are running.
            for (Step prerequisite : step.allPrerequisites(last(id, type).get().steps().keySet()))
                try (Mutex __ = curator.lock(id, type, prerequisite)) { ; }

            action.accept(new LockedStep(lock, step));
        }
    }

}
|
class JobController {
public static final Duration maxHistoryAge = Duration.ofDays(60);
private static final Logger log = Logger.getLogger(JobController.class.getName());
private final int historyLength;
private final Controller controller;
private final CuratorDb curator;
private final BufferedLogStore logs;
private final TesterCloud cloud;
private final JobMetrics metric;
private final AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { });
public JobController(Controller controller) {
    // CD systems retain a longer run history than public systems.
    this.historyLength = controller.system().isCd() ? 256 : 64;
    this.controller = controller;
    this.curator = controller.curator();
    this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore());
    this.cloud = controller.serviceRegistry().testerCloud();
    this.metric = new JobMetrics(controller.metric(), controller::system);
}
public TesterCloud cloud() { return cloud; }
public int historyLength() { return historyLength; }
/** Sets the callback used to advance an active run; defaults to a no-op until set. */
public void setRunner(Consumer<Run> runner) { this.runner.set(runner); }
/** Rewrite all job data with the newest format. */
public void updateStorage() {
    for (ApplicationId id : instances())
        for (JobType type : jobs(id)) {
            locked(id, type, runs -> {
                // Re-serialising the last run rewrites it in the newest storage format.
                curator.readLastRun(id, type).ifPresent(curator::writeLastRun);
            });
        }
}
/** Returns all entries currently logged for the given run. */
public Optional<RunLog> details(RunId id) {
    return details(id, -1); // -1: no lower bound on entry ids, i.e., everything
}
/** Returns the logged entries for the given run, which are after the given id threshold. */
public Optional<RunLog> details(RunId id, long after) {
    try (Mutex __ = curator.lock(id.application(), id.type())) {
        Run run = runs(id.application(), id.type()).get(id);
        if (run == null)
            return Optional.empty();
        // Active runs are read from the buffered store; finished runs from the long-term store.
        return active(id).isPresent()
                ? Optional.of(logs.readActive(id.application(), id.type(), after))
                : logs.readFinished(id, after);
    }
}
/** Stores the given log entries for the given run and step. */
public void log(RunId id, Step step, List<LogEntry> entries) {
    locked(id, __ -> {
        logs.append(id.application(), id.type(), step, entries, true);
        return __; // run itself is unchanged; locking only guards the append
    });
}
/** Stores the given log messages for the given run and step, stamped with the current time. */
public void log(RunId id, Step step, Level level, List<String> messages) {
    log(id, step, messages.stream()
                          .map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message))
                          .collect(toList()));
}
/** Stores the given log message for the given run and step. */
public void log(RunId id, Step step, Level level, String message) {
    // List.of for consistency with the rest of this class (was Collections.singletonList).
    log(id, step, level, List.of(message));
}
/** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */
public void updateVespaLog(RunId id) {
    locked(id, run -> {
        if ( ! run.hasStep(copyVespaLogs))
            return run;
        storeVespaLogs(id);
        ZoneId zone = id.type().zone();
        Optional<Deployment> deployment = Optional.ofNullable(controller.applications().requireInstance(id.application())
                                                                        .deployments().get(zone));
        // Only fetch logs for a deployment made by this run.
        if (deployment.isEmpty() || deployment.get().at().isBefore(run.start()))
            return run;
        List<LogEntry> log;
        Instant deployedAt;
        Instant from;
        if ( ! run.id().type().isProduction()) {
            deployedAt = run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal)).flatMap(StepInfo::startTime).orElseThrow();
            // Continue from the last fetched timestamp, or start slightly before the install step, to avoid losing entries.
            from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10);
            log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer()
                                                   .getLogs(new DeploymentId(id.application(), zone),
                                                            Map.of("from", Long.toString(from.toEpochMilli()))),
                                         from);
        }
        else
            log = List.of();
        if (id.type().isTest()) {
            deployedAt = run.stepInfo(installTester).flatMap(StepInfo::startTime).orElseThrow();
            from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10);
            List<LogEntry> testerLog = LogEntry.parseVespaLog(controller.serviceRegistry().configServer()
                                                                        .getLogs(new DeploymentId(id.tester().id(), zone),
                                                                                 Map.of("from", Long.toString(from.toEpochMilli()))),
                                                              from);
            // Drop entries from the last couple of seconds — presumably because the two log
            // sources may not yet be complete for that window; TODO confirm.
            Instant justNow = controller.clock().instant().minusSeconds(2);
            log = Stream.concat(log.stream(), testerLog.stream())
                        .filter(entry -> entry.at().isBefore(justNow))
                        .sorted(comparing(LogEntry::at))
                        .collect(toUnmodifiableList());
        }
        if (log.isEmpty())
            return run;
        logs.append(id.application(), id.type(), Step.copyVespaLogs, log, false);
        // Remember the newest timestamp seen, so the next fetch continues from there.
        return run.with(log.get(log.size() - 1).at());
    });
}
public InputStream getVespaLogs(RunId id, long fromMillis, boolean tester) {
    Run run = run(id);
    // Once the copy-logs step has succeeded, logs are served from the run data store;
    // otherwise they are fetched live from the log server.
    return run.stepStatus(copyVespaLogs).map(succeeded::equals).orElse(false)
            ? controller.serviceRegistry().runDataStore().getLogs(id, tester)
            : getVespaLogsFromLogserver(run, fromMillis, tester).orElse(InputStream.nullInputStream());
}
// Despite the name, this maps the install step's *start* time, with a 10 s safety margin
// subtracted — NOTE(review): confirm this is the intended anchor for log fetching.
public static Optional<Instant> deploymentCompletedAt(Run run, boolean tester) {
    return (tester ? run.stepInfo(installTester)
                   : run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal)))
            .flatMap(StepInfo::startTime).map(start -> start.minusSeconds(10));
}
/** Copies Vespa logs for the real (non-production) and tester deployments of this run into the run data store. */
public void storeVespaLogs(RunId id) {
    Run run = run(id);
    if ( ! id.type().isProduction())
        copyVespaLogsToStore(id, run, false);
    if (id.type().isTest())
        copyVespaLogsToStore(id, run, true);
}

/** Streams logs from the log server into the run data store for either the real or the tester deployment. */
private void copyVespaLogsToStore(RunId id, Run run, boolean tester) {
    getVespaLogsFromLogserver(run, 0, tester).ifPresent(logs -> {
        try (logs) { // the log stream must be closed after being consumed
            controller.serviceRegistry().runDataStore().putLogs(id, tester, logs);
        }
        catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    });
}
/** Fetches Vespa logs from the log server, bounded by the deployment time and the run's end (or now). */
private Optional<InputStream> getVespaLogsFromLogserver(Run run, long fromMillis, boolean tester) {
    return deploymentCompletedAt(run, tester).map(at ->
            controller.serviceRegistry().configServer().getLogs(new DeploymentId(tester ? run.id().tester().id() : run.id().application(),
                                                                                 run.id().type().zone()),
                                                                Map.of("from", Long.toString(Math.max(fromMillis, at.toEpochMilli())),
                                                                       "to", Long.toString(run.end().orElse(controller.clock().instant()).toEpochMilli()))));
}
/** Fetches any new test log entries, and records the id of the last of these, for continuation. */
public void updateTestLog(RunId id) {
    locked(id, run -> {
        // Test logs are only available while one of the test-end steps is ready.
        Optional<Step> step = Stream.of(endStagingSetup, endTests)
                                    .filter(run.readySteps()::contains)
                                    .findAny();
        if (step.isEmpty())
            return run;
        List<LogEntry> entries = cloud.getLog(new DeploymentId(id.tester().id(), id.type().zone()),
                                              run.lastTestLogEntry());
        if (entries.isEmpty())
            return run;
        logs.append(id.application(), id.type(), step.get(), entries, false);
        // Remember the highest entry id seen, so the next fetch continues from there.
        return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong());
    });
}
/** Fetches the test report from the tester, if one exists, and stores it for the given run. */
public void updateTestReport(RunId id) {
    locked(id, run -> {
        DeploymentId testerDeployment = new DeploymentId(id.tester().id(), id.type().zone());
        cloud.getTestReport(testerDeployment)
             .ifPresent(report -> logs.writeTestReport(id, report));
        return run;
    });
}
/** Returns the stored test reports for the given run, if any. */
public Optional<String> getTestReports(RunId id) {
    return logs.readTestReports(id);
}
/** Stores the given certificate as the tester certificate for this run, or throws if it's already set. */
public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) {
    locked(id, run -> run.with(testerCertificate));
}
/** Returns a list of all instances of applications which have registered. */
public List<ApplicationId> instances() {
    return controller.applications().readable().stream()
                     .flatMap(application -> application.instances().values().stream())
                     .map(Instance::id)
                     .toList(); // Stream.toList, consistent with the other list-returning methods in this class
}
/** Returns all job types which have been run for the given application. */
private List<JobType> jobs(ApplicationId id) {
    return JobType.allIn(controller.zoneRegistry()).stream()
                  .filter(type -> last(id, type).isPresent())
                  .toList();
}
/** Returns an immutable map of all known runs for the given application and job type. */
public NavigableMap<RunId, Run> runs(JobId id) {
    return runs(id.application(), id.type());
}
/** Lists the start time of non-redeployment runs of the given job, in order of increasing age. */
public List<Instant> jobStarts(JobId id) {
    return runs(id).descendingMap().values().stream()
                   .filter(run -> ! run.isRedeployment())
                   .map(Run::start)
                   .collect(toUnmodifiableList());
}
/** Returns when given deployment last started deploying, falling back to time of deployment if it cannot be determined from job runs */
public Instant lastDeploymentStart(ApplicationId instanceId, Deployment deployment) {
    return jobStarts(new JobId(instanceId, JobType.deploymentTo(deployment.zone()))).stream()
            .findFirst()
            .orElseGet(deployment::at);
}
/** Returns an immutable map of all known runs for the given application and job type. */
public NavigableMap<RunId, Run> runs(ApplicationId id, JobType type) {
    ImmutableSortedMap.Builder<RunId, Run> runs = ImmutableSortedMap.orderedBy(Comparator.comparing(RunId::number));
    Optional<Run> last = last(id, type);
    // The historic runs may contain a stale copy of the last run; the authoritative copy is added below.
    curator.readHistoricRuns(id, type).forEach((runId, run) -> {
        if (last.isEmpty() || ! runId.equals(last.get().id()))
            runs.put(runId, run);
    });
    last.ifPresent(run -> runs.put(run.id(), run));
    return runs.build();
}
/** Returns the run with the given id, or throws if no such run exists. */
public Run run(RunId id) {
    for (Run candidate : runs(id.application(), id.type()).values())
        if (candidate.id().equals(id))
            return candidate;
    throw new NoSuchElementException("no run with id '" + id + "' exists");
}
/** Returns the last run of the given type, for the given application, if one has been run. */
public Optional<Run> last(JobId job) {
    return curator.readLastRun(job.application(), job.type());
}
/** Returns the last run of the given type, for the given application, if one has been run. */
public Optional<Run> last(ApplicationId id, JobType type) {
    return curator.readLastRun(id, type);
}
/** Returns the last completed of the given job. */
public Optional<Run> lastCompleted(JobId id) {
    return JobStatus.lastCompleted(runs(id));
}
/** Returns the first failing of the given job. */
public Optional<Run> firstFailing(JobId id) {
    return JobStatus.firstFailing(runs(id));
}
/** Returns the last success of the given job. */
public Optional<Run> lastSuccess(JobId id) {
    return JobStatus.lastSuccess(runs(id));
}
/** Returns the run with the given id, provided it is still active. */
public Optional<Run> active(RunId id) {
    // Only the last run can be active; verify it is the one asked for, and not yet ended.
    return last(id.application(), id.type())
            .filter(run -> ! run.hasEnded())
            .filter(run -> run.id().equals(id));
}
/** Returns a list of all active runs. */
public List<Run> active() {
    return controller.applications().idList().stream()
                     .flatMap(id -> active(id).stream())
                     .toList();
}
/** Returns a list of all active runs for the given application. */
public List<Run> active(TenantAndApplicationId id) {
    return controller.applications().requireApplication(id).instances().keySet().stream()
                     .flatMap(name -> JobType.allIn(controller.zoneRegistry()).stream()
                                             .map(type -> last(id.instance(name), type))
                                             .flatMap(Optional::stream)
                                             .filter(run -> ! run.hasEnded()))
                     .toList();
}
/** Returns a list of all active runs for the given instance. */
public List<Run> active(ApplicationId id) {
    return JobType.allIn(controller.zoneRegistry()).stream()
                  .map(type -> last(id, type))
                  .flatMap(Optional::stream)
                  .filter(run -> !run.hasEnded())
                  .toList();
}
/** Returns the job status of the given job, possibly empty. */
public JobStatus jobStatus(JobId id) {
    return new JobStatus(id, runs(id));
}
/** Returns the deployment status of the given application. */
public DeploymentStatus deploymentStatus(Application application) {
    VersionStatus versionStatus = controller.readVersionStatus();
    return deploymentStatus(application, versionStatus, controller.systemVersion(versionStatus));
}
private DeploymentStatus deploymentStatus(Application application, VersionStatus versionStatus, Version systemVersion) {
    return new DeploymentStatus(application,
                                this::jobStatus,
                                controller.zoneRegistry(),
                                versionStatus,
                                systemVersion,
                                instance -> controller.applications().versionCompatibility(application.id().instance(instance)),
                                controller.clock().instant());
}
/** Adds deployment status to each of the given applications. */
public DeploymentStatusList deploymentStatuses(ApplicationList applications, VersionStatus versionStatus) {
    // Resolve the system version once, and reuse it for every application.
    Version systemVersion = controller.systemVersion(versionStatus);
    return DeploymentStatusList.from(applications.asList().stream()
                                                 .map(application -> deploymentStatus(application, versionStatus, systemVersion))
                                                 .toList());
}
/** Adds deployment status to each of the given applications. Calling this will do an implicit read of the controller's version status */
public DeploymentStatusList deploymentStatuses(ApplicationList applications) {
    VersionStatus versionStatus = controller.readVersionStatus();
    return deploymentStatuses(applications, versionStatus);
}
/** Changes the status of the given step, for the given run, provided it is still active. */
public void update(RunId id, RunStatus status, LockedStep step) {
    locked(id, run -> run.with(status, step));
}
/**
 * Changes the status of the given run to inactive, and stores it as a historic run.
 * Throws TimeoutException if some step in this job is still being run.
 */
public void finish(RunId id) throws TimeoutException {
    Deque<Mutex> locks = new ArrayDeque<>();
    try {
        // Locking "report" and all its prerequisite steps proves no step of the run is still executing.
        Run unlockedRun = run(id);
        locks.push(curator.lock(id.application(), id.type(), report));
        for (Step step : report.allPrerequisites(unlockedRun.steps().keySet()))
            locks.push(curator.lock(id.application(), id.type(), step));
        locked(id, run -> {
            if (run.status() == reset) {
                for (Step step : run.steps().keySet())
                    // NOTE(review): the message literal below appears truncated in this copy of the file — restore it.
                    log(id, step, INFO, List.of("
                return run.reset();
            }
            // Don't finish while still running with unfinished steps.
            if (run.status() == running && run.stepStatuses().values().stream().anyMatch(not(succeeded::equals))) return run;
            Run finishedRun = run.finished(controller.clock().instant());
            locked(id.application(), id.type(), runs -> {
                runs.put(run.id(), finishedRun);
                long last = id.number();
                long successes = runs.values().stream().filter(Run::hasSucceeded).count();
                // Prune runs which fall outside the history length or the max age.
                var oldEntries = runs.entrySet().iterator();
                for (var old = oldEntries.next();
                     old.getKey().number() <= last - historyLength
                     || old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge));
                     old = oldEntries.next()) {
                    // Preserve the only remaining success within the age window.
                    // NOTE(review): the extra next() here, combined with the loop's update clause,
                    // advances past the *following* entry as well — confirm this is intended.
                    if ( successes == 1
                         && old.getValue().hasSucceeded()
                         && ! old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge))) {
                        oldEntries.next();
                        continue;
                    }
                    logs.delete(old.getKey());
                    oldEntries.remove();
                }
            });
            logs.flush(id);
            metric.jobFinished(run.id().job(), finishedRun.status());
            pruneRevisions(unlockedRun);
            return finishedRun;
        });
    }
    finally {
        // Always release every lock we managed to take, in reverse order of acquisition.
        for (Mutex lock : locks) {
            try {
                lock.close();
            } catch (Throwable t) {
                log.log(WARNING, "Failed to close the lock " + lock + ": the lock may or may not " +
                                 "have been released in ZooKeeper, and if not this controller " +
                                 "must be restarted to release the lock", t);
            }
        }
    }
}
/** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */
public void abort(RunId id, String reason) {
    locked(id, run -> {
        for (var entry : run.stepStatuses().entrySet())
            if (entry.getValue() == unfinished)
                log(id, entry.getKey(), INFO, "Aborting run: " + reason);
        return run.aborted();
    });
}
/** Accepts and stores a new application package and test jar pair under a generated application version key. */
public ApplicationVersion submit(TenantAndApplicationId id, Submission submission, long projectId) {
    ApplicationController applications = controller.applications();
    AtomicReference<ApplicationVersion> version = new AtomicReference<>();
    applications.lockApplicationOrThrow(id, application -> {
        Optional<ApplicationVersion> previousVersion = application.get().revisions().last();
        Optional<ApplicationPackage> previousPackage = previousVersion.flatMap(previous -> applications.applicationStore().find(id.tenant(), id.application(), previous.buildNumber().getAsLong()))
                                                                      .map(ApplicationPackage::new);
        long previousBuild = previousVersion.map(latestVersion -> latestVersion.buildNumber().getAsLong()).orElse(0L);
        version.set(submission.toApplicationVersion(1 + previousBuild));
        // Diff against the previous package, or against nothing when this is the first submission.
        byte[] diff = previousPackage.map(previous -> ApplicationPackageDiff.diff(previous, submission.applicationPackage()))
                                     .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(submission.applicationPackage()));
        applications.applicationStore().put(id.tenant(),
                                            id.application(),
                                            version.get().id(),
                                            submission.applicationPackage().zippedContent(),
                                            submission.testPackage(),
                                            diff);
        applications.applicationStore().putMeta(id.tenant(),
                                                id.application(),
                                                controller.clock().instant(),
                                                submission.applicationPackage().metaDataZip());
        // projectId == -1 means no CI project is associated with this application.
        application = application.withProjectId(projectId == -1 ? OptionalLong.empty() : OptionalLong.of(projectId));
        application = application.withRevisions(revisions -> revisions.with(version.get()));
        application = withPrunedPackages(application, version.get().id());
        validate(id, submission);
        applications.storeWithUpdatedConfig(application, submission.applicationPackage());
        if (application.get().projectId().isPresent())
            applications.deploymentTrigger().triggerNewRevision(id);
    });
    return version.get();
}
/** Validates the submission, raising or clearing user-facing notifications as appropriate. */
private void validate(TenantAndApplicationId id, Submission submission) {
    validateTests(id, submission);
    validateParentVersion(id, submission);
    // NOTE(review): validateMajorVersion is not visible in this copy of the file — confirm it exists.
    validateMajorVersion(id, submission);
}
private void validateTests(TenantAndApplicationId id, Submission submission) {
    TestSummary testSummary = TestPackage.validateTests(submission.applicationPackage().deploymentSpec(), submission.testPackage());
    if (testSummary.problems().isEmpty())
        controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.testPackage);
    else
        controller.notificationsDb().setNotification(NotificationSource.from(id),
                                                     Type.testPackage,
                                                     Notification.Level.warning,
                                                     testSummary.problems());
}
private void validateParentVersion(TenantAndApplicationId id, Submission submission) {
    submission.applicationPackage().parentVersion().ifPresent(parent -> {
        if (parent.getMajor() < controller.readSystemVersion().getMajor())
            controller.notificationsDb().setNotification(NotificationSource.from(id),
                                                         Type.submission,
                                                         Notification.Level.warning,
                                                         "Parent version used to compile the application is on a " +
                                                         "lower major version than the current Vespa Cloud version");
        else
            controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.submission);
    });
}
/** Drops stored packages for revisions older than the oldest one still deployed or targeted by a change. */
private LockedApplication withPrunedPackages(LockedApplication application, RevisionId latest){
    TenantAndApplicationId id = application.get().id();
    Application wrapped = application.get();
    RevisionId oldestDeployed = application.get().oldestDeployedRevision()
                                           .or(() -> wrapped.instances().values().stream()
                                                            .flatMap(instance -> instance.change().revision().stream())
                                                            .min(naturalOrder()))
                                           .orElse(latest);
    controller.applications().applicationStore().prune(id.tenant(), id.application(), oldestDeployed);
    for (ApplicationVersion version : application.get().revisions().withPackage())
        if (version.id().compareTo(oldestDeployed) < 0)
            application = application.withRevisions(revisions -> revisions.with(version.withoutPackage()));
    return application;
}
/** Forget revisions no longer present in any relevant job history. */
private void pruneRevisions(Run run) {
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(run.id().application());
    boolean isProduction = run.versions().targetRevision().isProduction();
    // Production revisions are shared across all jobs; development revisions belong to a single job.
    (isProduction ? deploymentStatus(controller.applications().requireApplication(applicationId)).jobs().asList().stream()
                  : Stream.of(jobStatus(run.id().job())))
            .flatMap(jobs -> jobs.runs().values().stream())
            .map(r -> r.versions().targetRevision())
            .filter(id -> id.isProduction() == isProduction)
            .min(naturalOrder())
            .ifPresent(oldestRevision -> {
                controller.applications().lockApplicationOrThrow(applicationId, application -> {
                    if (isProduction) {
                        controller.applications().applicationStore().pruneDiffs(run.id().application().tenant(), run.id().application().application(), oldestRevision.number());
                        controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision)));
                    }
                    else {
                        controller.applications().applicationStore().pruneDevDiffs(new DeploymentId(run.id().application(), run.id().job().type().zone()), oldestRevision.number());
                        controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision, run.id().job())));
                    }
                });
            });
}
/** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */
public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, Optional<String> reason) {
    start(id, type, versions, isRedeployment, JobProfile.of(type), reason);
}
/** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */
public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, JobProfile profile, Optional<String> reason) {
    ApplicationVersion revision = controller.applications().requireApplication(TenantAndApplicationId.from(id)).revisions().get(versions.targetRevision());
    // Refuse platform/compile version combinations declared incompatible for this application.
    if (revision.compileVersion()
                .map(version -> controller.applications().versionCompatibility(id).refuse(versions.targetPlatform(), version))
                .orElse(false))
        throw new IllegalArgumentException("Will not start " + type + " for " + id + " with incompatible platform version (" +
                                           versions.targetPlatform() + ") " + "and compile versions (" + revision.compileVersion().get() + ")");
    locked(id, type, __ -> {
        Optional<Run> last = last(id, type);
        if (last.flatMap(run -> active(run.id())).isPresent())
            throw new IllegalArgumentException("Cannot start " + type + " for " + id + "; it is already running!");
        RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1);
        curator.writeLastRun(Run.initial(newId, versions, isRedeployment, controller.clock().instant(), profile, reason));
        metric.jobStarted(newId.job());
    });
}
/** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */
public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) {
    deploy(id, type, platform, applicationPackage, false);
}
/** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment.*/
public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage, boolean dryRun) {
    if ( ! controller.zoneRegistry().hasZone(type.zone()))
        throw new IllegalArgumentException(type.zone() + " is not present in this system");
    // Implicitly create the instance if it does not yet exist.
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        if ( ! application.get().instances().containsKey(id.instance()))
            application = controller.applications().withNewInstance(application, id);
        controller.applications().store(application);
    });
    DeploymentId deploymentId = new DeploymentId(id, type.zone());
    Optional<Run> lastRun = last(id, type);
    lastRun.filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id(), Duration.ofMinutes(2)));
    long build = 1 + lastRun.map(run -> run.versions().targetRevision().number()).orElse(0L);
    RevisionId revisionId = RevisionId.forDevelopment(build, new JobId(id, type));
    ApplicationVersion version = ApplicationVersion.forDevelopment(revisionId, applicationPackage.compileVersion());
    byte[] diff = getDiff(applicationPackage, deploymentId, lastRun);
    controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        controller.applications().applicationStore().putDev(deploymentId, version.id(), applicationPackage.zippedContent(), diff);
        Version targetPlatform = platform.orElseGet(() -> findTargetPlatform(applicationPackage, deploymentId, application.get().get(id.instance())));
        controller.applications().store(application.withRevisions(revisions -> revisions.with(version)));
        start(id,
              type,
              new Versions(targetPlatform, version.id(), lastRun.map(run -> run.versions().targetPlatform()), lastRun.map(run -> run.versions().targetRevision())),
              false,
              dryRun ? JobProfile.developmentDryRun : JobProfile.development,
              Optional.empty());
    });
    // Kick the runner so the new run starts advancing immediately.
    locked(id, type, __ -> {
        runner.get().accept(last(id, type).get());
    });
}
/* Application package diff against previous version, or against empty version if previous does not exist or is invalid */
private byte[] getDiff(ApplicationPackage applicationPackage, DeploymentId deploymentId, Optional<Run> lastRun) {
    return lastRun.map(run -> run.versions().targetRevision())
                  .map(prevVersion -> {
                      ApplicationPackage previous;
                      try {
                          previous = new ApplicationPackage(controller.applications().applicationStore().get(deploymentId, prevVersion));
                      } catch (IllegalArgumentException e) {
                          // The stored previous package may be gone or unreadable; fall back to an empty baseline.
                          return ApplicationPackageDiff.diffAgainstEmpty(applicationPackage);
                      }
                      return ApplicationPackageDiff.diff(previous, applicationPackage);
                  })
                  .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(applicationPackage));
}
/**
 * Picks the platform version to deploy: the newest deployable version (or the one already
 * deployed in this zone) which is compatible with the package's compile version and with
 * any major version pinned in deployment.xml.
 *
 * @throws IllegalStateException when the system has no deployable versions at all
 * @throws IllegalArgumentException when no version satisfies the constraints
 */
private Version findTargetPlatform(ApplicationPackage applicationPackage, DeploymentId id, Optional<Instance> instance) {
    // Copy into a fresh ArrayList: this list is mutated below, and Collectors.toList()
    // makes no guarantee that the collected list is mutable.
    List<Version> versions = new ArrayList<>(controller.readVersionStatus().deployableVersions().stream()
                                                       .map(VespaVersion::versionNumber)
                                                       .collect(toList()));
    // Also consider the version already deployed in this zone, if any.
    instance.map(Instance::deployments)
            .map(deployments -> deployments.get(id.zoneId()))
            .map(Deployment::version)
            .ifPresent(versions::add);
    if (versions.isEmpty())
        throw new IllegalStateException("no deployable platform version found in the system");
    VersionCompatibility compatibility = controller.applications().versionCompatibility(id.applicationId());
    List<Version> compatibleVersions = new ArrayList<>();
    for (Version target : reversed(versions)) // newest first
        if (applicationPackage.compileVersion().isEmpty() || compatibility.accept(target, applicationPackage.compileVersion().get()))
            compatibleVersions.add(target);
    if (compatibleVersions.isEmpty())
        throw new IllegalArgumentException("no platforms are compatible with compile version " + applicationPackage.compileVersion().get());
    Optional<Integer> major = applicationPackage.deploymentSpec().majorVersion();
    List<Version> versionOnRightMajor = new ArrayList<>();
    for (Version target : reversed(versions))
        if (major.isEmpty() || major.get() == target.getMajor())
            versionOnRightMajor.add(target);
    if (versionOnRightMajor.isEmpty())
        throw new IllegalArgumentException("no platforms were found for major version " + major.get() + " specified in deployment.xml");
    // Return the newest version satisfying both constraints.
    for (Version target : compatibleVersions)
        if (versionOnRightMajor.contains(target))
            return target;
    throw new IllegalArgumentException("no platforms on major version " + major.get() + " specified in deployment.xml " +
                                       "are compatible with compile version " + applicationPackage.compileVersion().get());
}
/** Aborts a run and waits for it complete. */
private void abortAndWait(RunId id, Duration timeout) {
    abort(id, "replaced by new deployment");
    runner.get().accept(last(id.application(), id.type()).get());
    // Poll until the run has ended, or give up after the timeout.
    Instant doom = controller.clock().instant().plus(timeout);
    Duration sleep = Duration.ofMillis(100);
    while ( ! last(id.application(), id.type()).get().hasEnded()) {
        if (controller.clock().instant().plus(sleep).isAfter(doom))
            throw new UncheckedTimeoutException("timeout waiting for " + id + " to abort and finish");
        try {
            Thread.sleep(sleep.toMillis());
        }
        catch (InterruptedException e) {
            // Restore the interrupt flag before propagating.
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        }
    }
}
/** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */
public void collectGarbage() {
    Set<ApplicationId> applicationsToBuild = new HashSet<>(instances());
    curator.applicationsWithJobs().stream()
           .filter(id -> ! applicationsToBuild.contains(id))
           .forEach(id -> {
               try {
                   TesterId tester = TesterId.of(id);
                   for (JobType type : jobs(id))
                       locked(id, type, deactivateTester, __ -> {
                           try (Mutex ___ = curator.lock(id, type)) {
                               try {
                                   deactivateTester(tester, type);
                               }
                               catch (Exception e) {
                                   // Deliberately ignored: best-effort deactivation — presumably the
                                   // tester deployment may already be gone; TODO confirm and log.
                               }
                               curator.deleteRunData(id, type);
                           }
                       });
                   logs.delete(id);
                   curator.deleteRunData(id);
               }
               catch (Exception e) {
                   log.log(WARNING, "failed cleaning up after deleted application", e);
               }
           });
}
/** Deactivates the tester deployment for the given tester id and job type. */
public void deactivateTester(TesterId id, JobType type) {
    controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), type.zone()));
}
/** Locks all runs and modifies the list of historic runs for the given application and job type. */
private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) {
    try (Mutex __ = curator.lock(id, type)) {
        SortedMap<RunId, Run> runs = new TreeMap<>(curator.readHistoricRuns(id, type));
        modifications.accept(runs);
        curator.writeHistoricRuns(id, type, runs.values());
    }
}
/** Locks and modifies the run with the given id, provided it is still active. */
public void locked(RunId id, UnaryOperator<Run> modifications) {
    try (Mutex __ = curator.lock(id.application(), id.type())) {
        active(id).ifPresent(current -> {
            Run updated = modifications.apply(current);
            if (updated != null)
                curator.writeLastRun(updated);
        });
    }
}
/** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */
public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException {
    try (Mutex lock = curator.lock(id, type, step)) {
        // Taking and immediately releasing each prerequisite's lock proves no prerequisite step
        // is currently running; a held prerequisite lock makes this time out instead.
        for (Step prerequisite : step.allPrerequisites(last(id, type).get().steps().keySet()))
            try (Mutex __ = curator.lock(id, type, prerequisite)) { ; }
        action.accept(new LockedStep(lock, step));
    }
}
}
|
Now we need to check for null here too: `close()` sets the `engine` reference to null, so `engine.get()` may return null if this code runs during or after shutdown.
|
/** Checks the status of the cluster table, repairing it on failure. */
private void ensureClusterTableIsUpdated() {
    try {
        // close() clears the engine reference; guard against using it afterwards.
        CairoEngine engine = this.engine.get();
        if (engine == null)
            return;
        if (0 == engine.getStatus(newContext().getCairoSecurityContext(), new Path(), clusterTable.name)) {
            // NOTE(review): empty body — the status == 0 branch looks unfinished in this copy; confirm.
        }
    } catch (Exception e) {
        clusterTable.repair(e);
    }
}
|
if (0 == engine.get().getStatus(newContext().getCairoSecurityContext(), new Path(), clusterTable.name)) {
|
/** Checks the status of the cluster table, repairing it on failure. */
private void ensureClusterTableIsUpdated() {
    try {
        // NOTE(review): engine() is not visible in this copy of the file — presumably an accessor
        // which fails fast when the engine has been closed; confirm.
        if (0 == engine().getStatus(newContext().getCairoSecurityContext(), new Path(), clusterTable.name)) {
        }
    } catch (Exception e) {
        clusterTable.repair(e);
    }
}
|
class QuestMetricsDb extends AbstractComponent implements MetricsDb {
private static final Logger log = Logger.getLogger(QuestMetricsDb.class.getName());
private final Table nodeTable;
private final Table clusterTable;
private final Clock clock;
private final String dataDir;
private final AtomicReference<CairoEngine> engine = new AtomicReference<>();
private final ThreadLocal<SqlCompiler> sqlCompiler;
private final AtomicInteger nullRecords = new AtomicInteger();
@Inject
public QuestMetricsDb() {
    this(Defaults.getDefaults().underVespaHome("var/db/vespa/autoscaling"), Clock.systemUTC());
}
public QuestMetricsDb(String dataDir, Clock clock) {
    this.clock = clock;
    // Fall back to a local directory when the Vespa home directory does not exist (e.g., in tests).
    if (dataDir.startsWith(Defaults.getDefaults().vespaHome())
        && ! new File(Defaults.getDefaults().vespaHome()).exists())
        dataDir = "data";
    // Write an empty log config file and point the "out" system property at it —
    // presumably consumed by QuestDB's logging; TODO confirm.
    String logConfig = dataDir + "/quest-log.conf";
    IOUtils.createDirectory(logConfig);
    IOUtils.writeFile(new File(logConfig), new byte[0]);
    System.setProperty("out", logConfig);
    this.dataDir = dataDir;
    engine.set(new CairoEngine(new DefaultCairoConfiguration(dataDir)));
    sqlCompiler = ThreadLocal.withInitial(() -> new SqlCompiler(engine.get()));
    nodeTable = new Table(dataDir, "metrics", clock);
    clusterTable = new Table(dataDir, "clusterMetrics", clock);
    ensureTablesExist();
}
@Override
public Clock clock() { return clock; }
@Override
public void addNodeMetrics(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
    try {
        addNodeMetricsBody(snapshots);
    }
    catch (CairoException e) {
        // "Cannot read offset" indicates a damaged table; repair and retry the write once.
        if (e.getMessage().contains("Cannot read offset")) {
            nodeTable.repair(e);
            addNodeMetricsBody(snapshots);
        }
    }
}
private void addNodeMetricsBody(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
    synchronized (nodeTable.writeLock) {
        try (TableWriter writer = nodeTable.getWriter()) {
            for (var snapshot : snapshots) {
                // Skip snapshots whose timestamp cannot be adjusted to fit the table's ordering.
                Optional<Long> atMillis = nodeTable.adjustOrDiscard(snapshot.getSecond().at());
                if (atMillis.isEmpty()) continue;
                // NOTE(review): millis * 1000 — presumably the row timestamp is in microseconds; confirm.
                TableWriter.Row row = writer.newRow(atMillis.get() * 1000);
                row.putStr(0, snapshot.getFirst());
                // NOTE(review): column index 1 is skipped — presumably the designated timestamp
                // column set via newRow; confirm against the table schema.
                row.putFloat(2, (float) snapshot.getSecond().load().cpu());
                row.putFloat(3, (float) snapshot.getSecond().load().memory());
                row.putFloat(4, (float) snapshot.getSecond().load().disk());
                row.putLong(5, snapshot.getSecond().generation());
                row.putBool(6, snapshot.getSecond().inService());
                row.putBool(7, snapshot.getSecond().stable());
                row.putFloat(8, (float) snapshot.getSecond().queryRate());
                row.append();
            }
            writer.commit();
        }
    }
}
/** Writes the given cluster metric snapshots, repairing the table and retrying once on a corrupt-offset error. */
@Override
public void addClusterMetrics(ApplicationId application, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
    try {
        addClusterMetricsBody(application, snapshots);
    }
    catch (CairoException e) {
        if ( ! e.getMessage().contains("Cannot read offset"))
            return;
        clusterTable.repair(e);
        addClusterMetricsBody(application, snapshots);
    }
}
/**
 * Writes the given cluster snapshots to the cluster table, one row per cluster.
 * All writes to the cluster table are serialized through its write lock.
 */
private void addClusterMetricsBody(ApplicationId applicationId, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
    synchronized (clusterTable.writeLock) {
        try (TableWriter writer = clusterTable.getWriter()) {
            for (var snapshot : snapshots.entrySet()) {
                // Rows must be appended with non-decreasing timestamps; see adjustOrDiscard.
                Optional<Long> atMillis = clusterTable.adjustOrDiscard(snapshot.getValue().at());
                if (atMillis.isEmpty()) continue;
                // newRow takes the designated timestamp ('at', column 2) in microseconds.
                TableWriter.Row row = writer.newRow(atMillis.get() * 1000);
                row.putStr(0, applicationId.serializedForm());
                row.putStr(1, snapshot.getKey().value());
                // Column 2 ('at') is set via newRow above; explicit puts continue at 3.
                row.putFloat(3, (float) snapshot.getValue().queryRate());
                row.putFloat(4, (float) snapshot.getValue().writeRate());
                row.append();
            }
            writer.commit();
        }
    }
}
/**
 * Returns one timeseries per host, covering the given period back from now.
 * An empty hostname set means "all hosts".
 *
 * @throws IllegalStateException if the underlying store could not be read
 */
@Override
public List<NodeTimeseries> getNodeTimeseries(Duration period, Set<String> hostnames) {
    try {
        ListMap<String, NodeMetricSnapshot> snapshotsByHost =
                getNodeSnapshots(clock.instant().minus(period), hostnames, newContext());
        List<NodeTimeseries> timeseries = new ArrayList<>();
        for (var entry : snapshotsByHost.entrySet())
            timeseries.add(new NodeTimeseries(entry.getKey(), entry.getValue()));
        return timeseries;
    }
    catch (SqlException e) {
        throw new IllegalStateException("Could not read node timeseries data in Quest stored in " + dataDir, e);
    }
}
/**
 * Returns the timeseries of the given cluster of an application.
 *
 * @throws IllegalStateException if the underlying store could not be read
 */
@Override
public ClusterTimeseries getClusterTimeseries(ApplicationId applicationId, ClusterSpec.Id clusterId) {
    ClusterTimeseries timeseries;
    try {
        timeseries = getClusterSnapshots(applicationId, clusterId);
    }
    catch (SqlException e) {
        throw new IllegalStateException("Could not read cluster timeseries data in Quest stored in " + dataDir, e);
    }
    return timeseries;
}
public int getNullRecordsCount() { return nullRecords.get(); }
/** Resets the null-record diagnostics counter and prunes old partitions from both tables. */
@Override
public void gc() {
    nullRecords.set(0);
    nodeTable.gc();
    clusterTable.gc();
}
/** Shutdown hook: delegates to {@link #close()}. */
@Override
public void deconstruct() {
    close();
}
/**
 * Closes the underlying engine. Safe to call multiple times; the engine reference is
 * swapped to null atomically so only the first call closes it.
 */
@Override
public void close() {
    // Take both table write locks so no in-flight write can touch the engine while it
    // is shut down. (Previously only the cluster table lock was held, so a concurrent
    // node metrics write could race with engine shutdown.)
    synchronized (nodeTable.writeLock) {
        synchronized (clusterTable.writeLock) {
            CairoEngine myEngine = engine.getAndSet(null);
            if (myEngine != null) {
                myEngine.close();
            }
        }
    }
}
/** Creates the node and cluster tables if missing, or brings existing ones up to date. */
private void ensureTablesExist() {
    if (nodeTable.exists()) {
        ensureNodeTableIsUpdated();
    } else {
        createNodeTable();
    }
    if (clusterTable.exists()) {
        ensureClusterTableIsUpdated();
    } else {
        createClusterTable();
    }
}
/**
 * Brings an existing node table up to the current schema.
 *
 * NOTE(review): the try block is empty, so this method is currently a no-op and the
 * catch can never fire. Presumably schema-migration calls (e.g. {@code Table.ensureColumnExists})
 * belong here — confirm against version history before removing.
 */
private void ensureNodeTableIsUpdated() {
    try {
    } catch (Exception e) {
        nodeTable.repair(e);
    }
}
/**
 * Creates the node metrics table with 'at' as the designated timestamp,
 * partitioned by day (so gc can drop whole day partitions).
 *
 * @throws IllegalStateException if table creation fails
 */
private void createNodeTable() {
    try {
        // NOTE(review): " timestamp(at)" + "PARTITION BY DAY;" concatenates without a
        // separating space — it appears to be accepted, but verify this is intended.
        issue("create table " + nodeTable.name +
              " (hostname string, at timestamp, cpu_util float, mem_total_util float, disk_util float," +
              " application_generation long, inService boolean, stable boolean, queries_rate float)" +
              " timestamp(at)" +
              "PARTITION BY DAY;",
              newContext());
    }
    catch (SqlException e) {
        throw new IllegalStateException("Could not create Quest db table '" + nodeTable.name + "'", e);
    }
}
/**
 * Creates the cluster metrics table with 'at' as the designated timestamp,
 * partitioned by day (so gc can drop whole day partitions).
 *
 * @throws IllegalStateException if table creation fails
 */
private void createClusterTable() {
    try {
        // NOTE(review): " timestamp(at)" + "PARTITION BY DAY;" concatenates without a
        // separating space — it appears to be accepted, but verify this is intended.
        issue("create table " + clusterTable.name +
              " (application string, cluster string, at timestamp, queries_rate float, write_rate float)" +
              " timestamp(at)" +
              "PARTITION BY DAY;",
              newContext());
    }
    catch (SqlException e) {
        throw new IllegalStateException("Could not create Quest db table '" + clusterTable.name + "'", e);
    }
}
/**
 * Reads all node snapshots between startTime and now, grouped by hostname.
 * If {@code hostnames} is empty, snapshots for all hosts are returned.
 */
private ListMap<String, NodeMetricSnapshot> getNodeSnapshots(Instant startTime,
                                                             Set<String> hostnames,
                                                             SqlExecutionContext context) throws SqlException {
    DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC"));
    // Truncate the formatted instants to second precision and append explicit
    // micros + zone, the timestamp literal format the query expects.
    String from = formatter.format(startTime).substring(0, 19) + ".000000Z";
    String to = formatter.format(clock.instant()).substring(0, 19) + ".000000Z";
    String sql = "select * from " + nodeTable.name + " where at between('" + from + "', '" + to + "');";
    try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) {
        ListMap<String, NodeMetricSnapshot> snapshots = new ListMap<>();
        try (RecordCursor cursor = factory.getCursor(context)) {
            Record record = cursor.getRecord();
            while (cursor.hasNext()) {
                // Damaged rows may have a null hostname; count them for diagnostics
                // (exposed via getNullRecordsCount) and skip them.
                if (record == null || record.getStr(0) == null) {
                    nullRecords.incrementAndGet();
                    continue;
                }
                String hostname = record.getStr(0).toString();
                if (hostnames.isEmpty() || hostnames.contains(hostname)) {
                    snapshots.put(hostname,
                                  // Stored timestamps are in microseconds; convert to millis.
                                  new NodeMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(1) / 1000),
                                                         new Load(record.getFloat(2),
                                                                  record.getFloat(3),
                                                                  record.getFloat(4)),
                                                         record.getLong(5),
                                                         record.getBool(6),
                                                         record.getBool(7),
                                                         record.getFloat(8)));
                }
            }
        }
        return snapshots;
    }
}
/**
 * Reads the snapshots of one cluster of one application.
 * Filtering is done client side: the whole cluster table is scanned and rows not
 * matching the application/cluster are skipped.
 */
private ClusterTimeseries getClusterSnapshots(ApplicationId application, ClusterSpec.Id cluster) throws SqlException {
    String sql = "select * from " + clusterTable.name;
    var context = newContext();
    try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) {
        List<ClusterMetricSnapshot> snapshots = new ArrayList<>();
        try (RecordCursor cursor = factory.getCursor(context)) {
            Record record = cursor.getRecord();
            while (cursor.hasNext()) {
                String applicationIdString = record.getStr(0).toString();
                if ( ! application.serializedForm().equals(applicationIdString)) continue;
                String clusterId = record.getStr(1).toString();
                if (cluster.value().equals(clusterId)) {
                    // Stored timestamps are in microseconds; convert to millis.
                    snapshots.add(new ClusterMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(2) / 1000),
                                                            record.getFloat(3),
                                                            record.getFloat(4)));
                }
            }
        }
        return new ClusterTimeseries(cluster, snapshots);
    }
}
/** Compiles and executes the given SQL statement against the QuestDb engine, using this thread's compiler. */
private CompiledQuery issue(String sql, SqlExecutionContext context) throws SqlException {
    SqlCompiler compiler = sqlCompiler.get();
    return compiler.compile(sql, context);
}
/** Creates a fresh single-worker SQL execution context bound to the current engine. */
private SqlExecutionContext newContext() {
    SqlExecutionContextImpl context = new SqlExecutionContextImpl(engine.get(), 1);
    return context;
}
/**
 * A questDb table. Owns the write lock serializing all writes to the table, and
 * tracks the highest timestamp written so far (rows must be appended in
 * non-decreasing timestamp order).
 */
private class Table {

    private final Object writeLock = new Object();
    private final String name;
    private final Clock clock;
    private final File dir;
    // Highest row timestamp (millis) written so far; see adjustOrDiscard.
    private long highestTimestampAdded = 0;

    Table(String dataDir, String name, Clock clock) {
        this.name = name;
        this.clock = clock;
        this.dir = new File(dataDir, name);
        IOUtils.createDirectory(dir.getPath());
        // Remove any stale transaction scoreboard left behind by an unclean shutdown.
        new File(dir + "/_txn_scoreboard").delete();
    }

    /** Returns whether this table exists in the engine (status 0). */
    boolean exists() {
        return 0 == engine.get().getStatus(newContext().getCairoSecurityContext(), new Path(), name);
    }

    TableWriter getWriter() {
        return engine.get().getWriter(newContext().getCairoSecurityContext(), name);
    }

    /** Drops day partitions older than 4 days, but never all partitions at once. */
    void gc() {
        synchronized (writeLock) {
            Instant oldestToKeep = clock.instant().minus(Duration.ofDays(4));
            SqlExecutionContext context = newContext();
            int partitions = 0;
            try {
                List<String> removeList = new ArrayList<>();
                for (String dirEntry : dir.list()) {
                    File partitionDir = new File(dir, dirEntry);
                    if (!partitionDir.isDirectory()) continue;
                    partitions++;
                    // Partition directory names start with the day, yyyy-MM-dd.
                    DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC"));
                    Instant partitionDay = Instant.from(formatter.parse(dirEntry.substring(0, 10) + "T00:00:00"));
                    if (partitionDay.isBefore(oldestToKeep))
                        removeList.add(dirEntry);
                }
                // Keep at least one partition: only drop when some (but not all) are old.
                if (removeList.size() < partitions && !removeList.isEmpty()) {
                    issue("alter table " + name + " drop partition list " +
                          removeList.stream().map(dir -> "'" + dir + "'").collect(Collectors.joining(",")),
                          context);
                }
            } catch (SqlException e) {
                // Gc is best-effort: log and try again on the next run.
                log.log(Level.WARNING, "Failed to gc old metrics data in " + dir + " table " + name, e);
            }
        }
    }

    /**
     * Repairs this db on corruption.
     *
     * @param e the exception indicating corruption
     */
    private void repair(Exception e) {
        log.log(Level.WARNING, "QuestDb seems corrupted, wiping data and starting over", e);
        IOUtils.recursiveDeleteDir(dir);
        IOUtils.createDirectory(dir.getPath());
        ensureTablesExist();
    }

    /** Adds the given column to this table if it is not already present (simple schema migration). */
    void ensureColumnExists(String column, String columnType) throws SqlException {
        if (columnNames().contains(column)) return;
        issue("alter table " + name + " add column " + column + " " + columnType, newContext());
    }

    /**
     * Maps a snapshot time to a usable row timestamp (millis), enforcing monotonicity:
     * newer-or-equal times are used as-is, times up to a minute behind the highest
     * written are bumped up to it, and anything older is discarded (empty).
     */
    private Optional<Long> adjustOrDiscard(Instant at) {
        long timestamp = at.toEpochMilli();
        if (timestamp >= highestTimestampAdded) {
            highestTimestampAdded = timestamp;
            return Optional.of(timestamp);
        }
        // Assume sequence numbers are a fine-grained adjustment to the timestamp:
        // within one minute of the highest, reuse the highest timestamp.
        if (timestamp >= highestTimestampAdded - 60 * 1000) return Optional.of(highestTimestampAdded);
        return Optional.empty();
    }

    /** Returns the names of the columns of this table, via 'show columns'. */
    private List<String> columnNames() throws SqlException {
        var context = newContext();
        List<String> columns = new ArrayList<>();
        try (RecordCursorFactory factory = issue("show columns from " + name, context).getRecordCursorFactory()) {
            try (RecordCursor cursor = factory.getCursor(context)) {
                Record record = cursor.getRecord();
                while (cursor.hasNext()) {
                    columns.add(record.getStr(0).toString());
                }
            }
        }
        return columns;
    }

}
}
|
/**
 * A MetricsDb backed by an embedded QuestDb instance.
 *
 * <p>Node and cluster metric snapshots are stored in two day-partitioned tables
 * ("metrics" and "clusterMetrics") under {@code dataDir}. Writes to each table are
 * serialized through that table's write lock, and the database wipes itself and
 * starts over when corruption ("Cannot read offset") is detected.
 */
class QuestMetricsDb extends AbstractComponent implements MetricsDb {

    private static final Logger log = Logger.getLogger(QuestMetricsDb.class.getName());

    private final Table nodeTable;
    private final Table clusterTable;
    private final Clock clock;
    private final String dataDir;
    private final CairoEngine engine;
    // SqlCompiler instances are pooled and reused across threads; see issue().
    private final ConcurrentResourcePool<SqlCompiler> sqlCompilerPool;
    // Flipped (once) by close(); engine() rejects all access afterwards.
    private final AtomicBoolean closed = new AtomicBoolean(false);

    @Inject
    public QuestMetricsDb() {
        this(Defaults.getDefaults().underVespaHome("var/db/vespa/autoscaling"), Clock.systemUTC());
    }

    public QuestMetricsDb(String dataDir, Clock clock) {
        this.clock = clock;
        // Fall back to a local "data" directory when the configured Vespa home
        // does not exist (e.g. when running outside a Vespa installation).
        if (dataDir.startsWith(Defaults.getDefaults().vespaHome())
            && ! new File(Defaults.getDefaults().vespaHome()).exists())
            dataDir = "data";
        // Silence the QuestDb log by pointing it at an empty log config file.
        // NOTE(review): createDirectory is called with the config *file* path —
        // confirm IOUtils.createDirectory behaves as intended here.
        String logConfig = dataDir + "/quest-log.conf";
        IOUtils.createDirectory(logConfig);
        IOUtils.writeFile(new File(logConfig), new byte[0]);
        System.setProperty("out", logConfig);
        this.dataDir = dataDir;
        engine = new CairoEngine(new DefaultCairoConfiguration(dataDir));
        sqlCompilerPool = new ConcurrentResourcePool<>(() -> new SqlCompiler(engine()));
        nodeTable = new Table(dataDir, "metrics", clock);
        clusterTable = new Table(dataDir, "clusterMetrics", clock);
        ensureTablesExist();
    }

    /** Returns the engine, or throws if this db has been closed. */
    private CairoEngine engine() {
        if (closed.get())
            throw new IllegalStateException("Attempted to access QuestDb after calling close");
        return engine;
    }

    @Override
    public Clock clock() { return clock; }

    /**
     * Adds the given node metric snapshots. If the store is corrupted
     * ("Cannot read offset"), the node table is wiped and the write retried once.
     */
    @Override
    public void addNodeMetrics(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
        try {
            addNodeMetricsBody(snapshots);
        }
        catch (CairoException e) {
            // NOTE(review): CairoExceptions with any other message are silently swallowed.
            if (e.getMessage().contains("Cannot read offset")) {
                nodeTable.repair(e);
                addNodeMetricsBody(snapshots);
            }
        }
    }

    /** Writes the given snapshots to the node table, serialized through its write lock. */
    private void addNodeMetricsBody(Collection<Pair<String, NodeMetricSnapshot>> snapshots) {
        synchronized (nodeTable.writeLock) {
            try (TableWriter writer = nodeTable.getWriter()) {
                for (var snapshot : snapshots) {
                    // Rows must be appended with non-decreasing timestamps; see adjustOrDiscard.
                    Optional<Long> atMillis = nodeTable.adjustOrDiscard(snapshot.getSecond().at());
                    if (atMillis.isEmpty()) continue;
                    // newRow takes the designated timestamp ('at', column 1) in microseconds.
                    TableWriter.Row row = writer.newRow(atMillis.get() * 1000);
                    row.putStr(0, snapshot.getFirst());
                    // Column 1 ('at') is set via newRow above; explicit puts start at 2.
                    row.putFloat(2, (float) snapshot.getSecond().load().cpu());
                    row.putFloat(3, (float) snapshot.getSecond().load().memory());
                    row.putFloat(4, (float) snapshot.getSecond().load().disk());
                    row.putLong(5, snapshot.getSecond().generation());
                    row.putBool(6, snapshot.getSecond().inService());
                    row.putBool(7, snapshot.getSecond().stable());
                    row.putFloat(8, (float) snapshot.getSecond().queryRate());
                    row.append();
                }
                writer.commit();
            }
        }
    }

    /**
     * Adds the given cluster metric snapshots. If the store is corrupted
     * ("Cannot read offset"), the cluster table is wiped and the write retried once.
     */
    @Override
    public void addClusterMetrics(ApplicationId application, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
        try {
            addClusterMetricsBody(application, snapshots);
        }
        catch (CairoException e) {
            // NOTE(review): CairoExceptions with any other message are silently swallowed.
            if (e.getMessage().contains("Cannot read offset")) {
                clusterTable.repair(e);
                addClusterMetricsBody(application, snapshots);
            }
        }
    }

    /** Writes the given cluster snapshots to the cluster table, serialized through its write lock. */
    private void addClusterMetricsBody(ApplicationId applicationId, Map<ClusterSpec.Id, ClusterMetricSnapshot> snapshots) {
        synchronized (clusterTable.writeLock) {
            try (TableWriter writer = clusterTable.getWriter()) {
                for (var snapshot : snapshots.entrySet()) {
                    Optional<Long> atMillis = clusterTable.adjustOrDiscard(snapshot.getValue().at());
                    if (atMillis.isEmpty()) continue;
                    // newRow takes the designated timestamp ('at', column 2) in microseconds.
                    TableWriter.Row row = writer.newRow(atMillis.get() * 1000);
                    row.putStr(0, applicationId.serializedForm());
                    row.putStr(1, snapshot.getKey().value());
                    // Column 2 ('at') is set via newRow above; explicit puts continue at 3.
                    row.putFloat(3, (float) snapshot.getValue().queryRate());
                    row.putFloat(4, (float) snapshot.getValue().writeRate());
                    row.append();
                }
                writer.commit();
            }
        }
    }

    /** Returns one timeseries per host covering the given period; empty hostnames means all hosts. */
    @Override
    public List<NodeTimeseries> getNodeTimeseries(Duration period, Set<String> hostnames) {
        try {
            var snapshots = getNodeSnapshots(clock.instant().minus(period), hostnames, newContext());
            return snapshots.entrySet().stream()
                            .map(entry -> new NodeTimeseries(entry.getKey(), entry.getValue()))
                            .collect(Collectors.toList());
        }
        catch (SqlException e) {
            throw new IllegalStateException("Could not read node timeseries data in Quest stored in " + dataDir, e);
        }
    }

    /** Returns the timeseries of the given cluster of an application. */
    @Override
    public ClusterTimeseries getClusterTimeseries(ApplicationId applicationId, ClusterSpec.Id clusterId) {
        try {
            return getClusterSnapshots(applicationId, clusterId);
        }
        catch (SqlException e) {
            throw new IllegalStateException("Could not read cluster timeseries data in Quest stored in " + dataDir, e);
        }
    }

    /** Prunes old partitions from both tables. */
    @Override
    public void gc() {
        nodeTable.gc();
        clusterTable.gc();
    }

    /** Shutdown hook: delegates to {@link #close()}. */
    @Override
    public void deconstruct() { close(); }

    /** Closes the compiler pool and the engine. Idempotent; excludes concurrent writers. */
    @Override
    public void close() {
        if (closed.getAndSet(true)) return; // already closed
        // Take both write locks so no in-flight write touches the engine during shutdown.
        synchronized (nodeTable.writeLock) {
            synchronized (clusterTable.writeLock) {
                for (SqlCompiler sqlCompiler : sqlCompilerPool)
                    sqlCompiler.close();
                engine.close();
            }
        }
    }

    /** Creates the node and cluster tables if missing, or brings existing ones up to date. */
    private void ensureTablesExist() {
        if (nodeTable.exists())
            ensureNodeTableIsUpdated();
        else
            createNodeTable();
        if (clusterTable.exists())
            ensureClusterTableIsUpdated();
        else
            createClusterTable();
    }

    /**
     * Brings an existing node table up to the current schema.
     *
     * NOTE(review): the try block is empty, so this is currently a no-op and the catch
     * can never fire. Presumably schema-migration calls (e.g. Table.ensureColumnExists)
     * belong here — confirm against version history before removing.
     */
    private void ensureNodeTableIsUpdated() {
        try {
        } catch (Exception e) {
            nodeTable.repair(e);
        }
    }

    /** Creates the node metrics table, day-partitioned on the 'at' timestamp. */
    private void createNodeTable() {
        try {
            // NOTE(review): " timestamp(at)" + "PARTITION BY DAY;" concatenates without
            // a separating space — it appears to be accepted, but verify intent.
            issue("create table " + nodeTable.name +
                  " (hostname string, at timestamp, cpu_util float, mem_total_util float, disk_util float," +
                  " application_generation long, inService boolean, stable boolean, queries_rate float)" +
                  " timestamp(at)" +
                  "PARTITION BY DAY;",
                  newContext());
        }
        catch (SqlException e) {
            throw new IllegalStateException("Could not create Quest db table '" + nodeTable.name + "'", e);
        }
    }

    /** Creates the cluster metrics table, day-partitioned on the 'at' timestamp. */
    private void createClusterTable() {
        try {
            // NOTE(review): same missing-space concatenation as in createNodeTable.
            issue("create table " + clusterTable.name +
                  " (application string, cluster string, at timestamp, queries_rate float, write_rate float)" +
                  " timestamp(at)" +
                  "PARTITION BY DAY;",
                  newContext());
        }
        catch (SqlException e) {
            throw new IllegalStateException("Could not create Quest db table '" + clusterTable.name + "'", e);
        }
    }

    /**
     * Reads all node snapshots between startTime and now, grouped by hostname.
     * If {@code hostnames} is empty, snapshots for all hosts are returned.
     */
    private ListMap<String, NodeMetricSnapshot> getNodeSnapshots(Instant startTime,
                                                                 Set<String> hostnames,
                                                                 SqlExecutionContext context) throws SqlException {
        DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC"));
        // Truncate formatted instants to second precision and append explicit micros + zone.
        String from = formatter.format(startTime).substring(0, 19) + ".000000Z";
        String to = formatter.format(clock.instant()).substring(0, 19) + ".000000Z";
        String sql = "select * from " + nodeTable.name + " where at between('" + from + "', '" + to + "');";
        try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) {
            ListMap<String, NodeMetricSnapshot> snapshots = new ListMap<>();
            try (RecordCursor cursor = factory.getCursor(context)) {
                Record record = cursor.getRecord();
                while (cursor.hasNext()) {
                    // NOTE(review): assumes getStr(0) is never null here; a damaged row
                    // would NPE — confirm whether a null guard is needed.
                    String hostname = record.getStr(0).toString();
                    if (hostnames.isEmpty() || hostnames.contains(hostname)) {
                        snapshots.put(hostname,
                                      // Stored timestamps are in microseconds; convert to millis.
                                      new NodeMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(1) / 1000),
                                                             new Load(record.getFloat(2),
                                                                      record.getFloat(3),
                                                                      record.getFloat(4)),
                                                             record.getLong(5),
                                                             record.getBool(6),
                                                             record.getBool(7),
                                                             record.getFloat(8)));
                    }
                }
            }
            return snapshots;
        }
    }

    /**
     * Reads the snapshots of one cluster of one application. Filtering is done client
     * side: the whole cluster table is scanned and non-matching rows are skipped.
     */
    private ClusterTimeseries getClusterSnapshots(ApplicationId application, ClusterSpec.Id cluster) throws SqlException {
        String sql = "select * from " + clusterTable.name;
        var context = newContext();
        try (RecordCursorFactory factory = issue(sql, context).getRecordCursorFactory()) {
            List<ClusterMetricSnapshot> snapshots = new ArrayList<>();
            try (RecordCursor cursor = factory.getCursor(context)) {
                Record record = cursor.getRecord();
                while (cursor.hasNext()) {
                    String applicationIdString = record.getStr(0).toString();
                    if ( ! application.serializedForm().equals(applicationIdString)) continue;
                    String clusterId = record.getStr(1).toString();
                    if (cluster.value().equals(clusterId)) {
                        // Stored timestamps are in microseconds; convert to millis.
                        snapshots.add(new ClusterMetricSnapshot(Instant.ofEpochMilli(record.getTimestamp(2) / 1000),
                                                                record.getFloat(3),
                                                                record.getFloat(4)));
                    }
                }
            }
            return new ClusterTimeseries(cluster, snapshots);
        }
    }

    /** Issues an SQL statement against the QuestDb engine, borrowing a compiler from the pool. */
    private CompiledQuery issue(String sql, SqlExecutionContext context) throws SqlException {
        SqlCompiler sqlCompiler = sqlCompilerPool.alloc();
        try {
            return sqlCompiler.compile(sql, context);
        } finally {
            // Always return the compiler, also when compile throws.
            sqlCompilerPool.free(sqlCompiler);
        }
    }

    /** Creates a fresh single-worker SQL execution context bound to the current engine. */
    private SqlExecutionContext newContext() {
        return new SqlExecutionContextImpl(engine(), 1);
    }

    /**
     * A questDb table. Owns the write lock serializing all writes to the table, and
     * tracks the highest timestamp written so far (rows must be appended in
     * non-decreasing timestamp order).
     */
    private class Table {

        private final Object writeLock = new Object();
        private final String name;
        private final Clock clock;
        private final File dir;
        // Highest row timestamp (millis) written so far; see adjustOrDiscard.
        private long highestTimestampAdded = 0;

        Table(String dataDir, String name, Clock clock) {
            this.name = name;
            this.clock = clock;
            this.dir = new File(dataDir, name);
            IOUtils.createDirectory(dir.getPath());
            // Remove any stale transaction scoreboard left behind by an unclean shutdown.
            new File(dir + "/_txn_scoreboard").delete();
        }

        /** Returns whether this table exists in the engine (status 0). */
        boolean exists() {
            return 0 == engine().getStatus(newContext().getCairoSecurityContext(), new Path(), name);
        }

        TableWriter getWriter() {
            return engine().getWriter(newContext().getCairoSecurityContext(), name);
        }

        /** Drops day partitions older than 4 days, but never all partitions at once. */
        void gc() {
            synchronized (writeLock) {
                Instant oldestToKeep = clock.instant().minus(Duration.ofDays(4));
                SqlExecutionContext context = newContext();
                int partitions = 0;
                try {
                    List<String> removeList = new ArrayList<>();
                    for (String dirEntry : dir.list()) {
                        File partitionDir = new File(dir, dirEntry);
                        if (!partitionDir.isDirectory()) continue;
                        partitions++;
                        // Partition directory names start with the day, yyyy-MM-dd.
                        DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME.withZone(ZoneId.of("UTC"));
                        Instant partitionDay = Instant.from(formatter.parse(dirEntry.substring(0, 10) + "T00:00:00"));
                        if (partitionDay.isBefore(oldestToKeep))
                            removeList.add(dirEntry);
                    }
                    // Keep at least one partition: only drop when some (but not all) are old.
                    if (removeList.size() < partitions && !removeList.isEmpty()) {
                        issue("alter table " + name + " drop partition list " +
                              removeList.stream().map(dir -> "'" + dir + "'").collect(Collectors.joining(",")),
                              context);
                    }
                } catch (SqlException e) {
                    // Gc is best-effort: log and try again on the next run.
                    log.log(Level.WARNING, "Failed to gc old metrics data in " + dir + " table " + name, e);
                }
            }
        }

        /**
         * Repairs this db on corruption.
         *
         * @param e the exception indicating corruption
         */
        private void repair(Exception e) {
            log.log(Level.WARNING, "QuestDb seems corrupted, wiping data and starting over", e);
            IOUtils.recursiveDeleteDir(dir);
            IOUtils.createDirectory(dir.getPath());
            ensureTablesExist();
        }

        /** Adds the given column to this table if it is not already present (simple schema migration). */
        void ensureColumnExists(String column, String columnType) throws SqlException {
            if (columnNames().contains(column)) return;
            issue("alter table " + name + " add column " + column + " " + columnType, newContext());
        }

        /**
         * Maps a snapshot time to a usable row timestamp (millis), enforcing monotonicity:
         * newer-or-equal times are used as-is, times up to a minute behind the highest
         * written are bumped up to it, and anything older is discarded (empty).
         */
        private Optional<Long> adjustOrDiscard(Instant at) {
            long timestamp = at.toEpochMilli();
            if (timestamp >= highestTimestampAdded) {
                highestTimestampAdded = timestamp;
                return Optional.of(timestamp);
            }
            if (timestamp >= highestTimestampAdded - 60 * 1000) return Optional.of(highestTimestampAdded);
            return Optional.empty();
        }

        /** Returns the names of the columns of this table, via 'show columns'. */
        private List<String> columnNames() throws SqlException {
            var context = newContext();
            List<String> columns = new ArrayList<>();
            try (RecordCursorFactory factory = issue("show columns from " + name, context).getRecordCursorFactory()) {
                try (RecordCursor cursor = factory.getCursor(context)) {
                    Record record = cursor.getRecord();
                    while (cursor.hasNext()) {
                        columns.add(record.getStr(0).toString());
                    }
                }
            }
            return columns;
        }

    }

}
|
Do you mean I should add a log message before the throw statement? Done.
|
/**
 * Returns the URL of the storage queue.
 *
 * @return the URL of the storage queue
 * @throws RuntimeException if the queue URL is malformed
 */
public URL getQueueUrl() {
    try {
        return new URL(client.url());
    } catch (MalformedURLException ex) {
        // Preserve the cause so the offending URL and its parse failure stay diagnosable.
        throw new RuntimeException("Queue URL is malformed", ex);
    }
}
|
throw new RuntimeException("Queue URL is malformed");
|
/**
 * Returns the URL of the storage queue.
 *
 * @return the URL of the storage queue
 * @throws RuntimeException if the queue URL is malformed
 */
public URL getQueueUrl() {
    try {
        return new URL(client.url());
    } catch (MalformedURLException ex) {
        LOGGER.asError().log("Queue URL is malformed");
        // Preserve the cause so the offending URL and its parse failure stay diagnosable.
        throw new RuntimeException("Queue URL is malformed", ex);
    }
}
|
class QueueAsyncClient {
// Autogenerated service client that performs the REST calls against the queue service.
private final AzureQueueStorageImpl client;
// Name of the queue all operations of this client target.
private final String queueName;
/**
 * Creates a QueueAsyncClient that sends requests to the storage queue service at the URL of the
 * given {@code client}. Each service call goes through the {@link HttpPipeline pipeline} of the
 * {@code client}.
 *
 * @param client Client that interacts with the service interfaces
 * @param queueName Name of the queue
 */
QueueAsyncClient(AzureQueueStorageImpl client, String queueName) {
    this.queueName = queueName;
    // Rebuild the service client so this instance owns its own copy with the same
    // pipeline, URL and service version.
    this.client = new AzureQueueStorageBuilder().pipeline(client.httpPipeline())
        .url(client.url())
        .version(client.version())
        .build();
}
/**
 * Creates a QueueAsyncClient that sends requests to the storage queue service at {@code endpoint}.
 * Each service call goes through the {@code httpPipeline}.
 *
 * @param endpoint URL for the Storage Queue service
 * @param httpPipeline HttpPipeline that the HTTP requests and response flow through
 * @param queueName Name of the queue
 */
QueueAsyncClient(URL endpoint, HttpPipeline httpPipeline, String queueName) {
    AzureQueueStorageImpl serviceClient = new AzureQueueStorageBuilder()
        .pipeline(httpPipeline)
        .url(endpoint.toString())
        .build();
    this.client = serviceClient;
    this.queueName = queueName;
}
/**
* @return the URL of the storage queue
* @throws RuntimeException If the queue is using a malformed URL.
*/
/**
 * Creates a new queue with no metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Create a queue</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.create}
 *
 * @return A response that only contains headers and response status code
 * @throws StorageErrorException If a queue with the same name already exists in the queue service.
 */
public Mono<VoidResponse> create() {
    Map<String, String> noMetadata = null;
    return create(noMetadata);
}
/**
 * Creates a new queue with the given metadata.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Create a queue with metadata "queue:metadataMap"</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.create#map}
 *
 * @param metadata Metadata to associate with the queue
 * @return A response that only contains headers and response status code
 * @throws StorageErrorException If a queue with the same name and different metadata already exists in the queue service.
 */
public Mono<VoidResponse> create(Map<String, String> metadata) {
    return client.queues().createWithRestResponseAsync(queueName, null, metadata, null, Context.NONE)
        .map(VoidResponse::new);
}
/**
 * Permanently deletes the queue.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Delete a queue</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.delete}
 *
 * @return A response that only contains headers and response status code
 * @throws StorageErrorException If the queue doesn't exist
 */
public Mono<VoidResponse> delete() {
    return client.queues()
        .deleteWithRestResponseAsync(queueName, Context.NONE)
        .map(response -> new VoidResponse(response));
}
/**
 * Retrieves metadata and approximate message count of the queue.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Get the properties of the queue</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.getProperties}
 *
 * @return A response containing a {@link QueueProperties} value which contains the metadata and
 * approximate messages count of the queue.
 * @throws StorageErrorException If the queue doesn't exist
 */
public Mono<Response<QueueProperties>> getProperties() {
    return client.queues()
        .getPropertiesWithRestResponseAsync(queueName, Context.NONE)
        .map(response -> getQueuePropertiesResponse(response));
}
/**
 * Sets the metadata of the queue.
 *
 * Passing in a {@code null} value for metadata will clear the metadata associated with the queue.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Set the queue's metadata to "queue:metadataMap"</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.setMetadata#map}
 *
 * <p>Clear the queue's metadata</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMetadata#map}
 *
 * @param metadata Metadata to set on the queue
 * @return A response that only contains headers and response status code
 * @throws StorageErrorException If the queue doesn't exist
 */
public Mono<VoidResponse> setMetadata(Map<String, String> metadata) {
    return client.queues().setMetadataWithRestResponseAsync(queueName, null, metadata, null, Context.NONE)
        .map(VoidResponse::new);
}
/**
 * Retrieves stored access policies specified on the queue.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>List the stored access policies</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.getAccessPolicy}
 *
 * @return The stored access policies specified on the queue.
 * @throws StorageErrorException If the queue doesn't exist
 */
public Flux<SignedIdentifier> getAccessPolicy() {
    return client.queues()
        .getAccessPolicyWithRestResponseAsync(queueName, Context.NONE)
        .flatMapMany(policiesResponse -> Flux.fromIterable(policiesResponse.value()));
}
/**
 * Sets stored access policies on the queue.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Set a read only stored access policy</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.setAccessPolicy}
 *
 * @param permissions Access policies to set on the queue
 * @return A response that only contains headers and response status code
 * @throws StorageErrorException If the queue doesn't exist, a stored access policy doesn't have
 * all fields filled out, or the queue will have more than five policies.
 */
public Mono<VoidResponse> setAccessPolicy(List<SignedIdentifier> permissions) {
    return client.queues()
        .setAccessPolicyWithRestResponseAsync(queueName, permissions, null, null, Context.NONE)
        .map(response -> new VoidResponse(response));
}
/**
 * Deletes all messages in the queue.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Clear the messages</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMessages}
 *
 * @return A response that only contains headers and response status code
 * @throws StorageErrorException If the queue doesn't exist
 */
public Mono<VoidResponse> clearMessages() {
    return client.messages()
        .clearWithRestResponseAsync(queueName, Context.NONE)
        .map(response -> new VoidResponse(response));
}
/**
 * Enqueues a message that has a time-to-live of 7 days and is instantly visible.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Enqueue a message of "Hello, Azure"</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.enqueueMessage#string}
 *
 * @param messageText Message text
 * @return A {@link EnqueuedMessage} value that contains the information (such as the message id
 * and pop receipt) used to interact with the message, plus other metadata about the enqueued
 * message.
 * @throws StorageErrorException If the queue doesn't exist
 */
public Mono<Response<EnqueuedMessage>> enqueueMessage(String messageText) {
    // Defaults: visible immediately (0s), alive for 7 days.
    return enqueueMessage(messageText, Duration.ofSeconds(0), Duration.ofDays(7));
}
/**
 * Enqueues a message with a given time-to-live and a timeout period where the message is invisible in the queue.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Add a message of "Hello, Azure" that has a timeout of 5 seconds</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.enqueueMessage#string-duration-duration}
 *
 * <p>Add a message of "Goodbye, Azure" that has a time to live of 5 seconds</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.enqueueMessageLiveTime#string-duration-duration}
 *
 * @param messageText Message text
 * @param visibilityTimeout Optional. The timeout period for how long the message is invisible in the queue in seconds.
 * If unset the value will default to 0 and the message will be instantly visible. The timeout must be between 0
 * seconds and 7 days.
 * @param timeToLive Optional. How long the message will stay alive in the queue in seconds. If unset the value will
 * default to 7 days, if -1 is passed the message will not expire. The time to live must be -1 or any positive number.
 * @return A {@link EnqueuedMessage} value that contains the information (such as the message id
 * and pop receipt) used to interact with the message, plus other metadata about the enqueued
 * message.
 * @throws StorageErrorException If the queue doesn't exist or the {@code visibilityTimeout} or {@code timeToLive}
 * are outside of the allowed limits.
 */
public Mono<Response<EnqueuedMessage>> enqueueMessage(String messageText, Duration visibilityTimeout, Duration timeToLive) {
    // The service takes whole seconds; null means "use the service default".
    // (The int cast is safe within the documented 7-day/positive limits.)
    Integer visibilityTimeoutInSeconds = (visibilityTimeout == null) ? null : (int) visibilityTimeout.getSeconds();
    Integer timeToLiveInSeconds = (timeToLive == null) ? null : (int) timeToLive.getSeconds();
    QueueMessage message = new QueueMessage().messageText(messageText);
    // The service responds with a single-element list; unwrap it.
    return client.messages().enqueueWithRestResponseAsync(queueName, message, visibilityTimeoutInSeconds, timeToLiveInSeconds, null, null, Context.NONE)
        .map(response -> new SimpleResponse<>(response, response.value().get(0)));
}
/**
 * Retrieves the first message in the queue and hides it from other operations for 30 seconds.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Dequeue a message</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.dequeueMessages}
 *
 * @return The first {@link DequeuedMessage} in the queue. It contains the information (such as
 * the message id and pop receipt) used to interact with the message, plus other metadata about
 * the message.
 * @throws StorageErrorException If the queue doesn't exist
 */
public Flux<DequeuedMessage> dequeueMessages() {
    // Defaults: one message, invisible for 30 seconds.
    return dequeueMessages(1, Duration.ofSeconds(30));
}
/**
 * Retrieves up to the maximum number of messages from the queue and hides them from other operations for 30 seconds.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Dequeue up to 5 messages</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.dequeueMessages#integer}
 *
 * @param maxMessages Optional. Maximum number of messages to get; if fewer messages exist in the queue than requested
 * all the messages will be returned. If left empty only 1 message will be retrieved, the allowed range is 1 to 32
 * messages.
 * @return Up to {@code maxMessages} {@link DequeuedMessage DequeuedMessages} from the queue. Each DequeuedMessage
 * contains the information (such as the message id and pop receipt) used to interact with the
 * message, plus other metadata about the message.
 * @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} is outside of the allowed bounds
 */
public Flux<DequeuedMessage> dequeueMessages(Integer maxMessages) {
    // Default visibility timeout of 30 seconds.
    return dequeueMessages(maxMessages, Duration.ofSeconds(30));
}
/**
 * Retrieves up to the maximum number of messages from the queue and hides them from other operations for the
 * timeout period.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Dequeue up to 5 messages and give them a 60 second timeout period</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.dequeueMessages#integer-duration}
 *
 * @param maxMessages Optional. Maximum number of messages to get; if fewer messages exist in the queue than requested
 * all the messages will be returned. If left empty only 1 message will be retrieved, the allowed range is 1 to 32
 * messages.
 * @param visibilityTimeout Optional. The timeout period for how long the message is invisible in the queue in seconds.
 * If left empty the dequeued messages will be invisible for 30 seconds. The timeout must be between 1 second and 7 days.
 * @return Up to {@code maxMessages} {@link DequeuedMessage DequeuedMessages} from the queue. Each DequeuedMessage
 * contains the information (such as the message id and pop receipt) used to interact with the
 * message, plus other metadata about the message.
 * @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} or {@code visibilityTimeout} is
 * outside of the allowed bounds
 */
public Flux<DequeuedMessage> dequeueMessages(Integer maxMessages, Duration visibilityTimeout) {
    // The service takes whole seconds; null means "use the service default".
    Integer visibilityTimeoutInSeconds = (visibilityTimeout == null) ? null : (int) visibilityTimeout.getSeconds();
    return client.messages().dequeueWithRestResponseAsync(queueName, maxMessages, visibilityTimeoutInSeconds, null, null, Context.NONE)
        .flatMapMany(response -> Flux.fromIterable(response.value()));
}
/**
 * Peeks the first message in the queue.
 *
 * Peeked messages don't contain the necessary information needed to interact with the message
 * nor will it hide messages from other operations on the queue.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Peek the first message</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.peekMessages}
 *
 * @return A {@link PeekedMessage} that contains metadata about the message.
 */
public Flux<PeekedMessage> peekMessages() {
    Integer defaultMaxMessages = null;
    return peekMessages(defaultMaxMessages);
}
/**
 * Peeks messages from the front of the queue, up to the maximum number of messages.
 *
 * <p>Peeked messages don't contain the information needed to interact with the message, nor do they hide
 * messages from other operations on the queue.</p>
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Peek up to the first five messages</p>
 *
 * <pre>
 * client.peekMessages(5)
 *     .subscribe(result -> System.out.printf("Peeked message %s has been dequeued %d times", result.messageId(), result.dequeueCount()));
 * </pre>
 *
 * @param maxMessages Optional. Maximum number of messages to peek; if the queue holds fewer messages than
 * requested, all of them are peeked. If left empty only 1 message will be peeked; the allowed range is 1 to 32
 * messages.
 * @return Up to {@code maxMessages} {@link PeekedMessage PeekedMessages} from the queue, each containing
 * metadata about a message.
 * @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} is outside of the allowed bounds
 */
public Flux<PeekedMessage> peekMessages(Integer maxMessages) {
    return client.messages()
        .peekWithRestResponseAsync(queueName, maxMessages, null, null, Context.NONE)
        .flatMapMany(peekResponse -> Flux.fromIterable(peekResponse.value()));
}
/**
 * Updates the specific message in the queue with a new message and resets the visibility timeout.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Dequeue the first message and update it to "Hello again, Azure" and hide it for 5 seconds</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.updateMessage}
 *
 * @param messageText Updated value for the message
 * @param messageId Id of the message to update
 * @param popReceipt Unique identifier that must match for the message to be updated
 * @param visibilityTimeout Required. The timeout period for how long the message is invisible in the queue. The
 * timeout period must be between 1 second and 7 days.
 * @return A {@link UpdatedMessage} that contains the updated metadata (such as the new pop receipt) about the
 * message.
 * @throws StorageErrorException If the queue or messageId don't exist, the popReceipt doesn't match on the message,
 * or the {@code visibilityTimeout} is outside the allowed bounds
 */
public Mono<Response<UpdatedMessage>> updateMessage(String messageText, String messageId, String popReceipt, Duration visibilityTimeout) {
    QueueMessage updatedMessage = new QueueMessage().messageText(messageText);
    int timeoutInSeconds = (int) visibilityTimeout.getSeconds();
    return client.messageIds()
        .updateWithRestResponseAsync(queueName, messageId, updatedMessage, popReceipt, timeoutInSeconds, Context.NONE)
        .map(this::getUpdatedMessageResponse);
}
/**
 * Deletes the specified message from the queue.
 *
 * <p><strong>Code Samples</strong></p>
 *
 * <p>Delete the first message</p>
 *
 * {@codesnippet com.azure.storage.queue.queueAsyncClient.deleteMessage}
 *
 * @param messageId Id of the message to be deleted
 * @param popReceipt Unique identifier that must match for the message to be deleted
 * @return A response that only contains headers and response status code
 * @throws StorageErrorException If the queue or messageId don't exist or the popReceipt doesn't match on the message
 */
public Mono<VoidResponse> deleteMessage(String messageId, String popReceipt) {
    return client.messageIds()
        .deleteWithRestResponseAsync(queueName, messageId, popReceipt, Context.NONE)
        .map(VoidResponse::new);
}
/*
 * Maps the deserialized headers of a getProperties call onto the public QueueProperties type.
 * @param response Service response
 * @return Mapped response carrying the queue's metadata and approximate message count
 */
private Response<QueueProperties> getQueuePropertiesResponse(QueuesGetPropertiesResponse response) {
    QueueGetPropertiesHeaders headers = response.deserializedHeaders();
    QueueProperties queueProperties = new QueueProperties(headers.metadata(), headers.approximateMessagesCount());
    return new SimpleResponse<>(response, queueProperties);
}
/*
 * Maps the deserialized headers of an update call onto the public UpdatedMessage type.
 * @param response Service response
 * @return Mapped response carrying the new pop receipt and next-visible time
 */
private Response<UpdatedMessage> getUpdatedMessageResponse(MessageIdsUpdateResponse response) {
    MessageIdUpdateHeaders updateHeaders = response.deserializedHeaders();
    UpdatedMessage result = new UpdatedMessage(updateHeaders.popReceipt(), updateHeaders.timeNextVisible());
    return new SimpleResponse<>(response, result);
}
}
|
/*
 * Async client that performs queue-level and message-level operations against a single Azure Storage queue.
 * Every network call is delegated to the generated AzureQueueStorageImpl protocol layer.
 */
class QueueAsyncClient {
    // NOTE(review): LOGGER appears unused in this class — confirm before removing.
    private static final ClientLogger LOGGER = new ClientLogger(QueueAsyncClient.class);
    // Generated protocol-layer client that performs the actual REST calls.
    private final AzureQueueStorageImpl client;
    // Name of the queue that every operation on this client targets.
    private final String queueName;

    /**
     * Creates a QueueAsyncClient that sends requests to the storage queue service at the URL of the given
     * protocol-layer client. Each service call goes through the {@link HttpPipeline pipeline} of that client.
     *
     * @param client Client that interacts with the service interfaces
     * @param queueName Name of the queue
     */
    QueueAsyncClient(AzureQueueStorageImpl client, String queueName) {
        this.queueName = queueName;
        this.client = new AzureQueueStorageBuilder().pipeline(client.httpPipeline())
            .url(client.url())
            .version(client.version())
            .build();
    }

    /**
     * Creates a QueueAsyncClient that sends requests to the storage queue service at {@code endpoint}.
     * Each service call goes through the {@code httpPipeline}.
     *
     * <p>NOTE(review): unlike the other constructor no service version is set on the builder here — confirm
     * the builder applies a suitable default.</p>
     *
     * @param endpoint URL for the Storage Queue service
     * @param httpPipeline HttpPipeline that the HTTP requests and response flow through
     * @param queueName Name of the queue
     */
    QueueAsyncClient(URL endpoint, HttpPipeline httpPipeline, String queueName) {
        this.queueName = queueName;
        this.client = new AzureQueueStorageBuilder().pipeline(httpPipeline)
            .url(endpoint.toString())
            .build();
    }

    /**
     * Creates a new queue.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Create a queue</p>
     *
     * {@codesnippet com.azure.storage.queue.queueAsyncClient.create}
     *
     * @return A response that only contains headers and response status code
     * @throws StorageErrorException If a queue with the same name already exists in the queue service.
     */
    public Mono<VoidResponse> create() {
        return create(null);
    }

    /**
     * Creates a new queue with the given metadata.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Create a queue with metadata "queue:metadataMap"</p>
     *
     * {@codesnippet com.azure.storage.queue.queueAsyncClient.create}
     *
     * @param metadata Metadata to associate with the queue, may be null
     * @return A response that only contains headers and response status code
     * @throws StorageErrorException If a queue with the same name and different metadata already exists in the queue service.
     */
    public Mono<VoidResponse> create(Map<String, String> metadata) {
        return client.queues().createWithRestResponseAsync(queueName, null, metadata, null, Context.NONE)
            .map(VoidResponse::new);
    }

    /**
     * Permanently deletes the queue.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Delete a queue</p>
     *
     * {@codesnippet com.azure.storage.queue.queueAsyncClient.delete}
     *
     * @return A response that only contains headers and response status code
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Mono<VoidResponse> delete() {
        return client.queues().deleteWithRestResponseAsync(queueName, Context.NONE)
            .map(VoidResponse::new);
    }

    /**
     * Retrieves metadata and approximate message count of the queue.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Get the properties of the queue</p>
     *
     * {@codesnippet com.azure.storage.queue.queueAsyncClient.getProperties}
     *
     * @return A response containing a {@link QueueProperties} value which contains the metadata and approximate
     * messages count of the queue.
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Mono<Response<QueueProperties>> getProperties() {
        return client.queues().getPropertiesWithRestResponseAsync(queueName, Context.NONE)
            .map(this::getQueuePropertiesResponse);
    }

    /**
     * Sets the metadata of the queue.
     *
     * Passing in a {@code null} value for metadata will clear the metadata associated with the queue.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Set the queue's metadata to "queue:metadataMap"</p>
     *
     * {@codesnippet com.azure.storage.queue.queueAsyncClient.setMetadata}
     *
     * <p>Clear the queue's metadata</p>
     *
     * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMetadata}
     *
     * @param metadata Metadata to set on the queue, null clears the existing metadata
     * @return A response that only contains headers and response status code
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Mono<VoidResponse> setMetadata(Map<String, String> metadata) {
        return client.queues().setMetadataWithRestResponseAsync(queueName, null, metadata, null, Context.NONE)
            .map(VoidResponse::new);
    }

    /**
     * Retrieves stored access policies specified on the queue.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>List the stored access policies</p>
     *
     * {@codesnippet com.azure.storage.queue.queueAsyncClient.getAccessPolicy}
     *
     * @return The stored access policies specified on the queue.
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Flux<SignedIdentifier> getAccessPolicy() {
        return client.queues().getAccessPolicyWithRestResponseAsync(queueName, Context.NONE)
            .flatMapMany(response -> Flux.fromIterable(response.value()));
    }

    /**
     * Sets stored access policies on the queue.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Set a read only stored access policy</p>
     *
     * {@codesnippet com.azure.storage.queue.queueAsyncClient.setAccessPolicy}
     *
     * @param permissions Access policies to set on the queue
     * @return A response that only contains headers and response status code
     * @throws StorageErrorException If the queue doesn't exist, a stored access policy doesn't have all fields filled out,
     * or the queue will have more than five policies.
     */
    public Mono<VoidResponse> setAccessPolicy(List<SignedIdentifier> permissions) {
        return client.queues().setAccessPolicyWithRestResponseAsync(queueName, permissions, null, null, Context.NONE)
            .map(VoidResponse::new);
    }

    /**
     * Deletes all messages in the queue.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Clear the messages</p>
     *
     * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMessages}
     *
     * @return A response that only contains headers and response status code
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Mono<VoidResponse> clearMessages() {
        return client.messages().clearWithRestResponseAsync(queueName, Context.NONE)
            .map(VoidResponse::new);
    }

    /**
     * Enqueues a message that has a time-to-live of 7 days and is instantly visible.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Enqueue a message of "Hello, Azure"</p>
     *
     * {@codesnippet com.azure.storage.queue.queueAsyncClient.enqueueMessage}
     *
     * @param messageText Message text
     * @return A response containing the {@link EnqueuedMessage} with metadata about the enqueued message.
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Mono<Response<EnqueuedMessage>> enqueueMessage(String messageText) {
        // Instantly visible (0s visibility timeout), default 7 day time-to-live.
        return enqueueMessage(messageText, Duration.ofSeconds(0), Duration.ofDays(7));
    }

    /**
     * Enqueues a message with a given time-to-live and a timeout period where the message is invisible in the queue.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Add a message of "Hello, Azure" that has a timeout of 5 seconds</p>
     *
     * {@codesnippet com.azure.storage.queue.queueAsyncClient.enqueueMessage}
     *
     * <p>Add a message of "Goodbye, Azure" that has a time to live of 5 seconds</p>
     *
     * {@codesnippet com.azure.storage.queue.queueAsyncClient.enqueueMessageLiveTime}
     *
     * @param messageText Message text
     * @param visibilityTimeout Optional. The timeout period for how long the message is invisible in the queue.
     * If unset the value will default to 0 and the message will be instantly visible. The timeout must be between 0
     * seconds and 7 days.
     * @param timeToLive Optional. How long the message will stay alive in the queue. If unset the value will
     * default to 7 days, if -1 is passed the message will not expire. The time to live must be -1 or any positive number.
     * @return A response containing the {@link EnqueuedMessage} with metadata about the enqueued message.
     * @throws StorageErrorException If the queue doesn't exist or the {@code visibilityTimeout} or {@code timeToLive}
     * are outside of the allowed limits.
     */
    public Mono<Response<EnqueuedMessage>> enqueueMessage(String messageText, Duration visibilityTimeout, Duration timeToLive) {
        // The REST layer takes whole seconds; null propagates "use the service default".
        Integer visibilityTimeoutInSeconds = (visibilityTimeout == null) ? null : (int) visibilityTimeout.getSeconds();
        Integer timeToLiveInSeconds = (timeToLive == null) ? null : (int) timeToLive.getSeconds();
        QueueMessage message = new QueueMessage().messageText(messageText);
        return client.messages().enqueueWithRestResponseAsync(queueName, message, visibilityTimeoutInSeconds, timeToLiveInSeconds, null, null, Context.NONE)
            .map(response -> new SimpleResponse<>(response, response.value().get(0)));
    }

    /**
     * Retrieves the first message in the queue and hides it from other operations for 30 seconds.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Dequeue a message</p>
     *
     * {@codesnippet com.azure.storage.queue.queueAsyncClient.dequeueMessages}
     *
     * @return A {@link Flux} emitting at most one {@link DequeuedMessage}, which contains the message body and
     * other metadata about the message.
     * @throws StorageErrorException If the queue doesn't exist
     */
    public Flux<DequeuedMessage> dequeueMessages() {
        return dequeueMessages(1, Duration.ofSeconds(30));
    }

    /**
     * Retrieves up to the maximum number of messages from the queue and hides them from other operations for 30 seconds.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Dequeue up to 5 messages</p>
     *
     * {@codesnippet com.azure.storage.queue.queueAsyncClient.dequeueMessages}
     *
     * @param maxMessages Optional. Maximum number of messages to get; if the queue holds fewer messages than
     * requested, all of them are returned. If left empty only 1 message will be retrieved; the allowed range is 1 to 32
     * messages.
     * @return Up to {@code maxMessages} {@link DequeuedMessage DequeuedMessages} from the queue. Each DequeuedMessage
     * contains the message body and other metadata about the message.
     * @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} is outside of the allowed bounds
     */
    public Flux<DequeuedMessage> dequeueMessages(Integer maxMessages) {
        return dequeueMessages(maxMessages, Duration.ofSeconds(30));
    }

    /**
     * Retrieves up to the maximum number of messages from the queue and hides them from other operations for the
     * timeout period.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Dequeue up to 5 messages and give them a 60 second timeout period</p>
     *
     * {@codesnippet com.azure.storage.queue.queueAsyncClient.dequeueMessages}
     *
     * @param maxMessages Optional. Maximum number of messages to get; if the queue holds fewer messages than
     * requested, all of them are returned. If left empty only 1 message will be retrieved; the allowed range is 1 to 32
     * messages.
     * @param visibilityTimeout Optional. The timeout period for how long the message is invisible in the queue.
     * If left empty the dequeued messages will be invisible for 30 seconds. The timeout must be between 1 second and 7 days.
     * @return Up to {@code maxMessages} {@link DequeuedMessage DequeuedMessages} from the queue. Each DequeuedMessage
     * contains the message body and other metadata about the message.
     * @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} or {@code visibilityTimeout} is
     * outside of the allowed bounds
     */
    public Flux<DequeuedMessage> dequeueMessages(Integer maxMessages, Duration visibilityTimeout) {
        Integer visibilityTimeoutInSeconds = (visibilityTimeout == null) ? null : (int) visibilityTimeout.getSeconds();
        return client.messages().dequeueWithRestResponseAsync(queueName, maxMessages, visibilityTimeoutInSeconds, null, null, Context.NONE)
            .flatMapMany(response -> Flux.fromIterable(response.value()));
    }

    /**
     * Peeks the first message in the queue.
     *
     * Peeked messages don't contain the necessary information needed to interact with the message nor will it hide
     * messages from other operations on the queue.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Peek the first message</p>
     *
     * {@codesnippet com.azure.storage.queue.queueAsyncClient.peekMessages}
     *
     * @return A {@link Flux} emitting at most one {@link PeekedMessage} that contains metadata about the message.
     */
    public Flux<PeekedMessage> peekMessages() {
        return peekMessages(null);
    }

    /**
     * Peek messages from the front of the queue up to the maximum number of messages.
     *
     * Peeked messages don't contain the necessary information needed to interact with the message nor will it hide
     * messages from other operations on the queue.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Peek up to the first five messages</p>
     *
     * {@codesnippet com.azure.storage.queue.queueAsyncClient.peekMessages}
     *
     * @param maxMessages Optional. Maximum number of messages to peek; if the queue holds fewer messages than
     * requested, all of them are peeked. If left empty only 1 message will be peeked; the allowed range is 1 to 32
     * messages.
     * @return Up to {@code maxMessages} {@link PeekedMessage PeekedMessages} from the queue. Each PeekedMessage contains
     * metadata about the message.
     * @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} is outside of the allowed bounds
     */
    public Flux<PeekedMessage> peekMessages(Integer maxMessages) {
        return client.messages().peekWithRestResponseAsync(queueName, maxMessages, null, null, Context.NONE)
            .flatMapMany(response -> Flux.fromIterable(response.value()));
    }

    /**
     * Updates the specific message in the queue with a new message and resets the visibility timeout.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Dequeue the first message and update it to "Hello again, Azure" and hide it for 5 seconds</p>
     *
     * {@codesnippet com.azure.storage.queue.queueAsyncClient.updateMessage}
     *
     * @param messageText Updated value for the message
     * @param messageId Id of the message to update
     * @param popReceipt Unique identifier that must match for the message to be updated
     * @param visibilityTimeout Required. The timeout period for how long the message is invisible in the queue. The
     * timeout period must be between 1 second and 7 days.
     * @return A {@link UpdatedMessage} that contains the updated metadata (such as the new pop receipt) about the
     * message.
     * @throws StorageErrorException If the queue or messageId don't exist, the popReceipt doesn't match on the message,
     * or the {@code visibilityTimeout} is outside the allowed bounds
     */
    public Mono<Response<UpdatedMessage>> updateMessage(String messageText, String messageId, String popReceipt, Duration visibilityTimeout) {
        QueueMessage message = new QueueMessage().messageText(messageText);
        return client.messageIds().updateWithRestResponseAsync(queueName, messageId, message, popReceipt, (int) visibilityTimeout.getSeconds(), Context.NONE)
            .map(this::getUpdatedMessageResponse);
    }

    /**
     * Deletes the specified message in the queue
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <p>Delete the first message</p>
     *
     * {@codesnippet com.azure.storage.queue.queueAsyncClient.deleteMessage}
     *
     * @param messageId Id of the message to be deleted
     * @param popReceipt Unique identifier that must match for the message to be deleted
     * @return A response that only contains headers and response status code
     * @throws StorageErrorException If the queue or messageId don't exist or the popReceipt doesn't match on the message
     */
    public Mono<VoidResponse> deleteMessage(String messageId, String popReceipt) {
        return client.messageIds().deleteWithRestResponseAsync(queueName, messageId, popReceipt, Context.NONE)
            .map(VoidResponse::new);
    }

    /*
     * Maps the HTTP headers returned from the service to the expected response type
     * @param response Service response
     * @return Mapped response
     */
    private Response<QueueProperties> getQueuePropertiesResponse(QueuesGetPropertiesResponse response) {
        QueueGetPropertiesHeaders propertiesHeaders = response.deserializedHeaders();
        QueueProperties properties = new QueueProperties(propertiesHeaders.metadata(), propertiesHeaders.approximateMessagesCount());
        return new SimpleResponse<>(response, properties);
    }

    /*
     * Maps the HTTP headers returned from the service to the expected response type
     * @param response Service response
     * @return Mapped response
     */
    private Response<UpdatedMessage> getUpdatedMessageResponse(MessageIdsUpdateResponse response) {
        MessageIdUpdateHeaders headers = response.deserializedHeaders();
        UpdatedMessage updatedMessage = new UpdatedMessage(headers.popReceipt(), headers.timeNextVisible());
        return new SimpleResponse<>(response, updatedMessage);
    }
}
|
<!--thread_id:cc_182613813_t; commit:a16a978c74d440d81ca78b3c44be72eae312ef74; resolved:1--> <!--section:context-quote--> > **tgroh** wrote: > Why is this uninterruptible? <!--section:body--> To ensure that we sleep fully for the amount of time requested. Given that we don't use interrupts anywhere in Beam code, we don't expect this to happen anyway. Alternatively we could just crash on interrupts (i.e., throw a RuntimeException wrapping the interruption).
|
/**
 * Waits for the job to reach a terminal state, for at most {@code duration}.
 *
 * @param duration maximum time to wait; a duration of less than 1ms means "wait indefinitely"
 * @return the terminal state, or {@code null} if the job did not finish within the deadline
 */
public State waitUntilFinish(Duration duration) {
    if (duration.compareTo(Duration.millis(1)) < 1) {
        // Sub-millisecond (or non-positive) duration: fall back to the unbounded wait.
        return waitUntilFinish();
    } else {
        CompletableFuture<State> result = CompletableFuture.supplyAsync(this::waitUntilFinish);
        try {
            // Unlike Uninterruptibles.getUninterruptibly, a plain get() lets callers cancel the wait
            // by interrupting the thread instead of silently swallowing the interruption.
            return result.get(duration.getMillis(), TimeUnit.MILLISECONDS);
        } catch (TimeoutException e) {
            // Deadline elapsed before the job reached a terminal state.
            return null;
        } catch (InterruptedException e) {
            // Restore the interrupt flag before propagating.
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        } catch (ExecutionException e) {
            throw new RuntimeException(e);
        }
    }
}
|
return Uninterruptibles.getUninterruptibly(
|
/**
 * Waits for the job to reach a terminal state, for at most {@code duration}.
 *
 * @param duration maximum time to wait; a duration of less than 1ms means "wait indefinitely"
 * @return the terminal state, or {@code null} if the job did not finish within the deadline
 */
public State waitUntilFinish(Duration duration) {
    // Sub-millisecond (or non-positive) durations fall back to the unbounded wait.
    if (duration.compareTo(Duration.millis(1)) < 1) {
        return waitUntilFinish();
    }
    CompletableFuture<State> terminalState = CompletableFuture.supplyAsync(this::waitUntilFinish);
    try {
        return terminalState.get(duration.getMillis(), TimeUnit.MILLISECONDS);
    } catch (TimeoutException e) {
        // The job did not reach a terminal state within the deadline.
        return null;
    } catch (InterruptedException e) {
        // Restore the interrupt flag before propagating.
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
    } catch (ExecutionException e) {
        throw new RuntimeException(e);
    }
}
|
/** A {@link PipelineResult} for a job submitted to a portability job service; state is obtained by polling. */
class JobServicePipelineResult implements PipelineResult {
  // Poll the job service every 10 seconds while waiting for a terminal state.
  private static final long POLL_INTERVAL_MS = 10 * 1000;
  private static final Logger LOG = LoggerFactory.getLogger(JobServicePipelineResult.class);
  private final ByteString jobId;
  private final CloseableResource<JobServiceBlockingStub> jobService;

  JobServicePipelineResult(ByteString jobId, CloseableResource<JobServiceBlockingStub> jobService) {
    this.jobId = jobId;
    this.jobService = jobService;
  }

  /** Queries the job service for the job's current state. */
  @Override
  public State getState() {
    JobServiceBlockingStub stub = jobService.get();
    GetJobStateResponse response =
        stub.getState(GetJobStateRequest.newBuilder().setJobIdBytes(jobId).build());
    return getJavaState(response.getState());
  }

  /** Requests cancellation of the job and returns the resulting state. */
  @Override
  public State cancel() {
    JobServiceBlockingStub stub = jobService.get();
    CancelJobResponse response =
        stub.cancel(CancelJobRequest.newBuilder().setJobIdBytes(jobId).build());
    return getJavaState(response.getState());
  }

  /**
   * Polls the job service until the job reaches a terminal state, then closes the service
   * connection and returns that state.
   */
  @Override
  public State waitUntilFinish() {
    JobServiceBlockingStub stub = jobService.get();
    GetJobStateRequest request = GetJobStateRequest.newBuilder().setJobIdBytes(jobId).build();
    GetJobStateResponse response = stub.getState(request);
    State lastState = getJavaState(response.getState());
    while (!lastState.isTerminal()) {
      try {
        // Plain interruptible sleep: restoring the interrupt flag (instead of Guava's
        // sleepUninterruptibly) lets callers cancel this potentially unbounded wait.
        Thread.sleep(POLL_INTERVAL_MS);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
      }
      response = stub.getState(request);
      lastState = getJavaState(response.getState());
    }
    try {
      jobService.close();
    } catch (Exception e) {
      // Best effort cleanup; the terminal state is still returned.
      LOG.warn("Error cleaning up job service", e);
    }
    return lastState;
  }

  @Override
  public MetricResults metrics() {
    throw new UnsupportedOperationException("Not yet implemented.");
  }

  /** Maps the proto job state onto the SDK's {@link State} enum; unknown values map to UNKNOWN. */
  private static State getJavaState(JobApi.JobState.Enum protoState) {
    switch (protoState) {
      case UNSPECIFIED:
        return State.UNKNOWN;
      case STOPPED:
        return State.STOPPED;
      case RUNNING:
        return State.RUNNING;
      case DONE:
        return State.DONE;
      case FAILED:
        return State.FAILED;
      case CANCELLED:
        return State.CANCELLED;
      case UPDATED:
        return State.UPDATED;
      case DRAINING:
        return State.UNKNOWN;
      case DRAINED:
        return State.UNKNOWN;
      case STARTING:
        return State.RUNNING;
      case CANCELLING:
        return State.CANCELLED;
      default:
        LOG.warn("Unrecognized state from server: {}", protoState);
        return State.UNKNOWN;
    }
  }
}
|
/** A {@link PipelineResult} for a job submitted to a portability job service; state is obtained by polling. */
class JobServicePipelineResult implements PipelineResult {
  // Poll the job service every 10 seconds while waiting for a terminal state.
  private static final long POLL_INTERVAL_MS = 10 * 1000;
  private static final Logger LOG = LoggerFactory.getLogger(JobServicePipelineResult.class);
  private final ByteString jobId;
  private final CloseableResource<JobServiceBlockingStub> jobService;

  JobServicePipelineResult(ByteString jobId, CloseableResource<JobServiceBlockingStub> jobService) {
    this.jobId = jobId;
    this.jobService = jobService;
  }

  /** Queries the job service for the job's current state. */
  @Override
  public State getState() {
    JobServiceBlockingStub stub = jobService.get();
    GetJobStateResponse response =
        stub.getState(GetJobStateRequest.newBuilder().setJobIdBytes(jobId).build());
    return getJavaState(response.getState());
  }

  /** Requests cancellation of the job and returns the resulting state. */
  @Override
  public State cancel() {
    JobServiceBlockingStub stub = jobService.get();
    CancelJobResponse response =
        stub.cancel(CancelJobRequest.newBuilder().setJobIdBytes(jobId).build());
    return getJavaState(response.getState());
  }

  /**
   * Polls the job service until the job reaches a terminal state, then closes the service
   * connection and returns that state.
   */
  @Override
  public State waitUntilFinish() {
    JobServiceBlockingStub stub = jobService.get();
    GetJobStateRequest request = GetJobStateRequest.newBuilder().setJobIdBytes(jobId).build();
    GetJobStateResponse response = stub.getState(request);
    State lastState = getJavaState(response.getState());
    while (!lastState.isTerminal()) {
      try {
        Thread.sleep(POLL_INTERVAL_MS);
      } catch (InterruptedException e) {
        // Restore the interrupt flag so callers can cancel this potentially unbounded wait.
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
      }
      response = stub.getState(request);
      lastState = getJavaState(response.getState());
    }
    try {
      jobService.close();
    } catch (Exception e) {
      // Best effort cleanup; the terminal state is still returned.
      LOG.warn("Error cleaning up job service", e);
    }
    return lastState;
  }

  @Override
  public MetricResults metrics() {
    throw new UnsupportedOperationException("Not yet implemented.");
  }

  /** Maps the proto job state onto the SDK's {@link State} enum; unknown values map to UNKNOWN. */
  private static State getJavaState(JobApi.JobState.Enum protoState) {
    switch (protoState) {
      case UNSPECIFIED:
        return State.UNKNOWN;
      case STOPPED:
        return State.STOPPED;
      case RUNNING:
        return State.RUNNING;
      case DONE:
        return State.DONE;
      case FAILED:
        return State.FAILED;
      case CANCELLED:
        return State.CANCELLED;
      case UPDATED:
        return State.UPDATED;
      case DRAINING:
        return State.UNKNOWN;
      case DRAINED:
        return State.UNKNOWN;
      case STARTING:
        return State.RUNNING;
      case CANCELLING:
        return State.CANCELLED;
      default:
        LOG.warn("Unrecognized state from server: {}", protoState);
        return State.UNKNOWN;
    }
  }
}
|
Hehe. You don't have a bad point.
|
/**
 * Returns the operation id of the oldest operation still awaiting a result, or empty when none are pending.
 * The backing map is insertion-ordered (LinkedHashMap), so its first key is the oldest operation.
 */
public Optional<String> oldestIncompleteResultId() {
    synchronized (monitor) {
        // Explicit empty check instead of driving a mutable Iterator through Optional.filter/map,
        // which obscured the intent.
        return docSendInfoByOperationId.isEmpty()
            ? Optional.empty()
            : Optional.of(docSendInfoByOperationId.keySet().iterator().next());
    }
}
|
return Optional.of(docSendInfoByOperationId.keySet().iterator())
|
/**
 * Returns the operation id of the oldest operation still awaiting a result, or empty when none are pending.
 */
public Optional<String> oldestIncompleteResultId() {
    synchronized (monitor) {
        if (docSendInfoByOperationId.isEmpty()) {
            return Optional.empty();
        }
        // Insertion-ordered map: the first key is the oldest in-flight operation.
        String oldestOperationId = docSendInfoByOperationId.keySet().iterator().next();
        return Optional.of(oldestOperationId);
    }
}
|
/*
 * Tracks in-flight feed operations, fans them out to all configured clusters, merges per-cluster
 * results, and schedules retries for transient failures.
 */
class OperationProcessor {
    private static final Logger log = Logger.getLogger(OperationProcessor.class.getName());
    // Insertion-ordered map from operation id to its send bookkeeping; iteration order yields the oldest first.
    private final Map<String, DocumentSendInfo> docSendInfoByOperationId = new LinkedHashMap<>();
    // Operations queued behind an in-flight operation on the same document id (only used when
    // blockOperationsToSameDocument is enabled).
    private final ArrayListMultimap<String, Document> blockedDocumentsByDocumentId = ArrayListMultimap.create();
    // Document ids that currently have an operation in flight.
    private final Set<String> inflightDocumentIds = new HashSet<>();
    private final int numDestinations;
    private final FeedClient.ResultCallback resultCallback;
    // Guards all mutable state above.
    private final Object monitor = new Object();
    private final IncompleteResultsThrottler incompleteResultsThrottler;
    private final List<ClusterConnection> clusters = new ArrayList<>();
    private final ScheduledThreadPoolExecutor timeoutExecutor;
    private final OperationStats operationStats;
    private final int maxRetries;
    private final long minTimeBetweenRetriesMs;
    private final Random random = new SecureRandom();
    private final int traceEveryXOperation;
    private final boolean blockOperationsToSameDocument;
    private int traceCounter = 0;
    private final boolean traceToStderr;
    // Random 130-bit id identifying this client instance. NOTE(review): stray second semicolon below.
    private final String clientId = new BigInteger(130, random).toString(32);;
/**
 * Creates the processor and opens one {@link ClusterConnection} per configured cluster.
 *
 * @param incompleteResultsThrottler throttler notified as operations start and complete
 * @param resultCallback user callback invoked when a merged result is ready
 * @param sessionParams feed/connection/cluster configuration
 * @param timeoutExecutor shared executor used to schedule retries and timeouts
 * @throws IllegalArgumentException if no clusters, or a cluster without endpoints, is configured
 */
public OperationProcessor(
        IncompleteResultsThrottler incompleteResultsThrottler,
        FeedClient.ResultCallback resultCallback,
        SessionParams sessionParams,
        ScheduledThreadPoolExecutor timeoutExecutor) {
    this.numDestinations = sessionParams.getClusters().size();
    this.resultCallback = resultCallback;
    this.incompleteResultsThrottler = incompleteResultsThrottler;
    this.timeoutExecutor = timeoutExecutor;
    this.blockOperationsToSameDocument = sessionParams.getConnectionParams().isEnableV3Protocol();
    // Validate configuration before opening any connections.
    if (sessionParams.getClusters().isEmpty()) {
        throw new IllegalArgumentException("Cannot feed to 0 clusters.");
    }
    for (Cluster cluster : sessionParams.getClusters()) {
        if (cluster.getEndpoints().isEmpty()) {
            throw new IllegalArgumentException("Cannot feed to empty cluster.");
        }
    }
    for (int i = 0; i < sessionParams.getClusters().size(); i++) {
        Cluster cluster = sessionParams.getClusters().get(i);
        // The client-side queue is split evenly between the clusters.
        clusters.add(new ClusterConnection(
                this,
                sessionParams.getFeedParams(),
                sessionParams.getConnectionParams(),
                sessionParams.getErrorReport(),
                cluster,
                i,
                sessionParams.getClientQueueSize() / sessionParams.getClusters().size(),
                timeoutExecutor));
    }
    operationStats = new OperationStats(sessionParams, clusters, incompleteResultsThrottler);
    maxRetries = sessionParams.getConnectionParams().getMaxRetries();
    minTimeBetweenRetriesMs = sessionParams.getConnectionParams().getMinTimeBetweenRetriesMs();
    traceEveryXOperation = sessionParams.getConnectionParams().getTraceEveryXOperation();
    traceToStderr = sessionParams.getConnectionParams().getPrintTraceToStdErr();
}
/** Returns the number of operations that have been sent but have not yet produced a merged result. */
public int getIncompleteResultQueueSize() {
    synchronized (monitor) {
        return docSendInfoByOperationId.size();
    }
}
/** Returns the randomly generated id that uniquely identifies this client instance. */
public String getClientId() {
    return clientId;
}
/**
 * Decides whether a failed endpoint result should be retried; if so, schedules a jittered resend
 * against the same cluster and returns true. Returns false for successes, exhausted retries, and
 * non-retryable errors.
 */
private boolean retriedThis(EndpointResult endpointResult, DocumentSendInfo documentSendInfo, int clusterId) {
    Result.Detail detail = endpointResult.getDetail();
    // A successfully executed operation is final; nothing to retry.
    if (detail.getResultType() == Result.ResultType.OPERATION_EXECUTED) {
        return false;
    }
    int retries = documentSendInfo.incRetries(clusterId, detail);
    if (retries > maxRetries) {
        return false;
    }
    // Normalize the exception message so the substring checks below never see null.
    String exceptionMessage = "";
    if (detail.getException() != null && detail.getException().getMessage() != null) {
        exceptionMessage = detail.getException().getMessage();
    }
    boolean retryable =
        detail.getResultType() == Result.ResultType.TRANSITIVE_ERROR
            || exceptionMessage.contains("SEND_QUEUE_CLOSED")
            || exceptionMessage.contains("ILLEGAL_ROUTE")
            || exceptionMessage.contains("NO_SERVICES_FOR_ROUTE")
            || exceptionMessage.contains("NETWORK_ERROR")
            || exceptionMessage.contains("SEQUENCE_ERROR")
            || exceptionMessage.contains("NETWORK_SHUTDOWN")
            || exceptionMessage.contains("TIMEOUT");
    if (!retryable) {
        return false;
    }
    // Jittered back-off: between 1x and ~1.33x of the configured minimum.
    int waitTime = (int) (minTimeBetweenRetriesMs * (1 + random.nextDouble() / 3));
    log.finest("Retrying due to " + detail.toString() + " attempt " + retries + " in " + waitTime + " ms.");
    timeoutExecutor.schedule(
        () -> postToCluster(clusters.get(clusterId), documentSendInfo.getDocument()),
        waitTime,
        TimeUnit.MILLISECONDS);
    return true;
}
/**
 * Handles one per-cluster result for an operation. Returns the merged Result once every
 * destination cluster has reported; returns null while the operation is still pending,
 * was scheduled for retry, or the result is stale/duplicate.
 */
private Result process(EndpointResult endpointResult, int clusterId) {
    synchronized (monitor) {
        if (!docSendInfoByOperationId.containsKey(endpointResult.getOperationId())) {
            log.finer("Received out-of-order or too late result, discarding: " + endpointResult);
            return null;
        }
        DocumentSendInfo documentSendInfo = docSendInfoByOperationId.get(endpointResult.getOperationId());
        if (retriedThis(endpointResult, documentSendInfo, clusterId)) {
            return null;
        }
        // Duplicate detail from the same cluster: ignore.
        if (!documentSendInfo.addIfNotAlreadyThere(endpointResult.getDetail(), clusterId)) {
            return null;
        }
        // Wait until every destination cluster has answered before producing a Result.
        if (documentSendInfo.detailCount() != numDestinations) {
            return null;
        }
        Result result = documentSendInfo.createResult();
        docSendInfoByOperationId.remove(endpointResult.getOperationId());
        String documentId = documentSendInfo.getDocument().getDocumentId();
        // If a pending operation is queued against this document, keep the document id
        // marked in-flight and dispatch the next blocked operation instead of removing it.
        List<Document> blockedDocuments = blockedDocumentsByDocumentId.get(documentId);
        if (blockedDocuments.isEmpty()) {
            inflightDocumentIds.remove(documentId);
        } else {
            // sendToClusters also synchronizes on 'monitor'; the lock is reentrant here.
            sendToClusters(blockedDocuments.remove(0));
        }
        return result;
    }
}
/** Callback invoked when a per-cluster result arrives; delivers merged results to the client. */
public void resultReceived(EndpointResult endpointResult, int clusterId) {
    final Result result = process(endpointResult, clusterId);
    if (result != null) {
        // A merged result exists: release throttler capacity and notify the client callback.
        incompleteResultsThrottler.resultReady(result.isSuccess());
        resultCallback.onCompletion(result.getDocumentId(), result);
        if (traceToStderr && result.hasLocalTrace()) {
            System.err.println(result.toString());
        }
    }
}
/** Forwards fatal endpoint failures to the client callback. */
public void onEndpointError(FeedEndpointException e) {
    resultCallback.onEndpointException(e);
}
/**
 * Closes every cluster connection, collecting failures instead of propagating them so
 * that all connections get a close attempt.
 *
 * @return the exceptions thrown while closing, possibly empty
 */
public List<Exception> closeClusters() {
    List<Exception> failures = new ArrayList<>();
    clusters.forEach(connection -> {
        try {
            connection.close();
        } catch (Exception e) {
            failures.add(e);
        }
    });
    return failures;
}
/**
 * Entry point for feeding one document operation. When blocking is enabled and another
 * operation on the same document id is in flight, the operation is queued and dispatched
 * later by process(); otherwise it is sent to all clusters immediately.
 */
public void sendDocument(Document document) {
    incompleteResultsThrottler.operationStart();
    synchronized (monitor) {
        if (blockOperationsToSameDocument && inflightDocumentIds.contains(document.getDocumentId())) {
            blockedDocumentsByDocumentId.put(document.getDocumentId(), document);
            return;
        }
        inflightDocumentIds.add(document.getDocumentId());
    }
    sendToClusters(document);
}
/** Registers send state for the operation and posts it to every configured cluster. */
private void sendToClusters(Document document) {
    synchronized (monitor) {
        // Trace every traceEveryXOperation'th operation when tracing is enabled.
        boolean traceThisDoc = traceEveryXOperation > 0 && traceCounter++ % traceEveryXOperation == 0;
        docSendInfoByOperationId.put(document.getOperationId(), new DocumentSendInfo(document, traceThisDoc));
    }
    for (ClusterConnection clusterConnection : clusters) {
        postToCluster(clusterConnection, document);
    }
}
/** Posts the document to one cluster; I/O failures are converted to error results. */
private void postToCluster(ClusterConnection clusterConnection, Document document) {
    try {
        clusterConnection.post(document);
    } catch (EndpointIOException eio) {
        // Feed the failure back through the normal result path so retry logic applies.
        resultReceived(EndPointResultFactory.createError(eio.getEndpoint(),
                document.getOperationId(),
                eio),
                clusterConnection.getClusterId());
    }
}
/** Returns feeding statistics serialized as JSON. */
public String getStatsAsJson() {
    return operationStats.getStatsAsJson();
}
/**
 * Closes all cluster connections and shuts down the timeout executor. If exactly one
 * exception occurred it is rethrown (wrapped if checked); if several occurred, a single
 * RuntimeException summarizing all of them is thrown with the first as the cause.
 */
public void close() {
    List<Exception> exceptions = closeClusters();
    try {
        closeExecutor();
    } catch (InterruptedException e) {
        exceptions.add(e);
    }
    if (exceptions.isEmpty()) {
        return;
    }
    if (exceptions.size() == 1) {
        Exception single = exceptions.get(0);
        if (single instanceof RuntimeException) {
            throw (RuntimeException) single;
        }
        throw new RuntimeException(single);
    }
    StringBuilder message = new StringBuilder("Exception thrown while closing one or more clusters: ");
    String separator = "";
    for (Exception e : exceptions) {
        message.append(separator).append(Exceptions.toMessageString(e));
        separator = ", ";
    }
    throw new RuntimeException(message.toString(), exceptions.get(0));
}
/**
 * Shuts down the retry/timeout executor and waits up to 300 seconds for already
 * running tasks to finish.
 *
 * @throws InterruptedException if interrupted while awaiting termination
 */
private void closeExecutor() throws InterruptedException {
    log.log(Level.FINE, "Shutting down timeout executor.");
    timeoutExecutor.shutdownNow();
    log.log(Level.FINE, "Awaiting termination of already running timeout tasks.");
    if (! timeoutExecutor.awaitTermination(300, TimeUnit.SECONDS)) {
        log.severe("Did not manage to shut down the executors within 300 secs, system stuck?");
        throw new RuntimeException("Did not manage to shut down retry threads. Please report problem.");
    }
}
}
|
/**
 * Routes document operations to every configured cluster, tracks in-flight state per
 * operation id, retries transient failures with randomized backoff, and merges the
 * per-cluster results into a single Result delivered through the ResultCallback.
 *
 * Thread-safe: all mutable shared state is guarded by 'monitor'.
 */
class OperationProcessor {
    private static final Logger log = Logger.getLogger(OperationProcessor.class.getName());
    // Pending operations keyed by operation id, kept in insertion order.
    private final Map<String, DocumentSendInfo> docSendInfoByOperationId = new LinkedHashMap<>();
    // Operations queued behind an in-flight operation on the same document id.
    private final ArrayListMultimap<String, Document> blockedDocumentsByDocumentId = ArrayListMultimap.create();
    private final Set<String> inflightDocumentIds = new HashSet<>();
    private final int numDestinations;
    private final FeedClient.ResultCallback resultCallback;
    private final Object monitor = new Object();
    private final IncompleteResultsThrottler incompleteResultsThrottler;
    private final List<ClusterConnection> clusters = new ArrayList<>();
    private final ScheduledThreadPoolExecutor timeoutExecutor;
    private final OperationStats operationStats;
    private final int maxRetries;
    private final long minTimeBetweenRetriesMs;
    private final Random random = new SecureRandom();
    private final int traceEveryXOperation;
    private final boolean blockOperationsToSameDocument;
    private int traceCounter = 0;
    private final boolean traceToStderr;
    // Random, effectively unique id for this client instance. (Removed a stray ';;'.)
    private final String clientId = new BigInteger(130, random).toString(32);

    public OperationProcessor(
            IncompleteResultsThrottler incompleteResultsThrottler,
            FeedClient.ResultCallback resultCallback,
            SessionParams sessionParams,
            ScheduledThreadPoolExecutor timeoutExecutor) {
        this.numDestinations = sessionParams.getClusters().size();
        this.resultCallback = resultCallback;
        this.incompleteResultsThrottler = incompleteResultsThrottler;
        this.timeoutExecutor = timeoutExecutor;
        this.blockOperationsToSameDocument = sessionParams.getConnectionParams().isEnableV3Protocol();
        if (sessionParams.getClusters().isEmpty()) {
            throw new IllegalArgumentException("Cannot feed to 0 clusters.");
        }
        for (Cluster cluster : sessionParams.getClusters()) {
            if (cluster.getEndpoints().isEmpty()) {
                throw new IllegalArgumentException("Cannot feed to empty cluster.");
            }
        }
        for (int i = 0; i < sessionParams.getClusters().size(); i++) {
            Cluster cluster = sessionParams.getClusters().get(i);
            // The configured client queue size is divided evenly between the clusters.
            clusters.add(new ClusterConnection(
                    this,
                    sessionParams.getFeedParams(),
                    sessionParams.getConnectionParams(),
                    sessionParams.getErrorReport(),
                    cluster,
                    i,
                    sessionParams.getClientQueueSize() / sessionParams.getClusters().size(),
                    timeoutExecutor));
        }
        operationStats = new OperationStats(sessionParams, clusters, incompleteResultsThrottler);
        maxRetries = sessionParams.getConnectionParams().getMaxRetries();
        minTimeBetweenRetriesMs = sessionParams.getConnectionParams().getMinTimeBetweenRetriesMs();
        traceEveryXOperation = sessionParams.getConnectionParams().getTraceEveryXOperation();
        traceToStderr = sessionParams.getConnectionParams().getPrintTraceToStdErr();
    }

    /** Returns the number of operations that have been sent but not yet fully acknowledged. */
    public int getIncompleteResultQueueSize() {
        synchronized (monitor) {
            return docSendInfoByOperationId.size();
        }
    }

    /** Returns the random, effectively unique id of this client instance. */
    public String getClientId() {
        return clientId;
    }

    /**
     * Decides whether a failed per-cluster result should be retried, and if so schedules
     * the retry on the timeout executor with a randomized backoff.
     *
     * @return true if a retry was scheduled (the caller must then discard this result),
     *         false if the result should be treated as final for this cluster.
     */
    private boolean retriedThis(EndpointResult endpointResult, DocumentSendInfo documentSendInfo, int clusterId) {
        final Result.Detail detail = endpointResult.getDetail();
        // Successfully executed operations are never retried.
        if (detail.getResultType() == Result.ResultType.OPERATION_EXECUTED) {
            return false;
        }
        int retries = documentSendInfo.incRetries(clusterId, detail);
        if (retries > maxRetries) {
            return false;
        }
        // getMessage() may itself return null even when an exception is present.
        String exceptionMessage = detail.getException() == null ? "" : detail.getException().getMessage();
        if (exceptionMessage == null) {
            exceptionMessage = "";
        }
        // Retry transitive errors plus a fixed set of known-transient failure messages.
        boolean retryThisOperation =
                detail.getResultType() == Result.ResultType.TRANSITIVE_ERROR ||
                exceptionMessage.contains("SEND_QUEUE_CLOSED") ||
                exceptionMessage.contains("ILLEGAL_ROUTE") ||
                exceptionMessage.contains("NO_SERVICES_FOR_ROUTE") ||
                exceptionMessage.contains("NETWORK_ERROR") ||
                exceptionMessage.contains("SEQUENCE_ERROR") ||
                exceptionMessage.contains("NETWORK_SHUTDOWN") ||
                exceptionMessage.contains("TIMEOUT");
        if (retryThisOperation) {
            // Randomized backoff: between 1x and ~1.33x of the configured minimum wait.
            int waitTime = (int) (minTimeBetweenRetriesMs * (1 + random.nextDouble() / 3));
            log.finest("Retrying due to " + detail.toString() + " attempt " + retries
                    + " in " + waitTime + " ms.");
            timeoutExecutor.schedule(
                    () -> postToCluster(clusters.get(clusterId), documentSendInfo.getDocument()),
                    waitTime,
                    TimeUnit.MILLISECONDS);
            return true;
        }
        return false;
    }

    /**
     * Handles one per-cluster result for an operation. Returns the merged Result once every
     * destination cluster has reported; returns null while the operation is still pending,
     * was scheduled for retry, or the result is stale/duplicate.
     */
    private Result process(EndpointResult endpointResult, int clusterId) {
        synchronized (monitor) {
            if (!docSendInfoByOperationId.containsKey(endpointResult.getOperationId())) {
                log.finer("Received out-of-order or too late result, discarding: " + endpointResult);
                return null;
            }
            DocumentSendInfo documentSendInfo = docSendInfoByOperationId.get(endpointResult.getOperationId());
            if (retriedThis(endpointResult, documentSendInfo, clusterId)) {
                return null;
            }
            if (!documentSendInfo.addIfNotAlreadyThere(endpointResult.getDetail(), clusterId)) {
                return null;
            }
            if (documentSendInfo.detailCount() != numDestinations) {
                return null;
            }
            Result result = documentSendInfo.createResult();
            docSendInfoByOperationId.remove(endpointResult.getOperationId());
            String documentId = documentSendInfo.getDocument().getDocumentId();
            // If a pending operation is queued against this document, keep the document id
            // marked in-flight and dispatch the next blocked operation instead of removing it.
            List<Document> blockedDocuments = blockedDocumentsByDocumentId.get(documentId);
            if (blockedDocuments.isEmpty()) {
                inflightDocumentIds.remove(documentId);
            } else {
                sendToClusters(blockedDocuments.remove(0));
            }
            return result;
        }
    }

    /** Callback invoked when a per-cluster result arrives; delivers merged results to the client. */
    public void resultReceived(EndpointResult endpointResult, int clusterId) {
        final Result result = process(endpointResult, clusterId);
        if (result != null) {
            incompleteResultsThrottler.resultReady(result.isSuccess());
            resultCallback.onCompletion(result.getDocumentId(), result);
            if (traceToStderr && result.hasLocalTrace()) {
                System.err.println(result.toString());
            }
        }
    }

    /** Forwards fatal endpoint failures to the client callback. */
    public void onEndpointError(FeedEndpointException e) {
        resultCallback.onEndpointException(e);
    }

    /** Closes every cluster connection, collecting rather than propagating failures. */
    public List<Exception> closeClusters() {
        List<Exception> exceptions = new ArrayList<>();
        for (ClusterConnection cluster : clusters) {
            try {
                cluster.close();
            } catch (Exception e) {
                exceptions.add(e);
            }
        }
        return exceptions;
    }

    /**
     * Entry point for feeding one document operation. When blocking is enabled and another
     * operation on the same document id is in flight, the operation is queued and dispatched
     * later; otherwise it is sent to all clusters immediately.
     */
    public void sendDocument(Document document) {
        incompleteResultsThrottler.operationStart();
        synchronized (monitor) {
            if (blockOperationsToSameDocument && inflightDocumentIds.contains(document.getDocumentId())) {
                blockedDocumentsByDocumentId.put(document.getDocumentId(), document);
                return;
            }
            inflightDocumentIds.add(document.getDocumentId());
        }
        sendToClusters(document);
    }

    /** Registers send state for the operation and posts it to every configured cluster. */
    private void sendToClusters(Document document) {
        synchronized (monitor) {
            boolean traceThisDoc = traceEveryXOperation > 0 && traceCounter++ % traceEveryXOperation == 0;
            docSendInfoByOperationId.put(document.getOperationId(), new DocumentSendInfo(document, traceThisDoc));
        }
        for (ClusterConnection clusterConnection : clusters) {
            postToCluster(clusterConnection, document);
        }
    }

    /** Posts the document to one cluster; I/O failures are converted to error results. */
    private void postToCluster(ClusterConnection clusterConnection, Document document) {
        try {
            clusterConnection.post(document);
        } catch (EndpointIOException eio) {
            resultReceived(EndPointResultFactory.createError(eio.getEndpoint(),
                    document.getOperationId(),
                    eio),
                    clusterConnection.getClusterId());
        }
    }

    /** Returns feeding statistics serialized as JSON. */
    public String getStatsAsJson() {
        return operationStats.getStatsAsJson();
    }

    /**
     * Closes all cluster connections and shuts down the timeout executor. A single
     * exception is rethrown (wrapped if checked); multiple exceptions are summarized in
     * one RuntimeException with the first as the cause.
     */
    public void close() {
        List<Exception> exceptions = closeClusters();
        try {
            closeExecutor();
        } catch (InterruptedException e) {
            exceptions.add(e);
        }
        if (exceptions.isEmpty()) {
            return;
        }
        if (exceptions.size() == 1) {
            if (exceptions.get(0) instanceof RuntimeException) {
                throw (RuntimeException) exceptions.get(0);
            } else {
                throw new RuntimeException(exceptions.get(0));
            }
        }
        StringBuilder b = new StringBuilder();
        b.append("Exception thrown while closing one or more clusters: ");
        for (int i = 0; i < exceptions.size(); i++) {
            Exception e = exceptions.get(i);
            b.append(Exceptions.toMessageString(e));
            if (i != (exceptions.size() - 1)) {
                b.append(", ");
            }
        }
        throw new RuntimeException(b.toString(), exceptions.get(0));
    }

    /** Shuts down the retry/timeout executor and waits up to 300s for running tasks. */
    private void closeExecutor() throws InterruptedException {
        log.log(Level.FINE, "Shutting down timeout executor.");
        timeoutExecutor.shutdownNow();
        log.log(Level.FINE, "Awaiting termination of already running timeout tasks.");
        if (! timeoutExecutor.awaitTermination(300, TimeUnit.SECONDS)) {
            log.severe("Did not manage to shut down the executors within 300 secs, system stuck?");
            throw new RuntimeException("Did not manage to shut down retry threads. Please report problem.");
        }
    }
}
|
```suggestion List<PrivEntry> userPrivEntryList = map.get(userIdentity); ```
|
/**
 * Removes the first entry in this table whose key matches the given entry, then removes
 * the user's entry list from the map if it became empty.
 */
public void dropEntry(PrivEntry entry) {
    UserIdentity userIdentity = entry.getUserIdent();
    // Reuse the local userIdentity rather than calling entry.getUserIdent() a second
    // time, consistent with the map.remove(userIdentity) call below.
    List<PrivEntry> userPrivEntryList = map.get(userIdentity);
    if (userPrivEntryList == null) {
        return;
    }
    Iterator<PrivEntry> iter = userPrivEntryList.iterator();
    while (iter.hasNext()) {
        PrivEntry privEntry = iter.next();
        if (privEntry.keyMatch(entry)) {
            iter.remove();
            LOG.info("drop priv entry: {}", privEntry);
            break;
        }
    }
    if (userPrivEntryList.isEmpty()) {
        map.remove(userIdentity);
    }
}
|
List<PrivEntry> userPrivEntryList = map.get(entry.getUserIdent());
|
/**
 * Removes the first entry in this table whose key matches the given entry, then removes
 * the user's entry list from the map if it became empty.
 */
public void dropEntry(PrivEntry entry) {
    UserIdentity userIdentity = entry.getUserIdent();
    List<PrivEntry> entries = map.get(userIdentity);
    if (entries == null) {
        return;
    }
    for (int i = 0; i < entries.size(); i++) {
        PrivEntry candidate = entries.get(i);
        if (candidate.keyMatch(entry)) {
            entries.remove(i);
            LOG.info("drop priv entry: {}", candidate);
            break;
        }
    }
    if (entries.isEmpty()) {
        map.remove(userIdentity);
    }
}
|
/**
 * Base table mapping user identities to their privilege entries.
 *
 * Entries are held in a TreeMap sorted descending by qualified user name, then descending
 * by host, so iteration visits identities in a stable order. The read-only iterators below
 * must not be used for modification.
 */
class PrivTable implements Writable {
    private static final Logger LOG = LogManager.getLogger(PrivTable.class);
    // Descending by qualified user, then descending by host (both comparisons negated).
    protected Map<UserIdentity, List<PrivEntry>> map = new TreeMap<>(new Comparator<UserIdentity>() {
        @Override
        public int compare(UserIdentity o1, UserIdentity o2) {
            int compareByUser = o1.getQualifiedUser().compareTo(o2.getQualifiedUser());
            if (compareByUser != 0) {
                return - compareByUser;
            }
            return - o1.getHost().compareTo(o2.getHost());
        }
    });
    // Ensures write() emits the concrete class name exactly once per serialization.
    protected boolean isClassNameWrote = false;

    /*
     * Add an entry to priv table.
     * If entry already exists and errOnExist is false, we try to reset or merge the new priv entry with existing one.
     * NOTICE, this method does not set password for the newly added entry if this is a user priv table, the caller
     * need to set password later.
     */
    public PrivEntry addEntry(PrivEntry newEntry, boolean errOnExist, boolean errOnNonExist) throws DdlException {
        PrivEntry existingEntry = getExistingEntry(newEntry);
        if (existingEntry == null) {
            if (errOnNonExist) {
                throw new DdlException("User " + newEntry.getUserIdent() + " does not exist");
            }
            UserIdentity newUser = newEntry.getUserIdent();
            List<PrivEntry> entries = map.computeIfAbsent(newUser, k -> new ArrayList<>());
            entries.add(newEntry);
            LOG.debug("add priv entry: {}", newEntry);
            return newEntry;
        } else {
            if (errOnExist) {
                throw new DdlException("User already exist");
            } else {
                checkOperationAllowed(existingEntry, newEntry, "ADD ENTRY");
                // Resolver-set entries are replaced outright; user-set entries are merged.
                if (existingEntry.isSetByDomainResolver()) {
                    existingEntry.setPrivSet(newEntry.getPrivSet());
                    existingEntry.setSetByDomainResolver(newEntry.isSetByDomainResolver());
                    LOG.debug("reset priv entry: {}", existingEntry);
                } else if (!newEntry.isSetByDomainResolver()) {
                    mergePriv(existingEntry, newEntry);
                    existingEntry.setSetByDomainResolver(false);
                    LOG.debug("merge priv entry: {}", existingEntry);
                }
            }
            return existingEntry;
        }
    }

    /** Removes all entries that were created by the domain resolver, pruning emptied users. */
    public void clearEntriesSetByResolver() {
        Iterator<Map.Entry<UserIdentity, List<PrivEntry>>> mapIter = map.entrySet().iterator();
        while (mapIter.hasNext()) {
            Map.Entry<UserIdentity, List<PrivEntry>> entry = mapIter.next();
            Iterator<PrivEntry> iter = entry.getValue().iterator();
            while (iter.hasNext()) {
                PrivEntry privEntry = iter.next();
                if (privEntry.isSetByDomainResolver()) {
                    iter.remove();
                    LOG.info("drop priv entry set by resolver: {}", privEntry);
                }
            }
            if (entry.getValue().isEmpty()) {
                mapIter.remove();
            }
        }
    }

    /** Removes every entry belonging to the given user identity. */
    public void dropUser(UserIdentity userIdentity) {
        map.remove(userIdentity);
    }

    /**
     * Revokes the privileges in 'entry' from the matching existing entry.
     * When the remaining privilege set is empty and deleteEntryWhenEmpty is true,
     * the entry itself is dropped.
     */
    public void revoke(PrivEntry entry, boolean errOnNonExist, boolean deleteEntryWhenEmpty) throws DdlException {
        PrivEntry existingEntry = getExistingEntry(entry);
        if (existingEntry == null) {
            if (errOnNonExist) {
                ErrorReport.reportDdlException(ErrorCode.ERR_NONEXISTING_GRANT, entry.getOrigUser(),
                        entry.getOrigHost());
            }
            return;
        }
        checkOperationAllowed(existingEntry, entry, "REVOKE");
        // No overlap between existing privileges and those being revoked.
        PrivBitSet tmp = existingEntry.getPrivSet().copy();
        tmp.and(entry.getPrivSet());
        if (tmp.isEmpty()) {
            if (errOnNonExist) {
                ErrorReport.reportDdlException(ErrorCode.ERR_NONEXISTING_GRANT, entry.getOrigUser(),
                        entry.getOrigHost());
            }
            return;
        }
        LOG.debug("before revoke: {}, privs to be revoked: {}",
                existingEntry.getPrivSet(), entry.getPrivSet());
        // existing &= existing ^ revoked  (clears exactly the revoked bits)
        tmp = existingEntry.getPrivSet().copy();
        tmp.xor(entry.getPrivSet());
        existingEntry.getPrivSet().and(tmp);
        LOG.debug("after revoke: {}", existingEntry);
        if (existingEntry.getPrivSet().isEmpty() && deleteEntryWhenEmpty) {
            dropEntry(existingEntry);
        }
    }

    /*
     * the priv entry is classified by 'set by domain resolver'
     * or 'NOT set by domain resolver'(other specified operations).
     * if the existing entry is set by resolver, it can be reset by resolver or set by specified ops.
     * in other word, if the existing entry is NOT set by resolver, it can not be set by resolver.
     */
    protected void checkOperationAllowed(PrivEntry existingEntry, PrivEntry newEntry, String op) throws DdlException {
        if (!existingEntry.isSetByDomainResolver() && newEntry.isSetByDomainResolver()) {
            throw new DdlException("the existing entry is NOT set by resolver: " + existingEntry + ","
                    + " can not be set by resolver " + newEntry + ", op: " + op);
        }
    }

    /** Returns the existing entry whose key matches the given entry, or null. */
    protected PrivEntry getExistingEntry(PrivEntry entry) {
        List<PrivEntry> userPrivEntryList = map.get(entry.getUserIdent());
        if (userPrivEntryList == null) {
            return null;
        }
        for (PrivEntry existingEntry : userPrivEntryList) {
            if (existingEntry.keyMatch(entry)) {
                return existingEntry;
            }
        }
        return null;
    }

    /** ORs second's privileges into first, propagating the resolver flag. */
    private void mergePriv(PrivEntry first, PrivEntry second) {
        first.getPrivSet().or(second.getPrivSet());
        first.setSetByDomainResolver(first.isSetByDomainResolver() || second.isSetByDomainResolver());
    }

    /** Returns true if any identity in this table has the given qualified user name. */
    public boolean doesUsernameExist(String qualifiedUsername) {
        for (UserIdentity userIdentity : map.keySet()) {
            if (userIdentity.getQualifiedUser().equals(qualifiedUsername)) {
                return true;
            }
        }
        return false;
    }

    public void clear() {
        map.clear();
    }

    public boolean isEmpty() {
        return map.isEmpty();
    }

    /**
     * Deserializes a PrivTable subclass: reads the class name (migrating legacy
     * org.apache.doris names), instantiates it reflectively, and invokes readFields.
     */
    public static PrivTable read(DataInput in) throws IOException {
        String className = Text.readString(in);
        if (className.startsWith("org.apache.doris")) {
            className = className.replaceFirst("org.apache.doris", "com.starrocks");
        }
        try {
            Class<? extends PrivTable> derivedClass = (Class<? extends PrivTable>) Class.forName(className);
            // getDeclaredConstructor().newInstance() replaces the deprecated Class.newInstance().
            PrivTable privTable = derivedClass.getDeclaredConstructor().newInstance();
            Class<?>[] paramTypes = {DataInput.class};
            Method readMethod = derivedClass.getMethod("readFields", paramTypes);
            Object[] params = {in};
            readMethod.invoke(privTable, params);
            return privTable;
        } catch (ClassNotFoundException | InstantiationException | IllegalAccessException | NoSuchMethodException
                | SecurityException | IllegalArgumentException | InvocationTargetException e) {
            throw new IOException("failed read PrivTable", e);
        }
    }

    /** Total number of entries across all user identities. */
    public int size() {
        int sum = 0;
        for (Map.Entry<UserIdentity, List<PrivEntry>> entry : map.entrySet()) {
            sum += entry.getValue().size();
        }
        return sum;
    }

    /**
     * return a iterator used for a complete loop in the whole table
     * This is READ ONLY, please don't use it for any kinds of modification
     *
     * NOTE(review): next() returns null instead of throwing NoSuchElementException when
     * exhausted, so callers must check hasNext() first.
     */
    public Iterator<PrivEntry> getFullReadOnlyIterator() {
        return new Iterator<PrivEntry>() {
            private Iterator<Map.Entry<UserIdentity, List<PrivEntry>>> mapIterator;
            private Iterator<PrivEntry> privEntryIterator;
            {
                mapIterator = map.entrySet().iterator();
                privEntryIterator = null;
            }
            @Override
            public boolean hasNext() {
                return mapIterator.hasNext() || privEntryIterator != null && privEntryIterator.hasNext();
            }
            @Override
            public PrivEntry next() {
                if (privEntryIterator == null || ! privEntryIterator.hasNext()) {
                    if (! mapIterator.hasNext()) {
                        return null;
                    }
                    Map.Entry<UserIdentity, List<PrivEntry>> next = mapIterator.next();
                    privEntryIterator = next.getValue().iterator();
                }
                return privEntryIterator.next();
            }
        };
    }

    /**
     * return a iterator to all the entries that match currentUser
     */
    public Iterator<PrivEntry> getReadOnlyIteratorByUser(UserIdentity currentUser) {
        return getReadOnlyIteratorByUser(currentUser.getQualifiedUser(), currentUser.getHost());
    }

    /**
     * return a iterator to all the entries that match user@host
     */
    public Iterator<PrivEntry> getReadOnlyIteratorByUser(String user, String host) {
        return new Iterator<PrivEntry>() {
            private Iterator<Map.Entry<UserIdentity, List<PrivEntry>>> mapIterator;
            private Iterator<PrivEntry> privEntryIterator;
            {
                mapIterator = map.entrySet().iterator();
                privEntryIterator = null;
                iterMapToNextMatchedIdentity();
            }
            /**
             * iterator to the next user identity that match user
             * return false if no such user found, true if found
             */
            private boolean iterMapToNextMatchedIdentity() {
                while (mapIterator.hasNext()) {
                    Map.Entry<UserIdentity, List<PrivEntry>> mapEntry = mapIterator.next();
                    List<PrivEntry> entries = mapEntry.getValue();
                    Preconditions.checkArgument(entries.size() > 0);
                    // All entries of an identity share the same user/host pattern,
                    // so matching against the first entry is sufficient.
                    PrivEntry privEntry = entries.get(0);
                    if (!privEntry.isAnyUser() && !privEntry.getUserPattern().match(user)) {
                        continue;
                    }
                    if (!privEntry.isAnyHost() && !privEntry.getHostPattern().match(host)) {
                        continue;
                    }
                    privEntryIterator = entries.iterator();
                    return true;
                }
                return false;
            }
            @Override
            public boolean hasNext() {
                if (privEntryIterator == null) {
                    return false;
                }
                if (privEntryIterator.hasNext()) {
                    return true;
                } else {
                    return iterMapToNextMatchedIdentity();
                }
            }
            @Override
            public PrivEntry next() {
                return privEntryIterator.next();
            }
        };
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("\n");
        Iterator<PrivEntry> iter = this.getFullReadOnlyIterator();
        while (iter.hasNext()) {
            sb.append(iter.next()).append("\n");
        }
        return sb.toString();
    }

    @Override
    public void write(DataOutput out) throws IOException {
        if (!isClassNameWrote) {
            String className = PrivTable.class.getCanonicalName();
            Text.writeString(out, className);
            isClassNameWrote = true;
        }
        out.writeInt(this.size());
        Iterator<PrivEntry> iter = this.getFullReadOnlyIterator();
        while (iter.hasNext()) {
            iter.next().write(out);
        }
        isClassNameWrote = false;
    }

    /** Reads 'size' entries and groups them by user identity. */
    public void readFields(DataInput in) throws IOException {
        int size = in.readInt();
        for (int i = 0; i < size; i++) {
            PrivEntry entry = PrivEntry.read(in);
            UserIdentity newUser = entry.getUserIdent();
            List<PrivEntry> entries = map.computeIfAbsent(newUser, k -> new ArrayList<>());
            entries.add(entry);
        }
    }
}
|
/**
 * Base table mapping user identities to their privilege entries.
 *
 * Entries are held in a TreeMap sorted descending by host, then descending by qualified
 * user name. The read-only iterators below must not be used for modification.
 */
class PrivTable implements Writable {
    private static final Logger LOG = LogManager.getLogger(PrivTable.class);
    // Descending by host, then descending by qualified user (both comparisons negated).
    protected Map<UserIdentity, List<PrivEntry>> map = new TreeMap<>(new Comparator<UserIdentity>() {
        @Override
        public int compare(UserIdentity o1, UserIdentity o2) {
            int compareByHost = o1.getHost().compareTo(o2.getHost());
            if (compareByHost != 0) {
                return - compareByHost;
            }
            return - o1.getQualifiedUser().compareTo(o2.getQualifiedUser());
        }
    });
    // Ensures write() emits the concrete class name exactly once per serialization.
    protected boolean isClassNameWrote = false;

    /*
     * Add an entry to priv table.
     * If entry already exists and errOnExist is false, we try to reset or merge the new priv entry with existing one.
     * NOTICE, this method does not set password for the newly added entry if this is a user priv table, the caller
     * need to set password later.
     */
    public PrivEntry addEntry(PrivEntry newEntry, boolean errOnExist, boolean errOnNonExist) throws DdlException {
        PrivEntry existingEntry = getExistingEntry(newEntry);
        if (existingEntry == null) {
            if (errOnNonExist) {
                throw new DdlException("User " + newEntry.getUserIdent() + " does not exist");
            }
            UserIdentity newUser = newEntry.getUserIdent();
            List<PrivEntry> entries = map.computeIfAbsent(newUser, k -> new ArrayList<>());
            entries.add(newEntry);
            LOG.debug("add priv entry: {}", newEntry);
            return newEntry;
        } else {
            if (errOnExist) {
                throw new DdlException("User already exist");
            } else {
                checkOperationAllowed(existingEntry, newEntry, "ADD ENTRY");
                // Resolver-set entries are replaced outright; user-set entries are merged.
                if (existingEntry.isSetByDomainResolver()) {
                    existingEntry.setPrivSet(newEntry.getPrivSet());
                    existingEntry.setSetByDomainResolver(newEntry.isSetByDomainResolver());
                    LOG.debug("reset priv entry: {}", existingEntry);
                } else if (!newEntry.isSetByDomainResolver()) {
                    mergePriv(existingEntry, newEntry);
                    existingEntry.setSetByDomainResolver(false);
                    LOG.debug("merge priv entry: {}", existingEntry);
                }
            }
            return existingEntry;
        }
    }

    /** Removes all entries that were created by the domain resolver, pruning emptied users. */
    public void clearEntriesSetByResolver() {
        Iterator<Map.Entry<UserIdentity, List<PrivEntry>>> mapIter = map.entrySet().iterator();
        while (mapIter.hasNext()) {
            Map.Entry<UserIdentity, List<PrivEntry>> entry = mapIter.next();
            Iterator<PrivEntry> iter = entry.getValue().iterator();
            while (iter.hasNext()) {
                PrivEntry privEntry = iter.next();
                if (privEntry.isSetByDomainResolver()) {
                    iter.remove();
                    LOG.info("drop priv entry set by resolver: {}", privEntry);
                }
            }
            if (entry.getValue().isEmpty()) {
                mapIter.remove();
            }
        }
    }

    /** Removes every entry belonging to the given user identity. */
    public void dropUser(UserIdentity userIdentity) {
        map.remove(userIdentity);
    }

    /**
     * Revokes the privileges in 'entry' from the matching existing entry. When the
     * remaining privilege set is empty and deleteEntryWhenEmpty is true, the entry is dropped.
     */
    public void revoke(PrivEntry entry, boolean errOnNonExist, boolean deleteEntryWhenEmpty) throws DdlException {
        PrivEntry existingEntry = getExistingEntry(entry);
        if (existingEntry == null) {
            if (errOnNonExist) {
                ErrorReport.reportDdlException(ErrorCode.ERR_NONEXISTING_GRANT, entry.getOrigUser(),
                        entry.getOrigHost());
            }
            return;
        }
        checkOperationAllowed(existingEntry, entry, "REVOKE");
        // No overlap between existing privileges and those being revoked.
        PrivBitSet tmp = existingEntry.getPrivSet().copy();
        tmp.and(entry.getPrivSet());
        if (tmp.isEmpty()) {
            if (errOnNonExist) {
                ErrorReport.reportDdlException(ErrorCode.ERR_NONEXISTING_GRANT, entry.getOrigUser(),
                        entry.getOrigHost());
            }
            return;
        }
        LOG.debug("before revoke: {}, privs to be revoked: {}",
                existingEntry.getPrivSet(), entry.getPrivSet());
        // existing &= existing ^ revoked  (clears exactly the revoked bits)
        tmp = existingEntry.getPrivSet().copy();
        tmp.xor(entry.getPrivSet());
        existingEntry.getPrivSet().and(tmp);
        LOG.debug("after revoke: {}", existingEntry);
        if (existingEntry.getPrivSet().isEmpty() && deleteEntryWhenEmpty) {
            dropEntry(existingEntry);
        }
    }

    /*
     * the priv entry is classified by 'set by domain resolver'
     * or 'NOT set by domain resolver'(other specified operations).
     * if the existing entry is set by resolver, it can be reset by resolver or set by specified ops.
     * in other word, if the existing entry is NOT set by resolver, it can not be set by resolver.
     */
    protected void checkOperationAllowed(PrivEntry existingEntry, PrivEntry newEntry, String op) throws DdlException {
        if (!existingEntry.isSetByDomainResolver() && newEntry.isSetByDomainResolver()) {
            throw new DdlException("the existing entry is NOT set by resolver: " + existingEntry + ","
                    + " can not be set by resolver " + newEntry + ", op: " + op);
        }
    }

    /** Returns the existing entry whose key matches the given entry, or null. */
    protected PrivEntry getExistingEntry(PrivEntry entry) {
        List<PrivEntry> userPrivEntryList = map.get(entry.getUserIdent());
        if (userPrivEntryList == null) {
            return null;
        }
        for (PrivEntry existingEntry : userPrivEntryList) {
            if (existingEntry.keyMatch(entry)) {
                return existingEntry;
            }
        }
        return null;
    }

    /** ORs second's privileges into first, propagating the resolver flag. */
    private void mergePriv(PrivEntry first, PrivEntry second) {
        first.getPrivSet().or(second.getPrivSet());
        first.setSetByDomainResolver(first.isSetByDomainResolver() || second.isSetByDomainResolver());
    }

    /** Returns true if any identity in this table has the given qualified user name. */
    public boolean doesUsernameExist(String qualifiedUsername) {
        for (UserIdentity userIdentity : map.keySet()) {
            if (userIdentity.getQualifiedUser().equals(qualifiedUsername)) {
                return true;
            }
        }
        return false;
    }

    public void clear() {
        map.clear();
    }

    public boolean isEmpty() {
        return map.isEmpty();
    }

    /**
     * Deserializes a PrivTable subclass: reads the class name (migrating legacy
     * org.apache.doris names), instantiates it reflectively, and invokes readFields.
     */
    public static PrivTable read(DataInput in) throws IOException {
        String className = Text.readString(in);
        if (className.startsWith("org.apache.doris")) {
            className = className.replaceFirst("org.apache.doris", "com.starrocks");
        }
        PrivTable privTable = null;
        try {
            Class<? extends PrivTable> derivedClass = (Class<? extends PrivTable>) Class.forName(className);
            // NOTE(review): Class.newInstance() is deprecated; consider
            // getDeclaredConstructor().newInstance() — the catch list already covers it.
            privTable = derivedClass.newInstance();
            Class[] paramTypes = {DataInput.class};
            Method readMethod = derivedClass.getMethod("readFields", paramTypes);
            Object[] params = {in};
            readMethod.invoke(privTable, params);
            return privTable;
        } catch (ClassNotFoundException | InstantiationException | IllegalAccessException | NoSuchMethodException
                | SecurityException | IllegalArgumentException | InvocationTargetException e) {
            throw new IOException("failed read PrivTable", e);
        }
    }

    /** Total number of entries across all user identities. */
    public int size() {
        int sum = 0;
        for (Map.Entry<UserIdentity, List<PrivEntry>> entry : map.entrySet()) {
            sum += entry.getValue().size();
        }
        return sum;
    }

    /**
     * return a iterator used for a complete loop in the whole table
     * This is READ ONLY, please don't use it for any kinds of modification
     *
     * NOTE(review): next() returns null instead of throwing NoSuchElementException when
     * exhausted, so callers must check hasNext() first.
     */
    public Iterator<PrivEntry> getFullReadOnlyIterator() {
        return new Iterator<PrivEntry>() {
            private Iterator<Map.Entry<UserIdentity, List<PrivEntry>>> mapIterator;
            private Iterator<PrivEntry> privEntryIterator;
            {
                mapIterator = map.entrySet().iterator();
                privEntryIterator = null;
            }
            @Override
            public boolean hasNext() {
                return mapIterator.hasNext() || privEntryIterator != null && privEntryIterator.hasNext();
            }
            @Override
            public PrivEntry next() {
                if (privEntryIterator == null || !privEntryIterator.hasNext()) {
                    if (!mapIterator.hasNext()) {
                        return null;
                    }
                    Map.Entry<UserIdentity, List<PrivEntry>> next = mapIterator.next();
                    privEntryIterator = next.getValue().iterator();
                }
                return privEntryIterator.next();
            }
        };
    }

    /**
     * return a iterator to all the entries that match currentUser
     */
    public Iterator<PrivEntry> getReadOnlyIteratorByUser(UserIdentity currentUser) {
        return getReadOnlyIteratorByUser(currentUser.getQualifiedUser(), currentUser.getHost());
    }

    /**
     * return a iterator to all the entries that match user@host
     */
    public Iterator<PrivEntry> getReadOnlyIteratorByUser(String user, String host) {
        return new Iterator<PrivEntry>() {
            private Iterator<Map.Entry<UserIdentity, List<PrivEntry>>> mapIterator;
            private Iterator<PrivEntry> privEntryIterator;
            {
                mapIterator = map.entrySet().iterator();
                privEntryIterator = null;
                iterMapToNextMatchedIdentity();
            }
            /**
             * iterator to the next user identity that match user
             * return false if no such user found, true if found
             */
            private boolean iterMapToNextMatchedIdentity() {
                while (mapIterator.hasNext()) {
                    Map.Entry<UserIdentity, List<PrivEntry>> mapEntry = mapIterator.next();
                    List<PrivEntry> entries = mapEntry.getValue();
                    Preconditions.checkArgument(entries.size() > 0);
                    // All entries of an identity share the same user/host pattern,
                    // so matching against the first entry is sufficient.
                    PrivEntry privEntry = entries.get(0);
                    if (!privEntry.isAnyUser() && !privEntry.getUserPattern().match(user)) {
                        continue;
                    }
                    if (!privEntry.isAnyHost() && !privEntry.getHostPattern().match(host)) {
                        continue;
                    }
                    privEntryIterator = entries.iterator();
                    return true;
                }
                return false;
            }
            @Override
            public boolean hasNext() {
                if (privEntryIterator == null) {
                    return false;
                }
                if (privEntryIterator.hasNext()) {
                    return true;
                } else {
                    return iterMapToNextMatchedIdentity();
                }
            }
            @Override
            public PrivEntry next() {
                return privEntryIterator.next();
            }
        };
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("\n");
        Iterator<PrivEntry> iter = this.getFullReadOnlyIterator();
        while (iter.hasNext()) {
            sb.append(iter.next()).append("\n");
        }
        return sb.toString();
    }

    @Override
    public void write(DataOutput out) throws IOException {
        if (!isClassNameWrote) {
            String className = PrivTable.class.getCanonicalName();
            Text.writeString(out, className);
            isClassNameWrote = true;
        }
        out.writeInt(this.size());
        Iterator<PrivEntry> iter = this.getFullReadOnlyIterator();
        while (iter.hasNext()) {
            iter.next().write(out);
        }
        isClassNameWrote = false;
    }

    /** Reads 'size' entries and groups them by user identity. */
    public void readFields(DataInput in) throws IOException {
        int size = in.readInt();
        for (int i = 0; i < size; i++) {
            PrivEntry entry = PrivEntry.read(in);
            UserIdentity newUser = entry.getUserIdent();
            List<PrivEntry> entries = map.computeIfAbsent(newUser, k -> new ArrayList<>());
            entries.add(entry);
        }
    }
}
|
Hello @gastaldi, in this specific test case I was just interested in seeing whether the `Multi-Release: true` attribute made it into the jar file, and for that test, using any variant of the `JarFile` constructor and then calling `isMultiRelease()` works fine.
|
/**
 * Asserts that exactly one uber-jar (plus one ".original" backup) was produced,
 * that it is a multi-release jar, and that its manifest is readable.
 */
private void verifyUberJar() throws IOException {
    final File targetDir = getTargetDir();
    final List<File> jars = getFilesEndingWith(targetDir, ".jar");
    assertThat(jars).hasSize(1);
    assertThat(getNumberOfFilesEndingWith(targetDir, ".original")).isEqualTo(1);
    final File uberJar = jars.get(0);
    try (JarFile jarFile = new JarFile(uberJar)) {
        Assertions.assertTrue(jarFile.isMultiRelease(),
                "uber-jar " + uberJar + " was expected to be a multi-release jar but wasn't");
    }
    ensureManifestOfJarIsReadableByJarInputStream(uberJar);
}
|
try (JarFile jarFile = new JarFile(jars.get(0))) {
|
// Asserts the build produced exactly one uber-jar (plus the ".original" backup of
// the pre-shaded jar), that it is flagged multi-release, and that its manifest is
// readable via JarInputStream.
private void verifyUberJar() throws IOException {
    final File targetDir = getTargetDir();
    List<File> jars = getFilesEndingWith(targetDir, ".jar");
    assertThat(jars).hasSize(1);
    assertThat(getNumberOfFilesEndingWith(targetDir, ".original")).isEqualTo(1);
    // isMultiRelease() reflects the Multi-Release manifest attribute.
    try (JarFile jarFile = new JarFile(jars.get(0))) {
        Assertions.assertTrue(jarFile.isMultiRelease(), "uber-jar " + jars.get(0)
                + " was expected to be a multi-release jar but wasn't");
    }
    ensureManifestOfJarIsReadableByJarInputStream(jars.get(0));
}
|
/**
 * Integration tests for the Quarkus Maven plugin "package" goal, covering the
 * uber-jar, legacy-jar, fast-jar, mutable-jar and native-sources package types.
 * Each test invokes a real Maven build against a fixture project and inspects
 * the produced artifacts on disk.
 */
class PackageIT extends MojoTestBase {

    // Invoker for the Maven build currently under test.
    private RunningInvoker running;
    // Working copy of the fixture project being built.
    private File testDir;

    @Test
    public void testUberJarMavenPluginConfiguration()
            throws MavenInvocationException, IOException, InterruptedException {
        testDir = initProject("projects/uberjar-maven-plugin-config");
        running = new RunningInvoker(testDir, false);
        final MavenProcessInvocationResult result = running.execute(Collections.singletonList("package"),
                Collections.emptyMap());
        assertThat(result.getProcess().waitFor()).isEqualTo(0);
        verifyUberJar();
    }

    // Ensures the manifest is placed so that JarInputStream can see it (it must be
    // among the first entries of the jar).
    private void ensureManifestOfJarIsReadableByJarInputStream(File jar) throws IOException {
        try (InputStream fileInputStream = new FileInputStream(jar)) {
            try (JarInputStream stream = new JarInputStream(fileInputStream)) {
                Manifest manifest = stream.getManifest();
                assertThat(manifest).isNotNull();
            }
        }
    }

    @Test
    public void testQuarkusPackageOutputDirectory()
            throws MavenInvocationException, IOException, InterruptedException {
        testDir = initProject("projects/quarkus.package.output-directory");
        running = new RunningInvoker(testDir, false);
        final MavenProcessInvocationResult result = running.execute(Collections.singletonList("package"),
                Collections.emptyMap());
        assertThat(result.getProcess().waitFor()).isEqualTo(0);
        File targetDir = getTargetDir();
        List<File> jars = getFilesEndingWith(targetDir, ".jar");
        assertThat(jars).hasSize(1);
        // The fixture also configures a custom output directory; it must contain a jar too.
        targetDir = new File(targetDir, "custom-output-dir");
        assertThat(targetDir).exists();
        jars = getFilesEndingWith(targetDir, ".jar");
        assertThat(jars).hasSize(1);
    }

    /**
     * POM files are often found among the project's dependencies.
     * This test makes sure such projects can be built with mutable-jar format
     * without choking on non-jar dependencies.
     */
    @Test
    public void testDependencyOnPomMutableJar()
            throws MavenInvocationException, IOException, InterruptedException {
        testDir = initProject("projects/dependency-on-pom");
        running = new RunningInvoker(testDir, false);
        final MavenProcessInvocationResult result = running.execute(Collections.singletonList("package"),
                Collections.emptyMap());
        assertThat(result.getProcess().waitFor()).isEqualTo(0);
        File targetDir = getTargetDir();
        List<File> jars = getFilesEndingWith(targetDir, ".jar");
        assertThat(jars).hasSize(1);
    }

    @Test
    public void testPackageWorksWhenUberjarIsTrue()
            throws MavenInvocationException, IOException, InterruptedException {
        testDir = initProject("projects/uberjar-check");
        // Packaging twice verifies the build is repeatable on an already-built tree.
        createAndVerifyUberJar();
        createAndVerifyUberJar();
    }

    private void createAndVerifyUberJar() throws IOException, MavenInvocationException, InterruptedException {
        Properties p = new Properties();
        p.setProperty("quarkus.package.type", "uber-jar");
        running = new RunningInvoker(testDir, false);
        final MavenProcessInvocationResult result = running.execute(Collections.singletonList("package"),
                Collections.emptyMap(), p);
        assertThat(result.getProcess().waitFor()).isEqualTo(0);
        verifyUberJar();
    }

    @Test
    public void testCustomPackaging()
            throws Exception {
        // First install the custom packaging plugin, then build an app that uses it.
        testDir = getTargetDir("projects/custom-packaging-plugin");
        running = new RunningInvoker(testDir, false);
        MavenProcessInvocationResult result = running.execute(Collections.singletonList("install"),
                Collections.emptyMap());
        assertThat(result.getProcess().waitFor()).isEqualTo(0);
        testDir = getTargetDir("projects/custom-packaging-app");
        running = new RunningInvoker(testDir, false);
        result = running.execute(Collections.singletonList("package"),
                Collections.emptyMap());
        assertThat(result.getProcess().waitFor()).isEqualTo(0);
        // Fix: the previous version collected the produced jar names into a Set that
        // was never asserted on; that dead code has been removed.
        final Path runnerJar = getTargetDir().toPath().resolve("quarkus-app").resolve("quarkus-run.jar");
        Assertions.assertTrue(Files.exists(runnerJar), "Runner jar " + runnerJar + " is missing");
        assertZipEntriesCanBeOpenedAndClosed(runnerJar);
    }

    /**
     * Tests that the uber runner jar created by Quarkus has valid CRC entries. The verification
     * is pretty trivial and involves opening and closing the ZipEntry entries that are part of the
     * runner jar. That internally triggers the CRC checks.
     * (Link to the motivating issue was truncated in the original source.)
     *
     * @throws Exception
     */
    @Test
    public void testRunnerUberJarHasValidCRC() throws Exception {
        testDir = initProject("projects/uberjar-check", "projects/project-uberjar-crc");
        running = new RunningInvoker(testDir, false);
        Properties p = new Properties();
        p.setProperty("quarkus.package.type", "uber-jar");
        final MavenProcessInvocationResult result = running.execute(Collections.singletonList("package"),
                Collections.emptyMap(), p);
        assertThat(result.getProcess().waitFor()).isEqualTo(0);
        final File targetDir = getTargetDir();
        assertThat(getNumberOfFilesEndingWith(targetDir, ".jar")).isEqualTo(1);
        assertThat(getNumberOfFilesEndingWith(targetDir, ".original")).isEqualTo(1);
        final Path runnerJar = targetDir.toPath().resolve("acme-1.0-SNAPSHOT-runner.jar");
        Assertions.assertTrue(Files.exists(runnerJar), "Runner jar " + runnerJar + " is missing");
        assertZipEntriesCanBeOpenedAndClosed(runnerJar);
    }

    /**
     * Tests that the runner jar created by Quarkus has valid CRC entries. The verification
     * is pretty trivial and involves opening and closing the ZipEntry entries that are part of the
     * runner jar. That internally triggers the CRC checks.
     * (Link to the motivating issue was truncated in the original source.)
     *
     * @throws Exception
     */
    @Test
    public void testLegacyJarHasValidCRC() throws Exception {
        testDir = initProject("projects/uberjar-check", "projects/project-legacyjar-crc");
        running = new RunningInvoker(testDir, false);
        final MavenProcessInvocationResult result = running.execute(Collections.singletonList("package"),
                Collections.singletonMap("QUARKUS_PACKAGE_TYPE", "legacy-jar"));
        assertThat(result.getProcess().waitFor()).isEqualTo(0);
        final File targetDir = getTargetDir();
        assertThat(getNumberOfFilesEndingWith(targetDir, ".jar")).isEqualTo(2);
        final Path runnerJar = targetDir.toPath().resolve("acme-1.0-SNAPSHOT-runner.jar");
        Assertions.assertTrue(Files.exists(runnerJar), "Runner jar " + runnerJar + " is missing");
        assertZipEntriesCanBeOpenedAndClosed(runnerJar);
    }

    /**
     * Tests that the runner jar created by Quarkus has valid CRC entries. The verification
     * is pretty trivial and involves opening and closing the ZipEntry entries that are part of the
     * runner jar. That internally triggers the CRC checks.
     * (Link to the motivating issue was truncated in the original source.)
     *
     * @throws Exception
     */
    @Test
    public void testFastJarHasValidCRC() throws Exception {
        testDir = initProject("projects/uberjar-check", "projects/project-fastjar-crc");
        running = new RunningInvoker(testDir, false);
        final MavenProcessInvocationResult result = running.execute(Collections.singletonList("package"),
                Collections.emptyMap());
        assertThat(result.getProcess().waitFor()).isEqualTo(0);
        final Path runnerJar = getTargetDir().toPath().resolve("quarkus-app").resolve("quarkus-run.jar");
        Assertions.assertTrue(Files.exists(runnerJar), "Runner jar " + runnerJar + " is missing");
        assertZipEntriesCanBeOpenedAndClosed(runnerJar);
    }

    /**
     * Tests that quarkus.index-dependency.* can be used for modules in a multimodule project.
     */
    @Test
    public void testQuarkusIndexDependencyOnLocalModule() throws Exception {
        testDir = initProject("projects/quarkus-index-dependencies");
        running = new RunningInvoker(testDir, false);
        final MavenProcessInvocationResult result = running.execute(Collections.singletonList("package"),
                Collections.emptyMap());
        assertThat(result.getProcess().waitFor()).isEqualTo(0);
        final File targetDir = new File(testDir.getAbsoluteFile(), "runner" + File.separator + "target");
        final Path runnerJar = targetDir.toPath().resolve("quarkus-app").resolve("quarkus-run.jar");
        Assertions.assertTrue(Files.exists(runnerJar), "Runner jar " + runnerJar + " is missing");
        assertZipEntriesCanBeOpenedAndClosed(runnerJar);
    }

    @Test
    public void testNativeSourcesPackage() throws Exception {
        testDir = initProject("projects/uberjar-check", "projects/project-native-sources");
        running = new RunningInvoker(testDir, false);
        final MavenProcessInvocationResult result = running.execute(
                Arrays.asList("package", "-Dquarkus.package.type=native-sources"),
                Collections.emptyMap());
        assertThat(result.getProcess().waitFor()).isEqualTo(0);
        final File targetDir = getTargetDir();
        final Path nativeSourcesDir = targetDir.toPath().resolve("native-sources");
        assertThat(nativeSourcesDir).exists()
                .isDirectoryContaining(p -> "native-image.args".equals(p.getFileName().toString()))
                .isDirectoryContaining(p -> "acme-1.0-SNAPSHOT-runner.jar".equals(p.getFileName().toString()));
    }

    // Convenience: number of files under dir whose name ends with suffix.
    private int getNumberOfFilesEndingWith(File dir, String suffix) {
        return getFilesEndingWith(dir, suffix).size();
    }

    // The Maven "target" directory of the current fixture project.
    private File getTargetDir() {
        return new File(testDir.getAbsoluteFile() + "/target");
    }

    /**
     * Opens and closes every entry of the given jar, which forces the per-entry
     * CRC checks to run.
     */
    private void assertZipEntriesCanBeOpenedAndClosed(final Path jar) throws Exception {
        // Fix: the ZipInputStream itself is now managed by try-with-resources, and the
        // unused loop variable has been dropped.
        try (InputStream is = Files.newInputStream(jar);
                ZipInputStream zis = new ZipInputStream(is)) {
            while (zis.getNextEntry() != null) {
                zis.closeEntry();
            }
        }
    }
}
|
/**
 * Integration tests for the Quarkus Maven plugin "package" goal. Each test runs
 * a real Maven build against a fixture project and inspects the produced
 * artifacts on disk.
 */
class PackageIT extends MojoTestBase {
    // Invoker for the Maven build currently under test.
    private RunningInvoker running;
    // Working copy of the fixture project being built.
    private File testDir;

    @Test
    public void testUberJarMavenPluginConfiguration()
            throws MavenInvocationException, IOException, InterruptedException {
        testDir = initProject("projects/uberjar-maven-plugin-config");
        running = new RunningInvoker(testDir, false);
        final MavenProcessInvocationResult result = running.execute(Collections.singletonList("package"),
                Collections.emptyMap());
        assertThat(result.getProcess().waitFor()).isEqualTo(0);
        verifyUberJar();
    }

    // Ensures the manifest is placed so that JarInputStream can see it (it must be
    // among the first entries of the jar).
    private void ensureManifestOfJarIsReadableByJarInputStream(File jar) throws IOException {
        try (InputStream fileInputStream = new FileInputStream(jar)) {
            try (JarInputStream stream = new JarInputStream(fileInputStream)) {
                Manifest manifest = stream.getManifest();
                assertThat(manifest).isNotNull();
            }
        }
    }

    @Test
    public void testQuarkusPackageOutputDirectory()
            throws MavenInvocationException, IOException, InterruptedException {
        testDir = initProject("projects/quarkus.package.output-directory");
        running = new RunningInvoker(testDir, false);
        final MavenProcessInvocationResult result = running.execute(Collections.singletonList("package"),
                Collections.emptyMap());
        assertThat(result.getProcess().waitFor()).isEqualTo(0);
        File targetDir = getTargetDir();
        List<File> jars = getFilesEndingWith(targetDir, ".jar");
        assertThat(jars).hasSize(1);
        // The fixture also configures a custom output directory; it must contain a jar too.
        targetDir = new File(targetDir, "custom-output-dir");
        assertThat(targetDir).exists();
        jars = getFilesEndingWith(targetDir, ".jar");
        assertThat(jars).hasSize(1);
    }

    /**
     * POM files are often found among the project's dependencies.
     * This test makes sure such projects can be built with mutable-jar format
     * without choking on non-jar dependencies.
     */
    @Test
    public void testDependencyOnPomMutableJar()
            throws MavenInvocationException, IOException, InterruptedException {
        testDir = initProject("projects/dependency-on-pom");
        running = new RunningInvoker(testDir, false);
        final MavenProcessInvocationResult result = running.execute(Collections.singletonList("package"),
                Collections.emptyMap());
        assertThat(result.getProcess().waitFor()).isEqualTo(0);
        File targetDir = getTargetDir();
        List<File> jars = getFilesEndingWith(targetDir, ".jar");
        assertThat(jars).hasSize(1);
    }

    @Test
    public void testPackageWorksWhenUberjarIsTrue()
            throws MavenInvocationException, IOException, InterruptedException {
        testDir = initProject("projects/uberjar-check");
        // Packaging twice verifies the build is repeatable on an already-built tree.
        createAndVerifyUberJar();
        createAndVerifyUberJar();
    }

    private void createAndVerifyUberJar() throws IOException, MavenInvocationException, InterruptedException {
        Properties p = new Properties();
        p.setProperty("quarkus.package.type", "uber-jar");
        running = new RunningInvoker(testDir, false);
        final MavenProcessInvocationResult result = running.execute(Collections.singletonList("package"),
                Collections.emptyMap(), p);
        assertThat(result.getProcess().waitFor()).isEqualTo(0);
        verifyUberJar();
    }

    @Test
    public void testCustomPackaging()
            throws Exception {
        // First install the custom packaging plugin, then build an app that uses it.
        testDir = getTargetDir("projects/custom-packaging-plugin");
        running = new RunningInvoker(testDir, false);
        MavenProcessInvocationResult result = running.execute(Collections.singletonList("install"),
                Collections.emptyMap());
        assertThat(result.getProcess().waitFor()).isEqualTo(0);
        testDir = getTargetDir("projects/custom-packaging-app");
        running = new RunningInvoker(testDir, false);
        result = running.execute(Collections.singletonList("package"),
                Collections.emptyMap());
        assertThat(result.getProcess().waitFor()).isEqualTo(0);
        final File targetDir = getTargetDir();
        // NOTE(review): jarNames is collected but never asserted on — dead code?
        final File[] files = targetDir.listFiles(f -> f.getName().endsWith(".jar"));
        Set<String> jarNames = new HashSet<>(files.length);
        for (File f : files) {
            jarNames.add(f.getName());
        }
        final Path runnerJar = getTargetDir().toPath().resolve("quarkus-app").resolve("quarkus-run.jar");
        Assertions.assertTrue(Files.exists(runnerJar), "Runner jar " + runnerJar + " is missing");
        assertZipEntriesCanBeOpenedAndClosed(runnerJar);
    }

    /**
     * Tests that the uber runner jar created by Quarkus has valid CRC entries. The verification
     * is pretty trivial and involves opening and closing the ZipEntry entries that are part of the
     * runner jar. That internally triggers the CRC checks.
     *
     * @throws Exception
     * @see <a href="https:
     */
    @Test
    public void testRunnerUberJarHasValidCRC() throws Exception {
        testDir = initProject("projects/uberjar-check", "projects/project-uberjar-crc");
        running = new RunningInvoker(testDir, false);
        Properties p = new Properties();
        p.setProperty("quarkus.package.type", "uber-jar");
        final MavenProcessInvocationResult result = running.execute(Collections.singletonList("package"),
                Collections.emptyMap(), p);
        assertThat(result.getProcess().waitFor()).isEqualTo(0);
        final File targetDir = getTargetDir();
        assertThat(getNumberOfFilesEndingWith(targetDir, ".jar")).isEqualTo(1);
        assertThat(getNumberOfFilesEndingWith(targetDir, ".original")).isEqualTo(1);
        final Path runnerJar = targetDir.toPath().resolve("acme-1.0-SNAPSHOT-runner.jar");
        Assertions.assertTrue(Files.exists(runnerJar), "Runner jar " + runnerJar + " is missing");
        assertZipEntriesCanBeOpenedAndClosed(runnerJar);
    }

    /**
     * Tests that the runner jar created by Quarkus has valid CRC entries. The verification
     * is pretty trivial and involves opening and closing the ZipEntry entries that are part of the
     * runner jar. That internally triggers the CRC checks.
     *
     * @throws Exception
     * @see <a href="https:
     */
    @Test
    public void testLegacyJarHasValidCRC() throws Exception {
        testDir = initProject("projects/uberjar-check", "projects/project-legacyjar-crc");
        running = new RunningInvoker(testDir, false);
        final MavenProcessInvocationResult result = running.execute(Collections.singletonList("package"),
                Collections.singletonMap("QUARKUS_PACKAGE_TYPE", "legacy-jar"));
        assertThat(result.getProcess().waitFor()).isEqualTo(0);
        final File targetDir = getTargetDir();
        assertThat(getNumberOfFilesEndingWith(targetDir, ".jar")).isEqualTo(2);
        final Path runnerJar = targetDir.toPath().resolve("acme-1.0-SNAPSHOT-runner.jar");
        Assertions.assertTrue(Files.exists(runnerJar), "Runner jar " + runnerJar + " is missing");
        assertZipEntriesCanBeOpenedAndClosed(runnerJar);
    }

    /**
     * Tests that the runner jar created by Quarkus has valid CRC entries. The verification
     * is pretty trivial and involves opening and closing the ZipEntry entries that are part of the
     * runner jar. That internally triggers the CRC checks.
     *
     * @throws Exception
     * @see <a href="https:
     */
    @Test
    public void testFastJarHasValidCRC() throws Exception {
        testDir = initProject("projects/uberjar-check", "projects/project-fastjar-crc");
        running = new RunningInvoker(testDir, false);
        final MavenProcessInvocationResult result = running.execute(Collections.singletonList("package"),
                Collections.emptyMap());
        assertThat(result.getProcess().waitFor()).isEqualTo(0);
        final Path runnerJar = getTargetDir().toPath().resolve("quarkus-app").resolve("quarkus-run.jar");
        Assertions.assertTrue(Files.exists(runnerJar), "Runner jar " + runnerJar + " is missing");
        assertZipEntriesCanBeOpenedAndClosed(runnerJar);
    }

    /**
     * Tests that quarkus.index-dependency.* can be used for modules in a multimodule project
     */
    @Test
    public void testQuarkusIndexDependencyOnLocalModule() throws Exception {
        testDir = initProject("projects/quarkus-index-dependencies");
        running = new RunningInvoker(testDir, false);
        final MavenProcessInvocationResult result = running.execute(Collections.singletonList("package"),
                Collections.emptyMap());
        assertThat(result.getProcess().waitFor()).isEqualTo(0);
        final File targetDir = new File(testDir.getAbsoluteFile(), "runner" + File.separator + "target");
        final Path runnerJar = targetDir.toPath().resolve("quarkus-app").resolve("quarkus-run.jar");
        Assertions.assertTrue(Files.exists(runnerJar), "Runner jar " + runnerJar + " is missing");
        assertZipEntriesCanBeOpenedAndClosed(runnerJar);
    }

    @Test
    public void testNativeSourcesPackage() throws Exception {
        testDir = initProject("projects/uberjar-check", "projects/project-native-sources");
        running = new RunningInvoker(testDir, false);
        final MavenProcessInvocationResult result = running.execute(
                Arrays.asList("package", "-Dquarkus.package.type=native-sources"),
                Collections.emptyMap());
        assertThat(result.getProcess().waitFor()).isEqualTo(0);
        final File targetDir = getTargetDir();
        final Path nativeSourcesDir = targetDir.toPath().resolve("native-sources");
        assertThat(nativeSourcesDir).exists()
                .isDirectoryContaining(p -> "native-image.args".equals(p.getFileName().toString()))
                .isDirectoryContaining(p -> "acme-1.0-SNAPSHOT-runner.jar".equals(p.getFileName().toString()));
    }

    // Convenience: number of files under dir whose name ends with suffix.
    private int getNumberOfFilesEndingWith(File dir, String suffix) {
        return getFilesEndingWith(dir, suffix).size();
    }

    // The Maven "target" directory of the current fixture project.
    private File getTargetDir() {
        return new File(testDir.getAbsoluteFile() + "/target");
    }

    // Opens and closes every zip entry, which triggers the per-entry CRC checks.
    private void assertZipEntriesCanBeOpenedAndClosed(final Path jar) throws Exception {
        try (final InputStream is = Files.newInputStream(jar)) {
            final ZipInputStream zis = new ZipInputStream(is);
            ZipEntry e = null;
            while ((e = zis.getNextEntry()) != null) {
                zis.closeEntry();
            }
        }
    }
}
|
Yes, `super.trySplit()` handles the empty range. The special case here is to handle `range.getTo() == range.getFrom() == Long.MAX_VALUE`.
|
/**
 * Splits off the residual [splitPos, Long.MAX_VALUE) of a growable range, using the
 * poller's end estimate to pick splitPos; delegates to the base tracker otherwise.
 */
public SplitResult<OffsetRange> trySplit(double fractionOfRemainder) {
    // Only a non-empty range ending at the Long.MAX_VALUE sentinel is "growable".
    if (range.getTo() != Long.MAX_VALUE || range.getTo() == range.getFrom()) {
        return super.trySplit(fractionOfRemainder);
    }
    // Current position: one before the range start if nothing was attempted yet.
    long cur = (lastAttemptedOffset == null) ? range.getFrom() - 1 : lastAttemptedOffset;
    if (cur == Long.MAX_VALUE) {
        // Already at the largest representable offset; nothing left to split off.
        return null;
    }
    // Use the poller's estimate, but never earlier than one past the current position.
    long estimateLatestOffset = Long.max(poller.estimateRangeEnd(), cur + 1);
    // Advance by at least one offset; the fraction is applied to the estimated remainder.
    // NOTE(review): this long/double arithmetic can overflow or lose precision when
    // cur/estimate approach Long.MAX_VALUE — TODO confirm against the BigDecimal revision.
    long splitPos =
            cur
                    + Math.max(
                            1L,
                            (Double.valueOf((estimateLatestOffset - cur) * fractionOfRemainder)).longValue());
    if (splitPos > estimateLatestOffset) {
        return null;
    }
    // Primary keeps [from, splitPos); residual is [splitPos, Long.MAX_VALUE).
    OffsetRange res = new OffsetRange(splitPos, range.getTo());
    this.range = new OffsetRange(range.getFrom(), splitPos);
    return SplitResult.of(range, res);
}
|
1L,
|
/**
 * Splits off the residual [split, Long.MAX_VALUE) of a growable range, sizing the
 * split from the estimator; delegates to the base tracker for bounded/empty ranges.
 */
public SplitResult<OffsetRange> trySplit(double fractionOfRemainder) {
    // Only a non-empty range ending at the Long.MAX_VALUE sentinel is "growable".
    if (range.getTo() != Long.MAX_VALUE || range.getTo() == range.getFrom()) {
        return super.trySplit(fractionOfRemainder);
    }
    // Already attempted the largest representable offset; nothing left to split off.
    if (lastAttemptedOffset != null && lastAttemptedOffset == Long.MAX_VALUE) {
        return null;
    }
    // BigDecimal (DECIMAL128) avoids long overflow when offsets approach Long.MAX_VALUE.
    // Current position: one before the range start if nothing was attempted yet.
    BigDecimal cur =
            (lastAttemptedOffset == null)
                    ? BigDecimal.valueOf(range.getFrom()).subtract(BigDecimal.ONE, MathContext.DECIMAL128)
                    : BigDecimal.valueOf(lastAttemptedOffset);
    // Use the estimate, but never earlier than one past the current position.
    BigDecimal estimateRangeEnd =
            BigDecimal.valueOf(rangeEndEstimator.estimate())
                    .max(cur.add(BigDecimal.ONE, MathContext.DECIMAL128));
    // split = cur + max(1, fractionOfRemainder * (estimate - cur)).
    BigDecimal splitPos =
            cur.add(
                    estimateRangeEnd
                            .subtract(cur, MathContext.DECIMAL128)
                            .multiply(BigDecimal.valueOf(fractionOfRemainder), MathContext.DECIMAL128)
                            .max(BigDecimal.ONE),
                    MathContext.DECIMAL128);
    long split = splitPos.longValue();
    if (split > estimateRangeEnd.longValue()) {
        return null;
    }
    // Primary keeps [from, split); residual is [split, Long.MAX_VALUE).
    OffsetRange res = new OffsetRange(split, range.getTo());
    this.range = new OffsetRange(range.getFrom(), split);
    return SplitResult.of(range, res);
}
|
/** Tracker for a range whose end grows over time, represented as [start, Long.MAX_VALUE). */
class GrowableOffsetRangeTracker extends OffsetRangeTracker {
    /**
     * An interface that should be implemented to fetch the estimated end offset of the range.
     *
     * <p>{@code estimateRangeEnd} is called to give the end offset when {@code trySplit} or {@code
     * getProgress} is invoked. The end offset is exclusive for the range. It's not necessary to
     * increase monotonically but it's only taken into computation when it's larger than the current
     * position. When returning Long.MAX_VALUE as estimate, it means the largest possible position for
     * the range is Long.MAX_VALUE - 1. Having a good estimate is important for providing a good
     * signal of progress and splitting at a proper position.
     */
    public interface OffsetPoller {
        long estimateRangeEnd();
    }

    // Supplies the (possibly changing) estimate of the range's end offset.
    private final OffsetPoller poller;

    public GrowableOffsetRangeTracker(long start, OffsetPoller offsetPoller) {
        // A growable range always ends at the Long.MAX_VALUE sentinel.
        super(new OffsetRange(start, Long.MAX_VALUE));
        this.poller = checkNotNull(offsetPoller);
    }

    @Override
    // NOTE(review): two consecutive @Override annotations with no method between them —
    // the trySplit override appears to have been elided from this excerpt.
    @Override
    public Progress getProgress() {
        // Bounded or empty ranges are handled by the base implementation.
        if (range.getTo() != Long.MAX_VALUE || range.getTo() == range.getFrom()) {
            return super.getProgress();
        }
        long estimateRangeEnd = poller.estimateRangeEnd();
        if (lastAttemptedOffset == null) {
            // Nothing attempted yet: zero done, remaining is the estimated span (floored at 0).
            return Progress.from(0, Math.max(estimateRangeEnd - range.getFrom(), 0));
        }
        long workRemaining = Math.max(estimateRangeEnd - lastAttemptedOffset, 0);
        // Completed = total span (up to the larger of estimate/attempted) minus remaining.
        return Progress.from(
                Math.max(estimateRangeEnd, lastAttemptedOffset) - range.getFrom() - workRemaining,
                workRemaining);
    }
}
|
/** Tracker for a range whose end grows over time, represented as [start, Long.MAX_VALUE). */
class GrowableOffsetRangeTracker extends OffsetRangeTracker {
    /**
     * Provides the estimated end offset of the range.
     *
     * <p>(Some {@code {@link}} targets were truncated in this excerpt.) The estimate is not
     * required to monotonically increase as it will only be taken into consideration when the
     * estimated end offset is larger than the current position. Returning {@code Long.MAX_VALUE} as
     * the estimate implies the largest possible position for the range is {@code Long.MAX_VALUE - 1}.
     * Return {@code Long.MIN_VALUE} if an estimate can not be provided.
     *
     * <p>Providing a good estimate is important for an accurate progress signal and will impact
     * splitting decisions by the runner.
     *
     * <p>TODO(BEAM-10032): Also consider using {@link RangeEndEstimator} when the range is not ended
     * with {@code Long.MAX_VALUE}.
     */
    @FunctionalInterface
    public interface RangeEndEstimator {
        long estimate();
    }

    // Supplies the (possibly changing) estimate of the range's end offset.
    private final RangeEndEstimator rangeEndEstimator;

    public GrowableOffsetRangeTracker(long start, RangeEndEstimator rangeEndEstimator) {
        // A growable range always ends at the Long.MAX_VALUE sentinel.
        super(new OffsetRange(start, Long.MAX_VALUE));
        this.rangeEndEstimator = checkNotNull(rangeEndEstimator);
    }

    @Override
    // NOTE(review): two consecutive @Override annotations with no method between them —
    // the trySplit override appears to have been elided from this excerpt.
    @Override
    public Progress getProgress() {
        // Bounded or empty ranges are handled by the base implementation.
        if (range.getTo() != Long.MAX_VALUE || range.getTo() == range.getFrom()) {
            return super.getProgress();
        }
        // BigDecimal (DECIMAL128) avoids long overflow near Long.MAX_VALUE.
        BigDecimal estimateRangeEnd = BigDecimal.valueOf(rangeEndEstimator.estimate());
        if (lastAttemptedOffset == null) {
            // Nothing attempted yet: zero done, remaining is the estimated span (floored at 0).
            return Progress.from(
                    0,
                    estimateRangeEnd
                            .subtract(BigDecimal.valueOf(range.getFrom()), MathContext.DECIMAL128)
                            .max(BigDecimal.ZERO)
                            .doubleValue());
        }
        BigDecimal workRemaining =
                estimateRangeEnd
                        .subtract(BigDecimal.valueOf(lastAttemptedOffset), MathContext.DECIMAL128)
                        .max(BigDecimal.ZERO);
        // Total = span from start up to the larger of estimate/attempted offset.
        BigDecimal totalWork =
                estimateRangeEnd
                        .max(BigDecimal.valueOf(lastAttemptedOffset))
                        .subtract(BigDecimal.valueOf(range.getFrom()), MathContext.DECIMAL128);
        return Progress.from(
                totalWork.subtract(workRemaining, MathContext.DECIMAL128).doubleValue(),
                workRemaining.doubleValue());
    }
}
|
Guilty as charged! Should be fixed now :)
|
/**
 * Generates a public {@code getReception()} on the observer class that always
 * returns {@code Reception.IF_EXISTS}.
 */
protected void implementIfExistsGetReception(ClassCreator observerCreator) {
    // Fix: the local was misleadingly named getObservedType even though the method
    // being generated here is getReception.
    MethodCreator getReception = observerCreator.getMethodCreator("getReception", Reception.class)
            .setModifiers(ACC_PUBLIC);
    getReception.returnValue(getReception.load(Reception.IF_EXISTS));
}
|
getObservedType.returnValue(getObservedType.load(Reception.IF_EXISTS));
|
/**
 * Generates a public {@code getReception()} on the observer class that always
 * reports {@code Reception.IF_EXISTS}.
 */
protected void implementIfExistsGetReception(ClassCreator observerCreator) {
    final MethodCreator receptionMethod = observerCreator
            .getMethodCreator("getReception", Reception.class)
            .setModifiers(ACC_PUBLIC);
    receptionMethod.returnValue(receptionMethod.load(Reception.IF_EXISTS));
}
|
class name " + generatedName + " already exists for "
+ generatedObserver);
} else {
return Collections.emptyList();
}
|
class name " + generatedName + " already exists for "
+ generatedObserver);
} else {
return Collections.emptyList();
}
|
But it should. Even `tsymbol.type` can be a reference type, e.g., ```ballerina type Foo int; type Bar Foo; public function foo() returns Bar { return 1; } ```
|
/**
 * Warns (ATTEMPT_EXPOSE_NON_PUBLIC_SYMBOL) when a non-public type symbol is
 * transitively reachable from an exported construct. Recurses through the
 * constituent types of containers; visitedSymbols guards against cycles.
 */
private void checkForExportableType(BTypeSymbol symbol, Location pos, HashSet<BTypeSymbol> visitedSymbols) {
    // Nothing to check for absent types; TYPE_PARAM symbols are exempt.
    if (symbol == null || symbol.type == null || Symbols.isFlagOn(symbol.flags, Flags.TYPE_PARAM)) {
        return;
    }
    // Cycle guard: each symbol is inspected at most once per traversal.
    if (!visitedSymbols.add(symbol)) {
        return;
    }
    // Resolve type-reference indirection up front so the switch sees the referred type.
    BType symbolType = Types.getReferredType(symbol.type);
    switch (symbolType.tag) {
        case TypeTags.ARRAY:
            checkForExportableType(((BArrayType) symbolType).eType.tsymbol, pos, visitedSymbols);
            return;
        case TypeTags.TUPLE:
            BTupleType tupleType = (BTupleType) symbolType;
            tupleType.tupleTypes.forEach(t -> checkForExportableType(t.tsymbol, pos, visitedSymbols));
            if (tupleType.restType != null) {
                checkForExportableType(tupleType.restType.tsymbol, pos, visitedSymbols);
            }
            return;
        case TypeTags.MAP:
            checkForExportableType(((BMapType) symbolType).constraint.tsymbol, pos, visitedSymbols);
            return;
        case TypeTags.RECORD:
            // Only anonymous records are expanded; named records fall through to the
            // visibility check below.
            if (Symbols.isFlagOn(symbol.flags, Flags.ANONYMOUS)) {
                BRecordType recordType = (BRecordType) symbolType;
                recordType.fields.values().forEach(f -> checkForExportableType(f.type.tsymbol, pos,
                        visitedSymbols));
                if (recordType.restFieldType != null) {
                    checkForExportableType(recordType.restFieldType.tsymbol, pos, visitedSymbols);
                }
                return;
            }
            break;
        case TypeTags.TABLE:
            BTableType tableType = (BTableType) symbolType;
            if (tableType.constraint != null) {
                checkForExportableType(tableType.constraint.tsymbol, pos, visitedSymbols);
            }
            return;
        case TypeTags.STREAM:
            BStreamType streamType = (BStreamType) symbolType;
            if (streamType.constraint != null) {
                checkForExportableType(streamType.constraint.tsymbol, pos, visitedSymbols);
            }
            return;
        case TypeTags.INVOKABLE:
            BInvokableType invokableType = (BInvokableType) symbolType;
            // `function` (any-function) carries no parameter/return info to inspect.
            if (Symbols.isFlagOn(invokableType.flags, Flags.ANY_FUNCTION)) {
                return;
            }
            if (invokableType.paramTypes != null) {
                for (BType paramType : invokableType.paramTypes) {
                    checkForExportableType(paramType.tsymbol, pos, visitedSymbols);
                }
            }
            if (invokableType.restType != null) {
                checkForExportableType(invokableType.restType.tsymbol, pos, visitedSymbols);
            }
            checkForExportableType(invokableType.retType.tsymbol, pos, visitedSymbols);
            return;
        case TypeTags.PARAMETERIZED_TYPE:
            BTypeSymbol parameterizedType = ((BParameterizedType) symbolType).paramValueType.tsymbol;
            checkForExportableType(parameterizedType, pos, visitedSymbols);
            return;
        case TypeTags.ERROR:
            // Anonymous errors: inspect the detail type; named errors fall through to
            // the visibility check below (last case, so no break is needed).
            if (Symbols.isFlagOn(symbol.flags, Flags.ANONYMOUS)) {
                checkForExportableType((((BErrorType) symbolType).detailType.tsymbol), pos, visitedSymbols);
                return;
            }
    }
    // Leaf case: the symbol itself must be public to be exposed.
    if (!Symbols.isPublic(symbol)) {
        dlog.warning(pos, DiagnosticWarningCode.ATTEMPT_EXPOSE_NON_PUBLIC_SYMBOL, symbol.name);
    }
}
|
checkForExportableType((((BErrorType) symbolType).detailType.tsymbol), pos, visitedSymbols);
|
/**
 * Warns (ATTEMPT_EXPOSE_NON_PUBLIC_SYMBOL) when a non-public type symbol is
 * transitively reachable from an exported construct. Recurses through the
 * constituent types of containers; visitedSymbols guards against cycles.
 * Type-reference indirection is handled explicitly by the TYPEREFDESC case.
 */
private void checkForExportableType(BTypeSymbol symbol, Location pos, HashSet<BTypeSymbol> visitedSymbols) {
    // Nothing to check for absent types; TYPE_PARAM symbols are exempt.
    if (symbol == null || symbol.type == null || Symbols.isFlagOn(symbol.flags, Flags.TYPE_PARAM)) {
        return;
    }
    // Cycle guard: each symbol is inspected at most once per traversal.
    if (!visitedSymbols.add(symbol)) {
        return;
    }
    BType symbolType = symbol.type;
    switch (symbolType.tag) {
        case TypeTags.ARRAY:
            checkForExportableType(((BArrayType) symbolType).eType.tsymbol, pos, visitedSymbols);
            return;
        case TypeTags.TUPLE:
            BTupleType tupleType = (BTupleType) symbolType;
            tupleType.tupleTypes.forEach(t -> checkForExportableType(t.tsymbol, pos, visitedSymbols));
            if (tupleType.restType != null) {
                checkForExportableType(tupleType.restType.tsymbol, pos, visitedSymbols);
            }
            return;
        case TypeTags.MAP:
            checkForExportableType(((BMapType) symbolType).constraint.tsymbol, pos, visitedSymbols);
            return;
        case TypeTags.RECORD:
            // Only anonymous records are expanded; named records fall through to the
            // visibility check below.
            if (Symbols.isFlagOn(symbol.flags, Flags.ANONYMOUS)) {
                BRecordType recordType = (BRecordType) symbolType;
                recordType.fields.values().forEach(f -> checkForExportableType(f.type.tsymbol, pos,
                        visitedSymbols));
                if (recordType.restFieldType != null) {
                    checkForExportableType(recordType.restFieldType.tsymbol, pos, visitedSymbols);
                }
                return;
            }
            break;
        case TypeTags.TABLE:
            BTableType tableType = (BTableType) symbolType;
            if (tableType.constraint != null) {
                checkForExportableType(tableType.constraint.tsymbol, pos, visitedSymbols);
            }
            return;
        case TypeTags.STREAM:
            BStreamType streamType = (BStreamType) symbolType;
            if (streamType.constraint != null) {
                checkForExportableType(streamType.constraint.tsymbol, pos, visitedSymbols);
            }
            return;
        case TypeTags.INVOKABLE:
            BInvokableType invokableType = (BInvokableType) symbolType;
            // `function` (any-function) carries no parameter/return info to inspect.
            if (Symbols.isFlagOn(invokableType.flags, Flags.ANY_FUNCTION)) {
                return;
            }
            if (invokableType.paramTypes != null) {
                for (BType paramType : invokableType.paramTypes) {
                    checkForExportableType(paramType.tsymbol, pos, visitedSymbols);
                }
            }
            if (invokableType.restType != null) {
                checkForExportableType(invokableType.restType.tsymbol, pos, visitedSymbols);
            }
            checkForExportableType(invokableType.retType.tsymbol, pos, visitedSymbols);
            return;
        case TypeTags.PARAMETERIZED_TYPE:
            BTypeSymbol parameterizedType = ((BParameterizedType) symbolType).paramValueType.tsymbol;
            checkForExportableType(parameterizedType, pos, visitedSymbols);
            return;
        case TypeTags.ERROR:
            // Anonymous errors: inspect the detail type; named errors fall through to
            // the visibility check below.
            if (Symbols.isFlagOn(symbol.flags, Flags.ANONYMOUS)) {
                checkForExportableType((((BErrorType) symbolType).detailType.tsymbol), pos, visitedSymbols);
                return;
            }
            break;
        case TypeTags.TYPEREFDESC:
            // Unwrap a type reference and check the referred type's symbol instead.
            symbolType = Types.getReferredType(symbolType);
            checkForExportableType(symbolType.tsymbol, pos, visitedSymbols);
            return;
    }
    // Leaf case: the symbol itself must be public to be exposed.
    if (!Symbols.isPublic(symbol)) {
        dlog.warning(pos, DiagnosticWarningCode.ATTEMPT_EXPOSE_NON_PUBLIC_SYMBOL, symbol.name);
    }
}
|
class CodeAnalyzer extends SimpleBLangNodeAnalyzer<CodeAnalyzer.AnalyzerData> {
private static final CompilerContext.Key<CodeAnalyzer> CODE_ANALYZER_KEY =
new CompilerContext.Key<>();
private final SymbolResolver symResolver;
private final SymbolTable symTable;
private final Types types;
private final BLangDiagnosticLog dlog;
private final TypeChecker typeChecker;
private final Names names;
private final ReachabilityAnalyzer reachabilityAnalyzer;
/**
 * Returns the {@code CodeAnalyzer} registered in the given compiler context,
 * creating (and thereby caching) a new instance on first use.
 */
public static CodeAnalyzer getInstance(CompilerContext context) {
    CodeAnalyzer analyzer = context.get(CODE_ANALYZER_KEY);
    if (analyzer != null) {
        return analyzer;
    }
    // The constructor registers itself under CODE_ANALYZER_KEY.
    return new CodeAnalyzer(context);
}
/**
 * Creates the analyzer and resolves the collaborating compiler components
 * from the given context.
 */
public CodeAnalyzer(CompilerContext context) {
// Register this instance first so components initialized below that look up
// the code analyzer resolve to this same instance.
context.put(CODE_ANALYZER_KEY, this);
this.symTable = SymbolTable.getInstance(context);
this.types = Types.getInstance(context);
this.dlog = BLangDiagnosticLog.getInstance(context);
this.typeChecker = TypeChecker.getInstance(context);
this.names = Names.getInstance(context);
this.symResolver = SymbolResolver.getInstance(context);
this.reachabilityAnalyzer = ReachabilityAnalyzer.getInstance(context);
}
/** Runs the code-analysis phase over the given package and returns it. */
public BLangPackage analyze(BLangPackage pkgNode) {
    AnalyzerData analyzerData = new AnalyzerData();
    visitNode(pkgNode, analyzerData);
    return pkgNode;
}
/**
 * Analyzes a package: its top-level nodes first, then any testable packages,
 * skipping packages that already completed this phase.
 */
@Override
public void visit(BLangPackage pkgNode, AnalyzerData data) {
this.dlog.setCurrentPackageId(pkgNode.packageID);
// Avoid re-running the phase for an already-analyzed package.
if (pkgNode.completedPhases.contains(CompilerPhase.CODE_ANALYZE)) {
return;
}
data.parent = pkgNode;
data.env = this.symTable.pkgEnvMap.get(pkgNode.symbol);
analyzeTopLevelNodes(pkgNode, data);
pkgNode.getTestablePkgs().forEach(testablePackage -> visitNode(testablePackage, data));
}
// A testable package is analyzed exactly like a regular package.
@Override
public void visit(BLangTestablePackage node, AnalyzerData data) {
visit((BLangPackage) node, data);
}
/** Analyzes each top-level node of the package and marks the phase completed. */
private void analyzeTopLevelNodes(BLangPackage pkgNode, AnalyzerData data) {
List<TopLevelNode> topLevelNodes = pkgNode.topLevelNodes;
// NOTE(review): index-based iteration suggests the list may grow while being
// analyzed — confirm before changing to a for-each loop.
for (int i = 0; i < topLevelNodes.size(); i++) {
analyzeNode((BLangNode) topLevelNodes.get(i), data);
}
pkgNode.completedPhases.add(CompilerPhase.CODE_ANALYZE);
}
/**
 * Visits a node while maintaining the parent link, restoring the previous
 * parent and symbol environment once the subtree has been analyzed.
 */
@Override
public void analyzeNode(BLangNode node, AnalyzerData data) {
SymbolEnv prevEnv = data.env;
BLangNode parent = data.parent;
node.parent = parent;
data.parent = node;
visitNode(node, data);
// Restore traversal state so sibling nodes see the original parent/env.
data.parent = parent;
data.env = prevEnv;
}
/** Analyzes the given type node when present; a null node is silently ignored. */
private void analyzeTypeNode(BLangType node, AnalyzerData data) {
    if (node != null) {
        analyzeNode(node, data);
    }
}
/** Analyzes every top-level construct of the compilation unit in order. */
@Override
public void visit(BLangCompilationUnit compUnitNode, AnalyzerData data) {
    for (TopLevelNode topLevelNode : compUnitNode.topLevelNodes) {
        analyzeNode((BLangNode) topLevelNode, data);
    }
}
/** Analyzes the defined type first, then the annotations on the definition. */
@Override
public void visit(BLangTypeDefinition typeDefinition, AnalyzerData data) {
    analyzeTypeNode(typeDefinition.typeNode, data);
    typeDefinition.annAttachments.forEach(attachment -> analyzeNode(attachment, data));
}
/**
 * Analyzes a class definition: fields (as object-field initializers),
 * member functions in source order, and the definition's annotations.
 */
@Override
public void visit(BLangClassDefinition classDefinition, AnalyzerData data) {
data.env = SymbolEnv.createClassEnv(classDefinition, classDefinition.symbol.scope, data.env);
// Field initializers are analyzed in the OBJECT_FIELD_INITIALIZER state.
for (BLangSimpleVariable field : classDefinition.fields) {
DefaultValueState prevDefaultValueState = data.defaultValueState;
data.defaultValueState = DefaultValueState.OBJECT_FIELD_INITIALIZER;
analyzeNode(field, data);
data.defaultValueState = prevDefaultValueState;
}
List<BLangFunction> bLangFunctionList = new ArrayList<>(classDefinition.functions);
if (classDefinition.initFunction != null) {
bLangFunctionList.add(classDefinition.initFunction);
}
// Sort by source line so diagnostics are emitted top-to-bottom.
bLangFunctionList.sort(Comparator.comparingInt(function -> function.pos.lineRange().startLine().line()));
for (BLangFunction function : bLangFunctionList) {
analyzeNode(function, data);
}
classDefinition.annAttachments.forEach(annotationAttachment -> analyzeNode(annotationAttachment, data));
}
// An object-constructor expression is analyzed through its type-init expression.
@Override
public void visit(BLangObjectConstructorExpression objectConstructorExpression, AnalyzerData data) {
visit(objectConstructorExpression.typeInit, data);
}
// A tuple variable definition delegates to the analysis of its variable.
@Override
public void visit(BLangTupleVariableDef bLangTupleVariableDef, AnalyzerData data) {
analyzeNode(bLangTupleVariableDef.var, data);
}
// A record variable definition delegates to the analysis of its variable.
@Override
public void visit(BLangRecordVariableDef bLangRecordVariableDef, AnalyzerData data) {
analyzeNode(bLangRecordVariableDef.var, data);
}
// An error variable definition delegates to the analysis of its error variable.
@Override
public void visit(BLangErrorVariableDef bLangErrorVariableDef, AnalyzerData data) {
analyzeNode(bLangErrorVariableDef.errorVariable, data);
}
// A resource function is analyzed with the common function-visit logic.
@Override
public void visit(BLangResourceFunction funcNode, AnalyzerData data) {
visit((BLangFunction) funcNode, data);
}
/**
 * Analyzes a function: parameters, return type, exportability of a public
 * signature, main-function rules, and the body inside a fresh worker-action
 * state machine for worker-interaction validation.
 */
@Override
public void visit(BLangFunction funcNode, AnalyzerData data) {
validateParams(funcNode, data);
analyzeNode(funcNode.returnTypeNode, data);
boolean isLambda = funcNode.flagSet.contains(Flag.LAMBDA);
// Lambdas are analyzed in the context of their enclosing function.
if (isLambda) {
return;
}
// Public functions must not expose non-public types in their signature.
if (Symbols.isPublic(funcNode.symbol)) {
funcNode.symbol.params.forEach(symbol -> analyzeExportableTypeRef(funcNode.symbol, symbol.type.tsymbol,
true,
funcNode.pos));
if (funcNode.symbol.restParam != null) {
analyzeExportableTypeRef(funcNode.symbol, funcNode.symbol.restParam.type.tsymbol, true,
funcNode.restParam.pos);
}
analyzeExportableTypeRef(funcNode.symbol, funcNode.symbol.retType.tsymbol, true,
funcNode.returnTypeNode.pos);
}
if (MAIN_FUNCTION_NAME.equals(funcNode.name.value)) {
new MainFunctionValidator(types, dlog).validateMainFunction(funcNode);
}
this.validateModuleInitFunction(funcNode);
// The function body runs inside the function's default worker; the
// worker-action system is always finalized, even if analysis throws.
try {
this.initNewWorkerActionSystem(data);
data.workerActionSystemStack.peek().startWorkerActionStateMachine(DEFAULT_WORKER_NAME,
funcNode.pos,
funcNode);
this.visitFunction(funcNode, data);
data.workerActionSystemStack.peek().endWorkerActionStateMachine();
} finally {
this.finalizeCurrentWorkerActionSystem(data);
}
funcNode.annAttachments.forEach(annotationAttachment -> analyzeNode(annotationAttachment, data));
validateNamedWorkerUniqueReferences(data);
}
/**
 * Reports an error for every named worker that is referenced as a plain
 * variable reference in more than one place, then resets the tracking map.
 */
private void validateNamedWorkerUniqueReferences(AnalyzerData data) {
    data.workerReferences.values().forEach(references -> {
        if (references.size() > 1) {
            references.forEach(reference -> dlog.error(reference.pos,
                    DiagnosticErrorCode.ILLEGAL_WORKER_REFERENCE_AS_A_VARIABLE_REFERENCE, reference));
        }
    });
    data.workerReferences.clear();
}
/** Analyzes all required parameters and the rest parameter (if any). */
private void validateParams(BLangFunction funcNode, AnalyzerData data) {
    funcNode.requiredParams.forEach(param -> analyzeNode(param, data));
    BLangSimpleVariable restParam = funcNode.restParam;
    if (restParam != null) {
        analyzeNode(restParam, data);
    }
}
/**
 * Analyzes the body of a function in a fresh function environment, tracking
 * return types, transaction-return validity and transactional-ness.
 */
private void visitFunction(BLangFunction funcNode, AnalyzerData data) {
data.env = SymbolEnv.createFunctionEnv(funcNode, funcNode.symbol.scope, data.env);
data.returnWithinTransactionCheckStack.push(true);
data.returnTypes.push(new LinkedHashSet<>());
data.transactionalFuncCheckStack.push(funcNode.flagSet.contains(Flag.TRANSACTIONAL));
// Native functions have no analyzable body.
// NOTE(review): this early return leaves the three stacks pushed above
// unpopped — confirm this is intended for native functions.
if (Symbols.isNative(funcNode.symbol)) {
return;
}
if (isPublicInvokableNode(funcNode)) {
analyzeNode(funcNode.returnTypeNode, data);
}
/* the body can be null in the case of Object type function declarations */
if (funcNode.body != null) {
DefaultValueState prevDefaultValueState = data.defaultValueState;
// A closure defined inside a default value is analyzed in a dedicated state.
if (prevDefaultValueState == DefaultValueState.RECORD_FIELD_DEFAULT ||
prevDefaultValueState == DefaultValueState.OBJECT_FIELD_INITIALIZER) {
data.defaultValueState = DefaultValueState.FUNCTION_IN_DEFAULT_VALUE;
}
analyzeNode(funcNode.body, data);
data.defaultValueState = prevDefaultValueState;
}
reachabilityAnalyzer.analyzeReachability(funcNode, data.env);
data.returnTypes.pop();
data.returnWithinTransactionCheckStack.pop();
data.transactionalFuncCheckStack.pop();
}
/**
 * True when the invokable is public and is either a module-level construct
 * or a member of a public construct.
 */
private boolean isPublicInvokableNode(BLangInvokableNode invNode) {
    if (!Symbols.isPublic(invNode.symbol)) {
        return false;
    }
    BSymbol owner = invNode.symbol.owner;
    return SymbolKind.PACKAGE.equals(owner.getKind()) || Symbols.isPublic(owner);
}
/**
 * Analyzes a block function body, tracking loop-alteration restrictions and
 * transaction scope for transactional functions.
 */
@Override
public void visit(BLangBlockFunctionBody body, AnalyzerData data) {
boolean prevWithinTxScope = data.withinTransactionScope;
boolean prevLoopAlterNotAllowed = data.loopAlterNotAllowed;
// Record whether this body is analyzed while inside an enclosing loop.
data.loopAlterNotAllowed = data.loopCount > 0;
if (!prevWithinTxScope) {
// A transactional function's body acts as a transaction scope.
data.withinTransactionScope = data.transactionalFuncCheckStack.peek();
}
data.env = SymbolEnv.createFuncBodyEnv(body, data.env);
for (BLangStatement e : body.stmts) {
data.inInternallyDefinedBlockStmt = true;
analyzeNode(e, data);
}
data.inInternallyDefinedBlockStmt = false;
if (data.transactionalFuncCheckStack.peek()) {
data.withinTransactionScope = prevWithinTxScope;
}
data.loopAlterNotAllowed = prevLoopAlterNotAllowed;
}
// An expression-bodied function only needs its expression analyzed.
@Override
public void visit(BLangExprFunctionBody body, AnalyzerData data) {
analyzeExpr(body.expr, data);
}
// External (native) function bodies contain no Ballerina statements to analyze.
@Override
public void visit(BLangExternalFunctionBody body, AnalyzerData data) {
}
/** A fork statement must declare at least one worker. */
@Override
public void visit(BLangForkJoin forkJoin, AnalyzerData data) {
    if (!forkJoin.workers.isEmpty()) {
        return;
    }
    dlog.error(forkJoin.pos, DiagnosticErrorCode.INVALID_FOR_JOIN_SYNTAX_EMPTY_FORK);
}
/**
 * Analyzes a transaction statement: rejects it inside transactional
 * functions, tracks commit/rollback usage in the body, and requires at
 * least one commit.
 */
@Override
public void visit(BLangTransaction transactionNode, AnalyzerData data) {
// A transaction statement may not appear inside a transactional function.
if (data.transactionalFuncCheckStack.peek()) {
this.dlog.error(transactionNode.pos,
DiagnosticErrorCode.TRANSACTION_CANNOT_BE_USED_WITHIN_TRANSACTIONAL_SCOPE);
return;
}
data.errorTypes.push(new LinkedHashSet<>());
// Save transaction-tracking state; restored after the body so nested
// analysis does not leak into the enclosing scope.
boolean previousWithinTxScope = data.withinTransactionScope;
int previousCommitCount = data.commitCount;
int previousRollbackCount = data.rollbackCount;
boolean prevCommitRollbackAllowed = data.commitRollbackAllowed;
data.commitRollbackAllowed = true;
data.commitCount = 0;
data.rollbackCount = 0;
data.withinTransactionScope = true;
data.loopWithinTransactionCheckStack.push(false);
data.returnWithinTransactionCheckStack.push(false);
data.transactionCount++;
boolean failureHandled = data.failureHandled;
if (!failureHandled) {
data.failureHandled = transactionNode.onFailClause != null;
}
analyzeNode(transactionNode.transactionBody, data);
data.failureHandled = failureHandled;
// The body must contain at least one commit.
if (data.commitCount < 1) {
this.dlog.error(transactionNode.pos, DiagnosticErrorCode.INVALID_COMMIT_COUNT);
}
data.transactionCount--;
data.withinTransactionScope = previousWithinTxScope;
data.commitCount = previousCommitCount;
data.rollbackCount = previousRollbackCount;
data.commitRollbackAllowed = prevCommitRollbackAllowed;
data.returnWithinTransactionCheckStack.pop();
data.loopWithinTransactionCheckStack.pop();
analyzeOnFailClause(transactionNode.onFailClause, data);
data.errorTypes.pop();
}
/** Analyzes the on-fail clause when one is attached; null means no clause. */
private void analyzeOnFailClause(BLangOnFailClause onFailClause, AnalyzerData data) {
    if (onFailClause == null) {
        return;
    }
    analyzeNode(onFailClause, data);
}
// A `transactional` expression needs no additional code-analysis checks.
@Override
public void visit(BLangTransactionalExpr transactionalExpr, AnalyzerData data) {
}
/**
 * Validates a commit action: it must be inside a transaction block (not a
 * transactional function), not inside a loop within the transaction, and at
 * most once per execution path; after a commit the transaction scope ends.
 */
@Override
public void visit(BLangCommitExpr commitExpr, AnalyzerData data) {
data.commitCount++;
data.commitCountWithinBlock++;
if (data.transactionCount == 0) {
this.dlog.error(commitExpr.pos, DiagnosticErrorCode.COMMIT_CANNOT_BE_OUTSIDE_TRANSACTION_BLOCK);
return;
}
// NOTE(review): unlike visit(BLangRollback), this peek() is not guarded by
// an empty() check — presumably the stack is never empty here; confirm.
if (data.transactionalFuncCheckStack.peek()) {
this.dlog.error(commitExpr.pos, DiagnosticErrorCode.COMMIT_CANNOT_BE_WITHIN_TRANSACTIONAL_FUNCTION);
return;
}
if (!data.withinTransactionScope || !data.commitRollbackAllowed ||
data.loopWithinTransactionCheckStack.peek()) {
this.dlog.error(commitExpr.pos, DiagnosticErrorCode.COMMIT_NOT_ALLOWED);
return;
}
// Statements after a commit are no longer inside the transaction scope.
data.withinTransactionScope = false;
}
/**
 * Validates a rollback statement analogously to commit: only inside a
 * transaction block, never in a transactional function or a loop within
 * the transaction; after rollback the transaction scope ends.
 */
@Override
public void visit(BLangRollback rollbackNode, AnalyzerData data) {
data.rollbackCount++;
data.rollbackCountWithinBlock++;
if (data.transactionCount == 0 && !data.withinTransactionScope) {
this.dlog.error(rollbackNode.pos, DiagnosticErrorCode.ROLLBACK_CANNOT_BE_OUTSIDE_TRANSACTION_BLOCK);
return;
}
if (!data.transactionalFuncCheckStack.empty() && data.transactionalFuncCheckStack.peek()) {
this.dlog.error(rollbackNode.pos, DiagnosticErrorCode.ROLLBACK_CANNOT_BE_WITHIN_TRANSACTIONAL_FUNCTION);
return;
}
if (!data.withinTransactionScope || !data.commitRollbackAllowed ||
(!data.loopWithinTransactionCheckStack.empty() && data.loopWithinTransactionCheckStack.peek())) {
this.dlog.error(rollbackNode.pos, DiagnosticErrorCode.ROLLBACK_NOT_ALLOWED);
return;
}
// Statements after a rollback are no longer inside the transaction scope.
data.withinTransactionScope = false;
analyzeExpr(rollbackNode.expr, data);
}
/**
 * Analyzes a retry statement: its spec and body, failure-handling state,
 * and how failures in the body break out (to the on-fail clause if present).
 */
@Override
public void visit(BLangRetry retryNode, AnalyzerData data) {
data.errorTypes.push(new LinkedHashSet<>());
boolean failureHandled = data.failureHandled;
if (!failureHandled) {
data.failureHandled = retryNode.onFailClause != null;
}
visitNode(retryNode.retrySpec, data);
visitNode(retryNode.retryBody, data);
data.failureHandled = failureHandled;
// With an on-fail clause, a failure breaks to the outer block; otherwise
// the body is not breakable on failure.
retryNode.retryBody.failureBreakMode = retryNode.onFailClause != null ?
BLangBlockStmt.FailureBreakMode.BREAK_TO_OUTER_BLOCK : BLangBlockStmt.FailureBreakMode.NOT_BREAKABLE;
analyzeOnFailClause(retryNode.onFailClause, data);
data.errorTypes.pop();
}
/**
 * Validates that an explicit retry manager type is assignable to the
 * lang.error RetryManager abstract object type.
 */
@Override
public void visit(BLangRetrySpec retrySpec, AnalyzerData data) {
if (retrySpec.retryManagerType != null) {
BSymbol retryManagerTypeSymbol = symTable.langErrorModuleSymbol.scope
.lookup(names.fromString("RetryManager")).symbol;
BType abstractRetryManagerType = retryManagerTypeSymbol.type;
if (!types.isAssignable(retrySpec.retryManagerType.getBType(), abstractRetryManagerType)) {
dlog.error(retrySpec.pos, DiagnosticErrorCode.INVALID_INTERFACE_ON_NON_ABSTRACT_OBJECT,
RETRY_MANAGER_OBJECT_SHOULD_RETRY_FUNC, retrySpec.retryManagerType.getBType());
}
}
}
// Analyzes the retry spec first, then the wrapped transaction statement.
@Override
public void visit(BLangRetryTransaction retryTransaction, AnalyzerData data) {
analyzeNode(retryTransaction.retrySpec, data);
analyzeNode(retryTransaction.transaction, data);
}
/**
 * Analyzes a block statement and enforces that a single branch contains at
 * most one commit and at most one rollback.
 */
@Override
public void visit(BLangBlockStmt blockNode, AnalyzerData data) {
// Per-block counters are saved/restored so each branch is checked separately.
int prevCommitCount = data.commitCountWithinBlock;
int prevRollbackCount = data.rollbackCountWithinBlock;
data.commitCountWithinBlock = 0;
data.rollbackCountWithinBlock = 0;
boolean inInternallyDefinedBlockStmt = data.inInternallyDefinedBlockStmt;
data.inInternallyDefinedBlockStmt = checkBlockIsAnInternalBlockInImmediateFunctionBody(blockNode);
data.env = SymbolEnv.createBlockEnv(blockNode, data.env);
blockNode.stmts.forEach(e -> analyzeNode(e, data));
data.inInternallyDefinedBlockStmt = inInternallyDefinedBlockStmt;
if (data.commitCountWithinBlock > 1 || data.rollbackCountWithinBlock > 1) {
this.dlog.error(blockNode.pos, DiagnosticErrorCode.MAX_ONE_COMMIT_ROLLBACK_ALLOWED_WITHIN_A_BRANCH);
}
data.commitCountWithinBlock = prevCommitCount;
data.rollbackCountWithinBlock = prevRollbackCount;
}
/**
 * Walks up through enclosing BLOCK nodes and reports whether this block is
 * (transitively) nested directly inside a block function body.
 */
private boolean checkBlockIsAnInternalBlockInImmediateFunctionBody(BLangNode node) {
    for (BLangNode parent = node.parent; parent != null; parent = parent.parent) {
        NodeKind parentKind = parent.getKind();
        if (parentKind == NodeKind.BLOCK_FUNCTION_BODY) {
            return true;
        }
        if (parentKind != NodeKind.BLOCK) {
            // Any other construct in between breaks the chain.
            return false;
        }
    }
    return false;
}
/**
 * Validates a return statement (it may not exit an active transaction block)
 * and records the returned expression's type for return-type analysis.
 */
@Override
public void visit(BLangReturn returnStmt, AnalyzerData data) {
if (checkReturnValidityInTransaction(data)) {
this.dlog.error(returnStmt.pos, DiagnosticErrorCode.RETURN_CANNOT_BE_USED_TO_EXIT_TRANSACTION);
return;
}
analyzeExpr(returnStmt.expr, data);
data.returnTypes.peek().add(returnStmt.expr.getBType());
}
/**
 * Analyzes an if/else ladder while tracking commit/rollback usage inside
 * transactions and the scope introduced by a `transactional` guard condition.
 */
@Override
public void visit(BLangIf ifStmt, AnalyzerData data) {
    boolean independentBlocks = false;
    int prevCommitCount = data.commitCount;
    int prevRollbackCount = data.rollbackCount;
    BLangStatement elseStmt = ifStmt.elseStmt;
    if (data.withinTransactionScope && elseStmt != null && elseStmt.getKind() != NodeKind.IF) {
        // A terminal if/else pair forms two independent branches; each may
        // legally contain its own commit/rollback.
        independentBlocks = true;
        data.commitRollbackAllowed = true;
    }
    boolean prevTxMode = data.withinTransactionScope;
    // Unwrap a parenthesized condition so `if (transactional)` is treated the
    // same as `if transactional`.
    NodeKind conditionKind = ifStmt.expr.getKind() == NodeKind.GROUP_EXPR
            ? ((BLangGroupExpr) ifStmt.expr).expression.getKind()
            : ifStmt.expr.getKind();
    if (conditionKind == NodeKind.TRANSACTIONAL_EXPRESSION) {
        data.withinTransactionScope = true;
    }
    BLangBlockStmt body = ifStmt.body;
    analyzeNode(body, data);
    // Bug fix: restore using the unwrapped condition kind. Previously the raw
    // kind was checked here, so a parenthesized transactional guard left
    // withinTransactionScope permanently set.
    if (conditionKind == NodeKind.TRANSACTIONAL_EXPRESSION) {
        data.withinTransactionScope = prevTxMode;
    }
    if (elseStmt != null) {
        if (independentBlocks) {
            data.commitRollbackAllowed = true;
            data.withinTransactionScope = true;
        }
        analyzeNode(elseStmt, data);
        // A commit/rollback in only one of the two branches forbids further
        // commit/rollback afterwards.
        if ((prevCommitCount != data.commitCount) || prevRollbackCount != data.rollbackCount) {
            data.commitRollbackAllowed = false;
        }
    }
    analyzeExpr(ifStmt.expr, data);
}
/**
 * Analyzes a match statement: the matched expression, pairwise unreachability
 * checks between clauses, each clause, and the on-fail clause.
 */
@Override
public void visit(BLangMatchStatement matchStatement, AnalyzerData data) {
data.errorTypes.push(new LinkedHashSet<>());
analyzeExpr(matchStatement.expr, data);
boolean failureHandled = data.failureHandled;
if (!failureHandled) {
data.failureHandled = matchStatement.onFailClause != null;
}
List<BLangMatchClause> matchClauses = matchStatement.matchClauses;
int clausesSize = matchClauses.size();
// Compare every earlier clause with every later one: a later clause whose
// patterns are similar (with compatible guards) is unreachable.
for (int i = 0; i < clausesSize; i++) {
BLangMatchClause firstClause = matchClauses.get(i);
for (int j = i + 1; j < clausesSize; j++) {
BLangMatchClause secondClause = matchClauses.get(j);
if (!checkSimilarMatchGuard(firstClause.matchGuard, secondClause.matchGuard)) {
// Different guards: only an unguarded earlier clause can shadow.
if (firstClause.matchGuard == null) {
checkSimilarMatchPatternsBetweenClauses(firstClause, secondClause);
}
continue;
}
checkSimilarMatchPatternsBetweenClauses(firstClause, secondClause);
}
analyzeNode(firstClause, data);
}
data.failureHandled = failureHandled;
analyzeOnFailClause(matchStatement.onFailClause, data);
data.errorTypes.pop();
}
/**
 * Analyzes a match clause: warns on unmatched/unreachable patterns, checks
 * that all patterns in the clause bind the same set of variables, and then
 * analyzes the guard and the clause body.
 */
@Override
public void visit(BLangMatchClause matchClause, AnalyzerData data) {
Map<String, BVarSymbol> variablesInMatchPattern = new HashMap<>();
boolean patternListContainsSameVars = true;
List<BLangMatchPattern> matchPatterns = matchClause.matchPatterns;
BLangMatchGuard matchGuard = matchClause.matchGuard;
for (int i = 0; i < matchPatterns.size(); i++) {
BLangMatchPattern matchPattern = matchPatterns.get(i);
// noType means the pattern can never match the matched expression's type.
if (matchPattern.getBType() == symTable.noType) {
dlog.warning(matchPattern.pos, DiagnosticWarningCode.MATCH_STMT_UNMATCHED_PATTERN);
}
if (patternListContainsSameVars) {
patternListContainsSameVars = compareVariables(variablesInMatchPattern, matchPattern);
}
// A pattern similar to any earlier pattern in the same clause is unreachable.
for (int j = i - 1; j >= 0; j--) {
if (checkSimilarMatchPatterns(matchPatterns.get(j), matchPattern)) {
dlog.warning(matchPattern.pos, DiagnosticWarningCode.MATCH_STMT_PATTERN_UNREACHABLE);
}
}
analyzeNode(matchPattern, data);
}
if (matchGuard != null) {
analyzeNode(matchGuard, data);
}
if (!patternListContainsSameVars) {
dlog.error(matchClause.pos, DiagnosticErrorCode.MATCH_PATTERNS_SHOULD_CONTAIN_SAME_SET_OF_VARIABLES);
}
analyzeNode(matchClause.blockStmt, data);
}
// Mapping match patterns need no additional code-analysis checks.
@Override
public void visit(BLangMappingMatchPattern mappingMatchPattern, AnalyzerData data) {
}
// Field match patterns need no additional code-analysis checks.
@Override
public void visit(BLangFieldMatchPattern fieldMatchPattern, AnalyzerData data) {
}
// A match guard only needs its boolean expression analyzed.
@Override
public void visit(BLangMatchGuard matchGuard, AnalyzerData data) {
analyzeExpr(matchGuard.expr, data);
}
/**
 * Warns for every pattern in the later clause that is similar to some pattern
 * in the earlier clause, since such a pattern can never match.
 */
private void checkSimilarMatchPatternsBetweenClauses(BLangMatchClause firstClause, BLangMatchClause secondClause) {
    for (BLangMatchPattern earlierPattern : firstClause.matchPatterns) {
        for (BLangMatchPattern laterPattern : secondClause.matchPatterns) {
            if (checkSimilarMatchPatterns(earlierPattern, laterPattern)) {
                dlog.warning(laterPattern.pos, DiagnosticWarningCode.MATCH_STMT_PATTERN_UNREACHABLE);
            }
        }
    }
}
/**
 * Reports whether two match patterns are "similar", i.e. the second can never
 * match a value the first does not already match; dispatches by pattern kind.
 */
private boolean checkSimilarMatchPatterns(BLangMatchPattern firstPattern, BLangMatchPattern secondPattern) {
NodeKind firstPatternKind = firstPattern.getKind();
NodeKind secondPatternKind = secondPattern.getKind();
if (firstPatternKind != secondPatternKind) {
// Special case: an empty list/mapping pattern vs. `var []` / `var {}`.
if (firstPatternKind == NodeKind.VAR_BINDING_PATTERN_MATCH_PATTERN) {
return checkEmptyListOrMapMatchWithVarBindingPatternMatch(secondPattern,
((BLangVarBindingPatternMatchPattern) firstPattern));
}
if (secondPatternKind == NodeKind.VAR_BINDING_PATTERN_MATCH_PATTERN) {
return checkEmptyListOrMapMatchWithVarBindingPatternMatch(firstPattern,
((BLangVarBindingPatternMatchPattern) secondPattern));
}
return false;
}
switch (firstPatternKind) {
case WILDCARD_MATCH_PATTERN:
case REST_MATCH_PATTERN:
// Wildcard/rest patterns always overlap.
return true;
case CONST_MATCH_PATTERN:
return checkSimilarConstMatchPattern((BLangConstPattern) firstPattern,
(BLangConstPattern) secondPattern);
case VAR_BINDING_PATTERN_MATCH_PATTERN:
return checkSimilarBindingPatterns(
((BLangVarBindingPatternMatchPattern) firstPattern).getBindingPattern(),
((BLangVarBindingPatternMatchPattern) secondPattern).getBindingPattern());
case LIST_MATCH_PATTERN:
return checkSimilarListMatchPattern((BLangListMatchPattern) firstPattern,
(BLangListMatchPattern) secondPattern);
case MAPPING_MATCH_PATTERN:
return checkSimilarMappingMatchPattern((BLangMappingMatchPattern) firstPattern,
(BLangMappingMatchPattern) secondPattern);
case ERROR_MATCH_PATTERN:
return checkSimilarErrorMatchPattern((BLangErrorMatchPattern) firstPattern,
(BLangErrorMatchPattern) secondPattern);
default:
return false;
}
}
/**
 * Checks whether an empty list/mapping match pattern and a var-binding
 * pattern cover the same (empty) value, e.g. `[]` vs `var []` or `{}` vs
 * `var {}` — both with no member and no rest patterns.
 */
private boolean checkEmptyListOrMapMatchWithVarBindingPatternMatch(BLangMatchPattern firstPattern,
                                               BLangVarBindingPatternMatchPattern secondPattern) {
    NodeKind patternKind = firstPattern.getKind();
    BLangBindingPattern bindingPattern = secondPattern.getBindingPattern();
    if (patternKind == NodeKind.LIST_MATCH_PATTERN) {
        if (bindingPattern.getKind() != NodeKind.LIST_BINDING_PATTERN) {
            return false;
        }
        BLangListMatchPattern listMatch = (BLangListMatchPattern) firstPattern;
        BLangListBindingPattern listBinding = (BLangListBindingPattern) bindingPattern;
        return listMatch.matchPatterns.isEmpty() && listBinding.bindingPatterns.isEmpty()
                && listMatch.restMatchPattern == null && listBinding.restBindingPattern == null;
    }
    if (patternKind == NodeKind.MAPPING_MATCH_PATTERN) {
        if (bindingPattern.getKind() != NodeKind.MAPPING_BINDING_PATTERN) {
            return false;
        }
        BLangMappingMatchPattern mappingMatch = (BLangMappingMatchPattern) firstPattern;
        BLangMappingBindingPattern mappingBinding = (BLangMappingBindingPattern) bindingPattern;
        return mappingMatch.fieldMatchPatterns.isEmpty() && mappingBinding.fieldBindingPatterns.isEmpty()
                && mappingMatch.restMatchPattern == null && mappingBinding.restBindingPattern == null;
    }
    return false;
}
/**
 * Two error match patterns are similar when their type references, message
 * patterns, cause patterns and field patterns are all pairwise similar.
 */
private boolean checkSimilarErrorMatchPattern(BLangErrorMatchPattern firstErrorMatchPattern,
                                              BLangErrorMatchPattern secondErrorMatchPattern) {
    if (firstErrorMatchPattern == null || secondErrorMatchPattern == null) {
        return false;
    }
    return checkSimilarErrorTypeReference(firstErrorMatchPattern.errorTypeReference,
                    secondErrorMatchPattern.errorTypeReference)
            && checkSimilarErrorMessagePattern(firstErrorMatchPattern.errorMessageMatchPattern,
                    secondErrorMatchPattern.errorMessageMatchPattern)
            && checkSimilarErrorCauseMatchPattern(firstErrorMatchPattern.errorCauseMatchPattern,
                    secondErrorMatchPattern.errorCauseMatchPattern)
            && checkSimilarErrorFieldMatchPatterns(firstErrorMatchPattern.errorFieldMatchPatterns,
                    secondErrorMatchPattern.errorFieldMatchPatterns);
}
/**
 * Similar when both type references are absent, or both are present with the
 * same type name.
 */
private boolean checkSimilarErrorTypeReference(BLangUserDefinedType firstErrorTypeRef,
                                               BLangUserDefinedType secondErrorTypeRef) {
    if (firstErrorTypeRef == null || secondErrorTypeRef == null) {
        // Only the both-null case counts as similar.
        return firstErrorTypeRef == secondErrorTypeRef;
    }
    return firstErrorTypeRef.typeName.value.equals(secondErrorTypeRef.typeName.value);
}
/**
 * Similar when both message patterns are absent, or both are present with
 * similar simple match patterns.
 */
private boolean checkSimilarErrorMessagePattern(BLangErrorMessageMatchPattern firstErrorMsgMatchPattern,
                                                BLangErrorMessageMatchPattern secondErrorMsgMatchPattern) {
    if (firstErrorMsgMatchPattern == null || secondErrorMsgMatchPattern == null) {
        return firstErrorMsgMatchPattern == secondErrorMsgMatchPattern;
    }
    return checkSimilarSimpleMatchPattern(firstErrorMsgMatchPattern.simpleMatchPattern,
            secondErrorMsgMatchPattern.simpleMatchPattern);
}
/**
 * Compares two simple match patterns: a variable pattern in the first always
 * overlaps; const patterns are compared by value; otherwise both must have
 * neither a const nor a variable pattern, or both be absent.
 */
private boolean checkSimilarSimpleMatchPattern(BLangSimpleMatchPattern firstSimpleMatchPattern,
BLangSimpleMatchPattern secondSimpleMatchPattern) {
if (firstSimpleMatchPattern != null && secondSimpleMatchPattern != null) {
// A variable pattern in the earlier clause matches anything.
if (firstSimpleMatchPattern.varVariableName != null) {
return true;
}
BLangConstPattern firstConstPattern = firstSimpleMatchPattern.constPattern;
BLangConstPattern secondConstPattern = secondSimpleMatchPattern.constPattern;
if (firstConstPattern != null) {
if (secondConstPattern != null) {
return checkSimilarConstMatchPattern(firstConstPattern, secondConstPattern);
}
return false;
}
// First has neither var nor const: similar only if the second also has no var.
return secondSimpleMatchPattern.varVariableName == null;
}
return firstSimpleMatchPattern == null && secondSimpleMatchPattern == null;
}
/**
 * Similar when both cause patterns are absent, or both are present with
 * similar simple patterns and similar nested error patterns.
 */
private boolean checkSimilarErrorCauseMatchPattern(BLangErrorCauseMatchPattern firstErrorCauseMatchPattern,
                                                   BLangErrorCauseMatchPattern secondErrorCauseMatchPattern) {
    if (firstErrorCauseMatchPattern == null || secondErrorCauseMatchPattern == null) {
        return firstErrorCauseMatchPattern == secondErrorCauseMatchPattern;
    }
    return checkSimilarSimpleMatchPattern(firstErrorCauseMatchPattern.simpleMatchPattern,
                    secondErrorCauseMatchPattern.simpleMatchPattern)
            && checkSimilarErrorMatchPattern(firstErrorCauseMatchPattern.errorMatchPattern,
                    secondErrorCauseMatchPattern.errorMatchPattern);
}
/**
 * Compares the named-argument field patterns of two error patterns; the first
 * set must be a (positional) prefix of the second with pairwise-similar args.
 */
private boolean checkSimilarErrorFieldMatchPatterns(BLangErrorFieldMatchPatterns firstErrorFieldMatchPatterns,
BLangErrorFieldMatchPatterns secondErrorFieldMatchPatterns) {
// No field patterns in the first clause constrains nothing.
if (firstErrorFieldMatchPatterns == null) {
return true;
}
List<BLangNamedArgMatchPattern> firstNamedArgPatterns = firstErrorFieldMatchPatterns.namedArgMatchPatterns;
int firstNamedArgPatternsSize = firstNamedArgPatterns.size();
if (firstNamedArgPatternsSize == 0) {
return true;
}
if (secondErrorFieldMatchPatterns == null) {
return false;
}
List<BLangNamedArgMatchPattern> secondNamedArgPatterns = secondErrorFieldMatchPatterns.namedArgMatchPatterns;
if (firstNamedArgPatternsSize > secondNamedArgPatterns.size()) {
return false;
}
// NOTE(review): named args are compared by position (index i to index i),
// not looked up by name — confirm this is the intended semantics.
for (int i = 0; i < firstNamedArgPatternsSize; i++) {
if (!checkSimilarNamedArgMatchPatterns(firstNamedArgPatterns.get(i), secondNamedArgPatterns.get(i))) {
return false;
}
}
return true;
}
/** Similar when the argument names match and the nested patterns are similar. */
private boolean checkSimilarNamedArgMatchPatterns(BLangNamedArgMatchPattern firstNamedArgMatchPattern,
                                                  BLangNamedArgMatchPattern secondNamedArgMatchPattern) {
    if (!firstNamedArgMatchPattern.argName.value.equals(secondNamedArgMatchPattern.argName.value)) {
        return false;
    }
    return checkSimilarMatchPatterns(firstNamedArgMatchPattern.matchPattern,
            secondNamedArgMatchPattern.matchPattern);
}
/**
 * Compares two constant match patterns for equal constant values, resolving
 * single-member finite types (constant references) to their literal value.
 */
private boolean checkSimilarConstMatchPattern(BLangConstPattern firstConstMatchPattern,
                                              BLangConstPattern secondConstMatchPattern) {
    // Compute each pattern's (value, type) pair once instead of four times.
    Map.Entry<Object, BType> firstEntry = getConstValue(firstConstMatchPattern).entrySet().iterator().next();
    Map.Entry<Object, BType> secondEntry = getConstValue(secondConstMatchPattern).entrySet().iterator().next();
    Object firstConstValue = firstEntry.getKey();
    Object secondConstValue = secondEntry.getKey();
    BType firstConstType = firstEntry.getValue();
    BType secondConstType = secondEntry.getValue();
    if (firstConstValue == null || secondConstValue == null) {
        return false;
    }
    if (firstConstValue.equals(secondConstValue)) {
        return true;
    }
    // Resolve constant references through single-member finite types.
    // Bug fix: cast the *referred* type — the tag is checked on the referred
    // type, so casting the raw type would CCE on a type-reference to a finite.
    if (firstConstType != null) {
        BType referredFirstType = Types.getReferredType(firstConstType);
        if (referredFirstType.tag == TypeTags.FINITE) {
            firstConstValue = getConstValueFromFiniteType((BFiniteType) referredFirstType);
        }
    }
    if (secondConstType != null) {
        BType referredSecondType = Types.getReferredType(secondConstType);
        if (referredSecondType.tag == TypeTags.FINITE) {
            secondConstValue = getConstValueFromFiniteType((BFiniteType) referredSecondType);
        }
    }
    if (firstConstValue == null || secondConstValue == null) {
        return false;
    }
    return firstConstValue.equals(secondConstValue);
}
/**
 * Returns a single-entry map serving as a (constant value, constant type)
 * pair for the given const pattern; the type is null except for simple
 * variable references (constant references).
 */
private HashMap<Object, BType> getConstValue(BLangConstPattern constPattern) {
HashMap<Object, BType> constValAndType = new HashMap<>();
switch (constPattern.expr.getKind()) {
case NUMERIC_LITERAL:
constValAndType.put(((BLangNumericLiteral) constPattern.expr).value, null);
break;
case LITERAL:
constValAndType.put(((BLangLiteral) constPattern.expr).value, null);
break;
case SIMPLE_VARIABLE_REF:
// NOTE(review): the key here is the variable-name identifier node, which
// callers compare via equals() — confirm identifiers compare by value.
constValAndType.put(((BLangSimpleVarRef) constPattern.expr).variableName, constPattern.getBType());
break;
case UNARY_EXPR:
// Fold e.g. `-1` into a numeric literal before extracting its value.
BLangNumericLiteral newNumericLiteral = Types.constructNumericLiteralFromUnaryExpr(
(BLangUnaryExpr) constPattern.expr);
constValAndType.put(newNumericLiteral.value, null);
}
return constValAndType;
}
/**
 * Extracts the literal value from a finite type with exactly one member;
 * returns null for multi-member finite types or non-literal members.
 */
private Object getConstValueFromFiniteType(BFiniteType type) {
    if (type.getValueSpace().size() != 1) {
        return null;
    }
    BLangExpression expr = type.getValueSpace().iterator().next();
    NodeKind exprKind = expr.getKind();
    if (exprKind == NodeKind.NUMERIC_LITERAL) {
        return ((BLangNumericLiteral) expr).value;
    }
    if (exprKind == NodeKind.LITERAL) {
        return ((BLangLiteral) expr).value;
    }
    return null;
}
/**
 * Two list match patterns are similar when the first's member patterns are a
 * pairwise-similar prefix of the second's and the rest patterns are
 * compatible (a shorter first list requires a rest pattern on the first).
 */
private boolean checkSimilarListMatchPattern(BLangListMatchPattern firstListMatchPattern,
                                             BLangListMatchPattern secondListMatchPattern) {
    List<BLangMatchPattern> firstMatchPatterns = firstListMatchPattern.matchPatterns;
    List<BLangMatchPattern> secondMatchPatterns = secondListMatchPattern.matchPatterns;
    int firstSize = firstMatchPatterns.size();
    int secondSize = secondMatchPatterns.size();
    if (firstSize > secondSize) {
        return false;
    }
    for (int i = 0; i < firstSize; i++) {
        if (!checkSimilarMatchPatterns(firstMatchPatterns.get(i), secondMatchPatterns.get(i))) {
            return false;
        }
    }
    if (firstSize < secondSize) {
        // The first pattern only covers the extra members via its rest pattern.
        return firstListMatchPattern.restMatchPattern != null;
    }
    // Equal lengths: a rest on the first always covers; otherwise the second
    // must not have a rest pattern either.
    return firstListMatchPattern.restMatchPattern != null
            || secondListMatchPattern.restMatchPattern == null;
}
/** Two mapping match patterns are similar when their field patterns are. */
private boolean checkSimilarMappingMatchPattern(BLangMappingMatchPattern firstMappingMatchPattern,
                                                BLangMappingMatchPattern secondMappingMatchPattern) {
    return checkSimilarFieldMatchPatterns(firstMappingMatchPattern.fieldMatchPatterns,
            secondMappingMatchPattern.fieldMatchPatterns);
}
/**
 * True when every field pattern in the first list has a similar counterpart
 * somewhere in the second list.
 */
private boolean checkSimilarFieldMatchPatterns(List<BLangFieldMatchPattern> firstFieldMatchPatterns,
                                               List<BLangFieldMatchPattern> secondFieldMatchPatterns) {
    return firstFieldMatchPatterns.stream().allMatch(
            firstPattern -> secondFieldMatchPatterns.stream().anyMatch(
                    secondPattern -> checkSimilarFieldMatchPattern(firstPattern, secondPattern)));
}
/** Similar when the field names match and the nested patterns are similar. */
private boolean checkSimilarFieldMatchPattern(BLangFieldMatchPattern firstFieldMatchPattern,
                                              BLangFieldMatchPattern secondFieldMatchPattern) {
    if (!firstFieldMatchPattern.fieldName.value.equals(secondFieldMatchPattern.fieldName.value)) {
        return false;
    }
    return checkSimilarMatchPatterns(firstFieldMatchPattern.matchPattern,
            secondFieldMatchPattern.matchPattern);
}
/**
 * Reports whether two binding patterns are similar (overlapping coverage);
 * dispatches by binding-pattern kind, mirroring checkSimilarMatchPatterns.
 */
private boolean checkSimilarBindingPatterns(BLangBindingPattern firstBidingPattern,
BLangBindingPattern secondBindingPattern) {
NodeKind firstBindingPatternKind = firstBidingPattern.getKind();
NodeKind secondBindingPatternKind = secondBindingPattern.getKind();
if (firstBindingPatternKind != secondBindingPatternKind) {
return false;
}
switch (firstBindingPatternKind) {
case WILDCARD_BINDING_PATTERN:
case REST_BINDING_PATTERN:
case CAPTURE_BINDING_PATTERN:
// These bind anything, so two of the same kind always overlap.
return true;
case LIST_BINDING_PATTERN:
return checkSimilarListBindingPatterns((BLangListBindingPattern) firstBidingPattern,
(BLangListBindingPattern) secondBindingPattern);
case MAPPING_BINDING_PATTERN:
return checkSimilarMappingBindingPattern((BLangMappingBindingPattern) firstBidingPattern,
(BLangMappingBindingPattern) secondBindingPattern);
case ERROR_BINDING_PATTERN:
return checkSimilarErrorBindingPatterns((BLangErrorBindingPattern) firstBidingPattern,
(BLangErrorBindingPattern) secondBindingPattern);
default:
return false;
}
}
/** Two mapping binding patterns are similar when their field patterns are. */
private boolean checkSimilarMappingBindingPattern(BLangMappingBindingPattern firstMappingBindingPattern,
                                                  BLangMappingBindingPattern secondMappingBindingPattern) {
    return checkSimilarFieldBindingPatterns(firstMappingBindingPattern.fieldBindingPatterns,
            secondMappingBindingPattern.fieldBindingPatterns);
}
private boolean checkSimilarFieldBindingPatterns(List<BLangFieldBindingPattern> firstFieldBindingPatterns,
List<BLangFieldBindingPattern> secondFieldBindingPatterns) {
for (BLangFieldBindingPattern firstFieldBindingPattern : firstFieldBindingPatterns) {
boolean isSamePattern = false;
for (BLangFieldBindingPattern secondFieldBindingPattern : secondFieldBindingPatterns) {
if (checkSimilarFieldBindingPattern(firstFieldBindingPattern, secondFieldBindingPattern)) {
isSamePattern = true;
break;
}
}
if (!isSamePattern) {
return false;
}
}
return true;
}
private boolean checkSimilarFieldBindingPattern(BLangFieldBindingPattern firstFieldBindingPattern,
BLangFieldBindingPattern secondFieldBindingPattern) {
boolean hasSameFieldNames = firstFieldBindingPattern.fieldName.value.
equals(secondFieldBindingPattern.fieldName.value);
if (firstFieldBindingPattern.bindingPattern.getKind() == secondFieldBindingPattern.bindingPattern.getKind()) {
return hasSameFieldNames && checkSimilarBindingPatterns(firstFieldBindingPattern.bindingPattern,
secondFieldBindingPattern.bindingPattern);
}
return hasSameFieldNames && firstFieldBindingPattern.bindingPattern.getKind() ==
NodeKind.CAPTURE_BINDING_PATTERN;
}
    /**
     * Checks whether two list binding patterns are considered similar for unreachable-pattern analysis.
     *
     * @param firstBindingPattern  the earlier (already seen) list binding pattern
     * @param secondBindingPattern the later list binding pattern
     * @return {@code true} when the second pattern is similar to (covered by) the first
     */
    private boolean checkSimilarListBindingPatterns(BLangListBindingPattern firstBindingPattern,
                                                    BLangListBindingPattern secondBindingPattern) {
        List<BLangBindingPattern> firstPatterns = firstBindingPattern.bindingPatterns;
        List<BLangBindingPattern> secondPatterns = secondBindingPattern.bindingPatterns;
        int firstPatternsSize = firstPatterns.size();
        int secondPatternsSize = secondPatterns.size();
        if (firstPatternsSize <= secondPatternsSize) {
            // Compare members pairwise; on the first dissimilar pair the result depends on whether the
            // first pattern's member is a capture binding pattern (which matches any value).
            for (int i = 0; i < firstPatternsSize; i++) {
                if (!checkSimilarBindingPatterns(firstPatterns.get(i), secondPatterns.get(i))) {
                    return firstPatterns.get(i).getKind() == NodeKind.CAPTURE_BINDING_PATTERN;
                }
            }
            if (firstPatternsSize == secondPatternsSize) {
                // Equal lengths: similar when the first has a rest pattern, or when neither has one.
                if (firstBindingPattern.restBindingPattern != null) {
                    return true;
                }
                return secondBindingPattern.restBindingPattern == null;
            }
            // NOTE(review): when the first pattern is shorter, similarity requires the SECOND pattern
            // to carry a rest binding pattern — confirm this asymmetry is intended.
            return secondBindingPattern.restBindingPattern != null;
        }
        return false;
    }
private boolean checkSimilarErrorBindingPatterns(BLangErrorBindingPattern firstErrorBindingPattern,
BLangErrorBindingPattern secondErrorBindingPattern) {
if (firstErrorBindingPattern == null || secondErrorBindingPattern == null) {
return false;
}
if (!checkSimilarErrorTypeReference(firstErrorBindingPattern.errorTypeReference,
secondErrorBindingPattern.errorTypeReference)) {
return false;
}
if (!checkSimilarErrorMessageBindingPattern(firstErrorBindingPattern.errorMessageBindingPattern,
secondErrorBindingPattern.errorMessageBindingPattern)) {
return false;
}
if (!checkSimilarErrorCauseBindingPattern(firstErrorBindingPattern.errorCauseBindingPattern,
secondErrorBindingPattern.errorCauseBindingPattern)) {
return false;
}
return checkSimilarErrorFieldBindingPatterns(firstErrorBindingPattern.errorFieldBindingPatterns,
secondErrorBindingPattern.errorFieldBindingPatterns);
}
private boolean checkSimilarErrorMessageBindingPattern(BLangErrorMessageBindingPattern firstErrorMsgBindingPattern,
BLangErrorMessageBindingPattern secondErrorMsgBindingPattern) {
if (firstErrorMsgBindingPattern != null && secondErrorMsgBindingPattern != null) {
return checkSimilarSimpleBindingPattern(firstErrorMsgBindingPattern.simpleBindingPattern,
secondErrorMsgBindingPattern.simpleBindingPattern);
}
return firstErrorMsgBindingPattern == null && secondErrorMsgBindingPattern == null;
}
private boolean checkSimilarSimpleBindingPattern(BLangSimpleBindingPattern firstSimpleBindingPattern,
BLangSimpleBindingPattern secondSimpleBindingPattern) {
if (firstSimpleBindingPattern != null && secondSimpleBindingPattern != null) {
BLangBindingPattern firstCaptureBindingPattern = firstSimpleBindingPattern.captureBindingPattern;
BLangBindingPattern secondCaptureBindingPattern = secondSimpleBindingPattern.captureBindingPattern;
if (firstCaptureBindingPattern != null && secondCaptureBindingPattern != null) {
return checkSimilarBindingPatterns(firstCaptureBindingPattern, secondCaptureBindingPattern);
}
return firstSimpleBindingPattern.wildCardBindingPattern != null;
}
return firstSimpleBindingPattern == null && secondSimpleBindingPattern == null;
}
private boolean checkSimilarErrorCauseBindingPattern(BLangErrorCauseBindingPattern firstErrorCauseBindingPattern,
BLangErrorCauseBindingPattern secondErrorCauseBindingPattern) {
if (firstErrorCauseBindingPattern != null && secondErrorCauseBindingPattern != null) {
if (!checkSimilarSimpleBindingPattern(firstErrorCauseBindingPattern.simpleBindingPattern,
secondErrorCauseBindingPattern.simpleBindingPattern)) {
return false;
}
return checkSimilarErrorBindingPatterns(firstErrorCauseBindingPattern.errorBindingPattern,
secondErrorCauseBindingPattern.errorBindingPattern);
}
return firstErrorCauseBindingPattern == null && secondErrorCauseBindingPattern == null;
}
private boolean checkSimilarErrorFieldBindingPatterns(
BLangErrorFieldBindingPatterns firstErrorFieldBindingPatterns,
BLangErrorFieldBindingPatterns secondErrorFieldBindingPatterns) {
if (firstErrorFieldBindingPatterns == null) {
return true;
}
List<BLangNamedArgBindingPattern> firstNamedArgPatterns =
firstErrorFieldBindingPatterns.namedArgBindingPatterns;
int firstNamedArgPatternsSize = firstNamedArgPatterns.size();
if (firstNamedArgPatternsSize == 0) {
return true;
}
if (secondErrorFieldBindingPatterns == null) {
return false;
}
List<BLangNamedArgBindingPattern> secondNamedArgPatterns =
secondErrorFieldBindingPatterns.namedArgBindingPatterns;
if (firstNamedArgPatternsSize > secondNamedArgPatterns.size()) {
return false;
}
for (int i = 0; i < firstNamedArgPatternsSize; i++) {
if (!checkSimilarNamedArgBindingPatterns(firstNamedArgPatterns.get(i), secondNamedArgPatterns.get(i))) {
return false;
}
}
return true;
}
private boolean checkSimilarNamedArgBindingPatterns(BLangNamedArgBindingPattern firstNamedArgBindingPattern,
BLangNamedArgBindingPattern secondNamedArgBindingPattern) {
if (firstNamedArgBindingPattern.argName.value.equals(secondNamedArgBindingPattern.argName.value)) {
return checkSimilarBindingPatterns(firstNamedArgBindingPattern.bindingPattern,
secondNamedArgBindingPattern.bindingPattern);
}
return false;
}
    /**
     * Checks whether two match guards are considered similar for unreachability analysis.
     * Guards are similar when both are absent, or when both are `x is T` type tests on the same
     * simple variable and the first tested type is assignable to the second.
     */
    private boolean checkSimilarMatchGuard(BLangMatchGuard firstMatchGuard, BLangMatchGuard secondMatchGuard) {
        if (firstMatchGuard == null && secondMatchGuard == null) {
            return true;
        }
        if (firstMatchGuard == null || secondMatchGuard == null) {
            return false;
        }
        // Only the type-test-over-simple-var-ref guard form is comparable; any other guard
        // expression is conservatively treated as dissimilar.
        if (firstMatchGuard.expr.getKind() == NodeKind.TYPE_TEST_EXPR &&
                secondMatchGuard.expr.getKind() == NodeKind.TYPE_TEST_EXPR &&
                ((BLangTypeTestExpr) firstMatchGuard.expr).expr.getKind() == NodeKind.SIMPLE_VARIABLE_REF &&
                ((BLangTypeTestExpr) secondMatchGuard.expr).expr.getKind() == NodeKind.SIMPLE_VARIABLE_REF) {
            BLangTypeTestExpr firstTypeTest = (BLangTypeTestExpr) firstMatchGuard.expr;
            BLangTypeTestExpr secondTypeTest = (BLangTypeTestExpr) secondMatchGuard.expr;
            return ((BLangSimpleVarRef) firstTypeTest.expr).variableName.toString().equals(
                    ((BLangSimpleVarRef) secondTypeTest.expr).variableName.toString()) &&
                    types.isAssignable(firstTypeTest.typeNode.getBType(),
                            secondTypeTest.typeNode.getBType());
        }
        return false;
    }
private boolean compareVariables(Map<String, BVarSymbol> varsInPreviousMatchPattern,
BLangMatchPattern matchPattern) {
Map<String, BVarSymbol> varsInCurrentMatchPattern = matchPattern.declaredVars;
if (varsInPreviousMatchPattern.size() == 0) {
varsInPreviousMatchPattern.putAll(varsInCurrentMatchPattern);
return true;
}
if (varsInPreviousMatchPattern.size() != varsInCurrentMatchPattern.size()) {
return false;
}
for (String identifier : varsInPreviousMatchPattern.keySet()) {
if (!varsInCurrentMatchPattern.containsKey(identifier)) {
return false;
}
}
return true;
}
    @Override
    public void visit(BLangWildCardMatchPattern wildCardMatchPattern, AnalyzerData data) {
        // `_` counts as the last (exhaustive) pattern only when the matched expression's static
        // type is assignable to `any`.
        wildCardMatchPattern.isLastPattern =
                wildCardMatchPattern.matchExpr != null && types.isAssignable(wildCardMatchPattern.matchExpr.getBType(),
                        symTable.anyType);
    }
    @Override
    public void visit(BLangConstPattern constMatchPattern, AnalyzerData data) {
        // Only the constant expression itself needs analysis here.
        analyzeNode(constMatchPattern.expr, data);
    }
    @Override
    public void visit(BLangVarBindingPatternMatchPattern varBindingPattern, AnalyzerData data) {
        BLangBindingPattern bindingPattern = varBindingPattern.getBindingPattern();
        analyzeNode(bindingPattern, data);
        // Decide whether this pattern is the last (exhaustive) one, based on the binding pattern kind.
        switch (bindingPattern.getKind()) {
            case WILDCARD_BINDING_PATTERN:
                // Wildcard: exhaustive when the matched expression's type is assignable to `any`.
                varBindingPattern.isLastPattern =
                        varBindingPattern.matchExpr != null && types.isAssignable(
                                varBindingPattern.matchExpr.getBType(),
                                symTable.anyType);
                return;
            case CAPTURE_BINDING_PATTERN:
                // Capture: exhaustive unless constrained by a match guard.
                varBindingPattern.isLastPattern =
                        varBindingPattern.matchExpr != null && !varBindingPattern.matchGuardIsAvailable;
                return;
            case LIST_BINDING_PATTERN:
                if (varBindingPattern.matchExpr == null) {
                    return;
                }
                // List: exhaustive when the matched expression's type is the same as, or assignable
                // to, the pattern's type.
                varBindingPattern.isLastPattern = types.isSameType(varBindingPattern.matchExpr.getBType(),
                        varBindingPattern.getBType()) || types.isAssignable(
                        varBindingPattern.matchExpr.getBType(),
                        varBindingPattern.getBType());
        }
    }
    @Override
    public void visit(BLangMappingBindingPattern mappingBindingPattern, AnalyzerData data) {
        // No additional analysis is required for mapping binding patterns.
    }
    @Override
    public void visit(BLangWildCardBindingPattern wildCardBindingPattern, AnalyzerData data) {
        // No additional analysis is required for wildcard binding patterns.
    }
    @Override
    public void visit(BLangListMatchPattern listMatchPattern, AnalyzerData data) {
        if (listMatchPattern.matchExpr == null) {
            return;
        }
        // A list match pattern is the last (exhaustive) pattern when the matched expression's type
        // is assignable to the pattern's type and no constant sub-pattern narrows the match.
        listMatchPattern.isLastPattern = types.isAssignable(listMatchPattern.matchExpr.getBType(),
                listMatchPattern.getBType()) && !isConstMatchPatternExist(listMatchPattern);
    }
private boolean isConstMatchPatternExist(BLangMatchPattern matchPattern) {
switch (matchPattern.getKind()) {
case CONST_MATCH_PATTERN:
return true;
case LIST_MATCH_PATTERN:
for (BLangMatchPattern memberMatchPattern : ((BLangListMatchPattern) matchPattern).matchPatterns) {
if (isConstMatchPatternExist(memberMatchPattern)) {
return true;
}
}
return false;
case MAPPING_MATCH_PATTERN:
for (BLangFieldMatchPattern fieldMatchPattern :
((BLangMappingMatchPattern) matchPattern).fieldMatchPatterns) {
if (isConstMatchPatternExist(fieldMatchPattern.matchPattern)) {
return true;
}
}
return false;
default:
return false;
}
}
    @Override
    public void visit(BLangCaptureBindingPattern captureBindingPattern, AnalyzerData data) {
        // No additional analysis is required for capture binding patterns.
    }
    @Override
    public void visit(BLangListBindingPattern listBindingPattern, AnalyzerData data) {
        // No additional analysis is required for list binding patterns.
    }
    @Override
    public void visit(BLangErrorMatchPattern errorMatchPattern, AnalyzerData data) {
        // No additional analysis is required for error match patterns.
    }
    @Override
    public void visit(BLangErrorBindingPattern errorBindingPattern, AnalyzerData data) {
        // No additional analysis is required for error binding patterns.
    }
    @Override
    public void visit(BLangForeach foreach, AnalyzerData data) {
        // Loop analysis brackets the body with transaction-exit tracking, error-type collection
        // and loop-depth counting; all of this state is restored after the body is analyzed.
        data.loopWithinTransactionCheckStack.push(true);
        data.errorTypes.push(new LinkedHashSet<>());
        boolean failureHandled = data.failureHandled;
        if (!data.failureHandled) {
            // An on-fail clause on this loop handles failures raised inside the body.
            data.failureHandled = foreach.onFailClause != null;
        }
        data.loopCount++;
        BLangBlockStmt body = foreach.body;
        data.env = SymbolEnv.createLoopEnv(foreach, data.env);
        analyzeNode(body, data);
        data.loopCount--;
        data.failureHandled = failureHandled;
        data.loopWithinTransactionCheckStack.pop();
        analyzeExpr(foreach.collection, data);
        // With an on-fail clause present, a failure inside the body breaks to the outer block.
        body.failureBreakMode = foreach.onFailClause != null ?
                BLangBlockStmt.FailureBreakMode.BREAK_TO_OUTER_BLOCK : BLangBlockStmt.FailureBreakMode.NOT_BREAKABLE;
        analyzeOnFailClause(foreach.onFailClause, data);
        data.errorTypes.pop();
    }
    @Override
    public void visit(BLangWhile whileNode, AnalyzerData data) {
        // Same bracketing as foreach: transaction-exit tracking, error-type collection and
        // loop-depth counting around the body, restored afterwards.
        data.loopWithinTransactionCheckStack.push(true);
        data.errorTypes.push(new LinkedHashSet<>());
        boolean failureHandled = data.failureHandled;
        if (!data.failureHandled) {
            // An on-fail clause on this loop handles failures raised inside the body.
            data.failureHandled = whileNode.onFailClause != null;
        }
        data.loopCount++;
        BLangBlockStmt body = whileNode.body;
        data.env = SymbolEnv.createLoopEnv(whileNode, data.env);
        analyzeNode(body, data);
        data.loopCount--;
        data.failureHandled = failureHandled;
        data.loopWithinTransactionCheckStack.pop();
        analyzeExpr(whileNode.expr, data);
        analyzeOnFailClause(whileNode.onFailClause, data);
        data.errorTypes.pop();
    }
    @Override
    public void visit(BLangDo doNode, AnalyzerData data) {
        // Collect error types raised within the do block for on-fail analysis.
        data.errorTypes.push(new LinkedHashSet<>());
        boolean failureHandled = data.failureHandled;
        if (!data.failureHandled) {
            // An on-fail clause on this do statement handles failures raised inside the body.
            data.failureHandled = doNode.onFailClause != null;
        }
        analyzeNode(doNode.body, data);
        data.failureHandled = failureHandled;
        // With an on-fail clause present, a failure inside the body breaks to the outer block.
        doNode.body.failureBreakMode = doNode.onFailClause != null ?
                BLangBlockStmt.FailureBreakMode.BREAK_TO_OUTER_BLOCK : BLangBlockStmt.FailureBreakMode.NOT_BREAKABLE;
        analyzeOnFailClause(doNode.onFailClause, data);
        data.errorTypes.pop();
    }
    @Override
    public void visit(BLangFail failNode, AnalyzerData data) {
        data.failVisited = true;
        analyzeExpr(failNode.expr, data);
        if (data.env.scope.owner.getKind() == SymbolKind.PACKAGE) {
            // Package-level owner: no enclosing invokable to validate against.
            return;
        }
        typeChecker.checkExpr(failNode.expr, data.env);
        if (!data.errorTypes.empty()) {
            // Record the failed error type so enclosing constructs (e.g. on-fail) can observe it.
            data.errorTypes.peek().add(getErrorTypes(failNode.expr.getBType()));
        }
        if (!data.failureHandled) {
            // With no enclosing on-fail, the error escapes through the invokable's return type,
            // which must therefore admit the failed error type.
            BType exprType = data.env.enclInvokable.getReturnTypeNode().getBType();
            data.returnTypes.peek().add(exprType);
            if (!types.isAssignable(getErrorTypes(failNode.expr.getBType()), exprType)) {
                dlog.error(failNode.pos, DiagnosticErrorCode.FAIL_EXPR_NO_MATCHING_ERROR_RETURN_IN_ENCL_INVOKABLE);
            }
        }
    }
    @Override
    public void visit(BLangLock lockNode, AnalyzerData data) {
        data.errorTypes.push(new LinkedHashSet<>());
        boolean failureHandled = data.failureHandled;
        if (!data.failureHandled) {
            // An on-fail clause on this lock statement handles failures raised inside the body.
            data.failureHandled = lockNode.onFailClause != null;
        }
        // Track that we are inside a lock block while analyzing its statements; restored after.
        boolean previousWithinLockBlock = data.withinLockBlock;
        data.withinLockBlock = true;
        lockNode.body.stmts.forEach(e -> analyzeNode(e, data));
        data.withinLockBlock = previousWithinLockBlock;
        data.failureHandled = failureHandled;
        lockNode.body.failureBreakMode = lockNode.onFailClause != null ?
                BLangBlockStmt.FailureBreakMode.BREAK_TO_OUTER_BLOCK : BLangBlockStmt.FailureBreakMode.NOT_BREAKABLE;
        analyzeOnFailClause(lockNode.onFailClause, data);
        data.errorTypes.pop();
    }
    @Override
    public void visit(BLangContinue continueNode, AnalyzerData data) {
        // `continue` is only valid inside a loop.
        if (data.loopCount == 0) {
            this.dlog.error(continueNode.pos, DiagnosticErrorCode.CONTINUE_CANNOT_BE_OUTSIDE_LOOP);
            return;
        }
        // `continue` must not be used to jump out of an enclosing transaction.
        if (checkNextBreakValidityInTransaction(data)) {
            this.dlog.error(continueNode.pos, DiagnosticErrorCode.CONTINUE_CANNOT_BE_USED_TO_EXIT_TRANSACTION);
            return;
        }
        // Some contexts (tracked via loopAlterNotAllowed) forbid altering loop control flow.
        if (data.loopAlterNotAllowed) {
            this.dlog.error(continueNode.pos, DiagnosticErrorCode.CONTINUE_NOT_ALLOWED);
        }
    }
    @Override
    public void visit(BLangImportPackage importPkgNode, AnalyzerData data) {
        BPackageSymbol pkgSymbol = importPkgNode.symbol;
        SymbolEnv pkgEnv = this.symTable.pkgEnvMap.get(pkgSymbol);
        // Packages without a registered environment (e.g. not yet analyzed here) are skipped.
        if (pkgEnv == null) {
            return;
        }
        analyzeNode(pkgEnv.node, data);
    }
    @Override
    public void visit(BLangXMLNS xmlnsNode, AnalyzerData data) {
        /* ignore */
    }
    @Override
    public void visit(BLangClientDeclaration node, AnalyzerData data) {
        // No analysis is required for client declarations here.
    }
    @Override
    public void visit(BLangService serviceNode, AnalyzerData data) {
        // No analysis is required for services here.
    }
private void analyzeExportableTypeRef(BSymbol owner, BTypeSymbol symbol, boolean inFuncSignature,
Location pos) {
if (!inFuncSignature && Symbols.isFlagOn(owner.flags, Flags.ANONYMOUS)) {
return;
}
if (Symbols.isPublic(owner)) {
HashSet<BTypeSymbol> visitedSymbols = new HashSet<>();
checkForExportableType(symbol, pos, visitedSymbols);
}
}
    @Override
    public void visit(BLangLetExpression letExpression, AnalyzerData data) {
        // Let expressions are not yet supported in record or object field contexts.
        int ownerSymTag = data.env.scope.owner.tag;
        if ((ownerSymTag & SymTag.RECORD) == SymTag.RECORD) {
            dlog.error(letExpression.pos, DiagnosticErrorCode.LET_EXPRESSION_NOT_YET_SUPPORTED_RECORD_FIELD);
        } else if ((ownerSymTag & SymTag.OBJECT) == SymTag.OBJECT) {
            dlog.error(letExpression.pos, DiagnosticErrorCode.LET_EXPRESSION_NOT_YET_SUPPORTED_OBJECT_FIELD);
        }
        // Analyze the let-variable declarations in the expression's own environment, then the body.
        data.env = letExpression.env;
        for (BLangLetVariable letVariable : letExpression.letVarDeclarations) {
            analyzeNode((BLangNode) letVariable.definitionNode, data);
        }
        analyzeExpr(letExpression.expr, data);
    }
    @Override
    public void visit(BLangSimpleVariable varNode, AnalyzerData data) {
        analyzeTypeNode(varNode.typeNode, data);
        analyzeExpr(varNode.expr, data);
        if (Objects.isNull(varNode.symbol)) {
            return;
        }
        // Exportability of the referenced type only matters for public variables.
        if (!Symbols.isPublic(varNode.symbol)) {
            return;
        }
        int ownerSymTag = data.env.scope.owner.tag;
        if ((ownerSymTag & SymTag.RECORD) == SymTag.RECORD || (ownerSymTag & SymTag.OBJECT) == SymTag.OBJECT) {
            // Fields of records/objects: check exportability against the owning construct.
            analyzeExportableTypeRef(data.env.scope.owner, varNode.getBType().tsymbol, false, varNode.pos);
        } else if ((ownerSymTag & SymTag.INVOKABLE) != SymTag.INVOKABLE) {
            // Module-level variables: check exportability against the variable's own symbol.
            analyzeExportableTypeRef(varNode.symbol, varNode.getBType().tsymbol, false, varNode.pos);
        }
        varNode.annAttachments.forEach(annotationAttachment -> analyzeNode(annotationAttachment, data));
    }
private boolean isValidInferredArray(BLangNode node) {
switch (node.getKind()) {
case INTERSECTION_TYPE_NODE:
case UNION_TYPE_NODE:
return isValidInferredArray(node.parent);
case VARIABLE:
BLangSimpleVariable varNode = (BLangSimpleVariable) node;
BLangExpression expr = varNode.expr;
return expr != null && isValidContextForInferredArray(node.parent) &&
isValidVariableForInferredArray(expr);
default:
return false;
}
}
private boolean isValidContextForInferredArray(BLangNode node) {
switch (node.getKind()) {
case PACKAGE:
case EXPR_FUNCTION_BODY:
case BLOCK_FUNCTION_BODY:
case BLOCK:
return true;
case VARIABLE_DEF:
return isValidContextForInferredArray(node.parent);
default:
return false;
}
}
private boolean isValidVariableForInferredArray(BLangNode node) {
switch (node.getKind()) {
case LITERAL:
if (node.getBType().tag == TypeTags.ARRAY) {
return true;
}
break;
case LIST_CONSTRUCTOR_EXPR:
return true;
case GROUP_EXPR:
return isValidVariableForInferredArray(((BLangGroupExpr) node).expression);
}
return false;
}
@Override
public void visit(BLangTupleVariable bLangTupleVariable, AnalyzerData data) {
if (bLangTupleVariable.typeNode != null) {
analyzeNode(bLangTupleVariable.typeNode, data);
}
analyzeExpr(bLangTupleVariable.expr, data);
}
@Override
public void visit(BLangRecordVariable bLangRecordVariable, AnalyzerData data) {
if (bLangRecordVariable.typeNode != null) {
analyzeNode(bLangRecordVariable.typeNode, data);
}
analyzeExpr(bLangRecordVariable.expr, data);
}
@Override
public void visit(BLangErrorVariable bLangErrorVariable, AnalyzerData data) {
if (bLangErrorVariable.typeNode != null) {
analyzeNode(bLangErrorVariable.typeNode, data);
}
analyzeExpr(bLangErrorVariable.expr, data);
}
    @Override
    public void visit(BLangIdentifier identifierNode, AnalyzerData data) {
        /* ignore */
    }
@Override
public void visit(BLangAnnotation annotationNode, AnalyzerData data) {
annotationNode.annAttachments.forEach(annotationAttachment -> analyzeNode(annotationAttachment, data));
}
    @Override
    public void visit(BLangAnnotationAttachment annAttachmentNode, AnalyzerData data) {
        analyzeExpr(annAttachmentNode.expr, data);
        // Warn when the attached annotation is itself marked as deprecated.
        BAnnotationSymbol annotationSymbol = annAttachmentNode.annotationSymbol;
        if (annotationSymbol != null && Symbols.isFlagOn(annotationSymbol.flags, Flags.DEPRECATED)) {
            logDeprecatedWaring(annAttachmentNode.annotationName.toString(), annotationSymbol, annAttachmentNode.pos);
        }
    }
    @Override
    public void visit(BLangSimpleVariableDef varDefNode, AnalyzerData data) {
        // A variable definition just delegates to the analysis of the variable itself.
        analyzeNode(varDefNode.var, data);
    }
@Override
public void visit(BLangCompoundAssignment compoundAssignment, AnalyzerData data) {
BLangValueExpression varRef = compoundAssignment.varRef;
analyzeExpr(varRef, data);
analyzeExpr(compoundAssignment.expr, data);
}
@Override
public void visit(BLangAssignment assignNode, AnalyzerData data) {
BLangExpression varRef = assignNode.varRef;
analyzeExpr(varRef, data);
analyzeExpr(assignNode.expr, data);
}
@Override
public void visit(BLangRecordDestructure stmt, AnalyzerData data) {
List<BLangExpression> varRefs = getVarRefs(stmt.varRef);
this.checkDuplicateVarRefs(varRefs);
analyzeExpr(stmt.varRef, data);
analyzeExpr(stmt.expr, data);
}
@Override
public void visit(BLangErrorDestructure stmt, AnalyzerData data) {
List<BLangExpression> varRefs = getVarRefs(stmt.varRef);
this.checkDuplicateVarRefs(varRefs);
analyzeExpr(stmt.varRef, data);
analyzeExpr(stmt.expr, data);
}
@Override
public void visit(BLangTupleDestructure stmt, AnalyzerData data) {
List<BLangExpression> varRefs = getVarRefs(stmt.varRef);
this.checkDuplicateVarRefs(varRefs);
analyzeExpr(stmt.varRef, data);
analyzeExpr(stmt.expr, data);
}
    // Entry point for duplicate-variable detection across a destructuring binding pattern.
    private void checkDuplicateVarRefs(List<BLangExpression> varRefs) {
        checkDuplicateVarRefs(varRefs, new HashSet<>());
    }
    /**
     * Reports an error for any variable symbol referenced more than once across the given
     * destructuring var-refs, recursing into tuple/record/error var-refs with a shared symbol set.
     *
     * @param varRefs the variable references to check (entries may be null, e.g. absent rest params)
     * @param symbols the symbols already seen in this binding pattern
     */
    private void checkDuplicateVarRefs(List<BLangExpression> varRefs, Set<BSymbol> symbols) {
        for (BLangExpression varRef : varRefs) {
            if (varRef == null) {
                continue;
            }
            NodeKind kind = varRef.getKind();
            // Only variable-reference kinds participate in duplicate detection.
            if (kind != NodeKind.SIMPLE_VARIABLE_REF
                    && kind != NodeKind.RECORD_VARIABLE_REF
                    && kind != NodeKind.ERROR_VARIABLE_REF
                    && kind != NodeKind.TUPLE_VARIABLE_REF) {
                continue;
            }
            // The ignore variable `_` may legitimately appear multiple times.
            if (kind == NodeKind.SIMPLE_VARIABLE_REF
                    && names.fromIdNode(((BLangSimpleVarRef) varRef).variableName) == Names.IGNORE) {
                continue;
            }
            // Recurse into composite var-refs, sharing the same symbol set.
            if (kind == NodeKind.TUPLE_VARIABLE_REF) {
                checkDuplicateVarRefs(getVarRefs((BLangTupleVarRef) varRef), symbols);
            } else if (kind == NodeKind.RECORD_VARIABLE_REF) {
                checkDuplicateVarRefs(getVarRefs((BLangRecordVarRef) varRef), symbols);
            } else if (kind == NodeKind.ERROR_VARIABLE_REF) {
                checkDuplicateVarRefs(getVarRefs((BLangErrorVarRef) varRef), symbols);
            }
            BLangVariableReference varRefExpr = (BLangVariableReference) varRef;
            // Set.add returns false on a repeated symbol — that is the duplicate.
            if (varRefExpr.symbol != null && !symbols.add(varRefExpr.symbol)) {
                this.dlog.error(varRef.pos, DiagnosticErrorCode.DUPLICATE_VARIABLE_IN_BINDING_PATTERN,
                        varRefExpr.symbol);
            }
        }
    }
private List<BLangExpression> getVarRefs(BLangRecordVarRef varRef) {
List<BLangExpression> varRefs = varRef.recordRefFields.stream()
.map(e -> e.variableReference).collect(Collectors.toList());
varRefs.add(varRef.restParam);
return varRefs;
}
private List<BLangExpression> getVarRefs(BLangErrorVarRef varRef) {
List<BLangExpression> varRefs = new ArrayList<>();
if (varRef.message != null) {
varRefs.add(varRef.message);
}
if (varRef.cause != null) {
varRefs.add(varRef.cause);
}
varRefs.addAll(varRef.detail.stream().map(e -> e.expr).collect(Collectors.toList()));
varRefs.add(varRef.restVar);
return varRefs;
}
private List<BLangExpression> getVarRefs(BLangTupleVarRef varRef) {
List<BLangExpression> varRefs = new ArrayList<>(varRef.expressions);
varRefs.add(varRef.restParam);
return varRefs;
}
    @Override
    public void visit(BLangBreak breakNode, AnalyzerData data) {
        // `break` is only valid inside a loop.
        if (data.loopCount == 0) {
            this.dlog.error(breakNode.pos, DiagnosticErrorCode.BREAK_CANNOT_BE_OUTSIDE_LOOP);
            return;
        }
        // `break` must not be used to jump out of an enclosing transaction.
        if (checkNextBreakValidityInTransaction(data)) {
            this.dlog.error(breakNode.pos, DiagnosticErrorCode.BREAK_CANNOT_BE_USED_TO_EXIT_TRANSACTION);
            return;
        }
        // Some contexts (tracked via loopAlterNotAllowed) forbid altering loop control flow.
        if (data.loopAlterNotAllowed) {
            this.dlog.error(breakNode.pos, DiagnosticErrorCode.BREAK_NOT_ALLOWED);
        }
    }
    @Override
    public void visit(BLangPanic panicNode, AnalyzerData data) {
        // Only the panicked expression needs analysis here.
        analyzeExpr(panicNode.expr, data);
    }
    @Override
    public void visit(BLangXMLNSStatement xmlnsStmtNode, AnalyzerData data) {
        // No analysis is required for XML namespace statements here.
    }
    @Override
    public void visit(BLangClientDeclarationStatement clientDeclarationStatement, AnalyzerData data) {
        // Delegate to the wrapped client declaration.
        analyzeNode(clientDeclarationStatement.clientDeclaration, data);
    }
@Override
public void visit(BLangExpressionStmt exprStmtNode, AnalyzerData data) {
BLangExpression expr = exprStmtNode.expr;
analyzeExpr(expr, data);
}
    // True when the env's node is the enclosing invokable's body, i.e. we are at its top level.
    private boolean isTopLevel(SymbolEnv env) {
        return env.enclInvokable.body == env.node;
    }
    // True when the enclosing invokable carries the WORKER flag (a worker lambda).
    private boolean isInWorker(SymbolEnv env) {
        return env.enclInvokable.flagSet.contains(Flag.WORKER);
    }
    // Worker send/receive is only allowed at the top level of the enclosing invokable's body.
    private boolean isCommunicationAllowedLocation(SymbolEnv env) {
        return isTopLevel(env);
    }
    // True when the interaction targets the default (function) worker.
    private boolean isDefaultWorkerCommunication(String workerIdentifier) {
        return workerIdentifier.equals(DEFAULT_WORKER_NAME);
    }
    // A worker exists when targeting the default worker from within a worker, or when the
    // communication channel's (referred) type is a worker-derived future.
    private boolean workerExists(BType type, String workerName, SymbolEnv env) {
        if (isDefaultWorkerCommunication(workerName) && isInWorker(env)) {
            return true;
        }
        if (type == symTable.semanticError) {
            return false;
        }
        BType refType = Types.getReferredType(type);
        return refType.tag == TypeTags.FUTURE && ((BFutureType) refType).workerDerivative;
    }
    @Override
    public void visit(BLangWorkerSend workerSendNode, AnalyzerData data) {
        // Resolve the receiving worker; anything that is not a variable symbol is treated as not found.
        BSymbol receiver =
                symResolver.lookupSymbolInMainSpace(data.env, names.fromIdNode(workerSendNode.workerIdentifier));
        if ((receiver.tag & SymTag.VARIABLE) != SymTag.VARIABLE) {
            receiver = symTable.notFoundSymbol;
        }
        verifyPeerCommunication(workerSendNode.pos, receiver, workerSendNode.workerIdentifier.value, data.env);
        WorkerActionSystem was = data.workerActionSystemStack.peek();
        BType type = workerSendNode.expr.getBType();
        if (type == symTable.semanticError) {
            // Error of the above is ignored. No error is logged here; it was already reported upstream.
            was.hasErrors = true;
        } else if (workerSendNode.expr instanceof ActionNode) {
            this.dlog.error(workerSendNode.expr.pos, DiagnosticErrorCode.INVALID_SEND_EXPR);
        } else if (!types.isAssignable(type, symTable.cloneableType)) {
            // Only cloneable values may cross worker boundaries.
            this.dlog.error(workerSendNode.pos, DiagnosticErrorCode.INVALID_TYPE_FOR_SEND, type);
        }
        String workerName = workerSendNode.workerIdentifier.getValue();
        // Sends are only allowed at the top level of a worker body (outside queries).
        if (data.withinQuery || (!isCommunicationAllowedLocation(data.env) && !data.inInternallyDefinedBlockStmt)) {
            this.dlog.error(workerSendNode.pos, DiagnosticErrorCode.UNSUPPORTED_WORKER_SEND_POSITION);
            was.hasErrors = true;
        }
        if (!this.workerExists(workerSendNode.getBType(), workerName, data.env)
                || (!isWorkerFromFunction(data.env, names.fromString(workerName)) && !workerName.equals("function"))) {
            this.dlog.error(workerSendNode.pos, DiagnosticErrorCode.UNDEFINED_WORKER, workerName);
            was.hasErrors = true;
        }
        // The send's effective type accumulates error-only return types seen so far.
        workerSendNode.setBType(
                createAccumulatedErrorTypeForMatchingReceive(workerSendNode.pos, workerSendNode.expr.getBType(), data));
        was.addWorkerAction(workerSendNode);
        analyzeExpr(workerSendNode.expr, data);
        validateActionParentNode(workerSendNode.pos, workerSendNode.expr);
    }
private BType createAccumulatedErrorTypeForMatchingReceive(Location pos, BType exprType, AnalyzerData data) {
Set<BType> returnTypesUpToNow = data.returnTypes.peek();
LinkedHashSet<BType> returnTypeAndSendType = new LinkedHashSet<>() {
{
Comparator.comparing(BType::toString);
}
};
for (BType returnType : returnTypesUpToNow) {
if (onlyContainErrors(returnType)) {
returnTypeAndSendType.add(returnType);
} else {
this.dlog.error(pos, DiagnosticErrorCode.WORKER_SEND_AFTER_RETURN);
}
}
returnTypeAndSendType.add(exprType);
if (returnTypeAndSendType.size() > 1) {
return BUnionType.create(null, returnTypeAndSendType);
} else {
return exprType;
}
}
    @Override
    public void visit(BLangWorkerSyncSendExpr syncSendExpr, AnalyzerData data) {
        // Resolve the receiving worker; anything that is not a variable symbol is treated as not found.
        BSymbol receiver =
                symResolver.lookupSymbolInMainSpace(data.env, names.fromIdNode(syncSendExpr.workerIdentifier));
        if ((receiver.tag & SymTag.VARIABLE) != SymTag.VARIABLE) {
            receiver = symTable.notFoundSymbol;
        }
        verifyPeerCommunication(syncSendExpr.pos, receiver, syncSendExpr.workerIdentifier.value, data.env);
        validateActionParentNode(syncSendExpr.pos, syncSendExpr);
        String workerName = syncSendExpr.workerIdentifier.getValue();
        WorkerActionSystem was = data.workerActionSystemStack.peek();
        // Sync sends are only allowed at the top level of a worker body (outside queries).
        if (data.withinQuery || (!isCommunicationAllowedLocation(data.env) && !data.inInternallyDefinedBlockStmt)) {
            this.dlog.error(syncSendExpr.pos, DiagnosticErrorCode.UNSUPPORTED_WORKER_SEND_POSITION);
            was.hasErrors = true;
        }
        if (!this.workerExists(syncSendExpr.workerType, workerName, data.env)) {
            this.dlog.error(syncSendExpr.pos, DiagnosticErrorCode.UNDEFINED_WORKER, syncSendExpr.workerSymbol);
            was.hasErrors = true;
        }
        // The send's effective type accumulates error-only return types seen so far.
        syncSendExpr.setBType(
                createAccumulatedErrorTypeForMatchingReceive(syncSendExpr.pos, syncSendExpr.expr.getBType(), data));
        was.addWorkerAction(syncSendExpr);
        analyzeExpr(syncSendExpr.expr, data);
    }
    @Override
    public void visit(BLangWorkerReceive workerReceiveNode, AnalyzerData data) {
        validateActionParentNode(workerReceiveNode.pos, workerReceiveNode);
        // Resolve the sending worker; anything that is not a variable symbol is treated as not found.
        BSymbol sender =
                symResolver.lookupSymbolInMainSpace(data.env, names.fromIdNode(workerReceiveNode.workerIdentifier));
        if ((sender.tag & SymTag.VARIABLE) != SymTag.VARIABLE) {
            sender = symTable.notFoundSymbol;
        }
        verifyPeerCommunication(workerReceiveNode.pos, sender, workerReceiveNode.workerIdentifier.value, data.env);
        WorkerActionSystem was = data.workerActionSystemStack.peek();
        String workerName = workerReceiveNode.workerIdentifier.getValue();
        // Receives are only allowed at the top level of a worker body (outside queries).
        if (data.withinQuery || (!isCommunicationAllowedLocation(data.env) && !data.inInternallyDefinedBlockStmt)) {
            this.dlog.error(workerReceiveNode.pos, DiagnosticErrorCode.INVALID_WORKER_RECEIVE_POSITION);
            was.hasErrors = true;
        }
        if (!this.workerExists(workerReceiveNode.workerType, workerName, data.env)) {
            this.dlog.error(workerReceiveNode.pos, DiagnosticErrorCode.UNDEFINED_WORKER, workerName);
            was.hasErrors = true;
        }
        // Record the error types a matching sync send may produce at this receive.
        workerReceiveNode.matchingSendsError = createAccumulatedErrorTypeForMatchingSyncSend(workerReceiveNode, data);
        was.addWorkerAction(workerReceiveNode);
    }
    /**
     * Verifies that a worker interaction happens between peer workers only: workers inside a fork
     * may not interact with workers outside that fork (or with the default worker of a forked
     * function), and vice versa.
     */
    private void verifyPeerCommunication(Location pos, BSymbol otherWorker, String otherWorkerName, SymbolEnv env) {
        if (env.enclEnv.node.getKind() != NodeKind.FUNCTION) {
            return;
        }
        BLangFunction funcNode = (BLangFunction) env.enclEnv.node;
        Set<Flag> flagSet = funcNode.flagSet;
        // Worker lambdas are registered under a "0"-prefixed derived name.
        Name workerDerivedName = names.fromString("0" + otherWorker.name.value);
        if (flagSet.contains(Flag.WORKER)) {
            if (otherWorkerName.equals(DEFAULT_WORKER_NAME)) {
                // A forked worker cannot interact with the default worker.
                if (flagSet.contains(Flag.FORKED)) {
                    dlog.error(pos, DiagnosticErrorCode.WORKER_INTERACTIONS_ONLY_ALLOWED_BETWEEN_PEERS);
                }
                return;
            }
            Scope enclFunctionScope = env.enclEnv.enclEnv.scope;
            BInvokableSymbol wLambda = (BInvokableSymbol) enclFunctionScope.lookup(workerDerivedName).symbol;
            // Both workers must originate from the same anonymous fork, if any.
            if (wLambda != null && funcNode.anonForkName != null
                    && !funcNode.anonForkName.equals(wLambda.enclForkName)) {
                dlog.error(pos, DiagnosticErrorCode.WORKER_INTERACTIONS_ONLY_ALLOWED_BETWEEN_PEERS);
            }
        } else {
            BInvokableSymbol wLambda = (BInvokableSymbol) env.scope.lookup(workerDerivedName).symbol;
            // The default worker cannot interact with a worker that lives inside a fork.
            if (wLambda != null && wLambda.enclForkName != null) {
                dlog.error(pos, DiagnosticErrorCode.WORKER_INTERACTIONS_ONLY_ALLOWED_BETWEEN_PEERS);
            }
        }
    }
public BType createAccumulatedErrorTypeForMatchingSyncSend(BLangWorkerReceive workerReceiveNode,
AnalyzerData data) {
Set<BType> returnTypesUpToNow = data.returnTypes.peek();
LinkedHashSet<BType> returnTypeAndSendType = new LinkedHashSet<>();
for (BType returnType : returnTypesUpToNow) {
if (onlyContainErrors(returnType)) {
returnTypeAndSendType.add(returnType);
} else {
this.dlog.error(workerReceiveNode.pos, DiagnosticErrorCode.WORKER_RECEIVE_AFTER_RETURN);
}
}
returnTypeAndSendType.add(symTable.nilType);
if (returnTypeAndSendType.size() > 1) {
return BUnionType.create(null, returnTypeAndSendType);
} else {
return symTable.nilType;
}
}
private boolean onlyContainErrors(BType returnType) {
if (returnType == null) {
return false;
}
returnType = types.getTypeWithEffectiveIntersectionTypes(returnType);
returnType = Types.getReferredType(returnType);
if (returnType.tag == TypeTags.ERROR) {
return true;
}
if (returnType.tag == TypeTags.UNION) {
for (BType memberType : ((BUnionType) returnType).getMemberTypes()) {
BType t = types.getTypeWithEffectiveIntersectionTypes(memberType);
if (t.tag != TypeTags.ERROR) {
return false;
}
}
return true;
}
return false;
}
    @Override
    public void visit(BLangLiteral literalExpr, AnalyzerData data) {
        // No analysis is required for literals here.
    }
    @Override
    public void visit(BLangConstRef constRef, AnalyzerData data) {
        // No analysis is required for constant references here.
    }
@Override
public void visit(BLangListConstructorExpr listConstructorExpr, AnalyzerData data) {
for (BLangExpression expr : listConstructorExpr.exprs) {
if (expr.getKind() == NodeKind.LIST_CONSTRUCTOR_SPREAD_OP) {
expr = ((BLangListConstructorSpreadOpExpr) expr).expr;
}
analyzeExpr(expr, data);
}
}
    @Override
    public void visit(BLangTableConstructorExpr tableConstructorExpr, AnalyzerData data) {
        // Analyze each row's record literal in the table constructor.
        analyzeExprs(tableConstructorExpr.recordLiteralList, data);
    }
    /**
     * Analyzes a mapping constructor: first analyzes each field's value expression, then
     * validates the keys — duplicate keys, duplicates possibly introduced by spread fields
     * (maps and open records are "inclusive" spreads whose key set is unknown), and
     * identifier keys that are invalid for open records.
     */
    @Override
    public void visit(BLangRecordLiteral recordLiteral, AnalyzerData data) {
        List<RecordLiteralNode.RecordField> fields = recordLiteral.fields;
        // First pass: analyze the value expression of every field.
        for (RecordLiteralNode.RecordField field : fields) {
            if (field.isKeyValueField()) {
                analyzeExpr(((BLangRecordKeyValueField) field).valueExpr, data);
            } else if (field.getKind() == NodeKind.SIMPLE_VARIABLE_REF) {
                analyzeExpr((BLangRecordLiteral.BLangRecordVarNameField) field, data);
            } else {
                analyzeExpr(((BLangRecordLiteral.BLangRecordSpreadOperatorField) field).expr, data);
            }
        }
        Set<Object> names = new HashSet<>();           // keys specified so far (unescaped)
        Set<Object> neverTypedKeys = new HashSet<>();  // never-typed keys seen via spread fields
        BType literalBType = recordLiteral.getBType();
        BType type = Types.getReferredType(literalBType);
        boolean isRecord = type.tag == TypeTags.RECORD;
        boolean isOpenRecord = isRecord && !((BRecordType) type).sealed;
        // Record type inferred for a map-typed contextually-expected type.
        boolean isInferredRecordForMapCET = isRecord && recordLiteral.expectedType != null &&
                recordLiteral.expectedType.tag == TypeTags.MAP;
        // The single spread field (if any) whose key set is open-ended.
        BLangRecordLiteral.BLangRecordSpreadOperatorField inclusiveTypeSpreadField = null;
        // Second pass: validate keys across explicit fields and spread fields.
        for (RecordLiteralNode.RecordField field : fields) {
            BLangExpression keyExpr;
            if (field.getKind() == NodeKind.RECORD_LITERAL_SPREAD_OP) {
                BLangRecordLiteral.BLangRecordSpreadOperatorField spreadOpField =
                        (BLangRecordLiteral.BLangRecordSpreadOperatorField) field;
                BLangExpression spreadOpExpr = spreadOpField.expr;
                analyzeExpr(spreadOpExpr, data);
                BType spreadOpExprType = Types.getReferredType(spreadOpExpr.getBType());
                int spreadFieldTypeTag = spreadOpExprType.tag;
                if (spreadFieldTypeTag == TypeTags.MAP) {
                    // A map spread has an unknown key set; only one such spread is allowed.
                    if (inclusiveTypeSpreadField != null) {
                        this.dlog.error(spreadOpExpr.pos, DiagnosticErrorCode.MULTIPLE_INCLUSIVE_TYPES);
                        continue;
                    }
                    inclusiveTypeSpreadField = spreadOpField;
                    if (fields.size() > 1) {
                        if (names.size() > 0) {
                            this.dlog.error(spreadOpExpr.pos,
                                    DiagnosticErrorCode.SPREAD_FIELD_MAY_DULPICATE_ALREADY_SPECIFIED_KEYS,
                                    spreadOpExpr);
                        }
                        continue;
                    }
                }
                if (spreadFieldTypeTag != TypeTags.RECORD) {
                    continue;
                }
                BRecordType spreadExprRecordType = (BRecordType) spreadOpExprType;
                boolean isSpreadExprRecordTypeSealed = spreadExprRecordType.sealed;
                if (!isSpreadExprRecordTypeSealed) {
                    // An open record spread is also inclusive: its rest fields are unknown.
                    if (inclusiveTypeSpreadField != null) {
                        this.dlog.error(spreadOpExpr.pos, DiagnosticErrorCode.MULTIPLE_INCLUSIVE_TYPES);
                    } else {
                        inclusiveTypeSpreadField = spreadOpField;
                    }
                }
                LinkedHashMap<String, BField> fieldsInRecordType = getUnescapedFieldList(spreadExprRecordType.fields);
                // Keys already specified may be duplicated by an open spread's rest fields.
                for (Object fieldName : names) {
                    if (!fieldsInRecordType.containsKey(fieldName) && !isSpreadExprRecordTypeSealed) {
                        this.dlog.error(spreadOpExpr.pos,
                                DiagnosticErrorCode.SPREAD_FIELD_MAY_DULPICATE_ALREADY_SPECIFIED_KEYS,
                                spreadOpExpr);
                        break;
                    }
                }
                for (String fieldName : fieldsInRecordType.keySet()) {
                    BField bField = fieldsInRecordType.get(fieldName);
                    if (names.contains(fieldName)) {
                        // never-typed fields carry no value, so they cannot duplicate a key.
                        if (bField.type.tag != TypeTags.NEVER) {
                            this.dlog.error(spreadOpExpr.pos,
                                    DiagnosticErrorCode.DUPLICATE_KEY_IN_RECORD_LITERAL_SPREAD_OP,
                                    type.getKind().typeName(), fieldName, spreadOpField);
                        }
                        continue;
                    }
                    if (bField.type.tag == TypeTags.NEVER) {
                        neverTypedKeys.add(fieldName);
                        continue;
                    }
                    if (!neverTypedKeys.remove(fieldName) &&
                            inclusiveTypeSpreadField != null && isSpreadExprRecordTypeSealed) {
                        this.dlog.error(spreadOpExpr.pos,
                                DiagnosticErrorCode.POSSIBLE_DUPLICATE_OF_FIELD_SPECIFIED_VIA_SPREAD_OP,
                                Types.getReferredType(recordLiteral.expectedType).getKind().typeName(),
                                bField.symbol, spreadOpField);
                    }
                    names.add(fieldName);
                }
            } else {
                if (field.isKeyValueField()) {
                    BLangRecordLiteral.BLangRecordKey key = ((BLangRecordKeyValueField) field).key;
                    keyExpr = key.expr;
                    if (key.computedKey) {
                        // Computed keys ([expr]) cannot be checked for duplication statically.
                        analyzeExpr(keyExpr, data);
                        continue;
                    }
                } else {
                    keyExpr = (BLangRecordLiteral.BLangRecordVarNameField) field;
                }
                if (keyExpr.getKind() == NodeKind.SIMPLE_VARIABLE_REF) {
                    String name = ((BLangSimpleVarRef) keyExpr).variableName.value;
                    String unescapedName = Utils.unescapeJava(name);
                    if (names.contains(unescapedName)) {
                        this.dlog.error(keyExpr.pos, DiagnosticErrorCode.DUPLICATE_KEY_IN_RECORD_LITERAL,
                                Types.getReferredType(recordLiteral.expectedType).getKind().typeName(),
                                unescapedName);
                    } else if (inclusiveTypeSpreadField != null && !neverTypedKeys.contains(unescapedName)) {
                        this.dlog.error(keyExpr.pos,
                                DiagnosticErrorCode.POSSIBLE_DUPLICATE_OF_FIELD_SPECIFIED_VIA_SPREAD_OP,
                                unescapedName, inclusiveTypeSpreadField);
                    }
                    // Identifier keys of an open record must name a declared field.
                    if (!isInferredRecordForMapCET && isOpenRecord && !((BRecordType) type).fields.containsKey(name)) {
                        dlog.error(keyExpr.pos, DiagnosticErrorCode.INVALID_RECORD_LITERAL_IDENTIFIER_KEY,
                                unescapedName);
                    }
                    names.add(unescapedName);
                } else if (keyExpr.getKind() == NodeKind.LITERAL || keyExpr.getKind() == NodeKind.NUMERIC_LITERAL) {
                    Object name = ((BLangLiteral) keyExpr).value;
                    if (names.contains(name)) {
                        this.dlog.error(keyExpr.pos, DiagnosticErrorCode.DUPLICATE_KEY_IN_RECORD_LITERAL,
                                Types.getReferredType(recordLiteral.parent.getBType())
                                        .getKind().typeName(), name);
                    } else if (inclusiveTypeSpreadField != null && !neverTypedKeys.contains(name)) {
                        this.dlog.error(keyExpr.pos,
                                DiagnosticErrorCode.POSSIBLE_DUPLICATE_OF_FIELD_SPECIFIED_VIA_SPREAD_OP,
                                name, inclusiveTypeSpreadField);
                    }
                    names.add(name);
                }
            }
        }
        if (isInferredRecordForMapCET) {
            recordLiteral.expectedType = type;
        }
    }
    // Var-name fields ({x}) are analyzed exactly like simple variable references.
    @Override
    public void visit(BLangRecordLiteral.BLangRecordVarNameField node, AnalyzerData data) {
        visit((BLangSimpleVarRef) node, data);
    }
private LinkedHashMap<String, BField> getUnescapedFieldList(LinkedHashMap<String, BField> fieldMap) {
LinkedHashMap<String, BField> newMap = new LinkedHashMap<>();
for (String key : fieldMap.keySet()) {
newMap.put(Utils.unescapeJava(key), fieldMap.get(key));
}
return newMap;
}
    /**
     * Validates a simple variable reference: tracks future-typed references to named workers
     * (uses outside worker interactions) and warns on references to deprecated symbols.
     */
    @Override
    public void visit(BLangSimpleVarRef varRefExpr, AnalyzerData data) {
        switch (varRefExpr.parent.getKind()) {
            // Worker interactions name workers directly; they are not escaping uses.
            case WORKER_RECEIVE:
            case WORKER_SEND:
            case WORKER_SYNC_SEND:
                return;
            default:
                if (varRefExpr.getBType() != null && varRefExpr.getBType().tag == TypeTags.FUTURE) {
                    trackNamedWorkerReferences(varRefExpr, data);
                }
        }
        BSymbol symbol = varRefExpr.symbol;
        if (symbol != null && Symbols.isFlagOn(symbol.flags, Flags.DEPRECATED)) {
            logDeprecatedWaring(varRefExpr.variableName.toString(), symbol, varRefExpr.pos);
        }
    }
private void trackNamedWorkerReferences(BLangSimpleVarRef varRefExpr, AnalyzerData data) {
if (varRefExpr.symbol == null || (varRefExpr.symbol.flags & Flags.WORKER) != Flags.WORKER) {
return;
}
data.workerReferences.computeIfAbsent(varRefExpr.symbol, s -> new LinkedHashSet<>());
data.workerReferences.get(varRefExpr.symbol).add(varRefExpr);
}
    @Override
    public void visit(BLangRecordVarRef varRefExpr, AnalyzerData data) {
        /* ignore */
    }
    @Override
    public void visit(BLangErrorVarRef varRefExpr, AnalyzerData data) {
        /* ignore */
    }
    @Override
    public void visit(BLangTupleVarRef varRefExpr, AnalyzerData data) {
        /* ignore */
    }
    // Field access: delegate to the shared field-based-access analysis.
    @Override
    public void visit(BLangFieldBasedAccess fieldAccessExpr, AnalyzerData data) {
        analyzeFieldBasedAccessExpr(fieldAccessExpr, data);
    }
    // Namespace-prefixed (xml) field access uses the same analysis.
    @Override
    public void visit(BLangFieldBasedAccess.BLangNSPrefixedFieldBasedAccess nsPrefixedFieldBasedAccess,
                      AnalyzerData data) {
        analyzeFieldBasedAccessExpr(nsPrefixedFieldBasedAccess, data);
    }
private void analyzeFieldBasedAccessExpr(BLangFieldBasedAccess fieldAccessExpr, AnalyzerData data) {
BLangExpression expr = fieldAccessExpr.expr;
analyzeExpr(expr, data);
BSymbol symbol = fieldAccessExpr.symbol;
if (symbol != null && Symbols.isFlagOn(fieldAccessExpr.symbol.flags, Flags.DEPRECATED)) {
String deprecatedConstruct = generateDeprecatedConstructString(expr, fieldAccessExpr.field.toString(),
symbol);
dlog.warning(fieldAccessExpr.pos, DiagnosticWarningCode.USAGE_OF_DEPRECATED_CONSTRUCT, deprecatedConstruct);
}
}
    // Analyzes both the index expression and the accessed expression.
    @Override
    public void visit(BLangIndexBasedAccess indexAccessExpr, AnalyzerData data) {
        analyzeExpr(indexAccessExpr.indexExpr, data);
        analyzeExpr(indexAccessExpr.expr, data);
    }
    /**
     * Analyzes a function invocation: receiver and arguments, match-guard restrictions,
     * transactional-call restrictions, and deprecation warnings.
     */
    @Override
    public void visit(BLangInvocation invocationExpr, AnalyzerData data) {
        analyzeExpr(invocationExpr.expr, data);
        analyzeExprs(invocationExpr.requiredArgs, data);
        analyzeExprs(invocationExpr.restArgs, data);
        validateInvocationInMatchGuard(invocationExpr);
        if ((invocationExpr.symbol != null) && invocationExpr.symbol.kind == SymbolKind.FUNCTION) {
            BSymbol funcSymbol = invocationExpr.symbol;
            // Transactional functions may only be called within a transaction scope.
            if (Symbols.isFlagOn(funcSymbol.flags, Flags.TRANSACTIONAL) && !data.withinTransactionScope) {
                dlog.error(invocationExpr.pos, DiagnosticErrorCode.TRANSACTIONAL_FUNC_INVOKE_PROHIBITED);
                return;
            }
            if (Symbols.isFlagOn(funcSymbol.flags, Flags.DEPRECATED)) {
                logDeprecatedWarningForInvocation(invocationExpr);
            }
        }
    }
    // Analyzes the positional and (if any) named args of an error constructor.
    @Override
    public void visit(BLangErrorConstructorExpr errorConstructorExpr, AnalyzerData data) {
        analyzeExprs(errorConstructorExpr.positionalArgs, data);
        if (!errorConstructorExpr.namedArgs.isEmpty()) {
            analyzeExprs(errorConstructorExpr.namedArgs, data);
        }
    }
    /**
     * Validates an action invocation (remote call / start): match-guard restrictions,
     * transaction and lock-block restrictions, deprecation warnings, and the syntactic
     * positions where actions may appear.
     */
    @Override
    public void visit(BLangInvocation.BLangActionInvocation actionInvocation, AnalyzerData data) {
        validateInvocationInMatchGuard(actionInvocation);
        // A transactional action cannot be invoked synchronously outside a transaction.
        if (!actionInvocation.async && !data.withinTransactionScope &&
                Symbols.isFlagOn(actionInvocation.symbol.flags, Flags.TRANSACTIONAL)) {
            dlog.error(actionInvocation.pos, DiagnosticErrorCode.TRANSACTIONAL_FUNC_INVOKE_PROHIBITED,
                    actionInvocation.symbol);
            return;
        }
        // `start` of a non-transactional function is prohibited inside a transaction.
        if (actionInvocation.async && data.withinTransactionScope &&
                !Symbols.isFlagOn(actionInvocation.symbol.flags, Flags.TRANSACTIONAL)) {
            dlog.error(actionInvocation.pos, DiagnosticErrorCode.USAGE_OF_START_WITHIN_TRANSACTION_IS_PROHIBITED);
            return;
        }
        analyzeExpr(actionInvocation.expr, data);
        analyzeExprs(actionInvocation.requiredArgs, data);
        analyzeExprs(actionInvocation.restArgs, data);
        if (actionInvocation.symbol != null && actionInvocation.symbol.kind == SymbolKind.FUNCTION &&
                Symbols.isFlagOn(actionInvocation.symbol.flags, Flags.DEPRECATED)) {
            logDeprecatedWarningForInvocation(actionInvocation);
        }
        if (actionInvocation.flagSet.contains(Flag.TRANSACTIONAL) && !data.withinTransactionScope) {
            dlog.error(actionInvocation.pos, DiagnosticErrorCode.TRANSACTIONAL_FUNC_INVOKE_PROHIBITED);
            return;
        }
        // `start` (or a started function pointer / worker) is not allowed inside a lock block.
        if (actionInvocation.async && data.withinLockBlock) {
            dlog.error(actionInvocation.pos, actionInvocation.functionPointerInvocation ?
                    DiagnosticErrorCode.USAGE_OF_WORKER_WITHIN_LOCK_IS_PROHIBITED :
                    DiagnosticErrorCode.USAGE_OF_START_WITHIN_LOCK_IS_PROHIBITED);
            return;
        }
        // Functional constructors cannot be invoked as actions.
        if (actionInvocation.symbol != null &&
                (actionInvocation.symbol.tag & SymTag.CONSTRUCTOR) == SymTag.CONSTRUCTOR) {
            dlog.error(actionInvocation.pos, DiagnosticErrorCode.INVALID_FUNCTIONAL_CONSTRUCTOR_INVOCATION,
                    actionInvocation.symbol);
            return;
        }
        validateActionInvocation(actionInvocation.pos, actionInvocation);
        if (!actionInvocation.async && data.withinTransactionScope) {
            actionInvocation.invokedInsideTransaction = true;
        }
    }
    /**
     * Validates a client resource access action: analyzes the receiver, arguments and path
     * segments, enforces transaction restrictions, and warns on deprecated targets.
     */
    @Override
    public void visit(BLangInvocation.BLangResourceAccessInvocation resourceActionInvocation, AnalyzerData data) {
        validateInvocationInMatchGuard(resourceActionInvocation);
        analyzeExpr(resourceActionInvocation.expr, data);
        analyzeExprs(resourceActionInvocation.requiredArgs, data);
        analyzeExprs(resourceActionInvocation.restArgs, data);
        analyzeExpr(resourceActionInvocation.resourceAccessPathSegments, data);
        resourceActionInvocation.invokedInsideTransaction = data.withinTransactionScope;
        // A transactional resource method cannot be invoked outside a transaction scope.
        if (Symbols.isFlagOn(resourceActionInvocation.symbol.flags, Flags.TRANSACTIONAL) &&
                !data.withinTransactionScope) {
            dlog.error(resourceActionInvocation.pos, DiagnosticErrorCode.TRANSACTIONAL_FUNC_INVOKE_PROHIBITED);
            return;
        }
        if (Symbols.isFlagOn(resourceActionInvocation.symbol.flags, Flags.DEPRECATED)) {
            logDeprecatedWarningForInvocation(resourceActionInvocation);
        }
        validateActionInvocation(resourceActionInvocation.pos, resourceActionInvocation);
    }
private void logDeprecatedWarningForInvocation(BLangInvocation invocationExpr) {
String deprecatedConstruct = invocationExpr.name.toString();
BLangExpression expr = invocationExpr.expr;
BSymbol funcSymbol = invocationExpr.symbol;
if (expr != null) {
deprecatedConstruct = generateDeprecatedConstructString(expr, deprecatedConstruct, funcSymbol);
} else if (!Names.DOT.equals(funcSymbol.pkgID.name)) {
deprecatedConstruct = funcSymbol.pkgID + ":" + deprecatedConstruct;
}
dlog.warning(invocationExpr.pos, DiagnosticWarningCode.USAGE_OF_DEPRECATED_CONSTRUCT, deprecatedConstruct);
}
private String generateDeprecatedConstructString(BLangExpression expr, String fieldOrMethodName,
BSymbol symbol) {
BType bType = expr.getBType();
if (bType.tag == TypeTags.TYPEREFDESC) {
return bType + "." + fieldOrMethodName;
}
if (bType.tag == TypeTags.OBJECT) {
BObjectType objectType = (BObjectType) bType;
if (objectType.classDef == null || objectType.classDef.internal == false) {
fieldOrMethodName = bType + "." + fieldOrMethodName;
}
return fieldOrMethodName;
}
if (symbol.kind == SymbolKind.FUNCTION && !Names.DOT.equals(symbol.pkgID.name)) {
fieldOrMethodName = symbol.pkgID + ":" + fieldOrMethodName;
}
return fieldOrMethodName;
}
    /**
     * Validates the receiver of an action invocation: only a simple variable reference, a
     * grouped expression, or a field access on `self` may act as the client, and the action
     * must appear in a valid parent position.
     */
    private void validateActionInvocation(Location pos, BLangInvocation iExpr) {
        if (iExpr.expr != null) {
            final NodeKind clientNodeKind = iExpr.expr.getKind();
            if (clientNodeKind == NodeKind.FIELD_BASED_ACCESS_EXPR) {
                final BLangFieldBasedAccess fieldBasedAccess = (BLangFieldBasedAccess) iExpr.expr;
                if (fieldBasedAccess.expr.getKind() != NodeKind.SIMPLE_VARIABLE_REF) {
                    dlog.error(pos, DiagnosticErrorCode.INVALID_ACTION_INVOCATION_AS_EXPR);
                } else {
                    // Field access is only a valid client when it is on `self`.
                    final BLangSimpleVarRef selfName = (BLangSimpleVarRef) fieldBasedAccess.expr;
                    if (!Names.SELF.equals(selfName.symbol.name)) {
                        dlog.error(pos, DiagnosticErrorCode.INVALID_ACTION_INVOCATION_AS_EXPR);
                    }
                }
            } else if (clientNodeKind != NodeKind.SIMPLE_VARIABLE_REF &&
                    clientNodeKind != NodeKind.GROUP_EXPR) {
                dlog.error(pos, DiagnosticErrorCode.INVALID_ACTION_INVOCATION_AS_EXPR);
            }
        }
        validateActionParentNode(pos, iExpr);
    }
    /**
     * Actions can only occur as part of a statement or nested inside other actions.
     * Walks up the parent chain through the expression wrappers that may legally contain an
     * action (check/checkpanic/trap/group/type-conversion, variables, query clauses) and
     * reports an error if a non-wrapping expression is reached first.
     */
    private boolean validateActionParentNode(Location pos, BLangNode node) {
        BLangNode parent = node.parent;
        while (parent != null) {
            final NodeKind kind = parent.getKind();
            if (parent instanceof StatementNode || checkActionInQuery(kind)) {
                return true;
            } else if (parent instanceof ActionNode || parent instanceof BLangVariable || kind == NodeKind.CHECK_EXPR ||
                    kind == NodeKind.CHECK_PANIC_EXPR || kind == NodeKind.TRAP_EXPR || kind == NodeKind.GROUP_EXPR ||
                    kind == NodeKind.TYPE_CONVERSION_EXPR) {
                // A nested action invocation terminates the climb: actions don't nest this way.
                if (parent instanceof BLangInvocation.BLangActionInvocation) {
                    break;
                }
                parent = parent.parent;
                continue;
            }
            break;
        }
        dlog.error(pos, DiagnosticErrorCode.INVALID_ACTION_INVOCATION_AS_EXPR);
        return false;
    }
private boolean checkActionInQuery(NodeKind parentKind) {
return parentKind == NodeKind.FROM || parentKind == NodeKind.SELECT ||
parentKind == NodeKind.LET_CLAUSE;
}
    // Analyzes constructor args and init invocation; warns if the instantiated type is deprecated.
    @Override
    public void visit(BLangTypeInit cIExpr, AnalyzerData data) {
        analyzeExprs(cIExpr.argsExpr, data);
        analyzeExpr(cIExpr.initInvocation, data);
        BType type = cIExpr.getBType();
        if (cIExpr.userDefinedType != null && Symbols.isFlagOn(type.tsymbol.flags, Flags.DEPRECATED)) {
            logDeprecatedWaring(((BLangUserDefinedType) cIExpr.userDefinedType).typeName.toString(), type.tsymbol,
                    cIExpr.pos);
        }
    }
    // Analyzes all three operands of a conditional (ternary) expression.
    @Override
    public void visit(BLangTernaryExpr ternaryExpr, AnalyzerData data) {
        analyzeExpr(ternaryExpr.expr, data);
        analyzeExpr(ternaryExpr.thenExpr, data);
        analyzeExpr(ternaryExpr.elseExpr, data);
    }
    /**
     * Validates a wait expression's future operand and registers the wait with the current
     * worker-action system for worker-interaction analysis.
     */
    @Override
    public void visit(BLangWaitExpr awaitExpr, AnalyzerData data) {
        BLangExpression expr = awaitExpr.getExpression();
        boolean validWaitFuture = validateWaitFutureExpr(expr);
        analyzeExpr(expr, data);
        boolean validActionParent = validateActionParentNode(awaitExpr.pos, awaitExpr);
        WorkerActionSystem was = data.workerActionSystemStack.peek();
        was.addWorkerAction(awaitExpr, data.env);
        // Either a valid future or a valid parent position is enough to continue analysis.
        was.hasErrors = !(validWaitFuture || validActionParent);
    }
    /**
     * Validates each key-value future of a multiple-wait expression and registers the wait
     * with the current worker-action system.
     */
    @Override
    public void visit(BLangWaitForAllExpr waitForAllExpr, AnalyzerData data) {
        boolean validWaitFuture = true;
        for (BLangWaitForAllExpr.BLangWaitKeyValue keyValue : waitForAllExpr.keyValuePairs) {
            // `{a}` shorthand has no value expression; the key itself names the future.
            BLangExpression expr = keyValue.valueExpr != null ? keyValue.valueExpr : keyValue.keyExpr;
            validWaitFuture = validWaitFuture && validateWaitFutureExpr(expr);
            analyzeExpr(expr, data);
        }
        boolean validActionParent = validateActionParentNode(waitForAllExpr.pos, waitForAllExpr);
        WorkerActionSystem was = data.workerActionSystemStack.peek();
        was.addWorkerAction(waitForAllExpr, data.env);
        was.hasErrors = !(validWaitFuture || validActionParent);
    }
private boolean validateWaitFutureExpr(BLangExpression expr) {
if (expr.getKind() == NodeKind.RECORD_LITERAL_EXPR) {
dlog.error(expr.pos, DiagnosticErrorCode.INVALID_WAIT_MAPPING_CONSTRUCTORS);
return false;
}
if (expr instanceof ActionNode) {
dlog.error(expr.pos, DiagnosticErrorCode.INVALID_WAIT_ACTIONS);
return false;
}
return true;
}
    // Analyzes the target expression of an xml element access.
    @Override
    public void visit(BLangXMLElementAccess xmlElementAccess, AnalyzerData data) {
        analyzeExpr(xmlElementAccess.expr, data);
    }
    /**
     * Validates an xml navigation (children/descendants filter): member access on such a
     * navigation is unsupported, as are non-langlib method invocations on its result.
     */
    @Override
    public void visit(BLangXMLNavigationAccess xmlNavigation, AnalyzerData data) {
        analyzeExpr(xmlNavigation.expr, data);
        if (xmlNavigation.childIndex != null) {
            if (xmlNavigation.navAccessType == XMLNavigationAccess.NavAccessType.DESCENDANTS
                    || xmlNavigation.navAccessType == XMLNavigationAccess.NavAccessType.CHILDREN) {
                dlog.error(xmlNavigation.pos, DiagnosticErrorCode.UNSUPPORTED_MEMBER_ACCESS_IN_XML_NAVIGATION);
            }
            analyzeExpr(xmlNavigation.childIndex, data);
        }
        validateMethodInvocationsInXMLNavigationExpression(xmlNavigation);
    }
    // Rejects method invocations on an xml navigation result, except when the navigation is
    // passed as an argument to a langlib function.
    private void validateMethodInvocationsInXMLNavigationExpression(BLangXMLNavigationAccess expression) {
        if (!expression.methodInvocationAnalyzed && expression.parent.getKind() == NodeKind.INVOCATION) {
            BLangInvocation invocation = (BLangInvocation) expression.parent;
            // Allow the navigation to appear as a langlib-call argument.
            if (invocation.argExprs.contains(expression)
                    && ((invocation.symbol.flags & Flags.LANG_LIB) != Flags.LANG_LIB)) {
                return;
            }
            dlog.error(invocation.pos, DiagnosticErrorCode.UNSUPPORTED_METHOD_INVOCATION_XML_NAV);
        }
        // Mark as analyzed so nested visits do not report the same invocation twice.
        expression.methodInvocationAnalyzed = true;
    }
    /**
     * Validates a flush expression: there must be at least one async send from the current
     * worker (optionally to the named worker), and the matching sends are cached on the node.
     */
    @Override
    public void visit(BLangWorkerFlushExpr workerFlushExpr, AnalyzerData data) {
        BLangIdentifier flushWrkIdentifier = workerFlushExpr.workerIdentifier;
        Stack<WorkerActionSystem> workerActionSystems = data.workerActionSystemStack;
        WorkerActionSystem currentWrkerAction = workerActionSystems.peek();
        List<BLangWorkerSend> sendStmts = getAsyncSendStmtsOfWorker(currentWrkerAction);
        if (flushWrkIdentifier != null) {
            // `flush w` — only sends targeting worker w count.
            List<BLangWorkerSend> sendsToGivenWrkr = sendStmts.stream()
                    .filter(bLangNode -> bLangNode.workerIdentifier.equals
                            (flushWrkIdentifier))
                    .collect(Collectors.toList());
            if (sendsToGivenWrkr.size() == 0) {
                this.dlog.error(workerFlushExpr.pos, DiagnosticErrorCode.INVALID_WORKER_FLUSH_FOR_WORKER,
                        workerFlushExpr.workerSymbol, currentWrkerAction.currentWorkerId());
                return;
            } else {
                sendStmts = sendsToGivenWrkr;
            }
        } else {
            // Bare `flush` requires at least one async send from this worker.
            if (sendStmts.size() == 0) {
                this.dlog.error(workerFlushExpr.pos, DiagnosticErrorCode.INVALID_WORKER_FLUSH,
                        currentWrkerAction.currentWorkerId());
                return;
            }
        }
        workerFlushExpr.cachedWorkerSendStmts = sendStmts;
        validateActionParentNode(workerFlushExpr.pos, workerFlushExpr);
    }
private List<BLangWorkerSend> getAsyncSendStmtsOfWorker(WorkerActionSystem currentWorkerAction) {
List<BLangNode> actions = currentWorkerAction.workerActionStateMachines.peek().actions;
return actions.stream()
.filter(CodeAnalyzer::isWorkerSend)
.map(bLangNode -> (BLangWorkerSend) bLangNode)
.collect(Collectors.toList());
}
    // Analyzes the trapped expression.
    @Override
    public void visit(BLangTrapExpr trapExpr, AnalyzerData data) {
        analyzeExpr(trapExpr.expr, data);
    }
    // Analyzes both operands, but only after future-specific validation passes.
    @Override
    public void visit(BLangBinaryExpr binaryExpr, AnalyzerData data) {
        if (validateBinaryExpr(binaryExpr)) {
            analyzeExpr(binaryExpr.lhsExpr, data);
            analyzeExpr(binaryExpr.rhsExpr, data);
        }
    }
    /**
     * Validates a binary expression involving futures: `f1 | f2` is only meaningful as the
     * (possibly nested) operand of a wait expression (alternate wait); elsewhere `|` is not
     * supported on futures.
     */
    private boolean validateBinaryExpr(BLangBinaryExpr binaryExpr) {
        // If neither operand is a future, there is nothing to restrict.
        if (binaryExpr.lhsExpr.getBType().tag != TypeTags.FUTURE
                && binaryExpr.rhsExpr.getBType().tag != TypeTags.FUTURE) {
            return true;
        }
        BLangNode parentNode = binaryExpr.parent;
        // NOTE(review): at this point at least one operand is a future, so this condition is
        // always true; kept as-is to preserve the original control flow.
        if (binaryExpr.lhsExpr.getBType().tag == TypeTags.FUTURE
                || binaryExpr.rhsExpr.getBType().tag == TypeTags.FUTURE) {
            if (parentNode == null) {
                return false;
            }
            if (parentNode.getKind() == NodeKind.WAIT_EXPR) {
                return true;
            }
        }
        if (parentNode.getKind() != NodeKind.BINARY_EXPR && binaryExpr.opKind == OperatorKind.BITWISE_OR) {
            dlog.error(binaryExpr.pos, DiagnosticErrorCode.OPERATOR_NOT_SUPPORTED, OperatorKind.BITWISE_OR,
                    symTable.futureType);
            return false;
        }
        // Climb nested binary expressions to decide whether the whole chain sits under a wait.
        if (parentNode.getKind() == NodeKind.BINARY_EXPR) {
            return validateBinaryExpr((BLangBinaryExpr) parentNode);
        }
        return true;
    }
    // Analyzes both operands of an elvis (?:) expression.
    @Override
    public void visit(BLangElvisExpr elvisExpr, AnalyzerData data) {
        analyzeExpr(elvisExpr.lhsExpr, data);
        analyzeExpr(elvisExpr.rhsExpr, data);
    }
    @Override
    public void visit(BLangGroupExpr groupExpr, AnalyzerData data) {
        analyzeExpr(groupExpr.expression, data);
    }
    @Override
    public void visit(BLangUnaryExpr unaryExpr, AnalyzerData data) {
        analyzeExpr(unaryExpr.expr, data);
    }
    @Override
    public void visit(BLangTypedescExpr accessExpr, AnalyzerData data) {
        /* ignore */
    }
    // Analyzes the converted expression and any annotation attachments on the cast.
    @Override
    public void visit(BLangTypeConversionExpr conversionExpr, AnalyzerData data) {
        analyzeExpr(conversionExpr.expr, data);
        conversionExpr.annAttachments.forEach(annotationAttachment -> analyzeNode(annotationAttachment, data));
    }
    @Override
    public void visit(BLangXMLQName xmlQName, AnalyzerData data) {
        /* ignore */
    }
    // Analyzes the name and value of an xml attribute.
    @Override
    public void visit(BLangXMLAttribute xmlAttribute, AnalyzerData data) {
        analyzeExpr(xmlAttribute.name, data);
        analyzeExpr(xmlAttribute.value, data);
    }
    // Analyzes tag names, attributes and children of an xml element literal.
    @Override
    public void visit(BLangXMLElementLiteral xmlElementLiteral, AnalyzerData data) {
        analyzeExpr(xmlElementLiteral.startTagName, data);
        analyzeExpr(xmlElementLiteral.endTagName, data);
        analyzeExprs(xmlElementLiteral.attributes, data);
        analyzeExprs(xmlElementLiteral.children, data);
    }
    @Override
    public void visit(BLangXMLSequenceLiteral xmlSequenceLiteral, AnalyzerData data) {
        analyzeExprs(xmlSequenceLiteral.xmlItems, data);
    }
    @Override
    public void visit(BLangXMLTextLiteral xmlTextLiteral, AnalyzerData data) {
        analyzeExprs(xmlTextLiteral.textFragments, data);
    }
    @Override
    public void visit(BLangXMLCommentLiteral xmlCommentLiteral, AnalyzerData data) {
        analyzeExprs(xmlCommentLiteral.textFragments, data);
    }
    // Analyzes the data fragments and target of an xml processing instruction.
    @Override
    public void visit(BLangXMLProcInsLiteral xmlProcInsLiteral, AnalyzerData data) {
        analyzeExprs(xmlProcInsLiteral.dataFragments, data);
        analyzeExpr(xmlProcInsLiteral.target, data);
    }
    @Override
    public void visit(BLangXMLQuotedString xmlQuotedString, AnalyzerData data) {
        analyzeExprs(xmlQuotedString.textFragments, data);
    }
    // Analyzes the interpolated expressions of a string template.
    @Override
    public void visit(BLangStringTemplateLiteral stringTemplateLiteral, AnalyzerData data) {
        analyzeExprs(stringTemplateLiteral.exprs, data);
    }
    // Analyzes both the literal string parts and the interpolations of a raw template.
    @Override
    public void visit(BLangRawTemplateLiteral rawTemplateLiteral, AnalyzerData data) {
        analyzeExprs(rawTemplateLiteral.strings, data);
        analyzeExprs(rawTemplateLiteral.insertions, data);
    }
    /**
     * Analyzes a lambda function. Lambdas that are desugared named workers are registered as
     * worker state machines on the enclosing worker-action system; ordinary lambdas get a
     * fresh worker-action system with a default worker.
     */
    @Override
    public void visit(BLangLambdaFunction bLangLambdaFunction, AnalyzerData data) {
        boolean isWorker = false;
        analyzeNode(bLangLambdaFunction.function, data);
        // A transactional named worker may only be declared inside a transaction scope.
        if (bLangLambdaFunction.function.flagSet.contains(Flag.TRANSACTIONAL) &&
                bLangLambdaFunction.function.flagSet.contains(Flag.WORKER) && !data.withinTransactionScope) {
            dlog.error(bLangLambdaFunction.pos, DiagnosticErrorCode.TRANSACTIONAL_WORKER_OUT_OF_TRANSACTIONAL_SCOPE,
                    bLangLambdaFunction);
            return;
        }
        if (bLangLambdaFunction.parent.getKind() == NodeKind.VARIABLE) {
            String workerVarName = ((BLangSimpleVariable) bLangLambdaFunction.parent).name.value;
            if (workerVarName.startsWith(WORKER_LAMBDA_VAR_PREFIX)) {
                // Strip the worker-lambda prefix to recover the declared worker name.
                String workerName = workerVarName.substring(1);
                isWorker = true;
                data.workerActionSystemStack.peek().startWorkerActionStateMachine(workerName,
                        bLangLambdaFunction.function.pos,
                        bLangLambdaFunction.function);
            }
        }
        if (isWorker) {
            this.visitFunction(bLangLambdaFunction.function, data);
        } else {
            // Ordinary lambda: analyze within its own worker-action system.
            try {
                this.initNewWorkerActionSystem(data);
                data.workerActionSystemStack.peek().startWorkerActionStateMachine(DEFAULT_WORKER_NAME,
                        bLangLambdaFunction.pos,
                        bLangLambdaFunction.function);
                this.visitFunction(bLangLambdaFunction.function, data);
                data.workerActionSystemStack.peek().endWorkerActionStateMachine();
            } finally {
                this.finalizeCurrentWorkerActionSystem(data);
            }
        }
        if (isWorker) {
            data.workerActionSystemStack.peek().endWorkerActionStateMachine();
        }
    }
    /**
     * Analyzes an arrow function body. If the arrow function appears inside a field default
     * value, the state switches to FUNCTION_IN_DEFAULT_VALUE so check-expression rules apply.
     */
    @Override
    public void visit(BLangArrowFunction bLangArrowFunction, AnalyzerData data) {
        DefaultValueState prevDefaultValueState = data.defaultValueState;
        if (prevDefaultValueState == DefaultValueState.RECORD_FIELD_DEFAULT ||
                prevDefaultValueState == DefaultValueState.OBJECT_FIELD_INITIALIZER) {
            data.defaultValueState = DefaultValueState.FUNCTION_IN_DEFAULT_VALUE;
        }
        analyzeExpr(bLangArrowFunction.body.expr, data);
        data.defaultValueState = prevDefaultValueState;
    }
/* Type Nodes */
    /**
     * Analyzes a record type's fields within the type's scope, marking each field so that
     * default-value expressions are checked under RECORD_FIELD_DEFAULT rules.
     */
    @Override
    public void visit(BLangRecordTypeNode recordTypeNode, AnalyzerData data) {
        data.env = SymbolEnv.createTypeEnv(recordTypeNode, recordTypeNode.symbol.scope, data.env);
        for (BLangSimpleVariable field : recordTypeNode.fields) {
            DefaultValueState prevDefaultValueState = data.defaultValueState;
            data.defaultValueState = DefaultValueState.RECORD_FIELD_DEFAULT;
            analyzeNode(field, data);
            data.defaultValueState = prevDefaultValueState;
        }
    }
    /**
     * Analyzes an object type's fields and functions within the type's scope. Functions
     * (including init, if present) are analyzed in source order.
     */
    @Override
    public void visit(BLangObjectTypeNode objectTypeNode, AnalyzerData data) {
        data.env = SymbolEnv.createTypeEnv(objectTypeNode, objectTypeNode.symbol.scope, data.env);
        for (BLangSimpleVariable field : objectTypeNode.fields) {
            analyzeNode(field, data);
        }
        List<BLangFunction> bLangFunctionList = new ArrayList<>(objectTypeNode.functions);
        if (objectTypeNode.initFunction != null) {
            bLangFunctionList.add(objectTypeNode.initFunction);
        }
        // Analyze methods in declaration order so diagnostics appear in source order.
        bLangFunctionList.sort(Comparator.comparingInt(function -> function.pos.lineRange().startLine().line()));
        for (BLangFunction function : bLangFunctionList) {
            analyzeNode(function, data);
        }
    }
    @Override
    public void visit(BLangValueType valueType, AnalyzerData data) {
        /* ignore */
    }
    /**
     * Validates inferred array sizes: the inferred-size indicator may appear only in the
     * first dimension, and only in contexts where the size can actually be inferred.
     */
    @Override
    public void visit(BLangArrayType arrayType, AnalyzerData data) {
        if (containsInferredArraySizesOfHigherDimensions(arrayType.sizes)) {
            dlog.error(arrayType.pos, DiagnosticErrorCode.INFER_SIZE_ONLY_SUPPORTED_IN_FIRST_DIMENSION);
        } else if (isSizeInferredArray(arrayType.sizes) && !isValidInferredArray(arrayType.parent)) {
            dlog.error(arrayType.pos, DiagnosticErrorCode.CANNOT_INFER_SIZE_ARRAY_SIZE_FROM_THE_CONTEXT);
        }
        analyzeTypeNode(arrayType.elemtype, data);
    }
    // True when the last (first syntactic) dimension carries the inferred-size indicator.
    private boolean isSizeInferredArray(List<BLangExpression> indexSizes) {
        return !indexSizes.isEmpty() && isInferredArrayIndicator(indexSizes.get(indexSizes.size() - 1));
    }
    // True when the size expression is the literal inferred-array indicator (`*`).
    private boolean isInferredArrayIndicator(BLangExpression size) {
        return size.getKind() == LITERAL && ((BLangLiteral) size).value.equals(Constants.INFERRED_ARRAY_INDICATOR);
    }
private boolean containsInferredArraySizesOfHigherDimensions(List<BLangExpression> sizes) {
if (sizes.size() < 2) {
return false;
}
for (int i = 0; i < sizes.size() - 1; i++) {
if (isInferredArrayIndicator(sizes.get(i))) {
return true;
}
}
return false;
}
    @Override
    public void visit(BLangBuiltInRefTypeNode builtInRefType, AnalyzerData data) {
        /* ignore */
    }
    // Analyzes the constraint type of a constrained type (e.g. map<T>, typedesc<T>).
    @Override
    public void visit(BLangConstrainedType constrainedType, AnalyzerData data) {
        analyzeTypeNode(constrainedType.constraint, data);
    }
    // Analyzes both the value constraint and the completion (error) type of a stream.
    @Override
    public void visit(BLangStreamType streamType, AnalyzerData data) {
        analyzeTypeNode(streamType.constraint, data);
        analyzeTypeNode(streamType.error, data);
    }
    // Analyzes the row constraint and, if present, the key type constraint of a table.
    @Override
    public void visit(BLangTableTypeNode tableType, AnalyzerData data) {
        analyzeTypeNode(tableType.constraint, data);
        if (tableType.tableKeyTypeConstraint != null) {
            analyzeTypeNode(tableType.tableKeyTypeConstraint.keyType, data);
        }
    }
    /**
     * Analyzes an error type's detail type, skipping the compiler-generated intersected
     * error-detail type to avoid analyzing a synthetic construct.
     */
    @Override
    public void visit(BLangErrorType errorType, AnalyzerData data) {
        BLangType detailType = errorType.detailType;
        if (detailType != null && detailType.getKind() == NodeKind.CONSTRAINED_TYPE) {
            BLangType constraint = ((BLangConstrainedType) detailType).constraint;
            if (constraint.getKind() == NodeKind.USER_DEFINED_TYPE) {
                BLangUserDefinedType userDefinedType = (BLangUserDefinedType) constraint;
                // Synthetic detail type introduced for error intersections — nothing to check.
                if (userDefinedType.typeName.value.equals(TypeDefBuilderHelper.INTERSECTED_ERROR_DETAIL)) {
                    return;
                }
            }
        }
        analyzeTypeNode(errorType.detailType, data);
    }
    // Warns when a user-defined type reference names a deprecated type.
    @Override
    public void visit(BLangUserDefinedType userDefinedType, AnalyzerData data) {
        BTypeSymbol typeSymbol = userDefinedType.getBType().tsymbol;
        if (typeSymbol != null && Symbols.isFlagOn(typeSymbol.flags, Flags.DEPRECATED)) {
            logDeprecatedWaring(userDefinedType.typeName.toString(), typeSymbol, userDefinedType.pos);
        }
    }
    // Analyzes each member type and the rest type of a tuple.
    @Override
    public void visit(BLangTupleTypeNode tupleTypeNode, AnalyzerData data) {
        tupleTypeNode.memberTypeNodes.forEach(memberType -> analyzeTypeNode(memberType, data));
        analyzeTypeNode(tupleTypeNode.restParamType, data);
    }
    @Override
    public void visit(BLangUnionTypeNode unionTypeNode, AnalyzerData data) {
        unionTypeNode.memberTypeNodes.forEach(memberType -> analyzeTypeNode(memberType, data));
    }
    @Override
    public void visit(BLangIntersectionTypeNode intersectionTypeNode, AnalyzerData data) {
        for (BLangType constituentTypeNode : intersectionTypeNode.constituentTypeNodes) {
            analyzeTypeNode(constituentTypeNode, data);
        }
    }
    // Analyzes parameters and return type; the bare `function` type has neither.
    @Override
    public void visit(BLangFunctionTypeNode functionTypeNode, AnalyzerData data) {
        if (functionTypeNode.flagSet.contains(Flag.ANY_FUNCTION)) {
            return;
        }
        functionTypeNode.params.forEach(node -> analyzeNode(node, data));
        analyzeTypeNode(functionTypeNode.returnTypeNode, data);
    }
    @Override
    public void visit(BLangFiniteTypeNode finiteTypeNode, AnalyzerData data) {
        /* Ignore */
    }
    // Analyzes the underlying expression of a rest-args (`...x`) argument.
    @Override
    public void visit(BLangRestArgsExpression bLangVarArgsExpression, AnalyzerData data) {
        analyzeExpr(bLangVarArgsExpression.expr, data);
    }
    // Analyzes the value expression of a named (`name = x`) argument.
    @Override
    public void visit(BLangNamedArgsExpression bLangNamedArgsExpression, AnalyzerData data) {
        analyzeExpr(bLangNamedArgsExpression.expr, data);
    }
    /**
     * Validates a check expression: check is restricted inside record-field defaults and
     * object-field initializers, and elsewhere its error type must be returnable by the
     * enclosing invokable (unless an enclosing on-fail handles the failure).
     */
    @Override
    public void visit(BLangCheckedExpr checkedExpr, AnalyzerData data) {
        data.failVisited = true;
        analyzeExpr(checkedExpr.expr, data);
        // Module-level (package-owned) checks are validated elsewhere.
        if (data.env.scope.owner.getKind() == SymbolKind.PACKAGE) {
            return;
        }
        BLangInvokableNode enclInvokable = data.env.enclInvokable;
        List<BType> equivalentErrorTypeList = checkedExpr.equivalentErrorTypeList;
        if (equivalentErrorTypeList != null && !equivalentErrorTypeList.isEmpty()) {
            // check cannot appear in a record field default expression at all.
            if (data.defaultValueState == DefaultValueState.RECORD_FIELD_DEFAULT) {
                dlog.error(checkedExpr.pos,
                        DiagnosticErrorCode.INVALID_USAGE_OF_CHECK_IN_RECORD_FIELD_DEFAULT_EXPRESSION);
                return;
            }
            if (data.defaultValueState == DefaultValueState.OBJECT_FIELD_INITIALIZER) {
                // In object field initializers, check is only allowed when init() exists and
                // can return the checked expression's error types.
                BAttachedFunction initializerFunc =
                        ((BObjectTypeSymbol) getEnclosingClass(data.env).getBType().tsymbol).initializerFunc;
                if (initializerFunc == null) {
                    dlog.error(checkedExpr.pos,
                            DiagnosticErrorCode
                                    .INVALID_USAGE_OF_CHECK_IN_OBJECT_FIELD_INITIALIZER_IN_OBJECT_WITH_NO_INIT_METHOD);
                    return;
                }
                BType exprErrorTypes = getErrorTypes(checkedExpr.expr.getBType());
                BType initMethodReturnType = initializerFunc.type.retType;
                if (!types.isAssignable(exprErrorTypes, initMethodReturnType)) {
                    dlog.error(checkedExpr.pos, DiagnosticErrorCode
                                    .INVALID_USAGE_OF_CHECK_IN_OBJECT_FIELD_INITIALIZER_WITH_INIT_METHOD_RETURN_TYPE_MISMATCH,
                            initMethodReturnType, exprErrorTypes);
                }
                return;
            }
        }
        if (enclInvokable == null) {
            return;
        }
        BType exprType = enclInvokable.getReturnTypeNode().getBType();
        BType checkedExprType = checkedExpr.expr.getBType();
        BType errorType = getErrorTypes(checkedExprType);
        if (errorType == symTable.semanticError) {
            return;
        }
        // The checked error must be returnable by the enclosing invokable unless a
        // surrounding construct (e.g. on-fail) already handles the failure.
        if (!data.failureHandled && !types.isAssignable(errorType, exprType) &&
                !types.isNeverTypeOrStructureTypeWithARequiredNeverMember(checkedExprType)) {
            dlog.error(checkedExpr.pos,
                    DiagnosticErrorCode.CHECKED_EXPR_NO_MATCHING_ERROR_RETURN_IN_ENCL_INVOKABLE);
        }
        if (!data.errorTypes.empty()) {
            data.errorTypes.peek().add(getErrorTypes(checkedExpr.expr.getBType()));
        }
        // Record the error portion of the return type for later accumulation.
        BType errorTypes;
        if (exprType.tag == TypeTags.UNION) {
            errorTypes = types.getErrorType((BUnionType) exprType);
        } else {
            errorTypes = exprType;
        }
        data.returnTypes.peek().add(errorTypes);
    }
@Override
public void visit(BLangCheckPanickedExpr checkPanicExpr, AnalyzerData data) {
    // 'checkpanic' panics instead of propagating errors, so only the wrapped expression needs analysis.
    analyzeExpr(checkPanicExpr.expr, data);
}
@Override
public void visit(BLangServiceConstructorExpr serviceConstructorExpr, AnalyzerData data) {
    // No-op: service constructor expressions need no additional code analysis here.
}
@Override
public void visit(BLangQueryExpr queryExpr, AnalyzerData data) {
    // Save and restore query flags so nested queries see consistent state.
    boolean prevQueryToTableWithKey = data.queryToTableWithKey;
    data.queryToTableWithKey = queryExpr.isTable() && !queryExpr.fieldNameIdentifierList.isEmpty();
    // NOTE(review): queryToMap is overwritten but, unlike queryToTableWithKey/withinQuery,
    // its previous value is not restored below — confirm this is intentional for nested queries.
    data.queryToMap = queryExpr.isMap;
    boolean prevWithinQuery = data.withinQuery;
    data.withinQuery = true;
    int fromCount = 0;
    for (BLangNode clause : queryExpr.getQueryClauses()) {
        if (clause.getKind() == NodeKind.FROM) {
            fromCount++;
            BLangFromClause fromClause = (BLangFromClause) clause;
            BLangExpression collection = (BLangExpression) fromClause.getCollection();
            // A one-shot stream may only feed the very first 'from' clause; later
            // 'from' clauses iterate their input multiple times.
            if (fromCount > 1) {
                if (TypeTags.STREAM == Types.getReferredType(collection.getBType()).tag) {
                    this.dlog.error(collection.pos, DiagnosticErrorCode.NOT_ALLOWED_STREAM_USAGE_WITH_FROM);
                }
            }
        }
        analyzeNode(clause, data);
    }
    data.withinQuery = prevWithinQuery;
    data.queryToTableWithKey = prevQueryToTableWithKey;
}
@Override
public void visit(BLangQueryAction queryAction, AnalyzerData data) {
    // A query action handles failures internally; remember the outer flags and restore them after.
    boolean previousFailureHandled = data.failureHandled;
    boolean previousWithinQuery = data.withinQuery;
    data.failureHandled = true;
    data.withinQuery = true;
    int fromClausesSeen = 0;
    for (BLangNode clause : queryAction.getQueryClauses()) {
        if (clause.getKind() == NodeKind.FROM) {
            fromClausesSeen++;
            BLangExpression inputCollection = (BLangExpression) ((BLangFromClause) clause).getCollection();
            // Streams are single-pass, so only the first 'from' clause may consume one.
            if (fromClausesSeen > 1
                    && Types.getReferredType(inputCollection.getBType()).tag == TypeTags.STREAM) {
                this.dlog.error(inputCollection.pos, DiagnosticErrorCode.NOT_ALLOWED_STREAM_USAGE_WITH_FROM);
            }
        }
        analyzeNode(clause, data);
    }
    validateActionParentNode(queryAction.pos, queryAction);
    data.failureHandled = previousFailureHandled;
    data.withinQuery = previousWithinQuery;
}
@Override
public void visit(BLangFromClause fromClause, AnalyzerData data) {
    // Only the iterated collection expression needs analysis for a 'from' clause.
    analyzeExpr(fromClause.collection, data);
}
@Override
public void visit(BLangJoinClause joinClause, AnalyzerData data) {
    // Analyze the joined collection first, then the optional 'on' condition.
    analyzeExpr(joinClause.collection, data);
    var joinCondition = joinClause.onClause;
    if (joinCondition != null) {
        analyzeNode(joinCondition, data);
    }
}
@Override
public void visit(BLangLetClause letClause, AnalyzerData data) {
    // Each 'let' declaration is analyzed as an ordinary variable node.
    letClause.letVarDeclarations.forEach(
            letVariable -> analyzeNode((BLangNode) letVariable.definitionNode.getVariable(), data));
}
@Override
public void visit(BLangWhereClause whereClause, AnalyzerData data) {
    // Analyze the filter condition expression.
    analyzeExpr(whereClause.expression, data);
}
@Override
public void visit(BLangOnClause onClause, AnalyzerData data) {
    // Both sides of the join equality condition are analyzed.
    analyzeExpr(onClause.lhsExpr, data);
    analyzeExpr(onClause.rhsExpr, data);
}
@Override
public void visit(BLangOrderByClause orderByClause, AnalyzerData data) {
    // Every order-by key expression must be analyzed.
    for (var orderKeyNode : orderByClause.orderByKeyList) {
        analyzeExpr((BLangExpression) orderKeyNode.getOrderKey(), data);
    }
}
@Override
public void visit(BLangSelectClause selectClause, AnalyzerData data) {
    // Analyze the projected expression of the query.
    analyzeExpr(selectClause.expression, data);
}
@Override
public void visit(BLangOnConflictClause onConflictClause, AnalyzerData data) {
    analyzeExpr(onConflictClause.expression, data);
    // 'on conflict' is only meaningful when constructing a map or a keyed table.
    boolean conflictTargetSupportsKeys = data.queryToTableWithKey || data.queryToMap;
    if (!conflictTargetSupportsKeys) {
        dlog.error(onConflictClause.pos,
                DiagnosticErrorCode.ON_CONFLICT_ONLY_WORKS_WITH_MAPS_OR_TABLES_WITH_KEY_SPECIFIER);
    }
}
@Override
public void visit(BLangDoClause doClause, AnalyzerData data) {
    // Analyze the statement body of the query action's 'do' block.
    analyzeNode(doClause.body, data);
}
@Override
public void visit(BLangOnFailClause onFailClause, AnalyzerData data) {
    // Track whether the on-fail body itself contains a 'fail'; restore the outer flag afterwards.
    boolean currentFailVisited = data.failVisited;
    data.failVisited = false;
    VariableDefinitionNode onFailVarDefNode = onFailClause.variableDefinitionNode;
    if (onFailVarDefNode != null) {
        BLangVariable onFailVarNode = (BLangVariable) onFailVarDefNode.getVariable();
        // Every error type that can reach this clause must be assignable to the declared variable.
        for (BType errorType : data.errorTypes.peek()) {
            if (!types.isAssignable(errorType, onFailVarNode.getBType())) {
                dlog.error(onFailVarNode.pos, DiagnosticErrorCode.INCOMPATIBLE_ON_FAIL_ERROR_DEFINITION, errorType,
                        onFailVarNode.getBType());
            }
        }
    }
    analyzeNode(onFailClause.body, data);
    onFailClause.bodyContainsFail = data.failVisited;
    data.failVisited = currentFailVisited;
}
@Override
public void visit(BLangLimitClause limitClause, AnalyzerData data) {
    // Analyze the limit count expression.
    analyzeExpr(limitClause.expression, data);
}
@Override
public void visit(BLangTypeTestExpr typeTestExpr, AnalyzerData data) {
    BLangExpression expr = typeTestExpr.expr;
    analyzeNode(expr, data);
    BType exprType = expr.getBType();
    BType typeNodeType = typeTestExpr.typeNode.getBType();
    // Skip further diagnostics if either side already failed semantic analysis.
    if (typeNodeType == symTable.semanticError || exprType == symTable.semanticError) {
        return;
    }
    // If the static type is already a subtype of the tested type, the test result is
    // statically known: hint that the condition is always true (or false when negated).
    if (types.isAssignable(exprType, typeNodeType)) {
        if (typeTestExpr.isNegation) {
            dlog.hint(typeTestExpr.pos, DiagnosticHintCode.EXPRESSION_ALWAYS_FALSE);
            return;
        }
        if (types.isNeverTypeOrStructureTypeWithARequiredNeverMember(exprType)) {
            dlog.hint(typeTestExpr.pos, DiagnosticHintCode.UNNECESSARY_CONDITION_FOR_VARIABLE_OF_TYPE_NEVER);
            return;
        }
        dlog.hint(typeTestExpr.pos, DiagnosticHintCode.UNNECESSARY_CONDITION);
        return;
    }
    // Otherwise the test can only ever succeed if the two types can intersect at runtime.
    if (!intersectionExists(expr, typeNodeType, data, typeTestExpr.pos)) {
        dlog.error(typeTestExpr.pos, DiagnosticErrorCode.INCOMPATIBLE_TYPE_CHECK, exprType, typeNodeType);
    }
}
@Override
public void visit(BLangAnnotAccessExpr annotAccessExpr, AnalyzerData data) {
    // Analyze the accessed expression, then warn if the annotation itself is deprecated.
    analyzeExpr(annotAccessExpr.expr, data);
    BAnnotationSymbol annotationSymbol = annotAccessExpr.annotationSymbol;
    if (annotationSymbol == null) {
        return;
    }
    if (Symbols.isFlagOn(annotationSymbol.flags, Flags.DEPRECATED)) {
        logDeprecatedWaring(annotAccessExpr.annotationName.toString(), annotationSymbol, annotAccessExpr.pos);
    }
}
@Override
public void visit(BLangRegExpTemplateLiteral regExpTemplateLiteral, AnalyzerData data) {
    // Only the interpolated expressions inside the regex template require analysis.
    for (BLangExpression interpolation
            : symResolver.getListOfInterpolations(regExpTemplateLiteral.reDisjunction.sequenceList)) {
        analyzeExpr(interpolation, data);
    }
}
private void logDeprecatedWaring(String deprecatedConstruct, BSymbol symbol, Location pos) {
    // Qualify the construct with its package unless it belongs to the default ('.') package.
    // (Method name typo is pre-existing and kept, since callers use it.)
    String qualifiedName = Names.DOT.equals(symbol.pkgID.name)
            ? deprecatedConstruct
            : symbol.pkgID + ":" + deprecatedConstruct;
    dlog.warning(pos, DiagnosticWarningCode.USAGE_OF_DEPRECATED_CONSTRUCT, qualifiedName);
}
private boolean intersectionExists(BLangExpression expression, BType testType, AnalyzerData data,
                                   Location intersectionPos) {
    // True when the expression's static type and the tested type can overlap at runtime.
    BType expressionType = expression.getBType();
    BType intersectionType = types.getTypeIntersection(
            Types.IntersectionContext.typeTestIntersectionExistenceContext(intersectionPos),
            expressionType, testType, data.env);
    // 'any is readonly' yields no computed intersection but is still a legal narrowing test.
    return (intersectionType != symTable.semanticError) ||
            (expressionType.tag == TypeTags.ANY && testType.tag == TypeTags.READONLY);
}
@Override
public void visit(BLangInferredTypedescDefaultNode inferTypedescExpr, AnalyzerData data) {
    /* No-op: inferred typedesc defaults require no code analysis. */
}
private <E extends BLangExpression> void analyzeExpr(E node, AnalyzerData data) {
    if (node == null) {
        return;
    }
    SymbolEnv prevEnv = data.env;
    BLangNode parent = data.parent;
    // Link the node into the tree and make it the current parent while visiting children.
    node.parent = data.parent;
    data.parent = node;
    node.accept(this, data);
    data.parent = parent;
    // Post-visit checks run with the parent pointer already restored.
    checkAccess(node, data);
    checkExpressionValidity(node, data);
    data.env = prevEnv;
}
private <E extends BLangExpression> void checkExpressionValidity(E exprNode, AnalyzerData data) {
    // Grouping expressions are checked via their inner expression instead.
    if (exprNode.getKind() == NodeKind.GROUP_EXPR) {
        return;
    }
    // Restrictions apply only to 'never'-typed values (or structures with a required never member).
    if (!types.isNeverTypeOrStructureTypeWithARequiredNeverMember(exprNode.getBType())) {
        return;
    }
    if (!checkExpressionInValidParent(exprNode.parent, data)) {
        dlog.error(exprNode.pos, DiagnosticErrorCode.EXPRESSION_OF_NEVER_TYPE_NOT_ALLOWED);
    }
}
private boolean checkExpressionInValidParent(BLangNode currentParent, AnalyzerData data) {
    if (currentParent == null) {
        return false;
    }
    // Look through grouping parentheses to the effective parent.
    if (currentParent.getKind() == NodeKind.GROUP_EXPR) {
        return checkExpressionInValidParent(currentParent.parent, data);
    }
    // A 'never'-typed expression is only valid as an expression statement, as the
    // initializer of an explicitly 'future'-typed variable, or inside a 'trap' expression.
    // Fix: inspect 'currentParent' (the effective parent after unwrapping groups) rather
    // than 'data.parent', which still points at the original node's parent and could be a
    // group expression — the old cast risked a ClassCastException when recursing through
    // GROUP_EXPR. Also guard against a null typeNode ('var'-declared variables).
    if (currentParent.getKind() == NodeKind.EXPRESSION_STATEMENT
            || currentParent.getKind() == NodeKind.TRAP_EXPR) {
        return true;
    }
    if (currentParent.getKind() == NodeKind.VARIABLE) {
        BLangType typeNode = ((BLangSimpleVariable) currentParent).typeNode;
        return typeNode != null && typeNode.getBType().tag == TypeTags.FUTURE;
    }
    return false;
}
@Override
public void visit(BLangConstant constant, AnalyzerData data) {
    analyzeTypeNode(constant.typeNode, data);
    analyzeNode(constant.expr, data);
    // Presumably validates that the constant's type is exportable — see analyzeExportableTypeRef.
    analyzeExportableTypeRef(constant.symbol, constant.symbol.type.tsymbol, false, constant.pos);
    constant.annAttachments.forEach(annotationAttachment -> analyzeNode(annotationAttachment, data));
}
/**
 * Verifies that this expression does not refer to symbols that are inaccessible from the
 * current package (i.e. non-public symbols belonging to another package) and reports an
 * error for each such reference.
 *
 * @param node expression node to analyze
 * @param data data used to analyze the node
 */
private <E extends BLangExpression> void checkAccess(E node, AnalyzerData data) {
    BType nodeType = node.getBType();
    if (nodeType != null) {
        checkAccessSymbol(nodeType.tsymbol, data.env.enclPkg.symbol.pkgID, node.pos);
    }
    // Invocations additionally reference the invoked function's own symbol.
    if (node.getKind() == NodeKind.INVOCATION) {
        BLangInvocation invocation = (BLangInvocation) node;
        checkAccessSymbol(invocation.symbol, data.env.enclPkg.symbol.pkgID, invocation.pos);
    }
}
private void checkAccessSymbol(BSymbol symbol, PackageID pkgID, Location position) {
    // A symbol from a different package must be public to be referenced here.
    if (symbol == null || pkgID.equals(symbol.pkgID) || Symbols.isPublic(symbol)) {
        return;
    }
    dlog.error(position, DiagnosticErrorCode.ATTEMPT_REFER_NON_ACCESSIBLE_SYMBOL, symbol.name);
}
private <E extends BLangExpression> void analyzeExprs(List<E> nodeList, AnalyzerData data) {
    // Indexed iteration re-reads size() each pass — presumably tolerates the list growing
    // while nodes are analyzed; TODO confirm before converting to an enhanced for loop.
    for (int i = 0; i < nodeList.size(); i++) {
        analyzeExpr(nodeList.get(i), data);
    }
}
private void initNewWorkerActionSystem(AnalyzerData data) {
    // A fresh action system is pushed for each scope whose worker interactions are validated together.
    data.workerActionSystemStack.push(new WorkerActionSystem());
}
private void finalizeCurrentWorkerActionSystem(AnalyzerData data) {
    // Validate interactions only for systems that were built up without errors.
    WorkerActionSystem completedSystem = data.workerActionSystemStack.pop();
    if (completedSystem.hasErrors) {
        return;
    }
    this.validateWorkerInteractions(completedSystem, data);
}
private static boolean isWorkerSend(BLangNode action) {
    // True for an asynchronous worker send action.
    return action.getKind() == NodeKind.WORKER_SEND;
}
private static boolean isWorkerSyncSend(BLangNode action) {
    // True for a synchronous worker send action.
    return action.getKind() == NodeKind.WORKER_SYNC_SEND;
}
private static boolean isWaitAction(BLangNode action) {
    // True for a 'wait' expression.
    return action.getKind() == NodeKind.WAIT_EXPR;
}
private String extractWorkerId(BLangNode action) {
    // Pull the peer worker's name out of a send, sync-send, or receive action.
    if (isWorkerSend(action)) {
        return ((BLangWorkerSend) action).workerIdentifier.value;
    }
    if (isWorkerSyncSend(action)) {
        return ((BLangWorkerSyncSendExpr) action).workerIdentifier.value;
    }
    return ((BLangWorkerReceive) action).workerIdentifier.value;
}
private void validateWorkerInteractions(WorkerActionSystem workerActionSystem, AnalyzerData data) {
    // First reject interactions targeting workers that were already waited on.
    if (!validateWorkerInteractionsAfterWaitAction(workerActionSystem)) {
        return;
    }
    BLangNode currentAction;
    boolean systemRunning;
    data.workerSystemMovementSequence = 0;
    int systemIterationCount = 0;
    int prevWorkerSystemMovementSequence = data.workerSystemMovementSequence;
    // Simulate all worker state machines, matching sends with receives, until no
    // machine can advance any further.
    do {
        systemRunning = false;
        systemIterationCount++;
        for (WorkerActionStateMachine worker : workerActionSystem.finshedWorkers) {
            if (worker.done()) {
                continue;
            }
            currentAction = worker.currentAction();
            if (isWaitAction(currentAction)) {
                handleWaitAction(workerActionSystem, currentAction, worker, data);
                systemRunning = true;
                continue;
            }
            if (!isWorkerSend(currentAction) && !isWorkerSyncSend(currentAction)) {
                continue;
            }
            // Pair this send with the matching receive in the peer's state machine.
            WorkerActionStateMachine otherSM = workerActionSystem.find(this.extractWorkerId(currentAction));
            if (otherSM.done()) {
                continue;
            }
            if (isWaitAction(otherSM.currentAction())) {
                systemRunning = false;
                continue;
            }
            if (!otherSM.currentIsReceive(worker.workerId)) {
                continue;
            }
            BLangWorkerReceive receive = (BLangWorkerReceive) otherSM.currentAction();
            if (isWorkerSyncSend(currentAction)) {
                this.validateWorkerActionParameters((BLangWorkerSyncSendExpr) currentAction, receive);
            } else {
                this.validateWorkerActionParameters((BLangWorkerSend) currentAction, receive);
            }
            // Advance both machines past the matched send/receive pair.
            otherSM.next();
            data.workerSystemMovementSequence++;
            worker.next();
            data.workerSystemMovementSequence++;
            systemRunning = true;
            String channelName = generateChannelName(worker.workerId, otherSM.workerId);
            otherSM.node.sendsToThis.add(channelName);
            worker.node.sendsToThis.add(channelName);
        }
        // After a full sweep of every worker, stop if no machine moved — the system is stuck.
        if (systemIterationCount > workerActionSystem.finshedWorkers.size()) {
            systemIterationCount = 0;
            if (prevWorkerSystemMovementSequence == data.workerSystemMovementSequence) {
                systemRunning = false;
            }
            prevWorkerSystemMovementSequence = data.workerSystemMovementSequence;
        }
    } while (systemRunning);
    // Any machine left with pending actions indicates an invalid (e.g. deadlocked) interaction.
    if (!workerActionSystem.everyoneDone()) {
        this.reportInvalidWorkerInteractionDiagnostics(workerActionSystem);
    }
}
private boolean validateWorkerInteractionsAfterWaitAction(WorkerActionSystem workerActionSystem) {
    // A worker must not send to or receive from a peer it has already waited on.
    boolean isValid = true;
    for (WorkerActionStateMachine worker : workerActionSystem.finshedWorkers) {
        Set<String> waitingOnWorkerSet = new HashSet<>();
        for (BLangNode action : worker.actions) {
            if (isWaitAction(action)) {
                // Record every worker referenced by this wait action.
                if (action instanceof BLangWaitForAllExpr) {
                    BLangWaitForAllExpr waitForAllExpr = (BLangWaitForAllExpr) action;
                    for (BLangWaitForAllExpr.BLangWaitKeyValue keyValuePair : waitForAllExpr.keyValuePairs) {
                        BSymbol workerSymbol = getWorkerSymbol(keyValuePair);
                        if (workerSymbol != null) {
                            waitingOnWorkerSet.add(workerSymbol.name.value);
                        }
                    }
                } else {
                    BLangWaitExpr wait = (BLangWaitExpr) action;
                    for (String workerName : getWorkerNameList(wait.exprList.get(0),
                            workerActionSystem.getActionEnvironment(wait))) {
                        waitingOnWorkerSet.add(workerName);
                    }
                }
            } else if (isWorkerSend(action)) {
                // Async send to an already-awaited worker is invalid.
                BLangWorkerSend send = (BLangWorkerSend) action;
                if (waitingOnWorkerSet.contains(send.workerIdentifier.value)) {
                    dlog.error(action.pos, DiagnosticErrorCode.WORKER_INTERACTION_AFTER_WAIT_ACTION, action);
                    isValid = false;
                }
            } else if (isWorkerSyncSend(action)) {
                // Sync send to an already-awaited worker is invalid.
                BLangWorkerSyncSendExpr syncSend = (BLangWorkerSyncSendExpr) action;
                if (waitingOnWorkerSet.contains(syncSend.workerIdentifier.value)) {
                    dlog.error(action.pos, DiagnosticErrorCode.WORKER_INTERACTION_AFTER_WAIT_ACTION, action);
                    isValid = false;
                }
            } else if (action.getKind() == NodeKind.WORKER_RECEIVE) {
                // Receive from an already-awaited worker is invalid.
                BLangWorkerReceive receive = (BLangWorkerReceive) action;
                if (waitingOnWorkerSet.contains(receive.workerIdentifier.value)) {
                    dlog.error(action.pos, DiagnosticErrorCode.WORKER_INTERACTION_AFTER_WAIT_ACTION, action);
                    isValid = false;
                }
            }
        }
    }
    return isValid;
}
private void handleWaitAction(WorkerActionSystem workerActionSystem, BLangNode currentAction,
                              WorkerActionStateMachine worker, AnalyzerData data) {
    if (currentAction instanceof BLangWaitForAllExpr) {
        // 'wait {a: f1, b: f2, ...}' can only complete once every referenced worker is done.
        boolean allWorkersAreDone = true;
        BLangWaitForAllExpr waitForAllExpr = (BLangWaitForAllExpr) currentAction;
        for (BLangWaitForAllExpr.BLangWaitKeyValue keyValuePair : waitForAllExpr.keyValuePairs) {
            BSymbol workerSymbol = getWorkerSymbol(keyValuePair);
            if (isWorkerSymbol(workerSymbol)) {
                Name workerName = workerSymbol.name;
                // Only workers belonging to this function participate in the simulation.
                if (isWorkerFromFunction(workerActionSystem.getActionEnvironment(currentAction), workerName)) {
                    WorkerActionStateMachine otherSM = workerActionSystem.find(workerName.value);
                    allWorkersAreDone = allWorkersAreDone && otherSM.done();
                }
            }
        }
        if (allWorkersAreDone) {
            worker.next();
            data.workerSystemMovementSequence++;
        }
    } else {
        // Alternative wait ('wait a|b|...') completes as soon as any referenced worker is done.
        BLangWaitExpr wait = (BLangWaitExpr) currentAction;
        List<String> workerNameList = getWorkerNameList(wait.exprList.get(0),
                workerActionSystem.getActionEnvironment(currentAction));
        // Waiting on non-worker futures: nothing to simulate, just advance.
        if (workerNameList.isEmpty()) {
            worker.next();
            data.workerSystemMovementSequence++;
        }
        for (String workerName : workerNameList) {
            var otherSM = workerActionSystem.find(workerName);
            if (otherSM.done()) {
                worker.next();
                data.workerSystemMovementSequence++;
                break;
            }
        }
    }
}
private BSymbol getWorkerSymbol(BLangWaitForAllExpr.BLangWaitKeyValue keyValuePair) {
    // Prefer the value expression; fall back to the key when only the key names a worker.
    BLangExpression valueExpr = keyValuePair.getValue();
    if (valueExpr != null && valueExpr.getKind() == NodeKind.SIMPLE_VARIABLE_REF) {
        return ((BLangSimpleVarRef) valueExpr).symbol;
    }
    BLangExpression keyExpr = keyValuePair.keyExpr;
    if (keyExpr != null && keyExpr.getKind() == NodeKind.SIMPLE_VARIABLE_REF) {
        return ((BLangSimpleVarRef) keyExpr).symbol;
    }
    return null;
}
private List<String> getWorkerNameList(BLangExpression expr, SymbolEnv functionEnv) {
    // Collects the names of all workers referenced by the given wait expression.
    ArrayList<String> collectedNames = new ArrayList<>();
    populateWorkerNameList(expr, collectedNames, functionEnv);
    return collectedNames;
}
private void populateWorkerNameList(BLangExpression expr, ArrayList<String> workerNames, SymbolEnv functionEnv) {
    NodeKind kind = expr.getKind();
    if (kind == NodeKind.BINARY_EXPR) {
        // Alternate wait targets (w1 | w2): recurse into both operands.
        BLangBinaryExpr binary = (BLangBinaryExpr) expr;
        populateWorkerNameList(binary.lhsExpr, workerNames, functionEnv);
        populateWorkerNameList(binary.rhsExpr, workerNames, functionEnv);
        return;
    }
    if (kind != NodeKind.SIMPLE_VARIABLE_REF) {
        return;
    }
    BLangSimpleVarRef ref = (BLangSimpleVarRef) expr;
    // Collect only references to workers declared in this function's scope chain.
    if (isWorkerSymbol(ref.symbol) && isWorkerFromFunction(functionEnv, ref.symbol.name)) {
        workerNames.add(ref.variableName.value);
    }
}
private boolean isWorkerFromFunction(SymbolEnv functionEnv, Name workerName) {
    // Resolves whether 'workerName' is declared somewhere in this function's scope chain.
    if (functionEnv == null) {
        return false;
    }
    if (functionEnv.scope.lookup(workerName).symbol != null) {
        return true;
    }
    if (functionEnv.enclInvokable != null) {
        Set<Flag> flagSet = functionEnv.enclInvokable.flagSet;
        // A plain lambda (not itself a worker) cannot see the enclosing function's workers.
        if (flagSet.contains(Flag.LAMBDA) && !flagSet.contains(Flag.WORKER)) {
            return false;
        }
    }
    return isWorkerFromFunction(functionEnv.enclEnv, workerName);
}
private boolean isWorkerSymbol(BSymbol symbol) {
    // Use the shared Symbols.isFlagOn helper for consistency with the rest of this analyzer;
    // equivalent to the previous manual '(symbol.flags & Flags.WORKER) == Flags.WORKER' mask.
    return symbol != null && Symbols.isFlagOn(symbol.flags, Flags.WORKER);
}
private void reportInvalidWorkerInteractionDiagnostics(WorkerActionSystem workerActionSystem) {
    // Reported at the first worker's position; the message renders all still-pending actions.
    this.dlog.error(workerActionSystem.getRootPosition(), DiagnosticErrorCode.INVALID_WORKER_INTERACTION,
            workerActionSystem.toString());
}
private void validateWorkerActionParameters(BLangWorkerSend send, BLangWorkerReceive receive) {
    // The sent value's type must be compatible with the receive's expected type.
    types.checkType(receive, send.getBType(), receive.getBType());
    addImplicitCast(send.getBType(), receive);
    NodeKind kind = receive.parent.getKind();
    // Re-run type checking on the enclosing expression when the receive is wrapped
    // in trap / check / checkpanic / fail, so the wrapper sees the updated type.
    if (kind == NodeKind.TRAP_EXPR || kind == NodeKind.CHECK_EXPR || kind == NodeKind.CHECK_PANIC_EXPR ||
            kind == NodeKind.FAIL) {
        typeChecker.checkExpr((BLangExpression) receive.parent, receive.env);
    }
    receive.sendExpression = send.expr;
}
private void validateWorkerActionParameters(BLangWorkerSyncSendExpr send, BLangWorkerReceive receive) {
    send.receive = receive;
    NodeKind parentNodeKind = send.parent.getKind();
    // Propagate the receive's possible error result into the construct consuming the sync send.
    if (parentNodeKind == NodeKind.VARIABLE) {
        BLangSimpleVariable variable = (BLangSimpleVariable) send.parent;
        if (variable.isDeclaredWithVar) {
            variable.setBType(variable.symbol.type = send.expectedType = receive.matchingSendsError);
        }
    } else if (parentNodeKind == NodeKind.ASSIGNMENT) {
        BLangAssignment assignment = (BLangAssignment) send.parent;
        if (assignment.varRef.getKind() == NodeKind.SIMPLE_VARIABLE_REF) {
            BSymbol varSymbol = ((BLangSimpleVarRef) assignment.varRef).symbol;
            if (varSymbol != null) {
                send.expectedType = varSymbol.type;
            }
        }
    }
    // A sync send that can produce an error must not be used as a bare statement.
    if (receive.matchingSendsError != symTable.nilType && parentNodeKind == NodeKind.EXPRESSION_STATEMENT) {
        dlog.error(send.pos, DiagnosticErrorCode.ASSIGNMENT_REQUIRED, send.workerSymbol);
    } else {
        types.checkType(send.pos, receive.matchingSendsError, send.expectedType,
                DiagnosticErrorCode.INCOMPATIBLE_TYPES);
    }
    types.checkType(receive, send.getBType(), receive.getBType());
    addImplicitCast(send.getBType(), receive);
    NodeKind kind = receive.parent.getKind();
    // Re-check the wrapping trap/check/checkpanic expression with the updated receive type.
    if (kind == NodeKind.TRAP_EXPR || kind == NodeKind.CHECK_EXPR || kind == NodeKind.CHECK_PANIC_EXPR) {
        typeChecker.checkExpr((BLangExpression) receive.parent, receive.env);
    }
    receive.sendExpression = send;
}
private void addImplicitCast(BType actualType, BLangWorkerReceive receive) {
    // Only insert a cast when the receive already carries a valid declared type.
    BType declaredType = receive.getBType();
    if (declaredType == null || declaredType == symTable.semanticError) {
        return;
    }
    types.setImplicitCastExpr(receive, actualType, declaredType);
    receive.setBType(actualType);
}
private boolean checkNextBreakValidityInTransaction(AnalyzerData data) {
    // break/continue must not jump out of a transaction unless the loop itself is inside it.
    return !data.loopWithinTransactionCheckStack.peek() && data.transactionCount > 0 && data.withinTransactionScope;
}
private boolean checkReturnValidityInTransaction(AnalyzerData data) {
    // 'return' must not exit an enclosing transaction block directly.
    return !data.returnWithinTransactionCheckStack.peek() && data.transactionCount > 0
            && data.withinTransactionScope;
}
private void validateModuleInitFunction(BLangFunction funcNode) {
    // Only module-level functions named 'init' are subject to these rules.
    boolean isModuleInit = !funcNode.attachedFunction
            && Names.USER_DEFINED_INIT_SUFFIX.value.equals(funcNode.name.value);
    if (!isModuleInit) {
        return;
    }
    // The module init function must be non-public, parameterless, and return error? or ().
    if (Symbols.isPublic(funcNode.symbol)) {
        this.dlog.error(funcNode.pos, DiagnosticErrorCode.MODULE_INIT_CANNOT_BE_PUBLIC);
    }
    if (!funcNode.requiredParams.isEmpty() || funcNode.restParam != null) {
        this.dlog.error(funcNode.pos, DiagnosticErrorCode.MODULE_INIT_CANNOT_HAVE_PARAMS);
    }
    types.validateErrorOrNilReturn(funcNode, DiagnosticErrorCode.MODULE_INIT_RETURN_SHOULD_BE_ERROR_OR_NIL);
}
private BType getErrorTypes(BType bType) {
    // Extracts the error component(s) of the given type; returns semanticError when none exist.
    if (bType == null) {
        return symTable.semanticError;
    }
    BType errorType = symTable.semanticError;
    int tag = bType.tag;
    if (tag == TypeTags.TYPEREFDESC) {
        // Unwrap type references before inspecting.
        return getErrorTypes(Types.getReferredType(bType));
    }
    if (tag == TypeTags.ERROR) {
        errorType = bType;
    } else if (tag == TypeTags.READONLY) {
        // 'readonly' includes immutable errors, so the error component is the error type itself.
        errorType = symTable.errorType;
    } else if (tag == TypeTags.UNION) {
        // Collect error members of the union, deduplicated while preserving order.
        LinkedHashSet<BType> errTypes = new LinkedHashSet<>();
        Set<BType> memTypes = ((BUnionType) bType).getMemberTypes();
        for (BType memType : memTypes) {
            BType memErrType = getErrorTypes(memType);
            if (memErrType != symTable.semanticError) {
                errTypes.add(memErrType);
            }
        }
        if (!errTypes.isEmpty()) {
            errorType = errTypes.size() == 1 ? errTypes.iterator().next() : BUnionType.create(null, errTypes);
        }
    }
    return errorType;
}
/**
 * This class contains the state machines for a set of workers, tracking each worker's
 * pending send/receive/wait actions plus the environment each action was analyzed in.
 */
private static class WorkerActionSystem {
    // NOTE(review): 'finshedWorkers' (sic) is referenced throughout this analyzer;
    // the typo is kept here to avoid a wider rename.
    public List<WorkerActionStateMachine> finshedWorkers = new ArrayList<>();
    // Machines for workers still being populated (innermost worker on top).
    private Stack<WorkerActionStateMachine> workerActionStateMachines = new Stack<>();
    // Environment each interaction node was analyzed in, keyed by node identity.
    private Map<BLangNode, SymbolEnv> workerInteractionEnvironments = new IdentityHashMap<>();
    private boolean hasErrors = false;
    public void startWorkerActionStateMachine(String workerId, Location pos, BLangFunction node) {
        workerActionStateMachines.push(new WorkerActionStateMachine(pos, workerId, node));
    }
    // Moves the machine on top of the stack into the finished list.
    public void endWorkerActionStateMachine() {
        finshedWorkers.add(workerActionStateMachines.pop());
    }
    public void addWorkerAction(BLangNode action) {
        this.workerActionStateMachines.peek().actions.add(action);
    }
    // Looks up a finished worker's machine by its worker id; the id must exist.
    public WorkerActionStateMachine find(String workerId) {
        for (WorkerActionStateMachine worker : this.finshedWorkers) {
            if (worker.workerId.equals(workerId)) {
                return worker;
            }
        }
        throw new AssertionError("Reference to non existing worker " + workerId);
    }
    public boolean everyoneDone() {
        return this.finshedWorkers.stream().allMatch(WorkerActionStateMachine::done);
    }
    public Location getRootPosition() {
        return this.finshedWorkers.iterator().next().pos;
    }
    @Override
    public String toString() {
        return this.finshedWorkers.toString();
    }
    public String currentWorkerId() {
        return workerActionStateMachines.peek().workerId;
    }
    // Records the action together with the symbol environment it was analyzed in.
    public void addWorkerAction(BLangNode action, SymbolEnv env) {
        addWorkerAction(action);
        this.workerInteractionEnvironments.put(action, env);
    }
    private SymbolEnv getActionEnvironment(BLangNode currentAction) {
        return workerInteractionEnvironments.get(currentAction);
    }
}
/**
 * This class represents a state machine to maintain the state of the send/receive
 * actions of a worker. 'currentState' is an index into 'actions'; the machine is done
 * when the index reaches the end of the list.
 */
private static class WorkerActionStateMachine {
    private static final String WORKER_SM_FINISHED = "FINISHED";
    public int currentState;
    public List<BLangNode> actions = new ArrayList<>();
    public Location pos;
    public String workerId;
    public BLangFunction node;
    public WorkerActionStateMachine(Location pos, String workerId, BLangFunction node) {
        this.pos = pos;
        this.workerId = workerId;
        this.node = node;
    }
    public boolean done() {
        return this.actions.size() == this.currentState;
    }
    public BLangNode currentAction() {
        return this.actions.get(this.currentState);
    }
    // True when the next pending action is a receive from the given source worker.
    public boolean currentIsReceive(String sourceWorkerId) {
        if (this.done()) {
            return false;
        }
        BLangNode action = this.currentAction();
        return !isWorkerSend(action) && !isWorkerSyncSend(action) && !isWaitAction(action)
                && ((BLangWorkerReceive) action).workerIdentifier.value.equals(sourceWorkerId);
    }
    // Advances past the current action.
    public void next() {
        this.currentState++;
    }
    @Override
    public String toString() {
        // Rendered in diagnostics: either FINISHED or the pending action's string form.
        if (this.done()) {
            return WORKER_SM_FINISHED;
        } else {
            BLangNode action = this.currentAction();
            if (isWorkerSend(action)) {
                return ((BLangWorkerSend) action).toActionString();
            } else if (isWorkerSyncSend(action)) {
                return ((BLangWorkerSyncSendExpr) action).toActionString();
            } else if (isWaitAction(action)) {
                return action.toString();
            } else {
                return ((BLangWorkerReceive) action).toActionString();
            }
        }
    }
}
public static String generateChannelName(String source, String target) {
    // Channel names encode direction as "<sender>-><receiver>".
    String directionSeparator = "->";
    return source + directionSeparator + target;
}
private BLangNode getEnclosingClass(SymbolEnv env) {
    // Walk outwards through the enclosing environments until a class definition is found.
    SymbolEnv currentEnv = env;
    BLangNode currentNode = currentEnv.node;
    while (currentNode.getKind() != NodeKind.CLASS_DEFN) {
        currentEnv = currentEnv.enclEnv;
        currentNode = currentEnv.node;
    }
    return currentNode;
}
private void validateInvocationInMatchGuard(BLangInvocation invocation) {
    // Calls inside a match guard must be isolated and take only immutable arguments,
    // so the guard cannot mutate the value being matched.
    BLangExpression matchedExpr = getMatchedExprIfCalledInMatchGuard(invocation);
    if (matchedExpr == null) {
        return;
    }
    BType matchedExprType = matchedExpr.getBType();
    // An immutable matched value cannot be affected by the guard, so nothing to check.
    if (types.isInherentlyImmutableType(matchedExprType) ||
            Symbols.isFlagOn(matchedExprType.flags, Flags.READONLY)) {
        return;
    }
    BSymbol invocationSymbol = invocation.symbol;
    if (invocationSymbol == null) {
        // No symbol: handle the 'new stream(...)' case by validating its implementor argument.
        BLangNode parent = invocation.parent;
        if (parent == null || parent.getKind() != NodeKind.TYPE_INIT_EXPR) {
            return;
        }
        BLangTypeInit newExpr = (BLangTypeInit) parent;
        if (newExpr.getBType().tag != TypeTags.STREAM) {
            return;
        }
        List<BLangExpression> argsExpr = newExpr.argsExpr;
        if (argsExpr.isEmpty()) {
            return;
        }
        BLangExpression streamImplementorExpr = argsExpr.get(0);
        BType type = streamImplementorExpr.getBType();
        if (!types.isInherentlyImmutableType(type) && !Symbols.isFlagOn(type.flags, Flags.READONLY)) {
            dlog.error(streamImplementorExpr.pos,
                    DiagnosticErrorCode.INVALID_CALL_WITH_MUTABLE_ARGS_IN_MATCH_GUARD);
        }
        return;
    }
    long flags = invocationSymbol.flags;
    boolean methodCall = Symbols.isFlagOn(flags, Flags.ATTACHED);
    // A method call is only considered isolated if both the method and its owner are isolated.
    boolean callsNonIsolatedFunction = !Symbols.isFlagOn(flags, Flags.ISOLATED) ||
            (methodCall && !Symbols.isFlagOn(invocationSymbol.owner.flags, Flags.ISOLATED));
    if (callsNonIsolatedFunction) {
        dlog.error(invocation.pos, DiagnosticErrorCode.INVALID_NON_ISOLATED_CALL_IN_MATCH_GUARD);
    }
    // Every argument (required and rest) must be immutable as well.
    List<BLangExpression> args = new ArrayList<>(invocation.requiredArgs);
    args.addAll(invocation.restArgs);
    for (BLangExpression arg : args) {
        BType type = arg.getBType();
        if (type != symTable.semanticError &&
                !types.isInherentlyImmutableType(type) &&
                !Symbols.isFlagOn(type.flags, Flags.READONLY)) {
            dlog.error(arg.pos, DiagnosticErrorCode.INVALID_CALL_WITH_MUTABLE_ARGS_IN_MATCH_GUARD);
        }
    }
}
private BLangExpression getMatchedExprIfCalledInMatchGuard(BLangInvocation invocation) {
    // Walks up the parent chain; returns the matched expression only when this invocation
    // sits inside a match guard (and not inside a nested function or lambda).
    BLangNode prevParent = invocation;
    BLangNode parent = invocation.parent;
    boolean encounteredMatchGuard = false;
    while (parent != null) {
        NodeKind parentKind = parent.getKind();
        switch (parentKind) {
            case LAMBDA:
            case FUNCTION:
            case RESOURCE_FUNC:
                // Crossed a function boundary: the call is not directly in the guard.
                return null;
            case MATCH_CLAUSE:
                if (encounteredMatchGuard) {
                    return ((BLangMatchStatement) parent.parent).expr;
                }
                return null;
            case MATCH_GUARD:
                encounteredMatchGuard = true;
                break;
            case INVOCATION:
                BLangInvocation parentInvocation = (BLangInvocation) parent;
                // Continue only through method-call receivers (i.e. we were the 'x' in x.foo()).
                if (parentInvocation.langLibInvocation || prevParent != parentInvocation.expr) {
                    return null;
                }
        }
        prevParent = parent;
        parent = parent.parent;
    }
    return null;
}
// Tracks where the analyzer currently is with respect to default-value expressions,
// since 'check' usage rules differ in each of these contexts.
private enum DefaultValueState {
    // Not inside any default-value expression.
    NOT_IN_DEFAULT_VALUE,
    // Inside a record field's default value expression.
    RECORD_FIELD_DEFAULT,
    // Inside an object/class field initializer.
    OBJECT_FIELD_INITIALIZER,
    // Inside a function defined within a default-value expression.
    FUNCTION_IN_DEFAULT_VALUE
}
/**
 * Mutable per-analysis state threaded through every visit method of this analyzer.
 *
 * @since 2.0.0
 */
public static class AnalyzerData {
    SymbolEnv env;
    BLangNode parent;
    // Depth of enclosing loops, used to validate break/continue placement.
    int loopCount;
    boolean loopAlterNotAllowed;
    boolean inInternallyDefinedBlockStmt;
    // Monotonic counter of worker state-machine advances, used for deadlock detection.
    int workerSystemMovementSequence;
    Stack<WorkerActionSystem> workerActionSystemStack = new Stack<>();
    Map<BSymbol, Set<BLangNode>> workerReferences = new HashMap<>();
    // Transaction bookkeeping.
    int transactionCount;
    boolean withinTransactionScope;
    int commitCount;
    int rollbackCount;
    boolean commitRollbackAllowed;
    int commitCountWithinBlock;
    int rollbackCountWithinBlock;
    Stack<Boolean> loopWithinTransactionCheckStack = new Stack<>();
    Stack<Boolean> returnWithinTransactionCheckStack = new Stack<>();
    Stack<Boolean> transactionalFuncCheckStack = new Stack<>();
    boolean withinLockBlock;
    // Failure/'check'/'fail' tracking.
    boolean failureHandled;
    boolean failVisited;
    // Query-analysis flags.
    boolean queryToTableWithKey;
    boolean withinQuery;
    boolean queryToMap;
    // Per-scope accumulated return and error types.
    Stack<LinkedHashSet<BType>> returnTypes = new Stack<>();
    Stack<LinkedHashSet<BType>> errorTypes = new Stack<>();
    DefaultValueState defaultValueState = DefaultValueState.NOT_IN_DEFAULT_VALUE;
}
}
|
class CodeAnalyzer extends SimpleBLangNodeAnalyzer<CodeAnalyzer.AnalyzerData> {
// Context key under which the single CodeAnalyzer instance is registered.
private static final CompilerContext.Key<CodeAnalyzer> CODE_ANALYZER_KEY =
        new CompilerContext.Key<>();
// Collaborators resolved once from the CompilerContext in the constructor.
private final SymbolResolver symResolver;
private final SymbolTable symTable;
private final Types types;
private final BLangDiagnosticLog dlog;
private final TypeChecker typeChecker;
private final Names names;
private final ReachabilityAnalyzer reachabilityAnalyzer;
public static CodeAnalyzer getInstance(CompilerContext context) {
    // One analyzer per CompilerContext; the constructor registers itself in the context.
    CodeAnalyzer codeGenerator = context.get(CODE_ANALYZER_KEY);
    if (codeGenerator == null) {
        codeGenerator = new CodeAnalyzer(context);
    }
    return codeGenerator;
}
public CodeAnalyzer(CompilerContext context) {
    // Register this instance first so getInstance() calls made while resolving the
    // collaborators below see it and do not recurse into construction again.
    context.put(CODE_ANALYZER_KEY, this);
    this.symTable = SymbolTable.getInstance(context);
    this.types = Types.getInstance(context);
    this.dlog = BLangDiagnosticLog.getInstance(context);
    this.typeChecker = TypeChecker.getInstance(context);
    this.names = Names.getInstance(context);
    this.symResolver = SymbolResolver.getInstance(context);
    this.reachabilityAnalyzer = ReachabilityAnalyzer.getInstance(context);
}
public BLangPackage analyze(BLangPackage pkgNode) {
    // Entry point: run code analysis over the whole package and return it for chaining.
    AnalyzerData analyzerData = new AnalyzerData();
    visitNode(pkgNode, analyzerData);
    return pkgNode;
}
@Override
public void visit(BLangPackage pkgNode, AnalyzerData data) {
    this.dlog.setCurrentPackageId(pkgNode.packageID);
    // Skip packages that have already completed the code-analyze phase.
    if (pkgNode.completedPhases.contains(CompilerPhase.CODE_ANALYZE)) {
        return;
    }
    data.parent = pkgNode;
    data.env = this.symTable.pkgEnvMap.get(pkgNode.symbol);
    analyzeTopLevelNodes(pkgNode, data);
    // Associated testable (test) packages are analyzed with the same data.
    pkgNode.getTestablePkgs().forEach(testablePackage -> visitNode(testablePackage, data));
}
@Override
public void visit(BLangTestablePackage node, AnalyzerData data) {
    // Testable packages are analyzed exactly like regular packages.
    visit((BLangPackage) node, data);
}
private void analyzeTopLevelNodes(BLangPackage pkgNode, AnalyzerData data) {
    List<TopLevelNode> topLevelNodes = pkgNode.topLevelNodes;
    // Indexed loop re-reads size() — presumably analysis may append new top-level nodes
    // while iterating; TODO confirm before switching to an enhanced for loop.
    for (int i = 0; i < topLevelNodes.size(); i++) {
        analyzeNode((BLangNode) topLevelNodes.get(i), data);
    }
    // Mark the phase complete so re-entry is a no-op.
    pkgNode.completedPhases.add(CompilerPhase.CODE_ANALYZE);
}
@Override
public void analyzeNode(BLangNode node, AnalyzerData data) {
    // Visit the node with it installed as the current parent; restore parent and env afterwards.
    SymbolEnv prevEnv = data.env;
    BLangNode parent = data.parent;
    node.parent = parent;
    data.parent = node;
    visitNode(node, data);
    data.parent = parent;
    data.env = prevEnv;
}
private void analyzeTypeNode(BLangType node, AnalyzerData data) {
    // Type nodes are optional in many constructs (e.g. 'var' declarations).
    if (node == null) {
        return;
    }
    analyzeNode(node, data);
}
@Override
public void visit(BLangCompilationUnit compUnitNode, AnalyzerData data) {
    // Analyze every top-level construct in this compilation unit.
    for (TopLevelNode topLevelNode : compUnitNode.topLevelNodes) {
        analyzeNode((BLangNode) topLevelNode, data);
    }
}
@Override
public void visit(BLangTypeDefinition typeDefinition, AnalyzerData data) {
    // Check the defined type itself, then any annotations attached to the definition.
    analyzeTypeNode(typeDefinition.typeNode, data);
    for (var annotationAttachment : typeDefinition.annAttachments) {
        analyzeNode(annotationAttachment, data);
    }
}
@Override
public void visit(BLangClassDefinition classDefinition, AnalyzerData data) {
    data.env = SymbolEnv.createClassEnv(classDefinition, classDefinition.symbol.scope, data.env);
    // Field initializers have special 'check' rules; flag the state while analyzing them.
    for (BLangSimpleVariable field : classDefinition.fields) {
        DefaultValueState prevDefaultValueState = data.defaultValueState;
        data.defaultValueState = DefaultValueState.OBJECT_FIELD_INITIALIZER;
        analyzeNode(field, data);
        data.defaultValueState = prevDefaultValueState;
    }
    // Analyze methods (including init) in source order for deterministic diagnostics.
    List<BLangFunction> bLangFunctionList = new ArrayList<>(classDefinition.functions);
    if (classDefinition.initFunction != null) {
        bLangFunctionList.add(classDefinition.initFunction);
    }
    bLangFunctionList.sort(Comparator.comparingInt(function -> function.pos.lineRange().startLine().line()));
    for (BLangFunction function : bLangFunctionList) {
        analyzeNode(function, data);
    }
    classDefinition.annAttachments.forEach(annotationAttachment -> analyzeNode(annotationAttachment, data));
}
// An object-constructor expression is analyzed through its type-init
// expression, which carries the actual construction semantics.
@Override
public void visit(BLangObjectConstructorExpression objectConstructorExpression, AnalyzerData data) {
visit(objectConstructorExpression.typeInit, data);
}
// A tuple variable definition is analyzed via its variable node.
@Override
public void visit(BLangTupleVariableDef bLangTupleVariableDef, AnalyzerData data) {
analyzeNode(bLangTupleVariableDef.var, data);
}
// A record variable definition is analyzed via its variable node.
@Override
public void visit(BLangRecordVariableDef bLangRecordVariableDef, AnalyzerData data) {
analyzeNode(bLangRecordVariableDef.var, data);
}
// An error variable definition is analyzed via its error-variable node.
@Override
public void visit(BLangErrorVariableDef bLangErrorVariableDef, AnalyzerData data) {
analyzeNode(bLangErrorVariableDef.errorVariable, data);
}
// A resource function is analyzed exactly like an ordinary function.
@Override
public void visit(BLangResourceFunction funcNode, AnalyzerData data) {
visit((BLangFunction) funcNode, data);
}
// Analyzes a function: validates parameters and return type, checks that a
// public function exposes only exportable types, validates 'main' and module
// init functions, then runs the body inside a fresh worker-action state
// machine (used to verify worker send/receive pairing). Lambdas stop after
// param/return analysis — their bodies are analyzed where they are defined.
@Override
public void visit(BLangFunction funcNode, AnalyzerData data) {
validateParams(funcNode, data);
analyzeNode(funcNode.returnTypeNode, data);
boolean isLambda = funcNode.flagSet.contains(Flag.LAMBDA);
if (isLambda) {
return;
}
// Public functions must not leak non-exportable types through their signature.
if (Symbols.isPublic(funcNode.symbol)) {
funcNode.symbol.params.forEach(symbol -> analyzeExportableTypeRef(funcNode.symbol, symbol.type.tsymbol,
true,
funcNode.pos));
if (funcNode.symbol.restParam != null) {
analyzeExportableTypeRef(funcNode.symbol, funcNode.symbol.restParam.type.tsymbol, true,
funcNode.restParam.pos);
}
analyzeExportableTypeRef(funcNode.symbol, funcNode.symbol.retType.tsymbol, true,
funcNode.returnTypeNode.pos);
}
if (MAIN_FUNCTION_NAME.equals(funcNode.name.value)) {
new MainFunctionValidator(types, dlog).validateMainFunction(funcNode);
}
this.validateModuleInitFunction(funcNode);
// The worker-action system is always finalized, even if analysis throws.
try {
this.initNewWorkerActionSystem(data);
data.workerActionSystemStack.peek().startWorkerActionStateMachine(DEFAULT_WORKER_NAME,
funcNode.pos,
funcNode);
this.visitFunction(funcNode, data);
data.workerActionSystemStack.peek().endWorkerActionStateMachine();
} finally {
this.finalizeCurrentWorkerActionSystem(data);
}
funcNode.annAttachments.forEach(annotationAttachment -> analyzeNode(annotationAttachment, data));
validateNamedWorkerUniqueReferences(data);
}
/**
 * Reports an error for every named-worker reference that is used as a plain
 * variable reference more than once, then resets the collected references.
 */
private void validateNamedWorkerUniqueReferences(AnalyzerData data) {
    for (var referenceNodes : data.workerReferences.values()) {
        if (referenceNodes.size() <= 1) {
            continue; // a single reference is fine
        }
        for (BLangNode reference : referenceNodes) {
            dlog.error(reference.pos, DiagnosticErrorCode.ILLEGAL_WORKER_REFERENCE_AS_A_VARIABLE_REFERENCE,
                    reference);
        }
    }
    data.workerReferences.clear();
}
/** Analyzes all required parameters and the rest parameter (if any) of a function. */
private void validateParams(BLangFunction funcNode, AnalyzerData data) {
    funcNode.requiredParams.forEach(parameter -> analyzeNode(parameter, data));
    if (funcNode.restParam != null) {
        analyzeNode(funcNode.restParam, data);
    }
}
// Analyzes a function body inside a fresh function environment, tracking
// return-within-transaction validity, collected return types, and whether the
// function is 'transactional'. After the body, runs reachability analysis.
// NOTE(review): the early return for native functions leaves the three stacks
// pushed without a matching pop — apparently benign since later peek()s always
// see the current function's own entries on top, but confirm this is intended.
private void visitFunction(BLangFunction funcNode, AnalyzerData data) {
data.env = SymbolEnv.createFunctionEnv(funcNode, funcNode.symbol.scope, data.env);
data.returnWithinTransactionCheckStack.push(true);
data.returnTypes.push(new LinkedHashSet<>());
data.transactionalFuncCheckStack.push(funcNode.flagSet.contains(Flag.TRANSACTIONAL));
if (Symbols.isNative(funcNode.symbol)) {
return;
}
if (isPublicInvokableNode(funcNode)) {
analyzeNode(funcNode.returnTypeNode, data);
}
/* the body can be null in the case of Object type function declarations */
if (funcNode.body != null) {
// Functions defined inside default values get a dedicated context so that
// default-value restrictions apply within them.
DefaultValueState prevDefaultValueState = data.defaultValueState;
if (prevDefaultValueState == DefaultValueState.RECORD_FIELD_DEFAULT ||
prevDefaultValueState == DefaultValueState.OBJECT_FIELD_INITIALIZER) {
data.defaultValueState = DefaultValueState.FUNCTION_IN_DEFAULT_VALUE;
}
analyzeNode(funcNode.body, data);
data.defaultValueState = prevDefaultValueState;
}
reachabilityAnalyzer.analyzeReachability(funcNode, data.env);
data.returnTypes.pop();
data.returnWithinTransactionCheckStack.pop();
data.transactionalFuncCheckStack.pop();
}
/**
 * Returns true when the invokable is public and either a module-level symbol
 * or a member of a public owner.
 */
private boolean isPublicInvokableNode(BLangInvokableNode invNode) {
    if (!Symbols.isPublic(invNode.symbol)) {
        return false;
    }
    BSymbol owner = invNode.symbol.owner;
    return SymbolKind.PACKAGE.equals(owner.getKind()) || Symbols.isPublic(owner);
}
// Analyzes a block function body statement by statement. While inside a loop,
// loop-altering statements are disallowed. For a transactional function the
// body is treated as being in transaction scope.
// NOTE(review): inInternallyDefinedBlockStmt is set true before every
// statement and forced to false after the loop (not restored to its previous
// value) — confirm this asymmetry is intentional.
@Override
public void visit(BLangBlockFunctionBody body, AnalyzerData data) {
boolean prevWithinTxScope = data.withinTransactionScope;
boolean prevLoopAlterNotAllowed = data.loopAlterNotAllowed;
data.loopAlterNotAllowed = data.loopCount > 0;
if (!prevWithinTxScope) {
data.withinTransactionScope = data.transactionalFuncCheckStack.peek();
}
data.env = SymbolEnv.createFuncBodyEnv(body, data.env);
for (BLangStatement e : body.stmts) {
data.inInternallyDefinedBlockStmt = true;
analyzeNode(e, data);
}
data.inInternallyDefinedBlockStmt = false;
// Restore the transaction scope only for transactional functions, mirroring
// the conditional set above.
if (data.transactionalFuncCheckStack.peek()) {
data.withinTransactionScope = prevWithinTxScope;
}
data.loopAlterNotAllowed = prevLoopAlterNotAllowed;
}
// An expression-bodied function is analyzed via its single expression.
@Override
public void visit(BLangExprFunctionBody body, AnalyzerData data) {
analyzeExpr(body.expr, data);
}
// External function bodies have no statements to analyze; intentionally empty.
@Override
public void visit(BLangExternalFunctionBody body, AnalyzerData data) {
}
/** A fork statement must declare at least one worker; report an empty fork. */
@Override
public void visit(BLangForkJoin forkJoin, AnalyzerData data) {
    if (!forkJoin.workers.isEmpty()) {
        return;
    }
    dlog.error(forkJoin.pos, DiagnosticErrorCode.INVALID_FOR_JOIN_SYNTAX_EMPTY_FORK);
}
// Analyzes a transaction statement. A transaction block is illegal inside a
// transactional function. Within the block, commit/rollback accounting starts
// fresh; the body must contain at least one commit. All counters and flags are
// saved up front and restored afterwards so nested analysis is unaffected.
@Override
public void visit(BLangTransaction transactionNode, AnalyzerData data) {
if (data.transactionalFuncCheckStack.peek()) {
this.dlog.error(transactionNode.pos,
DiagnosticErrorCode.TRANSACTION_CANNOT_BE_USED_WITHIN_TRANSACTIONAL_SCOPE);
return;
}
data.errorTypes.push(new LinkedHashSet<>());
// Save the surrounding transaction-related state.
boolean previousWithinTxScope = data.withinTransactionScope;
int previousCommitCount = data.commitCount;
int previousRollbackCount = data.rollbackCount;
boolean prevCommitRollbackAllowed = data.commitRollbackAllowed;
data.commitRollbackAllowed = true;
data.commitCount = 0;
data.rollbackCount = 0;
data.withinTransactionScope = true;
data.loopWithinTransactionCheckStack.push(false);
data.returnWithinTransactionCheckStack.push(false);
data.transactionCount++;
// An enclosing handler (or this statement's own on-fail clause) handles failures.
boolean failureHandled = data.failureHandled;
if (!failureHandled) {
data.failureHandled = transactionNode.onFailClause != null;
}
analyzeNode(transactionNode.transactionBody, data);
data.failureHandled = failureHandled;
if (data.commitCount < 1) {
this.dlog.error(transactionNode.pos, DiagnosticErrorCode.INVALID_COMMIT_COUNT);
}
// Restore the saved state in reverse.
data.transactionCount--;
data.withinTransactionScope = previousWithinTxScope;
data.commitCount = previousCommitCount;
data.rollbackCount = previousRollbackCount;
data.commitRollbackAllowed = prevCommitRollbackAllowed;
data.returnWithinTransactionCheckStack.pop();
data.loopWithinTransactionCheckStack.pop();
analyzeOnFailClause(transactionNode.onFailClause, data);
data.errorTypes.pop();
}
/** Null-safe analysis of an optional on-fail clause. */
private void analyzeOnFailClause(BLangOnFailClause onFailClause, AnalyzerData data) {
    if (onFailClause == null) {
        return;
    }
    analyzeNode(onFailClause, data);
}
// A 'transactional' expression needs no further code analysis; intentionally empty.
@Override
public void visit(BLangTransactionalExpr transactionalExpr, AnalyzerData data) {
}
/**
 * Validates a commit expression: it must appear inside a transaction block,
 * not in a transactional function, not inside a loop within the transaction,
 * and only where commit/rollback is still allowed. After a valid commit the
 * transaction scope is considered exited.
 */
@Override
public void visit(BLangCommitExpr commitExpr, AnalyzerData data) {
    data.commitCount++;
    data.commitCountWithinBlock++;
    if (data.transactionCount == 0) {
        this.dlog.error(commitExpr.pos, DiagnosticErrorCode.COMMIT_CANNOT_BE_OUTSIDE_TRANSACTION_BLOCK);
        return;
    }
    // Guard the stack peeks against an empty stack, consistent with
    // visit(BLangRollback), which already handles that case.
    if (!data.transactionalFuncCheckStack.empty() && data.transactionalFuncCheckStack.peek()) {
        this.dlog.error(commitExpr.pos, DiagnosticErrorCode.COMMIT_CANNOT_BE_WITHIN_TRANSACTIONAL_FUNCTION);
        return;
    }
    if (!data.withinTransactionScope || !data.commitRollbackAllowed ||
            (!data.loopWithinTransactionCheckStack.empty() && data.loopWithinTransactionCheckStack.peek())) {
        this.dlog.error(commitExpr.pos, DiagnosticErrorCode.COMMIT_NOT_ALLOWED);
        return;
    }
    data.withinTransactionScope = false;
}
// Validates a rollback statement: it must appear inside a transaction scope,
// not in a transactional function, and only where commit/rollback is still
// allowed (e.g. not inside a loop within the transaction). Stack peeks are
// guarded against empty stacks. After a valid rollback the transaction scope
// is considered exited; the optional rollback expression is then analyzed.
@Override
public void visit(BLangRollback rollbackNode, AnalyzerData data) {
data.rollbackCount++;
data.rollbackCountWithinBlock++;
if (data.transactionCount == 0 && !data.withinTransactionScope) {
this.dlog.error(rollbackNode.pos, DiagnosticErrorCode.ROLLBACK_CANNOT_BE_OUTSIDE_TRANSACTION_BLOCK);
return;
}
if (!data.transactionalFuncCheckStack.empty() && data.transactionalFuncCheckStack.peek()) {
this.dlog.error(rollbackNode.pos, DiagnosticErrorCode.ROLLBACK_CANNOT_BE_WITHIN_TRANSACTIONAL_FUNCTION);
return;
}
if (!data.withinTransactionScope || !data.commitRollbackAllowed ||
(!data.loopWithinTransactionCheckStack.empty() && data.loopWithinTransactionCheckStack.peek())) {
this.dlog.error(rollbackNode.pos, DiagnosticErrorCode.ROLLBACK_NOT_ALLOWED);
return;
}
data.withinTransactionScope = false;
analyzeExpr(rollbackNode.expr, data);
}
/**
 * Analyzes a retry statement: visits the retry spec and body with failure
 * handling considered covered when an on-fail clause exists, then records how
 * a failure escapes the retry body.
 */
@Override
public void visit(BLangRetry retryNode, AnalyzerData data) {
    data.errorTypes.push(new LinkedHashSet<>());
    boolean previouslyHandled = data.failureHandled;
    if (!previouslyHandled) {
        data.failureHandled = retryNode.onFailClause != null;
    }
    visitNode(retryNode.retrySpec, data);
    visitNode(retryNode.retryBody, data);
    data.failureHandled = previouslyHandled;
    // With an on-fail clause, failures break to the outer block; otherwise the
    // body is not breakable on failure.
    if (retryNode.onFailClause != null) {
        retryNode.retryBody.failureBreakMode = BLangBlockStmt.FailureBreakMode.BREAK_TO_OUTER_BLOCK;
    } else {
        retryNode.retryBody.failureBreakMode = BLangBlockStmt.FailureBreakMode.NOT_BREAKABLE;
    }
    analyzeOnFailClause(retryNode.onFailClause, data);
    data.errorTypes.pop();
}
/**
 * If the retry spec names a retry manager type, verifies it is assignable to
 * the lang.error RetryManager abstract object type.
 */
@Override
public void visit(BLangRetrySpec retrySpec, AnalyzerData data) {
    if (retrySpec.retryManagerType == null) {
        return;
    }
    BSymbol retryManagerTypeSymbol = symTable.langErrorModuleSymbol.scope
            .lookup(names.fromString("RetryManager")).symbol;
    BType abstractRetryManagerType = retryManagerTypeSymbol.type;
    if (!types.isAssignable(retrySpec.retryManagerType.getBType(), abstractRetryManagerType)) {
        dlog.error(retrySpec.pos, DiagnosticErrorCode.INVALID_INTERFACE_ON_NON_ABSTRACT_OBJECT,
                RETRY_MANAGER_OBJECT_SHOULD_RETRY_FUNC, retrySpec.retryManagerType.getBType());
    }
}
// A retry-transaction statement is analyzed as its retry spec followed by the
// wrapped transaction.
@Override
public void visit(BLangRetryTransaction retryTransaction, AnalyzerData data) {
analyzeNode(retryTransaction.retrySpec, data);
analyzeNode(retryTransaction.transaction, data);
}
/**
 * Analyzes a block statement, enforcing that at most one commit and one
 * rollback occur per branch. Per-block counters are saved/restored so that
 * nested blocks are counted independently.
 */
@Override
public void visit(BLangBlockStmt blockNode, AnalyzerData data) {
    int savedCommitCount = data.commitCountWithinBlock;
    int savedRollbackCount = data.rollbackCountWithinBlock;
    data.commitCountWithinBlock = 0;
    data.rollbackCountWithinBlock = 0;
    boolean savedInInternalBlock = data.inInternallyDefinedBlockStmt;
    data.inInternallyDefinedBlockStmt = checkBlockIsAnInternalBlockInImmediateFunctionBody(blockNode);
    data.env = SymbolEnv.createBlockEnv(blockNode, data.env);
    for (BLangStatement stmt : blockNode.stmts) {
        analyzeNode(stmt, data);
    }
    data.inInternallyDefinedBlockStmt = savedInInternalBlock;
    if (data.commitCountWithinBlock > 1 || data.rollbackCountWithinBlock > 1) {
        this.dlog.error(blockNode.pos, DiagnosticErrorCode.MAX_ONE_COMMIT_ROLLBACK_ALLOWED_WITHIN_A_BRANCH);
    }
    data.commitCountWithinBlock = savedCommitCount;
    data.rollbackCountWithinBlock = savedRollbackCount;
}
/**
 * Returns true when the node's ancestor chain consists only of BLOCK nodes up
 * to a BLOCK_FUNCTION_BODY — i.e. the block is nested directly (through
 * blocks only) inside a function body.
 */
private boolean checkBlockIsAnInternalBlockInImmediateFunctionBody(BLangNode node) {
    for (BLangNode ancestor = node.parent; ancestor != null; ancestor = ancestor.parent) {
        NodeKind kind = ancestor.getKind();
        if (kind == NodeKind.BLOCK_FUNCTION_BODY) {
            return true;
        }
        if (kind != NodeKind.BLOCK) {
            return false; // any other construct breaks the block-only chain
        }
    }
    return false;
}
/**
 * Validates that a return does not exit a transaction illegally, then analyzes
 * the returned expression and records its type for the enclosing function.
 */
@Override
public void visit(BLangReturn returnStmt, AnalyzerData data) {
    if (checkReturnValidityInTransaction(data)) {
        this.dlog.error(returnStmt.pos, DiagnosticErrorCode.RETURN_CANNOT_BE_USED_TO_EXIT_TRANSACTION);
        return;
    }
    BLangExpression returnedExpr = returnStmt.expr;
    analyzeExpr(returnedExpr, data);
    data.returnTypes.peek().add(returnedExpr.getBType());
}
/**
 * Analyzes an if statement. When the condition is a 'transactional' check
 * (possibly wrapped in a group expression), the then-branch is analyzed in
 * transaction scope and the previous scope is restored afterwards. An if/else
 * whose branches both sit in transaction scope may each contain their own
 * commit/rollback independently.
 */
@Override
public void visit(BLangIf ifStmt, AnalyzerData data) {
    boolean independentBlocks = false;
    int prevCommitCount = data.commitCount;
    int prevRollbackCount = data.rollbackCount;
    BLangStatement elseStmt = ifStmt.elseStmt;
    if (data.withinTransactionScope && elseStmt != null && elseStmt.getKind() != NodeKind.IF) {
        independentBlocks = true;
        data.commitRollbackAllowed = true;
    }
    boolean prevTxMode = data.withinTransactionScope;
    // Unwrap a group expression once so the same kind is used both to enter
    // and to leave transaction scope (previously the restore path did not
    // unwrap, leaking the scope for '(transactional)' conditions).
    NodeKind conditionKind = ifStmt.expr.getKind() == NodeKind.GROUP_EXPR
            ? ((BLangGroupExpr) ifStmt.expr).expression.getKind()
            : ifStmt.expr.getKind();
    boolean transactionalGuard = conditionKind == NodeKind.TRANSACTIONAL_EXPRESSION;
    if (transactionalGuard) {
        data.withinTransactionScope = true;
    }
    BLangBlockStmt body = ifStmt.body;
    analyzeNode(body, data);
    if (transactionalGuard) {
        data.withinTransactionScope = prevTxMode;
    }
    if (elseStmt != null) {
        if (independentBlocks) {
            data.commitRollbackAllowed = true;
            data.withinTransactionScope = true;
        }
        analyzeNode(elseStmt, data);
        if ((prevCommitCount != data.commitCount) || prevRollbackCount != data.rollbackCount) {
            data.commitRollbackAllowed = false;
        }
    }
    analyzeExpr(ifStmt.expr, data);
}
// Analyzes a match statement: the matched expression, then each clause. Every
// pair of clauses is compared — when two clauses have similar (or absent)
// match guards, overlapping patterns in the later clause are reported as
// unreachable.
@Override
public void visit(BLangMatchStatement matchStatement, AnalyzerData data) {
data.errorTypes.push(new LinkedHashSet<>());
analyzeExpr(matchStatement.expr, data);
boolean failureHandled = data.failureHandled;
if (!failureHandled) {
data.failureHandled = matchStatement.onFailClause != null;
}
List<BLangMatchClause> matchClauses = matchStatement.matchClauses;
int clausesSize = matchClauses.size();
for (int i = 0; i < clausesSize; i++) {
BLangMatchClause firstClause = matchClauses.get(i);
// Compare this clause against every later clause for unreachable patterns.
for (int j = i + 1; j < clausesSize; j++) {
BLangMatchClause secondClause = matchClauses.get(j);
if (!checkSimilarMatchGuard(firstClause.matchGuard, secondClause.matchGuard)) {
// Guards differ: a guard-less earlier clause still shadows later ones.
if (firstClause.matchGuard == null) {
checkSimilarMatchPatternsBetweenClauses(firstClause, secondClause);
}
continue;
}
checkSimilarMatchPatternsBetweenClauses(firstClause, secondClause);
}
analyzeNode(firstClause, data);
}
data.failureHandled = failureHandled;
analyzeOnFailClause(matchStatement.onFailClause, data);
data.errorTypes.pop();
}
// Analyzes one match clause: warns for patterns that cannot match (noType) or
// that are shadowed by an earlier pattern in the same clause, requires that
// all patterns in the clause bind the same set of variables, then analyzes the
// guard and the clause body.
@Override
public void visit(BLangMatchClause matchClause, AnalyzerData data) {
Map<String, BVarSymbol> variablesInMatchPattern = new HashMap<>();
boolean patternListContainsSameVars = true;
List<BLangMatchPattern> matchPatterns = matchClause.matchPatterns;
BLangMatchGuard matchGuard = matchClause.matchGuard;
for (int i = 0; i < matchPatterns.size(); i++) {
BLangMatchPattern matchPattern = matchPatterns.get(i);
if (matchPattern.getBType() == symTable.noType) {
dlog.warning(matchPattern.pos, DiagnosticWarningCode.MATCH_STMT_UNMATCHED_PATTERN);
}
if (patternListContainsSameVars) {
patternListContainsSameVars = compareVariables(variablesInMatchPattern, matchPattern);
}
// An earlier similar pattern in the same clause makes this one unreachable.
for (int j = i - 1; j >= 0; j--) {
if (checkSimilarMatchPatterns(matchPatterns.get(j), matchPattern)) {
dlog.warning(matchPattern.pos, DiagnosticWarningCode.MATCH_STMT_PATTERN_UNREACHABLE);
}
}
analyzeNode(matchPattern, data);
}
if (matchGuard != null) {
analyzeNode(matchGuard, data);
}
if (!patternListContainsSameVars) {
dlog.error(matchClause.pos, DiagnosticErrorCode.MATCH_PATTERNS_SHOULD_CONTAIN_SAME_SET_OF_VARIABLES);
}
analyzeNode(matchClause.blockStmt, data);
}
// Mapping match patterns need no extra code analysis here; intentionally empty.
@Override
public void visit(BLangMappingMatchPattern mappingMatchPattern, AnalyzerData data) {
}
// Field match patterns need no extra code analysis here; intentionally empty.
@Override
public void visit(BLangFieldMatchPattern fieldMatchPattern, AnalyzerData data) {
}
// A match guard is analyzed via its guard expression.
@Override
public void visit(BLangMatchGuard matchGuard, AnalyzerData data) {
analyzeExpr(matchGuard.expr, data);
}
/**
 * Warns for every pattern of {@code secondClause} that is shadowed by some
 * pattern of {@code firstClause} (i.e. the later pattern is unreachable).
 */
private void checkSimilarMatchPatternsBetweenClauses(BLangMatchClause firstClause, BLangMatchClause secondClause) {
    for (BLangMatchPattern earlierPattern : firstClause.matchPatterns) {
        for (BLangMatchPattern laterPattern : secondClause.matchPatterns) {
            if (checkSimilarMatchPatterns(earlierPattern, laterPattern)) {
                dlog.warning(laterPattern.pos, DiagnosticWarningCode.MATCH_STMT_PATTERN_UNREACHABLE);
            }
        }
    }
}
// Returns true when firstPattern would match everything secondPattern matches
// (making a later secondPattern unreachable). Patterns of different kinds are
// only similar when one side is a var-binding pattern matching an empty
// list/mapping pattern on the other side; same-kind patterns dispatch to the
// kind-specific comparison.
private boolean checkSimilarMatchPatterns(BLangMatchPattern firstPattern, BLangMatchPattern secondPattern) {
NodeKind firstPatternKind = firstPattern.getKind();
NodeKind secondPatternKind = secondPattern.getKind();
if (firstPatternKind != secondPatternKind) {
if (firstPatternKind == NodeKind.VAR_BINDING_PATTERN_MATCH_PATTERN) {
return checkEmptyListOrMapMatchWithVarBindingPatternMatch(secondPattern,
((BLangVarBindingPatternMatchPattern) firstPattern));
}
if (secondPatternKind == NodeKind.VAR_BINDING_PATTERN_MATCH_PATTERN) {
return checkEmptyListOrMapMatchWithVarBindingPatternMatch(firstPattern,
((BLangVarBindingPatternMatchPattern) secondPattern));
}
return false;
}
switch (firstPatternKind) {
case WILDCARD_MATCH_PATTERN:
case REST_MATCH_PATTERN:
// Wildcard and rest patterns match anything, so they always overlap.
return true;
case CONST_MATCH_PATTERN:
return checkSimilarConstMatchPattern((BLangConstPattern) firstPattern,
(BLangConstPattern) secondPattern);
case VAR_BINDING_PATTERN_MATCH_PATTERN:
return checkSimilarBindingPatterns(
((BLangVarBindingPatternMatchPattern) firstPattern).getBindingPattern(),
((BLangVarBindingPatternMatchPattern) secondPattern).getBindingPattern());
case LIST_MATCH_PATTERN:
return checkSimilarListMatchPattern((BLangListMatchPattern) firstPattern,
(BLangListMatchPattern) secondPattern);
case MAPPING_MATCH_PATTERN:
return checkSimilarMappingMatchPattern((BLangMappingMatchPattern) firstPattern,
(BLangMappingMatchPattern) secondPattern);
case ERROR_MATCH_PATTERN:
return checkSimilarErrorMatchPattern((BLangErrorMatchPattern) firstPattern,
(BLangErrorMatchPattern) secondPattern);
default:
return false;
}
}
/**
 * Returns true when {@code firstPattern} is an empty list/mapping match
 * pattern (no member patterns, no rest pattern) and {@code secondPattern}
 * binds via a correspondingly empty list/mapping binding pattern — the two
 * then cover the same (empty) shape.
 */
private boolean checkEmptyListOrMapMatchWithVarBindingPatternMatch(BLangMatchPattern firstPattern,
        BLangVarBindingPatternMatchPattern secondPattern) {
    if (firstPattern.getKind() == NodeKind.LIST_MATCH_PATTERN) {
        BLangBindingPattern bindingPattern = secondPattern.getBindingPattern();
        if (bindingPattern.getKind() != NodeKind.LIST_BINDING_PATTERN) {
            return false;
        }
        BLangListMatchPattern listMatchPattern = (BLangListMatchPattern) firstPattern;
        BLangListBindingPattern listBindingPattern = (BLangListBindingPattern) bindingPattern;
        return listMatchPattern.matchPatterns.isEmpty() && listBindingPattern.bindingPatterns.isEmpty() &&
                listMatchPattern.restMatchPattern == null && listBindingPattern.restBindingPattern == null;
    }
    if (firstPattern.getKind() == NodeKind.MAPPING_MATCH_PATTERN) {
        BLangBindingPattern bindingPattern = secondPattern.getBindingPattern();
        // Use the extracted local, consistent with the list branch above
        // (previously this re-evaluated secondPattern.getBindingPattern()).
        if (bindingPattern.getKind() != NodeKind.MAPPING_BINDING_PATTERN) {
            return false;
        }
        BLangMappingMatchPattern mappingMatchPattern = (BLangMappingMatchPattern) firstPattern;
        BLangMappingBindingPattern mappingBindingPattern = (BLangMappingBindingPattern) bindingPattern;
        return mappingMatchPattern.fieldMatchPatterns.isEmpty() &&
                mappingBindingPattern.fieldBindingPatterns.isEmpty() &&
                mappingMatchPattern.restMatchPattern == null && mappingBindingPattern.restBindingPattern == null;
    }
    return false;
}
/**
 * Two error match patterns are similar when their type references, message
 * patterns, cause patterns, and error-field patterns are all pairwise similar.
 */
private boolean checkSimilarErrorMatchPattern(BLangErrorMatchPattern firstErrorMatchPattern,
        BLangErrorMatchPattern secondErrorMatchPattern) {
    if (firstErrorMatchPattern == null || secondErrorMatchPattern == null) {
        return false;
    }
    // Short-circuits in the same component order as the individual checks.
    return checkSimilarErrorTypeReference(firstErrorMatchPattern.errorTypeReference,
                    secondErrorMatchPattern.errorTypeReference)
            && checkSimilarErrorMessagePattern(firstErrorMatchPattern.errorMessageMatchPattern,
                    secondErrorMatchPattern.errorMessageMatchPattern)
            && checkSimilarErrorCauseMatchPattern(firstErrorMatchPattern.errorCauseMatchPattern,
                    secondErrorMatchPattern.errorCauseMatchPattern)
            && checkSimilarErrorFieldMatchPatterns(firstErrorMatchPattern.errorFieldMatchPatterns,
                    secondErrorMatchPattern.errorFieldMatchPatterns);
}
/**
 * Similar when both error type references are absent, or both are present
 * with the same type name.
 */
private boolean checkSimilarErrorTypeReference(BLangUserDefinedType firstErrorTypeRef,
        BLangUserDefinedType secondErrorTypeRef) {
    if (firstErrorTypeRef == null || secondErrorTypeRef == null) {
        return firstErrorTypeRef == secondErrorTypeRef; // true only when both are null
    }
    return firstErrorTypeRef.typeName.value.equals(secondErrorTypeRef.typeName.value);
}
/**
 * Similar when both error message patterns are absent, or both are present
 * with similar simple match patterns.
 */
private boolean checkSimilarErrorMessagePattern(BLangErrorMessageMatchPattern firstErrorMsgMatchPattern,
        BLangErrorMessageMatchPattern secondErrorMsgMatchPattern) {
    if (firstErrorMsgMatchPattern == null || secondErrorMsgMatchPattern == null) {
        return firstErrorMsgMatchPattern == secondErrorMsgMatchPattern; // both null
    }
    return checkSimilarSimpleMatchPattern(firstErrorMsgMatchPattern.simpleMatchPattern,
            secondErrorMsgMatchPattern.simpleMatchPattern);
}
/**
 * Compares two simple match patterns. A variable on the first side matches
 * anything; constants compare by value; otherwise the first side (wildcard)
 * is similar unless the second side is a variable.
 */
private boolean checkSimilarSimpleMatchPattern(BLangSimpleMatchPattern firstSimpleMatchPattern,
        BLangSimpleMatchPattern secondSimpleMatchPattern) {
    if (firstSimpleMatchPattern == null || secondSimpleMatchPattern == null) {
        return firstSimpleMatchPattern == secondSimpleMatchPattern; // both absent
    }
    if (firstSimpleMatchPattern.varVariableName != null) {
        return true; // a variable pattern shadows any later simple pattern
    }
    BLangConstPattern firstConstPattern = firstSimpleMatchPattern.constPattern;
    BLangConstPattern secondConstPattern = secondSimpleMatchPattern.constPattern;
    if (firstConstPattern != null) {
        return secondConstPattern != null
                && checkSimilarConstMatchPattern(firstConstPattern, secondConstPattern);
    }
    return secondSimpleMatchPattern.varVariableName == null;
}
/**
 * Similar when both cause patterns are absent, or both are present with
 * similar simple patterns and similar nested error patterns.
 */
private boolean checkSimilarErrorCauseMatchPattern(BLangErrorCauseMatchPattern firstErrorCauseMatchPattern,
        BLangErrorCauseMatchPattern secondErrorCauseMatchPattern) {
    if (firstErrorCauseMatchPattern == null || secondErrorCauseMatchPattern == null) {
        return firstErrorCauseMatchPattern == secondErrorCauseMatchPattern; // both null
    }
    return checkSimilarSimpleMatchPattern(firstErrorCauseMatchPattern.simpleMatchPattern,
                    secondErrorCauseMatchPattern.simpleMatchPattern)
            && checkSimilarErrorMatchPattern(firstErrorCauseMatchPattern.errorMatchPattern,
                    secondErrorCauseMatchPattern.errorMatchPattern);
}
/**
 * Similar when the first side has no named-arg patterns, or each of its
 * named-arg patterns (position-wise) is similar to the second side's — the
 * second side must supply at least as many named args.
 */
private boolean checkSimilarErrorFieldMatchPatterns(BLangErrorFieldMatchPatterns firstErrorFieldMatchPatterns,
        BLangErrorFieldMatchPatterns secondErrorFieldMatchPatterns) {
    if (firstErrorFieldMatchPatterns == null) {
        return true;
    }
    List<BLangNamedArgMatchPattern> firstArgs = firstErrorFieldMatchPatterns.namedArgMatchPatterns;
    if (firstArgs.isEmpty()) {
        return true; // no constraints on the first side
    }
    if (secondErrorFieldMatchPatterns == null) {
        return false;
    }
    List<BLangNamedArgMatchPattern> secondArgs = secondErrorFieldMatchPatterns.namedArgMatchPatterns;
    if (firstArgs.size() > secondArgs.size()) {
        return false;
    }
    for (int index = 0; index < firstArgs.size(); index++) {
        if (!checkSimilarNamedArgMatchPatterns(firstArgs.get(index), secondArgs.get(index))) {
            return false;
        }
    }
    return true;
}
/** Similar when the argument names match and the nested patterns are similar. */
private boolean checkSimilarNamedArgMatchPatterns(BLangNamedArgMatchPattern firstNamedArgMatchPattern,
        BLangNamedArgMatchPattern secondNamedArgMatchPattern) {
    return firstNamedArgMatchPattern.argName.value.equals(secondNamedArgMatchPattern.argName.value)
            && checkSimilarMatchPatterns(firstNamedArgMatchPattern.matchPattern,
                    secondNamedArgMatchPattern.matchPattern);
}
/**
 * Two constant patterns are similar when their constant values are equal,
 * either directly or after resolving a single-value finite type (e.g. a
 * constant reference) to its underlying literal value.
 */
private boolean checkSimilarConstMatchPattern(BLangConstPattern firstConstMatchPattern,
                                              BLangConstPattern secondConstMatchPattern) {
    // getConstValue builds a single-entry map; call it once per pattern
    // (previously it was invoked twice per pattern — four map builds).
    Map.Entry<Object, BType> firstEntry = getConstValue(firstConstMatchPattern).entrySet().iterator().next();
    Map.Entry<Object, BType> secondEntry = getConstValue(secondConstMatchPattern).entrySet().iterator().next();
    Object firstConstValue = firstEntry.getKey();
    Object secondConstValue = secondEntry.getKey();
    BType firstConstType = firstEntry.getValue();
    BType secondConstType = secondEntry.getValue();
    if (firstConstValue == null || secondConstValue == null) {
        return false;
    }
    if (firstConstValue.equals(secondConstValue)) {
        return true;
    }
    // A constant reference carries a finite type; compare the underlying values.
    if (firstConstType != null && Types.getReferredType(firstConstType).tag == TypeTags.FINITE) {
        firstConstValue = getConstValueFromFiniteType(((BFiniteType) firstConstType));
    }
    if (secondConstType != null && Types.getReferredType(secondConstType).tag == TypeTags.FINITE) {
        secondConstValue = getConstValueFromFiniteType(((BFiniteType) secondConstType));
    }
    if (firstConstValue == null || secondConstValue == null) {
        return false;
    }
    return firstConstValue.equals(secondConstValue);
}
/**
 * Extracts a constant pattern's value (map key) and, for constant references,
 * its type (map value) into a single-entry map. Unsupported expression kinds
 * yield an empty map — callers must not assume an entry exists.
 */
private HashMap<Object, BType> getConstValue(BLangConstPattern constPattern) {
    HashMap<Object, BType> constValAndType = new HashMap<>();
    switch (constPattern.expr.getKind()) {
        case NUMERIC_LITERAL:
            constValAndType.put(((BLangNumericLiteral) constPattern.expr).value, null);
            break;
        case LITERAL:
            constValAndType.put(((BLangLiteral) constPattern.expr).value, null);
            break;
        case SIMPLE_VARIABLE_REF:
            // A constant reference: keep the pattern type so the caller can
            // resolve the underlying finite-type value.
            constValAndType.put(((BLangSimpleVarRef) constPattern.expr).variableName, constPattern.getBType());
            break;
        case UNARY_EXPR:
            BLangNumericLiteral newNumericLiteral = Types.constructNumericLiteralFromUnaryExpr(
                    (BLangUnaryExpr) constPattern.expr);
            constValAndType.put(newNumericLiteral.value, null);
            break; // previously missing; guards against future case additions
        default:
            break;
    }
    return constValAndType;
}
/**
 * Returns the single literal value of a one-member finite type, or null when
 * the type has more than one member or the member is not a plain literal.
 */
private Object getConstValueFromFiniteType(BFiniteType type) {
    if (type.getValueSpace().size() != 1) {
        return null;
    }
    BLangExpression expr = type.getValueSpace().iterator().next();
    NodeKind exprKind = expr.getKind();
    if (exprKind == NodeKind.NUMERIC_LITERAL) {
        return ((BLangNumericLiteral) expr).value;
    }
    if (exprKind == NodeKind.LITERAL) {
        return ((BLangLiteral) expr).value;
    }
    return null;
}
/**
 * Compares two list match patterns element-wise. A shorter first list is
 * similar only when it carries a rest pattern; equal-length lists must agree
 * on rest patterns (a first-side rest subsumes anything on the second side).
 */
private boolean checkSimilarListMatchPattern(BLangListMatchPattern firstListMatchPattern,
        BLangListMatchPattern secondListMatchPattern) {
    List<BLangMatchPattern> firstMatchPatterns = firstListMatchPattern.matchPatterns;
    List<BLangMatchPattern> secondMatchPatterns = secondListMatchPattern.matchPatterns;
    int firstPatternsSize = firstMatchPatterns.size();
    int secondPatternsSize = secondMatchPatterns.size();
    if (firstPatternsSize > secondPatternsSize) {
        return false;
    }
    for (int i = 0; i < firstPatternsSize; i++) {
        if (!checkSimilarMatchPatterns(firstMatchPatterns.get(i), secondMatchPatterns.get(i))) {
            return false;
        }
    }
    if (firstPatternsSize < secondPatternsSize) {
        return firstListMatchPattern.restMatchPattern != null;
    }
    if (firstListMatchPattern.restMatchPattern != null) {
        return true;
    }
    return secondListMatchPattern.restMatchPattern == null;
}
/** Mapping match patterns are similar when their field patterns are similar. */
private boolean checkSimilarMappingMatchPattern(BLangMappingMatchPattern firstMappingMatchPattern,
        BLangMappingMatchPattern secondMappingMatchPattern) {
    return checkSimilarFieldMatchPatterns(firstMappingMatchPattern.fieldMatchPatterns,
            secondMappingMatchPattern.fieldMatchPatterns);
}
/**
 * True when every field pattern on the first side has a similar counterpart
 * somewhere on the second side.
 */
private boolean checkSimilarFieldMatchPatterns(List<BLangFieldMatchPattern> firstFieldMatchPatterns,
        List<BLangFieldMatchPattern> secondFieldMatchPatterns) {
    return firstFieldMatchPatterns.stream().allMatch(firstField ->
            secondFieldMatchPatterns.stream().anyMatch(secondField ->
                    checkSimilarFieldMatchPattern(firstField, secondField)));
}
/** Similar when the field names match and the nested patterns are similar. */
private boolean checkSimilarFieldMatchPattern(BLangFieldMatchPattern firstFieldMatchPattern,
        BLangFieldMatchPattern secondFieldMatchPattern) {
    if (!firstFieldMatchPattern.fieldName.value.equals(secondFieldMatchPattern.fieldName.value)) {
        return false;
    }
    return checkSimilarMatchPatterns(firstFieldMatchPattern.matchPattern, secondFieldMatchPattern.matchPattern);
}
// Returns true when two binding patterns of the same kind overlap (making the
// later one unreachable). Wildcard, rest, and capture patterns bind anything,
// so same-kind pairs always overlap; composite kinds dispatch to their
// specific comparison. Different kinds never overlap here.
private boolean checkSimilarBindingPatterns(BLangBindingPattern firstBidingPattern,
BLangBindingPattern secondBindingPattern) {
NodeKind firstBindingPatternKind = firstBidingPattern.getKind();
NodeKind secondBindingPatternKind = secondBindingPattern.getKind();
if (firstBindingPatternKind != secondBindingPatternKind) {
return false;
}
switch (firstBindingPatternKind) {
case WILDCARD_BINDING_PATTERN:
case REST_BINDING_PATTERN:
case CAPTURE_BINDING_PATTERN:
return true;
case LIST_BINDING_PATTERN:
return checkSimilarListBindingPatterns((BLangListBindingPattern) firstBidingPattern,
(BLangListBindingPattern) secondBindingPattern);
case MAPPING_BINDING_PATTERN:
return checkSimilarMappingBindingPattern((BLangMappingBindingPattern) firstBidingPattern,
(BLangMappingBindingPattern) secondBindingPattern);
case ERROR_BINDING_PATTERN:
return checkSimilarErrorBindingPatterns((BLangErrorBindingPattern) firstBidingPattern,
(BLangErrorBindingPattern) secondBindingPattern);
default:
return false;
}
}
/** Mapping binding patterns are similar when their field patterns are similar. */
private boolean checkSimilarMappingBindingPattern(BLangMappingBindingPattern firstMappingBindingPattern,
        BLangMappingBindingPattern secondMappingBindingPattern) {
    return checkSimilarFieldBindingPatterns(firstMappingBindingPattern.fieldBindingPatterns,
            secondMappingBindingPattern.fieldBindingPatterns);
}
/**
 * True when every field binding pattern on the first side has a similar
 * counterpart somewhere on the second side.
 */
private boolean checkSimilarFieldBindingPatterns(List<BLangFieldBindingPattern> firstFieldBindingPatterns,
        List<BLangFieldBindingPattern> secondFieldBindingPatterns) {
    return firstFieldBindingPatterns.stream().allMatch(firstField ->
            secondFieldBindingPatterns.stream().anyMatch(secondField ->
                    checkSimilarFieldBindingPattern(firstField, secondField)));
}
/**
 * Field binding patterns are similar when the field names match and either
 * the nested patterns (of the same kind) are similar, or the first side is a
 * capture pattern (which binds anything).
 */
private boolean checkSimilarFieldBindingPattern(BLangFieldBindingPattern firstFieldBindingPattern,
        BLangFieldBindingPattern secondFieldBindingPattern) {
    if (!firstFieldBindingPattern.fieldName.value.equals(secondFieldBindingPattern.fieldName.value)) {
        return false;
    }
    if (firstFieldBindingPattern.bindingPattern.getKind() == secondFieldBindingPattern.bindingPattern.getKind()) {
        return checkSimilarBindingPatterns(firstFieldBindingPattern.bindingPattern,
                secondFieldBindingPattern.bindingPattern);
    }
    return firstFieldBindingPattern.bindingPattern.getKind() == NodeKind.CAPTURE_BINDING_PATTERN;
}
// Compares two list binding patterns element-wise. A mismatching member still
// counts when the first side's member is a capture pattern. Equal lengths must
// agree on rest patterns.
// NOTE(review): when the first list is shorter, this requires the SECOND
// side's rest pattern to be present — the match-pattern twin
// (checkSimilarListMatchPattern) requires the FIRST side's rest pattern in
// the analogous case. Confirm whether this asymmetry is intentional.
private boolean checkSimilarListBindingPatterns(BLangListBindingPattern firstBindingPattern,
BLangListBindingPattern secondBindingPattern) {
List<BLangBindingPattern> firstPatterns = firstBindingPattern.bindingPatterns;
List<BLangBindingPattern> secondPatterns = secondBindingPattern.bindingPatterns;
int firstPatternsSize = firstPatterns.size();
int secondPatternsSize = secondPatterns.size();
if (firstPatternsSize <= secondPatternsSize) {
for (int i = 0; i < firstPatternsSize; i++) {
if (!checkSimilarBindingPatterns(firstPatterns.get(i), secondPatterns.get(i))) {
return firstPatterns.get(i).getKind() == NodeKind.CAPTURE_BINDING_PATTERN;
}
}
if (firstPatternsSize == secondPatternsSize) {
if (firstBindingPattern.restBindingPattern != null) {
return true;
}
return secondBindingPattern.restBindingPattern == null;
}
return secondBindingPattern.restBindingPattern != null;
}
return false;
}
/**
 * Two error binding patterns are similar when their type references, message
 * patterns, cause patterns, and error-field patterns are all pairwise similar.
 */
private boolean checkSimilarErrorBindingPatterns(BLangErrorBindingPattern firstErrorBindingPattern,
        BLangErrorBindingPattern secondErrorBindingPattern) {
    if (firstErrorBindingPattern == null || secondErrorBindingPattern == null) {
        return false;
    }
    // Short-circuits in the same component order as the individual checks.
    return checkSimilarErrorTypeReference(firstErrorBindingPattern.errorTypeReference,
                    secondErrorBindingPattern.errorTypeReference)
            && checkSimilarErrorMessageBindingPattern(firstErrorBindingPattern.errorMessageBindingPattern,
                    secondErrorBindingPattern.errorMessageBindingPattern)
            && checkSimilarErrorCauseBindingPattern(firstErrorBindingPattern.errorCauseBindingPattern,
                    secondErrorBindingPattern.errorCauseBindingPattern)
            && checkSimilarErrorFieldBindingPatterns(firstErrorBindingPattern.errorFieldBindingPatterns,
                    secondErrorBindingPattern.errorFieldBindingPatterns);
}
/**
 * Checks whether two error-message binding patterns are similar. Both absent counts as
 * similar; exactly one absent does not.
 */
private boolean checkSimilarErrorMessageBindingPattern(BLangErrorMessageBindingPattern firstErrorMsgBindingPattern,
                                                       BLangErrorMessageBindingPattern secondErrorMsgBindingPattern) {
    if (firstErrorMsgBindingPattern == null || secondErrorMsgBindingPattern == null) {
        // Reference equality here is only true when both are null.
        return firstErrorMsgBindingPattern == secondErrorMsgBindingPattern;
    }
    return checkSimilarSimpleBindingPattern(firstErrorMsgBindingPattern.simpleBindingPattern,
            secondErrorMsgBindingPattern.simpleBindingPattern);
}
// Checks whether two simple binding patterns are similar. Capture patterns are compared
// recursively; otherwise a wildcard in the first pattern subsumes the second.
private boolean checkSimilarSimpleBindingPattern(BLangSimpleBindingPattern firstSimpleBindingPattern,
BLangSimpleBindingPattern secondSimpleBindingPattern) {
if (firstSimpleBindingPattern != null && secondSimpleBindingPattern != null) {
BLangBindingPattern firstCaptureBindingPattern = firstSimpleBindingPattern.captureBindingPattern;
BLangBindingPattern secondCaptureBindingPattern = secondSimpleBindingPattern.captureBindingPattern;
if (firstCaptureBindingPattern != null && secondCaptureBindingPattern != null) {
return checkSimilarBindingPatterns(firstCaptureBindingPattern, secondCaptureBindingPattern);
}
// Not both captures: similar only when the first pattern is a wildcard.
return firstSimpleBindingPattern.wildCardBindingPattern != null;
}
// Both absent counts as similar; exactly one absent does not.
return firstSimpleBindingPattern == null && secondSimpleBindingPattern == null;
}
/**
 * Checks whether two error-cause binding patterns are similar: both their simple binding
 * patterns and their nested error binding patterns must match. Both absent is similar.
 */
private boolean checkSimilarErrorCauseBindingPattern(BLangErrorCauseBindingPattern firstErrorCauseBindingPattern,
                                                     BLangErrorCauseBindingPattern secondErrorCauseBindingPattern) {
    if (firstErrorCauseBindingPattern == null || secondErrorCauseBindingPattern == null) {
        // Reference equality here is only true when both are null.
        return firstErrorCauseBindingPattern == secondErrorCauseBindingPattern;
    }
    return checkSimilarSimpleBindingPattern(firstErrorCauseBindingPattern.simpleBindingPattern,
                    secondErrorCauseBindingPattern.simpleBindingPattern)
            && checkSimilarErrorBindingPatterns(firstErrorCauseBindingPattern.errorBindingPattern,
                    secondErrorCauseBindingPattern.errorBindingPattern);
}
// Checks whether all named-arg field patterns of the first error binding pattern are present
// and similar in the second. An absent or empty first pattern is vacuously similar.
private boolean checkSimilarErrorFieldBindingPatterns(
BLangErrorFieldBindingPatterns firstErrorFieldBindingPatterns,
BLangErrorFieldBindingPatterns secondErrorFieldBindingPatterns) {
if (firstErrorFieldBindingPatterns == null) {
return true;
}
List<BLangNamedArgBindingPattern> firstNamedArgPatterns =
firstErrorFieldBindingPatterns.namedArgBindingPatterns;
int firstNamedArgPatternsSize = firstNamedArgPatterns.size();
if (firstNamedArgPatternsSize == 0) {
return true;
}
if (secondErrorFieldBindingPatterns == null) {
return false;
}
List<BLangNamedArgBindingPattern> secondNamedArgPatterns =
secondErrorFieldBindingPatterns.namedArgBindingPatterns;
if (firstNamedArgPatternsSize > secondNamedArgPatterns.size()) {
return false;
}
// Compare positionally up to the length of the first pattern list.
for (int i = 0; i < firstNamedArgPatternsSize; i++) {
if (!checkSimilarNamedArgBindingPatterns(firstNamedArgPatterns.get(i), secondNamedArgPatterns.get(i))) {
return false;
}
}
return true;
}
/**
 * Checks whether two named-arg binding patterns are similar: same argument name and
 * similar nested binding patterns.
 */
private boolean checkSimilarNamedArgBindingPatterns(BLangNamedArgBindingPattern firstNamedArgBindingPattern,
                                                    BLangNamedArgBindingPattern secondNamedArgBindingPattern) {
    return firstNamedArgBindingPattern.argName.value.equals(secondNamedArgBindingPattern.argName.value)
            && checkSimilarBindingPatterns(firstNamedArgBindingPattern.bindingPattern,
                    secondNamedArgBindingPattern.bindingPattern);
}
// Checks whether two match guards are similar: both absent, or both are `v is T` type tests
// on the same variable where the first tested type is assignable to the second.
private boolean checkSimilarMatchGuard(BLangMatchGuard firstMatchGuard, BLangMatchGuard secondMatchGuard) {
if (firstMatchGuard == null && secondMatchGuard == null) {
return true;
}
if (firstMatchGuard == null || secondMatchGuard == null) {
return false;
}
// Only guards of the form `<simple-var> is <type>` are comparable.
if (firstMatchGuard.expr.getKind() == NodeKind.TYPE_TEST_EXPR &&
secondMatchGuard.expr.getKind() == NodeKind.TYPE_TEST_EXPR &&
((BLangTypeTestExpr) firstMatchGuard.expr).expr.getKind() == NodeKind.SIMPLE_VARIABLE_REF &&
((BLangTypeTestExpr) secondMatchGuard.expr).expr.getKind() == NodeKind.SIMPLE_VARIABLE_REF) {
BLangTypeTestExpr firstTypeTest = (BLangTypeTestExpr) firstMatchGuard.expr;
BLangTypeTestExpr secondTypeTest = (BLangTypeTestExpr) secondMatchGuard.expr;
return ((BLangSimpleVarRef) firstTypeTest.expr).variableName.toString().equals(
((BLangSimpleVarRef) secondTypeTest.expr).variableName.toString()) &&
types.isAssignable(firstTypeTest.typeNode.getBType(),
secondTypeTest.typeNode.getBType());
}
return false;
}
/**
 * Compares the variables declared by the given match pattern against those of a previously
 * seen pattern. On the first call (empty reference map) the reference set is seeded and the
 * comparison trivially succeeds; afterwards both patterns must declare exactly the same
 * identifiers.
 */
private boolean compareVariables(Map<String, BVarSymbol> varsInPreviousMatchPattern,
                                 BLangMatchPattern matchPattern) {
    Map<String, BVarSymbol> varsInCurrentMatchPattern = matchPattern.declaredVars;
    if (varsInPreviousMatchPattern.isEmpty()) {
        // First pattern seen: record its variables as the reference set.
        varsInPreviousMatchPattern.putAll(varsInCurrentMatchPattern);
        return true;
    }
    // Equal sizes plus full identifier containment means identical declared-variable sets.
    return varsInPreviousMatchPattern.size() == varsInCurrentMatchPattern.size()
            && varsInCurrentMatchPattern.keySet().containsAll(varsInPreviousMatchPattern.keySet());
}
@Override
public void visit(BLangWildCardMatchPattern wildCardMatchPattern, AnalyzerData data) {
// `_` is a last (catch-all) pattern only when the matched expression's static type is
// assignable to `any`.
wildCardMatchPattern.isLastPattern =
wildCardMatchPattern.matchExpr != null && types.isAssignable(wildCardMatchPattern.matchExpr.getBType(),
symTable.anyType);
}
@Override
public void visit(BLangConstPattern constMatchPattern, AnalyzerData data) {
// Only the constant expression itself needs analysis.
analyzeNode(constMatchPattern.expr, data);
}
@Override
public void visit(BLangVarBindingPatternMatchPattern varBindingPattern, AnalyzerData data) {
BLangBindingPattern bindingPattern = varBindingPattern.getBindingPattern();
analyzeNode(bindingPattern, data);
// Decide whether this binding pattern acts as a last (catch-all) pattern.
switch (bindingPattern.getKind()) {
case WILDCARD_BINDING_PATTERN:
// `_` is a last pattern only when the matched type is assignable to `any`.
varBindingPattern.isLastPattern =
varBindingPattern.matchExpr != null && types.isAssignable(
varBindingPattern.matchExpr.getBType(),
symTable.anyType);
return;
case CAPTURE_BINDING_PATTERN:
// An unguarded capture pattern matches every value.
varBindingPattern.isLastPattern =
varBindingPattern.matchExpr != null && !varBindingPattern.matchGuardIsAvailable;
return;
case LIST_BINDING_PATTERN:
if (varBindingPattern.matchExpr == null) {
return;
}
// A list binding pattern is last when the matched type is the same as, or assignable
// to, the pattern's type.
varBindingPattern.isLastPattern = types.isSameType(varBindingPattern.matchExpr.getBType(),
varBindingPattern.getBType()) || types.isAssignable(
varBindingPattern.matchExpr.getBType(),
varBindingPattern.getBType());
}
}
@Override
public void visit(BLangMappingBindingPattern mappingBindingPattern, AnalyzerData data) {
// No additional analysis required for mapping binding patterns here.
}
@Override
public void visit(BLangWildCardBindingPattern wildCardBindingPattern, AnalyzerData data) {
// No additional analysis required for wildcard binding patterns here.
}
@Override
public void visit(BLangListMatchPattern listMatchPattern, AnalyzerData data) {
if (listMatchPattern.matchExpr == null) {
return;
}
// A list pattern is a last pattern when the matched type is assignable to the pattern's
// type and the pattern contains no constant sub-patterns.
listMatchPattern.isLastPattern = types.isAssignable(listMatchPattern.matchExpr.getBType(),
listMatchPattern.getBType()) && !isConstMatchPatternExist(listMatchPattern);
}
/**
 * Recursively checks whether the given match pattern contains a constant match pattern,
 * looking through list and mapping patterns.
 */
private boolean isConstMatchPatternExist(BLangMatchPattern matchPattern) {
    NodeKind patternKind = matchPattern.getKind();
    if (patternKind == NodeKind.CONST_MATCH_PATTERN) {
        return true;
    }
    if (patternKind == NodeKind.LIST_MATCH_PATTERN) {
        // A list pattern contains a constant if any member does.
        return ((BLangListMatchPattern) matchPattern).matchPatterns.stream()
                .anyMatch(this::isConstMatchPatternExist);
    }
    if (patternKind == NodeKind.MAPPING_MATCH_PATTERN) {
        // A mapping pattern contains a constant if any field's value pattern does.
        return ((BLangMappingMatchPattern) matchPattern).fieldMatchPatterns.stream()
                .anyMatch(fieldPattern -> isConstMatchPatternExist(fieldPattern.matchPattern));
    }
    return false;
}
@Override
public void visit(BLangCaptureBindingPattern captureBindingPattern, AnalyzerData data) {
// No additional analysis required here.
}
@Override
public void visit(BLangListBindingPattern listBindingPattern, AnalyzerData data) {
// No additional analysis required here.
}
@Override
public void visit(BLangErrorMatchPattern errorMatchPattern, AnalyzerData data) {
// No additional analysis required here.
}
@Override
public void visit(BLangErrorBindingPattern errorBindingPattern, AnalyzerData data) {
// No additional analysis required here.
}
@Override
public void visit(BLangForeach foreach, AnalyzerData data) {
// Entering a loop: restricts transaction-exiting break/continue checks below.
data.loopWithinTransactionCheckStack.push(true);
// Fresh error-type collector for a potential on-fail clause.
data.errorTypes.push(new LinkedHashSet<>());
boolean failureHandled = data.failureHandled;
if (!data.failureHandled) {
data.failureHandled = foreach.onFailClause != null;
}
data.loopCount++;
BLangBlockStmt body = foreach.body;
data.env = SymbolEnv.createLoopEnv(foreach, data.env);
analyzeNode(body, data);
data.loopCount--;
// Restore the caller's failure-handled state.
data.failureHandled = failureHandled;
data.loopWithinTransactionCheckStack.pop();
analyzeExpr(foreach.collection, data);
// With an on-fail clause, failures in the body break out to the outer block.
body.failureBreakMode = foreach.onFailClause != null ?
BLangBlockStmt.FailureBreakMode.BREAK_TO_OUTER_BLOCK : BLangBlockStmt.FailureBreakMode.NOT_BREAKABLE;
analyzeOnFailClause(foreach.onFailClause, data);
data.errorTypes.pop();
}
@Override
public void visit(BLangWhile whileNode, AnalyzerData data) {
// Entering a loop: restricts transaction-exiting break/continue checks below.
data.loopWithinTransactionCheckStack.push(true);
// Fresh error-type collector for a potential on-fail clause.
data.errorTypes.push(new LinkedHashSet<>());
boolean failureHandled = data.failureHandled;
if (!data.failureHandled) {
data.failureHandled = whileNode.onFailClause != null;
}
data.loopCount++;
BLangBlockStmt body = whileNode.body;
data.env = SymbolEnv.createLoopEnv(whileNode, data.env);
analyzeNode(body, data);
data.loopCount--;
// Restore the caller's failure-handled state.
data.failureHandled = failureHandled;
data.loopWithinTransactionCheckStack.pop();
analyzeExpr(whileNode.expr, data);
analyzeOnFailClause(whileNode.onFailClause, data);
data.errorTypes.pop();
}
@Override
public void visit(BLangDo doNode, AnalyzerData data) {
// Fresh error-type collector for a potential on-fail clause.
data.errorTypes.push(new LinkedHashSet<>());
boolean failureHandled = data.failureHandled;
if (!data.failureHandled) {
data.failureHandled = doNode.onFailClause != null;
}
analyzeNode(doNode.body, data);
// Restore the caller's failure-handled state.
data.failureHandled = failureHandled;
// With an on-fail clause, failures in the body break out to the outer block.
doNode.body.failureBreakMode = doNode.onFailClause != null ?
BLangBlockStmt.FailureBreakMode.BREAK_TO_OUTER_BLOCK : BLangBlockStmt.FailureBreakMode.NOT_BREAKABLE;
analyzeOnFailClause(doNode.onFailClause, data);
data.errorTypes.pop();
}
@Override
public void visit(BLangFail failNode, AnalyzerData data) {
data.failVisited = true;
analyzeExpr(failNode.expr, data);
// No further checks for fail at package level.
if (data.env.scope.owner.getKind() == SymbolKind.PACKAGE) {
return;
}
typeChecker.checkExpr(failNode.expr, data.env);
// Record the failed-over error type for enclosing on-fail analysis.
if (!data.errorTypes.empty()) {
data.errorTypes.peek().add(getErrorTypes(failNode.expr.getBType()));
}
// Without an enclosing on-fail clause, the error propagates out of the invokable, so its
// return type must accept the error type.
if (!data.failureHandled) {
BType exprType = data.env.enclInvokable.getReturnTypeNode().getBType();
data.returnTypes.peek().add(exprType);
if (!types.isAssignable(getErrorTypes(failNode.expr.getBType()), exprType)) {
dlog.error(failNode.pos, DiagnosticErrorCode.FAIL_EXPR_NO_MATCHING_ERROR_RETURN_IN_ENCL_INVOKABLE);
}
}
}
@Override
public void visit(BLangLock lockNode, AnalyzerData data) {
// Fresh error-type collector for a potential on-fail clause.
data.errorTypes.push(new LinkedHashSet<>());
boolean failureHandled = data.failureHandled;
if (!data.failureHandled) {
data.failureHandled = lockNode.onFailClause != null;
}
// Track lock nesting so statements inside can detect the lock context.
boolean previousWithinLockBlock = data.withinLockBlock;
data.withinLockBlock = true;
lockNode.body.stmts.forEach(e -> analyzeNode(e, data));
data.withinLockBlock = previousWithinLockBlock;
data.failureHandled = failureHandled;
// With an on-fail clause, failures in the body break out to the outer block.
lockNode.body.failureBreakMode = lockNode.onFailClause != null ?
BLangBlockStmt.FailureBreakMode.BREAK_TO_OUTER_BLOCK : BLangBlockStmt.FailureBreakMode.NOT_BREAKABLE;
analyzeOnFailClause(lockNode.onFailClause, data);
data.errorTypes.pop();
}
@Override
public void visit(BLangContinue continueNode, AnalyzerData data) {
    // `continue` is only meaningful inside a loop; the checks are mutually exclusive and
    // at most one diagnostic is reported.
    if (data.loopCount == 0) {
        this.dlog.error(continueNode.pos, DiagnosticErrorCode.CONTINUE_CANNOT_BE_OUTSIDE_LOOP);
    } else if (checkNextBreakValidityInTransaction(data)) {
        // A continue that would jump out of an enclosing transaction is not allowed.
        this.dlog.error(continueNode.pos, DiagnosticErrorCode.CONTINUE_CANNOT_BE_USED_TO_EXIT_TRANSACTION);
    } else if (data.loopAlterNotAllowed) {
        this.dlog.error(continueNode.pos, DiagnosticErrorCode.CONTINUE_NOT_ALLOWED);
    }
}
@Override
public void visit(BLangImportPackage importPkgNode, AnalyzerData data) {
BPackageSymbol pkgSymbol = importPkgNode.symbol;
SymbolEnv pkgEnv = this.symTable.pkgEnvMap.get(pkgSymbol);
// Only analyze imported packages that have a registered environment.
if (pkgEnv == null) {
return;
}
analyzeNode(pkgEnv.node, data);
}
@Override
public void visit(BLangXMLNS xmlnsNode, AnalyzerData data) {
/* ignore */
}
@Override
public void visit(BLangClientDeclaration node, AnalyzerData data) {
// No additional analysis required here.
}
@Override
public void visit(BLangService serviceNode, AnalyzerData data) {
// No additional analysis required here.
}
// Checks that a type referenced from a public construct is itself exportable.
// Anonymous owners are exempt outside function signatures.
private void analyzeExportableTypeRef(BSymbol owner, BTypeSymbol symbol, boolean inFuncSignature,
Location pos) {
if (!inFuncSignature && Symbols.isFlagOn(owner.flags, Flags.ANONYMOUS)) {
return;
}
if (Symbols.isPublic(owner)) {
HashSet<BTypeSymbol> visitedSymbols = new HashSet<>();
checkForExportableType(symbol, pos, visitedSymbols);
}
}
@Override
public void visit(BLangLetExpression letExpression, AnalyzerData data) {
// Let expressions are not yet supported as record or object field defaults.
int ownerSymTag = data.env.scope.owner.tag;
if ((ownerSymTag & SymTag.RECORD) == SymTag.RECORD) {
dlog.error(letExpression.pos, DiagnosticErrorCode.LET_EXPRESSION_NOT_YET_SUPPORTED_RECORD_FIELD);
} else if ((ownerSymTag & SymTag.OBJECT) == SymTag.OBJECT) {
dlog.error(letExpression.pos, DiagnosticErrorCode.LET_EXPRESSION_NOT_YET_SUPPORTED_OBJECT_FIELD);
}
// Analyze the let-var declarations in the let expression's own environment.
data.env = letExpression.env;
for (BLangLetVariable letVariable : letExpression.letVarDeclarations) {
analyzeNode((BLangNode) letVariable.definitionNode, data);
}
analyzeExpr(letExpression.expr, data);
}
@Override
public void visit(BLangSimpleVariable varNode, AnalyzerData data) {
analyzeTypeNode(varNode.typeNode, data);
analyzeExpr(varNode.expr, data);
if (Objects.isNull(varNode.symbol)) {
return;
}
// Exportability checks only apply to public symbols.
if (!Symbols.isPublic(varNode.symbol)) {
return;
}
int ownerSymTag = data.env.scope.owner.tag;
if ((ownerSymTag & SymTag.RECORD) == SymTag.RECORD || (ownerSymTag & SymTag.OBJECT) == SymTag.OBJECT) {
// Public record/object fields must reference exportable types.
analyzeExportableTypeRef(data.env.scope.owner, varNode.getBType().tsymbol, false, varNode.pos);
} else if ((ownerSymTag & SymTag.INVOKABLE) != SymTag.INVOKABLE) {
// Non-local (module-level) public variables must reference exportable types.
analyzeExportableTypeRef(varNode.symbol, varNode.getBType().tsymbol, false, varNode.pos);
}
varNode.annAttachments.forEach(annotationAttachment -> analyzeNode(annotationAttachment, data));
}
// Determines whether an inferred-size array type appears in a context where its size can
// actually be inferred: a variable declaration with a suitable initializer.
private boolean isValidInferredArray(BLangNode node) {
switch (node.getKind()) {
case INTERSECTION_TYPE_NODE:
case UNION_TYPE_NODE:
// Look through composite type nodes to the enclosing construct.
return isValidInferredArray(node.parent);
case VARIABLE:
BLangSimpleVariable varNode = (BLangSimpleVariable) node;
BLangExpression expr = varNode.expr;
// Needs an initializer, a valid declaration context, and an initializer form from
// which a length can be inferred.
return expr != null && isValidContextForInferredArray(node.parent) &&
isValidVariableForInferredArray(expr);
default:
return false;
}
}
/**
 * Checks whether the enclosing construct is a valid declaration context for an
 * inferred-size array: package level, a function body, or a block.
 */
private boolean isValidContextForInferredArray(BLangNode node) {
    NodeKind enclosingKind = node.getKind();
    if (enclosingKind == NodeKind.VARIABLE_DEF) {
        // A variable definition inherits validity from its own enclosing construct.
        return isValidContextForInferredArray(node.parent);
    }
    return enclosingKind == NodeKind.PACKAGE
            || enclosingKind == NodeKind.EXPR_FUNCTION_BODY
            || enclosingKind == NodeKind.BLOCK_FUNCTION_BODY
            || enclosingKind == NodeKind.BLOCK;
}
// Checks whether an initializer expression can determine an inferred array size:
// an array-typed literal, a list constructor, or a parenthesized form of either.
private boolean isValidVariableForInferredArray(BLangNode node) {
switch (node.getKind()) {
case LITERAL:
if (node.getBType().tag == TypeTags.ARRAY) {
return true;
}
break;
case LIST_CONSTRUCTOR_EXPR:
return true;
case GROUP_EXPR:
// Unwrap parentheses and re-check the inner expression.
return isValidVariableForInferredArray(((BLangGroupExpr) node).expression);
}
return false;
}
@Override
public void visit(BLangTupleVariable bLangTupleVariable, AnalyzerData data) {
// Analyze the declared type (if explicit) and the initializer.
if (bLangTupleVariable.typeNode != null) {
analyzeNode(bLangTupleVariable.typeNode, data);
}
analyzeExpr(bLangTupleVariable.expr, data);
}
@Override
public void visit(BLangRecordVariable bLangRecordVariable, AnalyzerData data) {
// Analyze the declared type (if explicit) and the initializer.
if (bLangRecordVariable.typeNode != null) {
analyzeNode(bLangRecordVariable.typeNode, data);
}
analyzeExpr(bLangRecordVariable.expr, data);
}
@Override
public void visit(BLangErrorVariable bLangErrorVariable, AnalyzerData data) {
// Analyze the declared type (if explicit) and the initializer.
if (bLangErrorVariable.typeNode != null) {
analyzeNode(bLangErrorVariable.typeNode, data);
}
analyzeExpr(bLangErrorVariable.expr, data);
}
@Override
public void visit(BLangIdentifier identifierNode, AnalyzerData data) {
/* ignore */
}
@Override
public void visit(BLangAnnotation annotationNode, AnalyzerData data) {
// Analyze annotations attached to the annotation declaration itself.
annotationNode.annAttachments.forEach(annotationAttachment -> analyzeNode(annotationAttachment, data));
}
@Override
public void visit(BLangAnnotationAttachment annAttachmentNode, AnalyzerData data) {
analyzeExpr(annAttachmentNode.expr, data);
BAnnotationSymbol annotationSymbol = annAttachmentNode.annotationSymbol;
// Warn when a deprecated annotation is used.
if (annotationSymbol != null && Symbols.isFlagOn(annotationSymbol.flags, Flags.DEPRECATED)) {
logDeprecatedWaring(annAttachmentNode.annotationName.toString(), annotationSymbol, annAttachmentNode.pos);
}
}
@Override
public void visit(BLangSimpleVariableDef varDefNode, AnalyzerData data) {
// Delegate to the variable visitor.
analyzeNode(varDefNode.var, data);
}
@Override
public void visit(BLangCompoundAssignment compoundAssignment, AnalyzerData data) {
// Analyze both the assignment target and the right-hand side.
BLangValueExpression varRef = compoundAssignment.varRef;
analyzeExpr(varRef, data);
analyzeExpr(compoundAssignment.expr, data);
}
@Override
public void visit(BLangAssignment assignNode, AnalyzerData data) {
// Analyze both the assignment target and the right-hand side.
BLangExpression varRef = assignNode.varRef;
analyzeExpr(varRef, data);
analyzeExpr(assignNode.expr, data);
}
@Override
public void visit(BLangRecordDestructure stmt, AnalyzerData data) {
// Reject the same variable being bound more than once in the destructuring pattern.
List<BLangExpression> varRefs = getVarRefs(stmt.varRef);
this.checkDuplicateVarRefs(varRefs);
analyzeExpr(stmt.varRef, data);
analyzeExpr(stmt.expr, data);
}
@Override
public void visit(BLangErrorDestructure stmt, AnalyzerData data) {
// Reject the same variable being bound more than once in the destructuring pattern.
List<BLangExpression> varRefs = getVarRefs(stmt.varRef);
this.checkDuplicateVarRefs(varRefs);
analyzeExpr(stmt.varRef, data);
analyzeExpr(stmt.expr, data);
}
@Override
public void visit(BLangTupleDestructure stmt, AnalyzerData data) {
// Reject the same variable being bound more than once in the destructuring pattern.
List<BLangExpression> varRefs = getVarRefs(stmt.varRef);
this.checkDuplicateVarRefs(varRefs);
analyzeExpr(stmt.varRef, data);
analyzeExpr(stmt.expr, data);
}
// Entry point for duplicate-variable detection with a fresh seen-symbol set.
private void checkDuplicateVarRefs(List<BLangExpression> varRefs) {
checkDuplicateVarRefs(varRefs, new HashSet<>());
}
// Recursively walks the given variable references (including nested tuple/record/error
// references) and reports any symbol that is bound more than once. `_` is exempt.
private void checkDuplicateVarRefs(List<BLangExpression> varRefs, Set<BSymbol> symbols) {
for (BLangExpression varRef : varRefs) {
if (varRef == null) {
continue;
}
NodeKind kind = varRef.getKind();
// Only variable-reference kinds participate in the duplicate check.
if (kind != NodeKind.SIMPLE_VARIABLE_REF
&& kind != NodeKind.RECORD_VARIABLE_REF
&& kind != NodeKind.ERROR_VARIABLE_REF
&& kind != NodeKind.TUPLE_VARIABLE_REF) {
continue;
}
// The wildcard `_` may appear any number of times.
if (kind == NodeKind.SIMPLE_VARIABLE_REF
&& names.fromIdNode(((BLangSimpleVarRef) varRef).variableName) == Names.IGNORE) {
continue;
}
// Recurse into composite references, sharing the same seen-symbol set.
if (kind == NodeKind.TUPLE_VARIABLE_REF) {
checkDuplicateVarRefs(getVarRefs((BLangTupleVarRef) varRef), symbols);
} else if (kind == NodeKind.RECORD_VARIABLE_REF) {
checkDuplicateVarRefs(getVarRefs((BLangRecordVarRef) varRef), symbols);
} else if (kind == NodeKind.ERROR_VARIABLE_REF) {
checkDuplicateVarRefs(getVarRefs((BLangErrorVarRef) varRef), symbols);
}
// Set.add returning false means this symbol was already bound in the pattern.
BLangVariableReference varRefExpr = (BLangVariableReference) varRef;
if (varRefExpr.symbol != null && !symbols.add(varRefExpr.symbol)) {
this.dlog.error(varRef.pos, DiagnosticErrorCode.DUPLICATE_VARIABLE_IN_BINDING_PATTERN,
varRefExpr.symbol);
}
}
}
/**
 * Collects the variable references contained in a record variable reference, including the
 * rest parameter (which may be {@code null}; callers filter nulls).
 */
private List<BLangExpression> getVarRefs(BLangRecordVarRef varRef) {
    List<BLangExpression> varRefs = varRef.recordRefFields.stream()
            .map(field -> field.variableReference)
            .collect(Collectors.toCollection(ArrayList::new));
    varRefs.add(varRef.restParam);
    return varRefs;
}
/**
 * Collects the variable references contained in an error variable reference: message,
 * cause, detail entries and the rest variable (rest may be {@code null}).
 */
private List<BLangExpression> getVarRefs(BLangErrorVarRef varRef) {
    List<BLangExpression> varRefs = new ArrayList<>();
    if (varRef.message != null) {
        varRefs.add(varRef.message);
    }
    if (varRef.cause != null) {
        varRefs.add(varRef.cause);
    }
    varRef.detail.stream().map(detailEntry -> detailEntry.expr).forEach(varRefs::add);
    varRefs.add(varRef.restVar);
    return varRefs;
}
/**
 * Collects the variable references contained in a tuple variable reference, including the
 * rest parameter (which may be {@code null}; callers filter nulls).
 */
private List<BLangExpression> getVarRefs(BLangTupleVarRef varRef) {
    List<BLangExpression> varRefs = new ArrayList<>(varRef.expressions);
    varRefs.add(varRef.restParam);
    return varRefs;
}
@Override
public void visit(BLangBreak breakNode, AnalyzerData data) {
// `break` is only meaningful inside a loop.
if (data.loopCount == 0) {
this.dlog.error(breakNode.pos, DiagnosticErrorCode.BREAK_CANNOT_BE_OUTSIDE_LOOP);
return;
}
// A break that would jump out of an enclosing transaction is not allowed.
if (checkNextBreakValidityInTransaction(data)) {
this.dlog.error(breakNode.pos, DiagnosticErrorCode.BREAK_CANNOT_BE_USED_TO_EXIT_TRANSACTION);
return;
}
if (data.loopAlterNotAllowed) {
this.dlog.error(breakNode.pos, DiagnosticErrorCode.BREAK_NOT_ALLOWED);
}
}
@Override
public void visit(BLangPanic panicNode, AnalyzerData data) {
// Only the panicked expression needs analysis.
analyzeExpr(panicNode.expr, data);
}
@Override
public void visit(BLangXMLNSStatement xmlnsStmtNode, AnalyzerData data) {
// No additional analysis required here.
}
@Override
public void visit(BLangClientDeclarationStatement clientDeclarationStatement, AnalyzerData data) {
analyzeNode(clientDeclarationStatement.clientDeclaration, data);
}
@Override
public void visit(BLangExpressionStmt exprStmtNode, AnalyzerData data) {
BLangExpression expr = exprStmtNode.expr;
analyzeExpr(expr, data);
}
// True when the current environment is the top-level body of the enclosing invokable.
private boolean isTopLevel(SymbolEnv env) {
return env.enclInvokable.body == env.node;
}
// True when the enclosing invokable is a worker lambda.
private boolean isInWorker(SymbolEnv env) {
return env.enclInvokable.flagSet.contains(Flag.WORKER);
}
// Worker send/receive actions are only allowed at the top level of a function body.
private boolean isCommunicationAllowedLocation(SymbolEnv env) {
return isTopLevel(env);
}
// True when the communication target is the default (function) worker.
private boolean isDefaultWorkerCommunication(String workerIdentifier) {
return workerIdentifier.equals(DEFAULT_WORKER_NAME);
}
// Checks whether a worker with the given name exists for a send/receive: the default worker
// is always reachable from inside a worker; otherwise the resolved type must be a
// worker-derived future.
private boolean workerExists(BType type, String workerName, SymbolEnv env) {
if (isDefaultWorkerCommunication(workerName) && isInWorker(env)) {
return true;
}
if (type == symTable.semanticError) {
return false;
}
BType refType = Types.getReferredType(type);
return refType.tag == TypeTags.FUTURE && ((BFutureType) refType).workerDerivative;
}
@Override
public void visit(BLangWorkerSend workerSendNode, AnalyzerData data) {
// Resolve the receiving worker; anything that is not a variable counts as not found.
BSymbol receiver =
symResolver.lookupSymbolInMainSpace(data.env, names.fromIdNode(workerSendNode.workerIdentifier));
if ((receiver.tag & SymTag.VARIABLE) != SymTag.VARIABLE) {
receiver = symTable.notFoundSymbol;
}
verifyPeerCommunication(workerSendNode.pos, receiver, workerSendNode.workerIdentifier.value, data.env);
WorkerActionSystem was = data.workerActionSystemStack.peek();
// Sent values must be cloneable and must not be actions.
BType type = workerSendNode.expr.getBType();
if (type == symTable.semanticError) {
was.hasErrors = true;
} else if (workerSendNode.expr instanceof ActionNode) {
this.dlog.error(workerSendNode.expr.pos, DiagnosticErrorCode.INVALID_SEND_EXPR);
} else if (!types.isAssignable(type, symTable.cloneableType)) {
this.dlog.error(workerSendNode.pos, DiagnosticErrorCode.INVALID_TYPE_FOR_SEND, type);
}
// Sends are only allowed at the top level of a worker body (and never inside queries).
String workerName = workerSendNode.workerIdentifier.getValue();
if (data.withinQuery || (!isCommunicationAllowedLocation(data.env) && !data.inInternallyDefinedBlockStmt)) {
this.dlog.error(workerSendNode.pos, DiagnosticErrorCode.UNSUPPORTED_WORKER_SEND_POSITION);
was.hasErrors = true;
}
if (!this.workerExists(workerSendNode.getBType(), workerName, data.env)
|| (!isWorkerFromFunction(data.env, names.fromString(workerName)) && !workerName.equals("function"))) {
this.dlog.error(workerSendNode.pos, DiagnosticErrorCode.UNDEFINED_WORKER, workerName);
was.hasErrors = true;
}
// The send's type accumulates any error-only return types seen so far, so the matching
// receive can observe them.
workerSendNode.setBType(
createAccumulatedErrorTypeForMatchingReceive(workerSendNode.pos, workerSendNode.expr.getBType(), data));
was.addWorkerAction(workerSendNode);
analyzeExpr(workerSendNode.expr, data);
validateActionParentNode(workerSendNode.pos, workerSendNode.expr);
}
/**
 * Builds the effective type for a worker send by combining the sent expression's type with
 * any error-only return types already recorded for the enclosing invokable, so the matching
 * receive can observe those errors. Reports WORKER_SEND_AFTER_RETURN for any recorded
 * return type that is not error-only.
 *
 * @param pos      position used for diagnostics
 * @param exprType static type of the sent expression
 * @param data     analyzer state carrying the return-type stack
 * @return a union of the accumulated error types and {@code exprType}, or {@code exprType}
 *         alone when nothing was accumulated
 */
private BType createAccumulatedErrorTypeForMatchingReceive(Location pos, BType exprType, AnalyzerData data) {
    Set<BType> returnTypesUpToNow = data.returnTypes.peek();
    // Fix: the previous implementation instantiated an anonymous LinkedHashSet subclass whose
    // instance initializer created (and immediately discarded) a Comparator — dead code that
    // also generated a needless anonymous class. A plain LinkedHashSet is equivalent.
    LinkedHashSet<BType> returnTypeAndSendType = new LinkedHashSet<>();
    for (BType returnType : returnTypesUpToNow) {
        if (onlyContainErrors(returnType)) {
            returnTypeAndSendType.add(returnType);
        } else {
            // A non-error return may already have exited the worker before this send.
            this.dlog.error(pos, DiagnosticErrorCode.WORKER_SEND_AFTER_RETURN);
        }
    }
    returnTypeAndSendType.add(exprType);
    if (returnTypeAndSendType.size() > 1) {
        return BUnionType.create(null, returnTypeAndSendType);
    }
    return exprType;
}
@Override
public void visit(BLangWorkerSyncSendExpr syncSendExpr, AnalyzerData data) {
// Resolve the receiving worker; anything that is not a variable counts as not found.
BSymbol receiver =
symResolver.lookupSymbolInMainSpace(data.env, names.fromIdNode(syncSendExpr.workerIdentifier));
if ((receiver.tag & SymTag.VARIABLE) != SymTag.VARIABLE) {
receiver = symTable.notFoundSymbol;
}
verifyPeerCommunication(syncSendExpr.pos, receiver, syncSendExpr.workerIdentifier.value, data.env);
validateActionParentNode(syncSendExpr.pos, syncSendExpr);
String workerName = syncSendExpr.workerIdentifier.getValue();
WorkerActionSystem was = data.workerActionSystemStack.peek();
// Sync sends are only allowed at the top level of a worker body (and never inside queries).
if (data.withinQuery || (!isCommunicationAllowedLocation(data.env) && !data.inInternallyDefinedBlockStmt)) {
this.dlog.error(syncSendExpr.pos, DiagnosticErrorCode.UNSUPPORTED_WORKER_SEND_POSITION);
was.hasErrors = true;
}
if (!this.workerExists(syncSendExpr.workerType, workerName, data.env)) {
this.dlog.error(syncSendExpr.pos, DiagnosticErrorCode.UNDEFINED_WORKER, syncSendExpr.workerSymbol);
was.hasErrors = true;
}
// The send's type accumulates any error-only return types seen so far.
syncSendExpr.setBType(
createAccumulatedErrorTypeForMatchingReceive(syncSendExpr.pos, syncSendExpr.expr.getBType(), data));
was.addWorkerAction(syncSendExpr);
analyzeExpr(syncSendExpr.expr, data);
}
@Override
public void visit(BLangWorkerReceive workerReceiveNode, AnalyzerData data) {
validateActionParentNode(workerReceiveNode.pos, workerReceiveNode);
// Resolve the sending worker; anything that is not a variable counts as not found.
BSymbol sender =
symResolver.lookupSymbolInMainSpace(data.env, names.fromIdNode(workerReceiveNode.workerIdentifier));
if ((sender.tag & SymTag.VARIABLE) != SymTag.VARIABLE) {
sender = symTable.notFoundSymbol;
}
verifyPeerCommunication(workerReceiveNode.pos, sender, workerReceiveNode.workerIdentifier.value, data.env);
WorkerActionSystem was = data.workerActionSystemStack.peek();
// Receives are only allowed at the top level of a worker body (and never inside queries).
String workerName = workerReceiveNode.workerIdentifier.getValue();
if (data.withinQuery || (!isCommunicationAllowedLocation(data.env) && !data.inInternallyDefinedBlockStmt)) {
this.dlog.error(workerReceiveNode.pos, DiagnosticErrorCode.INVALID_WORKER_RECEIVE_POSITION);
was.hasErrors = true;
}
if (!this.workerExists(workerReceiveNode.workerType, workerName, data.env)) {
this.dlog.error(workerReceiveNode.pos, DiagnosticErrorCode.UNDEFINED_WORKER, workerName);
was.hasErrors = true;
}
// Record error types the matching sync-send might propagate to this receive.
workerReceiveNode.matchingSendsError = createAccumulatedErrorTypeForMatchingSyncSend(workerReceiveNode, data);
was.addWorkerAction(workerReceiveNode);
}
// Verifies that a worker interaction happens between peer workers only, i.e. not across a
// fork boundary.
private void verifyPeerCommunication(Location pos, BSymbol otherWorker, String otherWorkerName, SymbolEnv env) {
if (env.enclEnv.node.getKind() != NodeKind.FUNCTION) {
return;
}
BLangFunction funcNode = (BLangFunction) env.enclEnv.node;
Set<Flag> flagSet = funcNode.flagSet;
// NOTE(review): worker lambda symbols appear to be stored under a "0"-prefixed name —
// confirm against the symbol-enter phase.
Name workerDerivedName = names.fromString("0" + otherWorker.name.value);
if (flagSet.contains(Flag.WORKER)) {
// Communicating with the default worker from inside a forked worker is not allowed.
if (otherWorkerName.equals(DEFAULT_WORKER_NAME)) {
if (flagSet.contains(Flag.FORKED)) {
dlog.error(pos, DiagnosticErrorCode.WORKER_INTERACTIONS_ONLY_ALLOWED_BETWEEN_PEERS);
}
return;
}
// Both workers must belong to the same fork (same anonymous fork name).
Scope enclFunctionScope = env.enclEnv.enclEnv.scope;
BInvokableSymbol wLambda = (BInvokableSymbol) enclFunctionScope.lookup(workerDerivedName).symbol;
if (wLambda != null && funcNode.anonForkName != null
&& !funcNode.anonForkName.equals(wLambda.enclForkName)) {
dlog.error(pos, DiagnosticErrorCode.WORKER_INTERACTIONS_ONLY_ALLOWED_BETWEEN_PEERS);
}
} else {
// From the default worker, forked workers are not peers.
BInvokableSymbol wLambda = (BInvokableSymbol) env.scope.lookup(workerDerivedName).symbol;
if (wLambda != null && wLambda.enclForkName != null) {
dlog.error(pos, DiagnosticErrorCode.WORKER_INTERACTIONS_ONLY_ALLOWED_BETWEEN_PEERS);
}
}
}
// Builds the error type a receive may observe from its matching sync-send: the union of
// nil and any error-only return types recorded so far. Mirrors
// createAccumulatedErrorTypeForMatchingReceive, but for the receiving side.
public BType createAccumulatedErrorTypeForMatchingSyncSend(BLangWorkerReceive workerReceiveNode,
AnalyzerData data) {
Set<BType> returnTypesUpToNow = data.returnTypes.peek();
LinkedHashSet<BType> returnTypeAndSendType = new LinkedHashSet<>();
for (BType returnType : returnTypesUpToNow) {
if (onlyContainErrors(returnType)) {
returnTypeAndSendType.add(returnType);
} else {
// A non-error return may already have exited before this receive.
this.dlog.error(workerReceiveNode.pos, DiagnosticErrorCode.WORKER_RECEIVE_AFTER_RETURN);
}
}
returnTypeAndSendType.add(symTable.nilType);
if (returnTypeAndSendType.size() > 1) {
return BUnionType.create(null, returnTypeAndSendType);
} else {
return symTable.nilType;
}
}
// True when the given (effective, de-referenced) type is an error type, or a union whose
// members are all error types.
private boolean onlyContainErrors(BType returnType) {
if (returnType == null) {
return false;
}
returnType = types.getTypeWithEffectiveIntersectionTypes(returnType);
returnType = Types.getReferredType(returnType);
if (returnType.tag == TypeTags.ERROR) {
return true;
}
if (returnType.tag == TypeTags.UNION) {
// Every member (through effective intersections) must be an error type.
for (BType memberType : ((BUnionType) returnType).getMemberTypes()) {
BType t = types.getTypeWithEffectiveIntersectionTypes(memberType);
if (t.tag != TypeTags.ERROR) {
return false;
}
}
return true;
}
return false;
}
@Override
public void visit(BLangLiteral literalExpr, AnalyzerData data) {
// No additional analysis required here.
}
@Override
public void visit(BLangConstRef constRef, AnalyzerData data) {
// No additional analysis required here.
}
@Override
public void visit(BLangListConstructorExpr listConstructorExpr, AnalyzerData data) {
for (BLangExpression expr : listConstructorExpr.exprs) {
// Analyze the spread operand itself, not the spread wrapper node.
if (expr.getKind() == NodeKind.LIST_CONSTRUCTOR_SPREAD_OP) {
expr = ((BLangListConstructorSpreadOpExpr) expr).expr;
}
analyzeExpr(expr, data);
}
}
@Override
public void visit(BLangTableConstructorExpr tableConstructorExpr, AnalyzerData data) {
analyzeExprs(tableConstructorExpr.recordLiteralList, data);
}
@Override
public void visit(BLangRecordLiteral recordLiteral, AnalyzerData data) {
// First pass: analyze every field's value expression.
List<RecordLiteralNode.RecordField> fields = recordLiteral.fields;
for (RecordLiteralNode.RecordField field : fields) {
if (field.isKeyValueField()) {
analyzeExpr(((BLangRecordKeyValueField) field).valueExpr, data);
} else if (field.getKind() == NodeKind.SIMPLE_VARIABLE_REF) {
analyzeExpr((BLangRecordLiteral.BLangRecordVarNameField) field, data);
} else {
analyzeExpr(((BLangRecordLiteral.BLangRecordSpreadOperatorField) field).expr, data);
}
}
// Second pass: detect duplicate / possibly-duplicate keys, including keys introduced via
// spread operators. `names` holds keys seen so far; `neverTypedKeys` holds keys whose
// spread-contributed field type is `never` (they contribute no value).
Set<Object> names = new HashSet<>();
Set<Object> neverTypedKeys = new HashSet<>();
BType literalBType = recordLiteral.getBType();
BType type = Types.getReferredType(literalBType);
boolean isRecord = type.tag == TypeTags.RECORD;
boolean isOpenRecord = isRecord && !((BRecordType) type).sealed;
// A map-typed literal may have been inferred as a record internally.
boolean isInferredRecordForMapCET = isRecord && recordLiteral.expectedType != null &&
recordLiteral.expectedType.tag == TypeTags.MAP;
// Tracks a spread operand whose key set is not statically known (map or open record);
// only one such spread is allowed per literal.
BLangRecordLiteral.BLangRecordSpreadOperatorField inclusiveTypeSpreadField = null;
for (RecordLiteralNode.RecordField field : fields) {
BLangExpression keyExpr;
if (field.getKind() == NodeKind.RECORD_LITERAL_SPREAD_OP) {
BLangRecordLiteral.BLangRecordSpreadOperatorField spreadOpField =
(BLangRecordLiteral.BLangRecordSpreadOperatorField) field;
BLangExpression spreadOpExpr = spreadOpField.expr;
analyzeExpr(spreadOpExpr, data);
BType spreadOpExprType = Types.getReferredType(spreadOpExpr.getBType());
int spreadFieldTypeTag = spreadOpExprType.tag;
if (spreadFieldTypeTag == TypeTags.MAP) {
// A map spread has an unbounded key set.
if (inclusiveTypeSpreadField != null) {
this.dlog.error(spreadOpExpr.pos, DiagnosticErrorCode.MULTIPLE_INCLUSIVE_TYPES);
continue;
}
inclusiveTypeSpreadField = spreadOpField;
if (fields.size() > 1) {
// Keys already specified may be duplicated by the map spread.
if (names.size() > 0) {
this.dlog.error(spreadOpExpr.pos,
DiagnosticErrorCode.SPREAD_FIELD_MAY_DULPICATE_ALREADY_SPECIFIED_KEYS,
spreadOpExpr);
}
continue;
}
}
if (spreadFieldTypeTag != TypeTags.RECORD) {
continue;
}
BRecordType spreadExprRecordType = (BRecordType) spreadOpExprType;
boolean isSpreadExprRecordTypeSealed = spreadExprRecordType.sealed;
if (!isSpreadExprRecordTypeSealed) {
// An open record spread also has an unbounded key set.
if (inclusiveTypeSpreadField != null) {
this.dlog.error(spreadOpExpr.pos, DiagnosticErrorCode.MULTIPLE_INCLUSIVE_TYPES);
} else {
inclusiveTypeSpreadField = spreadOpField;
}
}
LinkedHashMap<String, BField> fieldsInRecordType = getUnescapedFieldList(spreadExprRecordType.fields);
// Any already-seen key not declared by an open spread may be duplicated via its rest.
for (Object fieldName : names) {
if (!fieldsInRecordType.containsKey(fieldName) && !isSpreadExprRecordTypeSealed) {
this.dlog.error(spreadOpExpr.pos,
DiagnosticErrorCode.SPREAD_FIELD_MAY_DULPICATE_ALREADY_SPECIFIED_KEYS,
spreadOpExpr);
break;
}
}
for (String fieldName : fieldsInRecordType.keySet()) {
BField bField = fieldsInRecordType.get(fieldName);
if (names.contains(fieldName)) {
// `never`-typed fields contribute no value, so they never duplicate.
if (bField.type.tag != TypeTags.NEVER) {
this.dlog.error(spreadOpExpr.pos,
DiagnosticErrorCode.DUPLICATE_KEY_IN_RECORD_LITERAL_SPREAD_OP,
type.getKind().typeName(), fieldName, spreadOpField);
}
continue;
}
if (bField.type.tag == TypeTags.NEVER) {
neverTypedKeys.add(fieldName);
continue;
}
if (!neverTypedKeys.remove(fieldName) &&
inclusiveTypeSpreadField != null && isSpreadExprRecordTypeSealed) {
this.dlog.error(spreadOpExpr.pos,
DiagnosticErrorCode.POSSIBLE_DUPLICATE_OF_FIELD_SPECIFIED_VIA_SPREAD_OP,
Types.getReferredType(recordLiteral.expectedType).getKind().typeName(),
bField.symbol, spreadOpField);
}
names.add(fieldName);
}
} else {
// Non-spread field: determine the key expression.
if (field.isKeyValueField()) {
BLangRecordLiteral.BLangRecordKey key = ((BLangRecordKeyValueField) field).key;
keyExpr = key.expr;
if (key.computedKey) {
// Computed keys cannot be checked for duplicates statically.
analyzeExpr(keyExpr, data);
continue;
}
} else {
keyExpr = (BLangRecordLiteral.BLangRecordVarNameField) field;
}
if (keyExpr.getKind() == NodeKind.SIMPLE_VARIABLE_REF) {
String name = ((BLangSimpleVarRef) keyExpr).variableName.value;
String unescapedName = Utils.unescapeJava(name);
if (names.contains(unescapedName)) {
this.dlog.error(keyExpr.pos, DiagnosticErrorCode.DUPLICATE_KEY_IN_RECORD_LITERAL,
Types.getReferredType(recordLiteral.expectedType).getKind().typeName(),
unescapedName);
} else if (inclusiveTypeSpreadField != null && !neverTypedKeys.contains(unescapedName)) {
// An earlier inclusive spread may already have provided this key.
this.dlog.error(keyExpr.pos,
DiagnosticErrorCode.POSSIBLE_DUPLICATE_OF_FIELD_SPECIFIED_VIA_SPREAD_OP,
unescapedName, inclusiveTypeSpreadField);
}
// NOTE(review): this containsKey uses the escaped `name` while the checks above use
// `unescapedName` — confirm this asymmetry is intentional.
if (!isInferredRecordForMapCET && isOpenRecord && !((BRecordType) type).fields.containsKey(name)) {
dlog.error(keyExpr.pos, DiagnosticErrorCode.INVALID_RECORD_LITERAL_IDENTIFIER_KEY,
unescapedName);
}
names.add(unescapedName);
} else if (keyExpr.getKind() == NodeKind.LITERAL || keyExpr.getKind() == NodeKind.NUMERIC_LITERAL) {
Object name = ((BLangLiteral) keyExpr).value;
if (names.contains(name)) {
this.dlog.error(keyExpr.pos, DiagnosticErrorCode.DUPLICATE_KEY_IN_RECORD_LITERAL,
Types.getReferredType(recordLiteral.parent.getBType())
.getKind().typeName(), name);
} else if (inclusiveTypeSpreadField != null && !neverTypedKeys.contains(name)) {
this.dlog.error(keyExpr.pos,
DiagnosticErrorCode.POSSIBLE_DUPLICATE_OF_FIELD_SPECIFIED_VIA_SPREAD_OP,
name, inclusiveTypeSpreadField);
}
names.add(name);
}
}
}
// Normalize the expected type to the inferred record type for map-constrained literals.
if (isInferredRecordForMapCET) {
recordLiteral.expectedType = type;
}
}
    /** A record-literal variable-name field is analyzed exactly like a simple variable reference. */
    @Override
    public void visit(BLangRecordLiteral.BLangRecordVarNameField node, AnalyzerData data) {
        visit((BLangSimpleVarRef) node, data);
    }
private LinkedHashMap<String, BField> getUnescapedFieldList(LinkedHashMap<String, BField> fieldMap) {
LinkedHashMap<String, BField> newMap = new LinkedHashMap<>();
for (String key : fieldMap.keySet()) {
newMap.put(Utils.unescapeJava(key), fieldMap.get(key));
}
return newMap;
}
@Override
public void visit(BLangSimpleVarRef varRefExpr, AnalyzerData data) {
switch (varRefExpr.parent.getKind()) {
case WORKER_RECEIVE:
case WORKER_SEND:
case WORKER_SYNC_SEND:
return;
default:
if (varRefExpr.getBType() != null && varRefExpr.getBType().tag == TypeTags.FUTURE) {
trackNamedWorkerReferences(varRefExpr, data);
}
}
BSymbol symbol = varRefExpr.symbol;
if (symbol != null && Symbols.isFlagOn(symbol.flags, Flags.DEPRECATED)) {
logDeprecatedWaring(varRefExpr.variableName.toString(), symbol, varRefExpr.pos);
}
}
private void trackNamedWorkerReferences(BLangSimpleVarRef varRefExpr, AnalyzerData data) {
if (varRefExpr.symbol == null || (varRefExpr.symbol.flags & Flags.WORKER) != Flags.WORKER) {
return;
}
data.workerReferences.computeIfAbsent(varRefExpr.symbol, s -> new LinkedHashSet<>());
data.workerReferences.get(varRefExpr.symbol).add(varRefExpr);
}
    /** Intentionally a no-op: record variable references need no checks in this pass. */
    @Override
    public void visit(BLangRecordVarRef varRefExpr, AnalyzerData data) {
        /* ignore */
    }
    /** Intentionally a no-op: error variable references need no checks in this pass. */
    @Override
    public void visit(BLangErrorVarRef varRefExpr, AnalyzerData data) {
        /* ignore */
    }
    /** Intentionally a no-op: tuple variable references need no checks in this pass. */
    @Override
    public void visit(BLangTupleVarRef varRefExpr, AnalyzerData data) {
        /* ignore */
    }
    /** Delegates to the shared field-based access analysis (deprecation check included). */
    @Override
    public void visit(BLangFieldBasedAccess fieldAccessExpr, AnalyzerData data) {
        analyzeFieldBasedAccessExpr(fieldAccessExpr, data);
    }
    /** Namespace-prefixed field access is analyzed the same way as a plain field access. */
    @Override
    public void visit(BLangFieldBasedAccess.BLangNSPrefixedFieldBasedAccess nsPrefixedFieldBasedAccess,
                      AnalyzerData data) {
        analyzeFieldBasedAccessExpr(nsPrefixedFieldBasedAccess, data);
    }
private void analyzeFieldBasedAccessExpr(BLangFieldBasedAccess fieldAccessExpr, AnalyzerData data) {
BLangExpression expr = fieldAccessExpr.expr;
analyzeExpr(expr, data);
BSymbol symbol = fieldAccessExpr.symbol;
if (symbol != null && Symbols.isFlagOn(fieldAccessExpr.symbol.flags, Flags.DEPRECATED)) {
String deprecatedConstruct = generateDeprecatedConstructString(expr, fieldAccessExpr.field.toString(),
symbol);
dlog.warning(fieldAccessExpr.pos, DiagnosticWarningCode.USAGE_OF_DEPRECATED_CONSTRUCT, deprecatedConstruct);
}
}
    /** Analyzes both the index expression and the container expression of a member access. */
    @Override
    public void visit(BLangIndexBasedAccess indexAccessExpr, AnalyzerData data) {
        analyzeExpr(indexAccessExpr.indexExpr, data);
        analyzeExpr(indexAccessExpr.expr, data);
    }
    /**
     * Analyzes a function invocation: its receiver and arguments, the match-guard restriction,
     * and — for function symbols — the transactional-scope rule and deprecation warning.
     */
    @Override
    public void visit(BLangInvocation invocationExpr, AnalyzerData data) {
        analyzeExpr(invocationExpr.expr, data);
        analyzeExprs(invocationExpr.requiredArgs, data);
        analyzeExprs(invocationExpr.restArgs, data);
        validateInvocationInMatchGuard(invocationExpr);
        if ((invocationExpr.symbol != null) && invocationExpr.symbol.kind == SymbolKind.FUNCTION) {
            BSymbol funcSymbol = invocationExpr.symbol;
            // A transactional function may only be called from within a transaction scope.
            if (Symbols.isFlagOn(funcSymbol.flags, Flags.TRANSACTIONAL) && !data.withinTransactionScope) {
                dlog.error(invocationExpr.pos, DiagnosticErrorCode.TRANSACTIONAL_FUNC_INVOKE_PROHIBITED);
                return;
            }
            if (Symbols.isFlagOn(funcSymbol.flags, Flags.DEPRECATED)) {
                logDeprecatedWarningForInvocation(invocationExpr);
            }
        }
    }
    /** Analyzes the positional and (if any) named arguments of an error constructor. */
    @Override
    public void visit(BLangErrorConstructorExpr errorConstructorExpr, AnalyzerData data) {
        analyzeExprs(errorConstructorExpr.positionalArgs, data);
        if (!errorConstructorExpr.namedArgs.isEmpty()) {
            analyzeExprs(errorConstructorExpr.namedArgs, data);
        }
    }
    /**
     * Analyzes an action invocation (remote call or `start`): transactional-scope rules,
     * lock/worker restrictions, constructor misuse, deprecation, and the action-position rule.
     * The checks are ordered so only the first applicable diagnostic is reported.
     */
    @Override
    public void visit(BLangInvocation.BLangActionInvocation actionInvocation, AnalyzerData data) {
        validateInvocationInMatchGuard(actionInvocation);
        // A transactional action (non-async) requires an enclosing transaction scope.
        if (!actionInvocation.async && !data.withinTransactionScope &&
                Symbols.isFlagOn(actionInvocation.symbol.flags, Flags.TRANSACTIONAL)) {
            dlog.error(actionInvocation.pos, DiagnosticErrorCode.TRANSACTIONAL_FUNC_INVOKE_PROHIBITED,
                    actionInvocation.symbol);
            return;
        }
        // `start` of a non-transactional function is prohibited inside a transaction.
        if (actionInvocation.async && data.withinTransactionScope &&
                !Symbols.isFlagOn(actionInvocation.symbol.flags, Flags.TRANSACTIONAL)) {
            dlog.error(actionInvocation.pos, DiagnosticErrorCode.USAGE_OF_START_WITHIN_TRANSACTION_IS_PROHIBITED);
            return;
        }
        analyzeExpr(actionInvocation.expr, data);
        analyzeExprs(actionInvocation.requiredArgs, data);
        analyzeExprs(actionInvocation.restArgs, data);
        if (actionInvocation.symbol != null && actionInvocation.symbol.kind == SymbolKind.FUNCTION &&
                Symbols.isFlagOn(actionInvocation.symbol.flags, Flags.DEPRECATED)) {
            logDeprecatedWarningForInvocation(actionInvocation);
        }
        if (actionInvocation.flagSet.contains(Flag.TRANSACTIONAL) && !data.withinTransactionScope) {
            dlog.error(actionInvocation.pos, DiagnosticErrorCode.TRANSACTIONAL_FUNC_INVOKE_PROHIBITED);
            return;
        }
        // `start` (or a started function pointer) is not allowed inside a lock block.
        if (actionInvocation.async && data.withinLockBlock) {
            dlog.error(actionInvocation.pos, actionInvocation.functionPointerInvocation ?
                    DiagnosticErrorCode.USAGE_OF_WORKER_WITHIN_LOCK_IS_PROHIBITED :
                    DiagnosticErrorCode.USAGE_OF_START_WITHIN_LOCK_IS_PROHIBITED);
            return;
        }
        if (actionInvocation.symbol != null &&
                (actionInvocation.symbol.tag & SymTag.CONSTRUCTOR) == SymTag.CONSTRUCTOR) {
            dlog.error(actionInvocation.pos, DiagnosticErrorCode.INVALID_FUNCTIONAL_CONSTRUCTOR_INVOCATION,
                    actionInvocation.symbol);
            return;
        }
        validateActionInvocation(actionInvocation.pos, actionInvocation);
        if (!actionInvocation.async && data.withinTransactionScope) {
            actionInvocation.invokedInsideTransaction = true;
        }
    }
    /**
     * Analyzes a client resource access action: arguments and path segments, transaction-scope
     * rules for transactional resources, deprecation, and the action-position rule.
     */
    @Override
    public void visit(BLangInvocation.BLangResourceAccessInvocation resourceActionInvocation, AnalyzerData data) {
        validateInvocationInMatchGuard(resourceActionInvocation);
        analyzeExpr(resourceActionInvocation.expr, data);
        analyzeExprs(resourceActionInvocation.requiredArgs, data);
        analyzeExprs(resourceActionInvocation.restArgs, data);
        analyzeExpr(resourceActionInvocation.resourceAccessPathSegments, data);
        resourceActionInvocation.invokedInsideTransaction = data.withinTransactionScope;
        if (Symbols.isFlagOn(resourceActionInvocation.symbol.flags, Flags.TRANSACTIONAL) &&
                !data.withinTransactionScope) {
            dlog.error(resourceActionInvocation.pos, DiagnosticErrorCode.TRANSACTIONAL_FUNC_INVOKE_PROHIBITED);
            return;
        }
        if (Symbols.isFlagOn(resourceActionInvocation.symbol.flags, Flags.DEPRECATED)) {
            logDeprecatedWarningForInvocation(resourceActionInvocation);
        }
        validateActionInvocation(resourceActionInvocation.pos, resourceActionInvocation);
    }
private void logDeprecatedWarningForInvocation(BLangInvocation invocationExpr) {
String deprecatedConstruct = invocationExpr.name.toString();
BLangExpression expr = invocationExpr.expr;
BSymbol funcSymbol = invocationExpr.symbol;
if (expr != null) {
deprecatedConstruct = generateDeprecatedConstructString(expr, deprecatedConstruct, funcSymbol);
} else if (!Names.DOT.equals(funcSymbol.pkgID.name)) {
deprecatedConstruct = funcSymbol.pkgID + ":" + deprecatedConstruct;
}
dlog.warning(invocationExpr.pos, DiagnosticWarningCode.USAGE_OF_DEPRECATED_CONSTRUCT, deprecatedConstruct);
}
private String generateDeprecatedConstructString(BLangExpression expr, String fieldOrMethodName,
BSymbol symbol) {
BType bType = expr.getBType();
if (bType.tag == TypeTags.TYPEREFDESC) {
return bType + "." + fieldOrMethodName;
}
if (bType.tag == TypeTags.OBJECT) {
BObjectType objectType = (BObjectType) bType;
if (objectType.classDef == null || objectType.classDef.internal == false) {
fieldOrMethodName = bType + "." + fieldOrMethodName;
}
return fieldOrMethodName;
}
if (symbol.kind == SymbolKind.FUNCTION && !Names.DOT.equals(symbol.pkgID.name)) {
fieldOrMethodName = symbol.pkgID + ":" + fieldOrMethodName;
}
return fieldOrMethodName;
}
    /**
     * Validates the receiver of an action invocation: it must be a simple variable reference,
     * a group expression, or a `self.field` access; anything else is rejected. Also validates
     * the invocation's position via {@link #validateActionParentNode}.
     */
    private void validateActionInvocation(Location pos, BLangInvocation iExpr) {
        if (iExpr.expr != null) {
            final NodeKind clientNodeKind = iExpr.expr.getKind();
            if (clientNodeKind == NodeKind.FIELD_BASED_ACCESS_EXPR) {
                final BLangFieldBasedAccess fieldBasedAccess = (BLangFieldBasedAccess) iExpr.expr;
                if (fieldBasedAccess.expr.getKind() != NodeKind.SIMPLE_VARIABLE_REF) {
                    dlog.error(pos, DiagnosticErrorCode.INVALID_ACTION_INVOCATION_AS_EXPR);
                } else {
                    // Field-based receivers are only allowed on `self`.
                    final BLangSimpleVarRef selfName = (BLangSimpleVarRef) fieldBasedAccess.expr;
                    if (!Names.SELF.equals(selfName.symbol.name)) {
                        dlog.error(pos, DiagnosticErrorCode.INVALID_ACTION_INVOCATION_AS_EXPR);
                    }
                }
            } else if (clientNodeKind != NodeKind.SIMPLE_VARIABLE_REF &&
                    clientNodeKind != NodeKind.GROUP_EXPR) {
                dlog.error(pos, DiagnosticErrorCode.INVALID_ACTION_INVOCATION_AS_EXPR);
            }
        }
        validateActionParentNode(pos, iExpr);
    }
    /**
     * Validates that an action occurs only as part of a statement or nested inside other actions
     * (or inside check/checkpanic/trap/grouping/cast wrappers and query clauses that permit it).
     * Walks up the parent chain; logs an error and returns false when the position is invalid.
     */
    private boolean validateActionParentNode(Location pos, BLangNode node) {
        BLangNode parent = node.parent;
        while (parent != null) {
            final NodeKind kind = parent.getKind();
            if (parent instanceof StatementNode || checkActionInQuery(kind)) {
                return true;
            } else if (parent instanceof ActionNode || parent instanceof BLangVariable || kind == NodeKind.CHECK_EXPR ||
                    kind == NodeKind.CHECK_PANIC_EXPR || kind == NodeKind.TRAP_EXPR || kind == NodeKind.GROUP_EXPR ||
                    kind == NodeKind.TYPE_CONVERSION_EXPR) {
                // A nested action invocation terminates the walk without legitimizing the position.
                if (parent instanceof BLangInvocation.BLangActionInvocation) {
                    break;
                }
                parent = parent.parent;
                continue;
            }
            break;
        }
        dlog.error(pos, DiagnosticErrorCode.INVALID_ACTION_INVOCATION_AS_EXPR);
        return false;
    }
private boolean checkActionInQuery(NodeKind parentKind) {
return parentKind == NodeKind.FROM || parentKind == NodeKind.SELECT ||
parentKind == NodeKind.LET_CLAUSE;
}
    /**
     * Analyzes a `new` expression's arguments and init invocation, and warns when the
     * instantiated user-defined type is deprecated.
     */
    @Override
    public void visit(BLangTypeInit cIExpr, AnalyzerData data) {
        analyzeExprs(cIExpr.argsExpr, data);
        analyzeExpr(cIExpr.initInvocation, data);
        BType type = cIExpr.getBType();
        if (cIExpr.userDefinedType != null && Symbols.isFlagOn(type.tsymbol.flags, Flags.DEPRECATED)) {
            logDeprecatedWaring(((BLangUserDefinedType) cIExpr.userDefinedType).typeName.toString(), type.tsymbol,
                                cIExpr.pos);
        }
    }
    /** Analyzes all three sub-expressions of a conditional (ternary) expression. */
    @Override
    public void visit(BLangTernaryExpr ternaryExpr, AnalyzerData data) {
        analyzeExpr(ternaryExpr.expr, data);
        analyzeExpr(ternaryExpr.thenExpr, data);
        analyzeExpr(ternaryExpr.elseExpr, data);
    }
    /**
     * Analyzes a `wait` expression: validates the waited-on future and the expression's position,
     * then registers the wait as a worker action in the current worker action system.
     */
    @Override
    public void visit(BLangWaitExpr awaitExpr, AnalyzerData data) {
        BLangExpression expr = awaitExpr.getExpression();
        boolean validWaitFuture = validateWaitFutureExpr(expr);
        analyzeExpr(expr, data);
        boolean validActionParent = validateActionParentNode(awaitExpr.pos, awaitExpr);
        WorkerActionSystem was = data.workerActionSystemStack.peek();
        was.addWorkerAction(awaitExpr, data.env);
        // Worker interaction analysis is skipped when either validation failed.
        was.hasErrors = !(validWaitFuture || validActionParent);
    }
    /**
     * Analyzes a multiple-wait expression (`wait {a, b}`): validates each key-value's future
     * expression and the expression's position, then registers the wait as a worker action.
     */
    @Override
    public void visit(BLangWaitForAllExpr waitForAllExpr, AnalyzerData data) {
        boolean validWaitFuture = true;
        for (BLangWaitForAllExpr.BLangWaitKeyValue keyValue : waitForAllExpr.keyValuePairs) {
            BLangExpression expr = keyValue.valueExpr != null ? keyValue.valueExpr : keyValue.keyExpr;
            // Note: && short-circuits — after the first invalid future, later ones are not validated.
            validWaitFuture = validWaitFuture && validateWaitFutureExpr(expr);
            analyzeExpr(expr, data);
        }
        boolean validActionParent = validateActionParentNode(waitForAllExpr.pos, waitForAllExpr);
        WorkerActionSystem was = data.workerActionSystemStack.peek();
        was.addWorkerAction(waitForAllExpr, data.env);
        was.hasErrors = !(validWaitFuture || validActionParent);
    }
private boolean validateWaitFutureExpr(BLangExpression expr) {
if (expr.getKind() == NodeKind.RECORD_LITERAL_EXPR) {
dlog.error(expr.pos, DiagnosticErrorCode.INVALID_WAIT_MAPPING_CONSTRUCTORS);
return false;
}
if (expr instanceof ActionNode) {
dlog.error(expr.pos, DiagnosticErrorCode.INVALID_WAIT_ACTIONS);
return false;
}
return true;
}
    /** Analyzes the navigated expression of an XML element access. */
    @Override
    public void visit(BLangXMLElementAccess xmlElementAccess, AnalyzerData data) {
        analyzeExpr(xmlElementAccess.expr, data);
    }
    /**
     * Analyzes an XML navigation access; member access (an index) combined with descendants or
     * children navigation is unsupported and reported as an error.
     */
    @Override
    public void visit(BLangXMLNavigationAccess xmlNavigation, AnalyzerData data) {
        analyzeExpr(xmlNavigation.expr, data);
        if (xmlNavigation.childIndex != null) {
            if (xmlNavigation.navAccessType == XMLNavigationAccess.NavAccessType.DESCENDANTS
                    || xmlNavigation.navAccessType == XMLNavigationAccess.NavAccessType.CHILDREN) {
                dlog.error(xmlNavigation.pos, DiagnosticErrorCode.UNSUPPORTED_MEMBER_ACCESS_IN_XML_NAVIGATION);
            }
            analyzeExpr(xmlNavigation.childIndex, data);
        }
        validateMethodInvocationsInXMLNavigationExpression(xmlNavigation);
    }
    /**
     * Rejects non-langlib method invocations made directly on an XML navigation expression.
     * The {@code methodInvocationAnalyzed} flag prevents reporting the same error twice.
     */
    private void validateMethodInvocationsInXMLNavigationExpression(BLangXMLNavigationAccess expression) {
        if (!expression.methodInvocationAnalyzed && expression.parent.getKind() == NodeKind.INVOCATION) {
            BLangInvocation invocation = (BLangInvocation) expression.parent;
            // Navigation used as an argument to a non-langlib function is fine;
            // note: the flag is deliberately left unset on this path (early return).
            if (invocation.argExprs.contains(expression)
                    && ((invocation.symbol.flags & Flags.LANG_LIB) != Flags.LANG_LIB)) {
                return;
            }
            dlog.error(invocation.pos, DiagnosticErrorCode.UNSUPPORTED_METHOD_INVOCATION_XML_NAV);
        }
        expression.methodInvocationAnalyzed = true;
    }
@Override
public void visit(BLangWorkerFlushExpr workerFlushExpr, AnalyzerData data) {
BLangIdentifier flushWrkIdentifier = workerFlushExpr.workerIdentifier;
Stack<WorkerActionSystem> workerActionSystems = data.workerActionSystemStack;
WorkerActionSystem currentWrkerAction = workerActionSystems.peek();
List<BLangWorkerSend> sendStmts = getAsyncSendStmtsOfWorker(currentWrkerAction);
if (flushWrkIdentifier != null) {
List<BLangWorkerSend> sendsToGivenWrkr = sendStmts.stream()
.filter(bLangNode -> bLangNode.workerIdentifier.equals
(flushWrkIdentifier))
.collect(Collectors.toList());
if (sendsToGivenWrkr.size() == 0) {
this.dlog.error(workerFlushExpr.pos, DiagnosticErrorCode.INVALID_WORKER_FLUSH_FOR_WORKER,
workerFlushExpr.workerSymbol, currentWrkerAction.currentWorkerId());
return;
} else {
sendStmts = sendsToGivenWrkr;
}
} else {
if (sendStmts.size() == 0) {
this.dlog.error(workerFlushExpr.pos, DiagnosticErrorCode.INVALID_WORKER_FLUSH,
currentWrkerAction.currentWorkerId());
return;
}
}
workerFlushExpr.cachedWorkerSendStmts = sendStmts;
validateActionParentNode(workerFlushExpr.pos, workerFlushExpr);
}
private List<BLangWorkerSend> getAsyncSendStmtsOfWorker(WorkerActionSystem currentWorkerAction) {
List<BLangNode> actions = currentWorkerAction.workerActionStateMachines.peek().actions;
return actions.stream()
.filter(CodeAnalyzer::isWorkerSend)
.map(bLangNode -> (BLangWorkerSend) bLangNode)
.collect(Collectors.toList());
}
    /** Analyzes the expression guarded by `trap`. */
    @Override
    public void visit(BLangTrapExpr trapExpr, AnalyzerData data) {
        analyzeExpr(trapExpr.expr, data);
    }
    /** Analyzes both operands of a binary expression after validating future-typed operand usage. */
    @Override
    public void visit(BLangBinaryExpr binaryExpr, AnalyzerData data) {
        if (validateBinaryExpr(binaryExpr)) {
            analyzeExpr(binaryExpr.lhsExpr, data);
            analyzeExpr(binaryExpr.rhsExpr, data);
        }
    }
    /**
     * Validates binary expressions over future-typed operands: `f1 | f2` alternatives are only
     * meaningful inside a `wait` expression (possibly nested in further `|` expressions).
     * Returns false (after logging, where applicable) for an invalid combination.
     */
    private boolean validateBinaryExpr(BLangBinaryExpr binaryExpr) {
        // No future operand — nothing to validate.
        if (binaryExpr.lhsExpr.getBType().tag != TypeTags.FUTURE
                && binaryExpr.rhsExpr.getBType().tag != TypeTags.FUTURE) {
            return true;
        }
        BLangNode parentNode = binaryExpr.parent;
        if (binaryExpr.lhsExpr.getBType().tag == TypeTags.FUTURE
                || binaryExpr.rhsExpr.getBType().tag == TypeTags.FUTURE) {
            if (parentNode == null) {
                return false;
            }
            if (parentNode.getKind() == NodeKind.WAIT_EXPR) {
                return true;
            }
        }
        // `|` on futures outside a wait/binary context is not a supported operator.
        if (parentNode.getKind() != NodeKind.BINARY_EXPR && binaryExpr.opKind == OperatorKind.BITWISE_OR) {
            dlog.error(binaryExpr.pos, DiagnosticErrorCode.OPERATOR_NOT_SUPPORTED, OperatorKind.BITWISE_OR,
                       symTable.futureType);
            return false;
        }
        // Nested `|` chains are validated against the outermost binary expression.
        if (parentNode.getKind() == NodeKind.BINARY_EXPR) {
            return validateBinaryExpr((BLangBinaryExpr) parentNode);
        }
        return true;
    }
    /** Analyzes both operands of an elvis (`?:`) expression. */
    @Override
    public void visit(BLangElvisExpr elvisExpr, AnalyzerData data) {
        analyzeExpr(elvisExpr.lhsExpr, data);
        analyzeExpr(elvisExpr.rhsExpr, data);
    }
    /** Analyzes the inner expression of a parenthesized group. */
    @Override
    public void visit(BLangGroupExpr groupExpr, AnalyzerData data) {
        analyzeExpr(groupExpr.expression, data);
    }
    /** Analyzes the operand of a unary expression. */
    @Override
    public void visit(BLangUnaryExpr unaryExpr, AnalyzerData data) {
        analyzeExpr(unaryExpr.expr, data);
    }
    /** Intentionally a no-op: typedesc expressions need no checks in this pass. */
    @Override
    public void visit(BLangTypedescExpr accessExpr, AnalyzerData data) {
        /* ignore */
    }
    /** Analyzes the converted expression and any annotation attachments on the cast. */
    @Override
    public void visit(BLangTypeConversionExpr conversionExpr, AnalyzerData data) {
        analyzeExpr(conversionExpr.expr, data);
        conversionExpr.annAttachments.forEach(annotationAttachment -> analyzeNode(annotationAttachment, data));
    }
    /** Intentionally a no-op: XML qualified names need no checks in this pass. */
    @Override
    public void visit(BLangXMLQName xmlQName, AnalyzerData data) {
        /* ignore */
    }
    /** Analyzes the name and value expressions of an XML attribute. */
    @Override
    public void visit(BLangXMLAttribute xmlAttribute, AnalyzerData data) {
        analyzeExpr(xmlAttribute.name, data);
        analyzeExpr(xmlAttribute.value, data);
    }
    /** Analyzes the tag names, attributes, and children of an XML element literal. */
    @Override
    public void visit(BLangXMLElementLiteral xmlElementLiteral, AnalyzerData data) {
        analyzeExpr(xmlElementLiteral.startTagName, data);
        analyzeExpr(xmlElementLiteral.endTagName, data);
        analyzeExprs(xmlElementLiteral.attributes, data);
        analyzeExprs(xmlElementLiteral.children, data);
    }
    /** Analyzes each item of an XML sequence literal. */
    @Override
    public void visit(BLangXMLSequenceLiteral xmlSequenceLiteral, AnalyzerData data) {
        analyzeExprs(xmlSequenceLiteral.xmlItems, data);
    }
    /** Analyzes the text fragments (including interpolations) of an XML text literal. */
    @Override
    public void visit(BLangXMLTextLiteral xmlTextLiteral, AnalyzerData data) {
        analyzeExprs(xmlTextLiteral.textFragments, data);
    }
    /** Analyzes the text fragments (including interpolations) of an XML comment literal. */
    @Override
    public void visit(BLangXMLCommentLiteral xmlCommentLiteral, AnalyzerData data) {
        analyzeExprs(xmlCommentLiteral.textFragments, data);
    }
    /** Analyzes the data fragments and target of an XML processing-instruction literal. */
    @Override
    public void visit(BLangXMLProcInsLiteral xmlProcInsLiteral, AnalyzerData data) {
        analyzeExprs(xmlProcInsLiteral.dataFragments, data);
        analyzeExpr(xmlProcInsLiteral.target, data);
    }
    /** Analyzes the text fragments (including interpolations) of an XML quoted string. */
    @Override
    public void visit(BLangXMLQuotedString xmlQuotedString, AnalyzerData data) {
        analyzeExprs(xmlQuotedString.textFragments, data);
    }
    /** Analyzes the interpolated expressions of a string template literal. */
    @Override
    public void visit(BLangStringTemplateLiteral stringTemplateLiteral, AnalyzerData data) {
        analyzeExprs(stringTemplateLiteral.exprs, data);
    }
    /** Analyzes the string parts and interpolated insertions of a raw template literal. */
    @Override
    public void visit(BLangRawTemplateLiteral rawTemplateLiteral, AnalyzerData data) {
        analyzeExprs(rawTemplateLiteral.strings, data);
        analyzeExprs(rawTemplateLiteral.insertions, data);
    }
    /**
     * Analyzes a lambda function. A lambda bound to a worker variable (name prefixed with the
     * worker-lambda marker) is analyzed inside the enclosing worker action system; any other
     * lambda gets its own fresh action system with a default worker state machine.
     */
    @Override
    public void visit(BLangLambdaFunction bLangLambdaFunction, AnalyzerData data) {
        boolean isWorker = false;
        analyzeNode(bLangLambdaFunction.function, data);
        // A transactional worker may only be started inside a transaction scope.
        if (bLangLambdaFunction.function.flagSet.contains(Flag.TRANSACTIONAL) &&
                bLangLambdaFunction.function.flagSet.contains(Flag.WORKER) && !data.withinTransactionScope) {
            dlog.error(bLangLambdaFunction.pos, DiagnosticErrorCode.TRANSACTIONAL_WORKER_OUT_OF_TRANSACTIONAL_SCOPE,
                       bLangLambdaFunction);
            return;
        }
        if (bLangLambdaFunction.parent.getKind() == NodeKind.VARIABLE) {
            String workerVarName = ((BLangSimpleVariable) bLangLambdaFunction.parent).name.value;
            if (workerVarName.startsWith(WORKER_LAMBDA_VAR_PREFIX)) {
                // Strip the marker prefix to recover the declared worker name.
                String workerName = workerVarName.substring(1);
                isWorker = true;
                data.workerActionSystemStack.peek().startWorkerActionStateMachine(workerName,
                                                                                 bLangLambdaFunction.function.pos,
                                                                                 bLangLambdaFunction.function);
            }
        }
        if (isWorker) {
            this.visitFunction(bLangLambdaFunction.function, data);
        } else {
            // Non-worker lambdas run their own action system; finalize it even on failure.
            try {
                this.initNewWorkerActionSystem(data);
                data.workerActionSystemStack.peek().startWorkerActionStateMachine(DEFAULT_WORKER_NAME,
                                                                                 bLangLambdaFunction.pos,
                                                                                 bLangLambdaFunction.function);
                this.visitFunction(bLangLambdaFunction.function, data);
                data.workerActionSystemStack.peek().endWorkerActionStateMachine();
            } finally {
                this.finalizeCurrentWorkerActionSystem(data);
            }
        }
        if (isWorker) {
            data.workerActionSystemStack.peek().endWorkerActionStateMachine();
        }
    }
    /**
     * Analyzes an arrow function body. When the arrow function appears inside a record field
     * default or object field initializer, the default-value state is switched so that nested
     * expressions are checked under the function-in-default-value rules.
     */
    @Override
    public void visit(BLangArrowFunction bLangArrowFunction, AnalyzerData data) {
        DefaultValueState prevDefaultValueState = data.defaultValueState;
        if (prevDefaultValueState == DefaultValueState.RECORD_FIELD_DEFAULT ||
                prevDefaultValueState == DefaultValueState.OBJECT_FIELD_INITIALIZER) {
            data.defaultValueState = DefaultValueState.FUNCTION_IN_DEFAULT_VALUE;
        }
        analyzeExpr(bLangArrowFunction.body.expr, data);
        data.defaultValueState = prevDefaultValueState;
    }
/* Type Nodes */
    /**
     * Analyzes each field of a record type node under a record-type environment, with the
     * default-value state set (and restored per field) so field defaults are checked correctly.
     */
    @Override
    public void visit(BLangRecordTypeNode recordTypeNode, AnalyzerData data) {
        data.env = SymbolEnv.createTypeEnv(recordTypeNode, recordTypeNode.symbol.scope, data.env);
        for (BLangSimpleVariable field : recordTypeNode.fields) {
            DefaultValueState prevDefaultValueState = data.defaultValueState;
            data.defaultValueState = DefaultValueState.RECORD_FIELD_DEFAULT;
            analyzeNode(field, data);
            data.defaultValueState = prevDefaultValueState;
        }
    }
@Override
public void visit(BLangObjectTypeNode objectTypeNode, AnalyzerData data) {
data.env = SymbolEnv.createTypeEnv(objectTypeNode, objectTypeNode.symbol.scope, data.env);
for (BLangSimpleVariable field : objectTypeNode.fields) {
analyzeNode(field, data);
}
List<BLangFunction> bLangFunctionList = new ArrayList<>(objectTypeNode.functions);
if (objectTypeNode.initFunction != null) {
bLangFunctionList.add(objectTypeNode.initFunction);
}
bLangFunctionList.sort(Comparator.comparingInt(function -> function.pos.lineRange().startLine().line()));
for (BLangFunction function : bLangFunctionList) {
analyzeNode(function, data);
}
}
    /** Intentionally a no-op: built-in value types need no checks in this pass. */
    @Override
    public void visit(BLangValueType valueType, AnalyzerData data) {
        /* ignore */
    }
    /**
     * Validates inferred array sizes (`[*]`): inference is only allowed in the first dimension
     * and only in contexts where the size can be derived; then analyzes the element type.
     */
    @Override
    public void visit(BLangArrayType arrayType, AnalyzerData data) {
        if (containsInferredArraySizesOfHigherDimensions(arrayType.sizes)) {
            dlog.error(arrayType.pos, DiagnosticErrorCode.INFER_SIZE_ONLY_SUPPORTED_IN_FIRST_DIMENSION);
        } else if (isSizeInferredArray(arrayType.sizes) && !isValidInferredArray(arrayType.parent)) {
            dlog.error(arrayType.pos, DiagnosticErrorCode.CANNOT_INFER_SIZE_ARRAY_SIZE_FROM_THE_CONTEXT);
        }
        analyzeTypeNode(arrayType.elemtype, data);
    }
private boolean isSizeInferredArray(List<BLangExpression> indexSizes) {
return !indexSizes.isEmpty() && isInferredArrayIndicator(indexSizes.get(indexSizes.size() - 1));
}
    /** Returns true when the size expression is the literal inferred-array indicator constant. */
    private boolean isInferredArrayIndicator(BLangExpression size) {
        return size.getKind() == LITERAL && ((BLangLiteral) size).value.equals(Constants.INFERRED_ARRAY_INDICATOR);
    }
private boolean containsInferredArraySizesOfHigherDimensions(List<BLangExpression> sizes) {
if (sizes.size() < 2) {
return false;
}
for (int i = 0; i < sizes.size() - 1; i++) {
if (isInferredArrayIndicator(sizes.get(i))) {
return true;
}
}
return false;
}
    /** Intentionally a no-op: built-in reference types need no checks in this pass. */
    @Override
    public void visit(BLangBuiltInRefTypeNode builtInRefType, AnalyzerData data) {
        /* ignore */
    }
    /** Analyzes the constraint of a constrained type (e.g. {@code map<T>}). */
    @Override
    public void visit(BLangConstrainedType constrainedType, AnalyzerData data) {
        analyzeTypeNode(constrainedType.constraint, data);
    }
    /** Analyzes the constraint and completion (error) type of a stream type. */
    @Override
    public void visit(BLangStreamType streamType, AnalyzerData data) {
        analyzeTypeNode(streamType.constraint, data);
        analyzeTypeNode(streamType.error, data);
    }
    /** Analyzes the row constraint and, if present, the key type constraint of a table type. */
    @Override
    public void visit(BLangTableTypeNode tableType, AnalyzerData data) {
        analyzeTypeNode(tableType.constraint, data);
        if (tableType.tableKeyTypeConstraint != null) {
            analyzeTypeNode(tableType.tableKeyTypeConstraint.keyType, data);
        }
    }
    /**
     * Analyzes the detail type of an error type node, skipping the compiler-generated
     * intersected-error-detail type to avoid analyzing a synthetic construct.
     */
    @Override
    public void visit(BLangErrorType errorType, AnalyzerData data) {
        BLangType detailType = errorType.detailType;
        if (detailType != null && detailType.getKind() == NodeKind.CONSTRAINED_TYPE) {
            BLangType constraint = ((BLangConstrainedType) detailType).constraint;
            if (constraint.getKind() == NodeKind.USER_DEFINED_TYPE) {
                BLangUserDefinedType userDefinedType = (BLangUserDefinedType) constraint;
                if (userDefinedType.typeName.value.equals(TypeDefBuilderHelper.INTERSECTED_ERROR_DETAIL)) {
                    // Generated intersection detail type — nothing user-written to analyze.
                    return;
                }
            }
        }
        analyzeTypeNode(errorType.detailType, data);
    }
@Override
public void visit(BLangUserDefinedType userDefinedType, AnalyzerData data) {
BTypeSymbol typeSymbol = userDefinedType.getBType().tsymbol;
if (typeSymbol != null && Symbols.isFlagOn(typeSymbol.flags, Flags.DEPRECATED)) {
logDeprecatedWaring(userDefinedType.typeName.toString(), typeSymbol, userDefinedType.pos);
}
}
    /** Analyzes every member type and the rest-parameter type of a tuple type node. */
    @Override
    public void visit(BLangTupleTypeNode tupleTypeNode, AnalyzerData data) {
        tupleTypeNode.memberTypeNodes.forEach(memberType -> analyzeTypeNode(memberType, data));
        analyzeTypeNode(tupleTypeNode.restParamType, data);
    }
    /** Analyzes every member type of a union type node. */
    @Override
    public void visit(BLangUnionTypeNode unionTypeNode, AnalyzerData data) {
        unionTypeNode.memberTypeNodes.forEach(memberType -> analyzeTypeNode(memberType, data));
    }
    /** Analyzes every constituent type of an intersection type node. */
    @Override
    public void visit(BLangIntersectionTypeNode intersectionTypeNode, AnalyzerData data) {
        for (BLangType constituentTypeNode : intersectionTypeNode.constituentTypeNodes) {
            analyzeTypeNode(constituentTypeNode, data);
        }
    }
    /** Analyzes parameters and return type of a function type node; `function` (any) has neither. */
    @Override
    public void visit(BLangFunctionTypeNode functionTypeNode, AnalyzerData data) {
        if (functionTypeNode.flagSet.contains(Flag.ANY_FUNCTION)) {
            return;
        }
        functionTypeNode.params.forEach(node -> analyzeNode(node, data));
        analyzeTypeNode(functionTypeNode.returnTypeNode, data);
    }
    /** Intentionally a no-op: finite types need no checks in this pass. */
    @Override
    public void visit(BLangFiniteTypeNode finiteTypeNode, AnalyzerData data) {
        /* Ignore */
    }
    /** Analyzes the underlying expression of a rest-args (`...x`) expression. */
    @Override
    public void visit(BLangRestArgsExpression bLangVarArgsExpression, AnalyzerData data) {
        analyzeExpr(bLangVarArgsExpression.expr, data);
    }
    /** Analyzes the value expression of a named argument. */
    @Override
    public void visit(BLangNamedArgsExpression bLangNamedArgsExpression, AnalyzerData data) {
        analyzeExpr(bLangNamedArgsExpression.expr, data);
    }
    /**
     * Analyzes a `check` expression: validates that its error component is usable in the current
     * context (not in record field defaults; compatible with the `init` return type in object
     * field initializers; assignable to the enclosing invokable's return type when the failure
     * is not otherwise handled) and records the propagated error types for on-fail matching.
     */
    @Override
    public void visit(BLangCheckedExpr checkedExpr, AnalyzerData data) {
        data.failVisited = true;
        analyzeExpr(checkedExpr.expr, data);
        // Module-level `check` is validated elsewhere.
        if (data.env.scope.owner.getKind() == SymbolKind.PACKAGE) {
            return;
        }
        BLangInvokableNode enclInvokable = data.env.enclInvokable;
        List<BType> equivalentErrorTypeList = checkedExpr.equivalentErrorTypeList;
        if (equivalentErrorTypeList != null && !equivalentErrorTypeList.isEmpty()) {
            // `check` cannot appear in a record field default expression.
            if (data.defaultValueState == DefaultValueState.RECORD_FIELD_DEFAULT) {
                dlog.error(checkedExpr.pos,
                           DiagnosticErrorCode.INVALID_USAGE_OF_CHECK_IN_RECORD_FIELD_DEFAULT_EXPRESSION);
                return;
            }
            if (data.defaultValueState == DefaultValueState.OBJECT_FIELD_INITIALIZER) {
                BAttachedFunction initializerFunc =
                        ((BObjectTypeSymbol) getEnclosingClass(data.env).getBType().tsymbol).initializerFunc;
                // Without an init method there is nowhere for the error to propagate.
                if (initializerFunc == null) {
                    dlog.error(checkedExpr.pos,
                               DiagnosticErrorCode
                                       .INVALID_USAGE_OF_CHECK_IN_OBJECT_FIELD_INITIALIZER_IN_OBJECT_WITH_NO_INIT_METHOD);
                    return;
                }
                BType exprErrorTypes = getErrorTypes(checkedExpr.expr.getBType());
                BType initMethodReturnType = initializerFunc.type.retType;
                if (!types.isAssignable(exprErrorTypes, initMethodReturnType)) {
                    dlog.error(checkedExpr.pos, DiagnosticErrorCode
                                       .INVALID_USAGE_OF_CHECK_IN_OBJECT_FIELD_INITIALIZER_WITH_INIT_METHOD_RETURN_TYPE_MISMATCH,
                               initMethodReturnType, exprErrorTypes);
                }
                return;
            }
        }
        if (enclInvokable == null) {
            return;
        }
        BType exprType = enclInvokable.getReturnTypeNode().getBType();
        BType checkedExprType = checkedExpr.expr.getBType();
        BType errorType = getErrorTypes(checkedExprType);
        if (errorType == symTable.semanticError) {
            return;
        }
        // Unless handled (e.g. by on-fail), the error must be returnable from the enclosing invokable.
        if (!data.failureHandled && !types.isAssignable(errorType, exprType) &&
                !types.isNeverTypeOrStructureTypeWithARequiredNeverMember(checkedExprType)) {
            dlog.error(checkedExpr.pos,
                       DiagnosticErrorCode.CHECKED_EXPR_NO_MATCHING_ERROR_RETURN_IN_ENCL_INVOKABLE);
        }
        if (!data.errorTypes.empty()) {
            data.errorTypes.peek().add(getErrorTypes(checkedExpr.expr.getBType()));
        }
        BType errorTypes;
        if (exprType.tag == TypeTags.UNION) {
            errorTypes = types.getErrorType((BUnionType) exprType);
        } else {
            errorTypes = exprType;
        }
        data.returnTypes.peek().add(errorTypes);
    }
    /** Analyzes the expression guarded by `checkpanic`. */
    @Override
    public void visit(BLangCheckPanickedExpr checkPanicExpr, AnalyzerData data) {
        analyzeExpr(checkPanicExpr.expr, data);
    }
    /** Intentionally a no-op: service constructor expressions need no checks in this pass. */
    @Override
    public void visit(BLangServiceConstructorExpr serviceConstructorExpr, AnalyzerData data) {
    }
    /**
     * Analyzes a query expression clause by clause, tracking query context flags (within-query,
     * query-to-table-with-key, query-to-map) and rejecting stream collections in any `from`
     * clause after the first. The context flags are restored on exit.
     */
    @Override
    public void visit(BLangQueryExpr queryExpr, AnalyzerData data) {
        boolean prevQueryToTableWithKey = data.queryToTableWithKey;
        data.queryToTableWithKey = queryExpr.isTable() && !queryExpr.fieldNameIdentifierList.isEmpty();
        data.queryToMap = queryExpr.isMap;
        boolean prevWithinQuery = data.withinQuery;
        data.withinQuery = true;
        int fromCount = 0;
        for (BLangNode clause : queryExpr.getQueryClauses()) {
            if (clause.getKind() == NodeKind.FROM) {
                fromCount++;
                BLangFromClause fromClause = (BLangFromClause) clause;
                BLangExpression collection = (BLangExpression) fromClause.getCollection();
                // Streams are single-pass, so they cannot feed an inner `from` clause.
                if (fromCount > 1) {
                    if (TypeTags.STREAM == Types.getReferredType(collection.getBType()).tag) {
                        this.dlog.error(collection.pos, DiagnosticErrorCode.NOT_ALLOWED_STREAM_USAGE_WITH_FROM);
                    }
                }
            }
            analyzeNode(clause, data);
        }
        data.withinQuery = prevWithinQuery;
        data.queryToTableWithKey = prevQueryToTableWithKey;
    }
    /**
     * Analyzes a query action (`from ... do`) clause by clause: failures are considered handled
     * inside the query, stream collections are rejected in non-first `from` clauses, and the
     * action-position rule is enforced. Context flags are restored on exit.
     */
    @Override
    public void visit(BLangQueryAction queryAction, AnalyzerData data) {
        boolean prevFailureHandled = data.failureHandled;
        data.failureHandled = true;
        boolean prevWithinQuery = data.withinQuery;
        data.withinQuery = true;
        int fromCount = 0;
        for (BLangNode clause : queryAction.getQueryClauses()) {
            if (clause.getKind() == NodeKind.FROM) {
                fromCount++;
                BLangFromClause fromClause = (BLangFromClause) clause;
                BLangExpression collection = (BLangExpression) fromClause.getCollection();
                // Streams are single-pass, so they cannot feed an inner `from` clause.
                if (fromCount > 1) {
                    if (TypeTags.STREAM == Types.getReferredType(collection.getBType()).tag) {
                        this.dlog.error(collection.pos, DiagnosticErrorCode.NOT_ALLOWED_STREAM_USAGE_WITH_FROM);
                    }
                }
            }
            analyzeNode(clause, data);
        }
        validateActionParentNode(queryAction.pos, queryAction);
        data.failureHandled = prevFailureHandled;
        data.withinQuery = prevWithinQuery;
    }
    /** Analyzes the collection expression iterated by a `from` clause. */
    @Override
    public void visit(BLangFromClause fromClause, AnalyzerData data) {
        analyzeExpr(fromClause.collection, data);
    }
    /** Analyzes the joined collection and, if present, the `on` condition of a join clause. */
    @Override
    public void visit(BLangJoinClause joinClause, AnalyzerData data) {
        analyzeExpr(joinClause.collection, data);
        if (joinClause.onClause != null) {
            analyzeNode(joinClause.onClause, data);
        }
    }
    /** Analyzes each variable declaration of a `let` clause. */
    @Override
    public void visit(BLangLetClause letClause, AnalyzerData data) {
        for (BLangLetVariable letVariable : letClause.letVarDeclarations) {
            analyzeNode((BLangNode) letVariable.definitionNode.getVariable(), data);
        }
    }
    /** Analyzes the filter expression of a `where` clause. */
    @Override
    public void visit(BLangWhereClause whereClause, AnalyzerData data) {
        analyzeExpr(whereClause.expression, data);
    }
    /** Analyzes both sides of a join's `on` equality condition. */
    @Override
    public void visit(BLangOnClause onClause, AnalyzerData data) {
        analyzeExpr(onClause.lhsExpr, data);
        analyzeExpr(onClause.rhsExpr, data);
    }
    /** Analyzes each order-key expression of an `order by` clause. */
    @Override
    public void visit(BLangOrderByClause orderByClause, AnalyzerData data) {
        orderByClause.orderByKeyList.forEach(value -> analyzeExpr((BLangExpression) value.getOrderKey(), data));
    }
    /** Analyzes the projection expression of a `select` clause. */
    @Override
    public void visit(BLangSelectClause selectClause, AnalyzerData data) {
        analyzeExpr(selectClause.expression, data);
    }
    /**
     * Analyzes an `on conflict` clause; it is only meaningful for queries producing a map or a
     * table with a key specifier, and is rejected otherwise.
     */
    @Override
    public void visit(BLangOnConflictClause onConflictClause, AnalyzerData data) {
        analyzeExpr(onConflictClause.expression, data);
        if (!(data.queryToTableWithKey || data.queryToMap)) {
            dlog.error(onConflictClause.pos,
                       DiagnosticErrorCode.ON_CONFLICT_ONLY_WORKS_WITH_MAPS_OR_TABLES_WITH_KEY_SPECIFIER);
        }
    }
    /** Analyzes the body block of a `do` clause. */
    @Override
    public void visit(BLangDoClause doClause, AnalyzerData data) {
        analyzeNode(doClause.body, data);
    }
    /**
     * Analyzes an `on fail` clause: verifies every error type that can reach the clause is
     * assignable to the declared error variable, then analyzes the body while tracking whether
     * it itself contains a `fail`. The caller's fail-visited state is restored afterwards.
     */
    @Override
    public void visit(BLangOnFailClause onFailClause, AnalyzerData data) {
        boolean currentFailVisited = data.failVisited;
        data.failVisited = false;
        VariableDefinitionNode onFailVarDefNode = onFailClause.variableDefinitionNode;
        if (onFailVarDefNode != null) {
            BLangVariable onFailVarNode = (BLangVariable) onFailVarDefNode.getVariable();
            for (BType errorType : data.errorTypes.peek()) {
                if (!types.isAssignable(errorType, onFailVarNode.getBType())) {
                    dlog.error(onFailVarNode.pos, DiagnosticErrorCode.INCOMPATIBLE_ON_FAIL_ERROR_DEFINITION, errorType,
                               onFailVarNode.getBType());
                }
            }
        }
        analyzeNode(onFailClause.body, data);
        onFailClause.bodyContainsFail = data.failVisited;
        data.failVisited = currentFailVisited;
    }
    /** Analyzes the limit expression of a `limit` clause. */
    @Override
    public void visit(BLangLimitClause limitClause, AnalyzerData data) {
        analyzeExpr(limitClause.expression, data);
    }
    /**
     * Analyzes a type-test (`is`/`!is`) expression: hints when the result is statically known
     * (always true/false) and errors when the tested types cannot intersect at all.
     */
    @Override
    public void visit(BLangTypeTestExpr typeTestExpr, AnalyzerData data) {
        BLangExpression expr = typeTestExpr.expr;
        analyzeNode(expr, data);
        BType exprType = expr.getBType();
        BType typeNodeType = typeTestExpr.typeNode.getBType();
        if (typeNodeType == symTable.semanticError || exprType == symTable.semanticError) {
            return;
        }
        // Expression type is a subtype of the tested type — the test result is statically known.
        if (types.isAssignable(exprType, typeNodeType)) {
            if (typeTestExpr.isNegation) {
                dlog.hint(typeTestExpr.pos, DiagnosticHintCode.EXPRESSION_ALWAYS_FALSE);
                return;
            }
            if (types.isNeverTypeOrStructureTypeWithARequiredNeverMember(exprType)) {
                dlog.hint(typeTestExpr.pos, DiagnosticHintCode.UNNECESSARY_CONDITION_FOR_VARIABLE_OF_TYPE_NEVER);
                return;
            }
            dlog.hint(typeTestExpr.pos, DiagnosticHintCode.UNNECESSARY_CONDITION);
            return;
        }
        // Disjoint types can never satisfy the test — report an incompatible type check.
        if (!intersectionExists(expr, typeNodeType, data, typeTestExpr.pos)) {
            dlog.error(typeTestExpr.pos, DiagnosticErrorCode.INCOMPATIBLE_TYPE_CHECK, exprType, typeNodeType);
        }
    }
// Analyzes an annotation access expression and warns when the accessed
// annotation is marked deprecated.
@Override
public void visit(BLangAnnotAccessExpr annotAccessExpr, AnalyzerData data) {
    analyzeExpr(annotAccessExpr.expr, data);
    BAnnotationSymbol annotationSymbol = annotAccessExpr.annotationSymbol;
    if (annotationSymbol != null && Symbols.isFlagOn(annotationSymbol.flags, Flags.DEPRECATED)) {
        logDeprecatedWaring(annotAccessExpr.annotationName.toString(), annotationSymbol, annotAccessExpr.pos);
    }
}

// Analyzes every interpolation embedded in a regular-expression template literal.
@Override
public void visit(BLangRegExpTemplateLiteral regExpTemplateLiteral, AnalyzerData data) {
    List<BLangExpression> interpolationsList =
            symResolver.getListOfInterpolations(regExpTemplateLiteral.reDisjunction.sequenceList);
    interpolationsList.forEach(interpolation -> analyzeExpr(interpolation, data));
}

// Logs a deprecation warning, qualifying the construct with its package id
// unless it belongs to the default (".") package.
// NOTE(review): method name has a typo ("Waring"); left unchanged because
// callers throughout this file reference it.
private void logDeprecatedWaring(String deprecatedConstruct, BSymbol symbol, Location pos) {
    if (!Names.DOT.equals(symbol.pkgID.name)) {
        deprecatedConstruct = symbol.pkgID + ":" + deprecatedConstruct;
    }
    dlog.warning(pos, DiagnosticWarningCode.USAGE_OF_DEPRECATED_CONSTRUCT, deprecatedConstruct);
}

// Returns true when the expression's type and the tested type have a non-error
// intersection, or for the special `any is readonly` case which is always testable.
private boolean intersectionExists(BLangExpression expression, BType testType, AnalyzerData data,
                                   Location intersectionPos) {
    BType expressionType = expression.getBType();
    BType intersectionType = types.getTypeIntersection(
            Types.IntersectionContext.typeTestIntersectionExistenceContext(intersectionPos),
            expressionType, testType, data.env);
    return (intersectionType != symTable.semanticError) ||
            (expressionType.tag == TypeTags.ANY && testType.tag == TypeTags.READONLY);
}

// Inferred typedesc defaults require no further code analysis.
@Override
public void visit(BLangInferredTypedescDefaultNode inferTypedescExpr, AnalyzerData data) {
    /* Ignore */
}
// Analyzes a single expression node. Maintains the parent chain (both on the
// node itself and in the analyzer state), then runs access and validity checks.
// The save/restore order of `data.parent` and `data.env` is significant:
// `data.parent` is restored before the post-checks so they see the node's
// actual parent, while `data.env` is restored last.
private <E extends BLangExpression> void analyzeExpr(E node, AnalyzerData data) {
    if (node == null) {
        return;
    }
    SymbolEnv prevEnv = data.env;
    BLangNode parent = data.parent;
    node.parent = data.parent;
    data.parent = node;
    node.accept(this, data);
    data.parent = parent;
    checkAccess(node, data);
    checkExpressionValidity(node, data);
    data.env = prevEnv;
}

// Reports an error when an expression of type `never` (or a structure with a
// required `never` member) appears somewhere other than the few allowed
// parent positions. Group expressions are exempt; their inner expression is
// checked instead as analysis recurses.
private <E extends BLangExpression> void checkExpressionValidity(E exprNode, AnalyzerData data) {
    if (exprNode.getKind() == NodeKind.GROUP_EXPR ||
            !types.isNeverTypeOrStructureTypeWithARequiredNeverMember(exprNode.getBType())) {
        return;
    }
    if (!checkExpressionInValidParent(exprNode.parent, data)) {
        dlog.error(exprNode.pos, DiagnosticErrorCode.EXPRESSION_OF_NEVER_TYPE_NOT_ALLOWED);
    }
}
/**
 * Checks whether a never-typed expression sits in a position where it is
 * allowed: directly as an expression statement, as the initializer of a
 * {@code future}-typed variable, or inside a {@code trap} expression.
 * Group expressions are transparently unwrapped.
 *
 * @param currentParent parent node of the expression being validated
 * @param data          analyzer context (kept for signature compatibility)
 * @return true if the parent position permits a never-typed expression
 */
private boolean checkExpressionInValidParent(BLangNode currentParent, AnalyzerData data) {
    if (currentParent == null) {
        return false;
    }
    if (currentParent.getKind() == NodeKind.GROUP_EXPR) {
        return checkExpressionInValidParent(currentParent.parent, data);
    }
    return currentParent.getKind() == NodeKind.EXPRESSION_STATEMENT ||
            (currentParent.getKind() == NodeKind.VARIABLE &&
                    // Bug fix: cast the node whose kind was just tested. The original cast
                    // `data.parent`, which diverges from `currentParent` after recursing
                    // past a group expression, risking a ClassCastException or checking
                    // the wrong node's type.
                    ((BLangSimpleVariable) currentParent).typeNode.getBType().tag == TypeTags.FUTURE)
            || currentParent.getKind() == NodeKind.TRAP_EXPR;
}
// Analyzes a constant declaration: its type node, initializer expression,
// exportability of the referenced type, and attached annotations.
@Override
public void visit(BLangConstant constant, AnalyzerData data) {
    analyzeTypeNode(constant.typeNode, data);
    analyzeNode(constant.expr, data);
    analyzeExportableTypeRef(constant.symbol, constant.symbol.type.tsymbol, false, constant.pos);
    constant.annAttachments.forEach(annotationAttachment -> analyzeNode(annotationAttachment, data));
}
/**
 * This method checks for private symbols being accessed or used outside of package and|or private symbols being
 * used in public fields of objects/records and will fail those occurrences.
 *
 * @param node expression node to analyze
 * @param data data used to analyze the node
 */
private <E extends BLangExpression> void checkAccess(E node, AnalyzerData data) {
    // Check the symbol of the expression's type, if any.
    if (node.getBType() != null) {
        checkAccessSymbol(node.getBType().tsymbol, data.env.enclPkg.symbol.pkgID, node.pos);
    }
    // For invocations, also check the invoked function's symbol.
    if (node.getKind() == NodeKind.INVOCATION) {
        BLangInvocation bLangInvocation = (BLangInvocation) node;
        checkAccessSymbol(bLangInvocation.symbol, data.env.enclPkg.symbol.pkgID, bLangInvocation.pos);
    }
}

// Reports an error when a non-public symbol from another package is referenced.
private void checkAccessSymbol(BSymbol symbol, PackageID pkgID, Location position) {
    if (symbol == null) {
        return;
    }
    if (!pkgID.equals(symbol.pkgID) && !Symbols.isPublic(symbol)) {
        dlog.error(position, DiagnosticErrorCode.ATTEMPT_REFER_NON_ACCESSIBLE_SYMBOL, symbol.name);
    }
}

// Analyzes each expression in the list. Uses an index-based loop rather than an
// iterator — presumably to tolerate the list being mutated during analysis
// without a ConcurrentModificationException; TODO confirm before changing.
private <E extends BLangExpression> void analyzeExprs(List<E> nodeList, AnalyzerData data) {
    for (int i = 0; i < nodeList.size(); i++) {
        analyzeExpr(nodeList.get(i), data);
    }
}
// Pushes a fresh worker-action system for the function scope being entered.
private void initNewWorkerActionSystem(AnalyzerData data) {
    data.workerActionSystemStack.push(new WorkerActionSystem());
}

// Pops the current worker-action system and, if no errors were recorded while
// building it, validates the send/receive/wait interactions it captured.
private void finalizeCurrentWorkerActionSystem(AnalyzerData data) {
    WorkerActionSystem was = data.workerActionSystemStack.pop();
    if (!was.hasErrors) {
        this.validateWorkerInteractions(was, data);
    }
}

// True when the action is an async worker send.
private static boolean isWorkerSend(BLangNode action) {
    return action.getKind() == NodeKind.WORKER_SEND;
}

// True when the action is a sync worker send.
private static boolean isWorkerSyncSend(BLangNode action) {
    return action.getKind() == NodeKind.WORKER_SYNC_SEND;
}

// True when the action is a wait expression.
private static boolean isWaitAction(BLangNode action) {
    return action.getKind() == NodeKind.WAIT_EXPR;
}

// Extracts the counterpart worker's id from a send, sync-send, or receive action.
private String extractWorkerId(BLangNode action) {
    if (isWorkerSend(action)) {
        return ((BLangWorkerSend) action).workerIdentifier.value;
    } else if (isWorkerSyncSend(action)) {
        return ((BLangWorkerSyncSendExpr) action).workerIdentifier.value;
    } else {
        return ((BLangWorkerReceive) action).workerIdentifier.value;
    }
}
// Simulates the worker interaction state machines: repeatedly advances each
// worker through matching send/receive pairs and resolvable wait actions.
// If the system stops making progress before every worker finishes, the
// interactions are invalid (e.g. a send/receive deadlock) and an error is logged.
private void validateWorkerInteractions(WorkerActionSystem workerActionSystem, AnalyzerData data) {
    if (!validateWorkerInteractionsAfterWaitAction(workerActionSystem)) {
        return;
    }
    BLangNode currentAction;
    boolean systemRunning;
    data.workerSystemMovementSequence = 0;
    int systemIterationCount = 0;
    int prevWorkerSystemMovementSequence = data.workerSystemMovementSequence;
    do {
        systemRunning = false;
        systemIterationCount++;
        for (WorkerActionStateMachine worker : workerActionSystem.finshedWorkers) {
            if (worker.done()) {
                continue;
            }
            currentAction = worker.currentAction();
            if (isWaitAction(currentAction)) {
                handleWaitAction(workerActionSystem, currentAction, worker, data);
                systemRunning = true;
                continue;
            }
            if (!isWorkerSend(currentAction) && !isWorkerSyncSend(currentAction)) {
                continue;
            }
            // Current action is a send; look up the target worker's state machine.
            WorkerActionStateMachine otherSM = workerActionSystem.find(this.extractWorkerId(currentAction));
            if (otherSM.done()) {
                continue;
            }
            if (isWaitAction(otherSM.currentAction())) {
                systemRunning = false;
                continue;
            }
            if (!otherSM.currentIsReceive(worker.workerId)) {
                continue;
            }
            // Matching send/receive pair found: type-check it and advance both machines.
            BLangWorkerReceive receive = (BLangWorkerReceive) otherSM.currentAction();
            if (isWorkerSyncSend(currentAction)) {
                this.validateWorkerActionParameters((BLangWorkerSyncSendExpr) currentAction, receive);
            } else {
                this.validateWorkerActionParameters((BLangWorkerSend) currentAction, receive);
            }
            otherSM.next();
            data.workerSystemMovementSequence++;
            worker.next();
            data.workerSystemMovementSequence++;
            systemRunning = true;
            String channelName = generateChannelName(worker.workerId, otherSM.workerId);
            otherSM.node.sendsToThis.add(channelName);
            worker.node.sendsToThis.add(channelName);
        }
        // After a full sweep of all workers, stop if no machine moved — this
        // prevents looping forever on a deadlocked interaction graph.
        if (systemIterationCount > workerActionSystem.finshedWorkers.size()) {
            systemIterationCount = 0;
            if (prevWorkerSystemMovementSequence == data.workerSystemMovementSequence) {
                systemRunning = false;
            }
            prevWorkerSystemMovementSequence = data.workerSystemMovementSequence;
        }
    } while (systemRunning);
    if (!workerActionSystem.everyoneDone()) {
        this.reportInvalidWorkerInteractionDiagnostics(workerActionSystem);
    }
}
// Verifies that no worker interacts (send / sync-send / receive) with another
// worker it has already waited on: once a worker's result is awaited, further
// interaction with it is invalid. Returns false when any violation is reported.
private boolean validateWorkerInteractionsAfterWaitAction(WorkerActionSystem workerActionSystem) {
    boolean isValid = true;
    for (WorkerActionStateMachine worker : workerActionSystem.finshedWorkers) {
        // Accumulates the ids of workers this worker has waited on so far,
        // scanning its actions in program order.
        Set<String> waitingOnWorkerSet = new HashSet<>();
        for (BLangNode action : worker.actions) {
            if (isWaitAction(action)) {
                if (action instanceof BLangWaitForAllExpr) {
                    BLangWaitForAllExpr waitForAllExpr = (BLangWaitForAllExpr) action;
                    for (BLangWaitForAllExpr.BLangWaitKeyValue keyValuePair : waitForAllExpr.keyValuePairs) {
                        BSymbol workerSymbol = getWorkerSymbol(keyValuePair);
                        if (workerSymbol != null) {
                            waitingOnWorkerSet.add(workerSymbol.name.value);
                        }
                    }
                } else {
                    BLangWaitExpr wait = (BLangWaitExpr) action;
                    for (String workerName : getWorkerNameList(wait.exprList.get(0),
                            workerActionSystem.getActionEnvironment(wait))) {
                        waitingOnWorkerSet.add(workerName);
                    }
                }
            } else if (isWorkerSend(action)) {
                BLangWorkerSend send = (BLangWorkerSend) action;
                if (waitingOnWorkerSet.contains(send.workerIdentifier.value)) {
                    dlog.error(action.pos, DiagnosticErrorCode.WORKER_INTERACTION_AFTER_WAIT_ACTION, action);
                    isValid = false;
                }
            } else if (isWorkerSyncSend(action)) {
                BLangWorkerSyncSendExpr syncSend = (BLangWorkerSyncSendExpr) action;
                if (waitingOnWorkerSet.contains(syncSend.workerIdentifier.value)) {
                    dlog.error(action.pos, DiagnosticErrorCode.WORKER_INTERACTION_AFTER_WAIT_ACTION, action);
                    isValid = false;
                }
            } else if (action.getKind() == NodeKind.WORKER_RECEIVE) {
                BLangWorkerReceive receive = (BLangWorkerReceive) action;
                if (waitingOnWorkerSet.contains(receive.workerIdentifier.value)) {
                    dlog.error(action.pos, DiagnosticErrorCode.WORKER_INTERACTION_AFTER_WAIT_ACTION, action);
                    isValid = false;
                }
            }
        }
    }
    return isValid;
}
// Advances a worker past its current wait action when the waited-on workers
// have finished: a wait-for-all requires every referenced worker to be done,
// while an alternative wait advances as soon as any one of them is done
// (or immediately when the wait references no workers in this function).
private void handleWaitAction(WorkerActionSystem workerActionSystem, BLangNode currentAction,
                              WorkerActionStateMachine worker, AnalyzerData data) {
    if (currentAction instanceof BLangWaitForAllExpr) {
        boolean allWorkersAreDone = true;
        BLangWaitForAllExpr waitForAllExpr = (BLangWaitForAllExpr) currentAction;
        for (BLangWaitForAllExpr.BLangWaitKeyValue keyValuePair : waitForAllExpr.keyValuePairs) {
            BSymbol workerSymbol = getWorkerSymbol(keyValuePair);
            if (isWorkerSymbol(workerSymbol)) {
                Name workerName = workerSymbol.name;
                // Only workers belonging to this function participate in the simulation.
                if (isWorkerFromFunction(workerActionSystem.getActionEnvironment(currentAction), workerName)) {
                    WorkerActionStateMachine otherSM = workerActionSystem.find(workerName.value);
                    allWorkersAreDone = allWorkersAreDone && otherSM.done();
                }
            }
        }
        if (allWorkersAreDone) {
            worker.next();
            data.workerSystemMovementSequence++;
        }
    } else {
        BLangWaitExpr wait = (BLangWaitExpr) currentAction;
        List<String> workerNameList = getWorkerNameList(wait.exprList.get(0),
                workerActionSystem.getActionEnvironment(currentAction));
        if (workerNameList.isEmpty()) {
            // The wait references no worker from this function, so it cannot block the system.
            worker.next();
            data.workerSystemMovementSequence++;
        }
        for (String workerName : workerNameList) {
            var otherSM = workerActionSystem.find(workerName);
            if (otherSM.done()) {
                worker.next();
                data.workerSystemMovementSequence++;
                break;
            }
        }
    }
}

// Resolves the worker symbol referenced by a wait-for-all key-value entry,
// preferring the value expression over the key; null when neither is a simple var ref.
private BSymbol getWorkerSymbol(BLangWaitForAllExpr.BLangWaitKeyValue keyValuePair) {
    BLangExpression value = keyValuePair.getValue();
    if (value != null && value.getKind() == NodeKind.SIMPLE_VARIABLE_REF) {
        return ((BLangSimpleVarRef) value).symbol;
    } else if (keyValuePair.keyExpr != null && keyValuePair.keyExpr.getKind() == NodeKind.SIMPLE_VARIABLE_REF) {
        return ((BLangSimpleVarRef) keyValuePair.keyExpr).symbol;
    }
    return null;
}
// Collects the names of all workers referenced by a wait expression's operand.
private List<String> getWorkerNameList(BLangExpression expr, SymbolEnv functionEnv) {
    ArrayList<String> workerNames = new ArrayList<>();
    populateWorkerNameList(expr, workerNames, functionEnv);
    return workerNames;
}

// Recursively walks an alternative-wait expression (binary `|` tree) and adds
// every simple var ref that resolves to a worker of the current function.
private void populateWorkerNameList(BLangExpression expr, ArrayList<String> workerNames, SymbolEnv functionEnv) {
    if (expr.getKind() == NodeKind.BINARY_EXPR) {
        BLangBinaryExpr binaryExpr = (BLangBinaryExpr) expr;
        populateWorkerNameList(binaryExpr.lhsExpr, workerNames, functionEnv);
        populateWorkerNameList(binaryExpr.rhsExpr, workerNames, functionEnv);
    } else if (expr.getKind() == NodeKind.SIMPLE_VARIABLE_REF) {
        BLangSimpleVarRef varRef = (BLangSimpleVarRef) expr;
        if (isWorkerSymbol(varRef.symbol) && isWorkerFromFunction(functionEnv, varRef.symbol.name)) {
            workerNames.add(varRef.variableName.value);
        }
    }
}

// Determines whether the named worker belongs to this function's scope chain.
// The walk stops at a non-worker lambda boundary, since workers of an enclosing
// function are not visible inside an ordinary lambda.
private boolean isWorkerFromFunction(SymbolEnv functionEnv, Name workerName) {
    if (functionEnv == null) {
        return false;
    }
    if (functionEnv.scope.lookup(workerName).symbol != null) {
        return true;
    }
    if (functionEnv.enclInvokable != null) {
        Set<Flag> flagSet = functionEnv.enclInvokable.flagSet;
        if (flagSet.contains(Flag.LAMBDA) && !flagSet.contains(Flag.WORKER)) {
            return false;
        }
    }
    return isWorkerFromFunction(functionEnv.enclEnv, workerName);
}

// True when the symbol exists and carries the WORKER flag bit.
private boolean isWorkerSymbol(BSymbol symbol) {
    return symbol != null && (symbol.flags & Flags.WORKER) == Flags.WORKER;
}
// Reports the whole interaction system as invalid, anchored at the first worker's position.
private void reportInvalidWorkerInteractionDiagnostics(WorkerActionSystem workerActionSystem) {
    this.dlog.error(workerActionSystem.getRootPosition(), DiagnosticErrorCode.INVALID_WORKER_INTERACTION,
            workerActionSystem.toString());
}

// Type-checks an async send against its matching receive and wires the
// receive's implicit cast / send expression. When the receive sits under a
// trap/check/checkpanic/fail parent, that parent is re-checked because the
// receive's type may have changed.
private void validateWorkerActionParameters(BLangWorkerSend send, BLangWorkerReceive receive) {
    types.checkType(receive, send.getBType(), receive.getBType());
    addImplicitCast(send.getBType(), receive);
    NodeKind kind = receive.parent.getKind();
    if (kind == NodeKind.TRAP_EXPR || kind == NodeKind.CHECK_EXPR || kind == NodeKind.CHECK_PANIC_EXPR ||
            kind == NodeKind.FAIL) {
        typeChecker.checkExpr((BLangExpression) receive.parent, receive.env);
    }
    receive.sendExpression = send.expr;
}

// Type-checks a sync send against its matching receive. A sync send produces
// the receive's matching error type as its own result, so the expected type is
// inferred from the send's parent (var declaration or assignment target); a
// possibly-error result used as a bare expression statement is an error.
private void validateWorkerActionParameters(BLangWorkerSyncSendExpr send, BLangWorkerReceive receive) {
    send.receive = receive;
    NodeKind parentNodeKind = send.parent.getKind();
    if (parentNodeKind == NodeKind.VARIABLE) {
        BLangSimpleVariable variable = (BLangSimpleVariable) send.parent;
        if (variable.isDeclaredWithVar) {
            // `var x = w ->> ...;` — the variable's type is the send's error result type.
            variable.setBType(variable.symbol.type = send.expectedType = receive.matchingSendsError);
        }
    } else if (parentNodeKind == NodeKind.ASSIGNMENT) {
        BLangAssignment assignment = (BLangAssignment) send.parent;
        if (assignment.varRef.getKind() == NodeKind.SIMPLE_VARIABLE_REF) {
            BSymbol varSymbol = ((BLangSimpleVarRef) assignment.varRef).symbol;
            if (varSymbol != null) {
                send.expectedType = varSymbol.type;
            }
        }
    }
    if (receive.matchingSendsError != symTable.nilType && parentNodeKind == NodeKind.EXPRESSION_STATEMENT) {
        // The sync send can produce an error that the caller ignores.
        dlog.error(send.pos, DiagnosticErrorCode.ASSIGNMENT_REQUIRED, send.workerSymbol);
    } else {
        types.checkType(send.pos, receive.matchingSendsError, send.expectedType,
                DiagnosticErrorCode.INCOMPATIBLE_TYPES);
    }
    types.checkType(receive, send.getBType(), receive.getBType());
    addImplicitCast(send.getBType(), receive);
    NodeKind kind = receive.parent.getKind();
    if (kind == NodeKind.TRAP_EXPR || kind == NodeKind.CHECK_EXPR || kind == NodeKind.CHECK_PANIC_EXPR) {
        typeChecker.checkExpr((BLangExpression) receive.parent, receive.env);
    }
    receive.sendExpression = send;
}

// Installs an implicit cast on the receive so its declared type is preserved
// while its actual (sent) type flows through.
private void addImplicitCast(BType actualType, BLangWorkerReceive receive) {
    if (receive.getBType() != null && receive.getBType() != symTable.semanticError) {
        types.setImplicitCastExpr(receive, actualType, receive.getBType());
        receive.setBType(actualType);
    }
}

// True when a break/continue inside a transaction would illegally jump out of it.
private boolean checkNextBreakValidityInTransaction(AnalyzerData data) {
    return !data.loopWithinTransactionCheckStack.peek() && data.transactionCount > 0 && data.withinTransactionScope;
}

// True when a return inside a transaction would illegally jump out of it.
private boolean checkReturnValidityInTransaction(AnalyzerData data) {
    return !data.returnWithinTransactionCheckStack.peek() && data.transactionCount > 0
            && data.withinTransactionScope;
}
// Validates the module-level `init` function: it must not be public, must take
// no parameters, and must return error? (or nil). Attached functions named
// `init` (object initializers) are exempt.
private void validateModuleInitFunction(BLangFunction funcNode) {
    if (funcNode.attachedFunction || !Names.USER_DEFINED_INIT_SUFFIX.value.equals(funcNode.name.value)) {
        return;
    }
    if (Symbols.isPublic(funcNode.symbol)) {
        this.dlog.error(funcNode.pos, DiagnosticErrorCode.MODULE_INIT_CANNOT_BE_PUBLIC);
    }
    if (!funcNode.requiredParams.isEmpty() || funcNode.restParam != null) {
        this.dlog.error(funcNode.pos, DiagnosticErrorCode.MODULE_INIT_CANNOT_HAVE_PARAMS);
    }
    types.validateErrorOrNilReturn(funcNode, DiagnosticErrorCode.MODULE_INIT_RETURN_SHOULD_BE_ERROR_OR_NIL);
}

// Extracts the error component of a type: the type itself for errors, the
// error type for readonly, and for unions the union of member error components.
// Type references are unwrapped first. Returns semanticError when the type has
// no error component (or is null).
private BType getErrorTypes(BType bType) {
    if (bType == null) {
        return symTable.semanticError;
    }
    BType errorType = symTable.semanticError;
    int tag = bType.tag;
    if (tag == TypeTags.TYPEREFDESC) {
        return getErrorTypes(Types.getReferredType(bType));
    }
    if (tag == TypeTags.ERROR) {
        errorType = bType;
    } else if (tag == TypeTags.READONLY) {
        errorType = symTable.errorType;
    } else if (tag == TypeTags.UNION) {
        LinkedHashSet<BType> errTypes = new LinkedHashSet<>();
        Set<BType> memTypes = ((BUnionType) bType).getMemberTypes();
        for (BType memType : memTypes) {
            BType memErrType = getErrorTypes(memType);
            if (memErrType != symTable.semanticError) {
                errTypes.add(memErrType);
            }
        }
        if (!errTypes.isEmpty()) {
            // A single error member stays as-is; multiple members form a union.
            errorType = errTypes.size() == 1 ? errTypes.iterator().next() : BUnionType.create(null, errTypes);
        }
    }
    return errorType;
}
/**
 * This class contains the state machines for a set of workers.
 */
private static class WorkerActionSystem {
    // All state machines whose construction has completed.
    // NOTE(review): field name has a typo ("finshed"); left unchanged because
    // it is referenced throughout the enclosing analyzer.
    public List<WorkerActionStateMachine> finshedWorkers = new ArrayList<>();
    // Machines still being built while their worker body is analyzed.
    private Stack<WorkerActionStateMachine> workerActionStateMachines = new Stack<>();
    // Symbol environment each interaction action was recorded in (identity-keyed).
    private Map<BLangNode, SymbolEnv> workerInteractionEnvironments = new IdentityHashMap<>();
    private boolean hasErrors = false;

    // Begins recording actions for a new worker.
    public void startWorkerActionStateMachine(String workerId, Location pos, BLangFunction node) {
        workerActionStateMachines.push(new WorkerActionStateMachine(pos, workerId, node));
    }

    // Finishes the current worker and moves its machine to the finished list.
    public void endWorkerActionStateMachine() {
        finshedWorkers.add(workerActionStateMachines.pop());
    }

    // Records an interaction action for the worker currently being built.
    public void addWorkerAction(BLangNode action) {
        this.workerActionStateMachines.peek().actions.add(action);
    }

    // Looks up a finished worker's machine by id; the id is expected to exist.
    public WorkerActionStateMachine find(String workerId) {
        for (WorkerActionStateMachine worker : this.finshedWorkers) {
            if (worker.workerId.equals(workerId)) {
                return worker;
            }
        }
        throw new AssertionError("Reference to non existing worker " + workerId);
    }

    // True when every finished worker's machine has consumed all its actions.
    public boolean everyoneDone() {
        return this.finshedWorkers.stream().allMatch(WorkerActionStateMachine::done);
    }

    // Position of the first worker, used to anchor system-wide diagnostics.
    public Location getRootPosition() {
        return this.finshedWorkers.iterator().next().pos;
    }

    @Override
    public String toString() {
        return this.finshedWorkers.toString();
    }

    // Id of the worker whose machine is currently being built.
    public String currentWorkerId() {
        return workerActionStateMachines.peek().workerId;
    }

    // Records an action together with the environment it occurred in.
    public void addWorkerAction(BLangNode action, SymbolEnv env) {
        addWorkerAction(action);
        this.workerInteractionEnvironments.put(action, env);
    }

    // Environment an action was recorded in; null if never registered with one.
    private SymbolEnv getActionEnvironment(BLangNode currentAction) {
        return workerInteractionEnvironments.get(currentAction);
    }
}
/**
 * This class represents a state machine to maintain the state of the send/receive
 * actions of a worker.
 */
private static class WorkerActionStateMachine {
    private static final String WORKER_SM_FINISHED = "FINISHED";

    // Index of the next unconsumed action; equals actions.size() when done.
    public int currentState;
    public List<BLangNode> actions = new ArrayList<>();
    public Location pos;
    public String workerId;
    public BLangFunction node;

    public WorkerActionStateMachine(Location pos, String workerId, BLangFunction node) {
        this.pos = pos;
        this.workerId = workerId;
        this.node = node;
    }

    // True when every recorded action has been consumed.
    public boolean done() {
        return this.actions.size() == this.currentState;
    }

    // The next action awaiting execution; callers must check done() first.
    public BLangNode currentAction() {
        return this.actions.get(this.currentState);
    }

    // True when the current action is a receive expecting data from the given worker.
    public boolean currentIsReceive(String sourceWorkerId) {
        if (this.done()) {
            return false;
        }
        BLangNode action = this.currentAction();
        // By elimination: not a send, sync-send, or wait means it is a receive.
        return !isWorkerSend(action) && !isWorkerSyncSend(action) && !isWaitAction(action)
                && ((BLangWorkerReceive) action).workerIdentifier.value.equals(sourceWorkerId);
    }

    // Consumes the current action.
    public void next() {
        this.currentState++;
    }

    @Override
    public String toString() {
        if (this.done()) {
            return WORKER_SM_FINISHED;
        } else {
            BLangNode action = this.currentAction();
            if (isWorkerSend(action)) {
                return ((BLangWorkerSend) action).toActionString();
            } else if (isWorkerSyncSend(action)) {
                return ((BLangWorkerSyncSendExpr) action).toActionString();
            } else if (isWaitAction(action)) {
                return action.toString();
            } else {
                return ((BLangWorkerReceive) action).toActionString();
            }
        }
    }
}
/**
 * Builds the canonical name of the channel used for a worker interaction,
 * in the form {@code <source>-><target>}.
 *
 * @param source the sending worker's id
 * @param target the receiving worker's id
 * @return the channel name
 */
public static String generateChannelName(String source, String target) {
    return String.join("->", source, target);
}
// Walks up the environment chain to the nearest enclosing class definition.
// NOTE(review): assumes a class definition exists on the chain — if none does,
// this dereferences a null enclEnv; confirm all callers run inside a class.
private BLangNode getEnclosingClass(SymbolEnv env) {
    BLangNode node = env.node;
    while (node.getKind() != NodeKind.CLASS_DEFN) {
        env = env.enclEnv;
        node = env.node;
    }
    return node;
}
// Validates an invocation appearing inside a match guard: when the matched
// expression is mutable, only isolated calls with immutable arguments are
// allowed (so the guard cannot mutate the value being matched).
private void validateInvocationInMatchGuard(BLangInvocation invocation) {
    BLangExpression matchedExpr = getMatchedExprIfCalledInMatchGuard(invocation);
    if (matchedExpr == null) {
        return;
    }
    BType matchedExprType = matchedExpr.getBType();
    // Immutable matched values cannot be affected by the guard; nothing to check.
    if (types.isInherentlyImmutableType(matchedExprType) ||
            Symbols.isFlagOn(matchedExprType.flags, Flags.READONLY)) {
        return;
    }
    BSymbol invocationSymbol = invocation.symbol;
    if (invocationSymbol == null) {
        // No symbol: this may be the implicit invocation of `new stream(...)`;
        // validate the stream implementor argument instead.
        BLangNode parent = invocation.parent;
        if (parent == null || parent.getKind() != NodeKind.TYPE_INIT_EXPR) {
            return;
        }
        BLangTypeInit newExpr = (BLangTypeInit) parent;
        if (newExpr.getBType().tag != TypeTags.STREAM) {
            return;
        }
        List<BLangExpression> argsExpr = newExpr.argsExpr;
        if (argsExpr.isEmpty()) {
            return;
        }
        BLangExpression streamImplementorExpr = argsExpr.get(0);
        BType type = streamImplementorExpr.getBType();
        if (!types.isInherentlyImmutableType(type) && !Symbols.isFlagOn(type.flags, Flags.READONLY)) {
            dlog.error(streamImplementorExpr.pos,
                    DiagnosticErrorCode.INVALID_CALL_WITH_MUTABLE_ARGS_IN_MATCH_GUARD);
        }
        return;
    }
    long flags = invocationSymbol.flags;
    boolean methodCall = Symbols.isFlagOn(flags, Flags.ATTACHED);
    // A method call is non-isolated when either the method or its owner is not isolated.
    boolean callsNonIsolatedFunction = !Symbols.isFlagOn(flags, Flags.ISOLATED) ||
            (methodCall && !Symbols.isFlagOn(invocationSymbol.owner.flags, Flags.ISOLATED));
    if (callsNonIsolatedFunction) {
        dlog.error(invocation.pos, DiagnosticErrorCode.INVALID_NON_ISOLATED_CALL_IN_MATCH_GUARD);
    }
    // Every argument (required and rest) must be immutable.
    List<BLangExpression> args = new ArrayList<>(invocation.requiredArgs);
    args.addAll(invocation.restArgs);
    for (BLangExpression arg : args) {
        BType type = arg.getBType();
        if (type != symTable.semanticError &&
                !types.isInherentlyImmutableType(type) &&
                !Symbols.isFlagOn(type.flags, Flags.READONLY)) {
            dlog.error(arg.pos, DiagnosticErrorCode.INVALID_CALL_WITH_MUTABLE_ARGS_IN_MATCH_GUARD);
        }
    }
}
// Walks up the parent chain to decide whether the invocation occurs inside a
// match guard; if so, returns the match statement's matched expression,
// otherwise null. The walk stops at function boundaries, and an enclosing
// invocation breaks the chain unless this node is its receiver expression.
private BLangExpression getMatchedExprIfCalledInMatchGuard(BLangInvocation invocation) {
    BLangNode prevParent = invocation;
    BLangNode parent = invocation.parent;
    boolean encounteredMatchGuard = false;
    while (parent != null) {
        NodeKind parentKind = parent.getKind();
        switch (parentKind) {
            case LAMBDA:
            case FUNCTION:
            case RESOURCE_FUNC:
                // Crossed into an enclosing function without seeing a match guard.
                return null;
            case MATCH_CLAUSE:
                if (encounteredMatchGuard) {
                    // clause.parent is the match statement that owns the matched expr.
                    return ((BLangMatchStatement) parent.parent).expr;
                }
                return null;
            case MATCH_GUARD:
                encounteredMatchGuard = true;
                break;
            case INVOCATION:
                BLangInvocation parentInvocation = (BLangInvocation) parent;
                if (parentInvocation.langLibInvocation || prevParent != parentInvocation.expr) {
                    return null;
                }
        }
        prevParent = parent;
        parent = parent.parent;
    }
    return null;
}
// Tracks whether analysis is currently inside a default-value expression,
// and which flavor, since different restrictions apply in each position.
private enum DefaultValueState {
    NOT_IN_DEFAULT_VALUE,
    RECORD_FIELD_DEFAULT,
    OBJECT_FIELD_INITIALIZER,
    FUNCTION_IN_DEFAULT_VALUE
}
/**
 * Mutable state threaded through a single analysis pass.
 *
 * @since 2.0.0
 */
public static class AnalyzerData {
    SymbolEnv env;
    // Parent of the node currently being analyzed (maintained by analyzeExpr).
    BLangNode parent;
    // Depth of enclosing loops; >0 permits break/continue.
    int loopCount;
    boolean loopAlterNotAllowed;
    boolean inInternallyDefinedBlockStmt;
    // Monotonic counter of state-machine moves; used to detect worker deadlock.
    int workerSystemMovementSequence;
    Stack<WorkerActionSystem> workerActionSystemStack = new Stack<>();
    Map<BSymbol, Set<BLangNode>> workerReferences = new HashMap<>();
    // Transaction bookkeeping: nesting depth and commit/rollback occurrence counts.
    int transactionCount;
    boolean withinTransactionScope;
    int commitCount;
    int rollbackCount;
    boolean commitRollbackAllowed;
    int commitCountWithinBlock;
    int rollbackCountWithinBlock;
    Stack<Boolean> loopWithinTransactionCheckStack = new Stack<>();
    Stack<Boolean> returnWithinTransactionCheckStack = new Stack<>();
    Stack<Boolean> transactionalFuncCheckStack = new Stack<>();
    boolean withinLockBlock;
    // Error-handling bookkeeping for fail/on-fail analysis.
    boolean failureHandled;
    boolean failVisited;
    // Query-expression context flags.
    boolean queryToTableWithKey;
    boolean withinQuery;
    boolean queryToMap;
    // Per-function stacks of possible return and error types.
    Stack<LinkedHashSet<BType>> returnTypes = new Stack<>();
    Stack<LinkedHashSet<BType>> errorTypes = new Stack<>();
    DefaultValueState defaultValueState = DefaultValueState.NOT_IN_DEFAULT_VALUE;
}
}
|
nit: you don't need to specify `this`. There is no other variable declared in this scope with the same name.
|
/**
 * Gets the maximum number of retries allowed by this policy.
 *
 * @return the configured maximum retry count
 */
public int maxRetryCount() {
    return this.maxRetryCount;
}
|
return this.maxRetryCount;
|
/**
 * Gets the maximum number of retries allowed by this policy.
 *
 * @return the configured maximum retry count
 */
public int maxRetryCount() {
    return maxRetryCount;
}
|
/**
 * An abstract representation of a policy to govern retrying of messaging operations.
 * Concrete subclasses supply the backoff calculation via
 * {@link #calculateNextRetryInterval(Exception, Duration, int, int)}.
 */
abstract class Retry {
    public static final Duration DEFAULT_RETRY_MIN_BACKOFF = Duration.ofSeconds(0);
    public static final Duration DEFAULT_RETRY_MAX_BACKOFF = Duration.ofSeconds(30);
    public static final int DEFAULT_MAX_RETRY_COUNT = 10;

    private final AtomicInteger retryCount = new AtomicInteger();
    private final int maxRetryCount;

    /**
     * Creates a retry policy permitting at most {@code maxRetryCount} retries.
     *
     * @param maxRetryCount The maximum number of retries allowed.
     */
    public Retry(int maxRetryCount) {
        this.maxRetryCount = maxRetryCount;
    }

    /**
     * Check if the existing exception is a retriable exception.
     *
     * @param exception An exception that was observed for the operation to be retried.
     * @return true if the exception is a retriable exception, otherwise false.
     */
    public static boolean isRetriableException(Exception exception) {
        return (exception instanceof AmqpException) && ((AmqpException) exception).isTransient();
    }

    /**
     * Get a Retry that performs no retries.
     *
     * @return Retry configured with zero allowed retries.
     */
    public static Retry getNoRetry() {
        return new ExponentialRetry(Duration.ZERO, Duration.ZERO, 0);
    }

    /**
     * Get default configured Retry.
     *
     * @return Retry which has all default property set up.
     */
    public static Retry getDefaultRetry() {
        return new ExponentialRetry(DEFAULT_RETRY_MIN_BACKOFF, DEFAULT_RETRY_MAX_BACKOFF, DEFAULT_MAX_RETRY_COUNT);
    }

    /**
     * Increase one count to current count value.
     *
     * @return current AtomicInteger value.
     */
    public int incrementRetryCount() {
        return retryCount.incrementAndGet();
    }

    /**
     * Get the current retried count.
     *
     * @return current AtomicInteger value.
     */
    public int getRetryCount() {
        return retryCount.get();
    }

    /**
     * Reset AtomicInteger to value zero.
     */
    public void resetRetryInterval() {
        retryCount.set(0);
    }

    /**
     * Get the maximum allowed retry count.
     *
     * @return maximum allowed retry count value.
     */
    public int maxRetryCount() {
        // Accessor added: the javadoc above previously existed with no method under it.
        return maxRetryCount;
    }

    /**
     * Calculates the amount of time to delay before the next retry attempt.
     *
     * @param lastException The last exception that was observed for the operation to be retried.
     * @param remainingTime The amount of time remaining for the cumulative timeout across retry attempts.
     * @return The amount of time to delay before retrying the associated operation; if {@code null},
     *         then the operation is no longer eligible to be retried.
     */
    public Duration getNextRetryInterval(Exception lastException, Duration remainingTime) {
        int baseWaitTime = 0;
        // `instanceof` is false for null, so the former explicit null check was redundant.
        if (!(lastException instanceof AmqpException)) {
            return null;
        }
        if (retryCount.get() >= maxRetryCount) {
            return null;
        }
        // Back off extra when the server reported itself busy.
        if (((AmqpException) lastException).getErrorCondition() == ErrorCondition.SERVER_BUSY_ERROR) {
            baseWaitTime += ClientConstants.SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS;
        }
        return this.calculateNextRetryInterval(lastException, remainingTime, baseWaitTime, this.getRetryCount());
    }

    /**
     * Allows a concrete retry policy implementation to offer a base retry interval to be used in
     * the calculations performed by 'Retry.GetNextRetryInterval'.
     *
     * @param lastException The last exception that was observed for the operation to be retried.
     * @param remainingTime The amount of time remaining for the cumulative timeout across retry attempts.
     * @param baseWaitSeconds The number of seconds to base the suggested retry interval on;
     *     this should be used as the minimum interval returned under normal circumstances.
     * @param retryCount The number of retries that have already been attempted.
     * @return The amount of time to delay before retrying the associated operation; if {@code null},
     *         then the operation is no longer eligible to be retried.
     */
    protected abstract Duration calculateNextRetryInterval(Exception lastException,
                                                           Duration remainingTime,
                                                           int baseWaitSeconds,
                                                           int retryCount);
}
|
/*
 * NOTE(review): this class declares a `protected abstract` method, so the
 * declaration should read `abstract class Retry` — confirm the `abstract`
 * modifier was lost in extraction.
 */
class Retry {
    public static final Duration DEFAULT_RETRY_MIN_BACKOFF = Duration.ofSeconds(0);
    public static final Duration DEFAULT_RETRY_MAX_BACKOFF = Duration.ofSeconds(30);
    public static final int DEFAULT_MAX_RETRY_COUNT = 10;
    private final AtomicInteger retryCount = new AtomicInteger();
    private final int maxRetryCount;
    /**
     * Creates a new instance of Retry with the maximum retry count of {@code maxRetryCount}
     *
     * @param maxRetryCount The maximum number of retries allowed.
     */
    public Retry(int maxRetryCount) {
        this.maxRetryCount = maxRetryCount;
    }
    /**
     * Check if the existing exception is a retriable exception.
     *
     * @param exception An exception that was observed for the operation to be retried.
     * @return true if the exception is a retriable exception, otherwise false.
     */
    public static boolean isRetriableException(Exception exception) {
        return (exception instanceof AmqpException) && ((AmqpException) exception).isTransient();
    }
    /**
     * Get a Retry that performs no retries.
     *
     * @return Retry which has all default property set up.
     */
    public static Retry getNoRetry() {
        return new ExponentialRetry(Duration.ZERO, Duration.ZERO, 0);
    }
    /**
     * Get default configured Retry.
     *
     * @return Retry which has all default property set up.
     */
    public static Retry getDefaultRetry() {
        return new ExponentialRetry(DEFAULT_RETRY_MIN_BACKOFF, DEFAULT_RETRY_MAX_BACKOFF, DEFAULT_MAX_RETRY_COUNT);
    }
    /**
     * Increase one count to current count value.
     *
     * @return current AtomicInteger value.
     */
    public int incrementRetryCount() {
        return retryCount.incrementAndGet();
    }
    /**
     * Get the current retried count.
     *
     * @return current AtomicInteger value.
     */
    public int getRetryCount() {
        return retryCount.get();
    }
    /**
     * Reset AtomicInteger to value zero.
     */
    public void resetRetryInterval() {
        retryCount.set(0);
    }
    /*
     * NOTE(review): orphaned Javadoc — the comment below documents a
     * maxRetryCount() accessor ("Get the maximum allowed retry count")
     * that does not exist in this class. Add the accessor or remove it.
     */
    /**
     * Get the maximum allowed retry count.
     *
     * @return maximum allowed retry count value.
     */
    /**
     * Calculates the amount of time to delay before the next retry attempt
     *
     * @param lastException The last exception that was observed for the operation to be retried.
     * @param remainingTime The amount of time remaining for the cumulative timeout across retry attempts.
     * @return The amount of time to delay before retrying the associated operation; if {@code null},
     * then the operation is no longer eligible to be retried.
     */
    public Duration getNextRetryInterval(Exception lastException, Duration remainingTime) {
        int baseWaitTime = 0;
        // Only transient AMQP exceptions are worth retrying.
        if (!isRetriableException(lastException)) {
            return null;
        }
        if (retryCount.get() >= maxRetryCount) {
            return null;
        }
        // Back off extra when the server reported itself busy.
        if (((AmqpException) lastException).getErrorCondition() == ErrorCondition.SERVER_BUSY_ERROR) {
            baseWaitTime += ClientConstants.SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS;
        }
        return this.calculateNextRetryInterval(lastException, remainingTime, baseWaitTime, this.getRetryCount());
    }
    /**
     * Allows a concrete retry policy implementation to offer a base retry interval to be used in
     * the calculations performed by 'Retry.GetNextRetryInterval'.
     *
     * @param lastException The last exception that was observed for the operation to be retried.
     * @param remainingTime The amount of time remaining for the cumulative timeout across retry attempts.
     * @param baseWaitSeconds The number of seconds to base the suggested retry interval on;
     * this should be used as the minimum interval returned under normal circumstances.
     * @param retryCount The number of retries that have already been attempted.
     * @return The amount of time to delay before retrying the associated operation; if {@code null},
     * then the operation is no longer eligible to be retried.
     */
    protected abstract Duration calculateNextRetryInterval(Exception lastException, Duration remainingTime,
                                                           int baseWaitSeconds, int retryCount);
}
```suggestion return exprNode == null ? Optional.empty() : exprNode; ``` Is further simplification possible along these lines? (Note: `exprNode.isPresent()` cannot be used here — it would throw an NPE when `exprNode` itself is `null`, so the null check has to stay.)
|
/**
 * Finds the expression node enclosing the given node.
 *
 * @param node node to resolve; may be {@code null}
 * @return the enclosing expression, or {@link Optional#empty()} when the node is
 *         {@code null} or the transformer yields no result
 */
public Optional<ExpressionNode> findExpression(Node node) {
    if (node == null) {
        return Optional.empty();
    }
    // Some transform overloads may return null instead of an empty Optional;
    // normalize that here so callers never see null.
    Optional<ExpressionNode> exprNode = node.apply(this);
    return exprNode == null ? Optional.empty() : exprNode;
}
|
return exprNode;
|
/**
 * Finds the expression node enclosing the given node.
 *
 * @param node node to resolve; may be {@code null}
 * @return the enclosing expression, or {@link Optional#empty()} when the node is
 *         {@code null} or the transformer yields {@code null}
 */
public Optional<ExpressionNode> findExpression(Node node) {
    if (node == null) {
        return Optional.empty();
    }
    // Normalize a null result from a transform overload to an empty Optional.
    Optional<ExpressionNode> exprNode = node.apply(this);
    return exprNode == null ? Optional.empty() : exprNode;
}
|
class MatchedExpressionNodeResolver extends NodeTransformer<Optional<ExpressionNode>> {
// The node whose enclosing expression is being resolved.
Node matchedNode;

public MatchedExpressionNodeResolver(Node matchedNode) {
    this.matchedNode = matchedNode;
}

/**
 * Given the node, this method returns the optional expression in which the provided node is located.
 *
 * @param node Node
 * @return Optional enclosing expression node
 */
@Override
protected Optional<ExpressionNode> transformSyntaxNode(Node node) {
    // Default behavior: keep walking up the parent chain until a specific
    // transform overload produces an expression or the root is reached.
    if (node.parent() == null) {
        return Optional.empty();
    }
    return node.parent().apply(this);
}

// For an assignment, the enclosing expression is its right-hand side.
@Override
public Optional<ExpressionNode> transform(AssignmentStatementNode assignmentStatementNode) {
    return Optional.of(assignmentStatementNode.expression());
}

// For a module-level variable, the enclosing expression is its initializer (if any).
@Override
public Optional<ExpressionNode> transform(ModuleVariableDeclarationNode moduleVariableDeclarationNode) {
    return moduleVariableDeclarationNode.initializer();
}

// For a local variable, the enclosing expression is its initializer (if any).
@Override
public Optional<ExpressionNode> transform(VariableDeclarationNode variableDeclarationNode) {
    return variableDeclarationNode.initializer();
}

// For a positional argument, the enclosing expression is the argument expression.
@Override
public Optional<ExpressionNode> transform(PositionalArgumentNode positionalArgumentNode) {
    return Optional.of(positionalArgumentNode.expression());
}

// For a named argument, the enclosing expression is the argument expression.
@Override
public Optional<ExpressionNode> transform(NamedArgumentNode namedArgumentNode) {
    return Optional.of(namedArgumentNode.expression());
}

// For a mapping-constructor field, the enclosing expression is its value (if any).
@Override
public Optional<ExpressionNode> transform(SpecificFieldNode specificFieldNode) {
    return specificFieldNode.valueExpr();
}

// For a let variable declaration, the enclosing expression is its bound expression.
@Override
public Optional<ExpressionNode> transform(LetVariableDeclarationNode letVariableDeclarationNode) {
    return Optional.of(letVariableDeclarationNode.expression());
}

// For a from clause, the enclosing expression is the iterated expression.
@Override
public Optional<ExpressionNode> transform(FromClauseNode fromClauseNode) {
    return Optional.of(fromClauseNode.expression());
}
public Optional<ExpressionNode> transform(BracedExpressionNode node) {
return Optional.of(node);
}
public Optional<ExpressionNode> transform(ImplicitNewExpressionNode implicitNewExpressionNode) {
return Optional.of(implicitNewExpressionNode);
}
@Override
public Optional<ExpressionNode> transform(ExplicitNewExpressionNode explicitNewExpressionNode) {
return Optional.of(explicitNewExpressionNode);
}
@Override
public Optional<ExpressionNode> transform(ListConstructorExpressionNode listConstructorExpressionNode) {
Optional<Node> expressionNode = listConstructorExpressionNode.expressions().stream()
.filter(expression -> this.matchedNode == expression)
.findFirst();
if (expressionNode.isPresent() && expressionNode.get() instanceof ExpressionNode) {
return Optional.of((ExpressionNode) expressionNode.get());
}
return Optional.empty();
}
}
|
/**
 * Resolves the expression node that encloses a given syntax tree node.
 *
 * <p>Each {@code transform} overload extracts the expression owned by the
 * corresponding parent construct; {@code transformSyntaxNode} climbs to the
 * parent when no specific overload matches, terminating at the tree root.
 */
class MatchedExpressionNodeResolver extends NodeTransformer<Optional<ExpressionNode>> {
// Node whose enclosing expression is being resolved; set once in the constructor.
Node matchedNode;
public MatchedExpressionNodeResolver(Node matchedNode) {
this.matchedNode = matchedNode;
}
/**
 * Given the node, this method returns the optional expression in which the provided node is located.
 *
 * @param node Node
 * @return Optional enclosing expression node
 */
@Override
protected Optional<ExpressionNode> transformSyntaxNode(Node node) {
// No dedicated overload matched: delegate to the parent, or stop at the root.
if (node.parent() == null) {
return Optional.empty();
}
return node.parent().apply(this);
}
@Override
public Optional<ExpressionNode> transform(AssignmentStatementNode assignmentStatementNode) {
return Optional.of(assignmentStatementNode.expression());
}
@Override
public Optional<ExpressionNode> transform(ModuleVariableDeclarationNode moduleVariableDeclarationNode) {
// Initializer is optional for module-level variable declarations.
return moduleVariableDeclarationNode.initializer();
}
@Override
public Optional<ExpressionNode> transform(VariableDeclarationNode variableDeclarationNode) {
// Initializer is optional for local variable declarations.
return variableDeclarationNode.initializer();
}
@Override
public Optional<ExpressionNode> transform(PositionalArgumentNode positionalArgumentNode) {
return Optional.of(positionalArgumentNode.expression());
}
@Override
public Optional<ExpressionNode> transform(NamedArgumentNode namedArgumentNode) {
return Optional.of(namedArgumentNode.expression());
}
@Override
public Optional<ExpressionNode> transform(SpecificFieldNode specificFieldNode) {
return specificFieldNode.valueExpr();
}
@Override
public Optional<ExpressionNode> transform(LetVariableDeclarationNode letVariableDeclarationNode) {
return Optional.of(letVariableDeclarationNode.expression());
}
@Override
public Optional<ExpressionNode> transform(FromClauseNode fromClauseNode) {
return Optional.of(fromClauseNode.expression());
}
// NOTE(review): missing @Override compared to the sibling overloads — confirm
// NodeTransformer declares transform(BracedExpressionNode).
public Optional<ExpressionNode> transform(BracedExpressionNode node) {
return Optional.of(node);
}
// NOTE(review): missing @Override compared to the sibling overloads — confirm
// NodeTransformer declares transform(ImplicitNewExpressionNode).
public Optional<ExpressionNode> transform(ImplicitNewExpressionNode implicitNewExpressionNode) {
return Optional.of(implicitNewExpressionNode);
}
@Override
public Optional<ExpressionNode> transform(ExplicitNewExpressionNode explicitNewExpressionNode) {
return Optional.of(explicitNewExpressionNode);
}
@Override
public Optional<ExpressionNode> transform(ListConstructorExpressionNode listConstructorExpressionNode) {
// Locate the list member that is the matched node itself (reference identity).
Optional<Node> expressionNode = listConstructorExpressionNode.expressions().stream()
.filter(expression -> this.matchedNode == expression)
.findFirst();
// Members of a list constructor may not be expressions; only return those that are.
if (expressionNode.isPresent() && expressionNode.get() instanceof ExpressionNode) {
return Optional.of((ExpressionNode) expressionNode.get());
}
return Optional.empty();
}
}
|
Please also fix the same issue further down in `getShardIterator` (https://github.com/apache/flink/pull/7706/files#diff-ed02b5340df65de06c19eb93fe90a920L340)
|
/**
 * Fetches a batch of records for the given shard iterator, retrying recoverable
 * {@code SdkClientException}s with full-jitter exponential backoff up to
 * {@code getRecordsMaxRetries} times.
 *
 * @param shardIterator the shard iterator to read from
 * @param maxRecordsToGet the maximum number of records to request
 * @return the successful getRecords result; never {@code null}
 * @throws InterruptedException if interrupted while sleeping between retries
 */
public GetRecordsResult getRecords(String shardIterator, int maxRecordsToGet) throws InterruptedException {
    final GetRecordsRequest getRecordsRequest = new GetRecordsRequest();
    getRecordsRequest.setShardIterator(shardIterator);
    getRecordsRequest.setLimit(maxRecordsToGet);

    GetRecordsResult getRecordsResult = null;
    int retryCount = 0;
    while (retryCount <= getRecordsMaxRetries && getRecordsResult == null) {
        try {
            getRecordsResult = kinesisClient.getRecords(getRecordsRequest);
        } catch (SdkClientException ex) {
            if (isRecoverableSdkClientException(ex)) {
                // Only recoverable exceptions consume a retry; back off with jitter first.
                long backoffMillis = fullJitterBackoff(
                    getRecordsBaseBackoffMillis, getRecordsMaxBackoffMillis, getRecordsExpConstant, retryCount++);
                // Parameterized logging avoids eager string concatenation when WARN is disabled.
                LOG.warn("Got recoverable SdkClientException. Backing off for {} millis ({}: {})",
                    backoffMillis, ex.getClass().getName(), ex.getMessage());
                Thread.sleep(backoffMillis);
            } else {
                // Non-recoverable client errors are propagated to the caller immediately.
                throw ex;
            }
        }
    }

    // Null here means every attempt failed with a recoverable error.
    if (getRecordsResult == null) {
        throw new RuntimeException("Retries exceeded for getRecords operation - all " + getRecordsMaxRetries +
            " retry attempts failed.");
    }
    return getRecordsResult;
}
|
throw new RuntimeException("Retries exceeded for getRecords operation - all " + getRecordsMaxRetries +
|
/**
 * Fetches a batch of records for the given shard iterator, retrying recoverable
 * SdkClientExceptions with full-jitter exponential backoff up to
 * getRecordsMaxRetries times.
 *
 * @param shardIterator the shard iterator to read from
 * @param maxRecordsToGet the maximum number of records to request
 * @return the successful getRecords result; never null
 * @throws InterruptedException if interrupted while sleeping between retries
 */
public GetRecordsResult getRecords(String shardIterator, int maxRecordsToGet) throws InterruptedException {
final GetRecordsRequest getRecordsRequest = new GetRecordsRequest();
getRecordsRequest.setShardIterator(shardIterator);
getRecordsRequest.setLimit(maxRecordsToGet);
GetRecordsResult getRecordsResult = null;
int retryCount = 0;
// Loop until a result is obtained or the retry budget is exhausted.
while (retryCount <= getRecordsMaxRetries && getRecordsResult == null) {
try {
getRecordsResult = kinesisClient.getRecords(getRecordsRequest);
} catch (SdkClientException ex) {
if (isRecoverableSdkClientException(ex)) {
// Only recoverable exceptions consume a retry; back off with jitter first.
long backoffMillis = fullJitterBackoff(
getRecordsBaseBackoffMillis, getRecordsMaxBackoffMillis, getRecordsExpConstant, retryCount++);
LOG.warn("Got recoverable SdkClientException. Backing off for "
+ backoffMillis + " millis (" + ex.getClass().getName() + ": " + ex.getMessage() + ")");
Thread.sleep(backoffMillis);
} else {
// Non-recoverable client errors are propagated to the caller immediately.
throw ex;
}
}
}
// Null here means every attempt failed with a recoverable error.
if (getRecordsResult == null) {
throw new RuntimeException("Retries exceeded for getRecords operation - all " + getRecordsMaxRetries +
" retry attempts failed.");
}
return getRecordsResult;
}
|
class KinesisProxy implements KinesisProxyInterface {
private static final Logger LOG = LoggerFactory.getLogger(KinesisProxy.class);
/** The actual Kinesis client from the AWS SDK that we will be using to make calls. */
private final AmazonKinesis kinesisClient;
/** Random seed used to calculate backoff jitter for Kinesis operations. */
private static final Random seed = new Random();
/** Base backoff millis for the list shards operation. */
private final long listShardsBaseBackoffMillis;
/** Maximum backoff millis for the list shards operation. */
private final long listShardsMaxBackoffMillis;
/** Exponential backoff power constant for the list shards operation. */
private final double listShardsExpConstant;
/** Maximum retry attempts for the list shards operation. */
private final int listShardsMaxRetries;
/** Base backoff millis for the get records operation. */
private final long getRecordsBaseBackoffMillis;
/** Maximum backoff millis for the get records operation. */
private final long getRecordsMaxBackoffMillis;
/** Exponential backoff power constant for the get records operation. */
private final double getRecordsExpConstant;
/** Maximum retry attempts for the get records operation. */
private final int getRecordsMaxRetries;
/** Base backoff millis for the get shard iterator operation. */
private final long getShardIteratorBaseBackoffMillis;
/** Maximum backoff millis for the get shard iterator operation. */
private final long getShardIteratorMaxBackoffMillis;
/** Exponential backoff power constant for the get shard iterator operation. */
private final double getShardIteratorExpConstant;
/** Maximum retry attempts for the get shard iterator operation. */
private final int getShardIteratorMaxRetries;
/* Backoff millis for the describe stream operation. */
private final long describeStreamBaseBackoffMillis;
/* Maximum backoff millis for the describe stream operation. */
private final long describeStreamMaxBackoffMillis;
/* Exponential backoff power constant for the describe stream operation. */
private final double describeStreamExpConstant;
/**
* Create a new KinesisProxy based on the supplied configuration properties.
*
* @param configProps configuration properties containing AWS credential and AWS region info
*/
protected KinesisProxy(Properties configProps) {
checkNotNull(configProps);
KinesisConfigUtil.backfillConsumerKeys(configProps);
this.kinesisClient = createKinesisClient(configProps);
this.listShardsBaseBackoffMillis = Long.valueOf(
configProps.getProperty(
ConsumerConfigConstants.LIST_SHARDS_BACKOFF_BASE,
Long.toString(ConsumerConfigConstants.DEFAULT_LIST_SHARDS_BACKOFF_BASE)));
this.listShardsMaxBackoffMillis = Long.valueOf(
configProps.getProperty(
ConsumerConfigConstants.LIST_SHARDS_BACKOFF_MAX,
Long.toString(ConsumerConfigConstants.DEFAULT_LIST_SHARDS_BACKOFF_MAX)));
this.listShardsExpConstant = Double.valueOf(
configProps.getProperty(
ConsumerConfigConstants.LIST_SHARDS_BACKOFF_EXPONENTIAL_CONSTANT,
Double.toString(ConsumerConfigConstants.DEFAULT_LIST_SHARDS_BACKOFF_EXPONENTIAL_CONSTANT)));
this.listShardsMaxRetries = Integer.valueOf(
configProps.getProperty(
ConsumerConfigConstants.LIST_SHARDS_RETRIES,
Long.toString(ConsumerConfigConstants.DEFAULT_LIST_SHARDS_RETRIES)));
this.describeStreamBaseBackoffMillis = Long.valueOf(
configProps.getProperty(ConsumerConfigConstants.STREAM_DESCRIBE_BACKOFF_BASE,
Long.toString(ConsumerConfigConstants.DEFAULT_STREAM_DESCRIBE_BACKOFF_BASE)));
this.describeStreamMaxBackoffMillis = Long.valueOf(
configProps.getProperty(ConsumerConfigConstants.STREAM_DESCRIBE_BACKOFF_MAX,
Long.toString(ConsumerConfigConstants.DEFAULT_STREAM_DESCRIBE_BACKOFF_MAX)));
this.describeStreamExpConstant = Double.valueOf(
configProps.getProperty(ConsumerConfigConstants.STREAM_DESCRIBE_BACKOFF_EXPONENTIAL_CONSTANT,
Double.toString(ConsumerConfigConstants.DEFAULT_STREAM_DESCRIBE_BACKOFF_EXPONENTIAL_CONSTANT)));
this.getRecordsBaseBackoffMillis = Long.valueOf(
configProps.getProperty(
ConsumerConfigConstants.SHARD_GETRECORDS_BACKOFF_BASE,
Long.toString(ConsumerConfigConstants.DEFAULT_SHARD_GETRECORDS_BACKOFF_BASE)));
this.getRecordsMaxBackoffMillis = Long.valueOf(
configProps.getProperty(
ConsumerConfigConstants.SHARD_GETRECORDS_BACKOFF_MAX,
Long.toString(ConsumerConfigConstants.DEFAULT_SHARD_GETRECORDS_BACKOFF_MAX)));
this.getRecordsExpConstant = Double.valueOf(
configProps.getProperty(
ConsumerConfigConstants.SHARD_GETRECORDS_BACKOFF_EXPONENTIAL_CONSTANT,
Double.toString(ConsumerConfigConstants.DEFAULT_SHARD_GETRECORDS_BACKOFF_EXPONENTIAL_CONSTANT)));
this.getRecordsMaxRetries = Integer.valueOf(
configProps.getProperty(
ConsumerConfigConstants.SHARD_GETRECORDS_RETRIES,
Long.toString(ConsumerConfigConstants.DEFAULT_SHARD_GETRECORDS_RETRIES)));
this.getShardIteratorBaseBackoffMillis = Long.valueOf(
configProps.getProperty(
ConsumerConfigConstants.SHARD_GETITERATOR_BACKOFF_BASE,
Long.toString(ConsumerConfigConstants.DEFAULT_SHARD_GETITERATOR_BACKOFF_BASE)));
this.getShardIteratorMaxBackoffMillis = Long.valueOf(
configProps.getProperty(
ConsumerConfigConstants.SHARD_GETITERATOR_BACKOFF_MAX,
Long.toString(ConsumerConfigConstants.DEFAULT_SHARD_GETITERATOR_BACKOFF_MAX)));
this.getShardIteratorExpConstant = Double.valueOf(
configProps.getProperty(
ConsumerConfigConstants.SHARD_GETITERATOR_BACKOFF_EXPONENTIAL_CONSTANT,
Double.toString(ConsumerConfigConstants.DEFAULT_SHARD_GETITERATOR_BACKOFF_EXPONENTIAL_CONSTANT)));
this.getShardIteratorMaxRetries = Integer.valueOf(
configProps.getProperty(
ConsumerConfigConstants.SHARD_GETITERATOR_RETRIES,
Long.toString(ConsumerConfigConstants.DEFAULT_SHARD_GETITERATOR_RETRIES)));
}
/**
* Create the Kinesis client, using the provided configuration properties and default {@link ClientConfiguration}.
* Derived classes can override this method to customize the client configuration.
* @param configProps
* @return
*/
protected AmazonKinesis createKinesisClient(Properties configProps) {
ClientConfiguration awsClientConfig = new ClientConfigurationFactory().getConfig();
AWSUtil.setAwsClientConfigProperties(awsClientConfig, configProps);
return AWSUtil.createKinesisClient(configProps, awsClientConfig);
}
/**
* Creates a Kinesis proxy.
*
* @param configProps configuration properties
* @return the created kinesis proxy
*/
public static KinesisProxyInterface create(Properties configProps) {
return new KinesisProxy(configProps);
}
/**
* {@inheritDoc}
*/
@Override
/**
* {@inheritDoc}
*/
@Override
public GetShardListResult getShardList(Map<String, String> streamNamesWithLastSeenShardIds) throws InterruptedException {
GetShardListResult result = new GetShardListResult();
for (Map.Entry<String, String> streamNameWithLastSeenShardId : streamNamesWithLastSeenShardIds.entrySet()) {
String stream = streamNameWithLastSeenShardId.getKey();
String lastSeenShardId = streamNameWithLastSeenShardId.getValue();
result.addRetrievedShardsToStream(stream, getShardsOfStream(stream, lastSeenShardId));
}
return result;
}
/**
* {@inheritDoc}
*/
@Override
public String getShardIterator(StreamShardHandle shard, String shardIteratorType, @Nullable Object startingMarker) throws InterruptedException {
GetShardIteratorRequest getShardIteratorRequest = new GetShardIteratorRequest()
.withStreamName(shard.getStreamName())
.withShardId(shard.getShard().getShardId())
.withShardIteratorType(shardIteratorType);
switch (ShardIteratorType.fromValue(shardIteratorType)) {
case TRIM_HORIZON:
case LATEST:
break;
case AT_TIMESTAMP:
if (startingMarker instanceof Date) {
getShardIteratorRequest.setTimestamp((Date) startingMarker);
} else {
throw new IllegalArgumentException("Invalid object given for GetShardIteratorRequest() when ShardIteratorType is AT_TIMESTAMP. Must be a Date object.");
}
break;
case AT_SEQUENCE_NUMBER:
case AFTER_SEQUENCE_NUMBER:
if (startingMarker instanceof String) {
getShardIteratorRequest.setStartingSequenceNumber((String) startingMarker);
} else {
throw new IllegalArgumentException("Invalid object given for GetShardIteratorRequest() when ShardIteratorType is AT_SEQUENCE_NUMBER or AFTER_SEQUENCE_NUMBER. Must be a String.");
}
}
return getShardIterator(getShardIteratorRequest);
}
private String getShardIterator(GetShardIteratorRequest getShardIteratorRequest) throws InterruptedException {
GetShardIteratorResult getShardIteratorResult = null;
int retryCount = 0;
while (retryCount <= getShardIteratorMaxRetries && getShardIteratorResult == null) {
try {
getShardIteratorResult = kinesisClient.getShardIterator(getShardIteratorRequest);
} catch (AmazonServiceException ex) {
if (isRecoverableException(ex)) {
long backoffMillis = fullJitterBackoff(
getShardIteratorBaseBackoffMillis, getShardIteratorMaxBackoffMillis, getShardIteratorExpConstant, retryCount++);
LOG.warn("Got recoverable AmazonServiceException. Backing off for "
+ backoffMillis + " millis (" + ex.getErrorMessage() + ")");
Thread.sleep(backoffMillis);
} else {
throw ex;
}
}
}
if (getShardIteratorResult == null) {
throw new RuntimeException("Rate Exceeded for getShardIterator operation - all " + getShardIteratorMaxRetries +
" retry attempts returned ProvisionedThroughputExceededException.");
}
return getShardIteratorResult.getShardIterator();
}
/**
* Determines whether the exception is recoverable using exponential-backoff.
*
* @param ex Exception to inspect
* @return <code>true</code> if the exception can be recovered from, else
* <code>false</code>
*/
protected boolean isRecoverableSdkClientException(SdkClientException ex) {
if (ex instanceof AmazonServiceException) {
return KinesisProxy.isRecoverableException((AmazonServiceException) ex);
}
return false;
}
/**
* Determines whether the exception is recoverable using exponential-backoff.
*
* @param ex Exception to inspect
* @return <code>true</code> if the exception can be recovered from, else
* <code>false</code>
*/
protected static boolean isRecoverableException(AmazonServiceException ex) {
if (ex.getErrorType() == null) {
return false;
}
switch (ex.getErrorType()) {
case Client:
return ex instanceof ProvisionedThroughputExceededException;
case Service:
case Unknown:
return true;
default:
return false;
}
}
private List<StreamShardHandle> getShardsOfStream(String streamName, @Nullable String lastSeenShardId) throws InterruptedException {
List<StreamShardHandle> shardsOfStream = new ArrayList<>();
ListShardsResult listShardsResult;
String startShardToken = null;
do {
listShardsResult = listShards(streamName, lastSeenShardId, startShardToken);
if (listShardsResult == null) {
shardsOfStream.clear();
return shardsOfStream;
}
List<Shard> shards = listShardsResult.getShards();
for (Shard shard : shards) {
shardsOfStream.add(new StreamShardHandle(streamName, shard));
}
startShardToken = listShardsResult.getNextToken();
} while (startShardToken != null);
return shardsOfStream;
}
/**
* Get metainfo for a Kinesis stream, which contains information about which shards this Kinesis stream possess.
*
* <p>This method is using a "full jitter" approach described in AWS's article,
* <a href="https:
* This is necessary because concurrent calls will be made by all parallel subtask's fetcher. This
* jitter backoff approach will help distribute calls across the fetchers over time.
*
* @param streamName the stream to describe
* @param startShardId which shard to start with for this describe operation (earlier shard's infos will not appear in result)
* @return the result of the describe stream operation
*/
private ListShardsResult listShards(String streamName, @Nullable String startShardId,
@Nullable String startNextToken)
throws InterruptedException {
final ListShardsRequest listShardsRequest = new ListShardsRequest();
if (startNextToken == null) {
listShardsRequest.setExclusiveStartShardId(startShardId);
listShardsRequest.setStreamName(streamName);
} else {
listShardsRequest.setNextToken(startNextToken);
}
ListShardsResult listShardsResults = null;
int retryCount = 0;
while (retryCount <= listShardsMaxRetries && listShardsResults == null) {
try {
listShardsResults = kinesisClient.listShards(listShardsRequest);
} catch (LimitExceededException le) {
long backoffMillis = fullJitterBackoff(
listShardsBaseBackoffMillis, listShardsMaxBackoffMillis, listShardsExpConstant, retryCount++);
LOG.warn("Got LimitExceededException when listing shards from stream " + streamName
+ ". Backing off for " + backoffMillis + " millis.");
Thread.sleep(backoffMillis);
} catch (ResourceInUseException reInUse) {
if (LOG.isWarnEnabled()) {
LOG.info("The stream is currently not in active state. Reusing the older state "
+ "for the time being");
break;
}
} catch (ResourceNotFoundException reNotFound) {
throw new RuntimeException("Stream not found. Error while getting shard list.", reNotFound);
} catch (InvalidArgumentException inArg) {
throw new RuntimeException("Invalid Arguments to listShards.", inArg);
} catch (ExpiredNextTokenException expiredToken) {
LOG.warn("List Shards has an expired token. Reusing the previous state.");
break;
} catch (SdkClientException ex) {
if (retryCount < listShardsMaxRetries && isRecoverableSdkClientException(ex)) {
long backoffMillis = fullJitterBackoff(
listShardsBaseBackoffMillis, listShardsMaxBackoffMillis, listShardsExpConstant, retryCount++);
LOG.warn("Got SdkClientException when listing shards from stream {}. Backing off for {} millis.",
streamName, backoffMillis);
Thread.sleep(backoffMillis);
} else {
throw ex;
}
}
}
if (startShardId != null && listShardsResults != null) {
List<Shard> shards = listShardsResults.getShards();
Iterator<Shard> shardItr = shards.iterator();
while (shardItr.hasNext()) {
if (StreamShardHandle.compareShardIds(shardItr.next().getShardId(), startShardId) <= 0) {
shardItr.remove();
}
}
}
return listShardsResults;
}
/**
* Get metainfo for a Kinesis stream, which contains information about which shards this
* Kinesis stream possess.
*
* <p>This method is using a "full jitter" approach described in AWS's article,
* <a href="https:
* "Exponential Backoff and Jitter"</a>.
* This is necessary because concurrent calls will be made by all parallel subtask's fetcher.
* This jitter backoff approach will help distribute calls across the fetchers over time.
*
* @param streamName the stream to describe
* @param startShardId which shard to start with for this describe operation
*
* @return the result of the describe stream operation
*/
protected DescribeStreamResult describeStream(String streamName, @Nullable String startShardId)
throws InterruptedException {
final DescribeStreamRequest describeStreamRequest = new DescribeStreamRequest();
describeStreamRequest.setStreamName(streamName);
describeStreamRequest.setExclusiveStartShardId(startShardId);
DescribeStreamResult describeStreamResult = null;
int attemptCount = 0;
while (describeStreamResult == null) {
try {
describeStreamResult = kinesisClient.describeStream(describeStreamRequest);
} catch (LimitExceededException le) {
long backoffMillis = fullJitterBackoff(
describeStreamBaseBackoffMillis,
describeStreamMaxBackoffMillis,
describeStreamExpConstant,
attemptCount++);
LOG.warn(String.format("Got LimitExceededException when describing stream %s. "
+ "Backing off for %d millis.", streamName, backoffMillis));
Thread.sleep(backoffMillis);
} catch (ResourceNotFoundException re) {
throw new RuntimeException("Error while getting stream details", re);
}
}
String streamStatus = describeStreamResult.getStreamDescription().getStreamStatus();
if (!(streamStatus.equals(StreamStatus.ACTIVE.toString())
|| streamStatus.equals(StreamStatus.UPDATING.toString()))) {
if (LOG.isWarnEnabled()) {
LOG.warn(String.format("The status of stream %s is %s ; result of the current "
+ "describeStream operation will not contain any shard information.",
streamName, streamStatus));
}
}
return describeStreamResult;
}
protected static long fullJitterBackoff(long base, long max, double power, int attempt) {
long exponentialBackoff = (long) Math.min(max, base * Math.pow(power, attempt));
return (long) (seed.nextDouble() * exponentialBackoff);
}
}
|
class KinesisProxy implements KinesisProxyInterface {
private static final Logger LOG = LoggerFactory.getLogger(KinesisProxy.class);
/** The actual Kinesis client from the AWS SDK that we will be using to make calls. */
private final AmazonKinesis kinesisClient;
/** Random seed used to calculate backoff jitter for Kinesis operations. */
private static final Random seed = new Random();
/** Base backoff millis for the list shards operation. */
private final long listShardsBaseBackoffMillis;
/** Maximum backoff millis for the list shards operation. */
private final long listShardsMaxBackoffMillis;
/** Exponential backoff power constant for the list shards operation. */
private final double listShardsExpConstant;
/** Maximum retry attempts for the list shards operation. */
private final int listShardsMaxRetries;
/** Base backoff millis for the get records operation. */
private final long getRecordsBaseBackoffMillis;
/** Maximum backoff millis for the get records operation. */
private final long getRecordsMaxBackoffMillis;
/** Exponential backoff power constant for the get records operation. */
private final double getRecordsExpConstant;
/** Maximum retry attempts for the get records operation. */
private final int getRecordsMaxRetries;
/** Base backoff millis for the get shard iterator operation. */
private final long getShardIteratorBaseBackoffMillis;
/** Maximum backoff millis for the get shard iterator operation. */
private final long getShardIteratorMaxBackoffMillis;
/** Exponential backoff power constant for the get shard iterator operation. */
private final double getShardIteratorExpConstant;
/** Maximum retry attempts for the get shard iterator operation. */
private final int getShardIteratorMaxRetries;
/* Backoff millis for the describe stream operation. */
private final long describeStreamBaseBackoffMillis;
/* Maximum backoff millis for the describe stream operation. */
private final long describeStreamMaxBackoffMillis;
/* Exponential backoff power constant for the describe stream operation. */
private final double describeStreamExpConstant;
/**
* Create a new KinesisProxy based on the supplied configuration properties.
*
* @param configProps configuration properties containing AWS credential and AWS region info
*/
protected KinesisProxy(Properties configProps) {
checkNotNull(configProps);
KinesisConfigUtil.backfillConsumerKeys(configProps);
this.kinesisClient = createKinesisClient(configProps);
this.listShardsBaseBackoffMillis = Long.valueOf(
configProps.getProperty(
ConsumerConfigConstants.LIST_SHARDS_BACKOFF_BASE,
Long.toString(ConsumerConfigConstants.DEFAULT_LIST_SHARDS_BACKOFF_BASE)));
this.listShardsMaxBackoffMillis = Long.valueOf(
configProps.getProperty(
ConsumerConfigConstants.LIST_SHARDS_BACKOFF_MAX,
Long.toString(ConsumerConfigConstants.DEFAULT_LIST_SHARDS_BACKOFF_MAX)));
this.listShardsExpConstant = Double.valueOf(
configProps.getProperty(
ConsumerConfigConstants.LIST_SHARDS_BACKOFF_EXPONENTIAL_CONSTANT,
Double.toString(ConsumerConfigConstants.DEFAULT_LIST_SHARDS_BACKOFF_EXPONENTIAL_CONSTANT)));
this.listShardsMaxRetries = Integer.valueOf(
configProps.getProperty(
ConsumerConfigConstants.LIST_SHARDS_RETRIES,
Long.toString(ConsumerConfigConstants.DEFAULT_LIST_SHARDS_RETRIES)));
this.describeStreamBaseBackoffMillis = Long.valueOf(
configProps.getProperty(ConsumerConfigConstants.STREAM_DESCRIBE_BACKOFF_BASE,
Long.toString(ConsumerConfigConstants.DEFAULT_STREAM_DESCRIBE_BACKOFF_BASE)));
this.describeStreamMaxBackoffMillis = Long.valueOf(
configProps.getProperty(ConsumerConfigConstants.STREAM_DESCRIBE_BACKOFF_MAX,
Long.toString(ConsumerConfigConstants.DEFAULT_STREAM_DESCRIBE_BACKOFF_MAX)));
this.describeStreamExpConstant = Double.valueOf(
configProps.getProperty(ConsumerConfigConstants.STREAM_DESCRIBE_BACKOFF_EXPONENTIAL_CONSTANT,
Double.toString(ConsumerConfigConstants.DEFAULT_STREAM_DESCRIBE_BACKOFF_EXPONENTIAL_CONSTANT)));
this.getRecordsBaseBackoffMillis = Long.valueOf(
configProps.getProperty(
ConsumerConfigConstants.SHARD_GETRECORDS_BACKOFF_BASE,
Long.toString(ConsumerConfigConstants.DEFAULT_SHARD_GETRECORDS_BACKOFF_BASE)));
this.getRecordsMaxBackoffMillis = Long.valueOf(
configProps.getProperty(
ConsumerConfigConstants.SHARD_GETRECORDS_BACKOFF_MAX,
Long.toString(ConsumerConfigConstants.DEFAULT_SHARD_GETRECORDS_BACKOFF_MAX)));
this.getRecordsExpConstant = Double.valueOf(
configProps.getProperty(
ConsumerConfigConstants.SHARD_GETRECORDS_BACKOFF_EXPONENTIAL_CONSTANT,
Double.toString(ConsumerConfigConstants.DEFAULT_SHARD_GETRECORDS_BACKOFF_EXPONENTIAL_CONSTANT)));
this.getRecordsMaxRetries = Integer.valueOf(
configProps.getProperty(
ConsumerConfigConstants.SHARD_GETRECORDS_RETRIES,
Long.toString(ConsumerConfigConstants.DEFAULT_SHARD_GETRECORDS_RETRIES)));
this.getShardIteratorBaseBackoffMillis = Long.valueOf(
configProps.getProperty(
ConsumerConfigConstants.SHARD_GETITERATOR_BACKOFF_BASE,
Long.toString(ConsumerConfigConstants.DEFAULT_SHARD_GETITERATOR_BACKOFF_BASE)));
this.getShardIteratorMaxBackoffMillis = Long.valueOf(
configProps.getProperty(
ConsumerConfigConstants.SHARD_GETITERATOR_BACKOFF_MAX,
Long.toString(ConsumerConfigConstants.DEFAULT_SHARD_GETITERATOR_BACKOFF_MAX)));
this.getShardIteratorExpConstant = Double.valueOf(
configProps.getProperty(
ConsumerConfigConstants.SHARD_GETITERATOR_BACKOFF_EXPONENTIAL_CONSTANT,
Double.toString(ConsumerConfigConstants.DEFAULT_SHARD_GETITERATOR_BACKOFF_EXPONENTIAL_CONSTANT)));
this.getShardIteratorMaxRetries = Integer.valueOf(
configProps.getProperty(
ConsumerConfigConstants.SHARD_GETITERATOR_RETRIES,
Long.toString(ConsumerConfigConstants.DEFAULT_SHARD_GETITERATOR_RETRIES)));
}
/**
* Create the Kinesis client, using the provided configuration properties and default {@link ClientConfiguration}.
* Derived classes can override this method to customize the client configuration.
* @param configProps
* @return
*/
protected AmazonKinesis createKinesisClient(Properties configProps) {
ClientConfiguration awsClientConfig = new ClientConfigurationFactory().getConfig();
AWSUtil.setAwsClientConfigProperties(awsClientConfig, configProps);
return AWSUtil.createKinesisClient(configProps, awsClientConfig);
}
/**
* Creates a Kinesis proxy.
*
* @param configProps configuration properties
* @return the created kinesis proxy
*/
public static KinesisProxyInterface create(Properties configProps) {
return new KinesisProxy(configProps);
}
/**
* {@inheritDoc}
*/
@Override
/**
* {@inheritDoc}
*/
@Override
public GetShardListResult getShardList(Map<String, String> streamNamesWithLastSeenShardIds) throws InterruptedException {
GetShardListResult result = new GetShardListResult();
for (Map.Entry<String, String> streamNameWithLastSeenShardId : streamNamesWithLastSeenShardIds.entrySet()) {
String stream = streamNameWithLastSeenShardId.getKey();
String lastSeenShardId = streamNameWithLastSeenShardId.getValue();
result.addRetrievedShardsToStream(stream, getShardsOfStream(stream, lastSeenShardId));
}
return result;
}
/**
* {@inheritDoc}
*/
@Override
public String getShardIterator(StreamShardHandle shard, String shardIteratorType, @Nullable Object startingMarker) throws InterruptedException {
GetShardIteratorRequest getShardIteratorRequest = new GetShardIteratorRequest()
.withStreamName(shard.getStreamName())
.withShardId(shard.getShard().getShardId())
.withShardIteratorType(shardIteratorType);
switch (ShardIteratorType.fromValue(shardIteratorType)) {
case TRIM_HORIZON:
case LATEST:
break;
case AT_TIMESTAMP:
if (startingMarker instanceof Date) {
getShardIteratorRequest.setTimestamp((Date) startingMarker);
} else {
throw new IllegalArgumentException("Invalid object given for GetShardIteratorRequest() when ShardIteratorType is AT_TIMESTAMP. Must be a Date object.");
}
break;
case AT_SEQUENCE_NUMBER:
case AFTER_SEQUENCE_NUMBER:
if (startingMarker instanceof String) {
getShardIteratorRequest.setStartingSequenceNumber((String) startingMarker);
} else {
throw new IllegalArgumentException("Invalid object given for GetShardIteratorRequest() when ShardIteratorType is AT_SEQUENCE_NUMBER or AFTER_SEQUENCE_NUMBER. Must be a String.");
}
}
return getShardIterator(getShardIteratorRequest);
}
/**
 * Executes a GetShardIterator request, retrying recoverable
 * {@link AmazonServiceException}s with full-jitter exponential backoff until a
 * result is obtained or the retry budget is exhausted.
 *
 * @param getShardIteratorRequest the fully-populated request to execute
 * @return the shard iterator string returned by Kinesis
 * @throws InterruptedException if interrupted while sleeping between retries
 * @throws RuntimeException if all retry attempts fail
 */
private String getShardIterator(GetShardIteratorRequest getShardIteratorRequest) throws InterruptedException {
GetShardIteratorResult getShardIteratorResult = null;
int retryCount = 0;
// NOTE(review): the loop condition admits getShardIteratorMaxRetries + 1 attempts,
// while the failure message below reports only getShardIteratorMaxRetries — confirm intended.
while (retryCount <= getShardIteratorMaxRetries && getShardIteratorResult == null) {
try {
getShardIteratorResult = kinesisClient.getShardIterator(getShardIteratorRequest);
} catch (AmazonServiceException ex) {
if (isRecoverableException(ex)) {
long backoffMillis = fullJitterBackoff(
getShardIteratorBaseBackoffMillis, getShardIteratorMaxBackoffMillis, getShardIteratorExpConstant, retryCount++);
LOG.warn("Got recoverable AmazonServiceException. Backing off for "
+ backoffMillis + " millis (" + ex.getClass().getName() + ": " + ex.getMessage() + ")");
Thread.sleep(backoffMillis);
} else {
// Non-recoverable service errors are surfaced to the caller immediately.
throw ex;
}
}
}
if (getShardIteratorResult == null) {
throw new RuntimeException("Retries exceeded for getShardIterator operation - all " + getShardIteratorMaxRetries +
" retry attempts failed.");
}
return getShardIteratorResult.getShardIterator();
}
/**
 * Determines whether the given SDK client exception can be retried with
 * exponential backoff. Only {@link AmazonServiceException}s accepted by
 * {@link KinesisProxy#isRecoverableException} are considered recoverable.
 *
 * @param ex exception to inspect
 * @return {@code true} if the exception can be recovered from, else {@code false}
 */
protected boolean isRecoverableSdkClientException(SdkClientException ex) {
    return ex instanceof AmazonServiceException
            && KinesisProxy.isRecoverableException((AmazonServiceException) ex);
}
/**
 * Determines whether a service exception can be retried with exponential backoff.
 *
 * <p>Client-side errors are recoverable only when they indicate throttling
 * ({@link ProvisionedThroughputExceededException}); service-side and unknown
 * errors are always treated as transient.
 *
 * @param ex exception to inspect
 * @return {@code true} if the exception can be recovered from, else {@code false}
 */
protected static boolean isRecoverableException(AmazonServiceException ex) {
    final AmazonServiceException.ErrorType errorType = ex.getErrorType();
    if (errorType == null) {
        return false;
    }
    if (errorType == AmazonServiceException.ErrorType.Client) {
        return ex instanceof ProvisionedThroughputExceededException;
    }
    return errorType == AmazonServiceException.ErrorType.Service
            || errorType == AmazonServiceException.ErrorType.Unknown;
}
/**
 * Collects all shards of {@code streamName} that come after {@code lastSeenShardId},
 * following ListShards pagination tokens until exhausted.
 *
 * @param streamName the stream whose shards to fetch
 * @param lastSeenShardId only shards after this id are returned; null for all shards
 * @return the discovered shards, or an empty list if the listing was aborted
 *         (listShards returned null, e.g. stream not active or token expired)
 * @throws InterruptedException if interrupted while backing off inside listShards
 */
private List<StreamShardHandle> getShardsOfStream(String streamName, @Nullable String lastSeenShardId) throws InterruptedException {
List<StreamShardHandle> shardsOfStream = new ArrayList<>();
ListShardsResult listShardsResult;
String startShardToken = null;
do {
listShardsResult = listShards(streamName, lastSeenShardId, startShardToken);
if (listShardsResult == null) {
// Listing was aborted mid-pagination; drop partial results and return empty.
shardsOfStream.clear();
return shardsOfStream;
}
List<Shard> shards = listShardsResult.getShards();
for (Shard shard : shards) {
shardsOfStream.add(new StreamShardHandle(streamName, shard));
}
startShardToken = listShardsResult.getNextToken();
} while (startShardToken != null);
return shardsOfStream;
}
/**
 * Lists the shards of a Kinesis stream, either starting after {@code startShardId}
 * or continuing from a pagination token, retrying with full-jitter exponential
 * backoff on {@link LimitExceededException} and recoverable SdkClientExceptions.
 *
 * <p>This method uses the "full jitter" backoff approach described in the AWS
 * Architecture Blog article "Exponential Backoff and Jitter". This is necessary
 * because concurrent calls will be made by all parallel subtasks' fetchers; the
 * jitter distributes the calls across the fetchers over time.
 *
 * @param streamName the stream whose shards to list
 * @param startShardId shard id to start after (earlier shards' infos will not
 *                     appear in the result); ignored when {@code startNextToken} is set
 * @param startNextToken pagination token from a previous ListShards call, or null
 * @return the ListShards result, or {@code null} if the stream is not in an active
 *         state, the token expired, or retries were exhausted without a result
 * @throws InterruptedException if interrupted while sleeping between retries
 */
private ListShardsResult listShards(String streamName, @Nullable String startShardId,
                                    @Nullable String startNextToken)
        throws InterruptedException {
    final ListShardsRequest listShardsRequest = new ListShardsRequest();
    if (startNextToken == null) {
        listShardsRequest.setExclusiveStartShardId(startShardId);
        listShardsRequest.setStreamName(streamName);
    } else {
        // When a nextToken is supplied, the request must carry only the token.
        listShardsRequest.setNextToken(startNextToken);
    }
    ListShardsResult listShardsResults = null;
    int retryCount = 0;
    while (retryCount <= listShardsMaxRetries && listShardsResults == null) {
        try {
            listShardsResults = kinesisClient.listShards(listShardsRequest);
        } catch (LimitExceededException le) {
            long backoffMillis = fullJitterBackoff(
                    listShardsBaseBackoffMillis, listShardsMaxBackoffMillis, listShardsExpConstant, retryCount++);
            LOG.warn("Got LimitExceededException when listing shards from stream " + streamName
                    + ". Backing off for " + backoffMillis + " millis.");
            Thread.sleep(backoffMillis);
        } catch (ResourceInUseException reInUse) {
            // Stream is not in ACTIVE state; reuse the previously known shard state.
            // The break must NOT depend on the configured log level: the original code
            // only broke out of the loop inside an isWarnEnabled() check, which spins
            // forever (retryCount is never incremented here) when WARN is disabled.
            LOG.warn("The stream is currently not in active state. Reusing the older state "
                    + "for the time being");
            break;
        } catch (ResourceNotFoundException reNotFound) {
            throw new RuntimeException("Stream not found. Error while getting shard list.", reNotFound);
        } catch (InvalidArgumentException inArg) {
            throw new RuntimeException("Invalid Arguments to listShards.", inArg);
        } catch (ExpiredNextTokenException expiredToken) {
            LOG.warn("List Shards has an expired token. Reusing the previous state.");
            break;
        } catch (SdkClientException ex) {
            if (retryCount < listShardsMaxRetries && isRecoverableSdkClientException(ex)) {
                long backoffMillis = fullJitterBackoff(
                        listShardsBaseBackoffMillis, listShardsMaxBackoffMillis, listShardsExpConstant, retryCount++);
                LOG.warn("Got SdkClientException when listing shards from stream {}. Backing off for {} millis.",
                        streamName, backoffMillis);
                Thread.sleep(backoffMillis);
            } else {
                throw ex;
            }
        }
    }
    // Remove shards at or before startShardId client-side; a token-based request
    // cannot carry ExclusiveStartShardId, so filtering happens here.
    if (startShardId != null && listShardsResults != null) {
        List<Shard> shards = listShardsResults.getShards();
        Iterator<Shard> shardItr = shards.iterator();
        while (shardItr.hasNext()) {
            if (StreamShardHandle.compareShardIds(shardItr.next().getShardId(), startShardId) <= 0) {
                shardItr.remove();
            }
        }
    }
    return listShardsResults;
}
/**
 * Get metainfo for a Kinesis stream, which contains information about which
 * shards this Kinesis stream possess.
 *
 * <p>This method uses the "full jitter" backoff approach described in the AWS
 * Architecture Blog article "Exponential Backoff and Jitter".
 * This is necessary because concurrent calls will be made by all parallel subtask's fetcher.
 * This jitter backoff approach will help distribute calls across the fetchers over time.
 *
 * @param streamName the stream to describe
 * @param startShardId which shard to start with for this describe operation
 *
 * @return the result of the describe stream operation
 */
protected DescribeStreamResult describeStream(String streamName, @Nullable String startShardId)
throws InterruptedException {
final DescribeStreamRequest describeStreamRequest = new DescribeStreamRequest();
describeStreamRequest.setStreamName(streamName);
describeStreamRequest.setExclusiveStartShardId(startShardId);
DescribeStreamResult describeStreamResult = null;
int attemptCount = 0;
// NOTE(review): this loop has no retry cap — it backs off indefinitely on
// repeated LimitExceededException; confirm that unbounded retry is intended.
while (describeStreamResult == null) {
try {
describeStreamResult = kinesisClient.describeStream(describeStreamRequest);
} catch (LimitExceededException le) {
long backoffMillis = fullJitterBackoff(
describeStreamBaseBackoffMillis,
describeStreamMaxBackoffMillis,
describeStreamExpConstant,
attemptCount++);
LOG.warn(String.format("Got LimitExceededException when describing stream %s. "
+ "Backing off for %d millis.", streamName, backoffMillis));
Thread.sleep(backoffMillis);
} catch (ResourceNotFoundException re) {
throw new RuntimeException("Error while getting stream details", re);
}
}
// A non-ACTIVE/UPDATING stream still returns a result, but without shard info;
// warn so the caller understands why the shard list may be empty.
String streamStatus = describeStreamResult.getStreamDescription().getStreamStatus();
if (!(streamStatus.equals(StreamStatus.ACTIVE.toString())
|| streamStatus.equals(StreamStatus.UPDATING.toString()))) {
if (LOG.isWarnEnabled()) {
LOG.warn(String.format("The status of stream %s is %s ; result of the current "
+ "describeStream operation will not contain any shard information.",
streamName, streamStatus));
}
}
return describeStreamResult;
}
/**
 * Computes a "full jitter" backoff: a uniformly random duration in
 * {@code [0, min(max, base * power^attempt))}.
 *
 * @param base    base backoff in milliseconds
 * @param max     maximum backoff in milliseconds
 * @param power   exponential growth factor
 * @param attempt zero-based attempt count
 * @return the randomized backoff duration in milliseconds
 */
protected static long fullJitterBackoff(long base, long max, double power, int attempt) {
long exponentialBackoff = (long) Math.min(max, base * Math.pow(power, attempt));
return (long) (seed.nextDouble() * exponentialBackoff);
}
}
|
This check won't always work as expected and won't comply with the changes made in PR #4194.
|
/**
 * Logs the given exception at ERROR level (when ERROR logging is enabled) and
 * rethrows it. A {@code null} argument is a no-op.
 *
 * @param runtimeException the exception to log and rethrow; may be {@code null}
 */
public void logAndThrow(RuntimeException runtimeException) {
    if (runtimeException == null) {
        return;
    }
    // Gate on ERROR rather than the builder's default 'level': the exception is
    // always emitted via logger.error, so checking 'level' suppresses/permits the
    // log line inconsistently with the level it is actually written at.
    if (canLogAtLevel(ERROR_LEVEL)) {
        // Pass the exception message instead of "" so the log line is searchable.
        logger.error(runtimeException.getMessage(), runtimeException);
    }
    throw runtimeException;
}
|
if (canLogAtLevel(level)) {
|
/**
 * Logs the supplied exception at ERROR level when ERROR logging is enabled,
 * then rethrows it. Passing {@code null} does nothing.
 *
 * @param runtimeException the exception to log and rethrow; may be {@code null}
 */
public void logAndThrow(RuntimeException runtimeException) {
    if (runtimeException != null) {
        if (canLogAtLevel(ERROR_LEVEL)) {
            logger.error(runtimeException.getMessage(), runtimeException);
        }
        throw runtimeException;
    }
}
|
class name using the {@link LoggerFactory}
|
class name using the {@link LoggerFactory}
|
third param can be non-literal?
|
public static void verifyAnalyticExpression(AnalyticExpr analyticExpr) {
for (Expr e : analyticExpr.getPartitionExprs()) {
if (e.isConstant()) {
throw new SemanticException("Expressions in the PARTITION BY clause must not be constant: "
+ e.toSql() + " (in " + analyticExpr.toSql() + ")", e.getPos());
}
if (!e.getType().canPartitionBy()) {
throw new SemanticException("HLL, BITMAP and PERCENTILE type can't as partition by column", e.getPos());
}
}
for (OrderByElement e : analyticExpr.getOrderByElements()) {
if (e.getExpr().isConstant()) {
throw new SemanticException("Expressions in the ORDER BY clause must not be constant: "
+ e.getExpr().toSql() + " (in " + analyticExpr.toSql() + ")", e.getPos());
}
if (!e.getExpr().getType().canOrderBy()) {
throw new SemanticException("HLL, BITMAP and PERCENTILE type can't as order by column", e.getPos());
}
}
FunctionCallExpr analyticFunction = analyticExpr.getFnCall();
if (analyticFunction.getParams().isDistinct()) {
throw new SemanticException("DISTINCT not allowed in analytic function: " + analyticFunction.toSql(),
analyticExpr.getPos());
}
if (!isAnalyticFn(analyticFunction.getFn())) {
throw new SemanticException("Function '%s' not supported with OVER clause.",
analyticExpr.getFnCall().toSql(), analyticFunction.getPos());
}
for (Expr e : analyticExpr.getFnCall().getChildren()) {
if (e.getType().isBitmapType() &&
!analyticFunction.getFn().functionName().equals(FunctionSet.BITMAP_UNION_COUNT) &&
!analyticFunction.getFn().functionName().equals(FunctionSet.LEAD) &&
!analyticFunction.getFn().functionName().equals(FunctionSet.LAG)) {
throw new SemanticException("bitmap type could only used for bitmap_union_count/lead/lag window function",
e.getPos());
} else if (e.getType().isHllType() &&
!analyticFunction.getFn().functionName().equals(AnalyticExpr.HLL_UNION_AGG) &&
!analyticFunction.getFn().functionName().equals(FunctionSet.LEAD) &&
!analyticFunction.getFn().functionName().equals(FunctionSet.LAG)) {
throw new SemanticException("hll type could only used for hll_union_agg/lead/lag window function",
e.getPos());
} else if (e.getType().isPercentile()) {
throw new SemanticException("window functions don't support percentile type", e.getPos());
}
}
if (isOffsetFn(analyticFunction.getFn()) && analyticFunction.getChildren().size() > 1) {
Expr offset = analyticFunction.getChild(1);
if (!isPositiveConstantInteger(offset)) {
throw new SemanticException(
"The offset parameter of LEAD/LAG must be a constant positive integer: " +
analyticFunction.toSql(), analyticFunction.getPos());
}
if (analyticFunction.getChildren().size() == 2) {
} else if (analyticFunction.getChildren().size() == 3) {
Type firstType = analyticFunction.getChild(0).getType();
if (analyticFunction.getChild(0) instanceof NullLiteral) {
firstType = analyticFunction.getFn().getArgs()[0];
}
try {
analyticFunction.uncheckedCastChild(firstType, 2);
} catch (AnalysisException e) {
throw new SemanticException("The third parameter of LEAD/LAG can't convert to " + firstType,
analyticFunction.getChild(2).getPos());
}
if (!analyticFunction.getChild(2).isLiteral() && analyticFunction.getChild(2).isNullable()) {
throw new SemanticException("The type of the third parameter of LEAD/LAG not match the type " + firstType,
analyticFunction.getChild(2).getPos());
}
} else {
throw new SemanticException("The number of parameter in LEAD/LAG is uncorrected", analyticFunction.getPos());
}
}
if (isNtileFn(analyticFunction.getFn())) {
Expr numBuckets = analyticFunction.getChild(0);
if (!isPositiveConstantInteger(numBuckets)) {
throw new SemanticException(
"The num_buckets parameter of NTILE must be a constant positive integer: " +
analyticFunction.toSql(), numBuckets.getPos());
}
}
if (analyticExpr.getWindow() != null) {
if ((isRankingFn(analyticFunction.getFn()) || isOffsetFn(analyticFunction.getFn()) ||
isHllAggFn(analyticFunction.getFn()))) {
throw new SemanticException("Windowing clause not allowed with '" + analyticFunction.toSql() + "'",
analyticExpr.getPos());
}
verifyWindowFrame(analyticExpr);
}
}
|
if (!analyticFunction.getChild(2).isLiteral() && analyticFunction.getChild(2).isNullable()) {
|
public static void verifyAnalyticExpression(AnalyticExpr analyticExpr) {
for (Expr e : analyticExpr.getPartitionExprs()) {
if (e.isConstant()) {
throw new SemanticException("Expressions in the PARTITION BY clause must not be constant: "
+ e.toSql() + " (in " + analyticExpr.toSql() + ")", e.getPos());
}
if (!e.getType().canPartitionBy()) {
throw new SemanticException("HLL, BITMAP and PERCENTILE type can't as partition by column", e.getPos());
}
}
for (OrderByElement e : analyticExpr.getOrderByElements()) {
if (e.getExpr().isConstant()) {
throw new SemanticException("Expressions in the ORDER BY clause must not be constant: "
+ e.getExpr().toSql() + " (in " + analyticExpr.toSql() + ")", e.getPos());
}
if (!e.getExpr().getType().canOrderBy()) {
throw new SemanticException("HLL, BITMAP and PERCENTILE type can't as order by column", e.getPos());
}
}
FunctionCallExpr analyticFunction = analyticExpr.getFnCall();
if (analyticFunction.getParams().isDistinct()) {
throw new SemanticException("DISTINCT not allowed in analytic function: " + analyticFunction.toSql(),
analyticExpr.getPos());
}
if (!isAnalyticFn(analyticFunction.getFn())) {
throw new SemanticException("Function '%s' not supported with OVER clause.",
analyticExpr.getFnCall().toSql(), analyticFunction.getPos());
}
for (Expr e : analyticExpr.getFnCall().getChildren()) {
if (e.getType().isBitmapType() &&
!analyticFunction.getFn().functionName().equals(FunctionSet.BITMAP_UNION_COUNT) &&
!analyticFunction.getFn().functionName().equals(FunctionSet.LEAD) &&
!analyticFunction.getFn().functionName().equals(FunctionSet.LAG)) {
throw new SemanticException("bitmap type could only used for bitmap_union_count/lead/lag window function",
e.getPos());
} else if (e.getType().isHllType() &&
!analyticFunction.getFn().functionName().equals(AnalyticExpr.HLL_UNION_AGG) &&
!analyticFunction.getFn().functionName().equals(FunctionSet.LEAD) &&
!analyticFunction.getFn().functionName().equals(FunctionSet.LAG)) {
throw new SemanticException("hll type could only used for hll_union_agg/lead/lag window function",
e.getPos());
} else if (e.getType().isPercentile()) {
throw new SemanticException("window functions don't support percentile type", e.getPos());
}
}
if (isOffsetFn(analyticFunction.getFn()) && analyticFunction.getChildren().size() > 1) {
Expr offset = analyticFunction.getChild(1);
if (!isPositiveConstantInteger(offset)) {
throw new SemanticException(
"The offset parameter of LEAD/LAG must be a constant positive integer: " +
analyticFunction.toSql(), analyticFunction.getPos());
}
if (analyticFunction.getChildren().size() == 2) {
} else if (analyticFunction.getChildren().size() == 3) {
Type firstType = analyticFunction.getChild(0).getType();
if (analyticFunction.getChild(0) instanceof NullLiteral) {
firstType = analyticFunction.getFn().getArgs()[0];
}
try {
analyticFunction.uncheckedCastChild(firstType, 2);
} catch (AnalysisException e) {
throw new SemanticException("The third parameter of LEAD/LAG can't convert to " + firstType,
analyticFunction.getChild(2).getPos());
}
if (!analyticFunction.getChild(2).isLiteral() && analyticFunction.getChild(2).isNullable()) {
throw new SemanticException("The type of the third parameter of LEAD/LAG not match the type " + firstType,
analyticFunction.getChild(2).getPos());
}
} else {
throw new SemanticException("The number of parameter in LEAD/LAG is uncorrected", analyticFunction.getPos());
}
}
if (isNtileFn(analyticFunction.getFn())) {
Expr numBuckets = analyticFunction.getChild(0);
if (!isPositiveConstantInteger(numBuckets)) {
throw new SemanticException(
"The num_buckets parameter of NTILE must be a constant positive integer: " +
analyticFunction.toSql(), numBuckets.getPos());
}
}
if (analyticExpr.getWindow() != null) {
if ((isRankingFn(analyticFunction.getFn()) || isOffsetFn(analyticFunction.getFn()) ||
isHllAggFn(analyticFunction.getFn()))) {
throw new SemanticException("Windowing clause not allowed with '" + analyticFunction.toSql() + "'",
analyticExpr.getPos());
}
verifyWindowFrame(analyticExpr);
}
}
|
class AnalyticAnalyzer {
private static boolean isPositiveConstantInteger(Expr expr) {
if (!expr.isConstant()) {
return false;
}
double value = 0;
if (expr instanceof IntLiteral) {
IntLiteral intl = (IntLiteral) expr;
value = intl.getDoubleValue();
} else if (expr instanceof LargeIntLiteral) {
LargeIntLiteral intl = (LargeIntLiteral) expr;
value = intl.getDoubleValue();
}
return value > 0;
}
private static void verifyWindowFrame(AnalyticExpr analyticExpr) {
if (analyticExpr.getOrderByElements().isEmpty()) {
throw new SemanticException("Windowing clause requires ORDER BY clause: " + analyticExpr.toSql(),
analyticExpr.getPos());
}
AnalyticWindow windowFrame = analyticExpr.getWindow();
AnalyticWindow.Boundary leftBoundary = windowFrame.getLeftBoundary();
Preconditions.checkArgument(leftBoundary != null);
if (windowFrame.getRightBoundary() == null) {
if (leftBoundary.getType() == AnalyticWindow.BoundaryType.FOLLOWING) {
throw new SemanticException(leftBoundary.getType().toString() + " requires a BETWEEN clause",
leftBoundary.getPos());
} else {
windowFrame
.setRightBoundary(new AnalyticWindow.Boundary(AnalyticWindow.BoundaryType.CURRENT_ROW, null));
}
}
AnalyticWindow.Boundary rightBoundary = windowFrame.getRightBoundary();
if (leftBoundary.getType() == AnalyticWindow.BoundaryType.UNBOUNDED_FOLLOWING) {
throw new SemanticException(
leftBoundary.getType().toString() + " is only allowed for upper bound of BETWEEN",
leftBoundary.getPos());
}
if (rightBoundary.getType() == AnalyticWindow.BoundaryType.UNBOUNDED_PRECEDING) {
throw new SemanticException(
rightBoundary.getType().toString() + " is only allowed for lower bound of BETWEEN",
rightBoundary.getPos());
}
if (windowFrame.getType() == AnalyticWindow.Type.RANGE) {
if (leftBoundary.getType().isOffset()) {
checkRangeOffsetBoundaryExpr(analyticExpr, leftBoundary);
}
if (rightBoundary.getType().isOffset()) {
checkRangeOffsetBoundaryExpr(analyticExpr, rightBoundary);
}
if (leftBoundary.getType().isOffset() || (rightBoundary.getType().isOffset()) ||
(leftBoundary.getType() == AnalyticWindow.BoundaryType.CURRENT_ROW
&& rightBoundary.getType() == AnalyticWindow.BoundaryType.CURRENT_ROW)) {
throw new SemanticException("RANGE is only supported with both the lower and upper bounds UNBOUNDED or"
+ " one UNBOUNDED and the other CURRENT ROW.", windowFrame.getPos());
}
}
if (leftBoundary.getType().isOffset()) {
checkOffsetExpr(windowFrame, leftBoundary);
}
if (rightBoundary.getType().isOffset()) {
checkOffsetExpr(windowFrame, rightBoundary);
}
if (leftBoundary.getType() == AnalyticWindow.BoundaryType.FOLLOWING) {
if (rightBoundary.getType() == AnalyticWindow.BoundaryType.FOLLOWING) {
checkOffsetBoundaries(leftBoundary, rightBoundary);
} else if (rightBoundary.getType() != AnalyticWindow.BoundaryType.UNBOUNDED_FOLLOWING) {
throw new SemanticException(
"A lower window bound of " + AnalyticWindow.BoundaryType.FOLLOWING
+ " requires that the upper bound also be " +
AnalyticWindow.BoundaryType.FOLLOWING, windowFrame.getPos());
}
}
if (rightBoundary.getType() == AnalyticWindow.BoundaryType.PRECEDING) {
if (leftBoundary.getType() == AnalyticWindow.BoundaryType.PRECEDING) {
checkOffsetBoundaries(rightBoundary, leftBoundary);
} else if (leftBoundary.getType() != AnalyticWindow.BoundaryType.UNBOUNDED_PRECEDING) {
throw new SemanticException(
"An upper window bound of " + AnalyticWindow.BoundaryType.PRECEDING
+ " requires that the lower bound also be " +
AnalyticWindow.BoundaryType.PRECEDING, windowFrame.getPos());
}
}
}
/**
* Checks that the value expr of an offset boundary of a RANGE window is compatible
* with orderingExprs (and that there's only a single ordering expr).
*/
private static void checkRangeOffsetBoundaryExpr(AnalyticExpr analyticExpr, AnalyticWindow.Boundary boundary) {
if (analyticExpr.getOrderByElements().size() > 1) {
throw new SemanticException("Only one ORDER BY expression allowed if used with "
+ "a RANGE window with PRECEDING/FOLLOWING: " + analyticExpr.toSql(), analyticExpr.getPos());
}
if (!Type.isImplicitlyCastable(boundary.getExpr().getType(),
analyticExpr.getOrderByElements().get(0).getExpr().getType(), false)) {
throw new SemanticException("The value expression of a PRECEDING/FOLLOWING clause of a RANGE window "
+ "must be implicitly convertable to the ORDER BY expression's type: "
+ boundary.getExpr().toSql() + " cannot be implicitly converted to "
+ analyticExpr.getOrderByElements().get(0).getExpr().toSql(), analyticExpr.getPos());
}
}
/**
* Semantic analysis for expr of a PRECEDING/FOLLOWING clause.
*/
private static void checkOffsetExpr(AnalyticWindow windowFrame, AnalyticWindow.Boundary boundary) {
Preconditions.checkState(boundary.getType().isOffset());
Expr e = boundary.getExpr();
Preconditions.checkNotNull(e);
boolean isPos = true;
Double val = null;
if (e.isConstant() && e.getType().isNumericType()) {
try {
val = Expr.getConstFromExpr(e);
if (val <= 0) {
isPos = false;
}
} catch (AnalysisException exc) {
throw new SemanticException("Couldn't evaluate PRECEDING/FOLLOWING expression: " + exc.getMessage(), e.getPos());
}
}
if (windowFrame.getType() == AnalyticWindow.Type.ROWS) {
if (!e.isConstant() || !e.getType().isFixedPointType() || !isPos) {
throw new SemanticException("For ROWS window, the value of a PRECEDING/FOLLOWING offset must be a "
+ "constant positive integer: " + boundary.toSql(), e.getPos());
}
Preconditions.checkNotNull(val);
boundary.setOffsetValue(new BigDecimal(val.longValue()));
} else {
if (!e.isConstant() || !e.getType().isNumericType() || !isPos) {
throw new SemanticException("For RANGE window, the value of a PRECEDING/FOLLOWING offset must be a "
+ "constant positive number: " + boundary.toSql(), e.getPos());
}
boundary.setOffsetValue(BigDecimal.valueOf(val));
}
}
/**
* Check that b1 <= b2.
*/
private static void checkOffsetBoundaries(AnalyticWindow.Boundary b1, AnalyticWindow.Boundary b2) {
Preconditions.checkState(b1.getType().isOffset());
Preconditions.checkState(b2.getType().isOffset());
Expr e1 = b1.getExpr();
Preconditions.checkState(e1 != null && e1.isConstant() && e1.getType().isNumericType());
Expr e2 = b2.getExpr();
Preconditions.checkState(e2 != null && e2.isConstant() && e2.getType().isNumericType());
try {
double left = Expr.getConstFromExpr(e1);
double right = Expr.getConstFromExpr(e2);
if (left > right) {
throw new SemanticException("Offset boundaries are in the wrong order", e1.getPos());
}
} catch (AnalysisException exc) {
throw new SemanticException("Couldn't evaluate PRECEDING/FOLLOWING expression: " + exc.getMessage(),
e1.getPos());
}
}
private static boolean isAnalyticFn(Function fn) {
return fn instanceof AggregateFunction && ((AggregateFunction) fn).isAnalyticFn();
}
private static boolean isOffsetFn(Function fn) {
if (!isAnalyticFn(fn)) {
return false;
}
return fn.functionName().equalsIgnoreCase(AnalyticExpr.LEAD) ||
fn.functionName().equalsIgnoreCase(AnalyticExpr.LAG);
}
private static boolean isMinMax(Function fn) {
if (!isAnalyticFn(fn)) {
return false;
}
return fn.functionName().equalsIgnoreCase(AnalyticExpr.MIN) ||
fn.functionName().equalsIgnoreCase(AnalyticExpr.MAX);
}
private static boolean isRankingFn(Function fn) {
if (!isAnalyticFn(fn)) {
return false;
}
return fn.functionName().equalsIgnoreCase(AnalyticExpr.RANK)
|| fn.functionName().equalsIgnoreCase(AnalyticExpr.DENSERANK)
|| fn.functionName().equalsIgnoreCase(AnalyticExpr.ROWNUMBER)
|| fn.functionName().equalsIgnoreCase(AnalyticExpr.NTILE);
}
private static boolean isNtileFn(Function fn) {
if (!isAnalyticFn(fn)) {
return false;
}
return fn.functionName().equalsIgnoreCase(AnalyticExpr.NTILE);
}
private static boolean isHllAggFn(Function fn) {
if (!isAnalyticFn(fn)) {
return false;
}
return fn.functionName().equalsIgnoreCase(AnalyticExpr.HLL_UNION_AGG);
}
}
|
class AnalyticAnalyzer {
private static boolean isPositiveConstantInteger(Expr expr) {
if (!expr.isConstant()) {
return false;
}
double value = 0;
if (expr instanceof IntLiteral) {
IntLiteral intl = (IntLiteral) expr;
value = intl.getDoubleValue();
} else if (expr instanceof LargeIntLiteral) {
LargeIntLiteral intl = (LargeIntLiteral) expr;
value = intl.getDoubleValue();
}
return value > 0;
}
private static void verifyWindowFrame(AnalyticExpr analyticExpr) {
if (analyticExpr.getOrderByElements().isEmpty()) {
throw new SemanticException("Windowing clause requires ORDER BY clause: " + analyticExpr.toSql(),
analyticExpr.getPos());
}
AnalyticWindow windowFrame = analyticExpr.getWindow();
AnalyticWindow.Boundary leftBoundary = windowFrame.getLeftBoundary();
Preconditions.checkArgument(leftBoundary != null);
if (windowFrame.getRightBoundary() == null) {
if (leftBoundary.getType() == AnalyticWindow.BoundaryType.FOLLOWING) {
throw new SemanticException(leftBoundary.getType().toString() + " requires a BETWEEN clause",
leftBoundary.getPos());
} else {
windowFrame
.setRightBoundary(new AnalyticWindow.Boundary(AnalyticWindow.BoundaryType.CURRENT_ROW, null));
}
}
AnalyticWindow.Boundary rightBoundary = windowFrame.getRightBoundary();
if (leftBoundary.getType() == AnalyticWindow.BoundaryType.UNBOUNDED_FOLLOWING) {
throw new SemanticException(
leftBoundary.getType().toString() + " is only allowed for upper bound of BETWEEN",
leftBoundary.getPos());
}
if (rightBoundary.getType() == AnalyticWindow.BoundaryType.UNBOUNDED_PRECEDING) {
throw new SemanticException(
rightBoundary.getType().toString() + " is only allowed for lower bound of BETWEEN",
rightBoundary.getPos());
}
if (windowFrame.getType() == AnalyticWindow.Type.RANGE) {
if (leftBoundary.getType().isOffset()) {
checkRangeOffsetBoundaryExpr(analyticExpr, leftBoundary);
}
if (rightBoundary.getType().isOffset()) {
checkRangeOffsetBoundaryExpr(analyticExpr, rightBoundary);
}
if (leftBoundary.getType().isOffset() || (rightBoundary.getType().isOffset()) ||
(leftBoundary.getType() == AnalyticWindow.BoundaryType.CURRENT_ROW
&& rightBoundary.getType() == AnalyticWindow.BoundaryType.CURRENT_ROW)) {
throw new SemanticException("RANGE is only supported with both the lower and upper bounds UNBOUNDED or"
+ " one UNBOUNDED and the other CURRENT ROW.", windowFrame.getPos());
}
}
if (leftBoundary.getType().isOffset()) {
checkOffsetExpr(windowFrame, leftBoundary);
}
if (rightBoundary.getType().isOffset()) {
checkOffsetExpr(windowFrame, rightBoundary);
}
if (leftBoundary.getType() == AnalyticWindow.BoundaryType.FOLLOWING) {
if (rightBoundary.getType() == AnalyticWindow.BoundaryType.FOLLOWING) {
checkOffsetBoundaries(leftBoundary, rightBoundary);
} else if (rightBoundary.getType() != AnalyticWindow.BoundaryType.UNBOUNDED_FOLLOWING) {
throw new SemanticException(
"A lower window bound of " + AnalyticWindow.BoundaryType.FOLLOWING
+ " requires that the upper bound also be " +
AnalyticWindow.BoundaryType.FOLLOWING, windowFrame.getPos());
}
}
if (rightBoundary.getType() == AnalyticWindow.BoundaryType.PRECEDING) {
if (leftBoundary.getType() == AnalyticWindow.BoundaryType.PRECEDING) {
checkOffsetBoundaries(rightBoundary, leftBoundary);
} else if (leftBoundary.getType() != AnalyticWindow.BoundaryType.UNBOUNDED_PRECEDING) {
throw new SemanticException(
"An upper window bound of " + AnalyticWindow.BoundaryType.PRECEDING
+ " requires that the lower bound also be " +
AnalyticWindow.BoundaryType.PRECEDING, windowFrame.getPos());
}
}
}
/**
* Checks that the value expr of an offset boundary of a RANGE window is compatible
* with orderingExprs (and that there's only a single ordering expr).
*/
private static void checkRangeOffsetBoundaryExpr(AnalyticExpr analyticExpr, AnalyticWindow.Boundary boundary) {
if (analyticExpr.getOrderByElements().size() > 1) {
throw new SemanticException("Only one ORDER BY expression allowed if used with "
+ "a RANGE window with PRECEDING/FOLLOWING: " + analyticExpr.toSql(), analyticExpr.getPos());
}
if (!Type.isImplicitlyCastable(boundary.getExpr().getType(),
analyticExpr.getOrderByElements().get(0).getExpr().getType(), false)) {
throw new SemanticException("The value expression of a PRECEDING/FOLLOWING clause of a RANGE window "
+ "must be implicitly convertable to the ORDER BY expression's type: "
+ boundary.getExpr().toSql() + " cannot be implicitly converted to "
+ analyticExpr.getOrderByElements().get(0).getExpr().toSql(), analyticExpr.getPos());
}
}
/**
* Semantic analysis for expr of a PRECEDING/FOLLOWING clause.
*/
private static void checkOffsetExpr(AnalyticWindow windowFrame, AnalyticWindow.Boundary boundary) {
Preconditions.checkState(boundary.getType().isOffset());
Expr e = boundary.getExpr();
Preconditions.checkNotNull(e);
boolean isPos = true;
Double val = null;
if (e.isConstant() && e.getType().isNumericType()) {
try {
val = Expr.getConstFromExpr(e);
if (val <= 0) {
isPos = false;
}
} catch (AnalysisException exc) {
throw new SemanticException("Couldn't evaluate PRECEDING/FOLLOWING expression: " + exc.getMessage(), e.getPos());
}
}
if (windowFrame.getType() == AnalyticWindow.Type.ROWS) {
if (!e.isConstant() || !e.getType().isFixedPointType() || !isPos) {
throw new SemanticException("For ROWS window, the value of a PRECEDING/FOLLOWING offset must be a "
+ "constant positive integer: " + boundary.toSql(), e.getPos());
}
Preconditions.checkNotNull(val);
boundary.setOffsetValue(new BigDecimal(val.longValue()));
} else {
if (!e.isConstant() || !e.getType().isNumericType() || !isPos) {
throw new SemanticException("For RANGE window, the value of a PRECEDING/FOLLOWING offset must be a "
+ "constant positive number: " + boundary.toSql(), e.getPos());
}
boundary.setOffsetValue(BigDecimal.valueOf(val));
}
}
/**
* Check that b1 <= b2.
*/
private static void checkOffsetBoundaries(AnalyticWindow.Boundary b1, AnalyticWindow.Boundary b2) {
Preconditions.checkState(b1.getType().isOffset());
Preconditions.checkState(b2.getType().isOffset());
Expr e1 = b1.getExpr();
Preconditions.checkState(e1 != null && e1.isConstant() && e1.getType().isNumericType());
Expr e2 = b2.getExpr();
Preconditions.checkState(e2 != null && e2.isConstant() && e2.getType().isNumericType());
try {
double left = Expr.getConstFromExpr(e1);
double right = Expr.getConstFromExpr(e2);
if (left > right) {
throw new SemanticException("Offset boundaries are in the wrong order", e1.getPos());
}
} catch (AnalysisException exc) {
throw new SemanticException("Couldn't evaluate PRECEDING/FOLLOWING expression: " + exc.getMessage(),
e1.getPos());
}
}
private static boolean isAnalyticFn(Function fn) {
return fn instanceof AggregateFunction && ((AggregateFunction) fn).isAnalyticFn();
}
private static boolean isOffsetFn(Function fn) {
if (!isAnalyticFn(fn)) {
return false;
}
return fn.functionName().equalsIgnoreCase(AnalyticExpr.LEAD) ||
fn.functionName().equalsIgnoreCase(AnalyticExpr.LAG);
}
private static boolean isMinMax(Function fn) {
if (!isAnalyticFn(fn)) {
return false;
}
return fn.functionName().equalsIgnoreCase(AnalyticExpr.MIN) ||
fn.functionName().equalsIgnoreCase(AnalyticExpr.MAX);
}
private static boolean isRankingFn(Function fn) {
if (!isAnalyticFn(fn)) {
return false;
}
return fn.functionName().equalsIgnoreCase(AnalyticExpr.RANK)
|| fn.functionName().equalsIgnoreCase(AnalyticExpr.DENSERANK)
|| fn.functionName().equalsIgnoreCase(AnalyticExpr.ROWNUMBER)
|| fn.functionName().equalsIgnoreCase(AnalyticExpr.NTILE);
}
private static boolean isNtileFn(Function fn) {
if (!isAnalyticFn(fn)) {
return false;
}
return fn.functionName().equalsIgnoreCase(AnalyticExpr.NTILE);
}
// True when fn is the analytic HLL_UNION_AGG function.
private static boolean isHllAggFn(Function fn) {
    return isAnalyticFn(fn) && fn.functionName().equalsIgnoreCase(AnalyticExpr.HLL_UNION_AGG);
}
}
|
Would it be better to mark the slot as not materialized instead of removing it?
|
/**
 * Substitutes {@code smap} into the pre-repeat expressions and drops slot refs
 * whose source slot is not materialized.
 *
 * Fix: instead of deleting the corresponding output slot descriptor and its
 * substitution-map entry (which invalidates references other expressions may
 * still hold into the output tuple / smap), the output slot is kept and simply
 * marked as not materialized.
 *
 * @param smap     substitution map to apply to preRepeatExprs
 * @param analyzer analyzer used for the substitution
 */
public void substitutePreRepeatExprs(ExprSubstitutionMap smap, Analyzer analyzer) {
    ArrayList<Expr> originalPreRepeatExprs = new ArrayList<>(preRepeatExprs);
    preRepeatExprs = Expr.substituteList(preRepeatExprs, smap, analyzer, true);
    ArrayList<Expr> materializedPreRepeatExprs = new ArrayList<>();
    ArrayList<Expr> unMaterializedSlotRefs = new ArrayList<>();
    for (int i = 0; i < preRepeatExprs.size(); ++i) {
        Expr expr = preRepeatExprs.get(i);
        if (expr instanceof SlotRef && !((SlotRef) expr).getDesc().isMaterialized()) {
            // Remember the ORIGINAL expr: the smap below is keyed on pre-substitution exprs.
            unMaterializedSlotRefs.add(originalPreRepeatExprs.get(i));
        } else {
            materializedPreRepeatExprs.add(expr);
        }
    }
    preRepeatExprs = materializedPreRepeatExprs;
    for (Expr expr : unMaterializedSlotRefs) {
        Expr rExpr = outputTupleSmap.get(expr);
        if (rExpr instanceof SlotRef) {
            // Keep the slot and its smap entry; just stop materializing the output slot.
            ((SlotRef) rExpr).getDesc().setIsMaterialized(false);
        }
    }
}
|
outputTupleDesc.getSlots().remove(((SlotRef) rExpr).getDesc());
|
/**
 * Substitutes {@code smap} into the pre-repeat expressions and filters out slot
 * refs whose source slot is not materialized; the matching output-tuple slot is
 * marked as not materialized rather than removed, so existing references into
 * outputTupleSmap / outputTupleDesc stay valid.
 *
 * @param smap     substitution map to apply to preRepeatExprs
 * @param analyzer analyzer used for the substitution
 */
public void substitutePreRepeatExprs(ExprSubstitutionMap smap, Analyzer analyzer) {
    // Snapshot the pre-substitution exprs: outputTupleSmap is keyed on them, not on
    // the substituted forms.
    ArrayList<Expr> originalPreRepeatExprs = new ArrayList<>(preRepeatExprs);
    preRepeatExprs = Expr.substituteList(preRepeatExprs, smap, analyzer, true);
    ArrayList<Expr> materializedPreRepeatExprs = new ArrayList<>();
    ArrayList<Expr> unMaterializedSlotRefs = new ArrayList<>();
    for (int i = 0; i < preRepeatExprs.size(); ++i) {
        Expr expr = preRepeatExprs.get(i);
        if (expr instanceof SlotRef && !((SlotRef) expr).getDesc().isMaterialized()) {
            unMaterializedSlotRefs.add(originalPreRepeatExprs.get(i));
        } else {
            materializedPreRepeatExprs.add(expr);
        }
    }
    // Only materialized exprs survive as pre-repeat exprs.
    preRepeatExprs = materializedPreRepeatExprs;
    for (Expr expr : unMaterializedSlotRefs) {
        Expr rExpr = outputTupleSmap.get(expr);
        if (rExpr instanceof SlotRef) {
            // Demote the corresponding output slot instead of deleting it.
            ((SlotRef) rExpr).getDesc().setIsMaterialized(false);
        }
    }
}
|
/**
 * Planner-side bookkeeping for GROUPING SETS / ROLLUP / CUBE queries.
 * Tracks the virtual GROUPING_ID slot, the repeat node's output tuple and its
 * substitution map, and one bitmap per grouping combination describing which
 * grouping exprs participate in that combination.
 */
class GroupingInfo {
    public static final String COL_GROUPING_ID = "GROUPING_ID";
    public static final String GROUPING_PREFIX = "GROUPING_PREFIX_";
    // Virtual slot carrying the GROUPING_ID value produced for each repeated row.
    private VirtualSlotRef groupingIDSlot;
    // Tuple holding all virtual (grouping) slots.
    private TupleDescriptor virtualTuple;
    // Output tuple of the repeat node.
    private TupleDescriptor outputTupleDesc;
    // Maps original exprs to slot refs in outputTupleDesc.
    private ExprSubstitutionMap outputTupleSmap;
    private List<SlotDescriptor> groupingSlotDescList;
    private Set<VirtualSlotRef> virtualSlotRefs;
    // One BitSet per grouping combination; bit i set => grouping expr i is grouped on.
    private List<BitSet> groupingIdList;
    private GroupByClause.GroupingType groupingType;
    // Mask with one bit per grouping expr, all set.
    private BitSet bitSetAll;
    // Exprs evaluated before the repeat node.
    private List<Expr> preRepeatExprs;
    /**
     * Creates the virtual tuple plus the GROUPING_ID slot and empty output
     * tuple/smap for the given group-by clause.
     */
    public GroupingInfo(Analyzer analyzer, GroupByClause groupByClause) throws AnalysisException {
        this.groupingType = groupByClause.getGroupingType();
        virtualSlotRefs = new LinkedHashSet<>();
        virtualTuple = analyzer.getDescTbl().createTupleDescriptor("VIRTUAL_TUPLE");
        groupingIDSlot = new VirtualSlotRef(COL_GROUPING_ID, Type.BIGINT, virtualTuple, new ArrayList<>());
        groupingIDSlot.analyze(analyzer);
        virtualSlotRefs.add(groupingIDSlot);
        outputTupleDesc = analyzer.getDescTbl().createTupleDescriptor("repeat-tuple");
        outputTupleSmap = new ExprSubstitutionMap();
        groupingSlotDescList = Lists.newArrayList();
        preRepeatExprs = Lists.newArrayList();
    }
    public Set<VirtualSlotRef> getVirtualSlotRefs() {
        return virtualSlotRefs;
    }
    public TupleDescriptor getVirtualTuple() {
        return virtualTuple;
    }
    public TupleDescriptor getOutputTupleDesc() {
        return outputTupleDesc;
    }
    public ExprSubstitutionMap getOutputTupleSmap() {
        return outputTupleSmap;
    }
    public List<SlotDescriptor> getGroupingSlotDescList() {
        return groupingSlotDescList;
    }
    public List<BitSet> getGroupingIdList() {
        return groupingIdList;
    }
    public List<Expr> getPreRepeatExprs() {
        return preRepeatExprs;
    }
    /**
     * Creates (or reuses) a virtual slot named after the given real slots, used by
     * GROUPING()/GROUPING_ID() calls over those slots.
     */
    public VirtualSlotRef addGroupingSlots(List<Expr> realSlots, Analyzer analyzer) throws AnalysisException {
        String colName = realSlots.stream().map(expr -> expr.toSql()).collect(Collectors.joining("_"));
        colName = GROUPING_PREFIX + colName;
        VirtualSlotRef virtualSlot = new VirtualSlotRef(colName, Type.BIGINT, virtualTuple, realSlots);
        virtualSlot.analyze(analyzer);
        // Reuse an existing equal slot so repeated GROUPING() calls share one slot.
        if (virtualSlotRefs.contains(virtualSlot)) {
            for (VirtualSlotRef vs : virtualSlotRefs) {
                if (vs.equals(virtualSlot)) {
                    return vs;
                }
            }
        }
        virtualSlotRefs.add(virtualSlot);
        return virtualSlot;
    }
    /**
     * Builds groupingIdList: the set of grouping combinations implied by the
     * grouping type (all subsets for CUBE, prefixes for ROLLUP, explicit sets for
     * GROUPING SETS), then appends the virtual slots to the grouping exprs.
     */
    public void buildRepeat(ArrayList<Expr> groupingExprs, List<ArrayList<Expr>> groupingSetList) {
        groupingIdList = new ArrayList<>();
        bitSetAll = new BitSet();
        bitSetAll.set(0, groupingExprs.size(), true);
        switch (groupingType) {
            case CUBE:
                // All 2^n subsets of the grouping exprs.
                for (int i = 0; i < (1 << groupingExprs.size()); i++) {
                    BitSet bitSet = new BitSet();
                    for (int j = 0; j < groupingExprs.size(); j++) {
                        if ((i & (1 << j)) > 0) {
                            bitSet.set(j, true);
                        }
                    }
                    groupingIdList.add(bitSet);
                }
                break;
            case ROLLUP:
                // Prefixes of increasing length, including the empty prefix.
                for (int i = 0; i <= groupingExprs.size(); i++) {
                    BitSet bitSet = new BitSet();
                    bitSet.set(0, i);
                    groupingIdList.add(bitSet);
                }
                break;
            case GROUPING_SETS:
                // One bitmap per explicitly listed grouping set (deduplicated).
                for (ArrayList<Expr> list : groupingSetList) {
                    BitSet bitSet = new BitSet();
                    for (int i = 0; i < groupingExprs.size(); i++) {
                        bitSet.set(i, list.contains(groupingExprs.get(i)));
                    }
                    if (!groupingIdList.contains(bitSet)) {
                        groupingIdList.add(bitSet);
                    }
                }
                break;
            default:
                Preconditions.checkState(false);
        }
        groupingExprs.addAll(virtualSlotRefs);
    }
    /**
     * Computes, for every virtual slot, the list of long values it takes for each
     * grouping combination (GROUPING_ID uses the full bit-reversed complemented
     * mask; GROUPING()/GROUPING_ID(cols) use only the bits of their real slots).
     *
     * @throws AnalysisException if a GROUPING() column is not in the GROUP BY list
     */
    public List<List<Long>> genGroupingList(ArrayList<Expr> groupingExprs) throws AnalysisException {
        List<List<Long>> groupingList = new ArrayList<>();
        for (SlotRef slot : virtualSlotRefs) {
            List<Long> glist = new ArrayList<>();
            for (BitSet bitSet : groupingIdList) {
                long l = 0L;
                if (slot.getColumnName().equalsIgnoreCase(COL_GROUPING_ID)) {
                    // Reverse bit order, then complement within the full mask.
                    BitSet newBitSet = new BitSet();
                    for (int i = 0; i < bitSetAll.length(); ++i) {
                        newBitSet.set(i, bitSet.get(bitSetAll.length() - i - 1));
                    }
                    newBitSet.flip(0, bitSetAll.length());
                    newBitSet.and(bitSetAll);
                    for (int i = 0; i < newBitSet.length(); ++i) {
                        l += newBitSet.get(i) ? (1L << i) : 0L;
                    }
                } else {
                    // Bit per referenced real slot: 1 when the slot is NOT grouped on.
                    int slotSize = ((VirtualSlotRef) slot).getRealSlots().size();
                    for (int i = 0; i < slotSize; ++i) {
                        int j = groupingExprs.indexOf(((VirtualSlotRef) slot).getRealSlots().get(i));
                        if (j < 0 || j >= bitSet.size()) {
                            throw new AnalysisException("Column " + ((VirtualSlotRef) slot).getRealColumnName()
                                    + " in GROUP_ID() does not exist in GROUP BY clause.");
                        }
                        l += bitSet.get(j) ? 0L : (1L << (slotSize - i - 1));
                    }
                }
                glist.add(l);
            }
            groupingList.add(glist);
        }
        return groupingList;
    }
    /**
     * Populates the repeat node's output tuple and smap: one nullable slot per
     * grouping expr, one slot per slot ref used inside agg exprs, and one slot per
     * virtual slot.
     */
    public void genOutputTupleDescAndSMap(Analyzer analyzer, ArrayList<Expr> groupingAndVirtualSlotExprs,
                                          List<FunctionCallExpr> aggExprs) {
        List<Expr> groupingExprs = Lists.newArrayList();
        List<Expr> virtualSlotExprs = Lists.newArrayList();
        for (Expr expr : groupingAndVirtualSlotExprs) {
            if (expr instanceof VirtualSlotRef) {
                virtualSlotExprs.add(expr);
            } else {
                groupingExprs.add(expr);
            }
        }
        for (Expr expr : groupingExprs) {
            SlotDescriptor slotDesc = addSlot(analyzer, expr);
            // Grouping slots become nullable: non-grouped combinations emit NULL.
            slotDesc.setIsNullable(true);
            groupingSlotDescList.add(slotDesc);
            preRepeatExprs.add(expr);
            if (!expr.isConstant()) {
                analyzer.createAuxEquivPredicate(new SlotRef(slotDesc), expr.clone());
            }
        }
        List<SlotRef> aggSlot = Lists.newArrayList();
        aggExprs.forEach(expr -> aggSlot.addAll(getSlotRefChildren(expr)));
        for (SlotRef slotRef : aggSlot) {
            addSlot(analyzer, slotRef);
            preRepeatExprs.add(slotRef);
        }
        for (Expr expr : virtualSlotExprs) {
            addSlot(analyzer, expr);
        }
    }
    // Adds one materialized output slot for expr and records the expr->slot mapping.
    private SlotDescriptor addSlot(Analyzer analyzer, Expr expr) {
        SlotDescriptor slotDesc = analyzer.addSlotDescriptor(outputTupleDesc);
        slotDesc.initFromExpr(expr);
        slotDesc.setIsMaterialized(true);
        if (expr instanceof SlotRef) {
            slotDesc.setColumn(((SlotRef) expr).getColumn());
        }
        if (expr instanceof VirtualSlotRef) {
            outputTupleSmap.put(expr.clone(), new VirtualSlotRef(slotDesc));
        } else {
            outputTupleSmap.put(expr.clone(), new SlotRef(slotDesc));
        }
        return slotDesc;
    }
    // Recursively collects all SlotRef descendants of root (root itself excluded).
    private List<SlotRef> getSlotRefChildren(Expr root) {
        List<SlotRef> result = new ArrayList<>();
        for (Expr child : root.getChildren()) {
            if (child instanceof SlotRef) {
                result.add((SlotRef) child);
            } else {
                result.addAll(getSlotRefChildren(child));
            }
        }
        return result;
    }
    /**
     * Rewrites GROUPING()/GROUPING_ID() calls in all exprs to reference virtual
     * slots.
     *
     * @throws AnalysisException when no grouping-sets/rollup/cube clause is present
     */
    public void substituteGroupingFn(List<Expr> exprs, Analyzer analyzer) throws AnalysisException {
        if (groupingType == GroupByClause.GroupingType.GROUP_BY) {
            throw new AnalysisException("cannot use GROUPING functions without [grouping sets|rollup|cube] a"
                    + "clause or grouping sets only have one element.");
        }
        ListIterator<Expr> i = exprs.listIterator();
        while (i.hasNext()) {
            Expr expr = i.next();
            substituteGroupingFn(expr, analyzer);
        }
    }
    /**
     * Recursively rewrites a single expr tree: each GROUPING call gets its child
     * replaced by the matching virtual slot and is re-analyzed.
     */
    public void substituteGroupingFn(Expr expr, Analyzer analyzer) throws AnalysisException {
        if (expr instanceof GroupingFunctionCallExpr) {
            // Already rewritten to a virtual slot: nothing to do.
            if (expr.getChildren().size() == 1 && expr.getChild(0) instanceof VirtualSlotRef) {
                return;
            }
            VirtualSlotRef vSlot = addGroupingSlots(((GroupingFunctionCallExpr) expr).getRealSlot(), analyzer);
            ((GroupingFunctionCallExpr) expr).resetChild(vSlot);
            expr.analyze(analyzer);
        } else if (expr.getChildren() != null && expr.getChildren().size() > 0) {
            for (Expr child : expr.getChildren()) {
                substituteGroupingFn(child, analyzer);
            }
        }
    }
}
|
/**
 * Planner-side bookkeeping for GROUPING SETS / ROLLUP / CUBE queries.
 * NOTE(review): this class definition appears twice in this file (dataset
 * context rows); this copy is kept byte-for-byte, comments only.
 */
class GroupingInfo {
    public static final String COL_GROUPING_ID = "GROUPING_ID";
    public static final String GROUPING_PREFIX = "GROUPING_PREFIX_";
    private VirtualSlotRef groupingIDSlot;
    private TupleDescriptor virtualTuple;
    private TupleDescriptor outputTupleDesc;
    private ExprSubstitutionMap outputTupleSmap;
    private List<SlotDescriptor> groupingSlotDescList;
    private Set<VirtualSlotRef> virtualSlotRefs;
    private List<BitSet> groupingIdList;
    private GroupByClause.GroupingType groupingType;
    private BitSet bitSetAll;
    private List<Expr> preRepeatExprs;
    public GroupingInfo(Analyzer analyzer, GroupByClause groupByClause) throws AnalysisException {
        this.groupingType = groupByClause.getGroupingType();
        virtualSlotRefs = new LinkedHashSet<>();
        virtualTuple = analyzer.getDescTbl().createTupleDescriptor("VIRTUAL_TUPLE");
        groupingIDSlot = new VirtualSlotRef(COL_GROUPING_ID, Type.BIGINT, virtualTuple, new ArrayList<>());
        groupingIDSlot.analyze(analyzer);
        virtualSlotRefs.add(groupingIDSlot);
        outputTupleDesc = analyzer.getDescTbl().createTupleDescriptor("repeat-tuple");
        outputTupleSmap = new ExprSubstitutionMap();
        groupingSlotDescList = Lists.newArrayList();
        preRepeatExprs = Lists.newArrayList();
    }
    public Set<VirtualSlotRef> getVirtualSlotRefs() {
        return virtualSlotRefs;
    }
    public TupleDescriptor getVirtualTuple() {
        return virtualTuple;
    }
    public TupleDescriptor getOutputTupleDesc() {
        return outputTupleDesc;
    }
    public ExprSubstitutionMap getOutputTupleSmap() {
        return outputTupleSmap;
    }
    public List<SlotDescriptor> getGroupingSlotDescList() {
        return groupingSlotDescList;
    }
    public List<BitSet> getGroupingIdList() {
        return groupingIdList;
    }
    public List<Expr> getPreRepeatExprs() {
        return preRepeatExprs;
    }
    // Creates (or reuses) a virtual slot for GROUPING()/GROUPING_ID() over realSlots.
    public VirtualSlotRef addGroupingSlots(List<Expr> realSlots, Analyzer analyzer) throws AnalysisException {
        String colName = realSlots.stream().map(expr -> expr.toSql()).collect(Collectors.joining("_"));
        colName = GROUPING_PREFIX + colName;
        VirtualSlotRef virtualSlot = new VirtualSlotRef(colName, Type.BIGINT, virtualTuple, realSlots);
        virtualSlot.analyze(analyzer);
        if (virtualSlotRefs.contains(virtualSlot)) {
            for (VirtualSlotRef vs : virtualSlotRefs) {
                if (vs.equals(virtualSlot)) {
                    return vs;
                }
            }
        }
        virtualSlotRefs.add(virtualSlot);
        return virtualSlot;
    }
    // Builds one BitSet per grouping combination according to the grouping type.
    public void buildRepeat(ArrayList<Expr> groupingExprs, List<ArrayList<Expr>> groupingSetList) {
        groupingIdList = new ArrayList<>();
        bitSetAll = new BitSet();
        bitSetAll.set(0, groupingExprs.size(), true);
        switch (groupingType) {
            case CUBE:
                for (int i = 0; i < (1 << groupingExprs.size()); i++) {
                    BitSet bitSet = new BitSet();
                    for (int j = 0; j < groupingExprs.size(); j++) {
                        if ((i & (1 << j)) > 0) {
                            bitSet.set(j, true);
                        }
                    }
                    groupingIdList.add(bitSet);
                }
                break;
            case ROLLUP:
                for (int i = 0; i <= groupingExprs.size(); i++) {
                    BitSet bitSet = new BitSet();
                    bitSet.set(0, i);
                    groupingIdList.add(bitSet);
                }
                break;
            case GROUPING_SETS:
                for (ArrayList<Expr> list : groupingSetList) {
                    BitSet bitSet = new BitSet();
                    for (int i = 0; i < groupingExprs.size(); i++) {
                        bitSet.set(i, list.contains(groupingExprs.get(i)));
                    }
                    if (!groupingIdList.contains(bitSet)) {
                        groupingIdList.add(bitSet);
                    }
                }
                break;
            default:
                Preconditions.checkState(false);
        }
        groupingExprs.addAll(virtualSlotRefs);
    }
    // Computes the per-combination long value of every virtual slot.
    public List<List<Long>> genGroupingList(ArrayList<Expr> groupingExprs) throws AnalysisException {
        List<List<Long>> groupingList = new ArrayList<>();
        for (SlotRef slot : virtualSlotRefs) {
            List<Long> glist = new ArrayList<>();
            for (BitSet bitSet : groupingIdList) {
                long l = 0L;
                if (slot.getColumnName().equalsIgnoreCase(COL_GROUPING_ID)) {
                    BitSet newBitSet = new BitSet();
                    for (int i = 0; i < bitSetAll.length(); ++i) {
                        newBitSet.set(i, bitSet.get(bitSetAll.length() - i - 1));
                    }
                    newBitSet.flip(0, bitSetAll.length());
                    newBitSet.and(bitSetAll);
                    for (int i = 0; i < newBitSet.length(); ++i) {
                        l += newBitSet.get(i) ? (1L << i) : 0L;
                    }
                } else {
                    int slotSize = ((VirtualSlotRef) slot).getRealSlots().size();
                    for (int i = 0; i < slotSize; ++i) {
                        int j = groupingExprs.indexOf(((VirtualSlotRef) slot).getRealSlots().get(i));
                        if (j < 0 || j >= bitSet.size()) {
                            throw new AnalysisException("Column " + ((VirtualSlotRef) slot).getRealColumnName()
                                    + " in GROUP_ID() does not exist in GROUP BY clause.");
                        }
                        l += bitSet.get(j) ? 0L : (1L << (slotSize - i - 1));
                    }
                }
                glist.add(l);
            }
            groupingList.add(glist);
        }
        return groupingList;
    }
    // Populates the repeat node's output tuple and substitution map.
    public void genOutputTupleDescAndSMap(Analyzer analyzer, ArrayList<Expr> groupingAndVirtualSlotExprs,
                                          List<FunctionCallExpr> aggExprs) {
        List<Expr> groupingExprs = Lists.newArrayList();
        List<Expr> virtualSlotExprs = Lists.newArrayList();
        for (Expr expr : groupingAndVirtualSlotExprs) {
            if (expr instanceof VirtualSlotRef) {
                virtualSlotExprs.add(expr);
            } else {
                groupingExprs.add(expr);
            }
        }
        for (Expr expr : groupingExprs) {
            SlotDescriptor slotDesc = addSlot(analyzer, expr);
            slotDesc.setIsNullable(true);
            groupingSlotDescList.add(slotDesc);
            preRepeatExprs.add(expr);
            if (!expr.isConstant()) {
                analyzer.createAuxEquivPredicate(new SlotRef(slotDesc), expr.clone());
            }
        }
        List<SlotRef> aggSlot = Lists.newArrayList();
        aggExprs.forEach(expr -> aggSlot.addAll(getSlotRefChildren(expr)));
        for (SlotRef slotRef : aggSlot) {
            addSlot(analyzer, slotRef);
            preRepeatExprs.add(slotRef);
        }
        for (Expr expr : virtualSlotExprs) {
            addSlot(analyzer, expr);
        }
    }
    private SlotDescriptor addSlot(Analyzer analyzer, Expr expr) {
        SlotDescriptor slotDesc = analyzer.addSlotDescriptor(outputTupleDesc);
        slotDesc.initFromExpr(expr);
        slotDesc.setIsMaterialized(true);
        if (expr instanceof SlotRef) {
            slotDesc.setColumn(((SlotRef) expr).getColumn());
        }
        if (expr instanceof VirtualSlotRef) {
            outputTupleSmap.put(expr.clone(), new VirtualSlotRef(slotDesc));
        } else {
            outputTupleSmap.put(expr.clone(), new SlotRef(slotDesc));
        }
        return slotDesc;
    }
    private List<SlotRef> getSlotRefChildren(Expr root) {
        List<SlotRef> result = new ArrayList<>();
        for (Expr child : root.getChildren()) {
            if (child instanceof SlotRef) {
                result.add((SlotRef) child);
            } else {
                result.addAll(getSlotRefChildren(child));
            }
        }
        return result;
    }
    public void substituteGroupingFn(List<Expr> exprs, Analyzer analyzer) throws AnalysisException {
        if (groupingType == GroupByClause.GroupingType.GROUP_BY) {
            throw new AnalysisException("cannot use GROUPING functions without [grouping sets|rollup|cube] a"
                    + "clause or grouping sets only have one element.");
        }
        ListIterator<Expr> i = exprs.listIterator();
        while (i.hasNext()) {
            Expr expr = i.next();
            substituteGroupingFn(expr, analyzer);
        }
    }
    public void substituteGroupingFn(Expr expr, Analyzer analyzer) throws AnalysisException {
        if (expr instanceof GroupingFunctionCallExpr) {
            if (expr.getChildren().size() == 1 && expr.getChild(0) instanceof VirtualSlotRef) {
                return;
            }
            VirtualSlotRef vSlot = addGroupingSlots(((GroupingFunctionCallExpr) expr).getRealSlot(), analyzer);
            ((GroupingFunctionCallExpr) expr).resetChild(vSlot);
            expr.analyze(analyzer);
        } else if (expr.getChildren() != null && expr.getChildren().size() > 0) {
            for (Expr child : expr.getChildren()) {
                substituteGroupingFn(child, analyzer);
            }
        }
    }
}
|
Ah, I see — I get the idea now.
|
/**
 * Resolves the sharding column(s) declared by a sharding strategy, falling back
 * to the rule's default sharding column.
 *
 * Fix: the complex-strategy branch previously returned getShardingColumns()
 * directly; a null value would propagate to the caller's String.replace and
 * throw a NullPointerException. All branches now funnel through
 * StringUtils.defaultString so the result is never null.
 *
 * @param shardingStrategyConfig strategy configuration to inspect
 * @return sharding column expression, never null (empty string when absent)
 */
private String getShardingColumn(final ShardingStrategyConfiguration shardingStrategyConfig) {
    String shardingColumn = defaultShardingColumn;
    if (shardingStrategyConfig instanceof ComplexShardingStrategyConfiguration) {
        shardingColumn = ((ComplexShardingStrategyConfiguration) shardingStrategyConfig).getShardingColumns();
    }
    if (shardingStrategyConfig instanceof StandardShardingStrategyConfiguration) {
        shardingColumn = ((StandardShardingStrategyConfiguration) shardingStrategyConfig).getShardingColumn();
    }
    return StringUtils.defaultString(shardingColumn, "");
}
|
return ((ComplexShardingStrategyConfiguration) shardingStrategyConfig).getShardingColumns();
|
/**
 * Resolves the sharding column(s) declared by a sharding strategy, falling back
 * to the rule's default sharding column; never returns null.
 */
private String getShardingColumn(final ShardingStrategyConfiguration shardingStrategyConfig) {
    // A configuration is at most one concrete strategy type, so these returns are exclusive.
    if (shardingStrategyConfig instanceof ComplexShardingStrategyConfiguration) {
        return StringUtils.defaultString(((ComplexShardingStrategyConfiguration) shardingStrategyConfig).getShardingColumns(), "");
    }
    if (shardingStrategyConfig instanceof StandardShardingStrategyConfiguration) {
        return StringUtils.defaultString(((StandardShardingStrategyConfiguration) shardingStrategyConfig).getShardingColumn(), "");
    }
    return StringUtils.defaultString(defaultShardingColumn, "");
}
|
/**
 * Sharding rule: resolves logic tables to table/binding/broadcast rules and
 * sharding strategies built from a ShardingRuleConfiguration.
 */
class ShardingRule implements SchemaRule, DataNodeContainedRule, TableContainedRule, InstanceAwareRule {
    private static final String EQUAL = "=";
    static {
        // Register SPI types so algorithm implementations can be looked up by name.
        ShardingSphereServiceLoader.register(ShardingAlgorithm.class);
        ShardingSphereServiceLoader.register(KeyGenerateAlgorithm.class);
    }
    // Data source names referenced by the configured (auto) table rules.
    private final Collection<String> dataSourceNames;
    // Algorithm/generator instances keyed by their configured name.
    private final Map<String, ShardingAlgorithm> shardingAlgorithms = new LinkedHashMap<>();
    private final Map<String, KeyGenerateAlgorithm> keyGenerators = new LinkedHashMap<>();
    // Table rules keyed by lower-cased logic table name.
    private final Map<String, TableRule> tableRules = new LinkedHashMap<>();
    private final Map<String, BindingTableRule> bindingTableRules = new LinkedHashMap<>();
    private final Collection<String> broadcastTables;
    private final ShardingStrategyConfiguration defaultDatabaseShardingStrategyConfig;
    private final ShardingStrategyConfiguration defaultTableShardingStrategyConfig;
    private final KeyGenerateAlgorithm defaultKeyGenerateAlgorithm;
    private final String defaultShardingColumn;
    // Lower-cased logic table name -> its actual data nodes.
    private final Map<String, Collection<DataNode>> shardingTableDataNodes;
    /**
     * Builds a sharding rule from a plain configuration: instantiates algorithms
     * via SPI, creates table/auto-table/binding rules and validates binding groups.
     * NOTE(review): largely duplicates the algorithm-provided constructor; final
     * field assignment prevents extracting a shared init method.
     *
     * @param config          sharding rule configuration
     * @param dataSourceNames all configured data source names
     */
    public ShardingRule(final ShardingRuleConfiguration config, final Collection<String> dataSourceNames) {
        this.dataSourceNames = getDataSourceNames(config.getTables(), config.getAutoTables(), dataSourceNames);
        config.getShardingAlgorithms().forEach((key, value) -> shardingAlgorithms.put(key, ShardingSphereAlgorithmFactory.createAlgorithm(value, ShardingAlgorithm.class)));
        config.getKeyGenerators().forEach((key, value) -> keyGenerators.put(key, ShardingSphereAlgorithmFactory.createAlgorithm(value, KeyGenerateAlgorithm.class)));
        tableRules.putAll(createTableRules(config.getTables(), config.getDefaultKeyGenerateStrategy()));
        tableRules.putAll(createAutoTableRules(config.getAutoTables(), config.getDefaultKeyGenerateStrategy()))
;
        bindingTableRules.putAll(createBindingTableRules(config.getBindingTableGroups()));
        broadcastTables = createBroadcastTables(config.getBroadcastTables());
        // Absent strategies fall back to a no-op strategy rather than null.
        defaultDatabaseShardingStrategyConfig = null == config.getDefaultDatabaseShardingStrategy() ? new NoneShardingStrategyConfiguration() : config.getDefaultDatabaseShardingStrategy();
        defaultTableShardingStrategyConfig = null == config.getDefaultTableShardingStrategy() ? new NoneShardingStrategyConfiguration() : config.getDefaultTableShardingStrategy();
        defaultKeyGenerateAlgorithm = null == config.getDefaultKeyGenerateStrategy()
                ? RequiredSPIRegistry.getRegisteredService(KeyGenerateAlgorithm.class)
                : keyGenerators.get(config.getDefaultKeyGenerateStrategy().getKeyGeneratorName());
        defaultShardingColumn = config.getDefaultShardingColumn();
        shardingTableDataNodes = createShardingTableDataNodes(tableRules);
        Preconditions.checkArgument(isValidBindingTableConfiguration(config.getBindingTableGroups()), "Invalid binding table configuration in ShardingRuleConfiguration.");
    }
    /**
     * Builds a sharding rule from a configuration that already carries
     * instantiated algorithm objects (no SPI creation needed).
     *
     * @param config          algorithm-provided sharding rule configuration
     * @param dataSourceNames all configured data source names
     */
    public ShardingRule(final AlgorithmProvidedShardingRuleConfiguration config, final Collection<String> dataSourceNames) {
        this.dataSourceNames = getDataSourceNames(config.getTables(), config.getAutoTables(), dataSourceNames);
        shardingAlgorithms.putAll(config.getShardingAlgorithms());
        keyGenerators.putAll(config.getKeyGenerators());
        tableRules.putAll(createTableRules(config.getTables(), config.getDefaultKeyGenerateStrategy()));
        tableRules.putAll(createAutoTableRules(config.getAutoTables(), config.getDefaultKeyGenerateStrategy()));
        bindingTableRules.putAll(createBindingTableRules(config.getBindingTableGroups()));
        broadcastTables = createBroadcastTables(config.getBroadcastTables());
        // Absent strategies fall back to a no-op strategy rather than null.
        defaultDatabaseShardingStrategyConfig = null == config.getDefaultDatabaseShardingStrategy() ? new NoneShardingStrategyConfiguration() : config.getDefaultDatabaseShardingStrategy();
        defaultTableShardingStrategyConfig = null == config.getDefaultTableShardingStrategy() ? new NoneShardingStrategyConfiguration() : config.getDefaultTableShardingStrategy();
        defaultKeyGenerateAlgorithm = null == config.getDefaultKeyGenerateStrategy()
                ? RequiredSPIRegistry.getRegisteredService(KeyGenerateAlgorithm.class)
                : keyGenerators.get(config.getDefaultKeyGenerateStrategy().getKeyGeneratorName());
        defaultShardingColumn = config.getDefaultShardingColumn();
        shardingTableDataNodes = createShardingTableDataNodes(tableRules);
        Preconditions.checkArgument(isValidBindingTableConfiguration(config.getBindingTableGroups()), "Invalid binding table configuration in ShardingRuleConfiguration.");
    }
private Map<String, Collection<DataNode>> createShardingTableDataNodes(final Map<String, TableRule> tableRules) {
Map<String, Collection<DataNode>> result = new HashMap<>(tableRules.size(), 1);
for (TableRule each : tableRules.values()) {
result.put(each.getLogicTable().toLowerCase(), each.getActualDataNodes());
}
return result;
}
    /**
     * Determines which data sources the rule actually uses: all configured data
     * sources when no table rules exist (or any table rule omits actual data
     * nodes), otherwise only the data sources referenced by the rules.
     */
    private Collection<String> getDataSourceNames(final Collection<ShardingTableRuleConfiguration> tableRuleConfigs,
                                                  final Collection<ShardingAutoTableRuleConfiguration> autoTableRuleConfigs, final Collection<String> dataSourceNames) {
        if (tableRuleConfigs.isEmpty() && autoTableRuleConfigs.isEmpty()) {
            return dataSourceNames;
        }
        // Any rule without explicit data nodes implicitly spans every data source.
        if (tableRuleConfigs.stream().map(ShardingTableRuleConfiguration::getActualDataNodes).anyMatch(each -> null == each || each.isEmpty())) {
            return dataSourceNames;
        }
        Collection<String> result = new LinkedHashSet<>();
        tableRuleConfigs.forEach(each -> result.addAll(getDataSourceNames(each)));
        autoTableRuleConfigs.forEach(each -> result.addAll(getDataSourceNames(each)));
        return result;
    }
private Collection<String> getDataSourceNames(final ShardingAutoTableRuleConfiguration shardingAutoTableRuleConfig) {
List<String> actualDataSources = new InlineExpressionParser(shardingAutoTableRuleConfig.getActualDataSources()).splitAndEvaluate();
return new HashSet<>(actualDataSources);
}
private Collection<String> getDataSourceNames(final ShardingTableRuleConfiguration shardingTableRuleConfig) {
List<String> actualDataNodes = new InlineExpressionParser(shardingTableRuleConfig.getActualDataNodes()).splitAndEvaluate();
return actualDataNodes.stream().map(each -> new DataNode(each).getDataSourceName()).collect(Collectors.toList());
}
    /**
     * Creates a TableRule per table configuration, keyed by lower-cased logic
     * table name; on duplicate names the first rule wins, insertion order kept.
     */
    private Map<String, TableRule> createTableRules(final Collection<ShardingTableRuleConfiguration> tableRuleConfigs, final KeyGenerateStrategyConfiguration defaultKeyGenerateStrategyConfig) {
        return tableRuleConfigs.stream().map(each -> new TableRule(each, dataSourceNames, getDefaultGenerateKeyColumn(defaultKeyGenerateStrategyConfig)))
                .collect(Collectors.toMap(each -> each.getLogicTable().toLowerCase(), Function.identity(), (oldValue, currentValue) -> oldValue, LinkedHashMap::new));
    }
    /**
     * Creates a TableRule per auto table configuration, keyed by lower-cased
     * logic table name; on duplicate names the first rule wins, order kept.
     */
    private Map<String, TableRule> createAutoTableRules(final Collection<ShardingAutoTableRuleConfiguration> autoTableRuleConfigs,
                                                        final KeyGenerateStrategyConfiguration defaultKeyGenerateStrategyConfig) {
        return autoTableRuleConfigs.stream().map(each -> createAutoTableRule(defaultKeyGenerateStrategyConfig, each))
                .collect(Collectors.toMap(each -> each.getLogicTable().toLowerCase(), Function.identity(), (oldValue, currentValue) -> oldValue, LinkedHashMap::new));
    }
    /**
     * Creates one auto table rule, requiring its sharding algorithm to be a
     * ShardingAutoTableAlgorithm (auto tables compute their own data nodes).
     */
    private TableRule createAutoTableRule(final KeyGenerateStrategyConfiguration defaultKeyGenerateStrategyConfig, final ShardingAutoTableRuleConfiguration autoTableRuleConfig) {
        ShardingAlgorithm shardingAlgorithm = null == autoTableRuleConfig.getShardingStrategy() ? null : shardingAlgorithms.get(autoTableRuleConfig.getShardingStrategy().getShardingAlgorithmName());
        Preconditions.checkState(shardingAlgorithm instanceof ShardingAutoTableAlgorithm, "Sharding auto table rule configuration must match sharding auto table algorithm.");
        return new TableRule(autoTableRuleConfig, dataSourceNames, (ShardingAutoTableAlgorithm) shardingAlgorithm, getDefaultGenerateKeyColumn(defaultKeyGenerateStrategyConfig));
    }
private String getDefaultGenerateKeyColumn(final KeyGenerateStrategyConfiguration defaultKeyGenerateStrategyConfig) {
return Optional.ofNullable(defaultKeyGenerateStrategyConfig).map(KeyGenerateStrategyConfiguration::getColumn).orElse(null);
}
private Collection<String> createBroadcastTables(final Collection<String> broadcastTables) {
Collection<String> result = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
result.addAll(broadcastTables);
return result;
}
    /**
     * Builds one BindingTableRule per group and maps every member logic table
     * (lower-cased) to its group's rule.
     */
    private Map<String, BindingTableRule> createBindingTableRules(final Collection<String> bindingTableGroups) {
        Map<String, BindingTableRule> result = new LinkedHashMap<>();
        for (String each : bindingTableGroups) {
            BindingTableRule bindingTableRule = createBindingTableRule(each);
            // Every table in the group shares the same rule instance.
            for (String logicTable : bindingTableRule.getAllLogicTables()) {
                result.put(logicTable.toLowerCase(), bindingTableRule);
            }
        }
        return result;
    }
    /**
     * Parses one comma-separated binding group into a BindingTableRule holding
     * the member table rules keyed by lower-cased logic table name.
     */
    private BindingTableRule createBindingTableRule(final String bindingTableGroup) {
        Map<String, TableRule> tableRules = Splitter.on(",").trimResults().splitToList(bindingTableGroup).stream()
                .map(this::getTableRule).collect(Collectors.toMap(each -> each.getLogicTable().toLowerCase(), Function.identity(), (oldValue, currentValue) -> oldValue, LinkedHashMap::new));
        BindingTableRule result = new BindingTableRule();
        result.getTableRules().putAll(tableRules);
        return result;
    }
    /**
     * Validates every binding group: all members must share data sources, actual
     * table naming pattern and (database + table) sharding algorithm expressions
     * with the group's first member.
     *
     * @return true when all groups are consistent
     */
    private boolean isValidBindingTableConfiguration(final Collection<String> bindingTableGroups) {
        for (String each : bindingTableGroups) {
            Collection<String> bindingTables = Splitter.on(",").trimResults().splitToList(each.toLowerCase());
            // A group of one table is trivially consistent.
            if (bindingTables.size() <= 1) {
                continue;
            }
            Iterator<String> iterator = bindingTables.iterator();
            // The first table of the group serves as the reference sample.
            TableRule sampleTableRule = getTableRule(iterator.next());
            while (iterator.hasNext()) {
                TableRule tableRule = getTableRule(iterator.next());
                if (!isValidActualDatasourceName(sampleTableRule, tableRule) || !isValidActualTableName(sampleTableRule, tableRule)) {
                    return false;
                }
                if (!isValidShardingAlgorithm(sampleTableRule, tableRule, true)
                        || !isValidShardingAlgorithm(sampleTableRule, tableRule, false)) {
                    return false;
                }
            }
        }
        return true;
    }
private boolean isValidActualDatasourceName(final TableRule sampleTableRule, final TableRule tableRule) {
return sampleTableRule.getActualDatasourceNames().equals(tableRule.getActualDatasourceNames());
}
    /**
     * Checks that on every shared data source, both rules' actual table names are
     * identical once each rule's table-name prefix is stripped (i.e. they use the
     * same suffix/sharding pattern).
     */
    private boolean isValidActualTableName(final TableRule sampleTableRule, final TableRule tableRule) {
        for (String each : sampleTableRule.getActualDatasourceNames()) {
            // Strip each rule's own prefix so only the sharding suffixes are compared.
            Collection<String> sampleActualTableNames =
                    sampleTableRule.getActualTableNames(each).stream().map(actualTableName -> actualTableName.replace(sampleTableRule.getTableDataNode().getPrefix(), "")).collect(Collectors.toSet());
            Collection<String> actualTableNames =
                    tableRule.getActualTableNames(each).stream().map(actualTableName -> actualTableName.replace(tableRule.getTableDataNode().getPrefix(), "")).collect(Collectors.toSet());
            if (!sampleActualTableNames.equals(actualTableNames)) {
                return false;
            }
        }
        return true;
    }
private boolean isValidShardingAlgorithm(final TableRule sampleTableRule, final TableRule tableRule, final boolean databaseAlgorithm) {
String sampleAlgorithmExpression = getAlgorithmExpression(sampleTableRule, databaseAlgorithm);
String algorithmExpression = getAlgorithmExpression(tableRule, databaseAlgorithm);
return sampleAlgorithmExpression.equalsIgnoreCase(algorithmExpression);
}
    /**
     * Normalizes a rule's inline algorithm expression for comparison: resolves
     * the (database or table) strategy — falling back to the rule defaults —
     * then strips the rule's data-node prefix and sharding column from the
     * algorithm's "algorithm-expression" property.
     *
     * @return normalized expression, empty string when no algorithm/expression exists
     */
    private String getAlgorithmExpression(final TableRule tableRule, final boolean databaseAlgorithm) {
        ShardingStrategyConfiguration shardingStrategyConfig = databaseAlgorithm
                ? null == tableRule.getDatabaseShardingStrategyConfig() ? defaultDatabaseShardingStrategyConfig : tableRule.getDatabaseShardingStrategyConfig()
                : null == tableRule.getTableShardingStrategyConfig() ? defaultTableShardingStrategyConfig : tableRule.getTableShardingStrategyConfig();
        ShardingAlgorithm shardingAlgorithm = shardingAlgorithms.get(shardingStrategyConfig.getShardingAlgorithmName());
        String originAlgorithmExpression = null == shardingAlgorithm ? "" : StringUtils.defaultString(shardingAlgorithm.getProps().getProperty("algorithm-expression"), "");
        String sampleDataNodePrefix = databaseAlgorithm ? tableRule.getDataSourceDataNode().getPrefix() : tableRule.getTableDataNode().getPrefix();
        String shardingColumn = getShardingColumn(shardingStrategyConfig);
        // Remove rule-specific parts so expressions of different tables become comparable.
        return originAlgorithmExpression.replace(sampleDataNodePrefix, "").replace(shardingColumn, "");
    }
@Override
public Collection<String> getAllTables() {
Collection<String> result = new HashSet<>(getTables());
result.addAll(getAllActualTables());
return result;
}
/**
* Get database sharding strategy configuration.
*
* @param tableRule table rule
* @return database sharding strategy configuration
*/
public ShardingStrategyConfiguration getDatabaseShardingStrategyConfiguration(final TableRule tableRule) {
return null == tableRule.getDatabaseShardingStrategyConfig() ? defaultDatabaseShardingStrategyConfig : tableRule.getDatabaseShardingStrategyConfig();
}
/**
* Get table sharding strategy configuration.
*
* @param tableRule table rule
* @return table sharding strategy configuration
*/
public ShardingStrategyConfiguration getTableShardingStrategyConfiguration(final TableRule tableRule) {
return null == tableRule.getTableShardingStrategyConfig() ? defaultTableShardingStrategyConfig : tableRule.getTableShardingStrategyConfig();
}
/**
* Find table rule.
*
* @param logicTableName logic table name
* @return table rule
*/
public Optional<TableRule> findTableRule(final String logicTableName) {
return Optional.ofNullable(tableRules.get(logicTableName.toLowerCase()));
}
/**
* Find table rule via actual table name.
*
* @param actualTableName actual table name
* @return table rule
*/
public Optional<TableRule> findTableRuleByActualTable(final String actualTableName) {
for (TableRule each : tableRules.values()) {
if (each.isExisted(actualTableName)) {
return Optional.of(each);
}
}
return Optional.empty();
}
    /**
     * Get table rule.
     *
     * @param logicTableName logic table name
     * @return configured table rule, or a synthetic rule spanning all data
     *         sources when the table is a broadcast table
     * @throws ShardingSphereConfigurationException when no rule exists and the
     *         table is not a broadcast table
     */
    public TableRule getTableRule(final String logicTableName) {
        Optional<TableRule> tableRule = findTableRule(logicTableName);
        if (tableRule.isPresent()) {
            return tableRule.get();
        }
        // Broadcast tables have no explicit rule; build one over all data sources.
        if (isBroadcastTable(logicTableName)) {
            return new TableRule(dataSourceNames, logicTableName);
        }
        throw new ShardingSphereConfigurationException("Cannot find table rule with logic table: '%s'", logicTableName);
    }
    /**
     * Judge whether logic table is all binding tables or not: true only when all
     * given tables belong to one binding group.
     *
     * @param logicTableNames logic table names
     * @return whether logic table is all binding tables or not
     */
    public boolean isAllBindingTables(final Collection<String> logicTableNames) {
        if (logicTableNames.isEmpty()) {
            return false;
        }
        // Locate any member's binding group, then check the group covers every table.
        Optional<BindingTableRule> bindingTableRule = findBindingTableRule(logicTableNames);
        if (!bindingTableRule.isPresent()) {
            return false;
        }
        // Case-insensitive set: group membership must ignore identifier casing.
        Collection<String> result = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
        result.addAll(bindingTableRule.get().getAllLogicTables());
        return !result.isEmpty() && result.containsAll(logicTableNames);
    }
/**
 * Judge whether logic table is all binding tables, additionally validating join conditions
 * for SELECT statements that contain a join query.
 *
 * @param schema schema
 * @param sqlStatementContext sqlStatementContext
 * @param logicTableNames logic table names
 * @return whether logic table is all binding tables
 */
public boolean isAllBindingTables(final ShardingSphereSchema schema, final SQLStatementContext<?> sqlStatementContext, final Collection<String> logicTableNames) {
    // Non-join statements only need the plain binding-group membership check.
    if (!(sqlStatementContext instanceof SelectStatementContext && ((SelectStatementContext) sqlStatementContext).isContainsJoinQuery())) {
        return isAllBindingTables(logicTableNames);
    }
    if (!isAllBindingTables(logicTableNames)) {
        return false;
    }
    // For joins, require sharding columns in either the ON clauses or the WHERE clauses.
    SelectStatementContext select = (SelectStatementContext) sqlStatementContext;
    Collection<WhereSegment> joinSegments = WhereExtractUtil.getJoinWhereSegments(select.getSqlStatement());
    return isJoinConditionContainsShardingColumns(schema, select, logicTableNames, joinSegments)
            || isJoinConditionContainsShardingColumns(schema, select, logicTableNames, select.getWhereSegments());
}
// Returns the binding rule of the first given table that belongs to any binding group.
private Optional<BindingTableRule> findBindingTableRule(final Collection<String> logicTableNames) {
    return logicTableNames.stream()
            .map(this::findBindingTableRule)
            .filter(Optional::isPresent)
            .map(Optional::get)
            .findFirst();
}
/**
 * Find binding table rule via logic table name.
 *
 * @param logicTableName logic table name (matched case-insensitively via lower-cased keys)
 * @return binding table rule, or empty when the table is not in any binding group
 */
public Optional<BindingTableRule> findBindingTableRule(final String logicTableName) {
    BindingTableRule rule = bindingTableRules.get(logicTableName.toLowerCase());
    return Optional.ofNullable(rule);
}
/**
 * Judge whether logic table is all broadcast tables or not.
 *
 * @param logicTableNames logic table names
 * @return whether every given logic table is a broadcast table (false for an empty input)
 */
public boolean isAllBroadcastTables(final Collection<String> logicTableNames) {
    if (logicTableNames.isEmpty()) {
        return false;
    }
    return broadcastTables.containsAll(logicTableNames);
}
/**
 * Judge whether logic table is all sharding table or not.
 *
 * @param logicTableNames logic table names
 * @return whether every given logic table has a sharding table rule (false for an empty input)
 */
public boolean isAllShardingTables(final Collection<String> logicTableNames) {
    return !logicTableNames.isEmpty() && logicTableNames.stream().allMatch(this::isShardingTable);
}
/**
 * Judge whether logic table is sharding table or not.
 *
 * @param logicTableName logic table name (matched case-insensitively via lower-cased keys)
 * @return whether a table rule exists for the logic table
 */
public boolean isShardingTable(final String logicTableName) {
    String key = logicTableName.toLowerCase();
    return tableRules.containsKey(key);
}
/**
 * Judge whether logic table is broadcast table or not.
 *
 * @param logicTableName logic table name
 * @return whether logic table is broadcast table or not
 */
public boolean isBroadcastTable(final String logicTableName) {
    // broadcastTables is built as a case-insensitive TreeSet, so this lookup ignores case.
    return broadcastTables.contains(logicTableName);
}
/**
 * Judge whether all tables are in same data source or not.
 *
 * @param logicTableNames logic table names
 * @return whether all given sharding tables resolve to exactly one data source
 */
public boolean isAllTablesInSameDataSource(final Collection<String> logicTableNames) {
    // Renamed local: the previous name shadowed the dataSourceNames field, which was misleading.
    Collection<String> usedDataSources = logicTableNames.stream().map(each -> tableRules.get(each.toLowerCase()))
            .filter(Objects::nonNull).flatMap(each -> each.getActualDatasourceNames().stream()).collect(Collectors.toSet());
    return 1 == usedDataSources.size();
}
/**
 * Judge whether a table rule exists for logic tables.
 *
 * @param logicTableNames logic table names
 * @return whether any given table is either a sharding table or a broadcast table
 */
public boolean tableRuleExists(final Collection<String> logicTableNames) {
    return logicTableNames.stream().anyMatch(each -> isShardingTable(each) || isBroadcastTable(each));
}
/**
 * Find sharding column.
 *
 * @param columnName column name
 * @param tableName table name (matched case-insensitively via lower-cased keys)
 * @return the matching sharding column of the table, or empty when the table has no rule or the column is not a sharding column
 */
public Optional<String> findShardingColumn(final String columnName, final String tableName) {
    TableRule tableRule = tableRules.get(tableName.toLowerCase());
    if (null == tableRule) {
        return Optional.empty();
    }
    return findShardingColumn(tableRule, columnName);
}
// Checks the database-level strategy first, then the table-level strategy.
private Optional<String> findShardingColumn(final TableRule tableRule, final String columnName) {
    Optional<String> fromDatabaseStrategy = findShardingColumn(getDatabaseShardingStrategyConfiguration(tableRule), columnName);
    return fromDatabaseStrategy.isPresent()
            ? fromDatabaseStrategy
            : findShardingColumn(getTableShardingStrategyConfiguration(tableRule), columnName);
}
// Resolves the sharding column of a strategy configuration, comparing case-insensitively.
// Fix: guard against NPE when a standard strategy declares no column AND no rule-wide
// defaultShardingColumn is configured (the field may be null).
private Optional<String> findShardingColumn(final ShardingStrategyConfiguration shardingStrategyConfig, final String columnName) {
    if (shardingStrategyConfig instanceof StandardShardingStrategyConfiguration) {
        String configured = ((StandardShardingStrategyConfiguration) shardingStrategyConfig).getShardingColumn();
        String shardingColumn = null == configured ? defaultShardingColumn : configured;
        if (null == shardingColumn) {
            // Neither the strategy nor the rule defines a column; nothing can match.
            return Optional.empty();
        }
        return shardingColumn.equalsIgnoreCase(columnName) ? Optional.of(shardingColumn) : Optional.empty();
    }
    if (shardingStrategyConfig instanceof ComplexShardingStrategyConfiguration) {
        // Complex strategies list multiple comma-separated columns.
        List<String> shardingColumns = Splitter.on(",").trimResults().splitToList(((ComplexShardingStrategyConfiguration) shardingStrategyConfig).getShardingColumns());
        for (String each : shardingColumns) {
            if (each.equalsIgnoreCase(columnName)) {
                return Optional.of(each);
            }
        }
    }
    return Optional.empty();
}
/**
 * Judge whether given logic table column is generate key column or not.
 *
 * @param columnName column name (compared case-insensitively)
 * @param tableName table name (matched case-insensitively via lower-cased keys)
 * @return whether given logic table column is generate key column or not
 */
public boolean isGenerateKeyColumn(final String columnName, final String tableName) {
    TableRule tableRule = tableRules.get(tableName.toLowerCase());
    return null != tableRule && isGenerateKeyColumn(tableRule, columnName);
}
// True when the rule declares a generate-key column equal (ignoring case) to the given name.
private boolean isGenerateKeyColumn(final TableRule tableRule, final String columnName) {
    return tableRule.getGenerateKeyColumn().filter(each -> each.equalsIgnoreCase(columnName)).isPresent();
}
/**
 * Find column name of generated key.
 *
 * @param logicTableName logic table name
 * @return column name of generated key, or empty when the table has no rule or no generate-key column
 */
public Optional<String> findGenerateKeyColumnName(final String logicTableName) {
    // flatMap on an absent generate-key column already yields empty, so no extra filter is needed.
    return findTableRule(logicTableName).flatMap(TableRule::getGenerateKeyColumn);
}
/**
 * Generate a key value for the given logic table.
 *
 * @param logicTableName logic table name
 * @return generated key value
 * @throws ShardingSphereConfigurationException when no table rule exists for the logic table
 */
public Comparable<?> generateKey(final String logicTableName) {
    Optional<TableRule> tableRule = findTableRule(logicTableName);
    if (!tableRule.isPresent()) {
        throw new ShardingSphereConfigurationException("Cannot find strategy for generate keys.");
    }
    // Use the table's named generator when configured, otherwise the rule-wide default.
    // NOTE(review): if the configured name is absent from keyGenerators this yields null and
    // NPEs on the next line — confirm upstream config validation guarantees the name resolves.
    KeyGenerateAlgorithm keyGenerator = null != tableRule.get().getKeyGeneratorName() ? keyGenerators.get(tableRule.get().getKeyGeneratorName()) : defaultKeyGenerateAlgorithm;
    return keyGenerator.generateKey();
}
/**
 * Find data node by logic table name.
 *
 * @param logicTableName logic table name
 * @return first actual data node of the table
 * @throws ShardingSphereConfigurationException when no rule exists and the table is not broadcast
 */
public DataNode getDataNode(final String logicTableName) {
    return getTableRule(logicTableName).getActualDataNodes().get(0);
}
/**
 * Get sharding logic table names.
 *
 * @param logicTableNames logic table names
 * @return those of the given names that have a sharding table rule, in input order
 */
public Collection<String> getShardingLogicTableNames(final Collection<String> logicTableNames) {
    return logicTableNames.stream().filter(this::isShardingTable).collect(Collectors.toCollection(LinkedList::new));
}
/**
 * Get sharding rule table names.
 *
 * @param logicTableNames logic table names
 * @return those of the given names that are sharding or broadcast tables, in input order
 */
public Collection<String> getShardingRuleTableNames(final Collection<String> logicTableNames) {
    Collection<String> result = new LinkedList<>();
    for (String each : logicTableNames) {
        if (isShardingTable(each) || isBroadcastTable(each)) {
            result.add(each);
        }
    }
    return result;
}
/**
 * Get logic and actual binding tables.
 *
 * @param dataSourceName data source name
 * @param logicTable logic table name
 * @param actualTable actual table name
 * @param availableLogicBindingTables available logic binding table names
 * @return logic-to-actual table mapping, or an empty map when the table is not in a binding group
 */
public Map<String, String> getLogicAndActualTablesFromBindingTable(final String dataSourceName,
                                                                   final String logicTable, final String actualTable, final Collection<String> availableLogicBindingTables) {
    return findBindingTableRule(logicTable)
            .map(rule -> rule.getLogicAndActualTables(dataSourceName, logicTable, actualTable, availableLogicBindingTables))
            .orElse(Collections.emptyMap());
}
/**
 * Get logic tables via actual table name.
 *
 * @param actualTable actual table name
 * @return logic table names of every rule containing the actual table
 */
public Collection<String> getLogicTablesByActualTable(final String actualTable) {
    Collection<String> result = new HashSet<>();
    for (TableRule each : tableRules.values()) {
        if (each.isExisted(actualTable)) {
            result.add(each.getLogicTable());
        }
    }
    return result;
}
@Override
public Map<String, Collection<DataNode>> getAllDataNodes() {
    // Precomputed in the constructor; keyed by lower-cased logic table name.
    return shardingTableDataNodes;
}
@Override
public Collection<DataNode> getDataNodesByTableName(final String tableName) {
    // Lookup is by lower-cased logic table name; unknown tables yield an empty list.
    Collection<DataNode> dataNodes = shardingTableDataNodes.get(tableName.toLowerCase());
    return null != dataNodes ? dataNodes : Collections.emptyList();
}
// Collects the distinct actual table names across every configured table rule.
private Collection<String> getAllActualTables() {
    Collection<String> result = new HashSet<>();
    for (TableRule each : tableRules.values()) {
        for (DataNode dataNode : each.getActualDataNodes()) {
            result.add(dataNode.getTableName());
        }
    }
    return result;
}
@Override
public Optional<String> findFirstActualTable(final String logicTable) {
    // The first actual data node is taken as the representative actual table.
    return findTableRule(logicTable).map(rule -> rule.getActualDataNodes().get(0).getTableName());
}
@Override
public boolean isNeedAccumulate(final Collection<String> tables) {
    // Broadcast tables hold identical data on every source, so their results need no accumulation.
    return !isAllBroadcastTables(tables);
}
@Override
public Optional<String> findLogicTableByActualTable(final String actualTable) {
    // Delegates to the rule lookup, exposing only the logic table name.
    Optional<TableRule> tableRule = findTableRuleByActualTable(actualTable);
    return tableRule.map(TableRule::getLogicTable);
}
@Override
public Collection<String> getTables() {
    // Union of sharded logic table names and broadcast table names.
    Collection<String> result = new HashSet<>();
    for (TableRule each : tableRules.values()) {
        result.add(each.getLogicTable());
    }
    result.addAll(broadcastTables);
    return result;
}
@Override
public Optional<String> findActualTableByCatalog(final String catalog, final String logicTable) {
    Optional<TableRule> tableRule = findTableRule(logicTable);
    return tableRule.flatMap(rule -> findActualTableFromActualDataNode(catalog, rule.getActualDataNodes()));
}
// Returns the table name of the first data node whose data source matches the catalog (ignoring case).
private Optional<String> findActualTableFromActualDataNode(final String catalog, final List<DataNode> actualDataNodes) {
    for (DataNode each : actualDataNodes) {
        if (each.getDataSourceName().equalsIgnoreCase(catalog)) {
            return Optional.of(each.getTableName());
        }
    }
    return Optional.empty();
}
@Override
public String getType() {
    // Rule type identifier: the simple class name "ShardingRule".
    return ShardingRule.class.getSimpleName();
}
/**
 * Checks whether every given table appears in an equi-join condition on its sharding column,
 * at both database and table level where a standard (single-column) strategy is configured.
 */
private boolean isJoinConditionContainsShardingColumns(final ShardingSphereSchema schema, final SelectStatementContext select,
                                                       final Collection<String> tableNames, final Collection<WhereSegment> whereSegments) {
    Collection<String> databaseJoinConditionTables = new HashSet<>(tableNames.size());
    Collection<String> tableJoinConditionTables = new HashSet<>(tableNames.size());
    for (WhereSegment each : whereSegments) {
        Collection<AndPredicate> andPredicates = ExpressionExtractUtil.getAndPredicates(each.getExpr());
        // More than one AND-group means OR-connected predicates; the condition is not guaranteed on every branch.
        if (andPredicates.size() > 1) {
            return false;
        }
        for (AndPredicate andPredicate : andPredicates) {
            databaseJoinConditionTables.addAll(getJoinConditionTables(schema, select, andPredicate.getPredicates(), true));
            tableJoinConditionTables.addAll(getJoinConditionTables(schema, select, andPredicate.getPredicates(), false));
        }
    }
    // Only standard strategies are checked; other strategy kinds pass unconditionally.
    TableRule tableRule = getTableRule(tableNames.iterator().next());
    boolean containsDatabaseShardingColumns = !(getDatabaseShardingStrategyConfiguration(tableRule) instanceof StandardShardingStrategyConfiguration)
            || databaseJoinConditionTables.containsAll(tableNames);
    boolean containsTableShardingColumns = !(getTableShardingStrategyConfiguration(tableRule) instanceof StandardShardingStrategyConfiguration) || tableJoinConditionTables.containsAll(tableNames);
    return containsDatabaseShardingColumns && containsTableShardingColumns;
}
/**
 * Collects the tables that take part in column-equals-column join conditions where BOTH sides
 * are sharding columns of their respective tables, at the requested strategy level.
 *
 * @param isDatabaseJoinCondition true to check database-level strategies, false for table-level
 */
private Collection<String> getJoinConditionTables(final ShardingSphereSchema schema, final SelectStatementContext select,
                                                  final Collection<ExpressionSegment> predicates, final boolean isDatabaseJoinCondition) {
    Collection<String> result = new LinkedList<>();
    for (ExpressionSegment each : predicates) {
        // Only "col = col" binary expressions qualify as join conditions.
        if (!isJoinConditionExpression(each)) {
            continue;
        }
        ColumnSegment leftColumn = (ColumnSegment) ((BinaryOperationExpression) each).getLeft();
        ColumnSegment rightColumn = (ColumnSegment) ((BinaryOperationExpression) each).getRight();
        // Resolve which table each column expression belongs to, using the statement context and schema.
        Map<String, String> columnExpressionTableNames = select.getTablesContext().findTableNamesByColumnSegment(Arrays.asList(leftColumn, rightColumn), schema);
        Optional<TableRule> leftTableRule = findTableRule(columnExpressionTableNames.get(leftColumn.getExpression()));
        Optional<TableRule> rightTableRule = findTableRule(columnExpressionTableNames.get(rightColumn.getExpression()));
        if (!leftTableRule.isPresent() || !rightTableRule.isPresent()) {
            continue;
        }
        ShardingStrategyConfiguration leftConfiguration = isDatabaseJoinCondition
                ? getDatabaseShardingStrategyConfiguration(leftTableRule.get())
                : getTableShardingStrategyConfiguration(leftTableRule.get());
        ShardingStrategyConfiguration rightConfiguration = isDatabaseJoinCondition
                ? getDatabaseShardingStrategyConfiguration(rightTableRule.get())
                : getTableShardingStrategyConfiguration(rightTableRule.get());
        // Record both tables only when both joined columns are sharding columns.
        if (findShardingColumn(leftConfiguration, leftColumn.getIdentifier().getValue()).isPresent()
                && findShardingColumn(rightConfiguration, rightColumn.getIdentifier().getValue()).isPresent()) {
            result.add(columnExpressionTableNames.get(leftColumn.getExpression()));
            result.add(columnExpressionTableNames.get(rightColumn.getExpression()));
        }
    }
    return result;
}
// True only for a binary "=" expression whose both operands are plain column references.
private boolean isJoinConditionExpression(final ExpressionSegment expression) {
    if (!(expression instanceof BinaryOperationExpression)) {
        return false;
    }
    BinaryOperationExpression binary = (BinaryOperationExpression) expression;
    if (!EQUAL.equals(binary.getOperator())) {
        return false;
    }
    return binary.getLeft() instanceof ColumnSegment && binary.getRight() instanceof ColumnSegment;
}
@Override
public void setInstanceContext(final InstanceContext instanceContext) {
    // Propagate the instance context to every key generator that declares it needs one.
    for (KeyGenerateAlgorithm each : keyGenerators.values()) {
        if (each instanceof ShardingSphereInstanceRequiredAlgorithm) {
            ((ShardingSphereInstanceRequiredAlgorithm) each).setInstanceContext(instanceContext);
        }
    }
    if (defaultKeyGenerateAlgorithm instanceof ShardingSphereInstanceRequiredAlgorithm) {
        ((ShardingSphereInstanceRequiredAlgorithm) defaultKeyGenerateAlgorithm).setInstanceContext(instanceContext);
    }
}
}
|
class ShardingRule implements SchemaRule, DataNodeContainedRule, TableContainedRule, InstanceAwareRule {
// Operator literal used to recognize equi-join conditions.
private static final String EQUAL = "=";

static {
    // Register SPI interfaces once per class load, before any algorithm lookup.
    ShardingSphereServiceLoader.register(ShardingAlgorithm.class);
    ShardingSphereServiceLoader.register(KeyGenerateAlgorithm.class);
}

// Data source names this rule spans (may be derived from configured data nodes).
private final Collection<String> dataSourceNames;

// Sharding algorithm instances keyed by configured algorithm name.
private final Map<String, ShardingAlgorithm> shardingAlgorithms = new LinkedHashMap<>();

// Key generate algorithm instances keyed by configured generator name.
private final Map<String, KeyGenerateAlgorithm> keyGenerators = new LinkedHashMap<>();

// Table rules keyed by lower-cased logic table name.
private final Map<String, TableRule> tableRules = new LinkedHashMap<>();

// Binding table rules keyed by lower-cased logic table name; tables of one group share an instance.
private final Map<String, BindingTableRule> bindingTableRules = new LinkedHashMap<>();

// Broadcast table names, stored in a case-insensitive set.
private final Collection<String> broadcastTables;

// Rule-wide fallbacks used when a table declares no strategy of its own.
private final ShardingStrategyConfiguration defaultDatabaseShardingStrategyConfig;

private final ShardingStrategyConfiguration defaultTableShardingStrategyConfig;

private final KeyGenerateAlgorithm defaultKeyGenerateAlgorithm;

// Rule-wide default sharding column; may be null when not configured.
private final String defaultShardingColumn;

// Data nodes per lower-cased logic table name, precomputed from table rules.
private final Map<String, Collection<DataNode>> shardingTableDataNodes;
/**
 * Builds a sharding rule from a standard (YAML/API) sharding rule configuration.
 * Initialization order matters: algorithms must exist before table rules reference them,
 * and table rules must exist before binding groups and data node maps are derived.
 *
 * @param config sharding rule configuration
 * @param dataSourceNames candidate data source names
 * @throws IllegalArgumentException when the binding table configuration is invalid
 */
public ShardingRule(final ShardingRuleConfiguration config, final Collection<String> dataSourceNames) {
    this.dataSourceNames = getDataSourceNames(config.getTables(), config.getAutoTables(), dataSourceNames);
    // Instantiate configured algorithms via the SPI factory.
    config.getShardingAlgorithms().forEach((key, value) -> shardingAlgorithms.put(key, ShardingSphereAlgorithmFactory.createAlgorithm(value, ShardingAlgorithm.class)));
    config.getKeyGenerators().forEach((key, value) -> keyGenerators.put(key, ShardingSphereAlgorithmFactory.createAlgorithm(value, KeyGenerateAlgorithm.class)));
    tableRules.putAll(createTableRules(config.getTables(), config.getDefaultKeyGenerateStrategy()))
;
    tableRules.putAll(createAutoTableRules(config.getAutoTables(), config.getDefaultKeyGenerateStrategy()));
    bindingTableRules.putAll(createBindingTableRules(config.getBindingTableGroups()));
    broadcastTables = createBroadcastTables(config.getBroadcastTables());
    // Fall back to no-op strategies / SPI-registered key generator when defaults are unset.
    defaultDatabaseShardingStrategyConfig = null == config.getDefaultDatabaseShardingStrategy() ? new NoneShardingStrategyConfiguration() : config.getDefaultDatabaseShardingStrategy();
    defaultTableShardingStrategyConfig = null == config.getDefaultTableShardingStrategy() ? new NoneShardingStrategyConfiguration() : config.getDefaultTableShardingStrategy();
    defaultKeyGenerateAlgorithm = null == config.getDefaultKeyGenerateStrategy()
            ? RequiredSPIRegistry.getRegisteredService(KeyGenerateAlgorithm.class)
            : keyGenerators.get(config.getDefaultKeyGenerateStrategy().getKeyGeneratorName())
;
    defaultShardingColumn = config.getDefaultShardingColumn();
    shardingTableDataNodes = createShardingTableDataNodes(tableRules);
    Preconditions.checkArgument(isValidBindingTableConfiguration(config.getBindingTableGroups()), "Invalid binding table configuration in ShardingRuleConfiguration.");
}
/**
 * Builds a sharding rule from an algorithm-provided configuration, where algorithm and key
 * generator instances are supplied directly instead of being created via the SPI factory.
 * Mirrors the primary constructor otherwise; see its comments for the initialization order.
 *
 * @param config algorithm-provided sharding rule configuration
 * @param dataSourceNames candidate data source names
 * @throws IllegalArgumentException when the binding table configuration is invalid
 */
public ShardingRule(final AlgorithmProvidedShardingRuleConfiguration config, final Collection<String> dataSourceNames) {
    this.dataSourceNames = getDataSourceNames(config.getTables(), config.getAutoTables(), dataSourceNames);
    // Adopt pre-built algorithm instances as-is.
    shardingAlgorithms.putAll(config.getShardingAlgorithms());
    keyGenerators.putAll(config.getKeyGenerators());
    tableRules.putAll(createTableRules(config.getTables(), config.getDefaultKeyGenerateStrategy()));
    tableRules.putAll(createAutoTableRules(config.getAutoTables(), config.getDefaultKeyGenerateStrategy()));
    bindingTableRules.putAll(createBindingTableRules(config.getBindingTableGroups()));
    broadcastTables = createBroadcastTables(config.getBroadcastTables());
    defaultDatabaseShardingStrategyConfig = null == config.getDefaultDatabaseShardingStrategy() ? new NoneShardingStrategyConfiguration() : config.getDefaultDatabaseShardingStrategy();
    defaultTableShardingStrategyConfig = null == config.getDefaultTableShardingStrategy() ? new NoneShardingStrategyConfiguration() : config.getDefaultTableShardingStrategy();
    defaultKeyGenerateAlgorithm = null == config.getDefaultKeyGenerateStrategy()
            ? RequiredSPIRegistry.getRegisteredService(KeyGenerateAlgorithm.class)
            : keyGenerators.get(config.getDefaultKeyGenerateStrategy().getKeyGeneratorName());
    defaultShardingColumn = config.getDefaultShardingColumn();
    shardingTableDataNodes = createShardingTableDataNodes(tableRules);
    Preconditions.checkArgument(isValidBindingTableConfiguration(config.getBindingTableGroups()), "Invalid binding table configuration in ShardingRuleConfiguration.");
}
// Builds the lower-cased logic-table-name -> data-node index used by getAllDataNodes().
private Map<String, Collection<DataNode>> createShardingTableDataNodes(final Map<String, TableRule> tableRules) {
    Map<String, Collection<DataNode>> result = new HashMap<>(tableRules.size(), 1);
    tableRules.values().forEach(each -> result.put(each.getLogicTable().toLowerCase(), each.getActualDataNodes()));
    return result;
}
// Derives the data source names actually referenced by the table configurations.
// Falls back to the full candidate list when nothing is configured or any table omits data nodes.
private Collection<String> getDataSourceNames(final Collection<ShardingTableRuleConfiguration> tableRuleConfigs,
                                              final Collection<ShardingAutoTableRuleConfiguration> autoTableRuleConfigs, final Collection<String> dataSourceNames) {
    if (tableRuleConfigs.isEmpty() && autoTableRuleConfigs.isEmpty()) {
        return dataSourceNames;
    }
    boolean anyMissingDataNodes = tableRuleConfigs.stream()
            .map(ShardingTableRuleConfiguration::getActualDataNodes)
            .anyMatch(each -> null == each || each.isEmpty());
    if (anyMissingDataNodes) {
        return dataSourceNames;
    }
    // LinkedHashSet keeps first-seen order while deduplicating.
    Collection<String> result = new LinkedHashSet<>();
    for (ShardingTableRuleConfiguration each : tableRuleConfigs) {
        result.addAll(getDataSourceNames(each));
    }
    for (ShardingAutoTableRuleConfiguration each : autoTableRuleConfigs) {
        result.addAll(getDataSourceNames(each));
    }
    return result;
}
// Expands the auto table's inline data source expression into distinct data source names.
private Collection<String> getDataSourceNames(final ShardingAutoTableRuleConfiguration shardingAutoTableRuleConfig) {
    return new HashSet<>(new InlineExpressionParser(shardingAutoTableRuleConfig.getActualDataSources()).splitAndEvaluate());
}
// Expands the table's inline data node expression and extracts each node's data source name.
private Collection<String> getDataSourceNames(final ShardingTableRuleConfiguration shardingTableRuleConfig) {
    Collection<String> result = new ArrayList<>();
    for (String each : new InlineExpressionParser(shardingTableRuleConfig.getActualDataNodes()).splitAndEvaluate()) {
        result.add(new DataNode(each).getDataSourceName());
    }
    return result;
}
// Builds table rules keyed by lower-cased logic table name; the first rule for a name wins.
private Map<String, TableRule> createTableRules(final Collection<ShardingTableRuleConfiguration> tableRuleConfigs, final KeyGenerateStrategyConfiguration defaultKeyGenerateStrategyConfig) {
    Map<String, TableRule> result = new LinkedHashMap<>();
    for (ShardingTableRuleConfiguration each : tableRuleConfigs) {
        TableRule tableRule = new TableRule(each, dataSourceNames, getDefaultGenerateKeyColumn(defaultKeyGenerateStrategyConfig));
        result.putIfAbsent(tableRule.getLogicTable().toLowerCase(), tableRule);
    }
    return result;
}
// Builds auto table rules keyed by lower-cased logic table name; the first rule for a name wins.
private Map<String, TableRule> createAutoTableRules(final Collection<ShardingAutoTableRuleConfiguration> autoTableRuleConfigs,
                                                    final KeyGenerateStrategyConfiguration defaultKeyGenerateStrategyConfig) {
    Map<String, TableRule> result = new LinkedHashMap<>();
    for (ShardingAutoTableRuleConfiguration each : autoTableRuleConfigs) {
        TableRule tableRule = createAutoTableRule(defaultKeyGenerateStrategyConfig, each);
        result.putIfAbsent(tableRule.getLogicTable().toLowerCase(), tableRule);
    }
    return result;
}
// Creates one auto table rule, requiring its strategy to name a ShardingAutoTableAlgorithm.
private TableRule createAutoTableRule(final KeyGenerateStrategyConfiguration defaultKeyGenerateStrategyConfig, final ShardingAutoTableRuleConfiguration autoTableRuleConfig) {
    ShardingAlgorithm shardingAlgorithm = null;
    if (null != autoTableRuleConfig.getShardingStrategy()) {
        shardingAlgorithm = shardingAlgorithms.get(autoTableRuleConfig.getShardingStrategy().getShardingAlgorithmName());
    }
    Preconditions.checkState(shardingAlgorithm instanceof ShardingAutoTableAlgorithm, "Sharding auto table rule configuration must match sharding auto table algorithm.");
    return new TableRule(autoTableRuleConfig, dataSourceNames, (ShardingAutoTableAlgorithm) shardingAlgorithm, getDefaultGenerateKeyColumn(defaultKeyGenerateStrategyConfig));
}
// Returns the default generate-key column, or null when no default strategy is configured.
private String getDefaultGenerateKeyColumn(final KeyGenerateStrategyConfiguration defaultKeyGenerateStrategyConfig) {
    if (null == defaultKeyGenerateStrategyConfig) {
        return null;
    }
    return defaultKeyGenerateStrategyConfig.getColumn();
}
// Copies the configured broadcast table names into a case-insensitive sorted set,
// so later membership checks (isBroadcastTable / isAllBroadcastTables) ignore case.
private Collection<String> createBroadcastTables(final Collection<String> broadcastTables) {
    Collection<String> result = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
    result.addAll(broadcastTables);
    return result;
}
// Builds the lookup from lower-cased logic table name to its binding group's shared rule instance.
private Map<String, BindingTableRule> createBindingTableRules(final Collection<String> bindingTableGroups) {
    Map<String, BindingTableRule> result = new LinkedHashMap<>();
    for (String group : bindingTableGroups) {
        BindingTableRule groupRule = createBindingTableRule(group);
        // Every table of the group maps to the same BindingTableRule instance.
        groupRule.getAllLogicTables().forEach(logicTable -> result.put(logicTable.toLowerCase(), groupRule));
    }
    return result;
}
// Creates one binding rule from a comma-separated group such as "t_order, t_order_item".
private BindingTableRule createBindingTableRule(final String bindingTableGroup) {
    Map<String, TableRule> groupRules = new LinkedHashMap<>();
    for (String each : Splitter.on(",").trimResults().splitToList(bindingTableGroup)) {
        TableRule tableRule = getTableRule(each);
        // First occurrence of a logic table wins, matching the previous merge behavior.
        groupRules.putIfAbsent(tableRule.getLogicTable().toLowerCase(), tableRule);
    }
    BindingTableRule result = new BindingTableRule();
    result.getTableRules().putAll(groupRules);
    return result;
}
/**
 * Validates every binding table group: all tables of a group must share the same actual data
 * sources, equivalent actual table layouts, and equivalent database/table sharding algorithms
 * (each compared against the group's first table as the sample).
 */
private boolean isValidBindingTableConfiguration(final Collection<String> bindingTableGroups) {
    for (String each : bindingTableGroups) {
        Collection<String> bindingTables = Splitter.on(",").trimResults().splitToList(each.toLowerCase());
        // A group of a single table is trivially valid.
        if (bindingTables.size() <= 1) {
            continue;
        }
        Iterator<String> iterator = bindingTables.iterator();
        TableRule sampleTableRule = getTableRule(iterator.next());
        while (iterator.hasNext()) {
            TableRule tableRule = getTableRule(iterator.next());
            if (!isValidActualDatasourceName(sampleTableRule, tableRule) || !isValidActualTableName(sampleTableRule, tableRule)) {
                return false;
            }
            // Checked twice: once for the database algorithm, once for the table algorithm.
            if (!isValidShardingAlgorithm(sampleTableRule, tableRule, true)
                    || !isValidShardingAlgorithm(sampleTableRule, tableRule, false)) {
                return false;
            }
        }
    }
    return true;
}
// Binding tables must span exactly the same set of actual data sources.
private boolean isValidActualDatasourceName(final TableRule sampleTableRule, final TableRule tableRule) {
    return sampleTableRule.getActualDatasourceNames().equals(tableRule.getActualDatasourceNames());
}
/**
 * Checks that, per data source, both tables have the same actual-table suffixes after
 * stripping each table's own name prefix (e.g. "t_order_0" and "t_order_item_0" both
 * reduce to "0"), so binding tables line up shard-for-shard.
 */
private boolean isValidActualTableName(final TableRule sampleTableRule, final TableRule tableRule) {
    for (String each : sampleTableRule.getActualDatasourceNames()) {
        Collection<String> sampleActualTableNames =
                sampleTableRule.getActualTableNames(each).stream().map(actualTableName -> actualTableName.replace(sampleTableRule.getTableDataNode().getPrefix(), "")).collect(Collectors.toSet());
        Collection<String> actualTableNames =
                tableRule.getActualTableNames(each).stream().map(actualTableName -> actualTableName.replace(tableRule.getTableDataNode().getPrefix(), "")).collect(Collectors.toSet());
        if (!sampleActualTableNames.equals(actualTableNames)) {
            return false;
        }
    }
    return true;
}
// Binding tables must use equivalent algorithm expressions at the given level (database or table).
private boolean isValidShardingAlgorithm(final TableRule sampleTableRule, final TableRule tableRule, final boolean databaseAlgorithm) {
    String sampleExpression = getAlgorithmExpression(sampleTableRule, databaseAlgorithm);
    String candidateExpression = getAlgorithmExpression(tableRule, databaseAlgorithm);
    return sampleExpression.equalsIgnoreCase(candidateExpression);
}
/**
 * Normalizes a table's sharding algorithm expression for comparison between binding tables:
 * reads the "algorithm-expression" property of the resolved algorithm (empty string when the
 * algorithm is absent or has no such property), then strips the table's data node prefix and
 * its sharding column so only the structural shard formula remains.
 */
private String getAlgorithmExpression(final TableRule tableRule, final boolean databaseAlgorithm) {
    ShardingStrategyConfiguration shardingStrategyConfig = databaseAlgorithm
            ? null == tableRule.getDatabaseShardingStrategyConfig() ? defaultDatabaseShardingStrategyConfig : tableRule.getDatabaseShardingStrategyConfig()
            : null == tableRule.getTableShardingStrategyConfig() ? defaultTableShardingStrategyConfig : tableRule.getTableShardingStrategyConfig();
    ShardingAlgorithm shardingAlgorithm = shardingAlgorithms.get(shardingStrategyConfig.getShardingAlgorithmName());
    String originAlgorithmExpression = null == shardingAlgorithm ? "" : StringUtils.defaultString(shardingAlgorithm.getProps().getProperty("algorithm-expression"), "");
    String sampleDataNodePrefix = databaseAlgorithm ? tableRule.getDataSourceDataNode().getPrefix() : tableRule.getTableDataNode().getPrefix();
    // NOTE(review): getShardingColumn(...) is defined elsewhere in this class (not visible here);
    // presumably it resolves the strategy's column with the rule-wide default — confirm.
    String shardingColumn = getShardingColumn(shardingStrategyConfig);
    return originAlgorithmExpression.replace(sampleDataNodePrefix, "").replace(shardingColumn, "");
}
@Override
public Collection<String> getAllTables() {
    // Union of logic tables (sharded + broadcast) and every actual (physical) table name.
    Collection<String> result = new HashSet<>();
    result.addAll(getTables());
    result.addAll(getAllActualTables());
    return result;
}
/**
 * Get database sharding strategy configuration.
 *
 * @param tableRule table rule
 * @return the table's own database-level strategy, or the rule-wide default when none is set
 */
public ShardingStrategyConfiguration getDatabaseShardingStrategyConfiguration(final TableRule tableRule) {
    ShardingStrategyConfiguration tableSpecific = tableRule.getDatabaseShardingStrategyConfig();
    return null != tableSpecific ? tableSpecific : defaultDatabaseShardingStrategyConfig;
}
/**
 * Get table sharding strategy configuration.
 *
 * @param tableRule table rule
 * @return the table's own table-level strategy, or the rule-wide default when none is set
 */
public ShardingStrategyConfiguration getTableShardingStrategyConfiguration(final TableRule tableRule) {
    ShardingStrategyConfiguration tableSpecific = tableRule.getTableShardingStrategyConfig();
    return null != tableSpecific ? tableSpecific : defaultTableShardingStrategyConfig;
}
/**
 * Find table rule.
 *
 * @param logicTableName logic table name (matched case-insensitively via lower-cased keys)
 * @return table rule, or empty when no rule is configured for the table
 */
public Optional<TableRule> findTableRule(final String logicTableName) {
    TableRule rule = tableRules.get(logicTableName.toLowerCase());
    return Optional.ofNullable(rule);
}
/**
 * Find table rule via actual table name.
 *
 * @param actualTableName actual table name
 * @return first table rule containing the actual table, or empty when none matches
 */
public Optional<TableRule> findTableRuleByActualTable(final String actualTableName) {
    return tableRules.values().stream().filter(each -> each.isExisted(actualTableName)).findFirst();
}
/**
 * Get table rule.
 *
 * @param logicTableName logic table name
 * @return configured table rule, or an ad-hoc rule spanning all data sources for a broadcast table
 * @throws ShardingSphereConfigurationException when the table is neither sharded nor broadcast
 */
public TableRule getTableRule(final String logicTableName) {
    return findTableRule(logicTableName).orElseGet(() -> {
        if (isBroadcastTable(logicTableName)) {
            // Broadcast tables have no explicit rule; synthesize one covering every data source.
            return new TableRule(dataSourceNames, logicTableName);
        }
        throw new ShardingSphereConfigurationException("Cannot find table rule with logic table: '%s'", logicTableName);
    });
}
/**
 * Judge whether logic table is all binding tables or not.
 *
 * @param logicTableNames logic table names
 * @return whether every given logic table belongs to the same binding table group
 */
public boolean isAllBindingTables(final Collection<String> logicTableNames) {
    if (logicTableNames.isEmpty()) {
        return false;
    }
    return findBindingTableRule(logicTableNames).map(rule -> {
        // Compare case-insensitively: query identifiers may differ in case from configuration.
        Collection<String> boundTables = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
        boundTables.addAll(rule.getAllLogicTables());
        return !boundTables.isEmpty() && boundTables.containsAll(logicTableNames);
    }).orElse(false);
}
/**
 * Judge whether logic table is all binding tables, additionally validating join conditions
 * for SELECT statements that contain a join query.
 *
 * @param schema schema
 * @param sqlStatementContext sqlStatementContext
 * @param logicTableNames logic table names
 * @return whether logic table is all binding tables
 */
public boolean isAllBindingTables(final ShardingSphereSchema schema, final SQLStatementContext<?> sqlStatementContext, final Collection<String> logicTableNames) {
    // Non-join statements only need the plain binding-group membership check.
    if (!(sqlStatementContext instanceof SelectStatementContext && ((SelectStatementContext) sqlStatementContext).isContainsJoinQuery())) {
        return isAllBindingTables(logicTableNames);
    }
    if (!isAllBindingTables(logicTableNames)) {
        return false;
    }
    // For joins, require sharding columns in either the ON clauses or the WHERE clauses.
    SelectStatementContext select = (SelectStatementContext) sqlStatementContext;
    Collection<WhereSegment> joinSegments = WhereExtractUtil.getJoinWhereSegments(select.getSqlStatement());
    return isJoinConditionContainsShardingColumns(schema, select, logicTableNames, joinSegments)
            || isJoinConditionContainsShardingColumns(schema, select, logicTableNames, select.getWhereSegments());
}
// Returns the binding rule of the first given table that belongs to any binding group.
private Optional<BindingTableRule> findBindingTableRule(final Collection<String> logicTableNames) {
    return logicTableNames.stream()
            .map(this::findBindingTableRule)
            .filter(Optional::isPresent)
            .map(Optional::get)
            .findFirst();
}
/**
 * Find binding table rule via logic table name.
 *
 * @param logicTableName logic table name (matched case-insensitively via lower-cased keys)
 * @return binding table rule, or empty when the table is not in any binding group
 */
public Optional<BindingTableRule> findBindingTableRule(final String logicTableName) {
    BindingTableRule rule = bindingTableRules.get(logicTableName.toLowerCase());
    return Optional.ofNullable(rule);
}
/**
 * Judge whether logic table is all broadcast tables or not.
 *
 * @param logicTableNames logic table names
 * @return whether every given logic table is a broadcast table (false for an empty input)
 */
public boolean isAllBroadcastTables(final Collection<String> logicTableNames) {
    if (logicTableNames.isEmpty()) {
        return false;
    }
    return broadcastTables.containsAll(logicTableNames);
}
/**
 * Judge whether logic tables are all sharding tables or not.
 *
 * @param logicTableNames logic table names
 * @return whether logic tables are all sharding tables or not
 */
public boolean isAllShardingTables(final Collection<String> logicTableNames) {
    // Empty input is never "all sharding"; otherwise every table must have a table rule.
    return !logicTableNames.isEmpty() && logicTableNames.stream().allMatch(this::isShardingTable);
}
/**
 * Judge whether logic table is a sharding table or not.
 *
 * @param logicTableName logic table name
 * @return whether logic table is a sharding table or not
 */
public boolean isShardingTable(final String logicTableName) {
    // Table rules are keyed by lower-cased logic table name.
    String key = logicTableName.toLowerCase();
    return tableRules.containsKey(key);
}
/**
 * Judge whether logic table is broadcast table or not.
 *
 * @param logicTableName logic table name
 * @return whether logic table is broadcast table or not
 */
public boolean isBroadcastTable(final String logicTableName) {
    // NOTE(review): unlike the table-rule lookups above, the name is NOT lower-cased here;
    // presumably broadcastTables is a case-insensitive collection — confirm against its declaration.
    return broadcastTables.contains(logicTableName);
}
/**
 * Judge whether all tables are in the same data source or not.
 *
 * @param logicTableNames logic table names
 * @return whether all tables are in the same data source or not
 */
public boolean isAllTablesInSameDataSource(final Collection<String> logicTableNames) {
    // Collect the distinct actual data source names of every known logic table;
    // tables without a rule are skipped, exactly as in the previous stream form.
    Collection<String> dataSourceNames = new TreeSet<>();
    for (String each : logicTableNames) {
        TableRule tableRule = tableRules.get(each.toLowerCase());
        if (null != tableRule) {
            dataSourceNames.addAll(tableRule.getActualDatasourceNames());
        }
    }
    return 1 == dataSourceNames.size();
}
/**
 * Judge whether a table rule exists for any of the given logic tables.
 *
 * @param logicTableNames logic table names
 * @return whether a table rule exists for any of the given logic tables
 */
public boolean tableRuleExists(final Collection<String> logicTableNames) {
    // A sharding rule or broadcast declaration both count as "a rule exists".
    return logicTableNames.stream().anyMatch(each -> isShardingTable(each) || isBroadcastTable(each));
}
/**
 * Find sharding column.
 *
 * @param columnName column name
 * @param tableName table name
 * @return sharding column
 */
public Optional<String> findShardingColumn(final String columnName, final String tableName) {
    TableRule tableRule = tableRules.get(tableName.toLowerCase());
    if (null == tableRule) {
        return Optional.empty();
    }
    return findShardingColumn(tableRule, columnName);
}
private Optional<String> findShardingColumn(final TableRule tableRule, final String columnName) {
    // The database-level sharding strategy takes precedence over the table-level one.
    Optional<String> result = findShardingColumn(getDatabaseShardingStrategyConfiguration(tableRule), columnName);
    return result.isPresent() ? result : findShardingColumn(getTableShardingStrategyConfiguration(tableRule), columnName);
}
private Optional<String> findShardingColumn(final ShardingStrategyConfiguration shardingStrategyConfig, final String columnName) {
    if (shardingStrategyConfig instanceof StandardShardingStrategyConfiguration) {
        String configured = ((StandardShardingStrategyConfiguration) shardingStrategyConfig).getShardingColumn();
        // Fall back to the default sharding column when the strategy declares none.
        // NOTE(review): assumes at least one of the two is non-null — confirm.
        String shardingColumn = null == configured ? defaultShardingColumn : configured;
        return shardingColumn.equalsIgnoreCase(columnName) ? Optional.of(shardingColumn) : Optional.empty();
    }
    if (shardingStrategyConfig instanceof ComplexShardingStrategyConfiguration) {
        String shardingColumns = ((ComplexShardingStrategyConfiguration) shardingStrategyConfig).getShardingColumns();
        // Comma-separated column list; match case-insensitively, first hit wins.
        return Splitter.on(",").trimResults().splitToList(shardingColumns).stream()
                .filter(each -> each.equalsIgnoreCase(columnName))
                .findFirst();
    }
    return Optional.empty();
}
/**
 * Judge whether the given logic table column is a generate key column or not.
 *
 * @param columnName column name
 * @param tableName table name
 * @return whether the given logic table column is a generate key column or not
 */
public boolean isGenerateKeyColumn(final String columnName, final String tableName) {
    TableRule tableRule = tableRules.get(tableName.toLowerCase());
    return null != tableRule && isGenerateKeyColumn(tableRule, columnName);
}
private boolean isGenerateKeyColumn(final TableRule tableRule, final String columnName) {
    // True only when a generate key column is configured and matches case-insensitively.
    return tableRule.getGenerateKeyColumn().filter(each -> each.equalsIgnoreCase(columnName)).isPresent();
}
/**
 * Find the column name of the generated key.
 *
 * @param logicTableName logic table name
 * @return column name of the generated key, or empty when the table has no rule
 *         or no generate key column is configured
 */
public Optional<String> findGenerateKeyColumnName(final String logicTableName) {
    // flatMap already yields empty when no generate key column is configured,
    // so the former `.filter(each -> each.getGenerateKeyColumn().isPresent())`
    // pre-check was redundant and has been dropped.
    return Optional.ofNullable(tableRules.get(logicTableName.toLowerCase())).flatMap(TableRule::getGenerateKeyColumn);
}
/**
 * Generate a key for the given logic table.
 *
 * @param logicTableName logic table name
 * @return generated key
 */
public Comparable<?> generateKey(final String logicTableName) {
    Optional<TableRule> tableRule = findTableRule(logicTableName);
    if (!tableRule.isPresent()) {
        throw new ShardingSphereConfigurationException("Cannot find strategy for generate keys.");
    }
    // Use the rule's own key generator when named, otherwise fall back to the default one.
    String keyGeneratorName = tableRule.get().getKeyGeneratorName();
    KeyGenerateAlgorithm keyGenerator = null == keyGeneratorName ? defaultKeyGenerateAlgorithm : keyGenerators.get(keyGeneratorName);
    return keyGenerator.generateKey();
}
/**
 * Find the data node by logic table name.
 *
 * @param logicTableName logic table name
 * @return the first actual data node of the logic table
 */
public DataNode getDataNode(final String logicTableName) {
    // The first configured actual data node is used as the representative one.
    return getTableRule(logicTableName).getActualDataNodes().get(0);
}
/**
 * Get sharding logic table names.
 *
 * @param logicTableNames logic table names
 * @return names of those tables that have a sharding table rule, in input order
 */
public Collection<String> getShardingLogicTableNames(final Collection<String> logicTableNames) {
    return logicTableNames.stream()
            .filter(this::isShardingTable)
            .collect(Collectors.toCollection(LinkedList::new));
}
/**
 * Get sharding rule table names.
 *
 * @param logicTableNames logic table names
 * @return names of those tables that are sharded or broadcast, in input order
 */
public Collection<String> getShardingRuleTableNames(final Collection<String> logicTableNames) {
    Collection<String> result = new LinkedList<>();
    for (String each : logicTableNames) {
        if (isShardingTable(each) || isBroadcastTable(each)) {
            result.add(each);
        }
    }
    return result;
}
/**
 * Get logic and actual binding tables.
 *
 * @param dataSourceName data source name
 * @param logicTable logic table name
 * @param actualTable actual table name
 * @param availableLogicBindingTables available logic binding table names
 * @return logic and actual binding tables; empty when the table has no binding rule
 */
public Map<String, String> getLogicAndActualTablesFromBindingTable(final String dataSourceName,
                                                                   final String logicTable, final String actualTable, final Collection<String> availableLogicBindingTables) {
    Optional<BindingTableRule> bindingTableRule = findBindingTableRule(logicTable);
    if (!bindingTableRule.isPresent()) {
        return Collections.emptyMap();
    }
    return bindingTableRule.get().getLogicAndActualTables(dataSourceName, logicTable, actualTable, availableLogicBindingTables);
}
/**
 * Get logic tables via actual table name.
 *
 * @param actualTable actual table name
 * @return logic tables whose rules contain the actual table
 */
public Collection<String> getLogicTablesByActualTable(final String actualTable) {
    Collection<String> result = new HashSet<>();
    for (TableRule each : tableRules.values()) {
        if (each.isExisted(actualTable)) {
            result.add(each.getLogicTable());
        }
    }
    return result;
}
@Override
public Map<String, Collection<DataNode>> getAllDataNodes() {
    // Returns the internal map directly (no defensive copy); callers share this state.
    return shardingTableDataNodes;
}
@Override
public Collection<DataNode> getDataNodesByTableName(final String tableName) {
    // Data nodes are keyed by lower-cased table name; absent tables yield an empty list.
    String key = tableName.toLowerCase();
    return shardingTableDataNodes.getOrDefault(key, Collections.emptyList());
}
private Collection<String> getAllActualTables() {
    // Distinct actual table names across every table rule's data nodes.
    Collection<String> result = new HashSet<>();
    for (TableRule each : tableRules.values()) {
        for (DataNode dataNode : each.getActualDataNodes()) {
            result.add(dataNode.getTableName());
        }
    }
    return result;
}
@Override
public Optional<String> findFirstActualTable(final String logicTable) {
    Optional<TableRule> tableRule = findTableRule(logicTable);
    if (!tableRule.isPresent()) {
        return Optional.empty();
    }
    // First configured actual data node determines the "first" actual table.
    return Optional.of(tableRule.get().getActualDataNodes().get(0).getTableName());
}
@Override
public boolean isNeedAccumulate(final Collection<String> tables) {
    // Accumulation is needed unless every given table is a broadcast table.
    return !isAllBroadcastTables(tables);
}
@Override
public Optional<String> findLogicTableByActualTable(final String actualTable) {
    // Delegates to the rule lookup by actual table and maps the hit to its logic table name.
    return findTableRuleByActualTable(actualTable).map(TableRule::getLogicTable);
}
@Override
public Collection<String> getTables() {
    // All logic table names of sharding rules plus every broadcast table, de-duplicated.
    Collection<String> result = new HashSet<>(broadcastTables);
    for (TableRule each : tableRules.values()) {
        result.add(each.getLogicTable());
    }
    return result;
}
@Override
public Optional<String> findActualTableByCatalog(final String catalog, final String logicTable) {
    Optional<TableRule> tableRule = findTableRule(logicTable);
    if (!tableRule.isPresent()) {
        return Optional.empty();
    }
    return findActualTableFromActualDataNode(catalog, tableRule.get().getActualDataNodes());
}
private Optional<String> findActualTableFromActualDataNode(final String catalog, final List<DataNode> actualDataNodes) {
    // First data node whose data source matches the catalog (case-insensitively) wins.
    for (DataNode each : actualDataNodes) {
        if (each.getDataSourceName().equalsIgnoreCase(catalog)) {
            return Optional.of(each.getTableName());
        }
    }
    return Optional.empty();
}
@Override
public String getType() {
    // Rule type identifier: the simple class name "ShardingRule".
    return ShardingRule.class.getSimpleName();
}
/**
 * Checks whether the given where segments' join conditions reference the sharding columns
 * of every table, for both the database-level and the table-level standard strategies.
 */
private boolean isJoinConditionContainsShardingColumns(final ShardingSphereSchema schema, final SelectStatementContext select,
                                                       final Collection<String> tableNames, final Collection<WhereSegment> whereSegments) {
    // Tables whose database-level / table-level sharding columns appear in join conditions.
    Collection<String> databaseJoinConditionTables = new HashSet<>(tableNames.size());
    Collection<String> tableJoinConditionTables = new HashSet<>(tableNames.size());
    for (WhereSegment each : whereSegments) {
        Collection<AndPredicate> andPredicates = ExpressionExtractUtil.getAndPredicates(each.getExpr());
        // Multiple AND-predicate groups (presumably OR branches) cannot be verified safely; bail out.
        if (andPredicates.size() > 1) {
            return false;
        }
        for (AndPredicate andPredicate : andPredicates) {
            databaseJoinConditionTables.addAll(getJoinConditionTables(schema, select, andPredicate.getPredicates(), true));
            tableJoinConditionTables.addAll(getJoinConditionTables(schema, select, andPredicate.getPredicates(), false));
        }
    }
    // NOTE(review): only the first table's rule is inspected here — presumably all given
    // tables share compatible strategies via their binding rule; confirm.
    TableRule tableRule = getTableRule(tableNames.iterator().next());
    // Non-standard strategies are not checked; a standard strategy requires every table
    // to be covered by the collected join-condition tables.
    boolean containsDatabaseShardingColumns = !(getDatabaseShardingStrategyConfiguration(tableRule) instanceof StandardShardingStrategyConfiguration)
            || databaseJoinConditionTables.containsAll(tableNames);
    boolean containsTableShardingColumns = !(getTableShardingStrategyConfiguration(tableRule) instanceof StandardShardingStrategyConfiguration) || tableJoinConditionTables.containsAll(tableNames);
    return containsDatabaseShardingColumns && containsTableShardingColumns;
}
/**
 * Collects the table names of equi-join predicates (column = column) where BOTH sides
 * are sharding columns of their respective tables, for either the database-level or the
 * table-level strategy depending on {@code isDatabaseJoinCondition}.
 */
private Collection<String> getJoinConditionTables(final ShardingSphereSchema schema, final SelectStatementContext select,
                                                  final Collection<ExpressionSegment> predicates, final boolean isDatabaseJoinCondition) {
    Collection<String> result = new LinkedList<>();
    for (ExpressionSegment each : predicates) {
        // Only "column = column" binary expressions qualify as join conditions.
        if (!isJoinConditionExpression(each)) {
            continue;
        }
        ColumnSegment leftColumn = (ColumnSegment) ((BinaryOperationExpression) each).getLeft();
        ColumnSegment rightColumn = (ColumnSegment) ((BinaryOperationExpression) each).getRight();
        // Resolve which table each column expression belongs to.
        Map<String, String> columnExpressionTableNames = select.getTablesContext().findTableNamesByColumnSegment(Arrays.asList(leftColumn, rightColumn), schema);
        Optional<TableRule> leftTableRule = findTableRule(columnExpressionTableNames.get(leftColumn.getExpression()));
        Optional<TableRule> rightTableRule = findTableRule(columnExpressionTableNames.get(rightColumn.getExpression()));
        // Skip predicates involving tables without a sharding rule.
        if (!leftTableRule.isPresent() || !rightTableRule.isPresent()) {
            continue;
        }
        ShardingStrategyConfiguration leftConfiguration = isDatabaseJoinCondition
                ? getDatabaseShardingStrategyConfiguration(leftTableRule.get())
                : getTableShardingStrategyConfiguration(leftTableRule.get());
        ShardingStrategyConfiguration rightConfiguration = isDatabaseJoinCondition
                ? getDatabaseShardingStrategyConfiguration(rightTableRule.get())
                : getTableShardingStrategyConfiguration(rightTableRule.get());
        // Both sides must be sharding columns of their own table for the pair to count.
        if (findShardingColumn(leftConfiguration, leftColumn.getIdentifier().getValue()).isPresent()
                && findShardingColumn(rightConfiguration, rightColumn.getIdentifier().getValue()).isPresent()) {
            result.add(columnExpressionTableNames.get(leftColumn.getExpression()));
            result.add(columnExpressionTableNames.get(rightColumn.getExpression()));
        }
    }
    return result;
}
private boolean isJoinConditionExpression(final ExpressionSegment expression) {
    // A join condition is a binary "column = column" expression.
    if (!(expression instanceof BinaryOperationExpression)) {
        return false;
    }
    BinaryOperationExpression binaryExpression = (BinaryOperationExpression) expression;
    boolean bothSidesAreColumns = binaryExpression.getLeft() instanceof ColumnSegment && binaryExpression.getRight() instanceof ColumnSegment;
    return bothSidesAreColumns && EQUAL.equals(binaryExpression.getOperator());
}
@Override
public void setInstanceContext(final InstanceContext instanceContext) {
    // Propagate the instance context to every key generator that requires it,
    // including the default one.
    for (KeyGenerateAlgorithm each : keyGenerators.values()) {
        if (each instanceof ShardingSphereInstanceRequiredAlgorithm) {
            ((ShardingSphereInstanceRequiredAlgorithm) each).setInstanceContext(instanceContext);
        }
    }
    if (defaultKeyGenerateAlgorithm instanceof ShardingSphereInstanceRequiredAlgorithm) {
        ((ShardingSphereInstanceRequiredAlgorithm) defaultKeyGenerateAlgorithm).setInstanceContext(instanceContext);
    }
}
}
|
We need to close the FileInputStream once we are done using it.
|
/**
 * Creates and retrieves the initialized SSLContext from the given secureSocket record.
 *
 * @param secureSocket secureSocket record holding key store / trust store configuration.
 * @return Initialized SSLContext.
 * @throws RabbitMQConnectorException when a store path is missing or any SSL setup step fails.
 */
private static SSLContext getSSLContext(MapValue secureSocket) {
    final String CERT_PASS = "password";
    final String CERT_PATH = "path";
    try {
        MapValue cryptoKeyStore = secureSocket.getMapValue(RabbitMQConstants.RABBITMQ_CONNECTION_KEYSTORE);
        MapValue cryptoTrustStore = secureSocket.getMapValue(RabbitMQConstants.RABBITMQ_CONNECTION_TRUSTORE);
        char[] keyPassphrase = cryptoKeyStore.getStringValue(CERT_PASS).toCharArray();
        String keyFilePath = cryptoKeyStore.getStringValue(CERT_PATH);
        char[] trustPassphrase = cryptoTrustStore.getStringValue(CERT_PASS).toCharArray();
        String trustFilePath = cryptoTrustStore.getStringValue(CERT_PATH);
        String tlsVersion = secureSocket.getStringValue(RabbitMQConstants.RABBITMQ_CONNECTION_TLS_VERSION);
        if (tlsVersion == null) {
            tlsVersion = "TLS";
        }
        KeyStore keyStore = KeyStore.getInstance(RabbitMQConstants.KEY_STORE_TYPE);
        if (keyFilePath != null) {
            // try-with-resources guarantees the stream is closed even if load() throws.
            try (FileInputStream keyFileInputStream = new FileInputStream(keyFilePath)) {
                keyStore.load(keyFileInputStream, keyPassphrase);
            }
        } else {
            throw new RabbitMQConnectorException(RabbitMQConstants.CREATE_SECURE_CONNECTION_ERROR +
                    "Path for the keystore is not found.");
        }
        KeyManagerFactory keyManagerFactory =
                KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
        keyManagerFactory.init(keyStore, keyPassphrase);
        KeyStore trustStore = KeyStore.getInstance(RabbitMQConstants.KEY_STORE_TYPE);
        if (trustFilePath != null) {
            // Same pattern as above: never leak the trust store stream.
            try (FileInputStream trustFileInputStream = new FileInputStream(trustFilePath)) {
                trustStore.load(trustFileInputStream, trustPassphrase);
            }
        } else {
            throw new RabbitMQConnectorException("Path for the truststore is not found.");
        }
        TrustManagerFactory trustManagerFactory =
                TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
        trustManagerFactory.init(trustStore);
        SSLContext sslContext = SSLContext.getInstance(tlsVersion);
        sslContext.init(keyManagerFactory.getKeyManagers(), trustManagerFactory.getTrustManagers(), null);
        return sslContext;
    } catch (FileNotFoundException exception) {
        throw new RabbitMQConnectorException(RabbitMQConstants.CREATE_SECURE_CONNECTION_ERROR +
                exception.getLocalizedMessage());
    } catch (IOException exception) {
        throw new RabbitMQConnectorException(RabbitMQConstants.CREATE_SECURE_CONNECTION_ERROR +
                "I/O error occurred. " +
                exception.getLocalizedMessage());
    } catch (CertificateException exception) {
        throw new RabbitMQConnectorException(RabbitMQConstants.CREATE_SECURE_CONNECTION_ERROR +
                "Certification error occurred. " +
                exception.getLocalizedMessage());
    } catch (UnrecoverableKeyException exception) {
        throw new RabbitMQConnectorException(RabbitMQConstants.CREATE_SECURE_CONNECTION_ERROR +
                "A key in the keystore cannot be recovered. " +
                exception.getLocalizedMessage());
    } catch (NoSuchAlgorithmException exception) {
        throw new RabbitMQConnectorException(RabbitMQConstants.CREATE_SECURE_CONNECTION_ERROR +
                "The particular cryptographic algorithm requested is not available in the environment." +
                exception.getLocalizedMessage());
    } catch (KeyStoreException exception) {
        throw new RabbitMQConnectorException(RabbitMQConstants.CREATE_SECURE_CONNECTION_ERROR +
                "No provider supports a KeyStoreSpi implementation for this keystore type." +
                exception.getLocalizedMessage());
    } catch (KeyManagementException exception) {
        throw new RabbitMQConnectorException(RabbitMQConstants.CREATE_SECURE_CONNECTION_ERROR +
                "Error occurred in an operation with key management." +
                exception.getLocalizedMessage());
    }
}
|
keyStore.load(new FileInputStream(keyFilePath), keyPassphrase);
|
private static SSLContext getSSLContext(MapValue secureSocket) {
try {
MapValue cryptoKeyStore = secureSocket.getMapValue(RabbitMQConstants.RABBITMQ_CONNECTION_KEYSTORE);
MapValue cryptoTrustStore = secureSocket.getMapValue(RabbitMQConstants.RABBITMQ_CONNECTION_TRUSTORE);
char[] keyPassphrase = cryptoKeyStore.getStringValue(RabbitMQConstants.KEY_STORE_PASS).toCharArray();
String keyFilePath = cryptoKeyStore.getStringValue(RabbitMQConstants.KEY_STORE_PATH);
char[] trustPassphrase = cryptoTrustStore.getStringValue(RabbitMQConstants.KEY_STORE_PASS).toCharArray();
String trustFilePath = cryptoTrustStore.getStringValue(RabbitMQConstants.KEY_STORE_PATH);
String tlsVersion = secureSocket.getStringValue(RabbitMQConstants.RABBITMQ_CONNECTION_TLS_VERSION);
KeyStore keyStore = KeyStore.getInstance(RabbitMQConstants.KEY_STORE_TYPE);
if (keyFilePath != null) {
try (FileInputStream keyFileInputStream = new FileInputStream(keyFilePath)) {
keyStore.load(keyFileInputStream, keyPassphrase);
}
} else {
throw new RabbitMQConnectorException(RabbitMQConstants.CREATE_SECURE_CONNECTION_ERROR +
"Path for the keystore is not found.");
}
KeyManagerFactory keyManagerFactory =
KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
keyManagerFactory.init(keyStore, keyPassphrase);
KeyStore trustStore = KeyStore.getInstance(RabbitMQConstants.KEY_STORE_TYPE);
if (trustFilePath != null) {
try (FileInputStream trustFileInputStream = new FileInputStream(trustFilePath)) {
trustStore.load(trustFileInputStream, trustPassphrase);
}
} else {
throw new RabbitMQConnectorException("Path for the truststore is not found.");
}
TrustManagerFactory trustManagerFactory =
TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
trustManagerFactory.init(trustStore);
SSLContext sslContext = SSLContext.getInstance(tlsVersion);
sslContext.init(keyManagerFactory.getKeyManagers(), trustManagerFactory.getTrustManagers(), null);
return sslContext;
} catch (FileNotFoundException exception) {
throw new RabbitMQConnectorException(RabbitMQConstants.CREATE_SECURE_CONNECTION_ERROR +
exception.getLocalizedMessage());
} catch (IOException exception) {
throw new RabbitMQConnectorException(RabbitMQConstants.CREATE_SECURE_CONNECTION_ERROR +
"I/O error occurred.");
} catch (CertificateException exception) {
throw new RabbitMQConnectorException(RabbitMQConstants.CREATE_SECURE_CONNECTION_ERROR +
"Certification error occurred.");
} catch (UnrecoverableKeyException exception) {
throw new RabbitMQConnectorException(RabbitMQConstants.CREATE_SECURE_CONNECTION_ERROR +
"A key in the keystore cannot be recovered.");
} catch (NoSuchAlgorithmException exception) {
throw new RabbitMQConnectorException(RabbitMQConstants.CREATE_SECURE_CONNECTION_ERROR +
"The particular cryptographic algorithm requested is not available in the environment.");
} catch (KeyStoreException exception) {
throw new RabbitMQConnectorException(RabbitMQConstants.CREATE_SECURE_CONNECTION_ERROR +
"No provider supports a KeyStoreSpi implementation for this keystore type." +
exception.getLocalizedMessage());
} catch (KeyManagementException exception) {
throw new RabbitMQConnectorException(RabbitMQConstants.CREATE_SECURE_CONNECTION_ERROR +
"Error occurred in an operation with key management." +
exception.getLocalizedMessage());
}
}
|
/**
 * Utility class for creating, closing, and aborting RabbitMQ connections.
 */
class ConnectionUtils {
    private static final Logger logger = LoggerFactory.getLogger(ConnectionUtils.class);

    /**
     * Creates a RabbitMQ Connection using the given connection parameters.
     *
     * @param connectionConfig Parameters used to initialize the connection.
     * @return RabbitMQ Connection object.
     */
    public static Connection createConnection(MapValue<String, Object> connectionConfig) {
        try {
            ConnectionFactory connectionFactory = new ConnectionFactory();
            // Optional TLS setup: present only when a secureSocket record was configured.
            MapValue secureSocket = connectionConfig.getMapValue(RabbitMQConstants.RABBITMQ_CONNECTION_SECURE_SOCKET);
            if (secureSocket != null) {
                SSLContext sslContext = getSSLContext(secureSocket);
                connectionFactory.useSslProtocol(sslContext);
                if (secureSocket.getBooleanValue(RabbitMQConstants.RABBITMQ_CONNECTION_VERIFY_HOST)) {
                    connectionFactory.enableHostnameVerification();
                }
                logger.info("TLS enabled for the connection with the RabbitMQ server.");
            }
            String host = connectionConfig.getStringValue(RabbitMQConstants.RABBITMQ_CONNECTION_HOST);
            connectionFactory.setHost(host);
            int port = Math.toIntExact(connectionConfig.getIntValue(RabbitMQConstants.RABBITMQ_CONNECTION_PORT));
            connectionFactory.setPort(port);
            // Each of the following settings is applied only when it was configured.
            Object username = connectionConfig.get(RabbitMQConstants.RABBITMQ_CONNECTION_USER);
            if (username != null) {
                connectionFactory.setUsername(username.toString());
            }
            Object pass = connectionConfig.get(RabbitMQConstants.RABBITMQ_CONNECTION_PASS);
            if (pass != null) {
                connectionFactory.setPassword(pass.toString());
            }
            Object timeout = connectionConfig.get(RabbitMQConstants.RABBITMQ_CONNECTION_TIMEOUT);
            if (timeout != null) {
                connectionFactory.setConnectionTimeout(Integer.parseInt(timeout.toString()));
            }
            Object handshakeTimeout = connectionConfig.get(RabbitMQConstants.RABBITMQ_CONNECTION_HANDSHAKE_TIMEOUT);
            if (handshakeTimeout != null) {
                connectionFactory.setHandshakeTimeout(Integer.parseInt(handshakeTimeout.toString()));
            }
            Object shutdownTimeout = connectionConfig.get(RabbitMQConstants.RABBITMQ_CONNECTION_SHUTDOWN_TIMEOUT);
            if (shutdownTimeout != null) {
                connectionFactory.setShutdownTimeout(Integer.parseInt(shutdownTimeout.toString()));
            }
            Object connectionHeartBeat = connectionConfig.get(RabbitMQConstants.RABBITMQ_CONNECTION_HEARTBEAT);
            if (connectionHeartBeat != null) {
                connectionFactory.setRequestedHeartbeat(Integer.parseInt(connectionHeartBeat.toString()));
            }
            return connectionFactory.newConnection();
        } catch (IOException | TimeoutException exception) {
            throw new RabbitMQConnectorException(RabbitMQConstants.CREATE_CONNECTION_ERROR
                    + exception.getMessage(), exception);
        }
    }

    // NOTE(review): the javadoc below ("Creates and retrieves the initialized SSLContext")
    // documents getSSLContext, which is defined elsewhere in this file — keep them together.
    /**
     * Creates and retrieves the initialized SSLContext.
     *
     * @param secureSocket secureSocket record.
     * @return Initialized SSLContext.
     */
    /**
     * Handles closing the given connection.
     *
     * @param connection   RabbitMQ Connection object.
     * @param timeout      Timeout (in milliseconds) for completing all the close-related
     *                     operations, use -1 for infinity.
     * @param closeCode    The close code (See under "Reply Codes" in the AMQP specification).
     * @param closeMessage A message indicating the reason for closing the connection.
     */
    public static void handleCloseConnection(Connection connection, Object closeCode, Object closeMessage,
                                             Object timeout) {
        boolean validTimeout = timeout != null && RabbitMQUtils.checkIfInt(timeout);
        boolean validCloseCode = (closeCode != null && RabbitMQUtils.checkIfInt(closeCode)) &&
                (closeMessage != null && RabbitMQUtils.checkIfString(closeMessage));
        try {
            // Select the close(...) overload matching whichever arguments were provided.
            if (validTimeout && validCloseCode) {
                connection.close(Integer.parseInt(closeCode.toString()), closeMessage.toString(),
                        Integer.parseInt(timeout.toString()));
            } else if (validTimeout) {
                connection.close(Integer.parseInt(timeout.toString()));
            } else if (validCloseCode) {
                connection.close(Integer.parseInt(closeCode.toString()), closeMessage.toString());
            } else {
                connection.close();
            }
        } catch (IOException | ArithmeticException exception) {
            throw new RabbitMQConnectorException(RabbitMQConstants.CLOSE_CONNECTION_ERROR + exception.getMessage(),
                    exception);
        }
    }

    /**
     * Handles aborting the given connection.
     *
     * @param connection   RabbitMQ Connection object.
     * @param timeout      Timeout (in milliseconds) for completing all the close-related
     *                     operations, use -1 for infinity.
     * @param closeCode    The close code (See under "Reply Codes" in the AMQP specification).
     * @param closeMessage A message indicating the reason for closing the connection.
     */
    public static void handleAbortConnection(Connection connection, Object closeCode, Object closeMessage,
                                             Object timeout) {
        boolean validTimeout = timeout != null && RabbitMQUtils.checkIfInt(timeout);
        boolean validCloseCode = (closeCode != null && RabbitMQUtils.checkIfInt(closeCode)) &&
                (closeMessage != null && RabbitMQUtils.checkIfString(closeMessage));
        // Same overload-selection pattern as handleCloseConnection; abort does not throw IOException.
        if (validTimeout && validCloseCode) {
            connection.abort(Integer.parseInt(closeCode.toString()), closeMessage.toString(),
                    Integer.parseInt(timeout.toString()));
        } else if (validTimeout) {
            connection.abort(Integer.parseInt(timeout.toString()));
        } else if (validCloseCode) {
            connection.abort(Integer.parseInt(closeCode.toString()), closeMessage.toString());
        } else {
            connection.abort();
        }
    }

    /**
     * Checks if close was already called on the connection.
     *
     * @param connection RabbitMQ Connection object.
     * @return True if the connection is already closed and false otherwise.
     */
    public static boolean isClosed(Connection connection) {
        boolean flag = false;
        if (connection == null || !connection.isOpen()) {
            flag = true;
        }
        return flag;
    }

    // Utility class: prevent instantiation.
    private ConnectionUtils() {
    }
}
|
/**
 * Utility class for creating, closing, and aborting RabbitMQ connections.
 */
class ConnectionUtils {
    private static final Logger logger = LoggerFactory.getLogger(ConnectionUtils.class);

    /**
     * Creates a RabbitMQ Connection using the given connection parameters.
     *
     * @param connectionConfig Parameters used to initialize the connection.
     * @return RabbitMQ Connection object.
     */
    public static Connection createConnection(MapValue<String, Object> connectionConfig) {
        try {
            ConnectionFactory connectionFactory = new ConnectionFactory();
            // Optional TLS setup: present only when a secureSocket record was configured.
            MapValue secureSocket = connectionConfig.getMapValue(RabbitMQConstants.RABBITMQ_CONNECTION_SECURE_SOCKET);
            if (secureSocket != null) {
                SSLContext sslContext = getSSLContext(secureSocket);
                connectionFactory.useSslProtocol(sslContext);
                if (secureSocket.getBooleanValue(RabbitMQConstants.RABBITMQ_CONNECTION_VERIFY_HOST)) {
                    connectionFactory.enableHostnameVerification();
                }
                logger.info("TLS enabled for the connection.");
            }
            String host = connectionConfig.getStringValue(RabbitMQConstants.RABBITMQ_CONNECTION_HOST);
            connectionFactory.setHost(host);
            int port = Math.toIntExact(connectionConfig.getIntValue(RabbitMQConstants.RABBITMQ_CONNECTION_PORT));
            connectionFactory.setPort(port);
            // Each of the following settings is applied only when it was configured.
            Object username = connectionConfig.get(RabbitMQConstants.RABBITMQ_CONNECTION_USER);
            if (username != null) {
                connectionFactory.setUsername(username.toString());
            }
            Object pass = connectionConfig.get(RabbitMQConstants.RABBITMQ_CONNECTION_PASS);
            if (pass != null) {
                connectionFactory.setPassword(pass.toString());
            }
            Object timeout = connectionConfig.get(RabbitMQConstants.RABBITMQ_CONNECTION_TIMEOUT);
            if (timeout != null) {
                connectionFactory.setConnectionTimeout(Integer.parseInt(timeout.toString()));
            }
            Object handshakeTimeout = connectionConfig.get(RabbitMQConstants.RABBITMQ_CONNECTION_HANDSHAKE_TIMEOUT);
            if (handshakeTimeout != null) {
                connectionFactory.setHandshakeTimeout(Integer.parseInt(handshakeTimeout.toString()));
            }
            Object shutdownTimeout = connectionConfig.get(RabbitMQConstants.RABBITMQ_CONNECTION_SHUTDOWN_TIMEOUT);
            if (shutdownTimeout != null) {
                connectionFactory.setShutdownTimeout(Integer.parseInt(shutdownTimeout.toString()));
            }
            Object connectionHeartBeat = connectionConfig.get(RabbitMQConstants.RABBITMQ_CONNECTION_HEARTBEAT);
            if (connectionHeartBeat != null) {
                connectionFactory.setRequestedHeartbeat(Integer.parseInt(connectionHeartBeat.toString()));
            }
            return connectionFactory.newConnection();
        } catch (IOException | TimeoutException exception) {
            throw new RabbitMQConnectorException(RabbitMQConstants.CREATE_CONNECTION_ERROR
                    + exception.getMessage(), exception);
        }
    }

    // NOTE(review): the javadoc below ("Creates and retrieves the initialized SSLContext")
    // documents getSSLContext, which is defined elsewhere in this file — keep them together.
    /**
     * Creates and retrieves the initialized SSLContext.
     *
     * @param secureSocket secureSocket record.
     * @return Initialized SSLContext.
     */
    /**
     * Handles closing the given connection.
     *
     * @param connection   RabbitMQ Connection object.
     * @param timeout      Timeout (in milliseconds) for completing all the close-related
     *                     operations, use -1 for infinity.
     * @param closeCode    The close code (See under "Reply Codes" in the AMQP specification).
     * @param closeMessage A message indicating the reason for closing the connection.
     */
    public static void handleCloseConnection(Connection connection, Object closeCode, Object closeMessage,
                                             Object timeout) {
        boolean validTimeout = timeout != null && RabbitMQUtils.checkIfInt(timeout);
        boolean validCloseCode = (closeCode != null && RabbitMQUtils.checkIfInt(closeCode)) &&
                (closeMessage != null && RabbitMQUtils.checkIfString(closeMessage));
        try {
            // Select the close(...) overload matching whichever arguments were provided.
            if (validTimeout && validCloseCode) {
                connection.close(Integer.parseInt(closeCode.toString()), closeMessage.toString(),
                        Integer.parseInt(timeout.toString()));
            } else if (validTimeout) {
                connection.close(Integer.parseInt(timeout.toString()));
            } else if (validCloseCode) {
                connection.close(Integer.parseInt(closeCode.toString()), closeMessage.toString());
            } else {
                connection.close();
            }
        } catch (IOException | ArithmeticException exception) {
            throw new RabbitMQConnectorException(RabbitMQConstants.CLOSE_CONNECTION_ERROR + exception.getMessage(),
                    exception);
        }
    }

    /**
     * Handles aborting the given connection.
     *
     * @param connection   RabbitMQ Connection object.
     * @param timeout      Timeout (in milliseconds) for completing all the close-related
     *                     operations, use -1 for infinity.
     * @param closeCode    The close code (See under "Reply Codes" in the AMQP specification).
     * @param closeMessage A message indicating the reason for closing the connection.
     */
    public static void handleAbortConnection(Connection connection, Object closeCode, Object closeMessage,
                                             Object timeout) {
        boolean validTimeout = timeout != null && RabbitMQUtils.checkIfInt(timeout);
        boolean validCloseCode = (closeCode != null && RabbitMQUtils.checkIfInt(closeCode)) &&
                (closeMessage != null && RabbitMQUtils.checkIfString(closeMessage));
        // Same overload-selection pattern as handleCloseConnection; abort does not throw IOException.
        if (validTimeout && validCloseCode) {
            connection.abort(Integer.parseInt(closeCode.toString()), closeMessage.toString(),
                    Integer.parseInt(timeout.toString()));
        } else if (validTimeout) {
            connection.abort(Integer.parseInt(timeout.toString()));
        } else if (validCloseCode) {
            connection.abort(Integer.parseInt(closeCode.toString()), closeMessage.toString());
        } else {
            connection.abort();
        }
    }

    /**
     * Checks if close was already called on the connection.
     *
     * @param connection RabbitMQ Connection object.
     * @return True if the connection is already closed and false otherwise.
     */
    public static boolean isClosed(Connection connection) {
        boolean flag = false;
        if (connection == null || !connection.isOpen()) {
            flag = true;
        }
        return flag;
    }

    // Utility class: prevent instantiation.
    private ConnectionUtils() {
    }
}
|
Consider adding a method on the tester that creates a tenant with a given access role (or a helper method that updates the access role) so we don't exercise the REST API here — it will also be much faster.
|
// Verifies that the ArchiveAccessMaintainer grants the tenant's configured IAM role
// access to its archive bucket.
// NOTE(review): the role is configured via the REST API below; a tester helper that
// sets the access role directly would be faster and less coupled to the HTTP layer.
public void grantsRoleAccess() {
    var containerTester = new ContainerTester(container, "");
    ((InMemoryFlagSource) containerTester.controller().flagSource())
            .withBooleanFlag(PermanentFlags.ENABLE_PUBLIC_SIGNUP_FLOW.id(), true);
    var tester = new ControllerTester(containerTester);
    var tenantName = tester.createTenant("tenant1", Tenant.Type.cloud);
    // Configure the archive access role through the REST API.
    containerTester.assertResponse(request("/application/v4/tenant/tenant1/archive-access", PUT)
                    .data("{\"role\":\"arn:aws:iam::123456789012:role/my-role\"}").roles(Role.administrator(tenantName)),
            "{\"message\":\"Archive access role set to 'arn:aws:iam::123456789012:role/my-role' for tenant tenant1.\"}", 200);
    var archiveBucketDb = (MockArchiveBucketDb) tester.controller().serviceRegistry().archiveBucketDb();
    var testBucket = new ArchiveBucket("foo", "bar", Set.of(tenantName));
    archiveBucketDb.addBucket(ZoneId.from("prod.us-east-3"), testBucket);
    MockArchiveService archiveService = (MockArchiveService) tester.controller().serviceRegistry().archiveService();
    // No role is authorized before the maintainer runs.
    assertNull(archiveService.authorizedIamRoles.get(testBucket));
    new ArchiveAccessMaintainer(containerTester.controller(), Duration.ofMinutes(10)).maintain();
    assertEquals("arn:aws:iam::123456789012:role/my-role", archiveService.authorizedIamRoles.get(testBucket).get(tenantName));
}
|
containerTester.assertResponse(request("/application/v4/tenant/tenant1/archive-access", PUT)
|
// Verifies the maintainer authorizes each tenant's configured archive-access role on the
// buckets holding that tenant's data. Tenants are created with their role pre-set via the
// tenant-lock helper, so the REST API is not exercised here.
public void grantsRoleAccess() {
    var containerTester = new ContainerTester(container, "");
    ((InMemoryFlagSource) containerTester.controller().flagSource())
    .withBooleanFlag(PermanentFlags.ENABLE_PUBLIC_SIGNUP_FLOW.id(), true)
    .withStringFlag(Flags.SYNC_HOST_LOGS_TO_S3_BUCKET.id(), "auto");
    var tester = new ControllerTester(containerTester);
    String tenant1role = "arn:aws:iam::123456789012:role/my-role";
    String tenant2role = "arn:aws:iam::210987654321:role/my-role";
    var tenant1 = createTenantWithAccessRole(tester, "tenant1", tenant1role);
    // tenant2 exists to show that only roles of tenants on the bucket are granted.
    createTenantWithAccessRole(tester, "tenant2", tenant2role);
    // Presumably registers tenant1 in the archive bucket DB for the zone — TODO confirm.
    tester.controller().archiveBucketDb().archiveUriFor(ZoneId.from("prod.us-east-3"), tenant1);
    var testBucket = new ArchiveBucket("bucketName", "keyArn").withTenant(tenant1);
    MockArchiveService archiveService = (MockArchiveService) tester.controller().serviceRegistry().archiveService();
    // No role is authorized before the maintainer has run.
    assertNull(archiveService.authorizedIamRoles.get(testBucket));
    new ArchiveAccessMaintainer(containerTester.controller(), Duration.ofMinutes(10)).maintain();
    // Only tenant1's role appears on the bucket after one maintenance pass.
    assertEquals(Map.of(tenant1, tenant1role), archiveService.authorizedIamRoles.get(testBucket));
}
|
class ArchiveAccessMaintainerTest extends ControllerContainerCloudTest {
@Test
}
|
class ArchiveAccessMaintainerTest extends ControllerContainerCloudTest {
    @Test
    // NOTE(review): this fragment appears truncated — @Test is attached to a private,
    // value-returning helper, which JUnit would not run as a test; confirm against the full file.
    private TenantName createTenantWithAccessRole(ControllerTester tester, String tenantName, String role) {
        // Create a cloud tenant, then set its archive access role under the tenant lock.
        var tenant = tester.createTenant(tenantName, Tenant.Type.cloud);
        tester.controller().tenants().lockOrThrow(tenant, LockedTenant.Cloud.class, lockedTenant -> {
            lockedTenant = lockedTenant.withArchiveAccessRole(Optional.of(role));
            tester.controller().tenants().store(lockedTenant);
        });
        return tenant;
    }
}
|
Would you like me to throw the exception after all, @gsmet?
|
/**
 * Installs the Maven wrapper (./mvnw) into the generated project by invoking the
 * io.takari:maven plugin's "wrapper" goal.
 *
 * <p>Failure to install the wrapper is not fatal to project creation, but it must be
 * visible to the user, so it is logged at error level together with the cause.
 */
private void createMavenWrapper() {
    try {
        executeMojo(
                plugin(
                        groupId("io.takari"),
                        artifactId("maven"),
                        version(MojoUtils.getMavenWrapperVersion())),
                goal("wrapper"),
                configuration(
                        element(name("maven"), MojoUtils.getProposedMavenVersion())),
                executionEnvironment(
                        project,
                        session,
                        pluginManager));
    } catch (Exception e) {
        // Previously logged at debug level, which silently hid the failure from users;
        // surface it at error level and keep the stack trace.
        getLog().error("Unable to install the Maven wrapper (./mvnw) in the project", e);
    }
}
|
getLog().debug("Unable to create Maven Wrapper");
|
/**
 * Installs the Maven wrapper (./mvnw) into the generated project by invoking the
 * io.takari:maven plugin's "wrapper" goal.
 *
 * <p>Failure is non-fatal to project creation but is logged at error level; the
 * exception is passed along so the stack trace is not lost.
 */
private void createMavenWrapper() {
    try {
        executeMojo(
                plugin(
                        groupId("io.takari"),
                        artifactId("maven"),
                        version(MojoUtils.getMavenWrapperVersion())),
                goal("wrapper"),
                configuration(
                        element(name("maven"), MojoUtils.getProposedMavenVersion())),
                executionEnvironment(
                        project,
                        session,
                        pluginManager));
    } catch (Exception e) {
        // Include the Throwable: logging only the message would discard the cause.
        getLog().error("Unable to install the Maven wrapper (./mvnw) in the project", e);
    }
}
|
class CreateProjectMojo extends AbstractMojo {
public static final String PLUGIN_KEY = MojoUtils.getPluginGroupId() + ":" + MojoUtils.getPluginArtifactId();
private static final String DEFAULT_GROUP_ID = "org.acme.quarkus.sample";
@Parameter(defaultValue = "${project}")
protected MavenProject project;
@Parameter(property = "projectGroupId")
private String projectGroupId;
@Parameter(property = "projectArtifactId")
private String projectArtifactId;
@Parameter(property = "projectVersion")
private String projectVersion;
@Parameter(property = "path")
private String path;
@Parameter(property = "className")
private String className;
@Parameter(property = "extensions")
private Set<String> extensions;
@Parameter(defaultValue = "${session}")
private MavenSession session;
@Component
private Prompter prompter;
@Component
private MavenVersionEnforcer mavenVersionEnforcer;
@Component
private BuildPluginManager pluginManager;
@Override
public void execute() throws MojoExecutionException {
mavenVersionEnforcer.ensureMavenVersion(getLog(), session);
File projectRoot = new File(".");
File pom = new File(projectRoot, "pom.xml");
if (pom.isFile()) {
if (!StringUtils.isBlank(projectGroupId) || !StringUtils.isBlank(projectArtifactId)
|| !StringUtils.isBlank(projectVersion)) {
throw new MojoExecutionException("Unable to generate the project, the `projectGroupId`, " +
"`projectArtifactId` and `projectVersion` parameters are not supported when applied to an " +
"existing `pom.xml` file");
}
projectGroupId = project.getGroupId();
projectArtifactId = project.getArtifactId();
projectVersion = project.getVersion();
} else {
askTheUserForMissingValues();
if (!isDirectoryEmpty(projectRoot)) {
projectRoot = new File(projectArtifactId);
if (projectRoot.exists()) {
throw new MojoExecutionException("Unable to create the project - the current directory is not empty and" +
" the directory " + projectArtifactId + " exists");
}
}
}
boolean success;
try {
sanitizeOptions();
final Map<String, Object> context = new HashMap<>();
context.put("className", className);
context.put("path", path);
success = new CreateProject(projectRoot)
.groupId(projectGroupId)
.artifactId(projectArtifactId)
.version(projectVersion)
.sourceType(determineSourceType(extensions))
.doCreateProject(context);
if (success) {
new AddExtensions(new File(projectRoot, "pom.xml"))
.addExtensions(extensions);
}
createMavenWrapper();
} catch (IOException e) {
throw new MojoExecutionException(e.getMessage(), e);
}
if (success) {
printUserInstructions(projectRoot);
}
}
private SourceType determineSourceType(Set<String> extensions) {
return extensions.stream().anyMatch(e -> e.toLowerCase().contains("kotlin"))
? SourceType.KOTLIN
: SourceType.JAVA;
}
private void askTheUserForMissingValues() throws MojoExecutionException {
if (!session.getRequest().isInteractiveMode() || shouldUseDefaults()) {
if (StringUtils.isBlank(projectGroupId)) {
projectGroupId = DEFAULT_GROUP_ID;
}
if (StringUtils.isBlank(projectArtifactId)) {
projectArtifactId = "my-quarkus-project";
}
if (StringUtils.isBlank(projectVersion)) {
projectVersion = "1.0-SNAPSHOT";
}
return;
}
try {
if (StringUtils.isBlank(projectGroupId)) {
projectGroupId = prompter.promptWithDefaultValue("Set the project groupId",
DEFAULT_GROUP_ID);
}
if (StringUtils.isBlank(projectArtifactId)) {
projectArtifactId = prompter.promptWithDefaultValue("Set the project artifactId",
"my-quarkus-project");
}
if (StringUtils.isBlank(projectVersion)) {
projectVersion = prompter.promptWithDefaultValue("Set the Quarkus version",
"1.0-SNAPSHOT");
}
if (StringUtils.isBlank(className)) {
String answer = prompter.promptWithDefaultValue("Do you want to create a REST resource? (y/n)", "no");
if (isTrueOrYes(answer)) {
String defaultResourceName = projectGroupId.replace("-", ".")
.replace("_", ".") + ".HelloResource";
className = prompter.promptWithDefaultValue("Set the resource classname", defaultResourceName);
if (StringUtils.isBlank(path)) {
path = prompter.promptWithDefaultValue("Set the resource path ", "/hello");
}
} else {
className = null;
path = null;
}
}
} catch (IOException e) {
throw new MojoExecutionException("Unable to get user input", e);
}
}
private boolean shouldUseDefaults() {
return projectArtifactId != null;
}
private boolean isTrueOrYes(String answer) {
if (answer == null) {
return false;
}
String content = answer.trim().toLowerCase();
return "true".equalsIgnoreCase(content) || "yes".equalsIgnoreCase(content) || "y".equalsIgnoreCase(content);
}
private void sanitizeOptions() {
if (className != null) {
if (className.endsWith(MojoUtils.JAVA_EXTENSION)) {
className = className.substring(0, className.length() - MojoUtils.JAVA_EXTENSION.length());
} else if (className.endsWith(MojoUtils.KOTLIN_EXTENSION)) {
className = className.substring(0, className.length() - MojoUtils.KOTLIN_EXTENSION.length());
}
if (!className.contains(".")) {
className = projectGroupId.replace("-", ".").replace("_", ".") + "." + className;
}
if (StringUtils.isBlank(path)) {
path = "/hello";
}
if (!path.startsWith("/")) {
path = "/" + path;
}
}
extensions = extensions.stream().map(String::trim).collect(Collectors.toSet());
}
private void printUserInstructions(File root) {
getLog().info("");
getLog().info("========================================================================================");
getLog().info(
ansi().a("Your new application has been created in ").bold().a(root.getAbsolutePath()).boldOff().toString());
getLog().info(ansi().a("Navigate into this directory and launch your application with ")
.bold()
.fg(Ansi.Color.CYAN)
.a("mvn compile quarkus:dev")
.reset()
.toString());
getLog().info(
ansi().a("Your application will be accessible on ").bold().fg(Ansi.Color.CYAN).a("http:
.reset().toString());
getLog().info("========================================================================================");
getLog().info("");
}
private boolean isDirectoryEmpty(File dir) {
if (!dir.isDirectory()) {
throw new IllegalArgumentException("The specified file must be a directory: " + dir.getAbsolutePath());
}
String[] children = dir.list();
if (children == null) {
throw new IllegalArgumentException("The specified directory cannot be accessed: " + dir.getAbsolutePath());
}
return children.length == 0;
}
}
|
/**
 * Maven mojo that scaffolds a new Quarkus project: generates the pom and sources,
 * adds the requested extensions, and installs the Maven wrapper. Runs either against
 * an existing pom.xml (coordinates taken from the project) or an empty directory,
 * prompting for missing coordinates in interactive mode.
 */
class CreateProjectMojo extends AbstractMojo {
    public static final String PLUGIN_KEY = MojoUtils.getPluginGroupId() + ":" + MojoUtils.getPluginArtifactId();
    private static final String DEFAULT_GROUP_ID = "org.acme.quarkus.sample";
    @Parameter(defaultValue = "${project}")
    protected MavenProject project;
    @Parameter(property = "projectGroupId")
    private String projectGroupId;
    @Parameter(property = "projectArtifactId")
    private String projectArtifactId;
    @Parameter(property = "projectVersion")
    private String projectVersion;
    @Parameter(property = "path")
    private String path;
    @Parameter(property = "className")
    private String className;
    @Parameter(property = "extensions")
    private Set<String> extensions;
    @Parameter(defaultValue = "${session}")
    private MavenSession session;
    @Component
    private Prompter prompter;
    @Component
    private MavenVersionEnforcer mavenVersionEnforcer;
    @Component
    private BuildPluginManager pluginManager;
    @Override
    public void execute() throws MojoExecutionException {
        mavenVersionEnforcer.ensureMavenVersion(getLog(), session);
        File projectRoot = new File(".");
        File pom = new File(projectRoot, "pom.xml");
        if (pom.isFile()) {
            // Existing project: coordinates come from the pom and must not be overridden.
            if (!StringUtils.isBlank(projectGroupId) || !StringUtils.isBlank(projectArtifactId)
                    || !StringUtils.isBlank(projectVersion)) {
                throw new MojoExecutionException("Unable to generate the project, the `projectGroupId`, " +
                        "`projectArtifactId` and `projectVersion` parameters are not supported when applied to an " +
                        "existing `pom.xml` file");
            }
            projectGroupId = project.getGroupId();
            projectArtifactId = project.getArtifactId();
            projectVersion = project.getVersion();
        } else {
            askTheUserForMissingValues();
            if (!isDirectoryEmpty(projectRoot)) {
                // Current directory is not empty: generate into a directory named after the artifactId.
                projectRoot = new File(projectArtifactId);
                if (projectRoot.exists()) {
                    throw new MojoExecutionException("Unable to create the project - the current directory is not empty and" +
                            " the directory " + projectArtifactId + " exists");
                }
            }
        }
        boolean success;
        try {
            sanitizeOptions();
            final Map<String, Object> context = new HashMap<>();
            context.put("className", className);
            context.put("path", path);
            success = new CreateProject(projectRoot)
                    .groupId(projectGroupId)
                    .artifactId(projectArtifactId)
                    .version(projectVersion)
                    .sourceType(determineSourceType(extensions))
                    .doCreateProject(context);
            if (success) {
                new AddExtensions(new File(projectRoot, "pom.xml"))
                        .addExtensions(extensions);
            }
            createMavenWrapper();
        } catch (IOException e) {
            throw new MojoExecutionException(e.getMessage(), e);
        }
        if (success) {
            printUserInstructions(projectRoot);
        }
    }
    // Kotlin sources are generated when any requested extension mentions "kotlin".
    private SourceType determineSourceType(Set<String> extensions) {
        return extensions.stream().anyMatch(e -> e.toLowerCase().contains("kotlin"))
                ? SourceType.KOTLIN
                : SourceType.JAVA;
    }
    // Fills in missing coordinates, with defaults (batch mode or explicit artifactId)
    // or by prompting the user interactively.
    private void askTheUserForMissingValues() throws MojoExecutionException {
        if (!session.getRequest().isInteractiveMode() || shouldUseDefaults()) {
            if (StringUtils.isBlank(projectGroupId)) {
                projectGroupId = DEFAULT_GROUP_ID;
            }
            if (StringUtils.isBlank(projectArtifactId)) {
                projectArtifactId = "my-quarkus-project";
            }
            if (StringUtils.isBlank(projectVersion)) {
                projectVersion = "1.0-SNAPSHOT";
            }
            return;
        }
        try {
            if (StringUtils.isBlank(projectGroupId)) {
                projectGroupId = prompter.promptWithDefaultValue("Set the project groupId",
                        DEFAULT_GROUP_ID);
            }
            if (StringUtils.isBlank(projectArtifactId)) {
                projectArtifactId = prompter.promptWithDefaultValue("Set the project artifactId",
                        "my-quarkus-project");
            }
            if (StringUtils.isBlank(projectVersion)) {
                // NOTE(review): this prompt says "Quarkus version" but sets the *project*
                // version — the wording looks wrong; confirm and fix upstream.
                projectVersion = prompter.promptWithDefaultValue("Set the Quarkus version",
                        "1.0-SNAPSHOT");
            }
            if (StringUtils.isBlank(className)) {
                String answer = prompter.promptWithDefaultValue("Do you want to create a REST resource? (y/n)", "no");
                if (isTrueOrYes(answer)) {
                    String defaultResourceName = projectGroupId.replace("-", ".")
                            .replace("_", ".") + ".HelloResource";
                    className = prompter.promptWithDefaultValue("Set the resource classname", defaultResourceName);
                    if (StringUtils.isBlank(path)) {
                        path = prompter.promptWithDefaultValue("Set the resource path ", "/hello");
                    }
                } else {
                    className = null;
                    path = null;
                }
            }
        } catch (IOException e) {
            throw new MojoExecutionException("Unable to get user input", e);
        }
    }
    // An explicitly supplied artifactId signals that defaults should be used for the rest.
    private boolean shouldUseDefaults() {
        return projectArtifactId != null;
    }
    private boolean isTrueOrYes(String answer) {
        if (answer == null) {
            return false;
        }
        String content = answer.trim().toLowerCase();
        return "true".equalsIgnoreCase(content) || "yes".equalsIgnoreCase(content) || "y".equalsIgnoreCase(content);
    }
    // Normalizes className (strip source extension, qualify with group id) and path (leading '/').
    private void sanitizeOptions() {
        if (className != null) {
            if (className.endsWith(MojoUtils.JAVA_EXTENSION)) {
                className = className.substring(0, className.length() - MojoUtils.JAVA_EXTENSION.length());
            } else if (className.endsWith(MojoUtils.KOTLIN_EXTENSION)) {
                className = className.substring(0, className.length() - MojoUtils.KOTLIN_EXTENSION.length());
            }
            if (!className.contains(".")) {
                className = projectGroupId.replace("-", ".").replace("_", ".") + "." + className;
            }
            if (StringUtils.isBlank(path)) {
                path = "/hello";
            }
            if (!path.startsWith("/")) {
                path = "/" + path;
            }
        }
        extensions = extensions.stream().map(String::trim).collect(Collectors.toSet());
    }
    private void printUserInstructions(File root) {
        getLog().info("");
        getLog().info("========================================================================================");
        getLog().info(
                ansi().a("Your new application has been created in ").bold().a(root.getAbsolutePath()).boldOff().toString());
        getLog().info(ansi().a("Navigate into this directory and launch your application with ")
                .bold()
                .fg(Ansi.Color.CYAN)
                .a("mvn compile quarkus:dev")
                .reset()
                .toString());
        getLog().info(
                // NOTE(review): the URL string literal below appears truncated in this copy of
                // the file (unterminated) — restore from the original source.
                ansi().a("Your application will be accessible on ").bold().fg(Ansi.Color.CYAN).a("http:
                .reset().toString());
        getLog().info("========================================================================================");
        getLog().info("");
    }
    private boolean isDirectoryEmpty(File dir) {
        if (!dir.isDirectory()) {
            throw new IllegalArgumentException("The specified file must be a directory: " + dir.getAbsolutePath());
        }
        String[] children = dir.list();
        if (children == null) {
            throw new IllegalArgumentException("The specified directory cannot be accessed: " + dir.getAbsolutePath());
        }
        return children.length == 0;
    }
}
Keep in mind that when you inject the stage version of the session, it will be eagerly created. So the Mutiny version is not less efficient. I had a look at the session implementation code. It seems that a connection is acquired as soon as the session is created. Perhaps this should be delayed until a connection is actually needed. But I don't know if this is possible in practice.
|
/** Dispose callback: resolves the request-scoped session Uni and closes the session. */
public void disposeMutinySession(@Disposes Uni<Mutiny.Session> reactiveSession) {
    reactiveSession.subscribe().with(session -> session.close());
}
|
reactiveSession.subscribe().with(Mutiny.Session::close);
|
// Dispose callback for the request-scoped Mutiny session: subscribe to the Uni and close
// the session once it resolves. The producer memoizes the Uni with cache(), so this closes
// the same session instance handed out during the request — confirm against the producer.
public void disposeMutinySession(@Disposes Uni<Mutiny.Session> reactiveSession) {
    reactiveSession.subscribe().with(Mutiny.Session::close);
}
|
/**
 * CDI producer for request-scoped Hibernate Reactive sessions, exposed in both the
 * Stage (CompletionStage) and Mutiny (Uni) flavors. Both producers are @DefaultBean,
 * so applications may override them.
 */
class ReactiveSessionProducer {
    @Inject
    private Stage.SessionFactory reactiveSessionFactory;
    @Inject
    private Mutiny.SessionFactory mutinySessionFactory;
    @Produces
    @RequestScoped
    @DefaultBean
    public CompletionStage<Stage.Session> stageSession() {
        // NOTE(review): opening a session here eagerly acquires a connection — confirm
        // whether acquisition could be deferred until the connection is actually needed.
        return reactiveSessionFactory.openSession();
    }
    @Produces
    @RequestScoped
    @DefaultBean
    public Uni<Mutiny.Session> mutinySession() {
        // cache() memoizes the result so repeated subscriptions (including the disposer)
        // observe the same session instance.
        return mutinySessionFactory.openSession().cache();
    }
    public void disposeStageSession(@Disposes CompletionStage<Stage.Session> reactiveSession) {
        // Close the session when the stage completes; nothing to close on failure (s == null).
        reactiveSession.whenComplete((s, t) -> {
            if (s != null)
                s.close();
        });
    }
}
|
/**
 * CDI producer for request-scoped Hibernate Reactive sessions in both the Stage
 * (CompletionStage) and Mutiny (Uni) flavors; producers are @DefaultBean and thus
 * overridable by applications.
 */
class ReactiveSessionProducer {
    @Inject
    private Stage.SessionFactory reactiveSessionFactory;
    @Inject
    private Mutiny.SessionFactory mutinySessionFactory;
    @Produces
    @RequestScoped
    @DefaultBean
    public CompletionStage<Stage.Session> stageSession() {
        // Opens the session (and, per review discussion, eagerly acquires a connection).
        return reactiveSessionFactory.openSession();
    }
    @Produces
    @RequestScoped
    @DefaultBean
    public Uni<Mutiny.Session> mutinySession() {
        // cache() memoizes the opened session across subscriptions.
        return mutinySessionFactory.openSession().cache();
    }
    public void disposeStageSession(@Disposes CompletionStage<Stage.Session> reactiveSession) {
        // Close the session on successful completion; s is null on exceptional completion.
        reactiveSession.whenComplete((s, t) -> {
            if (s != null)
                s.close();
        });
    }
}
|
Only targeted at tests — setting a JSON environment-variable value through the command line in `live-platform-matrix.json` never seemed to work, for some reason. This seemed like a simple compromise, and the newly added system property is hidden. Let me know if you disagree — I can investigate how to set it as JSON directly.
|
/**
 * Builds a CosmosAsyncClient from this builder's configuration, optionally warming up
 * connections/caches for proactively initialized containers and logging startup info.
 */
CosmosAsyncClient buildAsyncClient(boolean logStartupInfo) {
    StopWatch stopwatch = new StopWatch();
    stopwatch.start();
    // Test-only escape hatch: opt into the default partition-level circuit breaker via a
    // system property. The condition was previously checked twice in a row; once suffices.
    if (Configs.shouldOptInDefaultCircuitBreakerConfig()) {
        System.setProperty("COSMOS.PARTITION_LEVEL_CIRCUIT_BREAKER_CONFIG", "{\"isPartitionLevelCircuitBreakerEnabled\": true}");
    }
    this.resetSessionCapturingType();
    validateConfig();
    buildConnectionPolicy();
    CosmosAsyncClient cosmosAsyncClient = new CosmosAsyncClient(this);
    if (proactiveContainerInitConfig != null) {
        cosmosAsyncClient.recordOpenConnectionsAndInitCachesStarted(proactiveContainerInitConfig.getCosmosContainerIdentities());
        // Use the aggressive warm-up window when configured, otherwise the default behavior.
        Duration aggressiveWarmupDuration = proactiveContainerInitConfig
                .getAggressiveWarmupDuration();
        if (aggressiveWarmupDuration != null) {
            cosmosAsyncClient.openConnectionsAndInitCaches(aggressiveWarmupDuration);
        } else {
            cosmosAsyncClient.openConnectionsAndInitCaches();
        }
        cosmosAsyncClient.recordOpenConnectionsAndInitCachesCompleted(proactiveContainerInitConfig.getCosmosContainerIdentities());
    } else {
        cosmosAsyncClient.recordOpenConnectionsAndInitCachesCompleted(new ArrayList<>());
    }
    if (logStartupInfo) {
        logStartupInfo(stopwatch, cosmosAsyncClient);
    }
    return cosmosAsyncClient;
}
|
if (Configs.shouldOptInDefaultCircuitBreakerConfig()) {
|
// Builds a CosmosAsyncClient from this builder's configuration, optionally warming up
// connections/caches for proactively initialized containers and logging startup info.
CosmosAsyncClient buildAsyncClient(boolean logStartupInfo) {
    StopWatch stopwatch = new StopWatch();
    stopwatch.start();
    // Test-only escape hatch (see review note): passing JSON through the command line
    // proved unreliable, so the circuit-breaker config is set as a system property.
    if (Configs.shouldOptInDefaultCircuitBreakerConfig()) {
        System.setProperty("COSMOS.PARTITION_LEVEL_CIRCUIT_BREAKER_CONFIG", "{\"isPartitionLevelCircuitBreakerEnabled\": true}");
    }
    this.resetSessionCapturingType();
    validateConfig();
    buildConnectionPolicy();
    CosmosAsyncClient cosmosAsyncClient = new CosmosAsyncClient(this);
    if (proactiveContainerInitConfig != null) {
        cosmosAsyncClient.recordOpenConnectionsAndInitCachesStarted(proactiveContainerInitConfig.getCosmosContainerIdentities());
        // Use the aggressive warm-up window when configured, otherwise the default behavior.
        Duration aggressiveWarmupDuration = proactiveContainerInitConfig
                .getAggressiveWarmupDuration();
        if (aggressiveWarmupDuration != null) {
            cosmosAsyncClient.openConnectionsAndInitCaches(aggressiveWarmupDuration);
        } else {
            cosmosAsyncClient.openConnectionsAndInitCaches();
        }
        cosmosAsyncClient.recordOpenConnectionsAndInitCachesCompleted(proactiveContainerInitConfig.getCosmosContainerIdentities());
    } else {
        cosmosAsyncClient.recordOpenConnectionsAndInitCachesCompleted(new ArrayList<>());
    }
    if (logStartupInfo) {
        logStartupInfo(stopwatch, cosmosAsyncClient);
    }
    return cosmosAsyncClient;
}
|
class to instantiate {@link CosmosContainerProactiveInitConfig}
|
class to instantiate {@link CosmosContainerProactiveInitConfig}
|
The most risky bug in this code is: Potential ClassCastException when casting `Table` to `IcebergTable`. You can modify the code like this: ```java @@ -515,6 +516,17 @@ private IcebergSplitScanTask buildIcebergSplitScanTask( @Override public void refreshTable(String srDbName, Table table, List<String> partitionNames, boolean onlyCachedPartitions) { + if (isResourceMappingCatalog(catalogName)) { + refreshTableWithResource(table); + } else { + if (table instanceof IcebergTable) { // Added instance check + IcebergTable icebergTable = (IcebergTable) table; + String dbName = icebergTable.getRemoteDbName(); + String tableName = icebergTable.getRemoteTableName(); + icebergCatalog.refreshTable(dbName, tableName); + } else { + // Handle the case where the table is not an instance of IcebergTable + } + } + } + + private void refreshTableWithResource(Table table) { if (table instanceof IcebergTable) { // Added instance check IcebergTable icebergTable = (IcebergTable) table; org.apache.iceberg.Table nativeTable = icebergTable.getNativeTable(); ```
|
// Refreshes the Iceberg metadata of an external table and resets its cached snapshot so
// subsequent reads see the latest state.
private void refreshTableWithResource(Table table) {
    // Assumes callers always pass an IcebergTable — a plain cast throws ClassCastException
    // otherwise; TODO confirm at the call sites.
    IcebergTable icebergTable = (IcebergTable) table;
    org.apache.iceberg.Table nativeTable = icebergTable.getNativeTable();
    try {
        if (nativeTable instanceof BaseTable) {
            BaseTable baseTable = (BaseTable) nativeTable;
            // refresh() returning null indicates the table no longer exists under the hood.
            if (baseTable.operations().refresh() == null) {
                throw new NoSuchTableException("No such table: %s", nativeTable.name());
            }
        } else {
            throw new StarRocksConnectorException("Invalid table type of %s, it should be a BaseTable!", nativeTable.name());
        }
    } catch (NoSuchTableException e) {
        // NOTE(review): the original cause is dropped here; consider attaching it if the
        // exception type supports a cause.
        throw new StarRocksConnectorException("No such table %s", nativeTable.name());
    } catch (IllegalStateException ei) {
        throw new StarRocksConnectorException("Refresh table %s with failure, the table under hood" +
                " may have been dropped. You should re-create the external table. cause %s",
                nativeTable.name(), ei.getMessage());
    }
    // Drop the cached snapshot so the next access re-resolves it.
    icebergTable.resetSnapshot();
}
|
throw new StarRocksConnectorException("No such table %s", nativeTable.name());
|
// Refreshes the Iceberg metadata of an external table and resets its cached snapshot.
private void refreshTableWithResource(Table table) {
    // Assumes an IcebergTable — TODO confirm callers never pass other table types.
    IcebergTable icebergTable = (IcebergTable) table;
    org.apache.iceberg.Table nativeTable = icebergTable.getNativeTable();
    try {
        if (nativeTable instanceof BaseTable) {
            BaseTable baseTable = (BaseTable) nativeTable;
            // A null refresh() result means the underlying table has disappeared.
            if (baseTable.operations().refresh() == null) {
                throw new NoSuchTableException("No such table: %s", nativeTable.name());
            }
        } else {
            throw new StarRocksConnectorException("Invalid table type of %s, it should be a BaseTable!", nativeTable.name());
        }
    } catch (NoSuchTableException e) {
        // NOTE(review): cause `e` is discarded; attach it if the exception supports a cause.
        throw new StarRocksConnectorException("No such table %s", nativeTable.name());
    } catch (IllegalStateException ei) {
        throw new StarRocksConnectorException("Refresh table %s with failure, the table under hood" +
                " may have been dropped. You should re-create the external table. cause %s",
                nativeTable.name(), ei.getMessage());
    }
    // Invalidate the cached snapshot so the next access re-resolves it.
    icebergTable.resetSnapshot();
}
|
class IcebergMetadata implements ConnectorMetadata {
private static final Logger LOG = LogManager.getLogger(IcebergMetadata.class);
// Catalog name as registered in StarRocks.
private final String catalogName;
private final HdfsEnvironment hdfsEnvironment;
// Underlying Iceberg catalog implementation (Hive/Glue/REST/...).
private final IcebergCatalog icebergCatalog;
private final IcebergStatisticProvider statisticProvider = new IcebergStatisticProvider();
// Caches: converted tables by identifier and databases by name; concurrent because
// metadata calls may race.
private final Map<TableIdentifier, Table> tables = new ConcurrentHashMap<>();
private final Map<String, Database> databases = new ConcurrentHashMap<>();
// Planned file-scan tasks per (db, table, snapshot, predicate) filter key, and the set
// of keys already planned.
private final Map<IcebergFilter, List<FileScanTask>> splitTasks = new ConcurrentHashMap<>();
private final Set<IcebergFilter> scannedTables = new HashSet<>();
private final Map<FileScanTaskSchema, Pair<String, String>> fileScanTaskSchemas = new ConcurrentHashMap<>();
public IcebergMetadata(String catalogName, HdfsEnvironment hdfsEnvironment, IcebergCatalog icebergCatalog) {
    this.catalogName = catalogName;
    this.hdfsEnvironment = hdfsEnvironment;
    this.icebergCatalog = icebergCatalog;
    // Side effect: installs a thread-local Iceberg metrics reporter for the constructing thread.
    new IcebergMetricsReporter().setThreadLocalReporter();
}
@Override
public List<String> listDbNames() {
    // Straight pass-through to the Iceberg catalog; no caching.
    return icebergCatalog.listAllDatabases();
}
/**
 * Creates a database in the Iceberg catalog.
 *
 * @throws AlreadyExistsException if a database with this name already exists
 */
@Override
public void createDb(String dbName, Map<String, String> properties) throws AlreadyExistsException {
    if (dbExists(dbName)) {
        // Include the database name so the failure message is actionable.
        throw new AlreadyExistsException("Database Already Exists: " + dbName);
    }
    icebergCatalog.createDb(dbName, properties);
}
/**
 * Drops a database from the Iceberg catalog and evicts it from the local cache.
 * Refuses to drop a database that still contains tables.
 */
@Override
public void dropDb(String dbName, boolean isForceDrop) throws MetaNotFoundException {
    // isEmpty() is the idiomatic form of size() != 0.
    if (!listTableNames(dbName).isEmpty()) {
        throw new StarRocksConnectorException("Database %s not empty", dbName);
    }
    icebergCatalog.dropDb(dbName);
    // Invalidate the cached Database entry.
    databases.remove(dbName);
}
/**
 * Returns the database with the given name, serving from the local cache when possible.
 * Returns {@code null} when the namespace does not exist in the Iceberg catalog.
 */
@Override
public Database getDb(String dbName) {
    // ConcurrentHashMap never stores null values, so a null lookup reliably means "not cached".
    Database cached = databases.get(dbName);
    if (cached != null) {
        return cached;
    }
    try {
        Database db = icebergCatalog.getDB(dbName);
        databases.put(dbName, db);
        return db;
    } catch (NoSuchNamespaceException e) {
        LOG.error("Database {} not found", dbName, e);
        return null;
    }
}
@Override
public List<String> listTableNames(String dbName) {
    // Straight pass-through to the Iceberg catalog.
    return icebergCatalog.listTables(dbName);
}
// Creates an Iceberg table from a StarRocks CREATE TABLE statement. An explicit
// "location" property overrides the catalog's default table location.
@Override
public boolean createTable(CreateTableStmt stmt) throws DdlException {
    String dbName = stmt.getDbName();
    String tableName = stmt.getTableName();
    Schema schema = toIcebergApiSchema(stmt.getColumns());
    PartitionDesc partitionDesc = stmt.getPartitionDesc();
    // Assumes any partition desc present is a ListPartitionDesc — the cast fails otherwise;
    // TODO confirm the analyzer guarantees this.
    List<String> partitionColNames = partitionDesc == null ? Lists.newArrayList() :
            ((ListPartitionDesc) partitionDesc).getPartitionColNames();
    PartitionSpec partitionSpec = parsePartitionFields(schema, partitionColNames);
    Map<String, String> properties = stmt.getProperties() == null ? new HashMap<>() : stmt.getProperties();
    String tableLocation = properties.get(LOCATION_PROPERTY);
    Map<String, String> createTableProperties = IcebergApiConverter.rebuildCreateTableProperties(properties);
    return icebergCatalog.createTable(dbName, tableName, schema, partitionSpec, tableLocation, createTableProperties);
}
@Override
public void dropTable(DropTableStmt stmt) {
    Table icebergTable = getTable(stmt.getDbName(), stmt.getTableName());
    // Silently ignore tables that cannot be resolved (already gone or unreachable).
    if (icebergTable == null) {
        return;
    }
    icebergCatalog.dropTable(stmt.getDbName(), stmt.getTableName(), stmt.isForceDrop());
    // Evict the converted-table cache entry and clean up associated statistics.
    tables.remove(TableIdentifier.of(stmt.getDbName(), stmt.getTableName()));
    StatisticUtils.dropStatisticsAfterDropTable(icebergTable);
}
/**
 * Returns the StarRocks representation of an Iceberg table, converting and caching it on
 * first access. Returns {@code null} when the table cannot be loaded.
 */
@Override
public Table getTable(String dbName, String tblName) {
    TableIdentifier identifier = TableIdentifier.of(dbName, tblName);
    // Fast path: previously converted tables are cached (ConcurrentHashMap stores no nulls).
    Table cached = tables.get(identifier);
    if (cached != null) {
        return cached;
    }
    try {
        IcebergCatalogType catalogType = icebergCatalog.getIcebergCatalogType();
        org.apache.iceberg.Table icebergTable = icebergCatalog.getTable(dbName, tblName);
        Table table = IcebergApiConverter.toIcebergTable(icebergTable, catalogName, dbName, tblName, catalogType.name());
        tables.put(identifier, table);
        return table;
    } catch (StarRocksConnectorException | NoSuchTableException e) {
        LOG.error("Failed to get iceberg table {}", identifier, e);
        return null;
    }
}
/**
 * Lists partition names for the given table. Only supported for Hive-, REST- and
 * Glue-backed Iceberg catalogs.
 */
@Override
public List<String> listPartitionNames(String dbName, String tblName) {
    IcebergCatalogType nativeType = icebergCatalog.getIcebergCatalogType();
    boolean supported = nativeType == HIVE_CATALOG || nativeType == REST_CATALOG || nativeType == GLUE_CATALOG;
    if (!supported) {
        throw new StarRocksConnectorException(
                "Do not support get partitions from catalog type: " + nativeType);
    }
    return icebergCatalog.listPartitionNames(dbName, tblName);
}
// ConnectorMetadata entry point for resolving remote data files of a scan. The
// partitionKeys and fieldNames arguments are unused for Iceberg; planning is driven by
// the snapshot id and the pushed-down predicate.
@Override
public List<RemoteFileInfo> getRemoteFileInfos(Table table, List<PartitionKey> partitionKeys,
                                               long snapshotId, ScalarOperator predicate,
                                               List<String> fieldNames, long limit) {
    // Assumes an IcebergTable — TODO confirm callers never pass other table types.
    return getRemoteFileInfos((IcebergTable) table, snapshotId, predicate, limit);
}
/**
 * Resolves the planned Iceberg file-scan tasks for the given snapshot/predicate and wraps
 * them in a single RemoteFileInfo. Triggers plan-files first if this key has not been
 * scanned yet.
 */
private List<RemoteFileInfo> getRemoteFileInfos(IcebergTable table, long snapshotId,
                                                ScalarOperator predicate, long limit) {
    RemoteFileInfo remoteFileInfo = new RemoteFileInfo();
    String dbName = table.getRemoteDbName();
    String tableName = table.getRemoteTableName();
    IcebergFilter key = IcebergFilter.of(dbName, tableName, snapshotId, predicate);
    triggerIcebergPlanFilesIfNeeded(key, table, predicate, limit);
    List<FileScanTask> icebergScanTasks = splitTasks.get(key);
    if (icebergScanTasks == null) {
        // Use %s placeholders: every other throw of this exception in the class formats
        // with %s, so SLF4J-style {} markers would be emitted verbatim instead of the values.
        throw new StarRocksConnectorException("Missing iceberg split task for table:[%s.%s]. predicate:[%s]",
                dbName, tableName, predicate);
    }
    List<RemoteFileDesc> remoteFileDescs = Lists.newArrayList(RemoteFileDesc.createIcebergRemoteFileDesc(icebergScanTasks));
    remoteFileInfo.setFiles(remoteFileDescs);
    return Lists.newArrayList(remoteFileInfo);
}
// Plans (and caches) the file-scan tasks for this filter key unless an identical scan has
// already been performed; planning time is recorded under the EXTERNAL tracer scope.
private void triggerIcebergPlanFilesIfNeeded(IcebergFilter key, IcebergTable table, ScalarOperator predicate, long limit) {
    if (!scannedTables.contains(key)) {
        try (Timer ignored = Tracers.watchScope(EXTERNAL, "ICEBERG.processSplit." + key)) {
            collectTableStatisticsAndCacheIcebergSplit(table, predicate, limit);
        }
    }
}
/**
 * Derives the distinct partition keys covered by the cached file-scan tasks of this table
 * under the given predicate. Returns an empty list for snapshot-less (empty) tables.
 */
public List<PartitionKey> getPrunedPartitions(Table table, ScalarOperator predicate, long limit) {
    // Assumes an IcebergTable, as elsewhere in this class.
    IcebergTable icebergTable = (IcebergTable) table;
    String dbName = icebergTable.getRemoteDbName();
    String tableName = icebergTable.getRemoteTableName();
    Optional<Snapshot> snapshot = icebergTable.getSnapshot();
    if (!snapshot.isPresent()) {
        return new ArrayList<>();
    }
    IcebergFilter key = IcebergFilter.of(dbName, tableName, snapshot.get().snapshotId(), predicate);
    triggerIcebergPlanFilesIfNeeded(key, icebergTable, predicate, limit);
    List<PartitionKey> partitionKeys = new ArrayList<>();
    List<FileScanTask> icebergSplitTasks = splitTasks.get(key);
    if (icebergSplitTasks == null) {
        // %s placeholders: this exception formats with String.format-style markers, as in
        // the rest of this class; '{}' markers would not be substituted.
        throw new StarRocksConnectorException("Missing iceberg split task for table:[%s.%s]. predicate:[%s]",
                dbName, tableName, predicate);
    }
    Set<List<String>> scannedPartitions = new HashSet<>();
    PartitionSpec spec = icebergTable.getNativeTable().spec();
    List<Column> partitionColumns = icebergTable.getPartitionColumnsIncludeTransformed();
    for (FileScanTask fileScanTask : icebergSplitTasks) {
        org.apache.iceberg.PartitionData partitionData = (org.apache.iceberg.PartitionData) fileScanTask.file().partition();
        List<String> values = PartitionUtil.getIcebergPartitionValues(spec, partitionData);
        if (values.size() != partitionColumns.size()) {
            // Partition tuple does not match the current column set; skip.
            continue;
        }
        // Set.add returns false for duplicates — replaces the former contains()/add() pair.
        if (!scannedPartitions.add(values)) {
            continue;
        }
        try {
            List<com.starrocks.catalog.Type> srTypes = new ArrayList<>();
            for (PartitionField partitionField : spec.fields()) {
                if (partitionField.transform().isVoid()) {
                    continue;
                }
                if (!partitionField.transform().isIdentity()) {
                    // Non-identity transforms: use the transform's result type, mapping DATE
                    // results to INTEGER before converting to a StarRocks type.
                    Type sourceType = spec.schema().findType(partitionField.sourceId());
                    Type resultType = partitionField.transform().getResultType(sourceType);
                    if (resultType == Types.DateType.get()) {
                        resultType = Types.IntegerType.get();
                    }
                    srTypes.add(fromIcebergType(resultType));
                    continue;
                }
                srTypes.add(icebergTable.getColumn(partitionField.name()).getType());
            }
            if (icebergTable.hasPartitionTransformedEvolution()) {
                // With transform evolution, fall back to the table's own partition column types.
                srTypes = partitionColumns.stream()
                        .map(Column::getType)
                        .collect(Collectors.toList());
            }
            partitionKeys.add(createPartitionKeyWithType(values, srTypes, table.getType()));
        } catch (Exception e) {
            LOG.error("create partition key failed.", e);
            // NOTE(review): only the message survives here; consider propagating the cause.
            throw new StarRocksConnectorException(e.getMessage());
        }
    }
    return partitionKeys;
}
/**
 * Plans the iceberg file scan tasks for {@code table} under {@code predicate}, feeds per-file
 * statistics into {@link #statisticProvider}, and caches the resulting split tasks in
 * {@link #splitTasks} keyed by (db, table, snapshot, predicate).
 *
 * <p>When the query filters only on partition columns and carries a small LIMIT (and the table
 * is v1 with an unevolved spec), manifest reading is pruned: planning stops early once enough
 * rows have been seen.
 */
private void collectTableStatisticsAndCacheIcebergSplit(Table table, ScalarOperator predicate, long limit) {
    IcebergTable icebergTable = (IcebergTable) table;
    Optional<Snapshot> snapshot = icebergTable.getSnapshot();
    // An empty table has no snapshot; nothing to plan or cache.
    if (!snapshot.isPresent()) {
        return;
    }
    long snapshotId = snapshot.get().snapshotId();
    String dbName = icebergTable.getRemoteDbName();
    String tableName = icebergTable.getRemoteTableName();
    IcebergFilter key = IcebergFilter.of(dbName, tableName, snapshotId, predicate);

    org.apache.iceberg.Table nativeTbl = icebergTable.getNativeTable();
    Types.StructType schema = nativeTbl.schema().asStruct();

    // Translate the StarRocks predicate conjuncts into an iceberg Expression for planning.
    List<ScalarOperator> scalarOperators = Utils.extractConjuncts(predicate);
    ScalarOperatorToIcebergExpr.IcebergContext icebergContext = new ScalarOperatorToIcebergExpr.IcebergContext(schema);
    Expression icebergPredicate = new ScalarOperatorToIcebergExpr().convert(scalarOperators, icebergContext);

    TableScan scan = nativeTbl.newScan().useSnapshot(snapshotId);
    if (enableCollectColumnStatistics()) {
        scan = scan.includeColumnStats();
    }
    if (icebergPredicate.op() != Expression.Operation.TRUE) {
        scan = scan.filter(icebergPredicate);
    }

    CloseableIterable<FileScanTask> fileScanTaskIterable = TableScanUtil.splitFiles(
            scan.planFiles(), scan.targetSplitSize());
    CloseableIterator<FileScanTask> fileScanTaskIterator = fileScanTaskIterable.iterator();
    Iterator<FileScanTask> fileScanTasks;

    // Pruning is only safe when: a LIMIT exists and fits in an int, the predicate touches
    // partition columns only, the table has no delete files (v1), the partition spec has not
    // evolved (specId == 0), and the session enables it.
    boolean canPruneManifests = limit != -1 && !icebergTable.isV2Format() && onlyHasPartitionPredicate(table, predicate)
            && limit < Integer.MAX_VALUE && nativeTbl.spec().specId() == 0 && enablePruneManifest();
    if (canPruneManifests) {
        fileScanTasks = Iterators.limit(fileScanTaskIterator, (int) limit);
    } else {
        fileScanTasks = fileScanTaskIterator;
    }

    List<Types.NestedField> fullColumns = nativeTbl.schema().columns();
    Map<Integer, Type.PrimitiveType> idToTypeMapping = fullColumns.stream()
            .filter(column -> column.type().isPrimitiveType())
            .collect(Collectors.toMap(Types.NestedField::fieldId, column -> column.type().asPrimitiveType()));
    Set<Integer> identityPartitionIds = nativeTbl.spec().fields().stream()
            .filter(x -> x.transform().isIdentity())
            .map(PartitionField::sourceId)
            .collect(Collectors.toSet());
    List<Types.NestedField> nonPartitionPrimitiveColumns = fullColumns.stream()
            .filter(column -> !identityPartitionIds.contains(column.fieldId()) &&
                    column.type().isPrimitiveType())
            .collect(toImmutableList());

    List<FileScanTask> icebergScanTasks = Lists.newArrayList();
    long totalReadCount = 0;
    // A data file may back several split tasks; count its records only once toward LIMIT.
    Set<String> filePaths = new HashSet<>();
    while (fileScanTasks.hasNext()) {
        // BUGFIX: advance the (possibly limit-wrapped) iterator, not the raw underlying one.
        // Calling fileScanTaskIterator.next() directly bypassed Iterators.limit()'s counter,
        // so the wrapper could never cap the number of planned tasks.
        FileScanTask scanTask = fileScanTasks.next();
        statisticProvider.updateIcebergFileStats(
                icebergTable, scanTask, idToTypeMapping, nonPartitionPrimitiveColumns, key);

        FileScanTask icebergSplitScanTask = scanTask;
        if (enableCollectColumnStatistics()) {
            icebergSplitScanTask = buildIcebergSplitScanTask(scanTask, icebergPredicate, key);
        }
        icebergScanTasks.add(icebergSplitScanTask);

        String filePath = icebergSplitScanTask.file().path().toString();
        if (!filePaths.contains(filePath)) {
            filePaths.add(filePath);
            totalReadCount += scanTask.file().recordCount();
        }
        if (canPruneManifests && totalReadCount >= limit) {
            break;
        }
    }

    try {
        fileScanTaskIterable.close();
        fileScanTaskIterator.close();
    } catch (IOException ignored) {
        // Best-effort close: the tasks are already collected, so a close failure is harmless.
    }

    IcebergMetricsReporter.lastReport().ifPresent(scanReportWithCounter ->
            Tracers.record(Tracers.Module.EXTERNAL, "ICEBERG.ScanMetrics." +
                            scanReportWithCounter.getScanReport().tableName() + " / No_" +
                            scanReportWithCounter.getCount(),
                    scanReportWithCounter.getScanReport().scanMetrics().toString()));

    splitTasks.put(key, icebergScanTasks);
    scannedTables.add(key);
}
/**
 * Returns optimizer statistics for an iceberg table. An empty table (no snapshot yet) reports
 * a placeholder of one output row with unknown column statistics; otherwise the plan-files
 * pass is triggered if not already cached and the collected statistics are returned.
 */
@Override
public Statistics getTableStatistics(OptimizerContext session,
                                     Table table,
                                     Map<ColumnRefOperator, Column> columns,
                                     List<PartitionKey> partitionKeys,
                                     ScalarOperator predicate,
                                     long limit) {
    IcebergTable icebergTable = (IcebergTable) table;
    Optional<Snapshot> snapshot = icebergTable.getSnapshot();
    if (!snapshot.isPresent()) {
        // No snapshot means no data files: report a single row with unknown column stats.
        return Statistics.builder()
                .setOutputRowCount(1)
                .addColumnStatistics(statisticProvider.buildUnknownColumnStatistics(columns.keySet()))
                .build();
    }
    IcebergFilter key = IcebergFilter.of(
            icebergTable.getRemoteDbName(), icebergTable.getRemoteTableName(),
            snapshot.get().snapshotId(), predicate);
    triggerIcebergPlanFilesIfNeeded(key, icebergTable, predicate, limit);
    return statisticProvider.getTableStatistics(icebergTable, columns, session, predicate);
}
/**
 * Converts a planned {@link FileScanTask} into a lightweight {@link IcebergSplitScanTask}
 * whose data/delete files drop per-file column statistics (harvested separately) to cut
 * memory footprint. The JSON forms of the task's schema and partition spec are cached per
 * (filter, schemaId, specId) so each distinct spec is serialized only once.
 */
private IcebergSplitScanTask buildIcebergSplitScanTask(
        FileScanTask fileScanTask, Expression icebergPredicate, IcebergFilter filter) {
    long offset = fileScanTask.start();
    long length = fileScanTask.length();
    DataFile dataFileWithoutStats = fileScanTask.file().copyWithoutStats();
    DeleteFile[] deleteFiles = fileScanTask.deletes().stream()
            .map(DeleteFile::copyWithoutStats)
            .toArray(DeleteFile[]::new);

    PartitionSpec taskSpec = fileScanTask.spec();
    Schema taskSchema = taskSpec.schema();

    // computeIfAbsent replaces the previous get()-then-put() check-then-act sequence: it is
    // atomic on the ConcurrentHashMap, so the expensive JSON serialization runs at most once
    // per key even when several planner threads hit the same spec concurrently.
    FileScanTaskSchema schemaKey = new FileScanTaskSchema(filter, taskSchema.schemaId(), taskSpec.specId());
    Pair<String, String> schema = fileScanTaskSchemas.computeIfAbsent(schemaKey,
            k -> Pair.create(SchemaParser.toJson(taskSchema), PartitionSpecParser.toJson(taskSpec)));
    String schemaString = schema.first;
    String partitionString = schema.second;

    ResidualEvaluator residualEvaluator = ResidualEvaluator.of(taskSpec, icebergPredicate, true);
    BaseFileScanTask baseFileScanTask = new BaseFileScanTask(
            dataFileWithoutStats,
            deleteFiles,
            schemaString,
            partitionString,
            residualEvaluator);
    return new IcebergSplitScanTask(offset, length, baseFileScanTask);
}
/**
 * Refreshes cached iceberg table metadata. Resource-mapping catalogs go through the resource
 * refresh path; native catalogs delegate to the underlying iceberg catalog.
 */
@Override
public void refreshTable(String srDbName, Table table, List<String> partitionNames, boolean onlyCachedPartitions) {
    if (isResourceMappingCatalog(catalogName)) {
        refreshTableWithResource(table);
        return;
    }
    IcebergTable icebergTable = (IcebergTable) table;
    icebergCatalog.refreshTable(icebergTable.getRemoteDbName(), icebergTable.getRemoteTableName());
}
/**
 * Commits the data files produced by a sink as one iceberg transaction. An INSERT OVERWRITE
 * (flagged on the first commit info) replaces the touched partitions; otherwise the files are
 * appended. On commit failure the uncommitted files are deleted best-effort and the error is
 * rethrown as a StarRocksConnectorException.
 */
@Override
public void finishSink(String dbName, String tableName, List<TSinkCommitInfo> commitInfos) {
    // The overwrite flag is carried on the first commit info only.
    boolean isOverwrite = false;
    if (!commitInfos.isEmpty()) {
        TSinkCommitInfo sinkCommitInfo = commitInfos.get(0);
        if (sinkCommitInfo.isSetIs_overwrite()) {
            isOverwrite = sinkCommitInfo.is_overwrite;
        }
    }

    List<TIcebergDataFile> dataFiles = commitInfos.stream()
            .map(TSinkCommitInfo::getIceberg_data_file).collect(Collectors.toList());

    IcebergTable table = (IcebergTable) getTable(dbName, tableName);
    org.apache.iceberg.Table nativeTbl = table.getNativeTable();
    Transaction transaction = nativeTbl.newTransaction();
    // Overwrite replaces whole partitions; append only adds files.
    BatchWrite batchWrite = getBatchWrite(transaction, isOverwrite);

    PartitionSpec partitionSpec = nativeTbl.spec();
    for (TIcebergDataFile dataFile : dataFiles) {
        Metrics metrics = IcebergApiConverter.buildDataFileMetrics(dataFile);
        DataFiles.Builder builder =
                DataFiles.builder(partitionSpec)
                        .withMetrics(metrics)
                        .withPath(dataFile.path)
                        .withFormat(dataFile.format)
                        .withRecordCount(dataFile.record_count)
                        .withFileSizeInBytes(dataFile.file_size_in_bytes)
                        .withSplitOffsets(dataFile.split_offsets);

        if (partitionSpec.isPartitioned()) {
            // Recover the iceberg PartitionData from the file's hive-style partition path.
            String relativePartitionLocation = getIcebergRelativePartitionPath(
                    nativeTbl.location(), dataFile.partition_path);
            PartitionData partitionData = partitionDataFromPath(
                    relativePartitionLocation, partitionSpec);
            builder.withPartition(partitionData);
        }
        batchWrite.addFile(builder.build());
    }

    try {
        batchWrite.commit();
        transaction.commitTransaction();
    } catch (Exception e) {
        // Commit failed: remove the orphaned data files so they do not leak into storage.
        List<String> toDeleteFiles = dataFiles.stream()
                .map(TIcebergDataFile::getPath)
                .collect(Collectors.toList());
        icebergCatalog.deleteUncommittedDataFiles(toDeleteFiles);
        LOG.error("Failed to commit iceberg transaction on {}.{}", dbName, tableName, e);
        throw new StarRocksConnectorException(e.getMessage());
    }
}
/** Chooses the write strategy: dynamic partition overwrite for INSERT OVERWRITE, plain append otherwise. */
public BatchWrite getBatchWrite(Transaction transaction, boolean isOverwrite) {
    if (isOverwrite) {
        return new DynamicOverwrite(transaction);
    }
    return new Append(transaction);
}
/**
 * Parses a hive-style relative partition path ({@code k1=v1/k2=v2/...}) into iceberg
 * {@link PartitionData}, converting each value with the corresponding partition field type.
 *
 * @throws IllegalArgumentException if the path has more segments than the spec has fields,
 *         or a segment is not a {@code name=value} pair whose name matches the spec field
 */
public static PartitionData partitionDataFromPath(String relativePartitionPath, PartitionSpec spec) {
    PartitionData data = new PartitionData(spec.fields().size());
    String[] partitions = relativePartitionPath.split("/", -1);
    List<PartitionField> partitionFields = spec.fields();
    // Fail fast with a clear message instead of an IndexOutOfBoundsException below.
    Preconditions.checkArgument(partitions.length <= partitionFields.size(),
            "Invalid partition path: %s", relativePartitionPath);
    for (int i = 0; i < partitions.length; i++) {
        PartitionField field = partitionFields.get(i);
        String[] parts = partitions[i].split("=", 2);
        // String.split never yields null elements, so only arity and field name need checking.
        Preconditions.checkArgument(parts.length == 2 &&
                field.name().equals(parts[0]), "Invalid partition: %s", partitions[i]);

        org.apache.iceberg.types.Type sourceType = spec.partitionType().fields().get(i).type();
        data.set(i, Conversions.fromPartitionString(sourceType, parts[1]));
    }
    return data;
}
/**
 * Computes the partition directory path relative to the table's {@code data} directory,
 * with any leading or trailing slash stripped.
 */
public static String getIcebergRelativePartitionPath(String tableLocation, String partitionLocation) {
    String base = tableLocation.endsWith("/")
            ? tableLocation.substring(0, tableLocation.length() - 1)
            : tableLocation;
    String path = PartitionUtil.getSuffixName(base + "/data/", partitionLocation);
    if (path.startsWith("/")) {
        path = path.substring(1);
    }
    if (path.endsWith("/")) {
        path = path.substring(0, path.length() - 1);
    }
    return path;
}
/**
 * Returns true when every column referenced by {@code predicate} is a partition column of
 * {@code table}; a null predicate trivially qualifies.
 */
public static boolean onlyHasPartitionPredicate(Table table, ScalarOperator predicate) {
    if (predicate == null) {
        return true;
    }
    List<String> partitionColNames = table.getPartitionColumnNames();
    return predicate.getColumnRefs().stream()
            .allMatch(ref -> partitionColNames.contains(ref.getName()));
}
/** Whether the current session enables iceberg manifest pruning; false when no session exists. */
private boolean enablePruneManifest() {
    ConnectContext context = ConnectContext.get();
    if (context == null || context.getSessionVariable() == null) {
        return false;
    }
    return context.getSessionVariable().isEnablePruneIcebergManifest();
}
/** Whether the current session enables iceberg column statistics; false when no session exists. */
private boolean enableCollectColumnStatistics() {
    ConnectContext context = ConnectContext.get();
    if (context == null || context.getSessionVariable() == null) {
        return false;
    }
    return context.getSessionVariable().enableIcebergColumnStatistics();
}
/** Drops all per-query caches and detaches the thread-local metrics reporter. */
@Override
public void clear() {
    scannedTables.clear();
    splitTasks.clear();
    databases.clear();
    tables.clear();
    IcebergMetricsReporter.remove();
}
/** Minimal write strategy over an iceberg transaction: collect files, then commit them at once. */
interface BatchWrite {
    /** Registers a data file to be written by the pending operation. */
    void addFile(DataFile file);

    /** Applies all accumulated files to the transaction. */
    void commit();
}
/** Append strategy: adds new data files to the table (regular INSERT). */
static class Append implements BatchWrite {
    private final AppendFiles append;

    public Append(Transaction txn) {
        append = txn.newAppend();
    }

    @Override
    public void addFile(DataFile file) {
        append.appendFile(file);
    }

    @Override
    public void commit() {
        append.commit();
    }
}
/** Overwrite strategy: replaces every partition touched by the written files (INSERT OVERWRITE). */
static class DynamicOverwrite implements BatchWrite {
    private final ReplacePartitions replace;

    public DynamicOverwrite(Transaction txn) {
        replace = txn.newReplacePartitions();
    }

    @Override
    public void addFile(DataFile file) {
        replace.addFile(file);
    }

    @Override
    public void commit() {
        replace.commit();
    }
}
/**
 * Mutable StructLike used when committing sink-written files: one value per partition field,
 * positionally aligned with the partition spec.
 */
public static class PartitionData implements StructLike {
    private final Object[] values;

    private PartitionData(int size) {
        this.values = new Object[size];
    }

    @Override
    public int size() {
        return values.length;
    }

    @Override
    public <T> T get(int pos, Class<T> javaClass) {
        return javaClass.cast(values[pos]);
    }

    @Override
    public <T> void set(int pos, T value) {
        if (value instanceof ByteBuffer) {
            // Copy the buffer's remaining bytes without disturbing the caller's position
            // (duplicate() shares content but has independent position/limit).
            ByteBuffer buffer = (ByteBuffer) value;
            byte[] bytes = new byte[buffer.remaining()];
            buffer.duplicate().get(bytes);
            values[pos] = bytes;
        } else {
            values[pos] = value;
        }
    }

    // NOTE(review): Arrays.equals compares byte[] elements by reference, not content —
    // confirm binary partition values are never relied on for equality/hashing.
    @Override
    public boolean equals(Object other) {
        if (this == other) {
            return true;
        }
        if (other == null || getClass() != other.getClass()) {
            return false;
        }
        PartitionData that = (PartitionData) other;
        return Arrays.equals(values, that.values);
    }

    @Override
    public int hashCode() {
        return Arrays.hashCode(values);
    }
}
/** Returns the cloud credential/configuration of this catalog's HDFS environment. */
@Override
public CloudConfiguration getCloudConfiguration() {
    return hdfsEnvironment.getCloudConfiguration();
}
/**
 * Cache key for serialized (schema JSON, partition-spec JSON) pairs: one entry per
 * (filter, schemaId, specId) combination, see fileScanTaskSchemas.
 */
private static class FileScanTaskSchema {
    private final IcebergFilter icebergFilter;
    private final int schemaId;
    private final int specId;

    public FileScanTaskSchema(IcebergFilter icebergFilter, int schemaId, int specId) {
        this.icebergFilter = icebergFilter;
        this.schemaId = schemaId;
        this.specId = specId;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        FileScanTaskSchema that = (FileScanTaskSchema) o;
        return schemaId == that.schemaId && specId == that.specId && Objects.equals(icebergFilter, that.icebergFilter);
    }

    @Override
    public int hashCode() {
        return Objects.hash(icebergFilter, schemaId, specId);
    }
}
}
|
class IcebergMetadata implements ConnectorMetadata {
private static final Logger LOG = LogManager.getLogger(IcebergMetadata.class);

private final String catalogName;
private final HdfsEnvironment hdfsEnvironment;
private final IcebergCatalog icebergCatalog;
private final IcebergStatisticProvider statisticProvider = new IcebergStatisticProvider();

// Per-connector caches, cleared between queries via clear().
private final Map<TableIdentifier, Table> tables = new ConcurrentHashMap<>();
private final Map<String, Database> databases = new ConcurrentHashMap<>();
// Planned file scan tasks keyed by (db, table, snapshot, predicate).
private final Map<IcebergFilter, List<FileScanTask>> splitTasks = new ConcurrentHashMap<>();
// Filter keys whose plan-files pass already ran; guards triggerIcebergPlanFilesIfNeeded.
// NOTE(review): plain HashSet while the sibling caches are concurrent — confirm
// single-threaded access or switch to a concurrent set.
private final Set<IcebergFilter> scannedTables = new HashSet<>();
// Cached (schema JSON, partition-spec JSON) pairs, see buildIcebergSplitScanTask.
private final Map<FileScanTaskSchema, Pair<String, String>> fileScanTaskSchemas = new ConcurrentHashMap<>();

/**
 * Creates metadata access for one iceberg catalog and installs a thread-local metrics
 * reporter so scan planning metrics can be traced for this connector.
 */
public IcebergMetadata(String catalogName, HdfsEnvironment hdfsEnvironment, IcebergCatalog icebergCatalog) {
    this.catalogName = catalogName;
    this.hdfsEnvironment = hdfsEnvironment;
    this.icebergCatalog = icebergCatalog;
    new IcebergMetricsReporter().setThreadLocalReporter();
}
/** Lists all database names visible in the iceberg catalog. */
@Override
public List<String> listDbNames() {
    return icebergCatalog.listAllDatabases();
}
/**
 * Creates a database in the iceberg catalog.
 *
 * @throws AlreadyExistsException if a database with this name already exists
 */
@Override
public void createDb(String dbName, Map<String, String> properties) throws AlreadyExistsException {
    if (dbExists(dbName)) {
        throw new AlreadyExistsException("Database Already Exists");
    }
    icebergCatalog.createDb(dbName, properties);
}
/**
 * Drops an iceberg database and evicts it from the local cache.
 *
 * @throws StarRocksConnectorException if the database still contains tables
 */
@Override
public void dropDb(String dbName, boolean isForceDrop) throws MetaNotFoundException {
    // Refuse to drop a non-empty database regardless of isForceDrop.
    if (!listTableNames(dbName).isEmpty()) {
        throw new StarRocksConnectorException("Database %s not empty", dbName);
    }
    icebergCatalog.dropDb(dbName);
    databases.remove(dbName);
}
/**
 * Returns the database, serving from the local cache when possible. Returns null (and logs)
 * when the namespace does not exist in the catalog.
 */
@Override
public Database getDb(String dbName) {
    // Single ConcurrentHashMap lookup instead of containsKey()+get(), which probes the map
    // twice and is racy against a concurrent clear()/remove() between the two calls.
    Database cached = databases.get(dbName);
    if (cached != null) {
        return cached;
    }
    Database db;
    try {
        db = icebergCatalog.getDB(dbName);
    } catch (NoSuchNamespaceException e) {
        LOG.error("Database {} not found", dbName, e);
        return null;
    }

    databases.put(dbName, db);
    return db;
}
/** Lists the table names of one iceberg database. */
@Override
public List<String> listTableNames(String dbName) {
    return icebergCatalog.listTables(dbName);
}
/**
 * Creates an iceberg table from a StarRocks CREATE TABLE statement: converts the columns to
 * an iceberg schema, list-partition columns to a partition spec, and forwards the table
 * properties (including an optional explicit location).
 */
@Override
public boolean createTable(CreateTableStmt stmt) throws DdlException {
    String dbName = stmt.getDbName();
    String tableName = stmt.getTableName();

    Schema schema = toIcebergApiSchema(stmt.getColumns());
    PartitionDesc partitionDesc = stmt.getPartitionDesc();
    // No partition desc means an unpartitioned table; otherwise list partitioning is assumed.
    List<String> partitionColNames = partitionDesc == null ? Lists.newArrayList() :
            ((ListPartitionDesc) partitionDesc).getPartitionColNames();
    PartitionSpec partitionSpec = parsePartitionFields(schema, partitionColNames);
    Map<String, String> properties = stmt.getProperties() == null ? new HashMap<>() : stmt.getProperties();
    String tableLocation = properties.get(LOCATION_PROPERTY);
    Map<String, String> createTableProperties = IcebergApiConverter.rebuildCreateTableProperties(properties);

    return icebergCatalog.createTable(dbName, tableName, schema, partitionSpec, tableLocation, createTableProperties);
}
/**
 * Drops an iceberg table (no-op if absent), evicts the cached handle, and removes any
 * collected statistics for it.
 */
@Override
public void dropTable(DropTableStmt stmt) {
    String dbName = stmt.getDbName();
    String tblName = stmt.getTableName();
    Table icebergTable = getTable(dbName, tblName);
    if (icebergTable == null) {
        return;
    }
    icebergCatalog.dropTable(dbName, tblName, stmt.isForceDrop());
    tables.remove(TableIdentifier.of(dbName, tblName));
    StatisticUtils.dropStatisticsAfterDropTable(icebergTable);
}
/**
 * Returns the StarRocks table handle for an iceberg table, caching converted handles by
 * identifier. Returns null (and logs) when the table cannot be loaded.
 */
@Override
public Table getTable(String dbName, String tblName) {
    TableIdentifier identifier = TableIdentifier.of(dbName, tblName);
    // Single lookup instead of containsKey()+get(): one probe, and no race with a
    // concurrent clear()/remove() between the two calls.
    Table cached = tables.get(identifier);
    if (cached != null) {
        return cached;
    }
    try {
        IcebergCatalogType catalogType = icebergCatalog.getIcebergCatalogType();
        org.apache.iceberg.Table icebergTable = icebergCatalog.getTable(dbName, tblName);
        Table table = IcebergApiConverter.toIcebergTable(icebergTable, catalogName, dbName, tblName, catalogType.name());
        tables.put(identifier, table);
        return table;
    } catch (StarRocksConnectorException | NoSuchTableException e) {
        LOG.error("Failed to get iceberg table {}", identifier, e);
        return null;
    }
}
/**
 * Lists partition names of a table. Only hive, REST, and glue backed catalogs expose the
 * metadata needed for this; other catalog types are rejected.
 */
@Override
public List<String> listPartitionNames(String dbName, String tblName) {
    IcebergCatalogType nativeType = icebergCatalog.getIcebergCatalogType();
    switch (nativeType) {
        case HIVE_CATALOG:
        case REST_CATALOG:
        case GLUE_CATALOG:
            return icebergCatalog.listPartitionNames(dbName, tblName);
        default:
            throw new StarRocksConnectorException(
                    "Do not support get partitions from catalog type: " + nativeType);
    }
}
/**
 * Returns the remote files backing a scan of {@code table} at {@code snapshotId}.
 * Partition keys and field names are not needed for iceberg planning and are ignored.
 */
@Override
public List<RemoteFileInfo> getRemoteFileInfos(Table table, List<PartitionKey> partitionKeys,
                                               long snapshotId, ScalarOperator predicate,
                                               List<String> fieldNames, long limit) {
    return getRemoteFileInfos((IcebergTable) table, snapshotId, predicate, limit);
}
/**
 * Returns remote file descriptors for the planned scan tasks of the given snapshot,
 * triggering the plan-files pass if this (table, snapshot, predicate) is not cached yet.
 */
private List<RemoteFileInfo> getRemoteFileInfos(IcebergTable table, long snapshotId,
                                                ScalarOperator predicate, long limit) {
    RemoteFileInfo remoteFileInfo = new RemoteFileInfo();
    String dbName = table.getRemoteDbName();
    String tableName = table.getRemoteTableName();
    IcebergFilter key = IcebergFilter.of(dbName, tableName, snapshotId, predicate);
    triggerIcebergPlanFilesIfNeeded(key, table, predicate, limit);

    List<FileScanTask> icebergScanTasks = splitTasks.get(key);
    if (icebergScanTasks == null) {
        // BUGFIX: StarRocksConnectorException formats its message with String.format, so use
        // %s placeholders; the slf4j-style {} markers were never substituted.
        throw new StarRocksConnectorException("Missing iceberg split task for table:[%s.%s]. predicate:[%s]",
                dbName, tableName, predicate);
    }

    List<RemoteFileDesc> remoteFileDescs = Lists.newArrayList(RemoteFileDesc.createIcebergRemoteFileDesc(icebergScanTasks));
    remoteFileInfo.setFiles(remoteFileDescs);
    return Lists.newArrayList(remoteFileInfo);
}
/** Runs the plan-files pass once per filter key, timing the work under the EXTERNAL tracer. */
private void triggerIcebergPlanFilesIfNeeded(IcebergFilter key, IcebergTable table, ScalarOperator predicate, long limit) {
    if (scannedTables.contains(key)) {
        return;
    }
    try (Timer ignored = Tracers.watchScope(EXTERNAL, "ICEBERG.processSplit." + key)) {
        collectTableStatisticsAndCacheIcebergSplit(table, predicate, limit);
    }
}
/**
 * Derives the distinct partition keys that survive predicate pruning, read from the cached
 * file scan tasks of the current snapshot. Returns an empty list for an empty table.
 */
public List<PartitionKey> getPrunedPartitions(Table table, ScalarOperator predicate, long limit) {
    IcebergTable icebergTable = (IcebergTable) table;
    String dbName = icebergTable.getRemoteDbName();
    String tableName = icebergTable.getRemoteTableName();
    Optional<Snapshot> snapshot = icebergTable.getSnapshot();
    if (!snapshot.isPresent()) {
        return new ArrayList<>();
    }

    IcebergFilter key = IcebergFilter.of(dbName, tableName, snapshot.get().snapshotId(), predicate);
    triggerIcebergPlanFilesIfNeeded(key, icebergTable, predicate, limit);

    List<PartitionKey> partitionKeys = new ArrayList<>();
    List<FileScanTask> icebergSplitTasks = splitTasks.get(key);
    if (icebergSplitTasks == null) {
        // BUGFIX: StarRocksConnectorException formats with String.format; the slf4j-style {}
        // placeholders were never substituted.
        throw new StarRocksConnectorException("Missing iceberg split task for table:[%s.%s]. predicate:[%s]",
                dbName, tableName, predicate);
    }

    Set<List<String>> scannedPartitions = new HashSet<>();
    PartitionSpec spec = icebergTable.getNativeTable().spec();
    List<Column> partitionColumns = icebergTable.getPartitionColumnsIncludeTransformed();
    for (FileScanTask fileScanTask : icebergSplitTasks) {
        org.apache.iceberg.PartitionData partitionData = (org.apache.iceberg.PartitionData) fileScanTask.file().partition();
        List<String> values = PartitionUtil.getIcebergPartitionValues(spec, partitionData);

        // Skip files written under a spec with a different partition arity.
        if (values.size() != partitionColumns.size()) {
            continue;
        }

        // Many files share the same partition tuple; emit each tuple only once.
        if (!scannedPartitions.add(values)) {
            continue;
        }

        try {
            List<com.starrocks.catalog.Type> srTypes = new ArrayList<>();
            for (PartitionField partitionField : spec.fields()) {
                if (partitionField.transform().isVoid()) {
                    continue;
                }
                if (!partitionField.transform().isIdentity()) {
                    Type sourceType = spec.schema().findType(partitionField.sourceId());
                    Type resultType = partitionField.transform().getResultType(sourceType);
                    // Date-typed transform results are carried as integer ordinals.
                    if (resultType == Types.DateType.get()) {
                        resultType = Types.IntegerType.get();
                    }
                    srTypes.add(fromIcebergType(resultType));
                    continue;
                }
                srTypes.add(icebergTable.getColumn(partitionField.name()).getType());
            }

            if (icebergTable.hasPartitionTransformedEvolution()) {
                // After partition transform evolution, fall back to the table's column types.
                srTypes = partitionColumns.stream()
                        .map(Column::getType)
                        .collect(Collectors.toList());
            }
            partitionKeys.add(createPartitionKeyWithType(values, srTypes, table.getType()));
        } catch (Exception e) {
            LOG.error("create partition key failed.", e);
            // BUGFIX: never pass an arbitrary exception message as a format string — a '%'
            // in it would make String.format throw. Use an explicit %s placeholder.
            throw new StarRocksConnectorException("Failed to create iceberg partition key: %s", e.getMessage());
        }
    }
    return partitionKeys;
}
/**
 * Plans file scan tasks for the table's current snapshot, updates the statistic provider
 * (full column stats or cardinality-only depending on the session), and caches the split
 * tasks in {@link #splitTasks} under the snapshot/predicate filter key.
 */
private void collectTableStatisticsAndCacheIcebergSplit(Table table, ScalarOperator predicate, long limit) {
    IcebergTable icebergTable = (IcebergTable) table;
    Optional<Snapshot> snapshot = icebergTable.getSnapshot();
    // An empty table has no snapshot; nothing to plan or cache.
    if (!snapshot.isPresent()) {
        return;
    }
    long snapshotId = snapshot.get().snapshotId();
    String dbName = icebergTable.getRemoteDbName();
    String tableName = icebergTable.getRemoteTableName();
    IcebergFilter key = IcebergFilter.of(dbName, tableName, snapshotId, predicate);

    org.apache.iceberg.Table nativeTbl = icebergTable.getNativeTable();
    Types.StructType schema = nativeTbl.schema().asStruct();

    // Translate the StarRocks predicate conjuncts into an iceberg Expression for planning.
    List<ScalarOperator> scalarOperators = Utils.extractConjuncts(predicate);
    ScalarOperatorToIcebergExpr.IcebergContext icebergContext = new ScalarOperatorToIcebergExpr.IcebergContext(schema);
    Expression icebergPredicate = new ScalarOperatorToIcebergExpr().convert(scalarOperators, icebergContext);

    // Read the session switch once so scan configuration and per-file handling stay consistent.
    boolean collectColumnStats = enableCollectColumnStatistics();

    TableScan scan = nativeTbl.newScan().useSnapshot(snapshotId);
    if (collectColumnStats) {
        scan = scan.includeColumnStats();
    }
    if (icebergPredicate.op() != Expression.Operation.TRUE) {
        scan = scan.filter(icebergPredicate);
    }

    CloseableIterable<FileScanTask> fileScanTaskIterable = TableScanUtil.splitFiles(
            scan.planFiles(), scan.targetSplitSize());
    CloseableIterator<FileScanTask> fileScanTaskIterator = fileScanTaskIterable.iterator();
    Iterator<FileScanTask> fileScanTasks;

    // Pruning is only safe when: a LIMIT exists and fits in an int, the predicate touches
    // partition columns only, the table has no delete files (v1), the partition spec has not
    // evolved (specId == 0), and the session enables it.
    boolean canPruneManifests = limit != -1 && !icebergTable.isV2Format() && onlyHasPartitionPredicate(table, predicate)
            && limit < Integer.MAX_VALUE && nativeTbl.spec().specId() == 0 && enablePruneManifest();
    if (canPruneManifests) {
        fileScanTasks = Iterators.limit(fileScanTaskIterator, (int) limit);
    } else {
        fileScanTasks = fileScanTaskIterator;
    }

    // PERF: these lookups are loop-invariant; the original rebuilt them for every scan task.
    Map<Integer, Type.PrimitiveType> idToTypeMapping = null;
    List<Types.NestedField> nonPartitionPrimitiveColumns = null;
    if (collectColumnStats) {
        List<Types.NestedField> fullColumns = nativeTbl.schema().columns();
        idToTypeMapping = fullColumns.stream()
                .filter(column -> column.type().isPrimitiveType())
                .collect(Collectors.toMap(Types.NestedField::fieldId, column -> column.type().asPrimitiveType()));
        Set<Integer> identityPartitionIds = nativeTbl.spec().fields().stream()
                .filter(x -> x.transform().isIdentity())
                .map(PartitionField::sourceId)
                .collect(Collectors.toSet());
        nonPartitionPrimitiveColumns = fullColumns.stream()
                .filter(column -> !identityPartitionIds.contains(column.fieldId()) &&
                        column.type().isPrimitiveType())
                .collect(toImmutableList());
    }

    List<FileScanTask> icebergScanTasks = Lists.newArrayList();
    long totalReadCount = 0;
    // A data file may back several split tasks; count its records only once toward LIMIT.
    Set<String> filePaths = new HashSet<>();
    while (fileScanTasks.hasNext()) {
        // BUGFIX: advance the (possibly limit-wrapped) iterator, not the raw underlying one.
        // Calling fileScanTaskIterator.next() directly bypassed Iterators.limit()'s counter,
        // so the wrapper could never cap the number of planned tasks.
        FileScanTask scanTask = fileScanTasks.next();

        FileScanTask icebergSplitScanTask = scanTask;
        if (collectColumnStats) {
            try (Timer ignored = Tracers.watchScope(EXTERNAL, "ICEBERG.buildSplitScanTask")) {
                icebergSplitScanTask = buildIcebergSplitScanTask(scanTask, icebergPredicate, key);
            }
            try (Timer ignored = Tracers.watchScope(EXTERNAL, "ICEBERG.updateIcebergFileStats")) {
                statisticProvider.updateIcebergFileStats(
                        icebergTable, scanTask, idToTypeMapping, nonPartitionPrimitiveColumns, key);
            }
        } else {
            try (Timer ignored = Tracers.watchScope(EXTERNAL, "ICEBERG.updateCardinality")) {
                statisticProvider.updateIcebergCardinality(key, scanTask);
            }
        }
        icebergScanTasks.add(icebergSplitScanTask);

        String filePath = icebergSplitScanTask.file().path().toString();
        if (!filePaths.contains(filePath)) {
            filePaths.add(filePath);
            totalReadCount += scanTask.file().recordCount();
        }
        if (canPruneManifests && totalReadCount >= limit) {
            break;
        }
    }

    try {
        fileScanTaskIterable.close();
        fileScanTaskIterator.close();
    } catch (IOException ignored) {
        // Best-effort close: the tasks are already collected, so a close failure is harmless.
    }

    IcebergMetricsReporter.lastReport().ifPresent(scanReportWithCounter ->
            Tracers.record(Tracers.Module.EXTERNAL, "ICEBERG.ScanMetrics." +
                            scanReportWithCounter.getScanReport().tableName() + " / No_" +
                            scanReportWithCounter.getCount(),
                    scanReportWithCounter.getScanReport().scanMetrics().toString()));

    splitTasks.put(key, icebergScanTasks);
    scannedTables.add(key);
}
/**
 * Returns optimizer statistics. Empty tables report one row with unknown column statistics.
 * Otherwise the plan-files pass runs (if not cached) and either cardinality-only stats or
 * full column statistics are returned, depending on the session setting.
 */
@Override
public Statistics getTableStatistics(OptimizerContext session,
                                     Table table,
                                     Map<ColumnRefOperator, Column> columns,
                                     List<PartitionKey> partitionKeys,
                                     ScalarOperator predicate,
                                     long limit) {
    IcebergTable icebergTable = (IcebergTable) table;
    Optional<Snapshot> snapshot = icebergTable.getSnapshot();
    if (!snapshot.isPresent()) {
        // No snapshot means no data files: report a single row with unknown column stats.
        return Statistics.builder()
                .setOutputRowCount(1)
                .addColumnStatistics(statisticProvider.buildUnknownColumnStatistics(columns.keySet()))
                .build();
    }
    IcebergFilter key = IcebergFilter.of(
            icebergTable.getRemoteDbName(), icebergTable.getRemoteTableName(),
            snapshot.get().snapshotId(), predicate);
    triggerIcebergPlanFilesIfNeeded(key, icebergTable, predicate, limit);

    boolean useColumnStats = session.getSessionVariable().enableIcebergColumnStatistics();
    return useColumnStats
            ? statisticProvider.getTableStatistics(icebergTable, columns, session, predicate)
            : statisticProvider.getCardinalityStats(icebergTable, columns, predicate);
}
/**
 * Converts a planned {@link FileScanTask} into a lightweight {@link IcebergSplitScanTask}
 * whose data/delete files drop per-file column statistics (harvested separately) to cut
 * memory footprint. The JSON forms of the task's schema and partition spec are cached per
 * (db, table, schemaId, specId) so each distinct spec is serialized only once.
 */
private IcebergSplitScanTask buildIcebergSplitScanTask(
        FileScanTask fileScanTask, Expression icebergPredicate, IcebergFilter filter) {
    long offset = fileScanTask.start();
    long length = fileScanTask.length();
    DataFile dataFileWithoutStats = fileScanTask.file().copyWithoutStats();
    DeleteFile[] deleteFiles = fileScanTask.deletes().stream()
            .map(DeleteFile::copyWithoutStats)
            .toArray(DeleteFile[]::new);

    PartitionSpec taskSpec = fileScanTask.spec();
    Schema taskSchema = taskSpec.schema();

    // computeIfAbsent replaces the previous get()-then-put() check-then-act sequence: it is
    // atomic on the ConcurrentHashMap, so the expensive JSON serialization runs at most once
    // per key even when several planner threads hit the same spec concurrently.
    FileScanTaskSchema schemaKey = new FileScanTaskSchema(filter.getDatabaseName(), filter.getTableName(),
            taskSchema.schemaId(), taskSpec.specId());
    Pair<String, String> schema = fileScanTaskSchemas.computeIfAbsent(schemaKey,
            k -> Pair.create(SchemaParser.toJson(taskSchema), PartitionSpecParser.toJson(taskSpec)));
    String schemaString = schema.first;
    String partitionString = schema.second;

    ResidualEvaluator residualEvaluator = ResidualEvaluator.of(taskSpec, icebergPredicate, true);
    BaseFileScanTask baseFileScanTask = new BaseFileScanTask(
            dataFileWithoutStats,
            deleteFiles,
            schemaString,
            partitionString,
            residualEvaluator);
    return new IcebergSplitScanTask(offset, length, baseFileScanTask);
}
/**
 * Refreshes cached iceberg table metadata. Resource-mapping catalogs go through the resource
 * refresh path; native catalogs delegate to the underlying iceberg catalog.
 */
@Override
public void refreshTable(String srDbName, Table table, List<String> partitionNames, boolean onlyCachedPartitions) {
    if (isResourceMappingCatalog(catalogName)) {
        refreshTableWithResource(table);
        return;
    }
    IcebergTable icebergTable = (IcebergTable) table;
    icebergCatalog.refreshTable(icebergTable.getRemoteDbName(), icebergTable.getRemoteTableName());
}
/**
 * Commits the data files produced by a sink as one iceberg transaction. An INSERT OVERWRITE
 * (flagged on the first commit info) replaces the touched partitions; otherwise the files are
 * appended. On commit failure the uncommitted files are deleted best-effort and the error is
 * rethrown as a StarRocksConnectorException.
 */
@Override
public void finishSink(String dbName, String tableName, List<TSinkCommitInfo> commitInfos) {
    // The overwrite flag is carried on the first commit info only.
    boolean isOverwrite = false;
    if (!commitInfos.isEmpty()) {
        TSinkCommitInfo sinkCommitInfo = commitInfos.get(0);
        if (sinkCommitInfo.isSetIs_overwrite()) {
            isOverwrite = sinkCommitInfo.is_overwrite;
        }
    }

    List<TIcebergDataFile> dataFiles = commitInfos.stream()
            .map(TSinkCommitInfo::getIceberg_data_file).collect(Collectors.toList());

    IcebergTable table = (IcebergTable) getTable(dbName, tableName);
    org.apache.iceberg.Table nativeTbl = table.getNativeTable();
    Transaction transaction = nativeTbl.newTransaction();
    // Overwrite replaces whole partitions; append only adds files.
    BatchWrite batchWrite = getBatchWrite(transaction, isOverwrite);

    PartitionSpec partitionSpec = nativeTbl.spec();
    for (TIcebergDataFile dataFile : dataFiles) {
        Metrics metrics = IcebergApiConverter.buildDataFileMetrics(dataFile);
        DataFiles.Builder builder =
                DataFiles.builder(partitionSpec)
                        .withMetrics(metrics)
                        .withPath(dataFile.path)
                        .withFormat(dataFile.format)
                        .withRecordCount(dataFile.record_count)
                        .withFileSizeInBytes(dataFile.file_size_in_bytes)
                        .withSplitOffsets(dataFile.split_offsets);

        if (partitionSpec.isPartitioned()) {
            // Recover the iceberg PartitionData from the file's hive-style partition path.
            String relativePartitionLocation = getIcebergRelativePartitionPath(
                    nativeTbl.location(), dataFile.partition_path);
            PartitionData partitionData = partitionDataFromPath(
                    relativePartitionLocation, partitionSpec);
            builder.withPartition(partitionData);
        }
        batchWrite.addFile(builder.build());
    }

    try {
        batchWrite.commit();
        transaction.commitTransaction();
    } catch (Exception e) {
        // Commit failed: remove the orphaned data files so they do not leak into storage.
        List<String> toDeleteFiles = dataFiles.stream()
                .map(TIcebergDataFile::getPath)
                .collect(Collectors.toList());
        icebergCatalog.deleteUncommittedDataFiles(toDeleteFiles);
        LOG.error("Failed to commit iceberg transaction on {}.{}", dbName, tableName, e);
        throw new StarRocksConnectorException(e.getMessage());
    }
}
/** Chooses the write strategy: dynamic partition overwrite for INSERT OVERWRITE, plain append otherwise. */
public BatchWrite getBatchWrite(Transaction transaction, boolean isOverwrite) {
    if (isOverwrite) {
        return new DynamicOverwrite(transaction);
    }
    return new Append(transaction);
}
/**
 * Parses a hive-style relative partition path ({@code k1=v1/k2=v2/...}) into iceberg
 * {@link PartitionData}, converting each value with the corresponding partition field type.
 *
 * @throws IllegalArgumentException if the path has more segments than the spec has fields,
 *         or a segment is not a {@code name=value} pair whose name matches the spec field
 */
public static PartitionData partitionDataFromPath(String relativePartitionPath, PartitionSpec spec) {
    PartitionData data = new PartitionData(spec.fields().size());
    String[] partitions = relativePartitionPath.split("/", -1);
    List<PartitionField> partitionFields = spec.fields();
    // Fail fast with a clear message instead of an IndexOutOfBoundsException below.
    Preconditions.checkArgument(partitions.length <= partitionFields.size(),
            "Invalid partition path: %s", relativePartitionPath);
    for (int i = 0; i < partitions.length; i++) {
        PartitionField field = partitionFields.get(i);
        String[] parts = partitions[i].split("=", 2);
        // String.split never yields null elements, so only arity and field name need checking.
        Preconditions.checkArgument(parts.length == 2 &&
                field.name().equals(parts[0]), "Invalid partition: %s", partitions[i]);

        org.apache.iceberg.types.Type sourceType = spec.partitionType().fields().get(i).type();
        data.set(i, Conversions.fromPartitionString(sourceType, parts[1]));
    }
    return data;
}
/**
 * Computes the partition directory path relative to the table's {@code data} directory,
 * with any leading or trailing slash stripped.
 */
public static String getIcebergRelativePartitionPath(String tableLocation, String partitionLocation) {
    String base = tableLocation.endsWith("/")
            ? tableLocation.substring(0, tableLocation.length() - 1)
            : tableLocation;
    String path = PartitionUtil.getSuffixName(base + "/data/", partitionLocation);
    if (path.startsWith("/")) {
        path = path.substring(1);
    }
    if (path.endsWith("/")) {
        path = path.substring(0, path.length() - 1);
    }
    return path;
}
/**
 * Returns true when every column referenced by {@code predicate} is a partition column of
 * {@code table}; a null predicate trivially qualifies.
 */
public static boolean onlyHasPartitionPredicate(Table table, ScalarOperator predicate) {
    if (predicate == null) {
        return true;
    }
    List<String> partitionColNames = table.getPartitionColumnNames();
    return predicate.getColumnRefs().stream()
            .allMatch(ref -> partitionColNames.contains(ref.getName()));
}
/** Whether the current session enables iceberg manifest pruning; false when no session exists. */
private boolean enablePruneManifest() {
    ConnectContext context = ConnectContext.get();
    if (context == null || context.getSessionVariable() == null) {
        return false;
    }
    return context.getSessionVariable().isEnablePruneIcebergManifest();
}
/**
 * Reads the session flag that enables collection of Iceberg column statistics.
 * Returns false when no connect context or session variable is available.
 */
private boolean enableCollectColumnStatistics() {
    ConnectContext context = ConnectContext.get();
    if (context == null || context.getSessionVariable() == null) {
        return false;
    }
    return context.getSessionVariable().enableIcebergColumnStatistics();
}
/**
 * Discards all cached per-connector state (split tasks, table/database caches,
 * scanned-table bookkeeping) and detaches the metrics reporter.
 */
@Override
public void clear() {
    splitTasks.clear();
    tables.clear();
    databases.clear();
    scannedTables.clear();
    // Release the registered metrics reporter as well so it is re-created next use.
    IcebergMetricsReporter.remove();
}
/**
 * Minimal abstraction over a transactional Iceberg write: data files are staged one
 * by one via {@link #addFile(DataFile)} and then committed atomically.
 */
interface BatchWrite {
    // Stage a data file for this batch.
    void addFile(DataFile file);

    // Commit all staged files in one operation.
    void commit();
}
/** {@link BatchWrite} that appends data files to the table via the transaction's AppendFiles op. */
static class Append implements BatchWrite {
    private final AppendFiles append;

    public Append(Transaction txn) {
        append = txn.newAppend();
    }

    @Override
    public void addFile(DataFile file) {
        append.appendFile(file);
    }

    @Override
    public void commit() {
        append.commit();
    }
}
/**
 * {@link BatchWrite} that replaces the partitions touched by the added files
 * (dynamic partition overwrite) via the transaction's ReplacePartitions op.
 */
static class DynamicOverwrite implements BatchWrite {
    private final ReplacePartitions replace;

    public DynamicOverwrite(Transaction txn) {
        replace = txn.newReplacePartitions();
    }

    @Override
    public void addFile(DataFile file) {
        replace.addFile(file);
    }

    @Override
    public void commit() {
        replace.commit();
    }
}
/**
 * Simple {@link StructLike} backed by an Object array, used to carry partition values.
 *
 * <p>{@link #set(int, Object)} converts {@link ByteBuffer} values into {@code byte[]},
 * so equality and hashing must use deep array semantics; shallow
 * {@code Arrays.equals}/{@code Arrays.hashCode} would compare stored byte arrays by
 * reference and report equal binary partition values as different.
 */
public static class PartitionData implements StructLike {
    private final Object[] values;

    private PartitionData(int size) {
        this.values = new Object[size];
    }

    @Override
    public int size() {
        return values.length;
    }

    @Override
    public <T> T get(int pos, Class<T> javaClass) {
        return javaClass.cast(values[pos]);
    }

    @Override
    public <T> void set(int pos, T value) {
        if (value instanceof ByteBuffer) {
            // Copy the buffer's remaining bytes without disturbing its position.
            ByteBuffer buffer = (ByteBuffer) value;
            byte[] bytes = new byte[buffer.remaining()];
            buffer.duplicate().get(bytes);
            values[pos] = bytes;
        } else {
            values[pos] = value;
        }
    }

    @Override
    public boolean equals(Object other) {
        if (this == other) {
            return true;
        }
        if (other == null || getClass() != other.getClass()) {
            return false;
        }
        PartitionData that = (PartitionData) other;
        // deepEquals: values may contain byte[] (see set), which the shallow
        // Arrays.equals would only compare by reference.
        return Arrays.deepEquals(values, that.values);
    }

    @Override
    public int hashCode() {
        // Must stay consistent with the deep equality used in equals.
        return Arrays.deepHashCode(values);
    }
}
/** Returns the cloud configuration carried by this connector's HDFS environment. */
@Override
public CloudConfiguration getCloudConfiguration() {
    return hdfsEnvironment.getCloudConfiguration();
}
/**
 * Immutable composite key of (database, table, schema id, partition spec id).
 * Implements equals/hashCode so instances can serve as map/set keys — presumably to
 * group or de-duplicate file scan tasks by their effective schema (verify at call sites).
 */
private static class FileScanTaskSchema {
    private final String dbName;
    private final String tableName;
    private final int schemaId;
    private final int specId;

    public FileScanTaskSchema(String dbName, String tableName, int schemaId, int specId) {
        this.dbName = dbName;
        this.tableName = tableName;
        this.schemaId = schemaId;
        this.specId = specId;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        FileScanTaskSchema that = (FileScanTaskSchema) o;
        // Compare the cheap int fields first, then the names.
        return schemaId == that.schemaId && specId == that.specId &&
                Objects.equals(dbName, that.dbName) && Objects.equals(tableName, that.tableName);
    }

    @Override
    public int hashCode() {
        return Objects.hash(dbName, tableName, schemaId, specId);
    }
}
}
|
Please check whether there is a similar issue here. Thanks.
|
/**
 * Builds a SHOW COLUMNS statement, attaching the optional FROM-table, FROM-schema,
 * and LIKE segments.
 *
 * <p>Consistency fix: the method already caches each optional sub-context in a local
 * before the null check, so the visits below use those locals instead of re-invoking
 * {@code ctx.fromSchema()} / {@code ctx.showLike()}.
 */
public ASTNode visitShowColumns(final ShowColumnsContext ctx) {
    ShowColumnsStatement result = new ShowColumnsStatement();
    FromTableContext fromTableContext = ctx.fromTable();
    FromSchemaContext fromSchemaContext = ctx.fromSchema();
    ShowLikeContext showLikeContext = ctx.showLike();
    if (null != fromTableContext) {
        FromTableSegment fromTableSegment = (FromTableSegment) visit(fromTableContext);
        result.setTable(fromTableSegment.getPattern());
        result.getAllSQLSegments().add(fromTableSegment);
    }
    if (null != fromSchemaContext) {
        FromSchemaSegment fromSchemaSegment = (FromSchemaSegment) visit(fromSchemaContext);
        result.getAllSQLSegments().add(fromSchemaSegment);
    }
    if (null != showLikeContext) {
        ShowLikeSegment showLikeSegment = (ShowLikeSegment) visit(showLikeContext);
        result.getAllSQLSegments().add(showLikeSegment);
    }
    return result;
}
|
FromTableSegment fromTableSegment = (FromTableSegment) visit(fromTableContext);
|
public ASTNode visitShowColumns(final ShowColumnsContext ctx) {
ShowColumnsStatement result = new ShowColumnsStatement();
FromTableContext fromTableContext = ctx.fromTable();
FromSchemaContext fromSchemaContext = ctx.fromSchema();
ShowLikeContext showLikeContext = ctx.showLike();
if (null != fromTableContext) {
FromTableSegment fromTableSegment = (FromTableSegment) visit(fromTableContext);
result.setTable(fromTableSegment.getPattern());
result.getAllSQLSegments().add(fromTableSegment);
}
if (null != fromSchemaContext) {
FromSchemaSegment fromSchemaSegment = (FromSchemaSegment) visit(ctx.fromSchema());
result.getAllSQLSegments().add(fromSchemaSegment);
}
if (null != showLikeContext) {
ShowLikeSegment showLikeSegment = (ShowLikeSegment) visit(ctx.showLike());
result.getAllSQLSegments().add(showLikeSegment);
}
return result;
}
|
class MySQLVisitor extends MySQLStatementBaseVisitor<ASTNode> implements SQLVisitor {
private int currentParameterIndex;
/** Builds a USE statement carrying the target schema name literal. */
@Override
public ASTNode visitUse(final UseContext ctx) {
    LiteralValue schema = (LiteralValue) visit(ctx.schemaName());
    UseStatement result = new UseStatement();
    result.setSchema(schema.getLiteral());
    return result;
}

/** Builds a DESC/DESCRIBE statement for the referenced table. */
@Override
public ASTNode visitDesc(final DescContext ctx) {
    TableSegment table = (TableSegment) visit(ctx.tableName());
    DescribeStatement result = new DescribeStatement();
    result.setTable(table);
    return result;
}
/**
 * Builds a SHOW DATABASES statement, attaching the optional LIKE segment.
 * Uses the cached {@code showLikeContext} local rather than re-invoking
 * {@code ctx.showLike()}, matching the null check above it.
 */
@Override
public ASTNode visitShowDatabases(final ShowDatabasesContext ctx) {
    ShowDatabasesStatement result = new ShowDatabasesStatement();
    ShowLikeContext showLikeContext = ctx.showLike();
    if (null != showLikeContext) {
        ShowLikeSegment showLikeSegment = (ShowLikeSegment) visit(showLikeContext);
        result.getAllSQLSegments().add(showLikeSegment);
    }
    return result;
}
/**
 * Builds a SHOW TABLES statement, attaching the optional FROM-schema and LIKE
 * segments. Uses the cached context locals rather than re-invoking
 * {@code ctx.fromSchema()} / {@code ctx.showLike()}, matching the null checks.
 */
@Override
public ASTNode visitShowTables(final ShowTablesContext ctx) {
    ShowTablesStatement result = new ShowTablesStatement();
    FromSchemaContext fromSchemaContext = ctx.fromSchema();
    ShowLikeContext showLikeContext = ctx.showLike();
    if (null != fromSchemaContext) {
        FromSchemaSegment fromSchemaSegment = (FromSchemaSegment) visit(fromSchemaContext);
        result.getAllSQLSegments().add(fromSchemaSegment);
    }
    if (null != showLikeContext) {
        ShowLikeSegment showLikeSegment = (ShowLikeSegment) visit(showLikeContext);
        result.getAllSQLSegments().add(showLikeSegment);
    }
    return result;
}
/**
 * Builds a SHOW TABLE STATUS statement, attaching the optional FROM-schema and LIKE
 * segments. Uses the cached context locals rather than re-invoking
 * {@code ctx.fromSchema()} / {@code ctx.showLike()}, matching the null checks.
 */
@Override
public ASTNode visitShowTableStatus(final ShowTableStatusContext ctx) {
    ShowTableStatusStatement result = new ShowTableStatusStatement();
    FromSchemaContext fromSchemaContext = ctx.fromSchema();
    ShowLikeContext showLikeContext = ctx.showLike();
    if (null != fromSchemaContext) {
        FromSchemaSegment fromSchemaSegment = (FromSchemaSegment) visit(fromSchemaContext);
        result.getAllSQLSegments().add(fromSchemaSegment);
    }
    if (null != showLikeContext) {
        ShowLikeSegment showLikeSegment = (ShowLikeSegment) visit(showLikeContext);
        result.getAllSQLSegments().add(showLikeSegment);
    }
    return result;
}
/**
 * Builds a SHOW INDEX statement, attaching the optional schema segment (with source
 * positions taken from the schema-name context) and the optional table segment.
 *
 * <p>Fix: the method carried two consecutive {@code @Override} annotations, which is
 * a compile error ({@code Override} is not repeatable); one was removed.
 */
@Override
public ASTNode visitShowIndex(final ShowIndexContext ctx) {
    ShowIndexStatement result = new ShowIndexStatement();
    FromSchemaContext fromSchemaContext = ctx.fromSchema();
    FromTableContext fromTableContext = ctx.fromTable();
    if (null != fromSchemaContext) {
        SchemaNameContext schemaNameContext = fromSchemaContext.schemaName();
        LiteralValue schema = (LiteralValue) visit(schemaNameContext);
        SchemaSegment schemaSegment = new SchemaSegment(schemaNameContext.start.getStartIndex(), schemaNameContext.stop.getStopIndex(), schema.getLiteral());
        result.getAllSQLSegments().add(schemaSegment);
    }
    if (null != fromTableContext) {
        FromTableSegment fromTableSegment = (FromTableSegment) visitFromTable(fromTableContext);
        TableSegment tableSegment = fromTableSegment.getPattern();
        result.setTable(tableSegment);
        result.getAllSQLSegments().add(tableSegment);
    }
    return result;
}
/** Builds a SHOW CREATE TABLE statement for the referenced table. */
@Override
public ASTNode visitShowCreateTable(final ShowCreateTableContext ctx) {
    ShowCreateTableStatement result = new ShowCreateTableStatement();
    TableSegment table = (TableSegment) visit(ctx.tableName());
    result.setTable(table);
    return result;
}

/** Wraps the visited table name into a FROM-table segment (as its pattern). */
@Override
public ASTNode visitFromTable(final FromTableContext ctx) {
    FromTableSegment fromTableSegment = new FromTableSegment();
    TableSegment tableSegment = (TableSegment) visit(ctx.tableName());
    fromTableSegment.setPattern(tableSegment);
    return fromTableSegment;
}

/** Records only the source positions of the FROM-schema clause. */
@Override
public ASTNode visitFromSchema(final FromSchemaContext ctx) {
    return new FromSchemaSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex());
}

/** Extracts the LIKE pattern literal together with the clause's source positions. */
@Override
public ASTNode visitShowLike(final ShowLikeContext ctx) {
    LiteralValue literalValue = (LiteralValue) visit(ctx.stringLiterals());
    return new ShowLikeSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), literalValue.getLiteral());
}
// The DCL visitors below record only the statement type; no clause details are parsed.

/** CREATE USER. */
@Override
public ASTNode visitCreateUser(final CreateUserContext ctx) {
    return new CreateUserStatement();
}

/** DROP ROLE. */
@Override
public ASTNode visitDropRole(final DropRoleContext ctx) {
    return new DropRoleStatement();
}

/** SET DEFAULT ROLE — represented by the generic SetRoleStatement. */
@Override
public ASTNode visitSetDefaultRole(final SetDefaultRoleContext ctx) {
    return new SetRoleStatement();
}

/** CREATE ROLE. */
@Override
public ASTNode visitCreateRole(final CreateRoleContext ctx) {
    return new CreateRoleStatement();
}

/** DROP USER. */
@Override
public ASTNode visitDropUser(final DropUserContext ctx) {
    return new DropUserStatement();
}

/** ALTER USER. */
@Override
public ASTNode visitAlterUser(final AlterUserContext ctx) {
    return new AlterUserStatement();
}

/** RENAME USER. */
@Override
public ASTNode visitRenameUser(final RenameUserContext ctx) {
    return new RenameUserStatement();
}

/** SET PASSWORD. */
@Override
public ASTNode visitSetPassword(final SetPasswordContext ctx) {
    return new SetPasswordStatement();
}
/**
 * Builds a CREATE TABLE statement: the target table, every column definition, the
 * tables referenced by foreign-key constraints, and a CREATE ... LIKE source table
 * (when present) are collected into the statement's segments.
 */
@Override
public ASTNode visitCreateTable(final CreateTableContext ctx) {
    CreateTableStatement result = new CreateTableStatement();
    TableSegment table = (TableSegment) visit(ctx.tableName());
    result.setTable(table);
    result.getAllSQLSegments().add(table);
    CreateDefinitionClause_Context createDefinitionClause = ctx.createDefinitionClause_();
    if (null != createDefinitionClause) {
        for (CreateDefinition_Context createDefinition : createDefinitionClause.createDefinitions_().createDefinition_()) {
            // A definition entry is either a column definition or a constraint.
            ColumnDefinitionContext columnDefinition = createDefinition.columnDefinition();
            if (null != columnDefinition) {
                ColumnDefinitionSegment columnDefinitionSegment = createColumnDefinitionSegment(columnDefinition, result);
                result.getColumnDefinitions().add(columnDefinitionSegment);
                result.getAllSQLSegments().add(columnDefinitionSegment);
            }
            ConstraintDefinition_Context constraintDefinition = createDefinition.constraintDefinition_();
            ForeignKeyOption_Context foreignKeyOption = null == constraintDefinition ? null : constraintDefinition.foreignKeyOption_();
            if (null != foreignKeyOption) {
                // Record the table referenced by the FOREIGN KEY ... REFERENCES clause.
                result.getAllSQLSegments().add((TableSegment) visit(foreignKeyOption.referenceDefinition_().tableName()));
            }
        }
    }
    CreateLikeClause_Context createLikeClause = ctx.createLikeClause_();
    if (null != createLikeClause) {
        // CREATE TABLE ... LIKE source table.
        result.getAllSQLSegments().add((TableSegment) visit(createLikeClause));
    }
    return result;
}
/**
 * Builds an ALTER TABLE statement, walking each alter specification:
 * ADD COLUMN (definitions plus optional FIRST/AFTER position), ADD CONSTRAINT
 * (recording the referenced table of a foreign key), CHANGE COLUMN, DROP COLUMN,
 * and MODIFY COLUMN.
 */
@Override
public ASTNode visitAlterTable(final AlterTableContext ctx) {
    AlterTableStatement result = new AlterTableStatement();
    TableSegment table = (TableSegment) visit(ctx.tableName());
    result.setTable(table);
    result.getAllSQLSegments().add(table);
    if (null != ctx.alterDefinitionClause_()) {
        for (AlterSpecification_Context alterSpecification : ctx.alterDefinitionClause_().alterSpecification_()) {
            AddColumnSpecificationContext addColumnSpecification = alterSpecification.addColumnSpecification();
            if (null != addColumnSpecification) {
                List<ColumnDefinitionContext> columnDefinitions = addColumnSpecification.columnDefinition();
                ColumnDefinitionSegment columnDefinitionSegment = null;
                for (ColumnDefinitionContext columnDefinition : columnDefinitions) {
                    columnDefinitionSegment = createColumnDefinitionSegment(columnDefinition, result);
                    result.getAddedColumnDefinitions().add(columnDefinitionSegment);
                    result.getAllSQLSegments().add(columnDefinitionSegment);
                }
                // The FIRST/AFTER position applies to the last added definition.
                createColumnPositionSegment(addColumnSpecification.firstOrAfterColumn(), columnDefinitionSegment, result);
            }
            AddConstraintSpecificationContext addConstraintSpecification = alterSpecification.addConstraintSpecification();
            ForeignKeyOption_Context foreignKeyOption = null == addConstraintSpecification
                    ? null : addConstraintSpecification.constraintDefinition_().foreignKeyOption_();
            if (null != foreignKeyOption) {
                // Record the table referenced by the added foreign key.
                result.getAllSQLSegments().add((TableSegment) visit(foreignKeyOption.referenceDefinition_().tableName()));
            }
            ChangeColumnSpecificationContext changeColumnSpecification = alterSpecification.changeColumnSpecification();
            if (null != changeColumnSpecification) {
                createColumnPositionSegment(changeColumnSpecification.firstOrAfterColumn(),
                        createColumnDefinitionSegment(changeColumnSpecification.columnDefinition(), result), result);
            }
            DropColumnSpecificationContext dropColumnSpecification = alterSpecification.dropColumnSpecification();
            if (null != dropColumnSpecification) {
                result.getDroppedColumnNames().add(((ColumnSegment) visit(dropColumnSpecification)).getName());
            }
            ModifyColumnSpecificationContext modifyColumnSpecification = alterSpecification.modifyColumnSpecification();
            if (null != modifyColumnSpecification) {
                createColumnPositionSegment(modifyColumnSpecification.firstOrAfterColumn(),
                        createColumnDefinitionSegment(modifyColumnSpecification.columnDefinition(), result), result);
            }
        }
    }
    return result;
}
/** Builds a DROP TABLE statement covering every listed table. */
@Override
public ASTNode visitDropTable(final DropTableContext ctx) {
    DropTableStatement result = new DropTableStatement();
    ListValue<TableSegment> tables = (ListValue<TableSegment>) visit(ctx.tableNames());
    result.getTables().addAll(tables.getValues());
    result.getAllSQLSegments().addAll(tables.getValues());
    return result;
}

/** Builds a TRUNCATE TABLE statement for the single referenced table. */
@Override
public ASTNode visitTruncateTable(final TruncateTableContext ctx) {
    TruncateStatement result = new TruncateStatement();
    TableSegment table = (TableSegment) visit(ctx.tableName());
    result.getAllSQLSegments().add(table);
    result.getTables().add(table);
    return result;
}

/** Builds a CREATE INDEX statement; only the target table is extracted. */
@Override
public ASTNode visitCreateIndex(final CreateIndexContext ctx) {
    CreateIndexStatement result = new CreateIndexStatement();
    TableSegment table = (TableSegment) visit(ctx.tableName());
    result.setTable(table);
    result.getAllSQLSegments().add(table);
    return result;
}

/** Builds a DROP INDEX statement; only the target table is extracted. */
@Override
public ASTNode visitDropIndex(final DropIndexContext ctx) {
    DropIndexStatement result = new DropIndexStatement();
    TableSegment table = (TableSegment) visit(ctx.tableName());
    result.setTable(table);
    result.getAllSQLSegments().add(table);
    return result;
}
/** Delegates to the index name inside the index definition. */
@Override
public ASTNode visitIndexDefinition_(final IndexDefinition_Context ctx) {
    return visit(ctx.indexName());
}

/** Delegates to the source table name of a CREATE ... LIKE clause. */
@Override
public ASTNode visitCreateLikeClause_(final CreateLikeClause_Context ctx) {
    return visit(ctx.tableName());
}

/** Delegates to the column name being dropped. */
@Override
public ASTNode visitDropColumnSpecification(final DropColumnSpecificationContext ctx) {
    return visit(ctx.columnName());
}
/**
 * Builds an INSERT statement from either a VALUES clause or a SET assignments
 * clause, merges any ON DUPLICATE KEY assignments, attaches the target table, and
 * records the number of parameter markers consumed so far.
 */
@Override
public ASTNode visitInsert(final InsertContext ctx) {
    InsertStatement result;
    if (null != ctx.insertValuesClause()) {
        // INSERT ... VALUES form — the values visitor creates the statement.
        result = (InsertStatement) visit(ctx.insertValuesClause());
    } else {
        // INSERT ... SET form.
        result = new InsertStatement();
        SetAssignmentSegment segment = (SetAssignmentSegment) visit(ctx.setAssignmentsClause());
        result.setSetAssignment(segment);
        result.getAllSQLSegments().add(segment);
    }
    if (null != ctx.onDuplicateKeyClause()) {
        ListValue<AssignmentSegment> segments = (ListValue<AssignmentSegment>) visit(ctx.onDuplicateKeyClause());
        result.getAllSQLSegments().addAll(segments.getValues());
    }
    TableSegment table = (TableSegment) visit(ctx.tableName());
    result.setTable(table);
    result.getAllSQLSegments().add(table);
    // currentParameterIndex was advanced once per '?' encountered while visiting.
    result.setParametersCount(currentParameterIndex);
    return result;
}

/**
 * Builds the INSERT ... VALUES part: the optional column list plus one
 * InsertValuesSegment per parenthesized value tuple.
 */
@Override
public ASTNode visitInsertValuesClause(final InsertValuesClauseContext ctx) {
    InsertStatement result = new InsertStatement();
    if (null != ctx.columnNames()) {
        InsertColumnsSegment insertColumnsSegment = (InsertColumnsSegment) visit(ctx.columnNames());
        result.setColumns(insertColumnsSegment);
        result.getAllSQLSegments().add(insertColumnsSegment);
    }
    Collection<InsertValuesSegment> insertValuesSegments = createInsertValuesSegments(ctx.assignmentValues());
    result.getValues().addAll(insertValuesSegments);
    result.getAllSQLSegments().addAll(insertValuesSegments);
    return result;
}
/** Collects every assignment of an ON DUPLICATE KEY UPDATE clause into a list value. */
@Override
public ASTNode visitOnDuplicateKeyClause(final OnDuplicateKeyClauseContext ctx) {
    ListValue<AssignmentSegment> result = new ListValue<>(new LinkedList<AssignmentSegment>());
    for (AssignmentContext each : ctx.assignment()) {
        result.getValues().add((AssignmentSegment) visit(each));
    }
    return result;
}
/**
 * Builds an UPDATE statement: referenced tables, the SET assignments, the optional
 * WHERE clause, and the running parameter-marker count.
 */
@Override
public ASTNode visitUpdate(final UpdateContext ctx) {
    UpdateStatement result = new UpdateStatement();
    ListValue<TableSegment> tables = (ListValue<TableSegment>) visit(ctx.tableReferences());
    SetAssignmentSegment setSegment = (SetAssignmentSegment) visit(ctx.setAssignmentsClause());
    result.getTables().addAll(tables.getValues());
    result.setSetAssignment(setSegment);
    result.getAllSQLSegments().addAll(tables.getValues());
    result.getAllSQLSegments().add(setSegment);
    if (null != ctx.whereClause()) {
        WhereSegment whereSegment = (WhereSegment) visit(ctx.whereClause());
        result.setWhere(whereSegment);
        result.getAllSQLSegments().add(whereSegment);
    }
    result.setParametersCount(currentParameterIndex);
    return result;
}
/** Collects the assignments of a SET clause into a SetAssignmentSegment with positions. */
@Override
public ASTNode visitSetAssignmentsClause(final SetAssignmentsClauseContext ctx) {
    Collection<AssignmentSegment> assignments = new LinkedList<>();
    for (AssignmentContext each : ctx.assignment()) {
        assignments.add((AssignmentSegment) visit(each));
    }
    return new SetAssignmentSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), assignments);
}

/** Converts one parenthesized value tuple into an InsertValuesSegment. */
@Override
public ASTNode visitAssignmentValues(final AssignmentValuesContext ctx) {
    List<ExpressionSegment> segments = new LinkedList<>();
    for (AssignmentValueContext each : ctx.assignmentValue()) {
        segments.add((ExpressionSegment) visit(each));
    }
    return new InsertValuesSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), segments);
}

/** Builds a column = value assignment segment. */
@Override
public ASTNode visitAssignment(final AssignmentContext ctx) {
    ColumnSegment column = (ColumnSegment) visitColumnName(ctx.columnName());
    ExpressionSegment value = (ExpressionSegment) visit(ctx.assignmentValue());
    return new AssignmentSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), column, value);
}

/** Visits an assignment value; non-expression values fall back to their raw text. */
@Override
public ASTNode visitAssignmentValue(final AssignmentValueContext ctx) {
    ExprContext expr = ctx.expr();
    if (null != expr) {
        return visit(expr);
    }
    return new CommonExpressionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.getText());
}

/** Extracts the raw string token of a blob literal. */
@Override
public ASTNode visitBlobValue(final BlobValueContext ctx) {
    return new LiteralValue(ctx.STRING_().getText());
}
/**
 * Builds a DELETE statement for both the multi-table and the single-table form,
 * attaching the optional WHERE clause and the parameter-marker count.
 */
@Override
public ASTNode visitDelete(final DeleteContext ctx) {
    DeleteStatement result = new DeleteStatement();
    if (null != ctx.multipleTablesClause()) {
        // DELETE t1, t2 FROM ... form.
        ListValue<TableSegment> tables = (ListValue<TableSegment>) visit(ctx.multipleTablesClause());
        result.getTables().addAll(tables.getValues());
        result.getAllSQLSegments().addAll(tables.getValues());
    } else {
        // DELETE FROM <table> form.
        TableSegment table = (TableSegment) visit(ctx.singleTableClause());
        result.getTables().add(table);
        result.getAllSQLSegments().add(table);
    }
    if (null != ctx.whereClause()) {
        WhereSegment where = (WhereSegment) visit(ctx.whereClause());
        result.setWhere(where);
        result.getAllSQLSegments().add(where);
    }
    result.setParametersCount(currentParameterIndex);
    return result;
}
/** Visits the single-table DELETE target, applying the alias when present. */
@Override
public ASTNode visitSingleTableClause(final SingleTableClauseContext ctx) {
    TableSegment result = (TableSegment) visit(ctx.tableName());
    if (null != ctx.alias()) {
        result.setAlias(ctx.alias().getText());
    }
    return result;
}

/** Merges the listed table names and the FROM table references of a multi-table DELETE. */
@Override
public ASTNode visitMultipleTablesClause(final MultipleTablesClauseContext ctx) {
    ListValue<TableSegment> result = new ListValue<>(new LinkedList<TableSegment>());
    result.combine((ListValue<TableSegment>) visit(ctx.multipleTableNames()));
    result.combine((ListValue<TableSegment>) visit(ctx.tableReferences()));
    return result;
}

/** Collects each table name of a multi-table list into a list value. */
@Override
public ASTNode visitMultipleTableNames(final MultipleTableNamesContext ctx) {
    ListValue<TableSegment> result = new ListValue<>(new LinkedList<TableSegment>());
    for (TableNameContext each : ctx.tableName()) {
        result.getValues().add((TableSegment) visit(each));
    }
    return result;
}

/** Builds a SELECT statement via the union clause and records the parameter count. */
@Override
public ASTNode visitSelect(final SelectContext ctx) {
    SelectStatement result = (SelectStatement) visit(ctx.unionClause());
    result.setParametersCount(currentParameterIndex);
    return result;
}

/** NOTE(review): only the first SELECT branch of a UNION is visited here. */
@Override
public ASTNode visitUnionClause(final UnionClauseContext ctx) {
    return visit(ctx.selectClause(0));
}
/**
 * Builds the core SELECT statement: projections, DISTINCT flag, FROM tables,
 * WHERE clause, and ORDER BY — each added both to its dedicated slot and to the
 * flat segment list.
 */
@Override
public ASTNode visitSelectClause(final SelectClauseContext ctx) {
    SelectStatement result = new SelectStatement();
    ProjectionsSegment projections = (ProjectionsSegment) visit(ctx.projections());
    result.setProjections(projections);
    result.getAllSQLSegments().add(projections);
    if (null != ctx.selectSpecification()) {
        // isDistinct is a helper defined elsewhere in this class (not in view here).
        result.getProjections().setDistinctRow(isDistinct(ctx));
    }
    if (null != ctx.fromClause()) {
        ListValue<TableSegment> tables = (ListValue<TableSegment>) visit(ctx.fromClause());
        result.getTables().addAll(tables.getValues());
        result.getAllSQLSegments().addAll(tables.getValues());
    }
    if (null != ctx.whereClause()) {
        WhereSegment where = (WhereSegment) visit(ctx.whereClause());
        result.setWhere(where);
        result.getAllSQLSegments().add(where);
    }
    if (null != ctx.orderByClause()) {
        OrderBySegment orderBy = (OrderBySegment) visit(ctx.orderByClause());
        result.setOrderBy(orderBy);
        result.getAllSQLSegments().add(orderBy);
    }
    return result;
}
/** Resolves the duplicate-specification flag; defaults to false when absent. */
@Override
public ASTNode visitSelectSpecification(final SelectSpecificationContext ctx) {
    if (null != ctx.duplicateSpecification()) {
        return visit(ctx.duplicateSpecification());
    }
    return new BooleanValue(false);
}

/** True for DISTINCT / DISTINCTROW keywords, false otherwise (e.g. ALL). */
@Override
public ASTNode visitDuplicateSpecification(final DuplicateSpecificationContext ctx) {
    String text = ctx.getText();
    if ("DISTINCT".equalsIgnoreCase(text) || "DISTINCTROW".equalsIgnoreCase(text)) {
        return new BooleanValue(true);
    }
    return new BooleanValue(false);
}

/** Collects the projection list, including an unqualified '*' shorthand when present. */
@Override
public ASTNode visitProjections(final ProjectionsContext ctx) {
    Collection<ProjectionSegment> projections = new LinkedList<>();
    if (null != ctx.unqualifiedShorthand()) {
        projections.add(
                new ShorthandProjectionSegment(ctx.unqualifiedShorthand().getStart().getStartIndex(), ctx.unqualifiedShorthand().getStop().getStopIndex(), ctx.unqualifiedShorthand().getText()));
    }
    for (ProjectionContext each : ctx.projection()) {
        projections.add((ProjectionSegment) visit(each));
    }
    ProjectionsSegment result = new ProjectionsSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex());
    result.getProjections().addAll(projections);
    return result;
}
/**
 * Builds one projection item: a qualified shorthand ("t.*"), a column reference
 * (with optional alias), or a general expression (with optional alias, the alias
 * extending the segment's stop index).
 */
@Override
public ASTNode visitProjection(final ProjectionContext ctx) {
    if (null != ctx.qualifiedShorthand()) {
        QualifiedShorthandContext shorthand = ctx.qualifiedShorthand();
        ShorthandProjectionSegment result = new ShorthandProjectionSegment(shorthand.getStart().getStartIndex(), shorthand.getStop().getStopIndex(), shorthand.getText());
        // The identifier before ".*" becomes the owner table.
        result.setOwner(new TableSegment(shorthand.identifier().getStart().getStartIndex(), shorthand.identifier().getStop().getStopIndex(), shorthand.identifier().getText()));
        return result;
    }
    String alias = null == ctx.alias() ? null : ctx.alias().getText();
    if (null != ctx.columnName()) {
        ColumnSegment column = (ColumnSegment) visit(ctx.columnName());
        ColumnProjectionSegment result = new ColumnProjectionSegment(ctx.columnName().getText(), column);
        result.setAlias(alias);
        return result;
    }
    // General expression projection; with an alias, the segment extends to the alias end.
    LiteralExpressionSegment column = (LiteralExpressionSegment) visit(ctx.expr());
    ExpressionProjectionSegment result = Strings.isNullOrEmpty(alias) ? new ExpressionProjectionSegment(column.getStartIndex(), column.getStopIndex(), String.valueOf(column.getLiterals()))
            : new ExpressionProjectionSegment(column.getStartIndex(), ctx.alias().stop.getStopIndex(), String.valueOf(column.getLiterals()));
    result.setAlias(alias);
    return result;
}
/** Delegates FROM to the table-references visitor. */
@Override
public ASTNode visitFromClause(final FromClauseContext ctx) {
    return visit(ctx.tableReferences());
}

/** Merges the tables found in each (possibly escaped) table reference. */
@Override
public ASTNode visitTableReferences(final TableReferencesContext ctx) {
    ListValue<TableSegment> result = new ListValue<>(new LinkedList<TableSegment>());
    for (EscapedTableReferenceContext each : ctx.escapedTableReference()) {
        result.combine((ListValue<TableSegment>) visit(each));
    }
    return result;
}

/** Unwraps the escaped reference to its inner table reference. */
@Override
public ASTNode visitEscapedTableReference(final EscapedTableReferenceContext ctx) {
    return visit(ctx.tableReference());
}

/** Collects the tables of the joined parts and the base table factor. */
@Override
public ASTNode visitTableReference(final TableReferenceContext ctx) {
    ListValue<TableSegment> result = new ListValue<>(new LinkedList<TableSegment>());
    if (null != ctx.joinedTable()) {
        for (JoinedTableContext each : ctx.joinedTable()) {
            result.getValues().add((TableSegment) visit(each));
        }
    }
    if (null != ctx.tableFactor()) {
        result.getValues().add((TableSegment) visit(ctx.tableFactor()));
    }
    return result;
}

/** A table factor is either a parenthesized reference list or a named table with optional alias. */
@Override
public ASTNode visitTableFactor(final TableFactorContext ctx) {
    if (null != ctx.tableReferences()) {
        return visit(ctx.tableReferences());
    }
    TableSegment table = (TableSegment) visit(ctx.tableName());
    if (null != ctx.alias()) {
        table.setAlias(ctx.alias().getText());
    }
    return table;
}

/** Only the joined side's table factor is extracted; join conditions are not parsed here. */
@Override
public ASTNode visitJoinedTable(final JoinedTableContext ctx) {
    return visit(ctx.tableFactor());
}
/**
 * Builds the WHERE segment: visits the boolean expression and flattens it into
 * and-predicates (an OR tree contributes its branches; a single predicate is
 * wrapped in one AndPredicate). Records the parameter-marker index range spanned
 * by the clause.
 */
@Override
public ASTNode visitWhereClause(final WhereClauseContext ctx) {
    WhereSegment result = new WhereSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex());
    // Marker index before visiting the expression = first marker inside the clause.
    result.setParameterMarkerStartIndex(currentParameterIndex);
    ASTNode segment = visit(ctx.expr());
    if (segment instanceof OrPredicateSegment) {
        result.getAndPredicates().addAll(((OrPredicateSegment) segment).getAndPredicates());
    } else if (segment instanceof PredicateSegment) {
        AndPredicate andPredicate = new AndPredicate();
        andPredicate.getPredicates().add((PredicateSegment) segment);
        result.getAndPredicates().add(andPredicate);
    }
    result.setParametersCount(currentParameterIndex);
    return result;
}
/** SET TRANSACTION — only the statement type is recorded. */
@Override
public ASTNode visitSetTransaction(final SetTransactionContext ctx) {
    return new SetTransactionStatement();
}

/** SET AUTOCOMMIT — records the parsed on/off value when present. */
@Override
public ASTNode visitSetAutoCommit(final SetAutoCommitContext ctx) {
    SetAutoCommitStatement result = new SetAutoCommitStatement();
    AutoCommitValueContext autoCommitValueContext = ctx.autoCommitValue();
    if (null != autoCommitValueContext) {
        AutoCommitSegment autoCommitSegment = (AutoCommitSegment) visit(ctx.autoCommitValue())
;
        result.getAllSQLSegments().add(autoCommitSegment);
        result.setAutoCommit(autoCommitSegment.isAutoCommit());
    }
    return result;
}

/** Interprets "1" or "ON" as autocommit enabled; anything else as disabled. */
@Override
public ASTNode visitAutoCommitValue(final AutoCommitValueContext ctx) {
    boolean autoCommit = "1".equals(ctx.getText()) || "ON".equals(ctx.getText());
    return new AutoCommitSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), autoCommit);
}

/** BEGIN / START TRANSACTION. */
@Override
public ASTNode visitBeginTransaction(final BeginTransactionContext ctx) {
    return new BeginTransactionStatement();
}

/** COMMIT. */
@Override
public ASTNode visitCommit(final CommitContext ctx) {
    return new CommitStatement();
}

/** ROLLBACK. */
@Override
public ASTNode visitRollback(final RollbackContext ctx) {
    return new RollbackStatement();
}

/** SAVEPOINT. */
@Override
public ASTNode visitSavepoint(final SavepointContext ctx) {
    return new SavepointStatement();
}
/** Delegates to the schema identifier. */
@Override
public ASTNode visitSchemaName(final SchemaNameContext ctx) {
    return visit(ctx.identifier());
}

/** Collects each table name of a comma-separated list into a list value. */
@Override
public ASTNode visitTableNames(final TableNamesContext ctx) {
    ListValue<TableSegment> result = new ListValue<>(new LinkedList<TableSegment>());
    for (TableNameContext each : ctx.tableName()) {
        result.getValues().add((TableSegment) visit(each));
    }
    return result;
}

/** Builds a table segment with source positions; an owner prefix becomes the schema. */
@Override
public ASTNode visitTableName(final TableNameContext ctx) {
    LiteralValue tableName = (LiteralValue) visit(ctx.name());
    TableSegment result = new TableSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), tableName.getLiteral());
    OwnerContext owner = ctx.owner();
    if (null != owner) {
        result.setOwner(createSchemaSegment(owner));
    }
    return result;
}

/** Builds the INSERT column list segment from each visited column name. */
@Override
public ASTNode visitColumnNames(final ColumnNamesContext ctx) {
    Collection<ColumnSegment> segments = new LinkedList<>();
    for (ColumnNameContext each : ctx.columnName()) {
        segments.add((ColumnSegment) visit(each));
    }
    InsertColumnsSegment result = new InsertColumnsSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex());
    result.getColumns().addAll(segments);
    return result;
}

/** Builds a column segment with source positions; an owner prefix becomes the table. */
@Override
public ASTNode visitColumnName(final ColumnNameContext ctx) {
    LiteralValue columnName = (LiteralValue) visit(ctx.name());
    ColumnSegment result = new ColumnSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), columnName.getLiteral());
    OwnerContext owner = ctx.owner();
    if (null != owner) {
        result.setOwner(createTableSegment(owner));
    }
    return result;
}

/** Builds an index segment from the identifier with source positions. */
@Override
public ASTNode visitIndexName(final IndexNameContext ctx) {
    LiteralValue indexName = (LiteralValue) visit(ctx.identifier());
    return new IndexSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), indexName.getLiteral());
}

/** Only the first identifier of a data-type name is used. */
@Override
public ASTNode visitDataTypeName_(final DataTypeName_Context ctx) {
    return visit(ctx.identifier(0));
}
/**
 * Visits a boolean expression: a boolean primary is delegated, AND/OR combinations
 * are merged via mergePredicateSegment (a helper defined elsewhere in this class),
 * a parenthesized expression recurses, and anything else falls back to its raw text.
 */
@Override
public ASTNode visitExpr(final ExprContext ctx) {
    BooleanPrimaryContext bool = ctx.booleanPrimary();
    if (null != bool) {
        return visit(bool);
    } else if (null != ctx.logicalOperator()) {
        return mergePredicateSegment(visit(ctx.expr(0)), visit(ctx.expr(1)), ctx.logicalOperator().getText());
    } else if (!ctx.expr().isEmpty()) {
        return visit(ctx.expr(0));
    }
    return createExpressionSegment(new LiteralValue(ctx.getText()), ctx);
}

/**
 * Visits a boolean primary: subqueries keep only their raw text, comparisons become
 * compare segments, predicates are delegated, and the rest falls back to raw text.
 */
@Override
public ASTNode visitBooleanPrimary(final BooleanPrimaryContext ctx) {
    if (null != ctx.subquery()) {
        return new SubquerySegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.subquery().getText());
    }
    if (null != ctx.comparisonOperator()) {
        return createCompareSegment(ctx);
    }
    if (null != ctx.predicate()) {
        return visit(ctx.predicate());
    }
    return createExpressionSegment(new LiteralValue(ctx.getText()), ctx);
}

/**
 * Visits a predicate: subquery (raw text), IN, and BETWEEN get dedicated segments
 * (helpers defined elsewhere in this class); otherwise the first bit expression —
 * or the raw text — is wrapped as an expression segment.
 */
@Override
public ASTNode visitPredicate(final PredicateContext ctx) {
    if (null != ctx.subquery()) {
        return new SubquerySegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.subquery().getText());
    }
    if (null != ctx.IN()) {
        return createInSegment(ctx);
    }
    if (null != ctx.BETWEEN()) {
        return createBetweenSegment(ctx);
    }
    BitExprContext bitExpr = ctx.bitExpr(0);
    if (null != bitExpr) {
        return createExpressionSegment(visit(bitExpr), ctx);
    }
    return createExpressionSegment(new LiteralValue(ctx.getText()), ctx);
}
@Override
public ASTNode visitBitExpr(final BitExprContext ctx) {
SimpleExprContext simple = ctx.simpleExpr();
if (null != simple) {
return visit(simple);
}
return new LiteralValue(ctx.getText());
}
@Override
public ASTNode visitSimpleExpr(final SimpleExprContext ctx) {
if (null != ctx.subquery()) {
return new SubquerySegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.subquery().getText());
}
if (null != ctx.parameterMarker()) {
return visit(ctx.parameterMarker());
}
if (null != ctx.literals()) {
return visit(ctx.literals());
}
if (null != ctx.intervalExpression()) {
return visit(ctx.intervalExpression());
}
if (null != ctx.functionCall()) {
return visit(ctx.functionCall());
}
if (null != ctx.columnName()) {
return visit(ctx.columnName());
}
return new CommonExpressionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.getText());
}
@Override
public ASTNode visitParameterMarker(final ParameterMarkerContext ctx) {
return new ParameterMarkerValue(currentParameterIndex++);
}
@Override
public ASTNode visitLiterals(final LiteralsContext ctx) {
if (null != ctx.stringLiterals()) {
return visit(ctx.stringLiterals());
}
if (null != ctx.numberLiterals()) {
return visit(ctx.numberLiterals());
}
if (null != ctx.booleanLiterals()) {
return visit(ctx.booleanLiterals());
}
if (null != ctx.nullValueLiterals()) {
return new CommonExpressionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.getText());
}
return new LiteralValue(ctx.getText());
}
@Override
public ASTNode visitStringLiterals(final StringLiteralsContext ctx) {
String text = ctx.getText();
return new LiteralValue(text.substring(1, text.length() - 1));
}
@Override
public ASTNode visitNumberLiterals(final NumberLiteralsContext ctx) {
    // Keep the numeric token text verbatim; parsing happens at consumption time.
    String numberText = ctx.getText();
    return new NumberValue(numberText);
}
@Override
public ASTNode visitBooleanLiterals(final BooleanLiteralsContext ctx) {
    // Wrap the TRUE/FALSE token text as a boolean value node.
    String booleanText = ctx.getText();
    return new BooleanValue(booleanText);
}
@Override
public ASTNode visitIntervalExpression(final IntervalExpressionContext ctx) {
    // Visit the inner expression only to number any '?' parameter markers,
    // then expose the whole INTERVAL expression as an opaque projection.
    calculateParameterCount(Collections.singleton(ctx.expr()));
    int startIndex = ctx.start.getStartIndex();
    int stopIndex = ctx.stop.getStopIndex();
    return new ExpressionProjectionSegment(startIndex, stopIndex, ctx.getText());
}
@Override
public ASTNode visitOrderByClause(final OrderByClauseContext ctx) {
    // Collect every ORDER BY item in declaration order.
    Collection<OrderByItemSegment> orderByItems = new LinkedList<>();
    for (OrderByItemContext item : ctx.orderByItem()) {
        orderByItems.add((OrderByItemSegment) visit(item));
    }
    return new OrderBySegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), orderByItems);
}
@Override
public ASTNode visitOrderByItem(final OrderByItemContext ctx) {
    // ASC is the default direction when DESC is absent. An item may be a column
    // reference, a 1-based ordinal (number literal), or an arbitrary expression.
    OrderDirection orderDirection = null != ctx.DESC() ? OrderDirection.DESC : OrderDirection.ASC;
    if (null != ctx.columnName()) {
        ColumnSegment column = (ColumnSegment) visit(ctx.columnName());
        return new ColumnOrderByItemSegment(column, orderDirection);
    }
    if (null != ctx.numberLiterals()) {
        // ORDER BY 2 — the literal is parsed as a base-10 index into the projection list.
        return new IndexOrderByItemSegment(ctx.numberLiterals().getStart().getStartIndex(), ctx.numberLiterals().getStop().getStopIndex(),
            SQLUtil.getExactlyNumber(ctx.numberLiterals().getText(), 10).intValue(), orderDirection);
    }
    return new ExpressionOrderByItemSegment(ctx.expr().getStart().getStartIndex(), ctx.expr().getStop().getStopIndex(), ctx.expr().getText(), orderDirection);
}
@Override
public ASTNode visitFunctionCall(final FunctionCallContext ctx) {
    // Routes a function call to the aggregation / regular / special visitor;
    // an unmatched call is kept as an opaque expression projection.
    if (null != ctx.aggregationFunction()) {
        return visit(ctx.aggregationFunction());
    }
    if (null != ctx.regularFunction()) {
        return visit(ctx.regularFunction());
    }
    if (null != ctx.specialFunction()) {
        return visit(ctx.specialFunction());
    }
    return new ExpressionProjectionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.getText());
}
@Override
public ASTNode visitAggregationFunction(final AggregationFunctionContext ctx) {
    // Only recognized aggregation names (COUNT, SUM, ...) become aggregation
    // segments; any other name degrades to an opaque expression projection.
    String functionName = ctx.aggregationFunctionName_().getText();
    if (AggregationType.isAggregationType(functionName)) {
        return createAggregationSegment(ctx);
    }
    return new ExpressionProjectionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.getText());
}
@Override
public ASTNode visitSpecialFunction(final SpecialFunctionContext ctx) {
    // Dispatch for functions with non-standard argument syntax (CAST, CONVERT,
    // POSITION ... IN, SUBSTRING ... FROM, etc.). Exactly one alternative is
    // expected to be non-null; otherwise fall back to an opaque projection.
    if (null != ctx.groupConcatFunction()) {
        return visit(ctx.groupConcatFunction());
    }
    if (null != ctx.windowFunction()) {
        return visit(ctx.windowFunction());
    }
    if (null != ctx.castFunction()) {
        return visit(ctx.castFunction());
    }
    if (null != ctx.convertFunction()) {
        return visit(ctx.convertFunction());
    }
    if (null != ctx.positionFunction()) {
        return visit(ctx.positionFunction());
    }
    if (null != ctx.substringFunction()) {
        return visit(ctx.substringFunction());
    }
    if (null != ctx.extractFunction()) {
        return visit(ctx.extractFunction());
    }
    if (null != ctx.charFunction()) {
        return visit(ctx.charFunction());
    }
    if (null != ctx.weightStringFunction()) {
        return visit(ctx.weightStringFunction());
    }
    return new ExpressionProjectionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.getText());
}
@Override
public ASTNode visitGroupConcatFunction(final GroupConcatFunctionContext ctx) {
    // Visit argument expressions only to number '?' markers; the call itself is opaque.
    calculateParameterCount(ctx.expr());
    int startIndex = ctx.start.getStartIndex();
    int stopIndex = ctx.stop.getStopIndex();
    return new ExpressionProjectionSegment(startIndex, stopIndex, ctx.getText());
}
@Override
public ASTNode visitWindowFunction(final WindowFunctionContext ctx) {
    // Visit argument expressions only to number '?' markers; the call itself is opaque.
    calculateParameterCount(ctx.expr());
    int startIndex = ctx.start.getStartIndex();
    int stopIndex = ctx.stop.getStopIndex();
    return new ExpressionProjectionSegment(startIndex, stopIndex, ctx.getText());
}
@Override
public ASTNode visitCastFunction(final CastFunctionContext ctx) {
    // CAST has a single inner expression; visit it only to number '?' markers.
    calculateParameterCount(Collections.singleton(ctx.expr()));
    int startIndex = ctx.start.getStartIndex();
    int stopIndex = ctx.stop.getStopIndex();
    return new ExpressionProjectionSegment(startIndex, stopIndex, ctx.getText());
}
@Override
public ASTNode visitConvertFunction(final ConvertFunctionContext ctx) {
    // CONVERT has a single inner expression; visit it only to number '?' markers.
    calculateParameterCount(Collections.singleton(ctx.expr()));
    int startIndex = ctx.start.getStartIndex();
    int stopIndex = ctx.stop.getStopIndex();
    return new ExpressionProjectionSegment(startIndex, stopIndex, ctx.getText());
}
@Override
public ASTNode visitPositionFunction(final PositionFunctionContext ctx) {
    // Visit argument expressions only to number '?' markers; the call itself is opaque.
    calculateParameterCount(ctx.expr());
    int startIndex = ctx.start.getStartIndex();
    int stopIndex = ctx.stop.getStopIndex();
    return new ExpressionProjectionSegment(startIndex, stopIndex, ctx.getText());
}
@Override
public ASTNode visitSubstringFunction(final SubstringFunctionContext ctx) {
    // SUBSTRING has a single inner expression; visit it only to number '?' markers.
    calculateParameterCount(Collections.singleton(ctx.expr()));
    int startIndex = ctx.start.getStartIndex();
    int stopIndex = ctx.stop.getStopIndex();
    return new ExpressionProjectionSegment(startIndex, stopIndex, ctx.getText());
}
@Override
public ASTNode visitExtractFunction(final ExtractFunctionContext ctx) {
    // EXTRACT has a single inner expression; visit it only to number '?' markers.
    calculateParameterCount(Collections.singleton(ctx.expr()));
    int startIndex = ctx.start.getStartIndex();
    int stopIndex = ctx.stop.getStopIndex();
    return new ExpressionProjectionSegment(startIndex, stopIndex, ctx.getText());
}
@Override
public ASTNode visitCharFunction(final CharFunctionContext ctx) {
    // Visit argument expressions only to number '?' markers; the call itself is opaque.
    calculateParameterCount(ctx.expr());
    int startIndex = ctx.start.getStartIndex();
    int stopIndex = ctx.stop.getStopIndex();
    return new ExpressionProjectionSegment(startIndex, stopIndex, ctx.getText());
}
@Override
public ASTNode visitWeightStringFunction(final WeightStringFunctionContext ctx) {
    // WEIGHT_STRING has a single inner expression; visit it only to number '?' markers.
    calculateParameterCount(Collections.singleton(ctx.expr()));
    int startIndex = ctx.start.getStartIndex();
    int stopIndex = ctx.stop.getStopIndex();
    return new ExpressionProjectionSegment(startIndex, stopIndex, ctx.getText());
}
@Override
public ASTNode visitRegularFunction(final RegularFunctionContext ctx) {
    // Visit argument expressions only to number '?' markers; the call itself is opaque.
    calculateParameterCount(ctx.expr());
    int startIndex = ctx.start.getStartIndex();
    int stopIndex = ctx.stop.getStopIndex();
    return new ExpressionProjectionSegment(startIndex, stopIndex, ctx.getText());
}
@Override
public ASTNode visitIdentifier(final IdentifierContext ctx) {
    // Unreserved keywords used as identifiers get their own visitor; anything
    // else is wrapped directly as a literal value.
    UnreservedWord_Context unreserved = ctx.unreservedWord_();
    return null == unreserved ? new LiteralValue(ctx.getText()) : visit(unreserved);
}
@Override
public ASTNode visitUnreservedWord_(final UnreservedWord_Context ctx) {
    // An unreserved keyword acting as an identifier is just its own text.
    String word = ctx.getText();
    return new LiteralValue(word);
}
// Builds a schema segment from an owner qualifier (the "db" in "db.table").
private SchemaSegment createSchemaSegment(final OwnerContext ownerContext) {
    String schemaName = ((LiteralValue) visit(ownerContext.identifier())).getLiteral();
    return new SchemaSegment(ownerContext.getStart().getStartIndex(), ownerContext.getStop().getStopIndex(), schemaName);
}
// Builds a table segment from an owner qualifier (the "t" in "t.column").
private TableSegment createTableSegment(final OwnerContext ownerContext) {
    String tableName = ((LiteralValue) visit(ownerContext.identifier())).getLiteral();
    return new TableSegment(ownerContext.getStart().getStartIndex(), ownerContext.getStop().getStopIndex(), tableName);
}
// Adapts a visited value node into an expression segment positioned over the
// given rule context. Value nodes (literal / number / parameter marker) are
// re-wrapped with source positions; any other node is passed through unchanged.
private ASTNode createExpressionSegment(final ASTNode astNode, final ParserRuleContext context) {
    if (astNode instanceof LiteralValue) {
        return new LiteralExpressionSegment(context.start.getStartIndex(), context.stop.getStopIndex(), ((LiteralValue) astNode).getLiteral());
    }
    if (astNode instanceof NumberValue) {
        return new LiteralExpressionSegment(context.start.getStartIndex(), context.stop.getStopIndex(), ((NumberValue) astNode).getNumber());
    }
    if (astNode instanceof ParameterMarkerValue) {
        return new ParameterMarkerExpressionSegment(context.start.getStartIndex(), context.stop.getStopIndex(), ((ParameterMarkerValue) astNode).getParameterIndex());
    }
    return astNode;
}
// Builds a column-definition segment from a column definition context.
// Scans both inline and generated data-type options for a PRIMARY KEY marker,
// and registers any tables referenced by REFERENCES clauses on the statement.
private ColumnDefinitionSegment createColumnDefinitionSegment(final ColumnDefinitionContext columnDefinition, final DDLStatement statement) {
    ColumnSegment column = (ColumnSegment) visit(columnDefinition.columnName());
    LiteralValue dataType = (LiteralValue) visit(columnDefinition.dataType().dataTypeName_());
    boolean isPrimaryKey = false;
    for (InlineDataType_Context inlineDataType : columnDefinition.inlineDataType_()) {
        isPrimaryKey |= extractCommonDataTypeOption(inlineDataType.commonDataTypeOption_(), statement);
    }
    for (GeneratedDataType_Context generatedDataType : columnDefinition.generatedDataType_()) {
        isPrimaryKey |= extractCommonDataTypeOption(generatedDataType.commonDataTypeOption_(), statement);
    }
    return new ColumnDefinitionSegment(column.getStartIndex(), column.getStopIndex(),
        column.getName(), dataType.getLiteral(), isPrimaryKey);
}

// Registers the option's referenced table (if any) on the statement and
// returns whether the option declares this column a PRIMARY KEY.
// A null option (alternative not present in the parse tree) contributes nothing.
private boolean extractCommonDataTypeOption(final CommonDataTypeOption_Context commonDataTypeOption, final DDLStatement statement) {
    if (null == commonDataTypeOption) {
        return false;
    }
    if (null != commonDataTypeOption.referenceDefinition_()) {
        statement.getAllSQLSegments().add((TableSegment) visit(commonDataTypeOption.referenceDefinition_().tableName()));
    }
    return null != commonDataTypeOption.primaryKey();
}
// Registers a column-position segment (FIRST / AFTER other-column) for an
// ALTER TABLE column change. Does nothing when no position clause is present.
private void createColumnPositionSegment(final FirstOrAfterColumnContext firstOrAfterColumn, final ColumnDefinitionSegment columnDefinition,
                                         final AlterTableStatement statement) {
    if (null == firstOrAfterColumn) {
        return;
    }
    ColumnPositionSegment columnPositionSegment = null;
    if (null != firstOrAfterColumn.FIRST()) {
        columnPositionSegment = new ColumnFirstPositionSegment(columnDefinition.getStartIndex(), columnDefinition.getStopIndex(),
            columnDefinition.getColumnName());
    } else if (null != firstOrAfterColumn.AFTER()) {
        ColumnSegment afterColumn = (ColumnSegment) visit(firstOrAfterColumn.columnName());
        columnPositionSegment = new ColumnAfterPositionSegment(columnDefinition.getStartIndex(), columnDefinition.getStopIndex(),
            columnDefinition.getColumnName(), afterColumn.getName());
    }
    // The grammar should guarantee either FIRST or AFTER here; guard anyway so a
    // null segment is never registered on the statement collections.
    if (null != columnPositionSegment) {
        statement.getChangedPositionColumns().add(columnPositionSegment);
        statement.getAllSQLSegments().add(columnPositionSegment);
    }
}
// Converts each parsed VALUES (...) group into an InsertValuesSegment.
private Collection<InsertValuesSegment> createInsertValuesSegments(final Collection<AssignmentValuesContext> assignmentValuesContexts) {
    Collection<InsertValuesSegment> segments = new LinkedList<>();
    for (AssignmentValuesContext valuesContext : assignmentValuesContexts) {
        segments.add((InsertValuesSegment) visit(valuesContext));
    }
    return segments;
}
// Builds an aggregation (or aggregation-distinct) projection for a recognized
// aggregation function call. Caller has already checked the name via
// AggregationType.isAggregationType, so valueOf cannot throw here.
private ASTNode createAggregationSegment(final AggregationFunctionContext ctx) {
    AggregationType type = AggregationType.valueOf(ctx.aggregationFunctionName_().getText());
    // Child 1 is expected to be the '(' terminal following the function name;
    // its start index marks where the inner expression text begins.
    int innerExpressionStartIndex = ((TerminalNode) ctx.getChild(1)).getSymbol().getStartIndex();
    if (null != ctx.distinct()) {
        return new AggregationDistinctProjectionSegment(ctx.getStart().getStartIndex(),
            ctx.getStop().getStopIndex(), ctx.getText(), type, innerExpressionStartIndex, getDistinctExpression(ctx));
    }
    return new AggregationProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(),
        ctx.getText(), type, innerExpressionStartIndex);
}
// Concatenates the expression text inside "FUNC ( DISTINCT <expr...> )":
// children 0..2 are the name, '(' and DISTINCT; the last child is ')'.
private String getDistinctExpression(final AggregationFunctionContext ctx) {
    StringBuilder distinctExpression = new StringBuilder();
    int lastChild = ctx.getChildCount() - 1;
    for (int index = 3; index < lastChild; index++) {
        distinctExpression.append(ctx.getChild(index).getText());
    }
    return distinctExpression.toString();
}
// Builds a comparison predicate (left op right) from a boolean-primary context.
// When the right side is itself a column, a column-to-column predicate is built.
// NOTE(review): both branches cast leftValue to ColumnSegment — if the left side
// can visit to a non-column node this throws ClassCastException; confirm the
// grammar restricts booleanPrimary's left operand here.
private PredicateSegment createCompareSegment(final BooleanPrimaryContext ctx) {
    ASTNode leftValue = visit(ctx.booleanPrimary());
    ASTNode rightValue = visit(ctx.predicate());
    if (rightValue instanceof ColumnSegment) {
        return new PredicateSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), (ColumnSegment) leftValue, (ColumnSegment) rightValue);
    }
    return new PredicateSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(),
        (ColumnSegment) leftValue, new PredicateCompareRightValue(ctx.comparisonOperator().getText(), (ExpressionSegment) rightValue));
}
// Builds a "column IN (expr, ...)" predicate.
// NOTE(review): Guava's Lists.transform returns a lazy view, so visit(input)
// runs whenever the right-value list is iterated — possibly repeatedly and
// after this method returns; confirm the deferred visits are acceptable here.
private PredicateSegment createInSegment(final PredicateContext ctx) {
    ColumnSegment column = (ColumnSegment) visit(ctx.bitExpr(0));
    Collection<ExpressionSegment> segments = Lists.transform(ctx.expr(), new Function<ExprContext, ExpressionSegment>() {
        @Override
        public ExpressionSegment apply(final ExprContext input) {
            return (ExpressionSegment) visit(input);
        }
    });
    return new PredicateSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), column, new PredicateInRightValue(segments));
}
// Builds a "column BETWEEN lower AND upper" predicate.
private PredicateSegment createBetweenSegment(final PredicateContext ctx) {
    ColumnSegment column = (ColumnSegment) visit(ctx.bitExpr(0));
    ExpressionSegment lowerBound = (ExpressionSegment) visit(ctx.bitExpr(1));
    ExpressionSegment upperBound = (ExpressionSegment) visit(ctx.predicate());
    PredicateBetweenRightValue rightValue = new PredicateBetweenRightValue(lowerBound, upperBound);
    return new PredicateSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), column, rightValue);
}
// Combines two predicate trees under the given logical operator:
// OR concatenates AND-groups, AND distributes over them.
private OrPredicateSegment mergePredicateSegment(final ASTNode left, final ASTNode right, final String operator) {
    Optional<LogicalOperator> logicalOperator = LogicalOperator.valueFrom(operator);
    Preconditions.checkState(logicalOperator.isPresent());
    return LogicalOperator.OR == logicalOperator.get()
        ? mergeOrPredicateSegment(left, right)
        : mergeAndPredicateSegment(left, right);
}
// OR: the result's AND-groups are simply the union of both sides' groups.
private OrPredicateSegment mergeOrPredicateSegment(final ASTNode left, final ASTNode right) {
    OrPredicateSegment merged = new OrPredicateSegment();
    Collection<AndPredicate> groups = merged.getAndPredicates();
    groups.addAll(getAndPredicates(left));
    groups.addAll(getAndPredicates(right));
    return merged;
}
// AND: cross-product of the AND-groups on both sides (distributivity of AND over OR).
private OrPredicateSegment mergeAndPredicateSegment(final ASTNode left, final ASTNode right) {
    OrPredicateSegment merged = new OrPredicateSegment();
    for (AndPredicate leftGroup : getAndPredicates(left)) {
        for (AndPredicate rightGroup : getAndPredicates(right)) {
            merged.getAndPredicates().add(createAndPredicate(leftGroup, rightGroup));
        }
    }
    return merged;
}
// Concatenates the predicates of two AND-groups into a new group.
private AndPredicate createAndPredicate(final AndPredicate left, final AndPredicate right) {
    AndPredicate combined = new AndPredicate();
    combined.getPredicates().addAll(left.getPredicates());
    combined.getPredicates().addAll(right.getPredicates());
    return combined;
}
// Normalizes any predicate AST node into a collection of AND-groups:
// an OR node yields its groups, an AND node yields itself, and a bare
// predicate is wrapped in a fresh single-element group.
private Collection<AndPredicate> getAndPredicates(final ASTNode astNode) {
    if (astNode instanceof OrPredicateSegment) {
        return ((OrPredicateSegment) astNode).getAndPredicates();
    }
    if (astNode instanceof AndPredicate) {
        return Collections.singleton((AndPredicate) astNode);
    }
    AndPredicate wrapper = new AndPredicate();
    wrapper.getPredicates().add((PredicateSegment) astNode);
    return Collections.singleton(wrapper);
}
// True when any select specification of this SELECT resolves to a distinct flag.
private boolean isDistinct(final SelectClauseContext ctx) {
    for (SelectSpecificationContext specification : ctx.selectSpecification()) {
        if (((BooleanValue) visit(specification)).isCorrect()) {
            return true;
        }
    }
    return false;
}
// Visits each expression solely for its side effect of numbering '?' markers
// (visitParameterMarker increments currentParameterIndex); results are discarded.
private void calculateParameterCount(final Collection<ExprContext> exprContexts) {
    for (ExprContext exprContext : exprContexts) {
        visit(exprContext);
    }
}
}
|
class MySQLVisitor extends MySQLStatementBaseVisitor<ASTNode> implements SQLVisitor {
private int currentParameterIndex;
@Override
public ASTNode visitUse(final UseContext ctx) {
    // USE <schema>: record the target schema name on the statement.
    UseStatement statement = new UseStatement();
    LiteralValue schemaName = (LiteralValue) visit(ctx.schemaName());
    statement.setSchema(schemaName.getLiteral());
    return statement;
}
@Override
public ASTNode visitDesc(final DescContext ctx) {
    // DESC <table>: record the described table on the statement.
    DescribeStatement statement = new DescribeStatement();
    statement.setTable((TableSegment) visit(ctx.tableName()));
    return statement;
}
@Override
public ASTNode visitShowDatabases(final ShowDatabasesContext ctx) {
    // SHOW DATABASES [LIKE pattern]: record the optional LIKE segment.
    ShowDatabasesStatement statement = new ShowDatabasesStatement();
    if (null != ctx.showLike()) {
        statement.getAllSQLSegments().add((ShowLikeSegment) visit(ctx.showLike()));
    }
    return statement;
}
@Override
public ASTNode visitShowTables(final ShowTablesContext ctx) {
    // SHOW TABLES [FROM schema] [LIKE pattern]: record the optional clauses.
    ShowTablesStatement statement = new ShowTablesStatement();
    if (null != ctx.fromSchema()) {
        statement.getAllSQLSegments().add((FromSchemaSegment) visit(ctx.fromSchema()));
    }
    if (null != ctx.showLike()) {
        statement.getAllSQLSegments().add((ShowLikeSegment) visit(ctx.showLike()));
    }
    return statement;
}
@Override
public ASTNode visitShowTableStatus(final ShowTableStatusContext ctx) {
    // SHOW TABLE STATUS [FROM schema] [LIKE pattern]: record the optional clauses.
    ShowTableStatusStatement statement = new ShowTableStatusStatement();
    if (null != ctx.fromSchema()) {
        statement.getAllSQLSegments().add((FromSchemaSegment) visit(ctx.fromSchema()));
    }
    if (null != ctx.showLike()) {
        statement.getAllSQLSegments().add((ShowLikeSegment) visit(ctx.showLike()));
    }
    return statement;
}
// SHOW INDEX FROM <table> [FROM <schema>]: records the schema and table segments.
// Fix: the annotation was duplicated ("@Override @Override"), which does not compile —
// repeating an annotation is only legal for @Repeatable annotations, which @Override is not.
@Override
public ASTNode visitShowIndex(final ShowIndexContext ctx) {
    ShowIndexStatement result = new ShowIndexStatement();
    FromSchemaContext fromSchemaContext = ctx.fromSchema();
    FromTableContext fromTableContext = ctx.fromTable();
    if (null != fromSchemaContext) {
        SchemaNameContext schemaNameContext = fromSchemaContext.schemaName();
        LiteralValue schema = (LiteralValue) visit(schemaNameContext);
        SchemaSegment schemaSegment = new SchemaSegment(schemaNameContext.start.getStartIndex(), schemaNameContext.stop.getStopIndex(), schema.getLiteral());
        result.getAllSQLSegments().add(schemaSegment);
    }
    if (null != fromTableContext) {
        FromTableSegment fromTableSegment = (FromTableSegment) visitFromTable(fromTableContext);
        TableSegment tableSegment = fromTableSegment.getPattern();
        result.setTable(tableSegment);
        result.getAllSQLSegments().add(tableSegment);
    }
    return result;
}
@Override
public ASTNode visitShowCreateTable(final ShowCreateTableContext ctx) {
    // SHOW CREATE TABLE <table>: record the target table on the statement.
    ShowCreateTableStatement statement = new ShowCreateTableStatement();
    statement.setTable((TableSegment) visit(ctx.tableName()));
    return statement;
}
@Override
public ASTNode visitFromTable(final FromTableContext ctx) {
    // Wrap the FROM <table> clause's table as the segment's pattern.
    FromTableSegment segment = new FromTableSegment();
    segment.setPattern((TableSegment) visit(ctx.tableName()));
    return segment;
}
@Override
public ASTNode visitFromSchema(final FromSchemaContext ctx) {
    // Only the source span of the FROM <schema> clause is recorded here;
    // the schema name itself is extracted by callers when needed (see visitShowIndex).
    return new FromSchemaSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex());
}
@Override
public ASTNode visitShowLike(final ShowLikeContext ctx) {
    // LIKE 'pattern': the string visitor already strips the surrounding quotes.
    String pattern = ((LiteralValue) visit(ctx.stringLiterals())).getLiteral();
    return new ShowLikeSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), pattern);
}
@Override
public ASTNode visitCreateUser(final CreateUserContext ctx) {
    // No details of the DCL statement are extracted; an empty marker statement suffices.
    return new CreateUserStatement();
}
@Override
public ASTNode visitDropRole(final DropRoleContext ctx) {
    // No details of the DCL statement are extracted; an empty marker statement suffices.
    return new DropRoleStatement();
}
@Override
public ASTNode visitSetDefaultRole(final SetDefaultRoleContext ctx) {
    // SET DEFAULT ROLE is represented by the generic SetRoleStatement marker.
    return new SetRoleStatement();
}
@Override
public ASTNode visitCreateRole(final CreateRoleContext ctx) {
    // No details of the DCL statement are extracted; an empty marker statement suffices.
    return new CreateRoleStatement();
}
@Override
public ASTNode visitDropUser(final DropUserContext ctx) {
    // No details of the DCL statement are extracted; an empty marker statement suffices.
    return new DropUserStatement();
}
@Override
public ASTNode visitAlterUser(final AlterUserContext ctx) {
    // No details of the DCL statement are extracted; an empty marker statement suffices.
    return new AlterUserStatement();
}
@Override
public ASTNode visitRenameUser(final RenameUserContext ctx) {
    // No details of the DCL statement are extracted; an empty marker statement suffices.
    return new RenameUserStatement();
}
@Override
public ASTNode visitSetPassword(final SetPasswordContext ctx) {
    // No details of the DCL statement are extracted; an empty marker statement suffices.
    return new SetPasswordStatement();
}
@Override
public ASTNode visitCreateTable(final CreateTableContext ctx) {
    // Builds a CreateTableStatement: the created table, every column definition,
    // tables referenced by FOREIGN KEY constraints, and the source table of a
    // CREATE TABLE ... LIKE clause are all registered as SQL segments.
    CreateTableStatement result = new CreateTableStatement();
    TableSegment table = (TableSegment) visit(ctx.tableName());
    result.setTable(table);
    result.getAllSQLSegments().add(table);
    CreateDefinitionClause_Context createDefinitionClause = ctx.createDefinitionClause_();
    if (null != createDefinitionClause) {
        for (CreateDefinition_Context createDefinition : createDefinitionClause.createDefinitions_().createDefinition_()) {
            ColumnDefinitionContext columnDefinition = createDefinition.columnDefinition();
            if (null != columnDefinition) {
                ColumnDefinitionSegment columnDefinitionSegment = createColumnDefinitionSegment(columnDefinition, result);
                result.getColumnDefinitions().add(columnDefinitionSegment);
                result.getAllSQLSegments().add(columnDefinitionSegment);
            }
            ConstraintDefinition_Context constraintDefinition = createDefinition.constraintDefinition_();
            ForeignKeyOption_Context foreignKeyOption = null == constraintDefinition ? null : constraintDefinition.foreignKeyOption_();
            if (null != foreignKeyOption) {
                // FOREIGN KEY ... REFERENCES t: record the referenced table too.
                result.getAllSQLSegments().add((TableSegment) visit(foreignKeyOption.referenceDefinition_().tableName()));
            }
        }
    }
    CreateLikeClause_Context createLikeClause = ctx.createLikeClause_();
    if (null != createLikeClause) {
        // CREATE TABLE ... LIKE source: record the source table.
        result.getAllSQLSegments().add((TableSegment) visit(createLikeClause));
    }
    return result;
}
@Override
public ASTNode visitAlterTable(final AlterTableContext ctx) {
    // Builds an AlterTableStatement, processing every alter specification:
    // ADD COLUMN (with optional FIRST/AFTER position), ADD CONSTRAINT foreign
    // keys, CHANGE/MODIFY COLUMN (position only), and DROP COLUMN.
    AlterTableStatement result = new AlterTableStatement();
    TableSegment table = (TableSegment) visit(ctx.tableName());
    result.setTable(table);
    result.getAllSQLSegments().add(table);
    if (null != ctx.alterDefinitionClause_()) {
        for (AlterSpecification_Context alterSpecification : ctx.alterDefinitionClause_().alterSpecification_()) {
            AddColumnSpecificationContext addColumnSpecification = alterSpecification.addColumnSpecification();
            if (null != addColumnSpecification) {
                List<ColumnDefinitionContext> columnDefinitions = addColumnSpecification.columnDefinition();
                ColumnDefinitionSegment columnDefinitionSegment = null;
                for (ColumnDefinitionContext columnDefinition : columnDefinitions) {
                    columnDefinitionSegment = createColumnDefinitionSegment(columnDefinition, result);
                    result.getAddedColumnDefinitions().add(columnDefinitionSegment);
                    result.getAllSQLSegments().add(columnDefinitionSegment);
                }
                // Only the last added column is paired with the FIRST/AFTER clause.
                createColumnPositionSegment(addColumnSpecification.firstOrAfterColumn(), columnDefinitionSegment, result);
            }
            AddConstraintSpecificationContext addConstraintSpecification = alterSpecification.addConstraintSpecification();
            ForeignKeyOption_Context foreignKeyOption = null == addConstraintSpecification
                ? null : addConstraintSpecification.constraintDefinition_().foreignKeyOption_();
            if (null != foreignKeyOption) {
                // ADD FOREIGN KEY ... REFERENCES t: record the referenced table.
                result.getAllSQLSegments().add((TableSegment) visit(foreignKeyOption.referenceDefinition_().tableName()));
            }
            ChangeColumnSpecificationContext changeColumnSpecification = alterSpecification.changeColumnSpecification();
            if (null != changeColumnSpecification) {
                createColumnPositionSegment(changeColumnSpecification.firstOrAfterColumn(),
                    createColumnDefinitionSegment(changeColumnSpecification.columnDefinition(), result), result);
            }
            DropColumnSpecificationContext dropColumnSpecification = alterSpecification.dropColumnSpecification();
            if (null != dropColumnSpecification) {
                result.getDroppedColumnNames().add(((ColumnSegment) visit(dropColumnSpecification)).getName());
            }
            ModifyColumnSpecificationContext modifyColumnSpecification = alterSpecification.modifyColumnSpecification();
            if (null != modifyColumnSpecification) {
                createColumnPositionSegment(modifyColumnSpecification.firstOrAfterColumn(),
                    createColumnDefinitionSegment(modifyColumnSpecification.columnDefinition(), result), result);
            }
        }
    }
    return result;
}
@Override
public ASTNode visitDropTable(final DropTableContext ctx) {
    // DROP TABLE t1, t2, ...: register every dropped table on the statement.
    DropTableStatement statement = new DropTableStatement();
    ListValue<TableSegment> tables = (ListValue<TableSegment>) visit(ctx.tableNames());
    statement.getTables().addAll(tables.getValues());
    statement.getAllSQLSegments().addAll(tables.getValues());
    return statement;
}
@Override
public ASTNode visitTruncateTable(final TruncateTableContext ctx) {
    // TRUNCATE <table>: register the single target table.
    TruncateStatement statement = new TruncateStatement();
    TableSegment table = (TableSegment) visit(ctx.tableName());
    statement.getTables().add(table);
    statement.getAllSQLSegments().add(table);
    return statement;
}
@Override
public ASTNode visitCreateIndex(final CreateIndexContext ctx) {
    // CREATE INDEX ... ON <table>: only the table is extracted.
    CreateIndexStatement statement = new CreateIndexStatement();
    TableSegment table = (TableSegment) visit(ctx.tableName());
    statement.setTable(table);
    statement.getAllSQLSegments().add(table);
    return statement;
}
@Override
public ASTNode visitDropIndex(final DropIndexContext ctx) {
    // DROP INDEX ... ON <table>: only the table is extracted.
    DropIndexStatement statement = new DropIndexStatement();
    TableSegment table = (TableSegment) visit(ctx.tableName());
    statement.setTable(table);
    statement.getAllSQLSegments().add(table);
    return statement;
}
@Override
public ASTNode visitIndexDefinition_(final IndexDefinition_Context ctx) {
    // An index definition contributes only its index name.
    return visit(ctx.indexName());
}
@Override
public ASTNode visitCreateLikeClause_(final CreateLikeClause_Context ctx) {
    // CREATE TABLE ... LIKE source: the clause resolves to the source table segment.
    return visit(ctx.tableName());
}
@Override
public ASTNode visitDropColumnSpecification(final DropColumnSpecificationContext ctx) {
    // DROP COLUMN <name>: the specification resolves to the column segment.
    return visit(ctx.columnName());
}
@Override
public ASTNode visitInsert(final InsertContext ctx) {
    // INSERT comes in two grammar forms: VALUES-based and SET-assignment-based.
    // Either way the target table, optional ON DUPLICATE KEY assignments, and
    // the final '?' parameter count are attached afterwards.
    InsertStatement result;
    if (null != ctx.insertValuesClause()) {
        result = (InsertStatement) visit(ctx.insertValuesClause());
    } else {
        // INSERT ... SET col = value, ...
        result = new InsertStatement();
        SetAssignmentSegment segment = (SetAssignmentSegment) visit(ctx.setAssignmentsClause());
        result.setSetAssignment(segment);
        result.getAllSQLSegments().add(segment);
    }
    if (null != ctx.onDuplicateKeyClause()) {
        ListValue<AssignmentSegment> segments = (ListValue<AssignmentSegment>) visit(ctx.onDuplicateKeyClause());
        result.getAllSQLSegments().addAll(segments.getValues());
    }
    TableSegment table = (TableSegment) visit(ctx.tableName());
    result.setTable(table);
    result.getAllSQLSegments().add(table);
    // currentParameterIndex now equals the total number of '?' markers visited.
    result.setParametersCount(currentParameterIndex);
    return result;
}
@Override
public ASTNode visitInsertValuesClause(final InsertValuesClauseContext ctx) {
    // INSERT [ (columns) ] VALUES (...), (...): register the optional column
    // list and one segment per VALUES group.
    InsertStatement statement = new InsertStatement();
    if (null != ctx.columnNames()) {
        InsertColumnsSegment columnsSegment = (InsertColumnsSegment) visit(ctx.columnNames());
        statement.setColumns(columnsSegment);
        statement.getAllSQLSegments().add(columnsSegment);
    }
    Collection<InsertValuesSegment> valuesSegments = createInsertValuesSegments(ctx.assignmentValues());
    statement.getValues().addAll(valuesSegments);
    statement.getAllSQLSegments().addAll(valuesSegments);
    return statement;
}
@Override
public ASTNode visitOnDuplicateKeyClause(final OnDuplicateKeyClauseContext ctx) {
    // ON DUPLICATE KEY UPDATE a = x, b = y: collect each assignment in order.
    ListValue<AssignmentSegment> assignments = new ListValue<>(new LinkedList<AssignmentSegment>());
    for (AssignmentContext assignment : ctx.assignment()) {
        assignments.getValues().add((AssignmentSegment) visit(assignment));
    }
    return assignments;
}
@Override
public ASTNode visitUpdate(final UpdateContext ctx) {
    // UPDATE tables SET assignments [WHERE ...]: registers the tables, the SET
    // segment, the optional WHERE segment and the final '?' parameter count.
    UpdateStatement result = new UpdateStatement();
    ListValue<TableSegment> tables = (ListValue<TableSegment>) visit(ctx.tableReferences());
    SetAssignmentSegment setSegment = (SetAssignmentSegment) visit(ctx.setAssignmentsClause());
    result.getTables().addAll(tables.getValues());
    result.setSetAssignment(setSegment);
    result.getAllSQLSegments().addAll(tables.getValues());
    result.getAllSQLSegments().add(setSegment);
    if (null != ctx.whereClause()) {
        WhereSegment whereSegment = (WhereSegment) visit(ctx.whereClause());
        result.setWhere(whereSegment);
        result.getAllSQLSegments().add(whereSegment);
    }
    // currentParameterIndex now equals the total number of '?' markers visited.
    result.setParametersCount(currentParameterIndex);
    return result;
}
@Override
public ASTNode visitSetAssignmentsClause(final SetAssignmentsClauseContext ctx) {
    // SET a = x, b = y, ...: collect each assignment into one segment.
    Collection<AssignmentSegment> assignments = new LinkedList<>();
    for (AssignmentContext assignment : ctx.assignment()) {
        assignments.add((AssignmentSegment) visit(assignment));
    }
    return new SetAssignmentSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), assignments);
}
@Override
public ASTNode visitAssignmentValues(final AssignmentValuesContext ctx) {
    // One VALUES (...) group: collect its expressions in positional order.
    List<ExpressionSegment> expressions = new LinkedList<>();
    for (AssignmentValueContext value : ctx.assignmentValue()) {
        expressions.add((ExpressionSegment) visit(value));
    }
    return new InsertValuesSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), expressions);
}
@Override
public ASTNode visitAssignment(final AssignmentContext ctx) {
    // column = value: pair the column segment with its value expression.
    ColumnSegment column = (ColumnSegment) visitColumnName(ctx.columnName());
    ExpressionSegment assignedValue = (ExpressionSegment) visit(ctx.assignmentValue());
    return new AssignmentSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), column, assignedValue);
}
@Override
public ASTNode visitAssignmentValue(final AssignmentValueContext ctx) {
    // An assignment value is either a full expression or (e.g. DEFAULT) an
    // opaque common expression over the raw text.
    ExprContext expression = ctx.expr();
    if (null == expression) {
        return new CommonExpressionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.getText());
    }
    return visit(expression);
}
@Override
public ASTNode visitBlobValue(final BlobValueContext ctx) {
    // NOTE(review): unlike visitStringLiterals, the raw token text is kept as-is
    // here (surrounding quotes are not stripped) — confirm this asymmetry is intended.
    return new LiteralValue(ctx.STRING_().getText());
}
@Override
public ASTNode visitDelete(final DeleteContext ctx) {
    // DELETE has two grammar forms: multi-table ("DELETE t1, t2 FROM ...") and
    // single-table ("DELETE FROM t"). Registers tables, the optional WHERE
    // segment, and the final '?' parameter count.
    DeleteStatement result = new DeleteStatement();
    if (null != ctx.multipleTablesClause()) {
        ListValue<TableSegment> tables = (ListValue<TableSegment>) visit(ctx.multipleTablesClause());
        result.getTables().addAll(tables.getValues());
        result.getAllSQLSegments().addAll(tables.getValues());
    } else {
        TableSegment table = (TableSegment) visit(ctx.singleTableClause());
        result.getTables().add(table);
        result.getAllSQLSegments().add(table);
    }
    if (null != ctx.whereClause()) {
        WhereSegment where = (WhereSegment) visit(ctx.whereClause());
        result.setWhere(where);
        result.getAllSQLSegments().add(where);
    }
    // currentParameterIndex now equals the total number of '?' markers visited.
    result.setParametersCount(currentParameterIndex);
    return result;
}
@Override
public ASTNode visitSingleTableClause(final SingleTableClauseContext ctx) {
    // Single-table DELETE target, with its alias attached when declared.
    TableSegment table = (TableSegment) visit(ctx.tableName());
    if (null != ctx.alias()) {
        table.setAlias(ctx.alias().getText());
    }
    return table;
}
@Override
public ASTNode visitMultipleTablesClause(final MultipleTablesClauseContext ctx) {
    // Multi-table DELETE: merge the deleted-table names with the FROM references.
    ListValue<TableSegment> tables = new ListValue<>(new LinkedList<TableSegment>());
    tables.combine((ListValue<TableSegment>) visit(ctx.multipleTableNames()));
    tables.combine((ListValue<TableSegment>) visit(ctx.tableReferences()));
    return tables;
}
@Override
public ASTNode visitMultipleTableNames(final MultipleTableNamesContext ctx) {
    // Collect each comma-separated table name into a list value.
    ListValue<TableSegment> tables = new ListValue<>(new LinkedList<TableSegment>());
    for (TableNameContext tableName : ctx.tableName()) {
        tables.getValues().add((TableSegment) visit(tableName));
    }
    return tables;
}
@Override
public ASTNode visitSelect(final SelectContext ctx) {
    // Delegate to the union clause, then record the total '?' marker count.
    SelectStatement statement = (SelectStatement) visit(ctx.unionClause());
    statement.setParametersCount(currentParameterIndex);
    return statement;
}
@Override
public ASTNode visitUnionClause(final UnionClauseContext ctx) {
    // NOTE(review): only the first SELECT of the union is visited; subsequent
    // select clauses (after UNION) are ignored by this visitor — confirm whether
    // union support is deliberately deferred.
    return visit(ctx.selectClause(0));
}
@Override
public ASTNode visitSelectClause(final SelectClauseContext ctx) {
    // Assembles a SelectStatement from its optional clauses: projections are
    // mandatory; DISTINCT flag, FROM tables, WHERE and ORDER BY are attached
    // only when present in the parse tree.
    SelectStatement result = new SelectStatement();
    ProjectionsSegment projections = (ProjectionsSegment) visit(ctx.projections());
    result.setProjections(projections);
    result.getAllSQLSegments().add(projections);
    if (null != ctx.selectSpecification()) {
        // DISTINCT / DISTINCTROW anywhere in the specifications marks the row set distinct.
        result.getProjections().setDistinctRow(isDistinct(ctx));
    }
    if (null != ctx.fromClause()) {
        ListValue<TableSegment> tables = (ListValue<TableSegment>) visit(ctx.fromClause());
        result.getTables().addAll(tables.getValues());
        result.getAllSQLSegments().addAll(tables.getValues());
    }
    if (null != ctx.whereClause()) {
        WhereSegment where = (WhereSegment) visit(ctx.whereClause());
        result.setWhere(where);
        result.getAllSQLSegments().add(where);
    }
    if (null != ctx.orderByClause()) {
        OrderBySegment orderBy = (OrderBySegment) visit(ctx.orderByClause());
        result.setOrderBy(orderBy);
        result.getAllSQLSegments().add(orderBy);
    }
    return result;
}
@Override
public ASTNode visitSelectSpecification(final SelectSpecificationContext ctx) {
    // Specifications other than duplicate handling (e.g. SQL_CACHE) are not distinct flags.
    DuplicateSpecificationContext duplicateSpecification = ctx.duplicateSpecification();
    return null == duplicateSpecification ? new BooleanValue(false) : visit(duplicateSpecification);
}
@Override
public ASTNode visitDuplicateSpecification(final DuplicateSpecificationContext ctx) {
    // DISTINCT and DISTINCTROW are synonyms; anything else (e.g. ALL) is not distinct.
    String specification = ctx.getText();
    boolean distinct = "DISTINCT".equalsIgnoreCase(specification) || "DISTINCTROW".equalsIgnoreCase(specification);
    return new BooleanValue(distinct);
}
@Override
public ASTNode visitProjections(final ProjectionsContext ctx) {
    // Builds the projections segment: an unqualified '*' (when present) comes
    // first, followed by each explicit projection in declaration order.
    Collection<ProjectionSegment> projectionSegments = new LinkedList<>();
    if (null != ctx.unqualifiedShorthand()) {
        projectionSegments.add(
            new ShorthandProjectionSegment(ctx.unqualifiedShorthand().getStart().getStartIndex(), ctx.unqualifiedShorthand().getStop().getStopIndex(), ctx.unqualifiedShorthand().getText()));
    }
    for (ProjectionContext projection : ctx.projection()) {
        projectionSegments.add((ProjectionSegment) visit(projection));
    }
    ProjectionsSegment projections = new ProjectionsSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex());
    projections.getProjections().addAll(projectionSegments);
    return projections;
}
// Builds one projection item: a qualified shorthand (owner.*), a plain column
// (with optional alias), or a generic expression projection.
@Override
public ASTNode visitProjection(final ProjectionContext ctx) {
if (null != ctx.qualifiedShorthand()) {
QualifiedShorthandContext shorthand = ctx.qualifiedShorthand();
ShorthandProjectionSegment result = new ShorthandProjectionSegment(shorthand.getStart().getStartIndex(), shorthand.getStop().getStopIndex(), shorthand.getText());
// The identifier before ".*" is the owning table/alias.
result.setOwner(new TableSegment(shorthand.identifier().getStart().getStartIndex(), shorthand.identifier().getStop().getStopIndex(), shorthand.identifier().getText()));
return result;
}
String alias = null == ctx.alias() ? null : ctx.alias().getText();
if (null != ctx.columnName()) {
ColumnSegment column = (ColumnSegment) visit(ctx.columnName());
ColumnProjectionSegment result = new ColumnProjectionSegment(ctx.columnName().getText(), column);
result.setAlias(alias);
return result;
}
// Fallback: treat the item as a literal expression. When an alias exists, the
// segment's stop index is stretched to cover the alias text as well.
LiteralExpressionSegment column = (LiteralExpressionSegment) visit(ctx.expr());
ExpressionProjectionSegment result = Strings.isNullOrEmpty(alias) ? new ExpressionProjectionSegment(column.getStartIndex(), column.getStopIndex(), String.valueOf(column.getLiterals()))
: new ExpressionProjectionSegment(column.getStartIndex(), ctx.alias().stop.getStopIndex(), String.valueOf(column.getLiterals()));
result.setAlias(alias);
return result;
}
// FROM clause delegates straight to its table references.
@Override
public ASTNode visitFromClause(final FromClauseContext ctx) {
return visit(ctx.tableReferences());
}
// Flattens every table-reference entry into one list of table segments, in order.
@Override
public ASTNode visitTableReferences(final TableReferencesContext ctx) {
ListValue<TableSegment> result = new ListValue<>(new LinkedList<TableSegment>());
for (EscapedTableReferenceContext each : ctx.escapedTableReference()) {
result.combine((ListValue<TableSegment>) visit(each));
}
return result;
}
// An escaped table reference is parsed as its plain inner table reference.
@Override
public ASTNode visitEscapedTableReference(final EscapedTableReferenceContext ctx) {
return visit(ctx.tableReference());
}
// Collects the tables of one reference: joined tables first, then the plain
// table factor (when present).
@Override
public ASTNode visitTableReference(final TableReferenceContext ctx) {
ListValue<TableSegment> result = new ListValue<>(new LinkedList<TableSegment>());
if (null != ctx.joinedTable()) {
for (JoinedTableContext each : ctx.joinedTable()) {
result.getValues().add((TableSegment) visit(each));
}
}
if (null != ctx.tableFactor()) {
result.getValues().add((TableSegment) visit(ctx.tableFactor()));
}
return result;
}
// A table factor is either a parenthesized reference list or a single named
// table with an optional alias.
@Override
public ASTNode visitTableFactor(final TableFactorContext ctx) {
if (null != ctx.tableReferences()) {
return visit(ctx.tableReferences());
}
TableSegment table = (TableSegment) visit(ctx.tableName());
if (null != ctx.alias()) {
table.setAlias(ctx.alias().getText());
}
return table;
}
// Only the joined table's factor is extracted here; join conditions are not modeled.
@Override
public ASTNode visitJoinedTable(final JoinedTableContext ctx) {
return visit(ctx.tableFactor());
}
// Builds the WHERE segment and normalizes its condition into OR-of-AND predicates.
// Parameter-marker bookkeeping: the start index is captured BEFORE visiting the
// expression (each '?' bumps currentParameterIndex during the visit), and the
// value after the visit is stored as the count.
// NOTE(review): setParametersCount receives the absolute running index, not the
// delta from the recorded start — verify this is the intended contract.
@Override
public ASTNode visitWhereClause(final WhereClauseContext ctx) {
WhereSegment result = new WhereSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex());
result.setParameterMarkerStartIndex(currentParameterIndex);
ASTNode segment = visit(ctx.expr());
if (segment instanceof OrPredicateSegment) {
result.getAndPredicates().addAll(((OrPredicateSegment) segment).getAndPredicates());
} else if (segment instanceof PredicateSegment) {
// A single predicate is wrapped into a one-element AND group.
AndPredicate andPredicate = new AndPredicate();
andPredicate.getPredicates().add((PredicateSegment) segment);
result.getAndPredicates().add(andPredicate);
}
result.setParametersCount(currentParameterIndex);
return result;
}
// SET TRANSACTION carries no parsed detail; an empty statement marker suffices.
@Override
public ASTNode visitSetTransaction(final SetTransactionContext ctx) {
return new SetTransactionStatement();
}
// Builds a SET AUTOCOMMIT statement, capturing the on/off value when one is given.
@Override
public ASTNode visitSetAutoCommit(final SetAutoCommitContext ctx) {
SetAutoCommitStatement result = new SetAutoCommitStatement();
AutoCommitValueContext autoCommitValueContext = ctx.autoCommitValue();
if (null != autoCommitValueContext) {
AutoCommitSegment autoCommitSegment = (AutoCommitSegment) visit(ctx.autoCommitValue());
result.getAllSQLSegments().add(autoCommitSegment);
result.setAutoCommit(autoCommitSegment.isAutoCommit());
}
return result;
}
// Parses the autocommit switch value. "1"/"ON" enable autocommit; anything else
// (e.g. "0"/"OFF") disables it.
// Fix: MySQL keywords are lexed case-insensitively, so the raw token text may be
// "on"/"On"; the original case-sensitive "ON".equals(...) silently treated those
// as OFF. Compare the keyword ignoring case.
@Override
public ASTNode visitAutoCommitValue(final AutoCommitValueContext ctx) {
String value = ctx.getText();
boolean autoCommit = "1".equals(value) || "ON".equalsIgnoreCase(value);
return new AutoCommitSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), autoCommit);
}
// The four TCL statements below carry no segments; each maps to a bare statement object.
@Override
public ASTNode visitBeginTransaction(final BeginTransactionContext ctx) {
return new BeginTransactionStatement();
}
@Override
public ASTNode visitCommit(final CommitContext ctx) {
return new CommitStatement();
}
@Override
public ASTNode visitRollback(final RollbackContext ctx) {
return new RollbackStatement();
}
@Override
public ASTNode visitSavepoint(final SavepointContext ctx) {
return new SavepointStatement();
}
// A schema name resolves to its identifier literal.
@Override
public ASTNode visitSchemaName(final SchemaNameContext ctx) {
return visit(ctx.identifier());
}
// Visits each table name into a segment list, preserving order.
@Override
public ASTNode visitTableNames(final TableNamesContext ctx) {
ListValue<TableSegment> result = new ListValue<>(new LinkedList<TableSegment>());
for (TableNameContext each : ctx.tableName()) {
result.getValues().add((TableSegment) visit(each));
}
return result;
}
// Builds a table segment from its name, attaching the owning schema when qualified.
@Override
public ASTNode visitTableName(final TableNameContext ctx) {
LiteralValue tableName = (LiteralValue) visit(ctx.name());
TableSegment result = new TableSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), tableName.getLiteral());
OwnerContext owner = ctx.owner();
if (null != owner) {
result.setOwner(createSchemaSegment(owner));
}
return result;
}
// Collects a parenthesized column list into an INSERT columns segment.
@Override
public ASTNode visitColumnNames(final ColumnNamesContext ctx) {
Collection<ColumnSegment> segments = new LinkedList<>();
for (ColumnNameContext each : ctx.columnName()) {
segments.add((ColumnSegment) visit(each));
}
InsertColumnsSegment result = new InsertColumnsSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex());
result.getColumns().addAll(segments);
return result;
}
// Builds a column segment; a qualifier (owner) becomes the owning table segment.
@Override
public ASTNode visitColumnName(final ColumnNameContext ctx) {
LiteralValue columnName = (LiteralValue) visit(ctx.name());
ColumnSegment result = new ColumnSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), columnName.getLiteral());
OwnerContext owner = ctx.owner();
if (null != owner) {
result.setOwner(createTableSegment(owner));
}
return result;
}
// An index name maps directly to an index segment with its source range.
@Override
public ASTNode visitIndexName(final IndexNameContext ctx) {
LiteralValue indexName = (LiteralValue) visit(ctx.identifier());
return new IndexSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), indexName.getLiteral());
}
// Only the first identifier of a data type name is significant here.
@Override
public ASTNode visitDataTypeName_(final DataTypeName_Context ctx) {
return visit(ctx.identifier(0));
}
// Dispatches an expression: a boolean primary wins, otherwise two sub-expressions
// joined by AND/OR are merged, otherwise a single parenthesized/NOT sub-expression
// is unwrapped; anything else becomes a literal expression segment.
// Operands are visited left-to-right, which keeps parameter-marker indices in
// source order (visit() has side effects on currentParameterIndex).
@Override
public ASTNode visitExpr(final ExprContext ctx) {
BooleanPrimaryContext bool = ctx.booleanPrimary();
if (null != bool) {
return visit(bool);
} else if (null != ctx.logicalOperator()) {
return mergePredicateSegment(visit(ctx.expr(0)), visit(ctx.expr(1)), ctx.logicalOperator().getText());
} else if (!ctx.expr().isEmpty()) {
return visit(ctx.expr(0));
}
return createExpressionSegment(new LiteralValue(ctx.getText()), ctx);
}
// Boolean primary: subqueries are kept opaque as raw text, comparisons become
// predicate segments, plain predicates recurse; everything else is a literal
// expression. The check order matters — e.g. a comparison must be recognized
// before falling through to the predicate branch.
@Override
public ASTNode visitBooleanPrimary(final BooleanPrimaryContext ctx) {
if (null != ctx.subquery()) {
return new SubquerySegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.subquery().getText());
}
if (null != ctx.comparisonOperator()) {
return createCompareSegment(ctx);
}
if (null != ctx.predicate()) {
return visit(ctx.predicate());
}
return createExpressionSegment(new LiteralValue(ctx.getText()), ctx);
}
// Predicate: IN and BETWEEN get dedicated segment builders; a bare bit
// expression is wrapped as an expression segment.
@Override
public ASTNode visitPredicate(final PredicateContext ctx) {
if (null != ctx.subquery()) {
return new SubquerySegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.subquery().getText());
}
if (null != ctx.IN()) {
return createInSegment(ctx);
}
if (null != ctx.BETWEEN()) {
return createBetweenSegment(ctx);
}
BitExprContext bitExpr = ctx.bitExpr(0);
if (null != bitExpr) {
return createExpressionSegment(visit(bitExpr), ctx);
}
return createExpressionSegment(new LiteralValue(ctx.getText()), ctx);
}
// A bit expression is only decomposed when it is a simple expression; composite
// bitwise forms are kept as raw text.
@Override
public ASTNode visitBitExpr(final BitExprContext ctx) {
SimpleExprContext simple = ctx.simpleExpr();
if (null != simple) {
return visit(simple);
}
return new LiteralValue(ctx.getText());
}
// Simple expression dispatch; the alternatives are mutually exclusive grammar
// branches, and anything unrecognized is preserved as a common expression segment.
@Override
public ASTNode visitSimpleExpr(final SimpleExprContext ctx) {
if (null != ctx.subquery()) {
return new SubquerySegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.subquery().getText());
}
if (null != ctx.parameterMarker()) {
return visit(ctx.parameterMarker());
}
if (null != ctx.literals()) {
return visit(ctx.literals());
}
if (null != ctx.intervalExpression()) {
return visit(ctx.intervalExpression());
}
if (null != ctx.functionCall()) {
return visit(ctx.functionCall());
}
if (null != ctx.columnName()) {
return visit(ctx.columnName());
}
return new CommonExpressionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.getText());
}
// Each '?' consumes the next parameter index; the post-increment makes indices
// follow source order across the whole statement. Stateful: relies on the visit
// order of the tree walk.
@Override
public ASTNode visitParameterMarker(final ParameterMarkerContext ctx) {
return new ParameterMarkerValue(currentParameterIndex++);
}
// Literal dispatch; NULL is kept as a common expression segment rather than a value.
@Override
public ASTNode visitLiterals(final LiteralsContext ctx) {
if (null != ctx.stringLiterals()) {
return visit(ctx.stringLiterals());
}
if (null != ctx.numberLiterals()) {
return visit(ctx.numberLiterals());
}
if (null != ctx.booleanLiterals()) {
return visit(ctx.booleanLiterals());
}
if (null != ctx.nullValueLiterals()) {
return new CommonExpressionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.getText());
}
return new LiteralValue(ctx.getText());
}
// Strips the surrounding quote characters from a string literal.
@Override
public ASTNode visitStringLiterals(final StringLiteralsContext ctx) {
String text = ctx.getText();
return new LiteralValue(text.substring(1, text.length() - 1));
}
@Override
public ASTNode visitNumberLiterals(final NumberLiteralsContext ctx) {
return new NumberValue(ctx.getText());
}
@Override
public ASTNode visitBooleanLiterals(final BooleanLiteralsContext ctx) {
return new BooleanValue(ctx.getText());
}
// INTERVAL expressions stay opaque, but their inner expression is still visited
// so parameter markers inside are counted.
@Override
public ASTNode visitIntervalExpression(final IntervalExpressionContext ctx) {
calculateParameterCount(Collections.singleton(ctx.expr()));
return new ExpressionProjectionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.getText());
}
// Collects ORDER BY items in declaration order.
@Override
public ASTNode visitOrderByClause(final OrderByClauseContext ctx) {
Collection<OrderByItemSegment> items = new LinkedList<>();
for (OrderByItemContext each : ctx.orderByItem()) {
items.add((OrderByItemSegment) visit(each));
}
return new OrderBySegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), items);
}
// An item sorts by column, by 1-based select-list position (numeric literal),
// or by an arbitrary expression. Direction defaults to ASC unless DESC appears.
@Override
public ASTNode visitOrderByItem(final OrderByItemContext ctx) {
OrderDirection orderDirection = null != ctx.DESC() ? OrderDirection.DESC : OrderDirection.ASC;
if (null != ctx.columnName()) {
ColumnSegment column = (ColumnSegment) visit(ctx.columnName());
return new ColumnOrderByItemSegment(column, orderDirection);
}
if (null != ctx.numberLiterals()) {
return new IndexOrderByItemSegment(ctx.numberLiterals().getStart().getStartIndex(), ctx.numberLiterals().getStop().getStopIndex(),
SQLUtil.getExactlyNumber(ctx.numberLiterals().getText(), 10).intValue(), orderDirection);
}
return new ExpressionOrderByItemSegment(ctx.expr().getStart().getStartIndex(), ctx.expr().getStop().getStopIndex(), ctx.expr().getText(), orderDirection);
}
// Routes a function call to its aggregation / regular / special handler; unknown
// call shapes are preserved verbatim as expression projections.
@Override
public ASTNode visitFunctionCall(final FunctionCallContext ctx) {
if (null != ctx.aggregationFunction()) {
return visit(ctx.aggregationFunction());
}
if (null != ctx.regularFunction()) {
return visit(ctx.regularFunction());
}
if (null != ctx.specialFunction()) {
return visit(ctx.specialFunction());
}
return new ExpressionProjectionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.getText());
}
// Only names recognized by AggregationType become aggregation segments; anything
// else falls back to a plain expression projection.
@Override
public ASTNode visitAggregationFunction(final AggregationFunctionContext ctx) {
if (AggregationType.isAggregationType(ctx.aggregationFunctionName_().getText())) {
return createAggregationSegment(ctx);
}
return new ExpressionProjectionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.getText());
}
// Dispatch table for functions with non-standard argument syntax (CAST, CONVERT,
// SUBSTRING, ...). Exactly one alternative is populated by the grammar; the final
// fallback keeps the raw text.
@Override
public ASTNode visitSpecialFunction(final SpecialFunctionContext ctx) {
if (null != ctx.groupConcatFunction()) {
return visit(ctx.groupConcatFunction());
}
if (null != ctx.windowFunction()) {
return visit(ctx.windowFunction());
}
if (null != ctx.castFunction()) {
return visit(ctx.castFunction());
}
if (null != ctx.convertFunction()) {
return visit(ctx.convertFunction());
}
if (null != ctx.positionFunction()) {
return visit(ctx.positionFunction());
}
if (null != ctx.substringFunction()) {
return visit(ctx.substringFunction());
}
if (null != ctx.extractFunction()) {
return visit(ctx.extractFunction());
}
if (null != ctx.charFunction()) {
return visit(ctx.charFunction());
}
if (null != ctx.weightStringFunction()) {
return visit(ctx.weightStringFunction());
}
return new ExpressionProjectionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.getText());
}
// The handlers below share one pattern: visit the nested expression(s) purely to
// advance the parameter-marker counter, then keep the whole call as raw text in
// an expression projection. Single-expr rules are wrapped with
// Collections.singleton; multi-expr rules pass the list directly.
@Override
public ASTNode visitGroupConcatFunction(final GroupConcatFunctionContext ctx) {
calculateParameterCount(ctx.expr());
return new ExpressionProjectionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.getText());
}
@Override
public ASTNode visitWindowFunction(final WindowFunctionContext ctx) {
calculateParameterCount(ctx.expr());
return new ExpressionProjectionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.getText());
}
@Override
public ASTNode visitCastFunction(final CastFunctionContext ctx) {
calculateParameterCount(Collections.singleton(ctx.expr()));
return new ExpressionProjectionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.getText());
}
@Override
public ASTNode visitConvertFunction(final ConvertFunctionContext ctx) {
calculateParameterCount(Collections.singleton(ctx.expr()));
return new ExpressionProjectionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.getText());
}
@Override
public ASTNode visitPositionFunction(final PositionFunctionContext ctx) {
calculateParameterCount(ctx.expr());
return new ExpressionProjectionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.getText());
}
@Override
public ASTNode visitSubstringFunction(final SubstringFunctionContext ctx) {
calculateParameterCount(Collections.singleton(ctx.expr()));
return new ExpressionProjectionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.getText());
}
@Override
public ASTNode visitExtractFunction(final ExtractFunctionContext ctx) {
calculateParameterCount(Collections.singleton(ctx.expr()));
return new ExpressionProjectionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.getText());
}
@Override
public ASTNode visitCharFunction(final CharFunctionContext ctx) {
calculateParameterCount(ctx.expr());
return new ExpressionProjectionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.getText());
}
@Override
public ASTNode visitWeightStringFunction(final WeightStringFunctionContext ctx) {
calculateParameterCount(Collections.singleton(ctx.expr()));
return new ExpressionProjectionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.getText());
}
@Override
public ASTNode visitRegularFunction(final RegularFunctionContext ctx) {
calculateParameterCount(ctx.expr());
return new ExpressionProjectionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.getText());
}
// Identifiers and unreserved words both resolve to their literal text.
@Override
public ASTNode visitIdentifier(final IdentifierContext ctx) {
UnreservedWord_Context unreservedWord = ctx.unreservedWord_();
if (null != unreservedWord) {
return visit(unreservedWord);
}
return new LiteralValue(ctx.getText());
}
@Override
public ASTNode visitUnreservedWord_(final UnreservedWord_Context ctx) {
return new LiteralValue(ctx.getText());
}
// Owner-qualifier helpers: the same owner context is interpreted as a schema for
// table names and as a table for column names.
private SchemaSegment createSchemaSegment(final OwnerContext ownerContext) {
LiteralValue literalValue = (LiteralValue) visit(ownerContext.identifier());
return new SchemaSegment(ownerContext.getStart().getStartIndex(), ownerContext.getStop().getStopIndex(), literalValue.getLiteral());
}
private TableSegment createTableSegment(final OwnerContext ownerContext) {
LiteralValue literalValue = (LiteralValue) visit(ownerContext.identifier());
return new TableSegment(ownerContext.getStart().getStartIndex(), ownerContext.getStop().getStopIndex(), literalValue.getLiteral());
}
// Wraps plain parsed values (literals, numbers, parameter markers) into
// expression segments carrying the rule's source range; nodes that are already
// segments pass through untouched.
private ASTNode createExpressionSegment(final ASTNode astNode, final ParserRuleContext context) {
int startIndex = context.start.getStartIndex();
int stopIndex = context.stop.getStopIndex();
if (astNode instanceof LiteralValue) {
return new LiteralExpressionSegment(startIndex, stopIndex, ((LiteralValue) astNode).getLiteral());
}
if (astNode instanceof NumberValue) {
return new LiteralExpressionSegment(startIndex, stopIndex, ((NumberValue) astNode).getNumber());
}
if (astNode instanceof ParameterMarkerValue) {
return new ParameterMarkerExpressionSegment(startIndex, stopIndex, ((ParameterMarkerValue) astNode).getParameterIndex());
}
return astNode;
}
// Builds a column definition from name + data type, scanning both inline and
// generated data-type options for a PRIMARY KEY marker and for referenced tables
// (which are registered on the statement).
// Refactor: the two option loops had identical bodies; the shared logic now
// lives in one helper.
private ColumnDefinitionSegment createColumnDefinitionSegment(final ColumnDefinitionContext columnDefinition, final DDLStatement statement) {
ColumnSegment column = (ColumnSegment) visit(columnDefinition.columnName());
LiteralValue dataType = (LiteralValue) visit(columnDefinition.dataType().dataTypeName_());
boolean isPrimaryKey = false;
for (InlineDataType_Context inlineDataType : columnDefinition.inlineDataType_()) {
isPrimaryKey |= processCommonDataTypeOption(inlineDataType.commonDataTypeOption_(), statement);
}
for (GeneratedDataType_Context generatedDataType : columnDefinition.generatedDataType_()) {
isPrimaryKey |= processCommonDataTypeOption(generatedDataType.commonDataTypeOption_(), statement);
}
return new ColumnDefinitionSegment(column.getStartIndex(), column.getStopIndex(),
column.getName(), dataType.getLiteral(), isPrimaryKey);
}

// Registers any referenced table on the statement and reports whether this
// option marks the column as primary key. Null-safe for absent options.
private boolean processCommonDataTypeOption(final CommonDataTypeOption_Context commonDataTypeOption, final DDLStatement statement) {
if (null == commonDataTypeOption) {
return false;
}
if (null != commonDataTypeOption.referenceDefinition_()) {
statement.getAllSQLSegments().add((TableSegment) visit(commonDataTypeOption.referenceDefinition_().tableName()));
}
return null != commonDataTypeOption.primaryKey();
}
// Records a FIRST / AFTER column-position clause of an ALTER TABLE on the statement.
// Fix: the original added the segment unconditionally, so a firstOrAfterColumn
// context matching neither FIRST nor AFTER inserted null into both collections;
// the segment is now only registered when one of the two branches produced it.
private void createColumnPositionSegment(final FirstOrAfterColumnContext firstOrAfterColumn, final ColumnDefinitionSegment columnDefinition,
final AlterTableStatement statement) {
if (null == firstOrAfterColumn) {
return;
}
ColumnPositionSegment columnPositionSegment = null;
if (null != firstOrAfterColumn.FIRST()) {
columnPositionSegment = new ColumnFirstPositionSegment(columnDefinition.getStartIndex(), columnDefinition.getStopIndex(),
columnDefinition.getColumnName());
} else if (null != firstOrAfterColumn.AFTER()) {
ColumnSegment afterColumn = (ColumnSegment) visit(firstOrAfterColumn.columnName());
columnPositionSegment = new ColumnAfterPositionSegment(columnDefinition.getStartIndex(), columnDefinition.getStopIndex(),
columnDefinition.getColumnName(), afterColumn.getName());
}
if (null != columnPositionSegment) {
statement.getChangedPositionColumns().add(columnPositionSegment);
statement.getAllSQLSegments().add(columnPositionSegment);
}
}
// Visits every VALUES(...) group into an insert-values segment, keeping order.
private Collection<InsertValuesSegment> createInsertValuesSegments(final Collection<AssignmentValuesContext> assignmentValuesContexts) {
Collection<InsertValuesSegment> result = new LinkedList<>();
assignmentValuesContexts.forEach(each -> result.add((InsertValuesSegment) visit(each)));
return result;
}
// Builds an aggregation (or aggregation-distinct) projection.
// NOTE(review): getChild(1) is assumed to be the token right after the function
// name (presumably the opening parenthesis) — confirm against the grammar.
private ASTNode createAggregationSegment(final AggregationFunctionContext ctx) {
AggregationType type = AggregationType.valueOf(ctx.aggregationFunctionName_().getText());
int innerExpressionStartIndex = ((TerminalNode) ctx.getChild(1)).getSymbol().getStartIndex();
if (null != ctx.distinct()) {
return new AggregationDistinctProjectionSegment(ctx.getStart().getStartIndex(),
ctx.getStop().getStopIndex(), ctx.getText(), type, innerExpressionStartIndex, getDistinctExpression(ctx));
}
return new AggregationProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(),
ctx.getText(), type, innerExpressionStartIndex);
}
// Concatenates the children between index 3 and the last one — presumably
// skipping "name ( DISTINCT" and the trailing ")" — to recover the distinct
// expression text. TODO confirm the child offsets against the grammar.
private String getDistinctExpression(final AggregationFunctionContext ctx) {
StringBuilder result = new StringBuilder();
for (int i = 3; i < ctx.getChildCount() - 1; i++) {
result.append(ctx.getChild(i).getText());
}
return result.toString();
}
// Builds a comparison predicate: column-vs-column when the right side is a
// column, otherwise column-vs-expression with the operator text preserved.
// NOTE(review): the left side is unconditionally cast to ColumnSegment — a
// non-column left operand would throw ClassCastException; verify the grammar
// guarantees a column there.
private PredicateSegment createCompareSegment(final BooleanPrimaryContext ctx) {
ASTNode leftValue = visit(ctx.booleanPrimary());
ASTNode rightValue = visit(ctx.predicate());
if (rightValue instanceof ColumnSegment) {
return new PredicateSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), (ColumnSegment) leftValue, (ColumnSegment) rightValue);
}
return new PredicateSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(),
(ColumnSegment) leftValue, new PredicateCompareRightValue(ctx.comparisonOperator().getText(), (ExpressionSegment) rightValue));
}
// Builds a "column IN (...)" predicate from the IN list expressions.
// Fix: the original used Guava's Lists.transform, which returns a *lazy view* —
// visit(input) (and its parameter-index side effects) would run only when the
// list is iterated, and re-run on every iteration. Materialize eagerly so each
// expression is visited exactly once, in source order.
private PredicateSegment createInSegment(final PredicateContext ctx) {
ColumnSegment column = (ColumnSegment) visit(ctx.bitExpr(0));
Collection<ExpressionSegment> segments = new LinkedList<>();
for (ExprContext each : ctx.expr()) {
segments.add((ExpressionSegment) visit(each));
}
return new PredicateSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), column, new PredicateInRightValue(segments));
}
// Builds "column BETWEEN lower AND upper"; per the grammar, the lower bound is
// the second bit expression and the upper bound comes from the predicate branch.
private PredicateSegment createBetweenSegment(final PredicateContext ctx) {
ColumnSegment column = (ColumnSegment) visit(ctx.bitExpr(0));
ExpressionSegment between = (ExpressionSegment) visit(ctx.bitExpr(1));
ExpressionSegment and = (ExpressionSegment) visit(ctx.predicate());
return new PredicateSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), column, new PredicateBetweenRightValue(between, and));
}
// Combines two predicate trees with AND/OR; unknown operators are a programming
// error and fail fast via Preconditions.
private OrPredicateSegment mergePredicateSegment(final ASTNode left, final ASTNode right, final String operator) {
Optional<LogicalOperator> logicalOperator = LogicalOperator.valueFrom(operator);
Preconditions.checkState(logicalOperator.isPresent());
if (LogicalOperator.OR == logicalOperator.get()) {
return mergeOrPredicateSegment(left, right);
}
return mergeAndPredicateSegment(left, right);
}
// OR merge: simply concatenates both sides' AND groups.
private OrPredicateSegment mergeOrPredicateSegment(final ASTNode left, final ASTNode right) {
OrPredicateSegment result = new OrPredicateSegment();
result.getAndPredicates().addAll(getAndPredicates(left));
result.getAndPredicates().addAll(getAndPredicates(right));
return result;
}
// AND merge: distributes AND over OR — the cartesian product of the two sides'
// AND groups, i.e. (a OR b) AND (c OR d) => (a AND c) OR (a AND d) OR ...
private OrPredicateSegment mergeAndPredicateSegment(final ASTNode left, final ASTNode right) {
OrPredicateSegment result = new OrPredicateSegment();
for (AndPredicate eachLeft : getAndPredicates(left)) {
for (AndPredicate eachRight : getAndPredicates(right)) {
result.getAndPredicates().add(createAndPredicate(eachLeft, eachRight));
}
}
return result;
}
// Concatenates the predicates of two AND groups into a new group.
private AndPredicate createAndPredicate(final AndPredicate left, final AndPredicate right) {
AndPredicate result = new AndPredicate();
result.getPredicates().addAll(left.getPredicates());
result.getPredicates().addAll(right.getPredicates());
return result;
}
// Normalizes any predicate-shaped node into a collection of AND groups:
// an OR segment yields its groups, an AND group yields itself, and a bare
// predicate is wrapped into a one-element AND group.
private Collection<AndPredicate> getAndPredicates(final ASTNode astNode) {
if (astNode instanceof OrPredicateSegment) {
return ((OrPredicateSegment) astNode).getAndPredicates();
}
if (astNode instanceof AndPredicate) {
return Collections.singleton((AndPredicate) astNode);
}
AndPredicate wrapped = new AndPredicate();
wrapped.getPredicates().add((PredicateSegment) astNode);
return Collections.singleton(wrapped);
}
// True when any select specification evaluates to a distinct flag.
private boolean isDistinct(final SelectClauseContext ctx) {
for (SelectSpecificationContext each : ctx.selectSpecification()) {
boolean eachDistinct = ((BooleanValue) visit(each)).isCorrect();
if (eachDistinct) {
return true;
}
}
return false;
}
// Visits expressions only for their side effect of advancing the
// parameter-marker counter; the results are discarded.
private void calculateParameterCount(final Collection<ExprContext> exprContexts) {
for (ExprContext each : exprContexts) {
visit(each);
}
}
}
|
Yeah, I agree. It may have a thread-safety problem. I will update this part.
|
// Asynchronously creates a TaskManager pod, owned by the JobManager Deployment.
// Fixes two issues in the original: (1) the shared masterDeployment field was
// lazily read and written on the caller thread with no synchronization, racing
// with concurrent invocations; (2) the blocking Deployment lookup ran on the
// caller thread instead of the kube-client executor. The lookup now happens
// inside the async task under a lock, and the resolved value is read into a
// local before use.
public CompletableFuture<Void> createTaskManagerPod(KubernetesPod kubernetesPod) {
    return CompletableFuture.runAsync(
            () -> {
                final Deployment ownerDeployment;
                synchronized (this) {
                    if (masterDeployment == null) {
                        masterDeployment =
                                this.internalClient
                                        .apps()
                                        .deployments()
                                        .withName(KubernetesUtils.getDeploymentName(clusterId))
                                        .get();
                    }
                    ownerDeployment = masterDeployment;
                }
                if (ownerDeployment == null) {
                    throw new RuntimeException(
                            "Failed to find Deployment named "
                                    + clusterId
                                    + " in namespace "
                                    + this.namespace);
                }
                setOwnerReference(
                        ownerDeployment,
                        Collections.singletonList(kubernetesPod.getInternalResource()));
                LOG.debug(
                        "Start to create pod with spec {}{}",
                        System.lineSeparator(),
                        KubernetesUtils.tryToGetPrettyPrintYaml(
                                kubernetesPod.getInternalResource()));
                this.internalClient.pods().create(kubernetesPod.getInternalResource());
            },
            kubeClientExecutorService);
}
|
if (masterDeployment == null) {
|
// Asynchronously creates a TaskManager pod on the kube-client executor, setting
// the JobManager Deployment as its owner.
// Lazy lookup is guarded through masterDeploymentRef — presumably an
// AtomicReference<Deployment> field (declaration not visible here; TODO confirm):
// compareAndSet(null, ...) ensures only the first successful lookup is cached,
// and the checkNotNull re-read guarantees a non-null owner afterwards.
public CompletableFuture<Void> createTaskManagerPod(KubernetesPod kubernetesPod) {
    return CompletableFuture.runAsync(
            () -> {
                if (masterDeploymentRef.get() == null) {
                    // Blocking API lookup runs on the executor thread, not the caller.
                    final Deployment masterDeployment =
                            this.internalClient
                                    .apps()
                                    .deployments()
                                    .withName(KubernetesUtils.getDeploymentName(clusterId))
                                    .get();
                    if (masterDeployment == null) {
                        throw new RuntimeException(
                                "Failed to find Deployment named "
                                        + clusterId
                                        + " in namespace "
                                        + this.namespace);
                    }
                    masterDeploymentRef.compareAndSet(null, masterDeployment);
                }
                setOwnerReference(
                        checkNotNull(masterDeploymentRef.get()),
                        Collections.singletonList(kubernetesPod.getInternalResource()));
                LOG.debug(
                        "Start to create pod with spec {}{}",
                        System.lineSeparator(),
                        KubernetesUtils.tryToGetPrettyPrintYaml(
                                kubernetesPod.getInternalResource()));
                this.internalClient.pods().create(kubernetesPod.getInternalResource());
            },
            kubeClientExecutorService);
}
|
class Fabric8FlinkKubeClient implements FlinkKubeClient {
private static final Logger LOG = LoggerFactory.getLogger(Fabric8FlinkKubeClient.class);
private final String clusterId;
private final String namespace;
private final int maxRetryAttempts;
private final KubernetesConfigOptions.NodePortAddressType nodePortAddressType;
private final NamespacedKubernetesClient internalClient;
private final ExecutorService kubeClientExecutorService;
private Deployment masterDeployment;
    /**
     * Creates the client wrapper. The cluster id is mandatory and fails fast when
     * absent; namespace, retry limit and node-port address type are read from the
     * Flink configuration. The Kubernetes client and executor are owned by this
     * instance and released in {@code close()}.
     */
    public Fabric8FlinkKubeClient(
            Configuration flinkConfig,
            NamespacedKubernetesClient client,
            ExecutorService executorService) {
        this.clusterId =
                flinkConfig
                        .getOptional(KubernetesConfigOptions.CLUSTER_ID)
                        .orElseThrow(
                                () ->
                                        new IllegalArgumentException(
                                                String.format(
                                                        "Configuration option '%s' is not set.",
                                                        KubernetesConfigOptions.CLUSTER_ID.key())));
        this.namespace = flinkConfig.getString(KubernetesConfigOptions.NAMESPACE);
        this.maxRetryAttempts =
                flinkConfig.getInteger(
                        KubernetesConfigOptions.KUBERNETES_TRANSACTIONAL_OPERATION_MAX_RETRIES);
        this.nodePortAddressType =
                flinkConfig.get(
                        KubernetesConfigOptions.REST_SERVICE_EXPOSED_NODE_PORT_ADDRESS_TYPE);
        this.internalClient = checkNotNull(client);
        this.kubeClientExecutorService = checkNotNull(executorService);
    }
    // Creates the JobManager Deployment first, then wires the created Deployment
    // as owner of all accompanying resources before creating/replacing them —
    // presumably so Kubernetes garbage-collects them together with the
    // Deployment (TODO confirm intended ownership semantics).
    @Override
    public void createJobManagerComponent(KubernetesJobManagerSpecification kubernetesJMSpec) {
        final Deployment deployment = kubernetesJMSpec.getDeployment();
        final List<HasMetadata> accompanyingResources = kubernetesJMSpec.getAccompanyingResources();
        LOG.debug(
                "Start to create deployment with spec {}{}",
                System.lineSeparator(),
                KubernetesUtils.tryToGetPrettyPrintYaml(deployment));
        final Deployment createdDeployment =
                this.internalClient.apps().deployments().create(deployment);
        setOwnerReference(createdDeployment, accompanyingResources);
        this.internalClient.resourceList(accompanyingResources).createOrReplace();
    }
@Override
@Override
public CompletableFuture<Void> stopPod(String podName) {
return CompletableFuture.runAsync(
() -> this.internalClient.pods().withName(podName).delete(),
kubeClientExecutorService);
}
    // Resolves the REST endpoint of the given cluster. For ClusterIP-style
    // exposure the namespaced internal service name is returned directly;
    // otherwise the address is derived from the service itself.
    @Override
    public Optional<Endpoint> getRestEndpoint(String clusterId) {
        Optional<KubernetesService> restService =
                getService(KubernetesService.ServiceType.REST_SERVICE, clusterId);
        if (!restService.isPresent()) {
            return Optional.empty();
        }
        final Service service = restService.get().getInternalResource();
        final int restPort = getRestPortFromExternalService(service);
        final KubernetesConfigOptions.ServiceExposedType serviceExposedType =
                ServiceType.classify(service);
        if (serviceExposedType.isClusterIP()) {
            return Optional.of(
                    new Endpoint(
                            ExternalServiceDecorator.getNamespacedExternalServiceName(
                                    clusterId, namespace),
                            restPort));
        }
        return getRestEndPointFromService(service, restPort);
    }
@Override
public List<KubernetesPod> getPodsWithLabels(Map<String, String> labels) {
final List<Pod> podList = this.internalClient.pods().withLabels(labels).list().getItems();
if (podList == null || podList.isEmpty()) {
return new ArrayList<>();
}
return podList.stream().map(KubernetesPod::new).collect(Collectors.toList());
}
    // Deletes the cluster's Deployment with cascading enabled, removing its
    // dependents along with it.
    @Override
    public void stopAndCleanupCluster(String clusterId) {
        this.internalClient
                .apps()
                .deployments()
                .withName(KubernetesUtils.getDeploymentName(clusterId))
                .cascading(true)
                .delete();
    }
    // Fetches the requested service fresh from the API server (fromServer()
    // bypasses any client-side cache); empty when it does not exist.
    @Override
    public Optional<KubernetesService> getService(
            KubernetesService.ServiceType serviceType, String clusterId) {
        final String serviceName = getServiceName(serviceType, clusterId);
        final Service service =
                this.internalClient.services().withName(serviceName).fromServer().get();
        if (service == null) {
            LOG.debug("Service {} does not exist", serviceName);
            return Optional.empty();
        }
        return Optional.of(new KubernetesService(service));
    }
    // Establishes a pod watch for the given labels, retrying up to
    // maxRetryAttempts times but only on KubernetesClientException. Note the
    // trailing get(): this method blocks the caller until the watch is
    // established (or the retries are exhausted).
    @Override
    public KubernetesWatch watchPodsAndDoCallback(
            Map<String, String> labels, WatchCallbackHandler<KubernetesPod> podCallbackHandler)
            throws Exception {
        return FutureUtils.retry(
                        () ->
                                CompletableFuture.supplyAsync(
                                        () ->
                                                new KubernetesWatch(
                                                        this.internalClient
                                                                .pods()
                                                                .withLabels(labels)
                                                                .watch(
                                                                        new KubernetesPodsWatcher(
                                                                                podCallbackHandler))),
                                        kubeClientExecutorService),
                        maxRetryAttempts,
                        t ->
                                ExceptionUtils.findThrowable(t, KubernetesClientException.class)
                                        .isPresent(),
                        kubeClientExecutorService)
                .get();
    }
    // Factory for a leader elector bound to this client instance.
    @Override
    public KubernetesLeaderElector createLeaderElector(
            KubernetesLeaderElectionConfiguration leaderElectionConfiguration,
            KubernetesLeaderElector.LeaderCallbackHandler leaderCallbackHandler) {
        return new KubernetesLeaderElector(
                this.internalClient, leaderElectionConfiguration, leaderCallbackHandler);
    }
    // Asynchronously creates a ConfigMap, wrapping any failure into a
    // KubernetesException that names the ConfigMap.
    @Override
    public CompletableFuture<Void> createConfigMap(KubernetesConfigMap configMap) {
        final String configMapName = configMap.getName();
        return CompletableFuture.runAsync(
                        () ->
                                this.internalClient
                                        .configMaps()
                                        .create(configMap.getInternalResource()),
                        kubeClientExecutorService)
                .exceptionally(
                        throwable -> {
                            throw new CompletionException(
                                    new KubernetesException(
                                            "Failed to create ConfigMap " + configMapName,
                                            throwable));
                        });
    }
@Override
public Optional<KubernetesConfigMap> getConfigMap(String name) {
final ConfigMap configMap = this.internalClient.configMaps().withName(name).get();
return configMap == null
? Optional.empty()
: Optional.of(new KubernetesConfigMap(configMap));
}
    // Retries the optimistic check-and-update loop up to maxRetryAttempts times,
    // but only when the failure stems from a KubernetesClientException.
    @Override
    public CompletableFuture<Boolean> checkAndUpdateConfigMap(
            String configMapName,
            Function<KubernetesConfigMap, Optional<KubernetesConfigMap>> updateFunction) {
        return FutureUtils.retry(
                () -> attemptCheckAndUpdateConfigMap(configMapName, updateFunction),
                maxRetryAttempts,
                t -> ExceptionUtils.findThrowable(t, KubernetesClientException.class).isPresent(),
                kubeClientExecutorService);
    }
    // One optimistic-locking attempt: fetch the ConfigMap, let the caller compute
    // an update, then replace it locked to the observed resource version so a
    // concurrent writer causes a conflict instead of a lost update. Returns true
    // when an update was applied, false when the update function declined.
    // A failed replace is surfaced as PossibleInconsistentStateException because
    // the write may or may not have reached the API server.
    private CompletableFuture<Boolean> attemptCheckAndUpdateConfigMap(
            String configMapName,
            Function<KubernetesConfigMap, Optional<KubernetesConfigMap>> updateFunction) {
        return CompletableFuture.supplyAsync(
                () -> {
                    final KubernetesConfigMap configMap =
                            getConfigMap(configMapName)
                                    .orElseThrow(
                                            () ->
                                                    new CompletionException(
                                                            new KubernetesException(
                                                                    "Cannot retry checkAndUpdateConfigMap with configMap "
                                                                            + configMapName
                                                                            + " because it does not exist.")));
                    final Optional<KubernetesConfigMap> maybeUpdate =
                            updateFunction.apply(configMap);
                    if (maybeUpdate.isPresent()) {
                        try {
                            internalClient
                                    .configMaps()
                                    .withName(configMapName)
                                    .lockResourceVersion(maybeUpdate.get().getResourceVersion())
                                    .replace(maybeUpdate.get().getInternalResource());
                            return true;
                        } catch (Throwable throwable) {
                            LOG.debug(
                                    "Failed to update ConfigMap {} with data {}. Trying again.",
                                    configMap.getName(),
                                    configMap.getData());
                            throw new CompletionException(
                                    new PossibleInconsistentStateException(throwable));
                        }
                    }
                    return false;
                },
                kubeClientExecutorService);
    }
/**
 * Asynchronously deletes every ConfigMap matching the given label selector.
 *
 * @param labels label selector identifying the ConfigMaps to remove
 */
@Override
public CompletableFuture<Void> deleteConfigMapsByLabels(Map<String, String> labels) {
    final Runnable bulkDelete =
            () -> internalClient.configMaps().withLabels(labels).delete();
    return CompletableFuture.runAsync(bulkDelete, kubeClientExecutorService);
}
/**
 * Asynchronously deletes the ConfigMap with the given name.
 *
 * @param configMapName name of the ConfigMap to remove
 */
@Override
public CompletableFuture<Void> deleteConfigMap(String configMapName) {
    final Runnable deleteByName =
            () -> internalClient.configMaps().withName(configMapName).delete();
    return CompletableFuture.runAsync(deleteByName, kubeClientExecutorService);
}
/**
 * Creates a shared-informer based watcher for ConfigMaps selected by the given labels.
 * The caller owns the returned watcher and is responsible for closing it.
 */
@Override
public KubernetesConfigMapSharedWatcher createConfigMapSharedWatcher(
        Map<String, String> labels) {
    return new KubernetesConfigMapSharedInformer(this.internalClient, labels);
}
/**
 * Closes the underlying Kubernetes client first, then gracefully shuts down the client's
 * executor service with a 5 second grace period.
 */
@Override
public void close() {
    this.internalClient.close();
    ExecutorUtils.gracefulShutdown(5, TimeUnit.SECONDS, this.kubeClientExecutorService);
}
/**
 * Parses a pod template file into a {@link KubernetesPod}.
 *
 * @param file the template file on local disk
 * @return the parsed pod
 * @throws FlinkRuntimeException if the file does not exist
 */
@Override
public KubernetesPod loadPodFromTemplateFile(File file) {
    if (file.exists()) {
        return new KubernetesPod(this.internalClient.pods().load(file).get());
    }
    // Fail fast with a descriptive message when the configured template path is wrong.
    throw new FlinkRuntimeException(
            String.format("Pod template file %s does not exist.", file));
}
/**
 * Asynchronously rewrites the target port of the named port on the cluster's Service.
 * If the Service does not exist the call completes without doing anything.
 *
 * @param serviceType which Service (REST or internal) to update
 * @param clusterId cluster whose Service is updated
 * @param portName name of the port entry to rewrite
 * @param targetPort new target port value
 */
@Override
public CompletableFuture<Void> updateServiceTargetPort(
        KubernetesService.ServiceType serviceType,
        String clusterId,
        String portName,
        int targetPort) {
    LOG.debug("Update {} target port to {}", portName, targetPort);
    return CompletableFuture.runAsync(
            () ->
                    getService(serviceType, clusterId)
                            .ifPresent(
                                    service -> {
                                        // Rebuild the Service spec, editing only the port
                                        // whose name matches portName.
                                        final Service updatedService =
                                                new ServiceBuilder(
                                                                service.getInternalResource())
                                                        .editSpec()
                                                        .editMatchingPort(
                                                                servicePortBuilder ->
                                                                        servicePortBuilder
                                                                                .build()
                                                                                .getName()
                                                                                .equals(
                                                                                        portName))
                                                        .withTargetPort(
                                                                new IntOrString(targetPort))
                                                        .endPort()
                                                        .endSpec()
                                                        .build();
                                        this.internalClient
                                                .services()
                                                .withName(
                                                        getServiceName(serviceType, clusterId))
                                                .replace(updatedService);
                                    }),
            kubeClientExecutorService);
}
/**
 * Maps a service type to the concrete Kubernetes Service name for the given cluster.
 *
 * @param serviceType the service type
 * @param clusterId the cluster id
 * @return the Kubernetes Service name
 * @throws IllegalArgumentException if the service type is not recognized
 */
private String getServiceName(KubernetesService.ServiceType serviceType, String clusterId) {
    if (serviceType == KubernetesService.ServiceType.REST_SERVICE) {
        return ExternalServiceDecorator.getExternalServiceName(clusterId);
    }
    if (serviceType == KubernetesService.ServiceType.INTERNAL_SERVICE) {
        return InternalServiceDecorator.getInternalServiceName(clusterId);
    }
    throw new IllegalArgumentException(
            "Unrecognized service type: " + serviceType.name());
}
/**
 * Attaches a controller owner reference pointing at the given deployment to every resource,
 * so Kubernetes garbage-collects them when the deployment is deleted.
 */
private void setOwnerReference(Deployment deployment, List<HasMetadata> resources) {
    final OwnerReference ownerReference =
            new OwnerReferenceBuilder()
                    .withName(deployment.getMetadata().getName())
                    .withApiVersion(deployment.getApiVersion())
                    .withUid(deployment.getMetadata().getUid())
                    .withKind(deployment.getKind())
                    .withController(true)
                    .withBlockOwnerDeletion(true)
                    .build();
    final List<OwnerReference> ownerReferences = Collections.singletonList(ownerReference);
    for (HasMetadata resource : resources) {
        resource.getMetadata().setOwnerReferences(ownerReferences);
    }
}
/**
 * Extracts the REST port from the external Service, honoring its exposed type: the service
 * port for ClusterIP/LoadBalancer, the node port for NodePort.
 *
 * @throws RuntimeException if no port named {@code Constants.REST_PORT_NAME} exists or the
 *     Service type is unrecognized
 */
private int getRestPortFromExternalService(Service externalService) {
    final ServicePort restServicePort =
            externalService.getSpec().getPorts().stream()
                    .filter(port -> port.getName().equals(Constants.REST_PORT_NAME))
                    .findFirst()
                    .orElseThrow(
                            () ->
                                    new RuntimeException(
                                            "Failed to find port \""
                                                    + Constants.REST_PORT_NAME
                                                    + "\" in Service \""
                                                    + ExternalServiceDecorator
                                                            .getExternalServiceName(this.clusterId)
                                                    + "\""));

    final KubernetesConfigOptions.ServiceExposedType exposedType =
            KubernetesConfigOptions.ServiceExposedType.valueOf(
                    externalService.getSpec().getType());
    switch (exposedType) {
        case ClusterIP:
        case LoadBalancer:
            return restServicePort.getPort();
        case NodePort:
            return restServicePort.getNodePort();
        default:
            throw new RuntimeException("Unrecognized Service type: " + exposedType);
    }
}
/**
 * Derives a REST endpoint from the Service status: prefers the load balancer status when
 * present, otherwise falls back to the first configured external IP. Returns empty when the
 * Service has no status or no usable address.
 */
private Optional<Endpoint> getRestEndPointFromService(Service service, int restPort) {
    if (service.getStatus() == null) {
        return Optional.empty();
    }

    final LoadBalancerStatus loadBalancerStatus = service.getStatus().getLoadBalancer();
    if (loadBalancerStatus != null) {
        return getLoadBalancerRestEndpoint(loadBalancerStatus, restPort);
    }

    final List<String> externalIPs =
            service.getSpec() == null ? null : service.getSpec().getExternalIPs();
    if (externalIPs != null && !externalIPs.isEmpty()) {
        final String address = externalIPs.get(0);
        if (address != null && !address.isEmpty()) {
            return Optional.of(new Endpoint(address, restPort));
        }
    }
    return Optional.empty();
}
/**
 * Resolves a REST endpoint from a load-balancer status: uses the ingress IP (falling back to
 * its hostname), and when no ingress exists, falls back to any node address of the configured
 * {@code nodePortAddressType}. Returns empty if no usable address is found.
 */
private Optional<Endpoint> getLoadBalancerRestEndpoint(
        LoadBalancerStatus loadBalancer, int restPort) {
    boolean hasIngress =
            loadBalancer.getIngress() != null && !loadBalancer.getIngress().isEmpty();
    String address;
    if (hasIngress) {
        address = loadBalancer.getIngress().get(0).getIp();
        // Use hostname when the ingress exposes no plain IP.
        if (address == null || address.isEmpty()) {
            address = loadBalancer.getIngress().get(0).getHostname();
        }
    } else {
        // No ingress: pick any node address matching the configured address type
        // (NodePort-style access).
        address =
                internalClient.nodes().list().getItems().stream()
                        .flatMap(node -> node.getStatus().getAddresses().stream())
                        .filter(
                                nodeAddress ->
                                        nodePortAddressType
                                                .name()
                                                .equals(nodeAddress.getType()))
                        .map(NodeAddress::getAddress)
                        .filter(ip -> !ip.isEmpty())
                        .findAny()
                        .orElse(null);
        if (address == null) {
            LOG.warn(
                    "Unable to find any node ip with type [{}]. Please see [{}] config option for more details.",
                    nodePortAddressType,
                    KubernetesConfigOptions.REST_SERVICE_EXPOSED_NODE_PORT_ADDRESS_TYPE.key());
        }
    }
    boolean noAddress = address == null || address.isEmpty();
    return noAddress ? Optional.empty() : Optional.of(new Endpoint(address, restPort));
}
}
|
/**
 * Implementation of {@link FlinkKubeClient} backed by the fabric8
 * {@link NamespacedKubernetesClient}. Blocking Kubernetes API calls are off-loaded onto the
 * supplied executor service so most operations return non-blocking {@link CompletableFuture}s.
 */
class Fabric8FlinkKubeClient implements FlinkKubeClient {

    private static final Logger LOG = LoggerFactory.getLogger(Fabric8FlinkKubeClient.class);

    private final String clusterId;
    private final String namespace;
    // Max attempts for transactional ConfigMap operations; see checkAndUpdateConfigMap.
    private final int maxRetryAttempts;
    private final KubernetesConfigOptions.NodePortAddressType nodePortAddressType;

    private final NamespacedKubernetesClient internalClient;
    private final ExecutorService kubeClientExecutorService;
    // NOTE(review): initialized but never read in this class as shown — confirm it is used
    // elsewhere before removing.
    private final AtomicReference<Deployment> masterDeploymentRef;

    /**
     * @param flinkConfig must contain {@code kubernetes.cluster-id}
     * @param client the fabric8 client; ownership is taken (closed in {@link #close()})
     * @param executorService executor for blocking API calls; shut down in {@link #close()}
     */
    public Fabric8FlinkKubeClient(
            Configuration flinkConfig,
            NamespacedKubernetesClient client,
            ExecutorService executorService) {
        this.clusterId =
                flinkConfig
                        .getOptional(KubernetesConfigOptions.CLUSTER_ID)
                        .orElseThrow(
                                () ->
                                        new IllegalArgumentException(
                                                String.format(
                                                        "Configuration option '%s' is not set.",
                                                        KubernetesConfigOptions.CLUSTER_ID.key())));
        this.namespace = flinkConfig.getString(KubernetesConfigOptions.NAMESPACE);
        this.maxRetryAttempts =
                flinkConfig.getInteger(
                        KubernetesConfigOptions.KUBERNETES_TRANSACTIONAL_OPERATION_MAX_RETRIES);
        this.nodePortAddressType =
                flinkConfig.get(
                        KubernetesConfigOptions.REST_SERVICE_EXPOSED_NODE_PORT_ADDRESS_TYPE);
        this.internalClient = checkNotNull(client);
        this.kubeClientExecutorService = checkNotNull(executorService);
        this.masterDeploymentRef = new AtomicReference<>();
    }

    /** Creates the JobManager deployment plus its accompanying resources (owned by it). */
    @Override
    public void createJobManagerComponent(KubernetesJobManagerSpecification kubernetesJMSpec) {
        final Deployment deployment = kubernetesJMSpec.getDeployment();
        final List<HasMetadata> accompanyingResources = kubernetesJMSpec.getAccompanyingResources();
        LOG.debug(
                "Start to create deployment with spec {}{}",
                System.lineSeparator(),
                KubernetesUtils.tryToGetPrettyPrintYaml(deployment));
        final Deployment createdDeployment =
                this.internalClient.apps().deployments().create(deployment);
        // Owner references let Kubernetes garbage-collect the accompanying resources when the
        // deployment is deleted.
        setOwnerReference(createdDeployment, accompanyingResources);
        this.internalClient.resourceList(accompanyingResources).createOrReplace();
    }

    // NOTE(review): the original source had two consecutive @Override annotations here with no
    // method in between (a compile error; a method appears to have been accidentally removed).
    // The stray annotation has been dropped.
    /** Asynchronously deletes the pod with the given name. */
    @Override
    public CompletableFuture<Void> stopPod(String podName) {
        return CompletableFuture.runAsync(
                () -> this.internalClient.pods().withName(podName).delete(),
                kubeClientExecutorService);
    }

    /**
     * Resolves the externally reachable REST endpoint of the given cluster, or empty if the
     * REST Service does not exist. For ClusterIP services the namespaced service DNS name is
     * returned, since no external address exists.
     */
    @Override
    public Optional<Endpoint> getRestEndpoint(String clusterId) {
        Optional<KubernetesService> restService =
                getService(KubernetesService.ServiceType.REST_SERVICE, clusterId);
        if (!restService.isPresent()) {
            return Optional.empty();
        }
        final Service service = restService.get().getInternalResource();
        final int restPort = getRestPortFromExternalService(service);

        final KubernetesConfigOptions.ServiceExposedType serviceExposedType =
                ServiceType.classify(service);

        if (serviceExposedType.isClusterIP()) {
            return Optional.of(
                    new Endpoint(
                            ExternalServiceDecorator.getNamespacedExternalServiceName(
                                    clusterId, namespace),
                            restPort));
        }
        return getRestEndPointFromService(service, restPort);
    }

    /** Returns all pods matching the given labels; never null. */
    @Override
    public List<KubernetesPod> getPodsWithLabels(Map<String, String> labels) {
        final List<Pod> podList = this.internalClient.pods().withLabels(labels).list().getItems();
        if (podList == null || podList.isEmpty()) {
            return new ArrayList<>();
        }
        return podList.stream().map(KubernetesPod::new).collect(Collectors.toList());
    }

    /** Cascading delete of the JobManager deployment, removing all resources it owns. */
    @Override
    public void stopAndCleanupCluster(String clusterId) {
        this.internalClient
                .apps()
                .deployments()
                .withName(KubernetesUtils.getDeploymentName(clusterId))
                .cascading(true)
                .delete();
    }

    /**
     * Fetches the Service of the given type from the API server ({@code fromServer} bypasses
     * any client-side cache). Returns empty if it does not exist.
     */
    @Override
    public Optional<KubernetesService> getService(
            KubernetesService.ServiceType serviceType, String clusterId) {
        final String serviceName = getServiceName(serviceType, clusterId);
        final Service service =
                this.internalClient.services().withName(serviceName).fromServer().get();
        if (service == null) {
            LOG.debug("Service {} does not exist", serviceName);
            return Optional.empty();
        }
        return Optional.of(new KubernetesService(service));
    }

    /**
     * Establishes a pod watch for the given labels, retrying on
     * {@link KubernetesClientException}. Blocks the caller until the watch is established
     * (note the terminal {@code get()}).
     */
    @Override
    public KubernetesWatch watchPodsAndDoCallback(
            Map<String, String> labels, WatchCallbackHandler<KubernetesPod> podCallbackHandler)
            throws Exception {
        return FutureUtils.retry(
                        () ->
                                CompletableFuture.supplyAsync(
                                        () ->
                                                new KubernetesWatch(
                                                        this.internalClient
                                                                .pods()
                                                                .withLabels(labels)
                                                                .watch(
                                                                        new KubernetesPodsWatcher(
                                                                                podCallbackHandler))),
                                        kubeClientExecutorService),
                        maxRetryAttempts,
                        t ->
                                ExceptionUtils.findThrowable(t, KubernetesClientException.class)
                                        .isPresent(),
                        kubeClientExecutorService)
                .get();
    }

    /** Creates a leader elector using the ConfigMap-based lock of this client. */
    @Override
    public KubernetesLeaderElector createLeaderElector(
            KubernetesLeaderElectionConfiguration leaderElectionConfiguration,
            KubernetesLeaderElector.LeaderCallbackHandler leaderCallbackHandler) {
        return new KubernetesLeaderElector(
                this.internalClient, leaderElectionConfiguration, leaderCallbackHandler);
    }

    /**
     * Asynchronously creates the given ConfigMap; any failure is surfaced as a
     * {@link KubernetesException} naming the ConfigMap.
     */
    @Override
    public CompletableFuture<Void> createConfigMap(KubernetesConfigMap configMap) {
        final String configMapName = configMap.getName();
        return CompletableFuture.runAsync(
                        () ->
                                this.internalClient
                                        .configMaps()
                                        .create(configMap.getInternalResource()),
                        kubeClientExecutorService)
                .exceptionally(
                        throwable -> {
                            throw new CompletionException(
                                    new KubernetesException(
                                            "Failed to create ConfigMap " + configMapName,
                                            throwable));
                        });
    }

    /** Looks up the ConfigMap with the given name, or empty if it does not exist. */
    @Override
    public Optional<KubernetesConfigMap> getConfigMap(String name) {
        final ConfigMap configMap = this.internalClient.configMaps().withName(name).get();
        return configMap == null
                ? Optional.empty()
                : Optional.of(new KubernetesConfigMap(configMap));
    }

    /**
     * Atomically reads, transforms and replaces the named ConfigMap, retrying up to
     * {@code maxRetryAttempts} times on {@link KubernetesClientException}.
     */
    @Override
    public CompletableFuture<Boolean> checkAndUpdateConfigMap(
            String configMapName,
            Function<KubernetesConfigMap, Optional<KubernetesConfigMap>> updateFunction) {
        return FutureUtils.retry(
                () -> attemptCheckAndUpdateConfigMap(configMapName, updateFunction),
                maxRetryAttempts,
                t -> ExceptionUtils.findThrowable(t, KubernetesClientException.class).isPresent(),
                kubeClientExecutorService);
    }

    /**
     * One read-modify-replace attempt. Uses the resource version as an optimistic lock; a
     * failed replace is wrapped into {@link PossibleInconsistentStateException} because it is
     * unknown whether the write reached the API server.
     */
    private CompletableFuture<Boolean> attemptCheckAndUpdateConfigMap(
            String configMapName,
            Function<KubernetesConfigMap, Optional<KubernetesConfigMap>> updateFunction) {
        return CompletableFuture.supplyAsync(
                () -> {
                    final KubernetesConfigMap configMap =
                            getConfigMap(configMapName)
                                    .orElseThrow(
                                            () ->
                                                    new CompletionException(
                                                            new KubernetesException(
                                                                    "Cannot retry checkAndUpdateConfigMap with configMap "
                                                                            + configMapName
                                                                            + " because it does not exist.")));
                    final Optional<KubernetesConfigMap> maybeUpdate =
                            updateFunction.apply(configMap);
                    if (maybeUpdate.isPresent()) {
                        try {
                            internalClient
                                    .configMaps()
                                    .withName(configMapName)
                                    .lockResourceVersion(maybeUpdate.get().getResourceVersion())
                                    .replace(maybeUpdate.get().getInternalResource());
                            return true;
                        } catch (Throwable throwable) {
                            LOG.debug(
                                    "Failed to update ConfigMap {} with data {}. Trying again.",
                                    configMap.getName(),
                                    configMap.getData());
                            throw new CompletionException(
                                    new PossibleInconsistentStateException(throwable));
                        }
                    }
                    // Update function returned empty: nothing to write.
                    return false;
                },
                kubeClientExecutorService);
    }

    /** Asynchronously deletes every ConfigMap matching the given labels. */
    @Override
    public CompletableFuture<Void> deleteConfigMapsByLabels(Map<String, String> labels) {
        return CompletableFuture.runAsync(
                () -> this.internalClient.configMaps().withLabels(labels).delete(),
                kubeClientExecutorService);
    }

    /** Asynchronously deletes the ConfigMap with the given name. */
    @Override
    public CompletableFuture<Void> deleteConfigMap(String configMapName) {
        return CompletableFuture.runAsync(
                () -> this.internalClient.configMaps().withName(configMapName).delete(),
                kubeClientExecutorService);
    }

    /** Creates a shared-informer based ConfigMap watcher; the caller must close it. */
    @Override
    public KubernetesConfigMapSharedWatcher createConfigMapSharedWatcher(
            Map<String, String> labels) {
        return new KubernetesConfigMapSharedInformer(this.internalClient, labels);
    }

    /** Closes the client, then shuts the executor down with a 5 second grace period. */
    @Override
    public void close() {
        this.internalClient.close();
        ExecutorUtils.gracefulShutdown(5, TimeUnit.SECONDS, this.kubeClientExecutorService);
    }

    /**
     * Parses a pod template file into a {@link KubernetesPod}.
     *
     * @throws FlinkRuntimeException if the file does not exist
     */
    @Override
    public KubernetesPod loadPodFromTemplateFile(File file) {
        if (!file.exists()) {
            throw new FlinkRuntimeException(
                    String.format("Pod template file %s does not exist.", file));
        }
        return new KubernetesPod(this.internalClient.pods().load(file).get());
    }

    /**
     * Asynchronously rewrites the target port of the named port on the cluster's Service;
     * a no-op if the Service does not exist.
     */
    @Override
    public CompletableFuture<Void> updateServiceTargetPort(
            KubernetesService.ServiceType serviceType,
            String clusterId,
            String portName,
            int targetPort) {
        LOG.debug("Update {} target port to {}", portName, targetPort);
        return CompletableFuture.runAsync(
                () ->
                        getService(serviceType, clusterId)
                                .ifPresent(
                                        service -> {
                                            final Service updatedService =
                                                    new ServiceBuilder(
                                                                    service.getInternalResource())
                                                            .editSpec()
                                                            .editMatchingPort(
                                                                    servicePortBuilder ->
                                                                            servicePortBuilder
                                                                                    .build()
                                                                                    .getName()
                                                                                    .equals(
                                                                                            portName))
                                                            .withTargetPort(
                                                                    new IntOrString(targetPort))
                                                            .endPort()
                                                            .endSpec()
                                                            .build();
                                            this.internalClient
                                                    .services()
                                                    .withName(
                                                            getServiceName(serviceType, clusterId))
                                                    .replace(updatedService);
                                        }),
                kubeClientExecutorService);
    }

    /**
     * Get the Kubernetes service name.
     *
     * @param serviceType The service type
     * @param clusterId The cluster id
     * @return Return the Kubernetes service name if the service type is known.
     */
    private String getServiceName(KubernetesService.ServiceType serviceType, String clusterId) {
        switch (serviceType) {
            case REST_SERVICE:
                return ExternalServiceDecorator.getExternalServiceName(clusterId);
            case INTERNAL_SERVICE:
                return InternalServiceDecorator.getInternalServiceName(clusterId);
            default:
                throw new IllegalArgumentException(
                        "Unrecognized service type: " + serviceType.name());
        }
    }

    /** Attaches a controller owner reference (the deployment) to every resource. */
    private void setOwnerReference(Deployment deployment, List<HasMetadata> resources) {
        final OwnerReference deploymentOwnerReference =
                new OwnerReferenceBuilder()
                        .withName(deployment.getMetadata().getName())
                        .withApiVersion(deployment.getApiVersion())
                        .withUid(deployment.getMetadata().getUid())
                        .withKind(deployment.getKind())
                        .withController(true)
                        .withBlockOwnerDeletion(true)
                        .build();
        resources.forEach(
                resource ->
                        resource.getMetadata()
                                .setOwnerReferences(
                                        Collections.singletonList(deploymentOwnerReference)));
    }

    /** Get rest port from the external Service. */
    private int getRestPortFromExternalService(Service externalService) {
        final List<ServicePort> servicePortCandidates =
                externalService.getSpec().getPorts().stream()
                        .filter(x -> x.getName().equals(Constants.REST_PORT_NAME))
                        .collect(Collectors.toList());

        if (servicePortCandidates.isEmpty()) {
            throw new RuntimeException(
                    "Failed to find port \""
                            + Constants.REST_PORT_NAME
                            + "\" in Service \""
                            + ExternalServiceDecorator.getExternalServiceName(this.clusterId)
                            + "\"");
        }

        final ServicePort externalServicePort = servicePortCandidates.get(0);

        final KubernetesConfigOptions.ServiceExposedType externalServiceType =
                KubernetesConfigOptions.ServiceExposedType.valueOf(
                        externalService.getSpec().getType());

        switch (externalServiceType) {
            case ClusterIP:
            case LoadBalancer:
                return externalServicePort.getPort();
            case NodePort:
                return externalServicePort.getNodePort();
            default:
                throw new RuntimeException("Unrecognized Service type: " + externalServiceType);
        }
    }

    /** Prefers the load-balancer status; falls back to the first external IP. */
    private Optional<Endpoint> getRestEndPointFromService(Service service, int restPort) {
        if (service.getStatus() == null) {
            return Optional.empty();
        }

        LoadBalancerStatus loadBalancer = service.getStatus().getLoadBalancer();
        boolean hasExternalIP =
                service.getSpec() != null
                        && service.getSpec().getExternalIPs() != null
                        && !service.getSpec().getExternalIPs().isEmpty();

        if (loadBalancer != null) {
            return getLoadBalancerRestEndpoint(loadBalancer, restPort);
        } else if (hasExternalIP) {
            final String address = service.getSpec().getExternalIPs().get(0);
            if (address != null && !address.isEmpty()) {
                return Optional.of(new Endpoint(address, restPort));
            }
        }
        return Optional.empty();
    }

    /**
     * Uses the ingress IP (or hostname) when present; otherwise falls back to any node
     * address of the configured {@code nodePortAddressType}.
     */
    private Optional<Endpoint> getLoadBalancerRestEndpoint(
            LoadBalancerStatus loadBalancer, int restPort) {
        boolean hasIngress =
                loadBalancer.getIngress() != null && !loadBalancer.getIngress().isEmpty();
        String address;
        if (hasIngress) {
            address = loadBalancer.getIngress().get(0).getIp();
            if (address == null || address.isEmpty()) {
                address = loadBalancer.getIngress().get(0).getHostname();
            }
        } else {
            address =
                    internalClient.nodes().list().getItems().stream()
                            .flatMap(node -> node.getStatus().getAddresses().stream())
                            .filter(
                                    nodeAddress ->
                                            nodePortAddressType
                                                    .name()
                                                    .equals(nodeAddress.getType()))
                            .map(NodeAddress::getAddress)
                            .filter(ip -> !ip.isEmpty())
                            .findAny()
                            .orElse(null);
            if (address == null) {
                LOG.warn(
                        "Unable to find any node ip with type [{}]. Please see [{}] config option for more details.",
                        nodePortAddressType,
                        KubernetesConfigOptions.REST_SERVICE_EXPOSED_NODE_PORT_ADDRESS_TYPE.key());
            }
        }
        boolean noAddress = address == null || address.isEmpty();
        return noAddress ? Optional.empty() : Optional.of(new Endpoint(address, restPort));
    }
}
|
But I don't see that enum constant defined anywhere.
|
/**
 * Resolves every supported feature flag once, bound to the given application id, so later
 * reads are plain field accesses.
 */
public FeatureFlags(FlagSource source, ApplicationId appId) {
    this.defaultTermwiseLimit = flagValue(source, appId, Flags.DEFAULT_TERM_WISE_LIMIT);
    this.useThreePhaseUpdates = flagValue(source, appId, Flags.USE_THREE_PHASE_UPDATES);
    this.feedSequencer = flagValue(source, appId, Flags.FEED_SEQUENCER_TYPE);
    this.responseSequencer = flagValue(source, appId, Flags.RESPONSE_SEQUENCER_TYPE);
    this.numResponseThreads = flagValue(source, appId, Flags.RESPONSE_NUM_THREADS);
    this.skipCommunicationManagerThread = flagValue(source, appId, Flags.SKIP_COMMUNICATIONMANAGER_THREAD);
    this.skipMbusRequestThread = flagValue(source, appId, Flags.SKIP_MBUS_REQUEST_THREAD);
    this.skipMbusReplyThread = flagValue(source, appId, Flags.SKIP_MBUS_REPLY_THREAD);
    this.useAccessControlTlsHandshakeClientAuth = flagValue(source, appId, Flags.USE_ACCESS_CONTROL_CLIENT_AUTHENTICATION);
    this.useAsyncMessageHandlingOnSchedule = flagValue(source, appId, Flags.USE_ASYNC_MESSAGE_HANDLING_ON_SCHEDULE);
    this.feedConcurrency = flagValue(source, appId, Flags.FEED_CONCURRENCY);
    this.reconfigurableZookeeperServer = flagValue(source, appId, Flags.RECONFIGURABLE_ZOOKEEPER_SERVER_FOR_CLUSTER_CONTROLLER);
    this.useBucketExecutorForLidSpaceCompact = flagValue(source, appId, Flags.USE_BUCKET_EXECUTOR_FOR_LID_SPACE_COMPACT);
    this.enableFeedBlockInDistributor = flagValue(source, appId, Flags.ENABLE_FEED_BLOCK_IN_DISTRIBUTOR);
    this.maxDeadBytesRatio = flagValue(source, appId, Flags.MAX_DEAD_BYTES_RATIO);
    this.clusterControllerMaxHeapSizeInMb = flagValue(source, appId, Flags.CLUSTER_CONTROLLER_MAX_HEAP_SIZE_IN_MB);
}
|
this.clusterControllerMaxHeapSizeInMb = flagValue(source, appId, Flags.CLUSTER_CONTROLLER_MAX_HEAP_SIZE_IN_MB);
|
/**
 * Eagerly resolves all feature flags for the given application id; each assignment queries
 * the flag source exactly once.
 */
public FeatureFlags(FlagSource source, ApplicationId appId) {
    this.defaultTermwiseLimit = flagValue(source, appId, Flags.DEFAULT_TERM_WISE_LIMIT);
    this.useThreePhaseUpdates = flagValue(source, appId, Flags.USE_THREE_PHASE_UPDATES);
    this.feedSequencer = flagValue(source, appId, Flags.FEED_SEQUENCER_TYPE);
    this.responseSequencer = flagValue(source, appId, Flags.RESPONSE_SEQUENCER_TYPE);
    this.numResponseThreads = flagValue(source, appId, Flags.RESPONSE_NUM_THREADS);
    this.skipCommunicationManagerThread = flagValue(source, appId, Flags.SKIP_COMMUNICATIONMANAGER_THREAD);
    this.skipMbusRequestThread = flagValue(source, appId, Flags.SKIP_MBUS_REQUEST_THREAD);
    this.skipMbusReplyThread = flagValue(source, appId, Flags.SKIP_MBUS_REPLY_THREAD);
    this.useAccessControlTlsHandshakeClientAuth = flagValue(source, appId, Flags.USE_ACCESS_CONTROL_CLIENT_AUTHENTICATION);
    this.useAsyncMessageHandlingOnSchedule = flagValue(source, appId, Flags.USE_ASYNC_MESSAGE_HANDLING_ON_SCHEDULE);
    this.feedConcurrency = flagValue(source, appId, Flags.FEED_CONCURRENCY);
    this.reconfigurableZookeeperServer = flagValue(source, appId, Flags.RECONFIGURABLE_ZOOKEEPER_SERVER_FOR_CLUSTER_CONTROLLER);
    this.useBucketExecutorForLidSpaceCompact = flagValue(source, appId, Flags.USE_BUCKET_EXECUTOR_FOR_LID_SPACE_COMPACT);
    this.enableFeedBlockInDistributor = flagValue(source, appId, Flags.ENABLE_FEED_BLOCK_IN_DISTRIBUTOR);
    this.maxDeadBytesRatio = flagValue(source, appId, Flags.MAX_DEAD_BYTES_RATIO);
    this.clusterControllerMaxHeapSizeInMb = flagValue(source, appId, Flags.CLUSTER_CONTROLLER_MAX_HEAP_SIZE_IN_MB);
}
|
/**
 * Immutable snapshot of feature-flag values exposed through {@link ModelContext.FeatureFlags}.
 * NOTE(review): the final fields must be assigned in a constructor that is not part of this
 * excerpt — confirm it exists alongside this class.
 */
class FeatureFlags implements ModelContext.FeatureFlags {
    private final double defaultTermwiseLimit;
    private final boolean useThreePhaseUpdates;
    private final String feedSequencer;
    private final String responseSequencer;
    private final int numResponseThreads;
    private final boolean skipCommunicationManagerThread;
    private final boolean skipMbusRequestThread;
    private final boolean skipMbusReplyThread;
    private final boolean useAccessControlTlsHandshakeClientAuth;
    private final boolean useAsyncMessageHandlingOnSchedule;
    private final double feedConcurrency;
    private final boolean reconfigurableZookeeperServer;
    private final boolean useBucketExecutorForLidSpaceCompact;
    private final boolean enableFeedBlockInDistributor;
    private final double maxDeadBytesRatio;
    private final int clusterControllerMaxHeapSizeInMb;

    // Trivial accessors implementing the ModelContext.FeatureFlags interface.
    @Override public double defaultTermwiseLimit() { return defaultTermwiseLimit; }
    @Override public boolean useThreePhaseUpdates() { return useThreePhaseUpdates; }
    @Override public String feedSequencerType() { return feedSequencer; }
    @Override public String responseSequencerType() { return responseSequencer; }
    @Override public int defaultNumResponseThreads() { return numResponseThreads; }
    @Override public boolean skipCommunicationManagerThread() { return skipCommunicationManagerThread; }
    @Override public boolean skipMbusRequestThread() { return skipMbusRequestThread; }
    @Override public boolean skipMbusReplyThread() { return skipMbusReplyThread; }
    @Override public boolean useAccessControlTlsHandshakeClientAuth() { return useAccessControlTlsHandshakeClientAuth; }
    @Override public boolean useAsyncMessageHandlingOnSchedule() { return useAsyncMessageHandlingOnSchedule; }
    @Override public double feedConcurrency() { return feedConcurrency; }
    @Override public boolean reconfigurableZookeeperServer() { return reconfigurableZookeeperServer; }
    @Override public boolean useBucketExecutorForLidSpaceCompact() { return useBucketExecutorForLidSpaceCompact; }
    @Override public boolean enableFeedBlockInDistributor() { return enableFeedBlockInDistributor; }
    @Override public double maxDeadBytesRatio() { return maxDeadBytesRatio; }
    @Override public int clusterControllerMaxHeapSizeInMb() { return clusterControllerMaxHeapSizeInMb; }

    // Binds the flag to the source, scopes it to the application id, and unwraps its value.
    private static <V> V flagValue(FlagSource source, ApplicationId appId, UnboundFlag<? extends V, ?, ?> flag) {
        return flag.bindTo(source)
                .with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm())
                .boxedValue();
    }
}
|
/**
 * Read-only view over resolved feature-flag values, implementing
 * {@link ModelContext.FeatureFlags}. NOTE(review): no constructor is visible in this excerpt;
 * the final fields are presumably assigned elsewhere — verify.
 */
class FeatureFlags implements ModelContext.FeatureFlags {
    private final double defaultTermwiseLimit;
    private final boolean useThreePhaseUpdates;
    private final String feedSequencer;
    private final String responseSequencer;
    private final int numResponseThreads;
    private final boolean skipCommunicationManagerThread;
    private final boolean skipMbusRequestThread;
    private final boolean skipMbusReplyThread;
    private final boolean useAccessControlTlsHandshakeClientAuth;
    private final boolean useAsyncMessageHandlingOnSchedule;
    private final double feedConcurrency;
    private final boolean reconfigurableZookeeperServer;
    private final boolean useBucketExecutorForLidSpaceCompact;
    private final boolean enableFeedBlockInDistributor;
    private final double maxDeadBytesRatio;
    private final int clusterControllerMaxHeapSizeInMb;

    // One-line accessors fulfilling the interface contract.
    @Override public double defaultTermwiseLimit() { return defaultTermwiseLimit; }
    @Override public boolean useThreePhaseUpdates() { return useThreePhaseUpdates; }
    @Override public String feedSequencerType() { return feedSequencer; }
    @Override public String responseSequencerType() { return responseSequencer; }
    @Override public int defaultNumResponseThreads() { return numResponseThreads; }
    @Override public boolean skipCommunicationManagerThread() { return skipCommunicationManagerThread; }
    @Override public boolean skipMbusRequestThread() { return skipMbusRequestThread; }
    @Override public boolean skipMbusReplyThread() { return skipMbusReplyThread; }
    @Override public boolean useAccessControlTlsHandshakeClientAuth() { return useAccessControlTlsHandshakeClientAuth; }
    @Override public boolean useAsyncMessageHandlingOnSchedule() { return useAsyncMessageHandlingOnSchedule; }
    @Override public double feedConcurrency() { return feedConcurrency; }
    @Override public boolean reconfigurableZookeeperServer() { return reconfigurableZookeeperServer; }
    @Override public boolean useBucketExecutorForLidSpaceCompact() { return useBucketExecutorForLidSpaceCompact; }
    @Override public boolean enableFeedBlockInDistributor() { return enableFeedBlockInDistributor; }
    @Override public double maxDeadBytesRatio() { return maxDeadBytesRatio; }
    @Override public int clusterControllerMaxHeapSizeInMb() { return clusterControllerMaxHeapSizeInMb; }

    // Resolves one flag for the given application id and returns its boxed value.
    private static <V> V flagValue(FlagSource source, ApplicationId appId, UnboundFlag<? extends V, ?, ?> flag) {
        return flag.bindTo(source)
                .with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm())
                .boxedValue();
    }
}
|
Different variables have different upper limits.
|
/**
 * Validates that the resolved expression parses as a long within [min, max]; a {@code null}
 * bound means unbounded on that side.
 *
 * @param field variable name used in error messages
 * @throws SemanticException if the value is not a number or violates a bound
 */
private void checkRangeLongVariable(String field, Long min, Long max) {
    final String value = getResolvedExpression().getStringValue();
    final long num;
    try {
        // Only the parse can throw NumberFormatException; keep the try scope minimal.
        num = Long.parseLong(value);
    } catch (NumberFormatException ex) {
        throw new SemanticException(field + " is not a number");
    }
    if (min != null && num < min) {
        throw new SemanticException(String.format("%s must be equal or greater than %d.", field, min));
    }
    if (max != null && num > max) {
        throw new SemanticException(String.format("%s must be equal or smaller than %d.", field, max));
    }
}
|
if (max != null && num > max) {
|
/**
 * Validates that the resolved expression is a long within [min, max]; a {@code null} bound is
 * unbounded on that side. Throws {@link SemanticException} if the value is not a number or is
 * out of range. (The range-violation exceptions are not NumberFormatExceptions, so they pass
 * straight through the catch below.)
 */
private void checkRangeLongVariable(String field, Long min, Long max) {
    String value = getResolvedExpression().getStringValue();
    try {
        long num = Long.parseLong(value);
        if (min != null && num < min) {
            throw new SemanticException(String.format("%s must be equal or greater than %d.", field, min));
        }
        if (max != null && num > max) {
            throw new SemanticException(String.format("%s must be equal or smaller than %d.", field, max));
        }
    } catch (NumberFormatException ex) {
        throw new SemanticException(field + " is not a number");
    }
}
|
class SetVar implements ParseNode {
private String variable;
private SetType type;
private Expr expression;
private LiteralExpr resolvedExpression;
public SetVar() {
}
public SetVar(SetType type, String variable, Expr expression) {
this.type = type;
this.variable = variable;
this.expression = expression;
if (expression instanceof LiteralExpr) {
this.resolvedExpression = (LiteralExpr) expression;
}
}
public SetVar(String variable, Expr unevaluatedExpression) {
this.type = SetType.DEFAULT;
this.variable = variable;
this.expression = unevaluatedExpression;
if (unevaluatedExpression instanceof LiteralExpr) {
this.resolvedExpression = (LiteralExpr) unevaluatedExpression;
}
}
public String getVariable() {
return variable;
}
public SetType getType() {
return type;
}
public void setType(SetType type) {
this.type = type;
}
public Expr getExpression() {
return expression;
}
public void setExpression(Expr expression) {
this.expression = expression;
}
public LiteralExpr getResolvedExpression() {
return resolvedExpression;
}
public void setResolvedExpression(LiteralExpr resolvedExpression) {
this.resolvedExpression = resolvedExpression;
}
public void analyze() {
if (type == null) {
type = SetType.DEFAULT;
}
if (Strings.isNullOrEmpty(variable)) {
throw new SemanticException("No variable name in set statement.");
}
if (type == SetType.GLOBAL) {
if (!GlobalStateMgr.getCurrentState().isUsingNewPrivilege()) {
if (!GlobalStateMgr.getCurrentState().getAuth()
.checkGlobalPriv(ConnectContext.get(), PrivPredicate.ADMIN)) {
ErrorReport.reportSemanticException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "ADMIN");
}
}
}
if (expression == null) {
return;
}
if (expression instanceof SlotRef) {
expression = new StringLiteral(((SlotRef) expression).getColumnName());
}
expression = Expr.analyzeAndCastFold(expression);
if (!expression.isConstant()) {
throw new SemanticException("Set statement only support constant expr.");
}
resolvedExpression = (LiteralExpr) expression;
if (variable.equalsIgnoreCase(GlobalVariable.DEFAULT_ROWSET_TYPE)) {
if (!HeartbeatFlags.isValidRowsetType(resolvedExpression.getStringValue())) {
throw new SemanticException("Invalid rowset type, now we support {alpha, beta}.");
}
}
if (getVariable().equalsIgnoreCase("prefer_join_method")) {
String value = getResolvedExpression().getStringValue();
if (!value.equalsIgnoreCase("broadcast") && !value.equalsIgnoreCase("shuffle")) {
ErrorReport.reportSemanticException(ErrorCode.ERR_WRONG_VALUE_FOR_VAR, "prefer_join_method", value);
}
}
if (getVariable().equalsIgnoreCase(SessionVariable.LOAD_MEM_LIMIT)) {
checkRangeLongVariable(SessionVariable.LOAD_MEM_LIMIT, 0L, null);
}
if (getVariable().equalsIgnoreCase(SessionVariable.QUERY_MEM_LIMIT)) {
checkRangeLongVariable(SessionVariable.QUERY_MEM_LIMIT, 0L, null);
}
try {
if (getVariable().equalsIgnoreCase(SessionVariable.TIME_ZONE)) {
this.expression = new StringLiteral(
TimeUtils.checkTimeZoneValidAndStandardize(getResolvedExpression().getStringValue()));
this.resolvedExpression = (LiteralExpr) this.expression;
}
if (getVariable().equalsIgnoreCase(SessionVariable.EXEC_MEM_LIMIT)) {
checkRangeLongVariable(SessionVariable.EXEC_MEM_LIMIT, (long) SessionVariable.MIN_EXEC_MEM_LIMIT, null);
this.expression = new StringLiteral(
Long.toString(ParseUtil.analyzeDataVolumn(getResolvedExpression().getStringValue())));
this.resolvedExpression = (LiteralExpr) this.expression;
}
} catch (UserException e) {
throw new SemanticException(e.getMessage());
}
if (getVariable().equalsIgnoreCase(SessionVariable.SQL_SELECT_LIMIT)) {
checkRangeLongVariable(SessionVariable.SQL_SELECT_LIMIT, 0L, null);
}
if (getVariable().equalsIgnoreCase(SessionVariable.QUERY_TIMEOUT)) {
checkRangeLongVariable(SessionVariable.QUERY_TIMEOUT, 1L, (long) SessionVariable.MAX_QUERY_TIMEOUT);
}
if (getVariable().equalsIgnoreCase(SessionVariable.NEW_PLANNER_OPTIMIZER_TIMEOUT)) {
checkRangeLongVariable(SessionVariable.NEW_PLANNER_OPTIMIZER_TIMEOUT, 1L, null);
}
if (getVariable().equalsIgnoreCase(SessionVariable.RESOURCE_GROUP)) {
String wgName = getResolvedExpression().getStringValue();
if (!StringUtils.isEmpty(wgName)) {
ResourceGroup wg =
GlobalStateMgr.getCurrentState().getResourceGroupMgr().chooseResourceGroupByName(wgName);
if (wg == null) {
throw new SemanticException("resource group not exists: " + wgName);
}
}
}
if (getVariable().equalsIgnoreCase(SessionVariable.TABLET_INTERNAL_PARALLEL_MODE)) {
validateTabletInternalParallelModeValue(getResolvedExpression().getStringValue());
}
}
    // Validates that {@code val} names a TTabletInternalParallelMode constant
    // (currently "auto" or "force_split", case-insensitive); throws otherwise.
    // NOTE(review): toUpperCase() uses the default locale; enum names are ASCII so this
    // usually works, but Locale.ROOT would be safer in Turkish-like locales — confirm.
    private void validateTabletInternalParallelModeValue(String val) {
        try {
            TTabletInternalParallelMode.valueOf(val.toUpperCase());
        } catch (Exception ignored) {
            throw new SemanticException("Invalid tablet_internal_parallel_mode, now we support {auto, force_split}.");
        }
    }
}
|
// One "variable = value" assignment inside a SET statement. Holds the raw parsed
// expression and, after analyze(), the constant-folded literal value.
class SetVar implements ParseNode {
    // Name of the session/global variable being set, e.g. "query_timeout".
    private String variable;
    // Scope of the assignment (GLOBAL / SESSION / DEFAULT).
    private SetType type;
    // Right-hand side as parsed; replaced by a folded literal during analyze().
    private Expr expression;
    // Constant-folded value of expression; null until the expression is resolved.
    private LiteralExpr resolvedExpression;
    public SetVar() {
    }
    public SetVar(SetType type, String variable, Expr expression) {
        this.type = type;
        this.variable = variable;
        this.expression = expression;
        // A literal needs no analysis; capture it as the resolved value eagerly.
        if (expression instanceof LiteralExpr) {
            this.resolvedExpression = (LiteralExpr) expression;
        }
    }
    // Convenience constructor: scope defaults to SESSION-default semantics.
    public SetVar(String variable, Expr unevaluatedExpression) {
        this.type = SetType.DEFAULT;
        this.variable = variable;
        this.expression = unevaluatedExpression;
        if (unevaluatedExpression instanceof LiteralExpr) {
            this.resolvedExpression = (LiteralExpr) unevaluatedExpression;
        }
    }
    public String getVariable() {
        return variable;
    }
    public SetType getType() {
        return type;
    }
    public void setType(SetType type) {
        this.type = type;
    }
    public Expr getExpression() {
        return expression;
    }
    public void setExpression(Expr expression) {
        this.expression = expression;
    }
    public LiteralExpr getResolvedExpression() {
        return resolvedExpression;
    }
    public void setResolvedExpression(LiteralExpr resolvedExpression) {
        this.resolvedExpression = resolvedExpression;
    }
    // Validates the assignment: checks privileges for GLOBAL scope, folds the
    // expression to a constant, and applies per-variable range/format checks.
    // Throws SemanticException on any violation.
    // NOTE(review): checkRangeLongVariable() is not defined in this class — presumably
    // a sibling helper; verify it exists in the enclosing file.
    public void analyze() {
        if (type == null) {
            type = SetType.DEFAULT;
        }
        if (Strings.isNullOrEmpty(variable)) {
            throw new SemanticException("No variable name in set statement.");
        }
        // Setting a GLOBAL variable requires ADMIN privilege under the old privilege system.
        if (type == SetType.GLOBAL) {
            if (!GlobalStateMgr.getCurrentState().isUsingNewPrivilege()) {
                if (!GlobalStateMgr.getCurrentState().getAuth()
                        .checkGlobalPriv(ConnectContext.get(), PrivPredicate.ADMIN)) {
                    ErrorReport.reportSemanticException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "ADMIN");
                }
            }
        }
        // No value given (e.g. SET var = DEFAULT handled elsewhere): nothing to validate.
        if (expression == null) {
            return;
        }
        // A bare identifier on the RHS is treated as a string value, not a column.
        if (expression instanceof SlotRef) {
            expression = new StringLiteral(((SlotRef) expression).getColumnName());
        }
        expression = Expr.analyzeAndCastFold(expression);
        if (!expression.isConstant()) {
            throw new SemanticException("Set statement only support constant expr.");
        }
        resolvedExpression = (LiteralExpr) expression;
        if (variable.equalsIgnoreCase(GlobalVariable.DEFAULT_ROWSET_TYPE)) {
            if (!HeartbeatFlags.isValidRowsetType(resolvedExpression.getStringValue())) {
                throw new SemanticException("Invalid rowset type, now we support {alpha, beta}.");
            }
        }
        if (getVariable().equalsIgnoreCase("prefer_join_method")) {
            String value = getResolvedExpression().getStringValue();
            if (!value.equalsIgnoreCase("broadcast") && !value.equalsIgnoreCase("shuffle")) {
                ErrorReport.reportSemanticException(ErrorCode.ERR_WRONG_VALUE_FOR_VAR, "prefer_join_method", value);
            }
        }
        if (getVariable().equalsIgnoreCase(SessionVariable.LOAD_MEM_LIMIT)) {
            checkRangeLongVariable(SessionVariable.LOAD_MEM_LIMIT, 0L, null);
        }
        if (getVariable().equalsIgnoreCase(SessionVariable.QUERY_MEM_LIMIT)) {
            checkRangeLongVariable(SessionVariable.QUERY_MEM_LIMIT, 0L, null);
        }
        // These two checks rewrite the expression to a normalized string literal and
        // may throw UserException, which is converted to a SemanticException below.
        try {
            // Normalize the time zone (e.g. "+08:00") to its canonical form.
            if (getVariable().equalsIgnoreCase(SessionVariable.TIME_ZONE)) {
                this.expression = new StringLiteral(
                        TimeUtils.checkTimeZoneValidAndStandardize(getResolvedExpression().getStringValue()));
                this.resolvedExpression = (LiteralExpr) this.expression;
            }
            // Parse human-readable sizes (e.g. "2G") into a byte count.
            if (getVariable().equalsIgnoreCase(SessionVariable.EXEC_MEM_LIMIT)) {
                checkRangeLongVariable(SessionVariable.EXEC_MEM_LIMIT, (long) SessionVariable.MIN_EXEC_MEM_LIMIT, null);
                this.expression = new StringLiteral(
                        Long.toString(ParseUtil.analyzeDataVolumn(getResolvedExpression().getStringValue())));
                this.resolvedExpression = (LiteralExpr) this.expression;
            }
        } catch (UserException e) {
            throw new SemanticException(e.getMessage());
        }
        if (getVariable().equalsIgnoreCase(SessionVariable.SQL_SELECT_LIMIT)) {
            checkRangeLongVariable(SessionVariable.SQL_SELECT_LIMIT, 0L, null);
        }
        if (getVariable().equalsIgnoreCase(SessionVariable.QUERY_TIMEOUT)) {
            checkRangeLongVariable(SessionVariable.QUERY_TIMEOUT, 1L, (long) SessionVariable.MAX_QUERY_TIMEOUT);
        }
        if (getVariable().equalsIgnoreCase(SessionVariable.NEW_PLANNER_OPTIMIZER_TIMEOUT)) {
            checkRangeLongVariable(SessionVariable.NEW_PLANNER_OPTIMIZER_TIMEOUT, 1L, null);
        }
        // A non-empty resource group name must refer to an existing group.
        if (getVariable().equalsIgnoreCase(SessionVariable.RESOURCE_GROUP)) {
            String wgName = getResolvedExpression().getStringValue();
            if (!StringUtils.isEmpty(wgName)) {
                ResourceGroup wg =
                        GlobalStateMgr.getCurrentState().getResourceGroupMgr().chooseResourceGroupByName(wgName);
                if (wg == null) {
                    throw new SemanticException("resource group not exists: " + wgName);
                }
            }
        }
        if (getVariable().equalsIgnoreCase(SessionVariable.TABLET_INTERNAL_PARALLEL_MODE)) {
            validateTabletInternalParallelModeValue(getResolvedExpression().getStringValue());
        }
    }
    // Validates that val names a TTabletInternalParallelMode constant; throws otherwise.
    // NOTE(review): toUpperCase() is locale-sensitive; Locale.ROOT would be safer — confirm.
    private void validateTabletInternalParallelModeValue(String val) {
        try {
            TTabletInternalParallelMode.valueOf(val.toUpperCase());
        } catch (Exception ignored) {
            throw new SemanticException("Invalid tablet_internal_parallel_mode, now we support {auto, force_split}.");
        }
    }
}
|
It wouldn't reduce the actual number of lines, but it would reduce the amount of duplicated code a bit (one line can have more embedded logic than another). For example, after changing from `int[] selectChannels(...)` to `int selectChannel(...)` you would have 6 fewer places to fix. But as I wrote before, I'm not sure if it's worth it. As you prefer :)
|
public void testSelectChannelsInterval() {
    // Forward partitioning must always pick channel 0, no matter how many channels exist.
    sd.setInstance(streamRecord);
    for (int numberOfChannels : new int[] {1, 2, 1024}) {
        assertEquals(0, streamPartitioner.selectChannels(sd, numberOfChannels)[0]);
    }
}
|
assertEquals(0, streamPartitioner.selectChannels(sd, 1)[0]);
|
public void testSelectChannelsInterval() {
    // The partitioner under test must route every record to channel 0
    // for any number of output channels.
    for (int numberOfChannels : new int[] {1, 2, 1024}) {
        assertSelectedChannel(0, numberOfChannels);
    }
}
|
// Tests ForwardPartitioner via the shared StreamPartitionerTest harness.
class ForwardPartitionerTest extends StreamPartitionerTest {
    // Installs the partitioner under test before each case.
    @Before
    public void setPartitioner() {
        streamPartitioner = new ForwardPartitioner<>();
    }
    // NOTE(review): dangling @Test annotation — the test method body appears to have
    // been truncated in this excerpt; restore it from the original source.
    @Test
}
|
// Tests ForwardPartitioner via the shared StreamPartitionerTest harness.
class ForwardPartitionerTest extends StreamPartitionerTest {
    // Factory hook used by the base class to obtain the partitioner under test.
    @Override
    public StreamPartitioner<Tuple> createPartitioner() {
        return new ForwardPartitioner<>();
    }
    // NOTE(review): dangling @Test annotation — the test method body appears to have
    // been truncated in this excerpt; restore it from the original source.
    @Test
}
|
It doesn't make a difference because the watermark is always the minimum across all readers. In the case of the `UnboundedReadFromBoundedSource` adapter, all input is read from all sources first, and only then is the watermark advanced. See: https://github.com/apache/beam/commit/11d9ec5ebff4820b36db4b6ea4df7a0f79115ddd#diff-6bd5346c8d8943cdb40aa5cf5b1731bdR354
|
// Main read loop: emits elements from all locally-assigned readers into the Flink
// source context until the source is cancelled/stopped. Bounded (adapted) sources
// are drained one reader at a time; truly unbounded sources are polled round-robin.
public void run(SourceContext<WindowedValue<ValueWithRecordId<OutputT>>> ctx) throws Exception {
  context = ctx;
  FlinkMetricContainer metricContainer = new FlinkMetricContainer(getRuntimeContext());
  ReaderInvocationUtil<OutputT, UnboundedSource.UnboundedReader<OutputT>> readerInvoker =
      new ReaderInvocationUtil<>(stepName, serializedOptions.get(), metricContainer);
  if (localReaders.isEmpty()) {
    // This subtask received no splits; stay alive but idle.
    LOG.info("Number of readers is 0 for this task executor, idle");
  } else if (isConvertedBoundedSource) {
    setNextWatermarkTimer(this.runtimeContext);
    // Bounded input: drain readers sequentially to limit concurrently-open
    // files/connections (see isConvertedBoundedSource).
    for (int i = 0; i < localReaders.size() && isRunning; i++) {
      UnboundedSource.UnboundedReader<OutputT> reader = localReaders.get(i);
      // Reader access and emission happen under the checkpoint lock so that
      // snapshots always observe a consistent reader state.
      synchronized (ctx.getCheckpointLock()) {
        boolean dataAvailable = readerInvoker.invokeStart(reader);
        if (dataAvailable) {
          emitElement(ctx, reader);
        }
      }
      boolean dataAvailable;
      do {
        synchronized (ctx.getCheckpointLock()) {
          dataAvailable = readerInvoker.invokeAdvance(reader);
          if (dataAvailable) {
            emitElement(ctx, reader);
          }
        }
      } while (dataAvailable && isRunning);
    }
  } else {
    // Unbounded input: start every reader once, then poll them round-robin.
    int numReaders = localReaders.size();
    int currentReader = 0;
    for (UnboundedSource.UnboundedReader<OutputT> reader : localReaders) {
      synchronized (ctx.getCheckpointLock()) {
        boolean dataAvailable = readerInvoker.invokeStart(reader);
        if (dataAvailable) {
          emitElement(ctx, reader);
        }
      }
    }
    setNextWatermarkTimer(this.runtimeContext);
    boolean hadData = false;
    while (isRunning) {
      UnboundedSource.UnboundedReader<OutputT> reader = localReaders.get(currentReader);
      synchronized (ctx.getCheckpointLock()) {
        boolean dataAvailable = readerInvoker.invokeAdvance(reader);
        if (dataAvailable) {
          emitElement(ctx, reader);
          hadData = true;
        }
      }
      currentReader = (currentReader + 1) % numReaders;
      if (currentReader == 0 && !hadData) {
        // A full round produced nothing: back off briefly to avoid busy-spinning.
        Thread.sleep(50);
      } else if (currentReader == 0) {
        hadData = false;
      }
    }
  }
  // All data consumed or the source was stopped: emit the final watermark and
  // either park or shut down depending on configuration (see finalizeSource()).
  ctx.emitWatermark(new Watermark(Long.MAX_VALUE));
  finalizeSource();
}
|
// Main read loop: emits elements from all locally-assigned readers into the Flink
// source context until the source is cancelled/stopped. Bounded (adapted) sources
// are drained one reader at a time; truly unbounded sources are polled round-robin.
public void run(SourceContext<WindowedValue<ValueWithRecordId<OutputT>>> ctx) throws Exception {
  context = ctx;
  FlinkMetricContainer metricContainer = new FlinkMetricContainer(getRuntimeContext());
  ReaderInvocationUtil<OutputT, UnboundedSource.UnboundedReader<OutputT>> readerInvoker =
      new ReaderInvocationUtil<>(stepName, serializedOptions.get(), metricContainer);
  if (localReaders.isEmpty()) {
    // This subtask received no splits; stay alive but idle.
    LOG.info("Number of readers is 0 for this task executor, idle");
  } else if (isConvertedBoundedSource) {
    setNextWatermarkTimer(this.runtimeContext);
    // Bounded input: drain readers sequentially to limit concurrently-open
    // files/connections (see isConvertedBoundedSource).
    for (int i = 0; i < localReaders.size() && isRunning; i++) {
      UnboundedSource.UnboundedReader<OutputT> reader = localReaders.get(i);
      // Reader access and emission happen under the checkpoint lock so that
      // snapshots always observe a consistent reader state.
      synchronized (ctx.getCheckpointLock()) {
        boolean dataAvailable = readerInvoker.invokeStart(reader);
        if (dataAvailable) {
          emitElement(ctx, reader);
        }
      }
      boolean dataAvailable;
      do {
        synchronized (ctx.getCheckpointLock()) {
          dataAvailable = readerInvoker.invokeAdvance(reader);
          if (dataAvailable) {
            emitElement(ctx, reader);
          }
        }
      } while (dataAvailable && isRunning);
    }
  } else {
    // Unbounded input: start every reader once, then poll them round-robin.
    int numReaders = localReaders.size();
    int currentReader = 0;
    for (UnboundedSource.UnboundedReader<OutputT> reader : localReaders) {
      synchronized (ctx.getCheckpointLock()) {
        boolean dataAvailable = readerInvoker.invokeStart(reader);
        if (dataAvailable) {
          emitElement(ctx, reader);
        }
      }
    }
    setNextWatermarkTimer(this.runtimeContext);
    boolean hadData = false;
    while (isRunning) {
      UnboundedSource.UnboundedReader<OutputT> reader = localReaders.get(currentReader);
      synchronized (ctx.getCheckpointLock()) {
        boolean dataAvailable = readerInvoker.invokeAdvance(reader);
        if (dataAvailable) {
          emitElement(ctx, reader);
          hadData = true;
        }
      }
      currentReader = (currentReader + 1) % numReaders;
      if (currentReader == 0 && !hadData) {
        // A full round produced nothing: back off briefly to avoid busy-spinning.
        Thread.sleep(50);
      } else if (currentReader == 0) {
        hadData = false;
      }
    }
  }
  // All data consumed or the source was stopped: emit the final watermark and
  // either park or shut down depending on configuration (see finalizeSource()).
  ctx.emitWatermark(new Watermark(Long.MAX_VALUE));
  finalizeSource();
}
|
// Flink source function wrapping a Beam UnboundedSource, with checkpointing support.
// Fix in this revision: removed an invalid @Override annotation from the private
// finalizeSource() method — a private method overrides nothing and the annotation
// is a compile error.
class UnboundedSourceWrapper<OutputT, CheckpointMarkT extends UnboundedSource.CheckpointMark>
    extends RichParallelSourceFunction<WindowedValue<ValueWithRecordId<OutputT>>>
    implements ProcessingTimeCallback, StoppableFunction, CheckpointListener, CheckpointedFunction {
  private static final Logger LOG = LoggerFactory.getLogger(UnboundedSourceWrapper.class);
  /** Name of the step; used to scope metrics reported through ReaderInvocationUtil. */
  private final String stepName;
  /** Keep the options so that we can initialize the localReaders. */
  private final SerializablePipelineOptions serializedOptions;
  /**
   * We are processing bounded data and should read from the sources sequentially instead of reading
   * round-robin from all the sources. In case of file sources this avoids having too many open
   * files/connections at once.
   */
  private final boolean isConvertedBoundedSource;
  /** For snapshot and restore. Null when the source declares no checkpoint-mark coder. */
  private final KvCoder<? extends UnboundedSource<OutputT, CheckpointMarkT>, CheckpointMarkT>
      checkpointCoder;
  /**
   * The split sources. We split them in the constructor to ensure that all parallel sources are
   * consistent about the split sources.
   */
  private final List<? extends UnboundedSource<OutputT, CheckpointMarkT>> splitSources;
  /**
   * Shuts down the source if the final watermark is read. Note: This prevents further checkpoints
   * of the streaming application.
   */
  private final boolean shutdownOnFinalWatermark;
  /** The local split sources. Assigned at runtime when the wrapper is executed in parallel. */
  private transient List<UnboundedSource<OutputT, CheckpointMarkT>> localSplitSources;
  /**
   * The local split readers. Assigned at runtime when the wrapper is executed in parallel. Make it
   * a field so that we can access it in {@link #onProcessingTime(long)} for watermark computation
   * and in {@link #snapshotState(FunctionSnapshotContext)} for checkpointing.
   */
  private transient List<UnboundedSource.UnboundedReader<OutputT>> localReaders;
  /**
   * Flag to indicate whether the source is running. Initialize here and not in run() to prevent
   * races where we cancel a job before run() is ever called or run() is called after cancel().
   */
  private volatile boolean isRunning = true;
  /**
   * Make it a field so that we can access it in {@link #onProcessingTime(long)} when the
   * watermark timer triggers.
   */
  private transient StreamingRuntimeContext runtimeContext;
  /**
   * Make it a field so that we can access it in {@link #onProcessingTime(long)} to emit
   * watermarks.
   */
  private transient SourceContext<WindowedValue<ValueWithRecordId<OutputT>>> context;
  /** Pending checkpoints which have not been acknowledged yet. */
  private transient LinkedHashMap<Long, List<CheckpointMarkT>> pendingCheckpoints;
  /** Keep a maximum of 32 checkpoints for {@code CheckpointMark.finalizeCheckpoint()}. */
  private static final int MAX_NUMBER_PENDING_CHECKPOINTS = 32;
  private transient ListState<
          KV<? extends UnboundedSource<OutputT, CheckpointMarkT>, CheckpointMarkT>>
      stateForCheckpoint;
  /** false if checkpointCoder is null or no restore state by starting first. */
  private transient boolean isRestored = false;
  @SuppressWarnings("unchecked")
  public UnboundedSourceWrapper(
      String stepName,
      PipelineOptions pipelineOptions,
      UnboundedSource<OutputT, CheckpointMarkT> source,
      int parallelism)
      throws Exception {
    this.stepName = stepName;
    this.serializedOptions = new SerializablePipelineOptions(pipelineOptions);
    this.isConvertedBoundedSource =
        source instanceof UnboundedReadFromBoundedSource.BoundedToUnboundedSourceAdapter;
    if (source.requiresDeduping()) {
      LOG.warn("Source {} requires deduping but Flink runner doesn't support this yet.", source);
    }
    Coder<CheckpointMarkT> checkpointMarkCoder = source.getCheckpointMarkCoder();
    if (checkpointMarkCoder == null) {
      LOG.info("No CheckpointMarkCoder specified for this source. Won't create snapshots.");
      checkpointCoder = null;
    } else {
      Coder<? extends UnboundedSource<OutputT, CheckpointMarkT>> sourceCoder =
          (Coder) SerializableCoder.of(new TypeDescriptor<UnboundedSource>() {});
      checkpointCoder = KvCoder.of(sourceCoder, checkpointMarkCoder);
    }
    // Split up front so every parallel instance sees the same partitioning.
    splitSources = source.split(parallelism, pipelineOptions);
    shutdownOnFinalWatermark =
        pipelineOptions.as(FlinkPipelineOptions.class).isShutdownSourcesOnFinalWatermark();
  }
  /** Initialize and restore state before starting execution of the source. */
  @Override
  public void open(Configuration parameters) throws Exception {
    runtimeContext = (StreamingRuntimeContext) getRuntimeContext();
    int subtaskIndex = runtimeContext.getIndexOfThisSubtask();
    int numSubtasks = runtimeContext.getNumberOfParallelSubtasks();
    localSplitSources = new ArrayList<>();
    localReaders = new ArrayList<>();
    pendingCheckpoints = new LinkedHashMap<>();
    if (isRestored) {
      // Restore the sources/readers that were checkpointed for this subtask.
      for (KV<? extends UnboundedSource<OutputT, CheckpointMarkT>, CheckpointMarkT> restored :
          stateForCheckpoint.get()) {
        localSplitSources.add(restored.getKey());
        localReaders.add(
            restored.getKey().createReader(serializedOptions.get(), restored.getValue()));
      }
    } else {
      // Fresh start: assign splits round-robin across subtasks.
      for (int i = 0; i < splitSources.size(); i++) {
        if (i % numSubtasks == subtaskIndex) {
          UnboundedSource<OutputT, CheckpointMarkT> source = splitSources.get(i);
          UnboundedSource.UnboundedReader<OutputT> reader =
              source.createReader(serializedOptions.get(), null);
          localSplitSources.add(source);
          localReaders.add(reader);
        }
      }
    }
    LOG.info(
        "Unbounded Flink Source {}/{} is reading from sources: {}",
        subtaskIndex + 1,
        numSubtasks,
        localSplitSources);
  }
  /**
   * Parks the task after the final watermark unless shutdown-on-final-watermark is enabled;
   * keeping the task alive allows further checkpoints of the streaming application.
   */
  // NOTE(review): removed the invalid @Override annotation that was on this private
  // method — private methods cannot override anything and the annotation fails to compile.
  private void finalizeSource() {
    if (!shutdownOnFinalWatermark) {
      while (isRunning) {
        try {
          Thread.sleep(1000);
        } catch (InterruptedException e) {
          if (!isRunning) {
            // Shutting down anyway: restore the interrupt flag and let the loop exit.
            Thread.currentThread().interrupt();
          }
        }
      }
    }
  }
  /** Emit the current element from the given Reader. The reader is guaranteed to have data. */
  private void emitElement(
      SourceContext<WindowedValue<ValueWithRecordId<OutputT>>> ctx,
      UnboundedSource.UnboundedReader<OutputT> reader) {
    OutputT item = reader.getCurrent();
    byte[] recordId = reader.getCurrentRecordId();
    Instant timestamp = reader.getCurrentTimestamp();
    WindowedValue<ValueWithRecordId<OutputT>> windowedValue =
        WindowedValue.of(
            new ValueWithRecordId<>(item, recordId),
            timestamp,
            GlobalWindow.INSTANCE,
            PaneInfo.NO_FIRING);
    ctx.collect(windowedValue);
  }
  @Override
  public void close() throws Exception {
    try {
      super.close();
      if (localReaders != null) {
        for (UnboundedSource.UnboundedReader<OutputT> reader : localReaders) {
          reader.close();
        }
      }
    } finally {
      FlinkClassloading.deleteStaticCaches();
    }
  }
  @Override
  public void cancel() {
    isRunning = false;
  }
  @Override
  public void stop() {
    isRunning = false;
  }
  @Override
  public void snapshotState(FunctionSnapshotContext functionSnapshotContext) throws Exception {
    if (!isRunning) {
      LOG.debug("snapshotState() called on closed source");
    } else {
      if (checkpointCoder == null) {
        // No checkpoint-mark coder declared; snapshots are disabled (see constructor).
        return;
      }
      stateForCheckpoint.clear();
      long checkpointId = functionSnapshotContext.getCheckpointId();
      List<CheckpointMarkT> checkpointMarks = new ArrayList<>(localSplitSources.size());
      for (int i = 0; i < localSplitSources.size(); i++) {
        UnboundedSource<OutputT, CheckpointMarkT> source = localSplitSources.get(i);
        UnboundedSource.UnboundedReader<OutputT> reader = localReaders.get(i);
        @SuppressWarnings("unchecked")
        CheckpointMarkT mark = (CheckpointMarkT) reader.getCheckpointMark();
        checkpointMarks.add(mark);
        KV<UnboundedSource<OutputT, CheckpointMarkT>, CheckpointMarkT> kv = KV.of(source, mark);
        stateForCheckpoint.add(kv);
      }
      // Evict oldest pending checkpoints so at most MAX_NUMBER_PENDING_CHECKPOINTS remain
      // after inserting the new one below.
      int diff = pendingCheckpoints.size() - MAX_NUMBER_PENDING_CHECKPOINTS;
      if (diff >= 0) {
        for (Iterator<Long> iterator = pendingCheckpoints.keySet().iterator(); diff >= 0; diff--) {
          iterator.next();
          iterator.remove();
        }
      }
      pendingCheckpoints.put(checkpointId, checkpointMarks);
    }
  }
  @Override
  public void initializeState(FunctionInitializationContext context) throws Exception {
    if (checkpointCoder == null) {
      // No checkpoint coder: nothing to restore, snapshots are disabled.
      return;
    }
    OperatorStateStore stateStore = context.getOperatorStateStore();
    CoderTypeInformation<KV<? extends UnboundedSource<OutputT, CheckpointMarkT>, CheckpointMarkT>>
        typeInformation = (CoderTypeInformation) new CoderTypeInformation<>(checkpointCoder);
    stateForCheckpoint =
        stateStore.getOperatorState(
            new ListStateDescriptor<>(
                DefaultOperatorStateBackend.DEFAULT_OPERATOR_STATE_NAME,
                typeInformation.createSerializer(new ExecutionConfig())));
    if (context.isRestored()) {
      isRestored = true;
      LOG.info("Restoring state in the UnboundedSourceWrapper.");
    } else {
      LOG.info("No restore state for UnboundedSourceWrapper.");
    }
  }
  @Override
  public void onProcessingTime(long timestamp) {
    if (this.isRunning) {
      synchronized (context.getCheckpointLock()) {
        // The emitted watermark is the minimum over all local readers.
        long watermarkMillis = Long.MAX_VALUE;
        for (UnboundedSource.UnboundedReader<OutputT> reader : localReaders) {
          Instant watermark = reader.getWatermark();
          if (watermark != null) {
            watermarkMillis = Math.min(watermark.getMillis(), watermarkMillis);
          }
        }
        context.emitWatermark(new Watermark(watermarkMillis));
        if (shutdownOnFinalWatermark
            && watermarkMillis >= BoundedWindow.TIMESTAMP_MAX_VALUE.getMillis()) {
          this.isRunning = false;
        }
      }
      setNextWatermarkTimer(this.runtimeContext);
    }
  }
  @SuppressWarnings("FutureReturnValueIgnored")
  private void setNextWatermarkTimer(StreamingRuntimeContext runtime) {
    if (this.isRunning) {
      long watermarkInterval = runtime.getExecutionConfig().getAutoWatermarkInterval();
      synchronized (context.getCheckpointLock()) {
        long currentProcessingTime = runtime.getProcessingTimeService().getCurrentProcessingTime();
        if (currentProcessingTime < Long.MAX_VALUE) {
          long nextTriggerTime = currentProcessingTime + watermarkInterval;
          if (nextTriggerTime < currentProcessingTime) {
            // Overflow: clamp to "never" rather than scheduling in the past.
            nextTriggerTime = Long.MAX_VALUE;
          }
          runtime.getProcessingTimeService().registerTimer(nextTriggerTime, this);
        }
      }
    }
  }
  /** Visible so that we can check this in tests. Must not be used for anything else. */
  @VisibleForTesting
  public List<? extends UnboundedSource<OutputT, CheckpointMarkT>> getSplitSources() {
    return splitSources;
  }
  /** Visible so that we can check this in tests. Must not be used for anything else. */
  @VisibleForTesting
  List<? extends UnboundedSource<OutputT, CheckpointMarkT>> getLocalSplitSources() {
    return localSplitSources;
  }
  /** Visible so that we can check this in tests. Must not be used for anything else. */
  @VisibleForTesting
  List<UnboundedSource.UnboundedReader<OutputT>> getLocalReaders() {
    return localReaders;
  }
  /** Visible so that we can check this in tests. Must not be used for anything else. */
  @VisibleForTesting
  boolean isRunning() {
    return isRunning;
  }
  /**
   * Visible so that we can set this in tests. This is only set in the run method which is
   * inconvenient for the tests where the context is assumed to be set when run is called. Must not
   * be used for anything else.
   */
  @VisibleForTesting
  public void setSourceContext(SourceContext<WindowedValue<ValueWithRecordId<OutputT>>> ctx) {
    context = ctx;
  }
  @Override
  public void notifyCheckpointComplete(long checkpointId) throws Exception {
    List<CheckpointMarkT> checkpointMarks = pendingCheckpoints.get(checkpointId);
    if (checkpointMarks != null) {
      // Drop this checkpoint and every older one (LinkedHashMap preserves insertion order).
      Iterator<Long> iterator = pendingCheckpoints.keySet().iterator();
      long currentId;
      do {
        currentId = iterator.next();
        iterator.remove();
      } while (currentId != checkpointId);
      for (CheckpointMarkT mark : checkpointMarks) {
        mark.finalizeCheckpoint();
      }
    }
  }
}
|
// Flink source function wrapping a Beam UnboundedSource, with checkpointing support.
// Fix in this revision: removed an invalid @Override annotation from the private
// finalizeSource() method — a private method overrides nothing and the annotation
// is a compile error.
class UnboundedSourceWrapper<OutputT, CheckpointMarkT extends UnboundedSource.CheckpointMark>
    extends RichParallelSourceFunction<WindowedValue<ValueWithRecordId<OutputT>>>
    implements ProcessingTimeCallback, StoppableFunction, CheckpointListener, CheckpointedFunction {
  private static final Logger LOG = LoggerFactory.getLogger(UnboundedSourceWrapper.class);
  /** Name of the step; used to scope metrics reported through ReaderInvocationUtil. */
  private final String stepName;
  /** Keep the options so that we can initialize the localReaders. */
  private final SerializablePipelineOptions serializedOptions;
  /**
   * We are processing bounded data and should read from the sources sequentially instead of reading
   * round-robin from all the sources. In case of file sources this avoids having too many open
   * files/connections at once.
   */
  private final boolean isConvertedBoundedSource;
  /** For snapshot and restore. Null when the source declares no checkpoint-mark coder. */
  private final KvCoder<? extends UnboundedSource<OutputT, CheckpointMarkT>, CheckpointMarkT>
      checkpointCoder;
  /**
   * The split sources. We split them in the constructor to ensure that all parallel sources are
   * consistent about the split sources.
   */
  private final List<? extends UnboundedSource<OutputT, CheckpointMarkT>> splitSources;
  /**
   * Shuts down the source if the final watermark is read. Note: This prevents further checkpoints
   * of the streaming application.
   */
  private final boolean shutdownOnFinalWatermark;
  /** The local split sources. Assigned at runtime when the wrapper is executed in parallel. */
  private transient List<UnboundedSource<OutputT, CheckpointMarkT>> localSplitSources;
  /**
   * The local split readers. Assigned at runtime when the wrapper is executed in parallel. Make it
   * a field so that we can access it in {@link #onProcessingTime(long)} for watermark computation
   * and in {@link #snapshotState(FunctionSnapshotContext)} for checkpointing.
   */
  private transient List<UnboundedSource.UnboundedReader<OutputT>> localReaders;
  /**
   * Flag to indicate whether the source is running. Initialize here and not in run() to prevent
   * races where we cancel a job before run() is ever called or run() is called after cancel().
   */
  private volatile boolean isRunning = true;
  /**
   * Make it a field so that we can access it in {@link #onProcessingTime(long)} when the
   * watermark timer triggers.
   */
  private transient StreamingRuntimeContext runtimeContext;
  /**
   * Make it a field so that we can access it in {@link #onProcessingTime(long)} to emit
   * watermarks.
   */
  private transient SourceContext<WindowedValue<ValueWithRecordId<OutputT>>> context;
  /** Pending checkpoints which have not been acknowledged yet. */
  private transient LinkedHashMap<Long, List<CheckpointMarkT>> pendingCheckpoints;
  /** Keep a maximum of 32 checkpoints for {@code CheckpointMark.finalizeCheckpoint()}. */
  private static final int MAX_NUMBER_PENDING_CHECKPOINTS = 32;
  private transient ListState<
          KV<? extends UnboundedSource<OutputT, CheckpointMarkT>, CheckpointMarkT>>
      stateForCheckpoint;
  /** false if checkpointCoder is null or no restore state by starting first. */
  private transient boolean isRestored = false;
  @SuppressWarnings("unchecked")
  public UnboundedSourceWrapper(
      String stepName,
      PipelineOptions pipelineOptions,
      UnboundedSource<OutputT, CheckpointMarkT> source,
      int parallelism)
      throws Exception {
    this.stepName = stepName;
    this.serializedOptions = new SerializablePipelineOptions(pipelineOptions);
    this.isConvertedBoundedSource =
        source instanceof UnboundedReadFromBoundedSource.BoundedToUnboundedSourceAdapter;
    if (source.requiresDeduping()) {
      LOG.warn("Source {} requires deduping but Flink runner doesn't support this yet.", source);
    }
    Coder<CheckpointMarkT> checkpointMarkCoder = source.getCheckpointMarkCoder();
    if (checkpointMarkCoder == null) {
      LOG.info("No CheckpointMarkCoder specified for this source. Won't create snapshots.");
      checkpointCoder = null;
    } else {
      Coder<? extends UnboundedSource<OutputT, CheckpointMarkT>> sourceCoder =
          (Coder) SerializableCoder.of(new TypeDescriptor<UnboundedSource>() {});
      checkpointCoder = KvCoder.of(sourceCoder, checkpointMarkCoder);
    }
    // Split up front so every parallel instance sees the same partitioning.
    splitSources = source.split(parallelism, pipelineOptions);
    shutdownOnFinalWatermark =
        pipelineOptions.as(FlinkPipelineOptions.class).isShutdownSourcesOnFinalWatermark();
  }
  /** Initialize and restore state before starting execution of the source. */
  @Override
  public void open(Configuration parameters) throws Exception {
    runtimeContext = (StreamingRuntimeContext) getRuntimeContext();
    int subtaskIndex = runtimeContext.getIndexOfThisSubtask();
    int numSubtasks = runtimeContext.getNumberOfParallelSubtasks();
    localSplitSources = new ArrayList<>();
    localReaders = new ArrayList<>();
    pendingCheckpoints = new LinkedHashMap<>();
    if (isRestored) {
      // Restore the sources/readers that were checkpointed for this subtask.
      for (KV<? extends UnboundedSource<OutputT, CheckpointMarkT>, CheckpointMarkT> restored :
          stateForCheckpoint.get()) {
        localSplitSources.add(restored.getKey());
        localReaders.add(
            restored.getKey().createReader(serializedOptions.get(), restored.getValue()));
      }
    } else {
      // Fresh start: assign splits round-robin across subtasks.
      for (int i = 0; i < splitSources.size(); i++) {
        if (i % numSubtasks == subtaskIndex) {
          UnboundedSource<OutputT, CheckpointMarkT> source = splitSources.get(i);
          UnboundedSource.UnboundedReader<OutputT> reader =
              source.createReader(serializedOptions.get(), null);
          localSplitSources.add(source);
          localReaders.add(reader);
        }
      }
    }
    LOG.info(
        "Unbounded Flink Source {}/{} is reading from sources: {}",
        subtaskIndex + 1,
        numSubtasks,
        localSplitSources);
  }
  /**
   * Parks the task after the final watermark unless shutdown-on-final-watermark is enabled;
   * keeping the task alive allows further checkpoints of the streaming application.
   */
  // NOTE(review): removed the invalid @Override annotation that was on this private
  // method — private methods cannot override anything and the annotation fails to compile.
  private void finalizeSource() {
    if (!shutdownOnFinalWatermark) {
      while (isRunning) {
        try {
          Thread.sleep(1000);
        } catch (InterruptedException e) {
          if (!isRunning) {
            // Shutting down anyway: restore the interrupt flag and let the loop exit.
            Thread.currentThread().interrupt();
          }
        }
      }
    }
  }
  /** Emit the current element from the given Reader. The reader is guaranteed to have data. */
  private void emitElement(
      SourceContext<WindowedValue<ValueWithRecordId<OutputT>>> ctx,
      UnboundedSource.UnboundedReader<OutputT> reader) {
    OutputT item = reader.getCurrent();
    byte[] recordId = reader.getCurrentRecordId();
    Instant timestamp = reader.getCurrentTimestamp();
    WindowedValue<ValueWithRecordId<OutputT>> windowedValue =
        WindowedValue.of(
            new ValueWithRecordId<>(item, recordId),
            timestamp,
            GlobalWindow.INSTANCE,
            PaneInfo.NO_FIRING);
    ctx.collect(windowedValue);
  }
  @Override
  public void close() throws Exception {
    try {
      super.close();
      if (localReaders != null) {
        for (UnboundedSource.UnboundedReader<OutputT> reader : localReaders) {
          reader.close();
        }
      }
    } finally {
      FlinkClassloading.deleteStaticCaches();
    }
  }
  @Override
  public void cancel() {
    isRunning = false;
  }
  @Override
  public void stop() {
    isRunning = false;
  }
  @Override
  public void snapshotState(FunctionSnapshotContext functionSnapshotContext) throws Exception {
    if (!isRunning) {
      LOG.debug("snapshotState() called on closed source");
    } else {
      if (checkpointCoder == null) {
        // No checkpoint-mark coder declared; snapshots are disabled (see constructor).
        return;
      }
      stateForCheckpoint.clear();
      long checkpointId = functionSnapshotContext.getCheckpointId();
      List<CheckpointMarkT> checkpointMarks = new ArrayList<>(localSplitSources.size());
      for (int i = 0; i < localSplitSources.size(); i++) {
        UnboundedSource<OutputT, CheckpointMarkT> source = localSplitSources.get(i);
        UnboundedSource.UnboundedReader<OutputT> reader = localReaders.get(i);
        @SuppressWarnings("unchecked")
        CheckpointMarkT mark = (CheckpointMarkT) reader.getCheckpointMark();
        checkpointMarks.add(mark);
        KV<UnboundedSource<OutputT, CheckpointMarkT>, CheckpointMarkT> kv = KV.of(source, mark);
        stateForCheckpoint.add(kv);
      }
      // Evict oldest pending checkpoints so at most MAX_NUMBER_PENDING_CHECKPOINTS remain
      // after inserting the new one below.
      int diff = pendingCheckpoints.size() - MAX_NUMBER_PENDING_CHECKPOINTS;
      if (diff >= 0) {
        for (Iterator<Long> iterator = pendingCheckpoints.keySet().iterator(); diff >= 0; diff--) {
          iterator.next();
          iterator.remove();
        }
      }
      pendingCheckpoints.put(checkpointId, checkpointMarks);
    }
  }
  @Override
  public void initializeState(FunctionInitializationContext context) throws Exception {
    if (checkpointCoder == null) {
      // No checkpoint coder: nothing to restore, snapshots are disabled.
      return;
    }
    OperatorStateStore stateStore = context.getOperatorStateStore();
    CoderTypeInformation<KV<? extends UnboundedSource<OutputT, CheckpointMarkT>, CheckpointMarkT>>
        typeInformation = (CoderTypeInformation) new CoderTypeInformation<>(checkpointCoder);
    stateForCheckpoint =
        stateStore.getOperatorState(
            new ListStateDescriptor<>(
                DefaultOperatorStateBackend.DEFAULT_OPERATOR_STATE_NAME,
                typeInformation.createSerializer(new ExecutionConfig())));
    if (context.isRestored()) {
      isRestored = true;
      LOG.info("Restoring state in the UnboundedSourceWrapper.");
    } else {
      LOG.info("No restore state for UnboundedSourceWrapper.");
    }
  }
  @Override
  public void onProcessingTime(long timestamp) {
    if (this.isRunning) {
      synchronized (context.getCheckpointLock()) {
        // The emitted watermark is the minimum over all local readers.
        long watermarkMillis = Long.MAX_VALUE;
        for (UnboundedSource.UnboundedReader<OutputT> reader : localReaders) {
          Instant watermark = reader.getWatermark();
          if (watermark != null) {
            watermarkMillis = Math.min(watermark.getMillis(), watermarkMillis);
          }
        }
        context.emitWatermark(new Watermark(watermarkMillis));
        if (shutdownOnFinalWatermark
            && watermarkMillis >= BoundedWindow.TIMESTAMP_MAX_VALUE.getMillis()) {
          this.isRunning = false;
        }
      }
      setNextWatermarkTimer(this.runtimeContext);
    }
  }
  @SuppressWarnings("FutureReturnValueIgnored")
  private void setNextWatermarkTimer(StreamingRuntimeContext runtime) {
    if (this.isRunning) {
      long watermarkInterval = runtime.getExecutionConfig().getAutoWatermarkInterval();
      synchronized (context.getCheckpointLock()) {
        long currentProcessingTime = runtime.getProcessingTimeService().getCurrentProcessingTime();
        if (currentProcessingTime < Long.MAX_VALUE) {
          long nextTriggerTime = currentProcessingTime + watermarkInterval;
          if (nextTriggerTime < currentProcessingTime) {
            // Overflow: clamp to "never" rather than scheduling in the past.
            nextTriggerTime = Long.MAX_VALUE;
          }
          runtime.getProcessingTimeService().registerTimer(nextTriggerTime, this);
        }
      }
    }
  }
  /** Visible so that we can check this in tests. Must not be used for anything else. */
  @VisibleForTesting
  public List<? extends UnboundedSource<OutputT, CheckpointMarkT>> getSplitSources() {
    return splitSources;
  }
  /** Visible so that we can check this in tests. Must not be used for anything else. */
  @VisibleForTesting
  List<? extends UnboundedSource<OutputT, CheckpointMarkT>> getLocalSplitSources() {
    return localSplitSources;
  }
  /** Visible so that we can check this in tests. Must not be used for anything else. */
  @VisibleForTesting
  List<UnboundedSource.UnboundedReader<OutputT>> getLocalReaders() {
    return localReaders;
  }
  /** Visible so that we can check this in tests. Must not be used for anything else. */
  @VisibleForTesting
  boolean isRunning() {
    return isRunning;
  }
  /**
   * Visible so that we can set this in tests. This is only set in the run method which is
   * inconvenient for the tests where the context is assumed to be set when run is called. Must not
   * be used for anything else.
   */
  @VisibleForTesting
  public void setSourceContext(SourceContext<WindowedValue<ValueWithRecordId<OutputT>>> ctx) {
    context = ctx;
  }
  @Override
  public void notifyCheckpointComplete(long checkpointId) throws Exception {
    List<CheckpointMarkT> checkpointMarks = pendingCheckpoints.get(checkpointId);
    if (checkpointMarks != null) {
      // Drop this checkpoint and every older one (LinkedHashMap preserves insertion order).
      Iterator<Long> iterator = pendingCheckpoints.keySet().iterator();
      long currentId;
      do {
        currentId = iterator.next();
        iterator.remove();
      } while (currentId != checkpointId);
      for (CheckpointMarkT mark : checkpointMarks) {
        mark.finalizeCheckpoint();
      }
    }
  }
}
|
|
The returned value is only the instance name, since it returns the last path segment
|
public static Optional<String> getInstanceName(List<SubjectAlternativeName> sans) {
return getLastSegmentFromSanUri(sans, "athenz:
}
|
return getLastSegmentFromSanUri(sans, "athenz:
|
public static Optional<String> getInstanceName(List<SubjectAlternativeName> sans) {
return getLastSegmentFromSanUri(sans, "athenz:
}
|
/**
 * Helpers for extracting Athenz identity/role/instance information from X.509 certificates.
 * NOTE(review): several string literals below ("athenz:, "http-like prefixes) appear
 * truncated by extraction and are not valid Java as shown — restore the full URI
 * prefixes before compiling.
 */
class AthenzX509CertificateUtils {
// Utility class: no instances.
private AthenzX509CertificateUtils() {}
// Resolves the identity from a role certificate, preferring the SAN email form
// and falling back to the SAN URI form.
public static AthenzIdentity getIdentityFromRoleCertificate(X509Certificate certificate) {
List<SubjectAlternativeName> sans = X509CertificateUtils.getSubjectAlternativeNames(certificate);
return getRoleIdentityFromEmail(sans)
.or(() -> getRoleIdentityFromUri(sans))
.orElseThrow(() -> new IllegalArgumentException("Could not find identity in SAN: " + sans));
}
// Identity from the first SAN entry of type EMAIL (local part before '@').
private static Optional<AthenzIdentity> getRoleIdentityFromEmail(List<SubjectAlternativeName> sans) {
return sans.stream()
.filter(san -> san.getType() == Type.EMAIL)
.map(com.yahoo.security.SubjectAlternativeName::getValue)
.map(AthenzX509CertificateUtils::getIdentityFromSanEmail)
.findFirst();
}
// Identity from the first SAN URI that starts with the Athenz principal prefix.
private static Optional<AthenzIdentity> getRoleIdentityFromUri(List<SubjectAlternativeName> sans) {
// NOTE(review): literal truncated in this copy — should be the full athenz principal URI prefix.
String uriPrefix = "athenz:
return sans.stream()
.filter(s -> s.getType() == Type.URI && s.getValue().startsWith(uriPrefix))
.map(san -> {
String uriPath = URI.create(san.getValue()).getPath();
return AthenzIdentities.from(uriPath.substring(uriPrefix.length()));
})
.findFirst();
}
// The role is encoded as the certificate's subject common name.
public static AthenzRole getRolesFromRoleCertificate(X509Certificate certificate) {
String commonName = X509CertificateUtils.getSubjectCommonName(certificate).orElseThrow();
return AthenzRole.fromResourceNameString(commonName);
}
// Parses "identity@domain"-style SAN email values; rejects values without '@'.
private static AthenzIdentity getIdentityFromSanEmail(String email) {
int separator = email.indexOf('@');
if (separator == -1) throw new IllegalArgumentException("Invalid SAN email: " + email);
return AthenzIdentities.from(email.substring(0, separator));
}
/** @return Athenz unique instance id from an Athenz X.509 certificate (specified in the Subject Alternative Name extension) */
public static Optional<String> getInstanceId(X509Certificate cert) {
return getInstanceId(X509CertificateUtils.getSubjectAlternativeNames(cert));
}
/** @return Athenz unique instance id from the Subject Alternative Name extension */
public static Optional<String> getInstanceId(List<SubjectAlternativeName> sans) {
// Prefer the SAN URI form; fall back to the legacy SAN DNS encoding.
// NOTE(review): URI prefix literal truncated in this copy.
return getLastSegmentFromSanUri(sans, "athenz:
.or(() -> getAthenzUniqueInstanceIdFromSanDns(sans));
}
/** @return Athenz unique instance name from the Subject Alternative Name extension */
private static Optional<String> getLastSegmentFromSanUri(List<SubjectAlternativeName> sans, String uriPrefix) {
// Returns the last path segment of the first SAN URI matching the given prefix.
return sans.stream()
.filter(san -> {
if (san.getType() != Type.URI) return false;
return san.getValue().startsWith(uriPrefix);
})
.map(san -> {
String uriPath = URI.create(san.getValue()).getPath();
return uriPath.substring(uriPath.lastIndexOf('/') + 1);
})
.findFirst();
}
// Legacy encoding: "<instance-id>.instanceid.athenz.<suffix>" in a SAN DNS name.
private static Optional<String> getAthenzUniqueInstanceIdFromSanDns(List<SubjectAlternativeName> sans) {
String dnsNameDelimiter = ".instanceid.athenz.";
return sans.stream()
.filter(san -> {
if (san.getType() != Type.DNS) return false;
return san.getValue().contains(dnsNameDelimiter);
})
.map(san -> {
String dnsName = san.getValue();
return dnsName.substring(0, dnsName.indexOf(dnsNameDelimiter));
})
.findFirst();
}
}
|
/**
 * Helpers for extracting Athenz identity/role/instance information from X.509 certificates.
 * NOTE(review): this is a duplicate copy of the class above (dataset "after" row), and the
 * same string literals ("athenz: prefixes) are truncated by extraction — restore the full
 * URI prefixes before compiling.
 */
class AthenzX509CertificateUtils {
// Utility class: no instances.
private AthenzX509CertificateUtils() {}
// Resolves the identity from a role certificate, preferring the SAN email form
// and falling back to the SAN URI form.
public static AthenzIdentity getIdentityFromRoleCertificate(X509Certificate certificate) {
List<SubjectAlternativeName> sans = X509CertificateUtils.getSubjectAlternativeNames(certificate);
return getRoleIdentityFromEmail(sans)
.or(() -> getRoleIdentityFromUri(sans))
.orElseThrow(() -> new IllegalArgumentException("Could not find identity in SAN: " + sans));
}
// Identity from the first SAN entry of type EMAIL (local part before '@').
private static Optional<AthenzIdentity> getRoleIdentityFromEmail(List<SubjectAlternativeName> sans) {
return sans.stream()
.filter(san -> san.getType() == Type.EMAIL)
.map(com.yahoo.security.SubjectAlternativeName::getValue)
.map(AthenzX509CertificateUtils::getIdentityFromSanEmail)
.findFirst();
}
// Identity from the first SAN URI that starts with the Athenz principal prefix.
private static Optional<AthenzIdentity> getRoleIdentityFromUri(List<SubjectAlternativeName> sans) {
// NOTE(review): literal truncated in this copy — should be the full athenz principal URI prefix.
String uriPrefix = "athenz:
return sans.stream()
.filter(s -> s.getType() == Type.URI && s.getValue().startsWith(uriPrefix))
.map(san -> {
String uriPath = URI.create(san.getValue()).getPath();
return AthenzIdentities.from(uriPath.substring(uriPrefix.length()));
})
.findFirst();
}
// The role is encoded as the certificate's subject common name.
public static AthenzRole getRolesFromRoleCertificate(X509Certificate certificate) {
String commonName = X509CertificateUtils.getSubjectCommonName(certificate).orElseThrow();
return AthenzRole.fromResourceNameString(commonName);
}
// Parses "identity@domain"-style SAN email values; rejects values without '@'.
private static AthenzIdentity getIdentityFromSanEmail(String email) {
int separator = email.indexOf('@');
if (separator == -1) throw new IllegalArgumentException("Invalid SAN email: " + email);
return AthenzIdentities.from(email.substring(0, separator));
}
/** @return Athenz unique instance id from an Athenz X.509 certificate (specified in the Subject Alternative Name extension) */
public static Optional<String> getInstanceId(X509Certificate cert) {
return getInstanceId(X509CertificateUtils.getSubjectAlternativeNames(cert));
}
/** @return Athenz unique instance id from the Subject Alternative Name extension */
public static Optional<String> getInstanceId(List<SubjectAlternativeName> sans) {
// Prefer the SAN URI form; fall back to the legacy SAN DNS encoding.
// NOTE(review): URI prefix literal truncated in this copy.
return getLastSegmentFromSanUri(sans, "athenz:
.or(() -> getAthenzUniqueInstanceIdFromSanDns(sans));
}
/** @return Athenz unique instance name from the Subject Alternative Name extension */
private static Optional<String> getLastSegmentFromSanUri(List<SubjectAlternativeName> sans, String uriPrefix) {
// Returns the last path segment of the first SAN URI matching the given prefix.
return sans.stream()
.filter(san -> {
if (san.getType() != Type.URI) return false;
return san.getValue().startsWith(uriPrefix);
})
.map(san -> {
String uriPath = URI.create(san.getValue()).getPath();
return uriPath.substring(uriPath.lastIndexOf('/') + 1);
})
.findFirst();
}
// Legacy encoding: "<instance-id>.instanceid.athenz.<suffix>" in a SAN DNS name.
private static Optional<String> getAthenzUniqueInstanceIdFromSanDns(List<SubjectAlternativeName> sans) {
String dnsNameDelimiter = ".instanceid.athenz.";
return sans.stream()
.filter(san -> {
if (san.getType() != Type.DNS) return false;
return san.getValue().contains(dnsNameDelimiter);
})
.map(san -> {
String dnsName = san.getValue();
return dnsName.substring(0, dnsName.indexOf(dnsNameDelimiter));
})
.findFirst();
}
}
|
Use `params` from above instead of reading all bytes again?
|
/**
 * Handles a deploy request. For multipart requests the prepare parameters and the
 * application package arrive as separate parts; otherwise the parameters come from
 * the request properties and the package from the raw body.
 *
 * @throws BadRequestException if the multipart payload cannot be parsed.
 */
protected HttpResponse handlePOST(HttpRequest request) {
    validateDataAndHeader(request, List.of(APPLICATION_X_GZIP, APPLICATION_ZIP, MULTIPART_FORM_DATA));
    TenantName tenantName = validateTenant(request);
    PrepareParams prepareParams;
    CompressedApplicationInputStream compressedStream;
    boolean multipartRequest = Optional.ofNullable(request.getHeader(HttpHeaders.Names.CONTENT_TYPE))
            .map(val -> val.equalsIgnoreCase(MULTIPART_FORM_DATA))
            .orElse(false);
    if (multipartRequest) {
        try {
            MultiPartFormInputStream multiPartFormInputStream = new MultiPartFormInputStream(request.getData(), request.getHeader(CONTENT_TYPE), /* config */null, /* contextTmpDir */null);
            Map<String, Part> parts = multiPartFormInputStream.getParts().stream()
                    .collect(Collectors.toMap(Part::getName, p -> p));
            // Read the params part exactly once: a second readAllBytes() on the same
            // already-consumed InputStream would yield no data, so reuse these bytes
            // for both logging and parsing.
            byte[] params = parts.get(MULTIPART_PARAMS).getInputStream().readAllBytes();
            log.log(Level.FINE, "Deploy parameters: [{}]", new String(params, StandardCharsets.UTF_8));
            prepareParams = PrepareParams.fromJson(params, tenantName, zookeeperBarrierTimeout);
            Part appPackagePart = parts.get(MULTIPART_APPLICATION_PACKAGE);
            compressedStream = createFromCompressedStream(appPackagePart.getInputStream(), appPackagePart.getContentType());
        } catch (IOException e) {
            log.log(Level.WARNING, "Unable to parse multipart in deploy", e);
            throw new BadRequestException("Request contains invalid data");
        }
    } else {
        prepareParams = PrepareParams.fromHttpRequest(request, tenantName, zookeeperBarrierTimeout);
        compressedStream = createFromCompressedStream(request.getData(), request.getHeader(contentTypeHeader));
    }
    PrepareResult result = applicationRepository.deploy(compressedStream, prepareParams);
    return new SessionPrepareAndActivateResponse(result, request, prepareParams.getApplicationId(), zone);
}
|
prepareParams = PrepareParams.fromJson(parts.get(MULTIPART_PARAMS).getInputStream().readAllBytes(), tenantName, zookeeperBarrierTimeout);
|
// Deploy handler (corrected copy): multipart requests carry parameters and the
// application package as separate parts; plain requests carry parameters as
// properties and the package in the body.
protected HttpResponse handlePOST(HttpRequest request) {
validateDataAndHeader(request, List.of(APPLICATION_X_GZIP, APPLICATION_ZIP, MULTIPART_FORM_DATA));
TenantName tenantName = validateTenant(request);
PrepareParams prepareParams;
CompressedApplicationInputStream compressedStream;
boolean multipartRequest = Optional.ofNullable(request.getHeader(HttpHeaders.Names.CONTENT_TYPE))
.map(val -> val.equalsIgnoreCase(MULTIPART_FORM_DATA))
.orElse(false);
if(multipartRequest) {
try {
MultiPartFormInputStream multiPartFormInputStream = new MultiPartFormInputStream(request.getData(), request.getHeader(CONTENT_TYPE), /* config */null, /* contextTmpDir */null);
Map<String, Part> parts = multiPartFormInputStream.getParts().stream()
.collect(Collectors.toMap(Part::getName, p -> p));
// The part's InputStream is single-use: read it once and reuse the bytes
// for both the debug log line and PrepareParams parsing.
byte[] params = parts.get(MULTIPART_PARAMS).getInputStream().readAllBytes();
log.log(Level.FINE, "Deploy parameters: [{}]", new String(params, StandardCharsets.UTF_8));
prepareParams = PrepareParams.fromJson(params, tenantName, zookeeperBarrierTimeout);
Part appPackagePart = parts.get(MULTIPART_APPLICATION_PACKAGE);
compressedStream = createFromCompressedStream(appPackagePart.getInputStream(), appPackagePart.getContentType());
} catch (IOException e) {
log.log(Level.WARNING, "Unable to parse multipart in deploy", e);
throw new BadRequestException("Request contains invalid data");
}
} else {
// Non-multipart: parameters from request properties, package from the raw body.
prepareParams = PrepareParams.fromHttpRequest(request, tenantName, zookeeperBarrierTimeout);
compressedStream = createFromCompressedStream(request.getData(), request.getHeader(contentTypeHeader));
}
PrepareResult result = applicationRepository.deploy(compressedStream, prepareParams);
return new SessionPrepareAndActivateResponse(result, request, prepareParams.getApplicationId(), zone);
}
|
/**
 * HTTP handler for the application deploy API.
 * NOTE(review): this copy is garbled by extraction — a method body is missing between
 * the two consecutive {@code @Override} annotations below, and the binding pattern
 * literal in getTenantNameFromRequest is truncated.
 */
class ApplicationApiHandler extends SessionHandler {
public final static String APPLICATION_X_GZIP = "application/x-gzip";
public final static String APPLICATION_ZIP = "application/zip";
public final static String MULTIPART_FORM_DATA = "multipart/form-data";
public final static String MULTIPART_PARAMS = "prepareParams";
public final static String MULTIPART_APPLICATION_PACKAGE = "applicationPackage";
public final static String contentTypeHeader = "Content-Type";
private final TenantRepository tenantRepository;
private final Duration zookeeperBarrierTimeout;
private final Zone zone;
@Inject
public ApplicationApiHandler(Context ctx,
ApplicationRepository applicationRepository,
ConfigserverConfig configserverConfig,
Zone zone) {
super(ctx, applicationRepository);
this.tenantRepository = applicationRepository.tenantRepository();
this.zookeeperBarrierTimeout = Duration.ofSeconds(configserverConfig.zookeeper().barrierTimeout());
this.zone = zone;
}
@Override
// NOTE(review): a method (likely handlePOST) is missing here between the annotations.
@Override
public Duration getTimeout() {
// Allow ZooKeeper barrier time plus a margin for the rest of request handling.
return zookeeperBarrierTimeout.plus(Duration.ofSeconds(10));
}
// Resolves and validates the tenant named in the request path.
private TenantName validateTenant(HttpRequest request) {
TenantName tenantName = getTenantNameFromRequest(request);
checkThatTenantExists(tenantRepository, tenantName);
return tenantName;
}
public static TenantName getTenantNameFromRequest(HttpRequest request) {
// NOTE(review): URI binding pattern literal truncated in this copy.
BindingMatch<?> bm = Utils.getBindingMatch(request, "http:
return TenantName.from(bm.group(2));
}
}
|
/**
 * HTTP handler for the application deploy API (duplicate "context_after" copy).
 * NOTE(review): garbled by extraction — a method body is missing between the two
 * consecutive {@code @Override} annotations below, and the binding pattern literal
 * in getTenantNameFromRequest is truncated.
 */
class ApplicationApiHandler extends SessionHandler {
public final static String APPLICATION_X_GZIP = "application/x-gzip";
public final static String APPLICATION_ZIP = "application/zip";
public final static String MULTIPART_FORM_DATA = "multipart/form-data";
public final static String MULTIPART_PARAMS = "prepareParams";
public final static String MULTIPART_APPLICATION_PACKAGE = "applicationPackage";
public final static String contentTypeHeader = "Content-Type";
private final TenantRepository tenantRepository;
private final Duration zookeeperBarrierTimeout;
private final Zone zone;
@Inject
public ApplicationApiHandler(Context ctx,
ApplicationRepository applicationRepository,
ConfigserverConfig configserverConfig,
Zone zone) {
super(ctx, applicationRepository);
this.tenantRepository = applicationRepository.tenantRepository();
this.zookeeperBarrierTimeout = Duration.ofSeconds(configserverConfig.zookeeper().barrierTimeout());
this.zone = zone;
}
@Override
// NOTE(review): a method (likely handlePOST) is missing here between the annotations.
@Override
public Duration getTimeout() {
// Allow ZooKeeper barrier time plus a margin for the rest of request handling.
return zookeeperBarrierTimeout.plus(Duration.ofSeconds(10));
}
// Resolves and validates the tenant named in the request path.
private TenantName validateTenant(HttpRequest request) {
TenantName tenantName = getTenantNameFromRequest(request);
checkThatTenantExists(tenantRepository, tenantName);
return tenantName;
}
public static TenantName getTenantNameFromRequest(HttpRequest request) {
// NOTE(review): URI binding pattern literal truncated in this copy.
BindingMatch<?> bm = Utils.getBindingMatch(request, "http:
return TenantName.from(bm.group(2));
}
}
|
```suggestion Objects.requireNonNull(decryptParameters, "'decryptParameters' cannot be null"); ```
|
/**
 * Decrypts a single block of encrypted data using the configured key.
 *
 * @param decryptParameters The parameters to use in the decryption operation.
 * @return A {@link Mono} containing the decrypted blob.
 * @throws NullPointerException If {@code decryptParameters} is {@code null}.
 */
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
    // Keep the error message in sync with the parameter name (was the stale 'decryptOptions').
    Objects.requireNonNull(decryptParameters, "'decryptParameters' cannot be null");
    try {
        return withContext(context -> decrypt(decryptParameters, context));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the returned Mono instead of throwing.
        return monoError(logger, ex);
    }
}
|
Objects.requireNonNull(decryptParameters, "'decryptOptions' cannot be null");
|
/**
 * Decrypts a single block of encrypted data using the configured key (corrected copy:
 * the null-check message matches the parameter name). Follows the SDK-standard
 * validate / withContext / monoError pattern.
 */
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
Objects.requireNonNull(decryptParameters, "'decryptParameters' cannot be null");
try {
// Propagate the reactor Context into the Context-accepting overload.
return withContext(context -> decrypt(decryptParameters, context));
} catch (RuntimeException ex) {
// Surface synchronous failures through the returned Mono instead of throwing.
return monoError(logger, ex);
}
}
|
class CryptographyAsyncClient {
static final String KEY_VAULT_SCOPE = "https:
static final String SECRETS_COLLECTION = "secrets";
static final String KEYVAULT_TRACING_NAMESPACE_VALUE = "Microsoft.KeyVault";
JsonWebKey key;
private final CryptographyService service;
private CryptographyServiceClient cryptographyServiceClient;
private LocalKeyCryptographyClient localKeyCryptographyClient;
private final ClientLogger logger = new ClientLogger(CryptographyAsyncClient.class);
private String keyCollection;
private final String keyId;
/**
 * Creates a CryptographyAsyncClient that uses {@code pipeline} to service requests.
 *
 * @param jsonWebKey the JSON Web Key to use for cryptography operations; must be valid
 *     and have both key operations and key type configured.
 * @param pipeline HttpPipeline that the HTTP requests and responses flow through; may be
 *     {@code null}, in which case only local cryptography is available.
 * @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
 * @throws NullPointerException if {@code jsonWebKey} is {@code null}.
 * @throws IllegalArgumentException if the key is invalid or incompletely configured.
 */
CryptographyAsyncClient(JsonWebKey jsonWebKey, HttpPipeline pipeline, CryptographyServiceVersion version) {
    Objects.requireNonNull(jsonWebKey, "The Json web key is required.");
    // Log-and-throw through the client logger for consistency with the rest of this
    // class (see initializeCryptoClients); the thrown exception types are unchanged.
    if (!jsonWebKey.isValid()) {
        throw logger.logExceptionAsError(new IllegalArgumentException("Json Web Key is not valid"));
    }
    if (jsonWebKey.getKeyOps() == null) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("Json Web Key's key operations property is not configured"));
    }
    if (jsonWebKey.getKeyType() == null) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("Json Web Key's key type property is not configured"));
    }
    this.key = jsonWebKey;
    this.keyId = key.getId();
    // Without a pipeline no REST proxy is built and only local crypto is possible.
    service = pipeline != null ? RestProxy.create(CryptographyService.class, pipeline) : null;
    if (!Strings.isNullOrEmpty(key.getId()) && version != null && service != null) {
        unpackAndValidateId(key.getId());
        cryptographyServiceClient = new CryptographyServiceClient(key.getId(), service, version);
    } else {
        cryptographyServiceClient = null;
    }
    initializeCryptoClients();
}
/**
 * Creates a CryptographyAsyncClient that uses {@code pipeline} to service requests.
 *
 * @param keyId The Azure Key Vault key identifier to use for cryptography operations.
 * @param pipeline HttpPipeline that the HTTP requests and responses flow through.
 * @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
 */
CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
    // Validate the identifier before wiring anything else up.
    unpackAndValidateId(keyId);
    this.keyId = keyId;
    this.key = null;
    this.service = RestProxy.create(CryptographyService.class, pipeline);
    this.cryptographyServiceClient = new CryptographyServiceClient(keyId, this.service, version);
}
// Lazily selects the local cryptography implementation that matches the key type.
// No-op when a local client has already been chosen.
private void initializeCryptoClients() {
    if (localKeyCryptographyClient != null) {
        return;
    }
    if (key.getKeyType().equals(RSA) || key.getKeyType().equals(RSA_HSM)) {
        localKeyCryptographyClient = new RsaKeyCryptographyClient(key, cryptographyServiceClient);
        return;
    }
    if (key.getKeyType().equals(EC) || key.getKeyType().equals(EC_HSM)) {
        localKeyCryptographyClient = new EcKeyCryptographyClient(key, cryptographyServiceClient);
        return;
    }
    if (key.getKeyType().equals(OCT) || key.getKeyType().equals(OCT_HSM)) {
        localKeyCryptographyClient = new SymmetricKeyCryptographyClient(key, cryptographyServiceClient);
        return;
    }
    // Anything else cannot be handled locally or remotely by this client.
    throw logger.logExceptionAsError(new IllegalArgumentException(String.format(
        "The Json Web Key Type: %s is not supported.", key.getKeyType().toString())));
}
// Emits the configured key identifier; deferred so it is produced at subscription time.
Mono<String> getKeyId() {
    return Mono.defer(() -> Mono.just(this.keyId));
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
* {@codesnippet com.azure.security.keyvault.keys.cryptography.async.cryptographyclient.getKeyWithResponse}
*
* @return A {@link Mono} containing a {@link Response} whose {@link Response
* {@link KeyVaultKey key}.
* @throws ResourceNotFoundException when the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
Mono<Response<KeyVaultKey>> getKeyWithResponse() {
    try {
        // Delegate to the Context-accepting overload via a method reference.
        return withContext(this::getKeyWithResponse);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
/**
* Gets the public part of the configured key. The get key operation is applicable to all key types and it requires
* the {@code keys/get} permission.
*
* <p><strong>Code Samples</strong></p>
* <p>Gets the configured key in the client. Subscribes to the call asynchronously and prints out the returned key
* details when a response has been received.</p>
* {@codesnippet com.azure.security.keyvault.keys.cryptography.cryptographyclient.getKey}
*
* @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
* @throws ResourceNotFoundException when the configured key doesn't exist in the key vault.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
Mono<KeyVaultKey> getKey() {
    try {
        // Fetch with the response wrapper, then unwrap to the bare key.
        Mono<Response<KeyVaultKey>> response = getKeyWithResponse();
        return response.flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
// Service round-trip; only meaningful when a service client was configured.
Mono<Response<KeyVaultKey>> getKeyWithResponse(Context context) {
    return this.cryptographyServiceClient.getKey(context);
}
// Fetches the key material via the secrets collection and unwraps the response.
Mono<JsonWebKey> getSecretKey() {
    try {
        return withContext(cryptographyServiceClient::getSecretKey)
            .flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports a
* single block of data, the size of which is dependent on the target key and the encryption algorithm to be used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys public
* portion of the key is used for encryption. This operation requires the keys/encrypt permission.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting the
* specified {@code plainText}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param algorithm The algorithm to be used for encryption.
* @param plainText The content to be encrypted.
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult
* contains the encrypted content.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
* @throws NullPointerException If {@code algorithm} or {@code plainText} are {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plainText) {
    // Delegate to the parameter-based public overload so null-validation and reactor
    // Context propagation (withContext) apply, mirroring decrypt(EncryptionAlgorithm, byte[]).
    // Previously this passed a null Context directly to the internal overload.
    return encrypt(new EncryptParameters(algorithm, plainText, null, null));
}
/**
* Encrypts an arbitrary sequence of bytes using the configured key. Note that the encrypt operation only supports a
* single block of data, the size of which is dependent on the target key and the encryption algorithm to be used.
* The encrypt operation is supported for both symmetric keys and asymmetric keys. In case of asymmetric keys public
* portion of the key is used for encryption. This operation requires the keys/encrypt permission.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for encrypting the
* specified {@code plainText}. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Encrypts the content. Subscribes to the call asynchronously and prints out the encrypted content details when
* a response has been received.</p>
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.encrypt
*
* @param encryptParameters The parameters to use in the encryption operation.
* @return A {@link Mono} containing a {@link EncryptResult} whose {@link EncryptResult
* contains the encrypted content.
* @throws ResourceNotFoundException If the key cannot be found for encryption.
* @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
* @throws NullPointerException If {@code encryptParameters} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
    // Keep the error message in sync with the parameter name (was the stale 'encryptOptions',
    // same defect as the decrypt overload's former 'decryptOptions').
    Objects.requireNonNull(encryptParameters, "'encryptParameters' cannot be null");
    try {
        return withContext(context -> encrypt(encryptParameters, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
// Encrypts locally when valid key material and the ENCRYPT permission are present;
// otherwise forwards the operation to the Key Vault service.
Mono<EncryptResult> encrypt(EncryptParameters encryptParameters, Context context) {
    return ensureValidKeyAvailable().flatMap(available -> {
        if (!available) {
            // No usable local key material: let the service perform the operation.
            return cryptographyServiceClient.encrypt(encryptParameters, context);
        }
        if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.ENCRYPT)) {
            return localKeyCryptographyClient.encryptAsync(encryptParameters, context, key);
        }
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
            "Encrypt Operation is missing permission/not supported for key with id %s", key.getId()))));
    });
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm to
* be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires the
* keys/decrypt permission.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting the
* specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param algorithm The algorithm to be used for decryption.
* @param cipherText The content to be decrypted.
* @return A {@link Mono} containing the decrypted blob.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
* @throws NullPointerException If {@code algorithm} or {@code cipherText} are {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] cipherText) {
    // Wrap the legacy-style arguments and reuse the parameter-based overload.
    DecryptParameters parameters = new DecryptParameters(algorithm, cipherText, null, null, null);
    return decrypt(parameters);
}
/**
* Decrypts a single block of encrypted data using the configured key and specified algorithm. Note that only a
* single block of data may be decrypted, the size of this block is dependent on the target key and the algorithm to
* be used. The decrypt operation is supported for both asymmetric and symmetric keys. This operation requires the
* keys/decrypt permission.
*
* <p>The {@link EncryptionAlgorithm encryption algorithm} indicates the type of algorithm to use for decrypting the
* specified encrypted content. Possible values for asymmetric keys include:
* {@link EncryptionAlgorithm
* EncryptionAlgorithm
*
* Possible values for symmetric keys include: {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
* {@link EncryptionAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Decrypts the encrypted content. Subscribes to the call asynchronously and prints out the decrypted content
* details when a response has been received.</p>
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.decrypt
*
* @param decryptParameters The parameters to use in the decryption operation.
* @return A {@link Mono} containing the decrypted blob.
* @throws ResourceNotFoundException If the key cannot be found for decryption.
* @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
* @throws NullPointerException If {@code decryptParameters} is {@code null}.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
// Decrypts locally when valid key material and the DECRYPT permission are present;
// otherwise forwards the operation to the Key Vault service.
Mono<DecryptResult> decrypt(DecryptParameters decryptParameters, Context context) {
    return ensureValidKeyAvailable().flatMap(available -> {
        if (!available) {
            return cryptographyServiceClient.decrypt(decryptParameters, context);
        }
        if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.DECRYPT)) {
            return localKeyCryptographyClient.decryptAsync(decryptParameters, context, key);
        }
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
            "Decrypt Operation is not allowed for key with id %s", key.getId()))));
    });
}
/**
* Creates a signature from a digest using the configured key. The sign operation supports both asymmetric and
* symmetric keys. This operation requires the keys/sign permission.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to create the
* signature from the digest. Possible values include:
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Signs the digest. Subscribes to the call asynchronously and prints out the signature details when a response
* has been received.</p>
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.sign
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature is to be created.
* @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult
* the created signature.
* @throws ResourceNotFoundException if the key cannot be found for signing.
* @throws UnsupportedOperationException if the sign operation is not supported or configured on the key.
* @throws NullPointerException if {@code algorithm} or {@code digest} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
    try {
        // Hand off to the Context-accepting overload with the captured arguments.
        return withContext(ctx -> sign(algorithm, digest, ctx));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
// Signs locally when valid key material and the SIGN permission are present;
// otherwise forwards the operation to the Key Vault service.
Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(digest, "Digest content to be signed cannot be null.");
    return ensureValidKeyAvailable().flatMap(available -> {
        if (!available) {
            return cryptographyServiceClient.sign(algorithm, digest, context);
        }
        if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.SIGN)) {
            return localKeyCryptographyClient.signAsync(algorithm, digest, context, key);
        }
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format("Sign Operation is not allowed for key "
            + "with id %s", key.getId()))));
    });
}
/**
* Verifies a signature using the configured key. The verify operation supports both symmetric keys and asymmetric
* keys. In case of asymmetric keys, the public portion of the key is used to verify the signature. This operation
* requires the keys/verify permission.
*
* <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
* signature. Possible values include:
* {@link SignatureAlgorithm
* ES512}, {@link SignatureAlgorithm
* SignatureAlgorithm
* {@link SignatureAlgorithm
*
* <p><strong>Code Samples</strong></p>
* <p>Verifies the signature against the specified digest. Subscribes to the call asynchronously and prints out the
* verification details when a response has been received.</p>
* {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verify
*
* @param algorithm The algorithm to use for signing.
* @param digest The content from which signature is to be created.
* @param signature The signature to be verified.
* @return A {@link Mono} containing a {@link Boolean} indicating the signature verification result.
* @throws ResourceNotFoundException if the key cannot be found for verifying.
* @throws UnsupportedOperationException if the verify operation is not supported or configured on the key.
* @throws NullPointerException if {@code algorithm}, {@code digest} or {@code signature} is null.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
    try {
        // Hand off to the Context-accepting overload with the captured arguments.
        return withContext(ctx -> verify(algorithm, digest, signature, ctx));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
/**
 * Context-aware implementation of {@link #verify(SignatureAlgorithm, byte[], byte[])}. Verifies locally when a
 * valid key with the VERIFY permission is cached; otherwise delegates to the service.
 */
Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(digest, "Digest content cannot be null.");
    Objects.requireNonNull(signature, "Signature to be verified cannot be null.");
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (!localKeyUsable) {
            // No usable local key material; perform the operation service-side.
            return cryptographyServiceClient.verify(algorithm, digest, signature, context);
        }
        if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.VERIFY)) {
            return localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, context, key);
        }
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(
            String.format("Verify Operation is not allowed for key with id %s", key.getId()))));
    });
}
/**
 * Wraps a symmetric key using the configured key. Supports wrapping with both symmetric and asymmetric keys.
 * Requires the {@code keys/wrapKey} permission.
 *
 * @param algorithm The {@link KeyWrapAlgorithm} to use for wrapping the key.
 * @param key The key content to be wrapped.
 * @return A {@link Mono} containing a {@link WrapResult} with the wrapped key.
 * @throws ResourceNotFoundException If the key cannot be found for the wrap operation.
 * @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
 * @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
    try {
        // Propagate the subscriber's reactive context into the context-aware overload.
        return withContext(ctx -> wrapKey(algorithm, key, ctx));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
/**
 * Context-aware implementation of {@link #wrapKey(KeyWrapAlgorithm, byte[])}. Wraps locally when a valid key with
 * the WRAP_KEY permission is cached; otherwise delegates to the service.
 */
Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key, Context context) {
    Objects.requireNonNull(algorithm, "Key Wrap algorithm cannot be null.");
    Objects.requireNonNull(key, "Key content to be wrapped cannot be null.");
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (!localKeyUsable) {
            // Fall back to the remote service when no valid local key material is cached.
            return cryptographyServiceClient.wrapKey(algorithm, key, context);
        }
        if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.WRAP_KEY)) {
            return localKeyCryptographyClient.wrapKeyAsync(algorithm, key, context, this.key);
        }
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
            "Wrap Key Operation is not allowed for key with id %s", this.key.getId()))));
    });
}
/**
 * Unwraps a symmetric key using the configured key that originally wrapped it. This is the reverse of the wrap
 * operation; supports asymmetric and symmetric keys. Requires the {@code keys/unwrapKey} permission.
 *
 * @param algorithm The {@link KeyWrapAlgorithm} originally used to wrap the key.
 * @param encryptedKey The encrypted key content to unwrap.
 * @return A {@link Mono} containing the unwrapped key content.
 * @throws ResourceNotFoundException If the key cannot be found for the unwrap operation.
 * @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
 * @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
    try {
        // Propagate the subscriber's reactive context into the context-aware overload.
        return withContext(ctx -> unwrapKey(algorithm, encryptedKey, ctx));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
/**
 * Context-aware implementation of {@link #unwrapKey(KeyWrapAlgorithm, byte[])}. Unwraps locally when a valid key
 * with the UNWRAP_KEY permission is cached; otherwise delegates to the service.
 */
Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, Context context) {
    Objects.requireNonNull(algorithm, "Key Wrap algorithm cannot be null.");
    Objects.requireNonNull(encryptedKey, "Encrypted key content to be unwrapped cannot be null.");
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (!localKeyUsable) {
            // Fall back to the remote service when no valid local key material is cached.
            return cryptographyServiceClient.unwrapKey(algorithm, encryptedKey, context);
        }
        if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.UNWRAP_KEY)) {
            return localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, context, key);
        }
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
            "Unwrap Key Operation is not allowed for key with id %s", this.key.getId()))));
    });
}
/**
 * Creates a signature from the raw data using the configured key. Supports both asymmetric and symmetric keys.
 * Requires the {@code keys/sign} permission.
 *
 * @param algorithm The {@link SignatureAlgorithm} to use for signing.
 * @param data The content from which the signature is to be created.
 * @return A {@link Mono} containing a {@link SignResult} with the created signature.
 * @throws ResourceNotFoundException if the key cannot be found for signing.
 * @throws UnsupportedOperationException if the sign operation is not supported or configured on the key.
 * @throws NullPointerException if {@code algorithm} or {@code data} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
    try {
        // Propagate the subscriber's reactive context into the context-aware overload.
        return withContext(ctx -> signData(algorithm, data, ctx));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
/**
 * Context-aware implementation of {@link #signData(SignatureAlgorithm, byte[])}. Signs locally when a valid key
 * with the SIGN permission is cached; otherwise delegates to the service.
 */
Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(data, "Data to be signed cannot be null.");
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (!localKeyUsable) {
            // No usable local key material; perform the operation service-side.
            return cryptographyServiceClient.signData(algorithm, data, context);
        }
        if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.SIGN)) {
            return localKeyCryptographyClient.signDataAsync(algorithm, data, context, key);
        }
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(
            String.format("Sign Operation is not allowed for key with id %s", this.key.getId()))));
    });
}
/**
 * Verifies a signature against raw data using the configured key. Supports both symmetric and asymmetric keys;
 * for asymmetric keys the public portion of the key is used. Requires the {@code keys/verify} permission.
 *
 * @param algorithm The {@link SignatureAlgorithm} originally used to create the signature.
 * @param data The raw content against which the signature is to be verified.
 * @param signature The signature bytes to verify.
 * @return A {@link Mono} containing a {@link VerifyResult} with the verification outcome.
 * @throws ResourceNotFoundException if the key cannot be found for verifying.
 * @throws UnsupportedOperationException if the verify operation is not supported or configured on the key.
 * @throws NullPointerException if {@code algorithm}, {@code data} or {@code signature} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
    try {
        // Propagate the subscriber's reactive context into the context-aware overload.
        return withContext(ctx -> verifyData(algorithm, data, signature, ctx));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
/**
 * Context-aware implementation of {@link #verifyData(SignatureAlgorithm, byte[], byte[])}. Verifies locally when
 * a valid key with the VERIFY permission is cached; otherwise delegates to the service.
 */
Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(data, "Data cannot be null.");
    Objects.requireNonNull(signature, "Signature to be verified cannot be null.");
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (!localKeyUsable) {
            // No usable local key material; perform the operation service-side.
            return cryptographyServiceClient.verifyData(algorithm, data, signature, context);
        }
        if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.VERIFY)) {
            return localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, context, key);
        }
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
            "Verify Operation is not allowed for key with id %s", this.key.getId()))));
    });
}
private void unpackAndValidateId(String keyId) {
if (CoreUtils.isNullOrEmpty(keyId)) {
throw logger.logExceptionAsError(new IllegalArgumentException("Key Id is invalid"));
}
try {
URL url = new URL(keyId);
String[] tokens = url.getPath().split("/");
String endpoint = url.getProtocol() + ":
String keyName = (tokens.length >= 3 ? tokens[2] : null);
String version = (tokens.length >= 4 ? tokens[3] : null);
this.keyCollection = (tokens.length >= 2 ? tokens[1] : null);
if (Strings.isNullOrEmpty(endpoint)) {
throw logger.logExceptionAsError(new IllegalArgumentException("Key endpoint in key id is invalid"));
} else if (Strings.isNullOrEmpty(keyName)) {
throw logger.logExceptionAsError(new IllegalArgumentException("Key name in key id is invalid"));
} else if (Strings.isNullOrEmpty(version)) {
throw logger.logExceptionAsError(new IllegalArgumentException("Key version in key id is invalid"));
}
} catch (MalformedURLException e) {
throw logger.logExceptionAsError(new IllegalArgumentException("The key identifier is malformed", e));
}
}
// Returns true when the requested operation appears in the key's permitted-operations list.
private boolean checkKeyPermissions(List<KeyOperation> operations, KeyOperation keyOperation) {
    return operations.indexOf(keyOperation) >= 0;
}
/**
 * Ensures key material is present and valid before a crypto operation.
 * <p>Emits {@code true} when a valid {@link JsonWebKey} is cached (fetching and caching it first if necessary,
 * and initializing the local crypto clients); emits {@code false} when the fetched key is not valid for local
 * use, signalling callers to run the operation service-side instead.</p>
 */
private Mono<Boolean> ensureValidKeyAvailable() {
    boolean keyNotAvailable = (key == null && keyCollection != null);
    boolean keyNotValid = (key != null && !key.isValid());
    if (keyNotAvailable || keyNotValid) {
        // Constant-first equals: keyCollection may be null when the client was built from a raw JsonWebKey
        // that carries no identifier, so keyCollection.equals(...) could throw NPE here.
        if (SECRETS_COLLECTION.equals(keyCollection)) {
            // Symmetric key material stored as a secret.
            return getSecretKey().map(jsonWebKey -> {
                key = jsonWebKey;
                if (key.isValid()) {
                    initializeCryptoClients();
                    return true;
                } else {
                    return false;
                }
            });
        } else {
            // Regular key: only the public portion is retrievable.
            return getKey().map(keyVaultKey -> {
                key = keyVaultKey.getKey();
                if (key.isValid()) {
                    initializeCryptoClients();
                    return true;
                } else {
                    return false;
                }
            });
        }
    } else {
        return Mono.defer(() -> Mono.just(true));
    }
}
// Package-private accessor (used by tests) for the service-backed client.
CryptographyServiceClient getCryptographyServiceClient() {
    return this.cryptographyServiceClient;
}
// Package-private mutator (used by tests) to swap in an alternative service-backed client.
void setCryptographyServiceClient(CryptographyServiceClient serviceClient) {
    cryptographyServiceClient = serviceClient;
}
}
// ---- NOTE(review): stray '|' artifact removed; a second copy of CryptographyAsyncClient follows ----
class CryptographyAsyncClient {
static final String KEY_VAULT_SCOPE = "https:
static final String SECRETS_COLLECTION = "secrets";
static final String KEYVAULT_TRACING_NAMESPACE_VALUE = "Microsoft.KeyVault";
JsonWebKey key;
private final CryptographyService service;
private CryptographyServiceClient cryptographyServiceClient;
private LocalKeyCryptographyClient localKeyCryptographyClient;
private final ClientLogger logger = new ClientLogger(CryptographyAsyncClient.class);
private String keyCollection;
private final String keyId;
private final HttpPipeline pipeline;
/**
 * Creates a CryptographyAsyncClient that performs cryptography operations with the given key material.
 *
 * @param jsonWebKey The JSON web key to use for cryptography operations; must be valid with its key type and key
 * operations configured.
 * @param pipeline HttpPipeline that the HTTP requests and responses flow through; may be null, in which case no
 * service fallback is available.
 * @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
 * @throws NullPointerException if {@code jsonWebKey} is null.
 * @throws IllegalArgumentException if {@code jsonWebKey} is invalid or incompletely configured.
 */
CryptographyAsyncClient(JsonWebKey jsonWebKey, HttpPipeline pipeline, CryptographyServiceVersion version) {
    Objects.requireNonNull(jsonWebKey, "The Json web key is required.");
    // Route validation failures through the logger for consistency with the rest of this class.
    if (!jsonWebKey.isValid()) {
        throw logger.logExceptionAsError(new IllegalArgumentException("Json Web Key is not valid"));
    }
    if (jsonWebKey.getKeyOps() == null) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("Json Web Key's key operations property is not configured"));
    }
    if (jsonWebKey.getKeyType() == null) {
        throw logger.logExceptionAsError(
            new IllegalArgumentException("Json Web Key's key type property is not configured"));
    }
    this.key = jsonWebKey;
    this.keyId = key.getId();
    this.pipeline = pipeline;
    service = pipeline != null ? RestProxy.create(CryptographyService.class, pipeline) : null;
    if (!Strings.isNullOrEmpty(key.getId()) && version != null && service != null) {
        // A full key identifier lets us fall back to the remote service for unsupported operations.
        unpackAndValidateId(key.getId());
        cryptographyServiceClient = new CryptographyServiceClient(key.getId(), service, version);
    } else {
        cryptographyServiceClient = null;
    }
    initializeCryptoClients();
}
/**
 * Creates a CryptographyAsyncClient bound to a remote Key Vault key; key material is fetched lazily on first use.
 *
 * @param keyId The Azure Key Vault key identifier to use for cryptography operations.
 * @param pipeline HttpPipeline that the HTTP requests and responses flow through.
 * @param version {@link CryptographyServiceVersion} of the service to be used when making requests.
 */
CryptographyAsyncClient(String keyId, HttpPipeline pipeline, CryptographyServiceVersion version) {
    // Fail fast on a malformed identifier before touching any other state.
    unpackAndValidateId(keyId);
    this.key = null;
    this.keyId = keyId;
    this.pipeline = pipeline;
    this.service = RestProxy.create(CryptographyService.class, pipeline);
    this.cryptographyServiceClient = new CryptographyServiceClient(keyId, this.service, version);
}
/**
 * Lazily creates the local cryptography client matching the cached key's type; no-op when already initialized.
 *
 * @throws IllegalArgumentException if the key type is not an RSA, EC or OCT variant.
 */
private void initializeCryptoClients() {
    if (localKeyCryptographyClient != null) {
        return; // Already initialized.
    }
    if (key.getKeyType().equals(RSA) || key.getKeyType().equals(RSA_HSM)) {
        localKeyCryptographyClient = new RsaKeyCryptographyClient(key, cryptographyServiceClient);
        return;
    }
    if (key.getKeyType().equals(EC) || key.getKeyType().equals(EC_HSM)) {
        localKeyCryptographyClient = new EcKeyCryptographyClient(key, cryptographyServiceClient);
        return;
    }
    if (key.getKeyType().equals(OCT) || key.getKeyType().equals(OCT_HSM)) {
        localKeyCryptographyClient = new SymmetricKeyCryptographyClient(key, cryptographyServiceClient);
        return;
    }
    throw logger.logExceptionAsError(new IllegalArgumentException(String.format(
        "The Json Web Key Type: %s is not supported.", key.getKeyType().toString())));
}
/**
 * Gets the {@link HttpPipeline} powering this client.
 *
 * @return The pipeline.
 */
HttpPipeline getHttpPipeline() {
    return pipeline;
}
// Lazily emits the configured key identifier.
Mono<String> getKeyId() {
    return Mono.fromSupplier(() -> keyId);
}
/**
 * Gets the public part of the configured key, with the full HTTP response. Applicable to all key types; requires
 * the {@code keys/get} permission.
 *
 * @return A {@link Mono} containing a {@link Response} whose value is the requested {@link KeyVaultKey key}.
 * @throws ResourceNotFoundException when the configured key doesn't exist in the key vault.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
Mono<Response<KeyVaultKey>> getKeyWithResponse() {
    try {
        // Forward the reactive context straight into the context-aware overload.
        return withContext(this::getKeyWithResponse);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
/**
 * Gets the public part of the configured key. Applicable to all key types; requires the {@code keys/get}
 * permission.
 *
 * @return A {@link Mono} containing the requested {@link KeyVaultKey key}.
 * @throws ResourceNotFoundException when the configured key doesn't exist in the key vault.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
Mono<KeyVaultKey> getKey() {
    try {
        // Unwrap the Response to surface just its value.
        return getKeyWithResponse().flatMap(keyResponse -> FluxUtil.toMono(keyResponse));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
// Fetches the configured key from the service using the supplied context.
Mono<Response<KeyVaultKey>> getKeyWithResponse(Context context) {
    return this.cryptographyServiceClient.getKey(context);
}
// Fetches key material that is stored as a Key Vault secret (the "secrets" collection).
Mono<JsonWebKey> getSecretKey() {
    try {
        return withContext(cryptographyServiceClient::getSecretKey).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
/**
 * Encrypts an arbitrary sequence of bytes using the configured key. Only a single block of data is supported; its
 * size depends on the target key and the encryption algorithm. Supported for both symmetric and asymmetric keys
 * (for asymmetric keys the public portion is used). Requires the {@code keys/encrypt} permission.
 *
 * @param algorithm The {@link EncryptionAlgorithm} to be used for encryption.
 * @param plaintext The content to be encrypted.
 * @return A {@link Mono} containing an {@link EncryptResult} with the encrypted content.
 * @throws ResourceNotFoundException If the key cannot be found for encryption.
 * @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
 * @throws NullPointerException If {@code algorithm} or {@code plaintext} are {@code null}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptionAlgorithm algorithm, byte[] plaintext) {
    // Delegate to the public EncryptParameters overload so the reactive Context is captured via withContext,
    // mirroring decrypt(EncryptionAlgorithm, byte[]). The original passed an explicit null Context, bypassing
    // withContext entirely.
    return encrypt(new EncryptParameters(algorithm, plaintext, null, null));
}
/**
 * Encrypts an arbitrary sequence of bytes using the configured key and the algorithm, plaintext, IV and
 * additional authenticated data carried by {@code encryptParameters}. Only a single block of data is supported;
 * its size depends on the target key and the encryption algorithm. Supported for both symmetric and asymmetric
 * keys. Requires the {@code keys/encrypt} permission.
 *
 * @param encryptParameters The parameters to use in the encryption operation.
 * @return A {@link Mono} containing an {@link EncryptResult} with the encrypted content.
 * @throws ResourceNotFoundException If the key cannot be found for encryption.
 * @throws UnsupportedOperationException If the encrypt operation is not supported or configured on the key.
 * @throws NullPointerException If {@code encryptParameters} is {@code null}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<EncryptResult> encrypt(EncryptParameters encryptParameters) {
    Objects.requireNonNull(encryptParameters, "'encryptParameters' cannot be null");
    try {
        // Propagate the subscriber's reactive context into the context-aware overload.
        return withContext(ctx -> encrypt(encryptParameters, ctx));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
// Encrypts locally when a valid key with the ENCRYPT permission is cached; otherwise delegates to the service.
Mono<EncryptResult> encrypt(EncryptParameters encryptParameters, Context context) {
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (!localKeyUsable) {
            return cryptographyServiceClient.encrypt(encryptParameters, context);
        }
        if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.ENCRYPT)) {
            return localKeyCryptographyClient.encryptAsync(encryptParameters, context, key);
        }
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
            "Encrypt Operation is missing permission/not supported for key with id %s", key.getId()))));
    });
}
/**
 * Decrypts a single block of encrypted data using the configured key and specified algorithm. Only a single
 * block may be decrypted; its size depends on the target key and algorithm. Supported for both asymmetric and
 * symmetric keys. Requires the {@code keys/decrypt} permission.
 *
 * @param algorithm The {@link EncryptionAlgorithm} to be used for decryption.
 * @param ciphertext The content to be decrypted.
 * @return A {@link Mono} containing the decrypted blob.
 * @throws ResourceNotFoundException If the key cannot be found for decryption.
 * @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
 * @throws NullPointerException If {@code algorithm} or {@code ciphertext} are {@code null}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(EncryptionAlgorithm algorithm, byte[] ciphertext) {
    // Wrap the arguments and delegate to the DecryptParameters overload.
    DecryptParameters decryptParameters = new DecryptParameters(algorithm, ciphertext, null, null, null);
    return decrypt(decryptParameters);
}
/**
 * Decrypts a single block of encrypted data using the configured key and the algorithm, ciphertext, IV and
 * authentication data carried by {@code decryptParameters}. Only a single block may be decrypted; its size
 * depends on the target key and algorithm. Supported for both asymmetric and symmetric keys. Requires the
 * {@code keys/decrypt} permission.
 *
 * @param decryptParameters The parameters to use in the decryption operation.
 * @return A {@link Mono} containing the decrypted blob.
 * @throws ResourceNotFoundException If the key cannot be found for decryption.
 * @throws UnsupportedOperationException If the decrypt operation is not supported or configured on the key.
 * @throws NullPointerException If {@code decryptParameters} is {@code null}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DecryptResult> decrypt(DecryptParameters decryptParameters) {
    // NOTE(review): this public overload was missing even though decrypt(EncryptionAlgorithm, byte[]) calls it
    // and the Javadoc/@ServiceMethod for it were present; restored to mirror encrypt(EncryptParameters).
    Objects.requireNonNull(decryptParameters, "'decryptParameters' cannot be null");
    try {
        return withContext(context -> decrypt(decryptParameters, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

// Decrypts locally when a valid key with the DECRYPT permission is cached; otherwise delegates to the service.
Mono<DecryptResult> decrypt(DecryptParameters decryptParameters, Context context) {
    return ensureValidKeyAvailable().flatMap(available -> {
        if (!available) {
            return cryptographyServiceClient.decrypt(decryptParameters, context);
        }
        if (!checkKeyPermissions(this.key.getKeyOps(), KeyOperation.DECRYPT)) {
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Decrypt Operation is not allowed for key with id %s", key.getId()))));
        }
        return localKeyCryptographyClient.decryptAsync(decryptParameters, context, key);
    });
}
/**
 * Creates a signature from a digest using the configured key. Supports both asymmetric and symmetric keys.
 * Requires the {@code keys/sign} permission.
 *
 * @param algorithm The {@link SignatureAlgorithm} to use for signing.
 * @param digest The content from which the signature is to be created.
 * @return A {@link Mono} containing a {@link SignResult} with the created signature.
 * @throws ResourceNotFoundException if the key cannot be found for signing.
 * @throws UnsupportedOperationException if the sign operation is not supported or configured on the key.
 * @throws NullPointerException if {@code algorithm} or {@code digest} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest) {
    try {
        // Propagate the subscriber's reactive context into the context-aware overload.
        return withContext(ctx -> sign(algorithm, digest, ctx));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
/**
 * Context-aware implementation of {@link #sign(SignatureAlgorithm, byte[])}. Signs locally when a valid key with
 * the SIGN permission is cached; otherwise delegates to the service.
 */
Mono<SignResult> sign(SignatureAlgorithm algorithm, byte[] digest, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(digest, "Digest content to be signed cannot be null.");
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (!localKeyUsable) {
            // No usable local key material; perform the operation service-side.
            return cryptographyServiceClient.sign(algorithm, digest, context);
        }
        if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.SIGN)) {
            return localKeyCryptographyClient.signAsync(algorithm, digest, context, key);
        }
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(
            String.format("Sign Operation is not allowed for key with id %s", key.getId()))));
    });
}
/**
 * Verifies a signature over the given digest using the configured key. Supports both symmetric and asymmetric
 * keys; for asymmetric keys the public portion of the key is used. Requires the {@code keys/verify} permission.
 *
 * @param algorithm The {@link SignatureAlgorithm} originally used to create the signature.
 * @param digest The digest the signature was computed over.
 * @param signature The signature bytes to verify.
 * @return A {@link Mono} containing a {@link VerifyResult} with the verification outcome.
 * @throws ResourceNotFoundException if the key cannot be found for verifying.
 * @throws UnsupportedOperationException if the verify operation is not supported or configured on the key.
 * @throws NullPointerException if {@code algorithm}, {@code digest} or {@code signature} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature) {
    try {
        // Propagate the subscriber's reactive context into the context-aware overload.
        return withContext(ctx -> verify(algorithm, digest, signature, ctx));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
/**
 * Context-aware implementation of {@link #verify(SignatureAlgorithm, byte[], byte[])}. Verifies locally when a
 * valid key with the VERIFY permission is cached; otherwise delegates to the service.
 */
Mono<VerifyResult> verify(SignatureAlgorithm algorithm, byte[] digest, byte[] signature, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(digest, "Digest content cannot be null.");
    Objects.requireNonNull(signature, "Signature to be verified cannot be null.");
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (!localKeyUsable) {
            // No usable local key material; perform the operation service-side.
            return cryptographyServiceClient.verify(algorithm, digest, signature, context);
        }
        if (checkKeyPermissions(this.key.getKeyOps(), KeyOperation.VERIFY)) {
            return localKeyCryptographyClient.verifyAsync(algorithm, digest, signature, context, key);
        }
        return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(
            String.format("Verify Operation is not allowed for key with id %s", key.getId()))));
    });
}
/**
 * Wraps a symmetric key using the configured key. Supports wrapping with both symmetric and asymmetric keys.
 * Requires the {@code keys/wrapKey} permission.
 *
 * @param algorithm The {@link KeyWrapAlgorithm} to use for wrapping the key.
 * @param key The key content to be wrapped.
 * @return A {@link Mono} containing a {@link WrapResult} with the wrapped key.
 * @throws ResourceNotFoundException If the key cannot be found for the wrap operation.
 * @throws UnsupportedOperationException If the wrap operation is not supported or configured on the key.
 * @throws NullPointerException If {@code algorithm} or {@code key} are {@code null}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key) {
    try {
        // Propagate the subscriber's reactive context into the context-aware overload.
        return withContext(ctx -> wrapKey(algorithm, key, ctx));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}
/**
 * Wraps the given key material with the configured key, dispatching to the Key Vault service when
 * no valid local key is available and to the local crypto client otherwise.
 */
Mono<WrapResult> wrapKey(KeyWrapAlgorithm algorithm, byte[] key, Context context) {
    Objects.requireNonNull(algorithm, "Key Wrap algorithm cannot be null.");
    Objects.requireNonNull(key, "Key content to be wrapped cannot be null.");
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (!localKeyUsable) {
            // No usable local key material; let the service perform the operation.
            return cryptographyServiceClient.wrapKey(algorithm, key, context);
        }
        boolean permitted = checkKeyPermissions(this.key.getKeyOps(), KeyOperation.WRAP_KEY);
        if (!permitted) {
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Wrap Key Operation is not allowed for key with id %s", this.key.getId()))));
        }
        return localKeyCryptographyClient.wrapKeyAsync(algorithm, key, context, this.key);
    });
}
/**
 * Unwraps a symmetric key using the configured key that was initially used for wrapping that key. This operation is
 * the reverse of the wrap operation. The unwrap operation supports asymmetric and symmetric keys to unwrap. This
 * operation requires the keys/unwrapKey permission.
 *
 * <p>The {@link KeyWrapAlgorithm wrap algorithm} indicates the type of algorithm to use for unwrapping the
 * specified encrypted key content. Possible values for asymmetric keys include:
 * {@link KeyWrapAlgorithm#RSA1_5 RSA1_5}, {@link KeyWrapAlgorithm#RSA_OAEP RSA_OAEP} and
 * {@link KeyWrapAlgorithm#RSA_OAEP_256 RSA_OAEP_256}.
 *
 * Possible values for symmetric keys include: {@link KeyWrapAlgorithm#A128KW A128KW},
 * {@link KeyWrapAlgorithm#A192KW A192KW} and {@link KeyWrapAlgorithm#A256KW A256KW}.
 *
 * <p><strong>Code Samples</strong></p>
 * <p>Unwraps the key content. Subscribes to the call asynchronously and prints out the unwrapped key details when a
 * response has been received.</p>
 * {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.unwrapKey}
 *
 * @param algorithm The encryption algorithm to use for wrapping the key.
 * @param encryptedKey The encrypted key content to unwrap.
 * @return A {@link Mono} containing the unwrapped key content.
 * @throws ResourceNotFoundException If the key cannot be found for wrap operation.
 * @throws UnsupportedOperationException If the unwrap operation is not supported or configured on the key.
 * @throws NullPointerException If {@code algorithm} or {@code encryptedKey} are {@code null}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey) {
    try {
        // Capture the reactive Context and delegate to the context-aware overload.
        return withContext(context -> unwrapKey(algorithm, encryptedKey, context));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the returned Mono instead of throwing.
        return monoError(logger, ex);
    }
}
/**
 * Unwraps the given encrypted key, dispatching to the Key Vault service when no valid local key is
 * available and to the local crypto client otherwise.
 */
Mono<UnwrapResult> unwrapKey(KeyWrapAlgorithm algorithm, byte[] encryptedKey, Context context) {
    Objects.requireNonNull(algorithm, "Key Wrap algorithm cannot be null.");
    Objects.requireNonNull(encryptedKey, "Encrypted key content to be unwrapped cannot be null.");
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (!localKeyUsable) {
            // No usable local key material; let the service perform the operation.
            return cryptographyServiceClient.unwrapKey(algorithm, encryptedKey, context);
        }
        boolean permitted = checkKeyPermissions(this.key.getKeyOps(), KeyOperation.UNWRAP_KEY);
        if (!permitted) {
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Unwrap Key Operation is not allowed for key with id %s", this.key.getId()))));
        }
        return localKeyCryptographyClient.unwrapKeyAsync(algorithm, encryptedKey, context, key);
    });
}
/**
 * Creates a signature from the raw data using the configured key. The sign data operation supports both asymmetric
 * and symmetric keys. This operation requires the keys/sign permission.
 *
 * <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to sign the digest.
 * Possible values include:
 * {@link SignatureAlgorithm#ES256 ES256}, {@link SignatureAlgorithm#ES384 ES384}, {@link SignatureAlgorithm#ES512
 * ES512}, {@link SignatureAlgorithm#ES256K ES256K},
 * {@link SignatureAlgorithm#PS256 PS256}, {@link SignatureAlgorithm#PS384 PS384}, {@link
 * SignatureAlgorithm#PS512 PS512}, {@link SignatureAlgorithm#RS256 RS256}, {@link SignatureAlgorithm#RS384 RS384} and
 * {@link SignatureAlgorithm#RS512 RS512}.
 *
 * <p><strong>Code Samples</strong></p>
 * <p>Signs the raw data. Subscribes to the call asynchronously and prints out the signature details when a response
 * has been received.</p>
 * {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.signData}
 *
 * @param algorithm The algorithm to use for signing.
 * @param data The content from which signature is to be created.
 * @return A {@link Mono} containing a {@link SignResult} whose {@link SignResult#getSignature() signature} contains
 * the created signature.
 * @throws ResourceNotFoundException if the key cannot be found for signing.
 * @throws UnsupportedOperationException if the sign operation is not supported or configured on the key.
 * @throws NullPointerException if {@code algorithm} or {@code data} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data) {
    try {
        // Capture the reactive Context and delegate to the context-aware overload.
        return withContext(context -> signData(algorithm, data, context));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the returned Mono instead of throwing.
        return monoError(logger, ex);
    }
}
/**
 * Signs the given raw data, dispatching to the Key Vault service when no valid local key is
 * available and to the local crypto client otherwise.
 */
Mono<SignResult> signData(SignatureAlgorithm algorithm, byte[] data, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(data, "Data to be signed cannot be null.");
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (!localKeyUsable) {
            // No usable local key material; let the service perform the operation.
            return cryptographyServiceClient.signData(algorithm, data, context);
        }
        boolean permitted = checkKeyPermissions(this.key.getKeyOps(), KeyOperation.SIGN);
        if (!permitted) {
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Sign Operation is not allowed for key with id %s", this.key.getId()))));
        }
        return localKeyCryptographyClient.signDataAsync(algorithm, data, context, key);
    });
}
/**
 * Verifies a signature against the raw data using the configured key. The verify operation supports both symmetric
 * keys and asymmetric keys.
 * In case of asymmetric keys public portion of the key is used to verify the signature . This operation requires
 * the keys/verify permission.
 *
 * <p>The {@link SignatureAlgorithm signature algorithm} indicates the type of algorithm to use to verify the
 * signature. Possible values include:
 * {@link SignatureAlgorithm#ES256 ES256}, {@link SignatureAlgorithm#ES384 ES384}, {@link SignatureAlgorithm#ES512
 * ES512}, {@link SignatureAlgorithm#ES256K ES256K}, {@link SignatureAlgorithm#PS256 PS256}, {@link
 * SignatureAlgorithm#PS384 PS384}, {@link SignatureAlgorithm#PS512 PS512}, {@link SignatureAlgorithm#RS256 RS256},
 * {@link SignatureAlgorithm#RS384 RS384} and {@link SignatureAlgorithm#RS512 RS512}.
 *
 * <p><strong>Code Samples</strong></p>
 * <p>Verifies the signature against the raw data. Subscribes to the call asynchronously and prints out the
 * verification details when a response has been received.</p>
 * {@codesnippet com.azure.security.keyvault.keys.cryptography.CryptographyAsyncClient.verifyData}
 *
 * @param algorithm The algorithm to use for signing.
 * @param data The raw content against which signature is to be verified.
 * @param signature The signature to be verified.
 * @return A {@link Mono} containing a {@link VerifyResult} indicating the signature verification result.
 * @throws ResourceNotFoundException if the key cannot be found for verifying.
 * @throws UnsupportedOperationException if the verify operation is not supported or configured on the key.
 * @throws NullPointerException if {@code algorithm}, {@code data} or {@code signature} is null.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature) {
    try {
        // Capture the reactive Context and delegate to the context-aware overload.
        return withContext(context -> verifyData(algorithm, data, signature, context));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the returned Mono instead of throwing.
        return monoError(logger, ex);
    }
}
/**
 * Verifies a signature over the given raw data, dispatching to the Key Vault service when no valid
 * local key is available and to the local crypto client otherwise.
 */
Mono<VerifyResult> verifyData(SignatureAlgorithm algorithm, byte[] data, byte[] signature, Context context) {
    Objects.requireNonNull(algorithm, "Signature algorithm cannot be null.");
    Objects.requireNonNull(data, "Data cannot be null.");
    Objects.requireNonNull(signature, "Signature to be verified cannot be null.");
    return ensureValidKeyAvailable().flatMap(localKeyUsable -> {
        if (!localKeyUsable) {
            // No usable local key material; let the service perform the operation.
            return cryptographyServiceClient.verifyData(algorithm, data, signature, context);
        }
        boolean permitted = checkKeyPermissions(this.key.getKeyOps(), KeyOperation.VERIFY);
        if (!permitted) {
            return Mono.error(logger.logExceptionAsError(new UnsupportedOperationException(String.format(
                "Verify Operation is not allowed for key with id %s", this.key.getId()))));
        }
        return localKeyCryptographyClient.verifyDataAsync(algorithm, data, signature, context, key);
    });
}
private void unpackAndValidateId(String keyId) {
if (CoreUtils.isNullOrEmpty(keyId)) {
throw logger.logExceptionAsError(new IllegalArgumentException("Key Id is invalid"));
}
try {
URL url = new URL(keyId);
String[] tokens = url.getPath().split("/");
String endpoint = url.getProtocol() + ":
String keyName = (tokens.length >= 3 ? tokens[2] : null);
String version = (tokens.length >= 4 ? tokens[3] : null);
this.keyCollection = (tokens.length >= 2 ? tokens[1] : null);
if (Strings.isNullOrEmpty(endpoint)) {
throw logger.logExceptionAsError(new IllegalArgumentException("Key endpoint in key id is invalid"));
} else if (Strings.isNullOrEmpty(keyName)) {
throw logger.logExceptionAsError(new IllegalArgumentException("Key name in key id is invalid"));
} else if (Strings.isNullOrEmpty(version)) {
throw logger.logExceptionAsError(new IllegalArgumentException("Key version in key id is invalid"));
}
} catch (MalformedURLException e) {
throw logger.logExceptionAsError(new IllegalArgumentException("The key identifier is malformed", e));
}
}
/** Returns whether the requested operation is among the key's permitted operations. */
private boolean checkKeyPermissions(List<KeyOperation> operations, KeyOperation keyOperation) {
    return operations.stream().anyMatch(keyOperation::equals);
}
/**
 * Ensures usable key material is present on this client, fetching it from the service on first use.
 *
 * @return a Mono emitting {@code true} when a valid local key is available (local crypto clients are
 * initialized as a side effect), or {@code false} when operations must fall back to the service client.
 */
private Mono<Boolean> ensureValidKeyAvailable() {
    boolean keyNotAvailable = (key == null && keyCollection != null);
    boolean keyNotValid = (key != null && !key.isValid());
    if (!(keyNotAvailable || keyNotValid)) {
        // Key is already present and valid; nothing to fetch.
        return Mono.just(true);
    }
    // Compare constant-first so a null keyCollection cannot throw an NPE here.
    if (SECRETS_COLLECTION.equals(keyCollection)) {
        // Secrets-backed keys are retrieved through the secrets collection.
        return getSecretKey().map(jsonWebKey -> {
            key = jsonWebKey;
            if (key.isValid()) {
                initializeCryptoClients();
                return true;
            }
            return false;
        });
    }
    return getKey().map(keyVaultKey -> {
        key = keyVaultKey.getKey();
        if (key.isValid()) {
            initializeCryptoClients();
            return true;
        }
        return false;
    });
}
/** Returns the service-side cryptography client used when operations cannot run locally. */
CryptographyServiceClient getCryptographyServiceClient() {
    return cryptographyServiceClient;
}
/** Replaces the service-side cryptography client (package-private). */
void setCryptographyServiceClient(CryptographyServiceClient serviceClient) {
    this.cryptographyServiceClient = serviceClient;
}
}
|
@cescoffier: I switched to using the string version as you suggested. When I added the deployment dependencies, the build no longer worked anyway, so the string-based approach is required. The current version works for me.
|
/**
 * Marks the JSON parser beans (Jackson ObjectMapper / JSON-B Jsonb) as unremovable so ArC does not
 * strip them. Beans are referenced by class NAME rather than class literal so this extension does
 * not require compile-time dependencies on both the Jackson and JSON-B extensions.
 */
UnremovableBeanBuildItem ensureJsonParserAvailable() {
    return UnremovableBeanBuildItem.beanClassNames(
            "io.quarkus.jackson.ObjectMapperProducer",
            "com.fasterxml.jackson.databind.ObjectMapper",
            "io.quarkus.jsonb.JsonbProducer",
            "javax.json.bind.Jsonb");
}
|
return UnremovableBeanBuildItem.beanTypes(ObjectMapper.class, Jsonb.class);
|
/**
 * Marks the JSON parser beans (Jackson ObjectMapper / JSON-B Jsonb) as unremovable so ArC does not
 * strip them. Beans are referenced by class NAME rather than class literal so this extension does
 * not require compile-time dependencies on both the Jackson and JSON-B extensions.
 */
UnremovableBeanBuildItem ensureJsonParserAvailable() {
    return UnremovableBeanBuildItem.beanClassNames(
        "io.quarkus.jackson.ObjectMapperProducer",
        "com.fasterxml.jackson.databind.ObjectMapper",
        "io.quarkus.jsonb.JsonbProducer",
        "javax.json.bind.Jsonb");
}
|
class KafkaProcessor {
static final Class[] BUILT_INS = {
ShortSerializer.class,
DoubleSerializer.class,
LongSerializer.class,
BytesSerializer.class,
ByteArraySerializer.class,
IntegerSerializer.class,
ByteBufferSerializer.class,
StringSerializer.class,
FloatSerializer.class,
ShortDeserializer.class,
DoubleDeserializer.class,
LongDeserializer.class,
BytesDeserializer.class,
ByteArrayDeserializer.class,
IntegerDeserializer.class,
ByteBufferDeserializer.class,
StringDeserializer.class,
FloatDeserializer.class
};
@BuildStep
void contributeClassesToIndex(BuildProducer<AdditionalIndexedClassesBuildItem> additionalIndexedClasses,
BuildProducer<IndexDependencyBuildItem> indexDependency) {
additionalIndexedClasses.produce(new AdditionalIndexedClassesBuildItem(
LoginModule.class.getName(),
javax.security.auth.Subject.class.getName(),
javax.security.auth.login.AppConfigurationEntry.class.getName(),
javax.security.auth.login.AppConfigurationEntry.LoginModuleControlFlag.class.getName()));
indexDependency.produce(new IndexDependencyBuildItem("org.apache.kafka", "kafka-clients"));
}
@BuildStep
public void build(CombinedIndexBuildItem indexBuildItem, BuildProducer<ReflectiveClassBuildItem> reflectiveClass,
BuildProducer<NativeImageProxyDefinitionBuildItem> proxies,
Capabilities capabilities) {
final Set<DotName> toRegister = new HashSet<>();
collectImplementors(toRegister, indexBuildItem, Serializer.class);
collectImplementors(toRegister, indexBuildItem, Deserializer.class);
collectImplementors(toRegister, indexBuildItem, Partitioner.class);
collectImplementors(toRegister, indexBuildItem, PartitionAssignor.class);
collectImplementors(toRegister, indexBuildItem, ConsumerPartitionAssignor.class);
for (Class<?> i : BUILT_INS) {
reflectiveClass.produce(new ReflectiveClassBuildItem(false, false, i.getName()));
collectSubclasses(toRegister, indexBuildItem, i);
}
if (capabilities.isCapabilityPresent(Capabilities.JSONB)) {
reflectiveClass.produce(new ReflectiveClassBuildItem(false, false, JsonbSerializer.class, JsonbDeserializer.class));
collectSubclasses(toRegister, indexBuildItem, JsonbSerializer.class);
collectSubclasses(toRegister, indexBuildItem, JsonbDeserializer.class);
}
if (capabilities.isCapabilityPresent(Capabilities.JACKSON)) {
reflectiveClass.produce(
new ReflectiveClassBuildItem(false, false, ObjectMapperSerializer.class, ObjectMapperDeserializer.class));
collectSubclasses(toRegister, indexBuildItem, ObjectMapperSerializer.class);
collectSubclasses(toRegister, indexBuildItem, ObjectMapperDeserializer.class);
}
for (DotName s : toRegister) {
reflectiveClass.produce(new ReflectiveClassBuildItem(false, false, s.toString()));
}
reflectiveClass.produce(new ReflectiveClassBuildItem(false, false, DefaultPartitioner.class.getName()));
reflectiveClass.produce(new ReflectiveClassBuildItem(false, false, RangeAssignor.class.getName()));
reflectiveClass.produce(new ReflectiveClassBuildItem(false, false, RoundRobinAssignor.class.getName()));
reflectiveClass.produce(new ReflectiveClassBuildItem(false, false, StickyAssignor.class.getName()));
reflectiveClass.produce(new ReflectiveClassBuildItem(true, false, "java.nio.DirectByteBuffer"));
reflectiveClass.produce(new ReflectiveClassBuildItem(true, false, "sun.misc.Cleaner"));
try {
Class.forName("io.confluent.kafka.serializers.KafkaAvroDeserializer");
reflectiveClass
.produce(new ReflectiveClassBuildItem(true, false,
"io.confluent.kafka.serializers.KafkaAvroDeserializer",
"io.confluent.kafka.serializers.KafkaAvroSerializer"));
reflectiveClass
.produce(new ReflectiveClassBuildItem(true, true, false,
"io.confluent.kafka.serializers.subject.TopicNameStrategy",
"io.confluent.kafka.serializers.subject.TopicRecordNameStrategy",
"io.confluent.kafka.serializers.subject.RecordNameStrategy"));
reflectiveClass
.produce(new ReflectiveClassBuildItem(true, true, false,
"io.confluent.kafka.schemaregistry.client.rest.entities.ErrorMessage",
"io.confluent.kafka.schemaregistry.client.rest.entities.Schema",
"io.confluent.kafka.schemaregistry.client.rest.entities.Config",
"io.confluent.kafka.schemaregistry.client.rest.entities.SchemaReference",
"io.confluent.kafka.schemaregistry.client.rest.entities.SchemaString",
"io.confluent.kafka.schemaregistry.client.rest.entities.SchemaTypeConverter",
"io.confluent.kafka.schemaregistry.client.rest.entities.ServerClusterId",
"io.confluent.kafka.schemaregistry.client.rest.entities.SujectVersion"));
reflectiveClass
.produce(new ReflectiveClassBuildItem(true, true, false,
"io.confluent.kafka.schemaregistry.client.rest.entities.requests.CompatibilityCheckResponse",
"io.confluent.kafka.schemaregistry.client.rest.entities.requests.ConfigUpdateRequest",
"io.confluent.kafka.schemaregistry.client.rest.entities.requests.ModeGetResponse",
"io.confluent.kafka.schemaregistry.client.rest.entities.requests.ModeUpdateRequest",
"io.confluent.kafka.schemaregistry.client.rest.entities.requests.RegisterSchemaRequest",
"io.confluent.kafka.schemaregistry.client.rest.entities.requests.RegisterSchemaResponse"));
} catch (ClassNotFoundException e) {
}
try {
Class.forName("io.apicurio.registry.utils.serde.AvroKafkaDeserializer");
reflectiveClass.produce(
new ReflectiveClassBuildItem(true, true, false,
"io.apicurio.registry.utils.serde.AvroKafkaDeserializer",
"io.apicurio.registry.utils.serde.AvroKafkaSerializer"));
reflectiveClass.produce(new ReflectiveClassBuildItem(true, true, false,
"io.apicurio.registry.utils.serde.avro.ReflectAvroDatumProvider",
"io.apicurio.registry.utils.serde.strategy.AutoRegisterIdStrategy",
"io.apicurio.registry.utils.serde.strategy.CachedSchemaIdStrategy",
"io.apicurio.registry.utils.serde.strategy.FindBySchemaIdStrategy",
"io.apicurio.registry.utils.serde.strategy.FindLatestIdStrategy",
"io.apicurio.registry.utils.serde.strategy.GetOrCreateIdStrategy",
"io.apicurio.registry.utils.serde.strategy.RecordIdStrategy",
"io.apicurio.registry.utils.serde.strategy.SimpleTopicIdStrategy",
"io.apicurio.registry.utils.serde.strategy.TopicIdStrategy",
"io.apicurio.registry.utils.serde.strategy.TopicRecordIdStrategy"));
proxies.produce(new NativeImageProxyDefinitionBuildItem("io.apicurio.registry.client.RegistryService",
"java.lang.AutoCloseable"));
} catch (ClassNotFoundException e) {
}
}
@BuildStep
public AdditionalBeanBuildItem runtimeConfig() {
return AdditionalBeanBuildItem.builder()
.addBeanClass(KafkaRuntimeConfigProducer.class)
.setUnremovable()
.build();
}
@BuildStep
public void withSasl(BuildProducer<ReflectiveClassBuildItem> reflectiveClass,
BuildProducer<ReflectiveHierarchyBuildItem> reflectiveHierarchy) {
reflectiveClass.produce(new ReflectiveClassBuildItem(false, false, AbstractLogin.DefaultLoginCallbackHandler.class));
reflectiveClass.produce(new ReflectiveClassBuildItem(false, false, SaslClientCallbackHandler.class));
reflectiveClass.produce(new ReflectiveClassBuildItem(false, false, DefaultLogin.class));
final Type loginModuleType = Type
.create(DotName.createSimple(LoginModule.class.getName()), Kind.CLASS);
reflectiveHierarchy.produce(new ReflectiveHierarchyBuildItem.Builder()
.type(loginModuleType)
.source(getClass().getSimpleName() + " > " + loginModuleType.name().toString())
.build());
}
private static void collectImplementors(Set<DotName> set, CombinedIndexBuildItem indexBuildItem, Class<?> cls) {
collectClassNames(set, indexBuildItem.getIndex().getAllKnownImplementors(DotName.createSimple(cls.getName())));
}
private static void collectSubclasses(Set<DotName> set, CombinedIndexBuildItem indexBuildItem, Class<?> cls) {
collectClassNames(set, indexBuildItem.getIndex().getAllKnownSubclasses(DotName.createSimple(cls.getName())));
}
private static void collectClassNames(Set<DotName> set, Collection<ClassInfo> classInfos) {
classInfos.forEach(new Consumer<ClassInfo>() {
@Override
public void accept(ClassInfo c) {
set.add(c.name());
}
});
}
@BuildStep
HealthBuildItem addHealthCheck(KafkaBuildTimeConfig buildTimeConfig) {
return new HealthBuildItem("io.quarkus.kafka.client.health.KafkaHealthCheck",
buildTimeConfig.healthEnabled);
}
@BuildStep
}
|
class KafkaProcessor {
static final Class[] BUILT_INS = {
ShortSerializer.class,
DoubleSerializer.class,
LongSerializer.class,
BytesSerializer.class,
ByteArraySerializer.class,
IntegerSerializer.class,
ByteBufferSerializer.class,
StringSerializer.class,
FloatSerializer.class,
ShortDeserializer.class,
DoubleDeserializer.class,
LongDeserializer.class,
BytesDeserializer.class,
ByteArrayDeserializer.class,
IntegerDeserializer.class,
ByteBufferDeserializer.class,
StringDeserializer.class,
FloatDeserializer.class
};
@BuildStep
void contributeClassesToIndex(BuildProducer<AdditionalIndexedClassesBuildItem> additionalIndexedClasses,
BuildProducer<IndexDependencyBuildItem> indexDependency) {
additionalIndexedClasses.produce(new AdditionalIndexedClassesBuildItem(
LoginModule.class.getName(),
javax.security.auth.Subject.class.getName(),
javax.security.auth.login.AppConfigurationEntry.class.getName(),
javax.security.auth.login.AppConfigurationEntry.LoginModuleControlFlag.class.getName()));
indexDependency.produce(new IndexDependencyBuildItem("org.apache.kafka", "kafka-clients"));
}
@BuildStep
public void build(CombinedIndexBuildItem indexBuildItem, BuildProducer<ReflectiveClassBuildItem> reflectiveClass,
BuildProducer<NativeImageProxyDefinitionBuildItem> proxies,
Capabilities capabilities) {
final Set<DotName> toRegister = new HashSet<>();
collectImplementors(toRegister, indexBuildItem, Serializer.class);
collectImplementors(toRegister, indexBuildItem, Deserializer.class);
collectImplementors(toRegister, indexBuildItem, Partitioner.class);
collectImplementors(toRegister, indexBuildItem, PartitionAssignor.class);
collectImplementors(toRegister, indexBuildItem, ConsumerPartitionAssignor.class);
for (Class<?> i : BUILT_INS) {
reflectiveClass.produce(new ReflectiveClassBuildItem(false, false, i.getName()));
collectSubclasses(toRegister, indexBuildItem, i);
}
if (capabilities.isCapabilityPresent(Capabilities.JSONB)) {
reflectiveClass.produce(new ReflectiveClassBuildItem(false, false, JsonbSerializer.class, JsonbDeserializer.class));
collectSubclasses(toRegister, indexBuildItem, JsonbSerializer.class);
collectSubclasses(toRegister, indexBuildItem, JsonbDeserializer.class);
}
if (capabilities.isCapabilityPresent(Capabilities.JACKSON)) {
reflectiveClass.produce(
new ReflectiveClassBuildItem(false, false, ObjectMapperSerializer.class, ObjectMapperDeserializer.class));
collectSubclasses(toRegister, indexBuildItem, ObjectMapperSerializer.class);
collectSubclasses(toRegister, indexBuildItem, ObjectMapperDeserializer.class);
}
for (DotName s : toRegister) {
reflectiveClass.produce(new ReflectiveClassBuildItem(false, false, s.toString()));
}
reflectiveClass.produce(new ReflectiveClassBuildItem(false, false, DefaultPartitioner.class.getName()));
reflectiveClass.produce(new ReflectiveClassBuildItem(false, false, RangeAssignor.class.getName()));
reflectiveClass.produce(new ReflectiveClassBuildItem(false, false, RoundRobinAssignor.class.getName()));
reflectiveClass.produce(new ReflectiveClassBuildItem(false, false, StickyAssignor.class.getName()));
reflectiveClass.produce(new ReflectiveClassBuildItem(true, false, "java.nio.DirectByteBuffer"));
reflectiveClass.produce(new ReflectiveClassBuildItem(true, false, "sun.misc.Cleaner"));
try {
Class.forName("io.confluent.kafka.serializers.KafkaAvroDeserializer");
reflectiveClass
.produce(new ReflectiveClassBuildItem(true, false,
"io.confluent.kafka.serializers.KafkaAvroDeserializer",
"io.confluent.kafka.serializers.KafkaAvroSerializer"));
reflectiveClass
.produce(new ReflectiveClassBuildItem(true, true, false,
"io.confluent.kafka.serializers.subject.TopicNameStrategy",
"io.confluent.kafka.serializers.subject.TopicRecordNameStrategy",
"io.confluent.kafka.serializers.subject.RecordNameStrategy"));
reflectiveClass
.produce(new ReflectiveClassBuildItem(true, true, false,
"io.confluent.kafka.schemaregistry.client.rest.entities.ErrorMessage",
"io.confluent.kafka.schemaregistry.client.rest.entities.Schema",
"io.confluent.kafka.schemaregistry.client.rest.entities.Config",
"io.confluent.kafka.schemaregistry.client.rest.entities.SchemaReference",
"io.confluent.kafka.schemaregistry.client.rest.entities.SchemaString",
"io.confluent.kafka.schemaregistry.client.rest.entities.SchemaTypeConverter",
"io.confluent.kafka.schemaregistry.client.rest.entities.ServerClusterId",
"io.confluent.kafka.schemaregistry.client.rest.entities.SujectVersion"));
reflectiveClass
.produce(new ReflectiveClassBuildItem(true, true, false,
"io.confluent.kafka.schemaregistry.client.rest.entities.requests.CompatibilityCheckResponse",
"io.confluent.kafka.schemaregistry.client.rest.entities.requests.ConfigUpdateRequest",
"io.confluent.kafka.schemaregistry.client.rest.entities.requests.ModeGetResponse",
"io.confluent.kafka.schemaregistry.client.rest.entities.requests.ModeUpdateRequest",
"io.confluent.kafka.schemaregistry.client.rest.entities.requests.RegisterSchemaRequest",
"io.confluent.kafka.schemaregistry.client.rest.entities.requests.RegisterSchemaResponse"));
} catch (ClassNotFoundException e) {
}
try {
Class.forName("io.apicurio.registry.utils.serde.AvroKafkaDeserializer");
reflectiveClass.produce(
new ReflectiveClassBuildItem(true, true, false,
"io.apicurio.registry.utils.serde.AvroKafkaDeserializer",
"io.apicurio.registry.utils.serde.AvroKafkaSerializer"));
reflectiveClass.produce(new ReflectiveClassBuildItem(true, true, false,
"io.apicurio.registry.utils.serde.avro.ReflectAvroDatumProvider",
"io.apicurio.registry.utils.serde.strategy.AutoRegisterIdStrategy",
"io.apicurio.registry.utils.serde.strategy.CachedSchemaIdStrategy",
"io.apicurio.registry.utils.serde.strategy.FindBySchemaIdStrategy",
"io.apicurio.registry.utils.serde.strategy.FindLatestIdStrategy",
"io.apicurio.registry.utils.serde.strategy.GetOrCreateIdStrategy",
"io.apicurio.registry.utils.serde.strategy.RecordIdStrategy",
"io.apicurio.registry.utils.serde.strategy.SimpleTopicIdStrategy",
"io.apicurio.registry.utils.serde.strategy.TopicIdStrategy",
"io.apicurio.registry.utils.serde.strategy.TopicRecordIdStrategy"));
proxies.produce(new NativeImageProxyDefinitionBuildItem("io.apicurio.registry.client.RegistryService",
"java.lang.AutoCloseable"));
} catch (ClassNotFoundException e) {
}
}
@BuildStep
public AdditionalBeanBuildItem runtimeConfig() {
return AdditionalBeanBuildItem.builder()
.addBeanClass(KafkaRuntimeConfigProducer.class)
.setUnremovable()
.build();
}
@BuildStep
public void withSasl(BuildProducer<ReflectiveClassBuildItem> reflectiveClass,
BuildProducer<ReflectiveHierarchyBuildItem> reflectiveHierarchy) {
reflectiveClass.produce(new ReflectiveClassBuildItem(false, false, AbstractLogin.DefaultLoginCallbackHandler.class));
reflectiveClass.produce(new ReflectiveClassBuildItem(false, false, SaslClientCallbackHandler.class));
reflectiveClass.produce(new ReflectiveClassBuildItem(false, false, DefaultLogin.class));
final Type loginModuleType = Type
.create(DotName.createSimple(LoginModule.class.getName()), Kind.CLASS);
reflectiveHierarchy.produce(new ReflectiveHierarchyBuildItem.Builder()
.type(loginModuleType)
.source(getClass().getSimpleName() + " > " + loginModuleType.name().toString())
.build());
}
private static void collectImplementors(Set<DotName> set, CombinedIndexBuildItem indexBuildItem, Class<?> cls) {
collectClassNames(set, indexBuildItem.getIndex().getAllKnownImplementors(DotName.createSimple(cls.getName())));
}
private static void collectSubclasses(Set<DotName> set, CombinedIndexBuildItem indexBuildItem, Class<?> cls) {
collectClassNames(set, indexBuildItem.getIndex().getAllKnownSubclasses(DotName.createSimple(cls.getName())));
}
private static void collectClassNames(Set<DotName> set, Collection<ClassInfo> classInfos) {
classInfos.forEach(new Consumer<ClassInfo>() {
@Override
public void accept(ClassInfo c) {
set.add(c.name());
}
});
}
@BuildStep
HealthBuildItem addHealthCheck(KafkaBuildTimeConfig buildTimeConfig) {
return new HealthBuildItem("io.quarkus.kafka.client.health.KafkaHealthCheck",
buildTimeConfig.healthEnabled);
}
@BuildStep
}
|
Yes, these `checkState`s should be in `finally`. Thanks!
|
/**
 * Verifies that a failure while reading a chunk still recycles every buffer handed out to the reader.
 */
public void testBufferRecycledOnFailure() throws IOException {
    FailingChannelStateSerializer serializer = new FailingChannelStateSerializer();
    TestRecoveredChannelStateHandler handler = new TestRecoveredChannelStateHandler();
    try (FSDataInputStream stream = geStream(serializer, 10)) {
        new ChannelStateChunkReader(serializer).readChunk(stream, serializer.getHeaderLength(), handler, "channelInfo");
    } finally {
        // readChunk is expected to throw (the test expects TestException), so these checks
        // must live in finally — after readChunk they would never execute.
        checkState(serializer.failed);
        checkState(!handler.requestedBuffers.isEmpty());
        assertTrue(handler.requestedBuffers.stream().allMatch(TestChannelStateByteBuffer::isRecycled));
    }
}
|
checkState(serializer.failed);
|
/**
 * Verifies that a failure while reading a chunk still recycles every buffer handed out to the reader.
 */
public void testBufferRecycledOnFailure() throws IOException, InterruptedException {
    FailingChannelStateSerializer serializer = new FailingChannelStateSerializer();
    TestRecoveredChannelStateHandler handler = new TestRecoveredChannelStateHandler();
    try (FSDataInputStream stream = getStream(serializer, 10)) {
        new ChannelStateChunkReader(serializer).readChunk(stream, serializer.getHeaderLength(), handler, "channelInfo");
    } finally {
        // readChunk is expected to throw, so all checks run in finally.
        checkState(serializer.failed);
        checkState(!handler.requestedBuffers.isEmpty());
        assertTrue(handler.requestedBuffers.stream().allMatch(TestChannelStateByteBuffer::isRecycled));
    }
}
|
class ChannelStateChunkReaderTest {
@Test(expected = TestException.class)
@Test
public void testBuffersNotRequestedForEmptyStream() throws IOException {
ChannelStateSerializer serializer = new ChannelStateSerializerImpl();
TestRecoveredChannelStateHandler handler = new TestRecoveredChannelStateHandler();
try (FSDataInputStream stream = geStream(serializer, 0)) {
new ChannelStateChunkReader(serializer).readChunk(stream, serializer.getHeaderLength(), handler, "channelInfo");
} finally {
assertTrue(handler.requestedBuffers.isEmpty());
}
}
@Test
public void testNoSeekUnnecessarily() throws IOException {
final int offset = 123;
final FSDataInputStream stream = new FSDataInputStream() {
@Override
public long getPos() {
return offset;
}
@Override
public void seek(long ignored) {
fail();
}
@Override
public int read() {
return 0;
}
};
new ChannelStateChunkReader(new ChannelStateSerializerImpl())
.readChunk(stream, offset, new TestRecoveredChannelStateHandler(), "channelInfo");
}
private static class TestRecoveredChannelStateHandler implements RecoveredChannelStateHandler<Object, Object> {
private final List<TestChannelStateByteBuffer> requestedBuffers = new ArrayList<>();
@Override
public Tuple2<ChannelStateByteBuffer, Object> getBuffer(Object o) {
TestChannelStateByteBuffer buffer = new TestChannelStateByteBuffer();
requestedBuffers.add(buffer);
return Tuple2.of(buffer, null);
}
@Override
public void recover(Object o, Object o2) {
}
@Override
public void close() throws Exception {
}
}
private static class FailingChannelStateSerializer extends ChannelStateSerializerImpl {
private boolean failed;
@Override
public int readData(InputStream stream, ChannelStateByteBuffer buffer, int bytes) {
failed = true;
throw new TestException();
}
}
private static FSDataInputStream geStream(ChannelStateSerializer serializer, int size) throws IOException {
try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
DataOutputStream dataStream = new DataOutputStream(out);
serializer.writeHeader(dataStream);
serializer.writeData(dataStream, new NetworkBuffer(MemorySegmentFactory.wrap(new byte[size]), FreeingBufferRecycler.INSTANCE, Buffer.DataType.DATA_BUFFER, size));
dataStream.flush();
return new ByteStreamStateHandle("", out.toByteArray()).openInputStream();
}
}
private static class TestChannelStateByteBuffer implements ChannelStateByteBuffer {
private boolean recycled;
@Override
public boolean isWritable() {
return true;
}
@Override
public void recycle() {
checkArgument(!recycled);
recycled = true;
}
public boolean isRecycled() {
return recycled;
}
@Override
public int writeBytes(InputStream input, int bytesToRead) throws IOException {
checkArgument(!recycled);
input.skip(bytesToRead);
return bytesToRead;
}
}
}
|
/** Tests for {@code ChannelStateChunkReader}. */
class ChannelStateChunkReaderTest {

    /**
     * Reading a chunk of a zero-length channel state must not request any buffers from the
     * recovery handler.
     *
     * <p>Fix: the method previously carried both {@code @Test(expected = TestException.class)}
     * and a bare {@code @Test}. {@code @Test} is not a repeatable annotation, so the duplicate
     * did not compile — and no exception is expected for an empty stream, so the plain
     * {@code @Test} is the correct one to keep.
     */
    @Test
    public void testBuffersNotRequestedForEmptyStream() throws IOException, InterruptedException {
        ChannelStateSerializer serializer = new ChannelStateSerializerImpl();
        TestRecoveredChannelStateHandler handler = new TestRecoveredChannelStateHandler();
        try (FSDataInputStream stream = getStream(serializer, 0)) {
            new ChannelStateChunkReader(serializer)
                    .readChunk(stream, serializer.getHeaderLength(), handler, "channelInfo");
        } finally {
            // An empty chunk carries no data, so no buffer should ever have been requested.
            assertTrue(handler.requestedBuffers.isEmpty());
        }
    }

    /**
     * When the stream is already positioned at the requested offset, the reader must not seek
     * (the stub stream fails the test on any {@code seek} call).
     */
    @Test
    public void testNoSeekUnnecessarily() throws IOException, InterruptedException {
        final int offset = 123;
        final FSDataInputStream stream = new FSDataInputStream() {
            @Override
            public long getPos() {
                return offset; // already at the target offset
            }

            @Override
            public void seek(long ignored) {
                fail(); // any seek is unnecessary here
            }

            @Override
            public int read() {
                return 0;
            }
        };
        new ChannelStateChunkReader(new ChannelStateSerializerImpl())
                .readChunk(stream, offset, new TestRecoveredChannelStateHandler(), "channelInfo");
    }

    /**
     * A {@code RecoveredChannelStateHandler} stub that records every buffer it hands out so tests
     * can assert on how many buffers were requested.
     */
    private static class TestRecoveredChannelStateHandler
            implements RecoveredChannelStateHandler<Object, Object> {
        private final List<TestChannelStateByteBuffer> requestedBuffers = new ArrayList<>();

        @Override
        public BufferWithContext<Object> getBuffer(Object o) {
            TestChannelStateByteBuffer buffer = new TestChannelStateByteBuffer();
            requestedBuffers.add(buffer);
            return new BufferWithContext<>(buffer, null);
        }

        @Override
        public void recover(Object o, Object o2) {
            // No-op: recovery results are irrelevant to these tests.
        }

        @Override
        public void close() throws Exception {
            // Nothing to release.
        }
    }

    /**
     * Serializer whose {@code readData} always fails with {@link TestException}, recording that it
     * was invoked. NOTE(review): currently unused by the tests above — presumably intended for a
     * failure-propagation test; kept for that purpose.
     */
    private static class FailingChannelStateSerializer extends ChannelStateSerializerImpl {
        private boolean failed;

        @Override
        public int readData(InputStream stream, ChannelStateByteBuffer buffer, int bytes) {
            failed = true;
            throw new TestException();
        }
    }

    /**
     * Builds an {@code FSDataInputStream} containing a serialized header followed by {@code size}
     * bytes of zeroed channel data.
     */
    private static FSDataInputStream getStream(ChannelStateSerializer serializer, int size)
            throws IOException {
        try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
            DataOutputStream dataStream = new DataOutputStream(out);
            serializer.writeHeader(dataStream);
            serializer.writeData(
                    dataStream,
                    new NetworkBuffer(
                            MemorySegmentFactory.wrap(new byte[size]),
                            FreeingBufferRecycler.INSTANCE,
                            Buffer.DataType.DATA_BUFFER,
                            size));
            dataStream.flush();
            return new ByteStreamStateHandle("", out.toByteArray()).openInputStream();
        }
    }

    /**
     * A {@code ChannelStateByteBuffer} stub that is always writable, discards written bytes, and
     * tracks recycling; double-recycle or write-after-recycle fails the test.
     */
    private static class TestChannelStateByteBuffer implements ChannelStateByteBuffer {
        private boolean recycled;

        @Override
        public boolean isWritable() {
            return true;
        }

        @Override
        public void recycle() {
            checkArgument(!recycled);
            recycled = true;
        }

        public boolean isRecycled() {
            return recycled;
        }

        @Override
        public int writeBytes(InputStream input, int bytesToRead) throws IOException {
            checkArgument(!recycled);
            input.skip(bytesToRead);
            return bytesToRead;
        }
    }
}
|
I don't think you can actually get here. The parser rejects non-SQL UDFs that don't specify a return type. I changed it to IllegalArgumentException.
|
/**
 * Validates that a Java UDF declaration uses only supported argument and return types.
 *
 * @param createFunctionStmt the resolved {@code CREATE FUNCTION} statement to validate
 * @throws UnsupportedOperationException if an argument is templated (type is null) or any
 *     argument/return type is not supported in Java UDFs
 * @throws IllegalArgumentException if the return type is missing. Fix: this was previously a
 *     {@code NullPointerException}; a missing return type is invalid caller input, so
 *     {@code IllegalArgumentException} is the appropriate standard exception. (The ZetaSQL
 *     parser is believed to reject non-SQL UDFs without a return type, so this is defensive.)
 */
void validateJavaUdf(ResolvedNodes.ResolvedCreateFunctionStmt createFunctionStmt) {
  for (FunctionArgumentType argumentType :
      createFunctionStmt.getSignature().getFunctionArgumentList()) {
    Type type = argumentType.getType();
    if (type == null) {
      // A null type means the argument is templated, which Java UDFs cannot express.
      throw new UnsupportedOperationException("UDF templated argument types are not supported.");
    }
    validateJavaUdfZetaSqlType(type);
  }
  if (createFunctionStmt.getReturnType() == null) {
    throw new IllegalArgumentException("UDF return type must not be null.");
  }
  validateJavaUdfZetaSqlType(createFunctionStmt.getReturnType());
}
|
throw new NullPointerException("UDF return type must not be null.");
|
/**
 * Validates that a Java UDF declaration uses only supported argument and return types.
 *
 * @param createFunctionStmt the resolved {@code CREATE FUNCTION} statement to validate
 * @throws UnsupportedOperationException if an argument is templated (type is null) or any
 *     argument/return type is not supported in Java UDFs
 * @throws IllegalArgumentException if the return type is missing
 */
void validateJavaUdf(ResolvedNodes.ResolvedCreateFunctionStmt createFunctionStmt) {
  for (FunctionArgumentType argument :
      createFunctionStmt.getSignature().getFunctionArgumentList()) {
    Type argumentType = argument.getType();
    // A null type indicates a templated argument, which Java UDFs cannot express.
    if (argumentType == null) {
      throw new UnsupportedOperationException("UDF templated argument types are not supported.");
    }
    validateJavaUdfZetaSqlType(argumentType);
  }
  Type returnType = createFunctionStmt.getReturnType();
  if (returnType == null) {
    throw new IllegalArgumentException("UDF return type must not be null.");
  }
  validateJavaUdfZetaSqlType(returnType);
}
|
/**
 * Bridges a Calcite {@link SchemaPlus} and a ZetaSQL {@link SimpleCatalog}: registers built-in
 * ZetaSQL functions, Beam's windowing functions/TVFs, and user-defined functions (SQL and Java),
 * and mirrors Calcite tables into nested ZetaSQL catalogs.
 */
class BeamZetaSqlCatalog {
  // Function-group labels used to tag functions registered in the ZetaSQL catalog.
  public static final String PRE_DEFINED_WINDOW_FUNCTIONS = "pre_defined_window_functions";
  public static final String USER_DEFINED_SQL_FUNCTIONS = "user_defined_functions";
  public static final String USER_DEFINED_JAVA_SCALAR_FUNCTIONS =
      "user_defined_java_scalar_functions";
  /**
   * Same as {@link Function}.ZETASQL_FUNCTION_GROUP_NAME. Identifies built-in ZetaSQL functions.
   */
  public static final String ZETASQL_FUNCTION_GROUP_NAME = "ZetaSQL";
  // SQL declarations for Beam's windowing helpers. Bodies are placeholders (e.g. "(1)"/"(null)");
  // only the signatures matter — the analyzer resolves these, and Beam handles them specially.
  private static final ImmutableList<String> PRE_DEFINED_WINDOW_FUNCTION_DECLARATIONS =
      ImmutableList.of(
          "CREATE FUNCTION TUMBLE(ts TIMESTAMP, window_size STRING) AS (1);",
          "CREATE FUNCTION TUMBLE_START(window_size STRING) RETURNS TIMESTAMP AS (null);",
          "CREATE FUNCTION TUMBLE_END(window_size STRING) RETURNS TIMESTAMP AS (null);",
          "CREATE FUNCTION HOP(ts TIMESTAMP, emit_frequency STRING, window_size STRING) AS (1);",
          "CREATE FUNCTION HOP_START(emit_frequency STRING, window_size STRING) "
              + "RETURNS TIMESTAMP AS (null);",
          "CREATE FUNCTION HOP_END(emit_frequency STRING, window_size STRING) "
              + "RETURNS TIMESTAMP AS (null);",
          "CREATE FUNCTION SESSION(ts TIMESTAMP, session_gap STRING) AS (1);",
          "CREATE FUNCTION SESSION_START(session_gap STRING) RETURNS TIMESTAMP AS (null);",
          "CREATE FUNCTION SESSION_END(session_gap STRING) RETURNS TIMESTAMP AS (null);");
  /** The top-level Calcite schema, which may contain sub-schemas. */
  private final SchemaPlus calciteSchema;
  /**
   * The top-level ZetaSQL catalog, which may contain nested catalogs for qualified table and
   * function references.
   */
  private final SimpleCatalog zetaSqlCatalog;
  private final JavaTypeFactory typeFactory;
  // Loads user Java UDF implementations from jars referenced in CREATE FUNCTION ... OPTIONS(path).
  private final JavaUdfLoader javaUdfLoader = new JavaUdfLoader();
  // User-defined SQL scalar functions, keyed by their (possibly qualified) name path.
  private final Map<List<String>, ResolvedNodes.ResolvedCreateFunctionStmt> sqlScalarUdfs =
      new HashMap<>();
  /** User-defined table valued functions. */
  private final Map<List<String>, ResolvedNode> sqlUdtvfs = new HashMap<>();
  // User-defined Java scalar functions, keyed by their (possibly qualified) name path.
  private final Map<List<String>, UserFunctionDefinitions.JavaScalarFunction> javaScalarUdfs =
      new HashMap<>();

  private BeamZetaSqlCatalog(
      SchemaPlus calciteSchema, SimpleCatalog zetaSqlCatalog, JavaTypeFactory typeFactory) {
    this.calciteSchema = calciteSchema;
    this.zetaSqlCatalog = zetaSqlCatalog;
    this.typeFactory = typeFactory;
  }

  /** Return catalog pre-populated with builtin functions. */
  static BeamZetaSqlCatalog create(
      SchemaPlus calciteSchema, JavaTypeFactory typeFactory, AnalyzerOptions options) {
    BeamZetaSqlCatalog catalog =
        new BeamZetaSqlCatalog(
            calciteSchema, new SimpleCatalog(calciteSchema.getName()), typeFactory);
    catalog.addFunctionsToCatalog(options);
    return catalog;
  }

  SimpleCatalog getZetaSqlCatalog() {
    return zetaSqlCatalog;
  }

  /** Registers each table (given as a name path) in the appropriate leaf catalog. */
  void addTables(List<List<String>> tables, QueryTrait queryTrait) {
    tables.forEach(table -> addTableToLeafCatalog(table, queryTrait));
  }

  /**
   * Registers a user-defined scalar function (SQL or Java) in this catalog. Java UDFs are
   * validated and their implementation is loaded from the jar given in OPTIONS(path=...).
   */
  void addFunction(ResolvedNodes.ResolvedCreateFunctionStmt createFunctionStmt) {
    String functionGroup = getFunctionGroup(createFunctionStmt);
    switch (functionGroup) {
      case USER_DEFINED_SQL_FUNCTIONS:
        sqlScalarUdfs.put(createFunctionStmt.getNamePath(), createFunctionStmt);
        break;
      case USER_DEFINED_JAVA_SCALAR_FUNCTIONS:
        validateJavaUdf(createFunctionStmt);
        String jarPath = getJarPath(createFunctionStmt);
        ScalarFn scalarFn =
            javaUdfLoader.loadScalarFunction(createFunctionStmt.getNamePath(), jarPath);
        Method method = ScalarFnReflector.getApplyMethod(scalarFn);
        javaScalarUdfs.put(
            createFunctionStmt.getNamePath(),
            UserFunctionDefinitions.JavaScalarFunction.create(method, jarPath));
        break;
      default:
        throw new IllegalArgumentException(
            String.format("Encountered unrecognized function group %s.", functionGroup));
    }
    // Also register the signature with ZetaSQL so the analyzer can resolve references to it.
    zetaSqlCatalog.addFunction(
        new Function(
            createFunctionStmt.getNamePath(),
            functionGroup,
            createFunctionStmt.getIsAggregate()
                ? ZetaSQLFunctions.FunctionEnums.Mode.AGGREGATE
                : ZetaSQLFunctions.FunctionEnums.Mode.SCALAR,
            ImmutableList.of(createFunctionStmt.getSignature())));
  }

  /**
   * Throws {@link UnsupportedOperationException} if ZetaSQL type is not supported in Java UDF.
   * Supported types are a subset of the types supported by {@link BeamJavaUdfCalcRule}.
   */
  void validateJavaUdfZetaSqlType(Type type) {
    switch (type.getKind()) {
      case TYPE_INT64:
      case TYPE_DOUBLE:
      case TYPE_BOOL:
      case TYPE_STRING:
      case TYPE_BYTES:
        // Supported.
        break;
      case TYPE_NUMERIC:
      case TYPE_DATE:
      case TYPE_TIME:
      case TYPE_DATETIME:
      case TYPE_TIMESTAMP:
      case TYPE_ARRAY:
      case TYPE_STRUCT:
        // Known ZetaSQL types that Java UDFs cannot (yet) use.
        throw new UnsupportedOperationException(
            "ZetaSQL type not allowed in Java UDF: " + type.getKind().name());
      default:
        throw new UnsupportedOperationException("Unknown ZetaSQL type: " + type.getKind().name());
    }
  }

  /** Registers a user-defined SQL table-valued function with a fixed output schema. */
  void addTableValuedFunction(
      ResolvedNodes.ResolvedCreateTableFunctionStmt createTableFunctionStmt) {
    zetaSqlCatalog.addTableValuedFunction(
        new TableValuedFunction.FixedOutputSchemaTVF(
            createTableFunctionStmt.getNamePath(),
            createTableFunctionStmt.getSignature(),
            TVFRelation.createColumnBased(
                createTableFunctionStmt.getQuery().getColumnList().stream()
                    .map(c -> TVFRelation.Column.create(c.getName(), c.getType()))
                    .collect(Collectors.toList()))));
    sqlUdtvfs.put(createTableFunctionStmt.getNamePath(), createTableFunctionStmt.getQuery());
  }

  /** Returns immutable snapshots of all user-defined functions registered so far. */
  UserFunctionDefinitions getUserFunctionDefinitions() {
    return UserFunctionDefinitions.newBuilder()
        .setSqlScalarFunctions(ImmutableMap.copyOf(sqlScalarUdfs))
        .setSqlTableValuedFunctions(ImmutableMap.copyOf(sqlUdtvfs))
        .setJavaScalarFunctions(ImmutableMap.copyOf(javaScalarUdfs))
        .build();
  }

  /** Populates the catalog: allow-listed ZetaSQL builtins, window functions/TVFs, schema UDFs. */
  private void addFunctionsToCatalog(AnalyzerOptions options) {
    ZetaSQLBuiltinFunctionOptions zetasqlBuiltinFunctionOptions =
        new ZetaSQLBuiltinFunctionOptions(options.getLanguageOptions());
    SupportedZetaSqlBuiltinFunctions.ALLOWLIST.forEach(
        zetasqlBuiltinFunctionOptions::includeFunctionSignatureId);
    zetaSqlCatalog.addZetaSQLFunctions(zetasqlBuiltinFunctionOptions);
    addWindowScalarFunctions(options);
    addWindowTvfs();
    addUdfsFromSchema();
  }

  /** Analyzes the predefined window-function declarations and registers their signatures. */
  private void addWindowScalarFunctions(AnalyzerOptions options) {
    PRE_DEFINED_WINDOW_FUNCTION_DECLARATIONS.stream()
        .map(
            func ->
                (ResolvedNodes.ResolvedCreateFunctionStmt)
                    Analyzer.analyzeStatement(func, options, zetaSqlCatalog))
        .map(
            resolvedFunc ->
                new Function(
                    String.join(".", resolvedFunc.getNamePath()),
                    PRE_DEFINED_WINDOW_FUNCTIONS,
                    ZetaSQLFunctions.FunctionEnums.Mode.SCALAR,
                    ImmutableList.of(resolvedFunc.getSignature())))
        .forEach(zetaSqlCatalog::addFunction);
  }

  @SuppressWarnings({
    "nullness"
  })
  // Registers the TUMBLE/HOP/SESSION windowing TVFs. Each forwards the input schema and appends
  // window_start/window_end TIMESTAMP columns.
  private void addWindowTvfs() {
    FunctionArgumentType retType =
        new FunctionArgumentType(ZetaSQLFunctions.SignatureArgumentKind.ARG_TYPE_RELATION);
    FunctionArgumentType inputTableType =
        new FunctionArgumentType(ZetaSQLFunctions.SignatureArgumentKind.ARG_TYPE_RELATION);
    FunctionArgumentType descriptorType =
        new FunctionArgumentType(
            ZetaSQLFunctions.SignatureArgumentKind.ARG_TYPE_DESCRIPTOR,
            FunctionArgumentType.FunctionArgumentTypeOptions.builder()
                .setDescriptorResolutionTableOffset(0)
                .build(),
            1);
    FunctionArgumentType stringType =
        new FunctionArgumentType(TypeFactory.createSimpleType(ZetaSQLType.TypeKind.TYPE_STRING));
    // TUMBLE(table, descriptor, window_size)
    zetaSqlCatalog.addTableValuedFunction(
        new TableValuedFunction.ForwardInputSchemaToOutputSchemaWithAppendedColumnTVF(
            ImmutableList.of(TVFStreamingUtils.FIXED_WINDOW_TVF),
            new FunctionSignature(
                retType, ImmutableList.of(inputTableType, descriptorType, stringType), -1),
            ImmutableList.of(
                TVFRelation.Column.create(
                    TVFStreamingUtils.WINDOW_START,
                    TypeFactory.createSimpleType(ZetaSQLType.TypeKind.TYPE_TIMESTAMP)),
                TVFRelation.Column.create(
                    TVFStreamingUtils.WINDOW_END,
                    TypeFactory.createSimpleType(ZetaSQLType.TypeKind.TYPE_TIMESTAMP))),
            null,
            null));
    // HOP(table, descriptor, emit_frequency, window_size)
    zetaSqlCatalog.addTableValuedFunction(
        new TableValuedFunction.ForwardInputSchemaToOutputSchemaWithAppendedColumnTVF(
            ImmutableList.of(TVFStreamingUtils.SLIDING_WINDOW_TVF),
            new FunctionSignature(
                retType,
                ImmutableList.of(inputTableType, descriptorType, stringType, stringType),
                -1),
            ImmutableList.of(
                TVFRelation.Column.create(
                    TVFStreamingUtils.WINDOW_START,
                    TypeFactory.createSimpleType(ZetaSQLType.TypeKind.TYPE_TIMESTAMP)),
                TVFRelation.Column.create(
                    TVFStreamingUtils.WINDOW_END,
                    TypeFactory.createSimpleType(ZetaSQLType.TypeKind.TYPE_TIMESTAMP))),
            null,
            null));
    // SESSION(table, descriptor, descriptor, session_gap)
    zetaSqlCatalog.addTableValuedFunction(
        new TableValuedFunction.ForwardInputSchemaToOutputSchemaWithAppendedColumnTVF(
            ImmutableList.of(TVFStreamingUtils.SESSION_WINDOW_TVF),
            new FunctionSignature(
                retType,
                ImmutableList.of(inputTableType, descriptorType, descriptorType, stringType),
                -1),
            ImmutableList.of(
                TVFRelation.Column.create(
                    TVFStreamingUtils.WINDOW_START,
                    TypeFactory.createSimpleType(ZetaSQLType.TypeKind.TYPE_TIMESTAMP)),
                TVFRelation.Column.create(
                    TVFStreamingUtils.WINDOW_END,
                    TypeFactory.createSimpleType(ZetaSQLType.TypeKind.TYPE_TIMESTAMP))),
            null,
            null));
  }

  /**
   * Registers Java scalar UDFs declared directly in the Calcite schema. Only
   * {@link ScalarFunctionImpl} definitions are accepted, at most one per function name.
   */
  private void addUdfsFromSchema() {
    for (String functionName : calciteSchema.getFunctionNames()) {
      Collection<org.apache.beam.vendor.calcite.v1_20_0.org.apache.calcite.schema.Function>
          functions = calciteSchema.getFunctions(functionName);
      if (functions.size() != 1) {
        throw new IllegalArgumentException(
            String.format(
                "Expected exactly 1 definition for function '%s', but found %d."
                    + " Beam ZetaSQL supports only a single function definition per function name (BEAM-12073).",
                functionName, functions.size()));
      }
      for (org.apache.beam.vendor.calcite.v1_20_0.org.apache.calcite.schema.Function function :
          functions) {
        if (function instanceof ScalarFunctionImpl) {
          ScalarFunctionImpl scalarFunction = (ScalarFunctionImpl) function;
          validateScalarFunctionImpl(scalarFunction);
          // A dotted function name maps to a qualified name path.
          List<String> path = Arrays.asList(functionName.split("\\."));
          Method method = scalarFunction.method;
          javaScalarUdfs.put(path, UserFunctionDefinitions.JavaScalarFunction.create(method, ""));
          // Translate the Calcite signature to a ZetaSQL signature for the analyzer.
          FunctionArgumentType resultType =
              new FunctionArgumentType(
                  ZetaSqlCalciteTranslationUtils.toZetaSqlType(
                      scalarFunction.getReturnType(typeFactory)));
          List<FunctionArgumentType> argumentTypes =
              scalarFunction.getParameters().stream()
                  .map(
                      (arg) ->
                          new FunctionArgumentType(
                              ZetaSqlCalciteTranslationUtils.toZetaSqlType(
                                  arg.getType(typeFactory))))
                  .collect(Collectors.toList());
          FunctionSignature functionSignature =
              new FunctionSignature(resultType, argumentTypes, 0L);
          zetaSqlCatalog.addFunction(
              new Function(
                  path,
                  USER_DEFINED_JAVA_SCALAR_FUNCTIONS,
                  ZetaSQLFunctions.FunctionEnums.Mode.SCALAR,
                  ImmutableList.of(functionSignature)));
        } else {
          throw new IllegalArgumentException(
              String.format(
                  "Function %s has unrecognized implementation type %s.",
                  functionName, function.getClass().getName()));
        }
      }
    }
  }

  /** Validates that all parameter and return types of a schema UDF are supported in Java UDFs. */
  private void validateScalarFunctionImpl(ScalarFunctionImpl scalarFunction) {
    for (FunctionParameter parameter : scalarFunction.getParameters()) {
      validateJavaUdfCalciteType(parameter.getType(typeFactory));
    }
    validateJavaUdfCalciteType(scalarFunction.getReturnType(typeFactory));
  }

  /**
   * Throws {@link UnsupportedOperationException} if Calcite type is not supported in Java UDF.
   * Supported types are a subset of the corresponding Calcite types supported by {@link
   * BeamJavaUdfCalcRule}.
   */
  private void validateJavaUdfCalciteType(RelDataType type) {
    switch (type.getSqlTypeName()) {
      case BIGINT:
      case DOUBLE:
      case BOOLEAN:
      case VARCHAR:
      case VARBINARY:
        // Supported.
        break;
      case DECIMAL:
      case DATE:
      case TIME:
      case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
      case TIMESTAMP:
      case ARRAY:
      case ROW:
        // Known Calcite types that Java UDFs cannot (yet) use.
        throw new UnsupportedOperationException(
            "Calcite type not allowed in Java UDF: " + type.getSqlTypeName().getName());
      default:
        throw new UnsupportedOperationException(
            "Unknown Calcite type: " + type.getSqlTypeName().getName());
    }
  }

  /**
   * Maps a CREATE FUNCTION statement's LANGUAGE clause to a function-group label, rejecting
   * unsupported languages and aggregate UDFs.
   */
  private String getFunctionGroup(ResolvedNodes.ResolvedCreateFunctionStmt createFunctionStmt) {
    switch (createFunctionStmt.getLanguage().toUpperCase()) {
      case "JAVA":
        if (createFunctionStmt.getIsAggregate()) {
          throw new UnsupportedOperationException(
              "Java SQL aggregate functions are not supported (BEAM-10925).");
        }
        return USER_DEFINED_JAVA_SCALAR_FUNCTIONS;
      case "SQL":
        if (createFunctionStmt.getIsAggregate()) {
          throw new UnsupportedOperationException(
              "Native SQL aggregate functions are not supported (BEAM-9954).");
        }
        return USER_DEFINED_SQL_FUNCTIONS;
      case "PY":
      case "PYTHON":
      case "JS":
      case "JAVASCRIPT":
        throw new UnsupportedOperationException(
            String.format(
                "Function %s uses unsupported language %s.",
                String.join(".", createFunctionStmt.getNamePath()),
                createFunctionStmt.getLanguage()));
      default:
        throw new IllegalArgumentException(
            String.format(
                "Function %s uses unrecognized language %s.",
                String.join(".", createFunctionStmt.getNamePath()),
                createFunctionStmt.getLanguage()));
    }
  }

  /**
   * Assume last element in tablePath is a table name, and everything before is catalogs. So the
   * logic is to create nested catalogs until the last level, then add a table at the last level.
   *
   * <p>Table schema is extracted from Calcite schema based on the table name resolution strategy,
   * e.g. either by drilling down the schema.getSubschema() path or joining the table name with dots
   * to construct a single compound identifier (e.g. Data Catalog use case).
   */
  private void addTableToLeafCatalog(List<String> tablePath, QueryTrait queryTrait) {
    SimpleCatalog leafCatalog = createNestedCatalogs(zetaSqlCatalog, tablePath);
    org.apache.beam.vendor.calcite.v1_20_0.org.apache.calcite.schema.Table calciteTable =
        TableResolution.resolveCalciteTable(calciteSchema, tablePath);
    if (calciteTable == null) {
      throw new ZetaSqlException(
          "Wasn't able to resolve the path "
              + tablePath
              + " in schema: "
              + calciteSchema.getName());
    }
    RelDataType rowType = calciteTable.getRowType(typeFactory);
    TableResolution.SimpleTableWithPath tableWithPath =
        TableResolution.SimpleTableWithPath.of(tablePath);
    queryTrait.addResolvedTable(tableWithPath);
    addFieldsToTable(tableWithPath, rowType);
    leafCatalog.addSimpleTable(tableWithPath.getTable());
  }

  /** Copies each Calcite row-type field onto the ZetaSQL table as a simple column. */
  private static void addFieldsToTable(
      TableResolution.SimpleTableWithPath tableWithPath, RelDataType rowType) {
    for (RelDataTypeField field : rowType.getFieldList()) {
      tableWithPath
          .getTable()
          .addSimpleColumn(
              field.getName(), ZetaSqlCalciteTranslationUtils.toZetaSqlType(field.getType()));
    }
  }

  /** For table path like a.b.c we assume c is the table and a.b are the nested catalogs/schemas. */
  private static SimpleCatalog createNestedCatalogs(SimpleCatalog catalog, List<String> tablePath) {
    SimpleCatalog currentCatalog = catalog;
    for (int i = 0; i < tablePath.size() - 1; i++) {
      String nextCatalogName = tablePath.get(i);
      Optional<SimpleCatalog> existing = tryGetExisting(currentCatalog, nextCatalogName);
      currentCatalog =
          existing.isPresent() ? existing.get() : addNewCatalog(currentCatalog, nextCatalogName);
    }
    return currentCatalog;
  }

  /** Finds an existing sub-catalog with the given name, if one was already created. */
  private static Optional<SimpleCatalog> tryGetExisting(
      SimpleCatalog currentCatalog, String nextCatalogName) {
    return currentCatalog.getCatalogList().stream()
        .filter(c -> nextCatalogName.equals(c.getFullName()))
        .findFirst();
  }

  /** Creates and attaches a new sub-catalog with the given name. */
  private static SimpleCatalog addNewCatalog(SimpleCatalog currentCatalog, String nextCatalogName) {
    SimpleCatalog nextCatalog = new SimpleCatalog(nextCatalogName);
    currentCatalog.addSimpleCatalog(nextCatalog);
    return nextCatalog;
  }

  /**
   * Extracts the jar path from the CREATE FUNCTION statement's OPTIONS(path=...) clause.
   *
   * @throws IllegalArgumentException if no non-empty path option was supplied
   */
  private static String getJarPath(ResolvedNodes.ResolvedCreateFunctionStmt createFunctionStmt) {
    String jarPath = getOptionStringValue(createFunctionStmt, "path");
    if (jarPath.isEmpty()) {
      throw new IllegalArgumentException(
          String.format(
              "No jar was provided to define function %s. Add 'OPTIONS (path=<jar location>)' to the CREATE FUNCTION statement.",
              String.join(".", createFunctionStmt.getNamePath())));
    }
    return jarPath;
  }

  /**
   * Returns the STRING value of the named option on the CREATE FUNCTION statement, or an empty
   * string if the option is absent.
   *
   * @throws IllegalArgumentException if the option is present but null or not a STRING
   */
  private static String getOptionStringValue(
      ResolvedNodes.ResolvedCreateFunctionStmt createFunctionStmt, String optionName) {
    for (ResolvedNodes.ResolvedOption option : createFunctionStmt.getOptionList()) {
      if (optionName.equals(option.getName())) {
        if (option.getValue() == null) {
          throw new IllegalArgumentException(
              String.format(
                  "Option '%s' has null value (expected %s).",
                  optionName, ZetaSQLType.TypeKind.TYPE_STRING));
        }
        if (option.getValue().getType().getKind() != ZetaSQLType.TypeKind.TYPE_STRING) {
          throw new IllegalArgumentException(
              String.format(
                  "Option '%s' has type %s (expected %s).",
                  optionName,
                  option.getValue().getType().getKind(),
                  ZetaSQLType.TypeKind.TYPE_STRING));
        }
        return ((ResolvedNodes.ResolvedLiteral) option.getValue()).getValue().getStringValue();
      }
    }
    return "";
  }
}
|
class BeamZetaSqlCatalog {
public static final String PRE_DEFINED_WINDOW_FUNCTIONS = "pre_defined_window_functions";
public static final String USER_DEFINED_SQL_FUNCTIONS = "user_defined_functions";
public static final String USER_DEFINED_JAVA_SCALAR_FUNCTIONS =
"user_defined_java_scalar_functions";
/**
* Same as {@link Function}.ZETASQL_FUNCTION_GROUP_NAME. Identifies built-in ZetaSQL functions.
*/
public static final String ZETASQL_FUNCTION_GROUP_NAME = "ZetaSQL";
private static final ImmutableList<String> PRE_DEFINED_WINDOW_FUNCTION_DECLARATIONS =
ImmutableList.of(
"CREATE FUNCTION TUMBLE(ts TIMESTAMP, window_size STRING) AS (1);",
"CREATE FUNCTION TUMBLE_START(window_size STRING) RETURNS TIMESTAMP AS (null);",
"CREATE FUNCTION TUMBLE_END(window_size STRING) RETURNS TIMESTAMP AS (null);",
"CREATE FUNCTION HOP(ts TIMESTAMP, emit_frequency STRING, window_size STRING) AS (1);",
"CREATE FUNCTION HOP_START(emit_frequency STRING, window_size STRING) "
+ "RETURNS TIMESTAMP AS (null);",
"CREATE FUNCTION HOP_END(emit_frequency STRING, window_size STRING) "
+ "RETURNS TIMESTAMP AS (null);",
"CREATE FUNCTION SESSION(ts TIMESTAMP, session_gap STRING) AS (1);",
"CREATE FUNCTION SESSION_START(session_gap STRING) RETURNS TIMESTAMP AS (null);",
"CREATE FUNCTION SESSION_END(session_gap STRING) RETURNS TIMESTAMP AS (null);");
/** The top-level Calcite schema, which may contain sub-schemas. */
private final SchemaPlus calciteSchema;
/**
* The top-level ZetaSQL catalog, which may contain nested catalogs for qualified table and
* function references.
*/
private final SimpleCatalog zetaSqlCatalog;
private final JavaTypeFactory typeFactory;
private final JavaUdfLoader javaUdfLoader = new JavaUdfLoader();
private final Map<List<String>, ResolvedNodes.ResolvedCreateFunctionStmt> sqlScalarUdfs =
new HashMap<>();
/** User-defined table valued functions. */
private final Map<List<String>, ResolvedNode> sqlUdtvfs = new HashMap<>();
private final Map<List<String>, UserFunctionDefinitions.JavaScalarFunction> javaScalarUdfs =
new HashMap<>();
private BeamZetaSqlCatalog(
SchemaPlus calciteSchema, SimpleCatalog zetaSqlCatalog, JavaTypeFactory typeFactory) {
this.calciteSchema = calciteSchema;
this.zetaSqlCatalog = zetaSqlCatalog;
this.typeFactory = typeFactory;
}
/** Return catalog pre-populated with builtin functions. */
static BeamZetaSqlCatalog create(
SchemaPlus calciteSchema, JavaTypeFactory typeFactory, AnalyzerOptions options) {
BeamZetaSqlCatalog catalog =
new BeamZetaSqlCatalog(
calciteSchema, new SimpleCatalog(calciteSchema.getName()), typeFactory);
catalog.addFunctionsToCatalog(options);
return catalog;
}
SimpleCatalog getZetaSqlCatalog() {
return zetaSqlCatalog;
}
void addTables(List<List<String>> tables, QueryTrait queryTrait) {
tables.forEach(table -> addTableToLeafCatalog(table, queryTrait));
}
void addFunction(ResolvedNodes.ResolvedCreateFunctionStmt createFunctionStmt) {
String functionGroup = getFunctionGroup(createFunctionStmt);
switch (functionGroup) {
case USER_DEFINED_SQL_FUNCTIONS:
sqlScalarUdfs.put(createFunctionStmt.getNamePath(), createFunctionStmt);
break;
case USER_DEFINED_JAVA_SCALAR_FUNCTIONS:
validateJavaUdf(createFunctionStmt);
String jarPath = getJarPath(createFunctionStmt);
ScalarFn scalarFn =
javaUdfLoader.loadScalarFunction(createFunctionStmt.getNamePath(), jarPath);
Method method = ScalarFnReflector.getApplyMethod(scalarFn);
javaScalarUdfs.put(
createFunctionStmt.getNamePath(),
UserFunctionDefinitions.JavaScalarFunction.create(method, jarPath));
break;
default:
throw new IllegalArgumentException(
String.format("Encountered unrecognized function group %s.", functionGroup));
}
zetaSqlCatalog.addFunction(
new Function(
createFunctionStmt.getNamePath(),
functionGroup,
createFunctionStmt.getIsAggregate()
? ZetaSQLFunctions.FunctionEnums.Mode.AGGREGATE
: ZetaSQLFunctions.FunctionEnums.Mode.SCALAR,
ImmutableList.of(createFunctionStmt.getSignature())));
}
/**
* Throws {@link UnsupportedOperationException} if ZetaSQL type is not supported in Java UDF.
* Supported types are a subset of the types supported by {@link BeamJavaUdfCalcRule}.
*/
void validateJavaUdfZetaSqlType(Type type) {
switch (type.getKind()) {
case TYPE_INT64:
case TYPE_DOUBLE:
case TYPE_BOOL:
case TYPE_STRING:
case TYPE_BYTES:
break;
case TYPE_NUMERIC:
case TYPE_DATE:
case TYPE_TIME:
case TYPE_DATETIME:
case TYPE_TIMESTAMP:
case TYPE_ARRAY:
case TYPE_STRUCT:
default:
throw new UnsupportedOperationException(
"ZetaSQL type not allowed in Java UDF: " + type.getKind().name());
}
}
void addTableValuedFunction(
ResolvedNodes.ResolvedCreateTableFunctionStmt createTableFunctionStmt) {
zetaSqlCatalog.addTableValuedFunction(
new TableValuedFunction.FixedOutputSchemaTVF(
createTableFunctionStmt.getNamePath(),
createTableFunctionStmt.getSignature(),
TVFRelation.createColumnBased(
createTableFunctionStmt.getQuery().getColumnList().stream()
.map(c -> TVFRelation.Column.create(c.getName(), c.getType()))
.collect(Collectors.toList()))));
sqlUdtvfs.put(createTableFunctionStmt.getNamePath(), createTableFunctionStmt.getQuery());
}
UserFunctionDefinitions getUserFunctionDefinitions() {
return UserFunctionDefinitions.newBuilder()
.setSqlScalarFunctions(ImmutableMap.copyOf(sqlScalarUdfs))
.setSqlTableValuedFunctions(ImmutableMap.copyOf(sqlUdtvfs))
.setJavaScalarFunctions(ImmutableMap.copyOf(javaScalarUdfs))
.build();
}
private void addFunctionsToCatalog(AnalyzerOptions options) {
ZetaSQLBuiltinFunctionOptions zetasqlBuiltinFunctionOptions =
new ZetaSQLBuiltinFunctionOptions(options.getLanguageOptions());
SupportedZetaSqlBuiltinFunctions.ALLOWLIST.forEach(
zetasqlBuiltinFunctionOptions::includeFunctionSignatureId);
zetaSqlCatalog.addZetaSQLFunctions(zetasqlBuiltinFunctionOptions);
addWindowScalarFunctions(options);
addWindowTvfs();
addUdfsFromSchema();
}
private void addWindowScalarFunctions(AnalyzerOptions options) {
PRE_DEFINED_WINDOW_FUNCTION_DECLARATIONS.stream()
.map(
func ->
(ResolvedNodes.ResolvedCreateFunctionStmt)
Analyzer.analyzeStatement(func, options, zetaSqlCatalog))
.map(
resolvedFunc ->
new Function(
String.join(".", resolvedFunc.getNamePath()),
PRE_DEFINED_WINDOW_FUNCTIONS,
ZetaSQLFunctions.FunctionEnums.Mode.SCALAR,
ImmutableList.of(resolvedFunc.getSignature())))
.forEach(zetaSqlCatalog::addFunction);
}
@SuppressWarnings({
"nullness"
})
private void addWindowTvfs() {
FunctionArgumentType retType =
new FunctionArgumentType(ZetaSQLFunctions.SignatureArgumentKind.ARG_TYPE_RELATION);
FunctionArgumentType inputTableType =
new FunctionArgumentType(ZetaSQLFunctions.SignatureArgumentKind.ARG_TYPE_RELATION);
FunctionArgumentType descriptorType =
new FunctionArgumentType(
ZetaSQLFunctions.SignatureArgumentKind.ARG_TYPE_DESCRIPTOR,
FunctionArgumentType.FunctionArgumentTypeOptions.builder()
.setDescriptorResolutionTableOffset(0)
.build(),
1);
FunctionArgumentType stringType =
new FunctionArgumentType(TypeFactory.createSimpleType(ZetaSQLType.TypeKind.TYPE_STRING));
zetaSqlCatalog.addTableValuedFunction(
new TableValuedFunction.ForwardInputSchemaToOutputSchemaWithAppendedColumnTVF(
ImmutableList.of(TVFStreamingUtils.FIXED_WINDOW_TVF),
new FunctionSignature(
retType, ImmutableList.of(inputTableType, descriptorType, stringType), -1),
ImmutableList.of(
TVFRelation.Column.create(
TVFStreamingUtils.WINDOW_START,
TypeFactory.createSimpleType(ZetaSQLType.TypeKind.TYPE_TIMESTAMP)),
TVFRelation.Column.create(
TVFStreamingUtils.WINDOW_END,
TypeFactory.createSimpleType(ZetaSQLType.TypeKind.TYPE_TIMESTAMP))),
null,
null));
zetaSqlCatalog.addTableValuedFunction(
new TableValuedFunction.ForwardInputSchemaToOutputSchemaWithAppendedColumnTVF(
ImmutableList.of(TVFStreamingUtils.SLIDING_WINDOW_TVF),
new FunctionSignature(
retType,
ImmutableList.of(inputTableType, descriptorType, stringType, stringType),
-1),
ImmutableList.of(
TVFRelation.Column.create(
TVFStreamingUtils.WINDOW_START,
TypeFactory.createSimpleType(ZetaSQLType.TypeKind.TYPE_TIMESTAMP)),
TVFRelation.Column.create(
TVFStreamingUtils.WINDOW_END,
TypeFactory.createSimpleType(ZetaSQLType.TypeKind.TYPE_TIMESTAMP))),
null,
null));
zetaSqlCatalog.addTableValuedFunction(
new TableValuedFunction.ForwardInputSchemaToOutputSchemaWithAppendedColumnTVF(
ImmutableList.of(TVFStreamingUtils.SESSION_WINDOW_TVF),
new FunctionSignature(
retType,
ImmutableList.of(inputTableType, descriptorType, descriptorType, stringType),
-1),
ImmutableList.of(
TVFRelation.Column.create(
TVFStreamingUtils.WINDOW_START,
TypeFactory.createSimpleType(ZetaSQLType.TypeKind.TYPE_TIMESTAMP)),
TVFRelation.Column.create(
TVFStreamingUtils.WINDOW_END,
TypeFactory.createSimpleType(ZetaSQLType.TypeKind.TYPE_TIMESTAMP))),
null,
null));
}
private void addUdfsFromSchema() {
for (String functionName : calciteSchema.getFunctionNames()) {
Collection<org.apache.beam.vendor.calcite.v1_20_0.org.apache.calcite.schema.Function>
functions = calciteSchema.getFunctions(functionName);
if (functions.size() != 1) {
throw new IllegalArgumentException(
String.format(
"Expected exactly 1 definition for function '%s', but found %d."
+ " Beam ZetaSQL supports only a single function definition per function name (BEAM-12073).",
functionName, functions.size()));
}
for (org.apache.beam.vendor.calcite.v1_20_0.org.apache.calcite.schema.Function function :
functions) {
if (function instanceof ScalarFunctionImpl) {
ScalarFunctionImpl scalarFunction = (ScalarFunctionImpl) function;
validateScalarFunctionImpl(scalarFunction);
List<String> path = Arrays.asList(functionName.split("\\."));
Method method = scalarFunction.method;
javaScalarUdfs.put(path, UserFunctionDefinitions.JavaScalarFunction.create(method, ""));
FunctionArgumentType resultType =
new FunctionArgumentType(
ZetaSqlCalciteTranslationUtils.toZetaSqlType(
scalarFunction.getReturnType(typeFactory)));
List<FunctionArgumentType> argumentTypes =
scalarFunction.getParameters().stream()
.map(
(arg) ->
new FunctionArgumentType(
ZetaSqlCalciteTranslationUtils.toZetaSqlType(
arg.getType(typeFactory))))
.collect(Collectors.toList());
FunctionSignature functionSignature =
new FunctionSignature(resultType, argumentTypes, 0L);
zetaSqlCatalog.addFunction(
new Function(
path,
USER_DEFINED_JAVA_SCALAR_FUNCTIONS,
ZetaSQLFunctions.FunctionEnums.Mode.SCALAR,
ImmutableList.of(functionSignature)));
} else {
throw new IllegalArgumentException(
String.format(
"Function %s has unrecognized implementation type %s.",
functionName, function.getClass().getName()));
}
}
}
}
private void validateScalarFunctionImpl(ScalarFunctionImpl scalarFunction) {
for (FunctionParameter parameter : scalarFunction.getParameters()) {
validateJavaUdfCalciteType(parameter.getType(typeFactory));
}
validateJavaUdfCalciteType(scalarFunction.getReturnType(typeFactory));
}
/**
* Throws {@link UnsupportedOperationException} if Calcite type is not supported in Java UDF.
* Supported types are a subset of the corresponding Calcite types supported by {@link
* BeamJavaUdfCalcRule}.
*/
private void validateJavaUdfCalciteType(RelDataType type) {
switch (type.getSqlTypeName()) {
case BIGINT:
case DOUBLE:
case BOOLEAN:
case VARCHAR:
case VARBINARY:
break;
case DECIMAL:
case DATE:
case TIME:
case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
case TIMESTAMP:
case ARRAY:
case ROW:
default:
throw new UnsupportedOperationException(
"Calcite type not allowed in ZetaSQL Java UDF: " + type.getSqlTypeName().getName());
}
}
private String getFunctionGroup(ResolvedNodes.ResolvedCreateFunctionStmt createFunctionStmt) {
switch (createFunctionStmt.getLanguage().toUpperCase()) {
case "JAVA":
if (createFunctionStmt.getIsAggregate()) {
throw new UnsupportedOperationException(
"Java SQL aggregate functions are not supported (BEAM-10925).");
}
return USER_DEFINED_JAVA_SCALAR_FUNCTIONS;
case "SQL":
if (createFunctionStmt.getIsAggregate()) {
throw new UnsupportedOperationException(
"Native SQL aggregate functions are not supported (BEAM-9954).");
}
return USER_DEFINED_SQL_FUNCTIONS;
case "PY":
case "PYTHON":
case "JS":
case "JAVASCRIPT":
throw new UnsupportedOperationException(
String.format(
"Function %s uses unsupported language %s.",
String.join(".", createFunctionStmt.getNamePath()),
createFunctionStmt.getLanguage()));
default:
throw new IllegalArgumentException(
String.format(
"Function %s uses unrecognized language %s.",
String.join(".", createFunctionStmt.getNamePath()),
createFunctionStmt.getLanguage()));
}
}
/**
* Assume last element in tablePath is a table name, and everything before is catalogs. So the
* logic is to create nested catalogs until the last level, then add a table at the last level.
*
* <p>Table schema is extracted from Calcite schema based on the table name resolution strategy,
* e.g. either by drilling down the schema.getSubschema() path or joining the table name with dots
* to construct a single compound identifier (e.g. Data Catalog use case).
*/
private void addTableToLeafCatalog(List<String> tablePath, QueryTrait queryTrait) {
SimpleCatalog leafCatalog = createNestedCatalogs(zetaSqlCatalog, tablePath);
org.apache.beam.vendor.calcite.v1_20_0.org.apache.calcite.schema.Table calciteTable =
TableResolution.resolveCalciteTable(calciteSchema, tablePath);
if (calciteTable == null) {
throw new ZetaSqlException(
"Wasn't able to resolve the path "
+ tablePath
+ " in schema: "
+ calciteSchema.getName());
}
RelDataType rowType = calciteTable.getRowType(typeFactory);
TableResolution.SimpleTableWithPath tableWithPath =
TableResolution.SimpleTableWithPath.of(tablePath);
queryTrait.addResolvedTable(tableWithPath);
addFieldsToTable(tableWithPath, rowType);
leafCatalog.addSimpleTable(tableWithPath.getTable());
}
private static void addFieldsToTable(
TableResolution.SimpleTableWithPath tableWithPath, RelDataType rowType) {
for (RelDataTypeField field : rowType.getFieldList()) {
tableWithPath
.getTable()
.addSimpleColumn(
field.getName(), ZetaSqlCalciteTranslationUtils.toZetaSqlType(field.getType()));
}
}
/** For table path like a.b.c we assume c is the table and a.b are the nested catalogs/schemas. */
private static SimpleCatalog createNestedCatalogs(SimpleCatalog catalog, List<String> tablePath) {
SimpleCatalog currentCatalog = catalog;
for (int i = 0; i < tablePath.size() - 1; i++) {
String nextCatalogName = tablePath.get(i);
Optional<SimpleCatalog> existing = tryGetExisting(currentCatalog, nextCatalogName);
currentCatalog =
existing.isPresent() ? existing.get() : addNewCatalog(currentCatalog, nextCatalogName);
}
return currentCatalog;
}
private static Optional<SimpleCatalog> tryGetExisting(
SimpleCatalog currentCatalog, String nextCatalogName) {
return currentCatalog.getCatalogList().stream()
.filter(c -> nextCatalogName.equals(c.getFullName()))
.findFirst();
}
private static SimpleCatalog addNewCatalog(SimpleCatalog currentCatalog, String nextCatalogName) {
SimpleCatalog nextCatalog = new SimpleCatalog(nextCatalogName);
currentCatalog.addSimpleCatalog(nextCatalog);
return nextCatalog;
}
private static String getJarPath(ResolvedNodes.ResolvedCreateFunctionStmt createFunctionStmt) {
String jarPath = getOptionStringValue(createFunctionStmt, "path");
if (jarPath.isEmpty()) {
throw new IllegalArgumentException(
String.format(
"No jar was provided to define function %s. Add 'OPTIONS (path=<jar location>)' to the CREATE FUNCTION statement.",
String.join(".", createFunctionStmt.getNamePath())));
}
return jarPath;
}
private static String getOptionStringValue(
ResolvedNodes.ResolvedCreateFunctionStmt createFunctionStmt, String optionName) {
for (ResolvedNodes.ResolvedOption option : createFunctionStmt.getOptionList()) {
if (optionName.equals(option.getName())) {
if (option.getValue() == null) {
throw new IllegalArgumentException(
String.format(
"Option '%s' has null value (expected %s).",
optionName, ZetaSQLType.TypeKind.TYPE_STRING));
}
if (option.getValue().getType().getKind() != ZetaSQLType.TypeKind.TYPE_STRING) {
throw new IllegalArgumentException(
String.format(
"Option '%s' has type %s (expected %s).",
optionName,
option.getValue().getType().getKind(),
ZetaSQLType.TypeKind.TYPE_STRING));
}
return ((ResolvedNodes.ResolvedLiteral) option.getValue()).getValue().getStringValue();
}
}
return "";
}
}
|
To further improve this test: we can remove the timeout-related logic here, since the test already has a timeout set above (via the `@Test(timeout = ...)` annotation).
|
public void testTimerExecution() throws Exception {
PipelineOptions options = PipelineOptionsFactory.create();
options.setRunner(CrashingRunner.class);
options.as(FlinkPipelineOptions.class).setFlinkMaster("[local]");
options.as(FlinkPipelineOptions.class).setStreaming(isStreaming);
options
.as(PortablePipelineOptions.class)
.setDefaultEnvironmentType(Environments.ENVIRONMENT_EMBEDDED);
final String timerId = "foo";
final String stateId = "sizzle";
final int offset = 5000;
final int timerOutput = 4093;
int numKeys = 50;
List<KV<String, Integer>> input = new ArrayList<>();
List<KV<String, Integer>> expectedOutput = new ArrayList<>();
for (Integer key = 0; key < numKeys; ++key) {
expectedOutput.add(KV.of(key.toString(), timerOutput));
for (int i = 0; i < 15; ++i) {
input.add(KV.of(key.toString(), i));
expectedOutput.add(KV.of(key.toString(), i + offset));
}
}
Collections.shuffle(input);
DoFn<byte[], KV<String, Integer>> inputFn =
new DoFn<byte[], KV<String, Integer>>() {
@ProcessElement
public void processElement(ProcessContext context) {
for (KV<String, Integer> stringIntegerKV : input) {
context.output(stringIntegerKV);
}
}
};
DoFn<KV<String, Integer>, KV<String, Integer>> testFn =
new DoFn<KV<String, Integer>, KV<String, Integer>>() {
@TimerId(timerId)
private final TimerSpec spec = TimerSpecs.timer(TimeDomain.EVENT_TIME);
@StateId(stateId)
private final StateSpec<ValueState<String>> stateSpec =
StateSpecs.value(StringUtf8Coder.of());
@ProcessElement
public void processElement(
ProcessContext context,
@TimerId(timerId) Timer timer,
@StateId(stateId) ValueState<String> state,
BoundedWindow window) {
timer.set(window.maxTimestamp());
state.write(context.element().getKey());
context.output(
KV.of(context.element().getKey(), context.element().getValue() + offset));
}
@OnTimer(timerId)
public void onTimer(
@StateId(stateId) ValueState<String> state, OutputReceiver<KV<String, Integer>> r) {
r.output(KV.of(state.read(), timerOutput));
}
};
final Pipeline pipeline = Pipeline.create(options);
PCollection<KV<String, Integer>> output =
pipeline.apply(Impulse.create()).apply(ParDo.of(inputFn)).apply(ParDo.of(testFn));
PAssert.that(output).containsInAnyOrder(expectedOutput);
pipeline.replaceAll(Collections.singletonList(JavaReadViaImpulse.boundedOverride()));
RunnerApi.Pipeline pipelineProto = PipelineTranslation.toProto(pipeline);
FlinkJobInvocation jobInvocation =
FlinkJobInvocation.create(
"id",
"none",
flinkJobExecutor,
pipelineProto,
options.as(FlinkPipelineOptions.class),
null,
Collections.emptyList());
jobInvocation.start();
long timeout = System.currentTimeMillis() + 2 * 60 * 1000;
while (jobInvocation.getState() != Enum.DONE && System.currentTimeMillis() < timeout) {
Thread.sleep(1000);
}
assertThat(jobInvocation.getState(), is(Enum.DONE));
}
|
while (jobInvocation.getState() != Enum.DONE && System.currentTimeMillis() < timeout) {
|
  /**
   * Runs a pipeline exercising event-time timers plus value state through the portable Flink
   * runner and waits for the job to finish. Relies on the surrounding {@code @Test(timeout=...)}
   * annotation to bound total execution time instead of tracking a manual deadline.
   */
  public void testTimerExecution() throws Exception {
    PipelineOptions options = PipelineOptionsFactory.create();
    options.setRunner(CrashingRunner.class);
    options.as(FlinkPipelineOptions.class).setFlinkMaster("[local]");
    options.as(FlinkPipelineOptions.class).setStreaming(isStreaming);
    options
        .as(PortablePipelineOptions.class)
        .setDefaultEnvironmentType(Environments.ENVIRONMENT_EMBEDDED);
    final String timerId = "foo";
    final String stateId = "sizzle";
    final int offset = 5000;
    final int timerOutput = 4093;
    int numKeys = 50;
    // Build the (shuffled) input and the expected output: one timer firing per key plus one
    // offset-shifted element per input element.
    List<KV<String, Integer>> input = new ArrayList<>();
    List<KV<String, Integer>> expectedOutput = new ArrayList<>();
    for (Integer key = 0; key < numKeys; ++key) {
      expectedOutput.add(KV.of(key.toString(), timerOutput));
      for (int i = 0; i < 15; ++i) {
        input.add(KV.of(key.toString(), i));
        expectedOutput.add(KV.of(key.toString(), i + offset));
      }
    }
    Collections.shuffle(input);
    // Fans the prepared input list out of a single impulse element.
    DoFn<byte[], KV<String, Integer>> inputFn =
        new DoFn<byte[], KV<String, Integer>>() {
          @ProcessElement
          public void processElement(ProcessContext context) {
            for (KV<String, Integer> stringIntegerKV : input) {
              context.output(stringIntegerKV);
            }
          }
        };
    // Per element: set an end-of-window timer, stash the key in state, and emit the shifted
    // value; when the timer fires, emit the stored key with the sentinel timerOutput value.
    DoFn<KV<String, Integer>, KV<String, Integer>> testFn =
        new DoFn<KV<String, Integer>, KV<String, Integer>>() {
          @TimerId(timerId)
          private final TimerSpec spec = TimerSpecs.timer(TimeDomain.EVENT_TIME);
          @StateId(stateId)
          private final StateSpec<ValueState<String>> stateSpec =
              StateSpecs.value(StringUtf8Coder.of());
          @ProcessElement
          public void processElement(
              ProcessContext context,
              @TimerId(timerId) Timer timer,
              @StateId(stateId) ValueState<String> state,
              BoundedWindow window) {
            timer.set(window.maxTimestamp());
            state.write(context.element().getKey());
            context.output(
                KV.of(context.element().getKey(), context.element().getValue() + offset));
          }
          @OnTimer(timerId)
          public void onTimer(
              @StateId(stateId) ValueState<String> state, OutputReceiver<KV<String, Integer>> r) {
            r.output(KV.of(state.read(), timerOutput));
          }
        };
    final Pipeline pipeline = Pipeline.create(options);
    PCollection<KV<String, Integer>> output =
        pipeline.apply(Impulse.create()).apply(ParDo.of(inputFn)).apply(ParDo.of(testFn));
    PAssert.that(output).containsInAnyOrder(expectedOutput);
    pipeline.replaceAll(Collections.singletonList(JavaReadViaImpulse.boundedOverride()));
    RunnerApi.Pipeline pipelineProto = PipelineTranslation.toProto(pipeline);
    FlinkJobInvocation jobInvocation =
        FlinkJobInvocation.create(
            "id",
            "none",
            flinkJobExecutor,
            pipelineProto,
            options.as(FlinkPipelineOptions.class),
            null,
            Collections.emptyList());
    jobInvocation.start();
    // Poll until the job reaches DONE; the @Test timeout guards against hangs.
    while (jobInvocation.getState() != Enum.DONE) {
      Thread.sleep(1000);
    }
    assertThat(jobInvocation.getState(), is(Enum.DONE));
  }
|
/**
 * Tests timer execution on the portable Flink runner, parameterized over streaming (true) and
 * batch (false) execution modes.
 */
class PortableTimersExecutionTest implements Serializable {
  @Parameters
  public static Object[] testModes() {
    return new Object[] {true, false};
  }
  // Whether the pipeline under test runs in streaming (true) or batch (false) mode.
  @Parameter public boolean isStreaming;
  // Executor driving the Flink job invocation; recreated for every test method.
  private transient ListeningExecutorService flinkJobExecutor;
  @Before
  public void setup() {
    flinkJobExecutor = MoreExecutors.listeningDecorator(Executors.newCachedThreadPool());
  }
  @After
  public void tearDown() {
    flinkJobExecutor.shutdown();
  }
  // NOTE(review): the body of the @Test-annotated method appears to be elided from this snippet;
  // the annotation's timeout bounds the whole test run — confirm against the full source.
  @Test(timeout = 120_000)
}
|
/**
 * Tests timer execution on the portable Flink runner, parameterized over streaming (true) and
 * batch (false) execution modes.
 */
class PortableTimersExecutionTest implements Serializable {
  @Parameters
  public static Object[] testModes() {
    return new Object[] {true, false};
  }
  // Whether the pipeline under test runs in streaming (true) or batch (false) mode.
  @Parameter public boolean isStreaming;
  // Executor driving the Flink job invocation; recreated for every test method.
  private transient ListeningExecutorService flinkJobExecutor;
  @Before
  public void setup() {
    flinkJobExecutor = MoreExecutors.listeningDecorator(Executors.newCachedThreadPool());
  }
  @After
  public void tearDown() {
    flinkJobExecutor.shutdown();
  }
  // NOTE(review): the body of the @Test-annotated method appears to be elided from this snippet;
  // the annotation's timeout bounds the whole test run — confirm against the full source.
  @Test(timeout = 120_000)
}
|
What I'm a bit concerned about here is that it is now quite easy for users to break things. For example, an OIDC `AuthenticationFailedException` thrown by `quarkus-oidc` is better handled by `quarkus-oidc` itself so that a correct challenge can be prepared; if the user has registered a mapper, then that mapper is in total control now. For example, a few users combine Basic and the OIDC code flow, so if the mapper blindly returns 401 then it is a problem. I know that when customization was not possible with proactive auth enabled it was a problem, and now that this is fixed I'm looking for new problems :-). I'm not sure; it feels like for some cases we should simply not allow intercepting it. If it is only a 401 that is returned by default, then sure, why not let users customize it. But if a redirect to Keycloak has to be returned, the custom mapper will not be able to form the redirect URL correctly — the way it is built depends on the OIDC configuration and other factors.
|
public Handler<RoutingContext> authenticationMechanismHandler(boolean proactiveAuthentication) {
return new Handler<RoutingContext>() {
volatile HttpAuthenticator authenticator;
@Override
public void handle(RoutingContext event) {
if (authenticator == null) {
authenticator = CDI.current().select(HttpAuthenticator.class).get();
}
event.put(HttpAuthenticator.class.getName(), authenticator);
if (event.get(QuarkusHttpUser.AUTH_FAILURE_HANDLER) == null) {
event.put(QuarkusHttpUser.AUTH_FAILURE_HANDLER, new DefaultAuthFailureHandler());
}
if (proactiveAuthentication) {
Uni<SecurityIdentity> potentialUser = authenticator.attemptAuthentication(event).memoize().indefinitely();
potentialUser
.subscribe().withSubscriber(new UniSubscriber<SecurityIdentity>() {
@Override
public void onSubscribe(UniSubscription subscription) {
}
@Override
public void onItem(SecurityIdentity identity) {
if (event.response().ended()) {
return;
}
if (identity == null) {
Uni<SecurityIdentity> anon = authenticator.getIdentityProviderManager()
.authenticate(AnonymousAuthenticationRequest.INSTANCE);
anon.subscribe().withSubscriber(new UniSubscriber<SecurityIdentity>() {
@Override
public void onSubscribe(UniSubscription subscription) {
}
@Override
public void onItem(SecurityIdentity item) {
event.put(QuarkusHttpUser.DEFERRED_IDENTITY_KEY, anon);
event.setUser(new QuarkusHttpUser(item));
event.next();
}
@Override
public void onFailure(Throwable failure) {
BiConsumer<RoutingContext, Throwable> handler = event
.get(QuarkusHttpUser.AUTH_FAILURE_HANDLER);
if (handler != null) {
handler.accept(event, failure);
}
}
});
} else {
event.setUser(new QuarkusHttpUser(identity));
event.put(QuarkusHttpUser.DEFERRED_IDENTITY_KEY, potentialUser);
event.next();
}
}
@Override
public void onFailure(Throwable failure) {
BiConsumer<RoutingContext, Throwable> handler = event
.get(QuarkusHttpUser.AUTH_FAILURE_HANDLER);
if (handler != null) {
handler.accept(event, failure);
}
}
});
} else {
Uni<SecurityIdentity> lazyUser = Uni
.createFrom()
.nullItem()
.flatMap(n -> authenticator.attemptAuthentication(event))
.memoize()
.indefinitely()
.flatMap(new Function<SecurityIdentity, Uni<? extends SecurityIdentity>>() {
@Override
public Uni<? extends SecurityIdentity> apply(SecurityIdentity securityIdentity) {
if (securityIdentity == null) {
return authenticator.getIdentityProviderManager()
.authenticate(AnonymousAuthenticationRequest.INSTANCE);
}
return Uni.createFrom().item(securityIdentity);
}
}).onTermination().invoke(new Functions.TriConsumer<SecurityIdentity, Throwable, Boolean>() {
@Override
public void accept(SecurityIdentity identity, Throwable throwable, Boolean aBoolean) {
if (identity != null) {
if (identity != null) {
event.setUser(new QuarkusHttpUser(identity));
}
} else if (throwable != null) {
BiConsumer<RoutingContext, Throwable> handler = event
.get(QuarkusHttpUser.AUTH_FAILURE_HANDLER);
if (handler != null) {
handler.accept(event, throwable);
}
}
}
}).memoize().indefinitely();
event.put(QuarkusHttpUser.DEFERRED_IDENTITY_KEY, lazyUser);
event.next();
}
}
};
}
|
public Handler<RoutingContext> authenticationMechanismHandler(boolean proactiveAuthentication) {
return new Handler<RoutingContext>() {
volatile HttpAuthenticator authenticator;
@Override
public void handle(RoutingContext event) {
if (authenticator == null) {
authenticator = CDI.current().select(HttpAuthenticator.class).get();
}
event.put(HttpAuthenticator.class.getName(), authenticator);
event.put(QuarkusHttpUser.AUTH_FAILURE_HANDLER, new DefaultAuthFailureHandler());
if (proactiveAuthentication) {
Uni<SecurityIdentity> potentialUser = authenticator.attemptAuthentication(event).memoize().indefinitely();
potentialUser
.subscribe().withSubscriber(new UniSubscriber<SecurityIdentity>() {
@Override
public void onSubscribe(UniSubscription subscription) {
}
@Override
public void onItem(SecurityIdentity identity) {
if (event.response().ended()) {
return;
}
if (identity == null) {
Uni<SecurityIdentity> anon = authenticator.getIdentityProviderManager()
.authenticate(AnonymousAuthenticationRequest.INSTANCE);
anon.subscribe().withSubscriber(new UniSubscriber<SecurityIdentity>() {
@Override
public void onSubscribe(UniSubscription subscription) {
}
@Override
public void onItem(SecurityIdentity item) {
event.put(QuarkusHttpUser.DEFERRED_IDENTITY_KEY, anon);
event.setUser(new QuarkusHttpUser(item));
event.next();
}
@Override
public void onFailure(Throwable failure) {
BiConsumer<RoutingContext, Throwable> handler = event
.get(QuarkusHttpUser.AUTH_FAILURE_HANDLER);
if (handler != null) {
handler.accept(event, failure);
}
}
});
} else {
event.setUser(new QuarkusHttpUser(identity));
event.put(QuarkusHttpUser.DEFERRED_IDENTITY_KEY, potentialUser);
event.next();
}
}
@Override
public void onFailure(Throwable failure) {
BiConsumer<RoutingContext, Throwable> handler = event
.get(QuarkusHttpUser.AUTH_FAILURE_HANDLER);
if (handler != null) {
handler.accept(event, failure);
}
}
});
} else {
Uni<SecurityIdentity> lazyUser = Uni
.createFrom()
.nullItem()
.flatMap(n -> authenticator.attemptAuthentication(event))
.memoize()
.indefinitely()
.flatMap(new Function<SecurityIdentity, Uni<? extends SecurityIdentity>>() {
@Override
public Uni<? extends SecurityIdentity> apply(SecurityIdentity securityIdentity) {
if (securityIdentity == null) {
return authenticator.getIdentityProviderManager()
.authenticate(AnonymousAuthenticationRequest.INSTANCE);
}
return Uni.createFrom().item(securityIdentity);
}
}).onTermination().invoke(new Functions.TriConsumer<SecurityIdentity, Throwable, Boolean>() {
@Override
public void accept(SecurityIdentity identity, Throwable throwable, Boolean aBoolean) {
if (identity != null) {
if (identity != null) {
event.setUser(new QuarkusHttpUser(identity));
}
} else if (throwable != null) {
BiConsumer<RoutingContext, Throwable> handler = event
.get(QuarkusHttpUser.AUTH_FAILURE_HANDLER);
if (handler != null) {
handler.accept(event, throwable);
}
}
}
}).memoize().indefinitely();
event.put(QuarkusHttpUser.DEFERRED_IDENTITY_KEY, lazyUser);
event.next();
}
}
};
}
|
/**
 * Recorder that wires Vert.x HTTP security at runtime: authorization checks, form/basic/mTLS
 * authentication mechanism beans, and the form-auth POST handler.
 */
class HttpSecurityRecorder {
    private static final Logger log = Logger.getLogger(HttpSecurityRecorder.class);
    // Completion callback used where a Consumer is required but nothing needs to happen.
    protected static final Consumer<Throwable> NOOP_CALLBACK = new Consumer<Throwable>() {
        @Override
        public void accept(Throwable throwable) {
        }
    };
    final RuntimeValue<HttpConfiguration> httpConfiguration;
    final HttpBuildTimeConfig buildTimeConfig;
    // Lazily generated fallback encryption key for persistent form auth; shared across instances.
    static volatile String encryptionKey;
    public HttpSecurityRecorder(RuntimeValue<HttpConfiguration> httpConfiguration, HttpBuildTimeConfig buildTimeConfig) {
        this.httpConfiguration = httpConfiguration;
        this.buildTimeConfig = buildTimeConfig;
    }
    /** Returns a route handler that delegates permission checks to the CDI {@code HttpAuthorizer}. */
    public Handler<RoutingContext> permissionCheckHandler() {
        return new Handler<RoutingContext>() {
            volatile HttpAuthorizer authorizer;
            @Override
            public void handle(RoutingContext event) {
                // Lazy CDI lookup: the container may not be available when the handler is created.
                if (authorizer == null) {
                    authorizer = CDI.current().select(HttpAuthorizer.class).get();
                }
                authorizer.checkPermission(event);
            }
        };
    }
    /** Initializes the path-matching security policy with the configured permissions/policies. */
    public BeanContainerListener initPermissions(HttpBuildTimeConfig permissions,
            Map<String, Supplier<HttpSecurityPolicy>> policies) {
        return new BeanContainerListener() {
            @Override
            public void created(BeanContainer container) {
                container.instance(PathMatchingHttpSecurityPolicy.class).init(permissions, policies);
            }
        };
    }
    /** Builds the FORM authentication mechanism from build-time and runtime configuration. */
    public Supplier<FormAuthenticationMechanism> setupFormAuth() {
        return new Supplier<FormAuthenticationMechanism>() {
            @Override
            public FormAuthenticationMechanism get() {
                String key;
                if (!httpConfiguration.getValue().encryptionKey.isPresent()) {
                    // No key configured: generate (once) a random key so sessions survive within
                    // this process, and warn because it will not survive restarts.
                    if (encryptionKey != null) {
                        key = encryptionKey;
                    } else {
                        byte[] data = new byte[32];
                        new SecureRandom().nextBytes(data);
                        key = encryptionKey = Base64.getEncoder().encodeToString(data);
                        log.warn("Encryption key was not specified for persistent FORM auth, using temporary key " + key);
                    }
                } else {
                    key = httpConfiguration.getValue().encryptionKey.get();
                }
                FormAuthConfig form = buildTimeConfig.auth.form;
                PersistentLoginManager loginManager = new PersistentLoginManager(key, form.cookieName, form.timeout.toMillis(),
                        form.newCookieInterval.toMillis(), form.httpOnlyCookie);
                // Normalize all configured page locations to absolute paths.
                String loginPage = form.loginPage.startsWith("/") ? form.loginPage : "/" + form.loginPage;
                String errorPage = form.errorPage.startsWith("/") ? form.errorPage : "/" + form.errorPage;
                String landingPage = form.landingPage.startsWith("/") ? form.landingPage : "/" + form.landingPage;
                String postLocation = form.postLocation.startsWith("/") ? form.postLocation : "/" + form.postLocation;
                String usernameParameter = form.usernameParameter;
                String passwordParameter = form.passwordParameter;
                String locationCookie = form.locationCookie;
                boolean redirectAfterLogin = form.redirectAfterLogin;
                return new FormAuthenticationMechanism(loginPage, postLocation, usernameParameter, passwordParameter,
                        errorPage, landingPage, redirectAfterLogin, locationCookie, loginManager);
            }
        };
    }
    /** Builds the BASIC authentication mechanism. */
    public Supplier<?> setupBasicAuth(HttpBuildTimeConfig buildTimeConfig) {
        return new Supplier<BasicAuthenticationMechanism>() {
            @Override
            public BasicAuthenticationMechanism get() {
                return new BasicAuthenticationMechanism(buildTimeConfig.auth.realm.orElse(null),
                        buildTimeConfig.auth.form.enabled);
            }
        };
    }
    /** Builds the mutual-TLS client authentication mechanism. */
    public Supplier<?> setupMtlsClientAuth() {
        return new Supplier<MtlsAuthenticationMechanism>() {
            @Override
            public MtlsAuthenticationMechanism get() {
                return new MtlsAuthenticationMechanism();
            }
        };
    }
    /**
     * This handler resolves the identity, and will be mapped to the post location. Otherwise,
     * for lazy auth the post will not be evaluated if there is no security rule for the post location.
     */
    public Handler<RoutingContext> formAuthPostHandler() {
        return new Handler<RoutingContext>() {
            @Override
            public void handle(RoutingContext event) {
                // Force the deferred identity to resolve so the form POST is processed.
                Uni<SecurityIdentity> user = event.get(QuarkusHttpUser.DEFERRED_IDENTITY_KEY);
                user.subscribe().withSubscriber(new UniSubscriber<SecurityIdentity>() {
                    @Override
                    public void onSubscribe(UniSubscription uniSubscription) {
                    }
                    @Override
                    public void onItem(SecurityIdentity securityIdentity) {
                        event.next();
                    }
                    @Override
                    public void onFailure(Throwable throwable) {
                        event.fail(throwable);
                    }
                });
            }
        };
    }
}
|
/**
 * Recorder that wires Vert.x HTTP security at runtime: authorization checks, form/basic/mTLS
 * authentication mechanism beans, the form-auth POST handler, and the default handler used to
 * translate authentication failures into HTTP responses.
 */
class HttpSecurityRecorder {
    private static final Logger log = Logger.getLogger(HttpSecurityRecorder.class);
    // Completion callback used where a Consumer is required but nothing needs to happen.
    protected static final Consumer<Throwable> NOOP_CALLBACK = new Consumer<Throwable>() {
        @Override
        public void accept(Throwable throwable) {
        }
    };
    final RuntimeValue<HttpConfiguration> httpConfiguration;
    final HttpBuildTimeConfig buildTimeConfig;
    // Lazily generated fallback encryption key for persistent form auth; shared across instances.
    static volatile String encryptionKey;
    public HttpSecurityRecorder(RuntimeValue<HttpConfiguration> httpConfiguration, HttpBuildTimeConfig buildTimeConfig) {
        this.httpConfiguration = httpConfiguration;
        this.buildTimeConfig = buildTimeConfig;
    }
    /** Returns a route handler that delegates permission checks to the CDI {@code HttpAuthorizer}. */
    public Handler<RoutingContext> permissionCheckHandler() {
        return new Handler<RoutingContext>() {
            volatile HttpAuthorizer authorizer;
            @Override
            public void handle(RoutingContext event) {
                // Lazy CDI lookup: the container may not be available when the handler is created.
                if (authorizer == null) {
                    authorizer = CDI.current().select(HttpAuthorizer.class).get();
                }
                authorizer.checkPermission(event);
            }
        };
    }
    /** Initializes the path-matching security policy with the configured permissions/policies. */
    public BeanContainerListener initPermissions(HttpBuildTimeConfig permissions,
            Map<String, Supplier<HttpSecurityPolicy>> policies) {
        return new BeanContainerListener() {
            @Override
            public void created(BeanContainer container) {
                container.instance(PathMatchingHttpSecurityPolicy.class).init(permissions, policies);
            }
        };
    }
    /** Builds the FORM authentication mechanism from build-time and runtime configuration. */
    public Supplier<FormAuthenticationMechanism> setupFormAuth() {
        return new Supplier<FormAuthenticationMechanism>() {
            @Override
            public FormAuthenticationMechanism get() {
                String key;
                if (!httpConfiguration.getValue().encryptionKey.isPresent()) {
                    // No key configured: generate (once) a random key so sessions survive within
                    // this process, and warn because it will not survive restarts.
                    if (encryptionKey != null) {
                        key = encryptionKey;
                    } else {
                        byte[] data = new byte[32];
                        new SecureRandom().nextBytes(data);
                        key = encryptionKey = Base64.getEncoder().encodeToString(data);
                        log.warn("Encryption key was not specified for persistent FORM auth, using temporary key " + key);
                    }
                } else {
                    key = httpConfiguration.getValue().encryptionKey.get();
                }
                FormAuthConfig form = buildTimeConfig.auth.form;
                PersistentLoginManager loginManager = new PersistentLoginManager(key, form.cookieName, form.timeout.toMillis(),
                        form.newCookieInterval.toMillis(), form.httpOnlyCookie);
                // Normalize all configured page locations to absolute paths.
                String loginPage = form.loginPage.startsWith("/") ? form.loginPage : "/" + form.loginPage;
                String errorPage = form.errorPage.startsWith("/") ? form.errorPage : "/" + form.errorPage;
                String landingPage = form.landingPage.startsWith("/") ? form.landingPage : "/" + form.landingPage;
                String postLocation = form.postLocation.startsWith("/") ? form.postLocation : "/" + form.postLocation;
                String usernameParameter = form.usernameParameter;
                String passwordParameter = form.passwordParameter;
                String locationCookie = form.locationCookie;
                boolean redirectAfterLogin = form.redirectAfterLogin;
                return new FormAuthenticationMechanism(loginPage, postLocation, usernameParameter, passwordParameter,
                        errorPage, landingPage, redirectAfterLogin, locationCookie, loginManager);
            }
        };
    }
    /** Builds the BASIC authentication mechanism. */
    public Supplier<?> setupBasicAuth(HttpBuildTimeConfig buildTimeConfig) {
        return new Supplier<BasicAuthenticationMechanism>() {
            @Override
            public BasicAuthenticationMechanism get() {
                return new BasicAuthenticationMechanism(buildTimeConfig.auth.realm.orElse(null),
                        buildTimeConfig.auth.form.enabled);
            }
        };
    }
    /** Builds the mutual-TLS client authentication mechanism. */
    public Supplier<?> setupMtlsClientAuth() {
        return new Supplier<MtlsAuthenticationMechanism>() {
            @Override
            public MtlsAuthenticationMechanism get() {
                return new MtlsAuthenticationMechanism();
            }
        };
    }
    /**
     * This handler resolves the identity, and will be mapped to the post location. Otherwise,
     * for lazy auth the post will not be evaluated if there is no security rule for the post location.
     */
    public Handler<RoutingContext> formAuthPostHandler() {
        return new Handler<RoutingContext>() {
            @Override
            public void handle(RoutingContext event) {
                // Force the deferred identity to resolve so the form POST is processed.
                Uni<SecurityIdentity> user = event.get(QuarkusHttpUser.DEFERRED_IDENTITY_KEY);
                user.subscribe().withSubscriber(new UniSubscriber<SecurityIdentity>() {
                    @Override
                    public void onSubscribe(UniSubscription uniSubscription) {
                    }
                    @Override
                    public void onItem(SecurityIdentity securityIdentity) {
                        event.next();
                    }
                    @Override
                    public void onFailure(Throwable throwable) {
                        event.fail(throwable);
                    }
                });
            }
        };
    }
    /**
     * Default mapping of authentication failures to HTTP responses.
     *
     * <p>NOTE(review): if users can replace this handler, mechanism-specific behavior (e.g. the
     * OIDC challenge/redirect preparation) may be bypassed — confirm which exceptions should be
     * interceptable.
     */
    public static final class DefaultAuthFailureHandler implements BiConsumer<RoutingContext, Throwable> {
        private DefaultAuthFailureHandler() {
        }
        @Override
        public void accept(RoutingContext event, Throwable throwable) {
            throwable = extractRootCause(throwable);
            if (throwable instanceof AuthenticationFailedException) {
                // Let the active mechanism send its own challenge (e.g. WWW-Authenticate or an
                // OIDC redirect), then make sure the response is completed.
                getAuthenticator(event).sendChallenge(event).subscribe().with(new Consumer<Boolean>() {
                    @Override
                    public void accept(Boolean aBoolean) {
                        if (!event.response().ended()) {
                            event.response().end();
                        }
                    }
                }, new Consumer<Throwable>() {
                    @Override
                    public void accept(Throwable throwable) {
                        event.fail(throwable);
                    }
                });
            } else if (throwable instanceof AuthenticationCompletionException) {
                // Authentication ran but could not be completed: plain 401, no challenge.
                log.debug("Authentication has failed, returning HTTP status 401");
                event.response().setStatusCode(401);
                event.response().end();
            } else if (throwable instanceof AuthenticationRedirectException) {
                // The mechanism asked for a redirect (e.g. to an external login page).
                AuthenticationRedirectException redirectEx = (AuthenticationRedirectException) throwable;
                event.response().setStatusCode(redirectEx.getCode());
                event.response().headers().set(HttpHeaders.LOCATION, redirectEx.getRedirectUri());
                event.response().headers().set(HttpHeaders.CACHE_CONTROL, "no-store");
                event.response().headers().set("Pragma", "no-cache");
                event.response().end();
            } else {
                event.fail(throwable);
            }
        }
        private static HttpAuthenticator getAuthenticator(RoutingContext event) {
            return event.get(HttpAuthenticator.class.getName());
        }
        // Unwraps CompletionException/CompositeException layers to find the meaningful cause.
        private static Throwable extractRootCause(Throwable throwable) {
            while ((throwable instanceof CompletionException && throwable.getCause() != null) ||
                    (throwable instanceof CompositeException)) {
                if (throwable instanceof CompositeException) {
                    throwable = ((CompositeException) throwable).getCauses().get(0);
                } else {
                    throwable = throwable.getCause();
                }
            }
            return throwable;
        }
    }
}
|
|
`null instanceof X` is always false, so an explicit null check before an `instanceof` test is redundant.
|
public boolean equals(@Nullable Object o) {
if (this == o) {
return true;
}
if (!(o instanceof WeightedValue)) {
return false;
}
WeightedValue<?> that = (WeightedValue<?>) o;
return weight == that.weight && Objects.equals(value, that.value);
}
|
if (!(o instanceof WeightedValue)) {
|
public boolean equals(@Nullable Object o) {
if (this == o) {
return true;
}
if (!(o instanceof WeightedValue)) {
return false;
}
WeightedValue<?> that = (WeightedValue<?>) o;
return weight == that.weight && Objects.equals(value, that.value);
}
|
/**
 * An immutable value paired with a weight, for weight-aware caching/accounting.
 *
 * <p>Fixes: the original snippet carried a duplicated {@code @Override} annotation (illegal Java)
 * where the {@code equals} implementation had been dropped; {@code equals} is restored so it is
 * overridden together with {@code hashCode}, as the contracts require.
 *
 * @param <T> the type of the wrapped value
 */
class WeightedValue<T> implements Weighted {
  private final T value;
  private final long weight;

  private WeightedValue(T value, long weight) {
    this.value = value;
    this.weight = weight;
  }

  /** Creates a {@link WeightedValue} wrapping {@code value} with the given {@code weight}. */
  public static <T> WeightedValue<T> of(T value, long weight) {
    return new WeightedValue<>(value, weight);
  }

  @Override
  public long getWeight() {
    return weight;
  }

  public T getValue() {
    return value;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (!(o instanceof WeightedValue)) {
      return false;
    }
    WeightedValue<?> that = (WeightedValue<?>) o;
    return weight == that.weight && Objects.equals(value, that.value);
  }

  @Override
  public int hashCode() {
    return Objects.hash(value, weight);
  }

  @Override
  public String toString() {
    return "WeightedValue{value=" + value + ", weight=" + weight + "}";
  }
}
|
/**
 * An immutable value paired with a weight, for weight-aware caching/accounting.
 *
 * <p>Fixes: the original snippet carried a duplicated {@code @Override} annotation (illegal Java)
 * where the {@code equals} implementation had been dropped; {@code equals} is restored so it is
 * overridden together with {@code hashCode}, as the contracts require.
 *
 * @param <T> the type of the wrapped value
 */
class WeightedValue<T> implements Weighted {
  private final T value;
  private final long weight;

  private WeightedValue(T value, long weight) {
    this.value = value;
    this.weight = weight;
  }

  /** Creates a {@link WeightedValue} wrapping {@code value} with the given {@code weight}. */
  public static <T> WeightedValue<T> of(T value, long weight) {
    return new WeightedValue<>(value, weight);
  }

  @Override
  public long getWeight() {
    return weight;
  }

  public T getValue() {
    return value;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (!(o instanceof WeightedValue)) {
      return false;
    }
    WeightedValue<?> that = (WeightedValue<?>) o;
    return weight == that.weight && Objects.equals(value, that.value);
  }

  @Override
  public int hashCode() {
    return Objects.hash(value, weight);
  }

  @Override
  public String toString() {
    return "WeightedValue{value=" + value + ", weight=" + weight + "}";
  }
}
|
Move this after `configByConfiguration.addConfiguration(configuration);` so the expectation is registered only once the configuration setup has completed, immediately before the call under test.
|
public void testGetInvalidLocalTimeZone() {
expectedException.expectMessage(
"The supported Zone ID is either an abbreviation such as 'PST',"
+ " a full name such as 'America/Los_Angeles', or a custom timezone id such as 'GMT-8:00',"
+ " but configured Zone ID is 'UTC+8'.");
configuration.setString("table.local-time-zone", "UTC+8");
configByConfiguration.addConfiguration(configuration);
configByConfiguration.getLocalTimeZone();
}
|
"The supported Zone ID is either an abbreviation such as 'PST',"
|
    // Verifies that resolving an invalid zone id ("UTC+8") from configuration fails with a
    // message listing the supported Zone ID forms. Setup happens before the expectation is
    // registered, so setup failures are not masked.
    public void testGetInvalidLocalTimeZone() {
        configuration.setString("table.local-time-zone", "UTC+8");
        configByConfiguration.addConfiguration(configuration);
        expectedException.expectMessage(
                "The supported Zone ID is either a full name such as 'America/Los_Angeles',"
                        + " or a custom timezone id such as 'GMT-8:00', but configured Zone ID is 'UTC+8'.");
        configByConfiguration.getLocalTimeZone();
    }
|
/**
 * Tests that {@code TableConfig} setters and configuration-based initialization agree for SQL
 * dialect, generated-code length, local time zone and idle state retention.
 */
class TableConfigTest {
    @Rule public ExpectedException expectedException = ExpectedException.none();
    // Two configs populated through different channels: typed setters vs. a raw Configuration.
    private static TableConfig configByMethod = new TableConfig();
    private static TableConfig configByConfiguration = new TableConfig();
    private static Configuration configuration = new Configuration();
    @Test
    public void testSetAndGetSqlDialect() {
        configuration.setString("table.sql-dialect", "HIVE");
        configByConfiguration.addConfiguration(configuration);
        configByMethod.setSqlDialect(SqlDialect.HIVE);
        assertEquals(SqlDialect.HIVE, configByMethod.getSqlDialect());
        assertEquals(SqlDialect.HIVE, configByConfiguration.getSqlDialect());
    }
    @Test
    public void testSetAndGetMaxGeneratedCodeLength() {
        configuration.setString("table.generated-code.max-length", "5000");
        configByConfiguration.addConfiguration(configuration);
        configByMethod.setMaxGeneratedCodeLength(5000);
        assertEquals(Integer.valueOf(5000), configByMethod.getMaxGeneratedCodeLength());
        assertEquals(Integer.valueOf(5000), configByConfiguration.getMaxGeneratedCodeLength());
    }
    @Test
    public void testSetAndGetLocalTimeZone() {
        configuration.setString("table.local-time-zone", "Asia/Shanghai");
        configByConfiguration.addConfiguration(configuration);
        configByMethod.setLocalTimeZone(ZoneId.of("Asia/Shanghai"));
        assertEquals(ZoneId.of("Asia/Shanghai"), configByMethod.getLocalTimeZone());
        assertEquals(ZoneId.of("Asia/Shanghai"), configByConfiguration.getLocalTimeZone());
    }
    @Test
    public void testSetInvalidLocalTimeZone() {
        expectedException.expectMessage(
                "The supported Zone ID is either an abbreviation such as 'PST',"
                        + " a full name such as 'America/Los_Angeles', or a custom timezone id such as 'GMT-8:00',"
                        + " but configured Zone ID is 'UTC-10:00'.");
        configByMethod.setLocalTimeZone(ZoneId.of("UTC-10:00"));
    }
    @Test
    // NOTE(review): the duplicated @Test below indicates a test method was elided from this
    // snippet — confirm against the full source.
    @Test
    public void testSetAndGetIdleStateRetention() {
        configuration.setString("table.exec.state.ttl", "1 h");
        configByConfiguration.addConfiguration(configuration);
        configByMethod.setIdleStateRetention(Duration.ofHours(1));
        assertEquals(Duration.ofHours(1), configByMethod.getIdleStateRetention());
        assertEquals(Duration.ofHours(1), configByConfiguration.getIdleStateRetention());
    }
}
|
/**
 * Tests for {@code TableConfig}: every option is exercised both through its typed
 * setter and through a raw string entry added via {@code addConfiguration}, and the
 * corresponding getter must report the same value on both instances.
 */
class TableConfigTest {

    @Rule public ExpectedException expectedException = ExpectedException.none();

    // NOTE(review): these fields are static and therefore shared across test
    // methods; tests that write conflicting keys into `configuration` could
    // interfere with each other — confirm whether instance fields were intended.
    private static TableConfig configByMethod = new TableConfig();
    private static TableConfig configByConfiguration = new TableConfig();
    private static Configuration configuration = new Configuration();

    @Test
    public void testSetAndGetSqlDialect() {
        configuration.setString("table.sql-dialect", "HIVE");
        configByConfiguration.addConfiguration(configuration);
        configByMethod.setSqlDialect(SqlDialect.HIVE);
        assertEquals(SqlDialect.HIVE, configByMethod.getSqlDialect());
        assertEquals(SqlDialect.HIVE, configByConfiguration.getSqlDialect());
    }

    @Test
    public void testSetAndGetMaxGeneratedCodeLength() {
        configuration.setString("table.generated-code.max-length", "5000");
        configByConfiguration.addConfiguration(configuration);
        configByMethod.setMaxGeneratedCodeLength(5000);
        assertEquals(Integer.valueOf(5000), configByMethod.getMaxGeneratedCodeLength());
        assertEquals(Integer.valueOf(5000), configByConfiguration.getMaxGeneratedCodeLength());
    }

    @Test
    public void testSetAndGetLocalTimeZone() {
        configuration.setString("table.local-time-zone", "Asia/Shanghai");
        configByConfiguration.addConfiguration(configuration);
        configByMethod.setLocalTimeZone(ZoneId.of("Asia/Shanghai"));
        assertEquals(ZoneId.of("Asia/Shanghai"), configByMethod.getLocalTimeZone());
        assertEquals(ZoneId.of("Asia/Shanghai"), configByConfiguration.getLocalTimeZone());
    }

    @Test
    public void testSetInvalidLocalTimeZone() {
        expectedException.expectMessage(
                "The supported Zone ID is either a full name such as 'America/Los_Angeles',"
                        + " or a custom timezone id such as 'GMT-8:00', but configured Zone ID is 'UTC-10:00'.");
        configByMethod.setLocalTimeZone(ZoneId.of("UTC-10:00"));
    }

    // Fixed: the annotation was duplicated ("@Test" twice), which is a compile
    // error because JUnit's @Test is not a repeatable annotation.
    @Test
    public void testGetInvalidAbbreviationLocalTimeZone() {
        // Abbreviations such as "PST" are no longer accepted when read back.
        configuration.setString("table.local-time-zone", "PST");
        configByConfiguration.addConfiguration(configuration);
        expectedException.expectMessage(
                "The supported Zone ID is either a full name such as 'America/Los_Angeles',"
                        + " or a custom timezone id such as 'GMT-8:00', but configured Zone ID is 'PST'.");
        configByConfiguration.getLocalTimeZone();
    }

    @Test
    public void testSetAndGetIdleStateRetention() {
        configuration.setString("table.exec.state.ttl", "1 h");
        configByConfiguration.addConfiguration(configuration);
        configByMethod.setIdleStateRetention(Duration.ofHours(1));
        assertEquals(Duration.ofHours(1), configByMethod.getIdleStateRetention());
        assertEquals(Duration.ofHours(1), configByConfiguration.getIdleStateRetention());
    }
}
|
@rasika You are correct — I missed the `projectRoot.resolve()` call you used, so there is no issue with this.
|
/**
 * Computes the code actions available at the cursor position of the given context.
 *
 * <p>Two provider families are consulted: node-based providers, run against the
 * top-level syntax node enclosing the cursor (when one exists and maps to a known
 * {@code CodeActionNodeType}); and diagnostics-based providers, run once per
 * diagnostic of this file whose range contains the cursor.
 *
 * @param ctx code action context (workspace, file path, cursor position, diagnostics)
 * @return the collected code actions; empty when no provider produced any
 */
public static List<CodeAction> getAvailableCodeActions(CodeActionContext ctx) {
    LSClientLogger clientLogger = LSClientLogger.getInstance(ctx.languageServercontext());
    List<CodeAction> codeActions = new ArrayList<>();
    CodeActionProvidersHolder codeActionProvidersHolder
            = CodeActionProvidersHolder.getInstance(ctx.languageServercontext());
    SyntaxTree syntaxTree = ctx.workspace().syntaxTree(ctx.filePath()).orElseThrow();
    // Top-level construct under the cursor, if any.
    Optional<NonTerminalNode> matchedNode = CodeActionUtil.getTopLevelNode(ctx.cursorPosition(), syntaxTree);
    CodeActionNodeType matchedNodeType = CodeActionUtil.codeActionNodeType(matchedNode.orElse(null));
    SemanticModel semanticModel = ctx.workspace().semanticModel(ctx.filePath()).orElseThrow();
    if (matchedNode.isPresent() && matchedNodeType != CodeActionNodeType.NONE) {
        Range range = CommonUtil.toRange(matchedNode.get().lineRange());
        Node expressionNode = CodeActionUtil.largestExpressionNode(matchedNode.get(), range);
        TypeSymbol matchedTypeSymbol = semanticModel.type(expressionNode.lineRange()).orElse(null);
        PositionDetails posDetails = CodeActionPositionDetails.from(matchedNode.get(), null, matchedTypeSymbol);
        ctx.setPositionDetails(posDetails);
        codeActionProvidersHolder.getActiveNodeBasedProviders(matchedNodeType, ctx).forEach(provider -> {
            try {
                List<CodeAction> codeActionsOut = provider.getNodeBasedCodeActions(ctx);
                if (codeActionsOut != null) {
                    codeActions.addAll(codeActionsOut);
                }
            } catch (Exception e) {
                // One misbehaving provider must not suppress the others; log and continue.
                String msg = "CodeAction '" + provider.getClass().getSimpleName() + "' failed!";
                clientLogger.logError(msg, e, null, (Position) null);
            }
        });
    }
    // Single-file projects use the file's parent directory as the root when
    // resolving diagnostic file paths, because their source root is the file itself.
    Project project = ctx.workspace().project(ctx.filePath()).orElseThrow();
    Path projectRoot = (project.kind() == ProjectKind.SINGLE_FILE_PROJECT)
            ? project.sourceRoot().getParent() :
            project.sourceRoot();
    // Keep only diagnostics that belong to this file and whose range covers the cursor.
    ctx.allDiagnostics().stream().
            filter(diag -> projectRoot.resolve(diag.location().lineRange().filePath()).equals(ctx.filePath()) &&
                    CommonUtil.isWithinRange(ctx.cursorPosition(), CommonUtil.toRange(diag.location().lineRange()))
            )
            .forEach(diagnostic -> {
                Range range = CommonUtil.toRange(diagnostic.location().lineRange());
                PositionDetails positionDetails = computePositionDetails(range, syntaxTree, ctx);
                ctx.setPositionDetails(positionDetails);
                codeActionProvidersHolder.getActiveDiagnosticsBasedProviders(ctx).forEach(provider -> {
                    try {
                        List<CodeAction> codeActionsOut = provider.getDiagBasedCodeActions(diagnostic, ctx);
                        if (codeActionsOut != null) {
                            codeActions.addAll(codeActionsOut);
                        }
                    } catch (Exception e) {
                        String msg = "CodeAction '" + provider.getClass().getSimpleName() + "' failed!";
                        clientLogger.logError(msg, e, null, (Position) null);
                    }
                });
            });
    return codeActions;
}
|
CommonUtil.isWithinRange(ctx.cursorPosition(), CommonUtil.toRange(diag.location().lineRange()))
|
/**
 * Computes the code actions available at the cursor position of the given context.
 *
 * <p>Node-based providers run against the top-level syntax node enclosing the
 * cursor (when one exists and maps to a known {@code CodeActionNodeType});
 * diagnostics-based providers run once per diagnostic of this file whose range
 * contains the cursor.
 *
 * @param ctx code action context (workspace, file path, cursor position, diagnostics)
 * @return the collected code actions; empty when no provider produced any
 */
public static List<CodeAction> getAvailableCodeActions(CodeActionContext ctx) {
    LSClientLogger clientLogger = LSClientLogger.getInstance(ctx.languageServercontext());
    List<CodeAction> codeActions = new ArrayList<>();
    CodeActionProvidersHolder codeActionProvidersHolder
            = CodeActionProvidersHolder.getInstance(ctx.languageServercontext());
    SyntaxTree syntaxTree = ctx.workspace().syntaxTree(ctx.filePath()).orElseThrow();
    // Top-level construct under the cursor, if any.
    Optional<NonTerminalNode> matchedNode = CodeActionUtil.getTopLevelNode(ctx.cursorPosition(), syntaxTree);
    CodeActionNodeType matchedNodeType = CodeActionUtil.codeActionNodeType(matchedNode.orElse(null));
    SemanticModel semanticModel = ctx.workspace().semanticModel(ctx.filePath()).orElseThrow();
    if (matchedNode.isPresent() && matchedNodeType != CodeActionNodeType.NONE) {
        Range range = CommonUtil.toRange(matchedNode.get().lineRange());
        Node expressionNode = CodeActionUtil.largestExpressionNode(matchedNode.get(), range);
        TypeSymbol matchedTypeSymbol = semanticModel.type(expressionNode.lineRange()).orElse(null);
        PositionDetails posDetails = CodeActionPositionDetails.from(matchedNode.get(), null, matchedTypeSymbol);
        ctx.setPositionDetails(posDetails);
        codeActionProvidersHolder.getActiveNodeBasedProviders(matchedNodeType, ctx).forEach(provider -> {
            try {
                List<CodeAction> codeActionsOut = provider.getNodeBasedCodeActions(ctx);
                if (codeActionsOut != null) {
                    codeActions.addAll(codeActionsOut);
                }
            } catch (Exception e) {
                // One misbehaving provider must not suppress the others; log and continue.
                String msg = "CodeAction '" + provider.getClass().getSimpleName() + "' failed!";
                clientLogger.logError(msg, e, null, (Position) null);
            }
        });
    }
    // Single-file projects use the file's parent directory as the root when
    // resolving diagnostic file paths, because their source root is the file itself.
    Project project = ctx.workspace().project(ctx.filePath()).orElseThrow();
    Path projectRoot = (project.kind() == ProjectKind.SINGLE_FILE_PROJECT)
            ? project.sourceRoot().getParent() :
            project.sourceRoot();
    // Keep only diagnostics that belong to this file and whose range covers the cursor.
    ctx.allDiagnostics().stream().
            filter(diag -> projectRoot.resolve(diag.location().lineRange().filePath()).equals(ctx.filePath()) &&
                    CommonUtil.isWithinRange(ctx.cursorPosition(), CommonUtil.toRange(diag.location().lineRange()))
            )
            .forEach(diagnostic -> {
                Range range = CommonUtil.toRange(diagnostic.location().lineRange());
                PositionDetails positionDetails = computePositionDetails(range, syntaxTree, ctx);
                ctx.setPositionDetails(positionDetails);
                codeActionProvidersHolder.getActiveDiagnosticsBasedProviders(ctx).forEach(provider -> {
                    try {
                        List<CodeAction> codeActionsOut = provider.getDiagBasedCodeActions(diagnostic, ctx);
                        if (codeActionsOut != null) {
                            codeActions.addAll(codeActionsOut);
                        }
                    } catch (Exception e) {
                        String msg = "CodeAction '" + provider.getClass().getSimpleName() + "' failed!";
                        clientLogger.logError(msg, e, null, (Position) null);
                    }
                });
            });
    return codeActions;
}
|
// Router that dispatches code-action requests to the registered providers.
class CodeActionRouter {
    /**
     * Returns a list of supported code actions.
     *
     * <p>NOTE(review): this Javadoc documents
     * {@code getAvailableCodeActions(CodeActionContext)}, whose body is not part
     * of this snippet.
     *
     * @param ctx {@link CodeActionContext}
     * @return list of code actions
     */
}
|
// Router that dispatches code-action requests to the registered providers.
class CodeActionRouter {
    /**
     * Returns a list of supported code actions.
     *
     * <p>NOTE(review): this Javadoc documents
     * {@code getAvailableCodeActions(CodeActionContext)}, whose body is not part
     * of this snippet.
     *
     * @param ctx {@link CodeActionContext}
     * @return list of code actions
     */
}
|
Yes, by platform I meant JRE + OS + hardware (when I briefly checked OpenJ9, this method was native and Linux had suitable syscalls). Anyway, I've already changed the code to 1 ms.
|
/**
 * Builds the test topology: an endless numeric source, a keyed throttling map,
 * and a discarding sink. The per-record pause keeps records in flight so that
 * checkpoints interleave with processing.
 */
private void buildGraph(StreamExecutionEnvironment env) {
    env.fromSource(
                    new NumberSequenceSource(0, Long.MAX_VALUE),
                    WatermarkStrategy.noWatermarks(),
                    "num-source")
            .keyBy(value -> value)
            .map(
                    value -> {
                        // Was Thread.sleep(0, 100): the nanosecond overload only
                        // rounds the nanos into milliseconds, so a 100 ns request
                        // is not a reliable sub-millisecond pause across JREs/OSes.
                        // Sleep a full millisecond instead.
                        Thread.sleep(1);
                        return value;
                    })
            .addSink(new DiscardingSink<>());
}
|
Thread.sleep(0, 100);
|
/**
 * Builds the test topology: an endless numeric source, a keyed throttling map,
 * and a discarding sink. The per-record 1 ms pause keeps records in flight so
 * that checkpoints interleave with processing.
 */
private void buildGraph(StreamExecutionEnvironment env) {
    env.fromSource(
                    new NumberSequenceSource(0, Long.MAX_VALUE),
                    WatermarkStrategy.noWatermarks(),
                    "num-source")
            .keyBy(value -> value)
            .map(
                    value -> {
                        // 1 ms is the smallest portable sleep granularity.
                        Thread.sleep(1);
                        return value;
                    })
            .addSink(new DiscardingSink<>());
}
/**
 * IT case: after an unaligned checkpoint fails on the task side while writing its
 * state, a subsequent checkpoint for the same job must still succeed.
 *
 * <p>The failure is injected via {@link FailingOnceFsCheckpointOutputStream}, which
 * throws from {@code closeAndGetHandle()} (and once from {@code close()}) until the
 * shared {@code failOnClose} flag is cleared.
 *
 * <p>Fixed relative to the original: {@code TestException} now extends
 * {@link IOException} (a checked I/O failure, matching what {@code
 * closeAndGetHandle()} may declare) instead of {@code RuntimeException}, and
 * {@code close()} tunnels it via {@code rethrow(...)} since {@code close()} cannot
 * declare the checked type.
 */
class UnalignedCheckpointFailureHandlingITCase {
    private static final int PARALLELISM = 2;

    @Rule public final TemporaryFolder temporaryFolder = new TemporaryFolder();
    @Rule public final SharedObjects sharedObjects = SharedObjects.create();

    @Rule
    public final MiniClusterWithClientResource miniClusterResource =
            new MiniClusterWithClientResource(
                    new MiniClusterResourceConfiguration.Builder()
                            .setNumberTaskManagers(PARALLELISM)
                            .setNumberSlotsPerTaskManager(1)
                            .build());

    @Test
    public void testCheckpointSuccessAfterFailure() throws Exception {
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        TestCheckpointStorage storage =
                new TestCheckpointStorage(
                        new JobManagerCheckpointStorage(), sharedObjects, temporaryFolder);
        configure(env, storage);
        buildGraph(env);
        JobClient jobClient = env.executeAsync();
        JobID jobID = jobClient.getJobID();
        MiniCluster miniCluster = miniClusterResource.getMiniCluster();
        waitForJobStatus(jobClient, singletonList(RUNNING), fromNow(Duration.ofSeconds(30)));
        waitForAllTaskRunning(miniCluster, jobID, false);
        triggerFailingCheckpoint(jobID, TestException.class, miniCluster);
        // The injected failure is one-shot; this checkpoint must now complete.
        miniCluster.triggerCheckpoint(jobID).get();
    }

    private void configure(StreamExecutionEnvironment env, TestCheckpointStorage storage) {
        // Checkpoints are triggered manually, hence the effectively-infinite interval.
        env.enableCheckpointing(Long.MAX_VALUE, CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setCheckpointStorage(storage);
        env.setStateBackend(new MockStateBackend(true));
        env.getCheckpointConfig().enableUnalignedCheckpoints();
        env.getCheckpointConfig().setAlignedCheckpointTimeout(Duration.ZERO);
        env.getCheckpointConfig().setTolerableCheckpointFailureNumber(Integer.MAX_VALUE);
        env.setParallelism(PARALLELISM);
        env.disableOperatorChaining();
    }

    /** Triggers checkpoints until one fails with the expected exception. */
    private void triggerFailingCheckpoint(
            JobID jobID, Class<TestException> expectedException, MiniCluster miniCluster)
            throws InterruptedException, ExecutionException {
        while (true) {
            Optional<Throwable> cpFailure =
                    miniCluster
                            .triggerCheckpoint(jobID)
                            .thenApply(ign -> Optional.empty())
                            .handle((ign, err) -> Optional.ofNullable(err))
                            .get();
            if (!cpFailure.isPresent()) {
                Thread.sleep(50);
            } else if (isCausedBy(cpFailure.get(), expectedException)) {
                return;
            } else {
                // Unexpected failure type: surface it to fail the test.
                rethrow(cpFailure.get());
            }
        }
    }

    private boolean isCausedBy(Throwable t, Class<TestException> expectedException) {
        // Failures cross the RPC boundary as SerializedThrowable and must be
        // deserialized before the cause chain can be inspected.
        return findThrowable(t, SerializedThrowable.class)
                .flatMap(
                        st -> {
                            Throwable deser = st.deserializeError(getClass().getClassLoader());
                            return findThrowable(deser, expectedException);
                        })
                .isPresent();
    }

    /** Checkpoint storage whose streams fail once, controlled via shared objects. */
    private static class TestCheckpointStorage implements CheckpointStorage {
        private final CheckpointStorage delegate;
        private final SharedReference<AtomicBoolean> failOnCloseRef;
        private final SharedReference<TemporaryFolder> tempFolderRef;

        private TestCheckpointStorage(
                CheckpointStorage delegate,
                SharedObjects sharedObjects,
                TemporaryFolder tempFolder) {
            this.delegate = delegate;
            this.failOnCloseRef = sharedObjects.add(new AtomicBoolean(true));
            this.tempFolderRef = sharedObjects.add(tempFolder);
        }

        @Override
        public CheckpointStorageAccess createCheckpointStorage(JobID jobId) throws IOException {
            return new TestCheckpointStorageAccess(
                    delegate.createCheckpointStorage(jobId),
                    failOnCloseRef.get(),
                    tempFolderRef.get().newFolder());
        }

        @Override
        public CompletedCheckpointStorageLocation resolveCheckpoint(String externalPointer)
                throws IOException {
            return delegate.resolveCheckpoint(externalPointer);
        }
    }

    /** Delegating storage access that hands out failing output streams. */
    private static class TestCheckpointStorageAccess implements CheckpointStorageAccess {
        private final CheckpointStorageAccess delegate;
        private final AtomicBoolean failOnClose;
        private final File path;

        public TestCheckpointStorageAccess(
                CheckpointStorageAccess delegate, AtomicBoolean failOnClose, File file) {
            this.delegate = delegate;
            this.failOnClose = failOnClose;
            this.path = file;
        }

        @Override
        public CheckpointStreamFactory resolveCheckpointStorageLocation(
                long checkpointId, CheckpointStorageLocationReference reference) {
            return ign -> new FailingOnceFsCheckpointOutputStream(path, 100, 0, failOnClose);
        }

        @Override
        public CheckpointStreamFactory.CheckpointStateOutputStream createTaskOwnedStateStream()
                throws IOException {
            return delegate.createTaskOwnedStateStream();
        }

        @Override
        public boolean supportsHighlyAvailableStorage() {
            return delegate.supportsHighlyAvailableStorage();
        }

        @Override
        public boolean hasDefaultSavepointLocation() {
            return delegate.hasDefaultSavepointLocation();
        }

        @Override
        public CompletedCheckpointStorageLocation resolveCheckpoint(String externalPointer)
                throws IOException {
            return delegate.resolveCheckpoint(externalPointer);
        }

        @Override
        public void initializeBaseLocationsForCheckpoint() throws IOException {
            delegate.initializeBaseLocationsForCheckpoint();
        }

        @Override
        public CheckpointStorageLocation initializeLocationForCheckpoint(long checkpointId)
                throws IOException {
            return delegate.initializeLocationForCheckpoint(checkpointId);
        }

        @Override
        public CheckpointStorageLocation initializeLocationForSavepoint(
                long checkpointId, @Nullable String externalLocationPointer) throws IOException {
            return delegate.initializeLocationForSavepoint(checkpointId, externalLocationPointer);
        }
    }

    /** Fails closeAndGetHandle() (and once close()) while failOnClose is set. */
    private static class FailingOnceFsCheckpointOutputStream extends FsCheckpointStateOutputStream {
        private final AtomicBoolean failOnClose;
        private volatile boolean failedCloseAndGetHandle = false;

        public FailingOnceFsCheckpointOutputStream(
                File path, int bufferSize, int localStateThreshold, AtomicBoolean failOnClose)
                throws IOException {
            super(
                    fromLocalFile(path.getAbsoluteFile()),
                    FileSystem.get(path.toURI()),
                    bufferSize,
                    localStateThreshold);
            this.failOnClose = failOnClose;
        }

        @Override
        public StreamStateHandle closeAndGetHandle() throws IOException {
            if (failOnClose.get()) {
                failedCloseAndGetHandle = true;
                throw new TestException("failure from closeAndGetHandle");
            } else {
                return super.closeAndGetHandle();
            }
        }

        @Override
        public void close() {
            if (failedCloseAndGetHandle && failOnClose.compareAndSet(true, false)) {
                // close() cannot declare the checked TestException, so tunnel it.
                rethrow(new TestException("failure from close"));
            } else {
                super.close();
            }
        }
    }

    /** Marker failure injected by the test; checked I/O exception (see class doc). */
    private static class TestException extends IOException {
        public TestException(String message) {
            super(message);
        }
    }
}
|
/**
 * IT case: after an unaligned checkpoint fails on the task side while writing its
 * state, a subsequent checkpoint for the same job must still succeed.
 *
 * <p>The failure is injected via {@link FailingOnceFsCheckpointOutputStream}, which
 * throws from {@code closeAndGetHandle()} (and once from {@code close()}) until the
 * shared {@code failOnClose} flag is cleared.
 */
class UnalignedCheckpointFailureHandlingITCase {
    private static final int PARALLELISM = 2;

    @Rule public final TemporaryFolder temporaryFolder = new TemporaryFolder();
    @Rule public final SharedObjects sharedObjects = SharedObjects.create();

    @Rule
    public final MiniClusterWithClientResource miniClusterResource =
            new MiniClusterWithClientResource(
                    new MiniClusterResourceConfiguration.Builder()
                            .setNumberTaskManagers(PARALLELISM)
                            .setNumberSlotsPerTaskManager(1)
                            .build());

    @Test
    public void testCheckpointSuccessAfterFailure() throws Exception {
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        TestCheckpointStorage storage =
                new TestCheckpointStorage(
                        new JobManagerCheckpointStorage(), sharedObjects, temporaryFolder);
        configure(env, storage);
        buildGraph(env);
        JobClient jobClient = env.executeAsync();
        JobID jobID = jobClient.getJobID();
        MiniCluster miniCluster = miniClusterResource.getMiniCluster();
        waitForJobStatus(jobClient, singletonList(RUNNING), fromNow(Duration.ofSeconds(30)));
        waitForAllTaskRunning(miniCluster, jobID, false);
        triggerFailingCheckpoint(jobID, TestException.class, miniCluster);
        // The injected failure is one-shot; this checkpoint must now complete.
        miniCluster.triggerCheckpoint(jobID).get();
    }

    private void configure(StreamExecutionEnvironment env, TestCheckpointStorage storage) {
        // Checkpoints are triggered manually, hence the effectively-infinite interval.
        env.enableCheckpointing(Long.MAX_VALUE, CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setCheckpointStorage(storage);
        env.setStateBackend(new MockStateBackend(true));
        env.getCheckpointConfig().enableUnalignedCheckpoints();
        env.getCheckpointConfig().setAlignedCheckpointTimeout(Duration.ZERO);
        env.getCheckpointConfig().setTolerableCheckpointFailureNumber(Integer.MAX_VALUE);
        env.setParallelism(PARALLELISM);
        env.disableOperatorChaining();
    }

    /** Triggers checkpoints until one fails with the expected exception. */
    private void triggerFailingCheckpoint(
            JobID jobID, Class<TestException> expectedException, MiniCluster miniCluster)
            throws InterruptedException, ExecutionException {
        while (true) {
            Optional<Throwable> cpFailure =
                    miniCluster
                            .triggerCheckpoint(jobID)
                            .thenApply(ign -> Optional.empty())
                            .handle((ign, err) -> Optional.ofNullable(err))
                            .get();
            if (!cpFailure.isPresent()) {
                Thread.sleep(50);
            } else if (isCausedBy(cpFailure.get(), expectedException)) {
                return;
            } else {
                // Unexpected failure type: surface it to fail the test.
                rethrow(cpFailure.get());
            }
        }
    }

    private boolean isCausedBy(Throwable t, Class<TestException> expectedException) {
        // Failures cross the RPC boundary as SerializedThrowable and must be
        // deserialized before the cause chain can be inspected.
        return findThrowable(t, SerializedThrowable.class)
                .flatMap(
                        st -> {
                            Throwable deser = st.deserializeError(getClass().getClassLoader());
                            return findThrowable(deser, expectedException);
                        })
                .isPresent();
    }

    /** Checkpoint storage whose streams fail once, controlled via shared objects. */
    private static class TestCheckpointStorage implements CheckpointStorage {
        private final CheckpointStorage delegate;
        private final SharedReference<AtomicBoolean> failOnCloseRef;
        private final SharedReference<TemporaryFolder> tempFolderRef;

        private TestCheckpointStorage(
                CheckpointStorage delegate,
                SharedObjects sharedObjects,
                TemporaryFolder tempFolder) {
            this.delegate = delegate;
            this.failOnCloseRef = sharedObjects.add(new AtomicBoolean(true));
            this.tempFolderRef = sharedObjects.add(tempFolder);
        }

        @Override
        public CheckpointStorageAccess createCheckpointStorage(JobID jobId) throws IOException {
            return new TestCheckpointStorageAccess(
                    delegate.createCheckpointStorage(jobId),
                    failOnCloseRef.get(),
                    tempFolderRef.get().newFolder());
        }

        @Override
        public CompletedCheckpointStorageLocation resolveCheckpoint(String externalPointer)
                throws IOException {
            return delegate.resolveCheckpoint(externalPointer);
        }
    }

    /** Delegating storage access that hands out failing output streams. */
    private static class TestCheckpointStorageAccess implements CheckpointStorageAccess {
        private final CheckpointStorageAccess delegate;
        private final AtomicBoolean failOnClose;
        private final File path;

        public TestCheckpointStorageAccess(
                CheckpointStorageAccess delegate, AtomicBoolean failOnClose, File file) {
            this.delegate = delegate;
            this.failOnClose = failOnClose;
            this.path = file;
        }

        @Override
        public CheckpointStreamFactory resolveCheckpointStorageLocation(
                long checkpointId, CheckpointStorageLocationReference reference) {
            return ign -> new FailingOnceFsCheckpointOutputStream(path, 100, 0, failOnClose);
        }

        @Override
        public CheckpointStreamFactory.CheckpointStateOutputStream createTaskOwnedStateStream()
                throws IOException {
            return delegate.createTaskOwnedStateStream();
        }

        @Override
        public boolean supportsHighlyAvailableStorage() {
            return delegate.supportsHighlyAvailableStorage();
        }

        @Override
        public boolean hasDefaultSavepointLocation() {
            return delegate.hasDefaultSavepointLocation();
        }

        @Override
        public CompletedCheckpointStorageLocation resolveCheckpoint(String externalPointer)
                throws IOException {
            return delegate.resolveCheckpoint(externalPointer);
        }

        @Override
        public void initializeBaseLocationsForCheckpoint() throws IOException {
            delegate.initializeBaseLocationsForCheckpoint();
        }

        @Override
        public CheckpointStorageLocation initializeLocationForCheckpoint(long checkpointId)
                throws IOException {
            return delegate.initializeLocationForCheckpoint(checkpointId);
        }

        @Override
        public CheckpointStorageLocation initializeLocationForSavepoint(
                long checkpointId, @Nullable String externalLocationPointer) throws IOException {
            return delegate.initializeLocationForSavepoint(checkpointId, externalLocationPointer);
        }
    }

    /** Fails closeAndGetHandle() (and once close()) while failOnClose is set. */
    private static class FailingOnceFsCheckpointOutputStream extends FsCheckpointStateOutputStream {
        private final AtomicBoolean failOnClose;
        private volatile boolean failedCloseAndGetHandle = false;

        public FailingOnceFsCheckpointOutputStream(
                File path, int bufferSize, int localStateThreshold, AtomicBoolean failOnClose)
                throws IOException {
            super(
                    fromLocalFile(path.getAbsoluteFile()),
                    FileSystem.get(path.toURI()),
                    bufferSize,
                    localStateThreshold);
            this.failOnClose = failOnClose;
        }

        @Override
        public StreamStateHandle closeAndGetHandle() throws IOException {
            if (failOnClose.get()) {
                failedCloseAndGetHandle = true;
                throw new TestException("failure from closeAndGetHandle");
            } else {
                return super.closeAndGetHandle();
            }
        }

        @Override
        public void close() {
            if (failedCloseAndGetHandle && failOnClose.compareAndSet(true, false)) {
                // close() cannot declare the checked TestException, so tunnel it.
                rethrow(new TestException("failure from close"));
            } else {
                super.close();
            }
        }
    }

    /** Marker failure injected by the test; checked so it reads as an I/O failure. */
    private static class TestException extends IOException {
        public TestException(String message) {
            super(message);
        }
    }
}
|
Can you add a SQL parser test case for this change?
|
/**
 * Builds a SELECT statement model for MySQL's TABLE statement.
 *
 * <p>NOTE(review): in the TABLE branch the segment indexes come from
 * {@code ctx.start}/{@code ctx.stop}, i.e. the span of the whole statement
 * including the TABLE keyword, while the identifier text is only the table name —
 * confirm whether {@code ctx.tableName()} start/stop indexes were intended.
 */
public ASTNode visitTableStatement(final TableStatementContext ctx) {
    MySQLSelectStatement result = new MySQLSelectStatement();
    if (null != ctx.TABLE()) {
        // TABLE <name>: record the target as the FROM table of the synthetic SELECT.
        result.setFrom(new SimpleTableSegment(new TableNameSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(),
                new IdentifierValue(ctx.tableName().getText()))));
    } else {
        // presumably a grammar path without the TABLE token — TODO confirm
        result.setTable((SimpleTableSegment) visit(ctx.tableName()));
    }
    return result;
}
|
if (null != ctx.TABLE()) {
|
/**
 * Builds a SELECT statement model for MySQL's TABLE statement.
 *
 * <p>NOTE(review): in the TABLE branch the segment indexes come from
 * {@code ctx.start}/{@code ctx.stop}, i.e. the span of the whole statement
 * including the TABLE keyword, while the identifier text is only the table name —
 * confirm whether {@code ctx.tableName()} start/stop indexes were intended.
 */
public ASTNode visitTableStatement(final TableStatementContext ctx) {
    MySQLSelectStatement result = new MySQLSelectStatement();
    if (null != ctx.TABLE()) {
        // TABLE <name>: record the target as the FROM table of the synthetic SELECT.
        result.setFrom(new SimpleTableSegment(new TableNameSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(),
                new IdentifierValue(ctx.tableName().getText()))));
    } else {
        // presumably a grammar path without the TABLE token — TODO confirm
        result.setTable((SimpleTableSegment) visit(ctx.tableName()));
    }
    return result;
}
|
class MySQLStatementVisitor extends MySQLStatementBaseVisitor<ASTNode> {
private final Collection<ParameterMarkerSegment> parameterMarkerSegments = new LinkedList<>();
/** Creates a question-mark parameter marker indexed by the number of markers seen so far. */
@Override
public final ASTNode visitParameterMarker(final ParameterMarkerContext ctx) {
    // NOTE(review): parameterMarkerSegments is never populated in the visible
    // code of this visitor — confirm where segments are added (likely a subclass).
    return new ParameterMarkerValue(parameterMarkerSegments.size(), ParameterMarkerType.QUESTION);
}
/**
 * Dispatches a literal to the matching specialized visitor; exactly one of the
 * alternatives is expected to be present in the parse tree.
 */
@Override
public final ASTNode visitLiterals(final LiteralsContext ctx) {
    if (null != ctx.stringLiterals()) {
        return visit(ctx.stringLiterals());
    }
    if (null != ctx.numberLiterals()) {
        return visit(ctx.numberLiterals());
    }
    if (null != ctx.temporalLiterals()) {
        return visit(ctx.temporalLiterals());
    }
    if (null != ctx.hexadecimalLiterals()) {
        return visit(ctx.hexadecimalLiterals());
    }
    if (null != ctx.bitValueLiterals()) {
        return visit(ctx.bitValueLiterals());
    }
    if (null != ctx.booleanLiterals()) {
        return visit(ctx.booleanLiterals());
    }
    if (null != ctx.nullValueLiterals()) {
        return visit(ctx.nullValueLiterals());
    }
    // Unreachable for a well-formed parse tree; guards against grammar changes.
    throw new IllegalStateException("Literals must have string, number, dateTime, hex, bit, boolean or null.");
}
/** Wraps a string literal's raw text (quotes included) in a StringLiteralValue. */
@Override
public final ASTNode visitStringLiterals(final StringLiteralsContext ctx) {
    return new StringLiteralValue(ctx.getText());
}
/** Wraps a bare string token's raw text in a StringLiteralValue. */
@Override
public ASTNode visitString_(final String_Context ctx) {
    return new StringLiteralValue(ctx.getText());
}
/** Wraps a numeric literal's raw text in a NumberLiteralValue. */
@Override
public final ASTNode visitNumberLiterals(final NumberLiteralsContext ctx) {
    return new NumberLiteralValue(ctx.getText());
}
/** Temporal literals (DATE/TIME/TIMESTAMP forms) are kept as opaque text. */
@Override
public ASTNode visitTemporalLiterals(final TemporalLiteralsContext ctx) {
    return new OtherLiteralValue(ctx.getText());
}
/** Hexadecimal literals are kept as opaque text. */
@Override
public final ASTNode visitHexadecimalLiterals(final HexadecimalLiteralsContext ctx) {
    return new OtherLiteralValue(ctx.getText());
}
/** Bit-value literals are kept as opaque text. */
@Override
public final ASTNode visitBitValueLiterals(final BitValueLiteralsContext ctx) {
    return new OtherLiteralValue(ctx.getText());
}
/** Wraps TRUE/FALSE text in a BooleanLiteralValue. */
@Override
public final ASTNode visitBooleanLiterals(final BooleanLiteralsContext ctx) {
    return new BooleanLiteralValue(ctx.getText());
}
/** Wraps a NULL literal in a NullLiteralValue. */
@Override
public final ASTNode visitNullValueLiterals(final NullValueLiteralsContext ctx) {
    return new NullLiteralValue(ctx.getText());
}
/** Wraps an identifier's raw text in an IdentifierValue. */
@Override
public final ASTNode visitIdentifier(final IdentifierContext ctx) {
    return new IdentifierValue(ctx.getText());
}
/** Builds a database segment spanning the schema-name tokens. */
@Override
public final ASTNode visitSchemaName(final SchemaNameContext ctx) {
    return new DatabaseSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), (IdentifierValue) visit(ctx.identifier()));
}
/** Builds a simple table segment, attaching the owner (schema qualifier) when present. */
@Override
public final ASTNode visitTableName(final TableNameContext ctx) {
    SimpleTableSegment result = new SimpleTableSegment(new TableNameSegment(ctx.name().getStart().getStartIndex(),
            ctx.name().getStop().getStopIndex(), new IdentifierValue(ctx.name().identifier().getText())));
    OwnerContext owner = ctx.owner();
    if (null != owner) {
        result.setOwner((OwnerSegment) visit(owner));
    }
    return result;
}
/** Builds a simple table segment for a view name, attaching the owner when present. */
@Override
public final ASTNode visitViewName(final ViewNameContext ctx) {
    SimpleTableSegment result = new SimpleTableSegment(new TableNameSegment(ctx.identifier().getStart().getStartIndex(),
            ctx.identifier().getStop().getStopIndex(), new IdentifierValue(ctx.identifier().getText())));
    OwnerContext owner = ctx.owner();
    if (null != owner) {
        result.setOwner((OwnerSegment) visit(owner));
    }
    return result;
}
/** Builds an owner (qualifier) segment spanning the owner tokens. */
@Override
public final ASTNode visitOwner(final OwnerContext ctx) {
    return new OwnerSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), (IdentifierValue) visit(ctx.identifier()));
}
/**
 * Builds a function segment from a function name, attaching the owner when present.
 *
 * <p>NOTE(review): {@code ctx.identifier().IDENTIFIER_()} assumes the name is a
 * plain IDENTIFIER_ token; if the identifier rule can match other token types
 * (e.g. quoted or keyword identifiers), this would NPE — confirm the grammar.
 */
@Override
public ASTNode visitFunctionName(final FunctionNameContext ctx) {
    FunctionSegment result = new FunctionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.identifier().IDENTIFIER_().getText(), ctx.getText());
    if (null != ctx.owner()) {
        result.setOwner((OwnerSegment) visit(ctx.owner()));
    }
    return result;
}
/** Builds a column segment spanning the column-name tokens. */
@Override
public final ASTNode visitColumnName(final ColumnNameContext ctx) {
    return new ColumnSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), (IdentifierValue) visit(ctx.identifier()));
}
/** Builds an index segment wrapping the index-name tokens. */
@Override
public final ASTNode visitIndexName(final IndexNameContext ctx) {
    IndexNameSegment indexName = new IndexNameSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), (IdentifierValue) visit(ctx.identifier()));
    return new IndexSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), indexName);
}
/** Visits every table name in the list and collects the resulting segments in order. */
@Override
public ASTNode visitTableList(final TableListContext ctx) {
    CollectionValue<SimpleTableSegment> result = new CollectionValue<>();
    ctx.tableName().stream()
            .map(each -> (SimpleTableSegment) visit(each))
            .forEach(result.getValue()::add);
    return result;
}
/** Visits every view name and collects the resulting table segments in order. */
@Override
public final ASTNode visitViewNames(final ViewNamesContext ctx) {
    CollectionValue<SimpleTableSegment> result = new CollectionValue<>();
    for (ViewNameContext each : ctx.viewName()) {
        result.getValue().add((SimpleTableSegment) visit(each));
    }
    return result;
}
/** Visits every column name and collects the resulting column segments in order. */
@Override
public final ASTNode visitColumnNames(final ColumnNamesContext ctx) {
    CollectionValue<ColumnSegment> result = new CollectionValue<>();
    for (ColumnNameContext each : ctx.columnName()) {
        result.getValue().add((ColumnSegment) visit(each));
    }
    return result;
}
/**
 * Converts an expression: either delegates to the boolean primary, builds a
 * binary XOR/AND/OR expression, or (fall-through) treats it as a NOT expression.
 */
@Override
public final ASTNode visitExpr(final ExprContext ctx) {
    if (null != ctx.booleanPrimary()) {
        return visit(ctx.booleanPrimary());
    }
    if (null != ctx.XOR()) {
        return createBinaryOperationExpression(ctx, "XOR");
    }
    if (null != ctx.andOperator()) {
        return createBinaryOperationExpression(ctx, ctx.andOperator().getText());
    }
    if (null != ctx.orOperator()) {
        return createBinaryOperationExpression(ctx, ctx.orOperator().getText());
    }
    // Remaining alternative in the grammar: a NOT-prefixed expression.
    return new NotExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), (ExpressionSegment) visit(ctx.expr(0)));
}
/** Builds a binary expression from the two sub-expressions of ctx with the given operator text. */
private BinaryOperationExpression createBinaryOperationExpression(final ExprContext ctx, final String operator) {
    ExpressionSegment left = (ExpressionSegment) visit(ctx.expr(0));
    ExpressionSegment right = (ExpressionSegment) visit(ctx.expr(1));
    // Original SQL text of the whole expression, recovered from the token stream.
    String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex()));
    return new BinaryOperationExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), left, right, operator, text);
}
/**
 * Converts a boolean primary: IS [NOT] NULL/TRUE/FALSE, comparison (including
 * safe-equals), MEMBER OF, assignment, or plain predicate.
 */
@Override
public final ASTNode visitBooleanPrimary(final BooleanPrimaryContext ctx) {
    if (null != ctx.IS()) {
        // Reconstruct the right-hand text ("[NOT] NULL|TRUE|FALSE ...") from tokens.
        String rightText = "";
        if (null != ctx.NOT()) {
            rightText = rightText.concat(ctx.start.getInputStream().getText(new Interval(ctx.NOT().getSymbol().getStartIndex(),
                    ctx.NOT().getSymbol().getStopIndex()))).concat(" ");
        }
        Token operatorToken = null;
        if (null != ctx.NULL()) {
            operatorToken = ctx.NULL().getSymbol();
        }
        if (null != ctx.TRUE()) {
            operatorToken = ctx.TRUE().getSymbol();
        }
        if (null != ctx.FALSE()) {
            operatorToken = ctx.FALSE().getSymbol();
        }
        // "+ 2" skips the space after the IS keyword when no keyword token matched.
        int startIndex = null == operatorToken ? ctx.IS().getSymbol().getStopIndex() + 2 : operatorToken.getStartIndex();
        rightText = rightText.concat(ctx.start.getInputStream().getText(new Interval(startIndex, ctx.stop.getStopIndex())));
        ExpressionSegment right = new LiteralExpressionSegment(ctx.IS().getSymbol().getStopIndex() + 2, ctx.stop.getStopIndex(), rightText);
        String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex()));
        ExpressionSegment left = (ExpressionSegment) visit(ctx.booleanPrimary());
        String operator = "IS";
        return new BinaryOperationExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), left, right, operator, text);
    }
    if (null != ctx.comparisonOperator() || null != ctx.SAFE_EQ_()) {
        return createCompareSegment(ctx);
    }
    if (null != ctx.MEMBER()) {
        // "+ 5" skips " OF (" after MEMBER; "- 1" drops the closing parenthesis.
        int startIndex = ctx.MEMBER().getSymbol().getStopIndex() + 5;
        int endIndex = ctx.stop.getStopIndex() - 1;
        String rightText = ctx.start.getInputStream().getText(new Interval(startIndex, endIndex));
        ExpressionSegment right = new ExpressionProjectionSegment(startIndex, endIndex, rightText);
        String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex()));
        ExpressionSegment left = (ExpressionSegment) visit(ctx.booleanPrimary());
        String operator = "MEMBER OF";
        return new BinaryOperationExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), left, right, operator, text);
    }
    if (null != ctx.assignmentOperator()) {
        return createAssignmentSegment(ctx);
    }
    return visit(ctx.predicate());
}
/** Builds an assignment (e.g. ":=") as a binary expression over primary and predicate. */
private ASTNode createAssignmentSegment(final BooleanPrimaryContext ctx) {
    ExpressionSegment left = (ExpressionSegment) visit(ctx.booleanPrimary());
    ExpressionSegment right = (ExpressionSegment) visit(ctx.predicate());
    String operator = ctx.assignmentOperator().getText();
    String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex()));
    return new BinaryOperationExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), left, right, operator, text);
}
/**
 * Builds a comparison expression; the right-hand side is either a predicate or a
 * subquery, and the operator may carry an " ALL" suffix.
 */
private ASTNode createCompareSegment(final BooleanPrimaryContext ctx) {
    ExpressionSegment left = (ExpressionSegment) visit(ctx.booleanPrimary());
    ExpressionSegment right;
    String operator;
    if (null != ctx.ALL()) {
        operator = null != ctx.SAFE_EQ_() ? ctx.SAFE_EQ_().getText() : ctx.comparisonOperator().getText() + " ALL";
    } else {
        operator = null != ctx.SAFE_EQ_() ? ctx.SAFE_EQ_().getText() : ctx.comparisonOperator().getText();
    }
    if (null != ctx.predicate()) {
        right = (ExpressionSegment) visit(ctx.predicate());
    } else {
        // Comparison against a subquery, e.g. "col = (SELECT ...)".
        right = new SubqueryExpressionSegment(new SubquerySegment(ctx.subquery().start.getStartIndex(), ctx.subquery().stop.getStopIndex(), (MySQLSelectStatement) visit(ctx.subquery())));
    }
    String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex()));
    return new BinaryOperationExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), left, right, operator, text);
}
/**
 * Visits a predicate, dispatching on which keyword the grammar matched
 * (IN / BETWEEN / LIKE / REGEXP); a plain predicate falls through to its first bit expression.
 */
@Override
public final ASTNode visitPredicate(final PredicateContext ctx) {
    if (null != ctx.IN()) {
        return createInSegment(ctx);
    }
    if (null != ctx.BETWEEN()) {
        return createBetweenSegment(ctx);
    }
    if (null != ctx.LIKE()) {
        return createBinaryOperationExpressionFromLike(ctx);
    }
    if (null != ctx.REGEXP()) {
        return createBinaryOperationExpressionFromRegexp(ctx);
    }
    // No predicate keyword present: the predicate is just a bit expression.
    return visit(ctx.bitExpr(0));
}
/**
 * Builds an {@code IN} expression; the right side is a subquery or a parenthesized value list.
 */
private InExpression createInSegment(final PredicateContext ctx) {
    ExpressionSegment leftOperand = (ExpressionSegment) visit(ctx.bitExpr(0));
    ExpressionSegment rightOperand;
    if (null == ctx.subquery()) {
        // Value-list form: IN (expr, expr, ...); indices span the parentheses.
        ListExpression valueList = new ListExpression(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex());
        for (ExprContext each : ctx.expr()) {
            valueList.getItems().add((ExpressionSegment) visit(each));
        }
        rightOperand = valueList;
    } else {
        rightOperand = new SubqueryExpressionSegment(new SubquerySegment(ctx.subquery().start.getStartIndex(), ctx.subquery().stop.getStopIndex(), (MySQLSelectStatement) visit(ctx.subquery())));
    }
    return new InExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), leftOperand, rightOperand, null != ctx.NOT());
}
/**
 * Builds a LIKE / NOT LIKE / SOUNDS LIKE binary operation from a predicate context.
 */
private BinaryOperationExpression createBinaryOperationExpressionFromLike(final PredicateContext ctx) {
    ExpressionSegment leftOperand = (ExpressionSegment) visit(ctx.bitExpr(0));
    ExpressionSegment rightOperand;
    String operatorText;
    if (null == ctx.SOUNDS()) {
        // LIKE may carry several pattern expressions; collect them into one list expression
        // spanning from the first pattern to the last.
        List<SimpleExprContext> patterns = ctx.simpleExpr();
        ListExpression patternList = new ListExpression(patterns.get(0).start.getStartIndex(), patterns.get(patterns.size() - 1).stop.getStopIndex());
        for (SimpleExprContext each : patterns) {
            patternList.getItems().add((ExpressionSegment) visit(each));
        }
        rightOperand = patternList;
        operatorText = null == ctx.NOT() ? "LIKE" : "NOT LIKE";
    } else {
        rightOperand = (ExpressionSegment) visit(ctx.bitExpr(1));
        operatorText = "SOUNDS LIKE";
    }
    String originalText = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex()));
    return new BinaryOperationExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), leftOperand, rightOperand, operatorText, originalText);
}
/**
 * Builds a REGEXP / NOT REGEXP binary operation from a predicate context.
 */
private BinaryOperationExpression createBinaryOperationExpressionFromRegexp(final PredicateContext ctx) {
    ExpressionSegment leftOperand = (ExpressionSegment) visit(ctx.bitExpr(0));
    ExpressionSegment rightOperand = (ExpressionSegment) visit(ctx.bitExpr(1));
    String operatorText = null == ctx.NOT() ? "REGEXP" : "NOT REGEXP";
    String originalText = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex()));
    return new BinaryOperationExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), leftOperand, rightOperand, operatorText, originalText);
}
/**
 * Builds a BETWEEN expression: {@code left [NOT] BETWEEN lowerBound AND upperBound}.
 */
private BetweenExpression createBetweenSegment(final PredicateContext ctx) {
    ExpressionSegment target = (ExpressionSegment) visit(ctx.bitExpr(0));
    ExpressionSegment lowerBound = (ExpressionSegment) visit(ctx.bitExpr(1));
    ExpressionSegment upperBound = (ExpressionSegment) visit(ctx.predicate());
    return new BetweenExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), target, lowerBound, upperBound, null != ctx.NOT());
}
/**
 * Visits a bit expression; a bare simple expression is delegated, otherwise the three
 * children form {@code operand operator operand}.
 */
@Override
public final ASTNode visitBitExpr(final BitExprContext ctx) {
    if (null != ctx.simpleExpr()) {
        return visit(ctx.simpleExpr());
    }
    ExpressionSegment leftOperand = (ExpressionSegment) visit(ctx.getChild(0));
    ExpressionSegment rightOperand = (ExpressionSegment) visit(ctx.getChild(2));
    int startIndex = ctx.start.getStartIndex();
    int stopIndex = ctx.stop.getStopIndex();
    String originalText = ctx.start.getInputStream().getText(new Interval(startIndex, stopIndex));
    return new BinaryOperationExpression(startIndex, stopIndex, leftOperand, rightOperand, ctx.getChild(1).getText(), originalText);
}
/**
 * Visits a simple expression. The branches are checked in grammar-alternative order;
 * whichever sub-context is non-null determines the produced segment. Unmatched forms
 * fall through to {@link #visitRemainSimpleExpr}.
 */
@Override
public final ASTNode visitSimpleExpr(final SimpleExprContext ctx) {
    int startIndex = ctx.start.getStartIndex();
    int stopIndex = ctx.stop.getStopIndex();
    if (null != ctx.subquery()) {
        SubquerySegment subquerySegment = new SubquerySegment(ctx.subquery().getStart().getStartIndex(), ctx.subquery().getStop().getStopIndex(), (MySQLSelectStatement) visit(ctx.subquery()));
        // EXISTS (subquery) versus a plain scalar/row subquery expression.
        if (null != ctx.EXISTS()) {
            return new ExistsSubqueryExpression(startIndex, stopIndex, subquerySegment);
        }
        return new SubqueryExpressionSegment(subquerySegment);
    }
    if (null != ctx.parameterMarker()) {
        ParameterMarkerValue parameterMarker = (ParameterMarkerValue) visit(ctx.parameterMarker());
        ParameterMarkerExpressionSegment segment = new ParameterMarkerExpressionSegment(startIndex, stopIndex, parameterMarker.getValue(), parameterMarker.getType());
        // Every marker is also recorded on the visitor for later statement-level collection.
        parameterMarkerSegments.add(segment);
        return segment;
    }
    if (null != ctx.literals()) {
        return SQLUtils.createLiteralExpression(visit(ctx.literals()), startIndex, stopIndex, ctx.literals().start.getInputStream().getText(new Interval(startIndex, stopIndex)));
    }
    if (null != ctx.intervalExpression()) {
        return visit(ctx.intervalExpression());
    }
    if (null != ctx.functionCall()) {
        return visit(ctx.functionCall());
    }
    if (null != ctx.collateClause()) {
        SimpleExpressionSegment collateValueSegment = (SimpleExpressionSegment) visit(ctx.collateClause());
        return new CollateExpression(startIndex, stopIndex, collateValueSegment);
    }
    if (null != ctx.columnRef()) {
        return visit(ctx.columnRef());
    }
    if (null != ctx.matchExpression()) {
        return visit(ctx.matchExpression());
    }
    if (null != ctx.notOperator()) {
        ASTNode expression = visit(ctx.simpleExpr(0));
        // NOT EXISTS is folded into the exists-subquery node instead of wrapping it.
        if (expression instanceof ExistsSubqueryExpression) {
            ((ExistsSubqueryExpression) expression).setNot(true);
            return expression;
        }
        return new NotExpression(startIndex, stopIndex, (ExpressionSegment) expression);
    }
    // A single parenthesized expression is transparent: return the inner expression directly.
    if (null != ctx.LP_() && 1 == ctx.expr().size()) {
        return visit(ctx.expr(0));
    }
    if (null != ctx.OR_()) {
        ExpressionSegment left = (ExpressionSegment) visit(ctx.simpleExpr(0));
        ExpressionSegment right = (ExpressionSegment) visit(ctx.simpleExpr(1));
        String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex()));
        return new BinaryOperationExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), left, right, ctx.OR_().getText(), text);
    }
    return visitRemainSimpleExpr(ctx);
}
/**
 * Visits a column reference of one to three dot-separated identifiers:
 * {@code column}, {@code table.column}, or {@code schema.table.column}.
 */
@Override
public ASTNode visitColumnRef(final ColumnRefContext ctx) {
    int startIndex = ctx.getStart().getStartIndex();
    int stopIndex = ctx.getStop().getStopIndex();
    int identifierCount = ctx.identifier().size();
    if (1 == identifierCount) {
        return new ColumnSegment(startIndex, stopIndex, (IdentifierValue) visit(ctx.identifier(0)));
    }
    if (2 == identifierCount) {
        ColumnSegment column = new ColumnSegment(startIndex, stopIndex, (IdentifierValue) visit(ctx.identifier(1)));
        column.setOwner(new OwnerSegment(ctx.identifier(0).start.getStartIndex(), ctx.identifier(0).stop.getStopIndex(), (IdentifierValue) visit(ctx.identifier(0))));
        return column;
    }
    // Three identifiers: the middle one owns the column and is itself owned by the first.
    ColumnSegment column = new ColumnSegment(startIndex, stopIndex, (IdentifierValue) visit(ctx.identifier(2)));
    OwnerSegment tableOwner = new OwnerSegment(ctx.identifier(1).start.getStartIndex(), ctx.identifier(1).stop.getStopIndex(), (IdentifierValue) visit(ctx.identifier(1)));
    tableOwner.setOwner(new OwnerSegment(ctx.identifier(0).start.getStartIndex(), ctx.identifier(0).stop.getStopIndex(), (IdentifierValue) visit(ctx.identifier(0))));
    column.setOwner(tableOwner);
    return column;
}
/**
 * Visits a subquery by delegating to its parenthesized query expression.
 */
@Override
public ASTNode visitSubquery(final SubqueryContext ctx) {
    return visit(ctx.queryExpressionParens());
}
/**
 * Visits a parenthesized query expression, unwrapping nested parentheses recursively
 * and attaching any lock clause plus the collected parameter markers.
 */
@Override
public ASTNode visitQueryExpressionParens(final QueryExpressionParensContext ctx) {
    // Peel redundant parentheses: ((query)) -> (query).
    if (null != ctx.queryExpressionParens()) {
        return visit(ctx.queryExpressionParens());
    }
    MySQLSelectStatement selectStatement = (MySQLSelectStatement) visit(ctx.queryExpression());
    if (null != ctx.lockClauseList()) {
        selectStatement.setLock((LockSegment) visit(ctx.lockClauseList()));
    }
    selectStatement.getParameterMarkerSegments().addAll(getParameterMarkerSegments());
    return selectStatement;
}
/**
 * Visits a list of lock clauses and gathers the locked tables from every clause
 * that names explicit tables.
 */
@Override
public ASTNode visitLockClauseList(final LockClauseListContext ctx) {
    LockSegment lockSegment = new LockSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex());
    for (LockClauseContext eachClause : ctx.lockClause()) {
        if (null == eachClause.tableLockingList()) {
            continue;
        }
        lockSegment.getTables().addAll(generateTablesFromTableAliasRefList(eachClause.tableLockingList().tableAliasRefList()));
    }
    return lockSegment;
}
/**
 * Visits a query expression: delegates to its body (or parenthesized form) and then
 * attaches optional ORDER BY and LIMIT clauses.
 */
@Override
public ASTNode visitQueryExpression(final QueryExpressionContext ctx) {
    MySQLSelectStatement selectStatement = null != ctx.queryExpressionBody()
            ? (MySQLSelectStatement) visit(ctx.queryExpressionBody())
            : (MySQLSelectStatement) visit(ctx.queryExpressionParens());
    if (null != ctx.orderByClause()) {
        selectStatement.setOrderBy((OrderBySegment) visit(ctx.orderByClause()));
    }
    if (null != ctx.limitClause()) {
        selectStatement.setLimit((LimitSegment) visit(ctx.limitClause()));
    }
    return selectStatement;
}
/**
 * Visits a SELECT ... INTO statement, recursing through nested select-with-into
 * wrappers and attaching any trailing lock clause.
 */
@Override
public ASTNode visitSelectWithInto(final SelectWithIntoContext ctx) {
    if (null != ctx.selectWithInto()) {
        return visit(ctx.selectWithInto());
    }
    MySQLSelectStatement result = (MySQLSelectStatement) visit(ctx.queryExpression());
    if (null != ctx.lockClauseList()) {
        result.setLock((LockSegment) visit(ctx.lockClauseList()));
    }
    return result;
}
/**
 * Visits a query expression body. A single query primary is delegated; a combined
 * (UNION) body produces a fresh statement that mirrors the left side's projections,
 * FROM and table, and records the combine segment.
 */
@Override
public ASTNode visitQueryExpressionBody(final QueryExpressionBodyContext ctx) {
    if (1 == ctx.getChildCount() && ctx.getChild(0) instanceof QueryPrimaryContext) {
        return visit(ctx.queryPrimary());
    }
    if (null != ctx.queryExpressionBody()) {
        MySQLSelectStatement result = new MySQLSelectStatement();
        MySQLSelectStatement left = (MySQLSelectStatement) visit(ctx.queryExpressionBody());
        // The combined statement presents the left side's shape to callers.
        result.setProjections(left.getProjections());
        result.setFrom(left.getFrom());
        left.getTable().ifPresent(result::setTable);
        result.setCombine(createCombineSegment(ctx.combineClause(), left));
        return result;
    }
    return visit(ctx.queryExpressionParens());
}
/**
 * Builds the combine (UNION / UNION ALL) segment joining the already-visited left
 * statement with the right-hand query of the combine clause.
 */
private CombineSegment createCombineSegment(final CombineClauseContext ctx, final MySQLSelectStatement left) {
    boolean isUnionAll = null != ctx.combineOption() && null != ctx.combineOption().ALL();
    MySQLSelectStatement rightStatement = null != ctx.queryPrimary()
            ? (MySQLSelectStatement) visit(ctx.queryPrimary())
            : (MySQLSelectStatement) visit(ctx.queryExpressionParens());
    return new CombineSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), left, isUnionAll ? CombineType.UNION_ALL : CombineType.UNION, rightStatement);
}
/**
 * Visits a query specification (the SELECT core) and assembles projections, FROM
 * (including the special DUAL pseudo-table), WHERE, GROUP BY, HAVING and WINDOW clauses.
 */
@Override
public ASTNode visitQuerySpecification(final QuerySpecificationContext ctx) {
    MySQLSelectStatement result = new MySQLSelectStatement();
    result.setProjections((ProjectionsSegment) visit(ctx.projections()));
    if (null != ctx.selectSpecification()) {
        // isDistinct is defined elsewhere in this visitor; it inspects the select specification.
        result.getProjections().setDistinctRow(isDistinct(ctx));
    }
    if (null != ctx.fromClause()) {
        if (null != ctx.fromClause().tableReferences()) {
            TableSegment tableSource = (TableSegment) visit(ctx.fromClause().tableReferences());
            result.setFrom(tableSource);
        }
        if (null != ctx.fromClause().DUAL()) {
            // FROM DUAL is modeled as a simple table named after the DUAL token.
            TableSegment tableSource = new SimpleTableSegment(new TableNameSegment(ctx.fromClause().DUAL().getSymbol().getStartIndex(),
                    ctx.fromClause().DUAL().getSymbol().getStopIndex(), new IdentifierValue(ctx.fromClause().DUAL().getText())));
            result.setFrom(tableSource);
        }
    }
    if (null != ctx.whereClause()) {
        result.setWhere((WhereSegment) visit(ctx.whereClause()));
    }
    if (null != ctx.groupByClause()) {
        result.setGroupBy((GroupBySegment) visit(ctx.groupByClause()));
    }
    if (null != ctx.havingClause()) {
        result.setHaving((HavingSegment) visit(ctx.havingClause()));
    }
    if (null != ctx.windowClause()) {
        result.setWindow((WindowSegment) visit(ctx.windowClause()));
    }
    return result;
}
/**
 * Visits a WINDOW clause. Only the clause boundaries are captured; the individual
 * window definitions are not modeled on the segment.
 *
 * <p>Fix: the original carried a duplicated {@code @Override} annotation
 * ({@code @Override} is not {@code @Repeatable}, so the duplicate is a compile error).
 */
@Override
public ASTNode visitWindowClause(final WindowClauseContext ctx) {
    return new WindowSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex());
}
/**
 * Visits a HAVING clause and wraps its predicate expression in a having segment.
 */
@Override
public ASTNode visitHavingClause(final HavingClauseContext ctx) {
    ExpressionSegment predicate = (ExpressionSegment) visit(ctx.expr());
    int startIndex = ctx.getStart().getStartIndex();
    int stopIndex = ctx.getStop().getStopIndex();
    return new HavingSegment(startIndex, stopIndex, predicate);
}
/**
 * Visits an INTERVAL expression, modeled as a function named INTERVAL whose parameters
 * are the interval value expression and the unit keyword as a literal.
 */
@Override
public final ASTNode visitIntervalExpression(final IntervalExpressionContext ctx) {
    FunctionSegment result = new FunctionSegment(ctx.INTERVAL().getSymbol().getStartIndex(), ctx.INTERVAL().getSymbol().getStopIndex(), ctx.INTERVAL().getText(), ctx.INTERVAL().getText());
    result.getParameters().add((ExpressionSegment) visit(ctx.intervalValue().expr()));
    // The unit (DAY, MONTH, ...) is carried as a literal parameter spanning the unit token.
    result.getParameters().add(new LiteralExpressionSegment(ctx.intervalValue().intervalUnit().getStart().getStartIndex(), ctx.intervalValue().intervalUnit().getStop().getStopIndex(),
            ctx.intervalValue().intervalUnit().getText()));
    return result;
}
/**
 * Visits a function call, dispatching to the aggregation / special / regular / JSON
 * function visitor depending on which alternative the grammar matched.
 *
 * @throws IllegalStateException if none of the known function alternatives is present
 */
@Override
public final ASTNode visitFunctionCall(final FunctionCallContext ctx) {
    if (null != ctx.aggregationFunction()) {
        return visit(ctx.aggregationFunction());
    }
    if (null != ctx.specialFunction()) {
        return visit(ctx.specialFunction());
    }
    if (null != ctx.regularFunction()) {
        return visit(ctx.regularFunction());
    }
    if (null != ctx.jsonFunction()) {
        return visit(ctx.jsonFunction());
    }
    throw new IllegalStateException("FunctionCallContext must have aggregationFunction, regularFunction, specialFunction or jsonFunction.");
}
/**
 * Visits an aggregation-shaped function call. Only names recognized as aggregation
 * types become aggregation segments; anything else degrades to an expression projection.
 */
@Override
public final ASTNode visitAggregationFunction(final AggregationFunctionContext ctx) {
    String aggregationType = ctx.aggregationFunctionName().getText();
    return AggregationType.isAggregationType(aggregationType)
            ? createAggregationSegment(ctx, aggregationType)
            : new ExpressionProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), getOriginalText(ctx));
}
/**
 * Visits a JSON function. Named functions have their arguments visited (for side
 * effects such as parameter-marker registration); the {@code ->} and {@code ->>}
 * separators are treated as function names themselves.
 */
@Override
public final ASTNode visitJsonFunction(final JsonFunctionContext ctx) {
    JsonFunctionNameContext functionNameContext = ctx.jsonFunctionName();
    String functionName;
    if (null != functionNameContext) {
        functionName = functionNameContext.getText();
        // Visit arguments for their side effects; results are not attached as parameters here.
        for (ExprContext each : ctx.expr()) {
            visit(each);
        }
    } else if (null != ctx.JSON_SEPARATOR()) {
        functionName = ctx.JSON_SEPARATOR().getText();
    } else {
        functionName = ctx.JSON_UNQUOTED_SEPARATOR().getText();
    }
    return new FunctionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), functionName, getOriginalText(ctx));
}
/**
 * Builds an aggregation projection segment (distinct or plain) for a recognized
 * aggregation function; the inner expression text starts at the opening parenthesis.
 */
private ASTNode createAggregationSegment(final AggregationFunctionContext ctx, final String aggregationType) {
    AggregationType type = AggregationType.valueOf(aggregationType.toUpperCase());
    String innerExpression = ctx.start.getInputStream().getText(new Interval(ctx.LP_().getSymbol().getStartIndex(), ctx.stop.getStopIndex()));
    if (null != ctx.distinct()) {
        AggregationDistinctProjectionSegment result = new AggregationDistinctProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(),
                type, innerExpression, getDistinctExpression(ctx));
        result.getParameters().addAll(getExpressions(ctx));
        return result;
    }
    AggregationProjectionSegment result = new AggregationProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), type, innerExpression);
    result.getParameters().addAll(getExpressions(ctx));
    return result;
}
/**
 * Visits every argument expression of an aggregation function and returns the
 * resulting segments; returns an empty collection when there are no arguments.
 */
private Collection<ExpressionSegment> getExpressions(final AggregationFunctionContext ctx) {
    if (null == ctx.expr()) {
        return Collections.emptyList();
    }
    return ctx.expr().stream().map(each -> (ExpressionSegment) visit(each)).collect(Collectors.toList());
}
/**
 * Concatenates the text of the children between the leading tokens (presumably
 * name, parenthesis and DISTINCT) and the trailing token — TODO confirm child layout.
 */
private String getDistinctExpression(final AggregationFunctionContext ctx) {
    StringBuilder builder = new StringBuilder();
    int lastChildIndex = ctx.getChildCount() - 1;
    for (int childIndex = 3; childIndex < lastChildIndex; childIndex++) {
        builder.append(ctx.getChild(childIndex).getText());
    }
    return builder.toString();
}
/**
 * Visits a special function, dispatching to the dedicated visitor for each recognized
 * kind; any unrecognized special function falls back to a generic function segment
 * whose name is the original text.
 */
@Override
public final ASTNode visitSpecialFunction(final SpecialFunctionContext ctx) {
    if (null != ctx.groupConcatFunction()) {
        return visit(ctx.groupConcatFunction());
    }
    if (null != ctx.windowFunction()) {
        return visit(ctx.windowFunction());
    }
    if (null != ctx.castFunction()) {
        return visit(ctx.castFunction());
    }
    if (null != ctx.convertFunction()) {
        return visit(ctx.convertFunction());
    }
    if (null != ctx.positionFunction()) {
        return visit(ctx.positionFunction());
    }
    if (null != ctx.substringFunction()) {
        return visit(ctx.substringFunction());
    }
    if (null != ctx.extractFunction()) {
        return visit(ctx.extractFunction());
    }
    if (null != ctx.charFunction()) {
        return visit(ctx.charFunction());
    }
    if (null != ctx.trimFunction()) {
        return visit(ctx.trimFunction());
    }
    if (null != ctx.weightStringFunction()) {
        return visit(ctx.weightStringFunction());
    }
    if (null != ctx.valuesFunction()) {
        return visit(ctx.valuesFunction());
    }
    if (null != ctx.currentUserFunction()) {
        return visit(ctx.currentUserFunction());
    }
    // Fallback: keep the whole original text as both function name and text.
    return new FunctionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), getOriginalText(ctx), getOriginalText(ctx));
}
/**
 * Visits a GROUP_CONCAT call; arguments are pre-visited for parameter counting and
 * then attached as function parameters.
 */
@Override
public final ASTNode visitGroupConcatFunction(final GroupConcatFunctionContext ctx) {
    calculateParameterCount(ctx.expr());
    FunctionSegment functionSegment = new FunctionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ctx.GROUP_CONCAT().getText(), getOriginalText(ctx));
    for (ExprContext eachArgument : ctx.expr()) {
        functionSegment.getParameters().add((ExpressionSegment) visit(eachArgument));
    }
    return functionSegment;
}
/**
 * Visits a window function. The super call walks the subtree for its side effects
 * (e.g. visiting nested expressions); the returned segment only carries the name and text.
 */
@Override
public final ASTNode visitWindowFunction(final WindowFunctionContext ctx) {
    super.visitWindowFunction(ctx);
    return new FunctionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ctx.funcName.getText(), getOriginalText(ctx));
}
/**
 * Visits a CAST(expr AS type) call. Only column and literal argument results are kept
 * as parameters (other expression kinds are visited but dropped), followed by the
 * target type — either a cast type or the DATETIME keyword with optional precision.
 */
@Override
public final ASTNode visitCastFunction(final CastFunctionContext ctx) {
    calculateParameterCount(ctx.expr());
    FunctionSegment result = new FunctionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ctx.CAST().getText(), getOriginalText(ctx));
    for (ExprContext each : ctx.expr()) {
        ASTNode expr = visit(each);
        // NOTE(review): only columns and literals are attached; other argument kinds
        // are intentionally(?) discarded here — confirm before widening.
        if (expr instanceof ColumnSegment) {
            result.getParameters().add((ColumnSegment) expr);
        } else if (expr instanceof LiteralExpressionSegment) {
            result.getParameters().add((LiteralExpressionSegment) expr);
        }
    }
    if (null != ctx.castType()) {
        result.getParameters().add((DataTypeSegment) visit(ctx.castType()));
    }
    if (null != ctx.DATETIME()) {
        DataTypeSegment dataType = new DataTypeSegment();
        dataType.setDataTypeName(ctx.DATETIME().getText());
        dataType.setStartIndex(ctx.DATETIME().getSymbol().getStartIndex());
        dataType.setStopIndex(ctx.DATETIME().getSymbol().getStopIndex());
        if (null != ctx.typeDatetimePrecision()) {
            dataType.setDataLength((DataTypeLengthSegment) visit(ctx.typeDatetimePrecision()));
        }
        result.getParameters().add(dataType);
    }
    return result;
}
/**
 * Visits a cast target type, capturing its name, source span, and optional
 * field length or precision as the data length.
 */
@Override
public ASTNode visitCastType(final CastTypeContext ctx) {
    DataTypeSegment dataType = new DataTypeSegment();
    dataType.setDataTypeName(ctx.castTypeName.getText());
    dataType.setStartIndex(ctx.start.getStartIndex());
    dataType.setStopIndex(ctx.stop.getStopIndex());
    if (null != ctx.fieldLength()) {
        dataType.setDataLength((DataTypeLengthSegment) visit(ctx.fieldLength()));
    }
    if (null != ctx.precision()) {
        dataType.setDataLength((DataTypeLengthSegment) visit(ctx.precision()));
    }
    return dataType;
}
/**
 * Visits a CONVERT call. The argument is visited only for parameter counting;
 * no parameters are attached to the resulting segment.
 */
@Override
public final ASTNode visitConvertFunction(final ConvertFunctionContext ctx) {
    calculateParameterCount(Collections.singleton(ctx.expr()));
    return new FunctionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ctx.CONVERT().getText(), getOriginalText(ctx));
}
/**
 * Visits POSITION(substr IN str), attaching both arguments as parameters.
 * NOTE(review): both visits are cast to LiteralExpressionSegment — this assumes the
 * grammar only admits literal arguments here; confirm, otherwise this can throw CCE.
 */
@Override
public final ASTNode visitPositionFunction(final PositionFunctionContext ctx) {
    calculateParameterCount(ctx.expr());
    FunctionSegment result = new FunctionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ctx.POSITION().getText(), getOriginalText(ctx));
    result.getParameters().add((LiteralExpressionSegment) visit(ctx.expr(0)));
    result.getParameters().add((LiteralExpressionSegment) visit(ctx.expr(1)));
    return result;
}
/**
 * Visits SUBSTRING/SUBSTR, attaching the source expression followed by any numeric
 * position/length arguments as number literals.
 */
@Override
public final ASTNode visitSubstringFunction(final SubstringFunctionContext ctx) {
    FunctionSegment result = new FunctionSegment(
            ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), null == ctx.SUBSTR() ? ctx.SUBSTRING().getText() : ctx.SUBSTR().getText(), getOriginalText(ctx));
    result.getParameters().add((ExpressionSegment) visit(ctx.expr()));
    for (TerminalNode each : ctx.NUMBER_()) {
        result.getParameters().add(new LiteralExpressionSegment(each.getSymbol().getStartIndex(), each.getSymbol().getStopIndex(), new NumberLiteralValue(each.getText()).getValue()));
    }
    return result;
}
/**
 * Visits EXTRACT(unit FROM expr); the unit identifier becomes a literal parameter
 * followed by the date expression.
 */
@Override
public final ASTNode visitExtractFunction(final ExtractFunctionContext ctx) {
    calculateParameterCount(Collections.singleton(ctx.expr()));
    FunctionSegment result = new FunctionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ctx.EXTRACT().getText(), getOriginalText(ctx));
    result.getParameters().add(new LiteralExpressionSegment(ctx.identifier().getStart().getStartIndex(), ctx.identifier().getStop().getStopIndex(), ctx.identifier().getText()));
    // NOTE(review): cast assumes the extracted-from expression resolves to a literal — confirm.
    result.getParameters().add((LiteralExpressionSegment) visit(ctx.expr()));
    return result;
}
/**
 * Visits CHAR(...), attaching every argument expression as a parameter.
 */
@Override
public final ASTNode visitCharFunction(final CharFunctionContext ctx) {
    calculateParameterCount(ctx.expr());
    FunctionSegment functionSegment = new FunctionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ctx.CHAR().getText(), getOriginalText(ctx));
    for (ExprContext eachArgument : ctx.expr()) {
        functionSegment.getParameters().add((ExpressionSegment) visit(eachArgument));
    }
    return functionSegment;
}
/**
 * Visits TRIM([BOTH|LEADING|TRAILING] [remstr] FROM str). The direction keyword (if
 * any) is attached first as a literal parameter, then the remaining expressions.
 */
@Override
public final ASTNode visitTrimFunction(final TrimFunctionContext ctx) {
    FunctionSegment result = new FunctionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ctx.TRIM().getText(), getOriginalText(ctx));
    if (null != ctx.BOTH()) {
        result.getParameters().add(new LiteralExpressionSegment(ctx.BOTH().getSymbol().getStartIndex(), ctx.BOTH().getSymbol().getStopIndex(),
                new OtherLiteralValue(ctx.BOTH().getSymbol().getText()).getValue()));
    }
    if (null != ctx.TRAILING()) {
        result.getParameters().add(new LiteralExpressionSegment(ctx.TRAILING().getSymbol().getStartIndex(), ctx.TRAILING().getSymbol().getStopIndex(),
                new OtherLiteralValue(ctx.TRAILING().getSymbol().getText()).getValue()));
    }
    if (null != ctx.LEADING()) {
        result.getParameters().add(new LiteralExpressionSegment(ctx.LEADING().getSymbol().getStartIndex(), ctx.LEADING().getSymbol().getStopIndex(),
                new OtherLiteralValue(ctx.LEADING().getSymbol().getText()).getValue()));
    }
    for (ExprContext each : ctx.expr()) {
        result.getParameters().add((ExpressionSegment) visit(each));
    }
    return result;
}
/**
 * Visits WEIGHT_STRING(expr), attaching the single argument as a parameter.
 */
@Override
public final ASTNode visitWeightStringFunction(final WeightStringFunctionContext ctx) {
    calculateParameterCount(Collections.singleton(ctx.expr()));
    FunctionSegment functionSegment = new FunctionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ctx.WEIGHT_STRING().getText(), getOriginalText(ctx));
    functionSegment.getParameters().add((ExpressionSegment) visit(ctx.expr()));
    return functionSegment;
}
/**
 * Visits VALUES(col), attaching only the first referenced column (if any) as a parameter.
 */
@Override
public final ASTNode visitValuesFunction(final ValuesFunctionContext ctx) {
    FunctionSegment functionSegment = new FunctionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ctx.VALUES().getText(), getOriginalText(ctx));
    if (!ctx.columnRefList().columnRef().isEmpty()) {
        functionSegment.getParameters().add((ColumnSegment) visit(ctx.columnRefList().columnRef(0)));
    }
    return functionSegment;
}
/**
 * Visits CURRENT_USER as a parameterless function segment.
 */
@Override
public final ASTNode visitCurrentUserFunction(final CurrentUserFunctionContext ctx) {
    return new FunctionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ctx.CURRENT_USER().getText(), getOriginalText(ctx));
}
/**
 * Visits a regular function, delegating to the complete (with argument list) or
 * shorthand (bare keyword) form.
 */
@Override
public final ASTNode visitRegularFunction(final RegularFunctionContext ctx) {
    return null != ctx.completeRegularFunction() ? visit(ctx.completeRegularFunction()) : visit(ctx.shorthandRegularFunction());
}
/**
 * Visits a regular function with an explicit argument list, attaching every
 * visited argument as a parameter.
 */
@Override
public ASTNode visitCompleteRegularFunction(final CompleteRegularFunctionContext ctx) {
    FunctionSegment functionSegment = new FunctionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.regularFunctionName().getText(), getOriginalText(ctx));
    for (ExprContext eachArgument : ctx.expr()) {
        functionSegment.getParameters().add((ExpressionSegment) visit(eachArgument));
    }
    return functionSegment;
}
/**
 * Visits a shorthand regular function (e.g. CURRENT_TIME); CURRENT_TIME may carry a
 * numeric precision, which is attached as a number-literal parameter.
 */
@Override
public ASTNode visitShorthandRegularFunction(final ShorthandRegularFunctionContext ctx) {
    String originalText = getOriginalText(ctx);
    int startIndex = ctx.start.getStartIndex();
    int stopIndex = ctx.stop.getStopIndex();
    if (null == ctx.CURRENT_TIME()) {
        return new FunctionSegment(startIndex, stopIndex, ctx.getText(), originalText);
    }
    FunctionSegment functionSegment = new FunctionSegment(startIndex, stopIndex, ctx.CURRENT_TIME().getText(), originalText);
    if (null != ctx.NUMBER_()) {
        functionSegment.getParameters().add(new LiteralExpressionSegment(ctx.NUMBER_().getSymbol().getStartIndex(), ctx.NUMBER_().getSymbol().getStopIndex(),
                new NumberLiteralValue(ctx.NUMBER_().getText())));
    }
    return functionSegment;
}
/**
 * Handles the simple-expression alternatives not matched by {@code visitSimpleExpr}:
 * CASE, BINARY, variables — otherwise visits children for side effects and returns
 * the whole span as a common (opaque) expression segment.
 */
private ASTNode visitRemainSimpleExpr(final SimpleExprContext ctx) {
    if (null != ctx.caseExpression()) {
        return visit(ctx.caseExpression());
    }
    if (null != ctx.BINARY()) {
        // BINARY is transparent here: the operand's segment is returned as-is.
        return visit(ctx.simpleExpr(0));
    }
    if (null != ctx.variable()) {
        return visit(ctx.variable());
    }
    // Fallback: visit sub-expressions only for side effects (parameter markers etc.).
    for (ExprContext each : ctx.expr()) {
        visit(each);
    }
    for (SimpleExprContext each : ctx.simpleExpr()) {
        visit(each);
    }
    String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex()));
    return new CommonExpressionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), text);
}
/**
 * Visits a CASE expression, pairing each WHEN condition with its THEN result and
 * capturing the optional operand and ELSE expressions.
 */
@Override
public ASTNode visitCaseExpression(final CaseExpressionContext ctx) {
    Collection<ExpressionSegment> whenConditions = new LinkedList<>();
    Collection<ExpressionSegment> thenResults = new LinkedList<>();
    for (CaseWhenContext eachWhen : ctx.caseWhen()) {
        whenConditions.add((ExpressionSegment) visit(eachWhen.expr(0)));
        thenResults.add((ExpressionSegment) visit(eachWhen.expr(1)));
    }
    ExpressionSegment caseOperand = null == ctx.simpleExpr() ? null : (ExpressionSegment) visit(ctx.simpleExpr());
    ExpressionSegment elseResult = null == ctx.caseElse() ? null : (ExpressionSegment) visit(ctx.caseElse().expr());
    return new CaseWhenExpression(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), caseOperand, whenConditions, thenResults, elseResult);
}
/**
 * Visits a variable reference, delegating to the system- or user-variable visitor.
 */
@Override
public ASTNode visitVariable(final VariableContext ctx) {
    return null != ctx.systemVariable() ? visit(ctx.systemVariable()) : visit(ctx.userVariable());
}
/**
 * Visits a user variable ({@code @name}), keeping its name text and source span.
 */
@Override
public ASTNode visitUserVariable(final UserVariableContext ctx) {
    return new VariableSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.textOrIdentifier().getText());
}
/**
 * Visits a system variable ({@code @@[scope.]name}), recording the optional scope
 * (e.g. GLOBAL / SESSION) when present.
 */
@Override
public ASTNode visitSystemVariable(final SystemVariableContext ctx) {
    VariableSegment variableSegment = new VariableSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.rvalueSystemVariable().getText());
    if (null != ctx.systemVariableScope) {
        variableSegment.setScope(ctx.systemVariableScope.getText());
    }
    return variableSegment;
}
/**
 * Visits a MATCH ... AGAINST expression. The inner expression is visited only for its
 * side effects; the whole match is kept as an opaque common expression segment.
 */
@Override
public final ASTNode visitMatchExpression(final MatchExpressionContext ctx) {
    visit(ctx.expr());
    String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex()));
    return new CommonExpressionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), text);
}
/**
 * Visits each argument expression purely for its side effects — notably registering
 * any parameter markers it contains; results are discarded.
 */
private void calculateParameterCount(final Collection<ExprContext> exprContexts) {
    exprContexts.forEach(this::visit);
}
/**
 * Visits a data type declaration, capturing its name, source span, and optional
 * field length or precision as the data length.
 */
@Override
public final ASTNode visitDataType(final DataTypeContext ctx) {
    DataTypeSegment dataType = new DataTypeSegment();
    dataType.setDataTypeName(ctx.dataTypeName.getText());
    dataType.setStartIndex(ctx.start.getStartIndex());
    dataType.setStopIndex(ctx.stop.getStopIndex());
    if (null != ctx.fieldLength()) {
        dataType.setDataLength((DataTypeLengthSegment) visit(ctx.fieldLength()));
    }
    if (null != ctx.precision()) {
        dataType.setDataLength((DataTypeLengthSegment) visit(ctx.precision()));
    }
    return dataType;
}
/**
 * Visits a field length, e.g. the {@code (10)} in {@code VARCHAR(10)}.
 */
@Override
public ASTNode visitFieldLength(final FieldLengthContext ctx) {
    DataTypeLengthSegment result = new DataTypeLengthSegment();
    result.setStartIndex(ctx.start.getStartIndex());
    // NOTE(review): stop index uses ctx.stop.getStartIndex(), not getStopIndex();
    // the sibling precision visitors do the same — confirm whether this is intended.
    result.setStopIndex(ctx.stop.getStartIndex());
    result.setPrecision(new BigDecimal(ctx.length.getText()).intValue());
    return result;
}
/**
 * Visits a precision pair, e.g. the {@code (10, 2)} in {@code DECIMAL(10, 2)}:
 * the first number is the precision, the second the scale.
 */
@Override
public ASTNode visitPrecision(final PrecisionContext ctx) {
    DataTypeLengthSegment result = new DataTypeLengthSegment();
    result.setStartIndex(ctx.start.getStartIndex());
    // NOTE(review): stop index uses ctx.stop.getStartIndex(), matching the other
    // length visitors — confirm whether getStopIndex() was intended.
    result.setStopIndex(ctx.stop.getStartIndex());
    List<TerminalNode> numbers = ctx.NUMBER_();
    result.setPrecision(Integer.parseInt(numbers.get(0).getText()));
    result.setScale(Integer.parseInt(numbers.get(1).getText()));
    return result;
}
/**
 * Visits a datetime precision, e.g. the {@code (6)} in {@code DATETIME(6)}.
 */
@Override
public ASTNode visitTypeDatetimePrecision(final TypeDatetimePrecisionContext ctx) {
    DataTypeLengthSegment result = new DataTypeLengthSegment();
    result.setStartIndex(ctx.start.getStartIndex());
    // NOTE(review): stop index uses ctx.stop.getStartIndex(), matching the other
    // length visitors — confirm whether getStopIndex() was intended.
    result.setStopIndex(ctx.stop.getStartIndex());
    result.setPrecision(Integer.parseInt(ctx.NUMBER_().getText()));
    return result;
}
/**
 * Visits an ORDER BY clause, collecting every item into an order-by segment.
 */
@Override
public final ASTNode visitOrderByClause(final OrderByClauseContext ctx) {
    Collection<OrderByItemSegment> orderByItems = new LinkedList<>();
    for (OrderByItemContext eachItem : ctx.orderByItem()) {
        orderByItems.add((OrderByItemSegment) visit(eachItem));
    }
    return new OrderBySegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), orderByItems);
}
/**
 * Visits one ORDER BY item. Direction defaults to ASC; a numeric literal becomes an
 * index item, a bare column a column item, and anything else an expression item.
 */
@Override
public final ASTNode visitOrderByItem(final OrderByItemContext ctx) {
    OrderDirection orderDirection;
    if (null != ctx.direction()) {
        orderDirection = null != ctx.direction().DESC() ? OrderDirection.DESC : OrderDirection.ASC;
    } else {
        orderDirection = OrderDirection.ASC;
    }
    if (null != ctx.numberLiterals()) {
        // ORDER BY 2 — ordinal reference into the projection list.
        return new IndexOrderByItemSegment(ctx.numberLiterals().getStart().getStartIndex(), ctx.numberLiterals().getStop().getStopIndex(),
                SQLUtils.getExactlyNumber(ctx.numberLiterals().getText(), 10).intValue(), orderDirection, null);
    } else {
        ASTNode expr = visitExpr(ctx.expr());
        if (expr instanceof ColumnSegment) {
            return new ColumnOrderByItemSegment((ColumnSegment) expr, orderDirection, null);
        } else {
            return new ExpressionOrderByItemSegment(ctx.expr().getStart().getStartIndex(),
                    ctx.expr().getStop().getStopIndex(), getOriginalText(ctx.expr()), orderDirection, null, (ExpressionSegment) expr);
        }
    }
}
/**
 * Visits an INSERT statement in one of its three forms — VALUES, SELECT, or SET —
 * then attaches the optional ON DUPLICATE KEY clause, the target table, and the
 * collected parameter markers.
 */
@Override
public ASTNode visitInsert(final InsertContext ctx) {
    MySQLInsertStatement result;
    if (null != ctx.insertValuesClause()) {
        result = (MySQLInsertStatement) visit(ctx.insertValuesClause());
    } else if (null != ctx.insertSelectClause()) {
        result = (MySQLInsertStatement) visit(ctx.insertSelectClause());
    } else {
        // INSERT ... SET assignment form.
        result = new MySQLInsertStatement();
        result.setSetAssignment((SetAssignmentSegment) visit(ctx.setAssignmentsClause()));
    }
    if (null != ctx.onDuplicateKeyClause()) {
        result.setOnDuplicateKeyColumns((OnDuplicateKeyColumnsSegment) visit(ctx.onDuplicateKeyClause()));
    }
    result.setTable((SimpleTableSegment) visit(ctx.tableName()));
    result.getParameterMarkerSegments().addAll(getParameterMarkerSegments());
    return result;
}
/**
 * Visits an INSERT ... SELECT clause. An explicit (possibly empty) column list spans
 * its parentheses; with no parentheses at all, a zero-width column segment is placed
 * just before the clause start.
 */
@Override
public ASTNode visitInsertSelectClause(final InsertSelectClauseContext ctx) {
    MySQLInsertStatement result = new MySQLInsertStatement();
    if (null != ctx.LP_()) {
        if (null != ctx.fields()) {
            result.setInsertColumns(new InsertColumnsSegment(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex(), createInsertColumns(ctx.fields())));
        } else {
            result.setInsertColumns(new InsertColumnsSegment(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex(), Collections.emptyList()));
        }
    } else {
        // No column list: synthesize an empty segment immediately before the clause.
        result.setInsertColumns(new InsertColumnsSegment(ctx.start.getStartIndex() - 1, ctx.start.getStartIndex() - 1, Collections.emptyList()));
    }
    result.setInsertSelect(createInsertSelectSegment(ctx));
    return result;
}
/**
 * Wraps the SELECT part of an INSERT ... SELECT into a subquery segment.
 */
private SubquerySegment createInsertSelectSegment(final InsertSelectClauseContext ctx) {
    int selectStart = ctx.select().start.getStartIndex();
    int selectStop = ctx.select().stop.getStopIndex();
    return new SubquerySegment(selectStart, selectStop, (MySQLSelectStatement) visit(ctx.select()));
}
/**
 * Visits an INSERT ... VALUES clause. Column-list handling mirrors
 * {@code visitInsertSelectClause}: explicit lists span their parentheses, otherwise a
 * zero-width segment is synthesized before the clause start.
 */
@Override
public ASTNode visitInsertValuesClause(final InsertValuesClauseContext ctx) {
    MySQLInsertStatement result = new MySQLInsertStatement();
    if (null != ctx.LP_()) {
        if (null != ctx.fields()) {
            result.setInsertColumns(new InsertColumnsSegment(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex(), createInsertColumns(ctx.fields())));
        } else {
            result.setInsertColumns(new InsertColumnsSegment(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex(), Collections.emptyList()));
        }
    } else {
        result.setInsertColumns(new InsertColumnsSegment(ctx.start.getStartIndex() - 1, ctx.start.getStartIndex() - 1, Collections.emptyList()));
    }
    result.getValues().addAll(createInsertValuesSegments(ctx.assignmentValues()));
    return result;
}
/** Converts each parsed VALUES tuple into an {@code InsertValuesSegment}, preserving order. */
private Collection<InsertValuesSegment> createInsertValuesSegments(final Collection<AssignmentValuesContext> assignmentValuesContexts) {
    return assignmentValuesContexts.stream()
            .map(each -> (InsertValuesSegment) visit(each))
            .collect(Collectors.toCollection(LinkedList::new));
}
/** Builds the ON DUPLICATE KEY UPDATE column assignments. */
@Override
public ASTNode visitOnDuplicateKeyClause(final OnDuplicateKeyClauseContext ctx) {
    Collection<AssignmentSegment> assignments = ctx.assignment().stream()
            .map(each -> (AssignmentSegment) visit(each))
            .collect(Collectors.toCollection(LinkedList::new));
    return new OnDuplicateKeyColumnsSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), assignments);
}
/** Builds a REPLACE statement; MySQL REPLACE is modeled with the same AST type as INSERT. */
@Override
public ASTNode visitReplace(final ReplaceContext ctx) {
    MySQLInsertStatement result;
    if (null != ctx.replaceValuesClause()) {
        result = (MySQLInsertStatement) visit(ctx.replaceValuesClause());
    } else if (null != ctx.replaceSelectClause()) {
        result = (MySQLInsertStatement) visit(ctx.replaceSelectClause());
    } else {
        // REPLACE ... SET col = val, ...
        result = new MySQLInsertStatement();
        result.setSetAssignment((SetAssignmentSegment) visit(ctx.setAssignmentsClause()));
    }
    result.setTable((SimpleTableSegment) visit(ctx.tableName()));
    result.getParameterMarkerSegments().addAll(getParameterMarkerSegments());
    return result;
}
/** Builds the REPLACE ... SELECT form; column handling mirrors {@code visitInsertSelectClause}. */
@Override
public ASTNode visitReplaceSelectClause(final ReplaceSelectClauseContext ctx) {
    MySQLInsertStatement result = new MySQLInsertStatement();
    if (null != ctx.LP_()) {
        if (null != ctx.fields()) {
            result.setInsertColumns(new InsertColumnsSegment(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex(), createInsertColumns(ctx.fields())));
        } else {
            result.setInsertColumns(new InsertColumnsSegment(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex(), Collections.emptyList()));
        }
    } else {
        // No column list in the SQL: record a zero-width segment just before the clause start.
        result.setInsertColumns(new InsertColumnsSegment(ctx.start.getStartIndex() - 1, ctx.start.getStartIndex() - 1, Collections.emptyList()));
    }
    result.setInsertSelect(createReplaceSelectSegment(ctx));
    return result;
}
/** Wraps the SELECT of a REPLACE ... SELECT as a subquery segment spanning the SELECT text. */
private SubquerySegment createReplaceSelectSegment(final ReplaceSelectClauseContext ctx) {
    MySQLSelectStatement selectStatement = (MySQLSelectStatement) visit(ctx.select());
    return new SubquerySegment(ctx.select().start.getStartIndex(), ctx.select().stop.getStopIndex(), selectStatement);
}
/** Builds the REPLACE ... VALUES form; column handling mirrors {@code visitInsertValuesClause}. */
@Override
public ASTNode visitReplaceValuesClause(final ReplaceValuesClauseContext ctx) {
    MySQLInsertStatement result = new MySQLInsertStatement();
    if (null != ctx.LP_()) {
        if (null != ctx.fields()) {
            result.setInsertColumns(new InsertColumnsSegment(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex(), createInsertColumns(ctx.fields())));
        } else {
            result.setInsertColumns(new InsertColumnsSegment(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex(), Collections.emptyList()));
        }
    } else {
        // No column list in the SQL: record a zero-width segment just before the clause start.
        result.setInsertColumns(new InsertColumnsSegment(ctx.start.getStartIndex() - 1, ctx.start.getStartIndex() - 1, Collections.emptyList()));
    }
    result.getValues().addAll(createInsertValuesSegments(ctx.assignmentValues()));
    return result;
}
/** Converts the parenthesized field list of an INSERT/REPLACE into column segments, preserving order. */
private List<ColumnSegment> createInsertColumns(final FieldsContext fields) {
    return fields.insertIdentifier().stream()
            .map(each -> (ColumnSegment) visit(each))
            .collect(Collectors.toCollection(LinkedList::new));
}
/** Builds a MySQLUpdateStatement: target table(s), SET assignments, optional WHERE/ORDER BY/LIMIT. */
@Override
public ASTNode visitUpdate(final UpdateContext ctx) {
    MySQLUpdateStatement result = new MySQLUpdateStatement();
    TableSegment tableSegment = (TableSegment) visit(ctx.tableReferences());
    result.setTable(tableSegment);
    result.setSetAssignment((SetAssignmentSegment) visit(ctx.setAssignmentsClause()));
    if (null != ctx.whereClause()) {
        result.setWhere((WhereSegment) visit(ctx.whereClause()));
    }
    if (null != ctx.orderByClause()) {
        result.setOrderBy((OrderBySegment) visit(ctx.orderByClause()));
    }
    if (null != ctx.limitClause()) {
        result.setLimit((LimitSegment) visit(ctx.limitClause()));
    }
    result.getParameterMarkerSegments().addAll(getParameterMarkerSegments());
    return result;
}
/** Collects the comma-separated SET assignments into one {@code SetAssignmentSegment}. */
@Override
public ASTNode visitSetAssignmentsClause(final SetAssignmentsClauseContext ctx) {
    Collection<AssignmentSegment> assignments = new LinkedList<>();
    for (AssignmentContext each : ctx.assignment()) {
        assignments.add((AssignmentSegment) visit(each));
    }
    return new SetAssignmentSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), assignments);
}
/** Builds one VALUES tuple "(v1, v2, ...)" as an {@code InsertValuesSegment}. */
@Override
public ASTNode visitAssignmentValues(final AssignmentValuesContext ctx) {
    List<ExpressionSegment> segments = new LinkedList<>();
    for (AssignmentValueContext each : ctx.assignmentValue()) {
        segments.add((ExpressionSegment) visit(each));
    }
    return new InsertValuesSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), segments);
}
/** Builds a single "column = value" assignment; the segment type carries a column list of one. */
@Override
public ASTNode visitAssignment(final AssignmentContext ctx) {
    ColumnSegment column = (ColumnSegment) visit(ctx.columnRef());
    ExpressionSegment value = (ExpressionSegment) visit(ctx.assignmentValue());
    List<ColumnSegment> columnSegments = new LinkedList<>();
    columnSegments.add(column);
    return new ColumnAssignmentSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), columnSegments, value);
}
/**
 * Builds the right-hand side of an assignment. A bare column reference is re-wrapped as a
 * {@code CommonExpressionSegment} over the raw text; any other expression is returned as-is.
 * A missing expression (e.g. DEFAULT) also falls back to the raw text.
 */
@Override
public ASTNode visitAssignmentValue(final AssignmentValueContext ctx) {
    ExprContext expr = ctx.expr();
    if (null == expr) {
        return new CommonExpressionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ctx.getText());
    }
    ASTNode visited = visit(expr);
    return visited instanceof ColumnSegment
            ? new CommonExpressionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ctx.getText())
            : visited;
}
/** Treats a blob value's string body as a string literal value. */
@Override
public ASTNode visitBlobValue(final BlobValueContext ctx) {
    return new StringLiteralValue(ctx.string_().getText());
}
/** Builds a MySQLDeleteStatement for both single-table and multi-table DELETE forms. */
@Override
public ASTNode visitDelete(final DeleteContext ctx) {
    MySQLDeleteStatement result = new MySQLDeleteStatement();
    if (null != ctx.multipleTablesClause()) {
        result.setTable((TableSegment) visit(ctx.multipleTablesClause()));
    } else {
        result.setTable((TableSegment) visit(ctx.singleTableClause()));
    }
    if (null != ctx.whereClause()) {
        result.setWhere((WhereSegment) visit(ctx.whereClause()));
    }
    if (null != ctx.orderByClause()) {
        result.setOrderBy((OrderBySegment) visit(ctx.orderByClause()));
    }
    if (null != ctx.limitClause()) {
        result.setLimit((LimitSegment) visit(ctx.limitClause()));
    }
    result.getParameterMarkerSegments().addAll(getParameterMarkerSegments());
    return result;
}
/** Builds the single target table of a DELETE, attaching its alias when present. */
@Override
public ASTNode visitSingleTableClause(final SingleTableClauseContext ctx) {
    SimpleTableSegment result = (SimpleTableSegment) visit(ctx.tableName());
    if (null != ctx.alias()) {
        result.setAlias((AliasSegment) visit(ctx.alias()));
    }
    return result;
}
/** Builds a multi-table DELETE: the joined source tables plus the subset actually deleted from. */
@Override
public ASTNode visitMultipleTablesClause(final MultipleTablesClauseContext ctx) {
    DeleteMultiTableSegment result = new DeleteMultiTableSegment();
    TableSegment relateTableSource = (TableSegment) visit(ctx.tableReferences());
    result.setRelationTable(relateTableSource);
    result.setActualDeleteTables(generateTablesFromTableAliasRefList(ctx.tableAliasRefList()));
    return result;
}
/** Resolves the "t1, t2.*" list of a multi-table DELETE into simple table segments, in order. */
private List<SimpleTableSegment> generateTablesFromTableAliasRefList(final TableAliasRefListContext ctx) {
    return ctx.tableIdentOptWild().stream()
            .map(each -> (SimpleTableSegment) visit(each.tableName()))
            .collect(Collectors.toCollection(LinkedList::new));
}
/** Dispatches SELECT to query expression, SELECT ... INTO, or the first child rule; attaches lock clause. */
@Override
public ASTNode visitSelect(final SelectContext ctx) {
    MySQLSelectStatement result;
    if (null != ctx.queryExpression()) {
        result = (MySQLSelectStatement) visit(ctx.queryExpression());
        if (null != ctx.lockClauseList()) {
            result.setLock((LockSegment) visit(ctx.lockClauseList()));
        }
    } else if (null != ctx.selectWithInto()) {
        result = (MySQLSelectStatement) visit(ctx.selectWithInto());
    } else {
        result = (MySQLSelectStatement) visit(ctx.getChild(0));
    }
    result.getParameterMarkerSegments().addAll(getParameterMarkerSegments());
    return result;
}
/** Returns true when any select specification (DISTINCT/DISTINCTROW) marks the query as distinct. */
private boolean isDistinct(final QuerySpecificationContext ctx) {
    return ctx.selectSpecification().stream()
            .anyMatch(each -> ((BooleanLiteralValue) visit(each)).getValue());
}
/** A specification without a duplicate keyword contributes {@code false} to the distinct check. */
@Override
public ASTNode visitSelectSpecification(final SelectSpecificationContext ctx) {
    return null == ctx.duplicateSpecification() ? new BooleanLiteralValue(false) : visit(ctx.duplicateSpecification());
}
/** DISTINCT and DISTINCTROW (case-insensitive) mean distinct; anything else (e.g. ALL) does not. */
@Override
public ASTNode visitDuplicateSpecification(final DuplicateSpecificationContext ctx) {
    String text = ctx.getText();
    boolean distinct = "DISTINCT".equalsIgnoreCase(text) || "DISTINCTROW".equalsIgnoreCase(text);
    return new BooleanLiteralValue(distinct);
}
/** Builds the projection list; an unqualified "*" is prepended before the named projections. */
@Override
public ASTNode visitProjections(final ProjectionsContext ctx) {
    Collection<ProjectionSegment> projections = new LinkedList<>();
    if (null != ctx.unqualifiedShorthand()) {
        projections.add(new ShorthandProjectionSegment(ctx.unqualifiedShorthand().getStart().getStartIndex(), ctx.unqualifiedShorthand().getStop().getStopIndex()));
    }
    for (ProjectionContext each : ctx.projection()) {
        projections.add((ProjectionSegment) visit(each));
    }
    ProjectionsSegment result = new ProjectionsSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex());
    result.getProjections().addAll(projections);
    return result;
}
/**
 * Builds a single projection. Qualified shorthand ("t.*") and the column/subquery/EXISTS cases are
 * handled directly; every other expression kind is delegated to {@code createProjection}.
 */
@Override
public ASTNode visitProjection(final ProjectionContext ctx) {
    if (null != ctx.qualifiedShorthand()) {
        return createShorthandProjection(ctx.qualifiedShorthand());
    }
    AliasSegment alias = null == ctx.alias() ? null : (AliasSegment) visit(ctx.alias());
    ASTNode exprProjection = visit(ctx.expr());
    if (exprProjection instanceof ColumnSegment) {
        ColumnProjectionSegment result = new ColumnProjectionSegment((ColumnSegment) exprProjection);
        result.setAlias(alias);
        return result;
    }
    if (exprProjection instanceof SubquerySegment) {
        SubquerySegment subquerySegment = (SubquerySegment) exprProjection;
        // Recover the exact subquery text from the token stream for the projection's display text.
        String text = ctx.start.getInputStream().getText(new Interval(subquerySegment.getStartIndex(), subquerySegment.getStopIndex()));
        SubqueryProjectionSegment result = new SubqueryProjectionSegment((SubquerySegment) exprProjection, text);
        result.setAlias(alias);
        return result;
    }
    if (exprProjection instanceof ExistsSubqueryExpression) {
        ExistsSubqueryExpression existsSubqueryExpression = (ExistsSubqueryExpression) exprProjection;
        String text = ctx.start.getInputStream().getText(new Interval(existsSubqueryExpression.getStartIndex(), existsSubqueryExpression.getStopIndex()));
        SubqueryProjectionSegment result = new SubqueryProjectionSegment(((ExistsSubqueryExpression) exprProjection).getSubquery(), text);
        result.setAlias(alias);
        return result;
    }
    return createProjection(ctx, alias, exprProjection);
}
/**
 * Builds a qualified shorthand projection ("owner.*" or "schema.owner.*").
 * The last identifier is the direct owner; a preceding identifier, if any, becomes the owner's owner.
 */
private ShorthandProjectionSegment createShorthandProjection(final QualifiedShorthandContext shorthand) {
    ShorthandProjectionSegment result = new ShorthandProjectionSegment(shorthand.getStart().getStartIndex(), shorthand.getStop().getStopIndex());
    IdentifierContext identifier = shorthand.identifier().get(shorthand.identifier().size() - 1);
    OwnerSegment owner = new OwnerSegment(identifier.getStart().getStartIndex(), identifier.getStop().getStopIndex(), new IdentifierValue(identifier.getText()));
    result.setOwner(owner);
    if (shorthand.identifier().size() > 1) {
        IdentifierContext schemaIdentifier = shorthand.identifier().get(0);
        owner.setOwner(new OwnerSegment(schemaIdentifier.getStart().getStartIndex(), schemaIdentifier.getStop().getStopIndex(), new IdentifierValue(schemaIdentifier.getText())));
    }
    return result;
}
/** Builds an alias segment from either a quoted text or a plain identifier. */
@Override
public ASTNode visitAlias(final AliasContext ctx) {
    return new AliasSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), new IdentifierValue(ctx.textOrIdentifier().getText()));
}
/**
 * Wraps an already-visited expression node into the appropriate projection segment and attaches
 * the alias. Aggregations, expression projections, and parameter markers carry aliases directly;
 * every other expression kind is wrapped in an {@code ExpressionProjectionSegment}. The final
 * fallthrough assumes a {@code LiteralExpressionSegment} (any other type would throw a
 * ClassCastException, surfacing an unhandled expression kind).
 *
 * @param ctx        projection parse context (used to recover original text and indexes)
 * @param alias      alias segment, or null when the projection has no alias
 * @param projection visited expression node to wrap
 * @return projection segment with alias applied
 */
private ASTNode createProjection(final ProjectionContext ctx, final AliasSegment alias, final ASTNode projection) {
    if (projection instanceof AggregationProjectionSegment) {
        ((AggregationProjectionSegment) projection).setAlias(alias);
        return projection;
    }
    if (projection instanceof ExpressionProjectionSegment) {
        ((ExpressionProjectionSegment) projection).setAlias(alias);
        return projection;
    }
    if (projection instanceof FunctionSegment) {
        FunctionSegment functionSegment = (FunctionSegment) projection;
        ExpressionProjectionSegment result = new ExpressionProjectionSegment(functionSegment.getStartIndex(), functionSegment.getStopIndex(), functionSegment.getText(), functionSegment);
        result.setAlias(alias);
        return result;
    }
    if (projection instanceof CommonExpressionSegment) {
        CommonExpressionSegment segment = (CommonExpressionSegment) projection;
        ExpressionProjectionSegment result = new ExpressionProjectionSegment(segment.getStartIndex(), segment.getStopIndex(), segment.getText(), segment);
        result.setAlias(alias);
        return result;
    }
    if (projection instanceof ColumnSegment) {
        ExpressionProjectionSegment result = new ExpressionProjectionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), getOriginalText(ctx), (ColumnSegment) projection);
        result.setAlias(alias);
        return result;
    }
    if (projection instanceof SubqueryExpressionSegment) {
        SubqueryExpressionSegment subqueryExpressionSegment = (SubqueryExpressionSegment) projection;
        // Recover the exact subquery text from the token stream for display purposes.
        String text = ctx.start.getInputStream().getText(new Interval(subqueryExpressionSegment.getStartIndex(), subqueryExpressionSegment.getStopIndex()));
        SubqueryProjectionSegment result = new SubqueryProjectionSegment(subqueryExpressionSegment.getSubquery(), text);
        result.setAlias(alias);
        return result;
    }
    if (projection instanceof BinaryOperationExpression) {
        int startIndex = ((BinaryOperationExpression) projection).getStartIndex();
        // The projection's span extends over the alias when one is present.
        int stopIndex = null != alias ? alias.getStopIndex() : ((BinaryOperationExpression) projection).getStopIndex();
        ExpressionProjectionSegment result = new ExpressionProjectionSegment(startIndex, stopIndex, ((BinaryOperationExpression) projection).getText(), (BinaryOperationExpression) projection);
        result.setAlias(alias);
        return result;
    }
    if (projection instanceof ParameterMarkerExpressionSegment) {
        ParameterMarkerExpressionSegment result = (ParameterMarkerExpressionSegment) projection;
        result.setAlias(alias);
        // Consistency fix: return the local like every other branch (same object as `projection`).
        return result;
    }
    if (projection instanceof CaseWhenExpression) {
        ExpressionProjectionSegment result = new ExpressionProjectionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), getOriginalText(ctx.expr()), (CaseWhenExpression) projection);
        result.setAlias(alias);
        return result;
    }
    if (projection instanceof VariableSegment) {
        ExpressionProjectionSegment result = new ExpressionProjectionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), getOriginalText(ctx.expr()), (VariableSegment) projection);
        result.setAlias(alias);
        return result;
    }
    if (projection instanceof BetweenExpression) {
        ExpressionProjectionSegment result = new ExpressionProjectionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), getOriginalText(ctx.expr()), (BetweenExpression) projection);
        result.setAlias(alias);
        return result;
    }
    LiteralExpressionSegment column = (LiteralExpressionSegment) projection;
    ExpressionProjectionSegment result = null == alias
            ? new ExpressionProjectionSegment(column.getStartIndex(), column.getStopIndex(), String.valueOf(column.getLiterals()), column)
            : new ExpressionProjectionSegment(column.getStartIndex(), ctx.alias().stop.getStopIndex(), String.valueOf(column.getLiterals()), column);
    result.setAlias(alias);
    return result;
}
/** FROM clause delegates directly to the table references rule. */
@Override
public ASTNode visitFromClause(final FromClauseContext ctx) {
    return visit(ctx.tableReferences());
}
/**
 * Builds the table source list; comma-separated references are folded left-to-right into
 * COMMA-joined {@code JoinTableSegment}s.
 */
@Override
public ASTNode visitTableReferences(final TableReferencesContext ctx) {
    TableSegment result = (TableSegment) visit(ctx.tableReference(0));
    for (int i = 1; i < ctx.tableReference().size(); i++) {
        result = generateJoinTableSourceFromEscapedTableReference(ctx.tableReference(i), result);
    }
    return result;
}
/** Joins an accumulated table source with the next comma-separated reference as a COMMA join. */
private JoinTableSegment generateJoinTableSourceFromEscapedTableReference(final TableReferenceContext ctx, final TableSegment tableSegment) {
    JoinTableSegment result = new JoinTableSegment();
    result.setStartIndex(tableSegment.getStartIndex());
    result.setStopIndex(ctx.stop.getStopIndex());
    result.setLeft(tableSegment);
    result.setJoinType(JoinType.COMMA.name());
    result.setRight((TableSegment) visit(ctx));
    return result;
}
/** Folds the table factor with its trailing joined tables, left to right. */
@Override
public ASTNode visitEscapedTableReference(final EscapedTableReferenceContext ctx) {
    TableSegment current = (TableSegment) visit(ctx.tableFactor());
    for (JoinedTableContext each : ctx.joinedTable()) {
        current = visitJoinedTable(each, current);
    }
    return current;
}
/** Folds a table factor or escaped reference with its trailing joined tables, left to right. */
@Override
public ASTNode visitTableReference(final TableReferenceContext ctx) {
    TableSegment current = null != ctx.tableFactor()
            ? (TableSegment) visit(ctx.tableFactor())
            : (TableSegment) visit(ctx.escapedTableReference());
    for (JoinedTableContext each : ctx.joinedTable()) {
        current = visitJoinedTable(each, current);
    }
    return current;
}
/** Builds a table factor: derived table (subquery), named table, or parenthesized table references. */
@Override
public ASTNode visitTableFactor(final TableFactorContext ctx) {
    if (null != ctx.subquery()) {
        MySQLSelectStatement subquery = (MySQLSelectStatement) visit(ctx.subquery());
        SubquerySegment subquerySegment = new SubquerySegment(ctx.subquery().start.getStartIndex(), ctx.subquery().stop.getStopIndex(), subquery);
        SubqueryTableSegment result = new SubqueryTableSegment(subquerySegment);
        if (null != ctx.alias()) {
            result.setAlias((AliasSegment) visit(ctx.alias()));
        }
        return result;
    }
    if (null != ctx.tableName()) {
        SimpleTableSegment result = (SimpleTableSegment) visit(ctx.tableName());
        if (null != ctx.alias()) {
            result.setAlias((AliasSegment) visit(ctx.alias()));
        }
        return result;
    }
    return visit(ctx.tableReferences());
}
/** Builds one join step: left side, join type/naturalness, right side, and optional ON/USING spec. */
private JoinTableSegment visitJoinedTable(final JoinedTableContext ctx, final TableSegment tableSegment) {
    JoinTableSegment result = new JoinTableSegment();
    result.setLeft(tableSegment);
    result.setStartIndex(tableSegment.getStartIndex());
    result.setStopIndex(ctx.stop.getStopIndex());
    result.setJoinType(getJoinType(ctx));
    result.setNatural(null != ctx.naturalJoinType());
    TableSegment right = null != ctx.tableFactor() ? (TableSegment) visit(ctx.tableFactor()) : (TableSegment) visit(ctx.tableReference());
    result.setRight(right);
    return null != ctx.joinSpecification() ? visitJoinSpecification(ctx.joinSpecification(), result) : result;
}
/** Resolves the join type keyword; an unqualified join defaults to COMMA. */
private String getJoinType(final JoinedTableContext ctx) {
    if (null != ctx.innerJoinType()) {
        return JoinType.INNER.name();
    }
    if (null != ctx.outerJoinType()) {
        return null != ctx.outerJoinType().LEFT() ? JoinType.LEFT.name() : JoinType.RIGHT.name();
    }
    if (null != ctx.naturalJoinType()) {
        return getNaturalJoinType(ctx.naturalJoinType());
    }
    return JoinType.COMMA.name();
}
/** NATURAL [LEFT|RIGHT] JOIN resolves to LEFT/RIGHT; plain NATURAL JOIN is INNER. */
private String getNaturalJoinType(final NaturalJoinTypeContext ctx) {
    if (null != ctx.LEFT()) {
        return JoinType.LEFT.name();
    }
    if (null != ctx.RIGHT()) {
        return JoinType.RIGHT.name();
    }
    return JoinType.INNER.name();
}
/** Attaches the ON condition or USING column list to an already-built join segment. */
private JoinTableSegment visitJoinSpecification(final JoinSpecificationContext ctx, final JoinTableSegment result) {
    if (null != ctx.expr()) {
        ExpressionSegment condition = (ExpressionSegment) visit(ctx.expr());
        result.setCondition(condition);
    }
    if (null != ctx.USING()) {
        result.setUsing(ctx.columnNames().columnName().stream().map(each -> (ColumnSegment) visit(each)).collect(Collectors.toList()));
    }
    return result;
}
/** Wraps the WHERE predicate expression in a {@code WhereSegment}. */
@Override
public ASTNode visitWhereClause(final WhereClauseContext ctx) {
    ASTNode segment = visit(ctx.expr());
    return new WhereSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), (ExpressionSegment) segment);
}
/** Builds GROUP BY; items reuse the ORDER BY item rule (MySQL allows ASC/DESC in GROUP BY). */
@Override
public ASTNode visitGroupByClause(final GroupByClauseContext ctx) {
    Collection<OrderByItemSegment> items = new LinkedList<>();
    for (OrderByItemContext each : ctx.orderByItem()) {
        items.add((OrderByItemSegment) visit(each));
    }
    return new GroupBySegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), items);
}
/**
 * Builds LIMIT. With "LIMIT n OFFSET m" the first operand is the row count;
 * with "LIMIT m, n" the first operand is the offset. No offset at all yields a null offset.
 */
@Override
public ASTNode visitLimitClause(final LimitClauseContext ctx) {
    if (null == ctx.limitOffset()) {
        return new LimitSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), null, (PaginationValueSegment) visit(ctx.limitRowCount()));
    }
    PaginationValueSegment rowCount;
    PaginationValueSegment offset;
    if (null != ctx.OFFSET()) {
        rowCount = (PaginationValueSegment) visit(ctx.limitRowCount());
        offset = (PaginationValueSegment) visit(ctx.limitOffset());
    } else {
        offset = (PaginationValueSegment) visit(ctx.limitOffset());
        rowCount = (PaginationValueSegment) visit(ctx.limitRowCount());
    }
    return new LimitSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), offset, rowCount);
}
/** Builds the LIMIT row count as either a numeric literal or a "?" parameter marker (which is also registered). */
@Override
public ASTNode visitLimitRowCount(final LimitRowCountContext ctx) {
    if (null != ctx.numberLiterals()) {
        return new NumberLiteralLimitValueSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ((NumberLiteralValue) visit(ctx.numberLiterals())).getValue().longValue());
    }
    ParameterMarkerSegment result = new ParameterMarkerLimitValueSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(),
        ((ParameterMarkerValue) visit(ctx.parameterMarker())).getValue());
    parameterMarkerSegments.add(result);
    return result;
}
/** Builds a constraint name segment from its identifier. */
@Override
public final ASTNode visitConstraintName(final ConstraintNameContext ctx) {
    return new ConstraintSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), (IdentifierValue) visit(ctx.identifier()));
}
/** Builds the LIMIT offset as either a numeric literal or a "?" parameter marker (which is also registered). */
@Override
public ASTNode visitLimitOffset(final LimitOffsetContext ctx) {
    if (null != ctx.numberLiterals()) {
        return new NumberLiteralLimitValueSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ((NumberLiteralValue) visit(ctx.numberLiterals())).getValue().longValue());
    }
    ParameterMarkerSegment result = new ParameterMarkerLimitValueSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(),
        ((ParameterMarkerValue) visit(ctx.parameterMarker())).getValue());
    parameterMarkerSegments.add(result);
    return result;
}
/** Builds COLLATE: a literal collation name, or a "?" parameter marker (which is also registered). */
@Override
public ASTNode visitCollateClause(final CollateClauseContext ctx) {
    if (null != ctx.collationName()) {
        return new LiteralExpressionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.collationName().textOrIdentifier().getText());
    }
    ParameterMarkerExpressionSegment segment = new ParameterMarkerExpressionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(),
        ((ParameterMarkerValue) visit(ctx.parameterMarker())).getValue());
    parameterMarkerSegments.add(segment);
    return segment;
}
/**
 * Get original text.
 *
 * @param ctx context
 * @return the exact SQL text for the rule's span, recovered from the underlying token stream
 */
protected String getOriginalText(final ParserRuleContext ctx) {
    return ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex()));
}
}
|
class MySQLStatementVisitor extends MySQLStatementBaseVisitor<ASTNode> {
private final Collection<ParameterMarkerSegment> parameterMarkerSegments = new LinkedList<>();
/** Assigns each "?" marker its ordinal position; the index equals the number of markers seen so far. */
@Override
public final ASTNode visitParameterMarker(final ParameterMarkerContext ctx) {
    return new ParameterMarkerValue(parameterMarkerSegments.size(), ParameterMarkerType.QUESTION);
}
/** Dispatches a literal to the matching typed literal visitor; a literal with no known type is a grammar bug. */
@Override
public final ASTNode visitLiterals(final LiteralsContext ctx) {
    if (null != ctx.stringLiterals()) {
        return visit(ctx.stringLiterals());
    }
    if (null != ctx.numberLiterals()) {
        return visit(ctx.numberLiterals());
    }
    if (null != ctx.temporalLiterals()) {
        return visit(ctx.temporalLiterals());
    }
    if (null != ctx.hexadecimalLiterals()) {
        return visit(ctx.hexadecimalLiterals());
    }
    if (null != ctx.bitValueLiterals()) {
        return visit(ctx.bitValueLiterals());
    }
    if (null != ctx.booleanLiterals()) {
        return visit(ctx.booleanLiterals());
    }
    if (null != ctx.nullValueLiterals()) {
        return visit(ctx.nullValueLiterals());
    }
    throw new IllegalStateException("Literals must have string, number, dateTime, hex, bit, boolean or null.");
}
/** String literal, kept with its original quoting in the raw text. */
@Override
public final ASTNode visitStringLiterals(final StringLiteralsContext ctx) {
    return new StringLiteralValue(ctx.getText());
}
/** Bare string rule, also mapped to a string literal value. */
@Override
public ASTNode visitString_(final String_Context ctx) {
    return new StringLiteralValue(ctx.getText());
}
/** Numeric literal, parsed from its raw text. */
@Override
public final ASTNode visitNumberLiterals(final NumberLiteralsContext ctx) {
    return new NumberLiteralValue(ctx.getText());
}
/** Temporal literal (DATE/TIME/TIMESTAMP '...'), kept opaque as raw text. */
@Override
public ASTNode visitTemporalLiterals(final TemporalLiteralsContext ctx) {
    return new OtherLiteralValue(ctx.getText());
}
/** Hexadecimal literal, kept opaque as raw text. */
@Override
public final ASTNode visitHexadecimalLiterals(final HexadecimalLiteralsContext ctx) {
    return new OtherLiteralValue(ctx.getText());
}
/** Bit-value literal (b'...'), kept opaque as raw text. */
@Override
public final ASTNode visitBitValueLiterals(final BitValueLiteralsContext ctx) {
    return new OtherLiteralValue(ctx.getText());
}
/** Boolean literal (TRUE/FALSE), parsed from its raw text. */
@Override
public final ASTNode visitBooleanLiterals(final BooleanLiteralsContext ctx) {
    return new BooleanLiteralValue(ctx.getText());
}
/** NULL literal. */
@Override
public final ASTNode visitNullValueLiterals(final NullValueLiteralsContext ctx) {
    return new NullLiteralValue(ctx.getText());
}
/** Identifier, kept with any original quoting in the raw text. */
@Override
public final ASTNode visitIdentifier(final IdentifierContext ctx) {
    return new IdentifierValue(ctx.getText());
}
/** Schema name, modeled as a database segment (MySQL schemas are databases). */
@Override
public final ASTNode visitSchemaName(final SchemaNameContext ctx) {
    return new DatabaseSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), (IdentifierValue) visit(ctx.identifier()));
}
/** Builds a simple table segment from a (possibly owner-qualified) table name. */
@Override
public final ASTNode visitTableName(final TableNameContext ctx) {
    SimpleTableSegment result = new SimpleTableSegment(new TableNameSegment(ctx.name().getStart().getStartIndex(),
        ctx.name().getStop().getStopIndex(), new IdentifierValue(ctx.name().identifier().getText())));
    OwnerContext owner = ctx.owner();
    if (null != owner) {
        result.setOwner((OwnerSegment) visit(owner));
    }
    return result;
}
/** Builds a view name as a simple table segment with optional owner qualification. */
@Override
public final ASTNode visitViewName(final ViewNameContext ctx) {
    SimpleTableSegment result = new SimpleTableSegment(new TableNameSegment(ctx.identifier().getStart().getStartIndex(),
        ctx.identifier().getStop().getStopIndex(), new IdentifierValue(ctx.identifier().getText())));
    OwnerContext owner = ctx.owner();
    if (null != owner) {
        result.setOwner((OwnerSegment) visit(owner));
    }
    return result;
}
/** Builds the owner (qualifier) segment of a qualified name. */
@Override
public final ASTNode visitOwner(final OwnerContext ctx) {
    return new OwnerSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), (IdentifierValue) visit(ctx.identifier()));
}
/**
 * Builds a function segment from a (possibly owner-qualified) function name.
 * NOTE(review): this reads {@code ctx.identifier().IDENTIFIER_()} directly, which looks like it
 * would NPE if the grammar lets a non-plain identifier token reach here — confirm against the grammar.
 */
@Override
public ASTNode visitFunctionName(final FunctionNameContext ctx) {
    FunctionSegment result = new FunctionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.identifier().IDENTIFIER_().getText(), ctx.getText());
    if (null != ctx.owner()) {
        result.setOwner((OwnerSegment) visit(ctx.owner()));
    }
    return result;
}
/** Builds a column segment from a bare column name. */
@Override
public final ASTNode visitColumnName(final ColumnNameContext ctx) {
    return new ColumnSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), (IdentifierValue) visit(ctx.identifier()));
}
/** Builds an index segment wrapping the index name. */
@Override
public final ASTNode visitIndexName(final IndexNameContext ctx) {
    IndexNameSegment indexName = new IndexNameSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), (IdentifierValue) visit(ctx.identifier()));
    return new IndexSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), indexName);
}
/** Collects a comma-separated table name list, preserving order. */
@Override
public ASTNode visitTableList(final TableListContext ctx) {
    CollectionValue<SimpleTableSegment> result = new CollectionValue<>();
    for (TableNameContext each : ctx.tableName()) {
        result.getValue().add((SimpleTableSegment) visit(each));
    }
    return result;
}
/** Collects a comma-separated view name list, preserving order. */
@Override
public final ASTNode visitViewNames(final ViewNamesContext ctx) {
    CollectionValue<SimpleTableSegment> result = new CollectionValue<>();
    for (ViewNameContext each : ctx.viewName()) {
        result.getValue().add((SimpleTableSegment) visit(each));
    }
    return result;
}
/** Collects a comma-separated column name list, preserving order. */
@Override
public final ASTNode visitColumnNames(final ColumnNamesContext ctx) {
    CollectionValue<ColumnSegment> result = new CollectionValue<>();
    for (ColumnNameContext each : ctx.columnName()) {
        result.getValue().add((ColumnSegment) visit(each));
    }
    return result;
}
/**
 * Builds a top-level expression: delegates to the boolean primary, or combines two sub-expressions
 * with XOR/AND/OR; the remaining alternative is a NOT-prefixed expression.
 */
@Override
public final ASTNode visitExpr(final ExprContext ctx) {
    if (null != ctx.booleanPrimary()) {
        return visit(ctx.booleanPrimary());
    }
    if (null != ctx.XOR()) {
        return createBinaryOperationExpression(ctx, "XOR");
    }
    if (null != ctx.andOperator()) {
        return createBinaryOperationExpression(ctx, ctx.andOperator().getText());
    }
    if (null != ctx.orOperator()) {
        return createBinaryOperationExpression(ctx, ctx.orOperator().getText());
    }
    return new NotExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), (ExpressionSegment) visit(ctx.expr(0)));
}
/** Combines the two child expressions with the given operator, recovering the full original text. */
private BinaryOperationExpression createBinaryOperationExpression(final ExprContext ctx, final String operator) {
    ExpressionSegment left = (ExpressionSegment) visit(ctx.expr(0));
    ExpressionSegment right = (ExpressionSegment) visit(ctx.expr(1));
    String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex()));
    return new BinaryOperationExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), left, right, operator, text);
}
/**
 * Builds a boolean primary. Handles four forms:
 * IS [NOT] NULL/TRUE/FALSE (the right side becomes a literal of the recovered text),
 * comparison operators (including safe-equals), MEMBER OF, and := assignment;
 * anything else falls through to the predicate rule.
 */
@Override
public final ASTNode visitBooleanPrimary(final BooleanPrimaryContext ctx) {
    if (null != ctx.IS()) {
        String rightText = "";
        if (null != ctx.NOT()) {
            // Prepend "NOT " (as written) to the right-hand literal text.
            rightText = rightText.concat(ctx.start.getInputStream().getText(new Interval(ctx.NOT().getSymbol().getStartIndex(),
                ctx.NOT().getSymbol().getStopIndex()))).concat(" ");
        }
        Token operatorToken = null;
        if (null != ctx.NULL()) {
            operatorToken = ctx.NULL().getSymbol();
        }
        if (null != ctx.TRUE()) {
            operatorToken = ctx.TRUE().getSymbol();
        }
        if (null != ctx.FALSE()) {
            operatorToken = ctx.FALSE().getSymbol();
        }
        // Fallback start position: two characters past the IS keyword when no keyword token matched.
        int startIndex = null == operatorToken ? ctx.IS().getSymbol().getStopIndex() + 2 : operatorToken.getStartIndex();
        rightText = rightText.concat(ctx.start.getInputStream().getText(new Interval(startIndex, ctx.stop.getStopIndex())));
        ExpressionSegment right = new LiteralExpressionSegment(ctx.IS().getSymbol().getStopIndex() + 2, ctx.stop.getStopIndex(), rightText);
        String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex()));
        ExpressionSegment left = (ExpressionSegment) visit(ctx.booleanPrimary());
        String operator = "IS";
        return new BinaryOperationExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), left, right, operator, text);
    }
    if (null != ctx.comparisonOperator() || null != ctx.SAFE_EQ_()) {
        return createCompareSegment(ctx);
    }
    if (null != ctx.MEMBER()) {
        // Right side is the text between "MEMBER OF(" and the closing parenthesis.
        int startIndex = ctx.MEMBER().getSymbol().getStopIndex() + 5;
        int endIndex = ctx.stop.getStopIndex() - 1;
        String rightText = ctx.start.getInputStream().getText(new Interval(startIndex, endIndex));
        ExpressionSegment right = new ExpressionProjectionSegment(startIndex, endIndex, rightText);
        String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex()));
        ExpressionSegment left = (ExpressionSegment) visit(ctx.booleanPrimary());
        String operator = "MEMBER OF";
        return new BinaryOperationExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), left, right, operator, text);
    }
    if (null != ctx.assignmentOperator()) {
        return createAssignmentSegment(ctx);
    }
    return visit(ctx.predicate());
}
/** Models a ":=" assignment inside an expression as a binary operation. */
private ASTNode createAssignmentSegment(final BooleanPrimaryContext ctx) {
    ExpressionSegment left = (ExpressionSegment) visit(ctx.booleanPrimary());
    ExpressionSegment right = (ExpressionSegment) visit(ctx.predicate());
    String operator = ctx.assignmentOperator().getText();
    String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex()));
    return new BinaryOperationExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), left, right, operator, text);
}
/**
 * Builds a comparison expression from a boolean primary, e.g. {@code a = b}, {@code a <=> b}, {@code a > ALL (subquery)}.
 */
private ASTNode createCompareSegment(final BooleanPrimaryContext ctx) {
ExpressionSegment left = (ExpressionSegment) visit(ctx.booleanPrimary());
ExpressionSegment right;
String operator;
// The ALL quantifier is folded into the operator text; the null-safe operator (<=>) is used as-is when present.
if (null != ctx.ALL()) {
operator = null != ctx.SAFE_EQ_() ? ctx.SAFE_EQ_().getText() : ctx.comparisonOperator().getText() + " ALL";
} else {
operator = null != ctx.SAFE_EQ_() ? ctx.SAFE_EQ_().getText() : ctx.comparisonOperator().getText();
}
// Right side is either a plain predicate or a (possibly quantified) subquery.
if (null != ctx.predicate()) {
right = (ExpressionSegment) visit(ctx.predicate());
} else {
right = new SubqueryExpressionSegment(new SubquerySegment(ctx.subquery().start.getStartIndex(), ctx.subquery().stop.getStopIndex(), (MySQLSelectStatement) visit(ctx.subquery())));
}
String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex()));
return new BinaryOperationExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), left, right, operator, text);
}
@Override
public final ASTNode visitPredicate(final PredicateContext ctx) {
// Dispatch on the predicate form; order mirrors the grammar alternatives.
if (null != ctx.IN()) {
return createInSegment(ctx);
}
if (null != ctx.BETWEEN()) {
return createBetweenSegment(ctx);
}
if (null != ctx.LIKE()) {
return createBinaryOperationExpressionFromLike(ctx);
}
if (null != ctx.REGEXP()) {
return createBinaryOperationExpressionFromRegexp(ctx);
}
// No predicate operator: fall through to the underlying bit expression.
return visit(ctx.bitExpr(0));
}
/**
 * Builds an {@code IN} expression whose right side is either a subquery or a parenthesized value list.
 */
private InExpression createInSegment(final PredicateContext ctx) {
boolean not = null != ctx.NOT();
ExpressionSegment left = (ExpressionSegment) visit(ctx.bitExpr(0));
ExpressionSegment right;
if (null != ctx.subquery()) {
right = new SubqueryExpressionSegment(new SubquerySegment(ctx.subquery().start.getStartIndex(), ctx.subquery().stop.getStopIndex(), (MySQLSelectStatement) visit(ctx.subquery())));
} else {
// Value list form: collect each expression between the parentheses.
right = new ListExpression(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex());
for (ExprContext each : ctx.expr()) {
((ListExpression) right).getItems().add((ExpressionSegment) visit(each));
}
}
return new InExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), left, right, not);
}
/**
 * Builds a binary expression for {@code LIKE}, {@code NOT LIKE} and {@code SOUNDS LIKE} predicates.
 */
private BinaryOperationExpression createBinaryOperationExpressionFromLike(final PredicateContext ctx) {
ExpressionSegment left = (ExpressionSegment) visit(ctx.bitExpr(0));
String operator;
ExpressionSegment right;
if (null != ctx.SOUNDS()) {
right = (ExpressionSegment) visit(ctx.bitExpr(1));
operator = "SOUNDS LIKE";
} else {
// LIKE patterns are kept as a list; the span covers the first through the last simple expression.
ListExpression listExpression = new ListExpression(ctx.simpleExpr(0).start.getStartIndex(), ctx.simpleExpr().get(ctx.simpleExpr().size() - 1).stop.getStopIndex());
for (SimpleExprContext each : ctx.simpleExpr()) {
listExpression.getItems().add((ExpressionSegment) visit(each));
}
right = listExpression;
operator = null != ctx.NOT() ? "NOT LIKE" : "LIKE";
}
String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex()));
return new BinaryOperationExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), left, right, operator, text);
}
/**
 * Builds a binary expression for {@code REGEXP} / {@code NOT REGEXP} predicates.
 */
private BinaryOperationExpression createBinaryOperationExpressionFromRegexp(final PredicateContext ctx) {
    int startIndex = ctx.start.getStartIndex();
    int stopIndex = ctx.stop.getStopIndex();
    ExpressionSegment leftOperand = (ExpressionSegment) visit(ctx.bitExpr(0));
    ExpressionSegment rightOperand = (ExpressionSegment) visit(ctx.bitExpr(1));
    String operatorText;
    if (null == ctx.NOT()) {
        operatorText = "REGEXP";
    } else {
        operatorText = "NOT REGEXP";
    }
    String originalText = ctx.start.getInputStream().getText(new Interval(startIndex, stopIndex));
    return new BinaryOperationExpression(startIndex, stopIndex, leftOperand, rightOperand, operatorText, originalText);
}
/**
 * Builds a {@code [NOT] BETWEEN ... AND ...} expression from a predicate context.
 */
private BetweenExpression createBetweenSegment(final PredicateContext ctx) {
    ExpressionSegment target = (ExpressionSegment) visit(ctx.bitExpr(0));
    ExpressionSegment lowerBound = (ExpressionSegment) visit(ctx.bitExpr(1));
    ExpressionSegment upperBound = (ExpressionSegment) visit(ctx.predicate());
    boolean negated = null != ctx.NOT();
    return new BetweenExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), target, lowerBound, upperBound, negated);
}
@Override
public final ASTNode visitBitExpr(final BitExprContext ctx) {
// A lone simple expression has no operator; delegate directly.
if (null != ctx.simpleExpr()) {
return visit(ctx.simpleExpr());
}
// Otherwise the rule is left-op-right; children are positional: 0 = left, 1 = operator token, 2 = right.
ExpressionSegment left = (ExpressionSegment) visit(ctx.getChild(0));
ExpressionSegment right = (ExpressionSegment) visit(ctx.getChild(2));
String operator = ctx.getChild(1).getText();
String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex()));
return new BinaryOperationExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), left, right, operator, text);
}
@Override
public final ASTNode visitSimpleExpr(final SimpleExprContext ctx) {
// Branch order follows the grammar alternatives; anything unmatched falls to visitRemainSimpleExpr.
int startIndex = ctx.start.getStartIndex();
int stopIndex = ctx.stop.getStopIndex();
if (null != ctx.subquery()) {
SubquerySegment subquerySegment = new SubquerySegment(ctx.subquery().getStart().getStartIndex(), ctx.subquery().getStop().getStopIndex(), (MySQLSelectStatement) visit(ctx.subquery()));
if (null != ctx.EXISTS()) {
return new ExistsSubqueryExpression(startIndex, stopIndex, subquerySegment);
}
return new SubqueryExpressionSegment(subquerySegment);
}
if (null != ctx.parameterMarker()) {
ParameterMarkerValue parameterMarker = (ParameterMarkerValue) visit(ctx.parameterMarker());
ParameterMarkerExpressionSegment segment = new ParameterMarkerExpressionSegment(startIndex, stopIndex, parameterMarker.getValue(), parameterMarker.getType());
// Every parameter marker is also recorded statement-wide for later binding.
parameterMarkerSegments.add(segment);
return segment;
}
if (null != ctx.literals()) {
return SQLUtils.createLiteralExpression(visit(ctx.literals()), startIndex, stopIndex, ctx.literals().start.getInputStream().getText(new Interval(startIndex, stopIndex)));
}
if (null != ctx.intervalExpression()) {
return visit(ctx.intervalExpression());
}
if (null != ctx.functionCall()) {
return visit(ctx.functionCall());
}
if (null != ctx.collateClause()) {
SimpleExpressionSegment collateValueSegment = (SimpleExpressionSegment) visit(ctx.collateClause());
return new CollateExpression(startIndex, stopIndex, collateValueSegment);
}
if (null != ctx.columnRef()) {
return visit(ctx.columnRef());
}
if (null != ctx.matchExpression()) {
return visit(ctx.matchExpression());
}
if (null != ctx.notOperator()) {
ASTNode expression = visit(ctx.simpleExpr(0));
// NOT EXISTS is represented by flagging the exists-subquery rather than wrapping it.
if (expression instanceof ExistsSubqueryExpression) {
((ExistsSubqueryExpression) expression).setNot(true);
return expression;
}
return new NotExpression(startIndex, stopIndex, (ExpressionSegment) expression);
}
// A single parenthesized expression is transparent: return its inner expression.
if (null != ctx.LP_() && 1 == ctx.expr().size()) {
return visit(ctx.expr(0));
}
if (null != ctx.OR_()) {
ExpressionSegment left = (ExpressionSegment) visit(ctx.simpleExpr(0));
ExpressionSegment right = (ExpressionSegment) visit(ctx.simpleExpr(1));
String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex()));
return new BinaryOperationExpression(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), left, right, ctx.OR_().getText(), text);
}
return visitRemainSimpleExpr(ctx);
}
@Override
public ASTNode visitColumnRef(final ColumnRefContext ctx) {
// 1 identifier: column; 2: owner.column; 3: owner.owner.column (e.g. schema.table.column).
int identifierCount = ctx.identifier().size();
ColumnSegment result;
if (1 == identifierCount) {
result = new ColumnSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), (IdentifierValue) visit(ctx.identifier(0)));
} else if (2 == identifierCount) {
result = new ColumnSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), (IdentifierValue) visit(ctx.identifier(1)));
result.setOwner(new OwnerSegment(ctx.identifier(0).start.getStartIndex(), ctx.identifier(0).stop.getStopIndex(), (IdentifierValue) visit(ctx.identifier(0))));
} else {
result = new ColumnSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), (IdentifierValue) visit(ctx.identifier(2)));
OwnerSegment owner = new OwnerSegment(ctx.identifier(1).start.getStartIndex(), ctx.identifier(1).stop.getStopIndex(), (IdentifierValue) visit(ctx.identifier(1)));
owner.setOwner(new OwnerSegment(ctx.identifier(0).start.getStartIndex(), ctx.identifier(0).stop.getStopIndex(), (IdentifierValue) visit(ctx.identifier(0))));
result.setOwner(owner);
}
return result;
}
@Override
public ASTNode visitSubquery(final SubqueryContext ctx) {
// A subquery is always a parenthesized query expression; delegate to that rule.
return visit(ctx.queryExpressionParens());
}
@Override
public ASTNode visitQueryExpressionParens(final QueryExpressionParensContext ctx) {
// Unwrap nested parentheses recursively.
if (null != ctx.queryExpressionParens()) {
return visit(ctx.queryExpressionParens());
}
MySQLSelectStatement result = (MySQLSelectStatement) visit(ctx.queryExpression());
if (null != ctx.lockClauseList()) {
result.setLock((LockSegment) visit(ctx.lockClauseList()));
}
// Attach all parameter markers collected while visiting this statement.
result.getParameterMarkerSegments().addAll(getParameterMarkerSegments());
return result;
}
@Override
public ASTNode visitLockClauseList(final LockClauseListContext ctx) {
LockSegment result = new LockSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex());
// Collect locked tables from every OF-clause; lock clauses without a table list contribute nothing.
for (LockClauseContext each : ctx.lockClause()) {
if (null != each.tableLockingList()) {
result.getTables().addAll(generateTablesFromTableAliasRefList(each.tableLockingList().tableAliasRefList()));
}
}
return result;
}
@Override
public ASTNode visitQueryExpression(final QueryExpressionContext ctx) {
    // The body is either a plain query expression body or a parenthesized one.
    MySQLSelectStatement selectStatement = null != ctx.queryExpressionBody()
            ? (MySQLSelectStatement) visit(ctx.queryExpressionBody())
            : (MySQLSelectStatement) visit(ctx.queryExpressionParens());
    // ORDER BY and LIMIT attach to the whole query expression.
    if (null != ctx.orderByClause()) {
        selectStatement.setOrderBy((OrderBySegment) visit(ctx.orderByClause()));
    }
    if (null != ctx.limitClause()) {
        selectStatement.setLimit((LimitSegment) visit(ctx.limitClause()));
    }
    return selectStatement;
}
@Override
public ASTNode visitSelectWithInto(final SelectWithIntoContext ctx) {
// Unwrap a nested select-with-into recursively.
if (null != ctx.selectWithInto()) {
return visit(ctx.selectWithInto());
}
MySQLSelectStatement result = (MySQLSelectStatement) visit(ctx.queryExpression());
if (null != ctx.lockClauseList()) {
result.setLock((LockSegment) visit(ctx.lockClauseList()));
}
return result;
}
@Override
public ASTNode visitQueryExpressionBody(final QueryExpressionBodyContext ctx) {
// Single child: a plain query primary with no set operation.
if (1 == ctx.getChildCount() && ctx.getChild(0) instanceof QueryPrimaryContext) {
return visit(ctx.queryPrimary());
}
// Combined form (UNION etc.): wrap the left side and record the combine clause.
// NOTE(review): only projections/from/table are copied from the left statement — verify other clauses are intentionally omitted.
if (null != ctx.queryExpressionBody()) {
MySQLSelectStatement result = new MySQLSelectStatement();
MySQLSelectStatement left = (MySQLSelectStatement) visit(ctx.queryExpressionBody());
result.setProjections(left.getProjections());
result.setFrom(left.getFrom());
left.getTable().ifPresent(result::setTable);
result.setCombine(createCombineSegment(ctx.combineClause(), left));
return result;
}
return visit(ctx.queryExpressionParens());
}
/**
 * Builds the combine (set-operation) segment for a UNION / UNION ALL clause.
 */
private CombineSegment createCombineSegment(final CombineClauseContext ctx, final MySQLSelectStatement left) {
    CombineType combineType;
    if (null != ctx.combineOption() && null != ctx.combineOption().ALL()) {
        combineType = CombineType.UNION_ALL;
    } else {
        combineType = CombineType.UNION;
    }
    MySQLSelectStatement right;
    if (null != ctx.queryPrimary()) {
        right = (MySQLSelectStatement) visit(ctx.queryPrimary());
    } else {
        right = (MySQLSelectStatement) visit(ctx.queryExpressionParens());
    }
    return new CombineSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), left, combineType, right);
}
@Override
public ASTNode visitQuerySpecification(final QuerySpecificationContext ctx) {
// Core SELECT rule: projections plus the optional FROM/WHERE/GROUP BY/HAVING/WINDOW clauses.
MySQLSelectStatement result = new MySQLSelectStatement();
result.setProjections((ProjectionsSegment) visit(ctx.projections()));
if (null != ctx.selectSpecification()) {
result.getProjections().setDistinctRow(isDistinct(ctx));
}
if (null != ctx.fromClause()) {
if (null != ctx.fromClause().tableReferences()) {
TableSegment tableSource = (TableSegment) visit(ctx.fromClause().tableReferences());
result.setFrom(tableSource);
}
// FROM DUAL is modeled as a simple table named after the DUAL keyword.
if (null != ctx.fromClause().DUAL()) {
TableSegment tableSource = new SimpleTableSegment(new TableNameSegment(ctx.fromClause().DUAL().getSymbol().getStartIndex(),
ctx.fromClause().DUAL().getSymbol().getStopIndex(), new IdentifierValue(ctx.fromClause().DUAL().getText())));
result.setFrom(tableSource);
}
}
if (null != ctx.whereClause()) {
result.setWhere((WhereSegment) visit(ctx.whereClause()));
}
if (null != ctx.groupByClause()) {
result.setGroupBy((GroupBySegment) visit(ctx.groupByClause()));
}
if (null != ctx.havingClause()) {
result.setHaving((HavingSegment) visit(ctx.havingClause()));
}
if (null != ctx.windowClause()) {
result.setWindow((WindowSegment) visit(ctx.windowClause()));
}
return result;
}
@Override
public ASTNode visitWindowClause(final WindowClauseContext ctx) {
    // Fix: the annotation was duplicated (@Override twice), which is a compile error —
    // duplicate annotations of the same non-repeatable type are not permitted on one element.
    // Only the clause's text span is captured; individual window definitions are not modeled.
    return new WindowSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex());
}
@Override
public ASTNode visitHavingClause(final HavingClauseContext ctx) {
    // HAVING wraps exactly one boolean expression.
    ExpressionSegment havingExpression = (ExpressionSegment) visit(ctx.expr());
    int startIndex = ctx.getStart().getStartIndex();
    int stopIndex = ctx.getStop().getStopIndex();
    return new HavingSegment(startIndex, stopIndex, havingExpression);
}
@Override
public final ASTNode visitIntervalExpression(final IntervalExpressionContext ctx) {
// INTERVAL is modeled as a function whose parameters are the value expression and its unit (as a literal).
FunctionSegment result = new FunctionSegment(ctx.INTERVAL().getSymbol().getStartIndex(), ctx.INTERVAL().getSymbol().getStopIndex(), ctx.INTERVAL().getText(), ctx.INTERVAL().getText());
result.getParameters().add((ExpressionSegment) visit(ctx.intervalValue().expr()));
result.getParameters().add(new LiteralExpressionSegment(ctx.intervalValue().intervalUnit().getStart().getStartIndex(), ctx.intervalValue().intervalUnit().getStop().getStopIndex(),
ctx.intervalValue().intervalUnit().getText()));
return result;
}
@Override
public final ASTNode visitFunctionCall(final FunctionCallContext ctx) {
// Delegate to the matching function-call category; the grammar guarantees exactly one is present.
if (null != ctx.aggregationFunction()) {
return visit(ctx.aggregationFunction());
}
if (null != ctx.specialFunction()) {
return visit(ctx.specialFunction());
}
if (null != ctx.regularFunction()) {
return visit(ctx.regularFunction());
}
if (null != ctx.jsonFunction()) {
return visit(ctx.jsonFunction());
}
throw new IllegalStateException("FunctionCallContext must have aggregationFunction, regularFunction, specialFunction or jsonFunction.");
}
@Override
public final ASTNode visitAggregationFunction(final AggregationFunctionContext ctx) {
String aggregationType = ctx.aggregationFunctionName().getText();
// Known aggregation names become aggregation projections; anything else is kept as a raw expression projection.
return AggregationType.isAggregationType(aggregationType)
? createAggregationSegment(ctx, aggregationType)
: new ExpressionProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), getOriginalText(ctx));
}
@Override
public final ASTNode visitJsonFunction(final JsonFunctionContext ctx) {
JsonFunctionNameContext functionNameContext = ctx.jsonFunctionName();
String functionName;
if (null != functionNameContext) {
functionName = functionNameContext.getText();
// Visit arguments for their side effects (e.g. parameter-marker registration); results are discarded.
for (ExprContext each : ctx.expr()) {
visit(each);
}
} else if (null != ctx.JSON_SEPARATOR()) {
// Operator forms: -> and ->> are treated as the function name.
functionName = ctx.JSON_SEPARATOR().getText();
} else {
functionName = ctx.JSON_UNQUOTED_SEPARATOR().getText();
}
return new FunctionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), functionName, getOriginalText(ctx));
}
/**
 * Builds an aggregation projection (distinct or plain) for a recognized aggregation function.
 */
private ASTNode createAggregationSegment(final AggregationFunctionContext ctx, final String aggregationType) {
AggregationType type = AggregationType.valueOf(aggregationType.toUpperCase());
// Inner expression text runs from the opening parenthesis to the end of the call.
String innerExpression = ctx.start.getInputStream().getText(new Interval(ctx.LP_().getSymbol().getStartIndex(), ctx.stop.getStopIndex()));
if (null != ctx.distinct()) {
AggregationDistinctProjectionSegment result = new AggregationDistinctProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(),
type, innerExpression, getDistinctExpression(ctx));
result.getParameters().addAll(getExpressions(ctx));
return result;
}
AggregationProjectionSegment result = new AggregationProjectionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), type, innerExpression);
result.getParameters().addAll(getExpressions(ctx));
return result;
}
/**
 * Visits every argument of an aggregation call and returns the resulting expression segments, in order.
 */
private Collection<ExpressionSegment> getExpressions(final AggregationFunctionContext ctx) {
    if (null == ctx.expr()) {
        return Collections.emptyList();
    }
    Collection<ExpressionSegment> arguments = new LinkedList<>();
    ctx.expr().forEach(each -> arguments.add((ExpressionSegment) visit(each)));
    return arguments;
}
/**
 * Concatenates the raw text of the expression following DISTINCT inside an aggregation call.
 */
private String getDistinctExpression(final AggregationFunctionContext ctx) {
StringBuilder result = new StringBuilder();
// Children 0..2 are presumably the function name, '(' and DISTINCT, and the last child is ')' — TODO confirm against the grammar.
for (int i = 3; i < ctx.getChildCount() - 1; i++) {
result.append(ctx.getChild(i).getText());
}
return result.toString();
}
@Override
public final ASTNode visitSpecialFunction(final SpecialFunctionContext ctx) {
// Delegate to the first matching special-function sub-rule; unmatched forms fall back to a generic function segment.
if (null != ctx.groupConcatFunction()) {
return visit(ctx.groupConcatFunction());
}
if (null != ctx.windowFunction()) {
return visit(ctx.windowFunction());
}
if (null != ctx.castFunction()) {
return visit(ctx.castFunction());
}
if (null != ctx.convertFunction()) {
return visit(ctx.convertFunction());
}
if (null != ctx.positionFunction()) {
return visit(ctx.positionFunction());
}
if (null != ctx.substringFunction()) {
return visit(ctx.substringFunction());
}
if (null != ctx.extractFunction()) {
return visit(ctx.extractFunction());
}
if (null != ctx.charFunction()) {
return visit(ctx.charFunction());
}
if (null != ctx.trimFunction()) {
return visit(ctx.trimFunction());
}
if (null != ctx.weightStringFunction()) {
return visit(ctx.weightStringFunction());
}
if (null != ctx.valuesFunction()) {
return visit(ctx.valuesFunction());
}
if (null != ctx.currentUserFunction()) {
return visit(ctx.currentUserFunction());
}
return new FunctionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), getOriginalText(ctx), getOriginalText(ctx));
}
@Override
public final ASTNode visitGroupConcatFunction(final GroupConcatFunctionContext ctx) {
// First pass registers parameter markers inside the arguments; second pass collects the parameter segments.
calculateParameterCount(ctx.expr());
FunctionSegment result = new FunctionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ctx.GROUP_CONCAT().getText(), getOriginalText(ctx));
for (ExprContext each : ctx.expr()) {
result.getParameters().add((ExpressionSegment) visit(each));
}
return result;
}
@Override
public final ASTNode visitWindowFunction(final WindowFunctionContext ctx) {
    // Walk the children first so nested expressions and parameter markers get visited.
    super.visitWindowFunction(ctx);
    String functionName = ctx.funcName.getText();
    return new FunctionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), functionName, getOriginalText(ctx));
}
@Override
public final ASTNode visitCastFunction(final CastFunctionContext ctx) {
calculateParameterCount(ctx.expr());
FunctionSegment result = new FunctionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ctx.CAST().getText(), getOriginalText(ctx));
// NOTE(review): only column and literal arguments are kept as parameters; other expression kinds are silently dropped — verify this is intentional.
for (ExprContext each : ctx.expr()) {
ASTNode expr = visit(each);
if (expr instanceof ColumnSegment) {
result.getParameters().add((ColumnSegment) expr);
} else if (expr instanceof LiteralExpressionSegment) {
result.getParameters().add((LiteralExpressionSegment) expr);
}
}
if (null != ctx.castType()) {
result.getParameters().add((DataTypeSegment) visit(ctx.castType()));
}
// CAST(... AS DATETIME[(p)]) has no castType rule; build the data type from the DATETIME token directly.
if (null != ctx.DATETIME()) {
DataTypeSegment dataType = new DataTypeSegment();
dataType.setDataTypeName(ctx.DATETIME().getText());
dataType.setStartIndex(ctx.DATETIME().getSymbol().getStartIndex());
dataType.setStopIndex(ctx.DATETIME().getSymbol().getStopIndex());
if (null != ctx.typeDatetimePrecision()) {
dataType.setDataLength((DataTypeLengthSegment) visit(ctx.typeDatetimePrecision()));
}
result.getParameters().add(dataType);
}
return result;
}
@Override
public ASTNode visitCastType(final CastTypeContext ctx) {
DataTypeSegment result = new DataTypeSegment();
result.setDataTypeName(ctx.castTypeName.getText());
result.setStartIndex(ctx.start.getStartIndex());
result.setStopIndex(ctx.stop.getStopIndex());
// A cast type carries at most one length spec: either a plain field length or a precision/scale pair.
if (null != ctx.fieldLength()) {
DataTypeLengthSegment dataTypeLengthSegment = (DataTypeLengthSegment) visit(ctx.fieldLength());
result.setDataLength(dataTypeLengthSegment);
}
if (null != ctx.precision()) {
DataTypeLengthSegment dataTypeLengthSegment = (DataTypeLengthSegment) visit(ctx.precision());
result.setDataLength(dataTypeLengthSegment);
}
return result;
}
@Override
public final ASTNode visitConvertFunction(final ConvertFunctionContext ctx) {
    // Visit the single argument so any parameter markers inside it are registered.
    calculateParameterCount(Collections.singleton(ctx.expr()));
    String functionName = ctx.CONVERT().getText();
    return new FunctionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), functionName, getOriginalText(ctx));
}
@Override
public final ASTNode visitPositionFunction(final PositionFunctionContext ctx) {
calculateParameterCount(ctx.expr());
FunctionSegment result = new FunctionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ctx.POSITION().getText(), getOriginalText(ctx));
// NOTE(review): both arguments are cast to literal expressions — a column argument would throw ClassCastException; verify.
result.getParameters().add((LiteralExpressionSegment) visit(ctx.expr(0)));
result.getParameters().add((LiteralExpressionSegment) visit(ctx.expr(1)));
return result;
}
@Override
public final ASTNode visitSubstringFunction(final SubstringFunctionContext ctx) {
// SUBSTR and SUBSTRING share one handler; the name reflects whichever keyword was used.
FunctionSegment result = new FunctionSegment(
ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), null == ctx.SUBSTR() ? ctx.SUBSTRING().getText() : ctx.SUBSTR().getText(), getOriginalText(ctx));
result.getParameters().add((ExpressionSegment) visit(ctx.expr()));
// Numeric position/length arguments are added as number literals.
for (TerminalNode each : ctx.NUMBER_()) {
result.getParameters().add(new LiteralExpressionSegment(each.getSymbol().getStartIndex(), each.getSymbol().getStopIndex(), new NumberLiteralValue(each.getText()).getValue()));
}
return result;
}
@Override
public final ASTNode visitExtractFunction(final ExtractFunctionContext ctx) {
calculateParameterCount(Collections.singleton(ctx.expr()));
FunctionSegment result = new FunctionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ctx.EXTRACT().getText(), getOriginalText(ctx));
// The time unit identifier is recorded as a literal parameter ahead of the source expression.
result.getParameters().add(new LiteralExpressionSegment(ctx.identifier().getStart().getStartIndex(), ctx.identifier().getStop().getStopIndex(), ctx.identifier().getText()));
result.getParameters().add((LiteralExpressionSegment) visit(ctx.expr()));
return result;
}
@Override
public final ASTNode visitCharFunction(final CharFunctionContext ctx) {
    // Register parameter markers first, then collect each argument as a parameter.
    calculateParameterCount(ctx.expr());
    FunctionSegment result = new FunctionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ctx.CHAR().getText(), getOriginalText(ctx));
    for (ExprContext argument : ctx.expr()) {
        result.getParameters().add((ExpressionSegment) visit(argument));
    }
    return result;
}
@Override
public final ASTNode visitTrimFunction(final TrimFunctionContext ctx) {
FunctionSegment result = new FunctionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ctx.TRIM().getText(), getOriginalText(ctx));
// The trim direction keyword (BOTH/TRAILING/LEADING), if present, becomes a literal parameter.
if (null != ctx.BOTH()) {
result.getParameters().add(new LiteralExpressionSegment(ctx.BOTH().getSymbol().getStartIndex(), ctx.BOTH().getSymbol().getStopIndex(),
new OtherLiteralValue(ctx.BOTH().getSymbol().getText()).getValue()));
}
if (null != ctx.TRAILING()) {
result.getParameters().add(new LiteralExpressionSegment(ctx.TRAILING().getSymbol().getStartIndex(), ctx.TRAILING().getSymbol().getStopIndex(),
new OtherLiteralValue(ctx.TRAILING().getSymbol().getText()).getValue()));
}
if (null != ctx.LEADING()) {
result.getParameters().add(new LiteralExpressionSegment(ctx.LEADING().getSymbol().getStartIndex(), ctx.LEADING().getSymbol().getStopIndex(),
new OtherLiteralValue(ctx.LEADING().getSymbol().getText()).getValue()));
}
// Remaining arguments: the removal string and/or the target string.
for (ExprContext each : ctx.expr()) {
result.getParameters().add((ExpressionSegment) visit(each));
}
return result;
}
@Override
public final ASTNode visitWeightStringFunction(final WeightStringFunctionContext ctx) {
    // Register parameter markers inside the single argument, then attach it as a parameter.
    calculateParameterCount(Collections.singleton(ctx.expr()));
    FunctionSegment result = new FunctionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ctx.WEIGHT_STRING().getText(), getOriginalText(ctx));
    ExpressionSegment argument = (ExpressionSegment) visit(ctx.expr());
    result.getParameters().add(argument);
    return result;
}
@Override
public final ASTNode visitValuesFunction(final ValuesFunctionContext ctx) {
FunctionSegment result = new FunctionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ctx.VALUES().getText(), getOriginalText(ctx));
// Only the first referenced column is kept as the VALUES() parameter.
if (!ctx.columnRefList().columnRef().isEmpty()) {
ColumnSegment columnSegment = (ColumnSegment) visit(ctx.columnRefList().columnRef(0));
result.getParameters().add(columnSegment);
}
return result;
}
@Override
public final ASTNode visitCurrentUserFunction(final CurrentUserFunctionContext ctx) {
// CURRENT_USER takes no arguments; only the name and original text are recorded.
return new FunctionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ctx.CURRENT_USER().getText(), getOriginalText(ctx));
}
@Override
public final ASTNode visitRegularFunction(final RegularFunctionContext ctx) {
    // A regular function is either the complete form (name + arguments) or a shorthand keyword form.
    if (null != ctx.completeRegularFunction()) {
        return visit(ctx.completeRegularFunction());
    }
    return visit(ctx.shorthandRegularFunction());
}
@Override
public ASTNode visitCompleteRegularFunction(final CompleteRegularFunctionContext ctx) {
    // Collect each argument expression as a parameter, preserving source order.
    FunctionSegment result = new FunctionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.regularFunctionName().getText(), getOriginalText(ctx));
    for (ExprContext argument : ctx.expr()) {
        result.getParameters().add((ExpressionSegment) visit(argument));
    }
    return result;
}
@Override
public ASTNode visitShorthandRegularFunction(final ShorthandRegularFunctionContext ctx) {
String text = getOriginalText(ctx);
FunctionSegment result;
// CURRENT_TIME may carry an optional fractional-seconds precision argument.
if (null != ctx.CURRENT_TIME()) {
result = new FunctionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.CURRENT_TIME().getText(), text);
if (null != ctx.NUMBER_()) {
result.getParameters().add(new LiteralExpressionSegment(ctx.NUMBER_().getSymbol().getStartIndex(), ctx.NUMBER_().getSymbol().getStopIndex(),
new NumberLiteralValue(ctx.NUMBER_().getText())));
}
} else {
// Other shorthand functions are recorded with their full text as the name.
result = new FunctionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.getText(), text);
}
return result;
}
/**
 * Fallback for simple-expression forms not handled in {@code visitSimpleExpr}; unmatched forms become a common expression segment.
 */
private ASTNode visitRemainSimpleExpr(final SimpleExprContext ctx) {
if (null != ctx.caseExpression()) {
return visit(ctx.caseExpression());
}
// BINARY prefix is transparent: return the operand's expression.
if (null != ctx.BINARY()) {
return visit(ctx.simpleExpr(0));
}
if (null != ctx.variable()) {
return visit(ctx.variable());
}
// Visit children for side effects (e.g. parameter-marker registration); results are discarded.
for (ExprContext each : ctx.expr()) {
visit(each);
}
for (SimpleExprContext each : ctx.simpleExpr()) {
visit(each);
}
String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex()));
return new CommonExpressionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), text);
}
@Override
public ASTNode visitCaseExpression(final CaseExpressionContext ctx) {
// WHEN and THEN expressions are collected pairwise in source order.
Collection<ExpressionSegment> whenExprs = new LinkedList<>();
Collection<ExpressionSegment> thenExprs = new LinkedList<>();
for (CaseWhenContext each : ctx.caseWhen()) {
whenExprs.add((ExpressionSegment) visit(each.expr(0)));
thenExprs.add((ExpressionSegment) visit(each.expr(1)));
}
// The case operand (CASE x WHEN ...) and ELSE branch are both optional.
ExpressionSegment caseExpr = null == ctx.simpleExpr() ? null : (ExpressionSegment) visit(ctx.simpleExpr());
ExpressionSegment elseExpr = null == ctx.caseElse() ? null : (ExpressionSegment) visit(ctx.caseElse().expr());
return new CaseWhenExpression(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), caseExpr, whenExprs, thenExprs, elseExpr);
}
@Override
public ASTNode visitVariable(final VariableContext ctx) {
    // A variable reference is either a system variable (@@...) or a user variable (@...).
    if (null != ctx.systemVariable()) {
        return visit(ctx.systemVariable());
    }
    return visit(ctx.userVariable());
}
@Override
public ASTNode visitUserVariable(final UserVariableContext ctx) {
// User variable name comes from the text-or-identifier child; no scope applies.
return new VariableSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.textOrIdentifier().getText());
}
@Override
public ASTNode visitSystemVariable(final SystemVariableContext ctx) {
VariableSegment result = new VariableSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.rvalueSystemVariable().getText());
// Optional scope qualifier, e.g. GLOBAL or SESSION.
if (null != ctx.systemVariableScope) {
result.setScope(ctx.systemVariableScope.getText());
}
return result;
}
@Override
public final ASTNode visitMatchExpression(final MatchExpressionContext ctx) {
// Visit the AGAINST expression for side effects only; the whole MATCH ... AGAINST text is kept as a common expression.
visit(ctx.expr());
String text = ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex()));
return new CommonExpressionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), text);
}
/**
 * Visits every given expression for its side effects — registering any parameter markers it contains.
 */
private void calculateParameterCount(final Collection<ExprContext> exprContexts) {
    exprContexts.forEach(this::visit);
}
@Override
public final ASTNode visitDataType(final DataTypeContext ctx) {
DataTypeSegment result = new DataTypeSegment();
result.setDataTypeName(ctx.dataTypeName.getText());
result.setStartIndex(ctx.start.getStartIndex());
result.setStopIndex(ctx.stop.getStopIndex());
// A data type carries at most one length spec: either a plain field length or a precision/scale pair.
if (null != ctx.fieldLength()) {
DataTypeLengthSegment dataTypeLengthSegment = (DataTypeLengthSegment) visit(ctx.fieldLength());
result.setDataLength(dataTypeLengthSegment);
}
if (null != ctx.precision()) {
DataTypeLengthSegment dataTypeLengthSegment = (DataTypeLengthSegment) visit(ctx.precision());
result.setDataLength(dataTypeLengthSegment);
}
return result;
}
@Override
public ASTNode visitFieldLength(final FieldLengthContext ctx) {
    DataTypeLengthSegment result = new DataTypeLengthSegment();
    result.setStartIndex(ctx.start.getStartIndex());
    // Fix: use the stop token's stop index, consistent with visitCastType/visitDataType.
    // The previous getStartIndex() truncated the span whenever the final token was longer than one character.
    result.setStopIndex(ctx.stop.getStopIndex());
    // BigDecimal tolerates length literals written in exponential or decimal form.
    result.setPrecision(new BigDecimal(ctx.length.getText()).intValue());
    return result;
}
@Override
public ASTNode visitPrecision(final PrecisionContext ctx) {
    DataTypeLengthSegment result = new DataTypeLengthSegment();
    result.setStartIndex(ctx.start.getStartIndex());
    // Fix: use the stop token's stop index, consistent with visitCastType/visitDataType.
    // The previous getStartIndex() truncated the span whenever the final token was longer than one character.
    result.setStopIndex(ctx.stop.getStopIndex());
    // Assumes the rule yields exactly two NUMBER_ tokens (precision, scale) — grammar-enforced; verify.
    List<TerminalNode> numbers = ctx.NUMBER_();
    result.setPrecision(Integer.parseInt(numbers.get(0).getText()));
    result.setScale(Integer.parseInt(numbers.get(1).getText()));
    return result;
}
@Override
public ASTNode visitTypeDatetimePrecision(final TypeDatetimePrecisionContext ctx) {
    DataTypeLengthSegment result = new DataTypeLengthSegment();
    result.setStartIndex(ctx.start.getStartIndex());
    // Fix: use the stop token's stop index, consistent with visitCastType/visitDataType.
    // The previous getStartIndex() truncated the span whenever the final token was longer than one character.
    result.setStopIndex(ctx.stop.getStopIndex());
    result.setPrecision(Integer.parseInt(ctx.NUMBER_().getText()));
    return result;
}
@Override
public final ASTNode visitOrderByClause(final OrderByClauseContext ctx) {
    // Each order-by item is visited in source order and collected into the segment.
    Collection<OrderByItemSegment> orderByItems = new LinkedList<>();
    ctx.orderByItem().forEach(each -> orderByItems.add((OrderByItemSegment) visit(each)));
    return new OrderBySegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), orderByItems);
}
@Override
public final ASTNode visitOrderByItem(final OrderByItemContext ctx) {
// Default direction is ASC when no direction keyword is present.
OrderDirection orderDirection;
if (null != ctx.direction()) {
orderDirection = null != ctx.direction().DESC() ? OrderDirection.DESC : OrderDirection.ASC;
} else {
orderDirection = OrderDirection.ASC;
}
// Numeric items order by projection index (e.g. ORDER BY 2).
if (null != ctx.numberLiterals()) {
return new IndexOrderByItemSegment(ctx.numberLiterals().getStart().getStartIndex(), ctx.numberLiterals().getStop().getStopIndex(),
SQLUtils.getExactlyNumber(ctx.numberLiterals().getText(), 10).intValue(), orderDirection, null);
} else {
// Column references become column items; any other expression is kept as an expression item.
ASTNode expr = visitExpr(ctx.expr());
if (expr instanceof ColumnSegment) {
return new ColumnOrderByItemSegment((ColumnSegment) expr, orderDirection, null);
} else {
return new ExpressionOrderByItemSegment(ctx.expr().getStart().getStartIndex(),
ctx.expr().getStop().getStopIndex(), getOriginalText(ctx.expr()), orderDirection, null, (ExpressionSegment) expr);
}
}
}
@Override
public ASTNode visitInsert(final InsertContext ctx) {
// INSERT has three mutually exclusive forms: VALUES, SELECT, or SET assignments.
MySQLInsertStatement result;
if (null != ctx.insertValuesClause()) {
result = (MySQLInsertStatement) visit(ctx.insertValuesClause());
} else if (null != ctx.insertSelectClause()) {
result = (MySQLInsertStatement) visit(ctx.insertSelectClause());
} else {
result = new MySQLInsertStatement();
result.setSetAssignment((SetAssignmentSegment) visit(ctx.setAssignmentsClause()));
}
if (null != ctx.onDuplicateKeyClause()) {
result.setOnDuplicateKeyColumns((OnDuplicateKeyColumnsSegment) visit(ctx.onDuplicateKeyClause()));
}
result.setTable((SimpleTableSegment) visit(ctx.tableName()));
// Attach all parameter markers collected while visiting this statement.
result.getParameterMarkerSegments().addAll(getParameterMarkerSegments());
return result;
}
@Override
public ASTNode visitInsertSelectClause(final InsertSelectClauseContext ctx) {
MySQLInsertStatement result = new MySQLInsertStatement();
if (null != ctx.LP_()) {
if (null != ctx.fields()) {
result.setInsertColumns(new InsertColumnsSegment(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex(), createInsertColumns(ctx.fields())));
} else {
// Empty parentheses: an explicit but empty column list.
result.setInsertColumns(new InsertColumnsSegment(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex(), Collections.emptyList()));
}
} else {
// No column list at all: use a zero-width segment just before the clause for later rewriting.
result.setInsertColumns(new InsertColumnsSegment(ctx.start.getStartIndex() - 1, ctx.start.getStartIndex() - 1, Collections.emptyList()));
}
result.setInsertSelect(createInsertSelectSegment(ctx));
return result;
}
// Wraps the INSERT ... SELECT source query into a subquery segment spanning the SELECT text.
private SubquerySegment createInsertSelectSegment(final InsertSelectClauseContext ctx) {
    SelectContext selectContext = ctx.select();
    MySQLSelectStatement select = (MySQLSelectStatement) visit(selectContext);
    return new SubquerySegment(selectContext.start.getStartIndex(), selectContext.stop.getStopIndex(), select);
}
/**
 * Builds the INSERT ... VALUES form: the optional parenthesized column list
 * plus every VALUES row group.
 */
@Override
public ASTNode visitInsertValuesClause(final InsertValuesClauseContext ctx) {
MySQLInsertStatement result = new MySQLInsertStatement();
if (null != ctx.LP_()) {
if (null != ctx.fields()) {
result.setInsertColumns(new InsertColumnsSegment(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex(), createInsertColumns(ctx.fields())));
} else {
// "()" written with no columns inside.
result.setInsertColumns(new InsertColumnsSegment(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex(), Collections.emptyList()));
}
} else {
// No column list at all: use a zero-width segment just before the clause start.
result.setInsertColumns(new InsertColumnsSegment(ctx.start.getStartIndex() - 1, ctx.start.getStartIndex() - 1, Collections.emptyList()));
}
result.getValues().addAll(createInsertValuesSegments(ctx.assignmentValues()));
return result;
}
// Converts each parsed VALUES row group into an insert-values segment, keeping row order.
private Collection<InsertValuesSegment> createInsertValuesSegments(final Collection<AssignmentValuesContext> assignmentValuesContexts) {
    Collection<InsertValuesSegment> result = new LinkedList<>();
    assignmentValuesContexts.forEach(valuesContext -> result.add((InsertValuesSegment) visit(valuesContext)));
    return result;
}
/**
 * Builds the ON DUPLICATE KEY UPDATE segment from its assignment list.
 */
@Override
public ASTNode visitOnDuplicateKeyClause(final OnDuplicateKeyClauseContext ctx) {
    Collection<AssignmentSegment> assignmentSegments = new LinkedList<>();
    ctx.assignment().forEach(assignmentContext -> assignmentSegments.add((AssignmentSegment) visit(assignmentContext)));
    return new OnDuplicateKeyColumnsSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), assignmentSegments);
}
/**
 * Builds a MySQL REPLACE statement. REPLACE shares the INSERT statement model
 * (VALUES, SELECT, or SET form) but has no ON DUPLICATE KEY clause.
 */
@Override
public ASTNode visitReplace(final ReplaceContext ctx) {
MySQLInsertStatement result;
if (null != ctx.replaceValuesClause()) {
result = (MySQLInsertStatement) visit(ctx.replaceValuesClause());
} else if (null != ctx.replaceSelectClause()) {
result = (MySQLInsertStatement) visit(ctx.replaceSelectClause());
} else {
// REPLACE ... SET col = value form.
result = new MySQLInsertStatement();
result.setSetAssignment((SetAssignmentSegment) visit(ctx.setAssignmentsClause()));
}
result.setTable((SimpleTableSegment) visit(ctx.tableName()));
result.getParameterMarkerSegments().addAll(getParameterMarkerSegments());
return result;
}
/**
 * Builds the REPLACE ... SELECT form: the optional parenthesized column list
 * plus the source SELECT as a subquery.
 */
@Override
public ASTNode visitReplaceSelectClause(final ReplaceSelectClauseContext ctx) {
MySQLInsertStatement result = new MySQLInsertStatement();
if (null != ctx.LP_()) {
if (null != ctx.fields()) {
result.setInsertColumns(new InsertColumnsSegment(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex(), createInsertColumns(ctx.fields())));
} else {
// "()" written with no columns inside.
result.setInsertColumns(new InsertColumnsSegment(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex(), Collections.emptyList()));
}
} else {
// No column list at all: use a zero-width segment just before the clause start.
result.setInsertColumns(new InsertColumnsSegment(ctx.start.getStartIndex() - 1, ctx.start.getStartIndex() - 1, Collections.emptyList()));
}
result.setInsertSelect(createReplaceSelectSegment(ctx));
return result;
}
// Wraps the REPLACE ... SELECT source query into a subquery segment spanning the SELECT text.
private SubquerySegment createReplaceSelectSegment(final ReplaceSelectClauseContext ctx) {
    SelectContext selectContext = ctx.select();
    MySQLSelectStatement select = (MySQLSelectStatement) visit(selectContext);
    return new SubquerySegment(selectContext.start.getStartIndex(), selectContext.stop.getStopIndex(), select);
}
/**
 * Builds the REPLACE ... VALUES form: the optional parenthesized column list
 * plus every VALUES row group.
 */
@Override
public ASTNode visitReplaceValuesClause(final ReplaceValuesClauseContext ctx) {
MySQLInsertStatement result = new MySQLInsertStatement();
if (null != ctx.LP_()) {
if (null != ctx.fields()) {
result.setInsertColumns(new InsertColumnsSegment(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex(), createInsertColumns(ctx.fields())));
} else {
// "()" written with no columns inside.
result.setInsertColumns(new InsertColumnsSegment(ctx.LP_().getSymbol().getStartIndex(), ctx.RP_().getSymbol().getStopIndex(), Collections.emptyList()));
}
} else {
// No column list at all: use a zero-width segment just before the clause start.
result.setInsertColumns(new InsertColumnsSegment(ctx.start.getStartIndex() - 1, ctx.start.getStartIndex() - 1, Collections.emptyList()));
}
result.getValues().addAll(createInsertValuesSegments(ctx.assignmentValues()));
return result;
}
// Collects the explicit insert column list from the parsed fields, preserving order.
private List<ColumnSegment> createInsertColumns(final FieldsContext fields) {
    List<ColumnSegment> columns = new LinkedList<>();
    fields.insertIdentifier().forEach(identifierContext -> columns.add((ColumnSegment) visit(identifierContext)));
    return columns;
}
/**
 * Builds a MySQL UPDATE statement: target table(s), SET assignments, and the
 * optional WHERE, ORDER BY and LIMIT clauses.
 */
@Override
public ASTNode visitUpdate(final UpdateContext ctx) {
MySQLUpdateStatement result = new MySQLUpdateStatement();
TableSegment tableSegment = (TableSegment) visit(ctx.tableReferences());
result.setTable(tableSegment);
result.setSetAssignment((SetAssignmentSegment) visit(ctx.setAssignmentsClause()));
if (null != ctx.whereClause()) {
result.setWhere((WhereSegment) visit(ctx.whereClause()));
}
if (null != ctx.orderByClause()) {
result.setOrderBy((OrderBySegment) visit(ctx.orderByClause()));
}
if (null != ctx.limitClause()) {
result.setLimit((LimitSegment) visit(ctx.limitClause()));
}
// Attach every '?' placeholder collected while visiting this statement.
result.getParameterMarkerSegments().addAll(getParameterMarkerSegments());
return result;
}
/**
 * Builds the SET assignment segment covering every column assignment in the clause.
 */
@Override
public ASTNode visitSetAssignmentsClause(final SetAssignmentsClauseContext ctx) {
    Collection<AssignmentSegment> assignmentSegments = new LinkedList<>();
    ctx.assignment().forEach(assignmentContext -> assignmentSegments.add((AssignmentSegment) visit(assignmentContext)));
    return new SetAssignmentSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), assignmentSegments);
}
/**
 * Builds one VALUES row: each value expression in declaration order.
 */
@Override
public ASTNode visitAssignmentValues(final AssignmentValuesContext ctx) {
    List<ExpressionSegment> valueExpressions = new LinkedList<>();
    ctx.assignmentValue().forEach(valueContext -> valueExpressions.add((ExpressionSegment) visit(valueContext)));
    return new InsertValuesSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), valueExpressions);
}
/**
 * Builds a single column assignment ({@code col = value}) segment.
 */
@Override
public ASTNode visitAssignment(final AssignmentContext ctx) {
    // Visit order matters: the column reference is visited before the value,
    // matching the order parameter markers are collected in.
    List<ColumnSegment> columns = new LinkedList<>();
    columns.add((ColumnSegment) visit(ctx.columnRef()));
    ExpressionSegment assignedValue = (ExpressionSegment) visit(ctx.assignmentValue());
    return new ColumnAssignmentSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), columns, assignedValue);
}
/**
 * Builds the value side of an assignment. Expressions that resolve to a bare
 * column reference are normalized to a common expression over the original text;
 * a missing expression (e.g. DEFAULT) also becomes a common expression.
 */
@Override
public ASTNode visitAssignmentValue(final AssignmentValueContext ctx) {
    ExprContext expr = ctx.expr();
    if (null == expr) {
        return new CommonExpressionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ctx.getText());
    }
    ASTNode visited = visit(expr);
    return visited instanceof ColumnSegment
            ? new CommonExpressionSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ctx.getText())
            : visited;
}
/**
 * Builds a blob value as a string literal over the quoted text.
 */
@Override
public ASTNode visitBlobValue(final BlobValueContext ctx) {
return new StringLiteralValue(ctx.string_().getText());
}
/**
 * Builds a MySQL DELETE statement in either its single-table or multi-table
 * form, with the optional WHERE, ORDER BY and LIMIT clauses.
 */
@Override
public ASTNode visitDelete(final DeleteContext ctx) {
MySQLDeleteStatement result = new MySQLDeleteStatement();
if (null != ctx.multipleTablesClause()) {
result.setTable((TableSegment) visit(ctx.multipleTablesClause()));
} else {
result.setTable((TableSegment) visit(ctx.singleTableClause()));
}
if (null != ctx.whereClause()) {
result.setWhere((WhereSegment) visit(ctx.whereClause()));
}
if (null != ctx.orderByClause()) {
result.setOrderBy((OrderBySegment) visit(ctx.orderByClause()));
}
if (null != ctx.limitClause()) {
result.setLimit((LimitSegment) visit(ctx.limitClause()));
}
// Attach every '?' placeholder collected while visiting this statement.
result.getParameterMarkerSegments().addAll(getParameterMarkerSegments());
return result;
}
/**
 * Builds the target table of a single-table DELETE, attaching its alias if present.
 */
@Override
public ASTNode visitSingleTableClause(final SingleTableClauseContext ctx) {
SimpleTableSegment result = (SimpleTableSegment) visit(ctx.tableName());
if (null != ctx.alias()) {
result.setAlias((AliasSegment) visit(ctx.alias()));
}
return result;
}
/**
 * Builds the multi-table DELETE target: the joined relation providing rows,
 * plus the list of tables actually deleted from.
 */
@Override
public ASTNode visitMultipleTablesClause(final MultipleTablesClauseContext ctx) {
    DeleteMultiTableSegment result = new DeleteMultiTableSegment();
    result.setRelationTable((TableSegment) visit(ctx.tableReferences()));
    result.setActualDeleteTables(generateTablesFromTableAliasRefList(ctx.tableAliasRefList()));
    return result;
}
// Resolves each entry in the table alias-ref list to a simple table segment, in order.
private List<SimpleTableSegment> generateTablesFromTableAliasRefList(final TableAliasRefListContext ctx) {
    List<SimpleTableSegment> tables = new LinkedList<>();
    ctx.tableIdentOptWild().forEach(tableContext -> tables.add((SimpleTableSegment) visit(tableContext.tableName())));
    return tables;
}
/**
 * Builds a MySQL SELECT statement from a query expression (optionally locked),
 * a SELECT ... INTO form, or the first child rule otherwise.
 */
@Override
public ASTNode visitSelect(final SelectContext ctx) {
MySQLSelectStatement result;
if (null != ctx.queryExpression()) {
result = (MySQLSelectStatement) visit(ctx.queryExpression());
if (null != ctx.lockClauseList()) {
result.setLock((LockSegment) visit(ctx.lockClauseList()));
}
} else if (null != ctx.selectWithInto()) {
result = (MySQLSelectStatement) visit(ctx.selectWithInto());
} else {
// Fallback: delegate to whichever alternative rule matched.
result = (MySQLSelectStatement) visit(ctx.getChild(0));
}
// Attach every '?' placeholder collected while visiting this statement.
result.getParameterMarkerSegments().addAll(getParameterMarkerSegments());
return result;
}
// A query is distinct when any of its select specifications evaluates to DISTINCT.
// anyMatch short-circuits exactly like the original early return did.
private boolean isDistinct(final QuerySpecificationContext ctx) {
    return ctx.selectSpecification().stream().anyMatch(each -> ((BooleanLiteralValue) visit(each)).getValue());
}
/**
 * Evaluates a single select specification; only a duplicate specification
 * (DISTINCT and friends) can yield {@code true}.
 */
@Override
public ASTNode visitSelectSpecification(final SelectSpecificationContext ctx) {
    return null == ctx.duplicateSpecification() ? new BooleanLiteralValue(false) : visit(ctx.duplicateSpecification());
}
/**
 * Maps a duplicate-specification keyword to a boolean literal:
 * DISTINCT and DISTINCTROW are {@code true}, anything else (e.g. ALL) is {@code false}.
 */
@Override
public ASTNode visitDuplicateSpecification(final DuplicateSpecificationContext ctx) {
    String keyword = ctx.getText();
    boolean distinct = "DISTINCT".equalsIgnoreCase(keyword) || "DISTINCTROW".equalsIgnoreCase(keyword);
    return new BooleanLiteralValue(distinct);
}
/**
 * Builds the projections segment, adding an unqualified {@code *} shorthand
 * first when present, then every explicit projection in order.
 */
@Override
public ASTNode visitProjections(final ProjectionsContext ctx) {
    Collection<ProjectionSegment> projectionSegments = new LinkedList<>();
    if (null != ctx.unqualifiedShorthand()) {
        projectionSegments.add(new ShorthandProjectionSegment(
                ctx.unqualifiedShorthand().getStart().getStartIndex(), ctx.unqualifiedShorthand().getStop().getStopIndex()));
    }
    ctx.projection().forEach(projectionContext -> projectionSegments.add((ProjectionSegment) visit(projectionContext)));
    ProjectionsSegment result = new ProjectionsSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex());
    result.getProjections().addAll(projectionSegments);
    return result;
}
/**
 * Builds one projection. Qualified shorthands ({@code t.*}), plain columns,
 * subqueries and EXISTS subqueries get dedicated segment types; everything
 * else is dispatched to {@link #createProjection}.
 */
@Override
public ASTNode visitProjection(final ProjectionContext ctx) {
if (null != ctx.qualifiedShorthand()) {
return createShorthandProjection(ctx.qualifiedShorthand());
}
AliasSegment alias = null == ctx.alias() ? null : (AliasSegment) visit(ctx.alias());
ASTNode exprProjection = visit(ctx.expr());
if (exprProjection instanceof ColumnSegment) {
ColumnProjectionSegment result = new ColumnProjectionSegment((ColumnSegment) exprProjection);
result.setAlias(alias);
return result;
}
if (exprProjection instanceof SubquerySegment) {
SubquerySegment subquerySegment = (SubquerySegment) exprProjection;
// Recover the original subquery text from the token stream for the projection.
String text = ctx.start.getInputStream().getText(new Interval(subquerySegment.getStartIndex(), subquerySegment.getStopIndex()));
SubqueryProjectionSegment result = new SubqueryProjectionSegment((SubquerySegment) exprProjection, text);
result.setAlias(alias);
return result;
}
if (exprProjection instanceof ExistsSubqueryExpression) {
ExistsSubqueryExpression existsSubqueryExpression = (ExistsSubqueryExpression) exprProjection;
String text = ctx.start.getInputStream().getText(new Interval(existsSubqueryExpression.getStartIndex(), existsSubqueryExpression.getStopIndex()));
SubqueryProjectionSegment result = new SubqueryProjectionSegment(((ExistsSubqueryExpression) exprProjection).getSubquery(), text);
result.setAlias(alias);
return result;
}
return createProjection(ctx, alias, exprProjection);
}
/**
 * Builds a qualified shorthand projection such as {@code t.*} or {@code schema.t.*}.
 * The last identifier is the owner; an extra leading identifier becomes the owner's owner.
 */
private ShorthandProjectionSegment createShorthandProjection(final QualifiedShorthandContext shorthand) {
ShorthandProjectionSegment result = new ShorthandProjectionSegment(shorthand.getStart().getStartIndex(), shorthand.getStop().getStopIndex());
IdentifierContext identifier = shorthand.identifier().get(shorthand.identifier().size() - 1);
OwnerSegment owner = new OwnerSegment(identifier.getStart().getStartIndex(), identifier.getStop().getStopIndex(), new IdentifierValue(identifier.getText()));
result.setOwner(owner);
// schema.table.* form: the first identifier qualifies the table owner.
if (shorthand.identifier().size() > 1) {
IdentifierContext schemaIdentifier = shorthand.identifier().get(0);
owner.setOwner(new OwnerSegment(schemaIdentifier.getStart().getStartIndex(), schemaIdentifier.getStop().getStopIndex(), new IdentifierValue(schemaIdentifier.getText())));
}
return result;
}
/**
 * Builds an alias segment from a plain-text or quoted identifier.
 */
@Override
public ASTNode visitAlias(final AliasContext ctx) {
return new AliasSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), new IdentifierValue(ctx.textOrIdentifier().getText()));
}
/**
 * Wraps an already-visited expression node into the projection segment type
 * matching its concrete class, attaching the optional alias. The final branch
 * assumes anything unmatched is a literal expression.
 *
 * @param ctx the projection parse context (used to recover original text)
 * @param alias alias segment, or {@code null} when no alias was written
 * @param projection the visited expression node to wrap
 * @return the projection segment
 */
private ASTNode createProjection(final ProjectionContext ctx, final AliasSegment alias, final ASTNode projection) {
if (projection instanceof AggregationProjectionSegment) {
((AggregationProjectionSegment) projection).setAlias(alias);
return projection;
}
if (projection instanceof ExpressionProjectionSegment) {
((ExpressionProjectionSegment) projection).setAlias(alias);
return projection;
}
if (projection instanceof FunctionSegment) {
FunctionSegment functionSegment = (FunctionSegment) projection;
ExpressionProjectionSegment result = new ExpressionProjectionSegment(functionSegment.getStartIndex(), functionSegment.getStopIndex(), functionSegment.getText(), functionSegment);
result.setAlias(alias);
return result;
}
if (projection instanceof CommonExpressionSegment) {
CommonExpressionSegment segment = (CommonExpressionSegment) projection;
ExpressionProjectionSegment result = new ExpressionProjectionSegment(segment.getStartIndex(), segment.getStopIndex(), segment.getText(), segment);
result.setAlias(alias);
return result;
}
if (projection instanceof ColumnSegment) {
ExpressionProjectionSegment result = new ExpressionProjectionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), getOriginalText(ctx), (ColumnSegment) projection);
result.setAlias(alias);
return result;
}
if (projection instanceof SubqueryExpressionSegment) {
SubqueryExpressionSegment subqueryExpressionSegment = (SubqueryExpressionSegment) projection;
// Recover the original subquery text from the token stream.
String text = ctx.start.getInputStream().getText(new Interval(subqueryExpressionSegment.getStartIndex(), subqueryExpressionSegment.getStopIndex()));
SubqueryProjectionSegment result = new SubqueryProjectionSegment(subqueryExpressionSegment.getSubquery(), text);
result.setAlias(alias);
return result;
}
if (projection instanceof BinaryOperationExpression) {
int startIndex = ((BinaryOperationExpression) projection).getStartIndex();
// The segment extends over the alias when one is present.
int stopIndex = null != alias ? alias.getStopIndex() : ((BinaryOperationExpression) projection).getStopIndex();
ExpressionProjectionSegment result = new ExpressionProjectionSegment(startIndex, stopIndex, ((BinaryOperationExpression) projection).getText(), (BinaryOperationExpression) projection);
result.setAlias(alias);
return result;
}
if (projection instanceof ParameterMarkerExpressionSegment) {
// Cleanup: set the alias directly on the segment; the former redundant
// local variable (assigned but never returned) was removed.
((ParameterMarkerExpressionSegment) projection).setAlias(alias);
return projection;
}
if (projection instanceof CaseWhenExpression) {
ExpressionProjectionSegment result = new ExpressionProjectionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), getOriginalText(ctx.expr()), (CaseWhenExpression) projection);
result.setAlias(alias);
return result;
}
if (projection instanceof VariableSegment) {
ExpressionProjectionSegment result = new ExpressionProjectionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), getOriginalText(ctx.expr()), (VariableSegment) projection);
result.setAlias(alias);
return result;
}
if (projection instanceof BetweenExpression) {
ExpressionProjectionSegment result = new ExpressionProjectionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), getOriginalText(ctx.expr()), (BetweenExpression) projection);
result.setAlias(alias);
return result;
}
// Fallback: treat anything else as a literal expression projection.
LiteralExpressionSegment column = (LiteralExpressionSegment) projection;
ExpressionProjectionSegment result = null == alias
? new ExpressionProjectionSegment(column.getStartIndex(), column.getStopIndex(), String.valueOf(column.getLiterals()), column)
: new ExpressionProjectionSegment(column.getStartIndex(), ctx.alias().stop.getStopIndex(), String.valueOf(column.getLiterals()), column);
result.setAlias(alias);
return result;
}
/**
 * Builds the FROM clause by delegating to the table references rule.
 */
@Override
public ASTNode visitFromClause(final FromClauseContext ctx) {
return visit(ctx.tableReferences());
}
/**
 * Builds the table source for a comma-separated reference list, folding each
 * additional reference into a comma join from left to right.
 */
@Override
public ASTNode visitTableReferences(final TableReferencesContext ctx) {
    TableSegment result = (TableSegment) visit(ctx.tableReference(0));
    // The loop body never executes for a single reference, so no size guard is needed.
    for (int i = 1; i < ctx.tableReference().size(); i++) {
        result = generateJoinTableSourceFromEscapedTableReference(ctx.tableReference(i), result);
    }
    return result;
}
/**
 * Wraps an extra comma-separated table reference into a COMMA join with the
 * already-built left-hand table source.
 */
private JoinTableSegment generateJoinTableSourceFromEscapedTableReference(final TableReferenceContext ctx, final TableSegment tableSegment) {
JoinTableSegment result = new JoinTableSegment();
result.setStartIndex(tableSegment.getStartIndex());
result.setStopIndex(ctx.stop.getStopIndex());
result.setLeft(tableSegment);
result.setJoinType(JoinType.COMMA.name());
result.setRight((TableSegment) visit(ctx));
return result;
}
/**
 * Builds the table source for an escaped table reference: the table factor
 * with any joined tables folded onto it from left to right.
 */
@Override
public ASTNode visitEscapedTableReference(final EscapedTableReferenceContext ctx) {
    TableSegment tableSource = (TableSegment) visit(ctx.tableFactor());
    for (JoinedTableContext joined : ctx.joinedTable()) {
        tableSource = visitJoinedTable(joined, tableSource);
    }
    return tableSource;
}
/**
 * Builds the table source for a table reference — a plain factor or an escaped
 * reference — with any joined tables folded onto it from left to right.
 */
@Override
public ASTNode visitTableReference(final TableReferenceContext ctx) {
    TableSegment tableSource = null == ctx.tableFactor()
            ? (TableSegment) visit(ctx.escapedTableReference())
            : (TableSegment) visit(ctx.tableFactor());
    for (JoinedTableContext joined : ctx.joinedTable()) {
        tableSource = visitJoinedTable(joined, tableSource);
    }
    return tableSource;
}
/**
 * Builds a table factor: a subquery table, a named table (each with optional
 * alias), or a parenthesized table-reference list.
 */
@Override
public ASTNode visitTableFactor(final TableFactorContext ctx) {
if (null != ctx.subquery()) {
MySQLSelectStatement subquery = (MySQLSelectStatement) visit(ctx.subquery());
SubquerySegment subquerySegment = new SubquerySegment(ctx.subquery().start.getStartIndex(), ctx.subquery().stop.getStopIndex(), subquery);
SubqueryTableSegment result = new SubqueryTableSegment(subquerySegment);
if (null != ctx.alias()) {
result.setAlias((AliasSegment) visit(ctx.alias()));
}
return result;
}
if (null != ctx.tableName()) {
SimpleTableSegment result = (SimpleTableSegment) visit(ctx.tableName());
if (null != ctx.alias()) {
result.setAlias((AliasSegment) visit(ctx.alias()));
}
return result;
}
// Parenthesized list of table references.
return visit(ctx.tableReferences());
}
/**
 * Folds one joined-table clause onto the accumulated left-hand table source,
 * attaching the join type, NATURAL flag, right side and join specification.
 */
private JoinTableSegment visitJoinedTable(final JoinedTableContext ctx, final TableSegment tableSegment) {
JoinTableSegment result = new JoinTableSegment();
result.setLeft(tableSegment);
result.setStartIndex(tableSegment.getStartIndex());
result.setStopIndex(ctx.stop.getStopIndex());
result.setJoinType(getJoinType(ctx));
result.setNatural(null != ctx.naturalJoinType());
TableSegment right = null != ctx.tableFactor() ? (TableSegment) visit(ctx.tableFactor()) : (TableSegment) visit(ctx.tableReference());
result.setRight(right);
// ON / USING is optional for some join forms.
return null != ctx.joinSpecification() ? visitJoinSpecification(ctx.joinSpecification(), result) : result;
}
// Derives the join type for a joined-table context; a bare comma maps to a COMMA join.
private String getJoinType(final JoinedTableContext ctx) {
    if (null != ctx.innerJoinType()) {
        return JoinType.INNER.name();
    }
    if (null != ctx.outerJoinType()) {
        return null == ctx.outerJoinType().LEFT() ? JoinType.RIGHT.name() : JoinType.LEFT.name();
    }
    return null == ctx.naturalJoinType() ? JoinType.COMMA.name() : getNaturalJoinType(ctx.naturalJoinType());
}
// NATURAL joins default to INNER unless LEFT or RIGHT is written explicitly.
private String getNaturalJoinType(final NaturalJoinTypeContext ctx) {
    if (null != ctx.LEFT()) {
        return JoinType.LEFT.name();
    }
    return null == ctx.RIGHT() ? JoinType.INNER.name() : JoinType.RIGHT.name();
}
/**
 * Attaches the join specification to the join segment: an ON condition
 * expression and/or the USING column list.
 */
private JoinTableSegment visitJoinSpecification(final JoinSpecificationContext ctx, final JoinTableSegment result) {
if (null != ctx.expr()) {
ExpressionSegment condition = (ExpressionSegment) visit(ctx.expr());
result.setCondition(condition);
}
if (null != ctx.USING()) {
result.setUsing(ctx.columnNames().columnName().stream().map(each -> (ColumnSegment) visit(each)).collect(Collectors.toList()));
}
return result;
}
/**
 * Builds the WHERE segment wrapping the parsed predicate expression.
 */
@Override
public ASTNode visitWhereClause(final WhereClauseContext ctx) {
    ExpressionSegment predicate = (ExpressionSegment) visit(ctx.expr());
    return new WhereSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), predicate);
}
/**
 * Builds the GROUP BY segment from every grouping item in the clause.
 */
@Override
public ASTNode visitGroupByClause(final GroupByClauseContext ctx) {
    Collection<OrderByItemSegment> groupByItems = new LinkedList<>();
    ctx.orderByItem().forEach(itemContext -> groupByItems.add((OrderByItemSegment) visit(itemContext)));
    return new GroupBySegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), groupByItems);
}
/**
 * Builds the LIMIT segment. MySQL allows {@code LIMIT rowCount},
 * {@code LIMIT rowCount OFFSET offset}, and {@code LIMIT offset, rowCount};
 * the OFFSET keyword decides which operand is visited first.
 */
@Override
public ASTNode visitLimitClause(final LimitClauseContext ctx) {
if (null == ctx.limitOffset()) {
return new LimitSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), null, (PaginationValueSegment) visit(ctx.limitRowCount()));
}
PaginationValueSegment rowCount;
PaginationValueSegment offset;
// Visit order follows textual order so parameter markers keep their positions.
if (null != ctx.OFFSET()) {
rowCount = (PaginationValueSegment) visit(ctx.limitRowCount());
offset = (PaginationValueSegment) visit(ctx.limitOffset());
} else {
offset = (PaginationValueSegment) visit(ctx.limitOffset());
rowCount = (PaginationValueSegment) visit(ctx.limitRowCount());
}
return new LimitSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), offset, rowCount);
}
/**
 * Builds the LIMIT row-count value: a number literal or a '?' parameter marker
 * (which is also recorded in the statement's marker list).
 */
@Override
public ASTNode visitLimitRowCount(final LimitRowCountContext ctx) {
if (null != ctx.numberLiterals()) {
return new NumberLiteralLimitValueSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ((NumberLiteralValue) visit(ctx.numberLiterals())).getValue().longValue());
}
ParameterMarkerSegment result = new ParameterMarkerLimitValueSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(),
((ParameterMarkerValue) visit(ctx.parameterMarker())).getValue());
parameterMarkerSegments.add(result);
return result;
}
/**
 * Builds a constraint-name segment from its identifier.
 */
@Override
public final ASTNode visitConstraintName(final ConstraintNameContext ctx) {
return new ConstraintSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), (IdentifierValue) visit(ctx.identifier()));
}
/**
 * Builds the LIMIT offset value: a number literal or a '?' parameter marker
 * (which is also recorded in the statement's marker list).
 */
@Override
public ASTNode visitLimitOffset(final LimitOffsetContext ctx) {
if (null != ctx.numberLiterals()) {
return new NumberLiteralLimitValueSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(), ((NumberLiteralValue) visit(ctx.numberLiterals())).getValue().longValue());
}
ParameterMarkerSegment result = new ParameterMarkerLimitValueSegment(ctx.getStart().getStartIndex(), ctx.getStop().getStopIndex(),
((ParameterMarkerValue) visit(ctx.parameterMarker())).getValue());
parameterMarkerSegments.add(result);
return result;
}
/**
 * Builds a COLLATE clause value: the collation name as a literal expression,
 * or a '?' parameter marker (also recorded in the statement's marker list).
 */
@Override
public ASTNode visitCollateClause(final CollateClauseContext ctx) {
if (null != ctx.collationName()) {
return new LiteralExpressionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(), ctx.collationName().textOrIdentifier().getText());
}
ParameterMarkerExpressionSegment segment = new ParameterMarkerExpressionSegment(ctx.start.getStartIndex(), ctx.stop.getStopIndex(),
((ParameterMarkerValue) visit(ctx.parameterMarker())).getValue());
parameterMarkerSegments.add(segment);
return segment;
}
/**
 * Gets the original SQL text covered by a parse-rule context, read directly
 * from the underlying token input stream (preserving whitespace and case).
 *
 * @param ctx context
 * @return original text
 */
protected String getOriginalText(final ParserRuleContext ctx) {
return ctx.start.getInputStream().getText(new Interval(ctx.start.getStartIndex(), ctx.stop.getStopIndex()));
}
}
|
Could we avoid one of the `& allSelectedMask` operations here? It seems relevant only for the `ALL` selection. We could instead apply `allSelectedMask` eagerly when the selection is assigned, i.e. store `new InputSelection(-1 & allSelectedMask)` rather than the raw `ALL`.
|
// True when the current selection still includes an existing input that is not
// currently available — i.e. another input could become available later.
// NOTE(review): `allSelectedMask` clips the selection mask to existing inputs;
// presumably only relevant when the selection is ALL (mask -1) — confirm.
boolean shouldSetAvailableForAnotherInput() {
return (inputSelection.getInputMask() & allSelectedMask & ~availableInputsMask) != 0;
}
|
return (inputSelection.getInputMask() & allSelectedMask & ~availableInputsMask) != 0;
|
// True when the current selection still includes an existing input that is not
// currently available — i.e. another input could become available later.
// NOTE(review): `allSelectedMask` clips the selection mask to existing inputs;
// presumably only relevant when the selection is ALL (mask -1) — confirm.
boolean shouldSetAvailableForAnotherInput() {
return (inputSelection.getInputMask() & allSelectedMask & ~availableInputsMask) != 0;
}
|
class MultipleInputSelectionHandler {
public static final int MAX_SUPPORTED_INPUT_COUNT = Long.SIZE;
@Nullable
private final InputSelectable inputSelector;
private InputSelection inputSelection = InputSelection.ALL;
private final long allSelectedMask;
private long availableInputsMask;
private long notFinishedInputsMask;
public MultipleInputSelectionHandler(@Nullable InputSelectable inputSelectable, int inputCount) {
checkSupportedInputCount(inputCount);
this.inputSelector = inputSelectable;
this.allSelectedMask = (1 << inputCount) - 1;
this.availableInputsMask = allSelectedMask;
this.notFinishedInputsMask = allSelectedMask;
}
public static void checkSupportedInputCount(int inputCount) {
checkArgument(
inputCount <= MAX_SUPPORTED_INPUT_COUNT,
"Only up to %d inputs are supported at once, while encountered %d",
MAX_SUPPORTED_INPUT_COUNT,
inputCount);
}
public InputStatus updateStatus(InputStatus inputStatus, int inputIndex) throws IOException {
switch (inputStatus) {
case MORE_AVAILABLE:
checkState(checkBitMask(availableInputsMask, inputIndex));
return InputStatus.MORE_AVAILABLE;
case NOTHING_AVAILABLE:
availableInputsMask = unsetBitMask(availableInputsMask, inputIndex);
break;
case END_OF_INPUT:
notFinishedInputsMask = unsetBitMask(notFinishedInputsMask, inputIndex);
break;
default:
throw new UnsupportedOperationException("Unsupported inputStatus = " + inputStatus);
}
return calculateOverallStatus();
}
public InputStatus calculateOverallStatus() throws IOException {
if (areAllInputsFinished()) {
return InputStatus.END_OF_INPUT;
}
if (isAnyInputAvailable()) {
return InputStatus.MORE_AVAILABLE;
}
else {
long selectedNotFinishedInputMask = inputSelection.getInputMask() & notFinishedInputsMask;
if (selectedNotFinishedInputMask == 0) {
throw new IOException("Can not make a progress: all selected inputs are already finished");
}
return InputStatus.NOTHING_AVAILABLE;
}
}
void nextSelection() {
if (inputSelector == null) {
inputSelection = InputSelection.ALL;
} else {
inputSelection = inputSelector.nextSelection();
}
}
int selectNextInputIndex(int lastReadInputIndex) {
return inputSelection.fairSelectNextIndex(
availableInputsMask & notFinishedInputsMask,
lastReadInputIndex);
}
void setAvailableInput(int inputIndex) {
availableInputsMask = setBitMask(availableInputsMask, inputIndex);
}
void setUnavailableInput(int inputIndex) {
availableInputsMask = unsetBitMask(availableInputsMask, inputIndex);
}
boolean isAnyInputAvailable() {
return (inputSelection.getInputMask() & availableInputsMask & notFinishedInputsMask) != 0;
}
boolean areAllInputsSelected() {
return inputSelection.areAllInputsSelected();
}
boolean isInputSelected(int inputIndex) {
return inputSelection.isInputSelected(inputIndex + 1);
}
public boolean isInputFinished(int inputIndex) {
return !checkBitMask(notFinishedInputsMask, inputIndex);
}
public boolean areAllInputsFinished() {
return notFinishedInputsMask == 0;
}
long setBitMask(long mask, int inputIndex) {
return mask | 1L << inputIndex;
}
long unsetBitMask(long mask, int inputIndex) {
return mask & ~(1L << inputIndex);
}
boolean checkBitMask(long mask, int inputIndex) {
return (mask & (1L << inputIndex)) != 0;
}
}
|
class MultipleInputSelectionHandler {
public static final int MAX_SUPPORTED_INPUT_COUNT = Long.SIZE;
@Nullable
private final InputSelectable inputSelectable;
private InputSelection inputSelection = InputSelection.ALL;
private final long allSelectedMask;
private long availableInputsMask;
private long notFinishedInputsMask;
public MultipleInputSelectionHandler(@Nullable InputSelectable inputSelectable, int inputCount) {
checkSupportedInputCount(inputCount);
this.inputSelectable = inputSelectable;
this.allSelectedMask = (1 << inputCount) - 1;
this.availableInputsMask = allSelectedMask;
this.notFinishedInputsMask = allSelectedMask;
}
public static void checkSupportedInputCount(int inputCount) {
checkArgument(
inputCount <= MAX_SUPPORTED_INPUT_COUNT,
"Only up to %d inputs are supported at once, while encountered %d",
MAX_SUPPORTED_INPUT_COUNT,
inputCount);
}
public InputStatus updateStatus(InputStatus inputStatus, int inputIndex) throws IOException {
switch (inputStatus) {
case MORE_AVAILABLE:
checkState(checkBitMask(availableInputsMask, inputIndex));
return InputStatus.MORE_AVAILABLE;
case NOTHING_AVAILABLE:
availableInputsMask = unsetBitMask(availableInputsMask, inputIndex);
break;
case END_OF_INPUT:
notFinishedInputsMask = unsetBitMask(notFinishedInputsMask, inputIndex);
break;
default:
throw new UnsupportedOperationException("Unsupported inputStatus = " + inputStatus);
}
return calculateOverallStatus();
}
public InputStatus calculateOverallStatus() throws IOException {
if (areAllInputsFinished()) {
return InputStatus.END_OF_INPUT;
}
if (isAnyInputAvailable()) {
return InputStatus.MORE_AVAILABLE;
}
else {
long selectedNotFinishedInputMask = inputSelection.getInputMask() & notFinishedInputsMask;
if (selectedNotFinishedInputMask == 0) {
throw new IOException("Can not make a progress: all selected inputs are already finished");
}
return InputStatus.NOTHING_AVAILABLE;
}
}
void nextSelection() {
if (inputSelectable == null) {
inputSelection = InputSelection.ALL;
} else {
inputSelection = inputSelectable.nextSelection();
}
}
int selectNextInputIndex(int lastReadInputIndex) {
return inputSelection.fairSelectNextIndex(
availableInputsMask & notFinishedInputsMask,
lastReadInputIndex);
}
void setAvailableInput(int inputIndex) {
availableInputsMask = setBitMask(availableInputsMask, inputIndex);
}
void setUnavailableInput(int inputIndex) {
availableInputsMask = unsetBitMask(availableInputsMask, inputIndex);
}
boolean isAnyInputAvailable() {
return (inputSelection.getInputMask() & availableInputsMask & notFinishedInputsMask) != 0;
}
boolean areAllInputsSelected() {
return inputSelection.areAllInputsSelected();
}
boolean isInputSelected(int inputIndex) {
return inputSelection.isInputSelected(inputIndex + 1);
}
public boolean isInputFinished(int inputIndex) {
return !checkBitMask(notFinishedInputsMask, inputIndex);
}
public boolean areAllInputsFinished() {
return notFinishedInputsMask == 0;
}
long setBitMask(long mask, int inputIndex) {
return mask | 1L << inputIndex;
}
long unsetBitMask(long mask, int inputIndex) {
return mask & ~(1L << inputIndex);
}
boolean checkBitMask(long mask, int inputIndex) {
return (mask & (1L << inputIndex)) != 0;
}
}
|
@shehan360, could you please take a look at this code block?
|
// Resolves a module path against the source root; when the root already points
// directly at the module directory, it is returned unchanged.
private static Path getAbsoluteModulePath(String sourceRoot, Path modulePath) {
    final Path sourcePath = Paths.get(sourceRoot);
    return sourcePath.endsWith(modulePath)
            ? sourcePath
            : sourcePath.resolve(ProjectDirConstants.MODULES_ROOT).resolve(modulePath);
}
|
return sourcePath.resolve(ProjectDirConstants.MODULES_ROOT).resolve(modulePath);
|
/**
 * Resolves the absolute directory of a module under the given project source root.
 *
 * @param sourceRoot project source root as a string path
 * @param modulePath relative path of the module (typically just its name)
 * @return the source root itself when it already ends with the module path,
 *         otherwise {@code <sourceRoot>/<MODULES_ROOT>/<modulePath>}
 */
private static Path getAbsoluteModulePath(String sourceRoot, Path modulePath) {
    Path sourcePath = Paths.get(sourceRoot);
    // The caller may have pointed the source root directly at the module directory.
    if (sourcePath.endsWith(modulePath)) {
        return Paths.get(sourceRoot);
    }
    return sourcePath.resolve(ProjectDirConstants.MODULES_ROOT).resolve(modulePath);
}
|
/**
 * Generates API documentation for Ballerina modules: HTML pages rendered from
 * templates, a per-module JSON model, and search-index JSON/JS files.
 * All members are static; console output can be redirected via {@link #setPrintStream(PrintStream)}.
 */
class BallerinaDocGenerator {
    private static final Logger log = LoggerFactory.getLogger(BallerinaDocGenerator.class);
    // User-facing progress/error sink; replaceable for testing via setPrintStream().
    private static PrintStream out = System.out;
    private static final String MODULE_CONTENT_FILE = "Module.md";
    // NOTE(review): BAL_BUILTIN is not referenced anywhere in this class — possibly dead; confirm before removing.
    private static final Path BAL_BUILTIN = Paths.get("ballerina", "builtin");
    private static final String HTML = ".html";
    private static final String DOC_JSON = "api-doc-data.json";
    private static final String JSON = ".json";
    private static final String MODULE_SEARCH = "search";
    private static final String SEARCH_DATA = "search-data.js";
    private static final String SEARCH_DIR = "doc-search";
    // Serializes only @Expose-annotated fields; Path values go through the PathToJson adapter.
    private static Gson gson = new GsonBuilder().registerTypeHierarchyAdapter(Path.class, new PathToJson())
            .excludeFieldsWithoutExposeAnnotation().setPrettyPrinting().create();

    /**
     * API to merge multiple api docs.
     * Reads each module's {@code <module>.json} under {@code apiDocsRoot}, merges the
     * per-module search indexes, and regenerates the combined project index page.
     *
     * @param apiDocsRoot api doc root
     */
    public static void mergeApiDocs(String apiDocsRoot) {
        out.println("docerina: API documentation generation for doc path - " + apiDocsRoot);
        File directory = new File(apiDocsRoot);
        File[] fList = directory.listFiles();
        if (fList == null) {
            String errorMsg = String.format("docerina: API documentation generation failed. Could not find any module" +
                    " in given path %s", apiDocsRoot);
            out.println(errorMsg);
            log.error(errorMsg);
            return;
        }
        // Sort for a deterministic module order in the generated index.
        Arrays.sort(fList);
        List<Module> moduleList = new ArrayList<>(fList.length);
        for (File file : fList) {
            if (file.isDirectory()) {
                // Each module directory is expected to hold "<moduleName>.json".
                Path moduleJsonPath = Paths.get(file.getAbsolutePath(), file.getName() + JSON);
                if (moduleJsonPath.toFile().exists()) {
                    try (BufferedReader br = Files.newBufferedReader(moduleJsonPath, StandardCharsets.UTF_8)) {
                        Module module = gson.fromJson(br, Module.class);
                        moduleList.add(module);
                    } catch (IOException e) {
                        String errorMsg = String.format("API documentation generation failed. Cause: %s",
                                e.getMessage());
                        out.println(errorMsg);
                        log.error(errorMsg, e);
                        return;
                    }
                }
            }
        }
        mergeSearchJsons(apiDocsRoot);
        Project project = new Project();
        project.modules = moduleList;
        String projectTemplateName = System.getProperty(BallerinaDocConstants.PROJECT_TEMPLATE_NAME_KEY, "index");
        String indexHtmlPath = apiDocsRoot + File.separator + projectTemplateName + HTML;
        ProjectPageContext projectPageContext = new ProjectPageContext(project, "API Documentation", "",
                false);
        try {
            Writer.writeHtmlDocument(projectPageContext, projectTemplateName, indexHtmlPath);
        } catch (IOException e) {
            out.println(String.format("docerina: failed to create the index.html. Cause: %s", e.getMessage()));
            log.error("Failed to create the index.html file.", e);
        }
    }

    /**
     * Serializes the docs-generation model built from {@code docsMap} into
     * {@code <output>/api-doc-data.json}.
     *
     * @param docsMap module name to ModuleDoc mapping
     * @param output  output directory path
     */
    public static void writeAPIDocsToJSON(Map<String, ModuleDoc> docsMap, String output) {
        List<ModuleDoc> moduleDocList = new ArrayList<>(docsMap.values());
        moduleDocList.sort(Comparator.comparing(pkg -> pkg.bLangPackage.packageID.toString()));
        Project project = getDocsGenModel(moduleDocList);
        File jsonFile = new File(output + File.separator + DOC_JSON);
        try (java.io.Writer writer = new OutputStreamWriter(new FileOutputStream(jsonFile), StandardCharsets.UTF_8)) {
            String json = gson.toJson(project);
            writer.write(new String(json.getBytes(StandardCharsets.UTF_8), StandardCharsets.UTF_8));
        } catch (IOException e) {
            out.println(String.format("docerina: failed to create the " + DOC_JSON + ". Cause: %s", e.getMessage()));
            log.error("Failed to create " + DOC_JSON + " file.", e);
        }
    }

    /**
     * Reads a previously serialized {@code Project} model from {@code jsonpath} and
     * renders the API docs from it. Silently does nothing when the file is absent.
     *
     * @param jsonpath     path to the project JSON file
     * @param output       output directory path
     * @param excludeIndex whether to skip generating the project index page
     */
    public static void writeAPIDocsForModulesFromJson(Path jsonpath, String output, boolean excludeIndex) {
        if (jsonpath.toFile().exists()) {
            try (BufferedReader br = Files.newBufferedReader(jsonpath, StandardCharsets.UTF_8)) {
                Project project = gson.fromJson(br, Project.class);
                writeAPIDocs(project, output, excludeIndex);
            } catch (IOException e) {
                String errorMsg = String.format("API documentation generation failed. Cause: %s",
                        e.getMessage());
                out.println(errorMsg);
                log.error(errorMsg, e);
                return;
            }
        }
    }

    /**
     * Builds the docs-generation model from {@code docsMap} and renders the API docs
     * when at least one module has public constructs.
     *
     * @param docsMap      module name to ModuleDoc mapping
     * @param output       output directory path
     * @param excludeIndex whether to skip generating the project index page
     */
    public static void writeAPIDocsForModules(Map<String, ModuleDoc> docsMap, String output, boolean excludeIndex) {
        List<ModuleDoc> moduleDocList = new ArrayList<>(docsMap.values());
        moduleDocList.sort(Comparator.comparing(pkg -> pkg.bLangPackage.packageID.toString()));
        Project project = getDocsGenModel(moduleDocList);
        if (!project.modules.isEmpty()) {
            writeAPIDocs(project, output, excludeIndex);
        }
    }

    /**
     * Renders the full HTML documentation tree for {@code project} under {@code output}:
     * one directory per module with pages per construct kind, per-module JSON and search
     * JSON, copied resources, the merged search index, theme assets, and optionally a
     * project index page and an output zip (when the corresponding system property is set).
     *
     * @param project      the docs model to render
     * @param output       output directory path
     * @param excludeIndex whether to skip generating the project index page
     */
    public static void writeAPIDocs(Project project, String output, boolean excludeIndex) {
        // Template names are overridable through system properties; defaults follow each key.
        String moduleTemplateName = System.getProperty(BallerinaDocConstants.MODULE_TEMPLATE_NAME_KEY, "module");
        String recordTemplateName = System.getProperty(BallerinaDocConstants.RECORD_TEMPLATE_NAME_KEY, "record");
        String classTemplateName = System.getProperty(BallerinaDocConstants.CLASS_TEMPLATE_NAME_KEY, "class");
        String abstractObjectTemplateName = System.getProperty(BallerinaDocConstants.ABSTRACT_OBJECT_TEMPLATE_NAME_KEY,
                "abstractObject");
        String clientTemplateName = System.getProperty(BallerinaDocConstants.CLIENT_TEMPLATE_NAME_KEY, "client");
        String listenerTemplateName = System.getProperty(BallerinaDocConstants.LISTENER_TEMPLATE_NAME_KEY,
                "listener");
        String functionsTemplateName = System.getProperty(BallerinaDocConstants.FUNCTIONS_TEMPLATE_NAME_KEY,
                "functions");
        String constantsTemplateName = System.getProperty(BallerinaDocConstants.CONSTANTS_TEMPLATE_NAME_KEY,
                "constants");
        String typesTemplateName = System.getProperty(BallerinaDocConstants.TYPES_TEMPLATE_NAME_KEY, "types");
        String annotationsTemplateName = System.getProperty(BallerinaDocConstants.ANNOTATIONS_TEMPLATE_NAME_KEY,
                "annotations");
        String errorsTemplateName = System.getProperty(BallerinaDocConstants.ERRORS_TEMPLATE_NAME_KEY, "errors");
        // Relative path back to the docs root differs for single-file projects (one level shallower).
        String rootPathModuleLevel = project.isSingleFile ? "./" : "../";
        String rootPathConstructLevel = project.isSingleFile ? "../" : "../../";
        if (project.modules == null) {
            String errMessage =
                    "docerina: API documentation generation failed. Couldn't create the [output directory] " + output;
            out.println(errMessage);
            log.error(errMessage);
            return;
        }
        for (Module module : project.modules) {
            String modDir = output + File.separator + module.id;
            try {
                if (BallerinaDocUtils.isDebugEnabled()) {
                    out.println("docerina: starting to generate docs for module: " + module.id);
                }
                Files.createDirectories(Paths.get(modDir));
                // Module landing page (index.html).
                ModulePageContext modulePageContext = new ModulePageContext(module, project,
                        rootPathModuleLevel,
                        "API Docs - " + (project.isSingleFile ? project.sourceFileName
                                : module.orgName + "/" + module.id), excludeIndex);
                String modIndexPath = modDir + File.separator + "index" + HTML;
                Writer.writeHtmlDocument(modulePageContext, moduleTemplateName, modIndexPath);
                // One page per record under <module>/records/.
                if (!module.records.isEmpty()) {
                    String recordsDir = modDir + File.separator + "records";
                    Files.createDirectories(Paths.get(recordsDir));
                    for (Record record : module.records) {
                        RecordPageContext recordPageContext = new RecordPageContext(record, module, project,
                                rootPathConstructLevel, "API Docs - Record : " + record.name, excludeIndex);
                        String recordFilePath = recordsDir + File.separator + record.name + HTML;
                        Writer.writeHtmlDocument(recordPageContext, recordTemplateName, recordFilePath);
                    }
                }
                // One page per class under <module>/classes/.
                if (!module.classes.isEmpty()) {
                    String classesDir = modDir + File.separator + "classes";
                    Files.createDirectories(Paths.get(classesDir));
                    for (BClass bClass : module.classes) {
                        ClassPageContext classPageContext = new ClassPageContext(bClass, module, project,
                                rootPathConstructLevel, "API Docs - Class : " + bClass.name, excludeIndex);
                        String classFilePath = classesDir + File.separator + bClass.name + HTML;
                        Writer.writeHtmlDocument(classPageContext, classTemplateName, classFilePath);
                    }
                }
                // One page per abstract object under <module>/abstractobjects/.
                if (!module.abstractObjects.isEmpty()) {
                    String absObjDir = modDir + File.separator + "abstractobjects";
                    Files.createDirectories(Paths.get(absObjDir));
                    for (BAbstractObject absObj : module.abstractObjects) {
                        // NOTE(review): page title says "Class" for abstract objects — possibly intended
                        // to read "Abstract Object"; confirm before changing the user-visible string.
                        AbstractObjectPageContext absObjPageContext = new AbstractObjectPageContext(absObj, module,
                                project, rootPathConstructLevel, "API Docs - Class : " + absObj.name,
                                excludeIndex);
                        String absObjFilePath = absObjDir + File.separator + absObj.name + HTML;
                        Writer.writeHtmlDocument(absObjPageContext, abstractObjectTemplateName, absObjFilePath);
                    }
                }
                // One page per client under <module>/clients/.
                if (!module.clients.isEmpty()) {
                    String clientsDir = modDir + File.separator + "clients";
                    Files.createDirectories(Paths.get(clientsDir));
                    for (Client client : module.clients) {
                        ClientPageContext clientPageContext = new ClientPageContext(client, module, project,
                                rootPathConstructLevel, "API Docs - Client : " + client.name, excludeIndex);
                        String clientFilePath = clientsDir + File.separator + client.name + HTML;
                        Writer.writeHtmlDocument(clientPageContext, clientTemplateName, clientFilePath);
                    }
                }
                // One page per listener under <module>/listeners/.
                if (!module.listeners.isEmpty()) {
                    String listenersDir = modDir + File.separator + "listeners";
                    Files.createDirectories(Paths.get(listenersDir));
                    for (Listener listener : module.listeners) {
                        ListenerPageContext listenerPageContext = new ListenerPageContext(listener, module, project,
                                rootPathConstructLevel, "API Docs - Listener : " + listener.name, excludeIndex);
                        String listenerFilePath = listenersDir + File.separator + listener.name + HTML;
                        Writer.writeHtmlDocument(listenerPageContext, listenerTemplateName, listenerFilePath);
                    }
                }
                // Single aggregate pages for functions, constants, types, annotations, errors.
                if (!module.functions.isEmpty()) {
                    String functionsFile = modDir + File.separator + "functions" + HTML;
                    FunctionsPageContext functionsPageContext = new FunctionsPageContext(module.functions,
                            module, project, rootPathModuleLevel, "API Docs - Functions : " + module.id,
                            excludeIndex);
                    Writer.writeHtmlDocument(functionsPageContext, functionsTemplateName, functionsFile);
                }
                if (!module.constants.isEmpty()) {
                    String constantsFile = modDir + File.separator + "constants" + HTML;
                    ConstantsPageContext constantsPageContext = new ConstantsPageContext(module.constants,
                            module, project, rootPathModuleLevel, "API Docs - Constants : " + module.id,
                            excludeIndex);
                    Writer.writeHtmlDocument(constantsPageContext, constantsTemplateName, constantsFile);
                }
                if (!(module.unionTypes.isEmpty() && module.finiteTypes.isEmpty())) {
                    // NOTE(review): only unionTypes is handed to TypesPageContext even though the guard
                    // also checks finiteTypes — verify finite types are rendered elsewhere.
                    String typesFile = modDir + File.separator + "types" + HTML;
                    TypesPageContext typesPageContext = new TypesPageContext(module.unionTypes, module, project,
                            rootPathModuleLevel, "API Docs - Types : " + module.id, excludeIndex);
                    Writer.writeHtmlDocument(typesPageContext, typesTemplateName, typesFile);
                }
                if (!module.annotations.isEmpty()) {
                    String annotationsFile = modDir + File.separator + "annotations" + HTML;
                    AnnotationsPageContext annotationsPageContext = new AnnotationsPageContext(module.annotations,
                            module, project, rootPathModuleLevel, "API Docs - Annotations : " + module.id,
                            excludeIndex);
                    Writer.writeHtmlDocument(annotationsPageContext, annotationsTemplateName, annotationsFile);
                }
                if (!module.errors.isEmpty()) {
                    String errorsFile = modDir + File.separator + "errors" + HTML;
                    ErrorsPageContext errorsPageContext = new ErrorsPageContext(module.errors, module, project,
                            rootPathModuleLevel, "API Docs - Errors : " + module.id, excludeIndex);
                    Writer.writeHtmlDocument(errorsPageContext, errorsTemplateName, errorsFile);
                }
                // Machine-readable metadata consumed by mergeApiDocs()/mergeSearchJsons().
                genModuleJson(module, modDir + File.separator + module.id + JSON);
                genSearchJson(module, modDir + File.separator + MODULE_SEARCH + JSON);
                if (BallerinaDocUtils.isDebugEnabled()) {
                    out.println("docerina: generated docs for module: " + module.id);
                }
            } catch (IOException e) {
                // A failed module is reported but does not abort docs for the remaining modules.
                out.println(String.format("docerina: API documentation generation failed for module %s: %s",
                        module.id, e.getMessage()));
                log.error(String.format("API documentation generation failed for %s", module.id), e);
            }
            // Resource copying runs even if page generation above failed for this module.
            if (!module.resources.isEmpty()) {
                String resourcesDir = modDir + File.separator + "resources";
                if (BallerinaDocUtils.isDebugEnabled()) {
                    out.println("docerina: copying project resources ");
                }
                for (Path resourcePath : module.resources) {
                    File resourcesDirFile = new File(resourcesDir);
                    try {
                        FileUtils.copyFileToDirectory(resourcePath.toFile(), resourcesDirFile);
                    } catch (IOException e) {
                        out.println(String.format("docerina: failed to copy [resource] %s into " +
                                        "[resources directory] %s. Cause: %s", resourcePath.toString(),
                                resourcesDirFile.toString(), e.getMessage()));
                        log.error(String.format("docerina: failed to copy [resource] %s into [resources directory] "
                                        + "%s. Cause: %s", resourcePath.toString(), resourcesDirFile.toString(),
                                e.getMessage()), e);
                    }
                }
                if (BallerinaDocUtils.isDebugEnabled()) {
                    out.println("docerina: successfully copied project resources into " + resourcesDir);
                }
            }
        }
        if (!excludeIndex) {
            genIndexHtml(output, project);
        }
        mergeSearchJsons(output);
        if (BallerinaDocUtils.isDebugEnabled()) {
            out.println("docerina: copying HTML theme into " + output);
        }
        try {
            BallerinaDocUtils.copyResources("html-template-resources", output);
            BallerinaDocUtils.copyResources("syntax-highlighter", output);
            BallerinaDocUtils.copyResources("doc-search", output);
        } catch (IOException e) {
            out.println(String.format("docerina: failed to copy the docerina-theme resource. Cause: %s", e.getMessage
                    ()));
            log.error("Failed to copy the docerina-theme resource.", e);
        }
        if (BallerinaDocUtils.isDebugEnabled()) {
            out.println("docerina: successfully copied HTML theme into " + output);
        }
        try {
            // Optional: zip the whole output directory when OUTPUT_ZIP_PATH is configured.
            String zipPath = System.getProperty(BallerinaDocConstants.OUTPUT_ZIP_PATH);
            if (zipPath != null) {
                if (BallerinaDocUtils.isDebugEnabled()) {
                    out.println("docerina: generating the documentation zip file.");
                }
                BallerinaDocUtils.packageToZipFile(output, zipPath);
                if (BallerinaDocUtils.isDebugEnabled()) {
                    out.println("docerina: successfully generated the documentation zip file.");
                }
            }
        } catch (IOException e) {
            out.println(String.format("docerina: API documentation zip packaging failed for %s: %s", output, e
                    .getMessage()));
            log.error(String.format("API documentation zip packaging failed for %s", output), e);
        }
    }

    /**
     * Renders the project-level index page into {@code output} using the template
     * named by {@code PROJECT_TEMPLATE_NAME_KEY} (default "index").
     */
    private static void genIndexHtml(String output, Project project) {
        String projectTemplateName = System.getProperty(BallerinaDocConstants.PROJECT_TEMPLATE_NAME_KEY, "index");
        String indexHtmlPath = output + File.separator + projectTemplateName + HTML;
        ProjectPageContext projectPageContext = new ProjectPageContext(project, "API Documentation", "",
                false);
        try {
            Writer.writeHtmlDocument(projectPageContext, projectTemplateName, indexHtmlPath);
        } catch (IOException e) {
            out.println(String.format("docerina: failed to create the index.html. Cause: %s", e.getMessage()));
            log.error("Failed to create the index.html file.", e);
        }
    }

    /**
     * Serializes a single module model to {@code moduleJsonPath} as UTF-8 JSON.
     */
    private static void genModuleJson(Module module, String moduleJsonPath) {
        File jsonFile = new File(moduleJsonPath);
        try (java.io.Writer writer = new OutputStreamWriter(new FileOutputStream(jsonFile), StandardCharsets.UTF_8)) {
            String json = gson.toJson(module);
            writer.write(new String(json.getBytes(StandardCharsets.UTF_8), StandardCharsets.UTF_8));
        } catch (IOException e) {
            out.println(String.format("docerina: failed to create the module.json. Cause: %s", e.getMessage()));
            log.error("Failed to create module.json file.", e);
        }
    }

    /**
     * Builds a per-module search index (first-line summaries of every public construct)
     * and writes it to {@code jsonPath}.
     */
    private static void genSearchJson(Module module, String jsonPath) {
        List<ModuleSearchJson> searchModules = new ArrayList<>();
        List<ConstructSearchJson> searchFunctions = new ArrayList<>();
        List<ConstructSearchJson> searchClasses = new ArrayList<>();
        List<ConstructSearchJson> searchRecords = new ArrayList<>();
        List<ConstructSearchJson> searchConstants = new ArrayList<>();
        List<ConstructSearchJson> searchErrors = new ArrayList<>();
        List<ConstructSearchJson> searchTypes = new ArrayList<>();
        List<ConstructSearchJson> searchClients = new ArrayList<>();
        List<ConstructSearchJson> searchListeners = new ArrayList<>();
        List<ConstructSearchJson> searchAnnotations = new ArrayList<>();
        List<ConstructSearchJson> searchAbstractObjects = new ArrayList<>();
        if (module.summary != null) {
            searchModules.add(new ModuleSearchJson(module.id, getFirstLine(module.summary)));
        }
        module.functions.forEach((function) ->
                searchFunctions.add(new ConstructSearchJson(function.name, module.id,
                        getFirstLine(function.description))));
        module.classes.forEach((bClass) ->
                searchClasses.add(new ConstructSearchJson(bClass.name, module.id, getFirstLine(bClass.description))));
        module.abstractObjects.forEach((absObj) ->
                searchAbstractObjects.add(new ConstructSearchJson(absObj.name, module.id,
                        getFirstLine(absObj.description))));
        module.clients.forEach((client) ->
                searchClients.add(new ConstructSearchJson(client.name, module.id, getFirstLine(client.description))));
        module.listeners.forEach((listener) ->
                searchListeners.add(new ConstructSearchJson(listener.name, module.id,
                        getFirstLine(listener.description))));
        module.records.forEach((record) ->
                searchRecords.add(new ConstructSearchJson(record.name, module.id, getFirstLine(record.description))));
        module.constants.forEach((constant) ->
                searchConstants.add(new ConstructSearchJson(constant.name, module.id,
                        getFirstLine(constant.description))));
        module.errors.forEach((error) ->
                searchErrors.add(new ConstructSearchJson(error.name, module.id, getFirstLine(error.description))));
        // Union and finite types share a single "types" bucket in the search index.
        module.unionTypes.forEach((unionType) ->
                searchTypes.add(new ConstructSearchJson(unionType.name, module.id,
                        getFirstLine(unionType.description))));
        module.finiteTypes.forEach((finiteType) ->
                searchTypes.add(new ConstructSearchJson(finiteType.name, module.id,
                        getFirstLine(finiteType.description))));
        module.annotations.forEach((annotation) ->
                searchAnnotations.add(new ConstructSearchJson(annotation.name, module.id,
                        getFirstLine(annotation.description))));
        SearchJson searchJson = new SearchJson(searchModules, searchClasses, searchFunctions, searchRecords,
                searchConstants, searchErrors, searchTypes, searchClients, searchListeners, searchAnnotations,
                searchAbstractObjects);
        // A plain Gson instance here: search JSON has no @Expose annotations or Path fields.
        Gson gson = new GsonBuilder().setPrettyPrinting().create();
        File jsonFile = new File(jsonPath);
        try (java.io.Writer writer = new OutputStreamWriter(new FileOutputStream(jsonFile), StandardCharsets.UTF_8)) {
            String json = gson.toJson(searchJson);
            writer.write(new String(json.getBytes(StandardCharsets.UTF_8), StandardCharsets.UTF_8));
        } catch (IOException e) {
            out.println(String.format("docerina: failed to create the search.json. Cause: %s", e.getMessage()));
            log.error("Failed to create search.json file.", e);
        }
    }

    /**
     * Returns the first sentence of {@code description}: everything up to the first
     * period, with the period restored. When the first segment contains an opening
     * {@code <p>} tag, a closing {@code </p>} is appended so the snippet stays
     * well-formed HTML.
     */
    private static String getFirstLine(String description) {
        String[] splits = description.split("\\.", 2);
        if (splits.length < 2) {
            // No period found — the whole description is the summary.
            return splits[0];
        } else {
            if (splits[0].contains("<p>")) {
                return splits[0] + ".</p>";
            }
            return splits[0] + ".";
        }
    }

    /**
     * Merges every module's {@code search.json} under {@code docRoot} into a single
     * {@code doc-search/search-data.js} file (a JS assignment, not raw JSON, so the
     * browser can load it with a script tag).
     */
    private static void mergeSearchJsons(String docRoot) {
        Gson gson = new GsonBuilder().setPrettyPrinting().create();
        File directory = new File(docRoot);
        File[] fList = directory.listFiles();
        if (fList != null) {
            Arrays.sort(fList);
            SearchJson searchJson = new SearchJson();
            for (File file : fList) {
                if (file.isDirectory()) {
                    Path moduleJsonPath = Paths.get(file.getAbsolutePath(), MODULE_SEARCH + JSON);
                    if (moduleJsonPath.toFile().exists()) {
                        try (BufferedReader br = Files.newBufferedReader(moduleJsonPath, StandardCharsets.UTF_8)) {
                            SearchJson modSearchJson = gson.fromJson(br, SearchJson.class);
                            searchJson.getModules().addAll(modSearchJson.getModules());
                            searchJson.getFunctions().addAll(modSearchJson.getFunctions());
                            searchJson.getClasses().addAll(modSearchJson.getClasses());
                            searchJson.getClients().addAll(modSearchJson.getClients());
                            searchJson.getListeners().addAll(modSearchJson.getListeners());
                            searchJson.getRecords().addAll(modSearchJson.getRecords());
                            searchJson.getConstants().addAll(modSearchJson.getConstants());
                            searchJson.getErrors().addAll(modSearchJson.getErrors());
                            searchJson.getTypes().addAll(modSearchJson.getTypes());
                            searchJson.getAnnotations().addAll(modSearchJson.getAnnotations());
                            searchJson.getAbstractObjects().addAll(modSearchJson.getAbstractObjects());
                        } catch (IOException e) {
                            String errorMsg = String.format("API documentation generation failed. Cause: %s",
                                    e.getMessage());
                            out.println(errorMsg);
                            log.error(errorMsg, e);
                            return;
                        }
                    }
                }
            }
            File docSearchDir = new File(docRoot + File.separator + SEARCH_DIR);
            boolean docSearchDirExists = docSearchDir.exists() || docSearchDir.mkdir();
            if (!docSearchDirExists) {
                // Logged but not fatal here; the write below will surface the resulting IOException.
                out.println("docerina: failed to create " + SEARCH_DIR + " directory");
                log.error("Failed to create " + SEARCH_DIR + " directory.");
            }
            File jsonFile = new File(docRoot + File.separator + SEARCH_DIR + File.separator + SEARCH_DATA);
            try (java.io.Writer writer = new OutputStreamWriter(new FileOutputStream(jsonFile),
                    StandardCharsets.UTF_8)) {
                String json = gson.toJson(searchJson);
                String js = "var searchData = " + json + ";";
                writer.write(new String(js.getBytes(StandardCharsets.UTF_8), StandardCharsets.UTF_8));
            } catch (IOException e) {
                out.println(String.format("docerina: failed to create the " + SEARCH_DATA + ". Cause: %s",
                        e.getMessage()));
                log.error("Failed to create " + SEARCH_DATA + " file.", e);
            }
        }
    }

    /**
     * Builds a ModuleDoc for every compiled module.
     *
     * @param sourceRoot project source root
     * @param modules    compiled packages to document
     * @return module name mapped to its ModuleDoc
     * @throws IOException when module content/resources cannot be read
     */
    public static Map<String, ModuleDoc> generateModuleDocs(String sourceRoot,
                                                            List<BLangPackage> modules) throws IOException {
        Map<String, ModuleDoc> moduleDocMap = new HashMap<>();
        for (BLangPackage bLangPackage : modules) {
            moduleDocMap.put(bLangPackage.packageID.name.toString(), generateModuleDoc(sourceRoot, bLangPackage));
        }
        return moduleDocMap;
    }

    /**
     * Builds a ModuleDoc for every compiled module except those listed in
     * {@code moduleFilter}.
     *
     * @param sourceRoot   project source root
     * @param modules      compiled packages to document
     * @param moduleFilter module names to exclude from doc generation
     * @return module name mapped to its ModuleDoc
     * @throws IOException when module content/resources cannot be read
     */
    public static Map<String, ModuleDoc> generateModuleDocs(String sourceRoot, List<BLangPackage> modules,
                                                            Set<String> moduleFilter) throws IOException {
        Map<String, ModuleDoc> moduleDocMap = new HashMap<>();
        for (BLangPackage bLangPackage : modules) {
            String moduleName = bLangPackage.packageID.name.toString();
            if (moduleFilter.contains(moduleName)) {
                continue;
            }
            moduleDocMap.put(moduleName, generateModuleDoc(sourceRoot, bLangPackage));
        }
        return moduleDocMap;
    }

    /**
     * Builds the ModuleDoc for a single compiled module: locates its Module.md (if
     * any) and resource files on disk and pairs them with the compiled package.
     *
     * @throws IOException when the module directory cannot be traversed
     */
    public static ModuleDoc generateModuleDoc(String sourceRoot, BLangPackage bLangPackage) throws IOException {
        String moduleName = bLangPackage.packageID.name.toString();
        Path absolutePkgPath = getAbsoluteModulePath(sourceRoot, Paths.get(moduleName));
        Path packageMd = getModuleDocPath(absolutePkgPath);
        List<Path> resources = getResourcePaths(absolutePkgPath);
        return new ModuleDoc(packageMd == null ? null : packageMd.toAbsolutePath(), resources, bLangPackage);
    }

    /** Redirects this generator's console output (used primarily by tests/CLI wiring). */
    public static void setPrintStream(PrintStream out) {
        BallerinaDocGenerator.out = out;
    }

    /**
     * Generate docs generator model.
     *
     * @param moduleDocList moduleDocList modules list whose docs to be generated
     * @return docs generator model of the project
     */
    public static Project getDocsGenModel(List<ModuleDoc> moduleDocList) {
        Project project = new Project();
        // A single module whose package name is "." denotes a single-file (script) build.
        project.isSingleFile =
                moduleDocList.size() == 1 && moduleDocList.get(0).bLangPackage.packageID.name.value.equals(".");
        if (project.isSingleFile) {
            project.sourceFileName = moduleDocList.get(0).bLangPackage.packageID.sourceFileName.value;
        }
        project.name = "";
        project.description = "";
        List<Module> moduleDocs = new ArrayList<>();
        for (ModuleDoc moduleDoc : moduleDocList) {
            Module module = new Module();
            module.id = moduleDoc.bLangPackage.packageID.name.toString();
            module.orgName = moduleDoc.bLangPackage.packageID.orgName.toString();
            String moduleVersion = moduleDoc.bLangPackage.packageID.version.toString();
            // Fall back to the distribution version when the module itself has no version.
            module.version = moduleVersion.equals("") ?
                    System.getProperty(BallerinaDocConstants.VERSION) :
                    moduleVersion;
            module.summary = moduleDoc.summary;
            module.description = moduleDoc.description;
            sortModuleConstructs(moduleDoc.bLangPackage);
            boolean hasPublicConstructs = Generator.generateModuleConstructs(module, moduleDoc.bLangPackage);
            // Modules with nothing public are omitted from the generated docs entirely.
            if (hasPublicConstructs) {
                module.resources.addAll(moduleDoc.resources);
                moduleDocs.add(module);
            }
        }
        project.modules = moduleDocs;
        return project;
    }

    /**
     * Sorts the package's constructs in place so generated pages list them
     * alphabetically. Functions are ordered by receiver name then function name so
     * attached functions group with their receiver type.
     */
    private static void sortModuleConstructs(BLangPackage bLangPackage) {
        bLangPackage.getFunctions().sort(Comparator.comparing(f -> (f.getReceiver() == null ? "" : f
                .getReceiver().getName()) + f.getName().getValue()));
        bLangPackage.getAnnotations().sort(Comparator.comparing(a -> a.getName().getValue()));
        bLangPackage.getTypeDefinitions()
                .sort(Comparator.comparing(a -> a.getName() == null ? "" : a.getName().getValue()));
        bLangPackage.getGlobalVariables().sort(Comparator.comparing(a -> a.getName().getValue()));
    }

    /**
     * Lists every file/directory (recursively) under the module's {@code resources}
     * directory, excluding the directory itself. Returns an empty list when the
     * directory does not exist.
     *
     * @throws IOException when the directory walk fails
     */
    private static List<Path> getResourcePaths(Path absolutePkgPath) throws IOException {
        Path resourcesDirPath = absolutePkgPath.resolve("resources");
        List<Path> resources = new ArrayList<>();
        if (resourcesDirPath.toFile().exists()) {
            resources = Files.walk(resourcesDirPath).filter(path -> !path.equals(resourcesDirPath)).collect(Collectors
                    .toList());
        }
        return resources;
    }

    /**
     * Finds the module's {@code Module.md} directly inside {@code absolutePkgPath}
     * (depth 1, no recursion), or {@code null} when absent.
     *
     * @throws IOException when the directory cannot be read
     */
    private static Path getModuleDocPath(Path absolutePkgPath) throws IOException {
        Path packageMd;
        Optional<Path> o = Files.find(absolutePkgPath, 1, (path, attr) -> {
            Path fileName = path.getFileName();
            if (fileName != null) {
                return fileName.toString().equals(MODULE_CONTENT_FILE);
            }
            return false;
        }).findFirst();
        packageMd = o.isPresent() ? o.get() : null;
        return packageMd;
    }
}
|
class BallerinaDocGenerator {
private static final Logger log = LoggerFactory.getLogger(BallerinaDocGenerator.class);
private static PrintStream out = System.out;
private static final String MODULE_CONTENT_FILE = "Module.md";
private static final Path BAL_BUILTIN = Paths.get("ballerina", "builtin");
private static final String HTML = ".html";
private static final String DOC_JSON = "api-doc-data.json";
private static final String JSON = ".json";
private static final String MODULE_SEARCH = "search";
private static final String SEARCH_DATA = "search-data.js";
private static final String SEARCH_DIR = "doc-search";
private static Gson gson = new GsonBuilder().registerTypeHierarchyAdapter(Path.class, new PathToJson())
.excludeFieldsWithoutExposeAnnotation().setPrettyPrinting().create();
/**
* API to merge multiple api docs.
* @param apiDocsRoot api doc root
*/
public static void mergeApiDocs(String apiDocsRoot) {
out.println("docerina: API documentation generation for doc path - " + apiDocsRoot);
File directory = new File(apiDocsRoot);
File[] fList = directory.listFiles();
if (fList == null) {
String errorMsg = String.format("docerina: API documentation generation failed. Could not find any module" +
" in given path %s", apiDocsRoot);
out.println(errorMsg);
log.error(errorMsg);
return;
}
Arrays.sort(fList);
List<Module> moduleList = new ArrayList<>(fList.length);
for (File file : fList) {
if (file.isDirectory()) {
Path moduleJsonPath = Paths.get(file.getAbsolutePath(), file.getName() + JSON);
if (moduleJsonPath.toFile().exists()) {
try (BufferedReader br = Files.newBufferedReader(moduleJsonPath, StandardCharsets.UTF_8)) {
Module module = gson.fromJson(br, Module.class);
moduleList.add(module);
} catch (IOException e) {
String errorMsg = String.format("API documentation generation failed. Cause: %s",
e.getMessage());
out.println(errorMsg);
log.error(errorMsg, e);
return;
}
}
}
}
mergeSearchJsons(apiDocsRoot);
Project project = new Project();
project.modules = moduleList;
String projectTemplateName = System.getProperty(BallerinaDocConstants.PROJECT_TEMPLATE_NAME_KEY, "index");
String indexHtmlPath = apiDocsRoot + File.separator + projectTemplateName + HTML;
ProjectPageContext projectPageContext = new ProjectPageContext(project, "API Documentation", "",
false);
try {
Writer.writeHtmlDocument(projectPageContext, projectTemplateName, indexHtmlPath);
} catch (IOException e) {
out.println(String.format("docerina: failed to create the index.html. Cause: %s", e.getMessage()));
log.error("Failed to create the index.html file.", e);
}
}
public static void writeAPIDocsToJSON(Map<String, ModuleDoc> docsMap, String output) {
List<ModuleDoc> moduleDocList = new ArrayList<>(docsMap.values());
moduleDocList.sort(Comparator.comparing(pkg -> pkg.bLangPackage.packageID.toString()));
Project project = getDocsGenModel(moduleDocList);
File jsonFile = new File(output + File.separator + DOC_JSON);
try (java.io.Writer writer = new OutputStreamWriter(new FileOutputStream(jsonFile), StandardCharsets.UTF_8)) {
String json = gson.toJson(project);
writer.write(new String(json.getBytes(StandardCharsets.UTF_8), StandardCharsets.UTF_8));
} catch (IOException e) {
out.println(String.format("docerina: failed to create the " + DOC_JSON + ". Cause: %s", e.getMessage()));
log.error("Failed to create " + DOC_JSON + " file.", e);
}
}
public static void writeAPIDocsForModulesFromJson(Path jsonpath, String output, boolean excludeIndex) {
if (jsonpath.toFile().exists()) {
try (BufferedReader br = Files.newBufferedReader(jsonpath, StandardCharsets.UTF_8)) {
Project project = gson.fromJson(br, Project.class);
writeAPIDocs(project, output, excludeIndex);
} catch (IOException e) {
String errorMsg = String.format("API documentation generation failed. Cause: %s",
e.getMessage());
out.println(errorMsg);
log.error(errorMsg, e);
return;
}
}
}
public static void writeAPIDocsForModules(Map<String, ModuleDoc> docsMap, String output, boolean excludeIndex) {
List<ModuleDoc> moduleDocList = new ArrayList<>(docsMap.values());
moduleDocList.sort(Comparator.comparing(pkg -> pkg.bLangPackage.packageID.toString()));
Project project = getDocsGenModel(moduleDocList);
if (!project.modules.isEmpty()) {
writeAPIDocs(project, output, excludeIndex);
}
}
/**
 * Writes the complete API documentation for every module of the given project:
 * per-construct HTML pages, per-module JSON model and search index, the merged
 * search index, the optional top-level index page, the static HTML theme, and an
 * optional zip of the whole output. Failures are reported per step and do not
 * abort the remaining steps.
 *
 * @param project      docs-generator model of the project
 * @param output       output directory for the generated docs
 * @param excludeIndex whether to skip generating the top-level index page
 */
public static void writeAPIDocs(Project project, String output, boolean excludeIndex) {
    // All template names can be overridden through system properties; the second
    // argument of each lookup is the built-in default template.
    String moduleTemplateName = System.getProperty(BallerinaDocConstants.MODULE_TEMPLATE_NAME_KEY, "module");
    String recordTemplateName = System.getProperty(BallerinaDocConstants.RECORD_TEMPLATE_NAME_KEY, "record");
    String classTemplateName = System.getProperty(BallerinaDocConstants.CLASS_TEMPLATE_NAME_KEY, "class");
    String abstractObjectTemplateName = System.getProperty(BallerinaDocConstants.ABSTRACT_OBJECT_TEMPLATE_NAME_KEY,
            "abstractObject");
    String clientTemplateName = System.getProperty(BallerinaDocConstants.CLIENT_TEMPLATE_NAME_KEY, "client");
    String listenerTemplateName = System.getProperty(BallerinaDocConstants.LISTENER_TEMPLATE_NAME_KEY,
            "listener");
    String functionsTemplateName = System.getProperty(BallerinaDocConstants.FUNCTIONS_TEMPLATE_NAME_KEY,
            "functions");
    String constantsTemplateName = System.getProperty(BallerinaDocConstants.CONSTANTS_TEMPLATE_NAME_KEY,
            "constants");
    String typesTemplateName = System.getProperty(BallerinaDocConstants.TYPES_TEMPLATE_NAME_KEY, "types");
    String annotationsTemplateName = System.getProperty(BallerinaDocConstants.ANNOTATIONS_TEMPLATE_NAME_KEY,
            "annotations");
    String errorsTemplateName = System.getProperty(BallerinaDocConstants.ERRORS_TEMPLATE_NAME_KEY, "errors");
    // Relative paths back to the docs root: single-file projects have one fewer
    // directory level than module-based projects.
    String rootPathModuleLevel = project.isSingleFile ? "./" : "../";
    String rootPathConstructLevel = project.isSingleFile ? "../" : "../../";
    if (project.modules == null) {
        String errMessage =
                "docerina: API documentation generation failed. Couldn't create the [output directory] " + output;
        out.println(errMessage);
        log.error(errMessage);
        return;
    }
    for (Module module : project.modules) {
        String modDir = output + File.separator + module.id;
        try {
            if (BallerinaDocUtils.isDebugEnabled()) {
                out.println("docerina: starting to generate docs for module: " + module.id);
            }
            // Module landing page.
            Files.createDirectories(Paths.get(modDir));
            ModulePageContext modulePageContext = new ModulePageContext(module, project,
                    rootPathModuleLevel,
                    "API Docs - " + (project.isSingleFile ? project.sourceFileName
                            : module.orgName + "/" + module.id), excludeIndex);
            String modIndexPath = modDir + File.separator + "index" + HTML;
            Writer.writeHtmlDocument(modulePageContext, moduleTemplateName, modIndexPath);
            // One HTML page per record construct.
            if (!module.records.isEmpty()) {
                String recordsDir = modDir + File.separator + "records";
                Files.createDirectories(Paths.get(recordsDir));
                for (Record record : module.records) {
                    RecordPageContext recordPageContext = new RecordPageContext(record, module, project,
                            rootPathConstructLevel, "API Docs - Record : " + record.name, excludeIndex);
                    String recordFilePath = recordsDir + File.separator + record.name + HTML;
                    Writer.writeHtmlDocument(recordPageContext, recordTemplateName, recordFilePath);
                }
            }
            // One HTML page per class.
            if (!module.classes.isEmpty()) {
                String classesDir = modDir + File.separator + "classes";
                Files.createDirectories(Paths.get(classesDir));
                for (BClass bClass : module.classes) {
                    ClassPageContext classPageContext = new ClassPageContext(bClass, module, project,
                            rootPathConstructLevel, "API Docs - Class : " + bClass.name, excludeIndex);
                    String classFilePath = classesDir + File.separator + bClass.name + HTML;
                    Writer.writeHtmlDocument(classPageContext, classTemplateName, classFilePath);
                }
            }
            // One HTML page per abstract object.
            if (!module.abstractObjects.isEmpty()) {
                String absObjDir = modDir + File.separator + "abstractobjects";
                Files.createDirectories(Paths.get(absObjDir));
                for (BAbstractObject absObj : module.abstractObjects) {
                    AbstractObjectPageContext absObjPageContext = new AbstractObjectPageContext(absObj, module,
                            project, rootPathConstructLevel, "API Docs - Class : " + absObj.name,
                            excludeIndex);
                    String absObjFilePath = absObjDir + File.separator + absObj.name + HTML;
                    Writer.writeHtmlDocument(absObjPageContext, abstractObjectTemplateName, absObjFilePath);
                }
            }
            // One HTML page per client.
            if (!module.clients.isEmpty()) {
                String clientsDir = modDir + File.separator + "clients";
                Files.createDirectories(Paths.get(clientsDir));
                for (Client client : module.clients) {
                    ClientPageContext clientPageContext = new ClientPageContext(client, module, project,
                            rootPathConstructLevel, "API Docs - Client : " + client.name, excludeIndex);
                    String clientFilePath = clientsDir + File.separator + client.name + HTML;
                    Writer.writeHtmlDocument(clientPageContext, clientTemplateName, clientFilePath);
                }
            }
            // One HTML page per listener.
            if (!module.listeners.isEmpty()) {
                String listenersDir = modDir + File.separator + "listeners";
                Files.createDirectories(Paths.get(listenersDir));
                for (Listener listener : module.listeners) {
                    ListenerPageContext listenerPageContext = new ListenerPageContext(listener, module, project,
                            rootPathConstructLevel, "API Docs - Listener : " + listener.name, excludeIndex);
                    String listenerFilePath = listenersDir + File.separator + listener.name + HTML;
                    Writer.writeHtmlDocument(listenerPageContext, listenerTemplateName, listenerFilePath);
                }
            }
            // Aggregate pages: all functions/constants/types/annotations/errors of the
            // module share a single page each.
            if (!module.functions.isEmpty()) {
                String functionsFile = modDir + File.separator + "functions" + HTML;
                FunctionsPageContext functionsPageContext = new FunctionsPageContext(module.functions,
                        module, project, rootPathModuleLevel, "API Docs - Functions : " + module.id,
                        excludeIndex);
                Writer.writeHtmlDocument(functionsPageContext, functionsTemplateName, functionsFile);
            }
            if (!module.constants.isEmpty()) {
                String constantsFile = modDir + File.separator + "constants" + HTML;
                ConstantsPageContext constantsPageContext = new ConstantsPageContext(module.constants,
                        module, project, rootPathModuleLevel, "API Docs - Constants : " + module.id,
                        excludeIndex);
                Writer.writeHtmlDocument(constantsPageContext, constantsTemplateName, constantsFile);
            }
            if (!(module.unionTypes.isEmpty() && module.finiteTypes.isEmpty())) {
                String typesFile = modDir + File.separator + "types" + HTML;
                // NOTE(review): the guard also checks finiteTypes, but only unionTypes is
                // passed to the context — presumably the template reads finite types off
                // the module itself; confirm.
                TypesPageContext typesPageContext = new TypesPageContext(module.unionTypes, module, project,
                        rootPathModuleLevel, "API Docs - Types : " + module.id, excludeIndex);
                Writer.writeHtmlDocument(typesPageContext, typesTemplateName, typesFile);
            }
            if (!module.annotations.isEmpty()) {
                String annotationsFile = modDir + File.separator + "annotations" + HTML;
                AnnotationsPageContext annotationsPageContext = new AnnotationsPageContext(module.annotations,
                        module, project, rootPathModuleLevel, "API Docs - Annotations : " + module.id,
                        excludeIndex);
                Writer.writeHtmlDocument(annotationsPageContext, annotationsTemplateName, annotationsFile);
            }
            if (!module.errors.isEmpty()) {
                String errorsFile = modDir + File.separator + "errors" + HTML;
                ErrorsPageContext errorsPageContext = new ErrorsPageContext(module.errors, module, project,
                        rootPathModuleLevel, "API Docs - Errors : " + module.id, excludeIndex);
                Writer.writeHtmlDocument(errorsPageContext, errorsTemplateName, errorsFile);
            }
            // Machine-readable module model and per-module search index.
            genModuleJson(module, modDir + File.separator + module.id + JSON);
            genSearchJson(module, modDir + File.separator + MODULE_SEARCH + JSON);
            if (BallerinaDocUtils.isDebugEnabled()) {
                out.println("docerina: generated docs for module: " + module.id);
            }
        } catch (IOException e) {
            out.println(String.format("docerina: API documentation generation failed for module %s: %s",
                    module.id, e.getMessage()));
            log.error(String.format("API documentation generation failed for %s", module.id), e);
        }
        // Resource copying is best-effort per file and intentionally outside the try
        // above, so a failed page generation does not skip it.
        if (!module.resources.isEmpty()) {
            String resourcesDir = modDir + File.separator + "resources";
            if (BallerinaDocUtils.isDebugEnabled()) {
                out.println("docerina: copying project resources ");
            }
            for (Path resourcePath : module.resources) {
                File resourcesDirFile = new File(resourcesDir);
                try {
                    FileUtils.copyFileToDirectory(resourcePath.toFile(), resourcesDirFile);
                } catch (IOException e) {
                    out.println(String.format("docerina: failed to copy [resource] %s into " +
                                    "[resources directory] %s. Cause: %s", resourcePath.toString(),
                            resourcesDirFile.toString(), e.getMessage()));
                    log.error(String.format("docerina: failed to copy [resource] %s into [resources directory] "
                                    + "%s. Cause: %s", resourcePath.toString(), resourcesDirFile.toString(),
                            e.getMessage()), e);
                }
            }
            if (BallerinaDocUtils.isDebugEnabled()) {
                out.println("docerina: successfully copied project resources into " + resourcesDir);
            }
        }
    }
    if (!excludeIndex) {
        genIndexHtml(output, project);
    }
    // Combine every module's search index into one file under the docs root.
    mergeSearchJsons(output);
    if (BallerinaDocUtils.isDebugEnabled()) {
        out.println("docerina: copying HTML theme into " + output);
    }
    try {
        BallerinaDocUtils.copyResources("html-template-resources", output);
        BallerinaDocUtils.copyResources("syntax-highlighter", output);
        BallerinaDocUtils.copyResources("doc-search", output);
    } catch (IOException e) {
        out.println(String.format("docerina: failed to copy the docerina-theme resource. Cause: %s", e.getMessage
                ()));
        log.error("Failed to copy the docerina-theme resource.", e);
    }
    if (BallerinaDocUtils.isDebugEnabled()) {
        out.println("docerina: successfully copied HTML theme into " + output);
    }
    // Optionally package the whole output directory as a zip.
    try {
        String zipPath = System.getProperty(BallerinaDocConstants.OUTPUT_ZIP_PATH);
        if (zipPath != null) {
            if (BallerinaDocUtils.isDebugEnabled()) {
                out.println("docerina: generating the documentation zip file.");
            }
            BallerinaDocUtils.packageToZipFile(output, zipPath);
            if (BallerinaDocUtils.isDebugEnabled()) {
                out.println("docerina: successfully generated the documentation zip file.");
            }
        }
    } catch (IOException e) {
        out.println(String.format("docerina: API documentation zip packaging failed for %s: %s", output, e
                .getMessage()));
        log.error(String.format("API documentation zip packaging failed for %s", output), e);
    }
}
/**
 * Generates the project-level landing page from the configurable project template.
 *
 * @param output  docs root directory
 * @param project docs-generator model of the project
 */
private static void genIndexHtml(String output, Project project) {
    String templateName = System.getProperty(BallerinaDocConstants.PROJECT_TEMPLATE_NAME_KEY, "index");
    String indexHtmlPath = output + File.separator + templateName + HTML;
    ProjectPageContext pageContext = new ProjectPageContext(project, "API Documentation", "", false);
    try {
        Writer.writeHtmlDocument(pageContext, templateName, indexHtmlPath);
    } catch (IOException e) {
        out.println(String.format("docerina: failed to create the index.html. Cause: %s", e.getMessage()));
        log.error("Failed to create the index.html file.", e);
    }
}
/**
 * Serializes a single module's docs model to the given JSON file path.
 *
 * @param module         module model to serialize
 * @param moduleJsonPath destination file path
 */
private static void genModuleJson(Module module, String moduleJsonPath) {
    File jsonFile = new File(moduleJsonPath);
    try (java.io.Writer writer = new OutputStreamWriter(new FileOutputStream(jsonFile), StandardCharsets.UTF_8)) {
        // The writer is already UTF-8; the previous getBytes/new String round-trip was a no-op.
        writer.write(gson.toJson(module));
    } catch (IOException e) {
        out.println(String.format("docerina: failed to create the module.json. Cause: %s", e.getMessage()));
        log.error("Failed to create module.json file.", e);
    }
}
/**
 * Builds the per-module search index: one summary entry for the module itself plus
 * one entry per public construct (functions, classes, records, ...), each carrying
 * the first sentence of its description, and writes it as pretty-printed JSON.
 *
 * @param module   module model to index
 * @param jsonPath destination file path of the search index
 */
private static void genSearchJson(Module module, String jsonPath) {
    List<ModuleSearchJson> searchModules = new ArrayList<>();
    List<ConstructSearchJson> searchFunctions = new ArrayList<>();
    List<ConstructSearchJson> searchClasses = new ArrayList<>();
    List<ConstructSearchJson> searchRecords = new ArrayList<>();
    List<ConstructSearchJson> searchConstants = new ArrayList<>();
    List<ConstructSearchJson> searchErrors = new ArrayList<>();
    List<ConstructSearchJson> searchTypes = new ArrayList<>();
    List<ConstructSearchJson> searchClients = new ArrayList<>();
    List<ConstructSearchJson> searchListeners = new ArrayList<>();
    List<ConstructSearchJson> searchAnnotations = new ArrayList<>();
    List<ConstructSearchJson> searchAbstractObjects = new ArrayList<>();
    if (module.summary != null) {
        searchModules.add(new ModuleSearchJson(module.id, getFirstLine(module.summary)));
    }
    module.functions.forEach((function) ->
            searchFunctions.add(new ConstructSearchJson(function.name, module.id,
                    getFirstLine(function.description))));
    module.classes.forEach((bClass) ->
            searchClasses.add(new ConstructSearchJson(bClass.name, module.id, getFirstLine(bClass.description))));
    module.abstractObjects.forEach((absObj) ->
            searchAbstractObjects.add(new ConstructSearchJson(absObj.name, module.id,
                    getFirstLine(absObj.description))));
    module.clients.forEach((client) ->
            searchClients.add(new ConstructSearchJson(client.name, module.id, getFirstLine(client.description))));
    module.listeners.forEach((listener) ->
            searchListeners.add(new ConstructSearchJson(listener.name, module.id,
                    getFirstLine(listener.description))));
    module.records.forEach((record) ->
            searchRecords.add(new ConstructSearchJson(record.name, module.id, getFirstLine(record.description))));
    module.constants.forEach((constant) ->
            searchConstants.add(new ConstructSearchJson(constant.name, module.id,
                    getFirstLine(constant.description))));
    module.errors.forEach((error) ->
            searchErrors.add(new ConstructSearchJson(error.name, module.id, getFirstLine(error.description))));
    // Union and finite types share the single "types" bucket.
    module.unionTypes.forEach((unionType) ->
            searchTypes.add(new ConstructSearchJson(unionType.name, module.id,
                    getFirstLine(unionType.description))));
    module.finiteTypes.forEach((finiteType) ->
            searchTypes.add(new ConstructSearchJson(finiteType.name, module.id,
                    getFirstLine(finiteType.description))));
    module.annotations.forEach((annotation) ->
            searchAnnotations.add(new ConstructSearchJson(annotation.name, module.id,
                    getFirstLine(annotation.description))));
    SearchJson searchJson = new SearchJson(searchModules, searchClasses, searchFunctions, searchRecords,
            searchConstants, searchErrors, searchTypes, searchClients, searchListeners, searchAnnotations,
            searchAbstractObjects);
    // Local pretty-printing Gson (the class-level instance is used for compact output elsewhere).
    Gson gson = new GsonBuilder().setPrettyPrinting().create();
    File jsonFile = new File(jsonPath);
    try (java.io.Writer writer = new OutputStreamWriter(new FileOutputStream(jsonFile), StandardCharsets.UTF_8)) {
        // The writer is already UTF-8; the previous getBytes/new String round-trip was a no-op.
        writer.write(gson.toJson(searchJson));
    } catch (IOException e) {
        out.println(String.format("docerina: failed to create the search.json. Cause: %s", e.getMessage()));
        log.error("Failed to create search.json file.", e);
    }
}
/**
 * Returns the first sentence of a description: everything up to and including the
 * first period. When the leading fragment opens an HTML paragraph, the paragraph
 * tag is closed so the snippet stays well-formed. Descriptions without a period
 * are returned unchanged.
 *
 * @param description construct description, possibly containing HTML
 * @return the first sentence of the description
 */
private static String getFirstLine(String description) {
    int dotIndex = description.indexOf('.');
    if (dotIndex < 0) {
        // No sentence terminator at all — return the whole description.
        return description;
    }
    String firstSentence = description.substring(0, dotIndex);
    return firstSentence.contains("<p>") ? firstSentence + ".</p>" : firstSentence + ".";
}
/**
 * Merges every per-module search index found directly under the docs root into a
 * single JavaScript data file ({@code SEARCH_DATA}) consumed by the doc-search UI.
 *
 * @param docRoot docs root directory containing one sub-directory per module
 */
private static void mergeSearchJsons(String docRoot) {
    // Local pretty-printing Gson; intentionally shadows the class-level instance.
    Gson gson = new GsonBuilder().setPrettyPrinting().create();
    File directory = new File(docRoot);
    File[] fList = directory.listFiles();
    if (fList != null) {
        // Sort so the merged index is deterministic across runs.
        Arrays.sort(fList);
        SearchJson searchJson = new SearchJson();
        for (File file : fList) {
            if (file.isDirectory()) {
                Path moduleJsonPath = Paths.get(file.getAbsolutePath(), MODULE_SEARCH + JSON);
                if (moduleJsonPath.toFile().exists()) {
                    try (BufferedReader br = Files.newBufferedReader(moduleJsonPath, StandardCharsets.UTF_8)) {
                        // Accumulate each construct category from the module's index.
                        SearchJson modSearchJson = gson.fromJson(br, SearchJson.class);
                        searchJson.getModules().addAll(modSearchJson.getModules());
                        searchJson.getFunctions().addAll(modSearchJson.getFunctions());
                        searchJson.getClasses().addAll(modSearchJson.getClasses());
                        searchJson.getClients().addAll(modSearchJson.getClients());
                        searchJson.getListeners().addAll(modSearchJson.getListeners());
                        searchJson.getRecords().addAll(modSearchJson.getRecords());
                        searchJson.getConstants().addAll(modSearchJson.getConstants());
                        searchJson.getErrors().addAll(modSearchJson.getErrors());
                        searchJson.getTypes().addAll(modSearchJson.getTypes());
                        searchJson.getAnnotations().addAll(modSearchJson.getAnnotations());
                        searchJson.getAbstractObjects().addAll(modSearchJson.getAbstractObjects());
                    } catch (IOException e) {
                        String errorMsg = String.format("API documentation generation failed. Cause: %s",
                                e.getMessage());
                        out.println(errorMsg);
                        log.error(errorMsg, e);
                        // A single unreadable module index aborts the whole merge.
                        return;
                    }
                }
            }
        }
        File docSearchDir = new File(docRoot + File.separator + SEARCH_DIR);
        boolean docSearchDirExists = docSearchDir.exists() || docSearchDir.mkdir();
        if (!docSearchDirExists) {
            // NOTE(review): the write below is still attempted after a failed mkdir;
            // it will then fail and be reported by the catch block.
            out.println("docerina: failed to create " + SEARCH_DIR + " directory");
            log.error("Failed to create " + SEARCH_DIR + " directory.");
        }
        File jsonFile = new File(docRoot + File.separator + SEARCH_DIR + File.separator + SEARCH_DATA);
        try (java.io.Writer writer = new OutputStreamWriter(new FileOutputStream(jsonFile),
                StandardCharsets.UTF_8)) {
            // Emitted as a JS assignment (not plain JSON) so the search page can
            // load it with a <script> tag.
            String json = gson.toJson(searchJson);
            String js = "var searchData = " + json + ";";
            writer.write(new String(js.getBytes(StandardCharsets.UTF_8), StandardCharsets.UTF_8));
        } catch (IOException e) {
            out.println(String.format("docerina: failed to create the " + SEARCH_DATA + ". Cause: %s",
                    e.getMessage()));
            log.error("Failed to create " + SEARCH_DATA + " file.", e);
        }
    }
}
/**
 * Builds a {@link ModuleDoc} for every given module.
 *
 * @param sourceRoot project source root used to locate module docs and resources
 * @param modules    compiled modules to document
 * @return module name mapped to its generated {@link ModuleDoc}
 * @throws IOException when module resources or docs cannot be read
 */
public static Map<String, ModuleDoc> generateModuleDocs(String sourceRoot,
                                                        List<BLangPackage> modules) throws IOException {
    Map<String, ModuleDoc> moduleDocMap = new HashMap<>();
    for (BLangPackage module : modules) {
        String moduleName = module.packageID.name.toString();
        moduleDocMap.put(moduleName, generateModuleDoc(sourceRoot, module));
    }
    return moduleDocMap;
}
/**
 * Builds a {@link ModuleDoc} for every given module except those named in the
 * filter set (the filter is an exclusion list).
 *
 * @param sourceRoot   project source root used to locate module docs and resources
 * @param modules      compiled modules to document
 * @param moduleFilter module names to skip
 * @return module name mapped to its generated {@link ModuleDoc}
 * @throws IOException when module resources or docs cannot be read
 */
public static Map<String, ModuleDoc> generateModuleDocs(String sourceRoot, List<BLangPackage> modules,
                                                        Set<String> moduleFilter) throws IOException {
    Map<String, ModuleDoc> moduleDocMap = new HashMap<>();
    for (BLangPackage bLangPackage : modules) {
        String moduleName = bLangPackage.packageID.name.toString();
        if (!moduleFilter.contains(moduleName)) {
            moduleDocMap.put(moduleName, generateModuleDoc(sourceRoot, bLangPackage));
        }
    }
    return moduleDocMap;
}
/**
 * Builds the {@link ModuleDoc} for one module: locates its Module.md (if any) and
 * collects the paths of all files under its resources directory.
 *
 * @param sourceRoot   project source root
 * @param bLangPackage compiled module
 * @return the assembled module doc model
 * @throws IOException when the module directory cannot be traversed
 */
public static ModuleDoc generateModuleDoc(String sourceRoot, BLangPackage bLangPackage) throws IOException {
    String moduleName = bLangPackage.packageID.name.toString();
    Path absolutePkgPath = getAbsoluteModulePath(sourceRoot, Paths.get(moduleName));
    // Module documentation file (may be absent).
    Path packageMd = getModuleDocPath(absolutePkgPath);
    List<Path> resources = getResourcePaths(absolutePkgPath);
    return new ModuleDoc(packageMd == null ? null : packageMd.toAbsolutePath(), resources, bLangPackage);
}
/**
 * Redirects docerina's console output (used for progress and error messages).
 *
 * @param out the stream to print to
 */
public static void setPrintStream(PrintStream out) {
    BallerinaDocGenerator.out = out;
}
/**
 * Builds the docs-generator model of the project from the given module docs.
 * Modules without any public construct are omitted from the resulting project.
 *
 * @param moduleDocList modules whose docs are to be generated
 * @return docs generator model of the project
 */
public static Project getDocsGenModel(List<ModuleDoc> moduleDocList) {
    Project project = new Project();
    // A "single file" build compiles exactly one unnamed (".") module.
    project.isSingleFile =
            moduleDocList.size() == 1 && moduleDocList.get(0).bLangPackage.packageID.name.value.equals(".");
    if (project.isSingleFile) {
        project.sourceFileName = moduleDocList.get(0).bLangPackage.packageID.sourceFileName.value;
    }
    project.name = "";
    project.description = "";
    List<Module> moduleDocs = new ArrayList<>();
    for (ModuleDoc moduleDoc : moduleDocList) {
        Module module = new Module();
        module.id = moduleDoc.bLangPackage.packageID.name.toString();
        module.orgName = moduleDoc.bLangPackage.packageID.orgName.toString();
        String moduleVersion = moduleDoc.bLangPackage.packageID.version.toString();
        // Fall back to the globally configured version when the module has none.
        module.version = moduleVersion.isEmpty() ?
                System.getProperty(BallerinaDocConstants.VERSION) :
                moduleVersion;
        module.summary = moduleDoc.summary;
        module.description = moduleDoc.description;
        sortModuleConstructs(moduleDoc.bLangPackage);
        boolean hasPublicConstructs = Generator.generateModuleConstructs(module, moduleDoc.bLangPackage);
        if (hasPublicConstructs) {
            module.resources.addAll(moduleDoc.resources);
            moduleDocs.add(module);
        }
    }
    project.modules = moduleDocs;
    return project;
}
/**
 * Sorts the module's constructs in-place so generated pages list them
 * alphabetically.
 *
 * @param bLangPackage compiled module whose construct lists are sorted
 */
private static void sortModuleConstructs(BLangPackage bLangPackage) {
    // Functions sort by receiver name (empty for free functions) + function name,
    // which groups attached functions under their receiver.
    bLangPackage.getFunctions().sort(Comparator.comparing(f -> (f.getReceiver() == null ? "" : f
            .getReceiver().getName()) + f.getName().getValue()));
    bLangPackage.getAnnotations().sort(Comparator.comparing(a -> a.getName().getValue()));
    // Type definitions may be anonymous; treat a missing name as "".
    bLangPackage.getTypeDefinitions()
            .sort(Comparator.comparing(a -> a.getName() == null ? "" : a.getName().getValue()));
    bLangPackage.getGlobalVariables().sort(Comparator.comparing(a -> a.getName().getValue()));
}
/**
 * Collects the paths of every entry under the module's {@code resources}
 * directory, excluding the directory itself. Returns an empty list when the
 * module has no resources directory.
 *
 * @param absolutePkgPath absolute path of the module directory
 * @return paths of all resource files and sub-directories
 * @throws IOException when the resources directory cannot be traversed
 */
private static List<Path> getResourcePaths(Path absolutePkgPath) throws IOException {
    Path resourcesDirPath = absolutePkgPath.resolve("resources");
    List<Path> resources = new ArrayList<>();
    if (resourcesDirPath.toFile().exists()) {
        // Files.walk keeps directory handles open while the stream is live;
        // close it to avoid leaking file descriptors.
        try (java.util.stream.Stream<Path> paths = Files.walk(resourcesDirPath)) {
            resources = paths.filter(path -> !path.equals(resourcesDirPath)).collect(Collectors.toList());
        }
    }
    return resources;
}
/**
 * Finds the module documentation file ({@code MODULE_CONTENT_FILE}) directly
 * inside the module directory.
 *
 * @param absolutePkgPath absolute path of the module directory
 * @return path of the module doc file, or {@code null} when absent
 * @throws IOException when the module directory cannot be read
 */
private static Path getModuleDocPath(Path absolutePkgPath) throws IOException {
    Optional<Path> match;
    // Files.find keeps directory handles open while the stream is live;
    // close it to avoid leaking file descriptors.
    try (java.util.stream.Stream<Path> candidates = Files.find(absolutePkgPath, 1, (path, attr) -> {
        Path fileName = path.getFileName();
        return fileName != null && fileName.toString().equals(MODULE_CONTENT_FILE);
    })) {
        match = candidates.findFirst();
    }
    return match.orElse(null);
}
}
|
I'm fairly sure the `volatile` modifier is redundant here, since the field is only ever accessed through the `VarHandle`'s `getAcquire`/`setRelease` operations, which already provide the required ordering.
|
/**
 * Returns the lazily created client, creating it on first use.
 *
 * <p>Double-checked locking: the fast path is a lock-free acquire-read via
 * {@code currentClient()}; only the first caller(s) enter the synchronized slow
 * path, where the check is repeated before the client is produced and published
 * with a release-write. Subsequent calls return the cached instance.
 */
public T get() {
    // Fast path: acquire-read of the already-published client.
    T current = currentClient();
    if (current == null) {
        synchronized (this) {
            // Re-check under the lock: another thread may have won the race.
            current = currentClient();
            if (current == null) {
                // Resolve the shared MongoClients bean and derive the client once.
                MongoClients mongoClients = Arc.container().instance(MongoClients.class).get();
                current = producer.apply(mongoClients);
                // Release-write pairs with the acquire-read in currentClient().
                CLIENT.setRelease(this, current);
            }
        }
    }
    return current;
}
|
CLIENT.setRelease(this, current);
|
/**
 * Returns a client derived from the container-managed {@code MongoClients} bean.
 * No caching is performed here; every call resolves the bean and applies the
 * producer function.
 */
public T get() {
    return producer.apply(Arc.container().instance(MongoClients.class).get());
}
|
// Supplies a lazily created Mongo client; the cached instance is stored in
// `client` and accessed through the CLIENT VarHandle with acquire/release
// semantics.
class MongoClientSupplier<T> implements Supplier<T> {
    // VarHandle over the `client` field, used for acquire-reads and release-writes.
    private static final VarHandle CLIENT;
    static {
        try {
            MethodHandles.Lookup lookup = MethodHandles.lookup();
            CLIENT = lookup.findVarHandle(MongoClientSupplier.class, "client", Object.class);
        } catch (ReflectiveOperationException e) {
            // Lookup failure is unrecoverable; fail class initialization.
            throw new ExceptionInInitializerError(e);
        }
    }
    // Written only through CLIENT.setRelease; read through CLIENT.getAcquire.
    @SuppressWarnings("unused")
    private volatile T client;
    // Derives the concrete client from the shared MongoClients bean.
    private final Function<MongoClients, T> producer;
    MongoClientSupplier(Function<MongoClients, T> producer) {
        this.producer = producer;
    }
    // NOTE(review): this @Override precedes a private method, which cannot
    // override anything — looks like the overriding get() method was removed
    // between the annotation and currentClient(); confirm against the original.
    @Override
    @SuppressWarnings("unchecked")
    private T currentClient() {
        return (T) CLIENT.getAcquire(this);
    }
}
|
// Simplified supplier variant: no caching, each get() resolves the client anew.
class MongoClientSupplier<T> implements Supplier<T> {
    // Derives the concrete client from the shared MongoClients bean.
    private final Function<MongoClients, T> producer;
    MongoClientSupplier(Function<MongoClients, T> producer) {
        this.producer = producer;
    }
    // NOTE(review): dangling @Override with no following method — looks like the
    // get() implementation was removed during extraction; confirm against the
    // original source.
    @Override
}
|
Please rewrite this error message as well — "Now, not in transaction" is unclear; something like "Savepoint can only be used in transaction blocks." reads much better.
|
/**
 * Sets a savepoint with the given name on every cached connection, and records
 * the call so connections created later in the transaction replay it.
 *
 * @param savepointName savepoint name
 * @return created savepoint
 * @throws SQLException if not currently in a transaction, or the driver fails to create the savepoint
 */
public Savepoint setSavepoint(final String savepointName) throws SQLException {
    if (!connectionTransaction.isInTransaction()) {
        // Reworded from "Now, not in transaction" per review feedback.
        throw new SQLException("Savepoint can only be used in transaction blocks.");
    }
    ShardingSphereSavepoint result = new ShardingSphereSavepoint(savepointName);
    for (Connection each : cachedConnections.values()) {
        ConnectionSavepointManager.getInstance().setSavepoint(each, savepointName);
    }
    methodInvocationRecorder.record("setSavepoint", target -> ConnectionSavepointManager.getInstance().setSavepoint(target, savepointName));
    return result;
}
|
throw new SQLException("Now, not in transaction");
|
/**
 * Sets a savepoint with the given name on every cached connection, and records
 * the call so connections created later in the transaction replay it.
 *
 * @param savepointName savepoint name
 * @return created savepoint
 * @throws SQLException if not currently in a transaction, or the driver fails to create the savepoint
 */
public Savepoint setSavepoint(final String savepointName) throws SQLException {
    if (!connectionTransaction.isInTransaction()) {
        throw new SQLException("Savepoint can only be used in transaction blocks.");
    }
    ShardingSphereSavepoint savepoint = new ShardingSphereSavepoint(savepointName);
    for (Connection connection : cachedConnections.values()) {
        ConnectionSavepointManager.getInstance().setSavepoint(connection, savepointName);
    }
    methodInvocationRecorder.record("setSavepoint", target -> ConnectionSavepointManager.getInstance().setSavepoint(target, savepointName));
    return savepoint;
}
|
// Manages physical JDBC connections for one logical schema: creation, caching,
// transaction coordination, and replay of connection settings.
class ConnectionManager implements ExecutorJDBCConnectionManager, AutoCloseable {
    // Logical data sources, including traffic (proxy) data sources.
    private final Map<String, DataSource> dataSourceMap = new LinkedHashMap<>();
    // Physical data sources only (no traffic data sources).
    private final Map<String, DataSource> physicalDataSourceMap = new LinkedHashMap<>();
    @Getter
    private final ConnectionTransaction connectionTransaction;
    // Data source name -> connections already handed out for it.
    private final Multimap<String, Connection> cachedConnections = LinkedHashMultimap.create();
    // Replays recorded settings (autocommit, isolation, ...) on newly created connections.
    private final MethodInvocationRecorder<Connection> methodInvocationRecorder = new MethodInvocationRecorder<>();
    // Applies an operation to every connection, collecting failures instead of stopping early.
    private final ForceExecuteTemplate<Connection> forceExecuteTemplate = new ForceExecuteTemplate<>();
    // Used to pick a random physical data source.
    private final Random random = new SecureRandom();
/**
 * Creates a connection manager for the given schema.
 *
 * @param schema         schema name
 * @param contextManager context manager supplying data sources and rules
 */
public ConnectionManager(final String schema, final ContextManager contextManager) {
    dataSourceMap.putAll(contextManager.getDataSourceMap(schema));
    // Traffic data sources are added on top of the physical ones.
    dataSourceMap.putAll(getTrafficDataSourceMap(schema, contextManager));
    physicalDataSourceMap.putAll(contextManager.getDataSourceMap(schema));
    connectionTransaction = createConnectionTransaction(schema, contextManager);
}
/**
 * Builds data sources pointing at proxy compute nodes when a traffic rule with
 * strategies is configured; otherwise returns an empty map.
 *
 * @param schema         schema name
 * @param contextManager context manager supplying rules and persisted metadata
 * @return data source name mapped to its proxy-backed data source
 */
private Map<String, DataSource> getTrafficDataSourceMap(final String schema, final ContextManager contextManager) {
    Optional<TrafficRule> trafficRule = contextManager.getMetaDataContexts().getGlobalRuleMetaData().findSingleRule(TrafficRule.class);
    Optional<MetaDataPersistService> metaDataPersistService = contextManager.getMetaDataContexts().getMetaDataPersistService();
    // Traffic routing is only possible with a configured rule and persisted metadata.
    if (!trafficRule.isPresent() || trafficRule.get().getStrategyRules().isEmpty() || !metaDataPersistService.isPresent()) {
        return Collections.emptyMap();
    }
    Map<String, DataSourceProperties> dataSourcePropsMap = metaDataPersistService.get().getDataSourceService().load(schema);
    Preconditions.checkState(!dataSourcePropsMap.isEmpty(), "Can not get data source properties from meta data.");
    // One arbitrary data source serves as the template for pool settings.
    DataSourceProperties dataSourcePropsSample = dataSourcePropsMap.values().iterator().next();
    Collection<ShardingSphereUser> users = metaDataPersistService.get().getGlobalRuleService().loadUsers();
    Collection<InstanceId> instanceIds = contextManager.getInstanceContext().getComputeNodeInstanceIds(InstanceType.PROXY, trafficRule.get().getLabels());
    return DataSourcePoolCreator.create(createDataSourcePropertiesMap(instanceIds, users, dataSourcePropsSample, schema));
}
/**
 * Builds data source properties for each proxy instance, keyed by instance id.
 *
 * @param instanceIds           proxy compute node ids
 * @param users                 configured users (the first one is used for credentials)
 * @param dataSourcePropsSample template pool settings
 * @param schema                schema name
 * @return instance id mapped to its data source properties
 */
private Map<String, DataSourceProperties> createDataSourcePropertiesMap(final Collection<InstanceId> instanceIds, final Collection<ShardingSphereUser> users,
                                                                        final DataSourceProperties dataSourcePropsSample, final String schema) {
    // LinkedHashMap keeps the instances in their discovery order.
    Map<String, DataSourceProperties> result = new LinkedHashMap<>();
    instanceIds.forEach(instanceId -> result.put(instanceId.getId(), createDataSourceProperties(instanceId, users, dataSourcePropsSample, schema)));
    return result;
}
/**
 * Derives data source properties for one proxy instance from the sample
 * properties, overriding the JDBC URL and credentials.
 *
 * @param instanceId            proxy compute node id
 * @param users                 configured users; the first user's credentials are applied
 * @param dataSourcePropsSample template pool settings
 * @param schema                schema name
 * @return Hikari-backed data source properties for the instance
 */
private DataSourceProperties createDataSourceProperties(final InstanceId instanceId, final Collection<ShardingSphereUser> users,
                                                        final DataSourceProperties dataSourcePropsSample, final String schema) {
    Map<String, Object> props = dataSourcePropsSample.getAllLocalProperties();
    props.put("jdbcUrl", createJdbcUrl(instanceId, schema, props));
    // NOTE(review): assumes at least one configured user and always uses the
    // first one — confirm that is intended for proxy connections.
    ShardingSphereUser user = users.iterator().next();
    props.put("username", user.getGrantee().getUsername());
    props.put("password", user.getPassword());
    return new DataSourceProperties("com.zaxxer.hikari.HikariDataSource", props);
}
// Builds the JDBC URL pointing at the given proxy instance, reusing the prefix
// and query suffix of the sample URL.
// NOTE(review): the string literals on the substring/format lines below are
// truncated (they end mid-literal) — this looks like an extraction artifact
// where text after "//" inside the literals was stripped; restore from the
// original source before compiling.
private String createJdbcUrl(final InstanceId instanceId, final String schema, final Map<String, Object> props) {
    String jdbcUrl = String.valueOf(props.get("jdbcUrl"));
    String jdbcUrlPrefix = jdbcUrl.substring(0, jdbcUrl.indexOf("
    String jdbcUrlSuffix = jdbcUrl.contains("?") ? jdbcUrl.substring(jdbcUrl.indexOf("?")) : "";
    return String.format("%s
}
/**
 * Creates the connection transaction for the schema. An explicitly set
 * {@code TransactionTypeHolder} value wins; otherwise the globally configured
 * transaction rule (if any) decides the type.
 *
 * @param schemaName     schema name
 * @param contextManager context manager supplying transaction rules and contexts
 * @return connection transaction for this manager
 */
private ConnectionTransaction createConnectionTransaction(final String schemaName, final ContextManager contextManager) {
    TransactionType type = TransactionTypeHolder.get();
    if (null == type) {
        Optional<TransactionRule> transactionRule = contextManager.getMetaDataContexts().getGlobalRuleMetaData().findSingleRule(TransactionRule.class);
        return transactionRule.map(optional -> new ConnectionTransaction(schemaName, optional, contextManager.getTransactionContexts()))
                .orElseGet(() -> new ConnectionTransaction(schemaName, contextManager.getTransactionContexts()));
    }
    return new ConnectionTransaction(schemaName, type, contextManager.getTransactionContexts());
}
/**
 * Set auto commit on every cached connection, and record the setting so
 * connections created later replay it.
 *
 * @param autoCommit auto commit
 * @throws SQLException SQL exception
 */
public void setAutoCommit(final boolean autoCommit) throws SQLException {
    methodInvocationRecorder.record("setAutoCommit", connection -> connection.setAutoCommit(autoCommit));
    forceExecuteTemplate.execute(cachedConnections.values(), each -> each.setAutoCommit(autoCommit));
}
/**
 * Commit the current transaction. Local transactions commit (or roll back, when
 * marked rollback-only) each cached connection directly; distributed
 * transactions delegate to the transaction manager.
 *
 * @throws SQLException SQL exception
 */
public void commit() throws SQLException {
    if (!connectionTransaction.isLocalTransaction()) {
        // Distributed transaction: the transaction manager drives the commit.
        connectionTransaction.commit();
    } else if (connectionTransaction.isRollbackOnly()) {
        // Local transaction marked rollback-only: roll everything back instead.
        forceExecuteTemplate.execute(cachedConnections.values(), Connection::rollback);
    } else {
        forceExecuteTemplate.execute(cachedConnections.values(), Connection::commit);
    }
}
/**
 * Rollback the current transaction. Local transactions roll back each cached
 * connection directly; distributed transactions delegate to the transaction
 * manager.
 *
 * @throws SQLException SQL exception
 */
public void rollback() throws SQLException {
    if (connectionTransaction.isLocalTransaction()) {
        forceExecuteTemplate.execute(cachedConnections.values(), Connection::rollback);
        return;
    }
    connectionTransaction.rollback();
}
/**
 * Rollback to the given savepoint on every cached connection.
 *
 * @param savepoint savepoint to roll back to
 * @throws SQLException if not currently in a transaction, or the driver fails to roll back
 */
public void rollback(final Savepoint savepoint) throws SQLException {
    if (!connectionTransaction.isInTransaction()) {
        // Reworded from "Now, not in transaction" to match setSavepoint's message.
        throw new SQLException("Savepoint can only be used in transaction blocks.");
    }
    for (Connection each : cachedConnections.values()) {
        ConnectionSavepointManager.getInstance().rollbackToSavepoint(each, savepoint.getSavepointName());
    }
}
/**
 * Set savepoint with the specified name.
 *
 * @param savepointName savepoint name
 * @return created savepoint
 * @throws SQLException SQL exception
 */
/**
 * Set a savepoint with a generated name on every cached connection, and record
 * the call so connections created later in the transaction replay it.
 *
 * @return created savepoint
 * @throws SQLException if not currently in a transaction, or the driver fails to create the savepoint
 */
public Savepoint setSavepoint() throws SQLException {
    if (!connectionTransaction.isInTransaction()) {
        // Reworded from "Now, not in transaction" per review feedback.
        throw new SQLException("Savepoint can only be used in transaction blocks.");
    }
    ShardingSphereSavepoint result = new ShardingSphereSavepoint();
    for (Connection each : cachedConnections.values()) {
        ConnectionSavepointManager.getInstance().setSavepoint(each, result.getSavepointName());
    }
    methodInvocationRecorder.record("setSavepoint", target -> ConnectionSavepointManager.getInstance().setSavepoint(target, result.getSavepointName()));
    return result;
}
/**
 * Release savepoint on every cached connection. Unlike the rollback/set
 * variants, this is a silent no-op outside a transaction.
 *
 * @param savepoint savepoint
 * @throws SQLException SQL exception
 */
public void releaseSavepoint(final Savepoint savepoint) throws SQLException {
    if (!connectionTransaction.isInTransaction()) {
        // Intentionally lenient: releasing outside a transaction does nothing.
        return;
    }
    for (Connection each : cachedConnections.values()) {
        ConnectionSavepointManager.getInstance().releaseSavepoint(each, savepoint.getSavepointName());
    }
}
/**
 * Get transaction isolation.
 *
 * @return transaction isolation level of the first cached connection, or empty
 *         when no connection has been cached yet
 * @throws SQLException SQL exception
 */
public Optional<Integer> getTransactionIsolation() throws SQLException {
    Collection<Connection> connections = cachedConnections.values();
    if (connections.isEmpty()) {
        return Optional.empty();
    }
    return Optional.of(connections.iterator().next().getTransactionIsolation());
}
/**
 * Set transaction isolation on every cached connection, and record the setting
 * so connections created later replay it.
 *
 * @param level transaction isolation level
 * @throws SQLException SQL exception
 */
public void setTransactionIsolation(final int level) throws SQLException {
    methodInvocationRecorder.record("setTransactionIsolation", target -> target.setTransactionIsolation(level));
    forceExecuteTemplate.execute(cachedConnections.values(), each -> each.setTransactionIsolation(level));
}
/**
 * Set read only on every cached connection, and record the setting so
 * connections created later replay it.
 *
 * @param readOnly read only
 * @throws SQLException SQL exception
 */
public void setReadOnly(final boolean readOnly) throws SQLException {
    methodInvocationRecorder.record("setReadOnly", target -> target.setReadOnly(readOnly));
    forceExecuteTemplate.execute(cachedConnections.values(), each -> each.setReadOnly(readOnly));
}
/**
 * Whether every cached connection is still valid.
 *
 * @param timeout timeout in seconds passed to each connection's validity check
 * @return true when all cached connections are valid (vacuously true when none are cached)
 * @throws SQLException SQL exception
 */
public boolean isValid(final int timeout) throws SQLException {
    for (Connection connection : cachedConnections.values()) {
        if (!connection.isValid(timeout)) {
            // One broken connection invalidates the whole manager.
            return false;
        }
    }
    return true;
}
/**
 * Get random physical data source name, preferring data sources that already
 * have cached connections so existing connections can be reused.
 *
 * @return random physical data source name
 */
public String getRandomPhysicalDataSourceName() {
    // Physical data sources for which connections are already cached.
    Collection<String> cachedPhysicalDataSourceNames = Sets.intersection(physicalDataSourceMap.keySet(), cachedConnections.keySet());
    Collection<String> datasourceNames = cachedPhysicalDataSourceNames.isEmpty() ? physicalDataSourceMap.keySet() : cachedPhysicalDataSourceNames;
    return new ArrayList<>(datasourceNames).get(random.nextInt(datasourceNames.size()));
}
/**
 * Get a single connection from a randomly chosen physical data source.
 *
 * @return random connection
 * @throws SQLException SQL exception
 */
public Connection getRandomConnection() throws SQLException {
    return getConnections(getRandomPhysicalDataSourceName(), 1, ConnectionMode.MEMORY_STRICTLY).get(0);
}
/**
 * Get {@code connectionSize} connections for one data source, reusing cached
 * connections first and creating new ones only for the shortfall. Newly created
 * connections are added to the cache.
 *
 * @param dataSourceName data source name (must exist in {@code dataSourceMap})
 * @param connectionSize number of connections requested
 * @param connectionMode connection mode, controls serialization of batch creation
 * @return connections, exactly {@code connectionSize} of them
 * @throws SQLException SQL exception
 */
@Override
public List<Connection> getConnections(final String dataSourceName, final int connectionSize, final ConnectionMode connectionMode) throws SQLException {
    DataSource dataSource = dataSourceMap.get(dataSourceName);
    Preconditions.checkState(null != dataSource, "Missing the data source name: '%s'", dataSourceName);
    Collection<Connection> connections;
    synchronized (cachedConnections) {
        // NOTE(review): Multimap.get returns a live view of the cache; it is read
        // below outside this lock. Presumably each ConnectionManager is driven by a
        // single front-end thread so this is safe in practice — confirm.
        connections = cachedConnections.get(dataSourceName);
    }
    List<Connection> result;
    if (connections.size() >= connectionSize) {
        // Enough cached connections: hand out a prefix of them.
        result = new ArrayList<>(connections).subList(0, connectionSize);
    } else if (!connections.isEmpty()) {
        // Partially cached: reuse what is cached, create only the shortfall.
        result = new ArrayList<>(connectionSize);
        result.addAll(connections);
        List<Connection> newConnections = createConnections(dataSourceName, dataSource, connectionSize - connections.size(), connectionMode);
        result.addAll(newConnections);
        synchronized (cachedConnections) {
            cachedConnections.putAll(dataSourceName, newConnections);
        }
    } else {
        // Nothing cached yet: create the whole batch.
        result = new ArrayList<>(createConnections(dataSourceName, dataSource, connectionSize, connectionMode));
        synchronized (cachedConnections) {
            cachedConnections.putAll(dataSourceName, result);
        }
    }
    return result;
}
// Creates a batch of connections, serializing batch acquisition per data source
// for MEMORY_STRICTLY mode. The lock on the dataSource parameter is deliberate
// (hence the suppression): presumably it prevents several threads from each
// acquiring partial batches from the same pool at once — confirm against the
// project's connection-mode design docs.
@SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
private List<Connection> createConnections(final String dataSourceName, final DataSource dataSource, final int connectionSize, final ConnectionMode connectionMode) throws SQLException {
    // Single connection: no batch-contention concern, create it directly.
    if (1 == connectionSize) {
        Connection connection = createConnection(dataSourceName, dataSource);
        methodInvocationRecorder.replay(connection);
        return Collections.singletonList(connection);
    }
    // CONNECTION_STRICTLY: acquire the batch without serializing on the data source.
    if (ConnectionMode.CONNECTION_STRICTLY == connectionMode) {
        return createConnections(dataSourceName, dataSource, connectionSize);
    }
    // MEMORY_STRICTLY: serialize multi-connection acquisition per data source.
    synchronized (dataSource) {
        return createConnections(dataSourceName, dataSource, connectionSize);
    }
}
/**
 * Open {@code connectionSize} new connections from one data source, replaying the
 * recorded connection settings (auto commit, isolation, ...) on each one.
 *
 * <p>If any connection fails to open, all connections opened so far are closed
 * before the failure is rethrown, so none leak. Failures while closing are
 * attached to the thrown exception as suppressed exceptions instead of masking
 * it (previously a failing {@code close()} would replace the informative
 * exception and abort the cleanup loop, leaking the remaining connections).</p>
 *
 * @param dataSourceName data source name
 * @param dataSource data source to open connections from
 * @param connectionSize number of connections to open
 * @return the newly opened connections
 * @throws SQLException when any connection cannot be opened
 */
private List<Connection> createConnections(final String dataSourceName, final DataSource dataSource, final int connectionSize) throws SQLException {
    List<Connection> result = new ArrayList<>(connectionSize);
    for (int i = 0; i < connectionSize; i++) {
        try {
            Connection connection = createConnection(dataSourceName, dataSource);
            methodInvocationRecorder.replay(connection);
            result.add(connection);
        } catch (final SQLException ex) {
            SQLException rethrown = new SQLException(String.format("Can not get %d connections one time, partition succeed connection(%d) have released!", connectionSize, result.size()), ex);
            for (Connection each : result) {
                try {
                    each.close();
                } catch (final SQLException closeEx) {
                    // Record the close failure without masking the creation failure,
                    // and keep closing the rest so nothing leaks.
                    rethrown.addSuppressed(closeEx);
                }
            }
            throw rethrown;
        }
    }
    return result;
}
private Connection createConnection(final String dataSourceName, final DataSource dataSource) throws SQLException {
    // A raw JDBC data source may already have a connection enlisted in the
    // current transaction; reuse it rather than opening a new one.
    if (isRawJdbcDataSource(dataSourceName)) {
        Optional<Connection> transactionalConnection = connectionTransaction.getConnection(dataSourceName);
        if (transactionalConnection.isPresent()) {
            return transactionalConnection.get();
        }
    }
    return dataSource.getConnection();
}
// True when the name refers to one of the schema's physical (raw JDBC) data
// sources, as opposed to names only present in dataSourceMap (e.g. traffic
// data sources added alongside the physical ones).
private boolean isRawJdbcDataSource(final String dataSourceName) {
    return physicalDataSourceMap.containsKey(dataSourceName);
}
@Override
public void close() throws SQLException {
    try {
        // "Force" execute: presumably attempts to close every cached connection even
        // if some closes fail, then reports failures as one SQLException — confirm
        // against ForceExecuteTemplate.
        forceExecuteTemplate.execute(cachedConnections.values(), Connection::close);
    } finally {
        // Always drop the cache, even when closing throws, so no stale connection
        // objects are handed out afterwards.
        cachedConnections.clear();
    }
}
}
|
class ConnectionManager implements ExecutorJDBCConnectionManager, AutoCloseable {
private final Map<String, DataSource> dataSourceMap = new LinkedHashMap<>();
private final Map<String, DataSource> physicalDataSourceMap = new LinkedHashMap<>();
@Getter
private final ConnectionTransaction connectionTransaction;
private final Multimap<String, Connection> cachedConnections = LinkedHashMultimap.create();
private final MethodInvocationRecorder<Connection> methodInvocationRecorder = new MethodInvocationRecorder<>();
private final ForceExecuteTemplate<Connection> forceExecuteTemplate = new ForceExecuteTemplate<>();
private final Random random = new SecureRandom();
public ConnectionManager(final String schema, final ContextManager contextManager) {
dataSourceMap.putAll(contextManager.getDataSourceMap(schema));
dataSourceMap.putAll(getTrafficDataSourceMap(schema, contextManager));
physicalDataSourceMap.putAll(contextManager.getDataSourceMap(schema));
connectionTransaction = createConnectionTransaction(schema, contextManager);
}
private Map<String, DataSource> getTrafficDataSourceMap(final String schema, final ContextManager contextManager) {
Optional<TrafficRule> trafficRule = contextManager.getMetaDataContexts().getGlobalRuleMetaData().findSingleRule(TrafficRule.class);
Optional<MetaDataPersistService> metaDataPersistService = contextManager.getMetaDataContexts().getMetaDataPersistService();
if (!trafficRule.isPresent() || trafficRule.get().getStrategyRules().isEmpty() || !metaDataPersistService.isPresent()) {
return Collections.emptyMap();
}
Map<String, DataSourceProperties> dataSourcePropsMap = metaDataPersistService.get().getDataSourceService().load(schema);
Preconditions.checkState(!dataSourcePropsMap.isEmpty(), "Can not get data source properties from meta data.");
DataSourceProperties dataSourcePropsSample = dataSourcePropsMap.values().iterator().next();
Collection<ShardingSphereUser> users = metaDataPersistService.get().getGlobalRuleService().loadUsers();
Collection<InstanceId> instanceIds = contextManager.getInstanceContext().getComputeNodeInstanceIds(InstanceType.PROXY, trafficRule.get().getLabels());
return DataSourcePoolCreator.create(createDataSourcePropertiesMap(instanceIds, users, dataSourcePropsSample, schema));
}
private Map<String, DataSourceProperties> createDataSourcePropertiesMap(final Collection<InstanceId> instanceIds, final Collection<ShardingSphereUser> users,
final DataSourceProperties dataSourcePropsSample, final String schema) {
Map<String, DataSourceProperties> result = new LinkedHashMap<>();
for (InstanceId each : instanceIds) {
result.put(each.getId(), createDataSourceProperties(each, users, dataSourcePropsSample, schema));
}
return result;
}
private DataSourceProperties createDataSourceProperties(final InstanceId instanceId, final Collection<ShardingSphereUser> users,
final DataSourceProperties dataSourcePropsSample, final String schema) {
Map<String, Object> props = dataSourcePropsSample.getAllLocalProperties();
props.put("jdbcUrl", createJdbcUrl(instanceId, schema, props));
ShardingSphereUser user = users.iterator().next();
props.put("username", user.getGrantee().getUsername());
props.put("password", user.getPassword());
return new DataSourceProperties("com.zaxxer.hikari.HikariDataSource", props);
}
private String createJdbcUrl(final InstanceId instanceId, final String schema, final Map<String, Object> props) {
String jdbcUrl = String.valueOf(props.get("jdbcUrl"));
String jdbcUrlPrefix = jdbcUrl.substring(0, jdbcUrl.indexOf("
String jdbcUrlSuffix = jdbcUrl.contains("?") ? jdbcUrl.substring(jdbcUrl.indexOf("?")) : "";
return String.format("%s
}
private ConnectionTransaction createConnectionTransaction(final String schemaName, final ContextManager contextManager) {
TransactionType type = TransactionTypeHolder.get();
if (null == type) {
Optional<TransactionRule> transactionRule = contextManager.getMetaDataContexts().getGlobalRuleMetaData().findSingleRule(TransactionRule.class);
return transactionRule.map(optional -> new ConnectionTransaction(schemaName, optional, contextManager.getTransactionContexts()))
.orElseGet(() -> new ConnectionTransaction(schemaName, contextManager.getTransactionContexts()));
}
return new ConnectionTransaction(schemaName, type, contextManager.getTransactionContexts());
}
/**
* Set auto commit.
*
* @param autoCommit auto commit
* @throws SQLException SQL exception
*/
public void setAutoCommit(final boolean autoCommit) throws SQLException {
methodInvocationRecorder.record("setAutoCommit", target -> target.setAutoCommit(autoCommit));
forceExecuteTemplate.execute(cachedConnections.values(), connection -> connection.setAutoCommit(autoCommit));
}
/**
* Commit.
*
* @throws SQLException SQL exception
*/
public void commit() throws SQLException {
if (connectionTransaction.isLocalTransaction() && connectionTransaction.isRollbackOnly()) {
forceExecuteTemplate.execute(cachedConnections.values(), Connection::rollback);
} else if (connectionTransaction.isLocalTransaction() && !connectionTransaction.isRollbackOnly()) {
forceExecuteTemplate.execute(cachedConnections.values(), Connection::commit);
} else {
connectionTransaction.commit();
}
}
/**
* Rollback.
*
* @throws SQLException SQL exception
*/
public void rollback() throws SQLException {
if (connectionTransaction.isLocalTransaction()) {
forceExecuteTemplate.execute(cachedConnections.values(), Connection::rollback);
} else {
connectionTransaction.rollback();
}
}
/**
* Rollback to savepoint.
*
* @param savepoint savepoint
* @throws SQLException SQL exception
*/
public void rollback(final Savepoint savepoint) throws SQLException {
for (Connection each : cachedConnections.values()) {
ConnectionSavepointManager.getInstance().rollbackToSavepoint(each, savepoint.getSavepointName());
}
}
/**
* Set savepoint.
*
* @param savepointName savepoint name
* @return savepoint savepoint
* @throws SQLException SQL exception
*/
/**
* Set savepoint.
*
* @return savepoint savepoint
* @throws SQLException SQL exception
*/
public Savepoint setSavepoint() throws SQLException {
if (!connectionTransaction.isInTransaction()) {
throw new SQLException("Savepoint can only be used in transaction blocks.");
}
ShardingSphereSavepoint result = new ShardingSphereSavepoint();
for (Connection each : cachedConnections.values()) {
ConnectionSavepointManager.getInstance().setSavepoint(each, result.getSavepointName());
}
methodInvocationRecorder.record("setSavepoint", target -> ConnectionSavepointManager.getInstance().setSavepoint(target, result.getSavepointName()));
return result;
}
/**
* Release savepoint.
*
* @param savepoint savepoint
* @throws SQLException SQL exception
*/
public void releaseSavepoint(final Savepoint savepoint) throws SQLException {
if (!connectionTransaction.isInTransaction()) {
return;
}
for (Connection each : cachedConnections.values()) {
ConnectionSavepointManager.getInstance().releaseSavepoint(each, savepoint.getSavepointName());
}
}
/**
* Get transaction isolation.
*
* @return transaction isolation level
* @throws SQLException SQL exception
*/
public Optional<Integer> getTransactionIsolation() throws SQLException {
return cachedConnections.values().isEmpty() ? Optional.empty() : Optional.of(cachedConnections.values().iterator().next().getTransactionIsolation());
}
/**
* Set transaction isolation.
*
* @param level transaction isolation level
* @throws SQLException SQL exception
*/
public void setTransactionIsolation(final int level) throws SQLException {
methodInvocationRecorder.record("setTransactionIsolation", connection -> connection.setTransactionIsolation(level));
forceExecuteTemplate.execute(cachedConnections.values(), connection -> connection.setTransactionIsolation(level));
}
/**
* Set read only.
*
* @param readOnly read only
* @throws SQLException SQL exception
*/
public void setReadOnly(final boolean readOnly) throws SQLException {
methodInvocationRecorder.record("setReadOnly", connection -> connection.setReadOnly(readOnly));
forceExecuteTemplate.execute(cachedConnections.values(), connection -> connection.setReadOnly(readOnly));
}
/**
* Whether connection valid.
*
* @param timeout timeout
* @return connection valid or not
* @throws SQLException SQL exception
*/
public boolean isValid(final int timeout) throws SQLException {
for (Connection each : cachedConnections.values()) {
if (!each.isValid(timeout)) {
return false;
}
}
return true;
}
/**
* Get random physical data source name.
*
* @return random physical data source name
*/
public String getRandomPhysicalDataSourceName() {
Collection<String> cachedPhysicalDataSourceNames = Sets.intersection(physicalDataSourceMap.keySet(), cachedConnections.keySet());
Collection<String> datasourceNames = cachedPhysicalDataSourceNames.isEmpty() ? physicalDataSourceMap.keySet() : cachedPhysicalDataSourceNames;
return new ArrayList<>(datasourceNames).get(random.nextInt(datasourceNames.size()));
}
/**
* Get random connection.
*
* @return random connection
* @throws SQLException SQL exception
*/
public Connection getRandomConnection() throws SQLException {
return getConnections(getRandomPhysicalDataSourceName(), 1, ConnectionMode.MEMORY_STRICTLY).get(0);
}
@Override
public List<Connection> getConnections(final String dataSourceName, final int connectionSize, final ConnectionMode connectionMode) throws SQLException {
DataSource dataSource = dataSourceMap.get(dataSourceName);
Preconditions.checkState(null != dataSource, "Missing the data source name: '%s'", dataSourceName);
Collection<Connection> connections;
synchronized (cachedConnections) {
connections = cachedConnections.get(dataSourceName);
}
List<Connection> result;
if (connections.size() >= connectionSize) {
result = new ArrayList<>(connections).subList(0, connectionSize);
} else if (!connections.isEmpty()) {
result = new ArrayList<>(connectionSize);
result.addAll(connections);
List<Connection> newConnections = createConnections(dataSourceName, dataSource, connectionSize - connections.size(), connectionMode);
result.addAll(newConnections);
synchronized (cachedConnections) {
cachedConnections.putAll(dataSourceName, newConnections);
}
} else {
result = new ArrayList<>(createConnections(dataSourceName, dataSource, connectionSize, connectionMode));
synchronized (cachedConnections) {
cachedConnections.putAll(dataSourceName, result);
}
}
return result;
}
@SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
private List<Connection> createConnections(final String dataSourceName, final DataSource dataSource, final int connectionSize, final ConnectionMode connectionMode) throws SQLException {
if (1 == connectionSize) {
Connection connection = createConnection(dataSourceName, dataSource);
methodInvocationRecorder.replay(connection);
return Collections.singletonList(connection);
}
if (ConnectionMode.CONNECTION_STRICTLY == connectionMode) {
return createConnections(dataSourceName, dataSource, connectionSize);
}
synchronized (dataSource) {
return createConnections(dataSourceName, dataSource, connectionSize);
}
}
private List<Connection> createConnections(final String dataSourceName, final DataSource dataSource, final int connectionSize) throws SQLException {
List<Connection> result = new ArrayList<>(connectionSize);
for (int i = 0; i < connectionSize; i++) {
try {
Connection connection = createConnection(dataSourceName, dataSource);
methodInvocationRecorder.replay(connection);
result.add(connection);
} catch (final SQLException ex) {
for (Connection each : result) {
each.close();
}
throw new SQLException(String.format("Can not get %d connections one time, partition succeed connection(%d) have released!", connectionSize, result.size()), ex);
}
}
return result;
}
private Connection createConnection(final String dataSourceName, final DataSource dataSource) throws SQLException {
Optional<Connection> connectionInTransaction = isRawJdbcDataSource(dataSourceName) ? connectionTransaction.getConnection(dataSourceName) : Optional.empty();
return connectionInTransaction.isPresent() ? connectionInTransaction.get() : dataSource.getConnection();
}
private boolean isRawJdbcDataSource(final String dataSourceName) {
return physicalDataSourceMap.containsKey(dataSourceName);
}
@Override
public void close() throws SQLException {
try {
forceExecuteTemplate.execute(cachedConnections.values(), Connection::close);
} finally {
cachedConnections.clear();
}
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.