repo stringclasses 1k
values | file_url stringlengths 96 373 | file_path stringlengths 11 294 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 6
values | commit_sha stringclasses 1k
values | retrieved_at stringdate 2026-01-04 14:45:56 2026-01-04 18:30:23 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/azureblob-storage/src/main/java/com/netflix/conductor/azureblob/config/AzureBlobConfiguration.java | azureblob-storage/src/main/java/com/netflix/conductor/azureblob/config/AzureBlobConfiguration.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.azureblob.config;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import com.netflix.conductor.azureblob.storage.AzureBlobPayloadStorage;
import com.netflix.conductor.common.utils.ExternalPayloadStorage;
import com.netflix.conductor.core.utils.IDGenerator;
@Configuration(proxyBeanMethods = false)
@EnableConfigurationProperties(AzureBlobProperties.class)
@ConditionalOnProperty(name = "conductor.external-payload-storage.type", havingValue = "azureblob")
public class AzureBlobConfiguration {

    /**
     * Registers the Azure Blob backed {@link ExternalPayloadStorage} bean. Only activated when
     * {@code conductor.external-payload-storage.type=azureblob} is set.
     *
     * @param idGenerator used by the storage implementation to generate payload object keys
     * @param properties Azure Blob connection/container settings bound from configuration
     * @return the Azure Blob payload storage implementation
     */
    @Bean
    public ExternalPayloadStorage azureBlobExternalPayloadStorage(
            IDGenerator idGenerator, AzureBlobProperties properties) {
        AzureBlobPayloadStorage payloadStorage = new AzureBlobPayloadStorage(idGenerator, properties);
        return payloadStorage;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/azureblob-storage/src/main/java/com/netflix/conductor/azureblob/config/AzureBlobProperties.java | azureblob-storage/src/main/java/com/netflix/conductor/azureblob/config/AzureBlobProperties.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.azureblob.config;
import java.time.Duration;
import java.time.temporal.ChronoUnit;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.convert.DurationUnit;
/**
 * Configuration properties for the Azure Blob external payload storage, bound from the
 * {@code conductor.external-payload-storage.azureblob.*} namespace. Field and accessor names are
 * part of the binding contract and must not be renamed.
 */
@ConfigurationProperties("conductor.external-payload-storage.azureblob")
public class AzureBlobProperties {

    /** The connection string to be used to connect to Azure Blob storage */
    private String connectionString = null;

    /** The name of the container where the payloads will be stored */
    private String containerName = "conductor-payloads";

    /** The endpoint to be used to connect to Azure Blob storage */
    private String endpoint = null;

    /** The sas token to be used for authenticating requests */
    private String sasToken = null;

    /**
     * The time for which the shared access signature is valid. Bare numbers in configuration are
     * interpreted as seconds because of {@link DurationUnit}; defaults to 5 seconds.
     */
    @DurationUnit(ChronoUnit.SECONDS)
    private Duration signedUrlExpirationDuration = Duration.ofSeconds(5);

    /** The path at which the workflow inputs will be stored */
    private String workflowInputPath = "workflow/input/";

    /** The path at which the workflow outputs will be stored */
    private String workflowOutputPath = "workflow/output/";

    /** The path at which the task inputs will be stored */
    private String taskInputPath = "task/input/";

    /** The path at which the task outputs will be stored */
    private String taskOutputPath = "task/output/";

    public String getConnectionString() {
        return connectionString;
    }

    public void setConnectionString(String connectionString) {
        this.connectionString = connectionString;
    }

    public String getContainerName() {
        return containerName;
    }

    public void setContainerName(String containerName) {
        this.containerName = containerName;
    }

    public String getEndpoint() {
        return endpoint;
    }

    public void setEndpoint(String endpoint) {
        this.endpoint = endpoint;
    }

    public String getSasToken() {
        return sasToken;
    }

    public void setSasToken(String sasToken) {
        this.sasToken = sasToken;
    }

    public Duration getSignedUrlExpirationDuration() {
        return signedUrlExpirationDuration;
    }

    public void setSignedUrlExpirationDuration(Duration signedUrlExpirationDuration) {
        this.signedUrlExpirationDuration = signedUrlExpirationDuration;
    }

    public String getWorkflowInputPath() {
        return workflowInputPath;
    }

    public void setWorkflowInputPath(String workflowInputPath) {
        this.workflowInputPath = workflowInputPath;
    }

    public String getWorkflowOutputPath() {
        return workflowOutputPath;
    }

    public void setWorkflowOutputPath(String workflowOutputPath) {
        this.workflowOutputPath = workflowOutputPath;
    }

    public String getTaskInputPath() {
        return taskInputPath;
    }

    public void setTaskInputPath(String taskInputPath) {
        this.taskInputPath = taskInputPath;
    }

    public String getTaskOutputPath() {
        return taskOutputPath;
    }

    public void setTaskOutputPath(String taskOutputPath) {
        this.taskOutputPath = taskOutputPath;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/amqp/src/test/java/com/netflix/conductor/contribs/queue/amqp/AMQPSettingsTest.java | amqp/src/test/java/com/netflix/conductor/contribs/queue/amqp/AMQPSettingsTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.queue.amqp;
import java.time.Duration;
import org.junit.Before;
import org.junit.Test;
import com.netflix.conductor.contribs.queue.amqp.config.AMQPEventQueueProperties;
import com.netflix.conductor.contribs.queue.amqp.util.AMQPSettings;
import com.rabbitmq.client.AMQP.PROTOCOL;
import com.rabbitmq.client.ConnectionFactory;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/** Unit tests covering {@link AMQPSettings} URI parsing for queues and exchanges. */
public class AMQPSettingsTest {

    private AMQPEventQueueProperties properties;

    /** Stubs the event-queue properties with RabbitMQ client defaults before each test. */
    @Before
    public void setUp() {
        properties = mock(AMQPEventQueueProperties.class);
        when(properties.getBatchSize()).thenReturn(1);
        when(properties.getPollTimeDuration()).thenReturn(Duration.ofMillis(100));
        when(properties.getHosts()).thenReturn(ConnectionFactory.DEFAULT_HOST);
        when(properties.getUsername()).thenReturn(ConnectionFactory.DEFAULT_USER);
        when(properties.getPassword()).thenReturn(ConnectionFactory.DEFAULT_PASS);
        when(properties.getVirtualHost()).thenReturn(ConnectionFactory.DEFAULT_VHOST);
        when(properties.getPort()).thenReturn(PROTOCOL.PORT);
        when(properties.getConnectionTimeoutInMilliSecs()).thenReturn(60000);
        when(properties.isUseNio()).thenReturn(false);
        when(properties.isDurable()).thenReturn(true);
        when(properties.isExclusive()).thenReturn(false);
        when(properties.isAutoDelete()).thenReturn(false);
        when(properties.getContentType()).thenReturn("application/json");
        when(properties.getContentEncoding()).thenReturn("UTF-8");
        when(properties.getExchangeType()).thenReturn("topic");
        when(properties.getDeliveryMode()).thenReturn(2);
        when(properties.isUseExchange()).thenReturn(true);
    }

    /** URI without a type prefix but with an explicit bound-queue name. */
    @Test
    public void testAMQPSettings_queue_fromuri_without_exchange_prefix() {
        final String uri =
                "myExchangeName?bindQueueName=myQueueName&exchangeType=topic&routingKey=test&deliveryMode=2";
        final AMQPSettings amqpSettings = new AMQPSettings(properties);
        amqpSettings.fromURI(uri);
        assertEquals("topic", amqpSettings.getExchangeType());
        assertEquals("test", amqpSettings.getRoutingKey());
        assertEquals("myExchangeName", amqpSettings.getQueueOrExchangeName());
        assertEquals("myQueueName", amqpSettings.getExchangeBoundQueueName());
        assertEquals(AMQPSettings.Type.QUEUE, amqpSettings.getType());
    }

    /** URI without a prefix or bound-queue name: queue name falls back to bound_to_&lt;name&gt;. */
    @Test
    public void testAMQPSettings_queue_fromuri_without_exchange_prefix_and_bind_queue() {
        final String uri = "myExchangeName?exchangeType=topic&routingKey=test&deliveryMode=2";
        final AMQPSettings amqpSettings = new AMQPSettings(properties);
        amqpSettings.fromURI(uri);
        assertEquals("topic", amqpSettings.getExchangeType());
        assertEquals("test", amqpSettings.getRoutingKey());
        assertEquals("myExchangeName", amqpSettings.getQueueOrExchangeName());
        assertEquals("bound_to_myExchangeName", amqpSettings.getExchangeBoundQueueName());
        assertEquals(AMQPSettings.Type.QUEUE, amqpSettings.getType());
    }

    /** Settings constructed for the exchange type resolve the URI as an exchange. */
    @Test
    public void testAMQPSettings_exchange() {
        final String uri = "myExchangeName?exchangeType=topic&routingKey=test&deliveryMode=2";
        final AMQPSettings amqpSettings = new AMQPSettings(properties, "amqp_exchange");
        amqpSettings.fromURI(uri);
        assertEquals("topic", amqpSettings.getExchangeType());
        assertEquals("test", amqpSettings.getRoutingKey());
        assertEquals("myExchangeName", amqpSettings.getQueueOrExchangeName());
        assertEquals("bound_to_myExchangeName", amqpSettings.getExchangeBoundQueueName());
        assertEquals(AMQPSettings.Type.EXCHANGE, amqpSettings.getType());
    }

    /** A bare queue name with the queue type supplied via the constructor. */
    @Test
    public void testAMQPSettings_queue() {
        final String uri = "myQueue";
        final AMQPSettings amqpSettings = new AMQPSettings(properties, "amqp_queue");
        amqpSettings.fromURI(uri);
        assertEquals("topic", amqpSettings.getExchangeType());
        assertEquals("myQueue", amqpSettings.getQueueOrExchangeName());
        assertEquals("bound_to_myQueue", amqpSettings.getExchangeBoundQueueName());
        assertEquals(AMQPSettings.Type.QUEUE, amqpSettings.getType());
    }

    /** The amqp_queue: prefix in the URI selects the QUEUE type. */
    @Test
    public void testAMQPSettings_queue_fromUri() {
        final String uri = "amqp_queue:myQueue";
        final AMQPSettings amqpSettings = new AMQPSettings(properties);
        amqpSettings.fromURI(uri);
        assertEquals("topic", amqpSettings.getExchangeType());
        assertEquals("myQueue", amqpSettings.getQueueOrExchangeName());
        assertEquals("bound_to_myQueue", amqpSettings.getExchangeBoundQueueName());
        assertEquals(AMQPSettings.Type.QUEUE, amqpSettings.getType());
    }

    /** The amqp_exchange: prefix in the URI selects the EXCHANGE type. */
    @Test
    public void testAMQPSettings_exchange_fromUri() {
        final String uri = "amqp_exchange:myExchange";
        final AMQPSettings amqpSettings = new AMQPSettings(properties);
        amqpSettings.fromURI(uri);
        assertEquals("topic", amqpSettings.getExchangeType());
        assertEquals("myExchange", amqpSettings.getQueueOrExchangeName());
        assertEquals("bound_to_myExchange", amqpSettings.getExchangeBoundQueueName());
        assertEquals(AMQPSettings.Type.EXCHANGE, amqpSettings.getType());
    }

    /** Exchange URI with explicit type, routing key and delivery mode. */
    @Test
    public void testAMQPSettings_exchange_fromuri_defaultconfig() {
        final String uri =
                "amqp_exchange:myExchangeName?exchangeType=topic&routingKey=test&deliveryMode=2";
        final AMQPSettings amqpSettings = new AMQPSettings(properties);
        amqpSettings.fromURI(uri);
        assertEquals("topic", amqpSettings.getExchangeType());
        assertEquals("test", amqpSettings.getRoutingKey());
        assertEquals("myExchangeName", amqpSettings.getQueueOrExchangeName());
    }

    /** Queue URI flags (durable/autoDelete/exclusive) override the property defaults. */
    @Test
    public void testAMQPSettings_queue_fromuri_defaultconfig() {
        final String uri =
                "amqp_queue:myQueueName?deliveryMode=2&durable=false&autoDelete=true&exclusive=true";
        final AMQPSettings amqpSettings = new AMQPSettings(properties);
        amqpSettings.fromURI(uri);
        assertFalse(amqpSettings.isDurable());
        assertTrue(amqpSettings.isExclusive());
        assertTrue(amqpSettings.autoDelete());
        assertEquals(2, amqpSettings.getDeliveryMode());
        assertEquals("myQueueName", amqpSettings.getQueueOrExchangeName());
    }

    /** Delivery mode must be 1 or 2; 3 is rejected with IllegalArgumentException. */
    @Test(expected = IllegalArgumentException.class)
    public void testAMQPSettings_exchange_fromuri_wrongdeliverymode() {
        final String uri =
                "amqp_exchange:myExchangeName?exchangeType=topic&routingKey=test&deliveryMode=3";
        final AMQPSettings amqpSettings = new AMQPSettings(properties);
        amqpSettings.fromURI(uri);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/amqp/src/test/java/com/netflix/conductor/contribs/queue/amqp/AMQPEventQueueProviderTest.java | amqp/src/test/java/com/netflix/conductor/contribs/queue/amqp/AMQPEventQueueProviderTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.queue.amqp;
import java.time.Duration;
import org.junit.Before;
import org.junit.Test;
import com.netflix.conductor.contribs.queue.amqp.config.AMQPEventQueueProperties;
import com.netflix.conductor.contribs.queue.amqp.config.AMQPEventQueueProvider;
import com.netflix.conductor.contribs.queue.amqp.util.AMQPConstants;
import com.netflix.conductor.core.events.queue.ObservableQueue;
import com.rabbitmq.client.AMQP.PROTOCOL;
import com.rabbitmq.client.ConnectionFactory;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/** Unit tests for {@link AMQPEventQueueProvider} queue creation from URIs. */
public class AMQPEventQueueProviderTest {

    private AMQPEventQueueProperties properties;

    /** Stubs the event-queue properties with RabbitMQ client defaults before each test. */
    @Before
    public void setUp() {
        properties = mock(AMQPEventQueueProperties.class);
        when(properties.getBatchSize()).thenReturn(1);
        when(properties.getPollTimeDuration()).thenReturn(Duration.ofMillis(100));
        when(properties.getHosts()).thenReturn(ConnectionFactory.DEFAULT_HOST);
        when(properties.getUsername()).thenReturn(ConnectionFactory.DEFAULT_USER);
        when(properties.getPassword()).thenReturn(ConnectionFactory.DEFAULT_PASS);
        when(properties.getVirtualHost()).thenReturn(ConnectionFactory.DEFAULT_VHOST);
        when(properties.getPort()).thenReturn(PROTOCOL.PORT);
        when(properties.getConnectionTimeoutInMilliSecs()).thenReturn(60000);
        when(properties.isUseNio()).thenReturn(false);
        when(properties.isDurable()).thenReturn(true);
        when(properties.isExclusive()).thenReturn(false);
        when(properties.isAutoDelete()).thenReturn(false);
        when(properties.getContentType()).thenReturn("application/json");
        when(properties.getContentEncoding()).thenReturn("UTF-8");
        when(properties.getExchangeType()).thenReturn("topic");
        when(properties.getDeliveryMode()).thenReturn(2);
        when(properties.isUseExchange()).thenReturn(true);
    }

    /** A provider configured for exchanges yields an exchange-typed observable queue. */
    @Test
    public void testAMQPEventQueueProvider_defaultconfig_exchange() {
        final String uri =
                "amqp_exchange:myExchangeName?exchangeType=topic&routingKey=test&deliveryMode=2";
        final AMQPEventQueueProvider provider =
                new AMQPEventQueueProvider(properties, "amqp_exchange", true);
        final ObservableQueue observableQueue = provider.getQueue(uri);
        assertNotNull(observableQueue);
        assertEquals(uri, observableQueue.getName());
        assertEquals(AMQPConstants.AMQP_EXCHANGE_TYPE, observableQueue.getType());
    }

    /** A provider configured for queues yields a queue-typed observable queue. */
    @Test
    public void testAMQPEventQueueProvider_defaultconfig_queue() {
        final String uri =
                "amqp_queue:myQueueName?deliveryMode=2&durable=false&autoDelete=true&exclusive=true";
        final AMQPEventQueueProvider provider =
                new AMQPEventQueueProvider(properties, "amqp_queue", false);
        final ObservableQueue observableQueue = provider.getQueue(uri);
        assertNotNull(observableQueue);
        assertEquals(uri, observableQueue.getName());
        assertEquals(AMQPConstants.AMQP_QUEUE_TYPE, observableQueue.getType());
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/amqp/src/test/java/com/netflix/conductor/contribs/queue/amqp/AMQPObservableQueueTest.java | amqp/src/test/java/com/netflix/conductor/contribs/queue/amqp/AMQPObservableQueueTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.queue.amqp;
import java.io.IOException;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.Random;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.commons.lang3.StringUtils;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.internal.stubbing.answers.DoesNothing;
import org.mockito.stubbing.OngoingStubbing;
import com.netflix.conductor.contribs.queue.amqp.config.AMQPEventQueueProperties;
import com.netflix.conductor.contribs.queue.amqp.config.AMQPRetryPattern;
import com.netflix.conductor.contribs.queue.amqp.util.AMQPConstants;
import com.netflix.conductor.contribs.queue.amqp.util.AMQPSettings;
import com.netflix.conductor.contribs.queue.amqp.util.RetryType;
import com.netflix.conductor.core.events.queue.Message;
import com.rabbitmq.client.AMQP;
import com.rabbitmq.client.AMQP.PROTOCOL;
import com.rabbitmq.client.AMQP.Queue.DeclareOk;
import com.rabbitmq.client.Address;
import com.rabbitmq.client.Channel;
import com.rabbitmq.client.Connection;
import com.rabbitmq.client.ConnectionFactory;
import com.rabbitmq.client.Consumer;
import com.rabbitmq.client.Envelope;
import com.rabbitmq.client.GetResponse;
import com.rabbitmq.client.impl.AMQImpl;
import rx.Observable;
import rx.observers.Subscribers;
import rx.observers.TestSubscriber;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.ArgumentMatchers.anyMap;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.atLeast;
import static org.mockito.Mockito.atLeastOnce;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
@SuppressWarnings({"rawtypes", "unchecked"})
public class AMQPObservableQueueTest {
final int batchSize = 10;
final int pollTimeMs = 500;
Address[] addresses;
AMQPEventQueueProperties properties;
    /**
     * Stubs the event-queue properties with RabbitMQ client defaults and resets shared state
     * before each test.
     */
    @Before
    public void setUp() {
        properties = mock(AMQPEventQueueProperties.class);
        when(properties.getBatchSize()).thenReturn(1);
        when(properties.getPollTimeDuration()).thenReturn(Duration.ofMillis(100));
        when(properties.getHosts()).thenReturn(ConnectionFactory.DEFAULT_HOST);
        when(properties.getUsername()).thenReturn(ConnectionFactory.DEFAULT_USER);
        when(properties.getPassword()).thenReturn(ConnectionFactory.DEFAULT_PASS);
        when(properties.getVirtualHost()).thenReturn(ConnectionFactory.DEFAULT_VHOST);
        when(properties.getPort()).thenReturn(PROTOCOL.PORT);
        when(properties.getConnectionTimeoutInMilliSecs()).thenReturn(60000);
        when(properties.isUseNio()).thenReturn(false);
        when(properties.isDurable()).thenReturn(true);
        when(properties.isExclusive()).thenReturn(false);
        when(properties.isAutoDelete()).thenReturn(false);
        when(properties.getContentType()).thenReturn("application/json");
        when(properties.getContentEncoding()).thenReturn("UTF-8");
        when(properties.getExchangeType()).thenReturn("topic");
        when(properties.getDeliveryMode()).thenReturn(2);
        when(properties.isUseExchange()).thenReturn(true);
        addresses = new Address[] {new Address("localhost", PROTOCOL.PORT)};
        // NOTE(review): presumably clears a statically cached connection so tests don't share
        // state across runs — confirm against AMQPConnection.
        AMQPConnection.setAMQPConnection(null);
    }
List<GetResponse> buildQueue(final Random random, final int bound) {
final LinkedList<GetResponse> queue = new LinkedList();
for (int i = 0; i < bound; i++) {
AMQP.BasicProperties props = mock(AMQP.BasicProperties.class);
when(props.getMessageId()).thenReturn(UUID.randomUUID().toString());
Envelope envelope = mock(Envelope.class);
when(envelope.getDeliveryTag()).thenReturn(random.nextLong());
GetResponse response = mock(GetResponse.class);
when(response.getProps()).thenReturn(props);
when(response.getEnvelope()).thenReturn(envelope);
when(response.getBody()).thenReturn("{}".getBytes());
when(response.getMessageCount()).thenReturn(bound - i);
queue.add(response);
}
return queue;
}
    /** Returns a Mockito channel stub that always reports itself as open. */
    Channel mockBaseChannel() throws IOException, TimeoutException {
        Channel channel = mock(Channel.class);
        when(channel.isOpen()).thenReturn(Boolean.TRUE);
        /*
         * doAnswer(invocation -> { when(channel.isOpen()).thenReturn(Boolean.FALSE);
         * return DoesNothing.doesNothing(); }).when(channel).close();
         */
        return channel;
    }
    /**
     * Stubs the channel operations needed for direct-queue tests: passive declare, declare,
     * message count, consume, and publish.
     *
     * @param isWorking when false, declare/consume/publish are stubbed to fail after the first
     *     successful return
     * @param exists when false, queueDeclarePassive throws (broker reports the queue missing)
     * @param name the queue name used for all stubbing matchers
     * @param queue fake backlog whose size drives declare-ok and messageCount answers
     */
    Channel mockChannelForQueue(
            Channel channel,
            boolean isWorking,
            boolean exists,
            String name,
            List<GetResponse> queue)
            throws IOException {
        // queueDeclarePassive
        final AMQImpl.Queue.DeclareOk queueDeclareOK =
                new AMQImpl.Queue.DeclareOk(name, queue.size(), 1);
        if (exists) {
            when(channel.queueDeclarePassive(eq(name))).thenReturn(queueDeclareOK);
        } else {
            // NOTE(review): message reads "exists" but this branch simulates a MISSING queue;
            // the text looks inverted, though the thrown IOException is what the test relies on.
            when(channel.queueDeclarePassive(eq(name)))
                    .thenThrow(new IOException("Queue " + name + " exists"));
        }
        // queueDeclare — succeeds once; a non-working channel then starts throwing
        OngoingStubbing<DeclareOk> declareOkOngoingStubbing =
                when(channel.queueDeclare(
                                eq(name), anyBoolean(), anyBoolean(), anyBoolean(), anyMap()))
                        .thenReturn(queueDeclareOK);
        if (!isWorking) {
            declareOkOngoingStubbing.thenThrow(
                    new IOException("Cannot declare queue " + name),
                    new RuntimeException("Not working"));
        }
        // messageCount
        when(channel.messageCount(eq(name))).thenReturn((long) queue.size());
        // basicGet
        OngoingStubbing<String> getResponseOngoingStubbing =
                Mockito.when(channel.basicConsume(eq(name), anyBoolean(), any(Consumer.class)))
                        .thenReturn(name);
        if (!isWorking) {
            getResponseOngoingStubbing.thenThrow(
                    new IOException("Not working"), new RuntimeException("Not working"));
        }
        // basicPublish — default-exchange publish (empty exchange, queue name as routing key)
        if (isWorking) {
            doNothing()
                    .when(channel)
                    .basicPublish(
                            eq(StringUtils.EMPTY),
                            eq(name),
                            any(AMQP.BasicProperties.class),
                            any(byte[].class));
        } else {
            doThrow(new IOException("Not working"))
                    .when(channel)
                    .basicPublish(
                            eq(StringUtils.EMPTY),
                            eq(name),
                            any(AMQP.BasicProperties.class),
                            any(byte[].class));
        }
        return channel;
    }
    /**
     * Stubs the channel operations needed for exchange tests: exchange declare (passive and
     * active), bound-queue declare and bind, message count, consume, and publish.
     *
     * @param isWorking when false, declare/consume/publish fail after their first success
     * @param exists when false, the passive declares throw (broker reports entities missing)
     * @param queueName name of the queue bound to the exchange
     * @param name exchange name
     * @param type exchange type (e.g. "topic")
     * @param routingKey binding/publish routing key
     * @param queue fake backlog whose size drives declare-ok and messageCount answers
     */
    Channel mockChannelForExchange(
            Channel channel,
            boolean isWorking,
            boolean exists,
            String queueName,
            String name,
            String type,
            String routingKey,
            List<GetResponse> queue)
            throws IOException {
        // exchangeDeclarePassive
        final AMQImpl.Exchange.DeclareOk exchangeDeclareOK = new AMQImpl.Exchange.DeclareOk();
        if (exists) {
            when(channel.exchangeDeclarePassive(eq(name))).thenReturn(exchangeDeclareOK);
        } else {
            // NOTE(review): message reads "exists" but this branch simulates a MISSING exchange.
            when(channel.exchangeDeclarePassive(eq(name)))
                    .thenThrow(new IOException("Exchange " + name + " exists"));
        }
        // exchangeDeclare — succeeds once; a non-working channel then starts throwing
        OngoingStubbing<AMQP.Exchange.DeclareOk> declareOkOngoingStubbing =
                when(channel.exchangeDeclare(
                                eq(name), eq(type), anyBoolean(), anyBoolean(), anyMap()))
                        .thenReturn(exchangeDeclareOK);
        if (!isWorking) {
            declareOkOngoingStubbing.thenThrow(
                    new IOException("Cannot declare exchange " + name + " of type " + type),
                    new RuntimeException("Not working"));
        }
        // queueDeclarePassive
        final AMQImpl.Queue.DeclareOk queueDeclareOK =
                new AMQImpl.Queue.DeclareOk(queueName, queue.size(), 1);
        if (exists) {
            when(channel.queueDeclarePassive(eq(queueName))).thenReturn(queueDeclareOK);
        } else {
            when(channel.queueDeclarePassive(eq(queueName)))
                    .thenThrow(new IOException("Queue " + queueName + " exists"));
        }
        // queueDeclare
        when(channel.queueDeclare(
                        eq(queueName), anyBoolean(), anyBoolean(), anyBoolean(), anyMap()))
                .thenReturn(queueDeclareOK);
        // queueBind
        when(channel.queueBind(eq(queueName), eq(name), eq(routingKey)))
                .thenReturn(new AMQImpl.Queue.BindOk());
        // messageCount
        when(channel.messageCount(eq(queueName))).thenReturn((long) queue.size());
        // basicGet
        OngoingStubbing<String> getResponseOngoingStubbing =
                Mockito.when(channel.basicConsume(eq(queueName), anyBoolean(), any(Consumer.class)))
                        .thenReturn(queueName);
        if (!isWorking) {
            getResponseOngoingStubbing.thenThrow(
                    new IOException("Not working"), new RuntimeException("Not working"));
        }
        // basicPublish — publishes go to the exchange with the routing key
        if (isWorking) {
            doNothing()
                    .when(channel)
                    .basicPublish(
                            eq(name),
                            eq(routingKey),
                            any(AMQP.BasicProperties.class),
                            any(byte[].class));
        } else {
            doThrow(new IOException("Not working"))
                    .when(channel)
                    .basicPublish(
                            eq(name),
                            eq(routingKey),
                            any(AMQP.BasicProperties.class),
                            any(byte[].class));
        }
        return channel;
    }
Connection mockGoodConnection(Channel channel) throws IOException {
Connection connection = mock(Connection.class);
when(connection.createChannel()).thenReturn(channel);
when(connection.isOpen()).thenReturn(Boolean.TRUE);
/*
* doAnswer(invocation -> { when(connection.isOpen()).thenReturn(Boolean.FALSE);
* return DoesNothing.doesNothing(); }).when(connection).close();
*/ return connection;
}
Connection mockBadConnection() throws IOException {
Connection connection = mock(Connection.class);
when(connection.createChannel()).thenThrow(new IOException("Can't create channel"));
when(connection.isOpen()).thenReturn(Boolean.TRUE);
doThrow(new IOException("Can't close connection")).when(connection).close();
return connection;
}
ConnectionFactory mockConnectionFactory(Connection connection)
throws IOException, TimeoutException {
ConnectionFactory connectionFactory = mock(ConnectionFactory.class);
when(connectionFactory.newConnection(eq(addresses), Mockito.anyString()))
.thenReturn(connection);
return connectionFactory;
}
    /**
     * Subscribes a test subscriber to the queue's observable for roughly two poll periods and
     * then closes the queue. With a working channel, verifies the consumer was registered and
     * acks every received message; otherwise asserts nothing was received.
     *
     * @param useWorkingChannel whether the channel stubs were configured to succeed
     */
    void runObserve(
            Channel channel,
            AMQPObservableQueue observableQueue,
            String queueName,
            boolean useWorkingChannel,
            int batchSize)
            throws IOException {
        // Collect everything emitted during the observation window.
        final List<Message> found = new ArrayList<>(batchSize);
        TestSubscriber<Message> subscriber = TestSubscriber.create(Subscribers.create(found::add));
        rx.Observable<Message> observable =
                observableQueue.observe().take(pollTimeMs * 2, TimeUnit.MILLISECONDS);
        assertNotNull(observable);
        observable.subscribe(subscriber);
        // Block until the take() window elapses, then check for a clean completion.
        subscriber.awaitTerminalEvent();
        subscriber.assertNoErrors();
        subscriber.assertCompleted();
        if (useWorkingChannel) {
            verify(channel, atLeast(1))
                    .basicConsume(eq(queueName), anyBoolean(), any(Consumer.class));
            doNothing().when(channel).basicAck(anyLong(), eq(false));
            doAnswer(DoesNothing.doesNothing()).when(channel).basicAck(anyLong(), eq(false));
            observableQueue.ack(Collections.synchronizedList(found));
        } else {
            assertNotNull(found);
            assertTrue(found.isEmpty());
        }
        observableQueue.close();
    }
@Test
public void
testGetMessagesFromExistingExchangeWithDurableExclusiveAutoDeleteQueueConfiguration()
throws IOException, TimeoutException {
// Mock channel and connection
Channel channel = mockBaseChannel();
Connection connection = mockGoodConnection(channel);
testGetMessagesFromExchangeAndCustomConfigurationFromURI(
channel, connection, true, true, true, true, true);
}
@Test
public void testGetMessagesFromExistingExchangeWithDefaultConfiguration()
throws IOException, TimeoutException {
// Mock channel and connection
Channel channel = mockBaseChannel();
Connection connection = mockGoodConnection(channel);
testGetMessagesFromExchangeAndDefaultConfiguration(channel, connection, true, true);
}
@Test
public void testPublishMessagesToNotExistingExchangeAndDefaultConfiguration()
throws IOException, TimeoutException {
// Mock channel and connection
Channel channel = mockBaseChannel();
Connection connection = mockGoodConnection(channel);
testPublishMessagesToExchangeAndDefaultConfiguration(channel, connection, false, true);
}
@Test
public void testAck() throws IOException, TimeoutException {
// Mock channel and connection
Channel channel = mockBaseChannel();
Connection connection = mockGoodConnection(channel);
final Random random = new Random();
final String name = RandomStringUtils.randomAlphabetic(30),
type = "topic",
routingKey = RandomStringUtils.randomAlphabetic(30);
AMQPRetryPattern retrySettings = null;
final AMQPSettings settings =
new AMQPSettings(properties)
.fromURI(
"amqp_exchange:"
+ name
+ "?exchangeType="
+ type
+ "&routingKey="
+ routingKey);
AMQPObservableQueue observableQueue =
new AMQPObservableQueue(
mockConnectionFactory(connection),
addresses,
true,
settings,
retrySettings,
batchSize,
pollTimeMs);
List<Message> messages = new LinkedList<>();
Message msg = new Message();
msg.setId("0e3eef8f-ebb1-4244-9665-759ab5bdf433");
msg.setPayload("Payload");
msg.setReceipt("1");
messages.add(msg);
List<String> failedMessages = observableQueue.ack(messages);
assertNotNull(failedMessages);
assertTrue(failedMessages.isEmpty());
}
    /**
     * Shared exchange scenario using only default settings from the URI: builds an
     * AMQPObservableQueue against a mocked exchange, verifies the parsed settings, runs the
     * observe/ack cycle, and (for a working channel) verifies declare/bind interactions.
     *
     * @param exists whether the mocked broker reports the exchange/queue as existing
     * @param useWorkingChannel whether channel operations are stubbed to succeed
     */
    private void testGetMessagesFromExchangeAndDefaultConfiguration(
            Channel channel, Connection connection, boolean exists, boolean useWorkingChannel)
            throws IOException, TimeoutException {
        final Random random = new Random();
        final String name = RandomStringUtils.randomAlphabetic(30),
                type = "topic",
                routingKey = RandomStringUtils.randomAlphabetic(30);
        // Default bound-queue name derived from the exchange name.
        final String queueName = String.format("bound_to_%s", name);
        final AMQPSettings settings =
                new AMQPSettings(properties)
                        .fromURI(
                                "amqp_exchange:"
                                        + name
                                        + "?exchangeType="
                                        + type
                                        + "&routingKey="
                                        + routingKey);
        // URI omits the flags, so settings fall back to the stubbed property defaults.
        assertTrue(settings.isDurable());
        assertFalse(settings.isExclusive());
        assertFalse(settings.autoDelete());
        assertEquals(2, settings.getDeliveryMode());
        assertEquals(name, settings.getQueueOrExchangeName());
        assertEquals(type, settings.getExchangeType());
        assertEquals(routingKey, settings.getRoutingKey());
        assertEquals(queueName, settings.getExchangeBoundQueueName());
        List<GetResponse> queue = buildQueue(random, batchSize);
        channel =
                mockChannelForExchange(
                        channel,
                        useWorkingChannel,
                        exists,
                        queueName,
                        name,
                        type,
                        routingKey,
                        queue);
        AMQPRetryPattern retrySettings = null;
        AMQPObservableQueue observableQueue =
                new AMQPObservableQueue(
                        mockConnectionFactory(connection),
                        addresses,
                        true,
                        settings,
                        retrySettings,
                        batchSize,
                        pollTimeMs);
        assertArrayEquals(addresses, observableQueue.getAddresses());
        assertEquals(AMQPConstants.AMQP_EXCHANGE_TYPE, observableQueue.getType());
        // The queue's logical name is the canonicalized URI it was created from.
        assertEquals(
                AMQPConstants.AMQP_EXCHANGE_TYPE
                        + ":"
                        + name
                        + "?exchangeType="
                        + type
                        + "&routingKey="
                        + routingKey,
                observableQueue.getName());
        assertEquals(name, observableQueue.getURI());
        assertEquals(batchSize, observableQueue.getBatchSize());
        assertEquals(pollTimeMs, observableQueue.getPollTimeInMS());
        assertEquals(queue.size(), observableQueue.size());
        runObserve(channel, observableQueue, queueName, useWorkingChannel, batchSize);
        // A working channel must have declared the exchange and queue and bound them.
        if (useWorkingChannel) {
            verify(channel, atLeastOnce())
                    .exchangeDeclare(
                            eq(name),
                            eq(type),
                            eq(settings.isDurable()),
                            eq(settings.autoDelete()),
                            eq(Collections.emptyMap()));
            verify(channel, atLeastOnce())
                    .queueDeclare(
                            eq(queueName),
                            eq(settings.isDurable()),
                            eq(settings.isExclusive()),
                            eq(settings.autoDelete()),
                            anyMap());
            verify(channel, atLeastOnce()).queueBind(eq(queueName), eq(name), eq(routingKey));
        }
    }
    /**
     * Exercises consuming from an exchange whose full configuration (bind queue, routing key,
     * delivery mode, durability, exclusivity, auto-delete) is supplied through the event URI,
     * verifying both the parsed settings and the declarations made on the mocked channel.
     */
    private void testGetMessagesFromExchangeAndCustomConfigurationFromURI(
            Channel channel,
            Connection connection,
            boolean exists,
            boolean useWorkingChannel,
            boolean durable,
            boolean exclusive,
            boolean autoDelete)
            throws IOException, TimeoutException {
        final Random random = new Random();
        final String name = RandomStringUtils.randomAlphabetic(30),
                type = "topic",
                routingKey = RandomStringUtils.randomAlphabetic(30);
        final String queueName = String.format("bound_to_%s", name);
        final AMQPSettings settings =
                new AMQPSettings(properties)
                        .fromURI(
                                "amqp_exchange:"
                                        + name
                                        + "?exchangeType="
                                        + type
                                        + "&bindQueueName="
                                        + queueName
                                        + "&routingKey="
                                        + routingKey
                                        + "&deliveryMode=2"
                                        + "&durable="
                                        + durable
                                        + "&exclusive="
                                        + exclusive
                                        + "&autoDelete="
                                        + autoDelete);
        // URI parameters must override the property-based defaults.
        assertEquals(durable, settings.isDurable());
        assertEquals(exclusive, settings.isExclusive());
        assertEquals(autoDelete, settings.autoDelete());
        assertEquals(2, settings.getDeliveryMode());
        assertEquals(name, settings.getQueueOrExchangeName());
        assertEquals(type, settings.getExchangeType());
        assertEquals(queueName, settings.getExchangeBoundQueueName());
        assertEquals(routingKey, settings.getRoutingKey());
        List<GetResponse> queue = buildQueue(random, batchSize);
        channel =
                mockChannelForExchange(
                        channel,
                        useWorkingChannel,
                        exists,
                        queueName,
                        name,
                        type,
                        routingKey,
                        queue);
        AMQPRetryPattern retrySettings = null;
        AMQPObservableQueue observableQueue =
                new AMQPObservableQueue(
                        mockConnectionFactory(connection),
                        addresses,
                        true,
                        settings,
                        retrySettings,
                        batchSize,
                        pollTimeMs);
        assertArrayEquals(addresses, observableQueue.getAddresses());
        assertEquals(AMQPConstants.AMQP_EXCHANGE_TYPE, observableQueue.getType());
        // getName() must echo the original event URI verbatim.
        assertEquals(
                AMQPConstants.AMQP_EXCHANGE_TYPE
                        + ":"
                        + name
                        + "?exchangeType="
                        + type
                        + "&bindQueueName="
                        + queueName
                        + "&routingKey="
                        + routingKey
                        + "&deliveryMode=2"
                        + "&durable="
                        + durable
                        + "&exclusive="
                        + exclusive
                        + "&autoDelete="
                        + autoDelete,
                observableQueue.getName());
        assertEquals(name, observableQueue.getURI());
        assertEquals(batchSize, observableQueue.getBatchSize());
        assertEquals(pollTimeMs, observableQueue.getPollTimeInMS());
        assertEquals(queue.size(), observableQueue.size());
        runObserve(channel, observableQueue, queueName, useWorkingChannel, batchSize);
        // Declarations only happen when the channel is usable.
        if (useWorkingChannel) {
            verify(channel, atLeastOnce())
                    .exchangeDeclare(
                            eq(name),
                            eq(type),
                            eq(settings.isDurable()),
                            eq(settings.autoDelete()),
                            eq(Collections.emptyMap()));
            verify(channel, atLeastOnce())
                    .queueDeclare(
                            eq(queueName),
                            eq(settings.isDurable()),
                            eq(settings.isExclusive()),
                            eq(settings.autoDelete()),
                            anyMap());
            verify(channel, atLeastOnce()).queueBind(eq(queueName), eq(name), eq(routingKey));
        }
    }
    /**
     * Exercises publishing to an exchange configured via URI, verifying the parsed settings and
     * that each published message results in one basicPublish call with the configured exchange
     * and routing key.
     */
    private void testPublishMessagesToExchangeAndDefaultConfiguration(
            Channel channel, Connection connection, boolean exists, boolean useWorkingChannel)
            throws IOException, TimeoutException {
        final Random random = new Random();
        final String name = RandomStringUtils.randomAlphabetic(30),
                type = "topic",
                routingKey = RandomStringUtils.randomAlphabetic(30);
        final String queueName = String.format("bound_to_%s", name);
        final AMQPSettings settings =
                new AMQPSettings(properties)
                        .fromURI(
                                "amqp_exchange:"
                                        + name
                                        + "?exchangeType="
                                        + type
                                        + "&routingKey="
                                        + routingKey
                                        + "&deliveryMode=2&durable=true&exclusive=false&autoDelete=true");
        assertTrue(settings.isDurable());
        assertFalse(settings.isExclusive());
        assertTrue(settings.autoDelete());
        assertEquals(2, settings.getDeliveryMode());
        assertEquals(name, settings.getQueueOrExchangeName());
        assertEquals(type, settings.getExchangeType());
        assertEquals(routingKey, settings.getRoutingKey());
        List<GetResponse> queue = buildQueue(random, batchSize);
        channel =
                mockChannelForExchange(
                        channel,
                        useWorkingChannel,
                        exists,
                        queueName,
                        name,
                        type,
                        routingKey,
                        queue);
        AMQPRetryPattern retrySettings = null;
        AMQPObservableQueue observableQueue =
                new AMQPObservableQueue(
                        mockConnectionFactory(connection),
                        addresses,
                        true,
                        settings,
                        retrySettings,
                        batchSize,
                        pollTimeMs);
        assertArrayEquals(addresses, observableQueue.getAddresses());
        assertEquals(AMQPConstants.AMQP_EXCHANGE_TYPE, observableQueue.getType());
        // getName() must echo the original event URI verbatim.
        assertEquals(
                AMQPConstants.AMQP_EXCHANGE_TYPE
                        + ":"
                        + name
                        + "?exchangeType="
                        + type
                        + "&routingKey="
                        + routingKey
                        + "&deliveryMode=2&durable=true&exclusive=false&autoDelete=true",
                observableQueue.getName());
        assertEquals(name, observableQueue.getURI());
        assertEquals(batchSize, observableQueue.getBatchSize());
        assertEquals(pollTimeMs, observableQueue.getPollTimeInMS());
        assertEquals(queue.size(), observableQueue.size());
        // Build one message per index in [0, batchSize).
        List<Message> messages = new LinkedList<>();
        Observable.range(0, batchSize)
                .forEach((Integer x) -> messages.add(new Message("" + x, "payload: " + x, null)));
        assertEquals(batchSize, messages.size());
        observableQueue.publish(messages);
        // Every message must be published to the exchange with the configured routing key.
        if (useWorkingChannel) {
            verify(channel, times(batchSize))
                    .basicPublish(
                            eq(name),
                            eq(routingKey),
                            any(AMQP.BasicProperties.class),
                            any(byte[].class));
        }
    }
@Test
public void testGetMessagesFromExistingQueueAndDefaultConfiguration()
throws IOException, TimeoutException {
// Mock channel and connection
Channel channel = mockBaseChannel();
Connection connection = mockGoodConnection(channel);
testGetMessagesFromQueueAndDefaultConfiguration(channel, connection, true, true);
}
@Test
public void testGetMessagesFromNotExistingQueueAndDefaultConfiguration()
throws IOException, TimeoutException {
// Mock channel and connection
Channel channel = mockBaseChannel();
Connection connection = mockGoodConnection(channel);
testGetMessagesFromQueueAndDefaultConfiguration(channel, connection, false, true);
}
@Test
public void testGetMessagesFromQueueWithBadChannel() throws IOException, TimeoutException {
// Mock channel and connection
Channel channel = mockBaseChannel();
Connection connection = mockGoodConnection(channel);
testGetMessagesFromQueueAndDefaultConfiguration(channel, connection, true, false);
}
@Test(expected = RuntimeException.class)
public void testPublishMessagesToQueueWithBadChannel() throws IOException, TimeoutException {
// Mock channel and connection
Channel channel = mockBaseChannel();
Connection connection = mockGoodConnection(channel);
testPublishMessagesToQueueAndDefaultConfiguration(channel, connection, true, false);
}
@Test(expected = IllegalArgumentException.class)
public void testAMQPObservalbleQueue_empty() throws IOException, TimeoutException {
AMQPSettings settings = new AMQPSettings(properties).fromURI("amqp_queue:test");
AMQPRetryPattern retrySettings = null;
AMQPObservableQueue observableQueue =
new AMQPObservableQueue(
null, addresses, false, settings, retrySettings, batchSize, pollTimeMs);
}
@Test(expected = IllegalArgumentException.class)
public void testAMQPObservalbleQueue_addressEmpty() throws IOException, TimeoutException {
AMQPSettings settings = new AMQPSettings(properties).fromURI("amqp_queue:test");
AMQPRetryPattern retrySettings = null;
AMQPObservableQueue observableQueue =
new AMQPObservableQueue(
mockConnectionFactory(mockGoodConnection(mockBaseChannel())),
null,
false,
settings,
retrySettings,
batchSize,
pollTimeMs);
}
@Test(expected = IllegalArgumentException.class)
public void testAMQPObservalbleQueue_settingsEmpty() throws IOException, TimeoutException {
AMQPRetryPattern retrySettings = null;
AMQPObservableQueue observableQueue =
new AMQPObservableQueue(
mockConnectionFactory(mockGoodConnection(mockBaseChannel())),
addresses,
false,
null,
retrySettings,
batchSize,
pollTimeMs);
}
@Test(expected = IllegalArgumentException.class)
public void testAMQPObservalbleQueue_batchsizezero() throws IOException, TimeoutException {
AMQPSettings settings = new AMQPSettings(properties).fromURI("amqp_queue:test");
AMQPRetryPattern retrySettings = null;
AMQPObservableQueue observableQueue =
new AMQPObservableQueue(
mockConnectionFactory(mockGoodConnection(mockBaseChannel())),
addresses,
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | true |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp/AMQPConnection.java | amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp/AMQPConnection.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.queue.amqp;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeoutException;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.contribs.queue.amqp.config.AMQPRetryPattern;
import com.netflix.conductor.contribs.queue.amqp.util.AMQPConstants;
import com.netflix.conductor.contribs.queue.amqp.util.ConnectionType;
import com.rabbitmq.client.Address;
import com.rabbitmq.client.BlockedListener;
import com.rabbitmq.client.Channel;
import com.rabbitmq.client.Connection;
import com.rabbitmq.client.ConnectionFactory;
import com.rabbitmq.client.ShutdownListener;
import com.rabbitmq.client.ShutdownSignalException;
/**
 * Singleton holder for the two RabbitMQ connections (one for publishing, one for subscribing)
 * plus a simple channel pool on top of them. Connection and channel creation honour the
 * configured {@link AMQPRetryPattern}, when present.
 *
 * <p>Fixes in this revision: closed channels are no longer pooled as {@code null} entries by
 * {@link #returnChannel}, and {@link #close()} also closes/clears the reserved subscriber
 * channels, which were previously leaked.
 */
public class AMQPConnection {

    private static final Logger LOGGER = LoggerFactory.getLogger(AMQPConnection.class);

    private volatile Connection publisherConnection = null;
    private volatile Connection subscriberConnection = null;
    private ConnectionFactory factory = null;
    private Address[] addresses = null;
    private static AMQPConnection amqpConnection = null;
    private static final String PUBLISHER = "Publisher";
    private static final String SUBSCRIBER = "Subscriber";

    // Channels available for reuse, keyed by connection type. Access is guarded by the
    // synchronized borrow/return methods.
    private static final Map<ConnectionType, Set<Channel>> availableChannelPool =
            new ConcurrentHashMap<ConnectionType, Set<Channel>>();

    // Channels reserved per subscribed queue so that messages are acknowledged on the same
    // channel they were delivered on.
    private static final Map<String, Channel> subscriberReservedChannelPool =
            new ConcurrentHashMap<String, Channel>();

    private static AMQPRetryPattern retrySettings = null;

    private AMQPConnection() {}

    private AMQPConnection(final ConnectionFactory factory, final Address[] address) {
        this.factory = factory;
        this.addresses = address;
    }

    /**
     * Returns the singleton instance, creating it on first use. The retry settings are refreshed
     * on every call.
     */
    public static synchronized AMQPConnection getInstance(
            final ConnectionFactory factory,
            final Address[] address,
            final AMQPRetryPattern retrySettings) {
        if (AMQPConnection.amqpConnection == null) {
            AMQPConnection.amqpConnection = new AMQPConnection(factory, address);
        }
        AMQPConnection.retrySettings = retrySettings;
        return AMQPConnection.amqpConnection;
    }

    // Exposed for UT
    public static void setAMQPConnection(AMQPConnection amqpConnection) {
        AMQPConnection.amqpConnection = amqpConnection;
    }

    public Address[] getAddresses() {
        return addresses;
    }

    /** Comma-separated rendering of the broker addresses, used in error messages. */
    private String describeAddresses() {
        return Arrays.stream(addresses)
                .map(address -> address.toString())
                .collect(Collectors.joining(","));
    }

    /**
     * Applies the configured retry pattern for a failed attempt, or throws a
     * {@link RuntimeException} when no retry pattern is configured or retries are exhausted.
     *
     * @param cause failure of the current attempt (preserved as the exception cause)
     * @param retryIndex 1-based index of the attempt that just failed
     * @param description human-readable description of the failed operation
     * @param logError whether to log before throwing (connection errors log; channel errors
     *     historically only propagate)
     */
    private void retryOrThrow(
            final Exception cause,
            final int retryIndex,
            final String description,
            final boolean logError) {
        AMQPRetryPattern retry = retrySettings;
        if (retry == null) {
            if (logError) {
                LOGGER.error(description, cause);
            }
            throw new RuntimeException(description, cause);
        }
        try {
            retry.continueOrPropogate(cause, retryIndex);
        } catch (Exception ex) {
            final String error = "Retries completed. " + description;
            if (logError) {
                LOGGER.error(error, cause);
            }
            throw new RuntimeException(error, cause);
        }
    }

    /** Opens a broker connection, retrying per the configured pattern, and wires listeners. */
    private Connection createConnection(String connectionPrefix) {
        int retryIndex = 1;
        while (true) {
            try {
                Connection connection =
                        factory.newConnection(
                                addresses, System.getenv("HOSTNAME") + "-" + connectionPrefix);
                if (connection == null || !connection.isOpen()) {
                    throw new RuntimeException("Failed to open connection");
                }
                connection.addShutdownListener(
                        new ShutdownListener() {
                            @Override
                            public void shutdownCompleted(ShutdownSignalException cause) {
                                LOGGER.error(
                                        "Received a shutdown exception for the connection {}. reason {} cause{}",
                                        connection.getClientProvidedName(),
                                        cause.getMessage(),
                                        cause);
                            }
                        });
                connection.addBlockedListener(
                        new BlockedListener() {
                            @Override
                            public void handleUnblocked() throws IOException {
                                LOGGER.info(
                                        "Connection {} is unblocked",
                                        connection.getClientProvidedName());
                            }

                            @Override
                            public void handleBlocked(String reason) throws IOException {
                                LOGGER.error(
                                        "Connection {} is blocked. reason: {}",
                                        connection.getClientProvidedName(),
                                        reason);
                            }
                        });
                return connection;
            } catch (final IOException e) {
                retryOrThrow(
                        e, retryIndex, "IO error while connecting to " + describeAddresses(), true);
                retryIndex++;
            } catch (final TimeoutException e) {
                retryOrThrow(
                        e, retryIndex, "Timeout while connecting to " + describeAddresses(), true);
                retryIndex++;
            }
        }
    }

    /**
     * Gets (or lazily creates) a channel for the given connection type and queue/exchange name.
     * Subscriber channels are cached per queue so acknowledgements are issued on the same channel
     * that delivered the message; publisher channels are borrowed from the shared pool.
     */
    public Channel getOrCreateChannel(ConnectionType connectionType, String queueOrExchangeName)
            throws Exception {
        LOGGER.debug(
                "Accessing the channel for queueOrExchange {} with type {} ",
                queueOrExchangeName,
                connectionType);
        switch (connectionType) {
            case SUBSCRIBER:
                String subChnName = connectionType + ";" + queueOrExchangeName;
                if (subscriberReservedChannelPool.containsKey(subChnName)) {
                    Channel locChn = subscriberReservedChannelPool.get(subChnName);
                    if (locChn != null && locChn.isOpen()) {
                        return locChn;
                    }
                }
                synchronized (this) {
                    if (subscriberConnection == null || !subscriberConnection.isOpen()) {
                        subscriberConnection = createConnection(SUBSCRIBER);
                    }
                }
                Channel subChn = borrowChannel(connectionType, subscriberConnection);
                // Remember the channel so that future acks for this queue reuse it; acking on a
                // different channel than the delivery one would fail.
                subscriberReservedChannelPool.put(subChnName, subChn);
                return subChn;
            case PUBLISHER:
                synchronized (this) {
                    if (publisherConnection == null || !publisherConnection.isOpen()) {
                        publisherConnection = createConnection(PUBLISHER);
                    }
                }
                return borrowChannel(connectionType, publisherConnection);
            default:
                return null;
        }
    }

    /** Creates a fresh channel on the given connection, retrying per the configured pattern. */
    private Channel getOrCreateChannel(ConnectionType connType, Connection rmqConnection) {
        // Channel creation is required
        Channel locChn = null;
        int retryIndex = 1;
        while (true) {
            try {
                LOGGER.debug("Creating a channel for " + connType);
                locChn = rmqConnection.createChannel();
                if (locChn == null || !locChn.isOpen()) {
                    throw new RuntimeException("Fail to open " + connType + " channel");
                }
                locChn.addShutdownListener(
                        cause -> {
                            LOGGER.error(
                                    connType + " Channel has been shutdown: {}",
                                    cause.getMessage(),
                                    cause);
                        });
                return locChn;
            } catch (final Exception e) {
                // Previously two byte-identical catch blocks (IOException and Exception);
                // a single handler preserves the behavior.
                retryOrThrow(
                        e,
                        retryIndex,
                        "Cannot open " + connType + " channel on " + describeAddresses(),
                        false);
                retryIndex++;
            }
        }
    }

    /** Closes all pooled and reserved channels plus both connections. Safe to call repeatedly. */
    public void close() {
        LOGGER.info("Closing all connections and channels");
        try {
            closeChannelsInMap(ConnectionType.PUBLISHER);
            closeChannelsInMap(ConnectionType.SUBSCRIBER);
            // Also close channels reserved for specific subscriptions; these are not in the
            // available pool and were previously leaked on close.
            subscriberReservedChannelPool.values().forEach(this::closeChannel);
            closeConnection(publisherConnection);
            closeConnection(subscriberConnection);
        } finally {
            availableChannelPool.clear();
            subscriberReservedChannelPool.clear();
            publisherConnection = null;
            subscriberConnection = null;
        }
    }

    /** Closes every pooled channel of the given type and empties its pool entry. */
    private void closeChannelsInMap(ConnectionType conType) {
        Set<Channel> channels = availableChannelPool.get(conType);
        if (channels != null && !channels.isEmpty()) {
            Iterator<Channel> itr = channels.iterator();
            while (itr.hasNext()) {
                Channel channel = itr.next();
                closeChannel(channel);
            }
            channels.clear();
        }
    }

    /** Best-effort close of a connection; already-closed/null connections are skipped. */
    private void closeConnection(Connection connection) {
        if (connection == null || !connection.isOpen()) {
            LOGGER.warn("Connection is null or closed already. Not closing it again");
        } else {
            try {
                connection.close();
            } catch (Exception e) {
                LOGGER.warn("Fail to close connection: {}", e.getMessage(), e);
            }
        }
    }

    /** Best-effort close of a channel; already-closed/null channels are skipped. */
    private void closeChannel(Channel channel) {
        if (channel == null || !channel.isOpen()) {
            LOGGER.warn("Channel is null or closed already. Not closing it again");
        } else {
            try {
                channel.close();
            } catch (Exception e) {
                LOGGER.warn("Fail to close channel: {}", e.getMessage(), e);
            }
        }
    }

    /**
     * Gets the channel for specified connectionType, reusing an open pooled channel when one is
     * available and creating a new one otherwise.
     *
     * @param connectionType holds the multiple channels for different connection types for thread
     *     safe operation.
     * @param rmqConnection publisher or subscriber connection instance
     * @return channel instance
     * @throws Exception
     */
    private synchronized Channel borrowChannel(
            ConnectionType connectionType, Connection rmqConnection) throws Exception {
        if (!availableChannelPool.containsKey(connectionType)) {
            Channel channel = getOrCreateChannel(connectionType, rmqConnection);
            LOGGER.info(String.format(AMQPConstants.INFO_CHANNEL_CREATION_SUCCESS, connectionType));
            return channel;
        }
        Set<Channel> channels = availableChannelPool.get(connectionType);
        if (channels != null && channels.isEmpty()) {
            Channel channel = getOrCreateChannel(connectionType, rmqConnection);
            LOGGER.info(String.format(AMQPConstants.INFO_CHANNEL_CREATION_SUCCESS, connectionType));
            return channel;
        }
        // Scan the pool for an open channel, discarding stale entries as we go.
        Iterator<Channel> itr = channels.iterator();
        while (itr.hasNext()) {
            Channel channel = itr.next();
            if (channel != null && channel.isOpen()) {
                itr.remove();
                LOGGER.info(
                        String.format(AMQPConstants.INFO_CHANNEL_BORROW_SUCCESS, connectionType));
                return channel;
            } else {
                itr.remove();
            }
        }
        Channel channel = getOrCreateChannel(connectionType, rmqConnection);
        LOGGER.info(String.format(AMQPConstants.INFO_CHANNEL_RESET_SUCCESS, connectionType));
        return channel;
    }

    /**
     * Returns the channel to connection pool for specified connectionType. Closed or null
     * channels are simply discarded (previously a {@code null} entry was pooled, forcing a reset
     * on the next borrow).
     *
     * @param connectionType
     * @param channel
     * @throws Exception
     */
    public synchronized void returnChannel(ConnectionType connectionType, Channel channel)
            throws Exception {
        if (channel != null && channel.isOpen()) {
            availableChannelPool
                    .computeIfAbsent(connectionType, type -> new HashSet<Channel>())
                    .add(channel);
        }
        LOGGER.info(String.format(AMQPConstants.INFO_CHANNEL_RETURN_SUCCESS, connectionType));
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp/AMQPObservableQueue.java | amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp/AMQPObservableQueue.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.queue.amqp;
import java.io.IOException;
import java.security.KeyManagementException;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.contribs.queue.amqp.config.AMQPEventQueueProperties;
import com.netflix.conductor.contribs.queue.amqp.config.AMQPRetryPattern;
import com.netflix.conductor.contribs.queue.amqp.util.AMQPConstants;
import com.netflix.conductor.contribs.queue.amqp.util.AMQPSettings;
import com.netflix.conductor.contribs.queue.amqp.util.ConnectionType;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.core.events.queue.ObservableQueue;
import com.netflix.conductor.metrics.Monitors;
import com.google.common.collect.Maps;
import com.rabbitmq.client.AMQP;
import com.rabbitmq.client.Address;
import com.rabbitmq.client.Channel;
import com.rabbitmq.client.ConnectionFactory;
import com.rabbitmq.client.Consumer;
import com.rabbitmq.client.DefaultConsumer;
import com.rabbitmq.client.Envelope;
import com.rabbitmq.client.GetResponse;
import rx.Observable;
import rx.Subscriber;
/**
* @author Ritu Parathody
*/
public class AMQPObservableQueue implements ObservableQueue {
private static final Logger LOGGER = LoggerFactory.getLogger(AMQPObservableQueue.class);
private final AMQPSettings settings;
private final AMQPRetryPattern retrySettings;
private final String QUEUE_TYPE = "x-queue-type";
private final int batchSize;
private final boolean useExchange;
private int pollTimeInMS;
private AMQPConnection amqpConnection;
protected LinkedBlockingQueue<Message> messages = new LinkedBlockingQueue<>();
private volatile boolean running;
    /**
     * Creates an observable queue backed by RabbitMQ.
     *
     * @param factory connection factory used to open the broker connections (must not be null)
     * @param addresses broker addresses to connect to (must be non-empty)
     * @param useExchange true to publish/consume via an exchange, false to use a queue directly
     * @param settings parsed queue/exchange settings (must not be null)
     * @param retrySettings optional retry pattern for broker operations (may be null)
     * @param batchSize maximum number of messages fetched per poll (must be > 0)
     * @param pollTimeInMS polling interval in milliseconds (must be > 0)
     * @throws IllegalArgumentException if any argument is invalid
     */
    public AMQPObservableQueue(
            ConnectionFactory factory,
            Address[] addresses,
            boolean useExchange,
            AMQPSettings settings,
            AMQPRetryPattern retrySettings,
            int batchSize,
            int pollTimeInMS) {
        if (factory == null) {
            throw new IllegalArgumentException("Connection factory is undefined");
        }
        if (addresses == null || addresses.length == 0) {
            throw new IllegalArgumentException("Addresses are undefined");
        }
        if (settings == null) {
            throw new IllegalArgumentException("Settings are undefined");
        }
        if (batchSize <= 0) {
            throw new IllegalArgumentException("Batch size must be greater than 0");
        }
        if (pollTimeInMS <= 0) {
            throw new IllegalArgumentException("Poll time must be greater than 0 ms");
        }
        this.useExchange = useExchange;
        this.settings = settings;
        this.batchSize = batchSize;
        // Connection management is delegated to the shared AMQPConnection singleton.
        this.amqpConnection = AMQPConnection.getInstance(factory, addresses, retrySettings);
        this.retrySettings = retrySettings;
        this.setPollTimeInMS(pollTimeInMS);
    }
    /**
     * Builds the RxJava observable that emits messages from the broker.
     *
     * <p>Two modes: when sequential processing is enabled, messages are pre-fetched into the
     * internal buffer and drained on a fixed interval (one batch per tick); otherwise the
     * subscriber is fed directly as deliveries arrive.
     */
    @Override
    public Observable<Message> observe() {
        Observable.OnSubscribe<Message> onSubscribe = null;
        // This will enable the messages to be processed one after the other as per the
        // observable next behavior.
        if (settings.isSequentialProcessing()) {
            LOGGER.info("Subscribing for the message processing on schedule basis");
            receiveMessages();
            onSubscribe =
                    subscriber -> {
                        Observable<Long> interval =
                                Observable.interval(pollTimeInMS, TimeUnit.MILLISECONDS);
                        interval.flatMap(
                                        (Long x) -> {
                                            if (!isRunning()) {
                                                LOGGER.debug(
                                                        "Component stopped, skip listening for messages from RabbitMQ");
                                                return Observable.from(Collections.emptyList());
                                            } else {
                                                // Drain whatever has been buffered since the
                                                // previous tick and emit it as one batch.
                                                List<Message> available = new LinkedList<>();
                                                messages.drainTo(available);
                                                if (!available.isEmpty()) {
                                                    AtomicInteger count = new AtomicInteger(0);
                                                    StringBuilder buffer = new StringBuilder();
                                                    available.forEach(
                                                            msg -> {
                                                                buffer.append(msg.getId())
                                                                        .append("=")
                                                                        .append(msg.getPayload());
                                                                count.incrementAndGet();
                                                                if (count.get()
                                                                        < available.size()) {
                                                                    buffer.append(",");
                                                                }
                                                            });
                                                    LOGGER.info(
                                                            String.format(
                                                                    "Batch from %s to conductor is %s",
                                                                    settings
                                                                            .getQueueOrExchangeName(),
                                                                    buffer.toString()));
                                                }
                                                return Observable.from(available);
                                            }
                                        })
                                .subscribe(subscriber::onNext, subscriber::onError);
                    };
            LOGGER.info("Subscribed for the message processing on schedule basis");
        } else {
            onSubscribe =
                    subscriber -> {
                        LOGGER.info("Subscribing for the event based AMQP message processing");
                        receiveMessages(subscriber);
                        LOGGER.info("Subscribed for the event based AMQP message processing");
                    };
        }
        return Observable.create(onSubscribe);
    }
    /** @return the queue type constant: exchange-based or queue-based. */
    @Override
    public String getType() {
        return useExchange ? AMQPConstants.AMQP_EXCHANGE_TYPE : AMQPConstants.AMQP_QUEUE_TYPE;
    }

    /** @return the original event URI this queue was created from */
    @Override
    public String getName() {
        return settings.getEventName();
    }

    /** @return the bare queue/exchange name (without URI parameters) */
    @Override
    public String getURI() {
        return settings.getQueueOrExchangeName();
    }

    /** @return maximum number of messages fetched per poll */
    public int getBatchSize() {
        return batchSize;
    }

    /** @return the parsed AMQP settings backing this queue */
    public AMQPSettings getSettings() {
        return settings;
    }

    /** @return broker addresses of the shared connection */
    public Address[] getAddresses() {
        return amqpConnection.getAddresses();
    }
public List<String> ack(List<Message> messages) {
final List<String> failedMessages = new ArrayList<>();
for (final Message message : messages) {
try {
ackMsg(message);
} catch (final Exception e) {
LOGGER.error("Cannot ACK message with delivery tag {}", message.getReceipt(), e);
failedMessages.add(message.getReceipt());
}
}
return failedMessages;
}
    /**
     * Acknowledges a single message on the channel reserved for the subscribed queue, retrying
     * per the configured retry pattern.
     *
     * @param message message whose receipt carries the AMQP delivery tag
     * @throws Exception when the ack keeps failing and no retries remain (or none are configured)
     */
    public void ackMsg(Message message) throws Exception {
        int retryIndex = 1;
        while (true) {
            try {
                LOGGER.info("ACK message with delivery tag {}", message.getReceipt());
                Channel chn =
                        amqpConnection.getOrCreateChannel(
                                ConnectionType.SUBSCRIBER, getSettings().getQueueOrExchangeName());
                // multiple=false: acknowledge only this delivery tag.
                chn.basicAck(Long.parseLong(message.getReceipt()), false);
                LOGGER.info("Ack'ed the message with delivery tag {}", message.getReceipt());
                break;
            } catch (final Exception e) {
                AMQPRetryPattern retry = retrySettings;
                if (retry == null) {
                    LOGGER.error(
                            "Cannot ACK message with delivery tag {}", message.getReceipt(), e);
                    throw e;
                }
                try {
                    retry.continueOrPropogate(e, retryIndex);
                } catch (Exception ex) {
                    LOGGER.error(
                            "Retries completed. Cannot ACK message with delivery tag {}",
                            message.getReceipt(),
                            e);
                    throw ex;
                }
                retryIndex++;
            }
        }
    }
@Override
public void nack(List<Message> messages) {
for (final Message message : messages) {
int retryIndex = 1;
while (true) {
try {
LOGGER.info("NACK message with delivery tag {}", message.getReceipt());
Channel chn =
amqpConnection.getOrCreateChannel(
ConnectionType.SUBSCRIBER,
getSettings().getQueueOrExchangeName());
chn.basicNack(Long.parseLong(message.getReceipt()), false, false);
LOGGER.info("Nack'ed the message with delivery tag {}", message.getReceipt());
break;
} catch (final Exception e) {
AMQPRetryPattern retry = retrySettings;
if (retry == null) {
LOGGER.error(
"Cannot NACK message with delivery tag {}",
message.getReceipt(),
e);
}
try {
retry.continueOrPropogate(e, retryIndex);
} catch (Exception ex) {
LOGGER.error(
"Retries completed. Cannot NACK message with delivery tag {}",
message.getReceipt(),
e);
break;
}
retryIndex++;
}
}
}
}
    /**
     * Builds the AMQP properties (message/correlation ids, content type and encoding, delivery
     * mode) for an outgoing message. Missing id/receipt values fall back to random UUIDs.
     */
    private static AMQP.BasicProperties buildBasicProperties(
            final Message message, final AMQPSettings settings) {
        return new AMQP.BasicProperties.Builder()
                .messageId(
                        StringUtils.isEmpty(message.getId())
                                ? UUID.randomUUID().toString()
                                : message.getId())
                .correlationId(
                        StringUtils.isEmpty(message.getReceipt())
                                ? UUID.randomUUID().toString()
                                : message.getReceipt())
                .contentType(settings.getContentType())
                .contentEncoding(settings.getContentEncoding())
                .deliveryMode(settings.getDeliveryMode())
                .build();
    }
private void publishMessage(Message message, String exchange, String routingKey) {
Channel chn = null;
int retryIndex = 1;
while (true) {
try {
final String payload = message.getPayload();
chn =
amqpConnection.getOrCreateChannel(
ConnectionType.PUBLISHER, getSettings().getQueueOrExchangeName());
chn.basicPublish(
exchange,
routingKey,
buildBasicProperties(message, settings),
payload.getBytes(settings.getContentEncoding()));
LOGGER.info(String.format("Published message to %s: %s", exchange, payload));
break;
} catch (Exception ex) {
AMQPRetryPattern retry = retrySettings;
if (retry == null) {
LOGGER.error(
"Failed to publish message {} to {}",
message.getPayload(),
exchange,
ex);
throw new RuntimeException(ex);
}
try {
retry.continueOrPropogate(ex, retryIndex);
} catch (Exception e) {
LOGGER.error(
"Retries completed. Failed to publish message {} to {}",
message.getPayload(),
exchange,
ex);
throw new RuntimeException(ex);
}
retryIndex++;
} finally {
if (chn != null) {
try {
amqpConnection.returnChannel(ConnectionType.PUBLISHER, chn);
} catch (Exception e) {
LOGGER.error(
"Failed to return the channel of {}. {}",
ConnectionType.PUBLISHER,
e);
}
}
}
}
}
    /**
     * Publishes the given messages, declaring the target exchange or queue first.
     *
     * <p>Exchange mode publishes with the configured routing key; queue mode publishes through
     * the default exchange using the declared queue's name as the routing key.
     */
    @Override
    public void publish(List<Message> messages) {
        try {
            final String exchange, routingKey;
            if (useExchange) {
                // Use exchange + routing key for publishing
                getOrCreateExchange(
                        ConnectionType.PUBLISHER,
                        settings.getQueueOrExchangeName(),
                        settings.getExchangeType(),
                        settings.isDurable(),
                        settings.autoDelete(),
                        settings.getArguments());
                exchange = settings.getQueueOrExchangeName();
                routingKey = settings.getRoutingKey();
            } else {
                // Use queue for publishing
                final AMQP.Queue.DeclareOk declareOk =
                        getOrCreateQueue(
                                ConnectionType.PUBLISHER,
                                settings.getQueueOrExchangeName(),
                                settings.isDurable(),
                                settings.isExclusive(),
                                settings.autoDelete(),
                                settings.getArguments());
                exchange = StringUtils.EMPTY; // Empty exchange name for queue
                routingKey = declareOk.getQueue(); // Routing name is the name of queue
            }
            messages.forEach(message -> publishMessage(message, exchange, routingKey));
        } catch (final RuntimeException ex) {
            throw ex;
        } catch (final Exception ex) {
            LOGGER.error("Failed to publish messages: {}", ex.getMessage(), ex);
            throw new RuntimeException(ex);
        }
    }
    /** Unack timeouts are not supported for AMQP; redelivery is governed by the broker. */
    @Override
    public void setUnackTimeout(Message message, long unackTimeout) {
        throw new UnsupportedOperationException();
    }
@Override
public long size() {
Channel chn = null;
try {
chn =
amqpConnection.getOrCreateChannel(
ConnectionType.SUBSCRIBER, getSettings().getQueueOrExchangeName());
return switch (settings.getType()) {
case EXCHANGE -> chn.messageCount(settings.getExchangeBoundQueueName());
case QUEUE -> chn.messageCount(settings.getQueueOrExchangeName());
};
} catch (final Exception e) {
throw new RuntimeException(e);
} finally {
if (chn != null) {
try {
amqpConnection.returnChannel(ConnectionType.SUBSCRIBER, chn);
} catch (Exception e) {
LOGGER.error(
"Failed to return the channel of {}. {}", ConnectionType.SUBSCRIBER, e);
}
}
}
}
    /** Closes the shared AMQP connections and all pooled channels. */
    @Override
    public void close() {
        amqpConnection.close();
    }

    /** Marks the queue as running so buffered/observed messages are delivered. */
    @Override
    public void start() {
        LOGGER.info(
                "Started listening to {}:{}",
                getClass().getSimpleName(),
                settings.getQueueOrExchangeName());
        running = true;
    }

    /** Stops delivery; already-buffered messages remain until restarted. */
    @Override
    public void stop() {
        LOGGER.info(
                "Stopped listening to {}:{}",
                getClass().getSimpleName(),
                settings.getQueueOrExchangeName());
        running = false;
    }

    /** @return whether the queue is currently delivering messages */
    @Override
    public boolean isRunning() {
        return running;
    }
/**
 * Builder that assembles {@link AMQPObservableQueue} instances from the configured
 * {@link AMQPEventQueueProperties} (broker addresses, connection factory, polling
 * settings and retry pattern).
 */
public static class Builder {

    private final Address[] addresses;
    private final int batchSize;
    private final int pollTimeInMS;
    private final ConnectionFactory factory;
    private final AMQPEventQueueProperties properties;

    public Builder(AMQPEventQueueProperties properties) {
        this.properties = properties;
        this.addresses = buildAddressesFromHosts();
        this.factory = buildConnectionFactory();
        // messages polling settings
        this.batchSize = properties.getBatchSize();
        this.pollTimeInMS = (int) properties.getPollTimeDuration().toMillis();
    }

    /** Parses the configured (comma-separated) host list into broker addresses. */
    private Address[] buildAddressesFromHosts() {
        final String hosts = properties.getHosts();
        if (StringUtils.isEmpty(hosts)) {
            throw new IllegalArgumentException("Hosts are undefined");
        }
        return Address.parseAddresses(hosts);
    }

    /**
     * Creates and configures the RabbitMQ {@link ConnectionFactory} from the
     * properties, validating the mandatory credential and endpoint settings.
     */
    private ConnectionFactory buildConnectionFactory() {
        // Renamed from "factory" to avoid shadowing the Builder field of the same name.
        final ConnectionFactory connectionFactory = new ConnectionFactory();
        // Get rabbitmq username from config
        final String username = properties.getUsername();
        if (StringUtils.isEmpty(username)) {
            throw new IllegalArgumentException("Username is null or empty");
        }
        connectionFactory.setUsername(username);
        // Get rabbitmq password from config
        final String password = properties.getPassword();
        if (StringUtils.isEmpty(password)) {
            throw new IllegalArgumentException("Password is null or empty");
        }
        connectionFactory.setPassword(password);
        // Get vHost from config (stray empty statement removed here)
        final String virtualHost = properties.getVirtualHost();
        if (StringUtils.isEmpty(virtualHost)) {
            throw new IllegalArgumentException("Virtual host is null or empty");
        }
        connectionFactory.setVirtualHost(virtualHost);
        // Get server port from config
        final int port = properties.getPort();
        if (port <= 0) {
            throw new IllegalArgumentException("Port must be greater than 0");
        }
        connectionFactory.setPort(port);
        if (properties.isUseNio()) {
            connectionFactory.useNio();
        }
        if (properties.isUseSslProtocol()) {
            try {
                connectionFactory.useSslProtocol();
            } catch (NoSuchAlgorithmException | KeyManagementException e) {
                throw new IllegalArgumentException("Invalid sslProtocol ", e);
            }
        }
        connectionFactory.setConnectionTimeout(properties.getConnectionTimeoutInMilliSecs());
        connectionFactory.setRequestedHeartbeat(properties.getRequestHeartbeatTimeoutInSecs());
        connectionFactory.setNetworkRecoveryInterval(
                properties.getNetworkRecoveryIntervalInMilliSecs());
        connectionFactory.setHandshakeTimeout(properties.getHandshakeTimeoutInMilliSecs());
        connectionFactory.setAutomaticRecoveryEnabled(true);
        connectionFactory.setTopologyRecoveryEnabled(true);
        connectionFactory.setRequestedChannelMax(properties.getMaxChannelCount());
        return connectionFactory;
    }

    /**
     * Builds an observable queue for the given URI.
     *
     * @param useExchange whether the queue is bound through an exchange
     * @param queueURI the queue/exchange URI to parse into {@link AMQPSettings}
     * @param queueType the AMQP settings type ("amqp_queue" or "amqp_exchange")
     * @return a configured {@link AMQPObservableQueue}
     */
    public AMQPObservableQueue build(
            final boolean useExchange, final String queueURI, final String queueType) {
        final AMQPSettings settings = new AMQPSettings(properties, queueType).fromURI(queueURI);
        final AMQPRetryPattern retrySettings =
                new AMQPRetryPattern(
                        properties.getLimit(), properties.getDuration(), properties.getType());
        return new AMQPObservableQueue(
                factory,
                addresses,
                useExchange,
                settings,
                retrySettings,
                batchSize,
                pollTimeInMS);
    }
}
/**
 * Convenience overload: declares the exchange described by this queue's settings
 * (name, exchange type, durability, auto-delete and arguments).
 */
private AMQP.Exchange.DeclareOk getOrCreateExchange(ConnectionType connectionType)
        throws Exception {
    return getOrCreateExchange(
            connectionType,
            settings.getQueueOrExchangeName(),
            settings.getExchangeType(),
            settings.isDurable(),
            settings.autoDelete(),
            settings.getArguments());
}
/**
 * Declares the given exchange on the broker (idempotent if it already exists with
 * the same attributes).
 *
 * @param connectionType whether a publisher or subscriber channel is borrowed
 * @param name exchange name; must be non-empty
 * @param type exchange type (e.g. "topic"); must be non-empty
 * @param isDurable whether the exchange survives broker restarts
 * @param autoDelete whether the broker deletes the exchange when unused
 * @param arguments extra declaration arguments passed through to the broker
 * @return the broker's declare-ok response
 * @throws IllegalArgumentException if {@code name} or {@code type} is empty
 * @throws Exception if obtaining the channel or declaring the exchange fails
 */
private AMQP.Exchange.DeclareOk getOrCreateExchange(
        ConnectionType connectionType,
        String name,
        final String type,
        final boolean isDurable,
        final boolean autoDelete,
        final Map<String, Object> arguments)
        throws Exception {
    // IllegalArgumentException (a RuntimeException subtype) is more precise than
    // a raw RuntimeException for argument validation; existing catchers still work.
    if (StringUtils.isEmpty(name)) {
        throw new IllegalArgumentException("Exchange name is undefined");
    }
    if (StringUtils.isEmpty(type)) {
        throw new IllegalArgumentException("Exchange type is undefined");
    }
    Channel chn = null;
    try {
        LOGGER.debug("Creating exchange {} of type {}", name, type);
        chn =
                amqpConnection.getOrCreateChannel(
                        connectionType, getSettings().getQueueOrExchangeName());
        return chn.exchangeDeclare(name, type, isDurable, autoDelete, arguments);
    } catch (final Exception e) {
        LOGGER.warn("Failed to create exchange {} of type {}", name, type, e);
        throw e;
    } finally {
        if (chn != null) {
            try {
                amqpConnection.returnChannel(connectionType, chn);
            } catch (Exception e) {
                // No trailing "{}": pass the throwable last so SLF4J logs the stack trace.
                LOGGER.error("Failed to return the channel of {}.", connectionType, e);
            }
        }
    }
}
/**
 * Convenience overload: declares the queue described by this queue's settings
 * (name, durability, exclusivity, auto-delete and arguments).
 */
private AMQP.Queue.DeclareOk getOrCreateQueue(ConnectionType connectionType) throws Exception {
    return getOrCreateQueue(
            connectionType,
            settings.getQueueOrExchangeName(),
            settings.isDurable(),
            settings.isExclusive(),
            settings.autoDelete(),
            settings.getArguments());
}
/**
 * Declares the given queue on the broker (idempotent if it already exists with
 * the same attributes).
 *
 * @param connectionType whether a publisher or subscriber channel is borrowed
 * @param name queue name; must be non-empty
 * @param isDurable whether the queue survives broker restarts
 * @param isExclusive whether the queue is restricted to this connection
 * @param autoDelete whether the broker deletes the queue when the last consumer cancels
 * @param arguments extra declaration arguments; NOTE: this map is mutated (the
 *     configured queue type is added under the QUEUE_TYPE key)
 * @return the broker's declare-ok response
 * @throws IllegalArgumentException if {@code name} is empty
 * @throws Exception if obtaining the channel or declaring the queue fails
 */
private AMQP.Queue.DeclareOk getOrCreateQueue(
        ConnectionType connectionType,
        final String name,
        final boolean isDurable,
        final boolean isExclusive,
        final boolean autoDelete,
        final Map<String, Object> arguments)
        throws Exception {
    // IllegalArgumentException (a RuntimeException subtype) is more precise than
    // a raw RuntimeException for argument validation; existing catchers still work.
    if (StringUtils.isEmpty(name)) {
        throw new IllegalArgumentException("Queue name is undefined");
    }
    arguments.put(QUEUE_TYPE, settings.getQueueType());
    Channel chn = null;
    try {
        LOGGER.debug("Creating queue {}", name);
        chn =
                amqpConnection.getOrCreateChannel(
                        connectionType, getSettings().getQueueOrExchangeName());
        return chn.queueDeclare(name, isDurable, isExclusive, autoDelete, arguments);
    } catch (final Exception e) {
        LOGGER.warn("Failed to create queue {}", name, e);
        throw e;
    } finally {
        if (chn != null) {
            try {
                amqpConnection.returnChannel(connectionType, chn);
            } catch (Exception e) {
                // No trailing "{}": pass the throwable last so SLF4J logs the stack trace.
                LOGGER.error("Failed to return the channel of {}.", connectionType, e);
            }
        }
    }
}
/**
 * Converts a raw AMQP {@link GetResponse} into a conductor {@link Message}.
 *
 * <p>The message id comes from the AMQP properties, the payload is decoded using
 * the configured content encoding, and the delivery tag becomes the receipt.
 * Returns {@code null} when the response itself is {@code null}.
 */
private static Message asMessage(AMQPSettings settings, GetResponse response) throws Exception {
    if (response == null) {
        return null;
    }
    final String payload = new String(response.getBody(), settings.getContentEncoding());
    final long deliveryTag = response.getEnvelope().getDeliveryTag();
    final Message message = new Message();
    message.setId(response.getProps().getMessageId());
    message.setPayload(payload);
    message.setReceipt(Long.toString(deliveryTag));
    return message;
}
/**
 * Registers a consumer on the given queue and appends each delivery, converted via
 * {@link #asMessage}, to the in-memory {@code messages} buffer.
 *
 * <p>Messages are consumed with manual acknowledgement (autoAck=false); the delivery
 * tag is kept as the message receipt for a later ack.
 *
 * @param queueName the broker queue to consume from
 * @throws Exception if the channel cannot be created or the consumer registration fails
 */
private void receiveMessagesFromQueue(String queueName) throws Exception {
    LOGGER.debug("Accessing channel for queue {}", queueName);
    Consumer consumer =
            new DefaultConsumer(
                    amqpConnection.getOrCreateChannel(
                            ConnectionType.SUBSCRIBER,
                            getSettings().getQueueOrExchangeName())) {

                @Override
                public void handleDelivery(
                        final String consumerTag,
                        final Envelope envelope,
                        final AMQP.BasicProperties properties,
                        final byte[] body)
                        throws IOException {
                    try {
                        Message message =
                                asMessage(
                                        settings,
                                        new GetResponse(
                                                envelope, properties, body, Integer.MAX_VALUE));
                        if (message != null) {
                            if (LOGGER.isDebugEnabled()) {
                                LOGGER.debug(
                                        "Got message with ID {} and receipt {}",
                                        message.getId(),
                                        message.getReceipt());
                            }
                            messages.add(message);
                            LOGGER.info("receiveMessagesFromQueue- End method {}", messages);
                        }
                    } catch (InterruptedException e) {
                        // Passing the throwable last (no "{}" for it) preserves the stack trace.
                        LOGGER.error(
                                "Issue in handling the messages for the subscriber with consumer tag {}.",
                                consumerTag,
                                e);
                        // Restore the interrupt status so callers can observe it.
                        Thread.currentThread().interrupt();
                    } catch (Exception e) {
                        LOGGER.error(
                                "Issue in handling the messages for the subscriber with consumer tag {}.",
                                consumerTag,
                                e);
                    }
                }

                @Override // was missing; handleCancel overrides DefaultConsumer
                public void handleCancel(String consumerTag) throws IOException {
                    LOGGER.error(
                            "Received a consumer cancel notification for subscriber {}",
                            consumerTag);
                }
            };
    amqpConnection
            .getOrCreateChannel(
                    ConnectionType.SUBSCRIBER, getSettings().getQueueOrExchangeName())
            .basicConsume(queueName, false, consumer);
    Monitors.recordEventQueueMessagesProcessed(getType(), queueName, messages.size());
}
private void receiveMessagesFromQueue(String queueName, Subscriber<? super Message> subscriber)
throws Exception {
LOGGER.debug("Accessing channel for queue {}", queueName);
Consumer consumer =
new DefaultConsumer(
amqpConnection.getOrCreateChannel(
ConnectionType.SUBSCRIBER,
getSettings().getQueueOrExchangeName())) {
@Override
public void handleDelivery(
final String consumerTag,
final Envelope envelope,
final AMQP.BasicProperties properties,
final byte[] body)
throws IOException {
try {
Message message =
asMessage(
settings,
new GetResponse(
envelope, properties, body, Integer.MAX_VALUE));
if (message == null) {
return;
}
LOGGER.info(
"Got message with ID {} and receipt {}",
message.getId(),
message.getReceipt());
LOGGER.debug("Message content {}", message);
// Not using thread-pool here as the number of concurrent threads are
// controlled
// by the number of messages delivery using pre-fetch count in RabbitMQ
Thread newThread =
new Thread(
() -> {
LOGGER.info(
"Spawning a new thread for message with ID {}",
message.getId());
subscriber.onNext(message);
});
newThread.start();
} catch (InterruptedException e) {
LOGGER.error(
"Issue in handling the mesages for the subscriber with consumer tag {}. {}",
consumerTag,
e);
Thread.currentThread().interrupt();
} catch (Exception e) {
LOGGER.error(
"Issue in handling the mesages for the subscriber with consumer tag {}. {}",
consumerTag,
e);
}
}
public void handleCancel(String consumerTag) throws IOException {
LOGGER.error(
"Recieved a consumer cancel notification for subscriber {}",
consumerTag);
}
};
amqpConnection
.getOrCreateChannel(
ConnectionType.SUBSCRIBER, getSettings().getQueueOrExchangeName())
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | true |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/AMQPConfigurations.java | amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/AMQPConfigurations.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.queue.amqp.util;
/**
 * Query-string parameter names recognized in AMQP queue/exchange URIs
 * (e.g. {@code amqp_exchange:myExchange?exchangeType=topic&routingKey=rk}).
 *
 * @author Ritu Parathody
 */
public enum AMQPConfigurations {

    // queue exchange settings
    PARAM_EXCHANGE_TYPE("exchangeType"),
    PARAM_QUEUE_NAME("bindQueueName"),
    PARAM_ROUTING_KEY("routingKey"),
    PARAM_DELIVERY_MODE("deliveryMode"),
    PARAM_DURABLE("durable"),
    PARAM_EXCLUSIVE("exclusive"),
    PARAM_AUTO_DELETE("autoDelete"),
    PARAM_MAX_PRIORITY("maxPriority");

    /** The literal parameter name as it appears in the queue URI; immutable. */
    private final String propertyName;

    AMQPConfigurations(String propertyName) {
        this.propertyName = propertyName;
    }

    @Override
    public String toString() {
        return propertyName;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/ConnectionType.java | amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/ConnectionType.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.queue.amqp.util;
/** Distinguishes the two roles a pooled AMQP channel can be borrowed for. */
public enum ConnectionType {
    // Channels used to publish messages.
    PUBLISHER,
    // Channels used to consume messages.
    SUBSCRIBER
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/AMQPSettings.java | amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/AMQPSettings.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.queue.amqp.util;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.contribs.queue.amqp.config.AMQPEventQueueProperties;
import lombok.Getter;
import static com.netflix.conductor.contribs.queue.amqp.util.AMQPConfigurations.*;
/**
* @author Ritu Parathody
*/
public class AMQPSettings {

    // Matches "amqp_queue:<name>?<params>" or "amqp_exchange:<name>?<params>";
    // both the type prefix and the query params are optional.
    private static final Pattern URI_PATTERN =
            Pattern.compile(
                    "^(?<type>amqp_(?:queue|exchange))?:?(?<name>[^?]+)\\??(?<params>.*)$",
                    Pattern.CASE_INSENSITIVE);

    // QUEUE or EXCHANGE; defaults to QUEUE, may be overridden by the URI prefix.
    private Type type;
    private String queueOrExchangeName;
    // The full original queue URI this settings object was parsed from.
    private String eventName;
    private String exchangeType;
    // Optional explicit name for the queue bound to an exchange.
    private String exchangeBoundQueueName;
    private String queueType;
    private String routingKey;
    private final String contentEncoding;
    private final String contentType;
    private boolean durable;
    private boolean exclusive;
    private boolean autoDelete;
    private boolean sequentialProcessing;
    // 1 = non-persistent, 2 = persistent (validated in setDeliveryMode).
    private int deliveryMode;
    private final Map<String, Object> arguments = new HashMap<>();

    private static final Logger LOGGER = LoggerFactory.getLogger(AMQPSettings.class);

    /** Initializes all settings from the configured defaults in the properties. */
    public AMQPSettings(final AMQPEventQueueProperties properties) {
        // Initialize with a default values
        durable = properties.isDurable();
        exclusive = properties.isExclusive();
        autoDelete = properties.isAutoDelete();
        contentType = properties.getContentType();
        contentEncoding = properties.getContentEncoding();
        exchangeType = properties.getExchangeType();
        routingKey = StringUtils.EMPTY;
        queueType = properties.getQueueType();
        sequentialProcessing = properties.isSequentialMsgProcessing();
        type = Type.QUEUE;
        // Set common settings for publishing and consuming
        setDeliveryMode(properties.getDeliveryMode());
    }

    /** Like the single-arg constructor, but with an explicit observer type string. */
    public AMQPSettings(final AMQPEventQueueProperties properties, final String type) {
        this(properties);
        this.type = Type.fromString(type);
    }

    public final boolean isDurable() {
        return durable;
    }

    public final boolean isExclusive() {
        return exclusive;
    }

    public final boolean autoDelete() {
        return autoDelete;
    }

    public final Map<String, Object> getArguments() {
        return arguments;
    }

    public final String getContentEncoding() {
        return contentEncoding;
    }

    /**
     * Use queue for publishing
     *
     * @param queueName the name of queue
     */
    public void setQueue(String queueName) {
        if (StringUtils.isEmpty(queueName)) {
            throw new IllegalArgumentException("Queue name for publishing is undefined");
        }
        this.queueOrExchangeName = queueName;
    }

    public String getQueueOrExchangeName() {
        return queueOrExchangeName;
    }

    /**
     * Returns the queue bound to the exchange; falls back to a derived
     * "bound_to_<exchange>" name when none was configured via the URI.
     */
    public String getExchangeBoundQueueName() {
        if (StringUtils.isEmpty(exchangeBoundQueueName)) {
            return String.format("bound_to_%s", queueOrExchangeName);
        }
        return exchangeBoundQueueName;
    }

    public String getExchangeType() {
        return exchangeType;
    }

    public String getRoutingKey() {
        return routingKey;
    }

    public int getDeliveryMode() {
        return deliveryMode;
    }

    /** Sets the AMQP delivery mode; only 1 (non-persistent) or 2 (persistent) are valid. */
    public AMQPSettings setDeliveryMode(int deliveryMode) {
        if (deliveryMode != 1 && deliveryMode != 2) {
            throw new IllegalArgumentException("Delivery mode must be 1 or 2");
        }
        this.deliveryMode = deliveryMode;
        return this;
    }

    public String getContentType() {
        return contentType;
    }

    /**
     * Complete settings from the queue URI.
     *
     * <p><u>Example for queue:</u>
     *
     * <pre>
     * amqp_queue:myQueue?deliveryMode=1&autoDelete=true&exclusive=true
     * </pre>
     *
     * <u>Example for exchange:</u>
     *
     * <pre>
     * amqp_exchange:myExchange?bindQueueName=myQueue&exchangeType=topic&routingKey=myRoutingKey&exclusive=true
     * </pre>
     *
     * @param queueURI the queue or exchange URI to parse
     * @return this settings instance, updated from the URI
     */
    public final AMQPSettings fromURI(final String queueURI) {
        final Matcher matcher = URI_PATTERN.matcher(queueURI);
        if (!matcher.matches()) {
            throw new IllegalArgumentException("Queue URI doesn't matches the expected regexp");
        }

        // Set name of queue or exchange from group "name"
        LOGGER.info("Queue URI:{}", queueURI);
        if (Objects.nonNull(matcher.group("type"))) {
            type = Type.fromString(matcher.group("type"));
        }
        queueOrExchangeName = matcher.group("name");
        eventName = queueURI;
        if (matcher.groupCount() > 1) {
            final String queryParams = matcher.group("params");
            if (StringUtils.isNotEmpty(queryParams)) {
                // Handle parameters: split "k1=v1&k2=v2" pairs (whitespace tolerated)
                Arrays.stream(queryParams.split("\\s*\\&\\s*"))
                        .forEach(
                                param -> {
                                    final String[] kv = param.split("\\s*=\\s*");
                                    if (kv.length == 2) {
                                        if (kv[0].equalsIgnoreCase(
                                                String.valueOf(PARAM_EXCHANGE_TYPE))) {
                                            String value = kv[1];
                                            if (StringUtils.isEmpty(value)) {
                                                throw new IllegalArgumentException(
                                                        "The provided exchange type is empty");
                                            }
                                            exchangeType = value;
                                        }
                                        if (kv[0].equalsIgnoreCase(
                                                (String.valueOf(PARAM_QUEUE_NAME)))) {
                                            exchangeBoundQueueName = kv[1];
                                        }
                                        if (kv[0].equalsIgnoreCase(
                                                (String.valueOf(PARAM_ROUTING_KEY)))) {
                                            String value = kv[1];
                                            if (StringUtils.isEmpty(value)) {
                                                throw new IllegalArgumentException(
                                                        "The provided routing key is empty");
                                            }
                                            routingKey = value;
                                        }
                                        if (kv[0].equalsIgnoreCase(
                                                (String.valueOf(PARAM_DURABLE)))) {
                                            durable = Boolean.parseBoolean(kv[1]);
                                        }
                                        if (kv[0].equalsIgnoreCase(
                                                (String.valueOf(PARAM_EXCLUSIVE)))) {
                                            exclusive = Boolean.parseBoolean(kv[1]);
                                        }
                                        if (kv[0].equalsIgnoreCase(
                                                (String.valueOf(PARAM_AUTO_DELETE)))) {
                                            autoDelete = Boolean.parseBoolean(kv[1]);
                                        }
                                        if (kv[0].equalsIgnoreCase(
                                                (String.valueOf(PARAM_DELIVERY_MODE)))) {
                                            setDeliveryMode(Integer.parseInt(kv[1]));
                                        }
                                        if (kv[0].equalsIgnoreCase(
                                                (String.valueOf(PARAM_MAX_PRIORITY)))) {
                                            arguments.put("x-max-priority", Integer.valueOf(kv[1]));
                                        }
                                    }
                                });
            }
        }
        return this;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) return true;
        if (!(obj instanceof AMQPSettings)) return false;
        AMQPSettings other = (AMQPSettings) obj;
        return Objects.equals(arguments, other.arguments)
                && autoDelete == other.autoDelete
                && Objects.equals(contentEncoding, other.contentEncoding)
                && Objects.equals(contentType, other.contentType)
                && deliveryMode == other.deliveryMode
                && durable == other.durable
                && Objects.equals(eventName, other.eventName)
                && Objects.equals(exchangeType, other.exchangeType)
                && exclusive == other.exclusive
                && Objects.equals(queueOrExchangeName, other.queueOrExchangeName)
                && Objects.equals(exchangeBoundQueueName, other.exchangeBoundQueueName)
                && Objects.equals(queueType, other.queueType)
                && Objects.equals(routingKey, other.routingKey)
                && sequentialProcessing == other.sequentialProcessing;
    }

    @Override
    public int hashCode() {
        // Must stay consistent with equals: same field set, same order.
        return Objects.hash(
                arguments,
                autoDelete,
                contentEncoding,
                contentType,
                deliveryMode,
                durable,
                eventName,
                exchangeType,
                exclusive,
                queueOrExchangeName,
                exchangeBoundQueueName,
                queueType,
                routingKey,
                sequentialProcessing);
    }

    @Override
    public String toString() {
        return "AMQPSettings [queueOrExchangeName="
                + queueOrExchangeName
                + ", eventName="
                + eventName
                + ", exchangeType="
                + exchangeType
                + ", exchangeQueueName="
                + exchangeBoundQueueName
                + ", queueType="
                + queueType
                + ", routingKey="
                + routingKey
                + ", contentEncoding="
                + contentEncoding
                + ", contentType="
                + contentType
                + ", durable="
                + durable
                + ", exclusive="
                + exclusive
                + ", autoDelete="
                + autoDelete
                + ", sequentialProcessing="
                + sequentialProcessing
                + ", deliveryMode="
                + deliveryMode
                + ", arguments="
                + arguments
                + "]";
    }

    /** Returns the original queue URI this settings object was parsed from. */
    public String getEventName() {
        return eventName;
    }

    /**
     * @return the queueType
     */
    public String getQueueType() {
        return queueType;
    }

    /**
     * @return the sequentialProcessing
     */
    public boolean isSequentialProcessing() {
        return sequentialProcessing;
    }

    /**
     * Determine observer type - exchange or queue
     *
     * @return the observer type
     */
    public Type getType() {
        return type;
    }

    /** Observer type, matching the URI prefix ("amqp_queue" / "amqp_exchange"). */
    public enum Type {
        QUEUE("amqp_queue"),
        EXCHANGE("amqp_exchange");

        @Getter private String value;

        Type(String value) {
            this.value = value;
        }

        /** Case-insensitive lookup by prefix; unknown values default to QUEUE. */
        public static Type fromString(String value) {
            for (Type type : Type.values()) {
                if (type.value.equalsIgnoreCase(value)) {
                    return type;
                }
            }
            return QUEUE;
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/AMQPConstants.java | amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/AMQPConstants.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.queue.amqp.util;
/**
 * Shared constants and defaults for the AMQP event-queue module.
 *
 * @author Ritu Parathody
 */
public class AMQPConstants {

    /** Constants holder; not meant to be instantiated. */
    private AMQPConstants() {}

    /** this when set will create a rabbitmq queue */
    public static final String AMQP_QUEUE_TYPE = "amqp_queue";

    /** this when set will create a rabbitmq exchange */
    public static final String AMQP_EXCHANGE_TYPE = "amqp_exchange";

    public static final String PROPERTY_KEY_TEMPLATE = "conductor.event-queues.amqp.%s";

    /** default content type for the message read from rabbitmq */
    public static final String DEFAULT_CONTENT_TYPE = "application/json";

    /** default encoding for the message read from rabbitmq */
    public static final String DEFAULT_CONTENT_ENCODING = "UTF-8";

    /** default rabbitmq exchange type */
    public static final String DEFAULT_EXCHANGE_TYPE = "topic";

    /**
     * default rabbitmq durability When set to true the queues are persisted to the disk.
     *
     * <p>{@see <a href="https://www.rabbitmq.com/queues.html">RabbitMQ</a>}.
     */
    public static final boolean DEFAULT_DURABLE = true;

    /**
     * default rabbitmq exclusivity When set to true the queues can be only used by one connection.
     *
     * <p>{@see <a href="https://www.rabbitmq.com/queues.html">RabbitMQ</a>}.
     */
    public static final boolean DEFAULT_EXCLUSIVE = false;

    /**
     * default rabbitmq auto delete When set to true the queues will be deleted when the last
     * consumer is cancelled
     *
     * <p>{@see <a href="https://www.rabbitmq.com/queues.html">RabbitMQ</a>}.
     */
    public static final boolean DEFAULT_AUTO_DELETE = false;

    /**
     * default rabbitmq delivery mode This is a property of the message When set to 1 the will be
     * non persistent and 2 will be persistent {@see <a
     * href="https://www.rabbitmq.com/releases/rabbitmq-java-client/v3.5.4/rabbitmq-java-client-javadoc-3.5.4/com/rabbitmq/client/MessageProperties.html>
     * Message Properties</a>}.
     */
    public static final int DEFAULT_DELIVERY_MODE = 2;

    /**
     * default rabbitmq delivery mode This is a property of the channel limit to get the number of
     * unacknowledged messages. {@see <a
     * href="https://www.rabbitmq.com/consumer-prefetch.html>Consumer Prefetch</a>}.
     */
    public static final int DEFAULT_BATCH_SIZE = 1;

    /**
     * default rabbitmq delivery mode This is a property of the amqp implementation which sets teh
     * polling time to drain the in-memory queue.
     */
    public static final int DEFAULT_POLL_TIME_MS = 100;

    // info channel messages.
    public static final String INFO_CHANNEL_BORROW_SUCCESS =
            "Borrowed the channel object from the channel pool for " + "the connection type [%s]";
    public static final String INFO_CHANNEL_RETURN_SUCCESS =
            "Returned the borrowed channel object to the pool for " + "the connection type [%s]";
    public static final String INFO_CHANNEL_CREATION_SUCCESS =
            "Channels are not available in the pool. Created a"
                    + " channel for the connection type [%s]";
    public static final String INFO_CHANNEL_RESET_SUCCESS =
            "No proper channels available in the pool. Created a "
                    + "channel for the connection type [%s]";
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/RetryType.java | amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/RetryType.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.queue.amqp.util;
/** RetryType holds the retry type (back-off strategy) used for AMQP retry patterns. */
public enum RetryType {
    // Retries spaced at regular (fixed) intervals.
    REGULARINTERVALS,
    // Retries spaced with exponential back-off.
    EXPONENTIALBACKOFF,
    // Retries spaced with incrementally growing intervals.
    INCREMENTALINTERVALS
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp/config/AMQPEventQueueConfiguration.java | amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp/config/AMQPEventQueueConfiguration.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.queue.amqp.config;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.lang3.StringUtils;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import com.netflix.conductor.contribs.queue.amqp.AMQPObservableQueue.Builder;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.events.EventQueueProvider;
import com.netflix.conductor.core.events.queue.ObservableQueue;
import com.netflix.conductor.model.TaskModel.Status;
@Configuration(proxyBeanMethods = false)
@EnableConfigurationProperties(AMQPEventQueueProperties.class)
@ConditionalOnProperty(name = "conductor.event-queues.amqp.enabled", havingValue = "true")
public class AMQPEventQueueConfiguration {

    /**
     * URI prefixes distinguishing queue-backed from exchange-backed event queues.
     * Renamed from QUEUE_TYPE to the conventional UpperCamelCase; private, so no
     * external callers are affected.
     */
    private enum QueueType {
        AMQP_QUEUE("amqp_queue"),
        AMQP_EXCHANGE("amqp_exchange");

        private final String type;

        QueueType(String type) {
            this.type = type;
        }

        public String getType() {
            return type;
        }
    }

    /** Event-queue provider for URIs with the {@code amqp_queue} prefix. */
    @Bean
    public EventQueueProvider amqpEventQueueProvider(AMQPEventQueueProperties properties) {
        return new AMQPEventQueueProvider(properties, QueueType.AMQP_QUEUE.getType(), false);
    }

    /** Event-queue provider for URIs with the {@code amqp_exchange} prefix. */
    @Bean
    public EventQueueProvider amqpExchangeEventQueueProvider(AMQPEventQueueProperties properties) {
        return new AMQPEventQueueProvider(properties, QueueType.AMQP_EXCHANGE.getType(), true);
    }

    /**
     * Builds the default status-notification queues (one per terminal task status) when AMQP is
     * configured as the default event queue type.
     */
    @ConditionalOnProperty(name = "conductor.default-event-queue.type", havingValue = "amqp")
    @Bean
    public Map<Status, ObservableQueue> getQueues(
            ConductorProperties conductorProperties, AMQPEventQueueProperties properties) {
        String stack = "";
        // Prefix queue names with the stack identifier when one is configured.
        // StringUtils.isNotEmpty is equivalent to the previous null + length() > 0 check.
        if (StringUtils.isNotEmpty(conductorProperties.getStack())) {
            stack = conductorProperties.getStack() + "_";
        }
        final boolean useExchange = properties.isUseExchange();
        Status[] statuses = new Status[] {Status.COMPLETED, Status.FAILED};
        Map<Status, ObservableQueue> queues = new HashMap<>();
        for (Status status : statuses) {
            String queuePrefix =
                    StringUtils.isBlank(properties.getListenerQueuePrefix())
                            ? conductorProperties.getAppId() + "_amqp_notify_" + stack
                            : properties.getListenerQueuePrefix();
            String queueName = queuePrefix + status.name();
            final ObservableQueue queue =
                    new Builder(properties)
                            .build(useExchange, queueName, QueueType.AMQP_QUEUE.getType());
            queues.put(status, queue);
        }
        return queues;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp/config/AMQPEventQueueProperties.java | amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp/config/AMQPEventQueueProperties.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.queue.amqp.config;
import java.time.Duration;
import org.springframework.boot.context.properties.ConfigurationProperties;
import com.netflix.conductor.contribs.queue.amqp.util.RetryType;
import com.rabbitmq.client.AMQP.PROTOCOL;
import com.rabbitmq.client.ConnectionFactory;
@ConfigurationProperties("conductor.event-queues.amqp")
public class AMQPEventQueueProperties {
// Max number of unacknowledged messages delivered per consumer (prefetch count).
private int batchSize = 1;

// Interval at which the in-memory message buffer is drained.
private Duration pollTimeDuration = Duration.ofMillis(100);

// Broker endpoint / credential defaults come from the RabbitMQ client library.
private String hosts = ConnectionFactory.DEFAULT_HOST;
private String username = ConnectionFactory.DEFAULT_USER;
private String password = ConnectionFactory.DEFAULT_PASS;
private String virtualHost = ConnectionFactory.DEFAULT_VHOST;
private int port = PROTOCOL.PORT;

// Connection tuning: timeouts in ms unless the name says otherwise.
private int connectionTimeoutInMilliSecs = 180000;
private int networkRecoveryIntervalInMilliSecs = 5000;
private int requestHeartbeatTimeoutInSecs = 30;
private int handshakeTimeoutInMilliSecs = 180000;
private int maxChannelCount = 5000;

// Retry pattern: max attempts, base delay (ms) and back-off strategy.
private int limit = 50;
private int duration = 1000;
private RetryType retryType = RetryType.REGULARINTERVALS;
/** @return the retry attempt limit */
public int getLimit() {
    return limit;
}

public void setLimit(int limit) {
    this.limit = limit;
}

/** @return the base retry delay in milliseconds */
public int getDuration() {
    return duration;
}

public void setDuration(int duration) {
    this.duration = duration;
}

// NOTE: the "type" property maps to the retryType field.
public RetryType getType() {
    return retryType;
}

public void setType(RetryType type) {
    this.retryType = type;
}

public int getConnectionTimeoutInMilliSecs() {
    return connectionTimeoutInMilliSecs;
}

public void setConnectionTimeoutInMilliSecs(int connectionTimeoutInMilliSecs) {
    this.connectionTimeoutInMilliSecs = connectionTimeoutInMilliSecs;
}

public int getHandshakeTimeoutInMilliSecs() {
    return handshakeTimeoutInMilliSecs;
}

public void setHandshakeTimeoutInMilliSecs(int handshakeTimeoutInMilliSecs) {
    this.handshakeTimeoutInMilliSecs = handshakeTimeoutInMilliSecs;
}

public int getMaxChannelCount() {
    return maxChannelCount;
}

public void setMaxChannelCount(int maxChannelCount) {
    this.maxChannelCount = maxChannelCount;
}
// Channel / queue declaration defaults (see AMQPConstants for their meaning).
private boolean useNio = false;
private boolean durable = true;
private boolean exclusive = false;
private boolean autoDelete = false;
private String contentType = "application/json";
private String contentEncoding = "UTF-8";
private String exchangeType = "topic";
private String queueType = "classic";
private boolean sequentialMsgProcessing = true;
// 1 = non-persistent, 2 = persistent.
private int deliveryMode = 2;
private boolean useExchange = true;
// Optional prefix for the default status-notification queues.
private String listenerQueuePrefix = "";
private boolean useSslProtocol = false;
public int getBatchSize() {
return batchSize;
}
public void setBatchSize(int batchSize) {
this.batchSize = batchSize;
}
public Duration getPollTimeDuration() {
return pollTimeDuration;
}
public void setPollTimeDuration(Duration pollTimeDuration) {
this.pollTimeDuration = pollTimeDuration;
}
public String getHosts() {
return hosts;
}
public void setHosts(String hosts) {
this.hosts = hosts;
}
public String getUsername() {
return username;
}
public void setUsername(String username) {
this.username = username;
}
public String getPassword() {
return password;
}
public void setPassword(String password) {
this.password = password;
}
public String getVirtualHost() {
return virtualHost;
}
public void setVirtualHost(String virtualHost) {
this.virtualHost = virtualHost;
}
public int getPort() {
return port;
}
public void setPort(int port) {
this.port = port;
}
public boolean isUseNio() {
return useNio;
}
public void setUseNio(boolean useNio) {
this.useNio = useNio;
}
public boolean isDurable() {
return durable;
}
public void setDurable(boolean durable) {
this.durable = durable;
}
public boolean isExclusive() {
return exclusive;
}
public void setExclusive(boolean exclusive) {
this.exclusive = exclusive;
}
public boolean isAutoDelete() {
return autoDelete;
}
public void setAutoDelete(boolean autoDelete) {
this.autoDelete = autoDelete;
}
public String getContentType() {
return contentType;
}
public void setContentType(String contentType) {
this.contentType = contentType;
}
public String getContentEncoding() {
return contentEncoding;
}
public void setContentEncoding(String contentEncoding) {
this.contentEncoding = contentEncoding;
}
public String getExchangeType() {
return exchangeType;
}
public void setExchangeType(String exchangeType) {
this.exchangeType = exchangeType;
}
public int getDeliveryMode() {
return deliveryMode;
}
public void setDeliveryMode(int deliveryMode) {
this.deliveryMode = deliveryMode;
}
public boolean isUseExchange() {
return useExchange;
}
public void setUseExchange(boolean useExchange) {
this.useExchange = useExchange;
}
public String getListenerQueuePrefix() {
return listenerQueuePrefix;
}
public void setListenerQueuePrefix(String listenerQueuePrefix) {
this.listenerQueuePrefix = listenerQueuePrefix;
}
public String getQueueType() {
return queueType;
}
public boolean isUseSslProtocol() {
return useSslProtocol;
}
public void setUseSslProtocol(boolean useSslProtocol) {
this.useSslProtocol = useSslProtocol;
}
/**
* @param queueType Supports two queue types, 'classic' and 'quorum'. Classic will be be
* deprecated in 2022 and its usage discouraged from RabbitMQ community. So not using enum
* type here to hold different values.
*/
public void setQueueType(String queueType) {
this.queueType = queueType;
}
/**
* @return the sequentialMsgProcessing
*/
public boolean isSequentialMsgProcessing() {
return sequentialMsgProcessing;
}
/**
* @param sequentialMsgProcessing the sequentialMsgProcessing to set Supports sequential and
* parallel message processing capabilities. In parallel message processing, number of
* threads are controlled by batch size. No thread control or execution framework required
* here as threads are limited and short-lived.
*/
public void setSequentialMsgProcessing(boolean sequentialMsgProcessing) {
this.sequentialMsgProcessing = sequentialMsgProcessing;
}
public int getNetworkRecoveryIntervalInMilliSecs() {
return networkRecoveryIntervalInMilliSecs;
}
public void setNetworkRecoveryIntervalInMilliSecs(int networkRecoveryIntervalInMilliSecs) {
this.networkRecoveryIntervalInMilliSecs = networkRecoveryIntervalInMilliSecs;
}
public int getRequestHeartbeatTimeoutInSecs() {
return requestHeartbeatTimeoutInSecs;
}
public void setRequestHeartbeatTimeoutInSecs(int requestHeartbeatTimeoutInSecs) {
this.requestHeartbeatTimeoutInSecs = requestHeartbeatTimeoutInSecs;
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp/config/AMQPRetryPattern.java | amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp/config/AMQPRetryPattern.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.queue.amqp.config;
import com.netflix.conductor.contribs.queue.amqp.util.RetryType;
/**
 * Retry/back-off policy for AMQP operations. Supports regular-interval, incremental and
 * exponential back-off strategies, bounded by a maximum number of attempts.
 */
public class AMQPRetryPattern {

    private int limit = 50;
    private int duration = 1000;
    private RetryType type = RetryType.REGULARINTERVALS;

    public AMQPRetryPattern() {}

    public AMQPRetryPattern(int limit, int duration, RetryType type) {
        this.limit = limit;
        this.duration = duration;
        this.type = type;
    }

    /**
     * Sleeps for the back-off period appropriate to {@code retryIndex}, or rethrows {@code ex}
     * once the retry budget is exhausted.
     *
     * @param ex the exception that triggered the retry; rethrown when out of attempts
     * @param retryIndex the current attempt number (1-based)
     * @throws Exception rethrows {@code ex} when {@code retryIndex} exceeds the configured limit
     */
    public void continueOrPropogate(Exception ex, int retryIndex) throws Exception {
        if (retryIndex > limit) {
            throw ex;
        }
        // Regular Intervals is the default
        long waitDuration = duration;
        if (type == RetryType.INCREMENTALINTERVALS) {
            // Widen to long BEFORE multiplying so large duration/retryIndex values cannot
            // overflow the int multiplication.
            waitDuration = (long) duration * retryIndex;
        } else if (type == RetryType.EXPONENTIALBACKOFF) {
            waitDuration = (long) Math.pow(2, retryIndex) * duration;
        }
        try {
            Thread.sleep(waitDuration);
        } catch (InterruptedException ignored) {
            // Restore the interrupt flag so callers up the stack can observe it.
            Thread.currentThread().interrupt();
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp/config/AMQPEventQueueProvider.java | amqp/src/main/java/com/netflix/conductor/contribs/queue/amqp/config/AMQPEventQueueProvider.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.queue.amqp.config;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.lang.NonNull;
import com.netflix.conductor.contribs.queue.amqp.AMQPObservableQueue;
import com.netflix.conductor.contribs.queue.amqp.AMQPObservableQueue.Builder;
import com.netflix.conductor.core.events.EventQueueProvider;
import com.netflix.conductor.core.events.queue.ObservableQueue;
/**
 * {@link EventQueueProvider} backed by AMQP. Observable queues are created lazily, one per
 * queue URI, and cached for reuse across lookups.
 *
 * @author Ritu Parathody
 */
public class AMQPEventQueueProvider implements EventQueueProvider {

    private static final Logger LOGGER = LoggerFactory.getLogger(AMQPEventQueueProvider.class);

    /** Cache of queues already built, keyed by queue URI. */
    protected Map<String, AMQPObservableQueue> queues = new ConcurrentHashMap<>();

    private final boolean useExchange;
    private final AMQPEventQueueProperties properties;
    private final String queueType;

    public AMQPEventQueueProvider(
            AMQPEventQueueProperties properties, String queueType, boolean useExchange) {
        this.properties = properties;
        this.queueType = queueType;
        this.useExchange = useExchange;
    }

    @Override
    public String getQueueType() {
        return this.queueType;
    }

    @Override
    @NonNull
    public ObservableQueue getQueue(String queueURI) {
        if (LOGGER.isInfoEnabled()) {
            LOGGER.info("Retrieve queue with URI {}", queueURI);
        }
        // Reuse the queue for this URI if one exists; otherwise build it on first access.
        return queues.computeIfAbsent(queueURI, this::createQueue);
    }

    /** Builds a new observable queue for the given URI using the configured properties. */
    private AMQPObservableQueue createQueue(String queueURI) {
        return new Builder(properties).build(useExchange, queueURI, queueType);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-persistence/src/test/java/com/netflix/conductor/postgres/dao/PostgresLockDAOTest.java | postgres-persistence/src/test/java/com/netflix/conductor/postgres/dao/PostgresLockDAOTest.java | /*
* Copyright 2024 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.dao;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.time.Instant;
import java.util.UUID;
import java.util.concurrent.*;
import javax.sql.DataSource;
import org.flywaydb.core.Flyway;
import org.junit.Before;
import org.junit.Test;
import org.junit.jupiter.api.Assertions;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.flyway.FlywayAutoConfiguration;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.postgres.config.PostgresConfiguration;
import static org.junit.Assert.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
@RunWith(SpringRunner.class)
@ContextConfiguration(
        classes = {
            TestObjectMapperConfiguration.class,
            PostgresConfiguration.class,
            FlywayAutoConfiguration.class
        })
@TestPropertySource(
        properties = {
            "conductor.workflow-execution-lock.type=postgres",
            "spring.flyway.clean-disabled=false",
            "conductor.app.workflow.name-validation.enabled=true"
        })
@SpringBootTest
// Integration tests for the Postgres-backed workflow execution lock: acquisition/release,
// lease expiry, and mutual exclusion under concurrent contention.
public class PostgresLockDAOTest {
    @Autowired private PostgresLockDAO postgresLock;
    @Autowired private DataSource dataSource;
    @Autowired private Flyway flyway;
    @Before
    public void before() {
        flyway.migrate(); // Apply pending Flyway migrations so the locks table exists.
    }
    // Acquires a lock, verifies the row and its lease_expiration bounds directly in the
    // database, then releases it and verifies the row is gone.
    @Test
    public void testLockAcquisitionAndRelease() throws SQLException {
        String lockId = UUID.randomUUID().toString();
        Instant beforeAcquisitionTimeUtc = Instant.now();
        long leaseTime = 2000;
        try (var connection = dataSource.getConnection()) {
            assertTrue(
                    postgresLock.acquireLock(lockId, 500, leaseTime, TimeUnit.MILLISECONDS),
                    "Lock acquisition failed");
            Instant afterAcquisitionTimeUtc = Instant.now();
            try (var ps = connection.prepareStatement("SELECT * FROM locks WHERE lock_id = ?")) {
                ps.setString(1, lockId);
                var rs = ps.executeQuery();
                if (rs.next()) {
                    assertEquals(lockId, rs.getString("lock_id"));
                    long leaseExpirationTime = rs.getTimestamp("lease_expiration").getTime();
                    // The stored expiry must fall between (acquisition start + lease) and
                    // (acquisition end + lease), bracketing the actual acquisition instant.
                    assertTrue(
                            leaseExpirationTime
                                    >= beforeAcquisitionTimeUtc
                                            .plusMillis(leaseTime)
                                            .toEpochMilli(),
                            "Lease expiration is too early");
                    assertTrue(
                            leaseExpirationTime
                                    <= afterAcquisitionTimeUtc.plusMillis(leaseTime).toEpochMilli(),
                            "Lease expiration is too late");
                } else {
                    Assertions.fail("Lock not found in the database");
                }
            }
            postgresLock.releaseLock(lockId);
            try (PreparedStatement ps =
                    connection.prepareStatement("SELECT * FROM locks WHERE lock_id = ?")) {
                ps.setString(1, lockId);
                var rs = ps.executeQuery();
                Assertions.assertFalse(rs.next(), "Lock was not released properly");
            }
        }
    }
    // A lock whose lease has lapsed must be acquirable by a new caller without an explicit
    // release.
    @Test
    public void testExpiredLockCanBeAcquiredAgain() throws InterruptedException {
        String lockId = UUID.randomUUID().toString();
        assertTrue(
                postgresLock.acquireLock(lockId, 500, 500, TimeUnit.MILLISECONDS),
                "First lock acquisition failed");
        Thread.sleep(1000); // Ensure the lock has expired.
        assertTrue(
                postgresLock.acquireLock(lockId, 500, 500, TimeUnit.MILLISECONDS),
                "Lock acquisition after expiration failed");
        postgresLock.releaseLock(lockId);
    }
    // Two threads race for the same lock id; exactly one must win (XOR of the results).
    @Test
    public void testConcurrentLockAcquisition() throws ExecutionException, InterruptedException {
        ExecutorService executorService = Executors.newFixedThreadPool(2);
        String lockId = UUID.randomUUID().toString();
        Future<Boolean> future1 =
                executorService.submit(
                        () -> postgresLock.acquireLock(lockId, 2000, TimeUnit.MILLISECONDS));
        Future<Boolean> future2 =
                executorService.submit(
                        () -> postgresLock.acquireLock(lockId, 2000, TimeUnit.MILLISECONDS));
        assertTrue(
                future1.get()
                        ^ future2.get()); // One of the futures should hold the lock, the other
        // should get rejected
        executorService.shutdown();
        executorService.awaitTermination(5, TimeUnit.SECONDS);
        postgresLock.releaseLock(lockId);
    }
    // Locks with distinct ids are independent and can be held at the same time.
    @Test
    public void testDifferentLockCanBeAcquiredConcurrently() {
        String lockId1 = UUID.randomUUID().toString();
        String lockId2 = UUID.randomUUID().toString();
        assertTrue(postgresLock.acquireLock(lockId1, 2000, 10000, TimeUnit.MILLISECONDS));
        assertTrue(postgresLock.acquireLock(lockId2, 2000, 10000, TimeUnit.MILLISECONDS));
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-persistence/src/test/java/com/netflix/conductor/postgres/dao/PostgresIndexDAOTest.java | postgres-persistence/src/test/java/com/netflix/conductor/postgres/dao/PostgresIndexDAOTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.dao;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.time.Instant;
import java.time.format.DateTimeFormatter;
import java.time.temporal.TemporalAccessor;
import java.util.*;
import javax.sql.DataSource;
import org.flywaydb.core.Flyway;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.flyway.FlywayAutoConfiguration;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.common.metadata.tasks.Task;
import com.netflix.conductor.common.metadata.tasks.TaskExecLog;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.common.run.Workflow;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.postgres.config.PostgresConfiguration;
import com.netflix.conductor.postgres.util.Query;
import com.fasterxml.jackson.databind.ObjectMapper;
import static org.junit.Assert.*;
/**
 * Integration tests for {@link PostgresIndexDAO}: workflow/task indexing (including
 * out-of-order update handling), free-text/JSON search, pagination, task execution logs,
 * and removal — all against a real Postgres schema managed by Flyway.
 */
@ContextConfiguration(
        classes = {
            TestObjectMapperConfiguration.class,
            PostgresConfiguration.class,
            FlywayAutoConfiguration.class
        })
@RunWith(SpringRunner.class)
@TestPropertySource(
        properties = {
            "conductor.app.asyncIndexingEnabled=false",
            "conductor.elasticsearch.version=0",
            "conductor.indexing.type=postgres",
            "spring.flyway.clean-disabled=false"
        })
@SpringBootTest
public class PostgresIndexDAOTest {

    @Autowired private PostgresIndexDAO indexDAO;

    @Autowired private ObjectMapper objectMapper;

    @Qualifier("dataSource")
    @Autowired
    private DataSource dataSource;

    @Autowired Flyway flyway;

    /** Applies pending Flyway migrations so the index tables exist before each test. */
    @Before
    public void before() {
        flyway.migrate();
    }

    /** Builds a COMPLETED workflow summary with fixed timestamps for the given id. */
    private WorkflowSummary getMockWorkflowSummary(String id) {
        WorkflowSummary wfs = new WorkflowSummary();
        wfs.setWorkflowId(id);
        wfs.setCorrelationId("correlation-id");
        wfs.setWorkflowType("workflow-type");
        wfs.setStartTime("2023-02-07T08:42:45Z");
        wfs.setUpdateTime("2023-02-07T08:43:45Z");
        wfs.setStatus(Workflow.WorkflowStatus.COMPLETED);
        return wfs;
    }

    /** Builds a COMPLETED task summary with fixed timestamps for the given task id. */
    private TaskSummary getMockTaskSummary(String taskId) {
        TaskSummary ts = new TaskSummary();
        ts.setTaskId(taskId);
        ts.setTaskType("task-type");
        ts.setTaskDefName("task-def-name");
        ts.setStatus(Task.Status.COMPLETED);
        ts.setStartTime("2023-02-07T09:41:45Z");
        ts.setUpdateTime("2023-02-07T09:42:45Z");
        ts.setWorkflowType("workflow-type");
        return ts;
    }

    /** Builds an execution-log entry for the given task with an explicit created time. */
    private TaskExecLog getMockTaskExecutionLog(String taskId, long createdTime, String log) {
        TaskExecLog tse = new TaskExecLog();
        tse.setTaskId(taskId);
        tse.setLog(log);
        tse.setCreatedTime(createdTime);
        return tse;
    }

    /** Asserts that the workflow_index row for {@code wfs} matches the summary fields. */
    private void compareWorkflowSummary(WorkflowSummary wfs) throws SQLException {
        List<Map<String, Object>> result =
                queryDb(
                        String.format(
                                "SELECT * FROM workflow_index WHERE workflow_id = '%s'",
                                wfs.getWorkflowId()));
        assertEquals("Wrong number of rows returned", 1, result.size());
        assertEquals(
                "Workflow id does not match",
                wfs.getWorkflowId(),
                result.get(0).get("workflow_id"));
        assertEquals(
                "Correlation id does not match",
                wfs.getCorrelationId(),
                result.get(0).get("correlation_id"));
        assertEquals(
                "Workflow type does not match",
                wfs.getWorkflowType(),
                result.get(0).get("workflow_type"));
        TemporalAccessor ta = DateTimeFormatter.ISO_INSTANT.parse(wfs.getStartTime());
        Timestamp startTime = Timestamp.from(Instant.from(ta));
        assertEquals("Start time does not match", startTime, result.get(0).get("start_time"));
        assertEquals(
                "Status does not match", wfs.getStatus().toString(), result.get(0).get("status"));
    }

    /**
     * Runs a raw SQL query and returns the rows as maps. Query strings here are
     * test-controlled, so string-building is acceptable.
     */
    private List<Map<String, Object>> queryDb(String query) throws SQLException {
        try (Connection c = dataSource.getConnection()) {
            try (Query q = new Query(objectMapper, c, query)) {
                return q.executeAndFetchMap();
            }
        }
    }

    /** Asserts that the task_index row for {@code ts} matches the summary fields. */
    private void compareTaskSummary(TaskSummary ts) throws SQLException {
        List<Map<String, Object>> result =
                queryDb(
                        String.format(
                                "SELECT * FROM task_index WHERE task_id = '%s'", ts.getTaskId()));
        assertEquals("Wrong number of rows returned", 1, result.size());
        assertEquals("Task id does not match", ts.getTaskId(), result.get(0).get("task_id"));
        assertEquals("Task type does not match", ts.getTaskType(), result.get(0).get("task_type"));
        assertEquals(
                "Task def name does not match",
                ts.getTaskDefName(),
                result.get(0).get("task_def_name"));
        TemporalAccessor startTa = DateTimeFormatter.ISO_INSTANT.parse(ts.getStartTime());
        Timestamp startTime = Timestamp.from(Instant.from(startTa));
        assertEquals("Start time does not match", startTime, result.get(0).get("start_time"));
        TemporalAccessor updateTa = DateTimeFormatter.ISO_INSTANT.parse(ts.getUpdateTime());
        Timestamp updateTime = Timestamp.from(Instant.from(updateTa));
        assertEquals("Update time does not match", updateTime, result.get(0).get("update_time"));
        assertEquals(
                "Status does not match", ts.getStatus().toString(), result.get(0).get("status"));
        assertEquals(
                "Workflow type does not match",
                ts.getWorkflowType(),
                result.get(0).get("workflow_type"));
    }

    @Test
    public void testIndexNewWorkflow() throws SQLException {
        WorkflowSummary wfs = getMockWorkflowSummary("workflow-id-new");
        indexDAO.indexWorkflow(wfs);
        compareWorkflowSummary(wfs);
    }

    @Test
    public void testIndexExistingWorkflow() throws SQLException {
        WorkflowSummary wfs = getMockWorkflowSummary("workflow-id-existing");
        indexDAO.indexWorkflow(wfs);
        compareWorkflowSummary(wfs);
        // Re-index with a later update time; the row must reflect the newer state.
        wfs.setStatus(Workflow.WorkflowStatus.FAILED);
        wfs.setUpdateTime("2023-02-07T08:44:45Z");
        indexDAO.indexWorkflow(wfs);
        compareWorkflowSummary(wfs);
    }

    @Test
    public void testWhenWorkflowIsIndexedOutOfOrderOnlyLatestIsIndexed() throws SQLException {
        WorkflowSummary firstWorkflowUpdate =
                getMockWorkflowSummary("workflow-id-existing-no-index");
        firstWorkflowUpdate.setUpdateTime("2023-02-07T08:42:45Z");
        WorkflowSummary secondWorkflowUpdateSummary =
                getMockWorkflowSummary("workflow-id-existing-no-index");
        secondWorkflowUpdateSummary.setUpdateTime("2023-02-07T08:43:45Z");
        secondWorkflowUpdateSummary.setStatus(Workflow.WorkflowStatus.FAILED);
        // Index the newer update first; the stale update must then be ignored.
        indexDAO.indexWorkflow(secondWorkflowUpdateSummary);
        compareWorkflowSummary(secondWorkflowUpdateSummary);
        indexDAO.indexWorkflow(firstWorkflowUpdate);
        compareWorkflowSummary(secondWorkflowUpdateSummary);
    }

    @Test
    public void testWhenWorkflowUpdatesHaveTheSameUpdateTimeTheLastIsIndexed() throws SQLException {
        WorkflowSummary firstWorkflowUpdate =
                getMockWorkflowSummary("workflow-id-existing-same-time-index");
        firstWorkflowUpdate.setUpdateTime("2023-02-07T08:42:45Z");
        WorkflowSummary secondWorkflowUpdateSummary =
                getMockWorkflowSummary("workflow-id-existing-same-time-index");
        secondWorkflowUpdateSummary.setUpdateTime("2023-02-07T08:42:45Z");
        secondWorkflowUpdateSummary.setStatus(Workflow.WorkflowStatus.FAILED);
        // Equal update times: last write wins.
        indexDAO.indexWorkflow(firstWorkflowUpdate);
        compareWorkflowSummary(firstWorkflowUpdate);
        indexDAO.indexWorkflow(secondWorkflowUpdateSummary);
        compareWorkflowSummary(secondWorkflowUpdateSummary);
    }

    @Test
    public void testIndexNewTask() throws SQLException {
        TaskSummary ts = getMockTaskSummary("task-id-new");
        indexDAO.indexTask(ts);
        compareTaskSummary(ts);
    }

    @Test
    public void testIndexExistingTask() throws SQLException {
        TaskSummary ts = getMockTaskSummary("task-id-existing");
        indexDAO.indexTask(ts);
        compareTaskSummary(ts);
        // Re-index with a later update time; the row must reflect the newer state.
        ts.setUpdateTime("2023-02-07T09:43:45Z");
        ts.setStatus(Task.Status.FAILED);
        indexDAO.indexTask(ts);
        compareTaskSummary(ts);
    }

    @Test
    public void testWhenTaskIsIndexedOutOfOrderOnlyLatestIsIndexed() throws SQLException {
        TaskSummary firstTaskState = getMockTaskSummary("task-id-exiting-no-update");
        firstTaskState.setUpdateTime("2023-02-07T09:41:45Z");
        firstTaskState.setStatus(Task.Status.FAILED);
        TaskSummary secondTaskState = getMockTaskSummary("task-id-exiting-no-update");
        secondTaskState.setUpdateTime("2023-02-07T09:42:45Z");
        // Index the newer update first; the stale update must then be ignored.
        indexDAO.indexTask(secondTaskState);
        compareTaskSummary(secondTaskState);
        indexDAO.indexTask(firstTaskState);
        compareTaskSummary(secondTaskState);
    }

    @Test
    public void testWhenTaskUpdatesHaveTheSameUpdateTimeTheLastIsIndexed() throws SQLException {
        TaskSummary firstTaskState = getMockTaskSummary("task-id-exiting-same-time-update");
        firstTaskState.setUpdateTime("2023-02-07T09:42:45Z");
        firstTaskState.setStatus(Task.Status.FAILED);
        TaskSummary secondTaskState = getMockTaskSummary("task-id-exiting-same-time-update");
        secondTaskState.setUpdateTime("2023-02-07T09:42:45Z");
        // Equal update times: last write wins.
        indexDAO.indexTask(firstTaskState);
        compareTaskSummary(firstTaskState);
        indexDAO.indexTask(secondTaskState);
        compareTaskSummary(secondTaskState);
    }

    @Test
    public void testAddTaskExecutionLogs() throws SQLException {
        List<TaskExecLog> logs = new ArrayList<>();
        String taskId = UUID.randomUUID().toString();
        logs.add(getMockTaskExecutionLog(taskId, 1675845986000L, "Log 1"));
        logs.add(getMockTaskExecutionLog(taskId, 1675845987000L, "Log 2"));
        indexDAO.addTaskExecutionLogs(logs);
        List<Map<String, Object>> records =
                queryDb("SELECT * FROM task_execution_logs ORDER BY created_time ASC");
        assertEquals("Wrong number of logs returned", 2, records.size());
        assertEquals(logs.get(0).getLog(), records.get(0).get("log"));
        assertEquals(new Date(1675845986000L), records.get(0).get("created_time"));
        assertEquals(logs.get(1).getLog(), records.get(1).get("log"));
        assertEquals(new Date(1675845987000L), records.get(1).get("created_time"));
    }

    @Test
    public void testSearchWorkflowSummary() {
        WorkflowSummary wfs = getMockWorkflowSummary("workflow-id");
        indexDAO.indexWorkflow(wfs);
        String query = String.format("workflowId=\"%s\"", wfs.getWorkflowId());
        SearchResult<WorkflowSummary> results =
                indexDAO.searchWorkflowSummary(query, "*", 0, 15, new ArrayList<>());
        assertEquals("No results returned", 1, results.getResults().size());
        assertEquals(
                "Wrong workflow returned",
                wfs.getWorkflowId(),
                results.getResults().get(0).getWorkflowId());
    }

    @Test
    public void testFullTextSearchWorkflowSummary() {
        WorkflowSummary wfs = getMockWorkflowSummary("workflow-id");
        indexDAO.indexWorkflow(wfs);
        // A non-matching term must return nothing; the real id must match.
        String freeText = "notworkflow-id";
        SearchResult<WorkflowSummary> results =
                indexDAO.searchWorkflowSummary("", freeText, 0, 15, new ArrayList<>());
        assertEquals("Wrong number of results returned", 0, results.getResults().size());
        freeText = "workflow-id";
        results = indexDAO.searchWorkflowSummary("", freeText, 0, 15, new ArrayList<>());
        assertEquals("No results returned", 1, results.getResults().size());
        assertEquals(
                "Wrong workflow returned",
                wfs.getWorkflowId(),
                results.getResults().get(0).getWorkflowId());
    }

    @Test
    public void testJsonSearchWorkflowSummary() {
        WorkflowSummary wfs = getMockWorkflowSummary("workflow-id-summary");
        wfs.setVersion(3);
        indexDAO.indexWorkflow(wfs);
        // Free text may also be a JSON fragment matched against the stored summary.
        String freeText = "{\"correlationId\":\"not-the-id\"}";
        SearchResult<WorkflowSummary> results =
                indexDAO.searchWorkflowSummary("", freeText, 0, 15, new ArrayList<>());
        assertEquals("Wrong number of results returned", 0, results.getResults().size());
        freeText = "{\"correlationId\":\"correlation-id\", \"version\":3}";
        results = indexDAO.searchWorkflowSummary("", freeText, 0, 15, new ArrayList<>());
        assertEquals("No results returned", 1, results.getResults().size());
        assertEquals(
                "Wrong workflow returned",
                wfs.getWorkflowId(),
                results.getResults().get(0).getWorkflowId());
    }

    @Test
    public void testSearchWorkflowSummaryPagination() {
        for (int i = 0; i < 5; i++) {
            WorkflowSummary wfs = getMockWorkflowSummary("workflow-id-pagination-" + i);
            indexDAO.indexWorkflow(wfs);
        }
        List<String> orderBy = Arrays.asList("workflowId:DESC");
        SearchResult<WorkflowSummary> results =
                indexDAO.searchWorkflowSummary("", "workflow-id-pagination*", 0, 2, orderBy);
        assertEquals("Wrong totalHits returned", 5, results.getTotalHits());
        assertEquals("Wrong number of results returned", 2, results.getResults().size());
        assertEquals(
                "Results returned in wrong order",
                "workflow-id-pagination-4",
                results.getResults().get(0).getWorkflowId());
        assertEquals(
                "Results returned in wrong order",
                "workflow-id-pagination-3",
                results.getResults().get(1).getWorkflowId());
        // NOTE(review): the "*" searches below expect totalHits 8, which presumably counts
        // workflows indexed by earlier tests sharing the same schema — verify if tests are
        // ever isolated per-method.
        results = indexDAO.searchWorkflowSummary("", "*", 2, 2, orderBy);
        assertEquals("Wrong totalHits returned", 8, results.getTotalHits());
        assertEquals("Wrong number of results returned", 2, results.getResults().size());
        assertEquals(
                "Results returned in wrong order",
                "workflow-id-pagination-2",
                results.getResults().get(0).getWorkflowId());
        assertEquals(
                "Results returned in wrong order",
                "workflow-id-pagination-1",
                results.getResults().get(1).getWorkflowId());
        results = indexDAO.searchWorkflowSummary("", "*", 4, 2, orderBy);
        assertEquals("Wrong totalHits returned", 8, results.getTotalHits());
        assertEquals("Wrong number of results returned", 2, results.getResults().size());
        assertEquals(
                "Results returned in wrong order",
                "workflow-id-pagination-0",
                results.getResults().get(0).getWorkflowId());
    }

    @Test
    public void testSearchTaskSummary() {
        TaskSummary ts = getMockTaskSummary("task-id");
        indexDAO.indexTask(ts);
        String query = String.format("taskId=\"%s\"", ts.getTaskId());
        SearchResult<TaskSummary> results =
                indexDAO.searchTaskSummary(query, "*", 0, 15, new ArrayList<>());
        assertEquals("No results returned", 1, results.getResults().size());
        assertEquals(
                "Wrong task returned", ts.getTaskId(), results.getResults().get(0).getTaskId());
    }

    @Test
    public void testSearchTaskSummaryPagination() {
        for (int i = 0; i < 5; i++) {
            TaskSummary ts = getMockTaskSummary("task-id-pagination-" + i);
            indexDAO.indexTask(ts);
        }
        List<String> orderBy = Arrays.asList("taskId:DESC");
        SearchResult<TaskSummary> results = indexDAO.searchTaskSummary("", "*", 0, 2, orderBy);
        // NOTE(review): totalHits 10 presumably includes tasks indexed by earlier tests
        // sharing the same schema — verify if tests are ever isolated per-method.
        assertEquals("Wrong totalHits returned", 10, results.getTotalHits());
        assertEquals("Wrong number of results returned", 2, results.getResults().size());
        assertEquals(
                "Results returned in wrong order",
                "task-id-pagination-4",
                results.getResults().get(0).getTaskId());
        assertEquals(
                "Results returned in wrong order",
                "task-id-pagination-3",
                results.getResults().get(1).getTaskId());
        results = indexDAO.searchTaskSummary("", "*", 2, 2, orderBy);
        assertEquals("Wrong totalHits returned", 10, results.getTotalHits());
        assertEquals("Wrong number of results returned", 2, results.getResults().size());
        assertEquals(
                "Results returned in wrong order",
                "task-id-pagination-2",
                results.getResults().get(0).getTaskId());
        assertEquals(
                "Results returned in wrong order",
                "task-id-pagination-1",
                results.getResults().get(1).getTaskId());
        results = indexDAO.searchTaskSummary("", "*", 4, 2, orderBy);
        assertEquals("Wrong totalHits returned", 10, results.getTotalHits());
        assertEquals("Wrong number of results returned", 2, results.getResults().size());
        assertEquals(
                "Results returned in wrong order",
                "task-id-pagination-0",
                results.getResults().get(0).getTaskId());
    }

    @Test
    public void testGetTaskExecutionLogs() throws SQLException {
        List<TaskExecLog> logs = new ArrayList<>();
        String taskId = UUID.randomUUID().toString();
        logs.add(getMockTaskExecutionLog(taskId, new Date(1675845986000L).getTime(), "Log 1"));
        logs.add(getMockTaskExecutionLog(taskId, new Date(1675845987000L).getTime(), "Log 2"));
        indexDAO.addTaskExecutionLogs(logs);
        List<TaskExecLog> records = indexDAO.getTaskExecutionLogs(logs.get(0).getTaskId());
        assertEquals("Wrong number of logs returned", 2, records.size());
        // Fix: assert on the *fetched* records (expected value first); the original
        // asserted on the input list, which verified nothing about the DAO round-trip.
        assertEquals(logs.get(0).getLog(), records.get(0).getLog());
        assertEquals(1675845986000L, records.get(0).getCreatedTime());
        assertEquals(logs.get(1).getLog(), records.get(1).getLog());
        assertEquals(1675845987000L, records.get(1).getCreatedTime());
    }

    @Test
    public void testRemoveWorkflow() throws SQLException {
        String workflowId = UUID.randomUUID().toString();
        WorkflowSummary wfs = getMockWorkflowSummary(workflowId);
        indexDAO.indexWorkflow(wfs);
        List<Map<String, Object>> workflowRecords =
                queryDb("SELECT * FROM workflow_index WHERE workflow_id = '" + workflowId + "'");
        assertEquals("Workflow index record was not created", 1, workflowRecords.size());
        indexDAO.removeWorkflow(workflowId);
        workflowRecords =
                queryDb("SELECT * FROM workflow_index WHERE workflow_id = '" + workflowId + "'");
        assertEquals("Workflow index record was not deleted", 0, workflowRecords.size());
    }

    @Test
    public void testRemoveTask() throws SQLException {
        String workflowId = UUID.randomUUID().toString();
        String taskId = UUID.randomUUID().toString();
        TaskSummary ts = getMockTaskSummary(taskId);
        indexDAO.indexTask(ts);
        List<TaskExecLog> logs = new ArrayList<>();
        logs.add(getMockTaskExecutionLog(taskId, new Date(1675845986000L).getTime(), "Log 1"));
        logs.add(getMockTaskExecutionLog(taskId, new Date(1675845987000L).getTime(), "Log 2"));
        indexDAO.addTaskExecutionLogs(logs);
        List<Map<String, Object>> taskRecords =
                queryDb("SELECT * FROM task_index WHERE task_id = '" + taskId + "'");
        assertEquals("Task index record was not created", 1, taskRecords.size());
        List<Map<String, Object>> logRecords =
                queryDb("SELECT * FROM task_execution_logs WHERE task_id = '" + taskId + "'");
        assertEquals("Task execution logs were not created", 2, logRecords.size());
        // Removing a task must delete both its index row and its execution logs.
        indexDAO.removeTask(workflowId, taskId);
        taskRecords = queryDb("SELECT * FROM task_index WHERE task_id = '" + taskId + "'");
        assertEquals("Task index record was not deleted", 0, taskRecords.size());
        logRecords = queryDb("SELECT * FROM task_execution_logs WHERE task_id = '" + taskId + "'");
        assertEquals("Task execution logs were not deleted", 0, logRecords.size());
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-persistence/src/test/java/com/netflix/conductor/postgres/dao/PostgresExecutionDAOTest.java | postgres-persistence/src/test/java/com/netflix/conductor/postgres/dao/PostgresExecutionDAOTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.dao;
import java.util.List;
import org.flywaydb.core.Flyway;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mockito;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.flyway.FlywayAutoConfiguration;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.dao.ExecutionDAO;
import com.netflix.conductor.dao.ExecutionDAOTest;
import com.netflix.conductor.model.WorkflowModel;
import com.netflix.conductor.postgres.config.PostgresConfiguration;
import com.google.common.collect.Iterables;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
@ContextConfiguration(
classes = {
TestObjectMapperConfiguration.class,
PostgresConfiguration.class,
FlywayAutoConfiguration.class
})
@RunWith(SpringRunner.class)
@SpringBootTest(properties = "spring.flyway.clean-disabled=false")
// Runs the shared ExecutionDAO contract tests (inherited from ExecutionDAOTest)
// against the Postgres implementation, plus a few Postgres-specific cases.
public class PostgresExecutionDAOTest extends ExecutionDAOTest {
@Autowired private PostgresExecutionDAO executionDAO;
@Autowired Flyway flyway;
// clean the database between tests.
// NOTE(review): this only applies pending migrations; it does not actually
// wipe rows (no flyway.clean() despite clean-disabled=false in the test
// properties) — confirm whether an explicit clean step was intended.
@Before
public void before() {
flyway.migrate();
}
// All 10 generated workflows share correlation id "corr001" (presumably set by
// createTestWorkflow — confirm) and must all be returned by the lookup.
@Test
public void testPendingByCorrelationId() {
WorkflowDef def = new WorkflowDef();
def.setName("pending_count_correlation_jtest");
WorkflowModel workflow = createTestWorkflow();
workflow.setWorkflowDefinition(def);
generateWorkflows(workflow, 10);
List<WorkflowModel> bycorrelationId =
getExecutionDAO()
.getWorkflowsByCorrelationId(
"pending_count_correlation_jtest", "corr001", true);
assertNotNull(bycorrelationId);
assertEquals(10, bycorrelationId.size());
}
// Removing a workflow must also remove it from the pending-workflow count.
@Test
public void testRemoveWorkflow() {
WorkflowDef def = new WorkflowDef();
def.setName("workflow");
WorkflowModel workflow = createTestWorkflow();
workflow.setWorkflowDefinition(def);
List<String> ids = generateWorkflows(workflow, 1);
assertEquals(1, getExecutionDAO().getPendingWorkflowCount("workflow"));
ids.forEach(wfId -> getExecutionDAO().removeWorkflow(wfId));
assertEquals(0, getExecutionDAO().getPendingWorkflowCount("workflow"));
}
// removeWorkflowWithExpiry(wfId, 1) schedules a delayed delete; the spy
// verifies (with a 10 s Mockito timeout) that removeWorkflow is eventually
// invoked for the last generated workflow id.
@Test
public void testRemoveWorkflowWithExpiry() {
WorkflowDef def = new WorkflowDef();
def.setName("workflow");
WorkflowModel workflow = createTestWorkflow();
workflow.setWorkflowDefinition(def);
List<String> ids = generateWorkflows(workflow, 1);
final ExecutionDAO execDao = Mockito.spy(getExecutionDAO());
assertEquals(1, execDao.getPendingWorkflowCount("workflow"));
ids.forEach(wfId -> execDao.removeWorkflowWithExpiry(wfId, 1));
Mockito.verify(execDao, Mockito.timeout(10 * 1000)).removeWorkflow(Iterables.getLast(ids));
}
// Exposes the autowired Postgres DAO to the inherited contract tests.
@Override
public ExecutionDAO getExecutionDAO() {
return executionDAO;
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-persistence/src/test/java/com/netflix/conductor/postgres/dao/PostgresPollDataDAOCacheTest.java | postgres-persistence/src/test/java/com/netflix/conductor/postgres/dao/PostgresPollDataDAOCacheTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.dao;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.List;
import java.util.Map;
import javax.sql.DataSource;
import org.flywaydb.core.Flyway;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.flyway.FlywayAutoConfiguration;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.common.metadata.tasks.PollData;
import com.netflix.conductor.dao.PollDataDAO;
import com.netflix.conductor.postgres.config.PostgresConfiguration;
import com.netflix.conductor.postgres.util.Query;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import static org.junit.Assert.*;
@ContextConfiguration(
classes = {
TestObjectMapperConfiguration.class,
PostgresConfiguration.class,
FlywayAutoConfiguration.class
})
@RunWith(SpringRunner.class)
@TestPropertySource(
properties = {
"conductor.app.asyncIndexingEnabled=false",
"conductor.elasticsearch.version=0",
"conductor.indexing.type=postgres",
"conductor.postgres.pollDataFlushInterval=200",
"conductor.postgres.pollDataCacheValidityPeriod=100",
"spring.flyway.clean-disabled=false"
})
@SpringBootTest
/**
 * Verifies the PostgresPollDataDAO write-behind cache: updateLastPollData is buffered in memory
 * and only flushed to the poll_data table on the configured pollDataFlushInterval (200 ms here),
 * while reads within the cache validity period (100 ms) are served from the cache.
 */
public class PostgresPollDataDAOCacheTest {

    @Autowired private PollDataDAO pollDataDAO;

    @Autowired private ObjectMapper objectMapper;

    @Qualifier("dataSource")
    @Autowired
    private DataSource dataSource;

    @Autowired Flyway flyway;

    // clean the database between tests.
    @Before
    public void before() {
        try (Connection conn = dataSource.getConnection()) {
            // Explicitly disable autoCommit to match HikariCP pool configuration
            // and ensure we can control transaction boundaries
            conn.setAutoCommit(false);
            // Use RESTART IDENTITY to reset sequences and CASCADE for foreign keys
            conn.prepareStatement("truncate table poll_data restart identity cascade")
                    .executeUpdate();
            // Explicitly commit the truncation in a separate transaction
            // This ensures the truncation is visible to all subsequent connections
            conn.commit();
        } catch (Exception e) {
            e.printStackTrace();
            throw new RuntimeException(e);
        }
    }

    /** Runs a raw SQL query against the test datasource and returns the rows as maps. */
    private List<Map<String, Object>> queryDb(String query) throws SQLException {
        try (Connection c = dataSource.getConnection()) {
            try (Query q = new Query(objectMapper, c, query)) {
                return q.executeAndFetchMap();
            }
        }
    }

    /**
     * Blocks (up to 1 s, polling every 10 ms) until the DAO reports a flush time later than the
     * one observed on entry, i.e. at least one cache-flush cycle has completed since the call.
     */
    private void waitForCacheFlush() throws InterruptedException {
        long startTime = System.currentTimeMillis();
        long lastFlushTime = ((PostgresPollDataDAO) pollDataDAO).getLastFlushTime();
        // Fix: the original condition (lastFlushTime <= getLastFlushTime()) is always true, so
        // this method never detected a flush and always slept the full second. Keep waiting only
        // while the current flush time has NOT advanced past the one seen on entry.
        while (System.currentTimeMillis() - startTime < 1000
                && ((PostgresPollDataDAO) pollDataDAO).getLastFlushTime() <= lastFlushTime) {
            Thread.sleep(10);
        }
    }

    /**
     * A freshly written poll-data entry must not be visible in the poll_data table until the next
     * cache flush, after which it is persisted with its domain and worker id intact.
     */
    @Test
    public void cacheFlushTest()
            throws SQLException, JsonProcessingException, InterruptedException {
        waitForCacheFlush();
        pollDataDAO.updateLastPollData("dummy-task", "dummy-domain", "dummy-worker-id");
        List<Map<String, Object>> records =
                queryDb("SELECT * FROM poll_data WHERE queue_name = 'dummy-task'");
        assertEquals("Poll data records returned", 0, records.size());
        waitForCacheFlush();
        records = queryDb("SELECT * FROM poll_data WHERE queue_name = 'dummy-task'");
        assertEquals("Poll data records returned", 1, records.size());
        assertEquals("Wrong domain set", "dummy-domain", records.get(0).get("domain"));
        JsonNode jsonData = objectMapper.readTree(records.get(0).get("json_data").toString());
        assertEquals(
                "Poll data is incorrect", "dummy-worker-id", jsonData.get("workerId").asText());
    }

    /**
     * getPollData must be served from the in-memory cache: the value written above is readable
     * immediately even though nothing has been flushed to the poll_data table yet.
     */
    @Test
    public void getCachedPollDataByDomainTest() throws InterruptedException, SQLException {
        waitForCacheFlush();
        pollDataDAO.updateLastPollData("dummy-task2", "dummy-domain2", "dummy-worker-id2");
        PollData pollData = pollDataDAO.getPollData("dummy-task2", "dummy-domain2");
        assertNotNull("pollData is null", pollData);
        assertEquals("dummy-worker-id2", pollData.getWorkerId());
        List<Map<String, Object>> records =
                queryDb("SELECT * FROM poll_data WHERE queue_name = 'dummy-task2'");
        assertEquals("Poll data records returned", 0, records.size());
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-persistence/src/test/java/com/netflix/conductor/postgres/dao/PostgresIndexDAOStatusChangeOnlyTest.java | postgres-persistence/src/test/java/com/netflix/conductor/postgres/dao/PostgresIndexDAOStatusChangeOnlyTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.dao;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.*;
import javax.sql.DataSource;
import org.flywaydb.core.Flyway;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.flyway.FlywayAutoConfiguration;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.common.metadata.tasks.Task;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.common.run.Workflow;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.postgres.config.PostgresConfiguration;
import com.netflix.conductor.postgres.util.Query;
import com.fasterxml.jackson.databind.ObjectMapper;
import static org.junit.Assert.assertEquals;
@ContextConfiguration(
classes = {
TestObjectMapperConfiguration.class,
PostgresConfiguration.class,
FlywayAutoConfiguration.class
})
@RunWith(SpringRunner.class)
@TestPropertySource(
properties = {
"conductor.app.asyncIndexingEnabled=false",
"conductor.elasticsearch.version=0",
"conductor.indexing.type=postgres",
"conductor.postgres.onlyIndexOnStatusChange=true",
"spring.flyway.clean-disabled=false"
})
@SpringBootTest
/**
 * Verifies the onlyIndexOnStatusChange=true mode of PostgresIndexDAO: re-indexing a workflow or
 * task whose status has not changed must be a no-op; once the status changes, the whole record
 * (including other modified fields) is written.
 */
public class PostgresIndexDAOStatusChangeOnlyTest {

    @Autowired private PostgresIndexDAO indexDAO;

    @Autowired private ObjectMapper objectMapper;

    @Qualifier("dataSource")
    @Autowired
    private DataSource dataSource;

    @Autowired Flyway flyway;

    // Apply pending migrations before each test.
    // NOTE(review): despite the original "clean the database between tests" comment, this only
    // migrates and does not wipe rows — confirm whether a flyway.clean() was intended
    // (clean-disabled=false is set in the test properties).
    @Before
    public void before() {
        flyway.migrate();
    }

    /** Builds a RUNNING workflow summary with fixed metadata for indexing tests. */
    private WorkflowSummary getMockWorkflowSummary(String id) {
        WorkflowSummary wfs = new WorkflowSummary();
        wfs.setWorkflowId(id);
        wfs.setCorrelationId("correlation-id");
        wfs.setWorkflowType("workflow-type");
        wfs.setStartTime("2023-02-07T08:42:45Z");
        wfs.setUpdateTime("2023-02-07T08:43:45Z");
        wfs.setStatus(Workflow.WorkflowStatus.RUNNING);
        return wfs;
    }

    /** Builds a SCHEDULED task summary with fixed metadata for indexing tests. */
    private TaskSummary getMockTaskSummary(String taskId) {
        TaskSummary ts = new TaskSummary();
        ts.setTaskId(taskId);
        ts.setTaskType("task-type");
        ts.setTaskDefName("task-def-name");
        ts.setStatus(Task.Status.SCHEDULED);
        ts.setStartTime("2023-02-07T09:41:45Z");
        ts.setUpdateTime("2023-02-07T09:42:45Z");
        ts.setWorkflowType("workflow-type");
        return ts;
    }

    /** Runs a raw SQL query against the test datasource and returns the rows as maps. */
    private List<Map<String, Object>> queryDb(String query) throws SQLException {
        try (Connection c = dataSource.getConnection()) {
            try (Query q = new Query(objectMapper, c, query)) {
                return q.executeAndFetchMap();
            }
        }
    }

    /** Asserts exactly one workflow_index row exists with the given status and correlation id. */
    public void checkWorkflow(String workflowId, String status, String correlationId)
            throws SQLException {
        List<Map<String, Object>> result =
                queryDb(
                        String.format(
                                "SELECT * FROM workflow_index WHERE workflow_id = '%s'",
                                workflowId));
        assertEquals("Wrong number of rows returned", 1, result.size());
        assertEquals("Wrong status returned", status, result.get(0).get("status"));
        assertEquals(
                "Correlation id does not match",
                correlationId,
                result.get(0).get("correlation_id"));
    }

    /** Asserts exactly one task_index row exists with the given status and update time. */
    public void checkTask(String taskId, String status, String updateTime) throws SQLException {
        List<Map<String, Object>> result =
                queryDb(String.format("SELECT * FROM task_index WHERE task_id = '%s'", taskId));
        assertEquals("Wrong number of rows returned", 1, result.size());
        assertEquals("Wrong status returned", status, result.get(0).get("status"));
        assertEquals(
                "Update time does not match",
                updateTime,
                result.get(0).get("update_time").toString());
    }

    @Test
    public void testIndexWorkflowOnlyStatusChange() throws SQLException {
        WorkflowSummary wfs = getMockWorkflowSummary("workflow-id");
        indexDAO.indexWorkflow(wfs);
        // retrieve the record, make sure it exists
        checkWorkflow("workflow-id", "RUNNING", "correlation-id");
        // Change the record, but not the status, and re-index
        wfs.setCorrelationId("new-correlation-id");
        wfs.setUpdateTime("2023-02-07T08:44:45Z");
        indexDAO.indexWorkflow(wfs);
        // retrieve the record, make sure it hasn't changed
        checkWorkflow("workflow-id", "RUNNING", "correlation-id");
        // Change the status and re-index
        wfs.setStatus(Workflow.WorkflowStatus.FAILED);
        wfs.setUpdateTime("2023-02-07T08:45:45Z");
        indexDAO.indexWorkflow(wfs);
        // retrieve the record, make sure it has changed
        checkWorkflow("workflow-id", "FAILED", "new-correlation-id");
    }

    // Fix: this method was missing its @Test annotation, so the JUnit 4 runner silently skipped
    // it and the task-side status-change-only behavior was never actually verified.
    @Test
    public void testIndexTaskOnlyStatusChange() throws SQLException {
        TaskSummary ts = getMockTaskSummary("task-id");
        indexDAO.indexTask(ts);
        // retrieve the record, make sure it exists
        checkTask("task-id", "SCHEDULED", "2023-02-07 09:42:45.0");
        // Change the record, but not the status
        ts.setUpdateTime("2023-02-07T10:42:45Z");
        indexDAO.indexTask(ts);
        // retrieve the record, make sure it hasn't changed
        checkTask("task-id", "SCHEDULED", "2023-02-07 09:42:45.0");
        // Change the status and re-index
        ts.setStatus(Task.Status.FAILED);
        ts.setUpdateTime("2023-02-07T10:43:45Z");
        indexDAO.indexTask(ts);
        // retrieve the record, make sure it has changed
        checkTask("task-id", "FAILED", "2023-02-07 10:43:45.0");
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-persistence/src/test/java/com/netflix/conductor/postgres/dao/PostgresMetadataDAOTest.java | postgres-persistence/src/test/java/com/netflix/conductor/postgres/dao/PostgresMetadataDAOTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.dao;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.UUID;
import java.util.function.Function;
import java.util.stream.Collectors;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.flywaydb.core.Flyway;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.flyway.FlywayAutoConfiguration;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.postgres.config.PostgresConfiguration;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;
@ContextConfiguration(
classes = {
TestObjectMapperConfiguration.class,
PostgresConfiguration.class,
FlywayAutoConfiguration.class
})
@RunWith(SpringRunner.class)
@SpringBootTest(properties = "spring.flyway.clean-disabled=true")
// Integration tests for PostgresMetadataDAO covering workflow definitions, task
// definitions, and event handlers against a Flyway-migrated schema.
// NOTE(review): unlike the sibling DAO tests, this class sets
// spring.flyway.clean-disabled=true and never truncates tables, so state
// persists between test methods within a run — several assertions below depend
// on that; confirm this is intentional.
public class PostgresMetadataDAOTest {
@Autowired private PostgresMetadataDAO metadataDAO;
@Rule public TestName name = new TestName();
@Autowired Flyway flyway;
// clean the database between tests.
// NOTE(review): this only applies pending migrations; no data is wiped here.
@Before
public void before() {
flyway.migrate();
}
// Creating the same (name, version) twice must fail with a descriptive error.
@Test
public void testDuplicateWorkflowDef() {
WorkflowDef def = new WorkflowDef();
def.setName("testDuplicate");
def.setVersion(1);
metadataDAO.createWorkflowDef(def);
NonTransientException applicationException =
assertThrows(NonTransientException.class, () -> metadataDAO.createWorkflowDef(def));
assertEquals(
"Workflow with testDuplicate.1 already exists!", applicationException.getMessage());
}
// Removing an unknown (name, version) must fail with a descriptive error.
@Test
public void testRemoveNotExistingWorkflowDef() {
NonTransientException applicationException =
assertThrows(
NonTransientException.class,
() -> metadataDAO.removeWorkflowDef("test", 1));
assertEquals(
"No such workflow definition: test version: 1", applicationException.getMessage());
}
// End-to-end CRUD walk-through for workflow definitions: create v1 and v3,
// check latest/all-versions queries, update, create v2 out of order, then
// remove v3 and v1 and confirm v2 becomes/remains the latest.
@Test
public void testWorkflowDefOperations() {
WorkflowDef def = new WorkflowDef();
def.setName("test");
def.setVersion(1);
def.setDescription("description");
def.setCreatedBy("unit_test");
def.setCreateTime(1L);
def.setOwnerApp("ownerApp");
def.setUpdatedBy("unit_test2");
def.setUpdateTime(2L);
metadataDAO.createWorkflowDef(def);
List<WorkflowDef> all = metadataDAO.getAllWorkflowDefs();
assertNotNull(all);
assertEquals(1, all.size());
assertEquals("test", all.get(0).getName());
assertEquals(1, all.get(0).getVersion());
// Round-trip must preserve every field (reflection-based comparison).
WorkflowDef found = metadataDAO.getWorkflowDef("test", 1).get();
assertTrue(EqualsBuilder.reflectionEquals(def, found));
def.setVersion(3);
metadataDAO.createWorkflowDef(def);
all = metadataDAO.getAllWorkflowDefs();
assertNotNull(all);
assertEquals(2, all.size());
assertEquals("test", all.get(0).getName());
assertEquals(1, all.get(0).getVersion());
found = metadataDAO.getLatestWorkflowDef(def.getName()).get();
assertEquals(def.getName(), found.getName());
assertEquals(def.getVersion(), found.getVersion());
assertEquals(3, found.getVersion());
// getAllLatest collapses versions to one entry per name.
all = metadataDAO.getAllLatest();
assertNotNull(all);
assertEquals(1, all.size());
assertEquals("test", all.get(0).getName());
assertEquals(3, all.get(0).getVersion());
// getAllVersions returns every stored version, ascending.
all = metadataDAO.getAllVersions(def.getName());
assertNotNull(all);
assertEquals(2, all.size());
assertEquals("test", all.get(0).getName());
assertEquals("test", all.get(1).getName());
assertEquals(1, all.get(0).getVersion());
assertEquals(3, all.get(1).getVersion());
def.setDescription("updated");
metadataDAO.updateWorkflowDef(def);
found = metadataDAO.getWorkflowDef(def.getName(), def.getVersion()).get();
assertEquals(def.getDescription(), found.getDescription());
List<String> allnames = metadataDAO.findAll();
assertNotNull(allnames);
assertEquals(1, allnames.size());
assertEquals(def.getName(), allnames.get(0));
// Creating v2 after v3 must NOT change the latest (still v3).
def.setVersion(2);
metadataDAO.createWorkflowDef(def);
found = metadataDAO.getLatestWorkflowDef(def.getName()).get();
assertEquals(def.getName(), found.getName());
assertEquals(3, found.getVersion());
// Removing the highest version makes the next-highest (v2) the latest.
metadataDAO.removeWorkflowDef("test", 3);
Optional<WorkflowDef> deleted = metadataDAO.getWorkflowDef("test", 3);
assertFalse(deleted.isPresent());
found = metadataDAO.getLatestWorkflowDef(def.getName()).get();
assertEquals(def.getName(), found.getName());
assertEquals(2, found.getVersion());
// Removing a lower version leaves the latest unchanged.
metadataDAO.removeWorkflowDef("test", 1);
deleted = metadataDAO.getWorkflowDef("test", 1);
assertFalse(deleted.isPresent());
found = metadataDAO.getLatestWorkflowDef(def.getName()).get();
assertEquals(def.getName(), found.getName());
assertEquals(2, found.getVersion());
}
// CRUD walk-through for task definitions: create, round-trip, update, bulk
// create taskA0..taskA8, list, then remove the bulk entries.
@Test
public void testTaskDefOperations() {
TaskDef def = new TaskDef("taskA");
def.setDescription("description");
def.setCreatedBy("unit_test");
def.setCreateTime(1L);
def.setInputKeys(Arrays.asList("a", "b", "c"));
def.setOutputKeys(Arrays.asList("01", "o2"));
def.setOwnerApp("ownerApp");
def.setRetryCount(3);
def.setRetryDelaySeconds(100);
def.setRetryLogic(TaskDef.RetryLogic.FIXED);
def.setTimeoutPolicy(TaskDef.TimeoutPolicy.ALERT_ONLY);
def.setUpdatedBy("unit_test2");
def.setUpdateTime(2L);
metadataDAO.createTaskDef(def);
TaskDef found = metadataDAO.getTaskDef(def.getName());
assertTrue(EqualsBuilder.reflectionEquals(def, found));
def.setDescription("updated description");
metadataDAO.updateTaskDef(def);
found = metadataDAO.getTaskDef(def.getName());
assertTrue(EqualsBuilder.reflectionEquals(def, found));
assertEquals("updated description", found.getDescription());
for (int i = 0; i < 9; i++) {
TaskDef tdf = new TaskDef("taskA" + i);
metadataDAO.createTaskDef(tdf);
}
List<TaskDef> all = metadataDAO.getAllTaskDefs();
assertNotNull(all);
assertEquals(10, all.size());
Set<String> allnames = all.stream().map(TaskDef::getName).collect(Collectors.toSet());
assertEquals(10, allnames.size());
// Names sort so that "taskA" precedes "taskA0".."taskA8".
List<String> sorted = allnames.stream().sorted().collect(Collectors.toList());
assertEquals(def.getName(), sorted.get(0));
for (int i = 0; i < 9; i++) {
assertEquals(def.getName() + i, sorted.get(i + 1));
}
for (int i = 0; i < 9; i++) {
metadataDAO.removeTaskDef(def.getName() + i);
}
all = metadataDAO.getAllTaskDefs();
assertNotNull(all);
assertEquals(1, all.size());
assertEquals(def.getName(), all.get(0).getName());
}
// Removing an unknown task definition must fail with a descriptive error.
@Test
public void testRemoveNotExistingTaskDef() {
NonTransientException applicationException =
assertThrows(
NonTransientException.class,
() -> metadataDAO.removeTaskDef("test" + UUID.randomUUID().toString()));
assertEquals("No such task definition", applicationException.getMessage());
}
// Event-handler CRUD: an inactive handler is stored but excluded from
// activeOnly lookups; after activation and re-pointing to event2, lookups by
// event1 return nothing and lookups by event2 return the handler.
@Test
public void testEventHandlers() {
String event1 = "SQS::arn:account090:sqstest1";
String event2 = "SQS::arn:account090:sqstest2";
EventHandler eventHandler = new EventHandler();
eventHandler.setName(UUID.randomUUID().toString());
eventHandler.setActive(false);
EventHandler.Action action = new EventHandler.Action();
action.setAction(EventHandler.Action.Type.start_workflow);
action.setStart_workflow(new EventHandler.StartWorkflow());
action.getStart_workflow().setName("workflow_x");
eventHandler.getActions().add(action);
eventHandler.setEvent(event1);
metadataDAO.addEventHandler(eventHandler);
List<EventHandler> all = metadataDAO.getAllEventHandlers();
assertNotNull(all);
assertEquals(1, all.size());
assertEquals(eventHandler.getName(), all.get(0).getName());
assertEquals(eventHandler.getEvent(), all.get(0).getEvent());
List<EventHandler> byEvents = metadataDAO.getEventHandlersForEvent(event1, true);
assertNotNull(byEvents);
assertEquals(0, byEvents.size()); // event is marked as in-active
eventHandler.setActive(true);
eventHandler.setEvent(event2);
metadataDAO.updateEventHandler(eventHandler);
all = metadataDAO.getAllEventHandlers();
assertNotNull(all);
assertEquals(1, all.size());
byEvents = metadataDAO.getEventHandlersForEvent(event1, true);
assertNotNull(byEvents);
assertEquals(0, byEvents.size());
byEvents = metadataDAO.getEventHandlersForEvent(event2, true);
assertNotNull(byEvents);
assertEquals(1, byEvents.size());
}
// Latest-version query across several names with multiple versions each.
// NOTE(review): only test1/test2/test3 are created here, yet 4 entries are
// expected — the fourth appears to be the "test" workflow (v2) left behind by
// testWorkflowDefOperations, making this test depend on method execution
// order; confirm and consider isolating.
@Test
public void testGetAllWorkflowDefsLatestVersions() {
WorkflowDef def = new WorkflowDef();
def.setName("test1");
def.setVersion(1);
def.setDescription("description");
def.setCreatedBy("unit_test");
def.setCreateTime(1L);
def.setOwnerApp("ownerApp");
def.setUpdatedBy("unit_test2");
def.setUpdateTime(2L);
metadataDAO.createWorkflowDef(def);
def.setName("test2");
metadataDAO.createWorkflowDef(def);
def.setVersion(2);
metadataDAO.createWorkflowDef(def);
def.setName("test3");
def.setVersion(1);
metadataDAO.createWorkflowDef(def);
def.setVersion(2);
metadataDAO.createWorkflowDef(def);
def.setVersion(3);
metadataDAO.createWorkflowDef(def);
// Placed the values in a map because they might not be stored in order of defName.
// To test, needed to confirm that the versions are correct for the definitions.
Map<String, WorkflowDef> allMap =
metadataDAO.getAllWorkflowDefsLatestVersions().stream()
.collect(Collectors.toMap(WorkflowDef::getName, Function.identity()));
assertNotNull(allMap);
assertEquals(4, allMap.size());
assertEquals(1, allMap.get("test1").getVersion());
assertEquals(2, allMap.get("test2").getVersion());
assertEquals(3, allMap.get("test3").getVersion());
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-persistence/src/test/java/com/netflix/conductor/postgres/dao/PostgresQueueDAOTest.java | postgres-persistence/src/test/java/com/netflix/conductor/postgres/dao/PostgresQueueDAOTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.dao;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import javax.sql.DataSource;
import org.flywaydb.core.Flyway;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
import org.junit.runner.RunWith;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.flyway.FlywayAutoConfiguration;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.postgres.config.PostgresConfiguration;
import com.netflix.conductor.postgres.util.Query;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableList;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@ContextConfiguration(
classes = {
TestObjectMapperConfiguration.class,
PostgresConfiguration.class,
FlywayAutoConfiguration.class
})
@RunWith(SpringRunner.class)
@SpringBootTest(properties = "spring.flyway.clean-disabled=false")
public class PostgresQueueDAOTest {
private static final Logger LOGGER = LoggerFactory.getLogger(PostgresQueueDAOTest.class);
@Autowired private PostgresQueueDAO queueDAO;
@Qualifier("dataSource")
@Autowired
private DataSource dataSource;
@Autowired private ObjectMapper objectMapper;
@Rule public TestName name = new TestName();
@Autowired Flyway flyway;
// clean the database between tests.
@Before
public void before() {
    // Truncate the queue tables so every test starts from an empty queue state.
    try (Connection conn = dataSource.getConnection()) {
        // Explicitly disable autoCommit to match HikariCP pool configuration
        conn.setAutoCommit(false);
        String[] stmts =
                new String[] {
                    "truncate table queue restart identity cascade;",
                    "truncate table queue_message restart identity cascade;"
                };
        for (String stmt : stmts) {
            // Fix: close each PreparedStatement — the original created them
            // without ever closing, leaking statement handles on the pooled
            // connection.
            try (PreparedStatement ps = conn.prepareStatement(stmt)) {
                ps.executeUpdate();
            }
        }
        // Commit to ensure truncation is visible across connection pool
        conn.commit();
    } catch (Exception e) {
        e.printStackTrace();
        throw new RuntimeException(e);
    }
}
// End-to-end walk through the queue lifecycle: push, size/detail accounting,
// idempotent pushIfNotExists, pop/ack with unacked tracking, containsMessage,
// remove, and flush. Kept as one sequential scenario because each assertion
// depends on the exact queue state left by the previous step.
@Test
public void complexQueueTest() {
String queueName = "TestQueue";
long offsetTimeInSecond = 0;
// Push 10 immediately-visible messages (zero offset).
for (int i = 0; i < 10; i++) {
String messageId = "msg" + i;
queueDAO.push(queueName, messageId, offsetTimeInSecond);
}
int size = queueDAO.getSize(queueName);
assertEquals(10, size);
Map<String, Long> details = queueDAO.queuesDetail();
assertEquals(1, details.size());
assertEquals(10L, details.get(queueName).longValue());
// pushIfNotExists must be a no-op for ids already on the queue.
for (int i = 0; i < 10; i++) {
String messageId = "msg" + i;
queueDAO.pushIfNotExists(queueName, messageId, offsetTimeInSecond);
}
// Pop everything; messages become "unacked" rather than deleted.
List<String> popped = queueDAO.pop(queueName, 10, 100);
assertNotNull(popped);
assertEquals(10, popped.size());
// Verbose detail is keyed queue -> shard ("a") -> {size, uacked}.
Map<String, Map<String, Map<String, Long>>> verbose = queueDAO.queuesDetailVerbose();
assertEquals(1, verbose.size());
long shardSize = verbose.get(queueName).get("a").get("size");
long unackedSize = verbose.get(queueName).get("a").get("uacked");
assertEquals(0, shardSize);
assertEquals(10, unackedSize);
// Acking removes the messages from the unacked set.
popped.forEach(messageId -> queueDAO.ack(queueName, messageId));
verbose = queueDAO.queuesDetailVerbose();
assertEquals(1, verbose.size());
shardSize = verbose.get(queueName).get("a").get("size");
unackedSize = verbose.get(queueName).get("a").get("uacked");
assertEquals(0, shardSize);
assertEquals(0, unackedSize);
// Nothing left to pop after the acks.
popped = queueDAO.pop(queueName, 10, 100);
assertNotNull(popped);
assertEquals(0, popped.size());
// Re-push (queue is empty now, so pushIfNotExists inserts) ...
for (int i = 0; i < 10; i++) {
String messageId = "msg" + i;
queueDAO.pushIfNotExists(queueName, messageId, offsetTimeInSecond);
}
size = queueDAO.getSize(queueName);
assertEquals(10, size);
// ... and remove messages one by one via containsMessage/remove.
for (int i = 0; i < 10; i++) {
String messageId = "msg" + i;
assertTrue(queueDAO.containsMessage(queueName, messageId));
queueDAO.remove(queueName, messageId);
}
size = queueDAO.getSize(queueName);
assertEquals(0, size);
// Finally, flush must drop the whole queue contents in one call.
for (int i = 0; i < 10; i++) {
String messageId = "msg" + i;
queueDAO.pushIfNotExists(queueName, messageId, offsetTimeInSecond);
}
queueDAO.flush(queueName);
size = queueDAO.getSize(queueName);
assertEquals(0, size);
}
/**
 * Test fix for https://github.com/Netflix/conductor/issues/399
 *
 * <p>Pushes a batch of messages (even-indexed ones carry explicit priorities), then verifies
 * that partial polls return exactly the requested number of messages and that polling does not
 * shrink the reported queue size — popped rows stay in queue_message, flagged popped=true.
 *
 * @since 1.8.2-rc5
 */
@Test
public void pollMessagesTest() {
final List<Message> messages = new ArrayList<>();
final String queueName = "issue399_testQueue";
final int totalSize = 10;
for (int i = 0; i < totalSize; i++) {
String payload = "{\"id\": " + i + ", \"msg\":\"test " + i + "\"}";
Message m = new Message("testmsg-" + i, payload, "");
if (i % 2 == 0) {
// Set priority on message with pair id
m.setPriority(99 - i);
}
messages.add(m);
}
// Populate the queue with our test message batch
queueDAO.push(queueName, ImmutableList.copyOf(messages));
// Assert that all messages were persisted and no extras are in there
assertEquals("Queue size mismatch", totalSize, queueDAO.getSize(queueName));
// A zero-count poll must return an empty list, not null and not messages.
List<Message> zeroPoll = queueDAO.pollMessages(queueName, 0, 10_000);
assertTrue("Zero poll should be empty", zeroPoll.isEmpty());
final int firstPollSize = 3;
List<Message> firstPoll = queueDAO.pollMessages(queueName, firstPollSize, 10_000);
assertNotNull("First poll was null", firstPoll);
assertFalse("First poll was empty", firstPoll.isEmpty());
assertEquals("First poll size mismatch", firstPollSize, firstPoll.size());
final int secondPollSize = 4;
List<Message> secondPoll = queueDAO.pollMessages(queueName, secondPollSize, 10_000);
assertNotNull("Second poll was null", secondPoll);
assertFalse("Second poll was empty", secondPoll.isEmpty());
assertEquals("Second poll size mismatch", secondPollSize, secondPoll.size());
// Assert that the total queue size hasn't changed
assertEquals(
"Total queue size should have remained the same",
totalSize,
queueDAO.getSize(queueName));
// Assert that our un-popped messages match our expected size
final long expectedSize = totalSize - firstPollSize - secondPollSize;
try (Connection c = dataSource.getConnection()) {
String UNPOPPED =
"SELECT COUNT(*) FROM queue_message WHERE queue_name = ? AND popped = false";
try (Query q = new Query(objectMapper, c, UNPOPPED)) {
long count = q.addParameter(queueName).executeCount();
assertEquals("Remaining queue size mismatch", expectedSize, count);
}
} catch (Exception ex) {
fail(ex.getMessage());
}
}
/**
 * Test fix for https://github.com/conductor-oss/conductor/issues/369
 *
 * <p>Confirms that the queue name is taken into account when popping messages: identical
 * message ids are pushed to two queues, polls are issued against queue 1 only, and queue 2's
 * unpopped row count must remain untouched.
 */
@Test
public void pollMessagesDuplicatePopsTest() {
    // Note: the previous `throws InterruptedException` declaration was dropped — nothing in
    // this test sleeps or blocks.
    final List<Message> messages = new ArrayList<>();
    final String queueName1 = "issue369_testQueue_1";
    final String queueName2 = "issue369_testQueue_2";
    final int totalSize = 10;
    for (int i = 0; i < totalSize; i++) {
        String payload = "{\"id\": " + i + ", \"msg\":\"test " + i + "\"}";
        Message m = new Message("testmsg-" + i, payload, "");
        if (i % 2 == 0) {
            // Set priority on message with pair id
            m.setPriority(99 - i);
        }
        messages.add(m);
    }
    // Populate the queue with our test message batch
    queueDAO.push(queueName1, ImmutableList.copyOf(messages));
    // Add same messages for queue 2, to make sure that the message_id is duplicated across
    // queues
    queueDAO.push(queueName2, ImmutableList.copyOf(messages));
    // Assert that all messages were persisted and no extras are in there
    assertEquals("Queue size mismatch", totalSize, queueDAO.getSize(queueName1));
    assertEquals("Queue size mismatch", totalSize, queueDAO.getSize(queueName2));
    List<Message> zeroPoll = queueDAO.pollMessages(queueName1, 0, 10_000);
    assertTrue("Zero poll should be empty", zeroPoll.isEmpty());
    final int firstPollSize = 3;
    List<Message> firstPoll = queueDAO.pollMessages(queueName1, firstPollSize, 10_000);
    assertNotNull("First poll was null", firstPoll);
    assertFalse("First poll was empty", firstPoll.isEmpty());
    assertEquals("First poll size mismatch", firstPollSize, firstPoll.size());
    final int secondPollSize = 4;
    List<Message> secondPoll = queueDAO.pollMessages(queueName1, secondPollSize, 10_000);
    assertNotNull("Second poll was null", secondPoll);
    assertFalse("Second poll was empty", secondPoll.isEmpty());
    assertEquals("Second poll size mismatch", secondPollSize, secondPoll.size());
    // Assert that the total queue 1 size hasn't changed
    assertEquals(
            "Total queue 1 size should have remained the same",
            totalSize,
            queueDAO.getSize(queueName1));
    // Assert that the total queue 2 size hasn't changed
    assertEquals(
            "Total queue 2 size should have remained the same",
            totalSize,
            queueDAO.getSize(queueName2));
    // Assert that our un-popped messages match our expected size
    final long expectedSize = totalSize - firstPollSize - secondPollSize;
    try (Connection c = dataSource.getConnection()) {
        String UNPOPPED =
                "SELECT COUNT(*) FROM queue_message WHERE queue_name = ? AND popped = false";
        try (Query q = new Query(objectMapper, c, UNPOPPED)) {
            long count = q.addParameter(queueName1).executeCount();
            assertEquals("Remaining queue 1 size mismatch", expectedSize, count);
        }
        // Queue 2 was never polled, so every one of its messages must still be unpopped.
        try (Query q = new Query(objectMapper, c, UNPOPPED)) {
            long count = q.addParameter(queueName2).executeCount();
            assertEquals("Remaining queue 2 size mismatch", totalSize, count);
        }
    } catch (Exception ex) {
        fail(ex.getMessage());
    }
}
/**
 * Regression test for https://github.com/Netflix/conductor/issues/1892: containsMessage must
 * report true for enqueued messages and false once they have been removed.
 */
@Test
public void containsMessageTest() {
    final String queue = "TestQueue";
    final int messageCount = 10;

    // Enqueue ten messages with no delivery delay.
    for (int idx = 0; idx < messageCount; idx++) {
        queueDAO.push(queue, "msg" + idx, 0);
    }
    assertEquals(messageCount, queueDAO.getSize(queue));

    // Each message is visible until it is removed...
    for (int idx = 0; idx < messageCount; idx++) {
        final String id = "msg" + idx;
        assertTrue(queueDAO.containsMessage(queue, id));
        queueDAO.remove(queue, id);
    }

    // ...and invisible afterwards.
    for (int idx = 0; idx < messageCount; idx++) {
        assertFalse(queueDAO.containsMessage(queue, "msg" + idx));
    }
}
/**
 * Test fix for https://github.com/Netflix/conductor/issues/448
 *
 * <p>Verifies that messages with a delivery delay (offset) only become pollable once the delay
 * has elapsed, and that delayed messages do not block earlier, already-ready ones.
 *
 * @since 1.8.2-rc5
 */
@Test
public void pollDeferredMessagesTest() throws InterruptedException {
final List<Message> messages = new ArrayList<>();
final String queueName = "issue448_testQueue";
final int totalSize = 10;
for (int i = 0; i < totalSize; i++) {
int offset = 0;
if (i < 5) {
// Messages 0-4 are deliverable immediately.
offset = 0;
} else if (i == 6 || i == 7) {
// Purposefully skipping id:5 to test out of order deliveries
// Set id:6 and id:7 for a 5s delay to be picked up in the second polling batch
offset = 5;
} else {
// Set all other queue messages to have enough of a delay that they won't
// accidentally
// be picked up.
offset = 10_000 + i;
}
String payload = "{\"id\": " + i + ",\"offset_time_seconds\":" + offset + "}";
Message m = new Message("testmsg-" + i, payload, "");
messages.add(m);
queueDAO.push(queueName, "testmsg-" + i, offset);
}
// Assert that all messages were persisted and no extras are in there
assertEquals("Queue size mismatch", totalSize, queueDAO.getSize(queueName));
final int firstPollSize = 4;
List<Message> firstPoll = queueDAO.pollMessages(queueName, firstPollSize, 100);
assertNotNull("First poll was null", firstPoll);
assertFalse("First poll was empty", firstPoll.isEmpty());
assertEquals("First poll size mismatch", firstPollSize, firstPoll.size());
// Any 4 of the 5 immediately-ready messages may come back, hence the +1 window.
List<String> firstPollMessageIds =
messages.stream()
.map(Message::getId)
.collect(Collectors.toList())
.subList(0, firstPollSize + 1);
for (int i = 0; i < firstPollSize; i++) {
String actual = firstPoll.get(i).getId();
assertTrue("Unexpected Id: " + actual, firstPollMessageIds.contains(actual));
}
final int secondPollSize = 3;
// Sleep a bit to get the next batch of messages
LOGGER.info("Sleeping for second poll...");
Thread.sleep(5_000);
// Poll for many more messages than expected
List<Message> secondPoll = queueDAO.pollMessages(queueName, secondPollSize + 10, 100);
assertNotNull("Second poll was null", secondPoll);
assertFalse("Second poll was empty", secondPoll.isEmpty());
assertEquals("Second poll size mismatch", secondPollSize, secondPoll.size());
// Only the leftover ready message plus the two 5s-delayed ones should surface.
List<String> expectedIds = Arrays.asList("testmsg-4", "testmsg-6", "testmsg-7");
for (int i = 0; i < secondPollSize; i++) {
String actual = secondPoll.get(i).getId();
assertTrue("Unexpected Id: " + actual, expectedIds.contains(actual));
}
// Assert that the total queue size hasn't changed
assertEquals(
"Total queue size should have remained the same",
totalSize,
queueDAO.getSize(queueName));
// Assert that our un-popped messages match our expected size
final long expectedSize = totalSize - firstPollSize - secondPollSize;
try (Connection c = dataSource.getConnection()) {
String UNPOPPED =
"SELECT COUNT(*) FROM queue_message WHERE queue_name = ? AND popped = false";
try (Query q = new Query(objectMapper, c, UNPOPPED)) {
long count = q.addParameter(queueName).executeCount();
assertEquals("Remaining queue size mismatch", expectedSize, count);
}
} catch (Exception ex) {
fail(ex.getMessage());
}
}
// NOTE(review): @Test is commented out, so this test is currently disabled; the reason is not
// recorded here — confirm (e.g. flakiness or runtime) before re-enabling.
// @Test
public void processUnacksTest() {
processUnacks(
() -> {
// Process unacks
queueDAO.processUnacks("process_unacks_test");
},
"process_unacks_test");
}
// NOTE(review): @Test is commented out, so this test is currently disabled; the reason is not
// recorded here — confirm before re-enabling.
// @Test
public void processAllUnacksTest() {
processUnacks(
() -> {
// Process all unacks
queueDAO.processAllUnacks();
},
"process_unacks_test");
}
/**
 * Shared scenario for the (currently disabled) unack-processing tests.
 *
 * <p>Pushes {@code count} messages to {@code queueName} — only the first {@code unackedCount}
 * are immediately pollable — polls them, acks one, runs {@code unack}, and verifies that unack
 * processing neither resurrects the acked message nor touches a second, unrelated queue.
 *
 * <p>Fix: the JUnit convention is {@code assertEquals(expected, actual)}; the previous version
 * passed the arguments swapped in four assertions, producing misleading failure messages.
 *
 * @param unack runnable that triggers the unack processing under test
 * @param queueName name of the primary queue to exercise
 */
private void processUnacks(Runnable unack, String queueName) {
    // Count of messages in the queue(s)
    final int count = 10;
    // Number of messages to process acks for
    final int unackedCount = 4;
    // A secondary queue to make sure we don't accidentally process other queues
    final String otherQueueName = "process_unacks_test_other_queue";
    // Create testing queue with some messages (but not all) that will be popped/acked.
    for (int i = 0; i < count; i++) {
        int offset = 0;
        if (i >= unackedCount) {
            // Push the remainder far enough in the future that they cannot be polled.
            offset = 1_000_000;
        }
        queueDAO.push(queueName, "unack-" + i, offset);
    }
    // Create a second queue to make sure that unacks don't occur for it
    for (int i = 0; i < count; i++) {
        queueDAO.push(otherQueueName, "other-" + i, 0);
    }
    // Poll for first batch of messages (should be equal to unackedCount)
    List<Message> polled = queueDAO.pollMessages(queueName, 100, 10_000);
    assertNotNull(polled);
    assertFalse(polled.isEmpty());
    assertEquals(unackedCount, polled.size());
    // Poll messages from the other queue so we know they don't get unacked later
    queueDAO.pollMessages(otherQueueName, 100, 10_000);
    // Ack one of the polled messages
    assertTrue(queueDAO.ack(queueName, "unack-1"));
    // Should have one less un-acked popped message in the queue
    Long uacked = queueDAO.queuesDetailVerbose().get(queueName).get("a").get("uacked");
    assertNotNull(uacked);
    assertEquals(unackedCount - 1, uacked.longValue());
    unack.run();
    // Check uacks for both queues after processing
    Map<String, Map<String, Map<String, Long>>> details = queueDAO.queuesDetailVerbose();
    uacked = details.get(queueName).get("a").get("uacked");
    assertNotNull(uacked);
    assertEquals(
            "The messages that were polled should be unacked still",
            unackedCount - 1,
            uacked.longValue());
    Long otherUacked = details.get(otherQueueName).get("a").get("uacked");
    assertNotNull(otherUacked);
    assertEquals(
            "Other queue should have all unacked messages", count, otherUacked.longValue());
    Long size = queueDAO.queuesDetail().get(queueName);
    assertNotNull(size);
    assertEquals(count - unackedCount, size.longValue());
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-persistence/src/test/java/com/netflix/conductor/postgres/dao/PostgresPollDataDAONoCacheTest.java | postgres-persistence/src/test/java/com/netflix/conductor/postgres/dao/PostgresPollDataDAONoCacheTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.dao;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.*;
import java.util.stream.Collectors;
import javax.sql.DataSource;
import org.flywaydb.core.Flyway;
import org.junit.Before;
import org.junit.FixMethodOrder;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.MethodSorters;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.flyway.FlywayAutoConfiguration;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.common.metadata.tasks.PollData;
import com.netflix.conductor.dao.PollDataDAO;
import com.netflix.conductor.postgres.config.PostgresConfiguration;
import com.netflix.conductor.postgres.util.Query;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import static org.junit.Assert.*;
// Integration tests for the Postgres PollDataDAO with pollDataFlushInterval=0 and
// pollDataCacheValidityPeriod=0 — presumably disabling the write-behind cache so every
// call hits the database directly (confirm against PostgresPollDataDAO).
@FixMethodOrder(MethodSorters.NAME_ASCENDING)
@ContextConfiguration(
classes = {
TestObjectMapperConfiguration.class,
PostgresConfiguration.class,
FlywayAutoConfiguration.class
})
@RunWith(SpringRunner.class)
@TestPropertySource(
properties = {
"conductor.app.asyncIndexingEnabled=false",
"conductor.elasticsearch.version=0",
"conductor.indexing.type=postgres",
"conductor.postgres.pollDataFlushInterval=0",
"conductor.postgres.pollDataCacheValidityPeriod=0",
"spring.flyway.clean-disabled=false"
})
@SpringBootTest
public class PostgresPollDataDAONoCacheTest {
@Autowired private PollDataDAO pollDataDAO;
// Used by queryDb() and for parsing the persisted json_data payloads.
@Autowired private ObjectMapper objectMapper;
@Qualifier("dataSource")
@Autowired
private DataSource dataSource;
@Autowired Flyway flyway;
// Clean the poll_data table between tests so each test only observes its own writes.
@Before
public void before() {
    try (Connection conn = dataSource.getConnection()) {
        // Explicitly disable autoCommit to match HikariCP pool configuration
        // and ensure we can control transaction boundaries
        conn.setAutoCommit(false);
        // Use RESTART IDENTITY to reset sequences and CASCADE for foreign keys.
        // Close the statement explicitly: the original leaked the PreparedStatement handle.
        try (var ps =
                conn.prepareStatement("truncate table poll_data restart identity cascade")) {
            ps.executeUpdate();
        }
        // Commit so the truncation is visible to all subsequent pooled connections.
        conn.commit();
    } catch (Exception e) {
        // Rethrow with the cause preserved; printStackTrace was redundant noise.
        throw new RuntimeException("Failed to truncate poll_data", e);
    }
}
/**
 * Runs {@code query} against the test datasource and returns each row as a column-to-value
 * map. Both the connection and the Query wrapper are closed before returning.
 */
private List<Map<String, Object>> queryDb(String query) throws SQLException {
    try (Connection connection = dataSource.getConnection();
            Query statement = new Query(objectMapper, connection, query)) {
        return statement.executeAndFetchMap();
    }
}
/** Writing poll data with an explicit domain persists exactly one row carrying that domain. */
@Test
public void updateLastPollDataTest() throws SQLException, JsonProcessingException {
    pollDataDAO.updateLastPollData("dummy-task", "dummy-domain", "dummy-worker-id");

    List<Map<String, Object>> rows =
            queryDb("SELECT * FROM poll_data WHERE queue_name = 'dummy-task'");
    assertEquals("More than one poll data records returned", 1, rows.size());

    Map<String, Object> row = rows.get(0);
    assertEquals("Wrong domain set", "dummy-domain", row.get("domain"));

    // The worker id is stored inside the serialized json_data payload.
    JsonNode payload = objectMapper.readTree(row.get("json_data").toString());
    assertEquals("Poll data is incorrect", "dummy-worker-id", payload.get("workerId").asText());
}
/** A null domain is persisted under the sentinel "DEFAULT" domain value. */
@Test
public void updateLastPollDataNullDomainTest() throws SQLException, JsonProcessingException {
    pollDataDAO.updateLastPollData("dummy-task", null, "dummy-worker-id");

    List<Map<String, Object>> rows =
            queryDb("SELECT * FROM poll_data WHERE queue_name = 'dummy-task'");
    assertEquals("More than one poll data records returned", 1, rows.size());

    Map<String, Object> row = rows.get(0);
    assertEquals("Wrong domain set", "DEFAULT", row.get("domain"));

    // The worker id is stored inside the serialized json_data payload.
    JsonNode payload = objectMapper.readTree(row.get("json_data").toString());
    assertEquals("Poll data is incorrect", "dummy-worker-id", payload.get("workerId").asText());
}
/** Round-trip: poll data written with an explicit domain is read back field-for-field. */
@Test
public void getPollDataByDomainTest() {
    pollDataDAO.updateLastPollData("dummy-task", "dummy-domain", "dummy-worker-id");

    PollData fetched = pollDataDAO.getPollData("dummy-task", "dummy-domain");
    assertEquals("dummy-task", fetched.getQueueName());
    assertEquals("dummy-domain", fetched.getDomain());
    assertEquals("dummy-worker-id", fetched.getWorkerId());
}
/** Poll data written with a null domain is read back with a null domain (not "DEFAULT"). */
@Test
public void getPollDataByNullDomainTest() {
    pollDataDAO.updateLastPollData("dummy-task", null, "dummy-worker-id");

    PollData fetched = pollDataDAO.getPollData("dummy-task", null);
    assertEquals("dummy-task", fetched.getQueueName());
    assertNull(fetched.getDomain());
    assertEquals("dummy-worker-id", fetched.getWorkerId());
}
/**
 * getPollData(taskName) returns one entry per domain for that task — including the null-domain
 * entry — and excludes entries belonging to other tasks.
 */
@Test
public void getPollDataByTaskTest() {
    pollDataDAO.updateLastPollData("dummy-task1", "domain1", "dummy-worker-id1");
    pollDataDAO.updateLastPollData("dummy-task1", "domain2", "dummy-worker-id2");
    pollDataDAO.updateLastPollData("dummy-task1", null, "dummy-worker-id3");
    pollDataDAO.updateLastPollData("dummy-task2", "domain2", "dummy-worker-id4");
    List<PollData> pollData = pollDataDAO.getPollData("dummy-task1");
    assertEquals("Wrong number of records returned", 3, pollData.size());
    // Method references replace the previous `x -> x.getX()` lambdas.
    List<String> queueNames =
            pollData.stream().map(PollData::getQueueName).collect(Collectors.toList());
    assertEquals(3, Collections.frequency(queueNames, "dummy-task1"));
    List<String> domains =
            pollData.stream().map(PollData::getDomain).collect(Collectors.toList());
    assertTrue(domains.contains("domain1"));
    assertTrue(domains.contains("domain2"));
    // The null-domain entry must be present as an actual null.
    assertTrue(domains.contains(null));
    List<String> workerIds =
            pollData.stream().map(PollData::getWorkerId).collect(Collectors.toList());
    assertTrue(workerIds.contains("dummy-worker-id1"));
    assertTrue(workerIds.contains("dummy-worker-id2"));
    assertTrue(workerIds.contains("dummy-worker-id3"));
}
/** getAllPollData() returns every persisted entry across all tasks and domains. */
@Test
public void getAllPollDataTest() {
    pollDataDAO.updateLastPollData("dummy-task1", "domain1", "dummy-worker-id1");
    pollDataDAO.updateLastPollData("dummy-task1", "domain2", "dummy-worker-id2");
    pollDataDAO.updateLastPollData("dummy-task1", null, "dummy-worker-id3");
    pollDataDAO.updateLastPollData("dummy-task2", "domain2", "dummy-worker-id4");
    List<PollData> pollData = pollDataDAO.getAllPollData();
    assertEquals("Wrong number of records returned", 4, pollData.size());
    // Method references replace the previous `x -> x.getX()` lambdas.
    List<String> queueNames =
            pollData.stream().map(PollData::getQueueName).collect(Collectors.toList());
    assertEquals(3, Collections.frequency(queueNames, "dummy-task1"));
    assertEquals(1, Collections.frequency(queueNames, "dummy-task2"));
    List<String> domains =
            pollData.stream().map(PollData::getDomain).collect(Collectors.toList());
    assertEquals(1, Collections.frequency(domains, "domain1"));
    assertEquals(2, Collections.frequency(domains, "domain2"));
    // Exactly one entry was written with a null domain.
    assertEquals(1, Collections.frequency(domains, null));
    List<String> workerIds =
            pollData.stream().map(PollData::getWorkerId).collect(Collectors.toList());
    assertTrue(workerIds.contains("dummy-worker-id1"));
    assertTrue(workerIds.contains("dummy-worker-id2"));
    assertTrue(workerIds.contains("dummy-worker-id3"));
    assertTrue(workerIds.contains("dummy-worker-id4"));
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-persistence/src/test/java/com/netflix/conductor/postgres/util/PostgresIndexQueryBuilderTest.java | postgres-persistence/src/test/java/com/netflix/conductor/postgres/util/PostgresIndexQueryBuilderTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.util;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.junit.jupiter.api.Test;
import org.mockito.InOrder;
import org.mockito.Mockito;
import com.netflix.conductor.postgres.config.PostgresProperties;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.Mockito.*;
// Unit tests for PostgresIndexQueryBuilder: verify generated SQL text and the
// order/values of bound parameters using a mocked Query.
public class PostgresIndexQueryBuilderTest {
// Default properties, shared by every builder constructed in the tests below.
private PostgresProperties properties = new PostgresProperties();
/**
 * Builds a {@link PostgresIndexQueryBuilder} over {@code table_name} with the fixed paging
 * window (offset 0, count 15) used by every test below. Replaces 16 copies of the identical
 * constructor boilerplate.
 */
private PostgresIndexQueryBuilder newBuilder(String inputQuery) {
    return new PostgresIndexQueryBuilder(
            "table_name", inputQuery, "", 0, 15, new ArrayList<>(), properties);
}

/** An empty free-text query yields an unfiltered SELECT with only paging parameters. */
@Test
void shouldGenerateQueryForEmptyString() throws SQLException {
    PostgresIndexQueryBuilder builder = newBuilder("");
    assertEquals("SELECT json_data::TEXT FROM table_name LIMIT ? OFFSET ?", builder.getQuery());
    Query mockQuery = mock(Query.class);
    builder.addParameters(mockQuery);
    builder.addPagingParameters(mockQuery);
    InOrder inOrder = Mockito.inOrder(mockQuery);
    inOrder.verify(mockQuery).addParameter(15);
    inOrder.verify(mockQuery).addParameter(0);
    verifyNoMoreInteractions(mockQuery);
}

/** The count variant of an empty query binds no parameters at all. */
@Test
void shouldGenerateCountQueryForEmptyString() throws SQLException {
    PostgresIndexQueryBuilder builder = newBuilder("");
    assertEquals("SELECT COUNT(json_data) FROM table_name", builder.getCountQuery());
    Query mockQuery = mock(Query.class);
    builder.addParameters(mockQuery);
    verifyNoMoreInteractions(mockQuery);
}

/** A null query behaves exactly like an empty one. */
@Test
void shouldGenerateQueryForNull() throws SQLException {
    PostgresIndexQueryBuilder builder = newBuilder(null);
    assertEquals("SELECT json_data::TEXT FROM table_name LIMIT ? OFFSET ?", builder.getQuery());
    Query mockQuery = mock(Query.class);
    builder.addParameters(mockQuery);
    builder.addPagingParameters(mockQuery);
    InOrder inOrder = Mockito.inOrder(mockQuery);
    inOrder.verify(mockQuery).addParameter(15);
    inOrder.verify(mockQuery).addParameter(0);
    verifyNoMoreInteractions(mockQuery);
}

/** The count variant of a null query binds no parameters. */
@Test
void shouldGenerateCountQueryForNull() throws SQLException {
    PostgresIndexQueryBuilder builder = newBuilder(null);
    assertEquals("SELECT COUNT(json_data) FROM table_name", builder.getCountQuery());
    Query mockQuery = mock(Query.class);
    builder.addParameters(mockQuery);
    verifyNoMoreInteractions(mockQuery);
}

/** An equality clause maps camelCase workflowId to the workflow_id column. */
@Test
void shouldGenerateQueryForWorkflowId() throws SQLException {
    PostgresIndexQueryBuilder builder = newBuilder("workflowId=\"abc123\"");
    assertEquals(
            "SELECT json_data::TEXT FROM table_name WHERE workflow_id = ? LIMIT ? OFFSET ?",
            builder.getQuery());
    Query mockQuery = mock(Query.class);
    builder.addParameters(mockQuery);
    builder.addPagingParameters(mockQuery);
    InOrder inOrder = Mockito.inOrder(mockQuery);
    inOrder.verify(mockQuery).addParameter("abc123");
    inOrder.verify(mockQuery).addParameter(15);
    inOrder.verify(mockQuery).addParameter(0);
    verifyNoMoreInteractions(mockQuery);
}

@Test
void shouldGenerateCountQueryForWorkflowId() throws SQLException {
    PostgresIndexQueryBuilder builder = newBuilder("workflowId=\"abc123\"");
    assertEquals(
            "SELECT COUNT(json_data) FROM table_name WHERE workflow_id = ?",
            builder.getCountQuery());
    Query mockQuery = mock(Query.class);
    builder.addParameters(mockQuery);
    InOrder inOrder = Mockito.inOrder(mockQuery);
    inOrder.verify(mockQuery).addParameter("abc123");
    verifyNoMoreInteractions(mockQuery);
}

/** A multi-value IN clause is rendered as `= ANY(?)` with a single list parameter. */
@Test
void shouldGenerateQueryForMultipleInClause() throws SQLException {
    PostgresIndexQueryBuilder builder = newBuilder("status IN (COMPLETED,RUNNING)");
    assertEquals(
            "SELECT json_data::TEXT FROM table_name WHERE status = ANY(?) LIMIT ? OFFSET ?",
            builder.getQuery());
    Query mockQuery = mock(Query.class);
    builder.addParameters(mockQuery);
    builder.addPagingParameters(mockQuery);
    InOrder inOrder = Mockito.inOrder(mockQuery);
    inOrder.verify(mockQuery).addParameter(new ArrayList<>(List.of("COMPLETED", "RUNNING")));
    inOrder.verify(mockQuery).addParameter(15);
    inOrder.verify(mockQuery).addParameter(0);
    verifyNoMoreInteractions(mockQuery);
}

@Test
void shouldGenerateCountQueryForMultipleInClause() throws SQLException {
    PostgresIndexQueryBuilder builder = newBuilder("status IN (COMPLETED,RUNNING)");
    assertEquals(
            "SELECT COUNT(json_data) FROM table_name WHERE status = ANY(?)",
            builder.getCountQuery());
    Query mockQuery = mock(Query.class);
    builder.addParameters(mockQuery);
    InOrder inOrder = Mockito.inOrder(mockQuery);
    inOrder.verify(mockQuery).addParameter(new ArrayList<>(List.of("COMPLETED", "RUNNING")));
    verifyNoMoreInteractions(mockQuery);
}

/** A single-value IN clause collapses to a plain equality with a scalar parameter. */
@Test
void shouldGenerateQueryForSingleInClause() throws SQLException {
    PostgresIndexQueryBuilder builder = newBuilder("status IN (COMPLETED)");
    assertEquals(
            "SELECT json_data::TEXT FROM table_name WHERE status = ? LIMIT ? OFFSET ?",
            builder.getQuery());
    Query mockQuery = mock(Query.class);
    builder.addParameters(mockQuery);
    builder.addPagingParameters(mockQuery);
    InOrder inOrder = Mockito.inOrder(mockQuery);
    inOrder.verify(mockQuery).addParameter("COMPLETED");
    inOrder.verify(mockQuery).addParameter(15);
    inOrder.verify(mockQuery).addParameter(0);
    verifyNoMoreInteractions(mockQuery);
}

@Test
void shouldGenerateCountQueryForSingleInClause() throws SQLException {
    PostgresIndexQueryBuilder builder = newBuilder("status IN (COMPLETED)");
    assertEquals(
            "SELECT COUNT(json_data) FROM table_name WHERE status = ?", builder.getCountQuery());
    Query mockQuery = mock(Query.class);
    builder.addParameters(mockQuery);
    InOrder inOrder = Mockito.inOrder(mockQuery);
    inOrder.verify(mockQuery).addParameter("COMPLETED");
    verifyNoMoreInteractions(mockQuery);
}

/** Epoch-millis comparisons are converted to an ISO-8601 UTC timestamp parameter. */
@Test
void shouldGenerateQueryForStartTimeGt() throws SQLException {
    PostgresIndexQueryBuilder builder = newBuilder("startTime>1675702498000");
    assertEquals(
            "SELECT json_data::TEXT FROM table_name WHERE start_time > ?::TIMESTAMPTZ LIMIT ? OFFSET ?",
            builder.getQuery());
    Query mockQuery = mock(Query.class);
    builder.addParameters(mockQuery);
    builder.addPagingParameters(mockQuery);
    InOrder inOrder = Mockito.inOrder(mockQuery);
    inOrder.verify(mockQuery).addParameter("2023-02-06T16:54:58Z");
    inOrder.verify(mockQuery).addParameter(15);
    inOrder.verify(mockQuery).addParameter(0);
    verifyNoMoreInteractions(mockQuery);
}

@Test
void shouldGenerateCountQueryForStartTimeGt() throws SQLException {
    PostgresIndexQueryBuilder builder = newBuilder("startTime>1675702498000");
    assertEquals(
            "SELECT COUNT(json_data) FROM table_name WHERE start_time > ?::TIMESTAMPTZ",
            builder.getCountQuery());
    Query mockQuery = mock(Query.class);
    builder.addParameters(mockQuery);
    InOrder inOrder = Mockito.inOrder(mockQuery);
    inOrder.verify(mockQuery).addParameter("2023-02-06T16:54:58Z");
    verifyNoMoreInteractions(mockQuery);
}

@Test
void shouldGenerateQueryForStartTimeLt() throws SQLException {
    PostgresIndexQueryBuilder builder = newBuilder("startTime<1675702498000");
    assertEquals(
            "SELECT json_data::TEXT FROM table_name WHERE start_time < ?::TIMESTAMPTZ LIMIT ? OFFSET ?",
            builder.getQuery());
    Query mockQuery = mock(Query.class);
    builder.addParameters(mockQuery);
    builder.addPagingParameters(mockQuery);
    InOrder inOrder = Mockito.inOrder(mockQuery);
    inOrder.verify(mockQuery).addParameter("2023-02-06T16:54:58Z");
    inOrder.verify(mockQuery).addParameter(15);
    inOrder.verify(mockQuery).addParameter(0);
    verifyNoMoreInteractions(mockQuery);
}

@Test
void shouldGenerateCountQueryForStartTimeLt() throws SQLException {
    PostgresIndexQueryBuilder builder = newBuilder("startTime<1675702498000");
    assertEquals(
            "SELECT COUNT(json_data) FROM table_name WHERE start_time < ?::TIMESTAMPTZ",
            builder.getCountQuery());
    Query mockQuery = mock(Query.class);
    builder.addParameters(mockQuery);
    InOrder inOrder = Mockito.inOrder(mockQuery);
    inOrder.verify(mockQuery).addParameter("2023-02-06T16:54:58Z");
    verifyNoMoreInteractions(mockQuery);
}

@Test
void shouldGenerateQueryForUpdateTimeGt() throws SQLException {
    PostgresIndexQueryBuilder builder = newBuilder("updateTime>1675702498000");
    assertEquals(
            "SELECT json_data::TEXT FROM table_name WHERE update_time > ?::TIMESTAMPTZ LIMIT ? OFFSET ?",
            builder.getQuery());
    Query mockQuery = mock(Query.class);
    builder.addParameters(mockQuery);
    builder.addPagingParameters(mockQuery);
    InOrder inOrder = Mockito.inOrder(mockQuery);
    inOrder.verify(mockQuery).addParameter("2023-02-06T16:54:58Z");
    inOrder.verify(mockQuery).addParameter(15);
    inOrder.verify(mockQuery).addParameter(0);
    verifyNoMoreInteractions(mockQuery);
}

@Test
void shouldGenerateCountQueryForUpdateTimeGt() throws SQLException {
    PostgresIndexQueryBuilder builder = newBuilder("updateTime>1675702498000");
    assertEquals(
            "SELECT COUNT(json_data) FROM table_name WHERE update_time > ?::TIMESTAMPTZ",
            builder.getCountQuery());
    Query mockQuery = mock(Query.class);
    builder.addParameters(mockQuery);
    InOrder inOrder = Mockito.inOrder(mockQuery);
    inOrder.verify(mockQuery).addParameter("2023-02-06T16:54:58Z");
    verifyNoMoreInteractions(mockQuery);
}
@Test
void shouldGenerateQueryForUpdateTimeLt() throws SQLException {
String inputQuery = "updateTime<1675702498000";
PostgresIndexQueryBuilder builder =
new PostgresIndexQueryBuilder(
"table_name", inputQuery, "", 0, 15, new ArrayList<>(), properties);
String generatedQuery = builder.getQuery();
assertEquals(
"SELECT json_data::TEXT FROM table_name WHERE update_time < ?::TIMESTAMPTZ LIMIT ? OFFSET ?",
generatedQuery);
Query mockQuery = mock(Query.class);
builder.addParameters(mockQuery);
builder.addPagingParameters(mockQuery);
InOrder inOrder = Mockito.inOrder(mockQuery);
inOrder.verify(mockQuery).addParameter("2023-02-06T16:54:58Z");
inOrder.verify(mockQuery).addParameter(15);
inOrder.verify(mockQuery).addParameter(0);
verifyNoMoreInteractions(mockQuery);
}
@Test
void shouldGenerateCountQueryForUpdateTimeLt() throws SQLException {
String inputQuery = "updateTime<1675702498000";
PostgresIndexQueryBuilder builder =
new PostgresIndexQueryBuilder(
"table_name", inputQuery, "", 0, 15, new ArrayList<>(), properties);
String generatedQuery = builder.getCountQuery();
assertEquals(
"SELECT COUNT(json_data) FROM table_name WHERE update_time < ?::TIMESTAMPTZ",
generatedQuery);
Query mockQuery = mock(Query.class);
builder.addParameters(mockQuery);
InOrder inOrder = Mockito.inOrder(mockQuery);
inOrder.verify(mockQuery).addParameter("2023-02-06T16:54:58Z");
verifyNoMoreInteractions(mockQuery);
}
@Test
void shouldGenerateQueryForMultipleConditions() throws SQLException {
String inputQuery =
"workflowId=\"abc123\" AND workflowType IN (one,two) AND status IN (COMPLETED,RUNNING) AND startTime>1675701498000 AND startTime<1675702498000";
PostgresIndexQueryBuilder builder =
new PostgresIndexQueryBuilder(
"table_name", inputQuery, "", 0, 15, new ArrayList<>(), properties);
String generatedQuery = builder.getQuery();
assertEquals(
"SELECT json_data::TEXT FROM table_name WHERE start_time < ?::TIMESTAMPTZ AND start_time > ?::TIMESTAMPTZ AND status = ANY(?) AND workflow_id = ? AND workflow_type = ANY(?) LIMIT ? OFFSET ?",
generatedQuery);
Query mockQuery = mock(Query.class);
builder.addParameters(mockQuery);
builder.addPagingParameters(mockQuery);
InOrder inOrder = Mockito.inOrder(mockQuery);
inOrder.verify(mockQuery).addParameter("2023-02-06T16:54:58Z");
inOrder.verify(mockQuery).addParameter("2023-02-06T16:38:18Z");
inOrder.verify(mockQuery).addParameter(new ArrayList<>(List.of("COMPLETED", "RUNNING")));
inOrder.verify(mockQuery).addParameter("abc123");
inOrder.verify(mockQuery).addParameter(new ArrayList<>(List.of("one", "two")));
inOrder.verify(mockQuery).addParameter(15);
inOrder.verify(mockQuery).addParameter(0);
verifyNoMoreInteractions(mockQuery);
}
@Test
void shouldGenerateCountQueryForMultipleConditions() throws SQLException {
String inputQuery =
"workflowId=\"abc123\" AND workflowType IN (one,two) AND status IN (COMPLETED,RUNNING) AND startTime>1675701498000 AND startTime<1675702498000";
PostgresIndexQueryBuilder builder =
new PostgresIndexQueryBuilder(
"table_name", inputQuery, "", 0, 15, new ArrayList<>(), properties);
String generatedQuery = builder.getCountQuery();
assertEquals(
"SELECT COUNT(json_data) FROM table_name WHERE start_time < ?::TIMESTAMPTZ AND start_time > ?::TIMESTAMPTZ AND status = ANY(?) AND workflow_id = ? AND workflow_type = ANY(?)",
generatedQuery);
Query mockQuery = mock(Query.class);
builder.addParameters(mockQuery);
InOrder inOrder = Mockito.inOrder(mockQuery);
inOrder.verify(mockQuery).addParameter("2023-02-06T16:54:58Z");
inOrder.verify(mockQuery).addParameter("2023-02-06T16:38:18Z");
inOrder.verify(mockQuery).addParameter(new ArrayList<>(List.of("COMPLETED", "RUNNING")));
inOrder.verify(mockQuery).addParameter("abc123");
inOrder.verify(mockQuery).addParameter(new ArrayList<>(List.of("one", "two")));
verifyNoMoreInteractions(mockQuery);
}
@Test
void shouldGenerateOrderBy() throws SQLException {
String inputQuery = "updateTime<1675702498000";
String[] query = {"updateTime:DESC"};
PostgresIndexQueryBuilder builder =
new PostgresIndexQueryBuilder(
"table_name", inputQuery, "", 0, 15, Arrays.asList(query), properties);
String expectedQuery =
"SELECT json_data::TEXT FROM table_name WHERE update_time < ?::TIMESTAMPTZ ORDER BY update_time DESC LIMIT ? OFFSET ?";
assertEquals(expectedQuery, builder.getQuery());
}
@Test
void shouldGenerateOrderByMultiple() throws SQLException {
String inputQuery = "updateTime<1675702498000";
String[] query = {"updateTime:DESC", "correlationId:ASC"};
PostgresIndexQueryBuilder builder =
new PostgresIndexQueryBuilder(
"table_name", inputQuery, "", 0, 15, Arrays.asList(query), properties);
String expectedQuery =
"SELECT json_data::TEXT FROM table_name WHERE update_time < ?::TIMESTAMPTZ ORDER BY update_time DESC, correlation_id ASC LIMIT ? OFFSET ?";
assertEquals(expectedQuery, builder.getQuery());
}
@Test
void shouldNotAllowInvalidColumns() throws SQLException {
String inputQuery = "sqlInjection<1675702498000";
PostgresIndexQueryBuilder builder =
new PostgresIndexQueryBuilder(
"table_name", inputQuery, "", 0, 15, new ArrayList<>(), properties);
String expectedQuery = "SELECT json_data::TEXT FROM table_name LIMIT ? OFFSET ?";
assertEquals(expectedQuery, builder.getQuery());
}
@Test
void shouldNotAllowInvalidColumnsOnCountQuery() throws SQLException {
String inputQuery = "sqlInjection<1675702498000";
PostgresIndexQueryBuilder builder =
new PostgresIndexQueryBuilder(
"table_name", inputQuery, "", 0, 15, new ArrayList<>(), properties);
String expectedQuery = "SELECT COUNT(json_data) FROM table_name";
assertEquals(expectedQuery, builder.getCountQuery());
}
@Test
void shouldNotAllowInvalidSortColumn() throws SQLException {
String inputQuery = "updateTime<1675702498000";
String[] query = {"sqlInjection:DESC"};
PostgresIndexQueryBuilder builder =
new PostgresIndexQueryBuilder(
"table_name", inputQuery, "", 0, 15, Arrays.asList(query), properties);
String expectedQuery =
"SELECT json_data::TEXT FROM table_name WHERE update_time < ?::TIMESTAMPTZ LIMIT ? OFFSET ?";
assertEquals(expectedQuery, builder.getQuery());
}
@Test
void shouldNotAllowInvalidSortColumnOnCountQuery() throws SQLException {
String inputQuery = "updateTime<1675702498000";
String[] query = {"sqlInjection:DESC"};
PostgresIndexQueryBuilder builder =
new PostgresIndexQueryBuilder(
"table_name", inputQuery, "", 0, 15, Arrays.asList(query), properties);
String expectedQuery =
"SELECT COUNT(json_data) FROM table_name WHERE update_time < ?::TIMESTAMPTZ";
assertEquals(expectedQuery, builder.getCountQuery());
}
@Test
void shouldAllowFullTextSearch() throws SQLException {
String freeText = "correlation-id";
String[] query = {"sqlInjection:DESC"};
PostgresIndexQueryBuilder builder =
new PostgresIndexQueryBuilder(
"table_name", "", freeText, 0, 15, Arrays.asList(query), properties);
String expectedQuery =
"SELECT json_data::TEXT FROM table_name WHERE jsonb_to_tsvector('english', json_data, '[\"all\"]') @@ to_tsquery(?) LIMIT ? OFFSET ?";
assertEquals(expectedQuery, builder.getQuery());
}
@Test
void shouldAllowFullTextSearchOnCountQuery() throws SQLException {
String freeText = "correlation-id";
String[] query = {"sqlInjection:DESC"};
PostgresIndexQueryBuilder builder =
new PostgresIndexQueryBuilder(
"table_name", "", freeText, 0, 15, Arrays.asList(query), properties);
String expectedQuery =
"SELECT COUNT(json_data) FROM table_name WHERE jsonb_to_tsvector('english', json_data, '[\"all\"]') @@ to_tsquery(?)";
assertEquals(expectedQuery, builder.getCountQuery());
}
@Test
void shouldAllowJsonSearch() throws SQLException {
String freeText = "{\"correlationId\":\"not-the-id\"}";
String[] query = {"sqlInjection:DESC"};
PostgresIndexQueryBuilder builder =
new PostgresIndexQueryBuilder(
"table_name", "", freeText, 0, 15, Arrays.asList(query), properties);
String expectedQuery =
"SELECT json_data::TEXT FROM table_name WHERE json_data @> ?::JSONB LIMIT ? OFFSET ?";
assertEquals(expectedQuery, builder.getQuery());
}
@Test
void shouldAllowJsonSearchOnCountQuery() throws SQLException {
String freeText = "{\"correlationId\":\"not-the-id\"}";
String[] query = {"sqlInjection:DESC"};
PostgresIndexQueryBuilder builder =
new PostgresIndexQueryBuilder(
"table_name", "", freeText, 0, 15, Arrays.asList(query), properties);
String expectedQuery =
"SELECT COUNT(json_data) FROM table_name WHERE json_data @> ?::JSONB";
assertEquals(expectedQuery, builder.getCountQuery());
}
@Test()
void shouldThrowIllegalArgumentExceptionWhenQueryStringIsInvalid() {
String inputQuery =
"workflowType IN (one,two) AND status IN (COMPLETED,RUNNING) AND startTime>1675701498000 AND xyz";
try {
new PostgresIndexQueryBuilder(
"table_name", inputQuery, "", 0, 15, new ArrayList<>(), properties);
fail("should have failed since xyz does not conform to expected format");
} catch (IllegalArgumentException e) {
assertEquals("Incorrectly formatted query string: xyz", e.getMessage());
}
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-persistence/src/test/java/com/netflix/conductor/postgres/util/PostgresQueueListenerTest.java | postgres-persistence/src/test/java/com/netflix/conductor/postgres/util/PostgresQueueListenerTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.util;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.util.*;
import javax.sql.DataSource;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.flyway.FlywayAutoConfiguration;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringRunner;
import org.testcontainers.shaded.com.fasterxml.jackson.databind.node.JsonNodeFactory;
import org.testcontainers.shaded.com.fasterxml.jackson.databind.node.ObjectNode;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.postgres.config.PostgresConfiguration;
import com.netflix.conductor.postgres.config.PostgresProperties;
import static org.junit.Assert.*;
@ContextConfiguration(
classes = {
TestObjectMapperConfiguration.class,
PostgresConfiguration.class,
FlywayAutoConfiguration.class
})
@RunWith(SpringRunner.class)
@TestPropertySource(
properties = {
"conductor.elasticsearch.version=0",
"spring.flyway.clean-disabled=false",
"conductor.database.type=postgres",
"conductor.postgres.experimentalQueueNotify=true",
"conductor.postgres.experimentalQueueNotifyStalePeriod=5000"
})
@SpringBootTest
public class PostgresQueueListenerTest {
private PostgresQueueListener listener;
@Qualifier("dataSource")
@Autowired
private DataSource dataSource;
@Autowired private PostgresProperties properties;
private void clearDb() {
try (Connection conn = dataSource.getConnection()) {
// Explicitly disable autoCommit to match HikariCP pool configuration
conn.setAutoCommit(false);
conn.prepareStatement("truncate table queue_message restart identity cascade")
.executeUpdate();
// Commit to ensure truncation is visible across connection pool
conn.commit();
} catch (Exception e) {
e.printStackTrace();
throw new RuntimeException(e);
}
}
private void sendNotification(String queueName, int queueDepth, long nextDelivery) {
JsonNodeFactory factory = JsonNodeFactory.instance;
ObjectNode payload = factory.objectNode();
ObjectNode queueNode = factory.objectNode();
queueNode.put("depth", queueDepth);
queueNode.put("nextDelivery", nextDelivery);
payload.put("__now__", System.currentTimeMillis());
payload.put(queueName, queueNode);
try (Connection conn = dataSource.getConnection()) {
conn.setAutoCommit(true);
PreparedStatement stmt =
conn.prepareStatement("SELECT pg_notify('conductor_queue_state', ?)");
stmt.setString(1, payload.toString());
stmt.execute();
} catch (Exception e) {
e.printStackTrace();
throw new RuntimeException(e);
}
}
private void createQueueMessage(String queue_name, String message_id) {
try (Connection conn = dataSource.getConnection()) {
conn.setAutoCommit(true);
PreparedStatement stmt =
conn.prepareStatement(
"INSERT INTO queue_message (deliver_on, queue_name, message_id, priority, offset_time_seconds, payload) VALUES (current_timestamp AT TIME ZONE 'UTC', ?,?,?,?,?)");
stmt.setString(1, queue_name);
stmt.setString(2, message_id);
stmt.setInt(3, 0);
stmt.setInt(4, 0);
stmt.setString(5, "dummy-payload");
stmt.execute();
} catch (Exception e) {
e.printStackTrace();
throw new RuntimeException(e);
}
}
private void popQueueMessage(String message_id) {
try (Connection conn = dataSource.getConnection()) {
conn.setAutoCommit(true);
PreparedStatement stmt =
conn.prepareStatement(
"UPDATE queue_message SET popped = TRUE where message_id = ?");
stmt.setString(1, message_id);
stmt.execute();
} catch (Exception e) {
e.printStackTrace();
throw new RuntimeException(e);
}
}
private void deleteQueueMessage(String message_id) {
try (Connection conn = dataSource.getConnection()) {
conn.setAutoCommit(true);
PreparedStatement stmt =
conn.prepareStatement("DELETE FROM queue_message where message_id = ?");
stmt.setString(1, message_id);
stmt.execute();
} catch (Exception e) {
e.printStackTrace();
throw new RuntimeException(e);
}
}
@Before
public void before() {
listener = new PostgresQueueListener(dataSource, properties);
clearDb();
}
@Test
public void testHasReadyMessages() {
assertFalse(listener.hasMessagesReady("dummy-task"));
sendNotification("dummy-task", 3, System.currentTimeMillis() - 1);
assertTrue(listener.hasMessagesReady("dummy-task"));
}
@Test
public void testHasReadyMessagesInFuture() throws InterruptedException {
assertFalse(listener.hasMessagesReady("dummy-task"));
sendNotification("dummy-task", 3, System.currentTimeMillis() + 100);
assertFalse(listener.hasMessagesReady("dummy-task"));
Thread.sleep(101);
assertTrue(listener.hasMessagesReady("dummy-task"));
}
@Test
public void testGetSize() {
assertEquals(0, listener.getSize("dummy-task").get().intValue());
sendNotification("dummy-task", 3, System.currentTimeMillis() + 100);
assertEquals(3, listener.getSize("dummy-task").get().intValue());
}
@Test
public void testTrigger() throws InterruptedException {
assertEquals(0, listener.getSize("dummy-task").get().intValue());
assertFalse(listener.hasMessagesReady("dummy-task"));
createQueueMessage("dummy-task", "dummy-id1");
createQueueMessage("dummy-task", "dummy-id2");
assertEquals(2, listener.getSize("dummy-task").get().intValue());
assertTrue(listener.hasMessagesReady("dummy-task"));
popQueueMessage("dummy-id2");
assertEquals(1, listener.getSize("dummy-task").get().intValue());
assertTrue(listener.hasMessagesReady("dummy-task"));
deleteQueueMessage("dummy-id2");
assertEquals(1, listener.getSize("dummy-task").get().intValue());
assertTrue(listener.hasMessagesReady("dummy-task"));
deleteQueueMessage("dummy-id1");
assertEquals(0, listener.getSize("dummy-task").get().intValue());
assertFalse(listener.hasMessagesReady("test-task"));
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-persistence/src/test/java/com/netflix/conductor/postgres/performance/PerformanceTest.java | postgres-persistence/src/test/java/com/netflix/conductor/postgres/performance/PerformanceTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.performance;
// SBMTODO: this test needs to be migrated
// reference - https://github.com/Netflix/conductor/pull/1940
// @Ignore("This test cannot be automated")
// public class PerformanceTest {
//
// public static final int MSGS = 1000;
// public static final int PRODUCER_BATCH = 10; // make sure MSGS % PRODUCER_BATCH == 0
// public static final int PRODUCERS = 4;
// public static final int WORKERS = 8;
// public static final int OBSERVERS = 4;
// public static final int OBSERVER_DELAY = 5000;
// public static final int UNACK_RUNNERS = 10;
// public static final int UNACK_DELAY = 500;
// public static final int WORKER_BATCH = 10;
// public static final int WORKER_BATCH_TIMEOUT = 500;
// public static final int COMPLETION_MONITOR_DELAY = 1000;
//
// private DataSource dataSource;
// private QueueDAO Q;
// private ExecutionDAO E;
//
// private final ExecutorService threadPool = Executors.newFixedThreadPool(PRODUCERS + WORKERS +
// OBSERVERS + UNACK_RUNNERS);
// private static final Logger LOGGER = LoggerFactory.getLogger(PerformanceTest.class);
//
// @Before
// public void setUp() {
// TestConfiguration testConfiguration = new TestConfiguration();
// configuration = new TestPostgresConfiguration(testConfiguration,
//
// "jdbc:postgresql://localhost:54320/conductor?charset=utf8&parseTime=true&interpolateParams=true",
// 10, 2);
// PostgresDataSourceProvider dataSource = new PostgresDataSourceProvider(configuration);
// this.dataSource = dataSource.get();
// resetAllData(this.dataSource);
// flywayMigrate(this.dataSource);
//
// final ObjectMapper objectMapper = new JsonMapperProvider().get();
// Q = new PostgresQueueDAO(objectMapper, this.dataSource);
// E = new PostgresExecutionDAO(objectMapper, this.dataSource);
// }
//
// @After
// public void tearDown() throws Exception {
// resetAllData(dataSource);
// }
//
// public static final String QUEUE = "task_queue";
//
// @Test
// public void testQueueDaoPerformance() throws InterruptedException {
// AtomicBoolean stop = new AtomicBoolean(false);
// Stopwatch start = Stopwatch.createStarted();
// AtomicInteger poppedCoutner = new AtomicInteger(0);
// HashMultiset<String> allPopped = HashMultiset.create();
//
// // Consumers - workers
// for (int i = 0; i < WORKERS; i++) {
// threadPool.submit(() -> {
// while (!stop.get()) {
// List<Message> pop = Q.pollMessages(QUEUE, WORKER_BATCH, WORKER_BATCH_TIMEOUT);
// LOGGER.info("Popped {} messages", pop.size());
// poppedCoutner.accumulateAndGet(pop.size(), Integer::sum);
//
// if (pop.size() == 0) {
// try {
// Thread.sleep(200);
// } catch (InterruptedException e) {
// throw new RuntimeException(e);
// }
// } else {
// LOGGER.info("Popped {}",
// pop.stream().map(Message::getId).collect(Collectors.toList()));
// }
//
// pop.forEach(popped -> {
// synchronized (allPopped) {
// allPopped.add(popped.getId());
// }
// boolean exists = Q.containsMessage(QUEUE, popped.getId());
// boolean ack = Q.ack(QUEUE, popped.getId());
//
// if (ack && exists) {
// // OK
// } else {
// LOGGER.error("Exists & Ack did not succeed for msg: {}", popped);
// }
// });
// }
// });
// }
//
// // Producers
// List<Future<?>> producers = Lists.newArrayList();
// for (int i = 0; i < PRODUCERS; i++) {
// Future<?> producer = threadPool.submit(() -> {
// try {
// // N messages
// for (int j = 0; j < MSGS / PRODUCER_BATCH; j++) {
// List<Message> randomMessages = getRandomMessages(PRODUCER_BATCH);
// Q.push(QUEUE, randomMessages);
// LOGGER.info("Pushed {} messages", PRODUCER_BATCH);
// LOGGER.info("Pushed {}",
// randomMessages.stream().map(Message::getId).collect(Collectors.toList()));
// }
// LOGGER.info("Pushed ALL");
// } catch (Exception e) {
// LOGGER.error("Something went wrong with producer", e);
// throw new RuntimeException(e);
// }
// });
//
// producers.add(producer);
// }
//
// // Observers
// for (int i = 0; i < OBSERVERS; i++) {
// threadPool.submit(() -> {
// while (!stop.get()) {
// try {
// int size = Q.getSize(QUEUE);
// Q.queuesDetail();
// LOGGER.info("Size {} messages", size);
// } catch (Exception e) {
// LOGGER.info("Queue size failed, nevermind");
// }
//
// try {
// Thread.sleep(OBSERVER_DELAY);
// } catch (InterruptedException e) {
// throw new RuntimeException(e);
// }
// }
// });
// }
//
// // Consumers - unack processor
// for (int i = 0; i < UNACK_RUNNERS; i++) {
// threadPool.submit(() -> {
// while (!stop.get()) {
// try {
// Q.processUnacks(QUEUE);
// } catch (Exception e) {
// LOGGER.info("Unack failed, nevermind", e);
// continue;
// }
// LOGGER.info("Unacked");
// try {
// Thread.sleep(UNACK_DELAY);
// } catch (InterruptedException e) {
// throw new RuntimeException(e);
// }
// }
// });
// }
//
// long elapsed;
// while (true) {
// try {
// Thread.sleep(COMPLETION_MONITOR_DELAY);
// } catch (InterruptedException e) {
// throw new RuntimeException(e);
// }
//
// int size = Q.getSize(QUEUE);
// LOGGER.info("MONITOR SIZE : {}", size);
//
// if (size == 0 && producers.stream().map(Future::isDone).reduce(true, (b1, b2) -> b1 &&
// b2)) {
// elapsed = start.elapsed(TimeUnit.MILLISECONDS);
// stop.set(true);
// break;
// }
// }
//
// threadPool.awaitTermination(10, TimeUnit.SECONDS);
// threadPool.shutdown();
// LOGGER.info("Finished in {} ms", elapsed);
// LOGGER.info("Throughput {} msgs/second", ((MSGS * PRODUCERS) / (elapsed * 1.0)) * 1000);
// LOGGER.info("Threads finished");
// if (poppedCoutner.get() != MSGS * PRODUCERS) {
// synchronized (allPopped) {
// List<String> duplicates = allPopped.entrySet().stream()
// .filter(stringEntry -> stringEntry.getCount() > 1)
// .map(stringEntry -> stringEntry.getElement() + ": " + stringEntry.getCount())
// .collect(Collectors.toList());
//
// LOGGER.error("Found duplicate pops: " + duplicates);
// }
// throw new RuntimeException("Popped " + poppedCoutner.get() + " != produced: " + MSGS *
// PRODUCERS);
// }
// }
//
// @Test
// public void testExecDaoPerformance() throws InterruptedException {
// AtomicBoolean stop = new AtomicBoolean(false);
// Stopwatch start = Stopwatch.createStarted();
// BlockingDeque<Task> msgQueue = new LinkedBlockingDeque<>(1000);
// HashMultiset<String> allPopped = HashMultiset.create();
//
// // Consumers - workers
// for (int i = 0; i < WORKERS; i++) {
// threadPool.submit(() -> {
// while (!stop.get()) {
// List<Task> popped = new ArrayList<>();
// while (true) {
// try {
// Task poll;
// poll = msgQueue.poll(10, TimeUnit.MILLISECONDS);
//
// if (poll == null) {
// // poll timed out
// continue;
// }
// synchronized (allPopped) {
// allPopped.add(poll.getTaskId());
// }
// popped.add(poll);
// if (stop.get() || popped.size() == WORKER_BATCH) {
// break;
// }
// } catch (InterruptedException e) {
// throw new RuntimeException(e);
// }
// }
//
// LOGGER.info("Popped {} messages", popped.size());
// LOGGER.info("Popped {}",
// popped.stream().map(Task::getTaskId).collect(Collectors.toList()));
//
// // Polling
// popped.stream()
// .peek(task -> {
// task.setWorkerId("someWorker");
// task.setPollCount(task.getPollCount() + 1);
// task.setStartTime(System.currentTimeMillis());
// })
// .forEach(task -> {
// try {
// // should always be false
// boolean concurrentLimit = E.exceedsInProgressLimit(task);
// task.setStartTime(System.currentTimeMillis());
// E.updateTask(task);
// LOGGER.info("Polled {}", task.getTaskId());
// } catch (Exception e) {
// LOGGER.error("Something went wrong with worker during poll", e);
// throw new RuntimeException(e);
// }
// });
//
// popped.forEach(task -> {
// try {
//
// String wfId = task.getWorkflowInstanceId();
// Workflow workflow = E.getWorkflow(wfId, true);
// E.getTask(task.getTaskId());
//
// task.setStatus(Task.Status.COMPLETED);
// task.setWorkerId("someWorker");
// task.setOutputData(Collections.singletonMap("a", "b"));
// E.updateTask(task);
// E.updateWorkflow(workflow);
// LOGGER.info("Updated {}", task.getTaskId());
// } catch (Exception e) {
// LOGGER.error("Something went wrong with worker during update", e);
// throw new RuntimeException(e);
// }
// });
//
// }
// });
// }
//
// Multiset<String> pushedTasks = HashMultiset.create();
//
// // Producers
// List<Future<?>> producers = Lists.newArrayList();
// for (int i = 0; i < PRODUCERS; i++) {
// Future<?> producer = threadPool.submit(() -> {
// // N messages
// for (int j = 0; j < MSGS / PRODUCER_BATCH; j++) {
// List<Task> randomTasks = getRandomTasks(PRODUCER_BATCH);
//
// Workflow wf = getWorkflow(randomTasks);
// E.createWorkflow(wf);
//
// E.createTasks(randomTasks);
// randomTasks.forEach(t -> {
// try {
// boolean offer = false;
// while (!offer) {
// offer = msgQueue.offer(t, 10, TimeUnit.MILLISECONDS);
// }
// } catch (InterruptedException e) {
// throw new RuntimeException(e);
// }
// });
// LOGGER.info("Pushed {} messages", PRODUCER_BATCH);
// List<String> collect =
// randomTasks.stream().map(Task::getTaskId).collect(Collectors.toList());
// synchronized (pushedTasks) {
// pushedTasks.addAll(collect);
// }
// LOGGER.info("Pushed {}", collect);
// }
// LOGGER.info("Pushed ALL");
// });
//
// producers.add(producer);
// }
//
// // Observers
// for (int i = 0; i < OBSERVERS; i++) {
// threadPool.submit(() -> {
// while (!stop.get()) {
// try {
// List<Task> size = E.getPendingTasksForTaskType("taskType");
// LOGGER.info("Size {} messages", size.size());
// LOGGER.info("Size q {} messages", msgQueue.size());
// synchronized (allPopped) {
// LOGGER.info("All pp {} messages", allPopped.size());
// }
// LOGGER.info("Workflows by correlation id size: {}",
// E.getWorkflowsByCorrelationId("abcd", "1", true).size());
// LOGGER.info("Workflows by correlation id size: {}",
// E.getWorkflowsByCorrelationId("abcd", "2", true).size());
// LOGGER.info("Workflows running ids: {}", E.getRunningWorkflowIds("abcd",
// 1));
// LOGGER.info("Workflows pending count: {}",
// E.getPendingWorkflowCount("abcd"));
// } catch (Exception e) {
// LOGGER.warn("Observer failed ", e);
// }
// try {
// Thread.sleep(OBSERVER_DELAY);
// } catch (InterruptedException e) {
// throw new RuntimeException(e);
// }
// }
// });
// }
//
// long elapsed;
// while (true) {
// try {
// Thread.sleep(COMPLETION_MONITOR_DELAY);
// } catch (InterruptedException e) {
// throw new RuntimeException(e);
// }
//
// int size;
// try {
// size = E.getPendingTasksForTaskType("taskType").size();
// } catch (Exception e) {
// LOGGER.warn("Monitor failed", e);
// continue;
// }
// LOGGER.info("MONITOR SIZE : {}", size);
//
// if (size == 0 && producers.stream().map(Future::isDone).reduce(true, (b1, b2) -> b1 &&
// b2)) {
// elapsed = start.elapsed(TimeUnit.MILLISECONDS);
// stop.set(true);
// break;
// }
// }
//
// threadPool.awaitTermination(10, TimeUnit.SECONDS);
// threadPool.shutdown();
// LOGGER.info("Finished in {} ms", elapsed);
// LOGGER.info("Throughput {} msgs/second", ((MSGS * PRODUCERS) / (elapsed * 1.0)) * 1000);
// LOGGER.info("Threads finished");
//
// List<String> duplicates = pushedTasks.entrySet().stream()
// .filter(stringEntry -> stringEntry.getCount() > 1)
// .map(stringEntry -> stringEntry.getElement() + ": " + stringEntry.getCount())
// .collect(Collectors.toList());
//
// LOGGER.error("Found duplicate pushes: " + duplicates);
// }
//
// private Workflow getWorkflow(List<Task> randomTasks) {
// Workflow wf = new Workflow();
// wf.setWorkflowId(randomTasks.get(0).getWorkflowInstanceId());
// wf.setCorrelationId(wf.getWorkflowId());
// wf.setTasks(randomTasks);
// WorkflowDef workflowDefinition = new WorkflowDef();
// workflowDefinition.setName("abcd");
// wf.setWorkflowDefinition(workflowDefinition);
// wf.setStartTime(System.currentTimeMillis());
// return wf;
// }
//
// private List<Task> getRandomTasks(int i) {
// String timestamp = Long.toString(System.nanoTime());
// return IntStream.range(0, i).mapToObj(j -> {
// String id = Thread.currentThread().getId() + "_" + timestamp + "_" + j;
// Task task = new Task();
// task.setTaskId(id);
// task.setCorrelationId(Integer.toString(j));
// task.setTaskType("taskType");
// task.setReferenceTaskName("refName" + j);
// task.setWorkflowType("task_wf");
// task.setWorkflowInstanceId(Thread.currentThread().getId() + "_" + timestamp);
// return task;
// }).collect(Collectors.toList());
// }
//
// private List<Message> getRandomMessages(int i) {
// String timestamp = Long.toString(System.nanoTime());
// return IntStream.range(0, i).mapToObj(j -> {
// String id = Thread.currentThread().getId() + "_" + timestamp + "_" + j;
// return new Message(id, "{ \"a\": \"b\", \"timestamp\": \" " + timestamp + " \"}",
// "receipt");
// }).collect(Collectors.toList());
// }
//
// private void flywayMigrate(DataSource dataSource) {
// FluentConfiguration flywayConfiguration = Flyway.configure()
// .table(configuration.getFlywayTable())
// .locations(Paths.get("db","migration_postgres").toString())
// .dataSource(dataSource)
// .placeholderReplacement(false);
//
// Flyway flyway = flywayConfiguration.load();
// try {
// flyway.migrate();
// } catch (FlywayException e) {
// if (e.getMessage().contains("non-empty")) {
// return;
// }
// throw e;
// }
// }
//
// public void resetAllData(DataSource dataSource) {
// // TODO
// }
// }
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-persistence/src/test/java/com/netflix/conductor/postgres/config/PostgresConfigurationDataMigrationTest.java | postgres-persistence/src/test/java/com/netflix/conductor/postgres/config/PostgresConfigurationDataMigrationTest.java | /*
* Copyright 2024 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.config;
import java.util.Arrays;
import java.util.Objects;
import org.flywaydb.core.Flyway;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.flyway.FlywayAutoConfiguration;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.core.io.Resource;
import org.springframework.core.io.support.ResourcePatternResolver;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import static org.junit.Assert.assertTrue;
@ContextConfiguration(
        classes = {
            TestObjectMapperConfiguration.class,
            PostgresConfiguration.class,
            FlywayAutoConfiguration.class
        })
@RunWith(SpringRunner.class)
@TestPropertySource(
        properties = {
            "conductor.app.asyncIndexingEnabled=false",
            "conductor.elasticsearch.version=0",
            "conductor.indexing.type=postgres",
            "conductor.postgres.applyDataMigrations=false",
            "spring.flyway.clean-disabled=false"
        })
@SpringBootTest
public class PostgresConfigurationDataMigrationTest {
    @Autowired Flyway flyway;
    @Autowired ResourcePatternResolver resourcePatternResolver;
    // Apply schema migrations before each test so flyway.info() is populated.
    // NOTE(review): the previous comment said "clean the database between tests",
    // but this only runs migrate(); no clean() is invoked here.
    @Before
    public void before() {
        flyway.migrate();
    }
    /**
     * With conductor.postgres.applyDataMigrations=false, none of the scripts under
     * db/migration_postgres_data should appear among the applied Flyway migrations.
     */
    @Test
    public void dataMigrationIsNotAppliedWhenDisabled() throws Exception {
        var files = resourcePatternResolver.getResources("classpath:db/migration_postgres_data/*");
        Arrays.stream(flyway.info().applied())
                .forEach(
                        migrationInfo ->
                                assertTrue(
                                        "Data migration wrongly applied: "
                                                + migrationInfo.getScript(),
                                        Arrays.stream(files)
                                                .map(Resource::getFilename)
                                                .filter(Objects::nonNull)
                                                .noneMatch(
                                                        fileName ->
                                                                fileName.contains(
                                                                        migrationInfo
                                                                                .getScript()))));
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-persistence/src/test/java/com/netflix/conductor/test/integration/grpc/postgres/PostgresGrpcEndToEndTest.java | postgres-persistence/src/test/java/com/netflix/conductor/test/integration/grpc/postgres/PostgresGrpcEndToEndTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.test.integration.grpc.postgres;
import org.junit.Before;
import org.junit.runner.RunWith;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.client.grpc.EventClient;
import com.netflix.conductor.client.grpc.MetadataClient;
import com.netflix.conductor.client.grpc.TaskClient;
import com.netflix.conductor.client.grpc.WorkflowClient;
import com.netflix.conductor.test.integration.grpc.AbstractGrpcEndToEndTest;
/**
 * gRPC end-to-end test running Conductor against a Testcontainers-managed Postgres
 * (note the "jdbc:tc:" datasource URL) with the experimental queue-notify feature on.
 */
@RunWith(SpringRunner.class)
@TestPropertySource(
        properties = {
            "conductor.db.type=postgres",
            "conductor.postgres.experimentalQueueNotify=true",
            "conductor.app.asyncIndexingEnabled=false",
            "conductor.elasticsearch.version=7",
            "conductor.grpc-server.port=8098",
            "conductor.indexing.type=elasticsearch",
            "spring.datasource.url=jdbc:tc:postgresql:11.15-alpine:///conductor", // "tc" prefix
            // starts the
            // Postgres container
            "spring.datasource.username=postgres",
            "spring.datasource.password=postgres",
            "spring.datasource.hikari.maximum-pool-size=8",
            // NOTE(review): minimum-idle is a connection count, not a duration; 300000
            // looks like a value intended for idle-timeout (ms) — confirm intent.
            "spring.datasource.hikari.minimum-idle=300000",
            "spring.flyway.clean-disabled=true",
            "conductor.app.workflow.name-validation.enabled=true"
        })
public class PostgresGrpcEndToEndTest extends AbstractGrpcEndToEndTest {
    // All clients talk to the embedded gRPC server configured on port 8098 above.
    @Before
    public void init() {
        taskClient = new TaskClient("localhost", 8098);
        workflowClient = new WorkflowClient("localhost", 8098);
        metadataClient = new MetadataClient("localhost", 8098);
        eventClient = new EventClient("localhost", 8098);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresExecutionDAO.java | postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresExecutionDAO.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.dao;
import java.sql.Connection;
import java.sql.Date;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import javax.sql.DataSource;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.common.metadata.events.EventExecution;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.dao.ConcurrentExecutionLimitDAO;
import com.netflix.conductor.dao.ExecutionDAO;
import com.netflix.conductor.dao.RateLimitingDAO;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import com.netflix.conductor.postgres.util.ExecutorsUtil;
import com.netflix.conductor.postgres.util.Query;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import jakarta.annotation.*;
public class PostgresExecutionDAO extends PostgresBaseDAO
implements ExecutionDAO, RateLimitingDAO, ConcurrentExecutionLimitDAO {
    // Single-threaded scheduler used solely for delayed workflow removal
    // (see removeWorkflowWithExpiry); shut down in destroy().
    private final ScheduledExecutorService scheduledExecutorService;
    public PostgresExecutionDAO(
            RetryTemplate retryTemplate, ObjectMapper objectMapper, DataSource dataSource) {
        super(retryTemplate, objectMapper, dataSource);
        this.scheduledExecutorService =
                Executors.newSingleThreadScheduledExecutor(
                        ExecutorsUtil.newNamedThreadFactory("postgres-execution-"));
    }
private static String dateStr(Long timeInMs) {
Date date = new Date(timeInMs);
return dateStr(date);
}
private static String dateStr(Date date) {
SimpleDateFormat format = new SimpleDateFormat("yyyyMMdd");
return format.format(date);
}
@PreDestroy
public void destroy() {
try {
this.scheduledExecutorService.shutdown();
if (scheduledExecutorService.awaitTermination(30, TimeUnit.SECONDS)) {
logger.debug("tasks completed, shutting down");
} else {
logger.warn("Forcing shutdown after waiting for 30 seconds");
scheduledExecutorService.shutdownNow();
}
} catch (InterruptedException ie) {
logger.warn(
"Shutdown interrupted, invoking shutdownNow on scheduledExecutorService for removeWorkflowWithExpiry",
ie);
scheduledExecutorService.shutdownNow();
Thread.currentThread().interrupt();
}
}
    /**
     * Returns the in-progress tasks of the given task definition within one workflow.
     * Matching rows are locked FOR SHARE for the duration of the transaction.
     */
    @Override
    public List<TaskModel> getPendingTasksByWorkflow(String taskDefName, String workflowId) {
        // @formatter:off
        String GET_IN_PROGRESS_TASKS_FOR_WORKFLOW =
                "SELECT json_data FROM task_in_progress tip "
                        + "INNER JOIN task t ON t.task_id = tip.task_id "
                        + "WHERE task_def_name = ? AND workflow_id = ? FOR SHARE";
        // @formatter:on
        return queryWithTransaction(
                GET_IN_PROGRESS_TASKS_FOR_WORKFLOW,
                q ->
                        q.addParameter(taskDefName)
                                .addParameter(workflowId)
                                .executeAndFetch(TaskModel.class));
    }
@Override
public List<TaskModel> getTasks(String taskDefName, String startKey, int count) {
List<TaskModel> tasks = new ArrayList<>(count);
List<TaskModel> pendingTasks = getPendingTasksForTaskType(taskDefName);
boolean startKeyFound = startKey == null;
int found = 0;
for (TaskModel pendingTask : pendingTasks) {
if (!startKeyFound) {
if (pendingTask.getTaskId().equals(startKey)) {
startKeyFound = true;
// noinspection ConstantConditions
if (startKey != null) {
continue;
}
}
}
if (startKeyFound && found < count) {
tasks.add(pendingTask);
found++;
}
}
return tasks;
}
private static String taskKey(TaskModel task) {
return task.getReferenceTaskName() + "_" + task.getRetryCount();
}
    /**
     * Persists the given tasks, skipping any whose (workflowId, refName_retryCount) key
     * is already scheduled — this makes task creation idempotent across retries.
     *
     * @return only the tasks actually created by this call
     */
    @Override
    public List<TaskModel> createTasks(List<TaskModel> tasks) {
        List<TaskModel> created = Lists.newArrayListWithCapacity(tasks.size());
        withTransaction(
                connection -> {
                    for (TaskModel task : tasks) {
                        validate(task);
                        task.setScheduledTime(System.currentTimeMillis());
                        final String taskKey = taskKey(task);
                        // dedupe guard: only the first scheduling of a given key proceeds
                        boolean scheduledTaskAdded = addScheduledTask(connection, task, taskKey);
                        if (!scheduledTaskAdded) {
                            logger.trace(
                                    "Task already scheduled, skipping the run "
                                            + task.getTaskId()
                                            + ", ref="
                                            + task.getReferenceTaskName()
                                            + ", key="
                                            + taskKey);
                            continue;
                        }
                        insertOrUpdateTaskData(connection, task);
                        addWorkflowToTaskMapping(connection, task);
                        addTaskInProgress(connection, task);
                        updateTask(connection, task);
                        created.add(task);
                    }
                });
        return created;
    }
    /** Updates a single task inside its own transaction. */
    @Override
    public void updateTask(TaskModel task) {
        withTransaction(connection -> updateTask(connection, task));
    }
    /**
     * This is a dummy implementation and this feature is not for Postgres backed Conductor
     *
     * @param task: which needs to be evaluated whether it is rateLimited or not
     * @return always false — frequency-based rate limiting is not supported by this DAO
     */
    @Override
    public boolean exceedsRateLimitPerFrequency(TaskModel task, TaskDef taskDef) {
        return false;
    }
@Override
public boolean exceedsLimit(TaskModel task) {
Optional<TaskDef> taskDefinition = task.getTaskDefinition();
if (taskDefinition.isEmpty()) {
return false;
}
TaskDef taskDef = taskDefinition.get();
int limit = taskDef.concurrencyLimit();
if (limit <= 0) {
return false;
}
long current = getInProgressTaskCount(task.getTaskDefName());
if (current >= limit) {
Monitors.recordTaskConcurrentExecutionLimited(task.getTaskDefName(), limit);
return true;
}
logger.info(
"Task execution count for {}: limit={}, current={}",
task.getTaskDefName(),
limit,
getInProgressTaskCount(task.getTaskDefName()));
String taskId = task.getTaskId();
List<String> tasksInProgressInOrderOfArrival =
findAllTasksInProgressInOrderOfArrival(task, limit);
boolean rateLimited = !tasksInProgressInOrderOfArrival.contains(taskId);
if (rateLimited) {
logger.info(
"Task execution count limited. {}, limit {}, current {}",
task.getTaskDefName(),
limit,
getInProgressTaskCount(task.getTaskDefName()));
Monitors.recordTaskConcurrentExecutionLimited(task.getTaskDefName(), limit);
}
return rateLimited;
}
    /**
     * Removes a task and all of its bookkeeping rows (schedule, workflow mapping,
     * in-progress marker, payload).
     *
     * @return false when no task with the given id exists
     */
    @Override
    public boolean removeTask(String taskId) {
        TaskModel task = getTask(taskId);
        if (task == null) {
            logger.warn("No such task found by id {}", taskId);
            return false;
        }
        final String taskKey = taskKey(task);
        withTransaction(
                connection -> {
                    removeScheduledTask(connection, task, taskKey);
                    removeWorkflowToTaskMapping(connection, task);
                    removeTaskInProgress(connection, task);
                    removeTaskData(connection, task);
                });
        return true;
    }
@Override
public TaskModel getTask(String taskId) {
String GET_TASK = "SELECT json_data FROM task WHERE task_id = ?";
return queryWithTransaction(
GET_TASK, q -> q.addParameter(taskId).executeAndFetchFirst(TaskModel.class));
}
@Override
public List<TaskModel> getTasks(List<String> taskIds) {
if (taskIds.isEmpty()) {
return Lists.newArrayList();
}
return getWithRetriedTransactions(c -> getTasks(c, taskIds));
}
    /**
     * Returns all in-progress tasks of the given type. Uses FOR UPDATE SKIP LOCKED so
     * concurrent pollers neither block on nor double-read each other's rows.
     */
    @Override
    public List<TaskModel> getPendingTasksForTaskType(String taskName) {
        Preconditions.checkNotNull(taskName, "task name cannot be null");
        // @formatter:off
        String GET_IN_PROGRESS_TASKS_FOR_TYPE =
                "SELECT json_data FROM task_in_progress tip "
                        + "INNER JOIN task t ON t.task_id = tip.task_id "
                        + "WHERE task_def_name = ? FOR UPDATE SKIP LOCKED";
        // @formatter:on
        return queryWithTransaction(
                GET_IN_PROGRESS_TASKS_FOR_TYPE,
                q -> q.addParameter(taskName).executeAndFetch(TaskModel.class));
    }
    /** Loads all tasks of a workflow via the workflow_to_task mapping table. */
    @Override
    public List<TaskModel> getTasksForWorkflow(String workflowId) {
        String GET_TASKS_FOR_WORKFLOW =
                "SELECT task_id FROM workflow_to_task WHERE workflow_id = ? FOR SHARE";
        return getWithRetriedTransactions(
                tx ->
                        query(
                                tx,
                                GET_TASKS_FOR_WORKFLOW,
                                q -> {
                                    List<String> taskIds =
                                            q.addParameter(workflowId)
                                                    .executeScalarList(String.class);
                                    return getTasks(tx, taskIds);
                                }));
    }
    /** Inserts a new workflow row; returns the workflow id. */
    @Override
    public String createWorkflow(WorkflowModel workflow) {
        return insertOrUpdateWorkflow(workflow, false);
    }
    /** Updates an existing workflow row; returns the workflow id. */
    @Override
    public String updateWorkflow(WorkflowModel workflow) {
        return insertOrUpdateWorkflow(workflow, true);
    }
    /**
     * Removes a workflow, its def/pending mappings, and all of its tasks.
     *
     * @return true only if the workflow existed and every task removal succeeded
     */
    @Override
    public boolean removeWorkflow(String workflowId) {
        boolean removed = false;
        WorkflowModel workflow = getWorkflow(workflowId, true);
        if (workflow != null) {
            withTransaction(
                    connection -> {
                        removeWorkflowDefToWorkflowMapping(connection, workflow);
                        removeWorkflow(connection, workflowId);
                        removePendingWorkflow(connection, workflow.getWorkflowName(), workflowId);
                    });
            removed = true;
            // task removal happens outside the transaction above; a single failure flips
            // the overall result to false but does not roll back the workflow deletion
            for (TaskModel task : workflow.getTasks()) {
                if (!removeTask(task.getTaskId())) {
                    removed = false;
                }
            }
        }
        return removed;
    }
    /** Scheduled executor based implementation. */
    @Override
    public boolean removeWorkflowWithExpiry(String workflowId, int ttlSeconds) {
        // fire-and-forget: always returns true; failures surface only as a warning
        // when the delayed removal eventually runs
        scheduledExecutorService.schedule(
                () -> {
                    try {
                        removeWorkflow(workflowId);
                    } catch (Throwable e) {
                        logger.warn("Unable to remove workflow: {} with expiry", workflowId, e);
                    }
                },
                ttlSeconds,
                TimeUnit.SECONDS);
        return true;
    }
    /** Drops the pending marker for the given workflow type/id pair. */
    @Override
    public void removeFromPendingWorkflow(String workflowType, String workflowId) {
        withTransaction(connection -> removePendingWorkflow(connection, workflowType, workflowId));
    }
    /** Loads a workflow including its tasks. */
    @Override
    public WorkflowModel getWorkflow(String workflowId) {
        return getWorkflow(workflowId, true);
    }
@Override
public WorkflowModel getWorkflow(String workflowId, boolean includeTasks) {
WorkflowModel workflow = getWithRetriedTransactions(tx -> readWorkflow(tx, workflowId));
if (workflow != null) {
if (includeTasks) {
List<TaskModel> tasks = getTasksForWorkflow(workflowId);
tasks.sort(Comparator.comparingInt(TaskModel::getSeq));
workflow.setTasks(tasks);
}
}
return workflow;
}
    /**
     * @param workflowName name of the workflow
     * @param version the workflow version (not used by the query — see below)
     * @return list of workflow ids that are in RUNNING state <em>returns workflows of all versions
     *     for the given workflow name</em>
     */
    @Override
    public List<String> getRunningWorkflowIds(String workflowName, int version) {
        Preconditions.checkNotNull(workflowName, "workflowName cannot be null");
        String GET_PENDING_WORKFLOW_IDS =
                "SELECT workflow_id FROM workflow_pending WHERE workflow_type = ? FOR SHARE SKIP LOCKED";
        return queryWithTransaction(
                GET_PENDING_WORKFLOW_IDS,
                q -> q.addParameter(workflowName).executeScalarList(String.class));
    }
    /**
     * @param workflowName Name of the workflow
     * @param version the workflow version
     * @return list of workflows that are in RUNNING state
     */
    @Override
    public List<WorkflowModel> getPendingWorkflowsByType(String workflowName, int version) {
        Preconditions.checkNotNull(workflowName, "workflowName cannot be null");
        // loads every running workflow of the name, then filters to the requested version
        return getRunningWorkflowIds(workflowName, version).stream()
                .map(this::getWorkflow)
                .filter(workflow -> workflow.getWorkflowVersion() == version)
                .collect(Collectors.toList());
    }
    /** Counts workflows of the given name currently marked pending. */
    @Override
    public long getPendingWorkflowCount(String workflowName) {
        Preconditions.checkNotNull(workflowName, "workflowName cannot be null");
        String GET_PENDING_WORKFLOW_COUNT =
                "SELECT COUNT(*) FROM workflow_pending WHERE workflow_type = ?";
        return queryWithTransaction(
                GET_PENDING_WORKFLOW_COUNT, q -> q.addParameter(workflowName).executeCount());
    }
    /** Counts tasks of the given definition with an active in-progress marker. */
    @Override
    public long getInProgressTaskCount(String taskDefName) {
        String GET_IN_PROGRESS_TASK_COUNT =
                "SELECT COUNT(*) FROM task_in_progress WHERE task_def_name = ? AND in_progress_status = true";
        return queryWithTransaction(
                GET_IN_PROGRESS_TASK_COUNT, q -> q.addParameter(taskDefName).executeCount());
    }
    /**
     * Loads all workflows of the given name created between startTime and endTime
     * (epoch millis, inclusive). Candidate ids come from the date-keyed
     * workflow_def_to_workflow table; each workflow is then re-checked against the
     * exact createTime bounds. Workflows that fail to load are logged and skipped.
     */
    @Override
    public List<WorkflowModel> getWorkflowsByType(
            String workflowName, Long startTime, Long endTime) {
        Preconditions.checkNotNull(workflowName, "workflowName cannot be null");
        Preconditions.checkNotNull(startTime, "startTime cannot be null");
        Preconditions.checkNotNull(endTime, "endTime cannot be null");
        List<WorkflowModel> workflows = new LinkedList<>();
        withTransaction(
                tx -> {
                    // @formatter:off
                    String GET_ALL_WORKFLOWS_FOR_WORKFLOW_DEF =
                            "SELECT workflow_id FROM workflow_def_to_workflow "
                                    + "WHERE workflow_def = ? AND date_str BETWEEN ? AND ? FOR SHARE SKIP LOCKED";
                    // @formatter:on
                    List<String> workflowIds =
                            query(
                                    tx,
                                    GET_ALL_WORKFLOWS_FOR_WORKFLOW_DEF,
                                    q ->
                                            q.addParameter(workflowName)
                                                    .addParameter(dateStr(startTime))
                                                    .addParameter(dateStr(endTime))
                                                    .executeScalarList(String.class));
                    workflowIds.forEach(
                            workflowId -> {
                                try {
                                    WorkflowModel wf = getWorkflow(workflowId);
                                    if (wf.getCreateTime() >= startTime
                                            && wf.getCreateTime() <= endTime) {
                                        workflows.add(wf);
                                    }
                                } catch (Exception e) {
                                    logger.error(
                                            "Unable to load workflow id {} with name {}",
                                            workflowId,
                                            workflowName,
                                            e);
                                }
                            });
                });
        return workflows;
    }
    /**
     * Loads workflows of the given name matching the correlation id.
     *
     * <p>NOTE(review): the {@code includeTasks} flag is not referenced in this body —
     * returned workflows carry whatever task state is embedded in json_data. Confirm
     * whether tasks should be loaded explicitly when includeTasks is true.
     */
    @Override
    public List<WorkflowModel> getWorkflowsByCorrelationId(
            String workflowName, String correlationId, boolean includeTasks) {
        Preconditions.checkNotNull(correlationId, "correlationId cannot be null");
        String GET_WORKFLOWS_BY_CORRELATION_ID =
                "SELECT w.json_data FROM workflow w left join workflow_def_to_workflow wd on w.workflow_id = wd.workflow_id WHERE w.correlation_id = ? and wd.workflow_def = ? FOR SHARE SKIP LOCKED";
        return queryWithTransaction(
                GET_WORKFLOWS_BY_CORRELATION_ID,
                q ->
                        q.addParameter(correlationId)
                                .addParameter(workflowName)
                                .executeAndFetch(WorkflowModel.class));
    }
    /** This DAO supports querying workflows across types. */
    @Override
    public boolean canSearchAcrossWorkflows() {
        return true;
    }
    /**
     * Inserts an event execution record.
     *
     * @return the result of the underlying insert — presumably true when a new row was
     *     created; confirm against insertEventExecution
     * @throws NonTransientException when the insert fails
     */
    @Override
    public boolean addEventExecution(EventExecution eventExecution) {
        try {
            return getWithRetriedTransactions(tx -> insertEventExecution(tx, eventExecution));
        } catch (Exception e) {
            throw new NonTransientException(
                    "Unable to add event execution " + eventExecution.getId(), e);
        }
    }
    /** Deletes an event execution record; wraps failures in NonTransientException. */
    @Override
    public void removeEventExecution(EventExecution eventExecution) {
        try {
            withTransaction(tx -> removeEventExecution(tx, eventExecution));
        } catch (Exception e) {
            throw new NonTransientException(
                    "Unable to remove event execution " + eventExecution.getId(), e);
        }
    }
    /** Updates an event execution record; wraps failures in NonTransientException. */
    @Override
    public void updateEventExecution(EventExecution eventExecution) {
        try {
            withTransaction(tx -> updateEventExecution(tx, eventExecution));
        } catch (Exception e) {
            throw new NonTransientException(
                    "Unable to update event execution " + eventExecution.getId(), e);
        }
    }
    /**
     * Loads up to {@code max} executions for the given handler/event/message. Execution
     * ids are derived as messageId + "_" + index; the scan stops at the first missing
     * index.
     */
    public List<EventExecution> getEventExecutions(
            String eventHandlerName, String eventName, String messageId, int max) {
        try {
            List<EventExecution> executions = Lists.newLinkedList();
            withTransaction(
                    tx -> {
                        for (int i = 0; i < max; i++) {
                            String executionId =
                                    messageId + "_"
                                            + i; // see SimpleEventProcessor.handle to understand
                            // how the
                            // execution id is set
                            EventExecution ee =
                                    readEventExecution(
                                            tx,
                                            eventHandlerName,
                                            eventName,
                                            messageId,
                                            executionId);
                            if (ee == null) {
                                break;
                            }
                            executions.add(ee);
                        }
                    });
            return executions;
        } catch (Exception e) {
            String message =
                    String.format(
                            "Unable to get event executions for eventHandlerName=%s, eventName=%s, messageId=%s",
                            eventHandlerName, eventName, messageId);
            throw new NonTransientException(message, e);
        }
    }
    /** Batch-loads tasks by id on an existing connection; empty input yields empty list. */
    private List<TaskModel> getTasks(Connection connection, List<String> taskIds) {
        if (taskIds.isEmpty()) {
            return Lists.newArrayList();
        }
        // Generate a formatted query string with a variable number of bind params based
        // on taskIds.size()
        final String GET_TASKS_FOR_IDS =
                String.format(
                        "SELECT json_data FROM task WHERE task_id IN (%s) AND json_data IS NOT NULL",
                        Query.generateInBindings(taskIds.size()));
        return query(
                connection,
                GET_TASKS_FOR_IDS,
                q -> q.addParameters(taskIds).executeAndFetch(TaskModel.class));
    }
    /**
     * Inserts or updates a workflow row and keeps workflow_pending in sync: terminal
     * workflows are removed from it, non-terminal ones (re)added. The task list is
     * detached while serializing so json_data stays task-free, then restored on the
     * in-memory object.
     *
     * @return the workflow id
     */
    private String insertOrUpdateWorkflow(WorkflowModel workflow, boolean update) {
        Preconditions.checkNotNull(workflow, "workflow object cannot be null");
        boolean terminal = workflow.getStatus().isTerminal();
        List<TaskModel> tasks = workflow.getTasks();
        workflow.setTasks(Lists.newLinkedList());
        withTransaction(
                tx -> {
                    if (!update) {
                        addWorkflow(tx, workflow);
                        addWorkflowDefToWorkflowMapping(tx, workflow);
                    } else {
                        updateWorkflow(tx, workflow);
                    }
                    if (terminal) {
                        removePendingWorkflow(
                                tx, workflow.getWorkflowName(), workflow.getWorkflowId());
                    } else {
                        addPendingWorkflow(
                                tx, workflow.getWorkflowName(), workflow.getWorkflowId());
                    }
                });
        workflow.setTasks(tasks);
        return workflow.getWorkflowId();
    }
    /**
     * Persists a task update: maintains the in-progress flag for concurrency-limited
     * task defs, writes the payload, clears the in-progress row on terminal status, and
     * ensures the workflow-to-task mapping exists.
     */
    private void updateTask(Connection connection, TaskModel task) {
        Optional<TaskDef> taskDefinition = task.getTaskDefinition();
        if (taskDefinition.isPresent() && taskDefinition.get().concurrencyLimit() > 0) {
            boolean inProgress =
                    task.getStatus() != null
                            && task.getStatus().equals(TaskModel.Status.IN_PROGRESS);
            updateInProgressStatus(connection, task, inProgress);
        }
        insertOrUpdateTaskData(connection, task);
        if (task.getStatus() != null && task.getStatus().isTerminal()) {
            removeTaskInProgress(connection, task);
        }
        addWorkflowToTaskMapping(connection, task);
    }
    /** Reads a workflow payload by id, or null when absent. */
    private WorkflowModel readWorkflow(Connection connection, String workflowId) {
        String GET_WORKFLOW = "SELECT json_data FROM workflow WHERE workflow_id = ?";
        return query(
                connection,
                GET_WORKFLOW,
                q -> q.addParameter(workflowId).executeAndFetchFirst(WorkflowModel.class));
    }
    /** Inserts a new workflow row (id, correlation id, serialized payload). */
    private void addWorkflow(Connection connection, WorkflowModel workflow) {
        String INSERT_WORKFLOW =
                "INSERT INTO workflow (workflow_id, correlation_id, json_data) VALUES (?, ?, ?)";
        execute(
                connection,
                INSERT_WORKFLOW,
                q ->
                        q.addParameter(workflow.getWorkflowId())
                                .addParameter(workflow.getCorrelationId())
                                .addJsonParameter(workflow)
                                .executeUpdate());
    }
    /** Rewrites the serialized payload of an existing workflow row. */
    private void updateWorkflow(Connection connection, WorkflowModel workflow) {
        String UPDATE_WORKFLOW =
                "UPDATE workflow SET json_data = ?, modified_on = CURRENT_TIMESTAMP WHERE workflow_id = ?";
        execute(
                connection,
                UPDATE_WORKFLOW,
                q ->
                        q.addJsonParameter(workflow)
                                .addParameter(workflow.getWorkflowId())
                                .executeUpdate());
    }
    /** Deletes a workflow row by id. */
    private void removeWorkflow(Connection connection, String workflowId) {
        String REMOVE_WORKFLOW = "DELETE FROM workflow WHERE workflow_id = ?";
        execute(connection, REMOVE_WORKFLOW, q -> q.addParameter(workflowId).executeDelete());
    }
    /**
     * Marks a workflow as pending. The SELECT-EXISTS pre-check skips the insert in the
     * common already-pending case; ON CONFLICT DO NOTHING makes the insert itself safe
     * under concurrent writers.
     */
    private void addPendingWorkflow(Connection connection, String workflowType, String workflowId) {
        String EXISTS_PENDING_WORKFLOW =
                "SELECT EXISTS(SELECT 1 FROM workflow_pending WHERE workflow_type = ? AND workflow_id = ?)";
        boolean exists =
                query(
                        connection,
                        EXISTS_PENDING_WORKFLOW,
                        q -> q.addParameter(workflowType).addParameter(workflowId).exists());
        if (!exists) {
            String INSERT_PENDING_WORKFLOW =
                    "INSERT INTO workflow_pending (workflow_type, workflow_id) VALUES (?, ?) ON CONFLICT (workflow_type,workflow_id) DO NOTHING";
            execute(
                    connection,
                    INSERT_PENDING_WORKFLOW,
                    q -> q.addParameter(workflowType).addParameter(workflowId).executeUpdate());
        }
    }
    /** Clears the pending marker for a workflow. */
    private void removePendingWorkflow(
            Connection connection, String workflowType, String workflowId) {
        String REMOVE_PENDING_WORKFLOW =
                "DELETE FROM workflow_pending WHERE workflow_type = ? AND workflow_id = ?";
        execute(
                connection,
                REMOVE_PENDING_WORKFLOW,
                q -> q.addParameter(workflowType).addParameter(workflowId).executeDelete());
    }
    /**
     * Upserts a task payload row.
     *
     * <p>Most times the row will be updated so let's try the update first. This used to
     * be an 'INSERT/ON CONFLICT do update' sql statement. The problem with that is that
     * if we try the INSERT first, the sequence will be increased even if the ON CONFLICT
     * happens.
     */
    private void insertOrUpdateTaskData(Connection connection, TaskModel task) {
        String UPDATE_TASK =
                "UPDATE task SET json_data=?, modified_on=CURRENT_TIMESTAMP WHERE task_id=?";
        int rowsUpdated =
                query(
                        connection,
                        UPDATE_TASK,
                        q ->
                                q.addJsonParameter(task)
                                        .addParameter(task.getTaskId())
                                        .executeUpdate());
        if (rowsUpdated == 0) {
            String INSERT_TASK =
                    "INSERT INTO task (task_id, json_data, modified_on) VALUES (?, ?, CURRENT_TIMESTAMP) ON CONFLICT (task_id) DO UPDATE SET json_data=excluded.json_data, modified_on=excluded.modified_on";
            execute(
                    connection,
                    INSERT_TASK,
                    q -> q.addParameter(task.getTaskId()).addJsonParameter(task).executeUpdate());
        }
    }
    /** Deletes a task payload row by id. */
    private void removeTaskData(Connection connection, TaskModel task) {
        String REMOVE_TASK = "DELETE FROM task WHERE task_id = ?";
        execute(connection, REMOVE_TASK, q -> q.addParameter(task.getTaskId()).executeDelete());
    }
    /**
     * Ensures a workflow_to_task mapping row exists for the task; pre-check plus
     * ON CONFLICT DO NOTHING keeps this idempotent under concurrent writers.
     */
    private void addWorkflowToTaskMapping(Connection connection, TaskModel task) {
        String EXISTS_WORKFLOW_TO_TASK =
                "SELECT EXISTS(SELECT 1 FROM workflow_to_task WHERE workflow_id = ? AND task_id = ?)";
        boolean exists =
                query(
                        connection,
                        EXISTS_WORKFLOW_TO_TASK,
                        q ->
                                q.addParameter(task.getWorkflowInstanceId())
                                        .addParameter(task.getTaskId())
                                        .exists());
        if (!exists) {
            String INSERT_WORKFLOW_TO_TASK =
                    "INSERT INTO workflow_to_task (workflow_id, task_id) VALUES (?, ?) ON CONFLICT (workflow_id,task_id) DO NOTHING";
            execute(
                    connection,
                    INSERT_WORKFLOW_TO_TASK,
                    q ->
                            q.addParameter(task.getWorkflowInstanceId())
                                    .addParameter(task.getTaskId())
                                    .executeUpdate());
        }
    }
    /** Removes the workflow_to_task mapping row for the task. */
    private void removeWorkflowToTaskMapping(Connection connection, TaskModel task) {
        String REMOVE_WORKFLOW_TO_TASK =
                "DELETE FROM workflow_to_task WHERE workflow_id = ? AND task_id = ?";
        execute(
                connection,
                REMOVE_WORKFLOW_TO_TASK,
                q ->
                        q.addParameter(task.getWorkflowInstanceId())
                                .addParameter(task.getTaskId())
                                .executeDelete());
    }
    /**
     * Records the workflow under its definition name and creation date (yyyyMMdd) so it
     * can be found by getWorkflowsByType date-range scans.
     */
    private void addWorkflowDefToWorkflowMapping(Connection connection, WorkflowModel workflow) {
        String INSERT_WORKFLOW_DEF_TO_WORKFLOW =
                "INSERT INTO workflow_def_to_workflow (workflow_def, date_str, workflow_id) VALUES (?, ?, ?)";
        execute(
                connection,
                INSERT_WORKFLOW_DEF_TO_WORKFLOW,
                q ->
                        q.addParameter(workflow.getWorkflowName())
                                .addParameter(dateStr(workflow.getCreateTime()))
                                .addParameter(workflow.getWorkflowId())
                                .executeUpdate());
    }
    /** Removes the def/date/workflow row written by addWorkflowDefToWorkflowMapping. */
    private void removeWorkflowDefToWorkflowMapping(Connection connection, WorkflowModel workflow) {
        String REMOVE_WORKFLOW_DEF_TO_WORKFLOW =
                "DELETE FROM workflow_def_to_workflow WHERE workflow_def = ? AND date_str = ? AND workflow_id = ?";
        execute(
                connection,
                REMOVE_WORKFLOW_DEF_TO_WORKFLOW,
                q ->
                        q.addParameter(workflow.getWorkflowName())
                                .addParameter(dateStr(workflow.getCreateTime()))
                                .addParameter(workflow.getWorkflowId())
                                .executeUpdate());
    }
@VisibleForTesting
boolean addScheduledTask(Connection connection, TaskModel task, String taskKey) {
final String EXISTS_SCHEDULED_TASK =
"SELECT EXISTS(SELECT 1 FROM task_scheduled where workflow_id = ? AND task_key = ?)";
boolean exists =
query(
connection,
EXISTS_SCHEDULED_TASK,
q ->
q.addParameter(task.getWorkflowInstanceId())
.addParameter(taskKey)
.exists());
if (!exists) {
final String INSERT_IGNORE_SCHEDULED_TASK =
"INSERT INTO task_scheduled (workflow_id, task_key, task_id) VALUES (?, ?, ?) ON CONFLICT (workflow_id,task_key) DO NOTHING";
int count =
query(
connection,
INSERT_IGNORE_SCHEDULED_TASK,
q ->
q.addParameter(task.getWorkflowInstanceId())
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | true |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresQueueDAO.java | postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresQueueDAO.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.dao;
import java.sql.Connection;
import java.util.*;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import javax.sql.DataSource;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.dao.QueueDAO;
import com.netflix.conductor.postgres.config.PostgresProperties;
import com.netflix.conductor.postgres.util.ExecutorsUtil;
import com.netflix.conductor.postgres.util.PostgresQueueListener;
import com.netflix.conductor.postgres.util.Query;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.Uninterruptibles;
import jakarta.annotation.*;
public class PostgresQueueDAO extends PostgresBaseDAO implements QueueDAO {
    // Interval at which unacked messages are processed (see processAllUnacks).
    private static final Long UNACK_SCHEDULE_MS = 60_000L;
    private final ScheduledExecutorService scheduledExecutorService;
    // Only set when experimentalQueueNotify is enabled; otherwise remains null.
    private PostgresQueueListener queueListener;
    public PostgresQueueDAO(
            RetryTemplate retryTemplate,
            ObjectMapper objectMapper,
            DataSource dataSource,
            PostgresProperties properties) {
        super(retryTemplate, objectMapper, dataSource);
        this.scheduledExecutorService =
                Executors.newSingleThreadScheduledExecutor(
                        ExecutorsUtil.newNamedThreadFactory("postgres-queue-"));
        // periodically handle messages whose pollers never acked them
        this.scheduledExecutorService.scheduleAtFixedRate(
                this::processAllUnacks,
                UNACK_SCHEDULE_MS,
                UNACK_SCHEDULE_MS,
                TimeUnit.MILLISECONDS);
        logger.debug("{} is ready to serve", PostgresQueueDAO.class.getName());
        if (properties.getExperimentalQueueNotify()) {
            this.queueListener = new PostgresQueueListener(dataSource, properties);
        }
    }
    /** Stops the unack-processing scheduler, waiting up to 30s for in-flight work. */
    @PreDestroy
    public void destroy() {
        try {
            this.scheduledExecutorService.shutdown();
            if (scheduledExecutorService.awaitTermination(30, TimeUnit.SECONDS)) {
                logger.debug("tasks completed, shutting down");
            } else {
                logger.warn("Forcing shutdown after waiting for 30 seconds");
                scheduledExecutorService.shutdownNow();
            }
        } catch (InterruptedException ie) {
            logger.warn(
                    "Shutdown interrupted, invoking shutdownNow on scheduledExecutorService for processAllUnacks",
                    ie);
            scheduledExecutorService.shutdownNow();
            Thread.currentThread().interrupt();
        }
    }
@Override
public void push(String queueName, String messageId, long offsetTimeInSecond) {
push(queueName, messageId, 0, offsetTimeInSecond);
}
@Override
public void push(String queueName, String messageId, int priority, long offsetTimeInSecond) {
withTransaction(
tx -> pushMessage(tx, queueName, messageId, null, priority, offsetTimeInSecond));
}
@Override
public void push(String queueName, List<Message> messages) {
withTransaction(
tx ->
messages.forEach(
message ->
pushMessage(
tx,
queueName,
message.getId(),
message.getPayload(),
message.getPriority(),
0)));
}
@Override
public boolean pushIfNotExists(String queueName, String messageId, long offsetTimeInSecond) {
return pushIfNotExists(queueName, messageId, 0, offsetTimeInSecond);
}
@Override
public boolean pushIfNotExists(
String queueName, String messageId, int priority, long offsetTimeInSecond) {
return getWithRetriedTransactions(
tx -> {
if (!existsMessage(tx, queueName, messageId)) {
pushMessage(tx, queueName, messageId, null, priority, offsetTimeInSecond);
return true;
}
return false;
});
}
@Override
public List<String> pop(String queueName, int count, int timeout) {
return pollMessages(queueName, count, timeout).stream()
.map(Message::getId)
.collect(Collectors.toList());
}
@Override
public List<Message> pollMessages(String queueName, int count, int timeout) {
if (timeout < 1) {
List<Message> messages =
getWithTransactionWithOutErrorPropagation(
tx -> popMessages(tx, queueName, count, timeout));
if (messages == null) {
return new ArrayList<>();
}
return messages;
}
long start = System.currentTimeMillis();
final List<Message> messages = new ArrayList<>();
while (true) {
List<Message> messagesSlice =
getWithTransactionWithOutErrorPropagation(
tx -> popMessages(tx, queueName, count - messages.size(), timeout));
if (messagesSlice == null) {
logger.warn(
"Unable to poll {} messages from {} due to tx conflict, only {} popped",
count,
queueName,
messages.size());
// conflict could have happened, returned messages popped so far
return messages;
}
messages.addAll(messagesSlice);
if (messages.size() >= count || ((System.currentTimeMillis() - start) > timeout)) {
return messages;
}
Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
}
}
@Override
public void remove(String queueName, String messageId) {
withTransaction(tx -> removeMessage(tx, queueName, messageId));
}
@Override
public int getSize(String queueName) {
if (queueListener != null) {
Optional<Integer> size = queueListener.getSize(queueName);
if (size.isPresent()) {
return size.get();
}
}
final String GET_QUEUE_SIZE = "SELECT COUNT(*) FROM queue_message WHERE queue_name = ?";
return queryWithTransaction(
GET_QUEUE_SIZE, q -> ((Long) q.addParameter(queueName).executeCount()).intValue());
}
@Override
public boolean ack(String queueName, String messageId) {
return getWithRetriedTransactions(tx -> removeMessage(tx, queueName, messageId));
}
@Override
public boolean setUnackTimeout(String queueName, String messageId, long unackTimeout) {
long updatedOffsetTimeInSecond = unackTimeout / 1000;
final String UPDATE_UNACK_TIMEOUT =
"UPDATE queue_message SET offset_time_seconds = ?, deliver_on = (current_timestamp + (? ||' seconds')::interval) WHERE queue_name = ? AND message_id = ?";
return queryWithTransaction(
UPDATE_UNACK_TIMEOUT,
q ->
q.addParameter(updatedOffsetTimeInSecond)
.addParameter(updatedOffsetTimeInSecond)
.addParameter(queueName)
.addParameter(messageId)
.executeUpdate())
== 1;
}
@Override
public void flush(String queueName) {
final String FLUSH_QUEUE = "DELETE FROM queue_message WHERE queue_name = ?";
executeWithTransaction(FLUSH_QUEUE, q -> q.addParameter(queueName).executeDelete());
}
@Override
public Map<String, Long> queuesDetail() {
final String GET_QUEUES_DETAIL =
"SELECT queue_name, (SELECT count(*) FROM queue_message WHERE popped = false AND queue_name = q.queue_name) AS size FROM queue q FOR SHARE SKIP LOCKED";
return queryWithTransaction(
GET_QUEUES_DETAIL,
q ->
q.executeAndFetch(
rs -> {
Map<String, Long> detail = Maps.newHashMap();
while (rs.next()) {
String queueName = rs.getString("queue_name");
Long size = rs.getLong("size");
detail.put(queueName, size);
}
return detail;
}));
}
@Override
public Map<String, Map<String, Map<String, Long>>> queuesDetailVerbose() {
// @formatter:off
final String GET_QUEUES_DETAIL_VERBOSE =
"SELECT queue_name, \n"
+ " (SELECT count(*) FROM queue_message WHERE popped = false AND queue_name = q.queue_name) AS size,\n"
+ " (SELECT count(*) FROM queue_message WHERE popped = true AND queue_name = q.queue_name) AS uacked \n"
+ "FROM queue q FOR SHARE SKIP LOCKED";
// @formatter:on
return queryWithTransaction(
GET_QUEUES_DETAIL_VERBOSE,
q ->
q.executeAndFetch(
rs -> {
Map<String, Map<String, Map<String, Long>>> result =
Maps.newHashMap();
while (rs.next()) {
String queueName = rs.getString("queue_name");
Long size = rs.getLong("size");
Long queueUnacked = rs.getLong("uacked");
result.put(
queueName,
ImmutableMap.of(
"a",
ImmutableMap
.of( // sharding not implemented,
// returning only
// one shard with all the
// info
"size",
size,
"uacked",
queueUnacked)));
}
return result;
}));
}
/**
* Un-pop all un-acknowledged messages for all queues.
*
* @since 1.11.6
*/
public void processAllUnacks() {
logger.trace("processAllUnacks started");
getWithRetriedTransactions(
tx -> {
String LOCK_TASKS =
"SELECT queue_name, message_id FROM queue_message WHERE popped = true AND (deliver_on + (60 ||' seconds')::interval) < current_timestamp limit 1000 FOR UPDATE SKIP LOCKED";
List<QueueMessage> messages =
query(
tx,
LOCK_TASKS,
p ->
p.executeAndFetch(
rs -> {
List<QueueMessage> results =
new ArrayList<QueueMessage>();
while (rs.next()) {
QueueMessage qm = new QueueMessage();
qm.queueName =
rs.getString("queue_name");
qm.messageId =
rs.getString("message_id");
results.add(qm);
}
return results;
}));
if (messages.size() == 0) {
return 0;
}
Map<String, List<String>> queueMessageMap = new HashMap<String, List<String>>();
for (QueueMessage qm : messages) {
if (!queueMessageMap.containsKey(qm.queueName)) {
queueMessageMap.put(qm.queueName, new ArrayList<String>());
}
queueMessageMap.get(qm.queueName).add(qm.messageId);
}
int totalUnacked = 0;
for (String queueName : queueMessageMap.keySet()) {
Integer unacked = 0;
;
try {
final List<String> msgIds = queueMessageMap.get(queueName);
final String UPDATE_POPPED =
String.format(
"UPDATE queue_message SET popped = false WHERE queue_name = ? and message_id IN (%s)",
Query.generateInBindings(msgIds.size()));
unacked =
query(
tx,
UPDATE_POPPED,
q ->
q.addParameter(queueName)
.addParameters(msgIds)
.executeUpdate());
} catch (Exception e) {
e.printStackTrace();
}
totalUnacked += unacked;
logger.debug("Unacked {} messages from all queues", unacked);
}
if (totalUnacked > 0) {
logger.debug("Unacked {} messages from all queues", totalUnacked);
}
return totalUnacked;
});
}
@Override
public void processUnacks(String queueName) {
final String PROCESS_UNACKS =
"UPDATE queue_message SET popped = false WHERE queue_name = ? AND popped = true AND (current_timestamp - (60 ||' seconds')::interval) > deliver_on";
executeWithTransaction(PROCESS_UNACKS, q -> q.addParameter(queueName).executeUpdate());
}
@Override
public boolean resetOffsetTime(String queueName, String messageId) {
long offsetTimeInSecond = 0; // Reset to 0
final String SET_OFFSET_TIME =
"UPDATE queue_message SET offset_time_seconds = ?, deliver_on = (current_timestamp + (? ||' seconds')::interval) \n"
+ "WHERE queue_name = ? AND message_id = ?";
return queryWithTransaction(
SET_OFFSET_TIME,
q ->
q.addParameter(offsetTimeInSecond)
.addParameter(offsetTimeInSecond)
.addParameter(queueName)
.addParameter(messageId)
.executeUpdate()
== 1);
}
private boolean existsMessage(Connection connection, String queueName, String messageId) {
final String EXISTS_MESSAGE =
"SELECT EXISTS(SELECT 1 FROM queue_message WHERE queue_name = ? AND message_id = ?) FOR SHARE";
return query(
connection,
EXISTS_MESSAGE,
q -> q.addParameter(queueName).addParameter(messageId).exists());
}
private void pushMessage(
Connection connection,
String queueName,
String messageId,
String payload,
Integer priority,
long offsetTimeInSecond) {
createQueueIfNotExists(connection, queueName);
String UPDATE_MESSAGE =
"UPDATE queue_message SET payload=?, deliver_on=(current_timestamp + (? ||' seconds')::interval) WHERE queue_name = ? AND message_id = ?";
int rowsUpdated =
query(
connection,
UPDATE_MESSAGE,
q ->
q.addParameter(payload)
.addParameter(offsetTimeInSecond)
.addParameter(queueName)
.addParameter(messageId)
.executeUpdate());
if (rowsUpdated == 0) {
String PUSH_MESSAGE =
"INSERT INTO queue_message (deliver_on, queue_name, message_id, priority, offset_time_seconds, payload) VALUES ((current_timestamp + (? ||' seconds')::interval), ?,?,?,?,?) ON CONFLICT (queue_name,message_id) DO UPDATE SET payload=excluded.payload, deliver_on=excluded.deliver_on";
execute(
connection,
PUSH_MESSAGE,
q ->
q.addParameter(offsetTimeInSecond)
.addParameter(queueName)
.addParameter(messageId)
.addParameter(priority)
.addParameter(offsetTimeInSecond)
.addParameter(payload)
.executeUpdate());
}
}
private boolean removeMessage(Connection connection, String queueName, String messageId) {
final String REMOVE_MESSAGE =
"DELETE FROM queue_message WHERE queue_name = ? AND message_id = ?";
return query(
connection,
REMOVE_MESSAGE,
q -> q.addParameter(queueName).addParameter(messageId).executeDelete());
}
private List<Message> popMessages(
Connection connection, String queueName, int count, int timeout) {
if (this.queueListener != null) {
if (!this.queueListener.hasMessagesReady(queueName)) {
return new ArrayList<>();
}
}
String POP_QUERY =
"WITH cte AS ("
+ " SELECT queue_name, message_id "
+ " FROM queue_message "
+ " WHERE queue_name = ? "
+ " AND popped = false "
+ " AND deliver_on <= (current_timestamp + (1000 || ' microseconds')::interval) "
+ " ORDER BY deliver_on, priority DESC, created_on "
+ " LIMIT ? "
+ " FOR UPDATE SKIP LOCKED "
+ ") "
+ "UPDATE queue_message "
+ " SET popped = true "
+ " FROM cte "
+ " WHERE queue_message.queue_name = cte.queue_name "
+ " AND queue_message.message_id = cte.message_id "
+ " AND queue_message.popped = false "
+ " RETURNING queue_message.message_id, queue_message.priority, queue_message.payload";
return query(
connection,
POP_QUERY,
p ->
p.addParameter(queueName)
.addParameter(count)
.executeAndFetch(
rs -> {
List<Message> results = new ArrayList<>();
while (rs.next()) {
Message m = new Message();
m.setId(rs.getString("message_id"));
m.setPriority(rs.getInt("priority"));
m.setPayload(rs.getString("payload"));
results.add(m);
}
return results;
}));
}
@Override
public boolean containsMessage(String queueName, String messageId) {
return getWithRetriedTransactions(tx -> existsMessage(tx, queueName, messageId));
}
private void createQueueIfNotExists(Connection connection, String queueName) {
logger.trace("Creating new queue '{}'", queueName);
final String EXISTS_QUEUE =
"SELECT EXISTS(SELECT 1 FROM queue WHERE queue_name = ?) FOR SHARE";
boolean exists = query(connection, EXISTS_QUEUE, q -> q.addParameter(queueName).exists());
if (!exists) {
final String CREATE_QUEUE =
"INSERT INTO queue (queue_name) VALUES (?) ON CONFLICT (queue_name) DO NOTHING";
execute(connection, CREATE_QUEUE, q -> q.addParameter(queueName).executeUpdate());
}
}
private class QueueMessage {
public String queueName;
public String messageId;
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresIndexDAO.java | postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresIndexDAO.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.dao;
import java.sql.Timestamp;
import java.time.Instant;
import java.time.format.DateTimeFormatter;
import java.time.temporal.TemporalAccessor;
import java.util.*;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import javax.sql.DataSource;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.common.metadata.events.EventExecution;
import com.netflix.conductor.common.metadata.tasks.TaskExecLog;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.dao.IndexDAO;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.postgres.config.PostgresProperties;
import com.netflix.conductor.postgres.util.PostgresIndexQueryBuilder;
import com.fasterxml.jackson.databind.ObjectMapper;
public class PostgresIndexDAO extends PostgresBaseDAO implements IndexDAO {
private final PostgresProperties properties;
private final ExecutorService executorService;
private static final int CORE_POOL_SIZE = 6;
private static final long KEEP_ALIVE_TIME = 1L;
private boolean onlyIndexOnStatusChange;
public PostgresIndexDAO(
RetryTemplate retryTemplate,
ObjectMapper objectMapper,
DataSource dataSource,
PostgresProperties properties) {
super(retryTemplate, objectMapper, dataSource);
this.properties = properties;
this.onlyIndexOnStatusChange = properties.getOnlyIndexOnStatusChange();
int maximumPoolSize = properties.getAsyncMaxPoolSize();
int workerQueueSize = properties.getAsyncWorkerQueueSize();
// Set up a workerpool for performing async operations.
this.executorService =
new ThreadPoolExecutor(
CORE_POOL_SIZE,
maximumPoolSize,
KEEP_ALIVE_TIME,
TimeUnit.MINUTES,
new LinkedBlockingQueue<>(workerQueueSize),
(runnable, executor) -> {
logger.warn(
"Request {} to async dao discarded in executor {}",
runnable,
executor);
Monitors.recordDiscardedIndexingCount("indexQueue");
});
}
@Override
public void indexWorkflow(WorkflowSummary workflow) {
String INSERT_WORKFLOW_INDEX_SQL =
"INSERT INTO workflow_index (workflow_id, correlation_id, workflow_type, start_time, update_time, status, json_data)"
+ "VALUES (?, ?, ?, ?, ?, ?, ?::JSONB) ON CONFLICT (workflow_id) \n"
+ "DO UPDATE SET correlation_id = EXCLUDED.correlation_id, workflow_type = EXCLUDED.workflow_type, "
+ "start_time = EXCLUDED.start_time, status = EXCLUDED.status, json_data = EXCLUDED.json_data, "
+ "update_time = EXCLUDED.update_time "
+ "WHERE EXCLUDED.update_time >= workflow_index.update_time";
if (onlyIndexOnStatusChange) {
INSERT_WORKFLOW_INDEX_SQL += " AND workflow_index.status != EXCLUDED.status";
}
TemporalAccessor updateTa = DateTimeFormatter.ISO_INSTANT.parse(workflow.getUpdateTime());
Timestamp updateTime = Timestamp.from(Instant.from(updateTa));
TemporalAccessor ta = DateTimeFormatter.ISO_INSTANT.parse(workflow.getStartTime());
Timestamp startTime = Timestamp.from(Instant.from(ta));
int rowsUpdated =
queryWithTransaction(
INSERT_WORKFLOW_INDEX_SQL,
q ->
q.addParameter(workflow.getWorkflowId())
.addParameter(workflow.getCorrelationId())
.addParameter(workflow.getWorkflowType())
.addParameter(startTime)
.addParameter(updateTime)
.addParameter(workflow.getStatus().toString())
.addJsonParameter(workflow)
.executeUpdate());
logger.debug("Postgres index workflow rows updated: {}", rowsUpdated);
}
@Override
public SearchResult<WorkflowSummary> searchWorkflowSummary(
String query, String freeText, int start, int count, List<String> sort) {
PostgresIndexQueryBuilder queryBuilder =
new PostgresIndexQueryBuilder(
"workflow_index", query, freeText, start, count, sort, properties);
List<WorkflowSummary> results =
queryWithTransaction(
queryBuilder.getQuery(),
q -> {
queryBuilder.addParameters(q);
queryBuilder.addPagingParameters(q);
return q.executeAndFetch(WorkflowSummary.class);
});
List<String> totalHitResults =
queryWithTransaction(
queryBuilder.getCountQuery(),
q -> {
queryBuilder.addParameters(q);
return q.executeAndFetch(String.class);
});
int totalHits = Integer.valueOf(totalHitResults.get(0));
return new SearchResult<>(totalHits, results);
}
@Override
public void indexTask(TaskSummary task) {
String INSERT_TASK_INDEX_SQL =
"INSERT INTO task_index (task_id, task_type, task_def_name, status, start_time, update_time, workflow_type, json_data)"
+ "VALUES (?, ?, ?, ?, ?, ?, ?, ?::JSONB) ON CONFLICT (task_id) "
+ "DO UPDATE SET task_type = EXCLUDED.task_type, task_def_name = EXCLUDED.task_def_name, "
+ "status = EXCLUDED.status, update_time = EXCLUDED.update_time, json_data = EXCLUDED.json_data "
+ "WHERE EXCLUDED.update_time >= task_index.update_time";
if (onlyIndexOnStatusChange) {
INSERT_TASK_INDEX_SQL += " AND task_index.status != EXCLUDED.status";
}
TemporalAccessor updateTa = DateTimeFormatter.ISO_INSTANT.parse(task.getUpdateTime());
Timestamp updateTime = Timestamp.from(Instant.from(updateTa));
TemporalAccessor startTa = DateTimeFormatter.ISO_INSTANT.parse(task.getStartTime());
Timestamp startTime = Timestamp.from(Instant.from(startTa));
int rowsUpdated =
queryWithTransaction(
INSERT_TASK_INDEX_SQL,
q ->
q.addParameter(task.getTaskId())
.addParameter(task.getTaskType())
.addParameter(task.getTaskDefName())
.addParameter(task.getStatus().toString())
.addParameter(startTime)
.addParameter(updateTime)
.addParameter(task.getWorkflowType())
.addJsonParameter(task)
.executeUpdate());
logger.debug("Postgres index task rows updated: {}", rowsUpdated);
}
@Override
public SearchResult<TaskSummary> searchTaskSummary(
String query, String freeText, int start, int count, List<String> sort) {
PostgresIndexQueryBuilder queryBuilder =
new PostgresIndexQueryBuilder(
"task_index", query, freeText, start, count, sort, properties);
List<TaskSummary> results =
queryWithTransaction(
queryBuilder.getQuery(),
q -> {
queryBuilder.addParameters(q);
queryBuilder.addPagingParameters(q);
return q.executeAndFetch(TaskSummary.class);
});
List<String> totalHitResults =
queryWithTransaction(
queryBuilder.getCountQuery(),
q -> {
queryBuilder.addParameters(q);
return q.executeAndFetch(String.class);
});
int totalHits = Integer.valueOf(totalHitResults.get(0));
return new SearchResult<>(totalHits, results);
}
@Override
public void addTaskExecutionLogs(List<TaskExecLog> logs) {
String INSERT_LOG =
"INSERT INTO task_execution_logs (task_id, created_time, log) VALUES (?, ?, ?)";
for (TaskExecLog log : logs) {
queryWithTransaction(
INSERT_LOG,
q ->
q.addParameter(log.getTaskId())
.addParameter(new Timestamp(log.getCreatedTime()))
.addParameter(log.getLog())
.executeUpdate());
}
}
@Override
public List<TaskExecLog> getTaskExecutionLogs(String taskId) {
return queryWithTransaction(
"SELECT log, task_id, created_time FROM task_execution_logs WHERE task_id = ? ORDER BY created_time ASC",
q ->
q.addParameter(taskId)
.executeAndFetch(
rs -> {
List<TaskExecLog> result = new ArrayList<>();
while (rs.next()) {
TaskExecLog log = new TaskExecLog();
log.setLog(rs.getString("log"));
log.setTaskId(rs.getString("task_id"));
log.setCreatedTime(
rs.getTimestamp("created_time").getTime());
result.add(log);
}
return result;
}));
}
@Override
public void setup() {}
@Override
public CompletableFuture<Void> asyncIndexWorkflow(WorkflowSummary workflow) {
logger.info("asyncIndexWorkflow is not supported for postgres indexing");
return CompletableFuture.completedFuture(null);
}
@Override
public CompletableFuture<Void> asyncIndexTask(TaskSummary task) {
logger.info("asyncIndexTask is not supported for postgres indexing");
return CompletableFuture.completedFuture(null);
}
@Override
public SearchResult<String> searchWorkflows(
String query, String freeText, int start, int count, List<String> sort) {
logger.info("searchWorkflows is not supported for postgres indexing");
return null;
}
@Override
public SearchResult<String> searchTasks(
String query, String freeText, int start, int count, List<String> sort) {
logger.info("searchTasks is not supported for postgres indexing");
return null;
}
@Override
public void removeWorkflow(String workflowId) {
String REMOVE_WORKFLOW_SQL = "DELETE FROM workflow_index WHERE workflow_id = ?";
queryWithTransaction(REMOVE_WORKFLOW_SQL, q -> q.addParameter(workflowId).executeUpdate());
}
@Override
public CompletableFuture<Void> asyncRemoveWorkflow(String workflowId) {
return CompletableFuture.runAsync(() -> removeWorkflow(workflowId), executorService);
}
@Override
public void updateWorkflow(String workflowInstanceId, String[] keys, Object[] values) {
logger.info("updateWorkflow is not supported for postgres indexing");
}
@Override
public CompletableFuture<Void> asyncUpdateWorkflow(
String workflowInstanceId, String[] keys, Object[] values) {
logger.info("asyncUpdateWorkflow is not supported for postgres indexing");
return CompletableFuture.completedFuture(null);
}
@Override
public void removeTask(String workflowId, String taskId) {
String REMOVE_TASK_SQL =
"WITH task_delete AS (DELETE FROM task_index WHERE task_id = ?)"
+ "DELETE FROM task_execution_logs WHERE task_id =?";
queryWithTransaction(
REMOVE_TASK_SQL, q -> q.addParameter(taskId).addParameter(taskId).executeUpdate());
}
@Override
public CompletableFuture<Void> asyncRemoveTask(String workflowId, String taskId) {
return CompletableFuture.runAsync(() -> removeTask(workflowId, taskId), executorService);
}
@Override
public void updateTask(String workflowId, String taskId, String[] keys, Object[] values) {
logger.info("updateTask is not supported for postgres indexing");
}
@Override
public CompletableFuture<Void> asyncUpdateTask(
String workflowId, String taskId, String[] keys, Object[] values) {
logger.info("asyncUpdateTask is not supported for postgres indexing");
return CompletableFuture.completedFuture(null);
}
@Override
public String get(String workflowInstanceId, String key) {
logger.info("get is not supported for postgres indexing");
return null;
}
@Override
public CompletableFuture<Void> asyncAddTaskExecutionLogs(List<TaskExecLog> logs) {
logger.info("asyncAddTaskExecutionLogs is not supported for postgres indexing");
return CompletableFuture.completedFuture(null);
}
@Override
public void addEventExecution(EventExecution eventExecution) {
logger.info("addEventExecution is not supported for postgres indexing");
}
@Override
public List<EventExecution> getEventExecutions(String event) {
logger.info("getEventExecutions is not supported for postgres indexing");
return null;
}
@Override
public CompletableFuture<Void> asyncAddEventExecution(EventExecution eventExecution) {
logger.info("asyncAddEventExecution is not supported for postgres indexing");
return CompletableFuture.completedFuture(null);
}
@Override
public void addMessage(String queue, Message msg) {
logger.info("addMessage is not supported for postgres indexing");
}
@Override
public CompletableFuture<Void> asyncAddMessage(String queue, Message message) {
logger.info("asyncAddMessage is not supported for postgres indexing");
return CompletableFuture.completedFuture(null);
}
@Override
public List<Message> getMessages(String queue) {
logger.info("getMessages is not supported for postgres indexing");
return null;
}
@Override
public List<String> searchArchivableWorkflows(String indexName, long archiveTtlDays) {
logger.info("searchArchivableWorkflows is not supported for postgres indexing");
return null;
}
public long getWorkflowCount(String query, String freeText) {
logger.info("getWorkflowCount is not supported for postgres indexing");
return 0;
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresPollDataDAO.java | postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresPollDataDAO.java | /*
* Copyright 2024 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.dao;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import javax.sql.DataSource;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.common.metadata.tasks.PollData;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.dao.PollDataDAO;
import com.netflix.conductor.postgres.config.PostgresProperties;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Preconditions;
import jakarta.annotation.PostConstruct;
public class PostgresPollDataDAO extends PostgresBaseDAO implements PollDataDAO {

    /** Write-behind cache: taskDefName -> (domain -> most recent PollData). */
    private final ConcurrentHashMap<String, ConcurrentHashMap<String, PollData>> pollDataCache =
            new ConcurrentHashMap<>();

    /** Flush period (ms); <= 0 disables the write cache and writes straight through. */
    private final long pollDataFlushInterval;

    /** How long (ms) a cached entry may satisfy reads; <= 0 disables the read cache. */
    private final long cacheValidityPeriod;

    // volatile: written by the flush thread, read by getLastFlushTime() from other threads.
    private volatile long lastFlushTime = 0;

    private final boolean useReadCache;

    public PostgresPollDataDAO(
            RetryTemplate retryTemplate,
            ObjectMapper objectMapper,
            DataSource dataSource,
            PostgresProperties properties) {
        super(retryTemplate, objectMapper, dataSource);
        this.pollDataFlushInterval = properties.getPollDataFlushInterval().toMillis();
        if (this.pollDataFlushInterval > 0) {
            logger.info("Using Postgres pollData write cache");
        }
        this.cacheValidityPeriod = properties.getPollDataCacheValidityPeriod().toMillis();
        this.useReadCache = cacheValidityPeriod > 0;
        if (this.useReadCache) {
            logger.info("Using Postgres pollData read cache");
        }
    }

    /** Starts the periodic cache flush when the write cache is enabled. */
    @PostConstruct
    public void schedulePollDataRefresh() {
        if (pollDataFlushInterval > 0) {
            // NOTE(review): this executor is never shut down on bean destruction — confirm
            // whether a @PreDestroy hook is expected here.
            Executors.newSingleThreadScheduledExecutor()
                    .scheduleWithFixedDelay(
                            this::flushData,
                            pollDataFlushInterval,
                            pollDataFlushInterval,
                            TimeUnit.MILLISECONDS);
        }
    }

    /**
     * Records the latest poll for a taskDef/domain pair. With the write cache enabled the update
     * only touches memory and is persisted by the scheduled flush; otherwise it is written
     * through immediately.
     */
    @Override
    public void updateLastPollData(String taskDefName, String domain, String workerId) {
        Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null");
        String effectiveDomain = domain == null ? "DEFAULT" : domain;
        PollData pollData = new PollData(taskDefName, domain, workerId, System.currentTimeMillis());
        if (pollDataFlushInterval > 0) {
            // computeIfAbsent is atomic on ConcurrentHashMap; the original get/put pair could
            // drop a concurrently-created domain map.
            pollDataCache
                    .computeIfAbsent(taskDefName, k -> new ConcurrentHashMap<>())
                    .put(effectiveDomain, pollData);
        } else {
            withTransaction(tx -> insertOrUpdatePollData(tx, pollData, effectiveDomain));
        }
    }

    /**
     * Returns the latest poll data for a taskDef/domain pair, served from the read cache when the
     * entry is still fresh, otherwise from the database.
     */
    @Override
    public PollData getPollData(String taskDefName, String domain) {
        Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null");
        String effectiveDomain = (domain == null) ? "DEFAULT" : domain;

        if (useReadCache) {
            ConcurrentHashMap<String, PollData> domainPollData = pollDataCache.get(taskDefName);
            if (domainPollData == null) {
                return null;
            }
            PollData cached = domainPollData.get(effectiveDomain);
            // Bug fix: the original dereferenced the cached entry unconditionally and threw a
            // NullPointerException when the domain had no entry; fall through to the DB instead.
            if (cached != null
                    && System.currentTimeMillis() - cached.getLastPollTime()
                            < cacheValidityPeriod) {
                return cached;
            }
        }

        return getWithRetriedTransactions(tx -> readPollData(tx, taskDefName, effectiveDomain));
    }

    @Override
    public List<PollData> getPollData(String taskDefName) {
        Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null");
        return readAllPollData(taskDefName);
    }

    /** Reads every poll_data row, using auto-commit to avoid holding a transaction open. */
    @Override
    public List<PollData> getAllPollData() {
        try (Connection tx = dataSource.getConnection()) {
            boolean previousAutoCommitMode = tx.getAutoCommit();
            tx.setAutoCommit(true);
            try {
                String GET_ALL_POLL_DATA = "SELECT json_data FROM poll_data ORDER BY queue_name";
                return query(tx, GET_ALL_POLL_DATA, q -> q.executeAndFetch(PollData.class));
            } catch (Throwable th) {
                throw new NonTransientException(th.getMessage(), th);
            } finally {
                tx.setAutoCommit(previousAutoCommitMode);
            }
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /** Epoch millis of the last successful cache flush (0 if none yet). */
    public long getLastFlushTime() {
        return lastFlushTime;
    }

    private void insertOrUpdatePollData(Connection connection, PollData pollData, String domain) {
        try {
            /*
             * Most times the row will be updated so let's try the update first. This used to be an 'INSERT/ON CONFLICT do update' sql statement. The problem with that
             * is that if we try the INSERT first, the sequence will be increased even if the ON CONFLICT happens. Since polling happens *a lot*, the sequence can increase
             * dramatically even though it won't be used.
             */
            String UPDATE_POLL_DATA =
                    "UPDATE poll_data SET json_data=?, modified_on=CURRENT_TIMESTAMP WHERE queue_name=? AND domain=?";
            int rowsUpdated =
                    query(
                            connection,
                            UPDATE_POLL_DATA,
                            q ->
                                    q.addJsonParameter(pollData)
                                            .addParameter(pollData.getQueueName())
                                            .addParameter(domain)
                                            .executeUpdate());

            if (rowsUpdated == 0) {
                String INSERT_POLL_DATA =
                        "INSERT INTO poll_data (queue_name, domain, json_data, modified_on) VALUES (?, ?, ?, CURRENT_TIMESTAMP) ON CONFLICT (queue_name,domain) DO UPDATE SET json_data=excluded.json_data, modified_on=excluded.modified_on";
                execute(
                        connection,
                        INSERT_POLL_DATA,
                        q ->
                                q.addParameter(pollData.getQueueName())
                                        .addParameter(domain)
                                        .addJsonParameter(pollData)
                                        .executeUpdate());
            }
        } catch (NonTransientException e) {
            // A DB-side trigger may reject writes that move lastPollTime backwards; that is a
            // benign race during flushes, so only rethrow other failures.
            if (!e.getMessage().startsWith("ERROR: lastPollTime cannot be set to a lower value")) {
                throw e;
            }
        }
    }

    private PollData readPollData(Connection connection, String queueName, String domain) {
        String GET_POLL_DATA =
                "SELECT json_data FROM poll_data WHERE queue_name = ? AND domain = ?";
        return query(
                connection,
                GET_POLL_DATA,
                q ->
                        q.addParameter(queueName)
                                .addParameter(domain)
                                .executeAndFetchFirst(PollData.class));
    }

    private List<PollData> readAllPollData(String queueName) {
        String GET_ALL_POLL_DATA = "SELECT json_data FROM poll_data WHERE queue_name = ?";
        return queryWithTransaction(
                GET_ALL_POLL_DATA, q -> q.addParameter(queueName).executeAndFetch(PollData.class));
    }

    /** Persists every cached entry; failures are logged and retried on the next flush. */
    private void flushData() {
        try {
            for (Map.Entry<String, ConcurrentHashMap<String, PollData>> queue :
                    pollDataCache.entrySet()) {
                for (Map.Entry<String, PollData> domain : queue.getValue().entrySet()) {
                    withTransaction(
                            tx -> {
                                insertOrUpdatePollData(tx, domain.getValue(), domain.getKey());
                            });
                }
            }
            lastFlushTime = System.currentTimeMillis();
        } catch (Exception e) {
            logger.error("Postgres pollData cache flush failed ", e);
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresLockDAO.java | postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresLockDAO.java | /*
* Copyright 2024 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.dao;
import java.util.concurrent.TimeUnit;
import javax.sql.DataSource;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.core.sync.Lock;
import com.fasterxml.jackson.databind.ObjectMapper;
/**
 * {@link Lock} implementation backed by a Postgres {@code locks} table.
 *
 * <p>A lock is a row keyed by {@code lock_id} with a lease-expiration timestamp. Acquisition is
 * an upsert that succeeds only when the row is absent or its lease has already expired;
 * contended acquisitions poll the table until the lease is obtained or the caller's deadline
 * passes.
 */
public class PostgresLockDAO extends PostgresBaseDAO implements Lock {

    /** Default try/lease duration (one day, in milliseconds) when the caller supplies none. */
    private static final long DAY_MS = TimeUnit.DAYS.toMillis(1);

    /** Pause between acquisition attempts while the lock is held by another owner. */
    private static final long POLL_INTERVAL_MS = 100;

    public PostgresLockDAO(
            RetryTemplate retryTemplate, ObjectMapper objectMapper, DataSource dataSource) {
        super(retryTemplate, objectMapper, dataSource);
    }

    /** Blocks for up to one day trying to take the lock, with a one-day lease. */
    @Override
    public void acquireLock(String lockId) {
        acquireLock(lockId, DAY_MS, DAY_MS, TimeUnit.MILLISECONDS);
    }

    /** Tries for {@code timeToTry} to take the lock, with a default one-day lease. */
    @Override
    public boolean acquireLock(String lockId, long timeToTry, TimeUnit unit) {
        return acquireLock(lockId, timeToTry, DAY_MS, unit);
    }

    /**
     * Attempts to acquire {@code lockId} for {@code leaseTime}, retrying until {@code timeToTry}
     * has elapsed.
     *
     * @param lockId unique identifier of the lock row
     * @param timeToTry how long to keep retrying before giving up
     * @param leaseTime how long the acquired lease remains valid
     * @param unit unit for both durations
     * @return {@code true} if the lock was acquired, {@code false} on timeout or interruption
     */
    @Override
    public boolean acquireLock(String lockId, long timeToTry, long leaseTime, TimeUnit unit) {
        long endTime = System.currentTimeMillis() + unit.toMillis(timeToTry);
        while (System.currentTimeMillis() < endTime) {
            // Upsert: insert the row if missing, or steal it if its lease has expired.
            var sql =
                    "INSERT INTO locks(lock_id, lease_expiration) VALUES (?, now() + (?::text || ' milliseconds')::interval) ON CONFLICT (lock_id) DO UPDATE SET lease_expiration = EXCLUDED.lease_expiration WHERE locks.lease_expiration <= now()";
            int rowsAffected =
                    queryWithTransaction(
                            sql,
                            q ->
                                    q.addParameter(lockId)
                                            .addParameter(unit.toMillis(leaseTime))
                                            .executeUpdate());
            if (rowsAffected > 0) {
                return true;
            }
            try {
                Thread.sleep(POLL_INTERVAL_MS);
            } catch (InterruptedException ie) {
                // Restore the interrupt flag for callers instead of swallowing it.
                Thread.currentThread().interrupt();
                return false;
            }
        }
        return false;
    }

    /** Releases (deletes) the lock row; a no-op if the row does not exist. */
    @Override
    public void releaseLock(String lockId) {
        var sql = "DELETE FROM locks WHERE lock_id = ?";
        queryWithTransaction(sql, q -> q.addParameter(lockId).executeDelete());
    }

    /** Equivalent to {@link #releaseLock(String)}. */
    @Override
    public void deleteLock(String lockId) {
        releaseLock(lockId);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresBaseDAO.java | postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresBaseDAO.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.dao;
import java.io.IOException;
import java.sql.Connection;
import java.sql.SQLException;
import java.time.Duration;
import java.time.Instant;
import java.util.Arrays;
import java.util.List;
import java.util.function.Consumer;
import javax.sql.DataSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.postgres.util.*;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableList;
/**
 * Common JDBC plumbing shared by the Postgres DAOs: transaction management with retries, JSON
 * (de)serialization via Jackson, and helpers for running {@link Query} objects.
 *
 * <p>All SQL errors are surfaced as {@link NonTransientException}; retry behaviour is delegated
 * to the injected {@link RetryTemplate}.
 */
public abstract class PostgresBaseDAO {

    // Classes whose stack frames are skipped when deriving the "calling method" for trace logs.
    private static final List<String> EXCLUDED_STACKTRACE_CLASS =
            ImmutableList.of(PostgresBaseDAO.class.getName(), Thread.class.getName());

    protected final Logger logger = LoggerFactory.getLogger(getClass());
    protected final ObjectMapper objectMapper;
    protected final DataSource dataSource;
    private final RetryTemplate retryTemplate;

    protected PostgresBaseDAO(
            RetryTemplate retryTemplate, ObjectMapper objectMapper, DataSource dataSource) {
        this.retryTemplate = retryTemplate;
        this.objectMapper = objectMapper;
        this.dataSource = dataSource;
    }

    /**
     * Lazily resolves the name of the first method on the current stack that does not belong to
     * this class or {@link Thread}. The stack walk only happens when {@code toString()} is
     * invoked, i.e. when trace logging is actually enabled.
     */
    protected final LazyToString getCallingMethod() {
        return new LazyToString(
                () ->
                        Arrays.stream(Thread.currentThread().getStackTrace())
                                .filter(
                                        ste ->
                                                !EXCLUDED_STACKTRACE_CLASS.contains(
                                                        ste.getClassName()))
                                .findFirst()
                                .map(StackTraceElement::getMethodName)
                                .orElseThrow(() -> new NullPointerException("Cannot find Caller")));
    }

    /**
     * Serializes {@code value} to a JSON string.
     *
     * @throws NonTransientException if Jackson fails to serialize the value
     */
    protected String toJson(Object value) {
        try {
            return objectMapper.writeValueAsString(value);
        } catch (JsonProcessingException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Deserializes {@code json} into an instance of {@code tClass}.
     *
     * @throws NonTransientException if the JSON cannot be read as the requested type
     */
    protected <T> T readValue(String json, Class<T> tClass) {
        try {
            return objectMapper.readValue(json, tClass);
        } catch (IOException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Deserializes {@code json} using a {@link TypeReference} — needed for generic target types
     * that a plain {@code Class} token cannot express.
     *
     * @throws NonTransientException if the JSON cannot be read as the requested type
     */
    protected <T> T readValue(String json, TypeReference<T> typeReference) {
        try {
            return objectMapper.readValue(json, typeReference);
        } catch (IOException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Initialize a new transactional {@link Connection} from {@link #dataSource} and pass it to
     * {@literal function}.
     *
     * <p>Successful executions of {@literal function} will result in a commit and return of {@link
     * TransactionalFunction#apply(Connection)}.
     *
     * <p>If any {@link Throwable} thrown from {@code TransactionalFunction#apply(Connection)} will
     * result in a rollback of the transaction and will be wrapped in an {@link
     * NonTransientException} if it is not already one.
     *
     * <p>Generally this is used to wrap multiple {@link #execute(Connection, String,
     * ExecuteFunction)} or {@link #query(Connection, String, QueryFunction)} invocations that
     * produce some expected return value.
     *
     * @param function The function to apply with a new transactional {@link Connection}
     * @param <R> The return type.
     * @return The result of {@code TransactionalFunction#apply(Connection)}
     * @throws NonTransientException If any errors occur.
     */
    private <R> R getWithTransaction(final TransactionalFunction<R> function) {
        final Instant start = Instant.now();
        LazyToString callingMethod = getCallingMethod();
        logger.trace("{} : starting transaction", callingMethod);
        try (Connection tx = dataSource.getConnection()) {
            // Remember the pool's auto-commit setting so the connection is handed back unchanged.
            boolean previousAutoCommitMode = tx.getAutoCommit();
            tx.setAutoCommit(false);
            try {
                R result = function.apply(tx);
                tx.commit();
                return result;
            } catch (Throwable th) {
                tx.rollback();
                // Avoid double-wrapping exceptions that are already NonTransient.
                if (th instanceof NonTransientException) {
                    throw th;
                }
                throw new NonTransientException(th.getMessage(), th);
            } finally {
                tx.setAutoCommit(previousAutoCommitMode);
            }
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        } finally {
            logger.trace(
                    "{} : took {}ms",
                    callingMethod,
                    Duration.between(start, Instant.now()).toMillis());
        }
    }

    /**
     * Executes {@code function} via {@link #getWithTransaction(TransactionalFunction)} under the
     * injected {@link RetryTemplate}; a terminally failed retry sequence is wrapped in a
     * {@link NonTransientException}.
     */
    <R> R getWithRetriedTransactions(final TransactionalFunction<R> function) {
        try {
            return retryTemplate.execute(context -> getWithTransaction(function));
        } catch (Exception e) {
            throw new NonTransientException(e.getMessage(), e);
        }
    }

    /**
     * Like {@link #getWithTransaction(TransactionalFunction)} but, instead of propagating a
     * failure from {@code function}, rolls back, logs the message, and returns {@code null}.
     * Connection-level {@link SQLException}s are still rethrown as
     * {@link NonTransientException}.
     */
    protected <R> R getWithTransactionWithOutErrorPropagation(TransactionalFunction<R> function) {
        Instant start = Instant.now();
        LazyToString callingMethod = getCallingMethod();
        logger.trace("{} : starting transaction", callingMethod);
        try (Connection tx = dataSource.getConnection()) {
            boolean previousAutoCommitMode = tx.getAutoCommit();
            tx.setAutoCommit(false);
            try {
                R result = function.apply(tx);
                tx.commit();
                return result;
            } catch (Throwable th) {
                tx.rollback();
                // Deliberate best-effort: the error is logged, not rethrown.
                logger.info(th.getMessage());
                return null;
            } finally {
                tx.setAutoCommit(previousAutoCommitMode);
            }
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        } finally {
            logger.trace(
                    "{} : took {}ms",
                    callingMethod,
                    Duration.between(start, Instant.now()).toMillis());
        }
    }

    /**
     * Wraps {@link #getWithRetriedTransactions(TransactionalFunction)} with no return value.
     *
     * <p>Generally this is used to wrap multiple {@link #execute(Connection, String,
     * ExecuteFunction)} or {@link #query(Connection, String, QueryFunction)} invocations that
     * produce no expected return value.
     *
     * @param consumer The {@link Consumer} callback to pass a transactional {@link Connection} to.
     * @throws NonTransientException If any errors occur.
     * @see #getWithRetriedTransactions(TransactionalFunction)
     */
    protected void withTransaction(Consumer<Connection> consumer) {
        getWithRetriedTransactions(
                connection -> {
                    consumer.accept(connection);
                    return null;
                });
    }

    /**
     * Initiate a new transaction and execute a {@link Query} within that context, then return the
     * results of {@literal function}.
     *
     * @param query The query string to prepare.
     * @param function The functional callback to pass a {@link Query} to.
     * @param <R> The expected return type of {@literal function}.
     * @return The results of applying {@literal function}.
     */
    protected <R> R queryWithTransaction(String query, QueryFunction<R> function) {
        return getWithRetriedTransactions(tx -> query(tx, query, function));
    }

    /**
     * Execute a {@link Query} within the context of a given transaction and return the results of
     * {@literal function}.
     *
     * @param tx The transactional {@link Connection} to use.
     * @param query The query string to prepare.
     * @param function The functional callback to pass a {@link Query} to.
     * @param <R> The expected return type of {@literal function}.
     * @return The results of applying {@literal function}.
     */
    protected <R> R query(Connection tx, String query, QueryFunction<R> function) {
        try (Query q = new Query(objectMapper, tx, query)) {
            return function.apply(q);
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Execute a statement with no expected return value within a given transaction.
     *
     * @param tx The transactional {@link Connection} to use.
     * @param query The query string to prepare.
     * @param function The functional callback to pass a {@link Query} to.
     */
    protected void execute(Connection tx, String query, ExecuteFunction function) {
        try (Query q = new Query(objectMapper, tx, query)) {
            function.apply(q);
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Instantiates a new transactional connection and invokes {@link #execute(Connection, String,
     * ExecuteFunction)}
     *
     * @param query The query string to prepare.
     * @param function The functional callback to pass a {@link Query} to.
     */
    protected void executeWithTransaction(String query, ExecuteFunction function) {
        withTransaction(tx -> execute(tx, query, function));
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresMetadataDAO.java | postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresMetadataDAO.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.dao;
import java.sql.Connection;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import javax.sql.DataSource;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.core.exception.ConflictException;
import com.netflix.conductor.core.exception.NotFoundException;
import com.netflix.conductor.dao.EventHandlerDAO;
import com.netflix.conductor.dao.MetadataDAO;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.postgres.config.PostgresProperties;
import com.netflix.conductor.postgres.util.ExecutorsUtil;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Preconditions;
import jakarta.annotation.*;
public class PostgresMetadataDAO extends PostgresBaseDAO implements MetadataDAO, EventHandlerDAO {
private final ConcurrentHashMap<String, TaskDef> taskDefCache = new ConcurrentHashMap<>();
private static final String CLASS_NAME = PostgresMetadataDAO.class.getSimpleName();
private final ScheduledExecutorService scheduledExecutorService;
public PostgresMetadataDAO(
RetryTemplate retryTemplate,
ObjectMapper objectMapper,
DataSource dataSource,
PostgresProperties properties) {
super(retryTemplate, objectMapper, dataSource);
long cacheRefreshTime = properties.getTaskDefCacheRefreshInterval().getSeconds();
this.scheduledExecutorService =
Executors.newSingleThreadScheduledExecutor(
ExecutorsUtil.newNamedThreadFactory("postgres-metadata-"));
this.scheduledExecutorService.scheduleWithFixedDelay(
this::refreshTaskDefs, cacheRefreshTime, cacheRefreshTime, TimeUnit.SECONDS);
}
    /**
     * Spring shutdown hook: stops the scheduled task-def cache refresher, waiting up to 30
     * seconds for an in-flight refresh to finish before forcing termination.
     */
    @PreDestroy
    public void destroy() {
        try {
            this.scheduledExecutorService.shutdown();
            if (scheduledExecutorService.awaitTermination(30, TimeUnit.SECONDS)) {
                logger.debug("tasks completed, shutting down");
            } else {
                logger.warn("Forcing shutdown after waiting for 30 seconds");
                scheduledExecutorService.shutdownNow();
            }
        } catch (InterruptedException ie) {
            logger.warn(
                    "Shutdown interrupted, invoking shutdownNow on scheduledExecutorService for refreshTaskDefs",
                    ie);
            scheduledExecutorService.shutdownNow();
            // Preserve the interrupt status for callers further up the stack.
            Thread.currentThread().interrupt();
        }
    }
@Override
public TaskDef createTaskDef(TaskDef taskDef) {
validate(taskDef);
insertOrUpdateTaskDef(taskDef);
return taskDef;
}
@Override
public TaskDef updateTaskDef(TaskDef taskDef) {
validate(taskDef);
insertOrUpdateTaskDef(taskDef);
return taskDef;
}
@Override
public TaskDef getTaskDef(String name) {
Preconditions.checkNotNull(name, "TaskDef name cannot be null");
TaskDef taskDef = taskDefCache.get(name);
if (taskDef == null) {
if (logger.isTraceEnabled()) {
logger.trace("Cache miss: {}", name);
}
taskDef = getTaskDefFromDB(name);
}
return taskDef;
}
@Override
public List<TaskDef> getAllTaskDefs() {
return getWithRetriedTransactions(this::findAllTaskDefs);
}
@Override
public void removeTaskDef(String name) {
final String DELETE_TASKDEF_QUERY = "DELETE FROM meta_task_def WHERE name = ?";
executeWithTransaction(
DELETE_TASKDEF_QUERY,
q -> {
if (!q.addParameter(name).executeDelete()) {
throw new NotFoundException("No such task definition");
}
taskDefCache.remove(name);
});
}
    /**
     * Persists a brand-new workflow definition.
     *
     * @param def the definition to store (name must be non-null)
     * @throws ConflictException if a definition with the same name and version already exists
     */
    @Override
    public void createWorkflowDef(WorkflowDef def) {
        validate(def);
        withTransaction(
                tx -> {
                    // Existence check and insert run in the same transaction.
                    if (workflowExists(tx, def)) {
                        throw new ConflictException(
                                "Workflow with " + def.key() + " already exists!");
                    }
                    insertOrUpdateWorkflowDef(tx, def);
                });
    }
@Override
public void updateWorkflowDef(WorkflowDef def) {
validate(def);
withTransaction(tx -> insertOrUpdateWorkflowDef(tx, def));
}
@Override
public Optional<WorkflowDef> getLatestWorkflowDef(String name) {
final String GET_LATEST_WORKFLOW_DEF_QUERY =
"SELECT json_data FROM meta_workflow_def WHERE NAME = ? AND "
+ "version = latest_version";
return Optional.ofNullable(
queryWithTransaction(
GET_LATEST_WORKFLOW_DEF_QUERY,
q -> q.addParameter(name).executeAndFetchFirst(WorkflowDef.class)));
}
@Override
public Optional<WorkflowDef> getWorkflowDef(String name, int version) {
final String GET_WORKFLOW_DEF_QUERY =
"SELECT json_data FROM meta_workflow_def WHERE NAME = ? AND version = ?";
return Optional.ofNullable(
queryWithTransaction(
GET_WORKFLOW_DEF_QUERY,
q ->
q.addParameter(name)
.addParameter(version)
.executeAndFetchFirst(WorkflowDef.class)));
}
@Override
public void removeWorkflowDef(String name, Integer version) {
final String DELETE_WORKFLOW_QUERY =
"DELETE from meta_workflow_def WHERE name = ? AND version = ?";
withTransaction(
tx -> {
// remove specified workflow
execute(
tx,
DELETE_WORKFLOW_QUERY,
q -> {
if (!q.addParameter(name).addParameter(version).executeDelete()) {
throw new NotFoundException(
String.format(
"No such workflow definition: %s version: %d",
name, version));
}
});
// reset latest version based on remaining rows for this workflow
Optional<Integer> maxVersion = getLatestVersion(tx, name);
maxVersion.ifPresent(newVersion -> updateLatestVersion(tx, name, newVersion));
});
}
public List<String> findAll() {
final String FIND_ALL_WORKFLOW_DEF_QUERY = "SELECT DISTINCT name FROM meta_workflow_def";
return queryWithTransaction(
FIND_ALL_WORKFLOW_DEF_QUERY, q -> q.executeAndFetch(String.class));
}
@Override
public List<WorkflowDef> getAllWorkflowDefs() {
final String GET_ALL_WORKFLOW_DEF_QUERY =
"SELECT json_data FROM meta_workflow_def ORDER BY name, version";
return queryWithTransaction(
GET_ALL_WORKFLOW_DEF_QUERY, q -> q.executeAndFetch(WorkflowDef.class));
}
@Override
public List<WorkflowDef> getAllWorkflowDefsLatestVersions() {
final String GET_ALL_WORKFLOW_DEF_LATEST_VERSIONS_QUERY =
"SELECT json_data FROM meta_workflow_def wd WHERE wd.version = (SELECT MAX(version) FROM meta_workflow_def wd2 WHERE wd2.name = wd.name)";
return queryWithTransaction(
GET_ALL_WORKFLOW_DEF_LATEST_VERSIONS_QUERY,
q -> q.executeAndFetch(WorkflowDef.class));
}
public List<WorkflowDef> getAllLatest() {
final String GET_ALL_LATEST_WORKFLOW_DEF_QUERY =
"SELECT json_data FROM meta_workflow_def WHERE version = " + "latest_version";
return queryWithTransaction(
GET_ALL_LATEST_WORKFLOW_DEF_QUERY, q -> q.executeAndFetch(WorkflowDef.class));
}
public List<WorkflowDef> getAllVersions(String name) {
final String GET_ALL_VERSIONS_WORKFLOW_DEF_QUERY =
"SELECT json_data FROM meta_workflow_def WHERE name = ? " + "ORDER BY version";
return queryWithTransaction(
GET_ALL_VERSIONS_WORKFLOW_DEF_QUERY,
q -> q.addParameter(name).executeAndFetch(WorkflowDef.class));
}
@Override
public void addEventHandler(EventHandler eventHandler) {
Preconditions.checkNotNull(eventHandler.getName(), "EventHandler name cannot be null");
final String INSERT_EVENT_HANDLER_QUERY =
"INSERT INTO meta_event_handler (name, event, active, json_data) "
+ "VALUES (?, ?, ?, ?)";
withTransaction(
tx -> {
if (getEventHandler(tx, eventHandler.getName()) != null) {
throw new ConflictException(
"EventHandler with name "
+ eventHandler.getName()
+ " already exists!");
}
execute(
tx,
INSERT_EVENT_HANDLER_QUERY,
q ->
q.addParameter(eventHandler.getName())
.addParameter(eventHandler.getEvent())
.addParameter(eventHandler.isActive())
.addJsonParameter(eventHandler)
.executeUpdate());
});
}
@Override
public void updateEventHandler(EventHandler eventHandler) {
Preconditions.checkNotNull(eventHandler.getName(), "EventHandler name cannot be null");
// @formatter:off
final String UPDATE_EVENT_HANDLER_QUERY =
"UPDATE meta_event_handler SET "
+ "event = ?, active = ?, json_data = ?, "
+ "modified_on = CURRENT_TIMESTAMP WHERE name = ?";
// @formatter:on
withTransaction(
tx -> {
EventHandler existing = getEventHandler(tx, eventHandler.getName());
if (existing == null) {
throw new NotFoundException(
"EventHandler with name " + eventHandler.getName() + " not found!");
}
execute(
tx,
UPDATE_EVENT_HANDLER_QUERY,
q ->
q.addParameter(eventHandler.getEvent())
.addParameter(eventHandler.isActive())
.addJsonParameter(eventHandler)
.addParameter(eventHandler.getName())
.executeUpdate());
});
}
@Override
public void removeEventHandler(String name) {
final String DELETE_EVENT_HANDLER_QUERY = "DELETE FROM meta_event_handler WHERE name = ?";
withTransaction(
tx -> {
EventHandler existing = getEventHandler(tx, name);
if (existing == null) {
throw new NotFoundException(
"EventHandler with name " + name + " not found!");
}
execute(
tx,
DELETE_EVENT_HANDLER_QUERY,
q -> q.addParameter(name).executeDelete());
});
}
@Override
public List<EventHandler> getAllEventHandlers() {
final String READ_ALL_EVENT_HANDLER_QUERY = "SELECT json_data FROM meta_event_handler";
return queryWithTransaction(
READ_ALL_EVENT_HANDLER_QUERY, q -> q.executeAndFetch(EventHandler.class));
}
    /**
     * Returns the event handlers registered for the given {@code event}.
     *
     * @param event the event name to match
     * @param activeOnly when {@code true}, handlers whose {@code active} flag is false are
     *     filtered out
     * @return the matching handlers (possibly empty), in result-set order
     */
    @Override
    public List<EventHandler> getEventHandlersForEvent(String event, boolean activeOnly) {
        final String READ_ALL_EVENT_HANDLER_BY_EVENT_QUERY =
                "SELECT json_data FROM meta_event_handler WHERE event = ?";
        return queryWithTransaction(
                READ_ALL_EVENT_HANDLER_BY_EVENT_QUERY,
                q -> {
                    q.addParameter(event);
                    return q.executeAndFetch(
                            rs -> {
                                List<EventHandler> handlers = new ArrayList<>();
                                while (rs.next()) {
                                    // Handlers are stored as JSON blobs; the active filter is
                                    // applied in memory after deserialization.
                                    EventHandler h = readValue(rs.getString(1), EventHandler.class);
                                    if (!activeOnly || h.isActive()) {
                                        handlers.add(h);
                                    }
                                }
                                return handlers;
                            });
                });
    }
/**
* Use {@link Preconditions} to check for required {@link TaskDef} fields, throwing a Runtime
* exception if validations fail.
*
* @param taskDef The {@code TaskDef} to check.
*/
private void validate(TaskDef taskDef) {
Preconditions.checkNotNull(taskDef, "TaskDef object cannot be null");
Preconditions.checkNotNull(taskDef.getName(), "TaskDef name cannot be null");
}
/**
* Use {@link Preconditions} to check for required {@link WorkflowDef} fields, throwing a
* Runtime exception if validations fail.
*
* @param def The {@code WorkflowDef} to check.
*/
private void validate(WorkflowDef def) {
Preconditions.checkNotNull(def, "WorkflowDef object cannot be null");
Preconditions.checkNotNull(def.getName(), "WorkflowDef name cannot be null");
}
/**
* Retrieve a {@link EventHandler} by {@literal name}.
*
* @param connection The {@link Connection} to use for queries.
* @param name The {@code EventHandler} name to look for.
* @return {@literal null} if nothing is found, otherwise the {@code EventHandler}.
*/
private EventHandler getEventHandler(Connection connection, String name) {
final String READ_ONE_EVENT_HANDLER_QUERY =
"SELECT json_data FROM meta_event_handler WHERE name = ?";
return query(
connection,
READ_ONE_EVENT_HANDLER_QUERY,
q -> q.addParameter(name).executeAndFetchFirst(EventHandler.class));
}
/**
* Check if a {@link WorkflowDef} with the same {@literal name} and {@literal version} already
* exist.
*
* @param connection The {@link Connection} to use for queries.
* @param def The {@code WorkflowDef} to check for.
* @return {@literal true} if a {@code WorkflowDef} already exists with the same values.
*/
private Boolean workflowExists(Connection connection, WorkflowDef def) {
final String CHECK_WORKFLOW_DEF_EXISTS_QUERY =
"SELECT COUNT(*) FROM meta_workflow_def WHERE name = ? AND " + "version = ?";
return query(
connection,
CHECK_WORKFLOW_DEF_EXISTS_QUERY,
q -> q.addParameter(def.getName()).addParameter(def.getVersion()).exists());
}
/**
* Return the latest version that exists for the provided {@code name}.
*
* @param tx The {@link Connection} to use for queries.
* @param name The {@code name} to check for.
* @return {@code Optional.empty()} if no versions exist, otherwise the max {@link
* WorkflowDef#getVersion} found.
*/
private Optional<Integer> getLatestVersion(Connection tx, String name) {
final String GET_LATEST_WORKFLOW_DEF_VERSION =
"SELECT max(version) AS version FROM meta_workflow_def WHERE " + "name = ?";
Integer val =
query(
tx,
GET_LATEST_WORKFLOW_DEF_VERSION,
q -> {
q.addParameter(name);
return q.executeAndFetch(
rs -> {
if (!rs.next()) {
return null;
}
return rs.getInt(1);
});
});
return Optional.ofNullable(val);
}
/**
* Update the latest version for the workflow with name {@code WorkflowDef} to the version
* provided in {@literal version}.
*
* @param tx The {@link Connection} to use for queries.
* @param name Workflow def name to update
* @param version The new latest {@code version} value.
*/
private void updateLatestVersion(Connection tx, String name, int version) {
final String UPDATE_WORKFLOW_DEF_LATEST_VERSION_QUERY =
"UPDATE meta_workflow_def SET latest_version = ? " + "WHERE name = ?";
execute(
tx,
UPDATE_WORKFLOW_DEF_LATEST_VERSION_QUERY,
q -> q.addParameter(version).addParameter(name).executeUpdate());
}
private void insertOrUpdateWorkflowDef(Connection tx, WorkflowDef def) {
final String INSERT_WORKFLOW_DEF_QUERY =
"INSERT INTO meta_workflow_def (name, version, json_data) VALUES (?," + " ?, ?)";
Optional<Integer> version = getLatestVersion(tx, def.getName());
if (!workflowExists(tx, def)) {
execute(
tx,
INSERT_WORKFLOW_DEF_QUERY,
q ->
q.addParameter(def.getName())
.addParameter(def.getVersion())
.addJsonParameter(def)
.executeUpdate());
} else {
// @formatter:off
final String UPDATE_WORKFLOW_DEF_QUERY =
"UPDATE meta_workflow_def "
+ "SET json_data = ?, modified_on = CURRENT_TIMESTAMP "
+ "WHERE name = ? AND version = ?";
// @formatter:on
execute(
tx,
UPDATE_WORKFLOW_DEF_QUERY,
q ->
q.addJsonParameter(def)
.addParameter(def.getName())
.addParameter(def.getVersion())
.executeUpdate());
}
int maxVersion = def.getVersion();
if (version.isPresent() && version.get() > def.getVersion()) {
maxVersion = version.get();
}
updateLatestVersion(tx, def.getName(), maxVersion);
}
    /**
     * Query persistence for all defined {@link TaskDef} data, and cache it in {@link
     * #taskDefCache}. Runs on the scheduled refresh executor; failures are logged and counted
     * via {@link Monitors} but never propagated, so the schedule keeps running.
     */
    private void refreshTaskDefs() {
        try {
            withTransaction(
                    tx -> {
                        Map<String, TaskDef> map = new HashMap<>();
                        findAllTaskDefs(tx).forEach(taskDef -> map.put(taskDef.getName(), taskDef));
                        // NOTE(review): synchronizing on the ConcurrentHashMap only serializes
                        // concurrent refreshes; readers (e.g. getTaskDef) do not take this lock,
                        // so they may briefly observe an empty cache between clear() and
                        // putAll() — confirm this window is acceptable.
                        synchronized (taskDefCache) {
                            taskDefCache.clear();
                            taskDefCache.putAll(map);
                        }
                        if (logger.isTraceEnabled()) {
                            logger.trace("Refreshed {} TaskDefs", taskDefCache.size());
                        }
                    });
        } catch (Exception e) {
            Monitors.error(CLASS_NAME, "refreshTaskDefs");
            logger.error("refresh TaskDefs failed ", e);
        }
    }
/**
* Query persistence for all defined {@link TaskDef} data.
*
* @param tx The {@link Connection} to use for queries.
* @return A new {@code List<TaskDef>} with all the {@code TaskDef} data that was retrieved.
*/
private List<TaskDef> findAllTaskDefs(Connection tx) {
final String READ_ALL_TASKDEF_QUERY = "SELECT json_data FROM meta_task_def";
return query(tx, READ_ALL_TASKDEF_QUERY, q -> q.executeAndFetch(TaskDef.class));
}
/**
* Explicitly retrieves a {@link TaskDef} from persistence, avoiding {@link #taskDefCache}.
*
* @param name The name of the {@code TaskDef} to query for.
* @return {@literal null} if nothing is found, otherwise the {@code TaskDef}.
*/
private TaskDef getTaskDefFromDB(String name) {
final String READ_ONE_TASKDEF_QUERY = "SELECT json_data FROM meta_task_def WHERE name = ?";
return queryWithTransaction(
READ_ONE_TASKDEF_QUERY,
q -> q.addParameter(name).executeAndFetchFirst(TaskDef.class));
}
    /**
     * Upserts a task definition using an update-then-insert strategy (UPDATE first; INSERT only
     * when no row was modified), then refreshes the local cache entry.
     *
     * @param taskDef the definition to persist
     * @return the name of the persisted task definition
     */
    private String insertOrUpdateTaskDef(TaskDef taskDef) {
        final String UPDATE_TASKDEF_QUERY =
                "UPDATE meta_task_def SET json_data = ?, modified_on = CURRENT_TIMESTAMP WHERE name = ?";
        final String INSERT_TASKDEF_QUERY =
                "INSERT INTO meta_task_def (name, json_data) VALUES (?, ?)";
        return getWithRetriedTransactions(
                tx -> {
                    execute(
                            tx,
                            UPDATE_TASKDEF_QUERY,
                            update -> {
                                int result =
                                        update.addJsonParameter(taskDef)
                                                .addParameter(taskDef.getName())
                                                .executeUpdate();
                                if (result == 0) {
                                    // No existing row for this name — fall back to INSERT
                                    // within the same transaction.
                                    execute(
                                            tx,
                                            INSERT_TASKDEF_QUERY,
                                            insert ->
                                                    insert.addParameter(taskDef.getName())
                                                            .addJsonParameter(taskDef)
                                                            .executeUpdate());
                                }
                            });
                    // Keep the read-through cache coherent with the row just written.
                    taskDefCache.put(taskDef.getName(), taskDef);
                    return taskDef.getName();
                });
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/ExecuteFunction.java | postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/ExecuteFunction.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.util;
import java.sql.SQLException;
/**
 * Functional callback for {@link Query} executions that produce no result.
 *
 * <p>Implementations typically bind parameters and invoke one of the query's {@code execute*}
 * methods.
 *
 * @author mustafa
 */
@FunctionalInterface
public interface ExecuteFunction {

    /**
     * Runs the given prepared {@link Query}.
     *
     * @param query the query to execute
     * @throws SQLException if the underlying JDBC operation fails
     */
    void apply(Query query) throws SQLException;
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/LazyToString.java | postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/LazyToString.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.util;
import java.util.function.Supplier;
/** Functional class to support the lazy execution of a String result. */
public class LazyToString {

    // Deferred computation; invoked anew on every toString() call.
    private final Supplier<String> delegate;

    /**
     * @param supplier Supplier to execute when {@link #toString()} is called.
     */
    public LazyToString(Supplier<String> supplier) {
        this.delegate = supplier;
    }

    /** Evaluates the supplier and returns its value. */
    @Override
    public String toString() {
        return delegate.get();
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/QueryFunction.java | postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/QueryFunction.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.util;
import java.sql.SQLException;
/**
 * Functional callback for {@link Query} executions that produce a result.
 *
 * @param <R> the type of value produced by the query
 * @author mustafa
 */
@FunctionalInterface
public interface QueryFunction<R> {

    /**
     * Runs the given prepared {@link Query} and returns its result.
     *
     * @param query the query to execute
     * @return the computed result
     * @throws SQLException if the underlying JDBC operation fails
     */
    R apply(Query query) throws SQLException;
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/QueueStats.java | postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/QueueStats.java | /*
* Copyright 2024 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.util;
/**
 * Mutable snapshot of a single queue's state: its current depth and the timestamp of the next
 * deliverable message. Populated by the Postgres queue-listener machinery.
 */
public class QueueStats {

    // Number of messages in the queue; boxed Integer, so it may be null until set.
    private Integer depth;

    // Epoch-millis timestamp at which the next message becomes deliverable
    // (compared against System.currentTimeMillis() by callers).
    private long nextDelivery;

    public void setDepth(Integer depth) {
        this.depth = depth;
    }

    public Integer getDepth() {
        return depth;
    }

    public void setNextDelivery(long nextDelivery) {
        this.nextDelivery = nextDelivery;
    }

    public long getNextDelivery() {
        return nextDelivery;
    }

    @Override
    public String toString() {
        return "{nextDelivery: " + nextDelivery + " depth: " + depth + "}";
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/TransactionalFunction.java | postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/TransactionalFunction.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.util;
import java.sql.Connection;
import java.sql.SQLException;
/**
 * Functional interface for operations within a transactional context.
 *
 * <p>Implementations receive a JDBC {@link Connection} (named {@code tx}, suggesting the caller
 * manages the enclosing transaction) and return a value of type {@code R}.
 *
 * @param <R> the type of result produced by the transactional work
 * @author mustafa
 */
@FunctionalInterface
public interface TransactionalFunction<R> {

    /**
     * Performs the unit of work using the supplied connection.
     *
     * @param tx the connection on which the work runs
     * @return the result of the work
     * @throws SQLException if the underlying JDBC operation fails
     */
    R apply(Connection tx) throws SQLException;
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/PostgresIndexQueryBuilder.java | postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/PostgresIndexQueryBuilder.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.util;
import java.sql.SQLException;
import java.time.Instant;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;
import com.netflix.conductor.postgres.config.PostgresProperties;
/**
 * Builds parameterized SQL for searching workflow/task index tables in Postgres.
 *
 * <p>A structured query string (e.g. {@code "workflowType = x AND status IN (a,b)"}) and an
 * optional free-text term are parsed into {@link Condition}s; only conditions whose attribute is
 * in the {@link #VALID_FIELDS} whitelist make it into the generated SQL, and bind parameters are
 * added in exactly the same order as the emitted fragments.
 */
public class PostgresIndexQueryBuilder {

    private final String table;
    private final String freeText;
    private final int start;
    private final int count;
    private final List<String> sort;
    private final List<Condition> conditions = new ArrayList<>();
    private boolean allowJsonQueries;
    private boolean allowFullTextQueries;

    /** Whitelist of columns/expressions permitted in WHERE and ORDER BY clauses. */
    private static final String[] VALID_FIELDS = {
        "workflow_id",
        "correlation_id",
        "workflow_type",
        "start_time",
        "status",
        "task_id",
        "task_type",
        "task_def_name",
        "update_time",
        "json_data",
        "jsonb_to_tsvector('english', json_data, '[\"all\"]')"
    };

    private static final String[] VALID_SORT_ORDER = {"ASC", "DESC"};

    /** One parsed predicate: attribute, operator and bind values. */
    private static class Condition {

        // Parses e.g. "workflowType IN (a,b)" into attribute / operator / value-list.
        // Compiled once instead of per instance.
        private static final Pattern CONDITION_PATTERN =
                Pattern.compile("([a-zA-Z]+)\\s?(=|>|<|IN)\\s?(.*)");

        private String attribute;
        private String operator;
        private List<String> values;

        public Condition() {}

        public Condition(String query) {
            Matcher conditionMatcher = CONDITION_PATTERN.matcher(query);
            if (!conditionMatcher.find()) {
                throw new IllegalArgumentException("Incorrectly formatted query string: " + query);
            }
            String[] valueArr = conditionMatcher.group(3).replaceAll("[\"()]", "").split(",");
            ArrayList<String> values = new ArrayList<>(Arrays.asList(valueArr));
            this.attribute = camelToSnake(conditionMatcher.group(1));
            this.values = values;
            this.operator = getOperator(conditionMatcher.group(2));
            if (this.attribute.endsWith("_time")) {
                // Time attributes arrive as epoch millis; convert every value (not just the
                // first) so multi-valued IN conditions are fully converted.
                values.replaceAll(this::millisToUtc);
            }
        }

        /** Returns the SQL fragment (with '?' placeholders) for this condition. */
        public String getQueryFragment() {
            if (operator.equals("IN")) {
                return attribute + " = ANY(?)";
            } else if (operator.equals("@@")) {
                return attribute + " @@ to_tsquery(?)";
            } else if (operator.equals("@>")) {
                return attribute + " @> ?::JSONB";
            } else {
                if (attribute.endsWith("_time")) {
                    return attribute + " " + operator + " ?::TIMESTAMPTZ";
                } else {
                    return attribute + " " + operator + " ?";
                }
            }
        }

        // Collapses single-valued IN to "=" so a plain scalar bind can be used.
        private String getOperator(String op) {
            if (op.equals("IN") && values.size() == 1) {
                return "=";
            }
            return op;
        }

        /** Binds this condition's value(s) to the next placeholder(s) of {@code q}. */
        public void addParameter(Query q) throws SQLException {
            if (values.size() > 1) {
                q.addParameter(values);
            } else {
                q.addParameter(values.get(0));
            }
        }

        // Converts an epoch-millis string to an ISO-8601 UTC timestamp string.
        private String millisToUtc(String millis) {
            Long startTimeMilli = Long.parseLong(millis);
            ZonedDateTime startDate =
                    ZonedDateTime.ofInstant(Instant.ofEpochMilli(startTimeMilli), ZoneOffset.UTC);
            return DateTimeFormatter.ISO_DATE_TIME.format(startDate);
        }

        // Whitelist check; invalid conditions are excluded from both SQL and binds.
        private boolean isValid() {
            return Arrays.asList(VALID_FIELDS).contains(attribute);
        }

        public void setAttribute(String attribute) {
            this.attribute = attribute;
        }

        public void setOperator(String operator) {
            this.operator = operator;
        }

        public void setValues(List<String> values) {
            this.values = values;
        }
    }

    public PostgresIndexQueryBuilder(
            String table,
            String query,
            String freeText,
            int start,
            int count,
            List<String> sort,
            PostgresProperties properties) {
        this.table = table;
        this.freeText = freeText;
        this.start = start;
        this.count = count;
        this.sort = sort;
        this.allowFullTextQueries = properties.getAllowFullTextQueries();
        this.allowJsonQueries = properties.getAllowJsonQueries();
        this.parseQuery(query);
        this.parseFreeText(freeText);
    }

    /** Returns the paged SELECT statement with placeholders for conditions, LIMIT and OFFSET. */
    public String getQuery() {
        return "SELECT json_data::TEXT FROM "
                + table
                + buildWhereClause()
                + getSort()
                + " LIMIT ? OFFSET ?";
    }

    /** Returns the matching COUNT statement (same WHERE clause, no paging/sort). */
    public String getCountQuery() {
        return "SELECT COUNT(json_data) FROM " + table + buildWhereClause();
    }

    // Shared WHERE-clause builder: only whitelisted conditions contribute fragments.
    private String buildWhereClause() {
        List<String> fragments =
                conditions.stream()
                        .filter(Condition::isValid)
                        .map(Condition::getQueryFragment)
                        .collect(Collectors.toList());
        return fragments.isEmpty() ? "" : " WHERE " + String.join(" AND ", fragments);
    }

    /**
     * Binds condition values to {@code q}.
     *
     * <p>Only valid (whitelisted) conditions are bound: invalid conditions are filtered out of
     * the generated WHERE clause, so binding their values would shift every subsequent
     * placeholder index and break the statement.
     */
    public void addParameters(Query q) throws SQLException {
        for (Condition condition : conditions) {
            if (condition.isValid()) {
                condition.addParameter(q);
            }
        }
    }

    /** Binds the LIMIT (count) and OFFSET (start) placeholders, in that order. */
    public void addPagingParameters(Query q) throws SQLException {
        q.addParameter(count);
        q.addParameter(start);
    }

    private void parseQuery(String query) {
        if (query != null && !query.isEmpty()) {
            for (String s : query.split(" AND ")) {
                conditions.add(new Condition(s));
            }
            // Deterministic fragment order keeps SQL text and bind order stable.
            conditions.sort(Comparator.comparing(Condition::getQueryFragment));
        }
    }

    private void parseFreeText(String freeText) {
        if (freeText != null && !freeText.isEmpty() && !freeText.equals("*")) {
            if (allowJsonQueries && freeText.startsWith("{") && freeText.endsWith("}")) {
                // JSON containment query against the json_data column.
                Condition cond = new Condition();
                cond.setAttribute("json_data");
                cond.setOperator("@>");
                String[] values = {freeText};
                cond.setValues(Arrays.asList(values));
                conditions.add(cond);
            } else if (allowFullTextQueries) {
                // Full-text search over the whole JSON document.
                Condition cond = new Condition();
                cond.setAttribute("jsonb_to_tsvector('english', json_data, '[\"all\"]')");
                cond.setOperator("@@");
                String[] values = {freeText};
                cond.setValues(Arrays.asList(values));
                conditions.add(cond);
            }
        }
    }

    // Builds the ORDER BY clause from "attribute:ORDER" specs; both attribute and order
    // must pass the whitelists, otherwise the spec is silently dropped.
    private String getSort() {
        ArrayList<String> sortConds = new ArrayList<>();
        for (String s : sort) {
            String[] splitCond = s.split(":");
            if (splitCond.length == 2) {
                String attribute = camelToSnake(splitCond[0]);
                String order = splitCond[1].toUpperCase();
                if (Arrays.asList(VALID_FIELDS).contains(attribute)
                        && Arrays.asList(VALID_SORT_ORDER).contains(order)) {
                    sortConds.add(attribute + " " + order);
                }
            }
        }
        if (sortConds.size() > 0) {
            return " ORDER BY " + String.join(", ", sortConds);
        }
        return "";
    }

    // e.g. "workflowType" -> "workflow_type".
    private static String camelToSnake(String camel) {
        return camel.replaceAll("\\B([A-Z])", "_$1").toLowerCase();
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/ResultSetHandler.java | postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/ResultSetHandler.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.util;
import java.sql.ResultSet;
import java.sql.SQLException;
/**
 * Functional interface for {@link Query#executeAndFetch(ResultSetHandler)}.
 *
 * <p>Implementations map a JDBC {@link ResultSet} to a value of type {@code R}.
 *
 * @param <R> the type of result produced from the result set
 * @author mustafa
 */
@FunctionalInterface
public interface ResultSetHandler<R> {

    /**
     * Maps the supplied result set to a value.
     *
     * @param resultSet the result set to read from
     * @return the mapped result
     * @throws SQLException if reading from the result set fails
     */
    R apply(ResultSet resultSet) throws SQLException;
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/Query.java | postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/Query.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.util;
import java.io.IOException;
import java.sql.*;
import java.sql.Date;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.lang3.math.NumberUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.core.exception.NonTransientException;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
/**
 * Represents a {@link PreparedStatement} that is wrapped with convenience methods and utilities.
 *
 * <p>This class simulates a parameter building pattern and all {@literal addParameter(*)} methods
 * must be called in the proper order of their expected binding sequence.
 *
 * @author mustafa
 */
public class Query implements AutoCloseable {

    private final Logger logger = LoggerFactory.getLogger(getClass());

    /** The {@link ObjectMapper} instance to use for serializing/deserializing JSON. */
    protected final ObjectMapper objectMapper;

    /** The initial supplied query String that was used to prepare {@link #statement}. */
    private final String rawQuery;

    /**
     * Parameter index for the {@code ResultSet#set*(*)} methods, gets incremented every time a
     * parameter is added to the {@code PreparedStatement} {@link #statement}.
     */
    private final AtomicInteger index = new AtomicInteger(1);

    /** The {@link PreparedStatement} that will be managed and executed by this class. */
    private final PreparedStatement statement;

    private final Connection connection;

    public Query(ObjectMapper objectMapper, Connection connection, String query) {
        this.rawQuery = query;
        this.objectMapper = objectMapper;
        this.connection = connection;
        try {
            this.statement = connection.prepareStatement(query);
        } catch (SQLException ex) {
            throw new NonTransientException(
                    "Cannot prepare statement for query: " + ex.getMessage(), ex);
        }
    }

    /**
     * Generate a String with {@literal count} number of '?' placeholders for {@link
     * PreparedStatement} queries.
     *
     * @param count The number of '?' chars to generate.
     * @return a comma delimited string of {@literal count} '?' binding placeholders.
     */
    public static String generateInBindings(int count) {
        String[] questions = new String[count];
        for (int i = 0; i < count; i++) {
            questions[i] = "?";
        }
        return String.join(", ", questions);
    }

    public Query addParameter(final String value) {
        return addParameterInternal((ps, idx) -> ps.setString(idx, value));
    }

    public Query addParameter(final List<String> value) throws SQLException {
        String[] valueStringArray = value.toArray(new String[0]);
        // NOTE(review): the created java.sql.Array is never explicitly freed; it is assumed
        // to be released when the statement/connection closes — confirm for the driver in use.
        Array valueArray = this.connection.createArrayOf("VARCHAR", valueStringArray);
        return addParameterInternal((ps, idx) -> ps.setArray(idx, valueArray));
    }

    public Query addParameter(final int value) {
        return addParameterInternal((ps, idx) -> ps.setInt(idx, value));
    }

    public Query addParameter(final boolean value) {
        return addParameterInternal(((ps, idx) -> ps.setBoolean(idx, value)));
    }

    public Query addParameter(final long value) {
        return addParameterInternal((ps, idx) -> ps.setLong(idx, value));
    }

    public Query addParameter(final double value) {
        return addParameterInternal((ps, idx) -> ps.setDouble(idx, value));
    }

    public Query addParameter(Date date) {
        return addParameterInternal((ps, idx) -> ps.setDate(idx, date));
    }

    public Query addParameter(Timestamp timestamp) {
        return addParameterInternal((ps, idx) -> ps.setTimestamp(idx, timestamp));
    }

    /**
     * Serializes {@literal value} to a JSON string for persistence.
     *
     * @param value The value to serialize.
     * @return {@literal this}
     */
    public Query addJsonParameter(Object value) {
        return addParameter(toJson(value));
    }

    /**
     * Bind the given {@link java.util.Date} to the PreparedStatement as a {@link Date}.
     *
     * @param date The {@literal java.util.Date} to bind.
     * @return {@literal this}
     */
    public Query addDateParameter(java.util.Date date) {
        return addParameter(new Date(date.getTime()));
    }

    /**
     * Bind the given {@link java.util.Date} to the PreparedStatement as a {@link Timestamp}.
     *
     * @param date The {@literal java.util.Date} to bind.
     * @return {@literal this}
     */
    public Query addTimestampParameter(java.util.Date date) {
        return addParameter(new Timestamp(date.getTime()));
    }

    /**
     * Bind the given epoch millis to the PreparedStatement as a {@link Timestamp}.
     *
     * @param epochMillis The epoch ms to create a new {@literal Timestamp} from.
     * @return {@literal this}
     */
    public Query addTimestampParameter(long epochMillis) {
        return addParameter(new Timestamp(epochMillis));
    }

    /**
     * Add a collection of primitive values at once, in the order of the collection.
     *
     * @param values The values to bind to the prepared statement.
     * @return {@literal this}
     * @throws IllegalArgumentException If a non-primitive/unsupported type is encountered in the
     *     collection.
     * @see #addParameters(Object...)
     */
    public Query addParameters(Collection<?> values) {
        return addParameters(values.toArray());
    }

    /**
     * Add many primitive values at once.
     *
     * @param values The values to bind to the prepared statement.
     * @return {@literal this}
     * @throws IllegalArgumentException If a non-primitive/unsupported type is encountered.
     */
    public Query addParameters(Object... values) {
        for (Object v : values) {
            if (v instanceof String) {
                addParameter((String) v);
            } else if (v instanceof Integer) {
                addParameter((Integer) v);
            } else if (v instanceof Long) {
                addParameter((Long) v);
            } else if (v instanceof Double) {
                addParameter((Double) v);
            } else if (v instanceof Boolean) {
                addParameter((Boolean) v);
            } else if (v instanceof Date) {
                addParameter((Date) v);
            } else if (v instanceof Timestamp) {
                addParameter((Timestamp) v);
            } else {
                throw new IllegalArgumentException(
                        "Type "
                                + v.getClass().getName()
                                + " is not supported by automatic property assignment");
            }
        }
        return this;
    }

    /**
     * Utility method for evaluating the prepared statement as a query to check the existence of a
     * record using a numeric count or boolean return value.
     *
     * <p>The {@link #rawQuery} provided must result in a {@link Number} or {@link Boolean} result.
     *
     * @return {@literal true} If a count query returned more than 0 or an exists query returns
     *     {@literal true}.
     * @throws NonTransientException If an unexpected return type cannot be evaluated to a {@code
     *     Boolean} result.
     */
    public boolean exists() {
        Object val = executeScalar();
        if (null == val) {
            return false;
        }
        if (val instanceof Number) {
            return convertLong(val) > 0;
        }
        if (val instanceof Boolean) {
            return (Boolean) val;
        }
        if (val instanceof String) {
            return convertBoolean(val);
        }
        throw new NonTransientException(
                "Expected a Numeric or Boolean scalar return value from the query, received "
                        + val.getClass().getName());
    }

    /**
     * Convenience method for executing delete statements.
     *
     * @return {@literal true} if the statement affected 1 or more rows.
     * @see #executeUpdate()
     */
    public boolean executeDelete() {
        int count = executeUpdate();
        if (count > 1) {
            logger.trace("Removed {} row(s) for query {}", count, rawQuery);
        }
        return count > 0;
    }

    /**
     * Convenience method for executing statements that return a single numeric value, typically
     * {@literal SELECT COUNT...} style queries.
     *
     * @return The result of the query as a {@literal long}.
     */
    public long executeCount() {
        return executeScalar(Long.class);
    }

    /**
     * @return The result of {@link PreparedStatement#executeUpdate()}
     */
    public int executeUpdate() {
        try {
            Long start = null;
            if (logger.isTraceEnabled()) {
                start = System.currentTimeMillis();
            }
            final int val = this.statement.executeUpdate();
            if (null != start && logger.isTraceEnabled()) {
                long end = System.currentTimeMillis();
                logger.trace("[{}ms] {}: {}", (end - start), val, rawQuery);
            }
            return val;
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Execute a query from the PreparedStatement and return the ResultSet.
     *
     * <p><em>NOTE:</em> The returned ResultSet must be closed/managed by the calling methods.
     *
     * @return {@link PreparedStatement#executeQuery()}
     * @throws NonTransientException If any SQL errors occur.
     */
    public ResultSet executeQuery() {
        Long start = null;
        if (logger.isTraceEnabled()) {
            start = System.currentTimeMillis();
        }
        try {
            return this.statement.executeQuery();
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        } finally {
            if (null != start && logger.isTraceEnabled()) {
                long end = System.currentTimeMillis();
                logger.trace("[{}ms] {}", (end - start), rawQuery);
            }
        }
    }

    /**
     * @return The single result of the query as an Object.
     */
    public Object executeScalar() {
        try (ResultSet rs = executeQuery()) {
            if (!rs.next()) {
                return null;
            }
            return rs.getObject(1);
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Execute the PreparedStatement and return a single 'primitive' value from the ResultSet.
     *
     * @param returnType The type to return.
     * @param <V> The type parameter to return a List of.
     * @return A single result from the execution of the statement, as a type of {@literal
     *     returnType}.
     * @throws NonTransientException {@literal returnType} is unsupported, cannot be cast to from
     *     the result, or any SQL errors occur.
     */
    public <V> V executeScalar(Class<V> returnType) {
        try (ResultSet rs = executeQuery()) {
            if (!rs.next()) {
                // Empty result: return a zero-like default for numeric/boolean types, else null.
                Object value = null;
                if (Integer.class == returnType) {
                    value = 0;
                } else if (Long.class == returnType) {
                    value = 0L;
                } else if (Boolean.class == returnType) {
                    value = false;
                }
                return returnType.cast(value);
            } else {
                return getScalarFromResultSet(rs, returnType);
            }
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Execute the PreparedStatement and return a List of 'primitive' values from the ResultSet.
     *
     * @param returnType The type Class return a List of.
     * @param <V> The type parameter to return a List of.
     * @return A {@code List<returnType>}.
     * @throws NonTransientException {@literal returnType} is unsupported, cannot be cast to from
     *     the result, or any SQL errors occur.
     */
    public <V> List<V> executeScalarList(Class<V> returnType) {
        try (ResultSet rs = executeQuery()) {
            List<V> values = new ArrayList<>();
            while (rs.next()) {
                values.add(getScalarFromResultSet(rs, returnType));
            }
            return values;
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Execute the statement and return only the first record from the result set.
     *
     * @param returnType The Class to return.
     * @param <V> The type parameter.
     * @return An instance of {@literal <V>} from the result set.
     */
    public <V> V executeAndFetchFirst(Class<V> returnType) {
        Object o = executeScalar();
        if (null == o) {
            return null;
        }
        return convert(o, returnType);
    }

    /**
     * Execute the PreparedStatement and return a List of {@literal returnType} values from the
     * ResultSet.
     *
     * @param returnType The type Class return a List of.
     * @param <V> The type parameter to return a List of.
     * @return A {@code List<returnType>}.
     * @throws NonTransientException {@literal returnType} is unsupported, cannot be cast to from
     *     the result, or any SQL errors occur.
     */
    public <V> List<V> executeAndFetch(Class<V> returnType) {
        try (ResultSet rs = executeQuery()) {
            List<V> list = new ArrayList<>();
            while (rs.next()) {
                list.add(convert(rs.getObject(1), returnType));
            }
            return list;
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Execute the PreparedStatement and return a List of {@literal Map} values from the ResultSet.
     *
     * @return A {@code List<Map>}, one map per row, keyed by column label.
     * @throws NonTransientException if any SQL errors occur.
     */
    public List<Map<String, Object>> executeAndFetchMap() {
        try (ResultSet rs = executeQuery()) {
            List<Map<String, Object>> result = new ArrayList<>();
            ResultSetMetaData metadata = rs.getMetaData();
            int columnCount = metadata.getColumnCount();
            while (rs.next()) {
                HashMap<String, Object> row = new HashMap<>();
                for (int i = 1; i <= columnCount; i++) {
                    row.put(metadata.getColumnLabel(i), rs.getObject(i));
                }
                result.add(row);
            }
            return result;
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Execute the query and pass the {@link ResultSet} to the given handler.
     *
     * @param handler The {@link ResultSetHandler} to execute.
     * @param <V> The return type of this method.
     * @return The results of {@link ResultSetHandler#apply(ResultSet)}.
     */
    public <V> V executeAndFetch(ResultSetHandler<V> handler) {
        try (ResultSet rs = executeQuery()) {
            return handler.apply(rs);
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    @Override
    public void close() {
        try {
            if (null != statement && !statement.isClosed()) {
                statement.close();
            }
        } catch (SQLException ex) {
            logger.warn("Error closing prepared statement: {}", ex.getMessage());
        }
    }

    protected final Query addParameterInternal(InternalParameterSetter setter) {
        int index = getAndIncrementIndex();
        try {
            setter.apply(this.statement, index);
            return this;
        } catch (SQLException ex) {
            throw new NonTransientException("Could not apply bind parameter at index " + index, ex);
        }
    }

    protected <V> V getScalarFromResultSet(ResultSet rs, Class<V> returnType) throws SQLException {
        Object value = null;
        if (Integer.class == returnType) {
            value = rs.getInt(1);
        } else if (Long.class == returnType) {
            value = rs.getLong(1);
        } else if (String.class == returnType) {
            value = rs.getString(1);
        } else if (Boolean.class == returnType) {
            value = rs.getBoolean(1);
        } else if (Double.class == returnType) {
            value = rs.getDouble(1);
        } else if (Date.class == returnType) {
            value = rs.getDate(1);
        } else if (Timestamp.class == returnType) {
            value = rs.getTimestamp(1);
        } else {
            value = rs.getObject(1);
        }
        if (null == value) {
            throw new NullPointerException(
                    "Cannot get value from ResultSet of type " + returnType.getName());
        }
        return returnType.cast(value);
    }

    protected <V> V convert(Object value, Class<V> returnType) {
        if (Boolean.class == returnType) {
            return returnType.cast(convertBoolean(value));
        } else if (Integer.class == returnType) {
            return returnType.cast(convertInt(value));
        } else if (Long.class == returnType) {
            return returnType.cast(convertLong(value));
        } else if (Double.class == returnType) {
            return returnType.cast(convertDouble(value));
        } else if (String.class == returnType) {
            return returnType.cast(convertString(value));
        } else if (value instanceof String) {
            // Last resort: treat the String as JSON and deserialize into the target type.
            return fromJson((String) value, returnType);
        }
        final String vName = value.getClass().getName();
        final String rName = returnType.getName();
        throw new NonTransientException("Cannot convert type " + vName + " to " + rName);
    }

    protected Integer convertInt(Object value) {
        if (null == value) {
            return null;
        }
        if (value instanceof Integer) {
            return (Integer) value;
        }
        if (value instanceof Number) {
            return ((Number) value).intValue();
        }
        return NumberUtils.toInt(value.toString());
    }

    protected Double convertDouble(Object value) {
        if (null == value) {
            return null;
        }
        if (value instanceof Double) {
            return (Double) value;
        }
        if (value instanceof Number) {
            return ((Number) value).doubleValue();
        }
        return NumberUtils.toDouble(value.toString());
    }

    protected Long convertLong(Object value) {
        if (null == value) {
            return null;
        }
        if (value instanceof Long) {
            return (Long) value;
        }
        if (value instanceof Number) {
            return ((Number) value).longValue();
        }
        return NumberUtils.toLong(value.toString());
    }

    protected String convertString(Object value) {
        if (null == value) {
            return null;
        }
        if (value instanceof String) {
            return (String) value;
        }
        return value.toString().trim();
    }

    protected Boolean convertBoolean(Object value) {
        if (null == value) {
            return null;
        }
        if (value instanceof Boolean) {
            return (Boolean) value;
        }
        if (value instanceof Number) {
            return ((Number) value).intValue() != 0;
        }
        String text = value.toString().trim();
        return "Y".equalsIgnoreCase(text)
                || "YES".equalsIgnoreCase(text)
                || "TRUE".equalsIgnoreCase(text)
                || "T".equalsIgnoreCase(text)
                || "1".equalsIgnoreCase(text);
    }

    protected String toJson(Object value) {
        if (null == value) {
            return null;
        }
        try {
            return objectMapper.writeValueAsString(value);
        } catch (JsonProcessingException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    protected <V> V fromJson(String value, Class<V> returnType) {
        if (null == value) {
            return null;
        }
        try {
            return objectMapper.readValue(value, returnType);
        } catch (IOException ex) {
            throw new NonTransientException(
                    "Could not convert JSON '" + value + "' to " + returnType.getName(), ex);
        }
    }

    protected final int getIndex() {
        return index.get();
    }

    protected final int getAndIncrementIndex() {
        return index.getAndIncrement();
    }

    @FunctionalInterface
    private interface InternalParameterSetter {

        void apply(PreparedStatement ps, int idx) throws SQLException;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/PostgresQueueListener.java | postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/PostgresQueueListener.java | /*
* Copyright 2024 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.util;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Optional;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import javax.sql.DataSource;
import org.postgresql.PGConnection;
import org.postgresql.PGNotification;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.postgres.config.PostgresProperties;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
/**
 * Experimental queue-state listener that uses PostgreSQL LISTEN/NOTIFY to keep an
 * in-memory snapshot of per-queue statistics, so callers can often avoid polling the
 * database. When the snapshot is stale or the connection is down, the accessors report
 * that so callers fall back to querying the database directly.
 */
public class PostgresQueueListener {

    private PGConnection pgconn;
    private volatile Connection conn;
    private final Lock connectionLock = new ReentrantLock();
    private DataSource dataSource;

    // Latest per-queue stats decoded from a NOTIFY payload. Replaced wholesale by
    // processPayload(); initialized empty so readers never observe null.
    private HashMap<String, QueueStats> queues = new HashMap<>();

    private volatile boolean connected = false;
    private long lastNotificationTime = 0;
    private Integer stalePeriod;

    // ObjectMapper is thread-safe once configured; reuse a single instance instead of
    // allocating one per notification payload.
    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

    protected final Logger logger = LoggerFactory.getLogger(getClass());

    public PostgresQueueListener(DataSource dataSource, PostgresProperties properties) {
        logger.info("Using experimental PostgresQueueListener");
        this.dataSource = dataSource;
        this.stalePeriod = properties.getExperimentalQueueNotifyStalePeriod();
        connect();
    }

    /**
     * Returns true when the given queue has a message whose delivery time has passed.
     * Also returns true when the cached view is stale or the listener is disconnected,
     * which tells the caller to poll the database directly.
     */
    public boolean hasMessagesReady(String queueName) {
        checkUpToDate();
        handleNotifications();
        if (notificationIsStale() || !connected) {
            connect();
            return true;
        }

        QueueStats queueStats = queues.get(queueName);
        if (queueStats == null) {
            return false;
        }

        if (queueStats.getNextDelivery() > System.currentTimeMillis()) {
            return false;
        }

        return true;
    }

    /**
     * Returns the queue depth from the most recent notification, or
     * {@link Optional#empty()} when the cached view is stale or disconnected (the
     * caller should then query the database for the size).
     */
    public Optional<Integer> getSize(String queueName) {
        checkUpToDate();
        handleNotifications();
        if (notificationIsStale() || !connected) {
            connect();
            return Optional.empty();
        }

        QueueStats queueStats = queues.get(queueName);
        if (queueStats == null) {
            return Optional.of(0);
        }

        return Optional.of(queueStats.getDepth());
    }

    private boolean notificationIsStale() {
        return System.currentTimeMillis() - lastNotificationTime > this.stalePeriod;
    }

    /**
     * (Re)establishes the LISTEN connection. Uses tryLock so concurrent callers do not
     * pile up behind a slow reconnect; only one thread attempts to reconnect at a time.
     */
    private void connect() {
        if (!connectionLock.tryLock()) {
            // Another thread is already (re)connecting; let it finish.
            return;
        }

        // Assume the existing connected state unless we actually replace the connection.
        // (Previously a connect() call against a healthy connection always reset
        // connected to false, forcing needless database fallbacks.)
        boolean newConnectedState = connected;

        try {
            // Replace the connection only if it is missing or no longer valid.
            if (conn == null || !conn.isValid(1)) {
                newConnectedState = false;

                // Close the old, invalid connection if one exists.
                if (conn != null) {
                    try {
                        conn.close();
                    } catch (Exception e) {
                        logger.error(e.getMessage(), e);
                    }
                }

                try {
                    this.conn = dataSource.getConnection();
                    this.pgconn = conn.unwrap(PGConnection.class);
                    boolean previousAutoCommitMode = conn.getAutoCommit();
                    conn.setAutoCommit(true);
                    // try-with-resources: the statement was previously never closed.
                    try (PreparedStatement stmt =
                            conn.prepareStatement("LISTEN conductor_queue_state")) {
                        stmt.execute();
                        newConnectedState = true;
                    } catch (Throwable th) {
                        // The statement runs in auto-commit mode, so there is no open
                        // transaction to roll back (the old rollback() here always threw
                        // "cannot rollback when autoCommit is enabled").
                        logger.error(th.getMessage(), th);
                    } finally {
                        conn.setAutoCommit(previousAutoCommitMode);
                    }
                    requestStats();
                } catch (SQLException e) {
                    throw new NonTransientException(e.getMessage(), e);
                }
            }
        } catch (Exception e) {
            throw new NonTransientException(e.getMessage(), e);
        } finally {
            connected = newConnectedState;
            // Ensure the lock is always released.
            connectionLock.unlock();
        }
    }

    /** Asks the database to emit a fresh stats notification via queue_notify(). */
    private void requestStats() {
        if (conn == null) {
            // Never connected (or construction failed earlier); try to (re)connect.
            connect();
            return;
        }
        try {
            boolean previousAutoCommitMode = conn.getAutoCommit();
            conn.setAutoCommit(true);
            try (PreparedStatement stmt = conn.prepareStatement("SELECT queue_notify()")) {
                stmt.execute();
                connected = true;
            } catch (Throwable th) {
                // Auto-commit mode: nothing to roll back; just record the failure.
                // A dead connection will surface via setAutoCommit below and trigger
                // the reconnect in the outer catch.
                logger.error(th.getMessage(), th);
            } finally {
                conn.setAutoCommit(previousAutoCommitMode);
            }
        } catch (SQLException e) {
            if (!isSQLExceptionConnectionDoesNotExists(e)) {
                logger.error("Error fetching notifications {}", e.getSQLState());
            }
            connect();
        }
    }

    /**
     * Requests fresh stats once we are past 75% of the stale window, so the cached view
     * is usually refreshed before it actually goes stale.
     */
    private void checkUpToDate() {
        if (System.currentTimeMillis() - lastNotificationTime > this.stalePeriod * 0.75) {
            requestStats();
        }
    }

    /** Drains pending NOTIFY messages and applies the most recent payload, if any. */
    private void handleNotifications() {
        if (pgconn == null) {
            // No PG connection yet; attempt to establish one instead of NPE-ing.
            connect();
            return;
        }
        try {
            PGNotification[] notifications = pgconn.getNotifications();
            if (notifications == null || notifications.length == 0) {
                return;
            }
            // Each payload carries a complete stats snapshot (processPayload replaces
            // the whole map), so only the newest notification matters.
            processPayload(notifications[notifications.length - 1].getParameter());
        } catch (SQLException e) {
            if (!isSQLExceptionConnectionDoesNotExists(e)) {
                logger.error("Error fetching notifications {}", e.getSQLState());
            }
            connect();
        }
    }

    /**
     * Parses a JSON notification payload of the form
     * {"__now__": <epoch millis>, "<queueName>": { ...QueueStats fields... }, ...}
     * and atomically replaces the cached per-queue stats map.
     */
    private void processPayload(String payload) {
        try {
            JsonNode notification = OBJECT_MAPPER.readTree(payload);
            // Renamed from "lastNotificationTime" which shadowed the field of the same name.
            JsonNode notifiedAt = notification.get("__now__");
            if (notifiedAt != null) {
                this.lastNotificationTime = notifiedAt.asLong();
            }

            Iterator<String> iterator = notification.fieldNames();
            HashMap<String, QueueStats> queueStats = new HashMap<>();
            iterator.forEachRemaining(
                    key -> {
                        if (!key.equals("__now__")) {
                            try {
                                QueueStats stats =
                                        OBJECT_MAPPER.treeToValue(
                                                notification.get(key), QueueStats.class);
                                queueStats.put(key, stats);
                            } catch (JsonProcessingException e) {
                                throw new RuntimeException(e);
                            }
                        }
                    });
            // Single reference assignment: readers see either the old or the new map.
            this.queues = queueStats;
        } catch (JsonProcessingException e) {
            throw new RuntimeException(e);
        }
    }

    private static boolean isSQLExceptionConnectionDoesNotExists(SQLException e) {
        // 08003 = "connection_does_not_exist" (PostgreSQL SQLSTATE).
        return "08003".equals(e.getSQLState());
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/ExecutorsUtil.java | postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/ExecutorsUtil.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.util;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;
/** Helpers for building {@link java.util.concurrent} executors. */
public class ExecutorsUtil {

    private ExecutorsUtil() {}

    /**
     * Returns a {@link ThreadFactory} that creates threads via the JDK default factory
     * and names them {@code threadNamePrefix + n} with n counting up from 0.
     *
     * @param threadNamePrefix prefix applied to every created thread's name
     */
    public static ThreadFactory newNamedThreadFactory(final String threadNamePrefix) {
        return new ThreadFactory() {
            // Resolve the delegate factory once instead of on every newThread() call.
            private final ThreadFactory delegate = Executors.defaultThreadFactory();
            private final AtomicInteger counter = new AtomicInteger();

            @Override
            public Thread newThread(Runnable r) {
                Thread thread = delegate.newThread(r);
                thread.setName(threadNamePrefix + counter.getAndIncrement());
                return thread;
            }
        };
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-persistence/src/main/java/com/netflix/conductor/postgres/config/PostgresConfiguration.java | postgres-persistence/src/main/java/com/netflix/conductor/postgres/config/PostgresConfiguration.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.config;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Map;
import java.util.Optional;
import javax.sql.DataSource;
import org.flywaydb.core.Flyway;
import org.flywaydb.core.api.configuration.FluentConfiguration;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.*;
import org.springframework.retry.RetryContext;
import org.springframework.retry.backoff.NoBackOffPolicy;
import org.springframework.retry.policy.SimpleRetryPolicy;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.postgres.dao.*;
import com.fasterxml.jackson.databind.ObjectMapper;
import jakarta.annotation.*;
@Configuration(proxyBeanMethods = false)
@EnableConfigurationProperties(PostgresProperties.class)
@ConditionalOnProperty(name = "conductor.db.type", havingValue = "postgres")
// Import the DataSourceAutoConfiguration when postgres database is selected.
// By default, the datasource configuration is excluded in the main module.
@Import(DataSourceAutoConfiguration.class)
public class PostgresConfiguration {

    DataSource dataSource;
    private final PostgresProperties properties;

    public PostgresConfiguration(DataSource dataSource, PostgresProperties properties) {
        this.dataSource = dataSource;
        this.properties = properties;
    }

    /**
     * Flyway configuration for the primary database. Migration locations depend on which
     * optional features are enabled. The container runs the migration through the bean's
     * {@code initMethod}; the previous {@code @PostConstruct} on this factory method was
     * removed — it additionally invoked the method as a lifecycle callback on the
     * configuration class, building a second, container-unmanaged Flyway instance.
     */
    @Bean(initMethod = "migrate")
    public Flyway flywayForPrimaryDb() {
        FluentConfiguration config = Flyway.configure();

        var locations = new ArrayList<String>();
        locations.add("classpath:db/migration_postgres");

        if (properties.getExperimentalQueueNotify()) {
            locations.add("classpath:db/migration_postgres_notify");
        }

        if (properties.isApplyDataMigrations()) {
            locations.add("classpath:db/migration_postgres_data");
        }

        config.locations(locations.toArray(new String[0]));
        return config.configuration(Map.of("flyway.postgresql.transactional.lock", "false"))
                .schemas(properties.getSchema())
                .dataSource(dataSource)
                .outOfOrder(true)
                .baselineOnMigrate(true)
                .load();
    }

    /** Metadata DAO; created only after the schema migration has run. */
    @Bean
    @DependsOn({"flywayForPrimaryDb"})
    public PostgresMetadataDAO postgresMetadataDAO(
            @Qualifier("postgresRetryTemplate") RetryTemplate retryTemplate,
            ObjectMapper objectMapper,
            PostgresProperties properties) {
        return new PostgresMetadataDAO(retryTemplate, objectMapper, dataSource, properties);
    }

    /** Execution DAO; created only after the schema migration has run. */
    @Bean
    @DependsOn({"flywayForPrimaryDb"})
    public PostgresExecutionDAO postgresExecutionDAO(
            @Qualifier("postgresRetryTemplate") RetryTemplate retryTemplate,
            ObjectMapper objectMapper) {
        return new PostgresExecutionDAO(retryTemplate, objectMapper, dataSource);
    }

    /** Poll-data DAO; created only after the schema migration has run. */
    @Bean
    @DependsOn({"flywayForPrimaryDb"})
    public PostgresPollDataDAO postgresPollDataDAO(
            @Qualifier("postgresRetryTemplate") RetryTemplate retryTemplate,
            ObjectMapper objectMapper,
            PostgresProperties properties) {
        return new PostgresPollDataDAO(retryTemplate, objectMapper, dataSource, properties);
    }

    /** Queue DAO; created only after the schema migration has run. */
    @Bean
    @DependsOn({"flywayForPrimaryDb"})
    public PostgresQueueDAO postgresQueueDAO(
            @Qualifier("postgresRetryTemplate") RetryTemplate retryTemplate,
            ObjectMapper objectMapper,
            PostgresProperties properties) {
        return new PostgresQueueDAO(retryTemplate, objectMapper, dataSource, properties);
    }

    /** Index DAO, only when postgres is also the indexing backend. */
    @Bean
    @DependsOn({"flywayForPrimaryDb"})
    @ConditionalOnProperty(name = "conductor.indexing.type", havingValue = "postgres")
    public PostgresIndexDAO postgresIndexDAO(
            @Qualifier("postgresRetryTemplate") RetryTemplate retryTemplate,
            ObjectMapper objectMapper,
            PostgresProperties properties) {
        return new PostgresIndexDAO(retryTemplate, objectMapper, dataSource, properties);
    }

    /** Lock DAO, only when postgres provides the workflow execution lock. */
    @Bean
    @DependsOn({"flywayForPrimaryDb"})
    @ConditionalOnProperty(
            name = "conductor.workflow-execution-lock.type",
            havingValue = "postgres")
    public PostgresLockDAO postgresLockDAO(
            @Qualifier("postgresRetryTemplate") RetryTemplate retryTemplate,
            ObjectMapper objectMapper) {
        return new PostgresLockDAO(retryTemplate, objectMapper, dataSource);
    }

    /**
     * Retry template shared by the DAOs. Retries only PostgreSQL deadlock /
     * serialization failures, up to the configured maximum.
     */
    @Bean
    public RetryTemplate postgresRetryTemplate(PostgresProperties properties) {
        SimpleRetryPolicy retryPolicy = new CustomRetryPolicy();
        // Honor conductor.postgres.deadlock-retry-max (default 3) instead of the
        // previously hard-coded attempt count.
        retryPolicy.setMaxAttempts(properties.getDeadlockRetryMax());
        RetryTemplate retryTemplate = new RetryTemplate();
        retryTemplate.setRetryPolicy(retryPolicy);
        retryTemplate.setBackOffPolicy(new NoBackOffPolicy());
        return retryTemplate;
    }

    /**
     * Retry policy that permits another attempt only when the last failure was caused by a
     * PostgreSQL deadlock (40P01) or serialization failure (40001).
     */
    public static class CustomRetryPolicy extends SimpleRetryPolicy {

        private static final String ER_LOCK_DEADLOCK = "40P01";
        private static final String ER_SERIALIZATION_FAILURE = "40001";

        @Override
        public boolean canRetry(final RetryContext context) {
            final Optional<Throwable> lastThrowable =
                    Optional.ofNullable(context.getLastThrowable());
            return lastThrowable
                    .map(throwable -> super.canRetry(context) && isDeadLockError(throwable))
                    .orElseGet(() -> super.canRetry(context));
        }

        private boolean isDeadLockError(Throwable throwable) {
            SQLException sqlException = findCauseSQLException(throwable);
            if (sqlException == null) {
                return false;
            }
            return ER_LOCK_DEADLOCK.equals(sqlException.getSQLState())
                    || ER_SERIALIZATION_FAILURE.equals(sqlException.getSQLState());
        }

        // Walks the cause chain until an SQLException is found (or the chain ends).
        private SQLException findCauseSQLException(Throwable throwable) {
            Throwable causeException = throwable;
            while (null != causeException && !(causeException instanceof SQLException)) {
                causeException = causeException.getCause();
            }
            return (SQLException) causeException;
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-persistence/src/main/java/com/netflix/conductor/postgres/config/PostgresProperties.java | postgres-persistence/src/main/java/com/netflix/conductor/postgres/config/PostgresProperties.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.config;
import java.time.Duration;
import java.time.temporal.ChronoUnit;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.convert.DurationUnit;
@ConfigurationProperties("conductor.postgres")
public class PostgresProperties {

    /** The time in seconds after which the in-memory task definitions cache will be refreshed */
    @DurationUnit(ChronoUnit.SECONDS)
    private Duration taskDefCacheRefreshInterval = Duration.ofSeconds(60);

    /** Maximum attempts used by the retry template for deadlock/serialization failures */
    private Integer deadlockRetryMax = 3;

    // Flush interval for poll data; semantics are enforced by PostgresPollDataDAO,
    // which receives these properties (not visible in this file).
    @DurationUnit(ChronoUnit.MILLIS)
    private Duration pollDataFlushInterval = Duration.ofMillis(0);

    // Validity window for cached poll data; consumed by PostgresPollDataDAO.
    @DurationUnit(ChronoUnit.MILLIS)
    private Duration pollDataCacheValidityPeriod = Duration.ofMillis(0);

    /** Enables the experimental LISTEN/NOTIFY based queue-state listener */
    private boolean experimentalQueueNotify = false;

    /** Milliseconds after which a queue notification is considered stale */
    private Integer experimentalQueueNotifyStalePeriod = 5000;

    // Consumed by the indexing DAO; name suggests indexing happens only when the
    // entity's status changes — confirm in PostgresIndexDAO.
    private boolean onlyIndexOnStatusChange = false;

    /** The boolean indicating whether data migrations should be executed */
    private boolean applyDataMigrations = true;

    /** Database schema that holds the Conductor tables (passed to Flyway) */
    public String schema = "public";

    // Query-capability flags; consumed by the DAOs that receive these properties.
    public boolean allowFullTextQueries = true;

    public boolean allowJsonQueries = true;

    /** The maximum number of threads allowed in the async pool */
    private int asyncMaxPoolSize = 12;

    /** The size of the queue used for holding async indexing tasks */
    private int asyncWorkerQueueSize = 100;

    public boolean getExperimentalQueueNotify() {
        return experimentalQueueNotify;
    }

    public void setExperimentalQueueNotify(boolean experimentalQueueNotify) {
        this.experimentalQueueNotify = experimentalQueueNotify;
    }

    public Integer getExperimentalQueueNotifyStalePeriod() {
        return experimentalQueueNotifyStalePeriod;
    }

    public void setExperimentalQueueNotifyStalePeriod(Integer experimentalQueueNotifyStalePeriod) {
        this.experimentalQueueNotifyStalePeriod = experimentalQueueNotifyStalePeriod;
    }

    public Duration getTaskDefCacheRefreshInterval() {
        return taskDefCacheRefreshInterval;
    }

    public void setTaskDefCacheRefreshInterval(Duration taskDefCacheRefreshInterval) {
        this.taskDefCacheRefreshInterval = taskDefCacheRefreshInterval;
    }

    public boolean getOnlyIndexOnStatusChange() {
        return onlyIndexOnStatusChange;
    }

    public void setOnlyIndexOnStatusChange(boolean onlyIndexOnStatusChange) {
        this.onlyIndexOnStatusChange = onlyIndexOnStatusChange;
    }

    public boolean isApplyDataMigrations() {
        return applyDataMigrations;
    }

    public void setApplyDataMigrations(boolean applyDataMigrations) {
        this.applyDataMigrations = applyDataMigrations;
    }

    public Integer getDeadlockRetryMax() {
        return deadlockRetryMax;
    }

    public void setDeadlockRetryMax(Integer deadlockRetryMax) {
        this.deadlockRetryMax = deadlockRetryMax;
    }

    public String getSchema() {
        return schema;
    }

    public void setSchema(String schema) {
        this.schema = schema;
    }

    public boolean getAllowFullTextQueries() {
        return allowFullTextQueries;
    }

    public void setAllowFullTextQueries(boolean allowFullTextQueries) {
        this.allowFullTextQueries = allowFullTextQueries;
    }

    public boolean getAllowJsonQueries() {
        return allowJsonQueries;
    }

    public void setAllowJsonQueries(boolean allowJsonQueries) {
        this.allowJsonQueries = allowJsonQueries;
    }

    public int getAsyncWorkerQueueSize() {
        return asyncWorkerQueueSize;
    }

    public void setAsyncWorkerQueueSize(int asyncWorkerQueueSize) {
        this.asyncWorkerQueueSize = asyncWorkerQueueSize;
    }

    public int getAsyncMaxPoolSize() {
        return asyncMaxPoolSize;
    }

    public void setAsyncMaxPoolSize(int asyncMaxPoolSize) {
        this.asyncMaxPoolSize = asyncMaxPoolSize;
    }

    public Duration getPollDataFlushInterval() {
        return pollDataFlushInterval;
    }

    public void setPollDataFlushInterval(Duration interval) {
        this.pollDataFlushInterval = interval;
    }

    public Duration getPollDataCacheValidityPeriod() {
        return pollDataCacheValidityPeriod;
    }

    public void setPollDataCacheValidityPeriod(Duration period) {
        this.pollDataCacheValidityPeriod = period;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/metrics/src/main/java/com/netflix/conductor/contribs/metrics/LoggingMetricsConfiguration.java | metrics/src/main/java/com/netflix/conductor/contribs/metrics/LoggingMetricsConfiguration.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.metrics;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.logging.LoggingMeterRegistry;
import lombok.extern.slf4j.Slf4j;
/**
 * Metrics logging reporter that dumps all metrics into an Slf4J logger.
 *
 * <p>Enable in config: conductor.metrics-logger.enabled=true
 *
 * <p>additional config: conductor.metrics-logger.reportInterval=15s
 */
@ConditionalOnProperty(value = "conductor.metrics-logger.enabled", havingValue = "true")
@Configuration
@Slf4j
public class LoggingMetricsConfiguration {

    /** Registry that periodically writes every meter's value to this class's logger. */
    @Bean
    public MeterRegistry getLoggingMeterRegistry() {
        LoggingMeterRegistry loggingRegistry = new LoggingMeterRegistry(log::info);
        return loggingRegistry;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/metrics/src/main/java/com/netflix/conductor/contribs/metrics/MetricsCollector.java | metrics/src/main/java/com/netflix/conductor/contribs/metrics/MetricsCollector.java | /*
* Copyright 2025 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.metrics;
import org.springframework.stereotype.Component;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.composite.CompositeMeterRegistry;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
import lombok.extern.slf4j.Slf4j;
@Slf4j
@Component
public class MetricsCollector {

    // Shared composite that fans out every metric to all registered backends.
    static final CompositeMeterRegistry compositeRegistry = new CompositeMeterRegistry();

    private static final MeterRegistry simpleRegistry = new SimpleMeterRegistry();

    /**
     * Wires the configured Micrometer registries (plus an always-present simple registry)
     * into the shared composite registry.
     */
    public MetricsCollector(MeterRegistry... registries) {
        log.info("=========");
        log.info("Conductor configured with {} metrics registries", registries.length);
        for (MeterRegistry configured : registries) {
            log.info("Metrics registry: {}", configured);
        }
        log.info(
                "check https://docs.micrometer.io/micrometer/reference/ for configuration options");
        log.info("=========");
        compositeRegistry.add(simpleRegistry);
        for (int i = 0; i < registries.length; i++) {
            compositeRegistry.add(registries[i]);
        }
    }

    /** Returns the process-wide composite registry. */
    public static MeterRegistry getMeterRegistry() {
        return compositeRegistry;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/metrics/src/main/java/com/netflix/conductor/contribs/metrics/AzureMonitorMetricsConfiguration.java | metrics/src/main/java/com/netflix/conductor/contribs/metrics/AzureMonitorMetricsConfiguration.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.metrics;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import io.micrometer.azuremonitor.AzureMonitorConfig;
import io.micrometer.azuremonitor.AzureMonitorMeterRegistry;
import io.micrometer.core.instrument.Clock;
import io.micrometer.core.instrument.MeterRegistry;
import lombok.extern.slf4j.Slf4j;
@ConditionalOnProperty(
        value = "management.azuremonitor.metrics.export.enabled",
        havingValue = "true")
@Configuration
@Slf4j
public class AzureMonitorMetricsConfiguration {

    /**
     * Builds an Azure Monitor meter registry from the configured instrumentation key.
     *
     * <p>NOTE(review): the {@code :null} property default yields the literal string
     * "null", not a Java {@code null} — confirm whether a missing key should instead
     * fail fast.
     */
    @Bean
    public MeterRegistry getAzureMonitorMeterRegistry(
            @Value("${management.azuremonitor.metrics.export.instrumentationKey:null}")
                    String instrumentationKey) {
        // Renamed from "cloudWatchConfig": this is the Azure Monitor configuration; the
        // old name was a copy-paste leftover from the CloudWatch variant.
        AzureMonitorConfig azureMonitorConfig =
                new AzureMonitorConfig() {
                    @Override
                    public String instrumentationKey() {
                        return instrumentationKey;
                    }

                    @Override
                    public String get(String key) {
                        // All other settings use AzureMonitorConfig defaults.
                        return null;
                    }
                };
        return new AzureMonitorMeterRegistry(azureMonitorConfig, Clock.SYSTEM);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/metrics/src/main/java/com/netflix/conductor/contribs/metrics/CloudWatchMetricsConfiguration.java | metrics/src/main/java/com/netflix/conductor/contribs/metrics/CloudWatchMetricsConfiguration.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.metrics;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import io.micrometer.cloudwatch2.CloudWatchConfig;
import io.micrometer.cloudwatch2.CloudWatchMeterRegistry;
import io.micrometer.core.instrument.Clock;
import io.micrometer.core.instrument.MeterRegistry;
import lombok.extern.slf4j.Slf4j;
import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient;
@ConditionalOnProperty(value = "management.cloudwatch.metrics.export.enabled", havingValue = "true")
@Configuration
@Slf4j
public class CloudWatchMetricsConfiguration {

    /** Publishes Conductor metrics to AWS CloudWatch under the configured namespace. */
    @Bean
    public MeterRegistry getCloudWatchMetrics(
            @Value("${management.cloudwatch.metrics.export.namespace:conductor}")
                    String namespace) {
        log.info("Using namespace '{}' for cloudwatch metrics", namespace);
        CloudWatchConfig config =
                new CloudWatchConfig() {
                    @Override
                    public String get(String s) {
                        // Everything else falls back to CloudWatchConfig defaults.
                        return null;
                    }

                    @Override
                    public String namespace() {
                        return namespace;
                    }
                };
        return new CloudWatchMeterRegistry(config, Clock.SYSTEM, CloudWatchAsyncClient.create());
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/test-harness/src/test/java/com/netflix/conductor/ConductorTestApp.java | test-harness/src/test/java/com/netflix/conductor/ConductorTestApp.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor;
import java.io.IOException;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration;
/** Copy of com.netflix.conductor.Conductor for use by @SpringBootTest in AbstractSpecification. */
// Prevents from the datasource beans to be loaded, AS they are needed only for specific databases.
// In case that SQL database is selected this class will be imported back in the appropriate
// database persistence module.
@SpringBootApplication(exclude = DataSourceAutoConfiguration.class)
public class ConductorTestApp {

    /**
     * Boots the test application context. The previous {@code throws IOException} was
     * dead code: {@code SpringApplication.run} declares no checked exception.
     */
    public static void main(String[] args) {
        SpringApplication.run(ConductorTestApp.class, args);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/test-harness/src/test/java/com/netflix/conductor/test/integration/AbstractEndToEndTest.java | test-harness/src/test/java/com/netflix/conductor/test/integration/AbstractEndToEndTest.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.test.integration;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.io.Reader;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Optional;
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.test.context.TestPropertySource;
import org.testcontainers.elasticsearch.ElasticsearchContainer;
import org.testcontainers.utility.DockerImageName;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.common.run.Workflow;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
@TestPropertySource(
properties = {
"conductor.indexing.enabled=true",
"conductor.elasticsearch.version=7",
"conductor.queue.type=xxx"
})
public abstract class AbstractEndToEndTest {
private static final Logger log = LoggerFactory.getLogger(AbstractEndToEndTest.class);

// Constants shared with helper methods/subclasses (not all referenced in this chunk).
private static final String TASK_DEFINITION_PREFIX = "task_";
private static final String DEFAULT_DESCRIPTION = "description";
// Represents null value deserialized from the redis in memory db
private static final String DEFAULT_NULL_VALUE = "null";
protected static final String DEFAULT_EMAIL_ADDRESS = "test@harness.com";

// Singleton Elasticsearch test container, started once in the static initializer.
private static final ElasticsearchContainer container =
        new ElasticsearchContainer(
                DockerImageName.parse("elasticsearch")
                        .withTag("7.17.11")); // this should match the client version

// Low-level ES REST client; created in initializeEs() and closed in cleanupEs().
private static RestClient restClient;
// Initialization happens in a static block so the container is initialized
// only once for all the sub-class tests in a CI environment
// container is stopped when JVM exits
// https://www.testcontainers.org/test_framework_integration/manual_lifecycle_control/#singleton-containers
static {
    container.start();
    String httpHostAddress = container.getHttpHostAddress();
    // Expose the container's address to the application before the Spring context starts.
    System.setProperty("conductor.elasticsearch.url", "http://" + httpHostAddress);
    log.info("Initialized Elasticsearch {}", container.getContainerId());
}
/** Builds the low-level REST client pointed at the running test container. */
@BeforeClass
public static void initializeEs() {
    String[] hostAndPort = container.getHttpHostAddress().split(":");
    String host = hostAndPort[0];
    int port = Integer.parseInt(hostAndPort[1]);
    restClient = RestClient.builder(new HttpHost(host, port, "http")).build();
}
/**
 * Deletes every Elasticsearch index (so subsequent test classes start clean) and closes
 * the REST client. Fixes from review: the client was previously used before its null
 * check, the readers were never closed, and the client leaked if a delete failed.
 */
@AfterClass
public static void cleanupEs() throws Exception {
    if (restClient == null) {
        return; // initializeEs never ran; nothing to clean up
    }
    try {
        Response beforeResponse = restClient.performRequest(new Request("GET", "/_cat/indices"));
        // _cat/indices returns one whitespace-separated row per index; the index name
        // is the third column.
        try (Reader streamReader = new InputStreamReader(beforeResponse.getEntity().getContent());
                BufferedReader bufferedReader = new BufferedReader(streamReader)) {
            String line;
            while ((line = bufferedReader.readLine()) != null) {
                String[] fields = line.split("\\s");
                String endpoint = String.format("/%s", fields[2]);
                restClient.performRequest(new Request("DELETE", endpoint));
            }
        }
    } finally {
        // Close the client even when index deletion fails part-way.
        restClient.close();
    }
}
/** Verifies an ad-hoc (ephemeral) workflow referencing registered task definitions. */
@Test
public void testEphemeralWorkflowsWithStoredTasks() {
    String workflowExecutionName = "testEphemeralWorkflow";
    createAndRegisterTaskDefinitions("storedTaskDef", 5);

    WorkflowDef workflowDefinition = createWorkflowDefinition(workflowExecutionName);
    workflowDefinition
            .getTasks()
            .addAll(
                    Arrays.asList(
                            createWorkflowTask("storedTaskDef1"),
                            createWorkflowTask("storedTaskDef2")));

    String workflowId = startWorkflow(workflowExecutionName, workflowDefinition);
    assertNotNull(workflowId);

    // The executed workflow must carry the submitted (ephemeral) definition verbatim.
    Workflow workflow = getWorkflow(workflowId, true);
    WorkflowDef ephemeralWorkflow = workflow.getWorkflowDefinition();
    assertNotNull(ephemeralWorkflow);
    assertEquals(workflowDefinition, ephemeralWorkflow);
}
/** Verifies an ephemeral workflow whose tasks carry inline (unregistered) definitions. */
@Test
public void testEphemeralWorkflowsWithEphemeralTasks() {
    String workflowExecutionName = "ephemeralWorkflowWithEphemeralTasks";
    WorkflowDef workflowDefinition = createWorkflowDefinition(workflowExecutionName);

    // Attach inline task definitions instead of registering them first.
    WorkflowTask workflowTask1 = createWorkflowTask("ephemeralTask1");
    workflowTask1.setTaskDefinition(createTaskDefinition("ephemeralTaskDef1"));
    WorkflowTask workflowTask2 = createWorkflowTask("ephemeralTask2");
    workflowTask2.setTaskDefinition(createTaskDefinition("ephemeralTaskDef2"));
    workflowDefinition.getTasks().addAll(Arrays.asList(workflowTask1, workflowTask2));

    String workflowId = startWorkflow(workflowExecutionName, workflowDefinition);
    assertNotNull(workflowId);

    Workflow workflow = getWorkflow(workflowId, true);
    WorkflowDef ephemeralWorkflow = workflow.getWorkflowDefinition();
    assertNotNull(ephemeralWorkflow);
    assertEquals(workflowDefinition, ephemeralWorkflow);

    // Both tasks must come back with their inline definitions intact.
    List<WorkflowTask> ephemeralTasks = ephemeralWorkflow.getTasks();
    assertEquals(2, ephemeralTasks.size());
    for (WorkflowTask ephemeralTask : ephemeralTasks) {
        assertNotNull(ephemeralTask.getTaskDefinition());
    }
}
@Test
public void testEphemeralWorkflowsWithEphemeralAndStoredTasks() {
createAndRegisterTaskDefinitions("storedTask", 1);
WorkflowDef workflowDefinition =
createWorkflowDefinition("testEphemeralWorkflowsWithEphemeralAndStoredTasks");
WorkflowTask workflowTask1 = createWorkflowTask("ephemeralTask1");
TaskDef taskDefinition1 = createTaskDefinition("ephemeralTaskDef1");
workflowTask1.setTaskDefinition(taskDefinition1);
WorkflowTask workflowTask2 = createWorkflowTask("storedTask0");
workflowDefinition.getTasks().add(workflowTask1);
workflowDefinition.getTasks().add(workflowTask2);
String workflowExecutionName = "ephemeralWorkflowWithEphemeralAndStoredTasks";
String workflowId = startWorkflow(workflowExecutionName, workflowDefinition);
assertNotNull(workflowId);
Workflow workflow = getWorkflow(workflowId, true);
WorkflowDef ephemeralWorkflow = workflow.getWorkflowDefinition();
assertNotNull(ephemeralWorkflow);
assertEquals(workflowDefinition, ephemeralWorkflow);
TaskDef storedTaskDefinition = getTaskDefinition("storedTask0");
List<WorkflowTask> tasks = ephemeralWorkflow.getTasks();
assertEquals(2, tasks.size());
assertEquals(workflowTask1, tasks.get(0));
TaskDef currentStoredTaskDefinition = tasks.get(1).getTaskDefinition();
assertNotNull(currentStoredTaskDefinition);
assertEquals(storedTaskDefinition, currentStoredTaskDefinition);
}
@Test
public void testEventHandler() {
String eventName = "conductor:test_workflow:complete_task_with_event";
EventHandler eventHandler = new EventHandler();
eventHandler.setName("test_complete_task_event");
EventHandler.Action completeTaskAction = new EventHandler.Action();
completeTaskAction.setAction(EventHandler.Action.Type.complete_task);
completeTaskAction.setComplete_task(new EventHandler.TaskDetails());
completeTaskAction.getComplete_task().setTaskRefName("test_task");
completeTaskAction.getComplete_task().setWorkflowId("test_id");
completeTaskAction.getComplete_task().setOutput(new HashMap<>());
eventHandler.getActions().add(completeTaskAction);
eventHandler.setEvent(eventName);
eventHandler.setActive(true);
registerEventHandler(eventHandler);
Iterator<EventHandler> it = getEventHandlers(eventName, true);
EventHandler result = it.next();
assertFalse(it.hasNext());
assertEquals(eventHandler.getName(), result.getName());
}
protected WorkflowTask createWorkflowTask(String name) {
WorkflowTask workflowTask = new WorkflowTask();
workflowTask.setName(name);
workflowTask.setWorkflowTaskType(TaskType.SIMPLE);
workflowTask.setTaskReferenceName(name);
workflowTask.setDescription(getDefaultDescription(name));
workflowTask.setDynamicTaskNameParam(DEFAULT_NULL_VALUE);
workflowTask.setCaseValueParam(DEFAULT_NULL_VALUE);
workflowTask.setCaseExpression(DEFAULT_NULL_VALUE);
workflowTask.setDynamicForkTasksParam(DEFAULT_NULL_VALUE);
workflowTask.setDynamicForkTasksInputParamName(DEFAULT_NULL_VALUE);
workflowTask.setSink(DEFAULT_NULL_VALUE);
workflowTask.setEvaluatorType(DEFAULT_NULL_VALUE);
workflowTask.setExpression(DEFAULT_NULL_VALUE);
return workflowTask;
}
protected TaskDef createTaskDefinition(String name) {
TaskDef taskDefinition = new TaskDef();
taskDefinition.setName(name);
return taskDefinition;
}
protected WorkflowDef createWorkflowDefinition(String workflowName) {
WorkflowDef workflowDefinition = new WorkflowDef();
workflowDefinition.setName(workflowName);
workflowDefinition.setDescription(getDefaultDescription(workflowName));
workflowDefinition.setFailureWorkflow(DEFAULT_NULL_VALUE);
workflowDefinition.setOwnerEmail(DEFAULT_EMAIL_ADDRESS);
return workflowDefinition;
}
protected List<TaskDef> createAndRegisterTaskDefinitions(
String prefixTaskDefinition, int numberOfTaskDefinitions) {
String prefix = Optional.ofNullable(prefixTaskDefinition).orElse(TASK_DEFINITION_PREFIX);
List<TaskDef> definitions = new LinkedList<>();
for (int i = 0; i < numberOfTaskDefinitions; i++) {
TaskDef def =
new TaskDef(
prefix + i,
"task " + i + DEFAULT_DESCRIPTION,
DEFAULT_EMAIL_ADDRESS,
3,
60,
60);
def.setTimeoutPolicy(TaskDef.TimeoutPolicy.RETRY);
definitions.add(def);
}
this.registerTaskDefinitions(definitions);
return definitions;
}
private String getDefaultDescription(String nameResource) {
return nameResource + " " + DEFAULT_DESCRIPTION;
}
    /** Starts a workflow over the concrete transport (HTTP or gRPC) and returns its id. */
    protected abstract String startWorkflow(
            String workflowExecutionName, WorkflowDef workflowDefinition);

    /** Fetches a workflow execution; {@code includeTasks} embeds the task executions. */
    protected abstract Workflow getWorkflow(String workflowId, boolean includeTasks);

    /** Looks up a registered task definition by name. */
    protected abstract TaskDef getTaskDefinition(String taskName);

    /** Registers the given task definitions with the metadata service. */
    protected abstract void registerTaskDefinitions(List<TaskDef> taskDefinitionList);

    /** Registers the given workflow definition with the metadata service. */
    protected abstract void registerWorkflowDefinition(WorkflowDef workflowDefinition);

    /** Registers the given event handler. */
    protected abstract void registerEventHandler(EventHandler eventHandler);

    /** Returns the handlers registered for {@code event}, optionally active ones only. */
    protected abstract Iterator<EventHandler> getEventHandlers(String event, boolean activeOnly);
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/test-harness/src/test/java/com/netflix/conductor/test/integration/grpc/AbstractGrpcEndToEndTest.java | test-harness/src/test/java/com/netflix/conductor/test/integration/grpc/AbstractGrpcEndToEndTest.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.test.integration.grpc;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.ConductorTestApp;
import com.netflix.conductor.client.grpc.EventClient;
import com.netflix.conductor.client.grpc.MetadataClient;
import com.netflix.conductor.client.grpc.TaskClient;
import com.netflix.conductor.client.grpc.WorkflowClient;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.common.metadata.tasks.Task;
import com.netflix.conductor.common.metadata.tasks.Task.Status;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.tasks.TaskDef.TimeoutPolicy;
import com.netflix.conductor.common.metadata.tasks.TaskResult;
import com.netflix.conductor.common.metadata.workflow.StartWorkflowRequest;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.common.run.Workflow;
import com.netflix.conductor.common.run.Workflow.WorkflowStatus;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.test.integration.AbstractEndToEndTest;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
@RunWith(SpringRunner.class)
@SpringBootTest(
        classes = ConductorTestApp.class,
        properties = {"conductor.grpc-server.enabled=true", "conductor.grpc-server.port=8092"})
@TestPropertySource(locations = "classpath:application-integrationtest.properties")
public abstract class AbstractGrpcEndToEndTest extends AbstractEndToEndTest {

    // gRPC clients; the concrete subclass initializes these in its @Before method.
    protected static TaskClient taskClient;
    protected static WorkflowClient workflowClient;
    protected static MetadataClient metadataClient;
    protected static EventClient eventClient;

    /** Starts a workflow over gRPC and returns its id. */
    @Override
    protected String startWorkflow(String workflowExecutionName, WorkflowDef workflowDefinition) {
        StartWorkflowRequest workflowRequest =
                new StartWorkflowRequest()
                        .withName(workflowExecutionName)
                        .withWorkflowDef(workflowDefinition);
        return workflowClient.startWorkflow(workflowRequest);
    }

    /** Fetches a workflow execution over gRPC. */
    @Override
    protected Workflow getWorkflow(String workflowId, boolean includeTasks) {
        return workflowClient.getWorkflow(workflowId, includeTasks);
    }

    /** Looks up a registered task definition over gRPC. */
    @Override
    protected TaskDef getTaskDefinition(String taskName) {
        return metadataClient.getTaskDef(taskName);
    }

    /** Registers task definitions over gRPC. */
    @Override
    protected void registerTaskDefinitions(List<TaskDef> taskDefinitionList) {
        metadataClient.registerTaskDefs(taskDefinitionList);
    }

    /** Registers a workflow definition over gRPC. */
    @Override
    protected void registerWorkflowDefinition(WorkflowDef workflowDefinition) {
        metadataClient.registerWorkflowDef(workflowDefinition);
    }

    /** Registers an event handler over gRPC. */
    @Override
    protected void registerEventHandler(EventHandler eventHandler) {
        eventClient.registerEventHandler(eventHandler);
    }

    /** Streams the handlers registered for {@code event} over gRPC. */
    @Override
    protected Iterator<EventHandler> getEventHandlers(String event, boolean activeOnly) {
        return eventClient.getEventHandlers(event, activeOnly);
    }

    /**
     * Exercises the full lifecycle over gRPC: register metadata, start a workflow, poll
     * and complete a task, query all search variants, terminate, and restart.
     *
     * <p>Fixes in this version: the task-search assertions previously checked
     * {@code searchResultV2Advanced.getTotalHits()} (a copy-paste error); they now check
     * the result they belong to.
     */
    @Test
    public void testAll() throws Exception {
        assertNotNull(taskClient);
        List<TaskDef> defs = new LinkedList<>();
        for (int i = 0; i < 5; i++) {
            TaskDef def = new TaskDef("t" + i, "task " + i, DEFAULT_EMAIL_ADDRESS, 3, 60, 60);
            def.setTimeoutPolicy(TimeoutPolicy.RETRY);
            defs.add(def);
        }
        metadataClient.registerTaskDefs(defs);
        for (int i = 0; i < 5; i++) {
            final String taskName = "t" + i;
            TaskDef def = metadataClient.getTaskDef(taskName);
            assertNotNull(def);
            assertEquals(taskName, def.getName());
        }

        WorkflowDef def = createWorkflowDefinition("test");
        WorkflowTask t0 = createWorkflowTask("t0");
        WorkflowTask t1 = createWorkflowTask("t1");
        def.getTasks().add(t0);
        def.getTasks().add(t1);
        metadataClient.registerWorkflowDef(def);
        WorkflowDef found = metadataClient.getWorkflowDef(def.getName(), null);
        assertNotNull(found);
        assertEquals(def, found);

        String correlationId = "test_corr_id";
        StartWorkflowRequest startWf = new StartWorkflowRequest();
        startWf.setName(def.getName());
        startWf.setCorrelationId(correlationId);
        String workflowId = workflowClient.startWorkflow(startWf);
        assertNotNull(workflowId);

        Workflow workflow = workflowClient.getWorkflow(workflowId, false);
        assertEquals(0, workflow.getTasks().size());
        assertEquals(workflowId, workflow.getWorkflowId());

        workflow = workflowClient.getWorkflow(workflowId, true);
        assertNotNull(workflow);
        assertEquals(WorkflowStatus.RUNNING, workflow.getStatus());
        assertEquals(1, workflow.getTasks().size());
        assertEquals(t0.getTaskReferenceName(), workflow.getTasks().get(0).getReferenceTaskName());
        assertEquals(workflowId, workflow.getWorkflowId());

        List<String> runningIds =
                workflowClient.getRunningWorkflow(def.getName(), def.getVersion());
        assertNotNull(runningIds);
        assertEquals(1, runningIds.size());
        assertEquals(workflowId, runningIds.get(0));

        // Polling an unknown task type must come back empty.
        List<Task> polled =
                taskClient.batchPollTasksByTaskType("non existing task", "test", 1, 100);
        assertNotNull(polled);
        assertEquals(0, polled.size());

        polled = taskClient.batchPollTasksByTaskType(t0.getName(), "test", 1, 1000);
        assertNotNull(polled);
        assertEquals(1, polled.size());
        assertEquals(t0.getName(), polled.get(0).getTaskDefName());

        Task task = polled.get(0);
        task.getOutputData().put("key1", "value1");
        task.setStatus(Status.COMPLETED);
        taskClient.updateTask(new TaskResult(task));

        // The completed task must no longer be pollable.
        polled = taskClient.batchPollTasksByTaskType(t0.getName(), "test", 1, 100);
        assertNotNull(polled);
        assertTrue(polled.toString(), polled.isEmpty());

        workflow = workflowClient.getWorkflow(workflowId, true);
        assertNotNull(workflow);
        assertEquals(WorkflowStatus.RUNNING, workflow.getStatus());
        assertEquals(2, workflow.getTasks().size());
        assertEquals(t0.getTaskReferenceName(), workflow.getTasks().get(0).getReferenceTaskName());
        assertEquals(t1.getTaskReferenceName(), workflow.getTasks().get(1).getReferenceTaskName());
        assertEquals(Status.COMPLETED, workflow.getTasks().get(0).getStatus());
        assertEquals(Status.SCHEDULED, workflow.getTasks().get(1).getStatus());

        Task taskById = taskClient.getTaskDetails(task.getTaskId());
        assertNotNull(taskById);
        assertEquals(task.getTaskId(), taskById.getTaskId());

        // Give the indexer a moment before exercising the search endpoints.
        Thread.sleep(1000);

        SearchResult<WorkflowSummary> searchResult =
                workflowClient.search("workflowType='" + def.getName() + "'");
        assertNotNull(searchResult);
        assertEquals(1, searchResult.getTotalHits());
        assertEquals(workflow.getWorkflowId(), searchResult.getResults().get(0).getWorkflowId());

        SearchResult<Workflow> searchResultV2 =
                workflowClient.searchV2("workflowType='" + def.getName() + "'");
        assertNotNull(searchResultV2);
        assertEquals(1, searchResultV2.getTotalHits());
        assertEquals(workflow.getWorkflowId(), searchResultV2.getResults().get(0).getWorkflowId());

        SearchResult<WorkflowSummary> searchResultAdvanced =
                workflowClient.search(0, 1, null, null, "workflowType='" + def.getName() + "'");
        assertNotNull(searchResultAdvanced);
        assertEquals(1, searchResultAdvanced.getTotalHits());
        assertEquals(
                workflow.getWorkflowId(), searchResultAdvanced.getResults().get(0).getWorkflowId());

        SearchResult<Workflow> searchResultV2Advanced =
                workflowClient.searchV2(0, 1, null, null, "workflowType='" + def.getName() + "'");
        assertNotNull(searchResultV2Advanced);
        assertEquals(1, searchResultV2Advanced.getTotalHits());
        assertEquals(
                workflow.getWorkflowId(),
                searchResultV2Advanced.getResults().get(0).getWorkflowId());

        SearchResult<TaskSummary> taskSearchResult =
                taskClient.search("taskType='" + t0.getName() + "'");
        assertNotNull(taskSearchResult);
        // Fixed: previously asserted on searchResultV2Advanced (copy-paste error).
        assertEquals(1, taskSearchResult.getTotalHits());
        assertEquals(t0.getName(), taskSearchResult.getResults().get(0).getTaskDefName());

        SearchResult<TaskSummary> taskSearchResultAdvanced =
                taskClient.search(0, 1, null, null, "taskType='" + t0.getName() + "'");
        assertNotNull(taskSearchResultAdvanced);
        assertEquals(1, taskSearchResultAdvanced.getTotalHits());
        assertEquals(t0.getName(), taskSearchResultAdvanced.getResults().get(0).getTaskDefName());

        SearchResult<Task> taskSearchResultV2 =
                taskClient.searchV2("taskType='" + t0.getName() + "'");
        assertNotNull(taskSearchResultV2);
        // Fixed: previously asserted on searchResultV2Advanced (copy-paste error).
        assertEquals(1, taskSearchResultV2.getTotalHits());
        assertEquals(
                t0.getTaskReferenceName(),
                taskSearchResultV2.getResults().get(0).getReferenceTaskName());

        SearchResult<Task> taskSearchResultV2Advanced =
                taskClient.searchV2(0, 1, null, null, "taskType='" + t0.getName() + "'");
        assertNotNull(taskSearchResultV2Advanced);
        assertEquals(1, taskSearchResultV2Advanced.getTotalHits());
        assertEquals(
                t0.getTaskReferenceName(),
                taskSearchResultV2Advanced.getResults().get(0).getReferenceTaskName());

        workflowClient.terminateWorkflow(workflowId, "terminate reason");
        workflow = workflowClient.getWorkflow(workflowId, true);
        assertNotNull(workflow);
        assertEquals(WorkflowStatus.TERMINATED, workflow.getStatus());

        workflowClient.restart(workflowId, false);
        workflow = workflowClient.getWorkflow(workflowId, true);
        assertNotNull(workflow);
        assertEquals(WorkflowStatus.RUNNING, workflow.getStatus());
        assertEquals(1, workflow.getTasks().size());
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/test-harness/src/test/java/com/netflix/conductor/test/integration/grpc/GrpcEndToEndTest.java | test-harness/src/test/java/com/netflix/conductor/test/integration/grpc/GrpcEndToEndTest.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.test.integration.grpc;
import org.junit.Before;
import com.netflix.conductor.client.grpc.EventClient;
import com.netflix.conductor.client.grpc.MetadataClient;
import com.netflix.conductor.client.grpc.TaskClient;
import com.netflix.conductor.client.grpc.WorkflowClient;
public class GrpcEndToEndTest extends AbstractGrpcEndToEndTest {

    /** Host the embedded gRPC server binds to. */
    private static final String GRPC_HOST = "localhost";

    /** Must match conductor.grpc-server.port configured on the parent test. */
    private static final int GRPC_PORT = 8092;

    /** Creates fresh gRPC clients before each test, all pointed at the embedded server. */
    @Before
    public void init() {
        taskClient = new TaskClient(GRPC_HOST, GRPC_PORT);
        workflowClient = new WorkflowClient(GRPC_HOST, GRPC_PORT);
        metadataClient = new MetadataClient(GRPC_HOST, GRPC_PORT);
        eventClient = new EventClient(GRPC_HOST, GRPC_PORT);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/test-harness/src/test/java/com/netflix/conductor/test/integration/http/HttpEndToEndTest.java | test-harness/src/test/java/com/netflix/conductor/test/integration/http/HttpEndToEndTest.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.test.integration.http;
import org.junit.Before;
import com.netflix.conductor.client.http.EventClient;
import com.netflix.conductor.client.http.MetadataClient;
import com.netflix.conductor.client.http.TaskClient;
import com.netflix.conductor.client.http.WorkflowClient;
public class HttpEndToEndTest extends AbstractHttpEndToEndTest {

    /** Creates fresh HTTP clients before each test, all rooted at the random local port. */
    @Before
    public void init() {
        apiRoot = String.format("http://localhost:%d/api/", port);

        taskClient = new TaskClient();
        workflowClient = new WorkflowClient();
        metadataClient = new MetadataClient();
        eventClient = new EventClient();

        taskClient.setRootURI(apiRoot);
        workflowClient.setRootURI(apiRoot);
        metadataClient.setRootURI(apiRoot);
        eventClient.setRootURI(apiRoot);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/test-harness/src/test/java/com/netflix/conductor/test/integration/http/AbstractHttpEndToEndTest.java | test-harness/src/test/java/com/netflix/conductor/test/integration/http/AbstractHttpEndToEndTest.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.test.integration.http;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.stream.Collectors;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.boot.test.context.SpringBootTest.WebEnvironment;
import org.springframework.boot.test.web.server.LocalServerPort;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.ConductorTestApp;
import com.netflix.conductor.client.exception.ConductorClientException;
import com.netflix.conductor.client.http.EventClient;
import com.netflix.conductor.client.http.MetadataClient;
import com.netflix.conductor.client.http.TaskClient;
import com.netflix.conductor.client.http.WorkflowClient;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.common.metadata.tasks.Task;
import com.netflix.conductor.common.metadata.tasks.Task.Status;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.tasks.TaskResult;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.StartWorkflowRequest;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.common.run.Workflow;
import com.netflix.conductor.common.run.Workflow.WorkflowStatus;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.common.validation.ValidationError;
import com.netflix.conductor.test.integration.AbstractEndToEndTest;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@RunWith(SpringRunner.class)
@SpringBootTest(webEnvironment = WebEnvironment.RANDOM_PORT, classes = ConductorTestApp.class)
@TestPropertySource(locations = "classpath:application-integrationtest.properties")
public abstract class AbstractHttpEndToEndTest extends AbstractEndToEndTest {
    // Random port assigned by Spring Boot's embedded server for this test run.
    @LocalServerPort protected int port;

    // Base URL of the REST API, e.g. "http://localhost:<port>/api/".
    protected static String apiRoot;

    // HTTP clients; the concrete subclass initializes these in its @Before method.
    protected static TaskClient taskClient;
    protected static WorkflowClient workflowClient;
    protected static MetadataClient metadataClient;
    protected static EventClient eventClient;
    /** Starts a workflow over HTTP and returns its id. */
    @Override
    protected String startWorkflow(String workflowExecutionName, WorkflowDef workflowDefinition) {
        StartWorkflowRequest workflowRequest =
                new StartWorkflowRequest()
                        .withName(workflowExecutionName)
                        .withWorkflowDef(workflowDefinition);
        return workflowClient.startWorkflow(workflowRequest);
    }

    /** Fetches a workflow execution over HTTP. */
    @Override
    protected Workflow getWorkflow(String workflowId, boolean includeTasks) {
        return workflowClient.getWorkflow(workflowId, includeTasks);
    }

    /** Looks up a registered task definition over HTTP. */
    @Override
    protected TaskDef getTaskDefinition(String taskName) {
        return metadataClient.getTaskDef(taskName);
    }

    /** Registers task definitions over HTTP. */
    @Override
    protected void registerTaskDefinitions(List<TaskDef> taskDefinitionList) {
        metadataClient.registerTaskDefs(taskDefinitionList);
    }

    /** Registers a workflow definition over HTTP. */
    @Override
    protected void registerWorkflowDefinition(WorkflowDef workflowDefinition) {
        metadataClient.registerWorkflowDef(workflowDefinition);
    }

    /** Registers an event handler over HTTP. */
    @Override
    protected void registerEventHandler(EventHandler eventHandler) {
        eventClient.registerEventHandler(eventHandler);
    }

    /** Returns the handlers registered for {@code event} over HTTP. */
    @Override
    protected Iterator<EventHandler> getEventHandlers(String event, boolean activeOnly) {
        return eventClient.getEventHandlers(event, activeOnly).iterator();
    }
@Test
public void testAll() throws Exception {
createAndRegisterTaskDefinitions("t", 5);
WorkflowDef def = new WorkflowDef();
def.setName("test");
def.setOwnerEmail(DEFAULT_EMAIL_ADDRESS);
WorkflowTask t0 = new WorkflowTask();
t0.setName("t0");
t0.setWorkflowTaskType(TaskType.SIMPLE);
t0.setTaskReferenceName("t0");
WorkflowTask t1 = new WorkflowTask();
t1.setName("t1");
t1.setWorkflowTaskType(TaskType.SIMPLE);
t1.setTaskReferenceName("t1");
def.getTasks().add(t0);
def.getTasks().add(t1);
metadataClient.registerWorkflowDef(def);
WorkflowDef workflowDefinitionFromSystem =
metadataClient.getWorkflowDef(def.getName(), null);
assertNotNull(workflowDefinitionFromSystem);
assertEquals(def, workflowDefinitionFromSystem);
String correlationId = "test_corr_id";
StartWorkflowRequest startWorkflowRequest =
new StartWorkflowRequest()
.withName(def.getName())
.withCorrelationId(correlationId)
.withPriority(50)
.withInput(new HashMap<>());
String workflowId = workflowClient.startWorkflow(startWorkflowRequest);
assertNotNull(workflowId);
Workflow workflow = workflowClient.getWorkflow(workflowId, false);
assertEquals(0, workflow.getTasks().size());
assertEquals(workflowId, workflow.getWorkflowId());
workflow = workflowClient.getWorkflow(workflowId, true);
assertNotNull(workflow);
assertEquals(WorkflowStatus.RUNNING, workflow.getStatus());
assertEquals(1, workflow.getTasks().size());
assertEquals(t0.getTaskReferenceName(), workflow.getTasks().get(0).getReferenceTaskName());
assertEquals(workflowId, workflow.getWorkflowId());
int queueSize = taskClient.getQueueSizeForTask(workflow.getTasks().get(0).getTaskType());
assertEquals(1, queueSize);
List<String> runningIds =
workflowClient.getRunningWorkflow(def.getName(), def.getVersion());
assertNotNull(runningIds);
assertEquals(1, runningIds.size());
assertEquals(workflowId, runningIds.get(0));
List<Task> polled =
taskClient.batchPollTasksByTaskType("non existing task", "test", 1, 100);
assertNotNull(polled);
assertEquals(0, polled.size());
polled = taskClient.batchPollTasksByTaskType(t0.getName(), "test", 1, 100);
assertNotNull(polled);
assertEquals(1, polled.size());
assertEquals(t0.getName(), polled.get(0).getTaskDefName());
Task task = polled.get(0);
task.getOutputData().put("key1", "value1");
task.setStatus(Status.COMPLETED);
taskClient.updateTask(new TaskResult(task));
polled = taskClient.batchPollTasksByTaskType(t0.getName(), "test", 1, 100);
assertNotNull(polled);
assertTrue(polled.toString(), polled.isEmpty());
workflow = workflowClient.getWorkflow(workflowId, true);
assertNotNull(workflow);
assertEquals(WorkflowStatus.RUNNING, workflow.getStatus());
assertEquals(2, workflow.getTasks().size());
assertEquals(t0.getTaskReferenceName(), workflow.getTasks().get(0).getReferenceTaskName());
assertEquals(t1.getTaskReferenceName(), workflow.getTasks().get(1).getReferenceTaskName());
assertEquals(Task.Status.COMPLETED, workflow.getTasks().get(0).getStatus());
assertEquals(Task.Status.SCHEDULED, workflow.getTasks().get(1).getStatus());
Task taskById = taskClient.getTaskDetails(task.getTaskId());
assertNotNull(taskById);
assertEquals(task.getTaskId(), taskById.getTaskId());
queueSize = taskClient.getQueueSizeForTask(workflow.getTasks().get(1).getTaskType());
assertEquals(1, queueSize);
Thread.sleep(1000);
SearchResult<WorkflowSummary> searchResult =
workflowClient.search("workflowType='" + def.getName() + "'");
assertNotNull(searchResult);
assertEquals(1, searchResult.getTotalHits());
assertEquals(workflow.getWorkflowId(), searchResult.getResults().get(0).getWorkflowId());
SearchResult<Workflow> searchResultV2 =
workflowClient.searchV2("workflowType='" + def.getName() + "'");
assertNotNull(searchResultV2);
assertEquals(1, searchResultV2.getTotalHits());
assertEquals(workflow.getWorkflowId(), searchResultV2.getResults().get(0).getWorkflowId());
SearchResult<WorkflowSummary> searchResultAdvanced =
workflowClient.search(0, 1, null, null, "workflowType='" + def.getName() + "'");
assertNotNull(searchResultAdvanced);
assertEquals(1, searchResultAdvanced.getTotalHits());
assertEquals(
workflow.getWorkflowId(), searchResultAdvanced.getResults().get(0).getWorkflowId());
SearchResult<Workflow> searchResultV2Advanced =
workflowClient.searchV2(0, 1, null, null, "workflowType='" + def.getName() + "'");
assertNotNull(searchResultV2Advanced);
assertEquals(1, searchResultV2Advanced.getTotalHits());
assertEquals(
workflow.getWorkflowId(),
searchResultV2Advanced.getResults().get(0).getWorkflowId());
SearchResult<TaskSummary> taskSearchResult =
taskClient.search("taskType='" + t0.getName() + "'");
assertNotNull(taskSearchResult);
assertEquals(1, searchResultV2Advanced.getTotalHits());
assertEquals(t0.getName(), taskSearchResult.getResults().get(0).getTaskDefName());
SearchResult<TaskSummary> taskSearchResultAdvanced =
taskClient.search(0, 1, null, null, "taskType='" + t0.getName() + "'");
assertNotNull(taskSearchResultAdvanced);
assertEquals(1, taskSearchResultAdvanced.getTotalHits());
assertEquals(t0.getName(), taskSearchResultAdvanced.getResults().get(0).getTaskDefName());
SearchResult<Task> taskSearchResultV2 =
taskClient.searchV2("taskType='" + t0.getName() + "'");
assertNotNull(taskSearchResultV2);
assertEquals(1, searchResultV2Advanced.getTotalHits());
assertEquals(
t0.getTaskReferenceName(),
taskSearchResultV2.getResults().get(0).getReferenceTaskName());
SearchResult<Task> taskSearchResultV2Advanced =
taskClient.searchV2(0, 1, null, null, "taskType='" + t0.getName() + "'");
assertNotNull(taskSearchResultV2Advanced);
assertEquals(1, taskSearchResultV2Advanced.getTotalHits());
assertEquals(
t0.getTaskReferenceName(),
taskSearchResultV2Advanced.getResults().get(0).getReferenceTaskName());
workflowClient.terminateWorkflow(workflowId, "terminate reason");
workflow = workflowClient.getWorkflow(workflowId, true);
assertNotNull(workflow);
assertEquals(WorkflowStatus.TERMINATED, workflow.getStatus());
workflowClient.restart(workflowId, false);
workflow = workflowClient.getWorkflow(workflowId, true);
assertNotNull(workflow);
assertEquals(WorkflowStatus.RUNNING, workflow.getStatus());
assertEquals(1, workflow.getTasks().size());
workflowClient.skipTaskFromWorkflow(workflowId, "t1");
}
@Test(expected = ConductorClientException.class)
public void testMetadataWorkflowDefinition() {
String workflowDefName = "testWorkflowDefMetadata";
WorkflowDef def = new WorkflowDef();
def.setName(workflowDefName);
def.setVersion(1);
WorkflowTask t0 = new WorkflowTask();
t0.setName("t0");
t0.setWorkflowTaskType(TaskType.SIMPLE);
t0.setTaskReferenceName("t0");
WorkflowTask t1 = new WorkflowTask();
t1.setName("t1");
t1.setWorkflowTaskType(TaskType.SIMPLE);
t1.setTaskReferenceName("t1");
def.getTasks().add(t0);
def.getTasks().add(t1);
metadataClient.registerWorkflowDef(def);
metadataClient.unregisterWorkflowDef(workflowDefName, 1);
try {
metadataClient.getWorkflowDef(workflowDefName, 1);
} catch (ConductorClientException e) {
int statusCode = e.getStatus();
String errorMessage = e.getMessage();
boolean retryable = e.isRetryable();
assertEquals(404, statusCode);
assertEquals(
"No such workflow found by name: testWorkflowDefMetadata, version: 1",
errorMessage);
assertFalse(retryable);
throw e;
}
}
@Test(expected = ConductorClientException.class)
public void testInvalidResource() {
    // Point a fresh client at a non-existent resource path and expect a
    // non-retryable 404 when registering a definition through it.
    MetadataClient badResourceClient = new MetadataClient();
    badResourceClient.setRootURI(String.format("%sinvalid", apiRoot));
    WorkflowDef workflowDef = new WorkflowDef();
    workflowDef.setName("testWorkflowDel");
    workflowDef.setVersion(1);
    try {
        badResourceClient.registerWorkflowDef(workflowDef);
    } catch (ConductorClientException e) {
        assertEquals(404, e.getStatus());
        assertFalse(e.isRetryable());
        throw e;
    }
}
@Test(expected = ConductorClientException.class)
public void testUpdateWorkflow() {
    // Register a task definition plus a workflow that uses it, publish version 2
    // through updateWorkflowDefs, then expect a 404 for an unknown task type.
    TaskDef taskDef = new TaskDef();
    taskDef.setName("taskUpdate");
    ArrayList<TaskDef> taskDefs = new ArrayList<>();
    taskDefs.add(taskDef);
    metadataClient.registerTaskDefs(taskDefs);

    WorkflowTask workflowTask = new WorkflowTask();
    workflowTask.setName("taskUpdate");
    workflowTask.setTaskReferenceName("taskUpdate");

    WorkflowDef workflowDef = new WorkflowDef();
    workflowDef.setName("testWorkflowDel");
    workflowDef.setVersion(1);
    List<WorkflowTask> workflowTasks = new ArrayList<>();
    workflowTasks.add(workflowTask);
    workflowDef.setTasks(workflowTasks);
    metadataClient.registerWorkflowDef(workflowDef);

    workflowDef.setVersion(2);
    List<WorkflowDef> defsToUpdate = new ArrayList<>();
    defsToUpdate.add(workflowDef);
    metadataClient.updateWorkflowDefs(defsToUpdate);
    assertNotNull(metadataClient.getWorkflowDef(workflowDef.getName(), 2));

    try {
        metadataClient.getTaskDef("test");
    } catch (ConductorClientException e) {
        assertEquals(404, e.getStatus());
        assertEquals("No such taskType found by name: test", e.getMessage());
        assertFalse(e.isRetryable());
        throw e;
    }
}
@Test
public void testStartWorkflow() {
    // Starting a workflow without a name must be rejected client-side with a
    // NullPointerException carrying a descriptive message.
    StartWorkflowRequest startWorkflowRequest = new StartWorkflowRequest();
    try {
        workflowClient.startWorkflow(startWorkflowRequest);
        fail("StartWorkflow#name is null but no exception was thrown");
    } catch (NullPointerException e) {
        assertEquals("Workflow name cannot be null or empty", e.getMessage());
    } catch (Exception e) {
        // Fix: the original message here falsely claimed that no exception was
        // thrown; report the actual unexpected exception type instead.
        fail(
                "StartWorkflow#name is null but an unexpected exception was thrown: "
                        + e.getClass().getName());
    }
}
@Test(expected = ConductorClientException.class)
public void testUpdateTask() {
    // An empty TaskResult must be rejected server-side with two validation errors.
    TaskResult emptyResult = new TaskResult();
    try {
        taskClient.updateTask(emptyResult);
    } catch (ConductorClientException e) {
        assertEquals(400, e.getStatus());
        assertEquals("Validation failed, check below errors for detail.", e.getMessage());
        assertFalse(e.isRetryable());
        List<ValidationError> validationErrors = e.getValidationErrors();
        assertEquals(2, validationErrors.size());
        List<String> messages =
                validationErrors.stream()
                        .map(ValidationError::getMessage)
                        .collect(Collectors.toList());
        assertTrue(messages.contains("Workflow Id cannot be null or empty"));
        throw e;
    }
}
@Test(expected = ConductorClientException.class)
public void testGetWorfklowNotFound() {
    // NOTE(review): "Worfklow" in the method name is a long-standing typo; kept
    // as-is so external test selectors keep matching.
    // Fetching an unknown workflow id must surface a non-retryable 404.
    try {
        workflowClient.getWorkflow("w123", true);
    } catch (ConductorClientException e) {
        assertFalse(e.isRetryable());
        assertEquals(404, e.getStatus());
        assertEquals("No such workflow found by id: w123", e.getMessage());
        throw e;
    }
}
@Test(expected = ConductorClientException.class)
public void testEmptyCreateWorkflowDef() {
    // Registering a blank WorkflowDef must fail validation on both name and tasks.
    try {
        metadataClient.registerWorkflowDef(new WorkflowDef());
    } catch (ConductorClientException e) {
        assertEquals(400, e.getStatus());
        assertEquals("Validation failed, check below errors for detail.", e.getMessage());
        assertFalse(e.isRetryable());
        List<String> messages =
                e.getValidationErrors().stream()
                        .map(ValidationError::getMessage)
                        .collect(Collectors.toList());
        assertTrue(messages.contains("WorkflowDef name cannot be null or empty"));
        assertTrue(messages.contains("WorkflowTask list cannot be empty"));
        throw e;
    }
}
@Test(expected = ConductorClientException.class)
public void testUpdateWorkflowDef() {
    // Updating with a blank WorkflowDef must fail validation on name, tasks and
    // owner email — exactly three errors.
    try {
        List<WorkflowDef> workflowDefs = new ArrayList<>();
        workflowDefs.add(new WorkflowDef());
        metadataClient.updateWorkflowDefs(workflowDefs);
    } catch (ConductorClientException e) {
        assertEquals(400, e.getStatus());
        assertEquals("Validation failed, check below errors for detail.", e.getMessage());
        assertFalse(e.isRetryable());
        List<ValidationError> validationErrors = e.getValidationErrors();
        assertEquals(3, validationErrors.size());
        List<String> messages =
                validationErrors.stream()
                        .map(ValidationError::getMessage)
                        .collect(Collectors.toList());
        assertTrue(messages.contains("WorkflowTask list cannot be empty"));
        assertTrue(messages.contains("WorkflowDef name cannot be null or empty"));
        assertTrue(messages.contains("ownerEmail cannot be empty"));
        throw e;
    }
}
@Test
public void testTaskByTaskId() {
    // Fetching an unknown task id must 404. Fix: the original version passed
    // silently when no exception was thrown at all; fail explicitly instead.
    try {
        taskClient.getTaskDetails("test999");
        fail("Expected ConductorClientException for unknown taskId: test999");
    } catch (ConductorClientException e) {
        assertEquals(404, e.getStatus());
        assertEquals("Task not found for taskId: test999", e.getMessage());
    }
}
@Test
public void testListworkflowsByCorrelationId() {
    // Fix: the original made the call but asserted nothing, so any return value
    // passed. At minimum the lookup must succeed and return a non-null
    // (possibly empty) result for an unknown correlation id.
    assertNotNull(workflowClient.getWorkflows("test", "test12", false, false));
}
@Test(expected = ConductorClientException.class)
public void testCreateInvalidWorkflowDef() {
    // Registering an empty definition must produce exactly three validation
    // errors. Fix: removed the unused workflowDefList the original built but
    // never passed anywhere (dead code).
    try {
        WorkflowDef workflowDef = new WorkflowDef();
        metadataClient.registerWorkflowDef(workflowDef);
    } catch (ConductorClientException e) {
        assertEquals(3, e.getValidationErrors().size());
        assertEquals(400, e.getStatus());
        assertEquals("Validation failed, check below errors for detail.", e.getMessage());
        assertFalse(e.isRetryable());
        List<ValidationError> errors = e.getValidationErrors();
        List<String> errorMessages =
                errors.stream().map(ValidationError::getMessage).collect(Collectors.toList());
        assertTrue(errorMessages.contains("WorkflowDef name cannot be null or empty"));
        assertTrue(errorMessages.contains("WorkflowTask list cannot be empty"));
        assertTrue(errorMessages.contains("ownerEmail cannot be empty"));
        throw e;
    }
}
@Test(expected = ConductorClientException.class)
public void testUpdateTaskDefNameNull() {
    // A TaskDef without a name must fail validation with exactly two errors.
    try {
        metadataClient.updateTaskDef(new TaskDef());
    } catch (ConductorClientException e) {
        assertEquals(400, e.getStatus());
        assertEquals("Validation failed, check below errors for detail.", e.getMessage());
        assertFalse(e.isRetryable());
        List<ValidationError> validationErrors = e.getValidationErrors();
        assertEquals(2, validationErrors.size());
        List<String> messages =
                validationErrors.stream()
                        .map(ValidationError::getMessage)
                        .collect(Collectors.toList());
        assertTrue(messages.contains("TaskDef name cannot be null or empty"));
        assertTrue(messages.contains("ownerEmail cannot be empty"));
        throw e;
    }
}
@Test(expected = ConductorClientException.class)
public void testGetTaskDefNotExisting() {
    // An empty task-type name is rejected by the server; the client surfaces it
    // as a ConductorClientException (asserted via the expected attribute).
    metadataClient.getTaskDef("");
}
@Test(expected = ConductorClientException.class)
public void testUpdateWorkflowDefNameNull() {
    // A blank WorkflowDef in an update batch must fail validation with exactly
    // three errors (name, tasks, owner email).
    List<WorkflowDef> defsToUpdate = new ArrayList<>();
    defsToUpdate.add(new WorkflowDef());
    try {
        metadataClient.updateWorkflowDefs(defsToUpdate);
    } catch (ConductorClientException e) {
        assertEquals(3, e.getValidationErrors().size());
        assertEquals(400, e.getStatus());
        assertEquals("Validation failed, check below errors for detail.", e.getMessage());
        assertFalse(e.isRetryable());
        List<String> messages =
                e.getValidationErrors().stream()
                        .map(ValidationError::getMessage)
                        .collect(Collectors.toList());
        assertTrue(messages.contains("WorkflowDef name cannot be null or empty"));
        assertTrue(messages.contains("WorkflowTask list cannot be empty"));
        assertTrue(messages.contains("ownerEmail cannot be empty"));
        throw e;
    }
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/test-harness/src/test/java/com/netflix/conductor/test/utils/UserTask.java | test-harness/src/test/java/com/netflix/conductor/test/utils/UserTask.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.test.utils;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.core.execution.WorkflowExecutor;
import com.netflix.conductor.core.execution.tasks.WorkflowSystemTask;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.util.concurrent.Uninterruptibles;
@Component(UserTask.NAME)
public class UserTask extends WorkflowSystemTask {

    private static final Logger LOGGER = LoggerFactory.getLogger(UserTask.class);

    public static final String NAME = "USER_TASK";

    // Target shape of the task input: {key -> {sampleKey -> [values...]}}
    private static final TypeReference<Map<String, Map<String, List<Object>>>>
            mapStringListObjects = new TypeReference<>() {};

    private final ObjectMapper objectMapper;

    public UserTask(ObjectMapper objectMapper) {
        super(NAME);
        this.objectMapper = objectMapper;
        LOGGER.info("Initialized system task - {}", getClass().getCanonicalName());
    }

    /**
     * Simulates a slow user task: sleeps one second, then either leaves the task
     * IN_PROGRESS (async-complete mode) or completes it with an output "size"
     * equal to the TEST_SAMPLE list length of the "largeInput" input entry.
     */
    @Override
    public void start(WorkflowModel workflow, TaskModel task, WorkflowExecutor executor) {
        Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
        if (task.getWorkflowTask().isAsyncComplete()) {
            // Completion will arrive externally; just mark the task in progress.
            task.setStatus(TaskModel.Status.IN_PROGRESS);
            return;
        }
        Map<String, Map<String, List<Object>>> input =
                objectMapper.convertValue(task.getInputData(), mapStringListObjects);
        // Fallback used when the input has no "largeInput" entry.
        Map<String, List<Object>> fallback = new HashMap<>();
        fallback.put("TEST_SAMPLE", Collections.singletonList("testDefault"));
        int sampleSize = input.getOrDefault("largeInput", fallback).get("TEST_SAMPLE").size();
        Map<String, Object> output = new HashMap<>();
        output.put("size", sampleSize);
        task.setOutputData(output);
        task.setStatus(TaskModel.Status.COMPLETED);
    }

    @Override
    public boolean isAsync() {
        return true;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/test-harness/src/test/java/com/netflix/conductor/test/utils/MockExternalPayloadStorage.java | test-harness/src/test/java/com/netflix/conductor/test/utils/MockExternalPayloadStorage.java | /*
* Copyright 2021 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.test.utils;
import java.io.*;
import java.nio.file.Files;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import org.apache.commons.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.workflow.SubWorkflowParams;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.common.run.ExternalStorageLocation;
import com.netflix.conductor.common.utils.ExternalPayloadStorage;
import com.fasterxml.jackson.databind.ObjectMapper;
import static com.netflix.conductor.common.metadata.tasks.TaskType.TASK_TYPE_SIMPLE;
import static com.netflix.conductor.common.metadata.tasks.TaskType.TASK_TYPE_SUB_WORKFLOW;
/**
 * A {@link ExternalPayloadStorage} implementation that stores payloads as JSON files in a
 * per-instance temporary directory. Intended for tests only.
 */
@ConditionalOnProperty(name = "conductor.external-payload-storage.type", havingValue = "mock")
@Component
public class MockExternalPayloadStorage implements ExternalPayloadStorage {

    private static final Logger LOGGER = LoggerFactory.getLogger(MockExternalPayloadStorage.class);

    private final ObjectMapper objectMapper;
    private final File payloadDir;

    public MockExternalPayloadStorage(ObjectMapper objectMapper) throws IOException {
        this.objectMapper = objectMapper;
        this.payloadDir = Files.createTempDirectory("payloads").toFile();
        LOGGER.info(
                "{} initialized in directory: {}",
                this.getClass().getSimpleName(),
                payloadDir.getAbsolutePath());
    }

    /** Returns a location with a random {@code <uuid>.json} path; operation/type are ignored. */
    @Override
    public ExternalStorageLocation getLocation(
            Operation operation, PayloadType payloadType, String path) {
        ExternalStorageLocation location = new ExternalStorageLocation();
        location.setPath(UUID.randomUUID() + ".json");
        return location;
    }

    /**
     * Copies the payload stream into a file under the temp directory.
     *
     * <p>Fix: the original never closed the {@link FileOutputStream} (resource leak) and logged
     * write errors without the exception. try-with-resources now closes both streams;
     * {@code FileOutputStream} creates the file, so the explicit createNewFile() was dropped.
     */
    @Override
    public void upload(String path, InputStream payload, long payloadSize) {
        File file = new File(payloadDir, path);
        String filePath = file.getAbsolutePath();
        try (InputStream in = payload;
                FileOutputStream out = new FileOutputStream(file)) {
            IOUtils.copy(in, out);
            LOGGER.debug("Written to {}", filePath);
        } catch (IOException e) {
            // swallowed intentionally: downstream tests fail on the missing file
            LOGGER.error("Error writing to {}", filePath, e);
        }
    }

    /** Opens the stored file for reading; returns null (after logging) if it cannot be read. */
    @Override
    public InputStream download(String path) {
        try {
            LOGGER.debug("Reading from {}", path);
            return new FileInputStream(new File(payloadDir, path));
        } catch (IOException e) {
            LOGGER.error("Error reading {}", path, e);
            return null;
        }
    }

    /** Serializes the map to JSON and stores it under {@code path}. */
    public void upload(String path, Map<String, Object> payload) {
        try {
            InputStream bais = new ByteArrayInputStream(objectMapper.writeValueAsBytes(payload));
            upload(path, bais, 0);
        } catch (IOException e) {
            LOGGER.error("Error serializing map to json", e);
        }
    }

    /** Test fixture: the bundled {@code /output.json} resource as a stream. */
    public InputStream readOutputDotJson() {
        return MockExternalPayloadStorage.class.getResourceAsStream("/output.json");
    }

    /**
     * Builds a DYNAMIC-fork input containing a SUB_WORKFLOW task whose input parameters are the
     * large {@code /output.json} payload. Returns an empty map on read errors so that dependent
     * tests fail visibly. Fix: the resource stream is now closed and the error is logged.
     */
    @SuppressWarnings("unchecked")
    public Map<String, Object> curateDynamicForkLargePayload() {
        Map<String, Object> dynamicForkLargePayload = new HashMap<>();
        try (InputStream inputStream = readOutputDotJson()) {
            Map<String, Object> largePayload = objectMapper.readValue(inputStream, Map.class);

            WorkflowTask simpleWorkflowTask = new WorkflowTask();
            simpleWorkflowTask.setName("integration_task_10");
            simpleWorkflowTask.setTaskReferenceName("t10");
            simpleWorkflowTask.setType(TASK_TYPE_SIMPLE);
            simpleWorkflowTask.setInputParameters(
                    Collections.singletonMap("p1", "${workflow.input.imageType}"));

            WorkflowDef subWorkflowDef = new WorkflowDef();
            subWorkflowDef.setName("one_task_workflow");
            subWorkflowDef.setVersion(1);
            subWorkflowDef.setTasks(Collections.singletonList(simpleWorkflowTask));

            SubWorkflowParams subWorkflowParams = new SubWorkflowParams();
            subWorkflowParams.setName("one_task_workflow");
            subWorkflowParams.setVersion(1);
            subWorkflowParams.setWorkflowDef(subWorkflowDef);

            WorkflowTask subWorkflowTask = new WorkflowTask();
            subWorkflowTask.setName("large_payload_subworkflow");
            subWorkflowTask.setType(TASK_TYPE_SUB_WORKFLOW);
            subWorkflowTask.setTaskReferenceName("large_payload_subworkflow");
            subWorkflowTask.setInputParameters(largePayload);
            subWorkflowTask.setSubWorkflowParam(subWorkflowParams);

            dynamicForkLargePayload.put("dynamicTasks", List.of(subWorkflowTask));
            dynamicForkLargePayload.put(
                    "dynamicTasksInput", Map.of("large_payload_subworkflow", largePayload));
        } catch (IOException e) {
            // return the empty map so that the calling test fails visibly
            LOGGER.error("Error curating dynamic fork large payload", e);
        }
        return dynamicForkLargePayload;
    }

    /**
     * Reads the JSON file stored at {@code path} back into a map; empty map on failure.
     *
     * <p>Fix: the original leaked the input stream; it is now closed via try-with-resources.
     */
    @SuppressWarnings("unchecked")
    public Map<String, Object> downloadPayload(String path) {
        try (InputStream inputStream = download(path)) {
            if (inputStream != null) {
                return objectMapper.readValue(inputStream, Map.class);
            }
        } catch (IOException e) {
            LOGGER.error("Error in downloading payload for path {}", path, e);
        }
        return new HashMap<>();
    }

    /**
     * Builds a large payload by repeating the {@code /output.json} content {@code repeat} times,
     * keyed by the repetition index. Empty map on read errors (logged).
     */
    @SuppressWarnings("unchecked")
    public Map<String, Object> createLargePayload(int repeat) {
        Map<String, Object> largePayload = new HashMap<>();
        try (InputStream inputStream = readOutputDotJson()) {
            Map<String, Object> payload = objectMapper.readValue(inputStream, Map.class);
            for (int i = 0; i < repeat; i++) {
                largePayload.put(String.valueOf(i), payload);
            }
        } catch (IOException e) {
            // return the empty map so that the calling test fails visibly
            LOGGER.error("Error creating large payload", e);
        }
        return largePayload;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/test-harness/src/test/java/com/netflix/conductor/test/config/LocalStackS3Configuration.java | test-harness/src/test/java/com/netflix/conductor/test/config/LocalStackS3Configuration.java | /*
* Copyright 2024 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.test.config;
import java.net.URI;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Primary;
import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.presigner.S3Presigner;
/**
 * Test configuration that overrides the production S3 beans to point at LocalStack. Active only
 * when the external payload storage type is "s3", so tests exercise LocalStack instead of AWS.
 */
@TestConfiguration
@ConditionalOnProperty(name = "conductor.external-payload-storage.type", havingValue = "s3")
public class LocalStackS3Configuration {

    private static String localStackEndpoint;

    /** Records the LocalStack endpoint; must be called before the Spring context starts. */
    public static void setLocalStackEndpoint(String endpoint) {
        localStackEndpoint = endpoint;
    }

    /** S3 client wired to LocalStack; overrides the production bean during tests. */
    @Bean
    @Primary
    public S3Client localStackS3Client() {
        StaticCredentialsProvider credentials =
                StaticCredentialsProvider.create(AwsBasicCredentials.create("test", "test"));
        var clientBuilder =
                S3Client.builder()
                        .region(Region.US_EAST_1)
                        .credentialsProvider(credentials)
                        .forcePathStyle(true); // Required for LocalStack S3 compatibility
        if (localStackEndpoint != null) {
            clientBuilder.endpointOverride(URI.create(localStackEndpoint));
        }
        return clientBuilder.build();
    }

    /** S3 presigner wired to LocalStack; overrides the production bean during tests. */
    @Bean
    @Primary
    public S3Presigner localStackS3Presigner() {
        StaticCredentialsProvider credentials =
                StaticCredentialsProvider.create(AwsBasicCredentials.create("test", "test"));
        var presignerBuilder =
                S3Presigner.builder().region(Region.US_EAST_1).credentialsProvider(credentials);
        if (localStackEndpoint != null) {
            presignerBuilder.endpointOverride(URI.create(localStackEndpoint));
        }
        return presignerBuilder.build();
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/test-harness/src/test/java/com/netflix/conductor/test/config/LocalStackSQSConfiguration.java | test-harness/src/test/java/com/netflix/conductor/test/config/LocalStackSQSConfiguration.java | /*
* Copyright 2024 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.test.config;
import java.net.URI;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Primary;
import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.sqs.SqsClient;
/**
 * Test configuration that overrides the production SQS bean to point at LocalStack. Active only
 * when SQS event queues are enabled, so tests exercise LocalStack instead of AWS.
 */
@TestConfiguration
@ConditionalOnProperty(name = "conductor.event-queues.sqs.enabled", havingValue = "true")
public class LocalStackSQSConfiguration {

    private static String localStackEndpoint;

    /** Records the LocalStack endpoint; must be called before the Spring context starts. */
    public static void setLocalStackEndpoint(String endpoint) {
        localStackEndpoint = endpoint;
    }

    /** SQS client wired to LocalStack; overrides the production bean during tests. */
    @Bean
    @Primary
    public SqsClient localStackSqsClient() {
        StaticCredentialsProvider credentials =
                StaticCredentialsProvider.create(AwsBasicCredentials.create("test", "test"));
        var clientBuilder =
                SqsClient.builder().region(Region.US_EAST_1).credentialsProvider(credentials);
        if (localStackEndpoint != null) {
            clientBuilder.endpointOverride(URI.create(localStackEndpoint));
        }
        return clientBuilder.build();
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/dao/CassandraBaseDAO.java | cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/dao/CassandraBaseDAO.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.cassandra.dao;
import java.io.IOException;
import java.util.UUID;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.cassandra.config.CassandraProperties;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.metrics.Monitors;
import com.datastax.driver.core.DataType;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.schemabuilder.SchemaBuilder;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableMap;
import static com.netflix.conductor.cassandra.util.Constants.DAO_NAME;
import static com.netflix.conductor.cassandra.util.Constants.ENTITY_KEY;
import static com.netflix.conductor.cassandra.util.Constants.EVENT_EXECUTION_ID_KEY;
import static com.netflix.conductor.cassandra.util.Constants.EVENT_HANDLER_KEY;
import static com.netflix.conductor.cassandra.util.Constants.EVENT_HANDLER_NAME_KEY;
import static com.netflix.conductor.cassandra.util.Constants.HANDLERS_KEY;
import static com.netflix.conductor.cassandra.util.Constants.MESSAGE_ID_KEY;
import static com.netflix.conductor.cassandra.util.Constants.PAYLOAD_KEY;
import static com.netflix.conductor.cassandra.util.Constants.SHARD_ID_KEY;
import static com.netflix.conductor.cassandra.util.Constants.TABLE_EVENT_EXECUTIONS;
import static com.netflix.conductor.cassandra.util.Constants.TABLE_EVENT_HANDLERS;
import static com.netflix.conductor.cassandra.util.Constants.TABLE_TASK_DEFS;
import static com.netflix.conductor.cassandra.util.Constants.TABLE_TASK_DEF_LIMIT;
import static com.netflix.conductor.cassandra.util.Constants.TABLE_TASK_LOOKUP;
import static com.netflix.conductor.cassandra.util.Constants.TABLE_WORKFLOWS;
import static com.netflix.conductor.cassandra.util.Constants.TABLE_WORKFLOW_DEFS;
import static com.netflix.conductor.cassandra.util.Constants.TABLE_WORKFLOW_DEFS_INDEX;
import static com.netflix.conductor.cassandra.util.Constants.TASK_DEFINITION_KEY;
import static com.netflix.conductor.cassandra.util.Constants.TASK_DEFS_KEY;
import static com.netflix.conductor.cassandra.util.Constants.TASK_DEF_NAME_KEY;
import static com.netflix.conductor.cassandra.util.Constants.TASK_ID_KEY;
import static com.netflix.conductor.cassandra.util.Constants.TOTAL_PARTITIONS_KEY;
import static com.netflix.conductor.cassandra.util.Constants.TOTAL_TASKS_KEY;
import static com.netflix.conductor.cassandra.util.Constants.WORKFLOW_DEFINITION_KEY;
import static com.netflix.conductor.cassandra.util.Constants.WORKFLOW_DEF_INDEX_KEY;
import static com.netflix.conductor.cassandra.util.Constants.WORKFLOW_DEF_INDEX_VALUE;
import static com.netflix.conductor.cassandra.util.Constants.WORKFLOW_DEF_NAME_KEY;
import static com.netflix.conductor.cassandra.util.Constants.WORKFLOW_DEF_NAME_VERSION_KEY;
import static com.netflix.conductor.cassandra.util.Constants.WORKFLOW_ID_KEY;
import static com.netflix.conductor.cassandra.util.Constants.WORKFLOW_VERSION_KEY;
/**
* Creates the keyspace and tables.
*
* <p>CREATE KEYSPACE IF NOT EXISTS conductor WITH replication = { 'class' :
* 'NetworkTopologyStrategy', 'us-east': '3'};
*
* <p>CREATE TABLE IF NOT EXISTS conductor.workflows ( workflow_id uuid, shard_id int, task_id text,
* entity text, payload text, total_tasks int STATIC, total_partitions int STATIC, PRIMARY
* KEY((workflow_id, shard_id), entity, task_id) );
*
* <p>CREATE TABLE IF NOT EXISTS conductor.task_lookup( task_id uuid, workflow_id uuid, PRIMARY KEY
* (task_id) );
*
* <p>CREATE TABLE IF NOT EXISTS conductor.task_def_limit( task_def_name text, task_id uuid,
 * workflow_id uuid, PRIMARY KEY ((task_def_name), task_id) );
*
* <p>CREATE TABLE IF NOT EXISTS conductor.workflow_definitions( workflow_def_name text, version
* int, workflow_definition text, PRIMARY KEY ((workflow_def_name), version) );
*
* <p>CREATE TABLE IF NOT EXISTS conductor.workflow_defs_index( workflow_def_version_index text,
* workflow_def_name_version text, workflow_def_index_value text,PRIMARY KEY
* ((workflow_def_version_index), workflow_def_name_version) );
*
* <p>CREATE TABLE IF NOT EXISTS conductor.task_definitions( task_defs text, task_def_name text,
* task_definition text, PRIMARY KEY ((task_defs), task_def_name) );
*
* <p>CREATE TABLE IF NOT EXISTS conductor.event_handlers( handlers text, event_handler_name text,
* event_handler text, PRIMARY KEY ((handlers), event_handler_name) );
*
* <p>CREATE TABLE IF NOT EXISTS conductor.event_executions( message_id text, event_handler_name
* text, event_execution_id text, payload text, PRIMARY KEY ((message_id, event_handler_name),
* event_execution_id) );
*/
public abstract class CassandraBaseDAO {
private static final Logger LOGGER = LoggerFactory.getLogger(CassandraBaseDAO.class);
private final ObjectMapper objectMapper;
protected final Session session;
protected final CassandraProperties properties;
private boolean initialized = false;
/**
 * Stores the shared Cassandra session, JSON mapper and configuration, then eagerly
 * creates the keyspace and all tables via {@code init()}.
 */
public CassandraBaseDAO(
        Session session, ObjectMapper objectMapper, CassandraProperties properties) {
    this.session = session;
    this.objectMapper = objectMapper;
    this.properties = properties;
    init();
}
/**
 * Parses {@code uuidString} into a {@link UUID}.
 *
 * @param message context prefix used in the error text if parsing fails
 * @throws IllegalArgumentException if the string is not a valid UUID; the original
 *     parse failure is preserved as the cause
 */
protected static UUID toUUID(String uuidString, String message) {
    try {
        return UUID.fromString(uuidString);
    } catch (IllegalArgumentException cause) {
        throw new IllegalArgumentException(message + " " + uuidString, cause);
    }
}
/**
 * One-time setup: creates the keyspace and every Conductor table (all DDL uses
 * IF NOT EXISTS semantics). Guarded by {@code initialized} so repeated calls are
 * no-ops; failures are logged and rethrown so the DAO never starts half-initialized.
 */
private void init() {
    try {
        if (!initialized) {
            session.execute(getCreateKeyspaceStatement());
            session.execute(getCreateWorkflowsTableStatement());
            session.execute(getCreateTaskLookupTableStatement());
            session.execute(getCreateTaskDefLimitTableStatement());
            session.execute(getCreateWorkflowDefsTableStatement());
            session.execute(getCreateWorkflowDefsIndexTableStatement());
            session.execute(getCreateTaskDefsTableStatement());
            session.execute(getCreateEventHandlersTableStatement());
            session.execute(getCreateEventExecutionsTableStatement());
            LOGGER.info(
                    "{} initialization complete! Tables created!", getClass().getSimpleName());
            initialized = true;
        }
    } catch (Exception e) {
        LOGGER.error("Error initializing and setting up keyspace and table in cassandra", e);
        throw e;
    }
}
/**
 * CQL for CREATE KEYSPACE IF NOT EXISTS, using the replication strategy and
 * factor from {@link CassandraProperties}, with durable writes enabled.
 */
private String getCreateKeyspaceStatement() {
    return SchemaBuilder.createKeyspace(properties.getKeyspace())
            .ifNotExists()
            .with()
            .replication(
                    ImmutableMap.of(
                            "class",
                            properties.getReplicationStrategy(),
                            properties.getReplicationFactorKey(),
                            properties.getReplicationFactorValue()))
            .durableWrites(true)
            .getQueryString();
}
/**
 * CQL for the workflows table: partitioned by (workflow_id, shard_id), clustered
 * by (entity, task_id); total_tasks/total_partitions are static (one value per
 * partition).
 */
private String getCreateWorkflowsTableStatement() {
    return SchemaBuilder.createTable(properties.getKeyspace(), TABLE_WORKFLOWS)
            .ifNotExists()
            .addPartitionKey(WORKFLOW_ID_KEY, DataType.uuid())
            .addPartitionKey(SHARD_ID_KEY, DataType.cint())
            .addClusteringColumn(ENTITY_KEY, DataType.text())
            .addClusteringColumn(TASK_ID_KEY, DataType.text())
            .addColumn(PAYLOAD_KEY, DataType.text())
            .addStaticColumn(TOTAL_TASKS_KEY, DataType.cint())
            .addStaticColumn(TOTAL_PARTITIONS_KEY, DataType.cint())
            .getQueryString();
}
/** CQL for the task_lookup table mapping task_id (partition key) to its workflow_id. */
private String getCreateTaskLookupTableStatement() {
    return SchemaBuilder.createTable(properties.getKeyspace(), TABLE_TASK_LOOKUP)
            .ifNotExists()
            .addPartitionKey(TASK_ID_KEY, DataType.uuid())
            .addColumn(WORKFLOW_ID_KEY, DataType.uuid())
            .getQueryString();
}
/**
 * CQL for the task_def_limit table: per task-definition name (partition key), the
 * in-flight task ids (clustering) and their workflow ids — used for concurrency limits.
 */
private String getCreateTaskDefLimitTableStatement() {
    return SchemaBuilder.createTable(properties.getKeyspace(), TABLE_TASK_DEF_LIMIT)
            .ifNotExists()
            .addPartitionKey(TASK_DEF_NAME_KEY, DataType.text())
            .addClusteringColumn(TASK_ID_KEY, DataType.uuid())
            .addColumn(WORKFLOW_ID_KEY, DataType.uuid())
            .getQueryString();
}
/**
 * CQL for the workflow_definitions table: JSON definition per (workflow_def_name,
 * version).
 */
private String getCreateWorkflowDefsTableStatement() {
    return SchemaBuilder.createTable(properties.getKeyspace(), TABLE_WORKFLOW_DEFS)
            .ifNotExists()
            .addPartitionKey(WORKFLOW_DEF_NAME_KEY, DataType.text())
            .addClusteringColumn(WORKFLOW_VERSION_KEY, DataType.cint())
            .addColumn(WORKFLOW_DEFINITION_KEY, DataType.text())
            .getQueryString();
}
private String getCreateWorkflowDefsIndexTableStatement() {
return SchemaBuilder.createTable(properties.getKeyspace(), TABLE_WORKFLOW_DEFS_INDEX)
.ifNotExists()
.addPartitionKey(WORKFLOW_DEF_INDEX_KEY, DataType.text())
.addClusteringColumn(WORKFLOW_DEF_NAME_VERSION_KEY, DataType.text())
.addColumn(WORKFLOW_DEF_INDEX_VALUE, DataType.text())
.getQueryString();
}
private String getCreateTaskDefsTableStatement() {
return SchemaBuilder.createTable(properties.getKeyspace(), TABLE_TASK_DEFS)
.ifNotExists()
.addPartitionKey(TASK_DEFS_KEY, DataType.text())
.addClusteringColumn(TASK_DEF_NAME_KEY, DataType.text())
.addColumn(TASK_DEFINITION_KEY, DataType.text())
.getQueryString();
}
private String getCreateEventHandlersTableStatement() {
return SchemaBuilder.createTable(properties.getKeyspace(), TABLE_EVENT_HANDLERS)
.ifNotExists()
.addPartitionKey(HANDLERS_KEY, DataType.text())
.addClusteringColumn(EVENT_HANDLER_NAME_KEY, DataType.text())
.addColumn(EVENT_HANDLER_KEY, DataType.text())
.getQueryString();
}
private String getCreateEventExecutionsTableStatement() {
return SchemaBuilder.createTable(properties.getKeyspace(), TABLE_EVENT_EXECUTIONS)
.ifNotExists()
.addPartitionKey(MESSAGE_ID_KEY, DataType.text())
.addPartitionKey(EVENT_HANDLER_NAME_KEY, DataType.text())
.addClusteringColumn(EVENT_EXECUTION_ID_KEY, DataType.text())
.addColumn(PAYLOAD_KEY, DataType.text())
.getQueryString();
}
    /**
     * Serializes {@code value} to a JSON string.
     *
     * @throws NonTransientException if serialization fails (the input cannot be mapped to JSON)
     */
    String toJson(Object value) {
        try {
            return objectMapper.writeValueAsString(value);
        } catch (JsonProcessingException e) {
            throw new NonTransientException("Error serializing to json", e);
        }
    }
    /**
     * Deserializes {@code json} into an instance of {@code clazz}.
     *
     * @throws NonTransientException if the JSON cannot be parsed into the target type
     */
    <T> T readValue(String json, Class<T> clazz) {
        try {
            return objectMapper.readValue(json, clazz);
        } catch (IOException e) {
            throw new NonTransientException("Error de-serializing json", e);
        }
    }
    /** Records a DAO request metric for {@code action} with no task/workflow type context. */
    void recordCassandraDaoRequests(String action) {
        recordCassandraDaoRequests(action, "n/a", "n/a");
    }

    /** Records a DAO request metric for {@code action}, tagged with task and workflow type. */
    void recordCassandraDaoRequests(String action, String taskType, String workflowType) {
        Monitors.recordDaoRequests(DAO_NAME, action, taskType, workflowType);
    }

    /** Records a DAO event-request metric for {@code action} on the given {@code event}. */
    void recordCassandraDaoEventRequests(String action, String event) {
        Monitors.recordDaoEventRequests(DAO_NAME, action, event);
    }

    /** Records the serialized payload size observed for {@code action}. */
    void recordCassandraDaoPayloadSize(
            String action, int size, String taskType, String workflowType) {
        Monitors.recordDaoPayloadSize(DAO_NAME, action, taskType, workflowType, size);
    }
    /**
     * Simple holder for a workflow's bookkeeping counters: the total number of tasks recorded
     * for the workflow and the number of partitions its rows span.
     */
    static class WorkflowMetadata {

        // Total number of tasks recorded for the workflow
        private int totalTasks;
        // Number of partitions the workflow's rows occupy
        private int totalPartitions;

        public int getTotalTasks() {
            return totalTasks;
        }

        public void setTotalTasks(int totalTasks) {
            this.totalTasks = totalTasks;
        }

        public int getTotalPartitions() {
            return totalPartitions;
        }

        public void setTotalPartitions(int totalPartitions) {
            this.totalPartitions = totalPartitions;
        }
    }
}
/*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.cassandra.dao;
import java.util.*;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.annotations.Trace;
import com.netflix.conductor.cassandra.config.CassandraProperties;
import com.netflix.conductor.cassandra.util.Statements;
import com.netflix.conductor.common.metadata.events.EventExecution;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.core.exception.NotFoundException;
import com.netflix.conductor.core.exception.TransientException;
import com.netflix.conductor.dao.ConcurrentExecutionLimitDAO;
import com.netflix.conductor.dao.ExecutionDAO;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import com.datastax.driver.core.*;
import com.datastax.driver.core.exceptions.DriverException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import static com.netflix.conductor.cassandra.util.Constants.*;
/**
 * Cassandra-backed implementation of {@link ExecutionDAO} and
 * {@link ConcurrentExecutionLimitDAO}. All CQL statements are prepared once at construction:
 * insert/update/delete statements use the configured write consistency level, select statements
 * use the read consistency level.
 */
@Trace
public class CassandraExecutionDAO extends CassandraBaseDAO
        implements ExecutionDAO, ConcurrentExecutionLimitDAO {

    private static final Logger LOGGER = LoggerFactory.getLogger(CassandraExecutionDAO.class);
    private static final String CLASS_NAME = CassandraExecutionDAO.class.getSimpleName();

    // Prepared inserts (write consistency level)
    protected final PreparedStatement insertWorkflowStatement;
    protected final PreparedStatement insertTaskStatement;
    protected final PreparedStatement insertEventExecutionStatement;

    // Prepared selects (read consistency level)
    protected final PreparedStatement selectTotalStatement;
    protected final PreparedStatement selectTaskStatement;
    protected final PreparedStatement selectWorkflowStatement;
    protected final PreparedStatement selectWorkflowWithTasksStatement;
    protected final PreparedStatement selectTaskLookupStatement;
    protected final PreparedStatement selectTasksFromTaskDefLimitStatement;
    protected final PreparedStatement selectEventExecutionsStatement;

    // Prepared updates (write consistency level)
    protected final PreparedStatement updateWorkflowStatement;
    protected final PreparedStatement updateTotalTasksStatement;
    protected final PreparedStatement updateTotalPartitionsStatement;
    protected final PreparedStatement updateTaskLookupStatement;
    protected final PreparedStatement updateTaskDefLimitStatement;
    protected final PreparedStatement updateEventExecutionStatement;

    // Prepared deletes (write consistency level)
    protected final PreparedStatement deleteWorkflowStatement;
    protected final PreparedStatement deleteTaskStatement;
    protected final PreparedStatement deleteTaskLookupStatement;
    protected final PreparedStatement deleteTaskDefLimitStatement;
    protected final PreparedStatement deleteEventExecutionStatement;

    // TTL (seconds) applied to event execution rows when they are updated
    protected final int eventExecutionsTTL;
    /**
     * Prepares every CQL statement used by this DAO. Writes (insert/update/delete) are prepared
     * with the configured write consistency level, reads with the read consistency level.
     */
    public CassandraExecutionDAO(
            Session session,
            ObjectMapper objectMapper,
            CassandraProperties properties,
            Statements statements) {
        super(session, objectMapper, properties);

        // TTL (seconds) for event execution rows, from the configured persistence duration
        eventExecutionsTTL = (int) properties.getEventExecutionPersistenceTtl().getSeconds();

        this.insertWorkflowStatement =
                session.prepare(statements.getInsertWorkflowStatement())
                        .setConsistencyLevel(properties.getWriteConsistencyLevel());
        this.insertTaskStatement =
                session.prepare(statements.getInsertTaskStatement())
                        .setConsistencyLevel(properties.getWriteConsistencyLevel());
        this.insertEventExecutionStatement =
                session.prepare(statements.getInsertEventExecutionStatement())
                        .setConsistencyLevel(properties.getWriteConsistencyLevel());

        this.selectTotalStatement =
                session.prepare(statements.getSelectTotalStatement())
                        .setConsistencyLevel(properties.getReadConsistencyLevel());
        this.selectTaskStatement =
                session.prepare(statements.getSelectTaskStatement())
                        .setConsistencyLevel(properties.getReadConsistencyLevel());
        this.selectWorkflowStatement =
                session.prepare(statements.getSelectWorkflowStatement())
                        .setConsistencyLevel(properties.getReadConsistencyLevel());
        this.selectWorkflowWithTasksStatement =
                session.prepare(statements.getSelectWorkflowWithTasksStatement())
                        .setConsistencyLevel(properties.getReadConsistencyLevel());
        this.selectTaskLookupStatement =
                session.prepare(statements.getSelectTaskFromLookupTableStatement())
                        .setConsistencyLevel(properties.getReadConsistencyLevel());
        this.selectTasksFromTaskDefLimitStatement =
                session.prepare(statements.getSelectTasksFromTaskDefLimitStatement())
                        .setConsistencyLevel(properties.getReadConsistencyLevel());
        this.selectEventExecutionsStatement =
                session.prepare(
                                statements
                                        .getSelectAllEventExecutionsForMessageFromEventExecutionsStatement())
                        .setConsistencyLevel(properties.getReadConsistencyLevel());

        this.updateWorkflowStatement =
                session.prepare(statements.getUpdateWorkflowStatement())
                        .setConsistencyLevel(properties.getWriteConsistencyLevel());
        this.updateTotalTasksStatement =
                session.prepare(statements.getUpdateTotalTasksStatement())
                        .setConsistencyLevel(properties.getWriteConsistencyLevel());
        this.updateTotalPartitionsStatement =
                session.prepare(statements.getUpdateTotalPartitionsStatement())
                        .setConsistencyLevel(properties.getWriteConsistencyLevel());
        this.updateTaskLookupStatement =
                session.prepare(statements.getUpdateTaskLookupStatement())
                        .setConsistencyLevel(properties.getWriteConsistencyLevel());
        this.updateTaskDefLimitStatement =
                session.prepare(statements.getUpdateTaskDefLimitStatement())
                        .setConsistencyLevel(properties.getWriteConsistencyLevel());
        this.updateEventExecutionStatement =
                session.prepare(statements.getUpdateEventExecutionStatement())
                        .setConsistencyLevel(properties.getWriteConsistencyLevel());

        this.deleteWorkflowStatement =
                session.prepare(statements.getDeleteWorkflowStatement())
                        .setConsistencyLevel(properties.getWriteConsistencyLevel());
        this.deleteTaskStatement =
                session.prepare(statements.getDeleteTaskStatement())
                        .setConsistencyLevel(properties.getWriteConsistencyLevel());
        this.deleteTaskLookupStatement =
                session.prepare(statements.getDeleteTaskLookupStatement())
                        .setConsistencyLevel(properties.getWriteConsistencyLevel());
        this.deleteTaskDefLimitStatement =
                session.prepare(statements.getDeleteTaskDefLimitStatement())
                        .setConsistencyLevel(properties.getWriteConsistencyLevel());
        this.deleteEventExecutionStatement =
                session.prepare(statements.getDeleteEventExecutionsStatement())
                        .setConsistencyLevel(properties.getWriteConsistencyLevel());
    }
@Override
public List<TaskModel> getPendingTasksByWorkflow(String taskName, String workflowId) {
List<TaskModel> tasks = getTasksForWorkflow(workflowId);
return tasks.stream()
.filter(task -> taskName.equals(task.getTaskType()))
.filter(task -> TaskModel.Status.IN_PROGRESS.equals(task.getStatus()))
.collect(Collectors.toList());
}
    /**
     * This is a dummy implementation and this feature is not implemented for Cassandra backed
     * Conductor
     *
     * @throws UnsupportedOperationException always
     */
    @Override
    public List<TaskModel> getTasks(String taskType, String startKey, int count) {
        throw new UnsupportedOperationException(
                "This method is not implemented in CassandraExecutionDAO. Please use ExecutionDAOFacade instead.");
    }
    /**
     * Inserts tasks into the Cassandra datastore. <b>Note:</b> Creates the task_id to workflow_id
     * mapping in the task_lookup table first. Once this succeeds, inserts the tasks into the
     * workflows table. Tasks belonging to the same shard are created using batch statements.
     *
     * <p>All tasks are written under the workflow of the first task in the list.
     *
     * @param tasks tasks to be created
     * @return the same list of tasks that was passed in
     * @throws TransientException if any datastore write fails
     */
    @Override
    public List<TaskModel> createTasks(List<TaskModel> tasks) {
        validateTasks(tasks);
        String workflowId = tasks.get(0).getWorkflowInstanceId();
        UUID workflowUUID = toUUID(workflowId, "Invalid workflow id");
        try {
            // New total after this batch; stored in the partition's static column below
            WorkflowMetadata workflowMetadata = getWorkflowMetadata(workflowId);
            int totalTasks = workflowMetadata.getTotalTasks() + tasks.size();

            // TODO: write into multiple shards based on number of tasks

            // update the task_lookup table (task_id -> workflow_id) before writing task rows
            tasks.forEach(
                    task -> {
                        if (task.getScheduledTime() == 0) {
                            task.setScheduledTime(System.currentTimeMillis());
                        }
                        session.execute(
                                updateTaskLookupStatement.bind(
                                        workflowUUID, toUUID(task.getTaskId(), "Invalid task id")));
                    });

            // update all the tasks in the workflow using batch
            BatchStatement batchStatement = new BatchStatement();
            tasks.forEach(
                    task -> {
                        String taskPayload = toJson(task);
                        batchStatement.add(
                                insertTaskStatement.bind(
                                        workflowUUID,
                                        DEFAULT_SHARD_ID,
                                        task.getTaskId(),
                                        taskPayload));
                        recordCassandraDaoRequests(
                                "createTask", task.getTaskType(), task.getWorkflowType());
                        recordCassandraDaoPayloadSize(
                                "createTask",
                                taskPayload.length(),
                                task.getTaskType(),
                                task.getWorkflowType());
                    });
            batchStatement.add(
                    updateTotalTasksStatement.bind(totalTasks, workflowUUID, DEFAULT_SHARD_ID));
            session.execute(batchStatement);

            // update the total tasks and partitions for the workflow
            session.execute(
                    updateTotalPartitionsStatement.bind(
                            DEFAULT_TOTAL_PARTITIONS, totalTasks, workflowUUID));
            return tasks;
        } catch (DriverException e) {
            Monitors.error(CLASS_NAME, "createTasks");
            String errorMsg =
                    String.format(
                            "Error creating %d tasks for workflow: %s", tasks.size(), workflowId);
            LOGGER.error(errorMsg, e);
            throw new TransientException(errorMsg, e);
        }
    }
    /**
     * Persists the task payload into its workflow's shard, and keeps the task_def_limit table
     * in sync for task definitions that have a positive concurrency limit.
     *
     * @throws TransientException if the datastore write fails
     */
    @Override
    public void updateTask(TaskModel task) {
        try {
            // TODO: calculate the shard number the task belongs to
            String taskPayload = toJson(task);
            recordCassandraDaoRequests("updateTask", task.getTaskType(), task.getWorkflowType());
            recordCassandraDaoPayloadSize(
                    "updateTask", taskPayload.length(), task.getTaskType(), task.getWorkflowType());
            session.execute(
                    insertTaskStatement.bind(
                            UUID.fromString(task.getWorkflowInstanceId()),
                            DEFAULT_SHARD_ID,
                            task.getTaskId(),
                            taskPayload));
            // Concurrency-limit bookkeeping: terminal tasks free a slot, in-progress tasks
            // claim one
            if (task.getTaskDefinition().isPresent()
                    && task.getTaskDefinition().get().concurrencyLimit() > 0) {
                if (task.getStatus().isTerminal()) {
                    removeTaskFromLimit(task);
                } else if (task.getStatus() == TaskModel.Status.IN_PROGRESS) {
                    addTaskToLimit(task);
                }
            }
        } catch (DriverException e) {
            Monitors.error(CLASS_NAME, "updateTask");
            String errorMsg =
                    String.format(
                            "Error updating task: %s in workflow: %s",
                            task.getTaskId(), task.getWorkflowInstanceId());
            LOGGER.error(errorMsg, e);
            throw new TransientException(errorMsg, e);
        }
    }
/**
* This is a dummy implementation and this feature is not implemented for Cassandra backed
* Conductor
*/
@Override
public boolean exceedsLimit(TaskModel task) {
Optional<TaskDef> taskDefinition = task.getTaskDefinition();
if (taskDefinition.isEmpty()) {
return false;
}
int limit = taskDefinition.get().concurrencyLimit();
if (limit <= 0) {
return false;
}
try {
recordCassandraDaoRequests(
"selectTaskDefLimit", task.getTaskType(), task.getWorkflowType());
ResultSet resultSet =
session.execute(
selectTasksFromTaskDefLimitStatement.bind(task.getTaskDefName()));
List<String> taskIds =
resultSet.all().stream()
.map(row -> row.getUUID(TASK_ID_KEY).toString())
.collect(Collectors.toList());
long current = taskIds.size();
if (!taskIds.contains(task.getTaskId()) && current >= limit) {
LOGGER.info(
"Task execution count limited. task - {}:{}, limit: {}, current: {}",
task.getTaskId(),
task.getTaskDefName(),
limit,
current);
Monitors.recordTaskConcurrentExecutionLimited(task.getTaskDefName(), limit);
return true;
}
} catch (DriverException e) {
Monitors.error(CLASS_NAME, "exceedsLimit");
String errorMsg =
String.format(
"Failed to get in progress limit - %s:%s in workflow :%s",
task.getTaskDefName(), task.getTaskId(), task.getWorkflowInstanceId());
LOGGER.error(errorMsg, e);
throw new TransientException(errorMsg);
}
return false;
}
@Override
public boolean removeTask(String taskId) {
TaskModel task = getTask(taskId);
if (task == null) {
LOGGER.warn("No such task found by id {}", taskId);
return false;
}
return removeTask(task);
}
@Override
public TaskModel getTask(String taskId) {
try {
String workflowId = lookupWorkflowIdFromTaskId(taskId);
if (workflowId == null) {
return null;
}
// TODO: implement for query against multiple shards
ResultSet resultSet =
session.execute(
selectTaskStatement.bind(
UUID.fromString(workflowId), DEFAULT_SHARD_ID, taskId));
return Optional.ofNullable(resultSet.one())
.map(
row -> {
String taskRow = row.getString(PAYLOAD_KEY);
TaskModel task = readValue(taskRow, TaskModel.class);
recordCassandraDaoRequests(
"getTask", task.getTaskType(), task.getWorkflowType());
recordCassandraDaoPayloadSize(
"getTask",
taskRow.length(),
task.getTaskType(),
task.getWorkflowType());
return task;
})
.orElse(null);
} catch (DriverException e) {
Monitors.error(CLASS_NAME, "getTask");
String errorMsg = String.format("Error getting task by id: %s", taskId);
LOGGER.error(errorMsg, e);
throw new TransientException(errorMsg);
}
}
@Override
public List<TaskModel> getTasks(List<String> taskIds) {
Preconditions.checkNotNull(taskIds);
Preconditions.checkArgument(taskIds.size() > 0, "Task ids list cannot be empty");
String workflowId = lookupWorkflowIdFromTaskId(taskIds.get(0));
if (workflowId == null) {
return null;
}
return getWorkflow(workflowId, true).getTasks().stream()
.filter(task -> taskIds.contains(task.getTaskId()))
.collect(Collectors.toList());
}
    /**
     * This is a dummy implementation and this feature is not implemented for Cassandra backed
     * Conductor
     *
     * @throws UnsupportedOperationException always
     */
    @Override
    public List<TaskModel> getPendingTasksForTaskType(String taskType) {
        throw new UnsupportedOperationException(
                "This method is not implemented in CassandraExecutionDAO. Please use ExecutionDAOFacade instead.");
    }
    /**
     * Returns all tasks of the given workflow by loading the workflow with tasks included.
     *
     * <p>NOTE(review): if the workflow does not exist, {@code getWorkflow} returns null and
     * this method throws a NullPointerException — confirm callers guard against that.
     */
    @Override
    public List<TaskModel> getTasksForWorkflow(String workflowId) {
        return getWorkflow(workflowId, true).getTasks();
    }
    /**
     * Persists a new workflow row. Tasks are stored as separate rows, so they are stripped
     * from the serialized payload and restored on the in-memory object before returning.
     *
     * @return the workflow id
     * @throws TransientException if the datastore write fails
     */
    @Override
    public String createWorkflow(WorkflowModel workflow) {
        try {
            List<TaskModel> tasks = workflow.getTasks();
            workflow.setTasks(new LinkedList<>());
            String payload = toJson(workflow);
            recordCassandraDaoRequests("createWorkflow", "n/a", workflow.getWorkflowName());
            recordCassandraDaoPayloadSize(
                    "createWorkflow", payload.length(), "n/a", workflow.getWorkflowName());
            session.execute(
                    insertWorkflowStatement.bind(
                            UUID.fromString(workflow.getWorkflowId()), 1, "", payload, 0, 1));
            workflow.setTasks(tasks);
            return workflow.getWorkflowId();
        } catch (DriverException e) {
            Monitors.error(CLASS_NAME, "createWorkflow");
            String errorMsg =
                    String.format("Error creating workflow: %s", workflow.getWorkflowId());
            LOGGER.error(errorMsg, e);
            throw new TransientException(errorMsg, e);
        }
    }
@Override
public String updateWorkflow(WorkflowModel workflow) {
try {
List<TaskModel> tasks = workflow.getTasks();
workflow.setTasks(new LinkedList<>());
String payload = toJson(workflow);
recordCassandraDaoRequests("updateWorkflow", "n/a", workflow.getWorkflowName());
recordCassandraDaoPayloadSize(
"updateWorkflow", payload.length(), "n/a", workflow.getWorkflowName());
session.execute(
updateWorkflowStatement.bind(
payload, UUID.fromString(workflow.getWorkflowId())));
workflow.setTasks(tasks);
return workflow.getWorkflowId();
} catch (DriverException e) {
Monitors.error(CLASS_NAME, "updateWorkflow");
String errorMsg =
String.format("Failed to update workflow: %s", workflow.getWorkflowId());
LOGGER.error(errorMsg, e);
throw new TransientException(errorMsg);
}
}
@Override
public boolean removeWorkflow(String workflowId) {
WorkflowModel workflow = getWorkflow(workflowId, true);
boolean removed = false;
// TODO: calculate number of shards and iterate
if (workflow != null) {
try {
recordCassandraDaoRequests("removeWorkflow", "n/a", workflow.getWorkflowName());
ResultSet resultSet =
session.execute(
deleteWorkflowStatement.bind(
UUID.fromString(workflowId), DEFAULT_SHARD_ID));
removed = resultSet.wasApplied();
} catch (DriverException e) {
Monitors.error(CLASS_NAME, "removeWorkflow");
String errorMsg = String.format("Failed to remove workflow: %s", workflowId);
LOGGER.error(errorMsg, e);
throw new TransientException(errorMsg);
}
workflow.getTasks().forEach(this::removeTaskLookup);
}
return removed;
}
    /**
     * This is a dummy implementation and this feature is not yet implemented for Cassandra backed
     * Conductor
     *
     * @throws UnsupportedOperationException always
     */
    @Override
    public boolean removeWorkflowWithExpiry(String workflowId, int ttlSeconds) {
        throw new UnsupportedOperationException(
                "This method is not currently implemented in CassandraExecutionDAO. Please use RedisDAO mode instead now for using TTLs.");
    }

    /**
     * This is a dummy implementation and this feature is not implemented for Cassandra backed
     * Conductor
     *
     * @throws UnsupportedOperationException always
     */
    @Override
    public void removeFromPendingWorkflow(String workflowType, String workflowId) {
        throw new UnsupportedOperationException(
                "This method is not implemented in CassandraExecutionDAO. Please use ExecutionDAOFacade instead.");
    }
    /** Loads a workflow including its tasks; returns null if the workflow does not exist. */
    @Override
    public WorkflowModel getWorkflow(String workflowId) {
        return getWorkflow(workflowId, true);
    }
@Override
public WorkflowModel getWorkflow(String workflowId, boolean includeTasks) {
UUID workflowUUID = toUUID(workflowId, "Invalid workflow id");
try {
WorkflowModel workflow = null;
ResultSet resultSet;
if (includeTasks) {
resultSet =
session.execute(
selectWorkflowWithTasksStatement.bind(
workflowUUID, DEFAULT_SHARD_ID));
List<TaskModel> tasks = new ArrayList<>();
List<Row> rows = resultSet.all();
if (rows.size() == 0) {
LOGGER.info("Workflow {} not found in datastore", workflowId);
return null;
}
for (Row row : rows) {
String entityKey = row.getString(ENTITY_KEY);
if (ENTITY_TYPE_WORKFLOW.equals(entityKey)) {
workflow = readValue(row.getString(PAYLOAD_KEY), WorkflowModel.class);
} else if (ENTITY_TYPE_TASK.equals(entityKey)) {
TaskModel task = readValue(row.getString(PAYLOAD_KEY), TaskModel.class);
tasks.add(task);
} else {
throw new NonTransientException(
String.format(
"Invalid row with entityKey: %s found in datastore for workflow: %s",
entityKey, workflowId));
}
}
if (workflow != null) {
recordCassandraDaoRequests("getWorkflow", "n/a", workflow.getWorkflowName());
tasks.sort(Comparator.comparingInt(TaskModel::getSeq));
workflow.setTasks(tasks);
}
} else {
resultSet = session.execute(selectWorkflowStatement.bind(workflowUUID));
workflow =
Optional.ofNullable(resultSet.one())
.map(
row -> {
WorkflowModel wf =
readValue(
row.getString(PAYLOAD_KEY),
WorkflowModel.class);
recordCassandraDaoRequests(
"getWorkflow", "n/a", wf.getWorkflowName());
return wf;
})
.orElse(null);
}
return workflow;
} catch (DriverException e) {
Monitors.error(CLASS_NAME, "getWorkflow");
String errorMsg = String.format("Failed to get workflow: %s", workflowId);
LOGGER.error(errorMsg, e);
throw new TransientException(errorMsg);
}
}
    /**
     * This is a dummy implementation and this feature is not implemented for Cassandra backed
     * Conductor
     *
     * @throws UnsupportedOperationException always
     */
    @Override
    public List<String> getRunningWorkflowIds(String workflowName, int version) {
        throw new UnsupportedOperationException(
                "This method is not implemented in CassandraExecutionDAO. Please use ExecutionDAOFacade instead.");
    }

    /**
     * This is a dummy implementation and this feature is not implemented for Cassandra backed
     * Conductor
     *
     * @throws UnsupportedOperationException always
     */
    @Override
    public List<WorkflowModel> getPendingWorkflowsByType(String workflowName, int version) {
        throw new UnsupportedOperationException(
                "This method is not implemented in CassandraExecutionDAO. Please use ExecutionDAOFacade instead.");
    }

    /**
     * This is a dummy implementation and this feature is not implemented for Cassandra backed
     * Conductor
     *
     * @throws UnsupportedOperationException always
     */
    @Override
    public long getPendingWorkflowCount(String workflowName) {
        throw new UnsupportedOperationException(
                "This method is not implemented in CassandraExecutionDAO. Please use ExecutionDAOFacade instead.");
    }

    /**
     * This is a dummy implementation and this feature is not implemented for Cassandra backed
     * Conductor
     *
     * @throws UnsupportedOperationException always
     */
    @Override
    public long getInProgressTaskCount(String taskDefName) {
        throw new UnsupportedOperationException(
                "This method is not implemented in CassandraExecutionDAO. Please use ExecutionDAOFacade instead.");
    }

    /**
     * This is a dummy implementation and this feature is not implemented for Cassandra backed
     * Conductor
     *
     * @throws UnsupportedOperationException always
     */
    @Override
    public List<WorkflowModel> getWorkflowsByType(
            String workflowName, Long startTime, Long endTime) {
        throw new UnsupportedOperationException(
                "This method is not implemented in CassandraExecutionDAO. Please use ExecutionDAOFacade instead.");
    }

    /**
     * This is a dummy implementation and this feature is not implemented for Cassandra backed
     * Conductor
     *
     * @throws UnsupportedOperationException always
     */
    @Override
    public List<WorkflowModel> getWorkflowsByCorrelationId(
            String workflowName, String correlationId, boolean includeTasks) {
        throw new UnsupportedOperationException(
                "This method is not implemented in CassandraExecutionDAO. Please use ExecutionDAOFacade instead.");
    }
    /** Cross-workflow search is not supported by this Cassandra-backed DAO. */
    @Override
    public boolean canSearchAcrossWorkflows() {
        return false;
    }
@Override
public boolean addEventExecution(EventExecution eventExecution) {
try {
String jsonPayload = toJson(eventExecution);
recordCassandraDaoEventRequests("addEventExecution", eventExecution.getEvent());
recordCassandraDaoPayloadSize(
"addEventExecution", jsonPayload.length(), eventExecution.getEvent(), "n/a");
return session.execute(
insertEventExecutionStatement.bind(
eventExecution.getMessageId(),
eventExecution.getName(),
eventExecution.getId(),
jsonPayload))
.wasApplied();
} catch (DriverException e) {
Monitors.error(CLASS_NAME, "addEventExecution");
String errorMsg =
String.format(
"Failed to add event execution for event: %s, handler: %s",
eventExecution.getEvent(), eventExecution.getName());
LOGGER.error(errorMsg, e);
throw new TransientException(errorMsg);
}
}
@Override
public void updateEventExecution(EventExecution eventExecution) {
try {
String jsonPayload = toJson(eventExecution);
recordCassandraDaoEventRequests("updateEventExecution", eventExecution.getEvent());
recordCassandraDaoPayloadSize(
"updateEventExecution", jsonPayload.length(), eventExecution.getEvent(), "n/a");
session.execute(
updateEventExecutionStatement.bind(
eventExecutionsTTL,
jsonPayload,
eventExecution.getMessageId(),
eventExecution.getName(),
eventExecution.getId()));
} catch (DriverException e) {
Monitors.error(CLASS_NAME, "updateEventExecution");
String errorMsg =
String.format(
"Failed to update event execution for event: %s, handler: %s",
eventExecution.getEvent(), eventExecution.getName());
LOGGER.error(errorMsg, e);
throw new TransientException(errorMsg);
}
}
@Override
public void removeEventExecution(EventExecution eventExecution) {
try {
recordCassandraDaoEventRequests("removeEventExecution", eventExecution.getEvent());
session.execute(
deleteEventExecutionStatement.bind(
eventExecution.getMessageId(),
eventExecution.getName(),
eventExecution.getId()));
} catch (DriverException e) {
Monitors.error(CLASS_NAME, "removeEventExecution");
String errorMsg =
String.format(
"Failed to remove event execution for event: %s, handler: %s",
eventExecution.getEvent(), eventExecution.getName());
LOGGER.error(errorMsg, e);
throw new TransientException(errorMsg);
}
}
@VisibleForTesting
List<EventExecution> getEventExecutions(
String eventHandlerName, String eventName, String messageId) {
try {
return session
.execute(selectEventExecutionsStatement.bind(messageId, eventHandlerName))
.all()
.stream()
.filter(row -> !row.isNull(PAYLOAD_KEY))
.map(row -> readValue(row.getString(PAYLOAD_KEY), EventExecution.class))
.collect(Collectors.toList());
} catch (DriverException e) {
String errorMsg =
String.format(
"Failed to fetch event executions for event: %s, handler: %s",
eventName, eventHandlerName);
LOGGER.error(errorMsg, e);
throw new TransientException(errorMsg);
}
}
@Override
public void addTaskToLimit(TaskModel task) {
try {
recordCassandraDaoRequests(
/*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.cassandra.dao;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.PriorityQueue;
import java.util.stream.Collectors;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.annotations.Trace;
import com.netflix.conductor.annotations.VisibleForTesting;
import com.netflix.conductor.cassandra.config.CassandraProperties;
import com.netflix.conductor.cassandra.util.Statements;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.core.exception.ConflictException;
import com.netflix.conductor.core.exception.TransientException;
import com.netflix.conductor.dao.MetadataDAO;
import com.netflix.conductor.metrics.Monitors;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.exceptions.DriverException;
import com.fasterxml.jackson.databind.ObjectMapper;
import static com.netflix.conductor.cassandra.util.Constants.TASK_DEFINITION_KEY;
import static com.netflix.conductor.cassandra.util.Constants.TASK_DEFS_KEY;
import static com.netflix.conductor.cassandra.util.Constants.WORKFLOW_DEFINITION_KEY;
import static com.netflix.conductor.cassandra.util.Constants.WORKFLOW_DEF_INDEX_KEY;
import static com.netflix.conductor.cassandra.util.Constants.WORKFLOW_DEF_NAME_VERSION_KEY;
import static com.netflix.conductor.common.metadata.tasks.TaskDef.ONE_HOUR;
/**
 * Cassandra-backed implementation of {@link MetadataDAO}, storing workflow and task
 * definitions via CQL statements prepared once at construction time.
 */
@Trace
public class CassandraMetadataDAO extends CassandraBaseDAO implements MetadataDAO {

    private static final Logger LOGGER = LoggerFactory.getLogger(CassandraMetadataDAO.class);
    private static final String CLASS_NAME = CassandraMetadataDAO.class.getSimpleName();
    // Delimiter used in workflow-def index entries — presumably joins name/version; confirm at
    // the usage sites (not visible in this chunk)
    private static final String INDEX_DELIMITER = "/";

    // Prepared CQL statements; inserts/updates/deletes use the write consistency level,
    // selects the read consistency level (set in the constructor)
    private final PreparedStatement insertWorkflowDefStatement;
    private final PreparedStatement insertWorkflowDefVersionIndexStatement;
    private final PreparedStatement insertTaskDefStatement;

    private final PreparedStatement selectWorkflowDefStatement;
    private final PreparedStatement selectAllWorkflowDefVersionsByNameStatement;
    private final PreparedStatement selectAllWorkflowDefsStatement;
    private final PreparedStatement selectAllWorkflowDefsLatestVersionsStatement;

    private final PreparedStatement selectTaskDefStatement;
    private final PreparedStatement selectAllTaskDefsStatement;

    private final PreparedStatement updateWorkflowDefStatement;

    private final PreparedStatement deleteWorkflowDefStatement;
    private final PreparedStatement deleteWorkflowDefIndexStatement;
    private final PreparedStatement deleteTaskDefStatement;
public CassandraMetadataDAO(
Session session,
ObjectMapper objectMapper,
CassandraProperties properties,
Statements statements) {
super(session, objectMapper, properties);
this.insertWorkflowDefStatement =
session.prepare(statements.getInsertWorkflowDefStatement())
.setConsistencyLevel(properties.getWriteConsistencyLevel());
this.insertWorkflowDefVersionIndexStatement =
session.prepare(statements.getInsertWorkflowDefVersionIndexStatement())
.setConsistencyLevel(properties.getWriteConsistencyLevel());
this.insertTaskDefStatement =
session.prepare(statements.getInsertTaskDefStatement())
.setConsistencyLevel(properties.getWriteConsistencyLevel());
this.selectWorkflowDefStatement =
session.prepare(statements.getSelectWorkflowDefStatement())
.setConsistencyLevel(properties.getReadConsistencyLevel());
this.selectAllWorkflowDefVersionsByNameStatement =
session.prepare(statements.getSelectAllWorkflowDefVersionsByNameStatement())
.setConsistencyLevel(properties.getReadConsistencyLevel());
this.selectAllWorkflowDefsStatement =
session.prepare(statements.getSelectAllWorkflowDefsStatement())
.setConsistencyLevel(properties.getReadConsistencyLevel());
this.selectAllWorkflowDefsLatestVersionsStatement =
session.prepare(statements.getSelectAllWorkflowDefsLatestVersionsStatement())
.setConsistencyLevel(properties.getReadConsistencyLevel());
this.selectTaskDefStatement =
session.prepare(statements.getSelectTaskDefStatement())
.setConsistencyLevel(properties.getReadConsistencyLevel());
this.selectAllTaskDefsStatement =
session.prepare(statements.getSelectAllTaskDefsStatement())
.setConsistencyLevel(properties.getReadConsistencyLevel());
this.updateWorkflowDefStatement =
session.prepare(statements.getUpdateWorkflowDefStatement())
.setConsistencyLevel(properties.getWriteConsistencyLevel());
this.deleteWorkflowDefStatement =
session.prepare(statements.getDeleteWorkflowDefStatement())
.setConsistencyLevel(properties.getWriteConsistencyLevel());
this.deleteWorkflowDefIndexStatement =
session.prepare(statements.getDeleteWorkflowDefIndexStatement())
.setConsistencyLevel(properties.getWriteConsistencyLevel());
this.deleteTaskDefStatement =
session.prepare(statements.getDeleteTaskDefStatement())
.setConsistencyLevel(properties.getWriteConsistencyLevel());
}
@Override
public TaskDef createTaskDef(TaskDef taskDef) {
return insertOrUpdateTaskDef(taskDef);
}
@Override
public TaskDef updateTaskDef(TaskDef taskDef) {
return insertOrUpdateTaskDef(taskDef);
}
@Override
public TaskDef getTaskDef(String name) {
return getTaskDefFromDB(name);
}
@Override
public List<TaskDef> getAllTaskDefs() {
return getAllTaskDefsFromDB();
}
@Override
public void removeTaskDef(String name) {
try {
recordCassandraDaoRequests("removeTaskDef");
session.execute(deleteTaskDefStatement.bind(name));
} catch (DriverException e) {
Monitors.error(CLASS_NAME, "removeTaskDef");
String errorMsg = String.format("Failed to remove task definition: %s", name);
LOGGER.error(errorMsg, e);
throw new TransientException(errorMsg, e);
}
}
@Override
public void createWorkflowDef(WorkflowDef workflowDef) {
try {
String workflowDefinition = toJson(workflowDef);
if (!session.execute(
insertWorkflowDefStatement.bind(
workflowDef.getName(),
workflowDef.getVersion(),
workflowDefinition))
.wasApplied()) {
throw new ConflictException(
"Workflow: %s, version: %s already exists!",
workflowDef.getName(), workflowDef.getVersion());
}
String workflowDefIndex =
getWorkflowDefIndexValue(workflowDef.getName(), workflowDef.getVersion());
session.execute(
insertWorkflowDefVersionIndexStatement.bind(
workflowDefIndex, workflowDefIndex));
recordCassandraDaoRequests("createWorkflowDef");
recordCassandraDaoPayloadSize(
"createWorkflowDef", workflowDefinition.length(), "n/a", workflowDef.getName());
} catch (DriverException e) {
Monitors.error(CLASS_NAME, "createWorkflowDef");
String errorMsg =
String.format(
"Error creating workflow definition: %s/%d",
workflowDef.getName(), workflowDef.getVersion());
LOGGER.error(errorMsg, e);
throw new TransientException(errorMsg, e);
}
}
@Override
public void updateWorkflowDef(WorkflowDef workflowDef) {
try {
String workflowDefinition = toJson(workflowDef);
session.execute(
updateWorkflowDefStatement.bind(
workflowDefinition, workflowDef.getName(), workflowDef.getVersion()));
String workflowDefIndex =
getWorkflowDefIndexValue(workflowDef.getName(), workflowDef.getVersion());
session.execute(
insertWorkflowDefVersionIndexStatement.bind(
workflowDefIndex, workflowDefIndex));
recordCassandraDaoRequests("updateWorkflowDef");
recordCassandraDaoPayloadSize(
"updateWorkflowDef", workflowDefinition.length(), "n/a", workflowDef.getName());
} catch (DriverException e) {
Monitors.error(CLASS_NAME, "updateWorkflowDef");
String errorMsg =
String.format(
"Error updating workflow definition: %s/%d",
workflowDef.getName(), workflowDef.getVersion());
LOGGER.error(errorMsg, e);
throw new TransientException(errorMsg, e);
}
}
@Override
public Optional<WorkflowDef> getLatestWorkflowDef(String name) {
List<WorkflowDef> workflowDefList = getAllWorkflowDefVersions(name);
if (workflowDefList != null && workflowDefList.size() > 0) {
workflowDefList.sort(Comparator.comparingInt(WorkflowDef::getVersion));
return Optional.of(workflowDefList.get(workflowDefList.size() - 1));
}
return Optional.empty();
}
@Override
public Optional<WorkflowDef> getWorkflowDef(String name, int version) {
try {
recordCassandraDaoRequests("getWorkflowDef");
ResultSet resultSet = session.execute(selectWorkflowDefStatement.bind(name, version));
WorkflowDef workflowDef =
Optional.ofNullable(resultSet.one())
.map(
row ->
readValue(
row.getString(WORKFLOW_DEFINITION_KEY),
WorkflowDef.class))
.orElse(null);
return Optional.ofNullable(workflowDef);
} catch (DriverException e) {
Monitors.error(CLASS_NAME, "getTaskDef");
String errorMsg = String.format("Error fetching workflow def: %s/%d", name, version);
LOGGER.error(errorMsg, e);
throw new TransientException(errorMsg, e);
}
}
@Override
public void removeWorkflowDef(String name, Integer version) {
try {
session.execute(deleteWorkflowDefStatement.bind(name, version));
session.execute(
deleteWorkflowDefIndexStatement.bind(
WORKFLOW_DEF_INDEX_KEY, getWorkflowDefIndexValue(name, version)));
} catch (DriverException e) {
Monitors.error(CLASS_NAME, "removeWorkflowDef");
String errorMsg =
String.format("Failed to remove workflow definition: %s/%d", name, version);
LOGGER.error(errorMsg, e);
throw new TransientException(errorMsg, e);
}
}
@SuppressWarnings("unchecked")
@Override
public List<WorkflowDef> getAllWorkflowDefs() {
try {
ResultSet resultSet =
session.execute(selectAllWorkflowDefsStatement.bind(WORKFLOW_DEF_INDEX_KEY));
List<Row> rows = resultSet.all();
if (rows.size() == 0) {
LOGGER.info("No workflow definitions were found.");
return Collections.EMPTY_LIST;
}
return rows.stream()
.map(
row -> {
String defNameVersion =
row.getString(WORKFLOW_DEF_NAME_VERSION_KEY);
var nameVersion = getWorkflowNameAndVersion(defNameVersion);
return getWorkflowDef(nameVersion.getLeft(), nameVersion.getRight())
.orElse(null);
})
.filter(Objects::nonNull)
.collect(Collectors.toList());
} catch (DriverException e) {
Monitors.error(CLASS_NAME, "getAllWorkflowDefs");
String errorMsg = "Error retrieving all workflow defs";
LOGGER.error(errorMsg, e);
throw new TransientException(errorMsg, e);
}
}
@Override
public List<WorkflowDef> getAllWorkflowDefsLatestVersions() {
try {
ResultSet resultSet =
session.execute(
selectAllWorkflowDefsLatestVersionsStatement.bind(
WORKFLOW_DEF_INDEX_KEY));
List<Row> rows = resultSet.all();
if (rows.size() == 0) {
LOGGER.info("No workflow definitions were found.");
return Collections.EMPTY_LIST;
}
Map<String, PriorityQueue<WorkflowDef>> allWorkflowDefs = new HashMap<>();
for (Row row : rows) {
String defNameVersion = row.getString(WORKFLOW_DEF_NAME_VERSION_KEY);
var nameVersion = getWorkflowNameAndVersion(defNameVersion);
WorkflowDef def =
getWorkflowDef(nameVersion.getLeft(), nameVersion.getRight()).orElse(null);
if (def == null) {
continue;
}
if (allWorkflowDefs.get(def.getName()) == null) {
allWorkflowDefs.put(
def.getName(),
new PriorityQueue<>(
(WorkflowDef w1, WorkflowDef w2) ->
Integer.compare(w2.getVersion(), w1.getVersion())));
}
allWorkflowDefs.get(def.getName()).add(def);
}
return allWorkflowDefs.values().stream()
.map(PriorityQueue::poll)
.collect(Collectors.toList());
} catch (DriverException e) {
Monitors.error(CLASS_NAME, "getAllWorkflowDefsLatestVersions");
String errorMsg = "Error retrieving all workflow defs latest versions";
LOGGER.error(errorMsg, e);
throw new TransientException(errorMsg, e);
}
}
private TaskDef getTaskDefFromDB(String name) {
try {
ResultSet resultSet = session.execute(selectTaskDefStatement.bind(name));
recordCassandraDaoRequests("getTaskDef", name, null);
return Optional.ofNullable(resultSet.one()).map(this::setDefaults).orElse(null);
} catch (DriverException e) {
Monitors.error(CLASS_NAME, "getTaskDef");
String errorMsg = String.format("Failed to get task def: %s", name);
LOGGER.error(errorMsg, e);
throw new TransientException(errorMsg, e);
}
}
@SuppressWarnings("unchecked")
private List<TaskDef> getAllTaskDefsFromDB() {
try {
ResultSet resultSet = session.execute(selectAllTaskDefsStatement.bind(TASK_DEFS_KEY));
List<Row> rows = resultSet.all();
if (rows.size() == 0) {
LOGGER.info("No task definitions were found.");
return Collections.EMPTY_LIST;
}
return rows.stream().map(this::setDefaults).collect(Collectors.toList());
} catch (DriverException e) {
Monitors.error(CLASS_NAME, "getAllTaskDefs");
String errorMsg = "Failed to get all task defs";
LOGGER.error(errorMsg, e);
throw new TransientException(errorMsg, e);
}
}
private List<WorkflowDef> getAllWorkflowDefVersions(String name) {
try {
ResultSet resultSet =
session.execute(selectAllWorkflowDefVersionsByNameStatement.bind(name));
recordCassandraDaoRequests("getAllWorkflowDefVersions", "n/a", name);
List<Row> rows = resultSet.all();
if (rows.size() == 0) {
LOGGER.info("Not workflow definitions were found for : {}", name);
return null;
}
return rows.stream()
.map(
row ->
readValue(
row.getString(WORKFLOW_DEFINITION_KEY),
WorkflowDef.class))
.collect(Collectors.toList());
} catch (DriverException e) {
Monitors.error(CLASS_NAME, "getAllWorkflowDefVersions");
String errorMsg = String.format("Failed to get workflows defs for : %s", name);
LOGGER.error(errorMsg, e);
throw new TransientException(errorMsg, e);
}
}
private TaskDef insertOrUpdateTaskDef(TaskDef taskDef) {
try {
String taskDefinition = toJson(taskDef);
session.execute(insertTaskDefStatement.bind(taskDef.getName(), taskDefinition));
recordCassandraDaoRequests("storeTaskDef");
recordCassandraDaoPayloadSize(
"storeTaskDef", taskDefinition.length(), taskDef.getName(), "n/a");
} catch (DriverException e) {
Monitors.error(CLASS_NAME, "insertOrUpdateTaskDef");
String errorMsg =
String.format("Error creating/updating task definition: %s", taskDef.getName());
LOGGER.error(errorMsg, e);
throw new TransientException(errorMsg, e);
}
return taskDef;
}
@VisibleForTesting
String getWorkflowDefIndexValue(String name, int version) {
return name + INDEX_DELIMITER + version;
}
@VisibleForTesting
ImmutablePair<String, Integer> getWorkflowNameAndVersion(String nameVersionStr) {
int lastIndexOfDelimiter = nameVersionStr.lastIndexOf(INDEX_DELIMITER);
if (lastIndexOfDelimiter == -1) {
throw new IllegalStateException(
nameVersionStr
+ " is not in the 'workflowName"
+ INDEX_DELIMITER
+ "version' pattern.");
}
String workflowName = nameVersionStr.substring(0, lastIndexOfDelimiter);
String versionStr = nameVersionStr.substring(lastIndexOfDelimiter + 1);
try {
return new ImmutablePair<>(workflowName, Integer.parseInt(versionStr));
} catch (NumberFormatException e) {
throw new IllegalStateException(
versionStr + " in " + nameVersionStr + " is not a valid number.");
}
}
private TaskDef setDefaults(Row row) {
TaskDef taskDef = readValue(row.getString(TASK_DEFINITION_KEY), TaskDef.class);
if (taskDef != null && taskDef.getResponseTimeoutSeconds() == 0) {
taskDef.setResponseTimeoutSeconds(
taskDef.getTimeoutSeconds() == 0 ? ONE_HOUR : taskDef.getTimeoutSeconds() - 1);
}
return taskDef;
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/dao/CassandraEventHandlerDAO.java | cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/dao/CassandraEventHandlerDAO.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.cassandra.dao;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.annotations.Trace;
import com.netflix.conductor.cassandra.config.CassandraProperties;
import com.netflix.conductor.cassandra.util.Statements;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.core.exception.TransientException;
import com.netflix.conductor.dao.EventHandlerDAO;
import com.netflix.conductor.metrics.Monitors;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.exceptions.DriverException;
import com.fasterxml.jackson.databind.ObjectMapper;
import static com.netflix.conductor.cassandra.util.Constants.EVENT_HANDLER_KEY;
import static com.netflix.conductor.cassandra.util.Constants.HANDLERS_KEY;
/**
 * Cassandra-backed {@link EventHandlerDAO}. All event handlers are stored in a single
 * partition of the {@code event_handlers} table keyed by the literal {@code HANDLERS_KEY},
 * so per-event queries are served by filtering the full list in memory.
 */
@Trace
public class CassandraEventHandlerDAO extends CassandraBaseDAO implements EventHandlerDAO {

    private static final Logger LOGGER = LoggerFactory.getLogger(CassandraEventHandlerDAO.class);
    private static final String CLASS_NAME = CassandraEventHandlerDAO.class.getSimpleName();

    private final PreparedStatement insertEventHandlerStatement;
    private final PreparedStatement selectAllEventHandlersStatement;
    private final PreparedStatement deleteEventHandlerStatement;

    public CassandraEventHandlerDAO(
            Session session,
            ObjectMapper objectMapper,
            CassandraProperties properties,
            Statements statements) {
        super(session, objectMapper, properties);
        // Prepared once; writes use the write consistency level, reads the read level.
        insertEventHandlerStatement =
                session.prepare(statements.getInsertEventHandlerStatement())
                        .setConsistencyLevel(properties.getWriteConsistencyLevel());
        selectAllEventHandlersStatement =
                session.prepare(statements.getSelectAllEventHandlersStatement())
                        .setConsistencyLevel(properties.getReadConsistencyLevel());
        deleteEventHandlerStatement =
                session.prepare(statements.getDeleteEventHandlerStatement())
                        .setConsistencyLevel(properties.getWriteConsistencyLevel());
    }

    /** Adds (or silently overwrites) an event handler; upsert semantics. */
    @Override
    public void addEventHandler(EventHandler eventHandler) {
        insertOrUpdateEventHandler(eventHandler);
    }

    /** Updates (or creates) an event handler; upsert semantics. */
    @Override
    public void updateEventHandler(EventHandler eventHandler) {
        insertOrUpdateEventHandler(eventHandler);
    }

    /** Removes the named event handler; deleting a non-existent handler is a no-op. */
    @Override
    public void removeEventHandler(String name) {
        try {
            recordCassandraDaoRequests("removeEventHandler");
            session.execute(deleteEventHandlerStatement.bind(name));
        } catch (Exception e) {
            // NOTE(review): intentionally broader than the DriverException caught
            // elsewhere in this class — any failure here surfaces as TransientException.
            Monitors.error(CLASS_NAME, "removeEventHandler");
            String errorMsg = String.format("Failed to remove event handler: %s", name);
            LOGGER.error(errorMsg, e);
            throw new TransientException(errorMsg, e);
        }
    }

    @Override
    public List<EventHandler> getAllEventHandlers() {
        return getAllEventHandlersFromDB();
    }

    /**
     * Returns the handlers registered for the given event, optionally restricted to
     * active ones. (Previously two near-identical pipelines; folded into one.)
     */
    @Override
    public List<EventHandler> getEventHandlersForEvent(String event, boolean activeOnly) {
        return getAllEventHandlers().stream()
                .filter(eventHandler -> eventHandler.getEvent().equals(event))
                .filter(eventHandler -> !activeOnly || eventHandler.isActive())
                .collect(Collectors.toList());
    }

    /** Loads and deserializes every handler from the single HANDLERS_KEY partition. */
    private List<EventHandler> getAllEventHandlersFromDB() {
        try {
            ResultSet resultSet =
                    session.execute(selectAllEventHandlersStatement.bind(HANDLERS_KEY));
            List<Row> rows = resultSet.all();
            if (rows.isEmpty()) {
                LOGGER.info("No event handlers were found.");
                return Collections.emptyList();
            }
            return rows.stream()
                    .map(row -> readValue(row.getString(EVENT_HANDLER_KEY), EventHandler.class))
                    .collect(Collectors.toList());
        } catch (DriverException e) {
            Monitors.error(CLASS_NAME, "getAllEventHandlersFromDB");
            String errorMsg = "Failed to get all event handlers";
            LOGGER.error(errorMsg, e);
            throw new TransientException(errorMsg, e);
        }
    }

    /** Serializes and upserts one handler, recording request and payload-size metrics. */
    private void insertOrUpdateEventHandler(EventHandler eventHandler) {
        try {
            String handler = toJson(eventHandler);
            session.execute(insertEventHandlerStatement.bind(eventHandler.getName(), handler));
            recordCassandraDaoRequests("storeEventHandler");
            recordCassandraDaoPayloadSize("storeEventHandler", handler.length(), "n/a", "n/a");
        } catch (DriverException e) {
            Monitors.error(CLASS_NAME, "insertOrUpdateEventHandler");
            String errorMsg =
                    String.format(
                            "Error creating/updating event handler: %s/%s",
                            eventHandler.getName(), eventHandler.getEvent());
            LOGGER.error(errorMsg, e);
            throw new TransientException(errorMsg, e);
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/dao/CassandraPollDataDAO.java | cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/dao/CassandraPollDataDAO.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.cassandra.dao;
import java.util.List;
import com.netflix.conductor.common.metadata.tasks.PollData;
import com.netflix.conductor.dao.PollDataDAO;
/**
 * Placeholder {@link PollDataDAO} for the Cassandra persistence module. Poll data is not
 * persisted in Cassandra, so every operation fails fast with the same exception directing
 * callers to ExecutionDAOFacade.
 */
public class CassandraPollDataDAO implements PollDataDAO {

    @Override
    public void updateLastPollData(String taskDefName, String domain, String workerId) {
        throw notImplemented();
    }

    @Override
    public PollData getPollData(String taskDefName, String domain) {
        throw notImplemented();
    }

    @Override
    public List<PollData> getPollData(String taskDefName) {
        throw notImplemented();
    }

    // Single source for the failure raised by every unsupported operation above.
    private static UnsupportedOperationException notImplemented() {
        return new UnsupportedOperationException(
                "This method is not implemented in CassandraPollDataDAO. Please use ExecutionDAOFacade instead.");
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/util/Statements.java | cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/util/Statements.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.cassandra.util;
import com.datastax.driver.core.querybuilder.QueryBuilder;
import static com.netflix.conductor.cassandra.util.Constants.ENTITY_KEY;
import static com.netflix.conductor.cassandra.util.Constants.ENTITY_TYPE_TASK;
import static com.netflix.conductor.cassandra.util.Constants.ENTITY_TYPE_WORKFLOW;
import static com.netflix.conductor.cassandra.util.Constants.EVENT_EXECUTION_ID_KEY;
import static com.netflix.conductor.cassandra.util.Constants.EVENT_HANDLER_KEY;
import static com.netflix.conductor.cassandra.util.Constants.EVENT_HANDLER_NAME_KEY;
import static com.netflix.conductor.cassandra.util.Constants.HANDLERS_KEY;
import static com.netflix.conductor.cassandra.util.Constants.MESSAGE_ID_KEY;
import static com.netflix.conductor.cassandra.util.Constants.PAYLOAD_KEY;
import static com.netflix.conductor.cassandra.util.Constants.SHARD_ID_KEY;
import static com.netflix.conductor.cassandra.util.Constants.TABLE_EVENT_EXECUTIONS;
import static com.netflix.conductor.cassandra.util.Constants.TABLE_EVENT_HANDLERS;
import static com.netflix.conductor.cassandra.util.Constants.TABLE_TASK_DEFS;
import static com.netflix.conductor.cassandra.util.Constants.TABLE_TASK_DEF_LIMIT;
import static com.netflix.conductor.cassandra.util.Constants.TABLE_TASK_LOOKUP;
import static com.netflix.conductor.cassandra.util.Constants.TABLE_WORKFLOWS;
import static com.netflix.conductor.cassandra.util.Constants.TABLE_WORKFLOW_DEFS;
import static com.netflix.conductor.cassandra.util.Constants.TABLE_WORKFLOW_DEFS_INDEX;
import static com.netflix.conductor.cassandra.util.Constants.TASK_DEFINITION_KEY;
import static com.netflix.conductor.cassandra.util.Constants.TASK_DEFS_KEY;
import static com.netflix.conductor.cassandra.util.Constants.TASK_DEF_NAME_KEY;
import static com.netflix.conductor.cassandra.util.Constants.TASK_ID_KEY;
import static com.netflix.conductor.cassandra.util.Constants.TOTAL_PARTITIONS_KEY;
import static com.netflix.conductor.cassandra.util.Constants.TOTAL_TASKS_KEY;
import static com.netflix.conductor.cassandra.util.Constants.WORKFLOW_DEFINITION_KEY;
import static com.netflix.conductor.cassandra.util.Constants.WORKFLOW_DEF_INDEX_KEY;
import static com.netflix.conductor.cassandra.util.Constants.WORKFLOW_DEF_INDEX_VALUE;
import static com.netflix.conductor.cassandra.util.Constants.WORKFLOW_DEF_NAME_KEY;
import static com.netflix.conductor.cassandra.util.Constants.WORKFLOW_DEF_NAME_VERSION_KEY;
import static com.netflix.conductor.cassandra.util.Constants.WORKFLOW_ID_KEY;
import static com.netflix.conductor.cassandra.util.Constants.WORKFLOW_VERSION_KEY;
import static com.datastax.driver.core.querybuilder.QueryBuilder.bindMarker;
import static com.datastax.driver.core.querybuilder.QueryBuilder.eq;
import static com.datastax.driver.core.querybuilder.QueryBuilder.set;
/**
* DML statements
*
* <p><em>MetadataDAO</em>
*
* <ul>
* <li>INSERT INTO conductor.workflow_definitions (workflow_def_name,version,workflow_definition)
* VALUES (?,?,?) IF NOT EXISTS;
* <li>INSERT INTO conductor.workflow_defs_index
* (workflow_def_version_index,workflow_def_name_version, workflow_def_index_value) VALUES
* ('workflow_def_version_index',?,?);
* <li>INSERT INTO conductor.task_definitions (task_defs,task_def_name,task_definition) VALUES
* ('task_defs',?,?);
* <li>SELECT workflow_definition FROM conductor.workflow_definitions WHERE workflow_def_name=?
* AND version=?;
* <li>SELECT * FROM conductor.workflow_definitions WHERE workflow_def_name=?;
* <li>SELECT * FROM conductor.workflow_defs_index WHERE workflow_def_version_index=?;
* <li>SELECT task_definition FROM conductor.task_definitions WHERE task_defs='task_defs' AND
* task_def_name=?;
* <li>SELECT * FROM conductor.task_definitions WHERE task_defs=?;
* <li>UPDATE conductor.workflow_definitions SET workflow_definition=? WHERE workflow_def_name=?
* AND version=?;
* <li>DELETE FROM conductor.workflow_definitions WHERE workflow_def_name=? AND version=?;
* <li>DELETE FROM conductor.workflow_defs_index WHERE workflow_def_version_index=? AND
* workflow_def_name_version=?;
* <li>DELETE FROM conductor.task_definitions WHERE task_defs='task_defs' AND task_def_name=?;
* </ul>
*
* <em>ExecutionDAO</em>
*
* <ul>
* <li>INSERT INTO conductor.workflows
* (workflow_id,shard_id,task_id,entity,payload,total_tasks,total_partitions) VALUES
* (?,?,?,'workflow',?,?,?);
* <li>INSERT INTO conductor.workflows (workflow_id,shard_id,task_id,entity,payload) VALUES
* (?,?,?,'task',?);
* <li>INSERT INTO conductor.event_executions
* (message_id,event_handler_name,event_execution_id,payload) VALUES (?,?,?,?) IF NOT EXISTS;
* <li>SELECT total_tasks,total_partitions FROM conductor.workflows WHERE workflow_id=? AND
* shard_id=1;
* <li>SELECT payload FROM conductor.workflows WHERE workflow_id=? AND shard_id=? AND
* entity='task' AND task_id=?;
* <li>SELECT payload FROM conductor.workflows WHERE workflow_id=? AND shard_id=1 AND
* entity='workflow';
* <li>SELECT * FROM conductor.workflows WHERE workflow_id=? AND shard_id=?;
* <li>SELECT workflow_id FROM conductor.task_lookup WHERE task_id=?;
* <li>SELECT * FROM conductor.task_def_limit WHERE task_def_name=?;
* <li>SELECT * FROM conductor.event_executions WHERE message_id=? AND event_handler_name=?;
* <li>UPDATE conductor.workflows SET payload=? WHERE workflow_id=? AND shard_id=1 AND
* entity='workflow' AND task_id='';
* <li>UPDATE conductor.workflows SET total_tasks=? WHERE workflow_id=? AND shard_id=?;
* <li>UPDATE conductor.workflows SET total_partitions=?,total_tasks=? WHERE workflow_id=? AND
* shard_id=1;
* <li>UPDATE conductor.task_lookup SET workflow_id=? WHERE task_id=?;
* <li>UPDATE conductor.task_def_limit SET workflow_id=? WHERE task_def_name=? AND task_id=?;
* <li>UPDATE conductor.event_executions USING TTL ? SET payload=? WHERE message_id=? AND
* event_handler_name=? AND event_execution_id=?;
* <li>DELETE FROM conductor.workflows WHERE workflow_id=? AND shard_id=?;
* <li>DELETE FROM conductor.workflows WHERE workflow_id=? AND shard_id=? AND entity='task' AND
* task_id=?;
* <li>DELETE FROM conductor.task_lookup WHERE task_id=?;
* <li>DELETE FROM conductor.task_def_limit WHERE task_def_name=? AND task_id=?;
* <li>DELETE FROM conductor.event_executions WHERE message_id=? AND event_handler_name=? AND
* event_execution_id=?;
* </ul>
*
* <em>EventHandlerDAO</em>
*
* <ul>
* <li>INSERT INTO conductor.event_handlers (handlers,event_handler_name,event_handler) VALUES
* ('handlers',?,?);
* <li>SELECT * FROM conductor.event_handlers WHERE handlers=?;
* <li>DELETE FROM conductor.event_handlers WHERE handlers='handlers' AND event_handler_name=?;
* </ul>
*/
public class Statements {
private final String keyspace;
    /**
     * @param keyspace Cassandra keyspace that qualifies every table name in the generated CQL
     */
    public Statements(String keyspace) {
        this.keyspace = keyspace;
    }
// MetadataDAO
// Insert Statements
    /**
     * @return cql query statement to insert a new workflow definition into the
     *     "workflow_definitions" table; uses IF NOT EXISTS (a lightweight transaction) so
     *     the caller can detect a pre-existing name/version via {@code wasApplied()}
     */
    public String getInsertWorkflowDefStatement() {
        return QueryBuilder.insertInto(keyspace, TABLE_WORKFLOW_DEFS)
                .value(WORKFLOW_DEF_NAME_KEY, bindMarker())
                .value(WORKFLOW_VERSION_KEY, bindMarker())
                .value(WORKFLOW_DEFINITION_KEY, bindMarker())
                .ifNotExists()
                .getQueryString();
    }
    /**
     * @return cql query statement to insert a workflow def name/version entry into the
     *     "workflow_defs_index" table; the partition key column is set to the literal
     *     constant, so all index entries share a single partition
     */
    public String getInsertWorkflowDefVersionIndexStatement() {
        return QueryBuilder.insertInto(keyspace, TABLE_WORKFLOW_DEFS_INDEX)
                .value(WORKFLOW_DEF_INDEX_KEY, WORKFLOW_DEF_INDEX_KEY)
                .value(WORKFLOW_DEF_NAME_VERSION_KEY, bindMarker())
                .value(WORKFLOW_DEF_INDEX_VALUE, bindMarker())
                .getQueryString();
    }
    /**
     * @return cql query statement to insert a new task definition into the
     *     "task_definitions" table; all task definitions share the single partition keyed
     *     by the literal 'task_defs' value
     */
    public String getInsertTaskDefStatement() {
        return QueryBuilder.insertInto(keyspace, TABLE_TASK_DEFS)
                .value(TASK_DEFS_KEY, TASK_DEFS_KEY)
                .value(TASK_DEF_NAME_KEY, bindMarker())
                .value(TASK_DEFINITION_KEY, bindMarker())
                .getQueryString();
    }
// Select Statements
    /**
     * @return cql query statement to fetch a single workflow definition, selected by
     *     bound name and version, from the "workflow_definitions" table
     */
    public String getSelectWorkflowDefStatement() {
        return QueryBuilder.select(WORKFLOW_DEFINITION_KEY)
                .from(keyspace, TABLE_WORKFLOW_DEFS)
                .where(eq(WORKFLOW_DEF_NAME_KEY, bindMarker()))
                .and(eq(WORKFLOW_VERSION_KEY, bindMarker()))
                .getQueryString();
    }
    /**
     * @return cql query statement to retrieve all versions of a workflow definition,
     *     selected by bound name, from the "workflow_definitions" table
     */
    public String getSelectAllWorkflowDefVersionsByNameStatement() {
        return QueryBuilder.select()
                .all()
                .from(keyspace, TABLE_WORKFLOW_DEFS)
                .where(eq(WORKFLOW_DEF_NAME_KEY, bindMarker()))
                .getQueryString();
    }
    /**
     * @return cql query statement to fetch all workflow def name/version index rows from
     *     the single partition of the "workflow_defs_index" table
     */
    public String getSelectAllWorkflowDefsStatement() {
        return QueryBuilder.select()
                .all()
                .from(keyspace, TABLE_WORKFLOW_DEFS_INDEX)
                .where(eq(WORKFLOW_DEF_INDEX_KEY, bindMarker()))
                .getQueryString();
    }
    /**
     * @return cql query statement to fetch all workflow def name/version index rows from
     *     the "workflow_defs_index" table
     */
    // NOTE(review): generates the same CQL as getSelectAllWorkflowDefsStatement();
    // presumably the "latest version" reduction happens in the DAO — confirm.
    public String getSelectAllWorkflowDefsLatestVersionsStatement() {
        return QueryBuilder.select()
                .all()
                .from(keyspace, TABLE_WORKFLOW_DEFS_INDEX)
                .where(eq(WORKFLOW_DEF_INDEX_KEY, bindMarker()))
                .getQueryString();
    }
    /**
     * @return cql query statement to fetch a task definition by bound name from the
     *     "task_definitions" table; the partition key is fixed to the literal 'task_defs'
     */
    public String getSelectTaskDefStatement() {
        return QueryBuilder.select(TASK_DEFINITION_KEY)
                .from(keyspace, TABLE_TASK_DEFS)
                .where(eq(TASK_DEFS_KEY, TASK_DEFS_KEY))
                .and(eq(TASK_DEF_NAME_KEY, bindMarker()))
                .getQueryString();
    }
    /**
     * @return cql query statement to retrieve all task definitions from the
     *     "task_definitions" table; the partition key value is bound by the caller
     */
    public String getSelectAllTaskDefsStatement() {
        return QueryBuilder.select()
                .all()
                .from(keyspace, TABLE_TASK_DEFS)
                .where(eq(TASK_DEFS_KEY, bindMarker()))
                .getQueryString();
    }
// Update Statement
    /**
     * @return cql query statement to update a workflow definition, selected by bound name
     *     and version, in the "workflow_definitions" table
     */
    public String getUpdateWorkflowDefStatement() {
        return QueryBuilder.update(keyspace, TABLE_WORKFLOW_DEFS)
                .with(set(WORKFLOW_DEFINITION_KEY, bindMarker()))
                .where(eq(WORKFLOW_DEF_NAME_KEY, bindMarker()))
                .and(eq(WORKFLOW_VERSION_KEY, bindMarker()))
                .getQueryString();
    }
// Delete Statements
    /**
     * @return cql query statement to delete a workflow definition by bound name and
     *     version from the "workflow_definitions" table
     */
    public String getDeleteWorkflowDefStatement() {
        return QueryBuilder.delete()
                .from(keyspace, TABLE_WORKFLOW_DEFS)
                .where(eq(WORKFLOW_DEF_NAME_KEY, bindMarker()))
                .and(eq(WORKFLOW_VERSION_KEY, bindMarker()))
                .getQueryString();
    }
    /**
     * @return cql query statement to delete one name/version entry from the
     *     "workflow_defs_index" table; both the partition key and the name/version value
     *     are bound by the caller
     */
    public String getDeleteWorkflowDefIndexStatement() {
        return QueryBuilder.delete()
                .from(keyspace, TABLE_WORKFLOW_DEFS_INDEX)
                .where(eq(WORKFLOW_DEF_INDEX_KEY, bindMarker()))
                .and(eq(WORKFLOW_DEF_NAME_VERSION_KEY, bindMarker()))
                .getQueryString();
    }
    /**
     * @return cql query statement to delete a task definition by bound name from the
     *     "task_definitions" table; the partition key is fixed to the literal 'task_defs'
     */
    public String getDeleteTaskDefStatement() {
        return QueryBuilder.delete()
                .from(keyspace, TABLE_TASK_DEFS)
                .where(eq(TASK_DEFS_KEY, TASK_DEFS_KEY))
                .and(eq(TASK_DEF_NAME_KEY, bindMarker()))
                .getQueryString();
    }
// ExecutionDAO
// Insert Statements
/**
* @return cql query statement to insert a new workflow into the "workflows" table
*/
public String getInsertWorkflowStatement() {
return QueryBuilder.insertInto(keyspace, TABLE_WORKFLOWS)
.value(WORKFLOW_ID_KEY, bindMarker())
.value(SHARD_ID_KEY, bindMarker())
.value(TASK_ID_KEY, bindMarker())
.value(ENTITY_KEY, ENTITY_TYPE_WORKFLOW)
.value(PAYLOAD_KEY, bindMarker())
.value(TOTAL_TASKS_KEY, bindMarker())
.value(TOTAL_PARTITIONS_KEY, bindMarker())
.getQueryString();
}
/**
* @return cql query statement to insert a new task into the "workflows" table
*/
public String getInsertTaskStatement() {
return QueryBuilder.insertInto(keyspace, TABLE_WORKFLOWS)
.value(WORKFLOW_ID_KEY, bindMarker())
.value(SHARD_ID_KEY, bindMarker())
.value(TASK_ID_KEY, bindMarker())
.value(ENTITY_KEY, ENTITY_TYPE_TASK)
.value(PAYLOAD_KEY, bindMarker())
.getQueryString();
}
/**
* @return cql query statement to insert a new event execution into the "event_executions" table
*/
public String getInsertEventExecutionStatement() {
return QueryBuilder.insertInto(keyspace, TABLE_EVENT_EXECUTIONS)
.value(MESSAGE_ID_KEY, bindMarker())
.value(EVENT_HANDLER_NAME_KEY, bindMarker())
.value(EVENT_EXECUTION_ID_KEY, bindMarker())
.value(PAYLOAD_KEY, bindMarker())
.ifNotExists()
.getQueryString();
}
// Select Statements
/**
* @return cql query statement to retrieve the total_tasks and total_partitions for a workflow
* from the "workflows" table
*/
public String getSelectTotalStatement() {
return QueryBuilder.select(TOTAL_TASKS_KEY, TOTAL_PARTITIONS_KEY)
.from(keyspace, TABLE_WORKFLOWS)
.where(eq(WORKFLOW_ID_KEY, bindMarker()))
.and(eq(SHARD_ID_KEY, 1))
.getQueryString();
}
/**
* @return cql query statement to retrieve a task from the "workflows" table
*/
public String getSelectTaskStatement() {
return QueryBuilder.select(PAYLOAD_KEY)
.from(keyspace, TABLE_WORKFLOWS)
.where(eq(WORKFLOW_ID_KEY, bindMarker()))
.and(eq(SHARD_ID_KEY, bindMarker()))
.and(eq(ENTITY_KEY, ENTITY_TYPE_TASK))
.and(eq(TASK_ID_KEY, bindMarker()))
.getQueryString();
}
/**
* @return cql query statement to retrieve a workflow (without its tasks) from the "workflows"
* table
*/
public String getSelectWorkflowStatement() {
return QueryBuilder.select(PAYLOAD_KEY)
.from(keyspace, TABLE_WORKFLOWS)
.where(eq(WORKFLOW_ID_KEY, bindMarker()))
.and(eq(SHARD_ID_KEY, 1))
.and(eq(ENTITY_KEY, ENTITY_TYPE_WORKFLOW))
.getQueryString();
}
/**
* @return cql query statement to retrieve a workflow with its tasks from the "workflows" table
*/
public String getSelectWorkflowWithTasksStatement() {
return QueryBuilder.select()
.all()
.from(keyspace, TABLE_WORKFLOWS)
.where(eq(WORKFLOW_ID_KEY, bindMarker()))
.and(eq(SHARD_ID_KEY, bindMarker()))
.getQueryString();
}
/**
* @return cql query statement to retrieve the workflow_id for a particular task_id from the
* "task_lookup" table
*/
public String getSelectTaskFromLookupTableStatement() {
return QueryBuilder.select(WORKFLOW_ID_KEY)
.from(keyspace, TABLE_TASK_LOOKUP)
.where(eq(TASK_ID_KEY, bindMarker()))
.getQueryString();
}
/**
* @return cql query statement to retrieve all task ids for a given taskDefName with concurrent
* execution limit configured from the "task_def_limit" table
*/
public String getSelectTasksFromTaskDefLimitStatement() {
return QueryBuilder.select()
.all()
.from(keyspace, TABLE_TASK_DEF_LIMIT)
.where(eq(TASK_DEF_NAME_KEY, bindMarker()))
.getQueryString();
}
/**
* @return cql query statement to retrieve all event executions for a given message and event
* handler from the "event_executions" table
*/
public String getSelectAllEventExecutionsForMessageFromEventExecutionsStatement() {
return QueryBuilder.select()
.all()
.from(keyspace, TABLE_EVENT_EXECUTIONS)
.where(eq(MESSAGE_ID_KEY, bindMarker()))
.and(eq(EVENT_HANDLER_NAME_KEY, bindMarker()))
.getQueryString();
}
// Update Statements
/**
* @return cql query statement to update a workflow in the "workflows" table
*/
public String getUpdateWorkflowStatement() {
return QueryBuilder.update(keyspace, TABLE_WORKFLOWS)
.with(set(PAYLOAD_KEY, bindMarker()))
.where(eq(WORKFLOW_ID_KEY, bindMarker()))
.and(eq(SHARD_ID_KEY, 1))
.and(eq(ENTITY_KEY, ENTITY_TYPE_WORKFLOW))
.and(eq(TASK_ID_KEY, ""))
.getQueryString();
}
/**
* @return cql query statement to update the total_tasks in a shard for a workflow in the
* "workflows" table
*/
public String getUpdateTotalTasksStatement() {
return QueryBuilder.update(keyspace, TABLE_WORKFLOWS)
.with(set(TOTAL_TASKS_KEY, bindMarker()))
.where(eq(WORKFLOW_ID_KEY, bindMarker()))
.and(eq(SHARD_ID_KEY, bindMarker()))
.getQueryString();
}
/**
* @return cql query statement to update the total_partitions for a workflow in the "workflows"
* table
*/
public String getUpdateTotalPartitionsStatement() {
return QueryBuilder.update(keyspace, TABLE_WORKFLOWS)
.with(set(TOTAL_PARTITIONS_KEY, bindMarker()))
.and(set(TOTAL_TASKS_KEY, bindMarker()))
.where(eq(WORKFLOW_ID_KEY, bindMarker()))
.and(eq(SHARD_ID_KEY, 1))
.getQueryString();
}
/**
* @return cql query statement to add a new task_id to workflow_id mapping to the "task_lookup"
* table
*/
public String getUpdateTaskLookupStatement() {
return QueryBuilder.update(keyspace, TABLE_TASK_LOOKUP)
.with(set(WORKFLOW_ID_KEY, bindMarker()))
.where(eq(TASK_ID_KEY, bindMarker()))
.getQueryString();
}
/**
* @return cql query statement to add a new task_id to the "task_def_limit" table
*/
public String getUpdateTaskDefLimitStatement() {
return QueryBuilder.update(keyspace, TABLE_TASK_DEF_LIMIT)
.with(set(WORKFLOW_ID_KEY, bindMarker()))
.where(eq(TASK_DEF_NAME_KEY, bindMarker()))
.and(eq(TASK_ID_KEY, bindMarker()))
.getQueryString();
}
/**
* @return cql query statement to update an event execution in the "event_executions" table
*/
public String getUpdateEventExecutionStatement() {
return QueryBuilder.update(keyspace, TABLE_EVENT_EXECUTIONS)
.using(QueryBuilder.ttl(bindMarker()))
.with(set(PAYLOAD_KEY, bindMarker()))
.where(eq(MESSAGE_ID_KEY, bindMarker()))
.and(eq(EVENT_HANDLER_NAME_KEY, bindMarker()))
.and(eq(EVENT_EXECUTION_ID_KEY, bindMarker()))
.getQueryString();
}
// Delete statements
/**
* @return cql query statement to delete a workflow from the "workflows" table
*/
public String getDeleteWorkflowStatement() {
return QueryBuilder.delete()
.from(keyspace, TABLE_WORKFLOWS)
.where(eq(WORKFLOW_ID_KEY, bindMarker()))
.and(eq(SHARD_ID_KEY, bindMarker()))
.getQueryString();
}
/**
* @return cql query statement to delete a task_id to workflow_id mapping from the "task_lookup"
* table
*/
public String getDeleteTaskLookupStatement() {
return QueryBuilder.delete()
.from(keyspace, TABLE_TASK_LOOKUP)
.where(eq(TASK_ID_KEY, bindMarker()))
.getQueryString();
}
/**
* @return cql query statement to delete a task from the "workflows" table
*/
public String getDeleteTaskStatement() {
return QueryBuilder.delete()
.from(keyspace, TABLE_WORKFLOWS)
.where(eq(WORKFLOW_ID_KEY, bindMarker()))
.and(eq(SHARD_ID_KEY, bindMarker()))
.and(eq(ENTITY_KEY, ENTITY_TYPE_TASK))
.and(eq(TASK_ID_KEY, bindMarker()))
.getQueryString();
}
/**
* @return cql query statement to delete a task_id from the "task_def_limit" table
*/
public String getDeleteTaskDefLimitStatement() {
return QueryBuilder.delete()
.from(keyspace, TABLE_TASK_DEF_LIMIT)
.where(eq(TASK_DEF_NAME_KEY, bindMarker()))
.and(eq(TASK_ID_KEY, bindMarker()))
.getQueryString();
}
/**
* @return cql query statement to delete an event execution from the "event_execution" table
*/
public String getDeleteEventExecutionsStatement() {
return QueryBuilder.delete()
.from(keyspace, TABLE_EVENT_EXECUTIONS)
.where(eq(MESSAGE_ID_KEY, bindMarker()))
.and(eq(EVENT_HANDLER_NAME_KEY, bindMarker()))
.and(eq(EVENT_EXECUTION_ID_KEY, bindMarker()))
.getQueryString();
}
// EventHandlerDAO
// Insert Statements
/**
* @return cql query statement to insert an event handler into the "event_handlers" table
*/
public String getInsertEventHandlerStatement() {
return QueryBuilder.insertInto(keyspace, TABLE_EVENT_HANDLERS)
.value(HANDLERS_KEY, HANDLERS_KEY)
.value(EVENT_HANDLER_NAME_KEY, bindMarker())
.value(EVENT_HANDLER_KEY, bindMarker())
.getQueryString();
}
// Select Statements
/**
* @return cql query statement to retrieve all event handlers from the "event_handlers" table
*/
public String getSelectAllEventHandlersStatement() {
return QueryBuilder.select()
.all()
.from(keyspace, TABLE_EVENT_HANDLERS)
.where(eq(HANDLERS_KEY, bindMarker()))
.getQueryString();
}
// Delete Statements
/**
* @return cql query statement to delete an event handler by name from the "event_handlers"
* table
*/
public String getDeleteEventHandlerStatement() {
return QueryBuilder.delete()
.from(keyspace, TABLE_EVENT_HANDLERS)
.where(eq(HANDLERS_KEY, HANDLERS_KEY))
.and(eq(EVENT_HANDLER_NAME_KEY, bindMarker()))
.getQueryString();
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/util/Constants.java | cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/util/Constants.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.cassandra.util;
public interface Constants {
String DAO_NAME = "cassandra";
String TABLE_WORKFLOWS = "workflows";
String TABLE_TASK_LOOKUP = "task_lookup";
String TABLE_TASK_DEF_LIMIT = "task_def_limit";
String TABLE_WORKFLOW_DEFS = "workflow_definitions";
String TABLE_WORKFLOW_DEFS_INDEX = "workflow_defs_index";
String TABLE_TASK_DEFS = "task_definitions";
String TABLE_EVENT_HANDLERS = "event_handlers";
String TABLE_EVENT_EXECUTIONS = "event_executions";
String WORKFLOW_ID_KEY = "workflow_id";
String SHARD_ID_KEY = "shard_id";
String TASK_ID_KEY = "task_id";
String ENTITY_KEY = "entity";
String PAYLOAD_KEY = "payload";
String TOTAL_TASKS_KEY = "total_tasks";
String TOTAL_PARTITIONS_KEY = "total_partitions";
String TASK_DEF_NAME_KEY = "task_def_name";
String WORKFLOW_DEF_NAME_KEY = "workflow_def_name";
String WORKFLOW_VERSION_KEY = "version";
String WORKFLOW_DEFINITION_KEY = "workflow_definition";
String WORKFLOW_DEF_INDEX_KEY = "workflow_def_version_index";
String WORKFLOW_DEF_INDEX_VALUE = "workflow_def_index_value";
String WORKFLOW_DEF_NAME_VERSION_KEY = "workflow_def_name_version";
String TASK_DEFS_KEY = "task_defs";
String TASK_DEFINITION_KEY = "task_definition";
String HANDLERS_KEY = "handlers";
String EVENT_HANDLER_NAME_KEY = "event_handler_name";
String EVENT_HANDLER_KEY = "event_handler";
String MESSAGE_ID_KEY = "message_id";
String EVENT_EXECUTION_ID_KEY = "event_execution_id";
String ENTITY_TYPE_WORKFLOW = "workflow";
String ENTITY_TYPE_TASK = "task";
int DEFAULT_SHARD_ID = 1;
int DEFAULT_TOTAL_PARTITIONS = 1;
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/config/CassandraProperties.java | cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/config/CassandraProperties.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.cassandra.config;
import java.time.Duration;
import java.time.temporal.ChronoUnit;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.convert.DurationUnit;
import com.datastax.driver.core.ConsistencyLevel;
@ConfigurationProperties("conductor.cassandra")
public class CassandraProperties {
/** The address for the cassandra database host */
private String hostAddress = "127.0.0.1";
/** The port to be used to connect to the cassandra database instance */
private int port = 9142;
/** The name of the cassandra cluster */
private String cluster = "";
/** The keyspace to be used in the cassandra datastore */
private String keyspace = "conductor";
/**
* The number of tasks to be stored in a single partition which will be used for sharding
* workflows in the datastore
*/
private int shardSize = 100;
/** The replication strategy with which to configure the keyspace */
private String replicationStrategy = "SimpleStrategy";
/** The key to be used while configuring the replication factor */
private String replicationFactorKey = "replication_factor";
/** The replication factor value with which the keyspace is configured */
private int replicationFactorValue = 3;
/** The consistency level to be used for read operations */
private ConsistencyLevel readConsistencyLevel = ConsistencyLevel.LOCAL_QUORUM;
/** The consistency level to be used for write operations */
private ConsistencyLevel writeConsistencyLevel = ConsistencyLevel.LOCAL_QUORUM;
/** The time in seconds after which the in-memory task definitions cache will be refreshed */
@DurationUnit(ChronoUnit.SECONDS)
private Duration taskDefCacheRefreshInterval = Duration.ofSeconds(60);
/** The time in seconds after which the in-memory event handler cache will be refreshed */
@DurationUnit(ChronoUnit.SECONDS)
private Duration eventHandlerCacheRefreshInterval = Duration.ofSeconds(60);
/** The time to live in seconds for which the event execution will be persisted */
@DurationUnit(ChronoUnit.SECONDS)
private Duration eventExecutionPersistenceTtl = Duration.ZERO;
public String getHostAddress() {
return hostAddress;
}
public void setHostAddress(String hostAddress) {
this.hostAddress = hostAddress;
}
public int getPort() {
return port;
}
public void setPort(int port) {
this.port = port;
}
public String getCluster() {
return cluster;
}
public void setCluster(String cluster) {
this.cluster = cluster;
}
public String getKeyspace() {
return keyspace;
}
public void setKeyspace(String keyspace) {
this.keyspace = keyspace;
}
public int getShardSize() {
return shardSize;
}
public void setShardSize(int shardSize) {
this.shardSize = shardSize;
}
public String getReplicationStrategy() {
return replicationStrategy;
}
public void setReplicationStrategy(String replicationStrategy) {
this.replicationStrategy = replicationStrategy;
}
public String getReplicationFactorKey() {
return replicationFactorKey;
}
public void setReplicationFactorKey(String replicationFactorKey) {
this.replicationFactorKey = replicationFactorKey;
}
public int getReplicationFactorValue() {
return replicationFactorValue;
}
public void setReplicationFactorValue(int replicationFactorValue) {
this.replicationFactorValue = replicationFactorValue;
}
public ConsistencyLevel getReadConsistencyLevel() {
return readConsistencyLevel;
}
public void setReadConsistencyLevel(ConsistencyLevel readConsistencyLevel) {
this.readConsistencyLevel = readConsistencyLevel;
}
public ConsistencyLevel getWriteConsistencyLevel() {
return writeConsistencyLevel;
}
public void setWriteConsistencyLevel(ConsistencyLevel writeConsistencyLevel) {
this.writeConsistencyLevel = writeConsistencyLevel;
}
public Duration getTaskDefCacheRefreshInterval() {
return taskDefCacheRefreshInterval;
}
public void setTaskDefCacheRefreshInterval(Duration taskDefCacheRefreshInterval) {
this.taskDefCacheRefreshInterval = taskDefCacheRefreshInterval;
}
public Duration getEventHandlerCacheRefreshInterval() {
return eventHandlerCacheRefreshInterval;
}
public void setEventHandlerCacheRefreshInterval(Duration eventHandlerCacheRefreshInterval) {
this.eventHandlerCacheRefreshInterval = eventHandlerCacheRefreshInterval;
}
public Duration getEventExecutionPersistenceTtl() {
return eventExecutionPersistenceTtl;
}
public void setEventExecutionPersistenceTtl(Duration eventExecutionPersistenceTtl) {
this.eventExecutionPersistenceTtl = eventExecutionPersistenceTtl;
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/config/CassandraConfiguration.java | cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/config/CassandraConfiguration.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.cassandra.config;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.cache.CacheManager;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import com.netflix.conductor.cassandra.config.cache.CacheableEventHandlerDAO;
import com.netflix.conductor.cassandra.config.cache.CacheableMetadataDAO;
import com.netflix.conductor.cassandra.dao.CassandraEventHandlerDAO;
import com.netflix.conductor.cassandra.dao.CassandraExecutionDAO;
import com.netflix.conductor.cassandra.dao.CassandraMetadataDAO;
import com.netflix.conductor.cassandra.dao.CassandraPollDataDAO;
import com.netflix.conductor.cassandra.util.Statements;
import com.netflix.conductor.dao.EventHandlerDAO;
import com.netflix.conductor.dao.ExecutionDAO;
import com.netflix.conductor.dao.MetadataDAO;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Metadata;
import com.datastax.driver.core.Session;
import com.fasterxml.jackson.databind.ObjectMapper;
@Configuration(proxyBeanMethods = false)
@EnableConfigurationProperties(CassandraProperties.class)
@ConditionalOnProperty(name = "conductor.db.type", havingValue = "cassandra")
public class CassandraConfiguration {
private static final Logger LOGGER = LoggerFactory.getLogger(CassandraConfiguration.class);
@Bean
public Cluster cluster(CassandraProperties properties) {
String host = properties.getHostAddress();
int port = properties.getPort();
LOGGER.info("Connecting to cassandra cluster with host:{}, port:{}", host, port);
Cluster cluster = Cluster.builder().addContactPoint(host).withPort(port).build();
Metadata metadata = cluster.getMetadata();
LOGGER.info("Connected to cluster: {}", metadata.getClusterName());
metadata.getAllHosts()
.forEach(
h ->
LOGGER.info(
"Datacenter:{}, host:{}, rack: {}",
h.getDatacenter(),
h.getEndPoint().resolve().getHostName(),
h.getRack()));
return cluster;
}
@Bean
public Session session(Cluster cluster) {
LOGGER.info("Initializing cassandra session");
return cluster.connect();
}
@Bean
public MetadataDAO cassandraMetadataDAO(
Session session,
ObjectMapper objectMapper,
CassandraProperties properties,
Statements statements,
CacheManager cacheManager) {
CassandraMetadataDAO cassandraMetadataDAO =
new CassandraMetadataDAO(session, objectMapper, properties, statements);
return new CacheableMetadataDAO(cassandraMetadataDAO, properties, cacheManager);
}
@Bean
public ExecutionDAO cassandraExecutionDAO(
Session session,
ObjectMapper objectMapper,
CassandraProperties properties,
Statements statements) {
return new CassandraExecutionDAO(session, objectMapper, properties, statements);
}
@Bean
public EventHandlerDAO cassandraEventHandlerDAO(
Session session,
ObjectMapper objectMapper,
CassandraProperties properties,
Statements statements,
CacheManager cacheManager) {
CassandraEventHandlerDAO cassandraEventHandlerDAO =
new CassandraEventHandlerDAO(session, objectMapper, properties, statements);
return new CacheableEventHandlerDAO(cassandraEventHandlerDAO, properties, cacheManager);
}
@Bean
public CassandraPollDataDAO cassandraPollDataDAO() {
return new CassandraPollDataDAO();
}
@Bean
public Statements statements(CassandraProperties cassandraProperties) {
return new Statements(cassandraProperties.getKeyspace());
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/config/cache/CachingConfig.java | cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/config/cache/CachingConfig.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.cassandra.config.cache;
import org.springframework.cache.CacheManager;
import org.springframework.cache.annotation.EnableCaching;
import org.springframework.cache.concurrent.ConcurrentMapCacheManager;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
@Configuration
@EnableCaching
public class CachingConfig {
public static final String TASK_DEF_CACHE = "taskDefCache";
public static final String EVENT_HANDLER_CACHE = "eventHandlerCache";
@Bean
public CacheManager cacheManager() {
return new ConcurrentMapCacheManager(TASK_DEF_CACHE, EVENT_HANDLER_CACHE);
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/config/cache/CacheableEventHandlerDAO.java | cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/config/cache/CacheableEventHandlerDAO.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.cassandra.config.cache;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.cache.Cache;
import org.springframework.cache.CacheManager;
import org.springframework.cache.annotation.CacheEvict;
import org.springframework.cache.annotation.CachePut;
import com.netflix.conductor.annotations.Trace;
import com.netflix.conductor.cassandra.config.CassandraProperties;
import com.netflix.conductor.cassandra.dao.CassandraEventHandlerDAO;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.dao.EventHandlerDAO;
import com.netflix.conductor.metrics.Monitors;
import jakarta.annotation.PostConstruct;
import static com.netflix.conductor.cassandra.config.cache.CachingConfig.EVENT_HANDLER_CACHE;
@Trace
public class CacheableEventHandlerDAO implements EventHandlerDAO {
private static final Logger LOGGER = LoggerFactory.getLogger(CacheableEventHandlerDAO.class);
private static final String CLASS_NAME = CacheableEventHandlerDAO.class.getSimpleName();
private final CassandraEventHandlerDAO cassandraEventHandlerDAO;
private final CassandraProperties properties;
private final CacheManager cacheManager;
public CacheableEventHandlerDAO(
CassandraEventHandlerDAO cassandraEventHandlerDAO,
CassandraProperties properties,
CacheManager cacheManager) {
this.cassandraEventHandlerDAO = cassandraEventHandlerDAO;
this.properties = properties;
this.cacheManager = cacheManager;
}
@PostConstruct
public void scheduleEventHandlerRefresh() {
long cacheRefreshTime = properties.getEventHandlerCacheRefreshInterval().getSeconds();
Executors.newSingleThreadScheduledExecutor()
.scheduleWithFixedDelay(
this::refreshEventHandlersCache, 0, cacheRefreshTime, TimeUnit.SECONDS);
}
@Override
@CachePut(value = EVENT_HANDLER_CACHE, key = "#eventHandler.name")
public void addEventHandler(EventHandler eventHandler) {
cassandraEventHandlerDAO.addEventHandler(eventHandler);
}
@Override
@CachePut(value = EVENT_HANDLER_CACHE, key = "#eventHandler.name")
public void updateEventHandler(EventHandler eventHandler) {
cassandraEventHandlerDAO.updateEventHandler(eventHandler);
}
@Override
@CacheEvict(EVENT_HANDLER_CACHE)
public void removeEventHandler(String name) {
cassandraEventHandlerDAO.removeEventHandler(name);
}
@Override
public List<EventHandler> getAllEventHandlers() {
Object nativeCache = cacheManager.getCache(EVENT_HANDLER_CACHE).getNativeCache();
if (nativeCache != null && nativeCache instanceof ConcurrentHashMap) {
ConcurrentHashMap cacheMap = (ConcurrentHashMap) nativeCache;
if (!cacheMap.isEmpty()) {
List<EventHandler> eventHandlers = new ArrayList<>();
cacheMap.values().stream()
.filter(element -> element != null && element instanceof EventHandler)
.forEach(element -> eventHandlers.add((EventHandler) element));
return eventHandlers;
}
}
return refreshEventHandlersCache();
}
@Override
public List<EventHandler> getEventHandlersForEvent(String event, boolean activeOnly) {
if (activeOnly) {
return getAllEventHandlers().stream()
.filter(eventHandler -> eventHandler.getEvent().equals(event))
.filter(EventHandler::isActive)
.collect(Collectors.toList());
} else {
return getAllEventHandlers().stream()
.filter(eventHandler -> eventHandler.getEvent().equals(event))
.collect(Collectors.toList());
}
}
private List<EventHandler> refreshEventHandlersCache() {
try {
Cache eventHandlersCache = cacheManager.getCache(EVENT_HANDLER_CACHE);
eventHandlersCache.clear();
List<EventHandler> eventHandlers = cassandraEventHandlerDAO.getAllEventHandlers();
eventHandlers.forEach(
eventHandler -> eventHandlersCache.put(eventHandler.getName(), eventHandler));
LOGGER.debug("Refreshed event handlers, total num: " + eventHandlers.size());
return eventHandlers;
} catch (Exception e) {
Monitors.error(CLASS_NAME, "refreshEventHandlersCache");
LOGGER.error("refresh EventHandlers failed", e);
}
return Collections.emptyList();
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/config/cache/CacheableMetadataDAO.java | cassandra-persistence/src/main/java/com/netflix/conductor/cassandra/config/cache/CacheableMetadataDAO.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.cassandra.config.cache;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.cache.Cache;
import org.springframework.cache.CacheManager;
import org.springframework.cache.annotation.CacheEvict;
import org.springframework.cache.annotation.CachePut;
import org.springframework.cache.annotation.Cacheable;
import com.netflix.conductor.annotations.Trace;
import com.netflix.conductor.cassandra.config.CassandraProperties;
import com.netflix.conductor.cassandra.dao.CassandraMetadataDAO;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.dao.MetadataDAO;
import com.netflix.conductor.metrics.Monitors;
import jakarta.annotation.PostConstruct;
import static com.netflix.conductor.cassandra.config.cache.CachingConfig.TASK_DEF_CACHE;
@Trace
public class CacheableMetadataDAO implements MetadataDAO {
private static final String CLASS_NAME = CacheableMetadataDAO.class.getSimpleName();
private static final Logger LOGGER = LoggerFactory.getLogger(CacheableMetadataDAO.class);
private final CassandraMetadataDAO cassandraMetadataDAO;
private final CassandraProperties properties;
private final CacheManager cacheManager;
public CacheableMetadataDAO(
CassandraMetadataDAO cassandraMetadataDAO,
CassandraProperties properties,
CacheManager cacheManager) {
this.cassandraMetadataDAO = cassandraMetadataDAO;
this.properties = properties;
this.cacheManager = cacheManager;
}
@PostConstruct
public void scheduleCacheRefresh() {
long cacheRefreshTime = properties.getTaskDefCacheRefreshInterval().getSeconds();
Executors.newSingleThreadScheduledExecutor()
.scheduleWithFixedDelay(
this::refreshTaskDefsCache, 0, cacheRefreshTime, TimeUnit.SECONDS);
LOGGER.info(
"Scheduled cache refresh for Task Definitions, every {} seconds", cacheRefreshTime);
}
@Override
@CachePut(value = TASK_DEF_CACHE, key = "#taskDef.name")
public TaskDef createTaskDef(TaskDef taskDef) {
cassandraMetadataDAO.createTaskDef(taskDef);
return taskDef;
}
@Override
@CachePut(value = TASK_DEF_CACHE, key = "#taskDef.name")
public TaskDef updateTaskDef(TaskDef taskDef) {
return cassandraMetadataDAO.updateTaskDef(taskDef);
}
@Override
@Cacheable(TASK_DEF_CACHE)
public TaskDef getTaskDef(String name) {
return cassandraMetadataDAO.getTaskDef(name);
}
@Override
public List<TaskDef> getAllTaskDefs() {
Object nativeCache = cacheManager.getCache(TASK_DEF_CACHE).getNativeCache();
if (nativeCache != null && nativeCache instanceof ConcurrentHashMap) {
ConcurrentHashMap cacheMap = (ConcurrentHashMap) nativeCache;
if (!cacheMap.isEmpty()) {
List<TaskDef> taskDefs = new ArrayList<>();
cacheMap.values().stream()
.filter(element -> element != null && element instanceof TaskDef)
.forEach(element -> taskDefs.add((TaskDef) element));
return taskDefs;
}
}
return refreshTaskDefsCache();
}
@Override
@CacheEvict(TASK_DEF_CACHE)
public void removeTaskDef(String name) {
cassandraMetadataDAO.removeTaskDef(name);
}
@Override
public void createWorkflowDef(WorkflowDef workflowDef) {
cassandraMetadataDAO.createWorkflowDef(workflowDef);
}
@Override
public void updateWorkflowDef(WorkflowDef workflowDef) {
cassandraMetadataDAO.updateWorkflowDef(workflowDef);
}
@Override
public Optional<WorkflowDef> getLatestWorkflowDef(String name) {
return cassandraMetadataDAO.getLatestWorkflowDef(name);
}
@Override
public Optional<WorkflowDef> getWorkflowDef(String name, int version) {
return cassandraMetadataDAO.getWorkflowDef(name, version);
}
@Override
public void removeWorkflowDef(String name, Integer version) {
cassandraMetadataDAO.removeWorkflowDef(name, version);
}
@Override
public List<WorkflowDef> getAllWorkflowDefs() {
return cassandraMetadataDAO.getAllWorkflowDefs();
}
@Override
public List<WorkflowDef> getAllWorkflowDefsLatestVersions() {
return cassandraMetadataDAO.getAllWorkflowDefsLatestVersions();
}
private List<TaskDef> refreshTaskDefsCache() {
try {
Cache taskDefsCache = cacheManager.getCache(TASK_DEF_CACHE);
taskDefsCache.clear();
List<TaskDef> taskDefs = cassandraMetadataDAO.getAllTaskDefs();
taskDefs.forEach(taskDef -> taskDefsCache.put(taskDef.getName(), taskDef));
LOGGER.debug("Refreshed task defs, total num: " + taskDefs.size());
return taskDefs;
} catch (Exception e) {
Monitors.error(CLASS_NAME, "refreshTaskDefs");
LOGGER.error("refresh TaskDefs failed ", e);
}
return Collections.emptyList();
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/mysql-persistence/src/test/java/com/netflix/conductor/mysql/dao/MySQLQueueDAOTest.java | mysql-persistence/src/test/java/com/netflix/conductor/mysql/dao/MySQLQueueDAOTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.mysql.dao;
import java.sql.Connection;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import javax.sql.DataSource;
import org.flywaydb.core.Flyway;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.flyway.FlywayAutoConfiguration;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.mysql.config.MySQLConfiguration;
import com.netflix.conductor.mysql.util.Query;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableList;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/**
 * Integration tests for {@link MySQLQueueDAO}, running against a real MySQL schema managed by
 * Flyway. Each test starts from a clean database.
 */
@ContextConfiguration(
        classes = {
            TestObjectMapperConfiguration.class,
            MySQLConfiguration.class,
            FlywayAutoConfiguration.class
        })
@RunWith(SpringRunner.class)
@SpringBootTest(properties = "spring.flyway.clean-disabled=false")
public class MySQLQueueDAOTest {

    private static final Logger LOGGER = LoggerFactory.getLogger(MySQLQueueDAOTest.class);

    @Autowired private MySQLQueueDAO queueDAO;

    @Autowired private ObjectMapper objectMapper;

    @Qualifier("dataSource")
    @Autowired
    private DataSource dataSource;

    @Autowired Flyway flyway;

    // clean the database between tests.
    @Before
    public void before() {
        flyway.clean();
        flyway.migrate();
    }

    /** Exercises push / pushIfNotExists / pop / ack / remove / flush on a single queue. */
    @Test
    public void complexQueueTest() {
        String queueName = "TestQueue";
        long offsetTimeInSecond = 0;

        // Push 10 messages and verify the queue-level counters.
        for (int i = 0; i < 10; i++) {
            String messageId = "msg" + i;
            queueDAO.push(queueName, messageId, offsetTimeInSecond);
        }
        int size = queueDAO.getSize(queueName);
        assertEquals(10, size);
        Map<String, Long> details = queueDAO.queuesDetail();
        assertEquals(1, details.size());
        assertEquals(10L, details.get(queueName).longValue());

        // pushIfNotExists must not create duplicates of already-queued ids.
        for (int i = 0; i < 10; i++) {
            String messageId = "msg" + i;
            queueDAO.pushIfNotExists(queueName, messageId, offsetTimeInSecond);
        }

        // Popping moves all messages into the un-acked state.
        List<String> popped = queueDAO.pop(queueName, 10, 100);
        assertNotNull(popped);
        assertEquals(10, popped.size());

        Map<String, Map<String, Map<String, Long>>> verbose = queueDAO.queuesDetailVerbose();
        assertEquals(1, verbose.size());
        long shardSize = verbose.get(queueName).get("a").get("size");
        long unackedSize = verbose.get(queueName).get("a").get("uacked");
        assertEquals(0, shardSize);
        assertEquals(10, unackedSize);

        // Acking every popped message clears the un-acked counter.
        popped.forEach(messageId -> queueDAO.ack(queueName, messageId));
        verbose = queueDAO.queuesDetailVerbose();
        assertEquals(1, verbose.size());
        shardSize = verbose.get(queueName).get("a").get("size");
        unackedSize = verbose.get(queueName).get("a").get("uacked");
        assertEquals(0, shardSize);
        assertEquals(0, unackedSize);

        // Nothing is left to pop once everything was acked.
        popped = queueDAO.pop(queueName, 10, 100);
        assertNotNull(popped);
        assertEquals(0, popped.size());

        // Re-populate, then remove each message individually.
        for (int i = 0; i < 10; i++) {
            String messageId = "msg" + i;
            queueDAO.pushIfNotExists(queueName, messageId, offsetTimeInSecond);
        }
        size = queueDAO.getSize(queueName);
        assertEquals(10, size);
        for (int i = 0; i < 10; i++) {
            String messageId = "msg" + i;
            assertTrue(queueDAO.containsMessage(queueName, messageId));
            queueDAO.remove(queueName, messageId);
        }
        size = queueDAO.getSize(queueName);
        assertEquals(0, size);

        // Re-populate, then flush() removes everything at once.
        for (int i = 0; i < 10; i++) {
            String messageId = "msg" + i;
            queueDAO.pushIfNotExists(queueName, messageId, offsetTimeInSecond);
        }
        queueDAO.flush(queueName);
        size = queueDAO.getSize(queueName);
        assertEquals(0, size);
    }

    /** Test fix for https://github.com/Netflix/conductor/issues/1892 */
    @Test
    public void containsMessageTest() {
        String queueName = "TestQueue";
        long offsetTimeInSecond = 0;

        for (int i = 0; i < 10; i++) {
            String messageId = "msg" + i;
            queueDAO.push(queueName, messageId, offsetTimeInSecond);
        }
        int size = queueDAO.getSize(queueName);
        assertEquals(10, size);

        // containsMessage is true until the message is removed, then false.
        for (int i = 0; i < 10; i++) {
            String messageId = "msg" + i;
            assertTrue(queueDAO.containsMessage(queueName, messageId));
            queueDAO.remove(queueName, messageId);
        }
        for (int i = 0; i < 10; i++) {
            String messageId = "msg" + i;
            assertFalse(queueDAO.containsMessage(queueName, messageId));
        }
    }

    /**
     * Test fix for https://github.com/Netflix/conductor/issues/399
     *
     * @since 1.8.2-rc5
     */
    @Test
    public void pollMessagesTest() {
        final List<Message> messages = new ArrayList<>();
        final String queueName = "issue399_testQueue";
        final int totalSize = 10;

        for (int i = 0; i < totalSize; i++) {
            String payload = "{\"id\": " + i + ", \"msg\":\"test " + i + "\"}";
            Message m = new Message("testmsg-" + i, payload, "");
            if (i % 2 == 0) {
                // Set priority on message with pair id
                m.setPriority(99 - i);
            }
            messages.add(m);
        }

        // Populate the queue with our test message batch
        queueDAO.push(queueName, ImmutableList.copyOf(messages));

        // Assert that all messages were persisted and no extras are in there
        assertEquals("Queue size mismatch", totalSize, queueDAO.getSize(queueName));

        final int firstPollSize = 3;
        List<Message> firstPoll = queueDAO.pollMessages(queueName, firstPollSize, 10_000);
        assertNotNull("First poll was null", firstPoll);
        assertFalse("First poll was empty", firstPoll.isEmpty());
        assertEquals("First poll size mismatch", firstPollSize, firstPoll.size());

        final int secondPollSize = 4;
        List<Message> secondPoll = queueDAO.pollMessages(queueName, secondPollSize, 10_000);
        assertNotNull("Second poll was null", secondPoll);
        assertFalse("Second poll was empty", secondPoll.isEmpty());
        assertEquals("Second poll size mismatch", secondPollSize, secondPoll.size());

        // Assert that the total queue size hasn't changed
        assertEquals(
                "Total queue size should have remained the same",
                totalSize,
                queueDAO.getSize(queueName));

        // Assert that our un-popped messages match our expected size
        final long expectedSize = totalSize - firstPollSize - secondPollSize;
        try (Connection c = dataSource.getConnection()) {
            String UNPOPPED =
                    "SELECT COUNT(*) FROM queue_message WHERE queue_name = ? AND popped = false";
            try (Query q = new Query(objectMapper, c, UNPOPPED)) {
                long count = q.addParameter(queueName).executeCount();
                assertEquals("Remaining queue size mismatch", expectedSize, count);
            }
        } catch (Exception ex) {
            fail(ex.getMessage());
        }
    }

    /**
     * Test fix for https://github.com/Netflix/conductor/issues/448
     *
     * @since 1.8.2-rc5
     */
    @Test
    public void pollDeferredMessagesTest() throws InterruptedException {
        final List<Message> messages = new ArrayList<>();
        final String queueName = "issue448_testQueue";
        final int totalSize = 10;

        for (int i = 0; i < totalSize; i++) {
            int offset = 0;
            if (i < 5) {
                offset = 0;
            } else if (i == 6 || i == 7) {
                // Purposefully skipping id:5 to test out of order deliveries
                // Set id:6 and id:7 for a 2s delay to be picked up in the second polling batch
                offset = 5;
            } else {
                // Set all other queue messages to have enough of a delay that they won't
                // accidentally
                // be picked up.
                offset = 10_000 + i;
            }
            String payload = "{\"id\": " + i + ",\"offset_time_seconds\":" + offset + "}";
            Message m = new Message("testmsg-" + i, payload, "");
            messages.add(m);
            queueDAO.push(queueName, "testmsg-" + i, offset);
        }

        // Assert that all messages were persisted and no extras are in there
        assertEquals("Queue size mismatch", totalSize, queueDAO.getSize(queueName));

        final int firstPollSize = 4;
        List<Message> firstPoll = queueDAO.pollMessages(queueName, firstPollSize, 100);
        assertNotNull("First poll was null", firstPoll);
        assertFalse("First poll was empty", firstPoll.isEmpty());
        assertEquals("First poll size mismatch", firstPollSize, firstPoll.size());

        // Any of the first firstPollSize+1 ids may be delivered (out-of-order tolerance).
        List<String> firstPollMessageIds =
                messages.stream()
                        .map(Message::getId)
                        .collect(Collectors.toList())
                        .subList(0, firstPollSize + 1);
        for (int i = 0; i < firstPollSize; i++) {
            String actual = firstPoll.get(i).getId();
            assertTrue("Unexpected Id: " + actual, firstPollMessageIds.contains(actual));
        }

        final int secondPollSize = 3;

        // Sleep a bit to get the next batch of messages
        LOGGER.debug("Sleeping for second poll...");
        Thread.sleep(5_000);

        // Poll for many more messages than expected
        List<Message> secondPoll = queueDAO.pollMessages(queueName, secondPollSize + 10, 100);
        assertNotNull("Second poll was null", secondPoll);
        assertFalse("Second poll was empty", secondPoll.isEmpty());
        assertEquals("Second poll size mismatch", secondPollSize, secondPoll.size());

        List<String> expectedIds = Arrays.asList("testmsg-4", "testmsg-6", "testmsg-7");
        for (int i = 0; i < secondPollSize; i++) {
            String actual = secondPoll.get(i).getId();
            assertTrue("Unexpected Id: " + actual, expectedIds.contains(actual));
        }

        // Assert that the total queue size hasn't changed
        assertEquals(
                "Total queue size should have remained the same",
                totalSize,
                queueDAO.getSize(queueName));

        // Assert that our un-popped messages match our expected size
        final long expectedSize = totalSize - firstPollSize - secondPollSize;
        try (Connection c = dataSource.getConnection()) {
            String UNPOPPED =
                    "SELECT COUNT(*) FROM queue_message WHERE queue_name = ? AND popped = false";
            try (Query q = new Query(objectMapper, c, UNPOPPED)) {
                long count = q.addParameter(queueName).executeCount();
                assertEquals("Remaining queue size mismatch", expectedSize, count);
            }
        } catch (Exception ex) {
            fail(ex.getMessage());
        }
    }

    /** Verifies that processUnacks() only re-queues messages for the queue it was given. */
    @Test
    public void processUnacksTest() {
        final String queueName = "process_unacks_test";
        // Count of messages in the queue(s)
        final int count = 10;
        // Number of messages to process acks for
        final int unackedCount = 4;
        // A secondary queue to make sure we don't accidentally process other queues
        final String otherQueueName = "process_unacks_test_other_queue";

        // Create testing queue with some messages (but not all) that will be popped/acked.
        for (int i = 0; i < count; i++) {
            int offset = 0;
            if (i >= unackedCount) {
                offset = 1_000_000;
            }
            queueDAO.push(queueName, "unack-" + i, offset);
        }

        // Create a second queue to make sure that unacks don't occur for it
        for (int i = 0; i < count; i++) {
            queueDAO.push(otherQueueName, "other-" + i, 0);
        }

        // Poll for first batch of messages (should be equal to unackedCount)
        List<Message> polled = queueDAO.pollMessages(queueName, 100, 10_000);
        assertNotNull(polled);
        assertFalse(polled.isEmpty());
        assertEquals(unackedCount, polled.size());

        // Poll messages from the other queue so we know they don't get unacked later
        queueDAO.pollMessages(otherQueueName, 100, 10_000);

        // Ack one of the polled messages
        assertTrue(queueDAO.ack(queueName, "unack-1"));

        // Should have one less un-acked popped message in the queue.
        // NOTE: JUnit's assertEquals takes (expected, actual) in that order.
        Long uacked = queueDAO.queuesDetailVerbose().get(queueName).get("a").get("uacked");
        assertNotNull(uacked);
        assertEquals(unackedCount - 1, uacked.longValue());

        // Process unacks
        queueDAO.processUnacks(queueName);

        // Check uacks for both queues after processing
        Map<String, Map<String, Map<String, Long>>> details = queueDAO.queuesDetailVerbose();
        uacked = details.get(queueName).get("a").get("uacked");
        assertNotNull(uacked);
        assertEquals(
                "The messages that were polled should be unacked still",
                unackedCount - 1,
                uacked.longValue());

        Long otherUacked = details.get(otherQueueName).get("a").get("uacked");
        assertNotNull(otherUacked);
        assertEquals(
                "Other queue should have all unacked messages", count, otherUacked.longValue());

        Long size = queueDAO.queuesDetail().get(queueName);
        assertNotNull(size);
        assertEquals(count - unackedCount, size.longValue());
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/mysql-persistence/src/test/java/com/netflix/conductor/mysql/dao/MySQLMetadataDAOTest.java | mysql-persistence/src/test/java/com/netflix/conductor/mysql/dao/MySQLMetadataDAOTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.mysql.dao;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.UUID;
import java.util.function.Function;
import java.util.stream.Collectors;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.flywaydb.core.Flyway;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.flyway.FlywayAutoConfiguration;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.mysql.config.MySQLConfiguration;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;
/**
 * Integration tests for {@link MySQLMetadataDAO} covering workflow definitions, task definitions
 * and event handlers, running against a real MySQL schema managed by Flyway.
 */
@ContextConfiguration(
        classes = {
            TestObjectMapperConfiguration.class,
            MySQLConfiguration.class,
            FlywayAutoConfiguration.class
        })
@RunWith(SpringRunner.class)
@SpringBootTest(properties = "spring.flyway.clean-disabled=false")
public class MySQLMetadataDAOTest {

    @Autowired private MySQLMetadataDAO metadataDAO;

    @Autowired Flyway flyway;

    // clean the database between tests.
    @Before
    public void before() {
        flyway.clean();
        flyway.migrate();
    }

    /** Creating the same (name, version) twice must fail with a NonTransientException. */
    @Test
    public void testDuplicateWorkflowDef() {
        WorkflowDef def = new WorkflowDef();
        def.setName("testDuplicate");
        def.setVersion(1);

        metadataDAO.createWorkflowDef(def);

        NonTransientException applicationException =
                assertThrows(NonTransientException.class, () -> metadataDAO.createWorkflowDef(def));
        assertEquals(
                "Workflow with testDuplicate.1 already exists!", applicationException.getMessage());
    }

    /** Removing a workflow definition that was never created must fail. */
    @Test
    public void testRemoveNotExistingWorkflowDef() {
        NonTransientException applicationException =
                assertThrows(
                        NonTransientException.class,
                        () -> metadataDAO.removeWorkflowDef("test", 1));
        assertEquals(
                "No such workflow definition: test version: 1", applicationException.getMessage());
    }

    /**
     * End-to-end lifecycle of workflow definitions: create multiple versions, read back, list,
     * update, and remove — verifying that "latest version" tracking stays correct throughout.
     */
    @Test
    public void testWorkflowDefOperations() {
        WorkflowDef def = new WorkflowDef();
        def.setName("test");
        def.setVersion(1);
        def.setDescription("description");
        def.setCreatedBy("unit_test");
        def.setCreateTime(1L);
        def.setOwnerApp("ownerApp");
        def.setUpdatedBy("unit_test2");
        def.setUpdateTime(2L);

        metadataDAO.createWorkflowDef(def);

        List<WorkflowDef> all = metadataDAO.getAllWorkflowDefs();
        assertNotNull(all);
        assertEquals(1, all.size());
        assertEquals("test", all.get(0).getName());
        assertEquals(1, all.get(0).getVersion());

        // Round-trip: the stored definition should be field-for-field identical.
        WorkflowDef found = metadataDAO.getWorkflowDef("test", 1).get();
        assertTrue(EqualsBuilder.reflectionEquals(def, found));

        // Create version 3 (deliberately skipping 2 for now).
        def.setVersion(3);
        metadataDAO.createWorkflowDef(def);

        all = metadataDAO.getAllWorkflowDefs();
        assertNotNull(all);
        assertEquals(2, all.size());
        assertEquals("test", all.get(0).getName());
        assertEquals(1, all.get(0).getVersion());

        // Latest version is now 3.
        found = metadataDAO.getLatestWorkflowDef(def.getName()).get();
        assertEquals(def.getName(), found.getName());
        assertEquals(def.getVersion(), found.getVersion());
        assertEquals(3, found.getVersion());

        all = metadataDAO.getAllLatest();
        assertNotNull(all);
        assertEquals(1, all.size());
        assertEquals("test", all.get(0).getName());
        assertEquals(3, all.get(0).getVersion());

        // All versions for the name, in ascending version order.
        all = metadataDAO.getAllVersions(def.getName());
        assertNotNull(all);
        assertEquals(2, all.size());
        assertEquals("test", all.get(0).getName());
        assertEquals("test", all.get(1).getName());
        assertEquals(1, all.get(0).getVersion());
        assertEquals(3, all.get(1).getVersion());

        // Updates must be visible on read-back.
        def.setDescription("updated");
        metadataDAO.updateWorkflowDef(def);
        found = metadataDAO.getWorkflowDef(def.getName(), def.getVersion()).get();
        assertEquals(def.getDescription(), found.getDescription());

        List<String> allnames = metadataDAO.findAll();
        assertNotNull(allnames);
        assertEquals(1, allnames.size());
        assertEquals(def.getName(), allnames.get(0));

        // Creating version 2 after 3 must not change "latest" (still 3).
        def.setVersion(2);
        metadataDAO.createWorkflowDef(def);

        found = metadataDAO.getLatestWorkflowDef(def.getName()).get();
        assertEquals(def.getName(), found.getName());
        assertEquals(3, found.getVersion());

        // Removing the latest version makes the next-highest (2) the latest.
        metadataDAO.removeWorkflowDef("test", 3);
        Optional<WorkflowDef> deleted = metadataDAO.getWorkflowDef("test", 3);
        assertFalse(deleted.isPresent());

        found = metadataDAO.getLatestWorkflowDef(def.getName()).get();
        assertEquals(def.getName(), found.getName());
        assertEquals(2, found.getVersion());

        // Removing a lower version leaves "latest" unchanged.
        metadataDAO.removeWorkflowDef("test", 1);
        deleted = metadataDAO.getWorkflowDef("test", 1);
        assertFalse(deleted.isPresent());

        found = metadataDAO.getLatestWorkflowDef(def.getName()).get();
        assertEquals(def.getName(), found.getName());
        assertEquals(2, found.getVersion());
    }

    /** Lifecycle of task definitions: create, read, update, bulk list, and remove. */
    @Test
    public void testTaskDefOperations() {
        TaskDef def = new TaskDef("taskA");
        def.setDescription("description");
        def.setCreatedBy("unit_test");
        def.setCreateTime(1L);
        def.setInputKeys(Arrays.asList("a", "b", "c"));
        def.setOutputKeys(Arrays.asList("01", "o2"));
        def.setOwnerApp("ownerApp");
        def.setRetryCount(3);
        def.setRetryDelaySeconds(100);
        def.setRetryLogic(TaskDef.RetryLogic.FIXED);
        def.setTimeoutPolicy(TaskDef.TimeoutPolicy.ALERT_ONLY);
        def.setUpdatedBy("unit_test2");
        def.setUpdateTime(2L);

        metadataDAO.createTaskDef(def);

        // Round-trip: the stored definition should be field-for-field identical.
        TaskDef found = metadataDAO.getTaskDef(def.getName());
        assertTrue(EqualsBuilder.reflectionEquals(def, found));

        def.setDescription("updated description");
        metadataDAO.updateTaskDef(def);
        found = metadataDAO.getTaskDef(def.getName());
        assertTrue(EqualsBuilder.reflectionEquals(def, found));
        assertEquals("updated description", found.getDescription());

        // Add 9 more definitions (taskA0 .. taskA8) and verify the bulk listing.
        for (int i = 0; i < 9; i++) {
            TaskDef tdf = new TaskDef("taskA" + i);
            metadataDAO.createTaskDef(tdf);
        }

        List<TaskDef> all = metadataDAO.getAllTaskDefs();
        assertNotNull(all);
        assertEquals(10, all.size());
        Set<String> allnames = all.stream().map(TaskDef::getName).collect(Collectors.toSet());
        assertEquals(10, allnames.size());
        List<String> sorted = allnames.stream().sorted().collect(Collectors.toList());
        assertEquals(def.getName(), sorted.get(0));

        for (int i = 0; i < 9; i++) {
            assertEquals(def.getName() + i, sorted.get(i + 1));
        }

        // Remove the 9 extras; only the original "taskA" should remain.
        for (int i = 0; i < 9; i++) {
            metadataDAO.removeTaskDef(def.getName() + i);
        }
        all = metadataDAO.getAllTaskDefs();
        assertNotNull(all);
        assertEquals(1, all.size());
        assertEquals(def.getName(), all.get(0).getName());
    }

    /** Removing a task definition that was never created must fail. */
    @Test
    public void testRemoveNotExistingTaskDef() {
        NonTransientException applicationException =
                assertThrows(
                        NonTransientException.class,
                        () -> metadataDAO.removeTaskDef("test" + UUID.randomUUID().toString()));
        assertEquals("No such task definition", applicationException.getMessage());
    }

    /**
     * Event handler lifecycle: an inactive handler is stored but not returned by
     * getEventHandlersForEvent(..., activeOnly=true); activating and re-pointing it to a new
     * event makes it visible for that event only.
     */
    @Test
    public void testEventHandlers() {
        String event1 = "SQS::arn:account090:sqstest1";
        String event2 = "SQS::arn:account090:sqstest2";

        EventHandler eventHandler = new EventHandler();
        eventHandler.setName(UUID.randomUUID().toString());
        eventHandler.setActive(false);
        EventHandler.Action action = new EventHandler.Action();
        action.setAction(EventHandler.Action.Type.start_workflow);
        action.setStart_workflow(new EventHandler.StartWorkflow());
        action.getStart_workflow().setName("workflow_x");
        eventHandler.getActions().add(action);
        eventHandler.setEvent(event1);

        metadataDAO.addEventHandler(eventHandler);
        List<EventHandler> all = metadataDAO.getAllEventHandlers();
        assertNotNull(all);
        assertEquals(1, all.size());
        assertEquals(eventHandler.getName(), all.get(0).getName());
        assertEquals(eventHandler.getEvent(), all.get(0).getEvent());

        List<EventHandler> byEvents = metadataDAO.getEventHandlersForEvent(event1, true);
        assertNotNull(byEvents);
        assertEquals(0, byEvents.size()); // event is marked as in-active

        eventHandler.setActive(true);
        eventHandler.setEvent(event2);
        metadataDAO.updateEventHandler(eventHandler);

        all = metadataDAO.getAllEventHandlers();
        assertNotNull(all);
        assertEquals(1, all.size());

        // The handler moved from event1 to event2.
        byEvents = metadataDAO.getEventHandlersForEvent(event1, true);
        assertNotNull(byEvents);
        assertEquals(0, byEvents.size());

        byEvents = metadataDAO.getEventHandlersForEvent(event2, true);
        assertNotNull(byEvents);
        assertEquals(1, byEvents.size());
    }

    /** Verifies that only the highest version of each definition is returned. */
    @Test
    public void testGetAllWorkflowDefsLatestVersions() {
        WorkflowDef def = new WorkflowDef();
        def.setName("test1");
        def.setVersion(1);
        def.setDescription("description");
        def.setCreatedBy("unit_test");
        def.setCreateTime(1L);
        def.setOwnerApp("ownerApp");
        def.setUpdatedBy("unit_test2");
        def.setUpdateTime(2L);
        metadataDAO.createWorkflowDef(def);

        def.setName("test2");
        metadataDAO.createWorkflowDef(def);
        def.setVersion(2);
        metadataDAO.createWorkflowDef(def);

        def.setName("test3");
        def.setVersion(1);
        metadataDAO.createWorkflowDef(def);
        def.setVersion(2);
        metadataDAO.createWorkflowDef(def);
        def.setVersion(3);
        metadataDAO.createWorkflowDef(def);

        // Placed the values in a map because they might not be stored in order of defName.
        // To test, needed to confirm that the versions are correct for the definitions.
        Map<String, WorkflowDef> allMap =
                metadataDAO.getAllWorkflowDefsLatestVersions().stream()
                        .collect(Collectors.toMap(WorkflowDef::getName, Function.identity()));
        assertNotNull(allMap);
        assertEquals(3, allMap.size());
        assertEquals(1, allMap.get("test1").getVersion());
        assertEquals(2, allMap.get("test2").getVersion());
        assertEquals(3, allMap.get("test3").getVersion());
    }
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/mysql-persistence/src/test/java/com/netflix/conductor/mysql/dao/MySQLExecutionDAOTest.java | mysql-persistence/src/test/java/com/netflix/conductor/mysql/dao/MySQLExecutionDAOTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.mysql.dao;
import java.util.List;
import org.flywaydb.core.Flyway;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.flyway.FlywayAutoConfiguration;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.dao.ExecutionDAO;
import com.netflix.conductor.dao.ExecutionDAOTest;
import com.netflix.conductor.model.WorkflowModel;
import com.netflix.conductor.mysql.config.MySQLConfiguration;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
/**
 * MySQL-specific run of the shared {@link ExecutionDAOTest} suite, plus a correlation-id lookup
 * test. Uses a real MySQL schema managed by Flyway.
 */
@ContextConfiguration(
        classes = {
            TestObjectMapperConfiguration.class,
            MySQLConfiguration.class,
            FlywayAutoConfiguration.class
        })
@RunWith(SpringRunner.class)
@SpringBootTest(properties = "spring.flyway.clean-disabled=false")
public class MySQLExecutionDAOTest extends ExecutionDAOTest {

    @Autowired private MySQLExecutionDAO executionDAO;

    @Autowired Flyway flyway;

    /** Resets the schema so every test starts from an empty database. */
    @Before
    public void before() {
        flyway.clean();
        flyway.migrate();
    }

    /** All workflows generated for one definition should be retrievable by correlation id. */
    @Test
    public void testPendingByCorrelationId() {
        final String workflowName = "pending_count_correlation_jtest";
        final int workflowCount = 10;

        WorkflowDef definition = new WorkflowDef();
        definition.setName(workflowName);

        WorkflowModel seed = createTestWorkflow();
        seed.setWorkflowDefinition(definition);
        generateWorkflows(seed, workflowCount);

        List<WorkflowModel> byCorrelationId =
                getExecutionDAO().getWorkflowsByCorrelationId(workflowName, "corr001", true);
        assertNotNull(byCorrelationId);
        assertEquals(workflowCount, byCorrelationId.size());
    }

    /** Supplies the MySQL-backed DAO to the shared base-class tests. */
    @Override
    public ExecutionDAO getExecutionDAO() {
        return executionDAO;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/mysql-persistence/src/test/java/com/netflix/conductor/test/integration/grpc/mysql/MySQLGrpcEndToEndTest.java | mysql-persistence/src/test/java/com/netflix/conductor/test/integration/grpc/mysql/MySQLGrpcEndToEndTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.test.integration.grpc.mysql;
import org.junit.Before;
import org.junit.runner.RunWith;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.client.grpc.EventClient;
import com.netflix.conductor.client.grpc.MetadataClient;
import com.netflix.conductor.client.grpc.TaskClient;
import com.netflix.conductor.client.grpc.WorkflowClient;
import com.netflix.conductor.test.integration.grpc.AbstractGrpcEndToEndTest;
/**
 * gRPC end-to-end tests against a MySQL-backed Conductor server started by Testcontainers.
 */
@RunWith(SpringRunner.class)
@TestPropertySource(
        properties = {
            "conductor.db.type=mysql",
            // Must stay in sync with GRPC_PORT below (annotation values must be constants).
            "conductor.grpc-server.port=8094",
            "spring.datasource.url=jdbc:tc:mysql:8.0.27:///conductor", // "tc" prefix starts the
            // MySql
            // container
            "spring.datasource.username=root",
            "spring.datasource.password=root",
            "spring.datasource.hikari.maximum-pool-size=8",
            // NOTE(review): 300000 looks like a timeout in ms, but hikari minimum-idle is a
            // connection count — confirm the intended property.
            "spring.datasource.hikari.minimum-idle=300000",
            "conductor.elasticsearch.version=7",
            "conductor.app.workflow.name-validation.enabled=true"
        })
public class MySQLGrpcEndToEndTest extends AbstractGrpcEndToEndTest {

    /** Where the embedded gRPC server listens; port mirrors conductor.grpc-server.port above. */
    private static final String GRPC_HOST = "localhost";

    private static final int GRPC_PORT = 8094;

    /** Creates one gRPC client per API, all pointing at the same test server. */
    @Before
    public void init() {
        taskClient = new TaskClient(GRPC_HOST, GRPC_PORT);
        workflowClient = new WorkflowClient(GRPC_HOST, GRPC_PORT);
        metadataClient = new MetadataClient(GRPC_HOST, GRPC_PORT);
        eventClient = new EventClient(GRPC_HOST, GRPC_PORT);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/mysql-persistence/src/main/java/com/netflix/conductor/mysql/dao/MySQLQueueDAO.java | mysql-persistence/src/main/java/com/netflix/conductor/mysql/dao/MySQLQueueDAO.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.mysql.dao;
import java.sql.Connection;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import javax.sql.DataSource;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.dao.QueueDAO;
import com.netflix.conductor.mysql.util.Query;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.Uninterruptibles;
/**
 * MySQL-backed implementation of {@link QueueDAO}.
 *
 * <p>Queues live in two tables: {@code queue} (one row per queue name) and
 * {@code queue_message} (one row per message). Message visibility is driven by the
 * {@code popped} flag and the {@code deliver_on} timestamp; "ack" simply deletes the row.
 */
public class MySQLQueueDAO extends MySQLBaseDAO implements QueueDAO {
    // Interval (ms) between background sweeps that return un-acked messages to the queue.
    private static final Long UNACK_SCHEDULE_MS = 60_000L;
    public MySQLQueueDAO(
            RetryTemplate retryTemplate, ObjectMapper objectMapper, DataSource dataSource) {
        super(retryTemplate, objectMapper, dataSource);
        // Background sweep: un-pop messages that were delivered but never acked.
        // NOTE(review): this executor is never shut down; presumably the DAO lives for
        // the whole application lifetime — confirm before reusing in shorter-lived scopes.
        Executors.newSingleThreadScheduledExecutor()
                .scheduleAtFixedRate(
                        this::processAllUnacks,
                        UNACK_SCHEDULE_MS,
                        UNACK_SCHEDULE_MS,
                        TimeUnit.MILLISECONDS);
        logger.debug(MySQLQueueDAO.class.getName() + " is ready to serve");
    }
    // Push with default (0) priority.
    @Override
    public void push(String queueName, String messageId, long offsetTimeInSecond) {
        push(queueName, messageId, 0, offsetTimeInSecond);
    }
    // Push a single message with no payload; insert-or-update semantics (see pushMessage).
    @Override
    public void push(String queueName, String messageId, int priority, long offsetTimeInSecond) {
        withTransaction(
                tx -> pushMessage(tx, queueName, messageId, null, priority, offsetTimeInSecond));
    }
    // Push a batch of messages (payload and priority taken from each Message) in one transaction.
    @Override
    public void push(String queueName, List<Message> messages) {
        withTransaction(
                tx ->
                        messages.forEach(
                                message ->
                                        pushMessage(
                                                tx,
                                                queueName,
                                                message.getId(),
                                                message.getPayload(),
                                                message.getPriority(),
                                                0)));
    }
    @Override
    public boolean pushIfNotExists(String queueName, String messageId, long offsetTimeInSecond) {
        return pushIfNotExists(queueName, messageId, 0, offsetTimeInSecond);
    }
    // Returns true only when the message was actually inserted (did not already exist).
    @Override
    public boolean pushIfNotExists(
            String queueName, String messageId, int priority, long offsetTimeInSecond) {
        return getWithRetriedTransactions(
                tx -> {
                    if (!existsMessage(tx, queueName, messageId)) {
                        pushMessage(tx, queueName, messageId, null, priority, offsetTimeInSecond);
                        return true;
                    }
                    return false;
                });
    }
    // Pop up to `count` message ids, polling up to `timeout` ms. Never returns null.
    @Override
    public List<String> pop(String queueName, int count, int timeout) {
        List<Message> messages =
                getWithTransactionWithOutErrorPropagation(
                        tx -> popMessages(tx, queueName, count, timeout));
        if (messages == null) {
            return new ArrayList<>();
        }
        return messages.stream().map(Message::getId).collect(Collectors.toList());
    }
    // Same as pop() but returns the full Message objects (id, priority, payload).
    @Override
    public List<Message> pollMessages(String queueName, int count, int timeout) {
        List<Message> messages =
                getWithTransactionWithOutErrorPropagation(
                        tx -> popMessages(tx, queueName, count, timeout));
        if (messages == null) {
            return new ArrayList<>();
        }
        return messages;
    }
    @Override
    public void remove(String queueName, String messageId) {
        withTransaction(tx -> removeMessage(tx, queueName, messageId));
    }
    // Total number of messages in the queue, popped or not.
    @Override
    public int getSize(String queueName) {
        final String GET_QUEUE_SIZE = "SELECT COUNT(*) FROM queue_message WHERE queue_name = ?";
        return queryWithTransaction(
                GET_QUEUE_SIZE, q -> ((Long) q.addParameter(queueName).executeCount()).intValue());
    }
    // Ack == delete the message row; true when a row was removed.
    @Override
    public boolean ack(String queueName, String messageId) {
        return getWithRetriedTransactions(tx -> removeMessage(tx, queueName, messageId));
    }
    // Postpone redelivery of a popped message by `unackTimeout` milliseconds.
    // NOTE(review): unackTimeout / 1000 truncates sub-second values to 0 — confirm
    // callers always pass whole seconds.
    @Override
    public boolean setUnackTimeout(String queueName, String messageId, long unackTimeout) {
        long updatedOffsetTimeInSecond = unackTimeout / 1000;
        final String UPDATE_UNACK_TIMEOUT =
                "UPDATE queue_message SET offset_time_seconds = ?, deliver_on = TIMESTAMPADD(SECOND, ?, CURRENT_TIMESTAMP) WHERE queue_name = ? AND message_id = ?";
        return queryWithTransaction(
                UPDATE_UNACK_TIMEOUT,
                q ->
                        q.addParameter(updatedOffsetTimeInSecond)
                                .addParameter(updatedOffsetTimeInSecond)
                                .addParameter(queueName)
                                .addParameter(messageId)
                                .executeUpdate())
                == 1;
    }
    // Delete every message in the queue (the queue row itself is kept).
    @Override
    public void flush(String queueName) {
        final String FLUSH_QUEUE = "DELETE FROM queue_message WHERE queue_name = ?";
        executeWithTransaction(FLUSH_QUEUE, q -> q.addParameter(queueName).executeDelete());
    }
    // Map of queue name -> number of un-popped (visible) messages.
    @Override
    public Map<String, Long> queuesDetail() {
        final String GET_QUEUES_DETAIL =
                "SELECT queue_name, (SELECT count(*) FROM queue_message WHERE popped = false AND queue_name = q.queue_name) AS size FROM queue q";
        return queryWithTransaction(
                GET_QUEUES_DETAIL,
                q ->
                        q.executeAndFetch(
                                rs -> {
                                    Map<String, Long> detail = Maps.newHashMap();
                                    while (rs.next()) {
                                        String queueName = rs.getString("queue_name");
                                        Long size = rs.getLong("size");
                                        detail.put(queueName, size);
                                    }
                                    return detail;
                                }));
    }
    // Per-queue breakdown of visible ("size") vs popped-but-unacked ("uacked") counts.
    // The middle map level is a shard id; this implementation always reports shard "a".
    @Override
    public Map<String, Map<String, Map<String, Long>>> queuesDetailVerbose() {
        // @formatter:off
        final String GET_QUEUES_DETAIL_VERBOSE =
                "SELECT queue_name, \n"
                        + "       (SELECT count(*) FROM queue_message WHERE popped = false AND queue_name = q.queue_name) AS size,\n"
                        + "       (SELECT count(*) FROM queue_message WHERE popped = true AND queue_name = q.queue_name) AS uacked \n"
                        + "FROM queue q";
        // @formatter:on
        return queryWithTransaction(
                GET_QUEUES_DETAIL_VERBOSE,
                q ->
                        q.executeAndFetch(
                                rs -> {
                                    Map<String, Map<String, Map<String, Long>>> result =
                                            Maps.newHashMap();
                                    while (rs.next()) {
                                        String queueName = rs.getString("queue_name");
                                        Long size = rs.getLong("size");
                                        Long queueUnacked = rs.getLong("uacked");
                                        result.put(
                                                queueName,
                                                ImmutableMap.of(
                                                        "a",
                                                        ImmutableMap
                                                                .of( // sharding not implemented,
                                                                        // returning only
                                                                        // one shard with all the
                                                                        // info
                                                                        "size",
                                                                        size,
                                                                        "uacked",
                                                                        queueUnacked)));
                                    }
                                    return result;
                                }));
    }
    /**
     * Un-pop all un-acknowledged messages for all queues.
     *
     * <p>A message counts as un-acknowledged when it was popped but its {@code deliver_on}
     * is more than 60 seconds in the past. Runs on the scheduler started in the constructor.
     *
     * @since 1.11.6
     */
    public void processAllUnacks() {
        logger.trace("processAllUnacks started");
        final String PROCESS_ALL_UNACKS =
                "UPDATE queue_message SET popped = false WHERE popped = true AND TIMESTAMPADD(SECOND,-60,CURRENT_TIMESTAMP) > deliver_on";
        executeWithTransaction(PROCESS_ALL_UNACKS, Query::executeUpdate);
    }
    // Same sweep as processAllUnacks() but restricted to one queue.
    @Override
    public void processUnacks(String queueName) {
        final String PROCESS_UNACKS =
                "UPDATE queue_message SET popped = false WHERE queue_name = ? AND popped = true AND TIMESTAMPADD(SECOND,-60,CURRENT_TIMESTAMP) > deliver_on";
        executeWithTransaction(PROCESS_UNACKS, q -> q.addParameter(queueName).executeUpdate());
    }
    // Make the message deliverable immediately (offset reset to 0).
    @Override
    public boolean resetOffsetTime(String queueName, String messageId) {
        long offsetTimeInSecond = 0; // Reset to 0
        final String SET_OFFSET_TIME =
                "UPDATE queue_message SET offset_time_seconds = ?, deliver_on = TIMESTAMPADD(SECOND,?,CURRENT_TIMESTAMP) \n"
                        + "WHERE queue_name = ? AND message_id = ?";
        return queryWithTransaction(
                SET_OFFSET_TIME,
                q ->
                        q.addParameter(offsetTimeInSecond)
                                .addParameter(offsetTimeInSecond)
                                .addParameter(queueName)
                                .addParameter(messageId)
                                .executeUpdate()
                        == 1);
    }
    private boolean existsMessage(Connection connection, String queueName, String messageId) {
        final String EXISTS_MESSAGE =
                "SELECT EXISTS(SELECT 1 FROM queue_message WHERE queue_name = ? AND message_id = ?)";
        return query(
                connection,
                EXISTS_MESSAGE,
                q -> q.addParameter(queueName).addParameter(messageId).exists());
    }
    // Upsert a message: try UPDATE first (common case), fall back to INSERT with an
    // ON DUPLICATE KEY safety net for concurrent pushers. Also ensures the queue row exists.
    private void pushMessage(
            Connection connection,
            String queueName,
            String messageId,
            String payload,
            Integer priority,
            long offsetTimeInSecond) {
        createQueueIfNotExists(connection, queueName);
        String UPDATE_MESSAGE =
                "UPDATE queue_message SET payload=?, deliver_on=TIMESTAMPADD(SECOND,?,CURRENT_TIMESTAMP) WHERE queue_name = ? AND message_id = ?";
        int rowsUpdated =
                query(
                        connection,
                        UPDATE_MESSAGE,
                        q ->
                                q.addParameter(payload)
                                        .addParameter(offsetTimeInSecond)
                                        .addParameter(queueName)
                                        .addParameter(messageId)
                                        .executeUpdate());
        if (rowsUpdated == 0) {
            String PUSH_MESSAGE =
                    "INSERT INTO queue_message (deliver_on, queue_name, message_id, priority, offset_time_seconds, payload) VALUES (TIMESTAMPADD(SECOND,?,CURRENT_TIMESTAMP), ?, ?,?,?,?) ON DUPLICATE KEY UPDATE payload=VALUES(payload), deliver_on=VALUES(deliver_on)";
            execute(
                    connection,
                    PUSH_MESSAGE,
                    q ->
                            q.addParameter(offsetTimeInSecond)
                                    .addParameter(queueName)
                                    .addParameter(messageId)
                                    .addParameter(priority)
                                    .addParameter(offsetTimeInSecond)
                                    .addParameter(payload)
                                    .executeUpdate());
        }
    }
    private boolean removeMessage(Connection connection, String queueName, String messageId) {
        final String REMOVE_MESSAGE =
                "DELETE FROM queue_message WHERE queue_name = ? AND message_id = ?";
        return query(
                connection,
                REMOVE_MESSAGE,
                q -> q.addParameter(queueName).addParameter(messageId).executeDelete());
    }
    // Read (without popping) up to `count` visible messages, highest priority first.
    // The 1000-microsecond lookahead makes messages due "right now" visible despite
    // sub-second clock granularity.
    private List<Message> peekMessages(Connection connection, String queueName, int count) {
        if (count < 1) {
            return Collections.emptyList();
        }
        final String PEEK_MESSAGES =
                "SELECT message_id, priority, payload FROM queue_message use index(combo_queue_message) WHERE queue_name = ? AND popped = false AND deliver_on <= TIMESTAMPADD(MICROSECOND, 1000, CURRENT_TIMESTAMP) ORDER BY priority DESC, deliver_on, created_on LIMIT ?";
        return query(
                connection,
                PEEK_MESSAGES,
                p ->
                        p.addParameter(queueName)
                                .addParameter(count)
                                .executeAndFetch(
                                        rs -> {
                                            List<Message> results = new ArrayList<>();
                                            while (rs.next()) {
                                                Message m = new Message();
                                                m.setId(rs.getString("message_id"));
                                                m.setPriority(rs.getInt("priority"));
                                                m.setPayload(rs.getString("payload"));
                                                results.add(m);
                                            }
                                            return results;
                                        }));
    }
    // Poll for up to `count` messages, sleeping 200ms between peeks until `timeout` ms
    // elapse, then mark each as popped. Only messages whose UPDATE actually flipped
    // popped=false->true are returned (guards against concurrent consumers).
    // NOTE(review): the DB connection is held across the sleep loop — confirm pool
    // sizing accounts for long poll timeouts.
    private List<Message> popMessages(
            Connection connection, String queueName, int count, int timeout) {
        long start = System.currentTimeMillis();
        List<Message> messages = peekMessages(connection, queueName, count);
        while (messages.size() < count && ((System.currentTimeMillis() - start) < timeout)) {
            Uninterruptibles.sleepUninterruptibly(200, TimeUnit.MILLISECONDS);
            messages = peekMessages(connection, queueName, count);
        }
        if (messages.isEmpty()) {
            return messages;
        }
        List<Message> poppedMessages = new ArrayList<>();
        for (Message message : messages) {
            final String POP_MESSAGE =
                    "UPDATE queue_message SET popped = true WHERE queue_name = ? AND message_id = ? AND popped = false";
            int result =
                    query(
                            connection,
                            POP_MESSAGE,
                            q ->
                                    q.addParameter(queueName)
                                            .addParameter(message.getId())
                                            .executeUpdate());
            if (result == 1) {
                poppedMessages.add(message);
            }
        }
        return poppedMessages;
    }
    // Lazily register the queue; INSERT IGNORE tolerates concurrent creation.
    private void createQueueIfNotExists(Connection connection, String queueName) {
        logger.trace("Creating new queue '{}'", queueName);
        final String EXISTS_QUEUE = "SELECT EXISTS(SELECT 1 FROM queue WHERE queue_name = ?)";
        boolean exists = query(connection, EXISTS_QUEUE, q -> q.addParameter(queueName).exists());
        if (!exists) {
            final String CREATE_QUEUE = "INSERT IGNORE INTO queue (queue_name) VALUES (?)";
            execute(connection, CREATE_QUEUE, q -> q.addParameter(queueName).executeUpdate());
        }
    }
    @Override
    public boolean containsMessage(String queueName, String messageId) {
        final String EXISTS_QUEUE =
                "SELECT EXISTS(SELECT 1 FROM queue_message WHERE queue_name = ? AND message_id = ? )";
        return queryWithTransaction(
                EXISTS_QUEUE, q -> q.addParameter(queueName).addParameter(messageId).exists());
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/mysql-persistence/src/main/java/com/netflix/conductor/mysql/dao/MySQLExecutionDAO.java | mysql-persistence/src/main/java/com/netflix/conductor/mysql/dao/MySQLExecutionDAO.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.mysql.dao;
import java.sql.Connection;
import java.sql.SQLException;
import java.text.SimpleDateFormat;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
import java.util.*;
import java.util.stream.Collectors;
import javax.sql.DataSource;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.common.metadata.events.EventExecution;
import com.netflix.conductor.common.metadata.tasks.PollData;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.dao.ConcurrentExecutionLimitDAO;
import com.netflix.conductor.dao.ExecutionDAO;
import com.netflix.conductor.dao.PollDataDAO;
import com.netflix.conductor.dao.RateLimitingDAO;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import com.netflix.conductor.mysql.util.Query;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
public class MySQLExecutionDAO extends MySQLBaseDAO
implements ExecutionDAO, RateLimitingDAO, PollDataDAO, ConcurrentExecutionLimitDAO {
    // All state lives in MySQLBaseDAO (retry template, JSON mapper, data source).
    public MySQLExecutionDAO(
            RetryTemplate retryTemplate, ObjectMapper objectMapper, DataSource dataSource) {
        super(retryTemplate, objectMapper, dataSource);
    }
private static String dateStr(Long timeInMs) {
Date date = new Date(timeInMs);
return dateStr(date);
}
private static String dateStr(Date date) {
SimpleDateFormat format = new SimpleDateFormat("yyyyMMdd");
return format.format(date);
}
    // All IN_PROGRESS tasks of a given task type belonging to one workflow instance.
    @Override
    public List<TaskModel> getPendingTasksByWorkflow(String taskDefName, String workflowId) {
        // @formatter:off
        String GET_IN_PROGRESS_TASKS_FOR_WORKFLOW =
                "SELECT json_data FROM task_in_progress tip "
                        + "INNER JOIN task t ON t.task_id = tip.task_id "
                        + "WHERE task_def_name = ? AND workflow_id = ?";
        // @formatter:on
        return queryWithTransaction(
                GET_IN_PROGRESS_TASKS_FOR_WORKFLOW,
                q ->
                        q.addParameter(taskDefName)
                                .addParameter(workflowId)
                                .executeAndFetch(TaskModel.class));
    }
@Override
public List<TaskModel> getTasks(String taskDefName, String startKey, int count) {
List<TaskModel> tasks = new ArrayList<>(count);
List<TaskModel> pendingTasks = getPendingTasksForTaskType(taskDefName);
boolean startKeyFound = startKey == null;
int found = 0;
for (TaskModel pendingTask : pendingTasks) {
if (!startKeyFound) {
if (pendingTask.getTaskId().equals(startKey)) {
startKeyFound = true;
// noinspection ConstantConditions
if (startKey != null) {
continue;
}
}
}
if (startKeyFound && found < count) {
tasks.add(pendingTask);
found++;
}
}
return tasks;
}
private static String taskKey(TaskModel task) {
return task.getReferenceTaskName() + "_" + task.getRetryCount();
}
    // Atomically schedule a batch of tasks. A task is skipped (not returned) when its
    // taskKey is already scheduled; otherwise its data, workflow mapping and
    // in-progress marker are written in this order inside one transaction.
    @Override
    public List<TaskModel> createTasks(List<TaskModel> tasks) {
        List<TaskModel> created = Lists.newArrayListWithCapacity(tasks.size());
        withTransaction(
                connection -> {
                    for (TaskModel task : tasks) {
                        validate(task);
                        task.setScheduledTime(System.currentTimeMillis());
                        final String taskKey = taskKey(task);
                        // De-dup guard: only the first scheduling attempt for a key wins.
                        boolean scheduledTaskAdded = addScheduledTask(connection, task, taskKey);
                        if (!scheduledTaskAdded) {
                            logger.trace(
                                    "Task already scheduled, skipping the run "
                                            + task.getTaskId()
                                            + ", ref="
                                            + task.getReferenceTaskName()
                                            + ", key="
                                            + taskKey);
                            continue;
                        }
                        insertOrUpdateTaskData(connection, task);
                        addWorkflowToTaskMapping(connection, task);
                        addTaskInProgress(connection, task);
                        updateTask(connection, task);
                        created.add(task);
                    }
                });
        return created;
    }
@Override
public void updateTask(TaskModel task) {
withTransaction(connection -> updateTask(connection, task));
}
    /**
     * This is a dummy implementation and this feature is not for Mysql backed Conductor
     *
     * @param task: which needs to be evaluated whether it is rateLimited or not
     * @return always {@code false} — frequency-based rate limiting is not supported here
     */
    @Override
    public boolean exceedsRateLimitPerFrequency(TaskModel task, TaskDef taskDef) {
        return false;
    }
@Override
public boolean exceedsLimit(TaskModel task) {
Optional<TaskDef> taskDefinition = task.getTaskDefinition();
if (taskDefinition.isEmpty()) {
return false;
}
TaskDef taskDef = taskDefinition.get();
int limit = taskDef.concurrencyLimit();
if (limit <= 0) {
return false;
}
long current = getInProgressTaskCount(task.getTaskDefName());
if (current >= limit) {
Monitors.recordTaskConcurrentExecutionLimited(task.getTaskDefName(), limit);
return true;
}
logger.info(
"Task execution count for {}: limit={}, current={}",
task.getTaskDefName(),
limit,
getInProgressTaskCount(task.getTaskDefName()));
String taskId = task.getTaskId();
List<String> tasksInProgressInOrderOfArrival =
findAllTasksInProgressInOrderOfArrival(task, limit);
boolean rateLimited = !tasksInProgressInOrderOfArrival.contains(taskId);
if (rateLimited) {
logger.info(
"Task execution count limited. {}, limit {}, current {}",
task.getTaskDefName(),
limit,
getInProgressTaskCount(task.getTaskDefName()));
Monitors.recordTaskConcurrentExecutionLimited(task.getTaskDefName(), limit);
}
return rateLimited;
}
    // Delete a task and all of its bookkeeping rows; false when the task id is unknown.
    @Override
    public boolean removeTask(String taskId) {
        TaskModel task = getTask(taskId);
        if (task == null) {
            logger.warn("No such task found by id {}", taskId);
            return false;
        }
        final String taskKey = taskKey(task);
        // Remove in reverse order of creation: schedule marker, workflow mapping,
        // in-progress marker, then the task row itself.
        withTransaction(
                connection -> {
                    removeScheduledTask(connection, task, taskKey);
                    removeWorkflowToTaskMapping(connection, task);
                    removeTaskInProgress(connection, task);
                    removeTaskData(connection, task);
                });
        return true;
    }
@Override
public TaskModel getTask(String taskId) {
String GET_TASK = "SELECT json_data FROM task WHERE task_id = ?";
return queryWithTransaction(
GET_TASK, q -> q.addParameter(taskId).executeAndFetchFirst(TaskModel.class));
}
@Override
public List<TaskModel> getTasks(List<String> taskIds) {
if (taskIds.isEmpty()) {
return Lists.newArrayList();
}
return getWithRetriedTransactions(c -> getTasks(c, taskIds));
}
    // All in-progress tasks for one task definition, across every workflow.
    @Override
    public List<TaskModel> getPendingTasksForTaskType(String taskName) {
        Preconditions.checkNotNull(taskName, "task name cannot be null");
        // @formatter:off
        String GET_IN_PROGRESS_TASKS_FOR_TYPE =
                "SELECT json_data FROM task_in_progress tip "
                        + "INNER JOIN task t ON t.task_id = tip.task_id "
                        + "WHERE task_def_name = ?";
        // @formatter:on
        return queryWithTransaction(
                GET_IN_PROGRESS_TASKS_FOR_TYPE,
                q -> q.addParameter(taskName).executeAndFetch(TaskModel.class));
    }
    // All tasks of a workflow: resolve the ids from the mapping table, then load the
    // task rows within the same transaction/connection.
    @Override
    public List<TaskModel> getTasksForWorkflow(String workflowId) {
        String GET_TASKS_FOR_WORKFLOW =
                "SELECT task_id FROM workflow_to_task WHERE workflow_id = ?";
        return getWithRetriedTransactions(
                tx ->
                        query(
                                tx,
                                GET_TASKS_FOR_WORKFLOW,
                                q -> {
                                    List<String> taskIds =
                                            q.addParameter(workflowId)
                                                    .executeScalarList(String.class);
                                    return getTasks(tx, taskIds);
                                }));
    }
    // Insert a new workflow (update=false path of the shared upsert helper).
    @Override
    public String createWorkflow(WorkflowModel workflow) {
        return insertOrUpdateWorkflow(workflow, false);
    }
    // Update an existing workflow (update=true path of the shared upsert helper).
    @Override
    public String updateWorkflow(WorkflowModel workflow) {
        return insertOrUpdateWorkflow(workflow, true);
    }
    // Delete a workflow and its tasks. Returns true only when the workflow existed AND
    // every one of its tasks was removed successfully; the workflow row deletion and
    // task deletions run in separate transactions.
    @Override
    public boolean removeWorkflow(String workflowId) {
        boolean removed = false;
        WorkflowModel workflow = getWorkflow(workflowId, true);
        if (workflow != null) {
            withTransaction(
                    connection -> {
                        removeWorkflowDefToWorkflowMapping(connection, workflow);
                        removeWorkflow(connection, workflowId);
                        removePendingWorkflow(connection, workflow.getWorkflowName(), workflowId);
                    });
            removed = true;
            for (TaskModel task : workflow.getTasks()) {
                if (!removeTask(task.getTaskId())) {
                    removed = false; // partial failure: report not-fully-removed
                }
            }
        }
        return removed;
    }
    /**
     * This is a dummy implementation and this feature is not supported for MySQL backed Conductor
     *
     * @throws UnsupportedOperationException always — TTL-based removal requires the Redis DAO
     */
    @Override
    public boolean removeWorkflowWithExpiry(String workflowId, int ttlSeconds) {
        throw new UnsupportedOperationException(
                "This method is not implemented in MySQLExecutionDAO. Please use RedisDAO mode instead for using TTLs.");
    }
    // Drop the workflow from the pending (RUNNING) set without touching its data.
    @Override
    public void removeFromPendingWorkflow(String workflowType, String workflowId) {
        withTransaction(connection -> removePendingWorkflow(connection, workflowType, workflowId));
    }
    // Convenience overload: tasks are always included.
    @Override
    public WorkflowModel getWorkflow(String workflowId) {
        return getWorkflow(workflowId, true);
    }
@Override
public WorkflowModel getWorkflow(String workflowId, boolean includeTasks) {
WorkflowModel workflow = getWithRetriedTransactions(tx -> readWorkflow(tx, workflowId));
if (workflow != null) {
if (includeTasks) {
List<TaskModel> tasks = getTasksForWorkflow(workflowId);
tasks.sort(Comparator.comparingInt(TaskModel::getSeq));
workflow.setTasks(tasks);
}
}
return workflow;
}
    /**
     * @param workflowName name of the workflow
     * @param version the workflow version (ignored by the query — see below)
     * @return list of workflow ids that are in RUNNING state <em>returns workflows of all versions
     *     for the given workflow name</em>
     */
    @Override
    public List<String> getRunningWorkflowIds(String workflowName, int version) {
        Preconditions.checkNotNull(workflowName, "workflowName cannot be null");
        // workflow_pending is keyed by type only, so `version` cannot be applied here.
        String GET_PENDING_WORKFLOW_IDS =
                "SELECT workflow_id FROM workflow_pending WHERE workflow_type = ?";
        return queryWithTransaction(
                GET_PENDING_WORKFLOW_IDS,
                q -> q.addParameter(workflowName).executeScalarList(String.class));
    }
    /**
     * @param workflowName Name of the workflow
     * @param version the workflow version
     * @return list of workflows that are in RUNNING state
     */
    @Override
    public List<WorkflowModel> getPendingWorkflowsByType(String workflowName, int version) {
        Preconditions.checkNotNull(workflowName, "workflowName cannot be null");
        // The id query returns all versions; each workflow is loaded and then the
        // version filter is applied in memory (N+1 loads).
        return getRunningWorkflowIds(workflowName, version).stream()
                .map(this::getWorkflow)
                .filter(workflow -> workflow.getWorkflowVersion() == version)
                .collect(Collectors.toList());
    }
@Override
public long getPendingWorkflowCount(String workflowName) {
Preconditions.checkNotNull(workflowName, "workflowName cannot be null");
String GET_PENDING_WORKFLOW_COUNT =
"SELECT COUNT(*) FROM workflow_pending WHERE workflow_type = ?";
return queryWithTransaction(
GET_PENDING_WORKFLOW_COUNT, q -> q.addParameter(workflowName).executeCount());
}
@Override
public long getInProgressTaskCount(String taskDefName) {
String GET_IN_PROGRESS_TASK_COUNT =
"SELECT COUNT(*) FROM task_in_progress WHERE task_def_name = ? AND in_progress_status = true";
return queryWithTransaction(
GET_IN_PROGRESS_TASK_COUNT, q -> q.addParameter(taskDefName).executeCount());
}
    // Workflows of a type created in [startTime, endTime]. Candidate ids come from the
    // date-bucketed mapping table (day granularity); the exact createTime filter is
    // applied after loading each workflow. Load failures are logged and skipped.
    @Override
    public List<WorkflowModel> getWorkflowsByType(
            String workflowName, Long startTime, Long endTime) {
        Preconditions.checkNotNull(workflowName, "workflowName cannot be null");
        Preconditions.checkNotNull(startTime, "startTime cannot be null");
        Preconditions.checkNotNull(endTime, "endTime cannot be null");
        List<WorkflowModel> workflows = new LinkedList<>();
        withTransaction(
                tx -> {
                    // @formatter:off
                    String GET_ALL_WORKFLOWS_FOR_WORKFLOW_DEF =
                            "SELECT workflow_id FROM workflow_def_to_workflow "
                                    + "WHERE workflow_def = ? AND date_str BETWEEN ? AND ?";
                    // @formatter:on
                    List<String> workflowIds =
                            query(
                                    tx,
                                    GET_ALL_WORKFLOWS_FOR_WORKFLOW_DEF,
                                    q ->
                                            q.addParameter(workflowName)
                                                    .addParameter(dateStr(startTime))
                                                    .addParameter(dateStr(endTime))
                                                    .executeScalarList(String.class));
                    workflowIds.forEach(
                            workflowId -> {
                                try {
                                    WorkflowModel wf = getWorkflow(workflowId);
                                    if (wf.getCreateTime() >= startTime
                                            && wf.getCreateTime() <= endTime) {
                                        workflows.add(wf);
                                    }
                                } catch (Exception e) {
                                    logger.error(
                                            "Unable to load workflow id {} with name {}",
                                            workflowId,
                                            workflowName,
                                            e);
                                }
                            });
                });
        return workflows;
    }
    // Workflows matching a correlation id within one workflow type.
    // NOTE(review): includeTasks is accepted but not used here — presumably tasks are
    // never hydrated on this path; confirm against callers.
    @Override
    public List<WorkflowModel> getWorkflowsByCorrelationId(
            String workflowName, String correlationId, boolean includeTasks) {
        Preconditions.checkNotNull(correlationId, "correlationId cannot be null");
        String GET_WORKFLOWS_BY_CORRELATION_ID =
                "SELECT w.json_data FROM workflow w left join workflow_def_to_workflow wd on w.workflow_id = wd.workflow_id WHERE w.correlation_id = ? and wd.workflow_def = ?";
        return queryWithTransaction(
                GET_WORKFLOWS_BY_CORRELATION_ID,
                q ->
                        q.addParameter(correlationId)
                                .addParameter(workflowName)
                                .executeAndFetch(WorkflowModel.class));
    }
    // The SQL store supports correlation-id search across all workflows.
    @Override
    public boolean canSearchAcrossWorkflows() {
        return true;
    }
@Override
public boolean addEventExecution(EventExecution eventExecution) {
try {
return getWithRetriedTransactions(tx -> insertEventExecution(tx, eventExecution));
} catch (Exception e) {
throw new NonTransientException(
"Unable to add event execution " + eventExecution.getId(), e);
}
}
@Override
public void removeEventExecution(EventExecution eventExecution) {
try {
withTransaction(tx -> removeEventExecution(tx, eventExecution));
} catch (Exception e) {
throw new NonTransientException(
"Unable to remove event execution " + eventExecution.getId(), e);
}
}
@Override
public void updateEventExecution(EventExecution eventExecution) {
try {
withTransaction(tx -> updateEventExecution(tx, eventExecution));
} catch (Exception e) {
throw new NonTransientException(
"Unable to update event execution " + eventExecution.getId(), e);
}
}
    // Read up to `max` event executions whose ids follow the "<messageId>_<i>" scheme;
    // stops at the first missing index (ids are assiged sequentially by the processor).
    public List<EventExecution> getEventExecutions(
            String eventHandlerName, String eventName, String messageId, int max) {
        try {
            List<EventExecution> executions = Lists.newLinkedList();
            withTransaction(
                    tx -> {
                        for (int i = 0; i < max; i++) {
                            String executionId =
                                    messageId + "_"
                                            + i; // see SimpleEventProcessor.handle to understand
                            // how the
                            // execution id is set
                            EventExecution ee =
                                    readEventExecution(
                                            tx,
                                            eventHandlerName,
                                            eventName,
                                            messageId,
                                            executionId);
                            if (ee == null) {
                                break; // gap in the sequence -> no more executions
                            }
                            executions.add(ee);
                        }
                    });
            return executions;
        } catch (Exception e) {
            String message =
                    String.format(
                            "Unable to get event executions for eventHandlerName=%s, eventName=%s, messageId=%s",
                            eventHandlerName, eventName, messageId);
            throw new NonTransientException(message, e);
        }
    }
    // Record the latest poll for (taskDefName, domain, workerId). A null domain is
    // stored under the "DEFAULT" bucket (mirrored in getPollData).
    @Override
    public void updateLastPollData(String taskDefName, String domain, String workerId) {
        Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null");
        PollData pollData = new PollData(taskDefName, domain, workerId, System.currentTimeMillis());
        String effectiveDomain = (domain == null) ? "DEFAULT" : domain;
        withTransaction(tx -> insertOrUpdatePollData(tx, pollData, effectiveDomain));
    }
    // Latest poll data for one (taskDefName, domain); null domain maps to "DEFAULT".
    @Override
    public PollData getPollData(String taskDefName, String domain) {
        Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null");
        String effectiveDomain = (domain == null) ? "DEFAULT" : domain;
        return getWithRetriedTransactions(tx -> readPollData(tx, taskDefName, effectiveDomain));
    }
    // Poll data for a task type across all domains.
    @Override
    public List<PollData> getPollData(String taskDefName) {
        Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null");
        return readAllPollData(taskDefName);
    }
    // Poll data for every queue. Runs outside the retry/transaction helpers: the
    // connection is borrowed directly, switched to auto-commit for the read, and the
    // previous auto-commit mode is restored before returning the connection.
    @Override
    public List<PollData> getAllPollData() {
        try (Connection tx = dataSource.getConnection()) {
            boolean previousAutoCommitMode = tx.getAutoCommit();
            tx.setAutoCommit(true);
            try {
                String GET_ALL_POLL_DATA = "SELECT json_data FROM poll_data ORDER BY queue_name";
                return query(tx, GET_ALL_POLL_DATA, q -> q.executeAndFetch(PollData.class));
            } catch (Throwable th) {
                throw new NonTransientException(th.getMessage(), th);
            } finally {
                tx.setAutoCommit(previousAutoCommitMode);
            }
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }
    // Bulk-load task rows for the given ids on an existing connection, using a
    // dynamically sized IN (...) bind list.
    private List<TaskModel> getTasks(Connection connection, List<String> taskIds) {
        if (taskIds.isEmpty()) {
            return Lists.newArrayList();
        }
        // Generate a formatted query string with a variable number of bind params based
        // on taskIds.size()
        final String GET_TASKS_FOR_IDS =
                String.format(
                        "SELECT json_data FROM task WHERE task_id IN (%s) AND json_data IS NOT NULL",
                        Query.generateInBindings(taskIds.size()));
        return query(
                connection,
                GET_TASKS_FOR_IDS,
                q -> q.addParameters(taskIds).executeAndFetch(TaskModel.class));
    }
    // Shared create/update path. The task list is detached before serializing the
    // workflow JSON (tasks are stored separately) and re-attached afterwards; the
    // pending set is kept in sync with the workflow's terminal state.
    private String insertOrUpdateWorkflow(WorkflowModel workflow, boolean update) {
        Preconditions.checkNotNull(workflow, "workflow object cannot be null");
        boolean terminal = workflow.getStatus().isTerminal();
        List<TaskModel> tasks = workflow.getTasks();
        workflow.setTasks(Lists.newLinkedList());
        withTransaction(
                tx -> {
                    if (!update) {
                        addWorkflow(tx, workflow);
                        addWorkflowDefToWorkflowMapping(tx, workflow);
                    } else {
                        updateWorkflow(tx, workflow);
                    }
                    if (terminal) {
                        removePendingWorkflow(
                                tx, workflow.getWorkflowName(), workflow.getWorkflowId());
                    } else {
                        addPendingWorkflow(
                                tx, workflow.getWorkflowName(), workflow.getWorkflowId());
                    }
                });
        workflow.setTasks(tasks);
        return workflow.getWorkflowId();
    }
    // Connection-scoped task update: keeps the in-progress flag aligned with the
    // task status (when concurrency limiting applies), persists the JSON, clears the
    // in-progress marker for terminal tasks and (re)ensures the workflow mapping.
    private void updateTask(Connection connection, TaskModel task) {
        Optional<TaskDef> taskDefinition = task.getTaskDefinition();
        if (taskDefinition.isPresent() && taskDefinition.get().concurrencyLimit() > 0) {
            boolean inProgress =
                    task.getStatus() != null
                            && task.getStatus().equals(TaskModel.Status.IN_PROGRESS);
            updateInProgressStatus(connection, task, inProgress);
        }
        insertOrUpdateTaskData(connection, task);
        if (task.getStatus() != null && task.getStatus().isTerminal()) {
            removeTaskInProgress(connection, task);
        }
        addWorkflowToTaskMapping(connection, task);
    }
private WorkflowModel readWorkflow(Connection connection, String workflowId) {
String GET_WORKFLOW = "SELECT json_data FROM workflow WHERE workflow_id = ?";
return query(
connection,
GET_WORKFLOW,
q -> q.addParameter(workflowId).executeAndFetchFirst(WorkflowModel.class));
}
    // Insert a fresh workflow row (id, correlation id, serialized JSON).
    private void addWorkflow(Connection connection, WorkflowModel workflow) {
        String INSERT_WORKFLOW =
                "INSERT INTO workflow (workflow_id, correlation_id, json_data) VALUES (?, ?, ?)";
        execute(
                connection,
                INSERT_WORKFLOW,
                q ->
                        q.addParameter(workflow.getWorkflowId())
                                .addParameter(workflow.getCorrelationId())
                                .addJsonParameter(workflow)
                                .executeUpdate());
    }
    // Rewrite a workflow's JSON and bump its modified_on timestamp.
    private void updateWorkflow(Connection connection, WorkflowModel workflow) {
        String UPDATE_WORKFLOW =
                "UPDATE workflow SET json_data = ?, modified_on = CURRENT_TIMESTAMP WHERE workflow_id = ?";
        execute(
                connection,
                UPDATE_WORKFLOW,
                q ->
                        q.addJsonParameter(workflow)
                                .addParameter(workflow.getWorkflowId())
                                .executeUpdate());
    }
private void removeWorkflow(Connection connection, String workflowId) {
String REMOVE_WORKFLOW = "DELETE FROM workflow WHERE workflow_id = ?";
execute(connection, REMOVE_WORKFLOW, q -> q.addParameter(workflowId).executeDelete());
}
    // Mark a workflow as pending (RUNNING). Check-then-insert, with INSERT IGNORE as a
    // safety net against a concurrent writer racing between the two statements.
    private void addPendingWorkflow(Connection connection, String workflowType, String workflowId) {
        String EXISTS_PENDING_WORKFLOW =
                "SELECT EXISTS(SELECT 1 FROM workflow_pending WHERE workflow_type = ? AND workflow_id = ?)";
        boolean exists =
                query(
                        connection,
                        EXISTS_PENDING_WORKFLOW,
                        q -> q.addParameter(workflowType).addParameter(workflowId).exists());
        if (!exists) {
            String INSERT_PENDING_WORKFLOW =
                    "INSERT IGNORE INTO workflow_pending (workflow_type, workflow_id) VALUES (?, ?)";
            execute(
                    connection,
                    INSERT_PENDING_WORKFLOW,
                    q -> q.addParameter(workflowType).addParameter(workflowId).executeUpdate());
        }
    }
    // Remove a workflow from the pending (RUNNING) set.
    private void removePendingWorkflow(
            Connection connection, String workflowType, String workflowId) {
        String REMOVE_PENDING_WORKFLOW =
                "DELETE FROM workflow_pending WHERE workflow_type = ? AND workflow_id = ?";
        execute(
                connection,
                REMOVE_PENDING_WORKFLOW,
                q -> q.addParameter(workflowType).addParameter(workflowId).executeDelete());
    }
    // Upsert a task's JSON payload: UPDATE first (common case), INSERT fallback.
    private void insertOrUpdateTaskData(Connection connection, TaskModel task) {
        /*
         * Most times the row will be updated so let's try the update first. This used to be an 'INSERT/ON DUPLICATE KEY update' sql statement. The problem with that
         * is that if we try the INSERT first, the sequence will be increased even if the ON DUPLICATE KEY happens.
         */
        String UPDATE_TASK =
                "UPDATE task SET json_data=?, modified_on=CURRENT_TIMESTAMP WHERE task_id=?";
        int rowsUpdated =
                query(
                        connection,
                        UPDATE_TASK,
                        q ->
                                q.addJsonParameter(task)
                                        .addParameter(task.getTaskId())
                                        .executeUpdate());
        if (rowsUpdated == 0) {
            // ON DUPLICATE KEY still guards against a concurrent insert of the same id.
            String INSERT_TASK =
                    "INSERT INTO task (task_id, json_data, modified_on) VALUES (?, ?, CURRENT_TIMESTAMP) ON DUPLICATE KEY UPDATE json_data=VALUES(json_data), modified_on=VALUES(modified_on)";
            execute(
                    connection,
                    INSERT_TASK,
                    q -> q.addParameter(task.getTaskId()).addJsonParameter(task).executeUpdate());
        }
    }
private void removeTaskData(Connection connection, TaskModel task) {
String REMOVE_TASK = "DELETE FROM task WHERE task_id = ?";
execute(connection, REMOVE_TASK, q -> q.addParameter(task.getTaskId()).executeDelete());
}
private void addWorkflowToTaskMapping(Connection connection, TaskModel task) {
String EXISTS_WORKFLOW_TO_TASK =
"SELECT EXISTS(SELECT 1 FROM workflow_to_task WHERE workflow_id = ? AND task_id = ?)";
boolean exists =
query(
connection,
EXISTS_WORKFLOW_TO_TASK,
q ->
q.addParameter(task.getWorkflowInstanceId())
.addParameter(task.getTaskId())
.exists());
if (!exists) {
String INSERT_WORKFLOW_TO_TASK =
"INSERT IGNORE INTO workflow_to_task (workflow_id, task_id) VALUES (?, ?)";
execute(
connection,
INSERT_WORKFLOW_TO_TASK,
q ->
q.addParameter(task.getWorkflowInstanceId())
.addParameter(task.getTaskId())
.executeUpdate());
}
}
private void removeWorkflowToTaskMapping(Connection connection, TaskModel task) {
String REMOVE_WORKFLOW_TO_TASK =
"DELETE FROM workflow_to_task WHERE workflow_id = ? AND task_id = ?";
execute(
connection,
REMOVE_WORKFLOW_TO_TASK,
q ->
q.addParameter(task.getWorkflowInstanceId())
.addParameter(task.getTaskId())
.executeDelete());
}
private void addWorkflowDefToWorkflowMapping(Connection connection, WorkflowModel workflow) {
String INSERT_WORKFLOW_DEF_TO_WORKFLOW =
"INSERT INTO workflow_def_to_workflow (workflow_def, date_str, workflow_id) VALUES (?, ?, ?)";
execute(
connection,
INSERT_WORKFLOW_DEF_TO_WORKFLOW,
q ->
q.addParameter(workflow.getWorkflowName())
.addParameter(dateStr(workflow.getCreateTime()))
.addParameter(workflow.getWorkflowId())
.executeUpdate());
}
private void removeWorkflowDefToWorkflowMapping(Connection connection, WorkflowModel workflow) {
String REMOVE_WORKFLOW_DEF_TO_WORKFLOW =
"DELETE FROM workflow_def_to_workflow WHERE workflow_def = ? AND date_str = ? AND workflow_id = ?";
execute(
connection,
REMOVE_WORKFLOW_DEF_TO_WORKFLOW,
q ->
q.addParameter(workflow.getWorkflowName())
.addParameter(dateStr(workflow.getCreateTime()))
.addParameter(workflow.getWorkflowId())
.executeUpdate());
}
@VisibleForTesting
boolean addScheduledTask(Connection connection, TaskModel task, String taskKey) {
final String EXISTS_SCHEDULED_TASK =
"SELECT EXISTS(SELECT 1 FROM task_scheduled where workflow_id = ? AND task_key = ?)";
boolean exists =
query(
connection,
EXISTS_SCHEDULED_TASK,
q ->
q.addParameter(task.getWorkflowInstanceId())
.addParameter(taskKey)
.exists());
if (!exists) {
final String INSERT_IGNORE_SCHEDULED_TASK =
"INSERT IGNORE INTO task_scheduled (workflow_id, task_key, task_id) VALUES (?, ?, ?)";
int count =
query(
connection,
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | true |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/mysql-persistence/src/main/java/com/netflix/conductor/mysql/dao/MySQLBaseDAO.java | mysql-persistence/src/main/java/com/netflix/conductor/mysql/dao/MySQLBaseDAO.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.mysql.dao;
import java.io.IOException;
import java.sql.Connection;
import java.sql.SQLException;
import java.time.Duration;
import java.time.Instant;
import java.util.Arrays;
import java.util.List;
import java.util.function.Consumer;
import javax.sql.DataSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.mysql.util.*;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableList;
public abstract class MySQLBaseDAO {

    /** Classes whose stack frames are skipped when resolving the calling method for trace logs. */
    private static final List<String> EXCLUDED_STACKTRACE_CLASS =
            ImmutableList.of(MySQLBaseDAO.class.getName(), Thread.class.getName());

    protected final Logger logger = LoggerFactory.getLogger(getClass());
    protected final ObjectMapper objectMapper;
    protected final DataSource dataSource;

    private final RetryTemplate retryTemplate;

    protected MySQLBaseDAO(
            RetryTemplate retryTemplate, ObjectMapper objectMapper, DataSource dataSource) {
        this.retryTemplate = retryTemplate;
        this.objectMapper = objectMapper;
        this.dataSource = dataSource;
    }

    /**
     * Lazily resolves the name of the nearest method on the current stack that is not declared by
     * this class or {@link Thread}. Evaluation is deferred via {@link LazyToString} so the stack
     * walk only happens when the result is actually rendered (i.e. trace logging is enabled).
     *
     * @return a {@link LazyToString} whose {@code toString()} yields the caller's method name
     */
    protected final LazyToString getCallingMethod() {
        return new LazyToString(
                () ->
                        Arrays.stream(Thread.currentThread().getStackTrace())
                                .filter(
                                        ste ->
                                                !EXCLUDED_STACKTRACE_CLASS.contains(
                                                        ste.getClassName()))
                                .findFirst()
                                .map(StackTraceElement::getMethodName)
                                .orElseThrow(() -> new NullPointerException("Cannot find Caller")));
    }

    /**
     * Serializes {@code value} to JSON using the configured {@link ObjectMapper}.
     *
     * @param value the object to serialize
     * @return the JSON representation
     * @throws NonTransientException if serialization fails
     */
    protected String toJson(Object value) {
        try {
            return objectMapper.writeValueAsString(value);
        } catch (JsonProcessingException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Deserializes {@code json} into an instance of {@code tClass}.
     *
     * @param json the JSON document to read
     * @param tClass the target type
     * @return the deserialized instance
     * @throws NonTransientException if deserialization fails
     */
    protected <T> T readValue(String json, Class<T> tClass) {
        try {
            return objectMapper.readValue(json, tClass);
        } catch (IOException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Deserializes {@code json} into the generic type captured by {@code typeReference}.
     *
     * @param json the JSON document to read
     * @param typeReference the target generic type
     * @return the deserialized instance
     * @throws NonTransientException if deserialization fails
     */
    protected <T> T readValue(String json, TypeReference<T> typeReference) {
        try {
            return objectMapper.readValue(json, typeReference);
        } catch (IOException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Initialize a new transactional {@link Connection} from {@link #dataSource} and pass it to
     * {@literal function}.
     *
     * <p>Successful executions of {@literal function} will result in a commit and return of {@link
     * TransactionalFunction#apply(Connection)}.
     *
     * <p>If any {@link Throwable} thrown from {@code TransactionalFunction#apply(Connection)} will
     * result in a rollback of the transaction and will be wrapped in an {@link
     * NonTransientException} if it is not already one.
     *
     * <p>Generally this is used to wrap multiple {@link #execute(Connection, String,
     * ExecuteFunction)} or {@link #query(Connection, String, QueryFunction)} invocations that
     * produce some expected return value.
     *
     * @param function The function to apply with a new transactional {@link Connection}
     * @param <R> The return type.
     * @return The result of {@code TransactionalFunction#apply(Connection)}
     * @throws NonTransientException If any errors occur.
     */
    private <R> R getWithTransaction(final TransactionalFunction<R> function) {
        final Instant start = Instant.now();
        LazyToString callingMethod = getCallingMethod();
        logger.trace("{} : starting transaction", callingMethod);
        try (Connection tx = dataSource.getConnection()) {
            boolean previousAutoCommitMode = tx.getAutoCommit();
            tx.setAutoCommit(false);
            try {
                R result = function.apply(tx);
                tx.commit();
                return result;
            } catch (Throwable th) {
                tx.rollback();
                if (th instanceof NonTransientException) {
                    throw th;
                }
                throw new NonTransientException(th.getMessage(), th);
            } finally {
                // Restore the connection's original mode before it returns to the pool.
                tx.setAutoCommit(previousAutoCommitMode);
            }
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        } finally {
            logger.trace(
                    "{} : took {}ms",
                    callingMethod,
                    Duration.between(start, Instant.now()).toMillis());
        }
    }

    /**
     * Executes {@code function} via {@link #getWithTransaction(TransactionalFunction)}, retrying
     * according to the configured {@link RetryTemplate}.
     *
     * <p>A {@link NonTransientException} that escapes the retry loop is rethrown as-is (matching
     * {@link #getWithTransaction(TransactionalFunction)}) rather than being wrapped a second time,
     * so callers never see a doubly-nested cause chain.
     *
     * @param function The function to apply with a new transactional {@link Connection}
     * @param <R> The return type.
     * @return The result of {@code TransactionalFunction#apply(Connection)}
     * @throws NonTransientException If all retries are exhausted or a non-retryable error occurs.
     */
    <R> R getWithRetriedTransactions(final TransactionalFunction<R> function) {
        try {
            return retryTemplate.execute(context -> getWithTransaction(function));
        } catch (NonTransientException e) {
            // Already the canonical wrapper type; avoid double-wrapping.
            throw e;
        } catch (Exception e) {
            throw new NonTransientException(e.getMessage(), e);
        }
    }

    /**
     * Variant of {@link #getWithTransaction(TransactionalFunction)} that suppresses failures of
     * {@code function}: the transaction is rolled back, the error is logged, and {@literal null}
     * is returned instead of an exception being propagated. {@link SQLException}s raised while
     * obtaining or closing the connection itself are still propagated.
     *
     * @param function The function to apply with a new transactional {@link Connection}
     * @param <R> The return type.
     * @return The result of {@code function}, or {@literal null} if it threw.
     */
    protected <R> R getWithTransactionWithOutErrorPropagation(TransactionalFunction<R> function) {
        Instant start = Instant.now();
        LazyToString callingMethod = getCallingMethod();
        logger.trace("{} : starting transaction", callingMethod);
        try (Connection tx = dataSource.getConnection()) {
            boolean previousAutoCommitMode = tx.getAutoCommit();
            tx.setAutoCommit(false);
            try {
                R result = function.apply(tx);
                tx.commit();
                return result;
            } catch (Throwable th) {
                tx.rollback();
                // Log the full stack trace, not just the message, since the error is
                // intentionally swallowed here and this log line is its only record.
                logger.info(th.getMessage(), th);
                return null;
            } finally {
                tx.setAutoCommit(previousAutoCommitMode);
            }
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        } finally {
            logger.trace(
                    "{} : took {}ms",
                    callingMethod,
                    Duration.between(start, Instant.now()).toMillis());
        }
    }

    /**
     * Wraps {@link #getWithRetriedTransactions(TransactionalFunction)} with no return value.
     *
     * <p>Generally this is used to wrap multiple {@link #execute(Connection, String,
     * ExecuteFunction)} or {@link #query(Connection, String, QueryFunction)} invocations that
     * produce no expected return value.
     *
     * @param consumer The {@link Consumer} callback to pass a transactional {@link Connection} to.
     * @throws NonTransientException If any errors occur.
     * @see #getWithRetriedTransactions(TransactionalFunction)
     */
    protected void withTransaction(Consumer<Connection> consumer) {
        getWithRetriedTransactions(
                connection -> {
                    consumer.accept(connection);
                    return null;
                });
    }

    /**
     * Initiate a new transaction and execute a {@link Query} within that context, then return the
     * results of {@literal function}.
     *
     * @param query The query string to prepare.
     * @param function The functional callback to pass a {@link Query} to.
     * @param <R> The expected return type of {@literal function}.
     * @return The results of applying {@literal function}.
     */
    protected <R> R queryWithTransaction(String query, QueryFunction<R> function) {
        return getWithRetriedTransactions(tx -> query(tx, query, function));
    }

    /**
     * Execute a {@link Query} within the context of a given transaction and return the results of
     * {@literal function}.
     *
     * @param tx The transactional {@link Connection} to use.
     * @param query The query string to prepare.
     * @param function The functional callback to pass a {@link Query} to.
     * @param <R> The expected return type of {@literal function}.
     * @return The results of applying {@literal function}.
     */
    protected <R> R query(Connection tx, String query, QueryFunction<R> function) {
        try (Query q = new Query(objectMapper, tx, query)) {
            return function.apply(q);
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Execute a statement with no expected return value within a given transaction.
     *
     * @param tx The transactional {@link Connection} to use.
     * @param query The query string to prepare.
     * @param function The functional callback to pass a {@link Query} to.
     */
    protected void execute(Connection tx, String query, ExecuteFunction function) {
        try (Query q = new Query(objectMapper, tx, query)) {
            function.apply(q);
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Instantiates a new transactional connection and invokes {@link #execute(Connection, String,
     * ExecuteFunction)}
     *
     * @param query The query string to prepare.
     * @param function The functional callback to pass a {@link Query} to.
     */
    protected void executeWithTransaction(String query, ExecuteFunction function) {
        withTransaction(tx -> execute(tx, query, function));
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/mysql-persistence/src/main/java/com/netflix/conductor/mysql/dao/MySQLMetadataDAO.java | mysql-persistence/src/main/java/com/netflix/conductor/mysql/dao/MySQLMetadataDAO.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.mysql.dao;
import java.sql.Connection;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import javax.sql.DataSource;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.core.exception.ConflictException;
import com.netflix.conductor.core.exception.NotFoundException;
import com.netflix.conductor.dao.EventHandlerDAO;
import com.netflix.conductor.dao.MetadataDAO;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.mysql.config.MySQLProperties;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Preconditions;
public class MySQLMetadataDAO extends MySQLBaseDAO implements MetadataDAO, EventHandlerDAO {

    /** In-memory TaskDef cache, refreshed periodically by {@link #refreshTaskDefs()}. */
    private final ConcurrentHashMap<String, TaskDef> taskDefCache = new ConcurrentHashMap<>();

    private static final String CLASS_NAME = MySQLMetadataDAO.class.getSimpleName();

    public MySQLMetadataDAO(
            RetryTemplate retryTemplate,
            ObjectMapper objectMapper,
            DataSource dataSource,
            MySQLProperties properties) {
        super(retryTemplate, objectMapper, dataSource);

        long cacheRefreshTime = properties.getTaskDefCacheRefreshInterval().getSeconds();
        // Use a named daemon thread for the periodic cache refresh: the default factory
        // creates a non-daemon thread and this executor is never shut down, which would
        // otherwise keep the JVM alive after the application stops.
        Executors.newSingleThreadScheduledExecutor(
                        runnable -> {
                            Thread thread = new Thread(runnable, "mysql-taskdef-cache-refresh");
                            thread.setDaemon(true);
                            return thread;
                        })
                .scheduleWithFixedDelay(
                        this::refreshTaskDefs,
                        cacheRefreshTime,
                        cacheRefreshTime,
                        TimeUnit.SECONDS);
    }

    /** Creates (or overwrites) the task definition and returns it. */
    @Override
    public TaskDef createTaskDef(TaskDef taskDef) {
        validate(taskDef);
        insertOrUpdateTaskDef(taskDef);
        return taskDef;
    }

    /** Updates (or creates) the task definition and returns it. */
    @Override
    public TaskDef updateTaskDef(TaskDef taskDef) {
        validate(taskDef);
        insertOrUpdateTaskDef(taskDef);
        return taskDef;
    }

    /**
     * Returns the task definition for {@code name}, consulting {@link #taskDefCache} first and
     * falling back to the database on a cache miss.
     */
    @Override
    public TaskDef getTaskDef(String name) {
        Preconditions.checkNotNull(name, "TaskDef name cannot be null");
        TaskDef taskDef = taskDefCache.get(name);
        if (taskDef == null) {
            if (logger.isTraceEnabled()) {
                logger.trace("Cache miss: {}", name);
            }
            taskDef = getTaskDefFromDB(name);
        }
        return taskDef;
    }

    /** Returns all task definitions, read directly from the database (not the cache). */
    @Override
    public List<TaskDef> getAllTaskDefs() {
        return getWithRetriedTransactions(this::findAllTaskDefs);
    }

    /**
     * Removes the task definition named {@code name} and evicts it from the cache.
     *
     * @throws NotFoundException if no such definition exists
     */
    @Override
    public void removeTaskDef(String name) {
        final String DELETE_TASKDEF_QUERY = "DELETE FROM meta_task_def WHERE name = ?";

        executeWithTransaction(
                DELETE_TASKDEF_QUERY,
                q -> {
                    if (!q.addParameter(name).executeDelete()) {
                        throw new NotFoundException("No such task definition");
                    }

                    taskDefCache.remove(name);
                });
    }

    /**
     * Creates the workflow definition.
     *
     * @throws ConflictException if a definition with the same name and version already exists
     */
    @Override
    public void createWorkflowDef(WorkflowDef def) {
        validate(def);

        withTransaction(
                tx -> {
                    if (workflowExists(tx, def)) {
                        throw new ConflictException(
                                "Workflow with " + def.key() + " already exists!");
                    }

                    insertOrUpdateWorkflowDef(tx, def);
                });
    }

    /** Updates (or creates) the workflow definition. */
    @Override
    public void updateWorkflowDef(WorkflowDef def) {
        validate(def);
        withTransaction(tx -> insertOrUpdateWorkflowDef(tx, def));
    }

    /** Returns the definition whose version equals the stored {@code latest_version}, if any. */
    @Override
    public Optional<WorkflowDef> getLatestWorkflowDef(String name) {
        final String GET_LATEST_WORKFLOW_DEF_QUERY =
                "SELECT json_data FROM meta_workflow_def WHERE NAME = ? AND "
                        + "version = latest_version";

        return Optional.ofNullable(
                queryWithTransaction(
                        GET_LATEST_WORKFLOW_DEF_QUERY,
                        q -> q.addParameter(name).executeAndFetchFirst(WorkflowDef.class)));
    }

    /** Returns the definition for the exact (name, version) pair, if any. */
    @Override
    public Optional<WorkflowDef> getWorkflowDef(String name, int version) {
        final String GET_WORKFLOW_DEF_QUERY =
                "SELECT json_data FROM meta_workflow_def WHERE NAME = ? AND version = ?";
        return Optional.ofNullable(
                queryWithTransaction(
                        GET_WORKFLOW_DEF_QUERY,
                        q ->
                                q.addParameter(name)
                                        .addParameter(version)
                                        .executeAndFetchFirst(WorkflowDef.class)));
    }

    /**
     * Removes the given (name, version) definition and recomputes {@code latest_version} from the
     * remaining rows.
     *
     * @throws NotFoundException if no such definition exists
     */
    @Override
    public void removeWorkflowDef(String name, Integer version) {
        final String DELETE_WORKFLOW_QUERY =
                "DELETE from meta_workflow_def WHERE name = ? AND version = ?";

        withTransaction(
                tx -> {
                    // remove specified workflow
                    execute(
                            tx,
                            DELETE_WORKFLOW_QUERY,
                            q -> {
                                if (!q.addParameter(name).addParameter(version).executeDelete()) {
                                    throw new NotFoundException(
                                            String.format(
                                                    "No such workflow definition: %s version: %d",
                                                    name, version));
                                }
                            });
                    // reset latest version based on remaining rows for this workflow
                    Optional<Integer> maxVersion = getLatestVersion(tx, name);
                    maxVersion.ifPresent(newVersion -> updateLatestVersion(tx, name, newVersion));
                });
    }

    /** Returns the distinct names of all stored workflow definitions. */
    public List<String> findAll() {
        final String FIND_ALL_WORKFLOW_DEF_QUERY = "SELECT DISTINCT name FROM meta_workflow_def";
        return queryWithTransaction(
                FIND_ALL_WORKFLOW_DEF_QUERY, q -> q.executeAndFetch(String.class));
    }

    /** Returns every stored workflow definition, every version, ordered by name then version. */
    @Override
    public List<WorkflowDef> getAllWorkflowDefs() {
        final String GET_ALL_WORKFLOW_DEF_QUERY =
                "SELECT json_data FROM meta_workflow_def ORDER BY name, version";

        return queryWithTransaction(
                GET_ALL_WORKFLOW_DEF_QUERY, q -> q.executeAndFetch(WorkflowDef.class));
    }

    /** Returns, for each workflow name, the definition with the highest stored version. */
    @Override
    public List<WorkflowDef> getAllWorkflowDefsLatestVersions() {
        final String GET_ALL_WORKFLOW_DEF_LATEST_VERSIONS_QUERY =
                "SELECT json_data FROM meta_workflow_def wd WHERE wd.version = (SELECT MAX(version) FROM meta_workflow_def wd2 WHERE wd2.name = wd.name)";

        return queryWithTransaction(
                GET_ALL_WORKFLOW_DEF_LATEST_VERSIONS_QUERY,
                q -> q.executeAndFetch(WorkflowDef.class));
    }

    /** Returns all definitions whose version matches their stored {@code latest_version}. */
    public List<WorkflowDef> getAllLatest() {
        final String GET_ALL_LATEST_WORKFLOW_DEF_QUERY =
                "SELECT json_data FROM meta_workflow_def WHERE version = " + "latest_version";

        return queryWithTransaction(
                GET_ALL_LATEST_WORKFLOW_DEF_QUERY, q -> q.executeAndFetch(WorkflowDef.class));
    }

    /** Returns every stored version of the named workflow definition, ordered by version. */
    public List<WorkflowDef> getAllVersions(String name) {
        final String GET_ALL_VERSIONS_WORKFLOW_DEF_QUERY =
                "SELECT json_data FROM meta_workflow_def WHERE name = ? " + "ORDER BY version";

        return queryWithTransaction(
                GET_ALL_VERSIONS_WORKFLOW_DEF_QUERY,
                q -> q.addParameter(name).executeAndFetch(WorkflowDef.class));
    }

    /**
     * Creates the event handler.
     *
     * @throws ConflictException if a handler with the same name already exists
     */
    @Override
    public void addEventHandler(EventHandler eventHandler) {
        Preconditions.checkNotNull(eventHandler.getName(), "EventHandler name cannot be null");

        final String INSERT_EVENT_HANDLER_QUERY =
                "INSERT INTO meta_event_handler (name, event, active, json_data) "
                        + "VALUES (?, ?, ?, ?)";

        withTransaction(
                tx -> {
                    if (getEventHandler(tx, eventHandler.getName()) != null) {
                        throw new ConflictException(
                                "EventHandler with name "
                                        + eventHandler.getName()
                                        + " already exists!");
                    }

                    execute(
                            tx,
                            INSERT_EVENT_HANDLER_QUERY,
                            q ->
                                    q.addParameter(eventHandler.getName())
                                            .addParameter(eventHandler.getEvent())
                                            .addParameter(eventHandler.isActive())
                                            .addJsonParameter(eventHandler)
                                            .executeUpdate());
                });
    }

    /**
     * Updates the existing event handler with the same name.
     *
     * @throws NotFoundException if no handler with that name exists
     */
    @Override
    public void updateEventHandler(EventHandler eventHandler) {
        Preconditions.checkNotNull(eventHandler.getName(), "EventHandler name cannot be null");

        // @formatter:off
        final String UPDATE_EVENT_HANDLER_QUERY =
                "UPDATE meta_event_handler SET "
                        + "event = ?, active = ?, json_data = ?, "
                        + "modified_on = CURRENT_TIMESTAMP WHERE name = ?";
        // @formatter:on

        withTransaction(
                tx -> {
                    EventHandler existing = getEventHandler(tx, eventHandler.getName());
                    if (existing == null) {
                        throw new NotFoundException(
                                "EventHandler with name " + eventHandler.getName() + " not found!");
                    }

                    execute(
                            tx,
                            UPDATE_EVENT_HANDLER_QUERY,
                            q ->
                                    q.addParameter(eventHandler.getEvent())
                                            .addParameter(eventHandler.isActive())
                                            .addJsonParameter(eventHandler)
                                            .addParameter(eventHandler.getName())
                                            .executeUpdate());
                });
    }

    /**
     * Removes the named event handler.
     *
     * @throws NotFoundException if no handler with that name exists
     */
    @Override
    public void removeEventHandler(String name) {
        final String DELETE_EVENT_HANDLER_QUERY = "DELETE FROM meta_event_handler WHERE name = ?";

        withTransaction(
                tx -> {
                    EventHandler existing = getEventHandler(tx, name);
                    if (existing == null) {
                        throw new NotFoundException(
                                "EventHandler with name " + name + " not found!");
                    }

                    execute(
                            tx,
                            DELETE_EVENT_HANDLER_QUERY,
                            q -> q.addParameter(name).executeDelete());
                });
    }

    /** Returns every stored event handler. */
    @Override
    public List<EventHandler> getAllEventHandlers() {
        final String READ_ALL_EVENT_HANDLER_QUERY = "SELECT json_data FROM meta_event_handler";
        return queryWithTransaction(
                READ_ALL_EVENT_HANDLER_QUERY, q -> q.executeAndFetch(EventHandler.class));
    }

    /**
     * Returns the handlers registered for {@code event}, optionally filtered to active ones.
     *
     * @param event the event name to match
     * @param activeOnly when {@literal true}, inactive handlers are excluded
     */
    @Override
    public List<EventHandler> getEventHandlersForEvent(String event, boolean activeOnly) {
        final String READ_ALL_EVENT_HANDLER_BY_EVENT_QUERY =
                "SELECT json_data FROM meta_event_handler WHERE event = ?";
        return queryWithTransaction(
                READ_ALL_EVENT_HANDLER_BY_EVENT_QUERY,
                q -> {
                    q.addParameter(event);
                    return q.executeAndFetch(
                            rs -> {
                                List<EventHandler> handlers = new ArrayList<>();
                                while (rs.next()) {
                                    EventHandler h = readValue(rs.getString(1), EventHandler.class);
                                    if (!activeOnly || h.isActive()) {
                                        handlers.add(h);
                                    }
                                }

                                return handlers;
                            });
                });
    }

    /**
     * Use {@link Preconditions} to check for required {@link TaskDef} fields, throwing a Runtime
     * exception if validations fail.
     *
     * @param taskDef The {@code TaskDef} to check.
     */
    private void validate(TaskDef taskDef) {
        Preconditions.checkNotNull(taskDef, "TaskDef object cannot be null");
        Preconditions.checkNotNull(taskDef.getName(), "TaskDef name cannot be null");
    }

    /**
     * Use {@link Preconditions} to check for required {@link WorkflowDef} fields, throwing a
     * Runtime exception if validations fail.
     *
     * @param def The {@code WorkflowDef} to check.
     */
    private void validate(WorkflowDef def) {
        Preconditions.checkNotNull(def, "WorkflowDef object cannot be null");
        Preconditions.checkNotNull(def.getName(), "WorkflowDef name cannot be null");
    }

    /**
     * Retrieve a {@link EventHandler} by {@literal name}.
     *
     * @param connection The {@link Connection} to use for queries.
     * @param name The {@code EventHandler} name to look for.
     * @return {@literal null} if nothing is found, otherwise the {@code EventHandler}.
     */
    private EventHandler getEventHandler(Connection connection, String name) {
        final String READ_ONE_EVENT_HANDLER_QUERY =
                "SELECT json_data FROM meta_event_handler WHERE name = ?";

        return query(
                connection,
                READ_ONE_EVENT_HANDLER_QUERY,
                q -> q.addParameter(name).executeAndFetchFirst(EventHandler.class));
    }

    /**
     * Check if a {@link WorkflowDef} with the same {@literal name} and {@literal version} already
     * exist.
     *
     * @param connection The {@link Connection} to use for queries.
     * @param def The {@code WorkflowDef} to check for.
     * @return {@literal true} if a {@code WorkflowDef} already exists with the same values.
     */
    private Boolean workflowExists(Connection connection, WorkflowDef def) {
        final String CHECK_WORKFLOW_DEF_EXISTS_QUERY =
                "SELECT COUNT(*) FROM meta_workflow_def WHERE name = ? AND " + "version = ?";

        return query(
                connection,
                CHECK_WORKFLOW_DEF_EXISTS_QUERY,
                q -> q.addParameter(def.getName()).addParameter(def.getVersion()).exists());
    }

    /**
     * Return the latest version that exists for the provided {@code name}.
     *
     * @param tx The {@link Connection} to use for queries.
     * @param name The {@code name} to check for.
     * @return {@code Optional.empty()} if no versions exist, otherwise the max {@link
     *     WorkflowDef#getVersion} found.
     */
    private Optional<Integer> getLatestVersion(Connection tx, String name) {
        final String GET_LATEST_WORKFLOW_DEF_VERSION =
                "SELECT max(version) AS version FROM meta_workflow_def WHERE " + "name = ?";

        Integer val =
                query(
                        tx,
                        GET_LATEST_WORKFLOW_DEF_VERSION,
                        q -> {
                            q.addParameter(name);
                            return q.executeAndFetch(
                                    rs -> {
                                        if (!rs.next()) {
                                            return null;
                                        }

                                        return rs.getInt(1);
                                    });
                        });

        return Optional.ofNullable(val);
    }

    /**
     * Update the latest version for the workflow with name {@code WorkflowDef} to the version
     * provided in {@literal version}.
     *
     * @param tx The {@link Connection} to use for queries.
     * @param name Workflow def name to update
     * @param version The new latest {@code version} value.
     */
    private void updateLatestVersion(Connection tx, String name, int version) {
        final String UPDATE_WORKFLOW_DEF_LATEST_VERSION_QUERY =
                "UPDATE meta_workflow_def SET latest_version = ? " + "WHERE name = ?";

        execute(
                tx,
                UPDATE_WORKFLOW_DEF_LATEST_VERSION_QUERY,
                q -> q.addParameter(version).addParameter(name).executeUpdate());
    }

    /**
     * Inserts or updates the (name, version) row for {@code def}, then recomputes and stores the
     * {@code latest_version} column so it never moves backwards.
     */
    private void insertOrUpdateWorkflowDef(Connection tx, WorkflowDef def) {
        final String INSERT_WORKFLOW_DEF_QUERY =
                "INSERT INTO meta_workflow_def (name, version, json_data) VALUES (?," + " ?, ?)";

        Optional<Integer> version = getLatestVersion(tx, def.getName());
        if (!workflowExists(tx, def)) {
            execute(
                    tx,
                    INSERT_WORKFLOW_DEF_QUERY,
                    q ->
                            q.addParameter(def.getName())
                                    .addParameter(def.getVersion())
                                    .addJsonParameter(def)
                                    .executeUpdate());
        } else {
            // @formatter:off
            final String UPDATE_WORKFLOW_DEF_QUERY =
                    "UPDATE meta_workflow_def "
                            + "SET json_data = ?, modified_on = CURRENT_TIMESTAMP "
                            + "WHERE name = ? AND version = ?";
            // @formatter:on

            execute(
                    tx,
                    UPDATE_WORKFLOW_DEF_QUERY,
                    q ->
                            q.addJsonParameter(def)
                                    .addParameter(def.getName())
                                    .addParameter(def.getVersion())
                                    .executeUpdate());
        }
        int maxVersion = def.getVersion();
        if (version.isPresent() && version.get() > def.getVersion()) {
            maxVersion = version.get();
        }

        updateLatestVersion(tx, def.getName(), maxVersion);
    }

    /**
     * Query persistence for all defined {@link TaskDef} data, and cache it in {@link
     * #taskDefCache}.
     */
    private void refreshTaskDefs() {
        try {
            withTransaction(
                    tx -> {
                        Map<String, TaskDef> map = new HashMap<>();
                        findAllTaskDefs(tx).forEach(taskDef -> map.put(taskDef.getName(), taskDef));
                        synchronized (taskDefCache) {
                            taskDefCache.clear();
                            taskDefCache.putAll(map);
                        }
                        if (logger.isTraceEnabled()) {
                            logger.trace("Refreshed {} TaskDefs", taskDefCache.size());
                        }
                    });
        } catch (Exception e) {
            Monitors.error(CLASS_NAME, "refreshTaskDefs");
            logger.error("refresh TaskDefs failed ", e);
        }
    }

    /**
     * Query persistence for all defined {@link TaskDef} data.
     *
     * @param tx The {@link Connection} to use for queries.
     * @return A new {@code List<TaskDef>} with all the {@code TaskDef} data that was retrieved.
     */
    private List<TaskDef> findAllTaskDefs(Connection tx) {
        final String READ_ALL_TASKDEF_QUERY = "SELECT json_data FROM meta_task_def";

        return query(tx, READ_ALL_TASKDEF_QUERY, q -> q.executeAndFetch(TaskDef.class));
    }

    /**
     * Explicitly retrieves a {@link TaskDef} from persistence, avoiding {@link #taskDefCache}.
     *
     * @param name The name of the {@code TaskDef} to query for.
     * @return {@literal null} if nothing is found, otherwise the {@code TaskDef}.
     */
    private TaskDef getTaskDefFromDB(String name) {
        final String READ_ONE_TASKDEF_QUERY = "SELECT json_data FROM meta_task_def WHERE name = ?";

        return queryWithTransaction(
                READ_ONE_TASKDEF_QUERY,
                q -> q.addParameter(name).executeAndFetchFirst(TaskDef.class));
    }

    /**
     * Upserts {@code taskDef}: tries an UPDATE first and falls back to INSERT when no row matched,
     * then refreshes the cache entry.
     *
     * @return the task definition's name
     */
    private String insertOrUpdateTaskDef(TaskDef taskDef) {
        final String UPDATE_TASKDEF_QUERY =
                "UPDATE meta_task_def SET json_data = ?, modified_on = CURRENT_TIMESTAMP WHERE name = ?";

        final String INSERT_TASKDEF_QUERY =
                "INSERT INTO meta_task_def (name, json_data) VALUES (?, ?)";

        return getWithRetriedTransactions(
                tx -> {
                    execute(
                            tx,
                            UPDATE_TASKDEF_QUERY,
                            update -> {
                                int result =
                                        update.addJsonParameter(taskDef)
                                                .addParameter(taskDef.getName())
                                                .executeUpdate();
                                if (result == 0) {
                                    execute(
                                            tx,
                                            INSERT_TASKDEF_QUERY,
                                            insert ->
                                                    insert.addParameter(taskDef.getName())
                                                            .addJsonParameter(taskDef)
                                                            .executeUpdate());
                                }
                            });

                    taskDefCache.put(taskDef.getName(), taskDef);
                    return taskDef.getName();
                });
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/ExecuteFunction.java | mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/ExecuteFunction.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.mysql.util;
import java.sql.SQLException;
/**
 * Functional interface for {@link Query} executions with no expected result.
 *
 * <p>Typically supplied as a lambda that binds parameters on the prepared {@link Query} and then
 * triggers its execution; any result is discarded by the caller.
 *
 * @author mustafa
 */
@FunctionalInterface
public interface ExecuteFunction {

    /**
     * Binds parameters to and executes the supplied {@link Query}.
     *
     * @param query the prepared query to configure and execute
     * @throws SQLException if a JDBC-level error occurs
     */
    void apply(Query query) throws SQLException;
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/LazyToString.java | mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/LazyToString.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.mysql.util;
import java.util.function.Supplier;
/**
 * Defers computation of a String value until {@link #toString()} is invoked.
 *
 * <p>Useful for passing expensive-to-build strings (e.g. stack-walk results) to trace-level log
 * statements: the supplier only runs if the value is actually rendered.
 */
public class LazyToString {

    // Invoked anew on every toString() call; the result is not cached.
    private final Supplier<String> valueSupplier;

    /**
     * @param supplier Supplier to execute when {@link #toString()} is called.
     */
    public LazyToString(Supplier<String> supplier) {
        this.valueSupplier = supplier;
    }

    @Override
    public String toString() {
        return valueSupplier.get();
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/QueryFunction.java | mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/QueryFunction.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.mysql.util;
import java.sql.SQLException;
/**
 * Functional interface for {@link Query} executions that return results.
 *
 * <p>Typically supplied as a lambda that binds parameters on the prepared {@link Query}, executes
 * it, and maps the fetched data to {@code R}.
 *
 * @author mustafa
 */
@FunctionalInterface
public interface QueryFunction<R> {

    /**
     * Binds parameters to, executes, and extracts a result from the supplied {@link Query}.
     *
     * @param query the prepared query to configure and execute
     * @return the mapped result
     * @throws SQLException if a JDBC-level error occurs
     */
    R apply(Query query) throws SQLException;
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/TransactionalFunction.java | mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/TransactionalFunction.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.mysql.util;
import java.sql.Connection;
import java.sql.SQLException;
/**
* Functional interface for operations within a transactional context.
*
* @author mustafa
*/
@FunctionalInterface
public interface TransactionalFunction<R> {

    /**
     * Executes work against the supplied transactional {@link Connection}.
     *
     * @param tx the connection representing the active transaction; commit/rollback and closing
     *     are managed by the caller
     * @return the result of the transactional work
     * @throws SQLException if any database access error occurs
     */
    R apply(Connection tx) throws SQLException;
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/ResultSetHandler.java | mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/ResultSetHandler.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.mysql.util;
import java.sql.ResultSet;
import java.sql.SQLException;
/**
* Functional interface for {@link Query#executeAndFetch(ResultSetHandler)}.
*
* @author mustafa
*/
@FunctionalInterface
public interface ResultSetHandler<R> {

    /**
     * Maps the supplied {@link ResultSet} to a result value.
     *
     * @param resultSet the result set to read; it is closed by the caller after this returns
     * @return the mapped result
     * @throws SQLException if any database access error occurs
     */
    R apply(ResultSet resultSet) throws SQLException;
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/Query.java | mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/Query.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.mysql.util;
import java.io.IOException;
import java.sql.Connection;
import java.sql.Date;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.lang3.math.NumberUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.core.exception.NonTransientException;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
/**
 * Represents a {@link PreparedStatement} that is wrapped with convenience methods and utilities.
 *
 * <p>This class simulates a parameter building pattern and all {@literal addParameter(*)} methods
 * must be called in the proper order of their expected binding sequence.
 *
 * @author mustafa
 */
public class Query implements AutoCloseable {

    private final Logger logger = LoggerFactory.getLogger(getClass());

    /** The {@link ObjectMapper} instance to use for serializing/deserializing JSON. */
    protected final ObjectMapper objectMapper;

    /** The initial supplied query String that was used to prepare {@link #statement}. */
    private final String rawQuery;

    /**
     * Parameter index for the {@code ResultSet#set*(*)} methods, gets incremented every time a
     * parameter is added to the {@code PreparedStatement} {@link #statement}.
     */
    private final AtomicInteger index = new AtomicInteger(1);

    /** The {@link PreparedStatement} that will be managed and executed by this class. */
    private final PreparedStatement statement;

    public Query(ObjectMapper objectMapper, Connection connection, String query) {
        this.rawQuery = query;
        this.objectMapper = objectMapper;

        try {
            this.statement = connection.prepareStatement(query);
        } catch (SQLException ex) {
            throw new NonTransientException(
                    "Cannot prepare statement for query: " + ex.getMessage(), ex);
        }
    }

    /**
     * Generate a String with {@literal count} number of '?' placeholders for {@link
     * PreparedStatement} queries.
     *
     * @param count The number of '?' chars to generate.
     * @return a comma delimited string of {@literal count} '?' binding placeholders.
     */
    public static String generateInBindings(int count) {
        String[] questions = new String[count];
        for (int i = 0; i < count; i++) {
            questions[i] = "?";
        }

        return String.join(", ", questions);
    }

    /** Binds a String value at the next parameter index. */
    public Query addParameter(final String value) {
        return addParameterInternal((ps, idx) -> ps.setString(idx, value));
    }

    /** Binds an int value at the next parameter index. */
    public Query addParameter(final int value) {
        return addParameterInternal((ps, idx) -> ps.setInt(idx, value));
    }

    /** Binds a boolean value at the next parameter index. */
    public Query addParameter(final boolean value) {
        // (dropped the redundant extra parentheses around the lambda present in the original)
        return addParameterInternal((ps, idx) -> ps.setBoolean(idx, value));
    }

    /** Binds a long value at the next parameter index. */
    public Query addParameter(final long value) {
        return addParameterInternal((ps, idx) -> ps.setLong(idx, value));
    }

    /** Binds a double value at the next parameter index. */
    public Query addParameter(final double value) {
        return addParameterInternal((ps, idx) -> ps.setDouble(idx, value));
    }

    /** Binds a {@link java.sql.Date} value at the next parameter index. */
    public Query addParameter(Date date) {
        return addParameterInternal((ps, idx) -> ps.setDate(idx, date));
    }

    /** Binds a {@link Timestamp} value at the next parameter index. */
    public Query addParameter(Timestamp timestamp) {
        return addParameterInternal((ps, idx) -> ps.setTimestamp(idx, timestamp));
    }

    /**
     * Serializes {@literal value} to a JSON string for persistence.
     *
     * @param value The value to serialize.
     * @return {@literal this}
     */
    public Query addJsonParameter(Object value) {
        return addParameter(toJson(value));
    }

    /**
     * Bind the given {@link java.util.Date} to the PreparedStatement as a {@link java.sql.Date}.
     *
     * @param date The {@literal java.util.Date} to bind.
     * @return {@literal this}
     */
    public Query addDateParameter(java.util.Date date) {
        return addParameter(new Date(date.getTime()));
    }

    /**
     * Bind the given {@link java.util.Date} to the PreparedStatement as a {@link
     * java.sql.Timestamp}.
     *
     * @param date The {@literal java.util.Date} to bind.
     * @return {@literal this}
     */
    public Query addTimestampParameter(java.util.Date date) {
        return addParameter(new Timestamp(date.getTime()));
    }

    /**
     * Bind the given epoch millis to the PreparedStatement as a {@link java.sql.Timestamp}.
     *
     * @param epochMillis The epoch ms to create a new {@literal Timestamp} from.
     * @return {@literal this}
     */
    public Query addTimestampParameter(long epochMillis) {
        return addParameter(new Timestamp(epochMillis));
    }

    /**
     * Add a collection of primitive values at once, in the order of the collection.
     *
     * @param values The values to bind to the prepared statement.
     * @return {@literal this}
     * @throws IllegalArgumentException If a non-primitive/unsupported type is encountered in the
     *     collection.
     * @see #addParameters(Object...)
     */
    public Query addParameters(Collection<?> values) {
        // Collection<?> instead of the raw Collection of the original; erasure-identical and
        // source-compatible for callers, but no longer triggers raw-type warnings.
        return addParameters(values.toArray());
    }

    /**
     * Add many primitive values at once.
     *
     * @param values The values to bind to the prepared statement.
     * @return {@literal this}
     * @throws IllegalArgumentException If a non-primitive/unsupported type is encountered.
     */
    public Query addParameters(Object... values) {
        for (Object v : values) {
            if (v instanceof String) {
                addParameter((String) v);
            } else if (v instanceof Integer) {
                addParameter((Integer) v);
            } else if (v instanceof Long) {
                addParameter((Long) v);
            } else if (v instanceof Double) {
                addParameter((Double) v);
            } else if (v instanceof Boolean) {
                addParameter((Boolean) v);
            } else if (v instanceof Date) {
                addParameter((Date) v);
            } else if (v instanceof Timestamp) {
                addParameter((Timestamp) v);
            } else {
                throw new IllegalArgumentException(
                        "Type "
                                + v.getClass().getName()
                                + " is not supported by automatic property assignment");
            }
        }

        return this;
    }

    /**
     * Utility method for evaluating the prepared statement as a query to check the existence of a
     * record using a numeric count or boolean return value.
     *
     * <p>The {@link #rawQuery} provided must result in a {@link Number} or {@link Boolean} result.
     *
     * @return {@literal true} If a count query returned more than 0 or an exists query returns
     *     {@literal true}.
     * @throws NonTransientException If an unexpected return type cannot be evaluated to a {@code
     *     Boolean} result.
     */
    public boolean exists() {
        Object val = executeScalar();
        if (null == val) {
            return false;
        }

        if (val instanceof Number) {
            return convertLong(val) > 0;
        }

        if (val instanceof Boolean) {
            return (Boolean) val;
        }

        if (val instanceof String) {
            return convertBoolean(val);
        }

        throw new NonTransientException(
                "Expected a Numeric or Boolean scalar return value from the query, received "
                        + val.getClass().getName());
    }

    /**
     * Convenience method for executing delete statements.
     *
     * @return {@literal true} if the statement affected 1 or more rows.
     * @see #executeUpdate()
     */
    public boolean executeDelete() {
        int count = executeUpdate();
        if (count > 1) {
            logger.trace("Removed {} row(s) for query {}", count, rawQuery);
        }

        return count > 0;
    }

    /**
     * Convenience method for executing statements that return a single numeric value, typically
     * {@literal SELECT COUNT...} style queries.
     *
     * @return The result of the query as a {@literal long}.
     */
    public long executeCount() {
        return executeScalar(Long.class);
    }

    /**
     * @return The result of {@link PreparedStatement#executeUpdate()}
     */
    public int executeUpdate() {
        try {
            // Timing is only captured when trace logging is enabled, to keep the hot path cheap.
            Long start = null;
            if (logger.isTraceEnabled()) {
                start = System.currentTimeMillis();
            }

            final int val = this.statement.executeUpdate();

            if (null != start && logger.isTraceEnabled()) {
                long end = System.currentTimeMillis();
                logger.trace("[{}ms] {}: {}", (end - start), val, rawQuery);
            }

            return val;
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Execute a query from the PreparedStatement and return the ResultSet.
     *
     * <p><em>NOTE:</em> The returned ResultSet must be closed/managed by the calling methods.
     *
     * @return {@link PreparedStatement#executeQuery()}
     * @throws NonTransientException If any SQL errors occur.
     */
    public ResultSet executeQuery() {
        Long start = null;
        if (logger.isTraceEnabled()) {
            start = System.currentTimeMillis();
        }

        try {
            return this.statement.executeQuery();
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        } finally {
            if (null != start && logger.isTraceEnabled()) {
                long end = System.currentTimeMillis();
                logger.trace("[{}ms] {}", (end - start), rawQuery);
            }
        }
    }

    /**
     * @return The single result of the query as an Object, or {@literal null} if the query
     *     produced no rows. Only the first column of the first row is read.
     */
    public Object executeScalar() {
        try (ResultSet rs = executeQuery()) {
            if (!rs.next()) {
                return null;
            }
            return rs.getObject(1);
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Execute the PreparedStatement and return a single 'primitive' value from the ResultSet.
     *
     * @param returnType The type to return.
     * @param <V> The type parameter to return a List of.
     * @return A single result from the execution of the statement, as a type of {@literal
     *     returnType}. When no row is returned, numeric types yield 0 and Boolean yields false.
     * @throws NonTransientException {@literal returnType} is unsupported, cannot be cast to from
     *     the result, or any SQL errors occur.
     */
    public <V> V executeScalar(Class<V> returnType) {
        try (ResultSet rs = executeQuery()) {
            if (!rs.next()) {
                // No row: fall back to a zero/false default for the common primitive wrappers.
                Object value = null;
                if (Integer.class == returnType) {
                    value = 0;
                } else if (Long.class == returnType) {
                    value = 0L;
                } else if (Boolean.class == returnType) {
                    value = false;
                }
                return returnType.cast(value);
            } else {
                return getScalarFromResultSet(rs, returnType);
            }
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Execute the PreparedStatement and return a List of 'primitive' values from the ResultSet.
     *
     * @param returnType The type Class return a List of.
     * @param <V> The type parameter to return a List of.
     * @return A {@code List<returnType>}.
     * @throws NonTransientException {@literal returnType} is unsupported, cannot be cast to from
     *     the result, or any SQL errors occur.
     */
    public <V> List<V> executeScalarList(Class<V> returnType) {
        try (ResultSet rs = executeQuery()) {
            List<V> values = new ArrayList<>();
            while (rs.next()) {
                values.add(getScalarFromResultSet(rs, returnType));
            }
            return values;
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Execute the statement and return only the first record from the result set.
     *
     * <p>Note: only the first column of the first row is read (via {@link #executeScalar()}); a
     * String column is deserialized from JSON into {@literal returnType}.
     *
     * @param returnType The Class to return.
     * @param <V> The type parameter.
     * @return An instance of {@literal <V>} from the result set, or {@literal null} if no row.
     */
    public <V> V executeAndFetchFirst(Class<V> returnType) {
        Object o = executeScalar();
        if (null == o) {
            return null;
        }
        return convert(o, returnType);
    }

    /**
     * Execute the PreparedStatement and return a List of {@literal returnType} values from the
     * ResultSet.
     *
     * @param returnType The type Class return a List of.
     * @param <V> The type parameter to return a List of.
     * @return A {@code List<returnType>}.
     * @throws NonTransientException {@literal returnType} is unsupported, cannot be cast to from
     *     the result, or any SQL errors occur.
     */
    public <V> List<V> executeAndFetch(Class<V> returnType) {
        try (ResultSet rs = executeQuery()) {
            List<V> list = new ArrayList<>();
            while (rs.next()) {
                list.add(convert(rs.getObject(1), returnType));
            }
            return list;
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /**
     * Execute the query and pass the {@link ResultSet} to the given handler.
     *
     * @param handler The {@link ResultSetHandler} to execute.
     * @param <V> The return type of this method.
     * @return The results of {@link ResultSetHandler#apply(ResultSet)}.
     */
    public <V> V executeAndFetch(ResultSetHandler<V> handler) {
        try (ResultSet rs = executeQuery()) {
            return handler.apply(rs);
        } catch (SQLException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /** Closes the underlying {@link PreparedStatement}; close failures are logged, not thrown. */
    @Override
    public void close() {
        try {
            if (null != statement && !statement.isClosed()) {
                statement.close();
            }
        } catch (SQLException ex) {
            logger.warn("Error closing prepared statement: {}", ex.getMessage());
        }
    }

    /**
     * Applies {@literal setter} at the next binding index and advances the index.
     *
     * @param setter callback that binds a value on the statement at the given index
     * @return {@literal this}
     * @throws NonTransientException if the JDBC driver rejects the bind
     */
    protected final Query addParameterInternal(InternalParameterSetter setter) {
        // Renamed from 'index' to avoid shadowing the AtomicInteger field of the same name.
        int bindIndex = getAndIncrementIndex();
        try {
            setter.apply(this.statement, bindIndex);
            return this;
        } catch (SQLException ex) {
            throw new NonTransientException(
                    "Could not apply bind parameter at index " + bindIndex, ex);
        }
    }

    /**
     * Reads column 1 of the current row as {@literal returnType}.
     *
     * <p>NOTE(review): for the typed getters (getInt/getLong/...), a SQL NULL column comes back
     * as 0/false rather than null (JDBC default-value semantics), so the null check below only
     * fires for the generic getObject path — confirm this is the intended behavior.
     */
    protected <V> V getScalarFromResultSet(ResultSet rs, Class<V> returnType) throws SQLException {
        Object value = null;

        if (Integer.class == returnType) {
            value = rs.getInt(1);
        } else if (Long.class == returnType) {
            value = rs.getLong(1);
        } else if (String.class == returnType) {
            value = rs.getString(1);
        } else if (Boolean.class == returnType) {
            value = rs.getBoolean(1);
        } else if (Double.class == returnType) {
            value = rs.getDouble(1);
        } else if (Date.class == returnType) {
            value = rs.getDate(1);
        } else if (Timestamp.class == returnType) {
            value = rs.getTimestamp(1);
        } else {
            value = rs.getObject(1);
        }

        if (null == value) {
            throw new NullPointerException(
                    "Cannot get value from ResultSet of type " + returnType.getName());
        }

        return returnType.cast(value);
    }

    /**
     * Converts {@literal value} to {@literal returnType}; non-primitive targets are deserialized
     * from a JSON String.
     *
     * @throws NonTransientException if no conversion is possible
     */
    protected <V> V convert(Object value, Class<V> returnType) {
        if (Boolean.class == returnType) {
            return returnType.cast(convertBoolean(value));
        } else if (Integer.class == returnType) {
            return returnType.cast(convertInt(value));
        } else if (Long.class == returnType) {
            return returnType.cast(convertLong(value));
        } else if (Double.class == returnType) {
            return returnType.cast(convertDouble(value));
        } else if (String.class == returnType) {
            return returnType.cast(convertString(value));
        } else if (value instanceof String) {
            return fromJson((String) value, returnType);
        }

        final String vName = value.getClass().getName();
        final String rName = returnType.getName();
        throw new NonTransientException("Cannot convert type " + vName + " to " + rName);
    }

    /** Lenient Integer conversion; non-numeric strings become 0 via NumberUtils. */
    protected Integer convertInt(Object value) {
        if (null == value) {
            return null;
        }

        if (value instanceof Integer) {
            return (Integer) value;
        }

        if (value instanceof Number) {
            return ((Number) value).intValue();
        }

        return NumberUtils.toInt(value.toString());
    }

    /** Lenient Double conversion; non-numeric strings become 0.0 via NumberUtils. */
    protected Double convertDouble(Object value) {
        if (null == value) {
            return null;
        }

        if (value instanceof Double) {
            return (Double) value;
        }

        if (value instanceof Number) {
            return ((Number) value).doubleValue();
        }

        return NumberUtils.toDouble(value.toString());
    }

    /** Lenient Long conversion; non-numeric strings become 0 via NumberUtils. */
    protected Long convertLong(Object value) {
        if (null == value) {
            return null;
        }

        if (value instanceof Long) {
            return (Long) value;
        }

        if (value instanceof Number) {
            return ((Number) value).longValue();
        }

        return NumberUtils.toLong(value.toString());
    }

    /** String conversion; non-String values are stringified and trimmed. */
    protected String convertString(Object value) {
        if (null == value) {
            return null;
        }

        if (value instanceof String) {
            return (String) value;
        }

        return value.toString().trim();
    }

    /**
     * Lenient Boolean conversion: numbers are true when non-zero; strings accept
     * Y/YES/TRUE/T/1 (case-insensitive).
     */
    protected Boolean convertBoolean(Object value) {
        if (null == value) {
            return null;
        }

        if (value instanceof Boolean) {
            return (Boolean) value;
        }

        if (value instanceof Number) {
            return ((Number) value).intValue() != 0;
        }

        String text = value.toString().trim();
        return "Y".equalsIgnoreCase(text)
                || "YES".equalsIgnoreCase(text)
                || "TRUE".equalsIgnoreCase(text)
                || "T".equalsIgnoreCase(text)
                || "1".equalsIgnoreCase(text);
    }

    /** Serializes {@literal value} to JSON; null in, null out. */
    protected String toJson(Object value) {
        if (null == value) {
            return null;
        }

        try {
            return objectMapper.writeValueAsString(value);
        } catch (JsonProcessingException ex) {
            throw new NonTransientException(ex.getMessage(), ex);
        }
    }

    /** Deserializes a JSON String into {@literal returnType}; null in, null out. */
    protected <V> V fromJson(String value, Class<V> returnType) {
        if (null == value) {
            return null;
        }

        try {
            return objectMapper.readValue(value, returnType);
        } catch (IOException ex) {
            throw new NonTransientException(
                    "Could not convert JSON '" + value + "' to " + returnType.getName(), ex);
        }
    }

    /** @return the next (unconsumed) binding index. */
    protected final int getIndex() {
        return index.get();
    }

    /** @return the current binding index, advancing it by one. */
    protected final int getAndIncrementIndex() {
        return index.getAndIncrement();
    }

    /** Internal callback used to bind one parameter at a given statement index. */
    @FunctionalInterface
    private interface InternalParameterSetter {

        void apply(PreparedStatement ps, int idx) throws SQLException;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/mysql-persistence/src/main/java/com/netflix/conductor/mysql/config/MySQLProperties.java | mysql-persistence/src/main/java/com/netflix/conductor/mysql/config/MySQLProperties.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.mysql.config;
import java.time.Duration;
import org.springframework.boot.context.properties.ConfigurationProperties;
@ConfigurationProperties("conductor.mysql")
public class MySQLProperties {

    /**
     * The time after which the in-memory task definitions cache will be refreshed.
     * Defaults to 60 seconds; bound from {@code conductor.mysql.taskDefCacheRefreshInterval}.
     */
    private Duration taskDefCacheRefreshInterval = Duration.ofSeconds(60);

    // Maximum retry attempts when a MySQL deadlock is detected (see the retry template
    // configured from this property). Defaults to 3.
    private Integer deadlockRetryMax = 3;

    public Duration getTaskDefCacheRefreshInterval() {
        return taskDefCacheRefreshInterval;
    }

    public void setTaskDefCacheRefreshInterval(Duration taskDefCacheRefreshInterval) {
        this.taskDefCacheRefreshInterval = taskDefCacheRefreshInterval;
    }

    public Integer getDeadlockRetryMax() {
        return deadlockRetryMax;
    }

    public void setDeadlockRetryMax(Integer deadlockRetryMax) {
        this.deadlockRetryMax = deadlockRetryMax;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/mysql-persistence/src/main/java/com/netflix/conductor/mysql/config/MySQLConfiguration.java | mysql-persistence/src/main/java/com/netflix/conductor/mysql/config/MySQLConfiguration.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.mysql.config;
import java.sql.SQLException;
import java.util.Optional;
import javax.sql.DataSource;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.DependsOn;
import org.springframework.context.annotation.Import;
import org.springframework.retry.RetryContext;
import org.springframework.retry.backoff.NoBackOffPolicy;
import org.springframework.retry.policy.SimpleRetryPolicy;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.mysql.dao.MySQLExecutionDAO;
import com.netflix.conductor.mysql.dao.MySQLMetadataDAO;
import com.netflix.conductor.mysql.dao.MySQLQueueDAO;
import com.fasterxml.jackson.databind.ObjectMapper;
import static com.mysql.cj.exceptions.MysqlErrorNumbers.ER_LOCK_DEADLOCK;
/**
 * Spring configuration that wires the MySQL-backed DAO implementations. Active only when
 * {@code conductor.db.type=mysql}.
 */
@Configuration(proxyBeanMethods = false)
@EnableConfigurationProperties(MySQLProperties.class)
@ConditionalOnProperty(name = "conductor.db.type", havingValue = "mysql")
// Import the DataSourceAutoConfiguration when mysql database is selected.
// By default the datasource configuration is excluded in the main module.
@Import(DataSourceAutoConfiguration.class)
public class MySQLConfiguration {

    /** Metadata DAO; depends on Flyway beans so schema migrations run before first use. */
    @Bean
    @DependsOn({"flyway", "flywayInitializer"})
    public MySQLMetadataDAO mySqlMetadataDAO(
            @Qualifier("mysqlRetryTemplate") RetryTemplate retryTemplate,
            ObjectMapper objectMapper,
            DataSource dataSource,
            MySQLProperties properties) {
        return new MySQLMetadataDAO(retryTemplate, objectMapper, dataSource, properties);
    }

    /** Execution DAO; depends on Flyway beans so schema migrations run before first use. */
    @Bean
    @DependsOn({"flyway", "flywayInitializer"})
    public MySQLExecutionDAO mySqlExecutionDAO(
            @Qualifier("mysqlRetryTemplate") RetryTemplate retryTemplate,
            ObjectMapper objectMapper,
            DataSource dataSource) {
        return new MySQLExecutionDAO(retryTemplate, objectMapper, dataSource);
    }

    /** Queue DAO; depends on Flyway beans so schema migrations run before first use. */
    @Bean
    @DependsOn({"flyway", "flywayInitializer"})
    public MySQLQueueDAO mySqlQueueDAO(
            @Qualifier("mysqlRetryTemplate") RetryTemplate retryTemplate,
            ObjectMapper objectMapper,
            DataSource dataSource) {
        return new MySQLQueueDAO(retryTemplate, objectMapper, dataSource);
    }

    /**
     * Retry template that retries only on MySQL deadlocks, up to the configured maximum, with
     * no back-off between attempts.
     */
    @Bean
    public RetryTemplate mysqlRetryTemplate(MySQLProperties properties) {
        SimpleRetryPolicy retryPolicy = new CustomRetryPolicy();
        retryPolicy.setMaxAttempts(properties.getDeadlockRetryMax());

        RetryTemplate retryTemplate = new RetryTemplate();
        retryTemplate.setRetryPolicy(retryPolicy);
        retryTemplate.setBackOffPolicy(new NoBackOffPolicy());
        return retryTemplate;
    }

    /**
     * Retry policy that permits a retry only when the last failure has a {@link SQLException}
     * cause whose error code is ER_LOCK_DEADLOCK; before any failure (no last throwable) it
     * defers to the standard attempt-count check.
     */
    public static class CustomRetryPolicy extends SimpleRetryPolicy {

        @Override
        public boolean canRetry(final RetryContext context) {
            final Optional<Throwable> lastThrowable =
                    Optional.ofNullable(context.getLastThrowable());
            return lastThrowable
                    .map(throwable -> super.canRetry(context) && isDeadLockError(throwable))
                    .orElseGet(() -> super.canRetry(context));
        }

        // True only when a SQLException in the cause chain carries the MySQL deadlock code.
        private boolean isDeadLockError(Throwable throwable) {
            SQLException sqlException = findCauseSQLException(throwable);
            if (sqlException == null) {
                return false;
            }
            return ER_LOCK_DEADLOCK == sqlException.getErrorCode();
        }

        // Walks the cause chain until a SQLException is found, or returns null at the root.
        private SQLException findCauseSQLException(Throwable throwable) {
            Throwable causeException = throwable;
            while (null != causeException && !(causeException instanceof SQLException)) {
                causeException = causeException.getCause();
            }
            return (SQLException) causeException;
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/JetStreamObservableQueue.java | nats/src/main/java/com/netflix/conductor/contribs/queue/nats/JetStreamObservableQueue.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.queue.nats;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.availability.AvailabilityChangeEvent;
import org.springframework.boot.availability.LivenessState;
import org.springframework.context.ApplicationEventPublisher;
import com.netflix.conductor.contribs.queue.nats.config.JetStreamProperties;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.core.events.queue.ObservableQueue;
import io.nats.client.*;
import io.nats.client.api.*;
import rx.Observable;
import rx.Scheduler;
/**
* @author andrey.stelmashenko@gmail.com
*/
public class JetStreamObservableQueue implements ObservableQueue {
private static final Logger LOG = LoggerFactory.getLogger(JetStreamObservableQueue.class);
private final LinkedBlockingQueue<Message> messages = new LinkedBlockingQueue<>();
private final Lock mu = new ReentrantLock();
private final String queueType;
private final String subject;
private final String queueUri;
private final JetStreamProperties properties;
private final Scheduler scheduler;
private final AtomicBoolean running = new AtomicBoolean(false);
private final ApplicationEventPublisher eventPublisher;
private Connection nc;
private JetStreamSubscription sub;
private Observable<Long> interval;
private final String queueGroup;
public JetStreamObservableQueue(
ConductorProperties conductorProperties,
JetStreamProperties properties,
String queueType,
String queueUri,
Scheduler scheduler,
ApplicationEventPublisher eventPublisher) {
LOG.debug("JSM obs queue create, qtype={}, quri={}", queueType, queueUri);
this.queueUri = queueUri;
// If queue specified (e.g. subject:queue) - split to subject & queue
if (queueUri.contains(":")) {
this.subject =
getQueuePrefix(conductorProperties, properties)
+ queueUri.substring(0, queueUri.indexOf(':'));
queueGroup = queueUri.substring(queueUri.indexOf(':') + 1);
} else {
this.subject = getQueuePrefix(conductorProperties, properties) + queueUri;
queueGroup = null;
}
this.queueType = queueType;
this.properties = properties;
this.scheduler = scheduler;
this.eventPublisher = eventPublisher;
}
/**
 * Computes the subject prefix: the configured listener-queue prefix when set, otherwise
 * {@code <appId>_jsm_notify_[<stack>_]}.
 *
 * @param conductorProperties source of the app id and optional stack name
 * @param properties source of the optional explicit prefix
 * @return the prefix to prepend to queue subjects
 */
public static String getQueuePrefix(
        ConductorProperties conductorProperties, JetStreamProperties properties) {
    String stack = "";
    // Consistency: use commons-lang3 here like the isBlank() check below, instead of the
    // original manual null + length() > 0 test (isNotEmpty is the exact equivalent).
    if (StringUtils.isNotEmpty(conductorProperties.getStack())) {
        stack = conductorProperties.getStack() + "_";
    }
    return StringUtils.isBlank(properties.getListenerQueuePrefix())
            ? conductorProperties.getAppId() + "_jsm_notify_" + stack
            : properties.getListenerQueuePrefix();
}
/** Returns an {@link Observable} that emits buffered queue messages; see getOnSubscribe(). */
@Override
public Observable<Message> observe() {
    return Observable.create(getOnSubscribe());
}
/**
 * Builds the subscription logic: a timer fires every {@code pollTimeDuration}; on each tick,
 * if the queue is running, all currently buffered messages are drained from {@link #messages}
 * and emitted downstream as a batch. When not running, an empty batch is emitted instead.
 *
 * <p>Note: the created interval Observable is stored in the {@link #interval} field so that
 * {@link #stop()} can reference it later.
 */
private Observable.OnSubscribe<Message> getOnSubscribe() {
    return subscriber -> {
        interval =
                Observable.interval(
                        properties.getPollTimeDuration().toMillis(),
                        TimeUnit.MILLISECONDS,
                        scheduler);
        interval.flatMap(
                        (Long x) -> {
                            if (!this.isRunning()) {
                                LOG.debug(
                                        "Component stopped, skip listening for messages from JSM Queue '{}'",
                                        subject);
                                return Observable.from(Collections.emptyList());
                            } else {
                                // Drain the whole buffer in one go; drainTo avoids per-element
                                // locking on the LinkedBlockingQueue.
                                List<Message> available = new ArrayList<>();
                                messages.drainTo(available);

                                if (!available.isEmpty()) {
                                    LOG.debug(
                                            "Processing JSM queue '{}' batch messages count={}",
                                            subject,
                                            available.size());
                                }

                                return Observable.from(available);
                            }
                        })
                .subscribe(subscriber::onNext, subscriber::onError);
    };
}
/** @return the queue type string this instance was created with */
@Override
public String getType() {
    return queueType;
}
/** @return the original queue URI (including any {@code :group} suffix) */
@Override
public String getName() {
    return queueUri;
}
/** @return same as {@link #getName()} */
@Override
public String getURI() {
    return getName();
}
/**
 * Acknowledges each message against JetStream.
 *
 * <p>NOTE(review): unchecked cast — assumes every element is a {@link JsmMessage} as produced
 * by this queue; a foreign {@link Message} would throw ClassCastException.
 *
 * @param messages the messages to ack
 * @return always an empty list (no per-message failures are reported)
 */
@Override
public List<String> ack(List<Message> messages) {
    messages.forEach(m -> ((JsmMessage) m).getJsmMsg().ack());
    return Collections.emptyList();
}
/**
 * Publishes each message payload to {@link #subject} over a dedicated short-lived NATS
 * connection (opened and closed per call via try-with-resources).
 *
 * @param messages messages whose payloads are published, in order
 * @throws NatsException if connecting or publishing fails; the interrupt flag is restored
 *     before wrapping an {@link InterruptedException}
 */
@Override
public void publish(List<Message> messages) {
    try (Connection conn = Nats.connect(properties.getUrl())) {
        JetStream js = conn.jetStream();
        for (Message msg : messages) {
            // NOTE(review): getBytes() uses the platform default charset — consider UTF-8.
            js.publish(subject, msg.getPayload().getBytes());
        }
    } catch (IOException | JetStreamApiException e) {
        throw new NatsException("Failed to publish to jsm", e);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new NatsException("Failed to publish to jsm", e);
    }
}
/**
 * Not supported for JetStream; redelivery timing is governed by the consumer's ack-wait
 * setting (configured when the consumer is created) rather than per message.
 */
@Override
public void setUnackTimeout(Message message, long unackTimeout) {
    // do nothing, not supported
}
/**
 * @return the number of messages pending on the JetStream consumer, or 0 when the queue has
 *     not been subscribed yet or the consumer info cannot be fetched
 */
@Override
public long size() {
    // Guard: 'sub' stays null until a successful subscribe; the original would NPE here if
    // size() was called before start() completed.
    if (sub == null) {
        return 0;
    }
    try {
        return sub.getConsumerInfo().getNumPending();
    } catch (IOException | JetStreamApiException e) {
        // Include the exception so the failure cause is not silently dropped.
        LOG.warn("Failed to get stream '{}' info", subject, e);
    }
    return 0;
}
/**
 * Starts the queue by connecting to NATS under the instance lock; the underlying connect is a
 * no-op when already running.
 */
@Override
public void start() {
    mu.lock();
    try {
        natsConnect();
    } finally {
        mu.unlock();
    }
}
/**
 * Stops the queue: detaches the poll interval, closes the NATS connection (restoring the
 * interrupt flag if interrupted) and clears the running flag.
 */
@Override
public void stop() {
    // Guard: stop() may be invoked before observe(), in which case no interval exists yet and
    // the original code would throw a NullPointerException here.
    if (interval != null) {
        // NOTE(review): unsubscribeOn() returns a new Observable and does not by itself cancel
        // the source; the return value is discarded here as in the original. Retaining the
        // Subscription produced in getOnSubscribe() would allow a real cancellation.
        interval.unsubscribeOn(scheduler);
    }
    try {
        if (nc != null) {
            nc.close();
        }
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        LOG.error("Failed to close Nats connection", e);
    }
    running.set(false);
}
/** @return whether the queue is currently marked running */
@Override
public boolean isRunning() {
    return this.running.get();
}
/**
 * Opens the NATS connection asynchronously; no-op when already running.
 *
 * <p>The connection listener (re)subscribes on connect/reconnect events and, when the
 * connection becomes permanently CLOSED (reconnects exhausted), publishes a BROKEN liveness
 * event so the application can be restarted by its orchestrator.
 *
 * @throws NatsException if kicking off the asynchronous connect is interrupted
 */
private void natsConnect() {
    if (running.get()) {
        return;
    }
    LOG.info("Starting JSM observable, name={}", queueUri);
    try {
        Nats.connectAsynchronously(
                new Options.Builder()
                        .connectionListener(
                                (conn, type) -> {
                                    LOG.info("Connection to JSM updated: {}", type);
                                    if (ConnectionListener.Events.CLOSED.equals(type)) {
                                        LOG.error(
                                                "Could not reconnect to NATS! Changing liveness status to {}!",
                                                LivenessState.BROKEN);
                                        AvailabilityChangeEvent.publish(
                                                eventPublisher, type, LivenessState.BROKEN);
                                    }
                                    // Keep the latest connection so stop() can close it.
                                    this.nc = conn;
                                    subscribeOnce(conn, type);
                                })
                        .errorListener(new LoggingNatsErrorListener())
                        .server(properties.getUrl())
                        .maxReconnects(properties.getMaxReconnects())
                        .build(),
                true);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new NatsException("Failed to connect to JSM", e);
    }
}
private void createStream(JetStreamManagement jsm) {
StreamConfiguration streamConfig =
StreamConfiguration.builder()
.name(subject)
.replicas(properties.getReplicas())
.retentionPolicy(RetentionPolicy.Limits)
.maxBytes(properties.getStreamMaxBytes())
.storageType(StorageType.get(properties.getStreamStorageType()))
.build();
try {
StreamInfo streamInfo = jsm.addStream(streamConfig);
LOG.debug("Updated stream, info: {}", streamInfo);
} catch (IOException | JetStreamApiException e) {
LOG.error("Failed to add stream: " + streamConfig, e);
AvailabilityChangeEvent.publish(eventPublisher, e, LivenessState.BROKEN);
}
}
    /**
     * On CONNECTED/RECONNECTED events, (re)creates the stream, the durable consumer and
     * the push subscription. All other connection events are ignored.
     *
     * @throws NatsException if JetStream management cannot be obtained
     */
    private void subscribeOnce(Connection nc, ConnectionListener.Events type) {
        if (type.equals(ConnectionListener.Events.CONNECTED)
                || type.equals(ConnectionListener.Events.RECONNECTED)) {
            JetStreamManagement jsm;
            try {
                jsm = nc.jetStreamManagement();
            } catch (IOException e) {
                throw new NatsException("Failed to get jsm management", e);
            }
            createStream(jsm);
            var consumerConfig = createConsumer(jsm);
            subscribe(nc, consumerConfig);
        }
    }
    /**
     * Adds or updates the durable push consumer for this subject's stream and returns
     * its configuration for the subsequent bind-subscribe.
     *
     * @throws NatsException if the consumer cannot be created or updated
     */
    private ConsumerConfiguration createConsumer(JetStreamManagement jsm) {
        ConsumerConfiguration consumerConfig =
                ConsumerConfiguration.builder()
                        .name(properties.getDurableName())
                        .deliverGroup(queueGroup)
                        .durable(properties.getDurableName())
                        .ackWait(properties.getAckWait())
                        .maxDeliver(properties.getMaxDeliver())
                        .maxAckPending(properties.getMaxAckPending())
                        // Every message must be acked individually.
                        .ackPolicy(AckPolicy.Explicit)
                        .deliverSubject(subject + "-deliver")
                        // Only messages arriving after consumer creation are delivered.
                        .deliverPolicy(DeliverPolicy.New)
                        .build();
        try {
            jsm.addOrUpdateConsumer(subject, consumerConfig);
            return consumerConfig;
        } catch (IOException | JetStreamApiException e) {
            throw new NatsException("Failed to add/update consumer", e);
        }
    }
    /**
     * Binds a push subscription to the pre-created durable consumer. Incoming messages
     * are wrapped as {@link JsmMessage} and buffered into {@code messages} for the
     * polling observer; autoAck is off, so acking happens elsewhere.
     *
     * @throws NatsException if subscribing fails
     */
    private void subscribe(Connection nc, ConsumerConfiguration consumerConfig) {
        try {
            JetStream js = nc.jetStream();
            PushSubscribeOptions pso =
                    PushSubscribeOptions.builder().configuration(consumerConfig).stream(subject)
                            // Bind to the existing consumer instead of creating a new one.
                            .bind(true)
                            .build();
            LOG.debug("Subscribing jsm, subject={}, options={}", subject, pso);
            sub =
                    js.subscribe(
                            subject,
                            queueGroup,
                            nc.createDispatcher(),
                            msg -> {
                                var message = new JsmMessage();
                                // Keep the raw NATS message so it can be acked later.
                                message.setJsmMsg(msg);
                                message.setId(NUID.nextGlobal());
                                // NOTE(review): new String(byte[]) uses the platform
                                // default charset — confirm UTF-8 and pass it explicitly.
                                message.setPayload(new String(msg.getData()));
                                messages.add(message);
                            },
                            /*autoAck*/ false,
                            pso);
            LOG.debug("Subscribed successfully {}", sub.getConsumerInfo());
            this.running.set(true);
        } catch (IOException | JetStreamApiException e) {
            throw new NatsException("Failed to subscribe", e);
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/NatsException.java | nats/src/main/java/com/netflix/conductor/contribs/queue/nats/NatsException.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.queue.nats;
/**
 * Unchecked exception for NATS/JetStream failures (connect, publish, subscribe,
 * consumer management). Mirrors the standard {@link RuntimeException} constructors.
 */
public class NatsException extends RuntimeException {
    /** Creates an exception with no message or cause. */
    public NatsException() {
        super();
    }

    /** Creates an exception with a detail message. */
    public NatsException(String message) {
        super(message);
    }

    /** Creates an exception with a detail message and the underlying cause. */
    public NatsException(String message, Throwable cause) {
        super(message, cause);
    }

    /** Creates an exception wrapping the underlying cause. */
    public NatsException(Throwable cause) {
        super(cause);
    }

    /** Full-control constructor for subclasses (suppression / stack-trace writability). */
    protected NatsException(
            String message,
            Throwable cause,
            boolean enableSuppression,
            boolean writableStackTrace) {
        super(message, cause, enableSuppression, writableStackTrace);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/LoggingNatsErrorListener.java | nats/src/main/java/com/netflix/conductor/contribs/queue/nats/LoggingNatsErrorListener.java | /*
* Copyright 2024 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.queue.nats;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.nats.client.Connection;
import io.nats.client.ErrorListener;
import io.nats.client.JetStreamSubscription;
import io.nats.client.Message;
/**
 * {@link ErrorListener} that only logs NATS client events: connection errors,
 * exceptions, discarded messages and missed heartbeats.
 */
public class LoggingNatsErrorListener implements ErrorListener {
    private static final Logger LOG = LoggerFactory.getLogger(LoggingNatsErrorListener.class);

    /** Logs a connection-level error string reported by the NATS client. */
    @Override
    public void errorOccurred(Connection conn, String error) {
        LOG.error("Nats connection error occurred: {}", error);
    }

    /** Logs an exception raised inside the NATS client. */
    @Override
    public void exceptionOccurred(Connection conn, Exception exp) {
        LOG.error("Nats connection exception occurred", exp);
    }

    /** Logs a message the client had to discard (e.g. slow-consumer overflow). */
    @Override
    public void messageDiscarded(Connection conn, Message msg) {
        // Fix: removed the stray trailing ", " from the message template.
        LOG.error("Nats message discarded, SID={}", msg.getSID());
    }

    /** Logs a missed JetStream heartbeat for the given subscription. */
    @Override
    public void heartbeatAlarm(
            Connection conn,
            JetStreamSubscription sub,
            long lastStreamSequence,
            long lastConsumerSequence) {
        // Fix: typo in log message ("Heartbit" -> "Heartbeat").
        LOG.warn("Heartbeat missed, subject={}", sub.getSubject());
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/NATSAbstractQueue.java | nats/src/main/java/com/netflix/conductor/contribs/queue/nats/NATSAbstractQueue.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.queue.nats;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.core.events.queue.ObservableQueue;
import io.nats.client.NUID;
import rx.Observable;
import rx.Scheduler;
/**
* @author Oleksiy Lysak
*/
/**
 * Base class for NATS-backed {@link ObservableQueue}s. Received messages are buffered
 * in an in-memory {@link LinkedBlockingQueue} and drained on a 100 ms Rx timer by
 * {@link #observe()}; a 500 ms monitor task reconnects and re-subscribes after
 * connection loss. Subclasses supply the concrete connect/subscribe/publish logic.
 * All connection lifecycle transitions are serialized by the {@code mu} lock.
 */
public abstract class NATSAbstractQueue implements ObservableQueue {
    private static final Logger LOGGER = LoggerFactory.getLogger(NATSAbstractQueue.class);
    // Buffer between the NATS callback thread and the Rx drain timer.
    protected LinkedBlockingQueue<Message> messages = new LinkedBlockingQueue<>();
    // Serializes open/close/monitor/subscribe transitions.
    protected final Lock mu = new ReentrantLock();
    private final String queueType;
    // Runs the monitor() reconnect check; created in open(), killed in close().
    private ScheduledExecutorService execs;
    private final Scheduler scheduler;
    protected final String queueURI;
    protected final String subject;
    // Optional NATS queue group (load-balanced delivery); null for plain pub/sub.
    protected String queue;
    // Indicates that observe was called (event handler) and we must re-initiate the
    // subscription upon reconnection.
    private boolean observable;
    private boolean isOpened;
    private volatile boolean running;

    /**
     * @param queueURI either "subject" or "subject:queueGroup"
     * @param queueType short type tag reported by {@link #getType()}
     * @param scheduler Rx scheduler used by the observe() drain timer
     */
    NATSAbstractQueue(String queueURI, String queueType, Scheduler scheduler) {
        this.queueURI = queueURI;
        this.queueType = queueType;
        this.scheduler = scheduler;
        // If queue specified (e.g. subject:queue) - split to subject & queue
        if (queueURI.contains(":")) {
            this.subject = queueURI.substring(0, queueURI.indexOf(':'));
            queue = queueURI.substring(queueURI.indexOf(':') + 1);
        } else {
            this.subject = queueURI;
            queue = null;
        }
        LOGGER.info(
                String.format(
                        "Initialized with queueURI=%s, subject=%s, queue=%s",
                        queueURI, subject, queue));
    }

    /**
     * Callback for subclasses: wraps raw NATS payload bytes into a conductor
     * {@link Message} and buffers it for the observer.
     * NOTE(review): new String(byte[]) uses the platform default charset — confirm UTF-8.
     */
    void onMessage(String subject, byte[] data) {
        String payload = new String(data);
        LOGGER.info(String.format("Received message for %s: %s", subject, payload));
        Message dstMsg = new Message();
        dstMsg.setId(NUID.nextGlobal());
        dstMsg.setPayload(payload);
        messages.add(dstMsg);
    }

    /**
     * Subscribes (under the lock) and returns an Observable that drains the internal
     * buffer every 100 ms while {@link #isRunning()} is true.
     */
    @Override
    public Observable<Message> observe() {
        LOGGER.info("Observe invoked for queueURI " + queueURI);
        // Remember that a subscription must be re-created after reconnects.
        observable = true;
        mu.lock();
        try {
            subscribe();
        } finally {
            mu.unlock();
        }
        Observable.OnSubscribe<Message> onSubscribe =
                subscriber -> {
                    Observable<Long> interval =
                            Observable.interval(100, TimeUnit.MILLISECONDS, scheduler);
                    interval.flatMap(
                                    (Long x) -> {
                                        if (!isRunning()) {
                                            LOGGER.debug(
                                                    "Component stopped, skip listening for messages from NATS Queue");
                                            return Observable.from(Collections.emptyList());
                                        } else {
                                            // Drain whatever arrived since the last tick.
                                            List<Message> available = new LinkedList<>();
                                            messages.drainTo(available);
                                            if (!available.isEmpty()) {
                                                AtomicInteger count = new AtomicInteger(0);
                                                StringBuilder buffer = new StringBuilder();
                                                available.forEach(
                                                        msg -> {
                                                            buffer.append(msg.getId())
                                                                    .append("=")
                                                                    .append(msg.getPayload());
                                                            count.incrementAndGet();
                                                            if (count.get() < available.size()) {
                                                                buffer.append(",");
                                                            }
                                                        });
                                                LOGGER.info(
                                                        String.format(
                                                                "Batch from %s to conductor is %s",
                                                                subject, buffer.toString()));
                                            }
                                            return Observable.from(available);
                                        }
                                    })
                            .subscribe(subscriber::onNext, subscriber::onError);
                };
        return Observable.create(onSubscribe);
    }

    @Override
    public String getType() {
        return queueType;
    }

    @Override
    public String getName() {
        return queueURI;
    }

    @Override
    public String getURI() {
        return queueURI;
    }

    /** Acks are not tracked for plain NATS; always reports nothing failed. */
    @Override
    public List<String> ack(List<Message> messages) {
        return Collections.emptyList();
    }

    /** No-op: unack timeouts are not supported for plain NATS. */
    @Override
    public void setUnackTimeout(Message message, long unackTimeout) {}

    /** @return number of messages currently buffered locally (not broker depth). */
    @Override
    public long size() {
        return messages.size();
    }

    /**
     * Publishes each message payload to the subject via the subclass transport.
     * Any failure is logged and rethrown as a RuntimeException.
     */
    @Override
    public void publish(List<Message> messages) {
        messages.forEach(
                message -> {
                    try {
                        String payload = message.getPayload();
                        publish(subject, payload.getBytes());
                        LOGGER.info(String.format("Published message to %s: %s", subject, payload));
                    } catch (Exception ex) {
                        LOGGER.error(
                                "Failed to publish message "
                                        + message.getPayload()
                                        + " to "
                                        + subject,
                                ex);
                        throw new RuntimeException(ex);
                    }
                });
    }

    /** Unacked messages are always candidates for republish. */
    @Override
    public boolean rePublishIfNoAck() {
        return true;
    }

    /** Stops the monitor task and tears down subscription and connection. */
    @Override
    public void close() {
        LOGGER.info("Closing connection for " + queueURI);
        mu.lock();
        try {
            if (execs != null) {
                execs.shutdownNow();
                execs = null;
            }
            closeSubs();
            closeConn();
            isOpened = false;
        } finally {
            mu.unlock();
        }
    }

    /**
     * Connects (best-effort) and starts the 500 ms reconnect monitor. A failed initial
     * connect is deliberately swallowed — the monitor will keep retrying.
     */
    public void open() {
        // do nothing if not closed
        if (isOpened) {
            return;
        }
        mu.lock();
        try {
            try {
                connect();
                // Re-initiated subscription if existed
                if (observable) {
                    subscribe();
                }
            } catch (Exception ignore) {
            }
            execs = Executors.newScheduledThreadPool(1);
            execs.scheduleAtFixedRate(this::monitor, 0, 500, TimeUnit.MILLISECONDS);
            isOpened = true;
        } finally {
            mu.unlock();
        }
    }

    /** Periodic health check: if disconnected, tears down and reconnects/resubscribes. */
    private void monitor() {
        if (isConnected()) {
            return;
        }
        LOGGER.error("Monitor invoked for " + queueURI);
        mu.lock();
        try {
            closeSubs();
            closeConn();
            // Connect
            connect();
            // Re-initiated subscription if existed
            if (observable) {
                subscribe();
            }
        } catch (Exception ex) {
            LOGGER.error("Monitor failed with " + ex.getMessage() + " for " + queueURI, ex);
        } finally {
            mu.unlock();
        }
    }

    /** @return true when {@link #open()} has not been called or {@link #close()} was. */
    public boolean isClosed() {
        return !isOpened;
    }

    /** Precondition helper: fails fast when no live connection is available. */
    void ensureConnected() {
        if (!isConnected()) {
            throw new RuntimeException("No nats connection");
        }
    }

    @Override
    public void start() {
        LOGGER.info("Started listening to {}:{}", getClass().getSimpleName(), queueURI);
        running = true;
    }

    @Override
    public void stop() {
        LOGGER.info("Stopped listening to {}:{}", getClass().getSimpleName(), queueURI);
        running = false;
    }

    @Override
    public boolean isRunning() {
        return running;
    }

    // Transport hooks implemented by concrete NATS / JetStream subclasses.
    abstract void connect();

    abstract boolean isConnected();

    abstract void publish(String subject, byte[] data) throws Exception;

    abstract void subscribe();

    abstract void closeSubs();

    abstract void closeConn();
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/NATSObservableQueue.java | nats/src/main/java/com/netflix/conductor/contribs/queue/nats/NATSObservableQueue.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.queue.nats;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.nats.client.Connection;
import io.nats.client.Nats;
import io.nats.client.Subscription;
import rx.Scheduler;
/**
* @author Oleksiy Lysak
*/
/**
 * Plain (non-JetStream) NATS implementation of {@link NATSAbstractQueue}: one
 * connection, one subscription per queue URI, with optional queue-group delivery.
 *
 * @author Oleksiy Lysak
 */
public class NATSObservableQueue extends NATSAbstractQueue {
    private static final Logger LOGGER = LoggerFactory.getLogger(NATSObservableQueue.class);
    private Subscription subs;
    private Connection conn;

    public NATSObservableQueue(String queueURI, Scheduler scheduler) {
        super(queueURI, "nats", scheduler);
        // Connect + start the reconnect monitor immediately on construction.
        open();
    }

    @Override
    public boolean isConnected() {
        return (conn != null && Connection.Status.CONNECTED.equals(conn.getStatus()));
    }

    /**
     * Connects synchronously to the default NATS server.
     * NOTE(review): Nats.connect() ignores any configured URL — presumably only the
     * default localhost server is supported here; confirm.
     */
    @Override
    public void connect() {
        try {
            Connection temp = Nats.connect();
            LOGGER.info("Successfully connected for " + queueURI);
            conn = temp;
        } catch (Exception e) {
            LOGGER.error("Unable to establish nats connection for " + queueURI, e);
            throw new RuntimeException(e);
        }
    }

    /**
     * Creates the subscription (queue-group or plain pub/sub). Idempotent: does
     * nothing when already subscribed. Failures are logged, not thrown.
     *
     * <p>NOTE(review): the Dispatcher created via createDispatcher(...) is discarded,
     * and conn.subscribe(...) returns a *synchronous* Subscription that is never
     * polled — it looks like onMessage will never be invoked. Presumably the intent
     * was to subscribe on the dispatcher itself; verify against the jnats API.
     */
    @Override
    public void subscribe() {
        // do nothing if already subscribed
        if (subs != null) {
            return;
        }
        try {
            ensureConnected();
            // Create subject/queue subscription if the queue has been provided
            if (StringUtils.isNotEmpty(queue)) {
                LOGGER.info(
                        "No subscription. Creating a queue subscription. subject={}, queue={}",
                        subject,
                        queue);
                conn.createDispatcher(msg -> onMessage(msg.getSubject(), msg.getData()));
                subs = conn.subscribe(subject, queue);
            } else {
                LOGGER.info(
                        "No subscription. Creating a pub/sub subscription. subject={}", subject);
                conn.createDispatcher(msg -> onMessage(msg.getSubject(), msg.getData()));
                subs = conn.subscribe(subject);
            }
        } catch (Exception ex) {
            LOGGER.error(
                    "Subscription failed with " + ex.getMessage() + " for queueURI " + queueURI,
                    ex);
        }
    }

    /** Publishes raw bytes to the subject; requires a live connection. */
    @Override
    public void publish(String subject, byte[] data) throws Exception {
        ensureConnected();
        conn.publish(subject, data);
    }

    /** Unsubscribes and clears the subscription; errors are logged only. */
    @Override
    public void closeSubs() {
        if (subs != null) {
            try {
                subs.unsubscribe();
            } catch (Exception ex) {
                LOGGER.error("closeSubs failed with " + ex.getMessage() + " for " + queueURI, ex);
            }
            subs = null;
        }
    }

    /** Closes and clears the connection; errors are logged only. */
    @Override
    public void closeConn() {
        if (conn != null) {
            try {
                conn.close();
            } catch (Exception ex) {
                LOGGER.error("closeConn failed with " + ex.getMessage() + " for " + queueURI, ex);
            }
            conn = null;
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/JsmMessage.java | nats/src/main/java/com/netflix/conductor/contribs/queue/nats/JsmMessage.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.queue.nats;
import com.netflix.conductor.core.events.queue.Message;
/**
* @author andrey.stelmashenko@gmail.com
*/
/**
 * Conductor {@link Message} that additionally carries the underlying raw NATS
 * JetStream message alongside the conductor payload.
 *
 * @author andrey.stelmashenko@gmail.com
 */
public class JsmMessage extends Message {
    // The raw JetStream message backing this conductor message.
    private io.nats.client.Message jsmMsg;

    /** @return the underlying NATS JetStream message */
    public io.nats.client.Message getJsmMsg() {
        return jsmMsg;
    }

    /** Attaches the underlying NATS JetStream message. */
    public void setJsmMsg(io.nats.client.Message jsmMsg) {
        this.jsmMsg = jsmMsg;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/config/NATSEventQueueProvider.java | nats/src/main/java/com/netflix/conductor/contribs/queue/nats/config/NATSEventQueueProvider.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.queue.nats.config;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.core.env.Environment;
import org.springframework.lang.NonNull;
import com.netflix.conductor.contribs.queue.nats.NATSObservableQueue;
import com.netflix.conductor.core.events.EventQueueProvider;
import com.netflix.conductor.core.events.queue.ObservableQueue;
import rx.Scheduler;
/**
* @author Oleksiy Lysak
*/
/**
 * Supplies plain-NATS queues, caching one {@link NATSObservableQueue} per queue
 * URI and transparently reopening a queue that has been closed.
 *
 * @author Oleksiy Lysak
 */
public class NATSEventQueueProvider implements EventQueueProvider {
    private static final Logger LOGGER = LoggerFactory.getLogger(NATSEventQueueProvider.class);
    protected Map<String, NATSObservableQueue> queues = new ConcurrentHashMap<>();
    private final Scheduler scheduler;

    public NATSEventQueueProvider(Environment environment, Scheduler scheduler) {
        this.scheduler = scheduler;
        LOGGER.info("NATS Event Queue Provider initialized...");
    }

    @Override
    public String getQueueType() {
        return "nats";
    }

    @Override
    @NonNull
    public ObservableQueue getQueue(String queueURI) {
        NATSObservableQueue observableQueue =
                queues.computeIfAbsent(queueURI, uri -> new NATSObservableQueue(uri, scheduler));
        // Hand out a live queue: reopen it if a previous close() shut it down.
        if (observableQueue.isClosed()) {
            observableQueue.open();
        }
        return observableQueue;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/config/JetStreamEventQueueProvider.java | nats/src/main/java/com/netflix/conductor/contribs/queue/nats/config/JetStreamEventQueueProvider.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.queue.nats.config;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.ApplicationEventPublisher;
import org.springframework.lang.NonNull;
import com.netflix.conductor.contribs.queue.nats.JetStreamObservableQueue;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.events.EventQueueProvider;
import com.netflix.conductor.core.events.queue.ObservableQueue;
import rx.Scheduler;
/**
* @author andrey.stelmashenko@gmail.com
*/
/**
 * Supplies JetStream-backed queues, caching one {@link JetStreamObservableQueue}
 * per queue URI.
 *
 * @author andrey.stelmashenko@gmail.com
 */
public class JetStreamEventQueueProvider implements EventQueueProvider {
    public static final String QUEUE_TYPE = "jsm";
    private static final Logger LOG = LoggerFactory.getLogger(JetStreamEventQueueProvider.class);
    private final Map<String, ObservableQueue> queues = new ConcurrentHashMap<>();
    private final JetStreamProperties properties;
    private final ConductorProperties conductorProperties;
    private final Scheduler scheduler;
    private final ApplicationEventPublisher eventPublisher;

    public JetStreamEventQueueProvider(
            ConductorProperties conductorProperties,
            JetStreamProperties properties,
            Scheduler scheduler,
            ApplicationEventPublisher eventPublisher) {
        LOG.info("NATS Event Queue Provider initialized...");
        this.conductorProperties = conductorProperties;
        this.properties = properties;
        this.scheduler = scheduler;
        this.eventPublisher = eventPublisher;
    }

    @Override
    public String getQueueType() {
        return QUEUE_TYPE;
    }

    @Override
    @NonNull
    public ObservableQueue getQueue(String queueURI) throws IllegalArgumentException {
        LOG.info("Getting obs queue, quri={}", queueURI);
        return queues.computeIfAbsent(queueURI, this::buildQueue);
    }

    /** Creates a fresh JetStream-backed queue for the given URI. */
    private ObservableQueue buildQueue(String queueURI) {
        return new JetStreamObservableQueue(
                conductorProperties, properties, QUEUE_TYPE, queueURI, scheduler, eventPublisher);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/config/JetStreamProperties.java | nats/src/main/java/com/netflix/conductor/contribs/queue/nats/config/JetStreamProperties.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.queue.nats.config;
import java.time.Duration;
import org.springframework.boot.context.properties.ConfigurationProperties;
import io.nats.client.Options;
/**
* @author andrey.stelmashenko@gmail.com
*/
/**
 * Spring configuration properties for the NATS JetStream event queue
 * (prefix {@code conductor.event-queues.jsm}).
 *
 * @author andrey.stelmashenko@gmail.com
 */
@ConfigurationProperties("conductor.event-queues.jsm")
public class JetStreamProperties {

    /** Prefix prepended to listener queue names. */
    private String listenerQueuePrefix = "";

    /** The durable subscriber name for the subscription. */
    private String durableName = "defaultQueue";

    /** JetStream storage backing for streams. */
    private String streamStorageType = "file";

    /** Maximum stream size in bytes; -1 means unlimited. */
    private long streamMaxBytes = -1;

    /** The NATS connection url. */
    private String url = Options.DEFAULT_URL;

    /** Poll interval/timeout used when fetching messages. */
    private Duration pollTimeDuration = Duration.ofMillis(100);

    /** WAIT tasks default queue group, to make subscription round-robin delivery to single sub. */
    private String defaultQueueGroup = "wait-group";

    /** Number of stream replicas. */
    private int replicas = 3;

    /** Maximum reconnect attempts; -1 means retry without limit. */
    private int maxReconnects = -1;

    /** How long the server waits for an ack before redelivering a message. */
    private Duration ackWait = Duration.ofSeconds(60);

    /** Maximum outstanding unacknowledged messages per consumer. */
    private long maxAckPending = 100;

    /** Maximum delivery attempts per message. */
    private int maxDeliver = 5;

    public String getListenerQueuePrefix() {
        return this.listenerQueuePrefix;
    }

    public void setListenerQueuePrefix(String value) {
        this.listenerQueuePrefix = value;
    }

    public String getDurableName() {
        return this.durableName;
    }

    public void setDurableName(String value) {
        this.durableName = value;
    }

    public String getStreamStorageType() {
        return this.streamStorageType;
    }

    public void setStreamStorageType(String value) {
        this.streamStorageType = value;
    }

    public long getStreamMaxBytes() {
        return this.streamMaxBytes;
    }

    public void setStreamMaxBytes(long value) {
        this.streamMaxBytes = value;
    }

    public String getUrl() {
        return this.url;
    }

    public void setUrl(String value) {
        this.url = value;
    }

    public Duration getPollTimeDuration() {
        return this.pollTimeDuration;
    }

    public void setPollTimeDuration(Duration value) {
        this.pollTimeDuration = value;
    }

    public String getDefaultQueueGroup() {
        return this.defaultQueueGroup;
    }

    public void setDefaultQueueGroup(String value) {
        this.defaultQueueGroup = value;
    }

    public int getReplicas() {
        return this.replicas;
    }

    public void setReplicas(int value) {
        this.replicas = value;
    }

    public int getMaxReconnects() {
        return this.maxReconnects;
    }

    public void setMaxReconnects(int value) {
        this.maxReconnects = value;
    }

    public Duration getAckWait() {
        return this.ackWait;
    }

    public void setAckWait(Duration value) {
        this.ackWait = value;
    }

    public long getMaxAckPending() {
        return this.maxAckPending;
    }

    public void setMaxAckPending(long value) {
        this.maxAckPending = value;
    }

    public int getMaxDeliver() {
        return this.maxDeliver;
    }

    public void setMaxDeliver(int value) {
        this.maxDeliver = value;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/nats/src/main/java/com/netflix/conductor/contribs/queue/nats/config/JetStreamConfiguration.java | nats/src/main/java/com/netflix/conductor/contribs/queue/nats/config/JetStreamConfiguration.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.queue.nats.config;
import java.util.EnumMap;
import java.util.Map;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.ApplicationEventPublisher;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.events.EventQueueProvider;
import com.netflix.conductor.core.events.queue.ObservableQueue;
import com.netflix.conductor.model.TaskModel;
import rx.Scheduler;
/**
* @author andrey.stelmashenko@gmail.com
*/
/**
 * Wires the JetStream event-queue provider when JetStream queues are enabled, and —
 * when JetStream is also the default event queue type — the terminal-status queues.
 *
 * @author andrey.stelmashenko@gmail.com
 */
@Configuration
@EnableConfigurationProperties(JetStreamProperties.class)
@ConditionalOnProperty(name = "conductor.event-queues.jsm.enabled", havingValue = "true")
public class JetStreamConfiguration {

    @Bean
    public EventQueueProvider jsmEventQueueProvider(
            JetStreamProperties properties,
            Scheduler scheduler,
            ConductorProperties conductorProperties,
            ApplicationEventPublisher eventPublisher) {
        return new JetStreamEventQueueProvider(
                conductorProperties, properties, scheduler, eventPublisher);
    }

    /** One queue per terminal task status (COMPLETED, FAILED). */
    @ConditionalOnProperty(name = "conductor.default-event-queue.type", havingValue = "jsm")
    @Bean
    public Map<TaskModel.Status, ObservableQueue> getQueues(
            EventQueueProvider jsmEventQueueProvider, JetStreamProperties properties) {
        Map<TaskModel.Status, ObservableQueue> queues = new EnumMap<>(TaskModel.Status.class);
        // Queue names are "<STATUS>" or "<STATUS>:<group>" when a group is configured.
        String groupSuffix = getQueueGroup(properties);
        for (TaskModel.Status status :
                new TaskModel.Status[] {TaskModel.Status.COMPLETED, TaskModel.Status.FAILED}) {
            queues.put(status, jsmEventQueueProvider.getQueue(status.name() + groupSuffix));
        }
        return queues;
    }

    /** Returns ":group" when a default queue group is configured, otherwise "". */
    private String getQueueGroup(final JetStreamProperties properties) {
        String group = properties.getDefaultQueueGroup();
        return (group == null || group.isBlank()) ? "" : ":" + group;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.