repo stringclasses 1k
values | file_url stringlengths 96 373 | file_path stringlengths 11 294 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 6
values | commit_sha stringclasses 1k
values | retrieved_at stringdate 2026-01-04 14:45:56 2026-01-04 18:30:23 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/nats-streaming/src/main/java/com/netflix/conductor/contribs/queue/stan/NATSStreamObservableQueue.java | nats-streaming/src/main/java/com/netflix/conductor/contribs/queue/stan/NATSStreamObservableQueue.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.queue.stan;
import java.util.UUID;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.nats.client.Connection;
import io.nats.streaming.*;
import rx.Scheduler;
/**
* @author Oleksiy Lysak
*/
public class NATSStreamObservableQueue extends NATSAbstractQueue {
private static final Logger LOGGER = LoggerFactory.getLogger(NATSStreamObservableQueue.class);
private final StreamingConnectionFactory fact;
private StreamingConnection conn;
private Subscription subs;
private final String durableName;
public NATSStreamObservableQueue(
String clusterId,
String natsUrl,
String durableName,
String queueURI,
Scheduler scheduler) {
super(queueURI, "nats_stream", scheduler);
Options.Builder options = new Options.Builder();
options.clusterId(clusterId);
options.clientId(UUID.randomUUID().toString());
options.natsUrl(natsUrl);
this.fact = new StreamingConnectionFactory(options.build());
this.durableName = durableName;
open();
}
@Override
public boolean isConnected() {
return (conn != null
&& conn.getNatsConnection() != null
&& Connection.Status.CONNECTED.equals(conn.getNatsConnection().getStatus()));
}
@Override
public void connect() {
try {
StreamingConnection temp = fact.createConnection();
LOGGER.info("Successfully connected for " + queueURI);
conn = temp;
} catch (Exception e) {
LOGGER.error("Unable to establish nats streaming connection for " + queueURI, e);
throw new RuntimeException(e);
}
}
@Override
public void subscribe() {
// do nothing if already subscribed
if (subs != null) {
return;
}
try {
ensureConnected();
SubscriptionOptions subscriptionOptions =
new SubscriptionOptions.Builder().durableName(durableName).build();
// Create subject/queue subscription if the queue has been provided
if (StringUtils.isNotEmpty(queue)) {
LOGGER.info(
"No subscription. Creating a queue subscription. subject={}, queue={}",
subject,
queue);
subs =
conn.subscribe(
subject,
queue,
msg -> onMessage(msg.getSubject(), msg.getData()),
subscriptionOptions);
} else {
LOGGER.info(
"No subscription. Creating a pub/sub subscription. subject={}", subject);
subs =
conn.subscribe(
subject,
msg -> onMessage(msg.getSubject(), msg.getData()),
subscriptionOptions);
}
} catch (Exception ex) {
LOGGER.error(
"Subscription failed with " + ex.getMessage() + " for queueURI " + queueURI,
ex);
}
}
@Override
public void publish(String subject, byte[] data) throws Exception {
ensureConnected();
conn.publish(subject, data);
}
@Override
public void closeSubs() {
if (subs != null) {
try {
subs.close(true);
} catch (Exception ex) {
LOGGER.error("closeSubs failed with " + ex.getMessage() + " for " + queueURI, ex);
}
subs = null;
}
}
@Override
public void closeConn() {
if (conn != null) {
try {
conn.close();
} catch (Exception ex) {
LOGGER.error("closeConn failed with " + ex.getMessage() + " for " + queueURI, ex);
}
conn = null;
}
}
@Override
public boolean rePublishIfNoAck() {
return false;
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/nats-streaming/src/main/java/com/netflix/conductor/contribs/queue/stan/NATSAbstractQueue.java | nats-streaming/src/main/java/com/netflix/conductor/contribs/queue/stan/NATSAbstractQueue.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.queue.stan;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.core.events.queue.ObservableQueue;
import io.nats.client.NUID;
import rx.Observable;
import rx.Scheduler;
/**
 * Base class for NATS-backed {@link ObservableQueue} implementations.
 *
 * <p>Responsibilities visible here: parsing the queue URI into subject and optional queue-group,
 * buffering received messages in a {@link LinkedBlockingQueue}, exposing them to conductor via an
 * RxJava polling pipeline ({@link #observe()}), and reconnecting automatically through a
 * scheduled {@code monitor} task. Transport specifics (plain NATS vs. NATS Streaming) are
 * delegated to the abstract hooks at the bottom of the class.
 *
 * @author Oleksiy Lysak
 */
public abstract class NATSAbstractQueue implements ObservableQueue {
    private static final Logger LOGGER = LoggerFactory.getLogger(NATSAbstractQueue.class);

    // Hand-off buffer between the NATS callback thread (producer, via onMessage) and the
    // Rx polling pipeline in observe() (consumer, via drainTo).
    protected LinkedBlockingQueue<Message> messages = new LinkedBlockingQueue<>();

    // Guards connection/subscription lifecycle transitions (observe/open/close/monitor).
    protected final Lock mu = new ReentrantLock();
    private final String queueType;

    // Single-thread scheduler running the reconnect monitor; (re)created in open(),
    // shut down in close().
    private ScheduledExecutorService execs;
    private final Scheduler scheduler;

    protected final String queueURI;
    protected final String subject;
    // Optional NATS queue-group name; null when the URI has no ":queue" suffix.
    protected String queue;

    // Indicates that observe() was called (event handler) and we must re-initiate the
    // subscription upon reconnection.
    private boolean observable;
    private boolean isOpened;
    private volatile boolean running;

    /**
     * @param queueURI either {@code "subject"} or {@code "subject:queueGroup"}
     * @param queueType short type tag returned by {@link #getType()} (e.g. "nats")
     * @param scheduler RxJava scheduler driving the 100ms polling interval in observe()
     */
    NATSAbstractQueue(String queueURI, String queueType, Scheduler scheduler) {
        this.queueURI = queueURI;
        this.queueType = queueType;
        this.scheduler = scheduler;
        // If queue specified (e.g. subject:queue) - split to subject & queue
        if (queueURI.contains(":")) {
            this.subject = queueURI.substring(0, queueURI.indexOf(':'));
            queue = queueURI.substring(queueURI.indexOf(':') + 1);
        } else {
            this.subject = queueURI;
            queue = null;
        }
        LOGGER.info(
                String.format(
                        "Initialized with queueURI=%s, subject=%s, queue=%s",
                        queueURI, subject, queue));
    }

    /**
     * Callback invoked by subclasses when a NATS message arrives: wraps the payload in a
     * conductor {@link Message} with a fresh NUID and enqueues it for the observe() pipeline.
     */
    void onMessage(String subject, byte[] data) {
        // NOTE(review): payload decoding uses the platform default charset — confirm UTF-8
        // is intended on all deployment platforms.
        String payload = new String(data);
        LOGGER.info(String.format("Received message for %s: %s", subject, payload));
        Message dstMsg = new Message();
        dstMsg.setId(NUID.nextGlobal());
        dstMsg.setPayload(payload);
        messages.add(dstMsg);
    }

    /**
     * Subscribes (under the lock) and returns an Observable that polls the internal buffer
     * every 100ms, draining all available messages per tick and emitting them downstream.
     * While the component is not running, ticks emit nothing.
     */
    @Override
    public Observable<Message> observe() {
        LOGGER.info("Observe invoked for queueURI " + queueURI);
        // Remember that someone is observing so monitor()/open() re-subscribe after reconnect.
        observable = true;
        mu.lock();
        try {
            subscribe();
        } finally {
            mu.unlock();
        }
        Observable.OnSubscribe<Message> onSubscribe =
                subscriber -> {
                    Observable<Long> interval =
                            Observable.interval(100, TimeUnit.MILLISECONDS, scheduler);
                    interval.flatMap(
                                    (Long x) -> {
                                        if (!isRunning()) {
                                            LOGGER.debug(
                                                    "Component stopped, skip listening for messages from NATS Queue");
                                            return Observable.from(Collections.emptyList());
                                        } else {
                                            // Drain everything buffered since the last tick.
                                            List<Message> available = new LinkedList<>();
                                            messages.drainTo(available);
                                            if (!available.isEmpty()) {
                                                // Build "id=payload,id=payload,..." purely for the log line.
                                                AtomicInteger count = new AtomicInteger(0);
                                                StringBuilder buffer = new StringBuilder();
                                                available.forEach(
                                                        msg -> {
                                                            buffer.append(msg.getId())
                                                                    .append("=")
                                                                    .append(msg.getPayload());
                                                            count.incrementAndGet();
                                                            if (count.get() < available.size()) {
                                                                buffer.append(",");
                                                            }
                                                        });
                                                LOGGER.info(
                                                        String.format(
                                                                "Batch from %s to conductor is %s",
                                                                subject, buffer.toString()));
                                            }
                                            return Observable.from(available);
                                        }
                                    })
                            .subscribe(subscriber::onNext, subscriber::onError);
                };
        return Observable.create(onSubscribe);
    }

    @Override
    public String getType() {
        return queueType;
    }

    @Override
    public String getName() {
        return queueURI;
    }

    @Override
    public String getURI() {
        return queueURI;
    }

    /** NATS has no explicit ack in this integration; nothing to report as failed. */
    @Override
    public List<String> ack(List<Message> messages) {
        return Collections.emptyList();
    }

    /** Unack timeouts are not supported by this transport; intentionally a no-op. */
    @Override
    public void setUnackTimeout(Message message, long unackTimeout) {}

    /** Number of messages currently buffered locally (not a server-side queue depth). */
    @Override
    public long size() {
        return messages.size();
    }

    /**
     * Publishes each message payload to the subject. The first failure aborts the batch by
     * rethrowing as a RuntimeException (messages already sent are not rolled back).
     */
    @Override
    public void publish(List<Message> messages) {
        messages.forEach(
                message -> {
                    try {
                        String payload = message.getPayload();
                        publish(subject, payload.getBytes());
                        LOGGER.info(String.format("Published message to %s: %s", subject, payload));
                    } catch (Exception ex) {
                        LOGGER.error(
                                "Failed to publish message "
                                        + message.getPayload()
                                        + " to "
                                        + subject,
                                ex);
                        throw new RuntimeException(ex);
                    }
                });
    }

    @Override
    public boolean rePublishIfNoAck() {
        return true;
    }

    /**
     * Stops the monitor, closes the subscription and connection, and marks the queue closed.
     * Safe to call repeatedly; open() can be used afterwards to resurrect the queue.
     */
    @Override
    public void close() {
        LOGGER.info("Closing connection for " + queueURI);
        mu.lock();
        try {
            if (execs != null) {
                execs.shutdownNow();
                execs = null;
            }
            closeSubs();
            closeConn();
            isOpened = false;
        } finally {
            mu.unlock();
        }
    }

    /**
     * Connects (best-effort), re-subscribes if observe() was previously called, and starts the
     * 500ms reconnect monitor. Connect failures are deliberately swallowed here: the monitor
     * task will keep retrying until the server becomes reachable.
     */
    public void open() {
        // do nothing if not closed
        // NOTE(review): isOpened is read before acquiring the lock; a concurrent open()/close()
        // could race here — confirm callers serialize lifecycle operations.
        if (isOpened) {
            return;
        }
        mu.lock();
        try {
            try {
                connect();
                // Re-initiate the subscription if one existed before.
                if (observable) {
                    subscribe();
                }
            } catch (Exception ignore) {
            }
            execs = Executors.newScheduledThreadPool(1);
            execs.scheduleAtFixedRate(this::monitor, 0, 500, TimeUnit.MILLISECONDS);
            isOpened = true;
        } finally {
            mu.unlock();
        }
    }

    /**
     * Periodic health check: when the connection is down, tears down subscription+connection
     * and rebuilds both (re-subscribing only if observe() had been called).
     */
    private void monitor() {
        if (isConnected()) {
            return;
        }
        LOGGER.error("Monitor invoked for " + queueURI);
        mu.lock();
        try {
            closeSubs();
            closeConn();
            // Connect
            connect();
            // Re-initiate the subscription if one existed before.
            if (observable) {
                subscribe();
            }
        } catch (Exception ex) {
            LOGGER.error("Monitor failed with " + ex.getMessage() + " for " + queueURI, ex);
        } finally {
            mu.unlock();
        }
    }

    public boolean isClosed() {
        return !isOpened;
    }

    /** Throws when there is no live connection; used as a precondition by subclasses. */
    void ensureConnected() {
        if (!isConnected()) {
            throw new RuntimeException("No nats connection");
        }
    }

    @Override
    public void start() {
        LOGGER.info("Started listening to {}:{}", getClass().getSimpleName(), queueURI);
        running = true;
    }

    @Override
    public void stop() {
        LOGGER.info("Stopped listening to {}:{}", getClass().getSimpleName(), queueURI);
        running = false;
    }

    @Override
    public boolean isRunning() {
        return running;
    }

    // Transport-specific hooks implemented by subclasses.
    abstract void connect();

    abstract boolean isConnected();

    abstract void publish(String subject, byte[] data) throws Exception;

    abstract void subscribe();

    abstract void closeSubs();

    abstract void closeConn();
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/nats-streaming/src/main/java/com/netflix/conductor/contribs/queue/stan/NATSObservableQueue.java | nats-streaming/src/main/java/com/netflix/conductor/contribs/queue/stan/NATSObservableQueue.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.queue.stan;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.nats.client.Connection;
import io.nats.client.Dispatcher;
import io.nats.client.Nats;
import io.nats.client.Subscription;
import rx.Scheduler;
/**
* @author Oleksiy Lysak
*/
public class NATSObservableQueue extends NATSAbstractQueue {
private static final Logger LOGGER = LoggerFactory.getLogger(NATSObservableQueue.class);
private Subscription subs;
private Connection conn;
public NATSObservableQueue(String queueURI, Scheduler scheduler) {
super(queueURI, "nats", scheduler);
open();
}
@Override
public boolean isConnected() {
return (conn != null && Connection.Status.CONNECTED.equals(conn.getStatus()));
}
@Override
public void connect() {
try {
Connection temp = Nats.connect();
LOGGER.info("Successfully connected for " + queueURI);
conn = temp;
} catch (Exception e) {
LOGGER.error("Unable to establish nats connection for " + queueURI, e);
throw new RuntimeException(e);
}
}
@Override
public void subscribe() {
// do nothing if already subscribed
if (subs != null) {
return;
}
try {
ensureConnected();
// Create subject/queue subscription if the queue has been provided
if (StringUtils.isNotEmpty(queue)) {
LOGGER.info(
"No subscription. Creating a queue subscription. subject={}, queue={}",
subject,
queue);
conn.createDispatcher(msg -> onMessage(msg.getSubject(), msg.getData()));
subs = conn.subscribe(subject, queue);
} else {
LOGGER.info(
"No subscription. Creating a pub/sub subscription. subject={}", subject);
conn.createDispatcher(msg -> onMessage(msg.getSubject(), msg.getData()));
subs = conn.subscribe(subject);
}
} catch (Exception ex) {
LOGGER.error(
"Subscription failed with " + ex.getMessage() + " for queueURI " + queueURI,
ex);
}
}
@Override
public void publish(String subject, byte[] data) throws Exception {
ensureConnected();
conn.publish(subject, data);
}
@Override
public void closeSubs() {
if (subs != null) {
try {
subs.unsubscribe();
} catch (Exception ex) {
LOGGER.error("closeSubs failed with " + ex.getMessage() + " for " + queueURI, ex);
}
subs = null;
}
}
@Override
public void closeConn() {
if (conn != null) {
try {
conn.close();
} catch (Exception ex) {
LOGGER.error("closeConn failed with " + ex.getMessage() + " for " + queueURI, ex);
}
conn = null;
}
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/nats-streaming/src/main/java/com/netflix/conductor/contribs/queue/stan/config/NATSEventQueueProvider.java | nats-streaming/src/main/java/com/netflix/conductor/contribs/queue/stan/config/NATSEventQueueProvider.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.queue.stan.config;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.core.env.Environment;
import org.springframework.lang.NonNull;
import com.netflix.conductor.contribs.queue.stan.NATSObservableQueue;
import com.netflix.conductor.core.events.EventQueueProvider;
import com.netflix.conductor.core.events.queue.ObservableQueue;
import rx.Scheduler;
/**
 * {@link EventQueueProvider} that hands out core-NATS backed queues, caching one
 * {@link NATSObservableQueue} instance per queue URI.
 *
 * @author Oleksiy Lysak
 */
public class NATSEventQueueProvider implements EventQueueProvider {
    private static final Logger LOGGER = LoggerFactory.getLogger(NATSEventQueueProvider.class);

    // One queue instance per URI; concurrent map because getQueue may be called from
    // multiple threads.
    protected Map<String, NATSObservableQueue> queues = new ConcurrentHashMap<>();
    private final Scheduler scheduler;

    public NATSEventQueueProvider(Environment environment, Scheduler scheduler) {
        this.scheduler = scheduler;
        LOGGER.info("NATS Event Queue Provider initialized...");
    }

    @Override
    public String getQueueType() {
        return "nats";
    }

    /**
     * Returns the cached queue for the URI, creating it on first use and reopening it
     * if it was closed in the meantime.
     */
    @Override
    @NonNull
    public ObservableQueue getQueue(String queueURI) {
        NATSObservableQueue observableQueue =
                queues.computeIfAbsent(queueURI, uri -> new NATSObservableQueue(uri, scheduler));
        if (observableQueue.isClosed()) {
            observableQueue.open();
        }
        return observableQueue;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/nats-streaming/src/main/java/com/netflix/conductor/contribs/queue/stan/config/NATSConfiguration.java | nats-streaming/src/main/java/com/netflix/conductor/contribs/queue/stan/config/NATSConfiguration.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.queue.stan.config;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.env.Environment;
import com.netflix.conductor.core.events.EventQueueProvider;
import rx.Scheduler;
/**
 * Spring auto-configuration for the core-NATS event queue integration; active only when
 * {@code conductor.event-queues.nats.enabled=true}.
 */
@Configuration
@ConditionalOnProperty(name = "conductor.event-queues.nats.enabled", havingValue = "true")
public class NATSConfiguration {

    /** Registers the provider that conductor's event system uses to resolve "nats" queues. */
    @Bean
    public EventQueueProvider natsEventQueueProvider(Environment environment, Scheduler scheduler) {
        return new NATSEventQueueProvider(environment, scheduler);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/nats-streaming/src/main/java/com/netflix/conductor/contribs/queue/stan/config/NATSStreamEventQueueProvider.java | nats-streaming/src/main/java/com/netflix/conductor/contribs/queue/stan/config/NATSStreamEventQueueProvider.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.queue.stan.config;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.lang.NonNull;
import com.netflix.conductor.contribs.queue.stan.NATSStreamObservableQueue;
import com.netflix.conductor.core.events.EventQueueProvider;
import com.netflix.conductor.core.events.queue.ObservableQueue;
import rx.Scheduler;
/**
 * {@link EventQueueProvider} that hands out NATS Streaming (STAN) backed queues, caching one
 * {@link NATSStreamObservableQueue} instance per queue URI.
 *
 * @author Oleksiy Lysak
 */
public class NATSStreamEventQueueProvider implements EventQueueProvider {
    private static final Logger LOGGER =
            LoggerFactory.getLogger(NATSStreamEventQueueProvider.class);

    // One queue instance per URI; concurrent map because getQueue may be called from
    // multiple threads.
    protected final Map<String, NATSStreamObservableQueue> queues = new ConcurrentHashMap<>();
    private final String durableName;
    private final String clusterId;
    private final String natsUrl;
    private final Scheduler scheduler;

    /**
     * @param properties NATS Streaming connection settings (cluster id, URL, durable name)
     * @param scheduler RxJava scheduler passed through to each created queue
     */
    public NATSStreamEventQueueProvider(NATSStreamProperties properties, Scheduler scheduler) {
        LOGGER.info("NATS Stream Event Queue Provider init");
        this.scheduler = scheduler;
        // Get NATS Streaming options
        clusterId = properties.getClusterId();
        durableName = properties.getDurableName();
        natsUrl = properties.getUrl();
        // Parameterized logging instead of eager string concatenation.
        LOGGER.info(
                "NATS Streaming clusterId={}, natsUrl={}, durableName={}",
                clusterId,
                natsUrl,
                durableName);
        LOGGER.info("NATS Stream Event Queue Provider initialized...");
    }

    @Override
    public String getQueueType() {
        return "nats_stream";
    }

    /**
     * Returns the cached queue for the URI, creating it on first use and reopening it
     * if it was closed in the meantime.
     */
    @Override
    @NonNull
    public ObservableQueue getQueue(String queueURI) {
        NATSStreamObservableQueue queue =
                queues.computeIfAbsent(
                        queueURI,
                        q ->
                                new NATSStreamObservableQueue(
                                        clusterId, natsUrl, durableName, queueURI, scheduler));
        if (queue.isClosed()) {
            queue.open();
        }
        return queue;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/nats-streaming/src/main/java/com/netflix/conductor/contribs/queue/stan/config/NATSStreamConfiguration.java | nats-streaming/src/main/java/com/netflix/conductor/contribs/queue/stan/config/NATSStreamConfiguration.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.queue.stan.config;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.lang3.StringUtils;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import com.netflix.conductor.contribs.queue.stan.NATSStreamObservableQueue;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.events.EventQueueProvider;
import com.netflix.conductor.core.events.queue.ObservableQueue;
import com.netflix.conductor.model.TaskModel;
import rx.Scheduler;
/**
 * Spring auto-configuration for the NATS Streaming event queue integration; active only when
 * {@code conductor.event-queues.nats-stream.enabled=true}.
 */
@Configuration
@EnableConfigurationProperties(NATSStreamProperties.class)
@ConditionalOnProperty(name = "conductor.event-queues.nats-stream.enabled", havingValue = "true")
public class NATSStreamConfiguration {

    /** Registers the provider that conductor's event system uses to resolve "nats_stream" queues. */
    @Bean
    public EventQueueProvider natsEventQueueProvider(
            NATSStreamProperties properties, Scheduler scheduler) {
        return new NATSStreamEventQueueProvider(properties, scheduler);
    }

    /**
     * Builds the default status-notification queues (COMPLETED / FAILED) used when
     * {@code conductor.default-event-queue.type=nats_stream}. Queue names are
     * {@code <prefix><STATUS>[:queueGroup]}.
     */
    @ConditionalOnProperty(name = "conductor.default-event-queue.type", havingValue = "nats_stream")
    @Bean
    public Map<TaskModel.Status, ObservableQueue> getQueues(
            ConductorProperties conductorProperties,
            NATSStreamProperties properties,
            Scheduler scheduler) {
        String stack = "";
        // Same semantics as "!= null && length() > 0", using the already-imported helper.
        if (StringUtils.isNotEmpty(conductorProperties.getStack())) {
            stack = conductorProperties.getStack() + "_";
        }
        // The prefix does not depend on the status — compute it once instead of once per
        // loop iteration as before.
        String queuePrefix =
                StringUtils.isBlank(properties.getListenerQueuePrefix())
                        ? conductorProperties.getAppId() + "_nats_stream_notify_" + stack
                        : properties.getListenerQueuePrefix();
        TaskModel.Status[] statuses =
                new TaskModel.Status[] {TaskModel.Status.COMPLETED, TaskModel.Status.FAILED};
        Map<TaskModel.Status, ObservableQueue> queues = new HashMap<>();
        for (TaskModel.Status status : statuses) {
            String queueName = queuePrefix + status.name() + getQueueGroup(properties);
            ObservableQueue queue =
                    new NATSStreamObservableQueue(
                            properties.getClusterId(),
                            properties.getUrl(),
                            properties.getDurableName(),
                            queueName,
                            scheduler);
            queues.put(status, queue);
        }
        return queues;
    }

    /** Returns ":group" for the configured default queue group, or "" when none is set. */
    private String getQueueGroup(final NATSStreamProperties properties) {
        if (properties.getDefaultQueueGroup() == null
                || properties.getDefaultQueueGroup().isBlank()) {
            return "";
        }
        return ":" + properties.getDefaultQueueGroup();
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/nats-streaming/src/main/java/com/netflix/conductor/contribs/queue/stan/config/NATSStreamProperties.java | nats-streaming/src/main/java/com/netflix/conductor/contribs/queue/stan/config/NATSStreamProperties.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.queue.stan.config;
import org.springframework.boot.context.properties.ConfigurationProperties;
import io.nats.client.Options;
/**
 * Configuration properties for the NATS Streaming (STAN) event queue integration, bound from
 * the {@code conductor.event-queues.nats-stream.*} namespace.
 */
@ConfigurationProperties("conductor.event-queues.nats-stream")
public class NATSStreamProperties {
    /** The cluster id of the STAN session */
    private String clusterId = "test-cluster";

    /** The durable subscriber name for the subscription; null means non-durable */
    private String durableName = null;

    /** The NATS connection url (defaults to the client library's default server URL) */
    private String url = Options.DEFAULT_URL;

    /** The prefix to be used for the default listener queues */
    private String listenerQueuePrefix = "";

    /** WAIT tasks default queue group, to make subscription round-robin delivery to single sub */
    private String defaultQueueGroup = "wait-group";

    public String getClusterId() {
        return clusterId;
    }

    public void setClusterId(String clusterId) {
        this.clusterId = clusterId;
    }

    public String getDurableName() {
        return durableName;
    }

    public void setDurableName(String durableName) {
        this.durableName = durableName;
    }

    public String getUrl() {
        return url;
    }

    public void setUrl(String url) {
        this.url = url;
    }

    public String getListenerQueuePrefix() {
        return listenerQueuePrefix;
    }

    public void setListenerQueuePrefix(String listenerQueuePrefix) {
        this.listenerQueuePrefix = listenerQueuePrefix;
    }

    public String getDefaultQueueGroup() {
        return defaultQueueGroup;
    }

    public void setDefaultQueueGroup(String defaultQueueGroup) {
        this.defaultQueueGroup = defaultQueueGroup;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/annotations/src/main/java/com/netflix/conductor/annotations/protogen/ProtoMessage.java | annotations/src/main/java/com/netflix/conductor/annotations/protogen/ProtoMessage.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.annotations.protogen;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * ProtoMessage annotates a given Java class so it becomes exposed via the GRPC API as a native
 * Protocol Buffers struct. The annotated class must be a POJO.
 *
 * <p>Retained at runtime so the protogen code generator can discover annotated classes via
 * reflection.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface ProtoMessage {
    /**
     * Sets whether the generated mapping code will contain a helper to translate the POJO for this
     * class into the equivalent ProtoBuf object.
     *
     * @return whether this class will generate a mapper to ProtoBuf objects
     */
    boolean toProto() default true;

    /**
     * Sets whether the generated mapping code will contain a helper to translate the ProtoBuf
     * object for this class into the equivalent POJO.
     *
     * @return whether this class will generate a mapper from ProtoBuf objects
     */
    boolean fromProto() default true;

    /**
     * Sets whether this is a wrapper class that will be used to encapsulate complex nested type
     * interfaces. Wrapper classes are not directly exposed by the ProtoBuf API and must be mapped
     * manually.
     *
     * @return whether this is a wrapper class
     */
    boolean wrapper() default false;
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/annotations/src/main/java/com/netflix/conductor/annotations/protogen/ProtoField.java | annotations/src/main/java/com/netflix/conductor/annotations/protogen/ProtoField.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.annotations.protogen;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * ProtoField annotates a field inside a struct with metadata on how to expose it on its
 * corresponding Protocol Buffers struct. For a field to be exposed in a ProtoBuf struct, the
 * containing struct must also be annotated with a {@link ProtoMessage} or {@link ProtoEnum} tag.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.FIELD)
public @interface ProtoField {
    /**
     * Mandatory. Sets the Protocol Buffer ID for this specific field. Once a field has been
     * annotated with a given ID, the ID can never change to a different value or the resulting
     * Protocol Buffer struct will not be backwards compatible.
     *
     * @return the numeric ID for the field
     */
    int id();
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/annotations/src/main/java/com/netflix/conductor/annotations/protogen/ProtoEnum.java | annotations/src/main/java/com/netflix/conductor/annotations/protogen/ProtoEnum.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.annotations.protogen;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* ProtoEnum annotates an enum type that will be exposed via the GRPC API as a native Protocol
* Buffers enum.
*/
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface ProtoEnum {}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/test/java/com/netflix/conductor/es6/dao/query/parser/TestExpression.java | es6-persistence/src/test/java/com/netflix/conductor/es6/dao/query/parser/TestExpression.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.dao.query.parser;
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import org.junit.Test;
import com.netflix.conductor.es6.dao.query.parser.internal.ConstValue;
import com.netflix.conductor.es6.dao.query.parser.internal.TestAbstractParser;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
public class TestExpression extends TestAbstractParser {
@Test
public void test() throws Exception {
String test =
"type='IMAGE' AND subType ='sdp' AND (metadata.width > 50 OR metadata.height > 50)";
InputStream inputStream =
new BufferedInputStream(new ByteArrayInputStream(test.getBytes()));
Expression expression = new Expression(inputStream);
assertTrue(expression.isBinaryExpr());
assertNull(expression.getGroupedExpression());
assertNotNull(expression.getNameValue());
NameValue nameValue = expression.getNameValue();
assertEquals("type", nameValue.getName().getName());
assertEquals("=", nameValue.getOp().getOperator());
assertEquals("\"IMAGE\"", nameValue.getValue().getValue());
Expression rightHandSide = expression.getRightHandSide();
assertNotNull(rightHandSide);
assertTrue(rightHandSide.isBinaryExpr());
nameValue = rightHandSide.getNameValue();
assertNotNull(nameValue); // subType = sdp
assertNull(rightHandSide.getGroupedExpression());
assertEquals("subType", nameValue.getName().getName());
assertEquals("=", nameValue.getOp().getOperator());
assertEquals("\"sdp\"", nameValue.getValue().getValue());
assertEquals("AND", rightHandSide.getOperator().getOperator());
rightHandSide = rightHandSide.getRightHandSide();
assertNotNull(rightHandSide);
assertFalse(rightHandSide.isBinaryExpr());
GroupedExpression groupedExpression = rightHandSide.getGroupedExpression();
assertNotNull(groupedExpression);
expression = groupedExpression.getExpression();
assertNotNull(expression);
assertTrue(expression.isBinaryExpr());
nameValue = expression.getNameValue();
assertNotNull(nameValue);
assertEquals("metadata.width", nameValue.getName().getName());
assertEquals(">", nameValue.getOp().getOperator());
assertEquals("50", nameValue.getValue().getValue());
assertEquals("OR", expression.getOperator().getOperator());
rightHandSide = expression.getRightHandSide();
assertNotNull(rightHandSide);
assertFalse(rightHandSide.isBinaryExpr());
nameValue = rightHandSide.getNameValue();
assertNotNull(nameValue);
assertEquals("metadata.height", nameValue.getName().getName());
assertEquals(">", nameValue.getOp().getOperator());
assertEquals("50", nameValue.getValue().getValue());
}
@Test
public void testWithSysConstants() throws Exception {
String test = "type='IMAGE' AND subType ='sdp' AND description IS null";
InputStream inputStream =
new BufferedInputStream(new ByteArrayInputStream(test.getBytes()));
Expression expression = new Expression(inputStream);
assertTrue(expression.isBinaryExpr());
assertNull(expression.getGroupedExpression());
assertNotNull(expression.getNameValue());
NameValue nameValue = expression.getNameValue();
assertEquals("type", nameValue.getName().getName());
assertEquals("=", nameValue.getOp().getOperator());
assertEquals("\"IMAGE\"", nameValue.getValue().getValue());
Expression rightHandSide = expression.getRightHandSide();
assertNotNull(rightHandSide);
assertTrue(rightHandSide.isBinaryExpr());
nameValue = rightHandSide.getNameValue();
assertNotNull(nameValue); // subType = sdp
assertNull(rightHandSide.getGroupedExpression());
assertEquals("subType", nameValue.getName().getName());
assertEquals("=", nameValue.getOp().getOperator());
assertEquals("\"sdp\"", nameValue.getValue().getValue());
assertEquals("AND", rightHandSide.getOperator().getOperator());
rightHandSide = rightHandSide.getRightHandSide();
assertNotNull(rightHandSide);
assertFalse(rightHandSide.isBinaryExpr());
GroupedExpression groupedExpression = rightHandSide.getGroupedExpression();
assertNull(groupedExpression);
nameValue = rightHandSide.getNameValue();
assertNotNull(nameValue);
assertEquals("description", nameValue.getName().getName());
assertEquals("IS", nameValue.getOp().getOperator());
ConstValue constValue = nameValue.getValue();
assertNotNull(constValue);
assertEquals(constValue.getSysConstant(), ConstValue.SystemConsts.NULL);
test = "description IS not null";
inputStream = new BufferedInputStream(new ByteArrayInputStream(test.getBytes()));
expression = new Expression(inputStream);
nameValue = expression.getNameValue();
assertNotNull(nameValue);
assertEquals("description", nameValue.getName().getName());
assertEquals("IS", nameValue.getOp().getOperator());
constValue = nameValue.getValue();
assertNotNull(constValue);
assertEquals(constValue.getSysConstant(), ConstValue.SystemConsts.NOT_NULL);
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/test/java/com/netflix/conductor/es6/dao/query/parser/internal/TestConstValue.java | es6-persistence/src/test/java/com/netflix/conductor/es6/dao/query/parser/internal/TestConstValue.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.dao.query.parser.internal;
import java.util.List;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
public class TestConstValue extends TestAbstractParser {
@Test
public void testStringConst() throws Exception {
String test = "'string value'";
String expected =
test.replaceAll(
"'", "\""); // Quotes are removed but then the result is double quoted.
ConstValue constValue = new ConstValue(getInputStream(test));
assertNotNull(constValue.getValue());
assertEquals(expected, constValue.getValue());
assertTrue(constValue.getValue() instanceof String);
test = "\"string value\"";
constValue = new ConstValue(getInputStream(test));
assertNotNull(constValue.getValue());
assertEquals(expected, constValue.getValue());
assertTrue(constValue.getValue() instanceof String);
}
@Test
public void testSystemConst() throws Exception {
String test = "null";
ConstValue constValue = new ConstValue(getInputStream(test));
assertNotNull(constValue.getValue());
assertTrue(constValue.getValue() instanceof String);
assertEquals(constValue.getSysConstant(), ConstValue.SystemConsts.NULL);
test = "not null";
constValue = new ConstValue(getInputStream(test));
assertNotNull(constValue.getValue());
assertEquals(constValue.getSysConstant(), ConstValue.SystemConsts.NOT_NULL);
}
@Test(expected = ParserException.class)
public void testInvalid() throws Exception {
String test = "'string value";
new ConstValue(getInputStream(test));
}
@Test
public void testNumConst() throws Exception {
String test = "12345.89";
ConstValue cv = new ConstValue(getInputStream(test));
assertNotNull(cv.getValue());
assertTrue(
cv.getValue()
instanceof
String); // Numeric values are stored as string as we are just passing thru
// them to ES
assertEquals(test, cv.getValue());
}
@Test
public void testRange() throws Exception {
String test = "50 AND 100";
Range range = new Range(getInputStream(test));
assertEquals("50", range.getLow());
assertEquals("100", range.getHigh());
}
@Test(expected = ParserException.class)
public void testBadRange() throws Exception {
String test = "50 AND";
new Range(getInputStream(test));
}
@Test
public void testArray() throws Exception {
String test = "(1, 3, 'name', 'value2')";
ListConst listConst = new ListConst(getInputStream(test));
List<Object> list = listConst.getList();
assertEquals(4, list.size());
assertTrue(list.contains("1"));
assertEquals("'value2'", list.get(3)); // Values are preserved as it is...
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/test/java/com/netflix/conductor/es6/dao/query/parser/internal/TestAbstractParser.java | es6-persistence/src/test/java/com/netflix/conductor/es6/dao/query/parser/internal/TestAbstractParser.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.dao.query.parser.internal;
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
public abstract class TestAbstractParser {
protected InputStream getInputStream(String expression) {
return new BufferedInputStream(new ByteArrayInputStream(expression.getBytes()));
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/test/java/com/netflix/conductor/es6/dao/query/parser/internal/TestName.java | es6-persistence/src/test/java/com/netflix/conductor/es6/dao/query/parser/internal/TestName.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.dao.query.parser.internal;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
public class TestName extends TestAbstractParser {
@Test
public void test() throws Exception {
String test = "metadata.en_US.lang ";
Name name = new Name(getInputStream(test));
String nameVal = name.getName();
assertNotNull(nameVal);
assertEquals(test.trim(), nameVal);
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/test/java/com/netflix/conductor/es6/dao/query/parser/internal/TestBooleanOp.java | es6-persistence/src/test/java/com/netflix/conductor/es6/dao/query/parser/internal/TestBooleanOp.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.dao.query.parser.internal;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
public class TestBooleanOp extends TestAbstractParser {
@Test
public void test() throws Exception {
String[] tests = new String[] {"AND", "OR"};
for (String test : tests) {
BooleanOp name = new BooleanOp(getInputStream(test));
String nameVal = name.getOperator();
assertNotNull(nameVal);
assertEquals(test, nameVal);
}
}
@Test(expected = ParserException.class)
public void testInvalid() throws Exception {
String test = "<";
BooleanOp name = new BooleanOp(getInputStream(test));
String nameVal = name.getOperator();
assertNotNull(nameVal);
assertEquals(test, nameVal);
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/test/java/com/netflix/conductor/es6/dao/query/parser/internal/TestComparisonOp.java | es6-persistence/src/test/java/com/netflix/conductor/es6/dao/query/parser/internal/TestComparisonOp.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.dao.query.parser.internal;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
public class TestComparisonOp extends TestAbstractParser {
@Test
public void test() throws Exception {
String[] tests = new String[] {"<", ">", "=", "!=", "IN", "BETWEEN", "STARTS_WITH"};
for (String test : tests) {
ComparisonOp name = new ComparisonOp(getInputStream(test));
String nameVal = name.getOperator();
assertNotNull(nameVal);
assertEquals(test, nameVal);
}
}
@Test(expected = ParserException.class)
public void testInvalidOp() throws Exception {
String test = "AND";
ComparisonOp name = new ComparisonOp(getInputStream(test));
String nameVal = name.getOperator();
assertNotNull(nameVal);
assertEquals(test, nameVal);
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/test/java/com/netflix/conductor/es6/dao/index/ElasticSearchRestDaoBaseTest.java | es6-persistence/src/test/java/com/netflix/conductor/es6/dao/index/ElasticSearchRestDaoBaseTest.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.dao.index;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import org.apache.http.HttpHost;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
import org.junit.After;
import org.junit.Before;
import org.springframework.retry.support.RetryTemplate;
abstract class ElasticSearchRestDaoBaseTest extends ElasticSearchTest {
protected RestClient restClient;
protected ElasticSearchRestDAOV6 indexDAO;
@Before
public void setup() throws Exception {
String httpHostAddress = container.getHttpHostAddress();
String host = httpHostAddress.split(":")[0];
int port = Integer.parseInt(httpHostAddress.split(":")[1]);
properties.setUrl("http://" + httpHostAddress);
RestClientBuilder restClientBuilder = RestClient.builder(new HttpHost(host, port, "http"));
restClient = restClientBuilder.build();
indexDAO =
new ElasticSearchRestDAOV6(
restClientBuilder, new RetryTemplate(), properties, objectMapper);
indexDAO.setup();
}
@After
public void tearDown() throws Exception {
deleteAllIndices();
if (restClient != null) {
restClient.close();
}
}
private void deleteAllIndices() throws IOException {
Response beforeResponse = restClient.performRequest("GET", "/_cat/indices");
Reader streamReader = new InputStreamReader(beforeResponse.getEntity().getContent());
BufferedReader bufferedReader = new BufferedReader(streamReader);
String line;
while ((line = bufferedReader.readLine()) != null) {
String[] fields = line.split("\\s");
String endpoint = String.format("/%s", fields[2]);
restClient.performRequest("DELETE", endpoint);
}
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/test/java/com/netflix/conductor/es6/dao/index/TestElasticSearchRestDAOV6Batch.java | es6-persistence/src/test/java/com/netflix/conductor/es6/dao/index/TestElasticSearchRestDAOV6Batch.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.dao.index;
import java.util.HashMap;
import java.util.concurrent.TimeUnit;
import org.junit.Test;
import org.springframework.test.context.TestPropertySource;
import com.netflix.conductor.common.metadata.tasks.Task.Status;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.TaskSummary;
import com.fasterxml.jackson.core.JsonProcessingException;
import static org.awaitility.Awaitility.await;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
@TestPropertySource(properties = "conductor.elasticsearch.indexBatchSize=2")
public class TestElasticSearchRestDAOV6Batch extends ElasticSearchRestDaoBaseTest {
@Test
public void indexTaskWithBatchSizeTwo() {
String correlationId = "some-correlation-id";
TaskSummary taskSummary = new TaskSummary();
taskSummary.setTaskId("some-task-id");
taskSummary.setWorkflowId("some-workflow-instance-id");
taskSummary.setTaskType("some-task-type");
taskSummary.setStatus(Status.FAILED);
try {
taskSummary.setInput(
objectMapper.writeValueAsString(
new HashMap<String, Object>() {
{
put("input_key", "input_value");
}
}));
} catch (JsonProcessingException e) {
throw new RuntimeException(e);
}
taskSummary.setCorrelationId(correlationId);
taskSummary.setTaskDefName("some-task-def-name");
taskSummary.setReasonForIncompletion("some-failure-reason");
indexDAO.indexTask(taskSummary);
indexDAO.indexTask(taskSummary);
await().atMost(5, TimeUnit.SECONDS)
.untilAsserted(
() -> {
SearchResult<String> result =
indexDAO.searchTasks(
"correlationId='" + correlationId + "'",
"*",
0,
10000,
null);
assertTrue(
"should return 1 or more search results",
result.getResults().size() > 0);
assertEquals(
"taskId should match the indexed task",
"some-task-id",
result.getResults().get(0));
});
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/test/java/com/netflix/conductor/es6/dao/index/TestElasticSearchDAOV6Batch.java | es6-persistence/src/test/java/com/netflix/conductor/es6/dao/index/TestElasticSearchDAOV6Batch.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.dao.index;
import java.util.HashMap;
import java.util.concurrent.TimeUnit;
import org.junit.Test;
import org.springframework.test.context.TestPropertySource;
import com.netflix.conductor.common.metadata.tasks.Task.Status;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.TaskSummary;
import com.fasterxml.jackson.core.JsonProcessingException;
import static org.awaitility.Awaitility.await;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
@TestPropertySource(properties = "conductor.elasticsearch.indexBatchSize=2")
public class TestElasticSearchDAOV6Batch extends ElasticSearchDaoBaseTest {
@Test
public void indexTaskWithBatchSizeTwo() {
String correlationId = "some-correlation-id";
TaskSummary taskSummary = new TaskSummary();
taskSummary.setTaskId("some-task-id");
taskSummary.setWorkflowId("some-workflow-instance-id");
taskSummary.setTaskType("some-task-type");
taskSummary.setStatus(Status.FAILED);
try {
taskSummary.setInput(
objectMapper.writeValueAsString(
new HashMap<String, Object>() {
{
put("input_key", "input_value");
}
}));
} catch (JsonProcessingException e) {
throw new RuntimeException(e);
}
taskSummary.setCorrelationId(correlationId);
taskSummary.setTaskDefName("some-task-def-name");
taskSummary.setReasonForIncompletion("some-failure-reason");
indexDAO.indexTask(taskSummary);
indexDAO.indexTask(taskSummary);
await().atMost(5, TimeUnit.SECONDS)
.untilAsserted(
() -> {
SearchResult<String> result =
indexDAO.searchTasks(
"correlationId='" + correlationId + "'",
"*",
0,
10000,
null);
assertTrue(
"should return 1 or more search results",
result.getResults().size() > 0);
assertEquals(
"taskId should match the indexed task",
"some-task-id",
result.getResults().get(0));
});
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/test/java/com/netflix/conductor/es6/dao/index/ElasticSearchDaoBaseTest.java | es6-persistence/src/test/java/com/netflix/conductor/es6/dao/index/ElasticSearchDaoBaseTest.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.dao.index;
import java.net.InetAddress;
import java.util.concurrent.ExecutionException;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.transport.client.PreBuiltTransportClient;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.springframework.retry.support.RetryTemplate;
abstract class ElasticSearchDaoBaseTest extends ElasticSearchTest {
protected TransportClient elasticSearchClient;
protected ElasticSearchDAOV6 indexDAO;
@Before
public void setup() throws Exception {
int mappedPort = container.getMappedPort(9300);
properties.setUrl("tcp://localhost:" + mappedPort);
Settings settings =
Settings.builder().put("client.transport.ignore_cluster_name", true).build();
elasticSearchClient =
new PreBuiltTransportClient(settings)
.addTransportAddress(
new TransportAddress(
InetAddress.getByName("localhost"), mappedPort));
indexDAO =
new ElasticSearchDAOV6(
elasticSearchClient, new RetryTemplate(), properties, objectMapper);
indexDAO.setup();
}
@AfterClass
public static void closeClient() {
container.stop();
}
@After
public void tearDown() {
deleteAllIndices();
if (elasticSearchClient != null) {
elasticSearchClient.close();
}
}
private void deleteAllIndices() {
ImmutableOpenMap<String, IndexMetaData> indices =
elasticSearchClient
.admin()
.cluster()
.prepareState()
.get()
.getState()
.getMetaData()
.getIndices();
indices.forEach(
cursor -> {
try {
elasticSearchClient
.admin()
.indices()
.delete(new DeleteIndexRequest(cursor.value.getIndex().getName()))
.get();
} catch (InterruptedException | ExecutionException e) {
throw new RuntimeException(e);
}
});
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/test/java/com/netflix/conductor/es6/dao/index/ElasticSearchTest.java | es6-persistence/src/test/java/com/netflix/conductor/es6/dao/index/ElasticSearchTest.java | /*
* Copyright 2021 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.dao.index;
import java.util.Map;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringRunner;
import org.testcontainers.elasticsearch.ElasticsearchContainer;
import org.testcontainers.utility.DockerImageName;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.es6.config.ElasticSearchProperties;
import com.fasterxml.jackson.databind.ObjectMapper;
@ContextConfiguration(
classes = {TestObjectMapperConfiguration.class, ElasticSearchTest.TestConfiguration.class})
@RunWith(SpringRunner.class)
@TestPropertySource(
properties = {"conductor.indexing.enabled=true", "conductor.elasticsearch.version=6"})
abstract class ElasticSearchTest {
@Configuration
static class TestConfiguration {
@Bean
public ElasticSearchProperties elasticSearchProperties() {
return new ElasticSearchProperties();
}
}
protected static final ElasticsearchContainer container =
new ElasticsearchContainer(
DockerImageName.parse(
"docker.elastic.co/elasticsearch/elasticsearch-oss")
.withTag("6.8.23")) // this should match the client version
// Resolve issue with es container not starting on m1/m2 macs
.withEnv(Map.of("bootstrap.system_call_filter", "false"));
@Autowired protected ObjectMapper objectMapper;
@Autowired protected ElasticSearchProperties properties;
@BeforeClass
public static void startServer() {
container.start();
}
@AfterClass
public static void stopServer() {
container.stop();
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/test/java/com/netflix/conductor/es6/dao/index/TestElasticSearchRestDAOV6.java | es6-persistence/src/test/java/com/netflix/conductor/es6/dao/index/TestElasticSearchRestDAOV6.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.dao.index;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.Set;
import java.util.TimeZone;
import java.util.UUID;
import java.util.function.Supplier;
import org.junit.Test;
import com.netflix.conductor.common.metadata.events.EventExecution;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.common.metadata.tasks.TaskExecLog;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.common.run.Workflow.WorkflowStatus;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.es6.utils.TestUtils;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.google.common.collect.ImmutableMap;
import static org.junit.Assert.*;
public class TestElasticSearchRestDAOV6 extends ElasticSearchRestDaoBaseTest {
private static final SimpleDateFormat SIMPLE_DATE_FORMAT = new SimpleDateFormat("yyyyMMWW");
private static final String INDEX_PREFIX = "conductor";
private static final String WORKFLOW_DOC_TYPE = "workflow";
private static final String TASK_DOC_TYPE = "task";
private static final String MSG_DOC_TYPE = "message";
private static final String EVENT_DOC_TYPE = "event";
private static final String LOG_INDEX_PREFIX = "task_log";
private boolean indexExists(final String index) throws IOException {
return indexDAO.doesResourceExist("/" + index);
}
private boolean doesMappingExist(final String index, final String mappingName)
throws IOException {
return indexDAO.doesResourceExist("/" + index + "/_mapping/" + mappingName);
}
@Test
public void assertInitialSetup() throws IOException {
SIMPLE_DATE_FORMAT.setTimeZone(TimeZone.getTimeZone("GMT"));
String workflowIndex = INDEX_PREFIX + "_" + WORKFLOW_DOC_TYPE;
String taskIndex = INDEX_PREFIX + "_" + TASK_DOC_TYPE;
String taskLogIndex =
INDEX_PREFIX + "_" + LOG_INDEX_PREFIX + "_" + SIMPLE_DATE_FORMAT.format(new Date());
String messageIndex =
INDEX_PREFIX + "_" + MSG_DOC_TYPE + "_" + SIMPLE_DATE_FORMAT.format(new Date());
String eventIndex =
INDEX_PREFIX + "_" + EVENT_DOC_TYPE + "_" + SIMPLE_DATE_FORMAT.format(new Date());
assertTrue("Index 'conductor_workflow' should exist", indexExists("conductor_workflow"));
assertTrue("Index 'conductor_task' should exist", indexExists("conductor_task"));
assertTrue("Index '" + taskLogIndex + "' should exist", indexExists(taskLogIndex));
assertTrue("Index '" + messageIndex + "' should exist", indexExists(messageIndex));
assertTrue("Index '" + eventIndex + "' should exist", indexExists(eventIndex));
assertTrue(
"Mapping 'workflow' for index 'conductor' should exist",
doesMappingExist(workflowIndex, WORKFLOW_DOC_TYPE));
assertTrue(
"Mapping 'task' for index 'conductor' should exist",
doesMappingExist(taskIndex, TASK_DOC_TYPE));
}
@Test
public void shouldIndexWorkflow() throws JsonProcessingException {
WorkflowSummary workflowSummary =
TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
indexDAO.indexWorkflow(workflowSummary);
assertWorkflowSummary(workflowSummary.getWorkflowId(), workflowSummary);
}
@Test
public void shouldIndexWorkflowAsync() throws Exception {
WorkflowSummary workflowSummary =
TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
indexDAO.asyncIndexWorkflow(workflowSummary).get();
assertWorkflowSummary(workflowSummary.getWorkflowId(), workflowSummary);
}
@Test
public void shouldRemoveWorkflow() {
WorkflowSummary workflowSummary =
TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
indexDAO.indexWorkflow(workflowSummary);
// wait for workflow to be indexed
List<String> workflows =
tryFindResults(() -> searchWorkflows(workflowSummary.getWorkflowId()), 1);
assertEquals(1, workflows.size());
indexDAO.removeWorkflow(workflowSummary.getWorkflowId());
workflows = tryFindResults(() -> searchWorkflows(workflowSummary.getWorkflowId()), 0);
assertTrue("Workflow was not removed.", workflows.isEmpty());
}
@Test
public void shouldAsyncRemoveWorkflow() throws Exception {
WorkflowSummary workflowSummary =
TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
indexDAO.indexWorkflow(workflowSummary);
// wait for workflow to be indexed
List<String> workflows =
tryFindResults(() -> searchWorkflows(workflowSummary.getWorkflowId()), 1);
assertEquals(1, workflows.size());
indexDAO.asyncRemoveWorkflow(workflowSummary.getWorkflowId()).get();
workflows = tryFindResults(() -> searchWorkflows(workflowSummary.getWorkflowId()), 0);
assertTrue("Workflow was not removed.", workflows.isEmpty());
}
@Test
public void shouldUpdateWorkflow() throws JsonProcessingException {
WorkflowSummary workflowSummary =
TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
indexDAO.indexWorkflow(workflowSummary);
indexDAO.updateWorkflow(
workflowSummary.getWorkflowId(),
new String[] {"status"},
new Object[] {WorkflowStatus.COMPLETED});
workflowSummary.setStatus(WorkflowStatus.COMPLETED);
assertWorkflowSummary(workflowSummary.getWorkflowId(), workflowSummary);
}
@Test
public void shouldAsyncUpdateWorkflow() throws Exception {
WorkflowSummary workflowSummary =
TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
indexDAO.indexWorkflow(workflowSummary);
indexDAO.asyncUpdateWorkflow(
workflowSummary.getWorkflowId(),
new String[] {"status"},
new Object[] {WorkflowStatus.FAILED})
.get();
workflowSummary.setStatus(WorkflowStatus.FAILED);
assertWorkflowSummary(workflowSummary.getWorkflowId(), workflowSummary);
}
@Test
public void shouldIndexTask() {
TaskSummary taskSummary = TestUtils.loadTaskSnapshot(objectMapper, "task_summary");
indexDAO.indexTask(taskSummary);
List<String> tasks = tryFindResults(() -> searchTasks(taskSummary));
assertEquals(taskSummary.getTaskId(), tasks.get(0));
}
@Test
public void shouldIndexTaskAsync() throws Exception {
TaskSummary taskSummary = TestUtils.loadTaskSnapshot(objectMapper, "task_summary");
indexDAO.asyncIndexTask(taskSummary).get();
List<String> tasks = tryFindResults(() -> searchTasks(taskSummary));
assertEquals(taskSummary.getTaskId(), tasks.get(0));
}
@Test
public void shouldRemoveTask() {
WorkflowSummary workflowSummary =
TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
indexDAO.indexWorkflow(workflowSummary);
// wait for workflow to be indexed
tryFindResults(() -> searchWorkflows(workflowSummary.getWorkflowId()), 1);
TaskSummary taskSummary =
TestUtils.loadTaskSnapshot(
objectMapper, "task_summary", workflowSummary.getWorkflowId());
indexDAO.indexTask(taskSummary);
// Wait for the task to be indexed
List<String> tasks = tryFindResults(() -> searchTasks(taskSummary), 1);
indexDAO.removeTask(workflowSummary.getWorkflowId(), taskSummary.getTaskId());
tasks = tryFindResults(() -> searchTasks(taskSummary), 0);
assertTrue("Task was not removed.", tasks.isEmpty());
}
@Test
public void shouldAsyncRemoveTask() throws Exception {
WorkflowSummary workflowSummary =
TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
indexDAO.indexWorkflow(workflowSummary);
// wait for workflow to be indexed
tryFindResults(() -> searchWorkflows(workflowSummary.getWorkflowId()), 1);
TaskSummary taskSummary =
TestUtils.loadTaskSnapshot(
objectMapper, "task_summary", workflowSummary.getWorkflowId());
indexDAO.indexTask(taskSummary);
// Wait for the task to be indexed
List<String> tasks = tryFindResults(() -> searchTasks(taskSummary), 1);
indexDAO.asyncRemoveTask(workflowSummary.getWorkflowId(), taskSummary.getTaskId()).get();
tasks = tryFindResults(() -> searchTasks(taskSummary), 0);
assertTrue("Task was not removed.", tasks.isEmpty());
}
@Test
public void shouldNotRemoveTaskWhenNotAssociatedWithWorkflow() {
TaskSummary taskSummary = TestUtils.loadTaskSnapshot(objectMapper, "task_summary");
indexDAO.indexTask(taskSummary);
// Wait for the task to be indexed
List<String> tasks = tryFindResults(() -> searchTasks(taskSummary), 1);
indexDAO.removeTask("InvalidWorkflow", taskSummary.getTaskId());
tasks = tryFindResults(() -> searchTasks(taskSummary), 0);
assertFalse("Task was removed.", tasks.isEmpty());
}
@Test
public void shouldNotAsyncRemoveTaskWhenNotAssociatedWithWorkflow() throws Exception {
TaskSummary taskSummary = TestUtils.loadTaskSnapshot(objectMapper, "task_summary");
indexDAO.indexTask(taskSummary);
// Wait for the task to be indexed
List<String> tasks = tryFindResults(() -> searchTasks(taskSummary), 1);
indexDAO.asyncRemoveTask("InvalidWorkflow", taskSummary.getTaskId()).get();
tasks = tryFindResults(() -> searchTasks(taskSummary), 0);
assertFalse("Task was removed.", tasks.isEmpty());
}
@Test
public void shouldAddTaskExecutionLogs() {
List<TaskExecLog> logs = new ArrayList<>();
String taskId = uuid();
logs.add(createLog(taskId, "log1"));
logs.add(createLog(taskId, "log2"));
logs.add(createLog(taskId, "log3"));
indexDAO.addTaskExecutionLogs(logs);
List<TaskExecLog> indexedLogs =
tryFindResults(() -> indexDAO.getTaskExecutionLogs(taskId), 3);
assertEquals(3, indexedLogs.size());
assertTrue("Not all logs was indexed", indexedLogs.containsAll(logs));
}
@Test
public void shouldAddTaskExecutionLogsAsync() throws Exception {
List<TaskExecLog> logs = new ArrayList<>();
String taskId = uuid();
logs.add(createLog(taskId, "log1"));
logs.add(createLog(taskId, "log2"));
logs.add(createLog(taskId, "log3"));
indexDAO.asyncAddTaskExecutionLogs(logs).get();
List<TaskExecLog> indexedLogs =
tryFindResults(() -> indexDAO.getTaskExecutionLogs(taskId), 3);
assertEquals(3, indexedLogs.size());
assertTrue("Not all logs was indexed", indexedLogs.containsAll(logs));
}
@Test
public void shouldAddMessage() {
String queue = "queue";
Message message1 = new Message(uuid(), "payload1", null);
Message message2 = new Message(uuid(), "payload2", null);
indexDAO.addMessage(queue, message1);
indexDAO.addMessage(queue, message2);
List<Message> indexedMessages = tryFindResults(() -> indexDAO.getMessages(queue), 2);
assertEquals(2, indexedMessages.size());
assertTrue(
"Not all messages was indexed",
indexedMessages.containsAll(Arrays.asList(message1, message2)));
}
@Test
public void shouldAddEventExecution() {
String event = "event";
EventExecution execution1 = createEventExecution(event);
EventExecution execution2 = createEventExecution(event);
indexDAO.addEventExecution(execution1);
indexDAO.addEventExecution(execution2);
List<EventExecution> indexedExecutions =
tryFindResults(() -> indexDAO.getEventExecutions(event), 2);
assertEquals(2, indexedExecutions.size());
assertTrue(
"Not all event executions was indexed",
indexedExecutions.containsAll(Arrays.asList(execution1, execution2)));
}
@Test
public void shouldAsyncAddEventExecution() throws Exception {
String event = "event2";
EventExecution execution1 = createEventExecution(event);
EventExecution execution2 = createEventExecution(event);
indexDAO.asyncAddEventExecution(execution1).get();
indexDAO.asyncAddEventExecution(execution2).get();
List<EventExecution> indexedExecutions =
tryFindResults(() -> indexDAO.getEventExecutions(event), 2);
assertEquals(2, indexedExecutions.size());
assertTrue(
"Not all event executions was indexed",
indexedExecutions.containsAll(Arrays.asList(execution1, execution2)));
}
@Test
public void shouldAddIndexPrefixToIndexTemplate() throws Exception {
String json = TestUtils.loadJsonResource("expected_template_task_log");
String content = indexDAO.loadTypeMappingSource("/template_task_log.json");
assertEquals(json, content);
}
@Test
public void shouldCountWorkflows() {
int counts = 1100;
for (int i = 0; i < counts; i++) {
WorkflowSummary workflowSummary =
TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
indexDAO.indexWorkflow(workflowSummary);
}
// wait for workflow to be indexed
long result = tryGetCount(() -> getWorkflowCount("template_workflow", "RUNNING"), counts);
assertEquals(counts, result);
}
@Test
public void shouldFindWorkflow() {
WorkflowSummary workflowSummary =
TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
indexDAO.indexWorkflow(workflowSummary);
// wait for workflow to be indexed
List<WorkflowSummary> workflows =
tryFindResults(() -> searchWorkflowSummary(workflowSummary.getWorkflowId()), 1);
assertEquals(1, workflows.size());
assertEquals(workflowSummary, workflows.get(0));
}
@Test
public void shouldFindTask() {
TaskSummary taskSummary = TestUtils.loadTaskSnapshot(objectMapper, "task_summary");
indexDAO.indexTask(taskSummary);
List<TaskSummary> tasks = tryFindResults(() -> searchTaskSummary(taskSummary));
assertEquals(1, tasks.size());
assertEquals(taskSummary, tasks.get(0));
}
private long tryGetCount(Supplier<Long> countFunction, int resultsCount) {
long result = 0;
for (int i = 0; i < 20; i++) {
result = countFunction.get();
if (result == resultsCount) {
return result;
}
try {
Thread.sleep(100);
} catch (InterruptedException e) {
throw new RuntimeException(e.getMessage(), e);
}
}
return result;
}
// Get total workflow counts given the name and status
private long getWorkflowCount(String workflowName, String status) {
return indexDAO.getWorkflowCount(
"status=\"" + status + "\" AND workflowType=\"" + workflowName + "\"", "*");
}
private void assertWorkflowSummary(String workflowId, WorkflowSummary summary)
throws JsonProcessingException {
assertEquals(summary.getWorkflowType(), indexDAO.get(workflowId, "workflowType"));
assertEquals(String.valueOf(summary.getVersion()), indexDAO.get(workflowId, "version"));
assertEquals(summary.getWorkflowId(), indexDAO.get(workflowId, "workflowId"));
assertEquals(summary.getCorrelationId(), indexDAO.get(workflowId, "correlationId"));
assertEquals(summary.getStartTime(), indexDAO.get(workflowId, "startTime"));
assertEquals(summary.getUpdateTime(), indexDAO.get(workflowId, "updateTime"));
assertEquals(summary.getEndTime(), indexDAO.get(workflowId, "endTime"));
assertEquals(summary.getStatus().name(), indexDAO.get(workflowId, "status"));
assertEquals(summary.getInput(), indexDAO.get(workflowId, "input"));
assertEquals(summary.getOutput(), indexDAO.get(workflowId, "output"));
assertEquals(
summary.getReasonForIncompletion(),
indexDAO.get(workflowId, "reasonForIncompletion"));
assertEquals(
String.valueOf(summary.getExecutionTime()),
indexDAO.get(workflowId, "executionTime"));
assertEquals(summary.getEvent(), indexDAO.get(workflowId, "event"));
assertEquals(
summary.getFailedReferenceTaskNames(),
indexDAO.get(workflowId, "failedReferenceTaskNames"));
assertEquals(
summary.getFailedTaskNames(),
objectMapper.readValue(indexDAO.get(workflowId, "failedTaskNames"), Set.class));
}
private <T> List<T> tryFindResults(Supplier<List<T>> searchFunction) {
return tryFindResults(searchFunction, 1);
}
private <T> List<T> tryFindResults(Supplier<List<T>> searchFunction, int resultsCount) {
List<T> result = Collections.emptyList();
for (int i = 0; i < 20; i++) {
result = searchFunction.get();
if (result.size() == resultsCount) {
return result;
}
try {
Thread.sleep(100);
} catch (InterruptedException e) {
throw new RuntimeException(e.getMessage(), e);
}
}
return result;
}
private List<String> searchWorkflows(String workflowId) {
return indexDAO.searchWorkflows(
"", "workflowId:\"" + workflowId + "\"", 0, 100, Collections.emptyList())
.getResults();
}
private List<WorkflowSummary> searchWorkflowSummary(String workflowId) {
return indexDAO.searchWorkflowSummary(
"", "workflowId:\"" + workflowId + "\"", 0, 100, Collections.emptyList())
.getResults();
}
private List<String> searchWorkflows(String workflowName, String status) {
List<String> sortOptions = new ArrayList<>();
sortOptions.add("startTime:DESC");
return indexDAO.searchWorkflows(
"status=\"" + status + "\" AND workflowType=\"" + workflowName + "\"",
"*",
0,
1000,
sortOptions)
.getResults();
}
private List<String> searchTasks(TaskSummary taskSummary) {
return indexDAO.searchTasks(
"",
"workflowId:\"" + taskSummary.getWorkflowId() + "\"",
0,
100,
Collections.emptyList())
.getResults();
}
private List<TaskSummary> searchTaskSummary(TaskSummary taskSummary) {
return indexDAO.searchTaskSummary(
"",
"workflowId:\"" + taskSummary.getWorkflowId() + "\"",
0,
100,
Collections.emptyList())
.getResults();
}
private TaskExecLog createLog(String taskId, String log) {
TaskExecLog taskExecLog = new TaskExecLog(log);
taskExecLog.setTaskId(taskId);
return taskExecLog;
}
private EventExecution createEventExecution(String event) {
EventExecution execution = new EventExecution(uuid(), uuid());
execution.setName("name");
execution.setEvent(event);
execution.setCreated(System.currentTimeMillis());
execution.setStatus(EventExecution.Status.COMPLETED);
execution.setAction(EventHandler.Action.Type.start_workflow);
execution.setOutput(ImmutableMap.of("a", 1, "b", 2, "c", 3));
return execution;
}
private String uuid() {
return UUID.randomUUID().toString();
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/test/java/com/netflix/conductor/es6/dao/index/TestElasticSearchDAOV6.java | es6-persistence/src/test/java/com/netflix/conductor/es6/dao/index/TestElasticSearchDAOV6.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.dao.index;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.function.Supplier;
import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsRequest;
import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest;
import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
import org.junit.Test;
import com.netflix.conductor.common.metadata.events.EventExecution;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.common.metadata.tasks.TaskExecLog;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.common.run.Workflow.WorkflowStatus;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.es6.utils.TestUtils;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.google.common.collect.ImmutableMap;
import static org.junit.Assert.*;
import static org.junit.Assert.assertFalse;
public class TestElasticSearchDAOV6 extends ElasticSearchDaoBaseTest {
private static final SimpleDateFormat SIMPLE_DATE_FORMAT = new SimpleDateFormat("yyyyMMWW");
private static final String INDEX_PREFIX = "conductor";
private static final String WORKFLOW_DOC_TYPE = "workflow";
private static final String TASK_DOC_TYPE = "task";
private static final String MSG_DOC_TYPE = "message";
private static final String EVENT_DOC_TYPE = "event";
private static final String LOG_INDEX_PREFIX = "task_log";
@Test
public void assertInitialSetup() {
SIMPLE_DATE_FORMAT.setTimeZone(TimeZone.getTimeZone("GMT"));
String workflowIndex = INDEX_PREFIX + "_" + WORKFLOW_DOC_TYPE;
String taskIndex = INDEX_PREFIX + "_" + TASK_DOC_TYPE;
String taskLogIndex =
INDEX_PREFIX + "_" + LOG_INDEX_PREFIX + "_" + SIMPLE_DATE_FORMAT.format(new Date());
String messageIndex =
INDEX_PREFIX + "_" + MSG_DOC_TYPE + "_" + SIMPLE_DATE_FORMAT.format(new Date());
String eventIndex =
INDEX_PREFIX + "_" + EVENT_DOC_TYPE + "_" + SIMPLE_DATE_FORMAT.format(new Date());
assertTrue("Index 'conductor_workflow' should exist", indexExists("conductor_workflow"));
assertTrue("Index 'conductor_task' should exist", indexExists("conductor_task"));
assertTrue("Index '" + taskLogIndex + "' should exist", indexExists(taskLogIndex));
assertTrue("Index '" + messageIndex + "' should exist", indexExists(messageIndex));
assertTrue("Index '" + eventIndex + "' should exist", indexExists(eventIndex));
assertTrue(
"Mapping 'workflow' for index 'conductor' should exist",
doesMappingExist(workflowIndex, WORKFLOW_DOC_TYPE));
assertTrue(
"Mapping 'task' for index 'conductor' should exist",
doesMappingExist(taskIndex, TASK_DOC_TYPE));
}
private boolean indexExists(final String index) {
IndicesExistsRequest request = new IndicesExistsRequest(index);
try {
return elasticSearchClient.admin().indices().exists(request).get().isExists();
} catch (InterruptedException | ExecutionException e) {
throw new RuntimeException(e);
}
}
private boolean doesMappingExist(final String index, final String mappingName) {
GetMappingsRequest request = new GetMappingsRequest().indices(index);
try {
GetMappingsResponse response =
elasticSearchClient.admin().indices().getMappings(request).get();
return response.getMappings().get(index).containsKey(mappingName);
} catch (InterruptedException | ExecutionException e) {
throw new RuntimeException(e);
}
}
@Test
public void shouldIndexWorkflow() throws JsonProcessingException {
WorkflowSummary workflow = TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
indexDAO.indexWorkflow(workflow);
assertWorkflowSummary(workflow.getWorkflowId(), workflow);
}
@Test
public void shouldIndexWorkflowAsync() throws Exception {
WorkflowSummary workflow = TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
indexDAO.asyncIndexWorkflow(workflow).get();
assertWorkflowSummary(workflow.getWorkflowId(), workflow);
}
@Test
public void shouldRemoveWorkflow() {
WorkflowSummary workflow = TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
indexDAO.indexWorkflow(workflow);
// wait for workflow to be indexed
List<String> workflows = tryFindResults(() -> searchWorkflows(workflow.getWorkflowId()), 1);
assertEquals(1, workflows.size());
indexDAO.removeWorkflow(workflow.getWorkflowId());
workflows = tryFindResults(() -> searchWorkflows(workflow.getWorkflowId()), 0);
assertTrue("Workflow was not removed.", workflows.isEmpty());
}
@Test
public void shouldAsyncRemoveWorkflow() throws Exception {
WorkflowSummary workflow = TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
indexDAO.indexWorkflow(workflow);
// wait for workflow to be indexed
List<String> workflows = tryFindResults(() -> searchWorkflows(workflow.getWorkflowId()), 1);
assertEquals(1, workflows.size());
indexDAO.asyncRemoveWorkflow(workflow.getWorkflowId()).get();
workflows = tryFindResults(() -> searchWorkflows(workflow.getWorkflowId()), 0);
assertTrue("Workflow was not removed.", workflows.isEmpty());
}
@Test
public void shouldUpdateWorkflow() throws JsonProcessingException {
WorkflowSummary workflow = TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
indexDAO.indexWorkflow(workflow);
indexDAO.updateWorkflow(
workflow.getWorkflowId(),
new String[] {"status"},
new Object[] {WorkflowStatus.COMPLETED});
workflow.setStatus(WorkflowStatus.COMPLETED);
assertWorkflowSummary(workflow.getWorkflowId(), workflow);
}
@Test
public void shouldAsyncUpdateWorkflow() throws Exception {
WorkflowSummary workflow = TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
indexDAO.indexWorkflow(workflow);
indexDAO.asyncUpdateWorkflow(
workflow.getWorkflowId(),
new String[] {"status"},
new Object[] {WorkflowStatus.FAILED})
.get();
workflow.setStatus(WorkflowStatus.FAILED);
assertWorkflowSummary(workflow.getWorkflowId(), workflow);
}
@Test
public void shouldIndexTask() {
TaskSummary taskSummary = TestUtils.loadTaskSnapshot(objectMapper, "task_summary");
indexDAO.indexTask(taskSummary);
List<String> tasks = tryFindResults(() -> searchTasks(taskSummary));
assertEquals(taskSummary.getTaskId(), tasks.get(0));
}
@Test
public void shouldIndexTaskAsync() throws Exception {
TaskSummary taskSummary = TestUtils.loadTaskSnapshot(objectMapper, "task_summary");
indexDAO.asyncIndexTask(taskSummary).get();
List<String> tasks = tryFindResults(() -> searchTasks(taskSummary));
assertEquals(taskSummary.getTaskId(), tasks.get(0));
}
@Test
public void shouldRemoveTask() {
WorkflowSummary workflowSummary =
TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
indexDAO.indexWorkflow(workflowSummary);
// wait for workflow to be indexed
tryFindResults(() -> searchWorkflows(workflowSummary.getWorkflowId()), 1);
TaskSummary taskSummary =
TestUtils.loadTaskSnapshot(
objectMapper, "task_summary", workflowSummary.getWorkflowId());
indexDAO.indexTask(taskSummary);
// Wait for the task to be indexed
List<String> tasks = tryFindResults(() -> searchTasks(taskSummary), 1);
indexDAO.removeTask(workflowSummary.getWorkflowId(), taskSummary.getTaskId());
tasks = tryFindResults(() -> searchTasks(taskSummary), 0);
assertTrue("Task was not removed.", tasks.isEmpty());
}
@Test
public void shouldAsyncRemoveTask() throws Exception {
WorkflowSummary workflowSummary =
TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
indexDAO.indexWorkflow(workflowSummary);
// wait for workflow to be indexed
tryFindResults(() -> searchWorkflows(workflowSummary.getWorkflowId()), 1);
TaskSummary taskSummary =
TestUtils.loadTaskSnapshot(
objectMapper, "task_summary", workflowSummary.getWorkflowId());
indexDAO.indexTask(taskSummary);
// Wait for the task to be indexed
List<String> tasks = tryFindResults(() -> searchTasks(taskSummary), 1);
indexDAO.asyncRemoveTask(workflowSummary.getWorkflowId(), taskSummary.getTaskId()).get();
tasks = tryFindResults(() -> searchTasks(taskSummary), 0);
assertTrue("Task was not removed.", tasks.isEmpty());
}
@Test
public void shouldNotRemoveTaskWhenNotAssociatedWithWorkflow() {
TaskSummary taskSummary = TestUtils.loadTaskSnapshot(objectMapper, "task_summary");
indexDAO.indexTask(taskSummary);
// Wait for the task to be indexed
List<String> tasks = tryFindResults(() -> searchTasks(taskSummary), 1);
indexDAO.removeTask("InvalidWorkflow", taskSummary.getTaskId());
tasks = tryFindResults(() -> searchTasks(taskSummary), 0);
assertFalse("Task was removed.", tasks.isEmpty());
}
@Test
public void shouldNotAsyncRemoveTaskWhenNotAssociatedWithWorkflow() throws Exception {
TaskSummary taskSummary = TestUtils.loadTaskSnapshot(objectMapper, "task_summary");
indexDAO.indexTask(taskSummary);
// Wait for the task to be indexed
List<String> tasks = tryFindResults(() -> searchTasks(taskSummary), 1);
indexDAO.asyncRemoveTask("InvalidWorkflow", taskSummary.getTaskId()).get();
tasks = tryFindResults(() -> searchTasks(taskSummary), 0);
assertFalse("Task was removed.", tasks.isEmpty());
}
@Test
public void shouldAddTaskExecutionLogs() {
List<TaskExecLog> logs = new ArrayList<>();
String taskId = uuid();
logs.add(createLog(taskId, "log1"));
logs.add(createLog(taskId, "log2"));
logs.add(createLog(taskId, "log3"));
indexDAO.addTaskExecutionLogs(logs);
List<TaskExecLog> indexedLogs =
tryFindResults(() -> indexDAO.getTaskExecutionLogs(taskId), 3);
assertEquals(3, indexedLogs.size());
assertTrue("Not all logs was indexed", indexedLogs.containsAll(logs));
}
@Test
public void shouldAddTaskExecutionLogsAsync() throws Exception {
List<TaskExecLog> logs = new ArrayList<>();
String taskId = uuid();
logs.add(createLog(taskId, "log1"));
logs.add(createLog(taskId, "log2"));
logs.add(createLog(taskId, "log3"));
indexDAO.asyncAddTaskExecutionLogs(logs).get();
List<TaskExecLog> indexedLogs =
tryFindResults(() -> indexDAO.getTaskExecutionLogs(taskId), 3);
assertEquals(3, indexedLogs.size());
assertTrue("Not all logs was indexed", indexedLogs.containsAll(logs));
}
@Test
public void shouldAddMessage() {
String queue = "queue";
Message message1 = new Message(uuid(), "payload1", null);
Message message2 = new Message(uuid(), "payload2", null);
indexDAO.addMessage(queue, message1);
indexDAO.addMessage(queue, message2);
List<Message> indexedMessages = tryFindResults(() -> indexDAO.getMessages(queue), 2);
assertEquals(2, indexedMessages.size());
assertTrue(
"Not all messages was indexed",
indexedMessages.containsAll(Arrays.asList(message1, message2)));
}
@Test
public void shouldAddEventExecution() {
String event = "event";
EventExecution execution1 = createEventExecution(event);
EventExecution execution2 = createEventExecution(event);
indexDAO.addEventExecution(execution1);
indexDAO.addEventExecution(execution2);
List<EventExecution> indexedExecutions =
tryFindResults(() -> indexDAO.getEventExecutions(event), 2);
assertEquals(2, indexedExecutions.size());
assertTrue(
"Not all event executions was indexed",
indexedExecutions.containsAll(Arrays.asList(execution1, execution2)));
}
@Test
public void shouldAsyncAddEventExecution() throws Exception {
String event = "event2";
EventExecution execution1 = createEventExecution(event);
EventExecution execution2 = createEventExecution(event);
indexDAO.asyncAddEventExecution(execution1).get();
indexDAO.asyncAddEventExecution(execution2).get();
List<EventExecution> indexedExecutions =
tryFindResults(() -> indexDAO.getEventExecutions(event), 2);
assertEquals(2, indexedExecutions.size());
assertTrue(
"Not all event executions was indexed",
indexedExecutions.containsAll(Arrays.asList(execution1, execution2)));
}
@Test
public void shouldAddIndexPrefixToIndexTemplate() throws Exception {
String json = TestUtils.loadJsonResource("expected_template_task_log");
String content = indexDAO.loadTypeMappingSource("/template_task_log.json");
assertEquals(json, content);
}
@Test
public void shouldCountWorkflows() {
int counts = 1100;
for (int i = 0; i < counts; i++) {
WorkflowSummary workflow =
TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
indexDAO.indexWorkflow(workflow);
}
// wait for workflow to be indexed
long result = tryGetCount(() -> getWorkflowCount("template_workflow", "RUNNING"), counts);
assertEquals(counts, result);
}
@Test
public void shouldFindWorkflow() {
WorkflowSummary workflowSummary =
TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
indexDAO.indexWorkflow(workflowSummary);
// wait for workflow to be indexed
List<WorkflowSummary> workflows =
tryFindResults(() -> searchWorkflowSummary(workflowSummary.getWorkflowId()), 1);
assertEquals(1, workflows.size());
assertEquals(workflowSummary, workflows.get(0));
}
@Test
public void shouldFindTask() {
TaskSummary taskSummary = TestUtils.loadTaskSnapshot(objectMapper, "task_summary");
indexDAO.indexTask(taskSummary);
List<TaskSummary> tasks = tryFindResults(() -> searchTaskSummary(taskSummary));
assertEquals(1, tasks.size());
assertEquals(taskSummary, tasks.get(0));
}
private long tryGetCount(Supplier<Long> countFunction, int resultsCount) {
long result = 0;
for (int i = 0; i < 20; i++) {
result = countFunction.get();
if (result == resultsCount) {
return result;
}
try {
Thread.sleep(100);
} catch (InterruptedException e) {
throw new RuntimeException(e.getMessage(), e);
}
}
return result;
}
// Get total workflow counts given the name and status
private long getWorkflowCount(String workflowName, String status) {
return indexDAO.getWorkflowCount(
"status=\"" + status + "\" AND workflowType=\"" + workflowName + "\"", "*");
}
private void assertWorkflowSummary(String workflowId, WorkflowSummary summary)
throws JsonProcessingException {
assertEquals(summary.getWorkflowType(), indexDAO.get(workflowId, "workflowType"));
assertEquals(String.valueOf(summary.getVersion()), indexDAO.get(workflowId, "version"));
assertEquals(summary.getWorkflowId(), indexDAO.get(workflowId, "workflowId"));
assertEquals(summary.getCorrelationId(), indexDAO.get(workflowId, "correlationId"));
assertEquals(summary.getStartTime(), indexDAO.get(workflowId, "startTime"));
assertEquals(summary.getUpdateTime(), indexDAO.get(workflowId, "updateTime"));
assertEquals(summary.getEndTime(), indexDAO.get(workflowId, "endTime"));
assertEquals(summary.getStatus().name(), indexDAO.get(workflowId, "status"));
assertEquals(summary.getInput(), indexDAO.get(workflowId, "input"));
assertEquals(summary.getOutput(), indexDAO.get(workflowId, "output"));
assertEquals(
summary.getReasonForIncompletion(),
indexDAO.get(workflowId, "reasonForIncompletion"));
assertEquals(
String.valueOf(summary.getExecutionTime()),
indexDAO.get(workflowId, "executionTime"));
assertEquals(summary.getEvent(), indexDAO.get(workflowId, "event"));
assertEquals(
summary.getFailedReferenceTaskNames(),
indexDAO.get(workflowId, "failedReferenceTaskNames"));
assertEquals(
summary.getFailedTaskNames(),
objectMapper.readValue(indexDAO.get(workflowId, "failedTaskNames"), Set.class));
}
/** Convenience overload that waits for exactly one search result. */
private <T> List<T> tryFindResults(Supplier<List<T>> searchFunction) {
    final int expectedResults = 1;
    return tryFindResults(searchFunction, expectedResults);
}
/**
 * Polls {@code searchFunction} until it returns exactly {@code resultsCount} results, retrying
 * up to 20 times with a 100ms pause between attempts (~2s max wait for async indexing).
 *
 * @param searchFunction supplier performing the index search
 * @param resultsCount number of results to wait for
 * @return the results once the expected count is reached, otherwise the last observed results
 */
private <T> List<T> tryFindResults(Supplier<List<T>> searchFunction, int resultsCount) {
    List<T> result = Collections.emptyList();
    for (int i = 0; i < 20; i++) {
        result = searchFunction.get();
        if (result.size() == resultsCount) {
            return result;
        }
        try {
            Thread.sleep(100);
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers can still observe the interruption.
            Thread.currentThread().interrupt();
            throw new RuntimeException(e.getMessage(), e);
        }
    }
    return result;
}
/** Searches the index for workflow ids whose workflowId field matches the given id. */
private List<String> searchWorkflows(String workflowId) {
    String query = "workflowId:\"" + workflowId + "\"";
    return indexDAO.searchWorkflows("", query, 0, 100, Collections.emptyList()).getResults();
}
/** Searches the index for workflow summaries whose workflowId field matches the given id. */
private List<WorkflowSummary> searchWorkflowSummary(String workflowId) {
    String query = "workflowId:\"" + workflowId + "\"";
    return indexDAO.searchWorkflowSummary("", query, 0, 100, Collections.emptyList())
            .getResults();
}
/** Searches the index for task ids belonging to the summary's workflow. */
private List<String> searchTasks(TaskSummary taskSummary) {
    String query = "workflowId:\"" + taskSummary.getWorkflowId() + "\"";
    return indexDAO.searchTasks("", query, 0, 100, Collections.emptyList()).getResults();
}
/** Searches the index for task summaries belonging to the summary's workflow. */
private List<TaskSummary> searchTaskSummary(TaskSummary taskSummary) {
    String query = "workflowId:\"" + taskSummary.getWorkflowId() + "\"";
    return indexDAO.searchTaskSummary("", query, 0, 100, Collections.emptyList()).getResults();
}
/** Builds a TaskExecLog carrying the given message and attached to the given task id. */
private TaskExecLog createLog(String taskId, String log) {
    TaskExecLog execLog = new TaskExecLog(log);
    execLog.setTaskId(taskId);
    return execLog;
}
/** Builds a COMPLETED start_workflow EventExecution for the given event, with random ids. */
private EventExecution createEventExecution(String event) {
    EventExecution eventExecution = new EventExecution(uuid(), uuid());
    eventExecution.setName("name");
    eventExecution.setEvent(event);
    eventExecution.setStatus(EventExecution.Status.COMPLETED);
    eventExecution.setAction(EventHandler.Action.Type.start_workflow);
    eventExecution.setCreated(System.currentTimeMillis());
    eventExecution.setOutput(ImmutableMap.of("a", 1, "b", 2, "c", 3));
    return eventExecution;
}
/** Generates a fresh random UUID string, used for unique ids in test fixtures. */
private String uuid() {
    return UUID.randomUUID().toString();
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/test/java/com/netflix/conductor/es6/utils/TestUtils.java | es6-persistence/src/test/java/com/netflix/conductor/es6/utils/TestUtils.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.utils;
import java.nio.charset.StandardCharsets;
import org.apache.commons.io.FileUtils;
import org.springframework.util.ResourceUtils;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.core.utils.IDGenerator;
import com.fasterxml.jackson.databind.ObjectMapper;
/**
 * Test helpers for loading workflow/task summary snapshots from classpath JSON resources,
 * substituting a workflow id for the {@code WORKFLOW_INSTANCE_ID} placeholder before
 * deserialization.
 */
public class TestUtils {

    private static final String WORKFLOW_INSTANCE_ID_PLACEHOLDER = "WORKFLOW_INSTANCE_ID";

    /**
     * Loads a {@link WorkflowSummary} snapshot, substituting a freshly generated workflow id.
     *
     * @param objectMapper mapper used for deserialization
     * @param resourceFileName classpath resource name without the ".json" suffix
     */
    public static WorkflowSummary loadWorkflowSnapshot(
            ObjectMapper objectMapper, String resourceFileName) {
        return loadSnapshot(
                objectMapper, resourceFileName, new IDGenerator().generate(), WorkflowSummary.class);
    }

    /** Loads a {@link TaskSummary} snapshot, substituting a freshly generated workflow id. */
    public static TaskSummary loadTaskSnapshot(ObjectMapper objectMapper, String resourceFileName) {
        return loadSnapshot(
                objectMapper, resourceFileName, new IDGenerator().generate(), TaskSummary.class);
    }

    /** Loads a {@link TaskSummary} snapshot, substituting the caller-provided workflow id. */
    public static TaskSummary loadTaskSnapshot(
            ObjectMapper objectMapper, String resourceFileName, String workflowId) {
        return loadSnapshot(objectMapper, resourceFileName, workflowId, TaskSummary.class);
    }

    /**
     * Reads the classpath resource {@code resourceFileName + ".json"} as a UTF-8 string.
     *
     * @throws RuntimeException wrapping any I/O failure
     */
    public static String loadJsonResource(String resourceFileName) {
        try {
            return FileUtils.readFileToString(
                    ResourceUtils.getFile("classpath:" + resourceFileName + ".json"),
                    StandardCharsets.UTF_8);
        } catch (Exception e) {
            throw new RuntimeException(e.getMessage(), e);
        }
    }

    // Shared implementation for the three snapshot loaders above:
    // load the resource, substitute the workflow id, and deserialize into the target type.
    private static <T> T loadSnapshot(
            ObjectMapper objectMapper, String resourceFileName, String workflowId, Class<T> type) {
        try {
            String content =
                    loadJsonResource(resourceFileName)
                            .replace(WORKFLOW_INSTANCE_ID_PLACEHOLDER, workflowId);
            return objectMapper.readValue(content, type);
        } catch (Exception e) {
            throw new RuntimeException(e.getMessage(), e);
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/test/java/com/netflix/conductor/es6/config/ElasticSearchPropertiesTest.java | es6-persistence/src/test/java/com/netflix/conductor/es6/config/ElasticSearchPropertiesTest.java | /*
* Copyright 2025 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.config;
import org.junit.Test;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
/** Verifies the default and configurability of the waitForIndexRefresh property. */
public class ElasticSearchPropertiesTest {

    @Test
    public void testWaitForIndexRefreshDefaultsToFalse() {
        ElasticSearchProperties props = new ElasticSearchProperties();
        // A freshly constructed properties object must not wait for index refresh.
        assertFalse(
                "waitForIndexRefresh should default to false for v3.21.19 performance",
                props.isWaitForIndexRefresh());
    }

    @Test
    public void testWaitForIndexRefreshCanBeEnabled() {
        ElasticSearchProperties props = new ElasticSearchProperties();
        props.setWaitForIndexRefresh(true);
        // The setter must be honored by the corresponding getter.
        assertTrue(
                "waitForIndexRefresh should be configurable to true",
                props.isWaitForIndexRefresh());
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/main/java/com/netflix/conductor/es6/dao/query/parser/FilterProvider.java | es6-persistence/src/main/java/com/netflix/conductor/es6/dao/query/parser/FilterProvider.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.dao.query.parser;
import org.elasticsearch.index.query.QueryBuilder;
/** Supplies the Elasticsearch query/filter representation of a parsed expression node. */
public interface FilterProvider {

    /**
     * @return the Elasticsearch {@link QueryBuilder} equivalent of this node
     */
    QueryBuilder getFilterBuilder();
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/main/java/com/netflix/conductor/es6/dao/query/parser/Expression.java | es6-persistence/src/main/java/com/netflix/conductor/es6/dao/query/parser/Expression.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.dao.query.parser;
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import com.netflix.conductor.es6.dao.query.parser.internal.AbstractNode;
import com.netflix.conductor.es6.dao.query.parser.internal.BooleanOp;
import com.netflix.conductor.es6.dao.query.parser.internal.ParserException;
/**
 * Boolean expression node of the query grammar: either a single {@link NameValue} comparison or
 * a parenthesized {@link GroupedExpression}, optionally followed by a boolean operator
 * (AND / OR) and a right-hand-side {@code Expression}.
 */
public class Expression extends AbstractNode implements FilterProvider {

    // Exactly one of nameVal / ge is non-null after parsing (the left-hand side).
    private NameValue nameVal;

    private GroupedExpression ge;

    // Non-null only for binary expressions.
    private BooleanOp op;

    private Expression rhs;

    public Expression(InputStream is) throws ParserException {
        super(is);
    }

    @Override
    protected void _parse() throws Exception {
        byte[] peeked = peek(1);
        if (peeked[0] == '(') {
            this.ge = new GroupedExpression(is);
        } else {
            this.nameVal = new NameValue(is);
        }
        // Peek up to 3 bytes: "OR" needs 2, "AND" needs 3.
        peeked = peek(3);
        if (isBoolOpr(peeked)) {
            // we have an expression next
            this.op = new BooleanOp(is);
            this.rhs = new Expression(is);
        }
    }

    /** @return true when this expression has a boolean operator and a right-hand side */
    public boolean isBinaryExpr() {
        return this.op != null;
    }

    public BooleanOp getOperator() {
        return this.op;
    }

    public Expression getRightHandSide() {
        return this.rhs;
    }

    public boolean isNameValue() {
        return this.nameVal != null;
    }

    public NameValue getNameValue() {
        return this.nameVal;
    }

    public GroupedExpression getGroupedExpression() {
        return this.ge;
    }

    @Override
    public QueryBuilder getFilterBuilder() {
        QueryBuilder lhs = null;
        if (nameVal != null) {
            lhs = nameVal.getFilterBuilder();
        } else {
            lhs = ge.getFilterBuilder();
        }
        if (this.isBinaryExpr()) {
            QueryBuilder rhsFilter = rhs.getFilterBuilder();
            // AND maps to bool/must, OR maps to bool/should.
            if (this.op.isAnd()) {
                return QueryBuilders.boolQuery().must(lhs).must(rhsFilter);
            } else {
                return QueryBuilders.boolQuery().should(lhs).should(rhsFilter);
            }
        } else {
            return lhs;
        }
    }

    @Override
    public String toString() {
        if (isBinaryExpr()) {
            return "" + (nameVal == null ? ge : nameVal) + op + rhs;
        } else {
            return "" + (nameVal == null ? ge : nameVal);
        }
    }

    /**
     * Parses an expression from a string.
     *
     * @param value the query expression text
     * @throws ParserException when the text is not a valid expression
     */
    public static Expression fromString(String value) throws ParserException {
        // Decode with an explicit charset: the no-arg getBytes() uses the platform default,
        // which could mis-encode non-ASCII query text on some JVMs.
        return new Expression(
                new BufferedInputStream(
                        new ByteArrayInputStream(
                                value.getBytes(java.nio.charset.StandardCharsets.UTF_8))));
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/main/java/com/netflix/conductor/es6/dao/query/parser/NameValue.java | es6-persistence/src/main/java/com/netflix/conductor/es6/dao/query/parser/NameValue.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.dao.query.parser;
import java.io.InputStream;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import com.netflix.conductor.es6.dao.query.parser.internal.AbstractNode;
import com.netflix.conductor.es6.dao.query.parser.internal.ComparisonOp;
import com.netflix.conductor.es6.dao.query.parser.internal.ComparisonOp.Operators;
import com.netflix.conductor.es6.dao.query.parser.internal.ConstValue;
import com.netflix.conductor.es6.dao.query.parser.internal.ListConst;
import com.netflix.conductor.es6.dao.query.parser.internal.Name;
import com.netflix.conductor.es6.dao.query.parser.internal.ParserException;
import com.netflix.conductor.es6.dao.query.parser.internal.Range;
/**
*
*
* <pre>
* Represents an expression of the form as below:
* key OPR value
* OPR is the comparison operator which could be one of the following:
* >, <, = , !=, IN, BETWEEN
* </pre>
*/
public class NameValue extends AbstractNode implements FilterProvider {

    private Name name;

    private ComparisonOp op;

    // Populated for single-value operators (=, !=, >, <, IS, STARTS_WITH).
    private ConstValue value;

    // Populated only for BETWEEN.
    private Range range;

    // Populated only for IN.
    private ListConst valueList;

    public NameValue(InputStream is) throws ParserException {
        super(is);
    }

    @Override
    protected void _parse() throws Exception {
        this.name = new Name(is);
        this.op = new ComparisonOp(is);
        if (this.op.getOperator().equals(Operators.BETWEEN.value())) {
            this.range = new Range(is);
        }
        // NOTE(review): for BETWEEN this is not an else-if, so a ConstValue is still parsed
        // after the range; getFilterBuilder() only uses 'range' for BETWEEN, so this appears
        // harmless, but confirm the extra parse never consumes needed input.
        if (this.op.getOperator().equals(Operators.IN.value())) {
            this.valueList = new ListConst(is);
        } else {
            this.value = new ConstValue(is);
        }
    }

    @Override
    public String toString() {
        return "" + name + op + value;
    }

    /**
     * @return the name
     */
    public Name getName() {
        return name;
    }

    /**
     * @return the op
     */
    public ComparisonOp getOp() {
        return op;
    }

    /**
     * @return the value
     */
    public ConstValue getValue() {
        return value;
    }

    /**
     * Translates this comparison into the equivalent Elasticsearch query.
     *
     * @throws IllegalStateException for operators with no mapping (should not happen for
     *     operators produced by {@link ComparisonOp})
     */
    @Override
    public QueryBuilder getFilterBuilder() {
        if (op.getOperator().equals(Operators.EQUALS.value())) {
            return QueryBuilders.queryStringQuery(
                    name.getName() + ":" + value.getValue().toString());
        } else if (op.getOperator().equals(Operators.BETWEEN.value())) {
            return QueryBuilders.rangeQuery(name.getName())
                    .from(range.getLow())
                    .to(range.getHigh());
        } else if (op.getOperator().equals(Operators.IN.value())) {
            return QueryBuilders.termsQuery(name.getName(), valueList.getList());
        } else if (op.getOperator().equals(Operators.NOT_EQUALS.value())) {
            return QueryBuilders.queryStringQuery(
                    "NOT " + name.getName() + ":" + value.getValue().toString());
        } else if (op.getOperator().equals(Operators.GREATER_THAN.value())) {
            // Exclusive lower bound (includeLower(false)).
            return QueryBuilders.rangeQuery(name.getName())
                    .from(value.getValue())
                    .includeLower(false)
                    .includeUpper(false);
        } else if (op.getOperator().equals(Operators.IS.value())) {
            // NOTE(review): "IS NULL" negates (matchAll AND NOT exists) and "IS NOT NULL"
            // negates (matchAll AND exists) — the double negation reads inverted relative to
            // the operator names; confirm against actual query results before changing.
            if (value.getSysConstant().equals(ConstValue.SystemConsts.NULL)) {
                return QueryBuilders.boolQuery()
                        .mustNot(
                                QueryBuilders.boolQuery()
                                        .must(QueryBuilders.matchAllQuery())
                                        .mustNot(QueryBuilders.existsQuery(name.getName())));
            } else if (value.getSysConstant().equals(ConstValue.SystemConsts.NOT_NULL)) {
                return QueryBuilders.boolQuery()
                        .mustNot(
                                QueryBuilders.boolQuery()
                                        .must(QueryBuilders.matchAllQuery())
                                        .must(QueryBuilders.existsQuery(name.getName())));
            }
        } else if (op.getOperator().equals(Operators.LESS_THAN.value())) {
            // Exclusive upper bound (includeUpper(false)).
            return QueryBuilders.rangeQuery(name.getName())
                    .to(value.getValue())
                    .includeLower(false)
                    .includeUpper(false);
        } else if (op.getOperator().equals(Operators.STARTS_WITH.value())) {
            return QueryBuilders.prefixQuery(name.getName(), value.getUnquotedValue());
        }
        throw new IllegalStateException("Incorrect/unsupported operators");
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/main/java/com/netflix/conductor/es6/dao/query/parser/GroupedExpression.java | es6-persistence/src/main/java/com/netflix/conductor/es6/dao/query/parser/GroupedExpression.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.dao.query.parser;
import java.io.InputStream;
import org.elasticsearch.index.query.QueryBuilder;
import com.netflix.conductor.es6.dao.query.parser.internal.AbstractNode;
import com.netflix.conductor.es6.dao.query.parser.internal.ParserException;
/** A parenthesized sub-expression of the form '(' expression ')'. */
public class GroupedExpression extends AbstractNode implements FilterProvider {

    private Expression expression;

    public GroupedExpression(InputStream is) throws ParserException {
        super(is);
    }

    @Override
    protected void _parse() throws Exception {
        // Consume the opening parenthesis, the inner expression, then the closing one.
        assertExpected(read(1), "(");
        this.expression = new Expression(is);
        assertExpected(read(1), ")");
    }

    @Override
    public String toString() {
        return "(" + expression + ")";
    }

    /**
     * @return the expression
     */
    public Expression getExpression() {
        return expression;
    }

    /** Delegates to the inner expression's filter. */
    @Override
    public QueryBuilder getFilterBuilder() {
        return expression.getFilterBuilder();
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/main/java/com/netflix/conductor/es6/dao/query/parser/internal/ConstValue.java | es6-persistence/src/main/java/com/netflix/conductor/es6/dao/query/parser/internal/ConstValue.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.dao.query.parser.internal;
import java.io.InputStream;
/**
* Constant value can be:
*
* <ol>
* <li>List of values (a,b,c)
* <li>Range of values (m AND n)
* <li>A value (x)
* <li>A value is either a string or a number
* </ol>
*/
public class ConstValue extends AbstractNode {

    /** The two recognized system constants used with the IS operator. */
    public enum SystemConsts {
        NULL("null"),
        NOT_NULL("not null");

        private final String value;

        SystemConsts(String value) {
            this.value = value;
        }

        public String value() {
            return value;
        }
    }

    private static final String QUOTE = "\"";

    // Either a quoted string (re-wrapped in double quotes), a system-constant token,
    // or the digits of a number as a String.
    private Object value;

    // Non-null only when the constant is "null" / "not null".
    private SystemConsts sysConsts;

    public ConstValue(InputStream is) throws ParserException {
        super(is);
    }

    @Override
    protected void _parse() throws Exception {
        // Peek 4 bytes: enough to distinguish "not ", "null", a quote, or a digit.
        byte[] peeked = peek(4);
        String sp = new String(peeked).trim();
        // Read a constant value (number or a string)
        if (peeked[0] == '"' || peeked[0] == '\'') {
            this.value = readString(is);
        } else if (sp.toLowerCase().startsWith("not")) {
            this.value = SystemConsts.NOT_NULL.value();
            sysConsts = SystemConsts.NOT_NULL;
            read(SystemConsts.NOT_NULL.value().length());
        } else if (sp.equalsIgnoreCase(SystemConsts.NULL.value())) {
            this.value = SystemConsts.NULL.value();
            sysConsts = SystemConsts.NULL;
            read(SystemConsts.NULL.value().length());
        } else {
            this.value = readNumber(is);
        }
    }

    // Consumes consecutive numeric characters (digits, '-', '.', 'e'); the mark/reset pair
    // pushes the first non-numeric character back so the caller can read it.
    private String readNumber(InputStream is) throws Exception {
        StringBuilder sb = new StringBuilder();
        while (is.available() > 0) {
            is.mark(1);
            char c = (char) is.read();
            if (!isNumeric(c)) {
                is.reset();
                break;
            } else {
                sb.append(c);
            }
        }
        return sb.toString().trim();
    }

    /**
     * Reads an escaped string
     *
     * <p>The opening quote character (single or double) is used as the closing delimiter.
     * Backslash-escaped characters are stored without the backslash, and the result is always
     * re-wrapped in DOUBLE quotes regardless of the original delimiter.
     *
     * @throws Exception
     */
    private String readString(InputStream is) throws Exception {
        char delim = (char) read(1)[0];
        StringBuilder sb = new StringBuilder();
        boolean valid = false;
        while (is.available() > 0) {
            char c = (char) is.read();
            if (c == delim) {
                valid = true;
                break;
            } else if (c == '\\') {
                // read the next character as part of the value
                c = (char) is.read();
                sb.append(c);
            } else {
                sb.append(c);
            }
        }
        if (!valid) {
            throw new ParserException(
                    "String constant is not quoted with <" + delim + "> : " + sb.toString());
        }
        return QUOTE + sb.toString() + QUOTE;
    }

    public Object getValue() {
        return value;
    }

    @Override
    public String toString() {
        return "" + value;
    }

    /** Returns the value with one layer of surrounding double quotes removed, if present. */
    public String getUnquotedValue() {
        String result = toString();
        if (result.length() >= 2 && result.startsWith(QUOTE) && result.endsWith(QUOTE)) {
            result = result.substring(1, result.length() - 1);
        }
        return result;
    }

    public boolean isSysConstant() {
        return this.sysConsts != null;
    }

    public SystemConsts getSysConstant() {
        return this.sysConsts;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/main/java/com/netflix/conductor/es6/dao/query/parser/internal/ListConst.java | es6-persistence/src/main/java/com/netflix/conductor/es6/dao/query/parser/internal/ListConst.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.dao.query.parser.internal;
import java.io.InputStream;
import java.util.LinkedList;
import java.util.List;
/** List of constants */
/** A parenthesized, comma-separated list of constants, e.g. {@code (a, b, c)}. */
public class ListConst extends AbstractNode {

    private List<Object> values;

    public ListConst(InputStream is) throws ParserException {
        super(is);
    }

    @Override
    protected void _parse() throws Exception {
        assertExpected(read(1), "(");
        this.values = readList();
    }

    // Accumulates characters into elements, splitting on ',' and stopping at ')'.
    // The element in progress is always added at the end, matching the original behavior.
    private List<Object> readList() throws Exception {
        List<Object> items = new LinkedList<>();
        StringBuilder current = new StringBuilder();
        boolean closed = false;
        while (is.available() > 0) {
            char ch = (char) is.read();
            if (ch == ')') {
                closed = true;
                break;
            }
            if (ch == ',') {
                items.add(current.toString().trim());
                current = new StringBuilder();
            } else {
                current.append(ch);
            }
        }
        items.add(current.toString().trim());
        if (!closed) {
            throw new ParserException("Expected ')' but never encountered in the stream");
        }
        return items;
    }

    public List<Object> getList() {
        return values;
    }

    @Override
    public String toString() {
        return values.toString();
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/main/java/com/netflix/conductor/es6/dao/query/parser/internal/FunctionThrowingException.java | es6-persistence/src/main/java/com/netflix/conductor/es6/dao/query/parser/internal/FunctionThrowingException.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.dao.query.parser.internal;
/**
 * A consumer-like functional interface whose {@code accept} method may throw a checked
 * exception. Used by {@code AbstractNode.efor} so stream-reading lambdas can propagate
 * {@code IOException} without wrapping at every call site.
 *
 * @param <T> type of the input argument
 */
@FunctionalInterface
public interface FunctionThrowingException<T> {

    void accept(T t) throws Exception;
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/main/java/com/netflix/conductor/es6/dao/query/parser/internal/AbstractNode.java | es6-persistence/src/main/java/com/netflix/conductor/es6/dao/query/parser/internal/AbstractNode.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.dao.query.parser.internal;
import java.io.InputStream;
import java.math.BigDecimal;
import java.util.HashSet;
import java.util.Set;
import java.util.regex.Pattern;
/**
 * Base class for all query-parser AST nodes. Subclasses implement {@link #_parse()} to consume
 * their portion of the shared input stream; the constructor skips surrounding whitespace and
 * wraps any parsing failure in a {@link ParserException}.
 *
 * <p>NOTE(review): peek() relies on InputStream.mark()/reset(), so the stream must support
 * marking (e.g. BufferedInputStream) — confirm all entry points wrap the input accordingly.
 */
public abstract class AbstractNode {

    public static final Pattern WHITESPACE = Pattern.compile("\\s");

    // Single-character comparison operators; multi-character ones ("IN", "!=") are
    // special-cased in isComparisonOpr().
    protected static Set<Character> comparisonOprs = new HashSet<>();

    static {
        comparisonOprs.add('>');
        comparisonOprs.add('<');
        comparisonOprs.add('=');
    }

    // The shared input stream; every node consumes from the same instance.
    protected InputStream is;

    // Parsing happens eagerly: constructing a node consumes its input immediately.
    protected AbstractNode(InputStream is) throws ParserException {
        this.is = is;
        this.parse();
    }

    /** Returns true if {@code test} parses as a decimal number. */
    protected boolean isNumber(String test) {
        try {
            // If you can convert to a big decimal value, then it is a number.
            new BigDecimal(test);
            return true;
        } catch (NumberFormatException e) {
            // Ignore
        }
        return false;
    }

    /** Returns true if the peeked bytes start with the boolean operator "OR" or "AND". */
    protected boolean isBoolOpr(byte[] buffer) {
        if (buffer.length > 1 && buffer[0] == 'O' && buffer[1] == 'R') {
            return true;
        } else {
            return buffer.length > 2 && buffer[0] == 'A' && buffer[1] == 'N' && buffer[2] == 'D';
        }
    }

    // NOTE(review): unlike isBoolOpr, this indexes buffer[1] without a length check —
    // callers appear to always peek at least 2 bytes; confirm before reusing elsewhere.
    protected boolean isComparisonOpr(byte[] buffer) {
        if (buffer[0] == 'I' && buffer[1] == 'N') {
            return true;
        } else if (buffer[0] == '!' && buffer[1] == '=') {
            return true;
        } else {
            return comparisonOprs.contains((char) buffer[0]);
        }
    }

    /** Reads {@code length} bytes without consuming them (mark/reset). */
    protected byte[] peek(int length) throws Exception {
        return read(length, true);
    }

    /** Reads and consumes {@code length} bytes. */
    protected byte[] read(int length) throws Exception {
        return read(length, false);
    }

    /**
     * Reads the next token: skips leading whitespace, then consumes characters up to (but not
     * including) a comparison-operator character, or up to and including a whitespace char.
     */
    protected String readToken() throws Exception {
        skipWhitespace();
        StringBuilder sb = new StringBuilder();
        while (is.available() > 0) {
            char c = (char) peek(1)[0];
            if (c == ' ' || c == '\t' || c == '\n' || c == '\r') {
                is.skip(1);
                break;
            } else if (c == '=' || c == '>' || c == '<' || c == '!') {
                // do not skip
                break;
            }
            sb.append(c);
            is.skip(1);
        }
        return sb.toString().trim();
    }

    // Numeric characters include '-', '.', and 'e' for signed/decimal/exponent notation.
    protected boolean isNumeric(char c) {
        return c == '-' || c == 'e' || (c >= '0' && c <= '9') || c == '.';
    }

    protected void assertExpected(byte[] found, String expected) throws ParserException {
        assertExpected(new String(found), expected);
    }

    protected void assertExpected(String found, String expected) throws ParserException {
        if (!found.equals(expected)) {
            throw new ParserException("Expected " + expected + ", found " + found);
        }
    }

    protected void assertExpected(char found, char expected) throws ParserException {
        if (found != expected) {
            throw new ParserException("Expected " + expected + ", found " + found);
        }
    }

    // "exception-friendly for": runs the consumer for indices [0, length), letting checked
    // exceptions propagate (a plain lambda in a for loop could not throw them).
    protected static void efor(int length, FunctionThrowingException<Integer> consumer)
            throws Exception {
        for (int i = 0; i < length; i++) {
            consumer.accept(i);
        }
    }

    /** Subclass hook: consume this node's input from {@link #is}. */
    protected abstract void _parse() throws Exception;

    // Public stuff here

    // Template method: skip whitespace, delegate to _parse(), skip trailing whitespace,
    // normalizing any failure into a ParserException.
    private void parse() throws ParserException {
        // skip white spaces
        skipWhitespace();
        try {
            _parse();
        } catch (Exception e) {
            if (!(e instanceof ParserException)) {
                throw new ParserException("Error parsing", e);
            } else {
                throw (ParserException) e;
            }
        }
        skipWhitespace();
    }

    // Private methods

    // NOTE(review): if the stream is exhausted mid-read, is.read() returns -1 which is cast
    // to byte 0xFF; callers treat such bytes as simply non-matching — confirm no caller
    // interprets them as data.
    private byte[] read(int length, boolean peekOnly) throws Exception {
        byte[] buf = new byte[length];
        if (peekOnly) {
            is.mark(length);
        }
        efor(length, (Integer c) -> buf[c] = (byte) is.read());
        if (peekOnly) {
            is.reset();
        }
        return buf;
    }

    /** Consumes consecutive whitespace characters (space, tab, CR, LF) from the stream. */
    protected void skipWhitespace() throws ParserException {
        try {
            while (is.available() > 0) {
                byte c = peek(1)[0];
                if (c == ' ' || c == '\t' || c == '\n' || c == '\r') {
                    // skip
                    read(1);
                } else {
                    break;
                }
            }
        } catch (Exception e) {
            throw new ParserException(e.getMessage(), e);
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/main/java/com/netflix/conductor/es6/dao/query/parser/internal/Name.java | es6-persistence/src/main/java/com/netflix/conductor/es6/dao/query/parser/internal/Name.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.dao.query.parser.internal;
import java.io.InputStream;
/** Represents the name of the field to be searched against. */
/** Represents the name of the field to be searched against. */
public class Name extends AbstractNode {

    private String fieldName;

    public Name(InputStream is) throws ParserException {
        super(is);
    }

    @Override
    protected void _parse() throws Exception {
        // A field name is a single token terminated by whitespace or a comparison operator.
        this.fieldName = readToken();
    }

    @Override
    public String toString() {
        return fieldName;
    }

    public String getName() {
        return fieldName;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/main/java/com/netflix/conductor/es6/dao/query/parser/internal/Range.java | es6-persistence/src/main/java/com/netflix/conductor/es6/dao/query/parser/internal/Range.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.dao.query.parser.internal;
import java.io.InputStream;
/**
 * Parses a numeric range of the form {@code low AND high}, as used by the BETWEEN operator.
 * Both bounds are kept as their raw string representations.
 */
public class Range extends AbstractNode {

    private String low;

    private String high;

    public Range(InputStream is) throws ParserException {
        super(is);
    }

    @Override
    protected void _parse() throws Exception {
        String lowNum = readNumber(is);
        if ("".equals(lowNum)) {
            // Symmetric with the upper-bound check below: previously "BETWEEN AND n"
            // silently produced an empty lower bound.
            throw new ParserException("Missing the lower range value...");
        }
        this.low = lowNum;
        skipWhitespace();
        byte[] peeked = read(3);
        assertExpected(peeked, "AND");
        skipWhitespace();
        String num = readNumber(is);
        if ("".equals(num)) {
            throw new ParserException("Missing the upper range value...");
        }
        this.high = num;
    }

    // Consumes consecutive numeric characters (digits, '-', '.', 'e'); the mark/reset pair
    // pushes the first non-numeric character back for the caller.
    private String readNumber(InputStream is) throws Exception {
        StringBuilder sb = new StringBuilder();
        while (is.available() > 0) {
            is.mark(1);
            char c = (char) is.read();
            if (!isNumeric(c)) {
                is.reset();
                break;
            } else {
                sb.append(c);
            }
        }
        return sb.toString().trim();
    }

    /**
     * @return the low
     */
    public String getLow() {
        return low;
    }

    /**
     * @return the high
     */
    public String getHigh() {
        return high;
    }

    @Override
    public String toString() {
        return low + " AND " + high;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/main/java/com/netflix/conductor/es6/dao/query/parser/internal/BooleanOp.java | es6-persistence/src/main/java/com/netflix/conductor/es6/dao/query/parser/internal/BooleanOp.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.dao.query.parser.internal;
import java.io.InputStream;
/** Parses a boolean operator token: either {@code AND} or {@code OR}. */
public class BooleanOp extends AbstractNode {

    private String value;

    public BooleanOp(InputStream is) throws ParserException {
        super(is);
    }

    /**
     * Peeks ahead (without consuming), identifies the operator, then consumes exactly
     * the matched characters.
     *
     * @throws ParserException if neither operator is present
     */
    @Override
    protected void _parse() throws Exception {
        byte[] lookahead = peek(3);
        boolean andToken =
                lookahead.length > 2
                        && lookahead[0] == 'A'
                        && lookahead[1] == 'N'
                        && lookahead[2] == 'D';
        boolean orToken = lookahead.length > 1 && lookahead[0] == 'O' && lookahead[1] == 'R';
        if (andToken) {
            value = "AND";
        } else if (orToken) {
            value = "OR";
        } else {
            throw new ParserException("No valid boolean operator found...");
        }
        read(value.length());
    }

    @Override
    public String toString() {
        return " " + value + " ";
    }

    public String getOperator() {
        return value;
    }

    public boolean isAnd() {
        return "AND".equals(value);
    }

    public boolean isOr() {
        return "OR".equals(value);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/main/java/com/netflix/conductor/es6/dao/query/parser/internal/ParserException.java | es6-persistence/src/main/java/com/netflix/conductor/es6/dao/query/parser/internal/ParserException.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.dao.query.parser.internal;
/** Signals a syntax error encountered while parsing a search query expression. */
@SuppressWarnings("serial")
public class ParserException extends Exception {
    /**
     * @param message description of the parse failure
     */
    public ParserException(String message) {
        super(message);
    }
    /**
     * @param message description of the parse failure
     * @param cause underlying error that triggered the failure
     */
    public ParserException(String message, Throwable cause) {
        super(message, cause);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/main/java/com/netflix/conductor/es6/dao/query/parser/internal/ComparisonOp.java | es6-persistence/src/main/java/com/netflix/conductor/es6/dao/query/parser/internal/ComparisonOp.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.dao.query.parser.internal;
import java.io.InputStream;
/**
 * Parses a comparison operator token from the query stream. Supported operators:
 * {@code =}, {@code <}, {@code >}, {@code !=}, {@code IN}, {@code IS}, {@code BETWEEN},
 * {@code STARTS_WITH}.
 */
public class ComparisonOp extends AbstractNode {

    public enum Operators {
        BETWEEN("BETWEEN"),
        EQUALS("="),
        LESS_THAN("<"),
        GREATER_THAN(">"),
        IN("IN"),
        NOT_EQUALS("!="),
        IS("IS"),
        STARTS_WITH("STARTS_WITH");

        private final String value;

        Operators(String value) {
            this.value = value;
        }

        public String value() {
            return value;
        }
    }

    // Length of the longest operator token; bounds the lookahead window.
    private static final int maxOperatorLength;

    static {
        int max = 0;
        for (Operators op : Operators.values()) {
            max = Math.max(max, op.value().length());
        }
        maxOperatorLength = max;
    }

    private static final int betweenLen = Operators.BETWEEN.value().length();
    private static final int startsWithLen = Operators.STARTS_WITH.value().length();

    private String value;

    public ComparisonOp(InputStream is) throws ParserException {
        super(is);
    }

    /**
     * Peeks up to {@code maxOperatorLength} bytes, matches the operator, then consumes
     * exactly the matched characters.
     *
     * <p>Every multi-byte comparison is guarded by a length check so that truncated input
     * raises a {@link ParserException} instead of an {@link ArrayIndexOutOfBoundsException}
     * (the original indexed {@code peeked[0]}/{@code peeked[1]} unconditionally).
     *
     * @throws ParserException if no operator can be recognized
     */
    @Override
    protected void _parse() throws Exception {
        byte[] peeked = peek(maxOperatorLength);
        if (peeked.length > 0 && (peeked[0] == '=' || peeked[0] == '>' || peeked[0] == '<')) {
            this.value = new String(peeked, 0, 1);
        } else if (peeked.length > 1 && peeked[0] == 'I' && peeked[1] == 'N') {
            this.value = "IN";
        } else if (peeked.length > 1 && peeked[0] == 'I' && peeked[1] == 'S') {
            this.value = "IS";
        } else if (peeked.length > 1 && peeked[0] == '!' && peeked[1] == '=') {
            this.value = "!=";
        } else if (peeked.length >= betweenLen
                && new String(peeked, 0, betweenLen).equals(Operators.BETWEEN.value())) {
            this.value = Operators.BETWEEN.value();
        } else if (peeked.length == startsWithLen
                && new String(peeked).equals(Operators.STARTS_WITH.value())) {
            this.value = Operators.STARTS_WITH.value();
        } else {
            throw new ParserException(
                    "Expecting an operator (=, >, <, !=, BETWEEN, IN, STARTS_WITH), but found none. Peeked=>"
                            + new String(peeked));
        }
        read(this.value.length());
    }

    @Override
    public String toString() {
        return " " + value + " ";
    }

    /**
     * @return the operator token that was parsed
     */
    public String getOperator() {
        return value;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/main/java/com/netflix/conductor/es6/dao/index/ElasticSearchRestDAOV6.java | es6-persistence/src/main/java/com/netflix/conductor/es6/dao/index/ElasticSearchRestDAOV6.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.dao.index;
import java.io.IOException;
import java.io.InputStream;
import java.text.SimpleDateFormat;
import java.time.Instant;
import java.time.LocalDate;
import java.util.*;
import java.util.concurrent.*;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.HttpEntity;
import org.apache.http.HttpStatus;
import org.apache.http.entity.ContentType;
import org.apache.http.nio.entity.NByteArrayEntity;
import org.apache.http.nio.entity.NStringEntity;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.client.*;
import org.elasticsearch.client.core.CountRequest;
import org.elasticsearch.client.core.CountResponse;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.sort.FieldSortBuilder;
import org.elasticsearch.search.sort.SortOrder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.annotations.Trace;
import com.netflix.conductor.common.metadata.events.EventExecution;
import com.netflix.conductor.common.metadata.tasks.TaskExecLog;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.core.exception.TransientException;
import com.netflix.conductor.dao.IndexDAO;
import com.netflix.conductor.es6.config.ElasticSearchProperties;
import com.netflix.conductor.es6.dao.query.parser.internal.ParserException;
import com.netflix.conductor.metrics.Monitors;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.type.MapType;
import com.fasterxml.jackson.databind.type.TypeFactory;
import jakarta.annotation.PostConstruct;
import jakarta.annotation.PreDestroy;
@Trace
public class ElasticSearchRestDAOV6 extends ElasticSearchBaseDAO implements IndexDAO {
    private static final Logger LOGGER = LoggerFactory.getLogger(ElasticSearchRestDAOV6.class);
    // Core size of the general-purpose async indexing pool (max size comes from properties).
    private static final int CORE_POOL_SIZE = 6;
    private static final long KEEP_ALIVE_TIME = 1L;
    // Elasticsearch document types used by this DAO.
    private static final String WORKFLOW_DOC_TYPE = "workflow";
    private static final String TASK_DOC_TYPE = "task";
    private static final String LOG_DOC_TYPE = "task_log";
    private static final String EVENT_DOC_TYPE = "event";
    private static final String MSG_DOC_TYPE = "message";
    private static final TimeZone GMT = TimeZone.getTimeZone("GMT");
    // NOTE(review): SimpleDateFormat is not thread-safe; this shared instance is used from
    // setup() and from a scheduled thread (updateIndexesNames) — verify access cannot overlap.
    private static final SimpleDateFormat SIMPLE_DATE_FORMAT = new SimpleDateFormat("yyyyMMWW");
    // String constants for the HTTP verbs passed to the low-level REST client.
    private @interface HttpMethod {
        String GET = "GET";
        String POST = "POST";
        String PUT = "PUT";
        String HEAD = "HEAD";
    }
    private static final String className = ElasticSearchRestDAOV6.class.getSimpleName();
    // Index names; the time-partitioned ones (log/event/message) are refreshed hourly.
    private final String workflowIndexName;
    private final String taskIndexName;
    private final String eventIndexPrefix;
    private String eventIndexName;
    private final String messageIndexPrefix;
    private String messageIndexName;
    private String logIndexName;
    private final String logIndexPrefix;
    // Optional single document type overriding the per-entity doc types above.
    private final String docTypeOverride;
    private final String clusterHealthColor;
    private final ObjectMapper objectMapper;
    private final RestHighLevelClient elasticSearchClient;
    private final RestClient elasticSearchAdminClient;
    private final ExecutorService executorService;
    private final ExecutorService logExecutorService;
    // Per-document-type buffers used for batched async indexing.
    private final ConcurrentHashMap<String, BulkRequests> bulkRequests;
    private final int indexBatchSize;
    private final long asyncBufferFlushTimeout;
    private final ElasticSearchProperties properties;
    private final RetryTemplate retryTemplate;
    static {
        SIMPLE_DATE_FORMAT.setTimeZone(GMT);
    }
    /**
     * Wires the DAO to Elasticsearch and sets up the async indexing machinery.
     *
     * @param restClientBuilder builder used for both the low-level admin client and the
     *     high-level client
     * @param retryTemplate retry policy applied when flushing bulk requests
     * @param properties ES configuration (index prefix, pool sizes, batch size, ...)
     * @param objectMapper mapper used to serialize documents to JSON
     */
    public ElasticSearchRestDAOV6(
            RestClientBuilder restClientBuilder,
            RetryTemplate retryTemplate,
            ElasticSearchProperties properties,
            ObjectMapper objectMapper) {
        this.objectMapper = objectMapper;
        this.elasticSearchAdminClient = restClientBuilder.build();
        this.elasticSearchClient = new RestHighLevelClient(restClientBuilder);
        this.clusterHealthColor = properties.getClusterHealthColor();
        this.bulkRequests = new ConcurrentHashMap<>();
        this.indexBatchSize = properties.getIndexBatchSize();
        this.asyncBufferFlushTimeout = properties.getAsyncBufferFlushTimeout().toMillis();
        this.properties = properties;
        this.indexPrefix = properties.getIndexPrefix();
        // The document-type override only applies when index management is external.
        if (!properties.isAutoIndexManagementEnabled()
                && StringUtils.isNotBlank(properties.getDocumentTypeOverride())) {
            docTypeOverride = properties.getDocumentTypeOverride();
        } else {
            docTypeOverride = "";
        }
        this.workflowIndexName = getIndexName(WORKFLOW_DOC_TYPE);
        this.taskIndexName = getIndexName(TASK_DOC_TYPE);
        this.logIndexPrefix = this.indexPrefix + "_" + LOG_DOC_TYPE;
        this.messageIndexPrefix = this.indexPrefix + "_" + MSG_DOC_TYPE;
        this.eventIndexPrefix = this.indexPrefix + "_" + EVENT_DOC_TYPE;
        int workerQueueSize = properties.getAsyncWorkerQueueSize();
        int maximumPoolSize = properties.getAsyncMaxPoolSize();
        // Set up a workerpool for performing async operations.
        this.executorService =
                new ThreadPoolExecutor(
                        CORE_POOL_SIZE,
                        maximumPoolSize,
                        KEEP_ALIVE_TIME,
                        TimeUnit.MINUTES,
                        new LinkedBlockingQueue<>(workerQueueSize),
                        // Rejected tasks are dropped (and counted) rather than blocking callers.
                        (runnable, executor) -> {
                            LOGGER.warn(
                                    "Request {} to async dao discarded in executor {}",
                                    runnable,
                                    executor);
                            Monitors.recordDiscardedIndexingCount("indexQueue");
                        });
        // Set up a workerpool for performing async operations for task_logs, event_executions,
        // message
        int corePoolSize = 1;
        maximumPoolSize = 2;
        long keepAliveTime = 30L;
        this.logExecutorService =
                new ThreadPoolExecutor(
                        corePoolSize,
                        maximumPoolSize,
                        keepAliveTime,
                        TimeUnit.SECONDS,
                        new LinkedBlockingQueue<>(workerQueueSize),
                        (runnable, executor) -> {
                            LOGGER.warn(
                                    "Request {} to async log dao discarded in executor {}",
                                    runnable,
                                    executor);
                            Monitors.recordDiscardedIndexingCount("logQueue");
                        });
        // Periodically flush partially-filled bulk buffers so buffered entries don't go stale.
        Executors.newSingleThreadScheduledExecutor()
                .scheduleAtFixedRate(this::flushBulkRequests, 60, 30, TimeUnit.SECONDS);
        this.retryTemplate = retryTemplate;
    }
    /** Stops both async worker pools when the bean is destroyed. */
    @PreDestroy
    private void shutdown() {
        LOGGER.info("Gracefully shutdown executor service");
        shutdownExecutorService(logExecutorService);
        shutdownExecutorService(executorService);
    }
private void shutdownExecutorService(ExecutorService execService) {
try {
execService.shutdown();
if (execService.awaitTermination(30, TimeUnit.SECONDS)) {
LOGGER.debug("tasks completed, shutting down");
} else {
LOGGER.warn("Forcing shutdown after waiting for 30 seconds");
execService.shutdownNow();
}
} catch (InterruptedException ie) {
LOGGER.warn(
"Shutdown interrupted, invoking shutdownNow on scheduledThreadPoolExecutor for delay queue");
execService.shutdownNow();
Thread.currentThread().interrupt();
}
}
    /**
     * Waits for the cluster to reach the configured health status and, when auto index
     * management is enabled, creates the templates and the workflow/task indices.
     *
     * @throws Exception if the cluster health check fails
     */
    @Override
    @PostConstruct
    public void setup() throws Exception {
        waitForHealthyCluster();
        if (properties.isAutoIndexManagementEnabled()) {
            createIndexesTemplates();
            createWorkflowIndex();
            createTaskIndex();
        }
    }
    /**
     * Installs the index templates, resolves the current time-partitioned index names,
     * then keeps those names up to date on an hourly schedule.
     */
    private void createIndexesTemplates() {
        try {
            initIndexesTemplates();
            updateIndexesNames();
            // NOTE(review): this scheduler is created here and never shut down — confirm it is
            // intended to live for the lifetime of the process.
            Executors.newScheduledThreadPool(1)
                    .scheduleAtFixedRate(this::updateIndexesNames, 0, 1, TimeUnit.HOURS);
        } catch (Exception e) {
            LOGGER.error("Error creating index templates!", e);
        }
    }
private void initIndexesTemplates() {
initIndexTemplate(LOG_DOC_TYPE);
initIndexTemplate(EVENT_DOC_TYPE);
initIndexTemplate(MSG_DOC_TYPE);
}
/** Initializes the index with the required templates and mappings. */
private void initIndexTemplate(String type) {
String template = "template_" + type;
try {
if (doesResourceNotExist("/_template/" + template)) {
LOGGER.info("Creating the index template '" + template + "'");
InputStream stream =
ElasticSearchDAOV6.class.getResourceAsStream("/" + template + ".json");
byte[] templateSource = IOUtils.toByteArray(stream);
HttpEntity entity =
new NByteArrayEntity(templateSource, ContentType.APPLICATION_JSON);
elasticSearchAdminClient.performRequest(
HttpMethod.PUT, "/_template/" + template, Collections.emptyMap(), entity);
}
} catch (Exception e) {
LOGGER.error("Failed to init " + template, e);
}
}
    /** Recomputes the current names of the weekly-partitioned log/event/message indices. */
    private void updateIndexesNames() {
        logIndexName = updateIndexName(LOG_DOC_TYPE);
        eventIndexName = updateIndexName(EVENT_DOC_TYPE);
        messageIndexName = updateIndexName(MSG_DOC_TYPE);
    }
    /**
     * Builds the index name for the current week (suffix pattern "yyyyMMWW") and ensures
     * that index exists.
     *
     * @param type document type suffix
     * @return the resolved index name
     * @throws NonTransientException if the index cannot be created
     */
    private String updateIndexName(String type) {
        // NOTE(review): SIMPLE_DATE_FORMAT is a shared SimpleDateFormat (not thread-safe);
        // this runs from setup() and from a scheduled thread — verify calls cannot overlap.
        String indexName =
                this.indexPrefix + "_" + type + "_" + SIMPLE_DATE_FORMAT.format(new Date());
        try {
            addIndex(indexName);
            return indexName;
        } catch (IOException e) {
            LOGGER.error("Failed to update log index name: {}", indexName, e);
            throw new NonTransientException("Failed to update log index name: " + indexName, e);
        }
    }
private void createWorkflowIndex() {
String indexName = getIndexName(WORKFLOW_DOC_TYPE);
try {
addIndex(indexName);
} catch (IOException e) {
LOGGER.error("Failed to initialize index '{}'", indexName, e);
}
try {
addMappingToIndex(indexName, WORKFLOW_DOC_TYPE, "/mappings_docType_workflow.json");
} catch (IOException e) {
LOGGER.error("Failed to add {} mapping", WORKFLOW_DOC_TYPE);
}
}
private void createTaskIndex() {
String indexName = getIndexName(TASK_DOC_TYPE);
try {
addIndex(indexName);
} catch (IOException e) {
LOGGER.error("Failed to initialize index '{}'", indexName, e);
}
try {
addMappingToIndex(indexName, TASK_DOC_TYPE, "/mappings_docType_task.json");
} catch (IOException e) {
LOGGER.error("Failed to add {} mapping", TASK_DOC_TYPE);
}
}
    /**
     * Blocks (up to 30 seconds) until the cluster reaches the configured health status
     * ({@code clusterHealthColor}).
     *
     * @throws Exception If there is an issue connecting with the ES cluster.
     */
    private void waitForHealthyCluster() throws Exception {
        Map<String, String> params = new HashMap<>();
        params.put("wait_for_status", this.clusterHealthColor);
        params.put("timeout", "30s");
        elasticSearchAdminClient.performRequest("GET", "/_cluster/health", params);
    }
    /**
     * Adds an index to elasticsearch if it does not exist.
     *
     * @param index The name of the index to create.
     * @throws IOException If an error occurred during requests to ES.
     */
    private void addIndex(final String index) throws IOException {
        LOGGER.info("Adding index '{}'...", index);
        String resourcePath = "/" + index;
        if (doesResourceNotExist(resourcePath)) {
            try {
                // Minimal settings body: shard/replica counts from configuration.
                ObjectNode setting = objectMapper.createObjectNode();
                ObjectNode indexSetting = objectMapper.createObjectNode();
                indexSetting.put("number_of_shards", properties.getIndexShardCount());
                indexSetting.put("number_of_replicas", properties.getIndexReplicasCount());
                setting.set("index", indexSetting);
                elasticSearchAdminClient.performRequest(
                        HttpMethod.PUT,
                        resourcePath,
                        Collections.emptyMap(),
                        new NStringEntity(setting.toString(), ContentType.APPLICATION_JSON));
                LOGGER.info("Added '{}' index", index);
            } catch (ResponseException e) {
                // Tolerate concurrent creation: a 400 with "index_already_exists_exception"
                // means another node/thread created the index between the check and the PUT.
                boolean errorCreatingIndex = true;
                Response errorResponse = e.getResponse();
                if (errorResponse.getStatusLine().getStatusCode() == HttpStatus.SC_BAD_REQUEST) {
                    JsonNode root =
                            objectMapper.readTree(EntityUtils.toString(errorResponse.getEntity()));
                    String errorCode = root.get("error").get("type").asText();
                    if ("index_already_exists_exception".equals(errorCode)) {
                        errorCreatingIndex = false;
                    }
                }
                if (errorCreatingIndex) {
                    throw e;
                }
            }
        } else {
            LOGGER.info("Index '{}' already exists", index);
        }
    }
/**
* Adds a mapping type to an index if it does not exist.
*
* @param index The name of the index.
* @param mappingType The name of the mapping type.
* @param mappingFilename The name of the mapping file to use to add the mapping if it does not
* exist.
* @throws IOException If an error occurred during requests to ES.
*/
private void addMappingToIndex(
final String index, final String mappingType, final String mappingFilename)
throws IOException {
LOGGER.info("Adding '{}' mapping to index '{}'...", mappingType, index);
String resourcePath = "/" + index + "/_mapping/" + mappingType;
if (doesResourceNotExist(resourcePath)) {
HttpEntity entity =
new NByteArrayEntity(
loadTypeMappingSource(mappingFilename).getBytes(),
ContentType.APPLICATION_JSON);
elasticSearchAdminClient.performRequest(
HttpMethod.PUT, resourcePath, Collections.emptyMap(), entity);
LOGGER.info("Added '{}' mapping", mappingType);
} else {
LOGGER.info("Mapping '{}' already exists", mappingType);
}
}
/**
* Determines whether a resource exists in ES. This will call a GET method to a particular path
* and return true if status 200; false otherwise.
*
* @param resourcePath The path of the resource to get.
* @return True if it exists; false otherwise.
* @throws IOException If an error occurred during requests to ES.
*/
public boolean doesResourceExist(final String resourcePath) throws IOException {
Response response = elasticSearchAdminClient.performRequest(HttpMethod.HEAD, resourcePath);
return response.getStatusLine().getStatusCode() == HttpStatus.SC_OK;
}
/**
* The inverse of doesResourceExist.
*
* @param resourcePath The path of the resource to check.
* @return True if it does not exist; false otherwise.
* @throws IOException If an error occurred during requests to ES.
*/
public boolean doesResourceNotExist(final String resourcePath) throws IOException {
return !doesResourceExist(resourcePath);
}
    /**
     * Synchronously indexes a workflow summary document, keyed by workflow id.
     * Failures are logged and counted but not rethrown.
     */
    @Override
    public void indexWorkflow(WorkflowSummary workflow) {
        try {
            long startTime = Instant.now().toEpochMilli();
            String workflowId = workflow.getWorkflowId();
            byte[] docBytes = objectMapper.writeValueAsBytes(workflow);
            String docType =
                    StringUtils.isBlank(docTypeOverride) ? WORKFLOW_DOC_TYPE : docTypeOverride;
            IndexRequest request = new IndexRequest(workflowIndexName, docType, workflowId);
            request.source(docBytes, XContentType.JSON);
            elasticSearchClient.index(request, RequestOptions.DEFAULT);
            long endTime = Instant.now().toEpochMilli();
            LOGGER.debug(
                    "Time taken {} for indexing workflow: {}", endTime - startTime, workflowId);
            Monitors.recordESIndexTime("index_workflow", WORKFLOW_DOC_TYPE, endTime - startTime);
            Monitors.recordWorkerQueueSize(
                    "indexQueue", ((ThreadPoolExecutor) executorService).getQueue().size());
        } catch (Exception e) {
            Monitors.error(className, "indexWorkflow");
            LOGGER.error("Failed to index workflow: {}", workflow.getWorkflowId(), e);
        }
    }
    /** Runs {@link #indexWorkflow} on the general async worker pool. */
    @Override
    public CompletableFuture<Void> asyncIndexWorkflow(WorkflowSummary workflow) {
        return CompletableFuture.runAsync(() -> indexWorkflow(workflow), executorService);
    }
    /**
     * Indexes a task summary document via the shared {@code indexObject} helper, keyed by
     * task id. Failures are logged but not rethrown.
     */
    @Override
    public void indexTask(TaskSummary task) {
        try {
            long startTime = Instant.now().toEpochMilli();
            String taskId = task.getTaskId();
            String docType = StringUtils.isBlank(docTypeOverride) ? TASK_DOC_TYPE : docTypeOverride;
            indexObject(taskIndexName, docType, taskId, task);
            long endTime = Instant.now().toEpochMilli();
            LOGGER.debug(
                    "Time taken {} for indexing task:{} in workflow: {}",
                    endTime - startTime,
                    taskId,
                    task.getWorkflowId());
            Monitors.recordESIndexTime("index_task", TASK_DOC_TYPE, endTime - startTime);
            Monitors.recordWorkerQueueSize(
                    "indexQueue", ((ThreadPoolExecutor) executorService).getQueue().size());
        } catch (Exception e) {
            LOGGER.error("Failed to index task: {}", task.getTaskId(), e);
        }
    }
    /** Runs {@link #indexTask} on the general async worker pool. */
    @Override
    public CompletableFuture<Void> asyncIndexTask(TaskSummary task) {
        return CompletableFuture.runAsync(() -> indexTask(task), executorService);
    }
@Override
public void addTaskExecutionLogs(List<TaskExecLog> taskExecLogs) {
if (taskExecLogs.isEmpty()) {
return;
}
long startTime = Instant.now().toEpochMilli();
BulkRequest bulkRequest = new BulkRequest();
for (TaskExecLog log : taskExecLogs) {
byte[] docBytes;
try {
docBytes = objectMapper.writeValueAsBytes(log);
} catch (JsonProcessingException e) {
LOGGER.error("Failed to convert task log to JSON for task {}", log.getTaskId());
continue;
}
String docType = StringUtils.isBlank(docTypeOverride) ? LOG_DOC_TYPE : docTypeOverride;
IndexRequest request = new IndexRequest(logIndexName, docType);
request.source(docBytes, XContentType.JSON);
bulkRequest.add(request);
}
try {
elasticSearchClient.bulk(bulkRequest, RequestOptions.DEFAULT);
long endTime = Instant.now().toEpochMilli();
LOGGER.debug("Time taken {} for indexing taskExecutionLogs", endTime - startTime);
Monitors.recordESIndexTime(
"index_task_execution_logs", LOG_DOC_TYPE, endTime - startTime);
Monitors.recordWorkerQueueSize(
"logQueue", ((ThreadPoolExecutor) logExecutorService).getQueue().size());
} catch (Exception e) {
List<String> taskIds =
taskExecLogs.stream().map(TaskExecLog::getTaskId).collect(Collectors.toList());
LOGGER.error("Failed to index task execution logs for tasks: {}", taskIds, e);
}
}
@Override
public CompletableFuture<Void> asyncAddTaskExecutionLogs(List<TaskExecLog> logs) {
return CompletableFuture.runAsync(() -> addTaskExecutionLogs(logs), logExecutorService);
}
    /**
     * Fetches execution logs for a task, ordered by creation time ascending and capped at
     * the configured task-log result limit.
     *
     * @param taskId id of the task whose logs are requested
     * @return the logs, or {@code null} when the query fails
     *     (NOTE(review): callers must null-check; consider returning an empty list)
     */
    @Override
    public List<TaskExecLog> getTaskExecutionLogs(String taskId) {
        try {
            BoolQueryBuilder query = boolQueryBuilder("taskId='" + taskId + "'", "*");
            // Create the searchObjectIdsViaExpression source
            SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
            searchSourceBuilder.query(query);
            searchSourceBuilder.sort(new FieldSortBuilder("createdTime").order(SortOrder.ASC));
            searchSourceBuilder.size(properties.getTaskLogResultLimit());
            // Generate the actual request to send to ES.
            String docType = StringUtils.isBlank(docTypeOverride) ? LOG_DOC_TYPE : docTypeOverride;
            SearchRequest searchRequest = new SearchRequest(logIndexPrefix + "*");
            searchRequest.types(docType);
            searchRequest.source(searchSourceBuilder);
            SearchResponse response = elasticSearchClient.search(searchRequest);
            return mapTaskExecLogsResponse(response);
        } catch (Exception e) {
            LOGGER.error("Failed to get task execution logs for task: {}", taskId, e);
        }
        return null;
    }
    /** Deserializes each search hit into a {@link TaskExecLog}. */
    private List<TaskExecLog> mapTaskExecLogsResponse(SearchResponse response) throws IOException {
        SearchHit[] hits = response.getHits().getHits();
        List<TaskExecLog> logs = new ArrayList<>(hits.length);
        for (SearchHit hit : hits) {
            String source = hit.getSourceAsString();
            TaskExecLog tel = objectMapper.readValue(source, TaskExecLog.class);
            logs.add(tel);
        }
        return logs;
    }
    /**
     * Fetches messages recorded for the given queue, oldest first.
     *
     * @param queue queue name
     * @return matching messages, or {@code null} when the query fails
     *     (NOTE(review): callers must null-check; consider returning an empty list)
     */
    @Override
    public List<Message> getMessages(String queue) {
        try {
            BoolQueryBuilder query = boolQueryBuilder("queue='" + queue + "'", "*");
            // Create the searchObjectIdsViaExpression source
            SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
            searchSourceBuilder.query(query);
            searchSourceBuilder.sort(new FieldSortBuilder("created").order(SortOrder.ASC));
            // Generate the actual request to send to ES.
            String docType = StringUtils.isBlank(docTypeOverride) ? MSG_DOC_TYPE : docTypeOverride;
            SearchRequest searchRequest = new SearchRequest(messageIndexPrefix + "*");
            searchRequest.types(docType);
            searchRequest.source(searchSourceBuilder);
            SearchResponse response = elasticSearchClient.search(searchRequest);
            return mapGetMessagesResponse(response);
        } catch (Exception e) {
            LOGGER.error("Failed to get messages for queue: {}", queue, e);
        }
        return null;
    }
    /** Rebuilds {@link Message} objects from the "messageId"/"payload" fields of each hit. */
    private List<Message> mapGetMessagesResponse(SearchResponse response) throws IOException {
        SearchHit[] hits = response.getHits().getHits();
        TypeFactory factory = TypeFactory.defaultInstance();
        MapType type = factory.constructMapType(HashMap.class, String.class, String.class);
        List<Message> messages = new ArrayList<>(hits.length);
        for (SearchHit hit : hits) {
            String source = hit.getSourceAsString();
            Map<String, String> mapSource = objectMapper.readValue(source, type);
            Message msg = new Message(mapSource.get("messageId"), mapSource.get("payload"), null);
            messages.add(msg);
        }
        return messages;
    }
    /**
     * Fetches executions recorded for the given event, oldest first.
     *
     * @param event event name
     * @return matching event executions, or {@code null} when the query fails
     *     (NOTE(review): callers must null-check; consider returning an empty list)
     */
    @Override
    public List<EventExecution> getEventExecutions(String event) {
        try {
            BoolQueryBuilder query = boolQueryBuilder("event='" + event + "'", "*");
            // Create the searchObjectIdsViaExpression source
            SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
            searchSourceBuilder.query(query);
            searchSourceBuilder.sort(new FieldSortBuilder("created").order(SortOrder.ASC));
            // Generate the actual request to send to ES.
            String docType =
                    StringUtils.isBlank(docTypeOverride) ? EVENT_DOC_TYPE : docTypeOverride;
            SearchRequest searchRequest = new SearchRequest(eventIndexPrefix + "*");
            searchRequest.types(docType);
            searchRequest.source(searchSourceBuilder);
            SearchResponse response = elasticSearchClient.search(searchRequest);
            return mapEventExecutionsResponse(response);
        } catch (Exception e) {
            LOGGER.error("Failed to get executions for event: {}", event, e);
        }
        return null;
    }
    /** Deserializes each search hit into an {@link EventExecution}. */
    private List<EventExecution> mapEventExecutionsResponse(SearchResponse response)
            throws IOException {
        SearchHit[] hits = response.getHits().getHits();
        List<EventExecution> executions = new ArrayList<>(hits.length);
        for (SearchHit hit : hits) {
            String source = hit.getSourceAsString();
            EventExecution tel = objectMapper.readValue(source, EventExecution.class);
            executions.add(tel);
        }
        return executions;
    }
    /**
     * Indexes a queue message document (messageId, payload, queue, created timestamp).
     * Failures are logged but not rethrown.
     */
    @Override
    public void addMessage(String queue, Message message) {
        try {
            long startTime = Instant.now().toEpochMilli();
            Map<String, Object> doc = new HashMap<>();
            doc.put("messageId", message.getId());
            doc.put("payload", message.getPayload());
            doc.put("queue", queue);
            doc.put("created", System.currentTimeMillis());
            String docType = StringUtils.isBlank(docTypeOverride) ? MSG_DOC_TYPE : docTypeOverride;
            indexObject(messageIndexName, docType, doc);
            long endTime = Instant.now().toEpochMilli();
            LOGGER.debug(
                    "Time taken {} for indexing message: {}",
                    endTime - startTime,
                    message.getId());
            Monitors.recordESIndexTime("add_message", MSG_DOC_TYPE, endTime - startTime);
        } catch (Exception e) {
            LOGGER.error("Failed to index message: {}", message.getId(), e);
        }
    }
    /** Runs {@link #addMessage} on the general async worker pool. */
    @Override
    public CompletableFuture<Void> asyncAddMessage(String queue, Message message) {
        return CompletableFuture.runAsync(() -> addMessage(queue, message), executorService);
    }
    /**
     * Indexes an event execution document. The document id is the dot-joined tuple
     * (name, event, messageId, executionId) — presumably so repeated handler runs update
     * the same document; verify against indexObject's semantics. Failures are logged but
     * not rethrown.
     */
    @Override
    public void addEventExecution(EventExecution eventExecution) {
        try {
            long startTime = Instant.now().toEpochMilli();
            String id =
                    eventExecution.getName()
                            + "."
                            + eventExecution.getEvent()
                            + "."
                            + eventExecution.getMessageId()
                            + "."
                            + eventExecution.getId();
            String docType =
                    StringUtils.isBlank(docTypeOverride) ? EVENT_DOC_TYPE : docTypeOverride;
            indexObject(eventIndexName, docType, id, eventExecution);
            long endTime = Instant.now().toEpochMilli();
            LOGGER.debug(
                    "Time taken {} for indexing event execution: {}",
                    endTime - startTime,
                    eventExecution.getId());
            Monitors.recordESIndexTime("add_event_execution", EVENT_DOC_TYPE, endTime - startTime);
            Monitors.recordWorkerQueueSize(
                    "logQueue", ((ThreadPoolExecutor) logExecutorService).getQueue().size());
        } catch (Exception e) {
            LOGGER.error("Failed to index event execution: {}", eventExecution.getId(), e);
        }
    }
    /** Runs {@link #addEventExecution} on the dedicated log/event/message worker pool. */
    @Override
    public CompletableFuture<Void> asyncAddEventExecution(EventExecution eventExecution) {
        return CompletableFuture.runAsync(
                () -> addEventExecution(eventExecution), logExecutorService);
    }
    /**
     * Searches workflow documents and returns the matching workflow ids.
     * Any query failure is surfaced as a {@link TransientException}.
     */
    @Override
    public SearchResult<String> searchWorkflows(
            String query, String freeText, int start, int count, List<String> sort) {
        try {
            return searchObjectsViaExpression(
                    query, start, count, sort, freeText, WORKFLOW_DOC_TYPE, true, String.class);
        } catch (Exception e) {
            throw new TransientException(e.getMessage(), e);
        }
    }
    /** Searches workflow documents and returns full {@link WorkflowSummary} objects. */
    @Override
    public SearchResult<WorkflowSummary> searchWorkflowSummary(
            String query, String freeText, int start, int count, List<String> sort) {
        try {
            return searchObjectsViaExpression(
                    query,
                    start,
                    count,
                    sort,
                    freeText,
                    WORKFLOW_DOC_TYPE,
                    false,
                    WorkflowSummary.class);
        } catch (Exception e) {
            throw new TransientException(e.getMessage(), e);
        }
    }
    /** Searches task documents and returns the matching task ids. */
    @Override
    public SearchResult<String> searchTasks(
            String query, String freeText, int start, int count, List<String> sort) {
        try {
            return searchObjectsViaExpression(
                    query, start, count, sort, freeText, TASK_DOC_TYPE, true, String.class);
        } catch (Exception e) {
            throw new TransientException(e.getMessage(), e);
        }
    }
    /** Searches task documents and returns full {@link TaskSummary} objects. */
    @Override
    public SearchResult<TaskSummary> searchTaskSummary(
            String query, String freeText, int start, int count, List<String> sort) {
        try {
            return searchObjectsViaExpression(
                    query, start, count, sort, freeText, TASK_DOC_TYPE, false, TaskSummary.class);
        } catch (Exception e) {
            throw new TransientException(e.getMessage(), e);
        }
    }
@Override
public void removeWorkflow(String workflowId) {
long startTime = Instant.now().toEpochMilli();
String docType = StringUtils.isBlank(docTypeOverride) ? WORKFLOW_DOC_TYPE : docTypeOverride;
DeleteRequest request = new DeleteRequest(workflowIndexName, docType, workflowId);
try {
DeleteResponse response = elasticSearchClient.delete(request);
if (response.getResult() == DocWriteResponse.Result.NOT_FOUND) {
LOGGER.error("Index removal failed - document not found by id: {}", workflowId);
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | true |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/main/java/com/netflix/conductor/es6/dao/index/ElasticSearchBaseDAO.java | es6-persistence/src/main/java/com/netflix/conductor/es6/dao/index/ElasticSearchBaseDAO.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.dao.index;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.QueryStringQueryBuilder;

import com.netflix.conductor.dao.IndexDAO;
import com.netflix.conductor.es6.dao.query.parser.Expression;
import com.netflix.conductor.es6.dao.query.parser.internal.ParserException;
abstract class ElasticSearchBaseDAO implements IndexDAO {

    /**
     * Matches the {@code "template": "*<name>*"} field of an index-template JSON, capturing
     * the name between the asterisks. Compiled once instead of on every call.
     */
    private static final Pattern TEMPLATE_FIELD_PATTERN =
            Pattern.compile("\"template\": \"\\*(.*)\\*\"");

    String indexPrefix;

    /**
     * Loads an index mapping/template JSON from the classpath and rewrites its template
     * pattern to include the configured index prefix.
     *
     * @param path classpath resource path, e.g. {@code /template_task_log.json}
     * @throws IOException if the resource cannot be read
     */
    String loadTypeMappingSource(String path) throws IOException {
        // Decode explicitly as UTF-8: the charset-less IOUtils overload is deprecated and
        // uses the platform default charset, which can corrupt non-ASCII mapping files.
        return applyIndexPrefixToTemplate(
                IOUtils.toString(
                        ElasticSearchBaseDAO.class.getResourceAsStream(path),
                        StandardCharsets.UTF_8));
    }

    /** Rewrites the template's index pattern from {@code *name*} to {@code *prefix_name*}. */
    private String applyIndexPrefixToTemplate(String text) {
        Matcher m = TEMPLATE_FIELD_PATTERN.matcher(text);
        StringBuilder sb = new StringBuilder();
        while (m.find()) {
            m.appendReplacement(
                    sb,
                    m.group(0)
                            .replaceFirst(
                                    Pattern.quote(m.group(1)), indexPrefix + "_" + m.group(1)));
        }
        m.appendTail(sb);
        return sb.toString();
    }

    /**
     * Combines an optional structured filter expression with a Lucene query-string query
     * into a single bool query. An empty expression degrades to match-all.
     *
     * @throws ParserException if the structured expression cannot be parsed
     */
    BoolQueryBuilder boolQueryBuilder(String expression, String queryString)
            throws ParserException {
        QueryBuilder queryBuilder = QueryBuilders.matchAllQuery();
        if (StringUtils.isNotEmpty(expression)) {
            Expression exp = Expression.fromString(expression);
            queryBuilder = exp.getFilterBuilder();
        }
        BoolQueryBuilder filterQuery = QueryBuilders.boolQuery().must(queryBuilder);
        QueryStringQueryBuilder stringQuery = QueryBuilders.queryStringQuery(queryString);
        return QueryBuilders.boolQuery().must(stringQuery).must(filterQuery);
    }

    /** Returns the physical index name for a logical document type: {@code prefix_type}. */
    protected String getIndexName(String documentType) {
        return indexPrefix + "_" + documentType;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/main/java/com/netflix/conductor/es6/dao/index/BulkRequestBuilderWrapper.java | es6-persistence/src/main/java/com/netflix/conductor/es6/dao/index/BulkRequestBuilderWrapper.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.dao.index;
import java.util.Objects;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.springframework.lang.NonNull;
/**
 * Thread-safe wrapper for {@link BulkRequestBuilder}: every operation on the underlying
 * builder is serialized by synchronizing on the builder instance itself.
 */
public class BulkRequestBuilderWrapper {

    private final BulkRequestBuilder bulkRequestBuilder;

    public BulkRequestBuilderWrapper(@NonNull BulkRequestBuilder bulkRequestBuilder) {
        this.bulkRequestBuilder = Objects.requireNonNull(bulkRequestBuilder);
    }

    /** Queues an update action on the bulk request. */
    public void add(@NonNull UpdateRequest req) {
        Objects.requireNonNull(req);
        synchronized (bulkRequestBuilder) {
            bulkRequestBuilder.add(req);
        }
    }

    /** Queues an index action on the bulk request. */
    public void add(@NonNull IndexRequest req) {
        Objects.requireNonNull(req);
        synchronized (bulkRequestBuilder) {
            bulkRequestBuilder.add(req);
        }
    }

    /** Returns the number of actions currently queued. */
    public int numberOfActions() {
        synchronized (bulkRequestBuilder) {
            return bulkRequestBuilder.numberOfActions();
        }
    }

    /** Kicks off asynchronous execution of the accumulated bulk request. */
    public ActionFuture<BulkResponse> execute() {
        synchronized (bulkRequestBuilder) {
            return bulkRequestBuilder.execute();
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/main/java/com/netflix/conductor/es6/dao/index/ElasticSearchDAOV6.java | es6-persistence/src/main/java/com/netflix/conductor/es6/dao/index/ElasticSearchDAOV6.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.dao.index;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.time.Instant;
import java.time.LocalDate;
import java.util.*;
import java.util.concurrent.*;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import org.elasticsearch.ResourceAlreadyExistsException;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.search.sort.SortBuilders;
import org.elasticsearch.search.sort.SortOrder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.annotations.Trace;
import com.netflix.conductor.common.metadata.events.EventExecution;
import com.netflix.conductor.common.metadata.tasks.TaskExecLog;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.core.exception.TransientException;
import com.netflix.conductor.dao.IndexDAO;
import com.netflix.conductor.es6.config.ElasticSearchProperties;
import com.netflix.conductor.es6.dao.query.parser.internal.ParserException;
import com.netflix.conductor.metrics.Monitors;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.type.MapType;
import com.fasterxml.jackson.databind.type.TypeFactory;
import jakarta.annotation.PostConstruct;
import jakarta.annotation.PreDestroy;
@Trace
public class ElasticSearchDAOV6 extends ElasticSearchBaseDAO implements IndexDAO {
    private static final Logger LOGGER = LoggerFactory.getLogger(ElasticSearchDAOV6.class);
    // Logical document types; physical index names are derived from indexPrefix + "_" + type.
    private static final String WORKFLOW_DOC_TYPE = "workflow";
    private static final String TASK_DOC_TYPE = "task";
    private static final String LOG_DOC_TYPE = "task_log";
    private static final String EVENT_DOC_TYPE = "event";
    private static final String MSG_DOC_TYPE = "message";
    // Sizing for the main async indexing executor (max pool size comes from properties).
    private static final int CORE_POOL_SIZE = 6;
    private static final long KEEP_ALIVE_TIME = 1L;
    // NOTE(review): not referenced anywhere in the code visible here — possibly used by a
    // method outside this view; confirm before removing.
    private static final int UPDATE_REQUEST_RETRY_COUNT = 5;
    private static final String CLASS_NAME = ElasticSearchDAOV6.class.getSimpleName();
    private final String workflowIndexName;
    private final String taskIndexName;
    private final String eventIndexPrefix;
    // event/message/log index names are rolled over periodically by updateIndexesNames(),
    // hence non-final.
    private String eventIndexName;
    private final String messageIndexPrefix;
    private String messageIndexName;
    private String logIndexName;
    private final String logIndexPrefix;
    // Optional document-type override; empty string means "use the per-call default type".
    private final String docTypeOverride;
    private final ObjectMapper objectMapper;
    private final Client elasticSearchClient;
    private static final TimeZone GMT = TimeZone.getTimeZone("GMT");
    // NOTE(review): SimpleDateFormat is not thread-safe, and format() is invoked from both
    // setup and the hourly index-name refresh scheduler — consider DateTimeFormatter; confirm.
    private static final SimpleDateFormat SIMPLE_DATE_FORMAT = new SimpleDateFormat("yyyyMMWW");
    private final ExecutorService executorService;
    private final ExecutorService logExecutorService;
    // Pending bulk-update buffers, keyed by (docType, refreshPolicy) pair.
    private final ConcurrentHashMap<Pair<String, WriteRequest.RefreshPolicy>, BulkRequests>
            bulkRequests;
    private final int indexBatchSize;
    private final long asyncBufferFlushTimeout;
    private final ElasticSearchProperties properties;
    private final RetryTemplate retryTemplate;

    static {
        // All rolling index names are computed in GMT regardless of server timezone.
        SIMPLE_DATE_FORMAT.setTimeZone(GMT);
    }
    /**
     * Wires the DAO: derives index names from the configured prefix, decides the optional
     * document-type override, and builds the two async executors (general indexing and
     * log/event indexing) plus a periodic bulk-buffer flusher.
     */
    public ElasticSearchDAOV6(
            Client elasticSearchClient,
            RetryTemplate retryTemplate,
            ElasticSearchProperties properties,
            ObjectMapper objectMapper) {
        this.objectMapper = objectMapper;
        this.elasticSearchClient = elasticSearchClient;
        this.indexPrefix = properties.getIndexPrefix();
        this.workflowIndexName = getIndexName(WORKFLOW_DOC_TYPE);
        this.taskIndexName = getIndexName(TASK_DOC_TYPE);
        this.logIndexPrefix = this.indexPrefix + "_" + LOG_DOC_TYPE;
        this.messageIndexPrefix = this.indexPrefix + "_" + MSG_DOC_TYPE;
        this.eventIndexPrefix = this.indexPrefix + "_" + EVENT_DOC_TYPE;
        int workerQueueSize = properties.getAsyncWorkerQueueSize();
        int maximumPoolSize = properties.getAsyncMaxPoolSize();
        this.bulkRequests = new ConcurrentHashMap<>();
        this.indexBatchSize = properties.getIndexBatchSize();
        this.asyncBufferFlushTimeout = properties.getAsyncBufferFlushTimeout().toMillis();
        this.properties = properties;
        // The override is honored only when automatic index management is disabled.
        if (!properties.isAutoIndexManagementEnabled()
                && StringUtils.isNotBlank(properties.getDocumentTypeOverride())) {
            docTypeOverride = properties.getDocumentTypeOverride();
        } else {
            docTypeOverride = "";
        }
        // Main indexing executor: bounded queue; overflow is dropped and counted, not blocked.
        this.executorService =
                new ThreadPoolExecutor(
                        CORE_POOL_SIZE,
                        maximumPoolSize,
                        KEEP_ALIVE_TIME,
                        TimeUnit.MINUTES,
                        new LinkedBlockingQueue<>(workerQueueSize),
                        (runnable, executor) -> {
                            LOGGER.warn(
                                    "Request {} to async dao discarded in executor {}",
                                    runnable,
                                    executor);
                            Monitors.recordDiscardedIndexingCount("indexQueue");
                        });
        int corePoolSize = 1;
        maximumPoolSize = 2;
        long keepAliveTime = 30L;
        // Smaller executor dedicated to task-log/event indexing, same drop-and-count policy.
        this.logExecutorService =
                new ThreadPoolExecutor(
                        corePoolSize,
                        maximumPoolSize,
                        keepAliveTime,
                        TimeUnit.SECONDS,
                        new LinkedBlockingQueue<>(workerQueueSize),
                        (runnable, executor) -> {
                            LOGGER.warn(
                                    "Request {} to async log dao discarded in executor {}",
                                    runnable,
                                    executor);
                            Monitors.recordDiscardedIndexingCount("logQueue");
                        });
        // NOTE(review): this scheduler is never referenced again and is not shut down in
        // shutdown(); it keeps a non-daemon thread alive — confirm whether that is intended.
        Executors.newSingleThreadScheduledExecutor()
                .scheduleAtFixedRate(this::flushBulkRequests, 60, 30, TimeUnit.SECONDS);
        this.retryTemplate = retryTemplate;
    }
    /** Gracefully stops the async indexing executors when the bean is destroyed. */
    @PreDestroy
    private void shutdown() {
        LOGGER.info("Starting graceful shutdown of executor service");
        shutdownExecutorService(logExecutorService);
        shutdownExecutorService(executorService);
    }

    /**
     * Shuts down one executor: waits up to 30s for queued work, then forces termination.
     * Re-interrupts the thread if the wait itself is interrupted.
     */
    private void shutdownExecutorService(ExecutorService execService) {
        try {
            execService.shutdown();
            if (execService.awaitTermination(30, TimeUnit.SECONDS)) {
                LOGGER.debug("tasks completed, shutting down");
            } else {
                LOGGER.warn("Forcing shutdown after waiting for 30 seconds");
                execService.shutdownNow();
            }
        } catch (InterruptedException ie) {
            LOGGER.warn(
                    "Shutdown interrupted, invoking shutdownNow on scheduledThreadPoolExecutor for delay queue");
            execService.shutdownNow();
            // Preserve the interrupt status for callers further up the stack.
            Thread.currentThread().interrupt();
        }
    }
    /**
     * Startup hook: blocks until the cluster is green, then (if auto index management is
     * enabled) creates templates and the workflow/task indexes.
     */
    @Override
    @PostConstruct
    public void setup() throws Exception {
        waitForHealthyCluster();
        if (properties.isAutoIndexManagementEnabled()) {
            createIndexesTemplates();
            createWorkflowIndex();
            createTaskIndex();
        }
    }

    /** Blocks until the ES cluster reports green status (no timeout set here). */
    private void waitForHealthyCluster() throws Exception {
        elasticSearchClient
                .admin()
                .cluster()
                .prepareHealth()
                .setWaitForGreenStatus()
                .execute()
                .get();
    }

    /** Initializes the indexes templates task_log, message and event, and mappings. */
    private void createIndexesTemplates() {
        try {
            initIndexesTemplates();
            updateIndexesNames();
            // Roll the dated log/event/message index names every hour.
            // NOTE(review): this scheduled pool is never shut down — confirm intended.
            Executors.newScheduledThreadPool(1)
                    .scheduleAtFixedRate(this::updateIndexesNames, 0, 1, TimeUnit.HOURS);
        } catch (Exception e) {
            LOGGER.error("Error creating index templates", e);
        }
    }
    /** Ensures the index templates for task logs, events and messages exist. */
    private void initIndexesTemplates() {
        initIndexTemplate(LOG_DOC_TYPE);
        initIndexTemplate(EVENT_DOC_TYPE);
        initIndexTemplate(MSG_DOC_TYPE);
    }

    /**
     * Creates the index template {@code template_<type>} from the classpath JSON of the same
     * name, but only if it does not already exist. Failures are logged, not propagated.
     */
    private void initIndexTemplate(String type) {
        String template = "template_" + type;
        GetIndexTemplatesResponse result =
                elasticSearchClient
                        .admin()
                        .indices()
                        .prepareGetTemplates(template)
                        .execute()
                        .actionGet();
        if (result.getIndexTemplates().isEmpty()) {
            LOGGER.info("Creating the index template '{}'", template);
            try {
                String templateSource = loadTypeMappingSource("/" + template + ".json");
                elasticSearchClient
                        .admin()
                        .indices()
                        .preparePutTemplate(template)
                        .setSource(templateSource.getBytes(), XContentType.JSON)
                        .execute()
                        .actionGet();
            } catch (Exception e) {
                LOGGER.error("Failed to init {}", template, e);
            }
        }
    }

    /** Rolls the dated log/event/message index names (called at startup and hourly). */
    private void updateIndexesNames() {
        logIndexName = updateIndexName(LOG_DOC_TYPE);
        eventIndexName = updateIndexName(EVENT_DOC_TYPE);
        messageIndexName = updateIndexName(MSG_DOC_TYPE);
    }

    /**
     * Computes the current dated index name ({@code prefix_type_yyyyMMWW}, GMT) and makes
     * sure that index exists.
     * NOTE(review): SIMPLE_DATE_FORMAT is shared and not thread-safe — see field comment.
     */
    private String updateIndexName(String type) {
        String indexName =
                this.indexPrefix + "_" + type + "_" + SIMPLE_DATE_FORMAT.format(new Date());
        createIndex(indexName);
        return indexName;
    }

    /** Creates the workflow index and installs its type mapping. */
    private void createWorkflowIndex() {
        createIndex(workflowIndexName);
        addTypeMapping(workflowIndexName, WORKFLOW_DOC_TYPE, "/mappings_docType_workflow.json");
    }

    /** Creates the task index and installs its type mapping. */
    private void createTaskIndex() {
        createIndex(taskIndexName);
        addTypeMapping(taskIndexName, TASK_DOC_TYPE, "/mappings_docType_task.json");
    }
    /**
     * Creates {@code indexName} with the configured shard/replica counts if it does not
     * already exist. Existence is probed via a get-index call; IndexNotFoundException is
     * the "does not exist" signal, and a concurrent-create race is tolerated.
     */
    private void createIndex(String indexName) {
        try {
            elasticSearchClient
                    .admin()
                    .indices()
                    .prepareGetIndex()
                    .addIndices(indexName)
                    .execute()
                    .actionGet();
        } catch (IndexNotFoundException infe) {
            try {
                CreateIndexRequest createIndexRequest = new CreateIndexRequest(indexName);
                createIndexRequest.settings(
                        Settings.builder()
                                .put("index.number_of_shards", properties.getIndexShardCount())
                                .put(
                                        "index.number_of_replicas",
                                        properties.getIndexReplicasCount()));
                elasticSearchClient.admin().indices().create(createIndexRequest).actionGet();
            } catch (ResourceAlreadyExistsException done) {
                // Another node created it first; safe to continue.
                LOGGER.error("Failed to update log index name: {}", indexName, done);
            }
        }
    }

    /**
     * Installs the type mapping from the classpath JSON at {@code sourcePath} onto
     * {@code indexName}, but only when no mapping exists yet. Failures are logged.
     */
    private void addTypeMapping(String indexName, String type, String sourcePath) {
        GetMappingsResponse getMappingsResponse =
                elasticSearchClient
                        .admin()
                        .indices()
                        .prepareGetMappings(indexName)
                        .addTypes(type)
                        .execute()
                        .actionGet();
        if (getMappingsResponse.mappings().isEmpty()) {
            LOGGER.info("Adding the {} type mappings", indexName);
            try {
                String source = loadTypeMappingSource(sourcePath);
                elasticSearchClient
                        .admin()
                        .indices()
                        .preparePutMapping(indexName)
                        .setType(type)
                        .setSource(source, XContentType.JSON)
                        .execute()
                        .actionGet();
            } catch (Exception e) {
                LOGGER.error("Failed to init index {} mappings", indexName, e);
            }
        }
    }
    /**
     * Synchronously upserts one workflow summary document, keyed by workflow id.
     * Failures are logged and counted; they are never propagated to the caller.
     */
    @Override
    public void indexWorkflow(WorkflowSummary workflow) {
        try {
            long startTime = Instant.now().toEpochMilli();
            String id = workflow.getWorkflowId();
            byte[] doc = objectMapper.writeValueAsBytes(workflow);
            String docType =
                    StringUtils.isBlank(docTypeOverride) ? WORKFLOW_DOC_TYPE : docTypeOverride;
            UpdateRequest req = buildUpdateRequest(id, doc, workflowIndexName, docType);
            // Optionally block until the document is visible to search.
            if (properties.isWaitForIndexRefresh()) {
                req.setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL);
            }
            // Unlike indexTask(), workflows are written directly, bypassing the bulk buffer.
            elasticSearchClient.update(req).actionGet();
            long endTime = Instant.now().toEpochMilli();
            LOGGER.debug(
                    "Time taken {} for indexing workflow: {}",
                    endTime - startTime,
                    workflow.getWorkflowId());
            Monitors.recordESIndexTime("index_workflow", WORKFLOW_DOC_TYPE, endTime - startTime);
            Monitors.recordWorkerQueueSize(
                    "indexQueue", ((ThreadPoolExecutor) executorService).getQueue().size());
        } catch (Exception e) {
            Monitors.error(CLASS_NAME, "indexWorkflow");
            LOGGER.error("Failed to index workflow: {}", workflow.getWorkflowId(), e);
        }
    }

    /** Runs {@link #indexWorkflow(WorkflowSummary)} asynchronously on the indexing executor. */
    @Override
    public CompletableFuture<Void> asyncIndexWorkflow(WorkflowSummary workflow) {
        return CompletableFuture.runAsync(() -> indexWorkflow(workflow), executorService);
    }
    /**
     * Buffers an upsert of one task summary document (keyed by task id) into the bulk
     * indexing pipeline. Failures are logged, never propagated.
     */
    @Override
    public void indexTask(TaskSummary task) {
        try {
            long startTime = Instant.now().toEpochMilli();
            String id = task.getTaskId();
            byte[] doc = objectMapper.writeValueAsBytes(task);
            String docType = StringUtils.isBlank(docTypeOverride) ? TASK_DOC_TYPE : docTypeOverride;
            UpdateRequest req = new UpdateRequest(taskIndexName, docType, id);
            req.doc(doc, XContentType.JSON);
            req.upsert(doc, XContentType.JSON);
            // null refresh policy means "ES default"; WAIT_UNTIL blocks until searchable.
            WriteRequest.RefreshPolicy refreshPolicy =
                    properties.isWaitForIndexRefresh()
                            ? WriteRequest.RefreshPolicy.WAIT_UNTIL
                            : null;
            indexObject(req, TASK_DOC_TYPE, refreshPolicy);
            long endTime = Instant.now().toEpochMilli();
            LOGGER.debug(
                    "Time taken {} for indexing task:{} in workflow: {}",
                    endTime - startTime,
                    task.getTaskId(),
                    task.getWorkflowId());
            Monitors.recordESIndexTime("index_task", TASK_DOC_TYPE, endTime - startTime);
            Monitors.recordWorkerQueueSize(
                    "indexQueue", ((ThreadPoolExecutor) executorService).getQueue().size());
        } catch (Exception e) {
            LOGGER.error("Failed to index task: {}", task.getTaskId(), e);
        }
    }

    /** Runs {@link #indexTask(TaskSummary)} asynchronously on the indexing executor. */
    @Override
    public CompletableFuture<Void> asyncIndexTask(TaskSummary task) {
        return CompletableFuture.runAsync(() -> indexTask(task), executorService);
    }
    /** Buffers {@code req} for bulk indexing using the default (null) refresh policy. */
    private void indexObject(final UpdateRequest req, final String docType) {
        indexObject(req, docType, null);
    }
private void indexObject(
final UpdateRequest req,
final String docType,
final WriteRequest.RefreshPolicy refreshPolicy) {
Pair<String, WriteRequest.RefreshPolicy> requestTypeKey =
new ImmutablePair<>(docType, refreshPolicy);
if (bulkRequests.get(requestTypeKey) == null) {
BulkRequestBuilder bulkRequestBuilder = elasticSearchClient.prepareBulk();
Optional.ofNullable(requestTypeKey.getRight())
.ifPresent(bulkRequestBuilder::setRefreshPolicy);
bulkRequests.put(
requestTypeKey,
new BulkRequests(System.currentTimeMillis(), bulkRequestBuilder));
}
bulkRequests.get(requestTypeKey).getBulkRequestBuilder().add(req);
if (bulkRequests.get(requestTypeKey).getBulkRequestBuilder().numberOfActions()
>= this.indexBatchSize) {
indexBulkRequest(requestTypeKey);
}
}
    /**
     * Flushes the bulk buffer for {@code requestTypeKey} (if it has pending actions) with
     * retries, then replaces it with a fresh builder carrying the same refresh policy.
     * Synchronized so concurrent flush triggers do not double-send the same batch.
     */
    private synchronized void indexBulkRequest(
            Pair<String, WriteRequest.RefreshPolicy> requestTypeKey) {
        if (bulkRequests.get(requestTypeKey).getBulkRequestBuilder() != null
                && bulkRequests.get(requestTypeKey).getBulkRequestBuilder().numberOfActions() > 0) {
            updateWithRetry(
                    bulkRequests.get(requestTypeKey).getBulkRequestBuilder(),
                    requestTypeKey.getLeft());
            BulkRequestBuilder bulkRequestBuilder = elasticSearchClient.prepareBulk();
            Optional.ofNullable(requestTypeKey.getRight())
                    .ifPresent(bulkRequestBuilder::setRefreshPolicy);
            bulkRequests.put(
                    requestTypeKey,
                    new BulkRequests(System.currentTimeMillis(), bulkRequestBuilder));
        }
    }
    /**
     * Indexes a batch of task execution logs in a single bulk request (5s timeout).
     * No-op on an empty list; failures are logged with the affected task ids.
     */
    @Override
    public void addTaskExecutionLogs(List<TaskExecLog> taskExecLogs) {
        if (taskExecLogs.isEmpty()) {
            return;
        }
        try {
            long startTime = Instant.now().toEpochMilli();
            BulkRequestBuilderWrapper bulkRequestBuilder =
                    new BulkRequestBuilderWrapper(elasticSearchClient.prepareBulk());
            for (TaskExecLog log : taskExecLogs) {
                String docType =
                        StringUtils.isBlank(docTypeOverride) ? LOG_DOC_TYPE : docTypeOverride;
                // No explicit id: ES assigns one, so each log entry is a new document.
                IndexRequest request = new IndexRequest(logIndexName, docType);
                request.source(objectMapper.writeValueAsBytes(log), XContentType.JSON);
                bulkRequestBuilder.add(request);
            }
            bulkRequestBuilder.execute().actionGet(5, TimeUnit.SECONDS);
            long endTime = Instant.now().toEpochMilli();
            LOGGER.debug("Time taken {} for indexing taskExecutionLogs", endTime - startTime);
            Monitors.recordESIndexTime(
                    "index_task_execution_logs", LOG_DOC_TYPE, endTime - startTime);
            Monitors.recordWorkerQueueSize(
                    "logQueue", ((ThreadPoolExecutor) logExecutorService).getQueue().size());
        } catch (Exception e) {
            List<String> taskIds =
                    taskExecLogs.stream().map(TaskExecLog::getTaskId).collect(Collectors.toList());
            LOGGER.error("Failed to index task execution logs for tasks: {}", taskIds, e);
        }
    }

    /** Runs {@link #addTaskExecutionLogs(List)} asynchronously on the log executor. */
    @Override
    public CompletableFuture<Void> asyncAddTaskExecutionLogs(List<TaskExecLog> logs) {
        return CompletableFuture.runAsync(() -> addTaskExecutionLogs(logs), logExecutorService);
    }
    /**
     * Fetches the execution logs for a task, oldest first, capped at the configured limit.
     *
     * <p>NOTE(review): returns {@code null} (not an empty list) when the query fails —
     * callers must null-check; consider returning an empty list instead. Confirm callers.
     */
    @Override
    public List<TaskExecLog> getTaskExecutionLogs(String taskId) {
        try {
            BoolQueryBuilder query = boolQueryBuilder("taskId='" + taskId + "'", "*");
            String docType = StringUtils.isBlank(docTypeOverride) ? LOG_DOC_TYPE : docTypeOverride;
            // Search across all dated log indexes via the prefix wildcard.
            final SearchRequestBuilder srb =
                    elasticSearchClient
                            .prepareSearch(logIndexPrefix + "*")
                            .setQuery(query)
                            .setTypes(docType)
                            .setSize(properties.getTaskLogResultLimit())
                            .addSort(SortBuilders.fieldSort("createdTime").order(SortOrder.ASC));
            return mapTaskExecLogsResponse(srb.execute().actionGet());
        } catch (Exception e) {
            LOGGER.error("Failed to get task execution logs for task: {}", taskId, e);
        }
        return null;
    }

    /** Deserializes each search hit's source JSON into a {@link TaskExecLog}. */
    private List<TaskExecLog> mapTaskExecLogsResponse(SearchResponse response) throws IOException {
        SearchHit[] hits = response.getHits().getHits();
        List<TaskExecLog> logs = new ArrayList<>(hits.length);
        for (SearchHit hit : hits) {
            String source = hit.getSourceAsString();
            TaskExecLog tel = objectMapper.readValue(source, TaskExecLog.class);
            logs.add(tel);
        }
        return logs;
    }
    /**
     * Buffers one queue message document (keyed by message id) into the bulk pipeline.
     * Failures are logged, never propagated.
     */
    @Override
    public void addMessage(String queue, Message message) {
        try {
            long startTime = Instant.now().toEpochMilli();
            Map<String, Object> doc = new HashMap<>();
            doc.put("messageId", message.getId());
            doc.put("payload", message.getPayload());
            doc.put("queue", queue);
            doc.put("created", System.currentTimeMillis());
            String docType = StringUtils.isBlank(docTypeOverride) ? MSG_DOC_TYPE : docTypeOverride;
            UpdateRequest req = new UpdateRequest(messageIndexName, docType, message.getId());
            req.doc(doc, XContentType.JSON);
            req.upsert(doc, XContentType.JSON);
            indexObject(req, MSG_DOC_TYPE);
            long endTime = Instant.now().toEpochMilli();
            LOGGER.debug(
                    "Time taken {} for indexing message: {}",
                    endTime - startTime,
                    message.getId());
            Monitors.recordESIndexTime("add_message", MSG_DOC_TYPE, endTime - startTime);
        } catch (Exception e) {
            LOGGER.error("Failed to index message: {}", message.getId(), e);
        }
    }

    /** Runs {@link #addMessage(String, Message)} asynchronously on the indexing executor. */
    @Override
    public CompletableFuture<Void> asyncAddMessage(String queue, Message message) {
        return CompletableFuture.runAsync(() -> addMessage(queue, message), executorService);
    }

    /**
     * Fetches all messages indexed for a queue, oldest first.
     *
     * <p>NOTE(review): returns {@code null} on failure — callers must null-check.
     */
    @Override
    public List<Message> getMessages(String queue) {
        try {
            BoolQueryBuilder fq = boolQueryBuilder("queue='" + queue + "'", "*");
            String docType = StringUtils.isBlank(docTypeOverride) ? MSG_DOC_TYPE : docTypeOverride;
            final SearchRequestBuilder srb =
                    elasticSearchClient
                            .prepareSearch(messageIndexPrefix + "*")
                            .setQuery(fq)
                            .setTypes(docType)
                            .addSort(SortBuilders.fieldSort("created").order(SortOrder.ASC));
            return mapGetMessagesResponse(srb.execute().actionGet());
        } catch (Exception e) {
            LOGGER.error("Failed to get messages for queue: {}", queue, e);
        }
        return null;
    }

    /** Maps each hit's {messageId, payload} fields into a {@link Message}. */
    private List<Message> mapGetMessagesResponse(SearchResponse response) throws IOException {
        SearchHit[] hits = response.getHits().getHits();
        TypeFactory factory = TypeFactory.defaultInstance();
        MapType type = factory.constructMapType(HashMap.class, String.class, String.class);
        List<Message> messages = new ArrayList<>(hits.length);
        for (SearchHit hit : hits) {
            String source = hit.getSourceAsString();
            Map<String, String> mapSource = objectMapper.readValue(source, type);
            Message msg = new Message(mapSource.get("messageId"), mapSource.get("payload"), null);
            messages.add(msg);
        }
        return messages;
    }
    /**
     * Buffers one event-execution document into the bulk pipeline. The document id is the
     * composite {@code name.event.messageId.executionId}, so re-indexing the same execution
     * upserts rather than duplicates. Failures are logged, never propagated.
     */
    @Override
    public void addEventExecution(EventExecution eventExecution) {
        try {
            long startTime = Instant.now().toEpochMilli();
            byte[] doc = objectMapper.writeValueAsBytes(eventExecution);
            String id =
                    eventExecution.getName()
                            + "."
                            + eventExecution.getEvent()
                            + "."
                            + eventExecution.getMessageId()
                            + "."
                            + eventExecution.getId();
            String docType =
                    StringUtils.isBlank(docTypeOverride) ? EVENT_DOC_TYPE : docTypeOverride;
            UpdateRequest req = buildUpdateRequest(id, doc, eventIndexName, docType);
            indexObject(req, EVENT_DOC_TYPE);
            long endTime = Instant.now().toEpochMilli();
            LOGGER.debug(
                    "Time taken {} for indexing event execution: {}",
                    endTime - startTime,
                    eventExecution.getId());
            Monitors.recordESIndexTime("add_event_execution", EVENT_DOC_TYPE, endTime - startTime);
            Monitors.recordWorkerQueueSize(
                    "logQueue", ((ThreadPoolExecutor) logExecutorService).getQueue().size());
        } catch (Exception e) {
            LOGGER.error("Failed to index event execution: {}", eventExecution.getId(), e);
        }
    }

    /** Runs {@link #addEventExecution(EventExecution)} asynchronously on the log executor. */
    @Override
    public CompletableFuture<Void> asyncAddEventExecution(EventExecution eventExecution) {
        return CompletableFuture.runAsync(
                () -> addEventExecution(eventExecution), logExecutorService);
    }

    /**
     * Fetches all executions recorded for an event, oldest first.
     *
     * <p>NOTE(review): returns {@code null} on failure — callers must null-check.
     */
    @Override
    public List<EventExecution> getEventExecutions(String event) {
        try {
            BoolQueryBuilder fq = boolQueryBuilder("event='" + event + "'", "*");
            String docType =
                    StringUtils.isBlank(docTypeOverride) ? EVENT_DOC_TYPE : docTypeOverride;
            final SearchRequestBuilder srb =
                    elasticSearchClient
                            .prepareSearch(eventIndexPrefix + "*")
                            .setQuery(fq)
                            .setTypes(docType)
                            .addSort(SortBuilders.fieldSort("created").order(SortOrder.ASC));
            return mapEventExecutionsResponse(srb.execute().actionGet());
        } catch (Exception e) {
            LOGGER.error("Failed to get executions for event: {}", event, e);
        }
        return null;
    }

    /** Deserializes each hit's source JSON into an {@link EventExecution}. */
    private List<EventExecution> mapEventExecutionsResponse(SearchResponse response)
            throws IOException {
        SearchHit[] hits = response.getHits().getHits();
        List<EventExecution> executions = new ArrayList<>(hits.length);
        for (SearchHit hit : hits) {
            String source = hit.getSourceAsString();
            EventExecution tel = objectMapper.readValue(source, EventExecution.class);
            executions.add(tel);
        }
        return executions;
    }
    /**
     * Executes a bulk request through the retry template with a 5s per-attempt timeout.
     * Exhausted retries are logged and counted; the failed batch is dropped (not re-queued).
     */
    private void updateWithRetry(BulkRequestBuilderWrapper request, String docType) {
        try {
            long startTime = Instant.now().toEpochMilli();
            retryTemplate.execute(context -> request.execute().actionGet(5, TimeUnit.SECONDS));
            long endTime = Instant.now().toEpochMilli();
            LOGGER.debug(
                    "Time taken {} for indexing object of type: {}", endTime - startTime, docType);
            Monitors.recordESIndexTime("index_object", docType, endTime - startTime);
        } catch (Exception e) {
            Monitors.error(CLASS_NAME, "index");
            LOGGER.error("Failed to index {} for requests", request.numberOfActions(), e);
        }
    }
    /** Searches workflow documents, returning matching workflow ids. */
    @Override
    public SearchResult<String> searchWorkflows(
            String query, String freeText, int start, int count, List<String> sort) {
        return search(query, start, count, sort, freeText, WORKFLOW_DOC_TYPE, true, String.class);
    }

    /** Searches workflow documents, returning full {@link WorkflowSummary} objects. */
    @Override
    public SearchResult<WorkflowSummary> searchWorkflowSummary(
            String query, String freeText, int start, int count, List<String> sort) {
        return search(
                query,
                start,
                count,
                sort,
                freeText,
                WORKFLOW_DOC_TYPE,
                false,
                WorkflowSummary.class);
    }

    /** Counts workflow documents matching the query and free-text search. */
    @Override
    public long getWorkflowCount(String query, String freeText) {
        return count(query, freeText, WORKFLOW_DOC_TYPE);
    }

    /** Searches task documents, returning matching task ids. */
    @Override
    public SearchResult<String> searchTasks(
            String query, String freeText, int start, int count, List<String> sort) {
        return search(query, start, count, sort, freeText, TASK_DOC_TYPE, true, String.class);
    }

    /** Searches task documents, returning full {@link TaskSummary} objects. */
    @Override
    public SearchResult<TaskSummary> searchTaskSummary(
            String query, String freeText, int start, int count, List<String> sort) {
        return search(query, start, count, sort, freeText, TASK_DOC_TYPE, false, TaskSummary.class);
    }
@Override
public void removeWorkflow(String workflowId) {
try {
long startTime = Instant.now().toEpochMilli();
DeleteRequest request =
new DeleteRequest(workflowIndexName, WORKFLOW_DOC_TYPE, workflowId);
DeleteResponse response = elasticSearchClient.delete(request).actionGet();
if (response.getResult() == DocWriteResponse.Result.DELETED) {
LOGGER.error("Index removal failed - document not found by id: {}", workflowId);
}
long endTime = Instant.now().toEpochMilli();
LOGGER.debug(
"Time taken {} for removing workflow: {}", endTime - startTime, workflowId);
Monitors.recordESIndexTime("remove_workflow", WORKFLOW_DOC_TYPE, endTime - startTime);
Monitors.recordWorkerQueueSize(
"indexQueue", ((ThreadPoolExecutor) executorService).getQueue().size());
} catch (Throwable e) {
LOGGER.error("Failed to remove workflow {} from index", workflowId, e);
Monitors.error(CLASS_NAME, "remove");
}
}
@Override
public CompletableFuture<Void> asyncRemoveWorkflow(String workflowId) {
return CompletableFuture.runAsync(() -> removeWorkflow(workflowId), executorService);
}
@Override
public void updateWorkflow(String workflowInstanceId, String[] keys, Object[] values) {
if (keys.length != values.length) {
throw new IllegalArgumentException("Number of keys and values do not match");
}
long startTime = Instant.now().toEpochMilli();
UpdateRequest request =
new UpdateRequest(workflowIndexName, WORKFLOW_DOC_TYPE, workflowInstanceId);
Map<String, Object> source =
IntStream.range(0, keys.length)
.boxed()
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | true |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/main/java/com/netflix/conductor/es6/dao/index/BulkRequestWrapper.java | es6-persistence/src/main/java/com/netflix/conductor/es6/dao/index/BulkRequestWrapper.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.dao.index;
import java.util.Objects;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.springframework.lang.NonNull;
/** Thread-safe wrapper for {@link BulkRequest}. */
class BulkRequestWrapper {
private final BulkRequest bulkRequest;
BulkRequestWrapper(@NonNull BulkRequest bulkRequest) {
this.bulkRequest = Objects.requireNonNull(bulkRequest);
}
public void add(@NonNull UpdateRequest req) {
synchronized (bulkRequest) {
bulkRequest.add(Objects.requireNonNull(req));
}
}
public void add(@NonNull IndexRequest req) {
synchronized (bulkRequest) {
bulkRequest.add(Objects.requireNonNull(req));
}
}
BulkRequest get() {
return bulkRequest;
}
int numberOfActions() {
synchronized (bulkRequest) {
return bulkRequest.numberOfActions();
}
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/main/java/com/netflix/conductor/es6/config/IsHttpProtocol.java | es6-persistence/src/main/java/com/netflix/conductor/es6/config/IsHttpProtocol.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.config;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Condition;
import org.springframework.context.annotation.ConditionContext;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.type.AnnotatedTypeMetadata;
@EnableConfigurationProperties(ElasticSearchProperties.class)
@Configuration
public class IsHttpProtocol implements Condition {
@Override
public boolean matches(ConditionContext context, AnnotatedTypeMetadata metadata) {
String url = context.getEnvironment().getProperty("conductor.elasticsearch.url");
if (url.startsWith("http") || url.startsWith("https")) {
return true;
}
return false;
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/main/java/com/netflix/conductor/es6/config/ElasticSearchConditions.java | es6-persistence/src/main/java/com/netflix/conductor/es6/config/ElasticSearchConditions.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.config;
import org.springframework.boot.autoconfigure.condition.AllNestedConditions;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
public class ElasticSearchConditions {
private ElasticSearchConditions() {}
public static class ElasticSearchV6Enabled extends AllNestedConditions {
ElasticSearchV6Enabled() {
super(ConfigurationPhase.PARSE_CONFIGURATION);
}
@SuppressWarnings("unused")
@ConditionalOnProperty(
name = "conductor.indexing.enabled",
havingValue = "true",
matchIfMissing = true)
static class enabledIndexing {}
@SuppressWarnings("unused")
@ConditionalOnProperty(
name = "conductor.elasticsearch.version",
havingValue = "6",
matchIfMissing = true)
static class enabledES6 {}
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/main/java/com/netflix/conductor/es6/config/IsTcpProtocol.java | es6-persistence/src/main/java/com/netflix/conductor/es6/config/IsTcpProtocol.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.config;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Condition;
import org.springframework.context.annotation.ConditionContext;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.type.AnnotatedTypeMetadata;
@EnableConfigurationProperties(ElasticSearchProperties.class)
@Configuration
public class IsTcpProtocol implements Condition {
@Override
public boolean matches(ConditionContext context, AnnotatedTypeMetadata metadata) {
String url = context.getEnvironment().getProperty("conductor.elasticsearch.url");
if (url.startsWith("http") || url.startsWith("https")) {
return false;
}
return true;
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/main/java/com/netflix/conductor/es6/config/ElasticSearchProperties.java | es6-persistence/src/main/java/com/netflix/conductor/es6/config/ElasticSearchProperties.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.config;
import java.net.MalformedURLException;
import java.net.URL;
import java.time.Duration;
import java.time.temporal.ChronoUnit;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.convert.DurationUnit;
@ConfigurationProperties("conductor.elasticsearch")
public class ElasticSearchProperties {
/**
* The comma separated list of urls for the elasticsearch cluster. Format --
* host1:port1,host2:port2
*/
private String url = "localhost:9300";
/** The index prefix to be used when creating indices */
private String indexPrefix = "conductor";
/** The color of the elasticserach cluster to wait for to confirm healthy status */
private String clusterHealthColor = "green";
/** The size of the batch to be used for bulk indexing in async mode */
private int indexBatchSize = 1;
/** The size of the queue used for holding async indexing tasks */
private int asyncWorkerQueueSize = 100;
/** The maximum number of threads allowed in the async pool */
private int asyncMaxPoolSize = 12;
/**
* The time in seconds after which the async buffers will be flushed (if no activity) to prevent
* data loss
*/
@DurationUnit(ChronoUnit.SECONDS)
private Duration asyncBufferFlushTimeout = Duration.ofSeconds(10);
/** The number of shards that the index will be created with */
private int indexShardCount = 5;
/** The number of replicas that the index will be configured to have */
private int indexReplicasCount = 1;
/** The number of task log results that will be returned in the response */
private int taskLogResultLimit = 10;
/** The timeout in milliseconds used when requesting a connection from the connection manager */
private int restClientConnectionRequestTimeout = -1;
/** Used to control if index management is to be enabled or will be controlled externally */
private boolean autoIndexManagementEnabled = true;
/**
* Document types are deprecated in ES6 and removed from ES7. This property can be used to
* disable the use of specific document types with an override. This property is currently used
* in ES6 module.
*
* <p><em>Note that this property will only take effect if {@link
* ElasticSearchProperties#isAutoIndexManagementEnabled} is set to false and index management is
* handled outside of this module.</em>
*/
private String documentTypeOverride = "";
/** Elasticsearch basic auth username */
private String username;
/** Elasticsearch basic auth password */
private String password;
/**
* Whether to wait for index refresh when updating tasks and workflows. When enabled, the
* operation will block until the changes are visible for search. This guarantees immediate
* search visibility but can significantly impact performance (20-30s delays). Defaults to false
* for better performance.
*/
private boolean waitForIndexRefresh = false;
public String getUrl() {
return url;
}
public void setUrl(String url) {
this.url = url;
}
public String getIndexPrefix() {
return indexPrefix;
}
public void setIndexPrefix(String indexPrefix) {
this.indexPrefix = indexPrefix;
}
public String getClusterHealthColor() {
return clusterHealthColor;
}
public void setClusterHealthColor(String clusterHealthColor) {
this.clusterHealthColor = clusterHealthColor;
}
public int getIndexBatchSize() {
return indexBatchSize;
}
public void setIndexBatchSize(int indexBatchSize) {
this.indexBatchSize = indexBatchSize;
}
public int getAsyncWorkerQueueSize() {
return asyncWorkerQueueSize;
}
public void setAsyncWorkerQueueSize(int asyncWorkerQueueSize) {
this.asyncWorkerQueueSize = asyncWorkerQueueSize;
}
public int getAsyncMaxPoolSize() {
return asyncMaxPoolSize;
}
public void setAsyncMaxPoolSize(int asyncMaxPoolSize) {
this.asyncMaxPoolSize = asyncMaxPoolSize;
}
public Duration getAsyncBufferFlushTimeout() {
return asyncBufferFlushTimeout;
}
public void setAsyncBufferFlushTimeout(Duration asyncBufferFlushTimeout) {
this.asyncBufferFlushTimeout = asyncBufferFlushTimeout;
}
public int getIndexShardCount() {
return indexShardCount;
}
public void setIndexShardCount(int indexShardCount) {
this.indexShardCount = indexShardCount;
}
public int getIndexReplicasCount() {
return indexReplicasCount;
}
public void setIndexReplicasCount(int indexReplicasCount) {
this.indexReplicasCount = indexReplicasCount;
}
public int getTaskLogResultLimit() {
return taskLogResultLimit;
}
public void setTaskLogResultLimit(int taskLogResultLimit) {
this.taskLogResultLimit = taskLogResultLimit;
}
public int getRestClientConnectionRequestTimeout() {
return restClientConnectionRequestTimeout;
}
public void setRestClientConnectionRequestTimeout(int restClientConnectionRequestTimeout) {
this.restClientConnectionRequestTimeout = restClientConnectionRequestTimeout;
}
public boolean isAutoIndexManagementEnabled() {
return autoIndexManagementEnabled;
}
public void setAutoIndexManagementEnabled(boolean autoIndexManagementEnabled) {
this.autoIndexManagementEnabled = autoIndexManagementEnabled;
}
public String getDocumentTypeOverride() {
return documentTypeOverride;
}
public void setDocumentTypeOverride(String documentTypeOverride) {
this.documentTypeOverride = documentTypeOverride;
}
public String getUsername() {
return username;
}
public void setUsername(String username) {
this.username = username;
}
public String getPassword() {
return password;
}
public void setPassword(String password) {
this.password = password;
}
public boolean isWaitForIndexRefresh() {
return waitForIndexRefresh;
}
public void setWaitForIndexRefresh(boolean waitForIndexRefresh) {
this.waitForIndexRefresh = waitForIndexRefresh;
}
public List<URL> toURLs() {
String clusterAddress = getUrl();
String[] hosts = clusterAddress.split(",");
return Arrays.stream(hosts)
.map(
host ->
(host.startsWith("http://")
|| host.startsWith("https://")
|| host.startsWith("tcp://"))
? toURL(host)
: toURL("tcp://" + host))
.collect(Collectors.toList());
}
private URL toURL(String url) {
try {
return new URL(url);
} catch (MalformedURLException e) {
throw new IllegalArgumentException(url + "can not be converted to java.net.URL");
}
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es6-persistence/src/main/java/com/netflix/conductor/es6/config/ElasticSearchV6Configuration.java | es6-persistence/src/main/java/com/netflix/conductor/es6/config/ElasticSearchV6Configuration.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es6.config;
import java.net.InetAddress;
import java.net.URI;
import java.net.URL;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import org.apache.http.HttpHost;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.transport.client.PreBuiltTransportClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Conditional;
import org.springframework.context.annotation.Configuration;
import org.springframework.retry.backoff.FixedBackOffPolicy;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.dao.IndexDAO;
import com.netflix.conductor.es6.dao.index.ElasticSearchDAOV6;
import com.netflix.conductor.es6.dao.index.ElasticSearchRestDAOV6;
import com.fasterxml.jackson.databind.ObjectMapper;
@Configuration(proxyBeanMethods = false)
@EnableConfigurationProperties(ElasticSearchProperties.class)
@Conditional(ElasticSearchConditions.ElasticSearchV6Enabled.class)
public class ElasticSearchV6Configuration {
private static final Logger log = LoggerFactory.getLogger(ElasticSearchV6Configuration.class);
@Bean
@Conditional(IsTcpProtocol.class)
public Client client(ElasticSearchProperties properties) {
Settings settings =
Settings.builder()
.put("client.transport.ignore_cluster_name", true)
.put("client.transport.sniff", true)
.build();
TransportClient transportClient = new PreBuiltTransportClient(settings);
List<URI> clusterAddresses = getURIs(properties);
if (clusterAddresses.isEmpty()) {
log.warn("workflow.elasticsearch.url is not set. Indexing will remain DISABLED.");
}
for (URI hostAddress : clusterAddresses) {
int port = Optional.ofNullable(hostAddress.getPort()).orElse(9200);
try {
transportClient.addTransportAddress(
new TransportAddress(InetAddress.getByName(hostAddress.getHost()), port));
} catch (Exception e) {
throw new RuntimeException("Invalid host" + hostAddress.getHost(), e);
}
}
return transportClient;
}
@Bean
@Conditional(IsHttpProtocol.class)
public RestClient restClient(RestClientBuilder restClientBuilder) {
return restClientBuilder.build();
}
@Bean
@Conditional(IsHttpProtocol.class)
public RestClientBuilder restClientBuilder(ElasticSearchProperties properties) {
RestClientBuilder builder = RestClient.builder(convertToHttpHosts(properties.toURLs()));
if (properties.getRestClientConnectionRequestTimeout() > 0) {
builder.setRequestConfigCallback(
requestConfigBuilder ->
requestConfigBuilder.setConnectionRequestTimeout(
properties.getRestClientConnectionRequestTimeout()));
}
if (properties.getUsername() != null && properties.getPassword() != null) {
log.info(
"Configure ElasticSearch with BASIC authentication. User:{}",
properties.getUsername());
final CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
credentialsProvider.setCredentials(
AuthScope.ANY,
new UsernamePasswordCredentials(
properties.getUsername(), properties.getPassword()));
builder.setHttpClientConfigCallback(
httpClientBuilder ->
httpClientBuilder.setDefaultCredentialsProvider(credentialsProvider));
} else {
log.info("Configure ElasticSearch with no authentication.");
}
return builder;
}
@Bean
@Conditional(IsHttpProtocol.class)
public IndexDAO es6IndexRestDAO(
RestClientBuilder restClientBuilder,
ElasticSearchProperties properties,
@Qualifier("es6RetryTemplate") RetryTemplate retryTemplate,
ObjectMapper objectMapper) {
return new ElasticSearchRestDAOV6(
restClientBuilder, retryTemplate, properties, objectMapper);
}
@Bean
@Conditional(IsTcpProtocol.class)
public IndexDAO es6IndexDAO(
Client client,
@Qualifier("es6RetryTemplate") RetryTemplate retryTemplate,
ElasticSearchProperties properties,
ObjectMapper objectMapper) {
return new ElasticSearchDAOV6(client, retryTemplate, properties, objectMapper);
}
@Bean
public RetryTemplate es6RetryTemplate() {
RetryTemplate retryTemplate = new RetryTemplate();
FixedBackOffPolicy fixedBackOffPolicy = new FixedBackOffPolicy();
fixedBackOffPolicy.setBackOffPeriod(1000L);
retryTemplate.setBackOffPolicy(fixedBackOffPolicy);
return retryTemplate;
}
private HttpHost[] convertToHttpHosts(List<URL> hosts) {
return hosts.stream()
.map(host -> new HttpHost(host.getHost(), host.getPort(), host.getProtocol()))
.toArray(HttpHost[]::new);
}
public List<URI> getURIs(ElasticSearchProperties properties) {
String clusterAddress = properties.getUrl();
String[] hosts = clusterAddress.split(",");
return Arrays.stream(hosts)
.map(
host ->
(host.startsWith("http://")
|| host.startsWith("https://")
|| host.startsWith("tcp://"))
? URI.create(host)
: URI.create("tcp://" + host))
.collect(Collectors.toList());
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/json-jq-task/src/test/java/com/netflix/conductor/tasks/json/JsonJqTransformTest.java | json-jq-task/src/test/java/com/netflix/conductor/tasks/json/JsonJqTransformTest.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.tasks.json;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.junit.Test;
import com.netflix.conductor.common.config.ObjectMapperProvider;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import static org.junit.Assert.*;
public class JsonJqTransformTest {
private final ObjectMapper objectMapper = new ObjectMapperProvider().getObjectMapper();
@Test
public void dataShouldBeCorrectlySelected() {
final JsonJqTransform jsonJqTransform = new JsonJqTransform(objectMapper);
final WorkflowModel workflow = new WorkflowModel();
final TaskModel task = new TaskModel();
final Map<String, Object> inputData = new HashMap<>();
inputData.put("queryExpression", ".inputJson.key[0]");
final Map<String, Object> inputJson = new HashMap<>();
inputJson.put("key", Collections.singletonList("VALUE"));
inputData.put("inputJson", inputJson);
task.setInputData(inputData);
task.setOutputData(new HashMap<>());
jsonJqTransform.start(workflow, task, null);
assertNull(task.getOutputData().get("error"));
assertEquals("VALUE", task.getOutputData().get("result").toString());
List<?> resultList = (List<?>) task.getOutputData().get("resultList");
assertEquals(1, resultList.size());
assertEquals("VALUE", resultList.get(0));
}
@Test
public void simpleErrorShouldBeDisplayed() {
final JsonJqTransform jsonJqTransform = new JsonJqTransform(objectMapper);
final WorkflowModel workflow = new WorkflowModel();
final TaskModel task = new TaskModel();
final Map<String, Object> inputData = new HashMap<>();
inputData.put("queryExpression", "{");
task.setInputData(inputData);
task.setOutputData(new HashMap<>());
jsonJqTransform.start(workflow, task, null);
assertTrue(
((String) task.getOutputData().get("error"))
.startsWith("Encountered \"<EOF>\" at line 1, column 1."));
}
@Test
public void nestedExceptionsWithNACausesShouldBeDisregarded() {
final JsonJqTransform jsonJqTransform = new JsonJqTransform(objectMapper);
final WorkflowModel workflow = new WorkflowModel();
final TaskModel task = new TaskModel();
final Map<String, Object> inputData = new HashMap<>();
inputData.put(
"queryExpression",
"{officeID: (.inputJson.OIDs | unique)[], requestedIndicatorList: .inputJson.requestedindicatorList}");
final Map<String, Object> inputJson = new HashMap<>();
inputJson.put("OIDs", Collections.singletonList("VALUE"));
final Map<String, Object> indicatorList = new HashMap<>();
indicatorList.put("indicator", "AFA");
indicatorList.put("value", false);
inputJson.put("requestedindicatorList", Collections.singletonList(indicatorList));
inputData.put("inputJson", inputJson);
task.setInputData(inputData);
task.setOutputData(new HashMap<>());
jsonJqTransform.start(workflow, task, null);
assertTrue(
((String) task.getOutputData().get("error"))
.startsWith("Encountered \" \"[\" \"[ \"\" at line 1"));
}
@Test
public void mapResultShouldBeCorrectlyExtracted() {
final JsonJqTransform jsonJqTransform = new JsonJqTransform(objectMapper);
final WorkflowModel workflow = new WorkflowModel();
final TaskModel task = new TaskModel();
final Map<String, Object> taskInput = new HashMap<>();
Map<String, Object> inputData = new HashMap<>();
inputData.put("method", "POST");
inputData.put("successExpression", null);
inputData.put("requestTransform", "{name: (.body.name + \" you are a \" + .body.title) }");
inputData.put("responseTransform", "{result: \"reply: \" + .response.body.message}");
taskInput.put("input", inputData);
taskInput.put(
"queryExpression",
"{ requestTransform: .input.requestTransform // \".body\" , responseTransform: .input.responseTransform // \".response.body\", method: .input.method // \"GET\", document: .input.document // \"rgt_results\", successExpression: .input.successExpression // \"true\" }");
task.setInputData(taskInput);
task.setOutputData(new HashMap<>());
jsonJqTransform.start(workflow, task, null);
assertNull(task.getOutputData().get("error"));
assertTrue(task.getOutputData().get("result") instanceof Map);
HashMap<String, Object> result =
(HashMap<String, Object>) task.getOutputData().get("result");
assertEquals("POST", result.get("method"));
assertEquals(
"{name: (.body.name + \" you are a \" + .body.title) }",
result.get("requestTransform"));
assertEquals(
"{result: \"reply: \" + .response.body.message}", result.get("responseTransform"));
List<Object> resultList = (List<Object>) task.getOutputData().get("resultList");
assertTrue(resultList.get(0) instanceof Map);
}
@Test
public void stringResultShouldBeCorrectlyExtracted() {
final JsonJqTransform jsonJqTransform = new JsonJqTransform(objectMapper);
final WorkflowModel workflow = new WorkflowModel();
final TaskModel task = new TaskModel();
final Map<String, Object> taskInput = new HashMap<>();
taskInput.put("data", new ArrayList<>());
taskInput.put(
"queryExpression", "if(.data | length >0) then \"EXISTS\" else \"CREATE\" end");
task.setInputData(taskInput);
jsonJqTransform.start(workflow, task, null);
assertNull(task.getOutputData().get("error"));
assertTrue(task.getOutputData().get("result") instanceof String);
String result = (String) task.getOutputData().get("result");
assertEquals("CREATE", result);
}
@SuppressWarnings("unchecked")
@Test
public void listResultShouldBeCorrectlyExtracted() throws JsonProcessingException {
final JsonJqTransform jsonJqTransform = new JsonJqTransform(objectMapper);
final WorkflowModel workflow = new WorkflowModel();
final TaskModel task = new TaskModel();
String json =
"{ \"request\": { \"transitions\": [ { \"name\": \"redeliver\" }, { \"name\": \"redeliver_from_validation_error\" }, { \"name\": \"redelivery\" } ] } }";
Map<String, Object> inputData = objectMapper.readValue(json, Map.class);
final Map<String, Object> taskInput = new HashMap<>();
taskInput.put("inputData", inputData);
taskInput.put("queryExpression", ".inputData.request.transitions | map(.name)");
task.setInputData(taskInput);
jsonJqTransform.start(workflow, task, null);
assertNull(task.getOutputData().get("error"));
assertTrue(task.getOutputData().get("result") instanceof List);
List<Object> result = (List<Object>) task.getOutputData().get("result");
assertEquals(3, result.size());
}
@Test
public void nullResultShouldBeCorrectlyExtracted() throws JsonProcessingException {
final JsonJqTransform jsonJqTransform = new JsonJqTransform(objectMapper);
final WorkflowModel workflow = new WorkflowModel();
final TaskModel task = new TaskModel();
final Map<String, Object> taskInput = new HashMap<>();
taskInput.put("queryExpression", "null");
task.setInputData(taskInput);
jsonJqTransform.start(workflow, task, null);
assertNull(task.getOutputData().get("error"));
assertNull(task.getOutputData().get("result"));
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/json-jq-task/src/main/java/com/netflix/conductor/tasks/json/JsonJqTransform.java | json-jq-task/src/main/java/com/netflix/conductor/tasks/json/JsonJqTransform.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.tasks.json;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.core.execution.WorkflowExecutor;
import com.netflix.conductor.core.execution.tasks.WorkflowSystemTask;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.github.benmanes.caffeine.cache.CacheLoader;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.LoadingCache;
import net.thisptr.jackson.jq.JsonQuery;
import net.thisptr.jackson.jq.Scope;
@Component(JsonJqTransform.NAME)
public class JsonJqTransform extends WorkflowSystemTask {
private static final Logger LOGGER = LoggerFactory.getLogger(JsonJqTransform.class);
public static final String NAME = "JSON_JQ_TRANSFORM";
private static final String QUERY_EXPRESSION_PARAMETER = "queryExpression";
private static final String OUTPUT_RESULT = "result";
private static final String OUTPUT_RESULT_LIST = "resultList";
private static final String OUTPUT_ERROR = "error";
private static final TypeReference<Map<String, Object>> mapType = new TypeReference<>() {};
private final TypeReference<List<Object>> listType = new TypeReference<>() {};
private final Scope rootScope;
private final ObjectMapper objectMapper;
private final LoadingCache<String, JsonQuery> queryCache = createQueryCache();
public JsonJqTransform(ObjectMapper objectMapper) {
super(NAME);
this.objectMapper = objectMapper;
this.rootScope = Scope.newEmptyScope();
this.rootScope.loadFunctions(Scope.class.getClassLoader());
}
@Override
public void start(WorkflowModel workflow, TaskModel task, WorkflowExecutor executor) {
final Map<String, Object> taskInput = task.getInputData();
final String queryExpression = (String) taskInput.get(QUERY_EXPRESSION_PARAMETER);
if (queryExpression == null) {
task.setReasonForIncompletion(
"Missing '" + QUERY_EXPRESSION_PARAMETER + "' in input parameters");
task.setStatus(TaskModel.Status.FAILED);
return;
}
try {
final JsonNode input = objectMapper.valueToTree(taskInput);
final JsonQuery query = queryCache.get(queryExpression);
final Scope childScope = Scope.newChildScope(rootScope);
final List<JsonNode> result = query.apply(childScope, input);
task.setStatus(TaskModel.Status.COMPLETED);
if (result == null) {
task.addOutput(OUTPUT_RESULT, null);
task.addOutput(OUTPUT_RESULT_LIST, null);
} else {
List<Object> extractedResults = extractBodies(result);
if (extractedResults.isEmpty()) {
task.addOutput(OUTPUT_RESULT, null);
} else {
task.addOutput(OUTPUT_RESULT, extractedResults.get(0));
}
task.addOutput(OUTPUT_RESULT_LIST, extractedResults);
}
} catch (final Exception e) {
LOGGER.error(
"Error executing task: {} in workflow: {}",
task.getTaskId(),
workflow.getWorkflowId(),
e);
task.setStatus(TaskModel.Status.FAILED);
final String message = extractFirstValidMessage(e);
task.setReasonForIncompletion(message);
task.addOutput(OUTPUT_ERROR, message);
}
}
private LoadingCache<String, JsonQuery> createQueryCache() {
final CacheLoader<String, JsonQuery> loader = JsonQuery::compile;
return Caffeine.newBuilder()
.expireAfterWrite(1, TimeUnit.HOURS)
.maximumSize(1000)
.build(loader);
}
    /**
     * Synchronous execution path: delegates to {@link #start} (which sets the
     * terminal task status) and always returns {@code true} to indicate the
     * task state was updated.
     */
    @Override
    public boolean execute(
            WorkflowModel workflow, TaskModel task, WorkflowExecutor workflowExecutor) {
        this.start(workflow, task, workflowExecutor);
        return true;
    }
private String extractFirstValidMessage(final Exception e) {
Throwable currentStack = e;
final List<String> messages = new ArrayList<>();
messages.add(currentStack.getMessage());
while (currentStack.getCause() != null) {
currentStack = currentStack.getCause();
messages.add(currentStack.getMessage());
}
return messages.stream().filter(it -> !it.contains("N/A")).findFirst().orElse("");
}
private List<Object> extractBodies(List<JsonNode> nodes) {
List<Object> values = new ArrayList<>(nodes.size());
for (JsonNode node : nodes) {
values.add(extractBody(node));
}
return values;
}
private Object extractBody(JsonNode node) {
if (node.isNull()) {
return null;
} else if (node.isObject()) {
return objectMapper.convertValue(node, mapType);
} else if (node.isArray()) {
return objectMapper.convertValue(node, listType);
} else if (node.isBoolean()) {
return node.asBoolean();
} else if (node.isNumber()) {
if (node.isIntegralNumber()) {
return node.asLong();
} else {
return node.asDouble();
}
} else {
return node.asText();
}
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/http-task/src/test/java/com/netflix/conductor/tasks/http/HttpTaskTest.java | http-task/src/test/java/com/netflix/conductor/tasks/http/HttpTaskTest.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.tasks.http;
import java.time.Duration;
import java.time.Instant;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.mockserver.client.MockServerClient;
import org.mockserver.model.HttpRequest;
import org.mockserver.model.HttpResponse;
import org.mockserver.model.MediaType;
import org.testcontainers.containers.MockServerContainer;
import org.testcontainers.utility.DockerImageName;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.core.execution.DeciderService;
import com.netflix.conductor.core.execution.WorkflowExecutor;
import com.netflix.conductor.core.execution.tasks.SystemTaskRegistry;
import com.netflix.conductor.core.utils.ExternalPayloadStorageUtils;
import com.netflix.conductor.core.utils.IDGenerator;
import com.netflix.conductor.core.utils.ParametersUtils;
import com.netflix.conductor.dao.MetadataDAO;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import com.netflix.conductor.tasks.http.providers.DefaultRestTemplateProvider;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import static org.junit.Assert.*;
import static org.mockito.Mockito.mock;
@SuppressWarnings("unchecked")
public class HttpTaskTest {
private static final String ERROR_RESPONSE = "Something went wrong!";
private static final String TEXT_RESPONSE = "Text Response";
private static final double NUM_RESPONSE = 42.42d;
private HttpTask httpTask;
private WorkflowExecutor workflowExecutor;
private final WorkflowModel workflow = new WorkflowModel();
private static final ObjectMapper objectMapper = new ObjectMapper();
private static String JSON_RESPONSE;
@ClassRule
public static MockServerContainer mockServer =
new MockServerContainer(
DockerImageName.parse("mockserver/mockserver").withTag("mockserver-5.12.0"));
@BeforeClass
public static void init() throws Exception {
Map<String, Object> map = new HashMap<>();
map.put("key", "value1");
map.put("num", 42);
map.put("SomeKey", null);
JSON_RESPONSE = objectMapper.writeValueAsString(map);
final TypeReference<Map<String, Object>> mapOfObj = new TypeReference<>() {};
MockServerClient client =
new MockServerClient(mockServer.getHost(), mockServer.getServerPort());
client.when(HttpRequest.request().withPath("/post").withMethod("POST"))
.respond(
request -> {
Map<String, Object> reqBody =
objectMapper.readValue(request.getBody().toString(), mapOfObj);
Set<String> keys = reqBody.keySet();
Map<String, Object> respBody = new HashMap<>();
keys.forEach(k -> respBody.put(k, k));
return HttpResponse.response()
.withContentType(MediaType.APPLICATION_JSON)
.withBody(objectMapper.writeValueAsString(respBody));
});
client.when(HttpRequest.request().withPath("/post2").withMethod("POST"))
.respond(HttpResponse.response().withStatusCode(204));
client.when(HttpRequest.request().withPath("/failure").withMethod("GET"))
.respond(
HttpResponse.response()
.withStatusCode(500)
.withContentType(MediaType.TEXT_PLAIN)
.withBody(ERROR_RESPONSE));
client.when(HttpRequest.request().withPath("/text").withMethod("GET"))
.respond(HttpResponse.response().withBody(TEXT_RESPONSE));
client.when(HttpRequest.request().withPath("/numeric").withMethod("GET"))
.respond(HttpResponse.response().withBody(String.valueOf(NUM_RESPONSE)));
client.when(HttpRequest.request().withPath("/json").withMethod("GET"))
.respond(
HttpResponse.response()
.withContentType(MediaType.APPLICATION_JSON)
.withBody(JSON_RESPONSE));
}
@Before
public void setup() {
workflowExecutor = mock(WorkflowExecutor.class);
DefaultRestTemplateProvider defaultRestTemplateProvider =
new DefaultRestTemplateProvider(Duration.ofMillis(150), Duration.ofMillis(100));
httpTask = new HttpTask(defaultRestTemplateProvider, objectMapper);
}
@Test
public void testPost() {
TaskModel task = new TaskModel();
HttpTask.Input input = new HttpTask.Input();
input.setUri("http://" + mockServer.getHost() + ":" + mockServer.getServerPort() + "/post");
Map<String, Object> body = new HashMap<>();
body.put("input_key1", "value1");
body.put("input_key2", 45.3d);
body.put("someKey", null);
input.setBody(body);
input.setMethod("POST");
input.setReadTimeOut(1000);
task.getInputData().put(HttpTask.REQUEST_PARAMETER_NAME, input);
httpTask.start(workflow, task, workflowExecutor);
assertEquals(task.getReasonForIncompletion(), TaskModel.Status.COMPLETED, task.getStatus());
Map<String, Object> hr = (Map<String, Object>) task.getOutputData().get("response");
Object response = hr.get("body");
assertEquals(TaskModel.Status.COMPLETED, task.getStatus());
assertTrue("response is: " + response, response instanceof Map);
Map<String, Object> map = (Map<String, Object>) response;
Set<String> inputKeys = body.keySet();
Set<String> responseKeys = map.keySet();
inputKeys.containsAll(responseKeys);
responseKeys.containsAll(inputKeys);
}
@Test
public void testPostNoContent() {
TaskModel task = new TaskModel();
HttpTask.Input input = new HttpTask.Input();
input.setUri(
"http://" + mockServer.getHost() + ":" + mockServer.getServerPort() + "/post2");
Map<String, Object> body = new HashMap<>();
body.put("input_key1", "value1");
body.put("input_key2", 45.3d);
input.setBody(body);
input.setMethod("POST");
task.getInputData().put(HttpTask.REQUEST_PARAMETER_NAME, input);
httpTask.start(workflow, task, workflowExecutor);
assertEquals(task.getReasonForIncompletion(), TaskModel.Status.COMPLETED, task.getStatus());
Map<String, Object> hr = (Map<String, Object>) task.getOutputData().get("response");
Object response = hr.get("body");
assertEquals(TaskModel.Status.COMPLETED, task.getStatus());
assertNull("response is: " + response, response);
}
@Test
public void testFailure() {
TaskModel task = new TaskModel();
HttpTask.Input input = new HttpTask.Input();
input.setUri(
"http://" + mockServer.getHost() + ":" + mockServer.getServerPort() + "/failure");
input.setMethod("GET");
task.getInputData().put(HttpTask.REQUEST_PARAMETER_NAME, input);
httpTask.start(workflow, task, workflowExecutor);
assertEquals(
"Task output: " + task.getOutputData(), TaskModel.Status.FAILED, task.getStatus());
assertTrue(task.getReasonForIncompletion().contains(ERROR_RESPONSE));
task.setStatus(TaskModel.Status.SCHEDULED);
task.getInputData().remove(HttpTask.REQUEST_PARAMETER_NAME);
httpTask.start(workflow, task, workflowExecutor);
assertEquals(TaskModel.Status.FAILED, task.getStatus());
assertEquals(HttpTask.MISSING_REQUEST, task.getReasonForIncompletion());
}
@Test
public void testPostAsyncComplete() {
TaskModel task = new TaskModel();
HttpTask.Input input = new HttpTask.Input();
input.setUri("http://" + mockServer.getHost() + ":" + mockServer.getServerPort() + "/post");
Map<String, Object> body = new HashMap<>();
body.put("input_key1", "value1");
body.put("input_key2", 45.3d);
input.setBody(body);
input.setMethod("POST");
task.getInputData().put(HttpTask.REQUEST_PARAMETER_NAME, input);
task.getInputData().put("asyncComplete", true);
httpTask.start(workflow, task, workflowExecutor);
assertEquals(
task.getReasonForIncompletion(), TaskModel.Status.IN_PROGRESS, task.getStatus());
Map<String, Object> hr = (Map<String, Object>) task.getOutputData().get("response");
Object response = hr.get("body");
assertEquals(TaskModel.Status.IN_PROGRESS, task.getStatus());
assertTrue("response is: " + response, response instanceof Map);
Map<String, Object> map = (Map<String, Object>) response;
Set<String> inputKeys = body.keySet();
Set<String> responseKeys = map.keySet();
inputKeys.containsAll(responseKeys);
responseKeys.containsAll(inputKeys);
}
@Test
public void testTextGET() {
TaskModel task = new TaskModel();
HttpTask.Input input = new HttpTask.Input();
input.setUri("http://" + mockServer.getHost() + ":" + mockServer.getServerPort() + "/text");
input.setMethod("GET");
task.getInputData().put(HttpTask.REQUEST_PARAMETER_NAME, input);
httpTask.start(workflow, task, workflowExecutor);
Map<String, Object> hr = (Map<String, Object>) task.getOutputData().get("response");
Object response = hr.get("body");
assertEquals(TaskModel.Status.COMPLETED, task.getStatus());
assertEquals(TEXT_RESPONSE, response);
}
@Test
public void testNumberGET() {
TaskModel task = new TaskModel();
HttpTask.Input input = new HttpTask.Input();
input.setUri(
"http://" + mockServer.getHost() + ":" + mockServer.getServerPort() + "/numeric");
input.setMethod("GET");
task.getInputData().put(HttpTask.REQUEST_PARAMETER_NAME, input);
httpTask.start(workflow, task, workflowExecutor);
Map<String, Object> hr = (Map<String, Object>) task.getOutputData().get("response");
Object response = hr.get("body");
assertEquals(TaskModel.Status.COMPLETED, task.getStatus());
assertEquals(NUM_RESPONSE, response);
assertTrue(response instanceof Number);
}
@Test
public void testJsonGET() throws JsonProcessingException {
TaskModel task = new TaskModel();
HttpTask.Input input = new HttpTask.Input();
input.setUri("http://" + mockServer.getHost() + ":" + mockServer.getServerPort() + "/json");
input.setMethod("GET");
task.getInputData().put(HttpTask.REQUEST_PARAMETER_NAME, input);
httpTask.start(workflow, task, workflowExecutor);
Map<String, Object> hr = (Map<String, Object>) task.getOutputData().get("response");
Object response = hr.get("body");
assertEquals(TaskModel.Status.COMPLETED, task.getStatus());
assertTrue(response instanceof Map);
Map<String, Object> map = (Map<String, Object>) response;
assertEquals(JSON_RESPONSE, objectMapper.writeValueAsString(map));
}
@Test
public void testExecute() {
TaskModel task = new TaskModel();
HttpTask.Input input = new HttpTask.Input();
input.setUri("http://" + mockServer.getHost() + ":" + mockServer.getServerPort() + "/json");
input.setMethod("GET");
task.getInputData().put(HttpTask.REQUEST_PARAMETER_NAME, input);
task.setStatus(TaskModel.Status.SCHEDULED);
task.setScheduledTime(0);
boolean executed = httpTask.execute(workflow, task, workflowExecutor);
assertFalse(executed);
}
@Test
public void testHTTPGetConnectionTimeOut() {
TaskModel task = new TaskModel();
HttpTask.Input input = new HttpTask.Input();
Instant start = Instant.now();
input.setConnectionTimeOut(110);
input.setMethod("GET");
input.setUri("http://10.255.14.15");
task.getInputData().put(HttpTask.REQUEST_PARAMETER_NAME, input);
task.setStatus(TaskModel.Status.SCHEDULED);
task.setScheduledTime(0);
httpTask.start(workflow, task, workflowExecutor);
Instant end = Instant.now();
long diff = end.toEpochMilli() - start.toEpochMilli();
assertEquals(task.getStatus(), TaskModel.Status.FAILED);
assertTrue(diff >= 110L);
}
@Test
public void testHTTPGETReadTimeOut() {
TaskModel task = new TaskModel();
HttpTask.Input input = new HttpTask.Input();
input.setReadTimeOut(-1);
input.setMethod("GET");
input.setUri("http://" + mockServer.getHost() + ":" + mockServer.getServerPort() + "/json");
task.getInputData().put(HttpTask.REQUEST_PARAMETER_NAME, input);
task.setStatus(TaskModel.Status.SCHEDULED);
task.setScheduledTime(0);
httpTask.start(workflow, task, workflowExecutor);
assertEquals(TaskModel.Status.FAILED, task.getStatus());
}
@Test
public void testOptional() {
TaskModel task = new TaskModel();
HttpTask.Input input = new HttpTask.Input();
input.setUri(
"http://" + mockServer.getHost() + ":" + mockServer.getServerPort() + "/failure");
input.setMethod("GET");
task.getInputData().put(HttpTask.REQUEST_PARAMETER_NAME, input);
httpTask.start(workflow, task, workflowExecutor);
assertEquals(
"Task output: " + task.getOutputData(), TaskModel.Status.FAILED, task.getStatus());
assertTrue(task.getReasonForIncompletion().contains(ERROR_RESPONSE));
assertFalse(task.getStatus().isSuccessful());
task.setStatus(TaskModel.Status.SCHEDULED);
task.getInputData().remove(HttpTask.REQUEST_PARAMETER_NAME);
task.setReferenceTaskName("t1");
httpTask.start(workflow, task, workflowExecutor);
assertEquals(TaskModel.Status.FAILED, task.getStatus());
assertEquals(HttpTask.MISSING_REQUEST, task.getReasonForIncompletion());
assertFalse(task.getStatus().isSuccessful());
WorkflowTask workflowTask = new WorkflowTask();
workflowTask.setOptional(true);
workflowTask.setName("HTTP");
workflowTask.setWorkflowTaskType(TaskType.USER_DEFINED);
workflowTask.setTaskReferenceName("t1");
WorkflowDef def = new WorkflowDef();
def.getTasks().add(workflowTask);
WorkflowModel workflow = new WorkflowModel();
workflow.setWorkflowDefinition(def);
workflow.getTasks().add(task);
MetadataDAO metadataDAO = mock(MetadataDAO.class);
ExternalPayloadStorageUtils externalPayloadStorageUtils =
mock(ExternalPayloadStorageUtils.class);
ParametersUtils parametersUtils = mock(ParametersUtils.class);
SystemTaskRegistry systemTaskRegistry = mock(SystemTaskRegistry.class);
new DeciderService(
new IDGenerator(),
parametersUtils,
metadataDAO,
externalPayloadStorageUtils,
systemTaskRegistry,
Collections.emptyMap(),
Duration.ofMinutes(60))
.decide(workflow);
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/http-task/src/test/java/com/netflix/conductor/tasks/http/providers/DefaultRestTemplateProviderTest.java | http-task/src/test/java/com/netflix/conductor/tasks/http/providers/DefaultRestTemplateProviderTest.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.tasks.http.providers;
import java.time.Duration;
import org.junit.Ignore;
import org.junit.Test;
import org.springframework.web.client.RestTemplate;
import com.netflix.conductor.tasks.http.HttpTask;
import static org.junit.Assert.*;
public class DefaultRestTemplateProviderTest {

    /**
     * Each call to {@code getRestTemplate} must return a distinct instance,
     * even across threads (the provider holds per-thread builders but builds a
     * new template per call).
     */
    @Test
    public void differentObjectsForDifferentThreads() throws InterruptedException {
        DefaultRestTemplateProvider defaultRestTemplateProvider =
                new DefaultRestTemplateProvider(Duration.ofMillis(150), Duration.ofMillis(100));
        final RestTemplate restTemplate =
                defaultRestTemplateProvider.getRestTemplate(new HttpTask.Input());
        final StringBuilder result = new StringBuilder();
        Thread t1 =
                new Thread(
                        () -> {
                            RestTemplate restTemplate1 =
                                    defaultRestTemplateProvider.getRestTemplate(
                                            new HttpTask.Input());
                            if (restTemplate1 != restTemplate) {
                                result.append("different");
                            }
                        });
        t1.start();
        // join() provides the happens-before edge that makes the worker
        // thread's append visible here.
        t1.join();
        // Fix: assertEquals takes (expected, actual); the arguments were
        // previously reversed, which produces a misleading failure message.
        assertEquals("different", result.toString());
    }

    @Test
    @Ignore("We can no longer do this and have customizable timeouts per HttpTask.")
    public void sameObjectForSameThread() {
        DefaultRestTemplateProvider defaultRestTemplateProvider =
                new DefaultRestTemplateProvider(Duration.ofMillis(150), Duration.ofMillis(100));
        RestTemplate client1 = defaultRestTemplateProvider.getRestTemplate(new HttpTask.Input());
        RestTemplate client2 = defaultRestTemplateProvider.getRestTemplate(new HttpTask.Input());
        assertSame(client1, client2);
        assertNotNull(client1);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/http-task/src/main/java/com/netflix/conductor/tasks/http/HttpTask.java | http-task/src/main/java/com/netflix/conductor/tasks/http/HttpTask.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.tasks.http;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.*;
import org.springframework.stereotype.Component;
import org.springframework.util.MultiValueMap;
import org.springframework.web.client.RestClientException;
import org.springframework.web.client.RestTemplate;
import com.netflix.conductor.core.execution.WorkflowExecutor;
import com.netflix.conductor.core.execution.tasks.WorkflowSystemTask;
import com.netflix.conductor.core.utils.Utils;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import com.netflix.conductor.tasks.http.providers.RestTemplateProvider;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import static com.netflix.conductor.common.metadata.tasks.TaskType.TASK_TYPE_HTTP;
/** Task that enables calling another HTTP endpoint as part of its execution */
@Component(TASK_TYPE_HTTP)
public class HttpTask extends WorkflowSystemTask {

    private static final Logger LOGGER = LoggerFactory.getLogger(HttpTask.class);

    public static final String REQUEST_PARAMETER_NAME = "http_request";

    static final String MISSING_REQUEST =
            "Missing HTTP request. Task input MUST have a '"
                    + REQUEST_PARAMETER_NAME
                    + "' key with HttpTask.Input as value. See documentation for HttpTask for required input parameters";

    // Reusable Jackson target types for decoding JSON response bodies.
    private final TypeReference<Map<String, Object>> mapOfObj =
            new TypeReference<Map<String, Object>>() {};

    private final TypeReference<List<Object>> listOfObj = new TypeReference<List<Object>>() {};

    protected ObjectMapper objectMapper;
    protected RestTemplateProvider restTemplateProvider;
    private final String requestParameter;

    @Autowired
    public HttpTask(RestTemplateProvider restTemplateProvider, ObjectMapper objectMapper) {
        this(TASK_TYPE_HTTP, restTemplateProvider, objectMapper);
    }

    public HttpTask(
            String name, RestTemplateProvider restTemplateProvider, ObjectMapper objectMapper) {
        super(name);
        this.restTemplateProvider = restTemplateProvider;
        this.objectMapper = objectMapper;
        this.requestParameter = REQUEST_PARAMETER_NAME;
        LOGGER.info("{} initialized...", getTaskType());
    }

    /**
     * Validates the task's {@code http_request} input, performs the HTTP call
     * and maps the outcome onto the task: 2xx -> COMPLETED (or IN_PROGRESS
     * when asyncComplete), anything else -> FAILED with the response body as
     * the reason. The full response (body, headers, status) is always added to
     * the task output under "response".
     */
    @Override
    public void start(WorkflowModel workflow, TaskModel task, WorkflowExecutor executor) {
        Object request = task.getInputData().get(requestParameter);
        task.setWorkerId(Utils.getServerId());
        if (request == null) {
            task.setReasonForIncompletion(MISSING_REQUEST);
            task.setStatus(TaskModel.Status.FAILED);
            return;
        }

        Input input = objectMapper.convertValue(request, Input.class);
        if (input.getUri() == null) {
            String reason =
                    "Missing HTTP URI. See documentation for HttpTask for required input parameters";
            task.setReasonForIncompletion(reason);
            task.setStatus(TaskModel.Status.FAILED);
            return;
        }

        if (input.getMethod() == null) {
            String reason = "No HTTP method specified";
            task.setReasonForIncompletion(reason);
            task.setStatus(TaskModel.Status.FAILED);
            return;
        }

        try {
            HttpResponse response = httpCall(input);
            LOGGER.debug(
                    "Response: {}, {}, task:{}",
                    response.statusCode,
                    response.body,
                    task.getTaskId());
            if (response.statusCode > 199 && response.statusCode < 300) {
                if (isAsyncComplete(task)) {
                    task.setStatus(TaskModel.Status.IN_PROGRESS);
                } else {
                    task.setStatus(TaskModel.Status.COMPLETED);
                }
            } else {
                if (response.body != null) {
                    task.setReasonForIncompletion(response.body.toString());
                } else {
                    task.setReasonForIncompletion("No response from the remote service");
                }
                task.setStatus(TaskModel.Status.FAILED);
            }
            // httpCall() either returns a non-null response or throws, so the
            // former 'if (response != null)' guard (flagged ConstantConditions)
            // was always true and has been removed.
            task.addOutput("response", response.asMap());
        } catch (Exception e) {
            LOGGER.error(
                    "Failed to invoke {} task: {} - uri: {}, vipAddress: {} in workflow: {}",
                    getTaskType(),
                    task.getTaskId(),
                    input.getUri(),
                    input.getVipAddress(),
                    task.getWorkflowInstanceId(),
                    e);
            task.setStatus(TaskModel.Status.FAILED);
            task.setReasonForIncompletion(
                    "Failed to invoke " + getTaskType() + " task due to: " + e);
            task.addOutput("response", e.toString());
        }
    }

    /**
     * @param input HTTP Request
     * @return Response of the http call
     * @throws Exception If there was an error making http call Note: protected access is so that
     *     tasks extended from this task can re-use this to make http calls
     */
    protected HttpResponse httpCall(Input input) throws Exception {
        RestTemplate restTemplate = restTemplateProvider.getRestTemplate(input);

        HttpHeaders headers = new HttpHeaders();
        headers.setContentType(MediaType.valueOf(input.getContentType()));
        headers.setAccept(Collections.singletonList(MediaType.valueOf(input.getAccept())));
        // Null-valued custom headers are silently skipped.
        input.headers.forEach(
                (key, value) -> {
                    if (value != null) {
                        headers.add(key, value.toString());
                    }
                });

        HttpEntity<Object> request = new HttpEntity<>(input.getBody(), headers);

        HttpResponse response = new HttpResponse();
        try {
            ResponseEntity<String> responseEntity =
                    restTemplate.exchange(
                            input.getUri(),
                            HttpMethod.valueOf(input.getMethod()),
                            request,
                            String.class);
            if (responseEntity.getStatusCode().is2xxSuccessful() && responseEntity.hasBody()) {
                response.body = extractBody(responseEntity.getBody());
            }

            response.statusCode = responseEntity.getStatusCodeValue();
            response.reasonPhrase =
                    HttpStatus.valueOf(responseEntity.getStatusCode().value()).getReasonPhrase();
            response.headers = responseEntity.getHeaders();
            return response;
        } catch (RestClientException ex) {
            String reason = ex.getLocalizedMessage();
            LOGGER.error(
                    String.format(
                            "Got unexpected http response - uri: %s, vipAddress: %s",
                            input.getUri(), input.getVipAddress()),
                    ex);
            // Fix: preserve the original exception as the cause (it was
            // previously dropped, losing the stack trace) and avoid logging
            // the same exception twice.
            throw new Exception(reason, ex);
        }
    }

    /**
     * Best-effort JSON decode of the response body: arrays become List, objects
     * become Map, numbers become Double, anything else its text form. Falls
     * back to the raw string when the body is not valid JSON.
     */
    private Object extractBody(String responseBody) {
        try {
            JsonNode node = objectMapper.readTree(responseBody);
            if (node.isArray()) {
                return objectMapper.convertValue(node, listOfObj);
            } else if (node.isObject()) {
                return objectMapper.convertValue(node, mapOfObj);
            } else if (node.isNumber()) {
                return objectMapper.convertValue(node, Double.class);
            } else {
                return node.asText();
            }
        } catch (IOException jpe) {
            LOGGER.error("Error extracting response body", jpe);
            return responseBody;
        }
    }

    /** Always false: this task is asynchronous and is driven via {@link #start}. */
    @Override
    public boolean execute(WorkflowModel workflow, TaskModel task, WorkflowExecutor executor) {
        return false;
    }

    @Override
    public void cancel(WorkflowModel workflow, TaskModel task, WorkflowExecutor executor) {
        task.setStatus(TaskModel.Status.CANCELED);
    }

    @Override
    public boolean isAsync() {
        return true;
    }

    /** Plain holder for the outcome of an HTTP call. */
    public static class HttpResponse {

        public Object body;
        public MultiValueMap<String, String> headers;
        public int statusCode;
        public String reasonPhrase;

        @Override
        public String toString() {
            return "HttpResponse [body="
                    + body
                    + ", headers="
                    + headers
                    + ", statusCode="
                    + statusCode
                    + ", reasonPhrase="
                    + reasonPhrase
                    + "]";
        }

        public Map<String, Object> asMap() {
            Map<String, Object> map = new HashMap<>();
            map.put("body", body);
            map.put("headers", headers);
            map.put("statusCode", statusCode);
            map.put("reasonPhrase", reasonPhrase);
            return map;
        }
    }

    /** Input payload carried under the {@code http_request} task parameter. */
    public static class Input {

        private String method; // PUT, POST, GET, DELETE, OPTIONS, HEAD
        private String vipAddress;
        private String appName;
        private Map<String, Object> headers = new HashMap<>();
        private String uri;
        private Object body;
        private String accept = MediaType.APPLICATION_JSON_VALUE;
        private String contentType = MediaType.APPLICATION_JSON_VALUE;
        private Integer connectionTimeOut;
        private Integer readTimeOut;

        /**
         * @return the method
         */
        public String getMethod() {
            return method;
        }

        /**
         * @param method the method to set
         */
        public void setMethod(String method) {
            this.method = method;
        }

        /**
         * @return the headers
         */
        public Map<String, Object> getHeaders() {
            return headers;
        }

        /**
         * @param headers the headers to set
         */
        public void setHeaders(Map<String, Object> headers) {
            this.headers = headers;
        }

        /**
         * @return the body
         */
        public Object getBody() {
            return body;
        }

        /**
         * @param body the body to set
         */
        public void setBody(Object body) {
            this.body = body;
        }

        /**
         * @return the uri
         */
        public String getUri() {
            return uri;
        }

        /**
         * @param uri the uri to set
         */
        public void setUri(String uri) {
            this.uri = uri;
        }

        /**
         * @return the vipAddress
         */
        public String getVipAddress() {
            return vipAddress;
        }

        /**
         * @param vipAddress the vipAddress to set
         */
        public void setVipAddress(String vipAddress) {
            this.vipAddress = vipAddress;
        }

        /**
         * @return the accept
         */
        public String getAccept() {
            return accept;
        }

        /**
         * @param accept the accept to set
         */
        public void setAccept(String accept) {
            this.accept = accept;
        }

        /**
         * @return the MIME content type to use for the request
         */
        public String getContentType() {
            return contentType;
        }

        /**
         * @param contentType the MIME content type to set
         */
        public void setContentType(String contentType) {
            this.contentType = contentType;
        }

        public String getAppName() {
            return appName;
        }

        public void setAppName(String appName) {
            this.appName = appName;
        }

        /**
         * @return the connectionTimeOut
         */
        public Integer getConnectionTimeOut() {
            return connectionTimeOut;
        }

        /**
         * @return the readTimeOut
         */
        public Integer getReadTimeOut() {
            return readTimeOut;
        }

        public void setConnectionTimeOut(Integer connectionTimeOut) {
            this.connectionTimeOut = connectionTimeOut;
        }

        public void setReadTimeOut(Integer readTimeOut) {
            this.readTimeOut = readTimeOut;
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/http-task/src/main/java/com/netflix/conductor/tasks/http/providers/RestTemplateProvider.java | http-task/src/main/java/com/netflix/conductor/tasks/http/providers/RestTemplateProvider.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.tasks.http.providers;
import org.springframework.lang.NonNull;
import org.springframework.web.client.RestTemplate;
import com.netflix.conductor.tasks.http.HttpTask;
/**
 * Strategy for supplying the {@link RestTemplate} that {@link HttpTask} uses
 * to perform a single HTTP call; implementations may tailor the template to
 * the per-task {@link HttpTask.Input} (e.g. its timeouts).
 */
@FunctionalInterface
public interface RestTemplateProvider {
    /**
     * @param input the HTTP task input the template will be used for (non-null)
     * @return a configured {@link RestTemplate}
     */
    RestTemplate getRestTemplate(@NonNull HttpTask.Input input);
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/http-task/src/main/java/com/netflix/conductor/tasks/http/providers/DefaultRestTemplateProvider.java | http-task/src/main/java/com/netflix/conductor/tasks/http/providers/DefaultRestTemplateProvider.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.tasks.http.providers;
import java.time.Duration;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
import org.apache.hc.client5.http.classic.HttpClient;
import org.apache.hc.client5.http.config.RequestConfig;
import org.apache.hc.client5.http.impl.classic.HttpClients;
import org.apache.hc.core5.http.io.SocketConfig;
import org.apache.hc.core5.util.Timeout;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.web.client.RestTemplateBuilder;
import org.springframework.http.client.HttpComponentsClientHttpRequestFactory;
import org.springframework.lang.NonNull;
import org.springframework.stereotype.Component;
import org.springframework.web.client.RestTemplate;
import com.netflix.conductor.tasks.http.HttpTask;
/**
* Provider for a customized RestTemplateBuilder. This class provides a default {@link
* RestTemplateBuilder} which can be configured or extended as needed.
*/
@Component
public class DefaultRestTemplateProvider implements RestTemplateProvider {

    // One builder per thread. RestTemplateBuilder is immutable: every
    // configuration call returns a new builder instance.
    private final ThreadLocal<RestTemplateBuilder> threadLocalRestTemplateBuilder;

    private final int defaultReadTimeout;
    private final int defaultConnectTimeout;

    public DefaultRestTemplateProvider(
            @Value("${conductor.tasks.http.readTimeout:150ms}") Duration readTimeout,
            @Value("${conductor.tasks.http.connectTimeout:100ms}") Duration connectTimeout) {
        this.threadLocalRestTemplateBuilder = ThreadLocal.withInitial(RestTemplateBuilder::new);
        this.defaultReadTimeout = (int) readTimeout.toMillis();
        this.defaultConnectTimeout = (int) connectTimeout.toMillis();
    }

    /**
     * Builds a new RestTemplate per call, honouring the task's read/connect
     * timeouts when present and falling back to the configured defaults.
     */
    @Override
    public @NonNull RestTemplate getRestTemplate(@NonNull HttpTask.Input input) {
        Duration timeout =
                Duration.ofMillis(
                        Optional.ofNullable(input.getReadTimeOut()).orElse(defaultReadTimeout));
        // Fix: the original additionally called setReadTimeout() on a separate
        // line and discarded the result - RestTemplateBuilder is immutable, so
        // that call had no effect; only this chained call matters.
        RestTemplate restTemplate =
                threadLocalRestTemplateBuilder.get().setReadTimeout(timeout).build();
        RequestConfig requestConfig =
                RequestConfig.custom()
                        .setResponseTimeout(Timeout.ofMilliseconds(timeout.toMillis()))
                        .build();
        HttpClient httpClient = HttpClients.custom().setDefaultRequestConfig(requestConfig).build();
        HttpComponentsClientHttpRequestFactory requestFactory =
                new HttpComponentsClientHttpRequestFactory(httpClient);
        // Fix: removed a SocketConfig builder that set the SO timeout but was
        // never attached to any connection manager (dead code); the read
        // timeout is enforced via the response timeout in requestConfig above.
        requestFactory.setConnectTimeout(
                Optional.ofNullable(input.getConnectionTimeOut()).orElse(defaultConnectTimeout));
        restTemplate.setRequestFactory(requestFactory);
        return restTemplate;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/TestExpression.java | es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/TestExpression.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es7.dao.query.parser;
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import org.junit.Test;
import com.netflix.conductor.es7.dao.query.parser.internal.AbstractParserTest;
import com.netflix.conductor.es7.dao.query.parser.internal.ConstValue;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
/**
 * Tests for {@link Expression}: parsing of AND/OR chains, grouped sub-expressions and
 * IS NULL / IS NOT NULL system constants.
 *
 * @author Viren
 */
public class TestExpression extends AbstractParserTest {

    @Test
    public void test() throws Exception {
        String test =
                "type='IMAGE' AND subType ='sdp' AND (metadata.width > 50 OR metadata.height > 50)";
        // test = "type='IMAGE' AND subType ='sdp'";
        // test = "(metadata.type = 'IMAGE')";
        InputStream is = new BufferedInputStream(new ByteArrayInputStream(test.getBytes()));
        Expression expr = new Expression(is);

        System.out.println(expr);

        // First term: type='IMAGE', joined to the rest as a binary expression.
        assertTrue(expr.isBinaryExpr());
        assertNull(expr.getGroupedExpression());
        assertNotNull(expr.getNameValue());

        NameValue nv = expr.getNameValue();
        assertEquals("type", nv.getName().getName());
        assertEquals("=", nv.getOp().getOperator());
        assertEquals("\"IMAGE\"", nv.getValue().getValue());

        Expression rhs = expr.getRightHandSide();
        assertNotNull(rhs);
        assertTrue(rhs.isBinaryExpr());

        nv = rhs.getNameValue();
        assertNotNull(nv); // subType = sdp
        assertNull(rhs.getGroupedExpression());
        assertEquals("subType", nv.getName().getName());
        assertEquals("=", nv.getOp().getOperator());
        assertEquals("\"sdp\"", nv.getValue().getValue());

        assertEquals("AND", rhs.getOperator().getOperator());
        rhs = rhs.getRightHandSide();
        assertNotNull(rhs);
        assertFalse(rhs.isBinaryExpr());

        // Third term is the parenthesized group: (metadata.width > 50 OR metadata.height > 50).
        GroupedExpression ge = rhs.getGroupedExpression();
        assertNotNull(ge);
        expr = ge.getExpression();
        assertNotNull(expr);

        assertTrue(expr.isBinaryExpr());
        nv = expr.getNameValue();
        assertNotNull(nv);
        assertEquals("metadata.width", nv.getName().getName());
        assertEquals(">", nv.getOp().getOperator());
        assertEquals("50", nv.getValue().getValue());

        assertEquals("OR", expr.getOperator().getOperator());
        rhs = expr.getRightHandSide();
        assertNotNull(rhs);
        assertFalse(rhs.isBinaryExpr());

        nv = rhs.getNameValue();
        assertNotNull(nv);
        assertEquals("metadata.height", nv.getName().getName());
        assertEquals(">", nv.getOp().getOperator());
        assertEquals("50", nv.getValue().getValue());
    }

    @Test
    public void testWithSysConstants() throws Exception {
        String test = "type='IMAGE' AND subType ='sdp' AND description IS null";
        InputStream is = new BufferedInputStream(new ByteArrayInputStream(test.getBytes()));
        Expression expr = new Expression(is);

        System.out.println(expr);

        assertTrue(expr.isBinaryExpr());
        assertNull(expr.getGroupedExpression());
        assertNotNull(expr.getNameValue());

        NameValue nv = expr.getNameValue();
        assertEquals("type", nv.getName().getName());
        assertEquals("=", nv.getOp().getOperator());
        assertEquals("\"IMAGE\"", nv.getValue().getValue());

        Expression rhs = expr.getRightHandSide();
        assertNotNull(rhs);
        assertTrue(rhs.isBinaryExpr());

        nv = rhs.getNameValue();
        assertNotNull(nv); // subType = sdp
        assertNull(rhs.getGroupedExpression());
        assertEquals("subType", nv.getName().getName());
        assertEquals("=", nv.getOp().getOperator());
        assertEquals("\"sdp\"", nv.getValue().getValue());

        assertEquals("AND", rhs.getOperator().getOperator());
        rhs = rhs.getRightHandSide();
        assertNotNull(rhs);
        assertFalse(rhs.isBinaryExpr());

        // "description IS null" parses to a plain name-value with the NULL system constant.
        GroupedExpression ge = rhs.getGroupedExpression();
        assertNull(ge);

        nv = rhs.getNameValue();
        assertNotNull(nv);

        assertEquals("description", nv.getName().getName());
        assertEquals("IS", nv.getOp().getOperator());

        ConstValue cv = nv.getValue();
        assertNotNull(cv);
        assertEquals(cv.getSysConstant(), ConstValue.SystemConsts.NULL);

        // "IS not null" maps to the NOT_NULL system constant.
        test = "description IS not null";
        is = new BufferedInputStream(new ByteArrayInputStream(test.getBytes()));
        expr = new Expression(is);

        System.out.println(expr);

        nv = expr.getNameValue();
        assertNotNull(nv);

        assertEquals("description", nv.getName().getName());
        assertEquals("IS", nv.getOp().getOperator());

        cv = nv.getValue();
        assertNotNull(cv);
        assertEquals(cv.getSysConstant(), ConstValue.SystemConsts.NOT_NULL);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/TestGroupedExpression.java | es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/TestGroupedExpression.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es7.dao.query.parser;
import org.junit.Test;
/**
 * Placeholder test class for GroupedExpression.
 *
 * @author Viren
 */
public class TestGroupedExpression {

    // NOTE(review): empty placeholder — grouped-expression parsing is currently exercised
    // only indirectly via TestExpression; direct coverage should be added here.
    @Test
    public void test() {}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/TestConstValue.java | es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/TestConstValue.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es7.dao.query.parser.internal;
import java.util.List;
import org.junit.Test;
import static org.junit.Assert.*;
/**
 * Tests for {@link ConstValue} parsing of string, numeric, system, range and list constants.
 *
 * @author Viren
 */
public class TestConstValue extends AbstractParserTest {

    @Test
    public void testStringConst() throws Exception {
        String test = "'string value'";
        // Single quotes are stripped by the parser and the value is re-wrapped in double quotes.
        String expected = test.replaceAll("'", "\"");
        ConstValue cv = new ConstValue(getInputStream(test));
        assertNotNull(cv.getValue());
        assertEquals(expected, cv.getValue());
        assertTrue(cv.getValue() instanceof String);

        // Double-quoted input must parse to the same value.
        test = "\"string value\"";
        cv = new ConstValue(getInputStream(test));
        assertNotNull(cv.getValue());
        assertEquals(expected, cv.getValue());
        assertTrue(cv.getValue() instanceof String);
    }

    @Test
    public void testSystemConst() throws Exception {
        String test = "null";
        ConstValue cv = new ConstValue(getInputStream(test));
        assertNotNull(cv.getValue());
        assertTrue(cv.getValue() instanceof String);
        assertEquals(ConstValue.SystemConsts.NULL, cv.getSysConstant());

        test = "not null";
        cv = new ConstValue(getInputStream(test));
        assertNotNull(cv.getValue());
        assertEquals(ConstValue.SystemConsts.NOT_NULL, cv.getSysConstant());
    }

    @Test(expected = ParserException.class)
    public void testInvalid() throws Exception {
        // Unterminated string literal must fail to parse.
        String test = "'string value";
        new ConstValue(getInputStream(test));
    }

    @Test
    public void testNumConst() throws Exception {
        String test = "12345.89";
        ConstValue cv = new ConstValue(getInputStream(test));
        assertNotNull(cv.getValue());
        // Numeric values are stored as strings; they are passed through to ES as-is.
        assertTrue(cv.getValue() instanceof String);
        assertEquals(test, cv.getValue());
    }

    @Test
    public void testRange() throws Exception {
        String test = "50 AND 100";
        Range range = new Range(getInputStream(test));
        assertEquals("50", range.getLow());
        assertEquals("100", range.getHigh());
    }

    @Test(expected = ParserException.class)
    public void testBadRange() throws Exception {
        // Missing upper bound must fail to parse.
        String test = "50 AND";
        new Range(getInputStream(test));
    }

    @Test
    public void testArray() throws Exception {
        String test = "(1, 3, 'name', 'value2')";
        ListConst lc = new ListConst(getInputStream(test));
        List<Object> list = lc.getList();
        assertEquals(4, list.size());
        assertTrue(list.contains("1"));
        assertEquals("'value2'", list.get(3)); // Element quoting is preserved as-is.
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/AbstractParserTest.java | es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/AbstractParserTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es7.dao.query.parser.internal;
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
/**
 * Base class for parser tests, providing a helper that turns a query string into the
 * buffered input stream the parser classes consume.
 *
 * @author Viren
 */
public abstract class AbstractParserTest {

    /** Wraps {@code expression} in a buffered byte stream for the parser under test. */
    protected InputStream getInputStream(String expression) {
        byte[] bytes = expression.getBytes();
        return new BufferedInputStream(new ByteArrayInputStream(bytes));
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/TestName.java | es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/TestName.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es7.dao.query.parser.internal;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
/**
 * Verifies that {@link Name} parses a dotted field name and ignores trailing whitespace.
 *
 * @author Viren
 */
public class TestName extends AbstractParserTest {

    @Test
    public void test() throws Exception {
        String input = "metadata.en_US.lang ";
        Name parsed = new Name(getInputStream(input));
        String value = parsed.getName();
        assertNotNull(value);
        assertEquals(input.trim(), value);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/TestBooleanOp.java | es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/TestBooleanOp.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es7.dao.query.parser.internal;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
/**
 * Tests for {@link BooleanOp}: accepts AND/OR and rejects any other token.
 *
 * @author Viren
 */
public class TestBooleanOp extends AbstractParserTest {

    @Test
    public void test() throws Exception {
        for (String operator : new String[] {"AND", "OR"}) {
            BooleanOp op = new BooleanOp(getInputStream(operator));
            String parsed = op.getOperator();
            assertNotNull(parsed);
            assertEquals(operator, parsed);
        }
    }

    @Test(expected = ParserException.class)
    public void testInvalid() throws Exception {
        // "<" is a comparison operator, not a boolean one; construction must fail.
        BooleanOp op = new BooleanOp(getInputStream("<"));
        String parsed = op.getOperator();
        assertNotNull(parsed);
        assertEquals("<", parsed);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/TestComparisonOp.java | es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/TestComparisonOp.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es7.dao.query.parser.internal;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
/**
 * Tests for {@link ComparisonOp}: accepts the supported comparison operators and
 * rejects boolean operators.
 *
 * @author Viren
 */
public class TestComparisonOp extends AbstractParserTest {

    @Test
    public void test() throws Exception {
        String[] operators = {"<", ">", "=", "!=", "IN", "BETWEEN", "STARTS_WITH"};
        for (String operator : operators) {
            ComparisonOp op = new ComparisonOp(getInputStream(operator));
            String parsed = op.getOperator();
            assertNotNull(parsed);
            assertEquals(operator, parsed);
        }
    }

    @Test(expected = ParserException.class)
    public void testInvalidOp() throws Exception {
        // "AND" is a boolean operator, not a comparison one; construction must fail.
        ComparisonOp op = new ComparisonOp(getInputStream("AND"));
        String parsed = op.getOperator();
        assertNotNull(parsed);
        assertEquals("AND", parsed);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/TestElasticSearchRestDAOV7.java | es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/TestElasticSearchRestDAOV7.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es7.dao.index;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.function.Supplier;
import org.joda.time.DateTime;
import org.junit.Test;
import com.netflix.conductor.common.metadata.events.EventExecution;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.common.metadata.tasks.TaskExecLog;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.common.run.Workflow.WorkflowStatus;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.es7.utils.TestUtils;
import com.google.common.collect.ImmutableMap;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
public class TestElasticSearchRestDAOV7 extends ElasticSearchRestDaoBaseTest {

    // Week-granular suffix used by the time-partitioned indices (e.g. conductor_task_log_<yyyyMMWW>).
    // NOTE(review): SimpleDateFormat is not thread-safe; it is only used single-threaded here.
    private static final SimpleDateFormat SIMPLE_DATE_FORMAT = new SimpleDateFormat("yyyyMMWW");

    private static final String INDEX_PREFIX = "conductor";
    private static final String WORKFLOW_DOC_TYPE = "workflow";
    private static final String TASK_DOC_TYPE = "task";
    private static final String MSG_DOC_TYPE = "message";
    private static final String EVENT_DOC_TYPE = "event";
    private static final String LOG_DOC_TYPE = "task_log";

    // True if the given index exists in the embedded Elasticsearch instance.
    private boolean indexExists(final String index) throws IOException {
        return indexDAO.doesResourceExist("/" + index);
    }

    // True if the named mapping is present on the given index.
    private boolean doesMappingExist(final String index, final String mappingName)
            throws IOException {
        return indexDAO.doesResourceExist("/" + index + "/_mapping/" + mappingName);
    }
    /**
     * Verifies the DAO's startup work: the static workflow/task indices, the
     * week-partitioned log/message/event indices, and the index templates all exist.
     */
    @Test
    public void assertInitialSetup() throws IOException {
        SIMPLE_DATE_FORMAT.setTimeZone(TimeZone.getTimeZone("GMT"));

        String workflowIndex = INDEX_PREFIX + "_" + WORKFLOW_DOC_TYPE;
        String taskIndex = INDEX_PREFIX + "_" + TASK_DOC_TYPE;

        // Time-partitioned indices carry a yyyyMMWW (year/month/week-of-year) suffix.
        String taskLogIndex =
                INDEX_PREFIX + "_" + LOG_DOC_TYPE + "_" + SIMPLE_DATE_FORMAT.format(new Date());
        String messageIndex =
                INDEX_PREFIX + "_" + MSG_DOC_TYPE + "_" + SIMPLE_DATE_FORMAT.format(new Date());
        String eventIndex =
                INDEX_PREFIX + "_" + EVENT_DOC_TYPE + "_" + SIMPLE_DATE_FORMAT.format(new Date());

        assertTrue("Index 'conductor_workflow' should exist", indexExists(workflowIndex));
        assertTrue("Index 'conductor_task' should exist", indexExists(taskIndex));

        assertTrue("Index '" + taskLogIndex + "' should exist", indexExists(taskLogIndex));
        assertTrue("Index '" + messageIndex + "' should exist", indexExists(messageIndex));
        assertTrue("Index '" + eventIndex + "' should exist", indexExists(eventIndex));

        assertTrue(
                "Index template for 'message' should exist",
                indexDAO.doesResourceExist("/_template/template_" + MSG_DOC_TYPE));
        assertTrue(
                "Index template for 'event' should exist",
                indexDAO.doesResourceExist("/_template/template_" + EVENT_DOC_TYPE));
        assertTrue(
                "Index template for 'task_log' should exist",
                indexDAO.doesResourceExist("/_template/template_" + LOG_DOC_TYPE));
    }
    /** Indexing a workflow summary must make all of its fields retrievable by id. */
    @Test
    public void shouldIndexWorkflow() {
        WorkflowSummary workflowSummary =
                TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
        indexDAO.indexWorkflow(workflowSummary);

        assertWorkflowSummary(workflowSummary.getWorkflowId(), workflowSummary);
    }

    /** Async variant of {@link #shouldIndexWorkflow()}. */
    @Test
    public void shouldIndexWorkflowAsync() throws Exception {
        WorkflowSummary workflowSummary =
                TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
        indexDAO.asyncIndexWorkflow(workflowSummary).get();

        assertWorkflowSummary(workflowSummary.getWorkflowId(), workflowSummary);
    }

    /** A removed workflow must no longer be returned by search. */
    @Test
    public void shouldRemoveWorkflow() {
        WorkflowSummary workflowSummary =
                TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
        indexDAO.indexWorkflow(workflowSummary);

        // wait for workflow to be indexed
        List<String> workflows =
                tryFindResults(() -> searchWorkflows(workflowSummary.getWorkflowId()), 1);
        assertEquals(1, workflows.size());

        indexDAO.removeWorkflow(workflowSummary.getWorkflowId());

        workflows = tryFindResults(() -> searchWorkflows(workflowSummary.getWorkflowId()), 0);

        assertTrue("Workflow was not removed.", workflows.isEmpty());
    }

    /** Async variant of {@link #shouldRemoveWorkflow()}. */
    @Test
    public void shouldAsyncRemoveWorkflow() throws Exception {
        WorkflowSummary workflowSummary =
                TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
        indexDAO.indexWorkflow(workflowSummary);

        // wait for workflow to be indexed
        List<String> workflows =
                tryFindResults(() -> searchWorkflows(workflowSummary.getWorkflowId()), 1);
        assertEquals(1, workflows.size());

        indexDAO.asyncRemoveWorkflow(workflowSummary.getWorkflowId()).get();

        workflows = tryFindResults(() -> searchWorkflows(workflowSummary.getWorkflowId()), 0);

        assertTrue("Workflow was not removed.", workflows.isEmpty());
    }

    /** Partial update of a single field (status) must be reflected in the document. */
    @Test
    public void shouldUpdateWorkflow() {
        WorkflowSummary workflowSummary =
                TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
        indexDAO.indexWorkflow(workflowSummary);

        indexDAO.updateWorkflow(
                workflowSummary.getWorkflowId(),
                new String[] {"status"},
                new Object[] {WorkflowStatus.COMPLETED});

        workflowSummary.setStatus(WorkflowStatus.COMPLETED);
        assertWorkflowSummary(workflowSummary.getWorkflowId(), workflowSummary);
    }

    /** Async variant of {@link #shouldUpdateWorkflow()}, using a different target status. */
    @Test
    public void shouldAsyncUpdateWorkflow() throws Exception {
        WorkflowSummary workflowSummary =
                TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
        indexDAO.indexWorkflow(workflowSummary);

        indexDAO.asyncUpdateWorkflow(
                        workflowSummary.getWorkflowId(),
                        new String[] {"status"},
                        new Object[] {WorkflowStatus.FAILED})
                .get();

        workflowSummary.setStatus(WorkflowStatus.FAILED);
        assertWorkflowSummary(workflowSummary.getWorkflowId(), workflowSummary);
    }
    /** Indexing a task summary must make it findable by search. */
    @Test
    public void shouldIndexTask() {
        TaskSummary taskSummary = TestUtils.loadTaskSnapshot(objectMapper, "task_summary");
        indexDAO.indexTask(taskSummary);

        List<String> tasks = tryFindResults(() -> searchTasks(taskSummary));

        assertEquals(taskSummary.getTaskId(), tasks.get(0));
    }

    /** Async variant of {@link #shouldIndexTask()}. */
    @Test
    public void shouldIndexTaskAsync() throws Exception {
        TaskSummary taskSummary = TestUtils.loadTaskSnapshot(objectMapper, "task_summary");
        indexDAO.asyncIndexTask(taskSummary).get();

        List<String> tasks = tryFindResults(() -> searchTasks(taskSummary));

        assertEquals(taskSummary.getTaskId(), tasks.get(0));
    }

    /** A task removed via its owning workflow must no longer be searchable. */
    @Test
    public void shouldRemoveTask() {
        WorkflowSummary workflowSummary =
                TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
        indexDAO.indexWorkflow(workflowSummary);

        // wait for workflow to be indexed
        tryFindResults(() -> searchWorkflows(workflowSummary.getWorkflowId()), 1);

        TaskSummary taskSummary =
                TestUtils.loadTaskSnapshot(
                        objectMapper, "task_summary", workflowSummary.getWorkflowId());
        indexDAO.indexTask(taskSummary);

        // Wait for the task to be indexed
        List<String> tasks = tryFindResults(() -> searchTasks(taskSummary), 1);

        indexDAO.removeTask(workflowSummary.getWorkflowId(), taskSummary.getTaskId());

        tasks = tryFindResults(() -> searchTasks(taskSummary), 0);

        assertTrue("Task was not removed.", tasks.isEmpty());
    }

    /** Async variant of {@link #shouldRemoveTask()}. */
    @Test
    public void shouldAsyncRemoveTask() throws Exception {
        WorkflowSummary workflowSummary =
                TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
        indexDAO.indexWorkflow(workflowSummary);

        // wait for workflow to be indexed
        tryFindResults(() -> searchWorkflows(workflowSummary.getWorkflowId()), 1);

        TaskSummary taskSummary =
                TestUtils.loadTaskSnapshot(
                        objectMapper, "task_summary", workflowSummary.getWorkflowId());
        indexDAO.indexTask(taskSummary);

        // Wait for the task to be indexed
        List<String> tasks = tryFindResults(() -> searchTasks(taskSummary), 1);

        indexDAO.asyncRemoveTask(workflowSummary.getWorkflowId(), taskSummary.getTaskId()).get();

        tasks = tryFindResults(() -> searchTasks(taskSummary), 0);

        assertTrue("Task was not removed.", tasks.isEmpty());
    }

    /** Removal with a non-matching workflow id must leave the task untouched. */
    @Test
    public void shouldNotRemoveTaskWhenNotAssociatedWithWorkflow() {
        TaskSummary taskSummary = TestUtils.loadTaskSnapshot(objectMapper, "task_summary");
        indexDAO.indexTask(taskSummary);

        // Wait for the task to be indexed
        List<String> tasks = tryFindResults(() -> searchTasks(taskSummary), 1);

        indexDAO.removeTask("InvalidWorkflow", taskSummary.getTaskId());

        tasks = tryFindResults(() -> searchTasks(taskSummary), 0);

        assertFalse("Task was removed.", tasks.isEmpty());
    }

    /** Async variant of {@link #shouldNotRemoveTaskWhenNotAssociatedWithWorkflow()}. */
    @Test
    public void shouldNotAsyncRemoveTaskWhenNotAssociatedWithWorkflow() throws Exception {
        TaskSummary taskSummary = TestUtils.loadTaskSnapshot(objectMapper, "task_summary");
        indexDAO.indexTask(taskSummary);

        // Wait for the task to be indexed
        List<String> tasks = tryFindResults(() -> searchTasks(taskSummary), 1);

        indexDAO.asyncRemoveTask("InvalidWorkflow", taskSummary.getTaskId()).get();

        tasks = tryFindResults(() -> searchTasks(taskSummary), 0);

        assertFalse("Task was removed.", tasks.isEmpty());
    }
    /** All execution logs added for a task must be retrievable together. */
    @Test
    public void shouldAddTaskExecutionLogs() {
        List<TaskExecLog> logs = new ArrayList<>();
        String taskId = uuid();
        logs.add(createLog(taskId, "log1"));
        logs.add(createLog(taskId, "log2"));
        logs.add(createLog(taskId, "log3"));

        indexDAO.addTaskExecutionLogs(logs);

        List<TaskExecLog> indexedLogs =
                tryFindResults(() -> indexDAO.getTaskExecutionLogs(taskId), 3);

        assertEquals(3, indexedLogs.size());

        assertTrue("Not all logs was indexed", indexedLogs.containsAll(logs));
    }

    /** Async variant of {@link #shouldAddTaskExecutionLogs()}. */
    @Test
    public void shouldAddTaskExecutionLogsAsync() throws Exception {
        List<TaskExecLog> logs = new ArrayList<>();
        String taskId = uuid();
        logs.add(createLog(taskId, "log1"));
        logs.add(createLog(taskId, "log2"));
        logs.add(createLog(taskId, "log3"));

        indexDAO.asyncAddTaskExecutionLogs(logs).get();

        List<TaskExecLog> indexedLogs =
                tryFindResults(() -> indexDAO.getTaskExecutionLogs(taskId), 3);

        assertEquals(3, indexedLogs.size());

        assertTrue("Not all logs was indexed", indexedLogs.containsAll(logs));
    }

    /** Messages added to a queue must be returned by getMessages for that queue. */
    @Test
    public void shouldAddMessage() {
        String queue = "queue";
        Message message1 = new Message(uuid(), "payload1", null);
        Message message2 = new Message(uuid(), "payload2", null);

        indexDAO.addMessage(queue, message1);
        indexDAO.addMessage(queue, message2);

        List<Message> indexedMessages = tryFindResults(() -> indexDAO.getMessages(queue), 2);

        assertEquals(2, indexedMessages.size());

        assertTrue(
                "Not all messages was indexed",
                indexedMessages.containsAll(Arrays.asList(message1, message2)));
    }

    /** Event executions added for an event must be returned by getEventExecutions. */
    @Test
    public void shouldAddEventExecution() {
        String event = "event";
        EventExecution execution1 = createEventExecution(event);
        EventExecution execution2 = createEventExecution(event);

        indexDAO.addEventExecution(execution1);
        indexDAO.addEventExecution(execution2);

        List<EventExecution> indexedExecutions =
                tryFindResults(() -> indexDAO.getEventExecutions(event), 2);

        assertEquals(2, indexedExecutions.size());

        assertTrue(
                "Not all event executions was indexed",
                indexedExecutions.containsAll(Arrays.asList(execution1, execution2)));
    }

    /** Async variant of {@link #shouldAddEventExecution()} (uses a distinct event name). */
    @Test
    public void shouldAsyncAddEventExecution() throws Exception {
        String event = "event2";
        EventExecution execution1 = createEventExecution(event);
        EventExecution execution2 = createEventExecution(event);

        indexDAO.asyncAddEventExecution(execution1).get();
        indexDAO.asyncAddEventExecution(execution2).get();

        List<EventExecution> indexedExecutions =
                tryFindResults(() -> indexDAO.getEventExecutions(event), 2);

        assertEquals(2, indexedExecutions.size());

        assertTrue(
                "Not all event executions was indexed",
                indexedExecutions.containsAll(Arrays.asList(execution1, execution2)));
    }
    /** The loaded index template must match the expected (prefix-substituted) JSON. */
    @Test
    public void shouldAddIndexPrefixToIndexTemplate() throws Exception {
        String json = TestUtils.loadJsonResource("expected_template_task_log");

        String content = indexDAO.loadTypeMappingSource("/template_task_log.json");

        assertEquals(json, content);
    }

    /**
     * searchRecentRunningWorkflows(lastModifiedHoursAgoFrom=2, lastModifiedHoursAgoTo=1)
     * must return only the workflow updated between 2 and 1 hours ago — excluding both
     * the older and the just-updated workflows.
     */
    @Test
    public void shouldSearchRecentRunningWorkflows() throws Exception {
        WorkflowSummary oldWorkflow =
                TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
        oldWorkflow.setStatus(WorkflowStatus.RUNNING);
        oldWorkflow.setUpdateTime(getFormattedTime(new DateTime().minusHours(2).toDate()));

        WorkflowSummary recentWorkflow =
                TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
        recentWorkflow.setStatus(WorkflowStatus.RUNNING);
        recentWorkflow.setUpdateTime(getFormattedTime(new DateTime().minusHours(1).toDate()));

        WorkflowSummary tooRecentWorkflow =
                TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
        tooRecentWorkflow.setStatus(WorkflowStatus.RUNNING);
        tooRecentWorkflow.setUpdateTime(getFormattedTime(new DateTime().toDate()));

        // Wait for the workflows to be indexed.
        indexDAO.indexWorkflow(oldWorkflow);
        indexDAO.indexWorkflow(recentWorkflow);
        indexDAO.indexWorkflow(tooRecentWorkflow);

        Thread.sleep(1000);

        List<String> ids = indexDAO.searchRecentRunningWorkflows(2, 1);

        assertEquals(1, ids.size());
        assertEquals(recentWorkflow.getWorkflowId(), ids.get(0));
    }

    /** Counting must be exact even beyond the default 1000-hit search window. */
    @Test
    public void shouldCountWorkflows() {
        int counts = 1100;
        for (int i = 0; i < counts; i++) {
            WorkflowSummary workflowSummary =
                    TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
            indexDAO.indexWorkflow(workflowSummary);
        }

        // wait for workflow to be indexed
        long result = tryGetCount(() -> getWorkflowCount("template_workflow", "RUNNING"), counts);
        assertEquals(counts, result);
    }
private long tryGetCount(Supplier<Long> countFunction, int resultsCount) {
long result = 0;
for (int i = 0; i < 20; i++) {
result = countFunction.get();
if (result == resultsCount) {
return result;
}
try {
Thread.sleep(100);
} catch (InterruptedException e) {
throw new RuntimeException(e.getMessage(), e);
}
}
return result;
}
    // Get total workflow counts given the name and status, using the DAO's
    // free-text query syntax (status AND workflowType).
    private long getWorkflowCount(String workflowName, String status) {
        return indexDAO.getWorkflowCount(
                "status=\"" + status + "\" AND workflowType=\"" + workflowName + "\"", "*");
    }
    // Asserts field-by-field that the indexed document for workflowId matches the
    // given summary, reading each attribute back via indexDAO.get(id, field).
    private void assertWorkflowSummary(String workflowId, WorkflowSummary summary) {
        assertEquals(summary.getWorkflowType(), indexDAO.get(workflowId, "workflowType"));
        assertEquals(String.valueOf(summary.getVersion()), indexDAO.get(workflowId, "version"));
        assertEquals(summary.getWorkflowId(), indexDAO.get(workflowId, "workflowId"));
        assertEquals(summary.getCorrelationId(), indexDAO.get(workflowId, "correlationId"));
        assertEquals(summary.getStartTime(), indexDAO.get(workflowId, "startTime"));
        assertEquals(summary.getUpdateTime(), indexDAO.get(workflowId, "updateTime"));
        assertEquals(summary.getEndTime(), indexDAO.get(workflowId, "endTime"));
        assertEquals(summary.getStatus().name(), indexDAO.get(workflowId, "status"));
        assertEquals(summary.getInput(), indexDAO.get(workflowId, "input"));
        assertEquals(summary.getOutput(), indexDAO.get(workflowId, "output"));
        assertEquals(
                summary.getReasonForIncompletion(),
                indexDAO.get(workflowId, "reasonForIncompletion"));
        assertEquals(
                String.valueOf(summary.getExecutionTime()),
                indexDAO.get(workflowId, "executionTime"));
        assertEquals(summary.getEvent(), indexDAO.get(workflowId, "event"));
        assertEquals(
                summary.getFailedReferenceTaskNames(),
                indexDAO.get(workflowId, "failedReferenceTaskNames"));
    }
    // Formats a Date as an ISO-8601-style UTC timestamp ("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"),
    // the string form these tests write into update-time fields. A fresh
    // SimpleDateFormat per call avoids its thread-safety issues.
    private String getFormattedTime(Date time) {
        SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'");
        sdf.setTimeZone(TimeZone.getTimeZone("GMT"));
        return sdf.format(time);
    }
    // Convenience overload: poll until the search yields exactly one result.
    private <T> List<T> tryFindResults(Supplier<List<T>> searchFunction) {
        return tryFindResults(searchFunction, 1);
    }
private <T> List<T> tryFindResults(Supplier<List<T>> searchFunction, int resultsCount) {
List<T> result = Collections.emptyList();
for (int i = 0; i < 20; i++) {
result = searchFunction.get();
if (result.size() == resultsCount) {
return result;
}
try {
Thread.sleep(100);
} catch (InterruptedException e) {
throw new RuntimeException(e.getMessage(), e);
}
}
return result;
}
    // Searches the workflow index by exact workflowId and returns the matching ids.
    private List<String> searchWorkflows(String workflowId) {
        return indexDAO.searchWorkflows(
                        "", "workflowId:\"" + workflowId + "\"", 0, 100, Collections.emptyList())
                .getResults();
    }
    // Searches the task index for tasks belonging to the summary's workflow id.
    private List<String> searchTasks(TaskSummary taskSummary) {
        return indexDAO.searchTasks(
                        "",
                        "workflowId:\"" + taskSummary.getWorkflowId() + "\"",
                        0,
                        100,
                        Collections.emptyList())
                .getResults();
    }
private TaskExecLog createLog(String taskId, String log) {
TaskExecLog taskExecLog = new TaskExecLog(log);
taskExecLog.setTaskId(taskId);
return taskExecLog;
}
    // Builds a COMPLETED start_workflow EventExecution fixture with random ids,
    // the given event name, and a fixed three-entry output map.
    private EventExecution createEventExecution(String event) {
        EventExecution execution = new EventExecution(uuid(), uuid());
        execution.setName("name");
        execution.setEvent(event);
        execution.setCreated(System.currentTimeMillis());
        execution.setStatus(EventExecution.Status.COMPLETED);
        execution.setAction(EventHandler.Action.Type.start_workflow);
        execution.setOutput(ImmutableMap.of("a", 1, "b", 2, "c", 3));
        return execution;
    }
    // Convenience: random UUID string used as an id in test fixtures.
    private String uuid() {
        return UUID.randomUUID().toString();
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/ElasticSearchRestDaoBaseTest.java | es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/ElasticSearchRestDaoBaseTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es7.dao.index;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
import org.junit.After;
import org.junit.Before;
import org.springframework.retry.support.RetryTemplate;
/**
 * Base class for ES7 REST DAO integration tests: wires a {@link RestClient} and an
 * {@link ElasticSearchRestDAOV7} against the testcontainers Elasticsearch instance,
 * and wipes all indices after each test for isolation.
 */
public abstract class ElasticSearchRestDaoBaseTest extends ElasticSearchTest {

    protected RestClient restClient;
    protected ElasticSearchRestDAOV7 indexDAO;

    /** Creates the REST client and DAO pointed at the container before each test. */
    @Before
    public void setup() throws Exception {
        String httpHostAddress = container.getHttpHostAddress();
        String host = httpHostAddress.split(":")[0];
        int port = Integer.parseInt(httpHostAddress.split(":")[1]);

        properties.setUrl("http://" + httpHostAddress);

        RestClientBuilder restClientBuilder = RestClient.builder(new HttpHost(host, port, "http"));
        restClient = restClientBuilder.build();

        indexDAO =
                new ElasticSearchRestDAOV7(
                        restClientBuilder, new RetryTemplate(), properties, objectMapper);
        indexDAO.setup();
    }

    /** Drops all indices and releases the REST client after each test. */
    @After
    public void tearDown() throws Exception {
        deleteAllIndices();

        if (restClient != null) {
            restClient.close();
        }
    }

    /**
     * Deletes every index reported by {@code GET /_cat/indices}.
     *
     * <p>The readers are opened in try-with-resources so the HTTP entity stream is
     * always released, even if one of the DELETE requests fails midway.
     */
    private void deleteAllIndices() throws IOException {
        Response beforeResponse = restClient.performRequest(new Request("GET", "/_cat/indices"));
        try (Reader streamReader = new InputStreamReader(beforeResponse.getEntity().getContent());
                BufferedReader bufferedReader = new BufferedReader(streamReader)) {
            String line;
            while ((line = bufferedReader.readLine()) != null) {
                // _cat/indices rows are whitespace-separated; column 3 is the index name.
                String[] fields = line.split("\\s");
                String endpoint = String.format("/%s", fields[2]);
                restClient.performRequest(new Request("DELETE", endpoint));
            }
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/TestElasticSearchRestDAOV7Batch.java | es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/TestElasticSearchRestDAOV7Batch.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es7.dao.index;
import java.util.HashMap;
import java.util.concurrent.TimeUnit;
import org.junit.Test;
import org.springframework.test.context.TestPropertySource;
import com.netflix.conductor.common.metadata.tasks.Task.Status;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.TaskSummary;
import com.fasterxml.jackson.core.JsonProcessingException;
import static org.awaitility.Awaitility.await;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
@TestPropertySource(properties = "conductor.elasticsearch.indexBatchSize=2")
public class TestElasticSearchRestDAOV7Batch extends ElasticSearchRestDaoBaseTest {

    /**
     * Indexes the same task twice with indexBatchSize=2 so the batch flushes, then
     * polls until the task is searchable by correlation id.
     */
    @Test
    public void indexTaskWithBatchSizeTwo() {
        String correlationId = "some-correlation-id";

        TaskSummary taskSummary = new TaskSummary();
        taskSummary.setTaskId("some-task-id");
        taskSummary.setWorkflowId("some-workflow-instance-id");
        taskSummary.setTaskType("some-task-type");
        taskSummary.setStatus(Status.FAILED);
        // Plain HashMap instead of an anonymous subclass with an instance initializer
        // ("double brace"): the latter creates a needless class that captures the test.
        HashMap<String, Object> input = new HashMap<>();
        input.put("input_key", "input_value");
        try {
            taskSummary.setInput(objectMapper.writeValueAsString(input));
        } catch (JsonProcessingException e) {
            throw new RuntimeException(e);
        }
        taskSummary.setCorrelationId(correlationId);
        taskSummary.setTaskDefName("some-task-def-name");
        taskSummary.setReasonForIncompletion("some-failure-reason");

        indexDAO.indexTask(taskSummary);
        indexDAO.indexTask(taskSummary);

        await().atMost(5, TimeUnit.SECONDS)
                .untilAsserted(
                        () -> {
                            SearchResult<String> result =
                                    indexDAO.searchTasks(
                                            "correlationId='" + correlationId + "'",
                                            "*",
                                            0,
                                            10000,
                                            null);

                            assertTrue(
                                    "should return 1 or more search results",
                                    result.getResults().size() > 0);
                            assertEquals(
                                    "taskId should match the indexed task",
                                    "some-task-id",
                                    result.getResults().get(0));
                        });
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/ElasticSearchTest.java | es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/ElasticSearchTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es7.dao.index;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringRunner;
import org.testcontainers.elasticsearch.ElasticsearchContainer;
import org.testcontainers.utility.DockerImageName;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.es7.config.ElasticSearchProperties;
import com.fasterxml.jackson.databind.ObjectMapper;
@ContextConfiguration(
        classes = {TestObjectMapperConfiguration.class, ElasticSearchTest.TestConfiguration.class})
@RunWith(SpringRunner.class)
@TestPropertySource(
        properties = {"conductor.indexing.enabled=true", "conductor.elasticsearch.version=7"})
// Base test class: starts a shared testcontainers Elasticsearch 7 instance once per
// JVM and exposes the Spring-injected ObjectMapper and ElasticSearchProperties.
public abstract class ElasticSearchTest {

    @Configuration
    static class TestConfiguration {

        // Default properties bean; subclasses override values via @TestPropertySource.
        @Bean
        public ElasticSearchProperties elasticSearchProperties() {
            return new ElasticSearchProperties();
        }
    }

    // Shared across all subclasses; the tag must match the ES client version in use.
    protected static final ElasticsearchContainer container =
            new ElasticsearchContainer(
                    DockerImageName.parse("elasticsearch")
                            .withTag("7.17.11")); // this should match the client version

    @Autowired protected ObjectMapper objectMapper;

    @Autowired protected ElasticSearchProperties properties;

    @BeforeClass
    public static void startServer() {
        container.start();
    }

    @AfterClass
    public static void stopServer() {
        container.stop();
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/TestBulkRequestBuilderWrapper.java | es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/TestBulkRequestBuilderWrapper.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es7.dao.index;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.junit.Test;
import org.mockito.Mockito;
/** Unit tests for BulkRequestBuilderWrapper: null rejection and delegation to the builder. */
public class TestBulkRequestBuilderWrapper {
    BulkRequestBuilder builder = Mockito.mock(BulkRequestBuilder.class);
    BulkRequestBuilderWrapper wrapper = new BulkRequestBuilderWrapper(builder);

    /** A null UpdateRequest must be rejected. */
    @Test(expected = Exception.class)
    public void testAddNullUpdateRequest() {
        UpdateRequest nullUpdate = null;
        wrapper.add(nullUpdate);
    }

    /** A null IndexRequest must be rejected. */
    @Test(expected = Exception.class)
    public void testAddNullIndexRequest() {
        IndexRequest nullIndex = null;
        wrapper.add(nullIndex);
    }

    /** Each wrapper call must be forwarded to the underlying builder exactly once. */
    @Test
    public void testBuilderCalls() {
        IndexRequest indexRequest = new IndexRequest();
        UpdateRequest updateRequest = new UpdateRequest();

        wrapper.add(indexRequest);
        wrapper.add(updateRequest);
        wrapper.numberOfActions();
        wrapper.execute();

        Mockito.verify(builder).add(indexRequest);
        Mockito.verify(builder).add(updateRequest);
        Mockito.verify(builder).numberOfActions();
        Mockito.verify(builder).execute();
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es7-persistence/src/test/java/com/netflix/conductor/es7/utils/TestUtils.java | es7-persistence/src/test/java/com/netflix/conductor/es7/utils/TestUtils.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es7.utils;
import org.apache.commons.io.Charsets;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.core.utils.IDGenerator;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.io.Resources;
public class TestUtils {
private static final String WORKFLOW_SCENARIO_EXTENSION = ".json";
private static final String WORKFLOW_INSTANCE_ID_PLACEHOLDER = "WORKFLOW_INSTANCE_ID";
public static WorkflowSummary loadWorkflowSnapshot(
ObjectMapper objectMapper, String resourceFileName) {
try {
String content = loadJsonResource(resourceFileName);
String workflowId = new IDGenerator().generate();
content = content.replace(WORKFLOW_INSTANCE_ID_PLACEHOLDER, workflowId);
return objectMapper.readValue(content, WorkflowSummary.class);
} catch (Exception e) {
throw new RuntimeException(e.getMessage(), e);
}
}
public static TaskSummary loadTaskSnapshot(ObjectMapper objectMapper, String resourceFileName) {
try {
String content = loadJsonResource(resourceFileName);
String workflowId = new IDGenerator().generate();
content = content.replace(WORKFLOW_INSTANCE_ID_PLACEHOLDER, workflowId);
return objectMapper.readValue(content, TaskSummary.class);
} catch (Exception e) {
throw new RuntimeException(e.getMessage(), e);
}
}
public static TaskSummary loadTaskSnapshot(
ObjectMapper objectMapper, String resourceFileName, String workflowId) {
try {
String content = loadJsonResource(resourceFileName);
content = content.replace(WORKFLOW_INSTANCE_ID_PLACEHOLDER, workflowId);
return objectMapper.readValue(content, TaskSummary.class);
} catch (Exception e) {
throw new RuntimeException(e.getMessage(), e);
}
}
public static String loadJsonResource(String resourceFileName) {
try {
return Resources.toString(
TestUtils.class.getResource(
"/" + resourceFileName + WORKFLOW_SCENARIO_EXTENSION),
Charsets.UTF_8);
} catch (Exception e) {
throw new RuntimeException(e.getMessage(), e);
}
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es7-persistence/src/test/java/com/netflix/conductor/es7/config/ElasticSearchPropertiesTest.java | es7-persistence/src/test/java/com/netflix/conductor/es7/config/ElasticSearchPropertiesTest.java | /*
* Copyright 2025 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es7.config;
import org.junit.Test;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
/** Tests the waitForIndexRefresh flag on ElasticSearchProperties. */
public class ElasticSearchPropertiesTest {

    /** By default the flag is off, preserving the v3.21.19 performance behavior. */
    @Test
    public void testWaitForIndexRefreshDefaultsToFalse() {
        final ElasticSearchProperties props = new ElasticSearchProperties();

        assertFalse(
                "waitForIndexRefresh should default to false for v3.21.19 performance",
                props.isWaitForIndexRefresh());
    }

    /** The flag can be switched on explicitly. */
    @Test
    public void testWaitForIndexRefreshCanBeEnabled() {
        final ElasticSearchProperties props = new ElasticSearchProperties();
        props.setWaitForIndexRefresh(true);

        assertTrue(
                "waitForIndexRefresh should be configurable to true",
                props.isWaitForIndexRefresh());
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/FilterProvider.java | es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/FilterProvider.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es7.dao.query.parser;
import org.elasticsearch.index.query.QueryBuilder;
/**
 * A node in the parsed search expression that can render itself as an Elasticsearch query.
 *
 * @author Viren
 */
public interface FilterProvider {

    /**
     * Builds the Elasticsearch query corresponding to this node of the parsed expression.
     *
     * @return QueryBuilder for elasticsearch
     */
    QueryBuilder getFilterBuilder();
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/Expression.java | es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/Expression.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es7.dao.query.parser;
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import com.netflix.conductor.es7.dao.query.parser.internal.AbstractNode;
import com.netflix.conductor.es7.dao.query.parser.internal.BooleanOp;
import com.netflix.conductor.es7.dao.query.parser.internal.ParserException;
/**
 * A parsed boolean search expression: either a name/value comparison or a parenthesized
 * group, optionally followed by a boolean operator (AND/OR) and a right-hand expression.
 *
 * @author Viren
 */
public class Expression extends AbstractNode implements FilterProvider {

    private NameValue nameVal;

    private GroupedExpression ge;

    private BooleanOp op;

    private Expression rhs;

    public Expression(InputStream is) throws ParserException {
        super(is);
    }

    @Override
    protected void _parse() throws Exception {
        // '(' starts a grouped sub-expression; anything else is a name OPR value term.
        byte[] peeked = peek(1);

        if (peeked[0] == '(') {
            this.ge = new GroupedExpression(is);
        } else {
            this.nameVal = new NameValue(is);
        }

        // A boolean operator after the term means there is a right-hand expression.
        peeked = peek(3);
        if (isBoolOpr(peeked)) {
            // we have an expression next
            this.op = new BooleanOp(is);
            this.rhs = new Expression(is);
        }
    }

    /** @return true when this expression has a boolean operator and a right-hand side */
    public boolean isBinaryExpr() {
        return this.op != null;
    }

    public BooleanOp getOperator() {
        return this.op;
    }

    public Expression getRightHandSide() {
        return this.rhs;
    }

    public boolean isNameValue() {
        return this.nameVal != null;
    }

    public NameValue getNameValue() {
        return this.nameVal;
    }

    public GroupedExpression getGroupedExpression() {
        return this.ge;
    }

    /**
     * Renders this expression as an Elasticsearch query: AND maps to a bool query with
     * two must clauses, OR to a bool query with two should clauses.
     */
    @Override
    public QueryBuilder getFilterBuilder() {
        QueryBuilder lhs = null;
        if (nameVal != null) {
            lhs = nameVal.getFilterBuilder();
        } else {
            lhs = ge.getFilterBuilder();
        }

        if (this.isBinaryExpr()) {
            QueryBuilder rhsFilter = rhs.getFilterBuilder();
            if (this.op.isAnd()) {
                return QueryBuilders.boolQuery().must(lhs).must(rhsFilter);
            } else {
                return QueryBuilders.boolQuery().should(lhs).should(rhsFilter);
            }
        } else {
            return lhs;
        }
    }

    @Override
    public String toString() {
        if (isBinaryExpr()) {
            return "" + (nameVal == null ? ge : nameVal) + op + rhs;
        } else {
            return "" + (nameVal == null ? ge : nameVal);
        }
    }

    /**
     * Parses {@code value} into an Expression.
     *
     * @param value the expression text, e.g. {@code status="RUNNING" AND version>1}
     * @throws ParserException if the text cannot be parsed
     */
    public static Expression fromString(String value) throws ParserException {
        // Use an explicit charset: the no-arg getBytes() depends on the platform
        // default and can corrupt non-ASCII query values.
        return new Expression(
                new BufferedInputStream(
                        new ByteArrayInputStream(
                                value.getBytes(java.nio.charset.StandardCharsets.UTF_8))));
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/NameValue.java | es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/NameValue.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es7.dao.query.parser;
import java.io.InputStream;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import com.netflix.conductor.es7.dao.query.parser.internal.AbstractNode;
import com.netflix.conductor.es7.dao.query.parser.internal.ComparisonOp;
import com.netflix.conductor.es7.dao.query.parser.internal.ComparisonOp.Operators;
import com.netflix.conductor.es7.dao.query.parser.internal.ConstValue;
import com.netflix.conductor.es7.dao.query.parser.internal.ListConst;
import com.netflix.conductor.es7.dao.query.parser.internal.Name;
import com.netflix.conductor.es7.dao.query.parser.internal.ParserException;
import com.netflix.conductor.es7.dao.query.parser.internal.Range;
/**
 * @author Viren
 *     <pre>
 * Represents an expression of the form as below:
 * key OPR value
 * OPR is the comparison operator which could be on the following:
 * 	&gt;, &lt;, = , !=, IN, BETWEEN, STARTS_WITH, IS
 * </pre>
 */
public class NameValue extends AbstractNode implements FilterProvider {

    // Left-hand field name.
    private Name name;

    // Comparison operator between name and value.
    private ComparisonOp op;

    // Right-hand constant (unused for IN / BETWEEN).
    private ConstValue value;

    // Populated only for BETWEEN.
    private Range range;

    // Populated only for IN.
    private ListConst valueList;

    public NameValue(InputStream is) throws ParserException {
        super(is);
    }

    @Override
    protected void _parse() throws Exception {
        // Grammar: name OPR (range | list | constant), chosen by the operator.
        this.name = new Name(is);
        this.op = new ComparisonOp(is);

        if (this.op.getOperator().equals(Operators.BETWEEN.value())) {
            this.range = new Range(is);
        }
        if (this.op.getOperator().equals(Operators.IN.value())) {
            this.valueList = new ListConst(is);
        } else {
            this.value = new ConstValue(is);
        }
    }

    @Override
    public String toString() {
        return "" + name + op + value;
    }

    /**
     * @return the name
     */
    public Name getName() {
        return name;
    }

    /**
     * @return the op
     */
    public ComparisonOp getOp() {
        return op;
    }

    /**
     * @return the value
     */
    public ConstValue getValue() {
        return value;
    }

    /**
     * Maps each comparison operator to its Elasticsearch query:
     * = / != as query_string, BETWEEN / &gt; / &lt; as range queries, IN as a terms
     * query, STARTS_WITH as a prefix query, IS for null checks.
     *
     * @throws IllegalStateException if the operator has no mapping
     */
    @Override
    public QueryBuilder getFilterBuilder() {
        if (op.getOperator().equals(Operators.EQUALS.value())) {
            return QueryBuilders.queryStringQuery(
                    name.getName() + ":" + value.getValue().toString());
        } else if (op.getOperator().equals(Operators.BETWEEN.value())) {
            return QueryBuilders.rangeQuery(name.getName())
                    .from(range.getLow())
                    .to(range.getHigh());
        } else if (op.getOperator().equals(Operators.IN.value())) {
            return QueryBuilders.termsQuery(name.getName(), valueList.getList());
        } else if (op.getOperator().equals(Operators.NOT_EQUALS.value())) {
            return QueryBuilders.queryStringQuery(
                    "NOT " + name.getName() + ":" + value.getValue().toString());
        } else if (op.getOperator().equals(Operators.GREATER_THAN.value())) {
            // Exclusive lower bound, no upper bound.
            return QueryBuilders.rangeQuery(name.getName())
                    .from(value.getValue())
                    .includeLower(false)
                    .includeUpper(false);
        } else if (op.getOperator().equals(Operators.IS.value())) {
            // NOTE(review): these double-negated bool queries look inverted — the NULL
            // branch resolves to NOT(match_all AND NOT exists), i.e. "field exists",
            // and the NOT_NULL branch to NOT(exists). Confirm intended semantics
            // against upstream before relying on IS NULL / IS NOT NULL.
            if (value.getSysConstant().equals(ConstValue.SystemConsts.NULL)) {
                return QueryBuilders.boolQuery()
                        .mustNot(
                                QueryBuilders.boolQuery()
                                        .must(QueryBuilders.matchAllQuery())
                                        .mustNot(QueryBuilders.existsQuery(name.getName())));
            } else if (value.getSysConstant().equals(ConstValue.SystemConsts.NOT_NULL)) {
                return QueryBuilders.boolQuery()
                        .mustNot(
                                QueryBuilders.boolQuery()
                                        .must(QueryBuilders.matchAllQuery())
                                        .must(QueryBuilders.existsQuery(name.getName())));
            }
        } else if (op.getOperator().equals(Operators.LESS_THAN.value())) {
            // Exclusive upper bound, no lower bound.
            return QueryBuilders.rangeQuery(name.getName())
                    .to(value.getValue())
                    .includeLower(false)
                    .includeUpper(false);
        } else if (op.getOperator().equals(Operators.STARTS_WITH.value())) {
            return QueryBuilders.prefixQuery(name.getName(), value.getUnquotedValue());
        }

        throw new IllegalStateException("Incorrect/unsupported operators");
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/GroupedExpression.java | es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/GroupedExpression.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es7.dao.query.parser;
import java.io.InputStream;
import org.elasticsearch.index.query.QueryBuilder;
import com.netflix.conductor.es7.dao.query.parser.internal.AbstractNode;
import com.netflix.conductor.es7.dao.query.parser.internal.ParserException;
/**
 * A parenthesized sub-expression: {@code ( <expression> )}.
 *
 * @author Viren
 */
public class GroupedExpression extends AbstractNode implements FilterProvider {

    private Expression expression;

    public GroupedExpression(InputStream is) throws ParserException {
        super(is);
    }

    /** Consumes "(", the inner expression, then ")", failing on a missing parenthesis. */
    @Override
    protected void _parse() throws Exception {
        byte[] token = read(1);
        assertExpected(token, "(");

        expression = new Expression(is);

        token = read(1);
        assertExpected(token, ")");
    }

    /**
     * @return the expression
     */
    public Expression getExpression() {
        return expression;
    }

    /** Delegates straight to the inner expression's query. */
    @Override
    public QueryBuilder getFilterBuilder() {
        return expression.getFilterBuilder();
    }

    @Override
    public String toString() {
        return "(" + expression + ")";
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/ConstValue.java | es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/ConstValue.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es7.dao.query.parser.internal;
import java.io.InputStream;
/**
 * @author Viren Constant value can be:
 *     <ol>
 *       <li>List of values (a,b,c)
 *       <li>Range of values (m AND n)
 *       <li>A value (x)
 *       <li>A value is either a string or a number
 *     </ol>
 */
public class ConstValue extends AbstractNode {

    // Nested enums are implicitly static; no modifier needed.
    public enum SystemConsts {
        NULL("null"),
        NOT_NULL("not null");
        private final String value;

        SystemConsts(String value) {
            this.value = value;
        }

        public String value() {
            return value;
        }
    }

    // Constant: declared final so it cannot be reassigned.
    private static final String QUOTE = "\"";

    private Object value;

    private SystemConsts sysConsts;

    public ConstValue(InputStream is) throws ParserException {
        super(is);
    }

    @Override
    protected void _parse() throws Exception {
        byte[] peeked = peek(4);
        String sp = new String(peeked).trim();
        // Read a constant value (number or a string)
        if (peeked[0] == '"' || peeked[0] == '\'') {
            this.value = readString(is);
        } else if (sp.toLowerCase().startsWith("not")) {
            this.value = SystemConsts.NOT_NULL.value();
            sysConsts = SystemConsts.NOT_NULL;
            read(SystemConsts.NOT_NULL.value().length());
        } else if (sp.equalsIgnoreCase(SystemConsts.NULL.value())) {
            this.value = SystemConsts.NULL.value();
            sysConsts = SystemConsts.NULL;
            read(SystemConsts.NULL.value().length());
        } else {
            this.value = readNumber(is);
        }
    }

    /** Reads consecutive numeric characters from the stream, stopping at the first non-digit. */
    private String readNumber(InputStream is) throws Exception {
        StringBuilder sb = new StringBuilder();
        while (is.available() > 0) {
            is.mark(1);
            char c = (char) is.read();
            if (!isNumeric(c)) {
                // Put the non-numeric character back for the next parser.
                is.reset();
                break;
            } else {
                sb.append(c);
            }
        }
        return sb.toString().trim();
    }

    /**
     * Reads an escaped string
     *
     * <p>Consumes up to the matching (unescaped) closing delimiter and returns the
     * content re-wrapped in double quotes regardless of the original delimiter.
     *
     * @throws ParserException if the closing delimiter is never found
     */
    private String readString(InputStream is) throws Exception {
        char delim = (char) read(1)[0];
        StringBuilder sb = new StringBuilder();
        boolean valid = false;
        while (is.available() > 0) {
            char c = (char) is.read();
            if (c == delim) {
                valid = true;
                break;
            } else if (c == '\\') {
                // read the next character as part of the value
                c = (char) is.read();
                sb.append(c);
            } else {
                sb.append(c);
            }
        }
        if (!valid) {
            throw new ParserException(
                    "String constant is not quoted with <" + delim + "> : " + sb.toString());
        }
        return QUOTE + sb.toString() + QUOTE;
    }

    public Object getValue() {
        return value;
    }

    @Override
    public String toString() {
        return "" + value;
    }

    /** Returns the value with the surrounding quotes stripped, if both are present. */
    public String getUnquotedValue() {
        String result = toString();
        if (result.length() >= 2 && result.startsWith(QUOTE) && result.endsWith(QUOTE)) {
            result = result.substring(1, result.length() - 1);
        }
        return result;
    }

    public boolean isSysConstant() {
        return this.sysConsts != null;
    }

    public SystemConsts getSysConstant() {
        return this.sysConsts;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/ListConst.java | es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/ListConst.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es7.dao.query.parser.internal;
import java.io.InputStream;
import java.util.LinkedList;
import java.util.List;
/**
* @author Viren List of constants
*/
public class ListConst extends AbstractNode {

    private List<Object> values;

    public ListConst(InputStream is) throws ParserException {
        super(is);
    }

    /** Expects an opening '(' and then reads the comma-separated values up to ')'. */
    @Override
    protected void _parse() throws Exception {
        byte[] peeked = read(1);
        assertExpected(peeked, "(");
        this.values = readList();
    }

    /**
     * Reads values separated by ',' until the closing ')'.
     *
     * <p>Note: the text after the last comma is always added, even when empty, so {@code ()}
     * yields a single empty-string element — preserved for backward compatibility.
     *
     * @throws ParserException if the closing ')' is never encountered
     */
    private List<Object> readList() throws Exception {
        List<Object> list = new LinkedList<>();
        boolean valid = false;
        char c;
        StringBuilder sb = new StringBuilder();
        while (is.available() > 0) {
            c = (char) is.read();
            if (c == ')') {
                valid = true;
                break;
            } else if (c == ',') {
                list.add(sb.toString().trim());
                sb = new StringBuilder();
            } else {
                sb.append(c);
            }
        }
        list.add(sb.toString().trim());
        if (!valid) {
            throw new ParserException("Expected ')' but never encountered in the stream");
        }
        return list;
    }

    /** Returns the parsed list of values. */
    public List<Object> getList() {
        return values;
    }

    @Override
    public String toString() {
        return values.toString();
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/FunctionThrowingException.java | es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/FunctionThrowingException.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es7.dao.query.parser.internal;
/**
 * A {@link java.util.function.Consumer}-like functional interface whose operation is permitted to
 * throw a checked {@link Exception}. Used by the parser to run per-index reads inside loops (see
 * {@code AbstractNode.efor}).
 *
 * @author Viren
 * @param <T> the type of the input to the operation
 */
@FunctionalInterface
public interface FunctionThrowingException<T> {

    /**
     * Performs this operation on the given argument.
     *
     * @param t the input argument
     * @throws Exception if the operation fails
     */
    void accept(T t) throws Exception;
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/AbstractNode.java | es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/AbstractNode.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es7.dao.query.parser.internal;
import java.io.InputStream;
import java.math.BigDecimal;
import java.util.HashSet;
import java.util.Set;
import java.util.regex.Pattern;
/**
* @author Viren
*/
/**
 * Base class for all query-parser AST nodes. A node consumes its own grammar production from the
 * shared {@link InputStream} in its constructor (via {@link #_parse()}), skipping surrounding
 * whitespace. The stream must support mark/reset for the peek operations.
 */
public abstract class AbstractNode {

    public static final Pattern WHITESPACE = Pattern.compile("\\s");

    // Single-character comparison operators recognized by the grammar.
    // final: populated once in the static initializer and only read afterwards.
    protected static final Set<Character> comparisonOprs = new HashSet<>();

    static {
        comparisonOprs.add('>');
        comparisonOprs.add('<');
        comparisonOprs.add('=');
    }

    protected InputStream is;

    protected AbstractNode(InputStream is) throws ParserException {
        this.is = is;
        this.parse();
    }

    /** Returns true if {@code test} parses as a number (anything {@link BigDecimal} accepts). */
    protected boolean isNumber(String test) {
        try {
            // If you can convert to a big decimal value, then it is a number.
            new BigDecimal(test);
            return true;
        } catch (NumberFormatException e) {
            // Not a number; fall through to false.
        }
        return false;
    }

    /** Returns true if the buffer starts with the boolean operator {@code OR} or {@code AND}. */
    protected boolean isBoolOpr(byte[] buffer) {
        if (buffer.length > 1 && buffer[0] == 'O' && buffer[1] == 'R') {
            return true;
        } else if (buffer.length > 2 && buffer[0] == 'A' && buffer[1] == 'N' && buffer[2] == 'D') {
            return true;
        }
        return false;
    }

    /**
     * Returns true if the buffer starts with a comparison operator ({@code IN}, {@code !=}, or one
     * of {@code <}, {@code >}, {@code =}). Requires at least two bytes in the buffer.
     */
    protected boolean isComparisonOpr(byte[] buffer) {
        if (buffer[0] == 'I' && buffer[1] == 'N') {
            return true;
        } else if (buffer[0] == '!' && buffer[1] == '=') {
            return true;
        } else {
            return comparisonOprs.contains((char) buffer[0]);
        }
    }

    /** Reads {@code length} bytes without consuming them (mark/reset). */
    protected byte[] peek(int length) throws Exception {
        return read(length, true);
    }

    /** Consumes and returns {@code length} bytes from the stream. */
    protected byte[] read(int length) throws Exception {
        return read(length, false);
    }

    /**
     * Reads the next token: characters up to (and excluding) whitespace or a comparison-operator
     * character. The terminating whitespace character, if any, is consumed.
     */
    protected String readToken() throws Exception {
        skipWhitespace();
        StringBuilder sb = new StringBuilder();
        while (is.available() > 0) {
            char c = (char) peek(1)[0];
            if (c == ' ' || c == '\t' || c == '\n' || c == '\r') {
                is.skip(1);
                break;
            } else if (c == '=' || c == '>' || c == '<' || c == '!') {
                // do not skip: the operator belongs to the next node
                break;
            }
            sb.append(c);
            is.skip(1);
        }
        return sb.toString().trim();
    }

    /** True for characters that can appear in a numeric literal (digits, '-', '.', 'e'). */
    protected boolean isNumeric(char c) {
        if (c == '-' || c == 'e' || (c >= '0' && c <= '9') || c == '.') {
            return true;
        }
        return false;
    }

    /** Asserts that the bytes just read spell {@code expected}. */
    protected void assertExpected(byte[] found, String expected) throws ParserException {
        assertExpected(new String(found), expected);
    }

    protected void assertExpected(String found, String expected) throws ParserException {
        if (!found.equals(expected)) {
            throw new ParserException("Expected " + expected + ", found " + found);
        }
    }

    protected void assertExpected(char found, char expected) throws ParserException {
        if (found != expected) {
            throw new ParserException("Expected " + expected + ", found " + found);
        }
    }

    /** Runs {@code consumer} for each index in {@code [0, length)}, propagating exceptions. */
    protected static void efor(int length, FunctionThrowingException<Integer> consumer)
            throws Exception {
        for (int i = 0; i < length; i++) {
            consumer.accept(i);
        }
    }

    /** Subclasses implement their grammar production here; called once from the constructor. */
    protected abstract void _parse() throws Exception;

    // Public stuff here
    private void parse() throws ParserException {
        // skip white spaces
        skipWhitespace();
        try {
            _parse();
        } catch (Exception e) {
            // Wrap anything unexpected; ParserExceptions propagate unchanged.
            // (A stray System.out.println debug statement was removed here.)
            if (!(e instanceof ParserException)) {
                throw new ParserException("Error parsing", e);
            } else {
                throw (ParserException) e;
            }
        }
        skipWhitespace();
    }

    // Private methods
    private byte[] read(int length, boolean peekOnly) throws Exception {
        byte[] buf = new byte[length];
        if (peekOnly) {
            is.mark(length);
        }
        // NOTE(review): is.read() returning -1 at EOF is stored as (byte) -1 here; callers appear
        // to rely on available() checks to avoid this -- confirm.
        efor(length, (Integer c) -> buf[c] = (byte) is.read());
        if (peekOnly) {
            is.reset();
        }
        return buf;
    }

    /** Consumes any run of whitespace characters at the current stream position. */
    protected void skipWhitespace() throws ParserException {
        try {
            while (is.available() > 0) {
                byte c = peek(1)[0];
                if (c == ' ' || c == '\t' || c == '\n' || c == '\r') {
                    // skip
                    read(1);
                } else {
                    break;
                }
            }
        } catch (Exception e) {
            throw new ParserException(e.getMessage(), e);
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/Name.java | es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/Name.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es7.dao.query.parser.internal;
import java.io.InputStream;
/**
* @author Viren Represents the name of the field to be searched against.
*/
public class Name extends AbstractNode {

    /** The parsed field-name token. */
    private String value;

    public Name(InputStream is) throws ParserException {
        super(is);
    }

    /** Reads a single token from the stream and stores it as the field name. */
    @Override
    protected void _parse() throws Exception {
        value = readToken();
    }

    /** Returns the name of the field this node refers to. */
    public String getName() {
        return value;
    }

    @Override
    public String toString() {
        return value;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/Range.java | es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/Range.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es7.dao.query.parser.internal;
import java.io.InputStream;
/**
* @author Viren
*/
/**
 * A numeric range of the form {@code <low> AND <high>}, as used after the BETWEEN operator.
 */
public class Range extends AbstractNode {

    private String low;

    private String high;

    public Range(InputStream is) throws ParserException {
        super(is);
    }

    /**
     * Parses {@code <number> AND <number>}. Both bounds are validated; previously only the upper
     * bound was checked, so a missing lower bound produced a silently-empty value.
     */
    @Override
    protected void _parse() throws Exception {
        String num = readNumber(is);
        if (num == null || "".equals(num)) {
            throw new ParserException("Missing the lower range value...");
        }
        this.low = num;
        skipWhitespace();
        byte[] peeked = read(3);
        assertExpected(peeked, "AND");
        skipWhitespace();
        num = readNumber(is);
        if (num == null || "".equals(num)) {
            throw new ParserException("Missing the upper range value...");
        }
        this.high = num;
    }

    /**
     * Consumes consecutive numeric characters from the stream, stopping at (and not consuming)
     * the first non-numeric character.
     */
    private String readNumber(InputStream is) throws Exception {
        StringBuilder sb = new StringBuilder();
        while (is.available() > 0) {
            is.mark(1);
            char c = (char) is.read();
            if (!isNumeric(c)) {
                is.reset();
                break;
            } else {
                sb.append(c);
            }
        }
        String numValue = sb.toString().trim();
        return numValue;
    }

    /**
     * @return the low
     */
    public String getLow() {
        return low;
    }

    /**
     * @return the high
     */
    public String getHigh() {
        return high;
    }

    @Override
    public String toString() {
        return low + " AND " + high;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/BooleanOp.java | es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/BooleanOp.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es7.dao.query.parser.internal;
import java.io.InputStream;
/**
* @author Viren
*/
/** A boolean connective ({@code AND} / {@code OR}) between two query expressions. */
public class BooleanOp extends AbstractNode {

    private String value;

    public BooleanOp(InputStream is) throws ParserException {
        super(is);
    }

    /** Looks ahead for {@code OR} or {@code AND}, then consumes exactly that operator. */
    @Override
    protected void _parse() throws Exception {
        // Peek far enough for the longest operator ("AND").
        byte[] ahead = peek(3);
        if (ahead.length > 1 && ahead[0] == 'O' && ahead[1] == 'R') {
            value = "OR";
        } else if (ahead.length > 2 && ahead[0] == 'A' && ahead[1] == 'N' && ahead[2] == 'D') {
            value = "AND";
        } else {
            throw new ParserException("No valid boolean operator found...");
        }
        // Consume only the characters belonging to the matched operator.
        read(value.length());
    }

    /** Returns the operator padded with single spaces, as used when re-assembling the query. */
    @Override
    public String toString() {
        return " " + value + " ";
    }

    public String getOperator() {
        return value;
    }

    /** Whether this connective is {@code AND}. */
    public boolean isAnd() {
        return "AND".equals(value);
    }

    /** Whether this connective is {@code OR}. */
    public boolean isOr() {
        return "OR".equals(value);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/ParserException.java | es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/ParserException.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es7.dao.query.parser.internal;
/**
 * Exception raised when a search query cannot be parsed.
 *
 * @author Viren
 */
public class ParserException extends Exception {

    // Declare an explicit serialVersionUID instead of suppressing the "serial" warning.
    private static final long serialVersionUID = 1L;

    /**
     * @param message description of the parse failure
     */
    public ParserException(String message) {
        super(message);
    }

    /**
     * @param message description of the parse failure
     * @param cause underlying cause, preserved for diagnostics
     */
    public ParserException(String message, Throwable cause) {
        super(message, cause);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/ComparisonOp.java | es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/ComparisonOp.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es7.dao.query.parser.internal;
import java.io.InputStream;
/**
* @author Viren
*/
/** A comparison operator in the query grammar (=, >, <, !=, IN, IS, BETWEEN, STARTS_WITH). */
public class ComparisonOp extends AbstractNode {

    /** All comparison operators understood by the grammar. */
    public enum Operators {
        BETWEEN("BETWEEN"),
        EQUALS("="),
        LESS_THAN("<"),
        GREATER_THAN(">"),
        IN("IN"),
        NOT_EQUALS("!="),
        IS("IS"),
        STARTS_WITH("STARTS_WITH");

        private final String value;

        Operators(String value) {
            this.value = value;
        }

        public String value() {
            return value;
        }
    }

    static {
        int max = 0;
        for (Operators op : Operators.values()) {
            max = Math.max(max, op.value().length());
        }
        maxOperatorLength = max;
    }

    // Longest operator text; also the lookahead window size used by _parse().
    private static final int maxOperatorLength;

    private static final int betweenLen = Operators.BETWEEN.value().length();

    private static final int startsWithLen = Operators.STARTS_WITH.value().length();

    private String value;

    public ComparisonOp(InputStream is) throws ParserException {
        super(is);
    }

    /** Byte-wise prefix comparison of {@code buf} against the ASCII characters of {@code token}. */
    private static boolean startsWithBytes(byte[] buf, String token) {
        if (buf.length < token.length()) {
            return false;
        }
        for (int i = 0; i < token.length(); i++) {
            if (buf[i] != (byte) token.charAt(i)) {
                return false;
            }
        }
        return true;
    }

    /**
     * Peeks a window of {@code maxOperatorLength} bytes, matches the longest-known operators in
     * fixed precedence order, then consumes exactly the matched operator's characters.
     */
    @Override
    protected void _parse() throws Exception {
        byte[] peeked = peek(maxOperatorLength);
        byte first = peeked[0];
        if (first == '=' || first == '>' || first == '<') {
            this.value = new String(peeked, 0, 1);
        } else if (startsWithBytes(peeked, "IN")) {
            this.value = "IN";
        } else if (startsWithBytes(peeked, "IS")) {
            this.value = "IS";
        } else if (startsWithBytes(peeked, "!=")) {
            this.value = "!=";
        } else if (peeked.length >= betweenLen
                && startsWithBytes(peeked, Operators.BETWEEN.value())) {
            this.value = Operators.BETWEEN.value();
        } else if (peeked.length == startsWithLen
                && new String(peeked).equals(Operators.STARTS_WITH.value())) {
            this.value = Operators.STARTS_WITH.value();
        } else {
            throw new ParserException(
                    "Expecting an operator (=, >, <, !=, BETWEEN, IN, STARTS_WITH), but found none. Peeked=>"
                            + new String(peeked));
        }
        read(this.value.length());
    }

    @Override
    public String toString() {
        return " " + value + " ";
    }

    public String getOperator() {
        return value;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/index/ElasticSearchRestDAOV7.java | es7-persistence/src/main/java/com/netflix/conductor/es7/dao/index/ElasticSearchRestDAOV7.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es7.dao.index;
import java.io.IOException;
import java.io.InputStream;
import java.text.SimpleDateFormat;
import java.time.Instant;
import java.time.LocalDate;
import java.util.*;
import java.util.concurrent.*;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.http.HttpEntity;
import org.apache.http.HttpStatus;
import org.apache.http.entity.ContentType;
import org.apache.http.nio.entity.NByteArrayEntity;
import org.apache.http.nio.entity.NStringEntity;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.client.*;
import org.elasticsearch.client.core.CountRequest;
import org.elasticsearch.client.core.CountResponse;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.sort.FieldSortBuilder;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.xcontent.XContentType;
import org.joda.time.DateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.annotations.Trace;
import com.netflix.conductor.common.metadata.events.EventExecution;
import com.netflix.conductor.common.metadata.tasks.TaskExecLog;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.core.exception.TransientException;
import com.netflix.conductor.dao.IndexDAO;
import com.netflix.conductor.es7.config.ElasticSearchProperties;
import com.netflix.conductor.es7.dao.query.parser.internal.ParserException;
import com.netflix.conductor.metrics.Monitors;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.type.MapType;
import com.fasterxml.jackson.databind.type.TypeFactory;
import jakarta.annotation.*;
@Trace
public class ElasticSearchRestDAOV7 extends ElasticSearchBaseDAO implements IndexDAO {
private static final Logger logger = LoggerFactory.getLogger(ElasticSearchRestDAOV7.class);
private static final String CLASS_NAME = ElasticSearchRestDAOV7.class.getSimpleName();
private static final int CORE_POOL_SIZE = 6;
private static final long KEEP_ALIVE_TIME = 1L;
private static final String WORKFLOW_DOC_TYPE = "workflow";
private static final String TASK_DOC_TYPE = "task";
private static final String LOG_DOC_TYPE = "task_log";
private static final String EVENT_DOC_TYPE = "event";
private static final String MSG_DOC_TYPE = "message";
private static final TimeZone GMT = TimeZone.getTimeZone("GMT");
private static final SimpleDateFormat SIMPLE_DATE_FORMAT = new SimpleDateFormat("yyyyMMWW");
private @interface HttpMethod {
String GET = "GET";
String POST = "POST";
String PUT = "PUT";
String HEAD = "HEAD";
}
private static final String className = ElasticSearchRestDAOV7.class.getSimpleName();
private final String workflowIndexName;
private final String taskIndexName;
private final String eventIndexPrefix;
private String eventIndexName;
private final String messageIndexPrefix;
private String messageIndexName;
private String logIndexName;
private final String logIndexPrefix;
private final String clusterHealthColor;
private final RestHighLevelClient elasticSearchClient;
private final RestClient elasticSearchAdminClient;
private final ExecutorService executorService;
private final ExecutorService logExecutorService;
private final ConcurrentHashMap<Pair<String, WriteRequest.RefreshPolicy>, BulkRequests>
bulkRequests;
private final int indexBatchSize;
private final int asyncBufferFlushTimeout;
private final ElasticSearchProperties properties;
private final RetryTemplate retryTemplate;
static {
SIMPLE_DATE_FORMAT.setTimeZone(GMT);
}
    /**
     * Builds the DAO, wiring both the high-level and the low-level (admin) Elasticsearch REST
     * clients and the worker pools used for asynchronous indexing.
     *
     * <p>Two bounded pools are created: a general-purpose pool for workflow/task indexing and a
     * smaller pool dedicated to task logs, event executions and messages. Both discard (and
     * record a metric for) work when their queues overflow, rather than blocking callers.
     *
     * @param restClientBuilder builder for the Elasticsearch REST clients
     * @param retryTemplate retry policy applied by callers of this DAO
     * @param properties Elasticsearch-related configuration
     * @param objectMapper JSON mapper used for (de)serializing documents
     */
    public ElasticSearchRestDAOV7(
            RestClientBuilder restClientBuilder,
            RetryTemplate retryTemplate,
            ElasticSearchProperties properties,
            ObjectMapper objectMapper) {
        this.objectMapper = objectMapper;
        this.elasticSearchAdminClient = restClientBuilder.build();
        this.elasticSearchClient = new RestHighLevelClient(restClientBuilder);
        this.clusterHealthColor = properties.getClusterHealthColor();
        this.bulkRequests = new ConcurrentHashMap<>();
        this.indexBatchSize = properties.getIndexBatchSize();
        this.asyncBufferFlushTimeout = (int) properties.getAsyncBufferFlushTimeout().getSeconds();
        this.properties = properties;
        this.indexPrefix = properties.getIndexPrefix();
        this.workflowIndexName = getIndexName(WORKFLOW_DOC_TYPE);
        this.taskIndexName = getIndexName(TASK_DOC_TYPE);
        this.logIndexPrefix = this.indexPrefix + "_" + LOG_DOC_TYPE;
        this.messageIndexPrefix = this.indexPrefix + "_" + MSG_DOC_TYPE;
        this.eventIndexPrefix = this.indexPrefix + "_" + EVENT_DOC_TYPE;
        int workerQueueSize = properties.getAsyncWorkerQueueSize();
        int maximumPoolSize = properties.getAsyncMaxPoolSize();
        // Set up a workerpool for performing async operations.
        this.executorService =
                new ThreadPoolExecutor(
                        CORE_POOL_SIZE,
                        maximumPoolSize,
                        KEEP_ALIVE_TIME,
                        TimeUnit.MINUTES,
                        new LinkedBlockingQueue<>(workerQueueSize),
                        // Rejection handler: drop the task and count the discard.
                        (runnable, executor) -> {
                            logger.warn(
                                    "Request {} to async dao discarded in executor {}",
                                    runnable,
                                    executor);
                            Monitors.recordDiscardedIndexingCount("indexQueue");
                        });
        // Set up a workerpool for performing async operations for task_logs, event_executions,
        // message
        int corePoolSize = 1;
        maximumPoolSize = 2;
        long keepAliveTime = 30L;
        this.logExecutorService =
                new ThreadPoolExecutor(
                        corePoolSize,
                        maximumPoolSize,
                        keepAliveTime,
                        TimeUnit.SECONDS,
                        new LinkedBlockingQueue<>(workerQueueSize),
                        (runnable, executor) -> {
                            logger.warn(
                                    "Request {} to async log dao discarded in executor {}",
                                    runnable,
                                    executor);
                            Monitors.recordDiscardedIndexingCount("logQueue");
                        });
        // Periodically flush buffered bulk index requests (first flush after 60s, then every 30s).
        // NOTE(review): this scheduler is never stored or shut down, so it lives for the JVM's
        // lifetime -- confirm this is intended.
        Executors.newSingleThreadScheduledExecutor()
                .scheduleAtFixedRate(this::flushBulkRequests, 60, 30, TimeUnit.SECONDS);
        this.retryTemplate = retryTemplate;
    }
    /** Invoked on bean destruction; drains and stops both asynchronous indexing pools. */
    @PreDestroy
    private void shutdown() {
        logger.info("Gracefully shutdown executor service");
        shutdownExecutorService(logExecutorService);
        shutdownExecutorService(executorService);
    }
private void shutdownExecutorService(ExecutorService execService) {
try {
execService.shutdown();
if (execService.awaitTermination(30, TimeUnit.SECONDS)) {
logger.debug("tasks completed, shutting down");
} else {
logger.warn("Forcing shutdown after waiting for 30 seconds");
execService.shutdownNow();
}
} catch (InterruptedException ie) {
logger.warn(
"Shutdown interrupted, invoking shutdownNow on scheduledThreadPoolExecutor for delay queue");
execService.shutdownNow();
Thread.currentThread().interrupt();
}
}
    /**
     * Waits for the cluster to reach the configured health color and, when auto index management
     * is enabled, installs the index templates and creates the workflow/task indices.
     */
    @Override
    @PostConstruct
    public void setup() throws Exception {
        waitForHealthyCluster();
        if (properties.isAutoIndexManagementEnabled()) {
            createIndexesTemplates();
            createWorkflowIndex();
            createTaskIndex();
        }
    }
    /**
     * Installs the task-log/event/message index templates and schedules an hourly refresh of the
     * rolling (per-week) index names. Errors are logged but not propagated.
     */
    private void createIndexesTemplates() {
        try {
            initIndexesTemplates();
            updateIndexesNames();
            // NOTE(review): this scheduled pool is never stored or shut down, so it lives for the
            // JVM's lifetime -- confirm this is intended.
            Executors.newScheduledThreadPool(1)
                    .scheduleAtFixedRate(this::updateIndexesNames, 0, 1, TimeUnit.HOURS);
        } catch (Exception e) {
            logger.error("Error creating index templates!", e);
        }
    }
    /** Installs the index templates for the three time-partitioned document types. */
    private void initIndexesTemplates() {
        initIndexTemplate(LOG_DOC_TYPE);
        initIndexTemplate(EVENT_DOC_TYPE);
        initIndexTemplate(MSG_DOC_TYPE);
    }
/** Initializes the index with the required templates and mappings. */
private void initIndexTemplate(String type) {
String template = "template_" + type;
try {
if (doesResourceNotExist("/_template/" + template)) {
logger.info("Creating the index template '" + template + "'");
InputStream stream =
ElasticSearchRestDAOV7.class.getResourceAsStream("/" + template + ".json");
byte[] templateSource = IOUtils.toByteArray(stream);
HttpEntity entity =
new NByteArrayEntity(templateSource, ContentType.APPLICATION_JSON);
Request request = new Request(HttpMethod.PUT, "/_template/" + template);
request.setEntity(entity);
String test =
IOUtils.toString(
elasticSearchAdminClient
.performRequest(request)
.getEntity()
.getContent());
}
} catch (Exception e) {
logger.error("Failed to init " + template, e);
}
}
    /** Recomputes the rolling (per-week) index names for task logs, events and messages. */
    private void updateIndexesNames() {
        logIndexName = updateIndexName(LOG_DOC_TYPE);
        eventIndexName = updateIndexName(EVENT_DOC_TYPE);
        messageIndexName = updateIndexName(MSG_DOC_TYPE);
    }
private String updateIndexName(String type) {
String indexName =
this.indexPrefix + "_" + type + "_" + SIMPLE_DATE_FORMAT.format(new Date());
try {
addIndex(indexName);
return indexName;
} catch (IOException e) {
logger.error("Failed to update log index name: {}", indexName, e);
throw new NonTransientException(e.getMessage(), e);
}
}
private void createWorkflowIndex() {
String indexName = getIndexName(WORKFLOW_DOC_TYPE);
try {
addIndex(indexName, "/mappings_docType_workflow.json");
} catch (IOException e) {
logger.error("Failed to initialize index '{}'", indexName, e);
}
}
private void createTaskIndex() {
String indexName = getIndexName(TASK_DOC_TYPE);
try {
addIndex(indexName, "/mappings_docType_task.json");
} catch (IOException e) {
logger.error("Failed to initialize index '{}'", indexName, e);
}
}
/**
* Waits for the ES cluster to become green.
*
* @throws Exception If there is an issue connecting with the ES cluster.
*/
private void waitForHealthyCluster() throws Exception {
Map<String, String> params = new HashMap<>();
params.put("wait_for_status", this.clusterHealthColor);
params.put("timeout", "30s");
Request request = new Request("GET", "/_cluster/health");
request.addParameters(params);
elasticSearchAdminClient.performRequest(request);
}
/**
* Adds an index to elasticsearch if it does not exist.
*
* @param index The name of the index to create.
* @param mappingFilename Index mapping filename
* @throws IOException If an error occurred during requests to ES.
*/
private void addIndex(String index, final String mappingFilename) throws IOException {
logger.info("Adding index '{}'...", index);
String resourcePath = "/" + index;
if (doesResourceNotExist(resourcePath)) {
try {
ObjectNode setting = objectMapper.createObjectNode();
ObjectNode indexSetting = objectMapper.createObjectNode();
ObjectNode root = objectMapper.createObjectNode();
indexSetting.put("number_of_shards", properties.getIndexShardCount());
indexSetting.put("number_of_replicas", properties.getIndexReplicasCount());
JsonNode mappingNodeValue =
objectMapper.readTree(loadTypeMappingSource(mappingFilename));
root.set("settings", indexSetting);
root.set("mappings", mappingNodeValue);
Request request = new Request(HttpMethod.PUT, resourcePath);
request.setEntity(
new NStringEntity(
objectMapper.writeValueAsString(root),
ContentType.APPLICATION_JSON));
elasticSearchAdminClient.performRequest(request);
logger.info("Added '{}' index", index);
} catch (ResponseException e) {
boolean errorCreatingIndex = true;
Response errorResponse = e.getResponse();
if (errorResponse.getStatusLine().getStatusCode() == HttpStatus.SC_BAD_REQUEST) {
JsonNode root =
objectMapper.readTree(EntityUtils.toString(errorResponse.getEntity()));
String errorCode = root.get("error").get("type").asText();
if ("index_already_exists_exception".equals(errorCode)) {
errorCreatingIndex = false;
}
}
if (errorCreatingIndex) {
throw e;
}
}
} else {
logger.info("Index '{}' already exists", index);
}
}
    /**
     * Adds an index to elasticsearch if it does not exist.
     *
     * <p>Near-duplicate of {@link #addIndex(String, String)} without an explicit mapping; used for
     * the rolling log/event/message indices whose mappings come from templates.
     *
     * @param index The name of the index to create.
     * @throws IOException If an error occurred during requests to ES.
     */
    private void addIndex(final String index) throws IOException {
        logger.info("Adding index '{}'...", index);
        String resourcePath = "/" + index;
        if (doesResourceNotExist(resourcePath)) {
            try {
                // Build { "settings": { shards, replicas } } for the create-index request.
                ObjectNode setting = objectMapper.createObjectNode();
                ObjectNode indexSetting = objectMapper.createObjectNode();
                indexSetting.put("number_of_shards", properties.getIndexShardCount());
                indexSetting.put("number_of_replicas", properties.getIndexReplicasCount());
                setting.set("settings", indexSetting);
                Request request = new Request(HttpMethod.PUT, resourcePath);
                request.setEntity(
                        new NStringEntity(setting.toString(), ContentType.APPLICATION_JSON));
                elasticSearchAdminClient.performRequest(request);
                logger.info("Added '{}' index", index);
            } catch (ResponseException e) {
                // A 400 whose error type is index_already_exists_exception means another node
                // created the index concurrently -- treat it as success.
                boolean errorCreatingIndex = true;
                Response errorResponse = e.getResponse();
                if (errorResponse.getStatusLine().getStatusCode() == HttpStatus.SC_BAD_REQUEST) {
                    JsonNode root =
                            objectMapper.readTree(EntityUtils.toString(errorResponse.getEntity()));
                    String errorCode = root.get("error").get("type").asText();
                    if ("index_already_exists_exception".equals(errorCode)) {
                        errorCreatingIndex = false;
                    }
                }
                if (errorCreatingIndex) {
                    throw e;
                }
            }
        } else {
            logger.info("Index '{}' already exists", index);
        }
    }
/**
* Adds a mapping type to an index if it does not exist.
*
* @param index The name of the index.
* @param mappingType The name of the mapping type.
* @param mappingFilename The name of the mapping file to use to add the mapping if it does not
* exist.
* @throws IOException If an error occurred during requests to ES.
*/
private void addMappingToIndex(
final String index, final String mappingType, final String mappingFilename)
throws IOException {
logger.info("Adding '{}' mapping to index '{}'...", mappingType, index);
String resourcePath = "/" + index + "/_mapping";
if (doesResourceNotExist(resourcePath)) {
HttpEntity entity =
new NByteArrayEntity(
loadTypeMappingSource(mappingFilename).getBytes(),
ContentType.APPLICATION_JSON);
Request request = new Request(HttpMethod.PUT, resourcePath);
request.setEntity(entity);
elasticSearchAdminClient.performRequest(request);
logger.info("Added '{}' mapping", mappingType);
} else {
logger.info("Mapping '{}' already exists", mappingType);
}
}
    /**
     * Determines whether a resource exists in ES by issuing a HEAD request to the given
     * path and returning true on status 200.
     *
     * @param resourcePath The path of the resource to check.
     * @return True if it exists; false otherwise.
     * @throws IOException If an error occurred during requests to ES.
     */
    public boolean doesResourceExist(final String resourcePath) throws IOException {
        Request request = new Request(HttpMethod.HEAD, resourcePath);
        Response response = elasticSearchAdminClient.performRequest(request);
        return response.getStatusLine().getStatusCode() == HttpStatus.SC_OK;
    }
    /**
     * The inverse of {@link #doesResourceExist(String)}.
     *
     * @param resourcePath The path of the resource to check.
     * @return True if it does not exist; false otherwise.
     * @throws IOException If an error occurred during requests to ES.
     */
    public boolean doesResourceNotExist(final String resourcePath) throws IOException {
        return !doesResourceExist(resourcePath);
    }
    /**
     * Synchronously indexes a workflow summary document keyed by its workflow id.
     * Failures are recorded as a metric and logged, never propagated to the caller.
     */
    @Override
    public void indexWorkflow(WorkflowSummary workflow) {
        try {
            long startTime = Instant.now().toEpochMilli();
            String workflowId = workflow.getWorkflowId();
            byte[] docBytes = objectMapper.writeValueAsBytes(workflow);
            IndexRequest request =
                    new IndexRequest(workflowIndexName)
                            .id(workflowId)
                            .source(docBytes, XContentType.JSON);
            if (properties.isWaitForIndexRefresh()) {
                // Block until the document is searchable; trades latency for visibility.
                request.setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL);
            }
            elasticSearchClient.index(request, RequestOptions.DEFAULT);
            long endTime = Instant.now().toEpochMilli();
            logger.debug(
                    "Time taken {} for indexing workflow: {}", endTime - startTime, workflowId);
            Monitors.recordESIndexTime("index_workflow", WORKFLOW_DOC_TYPE, endTime - startTime);
            Monitors.recordWorkerQueueSize(
                    "indexQueue", ((ThreadPoolExecutor) executorService).getQueue().size());
        } catch (Exception e) {
            Monitors.error(className, "indexWorkflow");
            logger.error("Failed to index workflow: {}", workflow.getWorkflowId(), e);
        }
    }
    /** Queues {@link #indexWorkflow(WorkflowSummary)} on the async indexing pool. */
    @Override
    public CompletableFuture<Void> asyncIndexWorkflow(WorkflowSummary workflow) {
        return CompletableFuture.runAsync(() -> indexWorkflow(workflow), executorService);
    }
@Override
public void indexTask(TaskSummary task) {
try {
long startTime = Instant.now().toEpochMilli();
String taskId = task.getTaskId();
WriteRequest.RefreshPolicy refreshPolicy =
properties.isWaitForIndexRefresh()
? WriteRequest.RefreshPolicy.WAIT_UNTIL
: null;
indexObject(taskIndexName, TASK_DOC_TYPE, taskId, task, refreshPolicy);
long endTime = Instant.now().toEpochMilli();
logger.debug(
"Time taken {} for indexing task:{} in workflow: {}",
endTime - startTime,
taskId,
task.getWorkflowId());
Monitors.recordESIndexTime("index_task", TASK_DOC_TYPE, endTime - startTime);
Monitors.recordWorkerQueueSize(
"indexQueue", ((ThreadPoolExecutor) executorService).getQueue().size());
} catch (Exception e) {
logger.error("Failed to index task: {}", task.getTaskId(), e);
}
}
    /** Queues {@link #indexTask(TaskSummary)} on the async indexing pool. */
    @Override
    public CompletableFuture<Void> asyncIndexTask(TaskSummary task) {
        return CompletableFuture.runAsync(() -> indexTask(task), executorService);
    }
@Override
public void addTaskExecutionLogs(List<TaskExecLog> taskExecLogs) {
if (taskExecLogs.isEmpty()) {
return;
}
long startTime = Instant.now().toEpochMilli();
BulkRequest bulkRequest = new BulkRequest();
for (TaskExecLog log : taskExecLogs) {
byte[] docBytes;
try {
docBytes = objectMapper.writeValueAsBytes(log);
} catch (JsonProcessingException e) {
logger.error("Failed to convert task log to JSON for task {}", log.getTaskId());
continue;
}
IndexRequest request = new IndexRequest(logIndexName);
request.source(docBytes, XContentType.JSON);
bulkRequest.add(request);
}
try {
elasticSearchClient.bulk(bulkRequest, RequestOptions.DEFAULT);
long endTime = Instant.now().toEpochMilli();
logger.debug("Time taken {} for indexing taskExecutionLogs", endTime - startTime);
Monitors.recordESIndexTime(
"index_task_execution_logs", LOG_DOC_TYPE, endTime - startTime);
Monitors.recordWorkerQueueSize(
"logQueue", ((ThreadPoolExecutor) logExecutorService).getQueue().size());
} catch (Exception e) {
List<String> taskIds =
taskExecLogs.stream().map(TaskExecLog::getTaskId).collect(Collectors.toList());
logger.error("Failed to index task execution logs for tasks: {}", taskIds, e);
}
}
    /** Queues {@link #addTaskExecutionLogs(List)} on the dedicated log-indexing pool. */
    @Override
    public CompletableFuture<Void> asyncAddTaskExecutionLogs(List<TaskExecLog> logs) {
        return CompletableFuture.runAsync(() -> addTaskExecutionLogs(logs), logExecutorService);
    }
    /**
     * Fetches execution logs for a task, sorted by creation time ascending and capped at
     * the configured task-log result limit.
     *
     * <p>NOTE(review): returns {@code null} (not an empty list) when the search fails —
     * callers must null-check; confirm before changing this to an empty list.
     */
    @Override
    public List<TaskExecLog> getTaskExecutionLogs(String taskId) {
        try {
            BoolQueryBuilder query = boolQueryBuilder("taskId='" + taskId + "'", "*");
            // Create the searchObjectIdsViaExpression source
            SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
            searchSourceBuilder.query(query);
            searchSourceBuilder.sort(new FieldSortBuilder("createdTime").order(SortOrder.ASC));
            searchSourceBuilder.size(properties.getTaskLogResultLimit());
            // Generate the actual request to send to ES.
            SearchRequest searchRequest = new SearchRequest(logIndexPrefix + "*");
            searchRequest.source(searchSourceBuilder);
            SearchResponse response =
                    elasticSearchClient.search(searchRequest, RequestOptions.DEFAULT);
            return mapTaskExecLogsResponse(response);
        } catch (Exception e) {
            logger.error("Failed to get task execution logs for task: {}", taskId, e);
        }
        return null;
    }
private List<TaskExecLog> mapTaskExecLogsResponse(SearchResponse response) throws IOException {
SearchHit[] hits = response.getHits().getHits();
List<TaskExecLog> logs = new ArrayList<>(hits.length);
for (SearchHit hit : hits) {
String source = hit.getSourceAsString();
TaskExecLog tel = objectMapper.readValue(source, TaskExecLog.class);
logs.add(tel);
}
return logs;
}
    /**
     * Fetches indexed messages for a queue, sorted by creation time ascending.
     *
     * <p>NOTE(review): returns {@code null} (not an empty list) when the search fails.
     */
    @Override
    public List<Message> getMessages(String queue) {
        try {
            BoolQueryBuilder query = boolQueryBuilder("queue='" + queue + "'", "*");
            // Create the searchObjectIdsViaExpression source
            SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
            searchSourceBuilder.query(query);
            searchSourceBuilder.sort(new FieldSortBuilder("created").order(SortOrder.ASC));
            // Generate the actual request to send to ES.
            SearchRequest searchRequest = new SearchRequest(messageIndexPrefix + "*");
            searchRequest.source(searchSourceBuilder);
            SearchResponse response =
                    elasticSearchClient.search(searchRequest, RequestOptions.DEFAULT);
            return mapGetMessagesResponse(response);
        } catch (Exception e) {
            logger.error("Failed to get messages for queue: {}", queue, e);
        }
        return null;
    }
private List<Message> mapGetMessagesResponse(SearchResponse response) throws IOException {
SearchHit[] hits = response.getHits().getHits();
TypeFactory factory = TypeFactory.defaultInstance();
MapType type = factory.constructMapType(HashMap.class, String.class, String.class);
List<Message> messages = new ArrayList<>(hits.length);
for (SearchHit hit : hits) {
String source = hit.getSourceAsString();
Map<String, String> mapSource = objectMapper.readValue(source, type);
Message msg = new Message(mapSource.get("messageId"), mapSource.get("payload"), null);
messages.add(msg);
}
return messages;
}
    /**
     * Fetches event executions for an event, sorted by creation time ascending.
     *
     * <p>NOTE(review): returns {@code null} (not an empty list) when the search fails.
     */
    @Override
    public List<EventExecution> getEventExecutions(String event) {
        try {
            BoolQueryBuilder query = boolQueryBuilder("event='" + event + "'", "*");
            // Create the searchObjectIdsViaExpression source
            SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
            searchSourceBuilder.query(query);
            searchSourceBuilder.sort(new FieldSortBuilder("created").order(SortOrder.ASC));
            // Generate the actual request to send to ES.
            SearchRequest searchRequest = new SearchRequest(eventIndexPrefix + "*");
            searchRequest.source(searchSourceBuilder);
            SearchResponse response =
                    elasticSearchClient.search(searchRequest, RequestOptions.DEFAULT);
            return mapEventExecutionsResponse(response);
        } catch (Exception e) {
            logger.error("Failed to get executions for event: {}", event, e);
        }
        return null;
    }
private List<EventExecution> mapEventExecutionsResponse(SearchResponse response)
throws IOException {
SearchHit[] hits = response.getHits().getHits();
List<EventExecution> executions = new ArrayList<>(hits.length);
for (SearchHit hit : hits) {
String source = hit.getSourceAsString();
EventExecution tel = objectMapper.readValue(source, EventExecution.class);
executions.add(tel);
}
return executions;
}
    /**
     * Synchronously indexes a queue message as a flat document (messageId, payload,
     * queue, created). Failures are logged and swallowed.
     */
    @Override
    public void addMessage(String queue, Message message) {
        try {
            long startTime = Instant.now().toEpochMilli();
            Map<String, Object> doc = new HashMap<>();
            doc.put("messageId", message.getId());
            doc.put("payload", message.getPayload());
            doc.put("queue", queue);
            doc.put("created", System.currentTimeMillis());
            indexObject(messageIndexName, MSG_DOC_TYPE, doc);
            long endTime = Instant.now().toEpochMilli();
            logger.debug(
                    "Time taken {} for indexing message: {}",
                    endTime - startTime,
                    message.getId());
            Monitors.recordESIndexTime("add_message", MSG_DOC_TYPE, endTime - startTime);
        } catch (Exception e) {
            logger.error("Failed to index message: {}", message.getId(), e);
        }
    }
    /** Queues {@link #addMessage(String, Message)} on the async indexing pool. */
    @Override
    public CompletableFuture<Void> asyncAddMessage(String queue, Message message) {
        return CompletableFuture.runAsync(() -> addMessage(queue, message), executorService);
    }
    /**
     * Synchronously indexes an event execution. The document id is the dot-joined
     * composite "name.event.messageId.id". Failures are logged and swallowed.
     */
    @Override
    public void addEventExecution(EventExecution eventExecution) {
        try {
            long startTime = Instant.now().toEpochMilli();
            String id =
                    eventExecution.getName()
                            + "."
                            + eventExecution.getEvent()
                            + "."
                            + eventExecution.getMessageId()
                            + "."
                            + eventExecution.getId();
            indexObject(eventIndexName, EVENT_DOC_TYPE, id, eventExecution, null);
            long endTime = Instant.now().toEpochMilli();
            logger.debug(
                    "Time taken {} for indexing event execution: {}",
                    endTime - startTime,
                    eventExecution.getId());
            Monitors.recordESIndexTime("add_event_execution", EVENT_DOC_TYPE, endTime - startTime);
            Monitors.recordWorkerQueueSize(
                    "logQueue", ((ThreadPoolExecutor) logExecutorService).getQueue().size());
        } catch (Exception e) {
            logger.error("Failed to index event execution: {}", eventExecution.getId(), e);
        }
    }
    /** Queues {@link #addEventExecution(EventExecution)} on the log-indexing pool. */
    @Override
    public CompletableFuture<Void> asyncAddEventExecution(EventExecution eventExecution) {
        return CompletableFuture.runAsync(
                () -> addEventExecution(eventExecution), logExecutorService);
    }
    /**
     * Searches workflow ids matching a structured query expression plus a free-text
     * string. Any failure is wrapped in a {@link NonTransientException}.
     */
    @Override
    public SearchResult<String> searchWorkflows(
            String query, String freeText, int start, int count, List<String> sort) {
        try {
            return searchObjectIdsViaExpression(
                    query, start, count, sort, freeText, WORKFLOW_DOC_TYPE);
        } catch (Exception e) {
            throw new NonTransientException(e.getMessage(), e);
        }
    }
@Override
public SearchResult<WorkflowSummary> searchWorkflowSummary(
String query, String freeText, int start, int count, List<String> sort) {
try {
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | true |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/index/ElasticSearchBaseDAO.java | es7-persistence/src/main/java/com/netflix/conductor/es7/dao/index/ElasticSearchBaseDAO.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es7.dao.index;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.QueryStringQueryBuilder;
import com.netflix.conductor.dao.IndexDAO;
import com.netflix.conductor.es7.dao.query.parser.Expression;
import com.netflix.conductor.es7.dao.query.parser.internal.ParserException;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
abstract class ElasticSearchBaseDAO implements IndexDAO {

    /** Prefix prepended (with '_') to every index name managed by the DAOs. */
    String indexPrefix;

    ObjectMapper objectMapper;

    /**
     * Loads an index/template mapping definition from the classpath and applies the
     * configured index prefix to its "index_patterns" entries.
     *
     * @param path classpath resource path of the mapping JSON
     * @return the mapping source, with prefixed index patterns where applicable
     * @throws IOException if the resource cannot be read or parsed
     */
    String loadTypeMappingSource(String path) throws IOException {
        // Read the resource with an explicit charset; the charset-less overload is
        // deprecated and depends on the platform default.
        return applyIndexPrefixToTemplate(
                IOUtils.toString(
                        ElasticSearchBaseDAO.class.getResourceAsStream(path),
                        StandardCharsets.UTF_8));
    }

    /**
     * Rewrites every entry of the template's "index_patterns" array to include the
     * configured index prefix. Templates without that array are returned unchanged.
     */
    private String applyIndexPrefixToTemplate(String text) throws JsonProcessingException {
        String indexPatternsFieldName = "index_patterns";
        JsonNode root = objectMapper.readTree(text);
        if (root != null) {
            JsonNode indexPatternsNodeValue = root.get(indexPatternsFieldName);
            if (indexPatternsNodeValue != null && indexPatternsNodeValue.isArray()) {
                ArrayList<String> patternsWithPrefix = new ArrayList<>();
                indexPatternsNodeValue.forEach(
                        v -> {
                            String patternText = v.asText();
                            StringBuilder sb = new StringBuilder();
                            // A leading '*' wildcard must remain in front of the prefix.
                            if (patternText.startsWith("*")) {
                                sb.append("*")
                                        .append(indexPrefix)
                                        .append("_")
                                        .append(patternText.substring(1));
                            } else {
                                sb.append(indexPrefix).append("_").append(patternText);
                            }
                            patternsWithPrefix.add(sb.toString());
                        });
                ((ObjectNode) root)
                        .set(indexPatternsFieldName, objectMapper.valueToTree(patternsWithPrefix));
                // (Removed a leftover System.out.println that dumped the whole rewritten
                // template to stdout on every load.)
                return objectMapper.writerWithDefaultPrettyPrinter().writeValueAsString(root);
            }
        }
        return text;
    }

    /**
     * Builds a bool query combining an optional structured filter expression with a
     * free-text query string.
     *
     * @throws ParserException if the expression cannot be parsed
     */
    BoolQueryBuilder boolQueryBuilder(String expression, String queryString)
            throws ParserException {
        QueryBuilder queryBuilder = QueryBuilders.matchAllQuery();
        if (StringUtils.isNotEmpty(expression)) {
            Expression exp = Expression.fromString(expression);
            queryBuilder = exp.getFilterBuilder();
        }
        BoolQueryBuilder filterQuery = QueryBuilders.boolQuery().must(queryBuilder);
        QueryStringQueryBuilder stringQuery = QueryBuilders.queryStringQuery(queryString);
        return QueryBuilders.boolQuery().must(stringQuery).must(filterQuery);
    }

    /** Returns the prefixed index name for a document type, e.g. "conductor_workflow". */
    protected String getIndexName(String documentType) {
        return indexPrefix + "_" + documentType;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/index/BulkRequestBuilderWrapper.java | es7-persistence/src/main/java/com/netflix/conductor/es7/dao/index/BulkRequestBuilderWrapper.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es7.dao.index;
import java.util.Objects;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.springframework.lang.NonNull;
/** Thread-safe wrapper for {@link BulkRequestBuilder}. */
public class BulkRequestBuilderWrapper {
    // All access to the underlying builder is serialized on its monitor.
    private final BulkRequestBuilder bulkRequestBuilder;
    public BulkRequestBuilderWrapper(@NonNull BulkRequestBuilder bulkRequestBuilder) {
        this.bulkRequestBuilder = Objects.requireNonNull(bulkRequestBuilder);
    }
    /** Adds an update request to the bulk batch. */
    public void add(@NonNull UpdateRequest req) {
        synchronized (bulkRequestBuilder) {
            bulkRequestBuilder.add(Objects.requireNonNull(req));
        }
    }
    /** Adds an index request to the bulk batch. */
    public void add(@NonNull IndexRequest req) {
        synchronized (bulkRequestBuilder) {
            bulkRequestBuilder.add(Objects.requireNonNull(req));
        }
    }
    /** Returns the number of requests currently batched. */
    public int numberOfActions() {
        synchronized (bulkRequestBuilder) {
            return bulkRequestBuilder.numberOfActions();
        }
    }
    /** Executes the batched bulk request, returning its future. */
    public ActionFuture<BulkResponse> execute() {
        synchronized (bulkRequestBuilder) {
            return bulkRequestBuilder.execute();
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/index/BulkRequestWrapper.java | es7-persistence/src/main/java/com/netflix/conductor/es7/dao/index/BulkRequestWrapper.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es7.dao.index;
import java.util.Objects;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.springframework.lang.NonNull;
/** Thread-safe wrapper for {@link BulkRequest}. */
class BulkRequestWrapper {
    // All mutation goes through the synchronized add() methods below.
    private final BulkRequest bulkRequest;
    BulkRequestWrapper(@NonNull BulkRequest bulkRequest) {
        this.bulkRequest = Objects.requireNonNull(bulkRequest);
    }
    /** Adds an update request to the bulk batch. */
    public void add(@NonNull UpdateRequest req) {
        synchronized (bulkRequest) {
            bulkRequest.add(Objects.requireNonNull(req));
        }
    }
    /** Adds an index request to the bulk batch. */
    public void add(@NonNull IndexRequest req) {
        synchronized (bulkRequest) {
            bulkRequest.add(Objects.requireNonNull(req));
        }
    }
    // NOTE(review): exposes the underlying request without synchronization — callers
    // must not mutate it concurrently with add(); confirm usage sites.
    BulkRequest get() {
        return bulkRequest;
    }
    int numberOfActions() {
        synchronized (bulkRequest) {
            return bulkRequest.numberOfActions();
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es7-persistence/src/main/java/com/netflix/conductor/es7/config/ElasticSearchV7Configuration.java | es7-persistence/src/main/java/com/netflix/conductor/es7/config/ElasticSearchV7Configuration.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es7.config;
import java.net.URL;
import java.util.List;
import org.apache.http.HttpHost;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Conditional;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import org.springframework.retry.backoff.FixedBackOffPolicy;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.dao.IndexDAO;
import com.netflix.conductor.es7.dao.index.ElasticSearchRestDAOV7;
import com.fasterxml.jackson.databind.ObjectMapper;
@Configuration(proxyBeanMethods = false)
@EnableConfigurationProperties(ElasticSearchProperties.class)
@Conditional(ElasticSearchConditions.ElasticSearchV7Enabled.class)
public class ElasticSearchV7Configuration {

    private static final Logger log = LoggerFactory.getLogger(ElasticSearchV7Configuration.class);

    /** Builds the low-level REST client from the configured builder. */
    @Bean
    public RestClient restClient(RestClientBuilder restClientBuilder) {
        return restClientBuilder.build();
    }

    /**
     * Configures the REST client builder with the cluster hosts, an optional connection
     * request timeout, and optional BASIC authentication.
     */
    @Bean
    public RestClientBuilder elasticRestClientBuilder(ElasticSearchProperties properties) {
        RestClientBuilder builder = RestClient.builder(convertToHttpHosts(properties.toURLs()));
        if (properties.getRestClientConnectionRequestTimeout() > 0) {
            builder.setRequestConfigCallback(
                    requestConfigBuilder ->
                            requestConfigBuilder.setConnectionRequestTimeout(
                                    properties.getRestClientConnectionRequestTimeout()));
        }
        if (properties.getUsername() != null && properties.getPassword() != null) {
            log.info(
                    "Configure ElasticSearch with BASIC authentication. User:{}",
                    properties.getUsername());
            final CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
            credentialsProvider.setCredentials(
                    AuthScope.ANY,
                    new UsernamePasswordCredentials(
                            properties.getUsername(), properties.getPassword()));
            builder.setHttpClientConfigCallback(
                    httpClientBuilder ->
                            httpClientBuilder.setDefaultCredentialsProvider(credentialsProvider));
        } else {
            log.info("Configure ElasticSearch with no authentication.");
        }
        return builder;
    }

    @Primary // If you are including this project, it's assumed you want ES to be your indexing
    // mechanism
    @Bean
    public IndexDAO es7IndexDAO(
            RestClientBuilder restClientBuilder,
            @Qualifier("es7RetryTemplate") RetryTemplate retryTemplate,
            ElasticSearchProperties properties,
            ObjectMapper objectMapper) {
        // Removed an unused local ("String url = properties.getUrl()").
        return new ElasticSearchRestDAOV7(
                restClientBuilder, retryTemplate, properties, objectMapper);
    }

    /** Retry template with a fixed 1-second back-off, used by the index DAO. */
    @Bean
    public RetryTemplate es7RetryTemplate() {
        RetryTemplate retryTemplate = new RetryTemplate();
        FixedBackOffPolicy fixedBackOffPolicy = new FixedBackOffPolicy();
        fixedBackOffPolicy.setBackOffPeriod(1000L);
        retryTemplate.setBackOffPolicy(fixedBackOffPolicy);
        return retryTemplate;
    }

    /** Maps configured URLs to HttpHost instances for the REST client. */
    private HttpHost[] convertToHttpHosts(List<URL> hosts) {
        return hosts.stream()
                .map(host -> new HttpHost(host.getHost(), host.getPort(), host.getProtocol()))
                .toArray(HttpHost[]::new);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es7-persistence/src/main/java/com/netflix/conductor/es7/config/ElasticSearchConditions.java | es7-persistence/src/main/java/com/netflix/conductor/es7/config/ElasticSearchConditions.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es7.config;
import org.springframework.boot.autoconfigure.condition.AllNestedConditions;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
/** Spring condition helpers for enabling the ES7 persistence module. */
public class ElasticSearchConditions {

    private ElasticSearchConditions() {}

    /**
     * Matches only when indexing is enabled (default) AND the configured ElasticSearch
     * version is 7 (default). Both nested conditions must hold.
     */
    public static class ElasticSearchV7Enabled extends AllNestedConditions {

        ElasticSearchV7Enabled() {
            super(ConfigurationPhase.PARSE_CONFIGURATION);
        }

        // Nested condition classes renamed to UpperCamelCase per Java convention;
        // AllNestedConditions matches nested members regardless of their names.
        @SuppressWarnings("unused")
        @ConditionalOnProperty(
                name = "conductor.indexing.enabled",
                havingValue = "true",
                matchIfMissing = true)
        static class EnabledIndexing {}

        @SuppressWarnings("unused")
        @ConditionalOnProperty(
                name = "conductor.elasticsearch.version",
                havingValue = "7",
                matchIfMissing = true)
        static class EnabledES7 {}
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/es7-persistence/src/main/java/com/netflix/conductor/es7/config/ElasticSearchProperties.java | es7-persistence/src/main/java/com/netflix/conductor/es7/config/ElasticSearchProperties.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.es7.config;
import java.net.MalformedURLException;
import java.net.URL;
import java.time.Duration;
import java.time.temporal.ChronoUnit;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.convert.DurationUnit;
@ConfigurationProperties("conductor.elasticsearch")
public class ElasticSearchProperties {

    /**
     * The comma separated list of urls for the elasticsearch cluster. Format --
     * host1:port1,host2:port2
     */
    private String url = "localhost:9300";

    /** The index prefix to be used when creating indices */
    private String indexPrefix = "conductor";

    /** The color of the elasticsearch cluster to wait for to confirm healthy status */
    private String clusterHealthColor = "green";

    /** The size of the batch to be used for bulk indexing in async mode */
    private int indexBatchSize = 1;

    /** The size of the queue used for holding async indexing tasks */
    private int asyncWorkerQueueSize = 100;

    /** The maximum number of threads allowed in the async pool */
    private int asyncMaxPoolSize = 12;

    /**
     * The time in seconds after which the async buffers will be flushed (if no activity) to prevent
     * data loss
     */
    @DurationUnit(ChronoUnit.SECONDS)
    private Duration asyncBufferFlushTimeout = Duration.ofSeconds(10);

    /** The number of shards that the index will be created with */
    private int indexShardCount = 5;

    /** The number of replicas that the index will be configured to have */
    private int indexReplicasCount = 1;

    /** The number of task log results that will be returned in the response */
    private int taskLogResultLimit = 10;

    /** The timeout in milliseconds used when requesting a connection from the connection manager */
    private int restClientConnectionRequestTimeout = -1;

    /** Used to control if index management is to be enabled or will be controlled externally */
    private boolean autoIndexManagementEnabled = true;

    /**
     * Document types are deprecated in ES6 and removed from ES7. This property can be used to
     * disable the use of specific document types with an override. This property is currently used
     * in ES6 module.
     *
     * <p><em>Note that this property will only take effect if {@link
     * ElasticSearchProperties#isAutoIndexManagementEnabled} is set to false and index management is
     * handled outside of this module.</em>
     */
    private String documentTypeOverride = "";

    /** Elasticsearch basic auth username */
    private String username;

    /** Elasticsearch basic auth password */
    private String password;

    /**
     * Whether to wait for index refresh when updating tasks and workflows. When enabled, the
     * operation will block until the changes are visible for search. This guarantees immediate
     * search visibility but can significantly impact performance (20-30s delays). Defaults to false
     * for better performance.
     */
    private boolean waitForIndexRefresh = false;

    public String getUrl() {
        return url;
    }

    public void setUrl(String url) {
        this.url = url;
    }

    public String getIndexPrefix() {
        return indexPrefix;
    }

    public void setIndexPrefix(String indexPrefix) {
        this.indexPrefix = indexPrefix;
    }

    public String getClusterHealthColor() {
        return clusterHealthColor;
    }

    public void setClusterHealthColor(String clusterHealthColor) {
        this.clusterHealthColor = clusterHealthColor;
    }

    public int getIndexBatchSize() {
        return indexBatchSize;
    }

    public void setIndexBatchSize(int indexBatchSize) {
        this.indexBatchSize = indexBatchSize;
    }

    public int getAsyncWorkerQueueSize() {
        return asyncWorkerQueueSize;
    }

    public void setAsyncWorkerQueueSize(int asyncWorkerQueueSize) {
        this.asyncWorkerQueueSize = asyncWorkerQueueSize;
    }

    public int getAsyncMaxPoolSize() {
        return asyncMaxPoolSize;
    }

    public void setAsyncMaxPoolSize(int asyncMaxPoolSize) {
        this.asyncMaxPoolSize = asyncMaxPoolSize;
    }

    public Duration getAsyncBufferFlushTimeout() {
        return asyncBufferFlushTimeout;
    }

    public void setAsyncBufferFlushTimeout(Duration asyncBufferFlushTimeout) {
        this.asyncBufferFlushTimeout = asyncBufferFlushTimeout;
    }

    public int getIndexShardCount() {
        return indexShardCount;
    }

    public void setIndexShardCount(int indexShardCount) {
        this.indexShardCount = indexShardCount;
    }

    public int getIndexReplicasCount() {
        return indexReplicasCount;
    }

    public void setIndexReplicasCount(int indexReplicasCount) {
        this.indexReplicasCount = indexReplicasCount;
    }

    public int getTaskLogResultLimit() {
        return taskLogResultLimit;
    }

    public void setTaskLogResultLimit(int taskLogResultLimit) {
        this.taskLogResultLimit = taskLogResultLimit;
    }

    public int getRestClientConnectionRequestTimeout() {
        return restClientConnectionRequestTimeout;
    }

    public void setRestClientConnectionRequestTimeout(int restClientConnectionRequestTimeout) {
        this.restClientConnectionRequestTimeout = restClientConnectionRequestTimeout;
    }

    public boolean isAutoIndexManagementEnabled() {
        return autoIndexManagementEnabled;
    }

    public void setAutoIndexManagementEnabled(boolean autoIndexManagementEnabled) {
        this.autoIndexManagementEnabled = autoIndexManagementEnabled;
    }

    public String getDocumentTypeOverride() {
        return documentTypeOverride;
    }

    public void setDocumentTypeOverride(String documentTypeOverride) {
        this.documentTypeOverride = documentTypeOverride;
    }

    public String getUsername() {
        return username;
    }

    public void setUsername(String username) {
        this.username = username;
    }

    public String getPassword() {
        return password;
    }

    public void setPassword(String password) {
        this.password = password;
    }

    public boolean isWaitForIndexRefresh() {
        return waitForIndexRefresh;
    }

    public void setWaitForIndexRefresh(boolean waitForIndexRefresh) {
        this.waitForIndexRefresh = waitForIndexRefresh;
    }

    /**
     * Parses the configured comma-separated cluster address into URLs, defaulting the
     * scheme to http:// when none is given.
     */
    public List<URL> toURLs() {
        String clusterAddress = getUrl();
        String[] hosts = clusterAddress.split(",");
        return Arrays.stream(hosts)
                .map(
                        host ->
                                (host.startsWith("http://") || host.startsWith("https://"))
                                        ? toURL(host)
                                        : toURL("http://" + host))
                .collect(Collectors.toList());
    }

    private URL toURL(String url) {
        try {
            return new URL(url);
        } catch (MalformedURLException e) {
            // Preserve the cause and fix the previously run-together message
            // ("...9300can not be converted...").
            throw new IllegalArgumentException(
                    url + " can not be converted to java.net.URL", e);
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/redis-concurrency-limit/src/main/java/com/netflix/conductor/redis/limit/RedisConcurrentExecutionLimitDAO.java | redis-concurrency-limit/src/main/java/com/netflix/conductor/redis/limit/RedisConcurrentExecutionLimitDAO.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.limit;
import java.util.Optional;
import org.apache.commons.lang3.ObjectUtils;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.data.redis.core.StringRedisTemplate;
import org.springframework.stereotype.Component;
import com.netflix.conductor.annotations.Trace;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.core.exception.TransientException;
import com.netflix.conductor.dao.ConcurrentExecutionLimitDAO;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.redis.limit.config.RedisConcurrentExecutionLimitProperties;
/**
 * {@link ConcurrentExecutionLimitDAO} backed by one Redis Set per task definition name. The set
 * holds the ids of tasks currently counted against the limit; its cardinality is compared with
 * {@link TaskDef#concurrencyLimit()} to decide whether a new execution exceeds the limit.
 */
@Trace
@Component
@ConditionalOnProperty(
        value = "conductor.redis-concurrent-execution-limit.enabled",
        havingValue = "true")
public class RedisConcurrentExecutionLimitDAO implements ConcurrentExecutionLimitDAO {
    private static final Logger LOGGER =
            LoggerFactory.getLogger(RedisConcurrentExecutionLimitDAO.class);
    private static final String CLASS_NAME = RedisConcurrentExecutionLimitDAO.class.getSimpleName();
    private final StringRedisTemplate stringRedisTemplate;
    private final RedisConcurrentExecutionLimitProperties properties;
    /**
     * @param stringRedisTemplate template for Redis Set operations
     * @param properties supplies the optional key namespace prefix
     */
    public RedisConcurrentExecutionLimitDAO(
            StringRedisTemplate stringRedisTemplate,
            RedisConcurrentExecutionLimitProperties properties) {
        this.stringRedisTemplate = stringRedisTemplate;
        this.properties = properties;
    }
    /**
     * Adds the {@link TaskModel} identifier to a Redis Set for the {@link TaskDef}'s name.
     *
     * @param task The {@link TaskModel} object.
     * @throws TransientException if the Redis operation fails (retryable)
     */
    @Override
    public void addTaskToLimit(TaskModel task) {
        try {
            Monitors.recordDaoRequests(
                    CLASS_NAME, "addTaskToLimit", task.getTaskType(), task.getWorkflowType());
            String taskId = task.getTaskId();
            String taskDefName = task.getTaskDefName();
            String keyName = createKeyName(taskDefName);
            stringRedisTemplate.opsForSet().add(keyName, taskId);
            LOGGER.debug("Added taskId: {} to key: {}", taskId, keyName);
        } catch (Exception e) {
            Monitors.error(CLASS_NAME, "addTaskToLimit");
            String errorMsg =
                    String.format(
                            "Error updating taskDefLimit for task - %s:%s in workflow: %s",
                            task.getTaskDefName(), task.getTaskId(), task.getWorkflowInstanceId());
            LOGGER.error(errorMsg, e);
            throw new TransientException(errorMsg, e);
        }
    }
    /**
     * Remove the {@link TaskModel} identifier from the Redis Set for the {@link TaskDef}'s name.
     *
     * @param task The {@link TaskModel} object.
     * @throws TransientException if the Redis operation fails (retryable)
     */
    @Override
    public void removeTaskFromLimit(TaskModel task) {
        try {
            Monitors.recordDaoRequests(
                    CLASS_NAME, "removeTaskFromLimit", task.getTaskType(), task.getWorkflowType());
            String taskId = task.getTaskId();
            String taskDefName = task.getTaskDefName();
            String keyName = createKeyName(taskDefName);
            stringRedisTemplate.opsForSet().remove(keyName, taskId);
            LOGGER.debug("Removed taskId: {} from key: {}", taskId, keyName);
        } catch (Exception e) {
            Monitors.error(CLASS_NAME, "removeTaskFromLimit");
            String errorMsg =
                    String.format(
                            "Error updating taskDefLimit for task - %s:%s in workflow: %s",
                            task.getTaskDefName(), task.getTaskId(), task.getWorkflowInstanceId());
            LOGGER.error(errorMsg, e);
            throw new TransientException(errorMsg, e);
        }
    }
    /**
     * Checks if the {@link TaskModel} identifier is in the Redis Set and size of the set is more
     * than the {@link TaskDef#concurrencyLimit()}.
     *
     * @param task The {@link TaskModel} object.
     * @return true if the task id is not in the set and size of the set is more than the {@link
     *     TaskDef#concurrencyLimit()}.
     * @throws TransientException if the Redis operation fails (retryable)
     */
    @Override
    public boolean exceedsLimit(TaskModel task) {
        Optional<TaskDef> taskDefinition = task.getTaskDefinition();
        if (taskDefinition.isEmpty()) {
            return false;
        }
        int limit = taskDefinition.get().concurrencyLimit();
        if (limit <= 0) {
            // A non-positive limit means concurrency limiting is disabled for this task def.
            return false;
        }
        try {
            Monitors.recordDaoRequests(
                    CLASS_NAME, "exceedsLimit", task.getTaskType(), task.getWorkflowType());
            String taskId = task.getTaskId();
            String taskDefName = task.getTaskDefName();
            String keyName = createKeyName(taskDefName);
            boolean isMember =
                    ObjectUtils.defaultIfNull(
                            stringRedisTemplate.opsForSet().isMember(keyName, taskId), false);
            long size =
                    ObjectUtils.defaultIfNull(stringRedisTemplate.opsForSet().size(keyName), -1L);
            LOGGER.debug(
                    "Task: {} is {} of {}, size: {} and limit: {}",
                    taskId,
                    isMember ? "a member" : "not a member",
                    keyName,
                    size,
                    limit);
            // Tasks already in the set are never blocked by the limit they count toward.
            return !isMember && size >= limit;
        } catch (Exception e) {
            Monitors.error(CLASS_NAME, "exceedsLimit");
            String errorMsg =
                    String.format(
                            "Failed to get in progress limit - %s:%s in workflow: %s",
                            task.getTaskDefName(), task.getTaskId(), task.getWorkflowInstanceId());
            LOGGER.error(errorMsg, e);
            // Fixed: preserve the cause (was thrown without it, unlike the other methods) and
            // corrected the "workflow :%s" spacing in the message.
            throw new TransientException(errorMsg, e);
        }
    }
    /**
     * Builds the Redis key for a task definition, prefixed with the configured namespace
     * (e.g. {@code conductor:myTaskDef}) when one is set.
     */
    private String createKeyName(String taskDefName) {
        StringBuilder builder = new StringBuilder();
        String namespace = properties.getNamespace();
        if (StringUtils.isNotBlank(namespace)) {
            builder.append(namespace).append(':');
        }
        return builder.append(taskDefName).toString();
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/redis-concurrency-limit/src/main/java/com/netflix/conductor/redis/limit/config/RedisConcurrentExecutionLimitConfiguration.java | redis-concurrency-limit/src/main/java/com/netflix/conductor/redis/limit/config/RedisConcurrentExecutionLimitConfiguration.java | /*
* Copyright 2021 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.limit.config;
import java.util.List;
import org.apache.commons.pool2.impl.GenericObjectPoolConfig;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.connection.RedisClusterConfiguration;
import org.springframework.data.redis.connection.RedisConnectionFactory;
import org.springframework.data.redis.connection.RedisStandaloneConfiguration;
import org.springframework.data.redis.connection.jedis.JedisClientConfiguration;
import org.springframework.data.redis.connection.jedis.JedisConnectionFactory;
/**
 * Spring configuration that wires a Jedis-backed {@link RedisConnectionFactory} for the
 * concurrent-execution-limit module. Exactly one of the two factory beans is active, selected by
 * the {@code conductor.redis-concurrent-execution-limit.type} property (standalone by default).
 */
@Configuration
@ConditionalOnProperty(
        value = "conductor.redis-concurrent-execution-limit.enabled",
        havingValue = "true")
@EnableConfigurationProperties(RedisConcurrentExecutionLimitProperties.class)
public class RedisConcurrentExecutionLimitConfiguration {
    /** Connection factory used when the configured Redis deployment is a cluster. */
    @Bean
    @ConditionalOnProperty(
            value = "conductor.redis-concurrent-execution-limit.type",
            havingValue = "cluster")
    public RedisConnectionFactory redisClusterConnectionFactory(
            RedisConcurrentExecutionLimitProperties properties) {
        GenericObjectPoolConfig<?> pool = new GenericObjectPoolConfig<>();
        pool.setMaxTotal(properties.getMaxConnectionsPerHost());
        pool.setTestWhileIdle(true);
        JedisClientConfiguration clientConfiguration =
                JedisClientConfiguration.builder()
                        .usePooling()
                        .poolConfig(pool)
                        .and()
                        .clientName(properties.getClientName())
                        .build();
        // Single seed node; the Jedis client discovers the remaining cluster topology from it.
        String seedNode = properties.getHost() + ":" + properties.getPort();
        RedisClusterConfiguration clusterConfiguration =
                new RedisClusterConfiguration(List.of(seedNode));
        return new JedisConnectionFactory(clusterConfiguration, clientConfiguration);
    }
    /** Connection factory used for a single standalone Redis node (the default). */
    @Bean
    @ConditionalOnProperty(
            value = "conductor.redis-concurrent-execution-limit.type",
            havingValue = "standalone",
            matchIfMissing = true)
    public RedisConnectionFactory redisStandaloneConnectionFactory(
            RedisConcurrentExecutionLimitProperties properties) {
        return new JedisConnectionFactory(
                new RedisStandaloneConfiguration(properties.getHost(), properties.getPort()));
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/redis-concurrency-limit/src/main/java/com/netflix/conductor/redis/limit/config/RedisConcurrentExecutionLimitProperties.java | redis-concurrency-limit/src/main/java/com/netflix/conductor/redis/limit/config/RedisConcurrentExecutionLimitProperties.java | /*
* Copyright 2021 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.limit.config;
import org.springframework.boot.context.properties.ConfigurationProperties;
/**
 * Configuration properties for the Redis-backed concurrent execution limit module, bound from the
 * {@code conductor.redis-concurrent-execution-limit.*} prefix.
 */
@ConfigurationProperties("conductor.redis-concurrent-execution-limit")
public class RedisConcurrentExecutionLimitProperties {
    /** Topology of the Redis deployment used for concurrency limiting. */
    public enum RedisType {
        STANDALONE,
        CLUSTER
    }
    // Connection coordinates.
    private String host;
    private int port;
    private String password;
    // Client behaviour.
    private RedisType type;
    private int maxConnectionsPerHost;
    private String clientName;
    // Prefix applied to every key written by this module.
    private String namespace = "conductor";
    public RedisType getType() {
        return type;
    }
    public void setType(RedisType type) {
        this.type = type;
    }
    public String getHost() {
        return host;
    }
    public void setHost(String host) {
        this.host = host;
    }
    public int getPort() {
        return port;
    }
    public void setPort(int port) {
        this.port = port;
    }
    public String getPassword() {
        return password;
    }
    public void setPassword(String password) {
        this.password = password;
    }
    public int getMaxConnectionsPerHost() {
        return maxConnectionsPerHost;
    }
    public void setMaxConnectionsPerHost(int maxConnectionsPerHost) {
        this.maxConnectionsPerHost = maxConnectionsPerHost;
    }
    public String getClientName() {
        return clientName;
    }
    public void setClientName(String clientName) {
        this.clientName = clientName;
    }
    public String getNamespace() {
        return namespace;
    }
    public void setNamespace(String namespace) {
        this.namespace = namespace;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/kafka-event-queue/src/test/java/com/netflix/conductor/kafkaeq/eventqueue/KafkaObservableQueueTest.java | kafka-event-queue/src/test/java/com/netflix/conductor/kafkaeq/eventqueue/KafkaObservableQueueTest.java | /*
* Copyright 2024 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.kafkaeq.eventqueue;
import java.lang.reflect.Field;
import java.time.Duration;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.DescribeTopicsResult;
import org.apache.kafka.clients.admin.ListOffsetsResult;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.clients.producer.*;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.TopicPartitionInfo;
import org.apache.kafka.common.header.Headers;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.ArgumentCaptor;
import org.mockito.Mock;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.kafkaeq.config.KafkaEventQueueProperties;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import rx.Observable;
import static org.junit.jupiter.api.Assertions.*;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyCollection;
import static org.mockito.ArgumentMatchers.anyMap;
import static org.mockito.Mockito.*;
/**
 * Unit tests for {@link KafkaObservableQueue}, driven entirely by Mockito mocks of the Kafka
 * consumer, producer and admin client — no broker is started.
 *
 * <p>NOTE(review): the observe tests wait for the polling loop with {@code Thread.sleep}, so they
 * are timing-sensitive and may be flaky on slow machines; consider a latch or polling assertion.
 */
@SuppressWarnings("unchecked")
@RunWith(SpringJUnit4ClassRunner.class)
public class KafkaObservableQueueTest {
    // Instance under test, wired with the mocks below in setUp().
    private KafkaObservableQueue queue;
    @Mock private volatile MockConsumer<String, String> mockKafkaConsumer;
    @Mock private volatile MockProducer<String, String> mockKafkaProducer;
    @Mock private volatile AdminClient mockAdminClient;
    @Mock private volatile KafkaEventQueueProperties mockProperties;
    // Builds fresh mocks per test; the @Mock annotations above are effectively
    // superseded by these manual mock(...) calls.
    @Before
    public void setUp() throws Exception {
        System.out.println("Setup called");
        // Create mock instances
        this.mockKafkaConsumer = mock(MockConsumer.class);
        this.mockKafkaProducer = mock(MockProducer.class);
        this.mockAdminClient = mock(AdminClient.class);
        this.mockProperties = mock(KafkaEventQueueProperties.class);
        // Mock KafkaEventQueueProperties behavior
        when(this.mockProperties.getPollTimeDuration()).thenReturn(Duration.ofMillis(100));
        when(this.mockProperties.getDlqTopic()).thenReturn("test-dlq");
        // Create an instance of KafkaObservableQueue with the mocks
        queue =
                new KafkaObservableQueue(
                        "test-topic",
                        this.mockKafkaConsumer,
                        this.mockKafkaProducer,
                        this.mockAdminClient,
                        this.mockProperties);
    }
    // Reflection helper: overwrites a private field of the queue (e.g. the
    // unacknowledgedMessages map) so tests can seed internal state directly.
    private void injectMockField(Object target, String fieldName, Object mock) throws Exception {
        Field field = target.getClass().getDeclaredField(fieldName);
        field.setAccessible(true);
        field.set(target, mock);
    }
    // Verifies that polled records (with headers) are surfaced as Messages whose
    // ids follow the "partition-offset" convention.
    @Test
    public void testObserveWithHeaders() throws Exception {
        // Prepare mock consumer records with diverse headers, keys, and payloads
        List<ConsumerRecord<String, String>> records =
                List.of(
                        new ConsumerRecord<>("test-topic", 0, 0, "key-1", "payload-1"),
                        new ConsumerRecord<>("test-topic", 0, 1, "key-2", "{\"field\":\"value\"}"),
                        new ConsumerRecord<>("test-topic", 0, 2, null, "null-key-payload"),
                        new ConsumerRecord<>("test-topic", 0, 3, "key-3", ""),
                        new ConsumerRecord<>("test-topic", 0, 4, "key-4", "12345"),
                        new ConsumerRecord<>(
                                "test-topic",
                                0,
                                5,
                                "key-5",
                                "{\"complex\":{\"nested\":\"value\"}}"));
        // Add headers to each ConsumerRecord
        for (int i = 0; i < records.size(); i++) {
            ConsumerRecord<String, String> record = records.get(i);
            record.headers().add("header-key-" + i, ("header-value-" + i).getBytes());
        }
        ConsumerRecords<String, String> consumerRecords =
                new ConsumerRecords<>(Map.of(new TopicPartition("test-topic", 0), records));
        // Mock the KafkaConsumer poll behavior
        when(mockKafkaConsumer.poll(any(Duration.class)))
                .thenReturn(consumerRecords)
                .thenReturn(new ConsumerRecords<>(Collections.emptyMap())); // Subsequent polls
        // return empty
        // Start the queue
        queue.start();
        // Collect emitted messages
        List<Message> found = new ArrayList<>();
        Observable<Message> observable = queue.observe();
        assertNotNull(observable);
        observable.subscribe(found::add);
        // Allow polling to run (timing-sensitive; see class-level note)
        try {
            Thread.sleep(1000); // Adjust duration if necessary
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
        // Assert results
        assertNotNull(queue);
        assertEquals(6, found.size()); // Expect all 6 messages to be processed
        assertEquals("0-0", found.get(0).getId());
        assertEquals("0-1", found.get(1).getId());
        assertEquals("0-2", found.get(2).getId());
        assertEquals("0-3", found.get(3).getId());
        assertEquals("0-4", found.get(4).getId());
        assertEquals("0-5", found.get(5).getId());
        // Validate headers
        for (int i = 0; i < records.size(); i++) {
            ConsumerRecord<String, String> record = records.get(i);
            assertNotNull(record.headers());
            assertEquals(1, record.headers().toArray().length);
            assertEquals(
                    "header-value-" + i,
                    new String(record.headers().lastHeader("header-key-" + i).value()));
        }
    }
    // Verifies that the payload emitted by the queue is the JSON envelope
    // {key, headers, payload} built from each record.
    @Test
    public void testObserveWithComplexPayload() throws Exception {
        // Prepare mock consumer records with diverse headers, keys, and payloads
        List<ConsumerRecord<String, String>> records =
                List.of(
                        new ConsumerRecord<>(
                                "test-topic", 0, 0, "key-1", "{\"data\":\"payload-1\"}"),
                        new ConsumerRecord<>("test-topic", 0, 1, "key-2", "{\"field\":\"value\"}"),
                        new ConsumerRecord<>("test-topic", 0, 2, null, "null-key-payload"),
                        new ConsumerRecord<>("test-topic", 0, 3, "key-3", ""),
                        new ConsumerRecord<>("test-topic", 0, 4, "key-4", "12345"),
                        new ConsumerRecord<>(
                                "test-topic",
                                0,
                                5,
                                "key-5",
                                "{\"complex\":{\"nested\":\"value\"}}"));
        // Add headers to each ConsumerRecord
        for (int i = 0; i < records.size(); i++) {
            ConsumerRecord<String, String> record = records.get(i);
            record.headers().add("header-key-" + i, ("header-value-" + i).getBytes());
        }
        ConsumerRecords<String, String> consumerRecords =
                new ConsumerRecords<>(Map.of(new TopicPartition("test-topic", 0), records));
        // Mock the KafkaConsumer poll behavior
        when(mockKafkaConsumer.poll(any(Duration.class)))
                .thenReturn(consumerRecords)
                .thenReturn(new ConsumerRecords<>(Collections.emptyMap())); // Subsequent polls
        // return empty
        // Start the queue
        queue.start();
        // Collect emitted messages
        List<Message> found = new ArrayList<>();
        Observable<Message> observable = queue.observe();
        assertNotNull(observable);
        observable.subscribe(found::add);
        // Allow polling to run (timing-sensitive; see class-level note)
        try {
            Thread.sleep(1000); // Adjust duration if necessary
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
        // Assert results
        assertNotNull(queue);
        assertEquals(6, found.size()); // Expect all 6 messages to be processed
        // Validate individual message payloads, keys, and headers in the structured
        ObjectMapper objectMapper = new ObjectMapper();
        // JSON format
        for (int i = 0; i < records.size(); i++) {
            ConsumerRecord<String, String> record = records.get(i);
            Message message = found.get(i);
            String expectedPayload =
                    constructJsonMessage(
                            objectMapper,
                            record.key(),
                            record.headers().toArray().length > 0
                                    ? extractHeaders(record.headers())
                                    : Collections.emptyMap(),
                            record.value());
            assertEquals(expectedPayload, message.getPayload());
        }
    }
    // Flattens Kafka record headers into a String->String map for comparison.
    private Map<String, String> extractHeaders(Headers headers) {
        Map<String, String> headerMap = new HashMap<>();
        headers.forEach(header -> headerMap.put(header.key(), new String(header.value())));
        return headerMap;
    }
    // Reconstructs the expected {key, headers, payload} JSON envelope; mirrors
    // the production formatting so the tests can compare full payload strings.
    private String constructJsonMessage(
            ObjectMapper objectMapper, String key, Map<String, String> headers, String payload) {
        StringBuilder json = new StringBuilder();
        json.append("{");
        json.append("\"key\":\"").append(key != null ? key : "").append("\",");
        // Serialize headers to JSON, handling potential errors
        String headersJson = toJson(objectMapper, headers);
        if (headersJson != null) {
            json.append("\"headers\":").append(headersJson).append(",");
        } else {
            json.append("\"headers\":{}")
                    .append(","); // Default to an empty JSON object if headers are invalid
        }
        json.append("\"payload\":");
        // Detect if the payload is valid JSON
        if (isJsonValid(objectMapper, payload)) {
            json.append(payload); // Embed JSON object directly
        } else {
            json.append(payload != null ? "\"" + payload + "\"" : "null"); // Treat as plain text
        }
        json.append("}");
        return json.toString();
    }
    // Returns true when the string parses as JSON; null/empty strings are not valid.
    private boolean isJsonValid(ObjectMapper objectMapper, String json) {
        if (json == null || json.isEmpty()) {
            return false;
        }
        try {
            objectMapper.readTree(json); // Parses the JSON to check validity
            return true;
        } catch (JsonProcessingException e) {
            return false;
        }
    }
    // Serializes a value to JSON, returning null (rather than throwing) on failure.
    protected String toJson(ObjectMapper objectMapper, Object value) {
        if (value == null) {
            return null;
        }
        try {
            return objectMapper.writeValueAsString(value);
        } catch (JsonProcessingException ex) {
            return null;
        }
    }
    // Acking a tracked message should commit the offset and report no failures.
    @Test
    public void testAck() throws Exception {
        Map<String, Long> unacknowledgedMessages = new ConcurrentHashMap<>();
        unacknowledgedMessages.put("0-1", 1L);
        injectMockField(queue, "unacknowledgedMessages", unacknowledgedMessages);
        Message message = new Message("0-1", "payload", null);
        List<Message> messages = List.of(message);
        doNothing().when(mockKafkaConsumer).commitSync(anyMap());
        List<String> failedAcks = queue.ack(messages);
        assertTrue(failedAcks.isEmpty());
        verify(mockKafkaConsumer, times(1)).commitSync(anyMap());
    }
    // Nacked messages should be forwarded to the configured DLQ topic.
    @Test
    public void testNack() {
        // Arrange
        Message message = new Message("0-1", "payload", null);
        List<Message> messages = List.of(message);
        // Simulate the Kafka Producer behavior
        doAnswer(
                        invocation -> {
                            ProducerRecord<String, String> record = invocation.getArgument(0);
                            System.out.println("Simulated record sent: " + record);
                            return null; // Simulate success
                        })
                .when(mockKafkaProducer)
                .send(any(ProducerRecord.class));
        // Act
        queue.nack(messages);
        // Assert
        ArgumentCaptor<ProducerRecord<String, String>> captor =
                ArgumentCaptor.forClass(ProducerRecord.class);
        verify(mockKafkaProducer).send(captor.capture());
        ProducerRecord<String, String> actualRecord = captor.getValue();
        System.out.println("Captured Record: " + actualRecord);
        // Verify the captured record matches the expected values
        assertEquals("test-dlq", actualRecord.topic());
        assertEquals("0-1", actualRecord.key());
        assertEquals("payload", actualRecord.value());
    }
    // publish() should hand each message to the producer exactly once.
    @Test
    public void testPublish() {
        Message message = new Message("key-1", "payload", null);
        List<Message> messages = List.of(message);
        // Mock the behavior of the producer's send() method
        when(mockKafkaProducer.send(any(ProducerRecord.class), any()))
                .thenAnswer(
                        invocation -> {
                            Callback callback = invocation.getArgument(1);
                            // Simulate a successful send with mock metadata
                            callback.onCompletion(
                                    new RecordMetadata(
                                            new TopicPartition("test-topic", 0), // Topic and
                                            // partition
                                            0, // Base offset
                                            0, // Log append time
                                            0, // Create time
                                            10, // Serialized key size
                                            100 // Serialized value size
                                            ),
                                    null);
                            return null;
                        });
        // Invoke the publish method
        queue.publish(messages);
        // Verify that the producer's send() method was called exactly once
        verify(mockKafkaProducer, times(1)).send(any(ProducerRecord.class), any());
    }
    // size() should sum the latest offsets across the topic's partitions.
    @Test
    public void testSize() throws Exception {
        // Step 1: Mock TopicDescription
        TopicDescription topicDescription =
                new TopicDescription(
                        "test-topic",
                        false,
                        List.of(
                                new TopicPartitionInfo(
                                        0, null, List.of(), List.of())) // One partition
                        );
        // Simulate `describeTopics` returning the TopicDescription
        DescribeTopicsResult mockDescribeTopicsResult = mock(DescribeTopicsResult.class);
        KafkaFuture<TopicDescription> mockFutureTopicDescription =
                KafkaFuture.completedFuture(topicDescription);
        when(mockDescribeTopicsResult.topicNameValues())
                .thenReturn(Map.of("test-topic", mockFutureTopicDescription));
        when(mockAdminClient.describeTopics(anyCollection())).thenReturn(mockDescribeTopicsResult);
        // Step 2: Mock Offsets
        ListOffsetsResult.ListOffsetsResultInfo offsetInfo =
                new ListOffsetsResult.ListOffsetsResultInfo(
                        10, // Mock the offset size
                        0, // Leader epoch
                        null // Timestamp
                        );
        ListOffsetsResult mockListOffsetsResult = mock(ListOffsetsResult.class);
        KafkaFuture<Map<TopicPartition, ListOffsetsResult.ListOffsetsResultInfo>>
                mockOffsetsFuture =
                        KafkaFuture.completedFuture(
                                Map.of(new TopicPartition("test-topic", 0), offsetInfo));
        when(mockListOffsetsResult.all()).thenReturn(mockOffsetsFuture);
        when(mockAdminClient.listOffsets(anyMap())).thenReturn(mockListOffsetsResult);
        // Step 3: Call the `size` method
        long size = queue.size();
        // Step 4: Verify the size is correctly calculated
        assertEquals(10, size); // As we mocked 10 as the offset in the partition
    }
    // Same as testSize but builds the offsets map up front; kept as a second
    // happy-path variant.
    @Test
    public void testSizeWhenTopicExists() throws Exception {
        // Mock topic description
        TopicDescription topicDescription =
                new TopicDescription(
                        "test-topic",
                        false,
                        List.of(new TopicPartitionInfo(0, null, List.of(), List.of())));
        // Mock offsets
        Map<TopicPartition, ListOffsetsResult.ListOffsetsResultInfo> offsets =
                Map.of(
                        new TopicPartition("test-topic", 0),
                        new ListOffsetsResult.ListOffsetsResultInfo(
                                10L, // Offset value
                                0L, // Log append time (can be 0 if not available)
                                Optional.empty() // Leader epoch (optional, use a default value like
                                // 100)
                                ));
        // Mock AdminClient behavior for describeTopics
        DescribeTopicsResult mockDescribeTopicsResult = mock(DescribeTopicsResult.class);
        when(mockDescribeTopicsResult.topicNameValues())
                .thenReturn(Map.of("test-topic", KafkaFuture.completedFuture(topicDescription)));
        when(mockAdminClient.describeTopics(anyCollection())).thenReturn(mockDescribeTopicsResult);
        // Mock AdminClient behavior for listOffsets
        ListOffsetsResult mockListOffsetsResult = mock(ListOffsetsResult.class);
        when(mockListOffsetsResult.all()).thenReturn(KafkaFuture.completedFuture(offsets));
        when(mockAdminClient.listOffsets(anyMap())).thenReturn(mockListOffsetsResult);
        // Call size
        long size = queue.size();
        // Verify
        assertEquals(10L, size);
    }
    // An unknown topic should make size() return the -1 sentinel instead of throwing.
    @Test
    public void testSizeWhenTopicDoesNotExist() throws Exception {
        // Mock KafkaFuture to simulate a topic-not-found exception
        KafkaFuture<TopicDescription> failedFuture = mock(KafkaFuture.class);
        when(failedFuture.get())
                .thenThrow(
                        new org.apache.kafka.common.errors.UnknownTopicOrPartitionException(
                                "Topic not found"));
        // Mock DescribeTopicsResult
        DescribeTopicsResult mockDescribeTopicsResult = mock(DescribeTopicsResult.class);
        when(mockDescribeTopicsResult.topicNameValues())
                .thenReturn(Map.of("test-topic", failedFuture));
        when(mockAdminClient.describeTopics(anyCollection())).thenReturn(mockDescribeTopicsResult);
        // Call size
        long size = queue.size();
        // Verify the result
        assertEquals(-1L, size); // Return -1 for non-existent topics
    }
    // start()/stop() should toggle the running flag.
    @Test
    public void testLifecycle() {
        queue.start();
        assertTrue(queue.isRunning());
        queue.stop();
        assertFalse(queue.isRunning());
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/kafka-event-queue/src/main/java/com/netflix/conductor/kafkaeq/eventqueue/KafkaObservableQueue.java | kafka-event-queue/src/main/java/com/netflix/conductor/kafkaeq/eventqueue/KafkaObservableQueue.java | /*
* Copyright 2024 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.kafkaeq.eventqueue;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.ListOffsetsResult;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.clients.producer.*;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.TopicPartitionInfo;
import org.apache.kafka.common.header.Header;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.core.events.queue.ObservableQueue;
import com.netflix.conductor.kafkaeq.config.KafkaEventQueueProperties;
import com.netflix.conductor.metrics.Monitors;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import rx.Observable;
import rx.subscriptions.Subscriptions;
public class KafkaObservableQueue implements ObservableQueue {
    private static final Logger LOGGER = LoggerFactory.getLogger(KafkaObservableQueue.class);
    private static final String QUEUE_TYPE = "kafka";
    // Kafka topic this queue consumes from (and nack-republishes from).
    private final String topic;
    private volatile AdminClient adminClient;
    private final Consumer<String, String> kafkaConsumer;
    private final Producer<String, String> kafkaProducer;
    // Poll interval in milliseconds, derived from KafkaEventQueueProperties.
    private final long pollTimeInMS;
    // Dead-letter topic that nacked messages are forwarded to.
    private final String dlqTopic;
    // True when the consumer config enables Kafka auto-commit (default false).
    private final boolean autoCommitEnabled;
    // Tracks in-flight message ids (partition-offset) awaiting ack.
    private final Map<String, Long> unacknowledgedMessages = new ConcurrentHashMap<>();
    // Lifecycle flag toggled by start()/stop(); polling only occurs while true.
    private volatile boolean running = false;
    private final KafkaEventQueueProperties properties;
    private final ObjectMapper objectMapper = new ObjectMapper();
public KafkaObservableQueue(
String topic,
Properties consumerConfig,
Properties producerConfig,
Properties adminConfig,
KafkaEventQueueProperties properties) {
this.topic = topic;
this.kafkaConsumer = new KafkaConsumer<>(consumerConfig);
this.kafkaProducer = new KafkaProducer<>(producerConfig);
this.properties = properties;
this.pollTimeInMS = properties.getPollTimeDuration().toMillis();
this.dlqTopic = properties.getDlqTopic();
this.autoCommitEnabled =
Boolean.parseBoolean(
consumerConfig
.getOrDefault(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")
.toString());
this.adminClient = AdminClient.create(adminConfig);
}
public KafkaObservableQueue(
String topic,
Consumer<String, String> kafkaConsumer,
Producer<String, String> kafkaProducer,
AdminClient adminClient,
KafkaEventQueueProperties properties) {
this.topic = topic;
this.kafkaConsumer = kafkaConsumer;
this.kafkaProducer = kafkaProducer;
this.adminClient = adminClient;
this.properties = properties;
this.pollTimeInMS = properties.getPollTimeDuration().toMillis();
this.dlqTopic = properties.getDlqTopic();
this.autoCommitEnabled =
Boolean.parseBoolean(
properties
.toConsumerConfig()
.getOrDefault(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")
.toString());
}
@Override
public Observable<Message> observe() {
return Observable.create(
subscriber -> {
Observable<Long> interval =
Observable.interval(pollTimeInMS, TimeUnit.MILLISECONDS);
interval.flatMap(
(Long x) -> {
if (!isRunning()) {
return Observable.from(Collections.emptyList());
}
try {
ConsumerRecords<String, String> records =
kafkaConsumer.poll(
this.properties.getPollTimeDuration());
List<Message> messages = new ArrayList<>();
for (ConsumerRecord<String, String> record : records) {
try {
String messageId =
record.partition()
+ "-"
+ record.offset();
String key = record.key();
String value = record.value();
Map<String, String> headers = new HashMap<>();
// Extract headers
if (record.headers() != null) {
for (Header header : record.headers()) {
headers.put(
header.key(),
new String(header.value()));
}
}
// Log the details
LOGGER.debug(
"Input values MessageId: {} Key: {} Headers: {} Value: {}",
messageId,
key,
headers,
value);
// Construct message
String jsonMessage =
constructJsonMessage(
key, headers, value);
LOGGER.debug("Payload: {}", jsonMessage);
Message message =
new Message(
messageId, jsonMessage, null);
unacknowledgedMessages.put(
messageId, record.offset());
messages.add(message);
} catch (Exception e) {
LOGGER.error(
"Failed to process record from Kafka: {}",
record,
e);
}
}
Monitors.recordEventQueueMessagesProcessed(
QUEUE_TYPE, this.topic, messages.size());
return Observable.from(messages);
} catch (Exception e) {
LOGGER.error(
"Error while polling Kafka for topic: {}",
topic,
e);
Monitors.recordObservableQMessageReceivedErrors(
QUEUE_TYPE);
return Observable.error(e);
}
})
.subscribe(subscriber::onNext, subscriber::onError);
subscriber.add(Subscriptions.create(this::stop));
});
}
private String constructJsonMessage(String key, Map<String, String> headers, String payload) {
StringBuilder json = new StringBuilder();
json.append("{");
json.append("\"key\":\"").append(key != null ? key : "").append("\",");
// Serialize headers to JSON, handling potential errors
String headersJson = toJson(headers);
if (headersJson != null) {
json.append("\"headers\":").append(headersJson).append(",");
} else {
json.append("\"headers\":{}")
.append(","); // Default to an empty JSON object if headers are invalid
}
json.append("\"payload\":");
// Detect if the payload is valid JSON
if (isJsonValid(payload)) {
json.append(payload); // Embed JSON object directly
} else {
json.append(payload != null ? "\"" + payload + "\"" : "null"); // Treat as plain text
}
json.append("}");
return json.toString();
}
private boolean isJsonValid(String json) {
if (json == null || json.isEmpty()) {
return false;
}
try {
objectMapper.readTree(json); // Parses the JSON to check validity
return true;
} catch (JsonProcessingException e) {
return false;
}
}
protected String toJson(Object value) {
if (value == null) {
return null;
}
try {
return objectMapper.writeValueAsString(value);
} catch (JsonProcessingException ex) {
// Log the error and return a placeholder or null
LOGGER.error("Failed to convert object to JSON: {}", value, ex);
return null;
}
}
@Override
public List<String> ack(List<Message> messages) {
// If autocommit is enabled we do not run this code.
if (autoCommitEnabled == true) {
LOGGER.info("Auto commit is enabled. Skipping manual acknowledgment.");
return List.of();
}
Map<TopicPartition, OffsetAndMetadata> offsetsToCommit = new HashMap<>();
List<String> failedAcks = new ArrayList<>(); // Collect IDs of failed messages
for (Message message : messages) {
String messageId = message.getId();
if (unacknowledgedMessages.containsKey(messageId)) {
try {
String[] parts = messageId.split("-");
if (parts.length != 2) {
throw new IllegalArgumentException(
"Invalid message ID format: " + messageId);
}
// Extract partition and offset from messageId
int partition = Integer.parseInt(parts[0]);
long offset = Long.parseLong(parts[1]);
// Remove message
unacknowledgedMessages.remove(messageId);
TopicPartition tp = new TopicPartition(topic, partition);
LOGGER.debug(
"Parsed messageId: {}, topic: {}, partition: {}, offset: {}",
messageId,
topic,
partition,
offset);
offsetsToCommit.put(tp, new OffsetAndMetadata(offset + 1));
} catch (Exception e) {
LOGGER.error("Failed to prepare acknowledgment for message: {}", messageId, e);
failedAcks.add(messageId); // Add to failed list if exception occurs
}
} else {
LOGGER.warn("Message ID not found in unacknowledged messages: {}", messageId);
failedAcks.add(messageId); // Add to failed list if not found
}
}
try {
LOGGER.debug("Committing offsets: {}", offsetsToCommit);
kafkaConsumer.commitSync(offsetsToCommit); // Commit all collected offsets
} catch (CommitFailedException e) {
LOGGER.warn("Offset commit failed: {}", e.getMessage());
} catch (OffsetOutOfRangeException e) {
LOGGER.error(
"OffsetOutOfRangeException encountered for topic {}: {}",
e.partitions(),
e.getMessage());
// Reset offsets for the out-of-range partition
Map<TopicPartition, OffsetAndMetadata> offsetsToReset = new HashMap<>();
for (TopicPartition partition : e.partitions()) {
long newOffset =
kafkaConsumer.position(partition); // Default to the current position
offsetsToReset.put(partition, new OffsetAndMetadata(newOffset));
LOGGER.warn("Resetting offset for partition {} to {}", partition, newOffset);
}
// Commit the new offsets
kafkaConsumer.commitSync(offsetsToReset);
} catch (Exception e) {
LOGGER.error("Failed to commit offsets to Kafka: {}", offsetsToCommit, e);
// Add all message IDs from the current batch to the failed list
failedAcks.addAll(messages.stream().map(Message::getId).toList());
}
return failedAcks; // Return IDs of messages that were not successfully acknowledged
}
@Override
public void nack(List<Message> messages) {
for (Message message : messages) {
try {
kafkaProducer.send(
new ProducerRecord<>(dlqTopic, message.getId(), message.getPayload()));
} catch (Exception e) {
LOGGER.error("Failed to send message to DLQ. Message ID: {}", message.getId(), e);
}
}
}
@Override
public void publish(List<Message> messages) {
for (Message message : messages) {
try {
kafkaProducer.send(
new ProducerRecord<>(topic, message.getId(), message.getPayload()),
(metadata, exception) -> {
if (exception != null) {
LOGGER.error(
"Failed to publish message to Kafka. Message ID: {}",
message.getId(),
exception);
} else {
LOGGER.info(
"Message published to Kafka. Topic: {}, Partition: {}, Offset: {}",
metadata.topic(),
metadata.partition(),
metadata.offset());
}
});
} catch (Exception e) {
LOGGER.error(
"Error publishing message to Kafka. Message ID: {}", message.getId(), e);
}
}
}
@Override
public boolean rePublishIfNoAck() {
return false;
}
@Override
public void setUnackTimeout(Message message, long unackTimeout) {
// Kafka does not support visibility timeout; this can be managed externally if
// needed.
}
@Override
public long size() {
if (topicExists(this.topic) == false) {
LOGGER.info("Topic '{}' not available, will refresh metadata.", this.topic);
refreshMetadata(this.topic);
}
long topicSize = getTopicSizeUsingAdminClient();
if (topicSize != -1) {
LOGGER.info("Topic size for '{}': {}", this.topic, topicSize);
} else {
LOGGER.error("Failed to fetch topic size for '{}'", this.topic);
}
return topicSize;
}
private long getTopicSizeUsingAdminClient() {
try {
KafkaFuture<TopicDescription> topicDescriptionFuture =
adminClient
.describeTopics(Collections.singletonList(topic))
.topicNameValues()
.get(topic);
TopicDescription topicDescription = topicDescriptionFuture.get();
// Prepare request for latest offsets
Map<TopicPartition, OffsetSpec> offsetRequest = new HashMap<>();
for (TopicPartitionInfo partition : topicDescription.partitions()) {
TopicPartition tp = new TopicPartition(topic, partition.partition());
offsetRequest.put(tp, OffsetSpec.latest());
}
// Fetch offsets asynchronously
KafkaFuture<Map<TopicPartition, ListOffsetsResult.ListOffsetsResultInfo>>
offsetsFuture = adminClient.listOffsets(offsetRequest).all();
Map<TopicPartition, ListOffsetsResult.ListOffsetsResultInfo> offsets =
offsetsFuture.get();
// Calculate total size by summing offsets
return offsets.values().stream()
.mapToLong(ListOffsetsResult.ListOffsetsResultInfo::offset)
.sum();
} catch (ExecutionException e) {
if (e.getCause()
instanceof org.apache.kafka.common.errors.UnknownTopicOrPartitionException) {
LOGGER.warn("Topic '{}' does not exist or partitions unavailable.", topic);
} else {
LOGGER.error("Error fetching offsets for topic '{}': {}", topic, e.getMessage());
}
} catch (Exception e) {
LOGGER.error(
"General error fetching offsets for topic '{}': {}", topic, e.getMessage());
}
return -1;
}
@Override
public void close() {
try {
stop();
LOGGER.info("KafkaObservableQueue fully stopped and resources closed.");
} catch (Exception e) {
LOGGER.error("Error during close(): {}", e.getMessage(), e);
}
}
@Override
public void start() {
LOGGER.info("KafkaObservableQueue starting for topic: {}", topic);
if (running) {
LOGGER.warn("KafkaObservableQueue is already running for topic: {}", topic);
return;
}
try {
running = true;
kafkaConsumer.subscribe(
Collections.singletonList(topic)); // Subscribe to a single topic
LOGGER.info("KafkaObservableQueue started for topic: {}", topic);
} catch (Exception e) {
running = false;
LOGGER.error("Error starting KafkaObservableQueue for topic: {}", topic, e);
}
}
@Override
public synchronized void stop() {
LOGGER.info("Kafka consumer stopping for topic: {}", topic);
if (!running) {
LOGGER.warn("KafkaObservableQueue is already stopped for topic: {}", topic);
return;
}
try {
running = false;
try {
kafkaConsumer.unsubscribe();
kafkaConsumer.close();
LOGGER.info("Kafka consumer stopped for topic: {}", topic);
} catch (Exception e) {
LOGGER.error("Error stopping Kafka consumer for topic: {}", topic, e);
retryCloseConsumer();
}
try {
kafkaProducer.close();
LOGGER.info("Kafka producer stopped for topic: {}", topic);
} catch (Exception e) {
LOGGER.error("Error stopping Kafka producer for topic: {}", topic, e);
retryCloseProducer();
}
} catch (Exception e) {
LOGGER.error("Critical error stopping KafkaObservableQueue for topic: {}", topic, e);
}
}
private void retryCloseConsumer() {
int attempts = 3;
while (attempts > 0) {
try {
kafkaConsumer.unsubscribe();
kafkaConsumer.close();
LOGGER.info("Kafka consumer stopped for topic: {}", topic);
return; // Exit if successful
} catch (Exception e) {
LOGGER.warn(
"Error stopping Kafka consumer for topic: {}, attempts remaining: {}",
topic,
attempts - 1,
e);
attempts--;
try {
Thread.sleep(1000); // Wait before retrying
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
LOGGER.error("Thread interrupted during Kafka consumer shutdown retries");
break;
}
}
}
LOGGER.error("Failed to stop Kafka consumer for topic: {} after retries", topic);
}
private void retryCloseProducer() {
int attempts = 3;
while (attempts > 0) {
try {
kafkaProducer.close();
LOGGER.info("Kafka producer stopped for topic: {}", topic);
return; // Exit if successful
} catch (Exception e) {
LOGGER.warn(
"Error stopping Kafka producer for topic: {}, attempts remaining: {}",
topic,
attempts - 1,
e);
attempts--;
try {
Thread.sleep(1000); // Wait before retrying
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
LOGGER.error("Thread interrupted during Kafka producer shutdown retries");
break;
}
}
}
LOGGER.error("Failed to stop Kafka producer for topic: {} after retries", topic);
}
@Override
public String getType() {
return QUEUE_TYPE;
}
@Override
public String getName() {
return topic;
}
@Override
public String getURI() {
return "kafka://" + topic;
}
@Override
public boolean isRunning() {
return running;
}
public static class Builder {
private final KafkaEventQueueProperties properties;
public Builder(KafkaEventQueueProperties properties) {
this.properties = properties;
}
public KafkaObservableQueue build(final String topic, final Properties consumerOverrides) {
Properties consumerConfig = new Properties();
consumerConfig.putAll(properties.toConsumerConfig());
// To handle condutors default COMPLETED and FAILED queues
if (consumerOverrides != null) {
consumerConfig.putAll(consumerOverrides);
}
LOGGER.debug("Kafka Consumer Config: {}", consumerConfig);
Properties producerConfig = new Properties();
producerConfig.putAll(properties.toProducerConfig());
LOGGER.debug("Kafka Producer Config: {}", producerConfig);
Properties adminConfig = new Properties();
adminConfig.putAll(properties.toAdminConfig());
LOGGER.debug("Kafka Admin Config: {}", adminConfig);
try {
return new KafkaObservableQueue(
topic, consumerConfig, producerConfig, adminConfig, properties);
} catch (Exception e) {
LOGGER.error("Failed to initialize KafkaObservableQueue for topic: {}", topic, e);
throw new RuntimeException(
"Failed to initialize KafkaObservableQueue for topic: " + topic, e);
}
}
}
private boolean topicExists(String topic) {
try {
KafkaFuture<TopicDescription> future =
adminClient
.describeTopics(Collections.singletonList(topic))
.topicNameValues()
.get(topic);
future.get(); // Attempt to fetch metadata
return true;
} catch (ExecutionException e) {
if (e.getCause()
instanceof org.apache.kafka.common.errors.UnknownTopicOrPartitionException) {
LOGGER.warn("Topic '{}' does not exist.", topic);
return false;
}
LOGGER.error("Error checking if topic '{}' exists: {}", topic, e.getMessage());
return false;
} catch (Exception e) {
LOGGER.error("General error checking if topic '{}' exists: {}", topic, e.getMessage());
return false;
}
}
private void refreshMetadata(String topic) {
adminClient
.describeTopics(Collections.singletonList(topic))
.topicNameValues()
.get(topic)
.whenComplete(
(topicDescription, exception) -> {
if (exception != null) {
if (exception.getCause()
instanceof
org.apache.kafka.common.errors
.UnknownTopicOrPartitionException) {
LOGGER.warn("Topic '{}' still does not exist.", topic);
} else {
LOGGER.error(
"Error refreshing metadata for topic '{}': {}",
topic,
exception.getMessage());
}
} else {
LOGGER.info(
"Metadata refreshed for topic '{}': Partitions = {}",
topic,
topicDescription.partitions());
}
});
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/kafka-event-queue/src/main/java/com/netflix/conductor/kafkaeq/config/KafkaEventQueueConfiguration.java | kafka-event-queue/src/main/java/com/netflix/conductor/kafkaeq/config/KafkaEventQueueConfiguration.java | /*
* Copyright 2024 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.kafkaeq.config;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.events.EventQueueProvider;
import com.netflix.conductor.core.events.queue.ObservableQueue;
import com.netflix.conductor.kafkaeq.eventqueue.KafkaObservableQueue.Builder;
import com.netflix.conductor.model.TaskModel.Status;
@Configuration
@EnableConfigurationProperties(KafkaEventQueueProperties.class)
@ConditionalOnProperty(name = "conductor.event-queues.kafka.enabled", havingValue = "true")
public class KafkaEventQueueConfiguration {
    private static final Logger LOGGER =
            LoggerFactory.getLogger(KafkaEventQueueConfiguration.class);

    // Injected via the constructor; the former field-level @Autowired was redundant
    // alongside constructor injection and kept the field from being final.
    private final KafkaEventQueueProperties kafkaProperties;

    public KafkaEventQueueConfiguration(KafkaEventQueueProperties kafkaProperties) {
        this.kafkaProperties = kafkaProperties;
    }

    /** Registers the Kafka-backed {@link EventQueueProvider}. */
    @Bean
    public EventQueueProvider kafkaEventQueueProvider() {
        return new KafkaEventQueueProvider(kafkaProperties);
    }

    /**
     * Creates the default COMPLETED/FAILED status queues when Kafka is configured as the
     * default event queue type.
     *
     * @param conductorProperties app-level settings (app id, stack)
     * @param properties Kafka event-queue settings
     * @return one ObservableQueue per terminal task status
     */
    @ConditionalOnProperty(
            name = "conductor.default-event-queue.type",
            havingValue = "kafka",
            matchIfMissing = false)
    @Bean
    public Map<Status, ObservableQueue> getQueues(
            ConductorProperties conductorProperties, KafkaEventQueueProperties properties) {
        try {
            LOGGER.debug(
                    "Starting to create KafkaObservableQueues with properties: {}", properties);
            // Optional "<stack>_" suffix appended to the default queue prefix.
            String stack =
                    Optional.ofNullable(conductorProperties.getStack())
                            .filter(stackName -> stackName.length() > 0)
                            .map(stackName -> stackName + "_")
                            .orElse("");
            LOGGER.debug("Using stack: {}", stack);
            Status[] statuses = new Status[] {Status.COMPLETED, Status.FAILED};
            Map<Status, ObservableQueue> queues = new HashMap<>();
            for (Status status : statuses) {
                LOGGER.debug("Processing status: {}", status);
                // Explicit prefix wins; otherwise derive one from the app id and stack.
                String queuePrefix =
                        StringUtils.isBlank(properties.getListenerQueuePrefix())
                                ? conductorProperties.getAppId() + "_kafka_notify_" + stack
                                : properties.getListenerQueuePrefix();
                LOGGER.debug("queuePrefix: {}", queuePrefix);
                String topicName = queuePrefix + status.name();
                LOGGER.debug("topicName: {}", topicName);
                // Unique client/group ids per status topic so the consumers don't collide.
                Properties consumerOverrides = new Properties();
                consumerOverrides.put(ConsumerConfig.CLIENT_ID_CONFIG, topicName + "-consumer");
                consumerOverrides.put(ConsumerConfig.GROUP_ID_CONFIG, topicName + "-group");
                final ObservableQueue queue =
                        new Builder(properties).build(topicName, consumerOverrides);
                queues.put(status, queue);
            }
            LOGGER.debug("Successfully created queues: {}", queues);
            return queues;
        } catch (Exception e) {
            LOGGER.error("Failed to create KafkaObservableQueues", e);
            throw new RuntimeException("Failed to getQueues on KafkaEventQueueConfiguration", e);
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/kafka-event-queue/src/main/java/com/netflix/conductor/kafkaeq/config/KafkaEventQueueProvider.java | kafka-event-queue/src/main/java/com/netflix/conductor/kafkaeq/config/KafkaEventQueueProvider.java | /*
* Copyright 2024 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.kafkaeq.config;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.core.events.EventQueueProvider;
import com.netflix.conductor.core.events.queue.ObservableQueue;
import com.netflix.conductor.kafkaeq.eventqueue.KafkaObservableQueue.Builder;
/** {@link EventQueueProvider} that lazily builds and caches one Kafka queue per queue URI. */
public class KafkaEventQueueProvider implements EventQueueProvider {
    private static final Logger LOGGER = LoggerFactory.getLogger(KafkaEventQueueProvider.class);

    private final KafkaEventQueueProperties properties;

    /** Queue cache keyed by URI; each URI maps to exactly one queue instance. */
    private final Map<String, ObservableQueue> queues = new ConcurrentHashMap<>();

    public KafkaEventQueueProvider(KafkaEventQueueProperties properties) {
        this.properties = properties;
    }

    @Override
    public String getQueueType() {
        return "kafka";
    }

    @Override
    public ObservableQueue getQueue(String queueURI) {
        LOGGER.info("Creating KafkaObservableQueue for topic: {}", queueURI);
        return queues.computeIfAbsent(
                queueURI, uri -> new Builder(properties).build(uri, null));
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/kafka-event-queue/src/main/java/com/netflix/conductor/kafkaeq/config/KafkaEventQueueProperties.java | kafka-event-queue/src/main/java/com/netflix/conductor/kafkaeq/config/KafkaEventQueueProperties.java | /*
* Copyright 2024 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.kafkaeq.config;
import java.time.Duration;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.logging.log4j.core.config.plugins.validation.constraints.NotBlank;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.validation.annotation.Validated;
@ConfigurationProperties("conductor.event-queues.kafka")
@Validated
public class KafkaEventQueueProperties {
    /**
     * Kafka bootstrap servers (comma-separated).
     *
     * <p>NOTE(review): per this file's imports, {@code @NotBlank} resolves to log4j's
     * plugin-validation annotation, not {@code jakarta.validation.constraints.NotBlank}, so
     * Spring's {@code @Validated} will not enforce it — confirm and swap the import if bean
     * validation is expected here.
     */
    @NotBlank(message = "Bootstrap servers must not be blank")
    private String bootstrapServers = "kafka:29092";

    /** Dead Letter Queue (DLQ) topic for failed messages. */
    private String dlqTopic = "conductor-dlq";

    /** Prefix for dynamically created Kafka topics, if applicable. */
    private String listenerQueuePrefix = "";

    /** The polling interval for Kafka (in milliseconds). */
    private Duration pollTimeDuration = Duration.ofMillis(100);

    /** Additional properties for consumers, producers, and admin clients. */
    private Map<String, Object> consumer = new HashMap<>();
    private Map<String, Object> producer = new HashMap<>();
    private Map<String, Object> admin = new HashMap<>();

    // Getters and setters
    public String getBootstrapServers() {
        return bootstrapServers;
    }

    public void setBootstrapServers(String bootstrapServers) {
        this.bootstrapServers = bootstrapServers;
    }

    public String getDlqTopic() {
        return dlqTopic;
    }

    public void setDlqTopic(String dlqTopic) {
        this.dlqTopic = dlqTopic;
    }

    public String getListenerQueuePrefix() {
        return listenerQueuePrefix;
    }

    public void setListenerQueuePrefix(String listenerQueuePrefix) {
        this.listenerQueuePrefix = listenerQueuePrefix;
    }

    public Duration getPollTimeDuration() {
        return pollTimeDuration;
    }

    public void setPollTimeDuration(Duration pollTimeDuration) {
        this.pollTimeDuration = pollTimeDuration;
    }

    public Map<String, Object> getConsumer() {
        return consumer;
    }

    public void setConsumer(Map<String, Object> consumer) {
        this.consumer = consumer;
    }

    public Map<String, Object> getProducer() {
        return producer;
    }

    public void setProducer(Map<String, Object> producer) {
        this.producer = producer;
    }

    public Map<String, Object> getAdmin() {
        return admin;
    }

    public void setAdmin(Map<String, Object> admin) {
        this.admin = admin;
    }

    /**
     * Generates configuration properties for Kafka consumers. Maps against `ConsumerConfig` keys.
     */
    public Map<String, Object> toConsumerConfig() {
        Map<String, Object> config = mapProperties(ConsumerConfig.configNames(), consumer);
        // Ensure key.deserializer and value.deserializer are always set.
        setDefaultIfNullOrEmpty(
                config,
                ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");
        setDefaultIfNullOrEmpty(
                config,
                ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");
        setDefaultIfNullOrEmpty(config, ConsumerConfig.GROUP_ID_CONFIG, "conductor-group");
        setDefaultIfNullOrEmpty(config, ConsumerConfig.CLIENT_ID_CONFIG, "consumer-client");
        return config;
    }

    /**
     * Generates configuration properties for Kafka producers. Maps against `ProducerConfig` keys.
     */
    public Map<String, Object> toProducerConfig() {
        Map<String, Object> config = mapProperties(ProducerConfig.configNames(), producer);
        setDefaultIfNullOrEmpty(
                config,
                ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringSerializer");
        setDefaultIfNullOrEmpty(
                config,
                ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringSerializer");
        // Fixed copy-paste defect: the producer's default client.id was "admin-client",
        // which collided with the admin client's id below.
        setDefaultIfNullOrEmpty(config, ProducerConfig.CLIENT_ID_CONFIG, "producer-client");
        return config;
    }

    /**
     * Generates configuration properties for Kafka AdminClient. Maps against `AdminClientConfig`
     * keys.
     */
    public Map<String, Object> toAdminConfig() {
        Map<String, Object> config = mapProperties(AdminClientConfig.configNames(), admin);
        setDefaultIfNullOrEmpty(config, ConsumerConfig.CLIENT_ID_CONFIG, "admin-client");
        return config;
    }

    /**
     * Filters and maps properties based on the allowed keys for a specific Kafka client
     * configuration.
     *
     * @param allowedKeys The keys allowed for the specific Kafka client configuration.
     * @param inputProperties The user-specified properties to filter.
     * @return A filtered map containing only valid properties.
     */
    private Map<String, Object> mapProperties(
            Iterable<String> allowedKeys, Map<String, Object> inputProperties) {
        Map<String, Object> config = new HashMap<>();
        for (String key : allowedKeys) {
            if (inputProperties.containsKey(key)) {
                config.put(key, inputProperties.get(key));
            }
        }
        // Ensure bootstrap servers are always present.
        setDefaultIfNullOrEmpty(
                config, AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        return config;
    }

    /** Puts {@code defaultValue} under {@code key} when the current value is null or blank. */
    private void setDefaultIfNullOrEmpty(
            Map<String, Object> config, String key, String defaultValue) {
        Object value = config.get(key);
        if (value == null || (value instanceof String && ((String) value).isBlank())) {
            config.put(key, defaultValue);
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/azureblob-storage/src/test/java/com/netflix/conductor/azureblob/storage/AzureBlobPayloadStorageTest.java | azureblob-storage/src/test/java/com/netflix/conductor/azureblob/storage/AzureBlobPayloadStorageTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.azureblob.storage;
import java.time.Duration;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import com.netflix.conductor.azureblob.config.AzureBlobProperties;
import com.netflix.conductor.common.run.ExternalStorageLocation;
import com.netflix.conductor.common.utils.ExternalPayloadStorage;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.core.utils.IDGenerator;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class AzureBlobPayloadStorageTest {
private AzureBlobProperties properties;
private IDGenerator idGenerator;
@Before
public void setUp() {
properties = mock(AzureBlobProperties.class);
idGenerator = new IDGenerator();
when(properties.getConnectionString()).thenReturn(null);
when(properties.getContainerName()).thenReturn("conductor-payloads");
when(properties.getEndpoint()).thenReturn(null);
when(properties.getSasToken()).thenReturn(null);
when(properties.getSignedUrlExpirationDuration()).thenReturn(Duration.ofSeconds(5));
when(properties.getWorkflowInputPath()).thenReturn("workflow/input/");
when(properties.getWorkflowOutputPath()).thenReturn("workflow/output/");
when(properties.getTaskInputPath()).thenReturn("task/input");
when(properties.getTaskOutputPath()).thenReturn("task/output/");
}
/** Dummy credentials Azure SDK doesn't work with Azurite since it cleans parameters */
private final String azuriteConnectionString =
"DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;EndpointSuffix=localhost";
@Rule public ExpectedException expectedException = ExpectedException.none();
@Test
public void testNoStorageAccount() {
expectedException.expect(NonTransientException.class);
new AzureBlobPayloadStorage(idGenerator, properties);
}
@Test
public void testUseConnectionString() {
when(properties.getConnectionString()).thenReturn(azuriteConnectionString);
new AzureBlobPayloadStorage(idGenerator, properties);
}
@Test
public void testUseEndpoint() {
String azuriteEndpoint = "http://127.0.0.1:10000/";
when(properties.getEndpoint()).thenReturn(azuriteEndpoint);
new AzureBlobPayloadStorage(idGenerator, properties);
}
@Test
public void testGetLocationFixedPath() {
when(properties.getConnectionString()).thenReturn(azuriteConnectionString);
AzureBlobPayloadStorage azureBlobPayloadStorage =
new AzureBlobPayloadStorage(idGenerator, properties);
String path = "somewhere";
ExternalStorageLocation externalStorageLocation =
azureBlobPayloadStorage.getLocation(
ExternalPayloadStorage.Operation.READ,
ExternalPayloadStorage.PayloadType.WORKFLOW_INPUT,
path);
assertNotNull(externalStorageLocation);
assertEquals(path, externalStorageLocation.getPath());
assertNotNull(externalStorageLocation.getUri());
}
private void testGetLocation(
AzureBlobPayloadStorage azureBlobPayloadStorage,
ExternalPayloadStorage.Operation operation,
ExternalPayloadStorage.PayloadType payloadType,
String expectedPath) {
ExternalStorageLocation externalStorageLocation =
azureBlobPayloadStorage.getLocation(operation, payloadType, null);
assertNotNull(externalStorageLocation);
assertNotNull(externalStorageLocation.getPath());
assertTrue(externalStorageLocation.getPath().startsWith(expectedPath));
assertNotNull(externalStorageLocation.getUri());
assertTrue(externalStorageLocation.getUri().contains(expectedPath));
}
@Test
public void testGetAllLocations() {
when(properties.getConnectionString()).thenReturn(azuriteConnectionString);
AzureBlobPayloadStorage azureBlobPayloadStorage =
new AzureBlobPayloadStorage(idGenerator, properties);
testGetLocation(
azureBlobPayloadStorage,
ExternalPayloadStorage.Operation.READ,
ExternalPayloadStorage.PayloadType.WORKFLOW_INPUT,
properties.getWorkflowInputPath());
testGetLocation(
azureBlobPayloadStorage,
ExternalPayloadStorage.Operation.READ,
ExternalPayloadStorage.PayloadType.WORKFLOW_OUTPUT,
properties.getWorkflowOutputPath());
testGetLocation(
azureBlobPayloadStorage,
ExternalPayloadStorage.Operation.READ,
ExternalPayloadStorage.PayloadType.TASK_INPUT,
properties.getTaskInputPath());
testGetLocation(
azureBlobPayloadStorage,
ExternalPayloadStorage.Operation.READ,
ExternalPayloadStorage.PayloadType.TASK_OUTPUT,
properties.getTaskOutputPath());
testGetLocation(
azureBlobPayloadStorage,
ExternalPayloadStorage.Operation.WRITE,
ExternalPayloadStorage.PayloadType.WORKFLOW_INPUT,
properties.getWorkflowInputPath());
testGetLocation(
azureBlobPayloadStorage,
ExternalPayloadStorage.Operation.WRITE,
ExternalPayloadStorage.PayloadType.WORKFLOW_OUTPUT,
properties.getWorkflowOutputPath());
testGetLocation(
azureBlobPayloadStorage,
ExternalPayloadStorage.Operation.WRITE,
ExternalPayloadStorage.PayloadType.TASK_INPUT,
properties.getTaskInputPath());
testGetLocation(
azureBlobPayloadStorage,
ExternalPayloadStorage.Operation.WRITE,
ExternalPayloadStorage.PayloadType.TASK_OUTPUT,
properties.getTaskOutputPath());
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
/*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.azureblob.storage;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.io.UncheckedIOException;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.azureblob.config.AzureBlobProperties;
import com.netflix.conductor.common.run.ExternalStorageLocation;
import com.netflix.conductor.common.utils.ExternalPayloadStorage;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.core.utils.IDGenerator;
import com.azure.core.exception.UnexpectedLengthException;
import com.azure.core.util.Context;
import com.azure.storage.blob.BlobContainerClient;
import com.azure.storage.blob.BlobContainerClientBuilder;
import com.azure.storage.blob.models.BlobHttpHeaders;
import com.azure.storage.blob.models.BlobStorageException;
import com.azure.storage.blob.sas.BlobSasPermission;
import com.azure.storage.blob.sas.BlobServiceSasSignatureValues;
import com.azure.storage.blob.specialized.BlockBlobClient;
import com.azure.storage.common.Utility;
import com.azure.storage.common.implementation.credentials.SasTokenCredential;
/**
* An implementation of {@link ExternalPayloadStorage} using Azure Blob for storing large JSON
* payload data.
*
* @see <a href="https://github.com/Azure/azure-sdk-for-java">Azure Java SDK</a>
*/
public class AzureBlobPayloadStorage implements ExternalPayloadStorage {

    private static final Logger LOGGER = LoggerFactory.getLogger(AzureBlobPayloadStorage.class);

    private static final String CONTENT_TYPE = "application/json";

    private final IDGenerator idGenerator;
    private final String workflowInputPath;
    private final String workflowOutputPath;
    private final String taskInputPath;
    private final String taskOutputPath;
    private final BlobContainerClient blobContainerClient;
    private final long expirationSec;

    // Raw SAS token with any leading '?' stripped, or null when SAS auth is not configured.
    // Replaces the previous use of com.azure.storage.common.implementation.credentials
    // .SasTokenCredential, an SDK-internal ("implementation" package) class that is not part
    // of the Azure SDK's public API. Storing the string also avoids the latent NPE the old
    // code had for an empty (non-null) token, where fromSasTokenString returned null and
    // getSasToken() was then called on it.
    private final String sasToken;

    /**
     * Builds the blob container client from either a connection string or an endpoint
     * (optionally combined with a SAS token). Exactly one of the two must be configured.
     *
     * @param idGenerator generator used to create unique blob names for new payloads
     * @param properties Azure Blob configuration (paths, credentials, expiration)
     * @throws NonTransientException if neither a connection string nor an endpoint is set
     */
    public AzureBlobPayloadStorage(IDGenerator idGenerator, AzureBlobProperties properties) {
        this.idGenerator = idGenerator;
        workflowInputPath = properties.getWorkflowInputPath();
        workflowOutputPath = properties.getWorkflowOutputPath();
        taskInputPath = properties.getTaskInputPath();
        taskOutputPath = properties.getTaskOutputPath();
        expirationSec = properties.getSignedUrlExpirationDuration().getSeconds();
        String connectionString = properties.getConnectionString();
        String containerName = properties.getContainerName();
        String endpoint = properties.getEndpoint();
        String configuredSasToken = properties.getSasToken();
        BlobContainerClientBuilder blobContainerClientBuilder = new BlobContainerClientBuilder();
        if (connectionString != null) {
            // Connection-string auth takes precedence; any configured SAS token is ignored,
            // matching the original behavior.
            blobContainerClientBuilder.connectionString(connectionString);
            this.sasToken = null;
        } else if (endpoint != null) {
            blobContainerClientBuilder.endpoint(endpoint);
            if (configuredSasToken != null) {
                // Normalize the same way SasTokenCredential.fromSasTokenString did:
                // drop a single leading '?' so the token can be appended after our own '?'.
                this.sasToken =
                        configuredSasToken.startsWith("?")
                                ? configuredSasToken.substring(1)
                                : configuredSasToken;
                blobContainerClientBuilder.sasToken(this.sasToken);
            } else {
                this.sasToken = null;
            }
        } else {
            String msg = "Missing property for connectionString OR endpoint";
            LOGGER.error(msg);
            throw new NonTransientException(msg);
        }
        blobContainerClient = blobContainerClientBuilder.containerName(containerName).buildClient();
    }

    /**
     * @param operation the type of {@link Operation} to be performed
     * @param payloadType the {@link PayloadType} that is being accessed
     * @param path the blob name to use; a fresh key is generated when blank
     * @return a {@link ExternalStorageLocation} object which contains the pre-signed URL and the
     *     azure blob name for the json payload
     */
    @Override
    public ExternalStorageLocation getLocation(
            Operation operation, PayloadType payloadType, String path) {
        try {
            ExternalStorageLocation externalStorageLocation = new ExternalStorageLocation();
            // Honor a caller-supplied blob name; otherwise derive one from the payload type.
            String objectKey =
                    StringUtils.isNotBlank(path) ? path : getObjectKey(payloadType);
            externalStorageLocation.setPath(objectKey);
            BlockBlobClient blockBlobClient =
                    blobContainerClient.getBlobClient(objectKey).getBlockBlobClient();
            String blobUrl = Utility.urlDecode(blockBlobClient.getBlobUrl());
            if (sasToken != null) {
                // A SAS token was supplied via configuration; reuse it as-is.
                blobUrl = blobUrl + "?" + sasToken;
            } else {
                // No pre-configured SAS: generate a short-lived one scoped to the operation.
                BlobSasPermission blobSasPermission = new BlobSasPermission();
                switch (operation) {
                    case READ:
                        blobSasPermission.setReadPermission(true);
                        break;
                    case WRITE:
                        blobSasPermission.setWritePermission(true);
                        blobSasPermission.setCreatePermission(true);
                        break;
                }
                BlobServiceSasSignatureValues blobServiceSasSignatureValues =
                        new BlobServiceSasSignatureValues(
                                OffsetDateTime.now(ZoneOffset.UTC).plusSeconds(expirationSec),
                                blobSasPermission);
                blobUrl =
                        blobUrl + "?" + blockBlobClient.generateSas(blobServiceSasSignatureValues);
            }
            externalStorageLocation.setUri(blobUrl);
            return externalStorageLocation;
        } catch (BlobStorageException e) {
            String msg = "Error communicating with Azure";
            LOGGER.error(msg, e);
            throw new NonTransientException(msg, e);
        }
    }

    /**
     * Uploads the payload to the given azure blob name. It is expected that the caller retrieves
     * the blob name using {@link #getLocation(Operation, PayloadType, String)} before making this
     * call.
     *
     * @param path the name of the blob to be uploaded
     * @param payload an {@link InputStream} containing the json payload which is to be uploaded
     * @param payloadSize the size of the json payload in bytes
     * @throws NonTransientException if the upload fails for any reason
     */
    @Override
    public void upload(String path, InputStream payload, long payloadSize) {
        try {
            BlockBlobClient blockBlobClient =
                    blobContainerClient.getBlobClient(path).getBlockBlobClient();
            BlobHttpHeaders blobHttpHeaders = new BlobHttpHeaders().setContentType(CONTENT_TYPE);
            blockBlobClient.uploadWithResponse(
                    payload,
                    payloadSize,
                    blobHttpHeaders,
                    null,
                    null,
                    null,
                    null,
                    null,
                    Context.NONE);
        } catch (BlobStorageException | UncheckedIOException | UnexpectedLengthException e) {
            String msg = "Error communicating with Azure";
            LOGGER.error(msg, e);
            throw new NonTransientException(msg, e);
        }
    }

    /**
     * Downloads the payload stored in an azure blob.
     *
     * @param path the path of the blob
     * @return an input stream containing the contents of the object Caller is expected to close the
     *     input stream.
     * @throws NonTransientException if the download fails for any reason
     */
    @Override
    public InputStream download(String path) {
        try {
            BlockBlobClient blockBlobClient =
                    blobContainerClient.getBlobClient(path).getBlockBlobClient();
            // Avoid another call to the api to get the blob size
            // (blockBlobClient.getProperties().value().blobSize() would pre-size the buffer).
            ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
            blockBlobClient.download(outputStream);
            return new ByteArrayInputStream(outputStream.toByteArray());
        } catch (BlobStorageException | UncheckedIOException | NullPointerException e) {
            // NOTE(review): catching NullPointerException masks programming errors; kept for
            // backward compatibility with callers that expect a NonTransientException here.
            String msg = "Error communicating with Azure";
            LOGGER.error(msg, e);
            throw new NonTransientException(msg, e);
        }
    }

    /**
     * Build path on external storage. Copied from S3PayloadStorage.
     *
     * @param payloadType the {@link PayloadType} which will determine the base path of the object
     * @return External Storage path: configured base path + generated id + ".json"
     */
    private String getObjectKey(PayloadType payloadType) {
        StringBuilder stringBuilder = new StringBuilder();
        switch (payloadType) {
            case WORKFLOW_INPUT:
                stringBuilder.append(workflowInputPath);
                break;
            case WORKFLOW_OUTPUT:
                stringBuilder.append(workflowOutputPath);
                break;
            case TASK_INPUT:
                stringBuilder.append(taskInputPath);
                break;
            case TASK_OUTPUT:
                stringBuilder.append(taskOutputPath);
                break;
        }
        stringBuilder.append(idGenerator.generate()).append(".json");
        return stringBuilder.toString();
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.