repo stringclasses 1k
values | file_url stringlengths 96 373 | file_path stringlengths 11 294 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 6
values | commit_sha stringclasses 1k
values | retrieved_at stringdate 2026-01-04 14:45:56 2026-01-04 18:30:23 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/rest/src/main/java/com/netflix/conductor/rest/startup/KitchenSinkInitializer.java | rest/src/main/java/com/netflix/conductor/rest/startup/KitchenSinkInitializer.java | /*
* Copyright 2021 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.rest.startup;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.context.event.ApplicationReadyEvent;
import org.springframework.boot.web.client.RestTemplateBuilder;
import org.springframework.context.event.EventListener;
import org.springframework.core.io.Resource;
import org.springframework.http.HttpEntity;
import org.springframework.stereotype.Component;
import org.springframework.util.FileCopyUtils;
import org.springframework.util.LinkedMultiValueMap;
import org.springframework.util.MultiValueMap;
import org.springframework.web.client.RestTemplate;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.core.exception.ConflictException;
import static org.springframework.http.HttpHeaders.CONTENT_TYPE;
import static org.springframework.http.MediaType.APPLICATION_JSON_VALUE;
@Component
public class KitchenSinkInitializer {
private static final Logger LOGGER = LoggerFactory.getLogger(KitchenSinkInitializer.class);
private final RestTemplate restTemplate;
@Value("${loadSample:false}")
private boolean loadSamples;
@Value("${server.port:8080}")
private int port;
@Value("classpath:./kitchensink/kitchensink.json")
private Resource kitchenSink;
@Value("classpath:./kitchensink/sub_flow_1.json")
private Resource subFlow;
@Value("classpath:./kitchensink/kitchenSink-ephemeralWorkflowWithStoredTasks.json")
private Resource ephemeralWorkflowWithStoredTasks;
@Value("classpath:./kitchensink/kitchenSink-ephemeralWorkflowWithEphemeralTasks.json")
private Resource ephemeralWorkflowWithEphemeralTasks;
public KitchenSinkInitializer(RestTemplateBuilder restTemplateBuilder) {
this.restTemplate = restTemplateBuilder.build();
}
@EventListener(ApplicationReadyEvent.class)
public void setupKitchenSink() {
try {
if (loadSamples) {
LOGGER.info("Loading Kitchen Sink examples");
createKitchenSink();
}
} catch (ConflictException ignored) {
// Already present in the system :)
} catch (Exception e) {
LOGGER.error("Error initializing kitchen sink", e);
}
}
private void createKitchenSink() throws Exception {
List<TaskDef> taskDefs = new LinkedList<>();
TaskDef taskDef;
for (int i = 0; i < 40; i++) {
taskDef = new TaskDef("task_" + i, "task_" + i, 1, 0);
taskDef.setOwnerEmail("example@email.com");
taskDefs.add(taskDef);
}
taskDef = new TaskDef("search_elasticsearch", "search_elasticsearch", 1, 0);
taskDef.setOwnerEmail("example@email.com");
taskDefs.add(taskDef);
restTemplate.postForEntity(url("/api/metadata/taskdefs"), taskDefs, Object.class);
/*
* Kitchensink example (stored workflow with stored tasks)
*/
MultiValueMap<String, String> headers = new LinkedMultiValueMap<>();
headers.add(CONTENT_TYPE, APPLICATION_JSON_VALUE);
HttpEntity<String> request = new HttpEntity<>(readToString(kitchenSink), headers);
restTemplate.postForEntity(url("/api/metadata/workflow"), request, Map.class);
request = new HttpEntity<>(readToString(subFlow), headers);
restTemplate.postForEntity(url("/api/metadata/workflow"), request, Map.class);
restTemplate.postForEntity(
url("/api/workflow/kitchensink"),
Collections.singletonMap("task2Name", "task_5"),
String.class);
LOGGER.info("Kitchen sink workflow is created!");
/*
* Kitchensink example with ephemeral workflow and stored tasks
*/
request = new HttpEntity<>(readToString(ephemeralWorkflowWithStoredTasks), headers);
restTemplate.postForEntity(url("/api/workflow"), request, String.class);
LOGGER.info("Ephemeral Kitchen sink workflow with stored tasks is created!");
/*
* Kitchensink example with ephemeral workflow and ephemeral tasks
*/
request = new HttpEntity<>(readToString(ephemeralWorkflowWithEphemeralTasks), headers);
restTemplate.postForEntity(url("/api/workflow"), request, String.class);
LOGGER.info("Ephemeral Kitchen sink workflow with ephemeral tasks is created!");
}
private String readToString(Resource resource) throws IOException {
return FileCopyUtils.copyToString(new InputStreamReader(resource.getInputStream()));
}
private String url(String path) {
return "http://localhost:" + port + path;
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/awss3-storage/src/main/java/com/netflix/conductor/s3/storage/S3PayloadStorage.java | awss3-storage/src/main/java/com/netflix/conductor/s3/storage/S3PayloadStorage.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.s3.storage;
import java.io.InputStream;
import java.time.Duration;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.common.run.ExternalStorageLocation;
import com.netflix.conductor.common.utils.ExternalPayloadStorage;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.core.exception.TransientException;
import com.netflix.conductor.core.utils.IDGenerator;
import com.netflix.conductor.s3.config.S3Properties;
import software.amazon.awssdk.core.exception.SdkException;
import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.GetObjectRequest;
import software.amazon.awssdk.services.s3.model.PutObjectRequest;
import software.amazon.awssdk.services.s3.presigner.S3Presigner;
import software.amazon.awssdk.services.s3.presigner.model.GetObjectPresignRequest;
import software.amazon.awssdk.services.s3.presigner.model.PutObjectPresignRequest;
/**
* An implementation of {@link ExternalPayloadStorage} using AWS S3 for storing large JSON payload
* data.
*
* <p><em>NOTE: The S3 client assumes that access to S3 is configured on the instance.</em>
*
* @see <a
* href="https://docs.aws.amazon.com/sdk-for-java/latest/developer-guide/credentials.html">AWS
* SDK for Java v2 Credentials</a>
*/
public class S3PayloadStorage implements ExternalPayloadStorage {
private static final Logger LOGGER = LoggerFactory.getLogger(S3PayloadStorage.class);
private static final String CONTENT_TYPE = "application/json";
private final IDGenerator idGenerator;
private final S3Client s3Client;
private final S3Presigner s3Presigner;
private final String bucketName;
private final long expirationSec;
public S3PayloadStorage(
IDGenerator idGenerator,
S3Properties properties,
S3Client s3Client,
S3Presigner s3Presigner) {
this.idGenerator = idGenerator;
this.s3Client = s3Client;
this.s3Presigner = s3Presigner;
this.bucketName = properties.getBucketName();
this.expirationSec = properties.getSignedUrlExpirationDuration().getSeconds();
}
/**
* @param operation the type of {@link Operation} to be performed
* @param payloadType the {@link PayloadType} that is being accessed
* @return a {@link ExternalStorageLocation} object which contains the pre-signed URL and the s3
* object key for the json payload
*/
@Override
public ExternalStorageLocation getLocation(
Operation operation, PayloadType payloadType, String path) {
try {
ExternalStorageLocation externalStorageLocation = new ExternalStorageLocation();
Duration signatureDuration = Duration.ofSeconds(expirationSec);
String objectKey;
if (StringUtils.isNotBlank(path)) {
objectKey = path;
} else {
objectKey = getObjectKey(payloadType);
}
externalStorageLocation.setPath(objectKey);
String presignedUrl;
if (operation == Operation.WRITE) {
// For PUT operations
PutObjectRequest putObjectRequest =
PutObjectRequest.builder()
.bucket(bucketName)
.key(objectKey)
.contentType(CONTENT_TYPE)
.build();
PutObjectPresignRequest presignRequest =
PutObjectPresignRequest.builder()
.signatureDuration(signatureDuration)
.putObjectRequest(putObjectRequest)
.build();
presignedUrl = s3Presigner.presignPutObject(presignRequest).url().toString();
} else {
// For GET operations
GetObjectRequest getObjectRequest =
GetObjectRequest.builder().bucket(bucketName).key(objectKey).build();
GetObjectPresignRequest presignRequest =
GetObjectPresignRequest.builder()
.signatureDuration(signatureDuration)
.getObjectRequest(getObjectRequest)
.build();
presignedUrl = s3Presigner.presignGetObject(presignRequest).url().toString();
}
externalStorageLocation.setUri(presignedUrl);
return externalStorageLocation;
} catch (SdkException e) {
String msg =
String.format(
"Error communicating with S3 - operation:%s, payloadType: %s, path: %s",
operation, payloadType, path);
LOGGER.error(msg, e);
throw new TransientException(msg, e);
} catch (Exception e) {
String msg = "Error generating presigned URL";
LOGGER.error(msg, e);
throw new NonTransientException(msg, e);
}
}
/**
* Uploads the payload to the given s3 object key. It is expected that the caller retrieves the
* object key using {@link #getLocation(Operation, PayloadType, String)} before making this
* call.
*
* @param path the s3 key of the object to be uploaded
* @param payload an {@link InputStream} containing the json payload which is to be uploaded
* @param payloadSize the size of the json payload in bytes
*/
@Override
public void upload(String path, InputStream payload, long payloadSize) {
try {
PutObjectRequest request =
PutObjectRequest.builder()
.bucket(bucketName)
.key(path)
.contentType(CONTENT_TYPE)
.contentLength(payloadSize)
.build();
s3Client.putObject(request, RequestBody.fromInputStream(payload, payloadSize));
} catch (SdkException e) {
String msg =
String.format(
"Error uploading to S3 - path:%s, payloadSize: %d", path, payloadSize);
LOGGER.error(msg, e);
throw new TransientException(msg, e);
}
}
/**
* Downloads the payload stored in the s3 object.
*
* @param path the S3 key of the object
* @return an input stream containing the contents of the object Caller is expected to close the
* input stream.
*/
@Override
public InputStream download(String path) {
try {
GetObjectRequest request =
GetObjectRequest.builder().bucket(bucketName).key(path).build();
return s3Client.getObject(request);
} catch (SdkException e) {
String msg = String.format("Error downloading from S3 - path:%s", path);
LOGGER.error(msg, e);
throw new TransientException(msg, e);
}
}
private String getObjectKey(PayloadType payloadType) {
StringBuilder stringBuilder = new StringBuilder();
switch (payloadType) {
case WORKFLOW_INPUT:
stringBuilder.append("workflow/input/");
break;
case WORKFLOW_OUTPUT:
stringBuilder.append("workflow/output/");
break;
case TASK_INPUT:
stringBuilder.append("task/input/");
break;
case TASK_OUTPUT:
stringBuilder.append("task/output/");
break;
}
stringBuilder.append(idGenerator.generate()).append(".json");
return stringBuilder.toString();
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/awss3-storage/src/main/java/com/netflix/conductor/s3/config/S3Properties.java | awss3-storage/src/main/java/com/netflix/conductor/s3/config/S3Properties.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.s3.config;
import java.time.Duration;
import java.time.temporal.ChronoUnit;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.convert.DurationUnit;
@ConfigurationProperties("conductor.external-payload-storage.s3")
public class S3Properties {
/** The s3 bucket name where the payloads will be stored */
private String bucketName = "conductor_payloads";
/** The time (in seconds) for which the signed url will be valid */
@DurationUnit(ChronoUnit.SECONDS)
private Duration signedUrlExpirationDuration = Duration.ofSeconds(5);
/** The AWS region of the s3 bucket */
private String region = "us-east-1";
public String getBucketName() {
return bucketName;
}
public void setBucketName(String bucketName) {
this.bucketName = bucketName;
}
public Duration getSignedUrlExpirationDuration() {
return signedUrlExpirationDuration;
}
public void setSignedUrlExpirationDuration(Duration signedUrlExpirationDuration) {
this.signedUrlExpirationDuration = signedUrlExpirationDuration;
}
public String getRegion() {
return region;
}
public void setRegion(String region) {
this.region = region;
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/awss3-storage/src/main/java/com/netflix/conductor/s3/config/S3Configuration.java | awss3-storage/src/main/java/com/netflix/conductor/s3/config/S3Configuration.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.s3.config;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import com.netflix.conductor.common.utils.ExternalPayloadStorage;
import com.netflix.conductor.core.utils.IDGenerator;
import com.netflix.conductor.s3.storage.S3PayloadStorage;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.presigner.S3Presigner;
@Configuration
@EnableConfigurationProperties(S3Properties.class)
@ConditionalOnProperty(name = "conductor.external-payload-storage.type", havingValue = "s3")
public class S3Configuration {
@Bean
public ExternalPayloadStorage s3ExternalPayloadStorage(
IDGenerator idGenerator,
S3Properties properties,
S3Client s3Client,
S3Presigner s3Presigner) {
return new S3PayloadStorage(idGenerator, properties, s3Client, s3Presigner);
}
@ConditionalOnProperty(
name = "conductor.external-payload-storage.s3.use_default_client",
havingValue = "true",
matchIfMissing = true)
@Bean
public S3Client s3Client(S3Properties properties) {
return S3Client.builder().region(Region.of(properties.getRegion())).build();
}
@Bean
public S3Presigner s3Presigner(S3Properties properties) {
return S3Presigner.builder().region(Region.of(properties.getRegion())).build();
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/os-persistence/src/test/java/com/netflix/conductor/os/dao/query/parser/TestExpression.java | os-persistence/src/test/java/com/netflix/conductor/os/dao/query/parser/TestExpression.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.os.dao.query.parser;
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import org.junit.Test;
import com.netflix.conductor.os.dao.query.parser.internal.AbstractParserTest;
import com.netflix.conductor.os.dao.query.parser.internal.ConstValue;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
/**
* @author Viren
*/
public class TestExpression extends AbstractParserTest {
@Test
public void test() throws Exception {
String test =
"type='IMAGE' AND subType ='sdp' AND (metadata.width > 50 OR metadata.height > 50)";
// test = "type='IMAGE' AND subType ='sdp'";
// test = "(metadata.type = 'IMAGE')";
InputStream is = new BufferedInputStream(new ByteArrayInputStream(test.getBytes()));
Expression expr = new Expression(is);
System.out.println(expr);
assertTrue(expr.isBinaryExpr());
assertNull(expr.getGroupedExpression());
assertNotNull(expr.getNameValue());
NameValue nv = expr.getNameValue();
assertEquals("type", nv.getName().getName());
assertEquals("=", nv.getOp().getOperator());
assertEquals("\"IMAGE\"", nv.getValue().getValue());
Expression rhs = expr.getRightHandSide();
assertNotNull(rhs);
assertTrue(rhs.isBinaryExpr());
nv = rhs.getNameValue();
assertNotNull(nv); // subType = sdp
assertNull(rhs.getGroupedExpression());
assertEquals("subType", nv.getName().getName());
assertEquals("=", nv.getOp().getOperator());
assertEquals("\"sdp\"", nv.getValue().getValue());
assertEquals("AND", rhs.getOperator().getOperator());
rhs = rhs.getRightHandSide();
assertNotNull(rhs);
assertFalse(rhs.isBinaryExpr());
GroupedExpression ge = rhs.getGroupedExpression();
assertNotNull(ge);
expr = ge.getExpression();
assertNotNull(expr);
assertTrue(expr.isBinaryExpr());
nv = expr.getNameValue();
assertNotNull(nv);
assertEquals("metadata.width", nv.getName().getName());
assertEquals(">", nv.getOp().getOperator());
assertEquals("50", nv.getValue().getValue());
assertEquals("OR", expr.getOperator().getOperator());
rhs = expr.getRightHandSide();
assertNotNull(rhs);
assertFalse(rhs.isBinaryExpr());
nv = rhs.getNameValue();
assertNotNull(nv);
assertEquals("metadata.height", nv.getName().getName());
assertEquals(">", nv.getOp().getOperator());
assertEquals("50", nv.getValue().getValue());
}
@Test
public void testWithSysConstants() throws Exception {
String test = "type='IMAGE' AND subType ='sdp' AND description IS null";
InputStream is = new BufferedInputStream(new ByteArrayInputStream(test.getBytes()));
Expression expr = new Expression(is);
System.out.println(expr);
assertTrue(expr.isBinaryExpr());
assertNull(expr.getGroupedExpression());
assertNotNull(expr.getNameValue());
NameValue nv = expr.getNameValue();
assertEquals("type", nv.getName().getName());
assertEquals("=", nv.getOp().getOperator());
assertEquals("\"IMAGE\"", nv.getValue().getValue());
Expression rhs = expr.getRightHandSide();
assertNotNull(rhs);
assertTrue(rhs.isBinaryExpr());
nv = rhs.getNameValue();
assertNotNull(nv); // subType = sdp
assertNull(rhs.getGroupedExpression());
assertEquals("subType", nv.getName().getName());
assertEquals("=", nv.getOp().getOperator());
assertEquals("\"sdp\"", nv.getValue().getValue());
assertEquals("AND", rhs.getOperator().getOperator());
rhs = rhs.getRightHandSide();
assertNotNull(rhs);
assertFalse(rhs.isBinaryExpr());
GroupedExpression ge = rhs.getGroupedExpression();
assertNull(ge);
nv = rhs.getNameValue();
assertNotNull(nv);
assertEquals("description", nv.getName().getName());
assertEquals("IS", nv.getOp().getOperator());
ConstValue cv = nv.getValue();
assertNotNull(cv);
assertEquals(cv.getSysConstant(), ConstValue.SystemConsts.NULL);
test = "description IS not null";
is = new BufferedInputStream(new ByteArrayInputStream(test.getBytes()));
expr = new Expression(is);
System.out.println(expr);
nv = expr.getNameValue();
assertNotNull(nv);
assertEquals("description", nv.getName().getName());
assertEquals("IS", nv.getOp().getOperator());
cv = nv.getValue();
assertNotNull(cv);
assertEquals(cv.getSysConstant(), ConstValue.SystemConsts.NOT_NULL);
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/os-persistence/src/test/java/com/netflix/conductor/os/dao/query/parser/TestGroupedExpression.java | os-persistence/src/test/java/com/netflix/conductor/os/dao/query/parser/TestGroupedExpression.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.os.dao.query.parser;
import org.junit.Test;
/**
* @author Viren
*/
public class TestGroupedExpression {
@Test
public void test() {}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/os-persistence/src/test/java/com/netflix/conductor/os/dao/query/parser/internal/TestConstValue.java | os-persistence/src/test/java/com/netflix/conductor/os/dao/query/parser/internal/TestConstValue.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.os.dao.query.parser.internal;
import java.util.List;
import org.junit.Test;
import static org.junit.Assert.*;
/**
* @author Viren
*/
public class TestConstValue extends AbstractParserTest {
@Test
public void testStringConst() throws Exception {
String test = "'string value'";
String expected =
test.replaceAll(
"'", "\""); // Quotes are removed but then the result is double quoted.
ConstValue cv = new ConstValue(getInputStream(test));
assertNotNull(cv.getValue());
assertEquals(expected, cv.getValue());
assertTrue(cv.getValue() instanceof String);
test = "\"string value\"";
cv = new ConstValue(getInputStream(test));
assertNotNull(cv.getValue());
assertEquals(expected, cv.getValue());
assertTrue(cv.getValue() instanceof String);
}
@Test
public void testSystemConst() throws Exception {
String test = "null";
ConstValue cv = new ConstValue(getInputStream(test));
assertNotNull(cv.getValue());
assertTrue(cv.getValue() instanceof String);
assertEquals(cv.getSysConstant(), ConstValue.SystemConsts.NULL);
test = "null";
test = "not null";
cv = new ConstValue(getInputStream(test));
assertNotNull(cv.getValue());
assertEquals(cv.getSysConstant(), ConstValue.SystemConsts.NOT_NULL);
}
@Test(expected = ParserException.class)
public void testInvalid() throws Exception {
String test = "'string value";
new ConstValue(getInputStream(test));
}
@Test
public void testNumConst() throws Exception {
String test = "12345.89";
ConstValue cv = new ConstValue(getInputStream(test));
assertNotNull(cv.getValue());
assertTrue(
cv.getValue()
instanceof
String); // Numeric values are stored as string as we are just passing thru
// them to ES
assertEquals(test, cv.getValue());
}
@Test
public void testRange() throws Exception {
String test = "50 AND 100";
Range range = new Range(getInputStream(test));
assertEquals("50", range.getLow());
assertEquals("100", range.getHigh());
}
@Test(expected = ParserException.class)
public void testBadRange() throws Exception {
String test = "50 AND";
new Range(getInputStream(test));
}
@Test
public void testArray() throws Exception {
String test = "(1, 3, 'name', 'value2')";
ListConst lc = new ListConst(getInputStream(test));
List<Object> list = lc.getList();
assertEquals(4, list.size());
assertTrue(list.contains("1"));
assertEquals("'value2'", list.get(3)); // Values are preserved as it is...
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/os-persistence/src/test/java/com/netflix/conductor/os/dao/query/parser/internal/AbstractParserTest.java | os-persistence/src/test/java/com/netflix/conductor/os/dao/query/parser/internal/AbstractParserTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.os.dao.query.parser.internal;
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
/**
* @author Viren
*/
public abstract class AbstractParserTest {
protected InputStream getInputStream(String expression) {
return new BufferedInputStream(new ByteArrayInputStream(expression.getBytes()));
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/os-persistence/src/test/java/com/netflix/conductor/os/dao/query/parser/internal/TestName.java | os-persistence/src/test/java/com/netflix/conductor/os/dao/query/parser/internal/TestName.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.os.dao.query.parser.internal;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
/**
* @author Viren
*/
public class TestName extends AbstractParserTest {
@Test
public void test() throws Exception {
String test = "metadata.en_US.lang ";
Name name = new Name(getInputStream(test));
String nameVal = name.getName();
assertNotNull(nameVal);
assertEquals(test.trim(), nameVal);
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/os-persistence/src/test/java/com/netflix/conductor/os/dao/query/parser/internal/TestBooleanOp.java | os-persistence/src/test/java/com/netflix/conductor/os/dao/query/parser/internal/TestBooleanOp.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.os.dao.query.parser.internal;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
/**
* @author Viren
*/
public class TestBooleanOp extends AbstractParserTest {

    /** Both supported boolean operators parse back to themselves. */
    @Test
    public void test() throws Exception {
        for (String operator : new String[] {"AND", "OR"}) {
            String parsed = new BooleanOp(getInputStream(operator)).getOperator();
            assertNotNull(parsed);
            assertEquals(operator, parsed);
        }
    }

    /** A comparison token is not a boolean operator; the parser must reject it. */
    @Test(expected = ParserException.class)
    public void testInvalid() throws Exception {
        String operator = "<";
        String parsed = new BooleanOp(getInputStream(operator)).getOperator();
        assertNotNull(parsed);
        assertEquals(operator, parsed);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/os-persistence/src/test/java/com/netflix/conductor/os/dao/query/parser/internal/TestComparisonOp.java | os-persistence/src/test/java/com/netflix/conductor/os/dao/query/parser/internal/TestComparisonOp.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.os.dao.query.parser.internal;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
/**
* @author Viren
*/
public class TestComparisonOp extends AbstractParserTest {

    /** Every supported comparison operator parses back to itself. */
    @Test
    public void test() throws Exception {
        String[] operators = {"<", ">", "=", "!=", "IN", "BETWEEN", "STARTS_WITH"};
        for (String operator : operators) {
            String parsed = new ComparisonOp(getInputStream(operator)).getOperator();
            assertNotNull(parsed);
            assertEquals(operator, parsed);
        }
    }

    /** A boolean token is not a comparison operator; the parser must reject it. */
    @Test(expected = ParserException.class)
    public void testInvalidOp() throws Exception {
        String operator = "AND";
        String parsed = new ComparisonOp(getInputStream(operator)).getOperator();
        assertNotNull(parsed);
        assertEquals(operator, parsed);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/os-persistence/src/test/java/com/netflix/conductor/os/dao/index/TestBulkRequestBuilderWrapper.java | os-persistence/src/test/java/com/netflix/conductor/os/dao/index/TestBulkRequestBuilderWrapper.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.os.dao.index;
import org.junit.Test;
import org.mockito.Mockito;
import org.opensearch.action.bulk.BulkRequestBuilder;
import org.opensearch.action.index.IndexRequest;
import org.opensearch.action.update.UpdateRequest;
public class TestBulkRequestBuilderWrapper {

    BulkRequestBuilder builder = Mockito.mock(BulkRequestBuilder.class);
    BulkRequestBuilderWrapper wrapper = new BulkRequestBuilderWrapper(builder);

    /** Adding a null update request must be rejected. */
    @Test(expected = Exception.class)
    public void testAddNullUpdateRequest() {
        wrapper.add((UpdateRequest) null);
    }

    /** Adding a null index request must be rejected. */
    @Test(expected = Exception.class)
    public void testAddNullIndexRequest() {
        wrapper.add((IndexRequest) null);
    }

    /** Every wrapper operation must delegate exactly once to the wrapped builder. */
    @Test
    public void testBuilderCalls() {
        IndexRequest index = new IndexRequest();
        UpdateRequest update = new UpdateRequest();

        wrapper.add(index);
        wrapper.add(update);
        wrapper.numberOfActions();
        wrapper.execute();

        // verify(mock) defaults to times(1), matching the original expectations.
        Mockito.verify(builder).add(index);
        Mockito.verify(builder).add(update);
        Mockito.verify(builder).numberOfActions();
        Mockito.verify(builder).execute();
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/os-persistence/src/test/java/com/netflix/conductor/os/dao/index/OpenSearchRestDaoBaseTest.java | os-persistence/src/test/java/com/netflix/conductor/os/dao/index/OpenSearchRestDaoBaseTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.os.dao.index;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import org.apache.http.HttpHost;
import org.junit.After;
import org.junit.Before;
import org.opensearch.client.Request;
import org.opensearch.client.Response;
import org.opensearch.client.RestClient;
import org.opensearch.client.RestClientBuilder;
import org.springframework.retry.support.RetryTemplate;
public abstract class OpenSearchRestDaoBaseTest extends OpenSearchTest {

    protected RestClient restClient;
    protected OpenSearchRestDAO indexDAO;

    /**
     * Points the DAO at the Testcontainers OpenSearch instance and initializes the
     * indices/templates via {@code OpenSearchRestDAO.setup()}.
     */
    @Before
    public void setup() throws Exception {
        String httpHostAddress = container.getHttpHostAddress();
        // httpHostAddress is of the form "http://host:port"
        String host = httpHostAddress.split(":")[1].replace("//", "");
        int port = Integer.parseInt(httpHostAddress.split(":")[2]);

        properties.setUrl(httpHostAddress);

        RestClientBuilder restClientBuilder = RestClient.builder(new HttpHost(host, port, "http"));
        restClient = restClientBuilder.build();
        indexDAO =
                new OpenSearchRestDAO(
                        restClientBuilder, new RetryTemplate(), properties, objectMapper);
        indexDAO.setup();
    }

    @After
    public void tearDown() throws Exception {
        deleteAllIndices();
        if (restClient != null) {
            restClient.close();
        }
    }

    /**
     * Deletes every index reported by the {@code _cat/indices} API so each test starts
     * against a clean cluster.
     *
     * @throws IOException if a request to the cluster fails
     */
    private void deleteAllIndices() throws IOException {
        Response beforeResponse = restClient.performRequest(new Request("GET", "/_cat/indices"));
        // try-with-resources: the readers (and the underlying response stream) were
        // previously never closed, leaking the connection on every teardown.
        try (Reader streamReader = new InputStreamReader(beforeResponse.getEntity().getContent());
                BufferedReader bufferedReader = new BufferedReader(streamReader)) {
            String line;
            while ((line = bufferedReader.readLine()) != null) {
                // _cat/indices rows are whitespace-separated; column 3 is the index name.
                String[] fields = line.split("(\\s+)");
                String endpoint = String.format("/%s", fields[2]);
                restClient.performRequest(new Request("DELETE", endpoint));
            }
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/os-persistence/src/test/java/com/netflix/conductor/os/dao/index/TestOpenSearchRestDAO.java | os-persistence/src/test/java/com/netflix/conductor/os/dao/index/TestOpenSearchRestDAO.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.os.dao.index;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.function.Supplier;
import org.joda.time.DateTime;
import org.junit.Test;
import com.netflix.conductor.common.metadata.events.EventExecution;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.common.metadata.tasks.TaskExecLog;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.common.run.Workflow.WorkflowStatus;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.os.utils.TestUtils;
import com.google.common.collect.ImmutableMap;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
public class TestOpenSearchRestDAO extends OpenSearchRestDaoBaseTest {
private static final SimpleDateFormat SIMPLE_DATE_FORMAT = new SimpleDateFormat("yyyyMMWW");
private static final String INDEX_PREFIX = "conductor";
private static final String WORKFLOW_DOC_TYPE = "workflow";
private static final String TASK_DOC_TYPE = "task";
private static final String MSG_DOC_TYPE = "message";
private static final String EVENT_DOC_TYPE = "event";
private static final String LOG_DOC_TYPE = "task_log";
private boolean indexExists(final String index) throws IOException {
return indexDAO.doesResourceExist("/" + index);
}
private boolean doesMappingExist(final String index, final String mappingName)
throws IOException {
return indexDAO.doesResourceExist("/" + index + "/_mapping/" + mappingName);
}
@Test
public void assertInitialSetup() throws IOException {
SIMPLE_DATE_FORMAT.setTimeZone(TimeZone.getTimeZone("GMT"));
String workflowIndex = INDEX_PREFIX + "_" + WORKFLOW_DOC_TYPE;
String taskIndex = INDEX_PREFIX + "_" + TASK_DOC_TYPE;
String taskLogIndex =
INDEX_PREFIX + "_" + LOG_DOC_TYPE + "_" + SIMPLE_DATE_FORMAT.format(new Date());
String messageIndex =
INDEX_PREFIX + "_" + MSG_DOC_TYPE + "_" + SIMPLE_DATE_FORMAT.format(new Date());
String eventIndex =
INDEX_PREFIX + "_" + EVENT_DOC_TYPE + "_" + SIMPLE_DATE_FORMAT.format(new Date());
assertTrue("Index 'conductor_workflow' should exist", indexExists(workflowIndex));
assertTrue("Index 'conductor_task' should exist", indexExists(taskIndex));
assertTrue("Index '" + taskLogIndex + "' should exist", indexExists(taskLogIndex));
assertTrue("Index '" + messageIndex + "' should exist", indexExists(messageIndex));
assertTrue("Index '" + eventIndex + "' should exist", indexExists(eventIndex));
assertTrue(
"Index template for 'message' should exist",
indexDAO.doesResourceExist("/_index_template/template_" + MSG_DOC_TYPE));
assertTrue(
"Index template for 'event' should exist",
indexDAO.doesResourceExist("/_index_template/template_" + EVENT_DOC_TYPE));
assertTrue(
"Index template for 'task_log' should exist",
indexDAO.doesResourceExist("/_index_template/template_" + LOG_DOC_TYPE));
}
@Test
public void shouldIndexWorkflow() {
WorkflowSummary workflowSummary =
TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
indexDAO.indexWorkflow(workflowSummary);
assertWorkflowSummary(workflowSummary.getWorkflowId(), workflowSummary);
}
@Test
public void shouldIndexWorkflowAsync() throws Exception {
WorkflowSummary workflowSummary =
TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
indexDAO.asyncIndexWorkflow(workflowSummary).get();
assertWorkflowSummary(workflowSummary.getWorkflowId(), workflowSummary);
}
@Test
public void shouldRemoveWorkflow() {
WorkflowSummary workflowSummary =
TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
indexDAO.indexWorkflow(workflowSummary);
// wait for workflow to be indexed
List<String> workflows =
tryFindResults(() -> searchWorkflows(workflowSummary.getWorkflowId()), 1);
assertEquals(1, workflows.size());
indexDAO.removeWorkflow(workflowSummary.getWorkflowId());
workflows = tryFindResults(() -> searchWorkflows(workflowSummary.getWorkflowId()), 0);
assertTrue("Workflow was not removed.", workflows.isEmpty());
}
@Test
public void shouldAsyncRemoveWorkflow() throws Exception {
WorkflowSummary workflowSummary =
TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
indexDAO.indexWorkflow(workflowSummary);
// wait for workflow to be indexed
List<String> workflows =
tryFindResults(() -> searchWorkflows(workflowSummary.getWorkflowId()), 1);
assertEquals(1, workflows.size());
indexDAO.asyncRemoveWorkflow(workflowSummary.getWorkflowId()).get();
workflows = tryFindResults(() -> searchWorkflows(workflowSummary.getWorkflowId()), 0);
assertTrue("Workflow was not removed.", workflows.isEmpty());
}
@Test
public void shouldUpdateWorkflow() {
WorkflowSummary workflowSummary =
TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
indexDAO.indexWorkflow(workflowSummary);
indexDAO.updateWorkflow(
workflowSummary.getWorkflowId(),
new String[] {"status"},
new Object[] {WorkflowStatus.COMPLETED});
workflowSummary.setStatus(WorkflowStatus.COMPLETED);
assertWorkflowSummary(workflowSummary.getWorkflowId(), workflowSummary);
}
@Test
public void shouldAsyncUpdateWorkflow() throws Exception {
WorkflowSummary workflowSummary =
TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
indexDAO.indexWorkflow(workflowSummary);
indexDAO.asyncUpdateWorkflow(
workflowSummary.getWorkflowId(),
new String[] {"status"},
new Object[] {WorkflowStatus.FAILED})
.get();
workflowSummary.setStatus(WorkflowStatus.FAILED);
assertWorkflowSummary(workflowSummary.getWorkflowId(), workflowSummary);
}
@Test
public void shouldIndexTask() {
TaskSummary taskSummary = TestUtils.loadTaskSnapshot(objectMapper, "task_summary");
indexDAO.indexTask(taskSummary);
List<String> tasks = tryFindResults(() -> searchTasks(taskSummary));
assertEquals(taskSummary.getTaskId(), tasks.get(0));
}
@Test
public void shouldIndexTaskAsync() throws Exception {
TaskSummary taskSummary = TestUtils.loadTaskSnapshot(objectMapper, "task_summary");
indexDAO.asyncIndexTask(taskSummary).get();
List<String> tasks = tryFindResults(() -> searchTasks(taskSummary));
assertEquals(taskSummary.getTaskId(), tasks.get(0));
}
@Test
public void shouldRemoveTask() {
WorkflowSummary workflowSummary =
TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
indexDAO.indexWorkflow(workflowSummary);
// wait for workflow to be indexed
tryFindResults(() -> searchWorkflows(workflowSummary.getWorkflowId()), 1);
TaskSummary taskSummary =
TestUtils.loadTaskSnapshot(
objectMapper, "task_summary", workflowSummary.getWorkflowId());
indexDAO.indexTask(taskSummary);
// Wait for the task to be indexed
List<String> tasks = tryFindResults(() -> searchTasks(taskSummary), 1);
indexDAO.removeTask(workflowSummary.getWorkflowId(), taskSummary.getTaskId());
tasks = tryFindResults(() -> searchTasks(taskSummary), 0);
assertTrue("Task was not removed.", tasks.isEmpty());
}
@Test
public void shouldAsyncRemoveTask() throws Exception {
WorkflowSummary workflowSummary =
TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
indexDAO.indexWorkflow(workflowSummary);
// wait for workflow to be indexed
tryFindResults(() -> searchWorkflows(workflowSummary.getWorkflowId()), 1);
TaskSummary taskSummary =
TestUtils.loadTaskSnapshot(
objectMapper, "task_summary", workflowSummary.getWorkflowId());
indexDAO.indexTask(taskSummary);
// Wait for the task to be indexed
List<String> tasks = tryFindResults(() -> searchTasks(taskSummary), 1);
indexDAO.asyncRemoveTask(workflowSummary.getWorkflowId(), taskSummary.getTaskId()).get();
tasks = tryFindResults(() -> searchTasks(taskSummary), 0);
assertTrue("Task was not removed.", tasks.isEmpty());
}
@Test
public void shouldNotRemoveTaskWhenNotAssociatedWithWorkflow() {
TaskSummary taskSummary = TestUtils.loadTaskSnapshot(objectMapper, "task_summary");
indexDAO.indexTask(taskSummary);
// Wait for the task to be indexed
List<String> tasks = tryFindResults(() -> searchTasks(taskSummary), 1);
indexDAO.removeTask("InvalidWorkflow", taskSummary.getTaskId());
tasks = tryFindResults(() -> searchTasks(taskSummary), 0);
assertFalse("Task was removed.", tasks.isEmpty());
}
@Test
public void shouldNotAsyncRemoveTaskWhenNotAssociatedWithWorkflow() throws Exception {
TaskSummary taskSummary = TestUtils.loadTaskSnapshot(objectMapper, "task_summary");
indexDAO.indexTask(taskSummary);
// Wait for the task to be indexed
List<String> tasks = tryFindResults(() -> searchTasks(taskSummary), 1);
indexDAO.asyncRemoveTask("InvalidWorkflow", taskSummary.getTaskId()).get();
tasks = tryFindResults(() -> searchTasks(taskSummary), 0);
assertFalse("Task was removed.", tasks.isEmpty());
}
@Test
public void shouldAddTaskExecutionLogs() {
List<TaskExecLog> logs = new ArrayList<>();
String taskId = uuid();
logs.add(createLog(taskId, "log1"));
logs.add(createLog(taskId, "log2"));
logs.add(createLog(taskId, "log3"));
indexDAO.addTaskExecutionLogs(logs);
List<TaskExecLog> indexedLogs =
tryFindResults(() -> indexDAO.getTaskExecutionLogs(taskId), 3);
assertEquals(3, indexedLogs.size());
assertTrue("Not all logs was indexed", indexedLogs.containsAll(logs));
}
@Test
public void shouldAddTaskExecutionLogsAsync() throws Exception {
List<TaskExecLog> logs = new ArrayList<>();
String taskId = uuid();
logs.add(createLog(taskId, "log1"));
logs.add(createLog(taskId, "log2"));
logs.add(createLog(taskId, "log3"));
indexDAO.asyncAddTaskExecutionLogs(logs).get();
List<TaskExecLog> indexedLogs =
tryFindResults(() -> indexDAO.getTaskExecutionLogs(taskId), 3);
assertEquals(3, indexedLogs.size());
assertTrue("Not all logs was indexed", indexedLogs.containsAll(logs));
}
@Test
public void shouldAddMessage() {
String queue = "queue";
Message message1 = new Message(uuid(), "payload1", null);
Message message2 = new Message(uuid(), "payload2", null);
indexDAO.addMessage(queue, message1);
indexDAO.addMessage(queue, message2);
List<Message> indexedMessages = tryFindResults(() -> indexDAO.getMessages(queue), 2);
assertEquals(2, indexedMessages.size());
assertTrue(
"Not all messages was indexed",
indexedMessages.containsAll(Arrays.asList(message1, message2)));
}
@Test
public void shouldAddEventExecution() {
String event = "event";
EventExecution execution1 = createEventExecution(event);
EventExecution execution2 = createEventExecution(event);
indexDAO.addEventExecution(execution1);
indexDAO.addEventExecution(execution2);
List<EventExecution> indexedExecutions =
tryFindResults(() -> indexDAO.getEventExecutions(event), 2);
assertEquals(2, indexedExecutions.size());
assertTrue(
"Not all event executions was indexed",
indexedExecutions.containsAll(Arrays.asList(execution1, execution2)));
}
@Test
public void shouldAsyncAddEventExecution() throws Exception {
String event = "event2";
EventExecution execution1 = createEventExecution(event);
EventExecution execution2 = createEventExecution(event);
indexDAO.asyncAddEventExecution(execution1).get();
indexDAO.asyncAddEventExecution(execution2).get();
List<EventExecution> indexedExecutions =
tryFindResults(() -> indexDAO.getEventExecutions(event), 2);
assertEquals(2, indexedExecutions.size());
assertTrue(
"Not all event executions was indexed",
indexedExecutions.containsAll(Arrays.asList(execution1, execution2)));
}
@Test
public void shouldAddIndexPrefixToIndexTemplate() throws Exception {
String json = TestUtils.loadJsonResource("expected_template_task_log");
String content = indexDAO.loadTypeMappingSource("/template_task_log.json");
assertEquals(json, content);
}
@Test
public void shouldSearchRecentRunningWorkflows() throws Exception {
WorkflowSummary oldWorkflow =
TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
oldWorkflow.setStatus(WorkflowStatus.RUNNING);
oldWorkflow.setUpdateTime(getFormattedTime(new DateTime().minusHours(2).toDate()));
WorkflowSummary recentWorkflow =
TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
recentWorkflow.setStatus(WorkflowStatus.RUNNING);
recentWorkflow.setUpdateTime(getFormattedTime(new DateTime().minusHours(1).toDate()));
WorkflowSummary tooRecentWorkflow =
TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
tooRecentWorkflow.setStatus(WorkflowStatus.RUNNING);
tooRecentWorkflow.setUpdateTime(getFormattedTime(new DateTime().toDate()));
indexDAO.indexWorkflow(oldWorkflow);
indexDAO.indexWorkflow(recentWorkflow);
indexDAO.indexWorkflow(tooRecentWorkflow);
Thread.sleep(1000);
List<String> ids = indexDAO.searchRecentRunningWorkflows(2, 1);
assertEquals(1, ids.size());
assertEquals(recentWorkflow.getWorkflowId(), ids.get(0));
}
@Test
public void shouldCountWorkflows() {
int counts = 1100;
for (int i = 0; i < counts; i++) {
WorkflowSummary workflowSummary =
TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary");
indexDAO.indexWorkflow(workflowSummary);
}
// wait for workflow to be indexed
long result = tryGetCount(() -> getWorkflowCount("template_workflow", "RUNNING"), counts);
assertEquals(counts, result);
}
private long tryGetCount(Supplier<Long> countFunction, int resultsCount) {
long result = 0;
for (int i = 0; i < 20; i++) {
result = countFunction.get();
if (result == resultsCount) {
return result;
}
try {
Thread.sleep(100);
} catch (InterruptedException e) {
throw new RuntimeException(e.getMessage(), e);
}
}
return result;
}
// Get total workflow counts given the name and status
private long getWorkflowCount(String workflowName, String status) {
return indexDAO.getWorkflowCount(
"status=\"" + status + "\" AND workflowType=\"" + workflowName + "\"", "*");
}
private void assertWorkflowSummary(String workflowId, WorkflowSummary summary) {
assertEquals(summary.getWorkflowType(), indexDAO.get(workflowId, "workflowType"));
assertEquals(String.valueOf(summary.getVersion()), indexDAO.get(workflowId, "version"));
assertEquals(summary.getWorkflowId(), indexDAO.get(workflowId, "workflowId"));
assertEquals(summary.getCorrelationId(), indexDAO.get(workflowId, "correlationId"));
assertEquals(summary.getStartTime(), indexDAO.get(workflowId, "startTime"));
assertEquals(summary.getUpdateTime(), indexDAO.get(workflowId, "updateTime"));
assertEquals(summary.getEndTime(), indexDAO.get(workflowId, "endTime"));
assertEquals(summary.getStatus().name(), indexDAO.get(workflowId, "status"));
assertEquals(summary.getInput(), indexDAO.get(workflowId, "input"));
assertEquals(summary.getOutput(), indexDAO.get(workflowId, "output"));
assertEquals(
summary.getReasonForIncompletion(),
indexDAO.get(workflowId, "reasonForIncompletion"));
assertEquals(
String.valueOf(summary.getExecutionTime()),
indexDAO.get(workflowId, "executionTime"));
assertEquals(summary.getEvent(), indexDAO.get(workflowId, "event"));
assertEquals(
summary.getFailedReferenceTaskNames(),
indexDAO.get(workflowId, "failedReferenceTaskNames"));
}
private String getFormattedTime(Date time) {
SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'");
sdf.setTimeZone(TimeZone.getTimeZone("GMT"));
return sdf.format(time);
}
private <T> List<T> tryFindResults(Supplier<List<T>> searchFunction) {
return tryFindResults(searchFunction, 1);
}
private <T> List<T> tryFindResults(Supplier<List<T>> searchFunction, int resultsCount) {
List<T> result = Collections.emptyList();
for (int i = 0; i < 20; i++) {
result = searchFunction.get();
if (result.size() == resultsCount) {
return result;
}
try {
Thread.sleep(100);
} catch (InterruptedException e) {
throw new RuntimeException(e.getMessage(), e);
}
}
return result;
}
private List<String> searchWorkflows(String workflowId) {
return indexDAO.searchWorkflows(
"", "workflowId:\"" + workflowId + "\"", 0, 100, Collections.emptyList())
.getResults();
}
private List<String> searchTasks(TaskSummary taskSummary) {
return indexDAO.searchTasks(
"",
"workflowId:\"" + taskSummary.getWorkflowId() + "\"",
0,
100,
Collections.emptyList())
.getResults();
}
private TaskExecLog createLog(String taskId, String log) {
TaskExecLog taskExecLog = new TaskExecLog(log);
taskExecLog.setTaskId(taskId);
return taskExecLog;
}
private EventExecution createEventExecution(String event) {
EventExecution execution = new EventExecution(uuid(), uuid());
execution.setName("name");
execution.setEvent(event);
execution.setCreated(System.currentTimeMillis());
execution.setStatus(EventExecution.Status.COMPLETED);
execution.setAction(EventHandler.Action.Type.start_workflow);
execution.setOutput(ImmutableMap.of("a", 1, "b", 2, "c", 3));
return execution;
}
private String uuid() {
return UUID.randomUUID().toString();
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/os-persistence/src/test/java/com/netflix/conductor/os/dao/index/OpenSearchTest.java | os-persistence/src/test/java/com/netflix/conductor/os/dao/index/OpenSearchTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.os.dao.index;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.runner.RunWith;
import org.opensearch.testcontainers.OpensearchContainer;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringRunner;
import org.testcontainers.utility.DockerImageName;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.os.config.OpenSearchProperties;
import com.fasterxml.jackson.databind.ObjectMapper;
/**
 * Base class for OpenSearch persistence tests: starts a shared Testcontainers
 * OpenSearch instance once per test class and wires the Spring test context.
 */
@ContextConfiguration(
        classes = {TestObjectMapperConfiguration.class, OpenSearchTest.TestConfiguration.class})
@RunWith(SpringRunner.class)
@TestPropertySource(
        properties = {"conductor.indexing.enabled=true", "conductor.elasticsearch.version=0"})
public abstract class OpenSearchTest {

    /** Minimal Spring configuration providing default OpenSearch properties for the tests. */
    @Configuration
    static class TestConfiguration {

        // NOTE(review): bean method is named "elasticSearchProperties" although it returns
        // OpenSearchProperties — presumably kept for legacy naming compatibility; confirm
        // before renaming, since the method name is also the bean name.
        @Bean
        public OpenSearchProperties elasticSearchProperties() {
            return new OpenSearchProperties();
        }
    }

    // Shared container for all tests in the class; started in @BeforeClass, stopped in
    // @AfterClass.
    protected static OpensearchContainer<?> container =
            new OpensearchContainer<>(
                    DockerImageName.parse(
                            "opensearchproject/opensearch:2.18.0")); // this should match the client
    // version

    @Autowired protected ObjectMapper objectMapper;

    @Autowired protected OpenSearchProperties properties;

    @BeforeClass
    public static void startServer() {
        container.start();
    }

    @AfterClass
    public static void stopServer() {
        container.stop();
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/os-persistence/src/test/java/com/netflix/conductor/os/dao/index/TestOpenSearchRestDAOBatch.java | os-persistence/src/test/java/com/netflix/conductor/os/dao/index/TestOpenSearchRestDAOBatch.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.os.dao.index;
import java.util.HashMap;
import java.util.concurrent.TimeUnit;
import org.junit.Test;
import org.springframework.test.context.TestPropertySource;
import com.netflix.conductor.common.metadata.tasks.Task.Status;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.TaskSummary;
import com.fasterxml.jackson.core.JsonProcessingException;
import static org.awaitility.Awaitility.await;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
@TestPropertySource(properties = "conductor.elasticsearch.indexBatchSize=2")
public class TestOpenSearchRestDAOBatch extends OpenSearchRestDaoBaseTest {

    /**
     * With indexBatchSize=2, a single indexTask call is buffered; indexing the same
     * summary twice fills the batch, forcing a flush, after which the task must be
     * searchable by correlation id.
     */
    @Test
    public void indexTaskWithBatchSizeTwo() {
        String correlationId = "some-correlation-id";

        TaskSummary taskSummary = new TaskSummary();
        taskSummary.setTaskId("some-task-id");
        taskSummary.setWorkflowId("some-workflow-instance-id");
        taskSummary.setTaskType("some-task-type");
        taskSummary.setStatus(Status.FAILED);

        // Plain HashMap replaces the previous anonymous double-brace initializer, which
        // created a needless inner class capturing a reference to the test instance.
        HashMap<String, Object> input = new HashMap<>();
        input.put("input_key", "input_value");
        try {
            taskSummary.setInput(objectMapper.writeValueAsString(input));
        } catch (JsonProcessingException e) {
            throw new RuntimeException(e);
        }
        taskSummary.setCorrelationId(correlationId);
        taskSummary.setTaskDefName("some-task-def-name");
        taskSummary.setReasonForIncompletion("some-failure-reason");

        indexDAO.indexTask(taskSummary);
        indexDAO.indexTask(taskSummary);

        await().atMost(5, TimeUnit.SECONDS)
                .untilAsserted(
                        () -> {
                            SearchResult<String> result =
                                    indexDAO.searchTasks(
                                            "correlationId='" + correlationId + "'",
                                            "*",
                                            0,
                                            10000,
                                            null);
                            assertTrue(
                                    "should return 1 or more search results",
                                    !result.getResults().isEmpty());
                            assertEquals(
                                    "taskId should match the indexed task",
                                    "some-task-id",
                                    result.getResults().get(0));
                        });
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/os-persistence/src/test/java/com/netflix/conductor/os/utils/TestUtils.java | os-persistence/src/test/java/com/netflix/conductor/os/utils/TestUtils.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.os.utils;
import org.apache.commons.io.Charsets;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.core.utils.IDGenerator;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.io.Resources;
/** Helpers for loading JSON snapshot fixtures used by the OpenSearch DAO tests. */
public class TestUtils {

    private static final String WORKFLOW_SCENARIO_EXTENSION = ".json";
    private static final String WORKFLOW_INSTANCE_ID_PLACEHOLDER = "WORKFLOW_INSTANCE_ID";

    private TestUtils() {
        // utility class; no instances
    }

    /**
     * Loads a workflow summary snapshot, substituting a freshly generated workflow id
     * for the placeholder.
     */
    public static WorkflowSummary loadWorkflowSnapshot(
            ObjectMapper objectMapper, String resourceFileName) {
        try {
            String content = resolveContent(resourceFileName, new IDGenerator().generate());
            return objectMapper.readValue(content, WorkflowSummary.class);
        } catch (Exception e) {
            throw new RuntimeException(e.getMessage(), e);
        }
    }

    /**
     * Loads a task summary snapshot, substituting a freshly generated workflow id.
     * Delegates to the three-argument overload to avoid duplicating the load logic.
     */
    public static TaskSummary loadTaskSnapshot(ObjectMapper objectMapper, String resourceFileName) {
        return loadTaskSnapshot(objectMapper, resourceFileName, new IDGenerator().generate());
    }

    /** Loads a task summary snapshot bound to the given workflow id. */
    public static TaskSummary loadTaskSnapshot(
            ObjectMapper objectMapper, String resourceFileName, String workflowId) {
        try {
            String content = resolveContent(resourceFileName, workflowId);
            return objectMapper.readValue(content, TaskSummary.class);
        } catch (Exception e) {
            throw new RuntimeException(e.getMessage(), e);
        }
    }

    /** Reads a classpath JSON resource (name without the ".json" extension) as a string. */
    public static String loadJsonResource(String resourceFileName) {
        try {
            return Resources.toString(
                    TestUtils.class.getResource(
                            "/" + resourceFileName + WORKFLOW_SCENARIO_EXTENSION),
                    Charsets.UTF_8);
        } catch (Exception e) {
            throw new RuntimeException(e.getMessage(), e);
        }
    }

    // Shared helper: load the resource and bind the workflow-id placeholder.
    private static String resolveContent(String resourceFileName, String workflowId) {
        return loadJsonResource(resourceFileName)
                .replace(WORKFLOW_INSTANCE_ID_PLACEHOLDER, workflowId);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/os-persistence/src/main/java/com/netflix/conductor/os/dao/query/parser/FilterProvider.java | os-persistence/src/main/java/com/netflix/conductor/os/dao/query/parser/FilterProvider.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.os.dao.query.parser;
import org.opensearch.index.query.QueryBuilder;
/**
 * Supplies an OpenSearch {@code QueryBuilder} representing a parsed filter expression.
 *
 * @author Viren
 */
public interface FilterProvider {

    /**
     * @return the {@link QueryBuilder} for this filter, expressed in the OpenSearch query DSL
     */
    QueryBuilder getFilterBuilder();
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/os-persistence/src/main/java/com/netflix/conductor/os/dao/query/parser/Expression.java | os-persistence/src/main/java/com/netflix/conductor/os/dao/query/parser/Expression.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.os.dao.query.parser;
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import org.opensearch.index.query.QueryBuilder;
import org.opensearch.index.query.QueryBuilders;
import com.netflix.conductor.os.dao.query.parser.internal.AbstractNode;
import com.netflix.conductor.os.dao.query.parser.internal.BooleanOp;
import com.netflix.conductor.os.dao.query.parser.internal.ParserException;
/**
 * A (possibly binary) boolean expression node: either a single {@code name OPR value}
 * comparison or a parenthesized sub-expression, optionally followed by {@code AND}/{@code OR}
 * and another expression (parsed right-associatively).
 *
 * @author Viren
 */
public class Expression extends AbstractNode implements FilterProvider {
    // Exactly one of nameVal / ge is non-null, depending on whether the expression
    // starts with '(' (grouped) or a field name (simple comparison).
    private NameValue nameVal;
    private GroupedExpression ge;
    // op and rhs are non-null only for binary expressions ("lhs AND rhs" / "lhs OR rhs").
    private BooleanOp op;
    private Expression rhs;
    public Expression(InputStream is) throws ParserException {
        super(is);
    }
    @Override
    protected void _parse() throws Exception {
        // One byte of lookahead decides the branch without consuming the stream.
        byte[] peeked = peek(1);
        if (peeked[0] == '(') {
            this.ge = new GroupedExpression(is);
        } else {
            this.nameVal = new NameValue(is);
        }
        // Three bytes are enough to recognize "OR" or "AND" via isBoolOpr.
        peeked = peek(3);
        if (isBoolOpr(peeked)) {
            // we have an expression next
            this.op = new BooleanOp(is);
            this.rhs = new Expression(is);
        }
    }
    /** @return true if this node has a boolean operator and a right-hand side */
    public boolean isBinaryExpr() {
        return this.op != null;
    }
    public BooleanOp getOperator() {
        return this.op;
    }
    public Expression getRightHandSide() {
        return this.rhs;
    }
    public boolean isNameValue() {
        return this.nameVal != null;
    }
    public NameValue getNameValue() {
        return this.nameVal;
    }
    public GroupedExpression getGroupedExpression() {
        return this.ge;
    }
    /**
     * Builds the OpenSearch query: AND maps to bool/must, OR to bool/should.
     * NOTE(review): no minimum_should_match is set on the OR case; this relies on the
     * OpenSearch default for a should-only bool query.
     */
    @Override
    public QueryBuilder getFilterBuilder() {
        QueryBuilder lhs = null;
        if (nameVal != null) {
            lhs = nameVal.getFilterBuilder();
        } else {
            lhs = ge.getFilterBuilder();
        }
        if (this.isBinaryExpr()) {
            QueryBuilder rhsFilter = rhs.getFilterBuilder();
            if (this.op.isAnd()) {
                return QueryBuilders.boolQuery().must(lhs).must(rhsFilter);
            } else {
                return QueryBuilders.boolQuery().should(lhs).should(rhsFilter);
            }
        } else {
            return lhs;
        }
    }
    @Override
    public String toString() {
        if (isBinaryExpr()) {
            return "" + (nameVal == null ? ge : nameVal) + op + rhs;
        } else {
            return "" + (nameVal == null ? ge : nameVal);
        }
    }
    /** Convenience factory: parses the whole expression from a query string. */
    public static Expression fromString(String value) throws ParserException {
        return new Expression(new BufferedInputStream(new ByteArrayInputStream(value.getBytes())));
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/os-persistence/src/main/java/com/netflix/conductor/os/dao/query/parser/NameValue.java | os-persistence/src/main/java/com/netflix/conductor/os/dao/query/parser/NameValue.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.os.dao.query.parser;
import java.io.InputStream;
import org.opensearch.index.query.QueryBuilder;
import org.opensearch.index.query.QueryBuilders;
import com.netflix.conductor.os.dao.query.parser.internal.*;
import com.netflix.conductor.os.dao.query.parser.internal.ComparisonOp.Operators;
/**
 * Represents an expression of the form:
 *
 * <pre>
 * key OPR value
 * </pre>
 *
 * where OPR is one of the comparison operators: {@code >, <, =, !=, IS, IN, BETWEEN,
 * STARTS_WITH}. Depending on the operator, the right-hand side is a single constant, a list
 * ({@code IN}), or a range ({@code BETWEEN}).
 *
 * @author Viren
 */
public class NameValue extends AbstractNode implements FilterProvider {
    private Name name;
    private ComparisonOp op;
    private ConstValue value;
    private Range range;
    private ListConst valueList;
    public NameValue(InputStream is) throws ParserException {
        super(is);
    }
    @Override
    protected void _parse() throws Exception {
        this.name = new Name(is);
        this.op = new ComparisonOp(is);
        // The operator determines the shape of the right-hand side. The branches must be
        // mutually exclusive: previously the BETWEEN branch fell through and also parsed a
        // spurious (empty) ConstValue from the remaining stream.
        if (this.op.getOperator().equals(Operators.BETWEEN.value())) {
            this.range = new Range(is);
        } else if (this.op.getOperator().equals(Operators.IN.value())) {
            this.valueList = new ListConst(is);
        } else {
            this.value = new ConstValue(is);
        }
    }
    @Override
    public String toString() {
        return "" + name + op + value;
    }
    /**
     * @return the name
     */
    public Name getName() {
        return name;
    }
    /**
     * @return the op
     */
    public ComparisonOp getOp() {
        return op;
    }
    /**
     * @return the value
     */
    public ConstValue getValue() {
        return value;
    }
    /**
     * Maps this comparison to the equivalent OpenSearch query.
     *
     * @throws IllegalStateException if the operator is unsupported (or {@code IS} is used with
     *     a value other than {@code null}/{@code not null})
     */
    @Override
    public QueryBuilder getFilterBuilder() {
        if (op.getOperator().equals(Operators.EQUALS.value())) {
            return QueryBuilders.queryStringQuery(
                    name.getName() + ":" + value.getValue().toString());
        } else if (op.getOperator().equals(Operators.BETWEEN.value())) {
            return QueryBuilders.rangeQuery(name.getName())
                    .from(range.getLow())
                    .to(range.getHigh());
        } else if (op.getOperator().equals(Operators.IN.value())) {
            return QueryBuilders.termsQuery(name.getName(), valueList.getList());
        } else if (op.getOperator().equals(Operators.NOT_EQUALS.value())) {
            return QueryBuilders.queryStringQuery(
                    "NOT " + name.getName() + ":" + value.getValue().toString());
        } else if (op.getOperator().equals(Operators.GREATER_THAN.value())) {
            // Exclusive lower bound with no upper bound.
            return QueryBuilders.rangeQuery(name.getName())
                    .from(value.getValue())
                    .includeLower(false)
                    .includeUpper(false);
        } else if (op.getOperator().equals(Operators.IS.value())) {
            // "IS null" / "IS not null" map to existence checks via double negation.
            if (value.getSysConstant().equals(ConstValue.SystemConsts.NULL)) {
                return QueryBuilders.boolQuery()
                        .mustNot(
                                QueryBuilders.boolQuery()
                                        .must(QueryBuilders.matchAllQuery())
                                        .mustNot(QueryBuilders.existsQuery(name.getName())));
            } else if (value.getSysConstant().equals(ConstValue.SystemConsts.NOT_NULL)) {
                return QueryBuilders.boolQuery()
                        .mustNot(
                                QueryBuilders.boolQuery()
                                        .must(QueryBuilders.matchAllQuery())
                                        .must(QueryBuilders.existsQuery(name.getName())));
            }
        } else if (op.getOperator().equals(Operators.LESS_THAN.value())) {
            // Exclusive upper bound with no lower bound.
            return QueryBuilders.rangeQuery(name.getName())
                    .to(value.getValue())
                    .includeLower(false)
                    .includeUpper(false);
        } else if (op.getOperator().equals(Operators.STARTS_WITH.value())) {
            return QueryBuilders.prefixQuery(name.getName(), value.getUnquotedValue());
        }
        throw new IllegalStateException("Incorrect/unsupported operators");
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/os-persistence/src/main/java/com/netflix/conductor/os/dao/query/parser/GroupedExpression.java | os-persistence/src/main/java/com/netflix/conductor/os/dao/query/parser/GroupedExpression.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.os.dao.query.parser;
import java.io.InputStream;
import org.opensearch.index.query.QueryBuilder;
import com.netflix.conductor.os.dao.query.parser.internal.AbstractNode;
import com.netflix.conductor.os.dao.query.parser.internal.ParserException;
/**
 * A parenthesized sub-expression: {@code '(' expression ')'}.
 *
 * @author Viren
 */
public class GroupedExpression extends AbstractNode implements FilterProvider {
    private Expression expression;
    public GroupedExpression(InputStream is) throws ParserException {
        super(is);
    }
    @Override
    protected void _parse() throws Exception {
        // Consume the opening parenthesis, the inner expression, then the closer.
        assertExpected(read(1), "(");
        this.expression = new Expression(is);
        assertExpected(read(1), ")");
    }
    /**
     * @return the wrapped expression
     */
    public Expression getExpression() {
        return expression;
    }
    /** Grouping does not alter semantics; delegate filter construction to the inner node. */
    @Override
    public QueryBuilder getFilterBuilder() {
        return expression.getFilterBuilder();
    }
    @Override
    public String toString() {
        return "(" + expression + ")";
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/os-persistence/src/main/java/com/netflix/conductor/os/dao/query/parser/internal/ConstValue.java | os-persistence/src/main/java/com/netflix/conductor/os/dao/query/parser/internal/ConstValue.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.os.dao.query.parser.internal;
import java.io.InputStream;
/**
 * A constant value on the right-hand side of a comparison. A constant can be:
 *
 * <ol>
 *   <li>List of values (a,b,c)
 *   <li>Range of values (m AND n)
 *   <li>A value (x)
 *   <li>A value is either a string or a number
 * </ol>
 *
 * @author Viren
 */
public class ConstValue extends AbstractNode {
    /** System constants recognized by the {@code IS} operator. */
    public enum SystemConsts {
        NULL("null"),
        NOT_NULL("not null");
        private final String value;
        SystemConsts(String value) {
            this.value = value;
        }
        public String value() {
            return value;
        }
    }
    private static final String QUOTE = "\"";
    // Either a re-quoted string, a numeric string, or a SystemConsts value.
    private Object value;
    // Non-null only when the constant was "null" / "not null".
    private SystemConsts sysConsts;
    public ConstValue(InputStream is) throws ParserException {
        super(is);
    }
    @Override
    protected void _parse() throws Exception {
        // Four bytes of lookahead are enough to distinguish quoted strings,
        // "null", "not null" and numbers.
        byte[] peeked = peek(4);
        String sp = new String(peeked).trim();
        // Read a constant value (number or a string)
        if (peeked[0] == '"' || peeked[0] == '\'') {
            this.value = readString(is);
        } else if (sp.toLowerCase().startsWith("not")) {
            this.value = SystemConsts.NOT_NULL.value();
            sysConsts = SystemConsts.NOT_NULL;
            // Consume exactly the characters of "not null" from the stream.
            read(SystemConsts.NOT_NULL.value().length());
        } else if (sp.equalsIgnoreCase(SystemConsts.NULL.value())) {
            this.value = SystemConsts.NULL.value();
            sysConsts = SystemConsts.NULL;
            read(SystemConsts.NULL.value().length());
        } else {
            this.value = readNumber(is);
        }
    }
    // Consumes numeric characters (digits, '-', '.', 'e') until a non-numeric byte.
    private String readNumber(InputStream is) throws Exception {
        StringBuilder sb = new StringBuilder();
        while (is.available() > 0) {
            is.mark(1);
            char c = (char) is.read();
            if (!isNumeric(c)) {
                is.reset();
                break;
            } else {
                sb.append(c);
            }
        }
        String numValue = sb.toString().trim();
        return numValue;
    }
    /**
     * Reads an escaped string delimited by the quote character at the current position;
     * the result is re-quoted with double quotes.
     *
     * @throws ParserException if the closing quote is never found
     */
    private String readString(InputStream is) throws Exception {
        char delim = (char) read(1)[0];
        StringBuilder sb = new StringBuilder();
        boolean valid = false;
        while (is.available() > 0) {
            char c = (char) is.read();
            if (c == delim) {
                valid = true;
                break;
            } else if (c == '\\') {
                // read the next character as part of the value
                c = (char) is.read();
                sb.append(c);
            } else {
                sb.append(c);
            }
        }
        if (!valid) {
            throw new ParserException(
                    "String constant is not quoted with <" + delim + "> : " + sb.toString());
        }
        return QUOTE + sb.toString() + QUOTE;
    }
    public Object getValue() {
        return value;
    }
    @Override
    public String toString() {
        return "" + value;
    }
    /** @return the value with the surrounding double quotes stripped, if present */
    public String getUnquotedValue() {
        String result = toString();
        if (result.length() >= 2 && result.startsWith(QUOTE) && result.endsWith(QUOTE)) {
            result = result.substring(1, result.length() - 1);
        }
        return result;
    }
    public boolean isSysConstant() {
        return this.sysConsts != null;
    }
    public SystemConsts getSysConstant() {
        return this.sysConsts;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/os-persistence/src/main/java/com/netflix/conductor/os/dao/query/parser/internal/ListConst.java | os-persistence/src/main/java/com/netflix/conductor/os/dao/query/parser/internal/ListConst.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.os.dao.query.parser.internal;
import java.io.InputStream;
import java.util.LinkedList;
import java.util.List;
/**
 * A parenthesized, comma-separated list of constants, e.g. {@code (a,b,c)} -- the right-hand
 * side of an {@code IN} comparison.
 *
 * @author Viren
 */
public class ListConst extends AbstractNode {
    private List<Object> values;
    public ListConst(InputStream is) throws ParserException {
        super(is);
    }
    @Override
    protected void _parse() throws Exception {
        byte[] peeked = read(1);
        assertExpected(peeked, "(");
        this.values = readList();
    }
    /**
     * Reads trimmed elements until the closing parenthesis.
     *
     * <p>NOTE(review): the trailing accumulator is always appended, so an empty list
     * {@code ()} yields a single empty-string element -- preserved for backward compatibility.
     *
     * @throws ParserException if the stream ends before {@code ')'} is seen
     */
    private List<Object> readList() throws Exception {
        List<Object> list = new LinkedList<>();
        boolean valid = false;
        char c;
        StringBuilder sb = new StringBuilder();
        while (is.available() > 0) {
            c = (char) is.read();
            if (c == ')') {
                valid = true;
                break;
            } else if (c == ',') {
                list.add(sb.toString().trim());
                sb = new StringBuilder();
            } else {
                sb.append(c);
            }
        }
        list.add(sb.toString().trim());
        if (!valid) {
            throw new ParserException("Expected ')' but never encountered in the stream");
        }
        return list;
    }
    public List<Object> getList() {
        return values;
    }
    @Override
    public String toString() {
        return values.toString();
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/os-persistence/src/main/java/com/netflix/conductor/os/dao/query/parser/internal/FunctionThrowingException.java | os-persistence/src/main/java/com/netflix/conductor/os/dao/query/parser/internal/FunctionThrowingException.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.os.dao.query.parser.internal;
/**
 * A {@code Consumer}-like functional interface whose {@code accept} method is permitted to
 * throw a checked exception.
 *
 * @author Viren
 * @param <T> type of the value consumed
 */
@FunctionalInterface
public interface FunctionThrowingException<T> {

    /**
     * Consumes the given value, possibly throwing.
     *
     * @param input the value to consume
     * @throws Exception if the operation fails
     */
    void accept(T input) throws Exception;
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/os-persistence/src/main/java/com/netflix/conductor/os/dao/query/parser/internal/AbstractNode.java | os-persistence/src/main/java/com/netflix/conductor/os/dao/query/parser/internal/AbstractNode.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.os.dao.query.parser.internal;
import java.io.InputStream;
import java.math.BigDecimal;
import java.util.HashSet;
import java.util.Set;
import java.util.regex.Pattern;
/**
 * Base class for all parse-tree nodes. A node consumes its own characters from the shared
 * {@link InputStream} in its constructor (via {@link #_parse()}), with surrounding whitespace
 * skipped automatically. The stream must support {@code mark}/{@code reset} for
 * {@link #peek(int)} to work (callers pass a {@code BufferedInputStream}).
 *
 * @author Viren
 */
public abstract class AbstractNode {
    public static final Pattern WHITESPACE = Pattern.compile("\\s");
    // Single-character comparison operators recognized by isComparisonOpr.
    protected static final Set<Character> comparisonOprs = new HashSet<Character>();
    static {
        comparisonOprs.add('>');
        comparisonOprs.add('<');
        comparisonOprs.add('=');
    }
    protected InputStream is;
    protected AbstractNode(InputStream is) throws ParserException {
        this.is = is;
        this.parse();
    }
    /** @return true if {@code test} parses as a {@link BigDecimal} */
    protected boolean isNumber(String test) {
        try {
            // If you can convert to a big decimal value, then it is a number.
            new BigDecimal(test);
            return true;
        } catch (NumberFormatException e) {
            // Ignore
        }
        return false;
    }
    /** @return true if the buffer starts with the boolean operator "OR" or "AND" */
    protected boolean isBoolOpr(byte[] buffer) {
        if (buffer.length > 1 && buffer[0] == 'O' && buffer[1] == 'R') {
            return true;
        } else if (buffer.length > 2 && buffer[0] == 'A' && buffer[1] == 'N' && buffer[2] == 'D') {
            return true;
        }
        return false;
    }
    /** @return true if the buffer starts with a comparison operator (IN, !=, >, <, =) */
    protected boolean isComparisonOpr(byte[] buffer) {
        if (buffer[0] == 'I' && buffer[1] == 'N') {
            return true;
        } else if (buffer[0] == '!' && buffer[1] == '=') {
            return true;
        } else {
            return comparisonOprs.contains((char) buffer[0]);
        }
    }
    /** Reads {@code length} bytes without consuming them (uses mark/reset). */
    protected byte[] peek(int length) throws Exception {
        return read(length, true);
    }
    /** Reads and consumes {@code length} bytes. */
    protected byte[] read(int length) throws Exception {
        return read(length, false);
    }
    /**
     * Reads the next token: leading whitespace is skipped, then characters are accumulated
     * until whitespace (consumed) or an operator character (left in the stream).
     */
    protected String readToken() throws Exception {
        skipWhitespace();
        StringBuilder sb = new StringBuilder();
        while (is.available() > 0) {
            char c = (char) peek(1)[0];
            if (c == ' ' || c == '\t' || c == '\n' || c == '\r') {
                is.skip(1);
                break;
            } else if (c == '=' || c == '>' || c == '<' || c == '!') {
                // do not skip
                break;
            }
            sb.append(c);
            is.skip(1);
        }
        return sb.toString().trim();
    }
    /** @return true for characters that may appear in a number ('-', 'e', digits, '.') */
    protected boolean isNumeric(char c) {
        if (c == '-' || c == 'e' || (c >= '0' && c <= '9') || c == '.') {
            return true;
        }
        return false;
    }
    protected void assertExpected(byte[] found, String expected) throws ParserException {
        assertExpected(new String(found), expected);
    }
    protected void assertExpected(String found, String expected) throws ParserException {
        if (!found.equals(expected)) {
            throw new ParserException("Expected " + expected + ", found " + found);
        }
    }
    protected void assertExpected(char found, char expected) throws ParserException {
        if (found != expected) {
            throw new ParserException("Expected " + expected + ", found " + found);
        }
    }
    // "Exception-friendly for": runs the consumer for i in [0, length).
    protected static void efor(int length, FunctionThrowingException<Integer> consumer)
            throws Exception {
        for (int i = 0; i < length; i++) {
            consumer.accept(i);
        }
    }
    protected abstract void _parse() throws Exception;
    // Public stuff here
    private void parse() throws ParserException {
        // skip white spaces
        skipWhitespace();
        try {
            _parse();
        } catch (Exception e) {
            // Do NOT call this.toString() here: subclass toString implementations may NPE on
            // partially-initialized state and mask the original parse failure. (A debug
            // System.out.println that did exactly that was removed.)
            if (!(e instanceof ParserException)) {
                throw new ParserException("Error parsing", e);
            } else {
                throw (ParserException) e;
            }
        }
        skipWhitespace();
    }
    // Private methods
    private byte[] read(int length, boolean peekOnly) throws Exception {
        byte[] buf = new byte[length];
        if (peekOnly) {
            is.mark(length);
        }
        efor(length, (Integer c) -> buf[c] = (byte) is.read());
        if (peekOnly) {
            is.reset();
        }
        return buf;
    }
    /** Consumes consecutive whitespace bytes from the current stream position. */
    protected void skipWhitespace() throws ParserException {
        try {
            while (is.available() > 0) {
                byte c = peek(1)[0];
                if (c == ' ' || c == '\t' || c == '\n' || c == '\r') {
                    // skip
                    read(1);
                } else {
                    break;
                }
            }
        } catch (Exception e) {
            throw new ParserException(e.getMessage(), e);
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/os-persistence/src/main/java/com/netflix/conductor/os/dao/query/parser/internal/Name.java | os-persistence/src/main/java/com/netflix/conductor/os/dao/query/parser/internal/Name.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.os.dao.query.parser.internal;
import java.io.InputStream;
/**
 * The name of the field being searched against.
 *
 * @author Viren
 */
public class Name extends AbstractNode {
    private String value;
    public Name(InputStream is) throws ParserException {
        super(is);
    }
    @Override
    protected void _parse() throws Exception {
        // A field name is a single whitespace/operator-delimited token.
        value = readToken();
    }
    public String getName() {
        return value;
    }
    @Override
    public String toString() {
        return value;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/os-persistence/src/main/java/com/netflix/conductor/os/dao/query/parser/internal/Range.java | os-persistence/src/main/java/com/netflix/conductor/os/dao/query/parser/internal/Range.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.os.dao.query.parser.internal;
import java.io.InputStream;
/**
 * The range operand of a {@code BETWEEN} comparison: {@code low AND high}.
 *
 * @author Viren
 */
public class Range extends AbstractNode {
    private String low;
    private String high;
    public Range(InputStream is) throws ParserException {
        super(is);
    }
    @Override
    protected void _parse() throws Exception {
        // Validate both bounds; previously an empty lower bound was silently accepted.
        String num = readNumber(is);
        if (num == null || "".equals(num)) {
            throw new ParserException("Missing the lower range value...");
        }
        this.low = num;
        skipWhitespace();
        byte[] peeked = read(3);
        assertExpected(peeked, "AND");
        skipWhitespace();
        num = readNumber(is);
        if (num == null || "".equals(num)) {
            throw new ParserException("Missing the upper range value...");
        }
        this.high = num;
    }
    // Consumes numeric characters (digits, '-', '.', 'e') until a non-numeric byte.
    private String readNumber(InputStream is) throws Exception {
        StringBuilder sb = new StringBuilder();
        while (is.available() > 0) {
            is.mark(1);
            char c = (char) is.read();
            if (!isNumeric(c)) {
                is.reset();
                break;
            } else {
                sb.append(c);
            }
        }
        String numValue = sb.toString().trim();
        return numValue;
    }
    /**
     * @return the low
     */
    public String getLow() {
        return low;
    }
    /**
     * @return the high
     */
    public String getHigh() {
        return high;
    }
    @Override
    public String toString() {
        return low + " AND " + high;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/os-persistence/src/main/java/com/netflix/conductor/os/dao/query/parser/internal/BooleanOp.java | os-persistence/src/main/java/com/netflix/conductor/os/dao/query/parser/internal/BooleanOp.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.os.dao.query.parser.internal;
import java.io.InputStream;
/**
 * A boolean operator token: either {@code AND} or {@code OR}.
 *
 * @author Viren
 */
public class BooleanOp extends AbstractNode {
    private String value;
    public BooleanOp(InputStream is) throws ParserException {
        super(is);
    }
    @Override
    protected void _parse() throws Exception {
        // Peek up to three bytes -- enough to distinguish "OR" from "AND".
        byte[] lookahead = peek(3);
        boolean isOrToken = lookahead.length > 1 && lookahead[0] == 'O' && lookahead[1] == 'R';
        boolean isAndToken =
                lookahead.length > 2
                        && lookahead[0] == 'A'
                        && lookahead[1] == 'N'
                        && lookahead[2] == 'D';
        if (isOrToken) {
            value = "OR";
        } else if (isAndToken) {
            value = "AND";
        } else {
            throw new ParserException("No valid boolean operator found...");
        }
        // Consume exactly the operator characters that were peeked.
        read(value.length());
    }
    public String getOperator() {
        return value;
    }
    public boolean isAnd() {
        return "AND".equals(value);
    }
    public boolean isOr() {
        return "OR".equals(value);
    }
    @Override
    public String toString() {
        return " " + value + " ";
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/os-persistence/src/main/java/com/netflix/conductor/os/dao/query/parser/internal/ParserException.java | os-persistence/src/main/java/com/netflix/conductor/os/dao/query/parser/internal/ParserException.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.os.dao.query.parser.internal;
/**
 * Thrown when a search query string cannot be parsed.
 *
 * @author Viren
 */
@SuppressWarnings("serial")
public class ParserException extends Exception {

    /**
     * @param message description of the parse failure
     */
    public ParserException(String message) {
        super(message);
    }

    /**
     * @param message description of the parse failure
     * @param cause the underlying error
     */
    public ParserException(String message, Throwable cause) {
        super(message, cause);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/os-persistence/src/main/java/com/netflix/conductor/os/dao/query/parser/internal/ComparisonOp.java | os-persistence/src/main/java/com/netflix/conductor/os/dao/query/parser/internal/ComparisonOp.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.os.dao.query.parser.internal;
import java.io.InputStream;
/**
 * A comparison operator token: {@code =, >, <, !=, IS, IN, BETWEEN, STARTS_WITH}.
 *
 * @author Viren
 */
public class ComparisonOp extends AbstractNode {
    /** Supported comparison operators and their textual form in the query language. */
    public enum Operators {
        BETWEEN("BETWEEN"),
        EQUALS("="),
        LESS_THAN("<"),
        GREATER_THAN(">"),
        IN("IN"),
        NOT_EQUALS("!="),
        IS("IS"),
        STARTS_WITH("STARTS_WITH");
        private final String value;
        Operators(String value) {
            this.value = value;
        }
        public String value() {
            return value;
        }
    }
    // Computed once: the longest operator's length, which bounds the lookahead window.
    static {
        int max = 0;
        for (Operators op : Operators.values()) {
            max = Math.max(max, op.value().length());
        }
        maxOperatorLength = max;
    }
    private static final int maxOperatorLength;
    private static final int betweenLen = Operators.BETWEEN.value().length();
    private static final int startsWithLen = Operators.STARTS_WITH.value().length();
    private String value;
    public ComparisonOp(InputStream is) throws ParserException {
        super(is);
    }
    @Override
    protected void _parse() throws Exception {
        // Peek the longest possible operator, then consume only the matched one.
        byte[] peeked = peek(maxOperatorLength);
        if (peeked[0] == '=' || peeked[0] == '>' || peeked[0] == '<') {
            this.value = new String(peeked, 0, 1);
        } else if (peeked[0] == 'I' && peeked[1] == 'N') {
            this.value = "IN";
        } else if (peeked[0] == 'I' && peeked[1] == 'S') {
            this.value = "IS";
        } else if (peeked[0] == '!' && peeked[1] == '=') {
            this.value = "!=";
        } else if (peeked.length >= betweenLen
                && peeked[0] == 'B'
                && peeked[1] == 'E'
                && peeked[2] == 'T'
                && peeked[3] == 'W'
                && peeked[4] == 'E'
                && peeked[5] == 'E'
                && peeked[6] == 'N') {
            this.value = Operators.BETWEEN.value();
        } else if (peeked.length >= startsWithLen
                // Compare only the operator-length prefix: the old exact-length equality check
                // was brittle (it only held because STARTS_WITH happens to be the longest
                // operator, making the peek buffer exactly its size).
                && new String(peeked, 0, startsWithLen).equals(Operators.STARTS_WITH.value())) {
            this.value = Operators.STARTS_WITH.value();
        } else {
            // Error message now lists IS, which was previously omitted.
            throw new ParserException(
                    "Expecting an operator (=, >, <, !=, IS, BETWEEN, IN, STARTS_WITH), but found none. Peeked=>"
                            + new String(peeked));
        }
        read(this.value.length());
    }
    @Override
    public String toString() {
        return " " + value + " ";
    }
    public String getOperator() {
        return value;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/os-persistence/src/main/java/com/netflix/conductor/os/dao/index/OpenSearchRestDAO.java | os-persistence/src/main/java/com/netflix/conductor/os/dao/index/OpenSearchRestDAO.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.os.dao.index;
import java.io.IOException;
import java.io.InputStream;
import java.text.SimpleDateFormat;
import java.time.Instant;
import java.time.LocalDate;
import java.util.*;
import java.util.concurrent.*;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.apache.commons.io.IOUtils;
import org.apache.http.HttpEntity;
import org.apache.http.HttpStatus;
import org.apache.http.entity.ContentType;
import org.apache.http.nio.entity.NByteArrayEntity;
import org.apache.http.nio.entity.NStringEntity;
import org.apache.http.util.EntityUtils;
import org.joda.time.DateTime;
import org.opensearch.action.DocWriteResponse;
import org.opensearch.action.bulk.BulkRequest;
import org.opensearch.action.delete.DeleteRequest;
import org.opensearch.action.delete.DeleteResponse;
import org.opensearch.action.get.GetRequest;
import org.opensearch.action.get.GetResponse;
import org.opensearch.action.index.IndexRequest;
import org.opensearch.action.search.SearchRequest;
import org.opensearch.action.search.SearchResponse;
import org.opensearch.action.update.UpdateRequest;
import org.opensearch.client.*;
import org.opensearch.client.core.CountRequest;
import org.opensearch.client.core.CountResponse;
import org.opensearch.common.xcontent.XContentType;
import org.opensearch.index.query.BoolQueryBuilder;
import org.opensearch.index.query.QueryBuilder;
import org.opensearch.index.query.QueryBuilders;
import org.opensearch.search.SearchHit;
import org.opensearch.search.SearchHits;
import org.opensearch.search.builder.SearchSourceBuilder;
import org.opensearch.search.sort.FieldSortBuilder;
import org.opensearch.search.sort.SortOrder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.annotations.Trace;
import com.netflix.conductor.common.metadata.events.EventExecution;
import com.netflix.conductor.common.metadata.tasks.TaskExecLog;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.core.exception.TransientException;
import com.netflix.conductor.dao.IndexDAO;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.os.config.OpenSearchProperties;
import com.netflix.conductor.os.dao.query.parser.internal.ParserException;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.type.MapType;
import com.fasterxml.jackson.databind.type.TypeFactory;
import jakarta.annotation.PostConstruct;
import jakarta.annotation.PreDestroy;
@Trace
public class OpenSearchRestDAO extends OpenSearchBaseDAO implements IndexDAO {
    private static final Logger logger = LoggerFactory.getLogger(OpenSearchRestDAO.class);
    private static final String CLASS_NAME = OpenSearchRestDAO.class.getSimpleName();
    // Sizing for the main async indexing pool (max size comes from properties).
    private static final int CORE_POOL_SIZE = 6;
    private static final long KEEP_ALIVE_TIME = 1L;
    // Document "types" used to derive index names from the configured prefix.
    private static final String WORKFLOW_DOC_TYPE = "workflow";
    private static final String TASK_DOC_TYPE = "task";
    private static final String LOG_DOC_TYPE = "task_log";
    private static final String EVENT_DOC_TYPE = "event";
    private static final String MSG_DOC_TYPE = "message";
    private static final TimeZone GMT = TimeZone.getTimeZone("GMT");
    // Date suffix for time-partitioned indices (year/month/week-of-month).
    // NOTE(review): SimpleDateFormat is not thread-safe and this shared instance is used
    // from both setup and a background scheduled task — callers must synchronize on it.
    private static final SimpleDateFormat SIMPLE_DATE_FORMAT = new SimpleDateFormat("yyyyMMWW");
    // String constants for HTTP verbs used with the low-level RestClient requests.
    private @interface HttpMethod {
        String GET = "GET";
        String POST = "POST";
        String PUT = "PUT";
        String HEAD = "HEAD";
    }
    // NOTE(review): duplicates CLASS_NAME above; consider consolidating.
    private static final String className = OpenSearchRestDAO.class.getSimpleName();
    private final String workflowIndexName;
    private final String taskIndexName;
    private final String eventIndexPrefix;
    // Current date-suffixed index names; refreshed periodically by updateIndexesNames().
    private String eventIndexName;
    private final String messageIndexPrefix;
    private String messageIndexName;
    private String logIndexName;
    private final String logIndexPrefix;
    private final String clusterHealthColor;
    private final RestHighLevelClient openSearchClient;
    private final RestClient openSearchAdminClient;
    // Pool for workflow/task indexing; logExecutorService handles logs/events/messages.
    private final ExecutorService executorService;
    private final ExecutorService logExecutorService;
    // Buffered bulk requests keyed by doc type, flushed on size or timeout.
    private final ConcurrentHashMap<String, BulkRequests> bulkRequests;
    private final int indexBatchSize;
    private final int asyncBufferFlushTimeout;
    private final OpenSearchProperties properties;
    private final RetryTemplate retryTemplate;
    static {
        SIMPLE_DATE_FORMAT.setTimeZone(GMT);
    }
    /**
     * Builds the DAO: derives index names from the configured prefix and wires up two
     * bounded async worker pools (general indexing, and task-log/event/message indexing).
     * Work rejected by a full queue is logged and counted, not retried.
     * NOTE(review): the single-thread scheduler started here to flush buffered bulk
     * requests every 30s is never shut down.
     */
    public OpenSearchRestDAO(
            RestClientBuilder restClientBuilder,
            RetryTemplate retryTemplate,
            OpenSearchProperties properties,
            ObjectMapper objectMapper) {
        this.objectMapper = objectMapper;
        this.openSearchAdminClient = restClientBuilder.build();
        this.openSearchClient = new RestHighLevelClient(restClientBuilder);
        this.clusterHealthColor = properties.getClusterHealthColor();
        this.bulkRequests = new ConcurrentHashMap<>();
        this.indexBatchSize = properties.getIndexBatchSize();
        this.asyncBufferFlushTimeout = (int) properties.getAsyncBufferFlushTimeout().getSeconds();
        this.properties = properties;
        this.indexPrefix = properties.getIndexPrefix();
        this.workflowIndexName = getIndexName(WORKFLOW_DOC_TYPE);
        this.taskIndexName = getIndexName(TASK_DOC_TYPE);
        this.logIndexPrefix = this.indexPrefix + "_" + LOG_DOC_TYPE;
        this.messageIndexPrefix = this.indexPrefix + "_" + MSG_DOC_TYPE;
        this.eventIndexPrefix = this.indexPrefix + "_" + EVENT_DOC_TYPE;
        int workerQueueSize = properties.getAsyncWorkerQueueSize();
        int maximumPoolSize = properties.getAsyncMaxPoolSize();
        // Set up a workerpool for performing async operations.
        this.executorService =
                new ThreadPoolExecutor(
                        CORE_POOL_SIZE,
                        maximumPoolSize,
                        KEEP_ALIVE_TIME,
                        TimeUnit.MINUTES,
                        new LinkedBlockingQueue<>(workerQueueSize),
                        (runnable, executor) -> {
                            // Queue full: drop the request and record the discard.
                            logger.warn(
                                    "Request {} to async dao discarded in executor {}",
                                    runnable,
                                    executor);
                            Monitors.recordDiscardedIndexingCount("indexQueue");
                        });
        // Set up a workerpool for performing async operations for task_logs, event_executions,
        // message
        int corePoolSize = 1;
        maximumPoolSize = 2;
        long keepAliveTime = 30L;
        this.logExecutorService =
                new ThreadPoolExecutor(
                        corePoolSize,
                        maximumPoolSize,
                        keepAliveTime,
                        TimeUnit.SECONDS,
                        new LinkedBlockingQueue<>(workerQueueSize),
                        (runnable, executor) -> {
                            logger.warn(
                                    "Request {} to async log dao discarded in executor {}",
                                    runnable,
                                    executor);
                            Monitors.recordDiscardedIndexingCount("logQueue");
                        });
        Executors.newSingleThreadScheduledExecutor()
                .scheduleAtFixedRate(this::flushBulkRequests, 60, 30, TimeUnit.SECONDS);
        this.retryTemplate = retryTemplate;
    }
@PreDestroy
private void shutdown() {
logger.info("Gracefully shutdown executor service");
shutdownExecutorService(logExecutorService);
shutdownExecutorService(executorService);
}
    /**
     * Shuts down the given executor, waiting up to 30 seconds for in-flight tasks before
     * forcing termination. Restores the thread's interrupt flag if the wait is interrupted.
     */
    private void shutdownExecutorService(ExecutorService execService) {
        try {
            execService.shutdown();
            if (execService.awaitTermination(30, TimeUnit.SECONDS)) {
                logger.debug("tasks completed, shutting down");
            } else {
                logger.warn("Forcing shutdown after waiting for 30 seconds");
                execService.shutdownNow();
            }
        } catch (InterruptedException ie) {
            // NOTE(review): message looks copy-pasted from another shutdown path.
            logger.warn(
                    "Shutdown interrupted, invoking shutdownNow on scheduledThreadPoolExecutor for delay queue");
            execService.shutdownNow();
            Thread.currentThread().interrupt();
        }
    }
    /**
     * Startup hook: blocks until the cluster reports the configured health status, then —
     * when auto index management is enabled — installs index templates and creates the
     * workflow and task indices.
     */
    @Override
    @PostConstruct
    public void setup() throws Exception {
        waitForHealthyCluster();
        if (properties.isAutoIndexManagementEnabled()) {
            createIndexesTemplates();
            createWorkflowIndex();
            createTaskIndex();
        }
    }
    /**
     * Installs index templates and schedules an hourly refresh of the date-suffixed
     * log/event/message index names. Failures are logged and swallowed so indexing
     * degrades rather than blocking startup.
     * NOTE(review): the scheduled executor created here is never shut down.
     */
    private void createIndexesTemplates() {
        try {
            initIndexesTemplates();
            updateIndexesNames();
            Executors.newScheduledThreadPool(1)
                    .scheduleAtFixedRate(this::updateIndexesNames, 0, 1, TimeUnit.HOURS);
        } catch (Exception e) {
            logger.error("Error creating index templates!", e);
        }
    }
private void initIndexesTemplates() {
initIndexTemplate(LOG_DOC_TYPE);
initIndexTemplate(EVENT_DOC_TYPE);
initIndexTemplate(MSG_DOC_TYPE);
}
/** Initializes the index with the required templates and mappings. */
private void initIndexTemplate(String type) {
String template = "template_" + type;
try {
if (doesResourceNotExist("/_index_template/" + template)) {
logger.info("Creating the index template '" + template + "'");
InputStream stream =
OpenSearchRestDAO.class.getResourceAsStream("/" + template + ".json");
byte[] templateSource = IOUtils.toByteArray(stream);
HttpEntity entity =
new NByteArrayEntity(templateSource, ContentType.APPLICATION_JSON);
Request request = new Request(HttpMethod.PUT, "/_index_template/" + template);
request.setEntity(entity);
String test =
IOUtils.toString(
openSearchAdminClient
.performRequest(request)
.getEntity()
.getContent());
}
} catch (Exception e) {
logger.error("Failed to init " + template, e);
}
}
    /**
     * Recomputes the current date-suffixed log/event/message index names, creating the
     * backing indices if needed. Runs at startup and then hourly.
     */
    private void updateIndexesNames() {
        logIndexName = updateIndexName(LOG_DOC_TYPE);
        eventIndexName = updateIndexName(EVENT_DOC_TYPE);
        messageIndexName = updateIndexName(MSG_DOC_TYPE);
    }
private String updateIndexName(String type) {
String indexName =
this.indexPrefix + "_" + type + "_" + SIMPLE_DATE_FORMAT.format(new Date());
try {
addIndex(indexName);
return indexName;
} catch (IOException e) {
logger.error("Failed to update log index name: {}", indexName, e);
throw new NonTransientException(e.getMessage(), e);
}
}
    /** Creates the workflow index with its classpath mapping; failures are logged only. */
    private void createWorkflowIndex() {
        String indexName = getIndexName(WORKFLOW_DOC_TYPE);
        try {
            addIndex(indexName, "/mappings_docType_workflow.json");
        } catch (IOException e) {
            logger.error("Failed to initialize index '{}'", indexName, e);
        }
    }
    /** Creates the task index with its classpath mapping; failures are logged only. */
    private void createTaskIndex() {
        String indexName = getIndexName(TASK_DOC_TYPE);
        try {
            addIndex(indexName, "/mappings_docType_task.json");
        } catch (IOException e) {
            logger.error("Failed to initialize index '{}'", indexName, e);
        }
    }
/**
* Waits for the ES cluster to become green.
*
* @throws Exception If there is an issue connecting with the ES cluster.
*/
private void waitForHealthyCluster() throws Exception {
Map<String, String> params = new HashMap<>();
params.put("timeout", "30s");
params.put("wait_for_status", this.clusterHealthColor);
Request request = new Request("GET", "/_cluster/health");
request.addParameters(params);
openSearchAdminClient.performRequest(request);
}
/**
* Adds an index to opensearch if it does not exist.
*
* @param index The name of the index to create.
* @param mappingFilename Index mapping filename
* @throws IOException If an error occurred during requests to ES.
*/
private void addIndex(String index, final String mappingFilename) throws IOException {
logger.info("Adding index '{}'...", index);
String resourcePath = "/" + index;
if (doesResourceNotExist(resourcePath)) {
try {
ObjectNode setting = objectMapper.createObjectNode();
ObjectNode indexSetting = objectMapper.createObjectNode();
ObjectNode root = objectMapper.createObjectNode();
indexSetting.put("number_of_shards", properties.getIndexShardCount());
indexSetting.put("number_of_replicas", properties.getIndexReplicasCount());
JsonNode mappingNodeValue =
objectMapper.readTree(loadTypeMappingSource(mappingFilename));
root.set("settings", indexSetting);
root.set("mappings", mappingNodeValue);
Request request = new Request(HttpMethod.PUT, resourcePath);
request.setEntity(
new NStringEntity(
objectMapper.writeValueAsString(root),
ContentType.APPLICATION_JSON));
openSearchAdminClient.performRequest(request);
logger.info("Added '{}' index", index);
} catch (ResponseException e) {
boolean errorCreatingIndex = true;
Response errorResponse = e.getResponse();
if (errorResponse.getStatusLine().getStatusCode() == HttpStatus.SC_BAD_REQUEST) {
JsonNode root =
objectMapper.readTree(EntityUtils.toString(errorResponse.getEntity()));
String errorCode = root.get("error").get("type").asText();
if ("index_already_exists_exception".equals(errorCode)) {
errorCreatingIndex = false;
}
}
if (errorCreatingIndex) {
throw e;
}
}
} else {
logger.info("Index '{}' already exists", index);
}
}
    /**
     * Adds an index to opensearch if it does not exist, applying only the configured shard
     * and replica counts (no explicit mapping — used for the time-partitioned indices whose
     * mappings come from index templates).
     *
     * @param index The name of the index to create.
     * @throws IOException If an error occurred during requests to ES.
     */
    private void addIndex(final String index) throws IOException {
        logger.info("Adding index '{}'...", index);
        String resourcePath = "/" + index;
        if (doesResourceNotExist(resourcePath)) {
            try {
                ObjectNode setting = objectMapper.createObjectNode();
                ObjectNode indexSetting = objectMapper.createObjectNode();
                indexSetting.put("number_of_shards", properties.getIndexShardCount());
                indexSetting.put("number_of_replicas", properties.getIndexReplicasCount());
                setting.set("settings", indexSetting);
                Request request = new Request(HttpMethod.PUT, resourcePath);
                request.setEntity(
                        new NStringEntity(setting.toString(), ContentType.APPLICATION_JSON));
                openSearchAdminClient.performRequest(request);
                logger.info("Added '{}' index", index);
            } catch (ResponseException e) {
                // Tolerate a lost race: the index may have been created concurrently
                // between the existence check and the PUT.
                boolean errorCreatingIndex = true;
                Response errorResponse = e.getResponse();
                if (errorResponse.getStatusLine().getStatusCode() == HttpStatus.SC_BAD_REQUEST) {
                    JsonNode root =
                            objectMapper.readTree(EntityUtils.toString(errorResponse.getEntity()));
                    String errorCode = root.get("error").get("type").asText();
                    if ("index_already_exists_exception".equals(errorCode)) {
                        errorCreatingIndex = false;
                    }
                }
                if (errorCreatingIndex) {
                    throw e;
                }
            }
        } else {
            logger.info("Index '{}' already exists", index);
        }
    }
    /**
     * Adds a mapping type to an index if it does not exist.
     *
     * @param index The name of the index.
     * @param mappingType The name of the mapping type.
     * @param mappingFilename The name of the mapping file to use to add the mapping if it does not
     *     exist.
     * @throws IOException If an error occurred during requests to ES.
     */
    private void addMappingToIndex(
            final String index, final String mappingType, final String mappingFilename)
            throws IOException {
        logger.info("Adding '{}' mapping to index '{}'...", mappingType, index);
        String resourcePath = "/" + index + "/_mapping";
        if (doesResourceNotExist(resourcePath)) {
            // Mapping JSON is loaded from the classpath with the index prefix applied.
            HttpEntity entity =
                    new NByteArrayEntity(
                            loadTypeMappingSource(mappingFilename).getBytes(),
                            ContentType.APPLICATION_JSON);
            Request request = new Request(HttpMethod.PUT, resourcePath);
            request.setEntity(entity);
            openSearchAdminClient.performRequest(request);
            logger.info("Added '{}' mapping", mappingType);
        } else {
            logger.info("Mapping '{}' already exists", mappingType);
        }
    }
    /**
     * Determines whether a resource exists in ES. This will call a HEAD method to a particular
     * path and return true if status 200; false otherwise.
     *
     * @param resourcePath The path of the resource to get.
     * @return True if it exists; false otherwise.
     * @throws IOException If an error occurred during requests to ES.
     */
    public boolean doesResourceExist(final String resourcePath) throws IOException {
        Request request = new Request(HttpMethod.HEAD, resourcePath);
        Response response = openSearchAdminClient.performRequest(request);
        return response.getStatusLine().getStatusCode() == HttpStatus.SC_OK;
    }
    /**
     * The inverse of doesResourceExist.
     *
     * @param resourcePath The path of the resource to check.
     * @return True if it does not exist; false otherwise.
     * @throws IOException If an error occurred during requests to ES.
     */
    public boolean doesResourceNotExist(final String resourcePath) throws IOException {
        return !doesResourceExist(resourcePath);
    }
    /**
     * Synchronously indexes a workflow summary (document id = workflowId). Errors are
     * logged and recorded via {@link Monitors}, never propagated to the caller.
     * NOTE(review): unlike {@code indexTask}, this bypasses the buffered bulk path and
     * issues a direct index call — presumably intentional for durability; confirm.
     */
    @Override
    public void indexWorkflow(WorkflowSummary workflow) {
        try {
            long startTime = Instant.now().toEpochMilli();
            String workflowId = workflow.getWorkflowId();
            byte[] docBytes = objectMapper.writeValueAsBytes(workflow);
            IndexRequest request =
                    new IndexRequest(workflowIndexName)
                            .id(workflowId)
                            .source(docBytes, XContentType.JSON);
            openSearchClient.index(request, RequestOptions.DEFAULT);
            long endTime = Instant.now().toEpochMilli();
            logger.debug(
                    "Time taken {} for indexing workflow: {}", endTime - startTime, workflowId);
            Monitors.recordESIndexTime("index_workflow", WORKFLOW_DOC_TYPE, endTime - startTime);
            Monitors.recordWorkerQueueSize(
                    "indexQueue", ((ThreadPoolExecutor) executorService).getQueue().size());
        } catch (Exception e) {
            Monitors.error(className, "indexWorkflow");
            logger.error("Failed to index workflow: {}", workflow.getWorkflowId(), e);
        }
    }
    /** Indexes the workflow summary on the bounded async indexing pool. */
    @Override
    public CompletableFuture<Void> asyncIndexWorkflow(WorkflowSummary workflow) {
        return CompletableFuture.runAsync(() -> indexWorkflow(workflow), executorService);
    }
    /**
     * Indexes a task summary via the buffered bulk path ({@code indexObject}); the document
     * id is the taskId. Errors are logged, never propagated.
     */
    @Override
    public void indexTask(TaskSummary task) {
        try {
            long startTime = Instant.now().toEpochMilli();
            String taskId = task.getTaskId();
            indexObject(taskIndexName, TASK_DOC_TYPE, taskId, task);
            long endTime = Instant.now().toEpochMilli();
            logger.debug(
                    "Time taken {} for indexing task:{} in workflow: {}",
                    endTime - startTime,
                    taskId,
                    task.getWorkflowId());
            Monitors.recordESIndexTime("index_task", TASK_DOC_TYPE, endTime - startTime);
            Monitors.recordWorkerQueueSize(
                    "indexQueue", ((ThreadPoolExecutor) executorService).getQueue().size());
        } catch (Exception e) {
            logger.error("Failed to index task: {}", task.getTaskId(), e);
        }
    }
    /** Indexes the task summary on the bounded async indexing pool. */
    @Override
    public CompletableFuture<Void> asyncIndexTask(TaskSummary task) {
        return CompletableFuture.runAsync(() -> indexTask(task), executorService);
    }
@Override
public void addTaskExecutionLogs(List<TaskExecLog> taskExecLogs) {
if (taskExecLogs.isEmpty()) {
return;
}
long startTime = Instant.now().toEpochMilli();
BulkRequest bulkRequest = new BulkRequest();
for (TaskExecLog log : taskExecLogs) {
byte[] docBytes;
try {
docBytes = objectMapper.writeValueAsBytes(log);
} catch (JsonProcessingException e) {
logger.error("Failed to convert task log to JSON for task {}", log.getTaskId());
continue;
}
IndexRequest request = new IndexRequest(logIndexName);
request.source(docBytes, XContentType.JSON);
bulkRequest.add(request);
}
try {
openSearchClient.bulk(bulkRequest, RequestOptions.DEFAULT);
long endTime = Instant.now().toEpochMilli();
logger.debug("Time taken {} for indexing taskExecutionLogs", endTime - startTime);
Monitors.recordESIndexTime(
"index_task_execution_logs", LOG_DOC_TYPE, endTime - startTime);
Monitors.recordWorkerQueueSize(
"logQueue", ((ThreadPoolExecutor) logExecutorService).getQueue().size());
} catch (Exception e) {
List<String> taskIds =
taskExecLogs.stream().map(TaskExecLog::getTaskId).collect(Collectors.toList());
logger.error("Failed to index task execution logs for tasks: {}", taskIds, e);
}
}
    /** Indexes task execution logs on the dedicated log/event/message pool. */
    @Override
    public CompletableFuture<Void> asyncAddTaskExecutionLogs(List<TaskExecLog> logs) {
        return CompletableFuture.runAsync(() -> addTaskExecutionLogs(logs), logExecutorService);
    }
    /**
     * Fetches execution logs for a task, sorted by creation time ascending and capped at
     * the configured task-log result limit.
     * NOTE(review): returns {@code null} (not an empty list) on failure — callers must
     * tolerate null.
     */
    @Override
    public List<TaskExecLog> getTaskExecutionLogs(String taskId) {
        try {
            BoolQueryBuilder query = boolQueryBuilder("taskId='" + taskId + "'", "*");
            // Create the searchObjectIdsViaExpression source
            SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
            searchSourceBuilder.query(query);
            searchSourceBuilder.sort(new FieldSortBuilder("createdTime").order(SortOrder.ASC));
            searchSourceBuilder.size(properties.getTaskLogResultLimit());
            // Generate the actual request to send to ES.
            SearchRequest searchRequest = new SearchRequest(logIndexPrefix + "*");
            searchRequest.source(searchSourceBuilder);
            SearchResponse response =
                    openSearchClient.search(searchRequest, RequestOptions.DEFAULT);
            return mapTaskExecLogsResponse(response);
        } catch (Exception e) {
            logger.error("Failed to get task execution logs for task: {}", taskId, e);
        }
        return null;
    }
private List<TaskExecLog> mapTaskExecLogsResponse(SearchResponse response) throws IOException {
SearchHit[] hits = response.getHits().getHits();
List<TaskExecLog> logs = new ArrayList<>(hits.length);
for (SearchHit hit : hits) {
String source = hit.getSourceAsString();
TaskExecLog tel = objectMapper.readValue(source, TaskExecLog.class);
logs.add(tel);
}
return logs;
}
    /**
     * Fetches indexed messages for a queue, sorted by creation time ascending.
     * NOTE(review): returns {@code null} (not an empty list) on failure.
     */
    @Override
    public List<Message> getMessages(String queue) {
        try {
            BoolQueryBuilder query = boolQueryBuilder("queue='" + queue + "'", "*");
            // Create the searchObjectIdsViaExpression source
            SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
            searchSourceBuilder.query(query);
            searchSourceBuilder.sort(new FieldSortBuilder("created").order(SortOrder.ASC));
            // Generate the actual request to send to ES.
            SearchRequest searchRequest = new SearchRequest(messageIndexPrefix + "*");
            searchRequest.source(searchSourceBuilder);
            SearchResponse response =
                    openSearchClient.search(searchRequest, RequestOptions.DEFAULT);
            return mapGetMessagesResponse(response);
        } catch (Exception e) {
            logger.error("Failed to get messages for queue: {}", queue, e);
        }
        return null;
    }
    /**
     * Maps search hits back to {@link Message} objects. Each hit's source is read as a
     * flat String→String map; only "messageId" and "payload" are used (the "queue" and
     * "created" fields written by {@code addMessage} are ignored here).
     */
    private List<Message> mapGetMessagesResponse(SearchResponse response) throws IOException {
        SearchHit[] hits = response.getHits().getHits();
        TypeFactory factory = TypeFactory.defaultInstance();
        MapType type = factory.constructMapType(HashMap.class, String.class, String.class);
        List<Message> messages = new ArrayList<>(hits.length);
        for (SearchHit hit : hits) {
            String source = hit.getSourceAsString();
            Map<String, String> mapSource = objectMapper.readValue(source, type);
            Message msg = new Message(mapSource.get("messageId"), mapSource.get("payload"), null);
            messages.add(msg);
        }
        return messages;
    }
    /**
     * Fetches event executions for an event, sorted by creation time ascending.
     * NOTE(review): returns {@code null} (not an empty list) on failure.
     */
    @Override
    public List<EventExecution> getEventExecutions(String event) {
        try {
            BoolQueryBuilder query = boolQueryBuilder("event='" + event + "'", "*");
            // Create the searchObjectIdsViaExpression source
            SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
            searchSourceBuilder.query(query);
            searchSourceBuilder.sort(new FieldSortBuilder("created").order(SortOrder.ASC));
            // Generate the actual request to send to ES.
            SearchRequest searchRequest = new SearchRequest(eventIndexPrefix + "*");
            searchRequest.source(searchSourceBuilder);
            SearchResponse response =
                    openSearchClient.search(searchRequest, RequestOptions.DEFAULT);
            return mapEventExecutionsResponse(response);
        } catch (Exception e) {
            logger.error("Failed to get executions for event: {}", event, e);
        }
        return null;
    }
private List<EventExecution> mapEventExecutionsResponse(SearchResponse response)
throws IOException {
SearchHit[] hits = response.getHits().getHits();
List<EventExecution> executions = new ArrayList<>(hits.length);
for (SearchHit hit : hits) {
String source = hit.getSourceAsString();
EventExecution tel = objectMapper.readValue(source, EventExecution.class);
executions.add(tel);
}
return executions;
}
    /**
     * Indexes a queue message as a small map document (messageId, payload, queue, created
     * timestamp) via the buffered bulk path. Errors are logged, never propagated.
     */
    @Override
    public void addMessage(String queue, Message message) {
        try {
            long startTime = Instant.now().toEpochMilli();
            Map<String, Object> doc = new HashMap<>();
            doc.put("messageId", message.getId());
            doc.put("payload", message.getPayload());
            doc.put("queue", queue);
            doc.put("created", System.currentTimeMillis());
            indexObject(messageIndexName, MSG_DOC_TYPE, doc);
            long endTime = Instant.now().toEpochMilli();
            logger.debug(
                    "Time taken {} for indexing message: {}",
                    endTime - startTime,
                    message.getId());
            Monitors.recordESIndexTime("add_message", MSG_DOC_TYPE, endTime - startTime);
        } catch (Exception e) {
            logger.error("Failed to index message: {}", message.getId(), e);
        }
    }
    /** Indexes the message on the bounded async indexing pool. */
    @Override
    public CompletableFuture<Void> asyncAddMessage(String queue, Message message) {
        return CompletableFuture.runAsync(() -> addMessage(queue, message), executorService);
    }
    /**
     * Indexes an event execution. The document id is the dot-joined compound
     * name.event.messageId.id, making repeated indexing of the same execution idempotent.
     * Errors are logged, never propagated.
     */
    @Override
    public void addEventExecution(EventExecution eventExecution) {
        try {
            long startTime = Instant.now().toEpochMilli();
            String id =
                    eventExecution.getName()
                            + "."
                            + eventExecution.getEvent()
                            + "."
                            + eventExecution.getMessageId()
                            + "."
                            + eventExecution.getId();
            indexObject(eventIndexName, EVENT_DOC_TYPE, id, eventExecution);
            long endTime = Instant.now().toEpochMilli();
            logger.debug(
                    "Time taken {} for indexing event execution: {}",
                    endTime - startTime,
                    eventExecution.getId());
            Monitors.recordESIndexTime("add_event_execution", EVENT_DOC_TYPE, endTime - startTime);
            Monitors.recordWorkerQueueSize(
                    "logQueue", ((ThreadPoolExecutor) logExecutorService).getQueue().size());
        } catch (Exception e) {
            logger.error("Failed to index event execution: {}", eventExecution.getId(), e);
        }
    }
    /** Indexes the event execution on the dedicated log/event/message pool. */
    @Override
    public CompletableFuture<Void> asyncAddEventExecution(EventExecution eventExecution) {
        return CompletableFuture.runAsync(
                () -> addEventExecution(eventExecution), logExecutorService);
    }
    /**
     * Searches workflows by structured query + free text, returning matching workflow ids.
     * Any underlying failure is rethrown as {@link NonTransientException}.
     */
    @Override
    public SearchResult<String> searchWorkflows(
            String query, String freeText, int start, int count, List<String> sort) {
        try {
            return searchObjectIdsViaExpression(
                    query, start, count, sort, freeText, WORKFLOW_DOC_TYPE);
        } catch (Exception e) {
            throw new NonTransientException(e.getMessage(), e);
        }
    }
    /**
     * Searches workflows by structured query + free text, returning full summaries.
     * NOTE(review): wraps failures in {@link TransientException} while
     * {@code searchWorkflows} uses {@link NonTransientException} — confirm whether this
     * asymmetry is intentional.
     */
    @Override
    public SearchResult<WorkflowSummary> searchWorkflowSummary(
            String query, String freeText, int start, int count, List<String> sort) {
        try {
            return searchObjectsViaExpression(
                    query,
                    start,
                    count,
                    sort,
                    freeText,
                    WORKFLOW_DOC_TYPE,
                    false,
                    WorkflowSummary.class);
        } catch (Exception e) {
            throw new TransientException(e.getMessage(), e);
        }
    }
private <T> SearchResult<T> searchObjectsViaExpression(
String structuredQuery,
int start,
int size,
List<String> sortOptions,
String freeTextQuery,
String docType,
boolean idOnly,
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | true |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/os-persistence/src/main/java/com/netflix/conductor/os/dao/index/OpenSearchBaseDAO.java | os-persistence/src/main/java/com/netflix/conductor/os/dao/index/OpenSearchBaseDAO.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.os.dao.index;
import java.io.IOException;
import java.util.ArrayList;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.opensearch.index.query.BoolQueryBuilder;
import org.opensearch.index.query.QueryBuilder;
import org.opensearch.index.query.QueryBuilders;
import org.opensearch.index.query.QueryStringQueryBuilder;
import com.netflix.conductor.dao.IndexDAO;
import com.netflix.conductor.os.dao.query.parser.Expression;
import com.netflix.conductor.os.dao.query.parser.internal.ParserException;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;
/**
 * Shared helpers for the OpenSearch index DAOs: classpath mapping loading with index-prefix
 * rewriting, query construction, and index-name derivation.
 */
abstract class OpenSearchBaseDAO implements IndexDAO {

    String indexPrefix;
    ObjectMapper objectMapper;

    /**
     * Loads a mapping/template JSON resource from the classpath and applies the configured
     * index prefix to its "index_patterns" entries.
     *
     * @throws IOException if the resource is missing or unreadable
     */
    String loadTypeMappingSource(String path) throws IOException {
        // try-with-resources: the previous implementation leaked the stream and would NPE
        // without context when the resource was absent.
        try (var stream = OpenSearchBaseDAO.class.getResourceAsStream(path)) {
            if (stream == null) {
                throw new IOException("Mapping resource not found on classpath: " + path);
            }
            return applyIndexPrefixToTemplate(IOUtils.toString(stream));
        }
    }

    /**
     * Rewrites every entry of the "index_patterns" array (if present) so it carries the
     * configured index prefix; returns the text unchanged when there is nothing to rewrite.
     */
    private String applyIndexPrefixToTemplate(String text) throws JsonProcessingException {
        String indexPatternsFieldName = "index_patterns";
        JsonNode root = objectMapper.readTree(text);
        if (root != null) {
            JsonNode indexPatternsNodeValue = root.get(indexPatternsFieldName);
            if (indexPatternsNodeValue != null && indexPatternsNodeValue.isArray()) {
                ArrayList<String> patternsWithPrefix = new ArrayList<>();
                indexPatternsNodeValue.forEach(
                        v -> {
                            String patternText = v.asText();
                            StringBuilder sb = new StringBuilder();
                            if (patternText.startsWith("*")) {
                                // Keep a leading wildcard in front of the prefix.
                                sb.append("*")
                                        .append(indexPrefix)
                                        .append("_")
                                        .append(patternText.substring(1));
                            } else {
                                sb.append(indexPrefix).append("_").append(patternText);
                            }
                            patternsWithPrefix.add(sb.toString());
                        });
                ((ObjectNode) root)
                        .set(indexPatternsFieldName, objectMapper.valueToTree(patternsWithPrefix));
                // Serialize once. The previous implementation also dumped the result to
                // stdout (leftover debug output) and serialized the tree twice.
                return objectMapper.writerWithDefaultPrettyPrinter().writeValueAsString(root);
            }
        }
        return text;
    }

    /**
     * Builds a bool query combining the parsed structured expression (match-all when empty)
     * with a free-text query string.
     *
     * @throws ParserException if the structured expression cannot be parsed
     */
    org.opensearch.index.query.BoolQueryBuilder boolQueryBuilder(
            String expression, String queryString) throws ParserException {
        QueryBuilder queryBuilder = QueryBuilders.matchAllQuery();
        if (StringUtils.isNotEmpty(expression)) {
            Expression exp = Expression.fromString(expression);
            queryBuilder = exp.getFilterBuilder();
        }
        BoolQueryBuilder filterQuery = QueryBuilders.boolQuery().must(queryBuilder);
        QueryStringQueryBuilder stringQuery = QueryBuilders.queryStringQuery(queryString);
        return QueryBuilders.boolQuery().must(stringQuery).must(filterQuery);
    }

    /** Derives the prefixed index name for a document type, e.g. "conductor_workflow". */
    protected String getIndexName(String documentType) {
        return indexPrefix + "_" + documentType;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/os-persistence/src/main/java/com/netflix/conductor/os/dao/index/BulkRequestBuilderWrapper.java | os-persistence/src/main/java/com/netflix/conductor/os/dao/index/BulkRequestBuilderWrapper.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.os.dao.index;
import java.util.Objects;
import org.opensearch.action.bulk.BulkRequestBuilder;
import org.opensearch.action.bulk.BulkResponse;
import org.opensearch.action.index.IndexRequest;
import org.opensearch.action.update.UpdateRequest;
import org.springframework.lang.NonNull;
/**
 * Wraps a {@link BulkRequestBuilder} so that it can be shared safely between threads.
 *
 * <p>All operations synchronize on the wrapped builder instance, which is never exposed.
 */
public class BulkRequestBuilderWrapper {

    private final BulkRequestBuilder bulkRequestBuilder;

    public BulkRequestBuilderWrapper(@NonNull BulkRequestBuilder bulkRequestBuilder) {
        this.bulkRequestBuilder = Objects.requireNonNull(bulkRequestBuilder);
    }

    /** Queues an update operation on the underlying bulk builder. */
    public void add(@NonNull UpdateRequest req) {
        Objects.requireNonNull(req);
        synchronized (bulkRequestBuilder) {
            bulkRequestBuilder.add(req);
        }
    }

    /** Queues an index operation on the underlying bulk builder. */
    public void add(@NonNull IndexRequest req) {
        Objects.requireNonNull(req);
        synchronized (bulkRequestBuilder) {
            bulkRequestBuilder.add(req);
        }
    }

    /** Returns how many operations have been queued so far. */
    public int numberOfActions() {
        synchronized (bulkRequestBuilder) {
            return bulkRequestBuilder.numberOfActions();
        }
    }

    /** Kicks off the accumulated bulk request and returns its future. */
    public org.opensearch.common.action.ActionFuture<BulkResponse> execute() {
        synchronized (bulkRequestBuilder) {
            return bulkRequestBuilder.execute();
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/os-persistence/src/main/java/com/netflix/conductor/os/dao/index/BulkRequestWrapper.java | os-persistence/src/main/java/com/netflix/conductor/os/dao/index/BulkRequestWrapper.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.os.dao.index;
import java.util.Objects;
import org.opensearch.action.bulk.BulkRequest;
import org.opensearch.action.index.IndexRequest;
import org.opensearch.action.update.UpdateRequest;
import org.springframework.lang.NonNull;
/**
 * Wraps a {@link BulkRequest} so that concurrent producers can append operations safely.
 *
 * <p>Mutations synchronize on the wrapped request. Note that {@link #get()} hands back
 * the raw request for submission; callers must not mutate it concurrently thereafter.
 */
class BulkRequestWrapper {

    private final BulkRequest bulkRequest;

    BulkRequestWrapper(@NonNull BulkRequest bulkRequest) {
        this.bulkRequest = Objects.requireNonNull(bulkRequest);
    }

    /** Appends an update operation to the wrapped bulk request. */
    public void add(@NonNull UpdateRequest req) {
        Objects.requireNonNull(req);
        synchronized (bulkRequest) {
            bulkRequest.add(req);
        }
    }

    /** Appends an index operation to the wrapped bulk request. */
    public void add(@NonNull IndexRequest req) {
        Objects.requireNonNull(req);
        synchronized (bulkRequest) {
            bulkRequest.add(req);
        }
    }

    /** Exposes the underlying request for submission to the client. */
    BulkRequest get() {
        return bulkRequest;
    }

    /** Returns how many operations have been queued so far. */
    int numberOfActions() {
        synchronized (bulkRequest) {
            return bulkRequest.numberOfActions();
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/os-persistence/src/main/java/com/netflix/conductor/os/config/OpenSearchConfiguration.java | os-persistence/src/main/java/com/netflix/conductor/os/config/OpenSearchConfiguration.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.os.config;
import java.net.URL;
import java.util.List;
import org.apache.http.HttpHost;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.opensearch.client.RestClient;
import org.opensearch.client.RestClientBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Conditional;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import org.springframework.retry.backoff.FixedBackOffPolicy;
import org.springframework.retry.support.RetryTemplate;
import com.netflix.conductor.dao.IndexDAO;
import com.netflix.conductor.os.dao.index.OpenSearchRestDAO;
import com.fasterxml.jackson.databind.ObjectMapper;
@Configuration(proxyBeanMethods = false)
@EnableConfigurationProperties(OpenSearchProperties.class)
@Conditional(OpenSearchConditions.OpenSearchEnabled.class)
public class OpenSearchConfiguration {

    private static final Logger log = LoggerFactory.getLogger(OpenSearchConfiguration.class);

    /** Low-level OpenSearch REST client built from the configured builder. */
    @Bean
    public RestClient restClient(RestClientBuilder restClientBuilder) {
        return restClientBuilder.build();
    }

    /**
     * Creates the {@link RestClientBuilder} for the configured cluster, applying the
     * optional connection-request timeout and BASIC authentication when both a username
     * and a password are configured.
     *
     * @param properties the OpenSearch connection settings
     * @return a builder ready to produce REST clients for the cluster
     */
    @Bean
    public RestClientBuilder osRestClientBuilder(OpenSearchProperties properties) {
        RestClientBuilder builder = RestClient.builder(convertToHttpHosts(properties.toURLs()));

        // A non-positive timeout means "use the client library's default".
        if (properties.getRestClientConnectionRequestTimeout() > 0) {
            builder.setRequestConfigCallback(
                    requestConfigBuilder ->
                            requestConfigBuilder.setConnectionRequestTimeout(
                                    properties.getRestClientConnectionRequestTimeout()));
        }

        if (properties.getUsername() != null && properties.getPassword() != null) {
            log.info(
                    "Configure OpenSearch with BASIC authentication. User:{}",
                    properties.getUsername());
            final CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
            credentialsProvider.setCredentials(
                    AuthScope.ANY,
                    new UsernamePasswordCredentials(
                            properties.getUsername(), properties.getPassword()));
            builder.setHttpClientConfigCallback(
                    httpClientBuilder ->
                            httpClientBuilder.setDefaultCredentialsProvider(credentialsProvider));
        } else {
            log.info("Configure OpenSearch with no authentication.");
        }
        return builder;
    }

    /**
     * Primary {@link IndexDAO} backed by the OpenSearch REST client.
     *
     * <p>The previous version read {@code properties.getUrl()} into an unused local
     * variable; that dead read has been removed.
     */
    @Primary
    @Bean
    public IndexDAO osIndexDAO(
            RestClientBuilder restClientBuilder,
            @Qualifier("osRetryTemplate") RetryTemplate retryTemplate,
            OpenSearchProperties properties,
            ObjectMapper objectMapper) {
        return new OpenSearchRestDAO(restClientBuilder, retryTemplate, properties, objectMapper);
    }

    /** Retry template with a fixed 1-second back-off, used by the index DAO. */
    @Bean
    public RetryTemplate osRetryTemplate() {
        RetryTemplate retryTemplate = new RetryTemplate();
        FixedBackOffPolicy fixedBackOffPolicy = new FixedBackOffPolicy();
        fixedBackOffPolicy.setBackOffPeriod(1000L);
        retryTemplate.setBackOffPolicy(fixedBackOffPolicy);
        return retryTemplate;
    }

    /** Converts the configured cluster URLs into Apache {@link HttpHost} instances. */
    private HttpHost[] convertToHttpHosts(List<URL> hosts) {
        return hosts.stream()
                .map(host -> new HttpHost(host.getHost(), host.getPort(), host.getProtocol()))
                .toArray(HttpHost[]::new);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/os-persistence/src/main/java/com/netflix/conductor/os/config/OpenSearchProperties.java | os-persistence/src/main/java/com/netflix/conductor/os/config/OpenSearchProperties.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.os.config;
import java.net.MalformedURLException;
import java.net.URL;
import java.time.Duration;
import java.time.temporal.ChronoUnit;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.convert.DurationUnit;
@ConfigurationProperties("conductor.elasticsearch")
public class OpenSearchProperties {

    /**
     * The comma separated list of urls for the elasticsearch cluster. Format --
     * host1:port1,host2:port2
     */
    private String url = "localhost:9201";

    /** The index prefix to be used when creating indices */
    private String indexPrefix = "conductor";

    /** The color of the elasticsearch cluster to wait for to confirm healthy status */
    private String clusterHealthColor = "green";

    /** The size of the batch to be used for bulk indexing in async mode */
    private int indexBatchSize = 1;

    /** The size of the queue used for holding async indexing tasks */
    private int asyncWorkerQueueSize = 100;

    /** The maximum number of threads allowed in the async pool */
    private int asyncMaxPoolSize = 12;

    /**
     * The time in seconds after which the async buffers will be flushed (if no activity) to prevent
     * data loss
     */
    @DurationUnit(ChronoUnit.SECONDS)
    private Duration asyncBufferFlushTimeout = Duration.ofSeconds(10);

    /** The number of shards that the index will be created with */
    private int indexShardCount = 5;

    /** The number of replicas that the index will be configured to have */
    private int indexReplicasCount = 0;

    /** The number of task log results that will be returned in the response */
    private int taskLogResultLimit = 10;

    /** The timeout in milliseconds used when requesting a connection from the connection manager */
    private int restClientConnectionRequestTimeout = -1;

    /** Used to control if index management is to be enabled or will be controlled externally */
    private boolean autoIndexManagementEnabled = true;

    /**
     * Document types are deprecated in ES6 and removed from ES7. This property can be used to
     * disable the use of specific document types with an override. This property is currently used
     * in ES6 module.
     *
     * <p><em>Note that this property will only take effect if {@link
     * OpenSearchProperties#isAutoIndexManagementEnabled} is set to false and index management is
     * handled outside of this module.</em>
     */
    private String documentTypeOverride = "";

    /** Elasticsearch basic auth username */
    private String username;

    /** Elasticsearch basic auth password */
    private String password;

    public String getUrl() {
        return url;
    }

    public void setUrl(String url) {
        this.url = url;
    }

    public String getIndexPrefix() {
        return indexPrefix;
    }

    public void setIndexPrefix(String indexPrefix) {
        this.indexPrefix = indexPrefix;
    }

    public String getClusterHealthColor() {
        return clusterHealthColor;
    }

    public void setClusterHealthColor(String clusterHealthColor) {
        this.clusterHealthColor = clusterHealthColor;
    }

    public int getIndexBatchSize() {
        return indexBatchSize;
    }

    public void setIndexBatchSize(int indexBatchSize) {
        this.indexBatchSize = indexBatchSize;
    }

    public int getAsyncWorkerQueueSize() {
        return asyncWorkerQueueSize;
    }

    public void setAsyncWorkerQueueSize(int asyncWorkerQueueSize) {
        this.asyncWorkerQueueSize = asyncWorkerQueueSize;
    }

    public int getAsyncMaxPoolSize() {
        return asyncMaxPoolSize;
    }

    public void setAsyncMaxPoolSize(int asyncMaxPoolSize) {
        this.asyncMaxPoolSize = asyncMaxPoolSize;
    }

    public Duration getAsyncBufferFlushTimeout() {
        return asyncBufferFlushTimeout;
    }

    public void setAsyncBufferFlushTimeout(Duration asyncBufferFlushTimeout) {
        this.asyncBufferFlushTimeout = asyncBufferFlushTimeout;
    }

    public int getIndexShardCount() {
        return indexShardCount;
    }

    public void setIndexShardCount(int indexShardCount) {
        this.indexShardCount = indexShardCount;
    }

    public int getIndexReplicasCount() {
        return indexReplicasCount;
    }

    public void setIndexReplicasCount(int indexReplicasCount) {
        this.indexReplicasCount = indexReplicasCount;
    }

    public int getTaskLogResultLimit() {
        return taskLogResultLimit;
    }

    public void setTaskLogResultLimit(int taskLogResultLimit) {
        this.taskLogResultLimit = taskLogResultLimit;
    }

    public int getRestClientConnectionRequestTimeout() {
        return restClientConnectionRequestTimeout;
    }

    public void setRestClientConnectionRequestTimeout(int restClientConnectionRequestTimeout) {
        this.restClientConnectionRequestTimeout = restClientConnectionRequestTimeout;
    }

    public boolean isAutoIndexManagementEnabled() {
        return autoIndexManagementEnabled;
    }

    public void setAutoIndexManagementEnabled(boolean autoIndexManagementEnabled) {
        this.autoIndexManagementEnabled = autoIndexManagementEnabled;
    }

    public String getDocumentTypeOverride() {
        return documentTypeOverride;
    }

    public void setDocumentTypeOverride(String documentTypeOverride) {
        this.documentTypeOverride = documentTypeOverride;
    }

    public String getUsername() {
        return username;
    }

    public void setUsername(String username) {
        this.username = username;
    }

    public String getPassword() {
        return password;
    }

    public void setPassword(String password) {
        this.password = password;
    }

    /**
     * Parses the configured cluster address into a list of URLs. Hosts with no explicit
     * scheme default to {@code http://}. Entries are trimmed so that a list written as
     * {@code "host1:9200, host2:9200"} (with spaces) parses correctly.
     *
     * @return one {@link URL} per configured host
     * @throws IllegalArgumentException if any entry cannot be parsed as a URL
     */
    public List<URL> toURLs() {
        String clusterAddress = getUrl();
        String[] hosts = clusterAddress.split(",");
        return Arrays.stream(hosts)
                .map(String::trim)
                .map(
                        host ->
                                (host.startsWith("http://") || host.startsWith("https://"))
                                        ? toURL(host)
                                        : toURL("http://" + host))
                .collect(Collectors.toList());
    }

    private URL toURL(String url) {
        try {
            return new URL(url);
        } catch (MalformedURLException e) {
            // Keep the original exception as the cause so the bad input is diagnosable.
            throw new IllegalArgumentException(
                    url + " can not be converted to java.net.URL", e);
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/os-persistence/src/main/java/com/netflix/conductor/os/config/OpenSearchConditions.java | os-persistence/src/main/java/com/netflix/conductor/os/config/OpenSearchConditions.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.os.config;
import org.springframework.boot.autoconfigure.condition.AllNestedConditions;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
/**
 * Spring {@code @Conditional} helpers that gate OpenSearch beans on configuration.
 *
 * <p>{@link OpenSearchEnabled} matches only when BOTH nested conditions hold:
 * {@code conductor.indexing.enabled} is true (or absent) AND
 * {@code conductor.indexing.type} equals {@code opensearch}.
 */
public class OpenSearchConditions {
    // Static holder for condition classes; never instantiated.
    private OpenSearchConditions() {}
    public static class OpenSearchEnabled extends AllNestedConditions {
        OpenSearchEnabled() {
            // Evaluate while parsing @Configuration classes, before bean registration.
            super(ConfigurationPhase.PARSE_CONFIGURATION);
        }
        // Matches when indexing is enabled; matchIfMissing makes it the default.
        // NOTE(review): nested class names are lowerCamelCase, which breaks the Java
        // class-naming convention; renaming is cosmetic-only since they exist solely
        // for annotation scanning — consider EnabledIndexing / EnabledOS.
        @SuppressWarnings("unused")
        @ConditionalOnProperty(
                name = "conductor.indexing.enabled",
                havingValue = "true",
                matchIfMissing = true)
        static class enabledIndexing {}
        // Matches only when the indexing backend is explicitly set to OpenSearch.
        @SuppressWarnings("unused")
        @ConditionalOnProperty(name = "conductor.indexing.type", havingValue = "opensearch")
        static class enabledOS {}
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/grpc-client/src/test/java/com/netflix/conductor/client/grpc/WorkflowClientTest.java | grpc-client/src/test/java/com/netflix/conductor/client/grpc/WorkflowClientTest.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.client.grpc;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.springframework.test.context.junit4.SpringRunner;
import org.springframework.test.util.ReflectionTestUtils;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.Workflow;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.grpc.ProtoMapper;
import com.netflix.conductor.grpc.SearchPb;
import com.netflix.conductor.grpc.WorkflowServiceGrpc;
import com.netflix.conductor.grpc.WorkflowServicePb;
import com.netflix.conductor.proto.WorkflowPb;
import com.netflix.conductor.proto.WorkflowSummaryPb;
import io.grpc.ManagedChannelBuilder;
import static junit.framework.TestCase.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@RunWith(SpringRunner.class)
public class WorkflowClientTest {

    @Mock ProtoMapper mockedProtoMapper;

    @Mock WorkflowServiceGrpc.WorkflowServiceBlockingStub mockedStub;

    WorkflowClient workflowClient;

    @Before
    public void init() {
        workflowClient = new WorkflowClient("test", 0);
        ReflectionTestUtils.setField(workflowClient, "stub", mockedStub);
        ReflectionTestUtils.setField(workflowClient, "protoMapper", mockedProtoMapper);
    }

    /** search(query) maps the proto summary back to the model and reports total hits. */
    @Test
    public void testSearch() {
        WorkflowSummary summary = mock(WorkflowSummary.class);
        WorkflowSummaryPb.WorkflowSummary summaryProto =
                mock(WorkflowSummaryPb.WorkflowSummary.class);
        when(mockedProtoMapper.fromProto(summaryProto)).thenReturn(summary);
        WorkflowServicePb.WorkflowSummarySearchResult reply =
                WorkflowServicePb.WorkflowSummarySearchResult.newBuilder()
                        .setTotalHits(1)
                        .addResults(summaryProto)
                        .build();
        when(mockedStub.search(queryOnlyRequest("test query"))).thenReturn(reply);

        SearchResult<WorkflowSummary> actual = workflowClient.search("test query");

        assertEquals(1, actual.getTotalHits());
        assertEquals(summary, actual.getResults().get(0));
    }

    /** searchV2(query) returns full Workflow objects instead of summaries. */
    @Test
    public void testSearchV2() {
        Workflow model = mock(Workflow.class);
        WorkflowPb.Workflow modelProto = mock(WorkflowPb.Workflow.class);
        when(mockedProtoMapper.fromProto(modelProto)).thenReturn(model);
        WorkflowServicePb.WorkflowSearchResult reply =
                WorkflowServicePb.WorkflowSearchResult.newBuilder()
                        .setTotalHits(1)
                        .addResults(modelProto)
                        .build();
        when(mockedStub.searchV2(queryOnlyRequest("test query"))).thenReturn(reply);

        SearchResult<Workflow> actual = workflowClient.searchV2("test query");

        assertEquals(1, actual.getTotalHits());
        assertEquals(model, actual.getResults().get(0));
    }

    /** Paged search forwards start/size/sort/freeText/query into the gRPC request. */
    @Test
    public void testSearchWithParams() {
        WorkflowSummary summary = mock(WorkflowSummary.class);
        WorkflowSummaryPb.WorkflowSummary summaryProto =
                mock(WorkflowSummaryPb.WorkflowSummary.class);
        when(mockedProtoMapper.fromProto(summaryProto)).thenReturn(summary);
        WorkflowServicePb.WorkflowSummarySearchResult reply =
                WorkflowServicePb.WorkflowSummarySearchResult.newBuilder()
                        .setTotalHits(1)
                        .addResults(summaryProto)
                        .build();
        when(mockedStub.search(pagedRequest(1, 5, "*", "*", "test query"))).thenReturn(reply);

        SearchResult<WorkflowSummary> actual =
                workflowClient.search(1, 5, "*", "*", "test query");

        assertEquals(1, actual.getTotalHits());
        assertEquals(summary, actual.getResults().get(0));
    }

    /** Paged searchV2 behaves like paged search but yields full Workflow objects. */
    @Test
    public void testSearchV2WithParams() {
        Workflow model = mock(Workflow.class);
        WorkflowPb.Workflow modelProto = mock(WorkflowPb.Workflow.class);
        when(mockedProtoMapper.fromProto(modelProto)).thenReturn(model);
        WorkflowServicePb.WorkflowSearchResult reply =
                WorkflowServicePb.WorkflowSearchResult.newBuilder()
                        .setTotalHits(1)
                        .addResults(modelProto)
                        .build();
        when(mockedStub.searchV2(pagedRequest(1, 5, "*", "*", "test query"))).thenReturn(reply);

        SearchResult<Workflow> actual = workflowClient.searchV2(1, 5, "*", "*", "test query");

        assertEquals(1, actual.getTotalHits());
        assertEquals(model, actual.getResults().get(0));
    }

    /** Same as paged searchV2, but with a client built from a ManagedChannelBuilder. */
    @Test
    public void testSearchV2WithParamsWithManagedChannel() {
        WorkflowClient channelClient = createClientWithManagedChannel();
        Workflow model = mock(Workflow.class);
        WorkflowPb.Workflow modelProto = mock(WorkflowPb.Workflow.class);
        when(mockedProtoMapper.fromProto(modelProto)).thenReturn(model);
        WorkflowServicePb.WorkflowSearchResult reply =
                WorkflowServicePb.WorkflowSearchResult.newBuilder()
                        .setTotalHits(1)
                        .addResults(modelProto)
                        .build();
        when(mockedStub.searchV2(pagedRequest(1, 5, "*", "*", "test query"))).thenReturn(reply);

        SearchResult<Workflow> actual = channelClient.searchV2(1, 5, "*", "*", "test query");

        assertEquals(1, actual.getTotalHits());
        assertEquals(model, actual.getResults().get(0));
    }

    /** Builds a client via the ManagedChannelBuilder constructor, with mocks injected. */
    public WorkflowClient createClientWithManagedChannel() {
        WorkflowClient workflowClient =
                new WorkflowClient(ManagedChannelBuilder.forAddress("test", 0));
        ReflectionTestUtils.setField(workflowClient, "stub", mockedStub);
        ReflectionTestUtils.setField(workflowClient, "protoMapper", mockedProtoMapper);
        return workflowClient;
    }

    // --- request-building helpers shared by the tests above ---

    private static SearchPb.Request queryOnlyRequest(String query) {
        return SearchPb.Request.newBuilder().setQuery(query).build();
    }

    private static SearchPb.Request pagedRequest(
            int start, int size, String sort, String freeText, String query) {
        return SearchPb.Request.newBuilder()
                .setStart(start)
                .setSize(size)
                .setSort(sort)
                .setFreeText(freeText)
                .setQuery(query)
                .build();
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/grpc-client/src/test/java/com/netflix/conductor/client/grpc/EventClientTest.java | grpc-client/src/test/java/com/netflix/conductor/client/grpc/EventClientTest.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.client.grpc;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.springframework.test.context.junit4.SpringRunner;
import org.springframework.test.util.ReflectionTestUtils;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.grpc.EventServiceGrpc;
import com.netflix.conductor.grpc.EventServicePb;
import com.netflix.conductor.grpc.ProtoMapper;
import com.netflix.conductor.proto.EventHandlerPb;
import static junit.framework.TestCase.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
@RunWith(SpringRunner.class)
public class EventClientTest {

    @Mock ProtoMapper mockedProtoMapper;

    @Mock EventServiceGrpc.EventServiceBlockingStub mockedStub;

    EventClient eventClient;

    @Before
    public void init() {
        eventClient = new EventClient("test", 0);
        ReflectionTestUtils.setField(eventClient, "stub", mockedStub);
        ReflectionTestUtils.setField(eventClient, "protoMapper", mockedProtoMapper);
    }

    /** registerEventHandler wraps the handler proto in an AddEventHandlerRequest. */
    @Test
    public void testRegisterEventHandler() {
        EventHandler eventHandler = mock(EventHandler.class);
        EventHandlerPb.EventHandler eventHandlerPB = mock(EventHandlerPb.EventHandler.class);
        when(mockedProtoMapper.toProto(eventHandler)).thenReturn(eventHandlerPB);
        EventServicePb.AddEventHandlerRequest request =
                EventServicePb.AddEventHandlerRequest.newBuilder()
                        .setHandler(eventHandlerPB)
                        .build();
        eventClient.registerEventHandler(eventHandler);
        verify(mockedStub, times(1)).addEventHandler(request);
    }

    /** updateEventHandler wraps the handler proto in an UpdateEventHandlerRequest. */
    @Test
    public void testUpdateEventHandler() {
        EventHandler eventHandler = mock(EventHandler.class);
        EventHandlerPb.EventHandler eventHandlerPB = mock(EventHandlerPb.EventHandler.class);
        when(mockedProtoMapper.toProto(eventHandler)).thenReturn(eventHandlerPB);
        EventServicePb.UpdateEventHandlerRequest request =
                EventServicePb.UpdateEventHandlerRequest.newBuilder()
                        .setHandler(eventHandlerPB)
                        .build();
        eventClient.updateEventHandler(eventHandler);
        verify(mockedStub, times(1)).updateEventHandler(request);
    }

    /** getEventHandlers streams protos from the stub and maps each back to the model. */
    @Test
    public void testGetEventHandlers() {
        EventHandler eventHandler = mock(EventHandler.class);
        EventHandlerPb.EventHandler eventHandlerPB = mock(EventHandlerPb.EventHandler.class);
        when(mockedProtoMapper.fromProto(eventHandlerPB)).thenReturn(eventHandler);
        EventServicePb.GetEventHandlersForEventRequest request =
                EventServicePb.GetEventHandlersForEventRequest.newBuilder()
                        .setEvent("test")
                        .setActiveOnly(true)
                        .build();
        List<EventHandlerPb.EventHandler> result = new ArrayList<>();
        result.add(eventHandlerPB);
        when(mockedStub.getEventHandlersForEvent(request)).thenReturn(result.iterator());
        Iterator<EventHandler> response = eventClient.getEventHandlers("test", true);
        verify(mockedStub, times(1)).getEventHandlersForEvent(request);
        assertEquals(response.next(), eventHandler);
    }

    /**
     * unregisterEventHandler issues a RemoveEventHandlerRequest for the given name.
     *
     * <p>Fix: previously this test built the client via {@code
     * createClientWithManagedChannel()} while the "...WithManagedChannel" test used the
     * plain client — the two bodies were swapped relative to their names.
     */
    @Test
    public void testUnregisterEventHandler() {
        EventServicePb.RemoveEventHandlerRequest request =
                EventServicePb.RemoveEventHandlerRequest.newBuilder().setName("test").build();
        eventClient.unregisterEventHandler("test");
        verify(mockedStub, times(1)).removeEventHandler(request);
    }

    /** Same as above, but through the client produced by the managed-channel helper. */
    @Test
    public void testUnregisterEventHandlerWithManagedChannel() {
        EventClient eventClient = createClientWithManagedChannel();
        EventServicePb.RemoveEventHandlerRequest request =
                EventServicePb.RemoveEventHandlerRequest.newBuilder().setName("test").build();
        eventClient.unregisterEventHandler("test");
        verify(mockedStub, times(1)).removeEventHandler(request);
    }

    // NOTE(review): unlike WorkflowClientTest/TaskClientTest, this helper does not build
    // the client from ManagedChannelBuilder.forAddress(...), so the managed-channel code
    // path is not actually exercised. If EventClient exposes a ManagedChannelBuilder
    // constructor, switch this over — confirm against EventClient's constructors.
    public EventClient createClientWithManagedChannel() {
        EventClient eventClient = new EventClient("test", 0);
        ReflectionTestUtils.setField(eventClient, "stub", mockedStub);
        ReflectionTestUtils.setField(eventClient, "protoMapper", mockedProtoMapper);
        return eventClient;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/grpc-client/src/test/java/com/netflix/conductor/client/grpc/TaskClientTest.java | grpc-client/src/test/java/com/netflix/conductor/client/grpc/TaskClientTest.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.client.grpc;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.springframework.test.context.junit4.SpringRunner;
import org.springframework.test.util.ReflectionTestUtils;
import com.netflix.conductor.common.metadata.tasks.Task;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.grpc.ProtoMapper;
import com.netflix.conductor.grpc.SearchPb;
import com.netflix.conductor.grpc.TaskServiceGrpc;
import com.netflix.conductor.grpc.TaskServicePb;
import com.netflix.conductor.proto.TaskPb;
import com.netflix.conductor.proto.TaskSummaryPb;
import io.grpc.ManagedChannelBuilder;
import static junit.framework.TestCase.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@RunWith(SpringRunner.class)
public class TaskClientTest {

    @Mock ProtoMapper mockedProtoMapper;

    @Mock TaskServiceGrpc.TaskServiceBlockingStub mockedStub;

    TaskClient taskClient;

    @Before
    public void init() {
        taskClient = new TaskClient("test", 0);
        ReflectionTestUtils.setField(taskClient, "stub", mockedStub);
        ReflectionTestUtils.setField(taskClient, "protoMapper", mockedProtoMapper);
    }

    /** search(query) maps the proto summary back to the model and reports total hits. */
    @Test
    public void testSearch() {
        TaskSummary summary = mock(TaskSummary.class);
        TaskSummaryPb.TaskSummary summaryProto = mock(TaskSummaryPb.TaskSummary.class);
        when(mockedProtoMapper.fromProto(summaryProto)).thenReturn(summary);
        TaskServicePb.TaskSummarySearchResult reply =
                TaskServicePb.TaskSummarySearchResult.newBuilder()
                        .setTotalHits(1)
                        .addResults(summaryProto)
                        .build();
        when(mockedStub.search(queryOnlyRequest("test query"))).thenReturn(reply);

        SearchResult<TaskSummary> actual = taskClient.search("test query");

        assertEquals(1, actual.getTotalHits());
        assertEquals(summary, actual.getResults().get(0));
    }

    /** searchV2(query) returns full Task objects instead of summaries. */
    @Test
    public void testSearchV2() {
        Task model = mock(Task.class);
        TaskPb.Task modelProto = mock(TaskPb.Task.class);
        when(mockedProtoMapper.fromProto(modelProto)).thenReturn(model);
        TaskServicePb.TaskSearchResult reply =
                TaskServicePb.TaskSearchResult.newBuilder()
                        .setTotalHits(1)
                        .addResults(modelProto)
                        .build();
        when(mockedStub.searchV2(queryOnlyRequest("test query"))).thenReturn(reply);

        SearchResult<Task> actual = taskClient.searchV2("test query");

        assertEquals(1, actual.getTotalHits());
        assertEquals(model, actual.getResults().get(0));
    }

    /** Paged search forwards start/size/sort/freeText/query into the gRPC request. */
    @Test
    public void testSearchWithParams() {
        TaskSummary summary = mock(TaskSummary.class);
        TaskSummaryPb.TaskSummary summaryProto = mock(TaskSummaryPb.TaskSummary.class);
        when(mockedProtoMapper.fromProto(summaryProto)).thenReturn(summary);
        TaskServicePb.TaskSummarySearchResult reply =
                TaskServicePb.TaskSummarySearchResult.newBuilder()
                        .setTotalHits(1)
                        .addResults(summaryProto)
                        .build();
        when(mockedStub.search(pagedRequest(1, 5, "*", "*", "test query"))).thenReturn(reply);

        SearchResult<TaskSummary> actual = taskClient.search(1, 5, "*", "*", "test query");

        assertEquals(1, actual.getTotalHits());
        assertEquals(summary, actual.getResults().get(0));
    }

    /** Paged searchV2 behaves like paged search but yields full Task objects. */
    @Test
    public void testSearchV2WithParams() {
        Task model = mock(Task.class);
        TaskPb.Task modelProto = mock(TaskPb.Task.class);
        when(mockedProtoMapper.fromProto(modelProto)).thenReturn(model);
        TaskServicePb.TaskSearchResult reply =
                TaskServicePb.TaskSearchResult.newBuilder()
                        .setTotalHits(1)
                        .addResults(modelProto)
                        .build();
        when(mockedStub.searchV2(pagedRequest(1, 5, "*", "*", "test query"))).thenReturn(reply);

        SearchResult<Task> actual = taskClient.searchV2(1, 5, "*", "*", "test query");

        assertEquals(1, actual.getTotalHits());
        assertEquals(model, actual.getResults().get(0));
    }

    /** Same as testSearch, but with a client built from a ManagedChannelBuilder. */
    @Test
    public void testSearchWithChannelBuilder() {
        TaskClient channelClient = createClientWithManagedChannel();
        TaskSummary summary = mock(TaskSummary.class);
        TaskSummaryPb.TaskSummary summaryProto = mock(TaskSummaryPb.TaskSummary.class);
        when(mockedProtoMapper.fromProto(summaryProto)).thenReturn(summary);
        TaskServicePb.TaskSummarySearchResult reply =
                TaskServicePb.TaskSummarySearchResult.newBuilder()
                        .setTotalHits(1)
                        .addResults(summaryProto)
                        .build();
        when(mockedStub.search(queryOnlyRequest("test query"))).thenReturn(reply);

        SearchResult<TaskSummary> actual = channelClient.search("test query");

        assertEquals(1, actual.getTotalHits());
        assertEquals(summary, actual.getResults().get(0));
    }

    /** Builds a client via the ManagedChannelBuilder constructor, with mocks injected. */
    private TaskClient createClientWithManagedChannel() {
        TaskClient taskClient = new TaskClient(ManagedChannelBuilder.forAddress("test", 0));
        ReflectionTestUtils.setField(taskClient, "stub", mockedStub);
        ReflectionTestUtils.setField(taskClient, "protoMapper", mockedProtoMapper);
        return taskClient;
    }

    // --- request-building helpers shared by the tests above ---

    private static SearchPb.Request queryOnlyRequest(String query) {
        return SearchPb.Request.newBuilder().setQuery(query).build();
    }

    private static SearchPb.Request pagedRequest(
            int start, int size, String sort, String freeText, String query) {
        return SearchPb.Request.newBuilder()
                .setStart(start)
                .setSize(size)
                .setSort(sort)
                .setFreeText(freeText)
                .setQuery(query)
                .build();
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/grpc-client/src/main/java/com/netflix/conductor/client/grpc/TaskClient.java | grpc-client/src/main/java/com/netflix/conductor/client/grpc/TaskClient.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.client.grpc;
import java.util.Iterator;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;
import com.netflix.conductor.common.metadata.tasks.Task;
import com.netflix.conductor.common.metadata.tasks.TaskExecLog;
import com.netflix.conductor.common.metadata.tasks.TaskResult;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.TaskSummary;
import com.netflix.conductor.grpc.SearchPb;
import com.netflix.conductor.grpc.TaskServiceGrpc;
import com.netflix.conductor.grpc.TaskServicePb;
import com.netflix.conductor.proto.TaskPb;
import com.google.common.base.Preconditions;
import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import io.grpc.ManagedChannelBuilder;
import jakarta.annotation.Nullable;
/**
 * gRPC client for Conductor's task service: task polling, result updates, execution
 * logs, queue sizes, and task search. All RPCs are blocking calls over the channel
 * owned by {@link ClientBase}; transport failures surface as
 * {@code io.grpc.StatusRuntimeException}.
 */
public class TaskClient extends ClientBase {
    // Blocking stub bound to the shared managed channel; one per client instance.
    private final TaskServiceGrpc.TaskServiceBlockingStub stub;
    /**
     * Creates a client connected to the given host and port (plaintext, see ClientBase).
     *
     * @param address server host name
     * @param port server port
     */
    public TaskClient(String address, int port) {
        super(address, port);
        this.stub = TaskServiceGrpc.newBlockingStub(this.channel);
    }
    /**
     * Creates a client from a caller-supplied channel builder (e.g. to configure TLS
     * or interceptors).
     *
     * @param builder builder used to create the underlying managed channel
     */
    public TaskClient(ManagedChannelBuilder<?> builder) {
        super(builder);
        this.stub = TaskServiceGrpc.newBlockingStub(this.channel);
    }
    /**
     * Perform a poll for a task of a specific task type.
     *
     * @param taskType The taskType to poll for
     * @param workerId Name of the client worker. Used for logging.
     * @param domain The domain of the task type
     * @return Task waiting to be executed.
     */
    public Task pollTask(String taskType, String workerId, String domain) {
        Preconditions.checkArgument(StringUtils.isNotBlank(taskType), "Task type cannot be blank");
        Preconditions.checkArgument(StringUtils.isNotBlank(domain), "Domain cannot be blank");
        Preconditions.checkArgument(StringUtils.isNotBlank(workerId), "Worker id cannot be blank");
        TaskServicePb.PollResponse response =
                stub.poll(
                        TaskServicePb.PollRequest.newBuilder()
                                .setTaskType(taskType)
                                .setWorkerId(workerId)
                                .setDomain(domain)
                                .build());
        return protoMapper.fromProto(response.getTask());
    }
    /**
     * Perform a batch poll for tasks by task type. Batch size is configurable by count.
     *
     * @param taskType Type of task to poll for
     * @param workerId Name of the client worker. Used for logging.
     * @param count Maximum number of tasks to be returned. Actual number of tasks returned can be
     *     less than this number.
     * @param timeoutInMillisecond Long poll wait timeout.
     * @return List of tasks awaiting to be executed.
     */
    public List<Task> batchPollTasksByTaskType(
            String taskType, String workerId, int count, int timeoutInMillisecond) {
        // Drains the streaming variant into a concrete list.
        return Lists.newArrayList(
                batchPollTasksByTaskTypeAsync(taskType, workerId, count, timeoutInMillisecond));
    }
    /**
     * Perform a batch poll for tasks by task type. Batch size is configurable by count. Returns an
     * iterator that streams tasks as they become available through GRPC.
     *
     * @param taskType Type of task to poll for
     * @param workerId Name of the client worker. Used for logging.
     * @param count Maximum number of tasks to be returned. Actual number of tasks returned can be
     *     less than this number.
     * @param timeoutInMillisecond Long poll wait timeout.
     * @return Iterator of tasks awaiting to be executed.
     */
    public Iterator<Task> batchPollTasksByTaskTypeAsync(
            String taskType, String workerId, int count, int timeoutInMillisecond) {
        Preconditions.checkArgument(StringUtils.isNotBlank(taskType), "Task type cannot be blank");
        Preconditions.checkArgument(StringUtils.isNotBlank(workerId), "Worker id cannot be blank");
        Preconditions.checkArgument(count > 0, "Count must be greater than 0");
        Iterator<TaskPb.Task> it =
                stub.batchPoll(
                        TaskServicePb.BatchPollRequest.newBuilder()
                                .setTaskType(taskType)
                                .setWorkerId(workerId)
                                .setCount(count)
                                .setTimeout(timeoutInMillisecond)
                                .build());
        // Lazily converts each proto task to the domain type as the caller iterates.
        return Iterators.transform(it, protoMapper::fromProto);
    }
    /**
     * Updates the result of a task execution.
     *
     * @param taskResult TaskResults to be updated.
     */
    public void updateTask(TaskResult taskResult) {
        Preconditions.checkNotNull(taskResult, "Task result cannot be null");
        stub.updateTask(
                TaskServicePb.UpdateTaskRequest.newBuilder()
                        .setResult(protoMapper.toProto(taskResult))
                        .build());
    }
    /**
     * Log execution messages for a task.
     *
     * @param taskId id of the task
     * @param logMessage the message to be logged
     */
    public void logMessageForTask(String taskId, String logMessage) {
        Preconditions.checkArgument(StringUtils.isNotBlank(taskId), "Task id cannot be blank");
        stub.addLog(
                TaskServicePb.AddLogRequest.newBuilder()
                        .setTaskId(taskId)
                        .setLog(logMessage)
                        .build());
    }
    /**
     * Fetch execution logs for a task.
     *
     * @param taskId id of the task.
     * @return execution logs recorded for the task, mapped to domain objects
     */
    public List<TaskExecLog> getTaskLogs(String taskId) {
        Preconditions.checkArgument(StringUtils.isNotBlank(taskId), "Task id cannot be blank");
        return stub
                .getTaskLogs(
                        TaskServicePb.GetTaskLogsRequest.newBuilder().setTaskId(taskId).build())
                .getLogsList()
                .stream()
                .map(protoMapper::fromProto)
                .collect(Collectors.toList());
    }
    /**
     * Retrieve information about the task
     *
     * @param taskId ID of the task
     * @return Task details
     */
    public Task getTaskDetails(String taskId) {
        Preconditions.checkArgument(StringUtils.isNotBlank(taskId), "Task id cannot be blank");
        return protoMapper.fromProto(
                stub.getTask(TaskServicePb.GetTaskRequest.newBuilder().setTaskId(taskId).build())
                        .getTask());
    }
    /**
     * Fetch the pending queue size for a task type.
     *
     * @param taskType the task type to query
     * @return number of tasks currently queued for {@code taskType}; 0 if the server
     *     returned no entry for that type
     */
    public int getQueueSizeForTask(String taskType) {
        Preconditions.checkArgument(StringUtils.isNotBlank(taskType), "Task type cannot be blank");
        TaskServicePb.QueueSizesResponse sizes =
                stub.getQueueSizesForTasks(
                        TaskServicePb.QueueSizesRequest.newBuilder()
                                .addTaskTypes(taskType)
                                .build());
        return sizes.getQueueForTaskOrDefault(taskType, 0);
    }
    /**
     * Search for tasks based on payload.
     *
     * @param query the search query
     * @return the {@link SearchResult} containing the matching {@link TaskSummary} objects
     */
    public SearchResult<TaskSummary> search(String query) {
        return search(null, null, null, null, query);
    }
    /**
     * Search for tasks based on payload, returning full {@link Task} objects.
     *
     * @param query the search query
     * @return the {@link SearchResult} containing the matching {@link Task} objects
     */
    public SearchResult<Task> searchV2(String query) {
        return searchV2(null, null, null, null, query);
    }
    /**
     * Paginated search for tasks based on payload. Null parameters are omitted from the
     * request so the server defaults apply.
     *
     * @param start start value of page
     * @param size number of tasks to be returned
     * @param sort sort order
     * @param freeText additional free text query
     * @param query the search query
     * @return the {@link SearchResult} containing the matching {@link TaskSummary} objects
     */
    public SearchResult<TaskSummary> search(
            @Nullable Integer start,
            @Nullable Integer size,
            @Nullable String sort,
            @Nullable String freeText,
            @Nullable String query) {
        SearchPb.Request searchRequest = createSearchRequest(start, size, sort, freeText, query);
        TaskServicePb.TaskSummarySearchResult result = stub.search(searchRequest);
        return new SearchResult<>(
                result.getTotalHits(),
                result.getResultsList().stream()
                        .map(protoMapper::fromProto)
                        .collect(Collectors.toList()));
    }
    /**
     * Paginated search for tasks based on payload, returning full {@link Task} objects.
     * Null parameters are omitted from the request so the server defaults apply.
     *
     * @param start start value of page
     * @param size number of tasks to be returned
     * @param sort sort order
     * @param freeText additional free text query
     * @param query the search query
     * @return the {@link SearchResult} containing the matching {@link Task} objects
     */
    public SearchResult<Task> searchV2(
            @Nullable Integer start,
            @Nullable Integer size,
            @Nullable String sort,
            @Nullable String freeText,
            @Nullable String query) {
        SearchPb.Request searchRequest = createSearchRequest(start, size, sort, freeText, query);
        TaskServicePb.TaskSearchResult result = stub.searchV2(searchRequest);
        return new SearchResult<>(
                result.getTotalHits(),
                result.getResultsList().stream()
                        .map(protoMapper::fromProto)
                        .collect(Collectors.toList()));
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/grpc-client/src/main/java/com/netflix/conductor/client/grpc/ClientBase.java | grpc-client/src/main/java/com/netflix/conductor/client/grpc/ClientBase.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.client.grpc;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.grpc.ProtoMapper;
import com.netflix.conductor.grpc.SearchPb;
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
import jakarta.annotation.Nullable;
/**
 * Base class for the Conductor gRPC clients. Owns the {@link ManagedChannel} shared by
 * the service-specific blocking stubs and provides the common search-request builder.
 */
abstract class ClientBase {
    private static final Logger LOGGER = LoggerFactory.getLogger(ClientBase.class);
    // Stateless proto <-> domain mapper singleton; static so tests can swap it via reflection.
    protected static ProtoMapper protoMapper = ProtoMapper.INSTANCE;
    // Channel over which every RPC for this client is issued.
    protected final ManagedChannel channel;
    public ClientBase(String address, int port) {
        this(ManagedChannelBuilder.forAddress(address, port).usePlaintext());
    }
    public ClientBase(ManagedChannelBuilder<?> builder) {
        channel = builder.build();
    }
    /**
     * Shuts down the underlying channel, waiting up to 5 seconds for in-flight calls to
     * drain. If graceful shutdown does not complete in time, outstanding calls are
     * cancelled via {@link ManagedChannel#shutdownNow()} so the channel's resources are
     * not leaked.
     *
     * @throws InterruptedException if interrupted while waiting for termination
     */
    public void shutdown() throws InterruptedException {
        channel.shutdown();
        if (!channel.awaitTermination(5, TimeUnit.SECONDS)) {
            // Graceful drain timed out: force-cancel remaining RPCs.
            channel.shutdownNow();
        }
    }
    /**
     * Builds a {@link SearchPb.Request}, setting only the fields that are non-null so
     * server-side defaults apply to the rest.
     *
     * @param start start index of the page, or null for the server default
     * @param size page size, or null for the server default
     * @param sort sort order, or null
     * @param freeText free-text query, or null
     * @param query structured query, or null
     * @return the assembled search request
     */
    SearchPb.Request createSearchRequest(
            @Nullable Integer start,
            @Nullable Integer size,
            @Nullable String sort,
            @Nullable String freeText,
            @Nullable String query) {
        SearchPb.Request.Builder request = SearchPb.Request.newBuilder();
        if (start != null) request.setStart(start);
        if (size != null) request.setSize(size);
        if (sort != null) request.setSort(sort);
        if (freeText != null) request.setFreeText(freeText);
        if (query != null) request.setQuery(query);
        return request.build();
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/grpc-client/src/main/java/com/netflix/conductor/client/grpc/MetadataClient.java | grpc-client/src/main/java/com/netflix/conductor/client/grpc/MetadataClient.java | /*
* Copyright 2020 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.client.grpc;
import java.util.List;
import org.apache.commons.lang3.StringUtils;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.grpc.MetadataServiceGrpc;
import com.netflix.conductor.grpc.MetadataServicePb;
import com.google.common.base.Preconditions;
import io.grpc.ManagedChannelBuilder;
import jakarta.annotation.Nullable;
/**
 * gRPC client for Conductor's metadata service: registration and retrieval of workflow
 * and task definitions. All RPCs are blocking calls over the channel owned by
 * {@link ClientBase}.
 */
public class MetadataClient extends ClientBase {
    // Blocking stub bound to the shared managed channel.
    private final MetadataServiceGrpc.MetadataServiceBlockingStub stub;
    /**
     * Creates a client connected to the given host and port (plaintext, see ClientBase).
     *
     * @param address server host name
     * @param port server port
     */
    public MetadataClient(String address, int port) {
        super(address, port);
        this.stub = MetadataServiceGrpc.newBlockingStub(this.channel);
    }
    /**
     * Creates a client from a caller-supplied channel builder (e.g. to configure TLS
     * or interceptors).
     *
     * @param builder builder used to create the underlying managed channel
     */
    public MetadataClient(ManagedChannelBuilder<?> builder) {
        super(builder);
        this.stub = MetadataServiceGrpc.newBlockingStub(this.channel);
    }
    /**
     * Register a workflow definition with the server
     *
     * @param workflowDef the workflow definition
     */
    public void registerWorkflowDef(WorkflowDef workflowDef) {
        Preconditions.checkNotNull(workflowDef, "Workflow definition cannot be null");
        stub.createWorkflow(
                MetadataServicePb.CreateWorkflowRequest.newBuilder()
                        .setWorkflow(protoMapper.toProto(workflowDef))
                        .build());
    }
    /**
     * Updates a list of existing workflow definitions
     *
     * @param workflowDefs List of workflow definitions to be updated
     */
    public void updateWorkflowDefs(List<WorkflowDef> workflowDefs) {
        Preconditions.checkNotNull(workflowDefs, "Workflow defs list cannot be null");
        stub.updateWorkflows(
                MetadataServicePb.UpdateWorkflowsRequest.newBuilder()
                        // stream-to-Iterable bridge: defs are converted lazily while the
                        // request builder consumes the iterator
                        .addAllDefs(workflowDefs.stream().map(protoMapper::toProto)::iterator)
                        .build());
    }
    /**
     * Retrieve the workflow definition
     *
     * @param name the name of the workflow
     * @param version the version of the workflow def; null fetches the server's default
     *     (latest) version
     * @return Workflow definition for the given workflow and version
     */
    public WorkflowDef getWorkflowDef(String name, @Nullable Integer version) {
        Preconditions.checkArgument(StringUtils.isNotBlank(name), "name cannot be blank");
        MetadataServicePb.GetWorkflowRequest.Builder request =
                MetadataServicePb.GetWorkflowRequest.newBuilder().setName(name);
        if (version != null) {
            request.setVersion(version);
        }
        return protoMapper.fromProto(stub.getWorkflow(request.build()).getWorkflow());
    }
    /**
     * Registers a list of task types with the conductor server
     *
     * @param taskDefs List of task types to be registered.
     */
    public void registerTaskDefs(List<TaskDef> taskDefs) {
        Preconditions.checkNotNull(taskDefs, "Task defs list cannot be null");
        stub.createTasks(
                MetadataServicePb.CreateTasksRequest.newBuilder()
                        .addAllDefs(taskDefs.stream().map(protoMapper::toProto)::iterator)
                        .build());
    }
    /**
     * Updates an existing task definition
     *
     * @param taskDef the task definition to be updated
     */
    public void updateTaskDef(TaskDef taskDef) {
        Preconditions.checkNotNull(taskDef, "Task definition cannot be null");
        stub.updateTask(
                MetadataServicePb.UpdateTaskRequest.newBuilder()
                        .setTask(protoMapper.toProto(taskDef))
                        .build());
    }
    /**
     * Retrieve the task definition of a given task type
     *
     * @param taskType type of task for which to retrieve the definition
     * @return Task Definition for the given task type
     */
    public TaskDef getTaskDef(String taskType) {
        Preconditions.checkArgument(StringUtils.isNotBlank(taskType), "Task type cannot be blank");
        return protoMapper.fromProto(
                stub.getTask(
                                MetadataServicePb.GetTaskRequest.newBuilder()
                                        .setTaskType(taskType)
                                        .build())
                        .getTask());
    }
    /**
     * Removes the task definition of a task type from the conductor server. Use with caution.
     *
     * @param taskType Task type to be unregistered.
     */
    public void unregisterTaskDef(String taskType) {
        Preconditions.checkArgument(StringUtils.isNotBlank(taskType), "Task type cannot be blank");
        stub.deleteTask(
                MetadataServicePb.DeleteTaskRequest.newBuilder().setTaskType(taskType).build());
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/grpc-client/src/main/java/com/netflix/conductor/client/grpc/EventClient.java | grpc-client/src/main/java/com/netflix/conductor/client/grpc/EventClient.java | /*
* Copyright 2022 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.client.grpc;
import java.util.Iterator;
import org.apache.commons.lang3.StringUtils;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.grpc.EventServiceGrpc;
import com.netflix.conductor.grpc.EventServicePb;
import com.netflix.conductor.proto.EventHandlerPb;
import com.google.common.base.Preconditions;
import com.google.common.collect.Iterators;
import io.grpc.ManagedChannelBuilder;
/**
 * gRPC client for Conductor's event service: registration, update, retrieval and
 * removal of event handlers. All RPCs are blocking calls over the channel owned by
 * {@link ClientBase}.
 */
public class EventClient extends ClientBase {
    // Blocking stub bound to the shared managed channel.
    private final EventServiceGrpc.EventServiceBlockingStub stub;
    /**
     * Creates a client connected to the given host and port (plaintext, see ClientBase).
     *
     * @param address server host name
     * @param port server port
     */
    public EventClient(String address, int port) {
        super(address, port);
        this.stub = EventServiceGrpc.newBlockingStub(this.channel);
    }
    /**
     * Creates a client from a caller-supplied channel builder (e.g. to configure TLS
     * or interceptors).
     *
     * @param builder builder used to create the underlying managed channel
     */
    public EventClient(ManagedChannelBuilder<?> builder) {
        super(builder);
        this.stub = EventServiceGrpc.newBlockingStub(this.channel);
    }
    /**
     * Register an event handler with the server
     *
     * @param eventHandler the event handler definition
     */
    public void registerEventHandler(EventHandler eventHandler) {
        Preconditions.checkNotNull(eventHandler, "Event handler definition cannot be null");
        stub.addEventHandler(
                EventServicePb.AddEventHandlerRequest.newBuilder()
                        .setHandler(protoMapper.toProto(eventHandler))
                        .build());
    }
    /**
     * Updates an existing event handler
     *
     * @param eventHandler the event handler to be updated
     */
    public void updateEventHandler(EventHandler eventHandler) {
        Preconditions.checkNotNull(eventHandler, "Event handler definition cannot be null");
        stub.updateEventHandler(
                EventServicePb.UpdateEventHandlerRequest.newBuilder()
                        .setHandler(protoMapper.toProto(eventHandler))
                        .build());
    }
    /**
     * Retrieves the event handlers registered for an event, streamed from the server.
     *
     * @param event name of the event
     * @param activeOnly if true, returns only the active handlers
     * @return Returns the list of all the event handlers for a given event
     */
    public Iterator<EventHandler> getEventHandlers(String event, boolean activeOnly) {
        Preconditions.checkArgument(StringUtils.isNotBlank(event), "Event cannot be blank");
        EventServicePb.GetEventHandlersForEventRequest.Builder request =
                EventServicePb.GetEventHandlersForEventRequest.newBuilder()
                        .setEvent(event)
                        .setActiveOnly(activeOnly);
        Iterator<EventHandlerPb.EventHandler> it = stub.getEventHandlersForEvent(request.build());
        // Lazily converts each proto handler to the domain type as the caller iterates.
        return Iterators.transform(it, protoMapper::fromProto);
    }
    /**
     * Removes the event handler from the conductor server
     *
     * @param name the name of the event handler
     */
    public void unregisterEventHandler(String name) {
        Preconditions.checkArgument(StringUtils.isNotBlank(name), "Name cannot be blank");
        stub.removeEventHandler(
                EventServicePb.RemoveEventHandlerRequest.newBuilder().setName(name).build());
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/grpc-client/src/main/java/com/netflix/conductor/client/grpc/WorkflowClient.java | grpc-client/src/main/java/com/netflix/conductor/client/grpc/WorkflowClient.java | /*
* Copyright 2021 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.client.grpc;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;
import com.netflix.conductor.common.metadata.workflow.RerunWorkflowRequest;
import com.netflix.conductor.common.metadata.workflow.StartWorkflowRequest;
import com.netflix.conductor.common.run.SearchResult;
import com.netflix.conductor.common.run.Workflow;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.grpc.SearchPb;
import com.netflix.conductor.grpc.WorkflowServiceGrpc;
import com.netflix.conductor.grpc.WorkflowServicePb;
import com.netflix.conductor.proto.WorkflowPb;
import com.google.common.base.Preconditions;
import io.grpc.ManagedChannelBuilder;
import jakarta.annotation.Nullable;
/**
 * gRPC client for Conductor's workflow service: starting, querying, controlling
 * (pause/resume/restart/retry/terminate) and searching workflow executions. All RPCs
 * are blocking calls over the channel owned by {@link ClientBase}.
 */
public class WorkflowClient extends ClientBase {
    // Blocking stub bound to the shared managed channel.
    private final WorkflowServiceGrpc.WorkflowServiceBlockingStub stub;
    /**
     * Creates a client connected to the given host and port (plaintext, see ClientBase).
     *
     * @param address server host name
     * @param port server port
     */
    public WorkflowClient(String address, int port) {
        super(address, port);
        this.stub = WorkflowServiceGrpc.newBlockingStub(this.channel);
    }
    /**
     * Creates a client from a caller-supplied channel builder (e.g. to configure TLS
     * or interceptors).
     *
     * @param builder builder used to create the underlying managed channel
     */
    public WorkflowClient(ManagedChannelBuilder<?> builder) {
        super(builder);
        this.stub = WorkflowServiceGrpc.newBlockingStub(this.channel);
    }
    /**
     * Starts a workflow
     *
     * @param startWorkflowRequest the {@link StartWorkflowRequest} object to start the workflow
     * @return the id of the workflow instance that can be used for tracking
     */
    public String startWorkflow(StartWorkflowRequest startWorkflowRequest) {
        Preconditions.checkNotNull(startWorkflowRequest, "StartWorkflowRequest cannot be null");
        return stub.startWorkflow(protoMapper.toProto(startWorkflowRequest)).getWorkflowId();
    }
    /**
     * Retrieve a workflow by workflow id
     *
     * @param workflowId the id of the workflow
     * @param includeTasks specify if the tasks in the workflow need to be returned
     * @return the requested workflow
     */
    public Workflow getWorkflow(String workflowId, boolean includeTasks) {
        Preconditions.checkArgument(
                StringUtils.isNotBlank(workflowId), "workflow id cannot be blank");
        WorkflowPb.Workflow workflow =
                stub.getWorkflowStatus(
                        WorkflowServicePb.GetWorkflowStatusRequest.newBuilder()
                                .setWorkflowId(workflowId)
                                .setIncludeTasks(includeTasks)
                                .build());
        return protoMapper.fromProto(workflow);
    }
    /**
     * Retrieve all workflows for a given correlation id and name
     *
     * @param name the name of the workflow
     * @param correlationId the correlation id
     * @param includeClosed specify if all workflows are to be returned or only running workflows
     * @param includeTasks specify if the tasks in the workflow need to be returned
     * @return list of workflows for the given correlation id and name; empty if the
     *     server returned no entry for the correlation id
     */
    public List<Workflow> getWorkflows(
            String name, String correlationId, boolean includeClosed, boolean includeTasks) {
        Preconditions.checkArgument(StringUtils.isNotBlank(name), "name cannot be blank");
        Preconditions.checkArgument(
                StringUtils.isNotBlank(correlationId), "correlationId cannot be blank");
        WorkflowServicePb.GetWorkflowsResponse workflows =
                stub.getWorkflows(
                        WorkflowServicePb.GetWorkflowsRequest.newBuilder()
                                .setName(name)
                                .addCorrelationId(correlationId)
                                .setIncludeClosed(includeClosed)
                                .setIncludeTasks(includeTasks)
                                .build());
        // The response maps correlation id -> workflows; absent key means no matches.
        if (!workflows.containsWorkflowsById(correlationId)) {
            return Collections.emptyList();
        }
        return workflows.getWorkflowsByIdOrThrow(correlationId).getWorkflowsList().stream()
                .map(protoMapper::fromProto)
                .collect(Collectors.toList());
    }
    /**
     * Removes a workflow from the system
     *
     * @param workflowId the id of the workflow to be deleted
     * @param archiveWorkflow flag to indicate if the workflow should be archived before deletion
     */
    public void deleteWorkflow(String workflowId, boolean archiveWorkflow) {
        Preconditions.checkArgument(
                StringUtils.isNotBlank(workflowId), "Workflow id cannot be blank");
        stub.removeWorkflow(
                WorkflowServicePb.RemoveWorkflowRequest.newBuilder()
                        // "setWorkflodId" is the generated setter name — presumably a
                        // field-name typo in the .proto definition; confirm before renaming.
                        .setWorkflodId(workflowId)
                        .setArchiveWorkflow(archiveWorkflow)
                        .build());
    }
    /**
     * Retrieve all running workflow instances for a given name and version
     *
     * @param workflowName the name of the workflow
     * @param version the version of the workflow definition. Defaults to 1.
     * @return the list of running workflow instances
     */
    public List<String> getRunningWorkflow(String workflowName, @Nullable Integer version) {
        Preconditions.checkArgument(
                StringUtils.isNotBlank(workflowName), "Workflow name cannot be blank");
        WorkflowServicePb.GetRunningWorkflowsResponse workflows =
                stub.getRunningWorkflows(
                        WorkflowServicePb.GetRunningWorkflowsRequest.newBuilder()
                                .setName(workflowName)
                                .setVersion(version == null ? 1 : version)
                                .build());
        return workflows.getWorkflowIdsList();
    }
    /**
     * Retrieve all workflow instances for a given workflow name between a specific time period
     *
     * <p>NOTE(review): not implemented — validates its arguments and then returns
     * {@code null} unconditionally (the {@code version} parameter is also ignored).
     * Callers must null-check until the RPC is wired up.
     *
     * @param workflowName the name of the workflow
     * @param version the version of the workflow definition. Defaults to 1.
     * @param startTime the start time of the period
     * @param endTime the end time of the period
     * @return returns a list of workflows created during the specified during the time period
     */
    public List<String> getWorkflowsByTimePeriod(
            String workflowName, int version, Long startTime, Long endTime) {
        Preconditions.checkArgument(
                StringUtils.isNotBlank(workflowName), "Workflow name cannot be blank");
        Preconditions.checkNotNull(startTime, "Start time cannot be null");
        Preconditions.checkNotNull(endTime, "End time cannot be null");
        // TODO
        return null;
    }
    /**
     * Starts the decision task for the given workflow instance
     *
     * @param workflowId the id of the workflow instance
     */
    public void runDecider(String workflowId) {
        Preconditions.checkArgument(
                StringUtils.isNotBlank(workflowId), "workflow id cannot be blank");
        stub.decideWorkflow(
                WorkflowServicePb.DecideWorkflowRequest.newBuilder()
                        .setWorkflowId(workflowId)
                        .build());
    }
    /**
     * Pause a workflow by workflow id
     *
     * @param workflowId the workflow id of the workflow to be paused
     */
    public void pauseWorkflow(String workflowId) {
        Preconditions.checkArgument(
                StringUtils.isNotBlank(workflowId), "workflow id cannot be blank");
        stub.pauseWorkflow(
                WorkflowServicePb.PauseWorkflowRequest.newBuilder()
                        .setWorkflowId(workflowId)
                        .build());
    }
    /**
     * Resume a paused workflow by workflow id
     *
     * @param workflowId the workflow id of the paused workflow
     */
    public void resumeWorkflow(String workflowId) {
        Preconditions.checkArgument(
                StringUtils.isNotBlank(workflowId), "workflow id cannot be blank");
        stub.resumeWorkflow(
                WorkflowServicePb.ResumeWorkflowRequest.newBuilder()
                        .setWorkflowId(workflowId)
                        .build());
    }
    /**
     * Skips a given task from a current RUNNING workflow
     *
     * @param workflowId the id of the workflow instance
     * @param taskReferenceName the reference name of the task to be skipped
     */
    public void skipTaskFromWorkflow(String workflowId, String taskReferenceName) {
        Preconditions.checkArgument(
                StringUtils.isNotBlank(workflowId), "workflow id cannot be blank");
        Preconditions.checkArgument(
                StringUtils.isNotBlank(taskReferenceName), "Task reference name cannot be blank");
        stub.skipTaskFromWorkflow(
                WorkflowServicePb.SkipTaskRequest.newBuilder()
                        .setWorkflowId(workflowId)
                        .setTaskReferenceName(taskReferenceName)
                        .build());
    }
    /**
     * Reruns the workflow from a specific task
     *
     * @param rerunWorkflowRequest the request containing the task to rerun from
     * @return the id of the workflow
     */
    public String rerunWorkflow(RerunWorkflowRequest rerunWorkflowRequest) {
        Preconditions.checkNotNull(rerunWorkflowRequest, "RerunWorkflowRequest cannot be null");
        return stub.rerunWorkflow(protoMapper.toProto(rerunWorkflowRequest)).getWorkflowId();
    }
    /**
     * Restart a completed workflow
     *
     * @param workflowId the workflow id of the workflow to be restarted
     * @param useLatestDefinitions if true, restart against the latest workflow/task
     *     definitions instead of the versions the instance originally ran with
     */
    public void restart(String workflowId, boolean useLatestDefinitions) {
        Preconditions.checkArgument(
                StringUtils.isNotBlank(workflowId), "workflow id cannot be blank");
        stub.restartWorkflow(
                WorkflowServicePb.RestartWorkflowRequest.newBuilder()
                        .setWorkflowId(workflowId)
                        .setUseLatestDefinitions(useLatestDefinitions)
                        .build());
    }
    /**
     * Retries the last failed task in a workflow
     *
     * @param workflowId the workflow id of the workflow with the failed task
     * @param resumeSubworkflowTasks flag forwarded to the server indicating whether
     *     sub-workflow tasks should also be resumed
     */
    public void retryLastFailedTask(String workflowId, boolean resumeSubworkflowTasks) {
        Preconditions.checkArgument(
                StringUtils.isNotBlank(workflowId), "workflow id cannot be blank");
        stub.retryWorkflow(
                WorkflowServicePb.RetryWorkflowRequest.newBuilder()
                        .setWorkflowId(workflowId)
                        .setResumeSubworkflowTasks(resumeSubworkflowTasks)
                        .build());
    }
    /**
     * Resets the callback times of all IN PROGRESS tasks to 0 for the given workflow
     *
     * @param workflowId the id of the workflow
     */
    public void resetCallbacksForInProgressTasks(String workflowId) {
        Preconditions.checkArgument(
                StringUtils.isNotBlank(workflowId), "workflow id cannot be blank");
        stub.resetWorkflowCallbacks(
                WorkflowServicePb.ResetWorkflowCallbacksRequest.newBuilder()
                        .setWorkflowId(workflowId)
                        .build());
    }
    /**
     * Terminates the execution of the given workflow instance
     *
     * @param workflowId the id of the workflow to be terminated
     * @param reason the reason to be logged and displayed
     */
    public void terminateWorkflow(String workflowId, String reason) {
        Preconditions.checkArgument(
                StringUtils.isNotBlank(workflowId), "workflow id cannot be blank");
        stub.terminateWorkflow(
                WorkflowServicePb.TerminateWorkflowRequest.newBuilder()
                        .setWorkflowId(workflowId)
                        .setReason(reason)
                        .build());
    }
    /**
     * Search for workflows based on payload
     *
     * @param query the search query
     * @return the {@link SearchResult} containing the {@link WorkflowSummary} that match the query
     */
    public SearchResult<WorkflowSummary> search(String query) {
        return search(null, null, null, null, query);
    }
    /**
     * Search for workflows based on payload
     *
     * @param query the search query
     * @return the {@link SearchResult} containing the {@link Workflow} that match the query
     */
    public SearchResult<Workflow> searchV2(String query) {
        return searchV2(null, null, null, null, query);
    }
    /**
     * Paginated search for workflows based on payload
     *
     * @param start start value of page
     * @param size number of workflows to be returned
     * @param sort sort order
     * @param freeText additional free text query
     * @param query the search query
     * @return the {@link SearchResult} containing the {@link WorkflowSummary} that match the query
     */
    public SearchResult<WorkflowSummary> search(
            @Nullable Integer start,
            @Nullable Integer size,
            @Nullable String sort,
            @Nullable String freeText,
            @Nullable String query) {
        SearchPb.Request searchRequest = createSearchRequest(start, size, sort, freeText, query);
        WorkflowServicePb.WorkflowSummarySearchResult result = stub.search(searchRequest);
        return new SearchResult<>(
                result.getTotalHits(),
                result.getResultsList().stream()
                        .map(protoMapper::fromProto)
                        .collect(Collectors.toList()));
    }
    /**
     * Paginated search for workflows based on payload
     *
     * @param start start value of page
     * @param size number of workflows to be returned
     * @param sort sort order
     * @param freeText additional free text query
     * @param query the search query
     * @return the {@link SearchResult} containing the {@link Workflow} that match the query
     */
    public SearchResult<Workflow> searchV2(
            @Nullable Integer start,
            @Nullable Integer size,
            @Nullable String sort,
            @Nullable String freeText,
            @Nullable String query) {
        SearchPb.Request searchRequest = createSearchRequest(start, size, sort, freeText, query);
        WorkflowServicePb.WorkflowSearchResult result = stub.searchV2(searchRequest);
        return new SearchResult<>(
                result.getTotalHits(),
                result.getResultsList().stream()
                        .map(protoMapper::fromProto)
                        .collect(Collectors.toList()));
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-external-storage/src/test/java/com/netflix/conductor/postgres/controller/ExternalPostgresPayloadResourceTest.java | postgres-external-storage/src/test/java/com/netflix/conductor/postgres/controller/ExternalPostgresPayloadResourceTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.controller;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import org.junit.Before;
import org.junit.Test;
import org.springframework.core.io.InputStreamResource;
import org.springframework.http.ResponseEntity;
import com.netflix.conductor.postgres.storage.PostgresPayloadStorage;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/** Unit tests for {@link ExternalPostgresPayloadResource}. */
public class ExternalPostgresPayloadResourceTest {

    // Mocked storage backend injected into the resource under test.
    private PostgresPayloadStorage storageMock;
    private ExternalPostgresPayloadResource resource;

    @Before
    public void before() {
        storageMock = mock(PostgresPayloadStorage.class);
        resource = new ExternalPostgresPayloadResource(storageMock);
    }

    @Test
    public void testGetExternalStorageData() throws IOException {
        String payload = "Dummy data";
        InputStream stream =
                new ByteArrayInputStream(payload.getBytes(StandardCharsets.UTF_8));
        when(storageMock.download(anyString())).thenReturn(stream);

        ResponseEntity<InputStreamResource> response =
                resource.getExternalStorageData("dummyKey.json");

        // The resource must wrap the downloaded stream unchanged in the response body.
        assertNotNull(response.getBody());
        byte[] body = response.getBody().getInputStream().readAllBytes();
        assertEquals(payload, new String(body, StandardCharsets.UTF_8));
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-external-storage/src/test/java/com/netflix/conductor/postgres/storage/PostgresPayloadTestUtil.java | postgres-external-storage/src/test/java/com/netflix/conductor/postgres/storage/PostgresPayloadTestUtil.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.storage;
import java.nio.file.Paths;
import java.util.Map;
import javax.sql.DataSource;
import org.flywaydb.core.Flyway;
import org.flywaydb.core.api.configuration.FluentConfiguration;
import org.springframework.boot.jdbc.DataSourceBuilder;
import org.testcontainers.containers.PostgreSQLContainer;
import com.netflix.conductor.postgres.config.PostgresPayloadProperties;
/**
 * Test helper that points a {@link DataSource} at a Testcontainers PostgreSQL instance and
 * applies the external-payload Flyway migrations with fixed retention placeholders.
 */
public class PostgresPayloadTestUtil {

    private final DataSource dataSource;
    private final PostgresPayloadProperties properties = new PostgresPayloadProperties();

    public PostgresPayloadTestUtil(PostgreSQLContainer<?> postgreSQLContainer) {
        dataSource =
                DataSourceBuilder.create()
                        .url(postgreSQLContainer.getJdbcUrl())
                        .username(postgreSQLContainer.getUsername())
                        .password(postgreSQLContainer.getPassword())
                        .build();
        flywayMigrate(dataSource);
    }

    /** Runs the external-payload schema migrations against the test database. */
    private void flywayMigrate(DataSource dataSource) {
        // Placeholders pin a tiny retention policy (5 rows / 1 day / 1 month / 1 year)
        // so retention behavior is observable in tests.
        Flyway.configure()
                .schemas("external")
                .locations(Paths.get("db/migration_external_postgres").toString())
                .dataSource(dataSource)
                .placeholderReplacement(true)
                .placeholders(
                        Map.of(
                                "tableName", "external.external_payload",
                                "maxDataRows", "5",
                                "maxDataDays", "'1'",
                                "maxDataMonths", "'1'",
                                "maxDataYears", "'1'"))
                .load()
                .migrate();
    }

    public DataSource getDataSource() {
        return dataSource;
    }

    public PostgresPayloadProperties getTestProperties() {
        return properties;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-external-storage/src/test/java/com/netflix/conductor/postgres/storage/PostgresPayloadStorageTest.java | postgres-external-storage/src/test/java/com/netflix/conductor/postgres/storage/PostgresPayloadStorageTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.storage;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.IntStream;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import org.testcontainers.containers.PostgreSQLContainer;
import org.testcontainers.utility.DockerImageName;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.common.utils.ExternalPayloadStorage;
import com.netflix.conductor.core.utils.IDGenerator;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
@ContextConfiguration(classes = {TestObjectMapperConfiguration.class})
@RunWith(SpringRunner.class)
public class PostgresPayloadStorageTest {

    private PostgresPayloadTestUtil testPostgres;
    private PostgresPayloadStorage executionPostgres;
    public PostgreSQLContainer<?> postgreSQLContainer;

    private final String inputString =
            "Lorem Ipsum is simply dummy text of the printing and typesetting industry."
                    + " Lorem Ipsum has been the industry's standard dummy text ever since the 1500s.";
    private final String errorMessage = "{\"Error\": \"Data does not exist.\"}";

    // NOTE(review): this single stream is consumed by the first upload that reads it; later
    // uploads of the same stream write whatever remains. The affected tests only assert on
    // row counts or freshly re-inserted data, so this is tolerated here.
    private final InputStream inputData;
    private final String key = "dummyKey.json";

    public PostgresPayloadStorageTest() {
        inputData = new ByteArrayInputStream(inputString.getBytes(StandardCharsets.UTF_8));
    }

    /** Starts a fresh PostgreSQL container and wires the storage under test against it. */
    @Before
    public void setup() {
        postgreSQLContainer =
                new PostgreSQLContainer<>(DockerImageName.parse("postgres"))
                        .withDatabaseName("conductor");
        postgreSQLContainer.start();
        testPostgres = new PostgresPayloadTestUtil(postgreSQLContainer);
        executionPostgres =
                new PostgresPayloadStorage(
                        testPostgres.getTestProperties(),
                        testPostgres.getDataSource(),
                        new IDGenerator(),
                        errorMessage);
    }

    @Test
    public void testWriteInputStreamToDb() throws IOException, SQLException {
        executionPostgres.upload(key, inputData, inputData.available());
        // try-with-resources: the original leaked the Connection, PreparedStatement and
        // ResultSet obtained here.
        try (Connection conn = testPostgres.getDataSource().getConnection();
                PreparedStatement stmt =
                        conn.prepareStatement(
                                "SELECT data FROM external.external_payload WHERE id = 'dummyKey.json'");
                ResultSet rs = stmt.executeQuery()) {
            rs.next();
            assertEquals(
                    inputString,
                    new String(rs.getBinaryStream(1).readAllBytes(), StandardCharsets.UTF_8));
        }
    }

    @Test
    public void testReadInputStreamFromDb() throws IOException, SQLException {
        insertData();
        assertEquals(
                inputString,
                new String(executionPostgres.download(key).readAllBytes(), StandardCharsets.UTF_8));
    }

    /** Inserts the shared payload under {@link #key} directly via JDBC, closing all resources. */
    private void insertData() throws SQLException, IOException {
        try (Connection conn = testPostgres.getDataSource().getConnection();
                PreparedStatement stmt =
                        conn.prepareStatement(
                                "INSERT INTO external.external_payload VALUES (?, ?)")) {
            stmt.setString(1, key);
            stmt.setBinaryStream(2, inputData, inputData.available());
            stmt.executeUpdate();
        }
    }

    @Test(timeout = 60 * 1000)
    public void testMultithreadDownload()
            throws ExecutionException, InterruptedException, SQLException, IOException {
        AtomicInteger threadCounter = new AtomicInteger(0);
        insertData();
        int numberOfThread = 12;
        int taskInThread = 100;
        ArrayList<CompletableFuture<?>> completableFutures = new ArrayList<>();
        Executor executor = Executors.newFixedThreadPool(numberOfThread);
        IntStream.range(0, numberOfThread * taskInThread)
                .forEach(
                        i ->
                                createFutureForDownloadOperation(
                                        threadCounter, completableFutures, executor));
        for (CompletableFuture<?> completableFuture : completableFutures) {
            completableFuture.get();
        }
        assertCount(1);
        assertEquals(numberOfThread * taskInThread, threadCounter.get());
    }

    /** Schedules one asynchronous download-and-verify task on the given executor. */
    private void createFutureForDownloadOperation(
            AtomicInteger threadCounter,
            ArrayList<CompletableFuture<?>> completableFutures,
            Executor executor) {
        CompletableFuture<Void> objectCompletableFuture =
                CompletableFuture.supplyAsync(() -> downloadData(threadCounter), executor);
        completableFutures.add(objectCompletableFuture);
    }

    /** Downloads {@link #key}, verifies the payload and bumps the success counter. */
    private Void downloadData(AtomicInteger threadCounter) {
        try {
            assertEquals(
                    inputString,
                    new String(
                            executionPostgres.download(key).readAllBytes(),
                            StandardCharsets.UTF_8));
            threadCounter.getAndIncrement();
            return null;
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    @Test
    public void testReadNonExistentInputStreamFromDb() throws IOException, SQLException {
        // Reuse insertData(): the original duplicated the same INSERT inline (and leaked
        // the connection doing so).
        insertData();
        assertEquals(
                errorMessage,
                new String(
                        executionPostgres.download("non_existent_key.json").readAllBytes(),
                        StandardCharsets.UTF_8));
    }

    @Test
    public void testMaxRowInTable() throws IOException, SQLException {
        // Seven uploads against a schema configured with maxDataRows = 5: the retention
        // trigger must keep the table at its cap.
        executionPostgres.upload(key, inputData, inputData.available());
        executionPostgres.upload("dummyKey2.json", inputData, inputData.available());
        executionPostgres.upload("dummyKey3.json", inputData, inputData.available());
        executionPostgres.upload("dummyKey4.json", inputData, inputData.available());
        executionPostgres.upload("dummyKey5.json", inputData, inputData.available());
        executionPostgres.upload("dummyKey6.json", inputData, inputData.available());
        executionPostgres.upload("dummyKey7.json", inputData, inputData.available());
        assertCount(5);
    }

    @Test(timeout = 60 * 1000)
    public void testMultithreadInsert()
            throws SQLException, ExecutionException, InterruptedException {
        AtomicInteger threadCounter = new AtomicInteger(0);
        int numberOfThread = 12;
        int taskInThread = 100;
        ArrayList<CompletableFuture<?>> completableFutures = new ArrayList<>();
        Executor executor = Executors.newFixedThreadPool(numberOfThread);
        IntStream.range(0, numberOfThread * taskInThread)
                .forEach(
                        i ->
                                createFutureForUploadOperation(
                                        threadCounter, completableFutures, executor));
        for (CompletableFuture<?> completableFuture : completableFutures) {
            completableFuture.get();
        }
        // All uploads target the same hashed key, so exactly one row must remain.
        assertCount(1);
        assertEquals(numberOfThread * taskInThread, threadCounter.get());
    }

    /** Schedules one asynchronous upload task on the given executor. */
    private void createFutureForUploadOperation(
            AtomicInteger threadCounter,
            ArrayList<CompletableFuture<?>> completableFutures,
            Executor executor) {
        CompletableFuture<Void> objectCompletableFuture =
                CompletableFuture.supplyAsync(() -> uploadData(threadCounter), executor);
        completableFutures.add(objectCompletableFuture);
    }

    /** Uploads the shared payload and bumps the success counter. */
    private Void uploadData(AtomicInteger threadCounter) {
        try {
            uploadData();
            threadCounter.getAndIncrement();
            return null;
        } catch (IOException | SQLException e) {
            throw new RuntimeException(e);
        }
    }

    @Test
    public void testHashEnsuringNoDuplicates()
            throws IOException, SQLException, InterruptedException {
        final String createdOn = uploadData();
        Thread.sleep(500);
        final String createdOnAfterUpdate = uploadData();
        // Same content hashes to the same key: one row, but created_on is refreshed.
        assertCount(1);
        assertNotEquals(createdOnAfterUpdate, createdOn);
    }

    /** Uploads the shared payload under its content-hash key and returns its created_on. */
    private String uploadData() throws SQLException, IOException {
        final String location = getKey(inputString);
        ByteArrayInputStream inputStream =
                new ByteArrayInputStream(inputString.getBytes(StandardCharsets.UTF_8));
        executionPostgres.upload(location, inputStream, inputStream.available());
        return getCreatedOn(location);
    }

    @Test
    public void testDistinctHashedKey() {
        final String location = getKey(inputString);
        final String location2 = getKey(inputString);
        final String location3 = getKey(inputString + "A");
        // Hashing is deterministic per content and distinct across contents.
        assertNotEquals(location3, location);
        assertEquals(location2, location);
    }

    /** Resolves the content-hashed storage URI for the given payload string. */
    private String getKey(String input) {
        return executionPostgres
                .getLocation(
                        ExternalPayloadStorage.Operation.READ,
                        ExternalPayloadStorage.PayloadType.TASK_INPUT,
                        "",
                        input.getBytes(StandardCharsets.UTF_8))
                .getUri();
    }

    /** Asserts the total row count of the payload table, closing all JDBC resources. */
    private void assertCount(int expected) throws SQLException {
        // The original leaked the Connection here (only statement and result set were closed).
        try (Connection conn = testPostgres.getDataSource().getConnection();
                PreparedStatement stmt =
                        conn.prepareStatement(
                                "SELECT count(id) FROM external.external_payload");
                ResultSet rs = stmt.executeQuery()) {
            rs.next();
            assertEquals(expected, rs.getInt(1));
        }
    }

    /** Reads the created_on timestamp for the given payload id. */
    private String getCreatedOn(String key) throws SQLException {
        try (Connection conn = testPostgres.getDataSource().getConnection();
                PreparedStatement stmt =
                        conn.prepareStatement(
                                "SELECT created_on FROM external.external_payload WHERE id = ?")) {
            stmt.setString(1, key);
            try (ResultSet rs = stmt.executeQuery()) {
                rs.next();
                return rs.getString(1);
            }
        }
    }

    @After
    public void teardown() {
        // Stop the per-test container. The original opened a brand-new connection and
        // closed it, which released nothing and left the container running.
        postgreSQLContainer.stop();
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-external-storage/src/main/java/com/netflix/conductor/postgres/controller/ExternalPostgresPayloadResource.java | postgres-external-storage/src/main/java/com/netflix/conductor/postgres/controller/ExternalPostgresPayloadResource.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.controller;
import java.io.InputStream;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.core.io.InputStreamResource;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
import com.netflix.conductor.common.utils.ExternalPayloadStorage;
import io.swagger.v3.oas.annotations.Operation;
/**
 * REST endpoint that streams an external payload out of the PostgreSQL store, addressed by
 * its storage key (externalPayloadPath).
 */
@RestController
@RequestMapping(value = "/api/external/postgres")
@ConditionalOnProperty(name = "conductor.external-payload-storage.type", havingValue = "postgres")
public class ExternalPostgresPayloadResource {

    private final ExternalPayloadStorage storage;

    public ExternalPostgresPayloadResource(
            @Qualifier("postgresExternalPayloadStorage") ExternalPayloadStorage postgresService) {
        this.storage = postgresService;
    }

    @GetMapping("/{externalPayloadPath}")
    @Operation(
            summary =
                    "Get task or workflow by externalPayloadPath from External PostgreSQL Storage")
    public ResponseEntity<InputStreamResource> getExternalStorageData(
            @PathVariable("externalPayloadPath") String externalPayloadPath) {
        InputStream payloadStream = storage.download(externalPayloadPath);
        return ResponseEntity.ok()
                .contentType(MediaType.APPLICATION_JSON)
                .body(new InputStreamResource(payloadStream));
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-external-storage/src/main/java/com/netflix/conductor/postgres/storage/PostgresPayloadStorage.java | postgres-external-storage/src/main/java/com/netflix/conductor/postgres/storage/PostgresPayloadStorage.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.storage;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.function.Supplier;
import javax.sql.DataSource;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.common.run.ExternalStorageLocation;
import com.netflix.conductor.common.utils.ExternalPayloadStorage;
import com.netflix.conductor.core.exception.NonTransientException;
import com.netflix.conductor.core.utils.IDGenerator;
import com.netflix.conductor.postgres.config.PostgresPayloadProperties;
/**
 * {@link ExternalPayloadStorage} implementation that stores payloads (key + binary json data)
 * in a PostgreSQL table.
 */
public class PostgresPayloadStorage implements ExternalPayloadStorage {

    private static final Logger LOGGER = LoggerFactory.getLogger(PostgresPayloadStorage.class);

    public static final String URI_SUFFIX_HASHED = ".hashed.json";
    public static final String URI_SUFFIX = ".json";
    public static final String URI_PREFIX_EXTERNAL = "/api/external/postgres/";

    // JSON error document returned to callers when a key is missing (see download()).
    private final String defaultMessageToUser;
    private final DataSource postgresDataSource;
    private final IDGenerator idGenerator;
    private final String tableName;
    private final String conductorUrl;

    public PostgresPayloadStorage(
            PostgresPayloadProperties properties,
            DataSource dataSource,
            IDGenerator idGenerator,
            String defaultMessageToUser) {
        tableName = properties.getTableName();
        conductorUrl = properties.getConductorUrl();
        this.postgresDataSource = dataSource;
        this.idGenerator = idGenerator;
        this.defaultMessageToUser = defaultMessageToUser;
        // Fixed typo in the original log message ("Extenal").
        LOGGER.info("PostgreSQL External Payload Storage initialized.");
    }

    /**
     * @param operation the type of {@link Operation} to be performed
     * @param payloadType the {@link PayloadType} that is being accessed
     * @return a {@link ExternalStorageLocation} object which contains the URI and the
     *     PostgreSQL object key for the json payload; the key is a freshly generated id
     */
    @Override
    public ExternalStorageLocation getLocation(
            Operation operation, PayloadType payloadType, String path) {
        return getLocationInternal(path, () -> idGenerator.generate() + URI_SUFFIX);
    }

    /**
     * Same as {@link #getLocation(Operation, PayloadType, String)}, but derives the object key
     * from the SHA-256 of the payload so identical payloads map to the same row.
     */
    @Override
    public ExternalStorageLocation getLocation(
            Operation operation, PayloadType payloadType, String path, byte[] payloadBytes) {
        return getLocationInternal(
                path, () -> DigestUtils.sha256Hex(payloadBytes) + URI_SUFFIX_HASHED);
    }

    /** Builds the storage location, preferring an explicit path over a computed key. */
    private ExternalStorageLocation getLocationInternal(
            String path, Supplier<String> calculateKey) {
        ExternalStorageLocation externalStorageLocation = new ExternalStorageLocation();
        String objectKey;
        if (StringUtils.isNotBlank(path)) {
            objectKey = path;
        } else {
            objectKey = calculateKey.get();
        }
        String uri = conductorUrl + URI_PREFIX_EXTERNAL + objectKey;
        externalStorageLocation.setUri(uri);
        externalStorageLocation.setPath(objectKey);
        LOGGER.debug("External storage location URI: {}, location path: {}", uri, objectKey);
        return externalStorageLocation;
    }

    /**
     * Uploads the payload to the given PostgreSQL object key. It is expected that the caller
     * retrieves the object key using {@link #getLocation(Operation, PayloadType, String)} before
     * making this call.
     *
     * @param key the PostgreSQL key of the object to be uploaded
     * @param payload an {@link InputStream} containing the json payload which is to be uploaded
     * @param payloadSize the size of the json payload in bytes
     * @throws NonTransientException if the insert fails
     */
    @Override
    public void upload(String key, InputStream payload, long payloadSize) {
        try (Connection conn = postgresDataSource.getConnection();
                PreparedStatement stmt =
                        conn.prepareStatement(
                                "INSERT INTO "
                                        + tableName
                                        + " (id, data) VALUES (?, ?) ON CONFLICT(id) "
                                        + "DO UPDATE SET created_on=CURRENT_TIMESTAMP")) {
            stmt.setString(1, key);
            stmt.setBinaryStream(2, payload, payloadSize);
            stmt.executeUpdate();
            LOGGER.debug(
                    "External PostgreSQL uploaded key: {}, payload size: {}", key, payloadSize);
        } catch (SQLException e) {
            String msg = "Error uploading data into External PostgreSQL";
            LOGGER.error(msg, e);
            throw new NonTransientException(msg, e);
        }
    }

    /**
     * Downloads the payload stored in the PostgreSQL.
     *
     * @param key the PostgreSQL key of the object
     * @return an input stream containing the contents of the object, or a JSON error document
     *     when the key does not exist. Caller is expected to close the input stream.
     * @throws NonTransientException if the query fails
     */
    @Override
    public InputStream download(String key) {
        try (Connection conn = postgresDataSource.getConnection();
                PreparedStatement stmt =
                        conn.prepareStatement("SELECT data FROM " + tableName + " WHERE id = ?")) {
            stmt.setString(1, key);
            try (ResultSet rs = stmt.executeQuery()) {
                if (!rs.next()) {
                    LOGGER.debug("External PostgreSQL data with this ID: {} does not exist", key);
                    // Explicit UTF-8: the original used the platform default charset.
                    return new ByteArrayInputStream(
                            defaultMessageToUser.getBytes(StandardCharsets.UTF_8));
                }
                // Materialize the payload before the connection closes so the returned
                // stream does not depend on an open JDBC resource.
                byte[] data = rs.getBytes(1);
                LOGGER.debug("External PostgreSQL downloaded key: {}", key);
                return new ByteArrayInputStream(data);
            }
        } catch (SQLException e) {
            String msg = "Error downloading data from external PostgreSQL";
            LOGGER.error(msg, e);
            throw new NonTransientException(msg, e);
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-external-storage/src/main/java/com/netflix/conductor/postgres/config/PostgresPayloadProperties.java | postgres-external-storage/src/main/java/com/netflix/conductor/postgres/config/PostgresPayloadProperties.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.config;
import org.springframework.boot.context.properties.ConfigurationProperties;
@ConfigurationProperties("conductor.external-payload-storage.postgres")
public class PostgresPayloadProperties {

    /** The PostgreSQL schema and table name where the payloads will be stored */
    private String tableName = "external.external_payload";

    /** Username for connecting to PostgreSQL database */
    private String username;

    /** Password for connecting to PostgreSQL database */
    private String password;

    /** URL for connecting to PostgreSQL database */
    private String url;

    /**
     * Maximum count of data rows in PostgreSQL database. After overcoming this limit, the oldest
     * data will be deleted.
     */
    private long maxDataRows = Long.MAX_VALUE;

    /**
     * Maximum count of days of data age in PostgreSQL database. After overcoming limit, the oldest
     * data will be deleted.
     */
    private int maxDataDays = 0;

    /**
     * Maximum count of months of data age in PostgreSQL database. After overcoming limit, the
     * oldest data will be deleted.
     */
    private int maxDataMonths = 0;

    /**
     * Maximum count of years of data age in PostgreSQL database. After overcoming limit, the oldest
     * data will be deleted.
     */
    private int maxDataYears = 1;

    /**
     * URL, that can be used to pull the json configurations, that will be downloaded from
     * PostgreSQL to the conductor server. For example: for local development it is
     * "http://localhost:8080"
     */
    private String conductorUrl = "";

    // Accessors are grouped per property (getter followed by its setter).

    public String getTableName() {
        return tableName;
    }

    public void setTableName(String tableName) {
        this.tableName = tableName;
    }

    public String getUsername() {
        return username;
    }

    public void setUsername(String username) {
        this.username = username;
    }

    public String getPassword() {
        return password;
    }

    public void setPassword(String password) {
        this.password = password;
    }

    public String getUrl() {
        return url;
    }

    public void setUrl(String url) {
        this.url = url;
    }

    public String getConductorUrl() {
        return conductorUrl;
    }

    public void setConductorUrl(String conductorUrl) {
        this.conductorUrl = conductorUrl;
    }

    public long getMaxDataRows() {
        return maxDataRows;
    }

    public void setMaxDataRows(long maxDataRows) {
        this.maxDataRows = maxDataRows;
    }

    public int getMaxDataDays() {
        return maxDataDays;
    }

    public void setMaxDataDays(int maxDataDays) {
        this.maxDataDays = maxDataDays;
    }

    public int getMaxDataMonths() {
        return maxDataMonths;
    }

    public void setMaxDataMonths(int maxDataMonths) {
        this.maxDataMonths = maxDataMonths;
    }

    public int getMaxDataYears() {
        return maxDataYears;
    }

    public void setMaxDataYears(int maxDataYears) {
        this.maxDataYears = maxDataYears;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/postgres-external-storage/src/main/java/com/netflix/conductor/postgres/config/PostgresPayloadConfiguration.java | postgres-external-storage/src/main/java/com/netflix/conductor/postgres/config/PostgresPayloadConfiguration.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.postgres.config;
import java.util.Map;
import javax.sql.DataSource;
import org.flywaydb.core.Flyway;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.DependsOn;
import org.springframework.context.annotation.Import;
import com.netflix.conductor.common.utils.ExternalPayloadStorage;
import com.netflix.conductor.core.utils.IDGenerator;
import com.netflix.conductor.postgres.storage.PostgresPayloadStorage;
import jakarta.annotation.*;
/**
 * Spring configuration for the PostgreSQL external payload storage: migrates the external
 * schema via Flyway, then exposes the {@link PostgresPayloadStorage} bean.
 */
@Configuration(proxyBeanMethods = false)
@EnableConfigurationProperties(PostgresPayloadProperties.class)
@ConditionalOnProperty(name = "conductor.external-payload-storage.type", havingValue = "postgres")
@Import(DataSourceAutoConfiguration.class)
public class PostgresPayloadConfiguration {

    private static final String DEFAULT_MESSAGE_TO_USER =
            "{\"Error\": \"Data with this ID does not exist or has been deleted from the external storage.\"}";

    private final PostgresPayloadProperties properties;
    private final DataSource dataSource;
    private final IDGenerator idGenerator;

    public PostgresPayloadConfiguration(
            PostgresPayloadProperties properties, DataSource dataSource, IDGenerator idGenerator) {
        this.properties = properties;
        this.dataSource = dataSource;
        this.idGenerator = idGenerator;
    }

    /**
     * Flyway instance for the external-payload schema. Migration is triggered by the bean's
     * {@code initMethod = "migrate"}; the previous {@code @PostConstruct} on this factory
     * method was removed — lifecycle annotations do not apply to {@code @Bean} methods.
     */
    @Bean(initMethod = "migrate")
    public Flyway flywayForExternalDb() {
        return Flyway.configure()
                .locations("classpath:db/migration_external_postgres")
                .schemas("external")
                .baselineOnMigrate(true)
                .placeholderReplacement(true)
                .placeholders(
                        Map.of(
                                "tableName",
                                properties.getTableName(),
                                "maxDataRows",
                                String.valueOf(properties.getMaxDataRows()),
                                "maxDataDays",
                                "'" + properties.getMaxDataDays() + "'",
                                "maxDataMonths",
                                "'" + properties.getMaxDataMonths() + "'",
                                "maxDataYears",
                                "'" + properties.getMaxDataYears() + "'"))
                .dataSource(dataSource)
                .load();
    }

    /** Storage bean; depends on the Flyway bean so the schema exists before first use. */
    @Bean
    @DependsOn({"flywayForExternalDb"})
    public ExternalPayloadStorage postgresExternalPayloadStorage(
            PostgresPayloadProperties properties) {
        return new PostgresPayloadStorage(
                properties, dataSource, idGenerator, DEFAULT_MESSAGE_TO_USER);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/kafka/src/test/java/com/netflix/conductor/core/execution/mapper/KafkaPublishTaskMapperTest.java | kafka/src/test/java/com/netflix/conductor/core/execution/mapper/KafkaPublishTaskMapperTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.mapper;
import java.util.HashMap;
import java.util.List;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.core.utils.IDGenerator;
import com.netflix.conductor.core.utils.ParametersUtils;
import com.netflix.conductor.dao.MetadataDAO;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
/**
 * Unit tests for {@link KafkaPublishTaskMapper}: verifies that a {@code KAFKA_PUBLISH} workflow
 * task is mapped to exactly one SCHEDULED task, and that attributes from the task definition
 * (execution namespace, isolation group) are propagated onto the mapped task.
 *
 * <p>Fix: removed the unused {@code @Rule ExpectedException} field — it was never referenced by
 * any test, and {@code ExpectedException} is deprecated in JUnit 4.13.
 */
public class KafkaPublishTaskMapperTest {

    private IDGenerator idGenerator;
    private KafkaPublishTaskMapper kafkaTaskMapper;

    @Before
    public void setUp() {
        // The mapper's collaborators are mocked; these tests only exercise the mapping logic.
        ParametersUtils parametersUtils = mock(ParametersUtils.class);
        MetadataDAO metadataDAO = mock(MetadataDAO.class);
        kafkaTaskMapper = new KafkaPublishTaskMapper(parametersUtils, metadataDAO);
        idGenerator = new IDGenerator();
    }

    @Test
    public void getMappedTasks() {
        // Given: a KAFKA_PUBLISH task with an inline task definition
        WorkflowTask workflowTask = new WorkflowTask();
        workflowTask.setName("kafka_task");
        workflowTask.setType(TaskType.KAFKA_PUBLISH.name());
        workflowTask.setTaskDefinition(new TaskDef("kafka_task"));
        String taskId = idGenerator.generate();
        String retriedTaskId = idGenerator.generate();
        WorkflowModel workflow = new WorkflowModel();
        WorkflowDef workflowDef = new WorkflowDef();
        workflow.setWorkflowDefinition(workflowDef);
        TaskMapperContext taskMapperContext =
                TaskMapperContext.newBuilder()
                        .withWorkflowModel(workflow)
                        .withTaskDefinition(new TaskDef())
                        .withWorkflowTask(workflowTask)
                        .withTaskInput(new HashMap<>())
                        .withRetryCount(0)
                        .withRetryTaskId(retriedTaskId)
                        .withTaskId(taskId)
                        .build();

        // When
        List<TaskModel> mappedTasks = kafkaTaskMapper.getMappedTasks(taskMapperContext);

        // Then: exactly one task of the expected type is produced
        assertEquals(1, mappedTasks.size());
        assertEquals(TaskType.KAFKA_PUBLISH.name(), mappedTasks.get(0).getTaskType());
    }

    @Test
    public void getMappedTasks_WithoutTaskDef() {
        // Given: a KAFKA_PUBLISH task whose definition is supplied via the mapper context only
        WorkflowTask workflowTask = new WorkflowTask();
        workflowTask.setName("kafka_task");
        workflowTask.setType(TaskType.KAFKA_PUBLISH.name());
        String taskId = idGenerator.generate();
        String retriedTaskId = idGenerator.generate();
        WorkflowModel workflow = new WorkflowModel();
        WorkflowDef workflowDef = new WorkflowDef();
        workflow.setWorkflowDefinition(workflowDef);
        TaskDef taskdefinition = new TaskDef();
        String testExecutionNameSpace = "testExecutionNameSpace";
        taskdefinition.setExecutionNameSpace(testExecutionNameSpace);
        String testIsolationGroupId = "testIsolationGroupId";
        taskdefinition.setIsolationGroupId(testIsolationGroupId);
        TaskMapperContext taskMapperContext =
                TaskMapperContext.newBuilder()
                        .withWorkflowModel(workflow)
                        .withTaskDefinition(taskdefinition)
                        .withWorkflowTask(workflowTask)
                        .withTaskInput(new HashMap<>())
                        .withRetryCount(0)
                        .withRetryTaskId(retriedTaskId)
                        .withTaskId(taskId)
                        .build();

        // When
        List<TaskModel> mappedTasks = kafkaTaskMapper.getMappedTasks(taskMapperContext);

        // Then: definition-level attributes are copied onto the mapped task
        assertEquals(1, mappedTasks.size());
        assertEquals(TaskType.KAFKA_PUBLISH.name(), mappedTasks.get(0).getTaskType());
        assertEquals(testExecutionNameSpace, mappedTasks.get(0).getExecutionNameSpace());
        assertEquals(testIsolationGroupId, mappedTasks.get(0).getIsolationGroupId());
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/kafka/src/test/java/com/netflix/conductor/contribs/tasks/kafka/KafkaProducerManagerTest.java | kafka/src/test/java/com/netflix/conductor/contribs/tasks/kafka/KafkaProducerManagerTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.tasks.kafka;
import java.time.Duration;
import java.util.Properties;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.LongSerializer;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
/**
 * Unit tests for {@link KafkaProducerManager}: request-timeout / max-block resolution order
 * (input overrides config default), producer creation failure handling, and cache eviction.
 *
 * <p>Fix: every {@code assertEquals} call had its arguments swapped; JUnit's contract is
 * {@code assertEquals(expected, actual)}, and the wrong order produces misleading failure
 * messages ("expected: &lt;actual&gt; but was: &lt;expected&gt;").
 */
public class KafkaProducerManagerTest {

    @Test
    public void testRequestTimeoutSetFromDefault() {
        KafkaProducerManager manager =
                new KafkaProducerManager(
                        Duration.ofMillis(100),
                        Duration.ofMillis(500),
                        10,
                        Duration.ofMillis(120000));
        KafkaPublishTask.Input input = getInput();
        Properties props = manager.getProducerProperties(input);
        // No per-task override: the manager-level default (100ms) must apply.
        assertEquals("100", props.getProperty(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG));
    }

    @Test
    public void testRequestTimeoutSetFromInput() {
        KafkaProducerManager manager =
                new KafkaProducerManager(
                        Duration.ofMillis(100),
                        Duration.ofMillis(500),
                        10,
                        Duration.ofMillis(120000));
        KafkaPublishTask.Input input = getInput();
        input.setRequestTimeoutMs(200);
        Properties props = manager.getProducerProperties(input);
        // Per-task override wins over the manager default.
        assertEquals("200", props.getProperty(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG));
    }

    @Test
    public void testRequestTimeoutSetFromConfig() {
        KafkaProducerManager manager =
                new KafkaProducerManager(
                        Duration.ofMillis(150),
                        Duration.ofMillis(500),
                        10,
                        Duration.ofMillis(120000));
        KafkaPublishTask.Input input = getInput();
        Properties props = manager.getProducerProperties(input);
        assertEquals("150", props.getProperty(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG));
    }

    @SuppressWarnings("rawtypes")
    @Test(expected = RuntimeException.class)
    public void testExecutionException() {
        KafkaProducerManager manager =
                new KafkaProducerManager(
                        Duration.ofMillis(150),
                        Duration.ofMillis(500),
                        10,
                        Duration.ofMillis(120000));
        KafkaPublishTask.Input input = getInput();
        // Creating a real KafkaProducer with unresolvable bootstrap servers fails; the manager
        // wraps the cache loader's ExecutionException in a RuntimeException.
        Producer producer = manager.getProducer(input);
        assertNotNull(producer);
    }

    @SuppressWarnings("rawtypes")
    @Test
    public void testCacheInvalidation() {
        // A zero-size, zero-ttl cache evicts immediately, which must close the evicted producer.
        KafkaProducerManager manager =
                new KafkaProducerManager(
                        Duration.ofMillis(150), Duration.ofMillis(500), 0, Duration.ofMillis(0));
        KafkaPublishTask.Input input = getInput();
        input.setBootStrapServers("");
        Properties props = manager.getProducerProperties(input);
        Producer producerMock = mock(Producer.class);
        Producer producer = manager.getFromCache(props, () -> producerMock);
        assertNotNull(producer);
        verify(producerMock, times(1)).close();
    }

    @Test
    public void testMaxBlockMsFromConfig() {
        KafkaProducerManager manager =
                new KafkaProducerManager(
                        Duration.ofMillis(150),
                        Duration.ofMillis(500),
                        10,
                        Duration.ofMillis(120000));
        KafkaPublishTask.Input input = getInput();
        Properties props = manager.getProducerProperties(input);
        assertEquals("500", props.getProperty(ProducerConfig.MAX_BLOCK_MS_CONFIG));
    }

    @Test
    public void testMaxBlockMsFromInput() {
        KafkaProducerManager manager =
                new KafkaProducerManager(
                        Duration.ofMillis(150),
                        Duration.ofMillis(500),
                        10,
                        Duration.ofMillis(120000));
        KafkaPublishTask.Input input = getInput();
        input.setMaxBlockMs(600);
        Properties props = manager.getProducerProperties(input);
        assertEquals("600", props.getProperty(ProducerConfig.MAX_BLOCK_MS_CONFIG));
    }

    /** Builds a minimal valid task input used by all tests. */
    private KafkaPublishTask.Input getInput() {
        KafkaPublishTask.Input input = new KafkaPublishTask.Input();
        input.setTopic("testTopic");
        input.setValue("TestMessage");
        input.setKeySerializer(LongSerializer.class.getCanonicalName());
        input.setBootStrapServers("servers");
        return input;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/kafka/src/test/java/com/netflix/conductor/contribs/tasks/kafka/KafkaPublishTaskTest.java | kafka/src/test/java/com/netflix/conductor/contribs/tasks/kafka/KafkaPublishTaskTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.tasks.kafka;
import java.time.Duration;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.common.serialization.IntegerSerializer;
import org.apache.kafka.common.serialization.LongSerializer;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.core.execution.WorkflowExecutor;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import com.fasterxml.jackson.databind.ObjectMapper;
import static org.junit.Assert.assertEquals;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@SuppressWarnings({"unchecked", "rawtypes"})
@ContextConfiguration(classes = {TestObjectMapperConfiguration.class})
@RunWith(SpringRunner.class)
/**
 * Unit tests for {@link KafkaPublishTask}: input validation failures, publish success/failure
 * handling, async-complete behavior, and key deserialization per configured key serializer.
 *
 * <p>Fixes: (1) {@code getKafkaProducerManager()} had the cacheSize and cacheTime constructor
 * arguments swapped — it created a 120000-entry cache with a 10ms TTL instead of a 10-entry
 * cache with a 120000ms TTL (compare KafkaProducerManagerTest); (2) the serializer tests used
 * {@code assertEquals(actual, expected)}, inverting JUnit's expected/actual argument order.
 */
public class KafkaPublishTaskTest {

    @Autowired private ObjectMapper objectMapper;

    @Test
    public void missingRequest_Fail() {
        // No "kafka_request" key in the task input -> task fails fast.
        KafkaPublishTask kafkaPublishTask =
                new KafkaPublishTask(getKafkaProducerManager(), objectMapper);
        TaskModel task = new TaskModel();
        kafkaPublishTask.start(mock(WorkflowModel.class), task, mock(WorkflowExecutor.class));
        assertEquals(TaskModel.Status.FAILED, task.getStatus());
    }

    @Test
    public void missingValue_Fail() {
        // Request present but no value -> task fails fast.
        TaskModel task = new TaskModel();
        KafkaPublishTask.Input input = new KafkaPublishTask.Input();
        input.setBootStrapServers("localhost:9092");
        input.setTopic("testTopic");
        task.getInputData().put(KafkaPublishTask.REQUEST_PARAMETER_NAME, input);
        KafkaPublishTask kPublishTask =
                new KafkaPublishTask(getKafkaProducerManager(), objectMapper);
        kPublishTask.start(mock(WorkflowModel.class), task, mock(WorkflowExecutor.class));
        assertEquals(TaskModel.Status.FAILED, task.getStatus());
    }

    @Test
    public void missingBootStrapServers_Fail() {
        // Request present but no bootstrap servers -> task fails fast.
        TaskModel task = new TaskModel();
        KafkaPublishTask.Input input = new KafkaPublishTask.Input();
        Map<String, Object> value = new HashMap<>();
        input.setValue(value);
        input.setTopic("testTopic");
        task.getInputData().put(KafkaPublishTask.REQUEST_PARAMETER_NAME, input);
        KafkaPublishTask kPublishTask =
                new KafkaPublishTask(getKafkaProducerManager(), objectMapper);
        kPublishTask.start(mock(WorkflowModel.class), task, mock(WorkflowExecutor.class));
        assertEquals(TaskModel.Status.FAILED, task.getStatus());
    }

    @Test
    public void kafkaPublishExecutionException_Fail()
            throws ExecutionException, InterruptedException {
        TaskModel task = getTask();

        KafkaProducerManager producerManager = mock(KafkaProducerManager.class);
        KafkaPublishTask kafkaPublishTask = new KafkaPublishTask(producerManager, objectMapper);

        Producer producer = mock(Producer.class);
        when(producerManager.getProducer(any())).thenReturn(producer);
        Future publishingFuture = mock(Future.class);
        when(producer.send(any())).thenReturn(publishingFuture);

        // Broker-side failure surfaces as ExecutionException from Future.get().
        ExecutionException executionException = mock(ExecutionException.class);
        when(executionException.getMessage()).thenReturn("Execution exception");
        when(publishingFuture.get()).thenThrow(executionException);

        kafkaPublishTask.start(mock(WorkflowModel.class), task, mock(WorkflowExecutor.class));
        assertEquals(TaskModel.Status.FAILED, task.getStatus());
        assertEquals(
                "Failed to invoke kafka task due to: Execution exception",
                task.getReasonForIncompletion());
    }

    @Test
    public void kafkaPublishUnknownException_Fail() {
        TaskModel task = getTask();

        KafkaProducerManager producerManager = mock(KafkaProducerManager.class);
        KafkaPublishTask kPublishTask = new KafkaPublishTask(producerManager, objectMapper);

        Producer producer = mock(Producer.class);
        when(producerManager.getProducer(any())).thenReturn(producer);
        // Any unexpected RuntimeException must also mark the task FAILED with a reason.
        when(producer.send(any())).thenThrow(new RuntimeException("Unknown exception"));

        kPublishTask.start(mock(WorkflowModel.class), task, mock(WorkflowExecutor.class));
        assertEquals(TaskModel.Status.FAILED, task.getStatus());
        assertEquals(
                "Failed to invoke kafka task due to: Unknown exception",
                task.getReasonForIncompletion());
    }

    @Test
    public void kafkaPublishSuccess_Completed() {
        TaskModel task = getTask();

        KafkaProducerManager producerManager = mock(KafkaProducerManager.class);
        KafkaPublishTask kPublishTask = new KafkaPublishTask(producerManager, objectMapper);

        Producer producer = mock(Producer.class);
        when(producerManager.getProducer(any())).thenReturn(producer);
        when(producer.send(any())).thenReturn(mock(Future.class));

        kPublishTask.start(mock(WorkflowModel.class), task, mock(WorkflowExecutor.class));
        assertEquals(TaskModel.Status.COMPLETED, task.getStatus());
    }

    @Test
    public void kafkaPublishSuccess_AsyncComplete() {
        TaskModel task = getTask();
        // With asyncComplete, a successful publish leaves the task IN_PROGRESS for an
        // external completion signal.
        task.getInputData().put("asyncComplete", true);

        KafkaProducerManager producerManager = mock(KafkaProducerManager.class);
        KafkaPublishTask kPublishTask = new KafkaPublishTask(producerManager, objectMapper);

        Producer producer = mock(Producer.class);
        when(producerManager.getProducer(any())).thenReturn(producer);
        when(producer.send(any())).thenReturn(mock(Future.class));

        kPublishTask.start(mock(WorkflowModel.class), task, mock(WorkflowExecutor.class));
        assertEquals(TaskModel.Status.IN_PROGRESS, task.getStatus());
    }

    /** Builds a task whose input contains a fully-populated Kafka request. */
    private TaskModel getTask() {
        TaskModel task = new TaskModel();
        KafkaPublishTask.Input input = new KafkaPublishTask.Input();
        input.setBootStrapServers("localhost:9092");
        Map<String, Object> value = new HashMap<>();
        value.put("input_key1", "value1");
        value.put("input_key2", 45.3d);
        input.setValue(value);
        input.setTopic("testTopic");
        task.getInputData().put(KafkaPublishTask.REQUEST_PARAMETER_NAME, input);
        return task;
    }

    @Test
    public void integerSerializer_integerObject() {
        KafkaPublishTask kPublishTask =
                new KafkaPublishTask(getKafkaProducerManager(), objectMapper);
        KafkaPublishTask.Input input = new KafkaPublishTask.Input();
        input.setKeySerializer(IntegerSerializer.class.getCanonicalName());
        input.setKey(String.valueOf(Integer.MAX_VALUE));
        assertEquals(Integer.MAX_VALUE, kPublishTask.getKey(input));
    }

    @Test
    public void longSerializer_longObject() {
        KafkaPublishTask kPublishTask =
                new KafkaPublishTask(getKafkaProducerManager(), objectMapper);
        KafkaPublishTask.Input input = new KafkaPublishTask.Input();
        input.setKeySerializer(LongSerializer.class.getCanonicalName());
        input.setKey(String.valueOf(Long.MAX_VALUE));
        assertEquals(Long.MAX_VALUE, kPublishTask.getKey(input));
    }

    @Test
    public void noSerializer_StringObject() {
        // Default key serializer is StringSerializer -> key passes through as String.
        KafkaPublishTask kPublishTask =
                new KafkaPublishTask(getKafkaProducerManager(), objectMapper);
        KafkaPublishTask.Input input = new KafkaPublishTask.Input();
        input.setKey("testStringKey");
        assertEquals("testStringKey", kPublishTask.getKey(input));
    }

    private KafkaProducerManager getKafkaProducerManager() {
        // (requestTimeout, maxBlock, cacheSize, cacheTime) — previously the last two
        // arguments were swapped (cacheSize=120000, cacheTime=10ms).
        return new KafkaProducerManager(
                Duration.ofMillis(100), Duration.ofMillis(500), 10, Duration.ofMillis(120000));
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/kafka/src/main/java/com/netflix/conductor/core/execution/mapper/KafkaPublishTaskMapper.java | kafka/src/main/java/com/netflix/conductor/core/execution/mapper/KafkaPublishTaskMapper.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution.mapper;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.core.exception.TerminateWorkflowException;
import com.netflix.conductor.core.utils.ParametersUtils;
import com.netflix.conductor.dao.MetadataDAO;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
@Component
public class KafkaPublishTaskMapper implements TaskMapper {

    public static final Logger LOGGER = LoggerFactory.getLogger(KafkaPublishTaskMapper.class);

    private final ParametersUtils parametersUtils;
    private final MetadataDAO metadataDAO;

    public KafkaPublishTaskMapper(ParametersUtils parametersUtils, MetadataDAO metadataDAO) {
        this.parametersUtils = parametersUtils;
        this.metadataDAO = metadataDAO;
    }

    @Override
    public String getTaskType() {
        return TaskType.KAFKA_PUBLISH.name();
    }

    /**
     * Maps a {@link WorkflowTask} of type {@link TaskType#KAFKA_PUBLISH} to a single {@link
     * TaskModel} in the {@link TaskModel.Status#SCHEDULED} state.
     *
     * @param taskMapperContext wrapper holding the {@link WorkflowTask}, {@link WorkflowDef},
     *     {@link WorkflowModel} and the string task id
     * @return a singleton list containing the scheduled Kafka publish task
     * @throws TerminateWorkflowException if the task definition does not exist
     */
    @Override
    public List<TaskModel> getMappedTasks(TaskMapperContext taskMapperContext)
            throws TerminateWorkflowException {
        LOGGER.debug("TaskMapperContext {} in KafkaPublishTaskMapper", taskMapperContext);

        WorkflowTask taskToSchedule = taskMapperContext.getWorkflowTask();
        WorkflowModel workflow = taskMapperContext.getWorkflowModel();
        String taskId = taskMapperContext.getTaskId();

        // Fall back to the persisted task definition when the context does not carry one.
        TaskDef taskDef = taskMapperContext.getTaskDefinition();
        if (taskDef == null) {
            taskDef = metadataDAO.getTaskDef(taskToSchedule.getName());
        }

        // Resolve the task's input parameters against the workflow state.
        Map<String, Object> taskInput =
                parametersUtils.getTaskInputV2(
                        taskToSchedule.getInputParameters(), workflow, taskId, taskDef);

        TaskModel kafkaTask = taskMapperContext.createTaskModel();
        kafkaTask.setInputData(taskInput);
        kafkaTask.setStatus(TaskModel.Status.SCHEDULED);
        kafkaTask.setRetryCount(taskMapperContext.getRetryCount());
        kafkaTask.setCallbackAfterSeconds(taskToSchedule.getStartDelay());
        // Copy isolation and rate-limit attributes from the definition when one exists.
        if (taskDef != null) {
            kafkaTask.setExecutionNameSpace(taskDef.getExecutionNameSpace());
            kafkaTask.setIsolationGroupId(taskDef.getIsolationGroupId());
            kafkaTask.setRateLimitPerFrequency(taskDef.getRateLimitPerFrequency());
            kafkaTask.setRateLimitFrequencyInSeconds(taskDef.getRateLimitFrequencyInSeconds());
        }
        return Collections.singletonList(kafkaTask);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/kafka/src/main/java/com/netflix/conductor/contribs/tasks/kafka/KafkaPublishTask.java | kafka/src/main/java/com/netflix/conductor/contribs/tasks/kafka/KafkaPublishTask.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.tasks.kafka;
import java.nio.charset.StandardCharsets;
import java.time.Instant;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.serialization.IntegerSerializer;
import org.apache.kafka.common.serialization.LongSerializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import com.netflix.conductor.core.execution.WorkflowExecutor;
import com.netflix.conductor.core.execution.tasks.WorkflowSystemTask;
import com.netflix.conductor.core.utils.Utils;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.annotations.VisibleForTesting;
import static com.netflix.conductor.common.metadata.tasks.TaskType.TASK_TYPE_KAFKA_PUBLISH;
@Component(TASK_TYPE_KAFKA_PUBLISH)
public class KafkaPublishTask extends WorkflowSystemTask {
private static final Logger LOGGER = LoggerFactory.getLogger(KafkaPublishTask.class);
static final String REQUEST_PARAMETER_NAME = "kafka_request";
private static final String MISSING_REQUEST =
"Missing Kafka request. Task input MUST have a '"
+ REQUEST_PARAMETER_NAME
+ "' key with KafkaTask.Input as value. See documentation for KafkaTask for required input parameters";
private static final String MISSING_BOOT_STRAP_SERVERS = "No boot strap servers specified";
private static final String MISSING_KAFKA_TOPIC =
"Missing Kafka topic. See documentation for KafkaTask for required input parameters";
private static final String MISSING_KAFKA_VALUE =
"Missing Kafka value. See documentation for KafkaTask for required input parameters";
private static final String FAILED_TO_INVOKE = "Failed to invoke kafka task due to: ";
private final ObjectMapper objectMapper;
private final String requestParameter;
private final KafkaProducerManager producerManager;
public KafkaPublishTask(KafkaProducerManager clientManager, ObjectMapper objectMapper) {
super(TASK_TYPE_KAFKA_PUBLISH);
this.requestParameter = REQUEST_PARAMETER_NAME;
this.producerManager = clientManager;
this.objectMapper = objectMapper;
LOGGER.info("KafkaTask initialized.");
}
@Override
public void start(WorkflowModel workflow, TaskModel task, WorkflowExecutor executor) {
long taskStartMillis = Instant.now().toEpochMilli();
task.setWorkerId(Utils.getServerId());
Object request = task.getInputData().get(requestParameter);
if (Objects.isNull(request)) {
markTaskAsFailed(task, MISSING_REQUEST);
return;
}
Input input = objectMapper.convertValue(request, Input.class);
if (StringUtils.isBlank(input.getBootStrapServers())) {
markTaskAsFailed(task, MISSING_BOOT_STRAP_SERVERS);
return;
}
if (StringUtils.isBlank(input.getTopic())) {
markTaskAsFailed(task, MISSING_KAFKA_TOPIC);
return;
}
if (Objects.isNull(input.getValue())) {
markTaskAsFailed(task, MISSING_KAFKA_VALUE);
return;
}
try {
Future<RecordMetadata> recordMetaDataFuture = kafkaPublish(input);
try {
recordMetaDataFuture.get();
if (isAsyncComplete(task)) {
task.setStatus(TaskModel.Status.IN_PROGRESS);
} else {
task.setStatus(TaskModel.Status.COMPLETED);
}
long timeTakenToCompleteTask = Instant.now().toEpochMilli() - taskStartMillis;
LOGGER.debug("Published message {}, Time taken {}", input, timeTakenToCompleteTask);
} catch (ExecutionException ec) {
LOGGER.error(
"Failed to invoke kafka task: {} - execution exception ",
task.getTaskId(),
ec);
markTaskAsFailed(task, FAILED_TO_INVOKE + ec.getMessage());
}
} catch (Exception e) {
LOGGER.error(
"Failed to invoke kafka task:{} for input {} - unknown exception",
task.getTaskId(),
input,
e);
markTaskAsFailed(task, FAILED_TO_INVOKE + e.getMessage());
}
}
private void markTaskAsFailed(TaskModel task, String reasonForIncompletion) {
task.setReasonForIncompletion(reasonForIncompletion);
task.setStatus(TaskModel.Status.FAILED);
}
/**
* @param input Kafka Request
* @return Future for execution.
*/
@SuppressWarnings({"unchecked", "rawtypes"})
private Future<RecordMetadata> kafkaPublish(Input input) throws Exception {
long startPublishingEpochMillis = Instant.now().toEpochMilli();
Producer producer = producerManager.getProducer(input);
long timeTakenToCreateProducer = Instant.now().toEpochMilli() - startPublishingEpochMillis;
LOGGER.debug("Time taken getting producer {}", timeTakenToCreateProducer);
Object key = getKey(input);
Iterable<Header> headers =
input.getHeaders().entrySet().stream()
.map(
header ->
new RecordHeader(
header.getKey(),
String.valueOf(header.getValue()).getBytes()))
.collect(Collectors.toList());
ProducerRecord rec =
new ProducerRecord(
input.getTopic(),
null,
null,
key,
objectMapper.writeValueAsString(input.getValue()),
headers);
Future send = producer.send(rec);
long timeTakenToPublish = Instant.now().toEpochMilli() - startPublishingEpochMillis;
LOGGER.debug("Time taken publishing {}", timeTakenToPublish);
return send;
}
@VisibleForTesting
Object getKey(Input input) {
String keySerializer = input.getKeySerializer();
if (LongSerializer.class.getCanonicalName().equals(keySerializer)) {
return Long.parseLong(String.valueOf(input.getKey()));
} else if (IntegerSerializer.class.getCanonicalName().equals(keySerializer)) {
return Integer.parseInt(String.valueOf(input.getKey()));
} else {
return String.valueOf(input.getKey());
}
}
@Override
public boolean execute(WorkflowModel workflow, TaskModel task, WorkflowExecutor executor) {
return false;
}
@Override
public void cancel(WorkflowModel workflow, TaskModel task, WorkflowExecutor executor) {
task.setStatus(TaskModel.Status.CANCELED);
}
@Override
public boolean isAsync() {
return true;
}
public static class Input {
public static final String STRING_SERIALIZER = StringSerializer.class.getCanonicalName();
private Map<String, Object> headers = new HashMap<>();
private String bootStrapServers;
private Object key;
private Object value;
private Integer requestTimeoutMs;
private Integer maxBlockMs;
private String topic;
private String keySerializer = STRING_SERIALIZER;
public Map<String, Object> getHeaders() {
return headers;
}
public void setHeaders(Map<String, Object> headers) {
this.headers = headers;
}
public String getBootStrapServers() {
return bootStrapServers;
}
public void setBootStrapServers(String bootStrapServers) {
this.bootStrapServers = bootStrapServers;
}
public Object getKey() {
return key;
}
public void setKey(Object key) {
this.key = key;
}
public Object getValue() {
return value;
}
public void setValue(Object value) {
this.value = value;
}
public Integer getRequestTimeoutMs() {
return requestTimeoutMs;
}
public void setRequestTimeoutMs(Integer requestTimeoutMs) {
this.requestTimeoutMs = requestTimeoutMs;
}
public String getTopic() {
return topic;
}
public void setTopic(String topic) {
this.topic = topic;
}
public String getKeySerializer() {
return keySerializer;
}
public void setKeySerializer(String keySerializer) {
this.keySerializer = keySerializer;
}
public Integer getMaxBlockMs() {
return maxBlockMs;
}
public void setMaxBlockMs(Integer maxBlockMs) {
this.maxBlockMs = maxBlockMs;
}
@Override
public String toString() {
return "Input{"
+ "headers="
+ headers
+ ", bootStrapServers='"
+ bootStrapServers
+ '\''
+ ", key="
+ key
+ ", value="
+ value
+ ", requestTimeoutMs="
+ requestTimeoutMs
+ ", maxBlockMs="
+ maxBlockMs
+ ", topic='"
+ topic
+ '\''
+ ", keySerializer='"
+ keySerializer
+ '\''
+ '}';
}
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/kafka/src/main/java/com/netflix/conductor/contribs/tasks/kafka/KafkaProducerManager.java | kafka/src/main/java/com/netflix/conductor/contribs/tasks/kafka/KafkaProducerManager.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.tasks.kafka;
import java.time.Duration;
import java.util.Objects;
import java.util.Properties;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.RemovalListener;
@SuppressWarnings("rawtypes")
@Component
/**
 * Caches Kafka producers keyed by their effective {@link Properties}, so that tasks sharing the
 * same producer configuration reuse a single producer. Evicted producers are closed by the
 * cache's removal listener.
 */
public class KafkaProducerManager {

    private static final Logger LOGGER = LoggerFactory.getLogger(KafkaProducerManager.class);

    private static final String STRING_SERIALIZER =
            "org.apache.kafka.common.serialization.StringSerializer";

    /** Closes producers as they are evicted so network resources are released promptly. */
    private static final RemovalListener<Properties, Producer> LISTENER =
            notification -> {
                if (notification.getValue() != null) {
                    notification.getValue().close();
                    LOGGER.info("Closed producer for {}", notification.getKey());
                }
            };

    private final String requestTimeoutConfig;
    private final String maxBlockMsConfig;
    private final Cache<Properties, Producer> kafkaProducerCache;

    public KafkaProducerManager(
            @Value("${conductor.tasks.kafka-publish.requestTimeout:100ms}") Duration requestTimeout,
            @Value("${conductor.tasks.kafka-publish.maxBlock:500ms}") Duration maxBlock,
            @Value("${conductor.tasks.kafka-publish.cacheSize:10}") int cacheSize,
            @Value("${conductor.tasks.kafka-publish.cacheTime:120000ms}") Duration cacheTime) {
        this.requestTimeoutConfig = String.valueOf(requestTimeout.toMillis());
        this.maxBlockMsConfig = String.valueOf(maxBlock.toMillis());
        this.kafkaProducerCache =
                CacheBuilder.newBuilder()
                        .removalListener(LISTENER)
                        .maximumSize(cacheSize)
                        .expireAfterAccess(cacheTime.toMillis(), TimeUnit.MILLISECONDS)
                        .build();
    }

    /** Returns a cached producer for the input's configuration, creating one on first use. */
    public Producer getProducer(KafkaPublishTask.Input input) {
        Properties producerConfig = getProducerProperties(input);
        return getFromCache(producerConfig, () -> new KafkaProducer(producerConfig));
    }

    @VisibleForTesting
    Producer getFromCache(Properties configProperties, Callable<Producer> createProducerCallable) {
        try {
            return kafkaProducerCache.get(configProperties, createProducerCallable);
        } catch (ExecutionException e) {
            // Surface producer-creation failures as unchecked, preserving the cause.
            throw new RuntimeException(e);
        }
    }

    /**
     * Assembles producer properties from the task input; per-task request-timeout and max-block
     * values override the manager-level defaults.
     */
    @VisibleForTesting
    Properties getProducerProperties(KafkaPublishTask.Input input) {
        Properties producerConfig = new Properties();
        producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, input.getBootStrapServers());
        producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, input.getKeySerializer());
        producerConfig.put(
                ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG,
                input.getRequestTimeoutMs() != null
                        ? String.valueOf(input.getRequestTimeoutMs())
                        : requestTimeoutConfig);
        producerConfig.put(
                ProducerConfig.MAX_BLOCK_MS_CONFIG,
                input.getMaxBlockMs() != null
                        ? String.valueOf(input.getMaxBlockMs())
                        : maxBlockMsConfig);
        producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, STRING_SERIALIZER);
        return producerConfig;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/workflow-event-listener/src/test/java/com/netflix/conductor/test/listener/WorkflowStatusPublisherIntegrationTest.java | workflow-event-listener/src/test/java/com/netflix/conductor/test/listener/WorkflowStatusPublisherIntegrationTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.test.listener;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.ConductorTestApp;
import com.netflix.conductor.common.metadata.tasks.Task;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.tasks.TaskResult;
import com.netflix.conductor.common.metadata.workflow.StartWorkflowRequest;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.common.run.Workflow;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.dao.QueueDAO;
import com.netflix.conductor.service.ExecutionService;
import com.netflix.conductor.service.MetadataService;
import com.netflix.conductor.service.WorkflowService;
import com.fasterxml.jackson.databind.ObjectMapper;
import static com.netflix.conductor.common.metadata.tasks.Task.Status.COMPLETED;
import static org.junit.Assert.assertEquals;
@RunWith(SpringRunner.class)
@SpringBootTest(
        classes = ConductorTestApp.class,
        properties = {
            "conductor.db.type=memory",
            "conductor.workflow-status-listener.type=queue_publisher",
            "conductor.workflow-status-listener.queue-publisher.successQueue=dummy",
            "conductor.workflow-status-listener.queue-publisher.failureQueue=dummy",
            "conductor.workflow-status-listener.queue-publisher.finalizeQueue=final",
            "conductor.app.workflow.name-validation.enabled=true"
        })
@TestPropertySource(locations = "classpath:application-integrationtest.properties")
public class WorkflowStatusPublisherIntegrationTest {

    // Success and failure notifications share the "dummy" queue; finalization uses "final"
    // (see the @SpringBootTest properties above).
    private final String CALLBACK_QUEUE = "dummy";
    private final String FINALIZED_QUEUE = "final";

    private static final String LINEAR_WORKFLOW_T1_T2 = "junit_test_wf";
    private static final int WORKFLOW_VERSION = 1;
    private static final String INCOMPLETION_REASON = "test reason";
    private static final String DEFAULT_OWNER_EMAIL = "test@harness.com";

    @Autowired private ObjectMapper objectMapper;

    @Autowired QueueDAO queueDAO;

    @Autowired protected MetadataService metadataService;

    @Autowired protected ExecutionService workflowExecutionService;

    @Autowired protected WorkflowService workflowExecutor;

    /** Registers the single task definition shared by the test workflows. */
    @Before
    public void setUp() {
        TaskDef taskDef = new TaskDef();
        taskDef.setName("junit_task_1");
        taskDef.setTimeoutSeconds(120);
        taskDef.setResponseTimeoutSeconds(120);
        taskDef.setRetryCount(1);
        taskDef.setOwnerEmail(DEFAULT_OWNER_EMAIL);
        metadataService.registerTaskDef(Collections.singletonList(taskDef));
    }

    /** Terminates all still-running workflows and drains every queue between tests. */
    @After
    public void cleanUp() {
        List<String> workflows =
                metadataService.getWorkflowDefs().stream()
                        .map(WorkflowDef::getName)
                        .collect(Collectors.toList());
        for (String wfName : workflows) {
            List<String> running =
                    workflowExecutionService.getRunningWorkflows(wfName, WORKFLOW_VERSION);
            for (String wfid : running) {
                workflowExecutor.terminateWorkflow(wfid, "cleanup");
            }
        }
        queueDAO.queuesDetail().keySet().forEach(queueDAO::flush);
    }

    @Test
    public void testListenerOnTerminatedWorkflow() throws IOException {
        String id =
                startOrLoadWorkflowExecution(
                        LINEAR_WORKFLOW_T1_T2,
                        1,
                        "testWorkflowTerminatedListener",
                        new HashMap<>());
        workflowExecutor.terminateWorkflow(id, INCOMPLETION_REASON);

        // Termination must publish the workflow summary on the failure queue...
        Message message = pollAndAckSingleMessage(CALLBACK_QUEUE);
        WorkflowSummary payload = readSummary(message);
        assertEquals(id, message.getId());
        assertEquals(LINEAR_WORKFLOW_T1_T2, payload.getWorkflowType());
        assertEquals("testWorkflowTerminatedListener", payload.getCorrelationId());
        assertEquals(Workflow.WorkflowStatus.TERMINATED, payload.getStatus());
        assertEquals(INCOMPLETION_REASON, payload.getReasonForIncompletion());

        // ...and the same summary on the finalize queue.
        message = pollAndAckSingleMessage(FINALIZED_QUEUE);
        payload = readSummary(message);
        assertEquals(id, message.getId());
        assertEquals(LINEAR_WORKFLOW_T1_T2, payload.getWorkflowType());
        assertEquals("testWorkflowTerminatedListener", payload.getCorrelationId());
        assertEquals(Workflow.WorkflowStatus.TERMINATED, payload.getStatus());
        assertEquals(INCOMPLETION_REASON, payload.getReasonForIncompletion());
    }

    @Test
    public void testListenerOnCompletedWorkflow() throws IOException, InterruptedException {
        WorkflowDef workflowDef = new WorkflowDef();
        workflowDef.setName(LINEAR_WORKFLOW_T1_T2);
        workflowDef.setDescription(workflowDef.getName());
        workflowDef.setVersion(WORKFLOW_VERSION);
        workflowDef.setSchemaVersion(2);
        workflowDef.setOwnerEmail(DEFAULT_OWNER_EMAIL);
        workflowDef.setWorkflowStatusListenerEnabled(true);

        LinkedList<WorkflowTask> wftasks = new LinkedList<>();
        WorkflowTask wft1 = new WorkflowTask();
        wft1.setName("junit_task_1");
        wft1.setTaskReferenceName("t1");
        wftasks.add(wft1);
        workflowDef.setTasks(wftasks);
        metadataService.updateWorkflowDef(Collections.singletonList(workflowDef));

        String id =
                startOrLoadWorkflowExecution(
                        workflowDef.getName(), 1, "testWorkflowCompletedListener", new HashMap<>());
        List<Task> tasks = workflowExecutionService.getTasks("junit_task_1", null, 1);
        tasks.get(0).setStatus(COMPLETED);
        workflowExecutionService.updateTask(new TaskResult(tasks.get(0)));
        checkIfWorkflowIsCompleted(id);

        // Completion must publish the workflow summary on the success queue...
        Message message = pollAndAckSingleMessage(CALLBACK_QUEUE);
        WorkflowSummary payload = readSummary(message);
        assertEquals(id, message.getId());
        assertEquals(LINEAR_WORKFLOW_T1_T2, payload.getWorkflowType());
        assertEquals("testWorkflowCompletedListener", payload.getCorrelationId());
        assertEquals(Workflow.WorkflowStatus.COMPLETED, payload.getStatus());

        // ...and the same summary on the finalize queue.
        message = pollAndAckSingleMessage(FINALIZED_QUEUE);
        payload = readSummary(message);
        assertEquals(id, message.getId());
        assertEquals(LINEAR_WORKFLOW_T1_T2, payload.getWorkflowType());
        assertEquals("testWorkflowCompletedListener", payload.getCorrelationId());
        assertEquals(Workflow.WorkflowStatus.COMPLETED, payload.getStatus());
    }

    /**
     * Polls exactly one message from {@code queueName} and acks it on that SAME queue.
     *
     * <p>BUGFIX: messages polled from the finalize queue were previously acked against the
     * callback queue, leaving them un-acked on the queue they were actually polled from.
     */
    private Message pollAndAckSingleMessage(String queueName) {
        List<Message> messages = queueDAO.pollMessages(queueName, 1, 200);
        Message message = messages.get(0);
        queueDAO.ack(queueName, message.getId());
        return message;
    }

    /** Deserializes a queue message payload into a {@link WorkflowSummary}. */
    private WorkflowSummary readSummary(Message message) throws IOException {
        return objectMapper.readValue(message.getPayload(), WorkflowSummary.class);
    }

    /** Polls the workflow status up to 6 times (100 ms apart) waiting for COMPLETED. */
    @SuppressWarnings("BusyWait")
    private void checkIfWorkflowIsCompleted(String id) throws InterruptedException {
        int statusRetrieveAttempts = 0;
        while (workflowExecutor.getExecutionStatus(id, false).getStatus()
                != Workflow.WorkflowStatus.COMPLETED) {
            if (statusRetrieveAttempts > 5) {
                break;
            }
            Thread.sleep(100);
            statusRetrieveAttempts++;
        }
    }

    /** Starts a new execution of the given workflow and returns its id. */
    private String startOrLoadWorkflowExecution(
            String workflowName, int version, String correlationId, Map<String, Object> input) {
        StartWorkflowRequest startWorkflowInput = new StartWorkflowRequest();
        startWorkflowInput.setName(workflowName);
        startWorkflowInput.setVersion(version);
        startWorkflowInput.setCorrelationId(correlationId);
        startWorkflowInput.setInput(input);
        return workflowExecutor.startWorkflow(startWorkflowInput);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/workflow-event-listener/src/test/java/com/netflix/conductor/contribs/listener/ArchivingWorkflowStatusListenerTest.java | workflow-event-listener/src/test/java/com/netflix/conductor/contribs/listener/ArchivingWorkflowStatusListenerTest.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.listener;
import java.util.UUID;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.contribs.listener.archive.ArchivingWorkflowStatusListener;
import com.netflix.conductor.core.dal.ExecutionDAOFacade;
import com.netflix.conductor.model.WorkflowModel;
import static org.mockito.Mockito.*;
/**
 * Unit tests for {@link ArchivingWorkflowStatusListener}: completion and termination must each
 * trigger exactly one archiving removal of the workflow.
 *
 * @author pavel.halabala
 */
public class ArchivingWorkflowStatusListenerTest {

    WorkflowModel workflow;
    ExecutionDAOFacade executionDAOFacade;
    ArchivingWorkflowStatusListener listener;

    @Before
    public void before() {
        WorkflowDef definition = new WorkflowDef();
        definition.setName("name1");
        definition.setVersion(1);

        workflow = new WorkflowModel();
        workflow.setWorkflowDefinition(definition);
        workflow.setWorkflowId(UUID.randomUUID().toString());

        executionDAOFacade = mock(ExecutionDAOFacade.class);
        listener = new ArchivingWorkflowStatusListener(executionDAOFacade);
    }

    @Test
    public void testArchiveOnWorkflowCompleted() {
        listener.onWorkflowCompleted(workflow);

        verify(executionDAOFacade).removeWorkflow(workflow.getWorkflowId(), true);
        verifyNoMoreInteractions(executionDAOFacade);
    }

    @Test
    public void testArchiveOnWorkflowTerminated() {
        listener.onWorkflowTerminated(workflow);

        verify(executionDAOFacade).removeWorkflow(workflow.getWorkflowId(), true);
        verifyNoMoreInteractions(executionDAOFacade);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/conductorqueue/ConductorQueueStatusPublisherProperties.java | workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/conductorqueue/ConductorQueueStatusPublisherProperties.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.listener.conductorqueue;
import org.springframework.boot.context.properties.ConfigurationProperties;
@ConfigurationProperties("conductor.workflow-status-listener.queue-publisher")
public class ConductorQueueStatusPublisherProperties {

    // Queue that receives summaries of successfully completed workflows.
    private String successQueue = "_callbackSuccessQueue";

    // Queue that receives summaries of terminated workflows.
    private String failureQueue = "_callbackFailureQueue";

    // Queue that receives summaries of finalized workflows.
    private String finalizeQueue = "_callbackFinalizeQueue";

    public String getSuccessQueue() {
        return successQueue;
    }

    public String getFailureQueue() {
        return failureQueue;
    }

    public String getFinalizeQueue() {
        return finalizeQueue;
    }

    public void setSuccessQueue(String successQueue) {
        this.successQueue = successQueue;
    }

    public void setFailureQueue(String failureQueue) {
        this.failureQueue = failureQueue;
    }

    public void setFinalizeQueue(String finalizeQueue) {
        this.finalizeQueue = finalizeQueue;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/conductorqueue/ConductorQueueStatusPublisher.java | workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/conductorqueue/ConductorQueueStatusPublisher.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.listener.conductorqueue;
import java.util.Collections;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.core.events.queue.Message;
import com.netflix.conductor.core.listener.WorkflowStatusListener;
import com.netflix.conductor.dao.QueueDAO;
import com.netflix.conductor.model.WorkflowModel;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
/**
 * Publishes a {@link Message} containing a {@link WorkflowSummary} to the underlying {@link
 * QueueDAO} implementation on a workflow completion, termination or finalization event.
 */
public class ConductorQueueStatusPublisher implements WorkflowStatusListener {

    private static final Logger LOGGER =
            LoggerFactory.getLogger(ConductorQueueStatusPublisher.class);

    private final QueueDAO queueDAO;
    private final ObjectMapper objectMapper;

    private final String successStatusQueue;
    private final String failureStatusQueue;
    private final String finalizeStatusQueue;

    public ConductorQueueStatusPublisher(
            QueueDAO queueDAO,
            ObjectMapper objectMapper,
            ConductorQueueStatusPublisherProperties properties) {
        this.queueDAO = queueDAO;
        this.objectMapper = objectMapper;
        this.successStatusQueue = properties.getSuccessQueue();
        this.failureStatusQueue = properties.getFailureQueue();
        this.finalizeStatusQueue = properties.getFinalizeQueue();
    }

    @Override
    public void onWorkflowCompleted(WorkflowModel workflow) {
        LOGGER.info("Publishing callback of workflow {} on completion ", workflow.getWorkflowId());
        queueDAO.push(successStatusQueue, Collections.singletonList(workflowToMessage(workflow)));
    }

    @Override
    public void onWorkflowTerminated(WorkflowModel workflow) {
        LOGGER.info("Publishing callback of workflow {} on termination", workflow.getWorkflowId());
        queueDAO.push(failureStatusQueue, Collections.singletonList(workflowToMessage(workflow)));
    }

    @Override
    public void onWorkflowFinalized(WorkflowModel workflow) {
        LOGGER.info("Publishing callback of workflow {} on finalization", workflow.getWorkflowId());
        queueDAO.push(finalizeStatusQueue, Collections.singletonList(workflowToMessage(workflow)));
    }

    /**
     * Serializes the workflow's {@link WorkflowSummary} to JSON and wraps it in a queue {@link
     * Message} whose id is the workflow id.
     *
     * @throws RuntimeException if JSON serialization fails; the cause is preserved
     */
    private Message workflowToMessage(WorkflowModel workflowModel) {
        String jsonWfSummary;
        WorkflowSummary summary = new WorkflowSummary(workflowModel.toWorkflow());
        try {
            jsonWfSummary = objectMapper.writeValueAsString(summary);
        } catch (JsonProcessingException e) {
            // Pass the exception as the final argument WITHOUT a placeholder so SLF4J logs the
            // full stack trace (a "{}" for it would swallow the trace and print only toString()).
            LOGGER.error("Failed to convert WorkflowSummary: {} to String", summary, e);
            throw new RuntimeException(e);
        }
        return new Message(workflowModel.getWorkflowId(), jsonWfSummary, null);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/conductorqueue/ConductorQueueStatusPublisherConfiguration.java | workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/conductorqueue/ConductorQueueStatusPublisherConfiguration.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.listener.conductorqueue;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import com.netflix.conductor.core.listener.WorkflowStatusListener;
import com.netflix.conductor.dao.QueueDAO;
import com.fasterxml.jackson.databind.ObjectMapper;
@Configuration
@EnableConfigurationProperties(ConductorQueueStatusPublisherProperties.class)
@ConditionalOnProperty(
        name = "conductor.workflow-status-listener.type",
        havingValue = "queue_publisher")
public class ConductorQueueStatusPublisherConfiguration {

    /**
     * Registers the queue-publishing {@link WorkflowStatusListener} when the
     * {@code queue_publisher} listener type is selected.
     */
    @Bean
    public WorkflowStatusListener getWorkflowStatusListener(
            QueueDAO queueDAO,
            ConductorQueueStatusPublisherProperties properties,
            ObjectMapper objectMapper) {
        WorkflowStatusListener publisher =
                new ConductorQueueStatusPublisher(queueDAO, objectMapper, properties);
        return publisher;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/archive/ArchivingWorkflowStatusListener.java | workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/archive/ArchivingWorkflowStatusListener.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.listener.archive;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.core.dal.ExecutionDAOFacade;
import com.netflix.conductor.core.listener.WorkflowStatusListener;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.model.WorkflowModel;
/**
 * Default workflow archiving: immediately after a workflow is completed or terminated, the
 * workflow is removed from the datastore with the archive flag set, and the archival is recorded
 * in the metrics.
 *
 * @author pavel.halabala
 */
public class ArchivingWorkflowStatusListener implements WorkflowStatusListener {

    private static final Logger LOGGER =
            LoggerFactory.getLogger(ArchivingWorkflowStatusListener.class);

    private final ExecutionDAOFacade executionDAOFacade;

    public ArchivingWorkflowStatusListener(ExecutionDAOFacade executionDAOFacade) {
        this.executionDAOFacade = executionDAOFacade;
    }

    @Override
    public void onWorkflowCompleted(WorkflowModel workflow) {
        LOGGER.info("Archiving workflow {} on completion ", workflow.getWorkflowId());
        archive(workflow);
    }

    @Override
    public void onWorkflowTerminated(WorkflowModel workflow) {
        LOGGER.info("Archiving workflow {} on termination", workflow.getWorkflowId());
        archive(workflow);
    }

    /** Removes the workflow with archiving enabled and records the archival metric. */
    private void archive(WorkflowModel workflow) {
        this.executionDAOFacade.removeWorkflow(workflow.getWorkflowId(), true);
        Monitors.recordWorkflowArchived(workflow.getWorkflowName(), workflow.getStatus());
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/archive/ArchivingWorkflowListenerProperties.java | workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/archive/ArchivingWorkflowListenerProperties.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.listener.archive;
import java.time.Duration;
import java.time.temporal.ChronoUnit;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.convert.DurationUnit;
import org.springframework.core.env.Environment;
@ConfigurationProperties("conductor.workflow-status-listener.archival")
public class ArchivingWorkflowListenerProperties {

    /** Type of archival. */
    public enum ArchivalType {
        DEFAULT,
        S3
    }

    private final Environment environment;

    @Autowired
    public ArchivingWorkflowListenerProperties(Environment environment) {
        this.environment = environment;
    }

    /** Archival strategy to use; {@link ArchivalType#DEFAULT} unless configured otherwise. */
    private ArchivalType workflowArchivalType = ArchivalType.DEFAULT;

    public ArchivalType getWorkflowArchivalType() {
        return workflowArchivalType;
    }

    public void setWorkflowArchivalType(ArchivalType workflowArchivalType) {
        this.workflowArchivalType = workflowArchivalType;
    }

    /** Name of the S3 bucket the workflows are archived to. */
    private String workflowS3ArchivalDefaultBucketName = "";

    public String getWorkflowS3ArchivalDefaultBucketName() {
        return workflowS3ArchivalDefaultBucketName;
    }

    public void setWorkflowS3ArchivalDefaultBucketName(String workflowS3ArchivalDefaultBucketName) {
        this.workflowS3ArchivalDefaultBucketName = workflowS3ArchivalDefaultBucketName;
    }

    /** Region of the S3 bucket the workflows are archived to. */
    private String workflowS3ArchivalBucketRegion = "us-east-1";

    public String getWorkflowS3ArchivalBucketRegion() {
        return workflowS3ArchivalBucketRegion;
    }

    public void setWorkflowS3ArchivalBucketRegion(String workflowS3ArchivalBucketRegion) {
        this.workflowS3ArchivalBucketRegion = workflowS3ArchivalBucketRegion;
    }

    /**
     * When true, only workflows that didn't succeed are archived, while successful and
     * unsuccessful workflows alike are deleted from the datastore. This keeps S3 storage costs
     * down and retains only the failed workflows for debugging.
     */
    private Boolean workflowArchiveUnsuccessfulOnly = false;

    public Boolean getWorkflowArchiveUnsuccessfulOnly() {
        return workflowArchiveUnsuccessfulOnly;
    }

    public void setWorkflowArchiveUnsuccessfulOnly(Boolean workflowArchiveUnsuccessfulOnly) {
        this.workflowArchiveUnsuccessfulOnly = workflowArchiveUnsuccessfulOnly;
    }

    /**
     * Time to live, in seconds, for the workflow archiving module. Currently only
     * RedisExecutionDAO supports this.
     */
    @DurationUnit(ChronoUnit.SECONDS)
    private Duration ttlDuration = Duration.ZERO;

    public Duration getTtlDuration() {
        return ttlDuration;
    }

    public void setTtlDuration(Duration ttlDuration) {
        this.ttlDuration = ttlDuration;
    }

    /** Number of worker threads processing the delayed-archival queue. */
    private int delayQueueWorkerThreadCount = 5;

    public int getDelayQueueWorkerThreadCount() {
        return delayQueueWorkerThreadCount;
    }

    public void setDelayQueueWorkerThreadCount(int delayQueueWorkerThreadCount) {
        this.delayQueueWorkerThreadCount = delayQueueWorkerThreadCount;
    }

    /**
     * Delay, in seconds, before an ended workflow is archived. Falls back to
     * {@code conductor.app.asyncUpdateDelaySeconds}, then to 60.
     */
    public int getWorkflowArchivalDelay() {
        return environment.getProperty(
                "conductor.workflow-status-listener.archival.delaySeconds",
                Integer.class,
                environment.getProperty(
                        "conductor.app.asyncUpdateDelaySeconds", Integer.class, 60));
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/archive/ArchivingWorkflowListenerConfiguration.java | workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/archive/ArchivingWorkflowListenerConfiguration.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.listener.archive;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import com.netflix.conductor.core.dal.ExecutionDAOFacade;
import com.netflix.conductor.core.listener.WorkflowStatusListener;
@Configuration
@EnableConfigurationProperties(ArchivingWorkflowListenerProperties.class)
@ConditionalOnProperty(name = "conductor.workflow-status-listener.type", havingValue = "archive")
public class ArchivingWorkflowListenerConfiguration {

    /**
     * Selects the archiving listener implementation: TTL-based when a positive TTL is
     * configured, S3-based when S3 archival is chosen, plain removal otherwise.
     */
    @Bean
    public WorkflowStatusListener getWorkflowStatusListener(
            ExecutionDAOFacade executionDAOFacade, ArchivingWorkflowListenerProperties properties) {
        boolean ttlConfigured = properties.getTtlDuration().getSeconds() > 0;
        if (ttlConfigured) {
            return new ArchivingWithTTLWorkflowStatusListener(executionDAOFacade, properties);
        }
        if (properties.getWorkflowArchivalType()
                == ArchivingWorkflowListenerProperties.ArchivalType.S3) {
            return new ArchivingWorkflowToS3(executionDAOFacade, properties);
        }
        return new ArchivingWorkflowStatusListener(executionDAOFacade);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/archive/ArchivingWithTTLWorkflowStatusListener.java | workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/archive/ArchivingWithTTLWorkflowStatusListener.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.listener.archive;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.core.dal.ExecutionDAOFacade;
import com.netflix.conductor.core.listener.WorkflowStatusListener;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.model.WorkflowModel;
import jakarta.annotation.*;
/**
 * {@link WorkflowStatusListener} that removes a workflow from the primary execution store once it
 * reaches a terminal state, either immediately or after a configurable delay.
 *
 * <p>Despite the class name, TTL-based removal is no longer supported (see the warning emitted by
 * the constructor): workflows are removed via {@code removeWorkflow(workflowId, true)} as soon as
 * they complete or terminate, optionally deferred by {@code workflowArchivalDelay} seconds on a
 * dedicated delay-queue executor.
 */
public class ArchivingWithTTLWorkflowStatusListener implements WorkflowStatusListener {

    private static final Logger LOGGER =
            LoggerFactory.getLogger(ArchivingWithTTLWorkflowStatusListener.class);

    private final ExecutionDAOFacade executionDAOFacade;
    // Retained from the TTL-era configuration; no longer drives removal semantics.
    private final int archiveTTLSeconds;
    private final int delayArchiveSeconds;
    private final ScheduledThreadPoolExecutor delayQueueExecutor;

    /**
     * @param executionDAOFacade facade used to remove (archive) terminal workflows
     * @param properties archival configuration (delay, worker-thread count, TTL duration)
     */
    public ArchivingWithTTLWorkflowStatusListener(
            ExecutionDAOFacade executionDAOFacade, ArchivingWorkflowListenerProperties properties) {
        this.executionDAOFacade = executionDAOFacade;
        this.archiveTTLSeconds = (int) properties.getTtlDuration().getSeconds();
        this.delayArchiveSeconds = properties.getWorkflowArchivalDelay();
        // Tasks rejected by a saturated/shutting-down executor are only logged and counted.
        this.delayQueueExecutor =
                new ScheduledThreadPoolExecutor(
                        properties.getDelayQueueWorkerThreadCount(),
                        (runnable, executor) -> {
                            LOGGER.warn(
                                    "Request {} to delay archiving index dropped in executor {}",
                                    runnable,
                                    executor);
                            Monitors.recordDiscardedArchivalCount();
                        });
        this.delayQueueExecutor.setRemoveOnCancelPolicy(true);
        LOGGER.warn(
                "Workflow removal with TTL is no longer supported, "
                        + "when using this class, workflows will be removed immediately");
    }

    /** Drains the delay queue on shutdown, waiting at most {@code delayArchiveSeconds} seconds. */
    @PreDestroy
    public void shutdownExecutorService() {
        try {
            LOGGER.info("Gracefully shutdown executor service");
            delayQueueExecutor.shutdown();
            boolean drained =
                    delayQueueExecutor.awaitTermination(delayArchiveSeconds, TimeUnit.SECONDS);
            if (drained) {
                LOGGER.debug("tasks completed, shutting down");
            } else {
                LOGGER.warn("Forcing shutdown after waiting for {} seconds", delayArchiveSeconds);
                delayQueueExecutor.shutdownNow();
            }
        } catch (InterruptedException ie) {
            LOGGER.warn(
                    "Shutdown interrupted, invoking shutdownNow on scheduledThreadPoolExecutor for delay queue");
            delayQueueExecutor.shutdownNow();
            Thread.currentThread().interrupt();
        }
    }

    @Override
    public void onWorkflowCompleted(WorkflowModel workflow) {
        LOGGER.info("Archiving workflow {} on completion ", workflow.getWorkflowId());
        removeNowOrLater(workflow);
    }

    @Override
    public void onWorkflowTerminated(WorkflowModel workflow) {
        LOGGER.info("Archiving workflow {} on termination", workflow.getWorkflowId());
        removeNowOrLater(workflow);
    }

    /** Removes the workflow immediately, or schedules removal when a delay is configured. */
    private void removeNowOrLater(WorkflowModel workflow) {
        if (delayArchiveSeconds > 0) {
            delayQueueExecutor.schedule(
                    new DelayedRemoval(workflow, executionDAOFacade),
                    delayArchiveSeconds,
                    TimeUnit.SECONDS);
        } else {
            executionDAOFacade.removeWorkflow(workflow.getWorkflowId(), true);
            Monitors.recordWorkflowArchived(workflow.getWorkflowName(), workflow.getStatus());
        }
    }

    /** Delay-queue task that performs the deferred removal and records archival metrics. */
    private class DelayedRemoval implements Runnable {

        // Snapshot of workflow identity taken at schedule time; the model itself is not retained.
        private final String workflowId;
        private final String workflowName;
        private final WorkflowModel.Status status;
        private final ExecutionDAOFacade executionDAOFacade;

        DelayedRemoval(WorkflowModel workflow, ExecutionDAOFacade executionDAOFacade) {
            this.workflowId = workflow.getWorkflowId();
            this.workflowName = workflow.getWorkflowName();
            this.status = workflow.getStatus();
            this.executionDAOFacade = executionDAOFacade;
        }

        @Override
        public void run() {
            try {
                this.executionDAOFacade.removeWorkflow(workflowId, true);
                LOGGER.info("Archived workflow {}", workflowId);
                Monitors.recordWorkflowArchived(workflowName, status);
                Monitors.recordArchivalDelayQueueSize(delayQueueExecutor.getQueue().size());
            } catch (Exception e) {
                // Swallow deliberately: a failed archival must not kill the executor thread.
                LOGGER.error("Unable to archive workflow: {}", workflowId, e);
            }
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/archive/ArchivingWorkflowToS3.java | workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/archive/ArchivingWorkflowToS3.java | /*
* Copyright 2023 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.listener.archive;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.core.dal.ExecutionDAOFacade;
import com.netflix.conductor.core.listener.WorkflowStatusListener;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.model.WorkflowModel;
import com.fasterxml.jackson.databind.ObjectMapper;
import jakarta.annotation.*;
import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.PutObjectRequest;
/**
 * {@link WorkflowStatusListener} that archives terminal workflows to an S3 bucket as JSON and then
 * removes them from the primary execution store, either immediately or after a configurable delay.
 *
 * <p>Ordering matters: the S3 upload happens (and must succeed) before the workflow is removed;
 * an upload failure is rethrown so the removal never runs for an un-archived workflow.
 */
public class ArchivingWorkflowToS3 implements WorkflowStatusListener {
    private static final Logger LOGGER = LoggerFactory.getLogger(ArchivingWorkflowToS3.class);
    private final ExecutionDAOFacade executionDAOFacade;
    private final ArchivingWorkflowListenerProperties properties;
    // Executor used only to defer the post-upload removal when an archival delay is configured.
    private final ScheduledThreadPoolExecutor scheduledThreadPoolExecutor;
    private final S3Client s3Client;
    private final String bucketName;
    private final String bucketRegion;
    private final ObjectMapper objectMapper;
    private final int delayArchiveSeconds;

    /**
     * @param executionDAOFacade facade used to remove the workflow after it is uploaded to S3
     * @param properties S3 bucket/region, archival delay, and worker-thread configuration
     */
    public ArchivingWorkflowToS3(
            ExecutionDAOFacade executionDAOFacade, ArchivingWorkflowListenerProperties properties) {
        this.executionDAOFacade = executionDAOFacade;
        this.properties = properties;
        bucketName = properties.getWorkflowS3ArchivalDefaultBucketName();
        bucketRegion = properties.getWorkflowS3ArchivalBucketRegion();
        s3Client = S3Client.builder().region(Region.of(bucketRegion)).build();
        this.delayArchiveSeconds = properties.getWorkflowArchivalDelay();
        objectMapper = new ObjectMapper();
        // Rejected delay tasks (e.g. during shutdown) are logged and counted, not retried.
        this.scheduledThreadPoolExecutor =
                new ScheduledThreadPoolExecutor(
                        properties.getDelayQueueWorkerThreadCount(),
                        (runnable, executor) -> {
                            LOGGER.warn(
                                    "Request {} to delay S3 archiving index dropped in executor {}",
                                    runnable,
                                    executor);
                            Monitors.recordDiscardedArchivalCount();
                        });
        this.scheduledThreadPoolExecutor.setRemoveOnCancelPolicy(true);
        LOGGER.warn(
                "Workflow removal archiving in S3 with TTL is no longer supported, "
                        + "when using this class, workflows will be removed immediately");
    }

    /** Shuts down the delay executor (waiting up to the archival delay) and closes the S3 client. */
    @PreDestroy
    public void shutdownExecutorService() {
        try {
            LOGGER.info("Gracefully shutdown executor service in S3 Archival Listener");
            scheduledThreadPoolExecutor.shutdown();
            if (scheduledThreadPoolExecutor.awaitTermination(
                    delayArchiveSeconds, TimeUnit.SECONDS)) {
                LOGGER.debug("tasks completed, shutting down");
            } else {
                LOGGER.warn("Forcing shutdown after waiting for {} seconds", delayArchiveSeconds);
                scheduledThreadPoolExecutor.shutdownNow();
            }
            // Close S3 client
            // NOTE(review): not reached when awaitTermination is interrupted — the client stays
            // open in that path; confirm whether that is acceptable.
            s3Client.close();
        } catch (InterruptedException ie) {
            LOGGER.warn(
                    "Shutdown interrupted, invoking shutdownNow on scheduledThreadPoolExecutor for delay queue S3 Archival Listener");
            scheduledThreadPoolExecutor.shutdownNow();
            Thread.currentThread().interrupt();
        }
    }

    @Override
    public void onWorkflowCompleted(WorkflowModel workflow) {
        archiveWorkflow(workflow);
    }

    @Override
    public void onWorkflowTerminated(WorkflowModel workflow) {
        archiveWorkflow(workflow);
    }

    /**
     * Uploads the workflow JSON to S3 (key: {@code <workflowName>/<workflowId>.json}), then removes
     * it from the execution store — immediately, or after {@code delayArchiveSeconds}.
     *
     * @throws RuntimeException if serialization or the S3 upload fails; removal is then skipped
     */
    private void archiveWorkflow(final WorkflowModel workflow) {
        // Only archive unsuccessful workflows if enabled
        if (!properties.getWorkflowArchiveUnsuccessfulOnly()
                || !workflow.getStatus().isSuccessful()) {
            final String fileName = workflow.getWorkflowId() + ".json";
            final String filePathPrefix = workflow.getWorkflowName();
            final String fullFilePath = filePathPrefix + '/' + fileName;
            try {
                // Convert workflow to JSON string
                String workflowJson = objectMapper.writeValueAsString(workflow);
                // Create put object request
                PutObjectRequest putObjectRequest =
                        PutObjectRequest.builder()
                                .bucket(bucketName)
                                .key(fullFilePath)
                                .contentType("application/json")
                                .build();
                // Upload workflow as a json file to s3
                s3Client.putObject(putObjectRequest, RequestBody.fromString(workflowJson));
                LOGGER.debug(
                        "Archived workflow. Workflow Name :{} Workflow Id :{} Workflow Status :{} to S3 bucket:{}",
                        workflow.getWorkflowName(),
                        workflow.getWorkflowId(),
                        workflow.getStatus(),
                        bucketName);
            } catch (final Exception e) {
                LOGGER.error(
                        "Exception occurred when archiving workflow to S3. Workflow Name : {} Workflow Id : {} Workflow Status : {} :",
                        workflow.getWorkflowName(),
                        workflow.getWorkflowId(),
                        workflow.getStatus(),
                        e);
                // Rethrow so the caller knows archival failed and the removal below never runs.
                throw new RuntimeException(e);
            }
        }
        if (delayArchiveSeconds > 0) {
            scheduledThreadPoolExecutor.schedule(
                    new DelayS3ArchiveWorkflow(workflow, executionDAOFacade),
                    delayArchiveSeconds,
                    TimeUnit.SECONDS);
        } else {
            LOGGER.info(
                    "Archived workflow. Workflow Name : {} Workflow Id : {} Workflow Status : {}",
                    workflow.getWorkflowName(),
                    workflow.getWorkflowId(),
                    workflow.getStatus());
            this.executionDAOFacade.removeWorkflow(workflow.getWorkflowId(), true);
            Monitors.recordWorkflowArchived(workflow.getWorkflowName(), workflow.getStatus());
        }
    }

    /** Delay-queue task that removes an already-uploaded workflow and records archival metrics. */
    private class DelayS3ArchiveWorkflow implements Runnable {
        // Snapshot of the workflow identity taken at schedule time.
        private final String workflowId;
        private final String workflowName;
        private final WorkflowModel.Status status;
        private final ExecutionDAOFacade executionDAOFacade;

        DelayS3ArchiveWorkflow(WorkflowModel workflow, ExecutionDAOFacade executionDAOFacade) {
            this.workflowId = workflow.getWorkflowId();
            this.workflowName = workflow.getWorkflowName();
            this.status = workflow.getStatus();
            this.executionDAOFacade = executionDAOFacade;
        }

        @Override
        public void run() {
            try {
                this.executionDAOFacade.removeWorkflow(workflowId, true);
                LOGGER.info(
                        "Archived workflow. Workflow Name : {} Workflow Id : {} Workflow Status : {}",
                        workflowName,
                        workflowId,
                        status);
                Monitors.recordWorkflowArchived(workflowName, status);
                Monitors.recordArchivalDelayQueueSize(
                        scheduledThreadPoolExecutor.getQueue().size());
            } catch (Exception e) {
                // Swallow deliberately: a failed removal must not kill the executor thread.
                LOGGER.error(
                        "Unable to archive workflow. Workflow Name : {} Workflow Id : {} Workflow Status : {}",
                        workflowName,
                        workflowId,
                        status,
                        e);
            }
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/statuschange/StatusChangePublisherConfiguration.java | workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/statuschange/StatusChangePublisherConfiguration.java | /*
* Copyright 2024 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.listener.statuschange;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import com.netflix.conductor.contribs.listener.RestClientManager;
import com.netflix.conductor.contribs.listener.StatusNotifierNotificationProperties;
import com.netflix.conductor.core.dal.ExecutionDAOFacade;
import com.netflix.conductor.core.listener.WorkflowStatusListener;
/**
 * Spring configuration that wires the workflow status-change publisher. Active only when
 * {@code conductor.workflow-status-listener.type=workflow_publisher}.
 */
@Configuration
@EnableConfigurationProperties(StatusNotifierNotificationProperties.class)
@ConditionalOnProperty(
        name = "conductor.workflow-status-listener.type",
        havingValue = "workflow_publisher")
public class StatusChangePublisherConfiguration {

    private static final Logger log =
            LoggerFactory.getLogger(StatusChangePublisherConfiguration.class);

    /** REST client manager used to deliver status-change notifications. */
    @Bean
    public RestClientManager getRestClientManager(
            StatusNotifierNotificationProperties notificationProperties) {
        return new RestClientManager(notificationProperties);
    }

    /** The {@link WorkflowStatusListener} that publishes workflow status changes over REST. */
    @Bean
    public WorkflowStatusListener getWorkflowStatusListener(
            RestClientManager clientManager, ExecutionDAOFacade daoFacade) {
        return new StatusChangePublisher(clientManager, daoFacade);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/statuschange/StatusChangeNotification.java | workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/statuschange/StatusChangeNotification.java | /*
* Copyright 2024 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.listener.statuschange;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.common.run.Workflow;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.contribs.listener.StatusNotifier;
import com.fasterxml.jackson.annotation.JsonFilter;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ser.FilterProvider;
import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter;
import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;
/**
 * {@link WorkflowSummary} specialization used for status-change notifications.
 *
 * <p>Reads an optional {@code statusNotifier} entry from the workflow variables (a JSON string
 * describing where/how to deliver the notification) and offers two serializations: one that strips
 * the {@code input}/{@code output} fields and one that keeps them.
 */
@JsonFilter("SecretRemovalFilter")
class StatusChangeNotification extends WorkflowSummary {

    // FIX: logger was previously created for StatusChangePublisher.class, mislabeling log lines.
    private static final Logger LOGGER = LoggerFactory.getLogger(StatusChangeNotification.class);

    private final ObjectMapper objectMapper = new ObjectMapper();
    private StatusNotifier statusNotifier;

    /**
     * @param workflow workflow whose summary and {@code statusNotifier} variable are captured
     * @throws RuntimeException if the {@code statusNotifier} variable is not valid JSON
     */
    StatusChangeNotification(Workflow workflow) {
        super(workflow);
        Map<String, Object> variables = workflow.getVariables();
        // Guard against workflows that carry no variables map at all.
        Object statusNotifierVariable = variables == null ? null : variables.get("statusNotifier");
        // FIX: this previously tested the field `statusNotifier` (always null here) instead of
        // the local variable, so the notifier was never deserialized.
        if (statusNotifierVariable != null) {
            try {
                statusNotifier =
                        objectMapper.readValue(
                                statusNotifierVariable.toString(), new TypeReference<>() {});
            } catch (JsonProcessingException e) {
                throw new RuntimeException(e);
            }
        }
    }

    /** Notifier parsed from the workflow variables, or {@code null} if none was supplied. */
    public StatusNotifier getStatusNotifier() {
        return statusNotifier;
    }

    /**
     * Serializes this summary to JSON with the {@code input} and {@code output} fields removed.
     *
     * @throws RuntimeException if serialization fails
     */
    String toJsonString() {
        String jsonString;
        try {
            SimpleBeanPropertyFilter theFilter =
                    SimpleBeanPropertyFilter.serializeAllExcept("input", "output");
            FilterProvider provider =
                    new SimpleFilterProvider().addFilter("SecretRemovalFilter", theFilter);
            jsonString = objectMapper.writer(provider).writeValueAsString(this);
        } catch (JsonProcessingException e) {
            // FIX: the trailing "{}" placeholder consumed the Throwable argument, so the stack
            // trace was never logged; passing the exception last (unmatched) logs it properly.
            LOGGER.error(
                    "Failed to convert workflow {} id: {} to String",
                    this.getWorkflowType(),
                    this.getWorkflowId(),
                    e);
            throw new RuntimeException(e);
        }
        return jsonString;
    }

    /*
     * https://github.com/Netflix/conductor/pull/2128
     * To enable Workflow/Task Summary Input/Output JSON Serialization, use the following:
     * conductor.app.summary-input-output-json-serialization.enabled=true
     */
    /**
     * Serializes this summary to JSON keeping all fields (including input/output).
     *
     * @throws RuntimeException if serialization fails
     */
    String toJsonStringWithInputOutput() {
        String jsonString;
        try {
            // An empty exclusion set satisfies the @JsonFilter without removing anything.
            SimpleBeanPropertyFilter emptyFilter = SimpleBeanPropertyFilter.serializeAllExcept();
            FilterProvider provider =
                    new SimpleFilterProvider().addFilter("SecretRemovalFilter", emptyFilter);
            jsonString = objectMapper.writer(provider).writeValueAsString(this);
        } catch (JsonProcessingException e) {
            LOGGER.error(
                    "Failed to convert workflow {} id: {} to String",
                    this.getWorkflowType(),
                    this.getWorkflowId(),
                    e);
            throw new RuntimeException(e);
        }
        return jsonString;
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/statuschange/StatusChangePublisher.java | workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/statuschange/StatusChangePublisher.java | /*
* Copyright 2024 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.listener.statuschange;
import java.io.IOException;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingDeque;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.contribs.listener.RestClientManager;
import com.netflix.conductor.core.dal.ExecutionDAOFacade;
import com.netflix.conductor.core.listener.WorkflowStatusListener;
import com.netflix.conductor.model.WorkflowModel;
/**
 * Publishes workflow completion/termination events over REST via {@link RestClientManager}.
 *
 * <p>Events are enqueued onto a bounded blocking queue (size from the
 * {@code ENV_WORKFLOW_NOTIFICATION_QUEUE_SIZE} env var, default 50) and drained by a single
 * consumer thread; if that thread dies with an uncaught exception, the handler starts a
 * replacement so publishing continues.
 */
@Singleton
public class StatusChangePublisher implements WorkflowStatusListener {
    private static final Logger LOGGER = LoggerFactory.getLogger(StatusChangePublisher.class);
    // Queue capacity; producers block in put() once this many events are pending.
    private static final Integer QDEPTH =
            Integer.parseInt(
                    System.getenv().getOrDefault("ENV_WORKFLOW_NOTIFICATION_QUEUE_SIZE", "50"));
    private BlockingQueue<WorkflowModel> blockingQueue = new LinkedBlockingDeque<>(QDEPTH);
    private RestClientManager rcm;
    private ExecutionDAOFacade executionDAOFacade;

    /** Restarts the consumer thread whenever it dies with an uncaught exception. */
    class ExceptionHandler implements Thread.UncaughtExceptionHandler {
        public void uncaughtException(Thread t, Throwable e) {
            LOGGER.info("An exception has been captured\n");
            LOGGER.info("Thread: {}\n", t.getName());
            LOGGER.info("Exception: {}: {}\n", e.getClass().getName(), e.getMessage());
            LOGGER.info("Stack Trace: \n");
            e.printStackTrace(System.out);
            LOGGER.info("Thread status: {}\n", t.getState());
            // Spawn a fresh consumer so the queue keeps draining after a fatal error.
            new ConsumerThread().start();
        }
    }

    /** Long-lived consumer: takes workflows off the queue and publishes them one at a time. */
    class ConsumerThread extends Thread {
        public void run() {
            this.setUncaughtExceptionHandler(new ExceptionHandler());
            String tName = Thread.currentThread().getName();
            LOGGER.info("{}: Starting consumer thread", tName);
            StatusChangeNotification statusChangeNotification = null;
            WorkflowModel workflow = null;
            // Intentional infinite loop; take() blocks until an event is available.
            while (true) {
                try {
                    workflow = blockingQueue.take();
                    statusChangeNotification = new StatusChangeNotification(workflow.toWorkflow());
                    // NOTE(review): logs the filtered JSON but publishes the with-input/output
                    // variant (see publishStatusChangeNotification) — confirm this is intended.
                    String jsonWorkflow = statusChangeNotification.toJsonString();
                    LOGGER.info("Publishing StatusChangeNotification: {}", jsonWorkflow);
                    publishStatusChangeNotification(statusChangeNotification);
                    LOGGER.debug(
                            "Workflow {} publish is successful.",
                            statusChangeNotification.getWorkflowId());
                    // Small pause between publishes to avoid hammering the downstream endpoint.
                    Thread.sleep(5);
                } catch (Exception e) {
                    // Failures are logged and the loop continues; the event is not re-queued.
                    if (statusChangeNotification != null) {
                        LOGGER.error(
                                " Error while publishing workflow. Hence updating elastic search index workflowid {} workflowname {} correlationId {}",
                                workflow.getWorkflowId(),
                                workflow.getWorkflowName(),
                                workflow.getCorrelationId());
                        // TBD executionDAOFacade.indexWorkflow(workflow);

                    } else {
                        LOGGER.error("Failed to publish workflow: Workflow is NULL");
                    }
                    LOGGER.error("Error on publishing workflow", e);
                }
            }
        }
    }

    /** Starts the single consumer thread eagerly at construction time. */
    @Inject
    public StatusChangePublisher(RestClientManager rcm, ExecutionDAOFacade executionDAOFacade) {
        this.rcm = rcm;
        this.executionDAOFacade = executionDAOFacade;
        ConsumerThread consumerThread = new ConsumerThread();
        consumerThread.start();
    }

    /** Enqueues the completed workflow for asynchronous publishing; blocks if the queue is full. */
    @Override
    public void onWorkflowCompleted(WorkflowModel workflow) {
        LOGGER.debug(
                "workflows completion {} {}", workflow.getWorkflowId(), workflow.getWorkflowName());
        try {
            blockingQueue.put(workflow);
        } catch (Exception e) {
            LOGGER.error(
                    "Failed to enqueue workflow: Id {} Name {}",
                    workflow.getWorkflowId(),
                    workflow.getWorkflowName());
            LOGGER.error(e.toString());
        }
    }

    /** Enqueues the terminated workflow for asynchronous publishing; blocks if the queue is full. */
    @Override
    public void onWorkflowTerminated(WorkflowModel workflow) {
        LOGGER.debug(
                "workflows termination {} {}",
                workflow.getWorkflowId(),
                workflow.getWorkflowName());
        try {
            blockingQueue.put(workflow);
        } catch (Exception e) {
            LOGGER.error(
                    "Failed to enqueue workflow: Id {} Name {}",
                    workflow.getWorkflowId(),
                    workflow.getWorkflowName());
            LOGGER.error(e.getMessage());
        }
    }

    @Override
    public void onWorkflowCompletedIfEnabled(WorkflowModel workflow) {
        onWorkflowCompleted(workflow);
    }

    @Override
    public void onWorkflowTerminatedIfEnabled(WorkflowModel workflow) {
        onWorkflowTerminated(workflow);
    }

    /** Posts the notification (with input/output included) to the configured endpoint. */
    private void publishStatusChangeNotification(StatusChangeNotification statusChangeNotification)
            throws IOException {
        String jsonWorkflow = statusChangeNotification.toJsonStringWithInputOutput();
        rcm.postNotification(
                RestClientManager.NotificationType.WORKFLOW,
                jsonWorkflow,
                statusChangeNotification.getWorkflowId(),
                statusChangeNotification.getStatusNotifier());
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/kafka/KafkaWorkflowStatusPublisher.java | workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/kafka/KafkaWorkflowStatusPublisher.java | /*
* Copyright 2025 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.listener.kafka;
import java.time.Duration;
import java.util.Map;
import org.apache.kafka.clients.producer.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.DisposableBean;
import com.netflix.conductor.common.run.WorkflowSummary;
import com.netflix.conductor.common.run.WorkflowSummaryExtended;
import com.netflix.conductor.core.listener.WorkflowStatusListener;
import com.netflix.conductor.model.WorkflowModel;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
/** Kafka-based publisher for workflow status events. */
public class KafkaWorkflowStatusPublisher implements WorkflowStatusListener, DisposableBean {
private static final Logger LOGGER =
LoggerFactory.getLogger(KafkaWorkflowStatusPublisher.class);
private final KafkaProducer<String, String> producer;
private final Map<String, String> eventTopics;
private final String defaultTopic;
private final KafkaWorkflowStatusPublisherProperties properties;
private final ObjectMapper objectMapper;
public KafkaWorkflowStatusPublisher(
KafkaWorkflowStatusPublisherProperties properties, ObjectMapper objectMapper) {
this.eventTopics = properties.getEventTopics();
this.defaultTopic = properties.getDefaultTopic();
this.properties = properties;
this.objectMapper = objectMapper;
// Configure Kafka Producer
Map<String, Object> producerConfig = properties.toProducerConfig();
this.producer = new KafkaProducer<>(producerConfig);
}
@Override
public void destroy() {
if (producer != null) {
try {
producer.close(Duration.ofSeconds(10)); // Allow graceful shutdown
LOGGER.info("Kafka producer shut down gracefully.");
} catch (Exception e) {
LOGGER.error("Error shutting down Kafka producer", e);
}
}
}
@Override
public void onWorkflowStarted(WorkflowModel workflow) {
publishEvent(WorkflowEventType.STARTED, workflow);
}
@Override
public void onWorkflowRestarted(WorkflowModel workflow) {
publishEvent(WorkflowEventType.RESTARTED, workflow);
}
@Override
public void onWorkflowRerun(WorkflowModel workflow) {
publishEvent(WorkflowEventType.RERAN, workflow);
}
@Override
public void onWorkflowCompleted(WorkflowModel workflow) {
publishEvent(WorkflowEventType.COMPLETED, workflow);
}
@Override
public void onWorkflowTerminated(WorkflowModel workflow) {
publishEvent(WorkflowEventType.TERMINATED, workflow);
}
@Override
public void onWorkflowFinalized(WorkflowModel workflow) {
publishEvent(WorkflowEventType.FINALIZED, workflow);
}
@Override
public void onWorkflowPaused(WorkflowModel workflow) {
publishEvent(WorkflowEventType.PAUSED, workflow);
}
@Override
public void onWorkflowResumed(WorkflowModel workflow) {
publishEvent(WorkflowEventType.RESUMED, workflow);
}
@Override
public void onWorkflowRetried(WorkflowModel workflow) {
publishEvent(WorkflowEventType.RETRIED, workflow);
}
private void publishEvent(WorkflowEventType eventType, WorkflowModel workflow) {
try {
// Determine the correct topic
String topic = eventTopics.getOrDefault(eventType.toString(), defaultTopic);
LOGGER.debug("Publish event {} to topic {}", eventType.toString(), topic);
// Convert workflow to summary
WorkflowSummary workflowSummary = new WorkflowSummaryExtended(workflow.toWorkflow());
// Construct JSON message
Map<String, Object> message =
Map.of(
"workflowName", workflow.getWorkflowName(),
"eventType", eventType.toString(),
"payload", workflowSummary);
String jsonMessage = objectMapper.writeValueAsString(message);
// Create Kafka record
ProducerRecord<String, String> record =
new ProducerRecord<>(topic, workflow.getWorkflowId(), jsonMessage);
// Send message asynchronously
producer.send(
record,
(metadata, exception) -> {
if (exception != null) {
LOGGER.error(
"Failed to publish workflow event: {}", jsonMessage, exception);
} else {
LOGGER.debug(
"Published event: {}, Topic: {}, Partition: {}, Offset: {}",
eventType,
metadata.topic(),
metadata.partition(),
metadata.offset());
}
});
} catch (JsonProcessingException e) {
LOGGER.error(
"Error serializing workflow event for {}: {}", eventType, e.getMessage(), e);
} catch (Exception e) {
LOGGER.error("Unexpected error publishing event {}: {}", eventType, e.getMessage(), e);
}
}
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/kafka/KafkaWorkflowStatusPublisherConfiguration.java | workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/kafka/KafkaWorkflowStatusPublisherConfiguration.java | /*
* Copyright 2025 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.listener.kafka;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import com.netflix.conductor.core.listener.WorkflowStatusListener;
import com.fasterxml.jackson.databind.ObjectMapper;
/**
 * Spring configuration that registers the Kafka-backed workflow status publisher. Active only when
 * {@code conductor.workflow-status-listener.type=kafka}.
 */
@Configuration
@EnableConfigurationProperties(KafkaWorkflowStatusPublisherProperties.class)
@ConditionalOnProperty(name = "conductor.workflow-status-listener.type", havingValue = "kafka")
public class KafkaWorkflowStatusPublisherConfiguration {

    /** The {@link WorkflowStatusListener} that forwards workflow lifecycle events to Kafka. */
    @Bean
    public WorkflowStatusListener getWorkflowStatusListener(
            KafkaWorkflowStatusPublisherProperties publisherProperties, ObjectMapper mapper) {
        return new KafkaWorkflowStatusPublisher(publisherProperties, mapper);
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
conductor-oss/conductor | https://github.com/conductor-oss/conductor/blob/aa7de922578fe59d1d145881299b1a8306dde3b0/workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/kafka/KafkaWorkflowStatusPublisherProperties.java | workflow-event-listener/src/main/java/com/netflix/conductor/contribs/listener/kafka/KafkaWorkflowStatusPublisherProperties.java | /*
* Copyright 2025 Conductor Authors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.contribs.listener.kafka;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.springframework.boot.context.properties.ConfigurationProperties;
/**
 * Configuration for the Kafka workflow-status publisher: raw producer settings plus the
 * default/per-event topic mapping, bound from
 * {@code conductor.workflow-status-listener.kafka.*}.
 */
@ConfigurationProperties(prefix = "conductor.workflow-status-listener.kafka")
public class KafkaWorkflowStatusPublisherProperties {

    // Raw user-supplied producer settings; filtered against ProducerConfig's known keys.
    private Map<String, Object> producer = new HashMap<>();

    /** Default Kafka topic where all workflow status events are published. */
    private String defaultTopic = "workflow-status-events";

    /**
     * A map of event types to Kafka topics. If an event type has a specific topic, it will be
     * published there instead of the default.
     */
    private Map<String, String> eventTopics = new HashMap<>();

    public Map<String, Object> getProducer() {
        return producer;
    }

    public void setProducer(Map<String, Object> producer) {
        this.producer = producer;
    }

    public String getDefaultTopic() {
        return defaultTopic;
    }

    public void setDefaultTopic(String defaultTopic) {
        this.defaultTopic = defaultTopic;
    }

    public Map<String, String> getEventTopics() {
        return eventTopics;
    }

    public void setEventTopics(Map<String, String> eventTopics) {
        this.eventTopics = eventTopics;
    }

    /**
     * Generates configuration properties for Kafka producers. Maps against `ProducerConfig` keys.
     */
    public Map<String, Object> toProducerConfig() {
        return mapProperties(ProducerConfig.configNames(), producer);
    }

    /**
     * Filters the user-supplied properties down to valid {@code ProducerConfig} keys, then fills
     * in required defaults (bootstrap servers, serializers, client id) where absent or blank.
     *
     * @param allowedKeys The allowed Kafka ProducerConfig keys.
     * @param inputProperties The user-specified properties from application config.
     * @return A filtered map containing only valid properties.
     */
    private Map<String, Object> mapProperties(
            Iterable<String> allowedKeys, Map<String, Object> inputProperties) {
        Map<String, Object> config = new HashMap<>();
        allowedKeys.forEach(
                key -> {
                    // containsKey (not get != null) so an explicit null value is carried through.
                    if (inputProperties.containsKey(key)) {
                        config.put(key, inputProperties.get(key));
                    }
                });
        // Ensure bootstrapServers is always set
        setDefaultIfNullOrEmpty(config, ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka:29092");
        // Set required default serializers
        setDefaultIfNullOrEmpty(
                config,
                ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringSerializer");
        setDefaultIfNullOrEmpty(
                config,
                ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringSerializer");
        // Set default client ID
        setDefaultIfNullOrEmpty(
                config, ProducerConfig.CLIENT_ID_CONFIG, "workflow-status-producer");
        return config;
    }

    /** Sets a default value if the key is missing or empty. */
    private void setDefaultIfNullOrEmpty(
            Map<String, Object> config, String key, String defaultValue) {
        Object current = config.get(key);
        boolean absent = current == null;
        boolean blank = current instanceof String && ((String) current).isBlank();
        if (absent || blank) {
            config.put(key, defaultValue);
        }
    }
}
| java | Apache-2.0 | aa7de922578fe59d1d145881299b1a8306dde3b0 | 2026-01-04T14:46:58.351252Z | false |
wuyouzhuguli/SpringAll | https://github.com/wuyouzhuguli/SpringAll/blob/614d2578d9495acf53cc02f2dee9c6131cc5e51a/09.Spring-Boot-Redis-Cache/src/main/java/com/springboot/ApplicationTest.java | 09.Spring-Boot-Redis-Cache/src/main/java/com/springboot/ApplicationTest.java | package com.springboot;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import com.springboot.bean.Student;
import com.springboot.service.StudentService;
@RunWith(SpringJUnit4ClassRunner.class)
@SpringBootTest(classes = Application.class)
public class ApplicationTest {
@Autowired
private StudentService studentService;
@Test
public void test1() throws Exception {
Student student1 = this.studentService.queryStudentBySno("001");
System.out.println("学号" + student1.getSno() + "的学生姓名为:" + student1.getName());
Student student2 = this.studentService.queryStudentBySno("001");
System.out.println("学号" + student2.getSno() + "的学生姓名为:" + student2.getName());
}
@Test
public void test2() throws Exception {
Student student1 = this.studentService.queryStudentBySno("001");
System.out.println("学号" + student1.getSno() + "的学生姓名为:" + student1.getName());
student1.setName("康康");
this.studentService.update(student1);
Student student2 = this.studentService.queryStudentBySno("001");
System.out.println("学号" + student2.getSno() + "的学生姓名为:" + student2.getName());
}
}
| java | MIT | 614d2578d9495acf53cc02f2dee9c6131cc5e51a | 2026-01-04T14:47:19.901108Z | false |
wuyouzhuguli/SpringAll | https://github.com/wuyouzhuguli/SpringAll/blob/614d2578d9495acf53cc02f2dee9c6131cc5e51a/09.Spring-Boot-Redis-Cache/src/main/java/com/springboot/Application.java | 09.Spring-Boot-Redis-Cache/src/main/java/com/springboot/Application.java | package com.springboot;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.cache.annotation.EnableCaching;
@SpringBootApplication
@EnableCaching
public class Application {
public static void main(String[] args) {
SpringApplication.run(Application.class,args);
}
}
| java | MIT | 614d2578d9495acf53cc02f2dee9c6131cc5e51a | 2026-01-04T14:47:19.901108Z | false |
wuyouzhuguli/SpringAll | https://github.com/wuyouzhuguli/SpringAll/blob/614d2578d9495acf53cc02f2dee9c6131cc5e51a/09.Spring-Boot-Redis-Cache/src/main/java/com/springboot/service/StudentService.java | 09.Spring-Boot-Redis-Cache/src/main/java/com/springboot/service/StudentService.java | package com.springboot.service;
import org.springframework.cache.annotation.CacheConfig;
import org.springframework.cache.annotation.CacheEvict;
import org.springframework.cache.annotation.CachePut;
import org.springframework.cache.annotation.Cacheable;
import com.springboot.bean.Student;
@CacheConfig(cacheNames = "student")
public interface StudentService {
@CachePut(key = "#p0.sno")
Student update(Student student);
@CacheEvict(key = "#p0", allEntries = true)
void deleteStudentBySno(String sno);
@Cacheable(key = "#p0")
Student queryStudentBySno(String sno);
}
| java | MIT | 614d2578d9495acf53cc02f2dee9c6131cc5e51a | 2026-01-04T14:47:19.901108Z | false |
wuyouzhuguli/SpringAll | https://github.com/wuyouzhuguli/SpringAll/blob/614d2578d9495acf53cc02f2dee9c6131cc5e51a/09.Spring-Boot-Redis-Cache/src/main/java/com/springboot/service/impl/StudentServiceImpl.java | 09.Spring-Boot-Redis-Cache/src/main/java/com/springboot/service/impl/StudentServiceImpl.java | package com.springboot.service.impl;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Repository;
import com.springboot.bean.Student;
import com.springboot.mapper.StudentMapper;
import com.springboot.service.StudentService;
@Repository("studentService")
public class StudentServiceImpl implements StudentService{
@Autowired
private StudentMapper studentMapper;
@Override
public Student update(Student student) {
this.studentMapper.update(student);
return this.studentMapper.queryStudentBySno(student.getSno());
}
@Override
public void deleteStudentBySno(String sno) {
this.studentMapper.deleteStudentBySno(sno);
}
@Override
public Student queryStudentBySno(String sno) {
return this.studentMapper.queryStudentBySno(sno);
}
}
| java | MIT | 614d2578d9495acf53cc02f2dee9c6131cc5e51a | 2026-01-04T14:47:19.901108Z | false |
wuyouzhuguli/SpringAll | https://github.com/wuyouzhuguli/SpringAll/blob/614d2578d9495acf53cc02f2dee9c6131cc5e51a/09.Spring-Boot-Redis-Cache/src/main/java/com/springboot/bean/Student.java | 09.Spring-Boot-Redis-Cache/src/main/java/com/springboot/bean/Student.java | package com.springboot.bean;
import java.io.Serializable;
public class Student implements Serializable{
private static final long serialVersionUID = -339516038496531943L;
private String sno;
private String name;
private String sex;
public String getSno() {
return sno;
}
public void setSno(String sno) {
this.sno = sno;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getSex() {
return sex;
}
public void setSex(String sex) {
this.sex = sex;
}
}
| java | MIT | 614d2578d9495acf53cc02f2dee9c6131cc5e51a | 2026-01-04T14:47:19.901108Z | false |
wuyouzhuguli/SpringAll | https://github.com/wuyouzhuguli/SpringAll/blob/614d2578d9495acf53cc02f2dee9c6131cc5e51a/09.Spring-Boot-Redis-Cache/src/main/java/com/springboot/config/RedisConfig.java | 09.Spring-Boot-Redis-Cache/src/main/java/com/springboot/config/RedisConfig.java | package com.springboot.config;
import org.springframework.cache.CacheManager;
import org.springframework.cache.annotation.CachingConfigurerSupport;
import org.springframework.cache.interceptor.KeyGenerator;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.cache.RedisCacheManager;
import org.springframework.data.redis.connection.RedisConnectionFactory;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.core.StringRedisTemplate;
import org.springframework.data.redis.serializer.Jackson2JsonRedisSerializer;
import com.fasterxml.jackson.annotation.JsonAutoDetect;
import com.fasterxml.jackson.annotation.PropertyAccessor;
import com.fasterxml.jackson.databind.ObjectMapper;
@Configuration
public class RedisConfig extends CachingConfigurerSupport {
// 自定义缓存key生成策略
@Bean
public KeyGenerator keyGenerator() {
return new KeyGenerator() {
@Override
public Object generate(Object target, java.lang.reflect.Method method, Object... params) {
StringBuffer sb = new StringBuffer();
sb.append(target.getClass().getName());
sb.append(method.getName());
for (Object obj : params) {
sb.append(obj.toString());
}
return sb.toString();
}
};
}
// 缓存管理器
@Bean
public CacheManager cacheManager(@SuppressWarnings("rawtypes") RedisTemplate redisTemplate) {
RedisCacheManager cacheManager = new RedisCacheManager(redisTemplate);
// 设置缓存过期时间
cacheManager.setDefaultExpiration(10000);
return cacheManager;
}
@Bean
public RedisTemplate<String, String> redisTemplate(RedisConnectionFactory factory) {
StringRedisTemplate template = new StringRedisTemplate(factory);
setSerializer(template);// 设置序列化工具
template.afterPropertiesSet();
return template;
}
private void setSerializer(StringRedisTemplate template) {
@SuppressWarnings({ "rawtypes", "unchecked" })
Jackson2JsonRedisSerializer jackson2JsonRedisSerializer = new Jackson2JsonRedisSerializer(Object.class);
ObjectMapper om = new ObjectMapper();
om.setVisibility(PropertyAccessor.ALL, JsonAutoDetect.Visibility.ANY);
om.enableDefaultTyping(ObjectMapper.DefaultTyping.NON_FINAL);
jackson2JsonRedisSerializer.setObjectMapper(om);
template.setValueSerializer(jackson2JsonRedisSerializer);
}
}
| java | MIT | 614d2578d9495acf53cc02f2dee9c6131cc5e51a | 2026-01-04T14:47:19.901108Z | false |
wuyouzhuguli/SpringAll | https://github.com/wuyouzhuguli/SpringAll/blob/614d2578d9495acf53cc02f2dee9c6131cc5e51a/09.Spring-Boot-Redis-Cache/src/main/java/com/springboot/mapper/StudentMapper.java | 09.Spring-Boot-Redis-Cache/src/main/java/com/springboot/mapper/StudentMapper.java | package com.springboot.mapper;
import org.apache.ibatis.annotations.Delete;
import org.apache.ibatis.annotations.Mapper;
import org.apache.ibatis.annotations.Result;
import org.apache.ibatis.annotations.Results;
import org.apache.ibatis.annotations.Select;
import org.apache.ibatis.annotations.Update;
import org.springframework.cache.annotation.CacheConfig;
import com.springboot.bean.Student;
@Mapper
@CacheConfig(cacheNames = "student")
public interface StudentMapper {
@Update("update student set sname=#{name},ssex=#{sex} where sno=#{sno}")
int update(Student student);
@Delete("delete from student where sno=#{sno}")
void deleteStudentBySno(String sno);
@Select("select * from student where sno=#{sno}")
@Results(id = "student", value = { @Result(property = "sno", column = "sno", javaType = String.class),
@Result(property = "name", column = "sname", javaType = String.class),
@Result(property = "sex", column = "ssex", javaType = String.class) })
Student queryStudentBySno(String sno);
}
| java | MIT | 614d2578d9495acf53cc02f2dee9c6131cc5e51a | 2026-01-04T14:47:19.901108Z | false |
wuyouzhuguli/SpringAll | https://github.com/wuyouzhuguli/SpringAll/blob/614d2578d9495acf53cc02f2dee9c6131cc5e51a/44.Spring-Boot-Autoconfiguration/src/test/java/com/example/demo/DemoApplicationTests.java | 44.Spring-Boot-Autoconfiguration/src/test/java/com/example/demo/DemoApplicationTests.java | package com.example.demo;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;
@RunWith(SpringRunner.class)
@SpringBootTest
public class DemoApplicationTests {
@Test
public void contextLoads() {
}
}
| java | MIT | 614d2578d9495acf53cc02f2dee9c6131cc5e51a | 2026-01-04T14:47:19.901108Z | false |
wuyouzhuguli/SpringAll | https://github.com/wuyouzhuguli/SpringAll/blob/614d2578d9495acf53cc02f2dee9c6131cc5e51a/44.Spring-Boot-Autoconfiguration/src/main/java/com/example/demo/DemoApplication.java | 44.Spring-Boot-Autoconfiguration/src/main/java/com/example/demo/DemoApplication.java | package com.example.demo;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
@SpringBootApplication
public class DemoApplication {
public static void main(String[] args) {
SpringApplication.run(DemoApplication.class, args);
}
}
| java | MIT | 614d2578d9495acf53cc02f2dee9c6131cc5e51a | 2026-01-04T14:47:19.901108Z | false |
wuyouzhuguli/SpringAll | https://github.com/wuyouzhuguli/SpringAll/blob/614d2578d9495acf53cc02f2dee9c6131cc5e51a/44.Spring-Boot-Autoconfiguration/src/main/java/com/example/demo/service/TestService.java | 44.Spring-Boot-Autoconfiguration/src/main/java/com/example/demo/service/TestService.java | package com.example.demo.service;
import com.example.demo.annotation.FirstLevelService;
import com.example.demo.annotation.SecondLevelService;
/**
* @author MrBird
*/
@SecondLevelService
public class TestService {
}
| java | MIT | 614d2578d9495acf53cc02f2dee9c6131cc5e51a | 2026-01-04T14:47:19.901108Z | false |
wuyouzhuguli/SpringAll | https://github.com/wuyouzhuguli/SpringAll/blob/614d2578d9495acf53cc02f2dee9c6131cc5e51a/44.Spring-Boot-Autoconfiguration/src/main/java/com/example/demo/bootstrap/TestEnableBootstap.java | 44.Spring-Boot-Autoconfiguration/src/main/java/com/example/demo/bootstrap/TestEnableBootstap.java | package com.example.demo.bootstrap;
import com.example.demo.annotation.EnableHelloWorld;
import org.springframework.boot.WebApplicationType;
import org.springframework.boot.builder.SpringApplicationBuilder;
import org.springframework.context.ConfigurableApplicationContext;
/**
* @author MrBird
*/
@EnableHelloWorld
public class TestEnableBootstap {
public static void main(String[] args) {
ConfigurableApplicationContext context = new SpringApplicationBuilder(TestEnableBootstap.class)
.web(WebApplicationType.NONE)
.run(args);
String hello = context.getBean("hello", String.class);
System.out.println("hello Bean: " + hello);
context.close();
}
}
| java | MIT | 614d2578d9495acf53cc02f2dee9c6131cc5e51a | 2026-01-04T14:47:19.901108Z | false |
wuyouzhuguli/SpringAll | https://github.com/wuyouzhuguli/SpringAll/blob/614d2578d9495acf53cc02f2dee9c6131cc5e51a/44.Spring-Boot-Autoconfiguration/src/main/java/com/example/demo/bootstrap/ServiceBootstrap.java | 44.Spring-Boot-Autoconfiguration/src/main/java/com/example/demo/bootstrap/ServiceBootstrap.java | package com.example.demo.bootstrap;
import com.example.demo.service.TestService;
import org.springframework.boot.WebApplicationType;
import org.springframework.boot.builder.SpringApplicationBuilder;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.context.annotation.ComponentScan;
/**
* @author MrBird
*/
@ComponentScan("com.example.demo.service")
public class ServiceBootstrap {
public static void main(String[] args) {
ConfigurableApplicationContext context = new SpringApplicationBuilder(ServiceBootstrap.class)
.web(WebApplicationType.NONE)
.run(args);
TestService testService = context.getBean("testService", TestService.class);
System.out.println("TestService Bean: " + testService);
context.close();
}
}
| java | MIT | 614d2578d9495acf53cc02f2dee9c6131cc5e51a | 2026-01-04T14:47:19.901108Z | false |
wuyouzhuguli/SpringAll | https://github.com/wuyouzhuguli/SpringAll/blob/614d2578d9495acf53cc02f2dee9c6131cc5e51a/44.Spring-Boot-Autoconfiguration/src/main/java/com/example/demo/bootstrap/EnableAutoConfigurationBootstrap.java | 44.Spring-Boot-Autoconfiguration/src/main/java/com/example/demo/bootstrap/EnableAutoConfigurationBootstrap.java | package com.example.demo.bootstrap;
import org.springframework.boot.WebApplicationType;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.builder.SpringApplicationBuilder;
import org.springframework.context.ConfigurableApplicationContext;
/**
* @author MrBird
*/
@EnableAutoConfiguration
public class EnableAutoConfigurationBootstrap {
public static void main(String[] args) {
ConfigurableApplicationContext context = new SpringApplicationBuilder(EnableAutoConfigurationBootstrap.class)
.web(WebApplicationType.NONE)
.run(args);
String hello = context.getBean("hello", String.class);
System.out.println("hello Bean: " + hello);
context.close();
}
}
| java | MIT | 614d2578d9495acf53cc02f2dee9c6131cc5e51a | 2026-01-04T14:47:19.901108Z | false |
wuyouzhuguli/SpringAll | https://github.com/wuyouzhuguli/SpringAll/blob/614d2578d9495acf53cc02f2dee9c6131cc5e51a/44.Spring-Boot-Autoconfiguration/src/main/java/com/example/demo/configuration/HelloWorldAutoConfiguration.java | 44.Spring-Boot-Autoconfiguration/src/main/java/com/example/demo/configuration/HelloWorldAutoConfiguration.java | package com.example.demo.configuration;
import com.example.demo.annotation.EnableHelloWorld;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Configuration;
/**
* @author MrBird
*/
@Configuration
@EnableHelloWorld
@ConditionalOnProperty(name = "helloworld", havingValue = "true")
public class HelloWorldAutoConfiguration {
}
| java | MIT | 614d2578d9495acf53cc02f2dee9c6131cc5e51a | 2026-01-04T14:47:19.901108Z | false |
wuyouzhuguli/SpringAll | https://github.com/wuyouzhuguli/SpringAll/blob/614d2578d9495acf53cc02f2dee9c6131cc5e51a/44.Spring-Boot-Autoconfiguration/src/main/java/com/example/demo/configuration/HelloWorldConfiguration.java | 44.Spring-Boot-Autoconfiguration/src/main/java/com/example/demo/configuration/HelloWorldConfiguration.java | package com.example.demo.configuration;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
/**
* @author MrBird
*/
@Configuration
public class HelloWorldConfiguration {
@Bean
public String hello() {
return "hello world";
}
}
| java | MIT | 614d2578d9495acf53cc02f2dee9c6131cc5e51a | 2026-01-04T14:47:19.901108Z | false |
wuyouzhuguli/SpringAll | https://github.com/wuyouzhuguli/SpringAll/blob/614d2578d9495acf53cc02f2dee9c6131cc5e51a/44.Spring-Boot-Autoconfiguration/src/main/java/com/example/demo/selector/HelloWorldImportSelector.java | 44.Spring-Boot-Autoconfiguration/src/main/java/com/example/demo/selector/HelloWorldImportSelector.java | package com.example.demo.selector;
import com.example.demo.configuration.HelloWorldConfiguration;
import org.springframework.context.annotation.ImportSelector;
import org.springframework.core.type.AnnotationMetadata;
/**
* @author MrBird
*/
public class HelloWorldImportSelector implements ImportSelector {
@Override
public String[] selectImports(AnnotationMetadata importingClassMetadata) {
return new String[]{HelloWorldConfiguration.class.getName()};
}
}
| java | MIT | 614d2578d9495acf53cc02f2dee9c6131cc5e51a | 2026-01-04T14:47:19.901108Z | false |
wuyouzhuguli/SpringAll | https://github.com/wuyouzhuguli/SpringAll/blob/614d2578d9495acf53cc02f2dee9c6131cc5e51a/44.Spring-Boot-Autoconfiguration/src/main/java/com/example/demo/annotation/FirstLevelService.java | 44.Spring-Boot-Autoconfiguration/src/main/java/com/example/demo/annotation/FirstLevelService.java | package com.example.demo.annotation;
import org.springframework.stereotype.Repository;
import org.springframework.stereotype.Service;
import java.lang.annotation.*;
@Target({ElementType.TYPE})
@Retention(RetentionPolicy.RUNTIME)
@Documented
@Service
public @interface FirstLevelService {
String value() default "";
}
| java | MIT | 614d2578d9495acf53cc02f2dee9c6131cc5e51a | 2026-01-04T14:47:19.901108Z | false |
wuyouzhuguli/SpringAll | https://github.com/wuyouzhuguli/SpringAll/blob/614d2578d9495acf53cc02f2dee9c6131cc5e51a/44.Spring-Boot-Autoconfiguration/src/main/java/com/example/demo/annotation/EnableHelloWorld.java | 44.Spring-Boot-Autoconfiguration/src/main/java/com/example/demo/annotation/EnableHelloWorld.java | package com.example.demo.annotation;
import com.example.demo.configuration.HelloWorldConfiguration;
import org.springframework.context.annotation.Import;
import java.lang.annotation.*;
/**
* @author MrBird
*/
@Target({ElementType.TYPE})
@Retention(RetentionPolicy.RUNTIME)
@Documented
// @Import(HelloWorldImportSelector.class)
@Import(HelloWorldConfiguration.class)
public @interface EnableHelloWorld {
}
| java | MIT | 614d2578d9495acf53cc02f2dee9c6131cc5e51a | 2026-01-04T14:47:19.901108Z | false |
wuyouzhuguli/SpringAll | https://github.com/wuyouzhuguli/SpringAll/blob/614d2578d9495acf53cc02f2dee9c6131cc5e51a/44.Spring-Boot-Autoconfiguration/src/main/java/com/example/demo/annotation/SecondLevelService.java | 44.Spring-Boot-Autoconfiguration/src/main/java/com/example/demo/annotation/SecondLevelService.java | package com.example.demo.annotation;
import java.lang.annotation.*;
@Target({ElementType.TYPE})
@Retention(RetentionPolicy.RUNTIME)
@Documented
@FirstLevelService
public @interface SecondLevelService {
String value() default "";
}
| java | MIT | 614d2578d9495acf53cc02f2dee9c6131cc5e51a | 2026-01-04T14:47:19.901108Z | false |
wuyouzhuguli/SpringAll | https://github.com/wuyouzhuguli/SpringAll/blob/614d2578d9495acf53cc02f2dee9c6131cc5e51a/57.Spring-Boot-WebFlux/webflux/src/main/java/com/example/webflux/WebfluxApplication.java | 57.Spring-Boot-WebFlux/webflux/src/main/java/com/example/webflux/WebfluxApplication.java | package com.example.webflux;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
@SpringBootApplication
public class WebfluxApplication {
public static void main(String[] args) {
SpringApplication.run(WebfluxApplication.class, args);
}
}
| java | MIT | 614d2578d9495acf53cc02f2dee9c6131cc5e51a | 2026-01-04T14:47:19.901108Z | false |
wuyouzhuguli/SpringAll | https://github.com/wuyouzhuguli/SpringAll/blob/614d2578d9495acf53cc02f2dee9c6131cc5e51a/57.Spring-Boot-WebFlux/webflux/src/main/java/com/example/webflux/FluxTest.java | 57.Spring-Boot-WebFlux/webflux/src/main/java/com/example/webflux/FluxTest.java | package com.example.webflux;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import java.time.Duration;
import java.time.temporal.ChronoUnit;
import java.util.ArrayList;
import java.util.Random;
/**
* @author MrBird
*/
public class FluxTest {
public static void main(String[] args) throws InterruptedException {
Flux.just("Hello", "World").subscribe(System.out::println);
Flux.fromArray(new Integer[]{1, 2, 3}).subscribe(System.out::println);
Flux.empty().subscribe(System.out::println);
Flux.range(1, 4).subscribe(System.out::println);
// Flux.interval(Duration.of(1, ChronoUnit.SECONDS)).subscribe(System.out::println);
Flux.generate(sink -> {
sink.next("Hello");
sink.complete();
}).subscribe(System.out::println);
final Random random = new Random();
Flux.generate(ArrayList::new, (list, sink) -> {
int value = random.nextInt(100);
list.add(value);
sink.next(value);
if (list.size() == 10) {
sink.complete();
}
return list;
}).subscribe(System.out::println);
Flux.create(sink -> {
for (int i = 0; i < 10; i++) {
sink.next(i);
}
sink.complete();
}).subscribe(System.out::println);
Flux.range(1, 10).filter(i -> i % 2 == 0).subscribe(System.out::println);
Flux.range(1, 20).take(10).subscribe(System.out::println);
Flux.range(1, 20).takeLast(10).subscribe(System.out::println);
Flux.range(1, 20).takeWhile(i -> i < 10).subscribe(System.out::println);
Flux.range(1, 20).takeUntil(i -> i == 10).subscribe(System.out::println);
Flux.range(1, 10).reduce((x, y) -> x + y).subscribe(System.out::println);
Flux.range(1, 10).reduceWith(() -> 10, (x, y) -> x + y).subscribe(System.out::println);
Flux.merge(
Flux.interval(Duration.of(500, ChronoUnit.MILLIS)).take(2),
Flux.interval(Duration.of(500, ChronoUnit.MILLIS)).take(2)
).toStream().forEach(System.out::println);
Flux.range(1, 100).buffer(20).subscribe(System.out::println);
Flux.range(1, 10).bufferUntil(i -> i % 2 == 0).subscribe(System.out::println);
Flux.range(1, 10).bufferWhile(i -> i % 2 == 0).subscribe(System.out::println);
Flux.just("a", "b", "c", "d")
.zipWith(Flux.just("e", "f", "g", "h", "i"))
.subscribe(System.out::println);
Flux.just("a", "b", "c", "d")
.zipWith(Flux.just("e", "f", "g", "h", "i"), (s1, s2) -> String.format("%s-%s", s1, s2))
.subscribe(System.out::println);
Flux.just(1, 2)
.concatWith(Mono.error(new IllegalStateException()))
.subscribe(System.out::println, System.err::println);
Flux.just(1, 2)
.concatWith(Mono.error(new IllegalStateException()))
.onErrorReturn(0)
.subscribe(System.out::println);
Flux.just(1, 2)
.concatWith(Mono.error(new IllegalArgumentException()))
.onErrorResume(e -> {
if (e instanceof IllegalStateException) {
return Mono.just(0);
} else if (e instanceof IllegalArgumentException) {
return Mono.just(-1);
}
return Mono.empty();
}).subscribe(System.out::println);
Thread.currentThread().join(20000);
}
}
| java | MIT | 614d2578d9495acf53cc02f2dee9c6131cc5e51a | 2026-01-04T14:47:19.901108Z | false |
wuyouzhuguli/SpringAll | https://github.com/wuyouzhuguli/SpringAll/blob/614d2578d9495acf53cc02f2dee9c6131cc5e51a/57.Spring-Boot-WebFlux/webflux/src/main/java/com/example/webflux/ViewController.java | 57.Spring-Boot-WebFlux/webflux/src/main/java/com/example/webflux/ViewController.java | package com.example.webflux;
import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.GetMapping;
/**
* @author MrBird
*/
@Controller
public class ViewController {
@GetMapping("flux")
public String flux() {
return "flux";
}
}
| java | MIT | 614d2578d9495acf53cc02f2dee9c6131cc5e51a | 2026-01-04T14:47:19.901108Z | false |
wuyouzhuguli/SpringAll | https://github.com/wuyouzhuguli/SpringAll/blob/614d2578d9495acf53cc02f2dee9c6131cc5e51a/57.Spring-Boot-WebFlux/webflux/src/main/java/com/example/webflux/TestController.java | 57.Spring-Boot-WebFlux/webflux/src/main/java/com/example/webflux/TestController.java | package com.example.webflux;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.http.MediaType;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import java.util.concurrent.TimeUnit;
import java.util.stream.IntStream;
/**
* @author MrBird
*/
@RestController
public class TestController {
// Mono 表示 0-1 个元素,Flux 0-N 个元素
private Logger logger = LoggerFactory.getLogger(this.getClass());
@GetMapping("sync")
public String sync() {
logger.info("sync method start");
String result = this.execute();
logger.info("sync method end");
return result;
}
@GetMapping("async/mono")
public Mono<String> asyncMono() {
logger.info("async method start");
Mono<String> result = Mono.fromSupplier(this::execute);
logger.info("async method end");
return result;
}
// SSE(Server Sent Event)
// https://developer.mozilla.org/zh-CN/docs/Server-sent_events/Using_server-sent_events
// http://www.ruanyifeng.com/blog/2017/05/server-sent_events.html
@GetMapping(value = "async/flux", produces = MediaType.TEXT_EVENT_STREAM_VALUE)
public Flux<String> asyncFlux() {
logger.info("async method start");
Flux<String> result = Flux.fromStream(IntStream.range(1, 5).mapToObj(i -> {
try {
TimeUnit.SECONDS.sleep(1);
} catch (InterruptedException e) {
e.printStackTrace();
}
return "int value:" + i;
}));
logger.info("async method end");
return result;
}
private String execute() {
try {
TimeUnit.SECONDS.sleep(2);
} catch (InterruptedException e) {
e.printStackTrace();
}
return "hello";
}
}
| java | MIT | 614d2578d9495acf53cc02f2dee9c6131cc5e51a | 2026-01-04T14:47:19.901108Z | false |
wuyouzhuguli/SpringAll | https://github.com/wuyouzhuguli/SpringAll/blob/614d2578d9495acf53cc02f2dee9c6131cc5e51a/57.Spring-Boot-WebFlux/webflux/src/main/java/com/example/webflux/MonoTest.java | 57.Spring-Boot-WebFlux/webflux/src/main/java/com/example/webflux/MonoTest.java | package com.example.webflux;
import reactor.core.publisher.Mono;
import java.util.Optional;
/**
* @author MrBird
*/
public class MonoTest {
public static void main(String[] args) {
Mono.just("are").subscribe(System.out::println);
Mono.empty().subscribe(System.out::println);
Mono.fromSupplier(() -> "you").subscribe(System.out::println);
Mono.justOrEmpty(Optional.of("ok")).subscribe(System.out::println);
Mono.create(sink -> sink.success("Hello")).subscribe(System.out::println);
}
}
| java | MIT | 614d2578d9495acf53cc02f2dee9c6131cc5e51a | 2026-01-04T14:47:19.901108Z | false |
wuyouzhuguli/SpringAll | https://github.com/wuyouzhuguli/SpringAll/blob/614d2578d9495acf53cc02f2dee9c6131cc5e51a/57.Spring-Boot-WebFlux/webflux/src/main/java/com/example/webflux/MonoFluxTest.java | 57.Spring-Boot-WebFlux/webflux/src/main/java/com/example/webflux/MonoFluxTest.java | package com.example.webflux;
import java.util.concurrent.TimeUnit;
import org.reactivestreams.Subscriber;
import org.reactivestreams.Subscription;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
public class MonoFluxTest {
public static void main(String[] args) {
Subscriber<Integer> subscriber = new Subscriber<Integer>() {
private Subscription subscription;
@Override
public void onSubscribe(Subscription subscription) {
this.subscription = subscription;
this.subscription.request(1);
}
@Override
public void onNext(Integer item) {
System.out.println("接受到数据: " + item);
try {
TimeUnit.SECONDS.sleep(3);
} catch (InterruptedException e) {
e.printStackTrace();
}
this.subscription.request(1);
}
@Override
public void onError(Throwable throwable) {
throwable.printStackTrace();
this.subscription.cancel();
}
@Override
public void onComplete() {
System.out.println("处理完了!");
}
};
String[] strs = {"1", "2", "3"};
Flux.fromArray(strs).map(Integer::parseInt).subscribe(subscriber);
Mono.fromSupplier(() -> 1).map(s -> s + 1).subscribe(subscriber);
}
}
| java | MIT | 614d2578d9495acf53cc02f2dee9c6131cc5e51a | 2026-01-04T14:47:19.901108Z | false |
wuyouzhuguli/SpringAll | https://github.com/wuyouzhuguli/SpringAll/blob/614d2578d9495acf53cc02f2dee9c6131cc5e51a/57.Spring-Boot-WebFlux/async-servlet/src/SyncServlet.java | 57.Spring-Boot-WebFlux/async-servlet/src/SyncServlet.java | import javax.servlet.annotation.WebServlet;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import java.util.logging.Logger;
/**
* @author MrBird
*/
@WebServlet(urlPatterns = "/sync")
public class SyncServlet extends HttpServlet {
private static final long serialVersionUID = 7583536145022393360L;
private Logger log = Logger.getLogger(SyncServlet.class.getName());
@Override
protected void doGet(HttpServletRequest request, HttpServletResponse response) {
long start = System.currentTimeMillis();
this.execute(request, response);
log.info("总耗时:" + (System.currentTimeMillis() - start) + "ms");
}
private void execute(HttpServletRequest request, HttpServletResponse response) {
try {
TimeUnit.SECONDS.sleep(2);
} catch (InterruptedException e) {
e.printStackTrace();
}
try {
response.getWriter().append("hello");
} catch (IOException e) {
e.printStackTrace();
}
}
}
| java | MIT | 614d2578d9495acf53cc02f2dee9c6131cc5e51a | 2026-01-04T14:47:19.901108Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.