index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/zuul/zuul-discovery/src/test/java/com/netflix/zuul | Create_ds/zuul/zuul-discovery/src/test/java/com/netflix/zuul/discovery/DynamicServerResolverTest.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.zuul.discovery;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.truth.Truth;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.InstanceInfo.Builder;
import com.netflix.client.config.DefaultClientConfigImpl;
import com.netflix.niws.loadbalancer.DiscoveryEnabledServer;
import com.netflix.zuul.resolver.ResolverListener;
import org.junit.jupiter.api.Test;
import java.util.List;
class DynamicServerResolverTest {
@Test
void verifyListenerUpdates() {
class CustomListener implements ResolverListener<DiscoveryResult> {
private List<DiscoveryResult> resultSet = Lists.newArrayList();
@Override
public void onChange(List<DiscoveryResult> changedSet) {
resultSet = changedSet;
}
public List<DiscoveryResult> updatedList() {
return resultSet;
}
}
final CustomListener listener = new CustomListener();
final DynamicServerResolver resolver = new DynamicServerResolver(new DefaultClientConfigImpl());
resolver.setListener(listener);
final InstanceInfo first = Builder.newBuilder()
.setAppName("zuul-discovery-1")
.setHostName("zuul-discovery-1")
.setIPAddr("100.10.10.1")
.setPort(443)
.build();
final InstanceInfo second = Builder.newBuilder()
.setAppName("zuul-discovery-2")
.setHostName("zuul-discovery-2")
.setIPAddr("100.10.10.2")
.setPort(443)
.build();
final DiscoveryEnabledServer server1 = new DiscoveryEnabledServer(first, true);
final DiscoveryEnabledServer server2 = new DiscoveryEnabledServer(second, true);
resolver.onUpdate(ImmutableList.of(server1, server2), ImmutableList.of());
Truth.assertThat(listener.updatedList())
.containsExactly(new DiscoveryResult(server1), new DiscoveryResult(server2));
}
@Test
void properSentinelValueWhenServersUnavailable() {
final DynamicServerResolver resolver = new DynamicServerResolver(new DefaultClientConfigImpl());
final DiscoveryResult nonExistentServer = resolver.resolve(null);
Truth.assertThat(nonExistentServer).isSameInstanceAs(DiscoveryResult.EMPTY);
Truth.assertThat(nonExistentServer.getHost()).isEqualTo("undefined");
Truth.assertThat(nonExistentServer.getPort()).isEqualTo(-1);
}
}
| 6,500 |
0 | Create_ds/zuul/zuul-discovery/src/main/java/com/netflix/zuul | Create_ds/zuul/zuul-discovery/src/main/java/com/netflix/zuul/discovery/DiscoveryResult.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.zuul.discovery;
import com.google.common.annotations.VisibleForTesting;
import com.netflix.appinfo.AmazonInfo;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.InstanceInfo.PortType;
import com.netflix.loadbalancer.LoadBalancerStats;
import com.netflix.loadbalancer.ServerStats;
import com.netflix.niws.loadbalancer.DiscoveryEnabledServer;
import javax.annotation.Nullable;
import java.util.Locale;
import java.util.Objects;
import java.util.Optional;
/**
* @author Argha C
* @since 2/25/21
* <p>
* Wraps a single instance of discovery enabled server, and stats related to it.
*/
public final class DiscoveryResult implements ResolverResult {

    /** The wrapped discovery-enabled server; may be null if a caller passes null explicitly. */
    private final DiscoveryEnabledServer server;
    /** Stats for the wrapped server; a no-op placeholder when built without a load balancer. */
    private final ServerStats serverStats;

    /**
     * This exists to allow for a semblance of type safety, and encourages avoiding null checks on the underlying Server,
     * thus representing a sentinel value for an empty resolution result.
     */
    public static final DiscoveryResult EMPTY = DiscoveryResult.from(
            InstanceInfo.Builder.newBuilder()
                    .setAppName("undefined")
                    .setHostName("undefined")
                    .setPort(-1)
                    .build(),
            false);

    /**
     * Preferred constructor: wraps the server together with its live stats from the load balancer.
     *
     * @param server  the resolved server
     * @param lbStats load balancer stats; must not be null
     */
    public DiscoveryResult(DiscoveryEnabledServer server, LoadBalancerStats lbStats) {
        this.server = server;
        Objects.requireNonNull(lbStats, "Loadbalancer stats must be a valid instance");
        this.serverStats = lbStats.getSingleServerStat(server);
    }

    /**
     *
     * This solely exists to create a result object from incomplete InstanceInfo.
     * Usage of this for production code is strongly discouraged, since the underlying instances are prone to memory leaks
     */
    public DiscoveryResult(DiscoveryEnabledServer server) {
        this.server = server;
        // Placeholder stats object; all stat mutators below are effectively no-ops on real data.
        this.serverStats = new ServerStats() {
            @Override
            public String toString() {
                return "no stats configured for server";
            }
        };
    }

    /**
     *
     * This convenience method exists for usage in tests. For production usage, please use the constructor linked:
     * @see DiscoveryResult#DiscoveryResult(DiscoveryEnabledServer, LoadBalancerStats)
     */
    @VisibleForTesting
    public static DiscoveryResult from(InstanceInfo instanceInfo, boolean useSecurePort) {
        final DiscoveryEnabledServer server = new DiscoveryEnabledServer(instanceInfo, useSecurePort);
        return new DiscoveryResult(server);
    }

    /**
     * @return the instance's IP address, or empty for the EMPTY sentinel, a missing
     *         InstanceInfo, or a null/blank address.
     */
    public Optional<String> getIPAddr() {
        if (this == DiscoveryResult.EMPTY || server.getInstanceInfo() == null) {
            return Optional.empty();
        }
        final String ip = server.getInstanceInfo().getIPAddr();
        return (ip == null || ip.isEmpty()) ? Optional.empty() : Optional.of(ip);
    }

    @Override
    public String getHost() {
        return server == null ? "undefined" : server.getHost();
    }

    @Override
    public boolean isDiscoveryEnabled() {
        // The field is already typed as DiscoveryEnabledServer, so the original
        // `server instanceof DiscoveryEnabledServer` was just an obscured null check.
        return server != null;
    }

    @Override
    public int getPort() {
        return server == null ? -1 : server.getPort();
    }

    public int getSecurePort() {
        return server.getInstanceInfo().getSecurePort();
    }

    public boolean isSecurePortEnabled() {
        return server.getInstanceInfo().isPortEnabled(PortType.SECURE);
    }

    /**
     * @return the secure VIP when the server is addressed on its secure port,
     *         otherwise the plain VIP address.
     */
    public String getTarget() {
        final InstanceInfo instanceInfo = server.getInstanceInfo();
        if (server.getPort() == instanceInfo.getSecurePort()) {
            return instanceInfo.getSecureVipAddress();
        } else {
            return instanceInfo.getVIPAddress();
        }
    }

    public SimpleMetaInfo getMetaInfo() {
        return new SimpleMetaInfo(server.getMetaInfo());
    }

    /** @return the AWS availability zone, or null when not running on AmazonInfo metadata. */
    @Nullable
    public String getAvailabilityZone() {
        final InstanceInfo instanceInfo = server.getInstanceInfo();
        if (instanceInfo.getDataCenterInfo() instanceof AmazonInfo) {
            return ((AmazonInfo) instanceInfo.getDataCenterInfo()).getMetadata().get("availability-zone");
        }
        return null;
    }

    public String getZone() {
        return server.getZone();
    }

    public String getServerId() {
        return server.getInstanceInfo().getId();
    }

    public DiscoveryEnabledServer getServer() {
        return server;
    }

    @VisibleForTesting
    ServerStats getServerStats() {
        return this.serverStats;
    }

    public String getASGName() {
        return server.getInstanceInfo().getASGName();
    }

    /** @return the application name lower-cased with a fixed locale for stable comparisons. */
    public String getAppName() {
        return server.getInstanceInfo().getAppName().toLowerCase(Locale.ROOT);
    }

    public void noteResponseTime(double msecs) {
        serverStats.noteResponseTime(msecs);
    }

    public boolean isCircuitBreakerTripped() {
        return serverStats.isCircuitBreakerTripped();
    }

    public void incrementActiveRequestsCount() {
        serverStats.incrementActiveRequestsCount();
    }

    public void incrementOpenConnectionsCount() {
        serverStats.incrementOpenConnectionsCount();
    }

    public void incrementSuccessiveConnectionFailureCount() {
        serverStats.incrementSuccessiveConnectionFailureCount();
    }

    public void incrementNumRequests() {
        serverStats.incrementNumRequests();
    }

    public int getOpenConnectionsCount() {
        return serverStats.getOpenConnectionsCount();
    }

    public long getTotalRequestsCount() {
        return serverStats.getTotalRequestsCount();
    }

    public int getActiveRequestsCount() {
        return serverStats.getActiveRequestsCount();
    }

    public void decrementOpenConnectionsCount() {
        serverStats.decrementOpenConnectionsCount();
    }

    public void decrementActiveRequestsCount() {
        serverStats.decrementActiveRequestsCount();
    }

    public void clearSuccessiveConnectionFailureCount() {
        serverStats.clearSuccessiveConnectionFailureCount();
    }

    public void addToFailureCount() {
        serverStats.addToFailureCount();
    }

    public void stopPublishingStats() {
        serverStats.close();
    }

    @Override
    public int hashCode() {
        return Objects.hashCode(server);
    }

    /**
     * Two instances are deemed identical if they wrap the same underlying discovery server instance.
     */
    @Override
    public boolean equals(Object obj) {
        if (obj == this) {
            return true;
        }
        if (!(obj instanceof DiscoveryResult)) {
            return false;
        }
        final DiscoveryResult other = (DiscoveryResult) obj;
        // Null-safe to stay consistent with hashCode(), which uses Objects.hashCode(server);
        // the previous direct server.equals(...) threw NPE for a null server.
        return Objects.equals(server, other.server);
    }
}
| 6,501 |
0 | Create_ds/zuul/zuul-discovery/src/main/java/com/netflix/zuul | Create_ds/zuul/zuul-discovery/src/main/java/com/netflix/zuul/discovery/DynamicServerResolver.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.zuul.discovery;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Throwables;
import com.google.common.collect.Sets;
import com.netflix.client.config.CommonClientConfigKey;
import com.netflix.client.config.IClientConfig;
import com.netflix.loadbalancer.DynamicServerListLoadBalancer;
import com.netflix.loadbalancer.Server;
import com.netflix.niws.loadbalancer.DiscoveryEnabledServer;
import com.netflix.zuul.resolver.Resolver;
import com.netflix.zuul.resolver.ResolverListener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.Nullable;
import java.util.HashSet;
import java.util.List;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
/**
* @author Argha C
* @since 2/25/21
*
* Implements a resolver, wrapping a ribbon load-balancer.
*/
public class DynamicServerResolver implements Resolver<DiscoveryResult> {
    private static final Logger LOG = LoggerFactory.getLogger(DynamicServerResolver.class);

    /** The ribbon load balancer this resolver wraps. */
    private final DynamicServerListLoadBalancer<?> loadBalancer;
    /** Listener notified with removed servers on server-list changes; may be unset. */
    private ResolverListener<DiscoveryResult> listener;

    /**
     * @deprecated prefer {@link #DynamicServerResolver(IClientConfig)} plus
     * {@link #setListener(ResolverListener)}. Note that this constructor registers the
     * change callback before the listener field is assigned.
     */
    @Deprecated
    public DynamicServerResolver(IClientConfig clientConfig, ResolverListener<DiscoveryResult> listener) {
        this.loadBalancer = createLoadBalancer(clientConfig);
        this.loadBalancer.addServerListChangeListener(this::onUpdate);
        this.listener = listener;
    }

    public DynamicServerResolver(IClientConfig clientConfig) {
        this(createLoadBalancer(clientConfig));
    }

    public DynamicServerResolver(DynamicServerListLoadBalancer<?> loadBalancer) {
        this.loadBalancer = Objects.requireNonNull(loadBalancer);
    }

    /**
     * Registers the resolver listener and hooks it up to load balancer change events.
     * Only the first registration wins; subsequent calls are ignored with a warning.
     */
    @Override
    public void setListener(ResolverListener<DiscoveryResult> listener) {
        if (this.listener != null) {
            LOG.warn("Ignoring call to setListener, because a listener was already set");
            return;
        }
        this.listener = Objects.requireNonNull(listener);
        this.loadBalancer.addServerListChangeListener(this::onUpdate);
    }

    /**
     * Resolves a server via the load balancer.
     *
     * @param key optional load balancer key; may be null
     * @return the resolved server, or {@link DiscoveryResult#EMPTY} when none is available
     */
    @Override
    public DiscoveryResult resolve(@Nullable Object key) {
        final Server server = loadBalancer.chooseServer(key);
        return server != null
                ? new DiscoveryResult((DiscoveryEnabledServer) server, loadBalancer.getLoadBalancerStats())
                : DiscoveryResult.EMPTY;
    }

    @Override
    public boolean hasServers() {
        return !loadBalancer.getReachableServers().isEmpty();
    }

    @Override
    public void shutdown() {
        loadBalancer.shutdown();
    }

    /**
     * Reflectively instantiates the configured load balancer class (defaulting to
     * ZoneAwareLoadBalancer) and initializes it with the supplied client config.
     */
    private static DynamicServerListLoadBalancer<?> createLoadBalancer(IClientConfig clientConfig) {
        // TODO(argha-c): Revisit this style of LB initialization post modularization. Ideally the LB should be
        // pluggable.
        // Use a hard coded string for the LB default name to avoid a dependency on Ribbon classes.
        String loadBalancerClassName = clientConfig.get(
                CommonClientConfigKey.NFLoadBalancerClassName, "com.netflix.loadbalancer.ZoneAwareLoadBalancer");
        DynamicServerListLoadBalancer<?> lb;
        try {
            Class<?> clazz = Class.forName(loadBalancerClassName);
            lb = clazz.asSubclass(DynamicServerListLoadBalancer.class)
                    .getConstructor()
                    .newInstance();
            lb.initWithNiwsConfig(clientConfig);
        } catch (Exception e) {
            Throwables.throwIfUnchecked(e);
            throw new IllegalStateException("Could not instantiate LoadBalancer " + loadBalancerClassName, e);
        }
        return lb;
    }

    /**
     * Callback for load balancer server-list changes: notifies the listener with the set
     * of servers that were removed (present in the old list, absent from the new one).
     */
    @VisibleForTesting
    void onUpdate(List<Server> oldList, List<Server> newList) {
        final ResolverListener<DiscoveryResult> currentListener = listener;
        if (currentListener == null) {
            // Guards against updates firing before a listener is registered (possible via the
            // deprecated constructor, which wires the callback before assigning the listener).
            LOG.warn("Ignoring server list update because no listener is set");
            return;
        }
        Set<Server> oldSet = new HashSet<>(oldList);
        Set<Server> newSet = new HashSet<>(newList);
        final List<DiscoveryResult> discoveryResults = Sets.difference(oldSet, newSet).stream()
                .map(server ->
                        new DiscoveryResult((DiscoveryEnabledServer) server, loadBalancer.getLoadBalancerStats()))
                .collect(Collectors.toList());
        currentListener.onChange(discoveryResults);
    }
}
| 6,502 |
0 | Create_ds/zuul/zuul-discovery/src/main/java/com/netflix/zuul | Create_ds/zuul/zuul-discovery/src/main/java/com/netflix/zuul/discovery/SimpleMetaInfo.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.zuul.discovery;
import com.netflix.loadbalancer.Server.MetaInfo;
/**
* @author Argha C
* @since 2/25/21
*
* placeholder to mimic metainfo for a non-Eureka enabled server.
* This exists to preserve compatibility with some current logic, but should be revisited.
*/
public final class SimpleMetaInfo {

    /** The wrapped ribbon meta-info; every accessor simply delegates to it. */
    private final MetaInfo delegate;

    public SimpleMetaInfo(MetaInfo metaInfo) {
        this.delegate = metaInfo;
    }

    /** @return the server group of the underlying server. */
    public String getServerGroup() {
        return delegate.getServerGroup();
    }

    /** @return the service id used for discovery lookups. */
    public String getServiceIdForDiscovery() {
        return delegate.getServiceIdForDiscovery();
    }

    /** @return the unique instance id of the underlying server. */
    public String getInstanceId() {
        return delegate.getInstanceId();
    }
}
| 6,503 |
0 | Create_ds/zuul/zuul-discovery/src/main/java/com/netflix/zuul | Create_ds/zuul/zuul-discovery/src/main/java/com/netflix/zuul/discovery/NonDiscoveryServer.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.zuul.discovery;
import com.netflix.loadbalancer.Server;
import java.util.Objects;
/**
* @author Argha C
* @since 3/1/21
* <p>
* This exists merely to wrap a resolver lookup result, that is not discovery enabled.
*/
public final class NonDiscoveryServer implements ResolverResult {

    /** Immutable host/port pair modelled as a ribbon Server. */
    private final Server server;

    /**
     * @param host target host name; must not be null
     * @param port target port; must lie within the valid TCP range
     */
    public NonDiscoveryServer(String host, int port) {
        Objects.requireNonNull(host, "host name");
        this.server = new Server(host, validatePort(port));
    }

    @Override
    public String getHost() {
        return server.getHost();
    }

    @Override
    public int getPort() {
        return server.getPort();
    }

    /** Always false: instances of this class are, by definition, not discovery enabled. */
    @Override
    public boolean isDiscoveryEnabled() {
        return false;
    }

    /** Rejects ports outside the valid TCP range [0, 65535]; returns the port unchanged. */
    private static int validatePort(int port) {
        if (port < 0 || port > 0xFFFF) {
            throw new IllegalArgumentException("port out of range:" + port);
        }
        return port;
    }
}
| 6,504 |
0 | Create_ds/zuul/zuul-discovery/src/main/java/com/netflix/zuul | Create_ds/zuul/zuul-discovery/src/main/java/com/netflix/zuul/discovery/ResolverResult.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.zuul.discovery;
/**
* @author Argha C
* @since 2/25/21
*
* Wraps the result of a resolution attempt.
* At this time, it doesn't encapsulate a collection of instances, but ideally should.
*/
public interface ResolverResult {
    // TODO(argha-c): This should ideally model returning a collection of host/port pairs.

    /** @return the resolved host name. */
    String getHost();

    /** @return the resolved port. */
    int getPort();

    /** @return true when the result is backed by a discovery (Eureka) enabled server. */
    boolean isDiscoveryEnabled();
}
| 6,505 |
0 | Create_ds/zuul/zuul-discovery/src/main/java/com/netflix/zuul | Create_ds/zuul/zuul-discovery/src/main/java/com/netflix/zuul/resolver/Resolver.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.zuul.resolver;
/**
* @author Argha C
* @since 2/25/21
*
* Resolves a key to a discovery result type.
*/
public interface Resolver<T> {
    /**
     * Resolves the given key to a result.
     *
     * @param key unique identifier that may be used by certain resolvers as part of lookup. Implementations
     * can narrow this down to be nullable.
     * @return the result of a resolver lookup
     */
    // TODO(argha-c) Param needs to be typed, once the ribbon LB lookup API is figured out.
    T resolve(Object key);
    /**
     * @return true if the resolver has available servers, false otherwise
     */
    boolean hasServers();
    /**
     * hook to perform activities on shutdown
     */
    void shutdown();
    /**
     * Registers a listener to be notified of resolver updates.
     * The default implementation is a no-op; implementations that support change
     * notifications should override it.
     */
    default void setListener(ResolverListener<T> listener) {}
}
| 6,506 |
0 | Create_ds/zuul/zuul-discovery/src/main/java/com/netflix/zuul | Create_ds/zuul/zuul-discovery/src/main/java/com/netflix/zuul/resolver/ResolverListener.java | /*
* Copyright 2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.zuul.resolver;
import java.util.List;
/**
* @author Argha C
* @since 2/25/21
*
* Listener for resolver updates.
*/
public interface ResolverListener<T> {
    /**
     * Hook to respond to resolver updates.
     *
     * @param removedSet the servers removed from the latest resolver update, but included in the previous update.
     */
    void onChange(List<T> removedSet);
}
| 6,507 |
0 | Create_ds/blitz4j/src/test/java/com/netflix | Create_ds/blitz4j/src/test/java/com/netflix/blitz4j/TestBlitz4j.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.blitz4j;
import java.util.Collection;
import java.util.List;
import java.util.Properties;
import org.junit.After;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.config.ConfigurationManager;
import com.netflix.logging.messaging.BatcherFactory;
import com.netflix.logging.messaging.MessageBatcher;
import com.netflix.servo.DefaultMonitorRegistry;
import com.netflix.servo.monitor.CompositeMonitor;
import com.netflix.servo.monitor.Monitor;
public class TestBlitz4j {
    protected Properties props = new Properties();
    private static final String consoleSummarizeEvent = "stdout_summarizeEvent";
    private static final String consoleEventsProcessed = "stdout_putInBuffer";

    @After
    public void tearDown() throws Exception {
        props.clear();
        // The async batcher for the console appender must have been torn down by stop().
        MessageBatcher asyncBatcherConsole = BatcherFactory
                .getBatcher(AsyncAppender.class.getName() + "." + "stdout");
        Assert.assertNull(asyncBatcherConsole);
    }

    @Test
    public void testAsyncAppenders() throws Exception {
        runAsyncAppenderLoadTest("INFO,stdout");
    }

    @Test
    public void testAsyncAppendersWithWhiteSpace() throws Exception {
        // Same scenario as testAsyncAppenders, but the appender binding contains whitespace
        // after the comma, to verify the configuration parser tolerates it.
        runAsyncAppenderLoadTest("INFO, stdout");
    }

    @Test
    public void testReconfiguration() throws Exception {
        runReconfigurationTest();
    }

    @Test
    public void testFullReconfiguration() throws Exception {
        runReconfigurationTest();
    }

    /**
     * Hammers the async console appender from 100 logging threads while another thread
     * concurrently churns the dynamic configuration, then asserts no messages were lost
     * (added + summarized >= total logged).
     *
     * @param testClassLoggerBinding the level/appender binding for this test class's logger,
     *                               e.g. "INFO,stdout" or "INFO, stdout"
     */
    private void runAsyncAppenderLoadTest(String testClassLoggerBinding) throws Exception {
        props.setProperty("log4j.rootCategory", "OFF");
        props.setProperty("log4j.logger.com.netflix.blitz4j.TestBlitz4j",
                testClassLoggerBinding);
        props.setProperty("log4j.logger.com.netflix.blitz4j.TestBlitz4j$1",
                "INFO,stdout");
        props.setProperty("log4j.appender.stdout",
                "org.apache.log4j.ConsoleAppender");
        props.setProperty("log4j.appender.stdout.layout",
                "com.netflix.logging.log4jAdapter.NFPatternLayout");
        props.setProperty("log4j.appender.stdout.layout.ConversionPattern",
                "%d %-5p %C:%L [%t] [%M] %m%n");
        props.setProperty("log4j.logger.asyncAppenders", "INFO,stdout");
        // Large wait so the batcher does not flush/summarize prematurely during the test.
        props.setProperty(
                "batcher.com.netflix.logging.AsyncAppender.stdout.waitTimeinMillis",
                "120000");
        LoggingConfiguration.getInstance().configure(props);

        int noOfThreads = 100;
        Thread[] loggingThreads = new Thread[noOfThreads];
        for (int i = 0; i < noOfThreads; i++) {
            loggingThreads[i] = new Thread(new Runnable() {
                public void run() {
                    int count = 0;
                    while (count < 1000) {
                        count++;
                        Logger slflogger = LoggerFactory
                                .getLogger(TestBlitz4j.class);
                        slflogger.info("Testing named log with this string {}",
                                "Test String");
                        Thread.yield();
                        try {
                            Thread.sleep(10);
                        } catch (InterruptedException e) {
                            e.printStackTrace();
                        }
                    }
                }
            });
        }
        // Concurrently mutate dynamic config to exercise reconfiguration under load.
        Thread configChurnThread = new Thread(new Runnable() {
            public void run() {
                int iteration = 0;
                while (iteration <= 1000) {
                    try {
                        ConfigurationManager.getConfigInstance().setProperty(
                                "log4j.junk", (iteration++) + "");
                        Thread.sleep(10);
                        Thread.yield();
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            }
        });
        for (int i = 0; i < noOfThreads; i++) {
            loggingThreads[i].start();
        }
        configChurnThread.start();
        configChurnThread.join();
        for (int i = 0; i < noOfThreads; i++) {
            loggingThreads[i].join();
        }

        int numSummarizedConsole = 0;
        try {
            numSummarizedConsole = Integer
                    .valueOf(getMonitoringData(consoleSummarizeEvent));
        } catch (Throwable e) {
            // The summarize metric may be absent when nothing was summarized; treat as zero.
        }
        int numAddedConsole = Integer
                .valueOf(getMonitoringData(consoleEventsProcessed));
        System.out
                .println("The number of messages added to async batcher console: "
                        + numAddedConsole);
        System.out
                .println("The number of messages summarized from async batcher console : "
                        + numSummarizedConsole);
        System.out.println("Total number of messages to asyncBatcher console: "
                + (numAddedConsole + numSummarizedConsole));
        // 100 threads x 1000 messages: every message must be either buffered or summarized.
        Assert.assertTrue(((numAddedConsole + numSummarizedConsole) >= 100000));
        LoggingConfiguration.getInstance().stop();
    }

    /**
     * Verifies that a dynamic level change on this class's logger takes effect
     * (DEBUG becomes enabled) and is reverted when set back to INFO.
     */
    private void runReconfigurationTest() throws Exception {
        Properties localProps = new Properties();
        localProps.setProperty("log4j.rootCategory", "INFO,stdout");
        localProps.setProperty("log4j.appender.stdout", "org.apache.log4j.ConsoleAppender");
        localProps.setProperty("log4j.appender.stdout.layout", "com.netflix.logging.log4jAdapter.NFPatternLayout");
        localProps.setProperty("log4j.appender.stdout.layout.ConversionPattern", "%d %-5p %C:%L [%t] [%M] %m%n");
        localProps.setProperty("log4j.logger.asyncAppenders", "INFO,stdout");
        LoggingConfiguration.getInstance().configure(localProps);
        org.slf4j.Logger slfLogger = LoggerFactory.getLogger(this.getClass());
        ConfigurationManager.getConfigInstance().setProperty(
                "log4j.logger.com.netflix.blitz4j.TestBlitz4j", "DEBUG");
        // Reconfiguration is asynchronous; give it time to propagate.
        Thread.sleep(5000);
        Assert.assertTrue(slfLogger.isDebugEnabled());
        slfLogger.debug("You should see this");
        ConfigurationManager.getConfigInstance().setProperty(
                "log4j.logger.com.netflix.blitz4j.TestBlitz4j", "INFO");
        Thread.sleep(4000);
        Assert.assertFalse(slfLogger.isDebugEnabled());
        LoggingConfiguration.getInstance().stop();
    }

    /**
     * Looks up a metric's current value by name from the servo monitor registry.
     *
     * @return the metric value as a string, or null if no such metric is registered
     */
    private String getMonitoringData(String metricName) {
        for (Object m : DefaultMonitorRegistry.getInstance().getRegisteredMonitors()) {
            if (m instanceof CompositeMonitor) {
                for (Monitor<?> child : ((CompositeMonitor<?>) m).getMonitors()) {
                    if (metricName.equalsIgnoreCase(child.getConfig().getName())) {
                        return child.getValue() + "";
                    }
                }
            }
        }
        return null;
    }
}
| 6,508 |
0 | Create_ds/blitz4j/src/test/java/com/netflix | Create_ds/blitz4j/src/test/java/com/netflix/blitz4j/LoggingConfigurationTest.java | package com.netflix.blitz4j;
import java.util.Properties;
import org.junit.Assert;
import org.junit.Test;
public class LoggingConfigurationTest {

    // NOTE: JUnit 4 requires @Test methods to be public; the previous package-private
    // declarations would fail JUnit 4's runner validation. Method-name typo
    // ("Regresh") fixed as well.

    @Test
    public void updateToDifferentConfigurationTriggersRefresh() {
        LoggingConfiguration config = new LoggingConfiguration();
        config.configure(new Properties());
        Properties newProps = new Properties();
        newProps.setProperty("log4j.logger.foo", "INFO");
        newProps.setProperty("log4j.logger.bar", "INFO");
        Assert.assertEquals(0, config.getRefreshCount());
        // A genuinely different configuration must trigger exactly one refresh.
        config.reconfigure(newProps);
        Assert.assertEquals(1, config.getRefreshCount());
        Assert.assertEquals(newProps, config.getOverrideProperties());
        // Re-applying the identical configuration must not refresh again.
        config.reconfigure(newProps);
        Assert.assertEquals(1, config.getRefreshCount());
        Assert.assertEquals(newProps, config.getOverrideProperties());
    }

    @Test
    public void updateWithSameConfigurationDoesNotTriggerRefresh() {
        Properties props = new Properties();
        props.setProperty("log4j.logger.foo", "INFO");
        props.setProperty("log4j.logger.bar", "INFO");
        LoggingConfiguration config = new LoggingConfiguration();
        config.configure(props);
        Assert.assertEquals(0, config.getRefreshCount());
        // Reconfiguring with the configuration already in effect is a no-op.
        config.reconfigure(props);
        Assert.assertEquals(0, config.getRefreshCount());
    }

    @Test
    public void updateWithSameConfigurationAndExistingOverridesDoesNotTriggerRefresh() {
        Properties props = new Properties();
        props.setProperty("log4j.logger.foo", "INFO");
        LoggingConfiguration config = new LoggingConfiguration();
        config.configure(props);
        Assert.assertEquals(0, config.getRefreshCount());
        // A single override applied via setProperty counts as one refresh.
        config.setProperty(null, "log4j.logger.bar", "INFO", false);
        Assert.assertEquals(1, config.getRefreshCount());
        // Reconfiguring with properties equal to base + existing overrides is a no-op.
        Properties fullProperties = new Properties();
        fullProperties.setProperty("log4j.logger.foo", "INFO");
        fullProperties.setProperty("log4j.logger.bar", "INFO");
        config.reconfigure(fullProperties);
        Assert.assertEquals(1, config.getRefreshCount());
    }
}
| 6,509 |
0 | Create_ds/blitz4j/src/main/java/com/netflix | Create_ds/blitz4j/src/main/java/com/netflix/blitz4j/LoggingContext.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.blitz4j;
import com.netflix.servo.monitor.Monitors;
import com.netflix.servo.monitor.Stopwatch;
import com.netflix.servo.monitor.Timer;
import org.apache.log4j.Category;
import org.apache.log4j.Level;
import org.apache.log4j.MDC;
import org.apache.log4j.spi.LocationInfo;
import org.apache.log4j.spi.LoggingEvent;
import java.util.HashSet;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
/**
* The utility class that caches the context of logging such as location
* information.
*
* <p>
* It is expensive to find out the location information (ie) calling class, line
* number etc of the logger and hence caching would be useful whenever possible.
* This class also generates location information slightly more efficiently than
* log4j.
* <p>
*
* @author Karthik Ranganathan
*
*/
public class LoggingContext {

    public static final String CONTEXT_LEVEL = "contextlevel";
    private static final BlitzConfig CONFIGURATION = LoggingConfiguration.getInstance().getConfiguration();
    private static final String LOCATION_INFO = "locationInfo";
    // Caller stack frame cached for the duration of one logging call so that
    // the stack is not re-walked for the same event.
    private ThreadLocal<StackTraceElement> stackLocal = new ThreadLocal<StackTraceElement>();
    // The event this thread is currently processing; a different incoming
    // event invalidates the cached location data (see generateLocationInfo).
    private ThreadLocal<LoggingEvent> loggingEvent = new ThreadLocal<LoggingEvent>();
    // NOTE(review): never read or written in this class - the context level is
    // kept in the MDC instead (see setContextLevel). Retained as-is.
    private ThreadLocal<Level> contextLevel = new ThreadLocal<Level>();
    // Copy-on-write set (updated via CAS) of loggers that need location info
    // generated, i.e. loggers registered through shouldGenerateLocationInfo.
    private final AtomicReference<HashSet<Category>> loggerNeedsLocationRef = new AtomicReference<>(new HashSet<Category>());
    private static final LoggingContext instance = new LoggingContext();
    private Timer stackTraceTimer = Monitors.newTimer("getStacktraceElement",
            TimeUnit.NANOSECONDS);

    private LoggingContext() {
        try {
            Monitors.registerObject(this);
        } catch (Throwable e) {
            if (CONFIGURATION.shouldPrintLoggingErrors()) {
                e.printStackTrace();
            }
        }
    }

    /**
     * Gets the starting calling stack trace element of a given stack which
     * matches the given class name. Given the wrapper class name, the match
     * continues until the last stack trace element of the wrapper class is
     * matched; the frame immediately after that run is the real caller.
     *
     * @param stackClass
     *            - The class to be matched for. Get the last matching class
     *            down the stack
     * @return - StackTraceElement which denotes the calling point of given
     *         class or wrapper class, or null if no frame of the given class
     *         is present (or its run ends at the bottom of the stack)
     */
    public StackTraceElement getStackTraceElement(Class stackClass) {
        Stopwatch s = stackTraceTimer.start();
        Throwable t = new Throwable();
        StackTraceElement[] stArray = t.getStackTrace();
        int stackSize = stArray.length;
        StackTraceElement st = null;
        for (int i = 0; i < stackSize; i++) {
            boolean found = false;
            // Skip every consecutive frame of the wrapper class. Bounds-checked:
            // the previous version evaluated stArray[i] after ++i and threw
            // ArrayIndexOutOfBoundsException when the matched run reached the
            // last frame of the stack.
            while (i < stackSize
                    && stArray[i].getClassName().equals(stackClass.getName())) {
                ++i;
                found = true;
            }
            if (found && i < stackSize) {
                st = stArray[i];
            }
        }
        s.stop();
        return st;
    }

    /**
     * Get the location information of the calling class.
     *
     * @param wrapperClassName
     *            - The wrapper that indicates the caller
     * @return the location information, or null if it could not be determined
     */
    public LocationInfo getLocationInfo(Class wrapperClassName) {
        LocationInfo locationInfo = null;
        try {
            if (stackLocal.get() == null) {
                stackLocal.set(this.getStackTraceElement(wrapperClassName));
            }
            locationInfo = new LocationInfo(stackLocal.get().getFileName(),
                    stackLocal.get().getClassName(), stackLocal.get()
                            .getMethodName(), stackLocal.get().getLineNumber()
                            + "");
        } catch (Throwable e) {
            if (CONFIGURATION.shouldPrintLoggingErrors()) {
                e.printStackTrace();
            }
        }
        return locationInfo;
    }

    /**
     * Clears any logging information that was cached for the purpose of
     * logging (both the MDC entry and the thread-local stack frame).
     */
    private void clearLocationInfo() {
        MDC.remove(LOCATION_INFO);
        stackLocal.set(null);
    }

    public static LoggingContext getInstance() {
        return instance;
    }

    /**
     * Generate the location information of the given logging event and cache
     * it in the MDC. Async appenders read it from the MDC because they process
     * the event on a different thread from the one that produced it.
     *
     * @param event
     *            The logging event for which the location information needs to
     *            be determined.
     * @return The location info object, or null if it was not generated.
     */
    public LocationInfo generateLocationInfo(LoggingEvent event) {
        // If the event is not the same, clear the cache
        if (event != loggingEvent.get()) {
            loggingEvent.set(event);
            clearLocationInfo();
        }
        LocationInfo locationInfo = null;
        try {
            // We should only generate location info if the caller is using
            // NFPatternLayout; otherwise this is expensive and unused.
            if (isUsingNFPatternLayout(event.getLogger())) {
                locationInfo = LoggingContext
                        .getInstance()
                        .getLocationInfo(Class.forName(event.getFQNOfLoggerClass()));
                if (locationInfo != null) {
                    MDC.put(LOCATION_INFO, locationInfo);
                }
            }
        } catch (Throwable e) {
            if (CONFIGURATION != null && CONFIGURATION
                    .shouldPrintLoggingErrors()) {
                e.printStackTrace();
            }
        }
        return locationInfo;
    }

    /**
     * Marks the given logger as needing location information generated.
     * Implemented as a copy-on-write set published via compare-and-set so that
     * readers (every log call) never take a lock.
     */
    public void shouldGenerateLocationInfo(Category logger) {
        HashSet<Category> loggerNeedsLocation = loggerNeedsLocationRef.get();
        do {
            // If we've already seen this logger return immediately.
            if (loggerNeedsLocation.contains(logger)) {
                return;
            }
            // Publish a copy containing the new logger.
            HashSet<Category> copy = new HashSet<>(loggerNeedsLocation);
            copy.add(logger);
            if (loggerNeedsLocationRef.compareAndSet(loggerNeedsLocation, copy)) {
                return;
            }
            // Lost the CAS race to another writer; re-read and try again.
            loggerNeedsLocation = loggerNeedsLocationRef.get();
        } while (true);
    }

    private boolean isUsingNFPatternLayout(Category logger) {
        // Assume we don't need location info until proven otherwise
        return logger != null && loggerNeedsLocationRef.get().contains(logger);
    }

    /**
     * Get the location information of the logging event. If the information
     * has been cached it is retrieved from the MDC (for asynchronous events
     * MDCs are retained), else it is generated.
     *
     * @param event
     *            - The logging event
     * @return- The location information of the logging event.
     */
    public LocationInfo getLocationInfo(LoggingEvent event) {
        if (event != loggingEvent.get()) {
            loggingEvent.set(event);
            clearLocationInfo();
        }
        // For async appenders, the locationInfo is set in the MDC and not with
        // the thread since the thread that processes the logging is different
        // from the one that generates location information.
        LocationInfo locationInfo = (LocationInfo) event.getMDC(LOCATION_INFO);
        if (locationInfo == null) {
            locationInfo = this.generateLocationInfo(event);
        }
        return locationInfo;
    }

    /**
     * Set the context {@link Level} for the request-based logging.
     *
     * @param level - The level of logging to be enabled for this request
     */
    public void setContextLevel(Level level) {
        MDC.put(CONTEXT_LEVEL, level);
    }

    /**
     * Clears the context {@link Level} set for the request-based logging.
     */
    public void clearContextLevel() {
        MDC.remove(CONTEXT_LEVEL);
    }

    /**
     * Get the context {@link Level} for the request-based logging.
     *
     * @return level - The level of logging enabled for this request, or null
     */
    public Level getContextLevel() {
        return (Level) MDC.get(CONTEXT_LEVEL);
    }
}
| 6,510 |
0 | Create_ds/blitz4j/src/main/java/com/netflix | Create_ds/blitz4j/src/main/java/com/netflix/blitz4j/NFRepositorySelector.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.blitz4j;
import org.apache.log4j.spi.LoggerRepository;
import org.apache.log4j.spi.RepositorySelector;
/**
*
* A Repository Selector class that overrides log4j to provide a less contended implementation.
* @author Karthik Ranganathan
*
*/
public class NFRepositorySelector implements RepositorySelector {
final LoggerRepository repository;
public NFRepositorySelector(LoggerRepository repository) {
this.repository = repository;
}
public LoggerRepository getLoggerRepository() {
return repository;
}
}
| 6,511 |
0 | Create_ds/blitz4j/src/main/java/com/netflix | Create_ds/blitz4j/src/main/java/com/netflix/blitz4j/NFHierarchy.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.blitz4j;
import java.util.AbstractQueue;
import java.util.Iterator;
import java.util.concurrent.ConcurrentLinkedQueue;
import org.apache.log4j.Appender;
import org.apache.log4j.Category;
import org.apache.log4j.Hierarchy;
import org.apache.log4j.Logger;
import org.apache.log4j.helpers.LogLog;
import org.apache.log4j.spi.HierarchyEventListener;
import org.apache.log4j.spi.LoggerFactory;
/**
* A Hierarchy class that overrides log4j to provide a less contended
* implementation.
*
* @author Karthik Ranganathan
*
*/
/*
 * Behaviorally identical rewrite: same listener registry semantics, same
 * notification order, same factory indirection.
 */
public class NFHierarchy extends Hierarchy {
    // Factory used for logger creation; replaceable via setLoggerFactory.
    private LoggerFactory myFactory;
    // Lock-free listener registry (assigned once in the constructor, never
    // null afterwards).
    private AbstractQueue<HierarchyEventListener> listeners;

    public NFHierarchy(Logger root) {
        super(root);
        myFactory = new NFCategoryFactory();
        listeners = new ConcurrentLinkedQueue<HierarchyEventListener>();
    }

    /**
     * Looks up (or creates) the named logger using this hierarchy's factory.
     */
    @Override
    public Logger getLogger(String name) {
        return getLogger(name, myFactory);
    }

    /**
     * Registers a hierarchy listener; duplicate registrations are ignored
     * with a warning.
     */
    @Override
    public void addHierarchyEventListener(HierarchyEventListener listener) {
        if (!listeners.contains(listener)) {
            listeners.add(listener);
        } else {
            LogLog.warn("Ignoring attempt to add an existent listener.");
        }
    }

    /**
     * Notifies every registered listener that an appender was attached to the
     * given logger.
     */
    @Override
    public void fireAddAppenderEvent(Category logger, Appender appender) {
        for (HierarchyEventListener registered : listeners) {
            registered.addAppenderEvent(logger, appender);
        }
    }

    /**
     * Notifies every registered listener that an appender was detached from
     * the given logger.
     */
    public void fireRemoveAppenderEvent(Category logger, Appender appender) {
        for (HierarchyEventListener registered : listeners) {
            registered.removeAppenderEvent(logger, appender);
        }
    }

    /**
     * Replaces the factory used by subsequent {@link #getLogger(String)}
     * calls.
     */
    public void setLoggerFactory(LoggerFactory loggerFactory) {
        this.myFactory = loggerFactory;
    }
}
| 6,512 |
0 | Create_ds/blitz4j/src/main/java/com/netflix | Create_ds/blitz4j/src/main/java/com/netflix/blitz4j/NFAppenderAttachableImpl.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.blitz4j;
import java.util.AbstractQueue;
import java.util.Enumeration;
import java.util.Iterator;
import java.util.concurrent.ConcurrentLinkedQueue;
import org.apache.commons.collections.iterators.IteratorEnumeration;
import org.apache.log4j.Appender;
import org.apache.log4j.helpers.AppenderAttachableImpl;
import org.apache.log4j.spi.AppenderAttachable;
import org.apache.log4j.spi.LoggingEvent;
/**
 * This class overrides the log4j implementation to provide an appender
 * attachment implementation with less multi-threaded contention.
*
* @author Karthik Ranganathan
*
*/
public class NFAppenderAttachableImpl extends AppenderAttachableImpl implements
        AppenderAttachable {
    // Live appender list; a concurrent queue avoids log4j's synchronization.
    protected AbstractQueue<Appender> appenderList = new ConcurrentLinkedQueue<Appender>();
    // Names of appenders attached since the last removeAllAppenders(); used by
    // reconcileAppenders() to drop appenders missing from the new config.
    private AbstractQueue<String> configuredAppenderList = new ConcurrentLinkedQueue<String>();

    /**
     * Attaches an appender. If an equal appender is already attached, the new
     * instance is added first and the previous one is then removed and closed,
     * so there is no window during which no appender is attached.
     *
     * @param newAppender the appender to attach; nulls are ignored
     */
    @Override
    public void addAppender(Appender newAppender) {
        // Null values for newAppender parameter are strictly forbidden.
        if (newAppender == null) {
            return;
        }
        boolean isAppenderPresent = appenderList.contains(newAppender);
        String appenderName = newAppender.getName() != null ? newAppender.getName() : "default";
        if (isAppenderPresent) {
            // Add the replacement before removing the previous instance so the
            // list never becomes empty mid-swap.
            appenderList.add(newAppender);
            for (Iterator<Appender> it = appenderList.iterator(); it.hasNext();) {
                Appender appender = it.next();
                if (newAppender.equals(appender)) {
                    configuredAppenderList.add(appenderName);
                    it.remove();
                    appender.close();
                    return;
                }
            }
        } else {
            appenderList.add(newAppender);
        }
        configuredAppenderList.add(appenderName);
    }

    /**
     * Delivers the event to every attached appender.
     *
     * @param event the event to append
     * @return the number of appenders the event was offered to
     */
    @Override
    public int appendLoopOnAppenders(LoggingEvent event) {
        int size = 0;
        if (appenderList != null) {
            size = appenderList.size();
            for (Appender appender : appenderList) {
                appender.doAppend(event);
            }
        }
        return size;
    }

    /**
     * @return an enumeration over the attached appenders, or null when the
     *         list itself is absent (mirrors the log4j contract)
     */
    @Override
    public Enumeration getAllAppenders() {
        if (appenderList == null) {
            return null;
        }
        return new IteratorEnumeration(appenderList.iterator());
    }

    /**
     * @param name the appender name to look up
     * @return the first attached appender with the given name, or null
     */
    @Override
    public Appender getAppender(String name) {
        if (appenderList == null || name == null) {
            return null;
        }
        for (Appender appender : appenderList) {
            if (name.equals(appender.getName())) {
                return appender;
            }
        }
        return null;
    }

    /**
     * @param appender the appender instance to test
     * @return true if this exact instance (reference equality) is attached
     */
    @Override
    public boolean isAttached(Appender appender) {
        if (appenderList == null || appender == null) {
            return false;
        }
        for (Appender attached : appenderList) {
            if (attached == appender) {
                return true;
            }
        }
        return false;
    }

    /**
     * Removes and closes all attached appenders except the asynchronous ones.
     * Async appenders are deliberately retained: this method runs during
     * dynamic log4j reconfiguration, and they must keep the ability to queue
     * messages across a reconfigure.
     */
    @Override
    public void removeAllAppenders() {
        this.configuredAppenderList.clear();
        if (appenderList == null) {
            return;
        }
        // Loop-invariant: fetch the async implementation names once.
        String[] asyncAppenders = LoggingConfiguration.getInstance()
                .getConfiguration().getAsyncAppenderImplementationNames();
        Iterator<Appender> it = appenderList.iterator();
        while (it.hasNext()) {
            Appender a = it.next();
            // BUGFIX: remove only when the appender matches NONE of the async
            // implementations. The previous logic removed an appender as soon
            // as its class differed from ANY single async name, which (a)
            // removed async appenders whenever more than one implementation
            // was configured and (b) could call Iterator.remove() twice for
            // the same element, throwing IllegalStateException.
            boolean isAsyncAppender = false;
            for (String asyncAppender : asyncAppenders) {
                if (asyncAppender.equals(a.getClass().getName())) {
                    isAsyncAppender = true;
                    break;
                }
            }
            if (!isAsyncAppender) {
                it.remove();
                a.close();
            }
        }
    }

    /**
     * Detaches the given appender (without closing it).
     */
    @Override
    public void removeAppender(Appender appender) {
        if (appender == null || appenderList == null) {
            return;
        }
        appenderList.remove(appender);
        configuredAppenderList.remove(appender.getName());
    }

    /**
     * Detaches the first appender with the given name (without closing it).
     */
    @Override
    public void removeAppender(String name) {
        if (name == null || appenderList == null) {
            return;
        }
        Iterator<Appender> it = appenderList.iterator();
        while (it.hasNext()) {
            Appender a = it.next();
            if (name.equals(a.getName())) {
                it.remove();
                configuredAppenderList.remove(a.getName());
                break;
            }
        }
    }

    /**
     * Reconciles the appender list after configuration to ensure that the
     * asynchronous appenders left attached during configuration do not linger
     * if the new configuration no longer references them. Needed because
     * appenders are not cleaned out completely during configuration, so that
     * queued messages are not lost.
     */
    public void reconcileAppenders() {
        for (Appender appender : appenderList) {
            if (!configuredAppenderList.contains(appender.getName())) {
                appender.close();
                appenderList.remove(appender);
            }
        }
    }
}
| 6,513 |
0 | Create_ds/blitz4j/src/main/java/com/netflix | Create_ds/blitz4j/src/main/java/com/netflix/blitz4j/LoggerCache.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.blitz4j;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.log4j.Logger;
/**
* The class that caches log4j loggers.
*
* <p>
 * This will be much more contention-free than log4j's caching, since the cache uses a {@link java.util.concurrent.ConcurrentHashMap} instead of a synchronized {@link java.util.Map}
* </p>
*
* @author Karthik Ranganathan
*
*/
/*
 * Behaviorally identical rewrite: same lazy get-then-put caching, same
 * singleton access, same clear semantics.
 */
public class LoggerCache {
    private static LoggerCache instance = new LoggerCache();
    // Backed by a ConcurrentHashMap so concurrent readers never block.
    private Map<String, Logger> appenderLoggerMap = new ConcurrentHashMap<String, Logger>(5000);

    private LoggerCache() {
    }

    public static LoggerCache getInstance() {
        return instance;
    }

    /**
     * Get the logger to be used for the given class.
     *
     * @param clazz - The class for which the logger needs to be returned
     * @return- The log4j logger object
     */
    public Logger getOrCreateLogger(String clazz) {
        Logger cached = appenderLoggerMap.get(clazz);
        if (cached != null) {
            return cached;
        }
        // Racing puts from multiple threads are acceptable here: this is a
        // one-time population per key, so the last write simply wins.
        Logger created = Logger.getLogger(clazz);
        appenderLoggerMap.put(clazz, created);
        return created;
    }

    public void clearAll() {
        appenderLoggerMap.clear();
    }
}
| 6,514 |
0 | Create_ds/blitz4j/src/main/java/com/netflix | Create_ds/blitz4j/src/main/java/com/netflix/blitz4j/AsyncAppender.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.blitz4j;
import java.text.MessageFormat;
import java.util.Enumeration;
import java.util.Iterator;
import java.util.List;
import java.util.Map.Entry;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;
import com.netflix.servo.DefaultMonitorRegistry;
import com.netflix.servo.monitor.BasicCounter;
import com.netflix.servo.monitor.Counter;
import com.netflix.servo.monitor.MonitorConfig;
import com.netflix.servo.monitor.Monitors;
import com.netflix.servo.monitor.Stopwatch;
import com.netflix.servo.monitor.Timer;
import org.apache.log4j.Appender;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.helpers.AppenderAttachableImpl;
import org.apache.log4j.spi.AppenderAttachable;
import org.apache.log4j.spi.LocationInfo;
import org.apache.log4j.spi.LoggingEvent;
import com.google.common.cache.CacheBuilder;
import com.netflix.logging.messaging.BatcherFactory;
import com.netflix.logging.messaging.MessageBatcher;
import com.netflix.logging.messaging.MessageProcessor;
import com.netflix.servo.annotations.DataSourceType;
/**
* A log4j appender implementation that logs the events asynchronously after
* storing the events in a buffer. The buffer implementation uses an instance of
* {@link com.netflix.logging.messaging.MessageBatcher}.
* <p>
* Incoming events are first stored in a queue and then worker thread(s) takes
* the messages and writes it to the underlying appenders. This makes the
* logging of the messages efficient for the following reasons
*
* 1) Logging threads do not block until the event is written to the
* destination, but block only until the message is written to the queue which
* should be way faster than having to wait until it is written to the
* underlying destination
*
* 2) During log storms, the in-memory buffer overflows the message to another
* structure which logs just the summary and not each log message
* </p>
* <p>
* By default the buffer holds up to 10K messages and summary up to 5K entries.
* Depending on the memory constraints and logging frequency, both these are
* configurable. The summary also starts dropping its entries when it stays
* there longer than 1 min which is configurable as well.
* </p>
*
* @author Karthik Ranganathan
*
*/
public class AsyncAppender extends AppenderSkeleton implements
AppenderAttachable {
private static final BlitzConfig CONFIGURATION = LoggingConfiguration
.getInstance().getConfiguration();
private static final int SLEEP_TIME_MS = 1;
private static final String BATCHER_NAME_LIMITER = ".";
private static final String APPENDER_NAME = "ASYNC";
private MessageBatcher<LoggingEvent> batcher;
private String originalAppenderName;
private static final String LOGGER_ASYNC_APPENDER = "asyncAppenders";
private AppenderAttachableImpl appenders = new AppenderAttachableImpl();
// The Map to the summary events
private ConcurrentMap<String, LogSummary> logSummaryMap = new ConcurrentHashMap<String, LogSummary>();
private Timer putBufferTimeTracer;
private Timer putDiscardMapTimeTracer;
private Timer locationInfoTimer;
private Timer saveThreadLocalTimer;
private Counter summarizeEventCounter;
private Counter discardEventCounter;
private Counter putInBufferCounter;
public AsyncAppender() {
this.name = APPENDER_NAME;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime
* result
+ ((originalAppenderName == null) ? 0 : originalAppenderName
.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
AsyncAppender other = (AsyncAppender) obj;
if (originalAppenderName == null) {
if (other.originalAppenderName != null)
return false;
} else if (!originalAppenderName.equals(other.originalAppenderName))
return false;
return true;
}
/**
* Initialize the batcher that stores the messages and calls the underlying
* appenders.
*
* @param appenderName
* - The name of the appender for which the batcher is created
*/
private void initBatcher(String appenderName) {
MessageProcessor<LoggingEvent> messageProcessor = new MessageProcessor<LoggingEvent>() {
@Override
public void process(List<LoggingEvent> objects) {
processLoggingEvents(objects);
}
};
String batcherName = this.getClass().getName() + BATCHER_NAME_LIMITER
+ appenderName;
batcher = BatcherFactory.createBatcher(batcherName, messageProcessor);
batcher.setTarget(messageProcessor);
}
/**
* Process the logging events. This is called by the batcher.
*
* @param loggingEvents
* - The logging events to be written to the underlying appender
*/
private void processLoggingEvents(List<LoggingEvent> loggingEvents) {
// Lazy initialization of the appender. This is needed because the
// original appenders configuration may be available only after the
// complete
// log4j initialization.
while (appenders.getAllAppenders() == null || (appenders != null && !appenders.getAllAppenders().hasMoreElements())) {
if ((batcher == null) || (batcher.isPaused())) {
try {
Thread.sleep(SLEEP_TIME_MS);
} catch (InterruptedException ignore) {
}
continue;
}
org.apache.log4j.Logger asyncLogger = LoggerCache.getInstance()
.getOrCreateLogger(LOGGER_ASYNC_APPENDER);
Appender originalAppender = asyncLogger
.getAppender(originalAppenderName);
if (originalAppender == null) {
try {
Thread.sleep(SLEEP_TIME_MS);
} catch (InterruptedException ignore) {
}
continue;
}
appenders.addAppender(originalAppender);
}
// First take the overflown summary events and put it back in the queue
for (Iterator<Entry<String, LogSummary>> iter = logSummaryMap
.entrySet().iterator(); iter.hasNext();) {
Entry<String, LogSummary> mapEntry = (Entry<String, LogSummary>) iter
.next();
// If the space is not available, then exit immediately
if (batcher.isSpaceAvailable()) {
LogSummary logSummary = mapEntry.getValue();
LoggingEvent event = logSummary.createEvent();
// Put the event in the queue and remove the event from the summary
if (batcher.process(event)) {
iter.remove();
} else {
break;
}
} else {
break;
}
}
// Process the events from the queue and call the underlying
// appender
for (LoggingEvent event : loggingEvents) {
appenders.appendLoopOnAppenders(event);
}
}
/*
* (non-Javadoc)
* @see org.apache.log4j.AppenderSkeleton#append(org.apache.log4j.spi.LoggingEvent)
*/
public void append(final LoggingEvent event) {
boolean isBufferSpaceAvailable = (batcher.isSpaceAvailable() && (logSummaryMap
.size() == 0));
boolean isBufferPutSuccessful = false;
LocationInfo locationInfo = null;
// Reject it when we have a fast property as these can be expensive
Stopwatch s = locationInfoTimer.start();
if (CONFIGURATION.shouldSummarizeOverflow(this.originalAppenderName)) {
if (CONFIGURATION.shouldGenerateBlitz4jLocationInfo()) {
locationInfo = LoggingContext.getInstance()
.generateLocationInfo(event);
} else if (CONFIGURATION.shouldGenerateLog4jLocationInfo()) {
locationInfo = event.getLocationInformation();
}
}
s.stop();
if (isBufferSpaceAvailable) {
// Save the thread local info in the event so that the
// processing threads can have access to the thread local of the arriving event
Stopwatch sThreadLocal = saveThreadLocalTimer.start();
saveThreadLocalInfo(event);
sThreadLocal.stop();
isBufferPutSuccessful = putInBuffer(event);
}
// If the buffer is full, then summarize the information
if (CONFIGURATION.shouldSummarizeOverflow(this.originalAppenderName) && (!isBufferPutSuccessful)) {
summarizeEventCounter.increment();
Stopwatch t = putDiscardMapTimeTracer.start();
String loggerKey = event.getLoggerName();
if (locationInfo != null) {
loggerKey = locationInfo.getClassName() + "_"
+ locationInfo.getLineNumber();
}
LogSummary summary = (LogSummary) logSummaryMap.get(loggerKey);
if (summary == null) {
// Saving the thread local info is needed only for the first
// time
// creation of the summary
saveThreadLocalInfo(event);
summary = new LogSummary(event);
logSummaryMap.put(loggerKey, summary);
} else {
// The event summary is already there, just increment the
// count
summary.add(event);
}
t.stop();
} else if (!CONFIGURATION.shouldSummarizeOverflow(this.originalAppenderName) && (!isBufferPutSuccessful)) {
// Record the event that are not summarized and which are just
// discarded
discardEventCounter.increment();
}
}
/**
* Sets the name of the underlying appender that is wrapped by this
* <code>AsyncAppender</code>
*
* @param name
* - The name of the underlying appender
*/
public void setOriginalAppenderName(String name) {
this.originalAppenderName = name;
this.initBatcher(this.originalAppenderName);
this.putBufferTimeTracer = Monitors.newTimer("putBuffer",
TimeUnit.NANOSECONDS);
this.putDiscardMapTimeTracer = Monitors.newTimer("putDiscardMap",
TimeUnit.NANOSECONDS);
this.locationInfoTimer = Monitors.newTimer("locationInfo",
TimeUnit.NANOSECONDS);
this.saveThreadLocalTimer = Monitors.newTimer("saveThreadLocal",
TimeUnit.NANOSECONDS);
// We register these counters differently to the above timers, because need to keep the metric names as they were when we were
// using DynamicCounter.increment() directly.
this.summarizeEventCounter = initAndRegisterCounter(this.originalAppenderName + "_summarizeEvent");
this.discardEventCounter = initAndRegisterCounter(this.originalAppenderName + "_discardEvent");
this.putInBufferCounter = initAndRegisterCounter(this.originalAppenderName + "_putInBuffer");
this.logSummaryMap = CacheBuilder
.newBuilder()
.initialCapacity(5000)
.maximumSize(
CONFIGURATION.getLogSummarySize(originalAppenderName))
.expireAfterWrite(
CONFIGURATION
.getLogSummaryExpiry(originalAppenderName),
TimeUnit.SECONDS).<String, LogSummary> build().asMap();
try {
Monitors.registerObject(this.originalAppenderName, this);
} catch (Throwable e) {
if (CONFIGURATION.shouldPrintLoggingErrors()) {
System.err.println("Cannot register monitor for AsyncAppender "
+ this.originalAppenderName);
e.printStackTrace();
}
}
}
/**
* Construct a new Counter, register it, and then return it.
*
* @param name String
* @return Counter
*/
private Counter initAndRegisterCounter(String name) {
BasicCounter counter = new BasicCounter(MonitorConfig.builder(name).build());
DefaultMonitorRegistry.getInstance().register(counter);
return counter;
}
/**
* Save the thread local info of the event in the event itself for
* processing later.
*
* @param event
* - The logging event for which the information should be saved
*/
private void saveThreadLocalInfo(final LoggingEvent event) {
// Set the NDC and thread name for the calling thread as these
// LoggingEvent fields were not set at event creation time.
event.getNDC();
event.getThreadName();
// Get a copy of this thread's MDC.
event.getMDCCopy();
}
/**
* Puts the logging events to the in-memory buffer.
*
* @param event
* - The event that needs to be put in the buffer.
* @return - true, if the put was successful, false otherwise
*/
private boolean putInBuffer(final LoggingEvent event) {
putInBufferCounter.increment();
Stopwatch t = putBufferTimeTracer.start();
boolean hasPut = false;
if (batcher.process(event)) {
hasPut = true;
} else {
hasPut = false;
}
t.stop();
return hasPut;
}
/**
* Summary of discarded logging events for a logger.
*/
private static final class LogSummary {
private LoggingEvent event;
private int count;
/**
* Create new instance.
*
* @param event
* event, may not be null.
*/
public LogSummary(final LoggingEvent event) {
count = 1;
this.event = event;
}
/**
* Add discarded event to summary.
*
* @param event
* event, may not be null.
*/
public void add(final LoggingEvent event) {
count++;
}
/**
* Create event with summary information.
*
* @return new event.
*/
public LoggingEvent createEvent() {
String msg = MessageFormat
.format("{1}[Summarized {0} messages of this type because the internal buffer was full]",
new Object[] { new Integer(count),
event.getMessage() });
LoggingEvent loggingEvent = new LoggingEvent(
event.getFQNOfLoggerClass(), event.getLogger(),
event.getTimeStamp(), event.getLevel(), msg, Thread
.currentThread().getName(),
event.getThrowableInformation(), null, null,
event.getProperties());
return loggingEvent;
}
}
/*
* (non-Javadoc)
*
* @see org.apache.log4j.AppenderSkeleton#close()
*/
@Override
public void close() {
synchronized (appenders) {
appenders.removeAllAppenders();
}
}
/*
* (non-Javadoc)
*
* @see org.apache.log4j.spi.AppenderAttachable#getAllAppenders()
*/
@Override
public Enumeration getAllAppenders() {
synchronized (appenders) {
return appenders.getAllAppenders();
}
}
/**
 * Looks up an attached appender by name.
 *
 * @param name the name of the appender to look up.
 * @return the matching appender, or null if none with that name is attached.
 * @see org.apache.log4j.spi.AppenderAttachable#getAppender(java.lang.String)
 */
@Override
public Appender getAppender(final String name) {
    synchronized (appenders) {
        return appenders.getAppender(name);
    }
}
/**
 * Checks whether the given appender is attached to this async appender.
 *
 * @param appender the appender to look for.
 * @return true if the appender is attached, false otherwise.
 * @see org.apache.log4j.spi.AppenderAttachable#isAttached(org.apache.log4j.Appender)
 */
@Override
public boolean isAttached(final Appender appender) {
    synchronized (appenders) {
        return appenders.isAttached(appender);
    }
}
/**
 * This appender only forwards events to the wrapped appenders, which do
 * their own formatting, so it requires no layout of its own.
 *
 * @see org.apache.log4j.AppenderSkeleton#requiresLayout()
 */
@Override
public boolean requiresLayout() {
    return false;
}
/**
 * Detaches every appender currently attached to this async appender.
 *
 * @see org.apache.log4j.spi.AppenderAttachable#removeAllAppenders()
 */
@Override
public void removeAllAppenders() {
    // Guard against concurrent attach/detach on the shared appender list.
    synchronized (appenders) {
        appenders.removeAllAppenders();
    }
}
/**
 * Detaches the given appender instance, if it is attached.
 *
 * @param appender the appender instance to detach.
 * @see org.apache.log4j.spi.AppenderAttachable#removeAppender(org.apache.log4j.Appender)
 */
@Override
public void removeAppender(final Appender appender) {
    synchronized (appenders) {
        appenders.removeAppender(appender);
    }
}
/**
 * Detaches the appender with the given name, if one is attached.
 *
 * @param name the name of the appender to detach.
 * @see org.apache.log4j.spi.AppenderAttachable#removeAppender(java.lang.String)
 */
@Override
public void removeAppender(final String name) {
    synchronized (appenders) {
        appenders.removeAppender(name);
    }
}
/**
 * Attaches the given appender so it receives the asynchronously drained
 * events.
 *
 * @param newAppender the appender to attach.
 * @see org.apache.log4j.spi.AppenderAttachable#addAppender(org.apache.log4j.Appender)
 */
@Override
public void addAppender(final Appender newAppender) {
    synchronized (appenders) {
        appenders.addAppender(newAppender);
    }
}
// Servo gauge exposing the number of distinct summarized (discarded) event
// types currently tracked.
// NOTE(review): the method name has a typo ("Discad" -> "Discard") but is
// kept as-is: it is public and external monitors may bind to it.
@com.netflix.servo.annotations.Monitor(name = "discardMapSize", type = DataSourceType.GAUGE)
public int getDiscadMapSize() {
    return logSummaryMap.size();
}
/**
 * Entry point invoked by log4j for every event; forwards straight to
 * {@link #append} without the filter/level checks of the default
 * {@code AppenderSkeleton.doAppend}.
 */
@Override
public void doAppend(LoggingEvent event) {
    this.append(event);
}
}
| 6,515 |
0 | Create_ds/blitz4j/src/main/java/com/netflix | Create_ds/blitz4j/src/main/java/com/netflix/blitz4j/DefaultBlitz4jConfig.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.blitz4j;
import java.util.Enumeration;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import com.netflix.config.DynamicPropertyFactory;
/**
* The configuration class for blitz4j.
*
* All custom configurations can be specified by properties as defined by the
* configurations.
*
* @author Karthik Ranganathan
*
*/
public class DefaultBlitz4jConfig implements BlitzConfig {
    private static final String GENERATE_LOG4J_LOCATION_INFO = "netflix.blitz4j.generateLog4jLocationInfo";
    private static final String BLITZ4J_ASYNC_APPENDERS = "blitz4j.asyncAppenders";
    private static final String GENERATE_BLITZ4J_LOCATIONINFO = "netflix.blitz4j.generateBlitz4jLocationInfo";
    private static final String PROP_ASYNC_APPENDERS = "log4j.logger.asyncAppenders";
    private static final String NETFLIX_BLITZ4J_PRINT_LOGGING_ERRORS = "netflix.blitz4j.printLoggingErrors";
    private static final String NETFLIX_BLITZ4J_LOCKFREE = "netflix.blitz4j.lockfree";

    // Use concurrent hash map to avoid multithreaded contention.
    // Seeded once in the constructor; only read afterwards.
    private final Map<String, Object> propsMap = new ConcurrentHashMap<String, Object>();

    private static final DynamicPropertyFactory CONFIGURATION = DynamicPropertyFactory
            .getInstance();

    /**
     * Creates a configuration whose fallback defaults come from the given
     * properties. Dynamic (archaius) properties always take precedence when
     * a value is read; the seeded properties only supply the default used
     * when no dynamic override exists.
     *
     * @param props
     *            - the local configuration properties, may be null.
     */
    public DefaultBlitz4jConfig(Properties props) {
        if (props != null) {
            Enumeration<?> enumeration = props.propertyNames();
            while (enumeration.hasMoreElements()) {
                String key = (String) enumeration.nextElement();
                this.propsMap.put(key, props.getProperty(key));
            }
        }
    }

    // ---------------------------------------------------------------------
    // Every getter below follows the same pattern: consult the dynamic
    // property named by the key, falling back to the seeded value (or the
    // hard-coded default). The pattern is factored into typed helpers.
    // ---------------------------------------------------------------------

    /** Reads a boolean dynamic property, falling back to the seeded value. */
    private boolean getBool(String key, String defaultValue) {
        return CONFIGURATION.getBooleanProperty(key,
                Boolean.valueOf(getPropertyValue(key, defaultValue))).get();
    }

    /** Reads an int dynamic property, falling back to the seeded value. */
    private int getInt(String key, String defaultValue) {
        return CONFIGURATION.getIntProperty(key,
                Integer.valueOf(getPropertyValue(key, defaultValue))).get();
    }

    /** Reads a double dynamic property, falling back to the seeded value. */
    private double getDouble(String key, String defaultValue) {
        return CONFIGURATION.getDoubleProperty(key,
                Double.valueOf(getPropertyValue(key, defaultValue))).get();
    }

    /** Reads a string dynamic property, falling back to the seeded value. */
    private String getString(String key, String defaultValue) {
        return CONFIGURATION.getStringProperty(key,
                getPropertyValue(key, defaultValue)).get();
    }

    /*
     * (non-Javadoc)
     *
     * @see com.netflix.blitz4j.BlitzConfig#shouldUseLockFree()
     */
    @Override
    public boolean shouldUseLockFree() {
        return getBool(NETFLIX_BLITZ4J_LOCKFREE, "true");
    }

    /*
     * (non-Javadoc)
     *
     * @see com.netflix.blitz4j.BlitzConfig#shouldPrintLoggingErrors()
     */
    @Override
    public boolean shouldPrintLoggingErrors() {
        return getBool(NETFLIX_BLITZ4J_PRINT_LOGGING_ERRORS, "false");
    }

    /*
     * (non-Javadoc)
     *
     * @see com.netflix.blitz4j.BlitzConfig#getAsyncAppenders()
     */
    @Override
    public String[] getAsyncAppenders() {
        // Value has the shape "LEVEL,appender1,appender2,..."; callers skip
        // the first (level) token.
        return getString(PROP_ASYNC_APPENDERS, "OFF").split(",");
    }

    /*
     * (non-Javadoc)
     *
     * @see
     * com.netflix.blitz4j.BlitzConfig#getLogSummaryExpiry(java.lang.String)
     */
    @Override
    public int getLogSummaryExpiry(String originalAppenderName) {
        return getInt("netflix.blitz4j." + originalAppenderName
                + ".discardEntryExpireSeconds", "60");
    }

    /*
     * (non-Javadoc)
     *
     * @see com.netflix.blitz4j.BlitzConfig#getLogSummarySize(java.lang.String)
     */
    @Override
    public int getLogSummarySize(String originalAppenderName) {
        return getInt("netflix.blitz4j." + originalAppenderName
                + ".discardMapSize", "10000");
    }

    /*
     * (non-Javadoc)
     *
     * @see com.netflix.blitz4j.BlitzConfig#shouldGenerateBlitz4jLocationInfo()
     */
    @Override
    public boolean shouldGenerateBlitz4jLocationInfo() {
        return getBool(GENERATE_BLITZ4J_LOCATIONINFO, "true");
    }

    /*
     * (non-Javadoc)
     *
     * @see com.netflix.blitz4j.BlitzConfig#shouldGenerateLog4jLocationInfo()
     */
    @Override
    public boolean shouldGenerateLog4jLocationInfo() {
        return getBool(GENERATE_LOG4J_LOCATION_INFO, "false");
    }

    /*
     * (non-Javadoc)
     *
     * @see
     * com.netflix.blitz4j.BlitzConfig#shouldSummarizeOverflow(java.lang.String)
     */
    @Override
    public boolean shouldSummarizeOverflow(String originalAppenderName) {
        return getBool("netflix.blitz4j." + originalAppenderName
                + ".summarizeOverflow", "true");
    }

    /*
     * (non-Javadoc)
     *
     * @see
     * com.netflix.blitz4j.BlitzConfig#getAsyncAppenderImplementationNames()
     */
    @Override
    public String[] getAsyncAppenderImplementationNames() {
        return getString(BLITZ4J_ASYNC_APPENDERS,
                "com.netflix.blitz4j.AsyncAppender").split(",");
    }

    /*
     * (non-Javadoc)
     *
     * @see
     * com.netflix.blitz4j.BlitzConfig#getBatcherQueueMaxMessages(java.lang.
     * String)
     */
    @Override
    public int getBatcherQueueMaxMessages(String batcherName) {
        return getInt(batcherName + ".queue.maxMessages", "10000");
    }

    /*
     * (non-Javadoc)
     *
     * @see com.netflix.blitz4j.BlitzConfig#getBatchSize(java.lang.String)
     */
    @Override
    public int getBatchSize(String batcherName) {
        return getInt(batcherName + ".batch.maxMessages", "30");
    }

    /*
     * (non-Javadoc)
     *
     * @see
     * com.netflix.blitz4j.BlitzConfig#getBatcherWaitTimeBeforeShutdown(java
     * .lang.String)
     */
    @Override
    public int getBatcherWaitTimeBeforeShutdown(String batcherName) {
        return getInt(batcherName + ".waitTimeinMillis", "10000");
    }

    /*
     * (non-Javadoc)
     *
     * @see com.netflix.blitz4j.BlitzConfig#getBatcherMaxDelay(java.lang.String)
     */
    @Override
    public double getBatcherMaxDelay(String batcherName) {
        // BUGFIX: the fallback previously read "<batcher>.waitTimeinMillis"
        // (a copy-paste from getBatcherWaitTimeBeforeShutdown), so a seeded
        // "batch.maxDelay" value was ignored and a seeded waitTimeinMillis
        // value was misused as the delay default. The dynamic property and
        // its fallback now use the same key.
        return getDouble(batcherName + ".batch.maxDelay", "0.5");
    }

    /*
     * (non-Javadoc)
     *
     * @see
     * com.netflix.blitz4j.BlitzConfig#shouldWaitWhenBatcherQueueNotEmpty(java
     * .lang.String)
     */
    @Override
    public boolean shouldWaitWhenBatcherQueueNotEmpty(String batcherName) {
        return getBool(batcherName + ".blocking", "false");
    }

    /*
     * (non-Javadoc)
     *
     * @see
     * com.netflix.blitz4j.BlitzConfig#getBatcherMinThreads(java.lang.String)
     */
    @Override
    public int getBatcherMinThreads(String batcherName) {
        return getInt(batcherName + ".minThreads", "1");
    }

    /*
     * (non-Javadoc)
     *
     * @see
     * com.netflix.blitz4j.BlitzConfig#getBatcherMaxThreads(java.lang.String)
     */
    @Override
    public int getBatcherMaxThreads(String batcherName) {
        return getInt(batcherName + ".maxThreads", "3");
    }

    /*
     * (non-Javadoc)
     *
     * @see
     * com.netflix.blitz4j.BlitzConfig#getBatcherThreadKeepAliveTime(java.lang
     * .String)
     */
    @Override
    public int getBatcherThreadKeepAliveTime(String batcherName) {
        return getInt(batcherName + ".keepAliveTime", "900");
    }

    /*
     * (non-Javadoc)
     *
     * @see
     * com.netflix.blitz4j.BlitzConfig#shouldRejectWhenAllBatcherThreadsUsed
     * (java.lang.String)
     */
    @Override
    public boolean shouldRejectWhenAllBatcherThreadsUsed(String batcherName) {
        return getBool(batcherName + ".rejectWhenFull", "false");
    }

    /**
     * Looks up a value seeded at construction time.
     *
     * @param key
     *            - the property name.
     * @param defaultValue
     *            - returned when the key was not seeded.
     * @return the seeded value, or the supplied default.
     */
    private String getPropertyValue(String key, String defaultValue) {
        String value = (String) propsMap.get(key);
        if (value != null) {
            return value;
        }
        return defaultValue;
    }

    @Override
    public boolean shouldLoadLog4jPropertiesFromClassPath() {
        return getBool("netflix.blitz4j.shouldLoadLog4jProperties", "true");
    }
}
| 6,516 |
0 | Create_ds/blitz4j/src/main/java/com/netflix | Create_ds/blitz4j/src/main/java/com/netflix/blitz4j/BlitzConfig.java | package com.netflix.blitz4j;
public interface BlitzConfig {

    /**
     * Whether blitz4j should use its less-contended (lock-free) logging
     * implementation.
     */
    boolean shouldUseLockFree();

    /**
     * Whether blitz4j should print errors encountered during logging, for
     * debugging.
     */
    boolean shouldPrintLoggingErrors();

    /**
     * The list of asynchronous appenders. Configured like any log4j logging
     * override, e.g. {@code log4j.logger.asyncAppenders=INFO,MYAPPENDER};
     * the logging level in that definition carries no specific significance
     * and exists only for completeness.
     */
    String[] getAsyncAppenders();

    /**
     * Seconds after which a stored discard summary expires.
     *
     * @param originalAppenderName
     *            - the name of the appender for which the logging is done.
     */
    int getLogSummaryExpiry(String originalAppenderName);

    /**
     * Size of the log summary information kept per appender.
     *
     * @param originalAppenderName
     *            - the name of the appender for which the logging is done.
     */
    int getLogSummarySize(String originalAppenderName);

    /**
     * @return true when blitz4j-based caller location information should be
     *         generated, false otherwise.
     */
    boolean shouldGenerateBlitz4jLocationInfo();

    /**
     * @return true when log4j-based caller location information should be
     *         generated, false otherwise.
     */
    boolean shouldGenerateLog4jLocationInfo();

    /**
     * Whether summary information should be generated when the asynchronous
     * buffer becomes full, instead of silently dropping events.
     *
     * @param originalAppenderName
     *            - the appender name for which the logging is done.
     * @return true if overflow should be summarized, false otherwise.
     */
    boolean shouldSummarizeOverflow(String originalAppenderName);

    /**
     * Names of the asynchronous appender implementations, which receive
     * slightly different treatment during dynamic reconfiguration.
     */
    String[] getAsyncAppenderImplementationNames();

    /**
     * Maximum number of messages the named batcher may hold in its buffer.
     *
     * @param batcherName
     *            - the name of the batcher.
     */
    int getBatcherQueueMaxMessages(String batcherName);

    /**
     * Number of messages per batch handed to the processing threads.
     *
     * @param batcherName
     *            - the name of the batcher.
     */
    int getBatchSize(String batcherName);

    /**
     * Seconds to wait before the named batcher flushes its buffered messages
     * at shutdown.
     *
     * @param batcherName
     *            - the name of the batcher.
     */
    int getBatcherWaitTimeBeforeShutdown(String batcherName);

    /**
     * Seconds (fractional) messages may wait to be batched before being
     * given to the processor threads.
     *
     * @param batcherName
     *            - the name of the batcher.
     */
    double getBatcherMaxDelay(String batcherName);

    /**
     * Whether caller threads should block and wait when the internal buffer
     * is full.
     *
     * @param batcherName
     *            - the name of the batcher.
     */
    boolean shouldWaitWhenBatcherQueueNotEmpty(String batcherName);

    /**
     * Minimum number of processing threads run to handle messages.
     *
     * @param batcherName
     *            - the name of the batcher.
     */
    int getBatcherMinThreads(String batcherName);

    /**
     * Maximum number of processing threads run to handle messages.
     *
     * @param batcherName
     *            - the name of the batcher.
     */
    int getBatcherMaxThreads(String batcherName);

    /**
     * Seconds an idle processing thread is kept alive.
     *
     * @param batcherName
     *            - the name of the batcher.
     */
    int getBatcherThreadKeepAliveTime(String batcherName);

    /**
     * Whether the collector thread, which hands messages to the processor
     * threads, should have its request rejected (and retried indefinitely)
     * rather than participate in processing when all threads are busy.
     *
     * @param batcherName
     *            - the name of the batcher.
     * @return true if the hand-off is rejected when full, false if the
     *         collector participates in processing.
     */
    boolean shouldRejectWhenAllBatcherThreadsUsed(String batcherName);

    /**
     * Whether log4j.properties should be loaded from the classpath during
     * configuration.
     *
     * @return true if log4j.properties needs to be loaded, false otherwise.
     */
    boolean shouldLoadLog4jPropertiesFromClassPath();
}
0 | Create_ds/blitz4j/src/main/java/com/netflix | Create_ds/blitz4j/src/main/java/com/netflix/blitz4j/LoggingConfiguration.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.blitz4j;
import java.io.FileNotFoundException;
import java.io.InputStream;
import java.net.URL;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.configuration.ConfigurationConverter;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.log4j.LogManager;
import org.apache.log4j.PropertyConfigurator;
import org.apache.log4j.helpers.Loader;
import org.apache.log4j.spi.LoggerFactory;
import org.slf4j.Logger;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.netflix.config.ConfigurationManager;
import com.netflix.config.ExpandedConfigurationListenerAdapter;
import com.netflix.config.PropertyListener;
import com.netflix.logging.messaging.BatcherFactory;
import com.netflix.logging.messaging.MessageBatcher;
/**
* The main configuration class that bootstraps the <em>blitz4j</em>
* implementation.
*
* <p>
* The users can either use {@link #configure()} or
* {@link #configure(Properties)} to kick start the configuration. If the
* <code>log4j.configuration</code> is provided, the properties are additionally
* loaded from the provided {@link URL}.
* </p>
*
* <p>
* The list of appenders to be automatically converted can be provided by the
* property <code>log4j.logger.asyncAppenders</code>. The configuration takes
* these appenders and automatically enables them for asynchronous logging.
* </p>
*
* @author Karthik Ranganathan
*
*/
public class LoggingConfiguration implements PropertyListener {
    private static final String LOG4J_ROOT_LOGGER = "log4j.rootLogger";
    private static final String LOG4J_ROOT_CATEGORY = "log4j.rootCategory";
    private static final String LOG4J_PROPERTIES = "log4j.properties";
    private static final String BLITZ_LOGGER_FACTORY = "com.netflix.blitz4j.NFCategoryFactory";
    private static final String PROP_LOG4J_CONFIGURATION = "log4j.configuration";
    // Guard token handed to log4j's repository selector; only the holder of
    // this object may replace the selector again.
    private static final Object guard = new Object();
    private static final String PROP_LOG4J_LOGGER_FACTORY = "log4j.loggerFactory";
    private static final String LOG4J_FACTORY_IMPL = "com.netflix.logging.log4jAdapter.NFCategoryFactory";
    private static final String LOG4J_LOGGER_FACTORY = "log4j.loggerFactory";
    private static final String PROP_LOG4J_ORIGINAL_APPENDER_NAME = "originalAppenderName";
    private static final String LOG4J_PREFIX = "log4j.logger";
    private static final String LOG4J_APPENDER_DELIMITER = ".";
    private static final String LOG4J_APPENDER_PREFIX = "log4j.appender";
    private static final String ASYNC_APPENDERNAME_SUFFIX = "_ASYNC";
    private static final String ROOT_CATEGORY = "rootCategory";
    private static final String ROOT_LOGGER = "rootLogger";
    // Maps each configured appender name to its generated "<name>_ASYNC" wrapper.
    private Map<String, String> originalAsyncAppenderNameMap = new HashMap<String, String>();
    private BlitzConfig blitz4jConfig;
    // Snapshot of the bootstrap properties; treated as immutable after configure().
    private Properties initialProps = new Properties();
    // Dynamic overrides that differ from initialProps.
    private Properties overrideProps = new Properties();
    private final ExecutorService executorPool;
    // Number of refreshes requested but not yet applied; used to coalesce bursts.
    private final AtomicInteger pendingRefreshes = new AtomicInteger();
    private final AtomicInteger refreshCount = new AtomicInteger();
    private Logger logger;
    private static final int MIN_DELAY_BETWEEN_REFRESHES = 200;
    private static final CharSequence PROP_LOG4J_ASYNC_APPENDERS = "log4j.logger.asyncAppenders";
    private static LoggingConfiguration instance = new LoggingConfiguration();

    protected LoggingConfiguration() {
        // Non-daemon so pending reconfigurations complete before JVM exit.
        this.executorPool = Executors.newCachedThreadPool(
                new ThreadFactoryBuilder()
                        .setDaemon(false)
                        .setNameFormat("DynamicLog4jListener")
                        .build());
    }

    /**
     * Kick start the blitz4j implementation
     */
    public void configure() {
        this.configure(new Properties());
    }

    /**
     * Kick start the blitz4j implementation.
     *
     * @param props
     *            - The overriding <em>log4j</em> properties if any.
     */
    public void configure(Properties props) {
        this.refreshCount.set(0);
        this.overrideProps.clear();
        this.originalAsyncAppenderNameMap.clear();
        // First try to load the log4j configuration file from the classpath
        String log4jConfigurationFile = System.getProperty(PROP_LOG4J_CONFIGURATION);
        NFHierarchy nfHierarchy = null;
        // Make log4j use blitz4j implementations
        if ((!NFHierarchy.class.equals(LogManager.getLoggerRepository().getClass()))) {
            nfHierarchy = new NFHierarchy(new NFRootLogger(org.apache.log4j.Level.INFO));
            org.apache.log4j.LogManager.setRepositorySelector(new NFRepositorySelector(nfHierarchy), guard);
        }
        String log4jLoggerFactory = System.getProperty(PROP_LOG4J_LOGGER_FACTORY);
        if (log4jLoggerFactory != null) {
            this.initialProps.setProperty(PROP_LOG4J_LOGGER_FACTORY, log4jLoggerFactory);
            if (nfHierarchy != null) {
                try {
                    LoggerFactory loggerFactory = (LoggerFactory) Class.forName(log4jLoggerFactory).newInstance();
                    nfHierarchy.setLoggerFactory(loggerFactory);
                } catch (Exception e) {
                    // Best-effort: a broken user-supplied factory must not
                    // abort the whole logging bootstrap.
                    System.err.println("Cannot set the logger factory. Hence reverting to default.");
                    e.printStackTrace();
                }
            }
        } else {
            this.initialProps.setProperty(PROP_LOG4J_LOGGER_FACTORY, BLITZ_LOGGER_FACTORY);
        }
        if (log4jConfigurationFile != null) {
            loadLog4jConfigurationFile(log4jConfigurationFile);
            // First configure without async so that we can capture the output
            // of dependent libraries
            clearAsyncAppenderList();
            PropertyConfigurator.configure(this.initialProps);
        }
        this.blitz4jConfig = new DefaultBlitz4jConfig(props);
        if ((log4jConfigurationFile == null) && (blitz4jConfig.shouldLoadLog4jPropertiesFromClassPath())) {
            try {
                URL url = Loader.getResource(LOG4J_PROPERTIES);
                if (url != null) {
                    try (InputStream in = url.openStream()) {
                        this.initialProps.load(in);
                    }
                }
            } catch (Exception t) {
                // Best-effort: missing/unreadable log4j.properties is tolerated.
                System.err.println("Error loading properties from " + LOG4J_PROPERTIES);
            }
        }
        // Caller-supplied properties take precedence over anything loaded above.
        Enumeration enumeration = props.propertyNames();
        while (enumeration.hasMoreElements()) {
            String key = (String) enumeration.nextElement();
            String propertyValue = props.getProperty(key);
            this.initialProps.setProperty(key, propertyValue);
        }
        this.blitz4jConfig = new DefaultBlitz4jConfig(this.initialProps);
        String[] asyncAppenderArray = blitz4jConfig.getAsyncAppenders();
        if (asyncAppenderArray == null) {
            return;
        }
        // Index 0 is the log level token of the "LEVEL,appender1,..." list
        // and is deliberately skipped.
        for (int i = 0; i < asyncAppenderArray.length; i++) {
            String oneAppenderName = asyncAppenderArray[i];
            if ((i == 0) || (oneAppenderName == null)) {
                continue;
            }
            oneAppenderName = oneAppenderName.trim();
            String oneAsyncAppenderName = oneAppenderName + ASYNC_APPENDERNAME_SUFFIX;
            originalAsyncAppenderNameMap.put(oneAppenderName, oneAsyncAppenderName);
        }
        try {
            convertConfiguredAppendersToAsync(this.initialProps);
        } catch (Exception e) {
            throw new RuntimeException("Could not configure async appenders ",
                    e);
        }
        // Yes second time init required as properties would have been during async appender conversion
        this.blitz4jConfig = new DefaultBlitz4jConfig(this.initialProps);
        clearAsyncAppenderList();
        PropertyConfigurator.configure(this.initialProps);
        closeNonexistingAsyncAppenders();
        // Only now is it safe to create our own slf4j logger.
        this.logger = org.slf4j.LoggerFactory.getLogger(LoggingConfiguration.class);
        // Register for dynamic (archaius) property changes.
        ConfigurationManager.getConfigInstance().addConfigurationListener(
                new ExpandedConfigurationListenerAdapter(this));
    }

    // Detaches all appenders from the synthetic "asyncAppenders" logger so a
    // (re)configuration starts from a clean slate.
    private void clearAsyncAppenderList() {
        org.apache.log4j.Logger asyncLogger = LoggerCache.getInstance().getOrCreateLogger("asyncAppenders");
        if (asyncLogger != null) {
            asyncLogger.removeAllAppenders();
        }
    }

    // Loads the log4j configuration from the URL named by log4j.configuration
    // into initialProps; failure here is fatal for the bootstrap.
    private void loadLog4jConfigurationFile(String log4jConfigurationFile) {
        try {
            URL url = new URL(log4jConfigurationFile);
            try (InputStream in = url.openStream()) {
                this.initialProps.load(in);
            }
        } catch (Exception t) {
            throw new RuntimeException(
                    "Cannot load log4 configuration file specified in " + PROP_LOG4J_CONFIGURATION, t);
        }
    }

    public static LoggingConfiguration getInstance() {
        return instance;
    }

    public BlitzConfig getConfiguration() {
        return this.blitz4jConfig;
    }

    // Returns a defensive copy of the bootstrap properties.
    public Properties getInitialProperties() {
        Properties props = new Properties();
        props.putAll(this.initialProps);
        return props;
    }

    // Returns a defensive copy of the dynamic override properties.
    public Properties getOverrideProperties() {
        Properties props = new Properties();
        props.putAll(this.overrideProps);
        return props;
    }

    public int getRefreshCount() {
        return this.refreshCount.get();
    }

    /**
     * Shuts down blitz4j cleanly by flushing out all the async related
     * messages.
     */
    public void stop() {
        MessageBatcher batcher = null;
        // First pass: stop (flush) every batcher; second pass: unregister
        // them from the factory. Kept as two passes so all batchers are
        // flushed before any is removed.
        for (String originalAppenderName : originalAsyncAppenderNameMap.keySet()) {
            String batcherName = AsyncAppender.class.getName() + "." + originalAppenderName;
            batcher = BatcherFactory.getBatcher(batcherName);
            if (batcher == null) {
                continue;
            }
            batcher.stop();
        }
        for (String originalAppenderName : originalAsyncAppenderNameMap.keySet()) {
            String batcherName = AsyncAppender.class.getName() + "." + originalAppenderName;
            batcher = BatcherFactory.getBatcher(batcherName);
            if (batcher == null) {
                continue;
            }
            BatcherFactory.removeBatcher(batcherName);
        }
    }

    /*
     * (non-Javadoc)
     *
     * @see com.netflix.config.PropertyListener#addProperty(java.lang.Object,
     * java.lang.String, java.lang.Object, boolean)
     */
    public synchronized void addProperty(Object source, String name, Object value, boolean beforeUpdate) {
        // Only react once the change has been applied (beforeUpdate == false)
        // and only for log4j.* properties.
        if (beforeUpdate == false && isLog4JProperty(name)) {
            overrideProps.put(name, value);
            reConfigureAsynchronously();
        }
    }

    /*
     * (non-Javadoc)
     *
     * @see com.netflix.config.PropertyListener#clear(java.lang.Object, boolean)
     */
    public void clear(Object source, boolean beforeUpdate) {
    }

    /*
     * (non-Javadoc)
     *
     * @see com.netflix.config.PropertyListener#clearProperty(java.lang.Object,
     * java.lang.String, java.lang.Object, boolean)
     */
    public synchronized void clearProperty(Object source, String name, Object value, boolean beforeUpdate) {
        if (beforeUpdate == false && isLog4JProperty(name)) {
            overrideProps.remove(name);
            reConfigureAsynchronously();
        }
    }

    /*
     * (non-Javadoc)
     *
     * @see
     * com.netflix.config.PropertyListener#configSourceLoaded(java.lang.Object)
     */
    public void configSourceLoaded(Object source) {
        Properties props = ConfigurationConverter.getProperties(ConfigurationManager.getConfigInstance());
        reconfigure(props);
    }

    /*
     * (non-Javadoc)
     *
     * @see com.netflix.config.PropertyListener#setProperty(java.lang.Object,
     * java.lang.String, java.lang.Object, boolean)
     */
    public synchronized void setProperty(Object source, String name, Object value,
            boolean beforeUpdate) {
        if (beforeUpdate == false && isLog4JProperty(name)) {
            overrideProps.put(name, value);
            reConfigureAsynchronously();
        }
    }

    /**
     * Set a snapshot of all LOG4J properties and reconfigure if properties have been
     * changed.
     * @param props Complete set of ALL log4j configuration properties including all
     * appenders and log level overrides
     */
    public synchronized void reconfigure(Properties props) {
        // First isolate any property that is different from the immutable
        // set of original initialization properties
        Properties newOverrideProps = new Properties();
        for (Entry<Object, Object> prop : props.entrySet()) {
            if (isLog4JProperty(prop.getKey().toString())) {
                Object initialValue = initialProps.get(prop.getKey());
                if (initialValue == null || !initialValue.equals(prop.getValue())) {
                    newOverrideProps.put(prop.getKey(), prop.getValue());
                }
            }
        }
        // Compare against our cached set of override
        if (!overrideProps.equals(newOverrideProps)) {
            this.overrideProps.clear();
            this.overrideProps.putAll(newOverrideProps);
            reConfigureAsynchronously();
        }
    }

    /**
     * Refresh the configuration asynchronously
     */
    private void reConfigureAsynchronously() {
        refreshCount.incrementAndGet();
        // Only the transition 0 -> 1 schedules a worker; further requests
        // arriving while one is in flight are coalesced via pendingRefreshes.
        if (pendingRefreshes.incrementAndGet() == 1) {
            executorPool.submit(new Runnable() {
                @Override
                public void run() {
                    do {
                        try {
                            // Small delay to batch rapid successive changes
                            // into a single reconfiguration.
                            Thread.sleep(MIN_DELAY_BETWEEN_REFRESHES);
                            logger.info("Configuring log4j dynamically");
                            reconfigure();
                        }
                        catch (Exception th) {
                            logger.error("Cannot dynamically configure log4j :", th);
                        }
                    } while (0 != pendingRefreshes.getAndSet(0));
                }
            });
        }
    }

    // Merges the bootstrap snapshot with the dynamic overrides; overrides win.
    private synchronized Properties getConsolidatedProperties() {
        logger.info("Override properties are :" + overrideProps);
        Properties consolidatedProps = new Properties();
        consolidatedProps.putAll(initialProps);
        consolidatedProps.putAll(overrideProps);
        return consolidatedProps;
    }

    /**
     * Reconfigure log4j at run-time.
     *
     * Pauses every async batcher, reapplies the consolidated configuration,
     * then resumes the batchers so no events touch the appenders while they
     * are being rebuilt.
     *
     * @throws FileNotFoundException
     * @throws ConfigurationException
     */
    private void reconfigure() throws ConfigurationException, FileNotFoundException {
        Properties consolidatedProps = getConsolidatedProperties();
        logger.info("The root category for log4j.rootCategory now is {}", consolidatedProps.getProperty(LOG4J_ROOT_CATEGORY));
        logger.info("The root category for log4j.rootLogger now is {}", consolidatedProps.getProperty(LOG4J_ROOT_LOGGER));
        // Pause the async appenders so that the appenders are not accessed
        for (String originalAppenderName : originalAsyncAppenderNameMap.keySet()) {
            MessageBatcher asyncBatcher = BatcherFactory.getBatcher(AsyncAppender.class.getName() + "." + originalAppenderName);
            if (asyncBatcher == null) {
                continue;
            }
            asyncBatcher.pause();
        }
        // Configure log4j using the new set of properties
        configureLog4j(consolidatedProps);
        // Resume all the batchers to continue logging
        for (String originalAppenderName : originalAsyncAppenderNameMap.keySet()) {
            MessageBatcher asyncBatcher = BatcherFactory.getBatcher(AsyncAppender.class.getName() + "." + originalAppenderName);
            if (asyncBatcher == null) {
                continue;
            }
            asyncBatcher.resume();
        }
    }

    /**
     * Configure log4j with the given properties.
     *
     * @param props
     *            The properties that needs to be configured for log4j
     * @throws ConfigurationException
     * @throws FileNotFoundException
     */
    private void configureLog4j(Properties props) throws ConfigurationException, FileNotFoundException {
        if (blitz4jConfig.shouldUseLockFree() && (props.getProperty(LOG4J_LOGGER_FACTORY) == null)) {
            props.setProperty(LOG4J_LOGGER_FACTORY, LOG4J_FACTORY_IMPL);
        }
        convertConfiguredAppendersToAsync(props);
        clearAsyncAppenderList();
        logger.info("Configuring log4j with properties :" + props);
        PropertyConfigurator.configure(props);
    }

    /**
     * Check if the property that is being changed is something that this
     * configuration cares about.
     *
     * The implementation only cares about changes related to <code>log4j</code>
     * properties.
     *
     * @param name
     *            -The name of the property which should be checked.
     * @return true when the name starts with the log4j logger prefix.
     */
    private boolean isLog4JProperty(String name) {
        if (name == null) {
            return false;
        }
        return name.startsWith(LOG4J_PREFIX);
    }

    /**
     * Convert appenders specified by the property
     * <code>log4j.logger.asyncAppender</code> to the blitz4j Asynchronous
     * appenders.
     *
     * @param props
     *            - The properties that need to be passed into the log4j for
     *            configuration.
     * @throws ConfigurationException
     * @throws FileNotFoundException
     */
    private void convertConfiguredAppendersToAsync(Properties props) throws ConfigurationException, FileNotFoundException {
        for (Map.Entry<String, String> originalAsyncAppenderMapEntry : originalAsyncAppenderNameMap.entrySet()) {
            String asyncAppenderName = originalAsyncAppenderMapEntry.getValue();
            // Declare the "<name>_ASYNC" appender as an AsyncAppender.
            props.setProperty(LOG4J_APPENDER_PREFIX + LOG4J_APPENDER_DELIMITER + asyncAppenderName, AsyncAppender.class.getName());
            // Set the original appender so that it can be fetched later after configuration
            String originalAppenderName = originalAsyncAppenderMapEntry.getKey();
            props.setProperty(LOG4J_APPENDER_PREFIX + LOG4J_APPENDER_DELIMITER
                    + asyncAppenderName + LOG4J_APPENDER_DELIMITER
                    + PROP_LOG4J_ORIGINAL_APPENDER_NAME, originalAppenderName);
            // Set the batcher to reject the collector request instead of it
            // participating in processing
            this.initialProps.setProperty("batcher." + AsyncAppender.class.getName() + "." + originalAppenderName + "." + "rejectWhenFull", "true");
            // Set the default value of the processing max threads to 1, if a
            // value is not specified
            String maxThreads = this.initialProps.getProperty("batcher." + AsyncAppender.class.getName() + "." + originalAppenderName + "." + "maxThreads");
            if (maxThreads == null) {
                this.initialProps.setProperty("batcher." + AsyncAppender.class.getName() + "." + originalAppenderName + "." + "maxThreads", "1");
            }
            // Rewrite every logger/rootLogger/rootCategory definition so
            // references to the original appender point at its async wrapper.
            for (Map.Entry mapEntry : props.entrySet()) {
                String key = mapEntry.getKey().toString();
                if ((key.contains(LOG4J_PREFIX) || key.contains(ROOT_CATEGORY) || key.contains(ROOT_LOGGER))
                        && !key.contains(PROP_LOG4J_ASYNC_APPENDERS)
                        && !key.contains(PROP_LOG4J_ORIGINAL_APPENDER_NAME)) {
                    Object value = mapEntry.getValue();
                    if (value != null) {
                        String[] values = (String.class.cast(value)).split(",");
                        String valueString = "";
                        int ctr = 0;
                        for (String oneValue : values) {
                            if (oneValue == null) {
                                continue;
                            }
                            ++ctr;
                            if (originalAppenderName.equals(oneValue.trim())) {
                                oneValue = asyncAppenderName;
                            }
                            // Re-join with commas, omitting the trailing one.
                            if (ctr != values.length) {
                                valueString = valueString + oneValue + ",";
                            } else {
                                valueString = valueString + oneValue;
                            }
                        }
                        mapEntry.setValue(valueString);
                    }
                }
            }
        }
    }

    /**
     * Closes any asynchronous appenders that were not removed during configuration.
     */
    private void closeNonexistingAsyncAppenders() {
        org.apache.log4j.Logger rootLogger = LogManager.getRootLogger();
        if (NFLockFreeLogger.class.isInstance(rootLogger)) {
            ((NFLockFreeLogger)rootLogger).reconcileAppenders();
        }
        Enumeration enums = LogManager.getCurrentLoggers();
        while (enums.hasMoreElements()) {
            Object myLogger = enums.nextElement();
            if (NFLockFreeLogger.class.isInstance(myLogger)) {
                ((NFLockFreeLogger)myLogger).reconcileAppenders();
            }
        }
    }
}
| 6,518 |
0 | Create_ds/blitz4j/src/main/java/com/netflix | Create_ds/blitz4j/src/main/java/com/netflix/blitz4j/NFLockFreeLogger.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.blitz4j;
import java.util.Enumeration;
import java.util.Vector;
import org.apache.log4j.Appender;
import org.apache.log4j.Category;
import org.apache.log4j.Hierarchy;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.helpers.AppenderAttachableImpl;
import org.apache.log4j.helpers.NullEnumeration;
import org.apache.log4j.spi.HierarchyEventListener;
import org.apache.log4j.spi.LoggingEvent;
/**
* A Logger class that overrides log4j to provide a less contended
* implementation.
*
* @author Karthik Ranganathan
*
*/
public class NFLockFreeLogger extends Logger {
    /**
     * Appender list for this logger. Declared volatile because it is lazily
     * initialized via double-checked locking in {@link #addAppender(Appender)}
     * and read without a lock by the other methods; without volatile that
     * publication is unsafe under the Java memory model.
     */
    volatile AppenderAttachableImpl aai;
    private final LoggingContext loggingContext = LoggingContext.getInstance();

    protected NFLockFreeLogger(String name) {
        super(name);
    }

    /**
     * Adds an appender, creating the lock-free appender list on first use.
     *
     * @see org.apache.log4j.Category#addAppender(org.apache.log4j.Appender)
     */
    @Override
    public void addAppender(Appender newAppender) {
        if (aai == null) {
            synchronized (this) {
                if (aai == null) {
                    aai = new NFAppenderAttachableImpl();
                }
            }
        }
        aai.addAppender(newAppender);
        repository.fireAddAppenderEvent(this, newAppender);
    }

    /**
     * Calls the appenders of this logger and of every parent logger, honoring
     * additivity. Only {@link NFLockFreeLogger} instances in the hierarchy are
     * considered; other logger types are skipped.
     *
     * @see org.apache.log4j.Category#callAppenders(org.apache.log4j.spi.LoggingEvent)
     */
    @Override
    public void callAppenders(LoggingEvent event) {
        int writes = 0;
        for (Category c = this; c != null; c = c.getParent()) {
            if (!(NFLockFreeLogger.class.isInstance(c))) {
                continue;
            }
            if (((NFLockFreeLogger) c).aai != null) {
                int appenderWrite = ((NFLockFreeLogger) c).aai
                        .appendLoopOnAppenders(event);
                writes += appenderWrite;
            }
            if (!c.getAdditivity()) {
                break;
            }
        }
        if (writes == 0) {
            repository.emitNoAppenderWarning(this);
        }
    }

    /** @see org.apache.log4j.Category#getAllAppenders() */
    @Override
    public Enumeration getAllAppenders() {
        if (aai == null) {
            return NullEnumeration.getInstance();
        }
        return aai.getAllAppenders();
    }

    /** @see org.apache.log4j.Category#getAppender(java.lang.String) */
    @Override
    public Appender getAppender(String name) {
        if (aai == null || name == null) {
            return null;
        }
        return aai.getAppender(name);
    }

    /** @see org.apache.log4j.Category#isAttached(org.apache.log4j.Appender) */
    @Override
    public boolean isAttached(Appender appender) {
        if (appender == null || aai == null) {
            return false;
        }
        return aai.isAttached(appender);
    }

    /**
     * Removes all appenders, firing a remove event for each appender that was
     * attached at the time of the call.
     *
     * @see org.apache.log4j.Category#removeAllAppenders()
     */
    @Override
    public void removeAllAppenders() {
        if (aai != null) {
            Vector appenders = new Vector();
            Enumeration iter = aai.getAllAppenders();
            if (iter == null) {
                return;
            }
            while (iter.hasMoreElements()) {
                appenders.add(iter.nextElement());
            }
            aai.removeAllAppenders();
            iter = appenders.elements();
            while (iter.hasMoreElements()) {
                fireRemoveAppenderEvent((Appender) iter.nextElement());
            }
        }
    }

    /**
     * Returns the per-request context level when one is set, falling back to
     * the regular log4j effective level otherwise.
     */
    @Override
    public Level getEffectiveLevel() {
        if (loggingContext == null) {
            return super.getEffectiveLevel();
        }
        // Return the logging level of the request if set
        if (loggingContext.getContextLevel() != null) {
            return loggingContext.getContextLevel();
        }
        return super.getEffectiveLevel();
    }

    /** @see org.apache.log4j.Category#removeAppender(org.apache.log4j.Appender) */
    @Override
    public void removeAppender(Appender appender) {
        if (appender == null || aai == null) {
            return;
        }
        boolean wasAttached = aai.isAttached(appender);
        aai.removeAppender(appender);
        if (wasAttached) {
            fireRemoveAppenderEvent(appender);
        }
    }

    /** @see org.apache.log4j.Category#removeAppender(java.lang.String) */
    @Override
    public void removeAppender(String name) {
        if (name == null || aai == null) {
            return;
        }
        Appender appender = aai.getAppender(name);
        aai.removeAppender(name);
        if (appender != null) {
            fireRemoveAppenderEvent(appender);
        }
    }

    /**
     * Reconciles the appender list after configuration to ensure that stale
     * asynchronous appenders are not left over. This is needed because the
     * appenders are not cleaned out completely during configuration, so that
     * messages are not dropped while reconfiguration is in progress.
     */
    public void reconcileAppenders() {
        if (aai != null) {
            ((NFAppenderAttachableImpl) aai).reconcileAppenders();
        }
    }

    /**
     * Notifies the repository that the given appender was removed from this
     * logger.
     */
    private void fireRemoveAppenderEvent(final Appender appender) {
        if (appender != null) {
            // Check for the concrete NFHierarchy type before casting; the
            // previous code tested instanceof Hierarchy but then cast to
            // NFHierarchy, which would throw ClassCastException for a plain
            // log4j Hierarchy repository.
            if (repository instanceof NFHierarchy) {
                ((NFHierarchy) repository).fireRemoveAppenderEvent(this,
                        appender);
            } else if (repository instanceof HierarchyEventListener) {
                ((HierarchyEventListener) repository).removeAppenderEvent(this,
                        appender);
            }
        }
    }
}
| 6,519 |
0 | Create_ds/blitz4j/src/main/java/com/netflix | Create_ds/blitz4j/src/main/java/com/netflix/blitz4j/NFRootLogger.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.blitz4j;
import org.apache.log4j.*;
import org.apache.log4j.helpers.LogLog;
/**
* A Root Logger class that overrides log4j to provide a less contended implementation.
*
* @author Karthik Ranganathan
*
*/
public final class NFRootLogger extends NFLockFreeLogger {
    /**
     * Creates the root logger with the fixed name "root" and the given
     * initial logging level.
     *
     * @param level
     *            - the logging level to set on the root logger.
     */
    public NFRootLogger(Level level) {
        super("root");
        setLevel(level);
    }
}
| 6,520 |
0 | Create_ds/blitz4j/src/main/java/com/netflix | Create_ds/blitz4j/src/main/java/com/netflix/blitz4j/NFCategoryFactory.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.blitz4j;
import org.apache.log4j.Logger;
import org.apache.log4j.spi.LoggerFactory;
/**
* A Category factory that overrides log4j to provide a less contended
* implementation.
*
* @author Karthik Ranganathan
*
*/
public class NFCategoryFactory implements LoggerFactory {
    public NFCategoryFactory() {
    }
    /**
     * Creates a new {@link NFLockFreeLogger} for the given logger name, so
     * that every logger produced through this factory uses the less contended
     * implementation.
     *
     * @param name
     *            - the name of the logger to be created.
     * @return a new {@link NFLockFreeLogger} instance for the given name.
     * @see org.apache.log4j.spi.LoggerFactory#makeNewLoggerInstance(java.lang.String)
     */
    @Override
    public Logger makeNewLoggerInstance(String name) {
        return new NFLockFreeLogger(name);
    }
}
| 6,521 |
0 | Create_ds/blitz4j/src/main/java/com/netflix/logging | Create_ds/blitz4j/src/main/java/com/netflix/logging/log4jAdapter/NFPatternLayout.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.logging.log4jAdapter;
import org.apache.log4j.PatternLayout;
import org.apache.log4j.helpers.PatternParser;
/**
* Custom Pattern Layout class for less contended implementation.
*
* @author Karthik Ranganathan
*
*/
public class NFPatternLayout extends PatternLayout
{
public NFPatternLayout()
{
super();
}
public NFPatternLayout(String pattern)
{
super(pattern);
}
protected PatternParser createPatternParser(String pattern)
{
return (PatternParser) new NFPatternParser(pattern);
}
}
| 6,522 |
0 | Create_ds/blitz4j/src/main/java/com/netflix/logging | Create_ds/blitz4j/src/main/java/com/netflix/logging/log4jAdapter/NFPatternParser.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.logging.log4jAdapter;
import java.util.Arrays;
import java.util.List;
import org.apache.log4j.helpers.FormattingInfo;
import org.apache.log4j.helpers.PatternConverter;
import org.apache.log4j.helpers.PatternParser;
import org.apache.log4j.spi.LocationInfo;
import org.apache.log4j.spi.LoggingEvent;
import com.netflix.blitz4j.LoggingContext;
/**
* A custom parser class that provides a better performing implementation than the one in log4j for finding location information such
* as class, line number etc.
*
* @author Karthik Ranganathan
*/
public class NFPatternParser extends PatternParser {
private static List<Character> contextCharList = Arrays.asList(Character.valueOf('c'),
Character.valueOf('l'),
Character.valueOf('M'),
Character.valueOf('C'),
Character.valueOf('L'),
Character.valueOf('F'));
public NFPatternParser(String pattern) {
super(pattern);
}
protected void finalizeConverter(char c) {
if (contextCharList.contains(Character.valueOf(c))) {
PatternConverter pc = new NFPatternConverter(formattingInfo, c);
addConverter(pc);
currentLiteral.setLength(0);
} else {
super.finalizeConverter(c);
}
}
private static class NFPatternConverter extends PatternConverter {
private char type;
NFPatternConverter(FormattingInfo formattingInfo, char type) {
super(formattingInfo);
this.type = type;
}
@Override
public String convert(LoggingEvent event) {
LoggingContext.getInstance().shouldGenerateLocationInfo(event.getLogger());
LocationInfo locationInfo = LoggingContext.getInstance().getLocationInfo(event);
if (locationInfo == null) {
return "";
}
switch (type) {
case 'M':
return locationInfo.getMethodName();
case 'c':
return event.getLoggerName();
case 'C':
return locationInfo.getClassName();
case 'L':
return locationInfo.getLineNumber();
case 'l':
return (locationInfo.getFileName() + ":"
+ locationInfo.getClassName() + " "
+ locationInfo.getLineNumber() + " " + locationInfo
.getMethodName());
case 'F':
return locationInfo.getFileName();
}
return "";
}
}
} | 6,523 |
0 | Create_ds/blitz4j/src/main/java/com/netflix/logging | Create_ds/blitz4j/src/main/java/com/netflix/logging/messaging/BatcherFactory.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.logging.messaging;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
/**
* A simple singleton factory class that finds the batchers by name. Batchers are also created by the factory if
* needed. The users of the batcher have to simply override the {@link com.netflix.logging.messaging.MessageProcessor}
* to specify what to do with the batched messages.
*
* It is the user's responsibility to make sure the name is unique (ie) a FQCN would be ideal for a name. The user
* should also remove the batcher from the cache during shutdown or when they do not need it.
*
* The methods are not synchronized for performance reasons and there is very little downside of not synchronizing it
* as the last put wins and the already existing objects are garbage collected.
*
*
* @author Karthik Ranganathan
*
*/
public class BatcherFactory {
    private static BatcherFactory batcherFactory = new BatcherFactory();
    // Cache of all batchers, keyed by name. A ConcurrentHashMap is required
    // here: getBatcher() reads without any lock while createBatcher()
    // publishes new entries, and a plain HashMap is not safe for that
    // concurrent access pattern.
    private static Map<String, MessageBatcher> batcherMap = new ConcurrentHashMap<String, MessageBatcher>();
    /**
     * Get a batcher by name.
     *
     * @param name - The name of the batcher
     * @return - the batcher associated with the name, or null if none exists
     */
    public static MessageBatcher getBatcher(String name) {
        return batcherMap.get(name);
    }
    /**
     * Creates the batcher for the given name, or returns the existing one.
     * Creation is synchronized (double-checked against the concurrent map) so
     * that at most one MessageBatcher — which starts background threads in its
     * constructor — is ever built per name.
     *
     * @param name - The name of the batcher to be created
     * @param processor - The user override for actions to be performed on the batched messages.
     */
    public static MessageBatcher createBatcher(String name,
            MessageProcessor processor) {
        MessageBatcher batcher = batcherMap.get(name);
        if (batcher == null) {
            synchronized (BatcherFactory.class) {
                batcher = batcherMap.get(name);
                if (batcher == null) {
                    batcher = new MessageBatcher(name, processor);
                    batcherMap.put(name, batcher);
                }
            }
        }
        return batcher;
    }
    /**
     * Removes the batcher from the cache.
     * @param name - The name of the batcher to be removed
     */
    public static void removeBatcher(String name) {
        batcherMap.remove(name);
    }
}
| 6,524 |
0 | Create_ds/blitz4j/src/main/java/com/netflix/logging | Create_ds/blitz4j/src/main/java/com/netflix/logging/messaging/MessageBatcher.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.logging.messaging;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.netflix.blitz4j.BlitzConfig;
import com.netflix.blitz4j.LoggingConfiguration;
import com.netflix.config.DynamicBooleanProperty;
import com.netflix.config.DynamicPropertyFactory;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.Counter;
import com.netflix.servo.monitor.Monitors;
import com.netflix.servo.monitor.Stopwatch;
import com.netflix.servo.monitor.Timer;
/**
* A general purpose batcher that combines messages into batches. Callers of
* process don't block. Configurable parameters control the number of messages
* that may be queued awaiting processing, the maximum size of a batch, the
* maximum time a message waits to be combined with others in a batch and the
* size of the pool of threads that process batches.
* <p>
* The implementation aims to avoid congestion, by working more efficiently as
* load increases. As messages arrive faster, the collector executes less code
* and batch sizes increase (up to the configured maximum). It should be more
* efficient to process a batch than to process the messages individually.
* <p>
* The implementation works by adding the arriving messages to a queue. The collector
* thread takes messages from the queue and collects them into batches. When a
* batch is big enough or old enough, the collector passes it to the processor,
* which passes the batch to the target stream.
* <p>
* The processor maintains a thread pool. If there's more work than threads, the
* collector participates in processing by default, and consequently stops
* collecting more batches.
*
* @author Karthik Ranganathan
*/
public class MessageBatcher<T> {
    private static final BlitzConfig CONFIGURATION = LoggingConfiguration.getInstance().getConfiguration();
    private static final String DOT = ".";
    private static final String BATCHER_PREFIX = "batcher.";
    private static final String COLLECTOR_SUFFIX = ".collector";
    // Set by stop(); tells the collector thread to exit its main loop.
    private boolean shouldCollectorShutdown;
    // The batch currently being assembled by the collector thread.
    List<Object> batch;
    protected String name;
    // Incoming messages wait here until the collector drains them into a batch.
    protected BlockingQueue queue;
    protected int maxMessages;
    // in nsec. NOTE(review): this field is static but is assigned through the
    // per-instance setter setBatchMaxDelay(), so the most recently configured
    // batcher's delay applies to ALL batchers — verify whether this was
    // intended to be an instance field.
    protected static long maxDelay;
    protected Collector collector;
    protected ThreadPoolExecutor processor;
    protected MessageProcessor target = null;
    /**
     * The number of batches that are currently being processed by the target
     * stream.
     */
    protected final AtomicInteger concurrentBatches = new AtomicInteger(0);
    protected Timer queueSizeTracer;
    protected Timer batchSyncPutTracer;
    // NOTE(review): assigned in the constructor but never used afterwards.
    protected Timer threadSubmitTracer;
    protected Timer processTimeTracer;
    protected Timer avgBatchSizeTracer;
    protected Counter queueOverflowCounter;
    private volatile boolean isShutDown;
    private AtomicLong numberAdded = new AtomicLong();
    private AtomicLong numberDropped = new AtomicLong();
    private boolean blockingProperty;
    private boolean isCollectorPaused;
    private Counter processCount;
    public static final String POOL_MAX_THREADS = "maxThreads";
    public static final String POOL_MIN_THREADS = "minThreads";
    public static final String POOL_KEEP_ALIVE_TIME = "keepAliveTime";
    /**
     * Creates a batcher with the given logical name: sizes the queue and the
     * batch from configuration, starts the daemon collector thread, creates
     * the processing thread pool, and registers the batcher with servo
     * monitoring.
     *
     * @param name
     *            - the logical batcher name; prefixed with "batcher." for all
     *            configuration lookups.
     * @param target
     *            - the processor invoked with each completed batch.
     */
    public MessageBatcher(String name, MessageProcessor target) {
        this.name = BATCHER_PREFIX + name;
        this.target = target;
        queue = new ArrayBlockingQueue<T>(CONFIGURATION.getBatcherQueueMaxMessages(this.name));
        setBatchMaxMessages(CONFIGURATION.getBatchSize(this.name));
        batch = new ArrayList<Object>(maxMessages);
        setBatchMaxDelay(CONFIGURATION
                .getBatcherMaxDelay(this.name));
        collector = new Collector(this, this.name + COLLECTOR_SUFFIX);
        // Immediate Executor creates a factory that uses daemon threads
        createProcessor(this.name);
        queueSizeTracer = Monitors.newTimer("queue_size");
        batchSyncPutTracer = Monitors.newTimer("waitTimeforBuffer");
        avgBatchSizeTracer = Monitors.newTimer("batch_size");
        processCount = Monitors.newCounter("messages_processed");
        threadSubmitTracer = Monitors.newTimer("thread_invocation_time");
        processTimeTracer = Monitors.newTimer("message_processTime");
        queueOverflowCounter = Monitors.newCounter("queue_overflow");
        blockingProperty = CONFIGURATION
                .shouldWaitWhenBatcherQueueNotEmpty(this.name);
        collector.setDaemon(true);
        collector.start();
        try {
            Monitors.registerObject(this.name, this);
        } catch (Throwable e) {
            // Monitoring registration failures must never break the batcher;
            // they are only printed when configured to do so.
            if (CONFIGURATION
                    .shouldPrintLoggingErrors()) {
                e.printStackTrace();
            }
        }
    }
    /** Set the stream that will process each batch of messages. */
    public synchronized void setTarget(MessageProcessor target) {
        this.target = target;
    }
    /**
     * Set the maximum number of messages in a batch. Setting this to 1 will
     * prevent batching; that is, messages will be passed to
     * target.processMessage one at a time.
     */
    public synchronized void setBatchMaxMessages(int maxMessages) {
        this.maxMessages = maxMessages;
    }
    /**
     * Set the maximum time a message spends waiting to complete a full batch,
     * in seconds. This doesn't limit the time spent in the queue.
     */
    public synchronized void setBatchMaxDelay(double maxDelaySec) {
        // NOTE(review): writes the *static* maxDelay field — this affects
        // every batcher instance, not just this one.
        maxDelay = (long) (maxDelaySec * 1000000000);
    }
    /**
     * Set the max threads for the processors
     * @param maxThreads - max threads that can be launched for processing
     */
    public void setProcessorMaxThreads(int maxThreads) {
        // Core size must be lowered first; ThreadPoolExecutor requires
        // corePoolSize <= maximumPoolSize at all times.
        if (processor.getCorePoolSize() > maxThreads) {
            processor.setCorePoolSize(maxThreads);
        }
        processor.setMaximumPoolSize(maxThreads);
    }
    /**
     * Checks to see if there is space available in the queue
     *
     * @return - true, if available false otherwise
     */
    public boolean isSpaceAvailable() {
        return (queue.remainingCapacity() > 0);
    }
    /**
     * Processes the message sent to the batcher. This method just writes the
     * message to the queue and returns immediately. If the queue is full, the
     * messages are dropped immediately and corresponding counter is
     * incremented.
     *
     * @param message
     *            - The message to be processed
     * @return boolean - true if the message is queued for processing,false(this
     *         could happen if the queue is full) otherwise
     */
    public boolean process(T message) {
        // If this batcher has been shutdown, do not accept any more messages
        if (isShutDown) {
            return false;
        }
        try {
            queueSizeTracer.record(queue.size());
        } catch (Throwable ignored) {
        }
        // Non-blocking offer: a full queue means the message is dropped and
        // accounted for, never waited on.
        if (!queue.offer(message)) {
            numberDropped.incrementAndGet();
            queueOverflowCounter.increment();
            return false;
        }
        numberAdded.incrementAndGet();
        return true;
    }
    /**
     * Processes the message sent to the batcher. This method tries to write to
     * the queue. If the queue is full, the send blocks and waits for the
     * available space.
     *
     * @param message
     *            - The message to be processed
     */
    public void processSync(T message) {
        // If this batcher has been shutdown, do not accept any more messages
        if (isShutDown) {
            return;
        }
        try {
            queueSizeTracer.record(queue.size());
        } catch (Throwable ignored) {
        }
        try {
            Stopwatch s = batchSyncPutTracer.start();
            queue.put(message);
            s.stop();
        } catch (InterruptedException e) {
            // NOTE(review): the interrupt status is not restored here
            // (Thread.currentThread().interrupt()) and the message is
            // silently dropped.
            return;
        }
        numberAdded.incrementAndGet();
    }
    /**
     * Processes the messages sent to the batcher. This method just writes the
     * message to the queue and returns immediately. If the queue is full, the
     * messages are dropped immediately and corresponding counter is
     * incremented.
     *
     * @param objects
     *            - The messages to be processed
     */
    public void process(List<T> objects) {
        for (T message : objects) {
            // If this batcher has been shutdown, do not accept any more
            // messages
            if (isShutDown) {
                return;
            }
            process(message);
        }
    }
    /**
     * Processes the messages sent to the batcher. The messages are first queued
     * and then will be processed by the
     * {@link com.netflix.logging.messaging.MessageProcessor}
     *
     * @param objects
     *            - The messages to be processed
     * @param sync
     *            - if true, waits for the queue to make space, if false returns
     *            immediately after dropping the message
     */
    public void process(List<T> objects, boolean sync) {
        for (T message : objects) {
            // If this batcher has been shutdown, do not accept any more
            // messages
            if (isShutDown) {
                return;
            }
            if (sync) {
                processSync(message);
            } else {
                process(message);
            }
        }
    }
    /**
     * Pause the collector. The collector stops picking up messages from the
     * queue.
     */
    public void pause() {
        if (!isShutDown) {
            this.isCollectorPaused = true;
        }
    }
    public boolean isPaused() {
        return this.isCollectorPaused;
    }
    /**
     * Resume the collector. The collector resumes picking up messages from the
     * queue and calling the processors.
     */
    public void resume() {
        if (!isShutDown) {
            this.isCollectorPaused = false;
        }
    }
    /**
     * Stops the batcher. The Batcher has to wait for the other processes like
     * the Collector and the Executor to complete. It waits until it is notified
     * that the other processes have completed gracefully. The collector waits
     * until there are no more messages in the queue(tries 3 times waiting for
     * 0.5 seconds each) and then shuts down gracefully.
     *
     *
     */
    public void stop() {
        /*
         * Sets the shutdown flag. Future sends to the batcher are not accepted.
         * The processors wait for the current messages in the queue and with
         * the processor or collector to complete
         */
        isShutDown = true;
        int waitTimeinMillis = CONFIGURATION.getBatcherWaitTimeBeforeShutdown(this.name);
        long timeToWait = waitTimeinMillis + System.currentTimeMillis();
        // Poll once a second until the queue and the in-flight batch drain,
        // or the configured shutdown deadline passes.
        while ((queue.size() > 0 || batch.size() > 0)
                && (System.currentTimeMillis() < timeToWait)) {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                break;
            }
        }
        try {
            shouldCollectorShutdown = true;
            processor.shutdownNow();
            /*
             * processor.awaitTermination(10000, TimeUnit.SECONDS); if
             * (!processor.isShutdown()) { processor.shutdownNow(); }
             */
        } catch (Throwable e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        }
    }
    /**
     * The class that processes the messages in a batch by calling the
     * implementor of the MessageProcessor interface.
     *
     *
     */
    private static class ProcessMessages implements Runnable {
        public ProcessMessages(MessageBatcher stream, List batch) {
            this.stream = stream;
            this.batch = batch;
            this.processMessagesTracer = stream.processTimeTracer;
            this.avgConcurrentBatches = Monitors.newTimer(stream.name
                    + ".concurrentBatches");
        }
        private final MessageBatcher stream;
        private List batch;
        private Timer processMessagesTracer;
        private Timer avgConcurrentBatches;
        /** Process the batch. */
        public void run() {
            try {
                if (batch == null) {
                    return;
                }
                // Track how many batches are in flight while this one is
                // handed to the target processor.
                int inProcess = stream.concurrentBatches.incrementAndGet();
                try {
                    avgConcurrentBatches.record(inProcess);
                    Stopwatch s = processMessagesTracer.start();
                    stream.target.process(batch);
                    s.stop();
                } finally {
                    stream.concurrentBatches.decrementAndGet();
                }
            } catch (Throwable e) {
                e.printStackTrace();
            }
        }
    }
    // Daemon thread that drains the queue into batches and hands each
    // completed batch to the processor pool.
    private class Collector extends Thread {
        private static final int SLEEP_TIME_MS = 1;
        private Timer processTimeTracer;
        // NOTE(review): processCount is a Counter object, so this metric name
        // is built from its toString() rather than the batcher name — confirm
        // the intended name.
        private Counter rejectedCounter = Monitors.newCounter(processCount
                + ".rejected");
        private static final int RETRY_EXECUTION_TIMEOUT_MS = 1;
        public Collector(MessageBatcher stream, String name) {
            super(name);
            processTimeTracer = Monitors.newTimer(name + ".processTime");
            this.stream = stream;
            queueSizeTracer = Monitors.newTimer(name
                    + ".queue_size_at_drain");
        }
        private final MessageBatcher stream;
        private final Timer queueSizeTracer;
        /** Process messages from the queue, after grouping them into batches. */
        public void run() {
            int batchSize = 0;
            while (!shouldCollectorShutdown) {
                if (isCollectorPaused) {
                    try {
                        Thread.sleep(SLEEP_TIME_MS);
                    } catch (InterruptedException ignore) {
                    }
                    continue;
                }
                try {
                    // Fill the current batch up to maxMessages, waiting at
                    // most maxDelay (ns) from the start of the fill attempt.
                    if (batch.size() < stream.maxMessages) {
                        long now = System.nanoTime();
                        final long firstTime = now;
                        do {
                            if (stream.queue.drainTo(batch, stream.maxMessages
                                    - batch.size()) <= 0) {
                                long maxWait = firstTime + stream.maxDelay
                                        - now;
                                if (maxWait <= 0) { // timed out
                                    break;
                                }
                                // Wait for a message to arrive:
                                Object nextMessage = null;
                                try {
                                    nextMessage = stream.queue.poll(maxWait,
                                            TimeUnit.NANOSECONDS);
                                } catch (InterruptedException ignore) {
                                }
                                if (nextMessage == null) { // timed out
                                    break;
                                }
                                batch.add(nextMessage);
                                now = System.nanoTime();
                            }
                        } while (batch.size() < stream.maxMessages);
                    }
                    batchSize = batch.size();
                    if (batchSize > 0) {
                        try {
                            queueSizeTracer.record(stream.queue.size());
                        } catch (Exception ignored) {
                        }
                        avgBatchSizeTracer.record(batchSize);
                        Stopwatch s = processTimeTracer.start();
                        boolean retryExecution = false;
                        // Retry submission until a pool thread accepts the
                        // batch; rejection only happens when the pool is
                        // configured to reject-when-full.
                        do {
                            try {
                                stream.processor.execute(new ProcessMessages(
                                        stream, batch));
                                retryExecution = false;
                            } catch (RejectedExecutionException re) {
                                rejectedCounter.increment();
                                retryExecution = true;
                                Thread.sleep(RETRY_EXECUTION_TIMEOUT_MS);
                            }
                        } while (retryExecution);
                        processCount.increment(batchSize);
                        s.stop();
                        // Start a fresh batch; the submitted one is now owned
                        // by the ProcessMessages runnable.
                        batch = new ArrayList(stream.maxMessages);
                    }
                } catch (Throwable e) {
                    if (CONFIGURATION.shouldPrintLoggingErrors()) {
                        e.printStackTrace();
                    }
                }
            } // - while (!shutdownCollector)
        } // - run()
    }
    /**
     * The size of the the queue in which the messages are batches
     *
     * @return- size of the queue
     */
    @Monitor(name = "batcherQueueSize", type = DataSourceType.GAUGE)
    public int getSize() {
        if (queue != null) {
            return queue.size();
        } else {
            return 0;
        }
    }
    /**
     * Resets the statistics that keeps the count of number of messages added to
     * this batcher.
     */
    public void resetNumberAdded() {
        numberAdded.set(0);
    }
    /**
     * Resets the statistics that keeps the count of number of messages dropped
     * by this batcher.
     */
    public void resetNumberDropped() {
        numberDropped.set(0);
    }
    /**
     * Gets the statistics count of number of messages added to this batcher.
     */
    @Monitor(name = "numberAdded", type = DataSourceType.GAUGE)
    public long getNumberAdded() {
        return numberAdded.get();
    }
    /**
     * Gets the statistics count of number of messages dropped by this batcher.
     */
    @Monitor(name = "numberDropped", type = DataSourceType.GAUGE)
    public long getNumberDropped() {
        return numberDropped.get();
    }
    /**
     * Gets the information whether the batcher is blocking or not blocking. By
     * default, the batcher is non-blocking and the messages are just dropped if
     * the queue is full.
     *
     * If the batcher is made blocking, the sends block and wait indefinitely
     * until space is made in the batcher.
     *
     * @return - true if blocking, false otherwise
     */
    @Monitor(name = "blocking", type = DataSourceType.INFORMATIONAL)
    public boolean isBlocking() {
        return blockingProperty;
    }
    // Builds the processing thread pool from configuration. Unless configured
    // to reject when all threads are busy, a CallerRunsPolicy makes the
    // collector itself execute the batch (back-pressure).
    private void createProcessor(String name) {
        int minThreads = CONFIGURATION
                .getBatcherMinThreads(this.name);
        int maxThreads = CONFIGURATION
                .getBatcherMaxThreads(this.name);
        int keepAliveTime = CONFIGURATION.getBatcherThreadKeepAliveTime(this.name);
        ThreadFactory threadFactory = new ThreadFactoryBuilder()
                .setDaemon(true).setNameFormat(this.name + "-process").build();
        this.processor = new ThreadPoolExecutor(minThreads, maxThreads,
                keepAliveTime, TimeUnit.SECONDS, new SynchronousQueue(),
                threadFactory);
        boolean shouldRejectWhenFull = CONFIGURATION
                .shouldRejectWhenAllBatcherThreadsUsed(this.name);
        if (!shouldRejectWhenFull) {
            this.processor
                    .setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy() {
                        @Override
                        public void rejectedExecution(Runnable r,
                                ThreadPoolExecutor e) {
                            super.rejectedExecution(r, e);
                        }
                    });
        }
    }
}
| 6,525 |
0 | Create_ds/blitz4j/src/main/java/com/netflix/logging | Create_ds/blitz4j/src/main/java/com/netflix/logging/messaging/MessageProcessor.java | /*
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.logging.messaging;
import java.util.List;
/**
* An interface for handling the batched messages. The implementers need to
* define what needs to be done with the batched messages.
*
* @author Karthik Ranganathan
*
*/
public interface MessageProcessor<T> {
    /**
     * Handles one batch of objects collected by the batcher. Implementations
     * decide what is done with the batch.
     *
     * @param objects
     *            the list of objects that were batched together
     */
    void process(List<T> objects);
}
| 6,526 |
0 | Create_ds/aroma-paper-artifacts/reference | Create_ds/aroma-paper-artifacts/reference/data/example_query.java | class C{
// NOTE(review): this looks like a code-search query artifact rather than
// production code — confirm before "fixing" the patterns below.
public void m() {
    StringBuffer strBuf = new StringBuffer(); // NOTE(review): created but never used
    Iterator itr = list.iterator(); // raw Iterator over the externally declared 'list'
    while (itr.hasNext()) {
        itr.next(); // elements are consumed and discarded
    }
}
} | 6,527 |
0 | Create_ds/aroma-paper-artifacts/reference/src/main | Create_ds/aroma-paper-artifacts/reference/src/main/java/ConvertJava.java | import org.antlr.v4.runtime.*;
import org.antlr.v4.runtime.tree.ParseTree;
import org.antlr.v4.runtime.tree.RuleNode;
import org.antlr.v4.runtime.tree.TerminalNodeImpl;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintWriter;
import java.lang.reflect.Constructor;
import java.lang.reflect.Method;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.antlr.v4.runtime.tree.Tree;
import org.json.JSONArray;
import org.json.JSONObject;
/**
 * Converts Java source files into a simplified JSON AST, one JSON record per
 * method body, using an ANTLR-generated JavaLexer/JavaParser. Each record
 * carries the file path, enclosing class and method names, begin/end lines,
 * and the serialized tree. Identifier-like tokens are abstracted to "#" in
 * the per-node structure string. NOTE(review): appears to be a corpus
 * preparation tool (Aroma-style code search) — confirm before repurposing.
 */
public class ConvertJava {
    // Hard cap on parse-tree recursion; subtrees deeper than this are dropped
    // (getSerializedTree returns null past the limit).
    private static final int MAX_DEPTH = 1000;
    // Lexer vocabulary, used to map token types to display names.
    Vocabulary vocab;
    // Token display names treated as identifier-like leaves; such tokens are
    // replaced by "#" in the structure string.
    ArrayList<String> identifiersRuleNames =
            new ArrayList<String>(
                    Arrays.asList(
                            "IDENTIFIER",
                            "localVar",
                            "CHAR_LITERAL",
                            "STRING_LITERAL",
                            "BOOL_LITERAL",
                            "NULL_LITERAL",
                            "DECIMAL_LITERAL",
                            "HEX_LITERAL",
                            "OCT_LITERAL",
                            "BINARY_LITERAL",
                            "FLOAT_LITERAL",
                            "HEX_FLOAT_LITERAL"));
    // Parser rule contexts in which an identifier is tagged as a local
    // variable occurrence ("var": true in the output).
    ArrayList<String> localVarContexts =
            new ArrayList<String>(
                    Arrays.asList("variableDeclaratorId", "primary", "catchClause", "lambdaParameters"));
    // Parser rule names, indexed by rule number; set per file in setRuleNames.
    List<String> ruleNames = null;
    /** Caches the parser's rule-name table for getRuleName lookups. */
    private void setRuleNames(Parser recog) {
        String[] ruleNames = recog != null ? recog.getRuleNames() : null;
        this.ruleNames = ruleNames != null ? Arrays.asList(ruleNames) : null;
    }
    /** Returns the grammar rule name for a (rule) node of the parse tree. */
    public String getRuleName(Tree t) {
        int ruleIndex = ((RuleNode) t).getRuleContext().getRuleIndex();
        return ruleNames.get(ruleIndex);
    }
    /** Opens the output file that serialized method records are written to. */
    public void openWriter(String file) throws FileNotFoundException {
        writer = new PrintWriter(file);
    }
    /** Closes the output writer; call once after all files are processed. */
    public void closeWriter() {
        writer.close();
    }
    // Running counters reported on stdout after each file.
    private int totalFiles = 0;
    private int successFullFiles = 0;
    private int totalMethods = 0;
    /**
     * Parses one Java file starting at the given grammar start symbol
     * (invoked reflectively on the parser) and serializes every method body
     * found to the writer. Parse failures are caught and logged; processing
     * continues with the next file.
     */
    public void serializeFile(String f, String startSymbol) {
        try {
            long t1, t2, t3;
            t1 = System.currentTimeMillis();
            totalFiles++;
            // NOTE(review): the next three locals are never used.
            Class classDefinition;
            Class[] type;
            Object[] obj;
            thisFileName = f;
            stackDepth = 0;
            Lexer lexer = new JavaLexer(new ANTLRFileStream(f));
            CommonTokenStream tokens = new CommonTokenStream(lexer);
            vocab = lexer.getVocabulary();
            Parser parser = new JavaParser(tokens);
            // Bail on the first syntax error instead of attempting recovery.
            parser.setErrorHandler(new BailErrorStrategy());
            // The start rule (e.g. "compilationUnit") is chosen at runtime via
            // reflection on the generated parser.
            Method method = parser.getClass().getMethod(startSymbol);
            ParserRuleContext t = (ParserRuleContext) method.invoke(parser);
            parser.setBuildParseTree(false);
            setRuleNames(parser);
            t2 = System.currentTimeMillis();
            JSONArray tree = getSerializedTree(t, tokens);
            // A [structure, child] pair collapses to its single child.
            if (tree.length() == 2) {
                tree = tree.getJSONArray(1);
            }
            successFullFiles++;
            t3 = System.currentTimeMillis();
            System.out.println("Parsing, Processing times: " + (t2 - t1) + ", " + (t3 - t2));
            System.out.println(
                    "Total processed files, Successfully processed file, total methods: "
                            + totalFiles
                            + ", "
                            + successFullFiles
                            + ", "
                            + totalMethods
                            + ", "
                            + thisFileName);
            // System.out.println(tree.toString(4));
        } catch (Exception e) {
            System.out.println(
                    "Total processed files, Successfully processed file, total methods: "
                            + totalFiles
                            + ", "
                            + successFullFiles
                            + ", "
                            + totalMethods
                            + ", "
                            + thisFileName);
            System.err.println("Parser Exception: " + e);
            e.printStackTrace(); // so we can get the stack trace
        }
    }
    /**
     * Collects the hidden-channel text (whitespace/comments) immediately
     * before (isBefore == true) or after a terminal node. Returns "" when the
     * token index is invalid or there are no hidden tokens.
     */
    private String getLeadingOrTrailing(ParseTree tree, CommonTokenStream tokens, boolean isBefore) {
        int lastIndexOfToken;
        StringBuilder builder = new StringBuilder("");
        lastIndexOfToken = ((TerminalNodeImpl) tree).getSymbol().getTokenIndex();
        List<Token> ws = null;
        int HIDDEN = 1; // ANTLR's default hidden channel number
        if (lastIndexOfToken < 0) {
            return "";
        }
        if (isBefore) {
            ws = tokens.getHiddenTokensToLeft(lastIndexOfToken, HIDDEN);
        } else if (lastIndexOfToken >= 0 || lastIndexOfToken == -2) {
            // NOTE(review): the == -2 arm is unreachable here since negative
            // indices returned "" above — presumably a leftover sentinel check.
            ws = tokens.getHiddenTokensToRight(lastIndexOfToken, HIDDEN);
        }
        if (ws != null) {
            for (Token wst : ws) {
                builder.append(wst.getText());
            }
        }
        return builder.toString();
    }
    // Traversal state shared across the recursive serialization. These are
    // mutated in strict pre/post order by getSerializedTree and read by
    // dumpMethodAst; do not reorder writes to them.
    private boolean childHasLeaf;
    private String thisClassName;
    private String thisMethodName;
    private String thisFileName;
    private int beginLine, endLine;
    private PrintWriter writer;
    private int stackDepth = 0;
    /**
     * If the current node is a classDeclaration and child i is the IDENTIFIER
     * following the 'class' keyword, records it as the current class name.
     */
    private void setClassName(String thisRuleName, RuleContext t, int i) {
        if (thisRuleName.equals("classDeclaration") && i > 0) {
            ParseTree prev = t.getChild(i - 1);
            ParseTree curr = t.getChild(i);
            if (prev instanceof TerminalNodeImpl
                    && curr instanceof TerminalNodeImpl
                    && prev.getText().equals("class")) {
                Token thisToken = ((TerminalNodeImpl) curr).getSymbol();
                String ruleName = vocab.getDisplayName(thisToken.getType());
                if (ruleName.equals("IDENTIFIER")) {
                    thisClassName = thisToken.getText();
                    //System.out.println("Processing Class: " + thisClassName);
                }
            }
        }
    }
    // private void setMethodName(String thisRuleName, RuleContext t) {
    //     if (thisRuleName.equals("methodDeclaration")) {
    //         //System.out.println("Processing Method: " + t.getText());
    //         this.thisMethodName = t.getChild(1).getText();
    //         //System.out.println("*********"+this.thisMethodName);
    //     }
    //
    // }
    /**
     * When the just-serialized node is a methodBody inside a known class,
     * writes one JSON record {path, class, method, beginline, endline, ast}
     * to the output. Malformed single-child trees are skipped silently.
     */
    private void dumpMethodAst(String thisRuleName, JSONArray simpleTree) {
        if (thisClassName != null && thisRuleName.equals("methodBody")) {
            if (simpleTree.length() == 2) {
                try {
                    simpleTree = simpleTree.getJSONArray(1);
                } catch (Exception e) {
                    // Element 1 was a token object rather than a subtree —
                    // nothing dumpable for this method body.
                    // System.err.println(simpleTree);
                    // e.printStackTrace();
                    // System.out.println("In " + thisFileName + ":" + thisClassName + ":" + thisMethodName+":"+beginLine);
                    return;
                }
            }
            JSONObject tmp = new JSONObject();
            tmp.put("path", thisFileName);
            tmp.put("class", thisClassName);
            tmp.put("method", thisMethodName);
            tmp.put("beginline", beginLine);
            tmp.put("endline", endLine);
            tmp.put("ast", simpleTree);
            writer.println(tmp);
            writer.flush();
            totalMethods++;
            //System.out.println("Logged " + thisFileName + ":" + thisClassName + ":" + thisMethodName);
        }
    }
    /**
     * Recursively serializes a parse subtree into a JSONArray whose element 0
     * is a "structure string" (identifier-like leaves abstracted to "#") and
     * whose remaining elements are token objects or child arrays. Returns
     * null for empty nodes or when MAX_DEPTH is exceeded. Side effects: saves
     * and restores thisClassName/thisMethodName/beginLine around class and
     * method declarations, updates endLine per token, and reports via
     * childHasLeaf whether the subtree contained an abstracted leaf.
     */
    private JSONArray getSerializedTree(RuleContext t, CommonTokenStream tokens) {
        stackDepth++;
        int n = t.getChildCount();
        boolean hasLeaf = false;
        if (n == 0 || stackDepth > MAX_DEPTH) {
            childHasLeaf = false;
            stackDepth--;
            return null;
        }
        String thisRuleName = getRuleName(t);
        String oldClassName = null;
        String oldMethodName = null;
        int oldBeginLine = 0;
        // Save enclosing class/method context so nested declarations restore
        // it on the way out (see the tail of this method).
        if (thisRuleName.equals("classDeclaration")) {
            oldClassName = thisClassName;
        }
        if (thisRuleName.equals("methodDeclaration")) {
            oldMethodName = thisMethodName;
            thisMethodName = ((TerminalNodeImpl) t.getChild(1)).getText();
            oldBeginLine = beginLine;
            beginLine = ((TerminalNodeImpl) t.getChild(1)).getSymbol().getLine();
        }
        JSONArray simpleTree = new JSONArray();
        simpleTree.put(""); // placeholder for the structure string, set at the end
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < n; i++) {
            ParseTree tree = t.getChild(i);
            if (tree instanceof TerminalNodeImpl) {
                String s = tree.getText();
                if (!s.equals("<EOF>")) {
                    Token thisToken = ((TerminalNodeImpl) tree).getSymbol();
                    String ruleName = vocab.getDisplayName(thisToken.getType());
                    String ws1 = getLeadingOrTrailing(tree, tokens, true);
                    String ws2 = getLeadingOrTrailing(tree, tokens, false);
                    JSONObject tok = new JSONObject();
                    tok.put("token", s);
                    tok.put("leading", ws1);
                    tok.put("trailing", ws2);
                    boolean isLeaf;
                    if (identifiersRuleNames.contains(ruleName)) {
                        if (localVarContexts.contains(thisRuleName)) {
                            // Identifier occurs where the grammar declares/uses
                            // a local variable — tag it.
                            tok.put("var", true);
                            // System.out.println(s);
                        }
                        isLeaf = true;
                        sb.append("#"); // abstract identifiers/literals in the structure
                        hasLeaf = true;
                        setClassName(thisRuleName, t, i);
                    } else {
                        isLeaf = false;
                        sb.append(s); // keywords/punctuation kept verbatim
                    }
                    if (isLeaf) tok.put("leaf", isLeaf);
                    tok.put("line", thisToken.getLine());
                    endLine = thisToken.getLine();
                    simpleTree.put(tok);
                }
            } else {
                JSONArray child = getSerializedTree((RuleContext) tree, tokens);
                if (child != null && child.length() > 0) {
                    if (child.length() == 2) {
                        // Single-child node: splice the child in directly.
                        simpleTree.put(child.get(1));
                        sb.append(child.get(0));
                        hasLeaf = hasLeaf || childHasLeaf;
                    } else if (!childHasLeaf
                            && !child.get(0).equals("{}")) { // see the while(m.find()){} query
                        // Leafless subtree: flatten its elements into this node.
                        sb.append(child.get(0));
                        for (int j = 1; j < child.length(); j++) {
                            simpleTree.put(child.get(j));
                        }
                    } else {
                        // Subtree with leaves: keep nested, abstracted as "#".
                        sb.append("#");
                        hasLeaf = true;
                        simpleTree.put(child);
                    }
                }
            }
        }
        simpleTree.put(0, sb.toString());
        childHasLeaf = hasLeaf;
        dumpMethodAst(thisRuleName, simpleTree);
        // Restore the enclosing declaration context saved above.
        if (thisRuleName.equals("classDeclaration")) {
            thisClassName = oldClassName;
        }
        if (thisRuleName.equals("methodDeclaration")) {
            thisMethodName = oldMethodName;
            beginLine = oldBeginLine;
        }
        stackDepth--;
        return simpleTree;
    }
    /**
     * CLI entry point: args[0] = grammar start symbol, args[1] = output file,
     * args[2] = a .java file or a directory to walk recursively.
     */
    public static void main(String args[]) throws IOException {
        ConvertJava p = new ConvertJava();
        p.openWriter(args[1]);
        if (Files.isRegularFile(new File(args[2]).toPath())) {
            p.serializeFile(args[2], args[0]);
        } else {
            Files.walk(Paths.get(args[2]))
                    .filter(path -> !Files.isDirectory(path) && path.toString().endsWith(".java"))
                    .forEach(path -> p.serializeFile(path.normalize().toString(), args[0]));
        }
        p.closeWriter();
    }
}
| 6,528 |
0 | Create_ds/sagemaker-spark/sagemaker-spark-sdk/src/main/scala/aialgorithms | Create_ds/sagemaker-spark/sagemaker-spark-sdk/src/main/scala/aialgorithms/proto2/RecordProto2.java | // Generated by the protocol buffer compiler. DO NOT EDIT!
// source: AIAlgorithmsProtobufSchema/p.proto
package aialgorithms.proto2;
public final class RecordProto2 {
private RecordProto2() {}
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistry registry) {
}
public interface Float32TensorOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// repeated float values = 1 [packed = true];
/**
* <code>repeated float values = 1 [packed = true];</code>
*
* <pre>
* Each value in the vector. If keys is empty this is treated as a
* dense vector.
* </pre>
*/
java.util.List<java.lang.Float> getValuesList();
/**
* <code>repeated float values = 1 [packed = true];</code>
*
* <pre>
* Each value in the vector. If keys is empty this is treated as a
* dense vector.
* </pre>
*/
int getValuesCount();
/**
* <code>repeated float values = 1 [packed = true];</code>
*
* <pre>
* Each value in the vector. If keys is empty this is treated as a
* dense vector.
* </pre>
*/
float getValues(int index);
// repeated uint64 keys = 2 [packed = true];
/**
* <code>repeated uint64 keys = 2 [packed = true];</code>
*
* <pre>
* If not empty then the vector is treated as sparse with
* each key specifying the location of the value in the sparse vector.
* </pre>
*/
java.util.List<java.lang.Long> getKeysList();
/**
* <code>repeated uint64 keys = 2 [packed = true];</code>
*
* <pre>
* If not empty then the vector is treated as sparse with
* each key specifying the location of the value in the sparse vector.
* </pre>
*/
int getKeysCount();
/**
* <code>repeated uint64 keys = 2 [packed = true];</code>
*
* <pre>
* If not empty then the vector is treated as sparse with
* each key specifying the location of the value in the sparse vector.
* </pre>
*/
long getKeys(int index);
// repeated uint64 shape = 3 [packed = true];
/**
* <code>repeated uint64 shape = 3 [packed = true];</code>
*
* <pre>
* Optional shape which will allow the vector to represent a matrix.
* e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
* and keys[i] % 20 will give the column.
* This also supports n-dimensonal tensors.
* NB. this must be specified if the tensor is sparse.
* </pre>
*/
java.util.List<java.lang.Long> getShapeList();
/**
* <code>repeated uint64 shape = 3 [packed = true];</code>
*
* <pre>
* Optional shape which will allow the vector to represent a matrix.
* e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
* and keys[i] % 20 will give the column.
* This also supports n-dimensonal tensors.
* NB. this must be specified if the tensor is sparse.
* </pre>
*/
int getShapeCount();
/**
* <code>repeated uint64 shape = 3 [packed = true];</code>
*
* <pre>
* Optional shape which will allow the vector to represent a matrix.
* e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
* and keys[i] % 20 will give the column.
* This also supports n-dimensonal tensors.
* NB. this must be specified if the tensor is sparse.
* </pre>
*/
long getShape(int index);
}
/**
* Protobuf type {@code aialgorithms.proto2.Float32Tensor}
*
* <pre>
* A sparse or dense rank-R tensor that stores data as doubles (float64).
* </pre>
*/
public static final class Float32Tensor extends
com.google.protobuf.GeneratedMessage
implements Float32TensorOrBuilder {
// Use Float32Tensor.newBuilder() to construct.
private Float32Tensor(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private Float32Tensor(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final Float32Tensor defaultInstance;
public static Float32Tensor getDefaultInstance() {
return defaultInstance;
}
public Float32Tensor getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private Float32Tensor(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 13: {
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
values_ = new java.util.ArrayList<java.lang.Float>();
mutable_bitField0_ |= 0x00000001;
}
values_.add(input.readFloat());
break;
}
case 10: {
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001) && input.getBytesUntilLimit() > 0) {
values_ = new java.util.ArrayList<java.lang.Float>();
mutable_bitField0_ |= 0x00000001;
}
while (input.getBytesUntilLimit() > 0) {
values_.add(input.readFloat());
}
input.popLimit(limit);
break;
}
case 16: {
if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
keys_ = new java.util.ArrayList<java.lang.Long>();
mutable_bitField0_ |= 0x00000002;
}
keys_.add(input.readUInt64());
break;
}
case 18: {
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
if (!((mutable_bitField0_ & 0x00000002) == 0x00000002) && input.getBytesUntilLimit() > 0) {
keys_ = new java.util.ArrayList<java.lang.Long>();
mutable_bitField0_ |= 0x00000002;
}
while (input.getBytesUntilLimit() > 0) {
keys_.add(input.readUInt64());
}
input.popLimit(limit);
break;
}
case 24: {
if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
shape_ = new java.util.ArrayList<java.lang.Long>();
mutable_bitField0_ |= 0x00000004;
}
shape_.add(input.readUInt64());
break;
}
case 26: {
int length = input.readRawVarint32();
int limit = input.pushLimit(length);
if (!((mutable_bitField0_ & 0x00000004) == 0x00000004) && input.getBytesUntilLimit() > 0) {
shape_ = new java.util.ArrayList<java.lang.Long>();
mutable_bitField0_ |= 0x00000004;
}
while (input.getBytesUntilLimit() > 0) {
shape_.add(input.readUInt64());
}
input.popLimit(limit);
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
values_ = java.util.Collections.unmodifiableList(values_);
}
if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
keys_ = java.util.Collections.unmodifiableList(keys_);
}
if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
shape_ = java.util.Collections.unmodifiableList(shape_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_Float32Tensor_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_Float32Tensor_fieldAccessorTable
.ensureFieldAccessorsInitialized(
aialgorithms.proto2.RecordProto2.Float32Tensor.class, aialgorithms.proto2.RecordProto2.Float32Tensor.Builder.class);
}
public static com.google.protobuf.Parser<Float32Tensor> PARSER =
new com.google.protobuf.AbstractParser<Float32Tensor>() {
public Float32Tensor parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new Float32Tensor(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<Float32Tensor> getParserForType() {
return PARSER;
}
// repeated float values = 1 [packed = true];
public static final int VALUES_FIELD_NUMBER = 1;
private java.util.List<java.lang.Float> values_;
/**
* <code>repeated float values = 1 [packed = true];</code>
*
* <pre>
* Each value in the vector. If keys is empty this is treated as a
* dense vector.
* </pre>
*/
public java.util.List<java.lang.Float>
getValuesList() {
return values_;
}
/**
* <code>repeated float values = 1 [packed = true];</code>
*
* <pre>
* Each value in the vector. If keys is empty this is treated as a
* dense vector.
* </pre>
*/
public int getValuesCount() {
return values_.size();
}
/**
* <code>repeated float values = 1 [packed = true];</code>
*
* <pre>
* Each value in the vector. If keys is empty this is treated as a
* dense vector.
* </pre>
*/
public float getValues(int index) {
return values_.get(index);
}
private int valuesMemoizedSerializedSize = -1;
// repeated uint64 keys = 2 [packed = true];
public static final int KEYS_FIELD_NUMBER = 2;
private java.util.List<java.lang.Long> keys_;
/**
* <code>repeated uint64 keys = 2 [packed = true];</code>
*
* <pre>
* If not empty then the vector is treated as sparse with
* each key specifying the location of the value in the sparse vector.
* </pre>
*/
public java.util.List<java.lang.Long>
getKeysList() {
return keys_;
}
/**
* <code>repeated uint64 keys = 2 [packed = true];</code>
*
* <pre>
* If not empty then the vector is treated as sparse with
* each key specifying the location of the value in the sparse vector.
* </pre>
*/
public int getKeysCount() {
return keys_.size();
}
/**
* <code>repeated uint64 keys = 2 [packed = true];</code>
*
* <pre>
* If not empty then the vector is treated as sparse with
* each key specifying the location of the value in the sparse vector.
* </pre>
*/
public long getKeys(int index) {
return keys_.get(index);
}
private int keysMemoizedSerializedSize = -1;
// repeated uint64 shape = 3 [packed = true];
public static final int SHAPE_FIELD_NUMBER = 3;
private java.util.List<java.lang.Long> shape_;
/**
* <code>repeated uint64 shape = 3 [packed = true];</code>
*
* <pre>
* Optional shape which will allow the vector to represent a matrix.
* e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
* and keys[i] % 20 will give the column.
* This also supports n-dimensonal tensors.
* NB. this must be specified if the tensor is sparse.
* </pre>
*/
public java.util.List<java.lang.Long>
getShapeList() {
return shape_;
}
/**
* <code>repeated uint64 shape = 3 [packed = true];</code>
*
* <pre>
* Optional shape which will allow the vector to represent a matrix.
* e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
* and keys[i] % 20 will give the column.
* This also supports n-dimensonal tensors.
* NB. this must be specified if the tensor is sparse.
* </pre>
*/
public int getShapeCount() {
return shape_.size();
}
/**
* <code>repeated uint64 shape = 3 [packed = true];</code>
*
* <pre>
* Optional shape which will allow the vector to represent a matrix.
* e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
* and keys[i] % 20 will give the column.
* This also supports n-dimensonal tensors.
* NB. this must be specified if the tensor is sparse.
* </pre>
*/
public long getShape(int index) {
return shape_.get(index);
}
private int shapeMemoizedSerializedSize = -1;
private void initFields() {
values_ = java.util.Collections.emptyList();
keys_ = java.util.Collections.emptyList();
shape_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (getValuesList().size() > 0) {
output.writeRawVarint32(10);
output.writeRawVarint32(valuesMemoizedSerializedSize);
}
for (int i = 0; i < values_.size(); i++) {
output.writeFloatNoTag(values_.get(i));
}
if (getKeysList().size() > 0) {
output.writeRawVarint32(18);
output.writeRawVarint32(keysMemoizedSerializedSize);
}
for (int i = 0; i < keys_.size(); i++) {
output.writeUInt64NoTag(keys_.get(i));
}
if (getShapeList().size() > 0) {
output.writeRawVarint32(26);
output.writeRawVarint32(shapeMemoizedSerializedSize);
}
for (int i = 0; i < shape_.size(); i++) {
output.writeUInt64NoTag(shape_.get(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
{
int dataSize = 0;
dataSize = 4 * getValuesList().size();
size += dataSize;
if (!getValuesList().isEmpty()) {
size += 1;
size += com.google.protobuf.CodedOutputStream
.computeInt32SizeNoTag(dataSize);
}
valuesMemoizedSerializedSize = dataSize;
}
{
int dataSize = 0;
for (int i = 0; i < keys_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeUInt64SizeNoTag(keys_.get(i));
}
size += dataSize;
if (!getKeysList().isEmpty()) {
size += 1;
size += com.google.protobuf.CodedOutputStream
.computeInt32SizeNoTag(dataSize);
}
keysMemoizedSerializedSize = dataSize;
}
{
int dataSize = 0;
for (int i = 0; i < shape_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeUInt64SizeNoTag(shape_.get(i));
}
size += dataSize;
if (!getShapeList().isEmpty()) {
size += 1;
size += com.google.protobuf.CodedOutputStream
.computeInt32SizeNoTag(dataSize);
}
shapeMemoizedSerializedSize = dataSize;
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
public static aialgorithms.proto2.RecordProto2.Float32Tensor parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static aialgorithms.proto2.RecordProto2.Float32Tensor parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static aialgorithms.proto2.RecordProto2.Float32Tensor parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static aialgorithms.proto2.RecordProto2.Float32Tensor parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static aialgorithms.proto2.RecordProto2.Float32Tensor parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static aialgorithms.proto2.RecordProto2.Float32Tensor parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static aialgorithms.proto2.RecordProto2.Float32Tensor parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static aialgorithms.proto2.RecordProto2.Float32Tensor parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static aialgorithms.proto2.RecordProto2.Float32Tensor parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static aialgorithms.proto2.RecordProto2.Float32Tensor parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(aialgorithms.proto2.RecordProto2.Float32Tensor prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code aialgorithms.proto2.Float32Tensor}
*
* <pre>
* A sparse or dense rank-R tensor that stores data as doubles (float64).
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements aialgorithms.proto2.RecordProto2.Float32TensorOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_Float32Tensor_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_Float32Tensor_fieldAccessorTable
.ensureFieldAccessorsInitialized(
aialgorithms.proto2.RecordProto2.Float32Tensor.class, aialgorithms.proto2.RecordProto2.Float32Tensor.Builder.class);
}
// Construct using aialgorithms.proto2.RecordProto2.Float32Tensor.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
values_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
keys_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
shape_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_Float32Tensor_descriptor;
}
public aialgorithms.proto2.RecordProto2.Float32Tensor getDefaultInstanceForType() {
return aialgorithms.proto2.RecordProto2.Float32Tensor.getDefaultInstance();
}
public aialgorithms.proto2.RecordProto2.Float32Tensor build() {
aialgorithms.proto2.RecordProto2.Float32Tensor result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public aialgorithms.proto2.RecordProto2.Float32Tensor buildPartial() {
aialgorithms.proto2.RecordProto2.Float32Tensor result = new aialgorithms.proto2.RecordProto2.Float32Tensor(this);
int from_bitField0_ = bitField0_;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
values_ = java.util.Collections.unmodifiableList(values_);
bitField0_ = (bitField0_ & ~0x00000001);
}
result.values_ = values_;
if (((bitField0_ & 0x00000002) == 0x00000002)) {
keys_ = java.util.Collections.unmodifiableList(keys_);
bitField0_ = (bitField0_ & ~0x00000002);
}
result.keys_ = keys_;
if (((bitField0_ & 0x00000004) == 0x00000004)) {
shape_ = java.util.Collections.unmodifiableList(shape_);
bitField0_ = (bitField0_ & ~0x00000004);
}
result.shape_ = shape_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof aialgorithms.proto2.RecordProto2.Float32Tensor) {
return mergeFrom((aialgorithms.proto2.RecordProto2.Float32Tensor)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(aialgorithms.proto2.RecordProto2.Float32Tensor other) {
if (other == aialgorithms.proto2.RecordProto2.Float32Tensor.getDefaultInstance()) return this;
if (!other.values_.isEmpty()) {
if (values_.isEmpty()) {
values_ = other.values_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensureValuesIsMutable();
values_.addAll(other.values_);
}
onChanged();
}
if (!other.keys_.isEmpty()) {
if (keys_.isEmpty()) {
keys_ = other.keys_;
bitField0_ = (bitField0_ & ~0x00000002);
} else {
ensureKeysIsMutable();
keys_.addAll(other.keys_);
}
onChanged();
}
if (!other.shape_.isEmpty()) {
if (shape_.isEmpty()) {
shape_ = other.shape_;
bitField0_ = (bitField0_ & ~0x00000004);
} else {
ensureShapeIsMutable();
shape_.addAll(other.shape_);
}
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
aialgorithms.proto2.RecordProto2.Float32Tensor parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (aialgorithms.proto2.RecordProto2.Float32Tensor) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
      // Ownership bits for the repeated fields: 0x1=values_, 0x2=keys_, 0x4=shape_.
      // A set bit means this builder owns a private mutable ArrayList.
      private int bitField0_;
      // repeated float values = 1 [packed = true];
      private java.util.List<java.lang.Float> values_ = java.util.Collections.emptyList();
      // Copy-on-write: replace a shared/immutable list with a private ArrayList
      // before the first mutation, then mark it owned via bitField0_.
      private void ensureValuesIsMutable() {
        if (!((bitField0_ & 0x00000001) == 0x00000001)) {
          values_ = new java.util.ArrayList<java.lang.Float>(values_);
          bitField0_ |= 0x00000001;
        }
      }
      /**
       * <code>repeated float values = 1 [packed = true];</code>
       *
       * <pre>
       * Each value in the vector. If keys is empty this is treated as a
       * dense vector.
       * </pre>
       */
      public java.util.List<java.lang.Float>
          getValuesList() {
        return java.util.Collections.unmodifiableList(values_);
      }
      /**
       * <code>repeated float values = 1 [packed = true];</code>
       *
       * <pre>
       * Each value in the vector. If keys is empty this is treated as a
       * dense vector.
       * </pre>
       */
      public int getValuesCount() {
        return values_.size();
      }
      /**
       * <code>repeated float values = 1 [packed = true];</code>
       *
       * <pre>
       * Each value in the vector. If keys is empty this is treated as a
       * dense vector.
       * </pre>
       */
      public float getValues(int index) {
        return values_.get(index);
      }
      /**
       * <code>repeated float values = 1 [packed = true];</code>
       *
       * <pre>
       * Each value in the vector. If keys is empty this is treated as a
       * dense vector.
       * </pre>
       */
      public Builder setValues(
          int index, float value) {
        ensureValuesIsMutable();
        values_.set(index, value);
        onChanged();
        return this;
      }
      /**
       * <code>repeated float values = 1 [packed = true];</code>
       *
       * <pre>
       * Each value in the vector. If keys is empty this is treated as a
       * dense vector.
       * </pre>
       */
      public Builder addValues(float value) {
        ensureValuesIsMutable();
        values_.add(value);
        onChanged();
        return this;
      }
      /**
       * <code>repeated float values = 1 [packed = true];</code>
       *
       * <pre>
       * Each value in the vector. If keys is empty this is treated as a
       * dense vector.
       * </pre>
       */
      public Builder addAllValues(
          java.lang.Iterable<? extends java.lang.Float> values) {
        ensureValuesIsMutable();
        super.addAll(values, values_);
        onChanged();
        return this;
      }
      /**
       * <code>repeated float values = 1 [packed = true];</code>
       *
       * <pre>
       * Each value in the vector. If keys is empty this is treated as a
       * dense vector.
       * </pre>
       */
      public Builder clearValues() {
        values_ = java.util.Collections.emptyList();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      // repeated uint64 keys = 2 [packed = true];
      private java.util.List<java.lang.Long> keys_ = java.util.Collections.emptyList();
      // Copy-on-write guard for keys_ (ownership bit 0x2), mirroring values_.
      private void ensureKeysIsMutable() {
        if (!((bitField0_ & 0x00000002) == 0x00000002)) {
          keys_ = new java.util.ArrayList<java.lang.Long>(keys_);
          bitField0_ |= 0x00000002;
        }
      }
      /**
       * <code>repeated uint64 keys = 2 [packed = true];</code>
       *
       * <pre>
       * If not empty then the vector is treated as sparse with
       * each key specifying the location of the value in the sparse vector.
       * </pre>
       */
      public java.util.List<java.lang.Long>
          getKeysList() {
        return java.util.Collections.unmodifiableList(keys_);
      }
      /**
       * <code>repeated uint64 keys = 2 [packed = true];</code>
       *
       * <pre>
       * If not empty then the vector is treated as sparse with
       * each key specifying the location of the value in the sparse vector.
       * </pre>
       */
      public int getKeysCount() {
        return keys_.size();
      }
      /**
       * <code>repeated uint64 keys = 2 [packed = true];</code>
       *
       * <pre>
       * If not empty then the vector is treated as sparse with
       * each key specifying the location of the value in the sparse vector.
       * </pre>
       */
      public long getKeys(int index) {
        return keys_.get(index);
      }
      /**
       * <code>repeated uint64 keys = 2 [packed = true];</code>
       *
       * <pre>
       * If not empty then the vector is treated as sparse with
       * each key specifying the location of the value in the sparse vector.
       * </pre>
       */
      public Builder setKeys(
          int index, long value) {
        ensureKeysIsMutable();
        keys_.set(index, value);
        onChanged();
        return this;
      }
      /**
       * <code>repeated uint64 keys = 2 [packed = true];</code>
       *
       * <pre>
       * If not empty then the vector is treated as sparse with
       * each key specifying the location of the value in the sparse vector.
       * </pre>
       */
      public Builder addKeys(long value) {
        ensureKeysIsMutable();
        keys_.add(value);
        onChanged();
        return this;
      }
      /**
       * <code>repeated uint64 keys = 2 [packed = true];</code>
       *
       * <pre>
       * If not empty then the vector is treated as sparse with
       * each key specifying the location of the value in the sparse vector.
       * </pre>
       */
      public Builder addAllKeys(
          java.lang.Iterable<? extends java.lang.Long> values) {
        ensureKeysIsMutable();
        super.addAll(values, keys_);
        onChanged();
        return this;
      }
      /**
       * <code>repeated uint64 keys = 2 [packed = true];</code>
       *
       * <pre>
       * If not empty then the vector is treated as sparse with
       * each key specifying the location of the value in the sparse vector.
       * </pre>
       */
      public Builder clearKeys() {
        keys_ = java.util.Collections.emptyList();
        bitField0_ = (bitField0_ & ~0x00000002);
        onChanged();
        return this;
      }
      // repeated uint64 shape = 3 [packed = true];
      private java.util.List<java.lang.Long> shape_ = java.util.Collections.emptyList();
      // Copy-on-write guard for shape_ (ownership bit 0x4), mirroring values_.
      private void ensureShapeIsMutable() {
        if (!((bitField0_ & 0x00000004) == 0x00000004)) {
          shape_ = new java.util.ArrayList<java.lang.Long>(shape_);
          bitField0_ |= 0x00000004;
        }
      }
      /**
       * <code>repeated uint64 shape = 3 [packed = true];</code>
       *
       * <pre>
       * Optional shape which will allow the vector to represent a matrix.
       * e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
       * and keys[i] % 20 will give the column.
       * This also supports n-dimensional tensors.
       * NB. this must be specified if the tensor is sparse.
       * </pre>
       */
      public java.util.List<java.lang.Long>
          getShapeList() {
        return java.util.Collections.unmodifiableList(shape_);
      }
      /**
       * <code>repeated uint64 shape = 3 [packed = true];</code>
       *
       * <pre>
       * Optional shape which will allow the vector to represent a matrix.
       * e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
       * and keys[i] % 20 will give the column.
       * This also supports n-dimensional tensors.
       * NB. this must be specified if the tensor is sparse.
       * </pre>
       */
      public int getShapeCount() {
        return shape_.size();
      }
      /**
       * <code>repeated uint64 shape = 3 [packed = true];</code>
       *
       * <pre>
       * Optional shape which will allow the vector to represent a matrix.
       * e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
       * and keys[i] % 20 will give the column.
       * This also supports n-dimensional tensors.
       * NB. this must be specified if the tensor is sparse.
       * </pre>
       */
      public long getShape(int index) {
        return shape_.get(index);
      }
      /**
       * <code>repeated uint64 shape = 3 [packed = true];</code>
       *
       * <pre>
       * Optional shape which will allow the vector to represent a matrix.
       * e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
       * and keys[i] % 20 will give the column.
       * This also supports n-dimensional tensors.
       * NB. this must be specified if the tensor is sparse.
       * </pre>
       */
      public Builder setShape(
          int index, long value) {
        ensureShapeIsMutable();
        shape_.set(index, value);
        onChanged();
        return this;
      }
      /**
       * <code>repeated uint64 shape = 3 [packed = true];</code>
       *
       * <pre>
       * Optional shape which will allow the vector to represent a matrix.
       * e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
       * and keys[i] % 20 will give the column.
       * This also supports n-dimensional tensors.
       * NB. this must be specified if the tensor is sparse.
       * </pre>
       */
      public Builder addShape(long value) {
        ensureShapeIsMutable();
        shape_.add(value);
        onChanged();
        return this;
      }
      /**
       * <code>repeated uint64 shape = 3 [packed = true];</code>
       *
       * <pre>
       * Optional shape which will allow the vector to represent a matrix.
       * e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
       * and keys[i] % 20 will give the column.
       * This also supports n-dimensional tensors.
       * NB. this must be specified if the tensor is sparse.
       * </pre>
       */
      public Builder addAllShape(
          java.lang.Iterable<? extends java.lang.Long> values) {
        ensureShapeIsMutable();
        super.addAll(values, shape_);
        onChanged();
        return this;
      }
      /**
       * <code>repeated uint64 shape = 3 [packed = true];</code>
       *
       * <pre>
       * Optional shape which will allow the vector to represent a matrix.
       * e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
       * and keys[i] % 20 will give the column.
       * This also supports n-dimensional tensors.
       * NB. this must be specified if the tensor is sparse.
       * </pre>
       */
      public Builder clearShape() {
        shape_ = java.util.Collections.emptyList();
        bitField0_ = (bitField0_ & ~0x00000004);
        onChanged();
        return this;
      }
// @@protoc_insertion_point(builder_scope:aialgorithms.proto2.Float32Tensor)
}
    // Eagerly builds the shared immutable default instance with empty lists.
    static {
      defaultInstance = new Float32Tensor(true);
      defaultInstance.initFields();
    }
// @@protoc_insertion_point(class_scope:aialgorithms.proto2.Float32Tensor)
}
  // Read-only accessor contract shared by Float64Tensor and Float64Tensor.Builder.
  public interface Float64TensorOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
    // repeated double values = 1 [packed = true];
    /**
     * <code>repeated double values = 1 [packed = true];</code>
     *
     * <pre>
     * Each value in the vector. If keys is empty this is treated as a
     * dense vector.
     * </pre>
     */
    java.util.List<java.lang.Double> getValuesList();
    /**
     * <code>repeated double values = 1 [packed = true];</code>
     *
     * <pre>
     * Each value in the vector. If keys is empty this is treated as a
     * dense vector.
     * </pre>
     */
    int getValuesCount();
    /**
     * <code>repeated double values = 1 [packed = true];</code>
     *
     * <pre>
     * Each value in the vector. If keys is empty this is treated as a
     * dense vector.
     * </pre>
     */
    double getValues(int index);
    // repeated uint64 keys = 2 [packed = true];
    /**
     * <code>repeated uint64 keys = 2 [packed = true];</code>
     *
     * <pre>
     * If not empty then the vector is treated as sparse with
     * each key specifying the location of the value in the sparse vector.
     * </pre>
     */
    java.util.List<java.lang.Long> getKeysList();
    /**
     * <code>repeated uint64 keys = 2 [packed = true];</code>
     *
     * <pre>
     * If not empty then the vector is treated as sparse with
     * each key specifying the location of the value in the sparse vector.
     * </pre>
     */
    int getKeysCount();
    /**
     * <code>repeated uint64 keys = 2 [packed = true];</code>
     *
     * <pre>
     * If not empty then the vector is treated as sparse with
     * each key specifying the location of the value in the sparse vector.
     * </pre>
     */
    long getKeys(int index);
    // repeated uint64 shape = 3 [packed = true];
    /**
     * <code>repeated uint64 shape = 3 [packed = true];</code>
     *
     * <pre>
     * Optional shape which will allow the vector to represent a matrix.
     * e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
     * and keys[i] % 20 will give the column.
     * This also supports n-dimensional tensors.
     * NB. this must be specified if the tensor is sparse.
     * </pre>
     */
    java.util.List<java.lang.Long> getShapeList();
    /**
     * <code>repeated uint64 shape = 3 [packed = true];</code>
     *
     * <pre>
     * Optional shape which will allow the vector to represent a matrix.
     * e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
     * and keys[i] % 20 will give the column.
     * This also supports n-dimensional tensors.
     * NB. this must be specified if the tensor is sparse.
     * </pre>
     */
    int getShapeCount();
    /**
     * <code>repeated uint64 shape = 3 [packed = true];</code>
     *
     * <pre>
     * Optional shape which will allow the vector to represent a matrix.
     * e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
     * and keys[i] % 20 will give the column.
     * This also supports n-dimensional tensors.
     * NB. this must be specified if the tensor is sparse.
     * </pre>
     */
    long getShape(int index);
  }
/**
* Protobuf type {@code aialgorithms.proto2.Float64Tensor}
*
* <pre>
* A sparse or dense rank-R tensor that stores data as doubles (float64).
* </pre>
*/
public static final class Float64Tensor extends
com.google.protobuf.GeneratedMessage
implements Float64TensorOrBuilder {
    // Use Float64Tensor.newBuilder() to construct.
    private Float64Tensor(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // noInit constructor: used only for the singleton default instance; field
    // lists are populated afterwards by initFields() in the static initializer.
    private Float64Tensor(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
    private static final Float64Tensor defaultInstance;
    public static Float64Tensor getDefaultInstance() {
      return defaultInstance;
    }
    public Float64Tensor getDefaultInstanceForType() {
      return defaultInstance;
    }
    // Fields from the wire that did not match a known field number.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Wire-format parsing constructor. Accepts both encodings of each repeated
    // field: the unpacked per-element tag (9/16/24) and the packed
    // length-delimited tag (10/18/26), as required for packed-field
    // compatibility. mutable_bitField0_ tracks which lists were allocated so
    // the finally block can freeze exactly those.
    private Float64Tensor(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              // Unknown tag: preserve it in unknownFields; stop if it cannot
              // be skipped (e.g. end-group).
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 9: {
              // values, unpacked fixed64 element.
              if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
                values_ = new java.util.ArrayList<java.lang.Double>();
                mutable_bitField0_ |= 0x00000001;
              }
              values_.add(input.readDouble());
              break;
            }
            case 10: {
              // values, packed: length-delimited run of doubles.
              int length = input.readRawVarint32();
              int limit = input.pushLimit(length);
              if (!((mutable_bitField0_ & 0x00000001) == 0x00000001) && input.getBytesUntilLimit() > 0) {
                values_ = new java.util.ArrayList<java.lang.Double>();
                mutable_bitField0_ |= 0x00000001;
              }
              while (input.getBytesUntilLimit() > 0) {
                values_.add(input.readDouble());
              }
              input.popLimit(limit);
              break;
            }
            case 16: {
              // keys, unpacked varint element.
              if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
                keys_ = new java.util.ArrayList<java.lang.Long>();
                mutable_bitField0_ |= 0x00000002;
              }
              keys_.add(input.readUInt64());
              break;
            }
            case 18: {
              // keys, packed.
              int length = input.readRawVarint32();
              int limit = input.pushLimit(length);
              if (!((mutable_bitField0_ & 0x00000002) == 0x00000002) && input.getBytesUntilLimit() > 0) {
                keys_ = new java.util.ArrayList<java.lang.Long>();
                mutable_bitField0_ |= 0x00000002;
              }
              while (input.getBytesUntilLimit() > 0) {
                keys_.add(input.readUInt64());
              }
              input.popLimit(limit);
              break;
            }
            case 24: {
              // shape, unpacked varint element.
              if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
                shape_ = new java.util.ArrayList<java.lang.Long>();
                mutable_bitField0_ |= 0x00000004;
              }
              shape_.add(input.readUInt64());
              break;
            }
            case 26: {
              // shape, packed.
              int length = input.readRawVarint32();
              int limit = input.pushLimit(length);
              if (!((mutable_bitField0_ & 0x00000004) == 0x00000004) && input.getBytesUntilLimit() > 0) {
                shape_ = new java.util.ArrayList<java.lang.Long>();
                mutable_bitField0_ |= 0x00000004;
              }
              while (input.getBytesUntilLimit() > 0) {
                shape_.add(input.readUInt64());
              }
              input.popLimit(limit);
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Freeze whichever lists were allocated, even on failure, so the
        // unfinished message attached to the exception is immutable.
        if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
          values_ = java.util.Collections.unmodifiableList(values_);
        }
        if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
          keys_ = java.util.Collections.unmodifiableList(keys_);
        }
        if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
          shape_ = java.util.Collections.unmodifiableList(shape_);
        }
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    // Reflection/descriptor plumbing wired to the file-level descriptor tables.
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_Float64Tensor_descriptor;
    }
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_Float64Tensor_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              aialgorithms.proto2.RecordProto2.Float64Tensor.class, aialgorithms.proto2.RecordProto2.Float64Tensor.Builder.class);
    }
    // Stateless parser delegating to the wire-format parsing constructor.
    public static com.google.protobuf.Parser<Float64Tensor> PARSER =
        new com.google.protobuf.AbstractParser<Float64Tensor>() {
      public Float64Tensor parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new Float64Tensor(input, extensionRegistry);
      }
    };
    @java.lang.Override
    public com.google.protobuf.Parser<Float64Tensor> getParserForType() {
      return PARSER;
    }
    // repeated double values = 1 [packed = true];
    public static final int VALUES_FIELD_NUMBER = 1;
    // Immutable after construction; safe to return directly from the getter.
    private java.util.List<java.lang.Double> values_;
    /**
     * <code>repeated double values = 1 [packed = true];</code>
     *
     * <pre>
     * Each value in the vector. If keys is empty this is treated as a
     * dense vector.
     * </pre>
     */
    public java.util.List<java.lang.Double>
        getValuesList() {
      return values_;
    }
    /**
     * <code>repeated double values = 1 [packed = true];</code>
     *
     * <pre>
     * Each value in the vector. If keys is empty this is treated as a
     * dense vector.
     * </pre>
     */
    public int getValuesCount() {
      return values_.size();
    }
    /**
     * <code>repeated double values = 1 [packed = true];</code>
     *
     * <pre>
     * Each value in the vector. If keys is empty this is treated as a
     * dense vector.
     * </pre>
     */
    public double getValues(int index) {
      return values_.get(index);
    }
    // Packed payload byte size, memoized by getSerializedSize() for writeTo().
    private int valuesMemoizedSerializedSize = -1;
    // repeated uint64 keys = 2 [packed = true];
    public static final int KEYS_FIELD_NUMBER = 2;
    // Immutable after construction; safe to return directly from the getter.
    private java.util.List<java.lang.Long> keys_;
    /**
     * <code>repeated uint64 keys = 2 [packed = true];</code>
     *
     * <pre>
     * If not empty then the vector is treated as sparse with
     * each key specifying the location of the value in the sparse vector.
     * </pre>
     */
    public java.util.List<java.lang.Long>
        getKeysList() {
      return keys_;
    }
    /**
     * <code>repeated uint64 keys = 2 [packed = true];</code>
     *
     * <pre>
     * If not empty then the vector is treated as sparse with
     * each key specifying the location of the value in the sparse vector.
     * </pre>
     */
    public int getKeysCount() {
      return keys_.size();
    }
    /**
     * <code>repeated uint64 keys = 2 [packed = true];</code>
     *
     * <pre>
     * If not empty then the vector is treated as sparse with
     * each key specifying the location of the value in the sparse vector.
     * </pre>
     */
    public long getKeys(int index) {
      return keys_.get(index);
    }
    // Packed payload byte size, memoized by getSerializedSize() for writeTo().
    private int keysMemoizedSerializedSize = -1;
    // repeated uint64 shape = 3 [packed = true];
    public static final int SHAPE_FIELD_NUMBER = 3;
    // Immutable after construction; safe to return directly from the getter.
    private java.util.List<java.lang.Long> shape_;
    /**
     * <code>repeated uint64 shape = 3 [packed = true];</code>
     *
     * <pre>
     * Optional shape which will allow the vector to represent a matrix.
     * e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
     * and keys[i] % 20 will give the column.
     * This also supports n-dimensional tensors.
     * NB. this must be specified if the tensor is sparse.
     * </pre>
     */
    public java.util.List<java.lang.Long>
        getShapeList() {
      return shape_;
    }
    /**
     * <code>repeated uint64 shape = 3 [packed = true];</code>
     *
     * <pre>
     * Optional shape which will allow the vector to represent a matrix.
     * e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
     * and keys[i] % 20 will give the column.
     * This also supports n-dimensional tensors.
     * NB. this must be specified if the tensor is sparse.
     * </pre>
     */
    public int getShapeCount() {
      return shape_.size();
    }
    /**
     * <code>repeated uint64 shape = 3 [packed = true];</code>
     *
     * <pre>
     * Optional shape which will allow the vector to represent a matrix.
     * e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
     * and keys[i] % 20 will give the column.
     * This also supports n-dimensional tensors.
     * NB. this must be specified if the tensor is sparse.
     * </pre>
     */
    public long getShape(int index) {
      return shape_.get(index);
    }
    // Packed payload byte size, memoized by getSerializedSize() for writeTo().
    private int shapeMemoizedSerializedSize = -1;
    // Sets all repeated fields to the shared empty list.
    private void initFields() {
      values_ = java.util.Collections.emptyList();
      keys_ = java.util.Collections.emptyList();
      shape_ = java.util.Collections.emptyList();
    }
    // -1 = not computed, 0 = false, 1 = true. Trivially true here: no required fields.
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      memoizedIsInitialized = 1;
      return true;
    }
    // Serializes in packed form: each non-empty field writes its
    // length-delimited tag (10/18/26), the memoized payload length, then the
    // raw elements. getSerializedSize() is called first because it populates
    // the *MemoizedSerializedSize fields used below.
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (getValuesList().size() > 0) {
        output.writeRawVarint32(10);
        output.writeRawVarint32(valuesMemoizedSerializedSize);
      }
      for (int i = 0; i < values_.size(); i++) {
        output.writeDoubleNoTag(values_.get(i));
      }
      if (getKeysList().size() > 0) {
        output.writeRawVarint32(18);
        output.writeRawVarint32(keysMemoizedSerializedSize);
      }
      for (int i = 0; i < keys_.size(); i++) {
        output.writeUInt64NoTag(keys_.get(i));
      }
      if (getShapeList().size() > 0) {
        output.writeRawVarint32(26);
        output.writeRawVarint32(shapeMemoizedSerializedSize);
      }
      for (int i = 0; i < shape_.size(); i++) {
        output.writeUInt64NoTag(shape_.get(i));
      }
      getUnknownFields().writeTo(output);
    }
    private int memoizedSerializedSize = -1;
    // Computes (and memoizes) the serialized byte size, recording each packed
    // field's payload size for writeTo(). For each non-empty field the total
    // adds 1 tag byte plus the varint encoding of the payload length.
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
      size = 0;
      {
        // doubles are fixed 8 bytes each, so no per-element loop is needed.
        int dataSize = 0;
        dataSize = 8 * getValuesList().size();
        size += dataSize;
        if (!getValuesList().isEmpty()) {
          size += 1;
          size += com.google.protobuf.CodedOutputStream
              .computeInt32SizeNoTag(dataSize);
        }
        valuesMemoizedSerializedSize = dataSize;
      }
      {
        int dataSize = 0;
        for (int i = 0; i < keys_.size(); i++) {
          dataSize += com.google.protobuf.CodedOutputStream
              .computeUInt64SizeNoTag(keys_.get(i));
        }
        size += dataSize;
        if (!getKeysList().isEmpty()) {
          size += 1;
          size += com.google.protobuf.CodedOutputStream
              .computeInt32SizeNoTag(dataSize);
        }
        keysMemoizedSerializedSize = dataSize;
      }
      {
        int dataSize = 0;
        for (int i = 0; i < shape_.size(); i++) {
          dataSize += com.google.protobuf.CodedOutputStream
              .computeUInt64SizeNoTag(shape_.get(i));
        }
        size += dataSize;
        if (!getShapeList().isEmpty()) {
          size += 1;
          size += com.google.protobuf.CodedOutputStream
              .computeInt32SizeNoTag(dataSize);
        }
        shapeMemoizedSerializedSize = dataSize;
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
    private static final long serialVersionUID = 0L;
    // Java serialization hook; delegates to GeneratedMessage's proxy form.
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    // Static parse entry points; all delegate to PARSER for each input kind
    // (ByteString, byte[], InputStream, delimited stream, CodedInputStream),
    // each with an optional extension registry overload.
    public static aialgorithms.proto2.RecordProto2.Float64Tensor parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static aialgorithms.proto2.RecordProto2.Float64Tensor parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static aialgorithms.proto2.RecordProto2.Float64Tensor parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static aialgorithms.proto2.RecordProto2.Float64Tensor parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static aialgorithms.proto2.RecordProto2.Float64Tensor parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static aialgorithms.proto2.RecordProto2.Float64Tensor parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static aialgorithms.proto2.RecordProto2.Float64Tensor parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static aialgorithms.proto2.RecordProto2.Float64Tensor parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static aialgorithms.proto2.RecordProto2.Float64Tensor parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static aialgorithms.proto2.RecordProto2.Float64Tensor parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    // Builder factory methods: fresh builder, builder pre-populated from a
    // prototype, and builder-from-this (toBuilder).
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(aialgorithms.proto2.RecordProto2.Float64Tensor prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
/**
* Protobuf type {@code aialgorithms.proto2.Float64Tensor}
*
* <pre>
* A sparse or dense rank-R tensor that stores data as doubles (float64).
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements aialgorithms.proto2.RecordProto2.Float64TensorOrBuilder {
      // Descriptor plumbing mirroring the message class.
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_Float64Tensor_descriptor;
      }
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_Float64Tensor_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                aialgorithms.proto2.RecordProto2.Float64Tensor.class, aialgorithms.proto2.RecordProto2.Float64Tensor.Builder.class);
      }
      // Construct using aialgorithms.proto2.RecordProto2.Float64Tensor.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      // No message/group fields here, so nothing to eagerly initialize.
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }
      // Resets all repeated fields to the shared empty list and drops the
      // ownership bits so no stale mutable list is reused.
      public Builder clear() {
        super.clear();
        values_ = java.util.Collections.emptyList();
        bitField0_ = (bitField0_ & ~0x00000001);
        keys_ = java.util.Collections.emptyList();
        bitField0_ = (bitField0_ & ~0x00000002);
        shape_ = java.util.Collections.emptyList();
        bitField0_ = (bitField0_ & ~0x00000004);
        return this;
      }
      // Clone via build-partial + merge, the standard generated-code pattern.
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_Float64Tensor_descriptor;
      }
      public aialgorithms.proto2.RecordProto2.Float64Tensor getDefaultInstanceForType() {
        return aialgorithms.proto2.RecordProto2.Float64Tensor.getDefaultInstance();
      }
      // Builds and verifies initialization (always true for this message).
      public aialgorithms.proto2.RecordProto2.Float64Tensor build() {
        aialgorithms.proto2.RecordProto2.Float64Tensor result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
      // Transfers list ownership into the built message: each owned list is
      // frozen and its ownership bit cleared, so a later builder mutation
      // triggers a fresh copy instead of mutating the built message.
      public aialgorithms.proto2.RecordProto2.Float64Tensor buildPartial() {
        aialgorithms.proto2.RecordProto2.Float64Tensor result = new aialgorithms.proto2.RecordProto2.Float64Tensor(this);
        int from_bitField0_ = bitField0_;
        if (((bitField0_ & 0x00000001) == 0x00000001)) {
          values_ = java.util.Collections.unmodifiableList(values_);
          bitField0_ = (bitField0_ & ~0x00000001);
        }
        result.values_ = values_;
        if (((bitField0_ & 0x00000002) == 0x00000002)) {
          keys_ = java.util.Collections.unmodifiableList(keys_);
          bitField0_ = (bitField0_ & ~0x00000002);
        }
        result.keys_ = keys_;
        if (((bitField0_ & 0x00000004) == 0x00000004)) {
          shape_ = java.util.Collections.unmodifiableList(shape_);
          bitField0_ = (bitField0_ & ~0x00000004);
        }
        result.shape_ = shape_;
        onBuilt();
        return result;
      }
      // Dynamic-dispatch merge: uses the typed fast path for Float64Tensor,
      // reflective merge otherwise.
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof aialgorithms.proto2.RecordProto2.Float64Tensor) {
          return mergeFrom((aialgorithms.proto2.RecordProto2.Float64Tensor)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      // Field-wise merge: adopt the other message's immutable list when ours
      // is empty (clearing the ownership bit defers the copy), else append.
      public Builder mergeFrom(aialgorithms.proto2.RecordProto2.Float64Tensor other) {
        if (other == aialgorithms.proto2.RecordProto2.Float64Tensor.getDefaultInstance()) return this;
        if (!other.values_.isEmpty()) {
          if (values_.isEmpty()) {
            values_ = other.values_;
            bitField0_ = (bitField0_ & ~0x00000001);
          } else {
            ensureValuesIsMutable();
            values_.addAll(other.values_);
          }
          onChanged();
        }
        if (!other.keys_.isEmpty()) {
          if (keys_.isEmpty()) {
            keys_ = other.keys_;
            bitField0_ = (bitField0_ & ~0x00000002);
          } else {
            ensureKeysIsMutable();
            keys_.addAll(other.keys_);
          }
          onChanged();
        }
        if (!other.shape_.isEmpty()) {
          if (shape_.isEmpty()) {
            shape_ = other.shape_;
            bitField0_ = (bitField0_ & ~0x00000004);
          } else {
            ensureShapeIsMutable();
            shape_.addAll(other.shape_);
          }
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
      // Always true: Float64Tensor declares no required fields.
      public final boolean isInitialized() {
        return true;
      }
      // Parses from the stream and merges; a partial message is still merged
      // (finally block) before a parse exception propagates.
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        aialgorithms.proto2.RecordProto2.Float64Tensor parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (aialgorithms.proto2.RecordProto2.Float64Tensor) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      // Ownership bits for the repeated fields: 0x1=values_, 0x2=keys_, 0x4=shape_.
      private int bitField0_;
      // repeated double values = 1 [packed = true];
      private java.util.List<java.lang.Double> values_ = java.util.Collections.emptyList();
      // Copy-on-write: replace a shared/immutable list with a private ArrayList
      // before the first mutation, then mark it owned via bitField0_.
      private void ensureValuesIsMutable() {
        if (!((bitField0_ & 0x00000001) == 0x00000001)) {
          values_ = new java.util.ArrayList<java.lang.Double>(values_);
          bitField0_ |= 0x00000001;
        }
      }
      /**
       * <code>repeated double values = 1 [packed = true];</code>
       *
       * <pre>
       * Each value in the vector. If keys is empty this is treated as a
       * dense vector.
       * </pre>
       */
      public java.util.List<java.lang.Double>
          getValuesList() {
        return java.util.Collections.unmodifiableList(values_);
      }
      /**
       * <code>repeated double values = 1 [packed = true];</code>
       *
       * <pre>
       * Each value in the vector. If keys is empty this is treated as a
       * dense vector.
       * </pre>
       */
      public int getValuesCount() {
        return values_.size();
      }
      /**
       * <code>repeated double values = 1 [packed = true];</code>
       *
       * <pre>
       * Each value in the vector. If keys is empty this is treated as a
       * dense vector.
       * </pre>
       */
      public double getValues(int index) {
        return values_.get(index);
      }
      /**
       * <code>repeated double values = 1 [packed = true];</code>
       *
       * <pre>
       * Each value in the vector. If keys is empty this is treated as a
       * dense vector.
       * </pre>
       */
      public Builder setValues(
          int index, double value) {
        ensureValuesIsMutable();
        values_.set(index, value);
        onChanged();
        return this;
      }
      /**
       * <code>repeated double values = 1 [packed = true];</code>
       *
       * <pre>
       * Each value in the vector. If keys is empty this is treated as a
       * dense vector.
       * </pre>
       */
      public Builder addValues(double value) {
        ensureValuesIsMutable();
        values_.add(value);
        onChanged();
        return this;
      }
      /**
       * <code>repeated double values = 1 [packed = true];</code>
       *
       * <pre>
       * Each value in the vector. If keys is empty this is treated as a
       * dense vector.
       * </pre>
       */
      public Builder addAllValues(
          java.lang.Iterable<? extends java.lang.Double> values) {
        ensureValuesIsMutable();
        super.addAll(values, values_);
        onChanged();
        return this;
      }
      /**
       * <code>repeated double values = 1 [packed = true];</code>
       *
       * <pre>
       * Each value in the vector. If keys is empty this is treated as a
       * dense vector.
       * </pre>
       */
      public Builder clearValues() {
        values_ = java.util.Collections.emptyList();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      // repeated uint64 keys = 2 [packed = true];
      private java.util.List<java.lang.Long> keys_ = java.util.Collections.emptyList();
      // Copy-on-write guard for keys_ (ownership bit 0x2), mirroring values_.
      private void ensureKeysIsMutable() {
        if (!((bitField0_ & 0x00000002) == 0x00000002)) {
          keys_ = new java.util.ArrayList<java.lang.Long>(keys_);
          bitField0_ |= 0x00000002;
        }
      }
      /**
       * <code>repeated uint64 keys = 2 [packed = true];</code>
       *
       * <pre>
       * If not empty then the vector is treated as sparse with
       * each key specifying the location of the value in the sparse vector.
       * </pre>
       */
      public java.util.List<java.lang.Long>
          getKeysList() {
        return java.util.Collections.unmodifiableList(keys_);
      }
      /**
       * <code>repeated uint64 keys = 2 [packed = true];</code>
       *
       * <pre>
       * If not empty then the vector is treated as sparse with
       * each key specifying the location of the value in the sparse vector.
       * </pre>
       */
      public int getKeysCount() {
        return keys_.size();
      }
      /**
       * <code>repeated uint64 keys = 2 [packed = true];</code>
       *
       * <pre>
       * If not empty then the vector is treated as sparse with
       * each key specifying the location of the value in the sparse vector.
       * </pre>
       */
      public long getKeys(int index) {
        return keys_.get(index);
      }
      /**
       * <code>repeated uint64 keys = 2 [packed = true];</code>
       *
       * <pre>
       * If not empty then the vector is treated as sparse with
       * each key specifying the location of the value in the sparse vector.
       * </pre>
       */
      public Builder setKeys(
          int index, long value) {
        ensureKeysIsMutable();
        keys_.set(index, value);
        onChanged();
        return this;
      }
      /**
       * <code>repeated uint64 keys = 2 [packed = true];</code>
       *
       * <pre>
       * If not empty then the vector is treated as sparse with
       * each key specifying the location of the value in the sparse vector.
       * </pre>
       */
      public Builder addKeys(long value) {
        ensureKeysIsMutable();
        keys_.add(value);
        onChanged();
        return this;
      }
      /**
       * <code>repeated uint64 keys = 2 [packed = true];</code>
       *
       * <pre>
       * If not empty then the vector is treated as sparse with
       * each key specifying the location of the value in the sparse vector.
       * </pre>
       */
      public Builder addAllKeys(
          java.lang.Iterable<? extends java.lang.Long> values) {
        ensureKeysIsMutable();
        super.addAll(values, keys_);
        onChanged();
        return this;
      }
/**
* <code>repeated uint64 keys = 2 [packed = true];</code>
*
* <pre>
* If not empty then the vector is treated as sparse with
* each key specifying the location of the value in the sparse vector.
* </pre>
*/
public Builder clearKeys() {
keys_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
onChanged();
return this;
}
// repeated uint64 shape = 3 [packed = true];
private java.util.List<java.lang.Long> shape_ = java.util.Collections.emptyList();
private void ensureShapeIsMutable() {
if (!((bitField0_ & 0x00000004) == 0x00000004)) {
shape_ = new java.util.ArrayList<java.lang.Long>(shape_);
bitField0_ |= 0x00000004;
}
}
/**
* <code>repeated uint64 shape = 3 [packed = true];</code>
*
* <pre>
* Optional shape which will allow the vector to represent a matrix.
* e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
* and keys[i] % 20 will give the column.
* This also supports n-dimensonal tensors.
* NB. this must be specified if the tensor is sparse.
* </pre>
*/
public java.util.List<java.lang.Long>
getShapeList() {
return java.util.Collections.unmodifiableList(shape_);
}
/**
* <code>repeated uint64 shape = 3 [packed = true];</code>
*
* <pre>
* Optional shape which will allow the vector to represent a matrix.
* e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
* and keys[i] % 20 will give the column.
* This also supports n-dimensonal tensors.
* NB. this must be specified if the tensor is sparse.
* </pre>
*/
public int getShapeCount() {
return shape_.size();
}
/**
* <code>repeated uint64 shape = 3 [packed = true];</code>
*
* <pre>
* Optional shape which will allow the vector to represent a matrix.
* e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
* and keys[i] % 20 will give the column.
* This also supports n-dimensonal tensors.
* NB. this must be specified if the tensor is sparse.
* </pre>
*/
public long getShape(int index) {
return shape_.get(index);
}
/**
* <code>repeated uint64 shape = 3 [packed = true];</code>
*
* <pre>
* Optional shape which will allow the vector to represent a matrix.
* e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
* and keys[i] % 20 will give the column.
* This also supports n-dimensonal tensors.
* NB. this must be specified if the tensor is sparse.
* </pre>
*/
public Builder setShape(
int index, long value) {
ensureShapeIsMutable();
shape_.set(index, value);
onChanged();
return this;
}
/**
* <code>repeated uint64 shape = 3 [packed = true];</code>
*
* <pre>
* Optional shape which will allow the vector to represent a matrix.
* e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
* and keys[i] % 20 will give the column.
* This also supports n-dimensonal tensors.
* NB. this must be specified if the tensor is sparse.
* </pre>
*/
public Builder addShape(long value) {
ensureShapeIsMutable();
shape_.add(value);
onChanged();
return this;
}
/**
* <code>repeated uint64 shape = 3 [packed = true];</code>
*
* <pre>
* Optional shape which will allow the vector to represent a matrix.
* e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
* and keys[i] % 20 will give the column.
* This also supports n-dimensonal tensors.
* NB. this must be specified if the tensor is sparse.
* </pre>
*/
public Builder addAllShape(
java.lang.Iterable<? extends java.lang.Long> values) {
ensureShapeIsMutable();
super.addAll(values, shape_);
onChanged();
return this;
}
/**
* <code>repeated uint64 shape = 3 [packed = true];</code>
*
* <pre>
* Optional shape which will allow the vector to represent a matrix.
* e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
* and keys[i] % 20 will give the column.
* This also supports n-dimensonal tensors.
* NB. this must be specified if the tensor is sparse.
* </pre>
*/
public Builder clearShape() {
shape_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:aialgorithms.proto2.Float64Tensor)
}
    static {
      // Eagerly build the shared default instance; the boolean ctor skips parsing.
      defaultInstance = new Float64Tensor(true);
      defaultInstance.initFields();
    }
// @@protoc_insertion_point(class_scope:aialgorithms.proto2.Float64Tensor)
}
  /**
   * Read-only accessor contract shared by {@code Int32Tensor} and its Builder.
   * Generated by protoc from the {@code aialgorithms.proto2.Int32Tensor} message;
   * do not edit by hand.
   */
  public interface Int32TensorOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
    // repeated int32 values = 1 [packed = true];
    /**
     * <code>repeated int32 values = 1 [packed = true];</code>
     *
     * <pre>
     * Each value in the vector. If keys is empty this is treated as a
     * dense vector.
     * </pre>
     */
    java.util.List<java.lang.Integer> getValuesList();
    /**
     * <code>repeated int32 values = 1 [packed = true];</code>
     *
     * <pre>
     * Each value in the vector. If keys is empty this is treated as a
     * dense vector.
     * </pre>
     */
    int getValuesCount();
    /**
     * <code>repeated int32 values = 1 [packed = true];</code>
     *
     * <pre>
     * Each value in the vector. If keys is empty this is treated as a
     * dense vector.
     * </pre>
     */
    int getValues(int index);
    // repeated uint64 keys = 2 [packed = true];
    /**
     * <code>repeated uint64 keys = 2 [packed = true];</code>
     *
     * <pre>
     * If not empty then the vector is treated as sparse with
     * each key specifying the location of the value in the sparse vector.
     * </pre>
     */
    java.util.List<java.lang.Long> getKeysList();
    /**
     * <code>repeated uint64 keys = 2 [packed = true];</code>
     *
     * <pre>
     * If not empty then the vector is treated as sparse with
     * each key specifying the location of the value in the sparse vector.
     * </pre>
     */
    int getKeysCount();
    /**
     * <code>repeated uint64 keys = 2 [packed = true];</code>
     *
     * <pre>
     * If not empty then the vector is treated as sparse with
     * each key specifying the location of the value in the sparse vector.
     * </pre>
     */
    long getKeys(int index);
    // repeated uint64 shape = 3 [packed = true];
    /**
     * <code>repeated uint64 shape = 3 [packed = true];</code>
     *
     * <pre>
     * Optional shape which will allow the vector to represent a matrix.
     * e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
     * and keys[i] % 20 will give the column.
     * This also supports n-dimensonal tensors.
     * NB. this must be specified if the tensor is sparse.
     * </pre>
     */
    java.util.List<java.lang.Long> getShapeList();
    /**
     * <code>repeated uint64 shape = 3 [packed = true];</code>
     *
     * <pre>
     * Optional shape which will allow the vector to represent a matrix.
     * e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
     * and keys[i] % 20 will give the column.
     * This also supports n-dimensonal tensors.
     * NB. this must be specified if the tensor is sparse.
     * </pre>
     */
    int getShapeCount();
    /**
     * <code>repeated uint64 shape = 3 [packed = true];</code>
     *
     * <pre>
     * Optional shape which will allow the vector to represent a matrix.
     * e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
     * and keys[i] % 20 will give the column.
     * This also supports n-dimensonal tensors.
     * NB. this must be specified if the tensor is sparse.
     * </pre>
     */
    long getShape(int index);
  }
  /**
   * Protobuf type {@code aialgorithms.proto2.Int32Tensor}
   *
   * <pre>
   * A sparse or dense rank-R tensor that stores data as 32-bit ints (int32).
   * </pre>
   */
  public static final class Int32Tensor extends
      com.google.protobuf.GeneratedMessage
      implements Int32TensorOrBuilder {
    // Use Int32Tensor.newBuilder() to construct.
    private Int32Tensor(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // noInit ctor used only for the shared default instance; skips all parsing.
    private Int32Tensor(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
    private static final Int32Tensor defaultInstance;
    public static Int32Tensor getDefaultInstance() {
      return defaultInstance;
    }
    public Int32Tensor getDefaultInstanceForType() {
      return defaultInstance;
    }
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Wire-format parsing constructor (protoc-generated). Accepts both packed
    // (length-delimited) and unpacked (per-element) encodings for each repeated
    // field, as required by the proto spec. Tags: field 1 = 8/10, field 2 = 16/18,
    // field 3 = 24/26. mutable_bitField0_ tracks which lists were allocated.
    private Int32Tensor(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 8: {
              // values, unpacked varint element.
              if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
                values_ = new java.util.ArrayList<java.lang.Integer>();
                mutable_bitField0_ |= 0x00000001;
              }
              values_.add(input.readInt32());
              break;
            }
            case 10: {
              // values, packed: length-delimited run of varints.
              int length = input.readRawVarint32();
              int limit = input.pushLimit(length);
              if (!((mutable_bitField0_ & 0x00000001) == 0x00000001) && input.getBytesUntilLimit() > 0) {
                values_ = new java.util.ArrayList<java.lang.Integer>();
                mutable_bitField0_ |= 0x00000001;
              }
              while (input.getBytesUntilLimit() > 0) {
                values_.add(input.readInt32());
              }
              input.popLimit(limit);
              break;
            }
            case 16: {
              // keys, unpacked.
              if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
                keys_ = new java.util.ArrayList<java.lang.Long>();
                mutable_bitField0_ |= 0x00000002;
              }
              keys_.add(input.readUInt64());
              break;
            }
            case 18: {
              // keys, packed.
              int length = input.readRawVarint32();
              int limit = input.pushLimit(length);
              if (!((mutable_bitField0_ & 0x00000002) == 0x00000002) && input.getBytesUntilLimit() > 0) {
                keys_ = new java.util.ArrayList<java.lang.Long>();
                mutable_bitField0_ |= 0x00000002;
              }
              while (input.getBytesUntilLimit() > 0) {
                keys_.add(input.readUInt64());
              }
              input.popLimit(limit);
              break;
            }
            case 24: {
              // shape, unpacked.
              if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
                shape_ = new java.util.ArrayList<java.lang.Long>();
                mutable_bitField0_ |= 0x00000004;
              }
              shape_.add(input.readUInt64());
              break;
            }
            case 26: {
              // shape, packed.
              int length = input.readRawVarint32();
              int limit = input.pushLimit(length);
              if (!((mutable_bitField0_ & 0x00000004) == 0x00000004) && input.getBytesUntilLimit() > 0) {
                shape_ = new java.util.ArrayList<java.lang.Long>();
                mutable_bitField0_ |= 0x00000004;
              }
              while (input.getBytesUntilLimit() > 0) {
                shape_.add(input.readUInt64());
              }
              input.popLimit(limit);
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        // Seal any lists we allocated, even on error, before publishing the message.
        if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
          values_ = java.util.Collections.unmodifiableList(values_);
        }
        if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
          keys_ = java.util.Collections.unmodifiableList(keys_);
        }
        if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
          shape_ = java.util.Collections.unmodifiableList(shape_);
        }
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_Int32Tensor_descriptor;
    }
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_Int32Tensor_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              aialgorithms.proto2.RecordProto2.Int32Tensor.class, aialgorithms.proto2.RecordProto2.Int32Tensor.Builder.class);
    }
    // Shared stateless parser; delegates to the wire-parsing constructor above.
    public static com.google.protobuf.Parser<Int32Tensor> PARSER =
        new com.google.protobuf.AbstractParser<Int32Tensor>() {
      public Int32Tensor parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new Int32Tensor(input, extensionRegistry);
      }
    };
    @java.lang.Override
    public com.google.protobuf.Parser<Int32Tensor> getParserForType() {
      return PARSER;
    }
    // repeated int32 values = 1 [packed = true];
    public static final int VALUES_FIELD_NUMBER = 1;
    // Backing lists are immutable once the message is constructed, so the
    // getters can return them directly without defensive copies.
    private java.util.List<java.lang.Integer> values_;
    /**
     * <code>repeated int32 values = 1 [packed = true];</code>
     *
     * <pre>
     * Each value in the vector. If keys is empty this is treated as a
     * dense vector.
     * </pre>
     */
    public java.util.List<java.lang.Integer>
        getValuesList() {
      return values_;
    }
    /**
     * <code>repeated int32 values = 1 [packed = true];</code>
     *
     * <pre>
     * Each value in the vector. If keys is empty this is treated as a
     * dense vector.
     * </pre>
     */
    public int getValuesCount() {
      return values_.size();
    }
    /**
     * <code>repeated int32 values = 1 [packed = true];</code>
     *
     * <pre>
     * Each value in the vector. If keys is empty this is treated as a
     * dense vector.
     * </pre>
     */
    public int getValues(int index) {
      return values_.get(index);
    }
    // Byte size of the packed payload, computed by getSerializedSize().
    private int valuesMemoizedSerializedSize = -1;
    // repeated uint64 keys = 2 [packed = true];
    public static final int KEYS_FIELD_NUMBER = 2;
    private java.util.List<java.lang.Long> keys_;
    /**
     * <code>repeated uint64 keys = 2 [packed = true];</code>
     *
     * <pre>
     * If not empty then the vector is treated as sparse with
     * each key specifying the location of the value in the sparse vector.
     * </pre>
     */
    public java.util.List<java.lang.Long>
        getKeysList() {
      return keys_;
    }
    /**
     * <code>repeated uint64 keys = 2 [packed = true];</code>
     *
     * <pre>
     * If not empty then the vector is treated as sparse with
     * each key specifying the location of the value in the sparse vector.
     * </pre>
     */
    public int getKeysCount() {
      return keys_.size();
    }
    /**
     * <code>repeated uint64 keys = 2 [packed = true];</code>
     *
     * <pre>
     * If not empty then the vector is treated as sparse with
     * each key specifying the location of the value in the sparse vector.
     * </pre>
     */
    public long getKeys(int index) {
      return keys_.get(index);
    }
    private int keysMemoizedSerializedSize = -1;
    // repeated uint64 shape = 3 [packed = true];
    public static final int SHAPE_FIELD_NUMBER = 3;
    private java.util.List<java.lang.Long> shape_;
    /**
     * <code>repeated uint64 shape = 3 [packed = true];</code>
     *
     * <pre>
     * Optional shape which will allow the vector to represent a matrix.
     * e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
     * and keys[i] % 20 will give the column.
     * This also supports n-dimensonal tensors.
     * NB. this must be specified if the tensor is sparse.
     * </pre>
     */
    public java.util.List<java.lang.Long>
        getShapeList() {
      return shape_;
    }
    /**
     * <code>repeated uint64 shape = 3 [packed = true];</code>
     *
     * <pre>
     * Optional shape which will allow the vector to represent a matrix.
     * e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
     * and keys[i] % 20 will give the column.
     * This also supports n-dimensonal tensors.
     * NB. this must be specified if the tensor is sparse.
     * </pre>
     */
    public int getShapeCount() {
      return shape_.size();
    }
    /**
     * <code>repeated uint64 shape = 3 [packed = true];</code>
     *
     * <pre>
     * Optional shape which will allow the vector to represent a matrix.
     * e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
     * and keys[i] % 20 will give the column.
     * This also supports n-dimensonal tensors.
     * NB. this must be specified if the tensor is sparse.
     * </pre>
     */
    public long getShape(int index) {
      return shape_.get(index);
    }
    private int shapeMemoizedSerializedSize = -1;
    private void initFields() {
      // Default every repeated field to the shared empty list.
      values_ = java.util.Collections.emptyList();
      keys_ = java.util.Collections.emptyList();
      shape_ = java.util.Collections.emptyList();
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      // No required fields in this message, so initialization is trivially true;
      // the result is memoized (-1 = unknown, 1 = initialized).
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      memoizedIsInitialized = 1;
      return true;
    }
    // Serializes in packed form: for each non-empty field, a length-delimited
    // tag (10/18/26 = fields 1/2/3, wire type 2) followed by the raw payload.
    // Relies on getSerializedSize() having populated the memoized payload sizes.
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (getValuesList().size() > 0) {
        output.writeRawVarint32(10);
        output.writeRawVarint32(valuesMemoizedSerializedSize);
      }
      for (int i = 0; i < values_.size(); i++) {
        output.writeInt32NoTag(values_.get(i));
      }
      if (getKeysList().size() > 0) {
        output.writeRawVarint32(18);
        output.writeRawVarint32(keysMemoizedSerializedSize);
      }
      for (int i = 0; i < keys_.size(); i++) {
        output.writeUInt64NoTag(keys_.get(i));
      }
      if (getShapeList().size() > 0) {
        output.writeRawVarint32(26);
        output.writeRawVarint32(shapeMemoizedSerializedSize);
      }
      for (int i = 0; i < shape_.size(); i++) {
        output.writeUInt64NoTag(shape_.get(i));
      }
      getUnknownFields().writeTo(output);
    }
    private int memoizedSerializedSize = -1;
    // Computes (and memoizes) the total serialized size; also records each
    // field's packed payload size for writeTo() above.
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
      size = 0;
      {
        int dataSize = 0;
        for (int i = 0; i < values_.size(); i++) {
          dataSize += com.google.protobuf.CodedOutputStream
            .computeInt32SizeNoTag(values_.get(i));
        }
        size += dataSize;
        if (!getValuesList().isEmpty()) {
          // 1 byte for the tag plus the varint length prefix.
          size += 1;
          size += com.google.protobuf.CodedOutputStream
              .computeInt32SizeNoTag(dataSize);
        }
        valuesMemoizedSerializedSize = dataSize;
      }
      {
        int dataSize = 0;
        for (int i = 0; i < keys_.size(); i++) {
          dataSize += com.google.protobuf.CodedOutputStream
            .computeUInt64SizeNoTag(keys_.get(i));
        }
        size += dataSize;
        if (!getKeysList().isEmpty()) {
          size += 1;
          size += com.google.protobuf.CodedOutputStream
              .computeInt32SizeNoTag(dataSize);
        }
        keysMemoizedSerializedSize = dataSize;
      }
      {
        int dataSize = 0;
        for (int i = 0; i < shape_.size(); i++) {
          dataSize += com.google.protobuf.CodedOutputStream
            .computeUInt64SizeNoTag(shape_.get(i));
        }
        size += dataSize;
        if (!getShapeList().isEmpty()) {
          size += 1;
          size += com.google.protobuf.CodedOutputStream
              .computeInt32SizeNoTag(dataSize);
        }
        shapeMemoizedSerializedSize = dataSize;
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    // Standard protoc-generated parseFrom overloads; all delegate to PARSER.
    public static aialgorithms.proto2.RecordProto2.Int32Tensor parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static aialgorithms.proto2.RecordProto2.Int32Tensor parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static aialgorithms.proto2.RecordProto2.Int32Tensor parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static aialgorithms.proto2.RecordProto2.Int32Tensor parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static aialgorithms.proto2.RecordProto2.Int32Tensor parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static aialgorithms.proto2.RecordProto2.Int32Tensor parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static aialgorithms.proto2.RecordProto2.Int32Tensor parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static aialgorithms.proto2.RecordProto2.Int32Tensor parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static aialgorithms.proto2.RecordProto2.Int32Tensor parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static aialgorithms.proto2.RecordProto2.Int32Tensor parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    // Builder factory methods.
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(aialgorithms.proto2.RecordProto2.Int32Tensor prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code aialgorithms.proto2.Int32Tensor}
     *
     * <pre>
     * A sparse or dense rank-R tensor that stores data as 32-bit ints (int32).
     * </pre>
     */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements aialgorithms.proto2.RecordProto2.Int32TensorOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_Int32Tensor_descriptor;
      }
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_Int32Tensor_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                aialgorithms.proto2.RecordProto2.Int32Tensor.class, aialgorithms.proto2.RecordProto2.Int32Tensor.Builder.class);
      }
      // Construct using aialgorithms.proto2.RecordProto2.Int32Tensor.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        // No message-typed fields, so nothing to pre-initialize.
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }
      public Builder clear() {
        // Reset every repeated field to empty and drop its mutability bit.
        super.clear();
        values_ = java.util.Collections.emptyList();
        bitField0_ = (bitField0_ & ~0x00000001);
        keys_ = java.util.Collections.emptyList();
        bitField0_ = (bitField0_ & ~0x00000002);
        shape_ = java.util.Collections.emptyList();
        bitField0_ = (bitField0_ & ~0x00000004);
        return this;
      }
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_Int32Tensor_descriptor;
      }
      public aialgorithms.proto2.RecordProto2.Int32Tensor getDefaultInstanceForType() {
        return aialgorithms.proto2.RecordProto2.Int32Tensor.getDefaultInstance();
      }
      public aialgorithms.proto2.RecordProto2.Int32Tensor build() {
        aialgorithms.proto2.RecordProto2.Int32Tensor result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
      public aialgorithms.proto2.RecordProto2.Int32Tensor buildPartial() {
        // Transfer ownership of each list to the message: seal it as unmodifiable
        // and clear the mutability bit so further builder edits copy-on-write.
        aialgorithms.proto2.RecordProto2.Int32Tensor result = new aialgorithms.proto2.RecordProto2.Int32Tensor(this);
        int from_bitField0_ = bitField0_;
        if (((bitField0_ & 0x00000001) == 0x00000001)) {
          values_ = java.util.Collections.unmodifiableList(values_);
          bitField0_ = (bitField0_ & ~0x00000001);
        }
        result.values_ = values_;
        if (((bitField0_ & 0x00000002) == 0x00000002)) {
          keys_ = java.util.Collections.unmodifiableList(keys_);
          bitField0_ = (bitField0_ & ~0x00000002);
        }
        result.keys_ = keys_;
        if (((bitField0_ & 0x00000004) == 0x00000004)) {
          shape_ = java.util.Collections.unmodifiableList(shape_);
          bitField0_ = (bitField0_ & ~0x00000004);
        }
        result.shape_ = shape_;
        onBuilt();
        return result;
      }
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof aialgorithms.proto2.RecordProto2.Int32Tensor) {
          return mergeFrom((aialgorithms.proto2.RecordProto2.Int32Tensor)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      public Builder mergeFrom(aialgorithms.proto2.RecordProto2.Int32Tensor other) {
        if (other == aialgorithms.proto2.RecordProto2.Int32Tensor.getDefaultInstance()) return this;
        if (!other.values_.isEmpty()) {
          if (values_.isEmpty()) {
            // Share the other message's immutable list; clearing the bit marks
            // it as not ours, so a later write will copy first.
            values_ = other.values_;
            bitField0_ = (bitField0_ & ~0x00000001);
          } else {
            ensureValuesIsMutable();
            values_.addAll(other.values_);
          }
          onChanged();
        }
        if (!other.keys_.isEmpty()) {
          if (keys_.isEmpty()) {
            keys_ = other.keys_;
            bitField0_ = (bitField0_ & ~0x00000002);
          } else {
            ensureKeysIsMutable();
            keys_.addAll(other.keys_);
          }
          onChanged();
        }
        if (!other.shape_.isEmpty()) {
          if (shape_.isEmpty()) {
            shape_ = other.shape_;
            bitField0_ = (bitField0_ & ~0x00000004);
          } else {
            ensureShapeIsMutable();
            shape_.addAll(other.shape_);
          }
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
      public final boolean isInitialized() {
        return true;
      }
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        aialgorithms.proto2.RecordProto2.Int32Tensor parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          // Keep whatever was parsed before the failure, then rethrow.
          parsedMessage = (aialgorithms.proto2.RecordProto2.Int32Tensor) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      // Bit i tracks whether repeated field i+1's list is a private mutable copy.
      private int bitField0_;
      // repeated int32 values = 1 [packed = true];
      private java.util.List<java.lang.Integer> values_ = java.util.Collections.emptyList();
      private void ensureValuesIsMutable() {
        // Copy-on-first-write into a private ArrayList.
        if (!((bitField0_ & 0x00000001) == 0x00000001)) {
          values_ = new java.util.ArrayList<java.lang.Integer>(values_);
          bitField0_ |= 0x00000001;
        }
      }
      /**
       * <code>repeated int32 values = 1 [packed = true];</code>
       *
       * <pre>
       * Each value in the vector. If keys is empty this is treated as a
       * dense vector.
       * </pre>
       */
      public java.util.List<java.lang.Integer>
          getValuesList() {
        return java.util.Collections.unmodifiableList(values_);
      }
      /**
       * <code>repeated int32 values = 1 [packed = true];</code>
       *
       * <pre>
       * Each value in the vector. If keys is empty this is treated as a
       * dense vector.
       * </pre>
       */
      public int getValuesCount() {
        return values_.size();
      }
      /**
       * <code>repeated int32 values = 1 [packed = true];</code>
       *
       * <pre>
       * Each value in the vector. If keys is empty this is treated as a
       * dense vector.
       * </pre>
       */
      public int getValues(int index) {
        return values_.get(index);
      }
      /**
       * <code>repeated int32 values = 1 [packed = true];</code>
       *
       * <pre>
       * Each value in the vector. If keys is empty this is treated as a
       * dense vector.
       * </pre>
       */
      public Builder setValues(
          int index, int value) {
        ensureValuesIsMutable();
        values_.set(index, value);
        onChanged();
        return this;
      }
      /**
       * <code>repeated int32 values = 1 [packed = true];</code>
       *
       * <pre>
       * Each value in the vector. If keys is empty this is treated as a
       * dense vector.
       * </pre>
       */
      public Builder addValues(int value) {
        ensureValuesIsMutable();
        values_.add(value);
        onChanged();
        return this;
      }
      /**
       * <code>repeated int32 values = 1 [packed = true];</code>
       *
       * <pre>
       * Each value in the vector. If keys is empty this is treated as a
       * dense vector.
       * </pre>
       */
      public Builder addAllValues(
          java.lang.Iterable<? extends java.lang.Integer> values) {
        ensureValuesIsMutable();
        super.addAll(values, values_);
        onChanged();
        return this;
      }
      /**
       * <code>repeated int32 values = 1 [packed = true];</code>
       *
       * <pre>
       * Each value in the vector. If keys is empty this is treated as a
       * dense vector.
       * </pre>
       */
      public Builder clearValues() {
        values_ = java.util.Collections.emptyList();
        bitField0_ = (bitField0_ & ~0x00000001);
        onChanged();
        return this;
      }
      // repeated uint64 keys = 2 [packed = true];
      private java.util.List<java.lang.Long> keys_ = java.util.Collections.emptyList();
      private void ensureKeysIsMutable() {
        if (!((bitField0_ & 0x00000002) == 0x00000002)) {
          keys_ = new java.util.ArrayList<java.lang.Long>(keys_);
          bitField0_ |= 0x00000002;
        }
      }
      /**
       * <code>repeated uint64 keys = 2 [packed = true];</code>
       *
       * <pre>
       * If not empty then the vector is treated as sparse with
       * each key specifying the location of the value in the sparse vector.
       * </pre>
       */
      public java.util.List<java.lang.Long>
          getKeysList() {
        return java.util.Collections.unmodifiableList(keys_);
      }
      /**
       * <code>repeated uint64 keys = 2 [packed = true];</code>
       *
       * <pre>
       * If not empty then the vector is treated as sparse with
       * each key specifying the location of the value in the sparse vector.
       * </pre>
       */
      public int getKeysCount() {
        return keys_.size();
      }
      /**
       * <code>repeated uint64 keys = 2 [packed = true];</code>
       *
       * <pre>
       * If not empty then the vector is treated as sparse with
       * each key specifying the location of the value in the sparse vector.
       * </pre>
       */
      public long getKeys(int index) {
        return keys_.get(index);
      }
      /**
       * <code>repeated uint64 keys = 2 [packed = true];</code>
       *
       * <pre>
       * If not empty then the vector is treated as sparse with
       * each key specifying the location of the value in the sparse vector.
       * </pre>
       */
      public Builder setKeys(
          int index, long value) {
        ensureKeysIsMutable();
        keys_.set(index, value);
        onChanged();
        return this;
      }
      /**
       * <code>repeated uint64 keys = 2 [packed = true];</code>
       *
       * <pre>
       * If not empty then the vector is treated as sparse with
       * each key specifying the location of the value in the sparse vector.
       * </pre>
       */
      public Builder addKeys(long value) {
        ensureKeysIsMutable();
        keys_.add(value);
        onChanged();
        return this;
      }
      /**
       * <code>repeated uint64 keys = 2 [packed = true];</code>
       *
       * <pre>
       * If not empty then the vector is treated as sparse with
       * each key specifying the location of the value in the sparse vector.
       * </pre>
       */
      public Builder addAllKeys(
          java.lang.Iterable<? extends java.lang.Long> values) {
        ensureKeysIsMutable();
        super.addAll(values, keys_);
        onChanged();
        return this;
      }
      /**
       * <code>repeated uint64 keys = 2 [packed = true];</code>
       *
       * <pre>
       * If not empty then the vector is treated as sparse with
       * each key specifying the location of the value in the sparse vector.
       * </pre>
       */
      public Builder clearKeys() {
        keys_ = java.util.Collections.emptyList();
        bitField0_ = (bitField0_ & ~0x00000002);
        onChanged();
        return this;
      }
      // repeated uint64 shape = 3 [packed = true];
      private java.util.List<java.lang.Long> shape_ = java.util.Collections.emptyList();
      private void ensureShapeIsMutable() {
        if (!((bitField0_ & 0x00000004) == 0x00000004)) {
          shape_ = new java.util.ArrayList<java.lang.Long>(shape_);
          bitField0_ |= 0x00000004;
        }
      }
      /**
       * <code>repeated uint64 shape = 3 [packed = true];</code>
       *
       * <pre>
       * Optional shape which will allow the vector to represent a matrix.
       * e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
       * and keys[i] % 20 will give the column.
       * This also supports n-dimensonal tensors.
       * NB. this must be specified if the tensor is sparse.
       * </pre>
       */
      public java.util.List<java.lang.Long>
          getShapeList() {
        return java.util.Collections.unmodifiableList(shape_);
      }
      /**
       * <code>repeated uint64 shape = 3 [packed = true];</code>
       *
       * <pre>
       * Optional shape which will allow the vector to represent a matrix.
       * e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
       * and keys[i] % 20 will give the column.
       * This also supports n-dimensonal tensors.
       * NB. this must be specified if the tensor is sparse.
       * </pre>
       */
      public int getShapeCount() {
        return shape_.size();
      }
      /**
       * <code>repeated uint64 shape = 3 [packed = true];</code>
       *
       * <pre>
       * Optional shape which will allow the vector to represent a matrix.
       * e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
       * and keys[i] % 20 will give the column.
       * This also supports n-dimensonal tensors.
       * NB. this must be specified if the tensor is sparse.
       * </pre>
       */
      public long getShape(int index) {
        return shape_.get(index);
      }
      /**
       * <code>repeated uint64 shape = 3 [packed = true];</code>
       *
       * <pre>
       * Optional shape which will allow the vector to represent a matrix.
       * e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
       * and keys[i] % 20 will give the column.
       * This also supports n-dimensonal tensors.
       * NB. this must be specified if the tensor is sparse.
       * </pre>
       */
      public Builder setShape(
          int index, long value) {
        ensureShapeIsMutable();
        shape_.set(index, value);
        onChanged();
        return this;
      }
/**
* <code>repeated uint64 shape = 3 [packed = true];</code>
*
* <pre>
* Optional shape which will allow the vector to represent a matrix.
* e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
* and keys[i] % 20 will give the column.
* This also supports n-dimensonal tensors.
* NB. this must be specified if the tensor is sparse.
* </pre>
*/
public Builder addShape(long value) {
ensureShapeIsMutable();
shape_.add(value);
onChanged();
return this;
}
/**
* <code>repeated uint64 shape = 3 [packed = true];</code>
*
* <pre>
* Optional shape which will allow the vector to represent a matrix.
* e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
* and keys[i] % 20 will give the column.
* This also supports n-dimensonal tensors.
* NB. this must be specified if the tensor is sparse.
* </pre>
*/
public Builder addAllShape(
java.lang.Iterable<? extends java.lang.Long> values) {
ensureShapeIsMutable();
super.addAll(values, shape_);
onChanged();
return this;
}
/**
* <code>repeated uint64 shape = 3 [packed = true];</code>
*
* <pre>
* Optional shape which will allow the vector to represent a matrix.
* e.g. if shape = [ 10, 20 ] then floor(keys[i] / 10) will give the row
* and keys[i] % 20 will give the column.
* This also supports n-dimensonal tensors.
* NB. this must be specified if the tensor is sparse.
* </pre>
*/
public Builder clearShape() {
shape_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:aialgorithms.proto2.Int32Tensor)
}
// Eagerly create the shared default (empty) Int32Tensor instance;
// initFields() populates its fields with their default values.
static {
defaultInstance = new Int32Tensor(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:aialgorithms.proto2.Int32Tensor)
}
/**
 * Read-only accessor interface for {@code aialgorithms.proto2.Bytes},
 * implemented by both the immutable message and its builder.
 * NOTE(review): protoc-generated — do not hand-edit.
 */
public interface BytesOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// repeated bytes value = 1;
/**
 * <code>repeated bytes value = 1;</code>
 */
java.util.List<com.google.protobuf.ByteString> getValueList();
/**
 * <code>repeated bytes value = 1;</code>
 */
int getValueCount();
/**
 * <code>repeated bytes value = 1;</code>
 */
com.google.protobuf.ByteString getValue(int index);
// optional string content_type = 2;
/**
 * <code>optional string content_type = 2;</code>
 *
 * <pre>
 * Stores the content type of the data if known.
 * This will allow the possibility of using decoders for common formats
 * in the future.
 * </pre>
 */
boolean hasContentType();
/**
 * <code>optional string content_type = 2;</code>
 *
 * <pre>
 * Stores the content type of the data if known.
 * This will allow the possibility of using decoders for common formats
 * in the future.
 * </pre>
 */
java.lang.String getContentType();
/**
 * <code>optional string content_type = 2;</code>
 *
 * <pre>
 * Stores the content type of the data if known.
 * This will allow the possibility of using decoders for common formats
 * in the future.
 * </pre>
 */
com.google.protobuf.ByteString
getContentTypeBytes();
}
/**
 * Protobuf type {@code aialgorithms.proto2.Bytes}
 *
 * <pre>
 * Support for storing binary data for parsing in other ways (such as JPEG/etc).
 * This is an example of another type of value and may not immediately be supported.
 * </pre>
 */
// NOTE(review): protoc-generated message class (protobuf-java 2.x style).
// Do not hand-edit; regenerate from the .proto definition instead.
public static final class Bytes extends
com.google.protobuf.GeneratedMessage
implements BytesOrBuilder {
// Use Bytes.newBuilder() to construct.
private Bytes(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
// Constructor for the shared default instance only (no field init here;
// the static initializer below calls initFields()).
private Bytes(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final Bytes defaultInstance;
public static Bytes getDefaultInstance() {
return defaultInstance;
}
public Bytes getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
// Wire-format parsing constructor: reads tag/value pairs until EOF (tag 0)
// or an unparseable unknown field. Invoked via PARSER below.
private Bytes(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
// Java switch cases don't fall through here (every arm breaks), so the
// 'default' arm placed before the numbered cases is safe.
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
// tag 10 = field 1, wire type 2 (length-delimited): repeated bytes value
case 10: {
if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
value_ = new java.util.ArrayList<com.google.protobuf.ByteString>();
mutable_bitField0_ |= 0x00000001;
}
value_.add(input.readBytes());
break;
}
// tag 18 = field 2, wire type 2: optional string content_type
case 18: {
bitField0_ |= 0x00000001;
contentType_ = input.readBytes();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
// Freeze the repeated field so the built message is immutable.
if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
value_ = java.util.Collections.unmodifiableList(value_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_Bytes_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_Bytes_fieldAccessorTable
.ensureFieldAccessorsInitialized(
aialgorithms.proto2.RecordProto2.Bytes.class, aialgorithms.proto2.RecordProto2.Bytes.Builder.class);
}
// Generated parser delegating to the parsing constructor above.
// (public static mutable field is the protobuf-2.x generated idiom.)
public static com.google.protobuf.Parser<Bytes> PARSER =
new com.google.protobuf.AbstractParser<Bytes>() {
public Bytes parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new Bytes(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<Bytes> getParserForType() {
return PARSER;
}
// In the message class, bit 0x00000001 of bitField0_ = has content_type.
private int bitField0_;
// repeated bytes value = 1;
public static final int VALUE_FIELD_NUMBER = 1;
private java.util.List<com.google.protobuf.ByteString> value_;
/**
 * <code>repeated bytes value = 1;</code>
 */
public java.util.List<com.google.protobuf.ByteString>
getValueList() {
return value_;
}
/**
 * <code>repeated bytes value = 1;</code>
 */
public int getValueCount() {
return value_.size();
}
/**
 * <code>repeated bytes value = 1;</code>
 */
public com.google.protobuf.ByteString getValue(int index) {
return value_.get(index);
}
// optional string content_type = 2;
public static final int CONTENT_TYPE_FIELD_NUMBER = 2;
// Holds either a String or a ByteString; lazily decoded on first access.
private java.lang.Object contentType_;
/**
 * <code>optional string content_type = 2;</code>
 *
 * <pre>
 * Stores the content type of the data if known.
 * This will allow the possibility of using decoders for common formats
 * in the future.
 * </pre>
 */
public boolean hasContentType() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
 * <code>optional string content_type = 2;</code>
 *
 * <pre>
 * Stores the content type of the data if known.
 * This will allow the possibility of using decoders for common formats
 * in the future.
 * </pre>
 */
public java.lang.String getContentType() {
java.lang.Object ref = contentType_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
// Cache the decoded String only when the bytes are valid UTF-8, so the
// original bytes are preserved for re-serialization otherwise.
if (bs.isValidUtf8()) {
contentType_ = s;
}
return s;
}
}
/**
 * <code>optional string content_type = 2;</code>
 *
 * <pre>
 * Stores the content type of the data if known.
 * This will allow the possibility of using decoders for common formats
 * in the future.
 * </pre>
 */
public com.google.protobuf.ByteString
getContentTypeBytes() {
java.lang.Object ref = contentType_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
contentType_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
private void initFields() {
value_ = java.util.Collections.emptyList();
contentType_ = "";
}
// -1 = not computed yet; 0/1 = memoized false/true.
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
for (int i = 0; i < value_.size(); i++) {
output.writeBytes(1, value_.get(i));
}
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(2, getContentTypeBytes());
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
{
int dataSize = 0;
for (int i = 0; i < value_.size(); i++) {
dataSize += com.google.protobuf.CodedOutputStream
.computeBytesSizeNoTag(value_.get(i));
}
size += dataSize;
// One 1-byte tag per repeated element (field 1 tags fit in one byte).
size += 1 * getValueList().size();
}
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(2, getContentTypeBytes());
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
public static aialgorithms.proto2.RecordProto2.Bytes parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static aialgorithms.proto2.RecordProto2.Bytes parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static aialgorithms.proto2.RecordProto2.Bytes parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static aialgorithms.proto2.RecordProto2.Bytes parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static aialgorithms.proto2.RecordProto2.Bytes parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static aialgorithms.proto2.RecordProto2.Bytes parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static aialgorithms.proto2.RecordProto2.Bytes parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static aialgorithms.proto2.RecordProto2.Bytes parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static aialgorithms.proto2.RecordProto2.Bytes parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static aialgorithms.proto2.RecordProto2.Bytes parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(aialgorithms.proto2.RecordProto2.Bytes prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
 * Protobuf type {@code aialgorithms.proto2.Bytes}
 *
 * <pre>
 * Support for storing binary data for parsing in other ways (such as JPEG/etc).
 * This is an example of another type of value and may not immediately be supported.
 * </pre>
 */
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements aialgorithms.proto2.RecordProto2.BytesOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_Bytes_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_Bytes_fieldAccessorTable
.ensureFieldAccessorsInitialized(
aialgorithms.proto2.RecordProto2.Bytes.class, aialgorithms.proto2.RecordProto2.Bytes.Builder.class);
}
// Construct using aialgorithms.proto2.RecordProto2.Bytes.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
// No nested-message builders to force-initialize for this message.
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
// Builder bit layout differs from the message: 0x1 = value_ owned,
// 0x2 = content_type set.
value_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
contentType_ = "";
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_Bytes_descriptor;
}
public aialgorithms.proto2.RecordProto2.Bytes getDefaultInstanceForType() {
return aialgorithms.proto2.RecordProto2.Bytes.getDefaultInstance();
}
public aialgorithms.proto2.RecordProto2.Bytes build() {
aialgorithms.proto2.RecordProto2.Bytes result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public aialgorithms.proto2.RecordProto2.Bytes buildPartial() {
aialgorithms.proto2.RecordProto2.Bytes result = new aialgorithms.proto2.RecordProto2.Bytes(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
// Freeze value_ and drop builder ownership so subsequent builder
// mutations copy-on-write instead of mutating the built message.
if (((bitField0_ & 0x00000001) == 0x00000001)) {
value_ = java.util.Collections.unmodifiableList(value_);
bitField0_ = (bitField0_ & ~0x00000001);
}
result.value_ = value_;
// Translate builder bit 0x2 (content_type set) to message bit 0x1.
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000001;
}
result.contentType_ = contentType_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof aialgorithms.proto2.RecordProto2.Bytes) {
return mergeFrom((aialgorithms.proto2.RecordProto2.Bytes)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(aialgorithms.proto2.RecordProto2.Bytes other) {
if (other == aialgorithms.proto2.RecordProto2.Bytes.getDefaultInstance()) return this;
if (!other.value_.isEmpty()) {
if (value_.isEmpty()) {
// Share other's immutable list until this builder mutates it.
value_ = other.value_;
bitField0_ = (bitField0_ & ~0x00000001);
} else {
ensureValueIsMutable();
value_.addAll(other.value_);
}
onChanged();
}
if (other.hasContentType()) {
bitField0_ |= 0x00000002;
contentType_ = other.contentType_;
onChanged();
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
aialgorithms.proto2.RecordProto2.Bytes parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
// Merge whatever was parsed before the failure, then rethrow.
parsedMessage = (aialgorithms.proto2.RecordProto2.Bytes) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// repeated bytes value = 1;
private java.util.List<com.google.protobuf.ByteString> value_ = java.util.Collections.emptyList();
private void ensureValueIsMutable() {
if (!((bitField0_ & 0x00000001) == 0x00000001)) {
value_ = new java.util.ArrayList<com.google.protobuf.ByteString>(value_);
bitField0_ |= 0x00000001;
}
}
/**
 * <code>repeated bytes value = 1;</code>
 */
public java.util.List<com.google.protobuf.ByteString>
getValueList() {
return java.util.Collections.unmodifiableList(value_);
}
/**
 * <code>repeated bytes value = 1;</code>
 */
public int getValueCount() {
return value_.size();
}
/**
 * <code>repeated bytes value = 1;</code>
 */
public com.google.protobuf.ByteString getValue(int index) {
return value_.get(index);
}
/**
 * <code>repeated bytes value = 1;</code>
 */
public Builder setValue(
int index, com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
ensureValueIsMutable();
value_.set(index, value);
onChanged();
return this;
}
/**
 * <code>repeated bytes value = 1;</code>
 */
public Builder addValue(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
ensureValueIsMutable();
value_.add(value);
onChanged();
return this;
}
/**
 * <code>repeated bytes value = 1;</code>
 */
public Builder addAllValue(
java.lang.Iterable<? extends com.google.protobuf.ByteString> values) {
ensureValueIsMutable();
super.addAll(values, value_);
onChanged();
return this;
}
/**
 * <code>repeated bytes value = 1;</code>
 */
public Builder clearValue() {
value_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000001);
onChanged();
return this;
}
// optional string content_type = 2;
private java.lang.Object contentType_ = "";
/**
 * <code>optional string content_type = 2;</code>
 *
 * <pre>
 * Stores the content type of the data if known.
 * This will allow the possibility of using decoders for common formats
 * in the future.
 * </pre>
 */
public boolean hasContentType() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
 * <code>optional string content_type = 2;</code>
 *
 * <pre>
 * Stores the content type of the data if known.
 * This will allow the possibility of using decoders for common formats
 * in the future.
 * </pre>
 */
public java.lang.String getContentType() {
java.lang.Object ref = contentType_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
contentType_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
 * <code>optional string content_type = 2;</code>
 *
 * <pre>
 * Stores the content type of the data if known.
 * This will allow the possibility of using decoders for common formats
 * in the future.
 * </pre>
 */
public com.google.protobuf.ByteString
getContentTypeBytes() {
java.lang.Object ref = contentType_;
if (ref instanceof String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
contentType_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
 * <code>optional string content_type = 2;</code>
 *
 * <pre>
 * Stores the content type of the data if known.
 * This will allow the possibility of using decoders for common formats
 * in the future.
 * </pre>
 */
public Builder setContentType(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
contentType_ = value;
onChanged();
return this;
}
/**
 * <code>optional string content_type = 2;</code>
 *
 * <pre>
 * Stores the content type of the data if known.
 * This will allow the possibility of using decoders for common formats
 * in the future.
 * </pre>
 */
public Builder clearContentType() {
bitField0_ = (bitField0_ & ~0x00000002);
contentType_ = getDefaultInstance().getContentType();
onChanged();
return this;
}
/**
 * <code>optional string content_type = 2;</code>
 *
 * <pre>
 * Stores the content type of the data if known.
 * This will allow the possibility of using decoders for common formats
 * in the future.
 * </pre>
 */
public Builder setContentTypeBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
contentType_ = value;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:aialgorithms.proto2.Bytes)
}
static {
defaultInstance = new Bytes(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:aialgorithms.proto2.Bytes)
}
/**
 * Read-only accessor interface for {@code aialgorithms.proto2.Value},
 * implemented by both the immutable message and its builder.
 * NOTE(review): protoc-generated — do not hand-edit.
 */
public interface ValueOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// optional .aialgorithms.proto2.Float32Tensor float32_tensor = 2;
/**
 * <code>optional .aialgorithms.proto2.Float32Tensor float32_tensor = 2;</code>
 */
boolean hasFloat32Tensor();
/**
 * <code>optional .aialgorithms.proto2.Float32Tensor float32_tensor = 2;</code>
 */
aialgorithms.proto2.RecordProto2.Float32Tensor getFloat32Tensor();
/**
 * <code>optional .aialgorithms.proto2.Float32Tensor float32_tensor = 2;</code>
 */
aialgorithms.proto2.RecordProto2.Float32TensorOrBuilder getFloat32TensorOrBuilder();
// optional .aialgorithms.proto2.Float64Tensor float64_tensor = 3;
/**
 * <code>optional .aialgorithms.proto2.Float64Tensor float64_tensor = 3;</code>
 */
boolean hasFloat64Tensor();
/**
 * <code>optional .aialgorithms.proto2.Float64Tensor float64_tensor = 3;</code>
 */
aialgorithms.proto2.RecordProto2.Float64Tensor getFloat64Tensor();
/**
 * <code>optional .aialgorithms.proto2.Float64Tensor float64_tensor = 3;</code>
 */
aialgorithms.proto2.RecordProto2.Float64TensorOrBuilder getFloat64TensorOrBuilder();
// optional .aialgorithms.proto2.Int32Tensor int32_tensor = 7;
/**
 * <code>optional .aialgorithms.proto2.Int32Tensor int32_tensor = 7;</code>
 */
boolean hasInt32Tensor();
/**
 * <code>optional .aialgorithms.proto2.Int32Tensor int32_tensor = 7;</code>
 */
aialgorithms.proto2.RecordProto2.Int32Tensor getInt32Tensor();
/**
 * <code>optional .aialgorithms.proto2.Int32Tensor int32_tensor = 7;</code>
 */
aialgorithms.proto2.RecordProto2.Int32TensorOrBuilder getInt32TensorOrBuilder();
// optional .aialgorithms.proto2.Bytes bytes = 9;
/**
 * <code>optional .aialgorithms.proto2.Bytes bytes = 9;</code>
 */
boolean hasBytes();
/**
 * <code>optional .aialgorithms.proto2.Bytes bytes = 9;</code>
 */
aialgorithms.proto2.RecordProto2.Bytes getBytes();
/**
 * <code>optional .aialgorithms.proto2.Bytes bytes = 9;</code>
 */
aialgorithms.proto2.RecordProto2.BytesOrBuilder getBytesOrBuilder();
}
/**
* Protobuf type {@code aialgorithms.proto2.Value}
*/
public static final class Value extends
com.google.protobuf.GeneratedMessage
implements ValueOrBuilder {
// Use Value.newBuilder() to construct.
// Use Value.newBuilder() to construct.
private Value(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
// Constructor for the shared default instance only; fields are populated
// later via initFields() from the static initializer.
private Value(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final Value defaultInstance;
public static Value getDefaultInstance() {
return defaultInstance;
}
public Value getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
// Wire-format parsing constructor for Value. Each recognized tag decodes a
// nested message; if the same field appears twice on the wire, the second
// occurrence is merged into the first via a temporary sub-builder
// (standard proto2 "last/merge wins" semantics for embedded messages).
private Value(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
// tag 18 = field 2, wire type 2: float32_tensor
case 18: {
aialgorithms.proto2.RecordProto2.Float32Tensor.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = float32Tensor_.toBuilder();
}
float32Tensor_ = input.readMessage(aialgorithms.proto2.RecordProto2.Float32Tensor.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(float32Tensor_);
float32Tensor_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
// tag 26 = field 3, wire type 2: float64_tensor
case 26: {
aialgorithms.proto2.RecordProto2.Float64Tensor.Builder subBuilder = null;
if (((bitField0_ & 0x00000002) == 0x00000002)) {
subBuilder = float64Tensor_.toBuilder();
}
float64Tensor_ = input.readMessage(aialgorithms.proto2.RecordProto2.Float64Tensor.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(float64Tensor_);
float64Tensor_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000002;
break;
}
// tag 58 = field 7, wire type 2: int32_tensor
case 58: {
aialgorithms.proto2.RecordProto2.Int32Tensor.Builder subBuilder = null;
if (((bitField0_ & 0x00000004) == 0x00000004)) {
subBuilder = int32Tensor_.toBuilder();
}
int32Tensor_ = input.readMessage(aialgorithms.proto2.RecordProto2.Int32Tensor.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(int32Tensor_);
int32Tensor_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000004;
break;
}
// tag 74 = field 9, wire type 2: bytes
case 74: {
aialgorithms.proto2.RecordProto2.Bytes.Builder subBuilder = null;
if (((bitField0_ & 0x00000008) == 0x00000008)) {
subBuilder = bytes_.toBuilder();
}
bytes_ = input.readMessage(aialgorithms.proto2.RecordProto2.Bytes.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(bytes_);
bytes_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000008;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_Value_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_Value_fieldAccessorTable
.ensureFieldAccessorsInitialized(
aialgorithms.proto2.RecordProto2.Value.class, aialgorithms.proto2.RecordProto2.Value.Builder.class);
}
// Generated parser delegating to the parsing constructor above.
public static com.google.protobuf.Parser<Value> PARSER =
new com.google.protobuf.AbstractParser<Value>() {
public Value parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new Value(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<Value> getParserForType() {
return PARSER;
}
// Presence bits: 0x1 float32_tensor, 0x2 float64_tensor,
// 0x4 int32_tensor, 0x8 bytes (set by the parsing constructor).
private int bitField0_;
// optional .aialgorithms.proto2.Float32Tensor float32_tensor = 2;
public static final int FLOAT32_TENSOR_FIELD_NUMBER = 2;
private aialgorithms.proto2.RecordProto2.Float32Tensor float32Tensor_;
/**
 * <code>optional .aialgorithms.proto2.Float32Tensor float32_tensor = 2;</code>
 */
public boolean hasFloat32Tensor() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
 * <code>optional .aialgorithms.proto2.Float32Tensor float32_tensor = 2;</code>
 */
public aialgorithms.proto2.RecordProto2.Float32Tensor getFloat32Tensor() {
return float32Tensor_;
}
/**
 * <code>optional .aialgorithms.proto2.Float32Tensor float32_tensor = 2;</code>
 */
public aialgorithms.proto2.RecordProto2.Float32TensorOrBuilder getFloat32TensorOrBuilder() {
return float32Tensor_;
}
// optional .aialgorithms.proto2.Float64Tensor float64_tensor = 3;
public static final int FLOAT64_TENSOR_FIELD_NUMBER = 3;
private aialgorithms.proto2.RecordProto2.Float64Tensor float64Tensor_;
/**
 * <code>optional .aialgorithms.proto2.Float64Tensor float64_tensor = 3;</code>
 */
public boolean hasFloat64Tensor() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
 * <code>optional .aialgorithms.proto2.Float64Tensor float64_tensor = 3;</code>
 */
public aialgorithms.proto2.RecordProto2.Float64Tensor getFloat64Tensor() {
return float64Tensor_;
}
/**
 * <code>optional .aialgorithms.proto2.Float64Tensor float64_tensor = 3;</code>
 */
public aialgorithms.proto2.RecordProto2.Float64TensorOrBuilder getFloat64TensorOrBuilder() {
return float64Tensor_;
}
// optional .aialgorithms.proto2.Int32Tensor int32_tensor = 7;
public static final int INT32_TENSOR_FIELD_NUMBER = 7;
private aialgorithms.proto2.RecordProto2.Int32Tensor int32Tensor_;
/**
 * <code>optional .aialgorithms.proto2.Int32Tensor int32_tensor = 7;</code>
 */
public boolean hasInt32Tensor() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
 * <code>optional .aialgorithms.proto2.Int32Tensor int32_tensor = 7;</code>
 */
public aialgorithms.proto2.RecordProto2.Int32Tensor getInt32Tensor() {
return int32Tensor_;
}
/**
 * <code>optional .aialgorithms.proto2.Int32Tensor int32_tensor = 7;</code>
 */
public aialgorithms.proto2.RecordProto2.Int32TensorOrBuilder getInt32TensorOrBuilder() {
return int32Tensor_;
}
// optional .aialgorithms.proto2.Bytes bytes = 9;
public static final int BYTES_FIELD_NUMBER = 9;
private aialgorithms.proto2.RecordProto2.Bytes bytes_;
/**
 * <code>optional .aialgorithms.proto2.Bytes bytes = 9;</code>
 */
public boolean hasBytes() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
 * <code>optional .aialgorithms.proto2.Bytes bytes = 9;</code>
 */
public aialgorithms.proto2.RecordProto2.Bytes getBytes() {
return bytes_;
}
/**
 * <code>optional .aialgorithms.proto2.Bytes bytes = 9;</code>
 */
public aialgorithms.proto2.RecordProto2.BytesOrBuilder getBytesOrBuilder() {
return bytes_;
}
private void initFields() {
float32Tensor_ = aialgorithms.proto2.RecordProto2.Float32Tensor.getDefaultInstance();
float64Tensor_ = aialgorithms.proto2.RecordProto2.Float64Tensor.getDefaultInstance();
int32Tensor_ = aialgorithms.proto2.RecordProto2.Int32Tensor.getDefaultInstance();
bytes_ = aialgorithms.proto2.RecordProto2.Bytes.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(2, float32Tensor_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeMessage(3, float64Tensor_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeMessage(7, int32Tensor_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeMessage(9, bytes_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, float32Tensor_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, float64Tensor_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(7, int32Tensor_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(9, bytes_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
public static aialgorithms.proto2.RecordProto2.Value parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static aialgorithms.proto2.RecordProto2.Value parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static aialgorithms.proto2.RecordProto2.Value parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static aialgorithms.proto2.RecordProto2.Value parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static aialgorithms.proto2.RecordProto2.Value parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static aialgorithms.proto2.RecordProto2.Value parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static aialgorithms.proto2.RecordProto2.Value parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static aialgorithms.proto2.RecordProto2.Value parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static aialgorithms.proto2.RecordProto2.Value parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static aialgorithms.proto2.RecordProto2.Value parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(aialgorithms.proto2.RecordProto2.Value prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code aialgorithms.proto2.Value}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements aialgorithms.proto2.RecordProto2.ValueOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_Value_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_Value_fieldAccessorTable
.ensureFieldAccessorsInitialized(
aialgorithms.proto2.RecordProto2.Value.class, aialgorithms.proto2.RecordProto2.Value.Builder.class);
}
// Construct using aialgorithms.proto2.RecordProto2.Value.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getFloat32TensorFieldBuilder();
getFloat64TensorFieldBuilder();
getInt32TensorFieldBuilder();
getBytesFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (float32TensorBuilder_ == null) {
float32Tensor_ = aialgorithms.proto2.RecordProto2.Float32Tensor.getDefaultInstance();
} else {
float32TensorBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
if (float64TensorBuilder_ == null) {
float64Tensor_ = aialgorithms.proto2.RecordProto2.Float64Tensor.getDefaultInstance();
} else {
float64TensorBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
if (int32TensorBuilder_ == null) {
int32Tensor_ = aialgorithms.proto2.RecordProto2.Int32Tensor.getDefaultInstance();
} else {
int32TensorBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
if (bytesBuilder_ == null) {
bytes_ = aialgorithms.proto2.RecordProto2.Bytes.getDefaultInstance();
} else {
bytesBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_Value_descriptor;
}
public aialgorithms.proto2.RecordProto2.Value getDefaultInstanceForType() {
return aialgorithms.proto2.RecordProto2.Value.getDefaultInstance();
}
public aialgorithms.proto2.RecordProto2.Value build() {
aialgorithms.proto2.RecordProto2.Value result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public aialgorithms.proto2.RecordProto2.Value buildPartial() {
aialgorithms.proto2.RecordProto2.Value result = new aialgorithms.proto2.RecordProto2.Value(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (float32TensorBuilder_ == null) {
result.float32Tensor_ = float32Tensor_;
} else {
result.float32Tensor_ = float32TensorBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
if (float64TensorBuilder_ == null) {
result.float64Tensor_ = float64Tensor_;
} else {
result.float64Tensor_ = float64TensorBuilder_.build();
}
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
if (int32TensorBuilder_ == null) {
result.int32Tensor_ = int32Tensor_;
} else {
result.int32Tensor_ = int32TensorBuilder_.build();
}
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
if (bytesBuilder_ == null) {
result.bytes_ = bytes_;
} else {
result.bytes_ = bytesBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof aialgorithms.proto2.RecordProto2.Value) {
return mergeFrom((aialgorithms.proto2.RecordProto2.Value)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(aialgorithms.proto2.RecordProto2.Value other) {
if (other == aialgorithms.proto2.RecordProto2.Value.getDefaultInstance()) return this;
if (other.hasFloat32Tensor()) {
mergeFloat32Tensor(other.getFloat32Tensor());
}
if (other.hasFloat64Tensor()) {
mergeFloat64Tensor(other.getFloat64Tensor());
}
if (other.hasInt32Tensor()) {
mergeInt32Tensor(other.getInt32Tensor());
}
if (other.hasBytes()) {
mergeBytes(other.getBytes());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
aialgorithms.proto2.RecordProto2.Value parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (aialgorithms.proto2.RecordProto2.Value) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// optional .aialgorithms.proto2.Float32Tensor float32_tensor = 2;
private aialgorithms.proto2.RecordProto2.Float32Tensor float32Tensor_ = aialgorithms.proto2.RecordProto2.Float32Tensor.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
aialgorithms.proto2.RecordProto2.Float32Tensor, aialgorithms.proto2.RecordProto2.Float32Tensor.Builder, aialgorithms.proto2.RecordProto2.Float32TensorOrBuilder> float32TensorBuilder_;
/**
* <code>optional .aialgorithms.proto2.Float32Tensor float32_tensor = 2;</code>
*/
public boolean hasFloat32Tensor() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>optional .aialgorithms.proto2.Float32Tensor float32_tensor = 2;</code>
*/
public aialgorithms.proto2.RecordProto2.Float32Tensor getFloat32Tensor() {
if (float32TensorBuilder_ == null) {
return float32Tensor_;
} else {
return float32TensorBuilder_.getMessage();
}
}
/**
* <code>optional .aialgorithms.proto2.Float32Tensor float32_tensor = 2;</code>
*/
public Builder setFloat32Tensor(aialgorithms.proto2.RecordProto2.Float32Tensor value) {
if (float32TensorBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
float32Tensor_ = value;
onChanged();
} else {
float32TensorBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* <code>optional .aialgorithms.proto2.Float32Tensor float32_tensor = 2;</code>
*/
public Builder setFloat32Tensor(
aialgorithms.proto2.RecordProto2.Float32Tensor.Builder builderForValue) {
if (float32TensorBuilder_ == null) {
float32Tensor_ = builderForValue.build();
onChanged();
} else {
float32TensorBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* <code>optional .aialgorithms.proto2.Float32Tensor float32_tensor = 2;</code>
*/
public Builder mergeFloat32Tensor(aialgorithms.proto2.RecordProto2.Float32Tensor value) {
if (float32TensorBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
float32Tensor_ != aialgorithms.proto2.RecordProto2.Float32Tensor.getDefaultInstance()) {
float32Tensor_ =
aialgorithms.proto2.RecordProto2.Float32Tensor.newBuilder(float32Tensor_).mergeFrom(value).buildPartial();
} else {
float32Tensor_ = value;
}
onChanged();
} else {
float32TensorBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* <code>optional .aialgorithms.proto2.Float32Tensor float32_tensor = 2;</code>
*/
public Builder clearFloat32Tensor() {
if (float32TensorBuilder_ == null) {
float32Tensor_ = aialgorithms.proto2.RecordProto2.Float32Tensor.getDefaultInstance();
onChanged();
} else {
float32TensorBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* <code>optional .aialgorithms.proto2.Float32Tensor float32_tensor = 2;</code>
*/
public aialgorithms.proto2.RecordProto2.Float32Tensor.Builder getFloat32TensorBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getFloat32TensorFieldBuilder().getBuilder();
}
/**
* <code>optional .aialgorithms.proto2.Float32Tensor float32_tensor = 2;</code>
*/
public aialgorithms.proto2.RecordProto2.Float32TensorOrBuilder getFloat32TensorOrBuilder() {
if (float32TensorBuilder_ != null) {
return float32TensorBuilder_.getMessageOrBuilder();
} else {
return float32Tensor_;
}
}
/**
* <code>optional .aialgorithms.proto2.Float32Tensor float32_tensor = 2;</code>
*/
private com.google.protobuf.SingleFieldBuilder<
aialgorithms.proto2.RecordProto2.Float32Tensor, aialgorithms.proto2.RecordProto2.Float32Tensor.Builder, aialgorithms.proto2.RecordProto2.Float32TensorOrBuilder>
getFloat32TensorFieldBuilder() {
if (float32TensorBuilder_ == null) {
float32TensorBuilder_ = new com.google.protobuf.SingleFieldBuilder<
aialgorithms.proto2.RecordProto2.Float32Tensor, aialgorithms.proto2.RecordProto2.Float32Tensor.Builder, aialgorithms.proto2.RecordProto2.Float32TensorOrBuilder>(
float32Tensor_,
getParentForChildren(),
isClean());
float32Tensor_ = null;
}
return float32TensorBuilder_;
}
// optional .aialgorithms.proto2.Float64Tensor float64_tensor = 3;
private aialgorithms.proto2.RecordProto2.Float64Tensor float64Tensor_ = aialgorithms.proto2.RecordProto2.Float64Tensor.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
aialgorithms.proto2.RecordProto2.Float64Tensor, aialgorithms.proto2.RecordProto2.Float64Tensor.Builder, aialgorithms.proto2.RecordProto2.Float64TensorOrBuilder> float64TensorBuilder_;
/**
* <code>optional .aialgorithms.proto2.Float64Tensor float64_tensor = 3;</code>
*/
public boolean hasFloat64Tensor() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* <code>optional .aialgorithms.proto2.Float64Tensor float64_tensor = 3;</code>
*/
public aialgorithms.proto2.RecordProto2.Float64Tensor getFloat64Tensor() {
if (float64TensorBuilder_ == null) {
return float64Tensor_;
} else {
return float64TensorBuilder_.getMessage();
}
}
/**
* <code>optional .aialgorithms.proto2.Float64Tensor float64_tensor = 3;</code>
*/
public Builder setFloat64Tensor(aialgorithms.proto2.RecordProto2.Float64Tensor value) {
if (float64TensorBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
float64Tensor_ = value;
onChanged();
} else {
float64TensorBuilder_.setMessage(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* <code>optional .aialgorithms.proto2.Float64Tensor float64_tensor = 3;</code>
*/
public Builder setFloat64Tensor(
aialgorithms.proto2.RecordProto2.Float64Tensor.Builder builderForValue) {
if (float64TensorBuilder_ == null) {
float64Tensor_ = builderForValue.build();
onChanged();
} else {
float64TensorBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000002;
return this;
}
/**
* <code>optional .aialgorithms.proto2.Float64Tensor float64_tensor = 3;</code>
*/
public Builder mergeFloat64Tensor(aialgorithms.proto2.RecordProto2.Float64Tensor value) {
if (float64TensorBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002) &&
float64Tensor_ != aialgorithms.proto2.RecordProto2.Float64Tensor.getDefaultInstance()) {
float64Tensor_ =
aialgorithms.proto2.RecordProto2.Float64Tensor.newBuilder(float64Tensor_).mergeFrom(value).buildPartial();
} else {
float64Tensor_ = value;
}
onChanged();
} else {
float64TensorBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* <code>optional .aialgorithms.proto2.Float64Tensor float64_tensor = 3;</code>
*/
public Builder clearFloat64Tensor() {
if (float64TensorBuilder_ == null) {
float64Tensor_ = aialgorithms.proto2.RecordProto2.Float64Tensor.getDefaultInstance();
onChanged();
} else {
float64TensorBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
/**
* <code>optional .aialgorithms.proto2.Float64Tensor float64_tensor = 3;</code>
*/
public aialgorithms.proto2.RecordProto2.Float64Tensor.Builder getFloat64TensorBuilder() {
bitField0_ |= 0x00000002;
onChanged();
return getFloat64TensorFieldBuilder().getBuilder();
}
/**
* <code>optional .aialgorithms.proto2.Float64Tensor float64_tensor = 3;</code>
*/
public aialgorithms.proto2.RecordProto2.Float64TensorOrBuilder getFloat64TensorOrBuilder() {
if (float64TensorBuilder_ != null) {
return float64TensorBuilder_.getMessageOrBuilder();
} else {
return float64Tensor_;
}
}
/**
* <code>optional .aialgorithms.proto2.Float64Tensor float64_tensor = 3;</code>
*/
private com.google.protobuf.SingleFieldBuilder<
aialgorithms.proto2.RecordProto2.Float64Tensor, aialgorithms.proto2.RecordProto2.Float64Tensor.Builder, aialgorithms.proto2.RecordProto2.Float64TensorOrBuilder>
getFloat64TensorFieldBuilder() {
if (float64TensorBuilder_ == null) {
float64TensorBuilder_ = new com.google.protobuf.SingleFieldBuilder<
aialgorithms.proto2.RecordProto2.Float64Tensor, aialgorithms.proto2.RecordProto2.Float64Tensor.Builder, aialgorithms.proto2.RecordProto2.Float64TensorOrBuilder>(
float64Tensor_,
getParentForChildren(),
isClean());
float64Tensor_ = null;
}
return float64TensorBuilder_;
}
// optional .aialgorithms.proto2.Int32Tensor int32_tensor = 7;
private aialgorithms.proto2.RecordProto2.Int32Tensor int32Tensor_ = aialgorithms.proto2.RecordProto2.Int32Tensor.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
aialgorithms.proto2.RecordProto2.Int32Tensor, aialgorithms.proto2.RecordProto2.Int32Tensor.Builder, aialgorithms.proto2.RecordProto2.Int32TensorOrBuilder> int32TensorBuilder_;
/**
* <code>optional .aialgorithms.proto2.Int32Tensor int32_tensor = 7;</code>
*/
public boolean hasInt32Tensor() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* <code>optional .aialgorithms.proto2.Int32Tensor int32_tensor = 7;</code>
*/
public aialgorithms.proto2.RecordProto2.Int32Tensor getInt32Tensor() {
if (int32TensorBuilder_ == null) {
return int32Tensor_;
} else {
return int32TensorBuilder_.getMessage();
}
}
/**
* <code>optional .aialgorithms.proto2.Int32Tensor int32_tensor = 7;</code>
*/
public Builder setInt32Tensor(aialgorithms.proto2.RecordProto2.Int32Tensor value) {
if (int32TensorBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
int32Tensor_ = value;
onChanged();
} else {
int32TensorBuilder_.setMessage(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* <code>optional .aialgorithms.proto2.Int32Tensor int32_tensor = 7;</code>
*/
public Builder setInt32Tensor(
aialgorithms.proto2.RecordProto2.Int32Tensor.Builder builderForValue) {
if (int32TensorBuilder_ == null) {
int32Tensor_ = builderForValue.build();
onChanged();
} else {
int32TensorBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000004;
return this;
}
/**
* <code>optional .aialgorithms.proto2.Int32Tensor int32_tensor = 7;</code>
*/
public Builder mergeInt32Tensor(aialgorithms.proto2.RecordProto2.Int32Tensor value) {
if (int32TensorBuilder_ == null) {
if (((bitField0_ & 0x00000004) == 0x00000004) &&
int32Tensor_ != aialgorithms.proto2.RecordProto2.Int32Tensor.getDefaultInstance()) {
int32Tensor_ =
aialgorithms.proto2.RecordProto2.Int32Tensor.newBuilder(int32Tensor_).mergeFrom(value).buildPartial();
} else {
int32Tensor_ = value;
}
onChanged();
} else {
int32TensorBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000004;
return this;
}
/**
* <code>optional .aialgorithms.proto2.Int32Tensor int32_tensor = 7;</code>
*/
public Builder clearInt32Tensor() {
if (int32TensorBuilder_ == null) {
int32Tensor_ = aialgorithms.proto2.RecordProto2.Int32Tensor.getDefaultInstance();
onChanged();
} else {
int32TensorBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
/**
* <code>optional .aialgorithms.proto2.Int32Tensor int32_tensor = 7;</code>
*/
public aialgorithms.proto2.RecordProto2.Int32Tensor.Builder getInt32TensorBuilder() {
bitField0_ |= 0x00000004;
onChanged();
return getInt32TensorFieldBuilder().getBuilder();
}
/**
* <code>optional .aialgorithms.proto2.Int32Tensor int32_tensor = 7;</code>
*/
public aialgorithms.proto2.RecordProto2.Int32TensorOrBuilder getInt32TensorOrBuilder() {
if (int32TensorBuilder_ != null) {
return int32TensorBuilder_.getMessageOrBuilder();
} else {
return int32Tensor_;
}
}
/**
* <code>optional .aialgorithms.proto2.Int32Tensor int32_tensor = 7;</code>
*/
private com.google.protobuf.SingleFieldBuilder<
aialgorithms.proto2.RecordProto2.Int32Tensor, aialgorithms.proto2.RecordProto2.Int32Tensor.Builder, aialgorithms.proto2.RecordProto2.Int32TensorOrBuilder>
getInt32TensorFieldBuilder() {
if (int32TensorBuilder_ == null) {
int32TensorBuilder_ = new com.google.protobuf.SingleFieldBuilder<
aialgorithms.proto2.RecordProto2.Int32Tensor, aialgorithms.proto2.RecordProto2.Int32Tensor.Builder, aialgorithms.proto2.RecordProto2.Int32TensorOrBuilder>(
int32Tensor_,
getParentForChildren(),
isClean());
int32Tensor_ = null;
}
return int32TensorBuilder_;
}
// optional .aialgorithms.proto2.Bytes bytes = 9;
private aialgorithms.proto2.RecordProto2.Bytes bytes_ = aialgorithms.proto2.RecordProto2.Bytes.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
aialgorithms.proto2.RecordProto2.Bytes, aialgorithms.proto2.RecordProto2.Bytes.Builder, aialgorithms.proto2.RecordProto2.BytesOrBuilder> bytesBuilder_;
/**
* <code>optional .aialgorithms.proto2.Bytes bytes = 9;</code>
*/
public boolean hasBytes() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* <code>optional .aialgorithms.proto2.Bytes bytes = 9;</code>
*/
public aialgorithms.proto2.RecordProto2.Bytes getBytes() {
if (bytesBuilder_ == null) {
return bytes_;
} else {
return bytesBuilder_.getMessage();
}
}
/**
* <code>optional .aialgorithms.proto2.Bytes bytes = 9;</code>
*/
public Builder setBytes(aialgorithms.proto2.RecordProto2.Bytes value) {
if (bytesBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
bytes_ = value;
onChanged();
} else {
bytesBuilder_.setMessage(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* <code>optional .aialgorithms.proto2.Bytes bytes = 9;</code>
*/
public Builder setBytes(
aialgorithms.proto2.RecordProto2.Bytes.Builder builderForValue) {
if (bytesBuilder_ == null) {
bytes_ = builderForValue.build();
onChanged();
} else {
bytesBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000008;
return this;
}
/**
* <code>optional .aialgorithms.proto2.Bytes bytes = 9;</code>
*/
public Builder mergeBytes(aialgorithms.proto2.RecordProto2.Bytes value) {
if (bytesBuilder_ == null) {
if (((bitField0_ & 0x00000008) == 0x00000008) &&
bytes_ != aialgorithms.proto2.RecordProto2.Bytes.getDefaultInstance()) {
bytes_ =
aialgorithms.proto2.RecordProto2.Bytes.newBuilder(bytes_).mergeFrom(value).buildPartial();
} else {
bytes_ = value;
}
onChanged();
} else {
bytesBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000008;
return this;
}
/**
* <code>optional .aialgorithms.proto2.Bytes bytes = 9;</code>
*/
public Builder clearBytes() {
if (bytesBuilder_ == null) {
bytes_ = aialgorithms.proto2.RecordProto2.Bytes.getDefaultInstance();
onChanged();
} else {
bytesBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
/**
* <code>optional .aialgorithms.proto2.Bytes bytes = 9;</code>
*/
public aialgorithms.proto2.RecordProto2.Bytes.Builder getBytesBuilder() {
bitField0_ |= 0x00000008;
onChanged();
return getBytesFieldBuilder().getBuilder();
}
/**
* <code>optional .aialgorithms.proto2.Bytes bytes = 9;</code>
*/
public aialgorithms.proto2.RecordProto2.BytesOrBuilder getBytesOrBuilder() {
if (bytesBuilder_ != null) {
return bytesBuilder_.getMessageOrBuilder();
} else {
return bytes_;
}
}
/**
* <code>optional .aialgorithms.proto2.Bytes bytes = 9;</code>
*/
private com.google.protobuf.SingleFieldBuilder<
aialgorithms.proto2.RecordProto2.Bytes, aialgorithms.proto2.RecordProto2.Bytes.Builder, aialgorithms.proto2.RecordProto2.BytesOrBuilder>
getBytesFieldBuilder() {
if (bytesBuilder_ == null) {
bytesBuilder_ = new com.google.protobuf.SingleFieldBuilder<
aialgorithms.proto2.RecordProto2.Bytes, aialgorithms.proto2.RecordProto2.Bytes.Builder, aialgorithms.proto2.RecordProto2.BytesOrBuilder>(
bytes_,
getParentForChildren(),
isClean());
bytes_ = null;
}
return bytesBuilder_;
}
// @@protoc_insertion_point(builder_scope:aialgorithms.proto2.Value)
}
static {
defaultInstance = new Value(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:aialgorithms.proto2.Value)
}
public interface MapEntryOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// optional string key = 1;
/**
* <code>optional string key = 1;</code>
*/
boolean hasKey();
/**
* <code>optional string key = 1;</code>
*/
java.lang.String getKey();
/**
* <code>optional string key = 1;</code>
*/
com.google.protobuf.ByteString
getKeyBytes();
// optional .aialgorithms.proto2.Value value = 2;
/**
* <code>optional .aialgorithms.proto2.Value value = 2;</code>
*/
boolean hasValue();
/**
* <code>optional .aialgorithms.proto2.Value value = 2;</code>
*/
aialgorithms.proto2.RecordProto2.Value getValue();
/**
* <code>optional .aialgorithms.proto2.Value value = 2;</code>
*/
aialgorithms.proto2.RecordProto2.ValueOrBuilder getValueOrBuilder();
}
/**
* Protobuf type {@code aialgorithms.proto2.MapEntry}
*/
public static final class MapEntry extends
com.google.protobuf.GeneratedMessage
implements MapEntryOrBuilder {
// Use MapEntry.newBuilder() to construct.
private MapEntry(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private MapEntry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final MapEntry defaultInstance;
public static MapEntry getDefaultInstance() {
return defaultInstance;
}
public MapEntry getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private MapEntry(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
key_ = input.readBytes();
break;
}
case 18: {
aialgorithms.proto2.RecordProto2.Value.Builder subBuilder = null;
if (((bitField0_ & 0x00000002) == 0x00000002)) {
subBuilder = value_.toBuilder();
}
value_ = input.readMessage(aialgorithms.proto2.RecordProto2.Value.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(value_);
value_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000002;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_MapEntry_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_MapEntry_fieldAccessorTable
.ensureFieldAccessorsInitialized(
aialgorithms.proto2.RecordProto2.MapEntry.class, aialgorithms.proto2.RecordProto2.MapEntry.Builder.class);
}
public static com.google.protobuf.Parser<MapEntry> PARSER =
new com.google.protobuf.AbstractParser<MapEntry>() {
public MapEntry parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new MapEntry(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<MapEntry> getParserForType() {
return PARSER;
}
private int bitField0_;
// optional string key = 1;
public static final int KEY_FIELD_NUMBER = 1;
private java.lang.Object key_;
/**
* <code>optional string key = 1;</code>
*/
public boolean hasKey() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>optional string key = 1;</code>
*/
public java.lang.String getKey() {
java.lang.Object ref = key_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
key_ = s;
}
return s;
}
}
/**
* <code>optional string key = 1;</code>
*/
public com.google.protobuf.ByteString
getKeyBytes() {
java.lang.Object ref = key_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
key_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// optional .aialgorithms.proto2.Value value = 2;
public static final int VALUE_FIELD_NUMBER = 2;
    // optional .aialgorithms.proto2.Value value = 2;
    // Message-typed field; presence is tracked by bit 0x00000002 of bitField0_.
    private aialgorithms.proto2.RecordProto2.Value value_;
    /**
     * <code>optional .aialgorithms.proto2.Value value = 2;</code>
     */
    public boolean hasValue() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>optional .aialgorithms.proto2.Value value = 2;</code>
     */
    public aialgorithms.proto2.RecordProto2.Value getValue() {
      return value_;
    }
    /**
     * <code>optional .aialgorithms.proto2.Value value = 2;</code>
     */
    public aialgorithms.proto2.RecordProto2.ValueOrBuilder getValueOrBuilder() {
      return value_;
    }
    // Resets fields to their proto2 defaults; called by the parsing constructor
    // and by the static initializer that builds the default instance.
    private void initFields() {
      key_ = "";
      value_ = aialgorithms.proto2.RecordProto2.Value.getDefaultInstance();
    }
    // Memoized isInitialized() result: -1 = not yet computed, 0 = false, 1 = true.
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      memoizedIsInitialized = 1;
      return true;
    }
    // Writes set fields in field-number order, then any preserved unknown fields.
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();  // ensures memoized sizes are populated before writing
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeBytes(1, getKeyBytes());
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeMessage(2, value_);
      }
      getUnknownFields().writeTo(output);
    }
    // Memoized serialized size: -1 = not yet computed.
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeBytesSize(1, getKeyBytes());
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(2, value_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    // Static parse entry points; all delegate to PARSER.
    public static aialgorithms.proto2.RecordProto2.MapEntry parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static aialgorithms.proto2.RecordProto2.MapEntry parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static aialgorithms.proto2.RecordProto2.MapEntry parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static aialgorithms.proto2.RecordProto2.MapEntry parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static aialgorithms.proto2.RecordProto2.MapEntry parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static aialgorithms.proto2.RecordProto2.MapEntry parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    // Delimited variants read a leading varint length before the message bytes.
    public static aialgorithms.proto2.RecordProto2.MapEntry parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static aialgorithms.proto2.RecordProto2.MapEntry parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static aialgorithms.proto2.RecordProto2.MapEntry parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static aialgorithms.proto2.RecordProto2.MapEntry parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    // Builder factory methods.
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(aialgorithms.proto2.RecordProto2.MapEntry prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code aialgorithms.proto2.MapEntry}
     *
     * <p>Mutable builder for {@code MapEntry}. Not thread-safe.
     */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements aialgorithms.proto2.RecordProto2.MapEntryOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_MapEntry_descriptor;
      }
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_MapEntry_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                aialgorithms.proto2.RecordProto2.MapEntry.class, aialgorithms.proto2.RecordProto2.MapEntry.Builder.class);
      }
      // Construct using aialgorithms.proto2.RecordProto2.MapEntry.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      // Eagerly creates nested field builders when the runtime requests it via
      // GeneratedMessage.alwaysUseFieldBuilders (nested-builder support).
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getValueFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
      // Clears all fields and their presence bits back to defaults.
      public Builder clear() {
        super.clear();
        key_ = "";
        bitField0_ = (bitField0_ & ~0x00000001);
        if (valueBuilder_ == null) {
          value_ = aialgorithms.proto2.RecordProto2.Value.getDefaultInstance();
        } else {
          valueBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_MapEntry_descriptor;
      }
      public aialgorithms.proto2.RecordProto2.MapEntry getDefaultInstanceForType() {
        return aialgorithms.proto2.RecordProto2.MapEntry.getDefaultInstance();
      }
      public aialgorithms.proto2.RecordProto2.MapEntry build() {
        aialgorithms.proto2.RecordProto2.MapEntry result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
      // Builds without the isInitialized() check: copies field values and
      // presence bits from this builder into a new immutable message.
      public aialgorithms.proto2.RecordProto2.MapEntry buildPartial() {
        aialgorithms.proto2.RecordProto2.MapEntry result = new aialgorithms.proto2.RecordProto2.MapEntry(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        result.key_ = key_;
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        if (valueBuilder_ == null) {
          result.value_ = value_;
        } else {
          result.value_ = valueBuilder_.build();
        }
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof aialgorithms.proto2.RecordProto2.MapEntry) {
          return mergeFrom((aialgorithms.proto2.RecordProto2.MapEntry)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      // Merges only the fields that are present on `other`.
      public Builder mergeFrom(aialgorithms.proto2.RecordProto2.MapEntry other) {
        if (other == aialgorithms.proto2.RecordProto2.MapEntry.getDefaultInstance()) return this;
        if (other.hasKey()) {
          bitField0_ |= 0x00000001;
          key_ = other.key_;
          onChanged();
        }
        if (other.hasValue()) {
          mergeValue(other.getValue());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
      public final boolean isInitialized() {
        return true;
      }
      // Parses from a stream and merges the result into this builder. On a
      // parse error, any fields successfully read are still merged (finally).
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        aialgorithms.proto2.RecordProto2.MapEntry parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (aialgorithms.proto2.RecordProto2.MapEntry) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      private int bitField0_;
      // optional string key = 1;
      // Stored as either String or ByteString; converted and cached lazily.
      private java.lang.Object key_ = "";
      /**
       * <code>optional string key = 1;</code>
       */
      public boolean hasKey() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>optional string key = 1;</code>
       */
      public java.lang.String getKey() {
        java.lang.Object ref = key_;
        if (!(ref instanceof java.lang.String)) {
          // Decode the cached ByteString and replace the cache with the String.
          java.lang.String s = ((com.google.protobuf.ByteString) ref)
              .toStringUtf8();
          key_ = s;
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>optional string key = 1;</code>
       */
      public com.google.protobuf.ByteString
          getKeyBytes() {
        java.lang.Object ref = key_;
        if (ref instanceof String) {
          com.google.protobuf.ByteString b =
              com.google.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          key_ = b;
          return b;
        } else {
          return (com.google.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>optional string key = 1;</code>
       */
      public Builder setKey(
          java.lang.String value) {
        if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000001;
        key_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>optional string key = 1;</code>
       */
      public Builder clearKey() {
        bitField0_ = (bitField0_ & ~0x00000001);
        key_ = getDefaultInstance().getKey();
        onChanged();
        return this;
      }
      /**
       * <code>optional string key = 1;</code>
       */
      public Builder setKeyBytes(
          com.google.protobuf.ByteString value) {
        if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000001;
        key_ = value;
        onChanged();
        return this;
      }
      // optional .aialgorithms.proto2.Value value = 2;
      // Either value_ or valueBuilder_ is authoritative: once valueBuilder_ is
      // created (see getValueFieldBuilder), value_ is nulled and ignored.
      private aialgorithms.proto2.RecordProto2.Value value_ = aialgorithms.proto2.RecordProto2.Value.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          aialgorithms.proto2.RecordProto2.Value, aialgorithms.proto2.RecordProto2.Value.Builder, aialgorithms.proto2.RecordProto2.ValueOrBuilder> valueBuilder_;
      /**
       * <code>optional .aialgorithms.proto2.Value value = 2;</code>
       */
      public boolean hasValue() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * <code>optional .aialgorithms.proto2.Value value = 2;</code>
       */
      public aialgorithms.proto2.RecordProto2.Value getValue() {
        if (valueBuilder_ == null) {
          return value_;
        } else {
          return valueBuilder_.getMessage();
        }
      }
      /**
       * <code>optional .aialgorithms.proto2.Value value = 2;</code>
       */
      public Builder setValue(aialgorithms.proto2.RecordProto2.Value value) {
        if (valueBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          value_ = value;
          onChanged();
        } else {
          valueBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      /**
       * <code>optional .aialgorithms.proto2.Value value = 2;</code>
       */
      public Builder setValue(
          aialgorithms.proto2.RecordProto2.Value.Builder builderForValue) {
        if (valueBuilder_ == null) {
          value_ = builderForValue.build();
          onChanged();
        } else {
          valueBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      /**
       * <code>optional .aialgorithms.proto2.Value value = 2;</code>
       */
      public Builder mergeValue(aialgorithms.proto2.RecordProto2.Value value) {
        if (valueBuilder_ == null) {
          // Merge into the existing value only if one was already set and is
          // not the shared default instance; otherwise just adopt `value`.
          if (((bitField0_ & 0x00000002) == 0x00000002) &&
              value_ != aialgorithms.proto2.RecordProto2.Value.getDefaultInstance()) {
            value_ =
              aialgorithms.proto2.RecordProto2.Value.newBuilder(value_).mergeFrom(value).buildPartial();
          } else {
            value_ = value;
          }
          onChanged();
        } else {
          valueBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      /**
       * <code>optional .aialgorithms.proto2.Value value = 2;</code>
       */
      public Builder clearValue() {
        if (valueBuilder_ == null) {
          value_ = aialgorithms.proto2.RecordProto2.Value.getDefaultInstance();
          onChanged();
        } else {
          valueBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }
      /**
       * <code>optional .aialgorithms.proto2.Value value = 2;</code>
       */
      public aialgorithms.proto2.RecordProto2.Value.Builder getValueBuilder() {
        bitField0_ |= 0x00000002;
        onChanged();
        return getValueFieldBuilder().getBuilder();
      }
      /**
       * <code>optional .aialgorithms.proto2.Value value = 2;</code>
       */
      public aialgorithms.proto2.RecordProto2.ValueOrBuilder getValueOrBuilder() {
        if (valueBuilder_ != null) {
          return valueBuilder_.getMessageOrBuilder();
        } else {
          return value_;
        }
      }
      /**
       * <code>optional .aialgorithms.proto2.Value value = 2;</code>
       *
       * Lazily creates the SingleFieldBuilder, seeding it with the current
       * value_; value_ is nulled afterwards so the builder is authoritative.
       */
      private com.google.protobuf.SingleFieldBuilder<
          aialgorithms.proto2.RecordProto2.Value, aialgorithms.proto2.RecordProto2.Value.Builder, aialgorithms.proto2.RecordProto2.ValueOrBuilder>
          getValueFieldBuilder() {
        if (valueBuilder_ == null) {
          valueBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              aialgorithms.proto2.RecordProto2.Value, aialgorithms.proto2.RecordProto2.Value.Builder, aialgorithms.proto2.RecordProto2.ValueOrBuilder>(
                  value_,
                  getParentForChildren(),
                  isClean());
          value_ = null;
        }
        return valueBuilder_;
      }

      // @@protoc_insertion_point(builder_scope:aialgorithms.proto2.MapEntry)
    }
    static {
      // Create and initialize the singleton default instance exactly once.
      defaultInstance = new MapEntry(true);
      defaultInstance.initFields();
    }
// @@protoc_insertion_point(class_scope:aialgorithms.proto2.MapEntry)
}
  /**
   * Accessor interface for {@code aialgorithms.proto2.Record}, implemented by
   * both the immutable message and its Builder.
   */
  public interface RecordOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // repeated .aialgorithms.proto2.MapEntry features = 1;
    /**
     * <code>repeated .aialgorithms.proto2.MapEntry features = 1;</code>
     *
     * <pre>
     * Map from the name of the feature to the value.
     *
     * For vectors and libsvm-like datasets,
     * a single feature with the name `values`
     * should be specified.
     * </pre>
     */
    java.util.List<aialgorithms.proto2.RecordProto2.MapEntry>
        getFeaturesList();
    /**
     * <code>repeated .aialgorithms.proto2.MapEntry features = 1;</code>
     *
     * <pre>
     * Map from the name of the feature to the value.
     *
     * For vectors and libsvm-like datasets,
     * a single feature with the name `values`
     * should be specified.
     * </pre>
     */
    aialgorithms.proto2.RecordProto2.MapEntry getFeatures(int index);
    /**
     * <code>repeated .aialgorithms.proto2.MapEntry features = 1;</code>
     *
     * <pre>
     * Map from the name of the feature to the value.
     *
     * For vectors and libsvm-like datasets,
     * a single feature with the name `values`
     * should be specified.
     * </pre>
     */
    int getFeaturesCount();
    /**
     * <code>repeated .aialgorithms.proto2.MapEntry features = 1;</code>
     *
     * <pre>
     * Map from the name of the feature to the value.
     *
     * For vectors and libsvm-like datasets,
     * a single feature with the name `values`
     * should be specified.
     * </pre>
     */
    java.util.List<? extends aialgorithms.proto2.RecordProto2.MapEntryOrBuilder>
        getFeaturesOrBuilderList();
    /**
     * <code>repeated .aialgorithms.proto2.MapEntry features = 1;</code>
     *
     * <pre>
     * Map from the name of the feature to the value.
     *
     * For vectors and libsvm-like datasets,
     * a single feature with the name `values`
     * should be specified.
     * </pre>
     */
    aialgorithms.proto2.RecordProto2.MapEntryOrBuilder getFeaturesOrBuilder(
        int index);

    // repeated .aialgorithms.proto2.MapEntry label = 2;
    /**
     * <code>repeated .aialgorithms.proto2.MapEntry label = 2;</code>
     *
     * <pre>
     * Optional set of labels for this record.
     * Similar to features field above, the key used for
     * generic scalar / vector labels should be 'values'
     * </pre>
     */
    java.util.List<aialgorithms.proto2.RecordProto2.MapEntry>
        getLabelList();
    /**
     * <code>repeated .aialgorithms.proto2.MapEntry label = 2;</code>
     *
     * <pre>
     * Optional set of labels for this record.
     * Similar to features field above, the key used for
     * generic scalar / vector labels should be 'values'
     * </pre>
     */
    aialgorithms.proto2.RecordProto2.MapEntry getLabel(int index);
    /**
     * <code>repeated .aialgorithms.proto2.MapEntry label = 2;</code>
     *
     * <pre>
     * Optional set of labels for this record.
     * Similar to features field above, the key used for
     * generic scalar / vector labels should be 'values'
     * </pre>
     */
    int getLabelCount();
    /**
     * <code>repeated .aialgorithms.proto2.MapEntry label = 2;</code>
     *
     * <pre>
     * Optional set of labels for this record.
     * Similar to features field above, the key used for
     * generic scalar / vector labels should be 'values'
     * </pre>
     */
    java.util.List<? extends aialgorithms.proto2.RecordProto2.MapEntryOrBuilder>
        getLabelOrBuilderList();
    /**
     * <code>repeated .aialgorithms.proto2.MapEntry label = 2;</code>
     *
     * <pre>
     * Optional set of labels for this record.
     * Similar to features field above, the key used for
     * generic scalar / vector labels should be 'values'
     * </pre>
     */
    aialgorithms.proto2.RecordProto2.MapEntryOrBuilder getLabelOrBuilder(
        int index);

    // optional string uid = 3;
    /**
     * <code>optional string uid = 3;</code>
     *
     * <pre>
     * Unique identifier for this record in the dataset.
     *
     * Whilst not necessary, this allows better
     * debugging where there are data issues.
     *
     * This is not used by the algorithm directly.
     * </pre>
     */
    boolean hasUid();
    /**
     * <code>optional string uid = 3;</code>
     *
     * <pre>
     * Unique identifier for this record in the dataset.
     *
     * Whilst not necessary, this allows better
     * debugging where there are data issues.
     *
     * This is not used by the algorithm directly.
     * </pre>
     */
    java.lang.String getUid();
    /**
     * <code>optional string uid = 3;</code>
     *
     * <pre>
     * Unique identifier for this record in the dataset.
     *
     * Whilst not necessary, this allows better
     * debugging where there are data issues.
     *
     * This is not used by the algorithm directly.
     * </pre>
     */
    com.google.protobuf.ByteString
        getUidBytes();

    // optional string metadata = 4;
    /**
     * <code>optional string metadata = 4;</code>
     *
     * <pre>
     * Textual metadata describing the record.
     *
     * This may include JSON-serialized information
     * about the source of the record.
     *
     * This is not used by the algorithm directly.
     * </pre>
     */
    boolean hasMetadata();
    /**
     * <code>optional string metadata = 4;</code>
     *
     * <pre>
     * Textual metadata describing the record.
     *
     * This may include JSON-serialized information
     * about the source of the record.
     *
     * This is not used by the algorithm directly.
     * </pre>
     */
    java.lang.String getMetadata();
    /**
     * <code>optional string metadata = 4;</code>
     *
     * <pre>
     * Textual metadata describing the record.
     *
     * This may include JSON-serialized information
     * about the source of the record.
     *
     * This is not used by the algorithm directly.
     * </pre>
     */
    com.google.protobuf.ByteString
        getMetadataBytes();

    // optional string configuration = 5;
    /**
     * <code>optional string configuration = 5;</code>
     *
     * <pre>
     * Optional serialized JSON object that allows per-record
     * hyper-parameters/configuration/other information to be set.
     *
     * The meaning/interpretation of this field is defined by
     * the algorithm author and may not be supported.
     *
     * This is used to pass additional inference configuration
     * when batch inference is used (e.g. types of scores to return).
     * </pre>
     */
    boolean hasConfiguration();
    /**
     * <code>optional string configuration = 5;</code>
     *
     * <pre>
     * Optional serialized JSON object that allows per-record
     * hyper-parameters/configuration/other information to be set.
     *
     * The meaning/interpretation of this field is defined by
     * the algorithm author and may not be supported.
     *
     * This is used to pass additional inference configuration
     * when batch inference is used (e.g. types of scores to return).
     * </pre>
     */
    java.lang.String getConfiguration();
    /**
     * <code>optional string configuration = 5;</code>
     *
     * <pre>
     * Optional serialized JSON object that allows per-record
     * hyper-parameters/configuration/other information to be set.
     *
     * The meaning/interpretation of this field is defined by
     * the algorithm author and may not be supported.
     *
     * This is used to pass additional inference configuration
     * when batch inference is used (e.g. types of scores to return).
     * </pre>
     */
    com.google.protobuf.ByteString
        getConfigurationBytes();
  }
/**
* Protobuf type {@code aialgorithms.proto2.Record}
*/
public static final class Record extends
com.google.protobuf.GeneratedMessage
implements RecordOrBuilder {
    // Use Record.newBuilder() to construct.
    private Record(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // No-op constructor used only to create the singleton default instance.
    private Record(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
    private static final Record defaultInstance;
    public static Record getDefaultInstance() {
      return defaultInstance;
    }
    public Record getDefaultInstanceForType() {
      return defaultInstance;
    }
    // Unknown fields encountered during parsing; preserved for re-serialization.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Parsing constructor: reads a Record from the stream. Unknown fields are
    // preserved; repeated fields are accumulated in mutable ArrayLists and made
    // unmodifiable in the finally block, even when parsing fails part-way.
    private Record(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          // Dispatch is by tag value; the textual position of the default arm
          // before the numbered cases does not affect matching.
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              // Unknown tag: preserve it, and stop if it cannot be consumed
              // (e.g. an end-group tag).
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
                features_ = new java.util.ArrayList<aialgorithms.proto2.RecordProto2.MapEntry>();
                mutable_bitField0_ |= 0x00000001;
              }
              features_.add(input.readMessage(aialgorithms.proto2.RecordProto2.MapEntry.PARSER, extensionRegistry));
              break;
            }
            case 18: {
              if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
                label_ = new java.util.ArrayList<aialgorithms.proto2.RecordProto2.MapEntry>();
                mutable_bitField0_ |= 0x00000002;
              }
              label_.add(input.readMessage(aialgorithms.proto2.RecordProto2.MapEntry.PARSER, extensionRegistry));
              break;
            }
            case 26: {
              bitField0_ |= 0x00000001;
              uid_ = input.readBytes();
              break;
            }
            case 34: {
              bitField0_ |= 0x00000002;
              metadata_ = input.readBytes();
              break;
            }
            case 42: {
              bitField0_ |= 0x00000004;
              configuration_ = input.readBytes();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
          features_ = java.util.Collections.unmodifiableList(features_);
        }
        if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
          label_ = java.util.Collections.unmodifiableList(label_);
        }
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_Record_descriptor;
    }
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_Record_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              aialgorithms.proto2.RecordProto2.Record.class, aialgorithms.proto2.RecordProto2.Record.Builder.class);
    }
    // NOTE(review): emitted as a non-final public static field by the protobuf
    // 2.5.x generator; it must never be reassigned by hand-written code.
    public static com.google.protobuf.Parser<Record> PARSER =
        new com.google.protobuf.AbstractParser<Record>() {
      public Record parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new Record(input, extensionRegistry);
      }
    };
    @java.lang.Override
    public com.google.protobuf.Parser<Record> getParserForType() {
      return PARSER;
    }
    // Presence bits for the optional scalar fields (uid/metadata/configuration).
    private int bitField0_;
    // repeated .aialgorithms.proto2.MapEntry features = 1;
    public static final int FEATURES_FIELD_NUMBER = 1;
    // Unmodifiable after construction (see the parsing constructor's finally).
    private java.util.List<aialgorithms.proto2.RecordProto2.MapEntry> features_;
    /**
     * <code>repeated .aialgorithms.proto2.MapEntry features = 1;</code>
     *
     * <pre>
     * Map from the name of the feature to the value.
     *
     * For vectors and libsvm-like datasets,
     * a single feature with the name `values`
     * should be specified.
     * </pre>
     */
    public java.util.List<aialgorithms.proto2.RecordProto2.MapEntry> getFeaturesList() {
      return features_;
    }
    /**
     * <code>repeated .aialgorithms.proto2.MapEntry features = 1;</code>
     *
     * <pre>
     * Map from the name of the feature to the value.
     *
     * For vectors and libsvm-like datasets,
     * a single feature with the name `values`
     * should be specified.
     * </pre>
     */
    public java.util.List<? extends aialgorithms.proto2.RecordProto2.MapEntryOrBuilder>
        getFeaturesOrBuilderList() {
      return features_;
    }
    /**
     * <code>repeated .aialgorithms.proto2.MapEntry features = 1;</code>
     *
     * <pre>
     * Map from the name of the feature to the value.
     *
     * For vectors and libsvm-like datasets,
     * a single feature with the name `values`
     * should be specified.
     * </pre>
     */
    public int getFeaturesCount() {
      return features_.size();
    }
    /**
     * <code>repeated .aialgorithms.proto2.MapEntry features = 1;</code>
     *
     * <pre>
     * Map from the name of the feature to the value.
     *
     * For vectors and libsvm-like datasets,
     * a single feature with the name `values`
     * should be specified.
     * </pre>
     */
    public aialgorithms.proto2.RecordProto2.MapEntry getFeatures(int index) {
      return features_.get(index);
    }
    /**
     * <code>repeated .aialgorithms.proto2.MapEntry features = 1;</code>
     *
     * <pre>
     * Map from the name of the feature to the value.
     *
     * For vectors and libsvm-like datasets,
     * a single feature with the name `values`
     * should be specified.
     * </pre>
     */
    public aialgorithms.proto2.RecordProto2.MapEntryOrBuilder getFeaturesOrBuilder(
        int index) {
      return features_.get(index);
    }
    // repeated .aialgorithms.proto2.MapEntry label = 2;
    public static final int LABEL_FIELD_NUMBER = 2;
    // Unmodifiable after construction (see the parsing constructor's finally).
    private java.util.List<aialgorithms.proto2.RecordProto2.MapEntry> label_;
    /**
     * <code>repeated .aialgorithms.proto2.MapEntry label = 2;</code>
     *
     * <pre>
     * Optional set of labels for this record.
     * Similar to features field above, the key used for
     * generic scalar / vector labels should be 'values'
     * </pre>
     */
    public java.util.List<aialgorithms.proto2.RecordProto2.MapEntry> getLabelList() {
      return label_;
    }
    /**
     * <code>repeated .aialgorithms.proto2.MapEntry label = 2;</code>
     *
     * <pre>
     * Optional set of labels for this record.
     * Similar to features field above, the key used for
     * generic scalar / vector labels should be 'values'
     * </pre>
     */
    public java.util.List<? extends aialgorithms.proto2.RecordProto2.MapEntryOrBuilder>
        getLabelOrBuilderList() {
      return label_;
    }
    /**
     * <code>repeated .aialgorithms.proto2.MapEntry label = 2;</code>
     *
     * <pre>
     * Optional set of labels for this record.
     * Similar to features field above, the key used for
     * generic scalar / vector labels should be 'values'
     * </pre>
     */
    public int getLabelCount() {
      return label_.size();
    }
    /**
     * <code>repeated .aialgorithms.proto2.MapEntry label = 2;</code>
     *
     * <pre>
     * Optional set of labels for this record.
     * Similar to features field above, the key used for
     * generic scalar / vector labels should be 'values'
     * </pre>
     */
    public aialgorithms.proto2.RecordProto2.MapEntry getLabel(int index) {
      return label_.get(index);
    }
    /**
     * <code>repeated .aialgorithms.proto2.MapEntry label = 2;</code>
     *
     * <pre>
     * Optional set of labels for this record.
     * Similar to features field above, the key used for
     * generic scalar / vector labels should be 'values'
     * </pre>
     */
    public aialgorithms.proto2.RecordProto2.MapEntryOrBuilder getLabelOrBuilder(
        int index) {
      return label_.get(index);
    }
    // optional string uid = 3;
    public static final int UID_FIELD_NUMBER = 3;
    // Stored as either String or ByteString; decoded and cached lazily.
    private java.lang.Object uid_;
    /**
     * <code>optional string uid = 3;</code>
     *
     * <pre>
     * Unique identifier for this record in the dataset.
     *
     * Whilst not necessary, this allows better
     * debugging where there are data issues.
     *
     * This is not used by the algorithm directly.
     * </pre>
     */
    public boolean hasUid() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>optional string uid = 3;</code>
     *
     * <pre>
     * Unique identifier for this record in the dataset.
     *
     * Whilst not necessary, this allows better
     * debugging where there are data issues.
     *
     * This is not used by the algorithm directly.
     * </pre>
     */
    public java.lang.String getUid() {
      java.lang.Object ref = uid_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        com.google.protobuf.ByteString bs = 
            (com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        // Cache the decoded String only when the bytes are valid UTF-8, so the
        // original bytes can still be recovered otherwise.
        if (bs.isValidUtf8()) {
          uid_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string uid = 3;</code>
     *
     * <pre>
     * Unique identifier for this record in the dataset.
     *
     * Whilst not necessary, this allows better
     * debugging where there are data issues.
     *
     * This is not used by the algorithm directly.
     * </pre>
     */
    public com.google.protobuf.ByteString
        getUidBytes() {
      java.lang.Object ref = uid_;
      if (ref instanceof java.lang.String) {
        com.google.protobuf.ByteString b = 
            com.google.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        uid_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }
    // optional string metadata = 4;
    public static final int METADATA_FIELD_NUMBER = 4;
    // Stored as either String or ByteString; decoded and cached lazily.
    private java.lang.Object metadata_;
    /**
     * <code>optional string metadata = 4;</code>
     *
     * <pre>
     * Textual metadata describing the record.
     *
     * This may include JSON-serialized information
     * about the source of the record.
     *
     * This is not used by the algorithm directly.
     * </pre>
     */
    public boolean hasMetadata() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>optional string metadata = 4;</code>
     *
     * <pre>
     * Textual metadata describing the record.
     *
     * This may include JSON-serialized information
     * about the source of the record.
     *
     * This is not used by the algorithm directly.
     * </pre>
     */
    public java.lang.String getMetadata() {
      java.lang.Object ref = metadata_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        com.google.protobuf.ByteString bs = 
            (com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        // Cache the decoded String only for valid UTF-8 bytes.
        if (bs.isValidUtf8()) {
          metadata_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string metadata = 4;</code>
     *
     * <pre>
     * Textual metadata describing the record.
     *
     * This may include JSON-serialized information
     * about the source of the record.
     *
     * This is not used by the algorithm directly.
     * </pre>
     */
    public com.google.protobuf.ByteString
        getMetadataBytes() {
      java.lang.Object ref = metadata_;
      if (ref instanceof java.lang.String) {
        com.google.protobuf.ByteString b = 
            com.google.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        metadata_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }
    // optional string configuration = 5;
    public static final int CONFIGURATION_FIELD_NUMBER = 5;
    // Stored as either String or ByteString; decoded and cached lazily.
    private java.lang.Object configuration_;
    /**
     * <code>optional string configuration = 5;</code>
     *
     * <pre>
     * Optional serialized JSON object that allows per-record
     * hyper-parameters/configuration/other information to be set.
     *
     * The meaning/interpretation of this field is defined by
     * the algorithm author and may not be supported.
     *
     * This is used to pass additional inference configuration
     * when batch inference is used (e.g. types of scores to return).
     * </pre>
     */
    public boolean hasConfiguration() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    /**
     * <code>optional string configuration = 5;</code>
     *
     * <pre>
     * Optional serialized JSON object that allows per-record
     * hyper-parameters/configuration/other information to be set.
     *
     * The meaning/interpretation of this field is defined by
     * the algorithm author and may not be supported.
     *
     * This is used to pass additional inference configuration
     * when batch inference is used (e.g. types of scores to return).
     * </pre>
     */
    public java.lang.String getConfiguration() {
      java.lang.Object ref = configuration_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        com.google.protobuf.ByteString bs = 
            (com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        // Cache the decoded String only for valid UTF-8 bytes.
        if (bs.isValidUtf8()) {
          configuration_ = s;
        }
        return s;
      }
    }
    /**
     * <code>optional string configuration = 5;</code>
     *
     * <pre>
     * Optional serialized JSON object that allows per-record
     * hyper-parameters/configuration/other information to be set.
     *
     * The meaning/interpretation of this field is defined by
     * the algorithm author and may not be supported.
     *
     * This is used to pass additional inference configuration
     * when batch inference is used (e.g. types of scores to return).
     * </pre>
     */
    public com.google.protobuf.ByteString
        getConfigurationBytes() {
      java.lang.Object ref = configuration_;
      if (ref instanceof java.lang.String) {
        com.google.protobuf.ByteString b = 
            com.google.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        configuration_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }
    // Resets fields to proto2 defaults; called by the parsing constructor and
    // by the static initializer that builds the default instance.
    private void initFields() {
      features_ = java.util.Collections.emptyList();
      label_ = java.util.Collections.emptyList();
      uid_ = "";
      metadata_ = "";
      configuration_ = "";
    }
    // Memoized isInitialized() result: -1 = not yet computed, 0 = false, 1 = true.
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      memoizedIsInitialized = 1;
      return true;
    }
    // Writes repeated fields, then set scalar fields in field-number order,
    // then any preserved unknown fields.
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();  // ensures memoized sizes are populated before writing
      for (int i = 0; i < features_.size(); i++) {
        output.writeMessage(1, features_.get(i));
      }
      for (int i = 0; i < label_.size(); i++) {
        output.writeMessage(2, label_.get(i));
      }
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeBytes(3, getUidBytes());
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeBytes(4, getMetadataBytes());
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeBytes(5, getConfigurationBytes());
      }
      getUnknownFields().writeTo(output);
    }
    // Memoized serialized size: -1 = not yet computed.
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
      size = 0;
      for (int i = 0; i < features_.size(); i++) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, features_.get(i));
      }
      for (int i = 0; i < label_.size(); i++) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(2, label_.get(i));
      }
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeBytesSize(3, getUidBytes());
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeBytesSize(4, getMetadataBytes());
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeBytesSize(5, getConfigurationBytes());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
    private static final long serialVersionUID = 0L;
    // Java serialization substitutes a compact serialized proxy for this
    // object; the actual behavior is implemented in GeneratedMessage.
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    // ----- Static parsing entry points. Every overload delegates to the
    // shared PARSER instance; the variants differ only in input source
    // (ByteString, byte[], InputStream, CodedInputStream) and whether an
    // extension registry is supplied. parseDelimitedFrom reads a
    // varint-length-prefixed message, suitable for streams of records. -----
    public static aialgorithms.proto2.RecordProto2.Record parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static aialgorithms.proto2.RecordProto2.Record parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static aialgorithms.proto2.RecordProto2.Record parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static aialgorithms.proto2.RecordProto2.Record parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static aialgorithms.proto2.RecordProto2.Record parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static aialgorithms.proto2.RecordProto2.Record parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static aialgorithms.proto2.RecordProto2.Record parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static aialgorithms.proto2.RecordProto2.Record parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static aialgorithms.proto2.RecordProto2.Record parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static aialgorithms.proto2.RecordProto2.Record parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    // ----- Builder factory methods. -----
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    // Returns a new builder pre-populated with a copy of {@code prototype}.
    public static Builder newBuilder(aialgorithms.proto2.RecordProto2.Record prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    // Returns a builder initialized from this message instance.
    public Builder toBuilder() { return newBuilder(this); }
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
/**
* Protobuf type {@code aialgorithms.proto2.Record}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements aialgorithms.proto2.RecordProto2.RecordOrBuilder {
      // Descriptor for aialgorithms.proto2.Record; shared with the message class.
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_Record_descriptor;
      }
      // Maps descriptor fields to the generated accessors via reflection.
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_Record_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                aialgorithms.proto2.RecordProto2.Record.class, aialgorithms.proto2.RecordProto2.Record.Builder.class);
      }
      // Construct using aialgorithms.proto2.RecordProto2.Record.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      // Constructor used when this builder is nested inside a parent builder.
      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        // Eagerly create the repeated-field builders when the runtime is
        // configured to always use field builders (descriptor-based runtime).
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getFeaturesFieldBuilder();
          getLabelFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }
      // Resets all fields to their defaults and clears every presence bit.
      // Builder bit layout: 0x01=features, 0x02=label, 0x04=uid,
      // 0x08=metadata, 0x10=configuration.
      public Builder clear() {
        super.clear();
        if (featuresBuilder_ == null) {
          features_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000001);
        } else {
          featuresBuilder_.clear();
        }
        if (labelBuilder_ == null) {
          label_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000002);
        } else {
          labelBuilder_.clear();
        }
        uid_ = "";
        bitField0_ = (bitField0_ & ~0x00000004);
        metadata_ = "";
        bitField0_ = (bitField0_ & ~0x00000008);
        configuration_ = "";
        bitField0_ = (bitField0_ & ~0x00000010);
        return this;
      }
      // Deep copy: snapshots the current state into a fresh builder.
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return aialgorithms.proto2.RecordProto2.internal_static_aialgorithms_proto2_Record_descriptor;
      }
      public aialgorithms.proto2.RecordProto2.Record getDefaultInstanceForType() {
        return aialgorithms.proto2.RecordProto2.Record.getDefaultInstance();
      }
      // Builds the message, throwing if any required field is unset (none are
      // declared for Record, so this never throws in practice).
      public aialgorithms.proto2.RecordProto2.Record build() {
        aialgorithms.proto2.RecordProto2.Record result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
      // Builds the message without checking required-field initialization.
      // Remaps builder presence bits (uid=0x04, metadata=0x08,
      // configuration=0x10) onto the message's bit layout (0x01/0x02/0x04),
      // since the message does not track presence for repeated fields.
      public aialgorithms.proto2.RecordProto2.Record buildPartial() {
        aialgorithms.proto2.RecordProto2.Record result = new aialgorithms.proto2.RecordProto2.Record(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (featuresBuilder_ == null) {
          // Freeze the list so the built message is immutable; the builder's
          // presence bit is cleared because it no longer owns a mutable list.
          if (((bitField0_ & 0x00000001) == 0x00000001)) {
            features_ = java.util.Collections.unmodifiableList(features_);
            bitField0_ = (bitField0_ & ~0x00000001);
          }
          result.features_ = features_;
        } else {
          result.features_ = featuresBuilder_.build();
        }
        if (labelBuilder_ == null) {
          if (((bitField0_ & 0x00000002) == 0x00000002)) {
            label_ = java.util.Collections.unmodifiableList(label_);
            bitField0_ = (bitField0_ & ~0x00000002);
          }
          result.label_ = label_;
        } else {
          result.label_ = labelBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
          to_bitField0_ |= 0x00000001;
        }
        result.uid_ = uid_;
        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
          to_bitField0_ |= 0x00000002;
        }
        result.metadata_ = metadata_;
        if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
          to_bitField0_ |= 0x00000004;
        }
        result.configuration_ = configuration_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
      // Dynamic dispatch: use the fast typed merge when possible, otherwise
      // fall back to the reflection-based merge in the superclass.
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof aialgorithms.proto2.RecordProto2.Record) {
          return mergeFrom((aialgorithms.proto2.RecordProto2.Record)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      // Merges {@code other} into this builder: repeated fields are
      // concatenated, optional fields are overwritten only when set in other.
      public Builder mergeFrom(aialgorithms.proto2.RecordProto2.Record other) {
        if (other == aialgorithms.proto2.RecordProto2.Record.getDefaultInstance()) return this;
        if (featuresBuilder_ == null) {
          if (!other.features_.isEmpty()) {
            if (features_.isEmpty()) {
              // Adopt other's (immutable) list directly; clear the mutable bit.
              features_ = other.features_;
              bitField0_ = (bitField0_ & ~0x00000001);
            } else {
              ensureFeaturesIsMutable();
              features_.addAll(other.features_);
            }
            onChanged();
          }
        } else {
          if (!other.features_.isEmpty()) {
            if (featuresBuilder_.isEmpty()) {
              // Drop the empty field builder and adopt other's list; recreate
              // the builder lazily (or eagerly if the runtime requires it).
              featuresBuilder_.dispose();
              featuresBuilder_ = null;
              features_ = other.features_;
              bitField0_ = (bitField0_ & ~0x00000001);
              featuresBuilder_ =
                com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
                   getFeaturesFieldBuilder() : null;
            } else {
              featuresBuilder_.addAllMessages(other.features_);
            }
          }
        }
        if (labelBuilder_ == null) {
          if (!other.label_.isEmpty()) {
            if (label_.isEmpty()) {
              label_ = other.label_;
              bitField0_ = (bitField0_ & ~0x00000002);
            } else {
              ensureLabelIsMutable();
              label_.addAll(other.label_);
            }
            onChanged();
          }
        } else {
          if (!other.label_.isEmpty()) {
            if (labelBuilder_.isEmpty()) {
              labelBuilder_.dispose();
              labelBuilder_ = null;
              label_ = other.label_;
              bitField0_ = (bitField0_ & ~0x00000002);
              labelBuilder_ =
                com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
                   getLabelFieldBuilder() : null;
            } else {
              labelBuilder_.addAllMessages(other.label_);
            }
          }
        }
        if (other.hasUid()) {
          bitField0_ |= 0x00000004;
          uid_ = other.uid_;
          onChanged();
        }
        if (other.hasMetadata()) {
          bitField0_ |= 0x00000008;
          metadata_ = other.metadata_;
          onChanged();
        }
        if (other.hasConfiguration()) {
          bitField0_ |= 0x00000010;
          configuration_ = other.configuration_;
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
      // No required fields are declared, so a Record builder is always
      // initialized.
      public final boolean isInitialized() {
        return true;
      }
      // Parses from a stream and merges the result into this builder. On a
      // parse failure the partially-parsed message is still merged (in the
      // finally block) before the exception propagates.
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        aialgorithms.proto2.RecordProto2.Record parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (aialgorithms.proto2.RecordProto2.Record) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      // Presence/mutability bits for this builder's fields; see clear() for
      // the bit layout.
      private int bitField0_;
      // repeated .aialgorithms.proto2.MapEntry features = 1;
      private java.util.List<aialgorithms.proto2.RecordProto2.MapEntry> features_ =
        java.util.Collections.emptyList();
      // Copy-on-write: features_ may alias an immutable list (the shared
      // empty list or one adopted from another message); copy before mutating.
      private void ensureFeaturesIsMutable() {
        if (!((bitField0_ & 0x00000001) == 0x00000001)) {
          features_ = new java.util.ArrayList<aialgorithms.proto2.RecordProto2.MapEntry>(features_);
          bitField0_ |= 0x00000001;
         }
      }
      // Lazily-created helper managing nested builders for the features list;
      // when non-null it owns the field's state instead of features_.
      private com.google.protobuf.RepeatedFieldBuilder<
          aialgorithms.proto2.RecordProto2.MapEntry, aialgorithms.proto2.RecordProto2.MapEntry.Builder, aialgorithms.proto2.RecordProto2.MapEntryOrBuilder> featuresBuilder_;
      // ----- Accessors for `repeated .aialgorithms.proto2.MapEntry features = 1`.
      // Each method has two code paths: when featuresBuilder_ is null the
      // plain features_ list holds the state; otherwise the
      // RepeatedFieldBuilder owns it. -----
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry features = 1;</code>
       *
       * <pre>
       * Map from the name of the feature to the value.
       *
       * For vectors and libsvm-like datasets,
       * a single feature with the name `values`
       * should be specified.
       * </pre>
       */
      public java.util.List<aialgorithms.proto2.RecordProto2.MapEntry> getFeaturesList() {
        if (featuresBuilder_ == null) {
          return java.util.Collections.unmodifiableList(features_);
        } else {
          return featuresBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry features = 1;</code>
       *
       * <pre>
       * Map from the name of the feature to the value.
       *
       * For vectors and libsvm-like datasets,
       * a single feature with the name `values`
       * should be specified.
       * </pre>
       */
      public int getFeaturesCount() {
        if (featuresBuilder_ == null) {
          return features_.size();
        } else {
          return featuresBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry features = 1;</code>
       *
       * <pre>
       * Map from the name of the feature to the value.
       *
       * For vectors and libsvm-like datasets,
       * a single feature with the name `values`
       * should be specified.
       * </pre>
       */
      public aialgorithms.proto2.RecordProto2.MapEntry getFeatures(int index) {
        if (featuresBuilder_ == null) {
          return features_.get(index);
        } else {
          return featuresBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry features = 1;</code>
       *
       * <pre>
       * Map from the name of the feature to the value.
       *
       * For vectors and libsvm-like datasets,
       * a single feature with the name `values`
       * should be specified.
       * </pre>
       */
      public Builder setFeatures(
          int index, aialgorithms.proto2.RecordProto2.MapEntry value) {
        if (featuresBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureFeaturesIsMutable();
          features_.set(index, value);
          onChanged();
        } else {
          featuresBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry features = 1;</code>
       *
       * <pre>
       * Map from the name of the feature to the value.
       *
       * For vectors and libsvm-like datasets,
       * a single feature with the name `values`
       * should be specified.
       * </pre>
       */
      public Builder setFeatures(
          int index, aialgorithms.proto2.RecordProto2.MapEntry.Builder builderForValue) {
        if (featuresBuilder_ == null) {
          ensureFeaturesIsMutable();
          features_.set(index, builderForValue.build());
          onChanged();
        } else {
          featuresBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry features = 1;</code>
       *
       * <pre>
       * Map from the name of the feature to the value.
       *
       * For vectors and libsvm-like datasets,
       * a single feature with the name `values`
       * should be specified.
       * </pre>
       */
      public Builder addFeatures(aialgorithms.proto2.RecordProto2.MapEntry value) {
        if (featuresBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureFeaturesIsMutable();
          features_.add(value);
          onChanged();
        } else {
          featuresBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry features = 1;</code>
       *
       * <pre>
       * Map from the name of the feature to the value.
       *
       * For vectors and libsvm-like datasets,
       * a single feature with the name `values`
       * should be specified.
       * </pre>
       */
      public Builder addFeatures(
          int index, aialgorithms.proto2.RecordProto2.MapEntry value) {
        if (featuresBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureFeaturesIsMutable();
          features_.add(index, value);
          onChanged();
        } else {
          featuresBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry features = 1;</code>
       *
       * <pre>
       * Map from the name of the feature to the value.
       *
       * For vectors and libsvm-like datasets,
       * a single feature with the name `values`
       * should be specified.
       * </pre>
       */
      public Builder addFeatures(
          aialgorithms.proto2.RecordProto2.MapEntry.Builder builderForValue) {
        if (featuresBuilder_ == null) {
          ensureFeaturesIsMutable();
          features_.add(builderForValue.build());
          onChanged();
        } else {
          featuresBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry features = 1;</code>
       *
       * <pre>
       * Map from the name of the feature to the value.
       *
       * For vectors and libsvm-like datasets,
       * a single feature with the name `values`
       * should be specified.
       * </pre>
       */
      public Builder addFeatures(
          int index, aialgorithms.proto2.RecordProto2.MapEntry.Builder builderForValue) {
        if (featuresBuilder_ == null) {
          ensureFeaturesIsMutable();
          features_.add(index, builderForValue.build());
          onChanged();
        } else {
          featuresBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry features = 1;</code>
       *
       * <pre>
       * Map from the name of the feature to the value.
       *
       * For vectors and libsvm-like datasets,
       * a single feature with the name `values`
       * should be specified.
       * </pre>
       */
      public Builder addAllFeatures(
          java.lang.Iterable<? extends aialgorithms.proto2.RecordProto2.MapEntry> values) {
        if (featuresBuilder_ == null) {
          ensureFeaturesIsMutable();
          super.addAll(values, features_);
          onChanged();
        } else {
          featuresBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry features = 1;</code>
       *
       * <pre>
       * Map from the name of the feature to the value.
       *
       * For vectors and libsvm-like datasets,
       * a single feature with the name `values`
       * should be specified.
       * </pre>
       */
      public Builder clearFeatures() {
        if (featuresBuilder_ == null) {
          features_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000001);
          onChanged();
        } else {
          featuresBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry features = 1;</code>
       *
       * <pre>
       * Map from the name of the feature to the value.
       *
       * For vectors and libsvm-like datasets,
       * a single feature with the name `values`
       * should be specified.
       * </pre>
       */
      public Builder removeFeatures(int index) {
        if (featuresBuilder_ == null) {
          ensureFeaturesIsMutable();
          features_.remove(index);
          onChanged();
        } else {
          featuresBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry features = 1;</code>
       *
       * <pre>
       * Map from the name of the feature to the value.
       *
       * For vectors and libsvm-like datasets,
       * a single feature with the name `values`
       * should be specified.
       * </pre>
       */
      public aialgorithms.proto2.RecordProto2.MapEntry.Builder getFeaturesBuilder(
          int index) {
        return getFeaturesFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry features = 1;</code>
       *
       * <pre>
       * Map from the name of the feature to the value.
       *
       * For vectors and libsvm-like datasets,
       * a single feature with the name `values`
       * should be specified.
       * </pre>
       */
      public aialgorithms.proto2.RecordProto2.MapEntryOrBuilder getFeaturesOrBuilder(
          int index) {
        if (featuresBuilder_ == null) {
          return features_.get(index);  } else {
          return featuresBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry features = 1;</code>
       *
       * <pre>
       * Map from the name of the feature to the value.
       *
       * For vectors and libsvm-like datasets,
       * a single feature with the name `values`
       * should be specified.
       * </pre>
       */
      public java.util.List<? extends aialgorithms.proto2.RecordProto2.MapEntryOrBuilder>
           getFeaturesOrBuilderList() {
        if (featuresBuilder_ != null) {
          return featuresBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(features_);
        }
      }
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry features = 1;</code>
       *
       * <pre>
       * Map from the name of the feature to the value.
       *
       * For vectors and libsvm-like datasets,
       * a single feature with the name `values`
       * should be specified.
       * </pre>
       */
      public aialgorithms.proto2.RecordProto2.MapEntry.Builder addFeaturesBuilder() {
        return getFeaturesFieldBuilder().addBuilder(
            aialgorithms.proto2.RecordProto2.MapEntry.getDefaultInstance());
      }
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry features = 1;</code>
       *
       * <pre>
       * Map from the name of the feature to the value.
       *
       * For vectors and libsvm-like datasets,
       * a single feature with the name `values`
       * should be specified.
       * </pre>
       */
      public aialgorithms.proto2.RecordProto2.MapEntry.Builder addFeaturesBuilder(
          int index) {
        return getFeaturesFieldBuilder().addBuilder(
            index, aialgorithms.proto2.RecordProto2.MapEntry.getDefaultInstance());
      }
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry features = 1;</code>
       *
       * <pre>
       * Map from the name of the feature to the value.
       *
       * For vectors and libsvm-like datasets,
       * a single feature with the name `values`
       * should be specified.
       * </pre>
       */
      public java.util.List<aialgorithms.proto2.RecordProto2.MapEntry.Builder>
           getFeaturesBuilderList() {
        return getFeaturesFieldBuilder().getBuilderList();
      }
      // Creates the RepeatedFieldBuilder on first use; once created it takes
      // ownership of the list state and features_ is nulled out.
      private com.google.protobuf.RepeatedFieldBuilder<
          aialgorithms.proto2.RecordProto2.MapEntry, aialgorithms.proto2.RecordProto2.MapEntry.Builder, aialgorithms.proto2.RecordProto2.MapEntryOrBuilder>
          getFeaturesFieldBuilder() {
        if (featuresBuilder_ == null) {
          featuresBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
              aialgorithms.proto2.RecordProto2.MapEntry, aialgorithms.proto2.RecordProto2.MapEntry.Builder, aialgorithms.proto2.RecordProto2.MapEntryOrBuilder>(
                  features_,
                  ((bitField0_ & 0x00000001) == 0x00000001),
                  getParentForChildren(),
                  isClean());
          features_ = null;
        }
        return featuresBuilder_;
      }
      // repeated .aialgorithms.proto2.MapEntry label = 2;
      private java.util.List<aialgorithms.proto2.RecordProto2.MapEntry> label_ =
        java.util.Collections.emptyList();
      // Copy-on-write: label_ may alias an immutable list; copy before mutating.
      private void ensureLabelIsMutable() {
        if (!((bitField0_ & 0x00000002) == 0x00000002)) {
          label_ = new java.util.ArrayList<aialgorithms.proto2.RecordProto2.MapEntry>(label_);
          bitField0_ |= 0x00000002;
         }
      }
      // Lazily-created helper managing nested builders for the label list;
      // when non-null it owns the field's state instead of label_.
      private com.google.protobuf.RepeatedFieldBuilder<
          aialgorithms.proto2.RecordProto2.MapEntry, aialgorithms.proto2.RecordProto2.MapEntry.Builder, aialgorithms.proto2.RecordProto2.MapEntryOrBuilder> labelBuilder_;
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry label = 2;</code>
       *
       * <pre>
       * Optional set of labels for this record.
       * Similar to features field above, the key used for
       * generic scalar / vector labels should be 'values'
       * </pre>
       */
      public java.util.List<aialgorithms.proto2.RecordProto2.MapEntry> getLabelList() {
        if (labelBuilder_ == null) {
          return java.util.Collections.unmodifiableList(label_);
        } else {
          return labelBuilder_.getMessageList();
        }
      }
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry label = 2;</code>
       *
       * <pre>
       * Optional set of labels for this record.
       * Similar to features field above, the key used for
       * generic scalar / vector labels should be 'values'
       * </pre>
       */
      public int getLabelCount() {
        if (labelBuilder_ == null) {
          return label_.size();
        } else {
          return labelBuilder_.getCount();
        }
      }
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry label = 2;</code>
       *
       * <pre>
       * Optional set of labels for this record.
       * Similar to features field above, the key used for
       * generic scalar / vector labels should be 'values'
       * </pre>
       */
      public aialgorithms.proto2.RecordProto2.MapEntry getLabel(int index) {
        if (labelBuilder_ == null) {
          return label_.get(index);
        } else {
          return labelBuilder_.getMessage(index);
        }
      }
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry label = 2;</code>
       *
       * <pre>
       * Optional set of labels for this record.
       * Similar to features field above, the key used for
       * generic scalar / vector labels should be 'values'
       * </pre>
       */
      public Builder setLabel(
          int index, aialgorithms.proto2.RecordProto2.MapEntry value) {
        if (labelBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureLabelIsMutable();
          label_.set(index, value);
          onChanged();
        } else {
          labelBuilder_.setMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry label = 2;</code>
       *
       * <pre>
       * Optional set of labels for this record.
       * Similar to features field above, the key used for
       * generic scalar / vector labels should be 'values'
       * </pre>
       */
      public Builder setLabel(
          int index, aialgorithms.proto2.RecordProto2.MapEntry.Builder builderForValue) {
        if (labelBuilder_ == null) {
          ensureLabelIsMutable();
          label_.set(index, builderForValue.build());
          onChanged();
        } else {
          labelBuilder_.setMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry label = 2;</code>
       *
       * <pre>
       * Optional set of labels for this record.
       * Similar to features field above, the key used for
       * generic scalar / vector labels should be 'values'
       * </pre>
       */
      public Builder addLabel(aialgorithms.proto2.RecordProto2.MapEntry value) {
        if (labelBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureLabelIsMutable();
          label_.add(value);
          onChanged();
        } else {
          labelBuilder_.addMessage(value);
        }
        return this;
      }
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry label = 2;</code>
       *
       * <pre>
       * Optional set of labels for this record.
       * Similar to features field above, the key used for
       * generic scalar / vector labels should be 'values'
       * </pre>
       */
      public Builder addLabel(
          int index, aialgorithms.proto2.RecordProto2.MapEntry value) {
        if (labelBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          ensureLabelIsMutable();
          label_.add(index, value);
          onChanged();
        } else {
          labelBuilder_.addMessage(index, value);
        }
        return this;
      }
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry label = 2;</code>
       *
       * <pre>
       * Optional set of labels for this record.
       * Similar to features field above, the key used for
       * generic scalar / vector labels should be 'values'
       * </pre>
       */
      public Builder addLabel(
          aialgorithms.proto2.RecordProto2.MapEntry.Builder builderForValue) {
        if (labelBuilder_ == null) {
          ensureLabelIsMutable();
          label_.add(builderForValue.build());
          onChanged();
        } else {
          labelBuilder_.addMessage(builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry label = 2;</code>
       *
       * <pre>
       * Optional set of labels for this record.
       * Similar to features field above, the key used for
       * generic scalar / vector labels should be 'values'
       * </pre>
       */
      public Builder addLabel(
          int index, aialgorithms.proto2.RecordProto2.MapEntry.Builder builderForValue) {
        if (labelBuilder_ == null) {
          ensureLabelIsMutable();
          label_.add(index, builderForValue.build());
          onChanged();
        } else {
          labelBuilder_.addMessage(index, builderForValue.build());
        }
        return this;
      }
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry label = 2;</code>
       *
       * <pre>
       * Optional set of labels for this record.
       * Similar to features field above, the key used for
       * generic scalar / vector labels should be 'values'
       * </pre>
       */
      public Builder addAllLabel(
          java.lang.Iterable<? extends aialgorithms.proto2.RecordProto2.MapEntry> values) {
        if (labelBuilder_ == null) {
          ensureLabelIsMutable();
          super.addAll(values, label_);
          onChanged();
        } else {
          labelBuilder_.addAllMessages(values);
        }
        return this;
      }
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry label = 2;</code>
       *
       * <pre>
       * Optional set of labels for this record.
       * Similar to features field above, the key used for
       * generic scalar / vector labels should be 'values'
       * </pre>
       */
      public Builder clearLabel() {
        if (labelBuilder_ == null) {
          label_ = java.util.Collections.emptyList();
          bitField0_ = (bitField0_ & ~0x00000002);
          onChanged();
        } else {
          labelBuilder_.clear();
        }
        return this;
      }
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry label = 2;</code>
       *
       * <pre>
       * Optional set of labels for this record.
       * Similar to features field above, the key used for
       * generic scalar / vector labels should be 'values'
       * </pre>
       */
      public Builder removeLabel(int index) {
        if (labelBuilder_ == null) {
          ensureLabelIsMutable();
          label_.remove(index);
          onChanged();
        } else {
          labelBuilder_.remove(index);
        }
        return this;
      }
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry label = 2;</code>
       *
       * <pre>
       * Optional set of labels for this record.
       * Similar to features field above, the key used for
       * generic scalar / vector labels should be 'values'
       * </pre>
       */
      public aialgorithms.proto2.RecordProto2.MapEntry.Builder getLabelBuilder(
          int index) {
        return getLabelFieldBuilder().getBuilder(index);
      }
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry label = 2;</code>
       *
       * <pre>
       * Optional set of labels for this record.
       * Similar to features field above, the key used for
       * generic scalar / vector labels should be 'values'
       * </pre>
       */
      public aialgorithms.proto2.RecordProto2.MapEntryOrBuilder getLabelOrBuilder(
          int index) {
        if (labelBuilder_ == null) {
          return label_.get(index);  } else {
          return labelBuilder_.getMessageOrBuilder(index);
        }
      }
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry label = 2;</code>
       *
       * <pre>
       * Optional set of labels for this record.
       * Similar to features field above, the key used for
       * generic scalar / vector labels should be 'values'
       * </pre>
       */
      public java.util.List<? extends aialgorithms.proto2.RecordProto2.MapEntryOrBuilder>
           getLabelOrBuilderList() {
        if (labelBuilder_ != null) {
          return labelBuilder_.getMessageOrBuilderList();
        } else {
          return java.util.Collections.unmodifiableList(label_);
        }
      }
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry label = 2;</code>
       *
       * <pre>
       * Optional set of labels for this record.
       * Similar to features field above, the key used for
       * generic scalar / vector labels should be 'values'
       * </pre>
       */
      public aialgorithms.proto2.RecordProto2.MapEntry.Builder addLabelBuilder() {
        return getLabelFieldBuilder().addBuilder(
            aialgorithms.proto2.RecordProto2.MapEntry.getDefaultInstance());
      }
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry label = 2;</code>
       *
       * <pre>
       * Optional set of labels for this record.
       * Similar to features field above, the key used for
       * generic scalar / vector labels should be 'values'
       * </pre>
       */
      public aialgorithms.proto2.RecordProto2.MapEntry.Builder addLabelBuilder(
          int index) {
        return getLabelFieldBuilder().addBuilder(
            index, aialgorithms.proto2.RecordProto2.MapEntry.getDefaultInstance());
      }
      /**
       * <code>repeated .aialgorithms.proto2.MapEntry label = 2;</code>
       *
       * <pre>
       * Optional set of labels for this record.
       * Similar to features field above, the key used for
       * generic scalar / vector labels should be 'values'
       * </pre>
       */
      public java.util.List<aialgorithms.proto2.RecordProto2.MapEntry.Builder>
           getLabelBuilderList() {
        return getLabelFieldBuilder().getBuilderList();
      }
      // Creates the RepeatedFieldBuilder on first use; once created it takes
      // ownership of the list state and label_ is nulled out.
      private com.google.protobuf.RepeatedFieldBuilder<
          aialgorithms.proto2.RecordProto2.MapEntry, aialgorithms.proto2.RecordProto2.MapEntry.Builder, aialgorithms.proto2.RecordProto2.MapEntryOrBuilder>
          getLabelFieldBuilder() {
        if (labelBuilder_ == null) {
          labelBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
              aialgorithms.proto2.RecordProto2.MapEntry, aialgorithms.proto2.RecordProto2.MapEntry.Builder, aialgorithms.proto2.RecordProto2.MapEntryOrBuilder>(
                  label_,
                  ((bitField0_ & 0x00000002) == 0x00000002),
                  getParentForChildren(),
                  isClean());
          label_ = null;
        }
        return labelBuilder_;
      }
      // optional string uid = 3;
      // Holds either a String or a ByteString; converted lazily in each
      // direction and cached (standard generated-code pattern).
      private java.lang.Object uid_ = "";
      /**
       * <code>optional string uid = 3;</code>
       *
       * <pre>
       * Unique identifier for this record in the dataset.
       *
       * Whilst not necessary, this allows better
       * debugging where there are data issues.
       *
       * This is not used by the algorithm directly.
       * </pre>
       */
      public boolean hasUid() {
        return ((bitField0_ & 0x00000004) == 0x00000004);
      }
      /**
       * <code>optional string uid = 3;</code>
       *
       * <pre>
       * Unique identifier for this record in the dataset.
       *
       * Whilst not necessary, this allows better
       * debugging where there are data issues.
       *
       * This is not used by the algorithm directly.
       * </pre>
       */
      public java.lang.String getUid() {
        java.lang.Object ref = uid_;
        if (!(ref instanceof java.lang.String)) {
          // Decode the cached ByteString as UTF-8 and memoize the String form.
          java.lang.String s = ((com.google.protobuf.ByteString) ref)
              .toStringUtf8();
          uid_ = s;
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>optional string uid = 3;</code>
       *
       * <pre>
       * Unique identifier for this record in the dataset.
       *
       * Whilst not necessary, this allows better
       * debugging where there are data issues.
       *
       * This is not used by the algorithm directly.
       * </pre>
       */
      public com.google.protobuf.ByteString
          getUidBytes() {
        java.lang.Object ref = uid_;
        if (ref instanceof String) {
          // Encode the cached String as UTF-8 and memoize the ByteString form.
          com.google.protobuf.ByteString b =
              com.google.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          uid_ = b;
          return b;
        } else {
          return (com.google.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>optional string uid = 3;</code>
       *
       * <pre>
       * Unique identifier for this record in the dataset.
       *
       * Whilst not necessary, this allows better
       * debugging where there are data issues.
       *
       * This is not used by the algorithm directly.
       * </pre>
       */
      public Builder setUid(
          java.lang.String value) {
        if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000004;
        uid_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>optional string uid = 3;</code>
       *
       * <pre>
       * Unique identifier for this record in the dataset.
       *
       * Whilst not necessary, this allows better
       * debugging where there are data issues.
       *
       * This is not used by the algorithm directly.
       * </pre>
       */
      public Builder clearUid() {
        bitField0_ = (bitField0_ & ~0x00000004);
        uid_ = getDefaultInstance().getUid();
        onChanged();
        return this;
      }
      /**
       * <code>optional string uid = 3;</code>
       *
       * <pre>
       * Unique identifier for this record in the dataset.
       *
       * Whilst not necessary, this allows better
       * debugging where there are data issues.
       *
       * This is not used by the algorithm directly.
       * </pre>
       */
      public Builder setUidBytes(
          com.google.protobuf.ByteString value) {
        if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000004;
        uid_ = value;
        onChanged();
        return this;
      }
      // optional string metadata = 4;
      // Held as either String or ByteString; the getters convert lazily and
      // cache the converted form back into this field.
      private java.lang.Object metadata_ = "";
      /**
       * <code>optional string metadata = 4;</code>
       *
       * <pre>
       * Textual metadata describing the record.
       *
       * This may include JSON-serialized information
       * about the source of the record.
       *
       * This is not used by the algorithm directly.
       * </pre>
       */
      public boolean hasMetadata() {
        // True when the metadata field was explicitly set (bit 3 of bitField0_).
        return ((bitField0_ & 0x00000008) == 0x00000008);
      }
/**
* <code>optional string metadata = 4;</code>
*
* <pre>
* Textual metadata describing the record.
*
* This may include JSON-serialized information
* about the source of the record.
*
* This is not used by the algorithm directly.
* </pre>
*/
      public java.lang.String getMetadata() {
        java.lang.Object ref = metadata_;
        if (!(ref instanceof java.lang.String)) {
          // Cached as a ByteString: decode once and memoize the String.
          java.lang.String s = ((com.google.protobuf.ByteString) ref)
              .toStringUtf8();
          metadata_ = s;
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
/**
* <code>optional string metadata = 4;</code>
*
* <pre>
* Textual metadata describing the record.
*
* This may include JSON-serialized information
* about the source of the record.
*
* This is not used by the algorithm directly.
* </pre>
*/
      public com.google.protobuf.ByteString
          getMetadataBytes() {
        java.lang.Object ref = metadata_;
        if (ref instanceof String) {
          // Cached as a String: encode to UTF-8 once and memoize the ByteString.
          com.google.protobuf.ByteString b =
              com.google.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          metadata_ = b;
          return b;
        } else {
          return (com.google.protobuf.ByteString) ref;
        }
      }
/**
* <code>optional string metadata = 4;</code>
*
* <pre>
* Textual metadata describing the record.
*
* This may include JSON-serialized information
* about the source of the record.
*
* This is not used by the algorithm directly.
* </pre>
*/
      public Builder setMetadata(
          java.lang.String value) {
        if (value == null) {
    throw new NullPointerException();
  }
        // Mark the metadata field as present (bit 3 of bitField0_) before storing.
  bitField0_ |= 0x00000008;
        metadata_ = value;
        onChanged();
        return this;
      }
/**
* <code>optional string metadata = 4;</code>
*
* <pre>
* Textual metadata describing the record.
*
* This may include JSON-serialized information
* about the source of the record.
*
* This is not used by the algorithm directly.
* </pre>
*/
      public Builder clearMetadata() {
        // Drop the has-bit and restore the proto2 default value ("").
        bitField0_ = (bitField0_ & ~0x00000008);
        metadata_ = getDefaultInstance().getMetadata();
        onChanged();
        return this;
      }
/**
* <code>optional string metadata = 4;</code>
*
* <pre>
* Textual metadata describing the record.
*
* This may include JSON-serialized information
* about the source of the record.
*
* This is not used by the algorithm directly.
* </pre>
*/
      public Builder setMetadataBytes(
          com.google.protobuf.ByteString value) {
        if (value == null) {
    throw new NullPointerException();
  }
        // Store the raw ByteString; getMetadata() will lazily decode it.
  bitField0_ |= 0x00000008;
        metadata_ = value;
        onChanged();
        return this;
      }
      // optional string configuration = 5;
      // Held as either String or ByteString; the getters convert lazily and
      // cache the converted form back into this field.
      private java.lang.Object configuration_ = "";
      /**
       * <code>optional string configuration = 5;</code>
       *
       * <pre>
       * Optional serialized JSON object that allows per-record
       * hyper-parameters/configuration/other information to be set.
       *
       * The meaning/interpretation of this field is defined by
       * the algorithm author and may not be supported.
       *
       * This is used to pass additional inference configuration
       * when batch inference is used (e.g. types of scores to return).
       * </pre>
       */
      public boolean hasConfiguration() {
        // True when the configuration field was explicitly set (bit 4 of bitField0_).
        return ((bitField0_ & 0x00000010) == 0x00000010);
      }
/**
* <code>optional string configuration = 5;</code>
*
* <pre>
* Optional serialized JSON object that allows per-record
* hyper-parameters/configuration/other information to be set.
*
* The meaning/interpretation of this field is defined by
* the algorithm author and may not be supported.
*
* This is used to pass additional inference configuration
* when batch inference is used (e.g. types of scores to return).
* </pre>
*/
      public java.lang.String getConfiguration() {
        java.lang.Object ref = configuration_;
        if (!(ref instanceof java.lang.String)) {
          // Cached as a ByteString: decode once and memoize the String.
          java.lang.String s = ((com.google.protobuf.ByteString) ref)
              .toStringUtf8();
          configuration_ = s;
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
/**
* <code>optional string configuration = 5;</code>
*
* <pre>
* Optional serialized JSON object that allows per-record
* hyper-parameters/configuration/other information to be set.
*
* The meaning/interpretation of this field is defined by
* the algorithm author and may not be supported.
*
* This is used to pass additional inference configuration
* when batch inference is used (e.g. types of scores to return).
* </pre>
*/
      public com.google.protobuf.ByteString
          getConfigurationBytes() {
        java.lang.Object ref = configuration_;
        if (ref instanceof String) {
          // Cached as a String: encode to UTF-8 once and memoize the ByteString.
          com.google.protobuf.ByteString b =
              com.google.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          configuration_ = b;
          return b;
        } else {
          return (com.google.protobuf.ByteString) ref;
        }
      }
/**
* <code>optional string configuration = 5;</code>
*
* <pre>
* Optional serialized JSON object that allows per-record
* hyper-parameters/configuration/other information to be set.
*
* The meaning/interpretation of this field is defined by
* the algorithm author and may not be supported.
*
* This is used to pass additional inference configuration
* when batch inference is used (e.g. types of scores to return).
* </pre>
*/
      public Builder setConfiguration(
          java.lang.String value) {
        if (value == null) {
    throw new NullPointerException();
  }
        // Mark the configuration field as present (bit 4 of bitField0_) before storing.
  bitField0_ |= 0x00000010;
        configuration_ = value;
        onChanged();
        return this;
      }
/**
* <code>optional string configuration = 5;</code>
*
* <pre>
* Optional serialized JSON object that allows per-record
* hyper-parameters/configuration/other information to be set.
*
* The meaning/interpretation of this field is defined by
* the algorithm author and may not be supported.
*
* This is used to pass additional inference configuration
* when batch inference is used (e.g. types of scores to return).
* </pre>
*/
      public Builder clearConfiguration() {
        // Drop the has-bit and restore the proto2 default value ("").
        bitField0_ = (bitField0_ & ~0x00000010);
        configuration_ = getDefaultInstance().getConfiguration();
        onChanged();
        return this;
      }
/**
* <code>optional string configuration = 5;</code>
*
* <pre>
* Optional serialized JSON object that allows per-record
* hyper-parameters/configuration/other information to be set.
*
* The meaning/interpretation of this field is defined by
* the algorithm author and may not be supported.
*
* This is used to pass additional inference configuration
* when batch inference is used (e.g. types of scores to return).
* </pre>
*/
      public Builder setConfigurationBytes(
          com.google.protobuf.ByteString value) {
        if (value == null) {
    throw new NullPointerException();
  }
        // Store the raw ByteString; getConfiguration() will lazily decode it.
  bitField0_ |= 0x00000010;
        configuration_ = value;
        onChanged();
        return this;
      }
// @@protoc_insertion_point(builder_scope:aialgorithms.proto2.Record)
}
    static {
      // Eagerly build the singleton returned by getDefaultInstance().
      defaultInstance = new Record(true);
      defaultInstance.initFields();
    }
// @@protoc_insertion_point(class_scope:aialgorithms.proto2.Record)
}
  // Cached message descriptors and reflective field-accessor tables for every
  // message type in this file; populated once by the static descriptor
  // initializer below.
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_aialgorithms_proto2_Float32Tensor_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_aialgorithms_proto2_Float32Tensor_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_aialgorithms_proto2_Float64Tensor_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_aialgorithms_proto2_Float64Tensor_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_aialgorithms_proto2_Int32Tensor_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_aialgorithms_proto2_Int32Tensor_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_aialgorithms_proto2_Bytes_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_aialgorithms_proto2_Bytes_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_aialgorithms_proto2_Value_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_aialgorithms_proto2_Value_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_aialgorithms_proto2_MapEntry_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_aialgorithms_proto2_MapEntry_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_aialgorithms_proto2_Record_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_aialgorithms_proto2_Record_fieldAccessorTable;
  /** Returns the file-level descriptor built by the static initializer below. */
  public static com.google.protobuf.Descriptors.FileDescriptor
      getDescriptor() {
    return descriptor;
  }
  // Assigned exactly once inside the descriptor static initializer.
  private static com.google.protobuf.Descriptors.FileDescriptor
      descriptor;
  static {
    // Serialized FileDescriptorProto for this .proto file, split into chunks
    // (protoc emits it as escaped-octal string data).
    java.lang.String[] descriptorData = {
      "\n\"AIAlgorithmsProtobufSchema/p.proto\022\023ai" +
      "algorithms.proto2\"H\n\rFloat32Tensor\022\022\n\006va" +
      "lues\030\001 \003(\002B\002\020\001\022\020\n\004keys\030\002 \003(\004B\002\020\001\022\021\n\005shap" +
      "e\030\003 \003(\004B\002\020\001\"H\n\rFloat64Tensor\022\022\n\006values\030\001" +
      " \003(\001B\002\020\001\022\020\n\004keys\030\002 \003(\004B\002\020\001\022\021\n\005shape\030\003 \003(" +
      "\004B\002\020\001\"F\n\013Int32Tensor\022\022\n\006values\030\001 \003(\005B\002\020\001" +
      "\022\020\n\004keys\030\002 \003(\004B\002\020\001\022\021\n\005shape\030\003 \003(\004B\002\020\001\",\n" +
      "\005Bytes\022\r\n\005value\030\001 \003(\014\022\024\n\014content_type\030\002 " +
      "\001(\t\"\342\001\n\005Value\022:\n\016float32_tensor\030\002 \001(\0132\"." +
      "aialgorithms.proto2.Float32Tensor\022:\n\016flo",
      "at64_tensor\030\003 \001(\0132\".aialgorithms.proto2." +
      "Float64Tensor\0226\n\014int32_tensor\030\007 \001(\0132 .ai" +
      "algorithms.proto2.Int32Tensor\022)\n\005bytes\030\t" +
      " \001(\0132\032.aialgorithms.proto2.Bytes\"B\n\010MapE" +
      "ntry\022\013\n\003key\030\001 \001(\t\022)\n\005value\030\002 \001(\0132\032.aialg" +
      "orithms.proto2.Value\"\235\001\n\006Record\022/\n\010featu" +
      "res\030\001 \003(\0132\035.aialgorithms.proto2.MapEntry" +
      "\022,\n\005label\030\002 \003(\0132\035.aialgorithms.proto2.Ma" +
      "pEntry\022\013\n\003uid\030\003 \001(\t\022\020\n\010metadata\030\004 \001(\t\022\025\n" +
      "\rconfiguration\030\005 \001(\tB\016B\014RecordProto2"
    };
    // Callback invoked once the FileDescriptor is built: caches each message
    // descriptor and its reflective field-accessor table (indexed by the order
    // the messages are declared in the .proto file).
    com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
        new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
          public com.google.protobuf.ExtensionRegistry assignDescriptors(
              com.google.protobuf.Descriptors.FileDescriptor root) {
            descriptor = root;
            internal_static_aialgorithms_proto2_Float32Tensor_descriptor =
              getDescriptor().getMessageTypes().get(0);
            internal_static_aialgorithms_proto2_Float32Tensor_fieldAccessorTable = new
              com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_aialgorithms_proto2_Float32Tensor_descriptor,
                new java.lang.String[] { "Values", "Keys", "Shape", });
            internal_static_aialgorithms_proto2_Float64Tensor_descriptor =
              getDescriptor().getMessageTypes().get(1);
            internal_static_aialgorithms_proto2_Float64Tensor_fieldAccessorTable = new
              com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_aialgorithms_proto2_Float64Tensor_descriptor,
                new java.lang.String[] { "Values", "Keys", "Shape", });
            internal_static_aialgorithms_proto2_Int32Tensor_descriptor =
              getDescriptor().getMessageTypes().get(2);
            internal_static_aialgorithms_proto2_Int32Tensor_fieldAccessorTable = new
              com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_aialgorithms_proto2_Int32Tensor_descriptor,
                new java.lang.String[] { "Values", "Keys", "Shape", });
            internal_static_aialgorithms_proto2_Bytes_descriptor =
              getDescriptor().getMessageTypes().get(3);
            internal_static_aialgorithms_proto2_Bytes_fieldAccessorTable = new
              com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_aialgorithms_proto2_Bytes_descriptor,
                new java.lang.String[] { "Value", "ContentType", });
            internal_static_aialgorithms_proto2_Value_descriptor =
              getDescriptor().getMessageTypes().get(4);
            internal_static_aialgorithms_proto2_Value_fieldAccessorTable = new
              com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_aialgorithms_proto2_Value_descriptor,
                new java.lang.String[] { "Float32Tensor", "Float64Tensor", "Int32Tensor", "Bytes", });
            internal_static_aialgorithms_proto2_MapEntry_descriptor =
              getDescriptor().getMessageTypes().get(5);
            internal_static_aialgorithms_proto2_MapEntry_fieldAccessorTable = new
              com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_aialgorithms_proto2_MapEntry_descriptor,
                new java.lang.String[] { "Key", "Value", });
            internal_static_aialgorithms_proto2_Record_descriptor =
              getDescriptor().getMessageTypes().get(6);
            internal_static_aialgorithms_proto2_Record_fieldAccessorTable = new
              com.google.protobuf.GeneratedMessage.FieldAccessorTable(
                internal_static_aialgorithms_proto2_Record_descriptor,
                new java.lang.String[] { "Features", "Label", "Uid", "Metadata", "Configuration", });
            return null;
          }
        };
    com.google.protobuf.Descriptors.FileDescriptor
      .internalBuildGeneratedFileFrom(descriptorData,
        new com.google.protobuf.Descriptors.FileDescriptor[] {
        }, assigner);
  }
// @@protoc_insertion_point(outer_class_scope)
}
| 6,529 |
0 | Create_ds/sagemaker-spark/sagemaker-spark-sdk/src/main/scala/com/amazonaws/services/sagemaker/sparksdk | Create_ds/sagemaker-spark/sagemaker-spark-sdk/src/main/scala/com/amazonaws/services/sagemaker/sparksdk/protobuf/RecordIOOutputFormat.java | /*
* Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.services.sagemaker.sparksdk.protobuf;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import java.io.IOException;
import java.io.OutputStream;
/**
* A Hadoop {@link FileOutputFormat} that writes {@link BytesWritable} as Amazon Record protobuf
* messages in a recordIO encoded file.
*
* @see [[https://aws.amazon.com/sagemaker/latest/dg/cdf-training.html/]] for more information on
* the Amazon Record data format.
* @see [[https://mxnet.incubator.apache.org/architecture/note_data_loading.html]] for more
* information on recordIO
*/
public class RecordIOOutputFormat extends FileOutputFormat<NullWritable, BytesWritable> {

  /** Writes each {@code BytesWritable} value as a recordIO-framed protobuf record. */
  public static class SageMakerProtobufRecordWriter extends
      RecordWriter<NullWritable, BytesWritable> {

    private final OutputStream out;

    public SageMakerProtobufRecordWriter(OutputStream out) {
      this.out = out;
    }

    @Override
    public void write(NullWritable nullWritable, BytesWritable bytesWritable)
            throws IOException, InterruptedException {
      // Bug fix: BytesWritable.getBytes() returns the *backing* array, which may
      // be longer than the valid data (it grows amortized). copyBytes() trims to
      // getLength(), so trailing padding bytes are not encoded into the record.
      byte[] bytes = ProtobufConverter.byteArrayToRecordIOEncodedByteArray(
          bytesWritable.copyBytes());
      out.write(bytes, 0, bytes.length);
    }

    @Override
    public void close(TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
      this.out.close();
    }
  }

  @Override
  public RecordWriter<NullWritable, BytesWritable> getRecordWriter(TaskAttemptContext taskAttemptContext)
          throws IOException, InterruptedException {
    // One output file per task attempt, suffixed ".sagemaker"; overwrite if present.
    Path file = getDefaultWorkFile(taskAttemptContext, "sagemaker");
    FileSystem fs = file.getFileSystem(taskAttemptContext.getConfiguration());
    FSDataOutputStream out = fs.create(file, true);
    return new SageMakerProtobufRecordWriter(out);
  }
}
| 6,530 |
0 | Create_ds/conductor/annotations-processor/src/test/java/com/netflix/conductor/annotationsprocessor | Create_ds/conductor/annotations-processor/src/test/java/com/netflix/conductor/annotationsprocessor/protogen/ProtoGenTest.java | /*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.annotationsprocessor.protogen;
import java.io.File;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.List;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import com.google.common.collect.Lists;
import com.google.common.io.Files;
import com.google.common.io.Resources;
import static org.junit.Assert.*;
public class ProtoGenTest {
    private static final Charset charset = StandardCharsets.UTF_8;

    // Fresh temp directory per test; JUnit deletes it afterwards.
    @Rule public TemporaryFolder folder = new TemporaryFolder();

    /**
     * End-to-end smoke test: runs the generator against the example Jar
     * (built by the Gradle `example` sub-build into ./build/libs) and checks
     * the emitted example.proto against the golden copy in test resources.
     */
    @Test
    public void happyPath() throws Exception {
        File rootDir = folder.getRoot();
        String protoPackage = "protoPackage";
        String javaPackage = "abc.protogen.example";
        String goPackage = "goPackage";
        String sourcePackage = "com.example";
        String mapperPackage = "mapperPackage";
        // Precondition: the example Jar must have been built before this test runs.
        File jarFile = new File("./build/libs/example.jar");
        assertTrue(jarFile.exists());
        File mapperDir = new File(rootDir, "mapperDir");
        mapperDir.mkdirs();
        File protosDir = new File(rootDir, "protosDir");
        protosDir.mkdirs();
        // writeProtos() emits message files under a "model" subdirectory.
        File modelDir = new File(protosDir, "model");
        modelDir.mkdirs();
        ProtoGen generator = new ProtoGen(protoPackage, javaPackage, goPackage);
        generator.processPackage(jarFile, sourcePackage);
        generator.writeMapper(mapperDir, mapperPackage);
        generator.writeProtos(protosDir);
        List<File> models = Lists.newArrayList(modelDir.listFiles());
        assertEquals(1, models.size());
        File exampleProtoFile =
                models.stream().filter(f -> f.getName().equals("example.proto")).findFirst().get();
        assertTrue(exampleProtoFile.length() > 0);
        // Golden-file comparison against example.proto.txt on the test classpath.
        assertEquals(
                Resources.asCharSource(Resources.getResource("example.proto.txt"), charset).read(),
                Files.asCharSource(exampleProtoFile, charset).read());
    }
}
| 6,531 |
0 | Create_ds/conductor/annotations-processor/src/example/java/com | Create_ds/conductor/annotations-processor/src/example/java/com/example/Example.java | /*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.example;
import com.netflix.conductor.annotations.protogen.ProtoField;
import com.netflix.conductor.annotations.protogen.ProtoMessage;
// Minimal fixture class consumed by the protogen generator tests.
@ProtoMessage
public class Example {
    // Proto field ids must stay stable: they define the wire format.
    @ProtoField(id = 1)
    public String name;

    @ProtoField(id = 2)
    public Long count;
}
| 6,532 |
0 | Create_ds/conductor/annotations-processor/src/main/java/com/netflix/conductor/annotationsprocessor | Create_ds/conductor/annotations-processor/src/main/java/com/netflix/conductor/annotationsprocessor/protogen/Message.java | /*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.annotationsprocessor.protogen;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.lang.model.element.Modifier;
import com.netflix.conductor.annotations.protogen.ProtoField;
import com.netflix.conductor.annotations.protogen.ProtoMessage;
import com.netflix.conductor.annotationsprocessor.protogen.types.AbstractType;
import com.netflix.conductor.annotationsprocessor.protogen.types.MessageType;
import com.netflix.conductor.annotationsprocessor.protogen.types.TypeMapper;
import com.squareup.javapoet.ClassName;
import com.squareup.javapoet.MethodSpec;
import com.squareup.javapoet.TypeSpec;
public class Message extends AbstractMessage {
public Message(Class<?> cls, MessageType parent) {
super(cls, parent);
for (java.lang.reflect.Field field : clazz.getDeclaredFields()) {
ProtoField ann = field.getAnnotation(ProtoField.class);
if (ann == null) continue;
fields.add(new MessageField(ann.id(), field));
}
}
protected ProtoMessage getAnnotation() {
return (ProtoMessage) this.clazz.getAnnotation(ProtoMessage.class);
}
@Override
public String getProtoClass() {
return "message";
}
@Override
protected void javaMapToProto(TypeSpec.Builder type) {
if (!getAnnotation().toProto() || getAnnotation().wrapper()) return;
ClassName javaProtoType = (ClassName) this.type.getJavaProtoType();
MethodSpec.Builder method = MethodSpec.methodBuilder("toProto");
method.addModifiers(Modifier.PUBLIC);
method.returns(javaProtoType);
method.addParameter(this.clazz, "from");
method.addStatement(
"$T to = $T.newBuilder()", javaProtoType.nestedClass("Builder"), javaProtoType);
for (Field field : this.fields) {
if (field instanceof MessageField) {
AbstractType fieldType = ((MessageField) field).getAbstractType();
fieldType.mapToProto(field.getName(), method);
}
}
method.addStatement("return to.build()");
type.addMethod(method.build());
}
@Override
protected void javaMapFromProto(TypeSpec.Builder type) {
if (!getAnnotation().fromProto() || getAnnotation().wrapper()) return;
MethodSpec.Builder method = MethodSpec.methodBuilder("fromProto");
method.addModifiers(Modifier.PUBLIC);
method.returns(this.clazz);
method.addParameter(this.type.getJavaProtoType(), "from");
method.addStatement("$T to = new $T()", this.clazz, this.clazz);
for (Field field : this.fields) {
if (field instanceof MessageField) {
AbstractType fieldType = ((MessageField) field).getAbstractType();
fieldType.mapFromProto(field.getName(), method);
}
}
method.addStatement("return to");
type.addMethod(method.build());
}
public static class MessageField extends Field {
protected AbstractType type;
protected MessageField(int index, java.lang.reflect.Field field) {
super(index, field);
}
public AbstractType getAbstractType() {
if (type == null) {
type = TypeMapper.INSTANCE.get(field.getGenericType());
}
return type;
}
private static Pattern CAMEL_CASE_RE = Pattern.compile("(?<=[a-z])[A-Z]");
private static String toUnderscoreCase(String input) {
Matcher m = CAMEL_CASE_RE.matcher(input);
StringBuilder sb = new StringBuilder();
while (m.find()) {
m.appendReplacement(sb, "_" + m.group());
}
m.appendTail(sb);
return sb.toString().toLowerCase();
}
@Override
public String getProtoTypeDeclaration() {
return String.format(
"%s %s = %d",
getAbstractType().getProtoType(), toUnderscoreCase(getName()), getProtoIndex());
}
@Override
public void getDependencies(Set<String> deps) {
getAbstractType().getDependencies(deps);
}
@Override
public void generateAbstractMethods(Set<MethodSpec> specs) {
getAbstractType().generateAbstractMethods(specs);
}
}
}
| 6,533 |
0 | Create_ds/conductor/annotations-processor/src/main/java/com/netflix/conductor/annotationsprocessor | Create_ds/conductor/annotations-processor/src/main/java/com/netflix/conductor/annotationsprocessor/protogen/ProtoGenTask.java | /*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.annotationsprocessor.protogen;
import java.io.File;
import java.io.IOException;
/**
 * Plain configuration bean plus CLI entry point for running the protogen
 * generator: scans {@code sourceJar}/{@code sourcePackage} for annotated
 * classes, then writes .proto files and the Java mapper.
 */
public class ProtoGenTask {
    private String protoPackage;
    private String javaPackage;
    private String goPackage;
    private File protosDir;
    private File mapperDir;
    private String mapperPackage;
    private File sourceJar;
    private String sourcePackage;

    public String getProtoPackage() {
        return protoPackage;
    }

    public void setProtoPackage(String protoPackage) {
        this.protoPackage = protoPackage;
    }

    public String getJavaPackage() {
        return javaPackage;
    }

    public void setJavaPackage(String javaPackage) {
        this.javaPackage = javaPackage;
    }

    public String getGoPackage() {
        return goPackage;
    }

    public void setGoPackage(String goPackage) {
        this.goPackage = goPackage;
    }

    public File getProtosDir() {
        return protosDir;
    }

    public void setProtosDir(File protosDir) {
        this.protosDir = protosDir;
    }

    public File getMapperDir() {
        return mapperDir;
    }

    public void setMapperDir(File mapperDir) {
        this.mapperDir = mapperDir;
    }

    public String getMapperPackage() {
        return mapperPackage;
    }

    public void setMapperPackage(String mapperPackage) {
        this.mapperPackage = mapperPackage;
    }

    public File getSourceJar() {
        return sourceJar;
    }

    public void setSourceJar(File sourceJar) {
        this.sourceJar = sourceJar;
    }

    public String getSourcePackage() {
        return sourcePackage;
    }

    public void setSourcePackage(String sourcePackage) {
        this.sourcePackage = sourcePackage;
    }

    /**
     * Runs the generator end-to-end. I/O failures are reported to stderr
     * rather than propagated (best-effort build step).
     */
    public void generate() {
        ProtoGen generator = new ProtoGen(protoPackage, javaPackage, goPackage);
        try {
            generator.processPackage(sourceJar, sourcePackage);
            generator.writeMapper(mapperDir, mapperPackage);
            generator.writeProtos(protosDir);
        } catch (IOException e) {
            System.err.printf("protogen: failed with %s\n", e);
        }
    }

    /**
     * CLI entry point. Expects exactly 8 positional args, in this order:
     * protoPackage, javaPackage, goPackage, protosDir, mapperDir,
     * mapperPackage, sourceJar, sourcePackage.
     */
    public static void main(String[] args) {
        if (args == null || args.length < 8) {
            throw new RuntimeException(
                    "protogen configuration incomplete, please provide all required (8) inputs");
        }
        ProtoGenTask task = new ProtoGenTask();
        int argsId = 0;
        task.setProtoPackage(args[argsId++]);
        task.setJavaPackage(args[argsId++]);
        task.setGoPackage(args[argsId++]);
        task.setProtosDir(new File(args[argsId++]));
        task.setMapperDir(new File(args[argsId++]));
        task.setMapperPackage(args[argsId++]);
        task.setSourceJar(new File(args[argsId++]));
        task.setSourcePackage(args[argsId]);
        System.out.println("Running protogen with arguments: " + task);
        task.generate();
        System.out.println("protogen completed.");
    }

    @Override
    public String toString() {
        return "ProtoGenTask{"
                + "protoPackage='"
                + protoPackage
                + '\''
                + ", javaPackage='"
                + javaPackage
                + '\''
                + ", goPackage='"
                + goPackage
                + '\''
                + ", protosDir="
                + protosDir
                + ", mapperDir="
                + mapperDir
                + ", mapperPackage='"
                + mapperPackage
                + '\''
                + ", sourceJar="
                + sourceJar
                + ", sourcePackage='"
                + sourcePackage
                + '\''
                + '}';
    }
}
| 6,534 |
0 | Create_ds/conductor/annotations-processor/src/main/java/com/netflix/conductor/annotationsprocessor | Create_ds/conductor/annotations-processor/src/main/java/com/netflix/conductor/annotationsprocessor/protogen/ProtoGen.java | /*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.annotationsprocessor.protogen;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.Writer;
import java.net.URL;
import java.net.URLClassLoader;
import java.util.*;
import javax.annotation.Generated;
import javax.lang.model.element.Modifier;
import com.netflix.conductor.annotations.protogen.ProtoMessage;
import com.github.jknack.handlebars.EscapingStrategy;
import com.github.jknack.handlebars.Handlebars;
import com.github.jknack.handlebars.Template;
import com.github.jknack.handlebars.io.ClassPathTemplateLoader;
import com.github.jknack.handlebars.io.TemplateLoader;
import com.google.common.reflect.ClassPath;
import com.squareup.javapoet.AnnotationSpec;
import com.squareup.javapoet.JavaFile;
import com.squareup.javapoet.MethodSpec;
import com.squareup.javapoet.TypeSpec;
/**
 * Scans a Jar for classes annotated with {@link ProtoMessage} and emits both
 * the corresponding .proto files and an abstract Java mapper class.
 */
public class ProtoGen {
    private static final String GENERATOR_NAME =
            "com.netflix.conductor.annotationsprocessor.protogen";

    private final String protoPackageName;
    private final String javaPackageName;
    private final String goPackageName;
    private final List<ProtoFile> protoFiles = new ArrayList<>();

    public ProtoGen(String protoPackageName, String javaPackageName, String goPackageName) {
        this.protoPackageName = protoPackageName;
        this.javaPackageName = javaPackageName;
        this.goPackageName = goPackageName;
    }

    /**
     * Writes the generated {@code AbstractProtoMapper.java} into {@code root}.
     *
     * @param root directory that receives the generated file
     * @param mapperPackageName Java package declared in the generated file
     * @throws IOException if the file cannot be written
     */
    public void writeMapper(File root, String mapperPackageName) throws IOException {
        TypeSpec.Builder protoMapper =
                TypeSpec.classBuilder("AbstractProtoMapper")
                        .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT)
                        .addAnnotation(
                                AnnotationSpec.builder(Generated.class)
                                        .addMember("value", "$S", GENERATOR_NAME)
                                        .build());
        Set<MethodSpec> abstractMethods = new HashSet<>();
        // Sort by message name so the generated mapper is deterministic across runs
        // (idiomatic replacement for the former anonymous Comparator class).
        protoFiles.sort(Comparator.comparing(p -> p.getMessage().getName()));
        for (ProtoFile protoFile : protoFiles) {
            AbstractMessage elem = protoFile.getMessage();
            elem.generateJavaMapper(protoMapper);
            elem.generateAbstractMethods(abstractMethods);
        }
        protoMapper.addMethods(abstractMethods);

        JavaFile javaFile =
                JavaFile.builder(mapperPackageName, protoMapper.build()).indent(" ").build();
        File filename = new File(root, "AbstractProtoMapper.java");
        // FileWriter accepts the File directly; no need for toString().
        try (Writer writer = new FileWriter(filename)) {
            System.out.printf("protogen: writing '%s'...\n", filename);
            javaFile.writeTo(writer);
        }
    }

    /**
     * Renders one .proto file per collected message using the Handlebars
     * template on the classpath.
     */
    public void writeProtos(File root) throws IOException {
        TemplateLoader loader = new ClassPathTemplateLoader("/templates", ".proto");
        Handlebars handlebars =
                new Handlebars(loader)
                        .infiniteLoops(true)
                        .prettyPrint(true)
                        .with(EscapingStrategy.NOOP);
        Template protoFile = handlebars.compile("file");
        for (ProtoFile file : protoFiles) {
            File filename = new File(root, file.getFilePath());
            try (Writer writer = new FileWriter(filename)) {
                System.out.printf("protogen: writing '%s'...\n", filename);
                protoFile.apply(file, writer);
            }
        }
    }

    /**
     * Loads every top-level class under {@code packageName} from the given Jar
     * and collects those annotated with {@link ProtoMessage}.
     *
     * @throws IOException if the Jar is missing or cannot be scanned
     */
    public void processPackage(File jarFile, String packageName) throws IOException {
        if (!jarFile.isFile()) throw new IOException("missing Jar file " + jarFile);

        URL[] urls = new URL[] {jarFile.toURI().toURL()};
        ClassLoader loader =
                new URLClassLoader(urls, Thread.currentThread().getContextClassLoader());
        ClassPath cp = ClassPath.from(loader);
        System.out.printf("protogen: processing Jar '%s'\n", jarFile);
        for (ClassPath.ClassInfo info : cp.getTopLevelClassesRecursive(packageName)) {
            try {
                processClass(info.load());
            } catch (NoClassDefFoundError ignored) {
                // Classes whose dependencies are absent cannot carry our annotations; skip.
            }
        }
    }

    /** Registers {@code obj} for generation if it carries @ProtoMessage. */
    public void processClass(Class<?> obj) {
        if (obj.isAnnotationPresent(ProtoMessage.class)) {
            System.out.printf("protogen: found %s\n", obj.getCanonicalName());
            protoFiles.add(new ProtoFile(obj, protoPackageName, javaPackageName, goPackageName));
        }
    }
}
| 6,535 |
0 | Create_ds/conductor/annotations-processor/src/main/java/com/netflix/conductor/annotationsprocessor | Create_ds/conductor/annotations-processor/src/main/java/com/netflix/conductor/annotationsprocessor/protogen/AbstractMessage.java | /*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.annotationsprocessor.protogen;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import com.netflix.conductor.annotations.protogen.ProtoEnum;
import com.netflix.conductor.annotations.protogen.ProtoMessage;
import com.netflix.conductor.annotationsprocessor.protogen.types.MessageType;
import com.netflix.conductor.annotationsprocessor.protogen.types.TypeMapper;
import com.squareup.javapoet.MethodSpec;
import com.squareup.javapoet.TypeSpec;
/**
 * Base representation of a Java type that maps onto a Protocol Buffers declaration — either a
 * {@code message} or an {@code enum}.
 *
 * <p>On construction it registers the type with {@link TypeMapper} and recursively wraps any
 * nested classes annotated with {@link ProtoMessage} or {@link ProtoEnum}.
 */
public abstract class AbstractMessage {
    protected Class<?> clazz;
    protected MessageType type;
    protected List<Field> fields = new ArrayList<>();
    protected List<AbstractMessage> nested = new ArrayList<>();

    public AbstractMessage(Class<?> cls, MessageType parentType) {
        assert cls.isAnnotationPresent(ProtoMessage.class)
                || cls.isAnnotationPresent(ProtoEnum.class);
        this.clazz = cls;
        this.type = TypeMapper.INSTANCE.declare(cls, parentType);

        // Renamed from 'nested' to avoid shadowing the field of the same name.
        for (Class<?> nestedClass : clazz.getDeclaredClasses()) {
            if (nestedClass.isEnum()) {
                addNestedEnum(nestedClass);
            } else {
                addNestedClass(nestedClass);
            }
        }
    }

    /** Wraps {@code cls} as a nested proto enum if it carries {@link ProtoEnum}. */
    private void addNestedEnum(Class<?> cls) {
        // getAnnotation is already generically typed; the old (ProtoEnum) cast was redundant.
        if (cls.getAnnotation(ProtoEnum.class) != null) {
            nested.add(new Enum(cls, this.type));
        }
    }

    /** Wraps {@code cls} as a nested proto message if it carries {@link ProtoMessage}. */
    private void addNestedClass(Class<?> cls) {
        if (cls.getAnnotation(ProtoMessage.class) != null) {
            nested.add(new Message(cls, this.type));
        }
    }

    /** Keyword used in the .proto declaration: {@code "message"} or {@code "enum"}. */
    public abstract String getProtoClass();

    /** Adds the toProto mapper method(s) for this type to the builder. */
    protected abstract void javaMapToProto(TypeSpec.Builder builder);

    /** Adds the fromProto mapper method(s) for this type to the builder. */
    protected abstract void javaMapFromProto(TypeSpec.Builder builder);

    /** Adds mapper methods for this type and, recursively, all nested types. */
    public void generateJavaMapper(TypeSpec.Builder builder) {
        javaMapToProto(builder);
        javaMapFromProto(builder);
        for (AbstractMessage abstractMessage : this.nested) {
            abstractMessage.generateJavaMapper(builder);
        }
    }

    /** Collects abstract converter methods required by this type's fields and nested types. */
    public void generateAbstractMethods(Set<MethodSpec> specs) {
        for (Field field : fields) {
            field.generateAbstractMethods(specs);
        }
        for (AbstractMessage elem : nested) {
            elem.generateAbstractMethods(specs);
        }
    }

    /** Collects the .proto file paths this type (and its nested types) must import. */
    public void findDependencies(Set<String> dependencies) {
        for (Field field : fields) {
            field.getDependencies(dependencies);
        }
        for (AbstractMessage elem : nested) {
            elem.findDependencies(dependencies);
        }
    }

    public List<AbstractMessage> getNested() {
        return nested;
    }

    public List<Field> getFields() {
        return fields;
    }

    public String getName() {
        return clazz.getSimpleName();
    }

    /** One field of a message/enum: a Java reflection field plus its proto field number. */
    public abstract static class Field {
        protected int protoIndex;
        protected java.lang.reflect.Field field;

        protected Field(int index, java.lang.reflect.Field field) {
            this.protoIndex = index;
            this.field = field;
        }

        /** Renders this field's .proto declaration line (e.g. {@code NAME = 0}). */
        public abstract String getProtoTypeDeclaration();

        public int getProtoIndex() {
            return protoIndex;
        }

        public String getName() {
            return field.getName();
        }

        /**
         * Proto-side name of the field. Uses default-locale upper-casing; field names are
         * assumed to be ASCII identifiers — TODO confirm no non-ASCII names exist.
         */
        public String getProtoName() {
            return field.getName().toUpperCase();
        }

        /** Hook: subclasses add the .proto imports this field requires. */
        public void getDependencies(Set<String> deps) {}

        /** Hook: subclasses add abstract converter methods this field requires. */
        public void generateAbstractMethods(Set<MethodSpec> specs) {}
    }
}
| 6,536 |
0 | Create_ds/conductor/annotations-processor/src/main/java/com/netflix/conductor/annotationsprocessor | Create_ds/conductor/annotations-processor/src/main/java/com/netflix/conductor/annotationsprocessor/protogen/Enum.java | /*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.annotationsprocessor.protogen;
import javax.lang.model.element.Modifier;
import com.netflix.conductor.annotationsprocessor.protogen.types.MessageType;
import com.squareup.javapoet.MethodSpec;
import com.squareup.javapoet.TypeName;
import com.squareup.javapoet.TypeSpec;
/**
 * Maps a Java {@code enum} annotated with {@code @ProtoEnum} onto a protobuf {@code enum}
 * declaration plus switch-based toProto/fromProto mapper methods.
 */
public class Enum extends AbstractMessage {
    /** Direction of the generated conversion method. */
    public enum MapType {
        FROM_PROTO("fromProto"),
        TO_PROTO("toProto");

        private final String methodName;

        MapType(String m) {
            methodName = m;
        }

        public String getMethodName() {
            return methodName;
        }
    }

    // Was raw 'Class'; widened to Class<?> (source-compatible for all callers).
    public Enum(Class<?> cls, MessageType parent) {
        super(cls, parent);
        // Proto enum values are numbered in declaration order, starting at 0 as proto3 requires.
        int protoIndex = 0;
        for (java.lang.reflect.Field field : cls.getDeclaredFields()) {
            // getDeclaredFields also yields synthetic members (e.g. $VALUES); keep constants only.
            if (field.isEnumConstant()) {
                fields.add(new EnumField(protoIndex++, field));
            }
        }
    }

    @Override
    public String getProtoClass() {
        return "enum";
    }

    /**
     * Builds a switch-based conversion method between the Java enum and its proto counterpart.
     *
     * @param mt mapping direction (decides which side supplies source vs. target constant names)
     * @param from parameter type of the generated method
     * @param to return type of the generated method
     */
    private MethodSpec javaMap(MapType mt, TypeName from, TypeName to) {
        MethodSpec.Builder method = MethodSpec.methodBuilder(mt.getMethodName());
        method.addModifiers(Modifier.PUBLIC);
        method.returns(to);
        method.addParameter(from, "from");

        method.addStatement("$T to", to);
        method.beginControlFlow("switch (from)");
        for (Field field : fields) {
            String fromName = (mt == MapType.TO_PROTO) ? field.getName() : field.getProtoName();
            String toName = (mt == MapType.TO_PROTO) ? field.getProtoName() : field.getName();
            method.addStatement("case $L: to = $T.$L; break", fromName, to, toName);
        }
        method.addStatement(
                "default: throw new $T(\"Unexpected enum constant: \" + from)",
                IllegalArgumentException.class);
        method.endControlFlow();
        method.addStatement("return to");
        return method.build();
    }

    @Override
    protected void javaMapFromProto(TypeSpec.Builder type) {
        type.addMethod(
                javaMap(
                        MapType.FROM_PROTO,
                        this.type.getJavaProtoType(),
                        TypeName.get(this.clazz)));
    }

    @Override
    protected void javaMapToProto(TypeSpec.Builder type) {
        type.addMethod(
                javaMap(MapType.TO_PROTO, TypeName.get(this.clazz), this.type.getJavaProtoType()));
    }

    /** Single enum constant; its proto declaration is {@code NAME = index}. */
    // Made static: it uses no enclosing-instance state, so the hidden outer reference was waste.
    public static class EnumField extends Field {
        protected EnumField(int index, java.lang.reflect.Field field) {
            super(index, field);
        }

        @Override
        public String getProtoTypeDeclaration() {
            return String.format("%s = %d", getProtoName(), getProtoIndex());
        }
    }
}
| 6,537 |
0 | Create_ds/conductor/annotations-processor/src/main/java/com/netflix/conductor/annotationsprocessor | Create_ds/conductor/annotations-processor/src/main/java/com/netflix/conductor/annotationsprocessor/protogen/ProtoFile.java | /*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.annotationsprocessor.protogen;
import java.util.HashSet;
import java.util.Set;
import com.netflix.conductor.annotationsprocessor.protogen.types.TypeMapper;
import com.squareup.javapoet.ClassName;
/**
 * Describes one generated .proto file: the annotated root message it contains plus the
 * package names used for the proto, Java and Go artifacts.
 */
public class ProtoFile {
    /** Suffix appended to the Java class name of the generated protobuf wrapper class. */
    // Was a mutable public static field; constants should be final.
    public static final String PROTO_SUFFIX = "Pb";

    // All state is assigned once in the constructor; made final accordingly.
    private final ClassName baseClass;
    private final AbstractMessage message;
    private final String filePath;
    private final String protoPackageName;
    private final String javaPackageName;
    private final String goPackageName;

    public ProtoFile(
            Class<?> object,
            String protoPackageName,
            String javaPackageName,
            String goPackageName) {
        this.protoPackageName = protoPackageName;
        this.javaPackageName = javaPackageName;
        this.goPackageName = goPackageName;

        String className = object.getSimpleName() + PROTO_SUFFIX;
        // e.g. WorkflowTask -> model/workflowtask.proto
        this.filePath = "model/" + object.getSimpleName().toLowerCase() + ".proto";
        this.baseClass = ClassName.get(this.javaPackageName, className);
        this.message = new Message(object, TypeMapper.INSTANCE.baseClass(baseClass, filePath));
    }

    public String getJavaClassName() {
        return baseClass.simpleName();
    }

    public String getFilePath() {
        return filePath;
    }

    public String getProtoPackageName() {
        return protoPackageName;
    }

    public String getJavaPackageName() {
        return javaPackageName;
    }

    public String getGoPackageName() {
        return goPackageName;
    }

    public AbstractMessage getMessage() {
        return message;
    }

    /** Proto files this file must import; its own path is excluded. */
    public Set<String> getIncludes() {
        Set<String> includes = new HashSet<>();
        message.findDependencies(includes);
        includes.remove(this.getFilePath());
        return includes;
    }
}
| 6,538 |
0 | Create_ds/conductor/annotations-processor/src/main/java/com/netflix/conductor/annotationsprocessor/protogen | Create_ds/conductor/annotations-processor/src/main/java/com/netflix/conductor/annotationsprocessor/protogen/types/AbstractType.java | /*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.annotationsprocessor.protogen.types;
import java.lang.reflect.Type;
import java.util.Set;
import com.squareup.javapoet.MethodSpec;
import com.squareup.javapoet.TypeName;
/**
 * Base class for all Java-to-protobuf type mappings used by the generator.
 *
 * <p>A mapping knows its Java {@link Type}, the JavaPoet {@link TypeName} of the protobuf
 * counterpart, how to spell its .proto declaration, and how to emit the statements that copy
 * a field of this type between the POJO and the proto builder in either direction.
 */
public abstract class AbstractType {
    // The Java-side type being mapped.
    Type javaType;
    // JavaPoet name of the protobuf counterpart.
    TypeName javaProtoType;

    AbstractType(Type javaType, TypeName javaProtoType) {
        this.javaType = javaType;
        this.javaProtoType = javaProtoType;
    }

    public Type getJavaType() {
        return javaType;
    }

    public TypeName getJavaProtoType() {
        return javaProtoType;
    }

    /** Type as it appears in the .proto declaration (e.g. a scalar or message name). */
    public abstract String getProtoType();

    /** Raw (unparameterized) JavaPoet type, used when building parameterized proto types. */
    public abstract TypeName getRawJavaType();

    /** Emits statements copying {@code field} from the POJO ("from") into the proto builder ("to"). */
    public abstract void mapToProto(String field, MethodSpec.Builder method);

    /** Emits statements copying {@code field} from the proto ("from") into the POJO ("to"). */
    public abstract void mapFromProto(String field, MethodSpec.Builder method);

    /** Adds the .proto file paths this type depends on (imports). */
    public abstract void getDependencies(Set<String> deps);

    /** Adds abstract converter methods the hand-written mapper must implement, if any. */
    public abstract void generateAbstractMethods(Set<MethodSpec> specs);

    // Builds a JavaBean accessor name: prefix + FieldName (first letter capitalized).
    protected String javaMethodName(String m, String field) {
        String fieldName = field.substring(0, 1).toUpperCase() + field.substring(1);
        return m + fieldName;
    }

    /**
     * Converts a Java field name (camelCase or snake_case) into the UpperCamelCase form used
     * in protobuf's generated Java accessor names.
     */
    private static class ProtoCase {
        static String convert(String s) {
            StringBuilder out = new StringBuilder(s.length());
            final int len = s.length();
            int i = 0;
            int j = -1;
            // Walk successive word boundaries, normalizing each word; '_' separators are
            // consumed and dropped. 'i' tracks the start of the current word.
            while ((j = findWordBoundary(s, ++j)) != -1) {
                out.append(normalizeWord(s.substring(i, j)));
                if (j < len && s.charAt(j) == '_') j++;
                i = j;
            }
            // No boundary found at all: the whole string is a single word.
            if (i == 0) return normalizeWord(s);
            // Append the trailing word after the last boundary, if any.
            if (i < len) out.append(normalizeWord(s.substring(i)));
            return out.toString();
        }

        private static boolean isWordBoundary(char c) {
            return (c >= 'A' && c <= 'Z');
        }

        // Returns the index of the next word boundary at/after 'start', or -1 if none.
        // Starting on an uppercase letter: returns the index just past that uppercase run.
        // Otherwise: returns the index of the next '_' or uppercase letter.
        private static int findWordBoundary(CharSequence sequence, int start) {
            int length = sequence.length();
            if (start >= length) return -1;
            if (isWordBoundary(sequence.charAt(start))) {
                int i = start;
                while (i < length && isWordBoundary(sequence.charAt(i))) i++;
                return i;
            } else {
                for (int i = start; i < length; i++) {
                    final char c = sequence.charAt(i);
                    if (c == '_' || isWordBoundary(c)) return i;
                }
                return -1;
            }
        }

        // Capitalizes the first letter and lower-cases the rest; 0/1-char words are upper-cased.
        private static String normalizeWord(String word) {
            if (word.length() < 2) return word.toUpperCase();
            return word.substring(0, 1).toUpperCase() + word.substring(1).toLowerCase();
        }
    }

    // Builds a proto accessor name: prefix + proto-cased field name (e.g. "get" + "TaskDefs").
    protected String protoMethodName(String m, String field) {
        return m + ProtoCase.convert(field);
    }
}
| 6,539 |
0 | Create_ds/conductor/annotations-processor/src/main/java/com/netflix/conductor/annotationsprocessor/protogen | Create_ds/conductor/annotations-processor/src/main/java/com/netflix/conductor/annotationsprocessor/protogen/types/WrappedType.java | /*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.annotationsprocessor.protogen.types;
import java.lang.reflect.Type;
import java.util.Set;
import javax.lang.model.element.Modifier;
import com.squareup.javapoet.MethodSpec;
import com.squareup.javapoet.TypeName;
/**
 * A generic container type represented in protobuf by a hand-written wrapper message
 * (resolved by naming convention, e.g. value type "String" + suffix "List").
 */
public class WrappedType extends AbstractType {
    private AbstractType realType;
    private MessageType wrappedType;

    /**
     * Resolves the wrapper message for the given container type and pairs it with the real
     * Java type.
     *
     * @throws IllegalArgumentException if the value type is not a class or no wrapper exists
     */
    public static WrappedType wrap(GenericType realType) {
        Type valueType = realType.getValueType().getJavaType();
        if (!(valueType instanceof Class)) {
            throw new IllegalArgumentException("cannot wrap primitive type: " + valueType);
        }
        String className = ((Class) valueType).getSimpleName() + realType.getWrapperSuffix();
        MessageType wrapper = TypeMapper.INSTANCE.get(className);
        if (wrapper == null) {
            throw new IllegalArgumentException("missing wrapper class: " + className);
        }
        return new WrappedType(realType, wrapper);
    }

    public WrappedType(AbstractType realType, MessageType wrappedType) {
        super(realType.getJavaType(), wrappedType.getJavaProtoType());
        this.realType = realType;
        this.wrappedType = wrappedType;
    }

    @Override
    public String getProtoType() {
        return wrappedType.getProtoType();
    }

    @Override
    public TypeName getRawJavaType() {
        return realType.getRawJavaType();
    }

    // Field copying is delegated entirely to the wrapper message's mapping.
    @Override
    public void mapToProto(String field, MethodSpec.Builder method) {
        wrappedType.mapToProto(field, method);
    }

    @Override
    public void mapFromProto(String field, MethodSpec.Builder method) {
        wrappedType.mapFromProto(field, method);
    }

    @Override
    public void getDependencies(Set<String> deps) {
        realType.getDependencies(deps);
        wrappedType.getDependencies(deps);
    }

    @Override
    public void generateAbstractMethods(Set<MethodSpec> specs) {
        // The hand-written mapper must convert between the Java container and the wrapper
        // message in both directions.
        MethodSpec.Builder fromProto =
                MethodSpec.methodBuilder("fromProto")
                        .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT)
                        .returns(realType.getJavaType())
                        .addParameter(wrappedType.getJavaProtoType(), "in");
        MethodSpec.Builder toProto =
                MethodSpec.methodBuilder("toProto")
                        .addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT)
                        .returns(wrappedType.getJavaProtoType())
                        .addParameter(realType.getJavaType(), "in");
        specs.add(fromProto.build());
        specs.add(toProto.build());
    }
}
| 6,540 |
0 | Create_ds/conductor/annotations-processor/src/main/java/com/netflix/conductor/annotationsprocessor/protogen | Create_ds/conductor/annotations-processor/src/main/java/com/netflix/conductor/annotationsprocessor/protogen/types/ExternMessageType.java | /*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.annotationsprocessor.protogen.types;
import java.lang.reflect.Type;
import java.util.Set;
import javax.lang.model.element.Modifier;
import com.squareup.javapoet.ClassName;
import com.squareup.javapoet.MethodSpec;
public class ExternMessageType extends MessageType {
private String externProtoType;
public ExternMessageType(
Type javaType, ClassName javaProtoType, String externProtoType, String protoFilePath) {
super(javaType, javaProtoType, protoFilePath);
this.externProtoType = externProtoType;
}
@Override
public String getProtoType() {
return externProtoType;
}
@Override
public void generateAbstractMethods(Set<MethodSpec> specs) {
MethodSpec fromProto =
MethodSpec.methodBuilder("fromProto")
.addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT)
.returns(this.getJavaType())
.addParameter(this.getJavaProtoType(), "in")
.build();
MethodSpec toProto =
MethodSpec.methodBuilder("toProto")
.addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT)
.returns(this.getJavaProtoType())
.addParameter(this.getJavaType(), "in")
.build();
specs.add(fromProto);
specs.add(toProto);
}
}
| 6,541 |
0 | Create_ds/conductor/annotations-processor/src/main/java/com/netflix/conductor/annotationsprocessor/protogen | Create_ds/conductor/annotations-processor/src/main/java/com/netflix/conductor/annotationsprocessor/protogen/types/ScalarType.java | /*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.annotationsprocessor.protogen.types;
import java.lang.reflect.Type;
import java.util.Set;
import com.squareup.javapoet.MethodSpec;
import com.squareup.javapoet.TypeName;
/**
 * Mapping for scalar values (ints, longs, booleans, strings) that translate directly to a
 * protobuf scalar type without any conversion helper.
 */
public class ScalarType extends AbstractType {
    private String protoType;

    public ScalarType(Type javaType, TypeName javaProtoType, String protoType) {
        super(javaType, javaProtoType);
        this.protoType = protoType;
    }

    @Override
    public String getProtoType() {
        return protoType;
    }

    @Override
    public TypeName getRawJavaType() {
        return getJavaProtoType();
    }

    @Override
    public void mapFromProto(String field, MethodSpec.Builder method) {
        // Scalars copy straight across: to.setX( from.getX() )
        String setter = javaMethodName("set", field);
        String getter = protoMethodName("get", field);
        method.addStatement("to.$L( from.$L() )", setter, getter);
    }

    // Boxed primitives and String can be null on the POJO side.
    private boolean isNullableType() {
        Type t = getJavaType();
        return t.equals(Boolean.class)
                || t.equals(Byte.class)
                || t.equals(Character.class)
                || t.equals(Short.class)
                || t.equals(Integer.class)
                || t.equals(Long.class)
                || t.equals(Double.class)
                || t.equals(Float.class)
                || t.equals(String.class);
    }

    @Override
    public void mapToProto(String field, MethodSpec.Builder method) {
        // Nullable sources get a null guard before the copy.
        boolean guarded = isNullableType();
        String getter;
        if (getJavaType().equals(boolean.class) || getJavaType().equals(Boolean.class)) {
            // Booleans follow the JavaBean "isX" accessor convention.
            getter = javaMethodName("is", field);
        } else {
            getter = javaMethodName("get", field);
        }
        if (guarded) {
            method.beginControlFlow("if (from.$L() != null)", getter);
        }
        method.addStatement("to.$L( from.$L() )", protoMethodName("set", field), getter);
        if (guarded) {
            method.endControlFlow();
        }
    }

    @Override
    public void getDependencies(Set<String> deps) {}

    @Override
    public void generateAbstractMethods(Set<MethodSpec> specs) {}
}
| 6,542 |
0 | Create_ds/conductor/annotations-processor/src/main/java/com/netflix/conductor/annotationsprocessor/protogen | Create_ds/conductor/annotations-processor/src/main/java/com/netflix/conductor/annotationsprocessor/protogen/types/MapType.java | /*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.annotationsprocessor.protogen.types;
import java.lang.reflect.Type;
import java.util.HashMap;
import java.util.Map;
import com.squareup.javapoet.ClassName;
import com.squareup.javapoet.MethodSpec;
import com.squareup.javapoet.ParameterizedTypeName;
import com.squareup.javapoet.TypeName;
/**
 * Mapping for {@code Map<K, V>} fields. Keys must map to proto scalars; values may be scalars
 * (copied wholesale) or messages (converted entry-by-entry via toProto/fromProto).
 */
public class MapType extends GenericType {
    private AbstractType keyType;
    private AbstractType valueType;

    public MapType(Type type) {
        super(type);
    }

    @Override
    public String getWrapperSuffix() {
        return "Map";
    }

    // Value-type mapping, resolved lazily from the second generic parameter.
    @Override
    public AbstractType getValueType() {
        if (valueType == null) {
            valueType = resolveGenericParam(1);
        }
        return valueType;
    }

    // Key-type mapping, resolved lazily from the first generic parameter.
    public AbstractType getKeyType() {
        if (keyType == null) {
            keyType = resolveGenericParam(0);
        }
        return keyType;
    }

    @Override
    public void mapToProto(String field, MethodSpec.Builder method) {
        AbstractType valueType = getValueType();
        if (valueType instanceof ScalarType) {
            // Scalar values: copy the whole map with one putAllX() call.
            method.addStatement(
                    "to.$L( from.$L() )",
                    protoMethodName("putAll", field),
                    javaMethodName("get", field));
        } else {
            // Message values: iterate entries and convert each value with toProto().
            TypeName typeName =
                    ParameterizedTypeName.get(
                            Map.Entry.class,
                            getKeyType().getJavaType(),
                            getValueType().getJavaType());
            method.beginControlFlow(
                    "for ($T pair : from.$L().entrySet())", typeName, javaMethodName("get", field));
            method.addStatement(
                    "to.$L( pair.getKey(), toProto( pair.getValue() ) )",
                    protoMethodName("put", field));
            method.endControlFlow();
        }
    }

    @Override
    public void mapFromProto(String field, MethodSpec.Builder method) {
        AbstractType valueType = getValueType();
        if (valueType instanceof ScalarType) {
            // Scalar values: pass proto's getXMap() result straight to the POJO setter.
            method.addStatement(
                    "to.$L( from.$L() )",
                    javaMethodName("set", field),
                    protoMethodName("get", field) + "Map");
        } else {
            // Message values: rebuild a HashMap, converting each value with fromProto().
            Type keyType = getKeyType().getJavaType();
            Type valueTypeJava = getValueType().getJavaType();
            TypeName valueTypePb = getValueType().getJavaProtoType();
            ParameterizedTypeName entryType =
                    ParameterizedTypeName.get(
                            ClassName.get(Map.Entry.class), TypeName.get(keyType), valueTypePb);
            ParameterizedTypeName mapType =
                    ParameterizedTypeName.get(Map.class, keyType, valueTypeJava);
            ParameterizedTypeName hashMapType =
                    ParameterizedTypeName.get(HashMap.class, keyType, valueTypeJava);
            String mapName = field + "Map";
            method.addStatement("$T $L = new $T()", mapType, mapName, hashMapType);
            method.beginControlFlow(
                    "for ($T pair : from.$L().entrySet())",
                    entryType,
                    protoMethodName("get", field) + "Map");
            method.addStatement("$L.put( pair.getKey(), fromProto( pair.getValue() ) )", mapName);
            method.endControlFlow();
            method.addStatement("to.$L($L)", javaMethodName("set", field), mapName);
        }
    }

    @Override
    public TypeName resolveJavaProtoType() {
        return ParameterizedTypeName.get(
                (ClassName) getRawJavaType(),
                getKeyType().getJavaProtoType(),
                getValueType().getJavaProtoType());
    }

    @Override
    public String getProtoType() {
        AbstractType keyType = getKeyType();
        AbstractType valueType = getValueType();
        // Non-scalar keys are rejected here; NOTE(review): this matches protobuf's own
        // restriction that map keys be integral or string types — confirm against proto spec.
        if (!(keyType instanceof ScalarType)) {
            throw new IllegalArgumentException(
                    "cannot map non-scalar map key: " + this.getJavaType());
        }
        return String.format("map<%s, %s>", keyType.getProtoType(), valueType.getProtoType());
    }
}
| 6,543 |
0 | Create_ds/conductor/annotations-processor/src/main/java/com/netflix/conductor/annotationsprocessor/protogen | Create_ds/conductor/annotations-processor/src/main/java/com/netflix/conductor/annotationsprocessor/protogen/types/ListType.java | /*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.annotationsprocessor.protogen.types;
import java.lang.reflect.Type;
import java.util.stream.Collectors;
import com.squareup.javapoet.ClassName;
import com.squareup.javapoet.MethodSpec;
import com.squareup.javapoet.ParameterizedTypeName;
import com.squareup.javapoet.TypeName;
/**
 * Mapping for repeated fields backed by {@code List}, {@code Set} or {@code LinkedList}.
 * The concrete collection materialized on the Java side comes from
 * {@code TypeMapper.PROTO_LIST_TYPES}.
 */
public class ListType extends GenericType {
    private AbstractType valueType;

    public ListType(Type type) {
        super(type);
    }

    @Override
    public String getWrapperSuffix() {
        return "List";
    }

    // Element-type mapping, resolved lazily from the single generic parameter.
    @Override
    public AbstractType getValueType() {
        if (valueType == null) {
            valueType = resolveGenericParam(0);
        }
        return valueType;
    }

    @Override
    public void mapToProto(String field, MethodSpec.Builder method) {
        AbstractType subtype = getValueType();
        if (subtype instanceof ScalarType) {
            // Scalar elements: bulk-copy with addAllX().
            method.addStatement(
                    "to.$L( from.$L() )",
                    protoMethodName("addAll", field),
                    javaMethodName("get", field));
        } else {
            // Message elements: convert one by one with toProto().
            method.beginControlFlow(
                    "for ($T elem : from.$L())",
                    subtype.getJavaType(),
                    javaMethodName("get", field));
            method.addStatement("to.$L( toProto(elem) )", protoMethodName("add", field));
            method.endControlFlow();
        }
    }

    @Override
    public void mapFromProto(String field, MethodSpec.Builder method) {
        AbstractType subtype = getValueType();
        Type entryType = subtype.getJavaType();
        // Concrete collection class to build on the Java side (ArrayList/HashSet/LinkedList).
        Class collector = TypeMapper.PROTO_LIST_TYPES.get(getRawType());
        if (subtype instanceof ScalarType) {
            if (entryType.equals(String.class)) {
                // String lists are copied into a fresh collection rather than assigned —
                // NOTE(review): presumably because getXList() returns a proto-specific
                // string list type; confirm against the generated protobuf API.
                method.addStatement(
                        "to.$L( from.$L().stream().collect($T.toCollection($T::new)) )",
                        javaMethodName("set", field),
                        protoMethodName("get", field) + "List",
                        Collectors.class,
                        collector);
            } else {
                // Other scalars: pass the proto list straight to the POJO setter.
                method.addStatement(
                        "to.$L( from.$L() )",
                        javaMethodName("set", field),
                        protoMethodName("get", field) + "List");
            }
        } else {
            // Message elements: map each with fromProto() into the target collection.
            method.addStatement(
                    "to.$L( from.$L().stream().map(this::fromProto).collect($T.toCollection($T::new)) )",
                    javaMethodName("set", field),
                    protoMethodName("get", field) + "List",
                    Collectors.class,
                    collector);
        }
    }

    @Override
    public TypeName resolveJavaProtoType() {
        return ParameterizedTypeName.get(
                (ClassName) getRawJavaType(), getValueType().getJavaProtoType());
    }

    @Override
    public String getProtoType() {
        return "repeated " + getValueType().getProtoType();
    }
}
| 6,544 |
0 | Create_ds/conductor/annotations-processor/src/main/java/com/netflix/conductor/annotationsprocessor/protogen | Create_ds/conductor/annotations-processor/src/main/java/com/netflix/conductor/annotationsprocessor/protogen/types/MessageType.java | /*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.annotationsprocessor.protogen.types;
import java.lang.reflect.Type;
import java.util.List;
import java.util.Set;
import com.squareup.javapoet.ClassName;
import com.squareup.javapoet.MethodSpec;
import com.squareup.javapoet.TypeName;
/**
 * Mapping for a message type handled by this generator; fields of this type are converted via
 * the mapper's toProto/fromProto overloads.
 */
public class MessageType extends AbstractType {
    private String protoFilePath;

    public MessageType(Type javaType, ClassName javaProtoType, String protoFilePath) {
        super(javaType, javaProtoType);
        this.protoFilePath = protoFilePath;
    }

    @Override
    public String getProtoType() {
        // Drop the outermost wrapper class name; join the remaining nesting with dots.
        List<String> names = ((ClassName) getJavaProtoType()).simpleNames();
        return String.join(".", names.subList(1, names.size()));
    }

    public String getProtoFilePath() {
        return protoFilePath;
    }

    @Override
    public TypeName getRawJavaType() {
        return getJavaProtoType();
    }

    @Override
    public void mapToProto(String field, MethodSpec.Builder method) {
        String getter = javaMethodName("get", field);
        // Guard against null sub-messages on the POJO side.
        method.beginControlFlow("if (from.$L() != null)", getter);
        method.addStatement("to.$L( toProto( from.$L() ) )", protoMethodName("set", field), getter);
        method.endControlFlow();
    }

    private boolean isEnum() {
        Type t = getJavaType();
        if (!(t instanceof Class<?>)) {
            return false;
        }
        return ((Class) t).isEnum();
    }

    @Override
    public void mapFromProto(String field, MethodSpec.Builder method) {
        // Enum fields skip the hasX() presence check — presumably proto enums expose no such
        // accessor; verify against the generated protobuf API.
        boolean checkPresence = !isEnum();
        if (checkPresence) {
            method.beginControlFlow("if (from.$L())", protoMethodName("has", field));
        }
        method.addStatement(
                "to.$L( fromProto( from.$L() ) )",
                javaMethodName("set", field),
                protoMethodName("get", field));
        if (checkPresence) {
            method.endControlFlow();
        }
    }

    @Override
    public void getDependencies(Set<String> deps) {
        deps.add(protoFilePath);
    }

    @Override
    public void generateAbstractMethods(Set<MethodSpec> specs) {}
}
| 6,545 |
0 | Create_ds/conductor/annotations-processor/src/main/java/com/netflix/conductor/annotationsprocessor/protogen | Create_ds/conductor/annotations-processor/src/main/java/com/netflix/conductor/annotationsprocessor/protogen/types/TypeMapper.java | /*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.annotationsprocessor.protogen.types;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.util.*;
import com.google.protobuf.Any;
import com.squareup.javapoet.ClassName;
import com.squareup.javapoet.TypeName;
/**
 * Registry mapping Java {@link Type}s to their protogen {@link AbstractType} counterparts.
 *
 * <p>Scalars and well-known external messages are registered up front; parameterized
 * {@code List}/{@code Set}/{@code LinkedList} and {@code Map} types are wrapped lazily on
 * first lookup. Backed by plain {@code HashMap}s, so not thread-safe.
 */
public class TypeMapper {
    /** Concrete collection class instantiated when mapping a repeated proto field to Java. */
    // Was a raw, non-final 'Map<Type, Class>'; parameterized and made final.
    static final Map<Type, Class<?>> PROTO_LIST_TYPES = new HashMap<>();

    static {
        PROTO_LIST_TYPES.put(List.class, ArrayList.class);
        PROTO_LIST_TYPES.put(Set.class, HashSet.class);
        PROTO_LIST_TYPES.put(LinkedList.class, LinkedList.class);
    }

    // Singleton instance; made final so it cannot be reassigned.
    public static final TypeMapper INSTANCE = new TypeMapper();

    private final Map<Type, AbstractType> types = new HashMap<>();

    /** Registers a direct Java-to-proto scalar mapping (e.g. int -> "int32"). */
    public void addScalarType(Type t, String protoType) {
        types.put(t, new ScalarType(t, TypeName.get(t), protoType));
    }

    /** Registers a message mapping for the given Java class. */
    public void addMessageType(Class<?> t, MessageType message) {
        types.put(t, message);
    }

    public TypeMapper() {
        addScalarType(int.class, "int32");
        addScalarType(Integer.class, "int32");
        addScalarType(long.class, "int64");
        addScalarType(Long.class, "int64");
        addScalarType(String.class, "string");
        addScalarType(boolean.class, "bool");
        addScalarType(Boolean.class, "bool");

        // Arbitrary objects are carried as google.protobuf.Value.
        addMessageType(
                Object.class,
                new ExternMessageType(
                        Object.class,
                        ClassName.get("com.google.protobuf", "Value"),
                        "google.protobuf.Value",
                        "google/protobuf/struct.proto"));
        addMessageType(
                Any.class,
                new ExternMessageType(
                        Any.class,
                        ClassName.get(Any.class),
                        "google.protobuf.Any",
                        "google/protobuf/any.proto"));
    }

    /**
     * Returns the mapping for {@code t}, lazily registering List/Set/Map wrappers for
     * parameterized collection types.
     *
     * @throws IllegalArgumentException if the type cannot be mapped
     */
    public AbstractType get(Type t) {
        // Single lookup instead of the previous containsKey + get pairs; values are never null.
        AbstractType known = types.get(t);
        if (known != null) {
            return known;
        }
        if (t instanceof ParameterizedType) {
            Type raw = ((ParameterizedType) t).getRawType();
            if (PROTO_LIST_TYPES.containsKey(raw)) {
                types.put(t, new ListType(t));
            } else if (raw.equals(Map.class)) {
                types.put(t, new MapType(t));
            }
        }
        AbstractType mapped = types.get(t);
        if (mapped == null) {
            throw new IllegalArgumentException("Cannot map type: " + t);
        }
        return mapped;
    }

    /**
     * Finds a registered {@link MessageType} by the simple name of its Java class, or returns
     * {@code null} if no such type was declared.
     */
    public MessageType get(String className) {
        for (Map.Entry<Type, AbstractType> pair : types.entrySet()) {
            AbstractType t = pair.getValue();
            if (t instanceof MessageType) {
                // Was a raw (Class) cast; Class<?> avoids the raw-type warning.
                if (((Class<?>) t.getJavaType()).getSimpleName().equals(className)) {
                    return (MessageType) t;
                }
            }
        }
        return null;
    }

    /** Declares {@code type} as a message nested inside {@code parent}'s proto type. */
    public MessageType declare(Class<?> type, MessageType parent) {
        return declare(type, (ClassName) parent.getJavaProtoType(), parent.getProtoFilePath());
    }

    /**
     * Registers a new message type nested under {@code parentType}.
     *
     * @throws IllegalArgumentException if the type was already declared
     */
    public MessageType declare(Class<?> type, ClassName parentType, String protoFilePath) {
        String simpleName = type.getSimpleName();
        MessageType t = new MessageType(type, parentType.nestedClass(simpleName), protoFilePath);
        if (types.containsKey(type)) {
            throw new IllegalArgumentException("duplicate type declaration: " + type);
        }
        types.put(type, t);
        return t;
    }

    /** Creates the root MessageType for a top-level generated wrapper class. */
    public MessageType baseClass(ClassName className, String protoFilePath) {
        return new MessageType(Object.class, className, protoFilePath);
    }
}
| 6,546 |
0 | Create_ds/conductor/annotations-processor/src/main/java/com/netflix/conductor/annotationsprocessor/protogen | Create_ds/conductor/annotations-processor/src/main/java/com/netflix/conductor/annotationsprocessor/protogen/types/GenericType.java | /*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.annotationsprocessor.protogen.types;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.util.Set;
import com.squareup.javapoet.ClassName;
import com.squareup.javapoet.MethodSpec;
import com.squareup.javapoet.TypeName;
/**
 * Base class for parameterized (generic) type mappings such as lists and maps.
 * Concrete subclasses supply the wrapper suffix, the value type and the
 * resolved protobuf Java type.
 */
abstract class GenericType extends AbstractType {
    public GenericType(Type type) {
        super(type, null);
    }

    /** Returns the raw class behind this parameterized type (e.g. List for List&lt;T&gt;). */
    protected Class getRawType() {
        return (Class) ((ParameterizedType) getJavaType()).getRawType();
    }

    /**
     * Resolves the type argument at {@code idx}, wrapping nested generic types
     * so they can be represented as proto messages.
     */
    protected AbstractType resolveGenericParam(int idx) {
        Type param = ((ParameterizedType) getJavaType()).getActualTypeArguments()[idx];
        AbstractType resolved = TypeMapper.INSTANCE.get(param);
        return (resolved instanceof GenericType)
                ? WrappedType.wrap((GenericType) resolved)
                : resolved;
    }

    public abstract String getWrapperSuffix();

    public abstract AbstractType getValueType();

    public abstract TypeName resolveJavaProtoType();

    @Override
    public TypeName getRawJavaType() {
        return ClassName.get(getRawType());
    }

    @Override
    public void getDependencies(Set<String> deps) {
        // Only the element/value type contributes proto file dependencies.
        getValueType().getDependencies(deps);
    }

    @Override
    public void generateAbstractMethods(Set<MethodSpec> specs) {
        getValueType().generateAbstractMethods(specs);
    }

    @Override
    public TypeName getJavaProtoType() {
        // Lazily computed and cached; resolution may recurse into nested types.
        if (javaProtoType == null) {
            javaProtoType = resolveJavaProtoType();
        }
        return javaProtoType;
    }
}
| 6,547 |
0 | Create_ds/conductor/json-jq-task/src/test/java/com/netflix/conductor/tasks | Create_ds/conductor/json-jq-task/src/test/java/com/netflix/conductor/tasks/json/JsonJqTransformTest.java | /*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.tasks.json;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.junit.Test;
import com.netflix.conductor.common.config.ObjectMapperProvider;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import static org.junit.Assert.*;
/**
 * Unit tests for {@link JsonJqTransform} covering scalar, map, list and null
 * query results, as well as error reporting for invalid jq expressions.
 */
public class JsonJqTransformTest {
    private final ObjectMapper objectMapper = new ObjectMapperProvider().getObjectMapper();

    @Test
    public void dataShouldBeCorrectlySelected() {
        final JsonJqTransform jsonJqTransform = new JsonJqTransform(objectMapper);
        final WorkflowModel workflow = new WorkflowModel();
        final TaskModel task = new TaskModel();
        final Map<String, Object> inputData = new HashMap<>();
        inputData.put("queryExpression", ".inputJson.key[0]");
        final Map<String, Object> inputJson = new HashMap<>();
        inputJson.put("key", Collections.singletonList("VALUE"));
        inputData.put("inputJson", inputJson);
        task.setInputData(inputData);
        task.setOutputData(new HashMap<>());
        jsonJqTransform.start(workflow, task, null);
        assertNull(task.getOutputData().get("error"));
        // "result" holds the first match, "resultList" the full match list.
        assertEquals("VALUE", task.getOutputData().get("result").toString());
        assertEquals("[\"VALUE\"]", task.getOutputData().get("resultList").toString());
    }

    @Test
    public void simpleErrorShouldBeDisplayed() {
        final JsonJqTransform jsonJqTransform = new JsonJqTransform(objectMapper);
        final WorkflowModel workflow = new WorkflowModel();
        final TaskModel task = new TaskModel();
        final Map<String, Object> inputData = new HashMap<>();
        // "{" alone is an incomplete jq expression and must surface a parse error.
        inputData.put("queryExpression", "{");
        task.setInputData(inputData);
        task.setOutputData(new HashMap<>());
        jsonJqTransform.start(workflow, task, null);
        assertTrue(
                ((String) task.getOutputData().get("error"))
                        .startsWith("Encountered \"<EOF>\" at line 1, column 1."));
    }

    @Test
    public void nestedExceptionsWithNACausesShouldBeDisregarded() {
        final JsonJqTransform jsonJqTransform = new JsonJqTransform(objectMapper);
        final WorkflowModel workflow = new WorkflowModel();
        final TaskModel task = new TaskModel();
        final Map<String, Object> inputData = new HashMap<>();
        inputData.put(
                "queryExpression",
                "{officeID: (.inputJson.OIDs | unique)[], requestedIndicatorList: .inputJson.requestedindicatorList}");
        final Map<String, Object> inputJson = new HashMap<>();
        inputJson.put("OIDs", Collections.singletonList("VALUE"));
        final Map<String, Object> indicatorList = new HashMap<>();
        indicatorList.put("indicator", "AFA");
        indicatorList.put("value", false);
        inputJson.put("requestedindicatorList", Collections.singletonList(indicatorList));
        inputData.put("inputJson", inputJson);
        task.setInputData(inputData);
        task.setOutputData(new HashMap<>());
        jsonJqTransform.start(workflow, task, null);
        // The reported message must be the underlying parse error, not an
        // "N/A" placeholder from a nested cause.
        assertTrue(
                ((String) task.getOutputData().get("error"))
                        .startsWith("Encountered \" \"[\" \"[ \"\" at line 1"));
    }

    @SuppressWarnings("unchecked")
    @Test
    public void mapResultShouldBeCorrectlyExtracted() {
        final JsonJqTransform jsonJqTransform = new JsonJqTransform(objectMapper);
        final WorkflowModel workflow = new WorkflowModel();
        final TaskModel task = new TaskModel();
        final Map<String, Object> taskInput = new HashMap<>();
        Map<String, Object> inputData = new HashMap<>();
        inputData.put("method", "POST");
        inputData.put("successExpression", null);
        inputData.put("requestTransform", "{name: (.body.name + \" you are a \" + .body.title) }");
        inputData.put("responseTransform", "{result: \"reply: \" + .response.body.message}");
        taskInput.put("input", inputData);
        taskInput.put(
                "queryExpression",
                "{ requestTransform: .input.requestTransform // \".body\" , responseTransform: .input.responseTransform // \".response.body\", method: .input.method // \"GET\", document: .input.document // \"rgt_results\", successExpression: .input.successExpression // \"true\" }");
        task.setInputData(taskInput);
        task.setOutputData(new HashMap<>());
        jsonJqTransform.start(workflow, task, null);
        assertNull(task.getOutputData().get("error"));
        assertTrue(task.getOutputData().get("result") instanceof Map);
        HashMap<String, Object> result =
                (HashMap<String, Object>) task.getOutputData().get("result");
        assertEquals("POST", result.get("method"));
        assertEquals(
                "{name: (.body.name + \" you are a \" + .body.title) }",
                result.get("requestTransform"));
        assertEquals(
                "{result: \"reply: \" + .response.body.message}", result.get("responseTransform"));
    }

    @Test
    public void stringResultShouldBeCorrectlyExtracted() {
        final JsonJqTransform jsonJqTransform = new JsonJqTransform(objectMapper);
        final WorkflowModel workflow = new WorkflowModel();
        final TaskModel task = new TaskModel();
        final Map<String, Object> taskInput = new HashMap<>();
        taskInput.put("data", new ArrayList<>());
        taskInput.put(
                "queryExpression", "if(.data | length >0) then \"EXISTS\" else \"CREATE\" end");
        task.setInputData(taskInput);
        jsonJqTransform.start(workflow, task, null);
        assertNull(task.getOutputData().get("error"));
        // Scalar string results must be unwrapped, not returned as JsonNode.
        assertTrue(task.getOutputData().get("result") instanceof String);
        String result = (String) task.getOutputData().get("result");
        assertEquals("CREATE", result);
    }

    @SuppressWarnings("unchecked")
    @Test
    public void listResultShouldBeCorrectlyExtracted() throws JsonProcessingException {
        final JsonJqTransform jsonJqTransform = new JsonJqTransform(objectMapper);
        final WorkflowModel workflow = new WorkflowModel();
        final TaskModel task = new TaskModel();
        String json =
                "{ \"request\": { \"transitions\": [ { \"name\": \"redeliver\" }, { \"name\": \"redeliver_from_validation_error\" }, { \"name\": \"redelivery\" } ] } }";
        Map<String, Object> inputData = objectMapper.readValue(json, Map.class);
        final Map<String, Object> taskInput = new HashMap<>();
        taskInput.put("inputData", inputData);
        taskInput.put("queryExpression", ".inputData.request.transitions | map(.name)");
        task.setInputData(taskInput);
        jsonJqTransform.start(workflow, task, null);
        assertNull(task.getOutputData().get("error"));
        assertTrue(task.getOutputData().get("result") instanceof List);
        List<Object> result = (List<Object>) task.getOutputData().get("result");
        assertEquals(3, result.size());
    }

    @Test
    public void nullResultShouldBeCorrectlyExtracted() {
        // Note: no checked exception is thrown here, so no `throws` clause is needed.
        final JsonJqTransform jsonJqTransform = new JsonJqTransform(objectMapper);
        final WorkflowModel workflow = new WorkflowModel();
        final TaskModel task = new TaskModel();
        final Map<String, Object> taskInput = new HashMap<>();
        taskInput.put("queryExpression", "null");
        task.setInputData(taskInput);
        jsonJqTransform.start(workflow, task, null);
        assertNull(task.getOutputData().get("error"));
        assertNull(task.getOutputData().get("result"));
    }
}
| 6,548 |
0 | Create_ds/conductor/json-jq-task/src/main/java/com/netflix/conductor/tasks | Create_ds/conductor/json-jq-task/src/main/java/com/netflix/conductor/tasks/json/JsonJqTransform.java | /*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.tasks.json;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import com.netflix.conductor.core.execution.WorkflowExecutor;
import com.netflix.conductor.core.execution.tasks.WorkflowSystemTask;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.github.benmanes.caffeine.cache.CacheLoader;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.LoadingCache;
import net.thisptr.jackson.jq.JsonQuery;
import net.thisptr.jackson.jq.Scope;
@Component(JsonJqTransform.NAME)
/**
 * System task that applies a jq expression ({@code queryExpression}) to the
 * task's input data and publishes the first match as {@code result} and the
 * full match list as {@code resultList}. Compiled queries are cached.
 */
public class JsonJqTransform extends WorkflowSystemTask {

    private static final Logger LOGGER = LoggerFactory.getLogger(JsonJqTransform.class);

    public static final String NAME = "JSON_JQ_TRANSFORM";
    private static final String QUERY_EXPRESSION_PARAMETER = "queryExpression";
    private static final String OUTPUT_RESULT = "result";
    private static final String OUTPUT_RESULT_LIST = "resultList";
    private static final String OUTPUT_ERROR = "error";
    // Both type references are stateless; keep them static for consistency.
    private static final TypeReference<Map<String, Object>> mapType = new TypeReference<>() {};
    private static final TypeReference<List<Object>> listType = new TypeReference<>() {};

    private final Scope rootScope;
    private final ObjectMapper objectMapper;
    private final LoadingCache<String, JsonQuery> queryCache = createQueryCache();

    @Autowired
    public JsonJqTransform(ObjectMapper objectMapper) {
        super(NAME);
        this.objectMapper = objectMapper;
        this.rootScope = Scope.newEmptyScope();
        this.rootScope.loadFunctions(Scope.class.getClassLoader());
    }

    /**
     * Evaluates the jq expression against the task input.
     *
     * <p>On success the task is COMPLETED with {@code result}/{@code resultList}
     * outputs; on failure the task is FAILED and the first meaningful exception
     * message is stored in {@code error}.
     */
    @Override
    public void start(WorkflowModel workflow, TaskModel task, WorkflowExecutor executor) {
        final Map<String, Object> taskInput = task.getInputData();
        final String queryExpression = (String) taskInput.get(QUERY_EXPRESSION_PARAMETER);

        if (queryExpression == null) {
            task.setReasonForIncompletion(
                    "Missing '" + QUERY_EXPRESSION_PARAMETER + "' in input parameters");
            task.setStatus(TaskModel.Status.FAILED);
            return;
        }

        try {
            final JsonNode input = objectMapper.valueToTree(taskInput);
            final JsonQuery query = queryCache.get(queryExpression);
            // Child scope so per-invocation state never leaks into the shared root.
            final Scope childScope = Scope.newChildScope(rootScope);
            final List<JsonNode> result = query.apply(childScope, input);
            task.setStatus(TaskModel.Status.COMPLETED);
            if (result == null) {
                task.addOutput(OUTPUT_RESULT, null);
                task.addOutput(OUTPUT_RESULT_LIST, null);
            } else if (result.isEmpty()) {
                task.addOutput(OUTPUT_RESULT, null);
                task.addOutput(OUTPUT_RESULT_LIST, result);
            } else {
                task.addOutput(OUTPUT_RESULT, extractBody(result.get(0)));
                task.addOutput(OUTPUT_RESULT_LIST, result);
            }
        } catch (final Exception e) {
            LOGGER.error(
                    "Error executing task: {} in workflow: {}",
                    task.getTaskId(),
                    workflow.getWorkflowId(),
                    e);
            task.setStatus(TaskModel.Status.FAILED);
            final String message = extractFirstValidMessage(e);
            task.setReasonForIncompletion(message);
            task.addOutput(OUTPUT_ERROR, message);
        }
    }

    /** Builds the compiled-query cache (1h expiry, max 1000 entries). */
    private LoadingCache<String, JsonQuery> createQueryCache() {
        final CacheLoader<String, JsonQuery> loader = JsonQuery::compile;
        return Caffeine.newBuilder()
                .expireAfterWrite(1, TimeUnit.HOURS)
                .maximumSize(1000)
                .build(loader);
    }

    @Override
    public boolean execute(
            WorkflowModel workflow, TaskModel task, WorkflowExecutor workflowExecutor) {
        this.start(workflow, task, workflowExecutor);
        return true;
    }

    /**
     * Walks the cause chain and returns the first non-null message that does not
     * contain "N/A" (jackson-jq nests exceptions whose causes carry "N/A"
     * placeholder messages). Returns an empty string when none qualifies.
     */
    private String extractFirstValidMessage(final Exception e) {
        Throwable currentStack = e;
        final List<String> messages = new ArrayList<>();
        messages.add(currentStack.getMessage());
        while (currentStack.getCause() != null) {
            currentStack = currentStack.getCause();
            messages.add(currentStack.getMessage());
        }
        // BUGFIX: Throwable.getMessage() may return null; the previous
        // unconditional it.contains("N/A") threw an NPE for such causes,
        // masking the original jq error.
        return messages.stream()
                .filter(it -> it != null && !it.contains("N/A"))
                .findFirst()
                .orElse("");
    }

    /** Converts a JsonNode into the matching plain Java value (map/list/scalar/null). */
    private Object extractBody(JsonNode node) {
        if (node.isNull()) {
            return null;
        } else if (node.isObject()) {
            return objectMapper.convertValue(node, mapType);
        } else if (node.isArray()) {
            return objectMapper.convertValue(node, listType);
        } else if (node.isBoolean()) {
            return node.asBoolean();
        } else if (node.isNumber()) {
            // Preserve integral vs floating distinction for downstream consumers.
            if (node.isIntegralNumber()) {
                return node.asLong();
            } else {
                return node.asDouble();
            }
        } else {
            return node.asText();
        }
    }
}
| 6,549 |
0 | Create_ds/conductor/redis-lock/src/test/java/com/netflix/conductor/redis | Create_ds/conductor/redis-lock/src/test/java/com/netflix/conductor/redis/lock/RedisLockTest.java | /*
* Copyright 2020 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.lock;
import java.util.concurrent.TimeUnit;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.redisson.Redisson;
import org.redisson.api.RLock;
import org.redisson.api.RedissonClient;
import org.redisson.config.Config;
import com.netflix.conductor.redislock.config.RedisLockProperties;
import com.netflix.conductor.redislock.config.RedisLockProperties.REDIS_SERVER_TYPE;
import com.netflix.conductor.redislock.lock.RedisLock;
import redis.embedded.RedisServer;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Integration-style tests for {@link RedisLock} against an embedded Redis
 * server started once for the whole class. A second, independent Redisson
 * client is used to inspect/clear lock state from "outside".
 */
public class RedisLockTest {

    private static RedisLock redisLock;
    private static Config config;
    private static RedissonClient redisson;
    private static RedisServer redisServer = null;

    @BeforeClass
    public static void setUp() throws Exception {
        String testServerAddress = "redis://127.0.0.1:6371";

        redisServer = new RedisServer(6371);
        if (redisServer.isActive()) {
            redisServer.stop();
        }
        redisServer.start();

        RedisLockProperties properties = mock(RedisLockProperties.class);
        when(properties.getServerType()).thenReturn(REDIS_SERVER_TYPE.SINGLE);
        when(properties.getServerAddress()).thenReturn(testServerAddress);
        when(properties.getServerMasterName()).thenReturn("master");
        when(properties.getNamespace()).thenReturn("");
        when(properties.isIgnoreLockingExceptions()).thenReturn(false);

        Config redissonConfig = new Config();
        redissonConfig.useSingleServer().setAddress(testServerAddress).setTimeout(10000);
        redisLock = new RedisLock((Redisson) Redisson.create(redissonConfig), properties);

        // Create another instance of redisson for tests.
        RedisLockTest.config = new Config();
        RedisLockTest.config.useSingleServer().setAddress(testServerAddress).setTimeout(10000);
        redisson = Redisson.create(RedisLockTest.config);
    }

    @AfterClass
    public static void tearDown() {
        redisServer.stop();
    }

    @Test
    public void testLocking() {
        redisson.getKeys().flushall();
        String lockId = "abcd-1234";
        assertTrue(redisLock.acquireLock(lockId, 1000, 1000, TimeUnit.MILLISECONDS));
    }

    @Test
    public void testLockExpiration() throws InterruptedException {
        redisson.getKeys().flushall();
        String lockId = "abcd-1234";

        boolean isLocked = redisLock.acquireLock(lockId, 1000, 1000, TimeUnit.MILLISECONDS);
        assertTrue(isLocked);

        // Wait past the 1s lease; the lock must have been released by Redis.
        Thread.sleep(2000);
        RLock lock = redisson.getLock(lockId);
        assertFalse(lock.isLocked());
    }

    @Test
    public void testLockReentry() throws InterruptedException {
        redisson.getKeys().flushall();
        String lockId = "abcd-1234";
        boolean isLocked = redisLock.acquireLock(lockId, 1000, 60000, TimeUnit.MILLISECONDS);
        assertTrue(isLocked);

        Thread.sleep(1000);

        // get the lock back
        isLocked = redisLock.acquireLock(lockId, 1000, 1000, TimeUnit.MILLISECONDS);
        assertTrue(isLocked);

        // BUGFIX: previously this re-asserted the local `isLocked` flag and left
        // `lock` unused; verify the actual server-side lock state instead.
        RLock lock = redisson.getLock(lockId);
        assertTrue(lock.isLocked());
    }

    @Test
    public void testReleaseLock() {
        redisson.getKeys().flushall();
        String lockId = "abcd-1234";

        boolean isLocked = redisLock.acquireLock(lockId, 1000, 10000, TimeUnit.MILLISECONDS);
        assertTrue(isLocked);

        redisLock.releaseLock(lockId);

        RLock lock = redisson.getLock(lockId);
        assertFalse(lock.isLocked());
    }

    @Test
    public void testLockReleaseAndAcquire() throws InterruptedException {
        redisson.getKeys().flushall();
        String lockId = "abcd-1234";

        boolean isLocked = redisLock.acquireLock(lockId, 1000, 10000, TimeUnit.MILLISECONDS);
        assertTrue(isLocked);

        redisLock.releaseLock(lockId);

        // A different thread must now be able to acquire the released lock.
        Worker worker1 = new Worker(redisLock, lockId);
        worker1.start();
        worker1.join();

        assertTrue(worker1.isLocked);
    }

    @Test
    public void testLockingDuplicateThreads() throws InterruptedException {
        redisson.getKeys().flushall();
        String lockId = "abcd-1234";

        Worker worker1 = new Worker(redisLock, lockId);
        Worker worker2 = new Worker(redisLock, lockId);

        worker1.start();
        worker2.start();
        worker1.join();
        worker2.join();

        // Ensure only one of them had got the lock.
        assertFalse(worker1.isLocked && worker2.isLocked);
        assertTrue(worker1.isLocked || worker2.isLocked);
    }

    @Test
    public void testDuplicateLockAcquireFailure() throws InterruptedException {
        redisson.getKeys().flushall();
        String lockId = "abcd-1234";

        Worker worker1 = new Worker(redisLock, lockId, 100L, 60000L);
        worker1.start();
        worker1.join();

        boolean isLocked = redisLock.acquireLock(lockId, 500L, 1000L, TimeUnit.MILLISECONDS);

        // Ensure only one of them had got the lock.
        assertFalse(isLocked);
        assertTrue(worker1.isLocked);
    }

    @Test
    public void testReacquireLostKey() {
        redisson.getKeys().flushall();
        String lockId = "abcd-1234";

        boolean isLocked = redisLock.acquireLock(lockId, 1000, 10000, TimeUnit.MILLISECONDS);
        assertTrue(isLocked);

        // Delete key from the cluster to reacquire
        // Simulating the case when cluster goes down and possibly loses some keys.
        redisson.getKeys().flushall();

        isLocked = redisLock.acquireLock(lockId, 100, 10000, TimeUnit.MILLISECONDS);
        assertTrue(isLocked);
    }

    @Test
    public void testReleaseLockTwice() {
        redisson.getKeys().flushall();
        String lockId = "abcd-1234";

        boolean isLocked = redisLock.acquireLock(lockId, 1000, 10000, TimeUnit.MILLISECONDS);
        assertTrue(isLocked);

        // A second release must not throw (RedisLock swallows the double-unlock).
        redisLock.releaseLock(lockId);
        redisLock.releaseLock(lockId);
    }

    /** Helper thread that attempts a single lock acquisition and records the outcome. */
    private static class Worker extends Thread {

        private final RedisLock lock;
        private final String lockID;
        // Result flag read by the test after join().
        boolean isLocked;
        private Long timeToTry = 50L;
        private Long leaseTime = 1000L;

        Worker(RedisLock lock, String lockID) {
            super("TestWorker-" + lockID);
            this.lock = lock;
            this.lockID = lockID;
        }

        Worker(RedisLock lock, String lockID, Long timeToTry, Long leaseTime) {
            super("TestWorker-" + lockID);
            this.lock = lock;
            this.lockID = lockID;
            this.timeToTry = timeToTry;
            this.leaseTime = leaseTime;
        }

        @Override
        public void run() {
            isLocked = lock.acquireLock(lockID, timeToTry, leaseTime, TimeUnit.MILLISECONDS);
        }
    }
}
| 6,550 |
0 | Create_ds/conductor/redis-lock/src/main/java/com/netflix/conductor/redislock | Create_ds/conductor/redis-lock/src/main/java/com/netflix/conductor/redislock/config/RedisLockConfiguration.java | /*
* Copyright 2020 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redislock.config;
import java.util.Arrays;
import org.redisson.Redisson;
import org.redisson.config.Config;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import com.netflix.conductor.core.sync.Lock;
import com.netflix.conductor.redislock.config.RedisLockProperties.REDIS_SERVER_TYPE;
import com.netflix.conductor.redislock.lock.RedisLock;
@Configuration
@EnableConfigurationProperties(RedisLockProperties.class)
@ConditionalOnProperty(name = "conductor.workflow-execution-lock.type", havingValue = "redis")
/**
 * Spring configuration that wires a {@link Redisson} client and the
 * {@link RedisLock} implementation of {@link Lock}, active when
 * {@code conductor.workflow-execution-lock.type=redis}.
 */
public class RedisLockConfiguration {

    private static final Logger LOGGER = LoggerFactory.getLogger(RedisLockConfiguration.class);

    @Bean
    public Redisson getRedisson(RedisLockProperties properties) {
        final RedisLockProperties.REDIS_SERVER_TYPE serverType;
        try {
            serverType = properties.getServerType();
        } catch (IllegalArgumentException ie) {
            final String message =
                    "Invalid Redis server type: "
                            + properties.getServerType()
                            + ", supported values are: "
                            + Arrays.toString(REDIS_SERVER_TYPE.values());
            LOGGER.error(message);
            throw new RuntimeException(message, ie);
        }

        final String address = properties.getServerAddress();
        final String password = properties.getServerPassword();
        final Config redissonConfig = new Config();

        // Only override Redisson's netty thread count when explicitly configured.
        final Integer nettyThreads = properties.getNumNettyThreads();
        if (nettyThreads != null && nettyThreads > 0) {
            redissonConfig.setNettyThreads(nettyThreads);
        }

        final int connectionTimeout = 10000;
        switch (serverType) {
            case SINGLE:
                LOGGER.info("Setting up Redis Single Server for RedisLockConfiguration");
                redissonConfig
                        .useSingleServer()
                        .setAddress(address)
                        .setPassword(password)
                        .setTimeout(connectionTimeout);
                break;
            case CLUSTER:
                LOGGER.info("Setting up Redis Cluster for RedisLockConfiguration");
                redissonConfig
                        .useClusterServers()
                        .setScanInterval(2000) // cluster state scan interval in milliseconds
                        .addNodeAddress(address.split(","))
                        .setPassword(password)
                        .setTimeout(connectionTimeout)
                        .setSlaveConnectionMinimumIdleSize(
                                properties.getClusterReplicaConnectionMinIdleSize())
                        .setSlaveConnectionPoolSize(
                                properties.getClusterReplicaConnectionPoolSize())
                        .setMasterConnectionMinimumIdleSize(
                                properties.getClusterPrimaryConnectionMinIdleSize())
                        .setMasterConnectionPoolSize(
                                properties.getClusterPrimaryConnectionPoolSize());
                break;
            case SENTINEL:
                LOGGER.info("Setting up Redis Sentinel Servers for RedisLockConfiguration");
                redissonConfig
                        .useSentinelServers()
                        .setScanInterval(2000)
                        .setMasterName(properties.getServerMasterName())
                        .addSentinelAddress(address)
                        .setPassword(password)
                        .setTimeout(connectionTimeout);
                break;
        }

        return (Redisson) Redisson.create(redissonConfig);
    }

    @Bean
    public Lock provideLock(Redisson redisson, RedisLockProperties properties) {
        return new RedisLock(redisson, properties);
    }
}
| 6,551 |
0 | Create_ds/conductor/redis-lock/src/main/java/com/netflix/conductor/redislock | Create_ds/conductor/redis-lock/src/main/java/com/netflix/conductor/redislock/config/RedisLockProperties.java | /*
* Copyright 2020 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redislock.config;
import org.springframework.boot.context.properties.ConfigurationProperties;
@ConfigurationProperties("conductor.redis-lock")
public class RedisLockProperties {
    /** The redis server configuration to be used. */
    private REDIS_SERVER_TYPE serverType = REDIS_SERVER_TYPE.SINGLE;
    /** The address of the redis server following format -- redis://host:port */
    private String serverAddress = "redis://127.0.0.1:6379";
    /** The password for redis authentication */
    private String serverPassword = null;
    /** The master server name used by Redis Sentinel servers and master change monitoring task */
    private String serverMasterName = "master";
    /** The namespace to use to prepend keys used for locking in redis */
    private String namespace = "";
    /** The number of Netty threads to use (unset/non-positive leaves the client default) */
    private Integer numNettyThreads;
    /** If using Cluster Mode, you can use this to set num of min idle connections for replica */
    private int clusterReplicaConnectionMinIdleSize = 24;
    /** If using Cluster Mode, you can use this to set the connection pool size for replica */
    private int clusterReplicaConnectionPoolSize = 64;
    /** If using Cluster Mode, you can use this to set num of min idle connections for primary */
    private int clusterPrimaryConnectionMinIdleSize = 24;
    /** If using Cluster Mode, you can use this to set the connection pool size for primary */
    private int clusterPrimaryConnectionPoolSize = 64;
    /**
     * Enable to optionally continue without a lock to not block executions until the locking
     * service becomes available
     */
    private boolean ignoreLockingExceptions = false;
    public REDIS_SERVER_TYPE getServerType() {
        return serverType;
    }
    public void setServerType(REDIS_SERVER_TYPE serverType) {
        this.serverType = serverType;
    }
    public String getServerAddress() {
        return serverAddress;
    }
    public void setServerAddress(String serverAddress) {
        this.serverAddress = serverAddress;
    }
    public String getServerPassword() {
        return serverPassword;
    }
    public void setServerPassword(String serverPassword) {
        this.serverPassword = serverPassword;
    }
    public String getServerMasterName() {
        return serverMasterName;
    }
    public void setServerMasterName(String serverMasterName) {
        this.serverMasterName = serverMasterName;
    }
    public String getNamespace() {
        return namespace;
    }
    public void setNamespace(String namespace) {
        this.namespace = namespace;
    }
    public boolean isIgnoreLockingExceptions() {
        return ignoreLockingExceptions;
    }
    public void setIgnoreLockingExceptions(boolean ignoreLockingExceptions) {
        this.ignoreLockingExceptions = ignoreLockingExceptions;
    }
    public Integer getNumNettyThreads() {
        return numNettyThreads;
    }
    public void setNumNettyThreads(Integer numNettyThreads) {
        this.numNettyThreads = numNettyThreads;
    }
    // NOTE(review): the four cluster* getters/setters box the underlying int fields
    // as Integer; kept as-is for binder/caller compatibility.
    public Integer getClusterReplicaConnectionMinIdleSize() {
        return clusterReplicaConnectionMinIdleSize;
    }
    public void setClusterReplicaConnectionMinIdleSize(
            Integer clusterReplicaConnectionMinIdleSize) {
        this.clusterReplicaConnectionMinIdleSize = clusterReplicaConnectionMinIdleSize;
    }
    public Integer getClusterReplicaConnectionPoolSize() {
        return clusterReplicaConnectionPoolSize;
    }
    public void setClusterReplicaConnectionPoolSize(Integer clusterReplicaConnectionPoolSize) {
        this.clusterReplicaConnectionPoolSize = clusterReplicaConnectionPoolSize;
    }
    public Integer getClusterPrimaryConnectionMinIdleSize() {
        return clusterPrimaryConnectionMinIdleSize;
    }
    public void setClusterPrimaryConnectionMinIdleSize(
            Integer clusterPrimaryConnectionMinIdleSize) {
        this.clusterPrimaryConnectionMinIdleSize = clusterPrimaryConnectionMinIdleSize;
    }
    public Integer getClusterPrimaryConnectionPoolSize() {
        return clusterPrimaryConnectionPoolSize;
    }
    public void setClusterPrimaryConnectionPoolSize(Integer clusterPrimaryConnectionPoolSize) {
        this.clusterPrimaryConnectionPoolSize = clusterPrimaryConnectionPoolSize;
    }
    /** Supported Redis deployment topologies. */
    public enum REDIS_SERVER_TYPE {
        SINGLE,
        CLUSTER,
        SENTINEL
    }
}
| 6,552 |
0 | Create_ds/conductor/redis-lock/src/main/java/com/netflix/conductor/redislock | Create_ds/conductor/redis-lock/src/main/java/com/netflix/conductor/redislock/lock/RedisLock.java | /*
* Copyright 2020 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redislock.lock;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang3.StringUtils;
import org.redisson.Redisson;
import org.redisson.api.RLock;
import org.redisson.api.RedissonClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.core.sync.Lock;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.redislock.config.RedisLockProperties;
/**
 * {@link Lock} implementation backed by Redisson's distributed {@link RLock}.
 *
 * <p>Lock ids are prefixed with the namespace configured in {@link RedisLockProperties}.
 */
public class RedisLock implements Lock {

    private static final Logger LOGGER = LoggerFactory.getLogger(RedisLock.class);

    private final RedisLockProperties properties;
    private final RedissonClient redisson;

    // FIX: this was a mutable static field assigned in the constructor, so the
    // last-constructed RedisLock instance silently redefined the namespace used by
    // every other instance. An instance field ties the namespace to this instance's
    // own configuration.
    private final String lockNamespace;

    public RedisLock(Redisson redisson, RedisLockProperties properties) {
        this.properties = properties;
        this.redisson = redisson;
        this.lockNamespace = properties.getNamespace();
    }

    /** Blocks indefinitely until the lock identified by {@code lockId} is acquired. */
    @Override
    public void acquireLock(String lockId) {
        RLock lock = redisson.getLock(parseLockId(lockId));
        lock.lock();
    }

    /**
     * Attempts to acquire the lock, blocking up to {@code timeToTry}.
     *
     * @param lockId resource to lock on
     * @param timeToTry maximum time to wait for the lock
     * @param unit time unit for {@code timeToTry}
     * @return true if the lock was acquired
     */
    @Override
    public boolean acquireLock(String lockId, long timeToTry, TimeUnit unit) {
        RLock lock = redisson.getLock(parseLockId(lockId));
        try {
            return lock.tryLock(timeToTry, unit);
        } catch (Exception e) {
            return handleAcquireLockFailure(lockId, e);
        }
    }

    /**
     * @param lockId resource to lock on
     * @param timeToTry blocks up to timeToTry duration in attempt to acquire the lock
     * @param leaseTime Lock lease expiration duration. Redisson default is -1, meaning it holds the
     *     lock until explicitly unlocked.
     * @param unit time unit
     * @return true if the lock was acquired within {@code timeToTry}
     */
    @Override
    public boolean acquireLock(String lockId, long timeToTry, long leaseTime, TimeUnit unit) {
        RLock lock = redisson.getLock(parseLockId(lockId));
        try {
            return lock.tryLock(timeToTry, leaseTime, unit);
        } catch (Exception e) {
            return handleAcquireLockFailure(lockId, e);
        }
    }

    @Override
    public void releaseLock(String lockId) {
        RLock lock = redisson.getLock(parseLockId(lockId));
        try {
            lock.unlock();
        } catch (IllegalMonitorStateException ignored) {
            // Releasing a lock twice using Redisson can cause this exception, which can be ignored.
        }
    }

    @Override
    public void deleteLock(String lockId) {
        // Noop for Redlock algorithm as releaseLock / unlock deletes it.
    }

    /** Prefixes {@code lockId} with the configured namespace; rejects null/empty ids. */
    private String parseLockId(String lockId) {
        if (StringUtils.isEmpty(lockId)) {
            throw new IllegalArgumentException("lockId cannot be NULL or empty: lockId=" + lockId);
        }
        return lockNamespace + "." + lockId;
    }

    private boolean handleAcquireLockFailure(String lockId, Exception e) {
        LOGGER.error("Failed to acquireLock for lockId: {}", lockId, e);
        Monitors.recordAcquireLockFailure(e.getClass().getName());
        // A valid failure to acquire the lock (held by another owner) returns false. When an
        // exception is thrown instead (connection or other issues), we can optionally continue
        // without a "lock" so executions are not blocked until the locking service recovers.
        return properties.isIgnoreLockingExceptions();
    }
}
| 6,553 |
0 | Create_ds/conductor/redis-persistence/src/test/java/com/netflix/conductor/redis/config | Create_ds/conductor/redis-persistence/src/test/java/com/netflix/conductor/redis/config/utils/RedisQueuesShardingStrategyProviderTest.java | /*
* Copyright 2020 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.config.utils;
import java.util.Collections;
import org.junit.Test;
import com.netflix.conductor.redis.config.RedisProperties;
import com.netflix.conductor.redis.dynoqueue.RedisQueuesShardingStrategyProvider;
import com.netflix.dyno.queues.Message;
import com.netflix.dyno.queues.ShardSupplier;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class RedisQueuesShardingStrategyProviderTest {

    /** The local-only strategy must always route to the supplier's current shard. */
    @Test
    public void testStrategy() {
        ShardSupplier supplier = mock(ShardSupplier.class);
        doReturn("current").when(supplier).getCurrentShard();

        RedisQueuesShardingStrategyProvider.LocalOnlyStrategy strategy =
                new RedisQueuesShardingStrategyProvider.LocalOnlyStrategy(supplier);

        Message message = new Message("a", "b");
        assertEquals("current", strategy.getNextShard(Collections.emptyList(), message));
    }

    /** The provider must hand out a LocalOnlyStrategy when configured as "localOnly". */
    @Test
    public void testProvider() {
        ShardSupplier supplier = mock(ShardSupplier.class);
        RedisProperties props = mock(RedisProperties.class);
        when(props.getQueueShardingStrategy()).thenReturn("localOnly");

        RedisQueuesShardingStrategyProvider provider =
                new RedisQueuesShardingStrategyProvider(supplier, props);

        assertTrue(
                provider.get() instanceof RedisQueuesShardingStrategyProvider.LocalOnlyStrategy);
    }
}
| 6,554 |
0 | Create_ds/conductor/redis-persistence/src/test/java/com/netflix/conductor/redis | Create_ds/conductor/redis-persistence/src/test/java/com/netflix/conductor/redis/dao/RedisPollDataDAOTest.java | /*
* Copyright 2021 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.dao;
import org.junit.Before;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.dao.PollDataDAO;
import com.netflix.conductor.dao.PollDataDAOTest;
import com.netflix.conductor.redis.config.RedisProperties;
import com.netflix.conductor.redis.jedis.JedisMock;
import com.netflix.conductor.redis.jedis.JedisProxy;
import com.fasterxml.jackson.databind.ObjectMapper;
import redis.clients.jedis.commands.JedisCommands;
import static org.mockito.Mockito.mock;
@ContextConfiguration(classes = {TestObjectMapperConfiguration.class})
@RunWith(SpringRunner.class)
public class RedisPollDataDAOTest extends PollDataDAOTest {

    /** DAO under test; rebuilt against an in-memory Jedis mock before every test. */
    private PollDataDAO pollDataDAO;

    @Autowired private ObjectMapper objectMapper;

    @Before
    public void init() {
        JedisProxy proxy = new JedisProxy(new JedisMock());
        pollDataDAO =
                new RedisPollDataDAO(
                        proxy,
                        objectMapper,
                        mock(ConductorProperties.class),
                        mock(RedisProperties.class));
    }

    @Override
    protected PollDataDAO getPollDataDAO() {
        return pollDataDAO;
    }
}
| 6,555 |
0 | Create_ds/conductor/redis-persistence/src/test/java/com/netflix/conductor/redis | Create_ds/conductor/redis-persistence/src/test/java/com/netflix/conductor/redis/dao/RedisMetadataDAOTest.java | /*
* Copyright 2021 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.dao;
import java.time.Duration;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.UUID;
import java.util.function.Function;
import java.util.stream.Collectors;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.tasks.TaskDef.RetryLogic;
import com.netflix.conductor.common.metadata.tasks.TaskDef.TimeoutPolicy;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.exception.ConflictException;
import com.netflix.conductor.core.exception.NotFoundException;
import com.netflix.conductor.redis.config.RedisProperties;
import com.netflix.conductor.redis.jedis.JedisMock;
import com.netflix.conductor.redis.jedis.JedisProxy;
import com.fasterxml.jackson.databind.ObjectMapper;
import redis.clients.jedis.commands.JedisCommands;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@ContextConfiguration(classes = {TestObjectMapperConfiguration.class})
@RunWith(SpringRunner.class)
public class RedisMetadataDAOTest {
// DAO under test, backed by an in-memory Jedis mock (see init()).
private RedisMetadataDAO redisMetadataDAO;
@Autowired private ObjectMapper objectMapper;
/** Rebuilds the DAO against a fresh in-memory Jedis mock before each test. */
@Before
public void init() {
ConductorProperties conductorProperties = mock(ConductorProperties.class);
RedisProperties properties = mock(RedisProperties.class);
when(properties.getTaskDefCacheRefreshInterval()).thenReturn(Duration.ofSeconds(60));
JedisCommands jedisMock = new JedisMock();
JedisProxy jedisProxy = new JedisProxy(jedisMock);
redisMetadataDAO =
new RedisMetadataDAO(jedisProxy, objectMapper, conductorProperties, properties);
}
/** Creating the same workflow definition (name + version) twice must be rejected. */
@Test(expected = ConflictException.class)
public void testDup() {
WorkflowDef def = new WorkflowDef();
def.setName("testDup");
def.setVersion(1);
redisMetadataDAO.createWorkflowDef(def);
redisMetadataDAO.createWorkflowDef(def);
}
/**
 * End-to-end CRUD exercise for workflow definitions: create, list, versioning,
 * latest-version lookup, update, and removal (including removing the latest version).
 * NOTE(review): several assertEquals calls in this class pass (actual, expected) rather
 * than the JUnit (expected, actual) order; this only affects failure messages.
 */
@Test
public void testWorkflowDefOperations() {
WorkflowDef def = new WorkflowDef();
def.setName("test");
def.setVersion(1);
def.setDescription("description");
def.setCreatedBy("unit_test");
def.setCreateTime(1L);
def.setOwnerApp("ownerApp");
def.setUpdatedBy("unit_test2");
def.setUpdateTime(2L);
redisMetadataDAO.createWorkflowDef(def);
List<WorkflowDef> all = redisMetadataDAO.getAllWorkflowDefs();
assertNotNull(all);
assertEquals(1, all.size());
assertEquals("test", all.get(0).getName());
assertEquals(1, all.get(0).getVersion());
WorkflowDef found = redisMetadataDAO.getWorkflowDef("test", 1).get();
assertEquals(def, found);
// Creating version 2 must leave version 1 intact.
def.setVersion(2);
redisMetadataDAO.createWorkflowDef(def);
all = redisMetadataDAO.getAllWorkflowDefs();
assertNotNull(all);
assertEquals(2, all.size());
assertEquals("test", all.get(0).getName());
assertEquals(1, all.get(0).getVersion());
found = redisMetadataDAO.getLatestWorkflowDef(def.getName()).get();
assertEquals(def.getName(), found.getName());
assertEquals(def.getVersion(), found.getVersion());
assertEquals(2, found.getVersion());
all = redisMetadataDAO.getAllVersions(def.getName());
assertNotNull(all);
assertEquals(2, all.size());
assertEquals("test", all.get(0).getName());
assertEquals("test", all.get(1).getName());
assertEquals(1, all.get(0).getVersion());
assertEquals(2, all.get(1).getVersion());
// Updating a definition must persist the changed fields.
def.setDescription("updated");
redisMetadataDAO.updateWorkflowDef(def);
found = redisMetadataDAO.getWorkflowDef(def.getName(), def.getVersion()).get();
assertEquals(def.getDescription(), found.getDescription());
List<String> allnames = redisMetadataDAO.findAll();
assertNotNull(allnames);
assertEquals(1, allnames.size());
assertEquals(def.getName(), allnames.get(0));
// Removing both versions must leave no latest version behind.
redisMetadataDAO.removeWorkflowDef("test", 1);
Optional<WorkflowDef> deleted = redisMetadataDAO.getWorkflowDef("test", 1);
assertFalse(deleted.isPresent());
redisMetadataDAO.removeWorkflowDef("test", 2);
Optional<WorkflowDef> latestDef = redisMetadataDAO.getLatestWorkflowDef("test");
assertFalse(latestDef.isPresent());
// Recreate versions 1-3, remove 1 and 2, and verify 3 remains the latest.
WorkflowDef[] workflowDefsArray = new WorkflowDef[3];
for (int i = 1; i <= 3; i++) {
workflowDefsArray[i - 1] = new WorkflowDef();
workflowDefsArray[i - 1].setName("test");
workflowDefsArray[i - 1].setVersion(i);
workflowDefsArray[i - 1].setDescription("description");
workflowDefsArray[i - 1].setCreatedBy("unit_test");
workflowDefsArray[i - 1].setCreateTime(1L);
workflowDefsArray[i - 1].setOwnerApp("ownerApp");
workflowDefsArray[i - 1].setUpdatedBy("unit_test2");
workflowDefsArray[i - 1].setUpdateTime(2L);
redisMetadataDAO.createWorkflowDef(workflowDefsArray[i - 1]);
}
redisMetadataDAO.removeWorkflowDef("test", 1);
redisMetadataDAO.removeWorkflowDef("test", 2);
WorkflowDef workflow = redisMetadataDAO.getLatestWorkflowDef("test").get();
assertEquals(workflow.getVersion(), 3);
}
/** getAllWorkflowDefsLatestVersions must return exactly one (latest) def per name. */
@Test
public void testGetAllWorkflowDefsLatestVersions() {
WorkflowDef def = new WorkflowDef();
def.setName("test1");
def.setVersion(1);
def.setDescription("description");
def.setCreatedBy("unit_test");
def.setCreateTime(1L);
def.setOwnerApp("ownerApp");
def.setUpdatedBy("unit_test2");
def.setUpdateTime(2L);
redisMetadataDAO.createWorkflowDef(def);
def.setName("test2");
redisMetadataDAO.createWorkflowDef(def);
def.setVersion(2);
redisMetadataDAO.createWorkflowDef(def);
def.setName("test3");
def.setVersion(1);
redisMetadataDAO.createWorkflowDef(def);
def.setVersion(2);
redisMetadataDAO.createWorkflowDef(def);
def.setVersion(3);
redisMetadataDAO.createWorkflowDef(def);
// Placed the values in a map because they might not be stored in order of defName.
// To test, needed to confirm that the versions are correct for the definitions.
Map<String, WorkflowDef> allMap =
redisMetadataDAO.getAllWorkflowDefsLatestVersions().stream()
.collect(Collectors.toMap(WorkflowDef::getName, Function.identity()));
assertNotNull(allMap);
assertEquals(3, allMap.size());
assertEquals(1, allMap.get("test1").getVersion());
assertEquals(2, allMap.get("test2").getVersion());
assertEquals(3, allMap.get("test3").getVersion());
}
/** Removing a definition that does not exist must raise NotFoundException. */
@Test(expected = NotFoundException.class)
public void removeInvalidWorkflowDef() {
redisMetadataDAO.removeWorkflowDef("hello", 1);
}
/** CRUD exercise for task definitions: create, read, update, bulk create, and removal. */
@Test
public void testTaskDefOperations() {
TaskDef def = new TaskDef("taskA");
def.setDescription("description");
def.setCreatedBy("unit_test");
def.setCreateTime(1L);
def.setInputKeys(Arrays.asList("a", "b", "c"));
def.setOutputKeys(Arrays.asList("01", "o2"));
def.setOwnerApp("ownerApp");
def.setRetryCount(3);
def.setRetryDelaySeconds(100);
def.setRetryLogic(RetryLogic.FIXED);
def.setTimeoutPolicy(TimeoutPolicy.ALERT_ONLY);
def.setUpdatedBy("unit_test2");
def.setUpdateTime(2L);
def.setRateLimitPerFrequency(50);
def.setRateLimitFrequencyInSeconds(1);
redisMetadataDAO.createTaskDef(def);
TaskDef found = redisMetadataDAO.getTaskDef(def.getName());
assertEquals(def, found);
def.setDescription("updated description");
redisMetadataDAO.updateTaskDef(def);
found = redisMetadataDAO.getTaskDef(def.getName());
assertEquals(def, found);
assertEquals("updated description", found.getDescription());
// Create nine more defs (taskA0..taskA8) and verify the full listing.
for (int i = 0; i < 9; i++) {
TaskDef tdf = new TaskDef("taskA" + i);
redisMetadataDAO.createTaskDef(tdf);
}
List<TaskDef> all = redisMetadataDAO.getAllTaskDefs();
assertNotNull(all);
assertEquals(10, all.size());
Set<String> allnames = all.stream().map(TaskDef::getName).collect(Collectors.toSet());
assertEquals(10, allnames.size());
List<String> sorted = allnames.stream().sorted().collect(Collectors.toList());
assertEquals(def.getName(), sorted.get(0));
for (int i = 0; i < 9; i++) {
assertEquals(def.getName() + i, sorted.get(i + 1));
}
// Remove the nine extras; only the original "taskA" should remain.
for (int i = 0; i < 9; i++) {
redisMetadataDAO.removeTaskDef(def.getName() + i);
}
all = redisMetadataDAO.getAllTaskDefs();
assertNotNull(all);
assertEquals(1, all.size());
assertEquals(def.getName(), all.get(0).getName());
}
/** Removing a task definition that does not exist must raise NotFoundException. */
@Test(expected = NotFoundException.class)
public void testRemoveTaskDef() {
redisMetadataDAO.removeTaskDef("test" + UUID.randomUUID());
}
/**
 * A responseTimeoutSeconds of 0 must be replaced with a default on create/update.
 * NOTE(review): presumably the default is 3600 when no total timeout is set, and
 * timeoutSeconds - 1 (here 199) when timeoutSeconds is set — confirm against
 * RedisMetadataDAO's implementation.
 */
@Test
public void testDefaultsAreSetForResponseTimeout() {
TaskDef def = new TaskDef("taskA");
def.setDescription("description");
def.setCreatedBy("unit_test");
def.setCreateTime(1L);
def.setInputKeys(Arrays.asList("a", "b", "c"));
def.setOutputKeys(Arrays.asList("01", "o2"));
def.setOwnerApp("ownerApp");
def.setRetryCount(3);
def.setRetryDelaySeconds(100);
def.setRetryLogic(RetryLogic.FIXED);
def.setTimeoutPolicy(TimeoutPolicy.ALERT_ONLY);
def.setUpdatedBy("unit_test2");
def.setUpdateTime(2L);
def.setRateLimitPerFrequency(50);
def.setRateLimitFrequencyInSeconds(1);
def.setResponseTimeoutSeconds(0);
redisMetadataDAO.createTaskDef(def);
TaskDef found = redisMetadataDAO.getTaskDef(def.getName());
assertEquals(found.getResponseTimeoutSeconds(), 3600);
found.setTimeoutSeconds(200);
found.setResponseTimeoutSeconds(0);
redisMetadataDAO.updateTaskDef(found);
TaskDef foundNew = redisMetadataDAO.getTaskDef(def.getName());
assertEquals(foundNew.getResponseTimeoutSeconds(), 199);
}
}
| 6,556 |
0 | Create_ds/conductor/redis-persistence/src/test/java/com/netflix/conductor/redis | Create_ds/conductor/redis-persistence/src/test/java/com/netflix/conductor/redis/dao/RedisExecutionDAOTest.java | /*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.dao;
import java.time.Duration;
import java.util.Collections;
import java.util.List;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.dao.ExecutionDAO;
import com.netflix.conductor.dao.ExecutionDAOTest;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.redis.config.RedisProperties;
import com.netflix.conductor.redis.jedis.JedisMock;
import com.netflix.conductor.redis.jedis.JedisProxy;
import com.fasterxml.jackson.databind.ObjectMapper;
import redis.clients.jedis.commands.JedisCommands;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@ContextConfiguration(classes = {TestObjectMapperConfiguration.class})
@RunWith(SpringRunner.class)
public class RedisExecutionDAOTest extends ExecutionDAOTest {

    private RedisExecutionDAO executionDAO;

    @Autowired private ObjectMapper objectMapper;

    /** Rebuilds the DAO against a fresh in-memory Jedis mock before each test. */
    @Before
    public void init() {
        ConductorProperties conductorProperties = mock(ConductorProperties.class);
        RedisProperties properties = mock(RedisProperties.class);
        when(properties.getEventExecutionPersistenceTTL()).thenReturn(Duration.ofSeconds(5));
        JedisCommands jedisMock = new JedisMock();
        JedisProxy jedisProxy = new JedisProxy(jedisMock);
        executionDAO =
                new RedisExecutionDAO(jedisProxy, objectMapper, conductorProperties, properties);
    }

    /** A created task must be retrievable through the workflow-to-task correlation index. */
    @Test
    public void testCorrelateTaskToWorkflowInDS() {
        String workflowId = "workflowId";
        String taskId = "taskId1";
        String taskDefName = "task1";

        // FIX: removed an unused local TaskDef that was constructed and configured
        // but never passed to any DAO call.
        TaskModel task = new TaskModel();
        task.setTaskId(taskId);
        task.setWorkflowInstanceId(workflowId);
        task.setReferenceTaskName("ref_name");
        task.setTaskDefName(taskDefName);
        task.setTaskType(taskDefName);
        task.setStatus(TaskModel.Status.IN_PROGRESS);
        List<TaskModel> tasks = executionDAO.createTasks(Collections.singletonList(task));
        assertNotNull(tasks);
        assertEquals(1, tasks.size());

        executionDAO.correlateTaskToWorkflowInDS(taskId, workflowId);
        tasks = executionDAO.getTasksForWorkflow(workflowId);
        assertNotNull(tasks);
        assertEquals(workflowId, tasks.get(0).getWorkflowInstanceId());
        assertEquals(taskId, tasks.get(0).getTaskId());
    }

    @Override
    protected ExecutionDAO getExecutionDAO() {
        return executionDAO;
    }
}
| 6,557 |
0 | Create_ds/conductor/redis-persistence/src/test/java/com/netflix/conductor/redis | Create_ds/conductor/redis-persistence/src/test/java/com/netflix/conductor/redis/dao/BaseDynoDAOTest.java | /*
* Copyright 2020 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.dao;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mock;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.redis.config.RedisProperties;
import com.netflix.conductor.redis.jedis.JedisProxy;
import com.fasterxml.jackson.databind.ObjectMapper;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class BaseDynoDAOTest {

    // FIX: these two fields were annotated @Mock but the class never ran a Mockito
    // runner (or MockitoAnnotations.initMocks), so they were null at test time.
    // They are now created with mock(...) in setUp, consistent with the other mocks.
    private JedisProxy jedisProxy;
    private ObjectMapper objectMapper;

    private RedisProperties properties;
    private ConductorProperties conductorProperties;
    private BaseDynoDAO baseDynoDAO;

    @Before
    public void setUp() {
        jedisProxy = mock(JedisProxy.class);
        objectMapper = mock(ObjectMapper.class);
        properties = mock(RedisProperties.class);
        conductorProperties = mock(ConductorProperties.class);
        this.baseDynoDAO =
                new BaseDynoDAO(jedisProxy, objectMapper, conductorProperties, properties);
    }

    /** nsKey must join the optional namespace prefix, stack, and key parts with dots. */
    @Test
    public void testNsKey() {
        assertEquals("", baseDynoDAO.nsKey());
        String[] keys = {"key1", "key2"};
        assertEquals("key1.key2", baseDynoDAO.nsKey(keys));
        when(properties.getWorkflowNamespacePrefix()).thenReturn("test");
        assertEquals("test", baseDynoDAO.nsKey());
        assertEquals("test.key1.key2", baseDynoDAO.nsKey(keys));
        when(conductorProperties.getStack()).thenReturn("stack");
        assertEquals("test.stack.key1.key2", baseDynoDAO.nsKey(keys));
    }
}
| 6,558 |
0 | Create_ds/conductor/redis-persistence/src/test/java/com/netflix/conductor/redis | Create_ds/conductor/redis-persistence/src/test/java/com/netflix/conductor/redis/dao/DynoQueueDAOTest.java | /*
* Copyright 2020 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.dao;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import com.netflix.conductor.dao.QueueDAO;
import com.netflix.conductor.redis.config.RedisProperties;
import com.netflix.conductor.redis.dynoqueue.RedisQueuesShardingStrategyProvider;
import com.netflix.conductor.redis.jedis.JedisMock;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.queues.ShardSupplier;
import com.netflix.dyno.queues.redis.RedisQueues;
import com.netflix.dyno.queues.redis.sharding.ShardingStrategy;
import redis.clients.jedis.commands.JedisCommands;
import static com.netflix.conductor.redis.dynoqueue.RedisQueuesShardingStrategyProvider.LOCAL_ONLY_STRATEGY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class DynoQueueDAOTest {

    private QueueDAO queueDAO;

    /** Builds a single-shard ("a"), locally-sharded queue DAO over an in-memory Jedis mock. */
    @Before
    public void init() {
        RedisProperties properties = mock(RedisProperties.class);
        when(properties.getQueueShardingStrategy()).thenReturn(LOCAL_ONLY_STRATEGY);
        JedisCommands jedisMock = new JedisMock();
        ShardSupplier shardSupplier =
                new ShardSupplier() {
                    @Override
                    public Set<String> getQueueShards() {
                        return new HashSet<>(Collections.singletonList("a"));
                    }

                    @Override
                    public String getCurrentShard() {
                        return "a";
                    }

                    @Override
                    public String getShardForHost(Host host) {
                        return "a";
                    }
                };
        ShardingStrategy shardingStrategy =
                new RedisQueuesShardingStrategyProvider(shardSupplier, properties).get();
        RedisQueues redisQueues =
                new RedisQueues(
                        jedisMock, jedisMock, "", shardSupplier, 60_000, 60_000, shardingStrategy);
        queueDAO = new DynoQueueDAO(redisQueues);
    }

    // FIX: removed an unused `@Rule public ExpectedException expected` field; it was never
    // configured by any test, and ExpectedException is deprecated as of JUnit 4.13.

    /** End-to-end exercise of push/pop/ack/remove/flush and the queue detail views. */
    @Test
    public void test() {
        String queueName = "TestQueue";
        long offsetTimeInSecond = 0;

        // Push 10 messages and verify both the size and the summary detail view.
        for (int i = 0; i < 10; i++) {
            String messageId = "msg" + i;
            queueDAO.push(queueName, messageId, offsetTimeInSecond);
        }
        int size = queueDAO.getSize(queueName);
        assertEquals(10, size);
        Map<String, Long> details = queueDAO.queuesDetail();
        assertEquals(1, details.size());
        assertEquals(10L, details.get(queueName).longValue());

        // pushIfNotExists must not create duplicates for already-queued ids.
        for (int i = 0; i < 10; i++) {
            String messageId = "msg" + i;
            queueDAO.pushIfNotExists(queueName, messageId, offsetTimeInSecond);
        }

        // Popping moves all 10 messages into the unacked set of shard "a".
        List<String> popped = queueDAO.pop(queueName, 10, 100);
        assertNotNull(popped);
        assertEquals(10, popped.size());

        Map<String, Map<String, Map<String, Long>>> verbose = queueDAO.queuesDetailVerbose();
        assertEquals(1, verbose.size());
        long shardSize = verbose.get(queueName).get("a").get("size");
        long unackedSize = verbose.get(queueName).get("a").get("uacked");
        assertEquals(0, shardSize);
        assertEquals(10, unackedSize);

        // Acking drains the unacked set.
        popped.forEach(messageId -> queueDAO.ack(queueName, messageId));
        verbose = queueDAO.queuesDetailVerbose();
        assertEquals(1, verbose.size());
        shardSize = verbose.get(queueName).get("a").get("size");
        unackedSize = verbose.get(queueName).get("a").get("uacked");
        assertEquals(0, shardSize);
        assertEquals(0, unackedSize);

        popped = queueDAO.pop(queueName, 10, 100);
        assertNotNull(popped);
        assertEquals(0, popped.size());

        // Re-populate, then remove the messages one by one.
        for (int i = 0; i < 10; i++) {
            String messageId = "msg" + i;
            queueDAO.pushIfNotExists(queueName, messageId, offsetTimeInSecond);
        }
        size = queueDAO.getSize(queueName);
        assertEquals(10, size);
        for (int i = 0; i < 10; i++) {
            String messageId = "msg" + i;
            queueDAO.remove(queueName, messageId);
        }
        size = queueDAO.getSize(queueName);
        assertEquals(0, size);

        // Re-populate and verify flush() empties the queue in one shot.
        for (int i = 0; i < 10; i++) {
            String messageId = "msg" + i;
            queueDAO.pushIfNotExists(queueName, messageId, offsetTimeInSecond);
        }
        queueDAO.flush(queueName);
        size = queueDAO.getSize(queueName);
        assertEquals(0, size);
    }
}
| 6,559 |
0 | Create_ds/conductor/redis-persistence/src/test/java/com/netflix/conductor/redis | Create_ds/conductor/redis-persistence/src/test/java/com/netflix/conductor/redis/dao/RedisRateLimitDAOTest.java | /*
* Copyright 2021 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.dao;
import java.util.UUID;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.redis.config.RedisProperties;
import com.netflix.conductor.redis.jedis.JedisMock;
import com.netflix.conductor.redis.jedis.JedisProxy;
import com.fasterxml.jackson.databind.ObjectMapper;
import redis.clients.jedis.commands.JedisCommands;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
@ContextConfiguration(classes = {TestObjectMapperConfiguration.class})
@RunWith(SpringRunner.class)
public class RedisRateLimitDAOTest {

    private RedisRateLimitingDAO rateLimitingDao;

    @Autowired private ObjectMapper objectMapper;

    @Before
    public void init() {
        rateLimitingDao =
                new RedisRateLimitingDAO(
                        new JedisProxy(new JedisMock()),
                        objectMapper,
                        mock(ConductorProperties.class),
                        mock(RedisProperties.class));
    }

    /** Builds a task bound to the given definition with a random task id. */
    private static TaskModel taskFor(TaskDef taskDef) {
        TaskModel task = new TaskModel();
        task.setTaskId(UUID.randomUUID().toString());
        task.setTaskDefName(taskDef.getName());
        return task;
    }

    /** With no rate limit configured, the task must never be reported as over the limit. */
    @Test
    public void testExceedsRateLimitWhenNoRateLimitSet() {
        TaskDef taskDef = new TaskDef("TestTaskDefinition");
        assertFalse(rateLimitingDao.exceedsRateLimitPerFrequency(taskFor(taskDef), taskDef));
    }

    /** A single invocation well under the configured limit must pass. */
    @Test
    public void testExceedsRateLimitWithinLimit() {
        TaskDef taskDef = new TaskDef("TestTaskDefinition");
        taskDef.setRateLimitFrequencyInSeconds(60);
        taskDef.setRateLimitPerFrequency(20);
        assertFalse(rateLimitingDao.exceedsRateLimitPerFrequency(taskFor(taskDef), taskDef));
    }

    /** With a limit of 1 per 60s, the second invocation must be rejected. */
    @Test
    public void testExceedsRateLimitOutOfLimit() {
        TaskDef taskDef = new TaskDef("TestTaskDefinition");
        taskDef.setRateLimitFrequencyInSeconds(60);
        taskDef.setRateLimitPerFrequency(1);
        TaskModel task = taskFor(taskDef);
        assertFalse(rateLimitingDao.exceedsRateLimitPerFrequency(task, taskDef));
        assertTrue(rateLimitingDao.exceedsRateLimitPerFrequency(task, taskDef));
    }
}
| 6,560 |
0 | Create_ds/conductor/redis-persistence/src/test/java/com/netflix/conductor/redis | Create_ds/conductor/redis-persistence/src/test/java/com/netflix/conductor/redis/dao/RedisEventHandlerDAOTest.java | /*
* Copyright 2021 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.dao;
import java.util.List;
import java.util.UUID;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.common.metadata.events.EventHandler.Action;
import com.netflix.conductor.common.metadata.events.EventHandler.Action.Type;
import com.netflix.conductor.common.metadata.events.EventHandler.StartWorkflow;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.redis.config.RedisProperties;
import com.netflix.conductor.redis.jedis.JedisMock;
import com.netflix.conductor.redis.jedis.JedisProxy;
import com.fasterxml.jackson.databind.ObjectMapper;
import redis.clients.jedis.commands.JedisCommands;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.mockito.Mockito.mock;
@ContextConfiguration(classes = {TestObjectMapperConfiguration.class})
@RunWith(SpringRunner.class)
public class RedisEventHandlerDAOTest {

    /** DAO under test, backed by an in-memory Jedis mock. */
    private RedisEventHandlerDAO eventHandlerDAO;

    @Autowired private ObjectMapper objectMapper;

    @Before
    public void init() {
        ConductorProperties conductorProperties = mock(ConductorProperties.class);
        RedisProperties redisProperties = mock(RedisProperties.class);
        JedisCommands jedisCommands = new JedisMock();
        eventHandlerDAO =
                new RedisEventHandlerDAO(
                        new JedisProxy(jedisCommands),
                        objectMapper,
                        conductorProperties,
                        redisProperties);
    }

    @Test
    public void testEventHandlers() {
        String firstEvent = "SQS::arn:account090:sqstest1";
        String secondEvent = "SQS::arn:account090:sqstest2";

        // Register an inactive handler that starts a workflow on the first event.
        EventHandler handler = new EventHandler();
        handler.setName(UUID.randomUUID().toString());
        handler.setActive(false);
        Action startWorkflowAction = new Action();
        startWorkflowAction.setAction(Type.start_workflow);
        startWorkflowAction.setStart_workflow(new StartWorkflow());
        startWorkflowAction.getStart_workflow().setName("test_workflow");
        handler.getActions().add(startWorkflowAction);
        handler.setEvent(firstEvent);
        eventHandlerDAO.addEventHandler(handler);

        // The handler is persisted and retrievable.
        List<EventHandler> storedHandlers = eventHandlerDAO.getAllEventHandlers();
        assertNotNull(storedHandlers);
        assertEquals(1, storedHandlers.size());
        assertEquals(handler.getName(), storedHandlers.get(0).getName());
        assertEquals(handler.getEvent(), storedHandlers.get(0).getEvent());

        // Inactive handlers are filtered out when asking for active handlers only.
        List<EventHandler> activeHandlers =
                eventHandlerDAO.getEventHandlersForEvent(firstEvent, true);
        assertNotNull(activeHandlers);
        assertEquals(0, activeHandlers.size());

        // Activate the handler and move it to the second event.
        handler.setActive(true);
        handler.setEvent(secondEvent);
        eventHandlerDAO.updateEventHandler(handler);

        storedHandlers = eventHandlerDAO.getAllEventHandlers();
        assertNotNull(storedHandlers);
        assertEquals(1, storedHandlers.size());

        // The first event no longer has active handlers...
        activeHandlers = eventHandlerDAO.getEventHandlersForEvent(firstEvent, true);
        assertNotNull(activeHandlers);
        assertEquals(0, activeHandlers.size());

        // ...while the second event now does.
        activeHandlers = eventHandlerDAO.getEventHandlersForEvent(secondEvent, true);
        assertNotNull(activeHandlers);
        assertEquals(1, activeHandlers.size());
    }
}
| 6,561 |
0 | Create_ds/conductor/redis-persistence/src/test/java/com/netflix/conductor/redis | Create_ds/conductor/redis-persistence/src/test/java/com/netflix/conductor/redis/jedis/ConfigurationHostSupplierTest.java | /*
* Copyright 2020 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.jedis;
import java.util.List;
import org.junit.Before;
import org.junit.Test;
import com.netflix.conductor.redis.config.RedisProperties;
import com.netflix.conductor.redis.dynoqueue.ConfigurationHostSupplier;
import com.netflix.dyno.connectionpool.Host;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class ConfigurationHostSupplierTest {

    private RedisProperties properties;
    private ConfigurationHostSupplier hostSupplier;

    @Before
    public void setUp() {
        properties = mock(RedisProperties.class);
        hostSupplier = new ConfigurationHostSupplier(properties);
    }

    @Test
    public void getHost() {
        // A single host in "hostname:port:rack" form.
        when(properties.getHosts()).thenReturn("dyno1:8102:us-east-1c");

        List<Host> hosts = hostSupplier.getHosts();
        assertEquals(1, hosts.size());

        Host host = hosts.get(0);
        assertEquals("dyno1", host.getHostName());
        assertEquals(8102, host.getPort());
        assertEquals("us-east-1c", host.getRack());
        assertTrue(host.isUp());
    }

    @Test
    public void getMultipleHosts() {
        // Multiple hosts are separated by semicolons.
        when(properties.getHosts()).thenReturn("dyno1:8102:us-east-1c;dyno2:8103:us-east-1c");

        List<Host> hosts = hostSupplier.getHosts();
        assertEquals(2, hosts.size());

        Host first = hosts.get(0);
        assertEquals("dyno1", first.getHostName());
        assertEquals(8102, first.getPort());
        assertEquals("us-east-1c", first.getRack());
        assertTrue(first.isUp());

        Host second = hosts.get(1);
        assertEquals("dyno2", second.getHostName());
        assertEquals(8103, second.getPort());
        assertEquals("us-east-1c", second.getRack());
        assertTrue(second.isUp());
    }

    @Test
    public void getAuthenticatedHost() {
        // An optional fourth token carries the password.
        when(properties.getHosts()).thenReturn("redis1:6432:us-east-1c:password");

        List<Host> hosts = hostSupplier.getHosts();
        assertEquals(1, hosts.size());

        Host host = hosts.get(0);
        assertEquals("redis1", host.getHostName());
        assertEquals(6432, host.getPort());
        assertEquals("us-east-1c", host.getRack());
        assertEquals("password", host.getPassword());
        assertTrue(host.isUp());
    }
}
| 6,562 |
0 | Create_ds/conductor/redis-persistence/src/test/java/com/netflix/conductor/redis | Create_ds/conductor/redis-persistence/src/test/java/com/netflix/conductor/redis/jedis/JedisClusterTest.java | /*
* Copyright 2020 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.jedis;
import java.util.AbstractMap;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import org.junit.Test;
import org.mockito.Mockito;
import redis.clients.jedis.GeoUnit;
import redis.clients.jedis.ListPosition;
import redis.clients.jedis.ScanParams;
import redis.clients.jedis.ScanResult;
import redis.clients.jedis.SortingParams;
import redis.clients.jedis.params.GeoRadiusParam;
import redis.clients.jedis.params.SetParams;
import redis.clients.jedis.params.ZAddParams;
import redis.clients.jedis.params.ZIncrByParams;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class JedisClusterTest {
private final redis.clients.jedis.JedisCluster mockCluster =
mock(redis.clients.jedis.JedisCluster.class);
private final JedisCluster jedisCluster = new JedisCluster(mockCluster);
@Test
public void testSet() {
jedisCluster.set("key", "value");
jedisCluster.set("key", "value", SetParams.setParams());
}
@Test
public void testGet() {
jedisCluster.get("key");
}
@Test
public void testExists() {
jedisCluster.exists("key");
}
@Test
public void testPersist() {
jedisCluster.persist("key");
}
@Test
public void testType() {
jedisCluster.type("key");
}
@Test
public void testExpire() {
jedisCluster.expire("key", 1337);
}
@Test
public void testPexpire() {
jedisCluster.pexpire("key", 1337);
}
@Test
public void testExpireAt() {
jedisCluster.expireAt("key", 1337);
}
@Test
public void testPexpireAt() {
jedisCluster.pexpireAt("key", 1337);
}
@Test
public void testTtl() {
jedisCluster.ttl("key");
}
@Test
public void testPttl() {
jedisCluster.pttl("key");
}
@Test
public void testSetbit() {
jedisCluster.setbit("key", 1337, "value");
jedisCluster.setbit("key", 1337, true);
}
@Test
public void testGetbit() {
jedisCluster.getbit("key", 1337);
}
@Test
public void testSetrange() {
jedisCluster.setrange("key", 1337, "value");
}
@Test
public void testGetrange() {
jedisCluster.getrange("key", 1337, 1338);
}
@Test
public void testGetSet() {
jedisCluster.getSet("key", "value");
}
@Test
public void testSetnx() {
jedisCluster.setnx("test", "value");
}
@Test
public void testSetex() {
jedisCluster.setex("key", 1337, "value");
}
@Test
public void testPsetex() {
jedisCluster.psetex("key", 1337, "value");
}
@Test
public void testDecrBy() {
jedisCluster.decrBy("key", 1337);
}
@Test
public void testDecr() {
jedisCluster.decr("key");
}
@Test
public void testIncrBy() {
jedisCluster.incrBy("key", 1337);
}
@Test
public void testIncrByFloat() {
jedisCluster.incrByFloat("key", 1337);
}
@Test
public void testIncr() {
jedisCluster.incr("key");
}
@Test
public void testAppend() {
jedisCluster.append("key", "value");
}
@Test
public void testSubstr() {
jedisCluster.substr("key", 1337, 1338);
}
@Test
public void testHset() {
jedisCluster.hset("key", "field", "value");
}
@Test
public void testHget() {
jedisCluster.hget("key", "field");
}
@Test
public void testHsetnx() {
jedisCluster.hsetnx("key", "field", "value");
}
@Test
public void testHmset() {
jedisCluster.hmset("key", new HashMap<>());
}
@Test
public void testHmget() {
jedisCluster.hmget("key", "fields");
}
@Test
public void testHincrBy() {
jedisCluster.hincrBy("key", "field", 1337);
}
@Test
public void testHincrByFloat() {
jedisCluster.hincrByFloat("key", "field", 1337);
}
@Test
public void testHexists() {
jedisCluster.hexists("key", "field");
}
@Test
public void testHdel() {
jedisCluster.hdel("key", "field");
}
@Test
public void testHlen() {
jedisCluster.hlen("key");
}
@Test
public void testHkeys() {
jedisCluster.hkeys("key");
}
@Test
public void testHvals() {
jedisCluster.hvals("key");
}
@Test
public void testGgetAll() {
jedisCluster.hgetAll("key");
}
@Test
public void testRpush() {
jedisCluster.rpush("key", "string");
}
@Test
public void testLpush() {
jedisCluster.lpush("key", "string");
}
@Test
public void testLlen() {
jedisCluster.llen("key");
}
@Test
public void testLrange() {
jedisCluster.lrange("key", 1337, 1338);
}
@Test
public void testLtrim() {
jedisCluster.ltrim("key", 1337, 1338);
}
@Test
public void testLindex() {
jedisCluster.lindex("key", 1337);
}
@Test
public void testLset() {
jedisCluster.lset("key", 1337, "value");
}
@Test
public void testLrem() {
jedisCluster.lrem("key", 1337, "value");
}
@Test
public void testLpop() {
jedisCluster.lpop("key");
}
@Test
public void testRpop() {
jedisCluster.rpop("key");
}
@Test
public void testSadd() {
jedisCluster.sadd("key", "member");
}
@Test
public void testSmembers() {
jedisCluster.smembers("key");
}
@Test
public void testSrem() {
jedisCluster.srem("key", "member");
}
@Test
public void testSpop() {
jedisCluster.spop("key");
jedisCluster.spop("key", 1337);
}
@Test
public void testScard() {
jedisCluster.scard("key");
}
@Test
public void testSismember() {
jedisCluster.sismember("key", "member");
}
@Test
public void testSrandmember() {
jedisCluster.srandmember("key");
jedisCluster.srandmember("key", 1337);
}
@Test
public void testStrlen() {
jedisCluster.strlen("key");
}
@Test
public void testZadd() {
jedisCluster.zadd("key", new HashMap<>());
jedisCluster.zadd("key", new HashMap<>(), ZAddParams.zAddParams());
jedisCluster.zadd("key", 1337, "members");
jedisCluster.zadd("key", 1337, "members", ZAddParams.zAddParams());
}
@Test
public void testZrange() {
jedisCluster.zrange("key", 1337, 1338);
}
@Test
public void testZrem() {
jedisCluster.zrem("key", "member");
}
@Test
public void testZincrby() {
jedisCluster.zincrby("key", 1337, "member");
jedisCluster.zincrby("key", 1337, "member", ZIncrByParams.zIncrByParams());
}
@Test
public void testZrank() {
jedisCluster.zrank("key", "member");
}
@Test
public void testZrevrank() {
jedisCluster.zrevrank("key", "member");
}
@Test
public void testZrevrange() {
jedisCluster.zrevrange("key", 1337, 1338);
}
@Test
public void testZrangeWithScores() {
jedisCluster.zrangeWithScores("key", 1337, 1338);
}
@Test
public void testZrevrangeWithScores() {
jedisCluster.zrevrangeWithScores("key", 1337, 1338);
}
@Test
public void testZcard() {
jedisCluster.zcard("key");
}
@Test
public void testZscore() {
jedisCluster.zscore("key", "member");
}
@Test
public void testSort() {
jedisCluster.sort("key");
jedisCluster.sort("key", new SortingParams());
}
@Test
public void testZcount() {
jedisCluster.zcount("key", "min", "max");
jedisCluster.zcount("key", 1337, 1338);
}
@Test
public void testZrangeByScore() {
jedisCluster.zrangeByScore("key", "min", "max");
jedisCluster.zrangeByScore("key", 1337, 1338);
jedisCluster.zrangeByScore("key", "min", "max", 1337, 1338);
jedisCluster.zrangeByScore("key", 1337, 1338, 1339, 1340);
}
@Test
public void testZrevrangeByScore() {
jedisCluster.zrevrangeByScore("key", "max", "min");
jedisCluster.zrevrangeByScore("key", 1337, 1338);
jedisCluster.zrevrangeByScore("key", "max", "min", 1337, 1338);
jedisCluster.zrevrangeByScore("key", 1337, 1338, 1339, 1340);
}
@Test
public void testZrangeByScoreWithScores() {
jedisCluster.zrangeByScoreWithScores("key", "min", "max");
jedisCluster.zrangeByScoreWithScores("key", "min", "max", 1337, 1338);
jedisCluster.zrangeByScoreWithScores("key", 1337, 1338);
jedisCluster.zrangeByScoreWithScores("key", 1337, 1338, 1339, 1340);
}
@Test
public void testZrevrangeByScoreWithScores() {
jedisCluster.zrevrangeByScoreWithScores("key", "max", "min");
jedisCluster.zrevrangeByScoreWithScores("key", "max", "min", 1337, 1338);
jedisCluster.zrevrangeByScoreWithScores("key", 1337, 1338);
jedisCluster.zrevrangeByScoreWithScores("key", 1337, 1338, 1339, 1340);
}
@Test
public void testZremrangeByRank() {
jedisCluster.zremrangeByRank("key", 1337, 1338);
}
@Test
public void testZremrangeByScore() {
jedisCluster.zremrangeByScore("key", "start", "end");
jedisCluster.zremrangeByScore("key", 1337, 1338);
}
@Test
public void testZlexcount() {
jedisCluster.zlexcount("key", "min", "max");
}
@Test
public void testZrangeByLex() {
jedisCluster.zrangeByLex("key", "min", "max");
jedisCluster.zrangeByLex("key", "min", "max", 1337, 1338);
}
@Test
public void testZrevrangeByLex() {
jedisCluster.zrevrangeByLex("key", "max", "min");
jedisCluster.zrevrangeByLex("key", "max", "min", 1337, 1338);
}
@Test
public void testZremrangeByLex() {
jedisCluster.zremrangeByLex("key", "min", "max");
}
@Test
public void testLinsert() {
jedisCluster.linsert("key", ListPosition.AFTER, "pivot", "value");
}
@Test
public void testLpushx() {
jedisCluster.lpushx("key", "string");
}
@Test
public void testRpushx() {
jedisCluster.rpushx("key", "string");
}
@Test
public void testBlpop() {
jedisCluster.blpop(1337, "arg");
}
@Test
public void testBrpop() {
jedisCluster.brpop(1337, "arg");
}
@Test
public void testDel() {
jedisCluster.del("key");
}
@Test
public void testEcho() {
jedisCluster.echo("string");
}
@Test(expected = UnsupportedOperationException.class)
public void testMove() {
jedisCluster.move("key", 1337);
}
@Test
public void testBitcount() {
jedisCluster.bitcount("key");
jedisCluster.bitcount("key", 1337, 1338);
}
@Test(expected = UnsupportedOperationException.class)
public void testBitpos() {
jedisCluster.bitpos("key", true);
}
@Test
public void testHscan() {
jedisCluster.hscan("key", "cursor");
ScanResult<Entry<byte[], byte[]>> scanResult =
new ScanResult<>(
"cursor".getBytes(),
Arrays.asList(
new AbstractMap.SimpleEntry<>("key1".getBytes(), "val1".getBytes()),
new AbstractMap.SimpleEntry<>(
"key2".getBytes(), "val2".getBytes())));
when(mockCluster.hscan(Mockito.any(), Mockito.any(), Mockito.any(ScanParams.class)))
.thenReturn(scanResult);
ScanResult<Map.Entry<String, String>> result =
jedisCluster.hscan("key", "cursor", new ScanParams());
assertEquals("cursor", result.getCursor());
assertEquals(2, result.getResult().size());
assertEquals("val1", result.getResult().get(0).getValue());
}
@Test
public void testSscan() {
jedisCluster.sscan("key", "cursor");
ScanResult<byte[]> scanResult =
new ScanResult<>(
"sscursor".getBytes(), Arrays.asList("val1".getBytes(), "val2".getBytes()));
when(mockCluster.sscan(Mockito.any(), Mockito.any(), Mockito.any(ScanParams.class)))
.thenReturn(scanResult);
ScanResult<String> result = jedisCluster.sscan("key", "cursor", new ScanParams());
assertEquals("sscursor", result.getCursor());
assertEquals(2, result.getResult().size());
assertEquals("val1", result.getResult().get(0));
}
@Test
public void testZscan() {
jedisCluster.zscan("key", "cursor");
jedisCluster.zscan("key", "cursor", new ScanParams());
}
@Test
public void testPfadd() {
jedisCluster.pfadd("key", "elements");
}
@Test
public void testPfcount() {
jedisCluster.pfcount("key");
}
@Test
public void testGeoadd() {
jedisCluster.geoadd("key", new HashMap<>());
jedisCluster.geoadd("key", 1337, 1338, "member");
}
@Test
public void testGeodist() {
jedisCluster.geodist("key", "member1", "member2");
jedisCluster.geodist("key", "member1", "member2", GeoUnit.KM);
}
@Test
public void testGeohash() {
jedisCluster.geohash("key", "members");
}
@Test
public void testGeopos() {
jedisCluster.geopos("key", "members");
}
@Test
public void testGeoradius() {
jedisCluster.georadius("key", 1337, 1338, 32, GeoUnit.KM);
jedisCluster.georadius("key", 1337, 1338, 32, GeoUnit.KM, GeoRadiusParam.geoRadiusParam());
}
@Test
public void testGeoradiusByMember() {
jedisCluster.georadiusByMember("key", "member", 1337, GeoUnit.KM);
jedisCluster.georadiusByMember(
"key", "member", 1337, GeoUnit.KM, GeoRadiusParam.geoRadiusParam());
}
@Test
public void testBitfield() {
jedisCluster.bitfield("key", "arguments");
}
}
| 6,563 |
0 | Create_ds/conductor/redis-persistence/src/test/java/com/netflix/conductor/redis | Create_ds/conductor/redis-persistence/src/test/java/com/netflix/conductor/redis/jedis/JedisSentinelTest.java | /*
* Copyright 2020 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.jedis;
import java.util.HashMap;
import org.junit.Before;
import org.junit.Test;
import redis.clients.jedis.GeoUnit;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisSentinelPool;
import redis.clients.jedis.ListPosition;
import redis.clients.jedis.ScanParams;
import redis.clients.jedis.SortingParams;
import redis.clients.jedis.params.GeoRadiusParam;
import redis.clients.jedis.params.SetParams;
import redis.clients.jedis.params.ZAddParams;
import redis.clients.jedis.params.ZIncrByParams;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Exercises the delegating methods of the {@code JedisSentinel} wrapper. A mocked
 * {@link JedisSentinelPool} hands out a mocked {@link Jedis} resource, and each test
 * verifies that the corresponding wrapper call completes without throwing.
 */
public class JedisSentinelTest {

    private final Jedis resource = mock(Jedis.class);
    private final JedisSentinelPool pool = mock(JedisSentinelPool.class);
    private final JedisSentinel sentinel = new JedisSentinel(pool);

    @Before
    public void init() {
        when(this.pool.getResource()).thenReturn(this.resource);
    }

    @Test
    public void testSet() {
        sentinel.set("key", "value");
        sentinel.set("key", "value", SetParams.setParams());
    }

    @Test
    public void testGet() {
        sentinel.get("key");
    }

    @Test
    public void testExists() {
        sentinel.exists("key");
    }

    @Test
    public void testPersist() {
        sentinel.persist("key");
    }

    @Test
    public void testType() {
        sentinel.type("key");
    }

    @Test
    public void testExpire() {
        sentinel.expire("key", 1337);
    }

    @Test
    public void testPexpire() {
        sentinel.pexpire("key", 1337);
    }

    @Test
    public void testExpireAt() {
        sentinel.expireAt("key", 1337);
    }

    @Test
    public void testPexpireAt() {
        sentinel.pexpireAt("key", 1337);
    }

    @Test
    public void testTtl() {
        sentinel.ttl("key");
    }

    @Test
    public void testPttl() {
        sentinel.pttl("key");
    }

    @Test
    public void testSetbit() {
        sentinel.setbit("key", 1337, "value");
        sentinel.setbit("key", 1337, true);
    }

    @Test
    public void testGetbit() {
        sentinel.getbit("key", 1337);
    }

    @Test
    public void testSetrange() {
        sentinel.setrange("key", 1337, "value");
    }

    @Test
    public void testGetrange() {
        sentinel.getrange("key", 1337, 1338);
    }

    @Test
    public void testGetSet() {
        sentinel.getSet("key", "value");
    }

    @Test
    public void testSetnx() {
        sentinel.setnx("test", "value");
    }

    @Test
    public void testSetex() {
        sentinel.setex("key", 1337, "value");
    }

    @Test
    public void testPsetex() {
        sentinel.psetex("key", 1337, "value");
    }

    @Test
    public void testDecrBy() {
        sentinel.decrBy("key", 1337);
    }

    @Test
    public void testDecr() {
        sentinel.decr("key");
    }

    @Test
    public void testIncrBy() {
        sentinel.incrBy("key", 1337);
    }

    @Test
    public void testIncrByFloat() {
        sentinel.incrByFloat("key", 1337);
    }

    @Test
    public void testIncr() {
        sentinel.incr("key");
    }

    @Test
    public void testAppend() {
        sentinel.append("key", "value");
    }

    @Test
    public void testSubstr() {
        sentinel.substr("key", 1337, 1338);
    }

    @Test
    public void testHset() {
        sentinel.hset("key", "field", "value");
    }

    @Test
    public void testHget() {
        sentinel.hget("key", "field");
    }

    @Test
    public void testHsetnx() {
        sentinel.hsetnx("key", "field", "value");
    }

    @Test
    public void testHmset() {
        sentinel.hmset("key", new HashMap<>());
    }

    @Test
    public void testHmget() {
        sentinel.hmget("key", "fields");
    }

    @Test
    public void testHincrBy() {
        sentinel.hincrBy("key", "field", 1337);
    }

    @Test
    public void testHincrByFloat() {
        sentinel.hincrByFloat("key", "field", 1337);
    }

    @Test
    public void testHexists() {
        sentinel.hexists("key", "field");
    }

    @Test
    public void testHdel() {
        sentinel.hdel("key", "field");
    }

    @Test
    public void testHlen() {
        sentinel.hlen("key");
    }

    @Test
    public void testHkeys() {
        sentinel.hkeys("key");
    }

    @Test
    public void testHvals() {
        sentinel.hvals("key");
    }

    @Test
    public void testGgetAll() {
        sentinel.hgetAll("key");
    }

    @Test
    public void testRpush() {
        sentinel.rpush("key", "string");
    }

    @Test
    public void testLpush() {
        sentinel.lpush("key", "string");
    }

    @Test
    public void testLlen() {
        sentinel.llen("key");
    }

    @Test
    public void testLrange() {
        sentinel.lrange("key", 1337, 1338);
    }

    @Test
    public void testLtrim() {
        sentinel.ltrim("key", 1337, 1338);
    }

    @Test
    public void testLindex() {
        sentinel.lindex("key", 1337);
    }

    @Test
    public void testLset() {
        sentinel.lset("key", 1337, "value");
    }

    @Test
    public void testLrem() {
        sentinel.lrem("key", 1337, "value");
    }

    @Test
    public void testLpop() {
        sentinel.lpop("key");
    }

    @Test
    public void testRpop() {
        sentinel.rpop("key");
    }

    @Test
    public void testSadd() {
        sentinel.sadd("key", "member");
    }

    @Test
    public void testSmembers() {
        sentinel.smembers("key");
    }

    @Test
    public void testSrem() {
        sentinel.srem("key", "member");
    }

    @Test
    public void testSpop() {
        sentinel.spop("key");
        sentinel.spop("key", 1337);
    }

    @Test
    public void testScard() {
        sentinel.scard("key");
    }

    @Test
    public void testSismember() {
        sentinel.sismember("key", "member");
    }

    @Test
    public void testSrandmember() {
        sentinel.srandmember("key");
        sentinel.srandmember("key", 1337);
    }

    @Test
    public void testStrlen() {
        sentinel.strlen("key");
    }

    @Test
    public void testZadd() {
        sentinel.zadd("key", new HashMap<>());
        sentinel.zadd("key", new HashMap<>(), ZAddParams.zAddParams());
        sentinel.zadd("key", 1337, "members");
        sentinel.zadd("key", 1337, "members", ZAddParams.zAddParams());
    }

    @Test
    public void testZrange() {
        sentinel.zrange("key", 1337, 1338);
    }

    @Test
    public void testZrem() {
        sentinel.zrem("key", "member");
    }

    @Test
    public void testZincrby() {
        sentinel.zincrby("key", 1337, "member");
        sentinel.zincrby("key", 1337, "member", ZIncrByParams.zIncrByParams());
    }

    @Test
    public void testZrank() {
        sentinel.zrank("key", "member");
    }

    @Test
    public void testZrevrank() {
        sentinel.zrevrank("key", "member");
    }

    @Test
    public void testZrevrange() {
        sentinel.zrevrange("key", 1337, 1338);
    }

    @Test
    public void testZrangeWithScores() {
        sentinel.zrangeWithScores("key", 1337, 1338);
    }

    @Test
    public void testZrevrangeWithScores() {
        sentinel.zrevrangeWithScores("key", 1337, 1338);
    }

    @Test
    public void testZcard() {
        sentinel.zcard("key");
    }

    @Test
    public void testZscore() {
        sentinel.zscore("key", "member");
    }

    @Test
    public void testSort() {
        sentinel.sort("key");
        sentinel.sort("key", new SortingParams());
    }

    @Test
    public void testZcount() {
        sentinel.zcount("key", "min", "max");
        sentinel.zcount("key", 1337, 1338);
    }

    @Test
    public void testZrangeByScore() {
        sentinel.zrangeByScore("key", "min", "max");
        sentinel.zrangeByScore("key", 1337, 1338);
        sentinel.zrangeByScore("key", "min", "max", 1337, 1338);
        sentinel.zrangeByScore("key", 1337, 1338, 1339, 1340);
    }

    @Test
    public void testZrevrangeByScore() {
        sentinel.zrevrangeByScore("key", "max", "min");
        sentinel.zrevrangeByScore("key", 1337, 1338);
        sentinel.zrevrangeByScore("key", "max", "min", 1337, 1338);
        sentinel.zrevrangeByScore("key", 1337, 1338, 1339, 1340);
    }

    @Test
    public void testZrangeByScoreWithScores() {
        sentinel.zrangeByScoreWithScores("key", "min", "max");
        sentinel.zrangeByScoreWithScores("key", "min", "max", 1337, 1338);
        sentinel.zrangeByScoreWithScores("key", 1337, 1338);
        sentinel.zrangeByScoreWithScores("key", 1337, 1338, 1339, 1340);
    }

    @Test
    public void testZrevrangeByScoreWithScores() {
        sentinel.zrevrangeByScoreWithScores("key", "max", "min");
        sentinel.zrevrangeByScoreWithScores("key", "max", "min", 1337, 1338);
        sentinel.zrevrangeByScoreWithScores("key", 1337, 1338);
        sentinel.zrevrangeByScoreWithScores("key", 1337, 1338, 1339, 1340);
    }

    @Test
    public void testZremrangeByRank() {
        sentinel.zremrangeByRank("key", 1337, 1338);
    }

    @Test
    public void testZremrangeByScore() {
        sentinel.zremrangeByScore("key", "start", "end");
        sentinel.zremrangeByScore("key", 1337, 1338);
    }

    @Test
    public void testZlexcount() {
        sentinel.zlexcount("key", "min", "max");
    }

    @Test
    public void testZrangeByLex() {
        sentinel.zrangeByLex("key", "min", "max");
        sentinel.zrangeByLex("key", "min", "max", 1337, 1338);
    }

    @Test
    public void testZrevrangeByLex() {
        sentinel.zrevrangeByLex("key", "max", "min");
        sentinel.zrevrangeByLex("key", "max", "min", 1337, 1338);
    }

    @Test
    public void testZremrangeByLex() {
        sentinel.zremrangeByLex("key", "min", "max");
    }

    @Test
    public void testLinsert() {
        sentinel.linsert("key", ListPosition.AFTER, "pivot", "value");
    }

    @Test
    public void testLpushx() {
        sentinel.lpushx("key", "string");
    }

    @Test
    public void testRpushx() {
        sentinel.rpushx("key", "string");
    }

    @Test
    public void testBlpop() {
        sentinel.blpop(1337, "arg");
    }

    @Test
    public void testBrpop() {
        sentinel.brpop(1337, "arg");
    }

    @Test
    public void testDel() {
        sentinel.del("key");
    }

    @Test
    public void testEcho() {
        sentinel.echo("string");
    }

    @Test
    public void testMove() {
        sentinel.move("key", 1337);
    }

    @Test
    public void testBitcount() {
        sentinel.bitcount("key");
        sentinel.bitcount("key", 1337, 1338);
    }

    @Test
    public void testBitpos() {
        sentinel.bitpos("key", true);
    }

    @Test
    public void testHscan() {
        sentinel.hscan("key", "cursor");
        sentinel.hscan("key", "cursor", new ScanParams());
    }

    @Test
    public void testSscan() {
        sentinel.sscan("key", "cursor");
        sentinel.sscan("key", "cursor", new ScanParams());
    }

    @Test
    public void testZscan() {
        sentinel.zscan("key", "cursor");
        sentinel.zscan("key", "cursor", new ScanParams());
    }

    @Test
    public void testPfadd() {
        sentinel.pfadd("key", "elements");
    }

    @Test
    public void testPfcount() {
        sentinel.pfcount("key");
    }

    @Test
    public void testGeoadd() {
        sentinel.geoadd("key", new HashMap<>());
        sentinel.geoadd("key", 1337, 1338, "member");
    }

    @Test
    public void testGeodist() {
        sentinel.geodist("key", "member1", "member2");
        sentinel.geodist("key", "member1", "member2", GeoUnit.KM);
    }

    @Test
    public void testGeohash() {
        sentinel.geohash("key", "members");
    }

    @Test
    public void testGeopos() {
        sentinel.geopos("key", "members");
    }

    @Test
    public void testGeoradius() {
        sentinel.georadius("key", 1337, 1338, 32, GeoUnit.KM);
        sentinel.georadius("key", 1337, 1338, 32, GeoUnit.KM, GeoRadiusParam.geoRadiusParam());
    }

    @Test
    public void testGeoradiusByMember() {
        sentinel.georadiusByMember("key", "member", 1337, GeoUnit.KM);
        sentinel.georadiusByMember(
                "key", "member", 1337, GeoUnit.KM, GeoRadiusParam.geoRadiusParam());
    }

    @Test
    public void testBitfield() {
        sentinel.bitfield("key", "arguments");
    }
}
| 6,564 |
0 | Create_ds/conductor/redis-persistence/src/main/java/com/netflix/conductor/redis | Create_ds/conductor/redis-persistence/src/main/java/com/netflix/conductor/redis/config/RedisSentinelConfiguration.java | /*
* Copyright 2020 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.config;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.commons.pool2.impl.GenericObjectPoolConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Configuration;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.redis.jedis.JedisSentinel;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.connectionpool.TokenMapSupplier;
import redis.clients.jedis.JedisSentinelPool;
import redis.clients.jedis.Protocol;
import redis.clients.jedis.commands.JedisCommands;
@Configuration(proxyBeanMethods = false)
@ConditionalOnProperty(name = "conductor.db.type", havingValue = "redis_sentinel")
public class RedisSentinelConfiguration extends JedisCommandsConfigurer {

    private static final Logger log = LoggerFactory.getLogger(RedisSentinelConfiguration.class);

    /**
     * Creates the {@link JedisCommands} implementation backed by a Redis Sentinel pool.
     *
     * @param properties redis-specific settings (pool sizing, cluster name, hosts)
     * @param conductorProperties global conductor settings (not used for sentinel pools)
     * @param hostSupplier supplies the sentinel hosts to connect to
     * @param tokenMapSupplier not used for sentinel deployments
     * @return a {@link JedisSentinel} wrapping a configured {@link JedisSentinelPool}
     */
    @Override
    protected JedisCommands createJedisCommands(
            RedisProperties properties,
            ConductorProperties conductorProperties,
            HostSupplier hostSupplier,
            TokenMapSupplier tokenMapSupplier) {
        GenericObjectPoolConfig<?> genericObjectPoolConfig = createPoolConfig(properties);
        // Parameterized logging instead of string concatenation (SLF4J best practice).
        log.info(
                "Starting conductor server using redis_sentinel and cluster {}",
                properties.getClusterName());
        Set<String> sentinels = new HashSet<>();
        for (Host host : hostSupplier.getHosts()) {
            sentinels.add(host.getHostName() + ":" + host.getPort());
        }
        // We use the password of the first sentinel host as password and sentinelPassword
        String password = getPassword(hostSupplier.getHosts());
        if (password != null) {
            return new JedisSentinel(
                    new JedisSentinelPool(
                            properties.getClusterName(),
                            sentinels,
                            genericObjectPoolConfig,
                            Protocol.DEFAULT_TIMEOUT,
                            Protocol.DEFAULT_TIMEOUT,
                            password,
                            Protocol.DEFAULT_DATABASE,
                            null,
                            Protocol.DEFAULT_TIMEOUT,
                            Protocol.DEFAULT_TIMEOUT,
                            password,
                            null));
        }
        return new JedisSentinel(
                new JedisSentinelPool(
                        properties.getClusterName(), sentinels, genericObjectPoolConfig));
    }

    /** Builds the commons-pool2 configuration for the sentinel pool from the redis properties. */
    private static GenericObjectPoolConfig<?> createPoolConfig(RedisProperties properties) {
        GenericObjectPoolConfig<?> poolConfig = new GenericObjectPoolConfig<>();
        poolConfig.setMinIdle(properties.getMinIdleConnections());
        poolConfig.setMaxIdle(properties.getMaxIdleConnections());
        poolConfig.setMaxTotal(properties.getMaxConnectionsPerHost());
        poolConfig.setTestWhileIdle(properties.isTestWhileIdle());
        poolConfig.setMinEvictableIdleTimeMillis(properties.getMinEvictableIdleTimeMillis());
        poolConfig.setTimeBetweenEvictionRunsMillis(
                properties.getTimeBetweenEvictionRunsMillis());
        poolConfig.setNumTestsPerEvictionRun(properties.getNumTestsPerEvictionRun());
        return poolConfig;
    }

    /** Returns the password of the first host, or {@code null} if no hosts are configured. */
    private String getPassword(List<Host> hosts) {
        return hosts.isEmpty() ? null : hosts.get(0).getPassword();
    }
}
/*
* Copyright 2020 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.config;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Conditional;
import org.springframework.context.annotation.Configuration;
import com.netflix.conductor.redis.dynoqueue.RedisQueuesShardingStrategyProvider;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.connectionpool.TokenMapSupplier;
import com.netflix.dyno.connectionpool.impl.lb.HostToken;
import com.netflix.dyno.connectionpool.impl.utils.CollectionUtils;
import com.netflix.dyno.queues.ShardSupplier;
import com.netflix.dyno.queues.redis.RedisQueues;
import com.netflix.dyno.queues.redis.sharding.ShardingStrategy;
import com.netflix.dyno.queues.shard.DynoShardSupplier;
import com.google.inject.ProvisionException;
import redis.clients.jedis.commands.JedisCommands;
@Configuration(proxyBeanMethods = false)
@EnableConfigurationProperties(RedisProperties.class)
@Conditional(AnyRedisCondition.class)
public class RedisCommonConfiguration {

    public static final String DEFAULT_CLIENT_INJECTION_NAME = "DefaultJedisCommands";
    public static final String READ_CLIENT_INJECTION_NAME = "ReadJedisCommands";

    private static final Logger LOGGER = LoggerFactory.getLogger(RedisCommonConfiguration.class);

    /**
     * Derives the local shard name (availability zone with the data center region stripped, e.g.
     * "us-east-1c" -> "c") and builds a {@link DynoShardSupplier} from it.
     *
     * @throws ProvisionException if no availability zone is configured
     */
    @Bean
    public ShardSupplier shardSupplier(HostSupplier hostSupplier, RedisProperties properties) {
        if (properties.getAvailabilityZone() == null) {
            throw new ProvisionException(
                    "Availability zone is not defined. Ensure Configuration.getAvailabilityZone() returns a non-null "
                            + "and non-empty value.");
        }
        // Use String#replace (literal) rather than replaceAll (regex): the region string is
        // plain text and must not be interpreted as a regular-expression pattern.
        String localDC =
                properties.getAvailabilityZone().replace(properties.getDataCenterRegion(), "");
        return new DynoShardSupplier(hostSupplier, properties.getDataCenterRegion(), localDC);
    }

    @Bean
    public TokenMapSupplier tokenMapSupplier() {
        // NOTE(review): hostTokens is captured by the anonymous supplier and appended to on
        // every getTokens() call, so repeated calls accumulate entries. Preserved as-is because
        // getTokenForHost() searches this accumulated list -- confirm before changing.
        final List<HostToken> hostTokens = new ArrayList<>();
        return new TokenMapSupplier() {
            @Override
            public List<HostToken> getTokens(Set<Host> activeHosts) {
                // Assign descending token values so each active host receives a distinct token.
                long i = activeHosts.size();
                for (Host host : activeHosts) {
                    HostToken hostToken = new HostToken(i, host);
                    hostTokens.add(hostToken);
                    i--;
                }
                return hostTokens;
            }

            @Override
            public HostToken getTokenForHost(Host host, Set<Host> activeHosts) {
                return CollectionUtils.find(
                        hostTokens, token -> token.getHost().compareTo(host) == 0);
            }
        };
    }

    @Bean
    public ShardingStrategy shardingStrategy(
            ShardSupplier shardSupplier, RedisProperties properties) {
        return new RedisQueuesShardingStrategyProvider(shardSupplier, properties).get();
    }

    /**
     * Creates the {@link RedisQueues} instance used by the dyno-queue DAO. The two 60_000 values
     * are timing parameters passed straight through to the {@link RedisQueues} constructor.
     */
    @Bean
    public RedisQueues redisQueues(
            @Qualifier(DEFAULT_CLIENT_INJECTION_NAME) JedisCommands jedisCommands,
            @Qualifier(READ_CLIENT_INJECTION_NAME) JedisCommands jedisCommandsRead,
            ShardSupplier shardSupplier,
            RedisProperties properties,
            ShardingStrategy shardingStrategy) {
        RedisQueues queues =
                new RedisQueues(
                        jedisCommands,
                        jedisCommandsRead,
                        properties.getQueuePrefix(),
                        shardSupplier,
                        60_000,
                        60_000,
                        shardingStrategy);
        // Parameterized logging (SLF4J idiom) instead of string concatenation.
        LOGGER.info("DynoQueueDAO initialized with prefix {}!", properties.getQueuePrefix());
        return queues;
    }
}
/*
* Copyright 2020 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.config;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Configuration;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.redis.jedis.JedisStandalone;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.connectionpool.TokenMapSupplier;
import redis.clients.jedis.JedisPool;
import redis.clients.jedis.JedisPoolConfig;
import redis.clients.jedis.Protocol;
import redis.clients.jedis.commands.JedisCommands;
@Configuration(proxyBeanMethods = false)
@ConditionalOnProperty(name = "conductor.db.type", havingValue = "redis_standalone")
public class RedisStandaloneConfiguration extends JedisCommandsConfigurer {

    // Fixed: the logger was previously created for RedisSentinelConfiguration.class
    // (copy-paste), which mis-attributed this class's log output.
    private static final Logger log = LoggerFactory.getLogger(RedisStandaloneConfiguration.class);

    /**
     * Builds a {@link JedisStandalone} client connected to the first configured host, using AUTH
     * when that host carries a password.
     */
    @Override
    protected JedisCommands createJedisCommands(
            RedisProperties properties,
            ConductorProperties conductorProperties,
            HostSupplier hostSupplier,
            TokenMapSupplier tokenMapSupplier) {
        JedisPoolConfig config = new JedisPoolConfig();
        config.setMinIdle(2);
        config.setMaxTotal(properties.getMaxConnectionsPerHost());
        log.info("Starting conductor server using redis_standalone.");
        // Standalone mode only ever talks to the first configured host.
        Host host = hostSupplier.getHosts().get(0);
        return new JedisStandalone(getJedisPool(config, host));
    }

    /** Creates a {@link JedisPool} for {@code host}, passing AUTH credentials when present. */
    private JedisPool getJedisPool(JedisPoolConfig config, Host host) {
        if (host.getPassword() != null) {
            log.info("Connecting to Redis Standalone with AUTH");
            return new JedisPool(
                    config,
                    host.getHostName(),
                    host.getPort(),
                    Protocol.DEFAULT_TIMEOUT,
                    host.getPassword());
        } else {
            return new JedisPool(config, host.getHostName(), host.getPort());
        }
    }
}
/*
* Copyright 2020 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.config;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import com.netflix.conductor.redis.dynoqueue.LocalhostHostSupplier;
import com.netflix.conductor.redis.jedis.JedisMock;
import com.netflix.dyno.connectionpool.HostSupplier;
import static com.netflix.conductor.redis.config.RedisCommonConfiguration.DEFAULT_CLIENT_INJECTION_NAME;
import static com.netflix.conductor.redis.config.RedisCommonConfiguration.READ_CLIENT_INJECTION_NAME;
@Configuration(proxyBeanMethods = false)
@ConditionalOnProperty(name = "conductor.db.type", havingValue = "memory")
public class InMemoryRedisConfiguration {
    /** Supplies a single localhost host for local / in-memory runs. */
    @Bean
    public HostSupplier hostSupplier(RedisProperties properties) {
        return new LocalhostHostSupplier(properties);
    }
    /**
     * One {@link JedisMock} registered under both the default and the read client names, so all
     * redis access in "memory" mode goes through the same in-memory stub.
     */
    @Bean(name = {DEFAULT_CLIENT_INJECTION_NAME, READ_CLIENT_INJECTION_NAME})
    public JedisMock jedisMock() {
        return new JedisMock();
    }
}
/*
* Copyright 2020 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.config;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.commons.pool2.impl.GenericObjectPoolConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Configuration;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.redis.jedis.JedisCluster;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.connectionpool.TokenMapSupplier;
import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.Protocol;
import redis.clients.jedis.commands.JedisCommands;
@Configuration(proxyBeanMethods = false)
@ConditionalOnProperty(name = "conductor.db.type", havingValue = "redis_cluster")
public class RedisClusterConfiguration extends JedisCommandsConfigurer {

    // Fixed: the logger was previously created for JedisCommandsConfigurer.class, which
    // mis-attributed this class's log output and broke the sibling-configurer convention.
    private static final Logger log = LoggerFactory.getLogger(RedisClusterConfiguration.class);

    // Same as redis.clients.jedis.BinaryJedisCluster
    protected static final int DEFAULT_MAX_ATTEMPTS = 5;

    /**
     * Builds a cluster-aware {@link JedisCommands} client over every configured host, using the
     * first host's password for AUTH when present.
     */
    @Override
    protected JedisCommands createJedisCommands(
            RedisProperties properties,
            ConductorProperties conductorProperties,
            HostSupplier hostSupplier,
            TokenMapSupplier tokenMapSupplier) {
        GenericObjectPoolConfig<?> genericObjectPoolConfig = new GenericObjectPoolConfig<>();
        genericObjectPoolConfig.setMaxTotal(properties.getMaxConnectionsPerHost());
        Set<HostAndPort> hosts =
                hostSupplier.getHosts().stream()
                        .map(h -> new HostAndPort(h.getHostName(), h.getPort()))
                        .collect(Collectors.toSet());
        String password = getPassword(hostSupplier.getHosts());
        if (password != null) {
            log.info("Connecting to Redis Cluster with AUTH");
            return new JedisCluster(
                    new redis.clients.jedis.JedisCluster(
                            hosts,
                            Protocol.DEFAULT_TIMEOUT,
                            Protocol.DEFAULT_TIMEOUT,
                            DEFAULT_MAX_ATTEMPTS,
                            password,
                            genericObjectPoolConfig));
        } else {
            return new JedisCluster(
                    new redis.clients.jedis.JedisCluster(hosts, genericObjectPoolConfig));
        }
    }

    /** Returns the password of the first configured host, or {@code null} if there are no hosts. */
    private String getPassword(List<Host> hosts) {
        return hosts.isEmpty() ? null : hosts.get(0).getPassword();
    }
}
/*
* Copyright 2020 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.config;
import org.springframework.context.annotation.Bean;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.redis.dynoqueue.ConfigurationHostSupplier;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.connectionpool.TokenMapSupplier;
import redis.clients.jedis.commands.JedisCommands;
import static com.netflix.conductor.redis.config.RedisCommonConfiguration.DEFAULT_CLIENT_INJECTION_NAME;
import static com.netflix.conductor.redis.config.RedisCommonConfiguration.READ_CLIENT_INJECTION_NAME;
/**
 * Template base class for the redis-flavored configurations: registers the {@link HostSupplier}
 * plus the default and read {@link JedisCommands} beans, delegating the actual client
 * construction to {@link #createJedisCommands}.
 */
abstract class JedisCommandsConfigurer {
    /** Hosts are parsed from the {@code conductor.redis.hosts} configuration string. */
    @Bean
    public HostSupplier hostSupplier(RedisProperties properties) {
        return new ConfigurationHostSupplier(properties);
    }
    /** Primary ("write") client bean. */
    @Bean(name = DEFAULT_CLIENT_INJECTION_NAME)
    public JedisCommands jedisCommands(
            RedisProperties properties,
            ConductorProperties conductorProperties,
            HostSupplier hostSupplier,
            TokenMapSupplier tokenMapSupplier) {
        return createJedisCommands(properties, conductorProperties, hostSupplier, tokenMapSupplier);
    }
    /** Read client bean; built by the same factory method as the default client. */
    @Bean(name = READ_CLIENT_INJECTION_NAME)
    public JedisCommands readJedisCommands(
            RedisProperties properties,
            ConductorProperties conductorProperties,
            HostSupplier hostSupplier,
            TokenMapSupplier tokenMapSupplier) {
        return createJedisCommands(properties, conductorProperties, hostSupplier, tokenMapSupplier);
    }
    /** Builds the concrete {@link JedisCommands} client for a specific redis flavor. */
    protected abstract JedisCommands createJedisCommands(
            RedisProperties properties,
            ConductorProperties conductorProperties,
            HostSupplier hostSupplier,
            TokenMapSupplier tokenMapSupplier);
}
/*
* Copyright 2021 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.config;
import java.time.Duration;
import java.time.temporal.ChronoUnit;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.convert.DurationUnit;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.redis.dynoqueue.RedisQueuesShardingStrategyProvider;
import com.netflix.dyno.connectionpool.RetryPolicy.RetryPolicyFactory;
import com.netflix.dyno.connectionpool.impl.RetryNTimes;
import com.netflix.dyno.connectionpool.impl.RunOnce;
/**
 * Configuration properties bound to the {@code conductor.redis} prefix, shared by every
 * redis-backed persistence flavor (dynomite, in-memory, cluster, sentinel, standalone).
 */
@ConfigurationProperties("conductor.redis")
public class RedisProperties {
    // Needed for the stack name used when computing the dyno-queues key prefix.
    private final ConductorProperties conductorProperties;
    @Autowired
    public RedisProperties(ConductorProperties conductorProperties) {
        this.conductorProperties = conductorProperties;
    }
    /**
     * Data center region. If hosting on Amazon the value is something like us-east-1, us-west-2
     * etc.
     */
    private String dataCenterRegion = "us-east-1";
    /**
     * Local rack / availability zone. For AWS deployments, the value is something like us-east-1a,
     * etc.
     */
    private String availabilityZone = "us-east-1c";
    /** The name of the redis / dynomite cluster */
    private String clusterName = "";
    /** Dynomite Cluster details. Format is host:port:rack separated by semicolon */
    private String hosts = null;
    /** The prefix used to prepend workflow data in redis */
    private String workflowNamespacePrefix = null;
    /** The prefix used to prepend keys for queues in redis */
    private String queueNamespacePrefix = null;
    /**
     * The domain name to be used in the key prefix for logical separation of workflow data and
     * queues in a shared redis setup
     */
    private String keyspaceDomain = null;
    /**
     * The maximum number of connections that can be managed by the connection pool on a given
     * instance
     */
    private int maxConnectionsPerHost = 10;
    /**
     * The maximum amount of time to wait for a connection to become available from the connection
     * pool
     */
    private Duration maxTimeoutWhenExhausted = Duration.ofMillis(800);
    /** The maximum retry attempts to use with this connection pool */
    private int maxRetryAttempts = 0;
    /** The read connection port to be used for connecting to dyno-queues */
    private int queuesNonQuorumPort = 22122;
    /** The sharding strategy to be used for the dyno queue configuration */
    private String queueShardingStrategy = RedisQueuesShardingStrategyProvider.ROUND_ROBIN_STRATEGY;
    /** The time in seconds after which the in-memory task definitions cache will be refreshed */
    @DurationUnit(ChronoUnit.SECONDS)
    private Duration taskDefCacheRefreshInterval = Duration.ofSeconds(60);
    /** The time to live in seconds for which the event execution will be persisted */
    @DurationUnit(ChronoUnit.SECONDS)
    private Duration eventExecutionPersistenceTTL = Duration.ofSeconds(60);
    // Maximum number of idle connections to be maintained
    private int maxIdleConnections = 8;
    // Minimum number of idle connections to be maintained
    private int minIdleConnections = 5;
    // Pool eviction tuning passed through to commons-pool2's GenericObjectPoolConfig.
    private long minEvictableIdleTimeMillis = 1800000;
    // NOTE(review): -1 presumably disables the background evictor thread (commons-pool2
    // semantics) -- confirm against the commons-pool2 documentation.
    private long timeBetweenEvictionRunsMillis = -1L;
    private boolean testWhileIdle = false;
    private int numTestsPerEvictionRun = 3;
    public int getNumTestsPerEvictionRun() {
        return numTestsPerEvictionRun;
    }
    public void setNumTestsPerEvictionRun(int numTestsPerEvictionRun) {
        this.numTestsPerEvictionRun = numTestsPerEvictionRun;
    }
    public boolean isTestWhileIdle() {
        return testWhileIdle;
    }
    public void setTestWhileIdle(boolean testWhileIdle) {
        this.testWhileIdle = testWhileIdle;
    }
    public long getMinEvictableIdleTimeMillis() {
        return minEvictableIdleTimeMillis;
    }
    public void setMinEvictableIdleTimeMillis(long minEvictableIdleTimeMillis) {
        this.minEvictableIdleTimeMillis = minEvictableIdleTimeMillis;
    }
    public long getTimeBetweenEvictionRunsMillis() {
        return timeBetweenEvictionRunsMillis;
    }
    public void setTimeBetweenEvictionRunsMillis(long timeBetweenEvictionRunsMillis) {
        this.timeBetweenEvictionRunsMillis = timeBetweenEvictionRunsMillis;
    }
    public int getMinIdleConnections() {
        return minIdleConnections;
    }
    public void setMinIdleConnections(int minIdleConnections) {
        this.minIdleConnections = minIdleConnections;
    }
    public int getMaxIdleConnections() {
        return maxIdleConnections;
    }
    public void setMaxIdleConnections(int maxIdleConnections) {
        this.maxIdleConnections = maxIdleConnections;
    }
    public String getDataCenterRegion() {
        return dataCenterRegion;
    }
    public void setDataCenterRegion(String dataCenterRegion) {
        this.dataCenterRegion = dataCenterRegion;
    }
    public String getAvailabilityZone() {
        return availabilityZone;
    }
    public void setAvailabilityZone(String availabilityZone) {
        this.availabilityZone = availabilityZone;
    }
    public String getClusterName() {
        return clusterName;
    }
    public void setClusterName(String clusterName) {
        this.clusterName = clusterName;
    }
    public String getHosts() {
        return hosts;
    }
    public void setHosts(String hosts) {
        this.hosts = hosts;
    }
    public String getWorkflowNamespacePrefix() {
        return workflowNamespacePrefix;
    }
    public void setWorkflowNamespacePrefix(String workflowNamespacePrefix) {
        this.workflowNamespacePrefix = workflowNamespacePrefix;
    }
    public String getQueueNamespacePrefix() {
        return queueNamespacePrefix;
    }
    public void setQueueNamespacePrefix(String queueNamespacePrefix) {
        this.queueNamespacePrefix = queueNamespacePrefix;
    }
    public String getKeyspaceDomain() {
        return keyspaceDomain;
    }
    public void setKeyspaceDomain(String keyspaceDomain) {
        this.keyspaceDomain = keyspaceDomain;
    }
    public int getMaxConnectionsPerHost() {
        return maxConnectionsPerHost;
    }
    public void setMaxConnectionsPerHost(int maxConnectionsPerHost) {
        this.maxConnectionsPerHost = maxConnectionsPerHost;
    }
    public Duration getMaxTimeoutWhenExhausted() {
        return maxTimeoutWhenExhausted;
    }
    public void setMaxTimeoutWhenExhausted(Duration maxTimeoutWhenExhausted) {
        this.maxTimeoutWhenExhausted = maxTimeoutWhenExhausted;
    }
    public int getMaxRetryAttempts() {
        return maxRetryAttempts;
    }
    public void setMaxRetryAttempts(int maxRetryAttempts) {
        this.maxRetryAttempts = maxRetryAttempts;
    }
    public int getQueuesNonQuorumPort() {
        return queuesNonQuorumPort;
    }
    public void setQueuesNonQuorumPort(int queuesNonQuorumPort) {
        this.queuesNonQuorumPort = queuesNonQuorumPort;
    }
    public String getQueueShardingStrategy() {
        return queueShardingStrategy;
    }
    public void setQueueShardingStrategy(String queueShardingStrategy) {
        this.queueShardingStrategy = queueShardingStrategy;
    }
    public Duration getTaskDefCacheRefreshInterval() {
        return taskDefCacheRefreshInterval;
    }
    public void setTaskDefCacheRefreshInterval(Duration taskDefCacheRefreshInterval) {
        this.taskDefCacheRefreshInterval = taskDefCacheRefreshInterval;
    }
    public Duration getEventExecutionPersistenceTTL() {
        return eventExecutionPersistenceTTL;
    }
    public void setEventExecutionPersistenceTTL(Duration eventExecutionPersistenceTTL) {
        this.eventExecutionPersistenceTTL = eventExecutionPersistenceTTL;
    }
    /**
     * Computes the dyno-queues key prefix:
     * {@code <queueNamespacePrefix>.<stack>[.<keyspaceDomain>]}.
     */
    public String getQueuePrefix() {
        String prefix = getQueueNamespacePrefix() + "." + conductorProperties.getStack();
        if (getKeyspaceDomain() != null) {
            prefix = prefix + "." + getKeyspaceDomain();
        }
        return prefix;
    }
    /**
     * @return a retry policy factory: run-once when {@link #getMaxRetryAttempts()} is 0,
     *     otherwise a retry-N-times policy.
     */
    public RetryPolicyFactory getConnectionRetryPolicy() {
        if (getMaxRetryAttempts() == 0) {
            return RunOnce::new;
        } else {
            return () -> new RetryNTimes(maxRetryAttempts, false);
        }
    }
}
/*
* Copyright 2020 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.config;
import org.springframework.boot.autoconfigure.condition.AnyNestedCondition;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
/**
 * Spring condition that matches when {@code conductor.db.type} is any of the supported
 * redis-backed stores: dynomite, in-memory, cluster, sentinel or standalone.
 */
public class AnyRedisCondition extends AnyNestedCondition {
    public AnyRedisCondition() {
        // Evaluate while @Configuration classes are being parsed.
        super(ConfigurationPhase.PARSE_CONFIGURATION);
    }
    @ConditionalOnProperty(name = "conductor.db.type", havingValue = "dynomite")
    static class DynomiteClusterCondition {}
    @ConditionalOnProperty(name = "conductor.db.type", havingValue = "memory")
    static class InMemoryRedisCondition {}
    @ConditionalOnProperty(name = "conductor.db.type", havingValue = "redis_cluster")
    static class RedisClusterConfiguration {}
    @ConditionalOnProperty(name = "conductor.db.type", havingValue = "redis_sentinel")
    static class RedisSentinelConfiguration {}
    @ConditionalOnProperty(name = "conductor.db.type", havingValue = "redis_standalone")
    static class RedisStandaloneConfiguration {}
}
/*
* Copyright 2021 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.config;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Configuration;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.connectionpool.TokenMapSupplier;
import com.netflix.dyno.connectionpool.impl.ConnectionPoolConfigurationImpl;
import com.netflix.dyno.jedis.DynoJedisClient;
import redis.clients.jedis.commands.JedisCommands;
@Configuration(proxyBeanMethods = false)
@ConditionalOnProperty(name = "conductor.db.type", havingValue = "dynomite")
public class DynomiteClusterConfiguration extends JedisCommandsConfigurer {
protected JedisCommands createJedisCommands(
RedisProperties properties,
ConductorProperties conductorProperties,
HostSupplier hostSupplier,
TokenMapSupplier tokenMapSupplier) {
ConnectionPoolConfigurationImpl connectionPoolConfiguration =
new ConnectionPoolConfigurationImpl(properties.getClusterName())
.withTokenSupplier(tokenMapSupplier)
.setLocalRack(properties.getAvailabilityZone())
.setLocalDataCenter(properties.getDataCenterRegion())
.setSocketTimeout(0)
.setConnectTimeout(0)
.setMaxConnsPerHost(properties.getMaxConnectionsPerHost())
.setMaxTimeoutWhenExhausted(
(int) properties.getMaxTimeoutWhenExhausted().toMillis())
.setRetryPolicyFactory(properties.getConnectionRetryPolicy());
return new DynoJedisClient.Builder()
.withHostSupplier(hostSupplier)
.withApplicationName(conductorProperties.getAppId())
.withDynomiteClusterName(properties.getClusterName())
.withCPConfig(connectionPoolConfiguration)
.build();
}
}
/*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.dynoqueue;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.redis.config.RedisProperties;
import com.netflix.dyno.queues.Message;
import com.netflix.dyno.queues.ShardSupplier;
import com.netflix.dyno.queues.redis.sharding.RoundRobinStrategy;
import com.netflix.dyno.queues.redis.sharding.ShardingStrategy;
public class RedisQueuesShardingStrategyProvider {

    public static final String LOCAL_ONLY_STRATEGY = "localOnly";
    public static final String ROUND_ROBIN_STRATEGY = "roundRobin";

    private static final Logger LOGGER =
            LoggerFactory.getLogger(RedisQueuesShardingStrategyProvider.class);

    private final ShardSupplier shardSupplier;
    private final RedisProperties properties;

    public RedisQueuesShardingStrategyProvider(
            ShardSupplier shardSupplier, RedisProperties properties) {
        this.shardSupplier = shardSupplier;
        this.properties = properties;
    }

    /**
     * Selects the queue sharding strategy from configuration: {@code localOnly} pins all traffic
     * to the local shard; anything else (including an unset property) falls back to round robin.
     */
    public ShardingStrategy get() {
        String shardingStrat = properties.getQueueShardingStrategy();
        // Constant-first equals is null-safe: a null property value now falls through to the
        // round-robin default instead of throwing a NullPointerException.
        if (LOCAL_ONLY_STRATEGY.equals(shardingStrat)) {
            LOGGER.info(
                    "Using {} sharding strategy for queues",
                    LocalOnlyStrategy.class.getSimpleName());
            return new LocalOnlyStrategy(shardSupplier);
        } else {
            LOGGER.info(
                    "Using {} sharding strategy for queues",
                    RoundRobinStrategy.class.getSimpleName());
            return new RoundRobinStrategy();
        }
    }

    /** Sharding strategy that always routes messages to the supplier's current (local) shard. */
    public static final class LocalOnlyStrategy implements ShardingStrategy {

        private static final Logger LOGGER = LoggerFactory.getLogger(LocalOnlyStrategy.class);

        private final ShardSupplier shardSupplier;

        public LocalOnlyStrategy(ShardSupplier shardSupplier) {
            this.shardSupplier = shardSupplier;
        }

        @Override
        public String getNextShard(List<String> allShards, Message message) {
            LOGGER.debug(
                    "Always using {} shard out of {}", shardSupplier.getCurrentShard(), allShards);
            return shardSupplier.getCurrentShard();
        }
    }
}
/*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.dynoqueue;
import java.util.List;
import com.netflix.conductor.redis.config.RedisProperties;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostBuilder;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.google.common.collect.Lists;
/**
 * {@link HostSupplier} used for local runs: always supplies exactly one host named
 * "localhost", marked Up, in the configured availability zone.
 */
public class LocalhostHostSupplier implements HostSupplier {

    private final RedisProperties properties;

    public LocalhostHostSupplier(RedisProperties properties) {
        this.properties = properties;
    }

    @Override
    public List<Host> getHosts() {
        // Build and return a mutable single-element list, as dyno callers may expect.
        return Lists.newArrayList(
                new HostBuilder()
                        .setHostname("localhost")
                        .setIpAddress("0")
                        .setRack(properties.getAvailabilityZone())
                        .setStatus(Host.Status.Up)
                        .createHost());
    }
}
| 6,575 |
0 | Create_ds/conductor/redis-persistence/src/main/java/com/netflix/conductor/redis | Create_ds/conductor/redis-persistence/src/main/java/com/netflix/conductor/redis/dynoqueue/ConfigurationHostSupplier.java | /*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.dynoqueue;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.conductor.redis.config.RedisProperties;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostBuilder;
import com.netflix.dyno.connectionpool.HostSupplier;
/**
 * {@link HostSupplier} that parses the Dynomite/Redis host list from
 * configuration. The {@code conductor.redis.hosts} property is a {@code ;}
 * separated list of entries, each of the form {@code host:port:rack[:password]}.
 */
public class ConfigurationHostSupplier implements HostSupplier {

    private static final Logger log = LoggerFactory.getLogger(ConfigurationHostSupplier.class);

    private final RedisProperties properties;

    public ConfigurationHostSupplier(RedisProperties properties) {
        this.properties = properties;
    }

    /**
     * @return the configured hosts
     * @throws RuntimeException if the hosts property is missing or an entry is malformed
     */
    @Override
    public List<Host> getHosts() {
        return parseHostsFromConfig();
    }

    private List<Host> parseHostsFromConfig() {
        String hosts = properties.getHosts();
        if (hosts == null) {
            String message =
                    "Missing dynomite/redis hosts. Ensure 'conductor.redis.hosts' has been set in the supplied configuration.";
            log.error(message);
            throw new RuntimeException(message);
        }
        return parseHostsFrom(hosts);
    }

    private List<Host> parseHostsFrom(String hostConfig) {
        List<String> hostConfigs = Arrays.asList(hostConfig.split(";"));
        return hostConfigs.stream().map(this::parseHost).collect(Collectors.toList());
    }

    /** Parses a single {@code host:port:rack[:password]} entry into a {@link Host}. */
    private Host parseHost(String hostConfigEntry) {
        String[] hostConfigValues = hostConfigEntry.split(":");
        // Fix: a malformed entry previously failed deep inside the stream with an
        // opaque ArrayIndexOutOfBoundsException; fail fast with a clear message.
        if (hostConfigValues.length < 3) {
            String message =
                    "Invalid redis host entry '"
                            + hostConfigEntry
                            + "'. Expected format host:port:rack[:password].";
            log.error(message);
            throw new RuntimeException(message);
        }
        String host = hostConfigValues[0];
        int port = Integer.parseInt(hostConfigValues[1]);
        String rack = hostConfigValues[2];
        HostBuilder builder =
                new HostBuilder()
                        .setHostname(host)
                        .setPort(port)
                        .setRack(rack)
                        .setStatus(Host.Status.Up);
        if (hostConfigValues.length >= 4) {
            // Optional fourth segment carries the auth password.
            builder.setPassword(hostConfigValues[3]);
        }
        return builder.createHost();
    }
}
| 6,576 |
0 | Create_ds/conductor/redis-persistence/src/main/java/com/netflix/conductor/redis | Create_ds/conductor/redis-persistence/src/main/java/com/netflix/conductor/redis/dao/RedisEventHandlerDAO.java | /*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.dao;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.annotation.Conditional;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.events.EventHandler;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.exception.ConflictException;
import com.netflix.conductor.core.exception.NotFoundException;
import com.netflix.conductor.core.exception.TransientException;
import com.netflix.conductor.dao.EventHandlerDAO;
import com.netflix.conductor.redis.config.AnyRedisCondition;
import com.netflix.conductor.redis.config.RedisProperties;
import com.netflix.conductor.redis.jedis.JedisProxy;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Preconditions;
@Component
@Conditional(AnyRedisCondition.class)
public class RedisEventHandlerDAO extends BaseDynoDAO implements EventHandlerDAO {

    private static final Logger LOGGER = LoggerFactory.getLogger(RedisEventHandlerDAO.class);

    // Hash: handler name -> serialized EventHandler
    private static final String EVENT_HANDLERS = "EVENT_HANDLERS";
    // Set per event: names of handlers subscribed to that event
    private static final String EVENT_HANDLERS_BY_EVENT = "EVENT_HANDLERS_BY_EVENT";

    public RedisEventHandlerDAO(
            JedisProxy jedisProxy,
            ObjectMapper objectMapper,
            ConductorProperties conductorProperties,
            RedisProperties properties) {
        super(jedisProxy, objectMapper, conductorProperties, properties);
    }

    /**
     * Persists a new event handler and indexes it by its event.
     *
     * @throws ConflictException if a handler with the same name already exists
     */
    @Override
    public void addEventHandler(EventHandler eventHandler) {
        Preconditions.checkNotNull(eventHandler.getName(), "Missing Name");
        if (getEventHandler(eventHandler.getName()) != null) {
            throw new ConflictException(
                    "EventHandler with name %s already exists!", eventHandler.getName());
        }
        index(eventHandler);
        jedisProxy.hset(nsKey(EVENT_HANDLERS), eventHandler.getName(), toJson(eventHandler));
        recordRedisDaoRequests("addEventHandler");
    }

    /**
     * Updates an existing event handler, re-indexing it if its event changed.
     *
     * @throws NotFoundException if no handler with that name exists
     */
    @Override
    public void updateEventHandler(EventHandler eventHandler) {
        Preconditions.checkNotNull(eventHandler.getName(), "Missing Name");
        EventHandler existing = getEventHandler(eventHandler.getName());
        if (existing == null) {
            throw new NotFoundException(
                    "EventHandler with name %s not found!", eventHandler.getName());
        }
        if (!existing.getEvent().equals(eventHandler.getEvent())) {
            // Event changed: drop the old event index entry before adding the new one.
            removeIndex(existing);
        }
        index(eventHandler);
        jedisProxy.hset(nsKey(EVENT_HANDLERS), eventHandler.getName(), toJson(eventHandler));
        recordRedisDaoRequests("updateEventHandler");
    }

    /**
     * Removes the named event handler and its event index entry.
     *
     * @throws NotFoundException if no handler with that name exists
     */
    @Override
    public void removeEventHandler(String name) {
        EventHandler existing = getEventHandler(name);
        if (existing == null) {
            throw new NotFoundException("EventHandler with name %s not found!", name);
        }
        jedisProxy.hdel(nsKey(EVENT_HANDLERS), name);
        recordRedisDaoRequests("removeEventHandler");
        removeIndex(existing);
    }

    /** Returns every stored event handler. */
    @Override
    public List<EventHandler> getAllEventHandlers() {
        Map<String, String> all = jedisProxy.hgetAll(nsKey(EVENT_HANDLERS));
        List<EventHandler> handlers = new LinkedList<>();
        all.forEach(
                (key, json) -> {
                    EventHandler eventHandler = readValue(json, EventHandler.class);
                    handlers.add(eventHandler);
                });
        recordRedisDaoRequests("getAllEventHandlers");
        return handlers;
    }

    /** Adds the handler's name to the per-event index set. */
    private void index(EventHandler eventHandler) {
        String event = eventHandler.getEvent();
        String key = nsKey(EVENT_HANDLERS_BY_EVENT, event);
        jedisProxy.sadd(key, eventHandler.getName());
    }

    /** Removes the handler's name from the per-event index set. */
    private void removeIndex(EventHandler eventHandler) {
        String event = eventHandler.getEvent();
        String key = nsKey(EVENT_HANDLERS_BY_EVENT, event);
        jedisProxy.srem(key, eventHandler.getName());
    }

    /**
     * Returns the handlers subscribed to {@code event}, optionally restricted to
     * active ones. Index entries that no longer resolve to a stored handler are
     * skipped rather than failing the whole lookup.
     */
    @Override
    public List<EventHandler> getEventHandlersForEvent(String event, boolean activeOnly) {
        String key = nsKey(EVENT_HANDLERS_BY_EVENT, event);
        Set<String> names = jedisProxy.smembers(key);
        List<EventHandler> handlers = new LinkedList<>();
        for (String name : names) {
            EventHandler eventHandler = getEventHandler(name);
            recordRedisDaoEventRequests("getEventHandler", event);
            if (eventHandler == null) {
                // Fix: getEventHandler() returns null for a missing handler (it never
                // throws NotFoundException), so the previous catch block was dead code
                // and a stale index entry caused an NPE. Skip stale entries instead.
                LOGGER.info("No matching event handler found for event: {}", event);
                continue;
            }
            if (eventHandler.getEvent().equals(event)
                    && (!activeOnly || eventHandler.isActive())) {
                handlers.add(eventHandler);
            }
        }
        return handlers;
    }

    /**
     * Fetches a handler by name, or {@code null} if absent.
     *
     * @throws TransientException if the underlying Redis read fails
     */
    private EventHandler getEventHandler(String name) {
        EventHandler eventHandler = null;
        String json;
        try {
            json = jedisProxy.hget(nsKey(EVENT_HANDLERS), name);
        } catch (Exception e) {
            throw new TransientException("Unable to get event handler named " + name, e);
        }
        if (json != null) {
            eventHandler = readValue(json, EventHandler.class);
        }
        return eventHandler;
    }
}
| 6,577 |
0 | Create_ds/conductor/redis-persistence/src/main/java/com/netflix/conductor/redis | Create_ds/conductor/redis-persistence/src/main/java/com/netflix/conductor/redis/dao/BaseDynoDAO.java | /*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.dao;
import java.io.IOException;
import org.apache.commons.lang3.StringUtils;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.redis.config.RedisProperties;
import com.netflix.conductor.redis.jedis.JedisProxy;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
/**
 * Shared plumbing for the Redis-backed DAOs: key namespacing, JSON
 * (de)serialization via Jackson, and Monitors-based metrics helpers.
 */
public class BaseDynoDAO {

    private static final String NAMESPACE_SEP = ".";
    private static final String DAO_NAME = "redis";

    private final String domain;
    private final RedisProperties properties;
    private final ConductorProperties conductorProperties;
    protected JedisProxy jedisProxy;
    protected ObjectMapper objectMapper;

    protected BaseDynoDAO(
            JedisProxy jedisProxy,
            ObjectMapper objectMapper,
            ConductorProperties conductorProperties,
            RedisProperties properties) {
        this.jedisProxy = jedisProxy;
        this.objectMapper = objectMapper;
        this.conductorProperties = conductorProperties;
        this.properties = properties;
        this.domain = properties.getKeyspaceDomain();
    }

    /**
     * Builds a fully namespaced Redis key of the form
     * {@code [prefix.][stack.][domain.]value1.value2...}; blank prefix, stack and
     * domain segments are omitted.
     */
    String nsKey(String... nsValues) {
        StringBuilder namespacedKey = new StringBuilder();
        appendSegmentIfPresent(namespacedKey, properties.getWorkflowNamespacePrefix());
        appendSegmentIfPresent(namespacedKey, conductorProperties.getStack());
        appendSegmentIfPresent(namespacedKey, domain);
        for (String nsValue : nsValues) {
            namespacedKey.append(nsValue).append(NAMESPACE_SEP);
        }
        // Drop the trailing separator left by the append loop.
        return StringUtils.removeEnd(namespacedKey.toString(), NAMESPACE_SEP);
    }

    /** Appends {@code segment} plus a separator only when the segment is non-blank. */
    private static void appendSegmentIfPresent(StringBuilder key, String segment) {
        if (StringUtils.isNotBlank(segment)) {
            key.append(segment).append(NAMESPACE_SEP);
        }
    }

    public JedisProxy getDyno() {
        return jedisProxy;
    }

    /** Serializes {@code value} to JSON, rethrowing serialization failures unchecked. */
    String toJson(Object value) {
        try {
            return objectMapper.writeValueAsString(value);
        } catch (JsonProcessingException e) {
            throw new RuntimeException(e);
        }
    }

    /** Deserializes {@code json} into {@code clazz}, rethrowing parse failures unchecked. */
    <T> T readValue(String json, Class<T> clazz) {
        try {
            return objectMapper.readValue(json, clazz);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    /** Records a DAO request metric with no task/workflow context. */
    void recordRedisDaoRequests(String action) {
        recordRedisDaoRequests(action, "n/a", "n/a");
    }

    void recordRedisDaoRequests(String action, String taskType, String workflowType) {
        Monitors.recordDaoRequests(DAO_NAME, action, taskType, workflowType);
    }

    void recordRedisDaoEventRequests(String action, String event) {
        Monitors.recordDaoEventRequests(DAO_NAME, action, event);
    }

    /** Records a payload-size metric; blank task/workflow types are reported as "". */
    void recordRedisDaoPayloadSize(String action, int size, String taskType, String workflowType) {
        Monitors.recordDaoPayloadSize(
                DAO_NAME,
                action,
                StringUtils.defaultIfBlank(taskType, ""),
                StringUtils.defaultIfBlank(workflowType, ""),
                size);
    }
}
| 6,578 |
0 | Create_ds/conductor/redis-persistence/src/main/java/com/netflix/conductor/redis | Create_ds/conductor/redis-persistence/src/main/java/com/netflix/conductor/redis/dao/RedisMetadataDAO.java | /*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.dao;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.annotation.Conditional;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.exception.ConflictException;
import com.netflix.conductor.core.exception.NotFoundException;
import com.netflix.conductor.dao.MetadataDAO;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.redis.config.AnyRedisCondition;
import com.netflix.conductor.redis.config.RedisProperties;
import com.netflix.conductor.redis.jedis.JedisProxy;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Preconditions;
import static com.netflix.conductor.common.metadata.tasks.TaskDef.ONE_HOUR;
@Component
@Conditional(AnyRedisCondition.class)
public class RedisMetadataDAO extends BaseDynoDAO implements MetadataDAO {

    private static final Logger LOGGER = LoggerFactory.getLogger(RedisMetadataDAO.class);

    // Keys Families
    private static final String ALL_TASK_DEFS = "TASK_DEFS";
    private static final String WORKFLOW_DEF_NAMES = "WORKFLOW_DEF_NAMES";
    private static final String WORKFLOW_DEF = "WORKFLOW_DEF";
    private static final String LATEST = "latest";
    private static final String className = RedisMetadataDAO.class.getSimpleName();

    // Fix: this map is swapped wholesale by the background refresh thread and read
    // by request threads; volatile guarantees readers observe the new reference.
    private volatile Map<String, TaskDef> taskDefCache = new HashMap<>();

    public RedisMetadataDAO(
            JedisProxy jedisProxy,
            ObjectMapper objectMapper,
            ConductorProperties conductorProperties,
            RedisProperties properties) {
        super(jedisProxy, objectMapper, conductorProperties, properties);
        refreshTaskDefs();
        long cacheRefreshTime = properties.getTaskDefCacheRefreshInterval().getSeconds();
        // NOTE(review): this scheduler is never shut down and uses a non-daemon
        // thread; confirm whether a daemon thread factory is intended here.
        Executors.newSingleThreadScheduledExecutor()
                .scheduleWithFixedDelay(
                        this::refreshTaskDefs,
                        cacheRefreshTime,
                        cacheRefreshTime,
                        TimeUnit.SECONDS);
    }

    @Override
    public TaskDef createTaskDef(TaskDef taskDef) {
        return insertOrUpdateTaskDef(taskDef);
    }

    @Override
    public TaskDef updateTaskDef(TaskDef taskDef) {
        return insertOrUpdateTaskDef(taskDef);
    }

    /** Writes the definition into the shared TASK_DEFS hash and refreshes the cache. */
    private TaskDef insertOrUpdateTaskDef(TaskDef taskDef) {
        // Store all task def in under one key
        String payload = toJson(taskDef);
        jedisProxy.hset(nsKey(ALL_TASK_DEFS), taskDef.getName(), payload);
        recordRedisDaoRequests("storeTaskDef");
        recordRedisDaoPayloadSize("storeTaskDef", payload.length(), taskDef.getName(), "n/a");
        refreshTaskDefs();
        return taskDef;
    }

    /** Rebuilds the in-memory cache from Redis; failures are logged and swallowed. */
    private void refreshTaskDefs() {
        try {
            Map<String, TaskDef> map = new HashMap<>();
            getAllTaskDefs().forEach(taskDef -> map.put(taskDef.getName(), taskDef));
            this.taskDefCache = map;
            // Fix: parameterized logging instead of eager string concatenation.
            LOGGER.debug("Refreshed task defs {}", this.taskDefCache.size());
        } catch (Exception e) {
            Monitors.error(className, "refreshTaskDefs");
            LOGGER.error("refresh TaskDefs failed ", e);
        }
    }

    /** Returns the cached definition, falling back to Redis on a cache miss. */
    @Override
    public TaskDef getTaskDef(String name) {
        return Optional.ofNullable(taskDefCache.get(name)).orElseGet(() -> getTaskDefFromDB(name));
    }

    private TaskDef getTaskDefFromDB(String name) {
        Preconditions.checkNotNull(name, "TaskDef name cannot be null");
        TaskDef taskDef = null;
        String taskDefJsonStr = jedisProxy.hget(nsKey(ALL_TASK_DEFS), name);
        if (taskDefJsonStr != null) {
            taskDef = readValue(taskDefJsonStr, TaskDef.class);
            recordRedisDaoRequests("getTaskDef");
            recordRedisDaoPayloadSize(
                    "getTaskDef", taskDefJsonStr.length(), taskDef.getName(), "n/a");
        }
        setDefaults(taskDef);
        return taskDef;
    }

    /** Backfills a missing response timeout so tasks always time out eventually. */
    private void setDefaults(TaskDef taskDef) {
        if (taskDef != null && taskDef.getResponseTimeoutSeconds() == 0) {
            taskDef.setResponseTimeoutSeconds(
                    taskDef.getTimeoutSeconds() == 0 ? ONE_HOUR : taskDef.getTimeoutSeconds() - 1);
        }
    }

    @Override
    public List<TaskDef> getAllTaskDefs() {
        List<TaskDef> allTaskDefs = new LinkedList<>();
        recordRedisDaoRequests("getAllTaskDefs");
        Map<String, String> taskDefs = jedisProxy.hgetAll(nsKey(ALL_TASK_DEFS));
        int size = 0;
        if (!taskDefs.isEmpty()) {
            for (String taskDefJsonStr : taskDefs.values()) {
                if (taskDefJsonStr != null) {
                    TaskDef taskDef = readValue(taskDefJsonStr, TaskDef.class);
                    setDefaults(taskDef);
                    allTaskDefs.add(taskDef);
                    size += taskDefJsonStr.length();
                }
            }
            recordRedisDaoPayloadSize("getAllTaskDefs", size, "n/a", "n/a");
        }
        return allTaskDefs;
    }

    /**
     * Removes the named task definition.
     *
     * @throws NotFoundException if no definition with that name exists
     */
    @Override
    public void removeTaskDef(String name) {
        Preconditions.checkNotNull(name, "TaskDef name cannot be null");
        Long result = jedisProxy.hdel(nsKey(ALL_TASK_DEFS), name);
        if (!result.equals(1L)) {
            throw new NotFoundException("Cannot remove the task - no such task definition");
        }
        recordRedisDaoRequests("removeTaskDef");
        refreshTaskDefs();
    }

    /**
     * Creates a workflow definition.
     *
     * @throws ConflictException if the same name/version pair already exists
     */
    @Override
    public void createWorkflowDef(WorkflowDef def) {
        if (jedisProxy.hexists(
                nsKey(WORKFLOW_DEF, def.getName()), String.valueOf(def.getVersion()))) {
            throw new ConflictException("Workflow with %s already exists!", def.key());
        }
        _createOrUpdate(def);
    }

    @Override
    public void updateWorkflowDef(WorkflowDef def) {
        _createOrUpdate(def);
    }

    /**
     * @param name Name of the workflow definition
     * @return Latest version of workflow definition
     * @see WorkflowDef
     */
    @Override
    public Optional<WorkflowDef> getLatestWorkflowDef(String name) {
        Preconditions.checkNotNull(name, "WorkflowDef name cannot be null");
        WorkflowDef workflowDef = null;
        Optional<Integer> optionalMaxVersion = getWorkflowMaxVersion(name);
        if (optionalMaxVersion.isPresent()) {
            String latestdata =
                    jedisProxy.hget(nsKey(WORKFLOW_DEF, name), optionalMaxVersion.get().toString());
            if (latestdata != null) {
                workflowDef = readValue(latestdata, WorkflowDef.class);
            }
        }
        return Optional.ofNullable(workflowDef);
    }

    /** Highest numeric version stored for the workflow, ignoring the "latest" alias field. */
    private Optional<Integer> getWorkflowMaxVersion(String workflowName) {
        return jedisProxy.hkeys(nsKey(WORKFLOW_DEF, workflowName)).stream()
                .filter(key -> !key.equals(LATEST))
                .map(Integer::valueOf)
                .max(Comparator.naturalOrder());
    }

    /** Returns every stored version of the named workflow definition. */
    public List<WorkflowDef> getAllVersions(String name) {
        Preconditions.checkNotNull(name, "WorkflowDef name cannot be null");
        List<WorkflowDef> workflows = new LinkedList<>();
        recordRedisDaoRequests("getAllWorkflowDefsByName");
        Map<String, String> workflowDefs = jedisProxy.hgetAll(nsKey(WORKFLOW_DEF, name));
        int size = 0;
        for (String key : workflowDefs.keySet()) {
            if (key.equals(LATEST)) {
                continue;
            }
            String workflowDef = workflowDefs.get(key);
            workflows.add(readValue(workflowDef, WorkflowDef.class));
            size += workflowDef.length();
        }
        recordRedisDaoPayloadSize("getAllWorkflowDefsByName", size, "n/a", name);
        return workflows;
    }

    @Override
    public Optional<WorkflowDef> getWorkflowDef(String name, int version) {
        Preconditions.checkNotNull(name, "WorkflowDef name cannot be null");
        WorkflowDef def = null;
        recordRedisDaoRequests("getWorkflowDef");
        String workflowDefJsonString =
                jedisProxy.hget(nsKey(WORKFLOW_DEF, name), String.valueOf(version));
        if (workflowDefJsonString != null) {
            def = readValue(workflowDefJsonString, WorkflowDef.class);
            recordRedisDaoPayloadSize(
                    "getWorkflowDef", workflowDefJsonString.length(), "n/a", name);
        }
        return Optional.ofNullable(def);
    }

    /**
     * Removes one version of a workflow definition; when it was the last remaining
     * version, the workflow name is also dropped from the name index.
     *
     * @throws NotFoundException if the name/version pair does not exist
     */
    @Override
    public void removeWorkflowDef(String name, Integer version) {
        Preconditions.checkArgument(
                StringUtils.isNotBlank(name), "WorkflowDef name cannot be null");
        Preconditions.checkNotNull(version, "Input version cannot be null");
        Long result = jedisProxy.hdel(nsKey(WORKFLOW_DEF, name), String.valueOf(version));
        if (!result.equals(1L)) {
            throw new NotFoundException(
                    "Cannot remove the workflow - no such workflow" + " definition: %s version: %d",
                    name, version);
        }
        // check if there are any more versions remaining if not delete the
        // workflow name
        Optional<Integer> optionMaxVersion = getWorkflowMaxVersion(name);
        // delete workflow name
        if (optionMaxVersion.isEmpty()) {
            jedisProxy.srem(nsKey(WORKFLOW_DEF_NAMES), name);
        }
        recordRedisDaoRequests("removeWorkflowDef");
    }

    /** Returns every known workflow definition name. */
    public List<String> findAll() {
        Set<String> wfNames = jedisProxy.smembers(nsKey(WORKFLOW_DEF_NAMES));
        return new ArrayList<>(wfNames);
    }

    @Override
    public List<WorkflowDef> getAllWorkflowDefs() {
        List<WorkflowDef> workflows = new LinkedList<>();
        // Get all from WORKFLOW_DEF_NAMES
        recordRedisDaoRequests("getAllWorkflowDefs");
        Set<String> wfNames = jedisProxy.smembers(nsKey(WORKFLOW_DEF_NAMES));
        int size = 0;
        for (String wfName : wfNames) {
            Map<String, String> workflowDefs = jedisProxy.hgetAll(nsKey(WORKFLOW_DEF, wfName));
            for (String key : workflowDefs.keySet()) {
                if (key.equals(LATEST)) {
                    continue;
                }
                String workflowDef = workflowDefs.get(key);
                workflows.add(readValue(workflowDef, WorkflowDef.class));
                size += workflowDef.length();
            }
        }
        recordRedisDaoPayloadSize("getAllWorkflowDefs", size, "n/a", "n/a");
        return workflows;
    }

    @Override
    public List<WorkflowDef> getAllWorkflowDefsLatestVersions() {
        List<WorkflowDef> workflows = new LinkedList<>();
        // Get all definitions latest versions from WORKFLOW_DEF_NAMES
        recordRedisDaoRequests("getAllWorkflowLatestVersionsDefs");
        Set<String> wfNames = jedisProxy.smembers(nsKey(WORKFLOW_DEF_NAMES));
        int size = 0;
        // Resolve the latest version of each named workflow; names without a
        // resolvable latest version are skipped.
        for (String wfName : wfNames) {
            WorkflowDef def = getLatestWorkflowDef(wfName).orElse(null);
            if (def != null) {
                workflows.add(def);
                size += def.toString().length();
            }
        }
        recordRedisDaoPayloadSize("getAllWorkflowLatestVersionsDefs", size, "n/a", "n/a");
        return workflows;
    }

    /** Upserts the definition under its name/version and indexes the name. */
    private void _createOrUpdate(WorkflowDef workflowDef) {
        // First set the workflow def
        jedisProxy.hset(
                nsKey(WORKFLOW_DEF, workflowDef.getName()),
                String.valueOf(workflowDef.getVersion()),
                toJson(workflowDef));
        jedisProxy.sadd(nsKey(WORKFLOW_DEF_NAMES), workflowDef.getName());
        recordRedisDaoRequests("storeWorkflowDef", "n/a", workflowDef.getName());
    }
}
| 6,579 |
0 | Create_ds/conductor/redis-persistence/src/main/java/com/netflix/conductor/redis | Create_ds/conductor/redis-persistence/src/main/java/com/netflix/conductor/redis/dao/RedisPollDataDAO.java | /*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.dao;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang3.StringUtils;
import org.springframework.context.annotation.Conditional;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.tasks.PollData;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.dao.PollDataDAO;
import com.netflix.conductor.redis.config.AnyRedisCondition;
import com.netflix.conductor.redis.config.RedisProperties;
import com.netflix.conductor.redis.jedis.JedisProxy;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Preconditions;
@Component
@Conditional(AnyRedisCondition.class)
public class RedisPollDataDAO extends BaseDynoDAO implements PollDataDAO {

    private static final String POLL_DATA = "POLL_DATA";

    public RedisPollDataDAO(
            JedisProxy jedisProxy,
            ObjectMapper objectMapper,
            ConductorProperties conductorProperties,
            RedisProperties properties) {
        super(jedisProxy, objectMapper, conductorProperties, properties);
    }

    /** Maps a (possibly null) domain to the hash field used to store its poll data. */
    private static String domainField(String domain) {
        return (domain == null) ? "DEFAULT" : domain;
    }

    /** Records the most recent poll for the task type / domain / worker triple. */
    @Override
    public void updateLastPollData(String taskDefName, String domain, String workerId) {
        Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null");
        PollData pollData = new PollData(taskDefName, domain, workerId, System.currentTimeMillis());
        String key = nsKey(POLL_DATA, pollData.getQueueName());
        String payload = toJson(pollData);
        recordRedisDaoRequests("updatePollData");
        recordRedisDaoPayloadSize("updatePollData", payload.length(), "n/a", "n/a");
        jedisProxy.hset(key, domainField(domain), payload);
    }

    /** Returns the last poll data for the task type in the given domain, or null if none. */
    @Override
    public PollData getPollData(String taskDefName, String domain) {
        Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null");
        String pollDataJsonString =
                jedisProxy.hget(nsKey(POLL_DATA, taskDefName), domainField(domain));
        recordRedisDaoRequests("getPollData");
        recordRedisDaoPayloadSize(
                "getPollData", StringUtils.length(pollDataJsonString), "n/a", "n/a");
        return StringUtils.isNotBlank(pollDataJsonString)
                ? readValue(pollDataJsonString, PollData.class)
                : null;
    }

    /** Returns poll data for every domain recorded against the task type. */
    @Override
    public List<PollData> getPollData(String taskDefName) {
        Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null");
        Map<String, String> pollDataMap = jedisProxy.hgetAll(nsKey(POLL_DATA, taskDefName));
        List<PollData> pollData = new ArrayList<>();
        if (pollDataMap != null) {
            for (String pollDataJsonString : pollDataMap.values()) {
                pollData.add(readValue(pollDataJsonString, PollData.class));
                recordRedisDaoRequests("getPollData");
                recordRedisDaoPayloadSize(
                        "getPollData", pollDataJsonString.length(), "n/a", "n/a");
            }
        }
        return pollData;
    }
}
| 6,580 |
0 | Create_ds/conductor/redis-persistence/src/main/java/com/netflix/conductor/redis | Create_ds/conductor/redis-persistence/src/main/java/com/netflix/conductor/redis/dao/RedisRateLimitingDAO.java | /*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.dao;
import java.util.Optional;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.annotation.Conditional;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.dao.RateLimitingDAO;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.redis.config.AnyRedisCondition;
import com.netflix.conductor.redis.config.RedisProperties;
import com.netflix.conductor.redis.jedis.JedisProxy;
import com.fasterxml.jackson.databind.ObjectMapper;
@Component
@Conditional(AnyRedisCondition.class)
public class RedisRateLimitingDAO extends BaseDynoDAO implements RateLimitingDAO {

    private static final Logger LOGGER = LoggerFactory.getLogger(RedisRateLimitingDAO.class);

    private static final String TASK_RATE_LIMIT_BUCKET = "TASK_RATE_LIMIT_BUCKET";

    public RedisRateLimitingDAO(
            JedisProxy jedisProxy,
            ObjectMapper objectMapper,
            ConductorProperties conductorProperties,
            RedisProperties properties) {
        super(jedisProxy, objectMapper, conductorProperties, properties);
    }

    /**
     * This method evaluates if the {@link TaskDef} is rate limited or not based on {@link
     * TaskModel#getRateLimitPerFrequency()} and {@link TaskModel#getRateLimitFrequencyInSeconds()}
     * if not checks the {@link TaskModel} is rate limited or not based on {@link
     * TaskModel#getRateLimitPerFrequency()} and {@link TaskModel#getRateLimitFrequencyInSeconds()}
     *
     * <p>The rate limiting is implemented using the Redis constructs of sorted set and TTL of each
     * element in the rate limited bucket.
     *
     * <ul>
     *   <li>All the entries that are in the not in the frequency bucket are cleaned up by
     *       leveraging {@link JedisProxy#zremrangeByScore(String, String, String)}, this is done to
     *       make the next step of evaluation efficient
     *   <li>A current count(tasks executed within the frequency) is calculated based on the current
     *       time and the beginning of the rate limit frequency time(which is current time - {@link
     *       TaskModel#getRateLimitFrequencyInSeconds()} in millis), this is achieved by using
     *       {@link JedisProxy#zcount(String, double, double)}
     *   <li>Once the count is calculated then a evaluation is made to determine if it is within the
     *       bounds of {@link TaskModel#getRateLimitPerFrequency()}, if so the count is increased
     *       and an expiry TTL is added to the entry
     * </ul>
     *
     * @param task: which needs to be evaluated whether it is rateLimited or not
     * @return true: If the {@link TaskModel} is rateLimited false: If the {@link TaskModel} is not
     *     rateLimited
     */
    @Override
    public boolean exceedsRateLimitPerFrequency(TaskModel task, TaskDef taskDef) {
        // Check if the TaskDefinition is not null then pick the definition values or else pick from
        // the Task
        ImmutablePair<Integer, Integer> rateLimitPair =
                Optional.ofNullable(taskDef)
                        .map(
                                definition ->
                                        new ImmutablePair<>(
                                                definition.getRateLimitPerFrequency(),
                                                definition.getRateLimitFrequencyInSeconds()))
                        .orElse(
                                new ImmutablePair<>(
                                        task.getRateLimitPerFrequency(),
                                        task.getRateLimitFrequencyInSeconds()));

        int rateLimitPerFrequency = rateLimitPair.getLeft();
        int rateLimitFrequencyInSeconds = rateLimitPair.getRight();
        if (rateLimitPerFrequency <= 0 || rateLimitFrequencyInSeconds <= 0) {
            // Non-positive settings disable rate limiting entirely.
            LOGGER.debug(
                    "Rate limit not applied to the Task: {} either rateLimitPerFrequency: {} or rateLimitFrequencyInSeconds: {} is 0 or less",
                    task,
                    rateLimitPerFrequency,
                    rateLimitFrequencyInSeconds);
            return false;
        } else {
            LOGGER.debug(
                    "Evaluating rate limiting for TaskId: {} with TaskDefinition of: {} with rateLimitPerFrequency: {} and rateLimitFrequencyInSeconds: {}",
                    task.getTaskId(),
                    task.getTaskDefName(),
                    rateLimitPerFrequency,
                    rateLimitFrequencyInSeconds);
            long currentTimeEpochMillis = System.currentTimeMillis();
            long currentTimeEpochMinusRateLimitBucket =
                    currentTimeEpochMillis - (rateLimitFrequencyInSeconds * 1000L);
            String key = nsKey(TASK_RATE_LIMIT_BUCKET, task.getTaskDefName());
            // Evict entries that fell out of the sliding window before counting.
            jedisProxy.zremrangeByScore(
                    key, "-inf", String.valueOf(currentTimeEpochMinusRateLimitBucket));
            int currentBucketCount =
                    Math.toIntExact(
                            jedisProxy.zcount(
                                    key,
                                    currentTimeEpochMinusRateLimitBucket,
                                    currentTimeEpochMillis));
            if (currentBucketCount < rateLimitPerFrequency) {
                // Within bounds: record this execution in the bucket and refresh its TTL.
                jedisProxy.zadd(
                        key, currentTimeEpochMillis, String.valueOf(currentTimeEpochMillis));
                jedisProxy.expire(key, rateLimitFrequencyInSeconds);
                LOGGER.info(
                        "TaskId: {} with TaskDefinition of: {} has rateLimitPerFrequency: {} and rateLimitFrequencyInSeconds: {} within the rate limit with current count {}",
                        task.getTaskId(),
                        task.getTaskDefName(),
                        rateLimitPerFrequency,
                        rateLimitFrequencyInSeconds,
                        ++currentBucketCount);
                return false;
            } else {
                LOGGER.info(
                        "TaskId: {} with TaskDefinition of: {} has rateLimitPerFrequency: {} and rateLimitFrequencyInSeconds: {} is out of bounds of rate limit with current count {}",
                        task.getTaskId(),
                        task.getTaskDefName(),
                        rateLimitPerFrequency,
                        rateLimitFrequencyInSeconds,
                        currentBucketCount);
                // Fix: the "task rate limited" metric was previously recorded in the
                // within-limit branch; it must fire only when the task IS rate limited.
                Monitors.recordTaskRateLimited(task.getTaskDefName(), rateLimitPerFrequency);
                return true;
            }
        }
    }
}
| 6,581 |
0 | Create_ds/conductor/redis-persistence/src/main/java/com/netflix/conductor/redis | Create_ds/conductor/redis-persistence/src/main/java/com/netflix/conductor/redis/dao/RedisExecutionDAO.java | /*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.dao;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.annotation.Conditional;
import org.springframework.stereotype.Component;
import com.netflix.conductor.common.metadata.events.EventExecution;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.exception.TransientException;
import com.netflix.conductor.dao.ConcurrentExecutionLimitDAO;
import com.netflix.conductor.dao.ExecutionDAO;
import com.netflix.conductor.metrics.Monitors;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import com.netflix.conductor.redis.config.AnyRedisCondition;
import com.netflix.conductor.redis.config.RedisProperties;
import com.netflix.conductor.redis.jedis.JedisProxy;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
@Component
@Conditional(AnyRedisCondition.class)
public class RedisExecutionDAO extends BaseDynoDAO
implements ExecutionDAO, ConcurrentExecutionLimitDAO {
public static final Logger LOGGER = LoggerFactory.getLogger(RedisExecutionDAO.class);
// Keys Families
private static final String TASK_LIMIT_BUCKET = "TASK_LIMIT_BUCKET";
private static final String IN_PROGRESS_TASKS = "IN_PROGRESS_TASKS";
private static final String TASKS_IN_PROGRESS_STATUS =
"TASKS_IN_PROGRESS_STATUS"; // Tasks which are in IN_PROGRESS status.
private static final String WORKFLOW_TO_TASKS = "WORKFLOW_TO_TASKS";
private static final String SCHEDULED_TASKS = "SCHEDULED_TASKS";
private static final String TASK = "TASK";
private static final String WORKFLOW = "WORKFLOW";
private static final String PENDING_WORKFLOWS = "PENDING_WORKFLOWS";
private static final String WORKFLOW_DEF_TO_WORKFLOWS = "WORKFLOW_DEF_TO_WORKFLOWS";
private static final String CORR_ID_TO_WORKFLOWS = "CORR_ID_TO_WORKFLOWS";
private static final String EVENT_EXECUTION = "EVENT_EXECUTION";
private final int ttlEventExecutionSeconds;
public RedisExecutionDAO(
JedisProxy jedisProxy,
ObjectMapper objectMapper,
ConductorProperties conductorProperties,
RedisProperties properties) {
super(jedisProxy, objectMapper, conductorProperties, properties);
ttlEventExecutionSeconds = (int) properties.getEventExecutionPersistenceTTL().getSeconds();
}
private static String dateStr(Long timeInMs) {
Date date = new Date(timeInMs);
return dateStr(date);
}
private static String dateStr(Date date) {
SimpleDateFormat format = new SimpleDateFormat("yyyyMMdd");
return format.format(date);
}
private static List<String> dateStrBetweenDates(Long startdatems, Long enddatems) {
List<String> dates = new ArrayList<>();
Calendar calendar = new GregorianCalendar();
Date startdate = new Date(startdatems);
Date enddate = new Date(enddatems);
calendar.setTime(startdate);
while (calendar.getTime().before(enddate) || calendar.getTime().equals(enddate)) {
Date result = calendar.getTime();
dates.add(dateStr(result));
calendar.add(Calendar.DATE, 1);
}
return dates;
}
@Override
public List<TaskModel> getPendingTasksByWorkflow(String taskName, String workflowId) {
List<TaskModel> tasks = new LinkedList<>();
List<TaskModel> pendingTasks = getPendingTasksForTaskType(taskName);
pendingTasks.forEach(
pendingTask -> {
if (pendingTask.getWorkflowInstanceId().equals(workflowId)) {
tasks.add(pendingTask);
}
});
return tasks;
}
@Override
public List<TaskModel> getTasks(String taskDefName, String startKey, int count) {
List<TaskModel> tasks = new LinkedList<>();
List<TaskModel> pendingTasks = getPendingTasksForTaskType(taskDefName);
boolean startKeyFound = startKey == null;
int foundcount = 0;
for (TaskModel pendingTask : pendingTasks) {
if (!startKeyFound) {
if (pendingTask.getTaskId().equals(startKey)) {
startKeyFound = true;
if (startKey != null) {
continue;
}
}
}
if (startKeyFound && foundcount < count) {
tasks.add(pendingTask);
foundcount++;
}
}
return tasks;
}
    /**
     * Schedules the given tasks, skipping any that are already scheduled.
     *
     * <p>Dedup relies on the return value of hset on the SCHEDULED_TASKS hash,
     * keyed by "referenceTaskName + retryCount": a return of 0 means the field
     * already existed, so the task is treated as already scheduled and skipped.
     *
     * @param tasks tasks to persist and index
     * @return the subset of tasks that were actually created
     */
    @Override
    public List<TaskModel> createTasks(List<TaskModel> tasks) {
        List<TaskModel> tasksCreated = new LinkedList<>();
        for (TaskModel task : tasks) {
            validate(task);
            recordRedisDaoRequests("createTask", task.getTaskType(), task.getWorkflowType());
            // Dedup key: reference task name + retry count.
            String taskKey = task.getReferenceTaskName() + "" + task.getRetryCount();
            Long added =
                    jedisProxy.hset(
                            nsKey(SCHEDULED_TASKS, task.getWorkflowInstanceId()),
                            taskKey,
                            task.getTaskId());
            if (added < 1) {
                // hset returned 0 -> field already present -> already scheduled.
                LOGGER.debug(
                        "Task already scheduled, skipping the run "
                                + task.getTaskId()
                                + ", ref="
                                + task.getReferenceTaskName()
                                + ", key="
                                + taskKey);
                continue;
            }
            // Stamp a scheduled time on non-terminal tasks that don't have one yet.
            if (task.getStatus() != null
                    && !task.getStatus().isTerminal()
                    && task.getScheduledTime() == 0) {
                task.setScheduledTime(System.currentTimeMillis());
            }
            correlateTaskToWorkflowInDS(task.getTaskId(), task.getWorkflowInstanceId());
            LOGGER.debug(
                    "Scheduled task added to WORKFLOW_TO_TASKS workflowId: {}, taskId: {}, taskType: {} during createTasks",
                    task.getWorkflowInstanceId(),
                    task.getTaskId(),
                    task.getTaskType());
            String inProgressTaskKey = nsKey(IN_PROGRESS_TASKS, task.getTaskDefName());
            jedisProxy.sadd(inProgressTaskKey, task.getTaskId());
            LOGGER.debug(
                    "Scheduled task added to IN_PROGRESS_TASKS with inProgressTaskKey: {}, workflowId: {}, taskId: {}, taskType: {} during createTasks",
                    inProgressTaskKey,
                    task.getWorkflowInstanceId(),
                    task.getTaskId(),
                    task.getTaskType());
            // Persist the payload and sync the status index sets.
            updateTask(task);
            tasksCreated.add(task);
        }
        return tasksCreated;
    }
@Override
public void updateTask(TaskModel task) {
Optional<TaskDef> taskDefinition = task.getTaskDefinition();
if (taskDefinition.isPresent() && taskDefinition.get().concurrencyLimit() > 0) {
if (task.getStatus() != null && task.getStatus().equals(TaskModel.Status.IN_PROGRESS)) {
jedisProxy.sadd(
nsKey(TASKS_IN_PROGRESS_STATUS, task.getTaskDefName()), task.getTaskId());
LOGGER.debug(
"Workflow Task added to TASKS_IN_PROGRESS_STATUS with tasksInProgressKey: {}, workflowId: {}, taskId: {}, taskType: {}, taskStatus: {} during updateTask",
nsKey(TASKS_IN_PROGRESS_STATUS, task.getTaskDefName(), task.getTaskId()),
task.getWorkflowInstanceId(),
task.getTaskId(),
task.getTaskType(),
task.getStatus().name());
} else {
jedisProxy.srem(
nsKey(TASKS_IN_PROGRESS_STATUS, task.getTaskDefName()), task.getTaskId());
LOGGER.debug(
"Workflow Task removed from TASKS_IN_PROGRESS_STATUS with tasksInProgressKey: {}, workflowId: {}, taskId: {}, taskType: {}, taskStatus: {} during updateTask",
nsKey(TASKS_IN_PROGRESS_STATUS, task.getTaskDefName(), task.getTaskId()),
task.getWorkflowInstanceId(),
task.getTaskId(),
task.getTaskType(),
task.getStatus().name());
String key = nsKey(TASK_LIMIT_BUCKET, task.getTaskDefName());
jedisProxy.zrem(key, task.getTaskId());
LOGGER.debug(
"Workflow Task removed from TASK_LIMIT_BUCKET with taskLimitBucketKey: {}, workflowId: {}, taskId: {}, taskType: {}, taskStatus: {} during updateTask",
key,
task.getWorkflowInstanceId(),
task.getTaskId(),
task.getTaskType(),
task.getStatus().name());
}
}
String payload = toJson(task);
recordRedisDaoPayloadSize(
"updateTask",
payload.length(),
taskDefinition.map(TaskDef::getName).orElse("n/a"),
task.getWorkflowType());
recordRedisDaoRequests("updateTask", task.getTaskType(), task.getWorkflowType());
jedisProxy.set(nsKey(TASK, task.getTaskId()), payload);
LOGGER.debug(
"Workflow task payload saved to TASK with taskKey: {}, workflowId: {}, taskId: {}, taskType: {} during updateTask",
nsKey(TASK, task.getTaskId()),
task.getWorkflowInstanceId(),
task.getTaskId(),
task.getTaskType());
if (task.getStatus() != null && task.getStatus().isTerminal()) {
jedisProxy.srem(nsKey(IN_PROGRESS_TASKS, task.getTaskDefName()), task.getTaskId());
LOGGER.debug(
"Workflow Task removed from TASKS_IN_PROGRESS_STATUS with tasksInProgressKey: {}, workflowId: {}, taskId: {}, taskType: {}, taskStatus: {} during updateTask",
nsKey(IN_PROGRESS_TASKS, task.getTaskDefName()),
task.getWorkflowInstanceId(),
task.getTaskId(),
task.getTaskType(),
task.getStatus().name());
}
Set<String> taskIds =
jedisProxy.smembers(nsKey(WORKFLOW_TO_TASKS, task.getWorkflowInstanceId()));
if (!taskIds.contains(task.getTaskId())) {
correlateTaskToWorkflowInDS(task.getTaskId(), task.getWorkflowInstanceId());
}
}
    /**
     * Checks whether this task would exceed its type's configured concurrency limit.
     *
     * <p>A sorted set (TASK_LIMIT_BUCKET) scored by arrival time decides which of
     * the competing tasks are allowed through: only the {@code limit} earliest
     * entries are admitted.
     *
     * @param task the task to admit or reject
     * @return true if the task is over the limit and must wait
     */
    @Override
    public boolean exceedsLimit(TaskModel task) {
        Optional<TaskDef> taskDefinition = task.getTaskDefinition();
        if (taskDefinition.isEmpty()) {
            return false;
        }
        int limit = taskDefinition.get().concurrencyLimit();
        if (limit <= 0) {
            // No concurrency limit configured for this task type.
            return false;
        }
        long current = getInProgressTaskCount(task.getTaskDefName());
        if (current >= limit) {
            LOGGER.info(
                    "Task execution count limited. task - {}:{}, limit: {}, current: {}",
                    task.getTaskId(),
                    task.getTaskDefName(),
                    limit,
                    current);
            Monitors.recordTaskConcurrentExecutionLimited(task.getTaskDefName(), limit);
            return true;
        }
        String rateLimitKey = nsKey(TASK_LIMIT_BUCKET, task.getTaskDefName());
        double score = System.currentTimeMillis();
        String taskId = task.getTaskId();
        // zaddnx: only adds if absent, so a task keeps its original arrival score.
        jedisProxy.zaddnx(rateLimitKey, score, taskId);
        recordRedisDaoRequests("checkTaskRateLimiting", task.getTaskType(), task.getWorkflowType());
        // The first `limit` members by score are the admitted tasks.
        Set<String> ids = jedisProxy.zrangeByScore(rateLimitKey, 0, score + 1, limit);
        boolean rateLimited = !ids.contains(taskId);
        if (rateLimited) {
            LOGGER.info(
                    "Task execution count limited. task - {}:{}, limit: {}, current: {}",
                    task.getTaskId(),
                    task.getTaskDefName(),
                    limit,
                    current);
            String inProgressKey = nsKey(TASKS_IN_PROGRESS_STATUS, task.getTaskDefName());
            // Cleanup any items that are still present in the rate limit bucket but not in progress
            // anymore!
            ids.stream()
                    .filter(id -> !jedisProxy.sismember(inProgressKey, id))
                    .forEach(id2 -> jedisProxy.zrem(rateLimitKey, id2));
            Monitors.recordTaskRateLimited(task.getTaskDefName(), limit);
        }
        return rateLimited;
    }
private void removeTaskMappings(TaskModel task) {
String taskKey = task.getReferenceTaskName() + "" + task.getRetryCount();
jedisProxy.hdel(nsKey(SCHEDULED_TASKS, task.getWorkflowInstanceId()), taskKey);
jedisProxy.srem(nsKey(IN_PROGRESS_TASKS, task.getTaskDefName()), task.getTaskId());
jedisProxy.srem(nsKey(WORKFLOW_TO_TASKS, task.getWorkflowInstanceId()), task.getTaskId());
jedisProxy.srem(nsKey(TASKS_IN_PROGRESS_STATUS, task.getTaskDefName()), task.getTaskId());
jedisProxy.zrem(nsKey(TASK_LIMIT_BUCKET, task.getTaskDefName()), task.getTaskId());
}
    /**
     * Same as {@link #removeTaskMappings} except it does NOT remove the task from
     * WORKFLOW_TO_TASKS — that whole key is expired by removeWorkflowWithExpiry.
     */
    private void removeTaskMappingsWithExpiry(TaskModel task) {
        String taskKey = task.getReferenceTaskName() + "" + task.getRetryCount();
        jedisProxy.hdel(nsKey(SCHEDULED_TASKS, task.getWorkflowInstanceId()), taskKey);
        jedisProxy.srem(nsKey(IN_PROGRESS_TASKS, task.getTaskDefName()), task.getTaskId());
        jedisProxy.srem(nsKey(TASKS_IN_PROGRESS_STATUS, task.getTaskDefName()), task.getTaskId());
        jedisProxy.zrem(nsKey(TASK_LIMIT_BUCKET, task.getTaskDefName()), task.getTaskId());
    }
@Override
public boolean removeTask(String taskId) {
TaskModel task = getTask(taskId);
if (task == null) {
LOGGER.warn("No such task found by id {}", taskId);
return false;
}
removeTaskMappings(task);
jedisProxy.del(nsKey(TASK, task.getTaskId()));
recordRedisDaoRequests("removeTask", task.getTaskType(), task.getWorkflowType());
return true;
}
private boolean removeTaskWithExpiry(String taskId, int ttlSeconds) {
TaskModel task = getTask(taskId);
if (task == null) {
LOGGER.warn("No such task found by id {}", taskId);
return false;
}
removeTaskMappingsWithExpiry(task);
jedisProxy.expire(nsKey(TASK, task.getTaskId()), ttlSeconds);
recordRedisDaoRequests("removeTask", task.getTaskType(), task.getWorkflowType());
return true;
}
@Override
public TaskModel getTask(String taskId) {
Preconditions.checkNotNull(taskId, "taskId cannot be null");
return Optional.ofNullable(jedisProxy.get(nsKey(TASK, taskId)))
.map(
json -> {
TaskModel task = readValue(json, TaskModel.class);
recordRedisDaoRequests(
"getTask", task.getTaskType(), task.getWorkflowType());
recordRedisDaoPayloadSize(
"getTask",
toJson(task).length(),
task.getTaskType(),
task.getWorkflowType());
return task;
})
.orElse(null);
}
@Override
public List<TaskModel> getTasks(List<String> taskIds) {
return taskIds.stream()
.map(taskId -> nsKey(TASK, taskId))
.map(jedisProxy::get)
.filter(Objects::nonNull)
.map(
jsonString -> {
TaskModel task = readValue(jsonString, TaskModel.class);
recordRedisDaoRequests(
"getTask", task.getTaskType(), task.getWorkflowType());
recordRedisDaoPayloadSize(
"getTask",
jsonString.length(),
task.getTaskType(),
task.getWorkflowType());
return task;
})
.collect(Collectors.toList());
}
@Override
public List<TaskModel> getTasksForWorkflow(String workflowId) {
Preconditions.checkNotNull(workflowId, "workflowId cannot be null");
Set<String> taskIds = jedisProxy.smembers(nsKey(WORKFLOW_TO_TASKS, workflowId));
recordRedisDaoRequests("getTasksForWorkflow");
return getTasks(new ArrayList<>(taskIds));
}
@Override
public List<TaskModel> getPendingTasksForTaskType(String taskName) {
Preconditions.checkNotNull(taskName, "task name cannot be null");
Set<String> taskIds = jedisProxy.smembers(nsKey(IN_PROGRESS_TASKS, taskName));
recordRedisDaoRequests("getPendingTasksForTaskType");
return getTasks(new ArrayList<>(taskIds));
}
    /** Persists a new workflow and registers it in the def/correlation/pending indexes. */
    @Override
    public String createWorkflow(WorkflowModel workflow) {
        return insertOrUpdateWorkflow(workflow, false);
    }
    /** Re-persists an existing workflow; index registration is skipped on update. */
    @Override
    public String updateWorkflow(WorkflowModel workflow) {
        return insertOrUpdateWorkflow(workflow, true);
    }
    /**
     * Deletes a workflow, its index entries, and all of its tasks.
     *
     * @param workflowId id of the workflow to delete
     * @return true if the workflow existed and was removed
     */
    @Override
    public boolean removeWorkflow(String workflowId) {
        WorkflowModel workflow = getWorkflow(workflowId, true);
        if (workflow != null) {
            recordRedisDaoRequests("removeWorkflow");
            // Remove from lists
            String key =
                    nsKey(
                            WORKFLOW_DEF_TO_WORKFLOWS,
                            workflow.getWorkflowName(),
                            dateStr(workflow.getCreateTime()));
            jedisProxy.srem(key, workflowId);
            jedisProxy.srem(nsKey(CORR_ID_TO_WORKFLOWS, workflow.getCorrelationId()), workflowId);
            jedisProxy.srem(nsKey(PENDING_WORKFLOWS, workflow.getWorkflowName()), workflowId);
            // Remove the object
            jedisProxy.del(nsKey(WORKFLOW, workflowId));
            // Cascade to every task of the workflow.
            for (TaskModel task : workflow.getTasks()) {
                removeTask(task.getTaskId());
            }
            return true;
        }
        return false;
    }
    /**
     * Like {@link #removeWorkflow}, but the workflow payload, its tasks and the
     * WORKFLOW_TO_TASKS index are left to expire after {@code ttlSeconds} rather
     * than being deleted immediately.
     *
     * @param workflowId id of the workflow
     * @param ttlSeconds TTL to apply to the surviving keys
     * @return true if the workflow existed
     */
    public boolean removeWorkflowWithExpiry(String workflowId, int ttlSeconds) {
        WorkflowModel workflow = getWorkflow(workflowId, true);
        if (workflow != null) {
            recordRedisDaoRequests("removeWorkflow");
            // Remove from lists
            String key =
                    nsKey(
                            WORKFLOW_DEF_TO_WORKFLOWS,
                            workflow.getWorkflowName(),
                            dateStr(workflow.getCreateTime()));
            jedisProxy.srem(key, workflowId);
            jedisProxy.srem(nsKey(CORR_ID_TO_WORKFLOWS, workflow.getCorrelationId()), workflowId);
            jedisProxy.srem(nsKey(PENDING_WORKFLOWS, workflow.getWorkflowName()), workflowId);
            // Remove the object
            jedisProxy.expire(nsKey(WORKFLOW, workflowId), ttlSeconds);
            for (TaskModel task : workflow.getTasks()) {
                removeTaskWithExpiry(task.getTaskId(), ttlSeconds);
            }
            jedisProxy.expire(nsKey(WORKFLOW_TO_TASKS, workflowId), ttlSeconds);
            return true;
        }
        return false;
    }
    /** Drops the workflow's scheduled-task hash and removes it from the pending set. */
    @Override
    public void removeFromPendingWorkflow(String workflowType, String workflowId) {
        recordRedisDaoRequests("removePendingWorkflow");
        jedisProxy.del(nsKey(SCHEDULED_TASKS, workflowId));
        jedisProxy.srem(nsKey(PENDING_WORKFLOWS, workflowType), workflowId);
    }
    /** Loads a workflow including its tasks. */
    @Override
    public WorkflowModel getWorkflow(String workflowId) {
        return getWorkflow(workflowId, true);
    }
@Override
public WorkflowModel getWorkflow(String workflowId, boolean includeTasks) {
String json = jedisProxy.get(nsKey(WORKFLOW, workflowId));
WorkflowModel workflow = null;
if (json != null) {
workflow = readValue(json, WorkflowModel.class);
recordRedisDaoRequests("getWorkflow", "n/a", workflow.getWorkflowName());
recordRedisDaoPayloadSize(
"getWorkflow", json.length(), "n/a", workflow.getWorkflowName());
if (includeTasks) {
List<TaskModel> tasks = getTasksForWorkflow(workflowId);
tasks.sort(Comparator.comparingInt(TaskModel::getSeq));
workflow.setTasks(tasks);
}
}
return workflow;
}
/**
* @param workflowName name of the workflow
* @param version the workflow version
* @return list of workflow ids that are in RUNNING state <em>returns workflows of all versions
* for the given workflow name</em>
*/
@Override
public List<String> getRunningWorkflowIds(String workflowName, int version) {
Preconditions.checkNotNull(workflowName, "workflowName cannot be null");
List<String> workflowIds;
recordRedisDaoRequests("getRunningWorkflowsByName");
Set<String> pendingWorkflows = jedisProxy.smembers(nsKey(PENDING_WORKFLOWS, workflowName));
workflowIds = new LinkedList<>(pendingWorkflows);
return workflowIds;
}
/**
* @param workflowName name of the workflow
* @param version the workflow version
* @return list of workflows that are in RUNNING state
*/
    @Override
    public List<WorkflowModel> getPendingWorkflowsByType(String workflowName, int version) {
        Preconditions.checkNotNull(workflowName, "workflowName cannot be null");
        // Load all running workflows for the name, then keep the requested version.
        List<String> workflowIds = getRunningWorkflowIds(workflowName, version);
        return workflowIds.stream()
                .map(this::getWorkflow)
                .filter(workflow -> workflow.getWorkflowVersion() == version)
                .collect(Collectors.toList());
    }
    /**
     * Loads all workflows of a given name whose create time falls in [startTime, endTime].
     *
     * <p>Workflows are indexed per creation day, so each day in the range is
     * scanned and the exact bounds are re-checked per workflow.
     *
     * @param workflowName workflow definition name
     * @param startTime inclusive lower bound (epoch millis)
     * @param endTime inclusive upper bound (epoch millis)
     * @return matching workflows; workflows that fail to load are logged and skipped
     */
    @Override
    public List<WorkflowModel> getWorkflowsByType(
            String workflowName, Long startTime, Long endTime) {
        Preconditions.checkNotNull(workflowName, "workflowName cannot be null");
        Preconditions.checkNotNull(startTime, "startTime cannot be null");
        Preconditions.checkNotNull(endTime, "endTime cannot be null");
        List<WorkflowModel> workflows = new LinkedList<>();
        // Get all date strings between start and end
        List<String> dateStrs = dateStrBetweenDates(startTime, endTime);
        dateStrs.forEach(
                dateStr -> {
                    String key = nsKey(WORKFLOW_DEF_TO_WORKFLOWS, workflowName, dateStr);
                    jedisProxy
                            .smembers(key)
                            .forEach(
                                    workflowId -> {
                                        try {
                                            WorkflowModel workflow = getWorkflow(workflowId);
                                            if (workflow.getCreateTime() >= startTime
                                                    && workflow.getCreateTime() <= endTime) {
                                                workflows.add(workflow);
                                            }
                                        } catch (Exception e) {
                                            LOGGER.error(
                                                    "Failed to get workflow: {}", workflowId, e);
                                        }
                                    });
                });
        return workflows;
    }
    /** Not supported by this DAO; correlation-id lookups go through ExecutionDAOFacade. */
    @Override
    public List<WorkflowModel> getWorkflowsByCorrelationId(
            String workflowName, String correlationId, boolean includeTasks) {
        throw new UnsupportedOperationException(
                "This method is not implemented in RedisExecutionDAO. Please use ExecutionDAOFacade instead.");
    }
    /** This DAO cannot search across workflows (see getWorkflowsByCorrelationId). */
    @Override
    public boolean canSearchAcrossWorkflows() {
        return false;
    }
/**
* Inserts a new workflow/ updates an existing workflow in the datastore. Additionally, if a
* workflow is in terminal state, it is removed from the set of pending workflows.
*
* @param workflow the workflow instance
* @param update flag to identify if update or create operation
* @return the workflowId
*/
    private String insertOrUpdateWorkflow(WorkflowModel workflow, boolean update) {
        Preconditions.checkNotNull(workflow, "workflow object cannot be null");
        // Detach the tasks before serialization so the workflow payload does not
        // duplicate task payloads (tasks are stored under their own keys);
        // they are re-attached before returning.
        List<TaskModel> tasks = workflow.getTasks();
        workflow.setTasks(new LinkedList<>());
        String payload = toJson(workflow);
        // Store the workflow object
        jedisProxy.set(nsKey(WORKFLOW, workflow.getWorkflowId()), payload);
        recordRedisDaoRequests("storeWorkflow", "n/a", workflow.getWorkflowName());
        recordRedisDaoPayloadSize(
                "storeWorkflow", payload.length(), "n/a", workflow.getWorkflowName());
        if (!update) {
            // Add to list of workflows for a workflowdef
            String key =
                    nsKey(
                            WORKFLOW_DEF_TO_WORKFLOWS,
                            workflow.getWorkflowName(),
                            dateStr(workflow.getCreateTime()));
            jedisProxy.sadd(key, workflow.getWorkflowId());
            if (workflow.getCorrelationId() != null) {
                // Add to list of workflows for a correlationId
                jedisProxy.sadd(
                        nsKey(CORR_ID_TO_WORKFLOWS, workflow.getCorrelationId()),
                        workflow.getWorkflowId());
            }
        }
        // Add or remove from the pending workflows
        if (workflow.getStatus().isTerminal()) {
            jedisProxy.srem(
                    nsKey(PENDING_WORKFLOWS, workflow.getWorkflowName()), workflow.getWorkflowId());
        } else {
            jedisProxy.sadd(
                    nsKey(PENDING_WORKFLOWS, workflow.getWorkflowName()), workflow.getWorkflowId());
        }
        workflow.setTasks(tasks);
        return workflow.getWorkflowId();
    }
/**
* Stores the correlation of a task to the workflow instance in the datastore
*
* @param taskId the taskId to be correlated
* @param workflowInstanceId the workflowId to which the tasks belongs to
*/
@VisibleForTesting
void correlateTaskToWorkflowInDS(String taskId, String workflowInstanceId) {
String workflowToTaskKey = nsKey(WORKFLOW_TO_TASKS, workflowInstanceId);
jedisProxy.sadd(workflowToTaskKey, taskId);
LOGGER.debug(
"Task mapped in WORKFLOW_TO_TASKS with workflowToTaskKey: {}, workflowId: {}, taskId: {}",
workflowToTaskKey,
workflowInstanceId,
taskId);
}
public long getPendingWorkflowCount(String workflowName) {
String key = nsKey(PENDING_WORKFLOWS, workflowName);
recordRedisDaoRequests("getPendingWorkflowCount");
return jedisProxy.scard(key);
}
@Override
public long getInProgressTaskCount(String taskDefName) {
String inProgressKey = nsKey(TASKS_IN_PROGRESS_STATUS, taskDefName);
recordRedisDaoRequests("getInProgressTaskCount");
return jedisProxy.scard(inProgressKey);
}
    /**
     * Adds an event execution, using hsetnx so an existing execution id is never
     * overwritten; the whole hash gets a TTL when one is configured.
     *
     * @param eventExecution execution to add
     * @return true if the execution was newly added, false if the id already existed
     * @throws TransientException if the underlying store call fails
     */
    @Override
    public boolean addEventExecution(EventExecution eventExecution) {
        try {
            String key =
                    nsKey(
                            EVENT_EXECUTION,
                            eventExecution.getName(),
                            eventExecution.getEvent(),
                            eventExecution.getMessageId());
            String json = objectMapper.writeValueAsString(eventExecution);
            recordRedisDaoEventRequests("addEventExecution", eventExecution.getEvent());
            recordRedisDaoPayloadSize(
                    "addEventExecution", json.length(), eventExecution.getEvent(), "n/a");
            boolean added = jedisProxy.hsetnx(key, eventExecution.getId(), json) == 1L;
            if (ttlEventExecutionSeconds > 0) {
                jedisProxy.expire(key, ttlEventExecutionSeconds);
            }
            return added;
        } catch (Exception e) {
            throw new TransientException(
                    "Unable to add event execution for " + eventExecution.getId(), e);
        }
    }
    /**
     * Overwrites the stored payload of an event execution (hset, unconditional).
     *
     * @param eventExecution execution to update
     * @throws TransientException if the underlying store call fails
     */
    @Override
    public void updateEventExecution(EventExecution eventExecution) {
        try {
            String key =
                    nsKey(
                            EVENT_EXECUTION,
                            eventExecution.getName(),
                            eventExecution.getEvent(),
                            eventExecution.getMessageId());
            String json = objectMapper.writeValueAsString(eventExecution);
            LOGGER.info("updating event execution {}", key);
            jedisProxy.hset(key, eventExecution.getId(), json);
            recordRedisDaoEventRequests("updateEventExecution", eventExecution.getEvent());
            recordRedisDaoPayloadSize(
                    "updateEventExecution", json.length(), eventExecution.getEvent(), "n/a");
        } catch (Exception e) {
            throw new TransientException(
                    "Unable to update event execution for " + eventExecution.getId(), e);
        }
    }
    /**
     * Removes a single event execution entry from its hash.
     *
     * @param eventExecution execution to remove
     * @throws TransientException if the underlying store call fails
     */
    @Override
    public void removeEventExecution(EventExecution eventExecution) {
        try {
            String key =
                    nsKey(
                            EVENT_EXECUTION,
                            eventExecution.getName(),
                            eventExecution.getEvent(),
                            eventExecution.getMessageId());
            LOGGER.info("removing event execution {}", key);
            jedisProxy.hdel(key, eventExecution.getId());
            recordRedisDaoEventRequests("removeEventExecution", eventExecution.getEvent());
        } catch (Exception e) {
            throw new TransientException(
                    "Unable to remove event execution for " + eventExecution.getId(), e);
        }
    }
    /**
     * Loads up to {@code max} event executions for a message, probing hash fields
     * named "messageId_0", "messageId_1", ... and stopping at the first gap —
     * assumes execution ids were stored with sequential suffixes.
     *
     * @param eventHandlerName handler name component of the key
     * @param eventName event name component of the key
     * @param messageId message whose executions to load
     * @param max maximum number of executions to return
     * @return the executions found, in suffix order
     * @throws TransientException if a store call or deserialization fails
     */
    public List<EventExecution> getEventExecutions(
            String eventHandlerName, String eventName, String messageId, int max) {
        try {
            String key = nsKey(EVENT_EXECUTION, eventHandlerName, eventName, messageId);
            LOGGER.info("getting event execution {}", key);
            List<EventExecution> executions = new LinkedList<>();
            for (int i = 0; i < max; i++) {
                String field = messageId + "_" + i;
                String value = jedisProxy.hget(key, field);
                if (value == null) {
                    // First missing suffix ends the scan.
                    break;
                }
                recordRedisDaoEventRequests("getEventExecution", eventHandlerName);
                recordRedisDaoPayloadSize(
                        "getEventExecution", value.length(), eventHandlerName, "n/a");
                EventExecution eventExecution = objectMapper.readValue(value, EventExecution.class);
                executions.add(eventExecution);
            }
            return executions;
        } catch (Exception e) {
            throw new TransientException(
                    "Unable to get event executions for " + eventHandlerName, e);
        }
    }
private void validate(TaskModel task) {
try {
Preconditions.checkNotNull(task, "task object cannot be null");
Preconditions.checkNotNull(task.getTaskId(), "Task id cannot be null");
Preconditions.checkNotNull(
task.getWorkflowInstanceId(), "Workflow instance id cannot be null");
Preconditions.checkNotNull(
task.getReferenceTaskName(), "Task reference name cannot be null");
} catch (NullPointerException npe) {
throw new IllegalArgumentException(npe.getMessage(), npe);
}
}
}
| 6,582 |
0 | Create_ds/conductor/redis-persistence/src/main/java/com/netflix/conductor/redis | Create_ds/conductor/redis-persistence/src/main/java/com/netflix/conductor/redis/dao/DynoQueueDAO.java | /*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.dao;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import org.springframework.context.annotation.Conditional;
import org.springframework.stereotype.Component;
import com.netflix.conductor.dao.QueueDAO;
import com.netflix.conductor.redis.config.AnyRedisCondition;
import com.netflix.dyno.queues.DynoQueue;
import com.netflix.dyno.queues.Message;
import com.netflix.dyno.queues.redis.RedisQueues;
/**
 * {@link QueueDAO} implementation delegating to dyno-queues {@link RedisQueues}.
 *
 * <p>Each logical queue name is backed by its own {@link DynoQueue}; message
 * priorities in the range 0-99 are forwarded to the underlying queue.
 */
@Component
@Conditional(AnyRedisCondition.class)
public class DynoQueueDAO implements QueueDAO {
    // Registry of named queues; lazily materialized by queues.get(name).
    private final RedisQueues queues;
    public DynoQueueDAO(RedisQueues queues) {
        this.queues = queues;
    }
    /** Pushes a message id with no explicit priority. */
    @Override
    public void push(String queueName, String id, long offsetTimeInSecond) {
        push(queueName, id, -1, offsetTimeInSecond);
    }
    /** Pushes a message id; the priority is applied only when within 0-99. */
    @Override
    public void push(String queueName, String id, int priority, long offsetTimeInSecond) {
        Message msg = new Message(id, null);
        msg.setTimeout(offsetTimeInSecond, TimeUnit.SECONDS);
        if (priority >= 0 && priority <= 99) {
            msg.setPriority(priority);
        }
        queues.get(queueName).push(Collections.singletonList(msg));
    }
    /** Converts conductor messages into dyno-queue messages and pushes them in one batch. */
    @Override
    public void push(
            String queueName, List<com.netflix.conductor.core.events.queue.Message> messages) {
        List<Message> msgs =
                messages.stream()
                        .map(
                                msg -> {
                                    Message m = new Message(msg.getId(), msg.getPayload());
                                    if (msg.getPriority() > 0) {
                                        m.setPriority(msg.getPriority());
                                    }
                                    return m;
                                })
                        .collect(Collectors.toList());
        queues.get(queueName).push(msgs);
    }
    /** Pushes the id with no explicit priority, only if it is not already queued. */
    @Override
    public boolean pushIfNotExists(String queueName, String id, long offsetTimeInSecond) {
        return pushIfNotExists(queueName, id, -1, offsetTimeInSecond);
    }
    // NOTE(review): the exists-then-push below is not atomic; two concurrent
    // callers could both observe the id as absent and both push — confirm the
    // underlying queue tolerates duplicate pushes of the same id.
    @Override
    public boolean pushIfNotExists(
            String queueName, String id, int priority, long offsetTimeInSecond) {
        DynoQueue queue = queues.get(queueName);
        if (queue.get(id) != null) {
            return false;
        }
        Message msg = new Message(id, null);
        if (priority >= 0 && priority <= 99) {
            msg.setPriority(priority);
        }
        msg.setTimeout(offsetTimeInSecond, TimeUnit.SECONDS);
        queue.push(Collections.singletonList(msg));
        return true;
    }
    /** Pops up to {@code count} message ids, waiting at most {@code timeout} millis. */
    @Override
    public List<String> pop(String queueName, int count, int timeout) {
        List<Message> msg = queues.get(queueName).pop(count, timeout, TimeUnit.MILLISECONDS);
        return msg.stream().map(Message::getId).collect(Collectors.toList());
    }
    /** Pops messages and converts them back into conductor message objects. */
    @Override
    public List<com.netflix.conductor.core.events.queue.Message> pollMessages(
            String queueName, int count, int timeout) {
        List<Message> msgs = queues.get(queueName).pop(count, timeout, TimeUnit.MILLISECONDS);
        return msgs.stream()
                .map(
                        msg ->
                                new com.netflix.conductor.core.events.queue.Message(
                                        msg.getId(), msg.getPayload(), null, msg.getPriority()))
                .collect(Collectors.toList());
    }
    /** Removes the message from the queue. */
    @Override
    public void remove(String queueName, String messageId) {
        queues.get(queueName).remove(messageId);
    }
    /** Returns the queue size, narrowed to int. */
    @Override
    public int getSize(String queueName) {
        return (int) queues.get(queueName).size();
    }
    /** Acknowledges a previously popped message. */
    @Override
    public boolean ack(String queueName, String messageId) {
        return queues.get(queueName).ack(messageId);
    }
    /** Updates the unack timeout of an in-flight message. */
    @Override
    public boolean setUnackTimeout(String queueName, String messageId, long timeout) {
        return queues.get(queueName).setUnackTimeout(messageId, timeout);
    }
    /** Clears all messages from the queue, if it exists. */
    @Override
    public void flush(String queueName) {
        DynoQueue queue = queues.get(queueName);
        if (queue != null) {
            queue.clear();
        }
    }
    /** Maps each queue name to its current size. */
    @Override
    public Map<String, Long> queuesDetail() {
        return queues.queues().stream()
                .collect(Collectors.toMap(DynoQueue::getName, DynoQueue::size));
    }
    /** Maps each queue name to its per-shard size breakdown. */
    @Override
    public Map<String, Map<String, Map<String, Long>>> queuesDetailVerbose() {
        return queues.queues().stream()
                .collect(Collectors.toMap(DynoQueue::getName, DynoQueue::shardSizes));
    }
    /** Delegates unack processing to the underlying queue. */
    public void processUnacks(String queueName) {
        queues.get(queueName).processUnacks();
    }
    /** Zeroes the message's timeout so it becomes deliverable immediately. */
    @Override
    public boolean resetOffsetTime(String queueName, String id) {
        DynoQueue queue = queues.get(queueName);
        return queue.setTimeout(id, 0);
    }
    /** Returns true if the message id is present in the queue. */
    @Override
    public boolean containsMessage(String queueName, String messageId) {
        DynoQueue queue = queues.get(queueName);
        Message message = queue.get(messageId);
        return Objects.nonNull(message);
    }
}
| 6,583 |
0 | Create_ds/conductor/redis-persistence/src/main/java/com/netflix/conductor/redis | Create_ds/conductor/redis-persistence/src/main/java/com/netflix/conductor/redis/jedis/JedisMock.java | /*
* Copyright 2020 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.jedis;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import org.rarefiedredis.redis.IRedisClient;
import org.rarefiedredis.redis.IRedisSortedSet.ZsetPair;
import org.rarefiedredis.redis.RedisMock;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.ScanParams;
import redis.clients.jedis.ScanResult;
import redis.clients.jedis.Tuple;
import redis.clients.jedis.exceptions.JedisException;
import redis.clients.jedis.params.ZAddParams;
public class JedisMock extends Jedis {
private final IRedisClient redis;
    /**
     * Creates a mock backed by an in-memory {@link RedisMock}; the empty host
     * passed to the superclass is never used because no connection is opened.
     */
    public JedisMock() {
        super("");
        this.redis = new RedisMock();
    }
private Set<Tuple> toTupleSet(Set<ZsetPair> pairs) {
Set<Tuple> set = new HashSet<>();
for (ZsetPair pair : pairs) {
set.add(new Tuple(pair.member, pair.score));
}
return set;
}
@Override
public String set(final String key, String value) {
try {
return redis.set(key, value);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String get(final String key) {
try {
return redis.get(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Boolean exists(final String key) {
try {
return redis.exists(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long del(final String... keys) {
try {
return redis.del(keys);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long del(String key) {
try {
return redis.del(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String type(final String key) {
try {
return redis.type(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long expire(final String key, final int seconds) {
try {
return redis.expire(key, seconds) ? 1L : 0L;
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long expireAt(final String key, final long unixTime) {
try {
return redis.expireat(key, unixTime) ? 1L : 0L;
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long ttl(final String key) {
try {
return redis.ttl(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long move(final String key, final int dbIndex) {
try {
return redis.move(key, dbIndex);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String getSet(final String key, final String value) {
try {
return redis.getset(key, value);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public List<String> mget(final String... keys) {
try {
String[] mget = redis.mget(keys);
List<String> lst = new ArrayList<>(mget.length);
for (String get : mget) {
lst.add(get);
}
return lst;
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long setnx(final String key, final String value) {
try {
return redis.setnx(key, value);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String setex(final String key, final int seconds, final String value) {
try {
return redis.setex(key, seconds, value);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String mset(final String... keysvalues) {
try {
return redis.mset(keysvalues);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long msetnx(final String... keysvalues) {
try {
return redis.msetnx(keysvalues) ? 1L : 0L;
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long decrBy(final String key, final long integer) {
try {
return redis.decrby(key, integer);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long decr(final String key) {
try {
return redis.decr(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long incrBy(final String key, final long integer) {
try {
return redis.incrby(key, integer);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Double incrByFloat(final String key, final double value) {
try {
return Double.parseDouble(redis.incrbyfloat(key, value));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long incr(final String key) {
try {
return redis.incr(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long append(final String key, final String value) {
try {
return redis.append(key, value);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String substr(final String key, final int start, final int end) {
try {
return redis.getrange(key, start, end);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long hset(final String key, final String field, final String value) {
try {
return redis.hset(key, field, value) ? 1L : 0L;
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String hget(final String key, final String field) {
try {
return redis.hget(key, field);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long hsetnx(final String key, final String field, final String value) {
try {
return redis.hsetnx(key, field, value) ? 1L : 0L;
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String hmset(final String key, final Map<String, String> hash) {
try {
String field = null, value = null;
String[] args = new String[(hash.size() - 1) * 2];
int idx = 0;
for (String f : hash.keySet()) {
if (field == null) {
field = f;
value = hash.get(f);
continue;
}
args[idx] = f;
args[idx + 1] = hash.get(f);
idx += 2;
}
return redis.hmset(key, field, value, args);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public List<String> hmget(final String key, final String... fields) {
try {
String field = fields[0];
String[] f = new String[fields.length - 1];
for (int idx = 1; idx < fields.length; ++idx) {
f[idx - 1] = fields[idx];
}
return redis.hmget(key, field, f);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long hincrBy(final String key, final String field, final long value) {
try {
return redis.hincrby(key, field, value);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Double hincrByFloat(final String key, final String field, final double value) {
try {
return Double.parseDouble(redis.hincrbyfloat(key, field, value));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Boolean hexists(final String key, final String field) {
try {
return redis.hexists(key, field);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long hdel(final String key, final String... fields) {
try {
String field = fields[0];
String[] f = new String[fields.length - 1];
for (int idx = 1; idx < fields.length; ++idx) {
f[idx - 1] = fields[idx];
}
return redis.hdel(key, field, f);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long hlen(final String key) {
try {
return redis.hlen(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> hkeys(final String key) {
try {
return redis.hkeys(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public List<String> hvals(final String key) {
try {
return redis.hvals(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Map<String, String> hgetAll(final String key) {
try {
return redis.hgetall(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long rpush(final String key, final String... strings) {
try {
String element = strings[0];
String[] elements = new String[strings.length - 1];
for (int idx = 1; idx < strings.length; ++idx) {
elements[idx - 1] = strings[idx];
}
return redis.rpush(key, element, elements);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long lpush(final String key, final String... strings) {
try {
String element = strings[0];
String[] elements = new String[strings.length - 1];
for (int idx = 1; idx < strings.length; ++idx) {
elements[idx - 1] = strings[idx];
}
return redis.lpush(key, element, elements);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long llen(final String key) {
try {
return redis.llen(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public List<String> lrange(final String key, final long start, final long end) {
try {
return redis.lrange(key, start, end);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String ltrim(final String key, final long start, final long end) {
try {
return redis.ltrim(key, start, end);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String lindex(final String key, final long index) {
try {
return redis.lindex(key, index);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String lset(final String key, final long index, final String value) {
try {
return redis.lset(key, index, value);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long lrem(final String key, final long count, final String value) {
try {
return redis.lrem(key, count, value);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String lpop(final String key) {
try {
return redis.lpop(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String rpop(final String key) {
try {
return redis.rpop(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String rpoplpush(final String srckey, final String dstkey) {
try {
return redis.rpoplpush(srckey, dstkey);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long sadd(final String key, final String... members) {
try {
String member = members[0];
String[] m = new String[members.length - 1];
for (int idx = 1; idx < members.length; ++idx) {
m[idx - 1] = members[idx];
}
return redis.sadd(key, member, m);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> smembers(final String key) {
try {
return redis.smembers(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long srem(final String key, final String... members) {
try {
String member = members[0];
String[] m = new String[members.length - 1];
for (int idx = 1; idx < members.length; ++idx) {
m[idx - 1] = members[idx];
}
return redis.srem(key, member, m);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String spop(final String key) {
try {
return redis.spop(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long smove(final String srckey, final String dstkey, final String member) {
try {
return redis.smove(srckey, dstkey, member) ? 1L : 0L;
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long scard(final String key) {
try {
return redis.scard(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Boolean sismember(final String key, final String member) {
try {
return redis.sismember(key, member);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> sinter(final String... keys) {
try {
String key = keys[0];
String[] k = new String[keys.length - 1];
for (int idx = 0; idx < keys.length; ++idx) {
k[idx - 1] = keys[idx];
}
return redis.sinter(key, k);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long sinterstore(final String dstkey, final String... keys) {
try {
String key = keys[0];
String[] k = new String[keys.length - 1];
for (int idx = 0; idx < keys.length; ++idx) {
k[idx - 1] = keys[idx];
}
return redis.sinterstore(dstkey, key, k);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> sunion(final String... keys) {
try {
String key = keys[0];
String[] k = new String[keys.length - 1];
for (int idx = 0; idx < keys.length; ++idx) {
k[idx - 1] = keys[idx];
}
return redis.sunion(key, k);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long sunionstore(final String dstkey, final String... keys) {
try {
String key = keys[0];
String[] k = new String[keys.length - 1];
for (int idx = 0; idx < keys.length; ++idx) {
k[idx - 1] = keys[idx];
}
return redis.sunionstore(dstkey, key, k);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> sdiff(final String... keys) {
try {
String key = keys[0];
String[] k = new String[keys.length - 1];
for (int idx = 0; idx < keys.length; ++idx) {
k[idx - 1] = keys[idx];
}
return redis.sdiff(key, k);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long sdiffstore(final String dstkey, final String... keys) {
try {
String key = keys[0];
String[] k = new String[keys.length - 1];
for (int idx = 0; idx < keys.length; ++idx) {
k[idx - 1] = keys[idx];
}
return redis.sdiffstore(dstkey, key, k);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String srandmember(final String key) {
try {
return redis.srandmember(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public List<String> srandmember(final String key, final int count) {
try {
return redis.srandmember(key, count);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long zadd(final String key, final double score, final String member) {
try {
return redis.zadd(key, new ZsetPair(member, score));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long zadd(String key, double score, String member, ZAddParams params) {
try {
if (params.getParam("xx") != null) {
Double existing = redis.zscore(key, member);
if (existing == null) {
return 0L;
}
return redis.zadd(key, new ZsetPair(member, score));
} else {
return redis.zadd(key, new ZsetPair(member, score));
}
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long zadd(final String key, final Map<String, Double> scoreMembers) {
try {
Double score = null;
String member = null;
List<ZsetPair> scoresmembers = new ArrayList<>((scoreMembers.size() - 1) * 2);
for (String m : scoreMembers.keySet()) {
if (m == null) {
member = m;
score = scoreMembers.get(m);
continue;
}
scoresmembers.add(new ZsetPair(m, scoreMembers.get(m)));
}
return redis.zadd(
key, new ZsetPair(member, score), (ZsetPair[]) scoresmembers.toArray());
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> zrange(final String key, final long start, final long end) {
try {
return ZsetPair.members(redis.zrange(key, start, end));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long zrem(final String key, final String... members) {
try {
String member = members[0];
String[] ms = new String[members.length - 1];
for (int idx = 1; idx < members.length; ++idx) {
ms[idx - 1] = members[idx];
}
return redis.zrem(key, member, ms);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Double zincrby(final String key, final double score, final String member) {
try {
return Double.parseDouble(redis.zincrby(key, score, member));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long zrank(final String key, final String member) {
try {
return redis.zrank(key, member);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long zrevrank(final String key, final String member) {
try {
return redis.zrevrank(key, member);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> zrevrange(final String key, final long start, final long end) {
try {
return ZsetPair.members(redis.zrevrange(key, start, end));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<Tuple> zrangeWithScores(final String key, final long start, final long end) {
try {
return toTupleSet(redis.zrange(key, start, end, "withscores"));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<Tuple> zrevrangeWithScores(final String key, final long start, final long end) {
try {
return toTupleSet(redis.zrevrange(key, start, end, "withscores"));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long zcard(final String key) {
try {
return redis.zcard(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Double zscore(final String key, final String member) {
try {
return redis.zscore(key, member);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String watch(final String... keys) {
try {
for (String key : keys) {
redis.watch(key);
}
return "OK";
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long zcount(final String key, final double min, final double max) {
try {
return redis.zcount(key, min, max);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long zcount(final String key, final String min, final String max) {
try {
return redis.zcount(key, Double.parseDouble(min), Double.parseDouble(max));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> zrangeByScore(final String key, final double min, final double max) {
try {
return ZsetPair.members(
redis.zrangebyscore(key, String.valueOf(min), String.valueOf(max)));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> zrangeByScore(final String key, final String min, final String max) {
try {
return ZsetPair.members(redis.zrangebyscore(key, min, max));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> zrangeByScore(
final String key,
final double min,
final double max,
final int offset,
final int count) {
try {
return ZsetPair.members(
redis.zrangebyscore(
key,
String.valueOf(min),
String.valueOf(max),
"limit",
String.valueOf(offset),
String.valueOf(count)));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> zrangeByScore(
final String key,
final String min,
final String max,
final int offset,
final int count) {
try {
return ZsetPair.members(
redis.zrangebyscore(
key, min, max, "limit", String.valueOf(offset), String.valueOf(count)));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<Tuple> zrangeByScoreWithScores(
final String key, final double min, final double max) {
try {
return toTupleSet(
redis.zrangebyscore(
key, String.valueOf(min), String.valueOf(max), "withscores"));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<Tuple> zrangeByScoreWithScores(
final String key, final String min, final String max) {
try {
return toTupleSet(redis.zrangebyscore(key, min, max, "withscores"));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<Tuple> zrangeByScoreWithScores(
final String key,
final double min,
final double max,
final int offset,
final int count) {
try {
return toTupleSet(
redis.zrangebyscore(
key,
String.valueOf(min),
String.valueOf(max),
"limit",
String.valueOf(offset),
String.valueOf(count),
"withscores"));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<Tuple> zrangeByScoreWithScores(
final String key,
final String min,
final String max,
final int offset,
final int count) {
try {
return toTupleSet(
redis.zrangebyscore(
key,
min,
max,
"limit",
String.valueOf(offset),
String.valueOf(count),
"withscores"));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> zrevrangeByScore(final String key, final double max, final double min) {
try {
return ZsetPair.members(
redis.zrevrangebyscore(key, String.valueOf(max), String.valueOf(min)));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> zrevrangeByScore(final String key, final String max, final String min) {
try {
return ZsetPair.members(redis.zrevrangebyscore(key, max, min));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> zrevrangeByScore(
final String key,
final double max,
final double min,
final int offset,
final int count) {
try {
return ZsetPair.members(
redis.zrevrangebyscore(
key,
String.valueOf(max),
String.valueOf(min),
"limit",
String.valueOf(offset),
String.valueOf(count)));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<Tuple> zrevrangeByScoreWithScores(
final String key, final double max, final double min) {
try {
return toTupleSet(
redis.zrevrangebyscore(
key, String.valueOf(max), String.valueOf(min), "withscores"));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<Tuple> zrevrangeByScoreWithScores(
final String key,
final double max,
final double min,
final int offset,
final int count) {
try {
return toTupleSet(
redis.zrevrangebyscore(
key,
String.valueOf(max),
String.valueOf(min),
"limit",
String.valueOf(offset),
String.valueOf(count),
"withscores"));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<Tuple> zrevrangeByScoreWithScores(
final String key,
final String max,
final String min,
final int offset,
final int count) {
try {
return toTupleSet(
redis.zrevrangebyscore(
key,
max,
min,
"limit",
String.valueOf(offset),
String.valueOf(count),
"withscores"));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> zrevrangeByScore(
final String key,
final String max,
final String min,
final int offset,
final int count) {
try {
return ZsetPair.members(
redis.zrevrangebyscore(
key, max, min, "limit", String.valueOf(offset), String.valueOf(count)));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<Tuple> zrevrangeByScoreWithScores(
final String key, final String max, final String min) {
try {
return toTupleSet(redis.zrevrangebyscore(key, max, min, "withscores"));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long zremrangeByRank(final String key, final long start, final long end) {
try {
return redis.zremrangebyrank(key, start, end);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long zremrangeByScore(final String key, final double start, final double end) {
try {
return redis.zremrangebyscore(key, String.valueOf(start), String.valueOf(end));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long zremrangeByScore(final String key, final String start, final String end) {
try {
return redis.zremrangebyscore(key, start, end);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long zunionstore(final String dstkey, final String... sets) {
try {
return redis.zunionstore(dstkey, sets.length, sets);
} catch (Exception e) {
throw new JedisException(e);
}
}
    /**
     * SSCAN over a set. The mock ignores {@code params} and the incremental
     * protocol: it scans everything in one pass (count 1000000) and always
     * returns cursor "0", so a single call yields the complete set.
     */
    @Override
    public ScanResult<String> sscan(String key, String cursor, ScanParams params) {
        try {
            org.rarefiedredis.redis.ScanResult<Set<String>> sr =
                    redis.sscan(key, Long.parseLong(cursor), "count", "1000000");
            List<String> list = new ArrayList<>(sr.results);
            return new ScanResult<>("0", list);
        } catch (Exception e) {
            throw new JedisException(e);
        }
    }
    /**
     * HSCAN over a hash. Like sscan(), the mock fetches all entries in a
     * single pass and returns cursor "0", so callers see the scan complete
     * after one call.
     */
    public ScanResult<Entry<String, String>> hscan(final String key, final String cursor) {
        try {
            org.rarefiedredis.redis.ScanResult<Map<String, String>> mockr =
                    redis.hscan(key, Long.parseLong(cursor), "count", "1000000");
            Map<String, String> results = mockr.results;
            List<Entry<String, String>> list = new ArrayList<>(results.entrySet());
            return new ScanResult<>("0", list);
        } catch (Exception e) {
            throw new JedisException(e);
        }
    }
    /**
     * ZSCAN over a sorted set. Fetches every member/score pair in one pass,
     * converts them to Jedis {@link Tuple}s, and returns cursor "0" so the
     * scan terminates after a single call.
     */
    public ScanResult<Tuple> zscan(final String key, final String cursor) {
        try {
            org.rarefiedredis.redis.ScanResult<Set<ZsetPair>> sr =
                    redis.zscan(key, Long.parseLong(cursor), "count", "1000000");
            List<ZsetPair> list = new ArrayList<>(sr.results);
            List<Tuple> tl = new LinkedList<>();
            list.forEach(p -> tl.add(new Tuple(p.member, p.score)));
            return new ScanResult<>("0", tl);
        } catch (Exception e) {
            throw new JedisException(e);
        }
    }
}
| 6,584 |
/*
* Copyright 2020 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.jedis;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import redis.clients.jedis.BitPosParams;
import redis.clients.jedis.GeoCoordinate;
import redis.clients.jedis.GeoRadiusResponse;
import redis.clients.jedis.GeoUnit;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisPool;
import redis.clients.jedis.ListPosition;
import redis.clients.jedis.ScanParams;
import redis.clients.jedis.ScanResult;
import redis.clients.jedis.SortingParams;
import redis.clients.jedis.StreamConsumersInfo;
import redis.clients.jedis.StreamEntry;
import redis.clients.jedis.StreamEntryID;
import redis.clients.jedis.StreamGroupInfo;
import redis.clients.jedis.StreamInfo;
import redis.clients.jedis.StreamPendingEntry;
import redis.clients.jedis.Tuple;
import redis.clients.jedis.commands.JedisCommands;
import redis.clients.jedis.params.GeoRadiusParam;
import redis.clients.jedis.params.SetParams;
import redis.clients.jedis.params.ZAddParams;
import redis.clients.jedis.params.ZIncrByParams;
/** A {@link JedisCommands} implementation that delegates to {@link JedisPool}. */
public class JedisStandalone implements JedisCommands {
private final JedisPool jedisPool;
public JedisStandalone(JedisPool jedisPool) {
this.jedisPool = jedisPool;
}
    /**
     * Borrows a connection from the pool, applies {@code function} to it and
     * returns the result. try-with-resources guarantees the connection is
     * returned to the pool even when the function throws.
     */
    private <R> R executeInJedis(Function<Jedis, R> function) {
        try (Jedis jedis = jedisPool.getResource()) {
            return function.apply(jedis);
        }
    }
@Override
public String set(String key, String value) {
return executeInJedis(jedis -> jedis.set(key, value));
}
@Override
public String set(String key, String value, SetParams params) {
return executeInJedis(jedis -> jedis.set(key, value, params));
}
@Override
public String get(String key) {
return executeInJedis(jedis -> jedis.get(key));
}
@Override
public Boolean exists(String key) {
return executeInJedis(jedis -> jedis.exists(key));
}
@Override
public Long persist(String key) {
return executeInJedis(jedis -> jedis.persist(key));
}
@Override
public String type(String key) {
return executeInJedis(jedis -> jedis.type(key));
}
@Override
public byte[] dump(String key) {
return executeInJedis(jedis -> jedis.dump(key));
}
@Override
public String restore(String key, int ttl, byte[] serializedValue) {
return executeInJedis(jedis -> jedis.restore(key, ttl, serializedValue));
}
@Override
public String restoreReplace(String key, int ttl, byte[] serializedValue) {
return executeInJedis(jedis -> jedis.restoreReplace(key, ttl, serializedValue));
}
@Override
public Long expire(String key, int seconds) {
return executeInJedis(jedis -> jedis.expire(key, seconds));
}
@Override
public Long pexpire(String key, long milliseconds) {
return executeInJedis(jedis -> jedis.pexpire(key, milliseconds));
}
@Override
public Long expireAt(String key, long unixTime) {
return executeInJedis(jedis -> jedis.expireAt(key, unixTime));
}
@Override
public Long pexpireAt(String key, long millisecondsTimestamp) {
return executeInJedis(jedis -> jedis.pexpireAt(key, millisecondsTimestamp));
}
@Override
public Long ttl(String key) {
return executeInJedis(jedis -> jedis.ttl(key));
}
@Override
public Long pttl(String key) {
return executeInJedis(jedis -> jedis.pttl(key));
}
@Override
public Long touch(String key) {
return executeInJedis(jedis -> jedis.touch(key));
}
@Override
public Boolean setbit(String key, long offset, boolean value) {
return executeInJedis(jedis -> jedis.setbit(key, offset, value));
}
@Override
public Boolean setbit(String key, long offset, String value) {
return executeInJedis(jedis -> jedis.setbit(key, offset, value));
}
@Override
public Boolean getbit(String key, long offset) {
return executeInJedis(jedis -> jedis.getbit(key, offset));
}
@Override
public Long setrange(String key, long offset, String value) {
return executeInJedis(jedis -> jedis.setrange(key, offset, value));
}
@Override
public String getrange(String key, long startOffset, long endOffset) {
return executeInJedis(jedis -> jedis.getrange(key, startOffset, endOffset));
}
@Override
public String getSet(String key, String value) {
return executeInJedis(jedis -> jedis.getSet(key, value));
}
@Override
public Long setnx(String key, String value) {
return executeInJedis(jedis -> jedis.setnx(key, value));
}
@Override
public String setex(String key, int seconds, String value) {
return executeInJedis(jedis -> jedis.setex(key, seconds, value));
}
@Override
public String psetex(String key, long milliseconds, String value) {
return executeInJedis(jedis -> jedis.psetex(key, milliseconds, value));
}
@Override
public Long decrBy(String key, long decrement) {
return executeInJedis(jedis -> jedis.decrBy(key, decrement));
}
@Override
public Long decr(String key) {
return executeInJedis(jedis -> jedis.decr(key));
}
@Override
public Long incrBy(String key, long increment) {
return executeInJedis(jedis -> jedis.incrBy(key, increment));
}
@Override
public Double incrByFloat(String key, double increment) {
return executeInJedis(jedis -> jedis.incrByFloat(key, increment));
}
@Override
public Long incr(String key) {
return executeInJedis(jedis -> jedis.incr(key));
}
@Override
public Long append(String key, String value) {
return executeInJedis(jedis -> jedis.append(key, value));
}
@Override
public String substr(String key, int start, int end) {
return executeInJedis(jedis -> jedis.substr(key, start, end));
}
@Override
public Long hset(String key, String field, String value) {
return executeInJedis(jedis -> jedis.hset(key, field, value));
}
@Override
public Long hset(String key, Map<String, String> hash) {
return executeInJedis(jedis -> jedis.hset(key, hash));
}
@Override
public String hget(String key, String field) {
return executeInJedis(jedis -> jedis.hget(key, field));
}
@Override
public Long hsetnx(String key, String field, String value) {
return executeInJedis(jedis -> jedis.hsetnx(key, field, value));
}
@Override
public String hmset(String key, Map<String, String> hash) {
return executeInJedis(jedis -> jedis.hmset(key, hash));
}
@Override
public List<String> hmget(String key, String... fields) {
return executeInJedis(jedis -> jedis.hmget(key, fields));
}
@Override
public Long hincrBy(String key, String field, long value) {
return executeInJedis(jedis -> jedis.hincrBy(key, field, value));
}
@Override
public Double hincrByFloat(String key, String field, double value) {
return executeInJedis(jedis -> jedis.hincrByFloat(key, field, value));
}
@Override
public Boolean hexists(String key, String field) {
return executeInJedis(jedis -> jedis.hexists(key, field));
}
@Override
public Long hdel(String key, String... field) {
return executeInJedis(jedis -> jedis.hdel(key, field));
}
@Override
public Long hlen(String key) {
return executeInJedis(jedis -> jedis.hlen(key));
}
@Override
public Set<String> hkeys(String key) {
return executeInJedis(jedis -> jedis.hkeys(key));
}
@Override
public List<String> hvals(String key) {
return executeInJedis(jedis -> jedis.hvals(key));
}
@Override
public Map<String, String> hgetAll(String key) {
return executeInJedis(jedis -> jedis.hgetAll(key));
}
@Override
public Long rpush(String key, String... string) {
return executeInJedis(jedis -> jedis.rpush(key));
}
    // ---- List and set commands: thin delegates via executeInJedis. ----
    @Override
    public Long lpush(String key, String... string) {
        return executeInJedis(jedis -> jedis.lpush(key, string));
    }
    @Override
    public Long llen(String key) {
        return executeInJedis(jedis -> jedis.llen(key));
    }
    @Override
    public List<String> lrange(String key, long start, long stop) {
        return executeInJedis(jedis -> jedis.lrange(key, start, stop));
    }
    @Override
    public String ltrim(String key, long start, long stop) {
        return executeInJedis(jedis -> jedis.ltrim(key, start, stop));
    }
    @Override
    public String lindex(String key, long index) {
        return executeInJedis(jedis -> jedis.lindex(key, index));
    }
    @Override
    public String lset(String key, long index, String value) {
        return executeInJedis(jedis -> jedis.lset(key, index, value));
    }
    @Override
    public Long lrem(String key, long count, String value) {
        return executeInJedis(jedis -> jedis.lrem(key, count, value));
    }
    @Override
    public String lpop(String key) {
        return executeInJedis(jedis -> jedis.lpop(key));
    }
    @Override
    public String rpop(String key) {
        return executeInJedis(jedis -> jedis.rpop(key));
    }
    @Override
    public Long sadd(String key, String... member) {
        return executeInJedis(jedis -> jedis.sadd(key, member));
    }
    @Override
    public Set<String> smembers(String key) {
        return executeInJedis(jedis -> jedis.smembers(key));
    }
    @Override
    public Long srem(String key, String... member) {
        return executeInJedis(jedis -> jedis.srem(key, member));
    }
    @Override
    public String spop(String key) {
        return executeInJedis(jedis -> jedis.spop(key));
    }
    @Override
    public Set<String> spop(String key, long count) {
        return executeInJedis(jedis -> jedis.spop(key, count));
    }
    @Override
    public Long scard(String key) {
        return executeInJedis(jedis -> jedis.scard(key));
    }
    @Override
    public Boolean sismember(String key, String member) {
        return executeInJedis(jedis -> jedis.sismember(key, member));
    }
    @Override
    public String srandmember(String key) {
        return executeInJedis(jedis -> jedis.srandmember(key));
    }
    @Override
    public List<String> srandmember(String key, int count) {
        return executeInJedis(jedis -> jedis.srandmember(key, count));
    }
    @Override
    public Long strlen(String key) {
        return executeInJedis(jedis -> jedis.strlen(key));
    }
    // ---- Sorted-set commands: thin delegates via executeInJedis. ----
    @Override
    public Long zadd(String key, double score, String member) {
        return executeInJedis(jedis -> jedis.zadd(key, score, member));
    }
    @Override
    public Long zadd(String key, double score, String member, ZAddParams params) {
        return executeInJedis(jedis -> jedis.zadd(key, score, member, params));
    }
    @Override
    public Long zadd(String key, Map<String, Double> scoreMembers) {
        return executeInJedis(jedis -> jedis.zadd(key, scoreMembers));
    }
    @Override
    public Long zadd(String key, Map<String, Double> scoreMembers, ZAddParams params) {
        return executeInJedis(jedis -> jedis.zadd(key, scoreMembers, params));
    }
    @Override
    public Set<String> zrange(String key, long start, long stop) {
        return executeInJedis(jedis -> jedis.zrange(key, start, stop));
    }
    @Override
    public Long zrem(String key, String... members) {
        return executeInJedis(jedis -> jedis.zrem(key, members));
    }
    @Override
    public Double zincrby(String key, double increment, String member) {
        return executeInJedis(jedis -> jedis.zincrby(key, increment, member));
    }
    @Override
    public Double zincrby(String key, double increment, String member, ZIncrByParams params) {
        return executeInJedis(jedis -> jedis.zincrby(key, increment, member, params));
    }
    @Override
    public Long zrank(String key, String member) {
        return executeInJedis(jedis -> jedis.zrank(key, member));
    }
    @Override
    public Long zrevrank(String key, String member) {
        return executeInJedis(jedis -> jedis.zrevrank(key, member));
    }
    @Override
    public Set<String> zrevrange(String key, long start, long stop) {
        return executeInJedis(jedis -> jedis.zrevrange(key, start, stop));
    }
    @Override
    public Set<Tuple> zrangeWithScores(String key, long start, long stop) {
        return executeInJedis(jedis -> jedis.zrangeWithScores(key, start, stop));
    }
    @Override
    public Set<Tuple> zrevrangeWithScores(String key, long start, long stop) {
        return executeInJedis(jedis -> jedis.zrevrangeWithScores(key, start, stop));
    }
    @Override
    public Long zcard(String key) {
        return executeInJedis(jedis -> jedis.zcard(key));
    }
    @Override
    public Double zscore(String key, String member) {
        return executeInJedis(jedis -> jedis.zscore(key, member));
    }
    @Override
    public Tuple zpopmax(String key) {
        return executeInJedis(jedis -> jedis.zpopmax(key));
    }
    @Override
    public Set<Tuple> zpopmax(String key, int count) {
        return executeInJedis(jedis -> jedis.zpopmax(key, count));
    }
    @Override
    public Tuple zpopmin(String key) {
        return executeInJedis(jedis -> jedis.zpopmin(key));
    }
    @Override
    public Set<Tuple> zpopmin(String key, int count) {
        return executeInJedis(jedis -> jedis.zpopmin(key, count));
    }
    @Override
    public List<String> sort(String key) {
        return executeInJedis(jedis -> jedis.sort(key));
    }
    @Override
    public List<String> sort(String key, SortingParams sortingParameters) {
        return executeInJedis(jedis -> jedis.sort(key, sortingParameters));
    }
    @Override
    public Long zcount(String key, double min, double max) {
        return executeInJedis(jedis -> jedis.zcount(key, min, max));
    }
    @Override
    public Long zcount(String key, String min, String max) {
        return executeInJedis(jedis -> jedis.zcount(key, min, max));
    }
    @Override
    public Set<String> zrangeByScore(String key, double min, double max) {
        return executeInJedis(jedis -> jedis.zrangeByScore(key, min, max));
    }
    @Override
    public Set<String> zrangeByScore(String key, String min, String max) {
        return executeInJedis(jedis -> jedis.zrangeByScore(key, min, max));
    }
    @Override
    public Set<String> zrevrangeByScore(String key, double max, double min) {
        return executeInJedis(jedis -> jedis.zrevrangeByScore(key, max, min));
    }
    @Override
    public Set<String> zrangeByScore(String key, double min, double max, int offset, int count) {
        return executeInJedis(jedis -> jedis.zrangeByScore(key, min, max, offset, count));
    }
    @Override
    public Set<String> zrevrangeByScore(String key, String max, String min) {
        return executeInJedis(jedis -> jedis.zrevrangeByScore(key, max, min));
    }
    @Override
    public Set<String> zrangeByScore(String key, String min, String max, int offset, int count) {
        return executeInJedis(jedis -> jedis.zrangeByScore(key, min, max, offset, count));
    }
    @Override
    public Set<String> zrevrangeByScore(String key, double max, double min, int offset, int count) {
        return executeInJedis(jedis -> jedis.zrevrangeByScore(key, max, min, offset, count));
    }
    @Override
    public Set<Tuple> zrangeByScoreWithScores(String key, double min, double max) {
        return executeInJedis(jedis -> jedis.zrangeByScoreWithScores(key, min, max));
    }
    @Override
    public Set<Tuple> zrevrangeByScoreWithScores(String key, double max, double min) {
        return executeInJedis(jedis -> jedis.zrevrangeByScoreWithScores(key, max, min));
    }
    @Override
    public Set<Tuple> zrangeByScoreWithScores(
            String key, double min, double max, int offset, int count) {
        return executeInJedis(jedis -> jedis.zrangeByScoreWithScores(key, min, max, offset, count));
    }
    @Override
    public Set<String> zrevrangeByScore(String key, String max, String min, int offset, int count) {
        return executeInJedis(jedis -> jedis.zrevrangeByScore(key, max, min, offset, count));
    }
    @Override
    public Set<Tuple> zrangeByScoreWithScores(String key, String min, String max) {
        return executeInJedis(jedis -> jedis.zrangeByScoreWithScores(key, min, max));
    }
    @Override
    public Set<Tuple> zrevrangeByScoreWithScores(String key, String max, String min) {
        return executeInJedis(jedis -> jedis.zrevrangeByScoreWithScores(key, max, min));
    }
    @Override
    public Set<Tuple> zrangeByScoreWithScores(
            String key, String min, String max, int offset, int count) {
        return executeInJedis(jedis -> jedis.zrangeByScoreWithScores(key, min, max, offset, count));
    }
    @Override
    public Set<Tuple> zrevrangeByScoreWithScores(
            String key, double max, double min, int offset, int count) {
        return executeInJedis(
                jedis -> jedis.zrevrangeByScoreWithScores(key, max, min, offset, count));
    }
    @Override
    public Set<Tuple> zrevrangeByScoreWithScores(
            String key, String max, String min, int offset, int count) {
        return executeInJedis(
                jedis -> jedis.zrevrangeByScoreWithScores(key, max, min, offset, count));
    }
    @Override
    public Long zremrangeByRank(String key, long start, long stop) {
        return executeInJedis(jedis -> jedis.zremrangeByRank(key, start, stop));
    }
    @Override
    public Long zremrangeByScore(String key, double min, double max) {
        return executeInJedis(jedis -> jedis.zremrangeByScore(key, min, max));
    }
    @Override
    public Long zremrangeByScore(String key, String min, String max) {
        return executeInJedis(jedis -> jedis.zremrangeByScore(key, min, max));
    }
    @Override
    public Long zlexcount(String key, String min, String max) {
        return executeInJedis(jedis -> jedis.zlexcount(key, min, max));
    }
    @Override
    public Set<String> zrangeByLex(String key, String min, String max) {
        return executeInJedis(jedis -> jedis.zrangeByLex(key, min, max));
    }
    @Override
    public Set<String> zrangeByLex(String key, String min, String max, int offset, int count) {
        return executeInJedis(jedis -> jedis.zrangeByLex(key, min, max, offset, count));
    }
    @Override
    public Set<String> zrevrangeByLex(String key, String max, String min) {
        return executeInJedis(jedis -> jedis.zrevrangeByLex(key, max, min));
    }
    @Override
    public Set<String> zrevrangeByLex(String key, String max, String min, int offset, int count) {
        return executeInJedis(jedis -> jedis.zrevrangeByLex(key, max, min, offset, count));
    }
    @Override
    public Long zremrangeByLex(String key, String min, String max) {
        return executeInJedis(jedis -> jedis.zremrangeByLex(key, min, max));
    }
    // ---- List insert/push, blocking pops, key utilities, bit ops, scans, HyperLogLog. ----
    @Override
    public Long linsert(String key, ListPosition where, String pivot, String value) {
        return executeInJedis(jedis -> jedis.linsert(key, where, pivot, value));
    }
    @Override
    public Long lpushx(String key, String... string) {
        return executeInJedis(jedis -> jedis.lpushx(key, string));
    }
    @Override
    public Long rpushx(String key, String... string) {
        return executeInJedis(jedis -> jedis.rpushx(key, string));
    }
    // NOTE: blpop/brpop block the borrowed connection for up to `timeout` seconds.
    @Override
    public List<String> blpop(int timeout, String key) {
        return executeInJedis(jedis -> jedis.blpop(timeout, key));
    }
    @Override
    public List<String> brpop(int timeout, String key) {
        return executeInJedis(jedis -> jedis.brpop(timeout, key));
    }
    @Override
    public Long del(String key) {
        return executeInJedis(jedis -> jedis.del(key));
    }
    @Override
    public Long unlink(String key) {
        return executeInJedis(jedis -> jedis.unlink(key));
    }
    @Override
    public String echo(String string) {
        return executeInJedis(jedis -> jedis.echo(string));
    }
    @Override
    public Long move(String key, int dbIndex) {
        return executeInJedis(jedis -> jedis.move(key, dbIndex));
    }
    @Override
    public Long bitcount(String key) {
        return executeInJedis(jedis -> jedis.bitcount(key));
    }
    @Override
    public Long bitcount(String key, long start, long end) {
        return executeInJedis(jedis -> jedis.bitcount(key, start, end));
    }
    @Override
    public Long bitpos(String key, boolean value) {
        return executeInJedis(jedis -> jedis.bitpos(key, value));
    }
    @Override
    public Long bitpos(String key, boolean value, BitPosParams params) {
        return executeInJedis(jedis -> jedis.bitpos(key, value, params));
    }
    @Override
    public ScanResult<Map.Entry<String, String>> hscan(String key, String cursor) {
        return executeInJedis(jedis -> jedis.hscan(key, cursor));
    }
    @Override
    public ScanResult<Map.Entry<String, String>> hscan(
            String key, String cursor, ScanParams params) {
        return executeInJedis(jedis -> jedis.hscan(key, cursor, params));
    }
    @Override
    public ScanResult<String> sscan(String key, String cursor) {
        return executeInJedis(jedis -> jedis.sscan(key, cursor));
    }
    @Override
    public ScanResult<Tuple> zscan(String key, String cursor) {
        return executeInJedis(jedis -> jedis.zscan(key, cursor));
    }
    @Override
    public ScanResult<Tuple> zscan(String key, String cursor, ScanParams params) {
        return executeInJedis(jedis -> jedis.zscan(key, cursor, params));
    }
    @Override
    public ScanResult<String> sscan(String key, String cursor, ScanParams params) {
        return executeInJedis(jedis -> jedis.sscan(key, cursor, params));
    }
    @Override
    public Long pfadd(String key, String... elements) {
        return executeInJedis(jedis -> jedis.pfadd(key, elements));
    }
    @Override
    public long pfcount(String key) {
        return executeInJedis(jedis -> jedis.pfcount(key));
    }
    // ---- Geo commands, bitfields, and hash-field length. ----
    @Override
    public Long geoadd(String key, double longitude, double latitude, String member) {
        return executeInJedis(jedis -> jedis.geoadd(key, longitude, latitude, member));
    }
    @Override
    public Long geoadd(String key, Map<String, GeoCoordinate> memberCoordinateMap) {
        return executeInJedis(jedis -> jedis.geoadd(key, memberCoordinateMap));
    }
    @Override
    public Double geodist(String key, String member1, String member2) {
        return executeInJedis(jedis -> jedis.geodist(key, member1, member2));
    }
    @Override
    public Double geodist(String key, String member1, String member2, GeoUnit unit) {
        return executeInJedis(jedis -> jedis.geodist(key, member1, member2, unit));
    }
    @Override
    public List<String> geohash(String key, String... members) {
        return executeInJedis(jedis -> jedis.geohash(key, members));
    }
    @Override
    public List<GeoCoordinate> geopos(String key, String... members) {
        return executeInJedis(jedis -> jedis.geopos(key, members));
    }
    @Override
    public List<GeoRadiusResponse> georadius(
            String key, double longitude, double latitude, double radius, GeoUnit unit) {
        return executeInJedis(jedis -> jedis.georadius(key, longitude, latitude, radius, unit));
    }
    @Override
    public List<GeoRadiusResponse> georadiusReadonly(
            String key, double longitude, double latitude, double radius, GeoUnit unit) {
        return executeInJedis(
                jedis -> jedis.georadiusReadonly(key, longitude, latitude, radius, unit));
    }
    @Override
    public List<GeoRadiusResponse> georadius(
            String key,
            double longitude,
            double latitude,
            double radius,
            GeoUnit unit,
            GeoRadiusParam param) {
        return executeInJedis(
                jedis -> jedis.georadius(key, longitude, latitude, radius, unit, param));
    }
    @Override
    public List<GeoRadiusResponse> georadiusReadonly(
            String key,
            double longitude,
            double latitude,
            double radius,
            GeoUnit unit,
            GeoRadiusParam param) {
        return executeInJedis(
                jedis -> jedis.georadiusReadonly(key, longitude, latitude, radius, unit, param));
    }
    @Override
    public List<GeoRadiusResponse> georadiusByMember(
            String key, String member, double radius, GeoUnit unit) {
        return executeInJedis(jedis -> jedis.georadiusByMember(key, member, radius, unit));
    }
    @Override
    public List<GeoRadiusResponse> georadiusByMemberReadonly(
            String key, String member, double radius, GeoUnit unit) {
        return executeInJedis(jedis -> jedis.georadiusByMemberReadonly(key, member, radius, unit));
    }
    @Override
    public List<GeoRadiusResponse> georadiusByMember(
            String key, String member, double radius, GeoUnit unit, GeoRadiusParam param) {
        return executeInJedis(jedis -> jedis.georadiusByMember(key, member, radius, unit, param));
    }
    @Override
    public List<GeoRadiusResponse> georadiusByMemberReadonly(
            String key, String member, double radius, GeoUnit unit, GeoRadiusParam param) {
        return executeInJedis(
                jedis -> jedis.georadiusByMemberReadonly(key, member, radius, unit, param));
    }
    @Override
    public List<Long> bitfield(String key, String... arguments) {
        return executeInJedis(jedis -> jedis.bitfield(key, arguments));
    }
    @Override
    public List<Long> bitfieldReadonly(String key, String... arguments) {
        return executeInJedis(jedis -> jedis.bitfieldReadonly(key, arguments));
    }
    @Override
    public Long hstrlen(String key, String field) {
        return executeInJedis(jedis -> jedis.hstrlen(key, field));
    }
    // ---- Stream commands: XADD/XRANGE and consumer-group management. ----
    @Override
    public StreamEntryID xadd(String key, StreamEntryID id, Map<String, String> hash) {
        return executeInJedis(jedis -> jedis.xadd(key, id, hash));
    }
    @Override
    public StreamEntryID xadd(
            String key,
            StreamEntryID id,
            Map<String, String> hash,
            long maxLen,
            boolean approximateLength) {
        return executeInJedis(jedis -> jedis.xadd(key, id, hash, maxLen, approximateLength));
    }
    @Override
    public Long xlen(String key) {
        return executeInJedis(jedis -> jedis.xlen(key));
    }
    @Override
    public List<StreamEntry> xrange(String key, StreamEntryID start, StreamEntryID end, int count) {
        return executeInJedis(jedis -> jedis.xrange(key, start, end, count));
    }
    @Override
    public List<StreamEntry> xrevrange(
            String key, StreamEntryID end, StreamEntryID start, int count) {
        return executeInJedis(jedis -> jedis.xrevrange(key, end, start, count));
    }
    @Override
    public long xack(String key, String group, StreamEntryID... ids) {
        return executeInJedis(jedis -> jedis.xack(key, group, ids));
    }
    @Override
    public String xgroupCreate(String key, String groupname, StreamEntryID id, boolean makeStream) {
        return executeInJedis(jedis -> jedis.xgroupCreate(key, groupname, id, makeStream));
    }
    @Override
    public String xgroupSetID(String key, String groupname, StreamEntryID id) {
        return executeInJedis(jedis -> jedis.xgroupSetID(key, groupname, id));
    }
    @Override
    public long xgroupDestroy(String key, String groupname) {
        return executeInJedis(jedis -> jedis.xgroupDestroy(key, groupname));
    }
@Override
public Long xgroupDelConsumer(String key, String groupname, String consumername) {
return executeInJedis(jedis -> jedis.hsetnx(key, groupname, consumername));
}
    // ---- Stream commands: pending-entry inspection, deletion, claiming, and info. ----
    @Override
    public List<StreamPendingEntry> xpending(
            String key,
            String groupname,
            StreamEntryID start,
            StreamEntryID end,
            int count,
            String consumername) {
        return executeInJedis(
                jedis -> jedis.xpending(key, groupname, start, end, count, consumername));
    }
    @Override
    public long xdel(String key, StreamEntryID... ids) {
        return executeInJedis(jedis -> jedis.xdel(key, ids));
    }
    @Override
    public long xtrim(String key, long maxLen, boolean approximate) {
        return executeInJedis(jedis -> jedis.xtrim(key, maxLen, approximate));
    }
    @Override
    public List<StreamEntry> xclaim(
            String key,
            String group,
            String consumername,
            long minIdleTime,
            long newIdleTime,
            int retries,
            boolean force,
            StreamEntryID... ids) {
        return executeInJedis(
                jedis ->
                        jedis.xclaim(
                                key,
                                group,
                                consumername,
                                minIdleTime,
                                newIdleTime,
                                retries,
                                force,
                                ids));
    }
    @Override
    public StreamInfo xinfoStream(String key) {
        return executeInJedis(jedis -> jedis.xinfoStream(key));
    }
    @Override
    public List<StreamGroupInfo> xinfoGroup(String key) {
        return executeInJedis(jedis -> jedis.xinfoGroup(key));
    }
    @Override
    public List<StreamConsumersInfo> xinfoConsumers(String key, String group) {
        return executeInJedis(jedis -> jedis.xinfoConsumers(key, group));
    }
}
| 6,585 |
0 | Create_ds/conductor/redis-persistence/src/main/java/com/netflix/conductor/redis | Create_ds/conductor/redis-persistence/src/main/java/com/netflix/conductor/redis/jedis/JedisSentinel.java | /*
* Copyright 2020 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.jedis;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import redis.clients.jedis.BitPosParams;
import redis.clients.jedis.GeoCoordinate;
import redis.clients.jedis.GeoRadiusResponse;
import redis.clients.jedis.GeoUnit;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisPoolAbstract;
import redis.clients.jedis.ListPosition;
import redis.clients.jedis.ScanParams;
import redis.clients.jedis.ScanResult;
import redis.clients.jedis.SortingParams;
import redis.clients.jedis.StreamConsumersInfo;
import redis.clients.jedis.StreamEntry;
import redis.clients.jedis.StreamEntryID;
import redis.clients.jedis.StreamGroupInfo;
import redis.clients.jedis.StreamInfo;
import redis.clients.jedis.StreamPendingEntry;
import redis.clients.jedis.Tuple;
import redis.clients.jedis.commands.JedisCommands;
import redis.clients.jedis.params.GeoRadiusParam;
import redis.clients.jedis.params.SetParams;
import redis.clients.jedis.params.ZAddParams;
import redis.clients.jedis.params.ZIncrByParams;
public class JedisSentinel implements JedisCommands {
    // Connection pool backing this client (master-aware when constructed from a
    // sentinel pool). Every command borrows a connection with try-with-resources
    // so it is returned to the pool even when the call throws.
    private final JedisPoolAbstract jedisPool;
    public JedisSentinel(JedisPoolAbstract jedisPool) {
        this.jedisPool = jedisPool;
    }
    // ---- Key and string commands ----
    @Override
    public String set(String key, String value) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.set(key, value);
        }
    }
    @Override
    public String set(String key, String value, SetParams params) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.set(key, value, params);
        }
    }
    @Override
    public String get(String key) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.get(key);
        }
    }
    @Override
    public Boolean exists(String key) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.exists(key);
        }
    }
    @Override
    public Long persist(String key) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.persist(key);
        }
    }
    @Override
    public String type(String key) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.type(key);
        }
    }
    @Override
    public byte[] dump(String key) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.dump(key);
        }
    }
    @Override
    public String restore(String key, int ttl, byte[] serializedValue) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.restore(key, ttl, serializedValue);
        }
    }
    @Override
    public String restoreReplace(String key, int ttl, byte[] serializedValue) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.restoreReplace(key, ttl, serializedValue);
        }
    }
    @Override
    public Long expire(String key, int seconds) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.expire(key, seconds);
        }
    }
    @Override
    public Long pexpire(String key, long milliseconds) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.pexpire(key, milliseconds);
        }
    }
    @Override
    public Long expireAt(String key, long unixTime) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.expireAt(key, unixTime);
        }
    }
    @Override
    public Long pexpireAt(String key, long millisecondsTimestamp) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.pexpireAt(key, millisecondsTimestamp);
        }
    }
    @Override
    public Long ttl(String key) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.ttl(key);
        }
    }
    @Override
    public Long pttl(String key) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.pttl(key);
        }
    }
    @Override
    public Long touch(String key) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.touch(key);
        }
    }
    @Override
    public Boolean setbit(String key, long offset, boolean value) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.setbit(key, offset, value);
        }
    }
    @Override
    public Boolean setbit(String key, long offset, String value) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.setbit(key, offset, value);
        }
    }
    @Override
    public Boolean getbit(String key, long offset) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.getbit(key, offset);
        }
    }
    @Override
    public Long setrange(String key, long offset, String value) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.setrange(key, offset, value);
        }
    }
    @Override
    public String getrange(String key, long startOffset, long endOffset) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.getrange(key, startOffset, endOffset);
        }
    }
    @Override
    public String getSet(String key, String value) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.getSet(key, value);
        }
    }
    @Override
    public Long setnx(String key, String value) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.setnx(key, value);
        }
    }
    @Override
    public String setex(String key, int seconds, String value) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.setex(key, seconds, value);
        }
    }
    @Override
    public String psetex(String key, long milliseconds, String value) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.psetex(key, milliseconds, value);
        }
    }
    @Override
    public Long decrBy(String key, long integer) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.decrBy(key, integer);
        }
    }
    @Override
    public Long decr(String key) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.decr(key);
        }
    }
    @Override
    public Long incrBy(String key, long integer) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.incrBy(key, integer);
        }
    }
    @Override
    public Double incrByFloat(String key, double value) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.incrByFloat(key, value);
        }
    }
    @Override
    public Long incr(String key) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.incr(key);
        }
    }
    @Override
    public Long append(String key, String value) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.append(key, value);
        }
    }
    @Override
    public String substr(String key, int start, int end) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.substr(key, start, end);
        }
    }
    @Override
    public Long hset(String key, String field, String value) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.hset(key, field, value);
        }
    }
    @Override
    public Long hset(String key, Map<String, String> hash) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.hset(key, hash);
        }
    }
    // ---- Hash, list, and set commands (pooled-connection delegates) ----
    @Override
    public String hget(String key, String field) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.hget(key, field);
        }
    }
    @Override
    public Long hsetnx(String key, String field, String value) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.hsetnx(key, field, value);
        }
    }
    @Override
    public String hmset(String key, Map<String, String> hash) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.hmset(key, hash);
        }
    }
    @Override
    public List<String> hmget(String key, String... fields) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.hmget(key, fields);
        }
    }
    @Override
    public Long hincrBy(String key, String field, long value) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.hincrBy(key, field, value);
        }
    }
    @Override
    public Double hincrByFloat(String key, String field, double value) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.hincrByFloat(key, field, value);
        }
    }
    @Override
    public Boolean hexists(String key, String field) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.hexists(key, field);
        }
    }
    @Override
    public Long hdel(String key, String... field) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.hdel(key, field);
        }
    }
    @Override
    public Long hlen(String key) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.hlen(key);
        }
    }
    @Override
    public Set<String> hkeys(String key) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.hkeys(key);
        }
    }
    @Override
    public List<String> hvals(String key) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.hvals(key);
        }
    }
    @Override
    public Map<String, String> hgetAll(String key) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.hgetAll(key);
        }
    }
    @Override
    public Long rpush(String key, String... string) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.rpush(key, string);
        }
    }
    @Override
    public Long lpush(String key, String... string) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.lpush(key, string);
        }
    }
    @Override
    public Long llen(String key) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.llen(key);
        }
    }
    @Override
    public List<String> lrange(String key, long start, long end) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.lrange(key, start, end);
        }
    }
    @Override
    public String ltrim(String key, long start, long end) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.ltrim(key, start, end);
        }
    }
    @Override
    public String lindex(String key, long index) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.lindex(key, index);
        }
    }
    @Override
    public String lset(String key, long index, String value) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.lset(key, index, value);
        }
    }
    @Override
    public Long lrem(String key, long count, String value) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.lrem(key, count, value);
        }
    }
    @Override
    public String lpop(String key) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.lpop(key);
        }
    }
    @Override
    public String rpop(String key) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.rpop(key);
        }
    }
    @Override
    public Long sadd(String key, String... member) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.sadd(key, member);
        }
    }
    @Override
    public Set<String> smembers(String key) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.smembers(key);
        }
    }
    @Override
    public Long srem(String key, String... member) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.srem(key, member);
        }
    }
    @Override
    public String spop(String key) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.spop(key);
        }
    }
    @Override
    public Set<String> spop(String key, long count) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.spop(key, count);
        }
    }
    @Override
    public Long scard(String key) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.scard(key);
        }
    }
    @Override
    public Boolean sismember(String key, String member) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.sismember(key, member);
        }
    }
    @Override
    public String srandmember(String key) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.srandmember(key);
        }
    }
    @Override
    public List<String> srandmember(String key, int count) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.srandmember(key, count);
        }
    }
    @Override
    public Long strlen(String key) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.strlen(key);
        }
    }
    // ---- Sorted-set commands (pooled-connection delegates) ----
    @Override
    public Long zadd(String key, double score, String member) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.zadd(key, score, member);
        }
    }
    @Override
    public Long zadd(String key, double score, String member, ZAddParams params) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.zadd(key, score, member, params);
        }
    }
    @Override
    public Long zadd(String key, Map<String, Double> scoreMembers) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.zadd(key, scoreMembers);
        }
    }
    @Override
    public Long zadd(String key, Map<String, Double> scoreMembers, ZAddParams params) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.zadd(key, scoreMembers, params);
        }
    }
    @Override
    public Set<String> zrange(String key, long start, long end) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.zrange(key, start, end);
        }
    }
    @Override
    public Long zrem(String key, String... member) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.zrem(key, member);
        }
    }
    @Override
    public Double zincrby(String key, double score, String member) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.zincrby(key, score, member);
        }
    }
    @Override
    public Double zincrby(String key, double score, String member, ZIncrByParams params) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.zincrby(key, score, member, params);
        }
    }
    @Override
    public Long zrank(String key, String member) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.zrank(key, member);
        }
    }
    @Override
    public Long zrevrank(String key, String member) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.zrevrank(key, member);
        }
    }
    @Override
    public Set<String> zrevrange(String key, long start, long end) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.zrevrange(key, start, end);
        }
    }
    @Override
    public Set<Tuple> zrangeWithScores(String key, long start, long end) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.zrangeWithScores(key, start, end);
        }
    }
    @Override
    public Set<Tuple> zrevrangeWithScores(String key, long start, long end) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.zrevrangeWithScores(key, start, end);
        }
    }
    @Override
    public Long zcard(String key) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.zcard(key);
        }
    }
    @Override
    public Double zscore(String key, String member) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.zscore(key, member);
        }
    }
    @Override
    public Tuple zpopmax(String key) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.zpopmax(key);
        }
    }
    @Override
    public Set<Tuple> zpopmax(String key, int count) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.zpopmax(key, count);
        }
    }
    @Override
    public Tuple zpopmin(String key) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.zpopmin(key);
        }
    }
    @Override
    public Set<Tuple> zpopmin(String key, int count) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.zpopmin(key, count);
        }
    }
    @Override
    public List<String> sort(String key) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.sort(key);
        }
    }
    @Override
    public List<String> sort(String key, SortingParams sortingParameters) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.sort(key, sortingParameters);
        }
    }
    @Override
    public Long zcount(String key, double min, double max) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.zcount(key, min, max);
        }
    }
    @Override
    public Long zcount(String key, String min, String max) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.zcount(key, min, max);
        }
    }
    @Override
    public Set<String> zrangeByScore(String key, double min, double max) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.zrangeByScore(key, min, max);
        }
    }
    @Override
    public Set<String> zrangeByScore(String key, String min, String max) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.zrangeByScore(key, min, max);
        }
    }
    @Override
    public Set<String> zrevrangeByScore(String key, double max, double min) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.zrevrangeByScore(key, max, min);
        }
    }
    @Override
    public Set<String> zrangeByScore(String key, double min, double max, int offset, int count) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.zrangeByScore(key, min, max, offset, count);
        }
    }
    @Override
    public Set<String> zrevrangeByScore(String key, String max, String min) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.zrevrangeByScore(key, max, min);
        }
    }
    @Override
    public Set<String> zrangeByScore(String key, String min, String max, int offset, int count) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.zrangeByScore(key, min, max, offset, count);
        }
    }
@Override
public Set<String> zrevrangeByScore(String key, double max, double min, int offset, int count) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrevrangeByScore(key, max, min, offset, count);
}
}
@Override
public Set<Tuple> zrangeByScoreWithScores(String key, double min, double max) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrangeByScoreWithScores(key, min, max);
}
}
@Override
public Set<Tuple> zrevrangeByScoreWithScores(String key, double max, double min) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrevrangeByScoreWithScores(key, max, min);
}
}
@Override
public Set<Tuple> zrangeByScoreWithScores(
String key, double min, double max, int offset, int count) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrangeByScoreWithScores(key, min, max, offset, count);
}
}
@Override
public Set<String> zrevrangeByScore(String key, String max, String min, int offset, int count) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrevrangeByScore(key, max, min, offset, count);
}
}
@Override
public Set<Tuple> zrangeByScoreWithScores(String key, String min, String max) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrangeByScoreWithScores(key, min, max);
}
}
@Override
public Set<Tuple> zrevrangeByScoreWithScores(String key, String max, String min) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrevrangeByScoreWithScores(key, max, min);
}
}
@Override
public Set<Tuple> zrangeByScoreWithScores(
String key, String min, String max, int offset, int count) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrangeByScoreWithScores(key, min, max, offset, count);
}
}
@Override
public Set<Tuple> zrevrangeByScoreWithScores(
String key, double max, double min, int offset, int count) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrevrangeByScoreWithScores(key, max, min, offset, count);
}
}
@Override
public Set<Tuple> zrevrangeByScoreWithScores(
String key, String max, String min, int offset, int count) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrevrangeByScoreWithScores(key, max, min, offset, count);
}
}
@Override
public Long zremrangeByRank(String key, long start, long end) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zremrangeByRank(key, start, end);
}
}
@Override
public Long zremrangeByScore(String key, double start, double end) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zremrangeByScore(key, start, end);
}
}
@Override
public Long zremrangeByScore(String key, String start, String end) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zremrangeByScore(key, start, end);
}
}
@Override
public Long zlexcount(String key, String min, String max) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zlexcount(key, min, max);
}
}
@Override
public Set<String> zrangeByLex(String key, String min, String max) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrangeByLex(key, min, max);
}
}
@Override
public Set<String> zrangeByLex(String key, String min, String max, int offset, int count) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrangeByLex(key, min, max, offset, count);
}
}
@Override
public Set<String> zrevrangeByLex(String key, String max, String min) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrevrangeByLex(key, max, min);
}
}
@Override
public Set<String> zrevrangeByLex(String key, String max, String min, int offset, int count) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zrevrangeByLex(key, max, min, offset, count);
}
}
@Override
public Long zremrangeByLex(String key, String min, String max) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zremrangeByLex(key, min, max);
}
}
@Override
public Long linsert(String key, ListPosition where, String pivot, String value) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.linsert(key, where, pivot, value);
}
}
@Override
public Long lpushx(String key, String... string) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.lpushx(key, string);
}
}
@Override
public Long rpushx(String key, String... string) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.rpushx(key, string);
}
}
@Override
public List<String> blpop(int timeout, String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.blpop(timeout, key);
}
}
@Override
public List<String> brpop(int timeout, String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.brpop(timeout, key);
}
}
@Override
public Long del(String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.del(key);
}
}
@Override
public Long unlink(String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.unlink(key);
}
}
@Override
public String echo(String string) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.echo(string);
}
}
@Override
public Long move(String key, int dbIndex) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.move(key, dbIndex);
}
}
@Override
public Long bitcount(String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.bitcount(key);
}
}
@Override
public Long bitcount(String key, long start, long end) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.bitcount(key, start, end);
}
}
@Override
public Long bitpos(String key, boolean value) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.bitpos(key, value);
}
}
@Override
public Long bitpos(String key, boolean value, BitPosParams params) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.bitpos(key, value, params);
}
}
@Override
public ScanResult<Entry<String, String>> hscan(String key, String cursor) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.hscan(key, cursor);
}
}
@Override
public ScanResult<Entry<String, String>> hscan(String key, String cursor, ScanParams params) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.hscan(key, cursor, params);
}
}
@Override
public ScanResult<String> sscan(String key, String cursor) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.sscan(key, cursor);
}
}
@Override
public ScanResult<String> sscan(String key, String cursor, ScanParams params) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.sscan(key, cursor, params);
}
}
@Override
public ScanResult<Tuple> zscan(String key, String cursor) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zscan(key, cursor);
}
}
@Override
public ScanResult<Tuple> zscan(String key, String cursor, ScanParams params) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.zscan(key, cursor, params);
}
}
@Override
public Long pfadd(String key, String... elements) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.pfadd(key, elements);
}
}
@Override
public long pfcount(String key) {
try (Jedis jedis = jedisPool.getResource()) {
return jedis.pfcount(key);
}
}
    // ---- Geo, bitfield and stream (XADD/XREAD group) commands. ----
    // Same delegation pattern as above: borrow a pooled connection with
    // try-with-resources and forward the call verbatim.
    @Override
    public Long geoadd(String key, double longitude, double latitude, String member) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.geoadd(key, longitude, latitude, member);
        }
    }
    @Override
    public Long geoadd(String key, Map<String, GeoCoordinate> memberCoordinateMap) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.geoadd(key, memberCoordinateMap);
        }
    }
    @Override
    public Double geodist(String key, String member1, String member2) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.geodist(key, member1, member2);
        }
    }
    @Override
    public Double geodist(String key, String member1, String member2, GeoUnit unit) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.geodist(key, member1, member2, unit);
        }
    }
    @Override
    public List<String> geohash(String key, String... members) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.geohash(key, members);
        }
    }
    @Override
    public List<GeoCoordinate> geopos(String key, String... members) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.geopos(key, members);
        }
    }
    @Override
    public List<GeoRadiusResponse> georadius(
            String key, double longitude, double latitude, double radius, GeoUnit unit) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.georadius(key, longitude, latitude, radius, unit);
        }
    }
    @Override
    public List<GeoRadiusResponse> georadiusReadonly(
            String key, double longitude, double latitude, double radius, GeoUnit unit) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.georadiusReadonly(key, longitude, latitude, radius, unit);
        }
    }
    @Override
    public List<GeoRadiusResponse> georadius(
            String key,
            double longitude,
            double latitude,
            double radius,
            GeoUnit unit,
            GeoRadiusParam param) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.georadius(key, longitude, latitude, radius, unit, param);
        }
    }
    @Override
    public List<GeoRadiusResponse> georadiusReadonly(
            String key,
            double longitude,
            double latitude,
            double radius,
            GeoUnit unit,
            GeoRadiusParam param) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.georadiusReadonly(key, longitude, latitude, radius, unit, param);
        }
    }
    @Override
    public List<GeoRadiusResponse> georadiusByMember(
            String key, String member, double radius, GeoUnit unit) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.georadiusByMember(key, member, radius, unit);
        }
    }
    @Override
    public List<GeoRadiusResponse> georadiusByMemberReadonly(
            String key, String member, double radius, GeoUnit unit) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.georadiusByMemberReadonly(key, member, radius, unit);
        }
    }
    @Override
    public List<GeoRadiusResponse> georadiusByMember(
            String key, String member, double radius, GeoUnit unit, GeoRadiusParam param) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.georadiusByMember(key, member, radius, unit, param);
        }
    }
    @Override
    public List<GeoRadiusResponse> georadiusByMemberReadonly(
            String key, String member, double radius, GeoUnit unit, GeoRadiusParam param) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.georadiusByMemberReadonly(key, member, radius, unit, param);
        }
    }
    @Override
    public List<Long> bitfield(String key, String... arguments) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.bitfield(key, arguments);
        }
    }
    @Override
    public List<Long> bitfieldReadonly(String key, String... arguments) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.bitfieldReadonly(key, arguments);
        }
    }
    @Override
    public Long hstrlen(String key, String field) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.hstrlen(key, field);
        }
    }
    @Override
    public StreamEntryID xadd(String key, StreamEntryID id, Map<String, String> hash) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.xadd(key, id, hash);
        }
    }
    @Override
    public StreamEntryID xadd(
            String key,
            StreamEntryID id,
            Map<String, String> hash,
            long maxLen,
            boolean approximateLength) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.xadd(key, id, hash, maxLen, approximateLength);
        }
    }
    @Override
    public Long xlen(String key) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.xlen(key);
        }
    }
    @Override
    public List<StreamEntry> xrange(String key, StreamEntryID start, StreamEntryID end, int count) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.xrange(key, start, end, count);
        }
    }
    @Override
    public List<StreamEntry> xrevrange(
            String key, StreamEntryID end, StreamEntryID start, int count) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.xrevrange(key, end, start, count);
        }
    }
    @Override
    public long xack(String key, String group, StreamEntryID... ids) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.xack(key, group, ids);
        }
    }
    @Override
    public String xgroupCreate(String key, String groupname, StreamEntryID id, boolean makeStream) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.xgroupCreate(key, groupname, id, makeStream);
        }
    }
    @Override
    public String xgroupSetID(String key, String groupname, StreamEntryID id) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.xgroupSetID(key, groupname, id);
        }
    }
    @Override
    public long xgroupDestroy(String key, String groupname) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.xgroupDestroy(key, groupname);
        }
    }
    @Override
    public Long xgroupDelConsumer(String key, String groupname, String consumername) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.xgroupDelConsumer(key, groupname, consumername);
        }
    }
    @Override
    public List<StreamPendingEntry> xpending(
            String key,
            String groupname,
            StreamEntryID start,
            StreamEntryID end,
            int count,
            String consumername) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.xpending(key, groupname, start, end, count, consumername);
        }
    }
    @Override
    public long xdel(String key, StreamEntryID... ids) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.xdel(key, ids);
        }
    }
    @Override
    public long xtrim(String key, long maxLen, boolean approximate) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.xtrim(key, maxLen, approximate);
        }
    }
    @Override
    public List<StreamEntry> xclaim(
            String key,
            String group,
            String consumername,
            long minIdleTime,
            long newIdleTime,
            int retries,
            boolean force,
            StreamEntryID... ids) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.xclaim(
                    key, group, consumername, minIdleTime, newIdleTime, retries, force, ids);
        }
    }
    @Override
    public StreamInfo xinfoStream(String key) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.xinfoStream(key);
        }
    }
    @Override
    public List<StreamGroupInfo> xinfoGroup(String key) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.xinfoGroup(key);
        }
    }
    @Override
    public List<StreamConsumersInfo> xinfoConsumers(String key, String group) {
        try (Jedis jedis = jedisPool.getResource()) {
            return jedis.xinfoConsumers(key, group);
        }
    }
}
| 6,586 |
0 | Create_ds/conductor/redis-persistence/src/main/java/com/netflix/conductor/redis | Create_ds/conductor/redis-persistence/src/main/java/com/netflix/conductor/redis/jedis/JedisProxy.java | /*
* Copyright 2020 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.jedis;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.context.annotation.Conditional;
import org.springframework.stereotype.Component;
import com.netflix.conductor.redis.config.AnyRedisCondition;
import redis.clients.jedis.ScanParams;
import redis.clients.jedis.ScanResult;
import redis.clients.jedis.Tuple;
import redis.clients.jedis.commands.JedisCommands;
import redis.clients.jedis.params.ZAddParams;
import static com.netflix.conductor.redis.config.RedisCommonConfiguration.DEFAULT_CLIENT_INJECTION_NAME;
/**
 * Proxy for the {@link JedisCommands} object.
 *
 * <p>Thin convenience layer over the injected {@link JedisCommands} client. Most methods delegate
 * directly; the scan-based helpers ({@link #hscan}, {@link #hgetAll}, {@link #hkeys},
 * {@link #smembers}) iterate the full SCAN cursor loop so callers get a complete snapshot even for
 * keys too large for a single reply.
 */
@Component
@Conditional(AnyRedisCondition.class)
public class JedisProxy {

    private static final Logger LOGGER = LoggerFactory.getLogger(JedisProxy.class);

    /** "0" is Redis's sentinel cursor that marks both the start and the end of a SCAN loop. */
    private static final String SCAN_START = ScanParams.SCAN_POINTER_START;

    protected JedisCommands jedisCommands;

    public JedisProxy(@Qualifier(DEFAULT_CLIENT_INJECTION_NAME) JedisCommands jedisCommands) {
        this.jedisCommands = jedisCommands;
    }

    public Set<String> zrange(String key, long start, long end) {
        return jedisCommands.zrange(key, start, end);
    }

    /** Returns up to {@code count} members with score in [0, maxScore], with their scores. */
    public Set<Tuple> zrangeByScoreWithScores(String key, double maxScore, int count) {
        return jedisCommands.zrangeByScoreWithScores(key, 0, maxScore, 0, count);
    }

    /** Returns up to {@code count} members with score in [0, maxScore]. */
    public Set<String> zrangeByScore(String key, double maxScore, int count) {
        return jedisCommands.zrangeByScore(key, 0, maxScore, 0, count);
    }

    public Set<String> zrangeByScore(String key, double minScore, double maxScore, int count) {
        return jedisCommands.zrangeByScore(key, minScore, maxScore, 0, count);
    }

    public ScanResult<Tuple> zscan(String key, int cursor) {
        return jedisCommands.zscan(key, "" + cursor);
    }

    public String get(String key) {
        return jedisCommands.get(key);
    }

    public Long zcard(String key) {
        return jedisCommands.zcard(key);
    }

    public Long del(String key) {
        return jedisCommands.del(key);
    }

    public Long zrem(String key, String member) {
        return jedisCommands.zrem(key, member);
    }

    public long zremrangeByScore(String key, String start, String end) {
        return jedisCommands.zremrangeByScore(key, start, end);
    }

    public long zcount(String key, double min, double max) {
        return jedisCommands.zcount(key, min, max);
    }

    public String set(String key, String value) {
        return jedisCommands.set(key, value);
    }

    public Long setnx(String key, String value) {
        return jedisCommands.setnx(key, value);
    }

    public Long zadd(String key, double score, String member) {
        return jedisCommands.zadd(key, score, member);
    }

    /** ZADD with the NX flag: only adds {@code member} if it is not already in the set. */
    public Long zaddnx(String key, double score, String member) {
        ZAddParams params = ZAddParams.zAddParams().nx();
        return jedisCommands.zadd(key, score, member, params);
    }

    public Long hset(String key, String field, String value) {
        return jedisCommands.hset(key, field, value);
    }

    public Long hsetnx(String key, String field, String value) {
        return jedisCommands.hsetnx(key, field, value);
    }

    public Long hlen(String key) {
        return jedisCommands.hlen(key);
    }

    public String hget(String key, String field) {
        return jedisCommands.hget(key, field);
    }

    public Optional<String> optionalHget(String key, String field) {
        return Optional.ofNullable(jedisCommands.hget(key, field));
    }

    /**
     * Scans the hash at {@code key}, stopping once more than {@code count} entries have been
     * collected (so the result may slightly exceed {@code count}) or the cursor completes.
     *
     * <p>Cursors are kept as Strings: Redis SCAN cursors are unsigned 64-bit values and can
     * overflow an {@code int}.
     */
    public Map<String, String> hscan(String key, int count) {
        Map<String, String> m = new HashMap<>();
        String cursor = SCAN_START;
        do {
            ScanResult<Entry<String, String>> scanResult = jedisCommands.hscan(key, cursor);
            cursor = scanResult.getCursor();
            for (Entry<String, String> r : scanResult.getResult()) {
                m.put(r.getKey(), r.getValue());
            }
            if (m.size() > count) {
                break;
            }
        } while (!SCAN_START.equals(cursor));
        return m;
    }

    /**
     * Returns the entire hash at {@code key}, assembled via HSCAN rather than a single HGETALL so
     * very large hashes do not block the server with one monolithic reply.
     */
    public Map<String, String> hgetAll(String key) {
        Map<String, String> m = new HashMap<>();
        String cursor = SCAN_START;
        do {
            ScanResult<Entry<String, String>> scanResult = jedisCommands.hscan(key, cursor);
            cursor = scanResult.getCursor();
            for (Entry<String, String> r : scanResult.getResult()) {
                m.put(r.getKey(), r.getValue());
            }
        } while (!SCAN_START.equals(cursor));
        return m;
    }

    public List<String> hvals(String key) {
        LOGGER.trace("hvals {}", key);
        return jedisCommands.hvals(key);
    }

    /** Returns all field names of the hash at {@code key}, assembled via HSCAN. */
    public Set<String> hkeys(String key) {
        LOGGER.trace("hkeys {}", key);
        Set<String> keys = new HashSet<>();
        String cursor = SCAN_START;
        do {
            ScanResult<Entry<String, String>> sr = jedisCommands.hscan(key, cursor);
            cursor = sr.getCursor();
            List<Entry<String, String>> result = sr.getResult();
            for (Entry<String, String> e : result) {
                keys.add(e.getKey());
            }
        } while (!SCAN_START.equals(cursor));
        return keys;
    }

    public Long hdel(String key, String... fields) {
        // Cast to Object so SLF4J logs the whole array; indexing fields[0] would throw
        // ArrayIndexOutOfBoundsException when the varargs array is empty.
        LOGGER.trace("hdel {} {}", key, (Object) fields);
        return jedisCommands.hdel(key, fields);
    }

    public Long expire(String key, int seconds) {
        return jedisCommands.expire(key, seconds);
    }

    public Boolean hexists(String key, String field) {
        return jedisCommands.hexists(key, field);
    }

    public Long sadd(String key, String value) {
        LOGGER.trace("sadd {} {}", key, value);
        return jedisCommands.sadd(key, value);
    }

    public Long srem(String key, String member) {
        LOGGER.trace("srem {} {}", key, member);
        return jedisCommands.srem(key, member);
    }

    public boolean sismember(String key, String member) {
        return jedisCommands.sismember(key, member);
    }

    /** Returns all members of the set at {@code key}, assembled via SSCAN in batches of 50. */
    public Set<String> smembers(String key) {
        LOGGER.trace("smembers {}", key);
        Set<String> r = new HashSet<>();
        String cursor = SCAN_START;
        ScanParams sp = new ScanParams();
        sp.count(50);
        do {
            ScanResult<String> scanResult = jedisCommands.sscan(key, cursor, sp);
            cursor = scanResult.getCursor();
            r.addAll(scanResult.getResult());
        } while (!SCAN_START.equals(cursor));
        return r;
    }

    public Long scard(String key) {
        return jedisCommands.scard(key);
    }
}
| 6,587 |
0 | Create_ds/conductor/redis-persistence/src/main/java/com/netflix/conductor/redis | Create_ds/conductor/redis-persistence/src/main/java/com/netflix/conductor/redis/jedis/JedisCluster.java | /*
* Copyright 2020 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.redis.jedis;
import java.util.AbstractMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.stream.Collectors;
import redis.clients.jedis.BitPosParams;
import redis.clients.jedis.GeoCoordinate;
import redis.clients.jedis.GeoRadiusResponse;
import redis.clients.jedis.GeoUnit;
import redis.clients.jedis.ListPosition;
import redis.clients.jedis.ScanParams;
import redis.clients.jedis.ScanResult;
import redis.clients.jedis.SortingParams;
import redis.clients.jedis.StreamConsumersInfo;
import redis.clients.jedis.StreamEntry;
import redis.clients.jedis.StreamEntryID;
import redis.clients.jedis.StreamGroupInfo;
import redis.clients.jedis.StreamInfo;
import redis.clients.jedis.StreamPendingEntry;
import redis.clients.jedis.Tuple;
import redis.clients.jedis.commands.JedisCommands;
import redis.clients.jedis.params.GeoRadiusParam;
import redis.clients.jedis.params.SetParams;
import redis.clients.jedis.params.ZAddParams;
import redis.clients.jedis.params.ZIncrByParams;
public class JedisCluster implements JedisCommands {
private final redis.clients.jedis.JedisCluster jedisCluster;
public JedisCluster(redis.clients.jedis.JedisCluster jedisCluster) {
this.jedisCluster = jedisCluster;
}
@Override
public String set(String key, String value) {
return jedisCluster.set(key, value);
}
@Override
public String set(String key, String value, SetParams params) {
return jedisCluster.set(key, value, params);
}
@Override
public String get(String key) {
return jedisCluster.get(key);
}
@Override
public Boolean exists(String key) {
return jedisCluster.exists(key);
}
@Override
public Long persist(String key) {
return jedisCluster.persist(key);
}
@Override
public String type(String key) {
return jedisCluster.type(key);
}
@Override
public byte[] dump(String key) {
return jedisCluster.dump(key);
}
@Override
public String restore(String key, int ttl, byte[] serializedValue) {
return jedisCluster.restore(key, ttl, serializedValue);
}
@Override
public String restoreReplace(String key, int ttl, byte[] serializedValue) {
throw new UnsupportedOperationException();
}
@Override
public Long expire(String key, int seconds) {
return jedisCluster.expire(key, seconds);
}
@Override
public Long pexpire(String key, long milliseconds) {
return jedisCluster.pexpire(key, milliseconds);
}
@Override
public Long expireAt(String key, long unixTime) {
return jedisCluster.expireAt(key, unixTime);
}
@Override
public Long pexpireAt(String key, long millisecondsTimestamp) {
return jedisCluster.pexpireAt(key, millisecondsTimestamp);
}
@Override
public Long ttl(String key) {
return jedisCluster.ttl(key);
}
@Override
public Long pttl(String key) {
return jedisCluster.pttl(key);
}
@Override
public Long touch(String key) {
return jedisCluster.touch(key);
}
@Override
public Boolean setbit(String key, long offset, boolean value) {
return jedisCluster.setbit(key, offset, value);
}
@Override
public Boolean setbit(String key, long offset, String value) {
return jedisCluster.setbit(key, offset, value);
}
@Override
public Boolean getbit(String key, long offset) {
return jedisCluster.getbit(key, offset);
}
@Override
public Long setrange(String key, long offset, String value) {
return jedisCluster.setrange(key, offset, value);
}
@Override
public String getrange(String key, long startOffset, long endOffset) {
return jedisCluster.getrange(key, startOffset, endOffset);
}
@Override
public String getSet(String key, String value) {
return jedisCluster.getSet(key, value);
}
@Override
public Long setnx(String key, String value) {
return jedisCluster.setnx(key, value);
}
@Override
public String setex(String key, int seconds, String value) {
return jedisCluster.setex(key, seconds, value);
}
@Override
public String psetex(String key, long milliseconds, String value) {
return jedisCluster.psetex(key, milliseconds, value);
}
@Override
public Long decrBy(String key, long integer) {
return jedisCluster.decrBy(key, integer);
}
@Override
public Long decr(String key) {
return jedisCluster.decr(key);
}
@Override
public Long incrBy(String key, long integer) {
return jedisCluster.incrBy(key, integer);
}
@Override
public Double incrByFloat(String key, double value) {
return jedisCluster.incrByFloat(key, value);
}
@Override
public Long incr(String key) {
return jedisCluster.incr(key);
}
@Override
public Long append(String key, String value) {
return jedisCluster.append(key, value);
}
@Override
public String substr(String key, int start, int end) {
return jedisCluster.substr(key, start, end);
}
@Override
public Long hset(String key, String field, String value) {
return jedisCluster.hset(key, field, value);
}
@Override
public Long hset(String key, Map<String, String> hash) {
return jedisCluster.hset(key, hash);
}
@Override
public String hget(String key, String field) {
return jedisCluster.hget(key, field);
}
@Override
public Long hsetnx(String key, String field, String value) {
return jedisCluster.hsetnx(key, field, value);
}
@Override
public String hmset(String key, Map<String, String> hash) {
return jedisCluster.hmset(key, hash);
}
@Override
public List<String> hmget(String key, String... fields) {
return jedisCluster.hmget(key, fields);
}
@Override
public Long hincrBy(String key, String field, long value) {
return jedisCluster.hincrBy(key, field, value);
}
@Override
public Double hincrByFloat(String key, String field, double value) {
return jedisCluster.hincrByFloat(key.getBytes(), field.getBytes(), value);
}
@Override
public Boolean hexists(String key, String field) {
return jedisCluster.hexists(key, field);
}
@Override
public Long hdel(String key, String... field) {
return jedisCluster.hdel(key, field);
}
@Override
public Long hlen(String key) {
return jedisCluster.hlen(key);
}
@Override
public Set<String> hkeys(String key) {
return jedisCluster.hkeys(key);
}
@Override
public List<String> hvals(String key) {
return jedisCluster.hvals(key);
}
@Override
public Map<String, String> hgetAll(String key) {
return jedisCluster.hgetAll(key);
}
@Override
public Long rpush(String key, String... string) {
return jedisCluster.rpush(key, string);
}
@Override
public Long lpush(String key, String... string) {
return jedisCluster.lpush(key, string);
}
@Override
public Long llen(String key) {
return jedisCluster.llen(key);
}
@Override
public List<String> lrange(String key, long start, long end) {
return jedisCluster.lrange(key, start, end);
}
@Override
public String ltrim(String key, long start, long end) {
return jedisCluster.ltrim(key, start, end);
}
@Override
public String lindex(String key, long index) {
return jedisCluster.lindex(key, index);
}
@Override
public String lset(String key, long index, String value) {
return jedisCluster.lset(key, index, value);
}
@Override
public Long lrem(String key, long count, String value) {
return jedisCluster.lrem(key, count, value);
}
@Override
public String lpop(String key) {
return jedisCluster.lpop(key);
}
@Override
public String rpop(String key) {
return jedisCluster.rpop(key);
}
@Override
public Long sadd(String key, String... member) {
return jedisCluster.sadd(key, member);
}
@Override
public Set<String> smembers(String key) {
return jedisCluster.smembers(key);
}
@Override
public Long srem(String key, String... member) {
return jedisCluster.srem(key, member);
}
@Override
public String spop(String key) {
return jedisCluster.spop(key);
}
@Override
public Set<String> spop(String key, long count) {
return jedisCluster.spop(key, count);
}
@Override
public Long scard(String key) {
return jedisCluster.scard(key);
}
@Override
public Boolean sismember(String key, String member) {
return jedisCluster.sismember(key, member);
}
@Override
public String srandmember(String key) {
return jedisCluster.srandmember(key);
}
@Override
public List<String> srandmember(String key, int count) {
return jedisCluster.srandmember(key, count);
}
@Override
public Long strlen(String key) {
return jedisCluster.strlen(key);
}
    // --- Sorted-set core commands (ZADD/ZRANGE/ZRANK/ZPOP*/...) and SORT: pure delegation. ---
    @Override
    public Long zadd(String key, double score, String member) {
        return jedisCluster.zadd(key, score, member);
    }
    @Override
    public Long zadd(String key, double score, String member, ZAddParams params) {
        return jedisCluster.zadd(key, score, member, params);
    }
    /** Bulk ZADD: one score per member in {@code scoreMembers}. */
    @Override
    public Long zadd(String key, Map<String, Double> scoreMembers) {
        return jedisCluster.zadd(key, scoreMembers);
    }
    @Override
    public Long zadd(String key, Map<String, Double> scoreMembers, ZAddParams params) {
        return jedisCluster.zadd(key, scoreMembers, params);
    }
    @Override
    public Set<String> zrange(String key, long start, long end) {
        return jedisCluster.zrange(key, start, end);
    }
    @Override
    public Long zrem(String key, String... member) {
        return jedisCluster.zrem(key, member);
    }
    @Override
    public Double zincrby(String key, double score, String member) {
        return jedisCluster.zincrby(key, score, member);
    }
    @Override
    public Double zincrby(String key, double score, String member, ZIncrByParams params) {
        return jedisCluster.zincrby(key, score, member, params);
    }
    @Override
    public Long zrank(String key, String member) {
        return jedisCluster.zrank(key, member);
    }
    @Override
    public Long zrevrank(String key, String member) {
        return jedisCluster.zrevrank(key, member);
    }
    @Override
    public Set<String> zrevrange(String key, long start, long end) {
        return jedisCluster.zrevrange(key, start, end);
    }
    @Override
    public Set<Tuple> zrangeWithScores(String key, long start, long end) {
        return jedisCluster.zrangeWithScores(key, start, end);
    }
    @Override
    public Set<Tuple> zrevrangeWithScores(String key, long start, long end) {
        return jedisCluster.zrevrangeWithScores(key, start, end);
    }
    @Override
    public Long zcard(String key) {
        return jedisCluster.zcard(key);
    }
    @Override
    public Double zscore(String key, String member) {
        return jedisCluster.zscore(key, member);
    }
    /** Removes and returns the member with the highest score. */
    @Override
    public Tuple zpopmax(String key) {
        return jedisCluster.zpopmax(key);
    }
    @Override
    public Set<Tuple> zpopmax(String key, int count) {
        return jedisCluster.zpopmax(key, count);
    }
    /** Removes and returns the member with the lowest score. */
    @Override
    public Tuple zpopmin(String key) {
        return jedisCluster.zpopmin(key);
    }
    @Override
    public Set<Tuple> zpopmin(String key, int count) {
        return jedisCluster.zpopmin(key, count);
    }
    @Override
    public List<String> sort(String key) {
        return jedisCluster.sort(key);
    }
    @Override
    public List<String> sort(String key, SortingParams sortingParameters) {
        return jedisCluster.sort(key, sortingParameters);
    }
    // --- Sorted-set range-by-score queries. All overloads (double/String bounds,
    // with/without offset+count, forward/reverse, with/without scores) delegate 1:1. ---
    @Override
    public Long zcount(String key, double min, double max) {
        return jedisCluster.zcount(key, min, max);
    }
    /** String-bound variant; accepts Redis interval syntax such as "-inf"/"+inf"/"(1". */
    @Override
    public Long zcount(String key, String min, String max) {
        return jedisCluster.zcount(key, min, max);
    }
    @Override
    public Set<String> zrangeByScore(String key, double min, double max) {
        return jedisCluster.zrangeByScore(key, min, max);
    }
    @Override
    public Set<String> zrangeByScore(String key, String min, String max) {
        return jedisCluster.zrangeByScore(key, min, max);
    }
    @Override
    public Set<String> zrevrangeByScore(String key, double max, double min) {
        return jedisCluster.zrevrangeByScore(key, max, min);
    }
    @Override
    public Set<String> zrangeByScore(String key, double min, double max, int offset, int count) {
        return jedisCluster.zrangeByScore(key, min, max, offset, count);
    }
    @Override
    public Set<String> zrevrangeByScore(String key, String max, String min) {
        return jedisCluster.zrevrangeByScore(key, max, min);
    }
    @Override
    public Set<String> zrangeByScore(String key, String min, String max, int offset, int count) {
        return jedisCluster.zrangeByScore(key, min, max, offset, count);
    }
    @Override
    public Set<String> zrevrangeByScore(String key, double max, double min, int offset, int count) {
        return jedisCluster.zrevrangeByScore(key, max, min, offset, count);
    }
    @Override
    public Set<Tuple> zrangeByScoreWithScores(String key, double min, double max) {
        return jedisCluster.zrangeByScoreWithScores(key, min, max);
    }
    @Override
    public Set<Tuple> zrevrangeByScoreWithScores(String key, double max, double min) {
        return jedisCluster.zrevrangeByScoreWithScores(key, max, min);
    }
    @Override
    public Set<Tuple> zrangeByScoreWithScores(
            String key, double min, double max, int offset, int count) {
        return jedisCluster.zrangeByScoreWithScores(key, min, max, offset, count);
    }
    @Override
    public Set<String> zrevrangeByScore(String key, String max, String min, int offset, int count) {
        return jedisCluster.zrevrangeByScore(key, max, min, offset, count);
    }
    @Override
    public Set<Tuple> zrangeByScoreWithScores(String key, String min, String max) {
        return jedisCluster.zrangeByScoreWithScores(key, min, max);
    }
    @Override
    public Set<Tuple> zrevrangeByScoreWithScores(String key, String max, String min) {
        return jedisCluster.zrevrangeByScoreWithScores(key, max, min);
    }
    @Override
    public Set<Tuple> zrangeByScoreWithScores(
            String key, String min, String max, int offset, int count) {
        return jedisCluster.zrangeByScoreWithScores(key, min, max, offset, count);
    }
    @Override
    public Set<Tuple> zrevrangeByScoreWithScores(
            String key, double max, double min, int offset, int count) {
        return jedisCluster.zrevrangeByScoreWithScores(key, max, min, offset, count);
    }
    @Override
    public Set<Tuple> zrevrangeByScoreWithScores(
            String key, String max, String min, int offset, int count) {
        return jedisCluster.zrevrangeByScoreWithScores(key, max, min, offset, count);
    }
    // --- Sorted-set removal-by-range and lexicographic-range commands: pure delegation. ---
    @Override
    public Long zremrangeByRank(String key, long start, long end) {
        return jedisCluster.zremrangeByRank(key, start, end);
    }
    @Override
    public Long zremrangeByScore(String key, double start, double end) {
        return jedisCluster.zremrangeByScore(key, start, end);
    }
    @Override
    public Long zremrangeByScore(String key, String start, String end) {
        return jedisCluster.zremrangeByScore(key, start, end);
    }
    /** Counts members between lexicographic bounds (Redis "[a"/"(a"/"-"/"+" syntax). */
    @Override
    public Long zlexcount(String key, String min, String max) {
        return jedisCluster.zlexcount(key, min, max);
    }
    @Override
    public Set<String> zrangeByLex(String key, String min, String max) {
        return jedisCluster.zrangeByLex(key, min, max);
    }
    @Override
    public Set<String> zrangeByLex(String key, String min, String max, int offset, int count) {
        return jedisCluster.zrangeByLex(key, min, max, offset, count);
    }
    @Override
    public Set<String> zrevrangeByLex(String key, String max, String min) {
        return jedisCluster.zrevrangeByLex(key, max, min);
    }
    @Override
    public Set<String> zrevrangeByLex(String key, String max, String min, int offset, int count) {
        return jedisCluster.zrevrangeByLex(key, max, min, offset, count);
    }
    @Override
    public Long zremrangeByLex(String key, String min, String max) {
        return jedisCluster.zremrangeByLex(key, min, max);
    }
    // --- Remaining list commands, including the single-key blocking pops: pure delegation. ---
    /** Inserts {@code value} before/after {@code pivot} in the list at {@code key}. */
    @Override
    public Long linsert(String key, ListPosition where, String pivot, String value) {
        return jedisCluster.linsert(key, where, pivot, value);
    }
    /** LPUSH only if the list already exists. */
    @Override
    public Long lpushx(String key, String... string) {
        return jedisCluster.lpushx(key, string);
    }
    /** RPUSH only if the list already exists. */
    @Override
    public Long rpushx(String key, String... string) {
        return jedisCluster.rpushx(key, string);
    }
    /** Blocking LPOP; blocks up to {@code timeout} seconds (0 = forever, per Redis semantics). */
    @Override
    public List<String> blpop(int timeout, String key) {
        return jedisCluster.blpop(timeout, key);
    }
    /** Blocking RPOP; blocks up to {@code timeout} seconds. */
    @Override
    public List<String> brpop(int timeout, String key) {
        return jedisCluster.brpop(timeout, key);
    }
    // --- Key lifecycle and diagnostics: pass-through except where deliberately unsupported. ---
    @Override
    public Long del(String key) {
        return jedisCluster.del(key);
    }
    /** Non-blocking delete (reclaims memory asynchronously on the server). */
    @Override
    public Long unlink(String key) {
        return jedisCluster.unlink(key);
    }
    @Override
    public String echo(String string) {
        return jedisCluster.echo(string);
    }
    // MOVE is intentionally unsupported by this wrapper: it throws instead of failing
    // silently. NOTE(review): presumably because the cluster deployment exposes a single
    // logical database — confirm against the deployment assumptions.
    @Override
    public Long move(String key, int dbIndex) {
        throw new UnsupportedOperationException();
    }
    // --- Bit-level commands. BITCOUNT delegates; BITPOS is intentionally unsupported. ---
    @Override
    public Long bitcount(String key) {
        return jedisCluster.bitcount(key);
    }
    @Override
    public Long bitcount(String key, long start, long end) {
        return jedisCluster.bitcount(key, start, end);
    }
    // Both BITPOS overloads throw rather than delegating; callers must not rely on them.
    @Override
    public Long bitpos(String key, boolean value) {
        throw new UnsupportedOperationException();
    }
    @Override
    public Long bitpos(String key, boolean value, BitPosParams params) {
        throw new UnsupportedOperationException();
    }
@Override
public ScanResult<Entry<String, String>> hscan(String key, String cursor) {
return jedisCluster.hscan(key, cursor);
}
@Override
public ScanResult<Map.Entry<String, String>> hscan(
String key, String cursor, ScanParams params) {
ScanResult<Map.Entry<byte[], byte[]>> scanResult =
jedisCluster.hscan(key.getBytes(), cursor.getBytes(), params);
List<Map.Entry<String, String>> results =
scanResult.getResult().stream()
.map(
entry ->
new AbstractMap.SimpleEntry<>(
new String(entry.getKey()),
new String(entry.getValue())))
.collect(Collectors.toList());
return new ScanResult<>(scanResult.getCursorAsBytes(), results);
}
@Override
public ScanResult<String> sscan(String key, String cursor) {
return jedisCluster.sscan(key, cursor);
}
@Override
public ScanResult<String> sscan(String key, String cursor, ScanParams params) {
ScanResult<byte[]> scanResult =
jedisCluster.sscan(key.getBytes(), cursor.getBytes(), params);
List<String> results =
scanResult.getResult().stream().map(String::new).collect(Collectors.toList());
return new ScanResult<>(scanResult.getCursorAsBytes(), results);
}
@Override
public ScanResult<Tuple> zscan(String key, String cursor) {
return jedisCluster.zscan(key, cursor);
}
@Override
public ScanResult<Tuple> zscan(String key, String cursor, ScanParams params) {
return jedisCluster.zscan(key.getBytes(), cursor.getBytes(), params);
}
    // --- HyperLogLog and basic geo commands: pure delegation. ---
    @Override
    public Long pfadd(String key, String... elements) {
        return jedisCluster.pfadd(key, elements);
    }
    /** Approximate cardinality of the HyperLogLog at {@code key}. */
    @Override
    public long pfcount(String key) {
        return jedisCluster.pfcount(key);
    }
    @Override
    public Long geoadd(String key, double longitude, double latitude, String member) {
        return jedisCluster.geoadd(key, longitude, latitude, member);
    }
    @Override
    public Long geoadd(String key, Map<String, GeoCoordinate> memberCoordinateMap) {
        return jedisCluster.geoadd(key, memberCoordinateMap);
    }
    @Override
    public Double geodist(String key, String member1, String member2) {
        return jedisCluster.geodist(key, member1, member2);
    }
    @Override
    public Double geodist(String key, String member1, String member2, GeoUnit unit) {
        return jedisCluster.geodist(key, member1, member2, unit);
    }
    @Override
    public List<String> geohash(String key, String... members) {
        return jedisCluster.geohash(key, members);
    }
    @Override
    public List<GeoCoordinate> geopos(String key, String... members) {
        return jedisCluster.geopos(key, members);
    }
    // --- GEORADIUS family. Each command comes in a read/write and a *Readonly pair
    // (the readonly form can be served by replicas); all delegate 1:1. ---
    @Override
    public List<GeoRadiusResponse> georadius(
            String key, double longitude, double latitude, double radius, GeoUnit unit) {
        return jedisCluster.georadius(key, longitude, latitude, radius, unit);
    }
    @Override
    public List<GeoRadiusResponse> georadiusReadonly(
            String key, double longitude, double latitude, double radius, GeoUnit unit) {
        return jedisCluster.georadiusReadonly(key, longitude, latitude, radius, unit);
    }
    @Override
    public List<GeoRadiusResponse> georadius(
            String key,
            double longitude,
            double latitude,
            double radius,
            GeoUnit unit,
            GeoRadiusParam param) {
        return jedisCluster.georadius(key, longitude, latitude, radius, unit, param);
    }
    @Override
    public List<GeoRadiusResponse> georadiusReadonly(
            String key,
            double longitude,
            double latitude,
            double radius,
            GeoUnit unit,
            GeoRadiusParam param) {
        return jedisCluster.georadiusReadonly(key, longitude, latitude, radius, unit, param);
    }
    // Same pairs again, but centered on an existing member instead of explicit coordinates.
    @Override
    public List<GeoRadiusResponse> georadiusByMember(
            String key, String member, double radius, GeoUnit unit) {
        return jedisCluster.georadiusByMember(key, member, radius, unit);
    }
    @Override
    public List<GeoRadiusResponse> georadiusByMemberReadonly(
            String key, String member, double radius, GeoUnit unit) {
        return jedisCluster.georadiusByMemberReadonly(key, member, radius, unit);
    }
    @Override
    public List<GeoRadiusResponse> georadiusByMember(
            String key, String member, double radius, GeoUnit unit, GeoRadiusParam param) {
        return jedisCluster.georadiusByMember(key, member, radius, unit, param);
    }
    @Override
    public List<GeoRadiusResponse> georadiusByMemberReadonly(
            String key, String member, double radius, GeoUnit unit, GeoRadiusParam param) {
        return jedisCluster.georadiusByMemberReadonly(key, member, radius, unit, param);
    }
    // --- BITFIELD and HSTRLEN: pure delegation. ---
    @Override
    public List<Long> bitfield(String key, String... arguments) {
        return jedisCluster.bitfield(key, arguments);
    }
    @Override
    public List<Long> bitfieldReadonly(String key, String... arguments) {
        return jedisCluster.bitfieldReadonly(key, arguments);
    }
    /** Length of the value of {@code field} in the hash at {@code key}. */
    @Override
    public Long hstrlen(String key, String field) {
        return jedisCluster.hstrlen(key, field);
    }
    // --- Redis Streams commands (XADD/XRANGE/consumer groups/...): pure delegation. ---
    @Override
    public StreamEntryID xadd(String key, StreamEntryID id, Map<String, String> hash) {
        return jedisCluster.xadd(key, id, hash);
    }
    /** XADD with MAXLEN trimming; {@code approximateLength} enables the "~" form. */
    @Override
    public StreamEntryID xadd(
            String key,
            StreamEntryID id,
            Map<String, String> hash,
            long maxLen,
            boolean approximateLength) {
        return jedisCluster.xadd(key, id, hash, maxLen, approximateLength);
    }
    @Override
    public Long xlen(String key) {
        return jedisCluster.xlen(key);
    }
    @Override
    public List<StreamEntry> xrange(String key, StreamEntryID start, StreamEntryID end, int count) {
        return jedisCluster.xrange(key, start, end, count);
    }
    @Override
    public List<StreamEntry> xrevrange(
            String key, StreamEntryID end, StreamEntryID start, int count) {
        return jedisCluster.xrevrange(key, end, start, count);
    }
    /** Acknowledges processed entries for a consumer group; returns the count acked. */
    @Override
    public long xack(String key, String group, StreamEntryID... ids) {
        return jedisCluster.xack(key, group, ids);
    }
    /** Creates a consumer group; {@code makeStream} also creates the stream if absent. */
    @Override
    public String xgroupCreate(String key, String groupname, StreamEntryID id, boolean makeStream) {
        return jedisCluster.xgroupCreate(key, groupname, id, makeStream);
    }
    @Override
    public String xgroupSetID(String key, String groupname, StreamEntryID id) {
        return jedisCluster.xgroupSetID(key, groupname, id);
    }
    @Override
    public long xgroupDestroy(String key, String groupname) {
        return jedisCluster.xgroupDestroy(key, groupname);
    }
    @Override
    public Long xgroupDelConsumer(String key, String groupname, String consumername) {
        return jedisCluster.xgroupDelConsumer(key, groupname, consumername);
    }
    /** Lists pending (delivered but unacked) entries for a group/consumer. */
    @Override
    public List<StreamPendingEntry> xpending(
            String key,
            String groupname,
            StreamEntryID start,
            StreamEntryID end,
            int count,
            String consumername) {
        return jedisCluster.xpending(key, groupname, start, end, count, consumername);
    }
    @Override
    public long xdel(String key, StreamEntryID... ids) {
        return jedisCluster.xdel(key, ids);
    }
    @Override
    public long xtrim(String key, long maxLen, boolean approximate) {
        return jedisCluster.xtrim(key, maxLen, approximate);
    }
    /** Transfers ownership of pending entries to {@code consumername} (XCLAIM). */
    @Override
    public List<StreamEntry> xclaim(
            String key,
            String group,
            String consumername,
            long minIdleTime,
            long newIdleTime,
            int retries,
            boolean force,
            StreamEntryID... ids) {
        return jedisCluster.xclaim(
                key, group, consumername, minIdleTime, newIdleTime, retries, force, ids);
    }
@Override
public StreamInfo xinfoStream(String key) {
return null;
}
@Override
public List<StreamGroupInfo> xinfoGroup(String key) {
return null;
}
@Override
public List<StreamConsumersInfo> xinfoConsumers(String key, String group) {
return null;
}
}
/*
* Copyright 2020 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor;
import java.util.HashSet;
import java.util.Set;
import java.util.stream.Collectors;
import javax.validation.ConstraintViolation;
/** Shared helpers for Bean Validation assertions in core tests. */
public class TestUtils {
    /**
     * Extracts the human-readable message from each constraint violation.
     *
     * <p>Collects straight to a {@code Set}; the previous implementation built an
     * intermediate {@code List} and copied it into a presized {@code HashSet} for no
     * benefit.
     *
     * @param constraintViolations violations reported by the Bean Validation API
     * @return the set of distinct violation messages
     */
    public static Set<String> getConstraintViolationMessages(
            Set<ConstraintViolation<?>> constraintViolations) {
        return constraintViolations.stream()
                .map(ConstraintViolation::getMessage)
                .collect(Collectors.toSet());
    }
}
/*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.metrics;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.core.dal.ExecutionDAOFacade;
import com.netflix.conductor.dao.QueueDAO;
import com.netflix.conductor.service.MetadataService;
@RunWith(SpringRunner.class)
public class WorkflowMonitorTest {

    @Mock private MetadataService metadataService;
    @Mock private QueueDAO queueDAO;
    @Mock private ExecutionDAOFacade executionDAOFacade;

    private WorkflowMonitor workflowMonitor;

    @Before
    public void beforeEach() {
        workflowMonitor =
                new WorkflowMonitor(metadataService, queueDAO, executionDAOFacade, 1000, Set.of());
    }

    /** Builds a workflow definition carrying only the identity fields these tests need. */
    private WorkflowDef makeDef(String name, int version, String ownerApp) {
        final WorkflowDef def = new WorkflowDef();
        def.setName(name);
        def.setVersion(version);
        def.setOwnerApp(ownerApp);
        return def;
    }

    /**
     * The pending-workflow map should hold one entry per workflow name, and the asserted
     * owner app is the one from the latest version supplied for that name.
     */
    @Test
    public void testPendingWorkflowDataMap() {
        final List<WorkflowDef> definitions =
                List.of(
                        makeDef("test1", 1, null),
                        makeDef("test1", 2, "name1"),
                        makeDef("test2", 1, "first"),
                        makeDef("test2", 2, "mid"),
                        makeDef("test2", 3, "last"));

        final Map<String, String> mapping =
                workflowMonitor.getPendingWorkflowToOwnerAppMap(definitions);

        Assert.assertEquals(2, mapping.keySet().size());
        Assert.assertTrue(mapping.containsKey("test1"));
        Assert.assertTrue(mapping.containsKey("test2"));
        Assert.assertEquals("name1", mapping.get("test1"));
        Assert.assertEquals("last", mapping.get("test2"));
    }
}
/*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.reconciliation;
import java.time.Duration;
import java.util.HashMap;
import java.util.Map;
import org.junit.Before;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.events.EventQueues;
import com.netflix.conductor.core.execution.WorkflowExecutor;
import com.netflix.conductor.core.execution.tasks.*;
import com.netflix.conductor.core.operation.StartWorkflowOperation;
import com.netflix.conductor.core.utils.ParametersUtils;
import com.netflix.conductor.dao.ExecutionDAO;
import com.netflix.conductor.dao.QueueDAO;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import com.fasterxml.jackson.databind.ObjectMapper;
import static com.netflix.conductor.common.metadata.tasks.TaskType.*;
import static org.junit.Assert.*;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@code WorkflowRepairService}: which task states get re-queued, which are
 * deliberately left alone, and how stale SUB_WORKFLOW tasks are reconciled.
 */
public class TestWorkflowRepairService {

    private QueueDAO queueDAO;
    private ExecutionDAO executionDAO;
    private ConductorProperties properties;
    private WorkflowRepairService workflowRepairService;
    private SystemTaskRegistry systemTaskRegistry;

    // All collaborators are Mockito mocks, recreated before each test.
    @Before
    public void setUp() {
        executionDAO = mock(ExecutionDAO.class);
        queueDAO = mock(QueueDAO.class);
        properties = mock(ConductorProperties.class);
        systemTaskRegistry = mock(SystemTaskRegistry.class);
        workflowRepairService =
                new WorkflowRepairService(executionDAO, queueDAO, properties, systemTaskRegistry);
    }

    /** A SCHEDULED simple task missing from its queue must be re-queued. */
    @Test
    public void verifyAndRepairSimpleTaskInScheduledState() {
        TaskModel task = new TaskModel();
        task.setTaskType("SIMPLE");
        task.setStatus(TaskModel.Status.SCHEDULED);
        task.setTaskId("abcd");
        task.setCallbackAfterSeconds(60);
        when(queueDAO.containsMessage(anyString(), anyString())).thenReturn(false);
        assertTrue(workflowRepairService.verifyAndRepairTask(task));
        // Verify that a new queue message is pushed for sync system tasks that fails queue contains
        // check.
        verify(queueDAO, times(1)).push(anyString(), anyString(), anyLong());
    }

    /** An IN_PROGRESS simple task is owned by a worker; repair must not touch the queue. */
    @Test
    public void verifySimpleTaskInProgressState() {
        TaskModel task = new TaskModel();
        task.setTaskType("SIMPLE");
        task.setStatus(TaskModel.Status.IN_PROGRESS);
        task.setTaskId("abcd");
        task.setCallbackAfterSeconds(60);
        when(queueDAO.containsMessage(anyString(), anyString())).thenReturn(false);
        assertFalse(workflowRepairService.verifyAndRepairTask(task));
        // Verify that queue message is never pushed for simple task in IN_PROGRESS state
        verify(queueDAO, never()).containsMessage(anyString(), anyString());
        verify(queueDAO, never()).push(anyString(), anyString(), anyLong());
    }

    /**
     * An async (non-asyncComplete) system task must be re-queued when missing from its
     * queue, in both SCHEDULED and IN_PROGRESS states.
     */
    @Test
    public void verifyAndRepairSystemTask() {
        String taskType = "TEST_SYS_TASK";
        TaskModel task = new TaskModel();
        task.setTaskType(taskType);
        task.setStatus(TaskModel.Status.SCHEDULED);
        task.setTaskId("abcd");
        task.setCallbackAfterSeconds(60);
        when(systemTaskRegistry.isSystemTask("TEST_SYS_TASK")).thenReturn(true);
        // Register a minimal async system task (isAsync=true, isAsyncComplete=false).
        when(systemTaskRegistry.get(taskType))
                .thenReturn(
                        new WorkflowSystemTask("TEST_SYS_TASK") {
                            @Override
                            public boolean isAsync() {
                                return true;
                            }
                            @Override
                            public boolean isAsyncComplete(TaskModel task) {
                                return false;
                            }
                            @Override
                            public void start(
                                    WorkflowModel workflow,
                                    TaskModel task,
                                    WorkflowExecutor executor) {
                                super.start(workflow, task, executor);
                            }
                        });
        when(queueDAO.containsMessage(anyString(), anyString())).thenReturn(false);
        assertTrue(workflowRepairService.verifyAndRepairTask(task));
        // Verify that a new queue message is pushed for tasks that fails queue contains check.
        verify(queueDAO, times(1)).push(anyString(), anyString(), anyLong());

        // Verify a system task in IN_PROGRESS state can be recovered.
        reset(queueDAO);
        task.setStatus(TaskModel.Status.IN_PROGRESS);
        assertTrue(workflowRepairService.verifyAndRepairTask(task));
        // Verify that a new queue message is pushed for async System task in IN_PROGRESS state that
        // fails queue contains check.
        verify(queueDAO, times(1)).push(anyString(), anyString(), anyLong());
    }

    /** Synchronous system tasks (DECISION/SWITCH) are executed inline — never queue-checked. */
    @Test
    public void assertSyncSystemTasksAreNotCheckedAgainstQueue() {
        // Return a Switch task object to init WorkflowSystemTask registry.
        when(systemTaskRegistry.get(TASK_TYPE_DECISION)).thenReturn(new Decision());
        when(systemTaskRegistry.isSystemTask(TASK_TYPE_DECISION)).thenReturn(true);
        when(systemTaskRegistry.get(TASK_TYPE_SWITCH)).thenReturn(new Switch());
        when(systemTaskRegistry.isSystemTask(TASK_TYPE_SWITCH)).thenReturn(true);

        TaskModel task = new TaskModel();
        task.setTaskType(TASK_TYPE_DECISION);
        task.setStatus(TaskModel.Status.SCHEDULED);
        assertFalse(workflowRepairService.verifyAndRepairTask(task));
        // Verify that queue contains is never checked for sync system tasks
        verify(queueDAO, never()).containsMessage(anyString(), anyString());
        // Verify that queue message is never pushed for sync system tasks
        verify(queueDAO, never()).push(anyString(), anyString(), anyLong());

        task = new TaskModel();
        task.setTaskType(TASK_TYPE_SWITCH);
        task.setStatus(TaskModel.Status.SCHEDULED);
        assertFalse(workflowRepairService.verifyAndRepairTask(task));
        // Verify that queue contains is never checked for sync system tasks
        verify(queueDAO, never()).containsMessage(anyString(), anyString());
        // Verify that queue message is never pushed for sync system tasks
        verify(queueDAO, never()).push(anyString(), anyString(), anyLong());
    }

    /** An asyncComplete EVENT task in IN_PROGRESS awaits external completion — no repair. */
    @Test
    public void assertAsyncCompleteInProgressSystemTasksAreNotCheckedAgainstQueue() {
        TaskModel task = new TaskModel();
        task.setTaskType(TASK_TYPE_EVENT);
        task.setStatus(TaskModel.Status.IN_PROGRESS);
        task.setTaskId("abcd");
        task.setCallbackAfterSeconds(60);
        // asyncComplete=true makes Event.isAsyncComplete(task) return true below.
        task.setInputData(Map.of("asyncComplete", true));

        WorkflowSystemTask workflowSystemTask =
                new Event(
                        mock(EventQueues.class),
                        mock(ParametersUtils.class),
                        mock(ObjectMapper.class));

        when(systemTaskRegistry.get(TASK_TYPE_EVENT)).thenReturn(workflowSystemTask);

        assertTrue(workflowSystemTask.isAsyncComplete(task));
        assertFalse(workflowRepairService.verifyAndRepairTask(task));
        // Verify that queue message is never pushed for async complete system tasks
        verify(queueDAO, never()).containsMessage(anyString(), anyString());
        verify(queueDAO, never()).push(anyString(), anyString(), anyLong());
    }

    /** An asyncComplete SUB_WORKFLOW task still in SCHEDULED must be queue-checked and re-queued. */
    @Test
    public void assertAsyncCompleteScheduledSystemTasksAreCheckedAgainstQueue() {
        TaskModel task = new TaskModel();
        task.setTaskType(TASK_TYPE_SUB_WORKFLOW);
        task.setStatus(TaskModel.Status.SCHEDULED);
        task.setTaskId("abcd");
        task.setCallbackAfterSeconds(60);

        WorkflowSystemTask workflowSystemTask =
                new SubWorkflow(new ObjectMapper(), mock(StartWorkflowOperation.class));

        when(systemTaskRegistry.get(TASK_TYPE_SUB_WORKFLOW)).thenReturn(workflowSystemTask);
        when(queueDAO.containsMessage(anyString(), anyString())).thenReturn(false);

        assertTrue(workflowSystemTask.isAsyncComplete(task));
        assertTrue(workflowRepairService.verifyAndRepairTask(task));
        // Verify that queue message is never pushed for async complete system tasks
        verify(queueDAO, times(1)).containsMessage(anyString(), anyString());
        verify(queueDAO, times(1)).push(anyString(), anyString(), anyLong());
    }

    /** A workflow with a parent must have its parent pushed back onto the sweep queue. */
    @Test
    public void verifyAndRepairParentWorkflow() {
        WorkflowModel workflow = new WorkflowModel();
        workflow.setWorkflowId("abcd");
        workflow.setParentWorkflowId("parentWorkflowId");

        when(properties.getWorkflowOffsetTimeout()).thenReturn(Duration.ofSeconds(10));
        when(executionDAO.getWorkflow("abcd", true)).thenReturn(workflow);
        when(queueDAO.containsMessage(anyString(), anyString())).thenReturn(false);

        workflowRepairService.verifyAndRepairWorkflowTasks("abcd");

        verify(queueDAO, times(1)).containsMessage(anyString(), anyString());
        verify(queueDAO, times(1)).push(anyString(), anyString(), anyLong());
    }

    /**
     * An IN_PROGRESS SUB_WORKFLOW task whose sub-workflow already TERMINATED must be
     * CANCELED, with the sub-workflow's output merged into the task's output.
     */
    @Test
    public void assertInProgressSubWorkflowSystemTasksAreCheckedAndRepaired() {
        String subWorkflowId = "subWorkflowId";
        String taskId = "taskId";
        TaskModel task = new TaskModel();
        task.setTaskType(TASK_TYPE_SUB_WORKFLOW);
        task.setStatus(TaskModel.Status.IN_PROGRESS);
        task.setTaskId(taskId);
        task.setCallbackAfterSeconds(60);
        task.setSubWorkflowId(subWorkflowId);
        Map<String, Object> outputMap = new HashMap<>();
        outputMap.put("subWorkflowId", subWorkflowId);
        task.setOutputData(outputMap);

        WorkflowModel subWorkflow = new WorkflowModel();
        subWorkflow.setWorkflowId(subWorkflowId);
        subWorkflow.setStatus(WorkflowModel.Status.TERMINATED);
        subWorkflow.setOutput(Map.of("k1", "v1", "k2", "v2"));

        when(executionDAO.getWorkflow(subWorkflowId, false)).thenReturn(subWorkflow);

        assertTrue(workflowRepairService.verifyAndRepairTask(task));
        // Verify that queue message is never pushed for async complete system tasks
        verify(queueDAO, never()).containsMessage(anyString(), anyString());
        verify(queueDAO, never()).push(anyString(), anyString(), anyLong());

        // Verify the persisted task: CANCELED, sub-workflow id retained, outputs merged.
        ArgumentCaptor<TaskModel> argumentCaptor = ArgumentCaptor.forClass(TaskModel.class);
        verify(executionDAO, times(1)).updateTask(argumentCaptor.capture());
        assertEquals(taskId, argumentCaptor.getValue().getTaskId());
        assertEquals(subWorkflowId, argumentCaptor.getValue().getSubWorkflowId());
        assertEquals(TaskModel.Status.CANCELED, argumentCaptor.getValue().getStatus());
        assertNotNull(argumentCaptor.getValue().getOutputData());
        assertEquals(subWorkflowId, argumentCaptor.getValue().getOutputData().get("subWorkflowId"));
        assertEquals("v1", argumentCaptor.getValue().getOutputData().get("k1"));
        assertEquals("v2", argumentCaptor.getValue().getOutputData().get("k2"));
    }
}
/*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.reconciliation;
import java.time.Duration;
import java.util.List;
import java.util.Optional;
import org.junit.Before;
import org.junit.Test;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.dal.ExecutionDAOFacade;
import com.netflix.conductor.core.execution.WorkflowExecutor;
import com.netflix.conductor.dao.QueueDAO;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.TaskModel.Status;
import com.netflix.conductor.model.WorkflowModel;
import static com.netflix.conductor.core.utils.Utils.DECIDER_QUEUE;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
public class TestWorkflowSweeper {
    // Collaborators are Mockito mocks, re-created before every test.
    private ConductorProperties properties;
    private WorkflowExecutor workflowExecutor;
    private WorkflowRepairService workflowRepairService;
    private QueueDAO queueDAO;
    private ExecutionDAOFacade executionDAOFacade;
    private WorkflowSweeper workflowSweeper;
    // Default postpone offset used across these tests, in seconds (30 minutes).
    private int defaultPostPoneOffSetSeconds = 1800;

    @Before
    public void setUp() {
        properties = mock(ConductorProperties.class);
        workflowExecutor = mock(WorkflowExecutor.class);
        queueDAO = mock(QueueDAO.class);
        workflowRepairService = mock(WorkflowRepairService.class);
        executionDAOFacade = mock(ExecutionDAOFacade.class);
        // System under test, wired with the mocks; the repair service is optional.
        workflowSweeper =
                new WorkflowSweeper(
                        workflowExecutor,
                        Optional.of(workflowRepairService),
                        properties,
                        queueDAO,
                        executionDAOFacade);
    }
@Test
public void testPostponeDurationForHumanTaskType() {
WorkflowModel workflowModel = new WorkflowModel();
workflowModel.setWorkflowId("1");
TaskModel taskModel = new TaskModel();
taskModel.setTaskId("task1");
taskModel.setTaskType(TaskType.TASK_TYPE_HUMAN);
taskModel.setStatus(Status.IN_PROGRESS);
workflowModel.setTasks(List.of(taskModel));
when(properties.getWorkflowOffsetTimeout())
.thenReturn(Duration.ofSeconds(defaultPostPoneOffSetSeconds));
workflowSweeper.unack(workflowModel, defaultPostPoneOffSetSeconds);
verify(queueDAO)
.setUnackTimeout(
DECIDER_QUEUE,
workflowModel.getWorkflowId(),
defaultPostPoneOffSetSeconds * 1000);
}
@Test
public void testPostponeDurationForWaitTaskType() {
WorkflowModel workflowModel = new WorkflowModel();
workflowModel.setWorkflowId("1");
TaskModel taskModel = new TaskModel();
taskModel.setTaskId("task1");
taskModel.setTaskType(TaskType.TASK_TYPE_WAIT);
taskModel.setStatus(Status.IN_PROGRESS);
workflowModel.setTasks(List.of(taskModel));
when(properties.getWorkflowOffsetTimeout())
.thenReturn(Duration.ofSeconds(defaultPostPoneOffSetSeconds));
workflowSweeper.unack(workflowModel, defaultPostPoneOffSetSeconds);
verify(queueDAO)
.setUnackTimeout(
DECIDER_QUEUE,
workflowModel.getWorkflowId(),
defaultPostPoneOffSetSeconds * 1000);
}
    /**
     * A WAIT task with an explicit future wait timeout postpones by that timeout rather
     * than the default offset, truncated to whole seconds ((waitTimeout / 1000) * 1000).
     */
    @Test
    public void testPostponeDurationForWaitTaskTypeWithLongWaitTime() {
        long waitTimeout = 65845;
        WorkflowModel workflowModel = new WorkflowModel();
        workflowModel.setWorkflowId("1");
        TaskModel taskModel = new TaskModel();
        taskModel.setTaskId("task1");
        taskModel.setTaskType(TaskType.TASK_TYPE_WAIT);
        taskModel.setStatus(Status.IN_PROGRESS);
        // Wait timeout is stored as an absolute epoch-millis deadline.
        taskModel.setWaitTimeout(System.currentTimeMillis() + waitTimeout);
        workflowModel.setTasks(List.of(taskModel));
        when(properties.getWorkflowOffsetTimeout())
                .thenReturn(Duration.ofSeconds(defaultPostPoneOffSetSeconds));
        workflowSweeper.unack(workflowModel, defaultPostPoneOffSetSeconds);
        // 65845 ms truncates to 65000 ms.
        verify(queueDAO)
                .setUnackTimeout(
                        DECIDER_QUEUE, workflowModel.getWorkflowId(), (waitTimeout / 1000) * 1000);
    }
@Test
public void testPostponeDurationForWaitTaskTypeWithLessOneSecondWaitTime() {
long waitTimeout = 180;
WorkflowModel workflowModel = new WorkflowModel();
workflowModel.setWorkflowId("1");
TaskModel taskModel = new TaskModel();
taskModel.setTaskId("task1");
taskModel.setTaskType(TaskType.TASK_TYPE_WAIT);
taskModel.setStatus(Status.IN_PROGRESS);
taskModel.setWaitTimeout(System.currentTimeMillis() + waitTimeout);
workflowModel.setTasks(List.of(taskModel));
when(properties.getWorkflowOffsetTimeout())
.thenReturn(Duration.ofSeconds(defaultPostPoneOffSetSeconds));
workflowSweeper.unack(workflowModel, defaultPostPoneOffSetSeconds);
verify(queueDAO)
.setUnackTimeout(
DECIDER_QUEUE, workflowModel.getWorkflowId(), (waitTimeout / 1000) * 1000);
}
@Test
public void testPostponeDurationForWaitTaskTypeWithZeroWaitTime() {
long waitTimeout = 0;
WorkflowModel workflowModel = new WorkflowModel();
workflowModel.setWorkflowId("1");
TaskModel taskModel = new TaskModel();
taskModel.setTaskId("task1");
taskModel.setTaskType(TaskType.TASK_TYPE_WAIT);
taskModel.setStatus(Status.IN_PROGRESS);
taskModel.setWaitTimeout(System.currentTimeMillis() + waitTimeout);
workflowModel.setTasks(List.of(taskModel));
when(properties.getWorkflowOffsetTimeout())
.thenReturn(Duration.ofSeconds(defaultPostPoneOffSetSeconds));
workflowSweeper.unack(workflowModel, defaultPostPoneOffSetSeconds);
verify(queueDAO)
.setUnackTimeout(
DECIDER_QUEUE, workflowModel.getWorkflowId(), (waitTimeout / 1000) * 1000);
}
@Test
public void testPostponeDurationForTaskInProgress() {
WorkflowModel workflowModel = new WorkflowModel();
workflowModel.setWorkflowId("1");
TaskModel taskModel = new TaskModel();
taskModel.setTaskId("task1");
taskModel.setTaskType(TaskType.TASK_TYPE_SIMPLE);
taskModel.setStatus(Status.IN_PROGRESS);
workflowModel.setTasks(List.of(taskModel));
when(properties.getWorkflowOffsetTimeout())
.thenReturn(Duration.ofSeconds(defaultPostPoneOffSetSeconds));
workflowSweeper.unack(workflowModel, defaultPostPoneOffSetSeconds);
verify(queueDAO)
.setUnackTimeout(
DECIDER_QUEUE,
workflowModel.getWorkflowId(),
defaultPostPoneOffSetSeconds * 1000);
}
@Test
public void testPostponeDurationForTaskInProgressWithResponseTimeoutSet() {
long responseTimeout = 200;
WorkflowModel workflowModel = new WorkflowModel();
workflowModel.setWorkflowId("1");
TaskModel taskModel = new TaskModel();
taskModel.setTaskId("task1");
taskModel.setTaskType(TaskType.TASK_TYPE_SIMPLE);
taskModel.setStatus(Status.IN_PROGRESS);
taskModel.setResponseTimeoutSeconds(responseTimeout);
workflowModel.setTasks(List.of(taskModel));
when(properties.getWorkflowOffsetTimeout())
.thenReturn(Duration.ofSeconds(defaultPostPoneOffSetSeconds));
workflowSweeper.unack(workflowModel, defaultPostPoneOffSetSeconds);
verify(queueDAO)
.setUnackTimeout(
DECIDER_QUEUE, workflowModel.getWorkflowId(), (responseTimeout + 1) * 1000);
}
@Test
public void testPostponeDurationForTaskInScheduled() {
WorkflowModel workflowModel = new WorkflowModel();
workflowModel.setWorkflowId("1");
WorkflowDef workflowDef = new WorkflowDef();
workflowModel.setWorkflowDefinition(workflowDef);
TaskModel taskModel = new TaskModel();
taskModel.setTaskId("task1");
taskModel.setTaskType(TaskType.TASK_TYPE_SIMPLE);
taskModel.setStatus(Status.SCHEDULED);
taskModel.setReferenceTaskName("task1");
workflowModel.setTasks(List.of(taskModel));
when(properties.getWorkflowOffsetTimeout())
.thenReturn(Duration.ofSeconds(defaultPostPoneOffSetSeconds));
workflowSweeper.unack(workflowModel, defaultPostPoneOffSetSeconds);
verify(queueDAO)
.setUnackTimeout(
DECIDER_QUEUE,
workflowModel.getWorkflowId(),
defaultPostPoneOffSetSeconds * 1000);
}
@Test
public void testPostponeDurationForTaskInScheduledWithWorkflowTimeoutSet() {
long workflowTimeout = 1800;
WorkflowModel workflowModel = new WorkflowModel();
workflowModel.setWorkflowId("1");
WorkflowDef workflowDef = new WorkflowDef();
workflowDef.setTimeoutSeconds(workflowTimeout);
workflowModel.setWorkflowDefinition(workflowDef);
TaskModel taskModel = new TaskModel();
taskModel.setTaskId("task1");
taskModel.setTaskType(TaskType.TASK_TYPE_SIMPLE);
taskModel.setStatus(Status.SCHEDULED);
workflowModel.setTasks(List.of(taskModel));
when(properties.getWorkflowOffsetTimeout())
.thenReturn(Duration.ofSeconds(defaultPostPoneOffSetSeconds));
workflowSweeper.unack(workflowModel, defaultPostPoneOffSetSeconds);
verify(queueDAO)
.setUnackTimeout(
DECIDER_QUEUE, workflowModel.getWorkflowId(), (workflowTimeout + 1) * 1000);
}
@Test
public void testPostponeDurationForTaskInScheduledWithWorkflowTimeoutSetAndNoPollTimeout() {
long workflowTimeout = 1800;
WorkflowModel workflowModel = new WorkflowModel();
workflowModel.setWorkflowId("1");
WorkflowDef workflowDef = new WorkflowDef();
workflowDef.setTimeoutSeconds(workflowTimeout);
workflowModel.setWorkflowDefinition(workflowDef);
TaskDef taskDef = new TaskDef();
TaskModel taskModel = mock(TaskModel.class);
workflowModel.setTasks(List.of(taskModel));
when(taskModel.getTaskDefinition()).thenReturn(Optional.of(taskDef));
when(taskModel.getStatus()).thenReturn(Status.SCHEDULED);
when(properties.getWorkflowOffsetTimeout())
.thenReturn(Duration.ofSeconds(defaultPostPoneOffSetSeconds));
workflowSweeper.unack(workflowModel, defaultPostPoneOffSetSeconds);
verify(queueDAO)
.setUnackTimeout(
DECIDER_QUEUE, workflowModel.getWorkflowId(), (workflowTimeout + 1) * 1000);
}
@Test
public void testPostponeDurationForTaskInScheduledWithNoWorkflowTimeoutSetAndNoPollTimeout() {
WorkflowModel workflowModel = new WorkflowModel();
workflowModel.setWorkflowId("1");
WorkflowDef workflowDef = new WorkflowDef();
workflowModel.setWorkflowDefinition(workflowDef);
TaskDef taskDef = new TaskDef();
TaskModel taskModel = mock(TaskModel.class);
workflowModel.setTasks(List.of(taskModel));
when(taskModel.getTaskDefinition()).thenReturn(Optional.of(taskDef));
when(taskModel.getStatus()).thenReturn(Status.SCHEDULED);
when(properties.getWorkflowOffsetTimeout())
.thenReturn(Duration.ofSeconds(defaultPostPoneOffSetSeconds));
workflowSweeper.unack(workflowModel, defaultPostPoneOffSetSeconds);
verify(queueDAO)
.setUnackTimeout(
DECIDER_QUEUE,
workflowModel.getWorkflowId(),
defaultPostPoneOffSetSeconds * 1000);
}
@Test
public void testPostponeDurationForTaskInScheduledWithNoPollTimeoutSet() {
WorkflowModel workflowModel = new WorkflowModel();
workflowModel.setWorkflowId("1");
TaskDef taskDef = new TaskDef();
WorkflowDef workflowDef = new WorkflowDef();
workflowModel.setWorkflowDefinition(workflowDef);
TaskModel taskModel = mock(TaskModel.class);
workflowModel.setTasks(List.of(taskModel));
when(taskModel.getStatus()).thenReturn(Status.SCHEDULED);
when(taskModel.getTaskDefinition()).thenReturn(Optional.of(taskDef));
when(properties.getWorkflowOffsetTimeout())
.thenReturn(Duration.ofSeconds(defaultPostPoneOffSetSeconds));
workflowSweeper.unack(workflowModel, defaultPostPoneOffSetSeconds);
verify(queueDAO)
.setUnackTimeout(
DECIDER_QUEUE,
workflowModel.getWorkflowId(),
defaultPostPoneOffSetSeconds * 1000);
}
@Test
public void testPostponeDurationForTaskInScheduledWithPollTimeoutSet() {
int pollTimeout = 200;
WorkflowModel workflowModel = new WorkflowModel();
workflowModel.setWorkflowId("1");
TaskDef taskDef = new TaskDef();
taskDef.setPollTimeoutSeconds(pollTimeout);
TaskModel taskModel = mock(TaskModel.class);
;
workflowModel.setTasks(List.of(taskModel));
when(taskModel.getStatus()).thenReturn(Status.SCHEDULED);
when(taskModel.getTaskDefinition()).thenReturn(Optional.of(taskDef));
when(properties.getWorkflowOffsetTimeout())
.thenReturn(Duration.ofSeconds(defaultPostPoneOffSetSeconds));
workflowSweeper.unack(workflowModel, defaultPostPoneOffSetSeconds);
verify(queueDAO)
.setUnackTimeout(
DECIDER_QUEUE, workflowModel.getWorkflowId(), (pollTimeout + 1) * 1000);
}
@Test
public void testWorkflowOffsetJitter() {
long offset = 45;
for (int i = 0; i < 10; i++) {
long offsetWithJitter = workflowSweeper.workflowOffsetWithJitter(offset);
assertTrue(offsetWithJitter >= 30);
assertTrue(offsetWithJitter <= 60);
}
}
}
| 6,592 |
/*
* Copyright 2021 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.utils;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicReference;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
@ContextConfiguration(classes = {TestObjectMapperConfiguration.class})
@RunWith(SpringRunner.class)
@SuppressWarnings("rawtypes")
@ContextConfiguration(classes = {TestObjectMapperConfiguration.class})
@RunWith(SpringRunner.class)
@SuppressWarnings("rawtypes")
public class ParametersUtilsTest {

    private ParametersUtils parametersUtils;
    private JsonUtils jsonUtils;

    @Autowired private ObjectMapper objectMapper;

    @Before
    public void setup() {
        parametersUtils = new ParametersUtils(objectMapper);
        jsonUtils = new JsonUtils(objectMapper);
    }

    // Basic ${...} substitution: JSONPath expression, plain key lookup, and type preservation.
    @Test
    public void testReplace() throws Exception {
        Map<String, Object> map = new HashMap<>();
        map.put("name", "conductor");
        map.put("version", 2);
        map.put("externalId", "{\"taskRefName\":\"t001\",\"workflowId\":\"w002\"}");
        Map<String, Object> input = new HashMap<>();
        input.put("k1", "${$.externalId}");
        input.put("k4", "${name}");
        input.put("k5", "${version}");
        Object jsonObj = objectMapper.readValue(objectMapper.writeValueAsString(map), Object.class);
        Map<String, Object> replaced = parametersUtils.replace(input, jsonObj);
        assertNotNull(replaced);
        assertEquals("{\"taskRefName\":\"t001\",\"workflowId\":\"w002\"}", replaced.get("k1"));
        assertEquals("conductor", replaced.get("k4"));
        assertEquals(2, replaced.get("k5"));
    }

    // Substitution against a list whose embedded JSON string was expanded by JsonUtils.
    @Test
    public void testReplaceWithArrayExpand() {
        List<Object> list = new LinkedList<>();
        Map<String, Object> map = new HashMap<>();
        map.put("externalId", "[{\"taskRefName\":\"t001\",\"workflowId\":\"w002\"}]");
        map.put("name", "conductor");
        map.put("version", 2);
        list.add(map);
        jsonUtils.expand(list);
        Map<String, Object> input = new HashMap<>();
        input.put("k1", "${$..externalId}");
        input.put("k2", "${$[0].externalId[0].taskRefName}");
        input.put("k3", "${__json_externalId.taskRefName}");
        input.put("k4", "${$[0].name}");
        input.put("k5", "${$[0].version}");
        Map<String, Object> replaced = parametersUtils.replace(input, list);
        assertNotNull(replaced);
        // assertEquals takes (expected, actual) in that order.
        assertEquals("t001", replaced.get("k2"));
        assertNull(replaced.get("k3"));
        assertEquals("conductor", replaced.get("k4"));
        assertEquals(2, replaced.get("k5"));
    }

    // Substitution against a map whose embedded JSON string was expanded by JsonUtils.
    @Test
    public void testReplaceWithMapExpand() {
        Map<String, Object> map = new HashMap<>();
        map.put("externalId", "{\"taskRefName\":\"t001\",\"workflowId\":\"w002\"}");
        map.put("name", "conductor");
        map.put("version", 2);
        jsonUtils.expand(map);
        Map<String, Object> input = new HashMap<>();
        input.put("k1", "${$.externalId}");
        input.put("k2", "${externalId.taskRefName}");
        input.put("k4", "${name}");
        input.put("k5", "${version}");
        Map<String, Object> replaced = parametersUtils.replace(input, map);
        assertNotNull(replaced);
        assertEquals("t001", replaced.get("k2"));
        assertNull(replaced.get("k3"));
        assertEquals("conductor", replaced.get("k4"));
        assertEquals(2, replaced.get("k5"));
    }

    // replace() must be safe to call repeatedly while the payload mutates between calls.
    @Test
    public void testReplaceConcurrent() throws ExecutionException, InterruptedException {
        ExecutorService executorService = Executors.newFixedThreadPool(2);
        AtomicReference<String> generatedId = new AtomicReference<>("test-0");
        Map<String, Object> input = new HashMap<>();
        Map<String, Object> payload = new HashMap<>();
        payload.put("event", "conductor:TEST_EVENT");
        payload.put("someId", generatedId);
        input.put("payload", payload);
        input.put("name", "conductor");
        input.put("version", 2);
        Map<String, Object> inputParams = new HashMap<>();
        inputParams.put("k1", "${payload.someId}");
        inputParams.put("k2", "${name}");
        CompletableFuture.runAsync(
                        () -> {
                            for (int i = 0; i < 10000; i++) {
                                generatedId.set("test-" + i);
                                payload.put("someId", generatedId.get());
                                Object jsonObj;
                                try {
                                    jsonObj =
                                            objectMapper.readValue(
                                                    objectMapper.writeValueAsString(input),
                                                    Object.class);
                                } catch (JsonProcessingException e) {
                                    // Propagate so the failure surfaces through get() as an
                                    // ExecutionException instead of silently passing the test.
                                    throw new RuntimeException(e);
                                }
                                Map<String, Object> replaced =
                                        parametersUtils.replace(inputParams, jsonObj);
                                assertNotNull(replaced);
                                assertEquals(generatedId.get(), replaced.get("k1"));
                                assertEquals("conductor", replaced.get("k2"));
                                assertNull(replaced.get("k3"));
                            }
                        },
                        executorService)
                .get();
        executorService.shutdown();
    }

    // Tests ParametersUtils with Map and List input values, and verifies input map is not mutated
    // by ParametersUtils.
    @Test
    public void testReplaceInputWithMapAndList() throws Exception {
        Map<String, Object> map = new HashMap<>();
        map.put("name", "conductor");
        map.put("version", 2);
        map.put("externalId", "{\"taskRefName\":\"t001\",\"workflowId\":\"w002\"}");
        Map<String, Object> input = new HashMap<>();
        input.put("k1", "${$.externalId}");
        input.put("k2", "${name}");
        input.put("k3", "${version}");
        input.put("k4", "${}");
        input.put("k5", "${ }");
        Map<String, String> mapValue = new HashMap<>();
        mapValue.put("name", "${name}");
        mapValue.put("version", "${version}");
        input.put("map", mapValue);
        List<String> listValue = new ArrayList<>();
        listValue.add("${name}");
        listValue.add("${version}");
        input.put("list", listValue);
        Object jsonObj = objectMapper.readValue(objectMapper.writeValueAsString(map), Object.class);
        Map<String, Object> replaced = parametersUtils.replace(input, jsonObj);
        assertNotNull(replaced);
        // Verify that values are replaced correctly.
        assertEquals("{\"taskRefName\":\"t001\",\"workflowId\":\"w002\"}", replaced.get("k1"));
        assertEquals("conductor", replaced.get("k2"));
        assertEquals(2, replaced.get("k3"));
        assertEquals("", replaced.get("k4"));
        assertEquals("", replaced.get("k5"));
        Map replacedMap = (Map) replaced.get("map");
        assertEquals("conductor", replacedMap.get("name"));
        assertEquals(2, replacedMap.get("version"));
        List replacedList = (List) replaced.get("list");
        assertEquals(2, replacedList.size());
        assertEquals("conductor", replacedList.get(0));
        assertEquals(2, replacedList.get(1));
        // Verify that input map is not mutated
        assertEquals("${$.externalId}", input.get("k1"));
        assertEquals("${name}", input.get("k2"));
        assertEquals("${version}", input.get("k3"));
        Map inputMap = (Map) input.get("map");
        assertEquals("${name}", inputMap.get("name"));
        assertEquals("${version}", inputMap.get("version"));
        List inputList = (List) input.get("list");
        assertEquals(2, inputList.size());
        assertEquals("${name}", inputList.get(0));
        assertEquals("${version}", inputList.get(1));
    }

    // Expressions nested inside other expressions resolve from the inside out.
    @Test
    public void testNestedPathExpressions() throws Exception {
        Map<String, Object> map = new HashMap<>();
        map.put("name", "conductor");
        map.put("index", 1);
        map.put("mapValue", "a");
        map.put("recordIds", List.of(1, 2, 3));
        map.put("map", Map.of("a", List.of(1, 2, 3), "b", List.of(2, 4, 5), "c", List.of(3, 7, 8)));
        Map<String, Object> input = new HashMap<>();
        input.put("k1", "${recordIds[${index}]}");
        input.put("k2", "${map.${mapValue}[${index}]}");
        input.put("k3", "${map.b[${map.${mapValue}[${index}]}]}");
        Object jsonObj = objectMapper.readValue(objectMapper.writeValueAsString(map), Object.class);
        Map<String, Object> replaced = parametersUtils.replace(input, jsonObj);
        assertNotNull(replaced);
        assertEquals(2, replaced.get("k1"));
        assertEquals(2, replaced.get("k2"));
        assertEquals(5, replaced.get("k3"));
    }

    // Line terminators (\n, \r, \r\n) inside templates survive substitution untouched.
    @Test
    public void testReplaceWithLineTerminators() throws Exception {
        Map<String, Object> map = new HashMap<>();
        map.put("name", "conductor");
        map.put("version", 2);
        Map<String, Object> input = new HashMap<>();
        input.put("k1", "Name: ${name}; Version: ${version};");
        input.put("k2", "Name: ${name};\nVersion: ${version};");
        input.put("k3", "Name: ${name};\rVersion: ${version};");
        input.put("k4", "Name: ${name};\r\nVersion: ${version};");
        Object jsonObj = objectMapper.readValue(objectMapper.writeValueAsString(map), Object.class);
        Map<String, Object> replaced = parametersUtils.replace(input, jsonObj);
        assertNotNull(replaced);
        assertEquals("Name: conductor; Version: 2;", replaced.get("k1"));
        assertEquals("Name: conductor;\nVersion: 2;", replaced.get("k2"));
        assertEquals("Name: conductor;\rVersion: 2;", replaced.get("k3"));
        assertEquals("Name: conductor;\r\nVersion: 2;", replaced.get("k4"));
    }

    // A $${...} escape is emitted literally as ${...} instead of being substituted.
    @Test
    public void testReplaceWithEscapedTags() throws Exception {
        Map<String, Object> map = new HashMap<>();
        map.put("someString", "conductor");
        map.put("someNumber", 2);
        Map<String, Object> input = new HashMap<>();
        input.put(
                "k1",
                "${$.someString} $${$.someNumber}${$.someNumber} ${$.someNumber}$${$.someString}");
        input.put("k2", "$${$.someString}afterText");
        input.put("k3", "beforeText$${$.someString}");
        input.put("k4", "$${$.someString} afterText");
        input.put("k5", "beforeText $${$.someString}");
        Map<String, String> mapValue = new HashMap<>();
        mapValue.put("a", "${someString}");
        mapValue.put("b", "${someNumber}");
        mapValue.put("c", "$${someString} ${someNumber}");
        input.put("map", mapValue);
        List<String> listValue = new ArrayList<>();
        listValue.add("${someString}");
        listValue.add("${someNumber}");
        listValue.add("${someString} $${someNumber}");
        input.put("list", listValue);
        Object jsonObj = objectMapper.readValue(objectMapper.writeValueAsString(map), Object.class);
        Map<String, Object> replaced = parametersUtils.replace(input, jsonObj);
        assertNotNull(replaced);
        // Verify that values are replaced correctly.
        assertEquals("conductor ${$.someNumber}2 2${$.someString}", replaced.get("k1"));
        assertEquals("${$.someString}afterText", replaced.get("k2"));
        assertEquals("beforeText${$.someString}", replaced.get("k3"));
        assertEquals("${$.someString} afterText", replaced.get("k4"));
        assertEquals("beforeText ${$.someString}", replaced.get("k5"));
        Map replacedMap = (Map) replaced.get("map");
        assertEquals("conductor", replacedMap.get("a"));
        assertEquals(2, replacedMap.get("b"));
        assertEquals("${someString} 2", replacedMap.get("c"));
        List replacedList = (List) replaced.get("list");
        assertEquals(3, replacedList.size());
        assertEquals("conductor", replacedList.get(0));
        assertEquals(2, replacedList.get(1));
        assertEquals("conductor ${someNumber}", replacedList.get(2));
        // Verify that input map is not mutated
        Map inputMap = (Map) input.get("map");
        assertEquals("${someString}", inputMap.get("a"));
        assertEquals("${someNumber}", inputMap.get("b"));
        assertEquals("$${someString} ${someNumber}", inputMap.get("c"));
        // Verify that input list is not mutated
        List inputList = (List) input.get("list");
        assertEquals(3, inputList.size());
        assertEquals("${someString}", inputList.get(0));
        assertEquals("${someNumber}", inputList.get(1));
        assertEquals("${someString} $${someNumber}", inputList.get(2));
    }

    // A workflow definition without an input template passes params through unchanged.
    @Test
    public void getWorkflowInputHandlesNullInputTemplate() {
        WorkflowDef workflowDef = new WorkflowDef();
        Map<String, Object> inputParams = Map.of("key", "value");
        Map<String, Object> workflowInput =
                parametersUtils.getWorkflowInput(workflowDef, inputParams);
        assertEquals("value", workflowInput.get("key"));
    }

    // Template keys absent from the supplied params are filled from the template.
    @Test
    public void getWorkflowInputFillsInTemplatedFields() {
        WorkflowDef workflowDef = new WorkflowDef();
        workflowDef.setInputTemplate(Map.of("other_key", "other_value"));
        Map<String, Object> inputParams = new HashMap<>(Map.of("key", "value"));
        Map<String, Object> workflowInput =
                parametersUtils.getWorkflowInput(workflowDef, inputParams);
        assertEquals("value", workflowInput.get("key"));
        assertEquals("other_value", workflowInput.get("other_key"));
    }

    // Supplied params take precedence over template defaults for the same key.
    @Test
    public void getWorkflowInputPreservesExistingFieldsIfPopulated() {
        WorkflowDef workflowDef = new WorkflowDef();
        String keyName = "key";
        workflowDef.setInputTemplate(Map.of(keyName, "templated_value"));
        Map<String, Object> inputParams = new HashMap<>(Map.of(keyName, "supplied_value"));
        Map<String, Object> workflowInput =
                parametersUtils.getWorkflowInput(workflowDef, inputParams);
        assertEquals("supplied_value", workflowInput.get(keyName));
    }
}
| 6,593 |
/*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.utils;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.lang3.StringUtils;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import org.springframework.util.unit.DataSize;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.run.ExternalStorageLocation;
import com.netflix.conductor.common.utils.ExternalPayloadStorage;
import com.netflix.conductor.core.config.ConductorProperties;
import com.netflix.conductor.core.exception.TerminateWorkflowException;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import com.fasterxml.jackson.databind.ObjectMapper;
import static com.netflix.conductor.model.TaskModel.Status.FAILED_WITH_TERMINAL_ERROR;
import static org.junit.Assert.*;
import static org.mockito.ArgumentMatchers.*;
import static org.mockito.Mockito.*;
@ContextConfiguration(classes = {TestObjectMapperConfiguration.class})
@RunWith(SpringRunner.class)
@ContextConfiguration(classes = {TestObjectMapperConfiguration.class})
@RunWith(SpringRunner.class)
public class ExternalPayloadStorageUtilsTest {

    private ExternalPayloadStorage externalPayloadStorage;
    private ExternalStorageLocation location;

    @Autowired private ObjectMapper objectMapper;

    // Subject
    private ExternalPayloadStorageUtils externalPayloadStorageUtils;

    // Retained for API compatibility; expected exceptions are now asserted with try/catch so
    // that post-exception state assertions actually execute.
    @Rule public ExpectedException expectedException = ExpectedException.none();

    @Before
    public void setup() {
        externalPayloadStorage = mock(ExternalPayloadStorage.class);
        ConductorProperties properties = mock(ConductorProperties.class);
        location = new ExternalStorageLocation();
        location.setPath("some/test/path");
        when(properties.getWorkflowInputPayloadSizeThreshold())
                .thenReturn(DataSize.ofKilobytes(10L));
        when(properties.getMaxWorkflowInputPayloadSizeThreshold())
                .thenReturn(DataSize.ofKilobytes(10240L));
        when(properties.getWorkflowOutputPayloadSizeThreshold())
                .thenReturn(DataSize.ofKilobytes(10L));
        when(properties.getMaxWorkflowOutputPayloadSizeThreshold())
                .thenReturn(DataSize.ofKilobytes(10240L));
        when(properties.getTaskInputPayloadSizeThreshold()).thenReturn(DataSize.ofKilobytes(10L));
        when(properties.getMaxTaskInputPayloadSizeThreshold())
                .thenReturn(DataSize.ofKilobytes(10240L));
        when(properties.getTaskOutputPayloadSizeThreshold()).thenReturn(DataSize.ofKilobytes(10L));
        when(properties.getMaxTaskOutputPayloadSizeThreshold())
                .thenReturn(DataSize.ofKilobytes(10240L));
        externalPayloadStorageUtils =
                new ExternalPayloadStorageUtils(externalPayloadStorage, properties, objectMapper);
    }

    // Downloading a payload deserializes the stored JSON back into a map.
    @Test
    public void testDownloadPayload() throws IOException {
        String path = "test/payload";
        Map<String, Object> payload = new HashMap<>();
        payload.put("key1", "value1");
        payload.put("key2", 200);
        byte[] payloadBytes = objectMapper.writeValueAsString(payload).getBytes();
        when(externalPayloadStorage.download(path))
                .thenReturn(new ByteArrayInputStream(payloadBytes));
        Map<String, Object> result = externalPayloadStorageUtils.downloadPayload(path);
        assertNotNull(result);
        assertEquals(payload, result);
    }

    // A task input above the threshold is uploaded once and the storage path recorded.
    @SuppressWarnings("unchecked")
    @Test
    public void testUploadTaskPayload() throws IOException {
        AtomicInteger uploadCount = new AtomicInteger(0);
        InputStream stream =
                ExternalPayloadStorageUtilsTest.class.getResourceAsStream("/payload.json");
        Map<String, Object> payload = objectMapper.readValue(stream, Map.class);
        byte[] payloadBytes = objectMapper.writeValueAsString(payload).getBytes();
        when(externalPayloadStorage.getLocation(
                        ExternalPayloadStorage.Operation.WRITE,
                        ExternalPayloadStorage.PayloadType.TASK_INPUT,
                        "",
                        payloadBytes))
                .thenReturn(location);
        doAnswer(
                        invocation -> {
                            uploadCount.incrementAndGet();
                            return null;
                        })
                .when(externalPayloadStorage)
                .upload(anyString(), any(), anyLong());
        TaskModel task = new TaskModel();
        task.setInputData(payload);
        externalPayloadStorageUtils.verifyAndUpload(
                task, ExternalPayloadStorage.PayloadType.TASK_INPUT);
        assertTrue(StringUtils.isNotEmpty(task.getExternalInputPayloadStoragePath()));
        assertFalse(task.getInputData().isEmpty());
        assertEquals(1, uploadCount.get());
        assertNotNull(task.getExternalInputPayloadStoragePath());
    }

    // A workflow output above the threshold is uploaded once and the storage path recorded.
    @SuppressWarnings("unchecked")
    @Test
    public void testUploadWorkflowPayload() throws IOException {
        AtomicInteger uploadCount = new AtomicInteger(0);
        InputStream stream =
                ExternalPayloadStorageUtilsTest.class.getResourceAsStream("/payload.json");
        Map<String, Object> payload = objectMapper.readValue(stream, Map.class);
        byte[] payloadBytes = objectMapper.writeValueAsString(payload).getBytes();
        when(externalPayloadStorage.getLocation(
                        ExternalPayloadStorage.Operation.WRITE,
                        ExternalPayloadStorage.PayloadType.WORKFLOW_OUTPUT,
                        "",
                        payloadBytes))
                .thenReturn(location);
        doAnswer(
                        invocation -> {
                            uploadCount.incrementAndGet();
                            return null;
                        })
                .when(externalPayloadStorage)
                .upload(anyString(), any(), anyLong());
        WorkflowModel workflow = new WorkflowModel();
        WorkflowDef def = new WorkflowDef();
        def.setName("name");
        def.setVersion(1);
        workflow.setOutput(payload);
        workflow.setWorkflowDefinition(def);
        externalPayloadStorageUtils.verifyAndUpload(
                workflow, ExternalPayloadStorage.PayloadType.WORKFLOW_OUTPUT);
        assertTrue(StringUtils.isNotEmpty(workflow.getExternalOutputPayloadStoragePath()));
        assertFalse(workflow.getOutput().isEmpty());
        assertEquals(1, uploadCount.get());
        assertNotNull(workflow.getExternalOutputPayloadStoragePath());
    }

    // uploadHelper returns the storage location path and performs exactly one upload.
    @Test
    public void testUploadHelper() {
        AtomicInteger uploadCount = new AtomicInteger(0);
        String path = "some/test/path.json";
        ExternalStorageLocation location = new ExternalStorageLocation();
        location.setPath(path);
        when(externalPayloadStorage.getLocation(any(), any(), any(), any())).thenReturn(location);
        doAnswer(
                        invocation -> {
                            uploadCount.incrementAndGet();
                            return null;
                        })
                .when(externalPayloadStorage)
                .upload(anyString(), any(), anyLong());
        assertEquals(
                path,
                externalPayloadStorageUtils.uploadHelper(
                        new byte[] {}, 10L, ExternalPayloadStorage.PayloadType.TASK_OUTPUT));
        assertEquals(1, uploadCount.get());
    }

    // failTask clears the input payload and marks the task terminally failed.
    @Test
    public void testFailTaskWithInputPayload() {
        TaskModel task = new TaskModel();
        task.setInputData(new HashMap<>());
        externalPayloadStorageUtils.failTask(
                task, ExternalPayloadStorage.PayloadType.TASK_INPUT, "error");
        assertNotNull(task);
        assertTrue(task.getInputData().isEmpty());
        assertEquals(FAILED_WITH_TERMINAL_ERROR, task.getStatus());
    }

    // failTask clears the output payload and marks the task terminally failed.
    @Test
    public void testFailTaskWithOutputPayload() {
        TaskModel task = new TaskModel();
        task.setOutputData(new HashMap<>());
        externalPayloadStorageUtils.failTask(
                task, ExternalPayloadStorage.PayloadType.TASK_OUTPUT, "error");
        assertNotNull(task);
        assertTrue(task.getOutputData().isEmpty());
        assertEquals(FAILED_WITH_TERMINAL_ERROR, task.getStatus());
    }

    // failWorkflow throws TerminateWorkflowException; the workflow state is asserted after the
    // throw (the previous ExpectedException-rule version left these assertions unreachable).
    @Test
    public void testFailWorkflowWithInputPayload() {
        WorkflowModel workflow = new WorkflowModel();
        workflow.setInput(new HashMap<>());
        try {
            externalPayloadStorageUtils.failWorkflow(
                    workflow, ExternalPayloadStorage.PayloadType.TASK_INPUT, "error");
            fail("Expected TerminateWorkflowException to be thrown");
        } catch (TerminateWorkflowException expected) {
            // expected
        }
        assertNotNull(workflow);
        assertTrue(workflow.getInput().isEmpty());
        assertEquals(WorkflowModel.Status.FAILED, workflow.getStatus());
    }

    // Same as above, for the output payload variant.
    @Test
    public void testFailWorkflowWithOutputPayload() {
        WorkflowModel workflow = new WorkflowModel();
        workflow.setOutput(new HashMap<>());
        try {
            externalPayloadStorageUtils.failWorkflow(
                    workflow, ExternalPayloadStorage.PayloadType.TASK_OUTPUT, "error");
            fail("Expected TerminateWorkflowException to be thrown");
        } catch (TerminateWorkflowException expected) {
            // expected
        }
        assertNotNull(workflow);
        assertTrue(workflow.getOutput().isEmpty());
        assertEquals(WorkflowModel.Status.FAILED, workflow.getStatus());
    }

    // Payloads above the configured threshold qualify for upload for every payload type.
    @Test
    public void testShouldUpload() {
        Map<String, Object> payload = new HashMap<>();
        payload.put("key1", "value1");
        payload.put("key2", "value2");
        TaskModel task = new TaskModel();
        task.setInputData(payload);
        task.setOutputData(payload);
        WorkflowModel workflow = new WorkflowModel();
        workflow.setInput(payload);
        workflow.setOutput(payload);
        assertTrue(
                externalPayloadStorageUtils.shouldUpload(
                        task, ExternalPayloadStorage.PayloadType.TASK_INPUT));
        assertTrue(
                externalPayloadStorageUtils.shouldUpload(
                        task, ExternalPayloadStorage.PayloadType.TASK_OUTPUT));
        assertTrue(
                externalPayloadStorageUtils.shouldUpload(
                        task, ExternalPayloadStorage.PayloadType.WORKFLOW_INPUT));
        assertTrue(
                externalPayloadStorageUtils.shouldUpload(
                        task, ExternalPayloadStorage.PayloadType.WORKFLOW_OUTPUT));
    }
}
| 6,594 |
/*
* Copyright 2020 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.utils;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.stream.IntStream;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
public class SemaphoreUtilTest {

    /** Once all permits are taken concurrently, no further slot can be acquired. */
    @Test
    public void testBlockAfterAvailablePermitsExhausted() throws Exception {
        int threads = 5;
        ExecutorService executorService = Executors.newFixedThreadPool(threads);
        SemaphoreUtil semaphoreUtil = new SemaphoreUtil(threads);
        try {
            acquireAllSlotsConcurrently(semaphoreUtil, threads, executorService);
            assertEquals(0, semaphoreUtil.availableSlots());
            assertFalse(semaphoreUtil.acquireSlots(1));
        } finally {
            // Release pool threads even if an assertion above fails.
            executorService.shutdown();
        }
    }

    /** Releasing a permit via completeProcessing makes acquisition possible again. */
    @Test
    public void testAllowsPollingWhenPermitBecomesAvailable() throws Exception {
        int threads = 5;
        ExecutorService executorService = Executors.newFixedThreadPool(threads);
        SemaphoreUtil semaphoreUtil = new SemaphoreUtil(threads);
        try {
            acquireAllSlotsConcurrently(semaphoreUtil, threads, executorService);
            assertEquals(0, semaphoreUtil.availableSlots());
            semaphoreUtil.completeProcessing(1);
            assertTrue(semaphoreUtil.availableSlots() > 0);
            assertTrue(semaphoreUtil.acquireSlots(1));
        } finally {
            executorService.shutdown();
        }
    }

    /**
     * Acquires one slot per thread concurrently and blocks until every
     * acquisition attempt has completed.
     */
    private static void acquireAllSlotsConcurrently(
            SemaphoreUtil semaphoreUtil, int threads, ExecutorService executorService)
            throws Exception {
        List<CompletableFuture<Void>> futures = new ArrayList<>();
        IntStream.range(0, threads)
                .forEach(
                        t ->
                                futures.add(
                                        CompletableFuture.runAsync(
                                                () -> semaphoreUtil.acquireSlots(1),
                                                executorService)));
        // Zero-length array is the recommended toArray idiom; it also removes the
        // need for the class-level @SuppressWarnings the original carried.
        CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).get();
    }
}
| 6,595 |
0 | Create_ds/conductor/core/src/test/java/com/netflix/conductor/core | Create_ds/conductor/core/src/test/java/com/netflix/conductor/core/utils/JsonUtilsTest.java | /*
* Copyright 2021 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.utils;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.fasterxml.jackson.databind.ObjectMapper;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
@ContextConfiguration(classes = {TestObjectMapperConfiguration.class})
@RunWith(SpringRunner.class)
public class JsonUtilsTest {

    private JsonUtils jsonUtils;

    @Autowired private ObjectMapper objectMapper;

    @Before
    public void setup() {
        jsonUtils = new JsonUtils(objectMapper);
    }

    /** An embedded JSON-array string inside a list element is expanded in place. */
    @Test
    public void testArray() {
        List<Object> list = new LinkedList<>();
        Map<String, Object> map = new HashMap<>();
        map.put("externalId", "[{\"taskRefName\":\"t001\",\"workflowId\":\"w002\"}]");
        map.put("name", "conductor");
        map.put("version", 2);
        list.add(map);

        //noinspection unchecked
        map = (Map<String, Object>) list.get(0);
        assertTrue(map.get("externalId") instanceof String);

        // Expansion must not change the number of elements in the list.
        int before = list.size();
        jsonUtils.expand(list);
        assertEquals(before, list.size());

        //noinspection unchecked
        map = (Map<String, Object>) list.get(0);
        assertTrue(map.get("externalId") instanceof ArrayList);
    }

    /** An embedded JSON-object string inside a map value is expanded in place. */
    @Test
    public void testMap() {
        Map<String, Object> map = new HashMap<>();
        map.put("externalId", "{\"taskRefName\":\"t001\",\"workflowId\":\"w002\"}");
        map.put("name", "conductor");
        map.put("version", 2);

        assertTrue(map.get("externalId") instanceof String);
        jsonUtils.expand(map);
        assertTrue(map.get("externalId") instanceof LinkedHashMap);
    }

    /** Expansion recurses through nested maps without throwing. */
    @Test
    public void testMultiLevelMap() {
        Map<String, Object> parentMap = new HashMap<>();
        parentMap.put("requestId", "abcde");
        parentMap.put("status", "PROCESSED");

        Map<String, Object> childMap = new HashMap<>();
        childMap.put("path", "test/path");
        childMap.put("type", "VIDEO");

        Map<String, Object> grandChildMap = new HashMap<>();
        grandChildMap.put("duration", "370");
        grandChildMap.put("passed", "true");

        childMap.put("metadata", grandChildMap);
        parentMap.put("asset", childMap);

        Object jsonObject = jsonUtils.expand(parentMap);
        assertNotNull(jsonObject);
    }

    // This test verifies that the types of the elements in the input are maintained upon
    // expanding the JSON object
    @Test
    public void testTypes() {
        String map =
                "{\"requestId\":\"1375128656908832001\",\"workflowId\":\"fc147e1d-5408-4d41-b066-53cb2e551d0e\","
                        + "\"inner\":{\"num\":42,\"status\":\"READY\"}}";

        // The original called expand() twice and discarded the first result.
        Object jsonObject = jsonUtils.expand(map);
        assertNotNull(jsonObject);
        assertTrue(jsonObject instanceof LinkedHashMap);
        assertTrue(((LinkedHashMap<?, ?>) jsonObject).get("requestId") instanceof String);
        assertTrue(((LinkedHashMap<?, ?>) jsonObject).get("workflowId") instanceof String);
        assertTrue(((LinkedHashMap<?, ?>) jsonObject).get("inner") instanceof LinkedHashMap);
        assertTrue(
                ((LinkedHashMap<?, ?>) ((LinkedHashMap<?, ?>) jsonObject).get("inner")).get("num")
                        instanceof Integer);
        assertTrue(
                ((LinkedHashMap<?, ?>) ((LinkedHashMap<?, ?>) jsonObject).get("inner"))
                                .get("status")
                        instanceof String);
    }
}
| 6,596 |
0 | Create_ds/conductor/core/src/test/java/com/netflix/conductor/core | Create_ds/conductor/core/src/test/java/com/netflix/conductor/core/utils/QueueUtilsTest.java | /*
* Copyright 2020 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.utils;
import org.junit.Assert;
import org.junit.Test;
public class QueueUtilsTest {

    // NOTE: all assertions use JUnit's (expected, actual) argument order; several
    // of the original assertions had the arguments swapped, which produces
    // misleading failure messages.

    /** Queue-name composition for the domain/isolation-group/namespace combinations. */
    @Test
    public void queueNameWithTypeAndIsolationGroup() {
        String queueNameGenerated = QueueUtils.getQueueName("tType", null, "isolationGroup", null);
        String queueNameGeneratedOnlyType = QueueUtils.getQueueName("tType", null, null, null);
        String queueNameGeneratedWithAllValues =
                QueueUtils.getQueueName("tType", "domain", "iso", "eN");

        Assert.assertEquals("tType-isolationGroup", queueNameGenerated);
        Assert.assertEquals("tType", queueNameGeneratedOnlyType);
        Assert.assertEquals("domain:tType@eN-iso", queueNameGeneratedWithAllValues);
    }

    @Test
    public void notIsolatedIfSeparatorNotPresent() {
        String notIsolatedQueue = "notIsolated";
        Assert.assertFalse(QueueUtils.isIsolatedQueue(notIsolatedQueue));
    }

    @Test
    public void testGetExecutionNameSpace() {
        String executionNameSpace = QueueUtils.getExecutionNameSpace("domain:queueName@eN-iso");
        Assert.assertEquals("eN", executionNameSpace);
    }

    @Test
    public void testGetQueueExecutionNameSpaceEmpty() {
        Assert.assertEquals("", QueueUtils.getExecutionNameSpace("queueName"));
    }

    @Test
    public void testGetQueueExecutionNameSpaceWithIsolationGroup() {
        Assert.assertEquals(
                "executionNameSpace",
                QueueUtils.getExecutionNameSpace("domain:test@executionNameSpace-isolated"));
    }

    @Test
    public void testGetQueueName() {
        Assert.assertEquals(
                "domain:taskType@eN-isolated",
                QueueUtils.getQueueName("taskType", "domain", "isolated", "eN"));
    }

    @Test
    public void testGetTaskType() {
        Assert.assertEquals("taskType", QueueUtils.getTaskType("domain:taskType-isolated"));
    }

    @Test
    public void testGetTaskTypeWithoutDomain() {
        Assert.assertEquals("taskType", QueueUtils.getTaskType("taskType-isolated"));
    }

    @Test
    public void testGetTaskTypeWithoutDomainAndWithoutIsolationGroup() {
        Assert.assertEquals("taskType", QueueUtils.getTaskType("taskType"));
    }

    @Test
    public void testGetTaskTypeWithoutDomainAndWithExecutionNameSpace() {
        Assert.assertEquals("taskType", QueueUtils.getTaskType("taskType@eN"));
    }
}
| 6,597 |
0 | Create_ds/conductor/core/src/test/java/com/netflix/conductor/core | Create_ds/conductor/core/src/test/java/com/netflix/conductor/core/storage/DummyPayloadStorageTest.java | /*
* Copyright 2023 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.storage;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.UnsupportedEncodingException;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import org.apache.commons.io.IOUtils;
import org.junit.Before;
import org.junit.Test;
import com.netflix.conductor.common.run.ExternalStorageLocation;
import com.netflix.conductor.common.utils.ExternalPayloadStorage;
import com.fasterxml.jackson.databind.ObjectMapper;
import static com.netflix.conductor.common.utils.ExternalPayloadStorage.PayloadType;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
public class DummyPayloadStorageTest {

    // System under test.
    private DummyPayloadStorage dummyPayloadStorage;

    private static final String TEST_STORAGE_PATH = "test-storage";

    // Write-location obtained in setup(); reused by the download tests.
    private ExternalStorageLocation location;

    private ObjectMapper objectMapper;

    // Valid JSON payload. The original had a trailing comma, making the payload
    // unparseable JSON, so the valid-path download test silently skipped its
    // content assertions (the parse failure was caught and "asserted" instead).
    public static final String MOCK_PAYLOAD = "{\n" + "\"output\": \"TEST_OUTPUT\"\n" + "}\n";

    @Before
    public void setup() {
        dummyPayloadStorage = new DummyPayloadStorage();
        objectMapper = new ObjectMapper();
        location =
                dummyPayloadStorage.getLocation(
                        ExternalPayloadStorage.Operation.WRITE,
                        PayloadType.TASK_OUTPUT,
                        TEST_STORAGE_PATH);
        // StandardCharsets.UTF_8 never throws, unlike getBytes("UTF-8"), which
        // forced the original's (empty) catch of UnsupportedEncodingException.
        byte[] payloadBytes = MOCK_PAYLOAD.getBytes(StandardCharsets.UTF_8);
        dummyPayloadStorage.upload(
                location.getPath(), new ByteArrayInputStream(payloadBytes), payloadBytes.length);
    }

    @Test
    public void testGetLocationNotNull() {
        assertNotNull(location);
    }

    /** Downloading the uploaded payload returns the exact JSON content. */
    @Test
    public void testDownloadForValidPath() throws IOException {
        // Any parse or I/O failure now fails the test instead of being swallowed.
        try (InputStream inputStream = dummyPayloadStorage.download(location.getPath())) {
            Map<String, Object> payload =
                    objectMapper.readValue(
                            IOUtils.toString(inputStream, StandardCharsets.UTF_8), Map.class);
            assertTrue(payload.containsKey("output"));
            assertEquals("TEST_OUTPUT", payload.get("output"));
        }
    }

    /** Downloading an unknown path yields null rather than throwing. */
    @Test
    public void testDownloadForInvalidPath() {
        InputStream inputStream = dummyPayloadStorage.download("testPath");
        assertNull(inputStream);
    }
}
| 6,598 |
0 | Create_ds/conductor/core/src/test/java/com/netflix/conductor/core | Create_ds/conductor/core/src/test/java/com/netflix/conductor/core/execution/TestDeciderService.java | /*
* Copyright 2022 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.netflix.conductor.core.execution;
import java.io.IOException;
import java.io.InputStream;
import java.time.Duration;
import java.util.*;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.stream.Collectors;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.Configuration;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import com.netflix.conductor.common.config.TestObjectMapperConfiguration;
import com.netflix.conductor.common.metadata.tasks.TaskDef;
import com.netflix.conductor.common.metadata.tasks.TaskDef.TimeoutPolicy;
import com.netflix.conductor.common.metadata.tasks.TaskType;
import com.netflix.conductor.common.metadata.workflow.SubWorkflowParams;
import com.netflix.conductor.common.metadata.workflow.WorkflowDef;
import com.netflix.conductor.common.metadata.workflow.WorkflowTask;
import com.netflix.conductor.common.utils.TaskUtils;
import com.netflix.conductor.core.exception.TerminateWorkflowException;
import com.netflix.conductor.core.execution.DeciderService.DeciderOutcome;
import com.netflix.conductor.core.execution.mapper.TaskMapper;
import com.netflix.conductor.core.execution.tasks.SubWorkflow;
import com.netflix.conductor.core.execution.tasks.SystemTaskRegistry;
import com.netflix.conductor.core.execution.tasks.WorkflowSystemTask;
import com.netflix.conductor.core.operation.StartWorkflowOperation;
import com.netflix.conductor.core.utils.ExternalPayloadStorageUtils;
import com.netflix.conductor.core.utils.IDGenerator;
import com.netflix.conductor.core.utils.ParametersUtils;
import com.netflix.conductor.dao.MetadataDAO;
import com.netflix.conductor.model.TaskModel;
import com.netflix.conductor.model.WorkflowModel;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.DefaultRegistry;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Spectator;
import com.fasterxml.jackson.databind.ObjectMapper;
import static com.netflix.conductor.common.metadata.tasks.TaskType.*;
import static org.junit.Assert.*;
import static org.mockito.ArgumentMatchers.*;
import static org.mockito.Mockito.*;
@ContextConfiguration(
classes = {TestObjectMapperConfiguration.class, TestDeciderService.TestConfiguration.class})
@RunWith(SpringRunner.class)
public class TestDeciderService {
    /**
     * Spring test configuration: component-scans every {@link TaskMapper} bean and
     * supplies mock/stub collaborators needed to build a {@link DeciderService}.
     */
    @Configuration
    @ComponentScan(basePackageClasses = TaskMapper.class) // loads all TaskMapper beans
    public static class TestConfiguration {

        // Real SUB_WORKFLOW system task; its start-workflow collaborator is mocked.
        @Bean(TASK_TYPE_SUB_WORKFLOW)
        public SubWorkflow subWorkflow(ObjectMapper objectMapper) {
            return new SubWorkflow(objectMapper, mock(StartWorkflowOperation.class));
        }

        // Stub system task that always reports async-complete, used to exercise
        // the asyncComplete code path in the decider.
        @Bean("asyncCompleteSystemTask")
        public WorkflowSystemTaskStub asyncCompleteSystemTask() {
            return new WorkflowSystemTaskStub("asyncCompleteSystemTask") {
                @Override
                public boolean isAsyncComplete(TaskModel task) {
                    return true;
                }
            };
        }

        @Bean
        public SystemTaskRegistry systemTaskRegistry(Set<WorkflowSystemTask> tasks) {
            return new SystemTaskRegistry(tasks);
        }

        // Mockito mock; stubbed per-test in setup().
        @Bean
        public MetadataDAO mockMetadataDAO() {
            return mock(MetadataDAO.class);
        }

        // Index the scanned TaskMapper beans by task type for DeciderService.
        @Bean
        public Map<String, TaskMapper> taskMapperMap(Collection<TaskMapper> taskMappers) {
            return taskMappers.stream()
                    .collect(Collectors.toMap(TaskMapper::getTaskType, Function.identity()));
        }

        @Bean
        public ParametersUtils parametersUtils(ObjectMapper mapper) {
            return new ParametersUtils(mapper);
        }

        @Bean
        public IDGenerator idGenerator() {
            return new IDGenerator();
        }
    }
    // System under test; rebuilt for every test in setup().
    private DeciderService deciderService;

    // Mockito mock created in setup(); payload externalization is not exercised here.
    private ExternalPayloadStorageUtils externalPayloadStorageUtils;

    // Registry added to the global Spectator registry in init(); used to verify
    // monitoring counters (e.g. task_timeout).
    private static Registry registry;

    @Autowired private ObjectMapper objectMapper;

    @Autowired private SystemTaskRegistry systemTaskRegistry;

    // TaskMapper beans keyed by task type, provided by TestConfiguration.
    @Autowired
    @Qualifier("taskMapperMap")
    private Map<String, TaskMapper> taskMappers;

    @Autowired private ParametersUtils parametersUtils;

    // Mock bean supplied by TestConfiguration; stubbed in setup().
    @Autowired private MetadataDAO metadataDAO;

    @Rule public ExpectedException exception = ExpectedException.none();
    /** Registers a fresh registry with the global Spectator registry once per class. */
    @BeforeClass
    public static void init() {
        registry = new DefaultRegistry();
        Spectator.globalRegistry().add(registry);
    }
@Before
public void setup() {
externalPayloadStorageUtils = mock(ExternalPayloadStorageUtils.class);
WorkflowDef workflowDef = new WorkflowDef();
workflowDef.setName("TestDeciderService");
workflowDef.setVersion(1);
TaskDef taskDef = new TaskDef();
when(metadataDAO.getTaskDef(any())).thenReturn(taskDef);
when(metadataDAO.getLatestWorkflowDef(any())).thenReturn(Optional.of(workflowDef));
deciderService =
new DeciderService(
new IDGenerator(),
parametersUtils,
metadataDAO,
externalPayloadStorageUtils,
systemTaskRegistry,
taskMappers,
Duration.ofMinutes(60));
}
    /**
     * Schema v2 input resolution: ${workflow.input.*} and ${taskRef.output.*}
     * expressions resolve against the workflow and already-executed tasks;
     * unresolvable paths resolve to null rather than failing.
     */
    @Test
    public void testGetTaskInputV2() {
        WorkflowModel workflow = createDefaultWorkflow();

        workflow.getWorkflowDefinition().setSchemaVersion(2);

        Map<String, Object> inputParams = new HashMap<>();
        inputParams.put("workflowInputParam", "${workflow.input.requestId}");
        inputParams.put("taskOutputParam", "${task2.output.location}");
        inputParams.put("taskOutputParam2", "${task2.output.locationBad}"); // missing key
        inputParams.put("taskOutputParam3", "${task3.output.location}"); // missing task
        inputParams.put("constParam", "Some String value");
        inputParams.put("nullValue", null);
        inputParams.put("task2Status", "${task2.status}");
        inputParams.put("channelMap", "${workflow.input.channelMapping}");

        Map<String, Object> taskInput =
                parametersUtils.getTaskInput(inputParams, workflow, null, null);

        assertNotNull(taskInput);
        assertTrue(taskInput.containsKey("workflowInputParam"));
        assertTrue(taskInput.containsKey("taskOutputParam"));
        assertTrue(taskInput.containsKey("taskOutputParam2"));
        assertTrue(taskInput.containsKey("taskOutputParam3"));
        // Unresolvable references are present as keys but mapped to null.
        assertNull(taskInput.get("taskOutputParam2"));
        assertNotNull(taskInput.get("channelMap"));
        assertEquals(5, taskInput.get("channelMap"));
        assertEquals("request id 001", taskInput.get("workflowInputParam"));
        assertEquals("http://location", taskInput.get("taskOutputParam"));
        assertNull(taskInput.get("taskOutputParam3"));
        assertNull(taskInput.get("nullValue"));
        assertEquals(
                workflow.getTasks().get(0).getStatus().name(),
                taskInput.get("task2Status")); // task2 and task3 are the tasks respectively
    }
@Test
public void testGetTaskInputV2Partial() {
WorkflowModel workflow = createDefaultWorkflow();
System.setProperty("EC2_INSTANCE", "i-123abcdef990");
workflow.getWorkflowDefinition().setSchemaVersion(2);
Map<String, Object> inputParams = new HashMap<>();
inputParams.put("workflowInputParam", "${workflow.input.requestId}");
inputParams.put("workfowOutputParam", "${workflow.output.name}");
inputParams.put("taskOutputParam", "${task2.output.location}");
inputParams.put("taskOutputParam2", "${task2.output.locationBad}");
inputParams.put("taskOutputParam3", "${task3.output.location}");
inputParams.put("constParam", "Some String value &");
inputParams.put("partial", "${task2.output.location}/something?host=${EC2_INSTANCE}");
inputParams.put("jsonPathExtracted", "${workflow.output.names[*].year}");
inputParams.put("secondName", "${workflow.output.names[1].name}");
inputParams.put(
"concatenatedName",
"The Band is: ${workflow.output.names[1].name}-\t${EC2_INSTANCE}");
TaskDef taskDef = new TaskDef();
taskDef.getInputTemplate().put("opname", "${workflow.output.name}");
List<Object> listParams = new LinkedList<>();
List<Object> listParams2 = new LinkedList<>();
listParams2.add("${workflow.input.requestId}-10-${EC2_INSTANCE}");
listParams.add(listParams2);
Map<String, Object> map = new HashMap<>();
map.put("name", "${workflow.output.names[0].name}");
map.put("hasAwards", "${workflow.input.hasAwards}");
listParams.add(map);
taskDef.getInputTemplate().put("listValues", listParams);
Map<String, Object> taskInput =
parametersUtils.getTaskInput(inputParams, workflow, taskDef, null);
assertNotNull(taskInput);
assertTrue(taskInput.containsKey("workflowInputParam"));
assertTrue(taskInput.containsKey("taskOutputParam"));
assertTrue(taskInput.containsKey("taskOutputParam2"));
assertTrue(taskInput.containsKey("taskOutputParam3"));
assertNull(taskInput.get("taskOutputParam2"));
assertNotNull(taskInput.get("jsonPathExtracted"));
assertTrue(taskInput.get("jsonPathExtracted") instanceof List);
assertNotNull(taskInput.get("secondName"));
assertTrue(taskInput.get("secondName") instanceof String);
assertEquals("The Doors", taskInput.get("secondName"));
assertEquals("The Band is: The Doors-\ti-123abcdef990", taskInput.get("concatenatedName"));
assertEquals("request id 001", taskInput.get("workflowInputParam"));
assertEquals("http://location", taskInput.get("taskOutputParam"));
assertNull(taskInput.get("taskOutputParam3"));
assertNotNull(taskInput.get("partial"));
assertEquals("http://location/something?host=i-123abcdef990", taskInput.get("partial"));
}
    /**
     * Resolution of nested structures: ${...} references inside a list of maps
     * are substituted element-by-element while the list/map shape is preserved.
     */
    @SuppressWarnings("unchecked")
    @Test
    public void testGetTaskInput() {
        Map<String, Object> ip = new HashMap<>();
        ip.put("workflowInputParam", "${workflow.input.requestId}");
        ip.put("taskOutputParam", "${task2.output.location}");
        // Nested structure: a list of maps containing both constants and references.
        List<Map<String, Object>> json = new LinkedList<>();
        Map<String, Object> m1 = new HashMap<>();
        m1.put("name", "person name");
        m1.put("city", "New York");
        m1.put("phone", 2120001234);
        m1.put("status", "${task2.output.isPersonActive}");
        Map<String, Object> m2 = new HashMap<>();
        m2.put("employer", "City Of New York");
        m2.put("color", "purple");
        m2.put("requestId", "${workflow.input.requestId}");
        json.add(m1);
        json.add(m2);
        ip.put("complexJson", json);
        WorkflowDef def = new WorkflowDef();
        def.setName("testGetTaskInput");
        def.setSchemaVersion(2);
        WorkflowModel workflow = new WorkflowModel();
        workflow.setWorkflowDefinition(def);
        workflow.getInput().put("requestId", "request id 001");
        // task2 supplies the outputs the references above point at.
        TaskModel task = new TaskModel();
        task.setReferenceTaskName("task2");
        task.addOutput("location", "http://location");
        task.addOutput("isPersonActive", true);
        workflow.getTasks().add(task);
        Map<String, Object> taskInput = parametersUtils.getTaskInput(ip, workflow, null, null);
        assertNotNull(taskInput);
        assertTrue(taskInput.containsKey("workflowInputParam"));
        assertTrue(taskInput.containsKey("taskOutputParam"));
        assertEquals("request id 001", taskInput.get("workflowInputParam"));
        assertEquals("http://location", taskInput.get("taskOutputParam"));
        // The nested list survives resolution with both elements intact.
        assertNotNull(taskInput.get("complexJson"));
        assertTrue(taskInput.get("complexJson") instanceof List);
        List<Map<String, Object>> resolvedInput =
                (List<Map<String, Object>>) taskInput.get("complexJson");
        assertEquals(2, resolvedInput.size());
    }
@Test
public void testGetTaskInputV1() {
Map<String, Object> ip = new HashMap<>();
ip.put("workflowInputParam", "workflow.input.requestId");
ip.put("taskOutputParam", "task2.output.location");
WorkflowDef def = new WorkflowDef();
def.setSchemaVersion(1);
WorkflowModel workflow = new WorkflowModel();
workflow.setWorkflowDefinition(def);
workflow.getInput().put("requestId", "request id 001");
TaskModel task = new TaskModel();
task.setReferenceTaskName("task2");
task.addOutput("location", "http://location");
task.addOutput("isPersonActive", true);
workflow.getTasks().add(task);
Map<String, Object> taskInput = parametersUtils.getTaskInput(ip, workflow, null, null);
assertNotNull(taskInput);
assertTrue(taskInput.containsKey("workflowInputParam"));
assertTrue(taskInput.containsKey("taskOutputParam"));
assertEquals("request id 001", taskInput.get("workflowInputParam"));
assertEquals("http://location", taskInput.get("taskOutputParam"));
}
@Test
public void testGetTaskInputV2WithInputTemplate() {
TaskDef def = new TaskDef();
Map<String, Object> inputTemplate = new HashMap<>();
inputTemplate.put("url", "https://some_url:7004");
inputTemplate.put("default_url", "https://default_url:7004");
inputTemplate.put("someKey", "someValue");
def.getInputTemplate().putAll(inputTemplate);
Map<String, Object> workflowInput = new HashMap<>();
workflowInput.put("some_new_url", "https://some_new_url:7004");
workflowInput.put("workflow_input_url", "https://workflow_input_url:7004");
workflowInput.put("some_other_key", "some_other_value");
WorkflowDef workflowDef = new WorkflowDef();
workflowDef.setName("testGetTaskInputV2WithInputTemplate");
workflowDef.setVersion(1);
WorkflowModel workflow = new WorkflowModel();
workflow.setWorkflowDefinition(workflowDef);
workflow.setInput(workflowInput);
WorkflowTask workflowTask = new WorkflowTask();
workflowTask.getInputParameters().put("url", "${workflow.input.some_new_url}");
workflowTask
.getInputParameters()
.put("workflow_input_url", "${workflow.input.workflow_input_url}");
workflowTask.getInputParameters().put("someKey", "${workflow.input.someKey}");
workflowTask.getInputParameters().put("someOtherKey", "${workflow.input.some_other_key}");
workflowTask
.getInputParameters()
.put("someNowhereToBeFoundKey", "${workflow.input.some_ne_key}");
Map<String, Object> taskInput =
parametersUtils.getTaskInputV2(
workflowTask.getInputParameters(), workflow, null, def);
assertTrue(taskInput.containsKey("url"));
assertTrue(taskInput.containsKey("default_url"));
assertEquals(taskInput.get("url"), "https://some_new_url:7004");
assertEquals(taskInput.get("default_url"), "https://default_url:7004");
assertEquals(taskInput.get("workflow_input_url"), "https://workflow_input_url:7004");
assertEquals("some_other_value", taskInput.get("someOtherKey"));
assertEquals("someValue", taskInput.get("someKey"));
assertNull(taskInput.get("someNowhereToBeFoundKey"));
}
    /**
     * Walks the nested fork/join/decision definition built by
     * createNestedWorkflow() and verifies getNextTask for each reference:
     * tasks inside a forked branch flow into their join, inner joins flow into
     * the enclosing join, and the outer join flows onward.
     */
    @Test
    public void testGetNextTask() {
        WorkflowDef def = createNestedWorkflow();
        WorkflowTask firstTask = def.getTasks().get(0);
        assertNotNull(firstTask);
        assertEquals("fork1", firstTask.getTaskReferenceName());
        // The successor of a fork (at definition level) is its join.
        WorkflowTask nextAfterFirst = def.getNextTask(firstTask.getTaskReferenceName());
        assertNotNull(nextAfterFirst);
        assertEquals("join1", nextAfterFirst.getTaskReferenceName());
        WorkflowTask fork2 = def.getTaskByRefName("fork2");
        assertNotNull(fork2);
        assertEquals("fork2", fork2.getTaskReferenceName());
        WorkflowTask taskAfterFork2 = def.getNextTask("fork2");
        assertNotNull(taskAfterFork2);
        assertEquals("join2", taskAfterFork2.getTaskReferenceName());
        WorkflowTask t2 = def.getTaskByRefName("t2");
        assertNotNull(t2);
        assertEquals("t2", t2.getTaskReferenceName());
        WorkflowTask taskAfterT2 = def.getNextTask("t2");
        assertNotNull(taskAfterT2);
        assertEquals("t4", taskAfterT2.getTaskReferenceName());
        // t3 is followed by a DECISION task (d1).
        WorkflowTask taskAfterT3 = def.getNextTask("t3");
        assertNotNull(taskAfterT3);
        assertEquals(DECISION.name(), taskAfterT3.getType());
        assertEquals("d1", taskAfterT3.getTaskReferenceName());
        WorkflowTask taskAfterT4 = def.getNextTask("t4");
        assertNotNull(taskAfterT4);
        assertEquals("join2", taskAfterT4.getTaskReferenceName());
        WorkflowTask taskAfterT6 = def.getNextTask("t6");
        assertNotNull(taskAfterT6);
        assertEquals("t9", taskAfterT6.getTaskReferenceName());
        // Inner join feeds the outer join.
        WorkflowTask taskAfterJoin2 = def.getNextTask("join2");
        assertNotNull(taskAfterJoin2);
        assertEquals("join1", taskAfterJoin2.getTaskReferenceName());
        WorkflowTask taskAfterJoin1 = def.getNextTask("join1");
        assertNotNull(taskAfterJoin1);
        assertEquals("t5", taskAfterJoin1.getTaskReferenceName());
        // Sub-workflow branch also flows into the outer join.
        WorkflowTask taskAfterSubWF = def.getNextTask("sw1");
        assertNotNull(taskAfterSubWF);
        assertEquals("join1", taskAfterSubWF.getTaskReferenceName());
        WorkflowTask taskAfterT9 = def.getNextTask("t9");
        assertNotNull(taskAfterT9);
        assertEquals("join2", taskAfterT9.getTaskReferenceName());
    }
@Test
public void testCaseStatement() {
WorkflowDef def = createConditionalWF();
WorkflowModel workflow = new WorkflowModel();
workflow.setWorkflowDefinition(def);
workflow.setCreateTime(0L);
workflow.setWorkflowId("a");
workflow.setCorrelationId("b");
workflow.setStatus(WorkflowModel.Status.RUNNING);
DeciderOutcome outcome = deciderService.decide(workflow);
List<TaskModel> scheduledTasks = outcome.tasksToBeScheduled;
assertNotNull(scheduledTasks);
assertEquals(2, scheduledTasks.size());
assertEquals(TaskModel.Status.IN_PROGRESS, scheduledTasks.get(0).getStatus());
assertEquals(TaskModel.Status.SCHEDULED, scheduledTasks.get(1).getStatus());
}
@Test
public void testGetTaskByRef() {
WorkflowModel workflow = new WorkflowModel();
TaskModel t1 = new TaskModel();
t1.setReferenceTaskName("ref");
t1.setSeq(0);
t1.setStatus(TaskModel.Status.TIMED_OUT);
TaskModel t2 = new TaskModel();
t2.setReferenceTaskName("ref");
t2.setSeq(1);
t2.setStatus(TaskModel.Status.FAILED);
TaskModel t3 = new TaskModel();
t3.setReferenceTaskName("ref");
t3.setSeq(2);
t3.setStatus(TaskModel.Status.COMPLETED);
workflow.getTasks().add(t1);
workflow.getTasks().add(t2);
workflow.getTasks().add(t3);
TaskModel task = workflow.getTaskByRefName("ref");
assertNotNull(task);
assertEquals(TaskModel.Status.COMPLETED, task.getStatus());
assertEquals(t3.getSeq(), task.getSeq());
}
    /**
     * Exercises checkTaskTimeout under each TimeoutPolicy: RETRY times the task
     * out, ALERT_ONLY leaves it untouched (the counter still increments),
     * TIME_OUT_WF additionally throws TerminateWorkflowException, and a null
     * TaskDef is a no-op.
     */
    @Test
    public void testTaskTimeout() {
        Counter counter =
                registry.counter("task_timeout", "class", "WorkflowMonitor", "taskType", "test");
        long counterCount = counter.count();

        TaskDef taskType = new TaskDef();
        taskType.setName("test");
        taskType.setTimeoutPolicy(TimeoutPolicy.RETRY);
        taskType.setTimeoutSeconds(1);

        TaskModel task = new TaskModel();
        task.setTaskType(taskType.getName());
        task.setStartTime(System.currentTimeMillis() - 2_000); // 2 seconds ago!
        task.setStatus(TaskModel.Status.IN_PROGRESS);
        deciderService.checkTaskTimeout(taskType, task);

        // Task should be marked as timed out
        assertEquals(TaskModel.Status.TIMED_OUT, task.getStatus());
        assertNotNull(task.getReasonForIncompletion());
        assertEquals(++counterCount, counter.count());

        taskType.setTimeoutPolicy(TimeoutPolicy.ALERT_ONLY);
        task.setStatus(TaskModel.Status.IN_PROGRESS);
        task.setReasonForIncompletion(null);
        deciderService.checkTaskTimeout(taskType, task);

        // Nothing will happen (task state unchanged), but the counter increments.
        assertEquals(TaskModel.Status.IN_PROGRESS, task.getStatus());
        assertNull(task.getReasonForIncompletion());
        assertEquals(++counterCount, counter.count());

        boolean exception = false;
        taskType.setTimeoutPolicy(TimeoutPolicy.TIME_OUT_WF);
        task.setStatus(TaskModel.Status.IN_PROGRESS);
        task.setReasonForIncompletion(null);
        try {
            deciderService.checkTaskTimeout(taskType, task);
        } catch (TerminateWorkflowException tw) {
            exception = true;
        }
        // TIME_OUT_WF throws TerminateWorkflowException and still times the task out.
        assertTrue(exception);
        assertEquals(TaskModel.Status.TIMED_OUT, task.getStatus());
        assertNotNull(task.getReasonForIncompletion());
        assertEquals(++counterCount, counter.count());

        taskType.setTimeoutPolicy(TimeoutPolicy.TIME_OUT_WF);
        task.setStatus(TaskModel.Status.IN_PROGRESS);
        task.setReasonForIncompletion(null);
        deciderService.checkTaskTimeout(null, task); // this will be a no-op

        // No TaskDef: state untouched and counter unchanged.
        assertEquals(TaskModel.Status.IN_PROGRESS, task.getStatus());
        assertNull(task.getReasonForIncompletion());
        assertEquals(counterCount, counter.count());
    }
    /**
     * A SCHEDULED task whose pollTimeoutSeconds window has elapsed is timed out
     * (and the timeout counter incremented); a freshly scheduled task is left
     * untouched.
     */
    @Test
    public void testCheckTaskPollTimeout() {
        Counter counter =
                registry.counter("task_timeout", "class", "WorkflowMonitor", "taskType", "test");
        long counterCount = counter.count();

        TaskDef taskType = new TaskDef();
        taskType.setName("test");
        taskType.setTimeoutPolicy(TimeoutPolicy.RETRY);
        taskType.setPollTimeoutSeconds(1);

        TaskModel task = new TaskModel();
        task.setTaskType(taskType.getName());
        task.setScheduledTime(System.currentTimeMillis() - 2_000); // poll window expired
        task.setStatus(TaskModel.Status.SCHEDULED);
        deciderService.checkTaskPollTimeout(taskType, task);

        assertEquals(++counterCount, counter.count());
        assertEquals(TaskModel.Status.TIMED_OUT, task.getStatus());
        assertNotNull(task.getReasonForIncompletion());

        // Re-schedule "now": still within the poll window, so nothing changes.
        task.setScheduledTime(System.currentTimeMillis());
        task.setReasonForIncompletion(null);
        task.setStatus(TaskModel.Status.SCHEDULED);
        deciderService.checkTaskPollTimeout(taskType, task);

        assertEquals(counterCount, counter.count());
        assertEquals(TaskModel.Status.SCHEDULED, task.getStatus());
        assertNull(task.getReasonForIncompletion());
    }
@SuppressWarnings("unchecked")
@Test
public void testConcurrentTaskInputCalc() throws InterruptedException {
TaskDef def = new TaskDef();
Map<String, Object> inputMap = new HashMap<>();
inputMap.put("path", "${workflow.input.inputLocation}");
inputMap.put("type", "${workflow.input.sourceType}");
inputMap.put("channelMapping", "${workflow.input.channelMapping}");
List<Map<String, Object>> input = new LinkedList<>();
input.add(inputMap);
Map<String, Object> body = new HashMap<>();
body.put("input", input);
def.getInputTemplate().putAll(body);
ExecutorService executorService = Executors.newFixedThreadPool(10);
final int[] result = new int[10];
CountDownLatch latch = new CountDownLatch(10);
for (int i = 0; i < 10; i++) {
final int x = i;
executorService.submit(
() -> {
try {
Map<String, Object> workflowInput = new HashMap<>();
workflowInput.put("outputLocation", "baggins://outputlocation/" + x);
workflowInput.put("inputLocation", "baggins://inputlocation/" + x);
workflowInput.put("sourceType", "MuxedSource");
workflowInput.put("channelMapping", x);
WorkflowDef workflowDef = new WorkflowDef();
workflowDef.setName("testConcurrentTaskInputCalc");
workflowDef.setVersion(1);
WorkflowModel workflow = new WorkflowModel();
workflow.setWorkflowDefinition(workflowDef);
workflow.setInput(workflowInput);
Map<String, Object> taskInput =
parametersUtils.getTaskInputV2(
new HashMap<>(), workflow, null, def);
Object reqInputObj = taskInput.get("input");
assertNotNull(reqInputObj);
assertTrue(reqInputObj instanceof List);
List<Map<String, Object>> reqInput =
(List<Map<String, Object>>) reqInputObj;
Object cmObj = reqInput.get(0).get("channelMapping");
assertNotNull(cmObj);
if (!(cmObj instanceof Number)) {
result[x] = -1;
} else {
Number channelMapping = (Number) cmObj;
result[x] = channelMapping.intValue();
}
latch.countDown();
} catch (Exception e) {
e.printStackTrace();
}
});
}
latch.await(1, TimeUnit.MINUTES);
if (latch.getCount() > 0) {
fail(
"Executions did not complete in a minute. Something wrong with the build server?");
}
executorService.shutdownNow();
for (int i = 0; i < result.length; i++) {
assertEquals(i, result[i]);
}
}
    @SuppressWarnings("unchecked")
    @Test
    public void testTaskRetry() {
        // Retrying a FAILED task must create a NEW task whose ${CPEWF_TASK_ID}
        // references (top-level and nested in "env") resolve to the new task id,
        // while the original task keeps its own id in its input.
        WorkflowModel workflow = createDefaultWorkflow();
        workflow.getWorkflowDefinition().setSchemaVersion(2);
        Map<String, Object> inputParams = new HashMap<>();
        inputParams.put("workflowInputParam", "${workflow.input.requestId}");
        inputParams.put("taskOutputParam", "${task2.output.location}");
        inputParams.put("constParam", "Some String value");
        inputParams.put("nullValue", null);
        inputParams.put("task2Status", "${task2.status}");
        inputParams.put("null", null);
        inputParams.put("task_id", "${CPEWF_TASK_ID}");
        Map<String, Object> env = new HashMap<>();
        env.put("env_task_id", "${CPEWF_TASK_ID}");
        inputParams.put("env", env);
        Map<String, Object> taskInput =
                parametersUtils.getTaskInput(inputParams, workflow, null, "t1");
        TaskModel task = new TaskModel();
        task.getInputData().putAll(taskInput);
        task.setStatus(TaskModel.Status.FAILED);
        task.setTaskId("t1");
        TaskDef taskDef = new TaskDef();
        WorkflowTask workflowTask = new WorkflowTask();
        workflowTask.getInputParameters().put("task_id", "${CPEWF_TASK_ID}");
        workflowTask.getInputParameters().put("env", env);
        Optional<TaskModel> task2 = deciderService.retry(taskDef, workflowTask, task, workflow);
        // the original task's input still points at the original id
        assertEquals("t1", task.getInputData().get("task_id"));
        assertEquals(
                "t1", ((Map<String, Object>) task.getInputData().get("env")).get("env_task_id"));
        // the retried task gets a fresh id, substituted into its own input
        assertNotSame(task.getTaskId(), task2.get().getTaskId());
        assertEquals(task2.get().getTaskId(), task2.get().getInputData().get("task_id"));
        assertEquals(
                task2.get().getTaskId(),
                ((Map<String, Object>) task2.get().getInputData().get("env")).get("env_task_id"));
        // FAILED_WITH_TERMINAL_ERROR must not be retried: the workflow terminates
        TaskModel task3 = new TaskModel();
        task3.getInputData().putAll(taskInput);
        task3.setStatus(TaskModel.Status.FAILED_WITH_TERMINAL_ERROR);
        task3.setTaskId("t1");
        when(metadataDAO.getWorkflowDef(anyString(), anyInt()))
                .thenReturn(Optional.of(new WorkflowDef()));
        exception.expect(TerminateWorkflowException.class);
        deciderService.retry(taskDef, workflowTask, task3, workflow);
    }
    @SuppressWarnings("unchecked")
    @Test
    public void testWorkflowTaskRetry() {
        // Like testTaskRetry, but the WorkflowTask caps retries at 1: the second
        // failure must terminate the workflow instead of producing another retry.
        WorkflowModel workflow = createDefaultWorkflow();
        workflow.getWorkflowDefinition().setSchemaVersion(2);
        Map<String, Object> inputParams = new HashMap<>();
        inputParams.put("workflowInputParam", "${workflow.input.requestId}");
        inputParams.put("taskOutputParam", "${task2.output.location}");
        inputParams.put("constParam", "Some String value");
        inputParams.put("nullValue", null);
        inputParams.put("task2Status", "${task2.status}");
        inputParams.put("null", null);
        inputParams.put("task_id", "${CPEWF_TASK_ID}");
        Map<String, Object> env = new HashMap<>();
        env.put("env_task_id", "${CPEWF_TASK_ID}");
        inputParams.put("env", env);
        Map<String, Object> taskInput =
                parametersUtils.getTaskInput(inputParams, workflow, null, "t1");
        // Create a first failed task
        TaskModel task = new TaskModel();
        task.getInputData().putAll(taskInput);
        task.setStatus(TaskModel.Status.FAILED);
        task.setTaskId("t1");
        TaskDef taskDef = new TaskDef();
        // sanity check: TaskDef's default retry count is 3; the WorkflowTask below
        // overrides it down to 1
        assertEquals(3, taskDef.getRetryCount());
        WorkflowTask workflowTask = new WorkflowTask();
        workflowTask.getInputParameters().put("task_id", "${CPEWF_TASK_ID}");
        workflowTask.getInputParameters().put("env", env);
        workflowTask.setRetryCount(1);
        // Retry the failed task and assert that a new one has been created
        Optional<TaskModel> task2 = deciderService.retry(taskDef, workflowTask, task, workflow);
        assertEquals("t1", task.getInputData().get("task_id"));
        assertEquals(
                "t1", ((Map<String, Object>) task.getInputData().get("env")).get("env_task_id"));
        assertNotSame(task.getTaskId(), task2.get().getTaskId());
        assertEquals(task2.get().getTaskId(), task2.get().getInputData().get("task_id"));
        assertEquals(
                task2.get().getTaskId(),
                ((Map<String, Object>) task2.get().getInputData().get("env")).get("env_task_id"));
        // Set the retried task to FAILED, retry it again and assert that the workflow failed
        task2.get().setStatus(TaskModel.Status.FAILED);
        exception.expect(TerminateWorkflowException.class);
        // NOTE(review): with the ExpectedException rule, the retry call below throws
        // and the test ends there -- the two assertions after it never execute.
        final Optional<TaskModel> task3 =
                deciderService.retry(taskDef, workflowTask, task2.get(), workflow);
        assertFalse(task3.isPresent());
        assertEquals(WorkflowModel.Status.FAILED, workflow.getStatus());
    }
    @Test
    public void testLinearBackoff() {
        // LINEAR_BACKOFF: callback delay = retryDelaySeconds * backoffScaleFactor
        // * retryCount, clamped to Integer.MAX_VALUE on overflow.
        WorkflowModel workflow = createDefaultWorkflow();
        TaskModel task = new TaskModel();
        task.setStatus(TaskModel.Status.FAILED);
        task.setTaskId("t1");
        TaskDef taskDef = new TaskDef();
        taskDef.setRetryDelaySeconds(60);
        taskDef.setRetryLogic(TaskDef.RetryLogic.LINEAR_BACKOFF);
        taskDef.setBackoffScaleFactor(2);
        WorkflowTask workflowTask = new WorkflowTask();
        Optional<TaskModel> task2 = deciderService.retry(taskDef, workflowTask, task, workflow);
        assertEquals(120, task2.get().getCallbackAfterSeconds()); // 60*2*1
        Optional<TaskModel> task3 =
                deciderService.retry(taskDef, workflowTask, task2.get(), workflow);
        assertEquals(240, task3.get().getCallbackAfterSeconds()); // 60*2*2
        Optional<TaskModel> task4 =
                deciderService.retry(taskDef, workflowTask, task3.get(), workflow);
        assertEquals(360, task4.get().getCallbackAfterSeconds()); // 60*2*3
        // near-overflow retry count: the delay must clamp to Integer.MAX_VALUE
        taskDef.setRetryCount(Integer.MAX_VALUE);
        task4.get().setRetryCount(Integer.MAX_VALUE - 100);
        Optional<TaskModel> task5 =
                deciderService.retry(taskDef, workflowTask, task4.get(), workflow);
        assertEquals(Integer.MAX_VALUE, task5.get().getCallbackAfterSeconds());
    }
@Test
public void testExponentialBackoff() {
WorkflowModel workflow = createDefaultWorkflow();
TaskModel task = new TaskModel();
task.setStatus(TaskModel.Status.FAILED);
task.setTaskId("t1");
TaskDef taskDef = new TaskDef();
taskDef.setRetryDelaySeconds(60);
taskDef.setRetryLogic(TaskDef.RetryLogic.EXPONENTIAL_BACKOFF);
WorkflowTask workflowTask = new WorkflowTask();
Optional<TaskModel> task2 = deciderService.retry(taskDef, workflowTask, task, workflow);
assertEquals(60, task2.get().getCallbackAfterSeconds());
Optional<TaskModel> task3 =
deciderService.retry(taskDef, workflowTask, task2.get(), workflow);
assertEquals(120, task3.get().getCallbackAfterSeconds());
Optional<TaskModel> task4 =
deciderService.retry(taskDef, workflowTask, task3.get(), workflow);
assertEquals(240, task4.get().getCallbackAfterSeconds());
taskDef.setRetryCount(Integer.MAX_VALUE);
task4.get().setRetryCount(Integer.MAX_VALUE - 100);
Optional<TaskModel> task5 =
deciderService.retry(taskDef, workflowTask, task4.get(), workflow);
assertEquals(Integer.MAX_VALUE, task5.get().getCallbackAfterSeconds());
}
@Test
public void testFork() throws IOException {
InputStream stream = TestDeciderService.class.getResourceAsStream("/test.json");
WorkflowModel workflow = objectMapper.readValue(stream, WorkflowModel.class);
DeciderOutcome outcome = deciderService.decide(workflow);
assertFalse(outcome.isComplete);
assertEquals(5, outcome.tasksToBeScheduled.size());
assertEquals(1, outcome.tasksToBeUpdated.size());
}
    @Test
    public void testDecideSuccessfulWorkflow() {
        // Walks a two-step linear workflow through decide(): completing s1 should
        // schedule s2; completing s2 should finish the workflow.
        WorkflowDef workflowDef = createLinearWorkflow();
        WorkflowModel workflow = new WorkflowModel();
        workflow.setWorkflowDefinition(workflowDef);
        workflow.setStatus(WorkflowModel.Status.RUNNING);
        TaskModel task1 = new TaskModel();
        task1.setTaskType("junit_task_l1");
        task1.setReferenceTaskName("s1");
        task1.setSeq(1);
        task1.setRetried(false);
        task1.setExecuted(false);
        task1.setStatus(TaskModel.Status.COMPLETED);
        workflow.getTasks().add(task1);
        DeciderOutcome deciderOutcome = deciderService.decide(workflow);
        assertNotNull(deciderOutcome);
        assertFalse(workflow.getTaskByRefName("s1").isRetried());
        assertEquals(1, deciderOutcome.tasksToBeUpdated.size());
        assertEquals("s1", deciderOutcome.tasksToBeUpdated.get(0).getReferenceTaskName());
        assertEquals(1, deciderOutcome.tasksToBeScheduled.size());
        assertEquals("s2", deciderOutcome.tasksToBeScheduled.get(0).getReferenceTaskName());
        assertFalse(deciderOutcome.isComplete);
        // complete the second (and last) task -> the workflow should be complete
        TaskModel task2 = new TaskModel();
        task2.setTaskType("junit_task_l2");
        task2.setReferenceTaskName("s2");
        task2.setSeq(2);
        task2.setRetried(false);
        task2.setExecuted(false);
        task2.setStatus(TaskModel.Status.COMPLETED);
        workflow.getTasks().add(task2);
        deciderOutcome = deciderService.decide(workflow);
        assertNotNull(deciderOutcome);
        assertTrue(workflow.getTaskByRefName("s2").isExecuted());
        assertFalse(workflow.getTaskByRefName("s2").isRetried());
        assertEquals(1, deciderOutcome.tasksToBeUpdated.size());
        assertEquals("s2", deciderOutcome.tasksToBeUpdated.get(0).getReferenceTaskName());
        assertEquals(0, deciderOutcome.tasksToBeScheduled.size());
        assertTrue(deciderOutcome.isComplete);
    }
@Test
public void testDecideWithLoopTask() {
WorkflowDef workflowDef = createLinearWorkflow();
WorkflowModel workflow = new WorkflowModel();
workflow.setWorkflowDefinition(workflowDef);
workflow.setStatus(WorkflowModel.Status.RUNNING);
TaskModel task1 = new TaskModel();
task1.setTaskType("junit_task_l1");
task1.setReferenceTaskName("s1");
task1.setSeq(1);
task1.setIteration(1);
task1.setRetried(false);
task1.setExecuted(false);
task1.setStatus(TaskModel.Status.COMPLETED);
workflow.getTasks().add(task1);
DeciderOutcome deciderOutcome = deciderService.decide(workflow);
assertNotNull(deciderOutcome);
assertFalse(workflow.getTaskByRefName("s1").isRetried());
assertEquals(1, deciderOutcome.tasksToBeUpdated.size());
assertEquals("s1", deciderOutcome.tasksToBeUpdated.get(0).getReferenceTaskName());
assertEquals(1, deciderOutcome.tasksToBeScheduled.size());
assertEquals("s2__1", deciderOutcome.tasksToBeScheduled.get(0).getReferenceTaskName());
assertFalse(deciderOutcome.isComplete);
}
    @Test
    public void testDecideFailedTask() {
        // A FAILED task with retries available must be rescheduled under the same
        // reference name, the original marked retried, and the workflow kept alive.
        WorkflowDef workflowDef = createLinearWorkflow();
        WorkflowModel workflow = new WorkflowModel();
        workflow.setWorkflowDefinition(workflowDef);
        workflow.setStatus(WorkflowModel.Status.RUNNING);
        TaskModel task = new TaskModel();
        task.setTaskType("junit_task_l1");
        task.setReferenceTaskName("s1");
        task.setSeq(1);
        task.setRetried(false);
        task.setExecuted(false);
        task.setStatus(TaskModel.Status.FAILED);
        // the task needs its WorkflowTask (with a TaskDef) so the decider can look
        // up the retry policy
        WorkflowTask workflowTask = new WorkflowTask();
        workflowTask.setTaskReferenceName("s1");
        workflowTask.setName("junit_task_l1");
        workflowTask.setTaskDefinition(new TaskDef("junit_task_l1"));
        task.setWorkflowTask(workflowTask);
        workflow.getTasks().add(task);
        DeciderOutcome deciderOutcome = deciderService.decide(workflow);
        assertNotNull(deciderOutcome);
        assertFalse(workflow.getTaskByRefName("s1").isExecuted());
        assertTrue(workflow.getTaskByRefName("s1").isRetried());
        assertEquals(1, deciderOutcome.tasksToBeUpdated.size());
        assertEquals("s1", deciderOutcome.tasksToBeUpdated.get(0).getReferenceTaskName());
        // the retry is scheduled under the same reference name
        assertEquals(1, deciderOutcome.tasksToBeScheduled.size());
        assertEquals("s1", deciderOutcome.tasksToBeScheduled.get(0).getReferenceTaskName());
        assertFalse(deciderOutcome.isComplete);
    }
@Test
public void testGetTasksToBeScheduled() {
WorkflowDef workflowDef = createLinearWorkflow();
WorkflowModel workflow = new WorkflowModel();
workflow.setWorkflowDefinition(workflowDef);
workflow.setStatus(WorkflowModel.Status.RUNNING);
WorkflowTask workflowTask1 = new WorkflowTask();
workflowTask1.setName("s1");
workflowTask1.setTaskReferenceName("s1");
workflowTask1.setType(SIMPLE.name());
workflowTask1.setTaskDefinition(new TaskDef("s1"));
List<TaskModel> tasksToBeScheduled =
deciderService.getTasksToBeScheduled(workflow, workflowTask1, 0, null);
assertNotNull(tasksToBeScheduled);
assertEquals(1, tasksToBeScheduled.size());
assertEquals("s1", tasksToBeScheduled.get(0).getReferenceTaskName());
WorkflowTask workflowTask2 = new WorkflowTask();
workflowTask2.setName("s2");
workflowTask2.setTaskReferenceName("s2");
workflowTask2.setType(SIMPLE.name());
workflowTask2.setTaskDefinition(new TaskDef("s2"));
tasksToBeScheduled = deciderService.getTasksToBeScheduled(workflow, workflowTask2, 0, null);
assertNotNull(tasksToBeScheduled);
assertEquals(1, tasksToBeScheduled.size());
assertEquals("s2", tasksToBeScheduled.get(0).getReferenceTaskName());
}
@Test
public void testIsResponseTimedOut() {
TaskDef taskDef = new TaskDef();
taskDef.setName("test_rt");
taskDef.setResponseTimeoutSeconds(10);
TaskModel task = new TaskModel();
task.setTaskDefName("test_rt");
task.setStatus(TaskModel.Status.IN_PROGRESS);
task.setTaskId("aa");
task.setTaskType(TaskType.TASK_TYPE_SIMPLE);
task.setUpdateTime(System.currentTimeMillis() - TimeUnit.SECONDS.toMillis(11));
assertTrue(deciderService.isResponseTimedOut(taskDef, task));
// verify that sub workflow tasks are not response timed out
task.setTaskType(TaskType.TASK_TYPE_SUB_WORKFLOW);
assertFalse(deciderService.isResponseTimedOut(taskDef, task));
task.setTaskType("asyncCompleteSystemTask");
assertFalse(deciderService.isResponseTimedOut(taskDef, task));
}
    @Test
    public void testFilterNextLoopOverTasks() {
        // filterNextLoopOverTasks receives candidate next tasks plus the completed
        // loop task (iteration 1); the returned tasks must be scoped to that
        // iteration: reference names suffixed "__1" and iteration set to 1.
        WorkflowModel workflow = new WorkflowModel();
        // the completed loop task driving iteration 1
        TaskModel task1 = new TaskModel();
        task1.setReferenceTaskName("task1");
        task1.setStatus(TaskModel.Status.COMPLETED);
        task1.setTaskId("task1");
        task1.setIteration(1);
        TaskModel task2 = new TaskModel();
        task2.setReferenceTaskName("task2");
        task2.setStatus(TaskModel.Status.SCHEDULED);
        task2.setTaskId("task2");
        // task3 already carries the iteration suffix in its reference name
        TaskModel task3 = new TaskModel();
        task3.setReferenceTaskName("task3__1");
        task3.setStatus(TaskModel.Status.IN_PROGRESS);
        task3.setTaskId("task3__1");
        TaskModel task4 = new TaskModel();
        task4.setReferenceTaskName("task4");
        task4.setStatus(TaskModel.Status.SCHEDULED);
        task4.setTaskId("task4");
        // task5 is in the workflow but NOT among the candidates passed below
        TaskModel task5 = new TaskModel();
        task5.setReferenceTaskName("task5");
        task5.setStatus(TaskModel.Status.COMPLETED);
        task5.setTaskId("task5");
        workflow.getTasks().addAll(Arrays.asList(task1, task2, task3, task4, task5));
        List<TaskModel> tasks =
                deciderService.filterNextLoopOverTasks(
                        Arrays.asList(task2, task3, task4), task1, workflow);
        // two of the three candidates survive the filter, each normalized to
        // iteration 1 with the "__1" suffix
        assertEquals(2, tasks.size());
        tasks.forEach(
                task -> {
                    assertTrue(
                            task.getReferenceTaskName()
                                    .endsWith(TaskUtils.getLoopOverTaskRefNameSuffix(1)));
                    assertEquals(1, task.getIteration());
                });
    }
@Test
public void testUpdateWorkflowOutput() {
WorkflowModel workflow = new WorkflowModel();
workflow.setWorkflowDefinition(new WorkflowDef());
deciderService.updateWorkflowOutput(workflow, null);
assertNotNull(workflow.getOutput());
assertTrue(workflow.getOutput().isEmpty());
TaskModel task = new TaskModel();
Map<String, Object> taskOutput = new HashMap<>();
taskOutput.put("taskKey", "taskValue");
task.setOutputData(taskOutput);
workflow.getTasks().add(task);
WorkflowDef workflowDef = new WorkflowDef();
when(metadataDAO.getWorkflowDef(anyString(), anyInt()))
.thenReturn(Optional.of(workflowDef));
deciderService.updateWorkflowOutput(workflow, null);
assertNotNull(workflow.getOutput());
assertEquals("taskValue", workflow.getOutput().get("taskKey"));
}
// when workflow definition has outputParameters defined
@SuppressWarnings({"unchecked", "rawtypes"})
@Test
public void testUpdateWorkflowOutput_WhenDefinitionHasOutputParameters() {
WorkflowModel workflow = new WorkflowModel();
WorkflowDef workflowDef = new WorkflowDef();
workflowDef.setOutputParameters(
new HashMap() {
{
put("workflowKey", "workflowValue");
}
});
workflow.setWorkflowDefinition(workflowDef);
TaskModel task = new TaskModel();
task.setReferenceTaskName("test_task");
task.setOutputData(
new HashMap() {
{
put("taskKey", "taskValue");
}
});
workflow.getTasks().add(task);
deciderService.updateWorkflowOutput(workflow, null);
assertNotNull(workflow.getOutput());
assertEquals("workflowValue", workflow.getOutput().get("workflowKey"));
}
@Test
public void testUpdateWorkflowOutput_WhenWorkflowHasTerminateTask() {
WorkflowModel workflow = new WorkflowModel();
TaskModel task = new TaskModel();
task.setTaskType(TASK_TYPE_TERMINATE);
task.setStatus(TaskModel.Status.COMPLETED);
task.setOutputData(
new HashMap<String, Object>() {
{
put("taskKey", "taskValue");
}
});
workflow.getTasks().add(task);
deciderService.updateWorkflowOutput(workflow, null);
assertNotNull(workflow.getOutput());
assertEquals("taskValue", workflow.getOutput().get("taskKey"));
verify(externalPayloadStorageUtils, never()).downloadPayload(anyString());
// when terminate task has output in external payload storage
String externalOutputPayloadStoragePath = "/task/output/terminate.json";
workflow.getTasks().get(0).setOutputData(null);
workflow.getTasks()
.get(0)
.setExternalOutputPayloadStoragePath(externalOutputPayloadStoragePath);
when(externalPayloadStorageUtils.downloadPayload(externalOutputPayloadStoragePath))
.thenReturn(
new HashMap() {
{
put("taskKey", "taskValue");
}
});
deciderService.updateWorkflowOutput(workflow, null);
assertNotNull(workflow.getOutput());
assertEquals("taskValue", workflow.getOutput().get("taskKey"));
verify(externalPayloadStorageUtils, times(1)).downloadPayload(anyString());
}
@Test
public void testCheckWorkflowTimeout() {
Counter counter =
registry.counter(
"workflow_failure",
"class",
"WorkflowMonitor",
"workflowName",
"test",
"status",
"TIMED_OUT",
"ownerApp",
"junit");
long counterCount = counter.count();
assertEquals(0, counter.count());
WorkflowDef workflowDef = new WorkflowDef();
workflowDef.setName("test");
WorkflowModel workflow = new WorkflowModel();
workflow.setOwnerApp("junit");
workflow.setCreateTime(System.currentTimeMillis() - 10_000);
workflow.setWorkflowId("workflow_id");
// no-op
workflow.setWorkflowDefinition(null);
deciderService.checkWorkflowTimeout(workflow);
// no-op
workflow.setWorkflowDefinition(workflowDef);
deciderService.checkWorkflowTimeout(workflow);
// alert
workflowDef.setTimeoutPolicy(WorkflowDef.TimeoutPolicy.ALERT_ONLY);
workflowDef.setTimeoutSeconds(2);
workflow.setWorkflowDefinition(workflowDef);
deciderService.checkWorkflowTimeout(workflow);
assertEquals(++counterCount, counter.count());
// time out
workflowDef.setTimeoutPolicy(WorkflowDef.TimeoutPolicy.TIME_OUT_WF);
workflow.setWorkflowDefinition(workflowDef);
try {
deciderService.checkWorkflowTimeout(workflow);
} catch (TerminateWorkflowException twe) {
assertTrue(twe.getMessage().contains("Workflow timed out"));
}
// for a retried workflow
workflow.setLastRetriedTime(System.currentTimeMillis() - 5_000);
try {
deciderService.checkWorkflowTimeout(workflow);
} catch (TerminateWorkflowException twe) {
assertTrue(twe.getMessage().contains("Workflow timed out"));
}
}
@Test
public void testCheckForWorkflowCompletion() {
WorkflowDef conditionalWorkflowDef = createConditionalWF();
WorkflowTask terminateWT = new WorkflowTask();
terminateWT.setType(TaskType.TERMINATE.name());
terminateWT.setTaskReferenceName("terminate");
terminateWT.setName("terminate");
terminateWT.getInputParameters().put("terminationStatus", "COMPLETED");
conditionalWorkflowDef.getTasks().add(terminateWT);
// when workflow has no tasks
WorkflowModel workflow = new WorkflowModel();
workflow.setWorkflowDefinition(conditionalWorkflowDef);
// then workflow completion check returns false
assertFalse(deciderService.checkForWorkflowCompletion(workflow));
// when only part of the tasks are completed
TaskModel decTask = new TaskModel();
decTask.setTaskType(DECISION.name());
decTask.setReferenceTaskName("conditional2");
decTask.setStatus(TaskModel.Status.COMPLETED);
TaskModel task1 = new TaskModel();
decTask.setTaskType(SIMPLE.name());
task1.setReferenceTaskName("t1");
task1.setStatus(TaskModel.Status.COMPLETED);
workflow.getTasks().addAll(Arrays.asList(decTask, task1));
// then workflow completion check returns false
assertFalse(deciderService.checkForWorkflowCompletion(workflow));
// when the terminate task is COMPLETED
TaskModel task2 = new TaskModel();
decTask.setTaskType(SIMPLE.name());
task2.setReferenceTaskName("t2");
task2.setStatus(TaskModel.Status.SCHEDULED);
TaskModel terminateTask = new TaskModel();
decTask.setTaskType(TaskType.TERMINATE.name());
terminateTask.setReferenceTaskName("terminate");
terminateTask.setStatus(TaskModel.Status.COMPLETED);
workflow.getTasks().addAll(Arrays.asList(task2, terminateTask));
// then the workflow completion check returns true
assertTrue(deciderService.checkForWorkflowCompletion(workflow));
}
private WorkflowDef createConditionalWF() {
WorkflowTask workflowTask1 = new WorkflowTask();
workflowTask1.setName("junit_task_1");
Map<String, Object> inputParams1 = new HashMap<>();
inputParams1.put("p1", "workflow.input.param1");
inputParams1.put("p2", "workflow.input.param2");
workflowTask1.setInputParameters(inputParams1);
workflowTask1.setTaskReferenceName("t1");
workflowTask1.setTaskDefinition(new TaskDef("junit_task_1"));
WorkflowTask workflowTask2 = new WorkflowTask();
workflowTask2.setName("junit_task_2");
Map<String, Object> inputParams2 = new HashMap<>();
inputParams2.put("tp1", "workflow.input.param1");
workflowTask2.setInputParameters(inputParams2);
workflowTask2.setTaskReferenceName("t2");
workflowTask2.setTaskDefinition(new TaskDef("junit_task_2"));
WorkflowTask workflowTask3 = new WorkflowTask();
workflowTask3.setName("junit_task_3");
Map<String, Object> inputParams3 = new HashMap<>();
inputParams2.put("tp3", "workflow.input.param2");
workflowTask3.setInputParameters(inputParams3);
workflowTask3.setTaskReferenceName("t3");
workflowTask3.setTaskDefinition(new TaskDef("junit_task_3"));
WorkflowDef workflowDef = new WorkflowDef();
workflowDef.setName("Conditional Workflow");
workflowDef.setDescription("Conditional Workflow");
workflowDef.setInputParameters(Arrays.asList("param1", "param2"));
WorkflowTask decisionTask2 = new WorkflowTask();
decisionTask2.setType(DECISION.name());
decisionTask2.setCaseValueParam("case");
decisionTask2.setName("conditional2");
decisionTask2.setTaskReferenceName("conditional2");
Map<String, List<WorkflowTask>> dc = new HashMap<>();
dc.put("one", Arrays.asList(workflowTask1, workflowTask3));
dc.put("two", Collections.singletonList(workflowTask2));
decisionTask2.setDecisionCases(dc);
decisionTask2.getInputParameters().put("case", "workflow.input.param2");
WorkflowTask decisionTask = new WorkflowTask();
decisionTask.setType(DECISION.name());
decisionTask.setCaseValueParam("case");
decisionTask.setName("conditional");
decisionTask.setTaskReferenceName("conditional");
Map<String, List<WorkflowTask>> decisionCases = new HashMap<>();
decisionCases.put("nested", Collections.singletonList(decisionTask2));
decisionCases.put("three", Collections.singletonList(workflowTask3));
decisionTask.setDecisionCases(decisionCases);
decisionTask.getInputParameters().put("case", "workflow.input.param1");
decisionTask.getDefaultCase().add(workflowTask2);
workflowDef.getTasks().add(decisionTask);
WorkflowTask notifyTask = new WorkflowTask();
notifyTask.setName("junit_task_4");
notifyTask.setTaskReferenceName("junit_task_4");
notifyTask.setTaskDefinition(new TaskDef("junit_task_4"));
WorkflowTask finalDecisionTask = new WorkflowTask();
finalDecisionTask.setName("finalcondition");
finalDecisionTask.setTaskReferenceName("tf");
finalDecisionTask.setType(DECISION.name());
finalDecisionTask.setCaseValueParam("finalCase");
Map<String, Object> fi = new HashMap<>();
fi.put("finalCase", "workflow.input.finalCase");
finalDecisionTask.setInputParameters(fi);
finalDecisionTask.getDecisionCases().put("notify", Collections.singletonList(notifyTask));
workflowDef.getTasks().add(finalDecisionTask);
return workflowDef;
}
private WorkflowDef createLinearWorkflow() {
Map<String, Object> inputParams = new HashMap<>();
inputParams.put("p1", "workflow.input.param1");
inputParams.put("p2", "workflow.input.param2");
WorkflowTask workflowTask1 = new WorkflowTask();
workflowTask1.setName("junit_task_l1");
workflowTask1.setInputParameters(inputParams);
workflowTask1.setTaskReferenceName("s1");
workflowTask1.setTaskDefinition(new TaskDef("junit_task_l1"));
WorkflowTask workflowTask2 = new WorkflowTask();
workflowTask2.setName("junit_task_l2");
workflowTask2.setInputParameters(inputParams);
workflowTask2.setTaskReferenceName("s2");
workflowTask2.setTaskDefinition(new TaskDef("junit_task_l2"));
WorkflowDef workflowDef = new WorkflowDef();
workflowDef.setSchemaVersion(2);
workflowDef.setInputParameters(Arrays.asList("param1", "param2"));
workflowDef.setName("Linear Workflow");
workflowDef.getTasks().addAll(Arrays.asList(workflowTask1, workflowTask2));
return workflowDef;
}
private WorkflowModel createDefaultWorkflow() {
WorkflowDef workflowDef = new WorkflowDef();
workflowDef.setName("TestDeciderService");
workflowDef.setVersion(1);
WorkflowModel workflow = new WorkflowModel();
workflow.setWorkflowDefinition(workflowDef);
workflow.getInput().put("requestId", "request id 001");
workflow.getInput().put("hasAwards", true);
workflow.getInput().put("channelMapping", 5);
Map<String, Object> name = new HashMap<>();
name.put("name", "The Who");
name.put("year", 1970);
Map<String, Object> name2 = new HashMap<>();
name2.put("name", "The Doors");
name2.put("year", 1975);
List<Object> names = new LinkedList<>();
names.add(name);
names.add(name2);
workflow.addOutput("name", name);
workflow.addOutput("names", names);
workflow.addOutput("awards", 200);
TaskModel task = new TaskModel();
task.setReferenceTaskName("task2");
task.addOutput("location", "http://location");
task.setStatus(TaskModel.Status.COMPLETED);
TaskModel task2 = new TaskModel();
task2.setReferenceTaskName("task3");
task2.addOutput("refId", "abcddef_1234_7890_aaffcc");
task2.setStatus(TaskModel.Status.SCHEDULED);
workflow.getTasks().add(task);
workflow.getTasks().add(task2);
return workflow;
}
    /**
     * Builds a deeply nested workflow fixture: an outer fork ("fork1") whose
     * branches contain a plain task, an inner fork/join ("fork2"/"join2") holding a
     * decision ("d1"), and a sub-workflow ("sw1"), followed by "join1" and "t5".
     */
    private WorkflowDef createNestedWorkflow() {
        WorkflowDef workflowDef = new WorkflowDef();
        workflowDef.setName("Nested Workflow");
        workflowDef.setDescription(workflowDef.getName());
        workflowDef.setVersion(1);
        workflowDef.setInputParameters(Arrays.asList("param1", "param2"));
        Map<String, Object> inputParams = new HashMap<>();
        inputParams.put("p1", "workflow.input.param1");
        inputParams.put("p2", "workflow.input.param2");
        // ten simple tasks t0..t9 sharing the same input-parameter template
        List<WorkflowTask> tasks = new ArrayList<>(10);
        for (int i = 0; i < 10; i++) {
            WorkflowTask workflowTask = new WorkflowTask();
            workflowTask.setName("junit_task_" + i);
            workflowTask.setInputParameters(inputParams);
            workflowTask.setTaskReferenceName("t" + i);
            workflowTask.setTaskDefinition(new TaskDef("junit_task_" + i));
            tasks.add(workflowTask);
        }
        // decision "d1": case a -> t6,t9; case b -> t7; default -> t8
        WorkflowTask decisionTask = new WorkflowTask();
        decisionTask.setType(DECISION.name());
        decisionTask.setName("Decision");
        decisionTask.setTaskReferenceName("d1");
        decisionTask.setDefaultCase(Collections.singletonList(tasks.get(8)));
        decisionTask.setCaseValueParam("case");
        Map<String, List<WorkflowTask>> decisionCases = new HashMap<>();
        decisionCases.put("a", Arrays.asList(tasks.get(6), tasks.get(9)));
        decisionCases.put("b", Collections.singletonList(tasks.get(7)));
        decisionTask.setDecisionCases(decisionCases);
        // sub-workflow "sw1" wrapping the linear workflow fixture
        WorkflowDef subWorkflowDef = createLinearWorkflow();
        WorkflowTask subWorkflow = new WorkflowTask();
        subWorkflow.setType(SUB_WORKFLOW.name());
        subWorkflow.setName("sw1");
        SubWorkflowParams subWorkflowParams = new SubWorkflowParams();
        subWorkflowParams.setName(subWorkflowDef.getName());
        subWorkflow.setSubWorkflowParam(subWorkflowParams);
        subWorkflow.setTaskReferenceName("sw1");
        // inner fork "fork2": branch 1 = t2,t4; branch 2 = t3,d1
        WorkflowTask forkTask2 = new WorkflowTask();
        forkTask2.setType(FORK_JOIN.name());
        forkTask2.setName("second fork");
        forkTask2.setTaskReferenceName("fork2");
        forkTask2.getForkTasks().add(Arrays.asList(tasks.get(2), tasks.get(4)));
        forkTask2.getForkTasks().add(Arrays.asList(tasks.get(3), decisionTask));
        WorkflowTask joinTask2 = new WorkflowTask();
        joinTask2.setName("join2");
        joinTask2.setType(JOIN.name());
        joinTask2.setTaskReferenceName("join2");
        joinTask2.setJoinOn(Arrays.asList("t4", "d1"));
        // outer fork "fork1": branch 1 = t1; branch 2 = fork2+join2; branch 3 = sw1
        WorkflowTask forkTask1 = new WorkflowTask();
        forkTask1.setType(FORK_JOIN.name());
        forkTask1.setName("fork1");
        forkTask1.setTaskReferenceName("fork1");
        forkTask1.getForkTasks().add(Collections.singletonList(tasks.get(1)));
        forkTask1.getForkTasks().add(Arrays.asList(forkTask2, joinTask2));
        forkTask1.getForkTasks().add(Collections.singletonList(subWorkflow));
        WorkflowTask joinTask1 = new WorkflowTask();
        joinTask1.setName("join1");
        joinTask1.setType(JOIN.name());
        joinTask1.setTaskReferenceName("join1");
        joinTask1.setJoinOn(Arrays.asList("t1", "fork2"));
        // top-level sequence: fork1 -> join1 -> t5
        workflowDef.getTasks().add(forkTask1);
        workflowDef.getTasks().add(joinTask1);
        workflowDef.getTasks().add(tasks.get(5));
        return workflowDef;
    }
}
| 6,599 |
// (removed non-source residue appended after the end of the class)