index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/genie/genie-agent/src/main/java/com/netflix/genie/agent/execution | Create_ds/genie/genie-agent/src/main/java/com/netflix/genie/agent/execution/process/JobProcessManager.java | /*
*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.genie.agent.execution.process;
import com.netflix.genie.agent.execution.exceptions.JobLaunchException;
import com.netflix.genie.agent.execution.services.KillService;
import javax.annotation.Nullable;
import java.io.File;
/**
 * Singleton to manage the subprocess for the actual user job this Agent instance is managing.
 *
 * @author mprimi
 * @author tgianos
 * @since 4.0.0
 */
public interface JobProcessManager {

    /**
     * Launch the job process (unless launch was aborted by a previous {@code kill} call).
     *
     * @param jobDirectory         the job directory
     * @param jobScript            the job script (a.k.a. run file)
     * @param interactive          launch in interactive mode (inherit I/O) or batch mode (no input, write
     *                             outputs to files)
     * @param timeout              the optional number of seconds this job is allowed to run before the system
     *                             will kill it
     * @param launchInJobDirectory launch the job process from the job directory rather than the current directory
     * @throws JobLaunchException if the job process failed to launch
     */
    void launchProcess(
        File jobDirectory,
        File jobScript,
        boolean interactive,
        @Nullable Integer timeout,
        boolean launchInJobDirectory
    ) throws JobLaunchException;

    /**
     * Terminate job process execution (if still running) or prevent it from launching (if not launched yet).
     * Optionally sends SIGINT to the process. This is unnecessary under certain circumstances: for example,
     * a CTRL-C in a terminal session is already received by the job process, so issuing a second one is not
     * needed.
     *
     * @param source The {@link KillService.KillSource} value representing where this kill request is coming from
     */
    void kill(KillService.KillSource source);

    /**
     * Wait indefinitely for the job process to terminate.
     *
     * @return a result whose status is KILLED, SUCCEEDED, or FAILED
     * @throws IllegalStateException if the process was not launched
     * @throws InterruptedException  if the calling thread is interrupted while waiting
     */
    JobProcessResult waitFor() throws InterruptedException;
}
| 3,000 |
0 | Create_ds/genie/genie-agent/src/main/java/com/netflix/genie/agent/execution | Create_ds/genie/genie-agent/src/main/java/com/netflix/genie/agent/execution/process/JobProcessResult.java | /*
*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.genie.agent.execution.process;
import com.netflix.genie.common.internal.dtos.JobStatus;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.ToString;
/**
 * A DTO POJO to capture final information about the job process this agent process was responsible for.
 *
 * @author tgianos
 * @since 4.0.0
 */
@Getter
@EqualsAndHashCode(doNotUseGetters = true)
@ToString(doNotUseGetters = true)
@SuppressWarnings("FinalClass")
public class JobProcessResult {
    private final JobStatus finalStatus;
    private final String finalStatusMessage;
    private final long stdOutSize;
    private final long stdErrSize;
    private final int exitCode;

    /**
     * Private constructor; instances are created only through {@link Builder}.
     *
     * @param builder the builder whose state is copied into this immutable instance
     */
    private JobProcessResult(final Builder builder) {
        this.finalStatus = builder.bFinalStatus;
        this.finalStatusMessage = builder.bFinalStatusMessage;
        this.stdOutSize = builder.bStdOutSize;
        this.stdErrSize = builder.bStdErrSize;
        this.exitCode = builder.bExitCode;
    }

    /**
     * A builder to create valid, immutable {@link JobProcessResult} instances.
     *
     * @author tgianos
     * @since 4.0.0
     */
    public static class Builder {
        private final JobStatus bFinalStatus;
        private final String bFinalStatusMessage;
        private final int bExitCode;
        private long bStdOutSize;
        private long bStdErrSize;

        /**
         * Constructor.
         *
         * @param finalStatus        The final {@link JobStatus} for the job. {@link JobStatus#isFinished()} must
         *                           return true
         * @param finalStatusMessage The final human readable message for the job status
         * @param exitCode           The process exit code
         * @throws IllegalArgumentException When {@literal finalStatus} is not a final status
         */
        public Builder(
            final JobStatus finalStatus,
            final String finalStatusMessage,
            final int exitCode
        ) throws IllegalArgumentException {
            if (!finalStatus.isFinished()) {
                throw new IllegalArgumentException(
                    "finalStatus must be one of the final states: "
                        + JobStatus.getFinishedStatuses()
                        + ". Was "
                        // Bug fix: previously this appended finalStatusMessage, which reported the human
                        // readable message instead of the actual offending status value.
                        + finalStatus
                );
            }
            this.bFinalStatus = finalStatus;
            this.bFinalStatusMessage = finalStatusMessage;
            this.bExitCode = exitCode;
        }

        /**
         * Set the length of the std out file in bytes if there was one.
         *
         * @param stdOutSize The length of the std out file in bytes; negative values are clamped to zero
         * @return This builder object
         */
        public Builder withStdOutSize(final long stdOutSize) {
            this.bStdOutSize = Math.max(stdOutSize, 0L);
            return this;
        }

        /**
         * Set the length of the std error file in bytes if there was one.
         *
         * @param stdErrSize The length of the std error file in bytes; negative values are clamped to zero
         * @return This builder object
         */
        public Builder withStdErrSize(final long stdErrSize) {
            this.bStdErrSize = Math.max(stdErrSize, 0L);
            return this;
        }

        /**
         * Create a new immutable {@link JobProcessResult} instance based on the current contents of this builder.
         *
         * @return A {@link JobProcessResult} instance
         */
        public JobProcessResult build() {
            return new JobProcessResult(this);
        }
    }
}
| 3,001 |
0 | Create_ds/genie/genie-agent/src/main/java/com/netflix/genie/agent/execution | Create_ds/genie/genie-agent/src/main/java/com/netflix/genie/agent/execution/process/package-info.java | /*
*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Interfaces and classes involved with managing and interacting with the underlying job process this agent is
* responsible for.
*
* @author tgianos
* @since 4.0.0
*/
@ParametersAreNonnullByDefault
package com.netflix.genie.agent.execution.process;
import javax.annotation.ParametersAreNonnullByDefault;
| 3,002 |
0 | Create_ds/genie/genie-agent/src/main/java/com/netflix/genie/agent/execution/process | Create_ds/genie/genie-agent/src/main/java/com/netflix/genie/agent/execution/process/impl/JobProcessManagerImpl.java | /*
*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.genie.agent.execution.process.impl;
import brave.Span;
import brave.Tracer;
import com.netflix.genie.agent.cli.logging.ConsoleLog;
import com.netflix.genie.agent.execution.exceptions.JobLaunchException;
import com.netflix.genie.agent.execution.process.JobProcessManager;
import com.netflix.genie.agent.execution.process.JobProcessResult;
import com.netflix.genie.agent.execution.services.KillService;
import com.netflix.genie.agent.utils.PathUtils;
import com.netflix.genie.common.dto.JobStatusMessages;
import com.netflix.genie.common.internal.dtos.JobStatus;
import com.netflix.genie.common.internal.tracing.brave.BraveTracePropagator;
import com.netflix.genie.common.internal.tracing.brave.BraveTracingComponents;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.ObjectUtils;
import org.springframework.scheduling.TaskScheduler;
import javax.annotation.Nullable;
import java.io.File;
import java.io.IOException;
import java.lang.reflect.Field;
import java.nio.file.Paths;
import java.time.Instant;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
/**
 * Configures and launches a job sub-process using metadata passed through ExecutionContext.
 *
 * @author mprimi
 * @since 4.0.0
 */
@Slf4j
public class JobProcessManagerImpl implements JobProcessManager {

    // Number of seconds to wait before declaring the kill action failed
    private static final int KILL_WAIT_SECS = 10;
    // Time interval in milliseconds between liveness checks while waiting for the process to die
    private static final int KILL_CHECK_INTERVAL_MS = 1000;
    private static final int SUCCESS_EXIT_CODE = 0;

    private final AtomicBoolean launched = new AtomicBoolean(false);
    private final AtomicReference<Process> processReference = new AtomicReference<>();
    private final AtomicBoolean killed = new AtomicBoolean(false);
    private final AtomicReference<KillService.KillSource> killSource = new AtomicReference<>();
    // Fix: parameterized the previously raw ScheduledFuture type
    private final AtomicReference<ScheduledFuture<?>> timeoutKillThread = new AtomicReference<>();
    // Marker file whose existence after a failed run indicates job setup (rather than the job itself) failed
    private final AtomicReference<File> initFailedFileRef = new AtomicReference<>();
    private final TaskScheduler taskScheduler;
    private final Tracer tracer;
    private final BraveTracePropagator tracePropagator;
    private boolean isInteractiveMode;

    /**
     * Constructor.
     *
     * @param taskScheduler     The {@link TaskScheduler} instance to use to run scheduled asynchronous tasks
     * @param tracingComponents The {@link BraveTracingComponents} instance to use for propagating trace information
     */
    public JobProcessManagerImpl(final TaskScheduler taskScheduler, final BraveTracingComponents tracingComponents) {
        this.taskScheduler = taskScheduler;
        this.tracer = tracingComponents.getTracer();
        this.tracePropagator = tracingComponents.getTracePropagator();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void launchProcess(
        final File jobDirectory,
        final File jobScript,
        final boolean interactive,
        @Nullable final Integer timeout,
        final boolean launchInJobDirectory
    ) throws JobLaunchException {
        // Guard against double launch; only the first caller proceeds
        if (!this.launched.compareAndSet(false, true)) {
            throw new IllegalStateException("Job already launched");
        }
        this.isInteractiveMode = interactive;
        final ProcessBuilder processBuilder = new ProcessBuilder();

        // Validate job running directory
        if (jobDirectory == null) {
            throw new JobLaunchException("Job directory is null");
        } else if (!jobDirectory.exists()) {
            throw new JobLaunchException("Job directory does not exist: " + jobDirectory);
        } else if (!jobDirectory.isDirectory()) {
            throw new JobLaunchException("Job directory is not a directory: " + jobDirectory);
        } else if (!jobDirectory.canWrite()) {
            throw new JobLaunchException("Job directory is not writable: " + jobDirectory);
        }

        // Validate the job script (a.k.a. run file)
        if (jobScript == null) {
            throw new JobLaunchException("Job script is null");
        } else if (!jobScript.exists() || !jobScript.isFile()) {
            throw new JobLaunchException("Job script is not a valid file");
        } else if (!jobScript.canExecute()) {
            throw new JobLaunchException("Job script is not executable");
        }

        this.initFailedFileRef.set(PathUtils.jobSetupErrorMarkerFilePath(jobDirectory).toFile());
        log.info("Executing job script: {} (working directory: {})",
            jobScript.getAbsolutePath(),
            launchInJobDirectory ? jobDirectory : Paths.get("").toAbsolutePath().normalize().toString());
        processBuilder.command(jobScript.getAbsolutePath());
        if (launchInJobDirectory) {
            processBuilder.directory(jobDirectory);
        }
        if (interactive) {
            processBuilder.inheritIO();
        } else {
            processBuilder.redirectError(PathUtils.jobStdErrPath(jobDirectory).toFile());
            processBuilder.redirectOutput(PathUtils.jobStdOutPath(jobDirectory).toFile());
        }

        // Propagate the current trace context to the child job via environment variables, if one exists
        final Span currentSpan = this.tracer.currentSpan();
        if (currentSpan != null) {
            processBuilder.environment().putAll(this.tracePropagator.injectForJob(currentSpan.context()));
        }

        // A kill may have arrived between launch() being called and this point; honor it by not launching
        if (this.killed.get()) {
            log.info("Job aborted, skipping launch");
            return;
        }

        log.info("Launching job");
        try {
            this.processReference.set(processBuilder.start());
            if (timeout != null) {
                // NOTE: There is a chance of a SLIGHT delay here between the process launch and the timeout
                final Instant timeoutInstant = Instant.now().plusSeconds(timeout);
                this.timeoutKillThread.set(
                    this.taskScheduler.schedule(new TimeoutKiller(this), timeoutInstant)
                );
                log.info("Scheduled timeout kill to occur {} second(s) from now at {}", timeout, timeoutInstant);
            }
        } catch (IOException | SecurityException e) {
            throw new JobLaunchException("Failed to launch job: ", e);
        }
        log.info("Process launched (pid: {})", this.getPid(this.processReference.get()));
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void kill(final KillService.KillSource source) {
        log.info("Killing job process (kill event source: {})", source);

        // First kill wins; subsequent requests are no-ops
        if (!this.killed.compareAndSet(false, true)) {
            // this job was already killed by something else
            return;
        }
        this.killSource.set(source);

        final Process process = this.processReference.get();
        if (process == null) {
            // Process never launched; the killed flag above prevents a later launch
            return;
        }
        try {
            // Grace killing period
            gracefullyKill(process);
            if (!process.isAlive()) {
                log.info("Gracefully killed job process successfully");
                return;
            }
            log.info("Forcefully killing job process");
            forcefullyKill(process);
            if (!process.isAlive()) {
                log.info("Forcefully killed job process successfully");
                return;
            }
        } catch (Exception e) {
            log.warn("Failed to kill job process with exception: ", e);
        }
        log.error("Failed to kill job process");
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public JobProcessResult waitFor() throws InterruptedException {
        if (!this.launched.get()) {
            throw new IllegalStateException("Process not launched");
        }
        final Process process = this.processReference.get();
        int exitCode = 0;
        if (process != null) {
            exitCode = process.waitFor();
            ConsoleLog.getLogger().info("Job process terminated with exit code: {}", exitCode);
        }
        try {
            // Evil-but-necessary little hack.
            // The agent and the child job process receive SIGINT at the same time (e.g. in case of ctrl-c).
            // If the child terminates quickly, the code below will execute before the signal handler has a chance to
            // set the job as killed, and the final status would be (incorrectly) reported as success/failure,
            // depending on exit code, as opposed to killed.
            // So give the handler a chance to raise the 'killed' flag before attempting to read it.
            Thread.sleep(100);
        } catch (final InterruptedException e) {
            // Fix: restore the interrupt status for the caller rather than silently swallowing it;
            // the sleep itself is best-effort so we still continue building the result.
            Thread.currentThread().interrupt();
        }

        // If for whatever reason the timeout thread is currently running or if it is scheduled to be run, cancel it
        final ScheduledFuture<?> timeoutThreadFuture = this.timeoutKillThread.get();
        if (timeoutThreadFuture != null) {
            timeoutThreadFuture.cancel(true);
        }

        // Check exit code first to see if the job finishes successfully and returns SUCCEEDED as status,
        // even the job gets a KILL request.
        if (process != null && exitCode == SUCCESS_EXIT_CODE) {
            return new JobProcessResult.Builder(
                JobStatus.SUCCEEDED,
                JobStatusMessages.JOB_FINISHED_SUCCESSFULLY,
                exitCode
            ).build();
        }

        if (this.killed.get()) {
            // Default to API kill request when no source was recorded
            final KillService.KillSource source = ObjectUtils.firstNonNull(
                this.killSource.get(), KillService.KillSource.API_KILL_REQUEST);
            switch (source) {
                case TIMEOUT:
                    return new JobProcessResult
                        .Builder(JobStatus.KILLED, JobStatusMessages.JOB_EXCEEDED_TIMEOUT, exitCode)
                        .build();
                case FILES_LIMIT:
                    return new JobProcessResult
                        .Builder(JobStatus.KILLED, JobStatusMessages.JOB_EXCEEDED_FILES_LIMIT, exitCode)
                        .build();
                case REMOTE_STATUS_MONITOR:
                    return new JobProcessResult
                        .Builder(JobStatus.KILLED, JobStatusMessages.JOB_MARKED_FAILED, exitCode)
                        .build();
                case SYSTEM_SIGNAL:
                    // In interactive mode, killed by a system signal is mostly likely by a user (e.g. Ctrl-C)
                    return new JobProcessResult
                        .Builder(JobStatus.FAILED,
                            this.isInteractiveMode
                                ? JobStatusMessages.JOB_KILLED_BY_USER
                                : JobStatusMessages.JOB_KILLED_BY_SYSTEM,
                            exitCode)
                        .build();
                case API_KILL_REQUEST:
                default:
                    return new JobProcessResult
                        .Builder(JobStatus.KILLED, JobStatusMessages.JOB_KILLED_BY_USER, exitCode)
                        .build();
            }
        }

        // Not successful and not killed: distinguish setup failure from job failure via the marker file
        final File initFailedFile = this.initFailedFileRef.get();
        final String statusMessage = (initFailedFile != null && initFailedFile.exists())
            ? JobStatusMessages.JOB_SETUP_FAILED : JobStatusMessages.JOB_FAILED;
        return new JobProcessResult.Builder(JobStatus.FAILED, statusMessage, exitCode).build();
    }

    // Send SIGTERM and poll for up to KILL_WAIT_SECS for the process to exit
    private void gracefullyKill(final Process process) throws Exception {
        final Instant graceKillEnd = Instant.now().plusSeconds(KILL_WAIT_SECS);
        process.destroy();
        while (process.isAlive() && Instant.now().isBefore(graceKillEnd)) {
            process.waitFor(KILL_CHECK_INTERVAL_MS, TimeUnit.MILLISECONDS);
        }
    }

    // Forcibly destroy the process and poll for up to KILL_WAIT_SECS for it to exit
    private void forcefullyKill(final Process process) throws Exception {
        final Instant forceKillEnd = Instant.now().plusSeconds(KILL_WAIT_SECS);
        // In Java8, this is exactly destroy(). However, this behavior can be changed in future java.
        process.destroyForcibly();
        while (process.isAlive() && Instant.now().isBefore(forceKillEnd)) {
            process.waitFor(KILL_CHECK_INTERVAL_MS, TimeUnit.MILLISECONDS);
        }
    }

    /* TODO: HACK, Process does not expose PID in Java 8 API */
    private long getPid(final Process process) {
        long pid = -1;
        final String processClassName = process.getClass().getCanonicalName();
        try {
            if ("java.lang.UNIXProcess".equals(processClassName)) {
                // Reflectively read the private 'pid' field of the UNIX process implementation
                final Field pidMemberField = process.getClass().getDeclaredField("pid");
                final boolean resetAccessible = pidMemberField.isAccessible();
                pidMemberField.setAccessible(true);
                pid = pidMemberField.getLong(process);
                pidMemberField.setAccessible(resetAccessible);
            } else {
                log.debug("Don't know how to access PID for class {}", processClassName);
            }
        } catch (final Throwable t) {
            // Fix: include the throwable so the failure cause is not silently discarded
            log.warn("Failed to determine job process PID", t);
        }
        return pid;
    }

    /**
     * This class is meant to be run in a thread that wakes up after some period of time and initiates a kill of the
     * job process due to the job timing out.
     *
     * @author tgianos
     * @since 4.0.0
     */
    @Slf4j
    private static class TimeoutKiller implements Runnable {
        private final JobProcessManager jobProcessManager;

        TimeoutKiller(final JobProcessManager jobProcessManager) {
            this.jobProcessManager = jobProcessManager;
        }

        /**
         * When this thread is run it is expected that the timeout duration has been reached so the run merely
         * sends a kill signal to the manager.
         */
        @Override
        public void run() {
            log.info("Timeout for job reached at {}. Sending kill signal to terminate job.", Instant.now());
            this.jobProcessManager.kill(KillService.KillSource.TIMEOUT);
        }
    }
}
| 3,003 |
0 | Create_ds/genie/genie-agent/src/main/java/com/netflix/genie/agent/execution/process | Create_ds/genie/genie-agent/src/main/java/com/netflix/genie/agent/execution/process/impl/package-info.java | /*
*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Implementations of any interfaces that would be found in the parent package.
*
* @author tgianos
* @since 4.0.0
*/
@ParametersAreNonnullByDefault
package com.netflix.genie.agent.execution.process.impl;
import javax.annotation.ParametersAreNonnullByDefault;
| 3,004 |
0 | Create_ds/genie/genie-agent/src/main/java/com/netflix/genie/agent | Create_ds/genie/genie-agent/src/main/java/com/netflix/genie/agent/rpc/GRpcAutoConfiguration.java | /*
*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.genie.agent.rpc;
import brave.Tracing;
import brave.grpc.GrpcTracing;
import com.netflix.genie.agent.cli.ArgumentDelegates;
import com.netflix.genie.proto.FileStreamServiceGrpc;
import com.netflix.genie.proto.HeartBeatServiceGrpc;
import com.netflix.genie.proto.JobKillServiceGrpc;
import com.netflix.genie.proto.JobServiceGrpc;
import com.netflix.genie.proto.PingServiceGrpc;
import io.grpc.ClientInterceptor;
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
import lombok.extern.slf4j.Slf4j;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Lazy;
import org.springframework.context.annotation.Scope;
import java.util.List;
/**
 * Spring auto configuration for gRPC components.
 *
 * @author mprimi
 * @author tgianos
 * @since 4.0.0
 */
@Configuration
@Slf4j
public class GRpcAutoConfiguration {

    /**
     * Provide a lazy {@link ManagedChannel} bean if none was already defined for communicating with the Genie server.
     *
     * @param serverArguments    The server arguments to use
     * @param clientInterceptors The list of available client interceptors that should be added to the generated channel
     * @return A {@link ManagedChannel} instance configured to use plain text over the wire
     */
    @Bean
    @Lazy
    @ConditionalOnMissingBean(ManagedChannel.class)
    public ManagedChannel channel(
        final ArgumentDelegates.ServerArguments serverArguments,
        final List<ClientInterceptor> clientInterceptors
    ) {
        // Build a plaintext channel targeting the configured server host/port with all interceptors attached
        final ManagedChannelBuilder<?> builder = ManagedChannelBuilder.forAddress(
            serverArguments.getServerHost(),
            serverArguments.getServerPort()
        );
        builder.intercept(clientInterceptors);
        builder.usePlaintext();
        return builder.build();
    }

    /**
     * Provide a prototype bean definition for a {@link com.netflix.genie.proto.PingServiceGrpc.PingServiceFutureStub}.
     *
     * @param channel The managed channel to use to connect to the Genie server
     * @return A {@link com.netflix.genie.proto.PingServiceGrpc.PingServiceFutureStub} instance per use
     */
    @Bean
    @Scope("prototype")
    public PingServiceGrpc.PingServiceFutureStub pingServiceClient(final ManagedChannel channel) {
        final PingServiceGrpc.PingServiceFutureStub stub = PingServiceGrpc.newFutureStub(channel);
        return stub;
    }

    /**
     * Provide a prototype bean definition for a
     * {@link com.netflix.genie.proto.JobServiceGrpc.JobServiceFutureStub}.
     *
     * @param channel The managed channel to use to connect to the Genie server
     * @return A {@link com.netflix.genie.proto.JobServiceGrpc.JobServiceFutureStub} instance per use
     */
    @Bean
    @Scope("prototype")
    public JobServiceGrpc.JobServiceFutureStub jobClient(final ManagedChannel channel) {
        final JobServiceGrpc.JobServiceFutureStub stub = JobServiceGrpc.newFutureStub(channel);
        return stub;
    }

    /**
     * Provide a prototype bean definition for a
     * {@link com.netflix.genie.proto.HeartBeatServiceGrpc.HeartBeatServiceStub}.
     *
     * @param channel The managed channel to use to connect to the Genie server
     * @return A {@link com.netflix.genie.proto.HeartBeatServiceGrpc.HeartBeatServiceStub} instance per use
     */
    @Bean
    @Scope("prototype")
    public HeartBeatServiceGrpc.HeartBeatServiceStub heartBeatClient(final ManagedChannel channel) {
        final HeartBeatServiceGrpc.HeartBeatServiceStub stub = HeartBeatServiceGrpc.newStub(channel);
        return stub;
    }

    /**
     * Provide a prototype bean definition for a
     * {@link com.netflix.genie.proto.JobKillServiceGrpc.JobKillServiceFutureStub}.
     *
     * @param channel The managed channel to use to connect to the Genie server
     * @return A {@link com.netflix.genie.proto.JobKillServiceGrpc.JobKillServiceFutureStub} instance per use
     */
    @Bean
    @Scope("prototype")
    public JobKillServiceGrpc.JobKillServiceFutureStub jobKillClient(final ManagedChannel channel) {
        final JobKillServiceGrpc.JobKillServiceFutureStub stub = JobKillServiceGrpc.newFutureStub(channel);
        return stub;
    }

    /**
     * Provide a prototype bean definition for a {@link FileStreamServiceGrpc.FileStreamServiceStub}.
     *
     * @param channel The managed channel to use to connect to the Genie server
     * @return A {@link FileStreamServiceGrpc.FileStreamServiceStub} instance per use
     */
    @Bean
    @Scope("prototype")
    public FileStreamServiceGrpc.FileStreamServiceStub fileStreamClient(final ManagedChannel channel) {
        final FileStreamServiceGrpc.FileStreamServiceStub stub = FileStreamServiceGrpc.newStub(channel);
        return stub;
    }

    /**
     * Provide a {@link ClientInterceptor} which adds tracing information to gRPC calls.
     *
     * @param tracing The Brave {@link Tracing} instance
     * @return A {@link ClientInterceptor} for trace propagation
     */
    @Bean
    @ConditionalOnMissingBean(name = "genieGrpcTracingClientInterceptor")
    @Lazy
    public ClientInterceptor genieGrpcTracingClientInterceptor(final Tracing tracing) {
        final GrpcTracing grpcTracing = GrpcTracing.create(tracing);
        return grpcTracing.newClientInterceptor();
    }
}
| 3,005 |
0 | Create_ds/genie/genie-agent/src/main/java/com/netflix/genie/agent | Create_ds/genie/genie-agent/src/main/java/com/netflix/genie/agent/rpc/ChannelLoggingInterceptor.java | /*
*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.genie.agent.rpc;
import io.grpc.CallOptions;
import io.grpc.Channel;
import io.grpc.ClientCall;
import io.grpc.ClientInterceptor;
import io.grpc.MethodDescriptor;
import lombok.extern.slf4j.Slf4j;
/**
 * Interceptor that logs outbound gRPC calls.
 *
 * @author mprimi
 * @since 4.0.0
 */
@Slf4j
class ChannelLoggingInterceptor implements ClientInterceptor {

    /**
     * {@inheritDoc}
     */
    @Override
    public <ReqT, RespT> ClientCall<ReqT, RespT> interceptCall(
        final MethodDescriptor<ReqT, RespT> method,
        final CallOptions callOptions,
        final Channel next
    ) {
        // Log the call's type and fully-qualified method name; the channel's identity hash
        // distinguishes between multiple channels in the logs.
        log.info(
            "gRPC {} call: {} (channel: {})",
            method.getType().toString(),
            method.getFullMethodName(),
            next.hashCode()
        );
        // Delegate the actual call creation to the next channel in the chain
        return next.newCall(method, callOptions);
    }
}
| 3,006 |
0 | Create_ds/genie/genie-agent/src/main/java/com/netflix/genie/agent | Create_ds/genie/genie-agent/src/main/java/com/netflix/genie/agent/rpc/package-info.java | /*
*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* RPC clients and utilities.
*
* @author mprimi
* @since 4.0.0
*/
@ParametersAreNonnullByDefault
package com.netflix.genie.agent.rpc;
import javax.annotation.ParametersAreNonnullByDefault;
| 3,007 |
0 | Create_ds/genie/genie-agent/src/main/java/com/netflix/genie/agent | Create_ds/genie/genie-agent/src/main/java/com/netflix/genie/agent/spring/package-info.java | /*
*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Classes related to configuring the Agent as a Spring Boot application.
*
* @author tgianos
* @since 4.0.0
*/
@ParametersAreNonnullByDefault
package com.netflix.genie.agent.spring;
import javax.annotation.ParametersAreNonnullByDefault;
| 3,008 |
0 | Create_ds/genie/genie-agent/src/main/java/com/netflix/genie/agent/spring | Create_ds/genie/genie-agent/src/main/java/com/netflix/genie/agent/spring/processors/GenieDefaultPropertiesPostProcessor.java | /*
*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.genie.agent.spring.processors;
import com.netflix.genie.common.internal.util.PropertySourceUtils;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.env.EnvironmentPostProcessor;
import org.springframework.core.env.ConfigurableEnvironment;
import org.springframework.core.env.PropertySource;
import org.springframework.core.io.ClassPathResource;
import org.springframework.core.io.Resource;
/**
 * Adds default properties to the Spring environment before application refresh.
 *
 * @author tgianos
 * @since 4.0.0
 */
public class GenieDefaultPropertiesPostProcessor implements EnvironmentPostProcessor {

    static final String DEFAULT_PROPERTY_SOURCE_NAME = "genie-agent-defaults";
    private static final String DEFAULT_PROPERTIES_FILE = "genie-agent-defaults.yml";

    /**
     * {@inheritDoc}
     */
    @Override
    public void postProcessEnvironment(final ConfigurableEnvironment environment, final SpringApplication application) {
        // Load the bundled defaults YAML and register it with the LOWEST precedence so any
        // user-supplied configuration overrides these values.
        environment.getPropertySources().addLast(
            PropertySourceUtils.loadYamlPropertySource(
                DEFAULT_PROPERTY_SOURCE_NAME,
                new ClassPathResource(DEFAULT_PROPERTIES_FILE)
            )
        );
    }
}
| 3,009 |
0 | Create_ds/genie/genie-agent/src/main/java/com/netflix/genie/agent/spring | Create_ds/genie/genie-agent/src/main/java/com/netflix/genie/agent/spring/processors/package-info.java | /*
*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Any environment post processors for Spring that are needed.
*
* @author tgianos
* @since 4.0.0
*/
@ParametersAreNonnullByDefault
package com.netflix.genie.agent.spring.processors;
import javax.annotation.ParametersAreNonnullByDefault;
| 3,010 |
0 | Create_ds/genie/genie-agent/src/main/java/com/netflix/genie/agent/spring | Create_ds/genie/genie-agent/src/main/java/com/netflix/genie/agent/spring/autoconfigure/AgentAutoConfiguration.java | /*
*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.genie.agent.spring.autoconfigure;
import com.netflix.genie.agent.AgentMetadata;
import com.netflix.genie.agent.AgentMetadataImpl;
import com.netflix.genie.agent.properties.AgentProperties;
import com.netflix.genie.agent.utils.locks.impl.FileLockFactory;
import com.netflix.genie.common.internal.util.GenieHostInfo;
import com.netflix.genie.common.internal.util.HostnameUtil;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.boot.task.TaskExecutorCustomizer;
import org.springframework.boot.task.TaskSchedulerCustomizer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Lazy;
import org.springframework.core.task.AsyncTaskExecutor;
import org.springframework.scheduling.TaskScheduler;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;
import org.springframework.scheduling.concurrent.ThreadPoolTaskScheduler;
import java.net.UnknownHostException;
/**
* Configuration for various agent beans.
*
* @author standon
* @since 4.0.0
*/
@Configuration
@EnableConfigurationProperties(
{
AgentProperties.class
}
)
public class AgentAutoConfiguration {

    // Shared pool sizes for the agent-wide executor/scheduler beans.
    private static final int SHARED_POOL_SIZE = 5;

    /**
     * Provide a bean of type {@link GenieHostInfo} if none already exists.
     *
     * @return A {@link GenieHostInfo} instance
     * @throws UnknownHostException if hostname cannot be determined
     */
    @Bean
    @ConditionalOnMissingBean(GenieHostInfo.class)
    public GenieHostInfo genieAgentHostInfo() throws UnknownHostException {
        final String hostname = HostnameUtil.getHostname();
        return new GenieHostInfo(hostname);
    }

    /**
     * Provide a lazy bean definition for {@link AgentMetadata} if none already exists.
     *
     * @param genieHostInfo the host information
     * @return A {@link AgentMetadataImpl} instance
     */
    @Bean
    @Lazy
    @ConditionalOnMissingBean(AgentMetadata.class)
    public AgentMetadataImpl agentMetadata(final GenieHostInfo genieHostInfo) {
        return new AgentMetadataImpl(genieHostInfo.getHostname());
    }

    /**
     * Provide a lazy {@link FileLockFactory}.
     *
     * @return A {@link FileLockFactory} instance
     */
    @Bean
    @Lazy
    public FileLockFactory fileLockFactory() {
        return new FileLockFactory();
    }

    /**
     * Get a lazy {@link AsyncTaskExecutor} bean which may be shared by different components if one isn't already
     * defined.
     *
     * @param agentProperties the agent properties
     * @return A {@link ThreadPoolTaskExecutor} instance
     */
    @Bean
    @Lazy
    @ConditionalOnMissingBean(name = "sharedAgentTaskExecutor", value = AsyncTaskExecutor.class)
    public AsyncTaskExecutor sharedAgentTaskExecutor(final AgentProperties agentProperties) {
        final ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor();
        executor.setCorePoolSize(SHARED_POOL_SIZE);
        executor.setThreadNamePrefix("agent-task-executor-");
        // Give in-flight tasks a bounded grace period during agent shutdown
        executor.setWaitForTasksToCompleteOnShutdown(true);
        executor.setAwaitTerminationSeconds(
            (int) agentProperties.getShutdown().getInternalExecutorsLeeway().getSeconds()
        );
        return executor;
    }

    /**
     * Provide a lazy {@link TaskScheduler} to be used by the Agent process if one isn't already defined.
     *
     * @param agentProperties the agent properties
     * @return A {@link ThreadPoolTaskScheduler} instance
     */
    @Bean
    @Lazy
    @ConditionalOnMissingBean(name = "sharedAgentTaskScheduler")
    public TaskScheduler sharedAgentTaskScheduler(final AgentProperties agentProperties) {
        final ThreadPoolTaskScheduler scheduler = new ThreadPoolTaskScheduler();
        scheduler.setPoolSize(SHARED_POOL_SIZE); // Big enough? TODO confirm against actual scheduled task count
        scheduler.setThreadNamePrefix("agent-task-scheduler-");
        scheduler.setWaitForTasksToCompleteOnShutdown(true);
        scheduler.setAwaitTerminationSeconds(
            (int) agentProperties.getShutdown().getInternalSchedulersLeeway().getSeconds()
        );
        return scheduler;
    }

    /**
     * Provide a lazy {@link TaskScheduler} bean for use by the heart beat service is none has already been
     * defined in the context.
     *
     * @param agentProperties the agent properties
     * @return A {@link TaskScheduler} that the heart beat service should use
     */
    @Bean
    @Lazy
    @ConditionalOnMissingBean(name = "heartBeatServiceTaskScheduler")
    public TaskScheduler heartBeatServiceTaskScheduler(final AgentProperties agentProperties) {
        final ThreadPoolTaskScheduler taskScheduler = new ThreadPoolTaskScheduler();
        taskScheduler.setPoolSize(1);
        taskScheduler.setWaitForTasksToCompleteOnShutdown(true);
        taskScheduler.setAwaitTerminationSeconds(
            (int) agentProperties.getShutdown().getInternalSchedulersLeeway().getSeconds()
        );
        // Initialize only after all properties are set, consistent with the other factory
        // methods in this class (previously initialize() ran before the shutdown settings).
        taskScheduler.initialize();
        return taskScheduler;
    }

    /**
     * Customizer for Spring's task executor.
     *
     * @param agentProperties the agent properties
     * @return a customizer for the task executor
     */
    @Bean
    TaskExecutorCustomizer taskExecutorCustomizer(final AgentProperties agentProperties) {
        return taskExecutor -> {
            taskExecutor.setWaitForTasksToCompleteOnShutdown(true);
            taskExecutor.setAwaitTerminationSeconds(
                (int) agentProperties.getShutdown().getSystemExecutorLeeway().getSeconds()
            );
        };
    }

    /**
     * Customizer for Spring's task scheduler.
     *
     * @param agentProperties the agent properties
     * @return a customizer for the task scheduler
     */
    @Bean
    TaskSchedulerCustomizer taskSchedulerCustomizer(final AgentProperties agentProperties) {
        return taskScheduler -> {
            taskScheduler.setWaitForTasksToCompleteOnShutdown(true);
            taskScheduler.setAwaitTerminationSeconds(
                (int) agentProperties.getShutdown().getSystemSchedulerLeeway().getSeconds()
            );
        };
    }
}
| 3,011 |
0 | Create_ds/genie/genie-agent/src/main/java/com/netflix/genie/agent/spring | Create_ds/genie/genie-agent/src/main/java/com/netflix/genie/agent/spring/autoconfigure/ProcessAutoConfiguration.java | /*
*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.genie.agent.spring.autoconfigure;
import com.netflix.genie.agent.execution.process.JobProcessManager;
import com.netflix.genie.agent.execution.process.impl.JobProcessManagerImpl;
import com.netflix.genie.common.internal.tracing.brave.BraveTracingComponents;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Lazy;
import org.springframework.scheduling.TaskScheduler;
/**
* Spring Auto Configuration for the {@link com.netflix.genie.agent.execution.process} module.
*
* @author tgianos
* @since 4.0.0
*/
@Configuration
public class ProcessAutoConfiguration {

    /**
     * Provide a lazy {@link JobProcessManager} bean if one hasn't already been defined.
     *
     * @param taskScheduler     The {@link TaskScheduler} instance to use
     * @param tracingComponents The {@link BraveTracingComponents} instance to use
     * @return A {@link JobProcessManagerImpl} instance
     */
    @Bean
    @Lazy
    @ConditionalOnMissingBean(JobProcessManager.class)
    public JobProcessManagerImpl jobProcessManager(
        @Qualifier("sharedAgentTaskScheduler") final TaskScheduler taskScheduler,
        final BraveTracingComponents tracingComponents
    ) {
        // Delegate process lifecycle management to the default implementation
        final JobProcessManagerImpl manager = new JobProcessManagerImpl(taskScheduler, tracingComponents);
        return manager;
    }
}
| 3,012 |
0 | Create_ds/genie/genie-agent/src/main/java/com/netflix/genie/agent/spring | Create_ds/genie/genie-agent/src/main/java/com/netflix/genie/agent/spring/autoconfigure/package-info.java | /*
*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Spring Boot auto configuration classes.
*
* @author tgianos
* @since 4.0.0
*/
@ParametersAreNonnullByDefault
package com.netflix.genie.agent.spring.autoconfigure;
import javax.annotation.ParametersAreNonnullByDefault;
| 3,013 |
0 | Create_ds/genie/genie-agent-app/src/smokeTest/java/com/netflix | Create_ds/genie/genie-agent-app/src/smokeTest/java/com/netflix/genie/GenieAgentApplicationSmokeTest.java | /*
*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.genie;
import com.netflix.genie.agent.cli.ExitCode;
import com.netflix.genie.agent.cli.GenieAgentRunner;
import org.assertj.core.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.context.ApplicationContext;
import org.springframework.test.context.junit.jupiter.SpringExtension;
/**
* Tests that ensure the app comes up correctly with default values.
*
* @author tgianos
* @since 4.0.0
*/
@ExtendWith(SpringExtension.class)
@SpringBootTest(
classes = GenieAgentApplication.class,
webEnvironment = SpringBootTest.WebEnvironment.NONE
)
class GenieAgentApplicationSmokeTest {

    @Autowired
    private ApplicationContext context;

    /**
     * Run the agent's informational commands end to end and verify they exit successfully.
     * Method is package-private per JUnit 5 convention, matching the sibling smoke tests.
     *
     * @throws Exception on unexpected runner failure
     */
    @Test
    void smokeTestCommands() throws Exception {
        final GenieAgentRunner runner = this.context.getBean(GenieAgentRunner.class);

        // Test Help
        runner.run("help");
        Assertions.assertThat(runner.getExitCode()).isEqualTo(ExitCode.SUCCESS.getCode());

        // Test info
        runner.run("info", "--beans", "--env", "--properties", "--state-machine");
        Assertions.assertThat(runner.getExitCode()).isEqualTo(ExitCode.SUCCESS.getCode());
    }
}
| 3,014 |
0 | Create_ds/genie/genie-agent-app/src/smokeTest/java/com/netflix | Create_ds/genie/genie-agent-app/src/smokeTest/java/com/netflix/genie/package-info.java | /*
*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Smoke tests for this package.
*
* @author tgianos
* @since 4.0.0
*/
package com.netflix.genie;
| 3,015 |
0 | Create_ds/genie/genie-agent-app/src/main/java/com/netflix | Create_ds/genie/genie-agent-app/src/main/java/com/netflix/genie/GenieAgentApplication.java | /*
*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.genie;
import com.netflix.genie.agent.cli.Util;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.autoconfigure.dao.PersistenceExceptionTranslationAutoConfiguration;
import org.springframework.boot.autoconfigure.gson.GsonAutoConfiguration;
import org.springframework.boot.autoconfigure.jackson.JacksonAutoConfiguration;
import org.springframework.boot.autoconfigure.transaction.TransactionAutoConfiguration;
import org.springframework.context.annotation.Configuration;
/**
* Genie Agent application.
*
* @author mprimi
* @author tgianos
* @since 4.0.0
*/
@Configuration
@EnableAutoConfiguration(
exclude = {
/*
* Picked up by default but not believed to be needed currently
*/
GsonAutoConfiguration.class,
JacksonAutoConfiguration.class,
PersistenceExceptionTranslationAutoConfiguration.class,
TransactionAutoConfiguration.class,
}
)
public class GenieAgentApplication {

    /**
     * Main method, actual execution is delegated to GenieAgentRunner.
     *
     * @param args command-line arguments
     */
    public static void main(final String[] args) {
        System.err.println("Starting Genie Agent");
        final int exitCode = new GenieAgentApplication().run(args);
        System.exit(exitCode);
    }

    private int run(final String[] args) {
        final SpringApplication application = new SpringApplication(GenieAgentApplication.class);

        // Disable parsing of command-line arguments into properties.
        application.setAddCommandLineProperties(false);

        //TODO: workaround for https://jira.spring.io/browse/SPR-17416
        // Spring chokes on argument '--' (a.k.a. bare double dash) conventionally used to separate options from
        // operands. Perform a token replacement to avoid triggering an error in Spring argument parsing.
        // Later the original token is restored before the actual Genie argument parsing.
        final String[] mangledArgs = Util.mangleBareDoubleDash(args);

        return SpringApplication.exit(application.run(mangledArgs));
    }
}
| 3,016 |
0 | Create_ds/genie/genie-agent-app/src/main/java/com/netflix | Create_ds/genie/genie-agent-app/src/main/java/com/netflix/genie/package-info.java | /*
*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Root Genie package. Will primarily be used to contain Spring Boot application classes.
*
* @author tgianos
*/
package com.netflix.genie;
| 3,017 |
0 | Create_ds/genie/genie-app/src/smokeTest/java/com/netflix | Create_ds/genie/genie-app/src/smokeTest/java/com/netflix/genie/GenieAppSmokeTest.java | /*
*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.genie;
import org.assertj.core.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.boot.test.web.client.TestRestTemplate;
import org.springframework.boot.web.server.LocalServerPort;
import org.springframework.http.HttpStatus;
import org.springframework.test.context.junit.jupiter.SpringExtension;
/**
* Smoke test to make sure the app comes up successfully with all defaults.
*
* @author tgianos
* @since 4.0.0
*/
@ExtendWith(SpringExtension.class)
@SpringBootTest(
classes = {
GenieApp.class
},
webEnvironment = SpringBootTest.WebEnvironment.RANDOM_PORT
)
class GenieAppSmokeTest {

    @LocalServerPort
    private int port;

    @Autowired
    private TestRestTemplate restTemplate;

    /**
     * Hit the health endpoint of the locally started server and verify a 200 response.
     */
    @Test
    void testAppStarts() {
        final String healthUrl = "http://localhost:" + this.port + "/admin/health";
        final HttpStatus status = this.restTemplate
            .getForEntity(healthUrl, String.class)
            .getStatusCode();
        Assertions.assertThat(status).isEqualByComparingTo(HttpStatus.OK);
    }

    // TODO: Could add more
}
| 3,018 |
0 | Create_ds/genie/genie-app/src/smokeTest/java/com/netflix | Create_ds/genie/genie-app/src/smokeTest/java/com/netflix/genie/package-info.java | /*
*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Smoke tests for the default Genie Application.
*
* @author tgianos
* @since 4.0.0
*/
@ParametersAreNonnullByDefault
package com.netflix.genie;
import javax.annotation.ParametersAreNonnullByDefault;
| 3,019 |
0 | Create_ds/genie/genie-app/src/main/java/com/netflix | Create_ds/genie/genie-app/src/main/java/com/netflix/genie/GenieApp.java | /*
*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.genie;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.autoconfigure.data.redis.RedisAutoConfiguration;
/**
* Main Genie Spring Configuration class.
*
* @author tgianos
* @since 3.0.0
*/
@SpringBootApplication(
// TODO: We probably should handle these in an environment post processor so that it's universal
exclude = {
RedisAutoConfiguration.class,
}
)
public class GenieApp {

    /**
     * Protected constructor.
     */
    protected GenieApp() {
    }

    /**
     * Spring Boot Main.
     *
     * @param args Program arguments
     * @throws Exception For any failure during program execution
     */
    public static void main(final String[] args) throws Exception {
        // Equivalent to new SpringApplication(GenieApp.class).run(args)
        SpringApplication.run(GenieApp.class, args);
    }
}
| 3,020 |
0 | Create_ds/genie/genie-app/src/main/java/com/netflix | Create_ds/genie/genie-app/src/main/java/com/netflix/genie/package-info.java | /*
*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Root Genie package. Will primarily be used to contain Spring Boot application classes.
*
* @author tgianos
*/
package com.netflix.genie;
| 3,021 |
0 | Create_ds/genie/genie-test-web/src/main/java/com/netflix | Create_ds/genie/genie-test-web/src/main/java/com/netflix/genie/GenieTestApp.java | /*
*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.genie;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
/**
* Reusable Spring Boot test application.
*
* @author tgianos
* @since 4.0.0
*/
@SpringBootApplication
public class GenieTestApp {

    /**
     * Constructor.
     */
    protected GenieTestApp() {
    }

    /**
     * Spring Boot Main.
     *
     * @param args Program arguments
     * @throws Exception For any failure during program execution
     */
    public static void main(final String[] args) throws Exception {
        // Equivalent to new SpringApplication(GenieTestApp.class).run(args)
        SpringApplication.run(GenieTestApp.class, args);
    }
}
| 3,022 |
0 | Create_ds/genie/genie-test-web/src/main/java/com/netflix | Create_ds/genie/genie-test-web/src/main/java/com/netflix/genie/package-info.java | /*
*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Common test resources in the base package.
*
* @author tgianos
* @since 4.0.0
*/
@ParametersAreNonnullByDefault
package com.netflix.genie;
import javax.annotation.ParametersAreNonnullByDefault;
| 3,023 |
0 | Create_ds/genie/genie-test-web/src/main/java/com/netflix/genie/test/web | Create_ds/genie/genie-test-web/src/main/java/com/netflix/genie/test/web/configs/IntegrationTestingConfiguration.java | /*
*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.genie.test.web.configs;
import com.google.common.io.Files;
import org.apache.commons.io.FileUtils;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Profile;
import org.springframework.core.io.FileSystemResource;
import org.springframework.core.io.Resource;
import javax.annotation.PreDestroy;
import java.io.File;
import java.io.IOException;
/**
* Configuration overrides for integration tests.
*
* @author amsharma
* @author tgianos
* @since 3.0.0
*/
@Configuration
@Profile(
{
"ci",
"integration"
}
)
public class IntegrationTestingConfiguration {

    // Temp directory backing the jobs resource; removed again in cleanup()
    private File jobsDir;

    /**
     * Returns a temporary directory as the jobs resource.
     *
     * @return The job dir as a resource.
     */
    @Bean
    public Resource jobsDir() {
        this.jobsDir = Files.createTempDir();
        // Defensive check — createTempDir should already have created it
        if (!this.jobsDir.exists() && !this.jobsDir.mkdirs()) {
            throw new IllegalArgumentException("Unable to create directories: " + this.jobsDir);
        }
        final String path = this.jobsDir.getAbsolutePath();
        // Ensure a trailing slash so the resource is treated as a directory
        return new FileSystemResource(path.endsWith("/") ? path : path + "/");
    }

    /**
     * Get rid of the directories created by the system temporarily.
     *
     * @throws IOException when unable to delete directory
     */
    @PreDestroy
    public void cleanup() throws IOException {
        if (this.jobsDir == null) {
            return;
        }
        FileUtils.deleteDirectory(this.jobsDir);
    }
}
| 3,024 |
0 | Create_ds/genie/genie-test-web/src/main/java/com/netflix/genie/test/web | Create_ds/genie/genie-test-web/src/main/java/com/netflix/genie/test/web/configs/package-info.java | /*
*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Any configurations used during web testing.
*
* @author tgianos
* @since 4.0.0
*/
package com.netflix.genie.test.web.configs;
| 3,025 |
0 | Create_ds/genie/genie-test/src/main/java/com/netflix/genie/test | Create_ds/genie/genie-test/src/main/java/com/netflix/genie/test/suppliers/RandomSuppliers.java | /*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.genie.test.suppliers;
import java.time.Instant;
import java.util.Random;
import java.util.UUID;
import java.util.function.Supplier;
/**
* Supply random types.
*
* @author tgianos
* @since 3.0.0
*/
public final class RandomSuppliers {
/**
* Get a random String.
*/
public static final Supplier<String> STRING = UUID.randomUUID()::toString;
private static final Random RANDOM = new Random();
/**
* Get a random integer.
*/
public static final Supplier<Integer> INT = () -> RANDOM.nextInt(Integer.MAX_VALUE - 1);
/**
* Get a random long.
*/
public static final Supplier<Long> LONG = RANDOM::nextLong;
/**
* Get a random instant.
*/
public static final Supplier<Instant> INSTANT = () -> Instant.ofEpochMilli(LONG.get());
/**
* Utility class.
*/
private RandomSuppliers() {
}
}
| 3,026 |
0 | Create_ds/genie/genie-test/src/main/java/com/netflix/genie/test | Create_ds/genie/genie-test/src/main/java/com/netflix/genie/test/suppliers/package-info.java | /*
*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Functional interfaces for suppliers.
*
* @author tgianos
* @since 3.0.0
*/
package com.netflix.genie.test.suppliers;
| 3,027 |
0 | Create_ds/dyno-queues/dyno-queues-core/src/test/java/com/netflix/dyno | Create_ds/dyno-queues/dyno-queues-core/src/test/java/com/netflix/dyno/queues/TestMessage.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
*
*/
package com.netflix.dyno.queues;
import static org.junit.Assert.*;
import java.util.concurrent.TimeUnit;
import org.junit.Test;
/**
* @author Viren
*
*/
public class TestMessage {

    /**
     * Verify timeout setters: the TimeUnit overload converts to milliseconds,
     * the plain overload stores the raw value.
     */
    @Test
    public void test() {
        Message msg = new Message();
        msg.setPayload("payload");
        msg.setTimeout(10, TimeUnit.SECONDS);
        assertEquals(msg.toString(), 10 * 1000, msg.getTimeout());
        msg.setTimeout(10);
        assertEquals(msg.toString(), 10, msg.getTimeout());
    }

    /**
     * A negative priority must be rejected.
     * (Renamed from the misspelled "testPrioirty".)
     */
    @Test(expected = IllegalArgumentException.class)
    public void testPriority() {
        Message msg = new Message();
        msg.setPriority(-1);
    }

    /**
     * A priority above the allowed range must be rejected.
     * (Renamed from the misspelled "testPrioirty2".)
     */
    @Test(expected = IllegalArgumentException.class)
    public void testPriority2() {
        Message msg = new Message();
        msg.setPriority(100);
    }
}
| 3,028 |
0 | Create_ds/dyno-queues/dyno-queues-core/src/main/java/com/netflix/dyno | Create_ds/dyno-queues/dyno-queues-core/src/main/java/com/netflix/dyno/queues/Message.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
*
*/
package com.netflix.dyno.queues;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
/**
* @author Viren
*
*/
public class Message {
private String id;
private String payload;
private long timeout;
private int priority;
private String shard;
public Message() {
}
public Message(String id, String payload) {
this.id = id;
this.payload = payload;
}
/**
* @return the id
*/
public String getId() {
return id;
}
/**
* @param id
* the id to set
*/
public void setId(String id) {
this.id = id;
}
/**
* @return the payload
*/
public String getPayload() {
return payload;
}
/**
* @param payload the payload to set
*
*/
public void setPayload(String payload) {
this.payload = payload;
}
/**
*
* @param timeout Timeout in milliseconds - The message is only given to the consumer after the specified milliseconds have elapsed.
*/
public void setTimeout(long timeout) {
this.timeout = timeout;
}
/**
* Helper method for the {@link #setTimeout(long)}
* @param time timeout time
* @param unit unit for the time
* @see #setTimeout(long)
*/
public void setTimeout(long time, TimeUnit unit) {
this.timeout = TimeUnit.MILLISECONDS.convert(time, unit);
}
/**
*
* @return Returns the timeout for the message
*/
public long getTimeout() {
return timeout;
}
/**
* Sets the message priority. Higher priority message is retrieved ahead of lower priority ones
* @param priority priority for the message.
*/
public void setPriority(int priority) {
if (priority < 0 || priority > 99) {
throw new IllegalArgumentException("priority MUST be between 0 and 99 (inclusive)");
}
this.priority = priority;
}
public int getPriority() {
return priority;
}
/**
* @return the shard
*/
public String getShard() {
return shard;
}
/**
* @param shard the shard to set
*
*/
public void setShard(String shard) {
this.shard = shard;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((id == null) ? 0 : id.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
Message other = (Message) obj;
if (id == null) {
if (other.id != null)
return false;
} else if (!id.equals(other.id))
return false;
return true;
}
@Override
public String toString() {
return "Message [id=" + id + ", payload=" + payload + ", timeout=" + timeout + ", priority=" + priority + "]";
}
}
| 3,029 |
0 | Create_ds/dyno-queues/dyno-queues-core/src/main/java/com/netflix/dyno | Create_ds/dyno-queues/dyno-queues-core/src/main/java/com/netflix/dyno/queues/DynoQueue.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
*
*/
package com.netflix.dyno.queues;
import java.io.Closeable;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
/**
* @author Viren
* Abstraction of a dyno queue.
*/
public interface DynoQueue extends Closeable {
/**
*
* @return Returns the name of the queue
*/
public String getName();
/**
*
* @return Time in milliseconds before messages that are popped but not acknowledged are pushed back into the queue.
* @see #ack(String)
*/
public int getUnackTime();
/**
*
* @param messages messages to be pushed onto the queue
* @return Returns the list of message ids
*/
public List<String> push(List<Message> messages);
/**
*
* @param messageCount number of messages to be popped out of the queue.
* @param wait Amount of time to wait if there are no messages in queue
* @param unit Time unit for the wait period
* @return messages. Can be less than the messageCount if there are fewer messages available than the message count.
* If the popped messages are not acknowledged in a timely manner, they are pushed back into the queue.
* @see #peek(int)
* @see #ack(String)
* @see #getUnackTime()
*
*/
public List<Message> pop(int messageCount, int wait, TimeUnit unit);
/**
* Pops "messageId" from the local shard if it exists.
* Note that if "messageId" is present in a different shard, we will be unable to pop it.
*
* @param messageId ID of message to pop
* @return Returns a "Message" object if pop was successful. 'null' otherwise.
*/
public Message popWithMsgId(String messageId);
/**
* Provides a peek into the queue without taking messages out.
*
* Note: This peeks only into the 'local' shard.
*
* @param messageCount number of messages to be peeked.
* @return List of peeked messages.
* @see #pop(int, int, TimeUnit)
*/
public List<Message> peek(int messageCount);
/**
* Provides an acknowledgement for the message. Once ack'ed the message is removed from the queue forever.
* @param messageId ID of the message to be acknowledged
* @return true if the message was found pending acknowledgement and is now ack'ed. false if the message id is invalid or message is no longer present in the queue.
*/
public boolean ack(String messageId);
/**
* Bulk version for {@link #ack(String)}
* @param messages Messages to be acknowledged. Each message MUST be populated with id and shard information.
*/
public void ack(List<Message> messages);
/**
* Sets the unack timeout on the message (changes the default timeout to the new value). Useful when extended lease is required for a message by consumer before sending ack.
* @param messageId ID of the message to be acknowledged
* @param timeout time in milliseconds for which the message will remain in un-ack state. If no ack is received after the timeout period has expired, the message is put back into the queue
* @return true if the message id was found and updated with new timeout. false otherwise.
*/
public boolean setUnackTimeout(String messageId, long timeout);
/**
* Updates the timeout for the message.
* @param messageId ID of the message to be acknowledged
* @param timeout time in milliseconds for which the message will remain invisible and not popped out of the queue.
* @return true if the message id was found and updated with new timeout. false otherwise.
*/
public boolean setTimeout(String messageId, long timeout);
/**
*
* @param messageId Remove the message from the queue
* @return true if the message id was found and removed. False otherwise.
*/
public boolean remove(String messageId);
/**
* Atomic variant of {@link #remove(String)}.
*
* NOTE(review): the exact atomicity guarantee is implementation-defined; confirm
* against the implementing class before relying on it.
*
* @param messageId ID of the message to remove
* @return true if the message id was found and removed; false otherwise (presumed symmetric with {@link #remove(String)})
*/
public boolean atomicRemove(String messageId);
/**
* Enqueues 'message' if it doesn't exist in any of the shards or unack sets.
*
* @param message Message to enqueue if it doesn't exist.
* @return true if message was enqueued. False if messageId already exists.
*/
public boolean ensure(Message message);
/**
* Checks the message bodies (i.e. the data in the hash map), and returns true on the first match with
* 'predicate'.
*
* Matching is done based on 'lua pattern' matching.
* http://lua-users.org/wiki/PatternsTutorial
*
* Disclaimer: This is a potentially expensive call, since we will iterate over the entire hash map in the
* worst case. Use mindfully.
*
* @param predicate The predicate to check against.
* @return 'true' if any of the messages contain 'predicate'; 'false' otherwise.
*/
public boolean containsPredicate(String predicate);
/**
* Checks the message bodies (i.e. the data in the hash map), and returns true on the first match with
* 'predicate'.
*
* Matching is done based on 'lua pattern' matching.
* http://lua-users.org/wiki/PatternsTutorial
*
* Disclaimer: This is a potentially expensive call, since we will iterate over the entire hash map in the
* worst case. Use mindfully.
*
* @param predicate The predicate to check against.
* @param localShardOnly If this is true, it will only check if the message exists in the local shard as opposed to
* all shards. Note that this will only work if the Dynomite cluster ring size is 1 (i.e. one
* instance per AZ).
* @return 'true' if any of the messages contain 'predicate'; 'false' otherwise.
*/
public boolean containsPredicate(String predicate, boolean localShardOnly);
/**
* Checks the message bodies (i.e. the data in the hash map), and returns the ID of the first message to match with
* 'predicate'.
*
* Matching is done based on 'lua pattern' matching.
* http://lua-users.org/wiki/PatternsTutorial
*
* Disclaimer: This is a potentially expensive call, since we will iterate over the entire hash map in the
* worst case. Use mindfully.
*
* @param predicate The predicate to check against.
* @return Message ID as string if any of the messages contain 'predicate'; 'null' otherwise.
*/
public String getMsgWithPredicate(String predicate);
/**
* Checks the message bodies (i.e. the data in the hash map), and returns the ID of the first message to match with
* 'predicate'.
*
* Matching is done based on 'lua pattern' matching.
* http://lua-users.org/wiki/PatternsTutorial
*
* Disclaimer: This is a potentially expensive call, since we will iterate over the entire hash map in the
* worst case. Use mindfully.
*
* @param predicate The predicate to check against.
* @param localShardOnly If this is true, it will only check if the message exists in the local shard as opposed to
* all shards. Note that this will only work if the Dynomite cluster ring size is 1 (i.e. one
* instance per AZ).
* @return Message ID as string if any of the messages contain 'predicate'; 'null' otherwise.
*/
public String getMsgWithPredicate(String predicate, boolean localShardOnly);
/**
* Pops the message with the highest priority that matches 'predicate'.
*
* Note: Can be slow for large queues.
*
* @param predicate The predicate to check against.
* @param localShardOnly If this is true, it will only check if the message exists in the local shard as opposed to
* all shards. Note that this will only work if the Dynomite cluster ring size is 1 (i.e. one
* instance per AZ).
* @return the popped message on a match; presumably {@code null} when no message matches,
* mirroring {@link #getMsgWithPredicate(String, boolean)} — TODO confirm against implementation
*/
public Message popMsgWithPredicate(String predicate, boolean localShardOnly);
/**
*
* @param messageId message to be retrieved.
* @return Retrieves the message stored in the queue by the messageId. Null if not found.
*/
public Message get(String messageId);
/**
*
* Attempts to return all the messages found in the hashmap. It's a best-effort return of all payloads, i.e. it may
* not 100% match with what's in the queue metadata at any given time and is read with a non-quorum connection.
*
* @return Returns a list of all messages found in the message hashmap.
*/
public List<Message> getAllMessages();
/**
*
* Same as get(), but uses the non quorum connection.
* @param messageId message to be retrieved.
* @return Retrieves the message stored in the queue by the messageId. Null if not found.
*/
public Message localGet(String messageId);
/**
* Bulk variant of {@link #pop(int, int, TimeUnit)}.
*
* NOTE(review): bulk semantics (atomicity, ordering) are implementation-defined;
* confirm against the implementing class.
*
* @param messageCount number of messages to pop
* @param wait amount of time to wait if there are no messages in queue
* @param unit time unit for the wait period
* @return the popped messages; may be fewer than messageCount
*/
public List<Message> bulkPop(int messageCount, int wait, TimeUnit unit);
/**
* Bulk pop that, by its unsafe* naming, presumably operates over all shards (see the
* note on unsafe* functions below) and may therefore yield duplicates across racks.
*
* NOTE(review): exact semantics are implementation-defined; confirm against the
* implementing class.
*
* @param messageCount number of messages to pop
* @param wait amount of time to wait if there are no messages
* @param unit time unit for the wait period
* @return the popped messages; may be fewer than messageCount
*/
public List<Message> unsafeBulkPop(int messageCount, int wait, TimeUnit unit);
/**
*
* @return Size of the queue.
* @see #shardSizes()
*/
public long size();
/**
*
* @return Map of shard name to the # of messages in the shard.
* @see #size()
*/
public Map<String, Map<String, Long>> shardSizes();
/**
* Truncates the entire queue. Use with caution!
*/
public void clear();
/**
* Process un-acknowledged messages. The messages which are polled by the client but not ack'ed are moved back to queue
*/
public void processUnacks();
/**
* Atomic variant of {@link #processUnacks()}.
*
* NOTE(review): the exact atomicity guarantee is implementation-defined; confirm
* against the implementing class.
*/
public void atomicProcessUnacks();
/**
*
* Attempts to return the items present in the local queue shard but not in the hashmap, if any.
* (Ideally, we would not require this function, however, in some configurations, especially with multi-region write
* traffic sharing the same queue, we may find ourselves with stale items in the queue shards)
*
* @return List of stale messages IDs.
*/
public List<Message> findStaleMessages();
/*
* <=== Begin unsafe* functions. ===>
*
* The unsafe functions listed below are not advisable to use.
* The reason they are listed as unsafe is that they operate over all shards of a queue which means that
* due to the eventually consistent nature of Dynomite, the calling application may see duplicate item(s) that
* may have already been popped in a different rack, by another instance of the same application.
*
* Why are these functions made available then?
* There are some users of dyno-queues who have use-cases that are completely okay with dealing with duplicate
* items.
*/
/**
* Provides a peek into all shards of the queue without taking messages out.
* Note: This function does not guarantee ordering of items based on shards like unsafePopAllShards().
*
* @param messageCount The number of messages to peek.
* @return A list of up to 'count' messages.
*/
public List<Message> unsafePeekAllShards(final int messageCount);
/**
* Allows popping from all shards of the queue.
*
* Note: The local shard will always be looked into first and other shards will be filled behind it (if 'messageCount' is
* greater than the number of elements in the local shard). This way we ensure the chances of duplicates are less.
*
* @param messageCount number of messages to be popped out of the queue.
* @param wait Amount of time to wait for each shard if there are no messages in shard.
* @param unit Time unit for the wait period
* @return messages. Can be less than the messageCount if there are fewer messages available than the message count.
* If the popped messages are not acknowledge in a timely manner, they are pushed back into
* the queue.
* @see #peek(int)
* @see #ack(String)
* @see #getUnackTime()
*
*/
public List<Message> unsafePopAllShards(int messageCount, int wait, TimeUnit unit);
/**
* Same as popWithMsgId(), but allows popping from any shard.
*
* @param messageId ID of message to pop
* @return Returns a "Message" object if pop was successful. 'null' otherwise.
*/
public Message unsafePopWithMsgIdAllShards(String messageId);
}
| 3,030 |
0 | Create_ds/dyno-queues/dyno-queues-core/src/main/java/com/netflix/dyno | Create_ds/dyno-queues/dyno-queues-core/src/main/java/com/netflix/dyno/queues/ShardSupplier.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
*
*/
package com.netflix.dyno.queues;
import com.netflix.dyno.connectionpool.Host;
import java.util.Set;
/**
* @author Viren
*
*/
/**
 * Supplies the set of queue shards and identifies which shard the current
 * process should operate on.
 *
 * @author Viren
 */
public interface ShardSupplier {

    /**
     * @return the names of all available queue shards; elements are evenly distributed amongst these shards
     */
    Set<String> getQueueShards();

    /**
     * @return the name of this instance's own shard, used when popping elements out of the queue
     */
    String getCurrentShard();

    /**
     * @param host the host whose shard should be resolved
     * @return the shard for this host based on the rack
     */
    String getShardForHost(Host host);
}
| 3,031 |
0 | Create_ds/dyno-queues/dyno-queues-redis/src/test/java/com/netflix/dyno/queues | Create_ds/dyno-queues/dyno-queues-redis/src/test/java/com/netflix/dyno/queues/redis/BaseQueueTests.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.queues.redis;
import com.google.common.util.concurrent.Uninterruptibles;
import com.netflix.dyno.queues.DynoQueue;
import com.netflix.dyno.queues.Message;
import org.junit.Before;
import org.junit.Test;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
/**
 * Base class of behavioral tests shared by every DynoQueue implementation.
 * Subclasses supply the concrete queue via {@link #getQueue(String, String)}.
 *
 * NOTE(review): the constructor below calls the overridable getQueue() before the
 * subclass is fully constructed. This only works because implementations do not
 * depend on subclass instance state inside getQueue(); consider lazy initialization.
 */
public abstract class BaseQueueTests {
// Name of the queue under test (fixed per subclass instance).
private String queueName;
protected static final String redisKeyPrefix = "testdynoqueues";
// Queue instance under test, created once in the constructor.
protected DynoQueue rdq;
protected String messageKeyPrefix;
/**
 * Factory method implemented by subclasses to build the queue under test.
 *
 * @param redisKeyPrefix key prefix for all redis keys used by the queue
 * @param queueName logical queue name
 * @return the queue implementation to exercise
 */
public abstract DynoQueue getQueue(String redisKeyPrefix, String queueName);
public BaseQueueTests(String queueName) {
this.queueName = queueName;
this.messageKeyPrefix = redisKeyPrefix + ".MESSAGE.";
this.rdq = getQueue(redisKeyPrefix, queueName);
this.rdq.clear();
}
// The queue must report the name it was constructed with.
@Test
public void testGetName() {
assertEquals(queueName, rdq.getName());
}
// The unack time is fixed at 1 second by the queue construction in subclasses.
@Test
public void testGetUnackTime() {
assertEquals(1_000, rdq.getUnackTime());
}
/**
 * Exercises message visibility timeouts and unack-timeout extension:
 * a message with a future timeout is invisible until it elapses; setUnackTimeout
 * extends or shortens the lease of a popped-but-unacked message.
 */
@Test
public void testTimeoutUpdate() {
rdq.clear();
String id = UUID.randomUUID().toString();
Message msg = new Message(id, "Hello World-" + id);
msg.setTimeout(100, TimeUnit.MILLISECONDS);
rdq.push(Arrays.asList(msg));
// Timeout (100ms) has not yet elapsed, so the message must be invisible.
List<Message> popped = rdq.pop(1, 10, TimeUnit.MILLISECONDS);
assertNotNull(popped);
assertEquals(0, popped.size());
Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
popped = rdq.pop(1, 1, TimeUnit.SECONDS);
assertNotNull(popped);
assertEquals(1, popped.size());
// Extend the unack lease to 500ms; the message stays invisible until processUnacks.
boolean updated = rdq.setUnackTimeout(id, 500);
assertTrue(updated);
popped = rdq.pop(1, 1, TimeUnit.SECONDS);
assertNotNull(popped);
assertEquals(0, popped.size());
Uninterruptibles.sleepUninterruptibly(1000, TimeUnit.MILLISECONDS);
rdq.processUnacks();
popped = rdq.pop(1, 1, TimeUnit.SECONDS);
assertNotNull(popped);
assertEquals(1, popped.size());
updated = rdq.setUnackTimeout(id, 10_000); //10 seconds!
assertTrue(updated);
rdq.processUnacks();
popped = rdq.pop(1, 1, TimeUnit.SECONDS);
assertNotNull(popped);
assertEquals(0, popped.size());
// A zero unack timeout makes the message immediately eligible for redelivery.
updated = rdq.setUnackTimeout(id, 0);
assertTrue(updated);
rdq.processUnacks();
popped = rdq.pop(1, 1, TimeUnit.SECONDS);
assertNotNull(popped);
assertEquals(1, popped.size());
rdq.ack(id);
// After ack, the shard "a" must be completely empty.
Map<String, Map<String, Long>> size = rdq.shardSizes();
Map<String, Long> values = size.get("a");
long total = values.values().stream().mapToLong(v -> v).sum();
assertEquals(0, total);
popped = rdq.pop(1, 1, TimeUnit.SECONDS);
assertNotNull(popped);
assertEquals(0, popped.size());
}
/**
 * Concurrent publishers and consumers: pushes ~100 messages from 3 publisher
 * tasks while 2 consumer tasks pop; verifies every consumed message is unique
 * (no duplicates under concurrency).
 */
@Test
public void testConcurrency() throws InterruptedException, ExecutionException {
rdq.clear();
final int count = 100;
final AtomicInteger published = new AtomicInteger(0);
ScheduledExecutorService ses = Executors.newScheduledThreadPool(6);
CountDownLatch publishLatch = new CountDownLatch(1);
Runnable publisher = new Runnable() {
@Override
public void run() {
List<Message> messages = new LinkedList<>();
for (int i = 0; i < 10; i++) {
Message msg = new Message(UUID.randomUUID().toString(), "Hello World-" + i);
msg.setPriority(new Random().nextInt(98));
messages.add(msg);
}
// Stop publishing once we have reached the target count.
if (published.get() >= count) {
publishLatch.countDown();
return;
}
published.addAndGet(messages.size());
rdq.push(messages);
}
};
for (int p = 0; p < 3; p++) {
ses.scheduleWithFixedDelay(publisher, 1, 1, TimeUnit.MILLISECONDS);
}
publishLatch.await();
CountDownLatch latch = new CountDownLatch(count);
List<Message> allMsgs = new CopyOnWriteArrayList<>();
AtomicInteger consumed = new AtomicInteger(0);
AtomicInteger counter = new AtomicInteger(0);
Runnable consumer = () -> {
if (consumed.get() >= count) {
return;
}
List<Message> popped = rdq.pop(100, 1, TimeUnit.MILLISECONDS);
allMsgs.addAll(popped);
consumed.addAndGet(popped.size());
popped.stream().forEach(p -> latch.countDown());
counter.incrementAndGet();
};
for (int c = 0; c < 2; c++) {
ses.scheduleWithFixedDelay(consumer, 1, 10, TimeUnit.MILLISECONDS);
}
Uninterruptibles.awaitUninterruptibly(latch);
System.out.println("Consumed: " + consumed.get() + ", all: " + allMsgs.size() + " counter: " + counter.get());
// Uniqueness check: a set built from all consumed messages must not shrink.
Set<Message> uniqueMessages = allMsgs.stream().collect(Collectors.toSet());
assertEquals(count, allMsgs.size());
assertEquals(count, uniqueMessages.size());
List<Message> more = rdq.pop(1, 1, TimeUnit.SECONDS);
// If we published more than we consumed since we could've published more than we consumed in which case this
// will not be empty
if(published.get() == consumed.get())
assertEquals(0, more.size());
else
assertEquals(1, more.size());
ses.shutdownNow();
}
// setTimeout(id, 0) must make a deferred message immediately poppable.
@Test
public void testSetTimeout() {
rdq.clear();
Message msg = new Message("x001yx", "Hello World");
msg.setPriority(3);
msg.setTimeout(10_000);
rdq.push(Arrays.asList(msg));
List<Message> popped = rdq.pop(1, 1, TimeUnit.SECONDS);
assertTrue(popped.isEmpty());
boolean updated = rdq.setTimeout(msg.getId(), 0);
assertTrue(updated);
popped = rdq.pop(2, 1, TimeUnit.SECONDS);
assertEquals(1, popped.size());
assertEquals(0, popped.get(0).getTimeout());
}
/**
 * End-to-end lifecycle test: push, peek (non-destructive), pop, redelivery after
 * the unack period via processUnacks, and final ack removing messages for good.
 */
@Test
public void testAll() {
rdq.clear();
assertEquals(0, rdq.size());
int count = 10;
List<Message> messages = new LinkedList<>();
for (int i = 0; i < count; i++) {
Message msg = new Message("" + i, "Hello World-" + i);
msg.setPriority(count - i);
messages.add(msg);
}
rdq.push(messages);
messages = rdq.peek(count);
assertNotNull(messages);
assertEquals(count, messages.size());
long size = rdq.size();
assertEquals(count, size);
// We did a peek - let's ensure the messages are still around!
List<Message> messages2 = rdq.peek(count);
assertNotNull(messages2);
assertEquals(messages, messages2);
List<Message> poped = rdq.pop(count, 1, TimeUnit.SECONDS);
assertNotNull(poped);
assertEquals(count, poped.size());
assertEquals(messages, poped);
// Let the unack period (1s) lapse so processUnacks pushes everything back.
Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS);
rdq.processUnacks();
for (Message msg : messages) {
Message found = rdq.get(msg.getId());
assertNotNull(found);
assertEquals(msg.getId(), found.getId());
assertEquals(msg.getTimeout(), found.getTimeout());
}
assertNull(rdq.get("some fake id"));
List<Message> messages3 = rdq.pop(count, 1, TimeUnit.SECONDS);
if (messages3.size() < count) {
List<Message> messages4 = rdq.pop(count, 1, TimeUnit.SECONDS);
messages3.addAll(messages4);
}
assertNotNull(messages3);
assertEquals(10, messages3.size());
assertEquals(messages.stream().map(msg -> msg.getId()).sorted().collect(Collectors.toList()), messages3.stream().map(msg -> msg.getId()).sorted().collect(Collectors.toList()));
assertEquals(10, messages3.stream().map(msg -> msg.getId()).collect(Collectors.toSet()).size());
messages3.stream().forEach(System.out::println);
// Each message can be acked exactly once; a second ack must fail.
for (Message msg : messages3) {
assertTrue(rdq.ack(msg.getId()));
assertFalse(rdq.ack(msg.getId()));
}
Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS);
messages3 = rdq.pop(count, 1, TimeUnit.SECONDS);
assertNotNull(messages3);
assertEquals(0, messages3.size());
}
// Ensure each test starts from an empty queue.
@Before
public void clear() {
rdq.clear();
}
// clear() must remove every pushed message and reset size to zero.
@Test
public void testClearQueues() {
rdq.clear();
int count = 10;
List<Message> messages = new LinkedList<>();
for (int i = 0; i < count; i++) {
Message msg = new Message("x" + i, "Hello World-" + i);
msg.setPriority(count - i);
messages.add(msg);
}
rdq.push(messages);
assertEquals(count, rdq.size());
rdq.clear();
assertEquals(0, rdq.size());
}
}
| 3,032 |
0 | Create_ds/dyno-queues/dyno-queues-redis/src/test/java/com/netflix/dyno/queues | Create_ds/dyno-queues/dyno-queues-redis/src/test/java/com/netflix/dyno/queues/redis/DefaultShardingStrategyTest.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.queues.redis;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostBuilder;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.queues.Message;
import com.netflix.dyno.queues.ShardSupplier;
import com.netflix.dyno.queues.jedis.JedisMock;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import static org.junit.Assert.assertEquals;
/**
 * Verifies that the default (round-robin) sharding strategy distributes pushes
 * from a single client across all three shards of the queue.
 */
public class DefaultShardingStrategyTest {

    private static JedisMock dynoClient;

    private static final String queueName = "test_queue";
    private static final String redisKeyPrefix = "testdynoqueues";

    // One queue view per shard; each sees the same underlying mock redis.
    private static RedisDynoQueue shard1DynoQueue;
    private static RedisDynoQueue shard2DynoQueue;
    private static RedisDynoQueue shard3DynoQueue;

    private static RedisQueues shard1Queue;
    private static RedisQueues shard2Queue;
    private static RedisQueues shard3Queue;

    /**
     * Builds a localhost test host located in the given rack.
     * Replaces three copies of the same HostBuilder chain.
     *
     * @param rack rack name; the shard name is derived from its last two characters
     * @return a new up-status host on localhost:8102
     */
    private static Host testHost(String rack) {
        return new HostBuilder()
                .setHostname("localhost")
                .setPort(8102)
                .setRack(rack)
                .setStatus(Host.Status.Up)
                .createHost();
    }

    /**
     * Creates a {@link ShardSupplier} whose current shard is {@code currentShard}.
     * Replaces the three near-identical anonymous classes previously declared inline.
     *
     * @param allShards every shard available to the queue
     * @param currentShard the shard this supplier claims as its own
     * @return the supplier
     */
    private static ShardSupplier supplierFor(Set<String> allShards, String currentShard) {
        return new ShardSupplier() {
            @Override
            public Set<String> getQueueShards() {
                return allShards;
            }

            @Override
            public String getCurrentShard() {
                return currentShard;
            }

            @Override
            public String getShardForHost(Host host) {
                // Not exercised by this test.
                return null;
            }
        };
    }

    @BeforeClass
    public static void setUpBeforeClass() throws Exception {
        HostSupplier hs = new HostSupplier() {
            @Override
            public List<Host> getHosts() {
                List<Host> hosts = new LinkedList<>();
                hosts.add(testHost("us-east-1d"));
                hosts.add(testHost("us-east-2d"));
                hosts.add(testHost("us-east-3d"));
                return hosts;
            }
        };

        dynoClient = new JedisMock();

        // Shard names are the last two characters of each rack (e.g. "1d", "2d", "3d").
        Set<String> allShards = hs.getHosts().stream()
                .map(host -> host.getRack().substring(host.getRack().length() - 2))
                .collect(Collectors.toSet());

        Iterator<String> iterator = allShards.iterator();
        String shard1Name = iterator.next();
        String shard2Name = iterator.next();
        String shard3Name = iterator.next();

        shard1Queue = new RedisQueues(dynoClient, dynoClient, redisKeyPrefix, supplierFor(allShards, shard1Name), 1_000, 1_000_000);
        shard2Queue = new RedisQueues(dynoClient, dynoClient, redisKeyPrefix, supplierFor(allShards, shard2Name), 1_000, 1_000_000);
        shard3Queue = new RedisQueues(dynoClient, dynoClient, redisKeyPrefix, supplierFor(allShards, shard3Name), 1_000, 1_000_000);

        shard1DynoQueue = (RedisDynoQueue) shard1Queue.get(queueName);
        shard2DynoQueue = (RedisDynoQueue) shard2Queue.get(queueName);
        shard3DynoQueue = (RedisDynoQueue) shard3Queue.get(queueName);
    }

    /** Starts each test from empty shards. */
    @Before
    public void clearAll() {
        shard1DynoQueue.clear();
        shard2DynoQueue.clear();
        shard3DynoQueue.clear();
    }

    @Test
    public void testAll() {
        List<Message> messages = new LinkedList<>();
        Message msg = new Message("1", "Hello World");
        msg.setPriority(1);
        messages.add(msg);

        // Because the sharding strategy works in round-robin manner, a single client
        // (shard1's) should push the message (even the same one) to three different shards.
        shard1DynoQueue.push(messages);
        shard1DynoQueue.push(messages);
        shard1DynoQueue.push(messages);

        List<Message> poppedFromShard1 = shard1DynoQueue.pop(1, 1, TimeUnit.SECONDS);
        List<Message> poppedFromShard2 = shard2DynoQueue.pop(1, 1, TimeUnit.SECONDS);
        List<Message> poppedFromShard3 = shard3DynoQueue.pop(1, 1, TimeUnit.SECONDS);

        // Every shard must have received exactly one copy.
        assertEquals(1, poppedFromShard1.size());
        assertEquals(1, poppedFromShard2.size());
        assertEquals(1, poppedFromShard3.size());

        assertEquals(msg, poppedFromShard1.get(0));
        assertEquals(msg, poppedFromShard2.get(0));
        assertEquals(msg, poppedFromShard3.get(0));
    }
}
| 3,033 |
0 | Create_ds/dyno-queues/dyno-queues-redis/src/test/java/com/netflix/dyno/queues | Create_ds/dyno-queues/dyno-queues-redis/src/test/java/com/netflix/dyno/queues/redis/RedisDynoQueueTest.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.queues.redis;
import com.google.common.util.concurrent.Uninterruptibles;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostBuilder;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.queues.DynoQueue;
import com.netflix.dyno.queues.Message;
import com.netflix.dyno.queues.ShardSupplier;
import com.netflix.dyno.queues.jedis.JedisMock;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
/**
 * Functional tests for {@link RedisDynoQueue} running against an in-memory
 * {@link JedisMock}: push/pop/ack/peek semantics, per-message and unack
 * timeouts, and concurrent publish/consume behavior.
 */
public class RedisDynoQueueTest {

    private static JedisMock dynoClient;

    private static final String queueName = "test_queue";

    private static final String redisKeyPrefix = "testdynoqueues";

    private static RedisDynoQueue rdq;

    private static RedisQueues rq;

    // Redis hash key under which the message payloads are stored.
    private static String messageKey;

    @BeforeClass
    public static void setUpBeforeClass() throws Exception {
        // Single host in rack "us-east-1d"; the shard name is derived from the
        // last two characters of the rack name (i.e. "1d").
        HostSupplier hs = new HostSupplier() {
            @Override
            public List<Host> getHosts() {
                List<Host> hosts = new LinkedList<>();
                hosts.add(
                        new HostBuilder()
                                .setHostname("ec2-11-22-33-444.compute-0.amazonaws.com")
                                .setPort(8102)
                                .setRack("us-east-1d")
                                .setStatus(Host.Status.Up)
                                .createHost()
                );
                return hosts;
            }
        };

        dynoClient = new JedisMock();

        Set<String> allShards = hs.getHosts().stream()
                .map(host -> host.getRack().substring(host.getRack().length() - 2))
                .collect(Collectors.toSet());
        String shardName = allShards.iterator().next();
        ShardSupplier ss = new ShardSupplier() {
            @Override
            public Set<String> getQueueShards() {
                return allShards;
            }

            @Override
            public String getCurrentShard() {
                return shardName;
            }

            @Override
            public String getShardForHost(Host host) {
                return null;
            }
        };

        messageKey = redisKeyPrefix + ".MESSAGE." + queueName;
        rq = new RedisQueues(dynoClient, dynoClient, redisKeyPrefix, ss, 1_000, 1_000_000);

        // RedisQueues caches queue instances; repeated lookups must return the
        // same object.
        DynoQueue rdq1 = rq.get(queueName);
        assertNotNull(rdq1);

        rdq = (RedisDynoQueue) rq.get(queueName);
        assertNotNull(rdq);
        assertEquals(rdq1, rdq); // should be the same instance.
    }

    @Test
    public void testGetName() {
        assertEquals(queueName, rdq.getName());
    }

    @Test
    public void testGetUnackTime() {
        assertEquals(1_000, rdq.getUnackTime());
    }

    /**
     * Exercises per-message timeouts and unack timeout updates: a message must
     * stay invisible until its timeout elapses, and {@code setUnackTimeout}
     * must postpone or trigger redelivery accordingly.
     */
    @Test
    public void testTimeoutUpdate() {
        rdq.clear();

        String id = UUID.randomUUID().toString();
        Message msg = new Message(id, "Hello World-" + id);
        msg.setTimeout(100, TimeUnit.MILLISECONDS);
        rdq.push(Arrays.asList(msg));

        // The 100ms message timeout has not elapsed yet: nothing is poppable.
        List<Message> popped = rdq.pop(1, 10, TimeUnit.MILLISECONDS);
        assertNotNull(popped);
        assertEquals(0, popped.size());

        Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
        popped = rdq.pop(1, 1, TimeUnit.SECONDS);
        assertNotNull(popped);
        assertEquals(1, popped.size());

        // Extend the unack lease by 500ms; the message must stay invisible.
        boolean updated = rdq.setUnackTimeout(id, 500);
        assertTrue(updated);
        popped = rdq.pop(1, 1, TimeUnit.SECONDS);
        assertNotNull(popped);
        assertEquals(0, popped.size());

        // After the lease lapses, processUnacks() makes it deliverable again.
        Uninterruptibles.sleepUninterruptibly(1000, TimeUnit.MILLISECONDS);
        rdq.processUnacks();
        popped = rdq.pop(1, 1, TimeUnit.SECONDS);
        assertNotNull(popped);
        assertEquals(1, popped.size());

        updated = rdq.setUnackTimeout(id, 10_000); //10 seconds!
        assertTrue(updated);
        rdq.processUnacks();
        popped = rdq.pop(1, 1, TimeUnit.SECONDS);
        assertNotNull(popped);
        assertEquals(0, popped.size());

        // A zero unack timeout makes the message immediately redeliverable.
        updated = rdq.setUnackTimeout(id, 0);
        assertTrue(updated);
        rdq.processUnacks();
        popped = rdq.pop(1, 1, TimeUnit.SECONDS);
        assertNotNull(popped);
        assertEquals(1, popped.size());

        rdq.ack(id);
        // After the ack, shard "1d" must be fully drained.
        Map<String, Map<String, Long>> size = rdq.shardSizes();
        Map<String, Long> values = size.get("1d");
        long total = values.values().stream().mapToLong(v -> v).sum();
        assertEquals(0, total);

        popped = rdq.pop(1, 1, TimeUnit.SECONDS);
        assertNotNull(popped);
        assertEquals(0, popped.size());
    }

    /**
     * Publishes 10k messages from 3 producer tasks while 2 consumer tasks pop
     * them, then verifies no message was lost or delivered twice.
     */
    @Test
    public void testConcurrency() throws InterruptedException, ExecutionException {
        rdq.clear();

        final int count = 10_000;
        final AtomicInteger published = new AtomicInteger(0);

        ScheduledExecutorService ses = Executors.newScheduledThreadPool(6);
        CountDownLatch publishLatch = new CountDownLatch(1);
        Runnable publisher = () -> {
            List<Message> messages = new LinkedList<>();
            for (int i = 0; i < 10; i++) {
                Message msg = new Message(UUID.randomUUID().toString(), "Hello World-" + i);
                msg.setPriority(new Random().nextInt(98));
                messages.add(msg);
            }
            if (published.get() >= count) {
                publishLatch.countDown();
                return;
            }
            published.addAndGet(messages.size());
            rdq.push(messages);
        };

        for (int p = 0; p < 3; p++) {
            ses.scheduleWithFixedDelay(publisher, 1, 1, TimeUnit.MILLISECONDS);
        }
        publishLatch.await();

        CountDownLatch latch = new CountDownLatch(count);
        List<Message> allMsgs = new CopyOnWriteArrayList<>();
        AtomicInteger consumed = new AtomicInteger(0);
        AtomicInteger counter = new AtomicInteger(0);
        Runnable consumer = () -> {
            if (consumed.get() >= count) {
                return;
            }
            List<Message> popped = rdq.pop(100, 1, TimeUnit.MILLISECONDS);
            allMsgs.addAll(popped);
            consumed.addAndGet(popped.size());
            popped.forEach(p -> latch.countDown());
            counter.incrementAndGet();
        };

        for (int c = 0; c < 2; c++) {
            ses.scheduleWithFixedDelay(consumer, 1, 10, TimeUnit.MILLISECONDS);
        }
        Uninterruptibles.awaitUninterruptibly(latch);
        System.out.println("Consumed: " + consumed.get() + ", all: " + allMsgs.size() + " counter: " + counter.get());

        // Exactly-once delivery: no duplicates among the consumed messages.
        Set<Message> uniqueMessages = allMsgs.stream().collect(Collectors.toSet());
        assertEquals(count, allMsgs.size());
        assertEquals(count, uniqueMessages.size());

        // An empty queue must honor the pop wait time before returning.
        long start = System.currentTimeMillis();
        List<Message> more = rdq.pop(1, 1, TimeUnit.SECONDS);
        long elapsedTime = System.currentTimeMillis() - start;
        assertTrue(elapsedTime >= 1000);
        assertEquals(0, more.size());
        assertEquals(0, rdq.numIdsToPrefetch.get());

        ses.shutdownNow();
    }

    @Test
    public void testSetTimeout() {
        rdq.clear();

        Message msg = new Message("x001", "Hello World");
        msg.setPriority(3);
        msg.setTimeout(20_000);
        rdq.push(Arrays.asList(msg));

        List<Message> popped = rdq.pop(1, 1, TimeUnit.SECONDS);
        assertTrue(popped.isEmpty());

        // Shrink the timeout so the message becomes available right away.
        boolean updated = rdq.setTimeout(msg.getId(), 1);
        assertTrue(updated);
        popped = rdq.pop(1, 1, TimeUnit.SECONDS);
        assertEquals(1, popped.size());
        assertEquals(1, popped.get(0).getTimeout());

        // Once popped (unacked), the timeout can no longer be updated.
        updated = rdq.setTimeout(msg.getId(), 1);
        assertFalse(updated);
    }

    /**
     * End-to-end lifecycle: push, peek (non-destructive), pop, redelivery via
     * processUnacks, get-by-id, ack and remove.
     */
    @Test
    public void testAll() {
        rdq.clear();

        int count = 10;
        List<Message> messages = new LinkedList<>();
        for (int i = 0; i < count; i++) {
            Message msg = new Message("" + i, "Hello World-" + i);
            msg.setPriority(count - i);
            messages.add(msg);
        }
        rdq.push(messages);

        messages = rdq.peek(count);
        assertNotNull(messages);
        assertEquals(count, messages.size());

        long size = rdq.size();
        assertEquals(count, size);

        // We did a peek - let's ensure the messages are still around!
        List<Message> messages2 = rdq.peek(count);
        assertNotNull(messages2);
        assertEquals(messages, messages2);

        List<Message> poped = rdq.pop(count, 1, TimeUnit.SECONDS);
        assertNotNull(poped);
        assertEquals(count, poped.size());
        assertEquals(messages, poped);

        // Let the unack lease lapse so the messages become poppable again.
        Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS);
        rdq.processUnacks();

        for (Message msg : messages) {
            Message found = rdq.get(msg.getId());
            assertNotNull(found);
            assertEquals(msg.getId(), found.getId());
            assertEquals(msg.getTimeout(), found.getTimeout());
        }
        assertNull(rdq.get("some fake id"));

        List<Message> messages3 = rdq.pop(count, 1, TimeUnit.SECONDS);
        if (messages3.size() < count) {
            // Redelivery may arrive in two batches; pop once more.
            List<Message> messages4 = rdq.pop(count, 1, TimeUnit.SECONDS);
            messages3.addAll(messages4);
        }

        assertNotNull(messages3);
        assertEquals(10, messages3.size());
        assertEquals(messages, messages3);
        assertEquals(10, messages3.stream().map(Message::getId).collect(Collectors.toSet()).size());
        messages3.forEach(System.out::println);
        assertEquals(10L, dynoClient.hlen(messageKey).longValue());

        for (Message msg : messages3) {
            // A second ack of the same id must fail.
            assertTrue(rdq.ack(msg.getId()));
            assertFalse(rdq.ack(msg.getId()));
        }
        Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS);
        messages3 = rdq.pop(count, 1, TimeUnit.SECONDS);
        assertNotNull(messages3);
        assertEquals(0, messages3.size());

        // Priorities were assigned as count - i, so they descend from 10.
        int max = 10;
        for (Message msg : messages) {
            assertEquals(max, msg.getPriority());
            rdq.remove(msg.getId());
            max--;
        }

        size = rdq.size();
        assertEquals(0, size);
        assertEquals(0L, dynoClient.hlen(messageKey).longValue());
    }

    @Before
    public void clear() {
        rdq.clear();
        assertEquals(0L, dynoClient.hlen(messageKey).longValue());
    }

    @Test
    public void testClearQueues() {
        rdq.clear();
        int count = 10;
        List<Message> messages = new LinkedList<>();
        for (int i = 0; i < count; i++) {
            Message msg = new Message("x" + i, "Hello World-" + i);
            msg.setPriority(count - i);
            messages.add(msg);
        }

        rdq.push(messages);
        assertEquals(count, rdq.size());
        rdq.clear();
        assertEquals(0, rdq.size());
    }
}
/**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.queues.redis;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostBuilder;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.queues.Message;
import com.netflix.dyno.queues.ShardSupplier;
import com.netflix.dyno.queues.jedis.JedisMock;
import com.netflix.dyno.queues.redis.sharding.ShardingStrategy;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import static org.junit.Assert.assertEquals;
/**
 * Verifies that a user-supplied {@link ShardingStrategy} controls message
 * placement: an id-hash based strategy always routes a given message to the
 * same shard, so re-pushing the same message does not duplicate it across
 * shards.
 */
public class CustomShardingStrategyTest {

    /**
     * Routes a message to a shard chosen deterministically from the hash of
     * the message id.
     */
    public static class HashBasedStrategy implements ShardingStrategy {
        @Override
        public String getNextShard(List<String> allShards, Message message) {
            // Math.floorMod always returns a value in [0, size), unlike the
            // previous Math.abs(hashCode) % size, which produced a negative
            // index when hashCode was Integer.MIN_VALUE.
            int calculatedShard = Math.floorMod(message.getId().hashCode(), allShards.size());
            return allShards.get(calculatedShard);
        }
    }

    private static JedisMock dynoClient;

    private static final String queueName = "test_queue";

    private static final String redisKeyPrefix = "testdynoqueues";

    // One queue view per shard, all backed by the same mock Redis.
    private static RedisDynoQueue shard1DynoQueue;
    private static RedisDynoQueue shard2DynoQueue;
    private static RedisDynoQueue shard3DynoQueue;

    private static RedisQueues shard1Queue;
    private static RedisQueues shard2Queue;
    private static RedisQueues shard3Queue;

    @BeforeClass
    public static void setUpBeforeClass() throws Exception {
        // Three hosts in racks rack1..rack3; shard names are the last two
        // characters of the rack name ("k1", "k2", "k3").
        HostSupplier hs = new HostSupplier() {
            @Override
            public List<Host> getHosts() {
                List<Host> hosts = new LinkedList<>();
                hosts.add(
                        new HostBuilder()
                                .setHostname("localhost")
                                .setPort(8102)
                                .setRack("rack1")
                                .setStatus(Host.Status.Up)
                                .createHost()
                );
                hosts.add(
                        new HostBuilder()
                                .setHostname("localhost")
                                .setPort(8102)
                                .setRack("rack2")
                                .setStatus(Host.Status.Up)
                                .createHost()
                );
                hosts.add(
                        new HostBuilder()
                                .setHostname("localhost")
                                .setPort(8102)
                                .setRack("rack3")
                                .setStatus(Host.Status.Up)
                                .createHost()
                );
                return hosts;
            }
        };

        dynoClient = new JedisMock();

        Set<String> allShards = hs.getHosts().stream()
                .map(host -> host.getRack().substring(host.getRack().length() - 2))
                .collect(Collectors.toSet());
        Iterator<String> iterator = allShards.iterator();
        String shard1Name = iterator.next();
        String shard2Name = iterator.next();
        String shard3Name = iterator.next();

        ShardSupplier shard1Supplier = new ShardSupplier() {
            @Override
            public Set<String> getQueueShards() {
                return allShards;
            }

            @Override
            public String getCurrentShard() {
                return shard1Name;
            }

            @Override
            public String getShardForHost(Host host) {
                return null;
            }
        };
        ShardSupplier shard2Supplier = new ShardSupplier() {
            @Override
            public Set<String> getQueueShards() {
                return allShards;
            }

            @Override
            public String getCurrentShard() {
                return shard2Name;
            }

            @Override
            public String getShardForHost(Host host) {
                return null;
            }
        };
        ShardSupplier shard3Supplier = new ShardSupplier() {
            @Override
            public Set<String> getQueueShards() {
                return allShards;
            }

            @Override
            public String getCurrentShard() {
                return shard3Name;
            }

            @Override
            public String getShardForHost(Host host) {
                return null;
            }
        };

        HashBasedStrategy hashBasedStrategy = new HashBasedStrategy();
        shard1Queue = new RedisQueues(dynoClient, dynoClient, redisKeyPrefix, shard1Supplier, 1_000, 1_000_000, hashBasedStrategy);
        shard2Queue = new RedisQueues(dynoClient, dynoClient, redisKeyPrefix, shard2Supplier, 1_000, 1_000_000, hashBasedStrategy);
        shard3Queue = new RedisQueues(dynoClient, dynoClient, redisKeyPrefix, shard3Supplier, 1_000, 1_000_000, hashBasedStrategy);

        shard1DynoQueue = (RedisDynoQueue) shard1Queue.get(queueName);
        shard2DynoQueue = (RedisDynoQueue) shard2Queue.get(queueName);
        shard3DynoQueue = (RedisDynoQueue) shard3Queue.get(queueName);
    }

    @Before
    public void clearAll() {
        shard1DynoQueue.clear();
        shard2DynoQueue.clear();
        shard3DynoQueue.clear();
    }

    @Test
    public void testAll() {
        List<Message> messages = new LinkedList<>();
        Message msg = new Message("1", "Hello World");
        msg.setPriority(1);
        messages.add(msg);

        // The hash-based strategy routes a message by its id, so pushing the
        // same message repeatedly always targets the same shard and never
        // duplicates it: exactly one shard should yield the message, once.
        shard1DynoQueue.push(messages);
        shard1DynoQueue.push(messages);
        shard1DynoQueue.push(messages);

        List<Message> popedFromShard1 = shard1DynoQueue.pop(1, 1, TimeUnit.SECONDS);
        List<Message> popedFromShard2 = shard2DynoQueue.pop(1, 1, TimeUnit.SECONDS);
        List<Message> popedFromShard3 = shard3DynoQueue.pop(1, 1, TimeUnit.SECONDS);

        assertEquals(0, popedFromShard1.size());
        assertEquals(1, popedFromShard2.size());
        assertEquals(0, popedFromShard3.size());
        assertEquals(msg, popedFromShard2.get(0));
    }
}
/**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
*
*/
package com.netflix.dyno.queues.redis;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import java.util.Arrays;
import java.util.Collection;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import com.netflix.dyno.connectionpool.HostBuilder;
import org.junit.Test;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.Host.Status;
import com.netflix.dyno.queues.shard.DynoShardSupplier;
import com.netflix.dyno.connectionpool.HostSupplier;
/**
* @author Viren
*
*/
/**
 * @author Viren
 *
 */
public class DynoShardSupplierTest {

    @Test
    public void test(){
        // Three hosts spread across three racks of the us-east-1 region.
        HostSupplier hostSupplier = () -> {
            List<Host> hosts = new LinkedList<>();
            for (String rack : Arrays.asList("us-east-1a", "us-east-1b", "us-east-1d")) {
                hosts.add(
                        new HostBuilder()
                                .setHostname("host1")
                                .setPort(8102)
                                .setRack(rack)
                                .setStatus(Host.Status.Up)
                                .createHost()
                );
            }
            return hosts;
        };

        DynoShardSupplier supplier = new DynoShardSupplier(hostSupplier, "us-east-1", "a");

        // The local shard is the rack suffix of the availability zone ("a").
        String localShard = supplier.getCurrentShard();
        assertNotNull(localShard);
        assertEquals("a", localShard);

        // All shards are derived from the rack names of every supplied host.
        Set<String> allShards = supplier.getQueueShards();
        assertNotNull(allShards);
        Set<String> expectedShards = Arrays.asList("a", "b", "d").stream().collect(Collectors.toSet());
        assertEquals(expectedShards, allShards);
    }
}
/**
*
*/
package com.netflix.dyno.queues.redis.benchmark;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostBuilder;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.connectionpool.TokenMapSupplier;
import com.netflix.dyno.connectionpool.impl.ConnectionPoolConfigurationImpl;
import com.netflix.dyno.connectionpool.impl.lb.HostToken;
import com.netflix.dyno.jedis.DynoJedisClient;
import com.netflix.dyno.queues.ShardSupplier;
import com.netflix.dyno.queues.redis.RedisQueues;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
/**
* @author Viren
*/
/**
 * @author Viren
 */
public class BenchmarkTestsNoPipelines extends QueueBenchmark {

    public BenchmarkTestsNoPipelines() {
        // Single local Dynomite/Redis host in rack us-east-1c.
        List<Host> hosts = new ArrayList<>(1);
        Host localHost = new HostBuilder()
                .setHostname("localhost")
                .setIpAddress("127.0.0.1")
                .setPort(6379)
                .setRack("us-east-1c")
                .setDatacenter("us-east-1")
                .setStatus(Host.Status.Up)
                .createHost();
        hosts.add(localHost);

        HostSupplier hostSupplier = () -> hosts;

        // Static token map: one token, owned by the single host.
        TokenMapSupplier tokenSupplier = new TokenMapSupplier() {
            private final HostToken token = new HostToken(1L, localHost);

            @Override
            public List<HostToken> getTokens(Set<Host> activeHosts) {
                return Arrays.asList(token);
            }

            @Override
            public HostToken getTokenForHost(Host host, Set<Host> activeHosts) {
                return token;
            }
        };

        ConnectionPoolConfigurationImpl poolConfig = new ConnectionPoolConfigurationImpl("test")
                .withTokenSupplier(tokenSupplier)
                .setLocalRack("us-east-1c")
                .setLocalDataCenter("us-east-1");
        poolConfig.setSocketTimeout(0);
        poolConfig.setConnectTimeout(0);
        poolConfig.setMaxConnsPerHost(10);

        DynoJedisClient client = new DynoJedisClient.Builder()
                .withApplicationName("test")
                .withDynomiteClusterName("test")
                .withCPConfig(poolConfig)
                .withHostSupplier(hostSupplier)
                .build();

        // Shard names are the last two characters of each host's rack name.
        Set<String> allShards = hostSupplier.getHosts().stream()
                .map(host -> host.getRack().substring(host.getRack().length() - 2))
                .collect(Collectors.toSet());
        String shardName = allShards.iterator().next();
        ShardSupplier shardSupplier = new ShardSupplier() {
            @Override
            public Set<String> getQueueShards() {
                return allShards;
            }

            @Override
            public String getCurrentShard() {
                return shardName;
            }

            @Override
            public String getShardForHost(Host host) {
                return null;
            }
        };

        RedisQueues queues = new RedisQueues(client, client, "perftestnopipe", shardSupplier, 60_000, 1_000_000);
        queue = queues.get("nopipequeue");
    }

    public static void main(String[] args) throws Exception {
        try {
            new BenchmarkTestsNoPipelines().run();
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            System.exit(0);
        }
    }
}
package com.netflix.dyno.queues.redis.benchmark;
import com.netflix.dyno.queues.DynoQueue;
import com.netflix.dyno.queues.Message;
import java.util.*;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
/**
 * Base harness for queue throughput benchmarks: publishes batches of random
 * messages, consumes them back while checking for duplicate delivery, and
 * prints msg/sec figures.
 */
public abstract class QueueBenchmark {

    // Concrete subclasses construct and assign the queue under test.
    protected DynoQueue queue;

    /** Publishes 100 batches of 3000 random messages and prints write throughput. */
    public void publish() {
        long s = System.currentTimeMillis();
        int loopCount = 100;
        int batchSize = 3000;
        for (int i = 0; i < loopCount; i++) {
            List<Message> messages = new ArrayList<>(batchSize);
            for (int k = 0; k < batchSize; k++) {
                String id = UUID.randomUUID().toString();
                Message message = new Message(id, getPayload());
                messages.add(message);
            }
            queue.push(messages);
        }
        long e = System.currentTimeMillis();
        // Guard against a zero elapsed time (coarse clock / very fast run),
        // which previously caused an ArithmeticException in the division.
        long diff = Math.max(1, e - s);
        long throughput = 1000 * ((loopCount * batchSize) / diff);
        System.out.println("Publish time: " + diff + ", throughput: " + throughput + " msg/sec");
    }

    /**
     * Pops and acks up to 100 batches of 3500 messages, failing if any message
     * is delivered more than once, and prints read throughput.
     */
    public void consume() {
        try {
            Set<String> ids = new HashSet<>();
            long s = System.currentTimeMillis();
            int loopCount = 100;
            int batchSize = 3500;
            int count = 0;
            for (int i = 0; i < loopCount; i++) {
                List<Message> popped = queue.pop(batchSize, 1, TimeUnit.MILLISECONDS);
                queue.ack(popped);
                Set<String> poppedIds = popped.stream().map(Message::getId).collect(Collectors.toSet());
                if (popped.size() != poppedIds.size()) {
                    //We consumed dups
                    throw new RuntimeException("Count does not match. expected: " + popped.size() + ", but actual was : " + poppedIds.size() + ", i: " + i);
                }
                ids.addAll(poppedIds);
                count += popped.size();
            }
            long e = System.currentTimeMillis();
            // Same divide-by-zero guard as publish().
            long diff = Math.max(1, e - s);
            long throughput = 1000 * ((count) / diff);
            if (count != ids.size()) {
                //We consumed dups
                throw new RuntimeException("There were duplicate messages consumed... expected messages to be consumed " + count + ", but actual was : " + ids.size());
            }
            System.out.println("Consume time: " + diff + ", read throughput: " + throughput + " msg/sec, messages read: " + count);
        } catch (Exception e) {
            // Best-effort benchmark: report the failure and keep going.
            e.printStackTrace();
        }
    }

    // Payload is a single random UUID followed by a trailing comma (the
    // original built this with a one-iteration StringBuilder loop).
    private String getPayload() {
        return UUID.randomUUID().toString() + ",";
    }

    /** Runs publish-then-consume on two threads concurrently and waits for both. */
    public void run() throws Exception {
        ExecutorService es = Executors.newFixedThreadPool(2);
        try {
            List<Future<Void>> futures = new LinkedList<>();
            for (int i = 0; i < 2; i++) {
                Future<Void> future = es.submit(() -> {
                    publish();
                    consume();
                    return null;
                });
                futures.add(future);
            }
            for (Future<Void> future : futures) {
                future.get();
            }
        } finally {
            // The original leaked the executor; shut it down so worker
            // threads terminate.
            es.shutdown();
        }
    }
}
/**
*
*/
package com.netflix.dyno.queues.redis.benchmark;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostBuilder;
import com.netflix.dyno.queues.redis.v2.QueueBuilder;
import redis.clients.jedis.JedisPoolConfig;
import java.util.LinkedList;
import java.util.List;
/**
* @author Viren
*
*/
/**
 * @author Viren
 *
 */
public class BenchmarkTestsJedis extends QueueBenchmark {

    public BenchmarkTestsJedis() {
        // Single plain (non-Dynomite) Redis host on localhost.
        List<Host> hosts = new LinkedList<>();
        hosts.add(
                new HostBuilder()
                        .setHostname("localhost")
                        .setPort(6379)
                        .setRack("us-east-1a")
                        .createHost()
        );

        JedisPoolConfig poolConfig = new JedisPoolConfig();
        poolConfig.setTestOnBorrow(true);
        poolConfig.setTestOnCreate(true);
        poolConfig.setMaxTotal(10);
        poolConfig.setMaxIdle(5);
        poolConfig.setMaxWaitMillis(60_000);

        queue = new QueueBuilder()
                .setCurrentShard("a")
                .setQueueName("testq")
                .setRedisKeyPrefix("keyprefix")
                .setUnackTime(60_000_000)
                .useNonDynomiteRedis(poolConfig, hosts)
                .build();

        System.out.println("Instance: " + queue.getClass().getName());
    }

    public static void main(String[] args) throws Exception {
        try {
            new BenchmarkTestsJedis().run();
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            System.exit(0);
        }
    }
}
/**
*
*/
package com.netflix.dyno.queues.redis.benchmark;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostBuilder;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.connectionpool.TokenMapSupplier;
import com.netflix.dyno.connectionpool.impl.ConnectionPoolConfigurationImpl;
import com.netflix.dyno.connectionpool.impl.lb.HostToken;
import com.netflix.dyno.jedis.DynoJedisClient;
import com.netflix.dyno.queues.redis.v2.QueueBuilder;
import java.util.*;
/**
* @author Viren
*/
/**
 * @author Viren
 */
public class BenchmarkTestsDynoJedis extends QueueBenchmark {

    public BenchmarkTestsDynoJedis() {
        // Single local Dynomite host in rack us-east-1c.
        List<Host> hosts = new ArrayList<>(1);
        Host localHost = new HostBuilder()
                .setHostname("localhost")
                .setIpAddress("127.0.0.1")
                .setPort(6379)
                .setRack("us-east-1c")
                .setDatacenter("us-east-1")
                .setStatus(Host.Status.Up)
                .createHost();
        hosts.add(localHost);

        HostSupplier hostSupplier = () -> hosts;

        // Static token map: one token, owned by the single host.
        TokenMapSupplier tokenSupplier = new TokenMapSupplier() {
            private final HostToken token = new HostToken(1L, localHost);

            @Override
            public List<HostToken> getTokens(Set<Host> activeHosts) {
                return Arrays.asList(token);
            }

            @Override
            public HostToken getTokenForHost(Host host, Set<Host> activeHosts) {
                return token;
            }
        };

        ConnectionPoolConfigurationImpl poolConfig = new ConnectionPoolConfigurationImpl("test")
                .withTokenSupplier(tokenSupplier)
                .setLocalRack("us-east-1c")
                .setLocalDataCenter("us-east-1");
        poolConfig.setSocketTimeout(0);
        poolConfig.setConnectTimeout(0);
        poolConfig.setMaxConnsPerHost(10);
        poolConfig.withHashtag("{}");

        DynoJedisClient client = new DynoJedisClient.Builder()
                .withApplicationName("test")
                .withDynomiteClusterName("test")
                .withCPConfig(poolConfig)
                .withHostSupplier(hostSupplier)
                .build();

        queue = new QueueBuilder()
                .setCurrentShard("a")
                .setQueueName("testq")
                .setRedisKeyPrefix("keyprefix")
                .setUnackTime(60_000)
                .useDynomite(client, client)
                .build();
    }

    public static void main(String[] args) throws Exception {
        try {
            System.out.println("Start");
            new BenchmarkTestsDynoJedis().run();
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            System.exit(0);
        }
    }
}
/**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.queues.redis.v2;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostBuilder;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.connectionpool.TokenMapSupplier;
import com.netflix.dyno.connectionpool.impl.ConnectionPoolConfigurationImpl;
import com.netflix.dyno.connectionpool.impl.lb.HostToken;
import com.netflix.dyno.jedis.DynoJedisClient;
import com.netflix.dyno.queues.DynoQueue;
import com.netflix.dyno.queues.redis.BaseQueueTests;
import redis.clients.jedis.Jedis;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Set;
/**
 * Runs the shared {@link BaseQueueTests} suite against a queue built via
 * {@link QueueBuilder#useDynomite} on top of a single-host Dynomite client.
 *
 * <p>The unused private fields of the original (a raw Jedis client, a
 * RedisPipelineQueue, a message-key prefix and a hash-bucket count) were dead
 * code and have been removed.
 */
public class DynoJedisTests extends BaseQueueTests {

    public DynoJedisTests() {
        super("dyno_queue_tests");
    }

    /**
     * Builds a Dynomite-backed queue for the given Redis key prefix and queue
     * name, using a single localhost node with a static single-token map.
     *
     * @param redisKeyPrefix prefix for all Redis keys used by the queue
     * @param queueName      logical queue name
     * @return the queue under test
     */
    @Override
    public DynoQueue getQueue(String redisKeyPrefix, String queueName) {
        List<Host> hosts = new ArrayList<>(1);
        hosts.add(
                new HostBuilder()
                        .setHostname("localhost")
                        .setIpAddress("127.0.0.1")
                        .setPort(6379)
                        .setRack("us-east-1a")
                        .setDatacenter("us-east-1")
                        .setStatus(Host.Status.Up)
                        .createHost()
        );

        QueueBuilder qb = new QueueBuilder();
        DynoJedisClient.Builder builder = new DynoJedisClient.Builder();
        HostSupplier hs = new HostSupplier() {
            @Override
            public List<Host> getHosts() {
                return hosts;
            }
        };

        // Static token map: one token, owned by the single host.
        ConnectionPoolConfigurationImpl cp = new ConnectionPoolConfigurationImpl("test").withTokenSupplier(new TokenMapSupplier() {
            HostToken token = new HostToken(1L, hosts.get(0));

            @Override
            public List<HostToken> getTokens(Set<Host> activeHosts) {
                return Arrays.asList(token);
            }

            @Override
            public HostToken getTokenForHost(Host host, Set<Host> activeHosts) {
                return token;
            }
        }).setLocalRack("us-east-1a").setLocalDataCenter("us-east-1");
        cp.setSocketTimeout(0);
        cp.setConnectTimeout(0);
        cp.setMaxConnsPerHost(10);
        // NOTE(review): the "{}" hashtag appears intended to co-locate all
        // queue keys in one slot — confirm against QueueBuilder docs.
        cp.withHashtag("{}");

        DynoJedisClient client = builder.withApplicationName("test")
                .withDynomiteClusterName("test")
                .withCPConfig(cp)
                .withHostSupplier(hs)
                .build();

        return qb
                .setCurrentShard("a")
                .setQueueName(queueName)
                .setRedisKeyPrefix(redisKeyPrefix)
                .setUnackTime(1_000)
                .useDynomite(client, client)
                .build();
    }
}
/**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.queues.redis.v2;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import com.netflix.dyno.queues.redis.v2.RedisPipelineQueue;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import com.google.common.util.concurrent.Uninterruptibles;
import com.netflix.dyno.queues.Message;
import com.netflix.dyno.queues.redis.conn.JedisProxy;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisPool;
import redis.clients.jedis.JedisPoolConfig;
public class RedisDynoQueueTest {
private static Jedis dynoClient;
private static final String queueName = "test_queue";
private static final String redisKeyPrefix = "testdynoqueues";
private static RedisPipelineQueue rdq;
private static String messageKeyPrefix;
private static int maxHashBuckets = 32;
// Builds a RedisPipelineQueue ("test_queue", shard "x") against a real local
// Redis on port 6379 — this suite requires a running Redis instance.
@BeforeClass
public static void setUpBeforeClass() throws Exception {
    JedisPoolConfig config = new JedisPoolConfig();
    config.setTestOnBorrow(true);
    config.setTestOnCreate(true);
    config.setMaxTotal(10);
    config.setMaxIdle(5);
    config.setMaxWaitMillis(60_000);
    JedisPool pool = new JedisPool(config, "localhost", 6379);
    // Direct (non-pooled) client, used here only to wipe the database.
    dynoClient = new Jedis("localhost", 6379, 0, 0);
    // Start from a clean slate: flush everything in the local Redis.
    dynoClient.flushAll();
    rdq = new RedisPipelineQueue(redisKeyPrefix, queueName, "x", 1_000, 1_000, new JedisProxy(pool));
    // Hash-tagged prefix under which the queue's message payloads are stored.
    messageKeyPrefix = redisKeyPrefix + ".MSG." + "{" + queueName + ".x}";
}
@Test
public void testGetName() {
    // The queue must report the name it was constructed with.
    String actualName = rdq.getName();
    assertEquals(queueName, actualName);
}
@Test
public void testGetUnackTime() {
    // The unack time must match the value passed at construction (1 second).
    long configuredUnackTime = rdq.getUnackTime();
    assertEquals(1_000, configuredUnackTime);
}
@Test
public void testTimeoutUpdate() {
rdq.clear();
String id = UUID.randomUUID().toString();
Message msg = new Message(id, "Hello World-" + id);
msg.setTimeout(100, TimeUnit.MILLISECONDS);
rdq.push(Arrays.asList(msg));
List<Message> popped = rdq.pop(1, 10, TimeUnit.MILLISECONDS);
assertNotNull(popped);
assertEquals(0, popped.size());
Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
popped = rdq.pop(1, 1, TimeUnit.SECONDS);
assertNotNull(popped);
assertEquals(1, popped.size());
boolean updated = rdq.setUnackTimeout(id, 500);
assertTrue(updated);
popped = rdq.pop(1, 1, TimeUnit.SECONDS);
assertNotNull(popped);
assertEquals(0, popped.size());
Uninterruptibles.sleepUninterruptibly(1000, TimeUnit.MILLISECONDS);
rdq.processUnacks();
popped = rdq.pop(1, 1, TimeUnit.SECONDS);
assertNotNull(popped);
assertEquals(1, popped.size());
updated = rdq.setUnackTimeout(id, 10_000); //10 seconds!
assertTrue(updated);
rdq.processUnacks();
popped = rdq.pop(1, 1, TimeUnit.SECONDS);
assertNotNull(popped);
assertEquals(0, popped.size());
updated = rdq.setUnackTimeout(id, 0);
assertTrue(updated);
rdq.processUnacks();
popped = rdq.pop(1, 1, TimeUnit.SECONDS);
assertNotNull(popped);
assertEquals(1, popped.size());
rdq.ack(id);
Map<String, Map<String, Long>> size = rdq.shardSizes();
Map<String, Long> values = size.get("x");
long total = values.values().stream().mapToLong(v -> v).sum();
assertEquals(0, total);
popped = rdq.pop(1, 1, TimeUnit.SECONDS);
assertNotNull(popped);
assertEquals(0, popped.size());
}
@Test
public void testConcurrency() throws InterruptedException, ExecutionException {
rdq.clear();
final int count = 100;
final AtomicInteger published = new AtomicInteger(0);
ScheduledExecutorService ses = Executors.newScheduledThreadPool(6);
CountDownLatch publishLatch = new CountDownLatch(1);
Runnable publisher = new Runnable() {
@Override
public void run() {
List<Message> messages = new LinkedList<>();
for (int i = 0; i < 10; i++) {
Message msg = new Message(UUID.randomUUID().toString(), "Hello World-" + i);
msg.setPriority(new Random().nextInt(98));
messages.add(msg);
}
if (published.get() >= count) {
publishLatch.countDown();
return;
}
published.addAndGet(messages.size());
rdq.push(messages);
}
};
for (int p = 0; p < 3; p++) {
ses.scheduleWithFixedDelay(publisher, 1, 1, TimeUnit.MILLISECONDS);
}
publishLatch.await();
CountDownLatch latch = new CountDownLatch(count);
List<Message> allMsgs = new CopyOnWriteArrayList<>();
AtomicInteger consumed = new AtomicInteger(0);
AtomicInteger counter = new AtomicInteger(0);
Runnable consumer = new Runnable() {
@Override
public void run() {
if (consumed.get() >= count) {
return;
}
List<Message> popped = rdq.pop(100, 1, TimeUnit.MILLISECONDS);
allMsgs.addAll(popped);
consumed.addAndGet(popped.size());
popped.stream().forEach(p -> latch.countDown());
counter.incrementAndGet();
}
};
for (int c = 0; c < 2; c++) {
ses.scheduleWithFixedDelay(consumer, 1, 10, TimeUnit.MILLISECONDS);
}
Uninterruptibles.awaitUninterruptibly(latch);
System.out.println("Consumed: " + consumed.get() + ", all: " + allMsgs.size() + " counter: " + counter.get());
Set<Message> uniqueMessages = allMsgs.stream().collect(Collectors.toSet());
assertEquals(count, allMsgs.size());
assertEquals(count, uniqueMessages.size());
List<Message> more = rdq.pop(1, 1, TimeUnit.SECONDS);
assertEquals(0, more.size());
ses.shutdownNow();
}
@Test
public void testSetTimeout() {
rdq.clear();
Message msg = new Message("x001yx", "Hello World");
msg.setPriority(3);
msg.setTimeout(10_000);
rdq.push(Arrays.asList(msg));
List<Message> popped = rdq.pop(1, 1, TimeUnit.SECONDS);
assertTrue(popped.isEmpty());
boolean updated = rdq.setTimeout(msg.getId(), 0);
assertTrue(updated);
popped = rdq.pop(2, 1, TimeUnit.SECONDS);
assertEquals(1, popped.size());
assertEquals(0, popped.get(0).getTimeout());
}
@Test
public void testAll() {
rdq.clear();
assertEquals(0, rdq.size());
int count = 10;
List<Message> messages = new LinkedList<>();
for (int i = 0; i < count; i++) {
Message msg = new Message("" + i, "Hello World-" + i);
msg.setPriority(count - i);
messages.add(msg);
}
rdq.push(messages);
messages = rdq.peek(count);
assertNotNull(messages);
assertEquals(count, messages.size());
long size = rdq.size();
assertEquals(count, size);
// We did a peek - let's ensure the messages are still around!
List<Message> messages2 = rdq.peek(count);
assertNotNull(messages2);
assertEquals(messages, messages2);
List<Message> poped = rdq.pop(count, 1, TimeUnit.SECONDS);
assertNotNull(poped);
assertEquals(count, poped.size());
assertEquals(messages, poped);
Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS);
rdq.processUnacks();
for (Message msg : messages) {
Message found = rdq.get(msg.getId());
assertNotNull(found);
assertEquals(msg.getId(), found.getId());
assertEquals(msg.getTimeout(), found.getTimeout());
}
assertNull(rdq.get("some fake id"));
List<Message> messages3 = rdq.pop(count, 1, TimeUnit.SECONDS);
if (messages3.size() < count) {
List<Message> messages4 = rdq.pop(count, 1, TimeUnit.SECONDS);
messages3.addAll(messages4);
}
assertNotNull(messages3);
assertEquals(10, messages3.size());
assertEquals(messages.stream().map(msg -> msg.getId()).sorted().collect(Collectors.toList()), messages3.stream().map(msg -> msg.getId()).sorted().collect(Collectors.toList()));
assertEquals(10, messages3.stream().map(msg -> msg.getId()).collect(Collectors.toSet()).size());
messages3.stream().forEach(System.out::println);
int bucketCounts = 0;
for (int i = 0; i < maxHashBuckets; i++) {
bucketCounts += dynoClient.hlen(messageKeyPrefix + "." + i);
}
assertEquals(10, bucketCounts);
for (Message msg : messages3) {
assertTrue(rdq.ack(msg.getId()));
assertFalse(rdq.ack(msg.getId()));
}
Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS);
messages3 = rdq.pop(count, 1, TimeUnit.SECONDS);
assertNotNull(messages3);
assertEquals(0, messages3.size());
}
@Before
public void clear() {
rdq.clear();
int bucketCounts = 0;
for (int i = 0; i < maxHashBuckets; i++) {
bucketCounts += dynoClient.hlen(messageKeyPrefix + "." + i);
}
assertEquals(0, bucketCounts);
}
@Test
public void testClearQueues() {
rdq.clear();
int count = 10;
List<Message> messages = new LinkedList<>();
for (int i = 0; i < count; i++) {
Message msg = new Message("x" + i, "Hello World-" + i);
msg.setPriority(count - i);
messages.add(msg);
}
rdq.push(messages);
assertEquals(count, rdq.size());
rdq.clear();
assertEquals(0, rdq.size());
}
}
| 3,042 |
0 | Create_ds/dyno-queues/dyno-queues-redis/src/test/java/com/netflix/dyno/queues/redis | Create_ds/dyno-queues/dyno-queues-redis/src/test/java/com/netflix/dyno/queues/redis/v2/JedisTests.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.queues.redis.v2;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostBuilder;
import com.netflix.dyno.queues.DynoQueue;
import com.netflix.dyno.queues.redis.BaseQueueTests;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisPool;
import redis.clients.jedis.JedisPoolConfig;
import java.util.LinkedList;
import java.util.List;
/**
*
*/
/**
 * Runs the shared {@link BaseQueueTests} suite against a single non-Dynomite
 * Redis node on localhost:6379, built through {@link QueueBuilder}.
 * Requires a live Redis server; the whole instance is flushed before the run.
 */
public class JedisTests extends BaseQueueTests {

    // Raw client used only to flush the test Redis instance before the suite runs.
    private static Jedis dynoClient;

    public JedisTests() {
        super("jedis_queue_tests");
    }

    /**
     * Builds the queue under test on shard "a" with a 1 second unack time.
     * Fix: the original also constructed a {@link JedisPool} here that was never
     * used or closed (a connection-pool leak) and declared several unused static
     * fields; both have been removed. Only the pool config is needed by
     * {@code useNonDynomiteRedis}.
     *
     * @param redisKeyPrefix key prefix for all queue structures in Redis
     * @param queueName      logical name of the queue
     * @return a cleared {@link DynoQueue} backed by plain (non-Dynomite) Redis
     */
    @Override
    public DynoQueue getQueue(String redisKeyPrefix, String queueName) {
        JedisPoolConfig config = new JedisPoolConfig();
        config.setTestOnBorrow(true);
        config.setTestOnCreate(true);
        config.setMaxTotal(10);
        config.setMaxIdle(5);
        config.setMaxWaitMillis(60_000);
        dynoClient = new Jedis("localhost", 6379, 0, 0);
        dynoClient.flushAll();
        List<Host> hosts = new LinkedList<>();
        hosts.add(
                new HostBuilder()
                        .setHostname("localhost")
                        .setPort(6379)
                        .setRack("us-east-1a")
                        .createHost()
        );
        QueueBuilder qb = new QueueBuilder();
        DynoQueue queue = qb
                .setCurrentShard("a")
                .setQueueName(queueName)
                .setRedisKeyPrefix(redisKeyPrefix)
                .setUnackTime(1_000)
                .useNonDynomiteRedis(config, hosts)
                .build();
        // Start each suite from an empty queue.
        queue.clear();
        return queue;
    }
}
| 3,043 |
0 | Create_ds/dyno-queues/dyno-queues-redis/src/test/java/com/netflix/dyno/queues/redis | Create_ds/dyno-queues/dyno-queues-redis/src/test/java/com/netflix/dyno/queues/redis/v2/MultiQueueTests.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.queues.redis.v2;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostBuilder;
import com.netflix.dyno.queues.DynoQueue;
import com.netflix.dyno.queues.Message;
import org.junit.Test;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisPool;
import redis.clients.jedis.JedisPoolConfig;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
/**
*
*/
/**
 * Tests for {@link MultiRedisQueue}: a queue spanning two racks ("a" and "b")
 * backed by the same local Redis. Verifies that pushes are spread evenly across
 * the shards and that {@code pop} drains only the current shard ("a"), blocking
 * for the requested wait time when not enough messages are available.
 * Requires a live Redis server on localhost:6379; it is flushed before each run.
 */
public class MultiQueueTests {

    // Raw client used only to flush the test Redis instance.
    private static Jedis dynoClient;

    /**
     * Builds a two-shard queue (racks us-east-1a / us-east-2b map to shards
     * "a" / "b") with the current shard set to "a" and a 50s unack time.
     * Fix: the original also constructed an unused, never-closed
     * {@link JedisPool} and declared several unused static fields; both removed.
     *
     * @param redisKeyPrefix key prefix for all queue structures in Redis
     * @param queueName      logical name of the queue
     * @return a cleared {@link DynoQueue} (expected to be a {@link MultiRedisQueue})
     */
    public DynoQueue getQueue(String redisKeyPrefix, String queueName) {
        JedisPoolConfig config = new JedisPoolConfig();
        config.setTestOnBorrow(true);
        config.setTestOnCreate(true);
        config.setMaxTotal(10);
        config.setMaxIdle(5);
        config.setMaxWaitMillis(60_000);
        dynoClient = new Jedis("localhost", 6379, 0, 0);
        dynoClient.flushAll();
        List<Host> hosts = new LinkedList<>();
        hosts.add(
                new HostBuilder()
                        .setHostname("localhost")
                        .setPort(6379)
                        .setRack("us-east-1a")
                        .createHost()
        );
        hosts.add(
                new HostBuilder()
                        .setHostname("localhost")
                        .setPort(6379)
                        .setRack("us-east-2b")
                        .createHost()
        );
        QueueBuilder qb = new QueueBuilder();
        DynoQueue queue = qb
                .setCurrentShard("a")
                .setQueueName(queueName)
                .setRedisKeyPrefix(redisKeyPrefix)
                .setUnackTime(50_000)
                .useNonDynomiteRedis(config, hosts)
                .build();
        queue.clear(); //clear the queue
        return queue;
    }

    /**
     * End-to-end check of shard distribution and pop timing:
     * an empty pop must block the full wait; 10 messages split 5/5 across the
     * two shards; pops return quickly while shard "a" has messages and block
     * for the full wait once it is drained.
     */
    @Test
    public void testAll() {
        DynoQueue queue = getQueue("test", "multi_queue");
        assertEquals(MultiRedisQueue.class, queue.getClass());
        long start = System.currentTimeMillis();
        List<Message> popped = queue.pop(1, 1, TimeUnit.SECONDS);
        assertTrue(popped.isEmpty()); //we have not pushed anything!!!!
        long elapsedTime = System.currentTimeMillis() - start;
        System.out.println("elapsed Time " + elapsedTime);
        // Empty queue: pop must have waited at least the full 1 second.
        assertTrue(elapsedTime > 1000);
        List<Message> messages = new LinkedList<>();
        for (int i = 0; i < 10; i++) {
            Message msg = new Message();
            msg.setId("" + i);
            msg.setPayload("" + i);
            messages.add(msg);
        }
        queue.push(messages);
        assertEquals(10, queue.size());
        // Pushes must be spread evenly across the two shards.
        Map<String, Map<String, Long>> shards = queue.shardSizes();
        assertEquals(2, shards.keySet().size()); //a and b
        Map<String, Long> shardA = shards.get("a");
        Map<String, Long> shardB = shards.get("b");
        assertNotNull(shardA);
        assertNotNull(shardB);
        Long sizeA = shardA.get("size");
        Long sizeB = shardB.get("size");
        assertNotNull(sizeA);
        assertNotNull(sizeB);
        assertEquals(5L, sizeA.longValue());
        assertEquals(5L, sizeB.longValue());
        start = System.currentTimeMillis();
        popped = queue.pop(2, 1, TimeUnit.SECONDS);
        elapsedTime = System.currentTimeMillis() - start;
        assertEquals(2, popped.size());
        System.out.println("elapsed Time " + elapsedTime);
        // Messages were available in shard "a": pop should return early.
        assertTrue(elapsedTime < 1000);
        start = System.currentTimeMillis();
        popped = queue.pop(5, 5, TimeUnit.SECONDS);
        elapsedTime = System.currentTimeMillis() - start;
        assertEquals(3, popped.size()); //3 remaining in the current shard
        System.out.println("elapsed Time " + elapsedTime);
        assertTrue(elapsedTime > 5000); //we would have waited for at least 5 second for the last 2 elements!
    }
}
| 3,044 |
0 | Create_ds/dyno-queues/dyno-queues-redis/src/test/java/com/netflix/dyno/queues | Create_ds/dyno-queues/dyno-queues-redis/src/test/java/com/netflix/dyno/queues/jedis/JedisMock.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
*
*/
package com.netflix.dyno.queues.jedis;
import org.rarefiedredis.redis.IRedisClient;
import org.rarefiedredis.redis.IRedisSortedSet.ZsetPair;
import org.rarefiedredis.redis.RedisMock;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.ScanParams;
import redis.clients.jedis.ScanResult;
import redis.clients.jedis.Tuple;
import redis.clients.jedis.exceptions.JedisException;
import redis.clients.jedis.params.ZAddParams;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.stream.Collectors;
/**
* @author Viren
*
*/
public class JedisMock extends Jedis {
    // In-memory Redis implementation that backs every delegated command.
    private IRedisClient redis;

    /** Creates a mock Jedis whose commands all run against an in-memory {@link RedisMock}. */
    public JedisMock() {
        super("");
        this.redis = new RedisMock();
    }
private Set<Tuple> toTupleSet(Set<ZsetPair> pairs) {
Set<Tuple> set = new HashSet<Tuple>();
for (ZsetPair pair : pairs) {
set.add(new Tuple(pair.member, pair.score));
}
return set;
}
@Override
public String set(final String key, String value) {
try {
return redis.set(key, value);
} catch (Exception e) {
throw new JedisException(e);
}
}
public String set(final String key, final String value, final String nxxx, final String expx, final long time) {
try {
return redis.set(key, value, nxxx, expx, String.valueOf(time));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String get(final String key) {
try {
return redis.get(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Boolean exists(final String key) {
try {
return redis.exists(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long del(final String... keys) {
try {
return redis.del(keys);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long del(String key) {
try {
return redis.del(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String type(final String key) {
try {
return redis.type(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
/*
* public Set<String> keys(final String pattern) { checkIsInMulti();
* client.keys(pattern); return
* BuilderFactory.STRING_SET.build(client.getBinaryMultiBulkReply()); }
*
* public String randomKey() { checkIsInMulti(); client.randomKey(); return
* client.getBulkReply(); }
*
* public String rename(final String oldkey, final String newkey) {
* checkIsInMulti(); client.rename(oldkey, newkey); return
* client.getStatusCodeReply(); }
*
* public Long renamenx(final String oldkey, final String newkey) {
* checkIsInMulti(); client.renamenx(oldkey, newkey); return
* client.getIntegerReply(); }
*/
@Override
public Long expire(final String key, final int seconds) {
try {
return redis.expire(key, seconds) ? 1L : 0L;
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long expireAt(final String key, final long unixTime) {
try {
return redis.expireat(key, unixTime) ? 1L : 0L;
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long ttl(final String key) {
try {
return redis.ttl(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long move(final String key, final int dbIndex) {
try {
return redis.move(key, dbIndex);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String getSet(final String key, final String value) {
try {
return redis.getset(key, value);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public List<String> mget(final String... keys) {
try {
String[] mget = redis.mget(keys);
List<String> lst = new ArrayList<String>(mget.length);
for (String get : mget) {
lst.add(get);
}
return lst;
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long setnx(final String key, final String value) {
try {
return redis.setnx(key, value);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String setex(final String key, final int seconds, final String value) {
try {
return redis.setex(key, seconds, value);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String mset(final String... keysvalues) {
try {
return redis.mset(keysvalues);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long msetnx(final String... keysvalues) {
try {
return redis.msetnx(keysvalues) ? 1L : 0L;
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long decrBy(final String key, final long integer) {
try {
return redis.decrby(key, integer);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long decr(final String key) {
try {
return redis.decr(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long incrBy(final String key, final long integer) {
try {
return redis.incrby(key, integer);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Double incrByFloat(final String key, final double value) {
try {
return Double.parseDouble(redis.incrbyfloat(key, value));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long incr(final String key) {
try {
return redis.incr(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long append(final String key, final String value) {
try {
return redis.append(key, value);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String substr(final String key, final int start, final int end) {
try {
return redis.getrange(key, start, end);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long hset(final String key, final String field, final String value) {
try {
return redis.hset(key, field, value) ? 1L : 0L;
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String hget(final String key, final String field) {
try {
return redis.hget(key, field);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long hsetnx(final String key, final String field, final String value) {
try {
return redis.hsetnx(key, field, value) ? 1L : 0L;
} catch (Exception e) {
throw new JedisException(e);
}
}
    /**
     * Delegates HMSET. {@code IRedisClient.hmset} takes the first field/value
     * pair as explicit arguments and the remainder as a varargs array, so the
     * map is split into a head pair plus a flattened args array.
     * NOTE(review): assumes a non-empty map — an empty map makes the args array
     * size negative; the resulting exception is wrapped in JedisException.
     */
    @Override
    public String hmset(final String key, final Map<String, String> hash) {
        try {
            String field = null, value = null;
            String[] args = new String[(hash.size() - 1) * 2];
            int idx = 0;
            for (String f : hash.keySet()) {
                // First entry becomes the explicit head pair; the rest flatten into args.
                if (field == null) {
                    field = f;
                    value = hash.get(f);
                    continue;
                }
                args[idx] = f;
                args[idx + 1] = hash.get(f);
                idx += 2;
            }
            return redis.hmset(key, field, value, args);
        } catch (Exception e) {
            throw new JedisException(e);
        }
    }
@Override
public List<String> hmget(final String key, final String... fields) {
try {
String field = fields[0];
String[] f = new String[fields.length - 1];
for (int idx = 1; idx < fields.length; ++idx) {
f[idx - 1] = fields[idx];
}
return redis.hmget(key, field, f);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long hincrBy(final String key, final String field, final long value) {
try {
return redis.hincrby(key, field, value);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Double hincrByFloat(final String key, final String field, final double value) {
try {
return Double.parseDouble(redis.hincrbyfloat(key, field, value));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Boolean hexists(final String key, final String field) {
try {
return redis.hexists(key, field);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long hdel(final String key, final String... fields) {
try {
String field = fields[0];
String[] f = new String[fields.length - 1];
for (int idx = 1; idx < fields.length; ++idx) {
f[idx - 1] = fields[idx];
}
return redis.hdel(key, field, f);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long hlen(final String key) {
try {
return redis.hlen(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> hkeys(final String key) {
try {
return redis.hkeys(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public List<String> hvals(final String key) {
try {
return redis.hvals(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Map<String, String> hgetAll(final String key) {
try {
return redis.hgetall(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long rpush(final String key, final String... strings) {
try {
String element = strings[0];
String[] elements = new String[strings.length - 1];
for (int idx = 1; idx < strings.length; ++idx) {
elements[idx - 1] = strings[idx];
}
return redis.rpush(key, element, elements);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long lpush(final String key, final String... strings) {
try {
String element = strings[0];
String[] elements = new String[strings.length - 1];
for (int idx = 1; idx < strings.length; ++idx) {
elements[idx - 1] = strings[idx];
}
return redis.lpush(key, element, elements);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long llen(final String key) {
try {
return redis.llen(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public List<String> lrange(final String key, final long start, final long end) {
try {
return redis.lrange(key, start, end);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String ltrim(final String key, final long start, final long end) {
try {
return redis.ltrim(key, start, end);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String lindex(final String key, final long index) {
try {
return redis.lindex(key, index);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String lset(final String key, final long index, final String value) {
try {
return redis.lset(key, index, value);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long lrem(final String key, final long count, final String value) {
try {
return redis.lrem(key, count, value);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String lpop(final String key) {
try {
return redis.lpop(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String rpop(final String key) {
try {
return redis.rpop(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String rpoplpush(final String srckey, final String dstkey) {
try {
return redis.rpoplpush(srckey, dstkey);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long sadd(final String key, final String... members) {
try {
String member = members[0];
String[] m = new String[members.length - 1];
for (int idx = 1; idx < members.length; ++idx) {
m[idx - 1] = members[idx];
}
return redis.sadd(key, member, m);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> smembers(final String key) {
try {
return redis.smembers(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long srem(final String key, final String... members) {
try {
String member = members[0];
String[] m = new String[members.length - 1];
for (int idx = 1; idx < members.length; ++idx) {
m[idx - 1] = members[idx];
}
return redis.srem(key, member, m);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String spop(final String key) {
try {
return redis.spop(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long smove(final String srckey, final String dstkey, final String member) {
try {
return redis.smove(srckey, dstkey, member) ? 1L : 0L;
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long scard(final String key) {
try {
return redis.scard(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Boolean sismember(final String key, final String member) {
try {
return redis.sismember(key, member);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> sinter(final String... keys) {
try {
String key = keys[0];
String[] k = new String[keys.length - 1];
for (int idx = 0; idx < keys.length; ++idx) {
k[idx - 1] = keys[idx];
}
return redis.sinter(key, k);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long sinterstore(final String dstkey, final String... keys) {
try {
String key = keys[0];
String[] k = new String[keys.length - 1];
for (int idx = 0; idx < keys.length; ++idx) {
k[idx - 1] = keys[idx];
}
return redis.sinterstore(dstkey, key, k);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> sunion(final String... keys) {
try {
String key = keys[0];
String[] k = new String[keys.length - 1];
for (int idx = 0; idx < keys.length; ++idx) {
k[idx - 1] = keys[idx];
}
return redis.sunion(key, k);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long sunionstore(final String dstkey, final String... keys) {
try {
String key = keys[0];
String[] k = new String[keys.length - 1];
for (int idx = 0; idx < keys.length; ++idx) {
k[idx - 1] = keys[idx];
}
return redis.sunionstore(dstkey, key, k);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> sdiff(final String... keys) {
try {
String key = keys[0];
String[] k = new String[keys.length - 1];
for (int idx = 0; idx < keys.length; ++idx) {
k[idx - 1] = keys[idx];
}
return redis.sdiff(key, k);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long sdiffstore(final String dstkey, final String... keys) {
try {
String key = keys[0];
String[] k = new String[keys.length - 1];
for (int idx = 0; idx < keys.length; ++idx) {
k[idx - 1] = keys[idx];
}
return redis.sdiffstore(dstkey, key, k);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String srandmember(final String key) {
try {
return redis.srandmember(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public List<String> srandmember(final String key, final int count) {
try {
return redis.srandmember(key, count);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long zadd(final String key, final double score, final String member) {
try {
return redis.zadd(key, new ZsetPair(member, score));
} catch (Exception e) {
throw new JedisException(e);
}
}
    /**
     * Delegates ZADD with params. Only the "xx" (only-update-existing) flag is
     * honored; NOTE(review): "nx", "ch" and other flags fall through to a plain
     * zadd — confirm callers only rely on "xx".
     */
    @Override
    public Long zadd(String key, double score, String member, ZAddParams params) {
        try {
            if (params.getParam("xx") != null) {
                // XX semantics: update the score only if the member already exists.
                Double existing = redis.zscore(key, member);
                if (existing == null) {
                    return 0L;
                }
                redis.zadd(key, new ZsetPair(member, score));
                return 1L;
            } else {
                return redis.zadd(key, new ZsetPair(member, score));
            }
        } catch (Exception e) {
            throw new JedisException(e);
        }
    }
@Override
public Long zadd(final String key, final Map<String, Double> scoreMembers) {
try {
Double score = null;
String member = null;
List<ZsetPair> scoresmembers = new ArrayList<ZsetPair>((scoreMembers.size() - 1) * 2);
for (String m : scoreMembers.keySet()) {
if (m == null) {
member = m;
score = scoreMembers.get(m);
continue;
}
scoresmembers.add(new ZsetPair(m, scoreMembers.get(m)));
}
return redis.zadd(key, new ZsetPair(member, score), (ZsetPair[]) scoresmembers.toArray());
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> zrange(final String key, final long start, final long end) {
try {
return ZsetPair.members(redis.zrange(key, start, end));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long zrem(final String key, final String... members) {
try {
String member = members[0];
String[] ms = new String[members.length - 1];
for (int idx = 1; idx < members.length; ++idx) {
ms[idx - 1] = members[idx];
}
return redis.zrem(key, member, ms);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Double zincrby(final String key, final double score, final String member) {
try {
return Double.parseDouble(redis.zincrby(key, score, member));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long zrank(final String key, final String member) {
try {
return redis.zrank(key, member);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long zrevrank(final String key, final String member) {
try {
return redis.zrevrank(key, member);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<String> zrevrange(final String key, final long start, final long end) {
try {
return ZsetPair.members(redis.zrevrange(key, start, end));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<Tuple> zrangeWithScores(final String key, final long start, final long end) {
try {
return toTupleSet(redis.zrange(key, start, end, "withscores"));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Set<Tuple> zrevrangeWithScores(final String key, final long start, final long end) {
try {
return toTupleSet(redis.zrevrange(key, start, end, "withscores"));
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long zcard(final String key) {
try {
return redis.zcard(key);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Double zscore(final String key, final String member) {
try {
return redis.zscore(key, member);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public String watch(final String... keys) {
try {
for (String key : keys) {
redis.watch(key);
}
return "OK";
} catch (Exception e) {
throw new JedisException(e);
}
}
/*
* public List<String> sort(final String key) { checkIsInMulti();
* client.sort(key); return client.getMultiBulkReply(); }
*
* public List<String> sort(final String key, final SortingParams
* sortingParameters) { checkIsInMulti(); client.sort(key,
* sortingParameters); return client.getMultiBulkReply(); }
*
* public List<String> blpop(final int timeout, final String... keys) {
* return blpop(getArgsAddTimeout(timeout, keys)); }
*
* private String[] getArgsAddTimeout(int timeout, String[] keys) { final
* int keyCount = keys.length; final String[] args = new String[keyCount +
* 1]; for (int at = 0; at != keyCount; ++at) { args[at] = keys[at]; }
*
* args[keyCount] = String.valueOf(timeout); return args; }
*
* public List<String> blpop(String... args) { checkIsInMulti();
* client.blpop(args); client.setTimeoutInfinite(); try { return
* client.getMultiBulkReply(); } finally { client.rollbackTimeout(); } }
*
* public List<String> brpop(String... args) { checkIsInMulti();
* client.brpop(args); client.setTimeoutInfinite(); try { return
* client.getMultiBulkReply(); } finally { client.rollbackTimeout(); } }
*
* @Deprecated public List<String> blpop(String arg) { return blpop(new
* String[] { arg }); }
*
* public List<String> brpop(String arg) { return brpop(new String[] { arg
* }); }
*
* public Long sort(final String key, final SortingParams sortingParameters,
* final String dstkey) { checkIsInMulti(); client.sort(key,
* sortingParameters, dstkey); return client.getIntegerReply(); }
*
* public Long sort(final String key, final String dstkey) {
* checkIsInMulti(); client.sort(key, dstkey); return
* client.getIntegerReply(); }
*
* public List<String> brpop(final int timeout, final String... keys) {
* return brpop(getArgsAddTimeout(timeout, keys)); }
*/
@Override
public Long zcount(final String key, final double min, final double max) {
try {
return redis.zcount(key, min, max);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public Long zcount(final String key, final String min, final String max) {
try {
return redis.zcount(key, Double.parseDouble(min), Double.parseDouble(max));
} catch (Exception e) {
throw new JedisException(e);
}
}
    /** Returns members of the sorted set at {@code key} with scores in [min, max]; scores are dropped. */
    @Override
    public Set<String> zrangeByScore(final String key, final double min, final double max) {
        try {
            return ZsetPair.members(redis.zrangebyscore(key, String.valueOf(min), String.valueOf(max)));
        } catch (Exception e) {
            throw new JedisException(e);
        }
    }
    /** Same as the double overload, but min/max pass through verbatim so Redis range syntax reaches the mock. */
    @Override
    public Set<String> zrangeByScore(final String key, final String min, final String max) {
        try {
            return ZsetPair.members(redis.zrangebyscore(key, min, max));
        } catch (Exception e) {
            throw new JedisException(e);
        }
    }
    /** Range query with LIMIT offset/count paging, expressed as option tokens for the mock. */
    @Override
    public Set<String> zrangeByScore(final String key, final double min, final double max, final int offset, final int count) {
        try {
            return ZsetPair.members(redis.zrangebyscore(key, String.valueOf(min), String.valueOf(max), "limit", String.valueOf(offset),
                String.valueOf(count)));
        } catch (Exception e) {
            throw new JedisException(e);
        }
    }
    /** String-bound variant of the paged range query. */
    @Override
    public Set<String> zrangeByScore(final String key, final String min, final String max, final int offset, final int count) {
        try {
            return ZsetPair.members(redis.zrangebyscore(key, min, max, "limit", String.valueOf(offset), String.valueOf(count)));
        } catch (Exception e) {
            throw new JedisException(e);
        }
    }
    /** Like zrangeByScore, but each member is returned as a (member, score) Tuple. */
    @Override
    public Set<Tuple> zrangeByScoreWithScores(final String key, final double min, final double max) {
        try {
            return toTupleSet(redis.zrangebyscore(key, String.valueOf(min), String.valueOf(max), "withscores"));
        } catch (Exception e) {
            throw new JedisException(e);
        }
    }
    /** String-bound variant; min/max pass through verbatim. */
    @Override
    public Set<Tuple> zrangeByScoreWithScores(final String key, final String min, final String max) {
        try {
            return toTupleSet(redis.zrangebyscore(key, min, max, "withscores"));
        } catch (Exception e) {
            throw new JedisException(e);
        }
    }
    /** Paged variant. NOTE(review): "withscores" is passed after the LIMIT tokens; real Redis
        expects WITHSCORES before LIMIT — presumably the mock accepts either order; confirm. */
    @Override
    public Set<Tuple> zrangeByScoreWithScores(final String key, final double min, final double max, final int offset, final int count) {
        try {
            return toTupleSet(redis.zrangebyscore(key, String.valueOf(min), String.valueOf(max), "limit", String.valueOf(offset),
                String.valueOf(count), "withscores"));
        } catch (Exception e) {
            throw new JedisException(e);
        }
    }
    /** String-bound paged variant. */
    @Override
    public Set<Tuple> zrangeByScoreWithScores(final String key, final String min, final String max, final int offset, final int count) {
        try {
            return toTupleSet(redis.zrangebyscore(key, min, max, "limit", String.valueOf(offset), String.valueOf(count), "withscores"));
        } catch (Exception e) {
            throw new JedisException(e);
        }
    }
    /** Reverse-order range query: members with scores in [min, max], highest score first; scores dropped. */
    @Override
    public Set<String> zrevrangeByScore(final String key, final double max, final double min) {
        try {
            return ZsetPair.members(redis.zrevrangebyscore(key, String.valueOf(max), String.valueOf(min)));
        } catch (Exception e) {
            throw new JedisException(e);
        }
    }
    /** String-bound reverse range; max/min pass through verbatim (Redis range syntax honored by the mock). */
    @Override
    public Set<String> zrevrangeByScore(final String key, final String max, final String min) {
        try {
            return ZsetPair.members(redis.zrevrangebyscore(key, max, min));
        } catch (Exception e) {
            throw new JedisException(e);
        }
    }
    /** Paged reverse range (LIMIT offset/count expressed as option tokens). */
    @Override
    public Set<String> zrevrangeByScore(final String key, final double max, final double min, final int offset, final int count) {
        try {
            return ZsetPair.members(redis.zrevrangebyscore(key, String.valueOf(max), String.valueOf(min), "limit", String.valueOf(offset),
                String.valueOf(count)));
        } catch (Exception e) {
            throw new JedisException(e);
        }
    }
    /** Reverse range returning (member, score) Tuples. */
    @Override
    public Set<Tuple> zrevrangeByScoreWithScores(final String key, final double max, final double min) {
        try {
            return toTupleSet(redis.zrevrangebyscore(key, String.valueOf(max), String.valueOf(min), "withscores"));
        } catch (Exception e) {
            throw new JedisException(e);
        }
    }
    /** Paged reverse range with scores. NOTE(review): "withscores" follows the LIMIT tokens —
        presumably accepted by the mock in this order; confirm against its API. */
    @Override
    public Set<Tuple> zrevrangeByScoreWithScores(final String key, final double max, final double min, final int offset, final int count) {
        try {
            return toTupleSet(redis.zrevrangebyscore(key, String.valueOf(max), String.valueOf(min), "limit", String.valueOf(offset),
                String.valueOf(count), "withscores"));
        } catch (Exception e) {
            throw new JedisException(e);
        }
    }
    /** String-bound paged reverse range with scores. */
    @Override
    public Set<Tuple> zrevrangeByScoreWithScores(final String key, final String max, final String min, final int offset, final int count) {
        try {
            return toTupleSet(redis.zrevrangebyscore(key, max, min, "limit", String.valueOf(offset), String.valueOf(count), "withscores"));
        } catch (Exception e) {
            throw new JedisException(e);
        }
    }
    /** String-bound paged reverse range (scores dropped). */
    @Override
    public Set<String> zrevrangeByScore(final String key, final String max, final String min, final int offset, final int count) {
        try {
            return ZsetPair.members(redis.zrevrangebyscore(key, max, min, "limit", String.valueOf(offset), String.valueOf(count)));
        } catch (Exception e) {
            throw new JedisException(e);
        }
    }
    /** String-bound reverse range with scores. */
    @Override
    public Set<Tuple> zrevrangeByScoreWithScores(final String key, final String max, final String min) {
        try {
            return toTupleSet(redis.zrevrangebyscore(key, max, min, "withscores"));
        } catch (Exception e) {
            throw new JedisException(e);
        }
    }
    /** Removes members of {@code key} ranked between start and end (inclusive); returns removal count. */
    @Override
    public Long zremrangeByRank(final String key, final long start, final long end) {
        try {
            return redis.zremrangebyrank(key, start, end);
        } catch (Exception e) {
            throw new JedisException(e);
        }
    }
    /** Removes members of {@code key} with scores in [start, end]; returns removal count. */
    @Override
    public Long zremrangeByScore(final String key, final double start, final double end) {
        try {
            return redis.zremrangebyscore(key, String.valueOf(start), String.valueOf(end));
        } catch (Exception e) {
            throw new JedisException(e);
        }
    }
    /** String-bound variant; start/end pass through verbatim so Redis range syntax reaches the mock. */
    @Override
    public Long zremrangeByScore(final String key, final String start, final String end) {
        try {
            return redis.zremrangebyscore(key, start, end);
        } catch (Exception e) {
            throw new JedisException(e);
        }
    }
@Override
public Long zunionstore(final String dstkey, final String... sets) {
try {
return redis.zunionstore(dstkey, sets.length, sets);
} catch (Exception e) {
throw new JedisException(e);
}
}
@Override
public ScanResult<String> sscan(String key, String cursor, ScanParams params) {
try {
org.rarefiedredis.redis.ScanResult<Set<String>> sr = redis.sscan(key, Long.valueOf(cursor), "count", "1000000");
List<String> list = sr.results.stream().collect(Collectors.toList());
ScanResult<String> result = new ScanResult<String>("0", list);
return result;
} catch (Exception e) {
throw new JedisException(e);
}
}
public ScanResult<Entry<String, String>> hscan(final String key, final String cursor) {
try {
org.rarefiedredis.redis.ScanResult<Map<String, String>> mockr = redis.hscan(key, Long.valueOf(cursor), "count", "1000000");
Map<String, String> results = mockr.results;
List<Entry<String, String>> list = results.entrySet().stream().collect(Collectors.toList());
ScanResult<Entry<String, String>> result = new ScanResult<Entry<String, String>>("0", list);
return result;
} catch (Exception e) {
throw new JedisException(e);
}
}
public ScanResult<Tuple> zscan(final String key, final String cursor) {
try {
org.rarefiedredis.redis.ScanResult<Set<ZsetPair>> sr = redis.zscan(key, Long.valueOf(cursor), "count", "1000000");
List<ZsetPair> list = sr.results.stream().collect(Collectors.toList());
List<Tuple> tl = new LinkedList<Tuple>();
list.forEach(p -> tl.add(new Tuple(p.member, p.score)));
ScanResult<Tuple> result = new ScanResult<Tuple>("0", tl);
return result;
} catch (Exception e) {
throw new JedisException(e);
}
}
}
| 3,045 |
0 | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues/demo/DynoQueueDemo.java | package com.netflix.dyno.queues.demo;
import com.netflix.dyno.demo.redis.DynoJedisDemo;
import com.netflix.dyno.jedis.DynoJedisClient;
import com.netflix.dyno.queues.DynoQueue;
import com.netflix.dyno.queues.Message;
import com.netflix.dyno.queues.redis.RedisQueues;
import com.netflix.dyno.queues.redis.v2.QueueBuilder;
import com.netflix.dyno.queues.shard.ConsistentAWSDynoShardSupplier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
public class DynoQueueDemo extends DynoJedisDemo {
    // NOTE(review): the logger was originally created against DynoQueue.class, filing this
    // demo's output under the wrong logger category; it now uses the demo's own class.
    private static final Logger logger = LoggerFactory.getLogger(DynoQueueDemo.class);

    /** Demo against a single cluster. */
    public DynoQueueDemo(String clusterName, String localRack) {
        super(clusterName, localRack);
    }

    /** Demo against a primary cluster with a shadow cluster. */
    public DynoQueueDemo(String primaryCluster, String shadowCluster, String localRack) {
        super(primaryCluster, shadowCluster, localRack);
    }

    /**
     * Provide the cluster name to connect to as an argument to the function.
     * throws java.lang.RuntimeException: java.net.ConnectException: Connection timed out (Connection timed out)
     * if the cluster is not reachable.
     *
     * @param args: cluster-name version
     *              <p>
     *              cluster-name: Name of cluster to run demo against
     *              version: Possible values = 1 or 2; (for V1 or V2)
     */
    public static void main(String[] args) throws IOException {
        // Validate arity BEFORE touching args[0]; the original dereferenced args[0] first
        // and would fail with ArrayIndexOutOfBoundsException instead of this message.
        if (args.length < 2) {
            throw new IllegalArgumentException("Need to pass in cluster-name and version of dyno-queues to run as arguments");
        }
        final String clusterName = args[0];
        int version = Integer.parseInt(args[1]);

        final DynoQueueDemo demo = new DynoQueueDemo(clusterName, "us-east-1e");
        Properties props = new Properties();
        props.load(DynoQueueDemo.class.getResourceAsStream("/demo.properties"));
        for (String name : props.stringPropertyNames()) {
            System.setProperty(name, props.getProperty(name));
        }

        try {
            demo.initWithRemoteClusterFromEurekaUrl(clusterName, 8102, false);

            if (version == 1) {
                demo.runSimpleV1Demo(demo.client);
            } else if (version == 2) {
                demo.runSimpleV2QueueDemo(demo.client);
            }
            Thread.sleep(10000);
        } catch (Exception ex) {
            ex.printStackTrace();
        } finally {
            demo.stop();
            logger.info("Done");
        }
    }

    /** Exercises the V1 RedisDynoQueue recipe end to end: push, predicate search, pop, ack, remove. */
    private void runSimpleV1Demo(DynoJedisClient dyno) throws IOException {
        String region = System.getProperty("LOCAL_DATACENTER");
        String localRack = System.getProperty("LOCAL_RACK");

        String prefix = "dynoQueue_";

        ConsistentAWSDynoShardSupplier ss = new ConsistentAWSDynoShardSupplier(dyno.getConnPool().getConfiguration().getHostSupplier(), region, localRack);

        RedisQueues queues = new RedisQueues(dyno, dyno, prefix, ss, 50_000, 50_000);

        // "id1" carries the distinctive payload the predicate-search calls below look for;
        // the remaining fourteen are generated uniformly instead of hand-written add() calls.
        List<Message> payloads = new ArrayList<>();
        payloads.add(new Message("id1", "searchable payload123"));
        for (int i = 2; i <= 15; i++) {
            payloads.add(new Message("id" + i, "payload " + i));
        }

        DynoQueue V1Queue = queues.get("simpleQueue");

        // Clear the queue in case the server already has the above key.
        V1Queue.clear();

        // Test push() API (typed; the original declared a raw List).
        List<String> pushedIds = V1Queue.push(payloads);
        logger.info("Pushed " + pushedIds.size() + " messages");

        // Test ensure() API
        Message msg1 = payloads.get(0);
        logger.info("Does Message with ID '" + msg1.getId() + "' already exist? -> " + !V1Queue.ensure(msg1));

        // Test containsPredicate() API
        logger.info("Does the predicate 'searchable' exist in the queue? -> " + V1Queue.containsPredicate("searchable"));

        // Test getMsgWithPredicate() API
        logger.info("Get MSG ID that contains 'searchable' in the queue -> " + V1Queue.getMsgWithPredicate("searchable pay*"));

        // Test getMsgWithPredicate(predicate, localShardOnly=true) API
        // NOTE: This only works on single ring sized Dynomite clusters.
        logger.info("Get MSG ID that contains 'searchable' in the queue -> " + V1Queue.getMsgWithPredicate("searchable pay*", true));
        logger.info("Get MSG ID that contains '3' in the queue -> " + V1Queue.getMsgWithPredicate("3", true));

        Message poppedWithPredicate = V1Queue.popMsgWithPredicate("searchable pay*", false);
        V1Queue.ack(poppedWithPredicate.getId());

        List<Message> specific_pops = new ArrayList<>();
        // We'd only be able to pop from the local shard with popWithMsgId(), so try to pop the first payload ID we see in the local shard.
        // Until then pop all messages not in the local shard with unsafePopWithMsgIdAllShards().
        for (int i = 1; i < payloads.size(); ++i) {
            Message popWithMsgId = V1Queue.popWithMsgId(payloads.get(i).getId());
            if (popWithMsgId != null) {
                specific_pops.add(popWithMsgId);
                break;
            } else {
                // If we were unable to pop using popWithMsgId(), that means the message ID does not exist in the local shard.
                // Ensure that we can pop with unsafePopWithMsgIdAllShards().
                Message unsafeSpecificPop = V1Queue.unsafePopWithMsgIdAllShards(payloads.get(i).getId());
                assert (unsafeSpecificPop != null);
                boolean ack = V1Queue.ack(unsafeSpecificPop.getId());
                assert (ack);
            }
        }

        // Test ack()
        boolean ack_successful = V1Queue.ack(specific_pops.get(0).getId());
        assert (ack_successful);

        // Test remove()
        // Note: This checks for "id9" specifically as it implicitly expects every 3rd element we push to be in our
        // local shard.
        boolean removed = V1Queue.remove("id9");
        assert (removed);

        // Test pop(). Even though we try to pop 3 messages, there will only be one remaining message in our local shard.
        List<Message> popped_msgs = V1Queue.pop(1, 1000, TimeUnit.MILLISECONDS);
        V1Queue.ack(popped_msgs.get(0).getId());

        // Test unsafePeekAllShards()
        List<Message> peek_all_msgs = V1Queue.unsafePeekAllShards(5);
        for (Message msg : peek_all_msgs) {
            logger.info("Message peeked (ID : payload) -> " + msg.getId() + " : " + msg.getPayload());
        }

        // Test unsafePopAllShards()
        List<Message> pop_all_msgs = V1Queue.unsafePopAllShards(7, 1000, TimeUnit.MILLISECONDS);
        for (Message msg : pop_all_msgs) {
            logger.info("Message popped (ID : payload) -> " + msg.getId() + " : " + msg.getPayload());
            boolean ack = V1Queue.ack(msg.getId());
            assert (ack);
        }

        V1Queue.clear();
        V1Queue.close();
    }

    /** Exercises the V2 QueueBuilder recipe: push one message, pop, ack, close. */
    private void runSimpleV2QueueDemo(DynoJedisClient dyno) throws IOException {
        String prefix = "dynoQueue_";
        DynoQueue queue = new QueueBuilder()
                .setQueueName("test")
                .setRedisKeyPrefix(prefix)
                .useDynomite(dyno, dyno)
                .setUnackTime(50_000)
                .build();
        Message msg = new Message("id1", "message payload");
        queue.push(Arrays.asList(msg));

        int count = 10;
        List<Message> polled = queue.pop(count, 1, TimeUnit.SECONDS);
        logger.info(polled.toString());

        queue.ack("id1");
        queue.close();
    }
}
| 3,046 |
0 | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues/redis/RedisQueues.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.queues.redis;
import com.netflix.dyno.jedis.DynoJedisClient;
import com.netflix.dyno.queues.DynoQueue;
import com.netflix.dyno.queues.ShardSupplier;
import com.netflix.dyno.queues.redis.sharding.RoundRobinStrategy;
import com.netflix.dyno.queues.redis.sharding.ShardingStrategy;
import redis.clients.jedis.commands.JedisCommands;
import java.io.Closeable;
import java.io.IOException;
import java.time.Clock;
import java.util.Collection;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
/**
 * @author Viren
 *
 * Note: callers are responsible for releasing the resources held by the RedisQueue
 * instances created here — call close() on each RedisQueue (or on this RedisQueues
 * container, which closes all of them) when done.
 */
public class RedisQueues implements Closeable {

    // Pool size taken to indicate a single-ring Dynomite topology.
    // NOTE(review): inherited magic number — confirm that exactly 3 hosts always
    // implies a single ring for the deployments this library targets.
    private static final int SINGLE_RING_POOL_SIZE = 3;

    private final Clock clock;

    private final JedisCommands quorumConn;

    private final JedisCommands nonQuorumConn;

    private final Set<String> allShards;

    private final String shardName;

    private final String redisKeyPrefix;

    private final int unackTime;

    private final int unackHandlerIntervalInMS;

    // Cache of lazily-created queues, keyed by queue name.
    private final ConcurrentHashMap<String, DynoQueue> queues;

    private final ShardingStrategy shardingStrategy;

    private final boolean singleRingTopology;

    /**
     * @param quorumConn Dyno connection with dc_quorum enabled
     * @param nonQuorumConn Dyno connection to local Redis
     * @param redisKeyPrefix prefix applied to the Redis keys
     * @param shardSupplier Provider for the shards for the queues created
     * @param unackTime Time in millisecond within which a message needs to be acknowledged by the client, after which the message is re-queued.
     * @param unackHandlerIntervalInMS Time in millisecond at which the un-acknowledgement processor runs
     */
    public RedisQueues(JedisCommands quorumConn, JedisCommands nonQuorumConn, String redisKeyPrefix, ShardSupplier shardSupplier, int unackTime, int unackHandlerIntervalInMS) {
        this(Clock.systemDefaultZone(), quorumConn, nonQuorumConn, redisKeyPrefix, shardSupplier, unackTime, unackHandlerIntervalInMS, new RoundRobinStrategy());
    }

    /**
     * @param quorumConn Dyno connection with dc_quorum enabled
     * @param nonQuorumConn Dyno connection to local Redis
     * @param redisKeyPrefix prefix applied to the Redis keys
     * @param shardSupplier Provider for the shards for the queues created
     * @param unackTime Time in millisecond within which a message needs to be acknowledged by the client, after which the message is re-queued.
     * @param unackHandlerIntervalInMS Time in millisecond at which the un-acknowledgement processor runs
     * @param shardingStrategy sharding strategy responsible for calculating message's destination shard
     */
    public RedisQueues(JedisCommands quorumConn, JedisCommands nonQuorumConn, String redisKeyPrefix, ShardSupplier shardSupplier, int unackTime, int unackHandlerIntervalInMS, ShardingStrategy shardingStrategy) {
        this(Clock.systemDefaultZone(), quorumConn, nonQuorumConn, redisKeyPrefix, shardSupplier, unackTime, unackHandlerIntervalInMS, shardingStrategy);
    }

    /**
     * @param clock Time provider
     * @param quorumConn Dyno connection with dc_quorum enabled
     * @param nonQuorumConn Dyno connection to local Redis
     * @param redisKeyPrefix prefix applied to the Redis keys
     * @param shardSupplier Provider for the shards for the queues created
     * @param unackTime Time in millisecond within which a message needs to be acknowledged by the client, after which the message is re-queued.
     * @param unackHandlerIntervalInMS Time in millisecond at which the un-acknowledgement processor runs
     * @param shardingStrategy sharding strategy responsible for calculating message's destination shard
     */
    public RedisQueues(Clock clock, JedisCommands quorumConn, JedisCommands nonQuorumConn, String redisKeyPrefix, ShardSupplier shardSupplier, int unackTime, int unackHandlerIntervalInMS, ShardingStrategy shardingStrategy) {
        this.clock = clock;
        this.quorumConn = quorumConn;
        this.nonQuorumConn = nonQuorumConn;
        this.redisKeyPrefix = redisKeyPrefix;
        this.allShards = shardSupplier.getQueueShards();
        this.shardName = shardSupplier.getCurrentShard();
        this.unackTime = unackTime;
        this.unackHandlerIntervalInMS = unackHandlerIntervalInMS;
        this.queues = new ConcurrentHashMap<>();
        this.shardingStrategy = shardingStrategy;

        // Topology detection is only possible when we can inspect a DynoJedisClient's pool.
        if (quorumConn instanceof DynoJedisClient) {
            this.singleRingTopology = ((DynoJedisClient) quorumConn).getConnPool().getPools().size() == SINGLE_RING_POOL_SIZE;
        } else {
            this.singleRingTopology = false;
        }
    }

    /**
     *
     * @param queueName Name of the queue
     * @return Returns the DynoQueue hosting the given queue by name
     * @see DynoQueue
     * @see RedisDynoQueue
     */
    public DynoQueue get(String queueName) {
        // computeIfAbsent keys on equals(), so the original's String.intern() call on the
        // key served no purpose and has been dropped.
        return queues.computeIfAbsent(queueName, (key) -> new RedisDynoQueue(clock, redisKeyPrefix, queueName, allShards, shardName, unackHandlerIntervalInMS, shardingStrategy, singleRingTopology)
                .withUnackTime(unackTime)
                .withNonQuorumConn(nonQuorumConn)
                .withQuorumConn(quorumConn));
    }

    /**
     *
     * @return Collection of all the registered queues
     */
    public Collection<DynoQueue> queues() {
        return this.queues.values();
    }

    @Override
    public void close() throws IOException {
        queues.values().forEach(queue -> {
            try {
                queue.close();
            } catch (final IOException e) {
                // Wrap the exception itself, not e.getCause(): the cause may be null, and
                // wrapping it directly preserves the full stack trace for callers.
                throw new RuntimeException(e);
            }
        });
    }
}
| 3,047 |
0 | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues/redis/QueueMonitor.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.queues.redis;
import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import com.netflix.servo.DefaultMonitorRegistry;
import com.netflix.servo.MonitorRegistry;
import com.netflix.servo.monitor.BasicCounter;
import com.netflix.servo.monitor.BasicStopwatch;
import com.netflix.servo.monitor.BasicTimer;
import com.netflix.servo.monitor.MonitorConfig;
import com.netflix.servo.monitor.StatsMonitor;
import com.netflix.servo.monitor.Stopwatch;
import com.netflix.servo.stats.StatsConfig;
/**
* @author Viren
* Monitoring for the queue, publishes the metrics using servo
* https://github.com/Netflix/servo
*/
public class QueueMonitor implements Closeable {
    // NOTE(review): these monitor fields are public and mutable by design — callers time
    // operations directly (e.g. monitor.peek.start()), so their visibility is part of the API.
    public BasicTimer peek;
    public BasicTimer ack;
    public BasicTimer size;
    public BasicTimer processUnack;
    public BasicTimer remove;
    public BasicTimer get;
    public StatsMonitor queueDepth;
    public StatsMonitor batchSize;
    public StatsMonitor pop;
    public StatsMonitor push;
    public BasicCounter misses;
    public StatsMonitor prefetch;
    private String queueName;
    private String shardName;
    // Drives the periodic publication of the StatsMonitor statistics snapshots.
    private ScheduledExecutorService executor;
    private static final String className = QueueMonitor.class.getSimpleName();
    /**
     * Creates and registers all per-queue, per-shard servo monitors.
     *
     * @param queueName queue name each metric is tagged with
     * @param shardName shard name each metric is tagged with
     */
    public QueueMonitor(String queueName, String shardName) {
        String totalTagName = "total";
        executor = Executors.newScheduledThreadPool(1);
        this.queueName = queueName;
        this.shardName = shardName;
        peek = new BasicTimer(create("peek"), TimeUnit.MILLISECONDS);
        ack = new BasicTimer(create("ack"), TimeUnit.MILLISECONDS);
        size = new BasicTimer(create("size"), TimeUnit.MILLISECONDS);
        processUnack = new BasicTimer(create("processUnack"), TimeUnit.MILLISECONDS);
        remove = new BasicTimer(create("remove"), TimeUnit.MILLISECONDS);
        get = new BasicTimer(create("get"), TimeUnit.MILLISECONDS);
        misses = new BasicCounter(create("queue_miss"));
        // Publish count/max/mean/min/total for every StatsMonitor below.
        StatsConfig statsConfig = new StatsConfig.Builder().withPublishCount(true).withPublishMax(true).withPublishMean(true).withPublishMin(true).withPublishTotal(true).build();
        queueDepth = new StatsMonitor(create("queueDepth"), statsConfig, executor, totalTagName, true);
        batchSize = new StatsMonitor(create("batchSize"), statsConfig, executor, totalTagName, true);
        pop = new StatsMonitor(create("pop"), statsConfig, executor, totalTagName, true);
        push = new StatsMonitor(create("push"), statsConfig, executor, totalTagName, true);
        prefetch = new StatsMonitor(create("prefetch"), statsConfig, executor, totalTagName, true);
        // Register everything with the global servo registry so the metrics get published.
        MonitorRegistry registry = DefaultMonitorRegistry.getInstance();
        registry.register(pop);
        registry.register(push);
        registry.register(peek);
        registry.register(ack);
        registry.register(size);
        registry.register(processUnack);
        registry.register(remove);
        registry.register(get);
        registry.register(queueDepth);
        registry.register(misses);
        registry.register(batchSize);
        registry.register(prefetch);
    }
    // Builds a MonitorConfig tagged with this class's name, the shard and the queue name.
    private MonitorConfig create(String name) {
        return MonitorConfig.builder(name).withTag("class", className).withTag("shard", shardName).withTag("queueName", queueName).build();
    }
    /**
     * Starts a stopwatch that, when stopped, records the per-item latency (total duration
     * divided by the batch size) into {@code sm} and the batch size into {@code batchSize}.
     *
     * @param sm monitor to record the per-item latency into
     * @param batchCount number of items in the batch (0 is treated as 1 to avoid division by zero)
     * @return a started stopwatch; the caller must invoke stop() on it
     */
    public Stopwatch start(StatsMonitor sm, int batchCount) {
        int count = (batchCount == 0) ? 1 : batchCount;
        Stopwatch sw = new BasicStopwatch() {
            @Override
            public void stop() {
                super.stop();
                long duration = getDuration(TimeUnit.MILLISECONDS) / count;
                sm.record(duration);
                batchSize.record(count);
            }
        };
        sw.start();
        return sw;
    }
    @Override
    public void close() throws IOException {
        // Stops the stats-publication executor; monitors remain registered with servo.
        executor.shutdown();
    }
}
| 3,048 |
0 | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues/redis/RedisDynoQueue.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.queues.redis;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableList;
import com.google.common.util.concurrent.Uninterruptibles;
import com.netflix.dyno.jedis.DynoJedisClient;
import com.netflix.dyno.queues.DynoQueue;
import com.netflix.dyno.queues.Message;
import com.netflix.dyno.queues.redis.sharding.ShardingStrategy;
import com.netflix.servo.monitor.Stopwatch;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import redis.clients.jedis.Tuple;
import redis.clients.jedis.commands.JedisCommands;
import redis.clients.jedis.params.ZAddParams;
import java.io.IOException;
import java.text.NumberFormat;
import java.time.Clock;
import java.util.*;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import static com.netflix.dyno.queues.redis.QueueUtils.execute;
/**
*
* @author Viren
* Current Production (March 2018) recipe - well tested in production.
* Note, this recipe does not use redis pipelines and hence the throughput offered is less compared to v2 recipes.
*/
public class RedisDynoQueue implements DynoQueue {
private final Logger logger = LoggerFactory.getLogger(RedisDynoQueue.class);
private final Clock clock;
private final String queueName;
private final List<String> allShards;
private final String shardName;
private final String redisKeyPrefix;
private final String messageStoreKey;
private final String localQueueShard;
private volatile int unackTime = 60;
private final QueueMonitor monitor;
private final ObjectMapper om;
private volatile JedisCommands quorumConn;
private volatile JedisCommands nonQuorumConn;
private final ConcurrentLinkedQueue<String> prefetchedIds;
private final Map<String, ConcurrentLinkedQueue<String>> unsafePrefetchedIdsAllShardsMap;
private final ScheduledExecutorService schedulerForUnacksProcessing;
private final int retryCount = 2;
private final ShardingStrategy shardingStrategy;
private final boolean singleRingTopology;
// Tracks the number of message IDs to prefetch based on the message counts requested by the caller via pop().
@VisibleForTesting
AtomicInteger numIdsToPrefetch;
// Tracks the number of message IDs to prefetch based on the message counts requested by the caller via
// unsafePopAllShards().
@VisibleForTesting
AtomicInteger unsafeNumIdsToPrefetchAllShards;
    /** Creates a queue with the default 60s unack-processing schedule and the system clock. */
    public RedisDynoQueue(String redisKeyPrefix, String queueName, Set<String> allShards, String shardName, ShardingStrategy shardingStrategy, boolean singleRingTopology) {
        this(redisKeyPrefix, queueName, allShards, shardName, 60_000, shardingStrategy, singleRingTopology);
    }
    /** Creates a queue with an explicit unack schedule, using the system clock. */
    public RedisDynoQueue(String redisKeyPrefix, String queueName, Set<String> allShards, String shardName, int unackScheduleInMS, ShardingStrategy shardingStrategy, boolean singleRingTopology) {
        this(Clock.systemDefaultZone(), redisKeyPrefix, queueName, allShards, shardName, unackScheduleInMS, shardingStrategy, singleRingTopology);
    }
    /**
     * Fully-specified constructor.
     *
     * @param clock time source used for message scoring and visibility (injectable for tests)
     * @param redisKeyPrefix prefix applied to every Redis key this queue creates
     * @param queueName logical queue name
     * @param allShards names of all shards that make up this queue
     * @param shardName the local shard this instance reads from
     * @param unackScheduleInMS period in milliseconds at which the unack processor runs
     * @param shardingStrategy strategy used to pick a destination shard per message
     * @param singleRingTopology whether the backing Dynomite cluster is a single ring
     */
    public RedisDynoQueue(Clock clock, String redisKeyPrefix, String queueName, Set<String> allShards, String shardName, int unackScheduleInMS, ShardingStrategy shardingStrategy, boolean singleRingTopology) {
        this.clock = clock;
        this.redisKeyPrefix = redisKeyPrefix;
        this.queueName = queueName;
        this.allShards = ImmutableList.copyOf(allShards.stream().collect(Collectors.toList()));
        this.shardName = shardName;
        this.messageStoreKey = redisKeyPrefix + ".MESSAGE." + queueName;
        this.localQueueShard = getQueueShardKey(queueName, shardName);
        this.shardingStrategy = shardingStrategy;
        this.numIdsToPrefetch = new AtomicInteger(0);
        this.unsafeNumIdsToPrefetchAllShards = new AtomicInteger(0);
        this.singleRingTopology = singleRingTopology;
        this.om = QueueUtils.constructObjectMapper();
        this.monitor = new QueueMonitor(queueName, shardName);
        this.prefetchedIds = new ConcurrentLinkedQueue<>();
        this.unsafePrefetchedIdsAllShardsMap = new HashMap<>();
        // One prefetch queue per shard key, so the unsafe* pop paths can draw from any shard.
        for (String shard : allShards) {
            unsafePrefetchedIdsAllShardsMap.put(getQueueShardKey(queueName, shard), new ConcurrentLinkedQueue<>());
        }
        schedulerForUnacksProcessing = Executors.newScheduledThreadPool(1);
        // Single-ring clusters use atomicProcessUnacks(); others use processUnacks().
        // NOTE(review): presumably the atomic variant relies on single-ring guarantees —
        // confirm against the two implementations.
        if (this.singleRingTopology) {
            schedulerForUnacksProcessing.scheduleAtFixedRate(() -> atomicProcessUnacks(), unackScheduleInMS, unackScheduleInMS, TimeUnit.MILLISECONDS);
        } else {
            schedulerForUnacksProcessing.scheduleAtFixedRate(() -> processUnacks(), unackScheduleInMS, unackScheduleInMS, TimeUnit.MILLISECONDS);
        }
        logger.info(RedisDynoQueue.class.getName() + " is ready to serve " + queueName);
    }
    /** Sets the quorum (dc_quorum) connection used for writes; returns this for chaining. */
    public RedisDynoQueue withQuorumConn(JedisCommands quorumConn) {
        this.quorumConn = quorumConn;
        return this;
    }
    /** Sets the non-quorum (local Redis) connection used for reads; returns this for chaining. */
    public RedisDynoQueue withNonQuorumConn(JedisCommands nonQuorumConn) {
        this.nonQuorumConn = nonQuorumConn;
        return this;
    }
    /** Sets the unack timeout (RedisQueues passes milliseconds — confirm units); returns this for chaining. */
    public RedisDynoQueue withUnackTime(int unackTime) {
        this.unackTime = unackTime;
        return this;
    }
/**
* @return Number of items in each ConcurrentLinkedQueue from 'unsafePrefetchedIdsAllShardsMap'.
*/
private int unsafeGetNumPrefetchedIds() {
// Note: We use an AtomicInteger due to Java's limitation of not allowing the modification of local native
// data types in lambdas (Java 8).
AtomicInteger totalSize = new AtomicInteger(0);
unsafePrefetchedIdsAllShardsMap.forEach((k,v)->totalSize.addAndGet(v.size()));
return totalSize.get();
}
    /** @return the logical queue name this instance serves. */
    @Override
    public String getName() {
        return queueName;
    }
    /** @return the current unack timeout value. */
    @Override
    public int getUnackTime() {
        return unackTime;
    }
    /**
     * Persists each message body into the message-store hash, then enqueues its ID into a
     * shard chosen by the sharding strategy.
     *
     * @param messages messages to enqueue
     * @return the IDs of the pushed messages, in input order
     */
    @Override
    public List<String> push(final List<Message> messages) {
        Stopwatch sw = monitor.start(monitor.push, messages.size());
        try {
            execute("push", "(a shard in) " + queueName, () -> {
                for (Message message : messages) {
                    String json = om.writeValueAsString(message);
                    // Body is stored before the ID is enqueued, so any consumer that sees
                    // the ID can resolve the payload.
                    quorumConn.hset(messageStoreKey, message.getId(), json);
                    // Priority is folded into the fractional part of the score as a
                    // tiebreaker; assumes 0 <= priority < 100 — TODO confirm.
                    double priority = message.getPriority() / 100.0;
                    // Score = earliest-visible timestamp (now + timeout) plus the priority fraction.
                    double score = Long.valueOf(clock.millis() + message.getTimeout()).doubleValue() + priority;
                    String shard = shardingStrategy.getNextShard(allShards, message);
                    String queueShard = getQueueShardKey(queueName, shard);
                    quorumConn.zadd(queueShard, score, message.getId());
                }
                return messages;
            });
            return messages.stream().map(msg -> msg.getId()).collect(Collectors.toList());
        } finally {
            sw.stop();
        }
    }
/**
 * Peeks (without removing) up to 'messageCount' messages from the local shard.
 *
 * @param messageCount maximum number of messages to return
 * @return peeked messages; empty list when nothing is visible
 */
@Override
public List<Message> peek(final int messageCount) {
    Stopwatch sw = monitor.peek.start();
    try {
        Set<String> ids = peekIds(0, messageCount);
        // peekIds may return null; normalize to an empty result.
        return (ids == null) ? Collections.emptyList() : doPeekBodyHelper(ids);
    } finally {
        sw.stop();
    }
}
/**
 * Peeks (without removing) up to 'messageCount' messages across ALL shards.
 * See the unsafe* disclaimer in DynoQueue.java: duplicates are possible.
 *
 * @param messageCount maximum number of messages to return
 * @return peeked messages; empty list when nothing is visible
 */
@Override
public List<Message> unsafePeekAllShards(final int messageCount) {
    Stopwatch sw = monitor.peek.start();
    try {
        Set<String> ids = peekIdsAllShards(0, messageCount);
        // peekIdsAllShards may return null; normalize to an empty result.
        return (ids == null) ? Collections.emptyList() : doPeekBodyHelper(ids);
    } finally {
        sw.stop();
    }
}
/**
 * Peeks into 'this.localQueueShard' and returns up to 'count' items starting at position 'offset' in the shard.
 *
 * @param offset Number of items to skip over in 'this.localQueueShard'
 * @param count Number of items to return.
 * @param peekTillTs Only ids with score at or below this timestamp are returned; the
 *                   sentinel 0.0 means "everything visible as of now".
 * @return Up to 'count' number of message IDs in a set.
 */
private Set<String> peekIds(final int offset, final int count, final double peekTillTs) {
return execute("peekIds", localQueueShard, () -> {
// 0.0 sentinel: substitute (now + 1ms) so currently-due items are included.
double peekTillTsOrNow = (peekTillTs == 0.0) ? Long.valueOf(clock.millis() + 1).doubleValue() : peekTillTs;
return doPeekIdsFromShardHelper(localQueueShard, peekTillTsOrNow, offset, count);
});
}
// Convenience overload: peek ids visible as of now (passes the 0.0 "now" sentinel).
private Set<String> peekIds(final int offset, final int count) {
return peekIds(offset, count, 0.0);
}
/**
 * Same as 'peekIds()' but looks into all shards of the queue ('this.allShards').
 *
 * @param offset Number of items to skip over in each shard.
 * @param count Number of items to return.
 * @return Up to 'count' number of message IDs in a set.
 */
private Set<String> peekIdsAllShards(final int offset, final int count) {
    return execute("peekIdsAllShards", localQueueShard, () -> {
        Set<String> scanned = new HashSet<>();
        double now = Long.valueOf(clock.millis() + 1).doubleValue();
        // Renamed from snake_case 'remaining_count' to follow Java naming conventions.
        int remainingCount = count;
        // Try to get as many items from 'this.localQueueShard' first to reduce chances of returning duplicate items.
        // (See unsafe* functions disclaimer in DynoQueue.java)
        scanned.addAll(peekIds(offset, count, now));
        remainingCount -= scanned.size();
        for (String shard : allShards) {
            String queueShardName = getQueueShardKey(queueName, shard);
            // Skip 'localQueueShard'; it was already serviced above.
            if (queueShardName.equals(localQueueShard)) {
                continue;
            }
            Set<String> elems = doPeekIdsFromShardHelper(queueShardName, now, offset, count);
            scanned.addAll(elems);
            remainingCount -= elems.size();
            if (remainingCount <= 0) {
                break;
            }
        }
        return scanned;
    });
}
// Reads up to 'count' ids (after skipping 'offset') with score in [0, peekTillTs]
// from the given shard sorted-set. Read-only: uses the non-quorum connection.
private Set<String> doPeekIdsFromShardHelper(final String queueShardName, final double peekTillTs, final int offset,
final int count) {
return nonQuorumConn.zrangeByScore(queueShardName, 0, peekTillTs, offset, count);
}
/**
 * Takes a set of message IDs and returns a list of Message objects corresponding to them.
 * Read only, does not make any updates.
 *
 * @param messageIds Set of message IDs to peek.
 * @return a list of Message objects corresponding to 'messageIds'
 */
private List<Message> doPeekBodyHelper(Set<String> messageIds) {
    // Renamed param from snake_case 'message_ids'; return the execute() result directly
    // instead of via a redundant local.
    return execute("peek", messageStoreKey, () -> {
        List<Message> messages = new LinkedList<>();
        for (String id : messageIds) {
            // Payload reads are non-critical, so the non-quorum connection suffices.
            String json = nonQuorumConn.hget(messageStoreKey, id);
            messages.add(om.readValue(json, Message.class));
        }
        return messages;
    });
}
/**
 * Pops up to 'messageCount' messages from the local shard, waiting up to 'wait' 'unit's
 * for enough ids to become visible. Popped messages are moved to the unack set and stay
 * there until ack()'d or until the unack timeout expires.
 */
@Override
public List<Message> pop(int messageCount, int wait, TimeUnit unit) {
if (messageCount < 1) {
return Collections.emptyList();
}
Stopwatch sw = monitor.start(monitor.pop, messageCount);
try {
long start = clock.millis();
long waitFor = unit.toMillis(wait);
// Record demand; the prefetch helper decrements this as ids are fetched.
numIdsToPrefetch.addAndGet(messageCount);
// We prefetch message IDs here first before attempting to pop them off the sorted set.
// The reason we do this (as opposed to just popping from the head of the sorted set),
// is that due to the eventually consistent nature of Dynomite, the different replicas of the same
// sorted set _may_ not look exactly the same at any given time, i.e. they may have a different number of
// items due to replication lag.
// So, we first peek into the sorted set to find the list of message IDs that we know for sure are
// replicated across all replicas and then attempt to pop them based on those message IDs.
prefetchIds();
// Poll every 200ms until we have enough ids or the wait budget is exhausted.
while (prefetchedIds.size() < messageCount && ((clock.millis() - start) < waitFor)) {
Uninterruptibles.sleepUninterruptibly(200, TimeUnit.MILLISECONDS);
prefetchIds();
}
return _pop(shardName, messageCount, prefetchedIds);
} catch (Exception e) {
throw new RuntimeException(e);
} finally {
sw.stop();
}
}
// Pops the message with the given id from the LOCAL shard only, warning if absent.
@Override
public Message popWithMsgId(String messageId) {
return popWithMsgIdHelper(messageId, shardName, true);
}
/**
 * Pops the message with the given id, probing every shard in turn.
 * See the unsafe* disclaimer in DynoQueue.java.
 *
 * @param messageId id of the message to pop
 * @return the popped message, or null if no shard holds it
 */
@Override
public Message unsafePopWithMsgIdAllShards(String messageId) {
    int remaining = allShards.size();
    for (String shard : allShards) {
        remaining--;
        // A message lives in exactly one shard, so misses in earlier shards are expected
        // and would just spam the logs; only the final shard is allowed to warn.
        boolean lastShard = (remaining == 0);
        Message found = popWithMsgIdHelper(messageId, shard, lastShard);
        if (found != null) {
            return found;
        }
    }
    return null;
}
/**
 * Pops the message 'messageId' from 'targetShard': verifies the id is present in the queue
 * shard, reads its payload, moves the id to the unack set (NX), and removes it from the
 * queue shard. Any failed step logs, counts a miss, and returns null.
 *
 * @param messageId id of the message to pop.
 * @param targetShard shard expected to hold the message.
 * @param warnIfNotExists whether to log when the id is absent from this shard (callers
 *        probing multiple shards suppress this to avoid log spam).
 * @return the popped Message, or null.
 */
public Message popWithMsgIdHelper(String messageId, String targetShard, boolean warnIfNotExists) {
    Stopwatch sw = monitor.start(monitor.pop, 1);
    try {
        return execute("popWithMsgId", targetShard, () -> {
            String queueShardName = getQueueShardKey(queueName, targetShard);
            double unackScore = Long.valueOf(clock.millis() + unackTime).doubleValue();
            String unackShardName = getUnackKey(queueName, targetShard);
            // NX: only add the id to the unack set if it is not already there.
            ZAddParams zParams = ZAddParams.zAddParams().nx();
            Long exists = nonQuorumConn.zrank(queueShardName, messageId);
            // If we get back a null type, then the element doesn't exist.
            if (exists == null) {
                // We only have a 'warnIfNotExists' check for this call since not all messages are present in
                // all shards. So we want to avoid a log spam. If any of the following calls return 'null' or '0',
                // we may have hit an inconsistency (because it's in the queue, but other calls have failed),
                // so make sure to log those.
                if (warnIfNotExists) {
                    logger.warn("Cannot find the message with ID {}", messageId);
                }
                monitor.misses.increment();
                return null;
            }
            String json = quorumConn.hget(messageStoreKey, messageId);
            if (json == null) {
                logger.warn("Cannot get the message payload for {}", messageId);
                monitor.misses.increment();
                return null;
            }
            long added = quorumConn.zadd(unackShardName, unackScore, messageId, zParams);
            if (added == 0) {
                logger.warn("cannot add {} to the unack shard {}", messageId, unackShardName);
                monitor.misses.increment();
                return null;
            }
            long removed = quorumConn.zrem(queueShardName, messageId);
            if (removed == 0) {
                // BUGFIX: the original format string had one '{}' but two args, and passed
                // queueName instead of the shard key; now matches the logging in _pop().
                logger.warn("cannot remove {} from the queue shard {}", messageId, queueShardName);
                monitor.misses.increment();
                return null;
            }
            return om.readValue(json, Message.class);
        });
    } finally {
        sw.stop();
    }
}
/**
 * Pops up to 'messageCount' messages across ALL shards, waiting up to 'wait' 'unit's for
 * ids to be prefetched. See the unsafe* disclaimer in DynoQueue.java.
 *
 * FIX: the per-shard loop previously re-serviced the local shard (which is already popped
 * first, before the loop) and never stopped once enough messages were gathered; it now
 * skips the local shard and breaks early, matching peekIdsAllShards()/prefetchIdsAllShards().
 */
public List<Message> unsafePopAllShards(int messageCount, int wait, TimeUnit unit) {
    if (messageCount < 1) {
        return Collections.emptyList();
    }
    Stopwatch sw = monitor.start(monitor.pop, messageCount);
    try {
        long start = clock.millis();
        long waitFor = unit.toMillis(wait);
        unsafeNumIdsToPrefetchAllShards.addAndGet(messageCount);
        prefetchIdsAllShards();
        while (unsafeGetNumPrefetchedIds() < messageCount && ((clock.millis() - start) < waitFor)) {
            Uninterruptibles.sleepUninterruptibly(200, TimeUnit.MILLISECONDS);
            prefetchIdsAllShards();
        }
        int remainingCount = messageCount;
        // Pop as much as possible from the local shard first to reduce chances of returning duplicate items.
        // (See unsafe* functions disclaimer in DynoQueue.java)
        List<Message> popped = _pop(shardName, remainingCount, unsafePrefetchedIdsAllShardsMap.get(localQueueShard));
        remainingCount -= popped.size();
        for (String shard : allShards) {
            if (remainingCount <= 0) {
                break;
            }
            String queueShardName = getQueueShardKey(queueName, shard);
            // Skip the local shard; it was already serviced above.
            if (queueShardName.equals(localQueueShard)) {
                continue;
            }
            List<Message> elems = _pop(shard, remainingCount, unsafePrefetchedIdsAllShardsMap.get(queueShardName));
            popped.addAll(elems);
            remainingCount -= elems.size();
        }
        return popped;
    } catch (Exception e) {
        throw new RuntimeException(e);
    } finally {
        sw.stop();
    }
}
/**
 * Prefetch message IDs from the local shard.
 */
private void prefetchIds() {
double now = Long.valueOf(clock.millis() + 1).doubleValue();
int numPrefetched = doPrefetchIdsHelper(localQueueShard, numIdsToPrefetch, prefetchedIds, now);
if (numPrefetched == 0) {
// Nothing visible in the local shard right now: reset outstanding demand so stale
// requests don't cause over-fetching on a later cycle.
numIdsToPrefetch.set(0);
}
}
/**
 * Prefetch message IDs from all shards.
 */
private void prefetchIdsAllShards() {
double now = Long.valueOf(clock.millis() + 1).doubleValue();
// Try to prefetch as many items from 'this.localQueueShard' first to reduce chances of returning duplicate items.
// (See unsafe* functions disclaimer in DynoQueue.java)
doPrefetchIdsHelper(localQueueShard, unsafeNumIdsToPrefetchAllShards,
unsafePrefetchedIdsAllShardsMap.get(localQueueShard), now);
// Local shard satisfied all outstanding demand; no need to touch the other shards.
if (unsafeNumIdsToPrefetchAllShards.get() < 1) return;
for (String shard : allShards) {
String queueShardName = getQueueShardKey(queueName, shard);
if (queueShardName.equals(localQueueShard)) continue; // Skip since we've already serviced the local shard.
doPrefetchIdsHelper(queueShardName, unsafeNumIdsToPrefetchAllShards,
unsafePrefetchedIdsAllShardsMap.get(queueShardName), now);
}
}
/**
 * Attempts to prefetch up to 'prefetchCounter' message IDs by peeking into the given shard
 * sorted-set and stores them in the given concurrent linked queue.
 *
 * @param queueShardName Name of the shard sorted-set to peek into.
 * @param prefetchCounter Number of message IDs to attempt prefetch; decremented (never below
 *                        zero) by the number actually fetched.
 * @param prefetchedIdQueue Concurrent Linked Queue where message IDs are stored.
 * @param prefetchFromTs Only ids with score at or below this timestamp are considered.
 * @return number of IDs successfully prefetched (0 when there was no demand or nothing visible).
 */
private int doPrefetchIdsHelper(String queueShardName, AtomicInteger prefetchCounter,
ConcurrentLinkedQueue<String> prefetchedIdQueue, double prefetchFromTs) {
if (prefetchCounter.get() < 1) {
return 0;
}
int numSuccessfullyPrefetched = 0;
int numToPrefetch = prefetchCounter.get();
Stopwatch sw = monitor.start(monitor.prefetch, numToPrefetch);
try {
// Attempt to peek up to 'numToPrefetch' message Ids.
Set<String> ids = doPeekIdsFromShardHelper(queueShardName, prefetchFromTs, 0, numToPrefetch);
// TODO: Check for duplicates.
// Store prefetched IDs in a queue.
prefetchedIdQueue.addAll(ids);
numSuccessfullyPrefetched = ids.size();
// Account for number of IDs successfully prefetched.
prefetchCounter.addAndGet((-1 * ids.size()));
// Clamp at zero: concurrent decrements could otherwise drive the counter negative.
if(prefetchCounter.get() < 0) {
prefetchCounter.set(0);
}
} finally {
sw.stop();
}
return numSuccessfullyPrefetched;
}
/**
 * Pops up to 'messageCount' messages for the given shard using ids previously prefetched
 * into 'prefetchedIdQueue'. For each id the sequence is: add to the unack set (NX),
 * remove from the queue shard, then read the payload. An id that fails any step is
 * skipped and counted as a miss.
 *
 * @param shard shard to pop from.
 * @param messageCount maximum number of messages to return.
 * @param prefetchedIdQueue queue of candidate message ids (drained as they are consumed).
 * @return popped messages (possibly fewer than requested).
 */
private List<Message> _pop(String shard, int messageCount,
        ConcurrentLinkedQueue<String> prefetchedIdQueue) throws Exception {
    String queueShardName = getQueueShardKey(queueName, shard);
    String unackShardName = getUnackKey(queueName, shard);
    double unackScore = Long.valueOf(clock.millis() + unackTime).doubleValue();
    // NX option indicates add only if it doesn't exist.
    // https://redis.io/commands/zadd#zadd-options-redis-302-or-greater
    ZAddParams zParams = ZAddParams.zAddParams().nx();
    List<Message> popped = new LinkedList<>();
    // The loop condition alone terminates once 'messageCount' messages are gathered;
    // the original also had a redundant identical check at the bottom of the body.
    while (popped.size() != messageCount) {
        String msgId = prefetchedIdQueue.poll();
        if (msgId == null) {
            break;
        }
        long added = quorumConn.zadd(unackShardName, unackScore, msgId, zParams);
        if (added == 0) {
            logger.warn("cannot add {} to the unack shard {}", msgId, unackShardName);
            monitor.misses.increment();
            continue;
        }
        long removed = quorumConn.zrem(queueShardName, msgId);
        if (removed == 0) {
            logger.warn("cannot remove {} from the queue shard {}", msgId, queueShardName);
            monitor.misses.increment();
            continue;
        }
        String json = quorumConn.hget(messageStoreKey, msgId);
        if (json == null) {
            logger.warn("Cannot get the message payload for {}", msgId);
            monitor.misses.increment();
            continue;
        }
        popped.add(om.readValue(json, Message.class));
    }
    return popped;
}
/**
 * Acknowledges (completes) the message with the given id: removes it from whichever
 * shard's unack set holds it, then deletes its payload.
 *
 * @param messageId id of the message to ack
 * @return true if the message was found and removed from an unack set
 */
@Override
public boolean ack(String messageId) {
    Stopwatch sw = monitor.ack.start();
    try {
        return execute("ack", "(a shard in) " + queueName, () -> {
            // The message may be unacked in any shard; stop at the first one that has it.
            for (String shard : allShards) {
                String unackShardKey = getUnackKey(queueName, shard);
                Long removedCount = quorumConn.zrem(unackShardKey, messageId);
                if (removedCount > 0) {
                    // The unack entry is gone; drop the payload too.
                    quorumConn.hdel(messageStoreKey, messageId);
                    return true;
                }
            }
            return false;
        });
    } finally {
        sw.stop();
    }
}
/**
 * Acknowledges each of the given messages individually (no bulk-ack primitive exists).
 *
 * @param messages messages to ack
 */
@Override
public void ack(List<Message> messages) {
    messages.forEach(message -> ack(message.getId()));
}
/**
 * Extends/updates the unack visibility deadline of an unacked message to (now + timeout).
 *
 * @param messageId id of the unacked message
 * @param timeout new timeout, relative to now
 * @return true if the message was found in some shard's unack set
 */
@Override
public boolean setUnackTimeout(String messageId, long timeout) {
    Stopwatch sw = monitor.ack.start();
    try {
        return execute("setUnackTimeout", "(a shard in) " + queueName, () -> {
            double newUnackScore = Long.valueOf(clock.millis() + timeout).doubleValue();
            for (String shard : allShards) {
                String unackShardKey = getUnackKey(queueName, shard);
                // zscore identifies which shard currently holds the unacked message.
                if (quorumConn.zscore(unackShardKey, messageId) != null) {
                    quorumConn.zadd(unackShardKey, newUnackScore, messageId);
                    return true;
                }
            }
            return false;
        });
    } finally {
        sw.stop();
    }
}
/**
 * Updates the timeout of the message with id 'messageId': rewrites the stored payload
 * and, if the message is still waiting in a queue shard, recomputes its score.
 *
 * @param messageId id of the message to update
 * @param timeout new timeout, relative to now
 * @return true if the message was found in a queue shard and rescheduled
 */
@Override
public boolean setTimeout(String messageId, long timeout) {
    return execute("setTimeout", "(a shard in) " + queueName, () -> {
        String json = nonQuorumConn.hget(messageStoreKey, messageId);
        if (json == null) {
            return false;
        }
        Message message = om.readValue(json, Message.class);
        message.setTimeout(timeout);
        for (String shard : allShards) {
            String queueShard = getQueueShardKey(queueName, shard);
            Double score = quorumConn.zscore(queueShard, messageId);
            if (score != null) {
                // BUGFIX: was integer division (getPriority() / 100), which truncated the
                // fractional priority component to 0; push() uses / 100.0 — now consistent.
                double priorityd = message.getPriority() / 100.0;
                double newScore = Long.valueOf(clock.millis() + timeout).doubleValue() + priorityd;
                // XX: only update the score of an existing member.
                ZAddParams params = ZAddParams.zAddParams().xx();
                quorumConn.zadd(queueShard, newScore, messageId, params);
                json = om.writeValueAsString(message);
                quorumConn.hset(messageStoreKey, message.getId(), json);
                return true;
            }
        }
        return false;
    });
}
/**
 * Removes the message with the given id from the queue entirely: best-effort removal from
 * each shard's unack set, then removal from the queue shard and deletion of the payload.
 *
 * @param messageId id of the message to remove
 * @return true if the id was found in (and removed from) some queue shard
 */
@Override
public boolean remove(String messageId) {
    Stopwatch sw = monitor.remove.start();
    try {
        return execute("remove", "(a shard in) " + queueName, () -> {
            for (String shard : allShards) {
                // Best-effort removal from the unack set; return value intentionally ignored.
                String unackShardKey = getUnackKey(queueName, shard);
                quorumConn.zrem(unackShardKey, messageId);
                String queueShardKey = getQueueShardKey(queueName, shard);
                Long removed = quorumConn.zrem(queueShardKey, messageId);
                if (removed > 0) {
                    // Delete the payload too; the unused 'msgRemoved' local was dropped
                    // since the return value was deliberately ignored anyway.
                    quorumConn.hdel(messageStoreKey, messageId);
                    return true;
                }
            }
            return false;
        });
    } finally {
        sw.stop();
    }
}
/**
 * Atomically removes the message with the given id from every queue shard, every unack
 * set, and the payload hash, in a single server-side Lua script round trip.
 *
 * NOTE(review): like the other atomic* helpers, this appears to assume a ring size of 1 —
 * confirm before using with larger rings.
 *
 * @param messageId id of the message to remove
 * @return true if the script removed the id from at least one structure
 */
@Override
public boolean atomicRemove(String messageId) {
    Stopwatch sw = monitor.remove.start();
    try {
        return execute("remove", "(a shard in) " + queueName, () -> {
            String atomicRemoveScript = "local hkey=KEYS[1]\n" +
                    "local msg_id=ARGV[1]\n" +
                    "local num_shards=ARGV[2]\n" +
                    "\n" +
                    "local removed_shard=0\n" +
                    "local removed_unack=0\n" +
                    "local removed_hash=0\n" +
                    "for i=0,num_shards-1 do\n" +
                    " local shard_name = ARGV[3+(i*2)]\n" +
                    " local unack_name = ARGV[3+(i*2)+1]\n" +
                    "\n" +
                    " removed_shard = removed_shard + redis.call('zrem', shard_name, msg_id)\n" +
                    " removed_unack = removed_unack + redis.call('zrem', unack_name, msg_id)\n" +
                    "end\n" +
                    "\n" +
                    "removed_hash = redis.call('hdel', hkey, msg_id)\n" +
                    "if (removed_shard==1 or removed_unack==1 or removed_hash==1) then\n" +
                    " return 1\n" +
                    "end\n" +
                    "return removed_unack\n";
            // Parameterized the previously-raw ImmutableList.Builder.
            ImmutableList.Builder<String> builder = ImmutableList.builder();
            builder.add(messageId);
            builder.add(Integer.toString(allShards.size()));
            for (String shard : allShards) {
                builder.add(getQueueShardKey(queueName, shard));
                builder.add(getUnackKey(queueName, shard));
            }
            // Cast from 'JedisCommands' to 'DynoJedisClient' since the former does not expose 'eval()'.
            Long removed = (Long) ((DynoJedisClient) quorumConn).eval(atomicRemoveScript,
                    Collections.singletonList(messageStoreKey), builder.build());
            return removed == 1;
        });
    } finally {
        sw.stop();
    }
}
/**
 * Enqueues the message only if its id is not already present in any queue shard or
 * unack set.
 *
 * @param message message to (conditionally) enqueue
 * @return true if the message was pushed; false if it already existed somewhere
 */
@Override
public boolean ensure(Message message) {
    return execute("ensure", "(a shard in) " + queueName, () -> {
        String messageId = message.getId();
        // If the id shows up anywhere — queued or unacked — do nothing.
        for (String shard : allShards) {
            if (quorumConn.zscore(getQueueShardKey(queueName, shard), messageId) != null) {
                return false;
            }
            if (quorumConn.zscore(getUnackKey(queueName, shard), messageId) != null) {
                return false;
            }
        }
        // Not found anywhere: enqueue it.
        push(Collections.singletonList(message));
        return true;
    });
}
// Convenience overload: checks the predicate against all shards (not just the local one).
@Override
public boolean containsPredicate(String predicate) {
return containsPredicate(predicate, false);
}
// Convenience overload: searches all shards (not just the local one).
@Override
public String getMsgWithPredicate(String predicate) {
return getMsgWithPredicate(predicate, false);
}
// True iff some message body matches 'predicate' (optionally restricted to the local shard).
@Override
public boolean containsPredicate(String predicate, boolean localShardOnly) {
return execute("containsPredicate", messageStoreKey, () -> getMsgWithPredicate(predicate, localShardOnly) != null);
}
/**
 * Returns the ID of some message whose JSON body matches 'predicate', or null when nothing
 * matches. Matching runs server-side via a Lua script (hscan over the payload hash), so
 * message bodies never cross the network. NOTE: 'predicate' is a Lua string pattern
 * (string.match), not a Java regex.
 */
@Override
public String getMsgWithPredicate(String predicate, boolean localShardOnly) {
return execute("getMsgWithPredicate", messageStoreKey, () -> {
// We use a Lua script here to do predicate matching since we only want to find whether the predicate
// exists in any of the message bodies or not, and the only way to do that is to check for the predicate
// match on the server side.
// The alternative is to have the server return all the hash values back to us and we filter it here on
// the client side. This is not desirable since we would potentially be sending large amounts of data
// over the network only to return a single string value back to the calling application.
String predicateCheckAllLuaScript = "local hkey=KEYS[1]\n" +
"local predicate=ARGV[1]\n" +
"local cursor=0\n" +
"local begin=false\n" +
"while (cursor ~= 0 or begin==false) do\n" +
" local ret = redis.call('hscan', hkey, cursor)\n" +
" local curmsgid\n" +
" for i, content in ipairs(ret[2]) do\n" +
" if (i % 2 ~= 0) then\n" +
" curmsgid = content\n" +
" elseif (string.match(content, predicate)) then\n" +
" return curmsgid\n" +
" end\n" +
" end\n" +
" cursor=tonumber(ret[1])\n" +
" begin=true\n" +
"end\n" +
"return nil";
// Variant that additionally requires the matched id to be ranked in the local shard.
String predicateCheckLocalOnlyLuaScript = "local hkey=KEYS[1]\n" +
"local predicate=ARGV[1]\n" +
"local shard_name=ARGV[2]\n" +
"local cursor=0\n" +
"local begin=false\n" +
"while (cursor ~= 0 or begin==false) do\n" +
" local ret = redis.call('hscan', hkey, cursor)\n" +
"local curmsgid\n" +
"for i, content in ipairs(ret[2]) do\n" +
" if (i % 2 ~= 0) then\n" +
" curmsgid = content\n" +
"elseif (string.match(content, predicate)) then\n" +
"local in_local_shard = redis.call('zrank', shard_name, curmsgid)\n" +
"if (type(in_local_shard) ~= 'boolean' and in_local_shard >= 0) then\n" +
"return curmsgid\n" +
"end\n" +
" end\n" +
"end\n" +
" cursor=tonumber(ret[1])\n" +
"begin=true\n" +
"end\n" +
"return nil";
String retval;
if (localShardOnly) {
// Cast from 'JedisCommands' to 'DynoJedisClient' here since the former does not expose 'eval()'.
retval = (String) ((DynoJedisClient) nonQuorumConn).eval(predicateCheckLocalOnlyLuaScript,
Collections.singletonList(messageStoreKey), ImmutableList.of(predicate, localQueueShard));
} else {
// Cast from 'JedisCommands' to 'DynoJedisClient' here since the former does not expose 'eval()'.
retval = (String) ((DynoJedisClient) nonQuorumConn).eval(predicateCheckAllLuaScript,
Collections.singletonList(messageStoreKey), Collections.singletonList(predicate));
}
return retval;
});
}
/**
 * Atomically finds the lowest-score (i.e. highest-priority/earliest) message whose JSON
 * body matches the Lua pattern 'predicate', moves it from its queue shard to the
 * corresponding unack set (NX), and returns it — all in one server-side Lua script.
 *
 * @param predicate Lua string pattern matched against message bodies.
 * @param localShardOnly when true, only the local shard is considered.
 * @return the matching Message, or null when nothing matches.
 */
private Message popMsgWithPredicateObeyPriority(String predicate, boolean localShardOnly) {
String popPredicateObeyPriority = "local hkey=KEYS[1]\n" +
"local predicate=ARGV[1]\n" +
"local num_shards=ARGV[2]\n" +
"local peek_until=tonumber(ARGV[3])\n" +
"local unack_score=tonumber(ARGV[4])\n" +
"\n" +
"local shard_names={}\n" +
"local unack_names={}\n" +
"local shard_lengths={}\n" +
"local largest_shard=-1\n" +
"for i=0,num_shards-1 do\n" +
" shard_names[i+1]=ARGV[5+(i*2)]\n" +
" shard_lengths[i+1] = redis.call('zcard', shard_names[i+1])\n" +
" unack_names[i+1]=ARGV[5+(i*2)+1]\n" +
"\n" +
" if (shard_lengths[i+1] > largest_shard) then\n" +
" largest_shard = shard_lengths[i+1]\n" +
" end\n" +
"end\n" +
"\n" +
"local min_score=-1\n" +
"local min_member\n" +
"local matching_value\n" +
"local owning_shard_idx=-1\n" +
"\n" +
"local num_complete_shards=0\n" +
"for j=0,largest_shard-1 do\n" +
" for i=1,num_shards do\n" +
" local skiploop=false\n" +
" if (shard_lengths[i] < j+1) then\n" +
" skiploop=true\n" +
" end\n" +
"\n" +
" if (skiploop == false) then\n" +
" local element = redis.call('zrange', shard_names[i], j, j, 'WITHSCORES')\n" +
" if ((min_score ~= -1 and min_score < tonumber(element[2])) or peek_until < tonumber(element[2])) then\n" +
" -- This is to make sure we don't process this shard again\n" +
" -- since all elements henceforth are of lower priority than min_member\n" +
" shard_lengths[i]=0\n" +
" num_complete_shards = num_complete_shards + 1\n" +
" else\n" +
" local value = redis.call('hget', hkey, tostring(element[1]))\n" +
" if (value) then\n" +
" if (string.match(value, predicate)) then\n" +
" if (min_score == -1 or tonumber(element[2]) < min_score) then\n" +
" min_score = tonumber(element[2])\n" +
" owning_shard_idx=i\n" +
" min_member = element[1]\n" +
" matching_value = value\n" +
" end\n" +
" end\n" +
" end\n" +
" end\n" +
" end\n" +
" end\n" +
" if (num_complete_shards == num_shards) then\n" +
" break\n" +
" end\n" +
"end\n" +
"\n" +
"if (min_member) then\n" +
" local queue_shard_name=shard_names[owning_shard_idx]\n" +
" local unack_shard_name=unack_names[owning_shard_idx]\n" +
" local zadd_ret = redis.call('zadd', unack_shard_name, 'NX', unack_score, min_member)\n" +
" if (zadd_ret) then\n" +
" redis.call('zrem', queue_shard_name, min_member)\n" +
" end\n" +
"end\n" +
"return {min_member, matching_value}";
double now = Long.valueOf(clock.millis() + 1).doubleValue();
double unackScore = Long.valueOf(clock.millis() + unackTime).doubleValue();
// The script requires the scores as whole numbers
NumberFormat fmt = NumberFormat.getIntegerInstance();
fmt.setGroupingUsed(false);
String nowScoreString = fmt.format(now);
String unackScoreString = fmt.format(unackScore);
ArrayList<String> retval;
if (localShardOnly) {
String unackShardName = getUnackKey(queueName, shardName);
ImmutableList.Builder builder = ImmutableList.builder();
builder.add(predicate);
builder.add(Integer.toString(1));
builder.add(nowScoreString);
builder.add(unackScoreString);
builder.add(localQueueShard);
builder.add(unackShardName);
// Cast from 'JedisCommands' to 'DynoJedisClient' here since the former does not expose 'eval()'.
retval = (ArrayList) ((DynoJedisClient) quorumConn).eval(popPredicateObeyPriority,
Collections.singletonList(messageStoreKey), builder.build());
} else {
ImmutableList.Builder builder = ImmutableList.builder();
builder.add(predicate);
builder.add(Integer.toString(allShards.size()));
builder.add(nowScoreString);
builder.add(unackScoreString);
for (String shard : allShards) {
String queueShard = getQueueShardKey(queueName, shard);
String unackShardName = getUnackKey(queueName, shard);
builder.add(queueShard);
builder.add(unackShardName);
}
// Cast from 'JedisCommands' to 'DynoJedisClient' here since the former does not expose 'eval()'.
retval = (ArrayList) ((DynoJedisClient) quorumConn).eval(popPredicateObeyPriority,
Collections.singletonList(messageStoreKey), builder.build());
}
// Script returns {member, value}; an empty table means no match.
if (retval.size() == 0) return null;
return new Message(retval.get(0), retval.get(1));
}
/**
 * Pops (and moves to the unack set) a message whose body matches the given Lua pattern,
 * honoring priority ordering. Delegates to the Lua-script-based helper.
 *
 * @param predicate Lua pattern matched against message bodies
 * @param localShardOnly when true, only the local shard is considered
 * @return the popped message, or null when nothing matches
 */
@Override
public Message popMsgWithPredicate(String predicate, boolean localShardOnly) {
    Stopwatch sw = monitor.start(monitor.pop, 1);
    try {
        return execute("popMsgWithPredicateObeyPriority", messageStoreKey,
                () -> popMsgWithPredicateObeyPriority(predicate, localShardOnly));
    } finally {
        sw.stop();
    }
}
/**
 * Pops up to 'messageCount' messages from the local shard in one atomic server-side
 * operation, waiting up to 'wait' 'unit's for enough ids to be prefetched.
 *
 * @param messageCount maximum number of messages to pop
 * @param wait how long to wait for ids to become visible
 * @param unit time unit of 'wait'
 * @return popped messages (possibly fewer than requested)
 */
@Override
public List<Message> bulkPop(int messageCount, int wait, TimeUnit unit) {
    if (messageCount < 1) {
        return Collections.emptyList();
    }
    Stopwatch sw = monitor.start(monitor.pop, messageCount);
    try {
        long start = clock.millis();
        long waitFor = unit.toMillis(wait);
        numIdsToPrefetch.addAndGet(messageCount);
        // Prefetch first (see pop()): peeking ensures the ids are replicated everywhere
        // before the atomic pop, avoiding replication-lag misses.
        prefetchIds();
        while (prefetchedIds.size() < messageCount && ((clock.millis() - start) < waitFor)) {
            Uninterruptibles.sleepUninterruptibly(200, TimeUnit.MILLISECONDS);
            prefetchIds();
        }
        // Pop at most what was actually prefetched (Math.min replaces the manual ternary).
        int numToPop = Math.min(prefetchedIds.size(), messageCount);
        return atomicBulkPopHelper(numToPop, prefetchedIds, true);
    } catch (Exception e) {
        throw new RuntimeException(e);
    } finally {
        sw.stop();
    }
}
/**
 * Pops up to 'messageCount' messages across ALL shards in one atomic server-side
 * operation. See the unsafe* disclaimer in DynoQueue.java.
 *
 * @param messageCount maximum number of messages to pop
 * @param wait how long to wait for ids to become visible
 * @param unit time unit of 'wait'
 * @return popped messages (possibly fewer than requested)
 */
@Override
public List<Message> unsafeBulkPop(int messageCount, int wait, TimeUnit unit) {
    if (messageCount < 1) {
        return Collections.emptyList();
    }
    Stopwatch sw = monitor.start(monitor.pop, messageCount);
    try {
        long start = clock.millis();
        long waitFor = unit.toMillis(wait);
        unsafeNumIdsToPrefetchAllShards.addAndGet(messageCount);
        prefetchIdsAllShards();
        while (unsafeGetNumPrefetchedIds() < messageCount && ((clock.millis() - start) < waitFor)) {
            Uninterruptibles.sleepUninterruptibly(200, TimeUnit.MILLISECONDS);
            prefetchIdsAllShards();
        }
        // Pop at most what was actually prefetched (Math.min replaces the manual ternary).
        int numToPop = Math.min(unsafeGetNumPrefetchedIds(), messageCount);
        // Drain the per-shard prefetch queues into one candidate list of 'numToPop' ids.
        ConcurrentLinkedQueue<String> messageIds = new ConcurrentLinkedQueue<>();
        int numPrefetched = 0;
        for (String shard : allShards) {
            String queueShardName = getQueueShardKey(queueName, shard);
            int prefetchedIdsSize = unsafePrefetchedIdsAllShardsMap.get(queueShardName).size();
            for (int i = 0; i < prefetchedIdsSize; ++i) {
                messageIds.add(unsafePrefetchedIdsAllShardsMap.get(queueShardName).poll());
                if (++numPrefetched == numToPop) break;
            }
            if (numPrefetched == numToPop) break;
        }
        return atomicBulkPopHelper(numToPop, messageIds, false);
    } catch (Exception e) {
        throw new RuntimeException(e);
    } finally {
        sw.stop();
    }
}
// TODO: Do code cleanup/consolidation
/**
 * Atomically pops the given prefetched ids via a single server-side Lua script: for each
 * id still present in a queue shard with a due score, the script moves it to the unack set
 * (NX), removes it from the queue shard, and returns its payload. Returns the deserialized
 * payloads. NOTE: the scripts return an empty table if any id is no longer in a shard.
 *
 * @param messageCount number of ids to drain from 'prefetchedIdQueue' and pop.
 * @param prefetchedIdQueue queue of candidate message ids.
 * @param localShardOnly when true, only the local shard's sorted sets are touched.
 */
private List<Message> atomicBulkPopHelper(int messageCount,
ConcurrentLinkedQueue<String> prefetchedIdQueue, boolean localShardOnly) throws IOException {
double now = Long.valueOf(clock.millis() + 1).doubleValue();
double unackScore = Long.valueOf(clock.millis() + unackTime).doubleValue();
// The script requires the scores as whole numbers
NumberFormat fmt = NumberFormat.getIntegerInstance();
fmt.setGroupingUsed(false);
String nowScoreString = fmt.format(now);
String unackScoreString = fmt.format(unackScore);
List<String> messageIds = new ArrayList<>();
for (int i = 0; i < messageCount; ++i) {
messageIds.add(prefetchedIdQueue.poll());
}
String atomicBulkPopScriptLocalOnly="local hkey=KEYS[1]\n" +
"local num_msgs=ARGV[1]\n" +
"local peek_until=ARGV[2]\n" +
"local unack_score=ARGV[3]\n" +
"local queue_shard_name=ARGV[4]\n" +
"local unack_shard_name=ARGV[5]\n" +
"local msg_start_idx = 6\n" +
"local idx = 1\n" +
"local return_vals={}\n" +
"for i=0,num_msgs-1 do\n" +
" local message_id=ARGV[msg_start_idx + i]\n" +
" local exists = redis.call('zscore', queue_shard_name, message_id)\n" +
" if (exists) then\n" +
" if (exists <=peek_until) then\n" +
" local value = redis.call('hget', hkey, message_id)\n" +
" if (value) then\n" +
" local zadd_ret = redis.call('zadd', unack_shard_name, 'NX', unack_score, message_id)\n" +
" if (zadd_ret) then\n" +
" redis.call('zrem', queue_shard_name, message_id)\n" +
" return_vals[idx]=value\n" +
" idx=idx+1\n" +
" end\n" +
" end\n" +
" end\n" +
" else\n" +
" return {}\n" +
" end\n" +
"end\n" +
"return return_vals";
// Multi-shard variant: each id is searched across all shards before being popped.
String atomicBulkPopScript="local hkey=KEYS[1]\n" +
"local num_msgs=ARGV[1]\n" +
"local num_shards=ARGV[2]\n" +
"local peek_until=ARGV[3]\n" +
"local unack_score=ARGV[4]\n" +
"local shard_start_idx = 5\n" +
"local msg_start_idx = 5 + (num_shards * 2)\n" +
"local out_idx = 1\n" +
"local return_vals={}\n" +
"for i=0,num_msgs-1 do\n" +
" local found_msg=false\n" +
" local message_id=ARGV[msg_start_idx + i]\n" +
" for j=0,num_shards-1 do\n" +
" local queue_shard_name=ARGV[shard_start_idx + (j*2)]\n" +
" local unack_shard_name=ARGV[shard_start_idx + (j*2) + 1]\n" +
" local exists = redis.call('zscore', queue_shard_name, message_id)\n" +
" if (exists) then\n" +
" found_msg=true\n" +
" if (exists <=peek_until) then\n" +
" local value = redis.call('hget', hkey, message_id)\n" +
" if (value) then\n" +
" local zadd_ret = redis.call('zadd', unack_shard_name, 'NX', unack_score, message_id)\n" +
" if (zadd_ret) then\n" +
" redis.call('zrem', queue_shard_name, message_id)\n" +
" return_vals[out_idx]=value\n" +
" out_idx=out_idx+1\n" +
" break\n" +
" end\n" +
" end\n" +
" end\n" +
" end\n" +
" end\n" +
" if (found_msg == false) then\n" +
" return {}\n" +
" end\n" +
"end\n" +
"return return_vals";
List<Message> payloads = new ArrayList<>();
if (localShardOnly) {
String unackShardName = getUnackKey(queueName, shardName);
ImmutableList.Builder builder = ImmutableList.builder();
builder.add(Integer.toString(messageCount));
builder.add(nowScoreString);
builder.add(unackScoreString);
builder.add(localQueueShard);
builder.add(unackShardName);
for (int i = 0; i < messageCount; ++i) {
builder.add(messageIds.get(i));
}
List<String> jsonPayloads;
// Cast from 'JedisCommands' to 'DynoJedisClient' here since the former does not expose 'eval()'.
jsonPayloads = (List) ((DynoJedisClient) quorumConn).eval(atomicBulkPopScriptLocalOnly,
Collections.singletonList(messageStoreKey), builder.build());
for (String p : jsonPayloads) {
Message msg = om.readValue(p, Message.class);
payloads.add(msg);
}
} else {
ImmutableList.Builder builder = ImmutableList.builder();
builder.add(Integer.toString(messageCount));
builder.add(Integer.toString(allShards.size()));
builder.add(nowScoreString);
builder.add(unackScoreString);
for (String shard : allShards) {
String queueShard = getQueueShardKey(queueName, shard);
String unackShardName = getUnackKey(queueName, shard);
builder.add(queueShard);
builder.add(unackShardName);
}
for (int i = 0; i < messageCount; ++i) {
builder.add(messageIds.get(i));
}
List<String> jsonPayloads;
// Cast from 'JedisCommands' to 'DynoJedisClient' here since the former does not expose 'eval()'.
jsonPayloads = (List) ((DynoJedisClient) quorumConn).eval(atomicBulkPopScript,
Collections.singletonList(messageStoreKey), builder.build());
for (String p : jsonPayloads) {
Message msg = om.readValue(p, Message.class);
payloads.add(msg);
}
}
return payloads;
}
/**
 * Similar to popWithMsgId() but completes all the operations in one round trip via a
 * server-side Lua script.
 *
 * NOTE: This function assumes that the ring size in the cluster is 1. DO NOT use for APIs
 * that support a ring size larger than 1.
 *
 * @param messageId id of the message to pop.
 * @param localShardOnly when true, only the local shard's sorted sets are touched.
 * @return the raw JSON payload of the popped message, or null.
 */
private String atomicPopWithMsgIdHelper(String messageId, boolean localShardOnly) {
    double now = Long.valueOf(clock.millis() + 1).doubleValue();
    double unackScore = Long.valueOf(clock.millis() + unackTime).doubleValue();
    // The script requires the scores as whole numbers
    NumberFormat fmt = NumberFormat.getIntegerInstance();
    fmt.setGroupingUsed(false);
    String nowScoreString = fmt.format(now);
    String unackScoreString = fmt.format(unackScore);
    String atomicPopScript = "local hkey=KEYS[1]\n" +
            "local message_id=ARGV[1]\n" +
            "local num_shards=ARGV[2]\n" +
            "local peek_until=ARGV[3]\n" +
            "local unack_score=ARGV[4]\n" +
            "for i=0,num_shards-1 do\n" +
            " local queue_shard_name=ARGV[(i*2)+5]\n" +
            " local unack_shard_name=ARGV[(i*2)+5+1]\n" +
            " local exists = redis.call('zscore', queue_shard_name, message_id)\n" +
            " if (exists) then\n" +
            " if (exists <= peek_until) then\n" +
            " local value = redis.call('hget', hkey, message_id)\n" +
            " if (value) then\n" +
            " local zadd_ret = redis.call('zadd', unack_shard_name, 'NX', unack_score, message_id )\n" +
            " if (zadd_ret) then\n" +
            " redis.call('zrem', queue_shard_name, message_id)\n" +
            " return value\n" +
            " end\n" +
            " end\n" +
            " end\n" +
            " end\n" +
            "end\n" +
            "return nil";
    String retval;
    if (localShardOnly) {
        String unackShardName = getUnackKey(queueName, shardName);
        // Cast from 'JedisCommands' to 'DynoJedisClient' since the former does not expose 'eval()'.
        retval = (String) ((DynoJedisClient) quorumConn).eval(atomicPopScript, Collections.singletonList(messageStoreKey),
                ImmutableList.of(messageId, Integer.toString(1), nowScoreString,
                        unackScoreString, localQueueShard, unackShardName));
    } else {
        int numShards = allShards.size();
        // Parameterized the previously-raw builder and removed the dead 'arguments' local
        // (an Arrays.asList copy of the first four args that was built but never used).
        ImmutableList.Builder<String> builder = ImmutableList.builder();
        builder.add(messageId);
        builder.add(Integer.toString(numShards));
        builder.add(nowScoreString);
        builder.add(unackScoreString);
        for (String shard : allShards) {
            builder.add(getQueueShardKey(queueName, shard));
            builder.add(getUnackKey(queueName, shard));
        }
        retval = (String) ((DynoJedisClient) quorumConn).eval(atomicPopScript, Collections.singletonList(messageStoreKey), builder.build());
    }
    return retval;
}
@Override
public Message get(String messageId) {
Stopwatch sw = monitor.get.start();
try {
return execute("get", messageStoreKey, () -> {
String json = quorumConn.hget(messageStoreKey, messageId);
if (json == null) {
logger.warn("Cannot get the message payload " + messageId);
return null;
}
Message msg = om.readValue(json, Message.class);
return msg;
});
} finally {
sw.stop();
}
}
@Override
public List<Message> getAllMessages() {
Map<String, String> allMsgs = nonQuorumConn.hgetAll(messageStoreKey);
List<Message> retList = new ArrayList<>();
for (Map.Entry<String,String> entry: allMsgs.entrySet()) {
Message msg = new Message(entry.getKey(), entry.getValue());
retList.add(msg);
}
return retList;
}
@Override
public Message localGet(String messageId) {
Stopwatch sw = monitor.get.start();
try {
return execute("localGet", messageStoreKey, () -> {
String json = nonQuorumConn.hget(messageStoreKey, messageId);
if (json == null) {
logger.warn("Cannot get the message payload " + messageId);
return null;
}
Message msg = om.readValue(json, Message.class);
return msg;
});
} finally {
sw.stop();
}
}
@Override
public long size() {
Stopwatch sw = monitor.size.start();
try {
return execute("size", "(a shard in) " + queueName, () -> {
long size = 0;
for (String shard : allShards) {
size += nonQuorumConn.zcard(getQueueShardKey(queueName, shard));
}
return size;
});
} finally {
sw.stop();
}
}
@Override
public Map<String, Map<String, Long>> shardSizes() {
Stopwatch sw = monitor.size.start();
Map<String, Map<String, Long>> shardSizes = new HashMap<>();
try {
return execute("shardSizes", "(a shard in) " + queueName, () -> {
for (String shard : allShards) {
long size = nonQuorumConn.zcard(getQueueShardKey(queueName, shard));
long uacked = nonQuorumConn.zcard(getUnackKey(queueName, shard));
Map<String, Long> shardDetails = new HashMap<>();
shardDetails.put("size", size);
shardDetails.put("uacked", uacked);
shardSizes.put(shard, shardDetails);
}
return shardSizes;
});
} finally {
sw.stop();
}
}
@Override
public void clear() {
execute("clear", "(a shard in) " + queueName, () -> {
for (String shard : allShards) {
String queueShard = getQueueShardKey(queueName, shard);
String unackShard = getUnackKey(queueName, shard);
quorumConn.del(queueShard);
quorumConn.del(unackShard);
}
quorumConn.del(messageStoreKey);
return null;
});
}
@Override
public void processUnacks() {
logger.info("processUnacks() will NOT be atomic.");
Stopwatch sw = monitor.processUnack.start();
try {
long queueDepth = size();
monitor.queueDepth.record(queueDepth);
String keyName = getUnackKey(queueName, shardName);
execute("processUnacks", keyName, () -> {
int batchSize = 1_000;
String unackShardName = getUnackKey(queueName, shardName);
double now = Long.valueOf(clock.millis()).doubleValue();
int num_moved_back = 0;
int num_stale = 0;
Set<Tuple> unacks = nonQuorumConn.zrangeByScoreWithScores(unackShardName, 0, now, 0, batchSize);
if (unacks.size() > 0) {
logger.info("processUnacks: Attempting to add " + unacks.size() + " messages back to shard of queue: " + unackShardName);
}
for (Tuple unack : unacks) {
double score = unack.getScore();
String member = unack.getElement();
String payload = quorumConn.hget(messageStoreKey, member);
if (payload == null) {
quorumConn.zrem(unackShardName, member);
++num_stale;
continue;
}
long added_back = quorumConn.zadd(localQueueShard, score, member);
long removed_from_unack = quorumConn.zrem(unackShardName, member);
if (added_back > 0 && removed_from_unack > 0) ++num_moved_back;
}
if (num_moved_back > 0 || num_stale > 0) {
logger.info("processUnacks: Moved back " + num_moved_back + " items. Got rid of " + num_stale + " stale items.");
}
return null;
});
} catch (Exception e) {
logger.error("Error while processing unacks. " + e.getMessage());
} finally {
sw.stop();
}
}
@Override
public List<Message> findStaleMessages() {
return execute("findStaleMessages", localQueueShard, () -> {
List<Message> stale_msgs = new ArrayList<>();
int batchSize = 10;
double now = Long.valueOf(clock.millis()).doubleValue();
long num_stale = 0;
for (String shard : allShards) {
String queueShardName = getQueueShardKey(queueName, shard);
Set<String> elems = nonQuorumConn.zrangeByScore(queueShardName, 0, now, 0, batchSize);
if (elems.size() == 0) {
continue;
}
String findStaleMsgsScript = "local hkey=KEYS[1]\n" +
"local queue_shard=ARGV[1]\n" +
"local unack_shard=ARGV[2]\n" +
"local num_msgs=ARGV[3]\n" +
"\n" +
"local stale_msgs={}\n" +
"local num_stale_idx = 1\n" +
"for i=0,num_msgs-1 do\n" +
" local msg_id=ARGV[4+i]\n" +
"\n" +
" local exists_hash = redis.call('hget', hkey, msg_id)\n" +
" local exists_queue = redis.call('zscore', queue_shard, msg_id)\n" +
" local exists_unack = redis.call('zscore', unack_shard, msg_id)\n" +
"\n" +
" if (exists_hash and exists_queue) then\n" +
" elseif (not (exists_unack)) then\n" +
" stale_msgs[num_stale_idx] = msg_id\n" +
" num_stale_idx = num_stale_idx + 1\n" +
" end\n" +
"end\n" +
"\n" +
"return stale_msgs\n";
String unackKey = getUnackKey(queueName, shard);
ImmutableList.Builder builder = ImmutableList.builder();
builder.add(queueShardName);
builder.add(unackKey);
builder.add(Integer.toString(elems.size()));
for (String msg : elems) {
builder.add(msg);
}
ArrayList<String> stale_msg_ids = (ArrayList) ((DynoJedisClient)quorumConn).eval(findStaleMsgsScript, Collections.singletonList(messageStoreKey), builder.build());
num_stale = stale_msg_ids.size();
if (num_stale > 0) {
logger.info("findStaleMsgs(): Found " + num_stale + " messages present in queue but not in hashmap");
}
for (String m : stale_msg_ids) {
Message msg = new Message();
msg.setId(m);
stale_msgs.add(msg);
}
}
return stale_msgs;
});
}
@Override
public void atomicProcessUnacks() {
logger.info("processUnacks() will be atomic.");
Stopwatch sw = monitor.processUnack.start();
try {
long queueDepth = size();
monitor.queueDepth.record(queueDepth);
String keyName = getUnackKey(queueName, shardName);
execute("processUnacks", keyName, () -> {
int batchSize = 1_000;
String unackShardName = getUnackKey(queueName, shardName);
double now = Long.valueOf(clock.millis()).doubleValue();
long num_moved_back = 0;
long num_stale = 0;
Set<Tuple> unacks = nonQuorumConn.zrangeByScoreWithScores(unackShardName, 0, now, 0, batchSize);
if (unacks.size() > 0) {
logger.info("processUnacks: Attempting to add " + unacks.size() + " messages back to shard of queue: " + unackShardName);
} else {
return null;
}
String atomicProcessUnacksScript = "local hkey=KEYS[1]\n" +
"local unack_shard=ARGV[1]\n" +
"local queue_shard=ARGV[2]\n" +
"local num_unacks=ARGV[3]\n" +
"\n" +
"local unacks={}\n" +
"local unack_scores={}\n" +
"local unack_start_idx = 4\n" +
"for i=0,num_unacks-1 do\n" +
" unacks[i]=ARGV[4 + (i*2)]\n" +
" unack_scores[i]=ARGV[4+(i*2)+1]\n" +
"end\n" +
"\n" +
"local num_moved=0\n" +
"local num_stale=0\n" +
"for i=0,num_unacks-1 do\n" +
" local mem_val = redis.call('hget', hkey, unacks[i])\n" +
" if (mem_val) then\n" +
" redis.call('zadd', queue_shard, unack_scores[i], unacks[i])\n" +
" redis.call('zrem', unack_shard, unacks[i])\n" +
" num_moved=num_moved+1\n" +
" else\n" +
" redis.call('zrem', unack_shard, unacks[i])\n" +
" num_stale=num_stale+1\n" +
" end\n" +
"end\n" +
"\n" +
"return {num_moved, num_stale}\n";
ImmutableList.Builder builder = ImmutableList.builder();
builder.add(unackShardName);
builder.add(localQueueShard);
builder.add(Integer.toString(unacks.size()));
for (Tuple unack : unacks) {
builder.add(unack.getElement());
// The script requires the scores as whole numbers
NumberFormat fmt = NumberFormat.getIntegerInstance();
fmt.setGroupingUsed(false);
String unackScoreString = fmt.format(unack.getScore());
builder.add(unackScoreString);
}
ArrayList<Long> retval = (ArrayList) ((DynoJedisClient)quorumConn).eval(atomicProcessUnacksScript, Collections.singletonList(messageStoreKey), builder.build());
num_moved_back = retval.get(0).longValue();
num_stale = retval.get(1).longValue();
if (num_moved_back > 0 || num_stale > 0) {
logger.info("processUnacks: Moved back " + num_moved_back + " items. Got rid of " + num_stale + " stale items.");
}
return null;
});
} catch (Exception e) {
logger.error("Error while processing unacks. " + e.getMessage());
} finally {
sw.stop();
}
}
private String getQueueShardKey(String queueName, String shard) {
return redisKeyPrefix + ".QUEUE." + queueName + "." + shard;
}
private String getUnackKey(String queueName, String shard) {
return redisKeyPrefix + ".UNACK." + queueName + "." + shard;
}
    /**
     * Stops the background unack-processing scheduler and releases monitor resources.
     *
     * NOTE(review): shutdown() lets an in-flight processUnacks run finish but does not
     * wait for it; the Redis connections are not closed here — presumably owned by the
     * caller. Confirm.
     */
    @Override
    public void close() throws IOException {
        schedulerForUnacksProcessing.shutdown();
        monitor.close();
    }
}
| 3,049 |
0 | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues/redis/QueueUtils.java | package com.netflix.dyno.queues.redis;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializationFeature;
import com.netflix.dyno.connectionpool.exception.DynoException;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
/**
 * Helper class to consolidate functions which might be reused across different DynoQueue implementations.
 */
public class QueueUtils {

    private static final int retryCount = 2;

    // Utility class: static methods only, no instances.
    private QueueUtils() {
    }

    /**
     * Executes the callable, retrying up to {@code retryCount} additional times when it
     * fails with an {@link ExecutionException} caused by a {@link DynoException}.
     *
     * @param opName  name of the operation, used in error messages
     * @param keyName Redis key the operation targets, used in error messages
     * @param r       operation to run
     * @param <R>     result type of the operation
     * @return whatever the callable returns
     * @throws RuntimeException wrapping the underlying failure once retries are exhausted
     */
    public static <R> R execute(String opName, String keyName, Callable<R> r) {
        return executeWithRetry(opName, keyName, r, 0);
    }

    private static <R> R executeWithRetry(String opName, String keyName, Callable<R> r, int retryNum) {
        try {
            return r.call();
        } catch (ExecutionException e) {
            // Only Dyno connection-level failures are considered retryable.
            if (e.getCause() instanceof DynoException) {
                if (retryNum < retryCount) {
                    return executeWithRetry(opName, keyName, r, ++retryNum);
                }
            }
            throw new RuntimeException(e.getCause());
        } catch (Exception e) {
            throw new RuntimeException(
                    "Operation: ( " + opName + " ) failed on key: [" + keyName + " ].", e);
        }
    }

    /**
     * Construct standard objectmapper to use within the DynoQueue instances to read/write Message objects.
     *
     * @return a lenient, compact ObjectMapper: unknown/ignored/null-primitive properties
     *         tolerated on read; null and empty values omitted on write; no indentation
     */
    public static ObjectMapper constructObjectMapper() {
        ObjectMapper om = new ObjectMapper();
        om.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
        om.configure(DeserializationFeature.FAIL_ON_IGNORED_PROPERTIES, false);
        om.configure(DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES, false);
        om.setSerializationInclusion(JsonInclude.Include.NON_NULL);
        om.setSerializationInclusion(JsonInclude.Include.NON_EMPTY);
        om.disable(SerializationFeature.INDENT_OUTPUT);
        return om;
    }
}
| 3,050 |
0 | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues/redis | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues/redis/sharding/ShardingStrategy.java | /**
* Copyright 2018 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.queues.redis.sharding;
import com.netflix.dyno.queues.Message;
import java.util.List;
/**
 * Expose common interface that allow to apply custom sharding strategy.
 */
public interface ShardingStrategy {

    /**
     * Selects the shard the given message should be routed to.
     *
     * @param allShards all shards available for the queue
     * @param message   the message being pushed; implementations may ignore it
     * @return name of the shard to use
     */
    String getNextShard(List<String> allShards, Message message);
}
| 3,051 |
0 | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues/redis | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues/redis/sharding/RoundRobinStrategy.java | /**
* Copyright 2018 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.queues.redis.sharding;
import com.netflix.dyno.queues.Message;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * Round-robin {@link ShardingStrategy}: cycles through the shard list on each call,
 * ignoring the message.
 */
public class RoundRobinStrategy implements ShardingStrategy {

    private final AtomicInteger nextShardIndex = new AtomicInteger(0);

    /**
     * Get shard based on round robin strategy.
     *
     * @param allShards list of shards to cycle through; must be non-empty
     * @param message is ignored in round robin strategy
     * @return the next shard in rotation
     */
    @Override
    public String getNextShard(List<String> allShards, Message message) {
        // getAndIncrement + floorMod yields a valid, non-negative index for every call.
        // The previous check-then-set reset was not atomic (concurrent callers could
        // skip or repeat shards) and, once the int counter overflowed past
        // Integer.MAX_VALUE, produced a negative index and an
        // ArrayIndexOutOfBoundsException. floorMod stays correct across overflow.
        int index = Math.floorMod(nextShardIndex.getAndIncrement(), allShards.size());
        return allShards.get(index);
    }
}
| 3,052 |
0 | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues/redis | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues/redis/v2/QueueBuilder.java | /**
* Copyright 2018 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
*
*/
package com.netflix.dyno.queues.redis.v2;
import com.google.common.collect.Collections2;
import com.google.common.collect.Lists;
import com.netflix.appinfo.AmazonInfo;
import com.netflix.appinfo.AmazonInfo.MetaDataKey;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.InstanceInfo.InstanceStatus;
import com.netflix.discovery.EurekaClient;
import com.netflix.discovery.shared.Application;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostBuilder;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.connectionpool.impl.utils.ConfigUtils;
import com.netflix.dyno.jedis.DynoJedisClient;
import com.netflix.dyno.queues.DynoQueue;
import com.netflix.dyno.queues.ShardSupplier;
import com.netflix.dyno.queues.redis.conn.DynoClientProxy;
import com.netflix.dyno.queues.redis.conn.JedisProxy;
import com.netflix.dyno.queues.redis.conn.RedisConnection;
import com.netflix.dyno.queues.shard.DynoShardSupplier;
import redis.clients.jedis.JedisPool;
import redis.clients.jedis.JedisPoolConfig;
import java.time.Clock;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Builder for {@link DynoQueue} instances backed either by a Dynomite cluster or by
 * plain (non-Dynomite) Redis hosts. When the resolved topology has a single shard the
 * builder returns the underlying {@link RedisPipelineQueue} directly; otherwise it
 * wraps one pipeline queue per shard in a {@link MultiRedisQueue}.
 *
 * @author Viren
 * Builder for the queues.
 *
 */
public class QueueBuilder {

    // Clock used for message scoring; defaults to the system zone clock in build().
    private Clock clock;

    private String queueName;

    // Prefix applied to every Redis key created by the queue.
    private String redisKeyPrefix;

    // Milliseconds after which un-acked messages become visible for re-delivery.
    private int unackTime;

    private String currentShard;

    private ShardSupplier shardSupplier;

    private HostSupplier hs;

    private EurekaClient eurekaClient;

    private String applicationName;

    private Collection<Host> hosts;

    // Set only in the non-Dynomite (plain Redis) mode; see useNonDynomiteRedis().
    private JedisPoolConfig redisPoolConfig;

    // Set only in the Dynomite mode; the quorum client also supplies the topology.
    private DynoJedisClient dynoQuorumClient;

    private DynoJedisClient dynoNonQuorumClient;

    /**
     * @param clock the Clock instance to set
     * @return instance of QueueBuilder
     */
    public QueueBuilder setClock(Clock clock) {
        this.clock = clock;
        return this;
    }

    /**
     * @param appName Eureka application name used by the Eureka-based host supplier
     * @return instance of QueueBuilder
     */
    public QueueBuilder setApplicationName(String appName) {
        this.applicationName = appName;
        return this;
    }

    /**
     * @param eurekaClient Eureka client used to discover hosts
     * @return instance of QueueBuilder
     */
    public QueueBuilder setEurekaClient(EurekaClient eurekaClient) {
        this.eurekaClient = eurekaClient;
        return this;
    }

    /**
     * @param queueName the queueName to set
     * @return instance of QueueBuilder
     */
    public QueueBuilder setQueueName(String queueName) {
        this.queueName = queueName;
        return this;
    }

    /**
     * @param redisKeyPrefix Prefix used for all the keys in Redis
     * @return instance of QueueBuilder
     */
    public QueueBuilder setRedisKeyPrefix(String redisKeyPrefix) {
        this.redisKeyPrefix = redisKeyPrefix;
        return this;
    }

    /**
     * Configures the builder for plain (non-Dynomite) Redis hosts.
     *
     * @param redisPoolConfig Jedis pool configuration used for each host connection
     * @param redisHosts      Redis hosts to shard across
     * @return instance of QueueBuilder
     */
    public QueueBuilder useNonDynomiteRedis(JedisPoolConfig redisPoolConfig, List<Host> redisHosts) {
        this.redisPoolConfig = redisPoolConfig;
        this.hosts = redisHosts;
        return this;
    }

    /**
     * Configures the builder for a Dynomite cluster.
     *
     * @param dynoQuorumClient    client used for quorum (write/consistent) operations
     * @param dynoNonQuorumClient client used for non-quorum reads; falls back to the
     *                            quorum client in build() when null
     * @return instance of QueueBuilder
     */
    public QueueBuilder useDynomite(DynoJedisClient dynoQuorumClient, DynoJedisClient dynoNonQuorumClient) {
        this.dynoQuorumClient = dynoQuorumClient;
        this.dynoNonQuorumClient = dynoNonQuorumClient;
        this.hs = dynoQuorumClient.getConnPool().getConfiguration().getHostSupplier();
        return this;
    }

    /**
     * @param unackTime Time in millisecond, after which the uncked messages will be re-queued for the delivery
     * @return instance of QueueBuilder
     */
    public QueueBuilder setUnackTime(int unackTime) {
        this.unackTime = unackTime;
        return this;
    }

    /**
     * @param currentShard Name of the current shard
     * @return instance of QueueBuilder
     */
    public QueueBuilder setCurrentShard(String currentShard) {
        this.currentShard = currentShard;
        return this;
    }

    /**
     * @param shardSupplier supplier mapping hosts to shard names; defaults to a
     *                      {@link DynoShardSupplier} derived from the local region/zone
     * @return instance of QueueBuilder
     */
    public QueueBuilder setShardSupplier(ShardSupplier shardSupplier) {
        this.shardSupplier = shardSupplier;
        return this;
    }

    /**
     *
     * @return Build an instance of the queue with supplied parameters.
     * @see MultiRedisQueue
     * @see RedisPipelineQueue
     */
    public DynoQueue build() {
        // Dynomite mode is implied by useDynomite() having been called.
        boolean useDynomiteCluster = dynoQuorumClient != null;
        if (useDynomiteCluster) {
            if(hs == null) {
                hs = dynoQuorumClient.getConnPool().getConfiguration().getHostSupplier();
            }
            this.hosts = hs.getHosts();
        }
        // Default the shard supplier / current shard / clock if the caller did not set them.
        if (shardSupplier == null) {
            String region = ConfigUtils.getDataCenter();
            String az = ConfigUtils.getLocalZone();
            shardSupplier = new DynoShardSupplier(hs, region, az);
        }
        if(currentShard == null) {
            currentShard = shardSupplier.getCurrentShard();
        }
        if (clock == null) {
            clock = Clock.systemDefaultZone();
        }
        // Map each shard name to one representative host. If multiple hosts map to the
        // same shard, the last one wins.
        Map<String, Host> shardMap = new HashMap<>();
        for (Host host : hosts) {
            String shard = shardSupplier.getShardForHost(host);
            shardMap.put(shard, host);
        }
        Map<String, RedisPipelineQueue> queues = new HashMap<>();
        for (String queueShard : shardMap.keySet()) {
            Host host = shardMap.get(queueShard);
            String hostAddress = host.getIpAddress();
            if (hostAddress == null || "".equals(hostAddress)) {
                hostAddress = host.getHostName();
            }
            RedisConnection redisConn = null;
            RedisConnection redisConnRead = null;
            if (useDynomiteCluster) {
                redisConn = new DynoClientProxy(dynoQuorumClient);
                // NOTE: mutates the builder field so subsequent iterations (and builds)
                // reuse the quorum client for reads when no non-quorum client was given.
                if(dynoNonQuorumClient == null) {
                    dynoNonQuorumClient = dynoQuorumClient;
                }
                redisConnRead = new DynoClientProxy(dynoNonQuorumClient);
            } else {
                // Plain Redis: one Jedis pool per shard host, shared by reads and writes.
                JedisPool pool = new JedisPool(redisPoolConfig, hostAddress, host.getPort(), 0);
                redisConn = new JedisProxy(pool);
                redisConnRead = new JedisProxy(pool);
            }
            // unackTime is used both as the unack interval and as the schedule period.
            RedisPipelineQueue q = new RedisPipelineQueue(clock, redisKeyPrefix, queueName, queueShard, unackTime, unackTime, redisConn);
            q.setNonQuorumPool(redisConnRead);
            queues.put(queueShard, q);
        }
        if (queues.size() == 1) {
            //This is a queue with a single shard
            return queues.values().iterator().next();
        }
        MultiRedisQueue queue = new MultiRedisQueue(queueName, currentShard, queues);
        return queue;
    }

    // NOTE(review): not referenced anywhere within this class — presumably kept for
    // external wiring or future use; confirm before removing.
    private HostSupplier getHostSupplierFromEureka(String applicationName) {
        return () -> {
            Application app = eurekaClient.getApplication(applicationName);
            List<Host> hosts = new ArrayList<>();
            if (app == null) {
                return hosts;
            }
            List<InstanceInfo> ins = app.getInstances();
            if (ins == null || ins.isEmpty()) {
                return hosts;
            }
            hosts = Lists.newArrayList(Collections2.transform(ins,
                    info -> {
                        Host.Status status = info.getStatus() == InstanceStatus.UP ? Host.Status.Up : Host.Status.Down;
                        String rack = null;
                        if (info.getDataCenterInfo() instanceof AmazonInfo) {
                            AmazonInfo amazonInfo = (AmazonInfo) info.getDataCenterInfo();
                            rack = amazonInfo.get(MetaDataKey.availabilityZone);
                        }
                        //Host host = new Host(info.getHostName(), info.getIPAddr(), rack, status);
                        Host host = new HostBuilder()
                                .setHostname(info.getHostName())
                                .setIpAddress(info.getIPAddr())
                                .setRack(rack).setStatus(status)
                                .createHost();
                        return host;
                    }));
            return hosts;
        };
    }
}
| 3,053 |
0 | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues/redis | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues/redis/v2/RedisPipelineQueue.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.queues.redis.v2;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.dyno.connectionpool.HashPartitioner;
import com.netflix.dyno.connectionpool.impl.hash.Murmur3HashPartitioner;
import com.netflix.dyno.queues.DynoQueue;
import com.netflix.dyno.queues.Message;
import com.netflix.dyno.queues.redis.QueueMonitor;
import com.netflix.dyno.queues.redis.QueueUtils;
import com.netflix.dyno.queues.redis.conn.Pipe;
import com.netflix.dyno.queues.redis.conn.RedisConnection;
import com.netflix.servo.monitor.Stopwatch;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import redis.clients.jedis.Response;
import redis.clients.jedis.Tuple;
import redis.clients.jedis.params.ZAddParams;
import java.io.IOException;
import java.time.Clock;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
/**
 * @author Viren
 * Queue implementation that uses Redis pipelines that improves the throughput under heavy load.
 *
 * All keys for a shard are hash-tagged with "{queue.shard}" so they land in the same
 * Redis slot; message payloads (hash) and unack entries (sorted set) are additionally
 * spread across up to 32 buckets keyed by a Murmur3 hash of the message id.
 */
public class RedisPipelineQueue implements DynoQueue {

    private final Logger logger = LoggerFactory.getLogger(RedisPipelineQueue.class);

    private final Clock clock;

    private final String queueName;

    private final String shardName;

    // Prefix of the hash buckets storing message payloads (bucket index appended).
    private final String messageStoreKeyPrefix;

    // Sorted set holding pending message ids for this shard, scored by ready-time.
    private final String myQueueShard;

    // Prefix of the sorted-set buckets holding in-flight (unacked) message ids.
    private final String unackShardKeyPrefix;

    private final int unackTime;

    private final QueueMonitor monitor;

    private final ObjectMapper om;

    private final RedisConnection connPool;

    private volatile RedisConnection nonQuorumPool;

    private final ScheduledExecutorService schedulerForUnacksProcessing;

    private final HashPartitioner partitioner = new Murmur3HashPartitioner();

    // Number of buckets payload/unack keys are spread across.
    private final int maxHashBuckets = 32;

    // Sleep between polls while pop() waits for messages to become available.
    private final int longPollWaitIntervalInMillis = 10;

    /**
     * Convenience constructor using the system default-zone clock.
     */
    public RedisPipelineQueue(String redisKeyPrefix, String queueName, String shardName, int unackScheduleInMS, int unackTime, RedisConnection pool) {
        this(Clock.systemDefaultZone(), redisKeyPrefix, queueName, shardName, unackScheduleInMS, unackTime, pool);
    }

    /**
     * @param clock            clock used for message scoring and unack deadlines
     * @param redisKeyPrefix   prefix for all Redis keys created by this queue
     * @param queue            logical queue name
     * @param shardName        shard served by this instance
     * @param unackScheduleInMS period of the background unack-requeue task
     * @param unackTime        ms after which an un-acked message becomes visible again
     * @param pool             quorum connection pool
     */
    public RedisPipelineQueue(Clock clock, String redisKeyPrefix, String queue, String shardName, int unackScheduleInMS, int unackTime, RedisConnection pool) {
        this.clock = clock;
        this.queueName = queue;
        // Hash tag keeps all keys of this queue shard in one Redis slot.
        String qName = "{" + queue + "." + shardName + "}";
        this.shardName = shardName;
        this.messageStoreKeyPrefix = redisKeyPrefix + ".MSG." + qName;
        this.myQueueShard = redisKeyPrefix + ".QUEUE." + qName;
        this.unackShardKeyPrefix = redisKeyPrefix + ".UNACK." + qName + ".";
        this.unackTime = unackTime;
        this.connPool = pool;
        // Reads default to the quorum pool until setNonQuorumPool() is called.
        this.nonQuorumPool = pool;
        this.om = QueueUtils.constructObjectMapper();
        this.monitor = new QueueMonitor(qName, shardName);
        schedulerForUnacksProcessing = Executors.newScheduledThreadPool(1);
        schedulerForUnacksProcessing.scheduleAtFixedRate(() -> processUnacks(), unackScheduleInMS, unackScheduleInMS, TimeUnit.MILLISECONDS);
        logger.info(RedisPipelineQueue.class.getName() + " is ready to serve " + qName + ", shard=" + shardName);
    }
    /**
     * @param nonQuorumPool When using a cluster like Dynomite, which relies on the quorum reads, supply a separate non-quorum read connection for ops like size etc.
     */
    public void setNonQuorumPool(RedisConnection nonQuorumPool) {
        this.nonQuorumPool = nonQuorumPool;
    }
    /** @return the logical queue name (without key prefix or shard suffix) */
    @Override
    public String getName() {
        return queueName;
    }

    /** @return ms after which an un-acked message becomes visible for re-delivery */
    @Override
    public int getUnackTime() {
        return unackTime;
    }
@Override
public List<String> push(final List<Message> messages) {
Stopwatch sw = monitor.start(monitor.push, messages.size());
RedisConnection conn = connPool.getResource();
try {
Pipe pipe = conn.pipelined();
for (Message message : messages) {
String json = om.writeValueAsString(message);
pipe.hset(messageStoreKey(message.getId()), message.getId(), json);
double priority = message.getPriority() / 100.0;
double score = Long.valueOf(clock.millis() + message.getTimeout()).doubleValue() + priority;
pipe.zadd(myQueueShard, score, message.getId());
}
pipe.sync();
pipe.close();
return messages.stream().map(msg -> msg.getId()).collect(Collectors.toList());
} catch (Exception e) {
throw new RuntimeException(e);
} finally {
conn.close();
sw.stop();
}
}
    /**
     * Key of the hash bucket holding this message's payload: a Murmur3 hash of the id
     * modulo 32 selects the bucket.
     *
     * NOTE(review): if the partitioner ever returned a negative hash, bucket would be
     * negative and produce a separate key namespace — assumed non-negative; verify
     * against Murmur3HashPartitioner.
     */
    private String messageStoreKey(String msgId) {
        Long hash = partitioner.hash(msgId);
        long bucket = hash % maxHashBuckets;
        return messageStoreKeyPrefix + "." + bucket;
    }

    /** Key of the unack sorted-set bucket for this message id (same bucketing scheme). */
    private String unackShardKey(String messageId) {
        Long hash = partitioner.hash(messageId);
        long bucket = hash % maxHashBuckets;
        return unackShardKeyPrefix + bucket;
    }
@Override
public List<Message> peek(final int messageCount) {
Stopwatch sw = monitor.peek.start();
RedisConnection jedis = connPool.getResource();
try {
Set<String> ids = peekIds(0, messageCount);
if (ids == null) {
return Collections.emptyList();
}
List<Message> messages = new LinkedList<Message>();
for (String id : ids) {
String json = jedis.hget(messageStoreKey(id), id);
Message message = om.readValue(json, Message.class);
messages.add(message);
}
return messages;
} catch (Exception e) {
throw new RuntimeException(e);
} finally {
jedis.close();
sw.stop();
}
}
@Override
public synchronized List<Message> pop(int messageCount, int wait, TimeUnit unit) {
if (messageCount < 1) {
return Collections.emptyList();
}
Stopwatch sw = monitor.start(monitor.pop, messageCount);
List<Message> messages = new LinkedList<>();
int remaining = messageCount;
long time = clock.millis() + unit.toMillis(wait);
try {
do {
List<String> peeked = peekIds(0, remaining).stream().collect(Collectors.toList());
List<Message> popped = _pop(peeked);
int poppedCount = popped.size();
if (poppedCount == messageCount) {
messages = popped;
break;
}
messages.addAll(popped);
remaining -= poppedCount;
if (clock.millis() > time) {
break;
}
try {
Thread.sleep(longPollWaitIntervalInMillis);
} catch (InterruptedException ie) {
logger.error(ie.getMessage(), ie);
}
} while (remaining > 0);
return messages;
} catch (Exception e) {
throw new RuntimeException(e);
} finally {
sw.stop();
}
}
    /** Not supported by the pipeline queue implementation. */
    @Override
    public Message popWithMsgId(String messageId) {
        throw new UnsupportedOperationException();
    }

    /** Not supported by the pipeline queue implementation. */
    @Override
    public Message unsafePopWithMsgIdAllShards(String messageId) {
        throw new UnsupportedOperationException();
    }
    /**
     * Pops the given candidate ids in three pipelined phases:
     *  1) ZADD NX each id into its unack bucket — only ids not already claimed proceed;
     *  2) ZREM the claimed ids from the queue shard;
     *  3) HGET the payloads of the ids actually removed.
     * Ids that lose any phase are counted as misses and skipped.
     *
     * @param batch candidate message ids (a null entry terminates the batch)
     * @return messages successfully claimed, with shard populated
     */
    private List<Message> _pop(List<String> batch) throws Exception {
        double unackScore = Long.valueOf(clock.millis() + unackTime).doubleValue();
        List<Message> popped = new LinkedList<>();
        // NX: add only if absent, so two concurrent poppers cannot both claim an id.
        ZAddParams zParams = ZAddParams.zAddParams().nx();
        RedisConnection jedis = connPool.getResource();
        try {
            // Phase 1: claim ids by inserting them into the unack sorted sets.
            Pipe pipe = jedis.pipelined();
            List<Response<Long>> zadds = new ArrayList<>(batch.size());
            for (int i = 0; i < batch.size(); i++) {
                String msgId = batch.get(i);
                if (msgId == null) {
                    break;
                }
                zadds.add(pipe.zadd(unackShardKey(msgId), unackScore, msgId, zParams));
            }
            pipe.sync();
            // Phase 2: remove successfully-claimed ids from the queue shard.
            pipe = jedis.pipelined();
            int count = zadds.size();
            List<String> zremIds = new ArrayList<>(count);
            List<Response<Long>> zremRes = new LinkedList<>();
            for (int i = 0; i < count; i++) {
                long added = zadds.get(i).get();
                if (added == 0) {
                    // Another popper already holds this id in unack.
                    if (logger.isDebugEnabled()) {
                        logger.debug("Cannot add {} to unack queue shard", batch.get(i));
                    }
                    monitor.misses.increment();
                    continue;
                }
                String id = batch.get(i);
                zremIds.add(id);
                zremRes.add(pipe.zrem(myQueueShard, id));
            }
            pipe.sync();
            // Phase 3: fetch payloads for ids actually removed from the queue shard.
            pipe = jedis.pipelined();
            List<Response<String>> getRes = new ArrayList<>(count);
            for (int i = 0; i < zremRes.size(); i++) {
                long removed = zremRes.get(i).get();
                if (removed == 0) {
                    if (logger.isDebugEnabled()) {
                        logger.debug("Cannot remove {} from queue shard", zremIds.get(i));
                    }
                    monitor.misses.increment();
                    continue;
                }
                getRes.add(pipe.hget(messageStoreKey(zremIds.get(i)), zremIds.get(i)));
            }
            pipe.sync();
            // NOTE(review): when phase-3 skipped entries, getRes is shorter than zremIds,
            // so zremIds.get(i) in the debug log below may name the wrong id — verify.
            for (int i = 0; i < getRes.size(); i++) {
                String json = getRes.get(i).get();
                if (json == null) {
                    if (logger.isDebugEnabled()) {
                        logger.debug("Cannot read payload for {}", zremIds.get(i));
                    }
                    monitor.misses.increment();
                    continue;
                }
                Message msg = om.readValue(json, Message.class);
                msg.setShard(shardName);
                popped.add(msg);
            }
            return popped;
        } finally {
            jedis.close();
        }
    }
@Override
public boolean ack(String messageId) {
Stopwatch sw = monitor.ack.start();
RedisConnection jedis = connPool.getResource();
try {
Long removed = jedis.zrem(unackShardKey(messageId), messageId);
if (removed > 0) {
jedis.hdel(messageStoreKey(messageId), messageId);
return true;
}
return false;
} finally {
jedis.close();
sw.stop();
}
}
    /**
     * Batch ack: pipeline one — remove each message from its unack bucket; pipeline
     * two — delete the payload of every message that was actually still in-flight.
     *
     * NOTE(review): the two pipelines opened here are never close()d — presumably
     * safe for this Pipe implementation, but push() does close its pipe; verify.
     */
    @Override
    public void ack(List<Message> messages) {
        Stopwatch sw = monitor.ack.start();
        RedisConnection jedis = connPool.getResource();
        Pipe pipe = jedis.pipelined();
        List<Response<Long>> responses = new LinkedList<>();
        try {
            for (Message msg : messages) {
                responses.add(pipe.zrem(unackShardKey(msg.getId()), msg.getId()));
            }
            pipe.sync();
            pipe = jedis.pipelined();
            List<Response<Long>> dels = new LinkedList<>();
            for (int i = 0; i < messages.size(); i++) {
                Long removed = responses.get(i).get();
                if (removed > 0) {
                    // Only delete payloads for messages that were still un-acked.
                    dels.add(pipe.hdel(messageStoreKey(messages.get(i).getId()), messages.get(i).getId()));
                }
            }
            pipe.sync();
        } catch (Exception e) {
            throw new RuntimeException(e);
        } finally {
            jedis.close();
            sw.stop();
        }
    }
@Override
public boolean setUnackTimeout(String messageId, long timeout) {
Stopwatch sw = monitor.ack.start();
RedisConnection jedis = connPool.getResource();
try {
double unackScore = Long.valueOf(clock.millis() + timeout).doubleValue();
Double score = jedis.zscore(unackShardKey(messageId), messageId);
if (score != null) {
jedis.zadd(unackShardKey(messageId), unackScore, messageId);
return true;
}
return false;
} finally {
jedis.close();
sw.stop();
}
}
@Override
public boolean setTimeout(String messageId, long timeout) {
RedisConnection jedis = connPool.getResource();
try {
String json = jedis.hget(messageStoreKey(messageId), messageId);
if (json == null) {
return false;
}
Message message = om.readValue(json, Message.class);
message.setTimeout(timeout);
Double score = jedis.zscore(myQueueShard, messageId);
if (score != null) {
double priorityd = message.getPriority() / 100.0;
double newScore = Long.valueOf(clock.millis() + timeout).doubleValue() + priorityd;
jedis.zadd(myQueueShard, newScore, messageId);
json = om.writeValueAsString(message);
jedis.hset(messageStoreKey(message.getId()), message.getId(), json);
return true;
}
return false;
} catch (Exception e) {
throw new RuntimeException(e);
} finally {
jedis.close();
}
}
@Override
public boolean remove(String messageId) {
Stopwatch sw = monitor.remove.start();
RedisConnection jedis = connPool.getResource();
try {
jedis.zrem(unackShardKey(messageId), messageId);
Long removed = jedis.zrem(myQueueShard, messageId);
Long msgRemoved = jedis.hdel(messageStoreKey(messageId), messageId);
if (removed > 0 && msgRemoved > 0) {
return true;
}
return false;
} finally {
jedis.close();
sw.stop();
}
}
    // Not supported by this queue implementation.
    @Override
    public boolean ensure(Message message) {
        throw new UnsupportedOperationException();
    }
    // Delegates to the two-argument variant with localShardOnly=false (which is unsupported here).
    @Override
    public boolean containsPredicate(String predicate) {
        return containsPredicate(predicate, false);
    }
    // Delegates to the two-argument variant with localShardOnly=false (which is unsupported here).
    @Override
    public String getMsgWithPredicate(String predicate) {
        return getMsgWithPredicate(predicate, false);
    }
    // Predicate-based lookups are not supported by this queue implementation.
    @Override
    public boolean containsPredicate(String predicate, boolean localShardOnly) {
        throw new UnsupportedOperationException();
    }
    // Predicate-based lookups are not supported by this queue implementation.
    @Override
    public String getMsgWithPredicate(String predicate, boolean localShardOnly) {
        throw new UnsupportedOperationException();
    }
    // Predicate-based pops are not supported by this queue implementation.
    @Override
    public Message popMsgWithPredicate(String predicate, boolean localShardOnly) {
        throw new UnsupportedOperationException();
    }
    // Bulk pop is not supported by this queue implementation.
    @Override
    public List<Message> bulkPop(int messageCount, int wait, TimeUnit unit) {
        throw new UnsupportedOperationException();
    }
    // Unsafe bulk pop is not supported by this queue implementation.
    @Override
    public List<Message> unsafeBulkPop(int messageCount, int wait, TimeUnit unit) {
        throw new UnsupportedOperationException();
    }
@Override
public Message get(String messageId) {
Stopwatch sw = monitor.get.start();
RedisConnection jedis = connPool.getResource();
try {
String json = jedis.hget(messageStoreKey(messageId), messageId);
if (json == null) {
if (logger.isDebugEnabled()) {
logger.debug("Cannot get the message payload " + messageId);
}
return null;
}
Message msg = om.readValue(json, Message.class);
return msg;
} catch (Exception e) {
throw new RuntimeException(e);
} finally {
jedis.close();
sw.stop();
}
}
    // Local-shard-only get is not supported by this queue implementation.
    @Override
    public Message localGet(String messageId) {
        throw new UnsupportedOperationException();
    }
@Override
public long size() {
Stopwatch sw = monitor.size.start();
RedisConnection jedis = nonQuorumPool.getResource();
try {
long size = jedis.zcard(myQueueShard);
return size;
} finally {
jedis.close();
sw.stop();
}
}
@Override
public Map<String, Map<String, Long>> shardSizes() {
Stopwatch sw = monitor.size.start();
Map<String, Map<String, Long>> shardSizes = new HashMap<>();
RedisConnection jedis = nonQuorumPool.getResource();
try {
long size = jedis.zcard(myQueueShard);
long uacked = 0;
for (int i = 0; i < maxHashBuckets; i++) {
String unackShardKey = unackShardKeyPrefix + i;
uacked += jedis.zcard(unackShardKey);
}
Map<String, Long> shardDetails = new HashMap<>();
shardDetails.put("size", size);
shardDetails.put("uacked", uacked);
shardSizes.put(shardName, shardDetails);
return shardSizes;
} finally {
jedis.close();
sw.stop();
}
}
@Override
public void clear() {
RedisConnection jedis = connPool.getResource();
try {
jedis.del(myQueueShard);
for (int bucket = 0; bucket < maxHashBuckets; bucket++) {
String unackShardKey = unackShardKeyPrefix + bucket;
jedis.del(unackShardKey);
String messageStoreKey = messageStoreKeyPrefix + "." + bucket;
jedis.del(messageStoreKey);
}
} finally {
jedis.close();
}
}
private Set<String> peekIds(int offset, int count) {
RedisConnection jedis = connPool.getResource();
try {
double now = Long.valueOf(clock.millis() + 1).doubleValue();
Set<String> scanned = jedis.zrangeByScore(myQueueShard, 0, now, offset, count);
return scanned;
} finally {
jedis.close();
}
}
public void processUnacks() {
for (int i = 0; i < maxHashBuckets; i++) {
String unackShardKey = unackShardKeyPrefix + i;
processUnacks(unackShardKey);
}
}
private void processUnacks(String unackShardKey) {
Stopwatch sw = monitor.processUnack.start();
RedisConnection jedis2 = connPool.getResource();
try {
do {
long queueDepth = size();
monitor.queueDepth.record(queueDepth);
int batchSize = 1_000;
double now = Long.valueOf(clock.millis()).doubleValue();
Set<Tuple> unacks = jedis2.zrangeByScoreWithScores(unackShardKey, 0, now, 0, batchSize);
if (unacks.size() > 0) {
logger.debug("Adding " + unacks.size() + " messages back to the queue for " + queueName);
} else {
//Nothing more to be processed
return;
}
List<Tuple> requeue = new LinkedList<>();
for (Tuple unack : unacks) {
double score = unack.getScore();
String member = unack.getElement();
String payload = jedis2.hget(messageStoreKey(member), member);
if (payload == null) {
jedis2.zrem(unackShardKey(member), member);
continue;
}
requeue.add(unack);
}
Pipe pipe = jedis2.pipelined();
for (Tuple unack : requeue) {
double score = unack.getScore();
String member = unack.getElement();
pipe.zadd(myQueueShard, score, member);
pipe.zrem(unackShardKey(member), member);
}
pipe.sync();
} while (true);
} finally {
jedis2.close();
sw.stop();
}
}
    // Enumerating all messages is not supported by this queue implementation.
    @Override
    public List<Message> getAllMessages() {
        throw new UnsupportedOperationException();
    }
    // Atomic unack processing is not supported; use processUnacks() instead.
    @Override
    public void atomicProcessUnacks() {
        throw new UnsupportedOperationException();
    }
    // Stale-message discovery is not supported by this queue implementation.
    @Override
    public List<Message> findStaleMessages() { throw new UnsupportedOperationException(); }
    // Atomic removal is not supported; use remove(String) instead.
    @Override
    public boolean atomicRemove(String messageId) {
        throw new UnsupportedOperationException();
    }
    // Stops the background unack-processing scheduler and releases monitoring resources.
    // NOTE(review): does not await scheduler termination and does not close the connection
    // pools — presumably pool lifecycle is owned by the caller; confirm.
    @Override
    public void close() throws IOException {
        schedulerForUnacksProcessing.shutdown();
        monitor.close();
    }
    // Cross-shard peek is not supported by this single-shard queue implementation.
    @Override
    public List<Message> unsafePeekAllShards(final int messageCount) {
        throw new UnsupportedOperationException();
    }
    // Cross-shard pop is not supported by this single-shard queue implementation.
    @Override
    public List<Message> unsafePopAllShards(int messageCount, int wait, TimeUnit unit) {
        throw new UnsupportedOperationException();
    }
}
| 3,054 |
0 | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues/redis | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues/redis/v2/MultiRedisQueue.java | /**
* Copyright 2017 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.queues.redis.v2;
import com.netflix.dyno.queues.DynoQueue;
import com.netflix.dyno.queues.Message;
import java.io.IOException;
import java.lang.UnsupportedOperationException;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
/**
* @author Viren
* MultiRedisQueue exposes a single queue using multiple redis queues. Each RedisQueue is a shard.
* When pushing elements to the queue, does a round robin to push the message to one of the shards.
* When polling, the message is polled from the current shard (shardName) the instance is associated with.
*/
public class MultiRedisQueue implements DynoQueue {

    /** Shard names, in a fixed order, used for round-robin pushes. */
    private final List<String> shards;

    /** Logical queue name. */
    private final String name;

    /** One RedisPipelineQueue per shard, keyed by shard name. */
    private final Map<String, RedisPipelineQueue> queues;

    /** The shard this instance polls from (the "local" shard). */
    private final RedisPipelineQueue me;

    /**
     * Round-robin cursor for push. Kept within [0, shards.size()) atomically in
     * getNextShard(); the previous incrementAndGet()/reset pattern could go negative
     * on int overflow and throw IndexOutOfBoundsException.
     */
    private final AtomicInteger nextShardIndex = new AtomicInteger(0);

    /**
     * @param queueName logical name of the queue
     * @param shardName name of the local shard; must be a key in {@code queues}
     * @param queues    per-shard queues, keyed by shard name
     * @throws IllegalArgumentException if {@code shardName} is not one of the supplied shards
     */
    public MultiRedisQueue(String queueName, String shardName, Map<String, RedisPipelineQueue> queues) {
        this.name = queueName;
        this.queues = queues;
        this.me = queues.get(shardName);
        if (me == null) {
            throw new IllegalArgumentException("List of shards supplied (" + queues.keySet() + ") does not contain current shard name: " + shardName);
        }
        this.shards = queues.keySet().stream().collect(Collectors.toList());
    }

    @Override
    public String getName() {
        return name;
    }

    @Override
    public int getUnackTime() {
        return me.getUnackTime();
    }

    /**
     * Splits the batch into roughly equal partitions and pushes each partition to the
     * next shard in round-robin order. The last partition absorbs any remainder.
     */
    @Override
    public List<String> push(List<Message> messages) {
        int size = queues.size();
        int partitionSize = messages.size() / size;
        List<String> ids = new LinkedList<>();
        for (int i = 0; i < size - 1; i++) {
            RedisPipelineQueue queue = queues.get(getNextShard());
            int start = i * partitionSize;
            int end = start + partitionSize;
            ids.addAll(queue.push(messages.subList(start, end)));
        }
        RedisPipelineQueue queue = queues.get(getNextShard());
        int start = (size - 1) * partitionSize;
        ids.addAll(queue.push(messages.subList(start, messages.size())));
        return ids;
    }

    /** Pops only from the local shard. */
    @Override
    public List<Message> pop(int messageCount, int wait, TimeUnit unit) {
        return me.pop(messageCount, wait, unit);
    }

    @Override
    public Message popWithMsgId(String messageId) {
        throw new UnsupportedOperationException();
    }

    @Override
    public Message unsafePopWithMsgIdAllShards(String messageId) {
        throw new UnsupportedOperationException();
    }

    /** Peeks only the local shard. */
    @Override
    public List<Message> peek(int messageCount) {
        return me.peek(messageCount);
    }

    /** Tries each shard until one acknowledges the message. */
    @Override
    public boolean ack(String messageId) {
        for (DynoQueue q : queues.values()) {
            if (q.ack(messageId)) {
                return true;
            }
        }
        return false;
    }

    /** Groups messages by their shard and acks each group against its own queue. */
    @Override
    public void ack(List<Message> messages) {
        Map<String, List<Message>> byShard = messages.stream().collect(Collectors.groupingBy(Message::getShard));
        for (Entry<String, List<Message>> e : byShard.entrySet()) {
            queues.get(e.getKey()).ack(e.getValue());
        }
    }

    /** Tries each shard until one accepts the new unack timeout. */
    @Override
    public boolean setUnackTimeout(String messageId, long timeout) {
        for (DynoQueue q : queues.values()) {
            if (q.setUnackTimeout(messageId, timeout)) {
                return true;
            }
        }
        return false;
    }

    /** Tries each shard until one accepts the new timeout. */
    @Override
    public boolean setTimeout(String messageId, long timeout) {
        for (DynoQueue q : queues.values()) {
            if (q.setTimeout(messageId, timeout)) {
                return true;
            }
        }
        return false;
    }

    /** Tries each shard until one removes the message. */
    @Override
    public boolean remove(String messageId) {
        for (DynoQueue q : queues.values()) {
            if (q.remove(messageId)) {
                return true;
            }
        }
        return false;
    }

    @Override
    public boolean ensure(Message message) {
        throw new UnsupportedOperationException();
    }

    @Override
    public boolean containsPredicate(String predicate) {
        return containsPredicate(predicate, false);
    }

    @Override
    public String getMsgWithPredicate(String predicate) {
        return getMsgWithPredicate(predicate, false);
    }

    @Override
    public boolean containsPredicate(String predicate, boolean localShardOnly) {
        throw new UnsupportedOperationException();
    }

    @Override
    public String getMsgWithPredicate(String predicate, boolean localShardOnly) {
        throw new UnsupportedOperationException();
    }

    @Override
    public Message popMsgWithPredicate(String predicate, boolean localShardOnly) {
        throw new UnsupportedOperationException();
    }

    @Override
    public List<Message> bulkPop(int messageCount, int wait, TimeUnit unit) {
        throw new UnsupportedOperationException();
    }

    @Override
    public List<Message> unsafeBulkPop(int messageCount, int wait, TimeUnit unit) {
        throw new UnsupportedOperationException();
    }

    /** Searches every shard; returns the first hit or null if no shard has the message. */
    @Override
    public Message get(String messageId) {
        for (DynoQueue q : queues.values()) {
            Message msg = q.get(messageId);
            if (msg != null) {
                return msg;
            }
        }
        return null;
    }

    @Override
    public Message localGet(String messageId) {
        throw new UnsupportedOperationException();
    }

    /** Total size across all shards. */
    @Override
    public long size() {
        long size = 0;
        for (DynoQueue q : queues.values()) {
            size += q.size();
        }
        return size;
    }

    /** Per-shard size details, keyed by shard name. */
    @Override
    public Map<String, Map<String, Long>> shardSizes() {
        Map<String, Map<String, Long>> sizes = new HashMap<>();
        for (Entry<String, RedisPipelineQueue> e : queues.entrySet()) {
            sizes.put(e.getKey(), e.getValue().shardSizes().get(e.getKey()));
        }
        return sizes;
    }

    @Override
    public void clear() {
        for (DynoQueue q : queues.values()) {
            q.clear();
        }
    }

    @Override
    public void close() throws IOException {
        for (RedisPipelineQueue queue : queues.values()) {
            queue.close();
        }
    }

    @Override
    public List<Message> getAllMessages() {
        throw new UnsupportedOperationException();
    }

    @Override
    public void processUnacks() {
        for (RedisPipelineQueue queue : queues.values()) {
            queue.processUnacks();
        }
    }

    @Override
    public void atomicProcessUnacks() {
        throw new UnsupportedOperationException();
    }

    @Override
    public List<Message> findStaleMessages() { throw new UnsupportedOperationException(); }

    @Override
    public boolean atomicRemove(String messageId) {
        throw new UnsupportedOperationException();
    }

    /**
     * Advances the round-robin cursor and returns the corresponding shard name.
     * updateAndGet wraps the cursor atomically within [0, shards.size()), so the
     * index can never overflow to a negative value.
     */
    private String getNextShard() {
        int indx = nextShardIndex.updateAndGet(i -> (i + 1 >= shards.size()) ? 0 : i + 1);
        return shards.get(indx);
    }

    @Override
    public List<Message> unsafePeekAllShards(final int messageCount) {
        throw new UnsupportedOperationException();
    }

    @Override
    public List<Message> unsafePopAllShards(int messageCount, int wait, TimeUnit unit) {
        throw new UnsupportedOperationException();
    }
}
| 3,055 |
0 | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues/redis | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues/redis/conn/DynoJedisPipe.java | /**
* Copyright 2018 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
*
*/
package com.netflix.dyno.queues.redis.conn;
import com.netflix.dyno.jedis.DynoJedisPipeline;
import redis.clients.jedis.Response;
import redis.clients.jedis.params.ZAddParams;
/**
* @author Viren
* Pipeline abstraction for Dynomite Pipeline.
*/
/**
 * @author Viren
 * Pipeline abstraction for Dynomite Pipeline.
 * Tracks whether any command has been queued so that sync() can be skipped
 * when the pipeline is empty (DynoJedisPipeline rejects an empty sync).
 */
public class DynoJedisPipe implements Pipe {

    private DynoJedisPipeline pipe;

    // True once at least one command has been queued since the last sync().
    private boolean modified;

    public DynoJedisPipe(DynoJedisPipeline pipe) {
        this.pipe = pipe;
        this.modified = false;
    }

    // Records that the pipeline now holds at least one pending command.
    private void markDirty() {
        this.modified = true;
    }

    @Override
    public void hset(String key, String field, String value) {
        pipe.hset(key, field, value);
        markDirty();
    }

    @Override
    public Response<Long> zadd(String key, double score, String member) {
        markDirty();
        return pipe.zadd(key, score, member);
    }

    @Override
    public Response<Long> zadd(String key, double score, String member, ZAddParams zParams) {
        markDirty();
        return pipe.zadd(key, score, member, zParams);
    }

    @Override
    public Response<Long> zrem(String key, String member) {
        markDirty();
        return pipe.zrem(key, member);
    }

    @Override
    public Response<String> hget(String key, String member) {
        markDirty();
        return pipe.hget(key, member);
    }

    @Override
    public Response<Long> hdel(String key, String member) {
        markDirty();
        return pipe.hdel(key, member);
    }

    @Override
    public void sync() {
        // Only flush when something was actually queued; reset the flag afterwards.
        if (modified) {
            pipe.sync();
            modified = false;
        }
    }

    @Override
    public void close() throws Exception {
        pipe.close();
    }
}
| 3,056 |
0 | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues/redis | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues/redis/conn/RedisPipe.java | /**
* Copyright 2018 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
*
*/
package com.netflix.dyno.queues.redis.conn;
import redis.clients.jedis.Pipeline;
import redis.clients.jedis.Response;
import redis.clients.jedis.params.ZAddParams;
/**
* @author Viren
*
* Pipeline abstraction for direct redis connection - when not using Dynomite.
*/
/**
 * @author Viren
 *
 * Pipeline abstraction for direct redis connection - when not using Dynomite.
 * All operations delegate 1:1 to the underlying Jedis {@link Pipeline}; unlike
 * DynoJedisPipe, no dirty-tracking is needed because Jedis tolerates an empty sync().
 */
public class RedisPipe implements Pipe {

    // Underlying Jedis pipeline; commands are buffered until sync() is called.
    private Pipeline pipe;

    public RedisPipe(Pipeline pipe) {
        this.pipe = pipe;
    }

    @Override
    public void hset(String key, String field, String value) {
        pipe.hset(key, field, value);
    }

    @Override
    public Response<Long> zadd(String key, double score, String member) {
        return pipe.zadd(key, score, member);
    }

    @Override
    public Response<Long> zadd(String key, double score, String member, ZAddParams zParams) {
        return pipe.zadd(key, score, member, zParams);
    }

    @Override
    public Response<Long> zrem(String key, String member) {
        return pipe.zrem(key, member);
    }

    @Override
    public Response<String> hget(String key, String member) {
        return pipe.hget(key, member);
    }

    @Override
    public Response<Long> hdel(String key, String member) {
        return pipe.hdel(key, member);
    }

    @Override
    public void sync() {
        pipe.sync();
    }

    @Override
    public void close() throws Exception {
        pipe.close();
    }
}
| 3,057 |
0 | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues/redis | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues/redis/conn/Pipe.java | /**
* Copyright 2018 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.queues.redis.conn;
import com.netflix.dyno.jedis.DynoJedisPipeline;
import redis.clients.jedis.Pipeline;
import redis.clients.jedis.Response;
import redis.clients.jedis.params.ZAddParams;
/**
*
* @author Viren
* <p>
* Abstraction of Redis Pipeline.
* The abstraction is required as there is no common interface between DynoJedisPipeline and Jedis' Pipeline classes.
* </p>
* @see DynoJedisPipeline
* @see Pipeline
* The commands here reflects the RedisCommand structure.
*
*/
public interface Pipe {

    /**
     * Queues an HSET command: sets {@code field} to {@code value} in the hash at {@code key}.
     *
     * @param key The Key
     * @param field Field
     * @param value Value of the Field
     */
    public void hset(String key, String field, String value);

    /**
     * Queues a ZADD command.
     *
     * @param key The Key
     * @param score Score for the member
     * @param member Member to be added within the key
     * @return deferred number of elements added to the sorted set (available after sync())
     */
    public Response<Long> zadd(String key, double score, String member);

    /**
     * Queues a ZADD command with additional parameters (e.g. NX/XX).
     *
     * @param key The Key
     * @param score Score for the member
     * @param member Member to be added within the key
     * @param zParams Parameters
     * @return deferred number of elements added to the sorted set (available after sync())
     */
    public Response<Long> zadd(String key, double score, String member, ZAddParams zParams);

    /**
     * Queues a ZREM command.
     *
     * @param key The Key
     * @param member Member
     * @return deferred number of members removed from the sorted set (available after sync())
     */
    public Response<Long> zrem(String key, String member);

    /**
     * Queues an HGET command.
     *
     * @param key The Key
     * @param member Member
     * @return deferred value of the hash field, or null if absent (available after sync())
     */
    public Response<String> hget(String key, String member);

    /**
     * Queues an HDEL command.
     *
     * @param key the hash key
     * @param member the field to delete from the hash
     * @return deferred number of fields removed (available after sync())
     */
    public Response<Long> hdel(String key, String member);

    /**
     * Flushes all queued commands to the server and populates the Response objects.
     */
    public void sync();

    /**
     * Releases the underlying pipeline resources.
     *
     * @throws Exception if the underlying pipeline fails to close
     */
    public void close() throws Exception;
}
0 | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues/redis | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues/redis/conn/DynoClientProxy.java | /**
* Copyright 2018 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
*
*/
package com.netflix.dyno.queues.redis.conn;
import java.util.Set;
import com.netflix.dyno.jedis.DynoJedisClient;
import redis.clients.jedis.Tuple;
/**
* @author Viren
*
* Dynomite connection
*/
/**
 * @author Viren
 *
 * Dynomite connection. Thin RedisConnection adapter over DynoJedisClient; every
 * command delegates 1:1. DynoJedisClient manages its own connections internally,
 * so getResource() returns this same instance and close() is a no-op.
 */
public class DynoClientProxy implements RedisConnection {

    private DynoJedisClient jedis;

    public DynoClientProxy(DynoJedisClient jedis) {
        this.jedis = jedis;
    }

    @Override
    public RedisConnection getResource() {
        return this;
    }

    @Override
    public void close() {
        //nothing!
    }

    @Override
    public Pipe pipelined() {
        return new DynoJedisPipe(jedis.pipelined());
    }

    @Override
    public String hget(String key, String member) {
        return jedis.hget(key, member);
    }

    @Override
    public Long zrem(String key, String member) {
        return jedis.zrem(key, member);
    }

    @Override
    public Long hdel(String key, String member) {
        return jedis.hdel(key, member);
    }

    @Override
    public Double zscore(String key, String member) {
        return jedis.zscore(key, member);
    }

    @Override
    public void zadd(String key, double score, String member) {
        jedis.zadd(key, score, member);
    }

    @Override
    public void hset(String key, String member, String json) {
        jedis.hset(key, member, json);
    }

    @Override
    public long zcard(String key) {
        return jedis.zcard(key);
    }

    @Override
    public void del(String key) {
        jedis.del(key);
    }

    @Override
    public Set<String> zrangeByScore(String key, int min, double max, int offset, int count) {
        return jedis.zrangeByScore(key, min, max, offset, count);
    }

    @Override
    public Set<Tuple> zrangeByScoreWithScores(String key, int min, double max, int offset, int count) {
        return jedis.zrangeByScoreWithScores(key, min, max, offset, count);
    }
}
| 3,059 |
0 | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues/redis | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues/redis/conn/JedisProxy.java | /**
* Copyright 2018 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
*
*/
package com.netflix.dyno.queues.redis.conn;
import java.util.Set;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisPool;
import redis.clients.jedis.Tuple;
/**
* @author Viren
*
* This class provides the abstraction of a Jedis Connection Pool. Used when using Redis directly without Dynomite.
*/
/**
 * @author Viren
 *
 * This class provides the abstraction of a Jedis Connection Pool. Used when using Redis directly without Dynomite.
 * A JedisProxy is in exactly one of two states:
 * <ul>
 *   <li>pool-backed (constructed with a JedisPool): only {@link #getResource()} is valid;</li>
 *   <li>connection-backed (constructed with a Jedis): all command methods and {@link #close()} are valid.</li>
 * </ul>
 * Previously, calling getResource() on a connection-backed proxy or close() on a
 * pool-backed proxy failed with an unhelpful NullPointerException; these cases are
 * now handled explicitly.
 */
public class JedisProxy implements RedisConnection {

    private final JedisPool pool;

    private final Jedis jedis;

    public JedisProxy(JedisPool pool) {
        this.pool = pool;
        this.jedis = null;
    }

    public JedisProxy(Jedis jedis) {
        this.pool = null;
        this.jedis = jedis;
    }

    @Override
    public RedisConnection getResource() {
        if (pool == null) {
            throw new IllegalStateException("getResource() is only supported on a pool-backed JedisProxy");
        }
        Jedis conn = pool.getResource();
        return new JedisProxy(conn);
    }

    @Override
    public void close() {
        // No-op for a pool-backed proxy: there is no single connection to release.
        if (jedis != null) {
            jedis.close();
        }
    }

    @Override
    public Pipe pipelined() {
        return new RedisPipe(jedis.pipelined());
    }

    @Override
    public String hget(String key, String member) {
        return jedis.hget(key, member);
    }

    @Override
    public Long zrem(String key, String member) {
        return jedis.zrem(key, member);
    }

    @Override
    public Long hdel(String key, String member) {
        return jedis.hdel(key, member);
    }

    @Override
    public Double zscore(String key, String member) {
        return jedis.zscore(key, member);
    }

    @Override
    public void zadd(String key, double unackScore, String member) {
        jedis.zadd(key, unackScore, member);
    }

    @Override
    public void hset(String key, String member, String json) {
        jedis.hset(key, member, json);
    }

    @Override
    public long zcard(String key) {
        return jedis.zcard(key);
    }

    @Override
    public void del(String key) {
        jedis.del(key);
    }

    @Override
    public Set<String> zrangeByScore(String key, int min, double max, int offset, int count) {
        return jedis.zrangeByScore(key, min, max, offset, count);
    }

    @Override
    public Set<Tuple> zrangeByScoreWithScores(String key, int min, double max, int offset, int count) {
        return jedis.zrangeByScoreWithScores(key, min, max, offset, count);
    }
}
| 3,060 |
0 | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues/redis | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues/redis/conn/RedisConnection.java | /**
* Copyright 2018 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dyno.queues.redis.conn;
import java.util.Set;
import com.netflix.dyno.jedis.DynoJedisClient;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.Tuple;
/**
* Abstraction of Redis connection.
*
* @author viren
* <p>
* The methods are 1-1 proxies from Jedis. See Jedis documentation for the details.
* </p>
* @see Jedis
* @see DynoJedisClient
*/
public interface RedisConnection {
/**
*
* @return Returns the underlying connection resource. For connection pool, returns the actual connection
*/
public RedisConnection getResource();
public String hget(String messkeyageStoreKey, String member);
public Long zrem(String key, String member);
public Long hdel(String key, String member);
public Double zscore(String key, String member);
public void zadd(String key, double score, String member);
public void hset(String key, String id, String json);
public long zcard(String key);
public void del(String key);
public Set<String> zrangeByScore(String key, int min, double max, int offset, int count);
public Set<Tuple> zrangeByScoreWithScores(String key, int min, double max, int offset, int count);
public void close();
public Pipe pipelined();
} | 3,061 |
0 | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues/shard/ConsistentAWSDynoShardSupplier.java | package com.netflix.dyno.queues.shard;
import com.netflix.dyno.connectionpool.HostSupplier;
import java.util.HashMap;
import java.util.Map;
public class ConsistentAWSDynoShardSupplier extends ConsistentDynoShardSupplier {
/**
* Dynomite based shard supplier. Keeps the number of shards in parity with the hosts and regions
*
* Note: This ensures that all racks use the same shard names. This fixes issues with the now deprecated DynoShardSupplier
* that would write to the wrong shard if there are cross-region writers/readers.
*
* @param hs Host supplier
* @param region current region
* @param localRack local rack identifier
*/
public ConsistentAWSDynoShardSupplier(HostSupplier hs, String region, String localRack) {
super(hs, region, localRack);
Map<String, String> rackToHashMapEntries = new HashMap<String, String>() {{
this.put("us-east-1c", "c");
this.put("us-east-1d", "d");
this.put("us-east-1e", "e");
this.put("eu-west-1a", "c");
this.put("eu-west-1b", "d");
this.put("eu-west-1c", "e");
this.put("us-west-2a", "c");
this.put("us-west-2b", "d");
this.put("us-west-2c", "e");
}};
setRackToShardMap(rackToHashMapEntries);
}
} | 3,062 |
0 | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues/shard/ConsistentDynoShardSupplier.java | package com.netflix.dyno.queues.shard;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.queues.ShardSupplier;
import java.util.*;
abstract class ConsistentDynoShardSupplier implements ShardSupplier {
protected HostSupplier hs;
protected String region;
protected String localRack;
protected Map<String, String> rackToShardMap;
/**
* Dynomite based shard supplier. Keeps the number of shards in parity with the hosts and regions
* @param hs Host supplier
* @param region current region
* @param localRack local rack identifier
*/
public ConsistentDynoShardSupplier(HostSupplier hs, String region, String localRack) {
this.hs = hs;
this.region = region;
this.localRack = localRack;
}
public void setRackToShardMap(Map<String, String> rackToShardMapEntries) {
rackToShardMap = new HashMap<>(rackToShardMapEntries);
}
@Override
public String getCurrentShard() {
return rackToShardMap.get(localRack);
}
@Override
public Set<String> getQueueShards() {
Set<String> queueShards = new HashSet<>();
List<Host> hosts = hs.getHosts();
for (Host host : hosts) {
queueShards.add(rackToShardMap.get(host.getRack()));
}
return queueShards;
}
@Override
public String getShardForHost(Host host) {
return rackToShardMap.get(host.getRack());
}
} | 3,063 |
0 | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues/shard/SingleShardSupplier.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
*
*/
package com.netflix.dyno.queues.shard;
import com.google.common.collect.Sets;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.queues.ShardSupplier;
import java.util.Set;
/**
* @author Viren
*
*/
/**
 * @author Viren
 *
 * Trivial ShardSupplier for single-shard deployments: every host and the current
 * instance all map to the one configured shard name.
 */
public class SingleShardSupplier implements ShardSupplier {

    // The single shard name served for every query.
    private String shardName;

    public SingleShardSupplier(String shardName) {
        this.shardName = shardName;
    }

    @Override
    public String getCurrentShard() {
        return shardName;
    }

    @Override
    public String getShardForHost(Host host) {
        // Host identity is irrelevant — there is only one shard.
        return shardName;
    }

    @Override
    public Set<String> getQueueShards() {
        return Sets.newHashSet(shardName);
    }
}
| 3,064 |
0 | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues | Create_ds/dyno-queues/dyno-queues-redis/src/main/java/com/netflix/dyno/queues/shard/DynoShardSupplier.java | /**
* Copyright 2016 Netflix, Inc.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
*
*/
package com.netflix.dyno.queues.shard;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.queues.ShardSupplier;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;
/**
* @author Viren
*
* NOTE: This class is deprecated and should not be used. It still remains for backwards compatibility for legacy applications
* New applications must use 'ConsistentAWSDynoShardSupplier' or extend 'ConsistentDynoShardSupplier' for non-AWS environments.
*
*/
@Deprecated
public class DynoShardSupplier implements ShardSupplier {

    private final HostSupplier hs;
    // Kept for API compatibility; not used by the shard mapping itself.
    private final String region;
    private final String localRack;

    // Maps a rack name to a shard name by taking the rack's last character
    // (e.g. "us-east-1a" -> "a").
    private final Function<String, String> rackToShardMap = rack -> rack.substring(rack.length() - 1);

    /**
     * Dynomite based shard supplier. Keeps the number of shards in parity with the hosts and regions.
     *
     * @param hs Host supplier
     * @param region current region
     * @param localRack local rack identifier
     */
    public DynoShardSupplier(HostSupplier hs, String region, String localRack) {
        this.hs = hs;
        this.region = region;
        this.localRack = localRack;
    }

    /** @return the shard derived from this instance's local rack name */
    @Override
    public String getCurrentShard() {
        return rackToShardMap.apply(localRack);
    }

    /** @return one shard per distinct rack across all hosts known to the supplier */
    @Override
    public Set<String> getQueueShards() {
        return hs.getHosts().stream()
                .map(Host::getRack)
                .map(rackToShardMap)
                .collect(Collectors.toSet());
    }

    /** @return the shard for the given host, derived from the host's rack */
    @Override
    public String getShardForHost(Host host) {
        return rackToShardMap.apply(host.getRack());
    }
}
| 3,065 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/java/com/netflix/florida/sidecore/monitoring | Create_ds/dynomite-manager/dynomitemanager-core/java/com/netflix/florida/sidecore/monitoring/test/RedisInfoMetricsTaskTest.java | package com.netflix.florida.sidecore.monitoring.test;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import com.netflix.florida.config.FloridaConfig;
import com.netflix.florida.defaultimpl.test.BlankConfiguration;
import com.netflix.florida.defaultimpl.test.FakeStorageProxy;
import com.netflix.florida.monitoring.JedisFactory;
import com.netflix.florida.monitoring.RedisInfoMetricsTask;
import com.netflix.florida.sidecore.storage.StorageProxy;
import com.netflix.servo.DefaultMonitorRegistry;
import mockit.Expectations;
import mockit.Mocked;
import mockit.integration.junit4.JMockit;
import redis.clients.jedis.Jedis;
@RunWith(JMockit.class)
public class RedisInfoMetricsTaskTest {
// Mocked by JMockit; handed out by the JedisFactory below so the task under
// test never opens a real Redis connection.
@Mocked
Jedis jedis;
/**
 * Feeds a canned Redis INFO payload (src/test/resources/redis_info.txt)
 * through RedisInfoMetricsTask and verifies that the expected number of
 * Servo monitors end up registered.
 */
@Test
public void executeTest() throws Exception {
// Number of monitors the task is expected to register for the sample
// redis_info.txt fixture.
int metricsCountSampleRedisInfo = 26;
File file = new File(new File(".").getCanonicalPath() + "/src/test/resources/redis_info.txt");
final String info = new String(Files.readAllBytes((Paths.get(file.getPath()))));
// Recorded expectations: connect, return the fixture from info(), disconnect.
// NOTE(review): JMockit verifies these interactions in this order.
new Expectations() {
{
jedis.connect();
jedis.info();
result = info;
jedis.disconnect();
}
};
// Factory that always returns the mock, regardless of host/port.
JedisFactory jedisFactory = new JedisFactory() {
@Override
public Jedis newInstance(String hostname, int port) {
return jedis;
}
};
StorageProxy storageProxy = new FakeStorageProxy();
RedisInfoMetricsTask mimt = new RedisInfoMetricsTask(storageProxy, jedisFactory);
mimt.execute();
// The task registers its monitors with the global Servo registry.
Assert.assertNotNull(DefaultMonitorRegistry.getInstance().getRegisteredMonitors());
Assert.assertEquals(metricsCountSampleRedisInfo,
DefaultMonitorRegistry.getInstance().getRegisteredMonitors().size());
}
}
| 3,066 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/java/com/netflix/florida/utils | Create_ds/dynomite-manager/dynomitemanager-core/java/com/netflix/florida/utils/test/FakeSleeper.java | package com.netflix.florida.utils.test;
import com.netflix.florida.sidecore.utils.Sleeper;
/** Test double for {@code Sleeper}: both sleep variants return immediately. */
public class FakeSleeper implements Sleeper {

    @Override
    public void sleep(long waitTimeMs) throws InterruptedException {
        // Intentionally empty: tests must never block.
    }

    public void sleepQuietly(long waitTimeMs) {
        // Intentionally empty: tests must never block.
    }
}
| 3,067 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/java/com/netflix/florida/utils | Create_ds/dynomite-manager/dynomitemanager-core/java/com/netflix/florida/utils/test/ServoMetricsTastTest.java | package com.netflix.florida.utils.test;
import java.util.concurrent.ConcurrentHashMap;
import junit.framework.Assert;
import org.junit.Test;
import com.netflix.florida.monitoring.ServoMetricsTask;
import com.netflix.servo.monitor.NumericMonitor;
public class ServoMetricsTastTest {
@Test
public void test() throws Exception {
String json = "{\"service\":\"dynomite\", \"source\":\"florida-i-16ca1846\", \"version\":\"0.3.1\", \"uptime\":40439, \"timestamp\":1399064677, \"datacenter\":\"DC1\","
+ "\"dyn_o_mite\":"
+ "{\"client_eof\":0, \"client_err\":0, \"client_connections\":3, \"server_ejects\":0, \"forward_error\":0, \"fragments\":0, \"stats_count\":22,"
+ "\"127.0.0.1\":"
+ "{\"server_eof\":0, \"server_err\":0, \"server_timedout\":11, \"server_connections\":3, \"server_ejected_at\":0, \"requests\":20000,"
+ "\"request_bytes\":0, \"responses\":5, \"response_bytes\":0, \"in_queue\":0, \"in_queue_bytes\":0, \"out_queue\":0,"
+ "\"out_queue_bytes\":0" + "}" + "}" + "}";
ServoMetricsTask impl = new ServoMetricsTask(null);
impl.processJsonResponse(json);
ConcurrentHashMap<String, NumericMonitor<Number>> metricMap = impl.getMetricsMap();
testCounterValue("dynomite__client_eof", 0, metricMap);
testCounterValue("dynomite__client_err", 0, metricMap);
testCounterValue("dynomite__client_connections", 3, metricMap);
testCounterValue("dynomite__server_ejects", 0, metricMap);
testCounterValue("dynomite__forward_error", 0, metricMap);
testCounterValue("dynomite__fragments", 0, metricMap);
testCounterValue("dynomite__stats_count", 22, metricMap);
testCounterValue("dynomite__127.0.0.1__server_eof", 0, metricMap);
testCounterValue("dynomite__127.0.0.1__server_err", 0, metricMap);
testCounterValue("dynomite__127.0.0.1__server_timedout", 11, metricMap);
testCounterValue("dynomite__127.0.0.1__server_connections", 3, metricMap);
testCounterValue("dynomite__127.0.0.1__server_ejected_at", 0, metricMap);
testCounterValue("dynomite__127.0.0.1__requests", 20000, metricMap);
testCounterValue("dynomite__127.0.0.1__request_bytes", 0, metricMap);
testCounterValue("dynomite__127.0.0.1__responses", 5, metricMap);
testCounterValue("dynomite__127.0.0.1__response_bytes", 0, metricMap);
testCounterValue("dynomite__127.0.0.1__in_queue", 0, metricMap);
testCounterValue("dynomite__127.0.0.1__in_queue_bytes", 0, metricMap);
testCounterValue("dynomite__127.0.0.1__out_queue", 0, metricMap);
testCounterValue("dynomite__127.0.0.1__out_queue_bytes", 0, metricMap);
}
private void testCounterValue(String name, int expectedValue,
ConcurrentHashMap<String, NumericMonitor<Number>> metricMap) throws Exception {
NumericMonitor<Number> metric = metricMap.get(name);
Assert.assertNotNull(metric);
Assert.assertEquals(expectedValue, metric.getValue().intValue());
}
}
| 3,068 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/java/com/netflix/florida/utils | Create_ds/dynomite-manager/dynomitemanager-core/java/com/netflix/florida/utils/test/RedisInfoParserTest.java | package com.netflix.florida.utils.test;
import java.io.File;
import java.io.FileReader;
import java.util.Map;
import org.junit.Assert;
import org.junit.Test;
import com.netflix.florida.sidecore.storage.RedisInfoParser;
/**
 * Unit test for {@code RedisInfoParser}: parses the sample
 * src/test/resources/redis_info.txt fixture and checks the extracted metrics.
 */
public class RedisInfoParserTest {

    @Test
    public void test() throws Exception {
        File file = new File("./src/test/resources/redis_info.txt");
        RedisInfoParser parser = new RedisInfoParser();
        Map<String, Long> metrics;
        // try-with-resources: the original version leaked the FileReader.
        try (FileReader reader = new FileReader(file)) {
            metrics = parser.parse(reader);
        }
        assertMetric(metrics, "Redis_Server_uptime_in_seconds", 1234L);
        assertMetric(metrics, "Redis_Clients_connected_clients", 4L);
        assertMetric(metrics, "Redis_Clients_client_longest_output_list", 0L);
        assertMetric(metrics, "Redis_Clients_client_biggest_input_buf", 0L);
        assertMetric(metrics, "Redis_Clients_blocked_clients", 0L);
        assertMetric(metrics, "Redis_Memory_used_memory", 314569968L);
        assertMetric(metrics, "Redis_Memory_mem_fragmentation_ratio", 105L);
        assertMetric(metrics, "Redis_Stats_total_connections_received", 3995L);
        assertMetric(metrics, "Redis_Stats_total_commands_processed", 94308679L);
        assertMetric(metrics, "Redis_Stats_instantaneous_ops_per_sec", 6321L);
        assertMetric(metrics, "Redis_Stats_rejected_connections", 0L);
        assertMetric(metrics, "Redis_Stats_expired_keys", 0L);
        assertMetric(metrics, "Redis_Stats_evicted_keys", 0L);
        assertMetric(metrics, "Redis_Stats_keyspace_hits", 41254397L);
        assertMetric(metrics, "Redis_Stats_keyspace_misses", 0L);
        assertMetric(metrics, "Redis_CPU_used_cpu_sys", 2052L);
        assertMetric(metrics, "Redis_CPU_used_cpu_user", 793L);
        assertMetric(metrics, "Redis_Keyspace_db0_keys", 2499968L);
        assertMetric(metrics, "Redis_Keyspace_db0_expires", 0L);
        assertMetric(metrics, "Redis_Keyspace_db0_avg_ttl", 0L);
        // NOTE(review): the original test carried a large block of
        // commented-out assertions for Memory_*_human, Persistence_*,
        // Replication_*, pubsub and *_children metrics; that scope
        // (deliberately unasserted) is preserved here without the dead code.
    }

    /**
     * Asserts a single parsed metric, failing with the metric name so a
     * mismatch is easy to diagnose. Also distinguishes "missing key" (null)
     * from "wrong value", unlike the original unboxing comparison.
     */
    private static void assertMetric(Map<String, Long> metrics, String key, long expected) {
        Assert.assertEquals(key, Long.valueOf(expected), metrics.get(key));
    }
}
| 3,069 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/java/com/netflix/florida/utils | Create_ds/dynomite-manager/dynomitemanager-core/java/com/netflix/florida/utils/test/TokenManagerTest.java | /**
* Copyright 2016 Netflix, Inc. <p/> Licensed under the Apache License, Version 2.0 (the "License"); you may not use
* this file except in compliance with the License. You may obtain a copy of the License at <p/>
* http://www.apache.org/licenses/LICENSE-2.0 <p/> Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions and limitations under the
* License.
*/
package com.netflix.florida.utils.test;
import org.junit.Assert;
import org.junit.Test;
import com.netflix.florida.sidecore.utils.TokenManager;
/**
 * Unit tests for {@code TokenManager} token generation and region offsets.
 * (Method names containing "Worng" are kept verbatim: renaming public test
 * methods would change the class's visible interface.)
 */
public class TokenManagerTest {

    // JUnit instantiates the class per test, so each test gets a fresh manager.
    private final TokenManager tokenManager = new TokenManager();

    @Test
    public void createTokenTest() {
        String token = tokenManager.createToken(0, 1, "us-west-2");
        Assert.assertNotNull(token);
        Assert.assertFalse("".equals(token));
        Assert.assertEquals("1383429731", token);
    }

    @Test
    public void createToken2Test() {
        String token = tokenManager.createToken(1, 2, "us-west-2");
        Assert.assertNotNull(token);
        Assert.assertFalse("".equals(token));
        Assert.assertEquals("3530913378", token);
    }

    @Test
    public void createTokenRackAndSizeTest() {
        String token = tokenManager.createToken(1, 2, 2, "us-west-2");
        Assert.assertNotNull(token);
        Assert.assertFalse("".equals(token));
        Assert.assertEquals("2457171554", token);
    }

    @Test(expected = IllegalArgumentException.class)
    public void createTokenWorngCountTest() {
        tokenManager.createToken(0, -1, "us-west-2");
    }

    @Test(expected = IllegalArgumentException.class)
    public void createTokenWorngSlotTest() {
        tokenManager.createToken(-1, 0, "us-west-2");
    }

    @Test(expected = IllegalArgumentException.class)
    public void createTokenWorngRackCountTest() {
        tokenManager.createToken(1, -1, 2, "us-west-2");
    }

    @Test(expected = IllegalArgumentException.class)
    public void createTokenWorngSizeTest() {
        tokenManager.createToken(1, 1, -1, "us-west-2");
    }

    @Test
    public void createRegionOffSet() {
        tokenManager.createToken(0, 2, "us-west-2");
        int offSet = tokenManager.regionOffset("us-west-2");
        Assert.assertTrue(offSet >= 1);
        Assert.assertEquals(1383429731, offSet);
    }
}
| 3,070 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/java/com/netflix/florida/utils | Create_ds/dynomite-manager/dynomitemanager-core/java/com/netflix/florida/utils/test/ArdbConfParserTest.java | package com.netflix.florida.utils.test;
import java.io.File;
import java.util.Scanner;
import org.junit.Assert;
import org.junit.Test;
import com.netflix.florida.sidecore.storage.ArdbRocksDbRedisCompatible;
/**
 * Verifies that {@code ArdbRocksDbRedisCompatible} rewrites the RocksDB conf
 * file so each tuned option appears exactly once with the expected value.
 */
public class ArdbConfParserTest {

    @Test
    public void test() throws Exception {
        // Arbitrary tuning values; only their presence (exactly once) in the
        // rewritten configuration matters.
        int writeBufferSize = 128;
        int maxWriteBufferNumber = 16;
        int minWriteBufferToMerge = 4;
        long storeMaxMem = 10000000;
        String configPathName = "./src/test/resources/rocksdb.conf";
        ArdbRocksDbRedisCompatible checkConf = new ArdbRocksDbRedisCompatible(storeMaxMem, writeBufferSize,
                maxWriteBufferNumber, minWriteBufferToMerge);
        checkConf.updateConfiguration(configPathName);
        String conf;
        // try-with-resources: the original version leaked the Scanner and its
        // underlying file handle.
        try (Scanner scanner = new Scanner(new File(configPathName))) {
            conf = scanner.useDelimiter("\\Z").next();
        }
        Assert.assertEquals(1, countOccurrences(conf, "write_buffer_size=" + writeBufferSize + "M;"));
        Assert.assertEquals(1, countOccurrences(conf, "max_write_buffer_number=" + maxWriteBufferNumber));
        Assert.assertEquals(1, countOccurrences(conf, "min_write_buffer_number_to_merge=" + minWriteBufferToMerge));
    }

    /** Counts non-overlapping occurrences of {@code needle} in {@code haystack}. */
    private static int countOccurrences(String haystack, String needle) {
        int count = 0;
        int index = 0;
        while ((index = haystack.indexOf(needle, index)) >= 0) {
            count++;
            index += needle.length();
        }
        return count;
    }
}
| 3,071 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/java/com/netflix/florida/utils | Create_ds/dynomite-manager/dynomitemanager-core/java/com/netflix/florida/utils/test/FloridaHealthCheckHandlerTest.java | /**
* Copyright 2016 Netflix, Inc. <p/> Licensed under the Apache License, Version 2.0 (the "License"); you may not use
* this file except in compliance with the License. You may obtain a copy of the License at <p/>
* http://www.apache.org/licenses/LICENSE-2.0 <p/> Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions and limitations under the
* License.
*/
package com.netflix.florida.utils.test;
import org.junit.Assert;
import org.junit.Test;
import com.netflix.florida.config.InstanceState;
import com.netflix.florida.resources.FloridaHealthCheckHandler;
/**
* FloridaHealthCheckHandler unit tests
*
*/
public class FloridaHealthCheckHandlerTest {

    /** A bootstrapping instance must report 503 Service Unavailable. */
    @Test
    public void testHandlerBootstrapping() {
        InstanceState state = new InstanceState() {
            public boolean isBootstrapping() {
                return true;
            }
        };
        Assert.assertEquals(503, statusFor(state));
    }

    /** An unhealthy instance must report 503 Service Unavailable. */
    @Test
    public void testHandlerNotHealthy() {
        InstanceState state = new InstanceState() {
            public boolean isHealthy() {
                return false;
            }
        };
        Assert.assertEquals(503, statusFor(state));
    }

    /** A healthy instance must report 200 OK. */
    @Test
    public void testHandlerOK() {
        InstanceState state = new InstanceState() {
            public boolean isHealthy() {
                return true;
            }
        };
        Assert.assertEquals(200, statusFor(state));
    }

    /** Wraps the given state in a handler and returns its HTTP status code. */
    private static int statusFor(InstanceState state) {
        return new FloridaHealthCheckHandler(state).getStatus();
    }
}
| 3,072 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/java/com/netflix/florida/defaultimpl | Create_ds/dynomite-manager/dynomitemanager-core/java/com/netflix/florida/defaultimpl/test/FakeInstanceIdentity.java | package com.netflix.florida.defaultimpl.test;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.List;
import com.netflix.florida.identity.InstanceIdentity;
/**
 * Test double for {@code InstanceIdentity}: skips real initialization and
 * returns a fixed token plus one seed per availability zone.
 */
public class FakeInstanceIdentity extends InstanceIdentity {

    public FakeInstanceIdentity() throws Exception {
        super(null, null, null, null, null, null, null, new FakeEnvVariables());
    }

    @Override
    public void init() throws Exception {
        // Deliberately empty: prevents the real InstanceIdentity setup from running.
    }

    @Override
    public String getTokens() {
        return "101134286";
    }

    @Override
    public List<String> getSeeds() throws UnknownHostException {
        List<String> seeds = new ArrayList<>(3);
        for (String zone : new String[] { "us-west-2a", "us-west-2b", "us-west-2c" }) {
            seeds.add("dynomite.us-west-2.prod.myaws.com:8101:" + zone + ":us-west-2:1383429731");
        }
        return seeds;
    }
}
| 3,073 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/java/com/netflix/florida/defaultimpl | Create_ds/dynomite-manager/dynomitemanager-core/java/com/netflix/florida/defaultimpl/test/FakeEnvVariables.java | package com.netflix.florida.defaultimpl.test;
import com.netflix.florida.resources.env.IEnvVariables;
/** Test double for {@code IEnvVariables}: fixed cluster, region and rack. */
public class FakeEnvVariables implements IEnvVariables {

    private static final String CLUSTER = "Dynomite";
    private static final String REGION = "us-east-1";
    private static final String RACK = "us-east-1c";

    @Override
    public String getDynomiteClusterName() {
        return CLUSTER;
    }

    @Override
    public String getRegion() {
        return REGION;
    }

    @Override
    public String getRack() {
        return RACK;
    }
}
| 3,074 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/java/com/netflix/florida/defaultimpl | Create_ds/dynomite-manager/dynomitemanager-core/java/com/netflix/florida/defaultimpl/test/FakeStorageProxy.java | package com.netflix.florida.defaultimpl.test;
import java.io.IOException;
import com.netflix.florida.sidecore.storage.Bootstrap;
import com.netflix.florida.sidecore.storage.StorageProxy;
/**
 * Test double for {@code StorageProxy}: every operation is a no-op and every
 * getter returns a neutral default (false, 0, null, or "").
 */
public class FakeStorageProxy implements StorageProxy {

    @Override
    public boolean isAlive() {
        return false;
    }

    @Override
    public long getUptime() {
        return 0;
    }

    @Override
    public Bootstrap warmUpStorage(String[] peers) {
        return null;
    }

    @Override
    public boolean resetStorage() {
        return false;
    }

    @Override
    public boolean takeSnapshot() {
        return false;
    }

    @Override
    public boolean loadingData() {
        return false;
    }

    @Override
    public void stopPeerSync() {
        // no-op
    }

    @Override
    public String getEngine() {
        return null;
    }

    @Override
    public int getEngineNumber() {
        return 0;
    }

    @Override
    public void updateConfiguration() throws IOException {
        // no-op
    }

    @Override
    public String getStartupScript() {
        return null;
    }

    @Override
    public String getStopScript() {
        return null;
    }

    @Override
    public String getIpAddress() {
        return null;
    }

    @Override
    public int getPort() {
        return 0;
    }

    @Override
    public String getUnixPath() {
        return "";
    }

    @Override
    public long getStoreMaxMem() {
        return 0;
    }

    @Override
    public long getTotalAvailableSystemMemory() {
        return 0;
    }
}
| 3,075 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/java/com/netflix/florida/defaultimpl | Create_ds/dynomite-manager/dynomitemanager-core/java/com/netflix/florida/defaultimpl/test/BlankConfiguration.java | package com.netflix.florida.defaultimpl.test;
import java.util.List;
import com.netflix.florida.config.FloridaConfig;
/**
 * Blank {@code FloridaConfig} test double. Boolean flags are off, numeric
 * settings are 0 (except the connection counts, which are 1) and string
 * settings are null, with a few fixed exceptions: both consistency levels are
 * "DC_SAFE_QUORUM", auto-eject is on, the install dir is "/apps/dynomite",
 * and the Redis unix path is the empty string.
 */
public class BlankConfiguration implements FloridaConfig {

    // ---- feature flags -------------------------------------------------

    @Override
    public boolean isWarmBootstrap() {
        return false;
    }

    @Override
    public boolean isVpc() {
        return false;
    }

    @Override
    public boolean isRestoreEnabled() {
        return false;
    }

    @Override
    public boolean isDynomiteMultiDC() {
        return false;
    }

    @Override
    public boolean isBackupEnabled() {
        return false;
    }

    @Override
    public boolean isDualAccount() {
        return false;
    }

    @Override
    public boolean isForceWarm() {
        return false;
    }

    @Override
    public boolean isHealthCheckEnable() {
        return false;
    }

    @Override
    public boolean isPersistenceEnabled() {
        return false;
    }

    // ---- Dynomite process / protocol settings --------------------------

    @Override
    public String getDynomiteYaml() {
        return null;
    }

    @Override
    public String getDynomiteWriteConsistency() {
        return "DC_SAFE_QUORUM";
    }

    @Override
    public String getDynomiteReadConsistency() {
        return "DC_SAFE_QUORUM";
    }

    @Override
    public int getTimeout() {
        return 0;
    }

    @Override
    public int getStorageMaxMemoryPercent() {
        return 0;
    }

    @Override
    public int getServerRetryTimeout() {
        return 0;
    }

    @Override
    public String getDynomiteSeedProvider() {
        return null;
    }

    @Override
    public String getDynomiteIntraClusterSecurity() {
        return null;
    }

    @Override
    public List<String> getRacks() {
        return null;
    }

    @Override
    public String getDynomiteProcessName() {
        return null;
    }

    @Override
    public boolean getDynomiteStoragePreconnect() {
        return false;
    }

    @Override
    public int getStoragePeerPort() {
        return 0;
    }

    @Override
    public int getDynomiteMBufSize() {
        return 0;
    }

    @Override
    public int getMaxTimeToBootstrap() {
        return 0;
    }

    @Override
    public String getDynomiteHashAlgorithm() {
        return null;
    }

    @Override
    public int getDynomiteGossipInterval() {
        return 0;
    }

    @Override
    public String getDistribution() {
        return null;
    }

    @Override
    public boolean getDynomiteAutoEjectHosts() {
        return true;
    }

    @Override
    public String getDynomiteStopScript() {
        return null;
    }

    @Override
    public String getDynomiteStartScript() {
        return null;
    }

    @Override
    public String getDynomiteInstallDir() {
        return "/apps/dynomite";
    }

    @Override
    public int getAllowableBytesSyncDiff() {
        return 0;
    }

    @Override
    public int getDynomiteMaxAllocatedMessages() {
        return 0;
    }

    @Override
    public String getDynomiteClusterName() {
        return null;
    }

    @Override
    public String getDynomiteLocalAddress() {
        return null;
    }

    @Override
    public int getDynomiteClientPort() {
        return 0;
    }

    @Override
    public int getSecuredPeerListenerPort() {
        return 0;
    }

    // ---- backup / restore ----------------------------------------------

    @Override
    public String getRestoreDate() {
        return null;
    }

    @Override
    public String getCassandraKeyspaceName() {
        return null;
    }

    @Override
    public String getBucketName() {
        return null;
    }

    @Override
    public String getCassandraClusterName() {
        return null;
    }

    @Override
    public String getBackupSchedule() {
        return null;
    }

    @Override
    public String getBackupLocation() {
        return null;
    }

    @Override
    public int getBackupHour() {
        return 0;
    }

    // ---- AWS / security -------------------------------------------------

    @Override
    public String getACLGroupName() {
        return null;
    }

    @Override
    public String getClassicAWSRoleAssumptionArn() {
        return null;
    }

    @Override
    public String getVpcAWSRoleAssumptionArn() {
        return null;
    }

    // ---- storage engine (ARDB RocksDB) ----------------------------------

    @Override
    public String getRedisCompatibleEngine() {
        return null;
    }

    @Override
    public int getWriteBufferSize() {
        return 0;
    }

    @Override
    public int getArdbRocksDBMaxWriteBufferNumber() {
        return 0;
    }

    @Override
    public int getArdbRocksDBMinWriteBuffersToMerge() {
        return 0;
    }

    // ---- environment / persistence --------------------------------------

    @Override
    public String getRegion() {
        return null;
    }

    @Override
    public String getRack() {
        return null;
    }

    @Override
    public String getPersistenceLocation() {
        return null;
    }

    @Override
    public String getKeySpaceEvents() {
        return null;
    }

    @Override
    public String persistenceType() {
        return null;
    }

    @Override
    public String getRedisUnixPath() {
        return "";
    }

    // ---- connection pool -------------------------------------------------

    @Override
    public int getDatastoreConnections() {
        return 1;
    }

    @Override
    public int getLocalPeerConnections() {
        return 1;
    }

    @Override
    public int getRemotePeerConnections() {
        return 1;
    }

    @Override
    public boolean getConnectionPoolEnabled() {
        return false;
    }
}
| 3,076 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/java/com/netflix/florida/defaultimpl | Create_ds/dynomite-manager/dynomitemanager-core/java/com/netflix/florida/defaultimpl/test/FakeInstanceState.java | package com.netflix.florida.defaultimpl.test;
import com.netflix.florida.identity.IInstanceState;
public class FakeInstanceState implements IInstanceState {
@Override
public boolean isSideCarProcessAlive() {
return false;
}
@Override
public boolean isBootstrapping() {
return false;
}
@Override
public boolean getYmlWritten() {
return false;
}
@Override
public void setYmlWritten(boolean b) {
// TODO Auto-generated method stub
}
} | 3,077 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/java/com/netflix/florida/defaultimpl | Create_ds/dynomite-manager/dynomitemanager-core/java/com/netflix/florida/defaultimpl/test/FloridaStandardTunerTest.java | package com.netflix.florida.defaultimpl.test;
import static java.nio.file.StandardCopyOption.REPLACE_EXISTING;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
import org.apache.commons.io.FileUtils;
import org.junit.Assert;
import org.junit.Test;
import com.netflix.florida.dynomite.DynomiteStandardTuner;
/**
 * Verifies that {@code DynomiteStandardTuner} writes the expected token, pem
 * path and full seed list into a scratch copy of the sample dynomite YAML.
 */
public class FloridaStandardTunerTest {

    @Test
    public void testWriteAllProperties() throws Exception {
        DynomiteStandardTuner tuner = new DynomiteStandardTuner(new BlankConfiguration(), new FakeInstanceIdentity(),
                new FakeInstanceState(), new FakeStorageProxy(), new FakeEnvVariables(), new FakeInstanceDataRetriever());
        // Work on a copy of the template so the resource file is never modified.
        String yamlPath = System.getProperty("java.io.tmpdir") + "/yaml-tunner.yaml";
        String templateYamlPath = new File(".").getCanonicalPath() + "/src/test/resources/sample-yaml.yaml";
        Files.copy(Paths.get(templateYamlPath), Paths.get(yamlPath), REPLACE_EXISTING);
        tuner.writeAllProperties(yamlPath);
        String result = FileUtils.readFileToString(new File(yamlPath), "UTF-8");
        Assert.assertNotNull(result);
        // Fixed token and pem path produced by the fakes / blank configuration.
        Assert.assertTrue(result.contains("101134286"));
        Assert.assertTrue(result.contains("/apps/dynomite/conf/dynomite.pem"));
        // Build the seed list once instead of constructing (and re-initializing)
        // a fresh FakeInstanceIdentity for every assertion, as the original did.
        for (String seed : new FakeInstanceIdentity().getSeeds()) {
            Assert.assertTrue("missing seed: " + seed, result.contains(seed));
        }
    }
}
| 3,078 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/java/com/netflix/florida/defaultimpl | Create_ds/dynomite-manager/dynomitemanager-core/java/com/netflix/florida/defaultimpl/test/FakeInstanceDataRetriever.java | package com.netflix.florida.defaultimpl.test;
import com.netflix.florida.instance.InstanceDataRetriever;
/** Test double for {@code InstanceDataRetriever}: canned EC2-style metadata. */
public class FakeInstanceDataRetriever implements InstanceDataRetriever {

    private static final String RAC = "us-east-1";
    private static final String HOSTNAME = "dynomite";
    private static final String PUBLIC_IP = "0.0.0.0";
    private static final String INSTANCE_ID = "i-abcdefg";
    private static final String INSTANCE_TYPE = "r3.2xlarge";
    private static final String MAC = "00:00:00:00:00";
    private static final String VPC_ID = "no";

    @Override
    public String getRac() {
        return RAC;
    }

    @Override
    public String getPublicHostname() {
        return HOSTNAME;
    }

    @Override
    public String getPublicIP() {
        return PUBLIC_IP;
    }

    @Override
    public String getInstanceId() {
        return INSTANCE_ID;
    }

    @Override
    public String getInstanceType() {
        return INSTANCE_TYPE;
    }

    @Override
    public String getMac() {
        return MAC;
    }

    @Override
    public String getVpcId() {
        return VPC_ID;
    }
}
| 3,079 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager/FloridaServer.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dynomitemanager;
import java.io.IOException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.config.DynamicPropertyFactory;
import com.netflix.config.DynamicStringProperty;
import com.netflix.dynomitemanager.backup.RestoreTask;
import com.netflix.dynomitemanager.backup.SnapshotTask;
import com.netflix.dynomitemanager.config.FloridaConfig;
import com.netflix.dynomitemanager.config.InstanceState;
import com.netflix.dynomitemanager.dynomite.DynomiteProcessManager;
import com.netflix.dynomitemanager.dynomite.DynomiteRest;
import com.netflix.dynomitemanager.dynomite.DynomiteYamlTask;
import com.netflix.dynomitemanager.dynomite.IDynomiteProcess;
import com.netflix.dynomitemanager.dynomite.ProxyAndStorageResetTask;
import com.netflix.dynomitemanager.monitoring.ProcessMonitorTask;
import com.netflix.dynomitemanager.monitoring.RedisInfoMetricsTask;
import com.netflix.dynomitemanager.monitoring.ServoMetricsTask;
import com.netflix.dynomitemanager.storage.*;
import com.netflix.nfsidecar.aws.UpdateSecuritySettings;
import com.netflix.nfsidecar.config.CommonConfig;
import com.netflix.nfsidecar.identity.InstanceIdentity;
import com.netflix.nfsidecar.scheduler.TaskScheduler;
import com.netflix.nfsidecar.utils.Sleeper;
import com.netflix.servo.DefaultMonitorRegistry;
import com.netflix.servo.monitor.Monitors;
/**
 * Entry point for the Florida side-car. On construction it tunes the Dynomite
 * YAML, optionally restores from backup or warm-bootstraps the node, starts
 * the storage engine and the Dynomite process, and schedules all recurring
 * tasks (security-group updates, backup, metrics, process monitoring).
 */
@Singleton
public class FloridaServer {
    private final TaskScheduler scheduler;
    private final FloridaConfig floridaConfig;
    private final CommonConfig commonConfig;
    private final InstanceIdentity id;
    private final Sleeper sleeper;
    private final DynomiteYamlTask tuneTask;
    private final IDynomiteProcess dynProcess;
    private final InstanceState state;
    private final StorageProcessManager storageProcess;
    private final StorageProxy storageProxy;
    private static final Logger logger = LoggerFactory.getLogger(FloridaServer.class);
    // Archaius fast properties so read/write consistency can be changed at runtime.
    private final DynamicStringProperty readConsistencyFP;
    private final DynamicStringProperty writeConsistencyFP;

    /**
     * Wires all collaborators, runs {@link #initialize()} immediately (any
     * failure is rethrown as a RuntimeException so the container fails fast),
     * registers fast-property callbacks and exposes {@code state} as Servo
     * gauges.
     */
    @Inject
    public FloridaServer(FloridaConfig floridaConfig, CommonConfig commonConfig, TaskScheduler scheduler,
            InstanceIdentity id, Sleeper sleeper, DynomiteYamlTask tuneTask, InstanceState state,
            IDynomiteProcess dynProcess, StorageProcessManager storageProcess, StorageProxy storageProxy) {
        this.floridaConfig = floridaConfig;
        this.commonConfig = commonConfig;
        this.scheduler = scheduler;
        this.id = id;
        this.sleeper = sleeper;
        this.tuneTask = tuneTask;
        this.state = state;
        this.dynProcess = dynProcess;
        this.storageProcess = storageProcess;
        this.storageProxy = storageProxy;
        // NOTE(review): initialize() is a public method invoked from the
        // constructor of a non-final class; an override would run on a
        // partially constructed instance.
        try {
            initialize();
        } catch (Exception e) {
            throw new RuntimeException(e.getMessage(), e);
        }
        // TODO: Consider adding FastPropertyManager class.
        // Set Fast Property callbacks for dynamic updates.
        DynamicPropertyFactory propertyFactory = DynamicPropertyFactory.getInstance();
        this.readConsistencyFP =
            propertyFactory.getStringProperty(
                "florida.dyno.read.consistency", floridaConfig.getDynomiteReadConsistency());
        // Fires when the fast property changes; the value pushed to Dynomite is
        // re-read from floridaConfig, which presumably tracks the same updated
        // property -- TODO confirm the two sources stay in sync.
        Runnable updateReadConsitencyFP = ()-> {
            logger.info("Updating FP: " + this.readConsistencyFP.getName());
            if (!DynomiteRest.sendCommand("/set_consistency/read/" + floridaConfig.getDynomiteReadConsistency())) {
                logger.error("REST call to Dynomite for read consistency failed --> using the default");
            }
        };
        this.readConsistencyFP.addCallback(updateReadConsitencyFP);
        this.writeConsistencyFP =
            propertyFactory.getStringProperty(
                "florida.dyno.write.consistency", floridaConfig.getDynomiteWriteConsistency());
        Runnable updateWriteConsitencyFP = ()-> {
            logger.info("Updating FP: " + this.writeConsistencyFP.getName());
            if (!DynomiteRest.sendCommand("/set_consistency/write/" + floridaConfig.getDynomiteWriteConsistency())) {
                logger.error("REST call to Dynomite for write consistency failed --> using the default");
            }
        };
        this.writeConsistencyFP.addCallback(updateWriteConsitencyFP);
        // Publish the InstanceState boolean gauges through Servo.
        DefaultMonitorRegistry.getInstance().register(Monitors.newObjectMonitor(state));
    }

    /**
     * One-time node initialization: marks the side-car alive, schedules
     * security-settings updates (multi-DC only), rewrites the Dynomite YAML,
     * then either restores from backup or (warm/cold) bootstraps the storage
     * and Dynomite processes, and finally schedules all recurring tasks and
     * starts the scheduler. Returns early without doing anything when the
     * instance is flagged out of service.
     *
     * @throws Exception on any unrecoverable scheduling/startup error; the
     *         constructor converts this into a RuntimeException
     */
    public void initialize() throws Exception {
        if (id.getInstance().isOutOfService()) {
            logger.error("Out of service");
            return;
        }
        logger.info("Initializing Florida Server now ...");
        state.setSideCarProcessAlive(true);
        state.setBootstrapStatus(Bootstrap.NOT_STARTED);
        state.setStorageAlive(storageProxy.isAlive());
        if (floridaConfig.isDynomiteMultiDC()) {
            scheduler.runTaskNow(UpdateSecuritySettings.class);
            /*
             * sleep for some random between 100 - 200 sec if this is a new node
             * with new IP for SG to be updated by other seed nodes
             */
            if (id.isReplace() || id.isTokenPregenerated()) {
                long initTime = 100 + (int) (Math.random() * ((200 - 100) + 1));
                logger.info("Sleeping " + initTime + " seconds -> a node is replaced or token is pregenerated.");
                sleeper.sleep(initTime * 1000);
            } else if (UpdateSecuritySettings.firstTimeUpdated) {
                logger.info("Sleeping 60 seconds -> first time security settings are updated");
                sleeper.sleep(60 * 1000);
            }
            scheduler.addTask(UpdateSecuritySettings.JOBNAME, UpdateSecuritySettings.class,
                    UpdateSecuritySettings.getTimer(id));
        }
        // Invoking the task directly as any errors in this task
        // should not let Florida continue. However, we don't want to kill
        // the Florida process, but, want it to be stuck.
        logger.info("Running TuneTask and updating configuration.");
        try {
            tuneTask.execute();
        } catch (IOException e) {
            // Deliberate best-effort: YAML tuning failure is logged, not fatal.
            logger.error("Cannot update Dynomite YAML " + e.getMessage());
        }
        // Determine if we need to restore from backup else start Dynomite.
        if (commonConfig.isRestoreEnabled()) {
            logger.info("Restore is enabled.");
            scheduler.runTaskNow(RestoreTask.class); // restore from the AWS
            logger.info("Scheduled task " + RestoreTask.TaskName);
        } else { // no restores needed
            logger.info("Restore is disabled.");
            /**
             * Bootstrapping cases 1. The user has enforced warm up through an
             * FP 2. It is a new node that replaces an existing token (node
             * termination) 3. An existing token exists and Storage is not alive
             * (node reboot)
             */
            boolean warmUp = false;
            if (floridaConfig.isForceWarm()) {
                logger.info("force bootstrap -> warm up");
                warmUp = true;
            } else if (floridaConfig.isWarmBootstrap() && id.isReplace()) {
                logger.info("Instance replacement -> warm up");
                warmUp = true;
            } else if (floridaConfig.isWarmBootstrap() && !id.isNewToken() && !storageProxy.isAlive()) {
                logger.info("Not a new token and Storage is down -> warm up");
                warmUp = true;
            }
            if (warmUp) {
                // Warm path: stop Dynomite first so the bootstrap task owns the node.
                logger.info("Warm bootstraping node. Scheduling BootstrapTask now!");
                dynProcess.stop();
                scheduler.runTaskNow(WarmBootstrapTask.class);
            } else {
                // Cold path: storage first, short settle delay, then Dynomite.
                logger.info("Cold bootstraping, launching storage process.");
                storageProcess.start();
                sleeper.sleepQuietly(2000); // 2s
                logger.info("Launching dynomite process.");
                dynProcess.start();
                sleeper.sleepQuietly(1000); // 1s
                scheduler.runTaskNow(ProxyAndStorageResetTask.class);
            }
        }
        // Backup
        if (commonConfig.isBackupEnabled() && commonConfig.getBackupHour() >= 0) {
            scheduler.addTask(SnapshotTask.TaskName, SnapshotTask.class, SnapshotTask.getTimer(commonConfig));
        }
        // Metrics
        scheduler.addTask(ServoMetricsTask.TaskName, ServoMetricsTask.class, ServoMetricsTask.getTimer());
        scheduler.addTask(RedisInfoMetricsTask.TaskName, RedisInfoMetricsTask.class, RedisInfoMetricsTask.getTimer());
        // Routine monitoring and restarting dynomite or storage processes as
        // needed.
        scheduler.addTask(ProcessMonitorTask.JOBNAME, ProcessMonitorTask.class, ProcessMonitorTask.getTimer());
        scheduler.addTask(DynomiteProcessManager.JOB_TASK_NAME, DynomiteProcessManager.class,
                DynomiteProcessManager.getTimer());
        scheduler.addTask(RedisStorageProxy.JOB_TASK_NAME, RedisStorageProxy.class, RedisStorageProxy.getTimer());
        // Routing changing the YML file so that a manual Dynomite restart gets
        // the proper tokens
        scheduler.addTask(DynomiteYamlTask.JOBNAME, DynomiteYamlTask.class, DynomiteYamlTask.getTimer());
        logger.info("Starting task scheduler");
        scheduler.start();
    }

    /** @return the identity (token/seed info) of this instance */
    public InstanceIdentity getId() {
        return id;
    }

    /** @return the scheduler that owns all recurring side-car tasks */
    public TaskScheduler getScheduler() {
        return scheduler;
    }

    /** @return the Florida configuration backing this server */
    public FloridaConfig getConfiguration() {
        return floridaConfig;
    }
}
| 3,080 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager/config/FloridaConfig.java | /**
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dynomitemanager.config;
import com.netflix.archaius.api.annotations.Configuration;
import com.netflix.archaius.api.annotations.DefaultValue;
import com.netflix.archaius.api.annotations.PropertyName;
/**
 * Archaius-backed configuration for the Florida side-car. Every getter maps to
 * a property under the {@code florida.} prefix (overridden per-method via
 * {@link PropertyName}); {@link DefaultValue} supplies the fallback.
 */
@Configuration(prefix = "florida")
public interface FloridaConfig {
    /**
     * Get the full path to Dynomite's installation directory.
     *
     * @return full path to the Dynomite installation directory
     */
    @DefaultValue("/apps/dynomite")
    @PropertyName(name = "dyno.home")
    public String getDynomiteInstallDir();
    /**
     * @return Path to target application startup script
     */
    @DefaultValue("/apps/dynomite/bin/launch_dynomite.sh")
    @PropertyName(name = "dyno.startscript")
    public String getDynomiteStartScript();
    /**
     * @return Path to target application stop sript
     */
    @DefaultValue("/apps/dynomite/bin/kill_dynomite.sh")
    @PropertyName(name = "dyno.stopscript")
    public String getDynomiteStopScript();
    /**
     * @return Cluster name if the environment variable for cluster name does
     *         not exist
     */
    @DefaultValue("dynomite_demo1")
    @PropertyName(name = "dyno.clustername")
    public String getDynomiteClusterName();
    /**
     * YAML file to bootstrap Dynomite
     *
     * @return full path to the dynomite.yml configuration file
     */
    @DefaultValue("/apps/dynomite/conf/dynomite.yml")
    public String getDynomiteYaml();
    /**
     * @return Get the name of seed provider
     */
    @DefaultValue("florida_provider")
    @PropertyName(name = "dyno.seed.provider")
    public String getDynomiteSeedProvider();
    /**
     * Get the Dynomite process name.
     *
     * @return the Dynomite process name
     */
    @DefaultValue("dynomite")
    public String getDynomiteProcessName();
    /**
     * Get the read consistency level.
     *
     * @return the read consistency level
     */
    @DefaultValue("DC_ONE")
    @PropertyName(name = "dyno.read.consistency")
    public String getDynomiteReadConsistency();
    /**
     * Get the write consistency level.
     *
     * @return the write consistency level
     */
    @DefaultValue("DC_ONE")
    @PropertyName(name = "dyno.write.consistency")
    public String getDynomiteWriteConsistency();
    /** @return the TLS/secured peer-to-peer listener port */
    @DefaultValue("8101")
    @PropertyName(name = "dyno.secured.peer.port")
    public int getSecuredPeerListenerPort();
    /** @return the port Dynomite listens on for client traffic */
    @DefaultValue("8102")
    public int getDynomiteClientPort();
    /** @return the local address Dynomite binds/connects to on this host */
    @DefaultValue("127.0.0.1")
    public String getDynomiteLocalAddress();
    /**
     * Dynomite now supports multiple connections to datastore and peer. These
     * are the set of properties for each of them
     *
     * @return the peer-to-peer port used for intra-cluster communication
     */
    // we need this for backward compatibility.
    @DefaultValue("false")
    @PropertyName(name = "dyno.connections.pool.enable")
    public boolean getConnectionPoolEnabled();
    /** @return number of connections from Dynomite to the local datastore */
    @DefaultValue("1")
    @PropertyName(name = "dyno.connections.storage")
    public int getDatastoreConnections();
    /** @return number of connections to each peer in the local rack/DC */
    @DefaultValue("1")
    @PropertyName(name = "dyno.connections.peer.local")
    public int getLocalPeerConnections();
    /** @return number of connections to each remote-DC peer */
    @DefaultValue("1")
    @PropertyName(name = "dyno.connections.peer.remote")
    public int getRemotePeerConnections();
    /**
     * Dynomite support of hashtags Link:
     * https://github.com/Netflix/dynomite/blob/dev/notes/recommendation.md#hash-tags
     */
    @DefaultValue("")
    @PropertyName(name = "dyno.hashtag")
    public String getDynomiteHashtag();
    /**
     * Determine if Dynomite should auto-eject nodes from the cluster.
     *
     * @return true if Dynomite should auto-ejects hosts, false if not
     */
    @DefaultValue("true")
    public boolean getDynomiteAutoEjectHosts();
    /** @return token distribution strategy (e.g. "vnode") written to the YAML */
    @DefaultValue("vnode")
    public String getDistribution();
    /**
     * Get the Dynomite gossip interval which is the amount of time (in ms) that
     * Dynomite should wait between gossip rounds.
     *
     * @return the amount of time in ms to wait between gossip rounds
     */
    @DefaultValue("10000")
    @PropertyName(name = "dyno.gossip.interval")
    public int getDynomiteGossipInterval();
    /**
     * Get the hash algorithm that Dynomite uses to hash the data's key.
     *
     * @return the hash algorithm used to hash the data key
     */
    @DefaultValue("murmur")
    @PropertyName(name = "dyno.tokens.hash")
    public String getDynomiteHashAlgorithm();
    /**
     * Should Dynomite preconnect to the backend storage engine.
     *
     * @return true if Dynomite should preconnect to the backend storage engine,
     *         false if it should not preconnect
     */
    @DefaultValue("true")
    @PropertyName(name = "dyno.connections.preconnect")
    public boolean getDynomiteStoragePreconnect();
    /** @return server retry timeout in milliseconds */
    @DefaultValue("30000")
    public int getServerRetryTimeout();
    /** @return per-request timeout in milliseconds */
    @DefaultValue("5000")
    @PropertyName(name = "dyno.request.timeout")
    public int getTimeout();
    /**
     * Determine if Dynomite is configured as a multi-DC (data center) cluster).
     *
     * @return true if the Dynomite cluster is running across multiple DCs
     */
    @DefaultValue("true")
    @PropertyName(name = "dyno.multiregion")
    public boolean isDynomiteMultiDC();
    /**
     * Get the intra-cluster (i.e. node-to-node) security option. Maps to the
     * secure_server_option property in dynomite.yaml.
     *
     * @return the intra-cluster security option
     */
    @DefaultValue("datacenter")
    @PropertyName(name = "dyno.secured.option")
    public String getDynomiteIntraClusterSecurity();
    /**
     * If warm up is enabled for the cluster
     *
     * @return enabled/disabled
     */
    @DefaultValue("true")
    @PropertyName(name = "dyno.warm.bootstrap")
    public boolean isWarmBootstrap();
    /**
     * Enforcing the warm up
     *
     * @return enabled/disabled
     */
    @DefaultValue("false")
    @PropertyName(name = "dyno.warm.force")
    public boolean isForceWarm();
    /** @return whether the health-check endpoint/logic is enabled */
    @DefaultValue("true")
    @PropertyName(name = "dyno.healthcheck.enable")
    public boolean isHealthCheckEnable();
    /** @return max byte difference tolerated when deciding warm-up sync is done */
    @DefaultValue("1000000")
    @PropertyName(name = "dyno.warm.bytes.sync.diff")
    public int getAllowableBytesSyncDiff();
    /** @return max time (ms) allowed for the warm bootstrap before failing */
    @DefaultValue("1200000")
    @PropertyName(name = "dyno.warm.msec.bootstraptime")
    public int getMaxTimeToBootstrap();
    /**
     * The max percentage of system memory to be allocated to the Dynomite
     * fronted data store.
     */
    @DefaultValue("85")
    @PropertyName(name = "dyno.storage.mem.pct.int")
    public int getStorageMaxMemoryPercent();
    /**
     * Get the size (in bytes) of Dynomite's memory buffer (mbuf).
     *
     * @return size of Dynomite mbuf in bytes
     */
    @DefaultValue("16384")
    @PropertyName(name = "dyno.mbuf.size")
    public int getDynomiteMBufSize();
    /**
     * Get the maximum number of messages that Dynomite will hold in queue.
     * Default is 0, such that we can let Florida automate the value based on
     * the instance type.
     *
     * @return the maximum number of messages that Dynomite will allocate
     */
    @DefaultValue("0")
    @PropertyName(name = "dyno.allocated.messages")
    public int getDynomiteMaxAllocatedMessages();
    // VPC
    /** @return true when the instance runs inside a VPC */
    @DefaultValue("true")
    public boolean isVpc();
    // Persistence
    /** @return directory where the storage engine persists data */
    @DefaultValue("/mnt/data/nfredis")
    @PropertyName(name = "dyno.persistence.directory")
    public String getPersistenceLocation();
    /** @return whether storage-engine persistence is enabled */
    @DefaultValue("false")
    @PropertyName(name = "dyno.persistence.enabled")
    public boolean isPersistenceEnabled();
    /** @return persistence mode, e.g. "aof" or RDB-style -- consumed by the storage layer */
    @DefaultValue("aof")
    @PropertyName(name = "dyno.persistence.type")
    public String persistenceType();
    /** @return Redis zset-max-ziplist-value override; -1 means leave Redis default */
    @DefaultValue("-1")
    @PropertyName(name = "dyno.redis.zset.maxZipListValue")
    public int getRedisMaxZsetZiplistValue();
    // Storage engine: ARDB with RocksDB
    // =================================
    /**
     * Compaction strategy for RocksDB. RocksDB allows for optimized compaction
     * strategies: OptimizeLevelStyleCompaction,
     * OptimizeUniversalStyleCompaction or none.
     *
     * @return the compaction strategy
     */
    @DefaultValue("none")
    @PropertyName(name = "dyno.ardb.rocksdb.compactionStrategy")
    public String getRocksDBCompactionStrategy();
    /** @return RocksDB write buffer size in MB (per the property name suffix) */
    @DefaultValue("256")
    @PropertyName(name = "dyno.ardb.rocksdb.writebuffermb")
    public int getRocksDBWriteBufferSize();
    /**
     * Get the maximum number of memtables used by RocksDB. This number includes
     * both active and immutable memtables.
     *
     * @return the maximum number of memtables
     */
    @DefaultValue("16")
    @PropertyName(name = "dyno.ardb.rocksdb.maxwritebuffernumber")
    public int getRocksDBMaxWriteBufferNumber();
    /**
     * Loglevel
     *
     * @return the loglevel to set for RocksDB
     */
    @DefaultValue("info")
    @PropertyName(name = "dyno.ardb.loglevel")
    public String getArdbLoglevel();
    /**
     * Get the minimum number of memtables to be merged before flushing data to
     * persistent storage.
     *
     * @return the minimum number of memtables that must exist before a flush
     *         occurs
     */
    @DefaultValue("4")
    @PropertyName(name = "dyno.ardb.rocksdb.minwritebuffernametomerge")
    public int getRocksDBMinWriteBuffersToMerge();
    /** @return the Redis-compatible engine backing Dynomite (e.g. "redis") */
    @DefaultValue("redis")
    @PropertyName(name = "dyno.redis.compatible.engine")
    public String getRedisCompatibleEngine();
    /** @return value for Redis notify-keyspace-events; empty disables notifications */
    @DefaultValue("")
    @PropertyName(name = "redis.pubsub.keyspacevents")
    public String getKeySpaceEvents();
    /** @return Redis unix socket path; empty means TCP only */
    @DefaultValue("")
    @PropertyName(name = "redis.unixpath")
    public String getRedisUnixPath();
}
| 3,081 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager/config/InstanceState.java | package com.netflix.dynomitemanager.config;
import java.util.concurrent.atomic.AtomicBoolean;
import com.google.inject.Singleton;
import com.netflix.dynomitemanager.storage.Bootstrap;
import com.netflix.nfsidecar.identity.IInstanceState;
import org.joda.time.DateTime;
/**
 * Thread-safe holder for the health/progress state of the processes managed by
 * Florida (side-car, storage engine, Dynomite proxy, bootstrap/backup/restore
 * tasks). The derived {@code isHealthy} flag — storage proxy alive AND storage
 * alive — feeds the discovery health check.
 */
@Singleton
public class InstanceState implements IInstanceState {
    // Liveness flags for the managed processes.
    private final AtomicBoolean sideCarProcessAlive = new AtomicBoolean(false);
    private final AtomicBoolean storageProxyAlive = new AtomicBoolean(false);
    private final AtomicBoolean storageProxyProcessAlive = new AtomicBoolean(false);
    private final AtomicBoolean storageAlive = new AtomicBoolean(false);

    // Bootstrap (warm-up) progress.
    private final AtomicBoolean bootstrapping = new AtomicBoolean(false);
    private final AtomicBoolean bootstrapFirstRun = new AtomicBoolean(true);
    private Bootstrap bootstrapStatus;
    private long bootstrapTime;

    // Backup progress.
    private final AtomicBoolean backupRunning = new AtomicBoolean(false);
    private final AtomicBoolean backupSucceeded = new AtomicBoolean(false);
    private final AtomicBoolean backupFirstRun = new AtomicBoolean(true);
    private long backupTime;

    // Restore progress.
    private final AtomicBoolean restoreRunning = new AtomicBoolean(false);
    private final AtomicBoolean restoreSucceeded = new AtomicBoolean(false);
    private final AtomicBoolean restoreFirstRun = new AtomicBoolean(true);
    private long restoreTime;

    // Whether the Dynomite YAML has been (re)written.
    private final AtomicBoolean ymlWritten = new AtomicBoolean(false);

    // Derived: storage proxy alive AND storage alive.
    private final AtomicBoolean healthy = new AtomicBoolean(false);

    // Set via the /admin/stop and /admin/start REST endpoints; while true,
    // ProcessMonitorTask suspends its monitoring work.
    private final AtomicBoolean processMonitoringSuspended = new AtomicBoolean(false);

    /** Maps a boolean flag to the 0/1 gauge value the metric hooks expose. */
    private static int gauge(boolean flag) {
        return flag ? 1 : 0;
    }

    @Override
    public String toString() {
        // Field labels deliberately match the historical (pre-rename) names so
        // log scrapers keep working.
        StringBuilder sb = new StringBuilder("InstanceState{");
        sb.append("isSideCarProcessAlive=").append(sideCarProcessAlive)
                .append(", isBootstrapping=").append(bootstrapping)
                .append(", isBackingup=").append(backupRunning)
                .append(", isRestoring=").append(restoreRunning)
                .append(", isStorageProxyAlive=").append(storageProxyAlive)
                .append(", isStorageProxyProcessAlive=").append(storageProxyProcessAlive)
                .append(", isStorageAlive=").append(storageAlive)
                .append(", isHealthy=").append(healthy)
                .append(", isProcessMonitoringSuspended=").append(processMonitoringSuspended)
                .append('}');
        return sb.toString();
    }

    public boolean isSideCarProcessAlive() {
        return sideCarProcessAlive.get();
    }

    public void setSideCarProcessAlive(boolean isSideCarProcessAlive) {
        this.sideCarProcessAlive.set(isSideCarProcessAlive);
    }

    //@Monitor(name="sideCarProcessAlive", type=DataSourceType.GAUGE)
    public int metricIsSideCarProcessAlive() {
        return gauge(isSideCarProcessAlive());
    }

    /* Boostrap */
    public boolean isBootstrapping() {
        return bootstrapping.get();
    }

    public Bootstrap isBootstrapStatus() {
        return bootstrapStatus;
    }

    public boolean firstBootstrap() {
        return bootstrapFirstRun.get();
    }

    public long getBootstrapTime() {
        return bootstrapTime;
    }

    public void setBootstrapping(boolean isBootstrapping) {
        this.bootstrapping.set(isBootstrapping);
    }

    public void setBootstrapStatus(Bootstrap bootstrapStatus) {
        this.bootstrapStatus = bootstrapStatus;
    }

    public void setFirstBootstrap(boolean firstBootstrap) {
        this.bootstrapFirstRun.set(firstBootstrap);
    }

    public void setBootstrapTime(DateTime bootstrapTime) {
        this.bootstrapTime = bootstrapTime.getMillis();
    }

    /* Backup */
    public boolean isBackingup() {
        return backupRunning.get();
    }

    public boolean isBackupSuccessful() {
        return backupSucceeded.get();
    }

    public boolean firstBackup() {
        return backupFirstRun.get();
    }

    public long getBackupTime() {
        return backupTime;
    }

    public void setBackingup(boolean isBackup) {
        this.backupRunning.set(isBackup);
    }

    public void setBackUpStatus(boolean isBackupSuccessful) {
        this.backupSucceeded.set(isBackupSuccessful);
    }

    public void setFirstBackup(boolean firstBackup) {
        this.backupFirstRun.set(firstBackup);
    }

    public void setBackupTime(DateTime backupTime) {
        this.backupTime = backupTime.getMillis();
    }

    /* Restore */
    public boolean isRestoring() {
        return restoreRunning.get();
    }

    public boolean isRestoreSuccessful() {
        return restoreSucceeded.get();
    }

    public boolean firstRestore() {
        return restoreFirstRun.get();
    }

    public long getRestoreTime() {
        return restoreTime;
    }

    public void setRestoring(boolean isRestoring) {
        this.restoreRunning.set(isRestoring);
    }

    public void setRestoreStatus(boolean isRestoreSuccessful) {
        this.restoreSucceeded.set(isRestoreSuccessful);
    }

    public void setFirstRestore(boolean firstRestore) {
        this.restoreFirstRun.set(firstRestore);
    }

    public void setRestoreTime(DateTime restoreTime) {
        this.restoreTime = restoreTime.getMillis();
    }

    //@Monitor(name="bootstrapping", type=DataSourceType.GAUGE)
    public int metricIsBootstrapping() {
        return gauge(isBootstrapping());
    }

    public boolean isStorageProxyAlive() {
        return storageProxyAlive.get();
    }

    public void setStorageProxyAlive(boolean isStorageProxyAlive) {
        this.storageProxyAlive.set(isStorageProxyAlive);
        refreshHealth();
    }

    //@Monitor(name="storageProxyAlive", type=DataSourceType.GAUGE)
    public int metricIsStorageProxyAlive() {
        return gauge(isStorageProxyAlive());
    }

    public boolean isStorageProxyProcessAlive() {
        return storageProxyProcessAlive.get();
    }

    public void setStorageProxyProcessAlive(boolean isStorageProxyProcessAlive) {
        this.storageProxyProcessAlive.set(isStorageProxyProcessAlive);
    }

    //@Monitor(name="storageProxyProcessAlive", type=DataSourceType.GAUGE)
    public int metricIsStorageProxyProcessAlive() {
        return gauge(isStorageProxyProcessAlive());
    }

    public boolean isStorageAlive() {
        return storageAlive.get();
    }

    public void setStorageAlive(boolean isStorageAlive) {
        this.storageAlive.set(isStorageAlive);
        refreshHealth();
    }

    //@Monitor(name="storageAlive", type=DataSourceType.GAUGE)
    public int metricIsStorageAlive() {
        return gauge(isStorageAlive());
    }

    public boolean isHealthy() {
        return healthy.get();
    }

    /** Recomputes the derived health flag from the two storage liveness flags. */
    private void refreshHealth() {
        this.healthy.set(isStorageProxyAlive() && isStorageAlive());
    }

    //@Monitor(name="healthy", type=DataSourceType.GAUGE)
    public int metricIsHealthy() {
        return gauge(isHealthy());
    }

    public boolean getIsProcessMonitoringSuspended() {
        return processMonitoringSuspended.get();
    }

    public void setIsProcessMonitoringSuspended(boolean ipms) {
        this.processMonitoringSuspended.set(ipms);
    }

    //@Monitor(name="processMonitoringSuspended", type=DataSourceType.GAUGE)
    public int metricIsProcessMonitoringSuspended() {
        return gauge(getIsProcessMonitoringSuspended());
    }

    public boolean getYmlWritten() {
        return this.ymlWritten.get();
    }

    public void setYmlWritten(boolean yml) {
        this.ymlWritten.set(yml);
    }
}
| 3,082 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager/resources/DynomiteAdmin.java | package com.netflix.dynomitemanager.resources;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import org.apache.commons.lang.StringUtils;
import org.codehaus.jettison.json.JSONObject;
import org.codehaus.jettison.json.JSONArray;
import org.codehaus.jettison.json.JSONException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.inject.Inject;
import com.netflix.dynomitemanager.backup.RestoreTask;
import com.netflix.dynomitemanager.backup.SnapshotTask;
import com.netflix.dynomitemanager.config.FloridaConfig;
import com.netflix.dynomitemanager.config.InstanceState;
import com.netflix.dynomitemanager.dynomite.IDynomiteProcess;
import com.netflix.dynomitemanager.storage.Bootstrap;
import com.netflix.dynomitemanager.storage.StorageProcessManager;
import com.netflix.dynomitemanager.storage.StorageProxy;
import com.netflix.nfsidecar.identity.AppsInstance;
import com.netflix.nfsidecar.identity.InstanceIdentity;
@Path("/v1/admin")
@Produces(MediaType.APPLICATION_JSON)
public class DynomiteAdmin {
private static final String REST_SUCCESS = "[\"ok\"]";
private static final Logger logger = LoggerFactory.getLogger(DynomiteAdmin.class);
private IDynomiteProcess dynoProcess;
private InstanceIdentity ii;
private InstanceState instanceState;
private SnapshotTask snapshotBackup;
private RestoreTask restoreBackup;
private StorageProxy storage;
private StorageProcessManager storageProcessMgr;
private FloridaConfig config;
/**
 * Wires the admin resource with its collaborators via Guice.
 *
 * @param config            Florida configuration (consistency, hashtag, ...)
 * @param dynoProcess       controller for the Dynomite process
 * @param ii                identity of this instance (seeds, cluster topology)
 * @param instanceState     shared health/progress state of managed processes
 * @param snapshotBackup    task that snapshots data to the backup store
 * @param restoreBackup     task that restores data from the backup store
 * @param storage           proxy to the backend storage engine
 * @param storageProcessMgr controller for the storage engine process
 */
@Inject
public DynomiteAdmin(FloridaConfig config, IDynomiteProcess dynoProcess, InstanceIdentity ii,
        InstanceState instanceState, SnapshotTask snapshotBackup, RestoreTask restoreBackup, StorageProxy storage,
        StorageProcessManager storageProcessMgr) {
    this.config = config;
    this.dynoProcess = dynoProcess;
    this.ii = ii;
    this.instanceState = instanceState;
    this.snapshotBackup = snapshotBackup;
    this.restoreBackup = restoreBackup;
    this.storage = storage;
    this.storageProcessMgr = storageProcessMgr;
}
/**
 * GET /v1/admin/start — start Dynomite and resume process monitoring.
 * Monitoring is re-enabled before the start call so ProcessMonitorTask
 * can keep the process healthy from then on.
 */
@GET
@Path("/{start : (?i)start}")
public Response dynoStart() throws IOException, InterruptedException, JSONException {
    logger.info("REST call: Starting Dynomite");
    instanceState.setIsProcessMonitoringSuspended(false);
    // Let the ProcessMonitorTask take over the job of starting the process
    // correctly.
    dynoProcess.start();
    return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build();
}
/**
 * GET /v1/admin/stop — stop Dynomite. Monitoring is suspended first so
 * ProcessMonitorTask does not immediately restart the process.
 */
@GET
@Path("/{stop : (?i)stop}")
public Response dynoStop() throws IOException, InterruptedException, JSONException {
    logger.info("REST call: Stopping Dynomite");
    instanceState.setIsProcessMonitoringSuspended(true);
    dynoProcess.stop();
    return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build();
}
/**
 * GET /v1/admin/startstorageprocess — start the backend storage engine
 * process. Note: unlike dynoStart, this does not touch the process
 * monitoring suspension flag.
 */
@GET
@Path("/{startstorageprocess : (?i)startstorageprocess}")
public Response storageProcessStart() throws IOException, InterruptedException, JSONException {
    storageProcessMgr.start();
    return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build();
}
/**
 * GET /v1/admin/stopstorageprocess — stop the backend storage engine
 * process. Monitoring is not suspended here; ProcessMonitorTask may
 * restart the process unless it was suspended separately.
 */
@GET
@Path("/{stopstorageprocess : (?i)stopstorageprocess}")
public Response storageProcessStop() throws IOException, InterruptedException, JSONException {
    storageProcessMgr.stop();
    return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build();
}
/**
 * GET /v1/admin/info — not implemented. Returns null, which JAX-RS maps
 * to an empty 204 response.
 */
@GET
@Path("/{info : (?i)info}")
public Response dynoInfo() throws IOException, InterruptedException, JSONException {
    logger.info("REST interface for INFO - not implemented");
    // NodeProbe probe = new NodeProbe();
    // return Response.ok(probe.info(), MediaType.APPLICATION_JSON).build();
    return null;
}
/**
 * GET /v1/admin/ring/{id} — not implemented. The keyspace path parameter
 * is accepted but ignored; returns null (empty 204 response).
 */
@GET
@Path("/{ring : (?i)ring}/{id}")
public Response dynoRing(@PathParam("id") String keyspace) throws IOException, InterruptedException, JSONException {
    logger.info("REST interface for RING - not implemented");
    // NodeProbe probe = new NodeProbe();
    // logger.debug("node tool ring being called");
    // return Response.ok(probe.ring(keyspace),
    // MediaType.APPLICATION_JSON).build();
    return null;
}
/**
 * GET /v1/admin/repair — no-op. All three query parameters are accepted
 * but ignored; always answers 200 OK.
 */
@GET
@Path("/{repair : (?i)repair}")
public Response dynoRepair(@QueryParam("sequential") boolean isSequential,
        @QueryParam("localDC") boolean localDCOnly,
        @DefaultValue("false") @QueryParam("primaryRange") boolean primaryRange)
        throws IOException, ExecutionException, InterruptedException {
    return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build();
}
/**
 * GET /v1/admin/version — returns the hard-coded API version as the JSON
 * array ["1.0.0"].
 */
@GET
@Path("/{version : (?i)version}")
public Response version() throws IOException, ExecutionException, InterruptedException {
    logger.info("REST: version");
    return Response.ok(new JSONArray().put("1.0.0"), MediaType.APPLICATION_JSON).build();
}
/**
 * GET /v1/admin/drain — not implemented; logs and answers 200 OK without
 * draining anything.
 */
@GET
@Path("/{drain : (?i)drain}")
public Response dynoDrain() throws IOException, ExecutionException, InterruptedException {
    logger.info("REST interface for DRAIN - not implemented");
    return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build();
}
/**
 * GET /v1/admin/get_seeds — returns this instance's seed list as a single
 * pipe-separated string, or 500 if the lookup fails.
 */
@GET
@Path("/{get_seeds : (?i)get_seeds}")
public Response getSeeds() {
    try {
        String joined = StringUtils.join(ii.getSeeds(), '|');
        return Response.ok(joined).build();
    } catch (Exception e) {
        logger.error("Error while executing get_seeds", e);
        return Response.serverError().build();
    }
}
/**
 * GET /v1/admin/cluster_describe — hand-builds a JSON array describing every
 * node in the cluster (token, hostname, ports, rack, ip, zone, dc, and the
 * configured hashtag when one is set). Answers 500 when the topology lookup
 * fails or yields no nodes.
 */
@GET
@Path("/{cluster_describe : (?i)cluster_describe}")
public Response getClusterDescribe() {
    try {
        List<String> nodes = new LinkedList<String>();
        for (AppsInstance ins : ii.getClusterInfo()) {
            logger.debug("Adding node: " + ins.getInstanceId());
            StringBuilder node = new StringBuilder("{");
            node.append("\"token\":\"").append(ins.getToken()).append("\",");
            node.append("\"hostname\":\"").append(ins.getHostName()).append("\",");
            node.append("\"port\":\"").append(ins.getDynomitePort()).append("\",");
            node.append("\"secure_port\":\"").append(ins.getDynomiteSecurePort()).append("\",");
            node.append("\"secure_storage_port\":\"").append(ins.getDynomiteSecureStoragePort()).append("\",");
            node.append("\"peer_port\":\"").append(ins.getPeerPort()).append("\",");
            node.append("\"rack\":\"").append(ins.getRack()).append("\",");
            node.append("\"ip\":\"").append(ins.getHostIP()).append("\",");
            node.append("\"zone\":\"").append(ins.getZone()).append("\",");
            node.append("\"dc\":\"").append(ins.getDatacenter()).append("\"");
            // Only advertise the hashtag when one is configured.
            if (!config.getDynomiteHashtag().isEmpty()) {
                node.append(",\"hashtag\":\"").append(config.getDynomiteHashtag()).append("\"");
            }
            node.append("}");
            nodes.add(node.toString());
        }
        if (!nodes.isEmpty())
            return Response.ok("[" + StringUtils.join(nodes, ',') + "]").build();
        logger.error("Cannot find the nodes");
    } catch (Exception e) {
        logger.error("Error while executing cluster_describe", e);
        return Response.serverError().build();
    }
    return Response.status(500).build();
}
/**
 * GET /v1/admin/backup — runs the snapshot/backup task synchronously and
 * answers 200 OK on success, 500 on any failure.
 */
@GET
@Path("/{backup : (?i)backup}")
public Response doBackup() {
    try {
        logger.info("REST call: backups");
        this.snapshotBackup.execute();
    } catch (Exception e) {
        logger.error("Error while executing backups from REST call", e);
        return Response.serverError().build();
    }
    return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build();
}
/**
 * GET /v1/admin/restore — runs the restore-from-backup task synchronously
 * and answers 200 OK on success, 500 on any failure.
 */
@GET
@Path("/{restore : (?i)restore}")
public Response doRestore() {
    try {
        logger.info("REST call: restore");
        this.restoreBackup.execute();
    } catch (Exception e) {
        logger.error("Error while executing restores from REST call", e);
        return Response.serverError().build();
    }
    return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build();
}
/**
 * GET /v1/admin/takesnapshot — asks the storage proxy to persist its data
 * to disk (exact semantics depend on the StorageProxy implementation —
 * presumably a Redis-style snapshot; confirm against the proxy in use).
 */
@GET
@Path("/{takesnapshot : (?i)takesnapshot}")
public Response takeSnapshot() {
    try {
        logger.info("REST call: Persisting Data to Disk");
        this.storage.takeSnapshot();
        return Response.ok(REST_SUCCESS, MediaType.APPLICATION_JSON).build();
    } catch (Exception e) {
        logger.error("Error executing data persistence from REST call", e);
        return Response.serverError().build();
    }
}
/**
 * GET /v1/admin/consistency — reports the currently configured read and
 * write consistency levels as a JSON object
 * {"read_consistency": ..., "write_consistency": ...}; 500 on failure.
 */
@GET
@Path("/{consistency : (?i)consistency}")
public Response getConsistency() {
    try {
        logger.info("REST call: Get Consistency");
        JSONObject payload = new JSONObject();
        payload.put("read_consistency", config.getDynomiteReadConsistency());
        payload.put("write_consistency", config.getDynomiteWriteConsistency());
        return Response.ok(payload, MediaType.APPLICATION_JSON).build();
    } catch (Exception e) {
        logger.error("Error getting consistency from REST call", e);
        return Response.serverError().build();
    }
}
/**
 * GET /v1/admin/hashtag — reports the configured Dynomite hashtag as
 * {"hashtag": "..."} or {"hashtag": "none"} when no hashtag is set;
 * 500 on failure.
 */
@GET
@Path("/{hashtag : (?i)hashtag}")
public Response getHashtag() {
    try {
        // Read the property exactly once: it is backed by dynamic
        // configuration, so reading it twice (emptiness check vs. value)
        // could observe two different values if it updates in between.
        final String hashtag = config.getDynomiteHashtag();
        JSONObject hashtagJson = new JSONObject();
        hashtagJson.put("hashtag", hashtag.isEmpty() ? "none" : hashtag);
        return Response.ok(hashtagJson, MediaType.APPLICATION_JSON).build();
    } catch (Exception e) {
        logger.error("Error getting the hashtag from REST call", e);
        return Response.serverError().build();
    }
}
/**
 * REST endpoint returning a single JSON document describing this node:
 * warm up / backup / restore progress, Dynomite consistency levels, process
 * health, token(s), storage engine, connection-pool sizes and hashtag.
 *
 * @return 200 with the status JSON, or 500 on any failure.
 */
@GET
@Path("/{status : (?i)status}")
public Response floridaStatus() {
    try {
        JSONObject statusJson = new JSONObject();

        /* Warm up status */
        JSONObject warmupJson = new JSONObject();
        if (!this.instanceState.firstBootstrap()) {
            if (this.instanceState.isBootstrapping()) {
                warmupJson.put("status", "pending");
            } else {
                Bootstrap bootstrap = this.instanceState.isBootstrapStatus();
                switch (bootstrap) {
                case CANNOT_CONNECT_FAIL:
                    warmupJson.put("status", "failed: cannot connect");
                    break;
                case WARMUP_ERROR_FAIL:
                    warmupJson.put("status", "failed: error in warmup");
                    break;
                case RETRIES_FAIL:
                    warmupJson.put("status", "failed: too fast to warmup - retries");
                    break;
                case EXPIRED_BOOTSTRAPTIME_FAIL:
                    warmupJson.put("status", "failed: too fast to warmup - expired bootstrap time");
                    break;
                case IN_SYNC_SUCCESS:
                    warmupJson.put("status", "completed");
                    break;
                default:
                    warmupJson.put("status", "unknown");
                    break;
                }
            }
            warmupJson.put("time", this.instanceState.getBootstrapTime());
        } else {
            warmupJson.put("status", "not started");
        }
        statusJson.put("warmup", warmupJson);

        /*
         * Backup status. The original else-if chain re-tested !isBackingup()
         * in branches that could only be reached when it was already false;
         * the redundant checks are removed, outcomes are unchanged.
         */
        JSONObject backupJson = new JSONObject();
        if (!this.instanceState.firstBackup()) {
            if (this.instanceState.isBackingup()) {
                backupJson.put("status", "pending");
            } else if (this.instanceState.isBackupSuccessful()) {
                backupJson.put("status", "completed");
            } else {
                backupJson.put("status", "unsuccessful");
            }
            backupJson.put("time", this.instanceState.getBackupTime());
        } else {
            backupJson.put("status", "not started");
        }
        statusJson.put("backup", backupJson);

        /* Restore status (same simplification as backup above). */
        JSONObject restoreJson = new JSONObject();
        if (!this.instanceState.firstRestore()) {
            if (this.instanceState.isRestoring()) {
                restoreJson.put("status", "pending");
            } else if (this.instanceState.isRestoreSuccessful()) {
                restoreJson.put("status", "completed");
            } else {
                restoreJson.put("status", "unsuccessful");
            }
            restoreJson.put("time", this.instanceState.getRestoreTime());
        } else {
            restoreJson.put("status", "not started");
        }
        statusJson.put("restore", restoreJson);

        /* Dynomite Consistency */
        JSONObject consistencyJson = new JSONObject();
        consistencyJson.put("read", config.getDynomiteReadConsistency());
        consistencyJson.put("write", config.getDynomiteWriteConsistency());
        statusJson.put("consistency", consistencyJson);

        /* Health: booleans used directly (the `? true : false` ternaries were no-ops). */
        JSONObject healthJson = new JSONObject();
        healthJson.put("dynomiteAlive", this.instanceState.isStorageProxyProcessAlive());
        healthJson.put("storageAlive", this.instanceState.isStorageAlive());
        healthJson.put("Overall", this.instanceState.isHealthy());
        statusJson.put("health", healthJson);

        /* My token */
        statusJson.put("tokens", this.ii.getTokens());

        /* Storage Engine */
        statusJson.put("storage engine", config.getRedisCompatibleEngine());

        /* Connection pool sizes, only when pooling is enabled. */
        if (config.getConnectionPoolEnabled()) {
            JSONObject connectionsJson = new JSONObject();
            connectionsJson.put("Storage", config.getDatastoreConnections());
            connectionsJson.put("Local Peer", config.getLocalPeerConnections());
            connectionsJson.put("Remote Peer", config.getRemotePeerConnections());
            statusJson.put("Number Connections", connectionsJson);
        }

        /* Hashtag (omitted entirely when not configured). */
        if (!config.getDynomiteHashtag().isEmpty()) {
            statusJson.put("hashtag", config.getDynomiteHashtag());
        }

        logger.info("REST call: Florida Status");
        return Response.ok(statusJson, MediaType.APPLICATION_JSON).build();
    } catch (Exception e) {
        logger.error("Error requesting Florida status from REST call", e);
        return Response.serverError().build();
    }
}
}
| 3,083 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager/storage/JedisUtils.java | package com.netflix.dynomitemanager.storage;
import com.netflix.config.DynamicIntProperty;
import com.netflix.config.DynamicLongProperty;
import com.netflix.config.DynamicPropertyFactory;
import com.netflix.nfsidecar.utils.BoundedExponentialRetryCallable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import redis.clients.jedis.Jedis;
/**
* Useful utilities to connect to storage or storage proxy via Jedis.
*
* @author Monal Daxini
* @author ipapapa
*/
public class JedisUtils {

    private static final Logger logger = LoggerFactory.getLogger(JedisUtils.class);

    /* Retry bounds (ms) for the bounded-exponential retry used by the checks below. */
    private static final DynamicLongProperty minRetryMs = DynamicPropertyFactory.getInstance()
            .getLongProperty("florida.storage.isAlive.retry.min.ms", 3000L);
    private static final DynamicLongProperty maxRetryMs = DynamicPropertyFactory.getInstance()
            .getLongProperty("florida.storage.isAlive.retry.max.ms", 30000L);

    /* Socket connect timeout (ms) for new Jedis connections. */
    private static final DynamicIntProperty jedisConnectTimeoutMs = DynamicPropertyFactory.getInstance()
            .getIntProperty("florida.storage.jedis.connect.timeout.ms", 30000);

    /** Static utility class; not meant to be instantiated. */
    private JedisUtils() {
    }

    /**
     * Open a Jedis connection to the given host and port. The caller is
     * responsible for invoking {@link redis.clients.jedis.Jedis#disconnect()}.
     *
     * @return a connected Jedis, or {@code null} if the connection failed.
     */
    public static Jedis connect(final String host, final int port) {
        try {
            Jedis jedis = new Jedis(host, port, jedisConnectTimeoutMs.getValue());
            jedis.connect();
            return jedis;
        } catch (Exception e) {
            // Fix: include the cause; it was previously dropped, hiding why connections failed.
            logger.warn("Unable to connect to host:" + host + " port: " + port, e);
        }
        return null;
    }

    /**
     * Sends a SETEX (auto-expiring after 1 sec) to Redis at the specified host
     * and port, retrying with bounded exponential backoff.
     *
     * @return true if the write was acknowledged with OK within the retry budget.
     */
    public static boolean isWritableWithRetry(final String host, final int port) {
        BoundedExponentialRetryCallable<Boolean> jedisRetryCallable = new BoundedExponentialRetryCallable<Boolean>() {
            Jedis jedis = null;

            @Override
            public Boolean retriableCall() throws Exception {
                jedis = connect(host, port);
                if (jedis == null) {
                    // Fix: connect() returns null on failure; previously this
                    // dereferenced null and threw an NPE instead of a clean retry.
                    throw new Exception("Unable to connect to host:" + host + " port:" + port);
                }
                /*
                 * check 1: write a SETEX key (single write) and auto-expire
                 */
                String status = jedis.setex("ignore_dyno", 1, "dynomite");
                if (!status.equalsIgnoreCase("OK")) {
                    jedis.disconnect();
                    return false;
                }
                return true;
            }

            @Override
            public void forEachExecution() {
                // Fix: guard against a failed connect leaving jedis null.
                if (jedis != null) {
                    jedis.disconnect();
                }
            }
        };
        jedisRetryCallable.setMin(minRetryMs.getValue());
        jedisRetryCallable.setMax(maxRetryMs.getValue());
        try {
            return jedisRetryCallable.call();
        } catch (Exception e) {
            logger.warn(String.format("All retries to SETEX to host:%s port:%s failed.", host, port));
            return false;
        }
    }

    /**
     * Sends a PING to the server (Dynomite or Redis) at the specified host and
     * port, retrying with bounded exponential backoff.
     *
     * @return true if a PONG was received within the retry budget.
     */
    public static boolean isAliveWithRetry(final String host, final int port) {
        BoundedExponentialRetryCallable<Boolean> jedisRetryCallable = new BoundedExponentialRetryCallable<Boolean>() {
            Jedis jedis = null;

            @Override
            public Boolean retriableCall() throws Exception {
                jedis = connect(host, port);
                if (jedis == null) {
                    // Fix: avoid NPE when connect() fails (see isWritableWithRetry).
                    throw new Exception("Unable to connect to host:" + host + " port:" + port);
                }
                /* check 1: perform a ping */
                if (jedis.ping() == null) {
                    jedis.disconnect();
                    return false;
                }
                jedis.disconnect();
                return true;
            }

            @Override
            public void forEachExecution() {
                if (jedis != null) {
                    jedis.disconnect();
                }
            }
        };
        jedisRetryCallable.setMin(minRetryMs.getValue());
        jedisRetryCallable.setMax(maxRetryMs.getValue());
        try {
            return jedisRetryCallable.call();
        } catch (Exception e) {
            logger.warn(String.format("All retries to PING host:%s port:%s failed.", host, port));
            return false;
        }
    }
}
| 3,084 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager/storage/Bootstrap.java | package com.netflix.dynomitemanager.storage;
/**
 * Outcome of the warm-up (bootstrap) process, reported via the REST status
 * endpoint and set by {@code RedisStorageProxy.warmUpStorage}:
 * NOT_STARTED - no bootstrap attempted yet;
 * CANNOT_CONNECT_FAIL - no usable peer connection could be established;
 * WARMUP_ERROR_FAIL - an error occurred during peer replication;
 * RETRIES_FAIL - offset diff kept growing for too many consecutive checks;
 * EXPIRED_BOOTSTRAPTIME_FAIL - warm up exceeded the maximum bootstrap time;
 * IN_SYNC_SUCCESS - master/slave offsets converged within the allowed diff.
 */
public enum Bootstrap {
    NOT_STARTED, CANNOT_CONNECT_FAIL, WARMUP_ERROR_FAIL, RETRIES_FAIL, EXPIRED_BOOTSTRAPTIME_FAIL, IN_SYNC_SUCCESS,
}
| 3,085 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager/storage/StorageProxy.java | package com.netflix.dynomitemanager.storage;
import java.io.IOException;
/**
 * Abstraction over the backend storage engine that Dynomite fronts (e.g. Redis
 * or an ARDB/RocksDB Redis-compatible engine), exposing lifecycle, warm-up,
 * persistence and configuration hooks used by the sidecar.
 */
public interface StorageProxy {

    /** @return true if the storage engine responds to a liveness check. */
    boolean isAlive();

    /** @return engine uptime; implementations may return 0 when unsupported. */
    long getUptime();

    /** Warm up local storage by replicating from one of the given peers. */
    Bootstrap warmUpStorage(String[] peers);

    /** Reset storage back to master role if it was left as a slave. */
    boolean resetStorage();

    /** Ask the engine to persist its dataset to disk. */
    boolean takeSnapshot();

    /** @return true once the engine has finished loading data from disk. */
    boolean loadingData();

    /** Stop replicating from a peer (e.g. SLAVEOF NO ONE for Redis). */
    void stopPeerSync();

    /** @return short engine name, e.g. "redis". */
    String getEngine();

    /** @return numeric engine identifier. */
    int getEngineNumber();

    /** Rewrite the engine's configuration file from sidecar settings. */
    void updateConfiguration() throws IOException;

    /** @return path of the engine start script. */
    String getStartupScript();

    /** @return path of the engine stop script. */
    String getStopScript();

    /** @return IP address the engine listens on. */
    String getIpAddress();

    /** @return TCP port the engine listens on. */
    int getPort();

    /** @return unix domain socket path, if configured. */
    String getUnixPath();

    /** @return maximum memory (KB) the store may use. */
    long getStoreMaxMem();

    /** @return total system memory (KB) available on this host. */
    long getTotalAvailableSystemMemory();
}
0 | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager/storage/WarmBootstrapTask.java | /**
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dynomitemanager.storage;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.dynomitemanager.config.InstanceState;
import com.netflix.dynomitemanager.dynomite.DynomiteRest;
import com.netflix.dynomitemanager.dynomite.IDynomiteProcess;
import com.netflix.nfsidecar.identity.AppsInstance;
import com.netflix.nfsidecar.identity.InstanceIdentity;
import com.netflix.nfsidecar.resources.env.IEnvVariables;
import com.netflix.nfsidecar.scheduler.SimpleTimer;
import com.netflix.nfsidecar.scheduler.Task;
import com.netflix.nfsidecar.scheduler.TaskTimer;
import com.netflix.nfsidecar.tokensdb.IAppsInstanceFactory;
import com.netflix.nfsidecar.utils.Sleeper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.joda.time.DateTime;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
@Singleton
public class WarmBootstrapTask extends Task {
    private static final Logger logger = LoggerFactory.getLogger(WarmBootstrapTask.class);
    public static final String JOBNAME = "Bootstrap-Task";

    private final IDynomiteProcess dynProcess;
    private final StorageProxy storageProxy;
    private final IAppsInstanceFactory appsInstanceFactory;
    private final InstanceIdentity ii;
    private final InstanceState state;
    private final Sleeper sleeper;
    private final StorageProcessManager storageProcessMgr;
    private final IEnvVariables envVariables;

    @Inject
    public WarmBootstrapTask(IAppsInstanceFactory appsInstanceFactory, InstanceIdentity id,
            IDynomiteProcess dynProcess, StorageProxy storageProxy, InstanceState ss, Sleeper sleeper,
            StorageProcessManager storageProcessMgr, IEnvVariables envVariables) {
        this.dynProcess = dynProcess;
        this.storageProxy = storageProxy;
        this.appsInstanceFactory = appsInstanceFactory;
        this.ii = id;
        this.state = ss;
        this.sleeper = sleeper;
        this.storageProcessMgr = storageProcessMgr;
        this.envVariables = envVariables;
    }

    /**
     * Run the warm bootstrap: if storage is down, start it, replicate data from
     * a live peer owning the same token(s), then start Dynomite and walk it
     * through writes_only -> resuming -> normal states.
     *
     * @throws IOException declared for scheduler/interface compatibility
     */
    public void execute() throws IOException {
        logger.info("Running warmbootstrapping ...");
        this.state.setFirstBootstrap(false);
        this.state.setBootstrapTime(DateTime.now());

        // Just to be sure testing again: only bootstrap when storage is down.
        if (!state.isStorageAlive()) {
            // starting storage
            this.storageProcessMgr.start();
            logger.info("Redis is up ---> Starting warm bootstrap.");
            // setting the status to bootstrapping
            this.state.setBootstrapping(true);

            // sleep to make sure Storage process is up.
            this.sleeper.sleepQuietly(5000);

            String[] peers = getLocalPeersWithSameTokensRange();

            // try one node only for now
            // TODO: if this peer is not good, try the next one until we can get the data
            if (peers != null && peers.length != 0) {
                /* Check the warm up status. */
                Bootstrap bootstrap = this.storageProxy.warmUpStorage(peers);
                if (bootstrap == Bootstrap.IN_SYNC_SUCCESS || bootstrap == Bootstrap.EXPIRED_BOOTSTRAPTIME_FAIL
                        || bootstrap == Bootstrap.RETRIES_FAIL) {
                    // Since we are ready let us start Dynomite.
                    try {
                        this.dynProcess.start();
                    } catch (IOException ex) {
                        logger.error("Dynomite failed to start");
                    }
                    // Wait for 1 second before we check dynomite status
                    sleeper.sleepQuietly(1000);
                    if (this.dynProcess.dynomiteCheck()) {
                        logger.info("Dynomite health check passed");
                    } else {
                        logger.error("Dynomite health check failed");
                    }
                    // Set the state of bootstrap as successful.
                    this.state.setBootstrapStatus(bootstrap);

                    logger.info("Set Dynomite to allow writes only!!!");
                    DynomiteRest.sendCommand("/state/writes_only");

                    logger.info("Stop Redis' Peer syncing!!!");
                    this.storageProxy.stopPeerSync();

                    logger.info("Set Dynomite to resuming state to allow writes and flush delayed writes");
                    DynomiteRest.sendCommand("/state/resuming");

                    // sleep 15s for the flushing to catch up
                    sleeper.sleepQuietly(15000);

                    logger.info("Set Dynomite to normal state");
                    DynomiteRest.sendCommand("/state/normal");
                } else {
                    logger.error("Warm up failed: Stop Redis' Peer syncing!!!");
                    this.storageProxy.stopPeerSync();
                }
            } else {
                logger.error("Unable to find any peer with the same token!");
            }

            /*
             * Performing a check of Dynomite after bootstrap is complete. This
             * is important as there are cases that Dynomite reaches the 1M
             * messages limit and is unaccessible after bootstrap.
             *
             * Fix: the success case was previously logged at ERROR level and a
             * failing check was silently ignored.
             */
            if (this.dynProcess.dynomiteCheck()) {
                logger.info("Dynomite is up since warm up succeeded");
            } else {
                logger.error("Dynomite is not accessible after warm up");
            }

            // finalizing bootstrap
            this.state.setBootstrapping(false);
        }
    }

    @Override
    public String getName() {
        return JOBNAME;
    }

    /** @return timer that runs this task once every 10 minutes. */
    public static TaskTimer getTimer() {
        return new SimpleTimer(JOBNAME, 10 * 60 * 1000);
    }

    /**
     * Find hostnames of instances in the local DC that own the same token(s)
     * as this node but live in a different rack; these are the candidate
     * warm-up sources.
     */
    private String[] getLocalPeersWithSameTokensRange() {
        String tokens = ii.getTokens();
        logger.info("Warming up node's own token(s) : " + tokens);
        List<AppsInstance> instances = appsInstanceFactory.getLocalDCIds(envVariables.getDynomiteClusterName(),
                envVariables.getRegion());
        List<String> peers = new ArrayList<String>();
        for (AppsInstance ins : instances) {
            logger.info("Instance's token(s); " + ins.getToken());
            // same token, different rack than ours
            if (!ins.getRack().equals(ii.getInstance().getRack()) && ins.getToken().equals(tokens)) {
                peers.add(ins.getHostName());
            }
        }
        logger.info("peers size: " + peers.size());
        return peers.toArray(new String[0]);
    }
}
| 3,087 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager/storage/RedisStorageProxy.java | package com.netflix.dynomitemanager.storage;
import com.google.common.base.Charsets;
import com.google.common.base.Splitter;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.dynomitemanager.config.FloridaConfig;
import com.netflix.nfsidecar.scheduler.SimpleTimer;
import com.netflix.nfsidecar.scheduler.Task;
import com.netflix.nfsidecar.scheduler.TaskTimer;
import com.netflix.nfsidecar.utils.Sleeper;
import com.netflix.runtime.health.api.Health;
import com.netflix.runtime.health.api.HealthIndicator;
import com.netflix.runtime.health.api.HealthIndicatorCallback;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.exceptions.JedisConnectionException;
import redis.clients.jedis.exceptions.JedisDataException;
import static java.nio.file.StandardCopyOption.COPY_ATTRIBUTES;
import static java.nio.file.StandardOpenOption.TRUNCATE_EXISTING;
import static java.nio.file.StandardOpenOption.WRITE;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Scanner;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
//TODOs: we should talk to admin port (22222) instead of 8102 for both local
//and peer
@Singleton
public class RedisStorageProxy extends Task implements StorageProxy, HealthIndicator {
// Engine name reported via getEngine().
private static final String DYNO_REDIS = "redis";
// redis.conf rewritten by updateConfiguration().
private static final String DYNO_REDIS_CONF_PATH = "/apps/nfredis/conf/redis.conf";
// Local Redis endpoint used for all sidecar-to-storage calls.
private static final String REDIS_ADDRESS = "127.0.0.1";
private static final int REDIS_PORT = 22122;
// 2 GB expressed in KB.
private static final long GB_2_IN_KB = 2L * 1024L * 1024L;
private static final String PROC_MEMINFO_PATH = "/proc/meminfo";
// Captures the MemTotal value (KB) from /proc/meminfo.
private static final Pattern MEMINFO_PATTERN = Pattern.compile("MemTotal:\\s*([0-9]*)");
// Engine start/stop scripts returned to callers of the StorageProxy interface.
private final String REDIS_START_SCRIPT = "/apps/nfredis/bin/launch_nfredis.sh";
private final String REDIS_STOP_SCRIPT = "/apps/nfredis/bin/kill_redis.sh";
// Regexes matching redis.conf directives that updateConfiguration() rewrites.
private static final String REDIS_CONF_MAXMEMORY_PATTERN = "^maxmemory\\s*[0-9][0-9]*[a-zA-Z]*";
private static final String REDIS_CONF_APPENDONLY = "^appendonly\\s*[a-zA-Z]*";
// NOTE(review): the leading space after '^' looks unintentional - confirm it
// matches the actual "appendfsync" line in redis.conf.
private static final String REDIS_CONF_APPENDFSYNC = "^ appendfsync\\s*[a-zA-Z]*";
private static final String REDIS_CONF_AUTOAOFREWRITEPERCENTAGE = "^auto-aof-rewrite-percentage\\s*[0-9][0-9]*[a-zA-Z]*";
private static final String REDIS_CONF_STOP_WRITES_BGSAVE_ERROR = "^stop-writes-on-bgsave-error\\s*[a-zA-Z]*";
private static final String REDIS_CONF_SAVE_SCHEDULE = "^\\ssave\\s[0-9]*\\s[0-9]*";
private static final String REDIS_CONF_UNIXSOCKET = "^\\s*unixsocket\\s*[/0-9a-zA-Z._]*";
private static final String REDIS_CONF_UNIXSOCKETPERM = "^\\s*unixsocketperm\\s*[0-9]*";
private static final String REDIS_CONF_PUBSUB = "notify-keyspace-events";
private static final String REDIS_CONF_DAEMONIZE = "^daemonize\\s*[a-zA-Z]*";
private static final String REDIS_CONF_ZSET_MAXZIPLISTVALUE = "^zset-max-ziplist-value *[0-9]*";
private static final Logger logger = LoggerFactory.getLogger(RedisStorageProxy.class);
public static final String JOB_TASK_NAME = "REDIS HEALTH TRACKER";
// Cached connection to the local Redis; managed by localRedisConnect/Disconnect.
private Jedis localJedis;
// Last liveness result refreshed by the periodic execute() task.
private boolean redisHealth = false;
private final FloridaConfig config;
@Inject
private Sleeper sleeper;
/**
 * @param config Florida configuration driving persistence and conf rewriting.
 */
@Inject
public RedisStorageProxy(FloridaConfig config) {
    this.config = config;
    // connect();
}
/** @return timer that runs the Redis health tracker task every 15 seconds. */
public static TaskTimer getTimer() {
    return new SimpleTimer(JOB_TASK_NAME, 15L * 1000);
}
/** @return the scheduler name of this task. */
@Override
public String getName() {
    return JOB_TASK_NAME;
}
/** Periodic task body: refresh the cached Redis liveness flag. */
@Override
public void execute() throws Exception {
    redisHealth = isAlive();
}
/**
 * Open (and cache) a connection to the local Redis via JedisUtils, unless one
 * is already cached.
 */
private void localRedisConnect() {
    if (this.localJedis != null) {
        return; // already connected
    }
    logger.info("Connecting to Redis.");
    this.localJedis = JedisUtils.connect(REDIS_ADDRESS, REDIS_PORT);
}
/** Tear down the cached local Redis connection, if any. */
private void localRedisDisconnect() {
    if (this.localJedis == null) {
        return; // nothing to close
    }
    logger.info("Disconnecting from Redis.");
    this.localJedis.disconnect();
    this.localJedis = null;
}
/**
 * Connect the local Redis as a replica of the peer with the same token,
 * starting the warm-up replication. Blocks, retrying indefinitely, until the
 * SLAVEOF command is acknowledged.
 *
 * @param peer peer hostname/address to replicate from
 * @param port peer Redis port
 */
private void startPeerSync(String peer, int port) {
    boolean isDone = false;
    localRedisConnect();
    /*
     * Iterate until we succeed the SLAVEOF command with some sleep time in
     * between. We disconnect and reconnect if the jedis connection fails.
     */
    while (!isDone) {
        try {
            // only sync from one peer for now
            isDone = (this.localJedis.slaveof(peer, port) != null);
            sleeper.sleepQuietly(1000);
        } catch (JedisConnectionException e) {
            logger.warn("JedisConnection Exception in SLAVEOF peer " + peer + " port " + port + " Exception: "
                    + e.getMessage());
            logger.warn("Trying to reconnect...");
            localRedisDisconnect();
            localRedisConnect();
        } catch (Exception e) {
            // NOTE(review): non-connection errors are logged and retried
            // forever; confirm an unbounded retry loop is intended here.
            logger.error("Error: " + e.getMessage());
        }
    }
    // clean up the Redis connection.
    localRedisDisconnect();
}
/**
 * Turn off Redis' slave replication and switch from slave to master
 * (SLAVEOF NO ONE). Blocks, retrying indefinitely, until acknowledged.
 */
@Override
public void stopPeerSync() {
    boolean isDone = false;
    localRedisConnect();
    /*
     * Iterate until we succeed the SLAVE NO ONE command with some sleep
     * time in between. We disconnect and reconnect if the jedis connection
     * fails.
     */
    while (!isDone) {
        logger.info("calling SLAVEOF NO ONE");
        try {
            isDone = (this.localJedis.slaveofNoOne() != null);
            sleeper.sleepQuietly(1000);
        } catch (JedisConnectionException e) {
            logger.warn("JedisConnection Exception in SLAVEOF NO ONE: " + e.getMessage());
            logger.warn("Trying to reconnect...");
            localRedisDisconnect();
            localRedisConnect();
        } catch (Exception e) {
            // NOTE(review): as in startPeerSync, non-connection errors keep
            // the loop running forever - confirm intended.
            logger.error("Error: " + e.getMessage());
        }
    }
    // clean up the Redis connection.
    localRedisDisconnect();
}
/** @return short engine name, always "redis" for this proxy. */
@Override
public String getEngine() {
    return DYNO_REDIS;
}
/** @return numeric engine identifier (0 = Redis). */
@Override
public int getEngineNumber() {
    return 0;
}
/**
 * Trigger a Redis persistence pass (BGREWRITEAOF when persistence type is
 * "aof", otherwise BGSAVE) and poll INFO until it completes.
 *
 * @return true when persistence completed; false on connection failure or
 *         after ~20 polls (30s apart) without completion.
 *
 * NOTE(review): the inner {@code while (true)} only exits via the
 * in-progress INFO field; if that field never appears in the INFO output the
 * loop would spin indefinitely - confirm the field is always present.
 */
@Override
public boolean takeSnapshot() {
    localRedisConnect();
    try {
        if (config.persistenceType().equals("aof")) {
            logger.info("starting Redis BGREWRITEAOF");
            this.localJedis.bgrewriteaof();
        } else {
            logger.info("starting Redis BGSAVE");
            this.localJedis.bgsave();
        }
        /*
         * We want to check if a bgrewriteaof was already scheduled or it
         * has started. If a bgrewriteaof was already scheduled then we
         * should get an error from Redis but should continue. If a
         * bgrewriteaof has started, we should also continue. Otherwise we
         * may be having old data in the disk.
         */
    } catch (JedisDataException e) {
        // A persistence pass already pending/in progress is fine; anything
        // else is rethrown.
        String scheduled = null;
        if (!config.persistenceType().equals("aof")) {
            scheduled = "ERR Background save already in progress";
        } else {
            scheduled = "ERR Background append only file rewriting already in progress";
        }
        if (!e.getMessage().equals(scheduled)) {
            throw e;
        }
        logger.warn("Redis: There is already a pending BGREWRITEAOF/BGSAVE.");
    } catch (JedisConnectionException e) {
        logger.error("Redis: BGREWRITEAOF/BGSAVE cannot be completed");
        localRedisDisconnect();
        return false;
    }
    // Poll INFO until the in-progress flag drops to 0.
    String peerRedisInfo = null;
    int retry = 0;
    try {
        while (true) {
            peerRedisInfo = this.localJedis.info();
            Iterable<String> result = Splitter.on('\n').split(peerRedisInfo);
            String pendingPersistence = null;
            for (String line : result) {
                if ((line.startsWith("aof_rewrite_in_progress") && config.persistenceType().equals("aof"))
                        || (line.startsWith("rdb_bgsave_in_progress") && !config.persistenceType().equals("aof"))) {
                    String[] items = line.split(":");
                    pendingPersistence = items[1].trim();
                    if (pendingPersistence.equals("0")) {
                        logger.info("Redis: BGREWRITEAOF/BGSAVE completed.");
                        return true;
                    } else {
                        retry++;
                        logger.warn("Redis: BGREWRITEAOF/BGSAVE pending. Sleeping 30 secs...");
                        sleeper.sleepQuietly(30000);
                        if (retry > 20) {
                            return false;
                        }
                    }
                }
            }
        }
    } catch (JedisConnectionException e) {
        logger.error("Cannot connect to Redis to INFO to determine if BGREWRITEAOF/BGSAVE completed ");
    } finally {
        localRedisDisconnect();
    }
    logger.error("Redis BGREWRITEAOF/BGSAVE was not successful.");
    return false;
}
/**
 * Check whether Redis has finished loading its AOF/RDB from disk by reading
 * the "loading" field of INFO.
 *
 * @return true when loading has completed; false on connection failure or
 *         when the pending state persists.
 *
 * NOTE(review): INFO is fetched only once; the retry counter and 30s sleeps
 * operate on that single snapshot's lines, so the state is never re-read
 * after sleeping - confirm whether a refresh inside the loop was intended.
 */
@Override
public boolean loadingData() {
    localRedisConnect();
    logger.info("loading AOF from the drive");
    String peerRedisInfo = null;
    int retry = 0;
    try {
        peerRedisInfo = localJedis.info();
        Iterable<String> result = Splitter.on('\n').split(peerRedisInfo);
        String pendingAOF = null;
        for (String line : result) {
            if (line.startsWith("loading")) {
                String[] items = line.split(":");
                pendingAOF = items[1].trim();
                if (pendingAOF.equals("0")) {
                    logger.info("Redis: memory loading completed.");
                    return true;
                } else {
                    retry++;
                    logger.warn("Redis: memory pending. Sleeping 30 secs...");
                    sleeper.sleepQuietly(30000);
                    if (retry > 20) {
                        return false;
                    }
                }
            }
        }
    } catch (JedisConnectionException e) {
        logger.error("Cannot connect to Redis to INFO to checking loading AOF");
    } finally {
        localRedisDisconnect();
    }
    return false;
}
/**
 * Liveness check against the local Redis (PING with bounded retries).
 * Not using the cached localJedis variable as it can be used by
 * ProcessMonitorTask as well.
 */
@Override
public boolean isAlive() {
    return JedisUtils.isAliveWithRetry(REDIS_ADDRESS, REDIS_PORT);
}
/** Uptime is not tracked for the local Redis; always returns 0. */
public long getUptime() {
    return 0;
}
/**
 * Candidate warm-up source: a live peer, its open Jedis connection, and the
 * peer's reported uptime (used to prefer the longest-lived peer).
 * NOTE(review): holds no reference to the enclosing instance; could be static.
 */
private class AlivePeer {
    String selectedPeer;
    Jedis selectedJedis;
    Long upTime;
}
/**
 * Wrap a connected peer in an AlivePeer, parsing its INFO output to record
 * uptime.
 *
 * @param peer peer hostname/address
 * @param peerJedis open Jedis connection to that peer
 * @return the populated AlivePeer; {@code null} when the uptime key is
 *         missing from INFO; an AlivePeer with null upTime if parsing threw.
 */
private AlivePeer peerNodeSelection(String peer, Jedis peerJedis) {
    AlivePeer currentAlivePeer = new AlivePeer();
    currentAlivePeer.selectedPeer = peer;
    currentAlivePeer.selectedJedis = peerJedis;
    // Parse the INFO command output from the peer node.
    String s = peerJedis.info();
    RedisInfoParser infoParser = new RedisInfoParser();
    InputStreamReader reader = new InputStreamReader(new ByteArrayInputStream(s.getBytes()));
    try {
        Map<String, Long> allInfo = infoParser.parse(reader);
        // Direct lookup replaces the previous linear scan over the key set.
        Long upTime = allInfo.get("Redis_Server_uptime_in_seconds");
        if (upTime == null) {
            logger.warn("uptime_in_seconds was not found in Redis info");
            return null;
        }
        currentAlivePeer.upTime = upTime;
        logger.info("Alive Peer node [" + peer + "] is up for " + currentAlivePeer.upTime + " seconds");
    } catch (Exception e) {
        // Fix: log with cause instead of printStackTrace().
        logger.error("Failed to parse INFO from peer [" + peer + "]", e);
    }
    return currentAlivePeer;
}
// probably use our Retries Util here
/**
 * Warm up the local storage by replicating from the longest-lived alive peer
 * that owns the same token(s), then monitor replication offsets until the
 * node is (close enough to) in sync or a failure condition is hit.
 *
 * @param peers candidate peer hostnames owning the same token
 * @return a Bootstrap status describing the outcome
 *
 * NOTE(review): the liveness check below calls isAlive(), which pings the
 * LOCAL Redis, not the peer under consideration - confirm intended.
 * NOTE(review): peerNodeSelection() may return null (missing uptime key), in
 * which case the dereference of currentAlivePeer below would NPE.
 */
@Override
public Bootstrap warmUpStorage(String[] peers) {
    AlivePeer longestAlivePeer = new AlivePeer();
    Jedis peerJedis = null;
    // Looking into the peers with the same token
    for (String peer : peers) {
        logger.info("Peer node [" + peer + "] has the same token!");
        peerJedis = JedisUtils.connect(peer, REDIS_PORT);
        // Checking if there are peers, and if so if they are alive
        if (peerJedis != null && isAlive()) {
            AlivePeer currentAlivePeer = peerNodeSelection(peer, peerJedis);
            // Checking the one with the longest up time. Disconnect the one
            // that is not the longest.
            if (currentAlivePeer.selectedJedis == null) {
                logger.error("Cannot find uptime_in_seconds in peer " + peer);
                return Bootstrap.CANNOT_CONNECT_FAIL;
            } else if (longestAlivePeer.selectedJedis == null) {
                longestAlivePeer = currentAlivePeer;
            } else if (currentAlivePeer.upTime > longestAlivePeer.upTime) {
                longestAlivePeer.selectedJedis.disconnect();
                longestAlivePeer = currentAlivePeer;
            }
        }
    }
    // We check if the select peer is alive and we connect to it.
    if (longestAlivePeer.selectedJedis == null) {
        logger.error("Cannot connect to peer node to bootstrap");
        return Bootstrap.CANNOT_CONNECT_FAIL;
    } else {
        String alivePeer = longestAlivePeer.selectedPeer;
        peerJedis = longestAlivePeer.selectedJedis;

        logger.info("Issue slaveof command on peer [" + alivePeer + "] and port [" + REDIS_PORT + "]");
        startPeerSync(alivePeer, REDIS_PORT);

        long diff = 0;
        long previousDiff = 0;
        short retry = 0;
        short numErrors = 0;
        long startTime = System.currentTimeMillis();

        // Conditions under which warm up will end:
        // 1. number of Jedis errors are 5.
        // 2. number of consecutive increases of offset differences (caused
        //    when client produces high load).
        // 3. the difference between offsets is very small or zero (success).
        // 4. warm up takes more than FP defined minutes (default 20 min).
        // 5. Dynomite has started and is healthy.
        while (numErrors < 5) {
            // sleep 10 seconds in between checks
            sleeper.sleepQuietly(10000);
            try {
                diff = canPeerSyncStop(peerJedis, startTime);
            } catch (Exception e) {
                numErrors++;
            }
            // Diff meaning:
            // a. diff == 0  --> we are either in sync or close to sync.
            // b. diff == -1 --> there was an error in sync process.
            // c. diff == -2 --> offset is still zero, peer syncing has not started.
            // d. diff == -3 --> warm up lasted more than bootstrapTime
            if (diff == 0) {
                break;
            } else if (diff == -1) {
                logger.error("There was an error in the warm up process - do NOT start Dynomite");
                peerJedis.disconnect();
                return Bootstrap.WARMUP_ERROR_FAIL;
            } else if (diff == -2) {
                // sync not started yet: restart the bootstrap-time clock
                startTime = System.currentTimeMillis();
            } else if (diff == -3) {
                peerJedis.disconnect();
                return Bootstrap.EXPIRED_BOOTSTRAPTIME_FAIL;
            }
            // Exit conditions:
            // a. retry more than 5 times continuously and if the diff is
            //    larger than the previous diff.
            if (previousDiff < diff) {
                logger.info("Previous diff (" + previousDiff + ") was smaller than current diff (" + diff
                        + ") ---> Retry effort: " + retry);
                retry++;
                if (retry == 10) {
                    logger.error("Reached 10 consecutive retries, peer syncing cannot complete");
                    peerJedis.disconnect();
                    return Bootstrap.RETRIES_FAIL;
                }
            } else {
                retry = 0;
            }
            previousDiff = diff;
        }
        peerJedis.disconnect();

        if (diff > 0) {
            logger.info("Stopping peer syncing with difference: " + diff);
        }
    }
    return Bootstrap.IN_SYNC_SUCCESS;
}
/**
 * Reset storage back to master if a failed warm up left it replicating as a
 * slave.
 *
 * @return true once the role field was found in INFO (replication stopped if
 *         the role was "slave"); false when Redis could not be queried.
 */
@Override
public boolean resetStorage() {
    logger.info("Checking if Storage needs to be reset to master");
    localRedisConnect();

    String localRedisInfo;
    try {
        localRedisInfo = localJedis.info();
    } catch (JedisConnectionException e) {
        // One reconnect attempt before giving up.
        try {
            localRedisConnect();
            localRedisInfo = localJedis.info();
        } catch (JedisConnectionException ex) {
            logger.error("Cannot connect to Redis");
            return false;
        }
    }

    // Scan the INFO output for the replication role.
    for (String infoLine : Splitter.on('\n').split(localRedisInfo)) {
        if (!infoLine.startsWith("role")) {
            continue;
        }
        String currentRole = infoLine.split(":")[1].trim();
        if (currentRole.equals("slave")) {
            logger.info("Redis: Stop replication. Switch from slave to master");
            stopPeerSync();
        }
        return true;
    }
    return false;
}
/**
 * Determine whether the warm up (peer sync) process can stop by comparing the
 * peer's master replication offset against our slave offset.
 *
 * @param peerJedis Jedis connection with the peer node we replicate from
 * @param startTime wall-clock millis when this sync attempt (re)started
 * @return 0 when offsets are within the allowable diff (in sync);
 *         -1 when the slave offset could not be parsed (sync error);
 *         -2 when the slave offset is still zero (sync not started);
 *         -3 when warm up exceeded the maximum bootstrap time;
 *         otherwise the positive offset difference.
 * @throws RedisSyncException declared for callers; not thrown here.
 */
private Long canPeerSyncStop(Jedis peerJedis, long startTime) throws RedisSyncException {
    if (System.currentTimeMillis() - startTime > config.getMaxTimeToBootstrap()) {
        logger.warn("Warm up takes more than " + config.getMaxTimeToBootstrap() / 60000 + " minutes --> moving on");
        return (long) -3;
    }

    logger.info("Checking for peer syncing");
    String peerRedisInfo = peerJedis.info();

    Long masterOffset = -1L;
    Long slaveOffset = -1L;

    // get peer's repl offset
    Iterable<String> result = Splitter.on('\n').split(peerRedisInfo);
    for (String line : result) {
        if (line.startsWith("master_repl_offset")) {
            String[] items = line.split(":");
            logger.info(items[0] + ": " + items[1]);
            masterOffset = Long.parseLong(items[1].trim());
        }
        // slave0:ip=10.99.160.121,port=22122,state=online,offset=17279,lag=0
        if (line.startsWith("slave0")) {
            String[] items = line.split(",");
            for (String item : items) {
                if (item.startsWith("offset")) {
                    String[] offset = item.split("=");
                    logger.info(offset[0] + ": " + offset[1]);
                    slaveOffset = Long.parseLong(offset[1].trim());
                }
            }
        }
    }

    if (slaveOffset == -1) {
        logger.error("Slave offset could not be parsed --> check memory overcommit configuration");
        return (long) -1;
    } else if (slaveOffset == 0) {
        logger.info("Slave offset is zero ---> Redis master node still dumps data to the disk");
        return (long) -2;
    }

    Long diff = Math.abs(masterOffset - slaveOffset);
    logger.info("masterOffset: " + masterOffset + " slaveOffset: " + slaveOffset + " current Diff: " + diff
            + " allowable diff: " + config.getAllowableBytesSyncDiff());
    // Allowable bytes sync diff can be configured by a Fast Property.
    // If the difference is very small, then we return zero.
    if (diff < config.getAllowableBytesSyncDiff()) {
        logger.info("master and slave are in sync!");
        return (long) 0;
    }
    // Fix: removed the unreachable trailing `else if (slaveOffset == 0)`
    // branch - that case already returned -2 above.
    return diff;
}
/** Exception reserved for failures during peer syncing. */
private class RedisSyncException extends Exception {
    private static final long serialVersionUID = -7736577871204223637L;
}
    /**
     * Rewrite the storage engine's configuration file in place to match the
     * current Florida configuration (max memory, unix socket, persistence
     * mode, pub/sub notifications, etc.).
     *
     * For the ARDB/RocksDB engine the rewrite is delegated to
     * {@link ArdbRocksDbRedisCompatible}; otherwise redis.conf is edited
     * line-by-line, preserving all comments so the file stays easy to diff
     * against the AMI-baked original.
     *
     * @throws IOException if the conf file cannot be read, backed up, or written
     */
    public void updateConfiguration() throws IOException {
        long storeMaxMem = getStoreMaxMem();
        if (config.getRedisCompatibleEngine().equals(ArdbRocksDbRedisCompatible.DYNO_ARDB)) {
            ArdbRocksDbRedisCompatible rocksDb = new ArdbRocksDbRedisCompatible(storeMaxMem, config);
            rocksDb.updateConfiguration(ArdbRocksDbRedisCompatible.DYNO_ARDB_CONF_PATH);
        } else {
            // Updating the file.
            logger.info("Updating Redis conf: " + DYNO_REDIS_CONF_PATH);
            Path confPath = Paths.get(DYNO_REDIS_CONF_PATH);
            Path backupPath = Paths.get(DYNO_REDIS_CONF_PATH + ".bkp");
            // backup the original baked in conf only and not subsequent updates
            if (!Files.exists(backupPath)) {
                logger.info("Backing up baked in Redis config at: " + backupPath);
                Files.copy(confPath, backupPath, COPY_ATTRIBUTES);
            }
            if (config.isPersistenceEnabled() && config.persistenceType().equals("aof")) {
                logger.info("Persistence with AOF is enabled");
            } else if (config.isPersistenceEnabled() && !config.persistenceType().equals("aof")) {
                logger.info("Persistence with RDB is enabled");
            }
            // Not using Properties file to load as we want to retain all
            // comments,
            // and for easy diffing with the ami baked version of the conf file.
            List<String> lines = Files.readAllLines(confPath, Charsets.UTF_8);
            // Guard so only the FIRST "save" schedule line is replaced in the
            // RDB branch below; later matches are left untouched.
            boolean saveReplaced = false;
            for (int i = 0; i < lines.size(); i++) {
                String line = lines.get(i);
                boolean isComment = false;
                if (line.startsWith("#")) {
                    // A commented-out directive is only considered when it is a
                    // save schedule or a unixsocket/unixsocketperm setting —
                    // those may need to be re-enabled; every other comment is
                    // left as-is.
                    isComment = true;
                    String withoutHash = line.substring(1);
                    if (!withoutHash.matches(REDIS_CONF_SAVE_SCHEDULE) && !withoutHash.matches(REDIS_CONF_UNIXSOCKET)
                            && !withoutHash.matches(REDIS_CONF_UNIXSOCKETPERM)) {
                        continue;
                    }
                    line = withoutHash;
                }
                if (line.matches(REDIS_CONF_UNIXSOCKET)) {
                    String unixSocket;
                    // This empty check is to make sure we disable unixsocket
                    // when the FP is deleted.
                    // Mostly this use case will not arise but the code is more
                    // complete now.
                    if (config.getRedisUnixPath().isEmpty()) {
                        unixSocket = "# unixsocket /tmp/redis.sock";
                        logger.info("Resetting Redis property: " + unixSocket);
                    } else {
                        unixSocket = "unixsocket " + config.getRedisUnixPath();
                        logger.info("Updating Redis property: " + unixSocket);
                    }
                    lines.set(i, unixSocket);
                }
                if (line.matches(REDIS_CONF_UNIXSOCKETPERM)) {
                    String unixSocketPerm;
                    // Permissions follow the unixsocket setting: commented out
                    // when the socket is disabled, world-readable otherwise.
                    if (config.getRedisUnixPath().isEmpty()) {
                        unixSocketPerm = "# unixsocketperm 700";
                        logger.info("Resetting Redis property: " + unixSocketPerm);
                    } else {
                        unixSocketPerm = "unixsocketperm 755";
                        logger.info("Updating Redis property: " + unixSocketPerm);
                    }
                    lines.set(i, unixSocketPerm);
                }
                if (line.matches(REDIS_CONF_MAXMEMORY_PATTERN)) {
                    // storeMaxMem is in KB (see getStoreMaxMem).
                    String maxMemConf = "maxmemory " + storeMaxMem + "kb";
                    logger.info("Updating Redis property: " + maxMemConf);
                    lines.set(i, maxMemConf);
                }
                if (line.matches(REDIS_CONF_DAEMONIZE)) {
                    String daemonize = "daemonize yes";
                    logger.info("Updating Redis property: " + daemonize);
                    lines.set(i, daemonize);
                }
                // Pub/sub
                if (line.contains(REDIS_CONF_PUBSUB)) {
                    String notifyKeySpaceEvents = REDIS_CONF_PUBSUB + " \"" + config.getKeySpaceEvents() + "\"";
                    logger.info("Updating Redis Property: " + notifyKeySpaceEvents);
                    lines.set(i, notifyKeySpaceEvents);
                }
                // -1 is treated as "not configured": leave the baked-in value.
                if (line.matches(REDIS_CONF_ZSET_MAXZIPLISTVALUE) && config.getRedisMaxZsetZiplistValue() != -1) {
                    String zsetMaxZiplistValue = "zset-max-ziplist-value " + config.getRedisMaxZsetZiplistValue();
                    logger.info("Updating Redis property: " + zsetMaxZiplistValue);
                    lines.set(i, zsetMaxZiplistValue);
                }
                // Persistence configuration
                if (config.isPersistenceEnabled() && config.persistenceType().equals("aof")) {
                    if (line.matches(REDIS_CONF_APPENDONLY)) {
                        String appendOnly = "appendonly yes";
                        logger.info("Updating Redis property: " + appendOnly);
                        lines.set(i, appendOnly);
                    } else if (line.matches(REDIS_CONF_APPENDFSYNC)) {
                        String appendfsync = "appendfsync no";
                        logger.info("Updating Redis property: " + appendfsync);
                        lines.set(i, appendfsync);
                    } else if (line.matches(REDIS_CONF_AUTOAOFREWRITEPERCENTAGE)) {
                        String autoAofRewritePercentage = "auto-aof-rewrite-percentage 100";
                        logger.info("Updating Redis property: " + autoAofRewritePercentage);
                        lines.set(i, autoAofRewritePercentage);
                    } else if (line.matches(REDIS_CONF_SAVE_SCHEDULE)) {
                        String saveSchedule = "# save 60 10000"; // if we select
                        // AOF, it is
                        // better to
                        // stop
                        // RDB
                        logger.info("Updating Redis property: " + saveSchedule);
                        lines.set(i, saveSchedule);
                    }
                } else if (config.isPersistenceEnabled() && !config.persistenceType().equals("aof")) {
                    if (line.matches(REDIS_CONF_STOP_WRITES_BGSAVE_ERROR)) {
                        String bgsaveerror = "stop-writes-on-bgsave-error no";
                        logger.info("Updating Redis property: " + bgsaveerror);
                        lines.set(i, bgsaveerror);
                    } else if (line.matches(REDIS_CONF_SAVE_SCHEDULE) && !saveReplaced) {
                        saveReplaced = true;
                        String saveSchedule = "save 60 10000"; // after 60 sec
                        // if at
                        // least 10000
                        // keys
                        // changed
                        logger.info("Updating Redis property: " + saveSchedule);
                        lines.set(i, saveSchedule);
                    } else if (line.matches(REDIS_CONF_APPENDONLY)) { // if we
                        // select
                        // RDB,
                        // it is
                        // better
                        // to
                        // stop
                        // AOF
                        String appendOnly = "appendonly no";
                        logger.info("Updating Redis property: " + appendOnly);
                        lines.set(i, appendOnly);
                    }
                }
            }
            Files.write(confPath, lines, Charsets.UTF_8, WRITE, TRUNCATE_EXISTING);
        }
    }
/**
* Get the maximum amount of memory available for Redis or Memcached.
*
* @return the maximum amount of storage available for Redis or Memcached in
* KB
*/
public long getStoreMaxMem() {
int memPct = config.getStorageMaxMemoryPercent();
// Long is big enough for the amount of ram is all practical systems
// that we deal with.
long totalMem = getTotalAvailableSystemMemory();
long storeMaxMem = (totalMem * memPct) / 100;
storeMaxMem = ((totalMem - storeMaxMem) > GB_2_IN_KB) ? storeMaxMem : (totalMem - GB_2_IN_KB);
logger.info(String.format("totalMem: %s setting storage max mem to %s", totalMem, storeMaxMem));
return storeMaxMem;
}
/**
* Get the amount of memory available on this instance.
*
* @return total available memory (RAM) on instance in KB
*/
public long getTotalAvailableSystemMemory() {
String memInfo;
try {
memInfo = new Scanner(new File(PROC_MEMINFO_PATH)).useDelimiter("\\Z").next();
} catch (FileNotFoundException e) {
String errMsg = String.format("Unable to find %s file for retrieving memory info.", PROC_MEMINFO_PATH);
logger.error(errMsg);
throw new RuntimeException(errMsg);
}
Matcher matcher = MEMINFO_PATTERN.matcher(memInfo);
if (matcher.find()) {
try {
return Long.parseLong(matcher.group(1));
} catch (NumberFormatException e) {
logger.info("Failed to parse long", e);
}
}
String errMsg = String.format("Could not extract total mem using pattern %s from:\n%s ", MEMINFO_PATTERN,
memInfo);
logger.error(errMsg);
throw new RuntimeException(errMsg);
}
@Override
public String getStartupScript() {
if (config.getRedisCompatibleEngine().equals(ArdbRocksDbRedisCompatible.DYNO_ARDB)) {
return ArdbRocksDbRedisCompatible.ARDB_ROCKSDB_START_SCRIPT;
}
return REDIS_START_SCRIPT;
}
@Override
public String getStopScript() {
if (config.getRedisCompatibleEngine().equals(ArdbRocksDbRedisCompatible.DYNO_ARDB)) {
return ArdbRocksDbRedisCompatible.ARDB_ROCKSDB_STOP_SCRIPT;
}
return REDIS_STOP_SCRIPT;
}
    /**
     * @return the address Redis listens on (a fixed constant)
     */
    @Override
    public String getIpAddress() {
        return REDIS_ADDRESS;
    }
    /**
     * @return the TCP port Redis listens on (a fixed constant)
     */
    @Override
    public int getPort() {
        return REDIS_PORT;
    }
@Override
public void check(HealthIndicatorCallback healthCallback) {
if (redisHealth) {
healthCallback.inform(Health.healthy().withDetail("Redis", "All good!").build());
} else {
logger.info("Reporting Redis is down to Health check callback");
healthCallback.inform(Health.unhealthy().withDetail("Redis", "Down!").build());
}
}
    /**
     * @return the configured Redis unix-domain-socket path; may be empty,
     *         which elsewhere in this class means the socket is disabled
     */
    public String getUnixPath() {
        return config.getRedisUnixPath();
    }
}
| 3,088 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager/storage/StorageProcessManager.java | package com.netflix.dynomitemanager.storage;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Lists;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.dynomitemanager.config.InstanceState;
import com.netflix.nfsidecar.utils.Sleeper;
/**
* Start or stop the storage engine, such as Redis or Memcached.
*/
@Singleton
public class StorageProcessManager {
private static final Logger logger = LoggerFactory.getLogger(StorageProcessManager.class);
private static final String SUDO_STRING = "/usr/bin/sudo";
private static final int SCRIPT_EXECUTE_WAIT_TIME_MS = 5000;
private final Sleeper sleeper;
private final InstanceState instanceState;
private final StorageProxy storageProxy;
@Inject
public StorageProcessManager(Sleeper sleeper, InstanceState instanceState, StorageProxy storageProxy) {
this.sleeper = sleeper;
this.instanceState = instanceState;
this.storageProxy = storageProxy;
}
protected void setStorageEnv(Map<String, String> env) {
env.put("FLORIDA_STORAGE", String.valueOf(this.storageProxy.getEngine()));
}
/**
* Start the storage engine (Redis, Memcached).
*
* @throws IOException
*/
public void start() throws IOException {
logger.info(String.format("Starting Storage process"));
ProcessBuilder startBuilder = process(getStartCommand());
setStorageEnv(startBuilder.environment());
Process starter = startBuilder.start();
try {
sleeper.sleepQuietly(SCRIPT_EXECUTE_WAIT_TIME_MS);
int code = starter.exitValue();
if (code == 0) {
logger.info("Storage process has been started");
instanceState.setStorageProxyAlive(true);
} else {
logger.error("Unable to start Storage process. Error code: {}", code);
}
logProcessOutput(starter);
} catch (Exception e) {
logger.warn("Starting Storage process has an error", e);
}
}
/**
* A common class to initialize a ProcessBuilder
* @param executeCommand
* @return the process to start
* @throws IOException
*/
private ProcessBuilder process(List<String> executeCommand) throws IOException {
List<String> command = Lists.newArrayList();
if (!"root".equals(System.getProperty("user.name"))) {
command.add(SUDO_STRING);
command.add("-n");
command.add("-E");
}
command.addAll(executeCommand);
ProcessBuilder actionStorage = new ProcessBuilder(command);
actionStorage.directory(new File("/"));
actionStorage.redirectErrorStream(true);
return actionStorage;
}
/**
* Getting the start command
* @return
*/
private List<String> getStartCommand() {
List<String> startCmd = new LinkedList<String>();
for (String param : storageProxy.getStartupScript().split(" ")) {
if (StringUtils.isNotBlank(param))
startCmd.add(param);
}
return startCmd;
}
/**
* Getting the stop command
* @return
*/
private List<String> getStopCommand() {
List<String> stopCmd = new LinkedList<String>();
for (String param : storageProxy.getStopScript().split(" ")) {
if (StringUtils.isNotBlank(param))
stopCmd.add(param);
}
return stopCmd;
}
private void logProcessOutput(Process p) {
try {
final String stdOut = readProcessStream(p.getInputStream());
final String stdErr = readProcessStream(p.getErrorStream());
logger.info("std_out: {}", stdOut);
logger.info("std_err: {}", stdErr);
} catch (IOException ioe) {
logger.warn("Failed to read the std out/err streams", ioe);
}
}
String readProcessStream(InputStream inputStream) throws IOException {
final byte[] buffer = new byte[512];
final ByteArrayOutputStream baos = new ByteArrayOutputStream(buffer.length);
int cnt;
while ((cnt = inputStream.read(buffer)) != -1)
baos.write(buffer, 0, cnt);
return baos.toString();
}
/**
* Stop the storage engine (Redis, Memcached).
*
* @throws IOException
*/
public void stop() throws IOException {
logger.info("Stopping storage process...");
ProcessBuilder stopBuilder = process(getStopCommand());
Process stopper = stopBuilder.start();
sleeper.sleepQuietly(SCRIPT_EXECUTE_WAIT_TIME_MS);
try {
int code = stopper.exitValue();
if (code == 0) {
logger.info("Storage process has been stopped");
instanceState.setStorageProxyAlive(false);
} else {
logger.error("Unable to stop storage process. Error code: {}", code);
logProcessOutput(stopper);
}
} catch (Exception e) {
logger.warn("Could not shut down storage process correctly: ", e);
}
}
} | 3,089 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager/storage/MemcachedStorageProxy.java | package com.netflix.dynomitemanager.storage;
import java.io.IOException;
/**
 * StorageProxy implementation for Memcached. Most lifecycle hooks are
 * no-ops or fixed values: Memcached has no persistence, warm-up, or
 * snapshot support in this sidecar.
 */
public class MemcachedStorageProxy implements StorageProxy {
    private static final String DYNO_MEMCACHED = "memcached";
    private static final int MEMCACHE_PORT = 11211;
    private static final String MEMCACHE_ADDRESS = "127.0.0.1";
    // Fixed: these were per-instance `private final` fields; they are
    // class-wide constants and belong in static final UPPER_SNAKE_CASE form.
    private static final String DEFAULT_MEMCACHED_START_SCRIPT = "/apps/memcached/bin/memcached";
    private static final String DEFAULT_MEMCACHED_STOP_SCRIPT = "/usr/bin/pkill memcached";

    @Override
    public String getEngine() {
        return DYNO_MEMCACHED;
    }

    @Override
    public int getEngineNumber() {
        return 1;
    }

    /** Liveness probing is not implemented for Memcached; always reports dead. */
    @Override
    public boolean isAlive() {
        return false;
    }

    @Override
    public long getUptime() {
        return 0;
    }

    /** Memcached has no warm-up; unconditionally reports success. */
    @Override
    public Bootstrap warmUpStorage(String[] peers) {
        return Bootstrap.IN_SYNC_SUCCESS;
    }

    /** Nothing to reset for Memcached; unconditionally reports success. */
    @Override
    public boolean resetStorage() {
        return true;
    }

    /** Snapshots are not supported by Memcached. */
    @Override
    public boolean takeSnapshot() {
        return false;
    }

    @Override
    public boolean loadingData() {
        return false;
    }

    /** No peer sync exists for Memcached; nothing to stop. */
    @Override
    public void stopPeerSync() {
    }

    /** Memcached needs no configuration rewrite; intentionally a no-op. */
    @Override
    public void updateConfiguration() throws IOException {
    }

    @Override
    public String getStartupScript() {
        return DEFAULT_MEMCACHED_START_SCRIPT;
    }

    @Override
    public String getStopScript() {
        return DEFAULT_MEMCACHED_STOP_SCRIPT;
    }

    @Override
    public String getIpAddress() {
        return MEMCACHE_ADDRESS;
    }

    @Override
    public int getPort() {
        return MEMCACHE_PORT;
    }

    @Override
    public long getStoreMaxMem() {
        return 0;
    }

    @Override
    public long getTotalAvailableSystemMemory() {
        return 0;
    }

    /** Memcached has no unix-domain socket support here; always empty. */
    @Override
    public String getUnixPath() {
        return "";
    }
}
| 3,090 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager/storage/ArdbRocksDbRedisCompatible.java | /**
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dynomitemanager.storage;
import static java.nio.file.StandardCopyOption.COPY_ATTRIBUTES;
import static java.nio.file.StandardOpenOption.TRUNCATE_EXISTING;
import static java.nio.file.StandardOpenOption.WRITE;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Charsets;
import com.netflix.dynomitemanager.config.FloridaConfig;
/**
 * Rewrites the ARDB/RocksDB configuration file (rocksdb.conf) so that the
 * Redis-compatible ARDB engine matches the current Florida configuration:
 * write-buffer sizing, compaction strategy, log level, and redis-compatible
 * mode.
 */
public class ArdbRocksDbRedisCompatible {
    final static String DYNO_ARDB = "ardb-rocksdb";
    final static String DYNO_ARDB_CONF_PATH = "/apps/ardb/conf/rocksdb.conf";
    final static String ARDB_ROCKSDB_START_SCRIPT = "/apps/ardb/bin/launch_ardb.sh";
    final static String ARDB_ROCKSDB_STOP_SCRIPT = "/apps/ardb/bin/kill_ardb.sh";

    private static final Logger logger = LoggerFactory.getLogger(ArdbRocksDbRedisCompatible.class);

    private int writeBufferSize;      // per-memtable size, in MB (appended as "M" below)
    private int maxWriteBufferNumber; // max number of memtables
    private int minWriteBufferToMerge;
    private long storeMaxMem;         // memory budget passed in by the caller
    private String loglevel;
    private String compactionStrategy;

    /**
     * @param storeMaxMem memory budget for the store, as computed by the caller
     * @param config      Florida configuration supplying the RocksDB tunables
     * @throws IllegalArgumentException if the configured compaction strategy is
     *                                  not one of the supported values
     */
    public ArdbRocksDbRedisCompatible(long storeMaxMem, FloridaConfig config) {
        this.writeBufferSize = config.getRocksDBWriteBufferSize();
        this.maxWriteBufferNumber = config.getRocksDBMaxWriteBufferNumber();
        this.minWriteBufferToMerge = config.getRocksDBMinWriteBuffersToMerge();
        this.compactionStrategy = config.getRocksDBCompactionStrategy();
        // Fail fast on unsupported compaction styles (collapsed the original
        // empty-bodied cases into one shared break).
        switch (this.compactionStrategy) {
        case "OptimizeLevelStyleCompaction":
        case "OptimizeUniversalStyleCompaction":
        case "none":
            break;
        default:
            throw new IllegalArgumentException("RocksDB unsupported compaction style: " + this.compactionStrategy);
        }
        this.loglevel = config.getArdbLoglevel();
        this.storeMaxMem = storeMaxMem;
    }

    /**
     * Rewrite one "rocksdb.options" value string (entries separated by ";",
     * possibly ending in a line-continuation "\") with the configured
     * write-buffer settings. Renamed from ConvertRocksDBOptions to follow
     * Java method naming conventions (private, so callers are unaffected).
     *
     * @param rocksDBOptions one physical line's worth of ";"-separated options
     * @return the same options with buffer-related entries replaced
     */
    private String convertRocksDbOptions(String rocksDBOptions) {
        // split the arguments based on the ";"
        String[] allOptions = rocksDBOptions.split(";");
        // String builder to put the properties back
        StringBuilder newProperties = new StringBuilder();
        // parse the properties and replace
        for (String pr : allOptions) {
            logger.info("Checking Property: '" + pr + "'");
            // change the properties to the updated values
            if (pr.contains("write_buffer_size")) {
                pr = "write_buffer_size=" + writeBufferSize + "M";
                logger.info("Updating to: '" + pr + "'");
            } else if (pr.contains("max_write_buffer_number")) {
                pr = "max_write_buffer_number=" + maxWriteBufferNumber;
                logger.info("Updating to: '" + pr + "'");
            } else if (pr.contains("min_write_buffer_number_to_merge")) {
                pr = "min_write_buffer_number_to_merge=" + minWriteBufferToMerge;
                logger.info("Updating to: '" + pr + "'");
            }
            /*
             * Reconstructing: a trailing "\" is a line continuation and must
             * stay at the end of the rebuilt entry (after the ";").
             */
            if (pr.contains("\\")) {
                pr = pr.replace("\\", "");
                if (pr.length() > 0) {
                    pr += ";";
                }
                pr = pr + "\\";
                newProperties.append(pr);
            } else
                newProperties.append(pr + ";");
            logger.info("Appending Property: '" + pr + "'");
        }
        return newProperties.toString();
    }

    /**
     * Rewrite the ARDB/RocksDB conf file in place, backing up the original
     * baked-in version once.
     *
     * NOTE(review): writeBufferSize (MB) is compared directly against
     * storeMaxMem below; confirm the units the caller supplies storeMaxMem in.
     *
     * @param confPathName path of the rocksdb.conf file to rewrite
     * @throws IOException if the conf file cannot be read, backed up, or written
     */
    public void updateConfiguration(String confPathName) throws IOException {
        // If the configured buffers cannot fit in the memory budget, shrink
        // them step-wise (naive heuristic, kept from the original).
        if (this.writeBufferSize * this.maxWriteBufferNumber > this.storeMaxMem) {
            logger.warn("There is not enough memory in the instance. Using writeBufferSize = 128M");
            this.writeBufferSize = 128;
            if (this.writeBufferSize * this.maxWriteBufferNumber > this.storeMaxMem) {
                logger.warn("There is still not enough memory. Using maxWriteBufferNumber = 10");
                this.maxWriteBufferNumber = 10;
            }
        }
        logger.info("Updating ARDB/RocksDB conf: " + confPathName);
        Path confPath = Paths.get(confPathName);
        Path backupPath = Paths.get(confPathName + ".bkp");
        // backup the original baked in conf only and not subsequent updates
        if (!Files.exists(backupPath)) {
            logger.info("Backing up baked in ARDB/RocksDB config at: " + backupPath);
            Files.copy(confPath, backupPath, COPY_ATTRIBUTES);
        }
        // True while inside a multi-line (backslash-continued) rocksdb.options value.
        boolean rocksParse = false;
        // Not using Properties file to load as we want to retain all comments,
        // and for easy diffing with the ami baked version of the conf file.
        List<String> lines = Files.readAllLines(confPath, Charsets.UTF_8);
        // Create a new list to write back the file.
        List<String> newLines = new ArrayList<String>();
        for (int i = 0; i < lines.size(); i++) {
            String line = lines.get(i);
            if (line.startsWith("#")) {
                newLines.add(line);
                continue;
            }
            if (line.matches("^redis-compatible-mode \\s*[a-zA-Z]*")) {
                String compatible = "redis-compatible-mode yes";
                logger.info("Updating ARDB property: " + compatible);
                newLines.add(compatible);
                continue;
            } else if (line.matches("^loglevel \\s*[a-zA-Z]*")) {
                String logLevel = "loglevel " + this.loglevel;
                logger.info("Updating ARDB property: " + logLevel);
                newLines.add(logLevel);
                continue;
            } else if (line.contains("rocksdb.compaction")) {
                logger.info("RocksDB Compaction strategy");
                // Renamed local (was compactionStrategy): it shadowed the field
                // of the same name.
                String compactionLine = "rocksdb.compaction " + this.compactionStrategy;
                logger.info("Updating RocksDB property: +" + compactionLine);
                newLines.add(compactionLine);
            } else if (line.contains("rocksdb.options")) {
                logger.info("RocksDB options");
                rocksParse = true;
                String[] keyValue = line.split("\\s+");
                newLines.add(keyValue[0] + spaces(15) + convertRocksDbOptions(keyValue[1]));
                continue;
            } else if (rocksParse) {
                // we need this for multi-line options parsing
                if (!line.contains("\\")) {
                    rocksParse = false;
                }
                newLines.add(convertRocksDbOptions(line));
                continue;
            } else {
                newLines.add(line);
            }
        }
        Files.write(confPath, newLines, Charsets.UTF_8, WRITE, TRUNCATE_EXISTING);
    }

    /** @return a string of {@code numberOfSpaces} space characters */
    private static String spaces(int numberOfSpaces) {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < numberOfSpaces; i++) {
            sb.append(" ");
        }
        return sb.toString();
    }
}
| 3,091 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager/storage/RedisInfoParser.java | package com.netflix.dynomitemanager.storage;
import java.io.BufferedReader;
import java.io.Reader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
public class RedisInfoParser {
private static final Set<String> WHITE_LIST = new HashSet<String>();
static {
WHITE_LIST.add("uptime_in_seconds");
WHITE_LIST.add("connected_clients");
WHITE_LIST.add("client_longest_output_list");
WHITE_LIST.add("client_biggest_input_buf");
WHITE_LIST.add("blocked_clients");
WHITE_LIST.add("used_memory");
WHITE_LIST.add("used_memory_rss");
WHITE_LIST.add("used_memory_lua");
WHITE_LIST.add("mem_fragmentation_ratio");
WHITE_LIST.add("rdb_changes_since_last_save");
WHITE_LIST.add("rdb_last_save_time");
WHITE_LIST.add("aof_enabled");
WHITE_LIST.add("aof_rewrite_in_progress");
WHITE_LIST.add("total_connections_received");
WHITE_LIST.add("total_commands_processed");
WHITE_LIST.add("instantaneous_ops_per_sec");
WHITE_LIST.add("rejected_connections");
WHITE_LIST.add("expired_keys");
WHITE_LIST.add("evicted_keys");
WHITE_LIST.add("keyspace_hits");
WHITE_LIST.add("keyspace_misses");
WHITE_LIST.add("used_cpu_sys");
WHITE_LIST.add("used_cpu_user");
WHITE_LIST.add("db0");
/**
* The following apply only for ARDB/RocksDB"
*/
WHITE_LIST.add("used_disk_space");
WHITE_LIST.add("rocksdb_memtable_total");
WHITE_LIST.add("rocksdb_memtable_unflushed");
}
/**
* This is to create a constructor for the test cases.
*/
public RedisInfoParser() {
}
public Map<String, Long> parse(Reader inReader) throws Exception {
final Map<String, Long> metrics = new HashMap<String, Long>();
BufferedReader reader = null;
try {
reader = new BufferedReader(inReader);
List<StatsSection> sections = new ArrayList<StatsSection>();
boolean stop = false;
while (!stop) {
StatsSection section = new StatsSection(reader, RuleIter);
section.initSection();
if (section.isEmpty()) {
stop = true;
break;
}
section.parseSectionData();
if (section.data.isEmpty()) {
continue;
}
sections.add(section);
}
for (StatsSection section : sections) {
metrics.putAll(section.getMetrics());
}
} finally {
if (reader != null) {
reader.close();
}
}
return metrics;
}
private class StatsSection {
private final BufferedReader reader;
private String sectionName;
private String sectionNamePrefix = "Redis_";
private final Map<String, Long> data = new HashMap<String, Long>();
private final SectionRule sectionRule;
private StatsSection(BufferedReader br, SectionRule rule) {
reader = br;
sectionRule = rule;
}
private boolean isEmpty() {
return sectionName == null && data.isEmpty();
}
private void initSection() throws Exception {
String line = null;
while ((line = reader.readLine()) != null) {
line = line.trim();
if (!line.startsWith("#")) {
continue;
} else {
break;
}
}
if (line == null) {
return;
}
sectionName = readSectionName(line);
if (sectionName != null && !sectionName.isEmpty()) {
sectionNamePrefix = "Redis_" + sectionName + "_";
}
}
private void parseSectionData() throws Exception {
String line = reader.readLine();
while (line != null && !line.isEmpty()) {
processLine(line.trim());
line = reader.readLine();
}
}
private String readSectionName(String line) {
String[] parts = line.split(" ");
if (parts.length != 2) {
return null;
}
return parts[1];
}
private void processLine(String line) throws Exception {
String[] parts = line.split(":");
if (parts.length != 2) {
return;
}
String name = parts[0];
String sVal = parts[1];
// while list filtering
if (!WHITE_LIST.contains(name))
return;
if (sVal.endsWith("M")) {
sVal = sVal.substring(0, sVal.length() - 1);
}
if (sectionRule.processSection(this, name, sVal)) {
return; // rule already applied. data is processed with custom
// logic
}
// else do generic rule processing
Double val = null;
try {
val = Double.parseDouble(sVal);
} catch (NumberFormatException nfe) {
val = null;
}
if (val != null) {
data.put(name, val.longValue());
}
}
private Map<String, Long> getMetrics() {
Map<String, Long> map = new HashMap<String, Long>();
for (String key : data.keySet()) {
map.put(sectionNamePrefix + key, data.get(key));
}
return map;
}
}
private interface SectionRule {
boolean processSection(StatsSection section, String key, String value);
}
private SectionRule Rule0 = new SectionRule() {
@Override
public boolean processSection(StatsSection section, String key, String value) {
if (section.sectionName.equals("Server")) {
if (key.equals("uptime_in_seconds")) {
try {
Double dVal = Double.parseDouble(value);
section.data.put(key, dVal.longValue());
return true;
} catch (NumberFormatException e) {
}
}
}
return false;
}
};
private SectionRule Rule1 = new SectionRule() {
@Override
public boolean processSection(StatsSection section, String key, String value) {
if (section.sectionName.equals("Memory")) {
if (key.equals("mem_fragmentation_ratio")) {
try {
Double dVal = Double.parseDouble(value);
dVal = dVal * 100;
section.data.put(key, dVal.longValue());
return true;
} catch (NumberFormatException e) {
}
}
}
return false;
}
};
private SectionRule Rule2 = new SectionRule() {
@Override
public boolean processSection(StatsSection section, String key, String value) {
if (section.sectionName.equals("Persistence")) {
if (key.equals("rdb_last_bgsave_status") || key.equals("aof_last_bgrewrite_status")
|| key.equals("aof_last_write_status")) {
Long val = value.equalsIgnoreCase("ok") ? 1L : 0L;
section.data.put(key, val);
return true;
}
}
return false;
}
};
private SectionRule Rule3 = new SectionRule() {
@Override
public boolean processSection(StatsSection section, String key, String value) {
if (section.sectionName.equals("Keyspace")) {
if (key.equals("db0")) {
String[] parts = value.split(",");
for (String part : parts) {
addPart(key, part, section);
}
return true;
}
}
return false;
}
private void addPart(String parentKey, String keyVal, StatsSection section) {
String[] parts = keyVal.split("=");
if (parts.length != 2) {
return;
}
try {
String key = parentKey + "_" + parts[0];
Double dVal = Double.parseDouble(parts[1]);
section.data.put(key, dVal.longValue());
} catch (NumberFormatException e) {
// ignore
}
}
};
private SectionRule RuleIter = new SectionRule() {
SectionRule[] arr = { Rule0, Rule1, Rule2, Rule3 };
final List<SectionRule> rules = Arrays.asList(arr);
@Override
public boolean processSection(StatsSection section, String key, String value) {
for (SectionRule rule : rules) {
if (rule.processSection(section, key, value)) {
return true;
}
}
return false;
}
};
}
| 3,092 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager/aws/S3Restore.java | /**
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dynomitemanager.aws;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.commons.io.IOUtils;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.dynomitemanager.config.FloridaConfig;
import com.netflix.nfsidecar.aws.ICredential;
import com.netflix.nfsidecar.backup.Restore;
import com.netflix.nfsidecar.config.AWSCommonConfig;
import com.netflix.nfsidecar.identity.InstanceIdentity;
import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.GetObjectRequest;
import com.amazonaws.services.s3.model.S3Object;
@Singleton
public class S3Restore implements Restore {
private static final Logger logger = LoggerFactory.getLogger(S3Restore.class);
@Inject
private AWSCommonConfig commonConfig;
@Inject
private FloridaConfig floridaConfig;
@Inject
private ICredential cred;
@Inject
private InstanceIdentity iid;
/**
* Uses the Amazon S3 API to restore from S3
*/
@Override
public boolean restoreData(String dateString) {
long time = restoreTime(dateString);
if (time > -1) {
logger.info("Restoring data from S3.");
AmazonS3Client s3Client = new AmazonS3Client(cred.getAwsCredentialProvider());
try {
/* construct the key for the backup data */
String keyName = commonConfig.getBackupLocation() + "/" + iid.getInstance().getDatacenter() + "/"
+ iid.getInstance().getRack() + "/" + iid.getInstance().getToken() + "/" + time;
logger.info("S3 Bucket Name: " + commonConfig.getBucketName());
logger.info("Key in Bucket: " + keyName);
// Checking if the S3 bucket exists, and if does not, then we
// create it
if (!(s3Client.doesBucketExist(commonConfig.getBucketName()))) {
logger.error("Bucket with name: " + commonConfig.getBucketName() + " does not exist");
} else {
S3Object s3object = s3Client.getObject(new GetObjectRequest(commonConfig.getBucketName(), keyName));
logger.info("Content-Type: " + s3object.getObjectMetadata().getContentType());
String filepath = null;
if (floridaConfig.persistenceType().equals("aof")) {
filepath = floridaConfig.getPersistenceLocation() + "/appendonly.aof";
} else {
filepath = floridaConfig.getPersistenceLocation() + "/nfredis.rdb";
}
IOUtils.copy(s3object.getObjectContent(), new FileOutputStream(new File(filepath)));
}
return true;
} catch (AmazonServiceException ase) {
logger.error(
"AmazonServiceException;" + " request made it to Amazon S3, but was rejected with an error ");
logger.error("Error Message: " + ase.getMessage());
logger.error("HTTP Status Code: " + ase.getStatusCode());
logger.error("AWS Error Code: " + ase.getErrorCode());
logger.error("Error Type: " + ase.getErrorType());
logger.error("Request ID: " + ase.getRequestId());
} catch (AmazonClientException ace) {
logger.error("AmazonClientException;" + " the client encountered "
+ "an internal error while trying to " + "communicate with S3, ");
logger.error("Error Message: " + ace.getMessage());
} catch (IOException io) {
logger.error("File storing error: " + io.getMessage());
}
} else {
logger.error("Date in FP: " + dateString);
}
return false;
}
/**
 * Converts a restore date string into epoch milliseconds at the start of
 * that day (local time zone).
 *
 * @param dateString the date to restore to, formatted as {@code yyyyMMdd}
 * @return epoch millis at the start of the given day, or -1 if the date
 *         string cannot be parsed
 */
private long restoreTime(String dateString) {
    logger.info("Date to restore to: " + dateString);
    try {
        DateTimeFormatter formatter = DateTimeFormat.forPattern("yyyyMMdd");
        // BUG FIX: parseDateTime must be inside the try block. forPattern()
        // never throws for a valid literal pattern; it is parseDateTime()
        // that throws IllegalArgumentException when the fast property is
        // malformed, and the original let that escape uncaught.
        DateTime dt = formatter.parseDateTime(dateString);
        DateTime dateBackup = dt.withTimeAtStartOfDay();
        return dateBackup.getMillis();
    } catch (Exception e) {
        logger.error("Restore fast property not formatted properly " + e.getMessage());
        return -1;
    }
}
}
| 3,093 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager/aws/S3Backup.java | /**
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dynomitemanager.aws;
import java.io.File;
import java.util.ArrayList;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.joda.time.DateTime;
import com.netflix.nfsidecar.aws.ICredential;
import com.netflix.nfsidecar.backup.Backup;
import com.netflix.nfsidecar.config.AWSCommonConfig;
import com.netflix.nfsidecar.identity.InstanceIdentity;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.PartETag;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.UploadPartRequest;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadResult;
import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonServiceException;
import com.google.inject.Inject;
import com.google.inject.Singleton;
@Singleton
public class S3Backup implements Backup {

    private static final Logger logger = LoggerFactory.getLogger(S3Backup.class);

    /*
     * Part size for the multipart upload: 500MB. We do not want this too
     * large, or a single in-flight part could run out of heap space.
     */
    private final long initPartSize = 500 * 1024 * 1024;

    @Inject
    private AWSCommonConfig awsCommonConfig;

    @Inject
    private ICredential cred;

    @Inject
    private InstanceIdentity iid;

    /**
     * Uses the Amazon S3 multipart upload API to upload the AOF/RDB to S3.
     * Key name: backup location + DC + Rack + token + date (epoch millis).
     *
     * @param file       the local snapshot file to upload
     * @param todayStart start-of-day timestamp used as the last key component
     * @return true if the upload completed successfully, false otherwise
     */
    @Override
    public boolean upload(File file, DateTime todayStart) {
        logger.info("Snapshot backup: sending " + file.length() + " bytes to S3");
        /*
         * Key name is comprised of the backupDir + DC + Rack + token + Date
         */
        String keyName = awsCommonConfig.getBackupLocation() + "/" + iid.getInstance().getDatacenter() + "/"
                + iid.getInstance().getRack() + "/" + iid.getInstance().getToken() + "/" + todayStart.getMillis();
        logger.info("Key in Bucket: " + keyName);
        logger.info("S3 Bucket Name:" + awsCommonConfig.getBucketName());
        AmazonS3Client s3Client = new AmazonS3Client(cred.getAwsCredentialProvider());
        try {
            // The bucket must already exist; we do not create it here.
            if (!(s3Client.doesBucketExist(awsCommonConfig.getBucketName()))) {
                logger.error("Bucket with name: " + awsCommonConfig.getBucketName() + " does not exist");
                return false;
            }
            logger.info("Uploading data to S3\n");
            // One PartETag per uploaded part; all of them are required to
            // complete the multipart upload.
            List<PartETag> partETags = new ArrayList<PartETag>();
            InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(
                    awsCommonConfig.getBucketName(), keyName);
            InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);

            long contentLength = file.length();
            long filePosition = 0;
            long partSize = this.initPartSize;
            try {
                for (int i = 1; filePosition < contentLength; i++) {
                    // Last part can be less than initPartSize (500MB).
                    // Adjust part size.
                    partSize = Math.min(partSize, (contentLength - filePosition));
                    // Create request to upload a part.
                    UploadPartRequest uploadRequest = new UploadPartRequest()
                            .withBucketName(awsCommonConfig.getBucketName())
                            .withKey(keyName).withUploadId(initResponse.getUploadId()).withPartNumber(i)
                            .withFileOffset(filePosition).withFile(file).withPartSize(partSize);
                    // Upload part and add response to our list.
                    partETags.add(s3Client.uploadPart(uploadRequest).getPartETag());
                    filePosition += partSize;
                }
                CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(
                        awsCommonConfig.getBucketName(), keyName, initResponse.getUploadId(), partETags);
                s3Client.completeMultipartUpload(compRequest);
                return true;
            } catch (Exception e) {
                // BUG FIX: the original fell through to "return true" even
                // after aborting the upload; an aborted multipart upload is a
                // failed backup. Also log the throwable, not just a message.
                logger.error("Aborting multipart upload due to error", e);
                s3Client.abortMultipartUpload(new AbortMultipartUploadRequest(awsCommonConfig.getBucketName(),
                        keyName, initResponse.getUploadId()));
                return false;
            }
        } catch (AmazonServiceException ase) {
            logger.error("AmazonServiceException;" + " request made it to Amazon S3, but was rejected with an error ");
            logger.error("Error Message: " + ase.getMessage());
            logger.error("HTTP Status Code: " + ase.getStatusCode());
            logger.error("AWS Error Code: " + ase.getErrorCode());
            logger.error("Error Type: " + ase.getErrorType());
            logger.error("Request ID: " + ase.getRequestId());
            return false;
        } catch (AmazonClientException ace) {
            logger.error("AmazonClientException;" + " the client encountered " + "an internal error while trying to "
                    + "communicate with S3, ");
            logger.error("Error Message: " + ace.getMessage());
            return false;
        }
    }
}
| 3,094 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager/dynomite/DynomiteProcessManager.java | package com.netflix.dynomitemanager.dynomite;
import com.google.common.collect.Lists;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.dynomitemanager.config.FloridaConfig;
import com.netflix.dynomitemanager.storage.JedisUtils;
import com.netflix.nfsidecar.identity.IInstanceState;
import com.netflix.nfsidecar.scheduler.SimpleTimer;
import com.netflix.nfsidecar.scheduler.Task;
import com.netflix.nfsidecar.scheduler.TaskTimer;
import com.netflix.nfsidecar.utils.Sleeper;
import com.netflix.runtime.health.api.Health;
import com.netflix.runtime.health.api.HealthIndicator;
import com.netflix.runtime.health.api.HealthIndicatorCallback;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.BufferedReader;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.LinkedList;
import java.util.List;
import redis.clients.jedis.Jedis;
@Singleton
public class DynomiteProcessManager extends Task implements IDynomiteProcess, HealthIndicator {
    private static final Logger logger = LoggerFactory.getLogger(DynomiteProcessManager.class);
    private static final String SUDO_STRING = "/usr/bin/sudo";
    public static final String JOB_TASK_NAME = "DYNOMITE HEALTH TRACKER";
    // Time to wait after launching the start/stop script before reading its
    // exit value; the script is assumed to have finished by then.
    private static final int SCRIPT_EXECUTE_WAIT_TIME_MS = 5000;
    private final FloridaConfig config;
    private final Sleeper sleeper;
    private final IInstanceState instanceState;
    private final IDynomiteProcess dynProcess;
    // Last health verdict computed by execute(); reported to the health
    // subsystem via check().
    private boolean dynomiteHealth = false;

    @Inject
    public DynomiteProcessManager(FloridaConfig config, Sleeper sleeper, IInstanceState instanceState,
            IDynomiteProcess dynProcess) {
        this.config = config;
        this.sleeper = sleeper;
        this.instanceState = instanceState;
        this.dynProcess = dynProcess;
    }

    /** Scheduler timer: run the health-tracker task every 15 seconds. */
    public static TaskTimer getTimer() {
        return new SimpleTimer(JOB_TASK_NAME, 15L * 1000);
    }

    @Override
    public String getName() {
        return JOB_TASK_NAME;
    }

    /**
     * Periodic health probe: Dynomite is healthy only if its OS process is
     * running AND it answers on the client port (checked with retry).
     */
    @Override
    public void execute() throws Exception {
        dynomiteHealth = dynomiteProcessCheck()
                && JedisUtils.isAliveWithRetry(config.getDynomiteLocalAddress(), config.getDynomiteClientPort());
    }

    /**
     * Starts the Dynomite server via the configured start script, prefixing
     * sudo when not running as root. Marks the storage proxy alive on a zero
     * exit code.
     *
     * @throws IOException if the process cannot be launched
     */
    public void start() throws IOException {
        logger.info(String.format("Starting dynomite server"));

        List<String> command = Lists.newArrayList();
        // Not root: escalate with non-interactive sudo, preserving env (-E).
        if (!"root".equals(System.getProperty("user.name"))) {
            command.add(SUDO_STRING);
            command.add("-n");
            command.add("-E");
        }
        command.addAll(getStartCommand());

        ProcessBuilder startDynomite = new ProcessBuilder(command);

        startDynomite.directory(new File("/"));
        startDynomite.redirectErrorStream(true);
        Process starter = startDynomite.start();

        try {
            // Give the script time to finish; exitValue() throws if the
            // process is still running at this point.
            sleeper.sleepQuietly(SCRIPT_EXECUTE_WAIT_TIME_MS);
            int code = starter.exitValue();
            if (code == 0) {
                logger.info("Dynomite server has been started");
                instanceState.setStorageProxyAlive(true);
            } else {
                logger.error("Unable to start Dynomite server. Error code: {}", code);
            }

            logProcessOutput(starter);
        } catch (Exception e) {
            logger.warn("Starting Dynomite has an error", e);
        }
    }

    /** Splits the configured start script into argv tokens, skipping blanks. */
    protected List<String> getStartCommand() {
        List<String> startCmd = new LinkedList<String>();
        for (String param : config.getDynomiteStartScript().split(" ")) {
            if (StringUtils.isNotBlank(param))
                startCmd.add(param);
        }
        return startCmd;
    }

    /** Logs the stdout and stderr of a finished process for diagnostics. */
    void logProcessOutput(Process p) {
        try {
            final String stdOut = readProcessStream(p.getInputStream());
            final String stdErr = readProcessStream(p.getErrorStream());
            logger.info("std_out: {}", stdOut);
            logger.info("std_err: {}", stdErr);
        } catch (IOException ioe) {
            logger.warn("Failed to read the std out/err streams", ioe);
        }
    }

    /** Drains an input stream fully and returns it as a String. */
    String readProcessStream(InputStream inputStream) throws IOException {
        final byte[] buffer = new byte[512];
        final ByteArrayOutputStream baos = new ByteArrayOutputStream(buffer.length);
        int cnt;
        while ((cnt = inputStream.read(buffer)) != -1)
            baos.write(buffer, 0, cnt);
        return baos.toString();
    }

    /**
     * Stops the Dynomite server via the configured stop script (with sudo
     * when not root). Marks the storage proxy dead on a zero exit code.
     *
     * @throws IOException if the process cannot be launched
     */
    public void stop() throws IOException {
        logger.info("Stopping Dynomite server ....");
        List<String> command = Lists.newArrayList();
        if (!"root".equals(System.getProperty("user.name"))) {
            command.add(SUDO_STRING);
            command.add("-n");
            command.add("-E");
        }
        for (String param : config.getDynomiteStopScript().split(" ")) {
            if (StringUtils.isNotBlank(param))
                command.add(param);
        }
        ProcessBuilder stopDyno = new ProcessBuilder(command);
        stopDyno.directory(new File("/"));
        stopDyno.redirectErrorStream(true);
        Process stopper = stopDyno.start();
        // Give the stop script time to finish before reading exitValue().
        sleeper.sleepQuietly(SCRIPT_EXECUTE_WAIT_TIME_MS);
        try {
            int code = stopper.exitValue();
            if (code == 0) {
                logger.info("Dynomite server has been stopped");
                instanceState.setStorageProxyAlive(false);
            } else {
                logger.error("Unable to stop Dynomite server with script " + config.getDynomiteStopScript()
                        + " Error code: {}", code);
                logProcessOutput(stopper);
            }
        } catch (Exception e) {
            logger.warn("couldn't shut down Dynomite correctly", e);
        }
    }

    /**
     * Ping Dynomite to perform a basic health check.
     *
     * @param dynomiteJedis
     *            the Jedis client with a connection to Dynomite.
     * @return true if Dynomite replies to PING with PONG, else false.
     */
    private boolean dynomiteRedisPing(Jedis dynomiteJedis) {
        if (dynomiteJedis.ping().equals("PONG") == false) {
            logger.warn("Pinging Dynomite failed");
            return false;
        }
        logger.info("Dynomite is up and running");
        return true;
    }

    /**
     * Basic health check for Dynomite: PING once, and on failure retry once
     * after a one second pause.
     *
     * @return true if health check passes, or false if health check fails.
     */
    private boolean dynomiteRedisCheck() {
        boolean result = true;
        Jedis dynomiteJedis = new Jedis(config.getDynomiteLocalAddress(), config.getDynomiteClientPort(), 5000);
        try {
            dynomiteJedis.connect();
            if (!dynomiteRedisPing(dynomiteJedis)) {
                sleeper.sleepQuietly(1000);
                if (!dynomiteRedisPing(dynomiteJedis)) {
                    logger.warn("Second effort to ping Dynomite failed");
                    result = false;
                }
            }
        } catch (Exception e) {
            logger.warn("Unable to create a Jedis connection to Dynomite" + e.getMessage());
            result = false;
        }
        dynomiteJedis.disconnect();
        return result;
    }

    /**
     * Dynomite health check performed via Redis PING command. If health check
     * fails, then we stop Dynomite to prevent a zombie Dynomite process (i.e. a
     * situation where Dynomite is running but Redis is stopped).
     *
     * @return true if health check passes and false if it fails.
     */
    public boolean dynomiteCheck() {
        logger.info("Dynomite check with Redis Ping");
        if (!dynomiteRedisCheck()) {
            try {
                logger.error("Dynomite was down");
                this.dynProcess.stop();
                sleeper.sleepQuietly(1000);
                return false;
            } catch (IOException e) {
                logger.error("Dynomite cannot be restarted --> Requires manual restart" + e.getMessage());
            }
        }

        return true;
    }

    /**
     * Checks whether the Dynomite OS process is running by grepping ps output
     * for the configured process name.
     *
     * @return true if a matching process line was found, else false.
     */
    public boolean dynomiteProcessCheck() {
        Process process = null;
        try {
            // The "[/]" trick keeps the grep command itself out of the match.
            String cmd = String.format("ps -ef | grep '[/]apps/%1$s/bin/%1$s'", config.getDynomiteProcessName());
            String[] cmdArray = { "/bin/sh", "-c", cmd };
            logger.debug("Running checkProxyProcess command: " + cmd);

            // This returns pid for the Dynomite process
            process = Runtime.getRuntime().exec(cmdArray);
            BufferedReader input = new BufferedReader(new InputStreamReader(process.getInputStream()));
            String line = input.readLine();
            if (logger.isDebugEnabled()) {
                logger.debug("Output from checkProxyProcess command: " + line);
            }
            return line != null;
        } catch (Exception e) {
            logger.warn("Exception thrown while checking if the process is running or not ", e);
            return false;
        } finally {
            // Close all process streams to avoid leaking file descriptors.
            if (process != null) {
                IOUtils.closeQuietly(process.getInputStream());
                IOUtils.closeQuietly(process.getOutputStream());
                IOUtils.closeQuietly(process.getErrorStream());
            }
        }
    }

    /**
     * HealthIndicator callback: reports the verdict computed by the most
     * recent execute() run.
     */
    public void check(HealthIndicatorCallback healthCallback) {
        if (dynomiteHealth) {
            healthCallback.inform(Health.healthy().withDetail("Dynomite", "All good!").build());
        } else {
            logger.info("Reporting Dynomite is down to Health check callback");
            healthCallback.inform(Health.unhealthy().withDetail("Dynomite", "Down!").build());
        }
    }
}
| 3,095 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager/dynomite/DynomiteRest.java | package com.netflix.dynomitemanager.dynomite;
import org.apache.commons.httpclient.DefaultHttpMethodRetryHandler;
import org.apache.commons.httpclient.HttpClient;
import org.apache.commons.httpclient.methods.GetMethod;
import org.apache.commons.httpclient.params.HttpMethodParams;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.config.DynamicPropertyFactory;
import com.netflix.config.DynamicStringProperty;
/**
 * Thin REST client for Dynomite's local admin (stats) port, used to push
 * runtime configuration such as consistency levels.
 */
public class DynomiteRest {
    private static final Logger logger = LoggerFactory.getLogger(DynomiteRest.class);

    /**
     * Issues an HTTP GET against the Dynomite admin endpoint.
     *
     * @param cmd the admin path to invoke, e.g. {@code /set_consistency/read/...}
     * @return true if the call returned HTTP 200 with a non-empty body,
     *         false otherwise
     */
    public static boolean sendCommand(String cmd) {
        DynamicStringProperty adminUrl =
                DynamicPropertyFactory.getInstance().getStringProperty("florida.metrics.url", "http://localhost:22222");
        String url = adminUrl.get() + cmd;
        HttpClient client = new HttpClient();
        client.getParams().setParameter(HttpMethodParams.RETRY_HANDLER,
                new DefaultHttpMethodRetryHandler());
        GetMethod get = new GetMethod(url);
        try {
            int statusCode = client.executeMethod(get);
            if (!(statusCode == 200)) {
                logger.error("Got non 200 status code from " + url);
                return false;
            }
            String response = get.getResponseBodyAsString();
            if (!response.isEmpty()) {
                logger.info("Received response from " + url + "\n" + response);
            } else {
                logger.error("Cannot parse empty response from " + url);
                return false;
            }
        } catch (Exception e) {
            logger.error("Failed to sendCommand and invoke url: " + url, e);
            return false;
        } finally {
            // BUG FIX: commons-httpclient 3.x requires releasing the method's
            // connection back to the client; the original leaked it on every
            // call.
            get.releaseConnection();
        }
        logger.info("Dynomite REST completed succesfully: " + url);
        return true;
    }
}
| 3,096 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager/dynomite/DynomiteYamlTask.java | /**
* Copyright 2013 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.dynomitemanager.dynomite;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.dynomitemanager.config.FloridaConfig;
import com.netflix.nfsidecar.scheduler.SimpleTimer;
import com.netflix.nfsidecar.scheduler.Task;
import com.netflix.nfsidecar.scheduler.TaskTimer;
import com.netflix.nfsidecar.utils.ProcessTuner;
/**
 * Scheduled task that (re)writes the dynomite.yml configuration file via the
 * injected {@link ProcessTuner}, keeping it in sync with the current config.
 */
@Singleton
public class DynomiteYamlTask extends Task {
    public static final String JOBNAME = "Tune-Task";

    private final ProcessTuner tuner;
    private final FloridaConfig config;

    @Inject
    public DynomiteYamlTask(FloridaConfig config, ProcessTuner tuner) {
        this.config = config;
        this.tuner = tuner;
    }

    /** Writes all tuned properties to the configured dynomite YAML path. */
    public void execute() throws Exception {
        tuner.writeAllProperties(config.getDynomiteYaml());
    }

    @Override
    public String getName() {
        // CONSISTENCY FIX: return the JOBNAME constant instead of repeating
        // the "Tune-Task" literal (same value, single source of truth).
        return JOBNAME;
    }

    /** Scheduler timer: update the YML every 60 seconds. */
    public static TaskTimer getTimer() {
        return new SimpleTimer(JOBNAME, 60L * 1000);
    }
}
| 3,097 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager/dynomite/IDynomiteProcess.java | package com.netflix.dynomitemanager.dynomite;
import java.io.IOException;
/**
 * Interface to aid in starting and stopping Dynomite.
 *
 */
public interface IDynomiteProcess
{
    // Starts the Dynomite server process; throws IOException if the launch fails.
    void start() throws IOException;

    // Stops the Dynomite server process; throws IOException if the launch of
    // the stop command fails.
    void stop() throws IOException;

    // Health check via the storage layer (Redis PING); returns false when
    // Dynomite is unresponsive.
    boolean dynomiteCheck();

    // Checks whether the Dynomite OS process is currently running.
    boolean dynomiteProcessCheck();
}
| 3,098 |
0 | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager | Create_ds/dynomite-manager/dynomitemanager-core/src/main/java/com/netflix/dynomitemanager/dynomite/ProxyAndStorageResetTask.java | package com.netflix.dynomitemanager.dynomite;
import java.io.IOException;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import redis.clients.jedis.Jedis;
import com.netflix.dynomitemanager.config.FloridaConfig;
import com.netflix.dynomitemanager.storage.StorageProxy;
import com.netflix.nfsidecar.scheduler.Task;
import com.netflix.nfsidecar.utils.Sleeper;
/**
 * One-shot task that resets the storage backend, verifies Dynomite is
 * reachable (restarting it if not), and pushes the configured read/write
 * consistency levels to Dynomite's admin port.
 */
@Singleton
public class ProxyAndStorageResetTask extends Task {
    public static final String JOBNAME = "ProxyResetTask-Task";
    private static final Logger logger = LoggerFactory.getLogger(ProxyAndStorageResetTask.class);

    private final IDynomiteProcess dynProcess;
    private final StorageProxy storageProxy;
    private final Sleeper sleeper;
    private final FloridaConfig config;

    @Inject
    public ProxyAndStorageResetTask(FloridaConfig config, IDynomiteProcess dynProcess, StorageProxy storageProxy,
            Sleeper sleeper) {
        this.config = config;
        this.storageProxy = storageProxy;
        this.dynProcess = dynProcess;
        this.sleeper = sleeper;
    }

    public void execute() throws IOException {
        storageProxy.resetStorage();
        dynomiteCheck();
        setConsistency();
    }

    @Override
    public String getName() {
        return JOBNAME;
    }

    /** Pushes the configured read/write consistency levels via the admin REST port. */
    private void setConsistency() {
        logger.info("Setting the consistency level for the cluster");
        if (!DynomiteRest.sendCommand("/set_consistency/read/" + config.getDynomiteReadConsistency()))
            logger.error("REST call to Dynomite for read consistency failed --> using the default");

        if (!DynomiteRest.sendCommand("/set_consistency/write/" + config.getDynomiteWriteConsistency()))
            logger.error("REST call to Dynomite for write consistency failed --> using the default");
    }

    /**
     * Stop-pause-start cycle for Dynomite; logs (but does not propagate) a
     * failure, which then requires a manual restart.
     */
    private void restartDynomite() {
        try {
            this.dynProcess.stop();
            sleeper.sleepQuietly(1000);
            this.dynProcess.start();
        } catch (IOException e) {
            logger.error("Dynomite cannot be restarted --> Requires manual restart" + e.getMessage());
        }
    }

    /**
     * Pings Dynomite on its client port; retries once after 1 second, and
     * restarts Dynomite when both pings fail or the connection cannot be made.
     */
    private void dynomiteCheck() {
        Jedis dynomiteJedis = new Jedis(config.getDynomiteLocalAddress(), config.getDynomiteClientPort(), 5000);
        logger.info("Checking Dynomite's status");
        try {
            dynomiteJedis.connect();
            if (!"PONG".equals(dynomiteJedis.ping())) {
                logger.warn("Pinging Dynomite failed ---> trying again after 1 sec");
                sleeper.sleepQuietly(1000);
                if (!"PONG".equals(dynomiteJedis.ping())) {
                    // DEDUP: identical restart sequence was inlined twice in
                    // the original; extracted to restartDynomite().
                    restartDynomite();
                } else {
                    logger.info("Dynomite is up and running");
                }
            } else {
                logger.info("Dynomite is up and running");
            }
        } catch (Exception e) {
            logger.warn("Unable to connect to Dynomite --> restarting: " + e.getMessage());
            restartDynomite();
        } finally {
            // BUG FIX: the original never disconnected the Jedis client,
            // leaking a connection on every run.
            dynomiteJedis.disconnect();
        }
    }
}
| 3,099 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.